code
stringlengths
114
1.05M
path
stringlengths
3
312
quality_prob
float64
0.5
0.99
learning_prob
float64
0.2
1
filename
stringlengths
3
168
kind
stringclasses
1 value
package shared

import (
	"errors"
)

// Each packet keeps explicit zero-byte fields so it is easier to see how
// the fields are chained together to form the wire format of a packet.

// RRQWRQPacket is a TFTP read (opcode 01) or write (opcode 02) request.
type RRQWRQPacket struct {
	Opcode   []byte // 01/02
	Filename string
	zero     byte // wire separator after Filename, always 0
	Mode     string // octet only for assignment
	zeroTwo  byte // wire separator after Mode, always 0
	Options  map[string]string
}

// DataPacket carries one block of file data.
type DataPacket struct {
	Opcode      []byte // 03
	BlockNumber []byte
	Data        []byte
}

// ACKPacket acknowledges a data block, or negotiated options when IsOACK
// is set.
type ACKPacket struct {
	Opcode      []byte // 04/06
	BlockNumber []byte
	IsOACK      bool
	Options     map[string]string
}

// ErrorPacket reports a transfer error.
type ErrorPacket struct {
	Opcode       []byte // 05
	ErrorCode    []byte // 00 - 08
	ErrorMessage string
	zero         byte // wire terminator after ErrorMessage, always 0
}

// SlidingWindowPacket announces sliding-window mode (opcode 07).
type SlidingWindowPacket struct {
	Opcode []byte // 07
}

// ArrayBytesHelper wraps a byte slice so a slice of them can stand in for
// a 2D array. Retained for backward compatibility with existing callers.
type ArrayBytesHelper struct {
	Bytes []byte
}

// Parse errors shared by the Read* functions.
var (
	errPacketTooShort       = errors.New("packet is too short")
	errMissingOptionValues  = errors.New("options are missing values")
	errMalformedRequest     = errors.New("error parsing the packet")
)

// CreateRRQWRQPacket creates a read (RRQ) or write (WRQ) request packet.
func CreateRRQWRQPacket(isRRQ bool, fileName string, options map[string]string) *RRQWRQPacket {
	z := &RRQWRQPacket{
		Filename: fileName,
		Mode:     "octet",
		Options:  options,
	}
	if isRRQ {
		z.Opcode = []byte{0, 1}
	} else {
		z.Opcode = []byte{0, 2}
	}
	return z
}

// CreateDataPacket creates a Data packet for the given block number.
func CreateDataPacket(blockNumber []byte, data []byte) *DataPacket {
	return &DataPacket{
		Opcode:      []byte{0, 3},
		BlockNumber: blockNumber,
		Data:        data,
	}
}

// CreateACKPacket creates an ACK packet; the caller fills in BlockNumber.
func CreateACKPacket() *ACKPacket {
	return &ACKPacket{Opcode: []byte{0, 4}}
}

// CreateErrorPacket creates an Error packet with the given code and message.
func CreateErrorPacket(errorCode []byte, errorMessage string) *ErrorPacket {
	return &ErrorPacket{
		Opcode:       []byte{0, 5},
		ErrorCode:    errorCode,
		ErrorMessage: errorMessage,
	}
}

// CreateSlidingWindowPacket creates a sliding-window packet.
func CreateSlidingWindowPacket() *SlidingWindowPacket {
	return &SlidingWindowPacket{Opcode: []byte{0, 7}}
}

// ByteArray serialises the request packet into its wire format:
// opcode | filename | 0 | mode | 0 | (option | 0 | value | 0)*.
// Option order follows Go's map iteration order (unspecified).
func (z *RRQWRQPacket) ByteArray() []byte {
	var out []byte
	out = append(out, z.Opcode...)
	out = append(out, z.Filename...)
	out = append(out, z.zero)
	out = append(out, z.Mode...)
	out = append(out, z.zeroTwo)
	for k, v := range z.Options {
		out = append(out, k...)
		out = append(out, 0)
		out = append(out, v...)
		out = append(out, 0)
	}
	return out
}

// ByteArray serialises the data packet: opcode | block number | data.
func (d *DataPacket) ByteArray() []byte {
	var out []byte
	out = append(out, d.Opcode...)
	out = append(out, d.BlockNumber...)
	out = append(out, d.Data...)
	return out
}

// ByteArray serialises the ACK packet. A plain ACK carries the block
// number; an OACK carries the negotiated option pairs instead.
func (a *ACKPacket) ByteArray() []byte {
	var out []byte
	out = append(out, a.Opcode...)
	if !a.IsOACK {
		out = append(out, a.BlockNumber...)
		return out
	}
	for k, v := range a.Options {
		out = append(out, k...)
		out = append(out, 0)
		out = append(out, v...)
		out = append(out, 0)
	}
	return out
}

// ByteArray serialises the error packet:
// opcode | error code | message | 0.
func (e *ErrorPacket) ByteArray() []byte {
	var out []byte
	out = append(out, e.Opcode...)
	out = append(out, e.ErrorCode...)
	out = append(out, e.ErrorMessage...)
	out = append(out, e.zero)
	return out
}

// ByteArray serialises the sliding-window packet (opcode only).
func (sw *SlidingWindowPacket) ByteArray() []byte {
	var out []byte
	out = append(out, sw.Opcode...)
	return out
}

// splitZeroTerminated splits data into the zero-terminated fields it
// contains. Trailing bytes without a terminating zero are dropped, which
// matches the previous parsing behaviour.
func splitZeroTerminated(data []byte) [][]byte {
	var fields [][]byte
	start := 0
	for i, b := range data {
		if b == 0 {
			fields = append(fields, data[start:i])
			start = i + 1
		}
	}
	return fields
}

// parseOptions maps alternating key/value fields into an options map.
// It errors when a key has no matching value.
func parseOptions(fields [][]byte) (map[string]string, error) {
	if len(fields)%2 != 0 {
		return nil, errMissingOptionValues
	}
	options := make(map[string]string, len(fields)/2)
	for i := 0; i+1 < len(fields); i += 2 {
		options[string(fields[i])] = string(fields[i+1])
	}
	return options, nil
}

// ReadRRQWRQPacket parses data into an RRQ or WRQ packet. It returns an
// error (instead of panicking, as it previously could) when the packet is
// truncated or the filename/mode fields are missing.
func ReadRRQWRQPacket(data []byte) (p *RRQWRQPacket, err error) {
	if len(data) < 2 {
		return nil, errPacketTooShort
	}
	packet := RRQWRQPacket{Opcode: data[:2]}
	fields := splitZeroTerminated(data[2:])
	if len(fields) < 2 {
		return nil, errMalformedRequest
	}
	packet.Filename = string(fields[0])
	packet.Mode = string(fields[1])
	if len(fields) > 2 {
		// Anything after filename and mode is option key/value pairs.
		options, err := parseOptions(fields[2:])
		if err != nil {
			return nil, err
		}
		packet.Options = options
	}
	return &packet, nil
}

// ReadDataPacket parses data into a Data packet. Packets shorter than the
// 4-byte header are rejected.
func ReadDataPacket(data []byte) (d *DataPacket, err error) {
	if len(data) < 4 {
		return nil, errPacketTooShort
	}
	return &DataPacket{
		Opcode:      data[:2],
		BlockNumber: data[2:4],
		Data:        data[4:],
	}, nil
}

// ReadACKPacket parses data into an ACK packet.
func ReadACKPacket(data []byte) (a *ACKPacket, err error) {
	if len(data) < 4 {
		return nil, errPacketTooShort
	}
	return &ACKPacket{
		Opcode:      data[:2],
		BlockNumber: data[2:4],
	}, nil
}

// ReadOACKPacket parses data into an OACK packet, extracting the
// negotiated option pairs when present.
func ReadOACKPacket(data []byte) (a *ACKPacket, err error) {
	if len(data) < 2 {
		return nil, errPacketTooShort
	}
	packet := ACKPacket{
		Opcode: data[:2],
		IsOACK: true,
	}
	fields := splitZeroTerminated(data[2:])
	if len(fields) >= 2 {
		options, err := parseOptions(fields)
		if err != nil {
			return nil, err
		}
		packet.Options = options
	}
	return &packet, nil
}

// ReadErrorPacket parses data into an Error packet. The trailing zero
// terminator is stripped from the message; truncated packets are rejected.
func ReadErrorPacket(data []byte) (e *ErrorPacket, err error) {
	if len(data) < 5 {
		return nil, errPacketTooShort
	}
	return &ErrorPacket{
		Opcode:       data[:2],
		ErrorCode:    data[2:4],
		ErrorMessage: string(data[4 : len(data)-1]),
	}, nil
}
shared/Packets.go
0.710729
0.415195
Packets.go
starcoder
package parquet

import (
	"bytes"
	"fmt"

	"github.com/segmentio/parquet-go/deprecated"
	"github.com/segmentio/parquet-go/encoding"
	"github.com/segmentio/parquet-go/format"
)

// Singleton Type values for the parquet physical (primitive) types.
var (
	BooleanType   Type = booleanType{}
	Int32Type     Type = int32Type{}
	Int64Type     Type = int64Type{}
	Int96Type     Type = int96Type{}
	FloatType     Type = floatType{}
	DoubleType    Type = doubleType{}
	ByteArrayType Type = byteArrayType{}
)

// primitiveType provides the parts of the Type interface that are shared
// by all physical types: type-defined column ordering and no
// logical/converted type annotation.
type primitiveType struct{}

func (t primitiveType) ColumnOrder() *format.ColumnOrder { return &typeDefinedColumnOrder }

func (t primitiveType) LogicalType() *format.LogicalType { return nil }

func (t primitiveType) ConvertedType() *deprecated.ConvertedType { return nil }

// booleanType implements Type for the BOOLEAN physical type.
type booleanType struct{ primitiveType }

func (t booleanType) String() string { return "BOOLEAN" }

func (t booleanType) Kind() Kind { return Boolean }

// NOTE(review): Length appears to report the value size in bits for the
// fixed-size types below — confirm against the Type interface contract.
func (t booleanType) Length() int { return 1 }

func (t booleanType) Compare(a, b Value) int { return compareBool(a.Boolean(), b.Boolean()) }

func (t booleanType) PhysicalType() *format.Type { return &physicalTypes[Boolean] }

func (t booleanType) NewColumnIndexer(sizeLimit int) ColumnIndexer { return newBooleanColumnIndexer() }

func (t booleanType) NewDictionary(columnIndex, bufferSize int) Dictionary { return newBooleanDictionary(t, makeColumnIndex(columnIndex), bufferSize) }

func (t booleanType) NewColumnBuffer(columnIndex, bufferSize int) ColumnBuffer { return newBooleanColumnBuffer(t, makeColumnIndex(columnIndex), bufferSize) }

func (t booleanType) NewColumnReader(columnIndex, bufferSize int) ColumnReader { return newBooleanColumnReader(t, makeColumnIndex(columnIndex), bufferSize) }

func (t booleanType) ReadDictionary(columnIndex, numValues int, decoder encoding.Decoder) (Dictionary, error) { return readBooleanDictionary(t, makeColumnIndex(columnIndex), numValues, decoder) }

// int32Type implements Type for the INT32 physical type.
type int32Type struct{ primitiveType }

func (t int32Type) String() string { return "INT32" }

func (t int32Type) Kind() Kind { return Int32 }

func (t int32Type) Length() int { return 32 }

func (t int32Type) Compare(a, b Value) int { return compareInt32(a.Int32(), b.Int32()) }

func (t int32Type) PhysicalType() *format.Type { return &physicalTypes[Int32] }

func (t int32Type) NewColumnIndexer(sizeLimit int) ColumnIndexer { return newInt32ColumnIndexer() }

func (t int32Type) NewDictionary(columnIndex, bufferSize int) Dictionary { return newInt32Dictionary(t, makeColumnIndex(columnIndex), bufferSize) }

func (t int32Type) NewColumnBuffer(columnIndex, bufferSize int) ColumnBuffer { return newInt32ColumnBuffer(t, makeColumnIndex(columnIndex), bufferSize) }

func (t int32Type) NewColumnReader(columnIndex, bufferSize int) ColumnReader { return newInt32ColumnReader(t, makeColumnIndex(columnIndex), bufferSize) }

func (t int32Type) ReadDictionary(columnIndex, numValues int, decoder encoding.Decoder) (Dictionary, error) { return readInt32Dictionary(t, makeColumnIndex(columnIndex), numValues, decoder) }

// int64Type implements Type for the INT64 physical type.
type int64Type struct{ primitiveType }

func (t int64Type) String() string { return "INT64" }

func (t int64Type) Kind() Kind { return Int64 }

func (t int64Type) Length() int { return 64 }

func (t int64Type) Compare(a, b Value) int { return compareInt64(a.Int64(), b.Int64()) }

func (t int64Type) PhysicalType() *format.Type { return &physicalTypes[Int64] }

func (t int64Type) NewColumnIndexer(sizeLimit int) ColumnIndexer { return newInt64ColumnIndexer() }

func (t int64Type) NewDictionary(columnIndex, bufferSize int) Dictionary { return newInt64Dictionary(t, makeColumnIndex(columnIndex), bufferSize) }

func (t int64Type) NewColumnBuffer(columnIndex, bufferSize int) ColumnBuffer { return newInt64ColumnBuffer(t, makeColumnIndex(columnIndex), bufferSize) }

func (t int64Type) NewColumnReader(columnIndex, bufferSize int) ColumnReader { return newInt64ColumnReader(t, makeColumnIndex(columnIndex), bufferSize) }

func (t int64Type) ReadDictionary(columnIndex, numValues int, decoder encoding.Decoder) (Dictionary, error) { return readInt64Dictionary(t, makeColumnIndex(columnIndex), numValues, decoder) }

// int96Type implements Type for the INT96 physical type.
type int96Type struct{ primitiveType }

func (t int96Type) String() string { return "INT96" }

func (t int96Type) Kind() Kind { return Int96 }

func (t int96Type) Length() int { return 96 }

func (t int96Type) Compare(a, b Value) int { return compareInt96(a.Int96(), b.Int96()) }

func (t int96Type) PhysicalType() *format.Type { return &physicalTypes[Int96] }

func (t int96Type) NewColumnIndexer(sizeLimit int) ColumnIndexer { return newInt96ColumnIndexer() }

func (t int96Type) NewDictionary(columnIndex, bufferSize int) Dictionary { return newInt96Dictionary(t, makeColumnIndex(columnIndex), bufferSize) }

func (t int96Type) NewColumnBuffer(columnIndex, bufferSize int) ColumnBuffer { return newInt96ColumnBuffer(t, makeColumnIndex(columnIndex), bufferSize) }

func (t int96Type) NewColumnReader(columnIndex, bufferSize int) ColumnReader { return newInt96ColumnReader(t, makeColumnIndex(columnIndex), bufferSize) }

func (t int96Type) ReadDictionary(columnIndex, numValues int, decoder encoding.Decoder) (Dictionary, error) { return readInt96Dictionary(t, makeColumnIndex(columnIndex), numValues, decoder) }

// floatType implements Type for the FLOAT physical type.
type floatType struct{ primitiveType }

func (t floatType) String() string { return "FLOAT" }

func (t floatType) Kind() Kind { return Float }

func (t floatType) Length() int { return 32 }

func (t floatType) Compare(a, b Value) int { return compareFloat32(a.Float(), b.Float()) }

func (t floatType) PhysicalType() *format.Type { return &physicalTypes[Float] }

func (t floatType) NewColumnIndexer(sizeLimit int) ColumnIndexer { return newFloatColumnIndexer() }

func (t floatType) NewDictionary(columnIndex, bufferSize int) Dictionary { return newFloatDictionary(t, makeColumnIndex(columnIndex), bufferSize) }

func (t floatType) NewColumnBuffer(columnIndex, bufferSize int) ColumnBuffer { return newFloatColumnBuffer(t, makeColumnIndex(columnIndex), bufferSize) }

func (t floatType) NewColumnReader(columnIndex, bufferSize int) ColumnReader { return newFloatColumnReader(t, makeColumnIndex(columnIndex), bufferSize) }

func (t floatType) ReadDictionary(columnIndex, numValues int, decoder encoding.Decoder) (Dictionary, error) { return readFloatDictionary(t, makeColumnIndex(columnIndex), numValues, decoder) }

// doubleType implements Type for the DOUBLE physical type.
type doubleType struct{ primitiveType }

func (t doubleType) String() string { return "DOUBLE" }

func (t doubleType) Kind() Kind { return Double }

func (t doubleType) Length() int { return 64 }

func (t doubleType) Compare(a, b Value) int { return compareFloat64(a.Double(), b.Double()) }

func (t doubleType) PhysicalType() *format.Type { return &physicalTypes[Double] }

func (t doubleType) NewColumnIndexer(sizeLimit int) ColumnIndexer { return newDoubleColumnIndexer() }

func (t doubleType) NewDictionary(columnIndex, bufferSize int) Dictionary { return newDoubleDictionary(t, makeColumnIndex(columnIndex), bufferSize) }

func (t doubleType) NewColumnBuffer(columnIndex, bufferSize int) ColumnBuffer { return newDoubleColumnBuffer(t, makeColumnIndex(columnIndex), bufferSize) }

func (t doubleType) NewColumnReader(columnIndex, bufferSize int) ColumnReader { return newDoubleColumnReader(t, makeColumnIndex(columnIndex), bufferSize) }

func (t doubleType) ReadDictionary(columnIndex, numValues int, decoder encoding.Decoder) (Dictionary, error) { return readDoubleDictionary(t, makeColumnIndex(columnIndex), numValues, decoder) }

// byteArrayType implements Type for the variable-length BYTE_ARRAY
// physical type; Length is 0 because values have no fixed size.
type byteArrayType struct{ primitiveType }

func (t byteArrayType) String() string { return "BYTE_ARRAY" }

func (t byteArrayType) Kind() Kind { return ByteArray }

func (t byteArrayType) Length() int { return 0 }

func (t byteArrayType) Compare(a, b Value) int { return bytes.Compare(a.ByteArray(), b.ByteArray()) }

func (t byteArrayType) PhysicalType() *format.Type { return &physicalTypes[ByteArray] }

func (t byteArrayType) NewColumnIndexer(sizeLimit int) ColumnIndexer { return newByteArrayColumnIndexer(sizeLimit) }

func (t byteArrayType) NewDictionary(columnIndex, bufferSize int) Dictionary { return newByteArrayDictionary(t, makeColumnIndex(columnIndex), bufferSize) }

func (t byteArrayType) NewColumnBuffer(columnIndex, bufferSize int) ColumnBuffer { return newByteArrayColumnBuffer(t, makeColumnIndex(columnIndex), bufferSize) }

func (t byteArrayType) NewColumnReader(columnIndex, bufferSize int) ColumnReader { return newByteArrayColumnReader(t, makeColumnIndex(columnIndex), bufferSize) }

func (t byteArrayType) ReadDictionary(columnIndex, numValues int, decoder encoding.Decoder) (Dictionary, error) { return readByteArrayDictionary(t, makeColumnIndex(columnIndex), numValues, decoder) }

// fixedLenByteArrayType implements Type for FIXED_LEN_BYTE_ARRAY values
// of a specific length; unlike the types above it carries state (length)
// and therefore uses pointer receivers.
type fixedLenByteArrayType struct {
	primitiveType
	length int
}

func (t *fixedLenByteArrayType) String() string {
	return fmt.Sprintf("FIXED_LEN_BYTE_ARRAY(%d)", t.length)
}

func (t *fixedLenByteArrayType) Kind() Kind { return FixedLenByteArray }

func (t *fixedLenByteArrayType) Length() int { return t.length }

func (t *fixedLenByteArrayType) Compare(a, b Value) int { return bytes.Compare(a.ByteArray(), b.ByteArray()) }

func (t *fixedLenByteArrayType) PhysicalType() *format.Type { return &physicalTypes[FixedLenByteArray] }

func (t *fixedLenByteArrayType) NewColumnIndexer(sizeLimit int) ColumnIndexer { return newFixedLenByteArrayColumnIndexer(t.length, sizeLimit) }

func (t *fixedLenByteArrayType) NewDictionary(columnIndex, bufferSize int) Dictionary { return newFixedLenByteArrayDictionary(t, makeColumnIndex(columnIndex), bufferSize) }

func (t *fixedLenByteArrayType) NewColumnBuffer(columnIndex, bufferSize int) ColumnBuffer { return newFixedLenByteArrayColumnBuffer(t, makeColumnIndex(columnIndex), bufferSize) }

func (t *fixedLenByteArrayType) NewColumnReader(columnIndex, bufferSize int) ColumnReader { return newFixedLenByteArrayColumnReader(t, makeColumnIndex(columnIndex), bufferSize) }

func (t *fixedLenByteArrayType) ReadDictionary(columnIndex, numValues int, decoder encoding.Decoder) (Dictionary, error) { return readFixedLenByteArrayDictionary(t, makeColumnIndex(columnIndex), numValues, decoder) }

// FixedLenByteArrayType constructs a type for fixed-length values of the given
// size (in bytes).
func FixedLenByteArrayType(length int) Type { return &fixedLenByteArrayType{length: length} }

// intType methods: dispatch to the int32/int64 (or uint32/uint64)
// implementations based on the logical type's bit width and signedness.

func (t *intType) NewColumnIndexer(sizeLimit int) ColumnIndexer {
	if t.IsSigned {
		if t.BitWidth == 64 {
			return newInt64ColumnIndexer()
		} else {
			return newInt32ColumnIndexer()
		}
	} else {
		if t.BitWidth == 64 {
			return newUint64ColumnIndexer()
		} else {
			return newUint32ColumnIndexer()
		}
	}
}

func (t *intType) NewDictionary(columnIndex, bufferSize int) Dictionary {
	if t.IsSigned {
		if t.BitWidth == 64 {
			return newInt64Dictionary(t, makeColumnIndex(columnIndex), bufferSize)
		} else {
			return newInt32Dictionary(t, makeColumnIndex(columnIndex), bufferSize)
		}
	} else {
		if t.BitWidth == 64 {
			return newUint64Dictionary(t, makeColumnIndex(columnIndex), bufferSize)
		} else {
			return newUint32Dictionary(t, makeColumnIndex(columnIndex), bufferSize)
		}
	}
}

func (t *intType) NewColumnBuffer(columnIndex, bufferSize int) ColumnBuffer {
	if t.IsSigned {
		if t.BitWidth == 64 {
			return newInt64ColumnBuffer(t, makeColumnIndex(columnIndex), bufferSize)
		} else {
			return newInt32ColumnBuffer(t, makeColumnIndex(columnIndex), bufferSize)
		}
	} else {
		if t.BitWidth == 64 {
			return newUint64ColumnBuffer(t, makeColumnIndex(columnIndex), bufferSize)
		} else {
			return newUint32ColumnBuffer(t, makeColumnIndex(columnIndex), bufferSize)
		}
	}
}

// NOTE(review): unlike the other intType methods, NewColumnReader does not
// branch on IsSigned — unsigned columns get the signed readers. Confirm
// this is intentional (readers may be sign-agnostic at this level).
func (t *intType) NewColumnReader(columnIndex, bufferSize int) ColumnReader {
	if t.BitWidth == 64 {
		return newInt64ColumnReader(t, makeColumnIndex(columnIndex), bufferSize)
	} else {
		return newInt32ColumnReader(t, makeColumnIndex(columnIndex), bufferSize)
	}
}

func (t *intType) ReadDictionary(columnIndex, numValues int, decoder encoding.Decoder) (Dictionary, error) {
	if t.IsSigned {
		if t.BitWidth == 64 {
			return readInt64Dictionary(t, makeColumnIndex(columnIndex), numValues, decoder)
		} else {
			return readInt32Dictionary(t, makeColumnIndex(columnIndex), numValues, decoder)
		}
	} else {
		if t.BitWidth == 64 {
			return readUint64Dictionary(t, makeColumnIndex(columnIndex), numValues, decoder)
		} else {
			return readUint32Dictionary(t, makeColumnIndex(columnIndex), numValues, decoder)
		}
	}
}

// dateType methods: DATE is stored as INT32 (days), so everything
// delegates to the int32 implementations.

func (t *dateType) NewColumnIndexer(sizeLimit int) ColumnIndexer { return newInt32ColumnIndexer() }

func (t *dateType) NewDictionary(columnIndex, bufferSize int) Dictionary { return newInt32Dictionary(t, makeColumnIndex(columnIndex), bufferSize) }

func (t *dateType) NewColumnBuffer(columnIndex, bufferSize int) ColumnBuffer { return newInt32ColumnBuffer(t, makeColumnIndex(columnIndex), bufferSize) }

func (t *dateType) NewColumnReader(columnIndex, bufferSize int) ColumnReader { return newInt32ColumnReader(t, makeColumnIndex(columnIndex), bufferSize) }

func (t *dateType) ReadDictionary(columnIndex, numValues int, decoder encoding.Decoder) (Dictionary, error) { return readInt32Dictionary(t, makeColumnIndex(columnIndex), numValues, decoder) }

// timeType methods: millisecond-precision times are stored as INT32,
// micro/nano precision as INT64, so dispatch on the declared unit.

func (t *timeType) NewColumnIndexer(sizeLimit int) ColumnIndexer {
	if t.Unit.Millis != nil {
		return newInt32ColumnIndexer()
	} else {
		return newInt64ColumnIndexer()
	}
}

func (t *timeType) NewDictionary(columnIndex, bufferSize int) Dictionary {
	if t.Unit.Millis != nil {
		return newInt32Dictionary(t, makeColumnIndex(columnIndex), bufferSize)
	} else {
		return newInt64Dictionary(t, makeColumnIndex(columnIndex), bufferSize)
	}
}

func (t *timeType) NewColumnBuffer(columnIndex, bufferSize int) ColumnBuffer {
	if t.Unit.Millis != nil {
		return newInt32ColumnBuffer(t, makeColumnIndex(columnIndex), bufferSize)
	} else {
		return newInt64ColumnBuffer(t, makeColumnIndex(columnIndex), bufferSize)
	}
}

func (t *timeType) NewColumnReader(columnIndex, bufferSize int) ColumnReader {
	if t.Unit.Millis != nil {
		return newInt32ColumnReader(t, makeColumnIndex(columnIndex), bufferSize)
	} else {
		return newInt64ColumnReader(t, makeColumnIndex(columnIndex), bufferSize)
	}
}

func (t *timeType) ReadDictionary(columnIndex, numValues int, decoder encoding.Decoder) (Dictionary, error) {
	if t.Unit.Millis != nil {
		return readInt32Dictionary(t, makeColumnIndex(columnIndex), numValues, decoder)
	} else {
		return readInt64Dictionary(t, makeColumnIndex(columnIndex), numValues, decoder)
	}
}

// timestampType methods: timestamps are stored as INT64 regardless of
// unit, so everything delegates to the int64 implementations.

func (t *timestampType) NewColumnIndexer(sizeLimit int) ColumnIndexer { return newInt64ColumnIndexer() }

func (t *timestampType) NewDictionary(columnIndex, bufferSize int) Dictionary { return newInt64Dictionary(t, makeColumnIndex(columnIndex), bufferSize) }

func (t *timestampType) NewColumnBuffer(columnIndex, bufferSize int) ColumnBuffer { return newInt64ColumnBuffer(t, makeColumnIndex(columnIndex), bufferSize) }

func (t *timestampType) NewColumnReader(columnIndex, bufferSize int) ColumnReader { return newInt64ColumnReader(t, makeColumnIndex(columnIndex), bufferSize) }

func (t *timestampType) ReadDictionary(columnIndex, numValues int, decoder encoding.Decoder) (Dictionary, error) { return readInt64Dictionary(t, makeColumnIndex(columnIndex), numValues, decoder) }
type_default.go
0.737914
0.464294
type_default.go
starcoder
package topologicalsort

import (
	"fmt"

	"godev/basic/datastructure/graph"
	"godev/basic/datastructure/queue/deque"
)

// Kahn performs a topological sort using Kahn's algorithm.
// It returns the sorted vertex IDs, or an error if the graph is not a DAG.
// https://en.wikipedia.org/wiki/Topological_sorting
func Kahn(g graph.Graph) (sortedIDs []graph.ID, err error) {
	// 1. compute in-edges for every vertex and initialize visited vertices number as 0
	nodeWithInDegree := make(map[graph.ID]int, g.NodeNum())
	for id := range g.GetNodes() {
		sources, err := g.GetSources(id)
		if err != nil {
			return nil, err
		}
		nodeWithInDegree[id] = len(sources)
	}
	visited := 0
	// 2. enqueue all vertices with degree 0
	Q := deque.NewDeque(g.NodeNum())
	for id, inDegree := range nodeWithInDegree {
		if inDegree == 0 {
			Q.PushBack(id)
		}
	}
	// 3. dequeue a vertex until Q is empty
	//    a. increment visited by 1
	//    b. decrease all vertex's neighbors' degree by 1
	//    c. if in-degree of a neighbor is reduced to 0, then enqueue this neighbor
	for !Q.Empty() {
		v, err := Q.PopFront()
		if err != nil {
			return nil, err
		}
		id := v.(graph.ID)
		sortedIDs = append(sortedIDs, id)
		visited++
		tmap, err := g.GetTargets(id)
		// NOTE(review): a "node not exist" error just means no outgoing
		// edges here, so it is tolerated; matched by message since the
		// graph package exposes no sentinel to compare against.
		if err != nil && err.Error() != graph.NodeNotExistError(id).Error() {
			return nil, err
		}
		for t := range tmap {
			nodeWithInDegree[t]--
			if nodeWithInDegree[t] == 0 {
				Q.PushBack(t)
			}
		}
	}
	// Fewer visited vertices than nodes means a cycle kept some in-degrees > 0.
	if visited != g.NodeNum() {
		return nil, fmt.Errorf("graph is not a DAG, can NOT do topological sort on it")
	}
	return
}

// Vertex marks used by DFSTopo/visit (white/grey/black colouring).
const (
	unmarked  = 0 // not visited yet
	temporary = 1 // on the current DFS stack; seeing it again means a cycle
	permanent = 2 // fully processed
)

// DFSTopo performs a topological sort by depth-first search.
// it sounds like tri-color marking algorithm (golang GC algorithm) but different
// https://en.wikipedia.org/wiki/Tracing_garbage_collection#Tri-color_marking
// https://gist.github.com/Harold2017/7529971396e09992f879b22663726e07
func DFSTopo(g graph.Graph) (sortedIDs []graph.ID, err error) {
	// The zero value of the map already marks every vertex as unmarked,
	// so no explicit initialization pass is needed.
	mark := make(map[graph.ID]int, g.NodeNum())
	for v := range g.GetNodes() {
		if mark[v] == unmarked {
			if err := visit(g, v, &sortedIDs, mark); err != nil {
				return nil, err
			}
		}
	}
	// visit records vertices in postorder; the topological order is its
	// reverse. Reversing once here is O(n), unlike prepending per vertex.
	for i, j := 0, len(sortedIDs)-1; i < j; i, j = i+1, j-1 {
		sortedIDs[i], sortedIDs[j] = sortedIDs[j], sortedIDs[i]
	}
	return sortedIDs, nil
}

// visit recursively explores id's subtree, appending each fully-processed
// vertex to postorder. It reports an error when a temporary mark is
// revisited, which indicates a cycle. The mark map is passed by value
// since Go maps are reference types.
func visit(g graph.Graph, id graph.ID, postorder *[]graph.ID, mark map[graph.ID]int) error {
	switch mark[id] {
	case permanent:
		return nil
	case temporary:
		return fmt.Errorf("graph is not a DAG, can NOT do topological sort on it")
	}
	mark[id] = temporary
	tmap, err := g.GetTargets(id)
	// Tolerate "node not exist" (no outgoing edges); see note in Kahn.
	if err != nil && err.Error() != graph.NodeNotExistError(id).Error() {
		return err
	}
	for t := range tmap {
		if err := visit(g, t, postorder, mark); err != nil {
			return err
		}
	}
	mark[id] = permanent
	*postorder = append(*postorder, id)
	return nil
}

// TODO: how to implement parallel algorithm in wiki?
basic/datastructure/graph/algorithm/topologicalsort/topological_sorting.go
0.528047
0.439567
topological_sorting.go
starcoder
package goqrsvg

import (
	"errors"
	"fmt"
	"image/color"

	"github.com/ajstarks/svgo"
	"github.com/boombuler/barcode"
)

// QrSVG holds the data related to the size, location, and block size of
// the QR Code. Holds unexported fields.
type QrSVG struct {
	qr        barcode.Barcode
	qrWidth   int
	blockSize int
	startingX int
	startingY int
}

// NewQrSVG constructs a QrSVG struct. It takes a QR Code in the form of
// barcode.Barcode and sets the "pixel" or block size of the QR Code in
// the SVG file.
func NewQrSVG(qr barcode.Barcode, blockSize int) QrSVG {
	return QrSVG{
		qr:        qr,
		qrWidth:   qr.Bounds().Max.X,
		blockSize: blockSize,
		startingX: 0,
		startingY: 0,
	}
}

// RGB specifies a fill color in terms of a (r)ed, (g)reen, (b)lue triple.
// Standard reference: http://www.w3.org/TR/css3-color/
func (q QrSVG) RGB(r, g, b uint32) string {
	return fmt.Sprintf(`fill:rgb(%d,%d,%d)`, r, g, b)
}

// RGBA specifies a fill color in terms of a (r)ed, (g)reen, (b)lue triple
// and opacity. The 16-bit channels (as returned by color.Color.RGBA) are
// reduced to 8-bit before formatting.
func (q QrSVG) RGBA(r, g, b, a uint32) string {
	return fmt.Sprintf(`fill-opacity:%.2f; %s`, float64(a>>8)/255.0, q.RGB(r>>8, g>>8, b>>8))
}

// WriteColorQrSVG writes the QR Code to SVG, drawing black modules with
// the foreground color and white modules with the background color.
func (q *QrSVG) WriteColorQrSVG(s *svg.SVG, foreground, background color.Color) error {
	if q.qr.Metadata().CodeKind != "QR Code" {
		return errors.New("can not write to SVG: Not a QR code")
	}
	for x := 0; x < q.qrWidth; x++ {
		for y := 0; y < q.qrWidth; y++ {
			// Pixel position is derived from the module indices; note
			// that x selects the row (y offset) and y the column.
			px := q.startingX + y*q.blockSize
			py := q.startingY + x*q.blockSize
			switch q.qr.At(x, y) {
			case color.Black:
				s.Rect(px, py, q.blockSize, q.blockSize, fmt.Sprintf("%s;stroke:none", q.RGBA(foreground.RGBA())))
			case color.White:
				s.Rect(px, py, q.blockSize, q.blockSize, fmt.Sprintf("%s;stroke:none", q.RGBA(background.RGBA())))
			}
		}
	}
	return nil
}

// WriteQrSVG writes the QR Code to SVG in plain black and white.
func (q *QrSVG) WriteQrSVG(s *svg.SVG) error {
	return q.WriteColorQrSVG(s, color.Black, color.White)
}

// SetStartPoint sets the top left start point of QR Code.
// This takes an X and Y value and then adds four white "blocks"
// to create the "quiet zone" around the QR Code.
func (q *QrSVG) SetStartPoint(x, y int) {
	quiet := q.blockSize * 4
	q.startingX = x + quiet
	q.startingY = y + quiet
}

// StartQrSVG creates a start for writing an SVG file that only contains a
// barcode. This is similar to the svg.Start() method. This function
// should only be used if you only want to write a QR code to the SVG.
// Otherwise use the regular svg.Start() method to start your SVG file.
func (q *QrSVG) StartQrSVG(s *svg.SVG) {
	width := q.GetWidth()
	q.SetStartPoint(0, 0)
	s.Start(width, width)
}

// GetWidth reports the total SVG width: the QR modules plus the
// eight-block quiet zone (four on each side).
func (q QrSVG) GetWidth() int {
	return (q.qrWidth * q.blockSize) + (q.blockSize * 8)
}
goqrsvg.go
0.711932
0.48749
goqrsvg.go
starcoder
package hue

import (
	"math"
)

// Indices into the []XY returned by colorPointsForModel.
const (
	colorPointRed   = 0
	colorPointGreen = 1
	colorPointBlue  = 2
)

// RGB is a colour represented using the red/green/blue colour model.
type RGB struct {
	Red   uint8
	Green uint8
	Blue  uint8
}

// XY is a colour represented using the CIE colour space.
type XY struct {
	X float64
	Y float64
}

// HSB is a colour represented using the hue/saturation/value representation of the RGB colour model.
type HSB struct {
	Hue        uint16
	Saturation uint8
	Brightness uint8
}

// colorPointsForModel returns the XY gamut corners for the specified
// lightbulb model. The returned slice always holds the red, green and
// blue points in that order; unknown models get the full diagram.
func colorPointsForModel(model string) []XY {
	points := make([]XY, 3)
	switch model {
	case "LCT001", "LCT002", "LCT003":
		points[colorPointRed] = XY{X: 0.674, Y: 0.322}
		points[colorPointGreen] = XY{X: 0.408, Y: 0.517}
		points[colorPointBlue] = XY{X: 0.168, Y: 0.041}
	case "LLC001", "LLC005", "LLC006", "LLC007", "LLC011", "LLC012", "LLC013", "LST001":
		points[colorPointRed] = XY{X: 0.703, Y: 0.296}
		points[colorPointGreen] = XY{X: 0.214, Y: 0.709}
		points[colorPointBlue] = XY{X: 0.139, Y: 0.081}
	default:
		points[colorPointRed] = XY{X: 1.0, Y: 0.0}
		points[colorPointGreen] = XY{X: 0.0, Y: 1.0}
		points[colorPointBlue] = XY{X: 0.0, Y: 0.0}
	}
	return points
}

// crossProduct returns the z component of the cross product of p1 and p2.
func crossProduct(p1, p2 XY) float64 {
	return p1.X*p2.Y - p1.Y*p2.X
}

// getClosestPointToPoints projects p onto the segment a-b, clamped to the
// segment's endpoints.
func getClosestPointToPoints(a, b, p XY) XY {
	ap := XY{X: p.X - a.X, Y: p.Y - a.Y}
	ab := XY{X: b.X - a.X, Y: b.Y - a.Y}
	ab2 := ab.X*ab.X + ab.Y*ab.Y
	apAB := ap.X*ab.X + ap.Y*ab.Y
	t := apAB / ab2
	if t < 0.0 {
		t = 0.0
	} else if t > 1.0 {
		t = 1.0
	}
	return XY{X: a.X + ab.X*t, Y: a.Y + ab.Y*t}
}

// getDistanceBetweenTwoPoints returns the Euclidean distance between p1 and p2.
func getDistanceBetweenTwoPoints(p1, p2 XY) float64 {
	dx := p1.X - p2.X
	dy := p1.Y - p2.Y
	return math.Sqrt(dx*dx + dy*dy)
}

// checkPointInColorPointsReach reports whether p lies inside the gamut
// triangle described by colorPoints, using barycentric coordinates.
func checkPointInColorPointsReach(p XY, colorPoints []XY) bool {
	if len(colorPoints) != 3 {
		return false
	}
	red := colorPoints[colorPointRed]
	green := colorPoints[colorPointGreen]
	blue := colorPoints[colorPointBlue]
	v1 := XY{X: green.X - red.X, Y: green.Y - red.Y}
	v2 := XY{X: blue.X - red.X, Y: blue.Y - red.Y}
	q := XY{X: p.X - red.X, Y: p.Y - red.Y}
	s := crossProduct(q, v2) / crossProduct(v1, v2)
	t := crossProduct(v1, q) / crossProduct(v1, v2)
	return s >= 0.0 && t >= 0.0 && s+t <= 1.0
}

// closestReachablePoint maps p to the nearest point on the gamut triangle
// described by colorPoints. Shared by FromXY and FromRGB.
func closestReachablePoint(colorPoints []XY, p XY) XY {
	pAB := getClosestPointToPoints(colorPoints[colorPointRed], colorPoints[colorPointGreen], p)
	pAC := getClosestPointToPoints(colorPoints[colorPointBlue], colorPoints[colorPointRed], p)
	pBC := getClosestPointToPoints(colorPoints[colorPointGreen], colorPoints[colorPointBlue], p)
	closest := pAB
	lowest := getDistanceBetweenTwoPoints(p, pAB)
	if d := getDistanceBetweenTwoPoints(p, pAC); d < lowest {
		lowest = d
		closest = pAC
	}
	if d := getDistanceBetweenTwoPoints(p, pBC); d < lowest {
		closest = pBC
	}
	return closest
}

// clamp255 converts v to a uint8, clamping it to [0, 255] first.
func clamp255(v float64) uint8 {
	if v < 0 {
		return 0
	}
	if v > 255 {
		return 255
	}
	return uint8(v)
}

// gammaExpand linearises an sRGB channel value in [0, 1].
func gammaExpand(v float64) float64 {
	if v > 0.04045 {
		return math.Pow((v+0.055)/(1.0+0.055), 2.4)
	}
	return v / 12.92
}

// gammaCompress applies sRGB gamma to a linear channel value in [0, 1].
func gammaCompress(v float64) float64 {
	if v <= 0.0031308 {
		return v * 12.92
	}
	return (1.0+0.055)*math.Pow(v, 1.0/2.4) - 0.055
}

// scaleToUnit scales the channels down proportionally when the largest
// one exceeds 1.0, preserving the colour's hue.
func scaleToUnit(r, g, b float64) (float64, float64, float64) {
	switch {
	case r > b && r > g && r > 1.0:
		return 1.0, g / r, b / r
	case g > b && g > r && g > 1.0:
		return r / g, 1.0, b / g
	case b > r && b > g && b > 1.0:
		return r / b, g / b, 1.0
	}
	return r, g, b
}

// FromHSB converts the specified HSB value into the RGB colour space.
// This algorithm is adapted from the code at http://www.docjar.com/html/api/java/awt/Color.java.html
//
// Bug fixes relative to the previous version: the uint fields are now
// converted to float64 BEFORE dividing (the old integer division always
// produced 0 or 1); the sector switch uses int(h) as in the Java source
// (a float64 switch only matched exact integers, leaving most hues
// black); and sector 5's green term uses multiplication, not addition.
func (c *RGB) FromHSB(from HSB) {
	hue := float64(from.Hue) / 65535.0
	saturation := float64(from.Saturation) / 255.0
	brightness := float64(from.Brightness) / 255.0
	if saturation == 0 {
		// Zero saturation is a pure grey level.
		v := uint8(brightness*255.0 + 0.5)
		c.Red, c.Green, c.Blue = v, v, v
		return
	}
	h := (hue - math.Floor(hue)) * 6.0
	f := h - math.Floor(h)
	p := brightness * (1.0 - saturation)
	q := brightness * (1.0 - saturation*f)
	t := brightness * (1.0 - saturation*(1.0-f))
	var red, green, blue float64
	switch int(h) { // sector 0..5 of the colour wheel
	case 0:
		red, green, blue = brightness, t, p
	case 1:
		red, green, blue = q, brightness, p
	case 2:
		red, green, blue = p, brightness, t
	case 3:
		red, green, blue = p, q, brightness
	case 4:
		red, green, blue = t, p, brightness
	case 5:
		red, green, blue = brightness, p, q
	}
	c.Red = uint8(red*255.0 + 0.5)
	c.Green = uint8(green*255.0 + 0.5)
	c.Blue = uint8(blue*255.0 + 0.5)
}

// FromCT converts the specified CT value (in mireds) into the RGB colour space.
// This algorithm is adapted from the example at http://www.tannerhelland.com/4435/convert-temperature-rgb-algorithm-code/
func (c *RGB) FromCT(from uint16) {
	// Mireds -> kelvins, then the algorithm's "hundreds of kelvins" unit.
	temp := 1000000 / float64(from) / 100
	if temp < 66 {
		c.Red = 255
	} else {
		c.Red = clamp255(329.698727446 * math.Pow(temp-60, -0.1332047592))
	}
	if temp <= 66 {
		c.Green = clamp255(99.4708025861*math.Log(temp) - 161.1195681661)
	} else {
		c.Green = clamp255(288.1221695283 * math.Pow(temp, -0.0755148492))
	}
	switch {
	case temp >= 66:
		c.Blue = 255
	case temp <= 19:
		c.Blue = 0
	default:
		c.Blue = clamp255(138.5177312231*math.Log(temp-10) - 305.0447927307)
	}
}

// FromXY converts the specified XY value into the RGB colour space.
// The supplied light model is used to adjust the input value accordingly.
// This algorithm is adapted from the examples at http://www.developers.meethue.com/documentation/color-conversions-rgb-xy
func (c *RGB) FromXY(from XY, model string) {
	xy := from
	colorPoints := colorPointsForModel(model)
	if !checkPointInColorPointsReach(xy, colorPoints) {
		// Map the requested colour to the closest representable colour.
		xy = closestReachablePoint(colorPoints, xy)
	}
	x := xy.X
	y := xy.Y
	z := 1.0 - x - y
	// NOTE(review): y == 0 yields Inf/NaN here, as in the original — confirm inputs cannot hit it.
	Y := 1.0
	X := (Y / y) * x
	Z := (Y / y) * z
	// sRGB D65 conversion.
	r := X*1.4628067 - Y*0.1840623 - Z*0.2743606
	g := -X*0.5217933 + Y*1.4472381 + Z*0.0677227
	b := X*0.0349342 - Y*0.0968930 + Z*1.2884099
	// Scale down out-of-range channels, apply gamma, then re-check.
	r, g, b = scaleToUnit(r, g, b)
	r, g, b = gammaCompress(r), gammaCompress(g), gammaCompress(b)
	r, g, b = scaleToUnit(r, g, b)
	c.Red = uint8(r * 255)
	c.Green = uint8(g * 255)
	c.Blue = uint8(b * 255)
}

// FromRGB converts the specified RGB value into the CIE colour space.
// The supplied light model is used to adjust the input value accordingly.
// This algorithm is adapted from the examples at http://www.developers.meethue.com/documentation/color-conversions-rgb-xy
//
// Bug fixes relative to the previous version: the uint8 channels are
// converted to float64 BEFORE dividing by 255 (integer division truncated
// every channel to 0 or 1), and the blue channel now uses the standard
// sRGB threshold 0.04045 (it read 0.0405).
func (c *XY) FromRGB(from RGB, model string) {
	r := gammaExpand(float64(from.Red) / 255)
	g := gammaExpand(float64(from.Green) / 255)
	b := gammaExpand(float64(from.Blue) / 255)
	// Convert RGB to XYZ using Wide RGB D65 conversion.
	X := r*0.649926 + g*0.103455 + b*0.197109
	Y := r*0.234327 + g*0.743075 + b*0.022598
	Z := r*0.000000 + g*0.053077 + b*1.035763
	cx := X / (X + Y + Z)
	cy := Y / (X + Y + Z)
	// Pure black divides by zero above; fall back to (0, 0).
	if math.IsNaN(cx) {
		cx = 0.0
	}
	if math.IsNaN(cy) {
		cy = 0.0
	}
	// Check if the requested XY value is within the colour range of the
	// light; otherwise send the closest colour we can reach.
	xy := XY{X: cx, Y: cy}
	colorPoints := colorPointsForModel(model)
	if !checkPointInColorPointsReach(xy, colorPoints) {
		xy = closestReachablePoint(colorPoints, xy)
	}
	c.X = xy.X
	c.Y = xy.Y
}
color.go
0.903571
0.563078
color.go
starcoder
package go_tsuro // Action types const ( ActionPlaceTile = "PlaceTile" ActionRotateTileRight = "RotateTileRight" ActionRotateTileLeft = "RotateRileLeft" ) // Tsuro Variants const ( VariantClassic = "Classic" // normal Tsuro VariantLongestPath = "LongestPath" // player with the longest path wins VariantMostCrossings = "MostCrossings" // player whose path crosses itself the most wins VariantOpenTiles = "OpenTiles" // tiles are shared globally VariantSolo = "Solo" // place tiles while keeping all tokens on the board ) var Variants = []string{VariantClassic, VariantLongestPath, VariantMostCrossings, VariantOpenTiles, VariantSolo} // TsuroMoreOptions are the additional options for creating a game of Tsuro type TsuroMoreOptions struct { Seed int64 Variant string } // RotateTileActionDetails is the action details for rotating a tile in hand type RotateTileActionDetails struct { Tile string } // PlaceTileActionDetails is the action details for placing a tile in the desired location on the board type PlaceTileActionDetails struct { Row, Column int Tile string } // TsuroSnapshotData is the game data unique to Tsuro type TsuroSnapshotData struct { Board [][]*tile TilesRemaining int Hands map[string][]*tile Tokens map[string]*token Dragon string `json:",omitempty"` Variant string Points map[string]int `json:",omitempty"` } // list of all the tiles that can be played var tiles = []string{ "ABCDEFGH", "AHBGCDEF", "AHBCDGEF", "AHBCDEFG", "AGBHCDEF", "ABCHDGEF", "ABCGDHEF", "AGBCDHEF", "ABCGDEFH", "AGBCDEFH", "ACBGDEFH", "ACBGDHEF", "ACBHDGEF", "ADBHCGEF", "ADBGCHEF", "ADBCEHFG", "ADBCEGFH", "AEBCDGFH", "AEBCDHFG", "AFBHCDEG", "AFBGCHDE", "AFBCDHEG", "AFBDCHEG", "AFBDCGEH", "AEBDCGFH", "ACBDEGFH", "AFBECHDG", "AFBECGDH", "AEBFCGDH", "ADBFCGEH", "ADBFCHEG", "ACBFDHEG", "ADBGCEFH", "AGBDCEFH", "ADBGCFEH", } // map of path to list of all other paths that cross the path var crossing = map[string][]string{ "AB": {}, "BA": {}, "CD": {}, "DC": {}, "EF": {}, "FE": {}, "GH": {}, "HG": {}, 
"AC": {"BD", "BE", "BF", "BG", "BH", "DB", "EB", "FB", "GB", "HB"}, "CA": {"BD", "BE", "BF", "BG", "BH", "DB", "EB", "FB", "GB", "HB"}, "AD": {"BE", "BF", "BG", "BH", "CE", "CF", "CG", "CH", "EB", "FB", "GB", "HB", "EC", "FC", "GC", "HC"}, "DA": {"BE", "BF", "BG", "BH", "CE", "CF", "CG", "CH", "EB", "FB", "GB", "HB", "EC", "FC", "GC", "HC"}, "AE": {"BF", "BG", "BH", "CF", "CG", "CH", "DF", "DG", "DH", "FB", "GB", "HB", "FC", "GC", "HC", "FD", "GD", "HD"}, "EA": {"BF", "BG", "BH", "CF", "CG", "CH", "DF", "DG", "DH", "FB", "GB", "HB", "FC", "GC", "HC", "FD", "GD", "HD"}, "AF": {"BG", "BH", "CG", "CH", "DG", "DH", "EG", "EH", "GB", "HB", "GC", "HC", "GD", "HD", "GE", "HE"}, "FA": {"BG", "BH", "CG", "CH", "DG", "DH", "EG", "EH", "GB", "HB", "GC", "HC", "GD", "HD", "GE", "HE"}, "AG": {"HB", "HC", "HD", "HE", "HF", "BH", "CH", "DH", "EH", "FH"}, "GA": {"HB", "HC", "HD", "HE", "HF", "BH", "CH", "DH", "EH", "FH"}, "AH": {}, "HA": {}, "BC": {}, "CB": {}, "BD": {"CA", "CE", "CF", "CG", "CH", "AC", "EC", "FC", "GC", "HC"}, "DB": {"CA", "CE", "CF", "CG", "CH", "AC", "EC", "FC", "GC", "HC"}, "BE": {"AC", "AD", "FC", "FD", "GC", "GD", "HC", "HD", "CA", "DA", "CF", "DF", "CG", "DG", "CH", "DH"}, "EB": {"AC", "AD", "FC", "FD", "GC", "GD", "HC", "HD", "CA", "DA", "CF", "DF", "CG", "DG", "CH", "DH"}, "BF": {"AE", "AC", "AD", "HE", "HC", "HD", "GE", "GC", "GD", "EA", "CA", "DA", "EH", "CH", "DH", "EG", "CG", "DG"}, "FB": {"AE", "AC", "AD", "HE", "HC", "HD", "GE", "GC", "GD", "EA", "CA", "DA", "EH", "CH", "DH", "EG", "CG", "DG"}, "BG": {"AC", "AD", "AE", "AF", "HC", "HD", "HE", "HF", "CA", "DA", "EA", "FA", "CH", "DH", "EH", "FH"}, "GB": {"AC", "AD", "AE", "AF", "HC", "HD", "HE", "HF", "CA", "DA", "EA", "FA", "CH", "DH", "EH", "FH"}, "BH": {"AC", "AD", "AE", "AF", "AG", "CA", "DA", "EA", "FA", "GA"}, "HB": {"AC", "AD", "AE", "AF", "AG", "CA", "DA", "EA", "FA", "GA"}, "CE": {"DA", "DB", "DF", "DG", "DH", "AD", "BD", "FD", "GD", "HD"}, "EC": {"DA", "DB", "DF", "DG", "DH", "AD", "BD", 
"FD", "GD", "HD"}, "CF": {"DA", "DB", "DG", "DH", "EA", "EB", "EG", "EH", "AD", "BD", "GD", "HD", "AE", "BE", "GE", "HE"}, "FC": {"DA", "DB", "DG", "DH", "EA", "EB", "EG", "EH", "AD", "BD", "GD", "HD", "AE", "BE", "GE", "HE"}, "CG": {"DH", "DA", "DB", "EH", "EA", "EB", "FH", "FA", "FB", "HD", "AD", "BD", "HE", "AE", "BE", "HF", "AF", "BF"}, "GC": {"DH", "DA", "DB", "EH", "EA", "EB", "FH", "FA", "FB", "HD", "AD", "BD", "HE", "AE", "BE", "HF", "AF", "BF"}, "CH": {"AD", "AE", "AF", "AG", "BD", "BE", "BF", "BG", "DA", "EA", "FA", "GA", "DB", "EB", "FB", "GB"}, "HC": {"AD", "AE", "AF", "AG", "BD", "BE", "BF", "BG", "DA", "EA", "FA", "GA", "DB", "EB", "FB", "GB"}, "DE": {}, "ED": {}, "DF": {"EA", "EB", "EC", "EG", "EH", "AE", "BE", "CE", "GE", "HE"}, "FD": {"EA", "EB", "EC", "EG", "EH", "AE", "BE", "CE", "GE", "HE"}, "DG": {"EA", "EB", "EC", "EH", "FA", "FB", "FC", "FH", "AE", "BE", "CE", "CH", "AF", "BF", "CF", "HF"}, "GD": {"EA", "EB", "EC", "EH", "FA", "FB", "FC", "FH", "AE", "BE", "CE", "CH", "AF", "BF", "CF", "HF"}, "DH": {"GA", "GB", "GC", "FA", "FB", "FC", "EA", "EB", "EC", "AG", "BG", "CG", "AF", "BF", "CF", "AE", "BE", "CE"}, "HD": {"GA", "GB", "GC", "FA", "FB", "FC", "EA", "EB", "EC", "AG", "BG", "CG", "AF", "BF", "CF", "AE", "BE", "CE"}, "EG": {"FA", "FB", "FC", "FD", "FH", "AF", "BF", "CF", "DF", "HF"}, "GE": {"FA", "FB", "FC", "FD", "FH", "AF", "BF", "CF", "DF", "HF"}, "EH": {"FA", "FB", "FC", "FD", "GA", "GB", "GC", "GD", "AF", "BF", "CF", "DF", "AG", "BG", "CG", "DG"}, "HE": {"FA", "FB", "FC", "FD", "GA", "GB", "GC", "GD", "AF", "BF", "CF", "DF", "AG", "BG", "CG", "DG"}, "FG": {}, "GF": {}, "FH": {"GA", "GB", "GC", "GD", "GE", "AG", "BG", "CG", "DG", "EG"}, "HF": {"GA", "GB", "GC", "GD", "GE", "AG", "BG", "CG", "DG", "EG"}, }
models.go
0.566139
0.448909
models.go
starcoder
package ast

import (
	"fmt"
	"math/big"
	"strconv"
	"time"

	"github.com/eliquious/lexer"
)

// ExpressionType identifies various expressions
type ExpressionType int

const (
	VariableDeclarationType ExpressionType = iota
	ScopedVariableDeclarationType
	ConstantDeclarationType
	FunctionDeclarationType
	StructDeclarationType
	UnitDeclarationType
	AttributeDeclarationType
	ArrayDeclarationType
	EnumDeclarationType
	ImportExpressionType
	ConversionExpressionType
	IfExpressionType
	IfElseExpressionType
	ElseExpressionType
	CallFunctionExpressionType
	FilterExpressionType
	ForExpressionType
	AssignmentExpressionType
	BinaryExpressionType
	UnaryExpressionType
	IntegerLiteralType
	DecimalLiteralType
	StringLiteralType
	DurationLiteralType
	TimestampLiteralType
	BooleanLiteralType
	StructLiteralType
	ConversionLiteralType
	ArrayLiteralType
)

// Expression represents AST expressions
type Expression interface {
	Type() ExpressionType
	String() string
}

// IsLiteral returns true for literal expressions
func IsLiteral(expr Expression) bool {
	// One multi-value case replaces the original per-type cases;
	// behavior is identical.
	switch expr.Type() {
	case IntegerLiteralType, DecimalLiteralType, BooleanLiteralType,
		StringLiteralType, DurationLiteralType:
		return true
	default:
		return false
	}
}

// IsUnaryOperator returns true for unary operators
func IsUnaryOperator(tok lexer.Token) bool {
	return tok == lexer.PLUSPLUS || tok == lexer.MINUSMINUS
}

// IsBinaryOperator returns true for binary operators
func IsBinaryOperator(tok lexer.Token) bool {
	// An operator that is not unary is treated as binary.
	return tok.IsOperator() && !IsUnaryOperator(tok)
}

// IntegerLiteral represents literal integers
type IntegerLiteral struct {
	Value *big.Int
}

// Type returns IntegerLiteralType.
func (e IntegerLiteral) Type() ExpressionType {
	return IntegerLiteralType
}

// String returns the decimal representation of the integer.
func (e IntegerLiteral) String() string {
	return e.Value.String()
}

// Add returns the sum of this integer and the given expression.
// Adding a decimal promotes the result to a DecimalLiteral; any other
// expression type is an error.
func (e IntegerLiteral) Add(expr Expression) (Expression, error) {
	switch expr.Type() {
	case IntegerLiteralType:
		i := new(big.Int)
		return &IntegerLiteral{i.Add(e.Value, expr.(*IntegerLiteral).Value)}, nil
	case DecimalLiteralType:
		f := new(big.Float).SetInt(e.Value)
		return &DecimalLiteral{f.Add(f, expr.(*DecimalLiteral).Value)}, nil
	default:
		// fmt.Errorf replaces errors.New(fmt.Sprintf(...)) (staticcheck
		// S1028). The %T verb is now given expr itself: the original
		// passed expr.Type(), which always printed "ast.ExpressionType"
		// instead of the concrete operand type.
		return nil, fmt.Errorf("Integer addition of type '%T' unsupported", expr)
	}
}

// DecimalLiteral represents literal decimals
type DecimalLiteral struct {
	Value *big.Float
}

// Type returns DecimalLiteralType.
func (e DecimalLiteral) Type() ExpressionType {
	return DecimalLiteralType
}

// String returns the decimal in scientific notation with 16 digits.
func (e DecimalLiteral) String() string {
	return e.Value.Text('E', 16)
}

// Add returns the sum of this decimal and the given expression.
// Integers are promoted to decimals; any other expression type is an error.
func (e DecimalLiteral) Add(expr Expression) (Expression, error) {
	switch expr.Type() {
	case IntegerLiteralType:
		f := new(big.Float).SetInt(expr.(*IntegerLiteral).Value)
		return &DecimalLiteral{f.Add(f, e.Value)}, nil
	case DecimalLiteralType:
		f := new(big.Float)
		return &DecimalLiteral{f.Add(e.Value, expr.(*DecimalLiteral).Value)}, nil
	default:
		// Same fixes as IntegerLiteral.Add: fmt.Errorf and %T on expr.
		return nil, fmt.Errorf("Decimal addition of type '%T' unsupported", expr)
	}
}

// BooleanLiteral represents literal booleans
type BooleanLiteral struct {
	Value bool
}

// Type returns BooleanLiteralType.
func (e BooleanLiteral) Type() ExpressionType {
	return BooleanLiteralType
}

// String returns "true" or "false".
func (e BooleanLiteral) String() string {
	return strconv.FormatBool(e.Value)
}

// StringLiteral represents literal strings
type StringLiteral struct {
	Value string
}

// Type returns StringLiteralType.
func (e StringLiteral) Type() ExpressionType {
	return StringLiteralType
}

// String returns the value as a quoted Go string literal.
func (e StringLiteral) String() string {
	return strconv.Quote(e.Value)
}

// DurationLiteral represents literal durations
type DurationLiteral struct {
	Value time.Duration
}

// Type returns DurationLiteralType.
func (e DurationLiteral) Type() ExpressionType {
	return DurationLiteralType
}

// String returns the duration in time.Duration's string form.
func (e DurationLiteral) String() string {
	return e.Value.String()
}
calculator/ast/ast.go
0.746786
0.604691
ast.go
starcoder
package state import ( "math/rand" "sync" ) type GameSession struct { Mutex *sync.Mutex renderNotificationChannel chan bool score uint GameOver bool GameBoard [4][4]uint } // NewGameSession produces a ready-to-use session state. func NewGameSession(renderNotificationChannel chan bool) *GameSession { session := &GameSession{ Mutex: &sync.Mutex{}, renderNotificationChannel: renderNotificationChannel, score: 0, GameOver: false, GameBoard: [4][4]uint{}, } //We want to start off with one filled cell. session.fillCell() return session } func isGameOver(board [4][4]uint) bool { for _, row := range board { var prevCell uint for _, cell := range row { //At least one more left / right move possible. if cell == 0 || prevCell == cell { return false } prevCell = cell } } for cellIndex := 0; cellIndex < len(board); cellIndex++ { var prevCell uint for rowIndex := 0; rowIndex < len(board); rowIndex++ { cell := board[rowIndex][cellIndex] //At least one more left / right move possible. if cell == 0 || prevCell == cell { return false } prevCell = cell } } return true } func (session *GameSession) update() { var score uint for _, row := range session.GameBoard { for _, cell := range row { score += cell } } session.score = score session.GameOver = isGameOver(session.GameBoard) // In order to avoid dead-locking the caller. 
go func() { session.renderNotificationChannel <- true }() } func (session *GameSession) fillCell() { if session.GameOver { return } var freeIndices [][2]int for rowIndex, row := range session.GameBoard { for cellIndex, cell := range row { if cell == 0 { freeIndices = append(freeIndices, [2]int{rowIndex, cellIndex}) } } } if len(freeIndices) == 0 { session.GameOver = true return } indexToFill := freeIndices[rand.Intn(len(freeIndices))] session.GameBoard[indexToFill[0]][indexToFill[1]] = 2 } func (session *GameSession) Down() { if session.downNoFill() { session.fillCell() session.update() } } // downNoFill is necessary for proper unit testing without the // randomness factor. func (session *GameSession) downNoFill() bool { if session.GameOver { return false } var hasChanged bool for cellIndex := 0; cellIndex < len(session.GameBoard); cellIndex++ { //Combination run //We combine from top to bottom, since that's how the original game //does it. So 2,2,2,0 would become 4,0,2,0 if session.combineVertically( len(session.GameBoard)-1, func(i int) bool { return i >= 0 }, func(i int) int { return i - 1 }, cellIndex) { hasChanged = true } //Shifting run //The previously combined 4,0,2,0 now becomes 4,2,0,0 for rowIndex := len(session.GameBoard) - 2; rowIndex >= 0; rowIndex-- { cell := session.GameBoard[rowIndex][cellIndex] if cell == 0 { continue } moveTo := -1 for tempRowIndex := rowIndex + 1; tempRowIndex < len(session.GameBoard); tempRowIndex++ { if session.GameBoard[tempRowIndex][cellIndex] == 0 { moveTo = tempRowIndex } else { break } } if moveTo != -1 { session.GameBoard[moveTo][cellIndex] = cell session.GameBoard[rowIndex][cellIndex] = 0 hasChanged = true } } } return hasChanged } func (session *GameSession) Up() { if session.upNoFill() { session.fillCell() session.update() } } func (session *GameSession) upNoFill() bool { if session.GameOver { return false } var hasChanged bool for cellIndex := 0; cellIndex < len(session.GameBoard); cellIndex++ { //Combination run 
//We combine from top to bottom, since that's how the original game //does it. So 2,2,2,0 would become 4,0,2,0 if session.combineVertically( 0, func(i int) bool { return i < len(session.GameBoard) }, func(i int) int { return i + 1 }, cellIndex) { hasChanged = true } //Shifting run //The previously combined 4,0,2,0 now becomes 4,2,0,0 for rowIndex := 1; rowIndex < len(session.GameBoard); rowIndex++ { cell := session.GameBoard[rowIndex][cellIndex] if cell == 0 { continue } moveTo := -1 for tempRowIndex := rowIndex - 1; tempRowIndex >= 0; tempRowIndex-- { if session.GameBoard[tempRowIndex][cellIndex] == 0 { moveTo = tempRowIndex } else { break } } if moveTo != -1 { session.GameBoard[moveTo][cellIndex] = cell session.GameBoard[rowIndex][cellIndex] = 0 hasChanged = true } } } return hasChanged } func (session *GameSession) combineVertically(start int, resume func(int) bool, update func(int) int, cellIndex int) bool { var hasChanged bool indexLastNonZero := -1 for rowIndex := start; resume(rowIndex); rowIndex = update(rowIndex) { cell := session.GameBoard[rowIndex][cellIndex] if cell == 0 { continue } if indexLastNonZero == -1 || cell != session.GameBoard[indexLastNonZero][cellIndex] { indexLastNonZero = rowIndex continue } session.GameBoard[indexLastNonZero][cellIndex] = cell * 2 session.GameBoard[rowIndex][cellIndex] = 0 indexLastNonZero = -1 hasChanged = true } return hasChanged } func (session *GameSession) Left() { if session.leftNoFill() { session.fillCell() session.update() } } func (session *GameSession) leftNoFill() bool { if session.GameOver { return false } var hasChanged bool for rowIndex := 0; rowIndex < len(session.GameBoard); rowIndex++ { //Combination run if session.combineHorizontally(0, func(i int) bool { return i < len(session.GameBoard) }, func(i int) int { return i + 1 }, rowIndex) { hasChanged = true } //Shifting run //The previously combined 4,0,2,0 now becomes 4,2,0,0 for cellIndex := 1; cellIndex < len(session.GameBoard); cellIndex++ { cell := 
session.GameBoard[rowIndex][cellIndex] if cell == 0 { continue } moveTo := -1 for tempCellIndex := cellIndex - 1; tempCellIndex >= 0; tempCellIndex-- { if session.GameBoard[rowIndex][tempCellIndex] == 0 { moveTo = tempCellIndex } else { break } } if moveTo != -1 { session.GameBoard[rowIndex][moveTo] = cell session.GameBoard[rowIndex][cellIndex] = 0 hasChanged = true } } } return hasChanged } func (session *GameSession) Right() { if session.rightNoFill() { session.fillCell() session.update() } } func (session *GameSession) rightNoFill() bool { if session.GameOver { return false } var hasChanged bool for rowIndex := 0; rowIndex < len(session.GameBoard); rowIndex++ { //Combination run //We combine from top to bottom, since that's how the original game //does it. So 2,2,2,0 would become 4,0,2,0 if session.combineHorizontally( len(session.GameBoard)-1, func(i int) bool { return i >= 0 }, func(i int) int { return i - 1 }, rowIndex) { hasChanged = true } //Shifting run //The previously combined 4,0,2,0 now becomes 4,2,0,0 for cellIndex := len(session.GameBoard) - 2; cellIndex >= 0; cellIndex-- { cell := session.GameBoard[rowIndex][cellIndex] if cell == 0 { continue } moveTo := -1 for tempCellIndex := cellIndex + 1; tempCellIndex < len(session.GameBoard); tempCellIndex++ { if session.GameBoard[rowIndex][tempCellIndex] == 0 { moveTo = tempCellIndex } else { break } } if moveTo != -1 { session.GameBoard[rowIndex][moveTo] = cell session.GameBoard[rowIndex][cellIndex] = 0 hasChanged = true } } } return hasChanged } func (session *GameSession) combineHorizontally(start int, resume func(int) bool, update func(int) int, rowIndex int) bool { var hasChanged bool indexLastNonZero := -1 for cellIndex := start; resume(cellIndex); cellIndex = update(cellIndex) { cell := session.GameBoard[rowIndex][cellIndex] if cell == 0 { continue } if indexLastNonZero == -1 || cell != session.GameBoard[rowIndex][indexLastNonZero] { indexLastNonZero = cellIndex continue } 
session.GameBoard[rowIndex][indexLastNonZero] = cell * 2 session.GameBoard[rowIndex][cellIndex] = 0 indexLastNonZero = -1 hasChanged = true } return hasChanged } func (session *GameSession) Score() uint { return session.score }
state/state.go
0.537284
0.415758
state.go
starcoder
package main

import (
	"fmt"
	"math"
)

// Circle description
type Circle struct {
	radius float64
}

// Rectangle description
type Rectangle struct {
	width  float64
	height float64
}

// Triangle description (three side lengths)
type Triangle struct {
	a float64
	b float64
	c float64
}

// Cylinder description
type Cylinder struct {
	radius float64
	height float64
}

// areaCircle returns the area of a circle (pi*r^2).
func areaCircle(radius float64) float64 {
	return math.Pi * math.Pow(radius, 2)
}

// circCircle returns the circumference of a circle (2*pi*r).
func circCircle(radius float64) float64 {
	return 2 * math.Pi * radius
}

// areaRectangle returns the area of a rectangle.
func areaRectangle(width, height float64) float64 {
	return width * height
}

// perimRectangle returns the perimeter of a rectangle.
func perimRectangle(width, height float64) float64 {
	return (width + height) * 2
}

// areaTriangle returns the area of a triangle from its three side
// lengths using Heron's formula: sqrt(s*(s-a)*(s-b)*(s-c)) with
// s = (a+b+c)/2.
//
// Bug fix: the original computed sqrt(s*(s-a)*(s-a)*(s-a)), repeating
// side a three times, which gives a wrong area for every
// non-equilateral triangle.
func areaTriangle(a, b, c float64) float64 {
	s := (a + b + c) / 2
	return math.Sqrt(s * (s - a) * (s - b) * (s - c))
}

// perimTriangle returns the perimeter of a triangle.
func perimTriangle(a, b, c float64) float64 {
	return a + b + c
}

// volCylinder returns the volume of a cylinder (pi*r^2*h).
func volCylinder(radius, height float64) float64 {
	return math.Pi * math.Pow(radius, 2) * height
}

// surfaceCylinder returns the total surface area of a cylinder
// (lateral surface plus the two circular end caps).
func surfaceCylinder(radius, height float64) float64 {
	return (2 * math.Pi * radius * height) + (2 * math.Pi * math.Pow(radius, 2))
}

func main() {
	// Declare and assign
	circle1 := Circle{5}
	rectangle1 := Rectangle{5, 3}
	triangle1 := Triangle{4, 5, 6}
	cylinder1 := Cylinder{5, 3}

	// Get shape properties
	areaCircle1 := areaCircle(circle1.radius)
	circCircle1 := circCircle(circle1.radius)
	areaRectangle1 := areaRectangle(rectangle1.width, rectangle1.height)
	perimRectangle1 := perimRectangle(rectangle1.width, rectangle1.height)
	areaTriangle1 := areaTriangle(triangle1.a, triangle1.b, triangle1.c)
	perimTriangle1 := perimTriangle(triangle1.a, triangle1.b, triangle1.c)
	volumeCylinder1 := volCylinder(cylinder1.radius, cylinder1.height)
	surfaceCylinder1 := surfaceCylinder(cylinder1.radius, cylinder1.height)

	fmt.Println(circle1.radius, areaCircle1, circCircle1)
	fmt.Println(rectangle1.width, rectangle1.height, areaRectangle1, perimRectangle1)
	fmt.Println(triangle1.a, triangle1.b, triangle1.c, areaTriangle1, perimTriangle1)
	fmt.Println(cylinder1.radius, cylinder1.height, volumeCylinder1, surfaceCylinder1)
	fmt.Printf("Circle1 (radius %.2f) area is %10.3f, circumference is %10.3f\n", circle1.radius, areaCircle1, circCircle1)
	fmt.Printf("Rectangle1 (width %.2f, height %.2f) area is %10.3f, perimeter is %10.3f\n", rectangle1.width, rectangle1.height, areaRectangle1, perimRectangle1)
	fmt.Printf("Triangle1 (a %.2f, b %.2f, c %.2f) area is %10.3f, perimeter is %10.3f\n", triangle1.a, triangle1.b, triangle1.c, areaTriangle1, perimTriangle1)
	fmt.Printf("Cylinder1 (radius %.2f, height %.2f) vol is %10.3f, surface area is %10.3f\n", cylinder1.radius, cylinder1.height, volumeCylinder1, surfaceCylinder1)
}
software/development/languages/go-cheat-sheet/src/function-method-interface-package-example/function/function.go
0.805364
0.541106
function.go
starcoder
package main /* This project uses a BME280 temperature/humidity sensor and an SSD1306 OLED display (128x64 pixels). Both use the I2C bus, so the only required pins for using both components are the SDA and SCL pins. A button is used to switch the displayed temerature units between Celcius and Farenheit. The code also demonstrates the use of the TaskScheduler library to implement the logic as concurrent tasks, rather than the usual loops and delays. The TinyFont module allows flexible usage of multple fonts and font sizes. This code was tested on the following boards: - Wemos D1 Mini (ESP8266 board) - Arduino Uno - Adafruit Trinket M0 - Arduino Nano 33 IoT See https://github.com/toyo/tinyfont-ssd1306 for a sample of using the TinyFont library with the I2C-based SSD1306 OLED display, since the TinyFont samples seem to be geared towards a specific display. Since Go routines support is needed, flash with the `--scheduler tasks` parameter. E.g.: tinygo flash --target arduino-nano33 --scheduler tasks */ import ( "fmt" "image/color" "machine" "time" "tinygo.org/x/drivers/bme280" "tinygo.org/x/drivers/ssd1306" "tinygo.org/x/tinyfont" "tinygo.org/x/tinyfont/freesans" ) var ( button = machine.PB10 led = machine.LED showCelcius bool displayText string symbolPos uint32 textWidth uint32 temp int32 humidity int32 display ssd1306.Device sensor bme280.Device black = color.RGBA{1, 1, 1, 255} ) // ............................................................................ func main() { setup() loop() } // ............................................................................ 
func setup() { machine.I2C0.Configure(machine.I2CConfig{}) sensor = bme280.New(machine.I2C0) sensor.Configure() display = ssd1306.NewI2C(machine.I2C0) display.Configure(ssd1306.Config{ Width: 128, Height: 64, Address: ssd1306.Address_128_32, }) display.ClearDisplay() led.Configure(machine.PinConfig{Mode: machine.PinOutput}) for !sensor.Connected() { led.High() time.Sleep(250 * time.Millisecond) led.Low() time.Sleep(1_000 * time.Millisecond) } button.Configure(machine.PinConfig{Mode: machine.PinInput}) // The button press handler is run concurrently in a go routine! go handleButton() } // ............................................................................ func loop() { for { temp, _ = sensor.ReadTemperature() humidity, _ = sensor.ReadHumidity() displayReadings() time.Sleep(500 * time.Millisecond) } } // ............................................................................ func handleButton() { for { if button.Get() { showCelcius = !showCelcius led.High() time.Sleep(500 * time.Millisecond) } if showCelcius { led.High() } else { led.Low() } time.Sleep(50 * time.Millisecond) } } // ............................................................................ func displayReadings() { if showCelcius { temp /= 1_000 } else { temp = int32(float32(temp)*1.8)/1_000 + 32 } display.ClearBuffer() displayText = fmt.Sprintf("%d", temp) tinyfont.WriteLine(&display, &freesans.Bold18pt7b, 30, 26, displayText, black) _, textWidth := tinyfont.LineWidth(&freesans.Bold18pt7b, displayText) if showCelcius { tinyfont.WriteLine(&display, &freesans.Bold12pt7b, int16(textWidth)+35, 26, "C", black) } else { tinyfont.WriteLine(&display, &freesans.Bold12pt7b, int16(textWidth)+35, 26, "F", black) } displayText = fmt.Sprintf("%d%% H", humidity/100) _, textWidth = tinyfont.LineWidth(&freesans.Regular9pt7b, displayText) tinyfont.WriteLine(&display, &freesans.Regular9pt7b, 64-(int16(textWidth)/2), 50, displayText, black) display.Display() }
projects/temperature-with-display/tinygo/main.go
0.632957
0.559471
main.go
starcoder
package astar

import (
	"container/heap"
	"errors"
)

// ErrNotFound means that the final state cannot be reached from the given start state.
var ErrNotFound = errors.New("final state is not reachable")

// Interface describes a type suitable for A* search. Any type can do as long as
// it can change its current state and tell legal moves from it.
// Knowing costs and estimates helps, but not necessary.
type Interface interface {
	// Initial state.
	Start() interface{}
	// Is this state final?
	Finish() bool
	// Move to a new state.
	Move(interface{})
	// Available moves from the current state.
	Successors() []interface{}
	// Path cost between the current and the given state.
	Cost(interface{}) float64
	// Heuristic estimate of “how far to go?” between the given
	// and the final state. Smaller values mean closer.
	Estimate(interface{}) float64
}

// state is one search node: a user state plus its accumulated path cost,
// heuristic estimate, and position within the priority queue.
type state struct {
	state          interface{}
	cost, estimate float64
	index          int
}

// states is a min-heap of search nodes ordered by cost+estimate
// (the A* f-value).
type states []*state

func (pq states) Len() int { return len(pq) }

// Empty reports whether the frontier has been exhausted.
func (pq states) Empty() bool { return len(pq) == 0 }

func (pq states) Less(i, j int) bool {
	return pq[i].cost+pq[i].estimate < pq[j].cost+pq[j].estimate
}

func (pq states) Swap(i, j int) {
	pq[i], pq[j] = pq[j], pq[i]
	// Index is maintained for heap.Fix().
	pq[i].index = i
	pq[j].index = j
}

func (pq *states) Push(x interface{}) {
	n := len(*pq)
	item := x.(*state)
	item.index = n
	*pq = append(*pq, item)
}

func (pq *states) Pop() interface{} {
	old := *pq
	n := len(old)
	x := old[n-1]
	*pq = old[0 : n-1]
	return x
}

// Search finds the p.Finish() state from the given p.Start() state by
// invoking p.Successors() and p.Move() at each step. Search returns two slices:
// 1) the shortest path to the final state, and 2) a sequence of explored states.
// If the shortest path cannot be found, ErrNotFound error is returned.
func Search(p Interface) ([]interface{}, []interface{}, error) {
	// Priority queue of states on the frontier, seeded with the start state.
	pq := states{{state: p.Start(), estimate: p.Estimate(p.Start())}}
	heap.Init(&pq)

	// States currently on the frontier, for O(1) decrease-key lookups.
	queuedLinks := map[interface{}]*state{}

	// States explored so far.
	explored := map[interface{}]bool{}

	// State transitions from start to finish (to reconstruct
	// the shortest path at the end of the search).
	transitions := map[interface{}]interface{}{}

	// Sequence of states in the order they have been explored.
	steps := []interface{}{}

	p.Move(p.Start())

	// Exhaust all successor states.
	for !pq.Empty() {
		// Pick a state with a minimum Cost() + Estimate() value.
		current := heap.Pop(&pq).(*state)
		delete(queuedLinks, current.state)
		explored[current.state] = true

		// Move to the new state.
		p.Move(current.state)
		steps = append(steps, current.state)

		// If the state is final, terminate.
		if p.Finish() {
			// Reconstruct the path by walking the transition links back
			// from finish to start, then reversing once. The original
			// prepended each element (append([]interface{}{x}, path...)),
			// which re-copies the slice on every step — O(n^2); this is O(n).
			path := []interface{}{current.state}
			for s := current.state; ; {
				prev, ok := transitions[s]
				if !ok {
					break
				}
				path = append(path, prev)
				s = prev
			}
			for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 {
				path[i], path[j] = path[j], path[i]
			}
			return path, steps, nil
		}

		for _, succ := range p.Successors() {
			// Don't re-explore.
			if explored[succ] {
				continue
			}

			// Path cost so far.
			cost := current.cost + p.Cost(succ)

			if queuedState, ok := queuedLinks[succ]; ok {
				// Already on the frontier: keep whichever path is cheaper.
				if cost < queuedState.cost {
					queuedState.cost = cost
					heap.Fix(&pq, queuedState.index)
					transitions[succ] = current.state
				}
			} else {
				// Add a new successor to the frontier.
				node := &state{
					state:    succ,
					cost:     cost,
					estimate: p.Estimate(succ),
				}
				heap.Push(&pq, node)
				queuedLinks[succ] = node
				transitions[succ] = current.state
			}
		}
	}

	return nil, steps, ErrNotFound
}
astar.go
0.613584
0.505249
astar.go
starcoder
package encryption import ( "crypto/hmac" "crypto/sha512" "storj.io/common/storj" ) const ( // AESGCMNonceSize is the size of an AES-GCM nonce AESGCMNonceSize = 12 // unit32Size is the number of bytes in the uint32 type uint32Size = 4 ) // AESGCMNonce represents the nonce used by the AES-GCM protocol type AESGCMNonce [AESGCMNonceSize]byte // ToAESGCMNonce returns the nonce as a AES-GCM nonce func ToAESGCMNonce(nonce *storj.Nonce) *AESGCMNonce { aes := new(AESGCMNonce) copy((*aes)[:], nonce[:AESGCMNonceSize]) return aes } // Increment increments the nonce with the given amount func Increment(nonce *storj.Nonce, amount int64) (truncated bool, err error) { return incrementBytes(nonce[:], amount) } // Encrypt encrypts data with the given cipher, key and nonce func Encrypt(data []byte, cipher storj.CipherSuite, key *storj.Key, nonce *storj.Nonce) (cipherData []byte, err error) { // Don't encrypt empty slice if len(data) == 0 { return []byte{}, nil } switch cipher { case storj.EncNull: return data, nil case storj.EncAESGCM: return EncryptAESGCM(data, key, ToAESGCMNonce(nonce)) case storj.EncSecretBox: return EncryptSecretBox(data, key, nonce) case storj.EncNullBase64URL: return nil, ErrInvalidConfig.New("base64 encoding not supported for this operation") default: return nil, ErrInvalidConfig.New("encryption type %d is not supported", cipher) } } // Decrypt decrypts cipherData with the given cipher, key and nonce func Decrypt(cipherData []byte, cipher storj.CipherSuite, key *storj.Key, nonce *storj.Nonce) (data []byte, err error) { // Don't decrypt empty slice if len(cipherData) == 0 { return []byte{}, nil } switch cipher { case storj.EncNull: return cipherData, nil case storj.EncAESGCM: return DecryptAESGCM(cipherData, key, ToAESGCMNonce(nonce)) case storj.EncSecretBox: return DecryptSecretBox(cipherData, key, nonce) case storj.EncNullBase64URL: return nil, ErrInvalidConfig.New("base64 encoding not supported for this operation") default: return nil, 
ErrInvalidConfig.New("encryption type %d is not supported", cipher) } } // NewEncrypter creates a Transformer using the given cipher, key and nonce to encrypt data passing through it func NewEncrypter(cipher storj.CipherSuite, key *storj.Key, startingNonce *storj.Nonce, encryptedBlockSize int) (Transformer, error) { switch cipher { case storj.EncNull: return &NoopTransformer{}, nil case storj.EncAESGCM: return NewAESGCMEncrypter(key, ToAESGCMNonce(startingNonce), encryptedBlockSize) case storj.EncSecretBox: return NewSecretboxEncrypter(key, startingNonce, encryptedBlockSize) case storj.EncNullBase64URL: return nil, ErrInvalidConfig.New("base64 encoding not supported for this operation") default: return nil, ErrInvalidConfig.New("encryption type %d is not supported", cipher) } } // NewDecrypter creates a Transformer using the given cipher, key and nonce to decrypt data passing through it func NewDecrypter(cipher storj.CipherSuite, key *storj.Key, startingNonce *storj.Nonce, encryptedBlockSize int) (Transformer, error) { switch cipher { case storj.EncNull: return &NoopTransformer{}, nil case storj.EncAESGCM: return NewAESGCMDecrypter(key, ToAESGCMNonce(startingNonce), encryptedBlockSize) case storj.EncSecretBox: return NewSecretboxDecrypter(key, startingNonce, encryptedBlockSize) case storj.EncNullBase64URL: return nil, ErrInvalidConfig.New("base64 encoding not supported for this operation") default: return nil, ErrInvalidConfig.New("encryption type %d is not supported", cipher) } } // EncryptKey encrypts keyToEncrypt with the given cipher, key and nonce func EncryptKey(keyToEncrypt *storj.Key, cipher storj.CipherSuite, key *storj.Key, nonce *storj.Nonce) (storj.EncryptedPrivateKey, error) { return Encrypt(keyToEncrypt[:], cipher, key, nonce) } // DecryptKey decrypts keyToDecrypt with the given cipher, key and nonce func DecryptKey(keyToDecrypt storj.EncryptedPrivateKey, cipher storj.CipherSuite, key *storj.Key, nonce *storj.Nonce) (*storj.Key, error) { plainData, 
err := Decrypt(keyToDecrypt, cipher, key, nonce) if err != nil { return nil, err } var decryptedKey storj.Key copy(decryptedKey[:], plainData) return &decryptedKey, nil } // DeriveKey derives new key from the given key and message using HMAC-SHA512 func DeriveKey(key *storj.Key, message string) (*storj.Key, error) { mac := hmac.New(sha512.New, key[:]) _, err := mac.Write([]byte(message)) if err != nil { return nil, Error.Wrap(err) } derived := new(storj.Key) copy(derived[:], mac.Sum(nil)) return derived, nil } // CalcEncryptedSize calculates what would be the size of the cipher data after // encrypting data with dataSize using a Transformer with the given encryption // parameters. func CalcEncryptedSize(dataSize int64, parameters storj.EncryptionParameters) (int64, error) { transformer, err := NewEncrypter(parameters.CipherSuite, new(storj.Key), new(storj.Nonce), int(parameters.BlockSize)) if err != nil { return 0, err } inBlockSize := int64(transformer.InBlockSize()) blocks := (dataSize + uint32Size + inBlockSize - 1) / inBlockSize encryptedSize := blocks * int64(transformer.OutBlockSize()) return encryptedSize, nil }
vendor/storj.io/common/encryption/encryption.go
0.776708
0.403861
encryption.go
starcoder
package extraction import ( "github.com/alevinval/fingerprints/internal/matrix" "github.com/alevinval/fingerprints/internal/types" ) // Minutia retrieves fingerprint features from a skeletonized image. Each // feature angle is obtained from the filtered directional image. Features // outside the fingerprint itself are removed by checking against the // segmented image, that tells us what is fingerprint from background. func Minutia(skeleton *matrix.M, filteredDirectional *matrix.M, segmented *matrix.M) types.MinutiaeList { minutiaes := types.MinutiaeList{} bounds := skeleton.Bounds() for y := bounds.Min.Y + 1; y < bounds.Max.Y-1; y++ { for x := bounds.Min.X + 1; x < bounds.Max.X-1; x++ { if segmented.At(x, y) == 0 { continue } minutiaeType := matchMinutiaeType(skeleton, x, y) if minutiaeType != types.Unknown { minutiae := types.Minutiae{ X: x, Y: y, Angle: filteredDirectional.At(x, y), Type: minutiaeType, } minutiaes = append(minutiaes, minutiae) } } } return minutiaes } func matchMinutiaeType(in *matrix.M, i, j int) types.MinutiaeType { p0 := in.At(i-1, j-1) > 0 p1 := in.At(i, j-1) > 0 p2 := in.At(i+1, j-1) > 0 p3 := in.At(i+1, j) > 0 p4 := in.At(i+1, j+1) > 0 p5 := in.At(i, j+1) > 0 p6 := in.At(i-1, j+1) > 0 p7 := in.At(i-1, j) > 0 pc := in.At(i, j) > 0 and := func(f0, f1, f2, f7, fc, f3, f6, f5, f4 bool) bool { return (pc == fc) && (p0 == f0) && (p1 == f1) && (p2 == f2) && (p3 == f3) && (p4 == f4) && (p5 == f5) && (p6 == f6) && (p7 == f7) } isPore := and(o, x, o, x, o, x, o, x, o) if isPore { return types.Pore } isBifurcation := ( // Diagonals and(x, o, x, o, x, o, o, o, x) || and(x, o, x, o, x, o, o, x, o) || and(x, o, x, o, x, o, x, o, o) || and(x, o, o, o, x, x, x, o, o) || and(x, o, o, o, x, o, x, o, x) || and(o, x, o, o, x, o, x, o, x) || and(o, o, x, o, x, o, x, o, x) || and(o, o, x, x, x, o, o, o, x) || and(x, o, x, o, x, o, o, o, x) || // Orthogonals and(o, o, o, x, x, x, o, x, o) || and(o, x, o, o, x, x, o, x, o) || and(o, x, o, x, x, x, o, o, o) || 
and(o, x, o, x, x, o, o, x, o) || and(x, o, o, o, x, x, o, x, o) || and(o, x, o, x, x, o, o, o, x) || and(o, o, x, x, x, o, o, x, o) || and(o, x, o, o, x, x, x, o, o)) if isBifurcation { return types.Bifurcation } isTermination := ( // Terminations and(x, o, o, o, x, o, o, o, o) || and(o, x, o, o, x, o, o, o, o) || and(o, o, x, o, x, o, o, o, o) || and(o, o, o, o, x, x, o, o, o) || and(o, o, o, o, x, o, o, o, x) || and(o, o, o, o, x, o, o, x, o) || and(o, o, o, o, x, o, x, o, o) || and(o, o, o, x, x, o, o, o, o)) if isTermination { return types.Termination } return types.Unknown } const ( x = true o = false )
internal/extraction/minutia.go
0.735926
0.490175
minutia.go
starcoder
package pars import ( "fmt" "github.com/go-ascii/ascii" ) // Parser is the function signature of a parser. type Parser func(state *State, result *Result) error // Map is the function signature for a result mapper. type Map func(result *Result) error // Map applies the callback if the parser matches. func (p Parser) Map(f Map) Parser { return func(state *State, result *Result) error { state.Push() if err := p(state, result); err != nil { state.Pop() return err } state.Drop() return f(result) } } // Child will map to the i'th child of the result. func (p Parser) Child(i int) Parser { return p.Map(Child(i)) } // Children will keep the children associated to the given indices. func (p Parser) Children(indices ...int) Parser { return p.Map(Children(indices...)) } // ToString will convert the Token field to a string Value. func (p Parser) ToString() Parser { return p.Map(ToString) } // Bind will bind the given value as the parser result value. func (p Parser) Bind(v interface{}) Parser { return func(state *State, result *Result) error { if err := p(state, result); err != nil { return err } result.SetValue(v) return nil } } // Error will modify the Parser to return the given error if the Parser returns // an error. func (p Parser) Error(alt error) Parser { return func(state *State, result *Result) error { if err := p(state, result); err != nil { return BoundError{alt, state.Position()} } return nil } } // Parse the given state using the parser and return the Result. func (p Parser) Parse(s *State) (Result, error) { r := Result{} err := p(s, &r) return r, err } // AsParser attempts to create a Parser for a given argument. 
func AsParser(q interface{}) Parser { switch p := q.(type) { case Parser: return p case func(*State, *Result) error: return p case *Parser: return func(state *State, result *Result) error { return (*p)(state, result) } case byte: return Byte(p) case []byte: return Bytes(p) case rune: return Rune(p) case []rune: return Runes(p) case string: return String(p) case ascii.Filter: return Filter(p) default: panic(fmt.Errorf("cannot convert type `%T` to a parser", p)) } } // AsParsers applies the AsParser function to each argument. func AsParsers(qs ...interface{}) []Parser { ps := make([]Parser, len(qs)) for i, q := range qs { ps[i] = AsParser(q) } return ps }
parser.go
0.698432
0.430985
parser.go
starcoder
package block import ( "github.com/df-mc/dragonfly/dragonfly/block/model" "github.com/df-mc/dragonfly/dragonfly/block/wood" "github.com/df-mc/dragonfly/dragonfly/item" "github.com/df-mc/dragonfly/dragonfly/world" "github.com/df-mc/dragonfly/dragonfly/world/sound" "github.com/go-gl/mathgl/mgl64" ) // Door is a block that can be used as an openable 1x2 barrier. type Door struct { noNBT transparent // Wood is the type of wood of the door. This field must have one of the values found in the material // package. Wood wood.Wood // Facing is the direction the door is facing. Facing world.Direction // Open is whether or not the door is open. Open bool // Top is whether the block is the top or bottom half of a door Top bool // Right is whether the door hinge is on the right side Right bool } // Model ... func (d Door) Model() world.BlockModel { return model.Door{Facing: d.Facing, Open: d.Open, Right: d.Right} } // NeighbourUpdateTick ... func (d Door) NeighbourUpdateTick(pos, changedNeighbour world.BlockPos, w *world.World) { if d.Top { if _, ok := w.Block(pos.Side(world.FaceDown)).(Door); !ok { w.BreakBlock(pos) } } else { if solid := w.Block(pos.Side(world.FaceDown)).Model().FaceSolid(pos.Side(world.FaceDown), world.FaceUp, w); !solid { w.BreakBlock(pos) } else if _, ok := w.Block(pos.Side(world.FaceUp)).(Door); !ok { w.BreakBlock(pos) } } } // UseOnBlock handles the directional placing of doors func (d Door) UseOnBlock(pos world.BlockPos, face world.Face, clickPos mgl64.Vec3, w *world.World, user item.User, ctx *item.UseContext) bool { pos, face, used := firstReplaceable(w, pos, face, d) if !used { return false } if face != world.FaceUp { return false } if solid := w.Block(pos.Side(world.FaceDown)).Model().FaceSolid(pos.Side(world.FaceDown), world.FaceUp, w); !solid { return false } if _, ok := w.Block(pos.Side(world.FaceUp)).(Air); !ok { return false } d.Facing = user.Facing() left := w.Block(pos.Side(d.Facing.Rotate90().Opposite().Face())) right := 
w.Block(pos.Side(d.Facing.Rotate90().Face())) if door, ok := left.(Door); ok { if door.Wood == d.Wood { d.Right = true } } // The side the door hinge is on can be affected by the blocks to the left and right of the door. In particular, // opaque blocks on the right side of the door with transparent blocks on the left side result in a right sided // door hinge. if diffuser, ok := right.(LightDiffuser); !ok || diffuser.LightDiffusionLevel() != 0 { if diffuser, ok := left.(LightDiffuser); ok && diffuser.LightDiffusionLevel() == 0 { d.Right = true } } place(w, pos, d, user, ctx) place(w, pos.Side(world.FaceUp), Door{Wood: d.Wood, Facing: d.Facing, Top: true, Right: d.Right}, user, ctx) return placed(ctx) } // Activate ... func (d Door) Activate(pos world.BlockPos, clickedFace world.Face, w *world.World, u item.User) { d.Open = !d.Open w.PlaceBlock(pos, d) otherPos := pos.Side(world.Face(boolByte(!d.Top))) other := w.Block(otherPos) if door, ok := other.(Door); ok { door.Open = d.Open w.PlaceBlock(otherPos, door) } w.PlaySound(pos.Vec3Centre(), sound.Door{}) } // BreakInfo ... func (d Door) BreakInfo() BreakInfo { return BreakInfo{ Hardness: 3, Harvestable: alwaysHarvestable, Effective: axeEffective, Drops: simpleDrops(item.NewStack(d, 1)), } } // CanDisplace ... func (d Door) CanDisplace(l world.Liquid) bool { _, water := l.(Water) return water } // SideClosed ... func (d Door) SideClosed(pos, side world.BlockPos, w *world.World) bool { return false } // EncodeItem ... func (d Door) EncodeItem() (id int32, meta int16) { switch d.Wood { case wood.Oak(): return 324, 0 case wood.Spruce(): return 427, 0 case wood.Birch(): return 428, 0 case wood.Jungle(): return 429, 0 case wood.Acacia(): return 430, 0 case wood.DarkOak(): return 431, 0 } panic("invalid wood type") } // EncodeBlock ... 
func (d Door) EncodeBlock() (name string, properties map[string]interface{}) { direction := 3 switch d.Facing { case world.South: direction = 1 case world.West: direction = 2 case world.East: direction = 0 } switch d.Wood { case wood.Oak(): return "minecraft:wooden_door", map[string]interface{}{"direction": int32(direction), "door_hinge_bit": d.Right, "open_bit": d.Open, "upper_block_bit": d.Top} case wood.Spruce(): return "minecraft:spruce_door", map[string]interface{}{"direction": int32(direction), "door_hinge_bit": d.Right, "open_bit": d.Open, "upper_block_bit": d.Top} case wood.Birch(): return "minecraft:birch_door", map[string]interface{}{"direction": int32(direction), "door_hinge_bit": d.Right, "open_bit": d.Open, "upper_block_bit": d.Top} case wood.Jungle(): return "minecraft:jungle_door", map[string]interface{}{"direction": int32(direction), "door_hinge_bit": d.Right, "open_bit": d.Open, "upper_block_bit": d.Top} case wood.Acacia(): return "minecraft:acacia_door", map[string]interface{}{"direction": int32(direction), "door_hinge_bit": d.Right, "open_bit": d.Open, "upper_block_bit": d.Top} case wood.DarkOak(): return "minecraft:dark_oak_door", map[string]interface{}{"direction": int32(direction), "door_hinge_bit": d.Right, "open_bit": d.Open, "upper_block_bit": d.Top} } panic("invalid wood type") } // Hash ... 
func (d Door) Hash() uint64 { return hashDoor | (uint64(d.Facing) << 32) | (uint64(boolByte(d.Right)) << 35) | (uint64(boolByte(d.Open)) << 36) | (uint64(boolByte(d.Top)) << 37) | (uint64(d.Wood.Uint8()) << 38) } // allDoors returns a list of all door types func allDoors() (doors []world.Block) { for _, w := range []wood.Wood{ wood.Oak(), wood.Spruce(), wood.Birch(), wood.Jungle(), wood.Acacia(), wood.DarkOak(), } { for i := world.Direction(0); i <= 3; i++ { doors = append(doors, Door{Wood: w, Facing: i, Open: false, Top: false, Right: false}) doors = append(doors, Door{Wood: w, Facing: i, Open: false, Top: true, Right: false}) doors = append(doors, Door{Wood: w, Facing: i, Open: true, Top: true, Right: false}) doors = append(doors, Door{Wood: w, Facing: i, Open: true, Top: false, Right: false}) doors = append(doors, Door{Wood: w, Facing: i, Open: false, Top: false, Right: true}) doors = append(doors, Door{Wood: w, Facing: i, Open: false, Top: true, Right: true}) doors = append(doors, Door{Wood: w, Facing: i, Open: true, Top: true, Right: true}) doors = append(doors, Door{Wood: w, Facing: i, Open: true, Top: false, Right: true}) } } return }
dragonfly/block/door.go
0.629888
0.47792
door.go
starcoder
package yql import ( "math" "sort" "strconv" ) const ( opEqual = "=" opNotEqual = "!=" opLarger = ">" opLargerEqual = ">=" opLess = "<" opLessEqual = "<=" opInter = "∩" opNotInter = "!∩" opIn = "in" opNotIn = "!in" ) const ( epsilon = float64(1e-10) ) func cmpInt(actual, expect int64, op string) bool { switch op { case opEqual: return actual == expect case opNotEqual: return actual != expect case opLarger: return actual > expect case opLargerEqual: return actual >= expect case opLess: return actual < expect case opLessEqual: return actual <= expect default: return false } } func cmpFloat(actual, expect float64, op string) bool { switch op { case opEqual: return floatEqual(actual, expect) case opNotEqual: return !floatEqual(actual, expect) case opLarger: return actual > expect case opLargerEqual: return !(actual < expect) case opLess: return actual < expect case opLessEqual: return floatLessEqual(actual, expect) default: return false } } func cmpStr(actual, expect string, op string) bool { switch op { case opEqual: return actual == expect case opNotEqual: return actual != expect case opLarger: return actual > expect case opLargerEqual: return actual >= expect case opLess: return actual < expect case opLessEqual: return actual <= expect default: return false } } func cmpBool(actual, expect bool, op string) bool { switch op { case opEqual: return actual == expect case opNotEqual: return actual != expect default: return false } } func compareSet(actual interface{}, expect []string, op string) bool { switch op { case opEqual, opNotEqual, opInter, opNotInter, opIn, opNotIn: default: return false } switch actualArr := actual.(type) { case int: return cmpIntSet([]int64{int64(actualArr)}, expect, op) case int64: return cmpIntSet([]int64{actualArr}, expect, op) case float64: return cmpFloatSet([]float64{actualArr}, expect, op) case string: return cmpStringSet([]string{actualArr}, expect, op) case []int: return cmpIntSet(intArr2i64Arr(actualArr), expect, op) case []int64: 
return cmpIntSet(actualArr, expect, op) case []float64: return cmpFloatSet(actualArr, expect, op) case []string: return cmpStringSet(actualArr, expect, op) default: return false } } func intArr2i64Arr(arr []int) []int64 { length := len(arr) if length == 0 { return nil } res := make([]int64, 0, length) for _, v := range arr { res = append(res, int64(v)) } return res } var intSetCmpFunc = map[string]func([]int64, []int64) bool{ opInter: intSetInter, opNotInter: intSetNotInter, opIn: intSetBelong, opNotIn: intSetNotBelong, } func cmpIntSet(actualVals []int64, expectVals []string, op string) bool { expectArr := make([]int64, 0, len(expectVals)) for _, expect := range expectVals { v, err := strconv.ParseInt(removeQuote(expect), 10, 64) if nil != err { return false } expectArr = append(expectArr, v) } cmp, ok := intSetCmpFunc[op] if ok { return cmp(actualVals, expectArr) } return false } var floatSetCmpFunc = map[string]func([]float64, []float64) bool{ opInter: floatSetInter, opNotInter: floatSetNotInter, opIn: floatSetBelong, opNotIn: floatSetNotBelong, } func cmpFloatSet(actualVals []float64, expectVals []string, op string) bool { expectArr := make([]float64, 0, len(expectVals)) for _, expect := range expectVals { v, err := strconv.ParseFloat(removeQuote(expect), 64) if nil != err { return false } expectArr = append(expectArr, v) } cmp, ok := floatSetCmpFunc[op] if ok { return cmp(actualVals, expectArr) } return false } var stringSetCmpFunc = map[string]func([]string, []string) bool{ opInter: strSetInter, opNotInter: strSetNotInter, opIn: strSetBelong, opNotIn: strSetNotBelong, } func cmpStringSet(actual []string, expect []string, op string) bool { cmp, ok := stringSetCmpFunc[op] if !ok { return false } expectVals := make([]string, 0, len(expect)) for _, v := range expect { expectVals = append(expectVals, removeQuote(v)) } return cmp(actual, expectVals) } func intSetBelong(actualVals []int64, expectVals []int64) bool { length := len(expectVals) if len(actualVals) == 0 
|| len(actualVals) > length { return false } expectArr := i64Arr(expectVals) sort.Sort(expectArr) for _, actual := range actualVals { t := sort.Search(length, func(i int) bool { return actual <= expectArr[i] }) if t >= length || actual != expectArr[t] { return false } } return true } func intSetNotBelong(actualVals []int64, expectVals []int64) bool { return !intSetBelong(actualVals, expectVals) } func intSetInter(actualVals []int64, expectVals []int64) bool { length := len(expectVals) if len(actualVals) == 0 || length == 0 { return false } expectArr := i64Arr(expectVals) sort.Sort(expectArr) for _, actual := range actualVals { t := sort.Search(length, func(i int) bool { return actual <= expectArr[i] }) if t < length && actual == expectArr[t] { return true } } return false } func intSetNotInter(actualVals []int64, expectVals []int64) bool { return !intSetInter(actualVals, expectVals) } type i64Arr []int64 func (arr i64Arr) Len() int { return len(arr) } func (arr i64Arr) Less(i, j int) bool { return arr[i] < arr[j] } func (arr i64Arr) Swap(i, j int) { arr[i], arr[j] = arr[j], arr[i] } func floatSetBelong(actualVals []float64, expectVals []float64) bool { length := len(expectVals) if len(actualVals) == 0 || len(actualVals) > length { return false } sort.Float64s(expectVals) for _, actual := range actualVals { t := sort.Search(length, func(i int) bool { return floatLessEqual(actual, expectVals[i]) }) if t >= length || !floatEqual(actual, expectVals[t]) { return false } } return true } func floatSetNotBelong(actualVals []float64, expectVals []float64) bool { return !floatSetBelong(actualVals, expectVals) } func floatSetInter(actualVals []float64, expectVals []float64) bool { length := len(expectVals) if len(actualVals) == 0 || length == 0 { return false } sort.Float64s(expectVals) for _, actual := range actualVals { t := sort.Search(length, func(i int) bool { return floatLessEqual(actual, expectVals[i]) }) if t < length && floatEqual(actual, expectVals[t]) { return true 
} } return false } func floatSetNotInter(actualVals []float64, expectVals []float64) bool { return !floatSetInter(actualVals, expectVals) } func strSetBelong(actualVals []string, expectVals []string) bool { length := len(expectVals) if len(actualVals) == 0 || len(actualVals) > length { return false } sort.Strings(expectVals) for _, actual := range actualVals { t := sort.SearchStrings(expectVals, actual) if t >= length || actual != expectVals[t] { return false } } return true } func strSetNotBelong(actualVals []string, expectVals []string) bool { return !strSetBelong(actualVals, expectVals) } func strSetInter(actualVals []string, expectVals []string) bool { length := len(expectVals) if len(actualVals) == 0 || length == 0 { return false } sort.Strings(expectVals) for _, actual := range actualVals { t := sort.SearchStrings(expectVals, actual) if t < length && actual == expectVals[t] { return true } } return false } func strSetNotInter(actualVals []string, expectVals []string) bool { return !strSetInter(actualVals, expectVals) } func floatEqual(a, b float64) bool { return math.Abs(a-b) < epsilon } func floatLessEqual(a, b float64) bool { return a < b || floatEqual(a, b) }
plugins/data/parser/ql/yql/cmp.go
0.582966
0.696436
cmp.go
starcoder
package props import ( "github.com/johnfercher/maroto/pkg/color" "github.com/johnfercher/maroto/pkg/consts" ) // Proportion represents a proportion from a rectangle, example: 16x9, 4x3... type Proportion struct { // Width from the rectangle: Barcode, image and etc Width float64 // Height from the rectangle: Barcode, image and etc Height float64 } // Barcode represents properties from a barcode inside a cell type Barcode struct { // Left is the space between the left cell boundary to the barcode, if center is false Left float64 // Top is space between the upper cell limit to the barcode, if center is false Top float64 // Percent is how much the barcode will occupy the cell, // ex 100%: The barcode will fulfill the entire cell // ex 50%: The greater side from the barcode will have half the size of the cell Percent float64 // Proportion is the proportion between size of the barcode // Ex: 16x9, 4x3... Proportion Proportion // Center define that the barcode will be vertically and horizontally centralized Center bool } // Rect represents properties from a rectangle (Image, QrCode or Barcode) inside a cell type Rect struct { // Left is the space between the left cell boundary to the rectangle, if center is false Left float64 // Top is space between the upper cell limit to the barcode, if center is false Top float64 // Percent is how much the rectangle will occupy the cell, // ex 100%: The rectangle will fulfill the entire cell // ex 50%: The greater side from the rectangle will have half the size of the cell Percent float64 // Center define that the barcode will be vertically and horizontally centralized Center bool } // Text represents properties from a Text inside a cell type Text struct { // Top is space between the upper cell limit to the barcode, if align is not center Top float64 // Family of the text, ex: consts.Arial, helvetica and etc Family string // Style of the text, ex: consts.Normal, bold and etc Style consts.Style // Size of the text Size float64 // Align 
of the text Align consts.Align // Extrapolate define if the text will automatically add a new line when // text reach the right cell boundary Extrapolate bool // VerticalPadding define an additional space between lines VerticalPadding float64 // Color define the font color Color color.Color } // Font represents properties from a text type Font struct { // Family of the text, ex: consts.Arial, helvetica and etc Family string // Style of the text, ex: consts.Normal, bold and etc Style consts.Style // Size of the text Size float64 // Color define the font color Color color.Color } // TableListContent represents properties from a line (header/content) from a TableList type TableListContent struct { // Family of the text, ex: consts.Arial, helvetica and etc Family string // Style of the text, ex: consts.Normal, bold and etc Style consts.Style // Size of the text Size float64 // GridSizes is the custom properties of the size of the grid // the sum of the values cannot be greater than 12, if this // value is not provided the width of all columns will be the // same GridSizes []uint } // TableList represents properties from a TableList type TableList struct { // HeaderProp is the custom properties of the text inside // the headers HeaderProp TableListContent // ContentProp is the custom properties of the text inside // the contents ContentProp TableListContent // Align is the align of the text (header and content) inside the columns Align consts.Align // AlternatedBackground define the background color from even rows // i.e rows with index (0, 2, 4, ..., N) will have background colorized, // rows with index (1, 3, 5, ..., N) will not AlternatedBackground *color.Color // HeaderContentSpace is the space between the header and the contents HeaderContentSpace float64 // Line adds a line after every content-row to separate rows. 
The line's spaceHeight is set to 1.0 Line bool } // MakeValid from Rect will make the properties from a rectangle reliable to fit inside a cell // and define default values for a rectangle func (s *Rect) MakeValid() { if s.Percent <= 0.0 || s.Percent > 100.0 { s.Percent = 100.0 } if s.Center { s.Left = 0 s.Top = 0 } if s.Left < 0.0 { s.Left = 0.0 } if s.Top < 0.0 { s.Top = 0 } } // MakeValid from Barcode will make the properties from a barcode reliable to fit inside a cell // and define default values for a barcode func (s *Barcode) MakeValid() { if s.Percent <= 0.0 || s.Percent > 100.0 { s.Percent = 100.0 } if s.Center { s.Left = 0 s.Top = 0 } if s.Left < 0.0 { s.Left = 0.0 } if s.Top < 0.0 { s.Top = 0 } if s.Proportion.Width <= 0 { s.Proportion.Width = 1 } if s.Proportion.Height <= 0 { s.Proportion.Height = 1 } if s.Proportion.Height > s.Proportion.Width*0.20 { s.Proportion.Height = s.Proportion.Width * 0.20 } else if s.Proportion.Height < s.Proportion.Width*0.10 { s.Proportion.Height = s.Proportion.Width * 0.10 } } // MakeValid from Text define default values for a Text func (s *Text) MakeValid(defaultFamily string) { if s.Family == "" { s.Family = defaultFamily } if s.Style == "" { s.Style = consts.Normal } if s.Align == "" { s.Align = consts.Left } if s.Size == 0.0 { s.Size = 10.0 } if s.Top < 0.0 { s.Top = 0.0 } if s.VerticalPadding < 0 { s.VerticalPadding = 0 } } // MakeValid from Font define default values for a Signature func (s *Font) MakeValid(defaultFamily string) { if s.Family == "" { s.Family = defaultFamily } if s.Style == "" { s.Style = consts.Bold } if s.Size == 0.0 { s.Size = 8.0 } } // ToTextProp from Font return a Text based on Font func (s *Font) ToTextProp(align consts.Align, top float64, extrapolate bool, verticalPadding float64) Text { textProp := Text{ Family: s.Family, Style: s.Style, Size: s.Size, Align: align, Top: top, Extrapolate: extrapolate, VerticalPadding: verticalPadding, Color: s.Color, } textProp.MakeValid(s.Family) return 
textProp } // ToTextProp from Font return a TableListContent based on Font func (s *TableListContent) ToTextProp(align consts.Align, top float64, extrapolate bool, verticalPadding float64) Text { textProp := Text{ Family: s.Family, Style: s.Style, Size: s.Size, Align: align, Top: top, Extrapolate: extrapolate, VerticalPadding: verticalPadding, } textProp.MakeValid(s.Family) return textProp } // MakeValid from TableList define default values for a TableList func (s *TableList) MakeValid(header []string, defaultFamily string) { if s.HeaderProp.Size == 0.0 { s.HeaderProp.Size = 10.0 } if s.HeaderProp.Family == "" { s.HeaderProp.Family = defaultFamily } if s.HeaderProp.Style == "" { s.HeaderProp.Style = consts.Bold } if len(s.HeaderProp.GridSizes) == 0 { gridSize := uint(12.0 / len(header)) s.HeaderProp.GridSizes = []uint{} for range header { s.HeaderProp.GridSizes = append(s.HeaderProp.GridSizes, gridSize) } } if s.Align == "" { s.Align = consts.Left } if s.ContentProp.Size == 0.0 { s.ContentProp.Size = 10.0 } if s.ContentProp.Family == "" { s.ContentProp.Family = defaultFamily } if s.ContentProp.Style == "" { s.ContentProp.Style = consts.Normal } if len(s.ContentProp.GridSizes) == 0 { gridSize := uint(12.0 / len(header)) s.ContentProp.GridSizes = []uint{} for range header { s.ContentProp.GridSizes = append(s.ContentProp.GridSizes, gridSize) } } if s.HeaderContentSpace == 0.0 { s.HeaderContentSpace = 4.0 } }
pkg/props/prop.go
0.741674
0.633269
prop.go
starcoder
package schema const ModelSchema = `{ "$id": "docs/spec/errors/error.json", "type": "object", "description": "An error or a logged error message captured by an agent occurring in a monitored service", "allOf": [ { "$id": "doc/spec/timestamp_epoch.json", "title": "Timestamp Epoch", "description": "Object with 'timestamp' property.", "type": ["object"], "properties": { "timestamp": { "description": "Recorded time of the event, UTC based and formatted as microseconds since Unix epoch", "type": ["integer", "null"] } } }, { "properties": { "id": { "type": ["string"], "description": "Hex encoded 128 random bits ID of the error.", "maxLength": 1024 }, "trace_id": { "description": "Hex encoded 128 random bits ID of the correlated trace. Must be present if transaction_id and parent_id are set.", "type": ["string", "null"], "maxLength": 1024 }, "transaction_id": { "type": ["string", "null"], "description": "Hex encoded 64 random bits ID of the correlated transaction. Must be present if trace_id and parent_id are set.", "maxLength": 1024 }, "parent_id": { "description": "Hex encoded 64 random bits ID of the parent transaction or span. Must be present if trace_id and transaction_id are set.", "type": ["string", "null"], "maxLength": 1024 }, "transaction": { "type": ["object", "null"], "description": "Data for correlating errors with transactions", "properties": { "sampled": { "type": ["boolean", "null"], "description": "Transactions that are 'sampled' will include all available information. Transactions that are not sampled will not have 'spans' or 'context'. Defaults to true." 
}, "type": { "type": ["string", "null"], "description": "Keyword of specific relevance in the service's domain (eg: 'request', 'backgroundjob', etc)", "maxLength": 1024 } } }, "context": { "$id": "doc/spec/context.json", "title": "Context", "description": "Any arbitrary contextual information regarding the event, captured by the agent, optionally provided by the user", "type": ["object", "null"], "properties": { "custom": { "description": "An arbitrary mapping of additional metadata to store with the event.", "type": ["object", "null"], "patternProperties": { "^[^.*\"]*$": {} }, "additionalProperties": false }, "response": { "type": ["object", "null"], "properties": { "finished": { "description": "A boolean indicating whether the response was finished or not", "type": ["boolean", "null"] }, "headers": { "description": "A mapping of HTTP headers of the response object", "type": ["object", "null"], "patternProperties": { "[.*]*$": { "type": ["string", "array", "null"], "items": { "type": ["string"] } } } }, "headers_sent": { "type": ["boolean", "null"] }, "status_code": { "description": "The HTTP status code of the response.", "type": ["integer", "null"] } } }, "request": { "$id": "docs/spec/http.json", "title": "Request", "description": "If a log record was generated as a result of a http request, the http interface can be used to collect this information.", "type": ["object", "null"], "properties": { "body": { "description": "Data should only contain the request body (not the query string). It can either be a dictionary (for standard HTTP requests) or a raw request body.", "type": ["object", "string", "null"] }, "env": { "description": "The env variable is a compounded of environment information passed from the webserver.", "type": ["object", "null"], "properties": {} }, "headers": { "description": "Should include any headers sent by the requester. 
Cookies will be taken by headers if supplied.", "type": ["object", "null"], "patternProperties": { "[.*]*$": { "type": ["string", "array", "null"], "items": { "type": ["string"] } } } }, "http_version": { "description": "HTTP version.", "type": ["string", "null"], "maxLength": 1024 }, "method": { "description": "HTTP method.", "type": "string", "maxLength": 1024 }, "socket": { "type": ["object", "null"], "properties": { "encrypted": { "description": "Indicates whether request was sent as SSL/HTTPS request.", "type": ["boolean", "null"] }, "remote_address": { "description": "The network address sending the request. Should be obtained through standard APIs and not parsed from any headers like 'Forwarded'.", "type": ["string", "null"] } } }, "url": { "description": "A complete Url, with scheme, host and path.", "type": "object", "properties": { "raw": { "type": ["string", "null"], "description": "The raw, unparsed URL of the HTTP request line, e.g https://example.com:443/search?q=elasticsearch. This URL may be absolute or relative. For more details, see https://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2.", "maxLength": 1024 }, "protocol": { "type": ["string", "null"], "description": "The protocol of the request, e.g. 'https:'.", "maxLength": 1024 }, "full": { "type": ["string", "null"], "description": "The full, possibly agent-assembled URL of the request, e.g https://example.com:443/search?q=elasticsearch#top.", "maxLength": 1024 }, "hostname": { "type": ["string", "null"], "description": "The hostname of the request, e.g. 'example.com'.", "maxLength": 1024 }, "port": { "type": ["string", "integer","null"], "description": "The port of the request, e.g. '443'", "maxLength": 1024 }, "pathname": { "type": ["string", "null"], "description": "The path of the request, e.g. '/search'", "maxLength": 1024 }, "search": { "description": "The search describes the query string of the request. 
It is expected to have values delimited by ampersands.", "type": ["string", "null"], "maxLength": 1024 }, "hash": { "type": ["string", "null"], "description": "The hash of the request URL, e.g. 'top'", "maxLength": 1024 } } }, "cookies": { "description": "A parsed key-value object of cookies", "type": ["object", "null"] } }, "required": ["url", "method"] }, "tags": { "$id": "doc/spec/tags.json", "title": "Tags", "type": ["object", "null"], "description": "A flat mapping of user-defined tags with string, boolean or number values.", "patternProperties": { "^[^.*\"]*$": { "type": ["string", "boolean", "number", "null"], "maxLength": 1024 } }, "additionalProperties": false }, "user": { "description": "Describes the correlated user for this event. If user data are provided here, all user related information from metadata is ignored, otherwise the metadata's user information will be stored with the event.", "$id": "docs/spec/user.json", "title": "User", "type": ["object", "null"], "properties": { "id": { "description": "Identifier of the logged in user, e.g. the primary key of the user", "type": ["string", "integer", "null"], "maxLength": 1024 }, "email": { "description": "Email of the logged in user", "type": ["string", "null"], "maxLength": 1024 }, "username": { "description": "The username of the logged in user", "type": ["string", "null"], "maxLength": 1024 } } }, "page": { "description": "", "type": ["object", "null"], "properties": { "referer": { "description": "RUM specific field that stores the URL of the page that 'linked' to the current page.", "type": ["string", "null"] }, "url": { "description": "RUM specific field that stores the URL of the current page", "type": ["string", "null"] } } }, "service": { "description": "Service related information can be sent per event. 
Provided information will override the more generic information from metadata, non provided fields will be set according to the metadata information.", "$id": "doc/spec/service.json", "title": "Service", "type": ["object", "null"], "properties": { "agent": { "description": "Name and version of the Elastic APM agent", "type": ["object", "null"], "properties": { "name": { "description": "Name of the Elastic APM agent, e.g. \"Python\"", "type": ["string", "null"], "maxLength": 1024 }, "version": { "description": "Version of the Elastic APM agent, e.g.\"1.0.0\"", "type": ["string", "null"], "maxLength": 1024 }, "ephemeral_id": { "description": "Free format ID used for metrics correlation by some agents", "type": ["string", "null"], "maxLength": 1024 } } }, "framework": { "description": "Name and version of the web framework used", "type": ["object", "null"], "properties": { "name": { "type": ["string", "null"], "maxLength": 1024 }, "version": { "type": ["string", "null"], "maxLength": 1024 } } }, "language": { "description": "Name and version of the programming language used", "type": ["object", "null"], "properties": { "name": { "type": ["string", "null"], "maxLength": 1024 }, "version": { "type": ["string", "null"], "maxLength": 1024 } } }, "name": { "description": "Immutable name of the service emitting this event", "type": ["string", "null"], "pattern": "^[a-zA-Z0-9 _-]+$", "maxLength": 1024 }, "environment": { "description": "Environment name of the service, e.g. 
\"production\" or \"staging\"", "type": ["string", "null"], "maxLength": 1024 }, "runtime": { "description": "Name and version of the language runtime running this service", "type": ["object", "null"], "properties": { "name": { "type": ["string", "null"], "maxLength": 1024 }, "version": { "type": ["string", "null"], "maxLength": 1024 } } }, "version": { "description": "Version of the service emitting this event", "type": ["string", "null"], "maxLength": 1024 }, "node": { "description": "Unique meaningful name of the service node.", "type": ["object", "null"], "properties": { "configured_name": { "type": ["string", "null"], "maxLength": 1024 } } } } }, "message": { "$id": "doc/spec/message.json", "title": "Message", "description": "Details related to message receiving and publishing if the captured event integrates with a messaging system", "type": ["object", "null"], "properties": { "queue": { "type": ["object", "null"], "properties": { "name": { "description": "Name of the message queue where the message is received.", "type": ["string","null"], "maxLength": 1024 } } }, "age": { "type": ["object", "null"], "properties": { "ms": { "description": "The age of the message in milliseconds. If the instrumented messaging framework provides a timestamp for the message, agents may use it. Otherwise, the sending agent can add a timestamp in milliseconds since the Unix epoch to the message's metadata to be retrieved by the receiving agent. 
If a timestamp is not available, agents should omit this field.", "type": ["integer", "null"] } } }, "body": { "description": "messsage body, similar to an http request body", "type": ["string", "null"] }, "headers": { "description": "messsage headers, similar to http request headers", "type": ["object", "null"], "patternProperties": { "[.*]*$": { "type": ["string", "array", "null"], "items": { "type": ["string"] } } } } } } } }, "culprit": { "description": "Function call which was the primary perpetrator of this event.", "type": ["string", "null"], "maxLength": 1024 }, "exception": { "description": "Information about the originally thrown error.", "type": ["object", "null"], "properties": { "code": { "type": ["string", "integer", "null"], "maxLength": 1024, "description": "The error code set when the error happened, e.g. database error code." }, "message": { "description": "The original error message.", "type": ["string", "null"] }, "module": { "description": "Describes the exception type's module namespace.", "type": ["string", "null"], "maxLength": 1024 }, "attributes": { "type": ["object", "null"] }, "stacktrace": { "type": ["array", "null"], "items": { "$id": "docs/spec/stacktrace_frame.json", "title": "Stacktrace", "type": "object", "description": "A stacktrace frame, contains various bits (most optional) describing the context of the frame", "properties": { "abs_path": { "description": "The absolute path of the file involved in the stack frame", "type": ["string", "null"] }, "colno": { "description": "Column number", "type": ["integer", "null"] }, "context_line": { "description": "The line of code part of the stack frame", "type": ["string", "null"] }, "filename": { "description": "The relative filename of the code involved in the stack frame, used e.g. 
to do error checksumming", "type": ["string", "null"] }, "classname": { "description": "The classname of the code involved in the stack frame", "type": ["string", "null"] }, "function": { "description": "The function involved in the stack frame", "type": ["string", "null"] }, "library_frame": { "description": "A boolean, indicating if this frame is from a library or user code", "type": ["boolean", "null"] }, "lineno": { "description": "The line number of code part of the stack frame, used e.g. to do error checksumming", "type": ["integer", "null"] }, "module": { "description": "The module to which frame belongs to", "type": ["string", "null"] }, "post_context": { "description": "The lines of code after the stack frame", "type": ["array", "null"], "minItems": 0, "items": { "type": "string" } }, "pre_context": { "description": "The lines of code before the stack frame", "type": ["array", "null"], "minItems": 0, "items": { "type": "string" } }, "vars": { "description": "Local variables for this stack frame", "type": ["object", "null"], "properties": {} } }, "anyOf": [ { "required": ["filename"], "properties": {"filename": { "type": "string" }} }, { "required": ["classname"], "properties": {"classname": { "type": "string" }} } ] }, "minItems": 0 }, "type": { "type": ["string", "null"], "maxLength": 1024 }, "handled": { "type": ["boolean", "null"], "description": "Indicator whether the error was caught somewhere in the code or not." 
}, "cause": { "type": ["array", "null"], "items": { "type": ["object", "null"], "description": "Recursive exception object" }, "minItems": 0, "description": "Exception tree" } }, "anyOf": [ {"required": ["message"], "properties": {"message": {"type": "string"}}}, {"required": ["type"], "properties": {"type": {"type": "string"}}} ] }, "log": { "type": ["object", "null"], "description": "Additional information added when logging the error.", "properties": { "level": { "description": "The severity of the record.", "type": ["string", "null"], "maxLength": 1024 }, "logger_name": { "description": "The name of the logger instance used.", "type": ["string", "null"], "default": "default", "maxLength": 1024 }, "message": { "description": "The additionally logged error message.", "type": "string" }, "param_message": { "description": "A parametrized message. E.g. 'Could not connect to %s'. The property message is still required, and should be equal to the param_message, but with placeholders replaced. In some situations the param_message is used to group errors together. The string is not interpreted, so feel free to use whichever placeholders makes sense in the client languange.", "type": ["string", "null"], "maxLength": 1024 }, "stacktrace": { "type": ["array", "null"], "items": { "$id": "docs/spec/stacktrace_frame.json", "title": "Stacktrace", "type": "object", "description": "A stacktrace frame, contains various bits (most optional) describing the context of the frame", "properties": { "abs_path": { "description": "The absolute path of the file involved in the stack frame", "type": ["string", "null"] }, "colno": { "description": "Column number", "type": ["integer", "null"] }, "context_line": { "description": "The line of code part of the stack frame", "type": ["string", "null"] }, "filename": { "description": "The relative filename of the code involved in the stack frame, used e.g. 
to do error checksumming", "type": ["string", "null"] }, "classname": { "description": "The classname of the code involved in the stack frame", "type": ["string", "null"] }, "function": { "description": "The function involved in the stack frame", "type": ["string", "null"] }, "library_frame": { "description": "A boolean, indicating if this frame is from a library or user code", "type": ["boolean", "null"] }, "lineno": { "description": "The line number of code part of the stack frame, used e.g. to do error checksumming", "type": ["integer", "null"] }, "module": { "description": "The module to which frame belongs to", "type": ["string", "null"] }, "post_context": { "description": "The lines of code after the stack frame", "type": ["array", "null"], "minItems": 0, "items": { "type": "string" } }, "pre_context": { "description": "The lines of code before the stack frame", "type": ["array", "null"], "minItems": 0, "items": { "type": "string" } }, "vars": { "description": "Local variables for this stack frame", "type": ["object", "null"], "properties": {} } }, "anyOf": [ { "required": ["filename"], "properties": {"filename": { "type": "string" }} }, { "required": ["classname"], "properties": {"classname": { "type": "string" }} } ] }, "minItems": 0 } }, "required": ["message"] } }, "allOf": [ { "required": ["id"] }, { "if": {"required": ["transaction_id"], "properties": {"transaction_id": { "type": "string" }}}, "then": { "required": ["trace_id", "parent_id"], "properties": {"trace_id": { "type": "string" }, "parent_id": {"type": "string"}}}}, { "if": {"required": ["trace_id"], "properties": {"trace_id": { "type": "string" }}}, "then": { "required": ["parent_id"], "properties": {"parent_id": { "type": "string" }}} }, { "if": {"required": ["parent_id"], "properties": {"parent_id": { "type": "string" }}}, "then": { "required": ["trace_id"], "properties": {"trace_id": { "type": "string" }}} } ], "anyOf": [ { "required": ["exception"], "properties": {"exception": { "type": 
"object" }} }, { "required": ["log"], "properties": {"log": { "type": "object" }} } ] } ] } `
model/error/generated/schema/error.go
0.866175
0.464841
error.go
starcoder
package main import ( "github.com/gen2brain/raylib-go/raylib" ) func main() { screenWidth := int32(800) screenHeight := int32(450) rl.InitWindow(screenWidth, screenHeight, "raylib [models] example - box collisions") camera := rl.Camera{} camera.Position = rl.NewVector3(0.0, 10.0, 10.0) camera.Target = rl.NewVector3(0.0, 0.0, 0.0) camera.Up = rl.NewVector3(0.0, 1.0, 0.0) camera.Fovy = 45.0 camera.Projection = rl.CameraPerspective playerPosition := rl.NewVector3(0.0, 1.0, 2.0) playerSize := rl.NewVector3(1.0, 2.0, 1.0) playerColor := rl.Green enemyBoxPos := rl.NewVector3(-4.0, 1.0, 0.0) enemyBoxSize := rl.NewVector3(2.0, 2.0, 2.0) enemySpherePos := rl.NewVector3(4.0, 0.0, 0.0) enemySphereSize := float32(1.5) collision := false rl.SetTargetFPS(60) for !rl.WindowShouldClose() { // Update // Move player if rl.IsKeyDown(rl.KeyRight) { playerPosition.X += 0.2 } else if rl.IsKeyDown(rl.KeyLeft) { playerPosition.X -= 0.2 } else if rl.IsKeyDown(rl.KeyDown) { playerPosition.Z += 0.2 } else if rl.IsKeyDown(rl.KeyUp) { playerPosition.Z -= 0.2 } collision = false // Check collisions player vs enemy-box if rl.CheckCollisionBoxes( rl.NewBoundingBox( rl.NewVector3(playerPosition.X-playerSize.X/2, playerPosition.Y-playerSize.Y/2, playerPosition.Z-playerSize.Z/2), rl.NewVector3(playerPosition.X+playerSize.X/2, playerPosition.Y+playerSize.Y/2, playerPosition.Z+playerSize.Z/2)), rl.NewBoundingBox( rl.NewVector3(enemyBoxPos.X-enemyBoxSize.X/2, enemyBoxPos.Y-enemyBoxSize.Y/2, enemyBoxPos.Z-enemyBoxSize.Z/2), rl.NewVector3(enemyBoxPos.X+enemyBoxSize.X/2, enemyBoxPos.Y+enemyBoxSize.Y/2, enemyBoxPos.Z+enemyBoxSize.Z/2)), ) { collision = true } // Check collisions player vs enemy-sphere if rl.CheckCollisionBoxSphere( rl.NewBoundingBox( rl.NewVector3(playerPosition.X-playerSize.X/2, playerPosition.Y-playerSize.Y/2, playerPosition.Z-playerSize.Z/2), rl.NewVector3(playerPosition.X+playerSize.X/2, playerPosition.Y+playerSize.Y/2, playerPosition.Z+playerSize.Z/2)), enemySpherePos, 
enemySphereSize, ) { collision = true } if collision { playerColor = rl.Red } else { playerColor = rl.Green } // Draw rl.BeginDrawing() rl.ClearBackground(rl.RayWhite) rl.BeginMode3D(camera) // Draw enemy-box rl.DrawCube(enemyBoxPos, enemyBoxSize.X, enemyBoxSize.Y, enemyBoxSize.Z, rl.Gray) rl.DrawCubeWires(enemyBoxPos, enemyBoxSize.X, enemyBoxSize.Y, enemyBoxSize.Z, rl.DarkGray) // Draw enemy-sphere rl.DrawSphere(enemySpherePos, enemySphereSize, rl.Gray) rl.DrawSphereWires(enemySpherePos, enemySphereSize, 16, 16, rl.DarkGray) // Draw player rl.DrawCubeV(playerPosition, playerSize, playerColor) rl.DrawGrid(10, 1.0) // Draw a grid rl.EndMode3D() rl.DrawText("Move player with cursors to collide", 220, 40, 20, rl.Gray) rl.DrawFPS(10, 10) rl.EndDrawing() } rl.CloseWindow() }
examples/models/box_collisions/main.go
0.586404
0.441553
main.go
starcoder
package draw // A Point is an X, Y coordinate pair. type Point struct { X, Y int; } // ZP is the zero Point. var ZP Point // A Rectangle contains the Points with Min.X <= X < Max.X, Min.Y <= Y < Max.Y. type Rectangle struct { Min, Max Point; } // ZR is the zero Rectangle. var ZR Rectangle // Pt is shorthand for Point{X, Y}. func Pt(X, Y int) Point { return Point{X, Y} } // Rect is shorthand for Rectangle{Pt(x0, y0), Pt(x1, y1)}. func Rect(x0, y0, x1, y1 int) Rectangle { return Rectangle{Point{x0, y0}, Point{x1, y1}} } // Rpt is shorthand for Rectangle{min, max}. func Rpt(min, max Point) Rectangle { return Rectangle{min, max} } // Add returns the sum of p and q: Pt(p.X+q.X, p.Y+q.Y). func (p Point) Add(q Point) Point { return Point{p.X + q.X, p.Y + q.Y} } // Sub returns the difference of p and q: Pt(p.X-q.X, p.Y-q.Y). func (p Point) Sub(q Point) Point { return Point{p.X - q.X, p.Y - q.Y} } // Mul returns p scaled by k: Pt(p.X*k p.Y*k). func (p Point) Mul(k int) Point { return Point{p.X * k, p.Y * k} } // Div returns p divided by k: Pt(p.X/k, p.Y/k). func (p Point) Div(k int) Point { return Point{p.X / k, p.Y / k} } // Eq returns true if p and q are equal. func (p Point) Eq(q Point) bool { return p.X == q.X && p.Y == q.Y } // Inset returns the rectangle r inset by n: Rect(r.Min.X+n, r.Min.Y+n, r.Max.X-n, r.Max.Y-n). func (r Rectangle) Inset(n int) Rectangle { return Rectangle{Point{r.Min.X + n, r.Min.Y + n}, Point{r.Max.X - n, r.Max.Y - n}} } // Add returns the rectangle r translated by p: Rpt(r.Min.Add(p), r.Max.Add(p)). func (r Rectangle) Add(p Point) Rectangle { return Rectangle{r.Min.Add(p), r.Max.Add(p)} } // Sub returns the rectangle r translated by -p: Rpt(r.Min.Sub(p), r.Max.Sub(p)). func (r Rectangle) Sub(p Point) Rectangle { return Rectangle{r.Min.Sub(p), r.Max.Sub(p)} } // Canon returns a canonical version of r: the returned rectangle // has Min.X <= Max.X and Min.Y <= Max.Y. 
func (r Rectangle) Canon() Rectangle { if r.Max.X < r.Min.X { r.Max.X = r.Min.X } if r.Max.Y < r.Min.Y { r.Max.Y = r.Min.Y } return r; } // Overlaps returns true if r and r1 cross; that is, it returns true if they share any point. func (r Rectangle) Overlaps(r1 Rectangle) bool { return r.Min.X < r1.Max.X && r1.Min.X < r.Max.X && r.Min.Y < r1.Max.Y && r1.Min.Y < r.Max.Y } // Empty retruns true if r contains no points. func (r Rectangle) Empty() bool { return r.Max.X <= r.Min.X || r.Max.Y <= r.Min.Y } // InRect returns true if all the points in r are also in r1. func (r Rectangle) In(r1 Rectangle) bool { if r.Empty() { return true } if r1.Empty() { return false } return r1.Min.X <= r.Min.X && r.Max.X <= r1.Max.X && r1.Min.Y <= r.Min.Y && r.Max.Y <= r1.Max.Y; } // Combine returns the smallest rectangle containing all points from r and from r1. func (r Rectangle) Combine(r1 Rectangle) Rectangle { if r.Empty() { return r1 } if r1.Empty() { return r } if r.Min.X > r1.Min.X { r.Min.X = r1.Min.X } if r.Min.Y > r1.Min.Y { r.Min.Y = r1.Min.Y } if r.Max.X < r1.Max.X { r.Max.X = r1.Max.X } if r.Max.Y < r1.Max.Y { r.Max.Y = r1.Max.Y } return r; } // Clip returns the largest rectangle containing only points shared by r and r1. func (r Rectangle) Clip(r1 Rectangle) Rectangle { if r.Empty() { return r } if r1.Empty() { return r1 } if r.Min.X < r1.Min.X { r.Min.X = r1.Min.X } if r.Min.Y < r1.Min.Y { r.Min.Y = r1.Min.Y } if r.Max.X > r1.Max.X { r.Max.X = r1.Max.X } if r.Max.Y > r1.Max.Y { r.Max.Y = r1.Max.Y } return r; } // Dx returns the width of the rectangle r: r.Max.X - r.Min.X. func (r Rectangle) Dx() int { return r.Max.X - r.Min.X } // Dy returns the width of the rectangle r: r.Max.Y - r.Min.Y. func (r Rectangle) Dy() int { return r.Max.Y - r.Min.Y }
src/pkg/exp/draw/arith.go
0.928132
0.675725
arith.go
starcoder
package cell import ( "github.com/PetrusJPrinsloo/gameoflife/config" "github.com/PetrusJPrinsloo/gameoflife/graphics" "github.com/PetrusJPrinsloo/gameoflife/shape" "github.com/go-gl/gl/v4.1-core/gl" ) type Cell struct { Drawable uint32 Alive bool AliveNext bool X int Y int } func NewCell(x, y int, cnf *config.Config) *Cell { points := make([]float32, len(shape.Square), len(shape.Square)) copy(points, shape.Square) for i := 0; i < len(points); i++ { var position float32 var size float32 switch i % 3 { case 0: size = 1.0 / float32(cnf.Columns) position = float32(x) * size case 1: size = 1.0 / float32(cnf.Rows) position = float32(y) * size default: continue } if points[i] < 0 { points[i] = (position * 2) - 1 } else { points[i] = ((position + size) * 2) - 1 } } return &Cell{ Drawable: graphics.MakeVao(points), X: x, Y: y, } } func (c *Cell) CheckState(cells [][]*Cell) { c.Alive = c.AliveNext c.AliveNext = c.Alive liveCount := c.liveNeighbors(cells) if c.Alive { // 1. Any live Cell with fewer than two live neighbours dies, as if caused by underpopulation. if liveCount < 2 { c.AliveNext = false } // 2. Any live Cell with two or three live neighbours lives on to the next generation. if liveCount == 2 || liveCount == 3 { c.AliveNext = true } // 3. Any live Cell with more than three live neighbours dies, as if by overpopulation. if liveCount > 3 { c.AliveNext = false } } else { // 4. Any dead Cell with exactly three live neighbours becomes a live Cell, as if by reproduction. if liveCount == 3 { c.AliveNext = true } } } // liveNeighbors returns the number of live neighbors for a Cell. func (c *Cell) liveNeighbors(cells [][]*Cell) int { var liveCount int add := func(x, y int) { // If we're at an edge, check the other side of the board. 
if x == len(cells) { x = 0 } else if x == -1 { x = len(cells) - 1 } if y == len(cells[x]) { y = 0 } else if y == -1 { y = len(cells[x]) - 1 } if cells[x][y].Alive { liveCount++ } } add(c.X-1, c.Y) // To the left add(c.X+1, c.Y) // To the right add(c.X, c.Y+1) // up add(c.X, c.Y-1) // down add(c.X-1, c.Y+1) // top-left add(c.X+1, c.Y+1) // top-right add(c.X-1, c.Y-1) // bottom-left add(c.X+1, c.Y-1) // bottom-right return liveCount } func (c *Cell) Draw() { if !c.Alive { return } gl.BindVertexArray(c.Drawable) gl.DrawArrays(gl.TRIANGLES, 0, int32(len(shape.Square)/3)) }
cell/cell.go
0.602997
0.446615
cell.go
starcoder
package sio import "math/rand" // anchor represents relative position like beloW: // 7 8 9 // 4 5 6 // 1 2 3 // Rect is a simple rect type Rect struct { X, Y, W, H float64 anchor int } // NewRect returns a neW rect. // Position is Hidden, use Pos metHod. func NewRect(anchor int, X, Y, W, H float64) *Rect { var r Rect r.Set(anchor, X, Y, W, H) return &r } // Set sets tHe data func (r *Rect) Set(anchor int, X, Y, W, H float64) { r.anchor = anchor r.W, r.H = W, H switch anchor { case 7, 8, 9: r.Y = Y case 4, 5, 6: r.Y = Y - H/2 case 1, 2, 3: r.Y = Y - H } switch anchor { case 7, 4, 1: r.X = X case 8, 5, 2: r.X = X - W/2 case 9, 6, 3: r.X = X - W } } // Drive cHanges tHe anchor func (r *Rect) Drive(anchor int) *Rect { r.anchor = anchor return r } // Move sets position relativelY func (r *Rect) Move(X, Y float64) *Rect { r.Set(r.anchor, X, Y, r.W, r.H) return r } // Shift shifts the rect relatively. func (r *Rect) Shift(x, y float64) *Rect { r.X += x r.Y += y return r } // Pos returns relative position func (r *Rect) Pos(anchor int) (float64, float64) { var X, Y float64 switch anchor { case 7, 8, 9: Y = r.Y case 4, 5, 6: Y = r.Y + r.H/2 case 1, 2, 3: Y = r.Y + r.H } switch anchor { case 7, 4, 1: X = r.X case 8, 5, 2: X = r.X + r.W/2 case 9, 6, 3: X = r.X + r.W } return X, Y } func (r *Rect) CPos(anchor int) complex128 { x, y := r.Pos(anchor) return complex(x, y) } func (r *Rect) RandPos() (float64, float64) { return r.X + r.W*rand.Float64(), r.Y + r.H*rand.Float64() } // Clone clones tHe rect, able to set neW anchor func (r *Rect) Clone(oldAnchor, neWAnchor int) *Rect { X, Y := r.Pos(oldAnchor) return NewRect(neWAnchor, X, Y, r.W, r.H) } // Resize resizes tHe rect. 
func (r *Rect) Resize(diffX, diffY float64) *Rect { X, Y := r.Pos(r.anchor) r.W += diffX r.H += diffY r.Move(X, Y) return r } // Scale returns a neW scaled rect func (r *Rect) Scale(scaleX, scaleY float64) *Rect { X, Y := r.Pos(r.anchor) r.W *= scaleX r.H *= scaleY r.Move(X, Y) return r } func (r *Rect) SetSize(w, h float64) *Rect { X, Y := r.Pos(r.anchor) if w >= 0 { r.W = w } if h >= 0 { r.H = h } r.Move(X, Y) return r } // Contains reports it contains tHe point func (r *Rect) Contains(pos complex128) bool { return r.ContainsF(real(pos), imag(pos)) } // ContainsF reports it contains tHe point func (r *Rect) ContainsF(X, Y float64) bool { left, top := r.Pos(7) rigHt, bottom := r.Pos(3) return left <= X && X < rigHt && top <= Y && Y < bottom } // Wraps reports WHetHer tHe passed rect is Wrapped bY tHe rect. func (r *Rect) Wraps(rHs *Rect) bool { return r.ContainsF(rHs.Pos(7)) && r.ContainsF(rHs.Pos(3)) } // Intersects reports WHetHer tHe rects intersect. func (r *Rect) Intersects(rHs *Rect) bool { r1, t1 := r.Pos(7) l1, b1 := r.Pos(3) r2, t2 := rHs.Pos(7) l2, b2 := rHs.Pos(3) X := (r1 <= r2 && r2 <= l1) || (r1 <= l2 && l2 <= l1) Y := (t1 <= t2 && t2 <= b1) || (t1 <= b2 && b2 <= b1) return X && Y }
rect.go
0.854384
0.526282
rect.go
starcoder
package mocks import ( "errors" "time" "github.com/wardana/currency-exchange/models" ) //MockRateRepository is a mock type for the Interface type type MockRateRepository struct{} //Create provides a mock function with given fields func (r MockRateRepository) Create(params models.Rate) (models.Rate, error) { if params.CurrencyPairID == 1 { return models.Rate{}, errors.New("mock some err") } return models.Rate{}, nil } //Find provides a mock function with given fields func (r MockRateRepository) Find(params *models.Rate) ([]models.Rate, error) { if params.CurrencyPairID == 2 { return []models.Rate{models.Rate{ID: 2}, models.Rate{ID: 3}}, nil } if params.CurrencyPairID == 3 { return []models.Rate{models.Rate{ID: 2}}, nil } if params.CurrencyPairID == 4 { return []models.Rate{}, errors.New("mock some err") } if params.ID == 1 { return []models.Rate{models.Rate{ID: 1}}, nil } return []models.Rate{}, nil } //Update provides a mock function with given fields func (r MockRateRepository) Update(id int64, params models.Rate) (models.Rate, error) { if id == 3 { return models.Rate{}, errors.New("mock some err") } if params.ID == 3 { return models.Rate{}, errors.New("mock some err") } if id == 4 { return models.Rate{}, errors.New("mock some err") } return models.Rate{}, nil } //RemoveByPairID provides a mock function with given fields func (r MockRateRepository) RemoveByPairID(id int64) error { if id == 2 { return errors.New("id not found") } return nil } //TrendDataByCurrency provides a mock function with given fields func (r MockRateRepository) TrendDataByCurrency(base, counter string) ([]models.ExchangeData, error) { if base == "USD" && counter == "IDR" { return []models.ExchangeData{}, errors.New("mock some err ") } return []models.ExchangeData{}, nil } //ExchangeDataByDate provides a mock function with given fields func (r MockRateRepository) ExchangeDataByDate(date time.Time) ([]models.RatePayload, error) { dateWithoutTime, _ := time.Parse("2006-01-02", "2018-08-12") if 
dateWithoutTime == date { return []models.RatePayload{}, errors.New("mock some err") } return []models.RatePayload{}, nil }
repositories/mocks/rate.go
0.756268
0.410284
rate.go
starcoder
package packages import ( "github.com/montanaflynn/stats" ) func init() { Packages["stats"] = map[string]interface{}{ "ChebyshevDistance": stats.ChebyshevDistance, "Correlation": stats.Correlation, "Covariance": stats.Covariance, "CovariancePopulation": stats.CovariancePopulation, "EuclideanDistance": stats.EuclideanDistance, "GeometricMean": stats.GeometricMean, "HarmonicMean": stats.HarmonicMean, "InterQuartileRange": stats.InterQuartileRange, "ManhattanDistance": stats.ManhattanDistance, "Max": stats.Max, "Mean": stats.Mean, "Median": stats.Median, "MedianAbsoluteDeviation": stats.MedianAbsoluteDeviation, "MedianAbsoluteDeviationPopulation": stats.MedianAbsoluteDeviationPopulation, "Midhinge": stats.Midhinge, "Min": stats.Min, "MinkowskiDistance": stats.MinkowskiDistance, "Mode": stats.Mode, "Pearson": stats.Pearson, "Percentile": stats.Percentile, "PercentileNearestRank": stats.PercentileNearestRank, "PopulationVariance": stats.PopulationVariance, "Round": stats.Round, "Sample": stats.Sample, "SampleVariance": stats.SampleVariance, "StandardDeviation": stats.StandardDeviation, "StandardDeviationPopulation": stats.StandardDeviationPopulation, "StandardDeviationSample": stats.StandardDeviationSample, "StdDevP": stats.StdDevP, "StdDevS": stats.StdDevS, "Sum": stats.Sum, "Trimean": stats.Trimean, "VarP": stats.VarP, "VarS": stats.VarS, "Variance": stats.Variance, "Float64Data": Float64Data, "ToFloat64Data": ToFloat64Data, "LoadRawData": stats.LoadRawData, "QuartileOutliers": stats.QuartileOutliers, "Series": Series, "ToSeries": ToSeries, } PackageTypes["stats"] = map[string]interface{}{ "Coordinate": stats.Coordinate{}, "Outliers": stats.Outliers{}, "Quartiles": stats.Quartiles{}, } } func Float64Data() stats.Float64Data { var temp stats.Float64Data return temp } func ToFloat64Data(f []float64) stats.Float64Data { return stats.Float64Data(f) } func Series() stats.Series { var temp stats.Series return temp } func ToSeries(s []stats.Coordinate) stats.Series { return 
stats.Series(s) }
packages/stats.go
0.742888
0.596962
stats.go
starcoder
package ginkgo

import (
	"fmt"
	"reflect"
	"strings"

	"github.com/onsi/ginkgo/v2/internal"
	"github.com/onsi/ginkgo/v2/types"
)

/*
The EntryDescription decorator allows you to pass a format string to DescribeTable() and Entry().  This format string is used to generate entry names via:

	fmt.Sprintf(formatString, parameters...)

where parameters are the parameters passed into the entry.

When passed into an Entry the EntryDescription is used to generate the name or that entry.  When passed to DescribeTable, the EntryDescription is used to generate the names for any entries that have `nil` descriptions.

You can learn more about generating EntryDescriptions here: https://onsi.github.io/ginkgo/#generating-entry-descriptions
*/
type EntryDescription string

// render expands the format string with the entry's parameters to produce the entry name.
func (ed EntryDescription) render(args ...interface{}) string {
	return fmt.Sprintf(string(ed), args...)
}

/*
DescribeTable describes a table-driven spec.

For example:

	DescribeTable("a simple table",
		func(x int, y int, expected bool) {
			Ω(x > y).Should(Equal(expected))
		},
		Entry("x > y", 1, 0, true),
		Entry("x == y", 0, 0, false),
		Entry("x < y", 0, 1, false),
	)

You can learn more about DescribeTable here: https://onsi.github.io/ginkgo/#table-specs
And can explore some Table patterns here: https://onsi.github.io/ginkgo/#table-specs-patterns
*/
func DescribeTable(description string, args ...interface{}) bool {
	generateTable(description, args...)
	return true
}

/*
You can focus a table with `FDescribeTable`.  This is equivalent to `FDescribe`.
*/
func FDescribeTable(description string, args ...interface{}) bool {
	args = append(args, internal.Focus)
	generateTable(description, args...)
	return true
}

/*
You can mark a table as pending with `PDescribeTable`.  This is equivalent to `PDescribe`.
*/
func PDescribeTable(description string, args ...interface{}) bool {
	args = append(args, internal.Pending)
	generateTable(description, args...)
	return true
}

/*
You can mark a table as pending with `XDescribeTable`.  This is equivalent to `XDescribe`.
*/
var XDescribeTable = PDescribeTable

/*
TableEntry represents an entry in a table test.  You generally use the `Entry` constructor.
*/
type TableEntry struct {
	description  interface{}   // string, EntryDescription, description-generating func, or nil
	decorations  []interface{} // Ginkgo decorators (Focus, Pending, ...) filtered out of the args
	parameters   []interface{} // arguments forwarded to the table body function
	codeLocation types.CodeLocation
}

/*
Entry constructs a TableEntry.

The first argument is a description.  This can be a string, a function that accepts the parameters passed to the TableEntry and returns a string, an EntryDescription format string, or nil.  If nil is provided then the name of the Entry is derived using the table-level entry description.
Subsequent arguments accept any Ginkgo decorators.  These are filtered out and the remaining arguments are passed into the Spec function associated with the table.

Each Entry ends up generating an individual Ginkgo It.  The body of the it is the Table Body function with the Entry parameters passed in.

You can learn more about Entry here: https://onsi.github.io/ginkgo/#table-specs
*/
func Entry(description interface{}, args ...interface{}) TableEntry {
	decorations, parameters := internal.PartitionDecorations(args...)
	return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(1)}
}

/*
You can focus a particular entry with FEntry.  This is equivalent to FIt.
*/
func FEntry(description interface{}, args ...interface{}) TableEntry {
	decorations, parameters := internal.PartitionDecorations(args...)
	decorations = append(decorations, internal.Focus)
	return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(1)}
}

/*
You can mark a particular entry as pending with PEntry.  This is equivalent to PIt.
*/
func PEntry(description interface{}, args ...interface{}) TableEntry {
	decorations, parameters := internal.PartitionDecorations(args...)
	decorations = append(decorations, internal.Pending)
	return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(1)}
}

/*
You can mark a particular entry as pending with XEntry.  This is equivalent to XIt.
*/
var XEntry = PEntry

// generateTable is the shared implementation behind the DescribeTable variants.
// It partitions args into entries, the single table body function, an optional
// table-level entry-description generator, and pass-through container decorators,
// then pushes a container node whose children are one It per entry.
func generateTable(description string, args ...interface{}) {
	cl := types.NewCodeLocation(2)
	containerNodeArgs := []interface{}{cl}

	entries := []TableEntry{}
	var itBody interface{}

	// Default description generator used for entries with a nil description:
	// "Entry: <param>, <param>, ...".
	var tableLevelEntryDescription interface{}
	tableLevelEntryDescription = func(args ...interface{}) string {
		out := []string{}
		for _, arg := range args {
			out = append(out, fmt.Sprint(arg))
		}
		return "Entry: " + strings.Join(out, ", ")
	}

	// Classify each argument by its dynamic type. Case order matters: a
	// string-returning func is a description generator; any other func is
	// the (single) table body.
	for _, arg := range args {
		switch t := reflect.TypeOf(arg); {
		case t == reflect.TypeOf(TableEntry{}):
			entries = append(entries, arg.(TableEntry))
		case t == reflect.TypeOf([]TableEntry{}):
			entries = append(entries, arg.([]TableEntry)...)
		case t == reflect.TypeOf(EntryDescription("")):
			tableLevelEntryDescription = arg.(EntryDescription).render
		case t.Kind() == reflect.Func && t.NumOut() == 1 && t.Out(0) == reflect.TypeOf(""):
			tableLevelEntryDescription = arg
		case t.Kind() == reflect.Func:
			if itBody != nil {
				exitIfErr(types.GinkgoErrors.MultipleEntryBodyFunctionsForTable(cl))
			}
			itBody = arg
		default:
			containerNodeArgs = append(containerNodeArgs, arg)
		}
	}

	containerNodeArgs = append(containerNodeArgs, func() {
		for _, entry := range entries {
			var err error
			entry := entry // capture per-iteration copy for the closures below
			var description string
			// Resolve the entry name from its description, falling back to
			// the table-level generator when the description is nil.
			switch t := reflect.TypeOf(entry.description); {
			case t == nil:
				err = validateParameters(tableLevelEntryDescription, entry.parameters, "Entry Description function", entry.codeLocation)
				if err == nil {
					description = invokeFunction(tableLevelEntryDescription, entry.parameters)[0].String()
				}
			case t == reflect.TypeOf(EntryDescription("")):
				description = entry.description.(EntryDescription).render(entry.parameters...)
			case t == reflect.TypeOf(""):
				description = entry.description.(string)
			case t.Kind() == reflect.Func && t.NumOut() == 1 && t.Out(0) == reflect.TypeOf(""):
				err = validateParameters(entry.description, entry.parameters, "Entry Description function", entry.codeLocation)
				if err == nil {
					description = invokeFunction(entry.description, entry.parameters)[0].String()
				}
			default:
				err = types.GinkgoErrors.InvalidEntryDescription(entry.codeLocation)
			}
			if err == nil {
				err = validateParameters(itBody, entry.parameters, "Table Body function", entry.codeLocation)
			}
			itNodeArgs := []interface{}{entry.codeLocation}
			itNodeArgs = append(itNodeArgs, entry.decorations...)
			itNodeArgs = append(itNodeArgs, func() {
				// Validation errors are deferred to run time so they are
				// reported as a failure of this particular It.
				if err != nil {
					panic(err)
				}
				invokeFunction(itBody, entry.parameters)
			})

			pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, description, itNodeArgs...))
		}
	})

	pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, description, containerNodeArgs...))
}

// invokeFunction calls function with parameters via reflection, spreading the
// tail of parameters into a variadic final argument when present.
func invokeFunction(function interface{}, parameters []interface{}) []reflect.Value {
	inValues := make([]reflect.Value, len(parameters))

	funcType := reflect.TypeOf(function)
	limit := funcType.NumIn()
	if funcType.IsVariadic() {
		limit = limit - 1
	}

	for i := 0; i < limit && i < len(parameters); i++ {
		inValues[i] = computeValue(parameters[i], funcType.In(i))
	}

	if funcType.IsVariadic() {
		variadicType := funcType.In(limit).Elem()
		for i := limit; i < len(parameters); i++ {
			inValues[i] = computeValue(parameters[i], variadicType)
		}
	}

	return reflect.ValueOf(function).Call(inValues)
}

// validateParameters checks that parameters match function's arity and
// parameter types (including the variadic tail), returning a Ginkgo error
// describing the first mismatch, or nil if the call would be valid.
func validateParameters(function interface{}, parameters []interface{}, kind string, cl types.CodeLocation) error {
	funcType := reflect.TypeOf(function)
	limit := funcType.NumIn()
	if funcType.IsVariadic() {
		limit = limit - 1
	}
	if len(parameters) < limit {
		return types.GinkgoErrors.TooFewParametersToTableFunction(limit, len(parameters), kind, cl)
	}
	if len(parameters) > limit && !funcType.IsVariadic() {
		return types.GinkgoErrors.TooManyParametersToTableFunction(limit, len(parameters), kind, cl)
	}
	var i = 0
	for ; i < limit; i++ {
		actual := reflect.TypeOf(parameters[i])
		expected := funcType.In(i)
		// A nil actual type means an untyped nil parameter, which is
		// assignable to any nilable expected type, so it is skipped.
		if !(actual == nil) && !actual.AssignableTo(expected) {
			return types.GinkgoErrors.IncorrectParameterTypeToTableFunction(i+1, expected, actual, kind, cl)
		}
	}
	if funcType.IsVariadic() {
		expected := funcType.In(limit).Elem()
		for ; i < len(parameters); i++ {
			actual := reflect.TypeOf(parameters[i])
			if !(actual == nil) && !actual.AssignableTo(expected) {
				return types.GinkgoErrors.IncorrectVariadicParameterTypeToTableFunction(expected, actual, kind, cl)
			}
		}
	}
	return nil
}

// computeValue converts an interface parameter to a reflect.Value of type t,
// mapping a nil parameter to t's zero value.
func computeValue(parameter interface{}, t reflect.Type) reflect.Value {
	if parameter == nil {
		return reflect.Zero(t)
	} else {
		return reflect.ValueOf(parameter)
	}
}
vendor/github.com/onsi/ginkgo/v2/table_dsl.go
0.721449
0.430327
table_dsl.go
starcoder
// +build appengine js

// This file contains an implementation of proto field accesses using package reflect.
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
// be used on App Engine.
//
// The underscored function names (structPointer_Xxx, word32_Xxx, ...) mirror the
// API exposed by pointer_unsafe.go; exactly one of the two files is compiled in.

package proto

import (
	"math"
	"reflect"
)

// A structPointer is a pointer to a struct.
type structPointer struct {
	v reflect.Value
}

// toStructPointer returns a structPointer equivalent to the given reflect value.
// The reflect value must itself be a pointer to a struct.
func toStructPointer(v reflect.Value) structPointer {
	return structPointer{v}
}

// IsNil reports whether p is nil.
func structPointer_IsNil(p structPointer) bool {
	return p.v.IsNil()
}

// Interface returns the struct pointer as an interface value.
func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
	return p.v.Interface()
}

// A field identifies a field in a struct, accessible from a structPointer.
// In this implementation, a field is identified by the sequence of field indices
// passed to reflect's FieldByIndex.
type field []int

// toField returns a field equivalent to the given reflect field.
func toField(f *reflect.StructField) field {
	return f.Index
}

// invalidField is an invalid field identifier.
var invalidField = field(nil)

// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool {
	return f != nil
}

// field returns the given field in the struct as a reflect value.
func structPointer_field(p structPointer, f field) reflect.Value {
	// Special case: an extension map entry with a value of type T
	// passes a *T to the struct-handling code with a zero field,
	// expecting that it will be treated as equivalent to *struct{ X T },
	// which has the same memory layout. We have to handle that case
	// specially, because reflect will panic if we call FieldByIndex on a
	// non-struct.
	if f == nil {
		return p.v.Elem()
	}
	return p.v.Elem().FieldByIndex(f)
}

// ifield returns the given field in the struct as an interface value.
func structPointer_ifield(p structPointer, f field) interface{} {
	// Addr() gives a pointer to the field; each typed accessor below then
	// type-asserts this interface to the concrete pointer type it needs.
	return structPointer_field(p, f).Addr().Interface()
}

// Bytes returns the address of a []byte field in the struct.
func structPointer_Bytes(p structPointer, f field) *[]byte {
	return structPointer_ifield(p, f).(*[]byte)
}

// BytesSlice returns the address of a [][]byte field in the struct.
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
	return structPointer_ifield(p, f).(*[][]byte)
}

// Bool returns the address of a *bool field in the struct.
func structPointer_Bool(p structPointer, f field) **bool {
	return structPointer_ifield(p, f).(**bool)
}

// BoolVal returns the address of a bool field in the struct.
func structPointer_BoolVal(p structPointer, f field) *bool {
	return structPointer_ifield(p, f).(*bool)
}

// BoolSlice returns the address of a []bool field in the struct.
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
	return structPointer_ifield(p, f).(*[]bool)
}

// String returns the address of a *string field in the struct.
func structPointer_String(p structPointer, f field) **string {
	return structPointer_ifield(p, f).(**string)
}

// StringVal returns the address of a string field in the struct.
func structPointer_StringVal(p structPointer, f field) *string {
	return structPointer_ifield(p, f).(*string)
}

// StringSlice returns the address of a []string field in the struct.
func structPointer_StringSlice(p structPointer, f field) *[]string {
	return structPointer_ifield(p, f).(*[]string)
}

// Extensions returns the address of an extension map field in the struct.
func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
	return structPointer_ifield(p, f).(*XXX_InternalExtensions)
}

// ExtMap returns the address of an extension map field in the struct.
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
	return structPointer_ifield(p, f).(*map[int32]Extension)
}

// NewAt returns the reflect.Value for a pointer to a field in the struct.
func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
	return structPointer_field(p, f).Addr()
}

// SetStructPointer writes a *struct field in the struct.
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
	structPointer_field(p, f).Set(q.v)
}

// GetStructPointer reads a *struct field in the struct.
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
	return structPointer{structPointer_field(p, f)}
}

// StructPointerSlice the address of a []*struct field in the struct.
func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
	return structPointerSlice{structPointer_field(p, f)}
}

// A structPointerSlice represents the address of a slice of pointers to structs
// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
type structPointerSlice struct {
	v reflect.Value
}

// Len reports the number of elements in the slice.
func (p structPointerSlice) Len() int { return p.v.Len() }

// Index returns the i'th element of the slice as a structPointer.
func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }

// Append appends q to the slice.
func (p structPointerSlice) Append(q structPointer) {
	p.v.Set(reflect.Append(p.v, q.v))
}

// Cached reflect.Types for the numeric kinds dispatched on below.
var (
	int32Type   = reflect.TypeOf(int32(0))
	uint32Type  = reflect.TypeOf(uint32(0))
	float32Type = reflect.TypeOf(float32(0))
	int64Type   = reflect.TypeOf(int64(0))
	uint64Type  = reflect.TypeOf(uint64(0))
	float64Type = reflect.TypeOf(float64(0))
)

// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
type word32 struct {
	v reflect.Value
}

// IsNil reports whether p is nil.
func word32_IsNil(p word32) bool {
	return p.v.IsNil()
}

// Set sets p to point at a newly allocated word with bits set to x.
func word32_Set(p word32, o *Buffer, x uint32) {
	// For the known numeric types, hand out a pointer into the Buffer's
	// pre-allocated pool slices to amortize allocations.
	t := p.v.Type().Elem()
	switch t {
	case int32Type:
		if len(o.int32s) == 0 {
			o.int32s = make([]int32, uint32PoolSize)
		}
		o.int32s[0] = int32(x)
		p.v.Set(reflect.ValueOf(&o.int32s[0]))
		o.int32s = o.int32s[1:]
		return
	case uint32Type:
		if len(o.uint32s) == 0 {
			o.uint32s = make([]uint32, uint32PoolSize)
		}
		o.uint32s[0] = x
		p.v.Set(reflect.ValueOf(&o.uint32s[0]))
		o.uint32s = o.uint32s[1:]
		return
	case float32Type:
		if len(o.float32s) == 0 {
			o.float32s = make([]float32, uint32PoolSize)
		}
		o.float32s[0] = math.Float32frombits(x)
		p.v.Set(reflect.ValueOf(&o.float32s[0]))
		o.float32s = o.float32s[1:]
		return
	}

	// must be enum
	p.v.Set(reflect.New(t))
	p.v.Elem().SetInt(int64(int32(x)))
}

// Get gets the bits pointed at by p, as a uint32.
func word32_Get(p word32) uint32 {
	elem := p.v.Elem()
	switch elem.Kind() {
	case reflect.Int32:
		return uint32(elem.Int())
	case reflect.Uint32:
		return uint32(elem.Uint())
	case reflect.Float32:
		return math.Float32bits(float32(elem.Float()))
	}
	panic("unreachable")
}

// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32(p structPointer, f field) word32 {
	return word32{structPointer_field(p, f)}
}

// A word32Val represents a field of type int32, uint32, float32, or enum.
// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
type word32Val struct {
	v reflect.Value
}

// Set sets *p to x.
func word32Val_Set(p word32Val, x uint32) {
	switch p.v.Type() {
	case int32Type:
		p.v.SetInt(int64(x))
		return
	case uint32Type:
		p.v.SetUint(uint64(x))
		return
	case float32Type:
		p.v.SetFloat(float64(math.Float32frombits(x)))
		return
	}

	// must be enum
	p.v.SetInt(int64(int32(x)))
}

// Get gets the bits pointed at by p, as a uint32.
func word32Val_Get(p word32Val) uint32 {
	elem := p.v
	switch elem.Kind() {
	case reflect.Int32:
		return uint32(elem.Int())
	case reflect.Uint32:
		return uint32(elem.Uint())
	case reflect.Float32:
		return math.Float32bits(float32(elem.Float()))
	}
	panic("unreachable")
}

// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.
func structPointer_Word32Val(p structPointer, f field) word32Val {
	return word32Val{structPointer_field(p, f)}
}

// A word32Slice is a slice of 32-bit values.
// That is, v.Type() is []int32, []uint32, []float32, or []enum.
type word32Slice struct {
	v reflect.Value
}

// Append appends the bits x to the slice, reinterpreted per the element kind.
func (p word32Slice) Append(x uint32) {
	n, m := p.v.Len(), p.v.Cap()
	if n < m {
		// Spare capacity: just extend the length in place.
		p.v.SetLen(n + 1)
	} else {
		t := p.v.Type().Elem()
		p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
	}
	elem := p.v.Index(n)
	switch elem.Kind() {
	case reflect.Int32:
		elem.SetInt(int64(int32(x)))
	case reflect.Uint32:
		elem.SetUint(uint64(x))
	case reflect.Float32:
		elem.SetFloat(float64(math.Float32frombits(x)))
	}
}

// Len reports the number of elements in the slice.
func (p word32Slice) Len() int {
	return p.v.Len()
}

// Index returns the i'th element's bits as a uint32.
func (p word32Slice) Index(i int) uint32 {
	elem := p.v.Index(i)
	switch elem.Kind() {
	case reflect.Int32:
		return uint32(elem.Int())
	case reflect.Uint32:
		return uint32(elem.Uint())
	case reflect.Float32:
		return math.Float32bits(float32(elem.Float()))
	}
	panic("unreachable")
}

// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
func structPointer_Word32Slice(p structPointer, f field) word32Slice {
	return word32Slice{structPointer_field(p, f)}
}

// word64 is like word32 but for 64-bit values.
type word64 struct {
	v reflect.Value
}

// word64_Set sets p to point at a newly allocated word with bits set to x,
// drawing the allocation from the Buffer's pools when possible.
func word64_Set(p word64, o *Buffer, x uint64) {
	t := p.v.Type().Elem()
	switch t {
	case int64Type:
		if len(o.int64s) == 0 {
			o.int64s = make([]int64, uint64PoolSize)
		}
		o.int64s[0] = int64(x)
		p.v.Set(reflect.ValueOf(&o.int64s[0]))
		o.int64s = o.int64s[1:]
		return
	case uint64Type:
		if len(o.uint64s) == 0 {
			o.uint64s = make([]uint64, uint64PoolSize)
		}
		o.uint64s[0] = x
		p.v.Set(reflect.ValueOf(&o.uint64s[0]))
		o.uint64s = o.uint64s[1:]
		return
	case float64Type:
		if len(o.float64s) == 0 {
			o.float64s = make([]float64, uint64PoolSize)
		}
		o.float64s[0] = math.Float64frombits(x)
		p.v.Set(reflect.ValueOf(&o.float64s[0]))
		o.float64s = o.float64s[1:]
		return
	}
	// Unlike word32_Set there is no enum fallback: 64-bit enums do not exist.
	panic("unreachable")
}

// word64_IsNil reports whether p is nil.
func word64_IsNil(p word64) bool {
	return p.v.IsNil()
}

// word64_Get gets the bits pointed at by p, as a uint64.
func word64_Get(p word64) uint64 {
	elem := p.v.Elem()
	switch elem.Kind() {
	case reflect.Int64:
		return uint64(elem.Int())
	case reflect.Uint64:
		return elem.Uint()
	case reflect.Float64:
		return math.Float64bits(elem.Float())
	}
	panic("unreachable")
}

// structPointer_Word64 returns a reference to a *int64, *uint64, or *float64 field in the struct.
func structPointer_Word64(p structPointer, f field) word64 {
	return word64{structPointer_field(p, f)}
}

// word64Val is like word32Val but for 64-bit values.
type word64Val struct {
	v reflect.Value
}

// word64Val_Set sets *p to the bits x, reinterpreted per the value's type.
func word64Val_Set(p word64Val, o *Buffer, x uint64) {
	switch p.v.Type() {
	case int64Type:
		p.v.SetInt(int64(x))
		return
	case uint64Type:
		p.v.SetUint(x)
		return
	case float64Type:
		p.v.SetFloat(math.Float64frombits(x))
		return
	}
	panic("unreachable")
}

// word64Val_Get gets the bits held by p, as a uint64.
func word64Val_Get(p word64Val) uint64 {
	elem := p.v
	switch elem.Kind() {
	case reflect.Int64:
		return uint64(elem.Int())
	case reflect.Uint64:
		return elem.Uint()
	case reflect.Float64:
		return math.Float64bits(elem.Float())
	}
	panic("unreachable")
}

// structPointer_Word64Val returns a reference to a int64, uint64, or float64 field in the struct.
func structPointer_Word64Val(p structPointer, f field) word64Val {
	return word64Val{structPointer_field(p, f)}
}

// word64Slice is like word32Slice but for 64-bit values.
type word64Slice struct {
	v reflect.Value
}

// Append appends the bits x to the slice, reinterpreted per the element kind.
func (p word64Slice) Append(x uint64) {
	n, m := p.v.Len(), p.v.Cap()
	if n < m {
		// Spare capacity: just extend the length in place.
		p.v.SetLen(n + 1)
	} else {
		t := p.v.Type().Elem()
		p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
	}
	elem := p.v.Index(n)
	switch elem.Kind() {
	case reflect.Int64:
		elem.SetInt(int64(int64(x)))
	case reflect.Uint64:
		elem.SetUint(uint64(x))
	case reflect.Float64:
		elem.SetFloat(float64(math.Float64frombits(x)))
	}
}

// Len reports the number of elements in the slice.
func (p word64Slice) Len() int {
	return p.v.Len()
}

// Index returns the i'th element's bits as a uint64.
func (p word64Slice) Index(i int) uint64 {
	elem := p.v.Index(i)
	switch elem.Kind() {
	case reflect.Int64:
		return uint64(elem.Int())
	case reflect.Uint64:
		return uint64(elem.Uint())
	case reflect.Float64:
		return math.Float64bits(float64(elem.Float()))
	}
	panic("unreachable")
}

// structPointer_Word64Slice returns a reference to a []int64, []uint64, or []float64 field in the struct.
func structPointer_Word64Slice(p structPointer, f field) word64Slice {
	return word64Slice{structPointer_field(p, f)}
}
vendor/github.com/gogo/protobuf/proto/pointer_reflect.go
0.743075
0.524882
pointer_reflect.go
starcoder
package phomath import "math" const numMatrix4Values = 4 * 4 // NewMatrix4 creates a new four-dimensional matrix. Argument is optional Matrix4 to copy from. func NewMatrix4(from *Matrix4) *Matrix4 { m := &Matrix4{Values: [numMatrix4Values]float64{}} if from != nil { return m.Copy(from) } return m.Identity() } // Matrix4 is a four-dimensional matrix type Matrix4 struct { Values [numMatrix4Values]float64 } // Clone makes a clone of this Matrix4. func (m *Matrix4) Clone() *Matrix4 { return NewMatrix4(m) } // Copy the values of a given Matrix into this Matrix. func (m *Matrix4) Copy(other *Matrix4) *Matrix4 { if other == nil { return m.Zero() } a := other.Values return m.SetValues( a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], a[14], a[15], ) } // Set is an alias for Matrix4.Copy func (m *Matrix4) Set(other *Matrix4) *Matrix4 { return m.Copy(other) } // SetValues sets the values of this Matrix4. func (m *Matrix4) SetValues(a, b, c, d, e, f, g, h, i, j, k, l, mm, n, o, p float64) *Matrix4 { v := m.Values v[0], v[1], v[2], v[3] = a, b, c, d v[4], v[5], v[6], v[7] = e, f, g, h v[8], v[9], v[10], v[11] = i, j, k, l v[12], v[13], v[14], v[15] = mm, n, o, p return m } // Identity resets this Matrix to an identity (default) matrix. func (m *Matrix4) Identity() *Matrix4 { return m.SetValues( 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, ) } // FromSlice sets the values of this Matrix from the given slice. func (m *Matrix4) FromSlice(s []float64) *Matrix4 { if s != nil { numVals := len(s) for idx := 0; idx < numVals && idx < numMatrix4Values; idx++ { m.Values[idx] = s[idx] } } return m } // Zero resets this matrix, setting all values to 0 func (m *Matrix4) Zero() *Matrix4 { return m.SetValues( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ) } // Transform generates a transform matrix based on the given position, scale and rotation. 
func (m *Matrix4) Transform(position, scale Vector3Like, rotation *Quaternion) *Matrix4 { rotMat := NewMatrix4(nil) rm := rotMat.Values sx, sy, sz := scale.XYZ() px, py, pz := position.XYZ() return m.SetValues( rm[0]*sx, rm[1]*sx, rm[2]*sx, 0, rm[4]*sy, rm[5]*sy, rm[6]*sy, 0, rm[8]*sz, rm[9]*sz, rm[10]*sz, 0, px, py, pz, 1, ) } // SetXYZ sets the x, y, z values of this matrix func (m *Matrix4) SetXYZ(x, y, z float64) *Matrix4 { m.Identity() m.Values[12] = x m.Values[13] = y m.Values[14] = z return m } // SetScaling sets the scaling values of this Matrix. func (m *Matrix4) SetScaling(x, y, z float64) *Matrix4 { m.Zero() m.Values[0] = x m.Values[5] = y m.Values[10] = z m.Values[15] = 1 return m } // Transpose this Matrix. func (m *Matrix4) Transpose() *Matrix4 { a := m.Values a01 := a[1] a02 := a[2] a03 := a[3] a12 := a[6] a13 := a[7] a23 := a[11] a[1] = a[4] a[2] = a[8] a[3] = a[12] a[4] = a01 a[6] = a[9] a[7] = a[13] a[8] = a02 a[9] = a12 a[11] = a[14] a[12] = a03 a[13] = a13 a[14] = a23 return m } // GetInverse copies the given Matrix4 into this Matrix and then inverses it. func (m *Matrix4) GetInverse(other *Matrix4) *Matrix4 { return m.Copy(other).Invert() } // Invert this Matrix. 
func (m *Matrix4) Invert() *Matrix4 { a := m.Values a00, a01, a02, a03 := a[0], a[1], a[2], a[3] a10, a11, a12, a13 := a[4], a[5], a[6], a[7] a20, a21, a22, a23 := a[8], a[9], a[10], a[11] a30, a31, a32, a33 := a[12], a[13], a[14], a[15] b00 := a00*a11 - a01*a10 b01 := a00*a12 - a02*a10 b02 := a00*a13 - a03*a10 b03 := a01*a12 - a02*a11 b04 := a01*a13 - a03*a11 b05 := a02*a13 - a03*a12 b06 := a20*a31 - a21*a30 b07 := a20*a32 - a22*a30 b08 := a20*a33 - a23*a30 b09 := a21*a32 - a22*a31 b10 := a21*a33 - a23*a31 b11 := a22*a33 - a23*a32 // calculate the determinant det := b00*b11 - b01*b10 + b02*b09 + b03*b08 - b04*b07 + b05*b06 if det < Epsilon { return m } det = 1 / det return m.SetValues( (a11*b11-a12*b10+a13*b09)*det, (a02*b10-a01*b11-a03*b09)*det, (a31*b05-a32*b04+a33*b03)*det, (a22*b04-a21*b05-a23*b03)*det, (a12*b08-a10*b11-a13*b07)*det, (a00*b11-a02*b08+a03*b07)*det, (a32*b02-a30*b05-a33*b01)*det, (a20*b05-a22*b02+a23*b01)*det, (a10*b10-a11*b08+a13*b06)*det, (a01*b08-a00*b10-a03*b06)*det, (a30*b04-a31*b02+a33*b00)*det, (a21*b02-a20*b04-a23*b00)*det, (a11*b07-a10*b09-a12*b06)*det, (a00*b09-a01*b07+a02*b06)*det, (a31*b01-a30*b03-a32*b00)*det, (a20*b03-a21*b01+a22*b00)*det, ) } // Adjoint calculates the adjoint, or adjugate, of this Matrix. 
// Adjoint calculates the adjoint, or adjugate, of this Matrix (in place):
// the transpose of the cofactor matrix.
func (m *Matrix4) Adjoint() *Matrix4 {
	// a is a read-only copy of the values; the result is written back
	// through SetValues.
	a := m.Values
	a00, a01, a02, a03 := a[0], a[1], a[2], a[3]
	a10, a11, a12, a13 := a[4], a[5], a[6], a[7]
	a20, a21, a22, a23 := a[8], a[9], a[10], a[11]
	a30, a31, a32, a33 := a[12], a[13], a[14], a[15]

	// Each entry is a signed 3x3 cofactor determinant.
	return m.SetValues(
		a11*(a22*a33-a23*a32)-a21*(a12*a33-a13*a32)+a31*(a12*a23-a13*a22),
		-(a01*(a22*a33-a23*a32) - a21*(a02*a33-a03*a32) + a31*(a02*a23-a03*a22)),
		a01*(a12*a33-a13*a32)-a11*(a02*a33-a03*a32)+a31*(a02*a13-a03*a12),
		-(a01*(a12*a23-a13*a22) - a11*(a02*a23-a03*a22) + a21*(a02*a13-a03*a12)),
		-(a10*(a22*a33-a23*a32) - a20*(a12*a33-a13*a32) + a30*(a12*a23-a13*a22)),
		a00*(a22*a33-a23*a32)-a20*(a02*a33-a03*a32)+a30*(a02*a23-a03*a22),
		-(a00*(a12*a33-a13*a32) - a10*(a02*a33-a03*a32) + a30*(a02*a13-a03*a12)),
		a00*(a12*a23-a13*a22)-a10*(a02*a23-a03*a22)+a20*(a02*a13-a03*a12),
		a10*(a21*a33-a23*a31)-a20*(a11*a33-a13*a31)+a30*(a11*a23-a13*a21),
		-(a00*(a21*a33-a23*a31) - a20*(a01*a33-a03*a31) + a30*(a01*a23-a03*a21)),
		a00*(a11*a33-a13*a31)-a10*(a01*a33-a03*a31)+a30*(a01*a13-a03*a11),
		-(a00*(a11*a23-a13*a21) - a10*(a01*a23-a03*a21) + a20*(a01*a13-a03*a11)),
		-(a10*(a21*a32-a22*a31) - a20*(a11*a32-a12*a31) + a30*(a11*a22-a12*a21)),
		a00*(a21*a32-a22*a31)-a20*(a01*a32-a02*a31)+a30*(a01*a22-a02*a21),
		-(a00*(a11*a32-a12*a31) - a10*(a01*a32-a02*a31) + a30*(a01*a12-a02*a11)),
		a00*(a11*a22-a12*a21)-a10*(a01*a22-a02*a21)+a20*(a01*a12-a02*a11),
	)
}

// Determinant calculates the determinant of this Matrix.
func (m *Matrix4) Determinant() float64 { a := m.Values a00, a01, a02, a03 := a[0], a[1], a[2], a[3] a10, a11, a12, a13 := a[4], a[5], a[6], a[7] a20, a21, a22, a23 := a[8], a[9], a[10], a[11] a30, a31, a32, a33 := a[12], a[13], a[14], a[15] b00 := a00*a11 - a01*a10 b01 := a00*a12 - a02*a10 b02 := a00*a13 - a03*a10 b03 := a01*a12 - a02*a11 b04 := a01*a13 - a03*a11 b05 := a02*a13 - a03*a12 b06 := a20*a31 - a21*a30 b07 := a20*a32 - a22*a30 b08 := a20*a33 - a23*a30 b09 := a21*a32 - a22*a31 b10 := a21*a33 - a23*a31 b11 := a22*a33 - a23*a32 return b00*b11 - b01*b10 + b02*b09 + b03*b08 - b04*b07 + b05*b06 } // Multiply this Matrix4 by the given Matrix4. func (m *Matrix4) Multiply(other *Matrix4) *Matrix4 { a, b := m.Values, other.Values a00, a01, a02, a03 := a[0], a[1], a[2], a[3] a10, a11, a12, a13 := a[4], a[5], a[6], a[7] a20, a21, a22, a23 := a[8], a[9], a[10], a[11] a30, a31, a32, a33 := a[12], a[13], a[14], a[15] // Cache only the current line of the second matrix b0, b1, b2, b3 := b[0], b[1], b[2], b[3] a[0] = b0*a00 + b1*a10 + b2*a20 + b3*a30 a[1] = b0*a01 + b1*a11 + b2*a21 + b3*a31 a[2] = b0*a02 + b1*a12 + b2*a22 + b3*a32 a[3] = b0*a03 + b1*a13 + b2*a23 + b3*a33 b0, b1, b2, b3 = b[4], b[5], b[6], b[7] a[4] = b0*a00 + b1*a10 + b2*a20 + b3*a30 a[5] = b0*a01 + b1*a11 + b2*a21 + b3*a31 a[6] = b0*a02 + b1*a12 + b2*a22 + b3*a32 a[7] = b0*a03 + b1*a13 + b2*a23 + b3*a33 b0, b1, b2, b3 = b[8], b[9], b[10], b[11] a[8] = b0*a00 + b1*a10 + b2*a20 + b3*a30 a[9] = b0*a01 + b1*a11 + b2*a21 + b3*a31 a[10] = b0*a02 + b1*a12 + b2*a22 + b3*a32 a[11] = b0*a03 + b1*a13 + b2*a23 + b3*a33 b0, b1, b2, b3 = b[12], b[13], b[14], b[15] a[12] = b0*a00 + b1*a10 + b2*a20 + b3*a30 a[13] = b0*a01 + b1*a11 + b2*a21 + b3*a31 a[14] = b0*a02 + b1*a12 + b2*a22 + b3*a32 a[15] = b0*a03 + b1*a13 + b2*a23 + b3*a33 return m } // MultiplyLocal multiplies the values of this Matrix4 by those given in the `other` argument. 
// MultiplyLocal multiplies the values of this Matrix4 by those given in the
// `other` argument (m = m x other, opposite operand order to Multiply).
func (m *Matrix4) MultiplyLocal(other *Matrix4) *Matrix4 {
	// a and b are read-only copies; all 16 products are computed before
	// SetValues writes the result back, so aliasing (other == m) is safe.
	a, b := m.Values, other.Values
	return m.SetValues(
		a[0]*b[0]+a[1]*b[4]+a[2]*b[8]+a[3]*b[12],
		a[0]*b[1]+a[1]*b[5]+a[2]*b[9]+a[3]*b[13],
		a[0]*b[2]+a[1]*b[6]+a[2]*b[10]+a[3]*b[14],
		a[0]*b[3]+a[1]*b[7]+a[2]*b[11]+a[3]*b[15],
		a[4]*b[0]+a[5]*b[4]+a[6]*b[8]+a[7]*b[12],
		a[4]*b[1]+a[5]*b[5]+a[6]*b[9]+a[7]*b[13],
		a[4]*b[2]+a[5]*b[6]+a[6]*b[10]+a[7]*b[14],
		a[4]*b[3]+a[5]*b[7]+a[6]*b[11]+a[7]*b[15],
		a[8]*b[0]+a[9]*b[4]+a[10]*b[8]+a[11]*b[12],
		a[8]*b[1]+a[9]*b[5]+a[10]*b[9]+a[11]*b[13],
		a[8]*b[2]+a[9]*b[6]+a[10]*b[10]+a[11]*b[14],
		a[8]*b[3]+a[9]*b[7]+a[10]*b[11]+a[11]*b[15],
		a[12]*b[0]+a[13]*b[4]+a[14]*b[8]+a[15]*b[12],
		a[12]*b[1]+a[13]*b[5]+a[14]*b[9]+a[15]*b[13],
		a[12]*b[2]+a[13]*b[6]+a[14]*b[10]+a[15]*b[14],
		a[12]*b[3]+a[13]*b[7]+a[14]*b[11]+a[15]*b[15],
	)
}

// PreMultiply multiplies the given Matrix4 object with this Matrix.
func (m *Matrix4) PreMultiply(other *Matrix4) *Matrix4 {
	return m.MultiplyMatrices(other, m)
}

// MultiplyMatrices multiplies the two given Matrix4 objects and stores the results in this Matrix.
func (m *Matrix4) MultiplyMatrices(a, b *Matrix4) *Matrix4 { am, bm := a.Values, b.Values a11 := am[0] a12 := am[4] a13 := am[8] a14 := am[12] a21 := am[1] a22 := am[5] a23 := am[9] a24 := am[13] a31 := am[2] a32 := am[6] a33 := am[10] a34 := am[14] a41 := am[3] a42 := am[7] a43 := am[11] a44 := am[15] b11 := bm[0] b12 := bm[4] b13 := bm[8] b14 := bm[12] b21 := bm[1] b22 := bm[5] b23 := bm[9] b24 := bm[13] b31 := bm[2] b32 := bm[6] b33 := bm[10] b34 := bm[14] b41 := bm[3] b42 := bm[7] b43 := bm[11] b44 := bm[15] return m.SetValues( a11*b11+a12*b21+a13*b31+a14*b41, a21*b11+a22*b21+a23*b31+a24*b41, a31*b11+a32*b21+a33*b31+a34*b41, a41*b11+a42*b21+a43*b31+a44*b41, a11*b12+a12*b22+a13*b32+a14*b42, a21*b12+a22*b22+a23*b32+a24*b42, a31*b12+a32*b22+a33*b32+a34*b42, a41*b12+a42*b22+a43*b32+a44*b42, a11*b13+a12*b23+a13*b33+a14*b43, a21*b13+a22*b23+a23*b33+a24*b43, a31*b13+a32*b23+a33*b33+a34*b43, a41*b13+a42*b23+a43*b33+a44*b43, a11*b14+a12*b24+a13*b34+a14*b44, a21*b14+a22*b24+a23*b34+a24*b44, a31*b14+a32*b24+a33*b34+a34*b44, a41*b14+a42*b24+a43*b34+a44*b44, ) } // Translate this Matrix using the given Vector. func (m *Matrix4) Translate(v Vector3Like) *Matrix4 { return m.TranslateXYZ(v.XYZ()) } // TranslateXYZ translates this Matrix using the given values. func (m *Matrix4) TranslateXYZ(x, y, z float64) *Matrix4 { a := m.Values a[12] = a[0]*x + a[4]*y + a[8]*z + a[12] a[13] = a[1]*x + a[5]*y + a[9]*z + a[13] a[14] = a[2]*x + a[6]*y + a[10]*z + a[14] a[15] = a[3]*x + a[7]*y + a[11]*z + a[15] return m } // Scale applies a scale transformation to this Matrix. func (m *Matrix4) Scale(v Vector3Like) *Matrix4 { return m.ScaleXYZ(v.XYZ()) } // ScaleXYZ applies a scale transformation to this Matrix. 
func (m *Matrix4) ScaleXYZ(x, y, z float64) *Matrix4 {
	// Scale each basis column in place: x scales elements 0-3, y scales 4-7,
	// z scales 8-11. The translation column (12-15) is left untouched.
	a := m.Values
	a[0], a[1], a[2], a[3] = a[0]*x, a[1]*x, a[2]*x, a[3]*x
	a[4], a[5], a[6], a[7] = a[4]*y, a[5]*y, a[6]*y, a[7]*y
	a[8], a[9], a[10], a[11] = a[8]*z, a[9]*z, a[10]*z, a[11]*z
	return m
}

// MakeRotationAxis derives a rotation matrix around the given axis.
// The axis is assumed to be normalized — TODO confirm against callers.
func (m *Matrix4) MakeRotationAxis(axis Vector3Like, radians float64) *Matrix4 {
	// Standard axis-angle (Rodrigues) rotation matrix.
	c, s := math.Cos(radians), math.Sin(radians)
	t := 1 - c
	x, y, z := axis.XYZ()
	tx, ty := t*x, t*y

	return m.SetValues(
		tx*x+c, tx*y-s*z, tx*z+s*y, 0,
		tx*y+s*z, ty*y+c, ty*z-s*x, 0,
		tx*z-s*y, ty*z+s*x, t*z*z+c, 0,
		0, 0, 0, 1,
	)
}

// Rotate applies a rotation transformation to this Matrix.
// The axis is normalized internally; a near-zero axis leaves m unchanged.
func (m *Matrix4) Rotate(radians float64, axis Vector3Like) *Matrix4 {
	a := m.Values
	x, y, z := axis.XYZ()

	length := math.Sqrt(x*x + y*y + z*z)

	if math.Abs(length) < Epsilon {
		// Degenerate axis: nothing sensible to rotate around.
		return m
	}

	length = 1 / length
	x *= length
	y *= length
	z *= length

	c, s := math.Cos(radians), math.Sin(radians)
	t := 1 - c

	a00, a01, a02, a03 := a[0], a[1], a[2], a[3]
	a10, a11, a12, a13 := a[4], a[5], a[6], a[7]
	a20, a21, a22, a23 := a[8], a[9], a[10], a[11]
	a30, a31, a32, a33 := a[12], a[13], a[14], a[15]

	// Axis-angle rotation matrix b, multiplied into the upper 3x4 of m.
	b00, b01, b02 := x*x*t+c, y*x*t+z*s, z*x*t-y*s
	b10, b11, b12 := x*y*t-z*s, y*y*t+c, z*y*t+x*s
	b20, b21, b22 := x*z*t+y*s, y*z*t-x*s, z*z*t+c

	// The translation row (a30..a33) passes through unchanged.
	return m.SetValues(
		a00*b00+a10*b01+a20*b02,
		a01*b00+a11*b01+a21*b02,
		a02*b00+a12*b01+a22*b02,
		a03*b00+a13*b01+a23*b02,
		a00*b10+a10*b11+a20*b12,
		a01*b10+a11*b11+a21*b12,
		a02*b10+a12*b11+a22*b12,
		a03*b10+a13*b11+a23*b12,
		a00*b20+a10*b21+a20*b22,
		a01*b20+a11*b21+a21*b22,
		a02*b20+a12*b21+a22*b22,
		a03*b20+a13*b21+a23*b22,
		a30, a31, a32, a33,
	)
}

// RotateX rotates this matrix on its X axis.
func (m *Matrix4) RotateX(radians float64) *Matrix4 {
	a := m.Values
	c, s := math.Cos(radians), math.Sin(radians)

	a10, a11, a12, a13 := a[4], a[5], a[6], a[7]
	a20, a21, a22, a23 := a[8], a[9], a[10], a[11]

	// Perform axis-specific matrix multiplication: only the Y and Z basis
	// columns change when rotating about X.
	a[4] = a10*c + a20*s
	a[5] = a11*c + a21*s
	a[6] = a12*c + a22*s
	a[7] = a13*c + a23*s
	a[8] = a20*c - a10*s
	a[9] = a21*c - a11*s
	a[10] = a22*c - a12*s
	a[11] = a23*c - a13*s

	return m
}

// RotateY rotates this matrix on its Y axis.
func (m *Matrix4) RotateY(radians float64) *Matrix4 {
	a := m.Values
	c, s := math.Cos(radians), math.Sin(radians)

	a00, a01, a02, a03 := a[0], a[1], a[2], a[3]
	a20, a21, a22, a23 := a[8], a[9], a[10], a[11]

	// Perform axis-specific matrix multiplication: only the X and Z basis
	// columns change when rotating about Y.
	a[0] = a00*c + a20*s
	a[1] = a01*c + a21*s
	a[2] = a02*c + a22*s
	a[3] = a03*c + a23*s
	a[4] = a20*c - a00*s
	a[5] = a21*c - a01*s
	a[6] = a22*c - a02*s
	a[7] = a23*c - a03*s

	return m
}

// RotateZ rotates this matrix on its Z axis.
func (m *Matrix4) RotateZ(radians float64) *Matrix4 {
	a := m.Values
	c, s := math.Cos(radians), math.Sin(radians)

	a00, a01, a02, a03 := a[0], a[1], a[2], a[3]
	a10, a11, a12, a13 := a[4], a[5], a[6], a[7]

	// Perform axis-specific matrix multiplication: only the X and Y basis
	// columns change when rotating about Z.
	a[0] = a00*c + a10*s
	a[1] = a01*c + a11*s
	a[2] = a02*c + a12*s
	a[3] = a03*c + a13*s
	a[4] = a10*c - a00*s
	a[5] = a11*c - a01*s
	a[6] = a12*c - a02*s
	a[7] = a13*c - a03*s

	return m
}

// FromRotationTranslation sets the values of this Matrix from the given rotation Quaternion and
// translation Vector.
func (m *Matrix4) FromRotationTranslation(q *Quaternion, v *Vector3) *Matrix4 {
	// Standard quaternion-to-matrix expansion; the translation goes into the
	// last row of the flat layout.
	x, y, z, w := q.X, q.Y, q.Z, q.W
	x2, y2, z2 := x+x, y+y, z+z

	xx, xy, xz := x*x2, x*y2, x*z2
	yy, yz, zz := y*y2, y*z2, z*z2
	wx, wy, wz := w*x2, w*y2, w*z2

	return m.SetValues(
		1-(yy+zz), xy+wz, xz-wy, 0,
		xy-wz, 1-(xx+zz), yz+wx, 0,
		xz+wy, yz-wx, 1-(xx+yy), 0,
		v.X, v.Y, v.Z, 1,
	)
}

// FromQuaternion sets the values of this Matrix from the given Quaternion.
func (m *Matrix4) FromQuaternion(q *Quaternion) *Matrix4 {
	// Same expansion as FromRotationTranslation, with a zero translation.
	x, y, z, w := q.X, q.Y, q.Z, q.W
	x2, y2, z2 := x+x, y+y, z+z

	xx, xy, xz := x*x2, x*y2, x*z2
	yy, yz, zz := y*y2, y*z2, z*z2
	wx, wy, wz := w*x2, w*y2, w*z2

	return m.SetValues(
		1-(yy+zz), xy+wz, xz-wy, 0,
		xy-wz, 1-(xx+zz), yz+wx, 0,
		xz+wy, yz-wx, 1-(xx+yy), 0,
		0, 0, 0, 1,
	)
}

// Frustum generates a frustum matrix with the given bounds.
func (m *Matrix4) Frustum(left, right, bottom, top, near, far float64) *Matrix4 {
	// Note nf is 1/(near-far), negative for the usual near < far, which
	// yields the conventional sign of the depth terms below.
	rl, tb, nf := 1/(right-left), 1/(top-bottom), 1/(near-far)

	return m.SetValues(
		(near*2)*rl, 0, 0, 0,
		0, (near*2)*tb, 0, 0,
		(right+left)*rl, (top+bottom)*tb, (far+near)*nf, -1,
		0, 0, (far*near*2)*nf, 0,
	)
}

// Perspective generates a perspective projection matrix with the given bounds.
// fovy is the vertical field of view in radians.
func (m *Matrix4) Perspective(fovy, aspect, near, far float64) *Matrix4 {
	f, nf := 1/math.Tan(fovy/2), 1/(near-far)

	return m.SetValues(
		f/aspect, 0, 0, 0,
		0, f, 0, 0,
		0, 0, (far+near)*nf, -1,
		0, 0, (2*far*near)*nf, 0,
	)
}

// PerspectiveLH generates a left-handed perspective projection matrix with
// the given bounds.
func (m *Matrix4) PerspectiveLH(width, height, near, far float64) *Matrix4 {
	return m.SetValues(
		(2*near)/width, 0, 0, 0,
		0, (2*near)/height, 0, 0,
		0, 0, -far/(near-far), 1,
		0, 0, (near*far)/(near-far), 0,
	)
}

// Ortho generates an orthogonal projection matrix with the given bounds.
func (m *Matrix4) Ortho(left, right, bottom, top, near, far float64) *Matrix4 {
	lr, bt, nf := left-right, bottom-top, near-far

	// Avoid division by zero when a pair of bounds coincides; the
	// corresponding reciprocal is simply left at 0.
	if lr != 0 {
		lr = 1 / lr
	}
	if bt != 0 {
		bt = 1 / bt
	}
	if nf != 0 {
		nf = 1 / nf
	}

	return m.SetValues(
		-2*lr, 0, 0, 0,
		0, -2*bt, 0, 0,
		0, 0, 2*nf, 0,
		(left+right)*lr, (top+bottom)*bt, (far+near)*nf, 1,
	)
}

// LookAtRightHanded generates a right-handed look-at matrix with the given eye position,
// target and up axis.
func (m *Matrix4) LookAtRightHanded(eye, target, up *Vector3) *Matrix4 { vz := eye.Clone().Subtract(target) vx := NewVector3(0, 0, 0) vy := NewVector3(0, 0, 0) if vz.LengthSquared() == 0 { // eye and target are in the same position vz.Z = 1 } vz.Normalize() vx.CrossVectors(up, vz) if vx.LengthSquared() == 0 { if math.Abs(up.Z) == 1 { vz.X += Epsilon } else { vz.Z += Epsilon } vz.Normalize() vx.CrossVectors(vz, vx) } vx.Normalize() vy.CrossVectors(vz, vx) m.Values[0] = vx.X m.Values[1] = vx.Y m.Values[2] = vx.Z m.Values[4] = vy.X m.Values[5] = vy.Y m.Values[6] = vy.Z m.Values[8] = vz.X m.Values[9] = vz.Y m.Values[10] = vz.Z return m } // LookAt generates a look-at matrix with the given eye position, target, and up axis. func (m *Matrix4) LookAt(eye, target, up *Vector3) *Matrix4 { ex, ey, ez := eye.XYZ() tx, ty, tz := target.XYZ() ux, uy, uz := up.XYZ() if math.Abs(ex-tx) < Epsilon && math.Abs(ey-ty) < Epsilon && math.Abs(ez-tz) < Epsilon { return m.Identity() } z0, z1, z2 := ex-tx, ey-ty, ez-tz length := 1 / math.Sqrt(z0*z0+z1*z1+z2*z2) x0, x1, x2 := uy*z2-uz*z1, uz*z0-ux*z2, ux*z1-uy*z0 if length == 0 { x0, x1, x2 = 0, 0, 0 } else { length = 1 / length x0 *= length x1 *= length x2 *= length } y0, y1, y2 := z1*x2-z2*x1, z2*x0-z0*x2, z0*x1-z1*x0 length = math.Sqrt(y0*y0 + y1*y1 + y2*y2) if length == 0 { y0, y1, y2 = 0, 0, 0 } else { length = 1 / length y0 *= length y1 *= length y2 *= length } return m.SetValues( x0, y0, z0, 0, x1, y1, z1, 0, x2, y2, z2, 0, -(x0*ex + x1*ey + x2*ez), -(y0*ex + y1*ey + y2*ez), -(z0*ex + z1*ey + z2*ez), 1, ) } // SetYawPitchRoll sets the values of this matrix from the given `yaw`, `pitch` and `roll` values. 
func (m *Matrix4) SetYawPitchRoll(y, p, r float64) *Matrix4 {
	// Compose three single-axis rotations: m holds the roll (Z) rotation,
	// a the pitch (X) rotation, b the yaw (Y) rotation; the result is
	// m = Z * X * Y via the two MultiplyLocal calls below.
	m.Zero()

	a, b := m.Clone(), m.Clone()

	// vm/va/vb are the matrices' Values — presumably a shared slice, so the
	// element writes below mutate m, a and b directly (TODO confirm Values
	// is not a copied array).
	vm, va, vb := m.Values, a.Values, b.Values

	// Rotate Z
	s, c := math.Sin(r), math.Cos(r)
	vm[10], vm[15], vm[0], vm[1], vm[4], vm[5] = 1, 1, c, s, -s, c

	// Rotate X
	s, c = math.Sin(p), math.Cos(p)
	va[0], va[15], va[5], va[10], va[9], va[6] = 1, 1, c, c, -s, s

	// Rotate Y
	s, c = math.Sin(y), math.Cos(y)
	vb[5], vb[15], vb[0], vb[2], vb[8], vb[10] = 1, 1, c, -s, s, c

	return m.MultiplyLocal(a).MultiplyLocal(b)
}

// SetWorldMatrix generates a world matrix from the given (rotation, position, scale vector3),
// and (view, projection matrix4). Both view and proj are optional; passing
// nil skips the corresponding multiplication.
func (m *Matrix4) SetWorldMatrix(rot, pos, scale *Vector3, view, proj *Matrix4) *Matrix4 {
	// world = rotation * scaling * translation, then view and projection.
	m.SetYawPitchRoll(rot.XYZ())

	a := NewMatrix4(nil).SetScaling(scale.XYZ())
	b := NewMatrix4(nil).SetXYZ(pos.XYZ())

	m.MultiplyLocal(a).MultiplyLocal(b)

	if view != nil {
		m.MultiplyLocal(view)
	}

	if proj != nil {
		m.MultiplyLocal(proj)
	}

	return m
}

// MultiplyToMatrix4 multiplies this Matrix4 by the given `src` Matrix4 and stores the results in
// the `out` Matrix4.
func (m *Matrix4) MultiplyToMatrix4(src, out *Matrix4) *Matrix4 { if out == nil { out = NewMatrix4(nil) } mv, sv := m.Values, src.Values a00, a01, a02, a03, a10, a11, a12, a13, a20, a21, a22, a23, a30, a31, a32, a33 := mv[0], mv[1], mv[2], mv[3], mv[4], mv[5], mv[6], mv[7], mv[8], mv[9], mv[10], mv[11], mv[12], mv[13], mv[14], mv[15] b00, b01, b02, b03, b10, b11, b12, b13, b20, b21, b22, b23, b30, b31, b32, b33 := sv[0], sv[1], sv[2], sv[3], sv[4], sv[5], sv[6], sv[7], sv[8], sv[9], sv[10], sv[11], sv[12], sv[13], sv[14], sv[15] return out.SetValues( b00*a00+b01*a10+b02*a20+b03*a30, b01*a01+b01*a11+b02*a21+b03*a31, b02*a02+b01*a12+b02*a22+b03*a32, b03*a03+b01*a13+b02*a23+b03*a33, b10*a00+b11*a10+b12*a20+b13*a30, b10*a01+b11*a11+b12*a21+b13*a31, b10*a02+b11*a12+b12*a22+b13*a32, b10*a03+b11*a13+b12*a23+b13*a33, b20*a00+b21*a10+b22*a20+b23*a30, b20*a01+b21*a11+b22*a21+b23*a31, b20*a02+b21*a12+b22*a22+b23*a32, b20*a03+b21*a13+b22*a23+b23*a33, b30*a00+b31*a10+b32*a20+b33*a30, b30*a01+b31*a11+b32*a21+b33*a31, b30*a02+b31*a12+b32*a22+b33*a32, b30*a03+b31*a13+b32*a23+b33*a33, ) } // FromRotationXYTranslation takes the rotation, position vectors and builds this Matrix4 from them. func (m *Matrix4) FromRotationXYTranslation(rot, pos *Vector3, translateFirst bool) *Matrix4 { x, y, z := pos.XYZ() sx, cx := math.Sin(rot.X), math.Cos(rot.X) sy, cy := math.Sin(rot.Y), math.Cos(rot.Y) a30, a31, a32 := x, y, z // rotate x b21 := -sx // rotate y c01, c02, c21, c22 := 0-b21*sy, 0-cx*sy, b21*cy, cx*cy // translate if !translateFirst { a30, a31, a32 = cy*x+sy*z, c01*x+cx*y+c21*z, c02*x+sx*y+c22*z } return m.SetValues( cy, c01, c02, 0, 0, cx, sx, 0, sy, c21, c22, 0, a30, a31, a32, 1, ) } // GetMaxScaleOnAxis returns the maximum axis scale from this Matrix4. 
func (m *Matrix4) GetMaxScaleOnAxis() float64 { v := m.Values sx2 := v[0]*v[0] + v[1]*v[1] + v[2]*v[2] sy2 := v[4]*v[4] + v[5]*v[5] + v[6]*v[6] sz2 := v[8]*v[8] + v[9]*v[9] + v[10]*v[10] return math.Sqrt(math.Max(math.Max(sx2, sy2), sz2)) }
phomath/matrix4.go
0.869687
0.742888
matrix4.go
starcoder
package metrics

import (
	"time"

	"go.temporal.io/server/common/log"
)

// Mostly cribbed from
// https://github.com/temporalio/sdk-go/blob/master/internal/common/metrics/handler.go
// and adapted to depend on golang.org/x/exp/event

type (
	// MetricsHandler represents the main dependency for instrumentation
	MetricsHandler interface {
		// WithTags creates a new MetricsHandler with provided []Tag
		// Tags are merged with registered Tags from the source MetricsHandler
		WithTags(...Tag) MetricsHandler

		// Counter obtains a counter for the given name.
		Counter(string) CounterMetric

		// Gauge obtains a gauge for the given name.
		Gauge(string) GaugeMetric

		// Timer obtains a timer for the given name.
		Timer(string) TimerMetric

		// Histogram obtains a histogram for the given name and unit.
		Histogram(string, MetricUnit) HistogramMetric

		// Stop shuts the handler down; what stopping entails (flushing,
		// releasing exporters) is implementation-defined. The logger is
		// available for reporting shutdown problems.
		Stop(log.Logger)
	}

	// CounterMetric is an ever-increasing counter.
	CounterMetric interface {
		// Record increments the counter value.
		// Tags provided are merged with the source MetricsHandler
		Record(int64, ...Tag)
	}

	// GaugeMetric can be set to any float and represents a latest value instrument.
	GaugeMetric interface {
		// Record updates the gauge value.
		// Tags provided are merged with the source MetricsHandler
		Record(float64, ...Tag)
	}

	// TimerMetric records time durations.
	TimerMetric interface {
		// Record sets the timer value.
		// Tags provided are merged with the source MetricsHandler
		Record(time.Duration, ...Tag)
	}

	// HistogramMetric records a distribution of values.
	HistogramMetric interface {
		// Record adds a value to the distribution
		// Tags provided are merged with the source MetricsHandler
		Record(int64, ...Tag)
	}

	// CounterMetricFunc adapts a plain function to the CounterMetric interface.
	CounterMetricFunc func(int64, ...Tag)

	// GaugeMetricFunc adapts a plain function to the GaugeMetric interface.
	GaugeMetricFunc func(float64, ...Tag)

	// TimerMetricFunc adapts a plain function to the TimerMetric interface.
	TimerMetricFunc func(time.Duration, ...Tag)

	// HistogramMetricFunc adapts a plain function to the HistogramMetric interface.
	HistogramMetricFunc func(int64, ...Tag)
)

// Record implements CounterMetric by invoking the wrapped function.
func (c CounterMetricFunc) Record(v int64, tags ...Tag) {
	c(v, tags...)
}

// Record implements GaugeMetric by invoking the wrapped function.
func (c GaugeMetricFunc) Record(v float64, tags ...Tag) {
	c(v, tags...)
}

// Record implements TimerMetric by invoking the wrapped function.
func (c TimerMetricFunc) Record(v time.Duration, tags ...Tag) {
	c(v, tags...)
}

// Record implements HistogramMetric by invoking the wrapped function.
func (c HistogramMetricFunc) Record(v int64, tags ...Tag) {
	c(v, tags...)
}
common/metrics/metrics.go
0.810929
0.430806
metrics.go
starcoder
package client

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"reflect"
	"sort"
)

// MeasurementSchema describes a single measurement (column) of a Tablet.
type MeasurementSchema struct {
	Measurement string
	DataType    TSDataType
	Encoding    TSEncoding
	Compressor  TSCompressionType
	Properties  map[string]string
}

// Tablet is a column-oriented batch of rows for one device: a timestamp
// slice plus one typed value slice per measurement schema.
type Tablet struct {
	deviceId           string
	measurementSchemas []*MeasurementSchema
	timestamps         []int64
	values             []interface{}
	rowCount           int
}

// Len implements sort.Interface; it reports the number of rows.
func (t *Tablet) Len() int {
	return t.GetRowCount()
}

// Swap implements sort.Interface; it swaps rows i and j across every value
// column and the timestamp slice.
func (t *Tablet) Swap(i, j int) {
	for index, schema := range t.measurementSchemas {
		switch schema.DataType {
		case BOOLEAN:
			sortedSlice := t.values[index].([]bool)
			sortedSlice[i], sortedSlice[j] = sortedSlice[j], sortedSlice[i]
		case INT32:
			sortedSlice := t.values[index].([]int32)
			sortedSlice[i], sortedSlice[j] = sortedSlice[j], sortedSlice[i]
		case INT64:
			sortedSlice := t.values[index].([]int64)
			sortedSlice[i], sortedSlice[j] = sortedSlice[j], sortedSlice[i]
		case FLOAT:
			sortedSlice := t.values[index].([]float32)
			sortedSlice[i], sortedSlice[j] = sortedSlice[j], sortedSlice[i]
		case DOUBLE:
			sortedSlice := t.values[index].([]float64)
			sortedSlice[i], sortedSlice[j] = sortedSlice[j], sortedSlice[i]
		case TEXT:
			sortedSlice := t.values[index].([]string)
			sortedSlice[i], sortedSlice[j] = sortedSlice[j], sortedSlice[i]
		}
	}
	t.timestamps[i], t.timestamps[j] = t.timestamps[j], t.timestamps[i]
}

// Less implements sort.Interface; rows are ordered by timestamp.
func (t *Tablet) Less(i, j int) bool {
	return t.timestamps[i] < t.timestamps[j]
}

// SetTimestamp sets the timestamp of the given row.
func (t *Tablet) SetTimestamp(timestamp int64, rowIndex int) {
	t.timestamps[rowIndex] = timestamp
}

// SetValueAt stores value into the given column and row. The value must
// match the column's data type, either directly or as a pointer (TEXT also
// accepts []byte).
func (t *Tablet) SetValueAt(value interface{}, columnIndex, rowIndex int) error {
	if value == nil {
		return errors.New("illegal argument value can't be nil")
	}

	// Fix: the bounds checks used '>' which admitted columnIndex ==
	// len(schemas) and rowIndex == rowCount, panicking on the index below.
	if columnIndex < 0 || columnIndex >= len(t.measurementSchemas) {
		return fmt.Errorf("illegal argument columnIndex %d", columnIndex)
	}

	if rowIndex < 0 || rowIndex >= t.rowCount {
		return fmt.Errorf("illegal argument rowIndex %d", rowIndex)
	}

	switch t.measurementSchemas[columnIndex].DataType {
	case BOOLEAN:
		values := t.values[columnIndex].([]bool)
		switch v := value.(type) {
		case bool:
			values[rowIndex] = v
		case *bool:
			values[rowIndex] = *v
		default:
			return fmt.Errorf("illegal argument value %v %v", value, reflect.TypeOf(value))
		}
	case INT32:
		values := t.values[columnIndex].([]int32)
		switch v := value.(type) {
		case int32:
			values[rowIndex] = v
		case *int32:
			values[rowIndex] = *v
		default:
			return fmt.Errorf("illegal argument value %v %v", value, reflect.TypeOf(value))
		}
	case INT64:
		values := t.values[columnIndex].([]int64)
		switch v := value.(type) {
		case int64:
			values[rowIndex] = v
		case *int64:
			values[rowIndex] = *v
		default:
			return fmt.Errorf("illegal argument value %v %v", value, reflect.TypeOf(value))
		}
	case FLOAT:
		values := t.values[columnIndex].([]float32)
		switch v := value.(type) {
		case float32:
			values[rowIndex] = v
		case *float32:
			values[rowIndex] = *v
		default:
			return fmt.Errorf("illegal argument value %v %v", value, reflect.TypeOf(value))
		}
	case DOUBLE:
		values := t.values[columnIndex].([]float64)
		switch v := value.(type) {
		case float64:
			values[rowIndex] = v
		case *float64:
			values[rowIndex] = *v
		default:
			return fmt.Errorf("illegal argument value %v %v", value, reflect.TypeOf(value))
		}
	case TEXT:
		values := t.values[columnIndex].([]string)
		switch v := value.(type) {
		case string:
			values[rowIndex] = v
		case []byte:
			values[rowIndex] = string(v)
		default:
			return fmt.Errorf("illegal argument value %v %v", value, reflect.TypeOf(value))
		}
	default:
		// Previously unknown datatypes were silently accepted; report them
		// like GetValueAt and getValuesBytes do.
		return fmt.Errorf("illegal datatype %v", t.measurementSchemas[columnIndex].DataType)
	}
	return nil
}

// GetRowCount returns the number of rows in the tablet.
func (t *Tablet) GetRowCount() int {
	return t.rowCount
}

// GetValueAt returns the value stored in the given column and row.
func (t *Tablet) GetValueAt(columnIndex, rowIndex int) (interface{}, error) {
	// Fix: '>=' bounds (see SetValueAt).
	if columnIndex < 0 || columnIndex >= len(t.measurementSchemas) {
		return nil, fmt.Errorf("illegal argument columnIndex %d", columnIndex)
	}

	if rowIndex < 0 || rowIndex >= t.rowCount {
		return nil, fmt.Errorf("illegal argument rowIndex %d", rowIndex)
	}

	schema := t.measurementSchemas[columnIndex]
	switch schema.DataType {
	case BOOLEAN:
		return t.values[columnIndex].([]bool)[rowIndex], nil
	case INT32:
		return t.values[columnIndex].([]int32)[rowIndex], nil
	case INT64:
		return t.values[columnIndex].([]int64)[rowIndex], nil
	case FLOAT:
		return t.values[columnIndex].([]float32)[rowIndex], nil
	case DOUBLE:
		return t.values[columnIndex].([]float64)[rowIndex], nil
	case TEXT:
		return t.values[columnIndex].([]string)[rowIndex], nil
	default:
		return nil, fmt.Errorf("illegal datatype %v", schema.DataType)
	}
}

// GetTimestampBytes serializes all timestamps as big-endian int64s.
// binary.Write to a bytes.Buffer cannot fail, so its error is ignored.
func (t *Tablet) GetTimestampBytes() []byte {
	buff := &bytes.Buffer{}
	for _, v := range t.timestamps {
		binary.Write(buff, binary.BigEndian, v)
	}
	return buff.Bytes()
}

// GetMeasurements returns the measurement names in schema order.
func (t *Tablet) GetMeasurements() []string {
	measurements := make([]string, len(t.measurementSchemas))
	for i, s := range t.measurementSchemas {
		measurements[i] = s.Measurement
	}
	return measurements
}

// getDataTypes returns the column data types as their int32 wire codes.
func (t *Tablet) getDataTypes() []int32 {
	types := make([]int32, len(t.measurementSchemas))
	for i, s := range t.measurementSchemas {
		types[i] = int32(s.DataType)
	}
	return types
}

// getValuesBytes serializes every value column as big-endian; TEXT values
// are written as an int32 length prefix followed by the raw bytes.
func (t *Tablet) getValuesBytes() ([]byte, error) {
	buff := &bytes.Buffer{}
	for i, schema := range t.measurementSchemas {
		switch schema.DataType {
		case BOOLEAN:
			binary.Write(buff, binary.BigEndian, t.values[i].([]bool))
		case INT32:
			binary.Write(buff, binary.BigEndian, t.values[i].([]int32))
		case INT64:
			binary.Write(buff, binary.BigEndian, t.values[i].([]int64))
		case FLOAT:
			binary.Write(buff, binary.BigEndian, t.values[i].([]float32))
		case DOUBLE:
			binary.Write(buff, binary.BigEndian, t.values[i].([]float64))
		case TEXT:
			for _, s := range t.values[i].([]string) {
				binary.Write(buff, binary.BigEndian, int32(len(s)))
				binary.Write(buff, binary.BigEndian, []byte(s))
			}
		default:
			return nil, fmt.Errorf("illegal datatype %v", schema.DataType)
		}
	}
	return buff.Bytes(), nil
}

// Sort orders the rows of the tablet by ascending timestamp.
func (t *Tablet) Sort() error {
	sort.Sort(t)
	return nil
}

// NewTablet creates a Tablet for the given device with pre-allocated,
// zero-valued column slices of the given row count.
func NewTablet(deviceId string, measurementSchemas []*MeasurementSchema, rowCount int) (*Tablet, error) {
	if rowCount < 0 {
		// Fix: a negative rowCount previously panicked inside make().
		return nil, fmt.Errorf("illegal argument rowCount %d", rowCount)
	}

	tablet := &Tablet{
		deviceId:           deviceId,
		measurementSchemas: measurementSchemas,
		rowCount:           rowCount,
	}
	tablet.timestamps = make([]int64, rowCount)
	tablet.values = make([]interface{}, len(measurementSchemas))
	for i, schema := range tablet.measurementSchemas {
		switch schema.DataType {
		case BOOLEAN:
			tablet.values[i] = make([]bool, rowCount)
		case INT32:
			tablet.values[i] = make([]int32, rowCount)
		case INT64:
			tablet.values[i] = make([]int64, rowCount)
		case FLOAT:
			tablet.values[i] = make([]float32, rowCount)
		case DOUBLE:
			tablet.values[i] = make([]float64, rowCount)
		case TEXT:
			tablet.values[i] = make([]string, rowCount)
		default:
			return nil, fmt.Errorf("illegal datatype %v", schema.DataType)
		}
	}
	return tablet, nil
}
client/tablet.go
0.542136
0.594875
tablet.go
starcoder
package physics

import (
	"github.com/kasworld/h4o/experimental/collision"
	"github.com/kasworld/h4o/geometry"
	"github.com/kasworld/h4o/gls"
	"github.com/kasworld/h4o/graphic"
	"github.com/kasworld/h4o/material"
	"github.com/kasworld/h4o/math32"
	"github.com/kasworld/h4o/node"
)

// This file contains helpful infrastructure for debugging physics

// DebugHelper is an empty placeholder type for physics debugging helpers.
type DebugHelper struct {
}

// ShowWorldFace renders the given face (in world coordinates) as a closed
// line strip of the given color and adds it to the scene. An empty face is
// silently ignored.
func ShowWorldFace(scene *node.Node, face []math32.Vector3, color *math32.Color) {
	if len(face) == 0 {
		return
	}

	vertices := math32.NewArrayF32(0, 16)
	for i := range face {
		vertices.AppendVector3(&face[i])
	}
	// Repeat the first vertex to close the outline.
	vertices.AppendVector3(&face[0])
	geom := geometry.NewGeometry()
	geom.AddVBO(gls.NewVBO(vertices).AddAttrib(gls.VertexPosition))
	mat := material.NewStandard(color)
	faceGraphic := graphic.NewLineStrip(geom, mat)
	scene.Add(faceGraphic)
}

// ShowPenAxis renders the given penetration axis as a white line extending
// +/- 100 units from the origin and adds it to the scene.
func ShowPenAxis(scene *node.Node, axis *math32.Vector3) { //}, min, max float32) {
	vertices := math32.NewArrayF32(0, 16)
	size := float32(100)
	minPoint := axis.Clone().MultiplyScalar(size)
	maxPoint := axis.Clone().MultiplyScalar(-size)

	// Cross-hair end markers, kept commented out for future debugging:
	//vertices.AppendVector3(minPoint.Clone().SetX(minPoint.X - size))
	//vertices.AppendVector3(minPoint.Clone().SetX(minPoint.X + size))
	//vertices.AppendVector3(minPoint.Clone().SetY(minPoint.Y - size))
	//vertices.AppendVector3(minPoint.Clone().SetY(minPoint.Y + size))
	//vertices.AppendVector3(minPoint.Clone().SetZ(minPoint.Z - size))
	//vertices.AppendVector3(minPoint.Clone().SetZ(minPoint.Z + size))
	vertices.AppendVector3(minPoint)
	//vertices.AppendVector3(maxPoint.Clone().SetX(maxPoint.X - size))
	//vertices.AppendVector3(maxPoint.Clone().SetX(maxPoint.X + size))
	//vertices.AppendVector3(maxPoint.Clone().SetY(maxPoint.Y - size))
	//vertices.AppendVector3(maxPoint.Clone().SetY(maxPoint.Y + size))
	//vertices.AppendVector3(maxPoint.Clone().SetZ(maxPoint.Z - size))
	//vertices.AppendVector3(maxPoint.Clone().SetZ(maxPoint.Z + size))
	vertices.AppendVector3(maxPoint)

	geom := geometry.NewGeometry()
	geom.AddVBO(gls.NewVBO(vertices).AddAttrib(gls.VertexPosition))
	mat := material.NewStandard(&math32.Color{1, 1, 1})
	faceGraphic := graphic.NewLines(geom, mat)
	scene.Add(faceGraphic)
}

// ShowContact renders the given contact as a small blue cross at the contact
// point plus a segment from the point along the negative normal scaled by the
// penetration depth, and adds it to the scene.
func ShowContact(scene *node.Node, contact *collision.Contact) {
	vertices := math32.NewArrayF32(0, 16)
	size := float32(0.0005)
	otherPoint := contact.Point.Clone().Add(contact.Normal.Clone().MultiplyScalar(-contact.Depth))

	// Tiny axis-aligned cross marking the contact point.
	vertices.AppendVector3(contact.Point.Clone().SetX(contact.Point.X - size))
	vertices.AppendVector3(contact.Point.Clone().SetX(contact.Point.X + size))
	vertices.AppendVector3(contact.Point.Clone().SetY(contact.Point.Y - size))
	vertices.AppendVector3(contact.Point.Clone().SetY(contact.Point.Y + size))
	vertices.AppendVector3(contact.Point.Clone().SetZ(contact.Point.Z - size))
	vertices.AppendVector3(contact.Point.Clone().SetZ(contact.Point.Z + size))
	// Depth segment from the contact point to the penetration end point.
	vertices.AppendVector3(contact.Point.Clone())
	vertices.AppendVector3(otherPoint)

	geom := geometry.NewGeometry()
	geom.AddVBO(gls.NewVBO(vertices).AddAttrib(gls.VertexPosition))
	mat := material.NewStandard(&math32.Color{0, 0, 1})
	faceGraphic := graphic.NewLines(geom, mat)
	scene.Add(faceGraphic)
}
experimental/physics/debug.go
0.594787
0.589894
debug.go
starcoder
package matchers

import (
	"fmt"
	"math"

	"github.com/cloudfoundry/bosh-utils/internal/github.com/onsi/gomega/format"
)

// BeNumericallyMatcher compares numbers with the given comparator.
// CompareTo[0] is the value to compare against; the optional CompareTo[1] is
// a threshold used by "~" (and by "==" for integer comparisons).
type BeNumericallyMatcher struct {
	Comparator string
	CompareTo  []interface{}
}

// FailureMessage describes why the match failed.
func (matcher *BeNumericallyMatcher) FailureMessage(actual interface{}) (message string) {
	return format.Message(actual, fmt.Sprintf("to be %s", matcher.Comparator), matcher.CompareTo[0])
}

// NegatedFailureMessage describes why the negated match failed.
func (matcher *BeNumericallyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
	return format.Message(actual, fmt.Sprintf("not to be %s", matcher.Comparator), matcher.CompareTo[0])
}

// Match validates the operands and dispatches to the float, signed or
// unsigned comparison depending on the actual value's kind.
func (matcher *BeNumericallyMatcher) Match(actual interface{}) (success bool, err error) {
	if len(matcher.CompareTo) == 0 || len(matcher.CompareTo) > 2 {
		return false, fmt.Errorf("BeNumerically requires 1 or 2 CompareTo arguments.  Got:\n%s", format.Object(matcher.CompareTo, 1))
	}
	if !isNumber(actual) {
		return false, fmt.Errorf("Expected a number.  Got:\n%s", format.Object(actual, 1))
	}
	if !isNumber(matcher.CompareTo[0]) {
		return false, fmt.Errorf("Expected a number.  Got:\n%s", format.Object(matcher.CompareTo[0], 1))
	}
	if len(matcher.CompareTo) == 2 && !isNumber(matcher.CompareTo[1]) {
		// Fix: this message previously printed CompareTo[0] — the operand
		// that already passed the check above — instead of the offending
		// CompareTo[1].
		return false, fmt.Errorf("Expected a number.  Got:\n%s", format.Object(matcher.CompareTo[1], 1))
	}

	switch matcher.Comparator {
	case "==", "~", ">", ">=", "<", "<=":
	default:
		return false, fmt.Errorf("Unknown comparator: %s", matcher.Comparator)
	}

	if isFloat(actual) || isFloat(matcher.CompareTo[0]) {
		// Floats default to a small tolerance for "~".
		var secondOperand float64 = 1e-8
		if len(matcher.CompareTo) == 2 {
			secondOperand = toFloat(matcher.CompareTo[1])
		}
		success = matcher.matchFloats(toFloat(actual), toFloat(matcher.CompareTo[0]), secondOperand)
	} else if isInteger(actual) {
		var secondOperand int64 = 0
		if len(matcher.CompareTo) == 2 {
			secondOperand = toInteger(matcher.CompareTo[1])
		}
		success = matcher.matchIntegers(toInteger(actual), toInteger(matcher.CompareTo[0]), secondOperand)
	} else if isUnsignedInteger(actual) {
		var secondOperand uint64 = 0
		if len(matcher.CompareTo) == 2 {
			secondOperand = toUnsignedInteger(matcher.CompareTo[1])
		}
		success = matcher.matchUnsignedIntegers(toUnsignedInteger(actual), toUnsignedInteger(matcher.CompareTo[0]), secondOperand)
	} else {
		return false, fmt.Errorf("Failed to compare:\n%s\n%s:\n%s", format.Object(actual, 1), matcher.Comparator, format.Object(matcher.CompareTo[0], 1))
	}

	return success, nil
}

// matchIntegers compares signed integers; "==" and "~" allow a +/- threshold
// (zero by default, making "==" exact).
func (matcher *BeNumericallyMatcher) matchIntegers(actual, compareTo, threshold int64) (success bool) {
	switch matcher.Comparator {
	case "==", "~":
		diff := actual - compareTo
		return -threshold <= diff && diff <= threshold
	case ">":
		return (actual > compareTo)
	case ">=":
		return (actual >= compareTo)
	case "<":
		return (actual < compareTo)
	case "<=":
		return (actual <= compareTo)
	}
	return false
}

// matchUnsignedIntegers compares unsigned integers; the operands are ordered
// before subtracting so the difference cannot wrap around.
func (matcher *BeNumericallyMatcher) matchUnsignedIntegers(actual, compareTo, threshold uint64) (success bool) {
	switch matcher.Comparator {
	case "==", "~":
		if actual < compareTo {
			actual, compareTo = compareTo, actual
		}
		return actual-compareTo <= threshold
	case ">":
		return (actual > compareTo)
	case ">=":
		return (actual >= compareTo)
	case "<":
		return (actual < compareTo)
	case "<=":
		return (actual <= compareTo)
	}
	return false
}

// matchFloats compares floats; "~" uses the threshold, while "==" is exact.
func (matcher *BeNumericallyMatcher) matchFloats(actual, compareTo, threshold float64) (success bool) {
	switch matcher.Comparator {
	case "~":
		return math.Abs(actual-compareTo) <= threshold
	case "==":
		return (actual == compareTo)
	case ">":
		return (actual > compareTo)
	case ">=":
		return (actual >= compareTo)
	case "<":
		return (actual < compareTo)
	case "<=":
		return (actual <= compareTo)
	}
	return false
}
internal/github.com/onsi/gomega/matchers/be_numerically_matcher.go
0.591251
0.580947
be_numerically_matcher.go
starcoder
package number

import (
	"fmt"
	"math"
	"strconv"
	"strings"
)

// Number is a fixed-point 3-digit number type
type Number int64

// Zero as constant
const Zero = Number(0)

// One as constant
const One = Number(1000)

// MinusOne as constant
const MinusOne = Number(-1000)

// MaxValue is the largest possible value
const MaxValue = Number(math.MaxInt64)

// MinValue is the smallest possible value
const MinValue = Number(math.MinInt64)

// scale is the fixed-point multiplier: 1.000 is stored as 1000.
const scale Number = 1000

// decimals is the number of decimal places a Number can carry.
const decimals = 3

// FromString parses a string into a Number
func FromString(str string) (Number, error) {
	parts := strings.Split(str, ".")
	switch len(parts) {
	case 1:
		// No decimal point: shift left by the full scale.
		str += "000"
	case 2:
		if len(parts[1]) > decimals {
			return Zero, fmt.Errorf("Numbers can have at most 3 decimal-places")
		}
		// Concatenate integer and zero-padded fractional digits: "1.5" -> "1500".
		str = parts[0] + parts[1] + strings.Repeat("0", decimals-len(parts[1]))
	default:
		return Zero, fmt.Errorf("invalid number %s", str)
	}
	// Parse with an explicit 64-bit size so the accepted range does not
	// depend on the platform's int width (strconv.Atoi is 32-bit on 32-bit
	// platforms).
	num, err := strconv.ParseInt(str, 10, 64)
	if err != nil {
		return Zero, err
	}
	return Number(num), nil
}

// MustFromString parses a string into a Number. Panics on parsing errors
func MustFromString(str string) Number {
	res, err := FromString(str)
	if err != nil {
		panic(err)
	}
	return res
}

// FromInt creates a Number from the given int
func FromInt(in int) Number {
	return Number(int64(in)) * scale
}

// FromFloat64 creates a Number from the given float
func FromFloat64(in float64) Number {
	return Number(int64(in * float64(scale)))
}

// FromFloat32 creates a Number from the given float
func FromFloat32(in float32) Number {
	// The multiplication stays in float32 (matching the input's precision),
	// but the conversion goes through int64: the previous int32 conversion
	// overflowed for |in| greater than roughly 2.1 million.
	return Number(int64(in * float32(scale)))
}

// Float64 returns the value of the number as float
func (n Number) Float64() float64 {
	return float64(n) / float64(scale)
}

// Float32 returns the value of the number as float
func (n Number) Float32() float32 {
	return float32(n) / float32(scale)
}

// String returns the value of the number as string
func (n Number) String() string {
	prefix := int(n / scale)
	str := strconv.Itoa(prefix)
	remainder := (n % scale).Abs()
	if remainder != 0 {
		if prefix == 0 {
			// Values in (-1, 1) print without a leading "0".
			str = ""
		}
		deci := strconv.Itoa(int(remainder))
		// Left-pad the fraction to three digits, then strip trailing zeros.
		str += "." + strings.Repeat("0", decimals-len(deci)) + deci
		str = strings.TrimRight(str, "0")
		if n < 0 && prefix >= 0 {
			// The sign was lost when the integer part truncated to 0.
			str = "-" + str
		}
	}
	return str
}

// Int returns the value of the number as int
func (n Number) Int() int {
	return int(n / 1000)
}

// Add adds two numbers
func (n Number) Add(m Number) Number {
	return n + m
}

// Sub substracts two numbers
func (n Number) Sub(m Number) Number {
	return n - m
}

// Mul multiplicates two numbers
func (n Number) Mul(m Number) Number {
	return (n * m) / scale
}

// Div divides two numbers
func (n Number) Div(m Number) (Number, error) {
	if m == 0 {
		return Zero, fmt.Errorf("Division by 0")
	}
	return (n * scale) / m, nil
}

// Abs returns the absolute value of the number
func (n Number) Abs() Number {
	if n >= 0 {
		return n
	}
	return n * -1
}

// Sqrt returns the square root of the number. Negative or too-large inputs
// yield MinValue.
func (n Number) Sqrt() Number {
	if n < 0 {
		return MinValue
	}
	if n >= FromInt(9223372036854775) {
		return MinValue
	}
	// blatantly copied from: https://github.com/martindevans/Yolol/blob/837322c51836c70b89e35e8c5b2e649732cbb2ec/Yolol/Execution/Number.cs#L540
	result := math.Sqrt(n.Float64())
	var epsilon float64
	if result < 0 {
		epsilon = -0.00005
	} else {
		epsilon = 0.00005
	}
	return FromFloat64(result + epsilon)
}

// Mod returns the modulus of the number
func (n Number) Mod(m Number) (Number, error) {
	if m == Zero {
		return Zero, fmt.Errorf("Division by 0")
	}
	if m < One && m > MinusOne {
		return Zero, fmt.Errorf("The ingame-implementation thinks this is a division by 0")
	}
	return n % m, nil
}

// Pow exponentiates the number
func (n Number) Pow(m Number) Number {
	res := math.Pow(n.Float64(), m.Float64())
	if math.IsInf(res, 1) {
		return MaxValue
	}
	if math.IsInf(res, -1) {
		return MinValue
	}
	// Reuse res instead of recomputing math.Pow a second time.
	return FromFloat64(res)
}

// convert given degree to radians
func toRad(n float32) float32 {
	return n * math.Pi / 180
}

// convert given radian to degrees
func toDeg(n float32) float32 {
	return n * 180 / math.Pi
}

// execute the given trigonometric function with the given argument, but only
// use 32bits of precision (to match the game's implementation)
func reducedPresisionTrig(f func(float64) float64, arg float32) float32 {
	return float32(f(float64(arg)))
}

// Sin returns the sin of the number (in degrees)
func (n Number) Sin() Number {
	return FromFloat32(reducedPresisionTrig(math.Sin, toRad(n.Float32())))
}

// Cos returns the cos of the number (in degrees)
func (n Number) Cos() Number {
	return FromFloat32(reducedPresisionTrig(math.Cos, toRad(n.Float32())))
}

// Tan returns the tan of the number (in degrees)
func (n Number) Tan() Number {
	// Deliberately kept at 64-bit precision, unlike Sin/Cos — this mirrors
	// the original behavior; TODO confirm against the game implementation.
	rads := toRad(n.Float32())
	i := int64(math.Tan(float64(rads)) * 1000)
	return Number(i)
}

// Asin returns the asin of the number in degrees. Inputs outside [-1, 1]
// yield MinValue.
func (n Number) Asin() Number {
	if n > One || n < -One {
		return MinValue
	}
	return FromFloat32(toDeg(reducedPresisionTrig(math.Asin, n.Float32())))
}

// Acos returns the acos of the number in degrees. Inputs outside [-1, 1]
// yield MinValue.
func (n Number) Acos() Number {
	if n > One || n < -One {
		return MinValue
	}
	return FromFloat32(toDeg(reducedPresisionTrig(math.Acos, n.Float32())))
}

// Atan returns the atan of the number in degrees
func (n Number) Atan() Number {
	return FromFloat32(toDeg(reducedPresisionTrig(math.Atan, n.Float32())))
}

// Factorial computes the factorial (!). Negative inputs yield MinValue; the
// fractional part of n is discarded by the One-sized steps.
func (n Number) Factorial() Number {
	if n < 0 {
		return MinValue
	}
	res := 1
	i := 0
	for n > 0 {
		i++
		n = n.Sub(One)
		res *= i
	}
	return FromInt(res)
}
pkg/number/number.go
0.868771
0.487856
number.go
starcoder
package sarif // Address A physical or virtual address, or a range of addresses, in an 'addressable region' (memory or a binary file). type Address struct { // The address expressed as a byte offset from the start of the addressable region. AbsoluteAddress int `json:"absoluteAddress,omitempty"` // A human-readable fully qualified name that is associated with the address. FullyQualifiedName string `json:"fullyQualifiedName,omitempty"` // The index within run.addresses of the cached object for this address. Index int `json:"index,omitempty"` // An open-ended string that identifies the address kind. 'data', 'function', 'header','instruction', 'module', 'page', 'section', 'segment', 'stack', 'stackFrame', 'table' are well-known values. Kind string `json:"kind,omitempty"` // The number of bytes in this range of addresses. Length int `json:"length,omitempty"` // A name that is associated with the address, e.g., '.text'. Name string `json:"name,omitempty"` // The byte offset of this address from the absolute or relative address of the parent object. OffsetFromParent int `json:"offsetFromParent,omitempty"` // The index within run.addresses of the parent object. ParentIndex int `json:"parentIndex,omitempty"` // Key/value pairs that provide additional information about the address. Properties *PropertyBag `json:"properties,omitempty"` // The address expressed as a byte offset from the absolute address of the top-most parent object. RelativeAddress int `json:"relativeAddress,omitempty"` } // Artifact A single artifact. In some cases, this artifact might be nested within another artifact. type Artifact struct { // The contents of the artifact. Contents *ArtifactContent `json:"contents,omitempty"` // A short description of the artifact. Description *Message `json:"description,omitempty"` // Specifies the encoding for an artifact object that refers to a text file. 
Encoding string `json:"encoding,omitempty"` // A dictionary, each of whose keys is the name of a hash function and each of whose values is the hashed value of the artifact produced by the specified hash function. Hashes map[string]string `json:"hashes,omitempty"` // The Coordinated Universal Time (UTC) date and time at which the artifact was most recently modified. See "Date/time properties" in the SARIF spec for the required format. LastModifiedTimeUtc string `json:"lastModifiedTimeUtc,omitempty"` // The length of the artifact in bytes. Length int `json:"length,omitempty"` // The location of the artifact. Location *ArtifactLocation `json:"location,omitempty"` // The MIME type (RFC 2045) of the artifact. MimeType string `json:"mimeType,omitempty"` // The offset in bytes of the artifact within its containing artifact. Offset int `json:"offset,omitempty"` // Identifies the index of the immediate parent of the artifact, if this artifact is nested. ParentIndex int `json:"parentIndex,omitempty"` // Key/value pairs that provide additional information about the artifact. Properties *PropertyBag `json:"properties,omitempty"` // The role or roles played by the artifact in the analysis. Roles []interface{} `json:"roles,omitempty"` // Specifies the source language for any artifact object that refers to a text file that contains source code. SourceLanguage string `json:"sourceLanguage,omitempty"` } // ArtifactChange A change to a single artifact. type ArtifactChange struct { // The location of the artifact to change. ArtifactLocation *ArtifactLocation `json:"artifactLocation"` // Key/value pairs that provide additional information about the change. Properties *PropertyBag `json:"properties,omitempty"` // An array of replacement objects, each of which represents the replacement of a single region in a single artifact specified by 'artifactLocation'. Replacements []*Replacement `json:"replacements"` } // ArtifactContent Represents the contents of an artifact. 
type ArtifactContent struct { // MIME Base64-encoded content from a binary artifact, or from a text artifact in its original encoding. Binary string `json:"binary,omitempty"` // Key/value pairs that provide additional information about the artifact content. Properties *PropertyBag `json:"properties,omitempty"` // An alternate rendered representation of the artifact (e.g., a decompiled representation of a binary region). Rendered *MultiformatMessageString `json:"rendered,omitempty"` // UTF-8-encoded content from a text artifact. Text string `json:"text,omitempty"` } // ArtifactLocation Specifies the location of an artifact. type ArtifactLocation struct { // A short description of the artifact location. Description *Message `json:"description,omitempty"` // The index within the run artifacts array of the artifact object associated with the artifact location. Index int `json:"index,omitempty"` // Key/value pairs that provide additional information about the artifact location. Properties *PropertyBag `json:"properties,omitempty"` // A string containing a valid relative or absolute URI. URI string `json:"uri,omitempty"` // A string which indirectly specifies the absolute URI with respect to which a relative URI in the "uri" property is interpreted. UriBaseID string `json:"uriBaseId,omitempty"` } // Attachment An artifact relevant to a result. type Attachment struct { // The location of the attachment. ArtifactLocation *ArtifactLocation `json:"artifactLocation"` // A message describing the role played by the attachment. Description *Message `json:"description,omitempty"` // Key/value pairs that provide additional information about the attachment. Properties *PropertyBag `json:"properties,omitempty"` // An array of rectangles specifying areas of interest within the image. Rectangles []*Rectangle `json:"rectangles,omitempty"` // An array of regions of interest within the attachment. 
Regions []*Region `json:"regions,omitempty"` } // CodeFlow A set of threadFlows which together describe a pattern of code execution relevant to detecting a result. type CodeFlow struct { // A message relevant to the code flow. Message *Message `json:"message,omitempty"` // Key/value pairs that provide additional information about the code flow. Properties *PropertyBag `json:"properties,omitempty"` // An array of one or more unique threadFlow objects, each of which describes the progress of a program through a thread of execution. ThreadFlows []*ThreadFlow `json:"threadFlows"` } // ConfigurationOverride Information about how a specific rule or notification was reconfigured at runtime. type ConfigurationOverride struct { // Specifies how the rule or notification was configured during the scan. Configuration *ReportingConfiguration `json:"configuration"` // A reference used to locate the descriptor whose configuration was overridden. Descriptor *ReportingDescriptorReference `json:"descriptor"` // Key/value pairs that provide additional information about the configuration override. Properties *PropertyBag `json:"properties,omitempty"` } // Conversion Describes how a converter transformed the output of a static analysis tool from the analysis tool's native output format into the SARIF format. type Conversion struct { // The locations of the analysis tool's per-run log files. AnalysisToolLogFiles []*ArtifactLocation `json:"analysisToolLogFiles,omitempty"` // An invocation object that describes the invocation of the converter. Invocation *Invocation `json:"invocation,omitempty"` // Key/value pairs that provide additional information about the conversion. Properties *PropertyBag `json:"properties,omitempty"` // A tool object that describes the converter. Tool *Tool `json:"tool"` } // Edge Represents a directed edge in a graph. type Edge struct { // A string that uniquely identifies the edge within its graph. ID string `json:"id"` // A short description of the edge. 
Label *Message `json:"label,omitempty"` // Key/value pairs that provide additional information about the edge. Properties *PropertyBag `json:"properties,omitempty"` // Identifies the source node (the node at which the edge starts). SourceNodeID string `json:"sourceNodeId"` // Identifies the target node (the node at which the edge ends). TargetNodeID string `json:"targetNodeId"` } // EdgeTraversal Represents the traversal of a single edge during a graph traversal. type EdgeTraversal struct { // Identifies the edge being traversed. EdgeID string `json:"edgeId"` // The values of relevant expressions after the edge has been traversed. FinalState map[string]*MultiformatMessageString `json:"finalState,omitempty"` // A message to display to the user as the edge is traversed. Message *Message `json:"message,omitempty"` // Key/value pairs that provide additional information about the edge traversal. Properties *PropertyBag `json:"properties,omitempty"` // The number of edge traversals necessary to return from a nested graph. StepOverEdgeCount int `json:"stepOverEdgeCount,omitempty"` } // Exception Describes a runtime exception encountered during the execution of an analysis tool. type Exception struct { // An array of exception objects each of which is considered a cause of this exception. InnerExceptions []*Exception `json:"innerExceptions,omitempty"` // A string that identifies the kind of exception, for example, the fully qualified type name of an object that was thrown, or the symbolic name of a signal. Kind string `json:"kind,omitempty"` // A message that describes the exception. Message string `json:"message,omitempty"` // Key/value pairs that provide additional information about the exception. Properties *PropertyBag `json:"properties,omitempty"` // The sequence of function calls leading to the exception. Stack *Stack `json:"stack,omitempty"` } // ExternalProperties The top-level element of an external property file. 
type ExternalProperties struct { // Addresses that will be merged with a separate run. Addresses []*Address `json:"addresses,omitempty"` // An array of artifact objects that will be merged with a separate run. Artifacts []*Artifact `json:"artifacts,omitempty"` // A conversion object that will be merged with a separate run. Conversion *Conversion `json:"conversion,omitempty"` // The analysis tool object that will be merged with a separate run. Driver *ToolComponent `json:"driver,omitempty"` // Tool extensions that will be merged with a separate run. Extensions []*ToolComponent `json:"extensions,omitempty"` // Key/value pairs that provide additional information that will be merged with a separate run. ExternalizedProperties *PropertyBag `json:"externalizedProperties,omitempty"` // An array of graph objects that will be merged with a separate run. Graphs []*Graph `json:"graphs,omitempty"` // A stable, unique identifer for this external properties object, in the form of a GUID. GUID string `json:"guid,omitempty"` // Describes the invocation of the analysis tool that will be merged with a separate run. Invocations []*Invocation `json:"invocations,omitempty"` // An array of logical locations such as namespaces, types or functions that will be merged with a separate run. LogicalLocations []*LogicalLocation `json:"logicalLocations,omitempty"` // Tool policies that will be merged with a separate run. Policies []*ToolComponent `json:"policies,omitempty"` // Key/value pairs that provide additional information about the external properties. Properties *PropertyBag `json:"properties,omitempty"` // An array of result objects that will be merged with a separate run. Results []*Result `json:"results,omitempty"` // A stable, unique identifer for the run associated with this external properties object, in the form of a GUID. RunGUID string `json:"runGuid,omitempty"` // The URI of the JSON schema corresponding to the version of the external property file format. 
Schema string `json:"schema,omitempty"` // Tool taxonomies that will be merged with a separate run. Taxonomies []*ToolComponent `json:"taxonomies,omitempty"` // An array of threadFlowLocation objects that will be merged with a separate run. ThreadFlowLocations []*ThreadFlowLocation `json:"threadFlowLocations,omitempty"` // Tool translations that will be merged with a separate run. Translations []*ToolComponent `json:"translations,omitempty"` // The SARIF format version of this external properties object. Version interface{} `json:"version,omitempty"` // Requests that will be merged with a separate run. WebRequests []*WebRequest `json:"webRequests,omitempty"` // Responses that will be merged with a separate run. WebResponses []*WebResponse `json:"webResponses,omitempty"` } // ExternalPropertyFileReference Contains information that enables a SARIF consumer to locate the external property file that contains the value of an externalized property associated with the run. type ExternalPropertyFileReference struct { // A stable, unique identifer for the external property file in the form of a GUID. GUID string `json:"guid,omitempty"` // A non-negative integer specifying the number of items contained in the external property file. ItemCount int `json:"itemCount,omitempty"` // The location of the external property file. Location *ArtifactLocation `json:"location,omitempty"` // Key/value pairs that provide additional information about the external property file. Properties *PropertyBag `json:"properties,omitempty"` } // ExternalPropertyFileReferences References to external property files that should be inlined with the content of a root log file. type ExternalPropertyFileReferences struct { // An array of external property files containing run.addresses arrays to be merged with the root log file. Addresses []*ExternalPropertyFileReference `json:"addresses,omitempty"` // An array of external property files containing run.artifacts arrays to be merged with the root log file. 
Artifacts []*ExternalPropertyFileReference `json:"artifacts,omitempty"` // An external property file containing a run.conversion object to be merged with the root log file. Conversion *ExternalPropertyFileReference `json:"conversion,omitempty"` // An external property file containing a run.driver object to be merged with the root log file. Driver *ExternalPropertyFileReference `json:"driver,omitempty"` // An array of external property files containing run.extensions arrays to be merged with the root log file. Extensions []*ExternalPropertyFileReference `json:"extensions,omitempty"` // An external property file containing a run.properties object to be merged with the root log file. ExternalizedProperties *ExternalPropertyFileReference `json:"externalizedProperties,omitempty"` // An array of external property files containing a run.graphs object to be merged with the root log file. Graphs []*ExternalPropertyFileReference `json:"graphs,omitempty"` // An array of external property files containing run.invocations arrays to be merged with the root log file. Invocations []*ExternalPropertyFileReference `json:"invocations,omitempty"` // An array of external property files containing run.logicalLocations arrays to be merged with the root log file. LogicalLocations []*ExternalPropertyFileReference `json:"logicalLocations,omitempty"` // An array of external property files containing run.policies arrays to be merged with the root log file. Policies []*ExternalPropertyFileReference `json:"policies,omitempty"` // Key/value pairs that provide additional information about the external property files. Properties *PropertyBag `json:"properties,omitempty"` // An array of external property files containing run.results arrays to be merged with the root log file. Results []*ExternalPropertyFileReference `json:"results,omitempty"` // An array of external property files containing run.taxonomies arrays to be merged with the root log file. 
Taxonomies []*ExternalPropertyFileReference `json:"taxonomies,omitempty"` // An array of external property files containing run.threadFlowLocations arrays to be merged with the root log file. ThreadFlowLocations []*ExternalPropertyFileReference `json:"threadFlowLocations,omitempty"` // An array of external property files containing run.translations arrays to be merged with the root log file. Translations []*ExternalPropertyFileReference `json:"translations,omitempty"` // An array of external property files containing run.requests arrays to be merged with the root log file. WebRequests []*ExternalPropertyFileReference `json:"webRequests,omitempty"` // An array of external property files containing run.responses arrays to be merged with the root log file. WebResponses []*ExternalPropertyFileReference `json:"webResponses,omitempty"` } // Fix A proposed fix for the problem represented by a result object. A fix specifies a set of artifacts to modify. For each artifact, it specifies a set of bytes to remove, and provides a set of new bytes to replace them. type Fix struct { // One or more artifact changes that comprise a fix for a result. ArtifactChanges []*ArtifactChange `json:"artifactChanges"` // A message that describes the proposed fix, enabling viewers to present the proposed change to an end user. Description *Message `json:"description,omitempty"` // Key/value pairs that provide additional information about the fix. Properties *PropertyBag `json:"properties,omitempty"` } // Graph A network of nodes and directed edges that describes some aspect of the structure of the code (for example, a call graph). type Graph struct { // A description of the graph. Description *Message `json:"description,omitempty"` // An array of edge objects representing the edges of the graph. Edges []*Edge `json:"edges,omitempty"` // An array of node objects representing the nodes of the graph. 
Nodes []*Node `json:"nodes,omitempty"` // Key/value pairs that provide additional information about the graph. Properties *PropertyBag `json:"properties,omitempty"` } // GraphTraversal Represents a path through a graph. type GraphTraversal struct { // A description of this graph traversal. Description *Message `json:"description,omitempty"` // The sequences of edges traversed by this graph traversal. EdgeTraversals []*EdgeTraversal `json:"edgeTraversals,omitempty"` // Values of relevant expressions at the start of the graph traversal that remain constant for the graph traversal. ImmutableState map[string]*MultiformatMessageString `json:"immutableState,omitempty"` // Values of relevant expressions at the start of the graph traversal that may change during graph traversal. InitialState map[string]*MultiformatMessageString `json:"initialState,omitempty"` // Key/value pairs that provide additional information about the graph traversal. Properties *PropertyBag `json:"properties,omitempty"` // The index within the result.graphs to be associated with the result. ResultGraphIndex int `json:"resultGraphIndex,omitempty"` // The index within the run.graphs to be associated with the result. RunGraphIndex int `json:"runGraphIndex,omitempty"` } // Invocation The runtime environment of the analysis tool run. type Invocation struct { // The account under which the invocation occurred. Account string `json:"account,omitempty"` // An array of strings, containing in order the command line arguments passed to the tool from the operating system. Arguments []string `json:"arguments,omitempty"` // The command line used to invoke the tool. CommandLine string `json:"commandLine,omitempty"` // The Coordinated Universal Time (UTC) date and time at which the invocation ended. See "Date/time properties" in the SARIF spec for the required format. EndTimeUtc string `json:"endTimeUtc,omitempty"` // The environment variables associated with the analysis tool process, expressed as key/value pairs. 
EnvironmentVariables map[string]string `json:"environmentVariables,omitempty"` // An absolute URI specifying the location of the executable that was invoked. ExecutableLocation *ArtifactLocation `json:"executableLocation,omitempty"` // Specifies whether the tool's execution completed successfully. ExecutionSuccessful bool `json:"executionSuccessful"` // The process exit code. ExitCode int `json:"exitCode,omitempty"` // The reason for the process exit. ExitCodeDescription string `json:"exitCodeDescription,omitempty"` // The name of the signal that caused the process to exit. ExitSignalName string `json:"exitSignalName,omitempty"` // The numeric value of the signal that caused the process to exit. ExitSignalNumber int `json:"exitSignalNumber,omitempty"` // The machine on which the invocation occurred. Machine string `json:"machine,omitempty"` // An array of configurationOverride objects that describe notifications related runtime overrides. NotificationConfigurationOverrides []*ConfigurationOverride `json:"notificationConfigurationOverrides,omitempty"` // The id of the process in which the invocation occurred. ProcessId int `json:"processId,omitempty"` // The reason given by the operating system that the process failed to start. ProcessStartFailureMessage string `json:"processStartFailureMessage,omitempty"` // Key/value pairs that provide additional information about the invocation. Properties *PropertyBag `json:"properties,omitempty"` // The locations of any response files specified on the tool's command line. ResponseFiles []*ArtifactLocation `json:"responseFiles,omitempty"` // An array of configurationOverride objects that describe rules related runtime overrides. RuleConfigurationOverrides []*ConfigurationOverride `json:"ruleConfigurationOverrides,omitempty"` // The Coordinated Universal Time (UTC) date and time at which the invocation started. See "Date/time properties" in the SARIF spec for the required format. 
StartTimeUtc string `json:"startTimeUtc,omitempty"` // A file containing the standard error stream from the process that was invoked. Stderr *ArtifactLocation `json:"stderr,omitempty"` // A file containing the standard input stream to the process that was invoked. Stdin *ArtifactLocation `json:"stdin,omitempty"` // A file containing the standard output stream from the process that was invoked. Stdout *ArtifactLocation `json:"stdout,omitempty"` // A file containing the interleaved standard output and standard error stream from the process that was invoked. StdoutStderr *ArtifactLocation `json:"stdoutStderr,omitempty"` // A list of conditions detected by the tool that are relevant to the tool's configuration. ToolConfigurationNotifications []*Notification `json:"toolConfigurationNotifications,omitempty"` // A list of runtime conditions detected by the tool during the analysis. ToolExecutionNotifications []*Notification `json:"toolExecutionNotifications,omitempty"` // The working directory for the invocation. WorkingDirectory *ArtifactLocation `json:"workingDirectory,omitempty"` } // Location A location within a programming artifact. type Location struct { // A set of regions relevant to the location. Annotations []*Region `json:"annotations,omitempty"` // Value that distinguishes this location from all other locations within a single result object. Id int `json:"id,omitempty"` // The logical locations associated with the result. LogicalLocations []*LogicalLocation `json:"logicalLocations,omitempty"` // A message relevant to the location. Message *Message `json:"message,omitempty"` // Identifies the artifact and region. PhysicalLocation *PhysicalLocation `json:"physicalLocation,omitempty"` // Key/value pairs that provide additional information about the location. Properties *PropertyBag `json:"properties,omitempty"` // An array of objects that describe relationships between this location and others. 
Relationships []*LocationRelationship `json:"relationships,omitempty"` } // LocationRelationship Information about the relation of one location to another. type LocationRelationship struct { // A description of the location relationship. Description *Message `json:"description,omitempty"` // A set of distinct strings that categorize the relationship. Well-known kinds include 'includes', 'isIncludedBy' and 'relevant'. Kinds []string `json:"kinds,omitempty"` // Key/value pairs that provide additional information about the location relationship. Properties *PropertyBag `json:"properties,omitempty"` // A reference to the related location. Target int `json:"target"` } // LogicalLocation A logical location of a construct that produced a result. type LogicalLocation struct { // The machine-readable name for the logical location, such as a mangled function name provided by a C++ compiler that encodes calling convention, return type and other details along with the function name. DecoratedName string `json:"decoratedName,omitempty"` // The human-readable fully qualified name of the logical location. FullyQualifiedName string `json:"fullyQualifiedName,omitempty"` // The index within the logical locations array. Index int `json:"index,omitempty"` // The type of construct this logical location component refers to. Should be one of 'function', 'member', 'module', 'namespace', 'parameter', 'resource', 'returnType', 'type', 'variable', 'object', 'array', 'property', 'value', 'element', 'text', 'attribute', 'comment', 'declaration', 'dtd' or 'processingInstruction', if any of those accurately describe the construct. Kind string `json:"kind,omitempty"` // Identifies the construct in which the result occurred. For example, this property might contain the name of a class or a method. Name string `json:"name,omitempty"` // Identifies the index of the immediate parent of the construct in which the result was detected. 
For example, this property might point to a logical location that represents the namespace that holds a type. ParentIndex int `json:"parentIndex,omitempty"` // Key/value pairs that provide additional information about the logical location. Properties *PropertyBag `json:"properties,omitempty"` } // Message Encapsulates a message intended to be read by the end user. type Message struct { // An array of strings to substitute into the message string. Arguments []string `json:"arguments,omitempty"` // The identifier for this message. ID string `json:"id,omitempty"` // A Markdown message string. Markdown string `json:"markdown,omitempty"` // Key/value pairs that provide additional information about the message. Properties *PropertyBag `json:"properties,omitempty"` // A plain text message string. Text string `json:"text,omitempty"` } // MultiformatMessageString A message string or message format string rendered in multiple formats. type MultiformatMessageString struct { // A Markdown message string or format string. Markdown string `json:"markdown,omitempty"` // Key/value pairs that provide additional information about the message. Properties *PropertyBag `json:"properties,omitempty"` // A plain text message string or format string. Text string `json:"text"` } // Node Represents a node in a graph. type Node struct { // Array of child nodes. Children []*Node `json:"children,omitempty"` // A string that uniquely identifies the node within its graph. ID string `json:"id"` // A short description of the node. Label *Message `json:"label,omitempty"` // A code location associated with the node. Location *Location `json:"location,omitempty"` // Key/value pairs that provide additional information about the node. Properties *PropertyBag `json:"properties,omitempty"` } // Notification Describes a condition relevant to the tool itself, as opposed to being relevant to a target being analyzed by the tool. 
type Notification struct {
	// A reference used to locate the rule descriptor associated with this notification.
	AssociatedRule *ReportingDescriptorReference `json:"associatedRule,omitempty"`

	// A reference used to locate the descriptor relevant to this notification.
	Descriptor *ReportingDescriptorReference `json:"descriptor,omitempty"`

	// The runtime exception, if any, relevant to this notification.
	Exception *Exception `json:"exception,omitempty"`

	// A value specifying the severity level of the notification.
	Level interface{} `json:"level,omitempty"`

	// The locations relevant to this notification.
	Locations []*Location `json:"locations,omitempty"`

	// A message that describes the condition that was encountered.
	Message *Message `json:"message"`

	// Key/value pairs that provide additional information about the notification.
	Properties *PropertyBag `json:"properties,omitempty"`

	// The thread identifier of the code that generated the notification.
	ThreadID int `json:"threadId,omitempty"`

	// The Coordinated Universal Time (UTC) date and time at which the analysis tool generated the notification.
	TimeUtc string `json:"timeUtc,omitempty"`
}

// PhysicalLocation A physical location relevant to a result. Specifies a reference to a programming artifact together with a range of bytes or characters within that artifact.
type PhysicalLocation struct {
	// The address of the location.
	Address *Address `json:"address,omitempty"`

	// The location of the artifact.
	ArtifactLocation *ArtifactLocation `json:"artifactLocation,omitempty"`

	// Specifies a portion of the artifact that encloses the region. Allows a viewer to display additional context around the region.
	ContextRegion *Region `json:"contextRegion,omitempty"`

	// Key/value pairs that provide additional information about the physical location.
	Properties *PropertyBag `json:"properties,omitempty"`

	// Specifies a portion of the artifact.
	Region *Region `json:"region,omitempty"`
}

// PropertyBag Key/value pairs that provide additional information about the object.
type PropertyBag map[string]interface{}

// Rectangle An area within an image.
type Rectangle struct {
	// The Y coordinate of the bottom edge of the rectangle, measured in the image's natural units.
	Bottom float64 `json:"bottom,omitempty"`

	// The X coordinate of the left edge of the rectangle, measured in the image's natural units.
	Left float64 `json:"left,omitempty"`

	// A message relevant to the rectangle.
	Message *Message `json:"message,omitempty"`

	// Key/value pairs that provide additional information about the rectangle.
	Properties *PropertyBag `json:"properties,omitempty"`

	// The X coordinate of the right edge of the rectangle, measured in the image's natural units.
	Right float64 `json:"right,omitempty"`

	// The Y coordinate of the top edge of the rectangle, measured in the image's natural units.
	Top float64 `json:"top,omitempty"`
}

// Region A region within an artifact where a result was detected.
type Region struct {
	// The length of the region in bytes.
	ByteLength int `json:"byteLength,omitempty"`

	// The zero-based offset from the beginning of the artifact of the first byte in the region.
	ByteOffset int `json:"byteOffset,omitempty"`

	// The length of the region in characters.
	CharLength int `json:"charLength,omitempty"`

	// The zero-based offset from the beginning of the artifact of the first character in the region.
	CharOffset int `json:"charOffset,omitempty"`

	// The column number of the character following the end of the region.
	EndColumn int `json:"endColumn,omitempty"`

	// The line number of the last character in the region.
	EndLine int `json:"endLine,omitempty"`

	// A message relevant to the region.
	Message *Message `json:"message,omitempty"`

	// Key/value pairs that provide additional information about the region.
	Properties *PropertyBag `json:"properties,omitempty"`

	// The portion of the artifact contents within the specified region.
	Snippet *ArtifactContent `json:"snippet,omitempty"`

	// Specifies the source language, if any, of the portion of the artifact specified by the region object.
	SourceLanguage string `json:"sourceLanguage,omitempty"`

	// The column number of the first character in the region.
	StartColumn int `json:"startColumn,omitempty"`

	// The line number of the first character in the region.
	StartLine int `json:"startLine,omitempty"`
}

// Replacement The replacement of a single region of an artifact.
type Replacement struct {
	// The region of the artifact to delete.
	DeletedRegion *Region `json:"deletedRegion"`

	// The content to insert at the location specified by the 'deletedRegion' property.
	InsertedContent *ArtifactContent `json:"insertedContent,omitempty"`

	// Key/value pairs that provide additional information about the replacement.
	Properties *PropertyBag `json:"properties,omitempty"`
}

// ReportingConfiguration Information about a rule or notification that can be configured at runtime.
type ReportingConfiguration struct {
	// Specifies whether the report may be produced during the scan.
	Enabled bool `json:"enabled,omitempty"`

	// Specifies the failure level for the report.
	Level interface{} `json:"level,omitempty"`

	// Contains configuration information specific to a report.
	Parameters *PropertyBag `json:"parameters,omitempty"`

	// Key/value pairs that provide additional information about the reporting configuration.
	Properties *PropertyBag `json:"properties,omitempty"`

	// Specifies the relative priority of the report. Used for analysis output only.
	Rank float64 `json:"rank,omitempty"`
}

// ReportingDescriptor Metadata that describes a specific report produced by the tool, as part of the analysis it provides or its runtime reporting.
type ReportingDescriptor struct {
	// Default reporting configuration information.
	DefaultConfiguration *ReportingConfiguration `json:"defaultConfiguration,omitempty"`

	// An array of unique identifiers in the form of a GUID by which this report was known in some previous version of the analysis tool.
	DeprecatedGuids []string `json:"deprecatedGuids,omitempty"`

	// An array of stable, opaque identifiers by which this report was known in some previous version of the analysis tool.
	DeprecatedIds []string `json:"deprecatedIds,omitempty"`

	// An array of readable identifiers by which this report was known in some previous version of the analysis tool.
	DeprecatedNames []string `json:"deprecatedNames,omitempty"`

	// A description of the report. Should, as far as possible, provide details sufficient to enable resolution of any problem indicated by the result.
	FullDescription *MultiformatMessageString `json:"fullDescription,omitempty"`

	// A unique identifier for the reporting descriptor in the form of a GUID.
	GUID string `json:"guid,omitempty"`

	// Provides the primary documentation for the report, useful when there is no online documentation.
	Help *MultiformatMessageString `json:"help,omitempty"`

	// A URI where the primary documentation for the report can be found.
	HelpURI string `json:"helpUri,omitempty"`

	// A stable, opaque identifier for the report.
	ID string `json:"id"`

	// A set of name/value pairs with arbitrary names. Each value is a multiformatMessageString object, which holds message strings in plain text and (optionally) Markdown format. The strings can include placeholders, which can be used to construct a message in combination with an arbitrary number of additional string arguments.
	MessageStrings map[string]*MultiformatMessageString `json:"messageStrings,omitempty"`

	// A report identifier that is understandable to an end user.
	Name string `json:"name,omitempty"`

	// Key/value pairs that provide additional information about the report.
	Properties *PropertyBag `json:"properties,omitempty"`

	// An array of objects that describe relationships between this reporting descriptor and others.
	Relationships []*ReportingDescriptorRelationship `json:"relationships,omitempty"`

	// A concise description of the report. Should be a single sentence that is understandable when visible space is limited to a single line of text.
	ShortDescription *MultiformatMessageString `json:"shortDescription,omitempty"`
}

// ReportingDescriptorReference Information about how to locate a relevant reporting descriptor.
type ReportingDescriptorReference struct {
	// A guid that uniquely identifies the descriptor.
	GUID string `json:"guid,omitempty"`

	// The id of the descriptor.
	ID string `json:"id,omitempty"`

	// The index into an array of descriptors in toolComponent.ruleDescriptors, toolComponent.notificationDescriptors, or toolComponent.taxonomyDescriptors, depending on context.
	Index int `json:"index,omitempty"`

	// Key/value pairs that provide additional information about the reporting descriptor reference.
	Properties *PropertyBag `json:"properties,omitempty"`

	// A reference used to locate the toolComponent associated with the descriptor.
	ToolComponent *ToolComponentReference `json:"toolComponent,omitempty"`
}

// ReportingDescriptorRelationship Information about the relation of one reporting descriptor to another.
type ReportingDescriptorRelationship struct {
	// A description of the reporting descriptor relationship.
	Description *Message `json:"description,omitempty"`

	// A set of distinct strings that categorize the relationship. Well-known kinds include 'canPrecede', 'canFollow', 'willPrecede', 'willFollow', 'superset', 'subset', 'equal', 'disjoint', 'relevant', and 'incomparable'.
	Kinds []string `json:"kinds,omitempty"`

	// Key/value pairs that provide additional information about the reporting descriptor reference.
	Properties *PropertyBag `json:"properties,omitempty"`

	// A reference to the related reporting descriptor.
	Target *ReportingDescriptorReference `json:"target"`
}

// Result A result produced by an analysis tool.
type Result struct {
	// Identifies the artifact that the analysis tool was instructed to scan. This need not be the same as the artifact where the result actually occurred.
	AnalysisTarget *ArtifactLocation `json:"analysisTarget,omitempty"`

	// A set of artifacts relevant to the result.
	Attachments []*Attachment `json:"attachments,omitempty"`

	// The state of a result relative to a baseline of a previous run.
	BaselineState interface{} `json:"baselineState,omitempty"`

	// An array of 'codeFlow' objects relevant to the result.
	CodeFlows []*CodeFlow `json:"codeFlows,omitempty"`

	// A stable, unique identifier for the equivalence class of logically identical results to which this result belongs, in the form of a GUID.
	CorrelationGUID string `json:"correlationGuid,omitempty"`

	// A set of strings each of which individually defines a stable, unique identity for the result.
	Fingerprints map[string]string `json:"fingerprints,omitempty"`

	// An array of 'fix' objects, each of which represents a proposed fix to the problem indicated by the result.
	Fixes []*Fix `json:"fixes,omitempty"`

	// An array of one or more unique 'graphTraversal' objects.
	GraphTraversals []*GraphTraversal `json:"graphTraversals,omitempty"`

	// An array of zero or more unique graph objects associated with the result.
	Graphs []*Graph `json:"graphs,omitempty"`

	// A stable, unique identifier for the result in the form of a GUID.
	GUID string `json:"guid,omitempty"`

	// An absolute URI at which the result can be viewed.
	HostedViewerURI string `json:"hostedViewerUri,omitempty"`

	// A value that categorizes results by evaluation state.
	Kind interface{} `json:"kind,omitempty"`

	// A value specifying the severity level of the result.
	Level interface{} `json:"level,omitempty"`

	// The set of locations where the result was detected. Specify only one location unless the problem indicated by the result can only be corrected by making a change at every specified location.
	Locations []*Location `json:"locations,omitempty"`

	// A message that describes the result. The first sentence of the message only will be displayed when visible space is limited.
	Message *Message `json:"message"`

	// A positive integer specifying the number of times this logically unique result was observed in this run.
	OccurrenceCount int `json:"occurrenceCount,omitempty"`

	// A set of strings that contribute to the stable, unique identity of the result.
	PartialFingerprints map[string]string `json:"partialFingerprints,omitempty"`

	// Key/value pairs that provide additional information about the result.
	Properties *PropertyBag `json:"properties,omitempty"`

	// Information about how and when the result was detected.
	Provenance *ResultProvenance `json:"provenance,omitempty"`

	// A number representing the priority or importance of the result.
	Rank float64 `json:"rank,omitempty"`

	// A set of locations relevant to this result.
	RelatedLocations []*Location `json:"relatedLocations,omitempty"`

	// A reference used to locate the rule descriptor relevant to this result.
	Rule *ReportingDescriptorReference `json:"rule,omitempty"`

	// The stable, unique identifier of the rule, if any, to which this result is relevant.
	RuleID string `json:"ruleId,omitempty"`

	// The index within the tool component rules array of the rule object associated with this result.
	RuleIndex int `json:"ruleIndex,omitempty"`

	// An array of 'stack' objects relevant to the result.
	Stacks []*Stack `json:"stacks,omitempty"`

	// A set of suppressions relevant to this result.
	Suppressions []*Suppression `json:"suppressions,omitempty"`

	// An array of references to taxonomy reporting descriptors that are applicable to the result.
	Taxa []*ReportingDescriptorReference `json:"taxa,omitempty"`

	// A web request associated with this result.
WebRequest *WebRequest `json:"webRequest,omitempty"` // A web response associated with this result. WebResponse *WebResponse `json:"webResponse,omitempty"` // The URIs of the work items associated with this result. WorkItemUris []string `json:"workItemUris,omitempty"` } // ResultProvenance Contains information about how and when a result was detected. type ResultProvenance struct { // An array of physicalLocation objects which specify the portions of an analysis tool's output that a converter transformed into the result. ConversionSources []*PhysicalLocation `json:"conversionSources,omitempty"` // A GUID-valued string equal to the automationDetails.guid property of the run in which the result was first detected. FirstDetectionRunGUID string `json:"firstDetectionRunGuid,omitempty"` // The Coordinated Universal Time (UTC) date and time at which the result was first detected. See "Date/time properties" in the SARIF spec for the required format. FirstDetectionTimeUtc string `json:"firstDetectionTimeUtc,omitempty"` // The index within the run.invocations array of the invocation object which describes the tool invocation that detected the result. InvocationIndex int `json:"invocationIndex,omitempty"` // A GUID-valued string equal to the automationDetails.guid property of the run in which the result was most recently detected. LastDetectionRunGUID string `json:"lastDetectionRunGuid,omitempty"` // The Coordinated Universal Time (UTC) date and time at which the result was most recently detected. See "Date/time properties" in the SARIF spec for the required format. LastDetectionTimeUtc string `json:"lastDetectionTimeUtc,omitempty"` // Key/value pairs that provide additional information about the result. Properties *PropertyBag `json:"properties,omitempty"` } // Run Describes a single run of an analysis tool, and contains the reported output of that run. type Run struct { // Addresses associated with this run instance, if any. 
	Addresses []*Address `json:"addresses,omitempty"`

	// An array of artifact objects relevant to the run.
	Artifacts []*Artifact `json:"artifacts,omitempty"`

	// Automation details that describe this run.
	AutomationDetails *RunAutomationDetails `json:"automationDetails,omitempty"`

	// The 'guid' property of a previous SARIF 'run' that comprises the baseline that was used to compute result 'baselineState' properties for the run.
	BaselineGUID string `json:"baselineGuid,omitempty"`

	// Specifies the unit in which the tool measures columns.
	ColumnKind interface{} `json:"columnKind,omitempty"`

	// A conversion object that describes how a converter transformed an analysis tool's native reporting format into the SARIF format.
	Conversion *Conversion `json:"conversion,omitempty"`

	// Specifies the default encoding for any artifact object that refers to a text file.
	DefaultEncoding string `json:"defaultEncoding,omitempty"`

	// Specifies the default source language for any artifact object that refers to a text file that contains source code.
	DefaultSourceLanguage string `json:"defaultSourceLanguage,omitempty"`

	// References to external property files that should be inlined with the content of a root log file.
	ExternalPropertyFileReferences *ExternalPropertyFileReferences `json:"externalPropertyFileReferences,omitempty"`

	// An array of zero or more unique graph objects associated with the run.
	Graphs []*Graph `json:"graphs,omitempty"`

	// Describes the invocation of the analysis tool.
	Invocations []*Invocation `json:"invocations,omitempty"`

	// The language of the messages emitted into the log file during this run (expressed as an ISO 639-1 two-letter lowercase culture code) and an optional region (expressed as an ISO 3166-1 two-letter uppercase subculture code associated with a country or region). The casing is recommended but not required (in order for this data to conform to RFC5646).
	Language string `json:"language,omitempty"`

	// An array of logical locations such as namespaces, types or functions.
	LogicalLocations []*LogicalLocation `json:"logicalLocations,omitempty"`

	// An ordered list of character sequences that were treated as line breaks when computing region information for the run.
	NewlineSequences []string `json:"newlineSequences,omitempty"`

	// The artifact location specified by each uriBaseId symbol on the machine where the tool originally ran.
	OriginalUriBaseIds map[string]*ArtifactLocation `json:"originalUriBaseIds,omitempty"`

	// Contains configurations that may potentially override both reportingDescriptor.defaultConfiguration (the tool's default severities) and invocation.configurationOverrides (severities established at run-time from the command line).
	Policies []*ToolComponent `json:"policies,omitempty"`

	// Key/value pairs that provide additional information about the run.
	Properties *PropertyBag `json:"properties,omitempty"`

	// An array of strings used to replace sensitive information in a redaction-aware property.
	RedactionTokens []string `json:"redactionTokens,omitempty"`

	// The set of results contained in a SARIF log. The results array can be omitted when a run is solely exporting rules metadata. It must be present (but may be empty) if a log file represents an actual scan.
	Results []*Result `json:"results,omitempty"`

	// Automation details that describe the aggregate of runs to which this run belongs.
	RunAggregates []*RunAutomationDetails `json:"runAggregates,omitempty"`

	// A specialLocations object that defines locations of special significance to SARIF consumers.
	SpecialLocations *SpecialLocations `json:"specialLocations,omitempty"`

	// An array of toolComponent objects relevant to a taxonomy in which results are categorized.
	Taxonomies []*ToolComponent `json:"taxonomies,omitempty"`

	// An array of threadFlowLocation objects cached at run level.
ThreadFlowLocations []*ThreadFlowLocation `json:"threadFlowLocations,omitempty"` // Information about the tool or tool pipeline that generated the results in this run. A run can only contain results produced by a single tool or tool pipeline. A run can aggregate results from multiple log files, as long as context around the tool run (tool command-line arguments and the like) is identical for all aggregated files. Tool *Tool `json:"tool"` // The set of available translations of the localized data provided by the tool. Translations []*ToolComponent `json:"translations,omitempty"` // Specifies the revision in version control of the artifacts that were scanned. VersionControlProvenance []*VersionControlDetails `json:"versionControlProvenance,omitempty"` // An array of request objects cached at run level. WebRequests []*WebRequest `json:"webRequests,omitempty"` // An array of response objects cached at run level. WebResponses []*WebResponse `json:"webResponses,omitempty"` } // RunAutomationDetails Information that describes a run's identity and role within an engineering system process. type RunAutomationDetails struct { // A stable, unique identifier for the equivalence class of runs to which this object's containing run object belongs in the form of a GUID. CorrelationGUID string `json:"correlationGuid,omitempty"` // A description of the identity and role played within the engineering system by this object's containing run object. Description *Message `json:"description,omitempty"` // A stable, unique identifer for this object's containing run object in the form of a GUID. GUID string `json:"guid,omitempty"` // A hierarchical string that uniquely identifies this object's containing run object. ID string `json:"id,omitempty"` // Key/value pairs that provide additional information about the run automation details. Properties *PropertyBag `json:"properties,omitempty"` } // SpecialLocations Defines locations of special significance to SARIF consumers. 
type SpecialLocations struct {
	// Provides a suggestion to SARIF consumers to display file paths relative to the specified location.
	DisplayBase *ArtifactLocation `json:"displayBase,omitempty"`
	// Key/value pairs that provide additional information about the special locations.
	Properties *PropertyBag `json:"properties,omitempty"`
}

// Stack A call stack that is relevant to a result.
type Stack struct {
	// An array of stack frames that represents a sequence of calls, rendered in reverse chronological order, that comprise the call stack.
	Frames []*StackFrame `json:"frames"`
	// A message relevant to this call stack.
	Message *Message `json:"message,omitempty"`
	// Key/value pairs that provide additional information about the stack.
	Properties *PropertyBag `json:"properties,omitempty"`
}

// StackFrame A function call within a stack trace.
type StackFrame struct {
	// The location to which this stack frame refers.
	Location *Location `json:"location,omitempty"`
	// The name of the module that contains the code of this stack frame.
	Module string `json:"module,omitempty"`
	// The parameters of the call that is executing.
	Parameters []string `json:"parameters,omitempty"`
	// Key/value pairs that provide additional information about the stack frame.
	Properties *PropertyBag `json:"properties,omitempty"`
	// The thread identifier of the stack frame.
	ThreadID int `json:"threadId,omitempty"`
}

// Report Static Analysis Results Format (SARIF) Version 2.1.0 JSON Schema: a standard format for the output of static analysis tools.
type Report struct {
	// References to external property files that share data between runs.
	InlineExternalProperties []*ExternalProperties `json:"inlineExternalProperties,omitempty"`
	// Key/value pairs that provide additional information about the log file.
	Properties *PropertyBag `json:"properties,omitempty"`
	// The set of runs contained in this log file.
	Runs []*Run `json:"runs"`
	// The URI of the JSON schema corresponding to the version.
	Schema string `json:"$schema,omitempty"`
	// The SARIF format version of this log file.
	Version interface{} `json:"version"`
}

// Suppression A suppression that is relevant to a result.
type Suppression struct {
	// A stable, unique identifier for the suppression in the form of a GUID.
	GUID string `json:"guid,omitempty"`
	// A string representing the justification for the suppression.
	Justification string `json:"justification,omitempty"`
	// A string that indicates where the suppression is persisted.
	Kind interface{} `json:"kind"`
	// Identifies the location associated with the suppression.
	Location *Location `json:"location,omitempty"`
	// Key/value pairs that provide additional information about the suppression.
	Properties *PropertyBag `json:"properties,omitempty"`
	// A string that indicates the review status of the suppression.
	Status interface{} `json:"status,omitempty"`
}

// ThreadFlow Describes a sequence of code locations that specify a path through a single thread of execution such as an operating system thread or fiber.
type ThreadFlow struct {
	// A string that uniquely identifies the threadFlow within the codeFlow in which it occurs.
	ID string `json:"id,omitempty"`
	// Values of relevant expressions at the start of the thread flow that remain constant.
	ImmutableState map[string]*MultiformatMessageString `json:"immutableState,omitempty"`
	// Values of relevant expressions at the start of the thread flow that may change during thread flow execution.
	InitialState map[string]*MultiformatMessageString `json:"initialState,omitempty"`
	// A temporally ordered array of 'threadFlowLocation' objects, each of which describes a location visited by the tool while producing the result.
	Locations []*ThreadFlowLocation `json:"locations"`
	// A message relevant to the thread flow.
	Message *Message `json:"message,omitempty"`
	// Key/value pairs that provide additional information about the thread flow.
	Properties *PropertyBag `json:"properties,omitempty"`
}

// ThreadFlowLocation A location visited by an analysis tool while simulating or monitoring the execution of a program.
type ThreadFlowLocation struct {
	// An integer representing the temporal order in which execution reached this location.
	ExecutionOrder int `json:"executionOrder,omitempty"`
	// The Coordinated Universal Time (UTC) date and time at which this location was executed.
	ExecutionTimeUtc string `json:"executionTimeUtc,omitempty"`
	// Specifies the importance of this location in understanding the code flow in which it occurs. The order from most to least important is "essential", "important", "unimportant". Default: "important".
	Importance interface{} `json:"importance,omitempty"`
	// The index within the run threadFlowLocations array.
	Index int `json:"index,omitempty"`
	// A set of distinct strings that categorize the thread flow location. Well-known kinds include 'acquire', 'release', 'enter', 'exit', 'call', 'return', 'branch', 'implicit', 'false', 'true', 'caution', 'danger', 'unknown', 'unreachable', 'taint', 'function', 'handler', 'lock', 'memory', 'resource', 'scope' and 'value'.
	Kinds []string `json:"kinds,omitempty"`
	// The code location.
	Location *Location `json:"location,omitempty"`
	// The name of the module that contains the code that is executing.
	Module string `json:"module,omitempty"`
	// An integer representing a containment hierarchy within the thread flow.
	NestingLevel int `json:"nestingLevel,omitempty"`
	// Key/value pairs that provide additional information about the threadflow location.
	Properties *PropertyBag `json:"properties,omitempty"`
	// The call stack leading to this location.
	Stack *Stack `json:"stack,omitempty"`
	// A dictionary, each of whose keys specifies a variable or expression, the associated value of which represents the variable or expression value. For an annotation of kind 'continuation', for example, this dictionary might hold the current assumed values of a set of global variables.
	State map[string]*MultiformatMessageString `json:"state,omitempty"`
	// An array of references to rule or taxonomy reporting descriptors that are applicable to the thread flow location.
	Taxa []*ReportingDescriptorReference `json:"taxa,omitempty"`
	// A web request associated with this thread flow location.
	WebRequest *WebRequest `json:"webRequest,omitempty"`
	// A web response associated with this thread flow location.
	WebResponse *WebResponse `json:"webResponse,omitempty"`
}

// Tool The analysis tool that was run.
type Tool struct {
	// The analysis tool that was run.
	Driver *ToolComponent `json:"driver"`
	// Tool extensions that contributed to or reconfigured the analysis tool that was run.
	Extensions []*ToolComponent `json:"extensions,omitempty"`
	// Key/value pairs that provide additional information about the tool.
	Properties *PropertyBag `json:"properties,omitempty"`
}

// ToolComponent A component, such as a plug-in or the driver, of the analysis tool that was run.
type ToolComponent struct {
	// The component which is strongly associated with this component. For a translation, this refers to the component which has been translated. For an extension, this is the driver that provides the extension's plugin model.
	AssociatedComponent *ToolComponentReference `json:"associatedComponent,omitempty"`
	// The kinds of data contained in this object.
	Contents []interface{} `json:"contents,omitempty"`
	// The binary version of the tool component's primary executable file expressed as four non-negative integers separated by a period (for operating systems that express file versions in this way).
	DottedQuadFileVersion string `json:"dottedQuadFileVersion,omitempty"`
	// The absolute URI from which the tool component can be downloaded.
	DownloadURI string `json:"downloadUri,omitempty"`
	// A comprehensive description of the tool component.
	FullDescription *MultiformatMessageString `json:"fullDescription,omitempty"`
	// The name of the tool component along with its version and any other useful identifying information, such as its locale.
	FullName string `json:"fullName,omitempty"`
	// A dictionary, each of whose keys is a resource identifier and each of whose values is a multiformatMessageString object, which holds message strings in plain text and (optionally) Markdown format. The strings can include placeholders, which can be used to construct a message in combination with an arbitrary number of additional string arguments.
	GlobalMessageStrings map[string]*MultiformatMessageString `json:"globalMessageStrings,omitempty"`
	// A unique identifier for the tool component in the form of a GUID.
	GUID string `json:"guid,omitempty"`
	// The absolute URI at which information about this version of the tool component can be found.
	InformationURI string `json:"informationUri,omitempty"`
	// Specifies whether this object contains a complete definition of the localizable and/or non-localizable data for this component, as opposed to including only data that is relevant to the results persisted to this log file.
	IsComprehensive bool `json:"isComprehensive,omitempty"`
	// The language of the messages emitted into the log file during this run (expressed as an ISO 639-1 two-letter lowercase language code) and an optional region (expressed as an ISO 3166-1 two-letter uppercase subculture code associated with a country or region). The casing is recommended but not required (in order for this data to conform to RFC5646).
	Language string `json:"language,omitempty"`
	// The semantic version of the localized strings defined in this component; maintained by components that provide translations.
	LocalizedDataSemanticVersion string `json:"localizedDataSemanticVersion,omitempty"`
	// An array of the artifactLocation objects associated with the tool component.
	Locations []*ArtifactLocation `json:"locations,omitempty"`
	// The minimum value of localizedDataSemanticVersion required in translations consumed by this component; used by components that consume translations.
	MinimumRequiredLocalizedDataSemanticVersion string `json:"minimumRequiredLocalizedDataSemanticVersion,omitempty"`
	// The name of the tool component.
	Name string `json:"name"`
	// An array of reportingDescriptor objects relevant to the notifications related to the configuration and runtime execution of the tool component.
	Notifications []*ReportingDescriptor `json:"notifications,omitempty"`
	// The organization or company that produced the tool component.
	Organization string `json:"organization,omitempty"`
	// A product suite to which the tool component belongs.
	Product string `json:"product,omitempty"`
	// A localizable string containing the name of the suite of products to which the tool component belongs.
	ProductSuite string `json:"productSuite,omitempty"`
	// Key/value pairs that provide additional information about the tool component.
	Properties *PropertyBag `json:"properties,omitempty"`
	// A string specifying the UTC date (and optionally, the time) of the component's release.
	ReleaseDateUtc string `json:"releaseDateUtc,omitempty"`
	// An array of reportingDescriptor objects relevant to the analysis performed by the tool component.
	Rules []*ReportingDescriptor `json:"rules,omitempty"`
	// The tool component version in the format specified by Semantic Versioning 2.0.
	SemanticVersion string `json:"semanticVersion,omitempty"`
	// A brief description of the tool component.
	ShortDescription *MultiformatMessageString `json:"shortDescription,omitempty"`
	// An array of toolComponentReference objects to declare the taxonomies supported by the tool component.
	SupportedTaxonomies []*ToolComponentReference `json:"supportedTaxonomies,omitempty"`
	// An array of reportingDescriptor objects relevant to the definitions of both standalone and tool-defined taxonomies.
	Taxa []*ReportingDescriptor `json:"taxa,omitempty"`
	// Translation metadata, required for a translation, not populated by other component types.
	TranslationMetadata *TranslationMetadata `json:"translationMetadata,omitempty"`
	// The tool component version, in whatever format the component natively provides.
	Version string `json:"version,omitempty"`
}

// ToolComponentReference Identifies a particular toolComponent object, either the driver or an extension.
type ToolComponentReference struct {
	// The 'guid' property of the referenced toolComponent.
	GUID string `json:"guid,omitempty"`
	// An index into the referenced toolComponent in tool.extensions.
	Index int `json:"index,omitempty"`
	// The 'name' property of the referenced toolComponent.
	Name string `json:"name,omitempty"`
	// Key/value pairs that provide additional information about the toolComponentReference.
	Properties *PropertyBag `json:"properties,omitempty"`
}

// TranslationMetadata Provides additional metadata related to translation.
type TranslationMetadata struct {
	// The absolute URI from which the translation metadata can be downloaded.
	DownloadURI string `json:"downloadUri,omitempty"`
	// A comprehensive description of the translation metadata.
	FullDescription *MultiformatMessageString `json:"fullDescription,omitempty"`
	// The full name associated with the translation metadata.
	FullName string `json:"fullName,omitempty"`
	// The absolute URI from which information related to the translation metadata can be downloaded.
	InformationURI string `json:"informationUri,omitempty"`
	// The name associated with the translation metadata.
	Name string `json:"name"`
	// Key/value pairs that provide additional information about the translation metadata.
	Properties *PropertyBag `json:"properties,omitempty"`
	// A brief description of the translation metadata.
	ShortDescription *MultiformatMessageString `json:"shortDescription,omitempty"`
}

// VersionControlDetails Specifies the information necessary to retrieve a desired revision from a version control system.
type VersionControlDetails struct {
	// A Coordinated Universal Time (UTC) date and time that can be used to synchronize an enlistment to the state of the repository at that time.
	AsOfTimeUtc string `json:"asOfTimeUtc,omitempty"`
	// The name of a branch containing the revision.
	Branch string `json:"branch,omitempty"`
	// The location in the local file system to which the root of the repository was mapped at the time of the analysis.
	MappedTo *ArtifactLocation `json:"mappedTo,omitempty"`
	// Key/value pairs that provide additional information about the version control details.
	Properties *PropertyBag `json:"properties,omitempty"`
	// The absolute URI of the repository.
	RepositoryURI string `json:"repositoryUri"`
	// A string that uniquely and permanently identifies the revision within the repository.
	RevisionID string `json:"revisionId,omitempty"`
	// A tag that has been applied to the revision.
	RevisionTag string `json:"revisionTag,omitempty"`
}

// WebRequest Describes an HTTP request.
type WebRequest struct {
	// The body of the request.
	Body *ArtifactContent `json:"body,omitempty"`
	// The request headers.
	Headers map[string]string `json:"headers,omitempty"`
	// The index within the run.webRequests array of the request object associated with this result.
	Index int `json:"index,omitempty"`
	// The HTTP method. Well-known values are 'GET', 'PUT', 'POST', 'DELETE', 'PATCH', 'HEAD', 'OPTIONS', 'TRACE', 'CONNECT'.
	Method string `json:"method,omitempty"`
	// The request parameters.
	Parameters map[string]string `json:"parameters,omitempty"`
	// Key/value pairs that provide additional information about the request.
	Properties *PropertyBag `json:"properties,omitempty"`
	// The request protocol. Example: 'http'.
	Protocol string `json:"protocol,omitempty"`
	// The target of the request.
	Target string `json:"target,omitempty"`
	// The request version. Example: '1.1'.
	Version string `json:"version,omitempty"`
}

// WebResponse Describes the response to an HTTP request.
type WebResponse struct {
	// The body of the response.
	Body *ArtifactContent `json:"body,omitempty"`
	// The response headers.
	Headers map[string]string `json:"headers,omitempty"`
	// The index within the run.webResponses array of the response object associated with this result.
	Index int `json:"index,omitempty"`
	// Specifies whether a response was received from the server.
	NoResponseReceived bool `json:"noResponseReceived,omitempty"`
	// Key/value pairs that provide additional information about the response.
	Properties *PropertyBag `json:"properties,omitempty"`
	// The response protocol. Example: 'http'.
	Protocol string `json:"protocol,omitempty"`
	// The response reason. Example: 'Not found'.
	ReasonPhrase string `json:"reasonPhrase,omitempty"`
	// The response status code. Example: 451.
	StatusCode int `json:"statusCode,omitempty"`
	// The response version. Example: '1.1'.
	Version string `json:"version,omitempty"`
}
vendor/github.com/securego/gosec/v2/report/sarif/types.go
0.875415
0.532182
types.go
starcoder
package tetra3d import ( "github.com/hajimehoshi/ebiten/v2" "github.com/kvartborg/vector" ) const ( TriangleSortModeBackToFront = iota // TriangleSortBackToFront sorts the triangles from back to front (naturally). This is the default. TriangleSortModeFrontToBack // TriangleSortFrontToBack sorts the triangles in reverse order. TriangleSortModeNone // TriangleSortNone doesn't sort the triangles at all; this is the fastest triangle sorting mode, while also being the most graphically inaccurate. Usable if triangles don't visually intersect. ) const ( // TransparencyModeAuto means it will be opaque if the object or material's alpha >= 1, and transparent otherwise. TransparencyModeAuto = iota // TransparencyModeOpaque means the triangles are rendered to the color and depth buffer as normal. TransparencyModeOpaque // TransparencyModeAlphaClip means the triangles are rendered to the color and depth buffer, using the alpha of the triangles' texture to "cut out" the triangles. TransparencyModeAlphaClip // TransparencyModeTransparent means the triangles are not rendered to the depth buffer, but are rendered in a second pass after opaque and alpha-clip triangles. They are automatically sorted from back-to-front. TransparencyModeTransparent ) type Material struct { library *Library // library is a reference to the Library that this Material came from. Name string // Name is the name of the Material. Color *Color // The overall color of the Material. Texture *ebiten.Image // The texture applied to the Material. TexturePath string // The path to the texture, if it was not packed into the exporter. TextureFilterMode ebiten.Filter // Texture filtering mode TextureWrapMode ebiten.Address // Texture wrapping mode Tags *Tags // Tags is a Tags object, allowing you to specify auxiliary data on the Material. This is loaded from GLTF files if / Blender's Custom Properties if the setting is enabled on the export menu. 
BackfaceCulling bool // If backface culling is enabled (which it is by default), faces turned away from the camera aren't rendered. TriangleSortMode int // TriangleSortMode influences how triangles with this Material are sorted. Shadeless bool // If the material should be shadeless (unlit) or not CompositeMode ebiten.CompositeMode // Blend mode to use when rendering the material (i.e. additive, multiplicative, etc) // VertexProgram is a function that runs on the world position of each vertex position rendered with the material. // One can use this to simply transform the vertices of the mesh (note that this is, of course, not as performant as // a traditional vertex shader, but is fine for simple / low-poly mesh transformations). This function is run after // skinning the vertex (if the material belongs to a mesh that is skinned by an armature). // If the VertexProgram returns nil, the triangle that the vertex belongs to will not be rendered. VertexProgram func(vector.Vector) vector.Vector // ClipProgram is a function that runs on the clipped result of each vertex position rendered with the material. ClipProgram func(vector.Vector) vector.Vector // fragmentShader represents a shader used to render the material with. This shader is activated after rendering // to the depth texture, but before compositing the finished render to the screen after fog. fragmentShader *ebiten.Shader // FragmentShaderOn is an easy boolean toggle to control whether the shader is activated or not (it defaults to on). FragmentShaderOn bool // FragmentShaderOptions allows you to customize the custom fragment shader with uniforms or images. It does NOT take the // CompositeMode property from the Material's CompositeMode. By default, it's an empty DrawTrianglesShaderOptions struct. FragmentShaderOptions *ebiten.DrawTrianglesShaderOptions fragmentSrc []byte // If a material is tagged as transparent, it's rendered in a separate render pass. 
// Objects with transparent materials don't render to the depth texture and are sorted and rendered back-to-front, AFTER // all non-transparent materials. TransparencyMode int } // NewMaterial creates a new Material with the name given. func NewMaterial(name string) *Material { return &Material{ Name: name, Color: NewColor(1, 1, 1, 1), Tags: NewTags(), TextureFilterMode: ebiten.FilterNearest, TextureWrapMode: ebiten.AddressRepeat, BackfaceCulling: true, TriangleSortMode: TriangleSortModeBackToFront, TransparencyMode: TransparencyModeAuto, FragmentShaderOptions: &ebiten.DrawTrianglesShaderOptions{}, FragmentShaderOn: true, CompositeMode: ebiten.CompositeModeSourceOver, } } // Clone creates a clone of the specified Material. Note that Clone() cannot clone the Material's fragment shader or shader options. func (material *Material) Clone() *Material { newMat := NewMaterial(material.Name) newMat.library = material.library newMat.Color = material.Color.Clone() newMat.Texture = material.Texture newMat.Tags = material.Tags.Clone() newMat.BackfaceCulling = material.BackfaceCulling newMat.TriangleSortMode = material.TriangleSortMode newMat.Shadeless = material.Shadeless newMat.TransparencyMode = material.TransparencyMode newMat.TextureFilterMode = material.TextureFilterMode newMat.TextureWrapMode = material.TextureWrapMode newMat.CompositeMode = material.CompositeMode newMat.VertexProgram = material.VertexProgram newMat.ClipProgram = material.ClipProgram newMat.SetShader(material.fragmentSrc) newMat.FragmentShaderOn = material.FragmentShaderOn newMat.FragmentShaderOptions.CompositeMode = material.FragmentShaderOptions.CompositeMode newMat.FragmentShaderOptions.FillRule = material.FragmentShaderOptions.FillRule for i := range material.FragmentShaderOptions.Images { newMat.FragmentShaderOptions.Images[i] = material.FragmentShaderOptions.Images[i] } for k, v := range newMat.FragmentShaderOptions.Uniforms { newMat.FragmentShaderOptions.Uniforms[k] = v } return newMat } // 
SetShader creates a new custom Kage fragment shader for the Material if provided the shader's source code, provided as a []byte. // This custom shader would be used to render the mesh utilizing the material after rendering to the depth texture, but before // compositing the finished render to the screen after fog. If the shader is nil, the Material will render using the default Tetra3D // render setup (e.g. texture, UV values, vertex colors, and vertex lighting). // SetShader will return the Shader, and an error if the Shader failed to compile. func (material *Material) SetShader(src []byte) (*ebiten.Shader, error) { if src == nil { material.fragmentShader = nil material.fragmentSrc = nil return nil, nil } newShader, err := ebiten.NewShader(src) if err != nil { return nil, err } material.fragmentShader = newShader material.fragmentSrc = src return material.fragmentShader, nil } // Shader returns the custom Kage fragment shader for the Material. func (material *Material) Shader() *ebiten.Shader { return material.fragmentShader } // DisposeShader disposes the custom fragment Shader for the Material (assuming it has one). If it does not have a Shader, nothing happens. func (material *Material) DisposeShader() { if material.fragmentShader != nil { material.fragmentShader.Dispose() } material.fragmentSrc = nil material.fragmentShader = nil } // Library returns the Library from which this Material was loaded. If it was created through code, this function will return nil. func (material *Material) Library() *Library { return material.library }
material.go
0.757705
0.501648
material.go
starcoder
package tools import ( "bytes" "fmt" "sort" "strings" "github.com/miekg/dns" ) // RRArray represents an array of rrs // It implements Swapper interface, and is sortable. type RRArray []dns.RR // RRSetList is an array of RRArrays. type RRSetList []RRArray type NSEC3List struct { hashed map[string]string rrs map[string]*dns.NSEC3 } // Len returns the length of an RRArray. func (array RRArray) Len() int { return len(array) } // Swap swaps elements on positions i and j from RRArray func (array RRArray) Swap(i, j int) { array[i], array[j] = array[j], array[i] } // Less returns true if the element in the position i of RRArray is less than the element in position j of RRArray. func (array RRArray) Less(i, j int) bool { // RR Canonical order: // 1.- Canonical Owner Name (RFC 3034 6.1) // 2.- RR Class // 3.- Type // 4.- RRData (as left-aligned canonical form) si := dns.SplitDomainName(array[i].Header().Name) sj := dns.SplitDomainName(array[j].Header().Name) // Comparing tags, right to left ii, ij := len(si)-1, len(sj)-1 for ii >= 0 && ij >= 0 { if si[ii] != sj[ij] { return si[ii] < sj[ij] } ii-- ij-- } // Now one is a subdomain (or the same domain) of the other if ii != ij { return ii < ij } // Equal subdomain if array[i].Header().Class != array[j].Header().Class { return array[i].Header().Class < array[j].Header().Class } else if array[i].Header().Rrtype != array[j].Header().Rrtype { return array[i].Header().Rrtype < array[j].Header().Rrtype } else { return compareRRData(array[i], array[j]) } } // getTypeMap returns an map with the types contained in the array. 
func (array RRArray) getTypeMap() map[uint16]bool { typeMap := make(map[uint16]bool) for _, rr := range array { typeMap[rr.Header().Rrtype] = true } return typeMap } func newNSEC3List() *NSEC3List { return &NSEC3List{ hashed: make(map[string]string), rrs: make(map[string]*dns.NSEC3), } } func (nsec3Map NSEC3List) toSortedArray() RRArray { arr := make(RRArray, 0) for _, rr := range nsec3Map.rrs { arr = append(arr, rr) } quickSort(arr) return arr } func (nsec3Map NSEC3List) add(ownerName string, param *dns.NSEC3PARAM, typeMap map[uint16]bool) error { hName := dns.HashName(ownerName, param.Hash, param.Iterations, param.Salt) if hName == "" { return fmt.Errorf("empty NSEC3") } if name, hashedBefore := nsec3Map.hashed[hName]; hashedBefore && ownerName != name { return fmt.Errorf("hash collision") } if nsec3, ok := nsec3Map.rrs[hName]; !ok { // It does not exist in the map. nsec3 := &dns.NSEC3{ Hdr: dns.RR_Header{ Name: hName, Rrtype: dns.TypeNSEC3, Class: param.Hdr.Class, Ttl: param.Hdr.Ttl, }, Hash: param.Hash, Flags: param.Flags, Iterations: param.Iterations, SaltLength: param.SaltLength, Salt: param.Salt, HashLength: 20, TypeBitMap: newTypeArray(typeMap), } nsec3Map.rrs[hName] = nsec3 } else { // It exists in the map. 
We need to update it subTypeMap := make(map[uint16]bool) for k, v := range typeMap { subTypeMap[k] = v } for _, t := range nsec3.TypeBitMap { subTypeMap[t] = true } nsec3.TypeBitMap = newTypeArray(subTypeMap) } return nil } func compareRRData(rri, rrj dns.RR) bool { bytei := make([]byte, dns.MaxMsgSize) sizei, err := dns.PackRR(rri, bytei, 0, nil, false) if err != nil { return false } rrdatai := bytei[uint16(sizei)-rri.Header().Rdlength : sizei] // We remove the header from the representation bytej := make([]byte, dns.MaxMsgSize) sizej, err := dns.PackRR(rrj, bytej, 0, nil, false) if err != nil { return false } rrdataj := bytej[uint16(sizej)-rrj.Header().Rdlength : sizej] // We remove the header from the representation return bytes.Compare(rrdatai, rrdataj) < 0 } // Len returns the length of an RRSetList. func (setList RRSetList) Len() int { return len(setList) } // Swap swaps elements on positions i and j from RRSetList func (setList RRSetList) Swap(i, j int) { setList[i], setList[j] = setList[j], setList[i] } // Less returns true if the element in the position i of RRSetList is less than the element in position j of RRSetList. func (setList RRSetList) Less(i, j int) bool { iRRArray := setList[i] jRRArray := setList[j] if len(iRRArray) == 0 { return len(jRRArray) != 0 } // Create and array to reuse Less method from rrArrays cmpArray := append(make(RRArray, 0), iRRArray[0], jRRArray[0]) return cmpArray.Less(0, 1) } // WriteZone prints on writer all the rrs on the array. // The format of the text printed is the format of a DNS zone. 
func (ctx *Context) WriteZone() error { if ctx.Output == nil { return fmt.Errorf("output not defined in context") } ctx.Log.Printf("Writing zone") if _, err := fmt.Fprintln(ctx.Output, ctx.soa); err != nil { return err } for _, rr := range ctx.rrs { if rr.Header().Rrtype == dns.TypeSOA { continue // Skipping SOA because is the first one } if _, err := fmt.Fprintln(ctx.Output, rr); err != nil { return err } } ctx.Log.Printf("Zone written") return nil } // getRRSetList groups the rrs by owner name and class if byType is false, or owner name, class and type if byType is true // NSEC/NSEC3 uses the version with byType = false, and RRSIG uses the other version. func (ctx *Context) getRRSetList(byType bool) (set RRSetList) { // RRsets are RR grouped by rsaLabel and class for NSEC/NSEC3 // and by rsaLabel, class, type for RRSIG: // An RRSIG record contains the signature for an RRset with a particular // name, class, and type. RFC4034 setMap := make(map[string]RRArray) for _, rr := range ctx.rrs { hash := getHash(rr, byType) hashArr, ok := setMap[hash] if !ok { hashArr = make(RRArray, 0) setMap[hash] = hashArr } setMap[hash] = append(hashArr, rr) } set = make(RRSetList, 0) for _, rrSet := range setMap { set = append(set, rrSet) } return set } // addNSECRecords edits an RRArray and adds the respective NSEC records to it. 
// Finally, it sorts the records func (ctx *Context) addNSECRecords() { set := ctx.getRRSetList(false) n := len(set) for i, rrs := range set { typeMap := make(map[uint16]struct{}) typeArray := make([]uint16, 0) for _, rr := range rrs { typeMap[rr.Header().Rrtype] = struct{}{} } typeMap[dns.TypeNSEC] = struct{}{} rrSetName := rrs[0].Header().Name if rrSetName == ctx.Config.Zone { typeMap[dns.TypeDNSKEY] = struct{}{} } for k := range typeMap { typeArray = append(typeArray, k) } sort.Slice(typeArray, func(i, j int) bool { return typeArray[i] < typeArray[j] }) nsec := &dns.NSEC{} nsec.Hdr.Name = rrSetName nsec.Hdr.Rrtype = dns.TypeNSEC nsec.Hdr.Class = dns.ClassINET nsec.Hdr.Ttl = rrs[0].Header().Ttl nsec.NextDomain = set[(i+1)%n][0].Header().Name nsec.TypeBitMap = typeArray ctx.rrs = append(ctx.rrs, nsec) } } // addNSEC3Records edits an RRArray and adds the respective NSEC3 records to it. // If optOut is true, it sets the flag for NSEC3PARAM RR, following RFC5155 section 6. // It returns an error if there is a colission on the hashes. 
func (ctx *Context) addNSEC3Records() (err error) { setList := ctx.getRRSetList(false) var salt string if ctx.Config.NSEC3SaltValue == "" { salt, err = generateSalt(ctx.Config.NSEC3SaltLength) if err != nil { return err } } else { salt = ctx.Config.NSEC3SaltValue } param := &dns.NSEC3PARAM{ Hdr: dns.RR_Header{ Name: ctx.soa.Hdr.Name, Rrtype: dns.TypeNSEC3PARAM, Class: dns.ClassINET, Ttl: ctx.soa.Minttl, }, Hash: dns.SHA1, Iterations: ctx.Config.NSEC3Iterations, Salt: salt, SaltLength: uint8(len(salt) / 2), } if ctx.Config.OptOut { param.Flags = 1 } nsec3list := newNSEC3List() for _, rrSet := range setList { typeMap := rrSet.getTypeMap() if typeMap[dns.TypeSOA] { typeMap[dns.TypeNSEC3PARAM] = true } rrSetName := rrSet[0].Header().Name if rrSetName == ctx.Config.Zone { typeMap[dns.TypeDNSKEY] = true } if !(ctx.isSignable(rrSetName)) { continue } // Add current NSEC3 RR err := nsec3list.add(rrSet[0].Header().Name, param, typeMap) if err != nil { return err } // Add NSEC3 RRS for each sublabel labels := dns.SplitDomainName(strings.TrimSuffix(rrSet[0].Header().Name, ctx.Config.Zone)) for i := range labels { label := strings.Join(labels[i:], ".") + "." + ctx.Config.Zone if len(label) == 0 { break } err := nsec3list.add(label, param, typeMap) // we don't know if it is signable if err != nil { return err } } } // transform nsec3list to Sorted RRArray (to link to next hashes) sortedList := nsec3list.toSortedArray() // Link NSEC3s with their next domains. for i, nsec3 := range sortedList { nsec3.(*dns.NSEC3).NextDomain = sortedList[(i+1)%len(sortedList)].Header().Name } // Add zone name to each NSEC3 name. for i := 0; i < len(sortedList); i++ { sortedList[i].Header().Name += "." if ctx.Config.Zone != "." { sortedList[i].Header().Name += ctx.Config.Zone } ctx.rrs = append(ctx.rrs, sortedList[i]) } ctx.rrs = append(ctx.rrs, param) return nil } // sameRRSet returns true if both rrs provided should be on the same RRSetList. 
func sameRRSet(rr1, rr2 dns.RR, byType bool) bool { if rr1 == nil || rr2 == nil { return false } return rr1.Header().Class == rr2.Header().Class && strings.EqualFold(dns.Fqdn(rr1.Header().Name), dns.Fqdn(rr2.Header().Name)) && (!byType || rr1.Header().Rrtype == rr2.Header().Rrtype) } // String returns a string representation of the RRArray, based on the name, class and Rrtype of the first element. func (array RRArray) String() string { if len(array) == 0 { return "<empty_setlist>" } return fmt.Sprintf("%s#%s#%s", array[0].Header().Name, dns.ClassToString[array[0].Header().Class], dns.TypeToString[array[0].Header().Rrtype]) }
tools/rr_set.go
0.692954
0.456228
rr_set.go
starcoder
This is the Fission Router package. Its job is to: 1. Keep track of HTTP triggers and their mappings to functions Use the controller API to get and watch this state. 2. Given a function, get a reference to a routable function run service Use the ContainerPoolManager API to get a service backed by one or more function run containers. The container(s) backing the service may be newly created, or they might be reused. The only requirement is that one or more containers backs the service. 3. Forward the request to the service, and send the response back. Plain ol HTTP. */ package router import ( "context" "fmt" "log" "net/http" "time" "github.com/gorilla/mux" "github.com/fission/fission" "github.com/fission/fission/crd" executorClient "github.com/fission/fission/executor/client" ) // request url ---[mux]---> Function(name,uid) ----[fmap]----> k8s service url // request url ---[trigger]---> Function(name, deployment) ----[deployment]----> Function(name, uid) ----[pool mgr]---> k8s service url func router(ctx context.Context, httpTriggerSet *HTTPTriggerSet, resolver *functionReferenceResolver) *mutableRouter { muxRouter := mux.NewRouter() mr := NewMutableRouter(muxRouter) muxRouter.Use(fission.LoggingMiddleware) httpTriggerSet.subscribeRouter(ctx, mr, resolver) return mr } func serve(ctx context.Context, port int, httpTriggerSet *HTTPTriggerSet, resolver *functionReferenceResolver) { mr := router(ctx, httpTriggerSet, resolver) url := fmt.Sprintf(":%v", port) http.ListenAndServe(url, mr) } func Start(port int, executorUrl string) { // setup a signal handler for SIGTERM fission.SetupStackTraceHandler() fmap := makeFunctionServiceMap(time.Minute) fissionClient, _, _, err := crd.MakeFissionClient() if err != nil { log.Fatalf("Error connecting to kubernetes API: %v", err) } restClient := fissionClient.GetCrdClient() executor := executorClient.MakeClient(executorUrl) triggers, _, fnStore := makeHTTPTriggerSet(fmap, fissionClient, executor, restClient) resolver := 
makeFunctionReferenceResolver(fnStore) log.Printf("Starting router at port %v\n", port) ctx, cancel := context.WithCancel(context.Background()) defer cancel() serve(ctx, port, triggers, resolver) }
router/router.go
0.731442
0.405684
router.go
starcoder
// Package model: constructors and typed accessors for KeyValue, a typed
// key/value pair. The KeyValue struct and the ValueType_* constants are
// declared elsewhere — presumably in generated protobuf code; confirm.
package model

import (
	"encoding/binary"
	"encoding/hex"
	"fmt"
	"io"
	"sort"
	"strconv"
)

// These constants are kept mostly for backwards compatibility.
const (
	// StringType indicates the value is a unicode string
	StringType = ValueType_STRING
	// BoolType indicates the value is a Boolean encoded as int64 number 0 or 1
	BoolType = ValueType_BOOL
	// Int64Type indicates the value is an int64 number
	Int64Type = ValueType_INT64
	// Float64Type indicates the value is a float64 number stored as int64
	Float64Type = ValueType_FLOAT64
	// BinaryType indicates the value is binary blob stored as a byte array
	BinaryType = ValueType_BINARY
)

// KeyValues is a type alias that exposes convenience functions like Sort, FindByKey.
type KeyValues []KeyValue

// String creates a String-typed KeyValue
func String(key string, value string) KeyValue {
	return KeyValue{Key: key, VType: StringType, VStr: value}
}

// Bool creates a Bool-typed KeyValue
func Bool(key string, value bool) KeyValue {
	return KeyValue{Key: key, VType: BoolType, VBool: value}
}

// Int64 creates a Int64-typed KeyValue
func Int64(key string, value int64) KeyValue {
	return KeyValue{Key: key, VType: Int64Type, VInt64: value}
}

// Float64 creates a Float64-typed KeyValue
func Float64(key string, value float64) KeyValue {
	return KeyValue{Key: key, VType: Float64Type, VFloat64: value}
}

// Binary creates a Binary-typed KeyValue
func Binary(key string, value []byte) KeyValue {
	return KeyValue{Key: key, VType: BinaryType, VBinary: value}
}

// Bool returns the Boolean value stored in this KeyValue or false if it stores a different type.
// The caller must check VType before using this method.
func (kv *KeyValue) Bool() bool {
	if kv.VType == BoolType {
		return kv.VBool
	}
	return false
}

// Int64 returns the Int64 value stored in this KeyValue or 0 if it stores a different type.
// The caller must check VType before using this method.
func (kv *KeyValue) Int64() int64 { if kv.VType == Int64Type { return kv.VInt64 } return 0 } // Float64 returns the Float64 value stored in this KeyValue or 0 if it stores a different type. // The caller must check VType before using this method. func (kv *KeyValue) Float64() float64 { if kv.VType == Float64Type { return kv.VFloat64 } return 0 } // Binary returns the blob ([]byte) value stored in this KeyValue or nil if it stores a different type. // The caller must check VType before using this method. func (kv *KeyValue) Binary() []byte { if kv.VType == BinaryType { return kv.VBinary } return nil } // Value returns typed values stored in KeyValue as interface{}. func (kv *KeyValue) Value() interface{} { switch kv.VType { case StringType: return kv.VStr case BoolType: return kv.VBool case Int64Type: return kv.VInt64 case Float64Type: return kv.VFloat64 case BinaryType: return kv.VBinary default: return fmt.Errorf("unknown type %d", kv.VType) } } // AsStringLossy returns a potentially lossy string representation of the value. func (kv *KeyValue) AsStringLossy() string { return kv.asString(true) } // AsString returns a string representation of the value. func (kv *KeyValue) AsString() string { return kv.asString(false) } func (kv *KeyValue) asString(truncate bool) string { switch kv.VType { case StringType: return kv.VStr case BoolType: if kv.Bool() { return "true" } return "false" case Int64Type: return strconv.FormatInt(kv.Int64(), 10) case Float64Type: return strconv.FormatFloat(kv.Float64(), 'g', 10, 64) case BinaryType: if truncate && len(kv.VBinary) > 256 { return hex.EncodeToString(kv.VBinary[0:256]) + "..." } return hex.EncodeToString(kv.VBinary) default: return fmt.Sprintf("unknown type %d", kv.VType) } } // IsLess compares KeyValue object with another KeyValue. // The order is based first on the keys, then on type, and finally on the value. 
// IsLess delegates to Compare (defined elsewhere) and reports whether kv
// orders strictly before two.
func (kv *KeyValue) IsLess(two *KeyValue) bool {
	return kv.Compare(two) < 0
}

// Len, Swap and Less implement sort.Interface for KeyValues.
func (kvs KeyValues) Len() int { return len(kvs) }

func (kvs KeyValues) Swap(i, j int) { kvs[i], kvs[j] = kvs[j], kvs[i] }

func (kvs KeyValues) Less(i, j int) bool {
	return kvs[i].IsLess(&kvs[j])
}

// Sort does in-place sorting of KeyValues, first by key, then by value type,
// then by value.
func (kvs KeyValues) Sort() {
	sort.Sort(kvs)
}

// FindByKey scans the list of key-values searching for the first one with the given key.
// Returns found tag and a boolean flag indicating if the search was successful.
func (kvs KeyValues) FindByKey(key string) (KeyValue, bool) {
	for _, kv := range kvs {
		if kv.Key == key {
			return kv, true
		}
	}
	return KeyValue{}, false
}

// Equal compares KeyValues with another list. Both lists must be already sorted.
func (kvs KeyValues) Equal(other KeyValues) bool {
	l1, l2 := len(kvs), len(other)
	if l1 != l2 {
		return false
	}
	for i := 0; i < l1; i++ {
		if !kvs[i].Equal(&other[i]) {
			return false
		}
	}
	return true
}

// Hash implements Hash from Hashable. It folds every element, in order, into
// the writer; element order therefore affects the digest.
func (kvs KeyValues) Hash(w io.Writer) error {
	for i := range kvs {
		if err := kvs[i].Hash(w); err != nil {
			return err
		}
	}
	return nil
}

// Hash implements Hash from Hashable. The key bytes, the big-endian uint16
// type tag and then the value bytes are written to w.
func (kv KeyValue) Hash(w io.Writer) error {
	if _, err := w.Write([]byte(kv.Key)); err != nil {
		return err
	}
	if err := binary.Write(w, binary.BigEndian, uint16(kv.VType)); err != nil {
		return err
	}
	var err error
	switch kv.VType {
	case StringType:
		_, err = w.Write([]byte(kv.VStr))
	case BoolType:
		// bool is a fixed-size type, so binary.Write encodes it as one byte.
		err = binary.Write(w, binary.BigEndian, kv.VBool)
	case Int64Type:
		err = binary.Write(w, binary.BigEndian, kv.VInt64)
	case Float64Type:
		err = binary.Write(w, binary.BigEndian, kv.VFloat64)
	case BinaryType:
		_, err = w.Write(kv.VBinary)
	default:
		err = fmt.Errorf("unknown type %d", kv.VType)
	}
	return err
}
model/keyvalue.go
0.728459
0.444083
keyvalue.go
starcoder
package erts

import (
	"fmt"

	filter "github.com/soypat/go-estimate"
	"github.com/soypat/go-estimate/estimate"
	"github.com/soypat/go-estimate/noise"
	"gonum.org/v1/gonum/diff/fd"
	"gonum.org/v1/gonum/mat"
)

// JacFunc defines jacobian function to calculate Jacobian matrix
type JacFunc func(u mat.Vector) func(y, x []float64)

// ERTS is Extended Rauch-Tung-Striebel smoother
type ERTS struct {
	// q is state noise a.k.a. process noise
	q filter.Noise
	// FJacFn is propagation Jacobian function
	FJacFn JacFunc
	// f is EKF jacobian matrix
	f *mat.Dense
	// m is system model
	m filter.DiscreteModel
	// start is initial condition
	start filter.InitCond
}

// New creates new ERTS and returns it.
// It returns error if it fails to create ERTS smoother.
func New(m filter.DiscreteModel, init filter.InitCond, q filter.Noise) (*ERTS, error) {
	in, _, out, _ := m.SystemDims()
	if in <= 0 || out <= 0 {
		return nil, fmt.Errorf("Invalid model dimensions: [%d x %d]", in, out)
	}
	// When state noise is provided its covariance must match the state
	// dimension; otherwise fall back to a "none" noise source.
	if q != nil {
		if q.Cov().Symmetric() != in {
			return nil, fmt.Errorf("Invalid state noise dimension: %d", q.Cov().Symmetric())
		}
	} else {
		q, _ = noise.NewNone()
	}
	// propagation Jacobian: the returned closure has the (y, x) signature
	// expected by fd.Jacobian, so it cannot report errors — a propagation
	// failure panics inside the finite-difference evaluation.
	fJacFn := func(u mat.Vector) func([]float64, []float64) {
		q, _ := noise.NewZero(in)
		return func(xOut, xNow []float64) {
			x := mat.NewVecDense(len(xNow), xNow)
			xNext, err := m.Propagate(x, u, q.Sample())
			if err != nil {
				panic(err)
			}
			for i := 0; i < len(xOut); i++ {
				xOut[i] = xNext.At(i, 0)
			}
		}
	}
	// f is reused as scratch space for the Jacobian on every Smooth step.
	f := mat.NewDense(in, in, nil)
	return &ERTS{
		q:      q,
		FJacFn: fJacFn,
		f:      f,
		m:      m,
		start:  init,
	}, nil
}

// Smooth implements Rauch-Tung-Striebel smoothing algorithm.
// It uses estimates est to compute smoothed estimates and returns them.
// It returns error if either est is nil or smoothing could not be computed.
func (s *ERTS) Smooth(est []filter.Estimate, u []mat.Vector) ([]filter.Estimate, error) {
	if est == nil {
		return nil, fmt.Errorf("Invalid estimates size")
	}
	// When control inputs are supplied there must be one per estimate.
	if u != nil && len(u) != len(est) {
		return nil, fmt.Errorf("Invalid input vector size")
	}
	sx := make([]filter.Estimate, len(est))
	// create initial estimate to work from recursively
	// NOTE(review): the first backward step compares against the initial
	// condition (s.start) rather than the last filtered estimate — confirm
	// this boundary value is intended.
	e, err := estimate.NewBaseWithCov(s.start.State(), s.start.Cov())
	if err != nil {
		return nil, err
	}
	// smoothed state and smoothed covariance scratch matrices
	x := &mat.Dense{}
	pk := &mat.Dense{}
	var uEst mat.Vector = nil
	// Backward pass: from the last estimate towards the first.
	for i := len(est) - 1; i >= 0; i-- {
		// pick the control input for this step, if any
		if u != nil {
			uEst = u[i]
		}
		// propagate input state to the next step
		xk1, err := s.m.Propagate(est[i].Val(), uEst, s.q.Sample())
		if err != nil {
			return nil, fmt.Errorf("Model state propagation failed: %v", err)
		}
		// calculate propagation Jacobian matrix (finite differences into s.f)
		fd.Jacobian(s.f, s.FJacFn(uEst), mat.Col(nil, 0, est[i].Val()), &fd.JacobianSettings{
			Formula:    fd.Central,
			Concurrent: true,
		})
		// predicted covariance P_(k+1) = F*Pk*F' (+ Q unless noise is None)
		pk1 := &mat.Dense{}
		pk1.Mul(s.f, est[i].Cov())
		pk1.Mul(pk1, s.f.T())
		if _, ok := s.q.(*noise.None); !ok {
			pk1.Add(pk1, s.q.Cov())
		}
		// calculate smoothing matrix
		c := &mat.Dense{}
		// Pk*Ak'
		c.Mul(est[i].Cov(), s.f.T())
		// P_(k+1)^-1 inverse
		pinv := &mat.Dense{}
		// invert predicted P_k+1 covariance
		if err := pinv.Inverse(pk1); err != nil {
			return nil, err
		}
		// Ck = Pk*Fk'*P_(k+1)^-1
		c.Mul(c, pinv)
		// smooth the state: xk + Ck*(x_smoothed_(k+1) - x_predicted_(k+1))
		x.Sub(e.Val(), xk1)
		// c*x
		x.Mul(c, x)
		// xk + Ck*x_sub
		x.Add(est[i].Val(), x)
		// smoothed covariance: Pk + Ck*(P_smoothed_(k+1) - P_predicted_(k+1))*Ck'
		cov := &mat.Dense{}
		// P_sub = P_smoothed_(k+1) - P_predicted_(k+1)
		cov.Sub(e.Cov(), pk1)
		// Ck*P_sub
		pk.Mul(c, cov)
		// Ck*P_sub*Ck'
		pk.Mul(pk, c.T())
		// Pk + Ck*P_sub*Ck'
		pk.Add(est[i].Cov(), pk)
		r, _ := cov.Dims()
		pSmooth := mat.NewSymDense(r, nil)
		// copy the upper triangle into a SymDense for the estimate API
		for i := 0; i < r; i++ {
			for j := i; j < r; j++ {
				pSmooth.SetSym(i, j, pk.At(i, j))
			}
		}
		e, err = estimate.NewBaseWithCov(x.ColView(0), pSmooth)
		if err != nil {
			return nil, err
		}
		sx[i] = e
	}
	return sx, nil
}
smooth/erts/erts.go
0.693473
0.419588
erts.go
starcoder
package sqlset

import (
	"fmt"
	"math"

	"github.com/pbanos/botanic/feature"
)

/*
FeatureCriterion are used to represent feature.Criterion on SQL DB-backed
sets, they should be easily translatable to a condition on an SQL SELECT
statement's WHERE clause on a samples table.
*/
type FeatureCriterion struct {
	/*
		FeatureColumn is the column name for the feature.Feature the criterion
		is applying the restriction to.
	*/
	FeatureColumn string
	/*
		DiscreteFeature defines whether the feature criterion applies to a
		discrete feature
	*/
	DiscreteFeature bool
	/*
		Operator is a string representing the comparison against the value in
		the criterion that is applied to samples. It must be one of the
		following: "=", "<", ">", "<=" or ">=". The semantics are the result
		from reading the criterion as Feature Operator Value
	*/
	Operator string
	/*
		Value is the value against which a comparison is applied to samples.
		It should be either an integer for discrete features or a float64 for
		continuous features.
	*/
	Value interface{}
}

/*
ColumnNameFunc is a function that takes the name of a feature and returns
column name for it or an error if the name could not be transformed.
*/
type ColumnNameFunc func(string) (string, error)

/*
NewFeatureCriteria takes a feature.Criterion, a ColumnNameFunc and a map of
string to int containing a dictionary for converting discrete string values
into their integer representations and returns a slice of FeatureCriterion
equivalent to the given feature.Criterion or an error.

An error will be returned the ColumnNameFunc cannot provide a name for the
feature of the feature criterion, or if the given feature.Criterion is a
feature.DiscreteCriterion and its value has no representation defined on the
given dictionary.

For a feature.Criterion that is no feature.DiscreteCriterion nor
feature.ContinuousCriterion it returns an empty slice and no error. In other
words, it is interpreted as an undefined feature criterion, which imposes no
conditions on samples.
*/
func NewFeatureCriteria(fc feature.Criterion, cnf ColumnNameFunc, dictionary map[string]int) ([]*FeatureCriterion, error) {
	columnName, err := cnf(fc.Feature().Name())
	if err != nil {
		return nil, fmt.Errorf("cannot obtain column name for feature '%s': %v", fc.Feature().Name(), err)
	}
	result := []*FeatureCriterion{}
	switch fc := fc.(type) {
	case feature.ContinuousCriterion:
		// A continuous criterion is a half-open interval [a, b); an infinite
		// bound on either side imposes no condition for that side.
		a, b := fc.Interval()
		if !math.IsInf(a, 0) {
			result = append(result, &FeatureCriterion{columnName, false, ">=", a})
		}
		if !math.IsInf(b, 0) {
			result = append(result, &FeatureCriterion{columnName, false, "<", b})
		}
	case feature.DiscreteCriterion:
		// Discrete values are stored as their integer dictionary code.
		dvr, ok := dictionary[fc.Value()]
		if !ok {
			return nil, fmt.Errorf("non representable discrete value '%s' in feature criterion", fc.Value())
		}
		result = append(result, &FeatureCriterion{columnName, true, "=", dvr})
	}
	return result, nil
}
set/sqlset/feature_criterion.go
0.601008
0.416381
feature_criterion.go
starcoder
package controllers

import (
	"fmt"
	"math"
	"math/rand"
	"strconv"

	"github.com/astaxie/beego"
)

// MainController serves the lab2 page: it samples a random variable with
// CDF F(x) = 2/π·asin(x) on [0,1], builds a histogram and compares sample
// statistics against their analytic values.
type MainController struct {
	beego.Controller
}

// F_rasp is the distribution function (CDF): F(x) = 2/π·asin(x), clamped to [0,1].
func F_rasp(x float64) float64 {
	if x <= 0 {
		return 0
	}
	if x >= 1 {
		return 1
	}
	return 2.0 / math.Pi * math.Asin(x)
}

// F_plot is the density: f(x) = 2/(π·sqrt(1-x²)) on (0,1), 0 elsewhere.
func F_plot(x float64) float64 {
	if x <= 0 || x >= 1 {
		return 0
	}
	return 2.0 / math.Pi / math.Sqrt(1-x*x)
}

// sessionInt keeps an integer setting in the session. A positive integer in
// the named request parameter overrides the stored value; if nothing valid is
// stored yet, def is used. It returns the effective value.
func sessionInt(this *MainController, key string, def int) int {
	if i, err := strconv.Atoi(this.GetString(key)); err == nil && i > 0 {
		this.SetSession(key, i)
	} else if v := this.GetSession(key); v == nil || v.(int) == 0 {
		this.SetSession(key, def)
	}
	return this.GetSession(key).(int)
}

// UpdateN returns the sample size N (request parameter "N", default 10000).
// BUG FIX: the old version fed an empty/invalid parameter into strconv.Atoi
// and returned 0 whenever the session already held a value but the request
// carried no usable "N" — now the stored value is always returned.
func UpdateN(this *MainController) int {
	n := sessionInt(this, "N", 10000)
	this.Data["N"] = strconv.Itoa(n)
	return n
}

// UpdateIntervals returns the histogram interval count (request parameter
// "Gist", default 10). Same bug fix as UpdateN.
func UpdateIntervals(this *MainController) int {
	n := sessionInt(this, "Gist", 10)
	this.Data["GistIntervals"] = strconv.Itoa(n)
	return n
}

// myrand draws one sample on [a,b] by inverse transform sampling:
// F(x) = 2/π·asin(x) inverts to x = sin(π/2·u) with u uniform in [0,1).
func myrand(a int, b int) float64 {
	return float64(a) + (float64(b)-float64(a))*math.Sin(math.Pi/2*rand.Float64())
}

// L is the width of one histogram interval; set by appendInterval and
// exposed to the template.
var L float64

// appendInterval increments the histogram bucket containing v.
// BUG FIX: values at (or, through rounding, beyond) the upper bound b are
// clamped into the last bucket instead of indexing out of range.
func appendInterval(vector *[]int, v float64, a int, b int, Int int) {
	L = (float64(b) - float64(a)) / float64(Int)
	num := int((v - float64(a)) / L)
	if num >= Int {
		num = Int - 1
	}
	if num < 0 {
		num = 0
	}
	(*vector)[num]++
}

// med returns the arithmetic mean of v.
func med(v []float64) float64 {
	sum := 0.0
	for i := 0; i < len(v); i++ {
		sum += v[i]
	}
	return sum / float64(len(v))
}

// cmoment returns the I-th central moment of v about med.
func cmoment(v []float64, med float64, I float64) float64 {
	sum := 0.0
	for i := 0; i < len(v); i++ {
		sum += math.Pow(v[i]-med, I)
	}
	return sum / float64(len(v))
}

// disp returns the variance (2nd central moment).
func disp(v []float64, med float64) float64 {
	return cmoment(v, med, 2)
}

// cmoment3 returns the 3rd central moment.
func cmoment3(v []float64, med float64) float64 {
	return cmoment(v, med, 3)
}

// cmoment4 returns the 4th central moment.
func cmoment4(v []float64, med float64) float64 {
	return cmoment(v, med, 4)
}

// assim returns the skewness: mu3 / sigma^3.
func assim(v []float64, med float64) float64 {
	return cmoment3(v, med) / math.Pow(disp(v, med), 3.0/2.0)
}

// ekscess returns the excess kurtosis: mu4 / sigma^4 - 3.
func ekscess(v []float64, med float64) float64 {
	return cmoment4(v, med)/math.Pow(disp(v, med), 4.0/2.0) - 3
}

// Main handles the lab2 page: draws N samples, fills the histogram and
// renders sample versus analytic statistics into the template.
func (this *MainController) Main() {
	N := UpdateN(this)
	Int := UpdateIntervals(this)
	v := []float64{}
	a, b := 0, 1
	vecInt := make([]int, Int)
	for i := 0; i < N; i++ {
		val := myrand(a, b)
		v = append(v, val)
		appendInterval(&vecInt, val, a, b, Int)
	}
	srednee := med(v)
	this.Data["A"] = a
	this.Data["B"] = b
	this.Data["Gist"] = vecInt
	this.Data["N"] = N
	this.Data["L"] = L
	this.Data["Intervals"] = Int
	this.Data["Srednee"] = fmt.Sprintf("%.8f", srednee)
	this.Data["SredneeAn"] = 0.63662977
	this.Data["SredneeDif"] = fmt.Sprintf("%.8f", srednee-0.63662977)
	d := disp(v, srednee)
	this.Data["Disp"] = fmt.Sprintf("%.8f", d)
	this.Data["DispAn"] = 0.094715
	this.Data["DispDif"] = fmt.Sprintf("%.8f", d-0.094715)
	e := ekscess(v, srednee)
	this.Data["Ekscess"] = fmt.Sprintf("%.8f", e)
	this.Data["EkscessAn"] = -1.06842795
	this.Data["EkscessDif"] = fmt.Sprintf("%.8f", e-(-1.06842795))
	as := assim(v, srednee)
	this.Data["Assim"] = fmt.Sprintf("%.8f", as)
	this.Data["AssimAn"] = -0.49716835
	this.Data["AssimDif"] = fmt.Sprintf("%.8f", as-(-0.49716835))
	this.TplName = "lab2.tpl"
}
controllers/lab2.go
0.518546
0.405566
lab2.go
starcoder
package gl

import (
	"image/color"
	"math"

	"fyne.io/fyne/v2"
	"fyne.io/fyne/v2/canvas"
	"fyne.io/fyne/v2/internal/painter"
)

// drawTextureWithDetails fetches (or creates) the object's texture, computes
// its screen-space rectangle and issues the textured draw call.
func (p *glPainter) drawTextureWithDetails(o fyne.CanvasObject, creator func(canvasObject fyne.CanvasObject) Texture,
	pos fyne.Position, size, frame fyne.Size, fill canvas.ImageFill, alpha float32, pad float32) {
	texture := p.getTexture(o, creator)
	if texture == NoTexture {
		return
	}

	// Only images have an intrinsic aspect ratio to honor.
	aspect := float32(0)
	if img, ok := o.(*canvas.Image); ok {
		aspect = painter.GetAspect(img)
		if aspect == 0 {
			aspect = 1 // fallback, should not occur - normally an image load error
		}
	}
	points := p.rectCoords(size, pos, frame, fill, aspect, pad)
	vbo := p.glCreateBuffer(points)
	p.glDrawTexture(texture, alpha)
	p.glFreeBuffer(vbo)
}

// drawCircle renders a circle through its cached texture, padded for the
// vector stroke.
func (p *glPainter) drawCircle(circle *canvas.Circle, pos fyne.Position, frame fyne.Size) {
	p.drawTextureWithDetails(circle, p.newGlCircleTexture, pos,
		circle.Size(), frame, canvas.ImageFillStretch, 1.0, painter.VectorPad(circle))
}

// drawLine renders a stroked line; fully transparent or zero-width lines are
// skipped.
func (p *glPainter) drawLine(line *canvas.Line, pos fyne.Position, frame fyne.Size) {
	if line.StrokeColor == color.Transparent || line.StrokeColor == nil || line.StrokeWidth == 0 {
		return
	}

	points, halfWidth, feather := p.lineCoords(pos, line.Position1, line.Position2, line.StrokeWidth, 0.5, frame)
	vbo := p.glCreateLineBuffer(points)
	p.glDrawLine(halfWidth, line.StrokeColor, feather)
	p.glFreeBuffer(vbo)
}

// drawImage renders an image honoring its fill mode and alpha.
func (p *glPainter) drawImage(img *canvas.Image, pos fyne.Position, frame fyne.Size) {
	p.drawTextureWithDetails(img, p.newGlImageTexture, pos, img.Size(), frame, img.FillMode, float32(img.Alpha()), 0)
}

// drawRaster renders a raster object stretched to its size.
func (p *glPainter) drawRaster(img *canvas.Raster, pos fyne.Position, frame fyne.Size) {
	p.drawTextureWithDetails(img, p.newGlRasterTexture, pos, img.Size(), frame, canvas.ImageFillStretch, float32(img.Alpha()), 0)
}

// drawGradient renders a gradient using the provided texture factory.
func (p *glPainter) drawGradient(o fyne.CanvasObject, texCreator func(fyne.CanvasObject) Texture, pos fyne.Position, frame fyne.Size) {
	p.drawTextureWithDetails(o, texCreator, pos, o.Size(), frame, canvas.ImageFillStretch, 1.0, 0)
}

// drawRectangle renders a rectangle; skipped when both fill and stroke would
// be invisible.
func (p *glPainter) drawRectangle(rect *canvas.Rectangle, pos fyne.Position, frame fyne.Size) {
	if (rect.FillColor == color.Transparent || rect.FillColor == nil) &&
		(rect.StrokeColor == color.Transparent || rect.StrokeColor == nil || rect.StrokeWidth == 0) {
		return
	}
	p.drawTextureWithDetails(rect, p.newGlRectTexture, pos,
		rect.Size(), frame, canvas.ImageFillStretch, 1.0, painter.VectorPad(rect))
}

// drawText renders a text object, adjusting pos for horizontal alignment and
// vertical centering within its container.
func (p *glPainter) drawText(text *canvas.Text, pos fyne.Position, frame fyne.Size) {
	if text.Text == "" || text.Text == " " {
		return
	}

	size := text.MinSize()
	containerSize := text.Size()
	switch text.Alignment {
	case fyne.TextAlignTrailing:
		pos = fyne.NewPos(pos.X+containerSize.Width-size.Width, pos.Y)
	case fyne.TextAlignCenter:
		pos = fyne.NewPos(pos.X+(containerSize.Width-size.Width)/2, pos.Y)
	}

	if containerSize.Height > size.Height {
		pos = fyne.NewPos(pos.X, pos.Y+(containerSize.Height-size.Height)/2)
	}

	p.drawTextureWithDetails(text, p.newGlTextTexture, pos, size, frame, canvas.ImageFillStretch, 1.0, 0)
}

// drawObject dispatches a visible canvas object to its type-specific renderer.
func (p *glPainter) drawObject(o fyne.CanvasObject, pos fyne.Position, frame fyne.Size) {
	if !o.Visible() {
		return
	}
	switch obj := o.(type) {
	case *canvas.Circle:
		p.drawCircle(obj, pos, frame)
	case *canvas.Line:
		p.drawLine(obj, pos, frame)
	case *canvas.Image:
		p.drawImage(obj, pos, frame)
	case *canvas.Raster:
		p.drawRaster(obj, pos, frame)
	case *canvas.Rectangle:
		p.drawRectangle(obj, pos, frame)
	case *canvas.Text:
		p.drawText(obj, pos, frame)
	case *canvas.LinearGradient:
		p.drawGradient(obj, p.newGlLinearGradientTexture, pos, frame)
	case *canvas.RadialGradient:
		p.drawGradient(obj, p.newGlRadialGradientTexture, pos, frame)
	}
}

// lineCoords converts a line into normalized device coordinates plus per-
// vertex normals, returning the vertex data, the half stroke width and the
// feather width (both in normal-vector units).
func (p *glPainter) lineCoords(pos, pos1, pos2 fyne.Position, lineWidth, feather float32, frame fyne.Size) ([]float32, float32, float32) {
	// Shift line coordinates so that they match the target position.
	xPosDiff := pos.X - fyne.Min(pos1.X, pos2.X)
	yPosDiff := pos.Y - fyne.Min(pos1.Y, pos2.Y)
	pos1.X = roundToPixel(pos1.X+xPosDiff, p.pixScale)
	pos1.Y = roundToPixel(pos1.Y+yPosDiff, p.pixScale)
	pos2.X = roundToPixel(pos2.X+xPosDiff, p.pixScale)
	pos2.Y = roundToPixel(pos2.Y+yPosDiff, p.pixScale)

	if lineWidth <= 1 {
		offset := float32(0.5) // adjust location for lines < 1pt on regular display
		if lineWidth <= 0.5 && p.pixScale > 1 { // and for 1px drawing on HiDPI (width 0.5)
			offset = 0.25
		}
		// Only axis-aligned thin lines get the sub-pixel nudge.
		if pos1.X == pos2.X {
			pos1.X -= offset
			pos2.X -= offset
		}
		if pos1.Y == pos2.Y {
			pos1.Y -= offset
			pos2.Y -= offset
		}
	}

	// Map both endpoints into the [-1, 1] GL coordinate space (y inverted).
	x1Pos := pos1.X / frame.Width
	x1 := -1 + x1Pos*2
	y1Pos := pos1.Y / frame.Height
	y1 := 1 - y1Pos*2
	x2Pos := pos2.X / frame.Width
	x2 := -1 + x2Pos*2
	y2Pos := pos2.Y / frame.Height
	y2 := 1 - y2Pos*2

	// Unit normal perpendicular to the line, in normalized coordinates.
	normalX := (pos2.Y - pos1.Y) / frame.Width
	normalY := (pos2.X - pos1.X) / frame.Height
	dirLength := float32(math.Sqrt(float64(normalX*normalX + normalY*normalY)))
	normalX /= dirLength
	normalY /= dirLength

	normalObjX := normalX * 0.5 * frame.Width
	normalObjY := normalY * 0.5 * frame.Height
	widthMultiplier := float32(math.Sqrt(float64(normalObjX*normalObjX + normalObjY*normalObjY)))
	halfWidth := (roundToPixel(lineWidth+feather, p.pixScale) * 0.5) / widthMultiplier
	featherWidth := feather / widthMultiplier

	// Two triangles forming a quad around the line segment.
	return []float32{
		// coord x, y normal x, y
		x1, y1, normalX, normalY,
		x2, y2, normalX, normalY,
		x2, y2, -normalX, -normalY,
		x2, y2, -normalX, -normalY,
		x1, y1, normalX, normalY,
		x1, y1, -normalX, -normalY,
	}, halfWidth, featherWidth
}

// rectCoords calculates the openGL coordinate space of a rectangle
func (p *glPainter) rectCoords(size fyne.Size, pos fyne.Position, frame fyne.Size,
	fill canvas.ImageFill, aspect float32, pad float32) []float32 {
	size, pos = rectInnerCoords(size, pos, fill, aspect)
	size, pos = roundToPixelCoords(size, pos, p.pixScale)

	// Map the padded rectangle into [-1, 1] device coordinates (y inverted).
	xPos := (pos.X - pad) / frame.Width
	x1 := -1 + xPos*2
	x2Pos := (pos.X + size.Width + pad) / frame.Width
	x2 := -1 + x2Pos*2
	yPos := (pos.Y - pad) / frame.Height
	y1 := 1 - yPos*2
	y2Pos := (pos.Y + size.Height + pad) / frame.Height
	y2 := 1 - y2Pos*2

	return []float32{
		// coord x, y, z texture x, y
		x1, y2, 0, 0.0, 1.0, // top left
		x1, y1, 0, 0.0, 0.0, // bottom left
		x2, y2, 0, 1.0, 1.0, // top right
		x2, y1, 0, 1.0, 0.0, // bottom right
	}
}

// rectInnerCoords shrinks and recenters the rectangle when the fill mode must
// preserve the image aspect ratio; other fill modes pass through unchanged.
func rectInnerCoords(size fyne.Size, pos fyne.Position, fill canvas.ImageFill, aspect float32) (fyne.Size, fyne.Position) {
	if fill == canvas.ImageFillContain || fill == canvas.ImageFillOriginal {
		// change pos and size accordingly

		viewAspect := size.Width / size.Height

		newWidth, newHeight := size.Width, size.Height
		widthPad, heightPad := float32(0), float32(0)
		if viewAspect > aspect {
			newWidth = size.Height * aspect
			widthPad = (size.Width - newWidth) / 2
		} else if viewAspect < aspect {
			newHeight = size.Width / aspect
			heightPad = (size.Height - newHeight) / 2
		}

		return fyne.NewSize(newWidth, newHeight), fyne.NewPos(pos.X+widthPad, pos.Y+heightPad)
	}

	return size, pos
}

// roundToPixel snaps a logical coordinate to the nearest device pixel for the
// given scale factor.
func roundToPixel(v float32, pixScale float32) float32 {
	if pixScale == 1.0 {
		return float32(math.Round(float64(v)))
	}

	return float32(math.Round(float64(v*pixScale))) / pixScale
}

// roundToPixelCoords pixel-snaps both the size and position of a rectangle.
func roundToPixelCoords(size fyne.Size, pos fyne.Position, pixScale float32) (fyne.Size, fyne.Position) {
	size.Width = roundToPixel(size.Width, pixScale)
	size.Height = roundToPixel(size.Height, pixScale)
	pos.X = roundToPixel(pos.X, pixScale)
	pos.Y = roundToPixel(pos.Y, pixScale)

	return size, pos
}
internal/painter/gl/draw.go
0.682574
0.446555
draw.go
starcoder
package gorgonia import ( "fmt" "hash" "math" "sort" "github.com/chewxy/hm" "github.com/pkg/errors" "gorgonia.org/tensor" ) type sparsemaxOp struct { axis int } func newSparsemaxOp(axes ...int) *sparsemaxOp { axis := -1 if len(axes) > 0 { axis = axes[0] } sparsemaxop := &sparsemaxOp{ axis: axis, } return sparsemaxop } // Sparsemax - implements the sparsemax operation described here: http://proceedings.mlr.press/v48/martins16.pdf func Sparsemax(x *Node, axes ...int) (*Node, error) { op := newSparsemaxOp(axes...) return ApplyOp(op, x) } func (op *sparsemaxOp) Arity() int { return 1 } func (op *sparsemaxOp) ReturnsPtr() bool { return false } func (op *sparsemaxOp) CallsExtern() bool { return false } func (op *sparsemaxOp) WriteHash(h hash.Hash) { fmt.Fprintf(h, "Sparsemax{}()") } func (op *sparsemaxOp) Hashcode() uint32 { return simpleHash(op) } func (op *sparsemaxOp) String() string { return fmt.Sprintf("Sparsemax{}()") } func (op *sparsemaxOp) InferShape(inputs ...DimSizer) (tensor.Shape, error) { s := inputs[0].(tensor.Shape).Clone() return s, nil } func (op *sparsemaxOp) Type() hm.Type { a := hm.TypeVariable('a') return hm.NewFnType(a, a) } func (op *sparsemaxOp) OverwritesInput() int { return -1 } func (op *sparsemaxOp) checkInput(inputs ...Value) (tensor.Tensor, error) { if err := checkArity(op, len(inputs)); err != nil { return nil, err } var in tensor.Tensor var ok bool if in, ok = inputs[0].(tensor.Tensor); !ok { return nil, errors.Errorf("Expected input to be a tensor, got %T", inputs[0]) } return in, nil } func (op *sparsemaxOp) Do(inputs ...Value) (Value, error) { inputTensor, err := op.checkInput(inputs...) if err != nil { return nil, fmt.Errorf("Can't check Sparsemax input: %w", err) } inputShape := inputTensor.Shape() if op.axis != -1 { axes := make([]int, inputTensor.Dims()) axes[op.axis] = 1 inputTensor, err = tensor.Transpose(inputTensor, axes...) 
if err != nil { return nil, fmt.Errorf("error tranposing the input tensor: %w", err) } } var output interface{} switch inputTensor.Dtype() { case tensor.Float64: output, err = op.float64sparseMax(inputTensor) if err != nil { return nil, err } case tensor.Float32: output, err = op.float32sparseMax(inputTensor) if err != nil { return nil, err } default: return nil, fmt.Errorf("invalid input type for Sparsemax, expected float64 or float32, got: %v", inputTensor.Dtype()) } return tensor.New(tensor.Of(inputTensor.Dtype()), tensor.WithShape(inputShape.Clone()...), tensor.WithEngine(inputTensor.Engine()), tensor.WithBacking(output)), nil } // FIXME: go2 generics func (op *sparsemaxOp) float32sparseMax(inputTensor tensor.Tensor) (interface{}, error) { inputData := inputTensor.Data().([]float32) dims := inputTensor.Dims() it := 0 to := inputTensor.Shape()[dims-1] from := tensor.Shape(inputTensor.Shape()[0 : dims-1]).TotalSize() if from == 0 { from = 1 } maxValues := make([]float32, from) for i := 0; i < from; i++ { maxValue := float32(-math.MaxFloat32) for j := 0; j < to; j++ { if inputData[it] > maxValue { maxValue = inputData[it] } it++ } maxValues[i] = maxValue } // this is math trick for numerical stability stableInput := make([]float32, len(inputData)) it = 0 for i := 0; i < from; i++ { for j := 0; j < to; j++ { stableInput[it] = inputData[it] - maxValues[i] it++ } } sortedData := make([]float32, len(inputData)) copy(sortedData, stableInput) it = 0 for i := 0; i < from; i++ { start := it it += to sort.Slice(sortedData[start:it], func(i, j int) bool { return sortedData[start:it][i] > sortedData[start:it][j] }) } thresholds := make([]float32, from) it = 0 for i := 0; i < from; i++ { cumSum := float32(0.0) prevCum := float32(0.0) maxIndex := 0 for j := 0; j < to; j++ { k := 1 + float32(j+1)*sortedData[it] prevCum += sortedData[it] if k > prevCum { maxIndex = j + 1 cumSum += sortedData[it] } it++ } thresholds[i] = (cumSum - 1) / float32(maxIndex) } output := 
make([]float32, len(stableInput)) it = 0 for i := 0; i < from; i++ { for j := 0; j < to; j++ { vF := stableInput[it] if vF-thresholds[i] > 0 { output[it] = vF - thresholds[i] } it++ } } return output, nil } func (op *sparsemaxOp) float64sparseMax(inputTensor tensor.Tensor) (interface{}, error) { inputData := inputTensor.Data().([]float64) dims := inputTensor.Dims() it := 0 to := inputTensor.Shape()[dims-1] from := tensor.Shape(inputTensor.Shape()[0 : dims-1]).TotalSize() if from == 0 { from = 1 } maxValues := make([]float64, from) for i := 0; i < from; i++ { maxValue := -math.MaxFloat64 for j := 0; j < to; j++ { if inputData[it] > maxValue { maxValue = inputData[it] } it++ } maxValues[i] = maxValue } // this is math trick for numerical stability stableInput := make([]float64, len(inputData)) it = 0 for i := 0; i < from; i++ { for j := 0; j < to; j++ { stableInput[it] = inputData[it] - maxValues[i] it++ } } sortedData := make([]float64, len(inputData)) copy(sortedData, stableInput) it = 0 for i := 0; i < from; i++ { start := it it += to sort.Slice(sortedData[start:it], func(i, j int) bool { return sortedData[start:it][i] > sortedData[start:it][j] }) } thresholds := make([]float64, from) it = 0 for i := 0; i < from; i++ { cumSum := 0.0 prevCum := 0.0 maxIndex := 0 for j := 0; j < to; j++ { k := 1 + float64(j+1)*sortedData[it] prevCum += sortedData[it] if k > prevCum { maxIndex = j + 1 cumSum += sortedData[it] } it++ } thresholds[i] = (cumSum - 1) / float64(maxIndex) } output := make([]float64, len(stableInput)) it = 0 for i := 0; i < from; i++ { for j := 0; j < to; j++ { vF := stableInput[it] if vF-thresholds[i] > 0 { output[it] = vF - thresholds[i] } it++ } } return output, nil } // DoDiff calculates the diff and sets its value to the output node. Implementation for ADOp interface. 
func (op *sparsemaxOp) DoDiff(ctx ExecutionContext, inputs Nodes, output *Node) error { if len(inputs) != 2 { return fmt.Errorf("SparsemaxOp.DoDiff needs 2 arguments") } odv := output.boundTo.(*dualValue) odvd := odv.Value.(tensor.Tensor) diffOp := &sparsemaxDiffOp{} result, err := diffOp.Do(odvd, inputs[1].boundTo) if err != nil { return err } err = result.(*tensor.Dense).Reshape(odvd.Shape()...) if err != nil { return err } sum, err := odvd.(*tensor.Dense).Add(result.(*tensor.Dense), tensor.UseUnsafe()) if err != nil { return err } odv.d = sum return nil } // SymDiff applies the diff op. Implementation for SDOp interface. func (op *sparsemaxOp) SymDiff(inputs Nodes, output, grad *Node) (Nodes, error) { err := checkArity(op, len(inputs)) if err != nil { return nil, err } t := output diffOp := &sparsemaxDiffOp{} nodes := make(Nodes, 1) nodes[0], err = ApplyOp(diffOp, t, grad) return nodes, err } // DiffWRT is an implementation for the SDOp interface func (op *sparsemaxOp) DiffWRT(inputs int) []bool { if inputs != 1 { panic(fmt.Sprintf("sparsemax operator only supports one input, got %d instead", inputs)) } return []bool{true} } type sparsemaxDiffOp struct { } func newSparsemaxOpDiff() *sparsemaxDiffOp { return &sparsemaxDiffOp{} } func (op *sparsemaxDiffOp) Arity() int { return 2 } func (op *sparsemaxDiffOp) ReturnsPtr() bool { return false } func (op *sparsemaxDiffOp) CallsExtern() bool { return false } func (op *sparsemaxDiffOp) WriteHash(h hash.Hash) { fmt.Fprintf(h, "SparsemaxDiff{}()") } func (op *sparsemaxDiffOp) Hashcode() uint32 { return simpleHash(op) } func (op *sparsemaxDiffOp) String() string { return fmt.Sprintf("SparsemaxDiff{}()") } func (op *sparsemaxDiffOp) InferShape(inputs ...DimSizer) (tensor.Shape, error) { s := inputs[0].(tensor.Shape).Clone() return s, nil } func (op *sparsemaxDiffOp) Type() hm.Type { a := hm.TypeVariable('a') return hm.NewFnType(a, a, a) } func (op *sparsemaxDiffOp) OverwritesInput() int { return -1 } func (op 
*sparsemaxDiffOp) checkInput(inputs ...Value) (*tensor.Dense, *tensor.Dense, error) { if err := checkArity(op, len(inputs)); err != nil { return nil, nil, err } var ( in *tensor.Dense gradient *tensor.Dense ok bool ) switch t := inputs[0].(type) { case *dualValue: if in, ok = t.Value.(*tensor.Dense); !ok { return nil, nil, errors.Errorf("input should be a tensor.Tensor, got %T", inputs[0]) } case *tensor.Dense: in = t default: return nil, nil, errors.Errorf("input type is not supported, got %T", inputs[0]) } switch t := inputs[1].(type) { case *dualValue: if gradient, ok = t.Value.(*tensor.Dense); !ok { return nil, nil, errors.Errorf("gradient should be a tensor, got %T", inputs[1]) } case *tensor.Dense: gradient = t default: return nil, nil, errors.Errorf("gradient type is not supported, got %T", inputs[1]) } return in, gradient, nil } func (op *sparsemaxDiffOp) mul(a tensor.Tensor, b tensor.Tensor) (tensor.Tensor, error) { if a.Dims() != b.Dims() { return tensor.Outer(a, b) } return tensor.Mul(a, b) } func (op *sparsemaxDiffOp) Do(inputs ...Value) (Value, error) { inputTensor, gradTensor, err := op.checkInput(inputs...) 
if err != nil { return nil, fmt.Errorf("Can't check SparsemaxDiff input: %w", err) } if inputTensor.Size() != gradTensor.Size() { return nil, fmt.Errorf("sparsemaxDiffOp.Do inputs sizes should be equal") } var zero interface{} if inputTensor.Dtype() == tensor.Float32 { zero = float32(0.0) } else { zero = float64(0.0) } nonZeros, err := inputTensor.ElNeScalar(zero, false, tensor.AsSameType()) if err != nil { return nil, fmt.Errorf("sparsemaxDiffOp.Do failed to get non-zeros: %w", err) } mul, err := op.mul(nonZeros, gradTensor) if err != nil { return nil, fmt.Errorf("sparsemaxDiffOp.Do failed to mul grad tensor: %w", err) } a, err := tensor.Sum(mul, 1) if err != nil { return nil, err } b, err := tensor.Sum(nonZeros, 1) if err != nil { return nil, err } sum, err := tensor.Div(a, b) if err != nil { return nil, err } if sum.Dims() == 1 && gradTensor.Dims() == 2 { err := sum.Reshape(sum.Shape()[0], 1) if err != nil { return nil, err } sum, err = tensor.Repeat(sum, 1, gradTensor.Shape()[1]) if err != nil { panic(err) } } sub, err := tensor.Sub(gradTensor, sum) if err != nil { return nil, err } result, err := op.mul(nonZeros, sub) if err != nil { return nil, err } return result, nil } // ensure it complies with the Op interface var ( _ Op = &sparsemaxDiffOp{} _ Op = &sparsemaxOp{} _ SDOp = &sparsemaxOp{} _ ADOp = &sparsemaxOp{} )
op_sparsemax.go
0.715523
0.535463
op_sparsemax.go
starcoder
package eaopt import ( "errors" "fmt" "math/rand" "sort" ) // Selector chooses a subset of size n from a group of individuals. The group of // individuals a Selector is applied to is expected to be sorted. type Selector interface { Apply(n uint, indis Individuals, rng *rand.Rand) (selected Individuals, indexes []int, err error) Validate() error } // SelElitism selection returns the n best individuals of a group. type SelElitism struct{} // Apply SelElitism. func (sel SelElitism) Apply(n uint, indis Individuals, rng *rand.Rand) (Individuals, []int, error) { indis.SortByFitness() return indis[:n].Clone(rng), newInts(n), nil } // Validate SelElitism fields. func (sel SelElitism) Validate() error { return nil } // SelTournament samples individuals through tournament selection. The // tournament is composed of randomly chosen individuals. The winner of the // tournament is the chosen individual with the lowest fitness. The obtained // individuals are all distinct, in other words there are no repetitions. type SelTournament struct { NContestants uint } // Apply SelTournament. 
func (sel SelTournament) Apply(n uint, indis Individuals, rng *rand.Rand) (Individuals, []int, error) { // Check that the number of individuals is large enough if uint(len(indis))-n < sel.NContestants-1 || len(indis) < int(n) { return nil, nil, fmt.Errorf("Not enough individuals to select %d "+ "with NContestants = %d, have %d individuals and need at least %d", n, sel.NContestants, len(indis), sel.NContestants+n-1) } var ( winners = make(Individuals, n) indexes = make([]int, n) notSelectedIdxs = newInts(uint(len(indis))) ) for i := range winners { // Sample contestants var ( contestants, idxs, _ = sampleInts(notSelectedIdxs, sel.NContestants, rng) winnerIdx int ) // Find the best contestant winners[i] = indis[contestants[0]] winners[i].Evaluate() winnerIdx = idxs[0] for j, idx := range contestants { if indis[idx].GetFitness() < winners[i].Fitness { winners[i] = indis[idx] indexes[i] = idx winnerIdx = idxs[j] } } // Ban the winner from re-participating notSelectedIdxs = append(notSelectedIdxs[:winnerIdx], notSelectedIdxs[winnerIdx+1:]...) } return winners.Clone(rng), indexes, nil } // Validate SelTournament fields. func (sel SelTournament) Validate() error { if sel.NContestants < 1 { return errors.New("NContestants should be higher than 0") } return nil } // SelRoulette samples individuals through roulette wheel selection (also known // as fitness proportionate selection). type SelRoulette struct{} func buildWheel(fitnesses []float64) []float64 { var ( n = len(fitnesses) wheel = make([]float64, n) ) for i, v := range fitnesses { wheel[i] = fitnesses[n-1] - v + 1 } return cumsum(divide(wheel, sumFloat64s(wheel))) } // Apply SelRoulette. 
func (sel SelRoulette) Apply(n uint, indis Individuals, rng *rand.Rand) (Individuals, []int, error) { var ( selected = make(Individuals, n) indexes = make([]int, n) wheel = buildWheel(indis.getFitnesses()) ) for i := range selected { var ( index = sort.SearchFloat64s(wheel, rand.Float64()) winner = indis[index] ) indexes[i] = index selected[i] = winner } return selected.Clone(rng), indexes, nil } // Validate SelRoulette fields. func (sel SelRoulette) Validate() error { return nil }
selection.go
0.644896
0.646139
selection.go
starcoder
package msgraph // RatingAustraliaMoviesType undocumented type RatingAustraliaMoviesType string const ( // RatingAustraliaMoviesTypeVAllAllowed undocumented RatingAustraliaMoviesTypeVAllAllowed RatingAustraliaMoviesType = "AllAllowed" // RatingAustraliaMoviesTypeVAllBlocked undocumented RatingAustraliaMoviesTypeVAllBlocked RatingAustraliaMoviesType = "AllBlocked" // RatingAustraliaMoviesTypeVGeneral undocumented RatingAustraliaMoviesTypeVGeneral RatingAustraliaMoviesType = "General" // RatingAustraliaMoviesTypeVParentalGuidance undocumented RatingAustraliaMoviesTypeVParentalGuidance RatingAustraliaMoviesType = "ParentalGuidance" // RatingAustraliaMoviesTypeVMature undocumented RatingAustraliaMoviesTypeVMature RatingAustraliaMoviesType = "Mature" // RatingAustraliaMoviesTypeVAgesAbove15 undocumented RatingAustraliaMoviesTypeVAgesAbove15 RatingAustraliaMoviesType = "AgesAbove15" // RatingAustraliaMoviesTypeVAgesAbove18 undocumented RatingAustraliaMoviesTypeVAgesAbove18 RatingAustraliaMoviesType = "AgesAbove18" ) // RatingAustraliaMoviesTypePAllAllowed returns a pointer to RatingAustraliaMoviesTypeVAllAllowed func RatingAustraliaMoviesTypePAllAllowed() *RatingAustraliaMoviesType { v := RatingAustraliaMoviesTypeVAllAllowed return &v } // RatingAustraliaMoviesTypePAllBlocked returns a pointer to RatingAustraliaMoviesTypeVAllBlocked func RatingAustraliaMoviesTypePAllBlocked() *RatingAustraliaMoviesType { v := RatingAustraliaMoviesTypeVAllBlocked return &v } // RatingAustraliaMoviesTypePGeneral returns a pointer to RatingAustraliaMoviesTypeVGeneral func RatingAustraliaMoviesTypePGeneral() *RatingAustraliaMoviesType { v := RatingAustraliaMoviesTypeVGeneral return &v } // RatingAustraliaMoviesTypePParentalGuidance returns a pointer to RatingAustraliaMoviesTypeVParentalGuidance func RatingAustraliaMoviesTypePParentalGuidance() *RatingAustraliaMoviesType { v := RatingAustraliaMoviesTypeVParentalGuidance return &v } // RatingAustraliaMoviesTypePMature returns a pointer to 
RatingAustraliaMoviesTypeVMature func RatingAustraliaMoviesTypePMature() *RatingAustraliaMoviesType { v := RatingAustraliaMoviesTypeVMature return &v } // RatingAustraliaMoviesTypePAgesAbove15 returns a pointer to RatingAustraliaMoviesTypeVAgesAbove15 func RatingAustraliaMoviesTypePAgesAbove15() *RatingAustraliaMoviesType { v := RatingAustraliaMoviesTypeVAgesAbove15 return &v } // RatingAustraliaMoviesTypePAgesAbove18 returns a pointer to RatingAustraliaMoviesTypeVAgesAbove18 func RatingAustraliaMoviesTypePAgesAbove18() *RatingAustraliaMoviesType { v := RatingAustraliaMoviesTypeVAgesAbove18 return &v }
v1.0/RatingAustraliaMoviesTypeEnum.go
0.59843
0.420124
RatingAustraliaMoviesTypeEnum.go
starcoder
package collections const blockSize = 8 // FloatArray represents a float array type FloatArray interface { // Iterator returns an iterator over the array Iterator() FloatArrayIterator // GetValue returns value with pos, if has not value return 0 GetValue(pos int) float64 // HasValue returns if has value with pos HasValue(pos int) bool // SetValue sets value with pos, if pos out of bounds, return it SetValue(pos int, value float64) // IsEmpty tests if array is empty IsEmpty() bool // Size returns size of array Size() int // Capacity returns the capacity of array Capacity() int // Marks returns the marks of array Marks() []uint8 Reset() } // floatArray represents a float array, support mark pos if has value type floatArray struct { marks []uint8 values []float64 capacity int size int it *floatArrayIterator } // NewFloatArray creates a float array func NewFloatArray(capacity int) FloatArray { markLen := capacity / blockSize if capacity%blockSize > 0 { markLen++ } return &floatArray{ capacity: capacity, values: make([]float64, capacity), marks: make([]uint8, markLen), } } // HasValue returns if has value with pos func (f *floatArray) HasValue(pos int) bool { if !f.checkPos(pos) { return false } blockIdx := pos / blockSize idx := pos % blockSize mark := f.marks[blockIdx] return mark&(1<<uint64(idx)) != 0 } // GetValue returns value with pos, if has not value return 0 func (f *floatArray) GetValue(pos int) float64 { if !f.checkPos(pos) { return 0 } return f.values[pos] } // SetValue sets value with pos, if pos out of bounds, return it func (f *floatArray) SetValue(pos int, value float64) { if !f.checkPos(pos) { return } f.values[pos] = value if !f.HasValue(pos) { blockIdx := pos / blockSize idx := pos - pos/blockSize*blockSize mark := f.marks[blockIdx] mark |= 1 << uint64(idx) f.marks[blockIdx] = mark f.size++ } } // IsEmpty tests if array is empty func (f *floatArray) IsEmpty() bool { return f.size == 0 } // Size returns size of array func (f *floatArray) Size() int { 
return f.size } // Iterator returns an iterator over the array func (f *floatArray) Iterator() FloatArrayIterator { if f.it == nil { f.it = newFloatArrayIterator(f) } else { f.it.reset() } return f.it } // Capacity returns the capacity of array func (f *floatArray) Capacity() int { return f.capacity } // Marks returns the marks of array func (f *floatArray) Marks() []uint8 { return f.marks } // checkPos checks pos if out of bounds func (f *floatArray) checkPos(pos int) bool { if pos < 0 || pos >= f.capacity { return false } return true } func (f *floatArray) Reset() { f.size = 0 for i := range f.marks { f.marks[i] = 0 } } // FloatArrayIterator represents a float array iterator type FloatArrayIterator interface { // HasNext returns if this iterator has more values HasNext() bool // Next returns the next value and index Next() (idx int, value float64) } // floatArrayIterator represents a float array iterator type floatArrayIterator struct { fa FloatArray idx int count int hasValue bool marks []uint8 mark uint8 } // newFloatArrayIterator creates a float array iterator func newFloatArrayIterator(fa FloatArray) *floatArrayIterator { return &floatArrayIterator{ fa: fa, hasValue: true, marks: fa.Marks(), } } func (it *floatArrayIterator) reset() { it.idx = 0 it.count = 0 it.marks = it.fa.Marks() it.hasValue = true } // HasNext returns if this iterator has more values func (it *floatArrayIterator) HasNext() bool { for it.idx < it.fa.Capacity() && it.count < it.fa.Size() { blockIdx := it.idx / blockSize idx := it.idx % blockSize if idx == 0 { it.mark = it.marks[blockIdx] } it.idx++ if it.mark&(1<<uint64(idx)) != 0 { it.count++ return true } } it.hasValue = false return false } // Next returns the next value and index func (it *floatArrayIterator) Next() (idx int, value float64) { if !it.hasValue { return -1, 0 } idx = it.idx - 1 value = it.fa.GetValue(idx) return idx, value }
pkg/collections/array_list.go
0.838812
0.557665
array_list.go
starcoder
package twistededwards import ( "math/big" "github.com/consensys/gnark-crypto/ecc" "github.com/consensys/gnark/frontend" ) // Point point on a twisted Edwards curve in a Snark cs type Point struct { X, Y frontend.Variable } // MustBeOnCurve checks if a point is on the twisted Edwards curve // ax^2 + y^2 = 1 + d*x^2*y^2 func (p *Point) MustBeOnCurve(cs *frontend.ConstraintSystem, curve EdCurve) { one := big.NewInt(1) l1 := cs.Mul(p.X, &curve.A) axx := cs.Mul(l1, p.X) yy := cs.Mul(p.Y, p.Y) lhs := cs.Add(axx, yy) l1 = cs.Mul(p.X, &curve.D) dxx := cs.Mul(l1, p.X) dxxyy := cs.Mul(dxx, yy) rhs := cs.Add(dxxyy, one) cs.AssertIsEqual(lhs, rhs) } // AddFixedPoint Adds two points, among which is one fixed point (the base), on a twisted edwards curve (eg jubjub) // p1, base, ecurve are respectively: the point to add, a known base point, and the parameters of the twisted edwards curve func (p *Point) AddFixedPoint(cs *frontend.ConstraintSystem, p1 *Point /*basex*/, x /*basey*/, y interface{}, curve EdCurve) *Point { // https://eprint.iacr.org/2008/013.pdf n11 := cs.Mul(p1.X, y) n12 := cs.Mul(p1.Y, x) n1 := cs.Add(n11, n12) n21 := cs.Mul(p1.Y, y) n22 := cs.Mul(p1.X, x, curve.A) n2 := cs.Sub(n21, n22) d11 := cs.Mul(curve.D, x, y, p1.X, p1.Y) d1 := cs.Add(1, d11) d2 := cs.Sub(1, d11) p.X = cs.Div(n1, d1) p.Y = cs.Div(n2, d2) return p } // AddGeneric Adds two points on a twisted edwards curve (eg jubjub) // p1, p2, c are respectively: the point to add, a known base point, and the parameters of the twisted edwards curve func (p *Point) AddGeneric(cs *frontend.ConstraintSystem, p1, p2 *Point, curve EdCurve) *Point { // https://eprint.iacr.org/2008/013.pdf n11 := cs.Mul(p1.X, p2.Y) n12 := cs.Mul(p1.Y, p2.X) n1 := cs.Add(n11, n12) n21 := cs.Mul(p1.Y, p2.Y) n22 := cs.Mul(p1.X, p2.X, curve.A) n2 := cs.Sub(n21, n22) d11 := cs.Mul(curve.D, p2.X, p2.Y, p1.X, p1.Y) d1 := cs.Add(1, d11) d2 := cs.Sub(1, d11) p.X = cs.Div(n1, d1) p.Y = cs.Div(n2, d2) return p } // Double doubles a points in 
SNARK coordinates func (p *Point) Double(cs *frontend.ConstraintSystem, p1 *Point, curve EdCurve) *Point { p.AddGeneric(cs, p1, p1, curve) return p } // ScalarMulNonFixedBase computes the scalar multiplication of a point on a twisted Edwards curve // p1: base point (as snark point) // curve: parameters of the Edwards curve // scal: scalar as a SNARK constraint // Standard left to right double and add func (p *Point) ScalarMulNonFixedBase(cs *frontend.ConstraintSystem, p1 *Point, scalar frontend.Variable, curve EdCurve) *Point { // first unpack the scalar // TODO handle this properly (put the size in curve struct probably) var frSize int if curve.ID == ecc.BW6_761 { frSize = 384 } else { frSize = 256 } b := cs.ToBinary(scalar, frSize) res := Point{ cs.Constant(0), cs.Constant(1), } for i := len(b) - 1; i >= 0; i-- { res.Double(cs, &res, curve) tmp := Point{} tmp.AddGeneric(cs, &res, p1, curve) res.X = cs.Select(b[i], tmp.X, res.X) res.Y = cs.Select(b[i], tmp.Y, res.Y) } p.X = res.X p.Y = res.Y return p } // ScalarMulFixedBase computes the scalar multiplication of a point on a twisted Edwards curve // x, y: coordinates of the base point // curve: parameters of the Edwards curve // scal: scalar as a SNARK constraint // Standard left to right double and add func (p *Point) ScalarMulFixedBase(cs *frontend.ConstraintSystem, x, y interface{}, scalar frontend.Variable, curve EdCurve) *Point { // first unpack the scalar // TODO handle this properly (put the size in curve struct probably) var frSize int if curve.ID == ecc.BW6_761 { frSize = 384 } else { frSize = 256 } b := cs.ToBinary(scalar, frSize) res := Point{ cs.Constant(0), cs.Constant(1), } for i := len(b) - 1; i >= 0; i-- { res.Double(cs, &res, curve) tmp := Point{} tmp.AddFixedPoint(cs, &res, x, y, curve) res.X = cs.Select(b[i], tmp.X, res.X) res.Y = cs.Select(b[i], tmp.Y, res.Y) } p.X = res.X p.Y = res.Y return p }
std/algebra/twistededwards/point.go
0.775945
0.468061
point.go
starcoder
package parseutil import ( "github.com/lighttiger2505/sqls/ast" "github.com/lighttiger2505/sqls/ast/astutil" ) func ExtractSelectExpr(parsed ast.TokenList) []ast.Node { prefixMatcher := astutil.NodeMatcher{ ExpectKeyword: []string{ "SELECT", "ALL", "DISTINCT", }, } peekMatcher := astutil.NodeMatcher{ NodeTypes: []ast.NodeType{ ast.TypeIdentiferList, ast.TypeIdentifer, ast.TypeMemberIdentifer, ast.TypeOperator, ast.TypeAliased, ast.TypeParenthesis, ast.TypeFunctionLiteral, }, } return filterPrefixGroup(astutil.NewNodeReader(parsed), prefixMatcher, peekMatcher) } func ExtractTableReferences(parsed ast.TokenList) []ast.Node { prefixMatcher := astutil.NodeMatcher{ ExpectKeyword: []string{ "FROM", "UPDATE", }, } peekMatcher := astutil.NodeMatcher{ NodeTypes: []ast.NodeType{ ast.TypeIdentiferList, ast.TypeIdentifer, ast.TypeMemberIdentifer, ast.TypeAliased, }, } return filterPrefixGroupOnce(astutil.NewNodeReader(parsed), prefixMatcher, peekMatcher) } func ExtractTableReference(parsed ast.TokenList) []ast.Node { prefixMatcher := astutil.NodeMatcher{ ExpectKeyword: []string{ "INSERT INTO", "DELETE FROM", }, } peekMatcher := astutil.NodeMatcher{ NodeTypes: []ast.NodeType{ ast.TypeIdentifer, ast.TypeMemberIdentifer, ast.TypeAliased, }, } return filterPrefixGroup(astutil.NewNodeReader(parsed), prefixMatcher, peekMatcher) } func ExtractTableFactor(parsed ast.TokenList) []ast.Node { prefixMatcher := astutil.NodeMatcher{ ExpectKeyword: []string{ "JOIN", "INNER JOIN", "CROSS JOIN", "OUTER JOIN", "LEFT JOIN", "RIGHT JOIN", "LEFT OUTER JOIN", "RIGHT OUTER JOIN", }, } peekMatcher := astutil.NodeMatcher{ NodeTypes: []ast.NodeType{ ast.TypeIdentifer, ast.TypeMemberIdentifer, ast.TypeAliased, }, } return filterPrefixGroup(astutil.NewNodeReader(parsed), prefixMatcher, peekMatcher) } func ExtractWhereCondition(parsed ast.TokenList) []ast.Node { prefixMatcher := astutil.NodeMatcher{ ExpectKeyword: []string{ "WHERE", }, } peekMatcher := astutil.NodeMatcher{ NodeTypes: []ast.NodeType{ 
ast.TypeComparison, ast.TypeIdentiferList, }, } return filterPrefixGroup(astutil.NewNodeReader(parsed), prefixMatcher, peekMatcher) } func ExtractAliasedIdentifer(parsed ast.TokenList) []ast.Node { reader := astutil.NewNodeReader(parsed) matcher := astutil.NodeMatcher{NodeTypes: []ast.NodeType{ast.TypeAliased}} aliases := reader.FindRecursive(matcher) results := []ast.Node{} for _, node := range aliases { alias, ok := node.(*ast.Aliased) if !ok { continue } list, ok := alias.RealName.(ast.TokenList) if !ok { results = append(results, node) continue } if isSubQuery(list) { continue } results = append(results, node) } return results } func filterPrefixGroup(reader *astutil.NodeReader, prefixMatcher astutil.NodeMatcher, peekMatcher astutil.NodeMatcher) []ast.Node { var results []ast.Node for reader.NextNode(false) { if reader.CurNodeIs(prefixMatcher) && reader.PeekNodeIs(true, peekMatcher) { _, node := reader.PeekNode(true) results = append(results, node) } if list, ok := reader.CurNode.(ast.TokenList); ok { newReader := astutil.NewNodeReader(list) results = append(results, filterPrefixGroup(newReader, prefixMatcher, peekMatcher)...) } } return results } func filterPrefixGroupOnce(reader *astutil.NodeReader, prefixMatcher astutil.NodeMatcher, peekMatcher astutil.NodeMatcher) []ast.Node { results := filterPrefixGroup(reader, prefixMatcher, peekMatcher) if len(results) > 0 { return []ast.Node{results[0]} } return nil }
parser/parseutil/extract.go
0.560253
0.63535
extract.go
starcoder
package main func main() { } // https://www.allaboutcircuits.com/technical-articles/understanding-transfer-functions-for-low-pass-filters/ // 1st order low pass filter has the transfer function of the form // H(s) = K / (1 + s/w0) // If we want to know the magnitude/phase information at a certain // frequency, replace s with jw at that given angular frequency // A RC low pass filter in the s-domain has // Resistance = R // Impedance = 1/sC // Vout/Vin = 1/sC / (1/sC + R) = 1/(1 + sRC) // compare to H(s), we can see K=1 and w0 = 1/RC // using standard form of H(s), we can say K is the gain at DC and // w0 is the cutoff frequency, so the gain here is 1 (unity gain) and // cutoff frequency is at 1/RC, we might want to normalize it at 2pi so // it wil be 1/(2*pi*R*C) // There is another form we can use to represent gain/cutoff frequency // H(s) = a0 / (s + w0) // H(s = 0) represents the DC gain of our filter which is a0/w0 // since it represents Vout/Vin // To see why w0 is the cutoff frequency, use following derivation // H(s) = K / (1 + s/w0) // evaluate at the cutoff frequency (at w0) // H(jw = jw0) = K / (1 + jw0/w0) = K / (1 + j) // now the denominator is the complex number (1 + j) (j is the imaginary unit) // the magnitude is then K/(sqrt(1^2 + 1^2)) = K/sqrt(2) // For phase shift theta(w) = tan^-1(y/x) // evaluate at the cutoff frequency // and get -tan^-1(w0/w0) = -tan^-1(1) = -45 degrees // maximum phase shift of a first order low pass filter is 90 degree, so // this tells us the cutoff frequency is the center of the circuit phase response, where // the filter generate half of its maximum phase shift // https://www.allaboutcircuits.com/technical-articles/understanding-poles-and-zeros-in-transfer-functions/ // To understand more about poles/zeroes use the transfer function // H(s) = K / (1 + s/w0), if we multiply top and bottom by s, we get // H(s) = Ks / (s + w0), hence we have 1 zero and 1 pole // zeroes = s=0 // pole = s=-w0 // A zero correspond to a corner 
frequency that increases 20db/decade while // a pole correspond to a corner frequency that decreases 20db/decade // In some representation such as // H(s) = a0 / (s + w0), there does not seem to be a zero since there is s in the numerator, // but we need to take the limit where lim s->z H(s) if we want to find asymptotic behavior of a filter // which case will give as H(s) goes to infinity, so the zero will be at w0=infinity // to cascade filters together, multiply the transfer function (s domain) together // filter1(t) -> filter2(t) is H1(s) * H2(s) (this is like the fourier transform where convolution becomes multiplication)
electronics/filters/lpf1.go
0.816882
0.526465
lpf1.go
starcoder
package cmaes import ( "errors" "math" "math/rand" "sort" "gonum.org/v1/gonum/floats" "gonum.org/v1/gonum/mat" ) type Solution struct { // Params is a parameter transformed to N(m, σ^2 C) from Z. Params []float64 // Value represents an evaluation value. Value float64 } // Optimizer is CMA-ES stochastic optimizer class with ask-and-tell interface. type Optimizer struct { mean *mat.VecDense sigma float64 c *mat.SymDense dim int mu int muEff float64 popsize int cc float64 c1 float64 cmu float64 cSigma float64 dSigma float64 cm float64 chiN float64 pSigma *mat.VecDense pc *mat.VecDense weights *mat.VecDense bounds mat.Matrix maxReSampling int rng *rand.Rand g int } // NewOptimizer returns an optimizer object based on CMA-ES. func NewOptimizer(mean []float64, sigma float64, opts ...OptimizerOption) (*Optimizer, error) { if sigma <= 0 { return nil, errors.New("sigma should be non-zero positive number") } dim := len(mean) popsize := 4 + int(math.Floor(3*math.Log(float64(dim)))) mu := popsize / 2 sumWeightsPrimeBeforeMu := 0. sumWeightsPrimeSquareBeforeMu := 0. sumWeightsPrimeAfterMu := 0. sumWeightsPrimeSquareAfterMu := 0. 
weightsPrime := make([]float64, popsize) weightsPrimePositiveSum := 0.0 weightsPrimeNegativeSum := 0.0 for i := 0; i < popsize; i++ { wp := math.Log((float64(popsize)+1)/2) - math.Log(float64(i+1)) weightsPrime[i] = wp if i < mu { sumWeightsPrimeBeforeMu += wp sumWeightsPrimeSquareBeforeMu += math.Pow(wp, 2) } else { sumWeightsPrimeAfterMu += weightsPrime[i] sumWeightsPrimeSquareAfterMu += math.Pow(wp, 2) } if wp > 0 { weightsPrimePositiveSum += wp } else { weightsPrimeNegativeSum -= wp } } muEff := math.Pow(sumWeightsPrimeBeforeMu, 2) / sumWeightsPrimeSquareBeforeMu muEffMinus := math.Pow(sumWeightsPrimeAfterMu, 2) / sumWeightsPrimeSquareAfterMu alphaCov := 2.0 // learning rate for the rank-one update c1 := alphaCov / (math.Pow(float64(dim)+1.3, 2) + muEff) // learning rate for the rank-μ update cmu := math.Min( 1-c1, alphaCov*(muEff-2+1/muEff)/(math.Pow(float64(dim+2), 2)+alphaCov*muEff/2), ) if c1+cmu > 1 { return nil, errors.New("invalid learning rate for the rank-one and rank-μ update") } alphaMin := math.Min( 1+c1/cmu, // α_μ- 1+(2*muEffMinus)/(muEff+2), // α_μ_eff- ) alphaMin = math.Min(alphaMin, (1-c1-cmu)/(float64(dim)*cmu)) // α_{pos_def}^{minus} weights := make([]float64, popsize) for i := 0; i < popsize; i++ { if weightsPrime[i] > 0 { weights[i] = 1 / weightsPrimePositiveSum * weightsPrime[i] } else { weights[i] = alphaMin / weightsPrimeNegativeSum * weightsPrime[i] } } cm := 1.0 // learning rate for the cumulation for the step-size control (eq.55) cSigma := (muEff + 2) / (float64(dim) + muEff + 5) dSigma := 1 + 2*math.Max(0, math.Sqrt((muEff-1)/(float64(dim)+1))-1) + cSigma if cSigma >= 1 { return nil, errors.New("invalid learning rate for cumulation for the ste-size control") } // learning rate for cumulation for the rank-one update (eq.56) cc := (4 + muEff/float64(dim)) / (float64(dim) + 4 + 2*muEff/float64(dim)) if cc > 1 { return nil, errors.New("invalid learning rate for cumulation for the rank-one update") } chiN := math.Sqrt(float64(dim)) * (1.0 
- (1.0 / (4.0 * float64(dim))) + 1.0/(21.0*(math.Pow(float64(dim), 2)))) cma := &Optimizer{ mean: mat.NewVecDense(dim, mean), sigma: sigma, c: initC(dim), dim: dim, popsize: popsize, mu: mu, muEff: muEff, cc: cc, c1: c1, cmu: cmu, cSigma: cSigma, dSigma: dSigma, cm: cm, chiN: chiN, pSigma: mat.NewVecDense(dim, make([]float64, dim)), pc: mat.NewVecDense(dim, make([]float64, dim)), weights: mat.NewVecDense(popsize, weights), bounds: nil, maxReSampling: 100, rng: rand.New(rand.NewSource(0)), g: 0, } for _, opt := range opts { opt(cma) } return cma, nil } // Generation is incremented when a multivariate normal distribution is updated. func (o *Optimizer) Generation() int { return o.g } // PopulationSize returns the population size. func (o *Optimizer) PopulationSize() int { return o.popsize } // Ask a next parameter. func (o *Optimizer) Ask() ([]float64, error) { x, err := o.sampleSolution() if err != nil { return nil, err } for i := 0; i < o.maxReSampling; i++ { if o.isFeasible(x) { return x.RawVector().Data, nil } x, err = o.sampleSolution() if err != nil { return nil, err } } err = o.repairInfeasibleParams(x) if err != nil { return nil, err } return x.RawVector().Data, nil } func (o *Optimizer) isFeasible(values *mat.VecDense) bool { if o.bounds == nil { return true } if values.Len() != o.dim { return false } for i := 0; i < o.dim; i++ { v := values.AtVec(i) if !(o.bounds.At(i, 0) < v && o.bounds.At(i, 1) > v) { return false } } return true } func (o *Optimizer) repairInfeasibleParams(values *mat.VecDense) error { if o.bounds == nil { return nil } if values.Len() != o.dim { return errors.New("invalid matrix size") } for i := 0; i < o.dim; i++ { v := values.AtVec(i) if o.bounds.At(i, 0) > v { values.SetVec(i, o.bounds.At(i, 0)) } if o.bounds.At(i, 1) < v { values.SetVec(i, o.bounds.At(i, 1)) } } return nil } func (o *Optimizer) sampleSolution() (*mat.VecDense, error) { // TODO(o-bata): Cache B and D var eigsym mat.EigenSym ok := eigsym.Factorize(o.c, true) if !ok { 
return nil, errors.New("symmetric eigendecomposition failed") } var b mat.Dense eigsym.VectorsTo(&b) d := make([]float64, o.dim) eigsym.Values(d) // d^2 floatsSqrtTo(d) // d z := make([]float64, o.dim) for i := 0; i < o.dim; i++ { z[i] = o.rng.NormFloat64() } var bd mat.Dense bd.Mul(&b, mat.NewDiagDense(o.dim, d)) values := mat.NewVecDense(o.dim, z) // ~ N(0, I) values.MulVec(&bd, values) // ~ N(0, C) values.ScaleVec(o.sigma, values) // ~ N(0, σ^2 C) values.AddVec(values, o.mean) // ~ N(m, σ^2 C) return values, nil } // Tell evaluation values. func (o *Optimizer) Tell(solutions []*Solution) error { if len(solutions) != o.popsize { return errors.New("must tell popsize-length solutions") } o.g++ sort.Slice(solutions, func(i, j int) bool { return solutions[i].Value < solutions[j].Value }) var eigsym mat.EigenSym ok := eigsym.Factorize(o.c, true) if !ok { return errors.New("symmetric eigendecomposition failed") } var b mat.Dense eigsym.VectorsTo(&b) d := make([]float64, o.dim) eigsym.Values(d) // d^2 floatsSqrtTo(d) // d yk := mat.NewDense(o.popsize, o.dim, nil) for i := 0; i < o.popsize; i++ { xi := solutions[i].Params // ~ N(m, σ^2 C) xiSubMean := make([]float64, o.dim) // ~ N(0, σ^2 C) floats.SubTo(xiSubMean, xi, o.mean.RawVector().Data) yk.SetRow(i, xiSubMean) } yk.Scale(1/o.sigma, yk) // ~ N(0, C) // Selection and recombination ydotw := mat.NewDense(o.mu, o.dim, nil) ydotw.Copy(yk.Slice(0, o.mu, 0, o.dim)) weightsmu := stackvec(o.dim, o.mu, o.weights) ydotw.MulElem(ydotw, weightsmu.T()) yw := sumColumns(ydotw.T()) meandiff := mat.NewVecDense(o.dim, nil) meandiff.CopyVec(yw) meandiff.ScaleVec(o.cm*o.sigma, meandiff) o.mean.AddVec(o.mean, meandiff) // Step-size control dinv := mat.NewDiagDense(o.dim, arrinv(d)) c2 := mat.NewDense(o.dim, o.dim, nil) c2.Product(&b, dinv, b.T()) // C^(-1/2) = B D^(-1) B^T c2yw := mat.NewDense(o.dim, 1, nil) c2yw.Product(c2, yw) c2yw.Scale(math.Sqrt(o.cSigma*(2-o.cSigma)*o.muEff), c2yw) o.pSigma.ScaleVec(1-o.cSigma, o.pSigma) 
o.pSigma.AddVec(o.pSigma, mat.NewVecDense(o.dim, c2yw.RawMatrix().Data)) normPSigma := mat.Norm(o.pSigma, 2) o.sigma *= math.Exp((o.cSigma / o.dSigma) * (normPSigma/o.chiN - 1)) hSigmaCondLeft := normPSigma / math.Sqrt( 1-math.Pow(1-o.cSigma, float64(2*(o.g+1)))) hSigmaCondRight := (1.4 + 2/float64(o.dim+1)) * o.chiN hSigma := 0.0 if hSigmaCondLeft < hSigmaCondRight { hSigma = 1.0 } // eq.45 o.pc.ScaleVec(1-o.cc, o.pc) o.pc.AddScaledVec(o.pc, hSigma*math.Sqrt(o.cc*(2-o.cc)*o.muEff), yw) // eq.46 wio := mat.NewVecDense(o.weights.Len(), nil) wio.CopyVec(o.weights) c2yk := mat.NewDense(o.dim, o.popsize, nil) c2yk.Product(c2, yk.T()) wio.MulElemVec(wio, vecapply(o.weights, func(i int, a float64) float64 { if a > 0 { return 1.0 } c2xinorm := mat.Norm(c2yk.ColView(i), 2) return float64(o.dim) / math.Pow(c2xinorm, 2) })) deltaHSigma := (1 - hSigma) * o.cc * (2 - o.cc) if deltaHSigma > 1 { panic("invalid delta_h_sigma") } // eq.47 rankOne := mat.NewSymDense(o.dim, nil) rankOne.SymOuterK(1.0, o.pc) rankMu := mat.NewSymDense(o.dim, nil) for i := 0; i < o.popsize; i++ { wi := wio.AtVec(i) yi := yk.RowView(i) s := mat.NewSymDense(o.dim, nil) s.SymOuterK(wi, yi) rankMu.AddSym(rankMu, s) } o.c.ScaleSym(1+o.c1*deltaHSigma-o.c1-o.cmu*mat.Sum(o.weights), o.c) rankOne.ScaleSym(o.c1, rankOne) rankMu.ScaleSym(o.cmu, rankMu) o.c.AddSym(o.c, rankOne) o.c.AddSym(o.c, rankMu) // Avoid eigendecomposition error by arithmetic overflow o.c.AddSym(o.c, initMinC(o.dim)) return nil } // OptimizerOption is a type of the function to customizing CMA-ES. type OptimizerOption func(*Optimizer) // OptimizerOptionSeed sets seed number. func OptimizerOptionSeed(seed int64) OptimizerOption { return func(cma *Optimizer) { cma.rng = rand.New(rand.NewSource(seed)) } } // OptimizerOptionMaxReSampling sets a number of max re-sampling. func OptimizerOptionMaxReSampling(n int) OptimizerOption { return func(cma *Optimizer) { cma.maxReSampling = n } } // OptimizerOptionBounds sets the range of parameters. 
func OptimizerOptionBounds(bounds *mat.Dense) OptimizerOption { row, column := bounds.Dims() if column != 2 { panic("invalid matrix size") } return func(cma *Optimizer) { if row != cma.dim { panic("invalid dimensions") } cma.bounds = bounds } }
vendor/github.com/c-bata/goptuna/cmaes/optimizer.go
0.722429
0.48871
optimizer.go
starcoder
package main import "bytes" /* Roman numerals are represented by seven different symbols: I, V, X, L, C, D and M. Symbol Value I 1 V 5 X 10 L 50 C 100 D 500 M 1000 For example, two is written as II in Roman numeral, just two one's added together. Twelve is written as, XII, which is simply X + II. The number twenty seven is written as XXVII, which is XX + V + II. Roman numerals are usually written largest to smallest from left to right. However, the numeral for four is not IIII. Instead, the number four is written as IV. Because the one is before the five we subtract it making four. The same principle applies to the number nine, which is written as IX. There are six instances where subtraction is used: I can be placed before V (5) and X (10) to make 4 and 9. X can be placed before L (50) and C (100) to make 40 and 90. C can be placed before D (500) and M (1000) to make 400 and 900. Given a roman numeral, convert it to an integer. Input is guaranteed to be within the range from 1 to 3999. Example 1: Input: "III" Output: 3 Example 2: Input: "IV" Output: 4 Example 3: Input: "IX" Output: 9 Example 4: Input: "LVIII" Output: 58 Explanation: L = 50, V= 5, III = 3. Example 5: Input: "MCMXCIV" Output: 1994 Explanation: M = 1000, CM = 900, XC = 90 and IV = 4. 罗马数字包含以下七种字符: I, V, X, L,C,D 和 M。 字符 数值 I 1 V 5 X 10 L 50 C 100 D 500 M 1000 例如, 罗马数字 2 写做 II ,即为两个并列的 1。12 写做 XII ,即为 X + II 。 27 写做 XXVII, 即为 XX + V + II 。 通常情况下,罗马数字中小的数字在大的数字的右边。但也存在特例,例如 4 不写做 IIII,而是 IV。数字 1 在数字 5 的左边,所表示的数等于大数 5 减小数 1 得到的数值 4 。同样地,数字 9 表示为 IX。这个特殊的规则只适用于以下六种情况: I 可以放在 V (5) 和 X (10) 的左边,来表示 4 和 9。 X 可以放在 L (50) 和 C (100) 的左边,来表示 40 和 90。 C 可以放在 D (500) 和 M (1000) 的左边,来表示 400 和 900。 给定一个罗马数字,将其转换成整数。输入确保在 1 到 3999 的范围内。 示例 1: 输入: "III" 输出: 3 示例 2: 输入: "IV" 输出: 4 示例 3: 输入: "IX" 输出: 9 示例 4: 输入: "LVIII" 输出: 58 解释: L = 50, V= 5, III = 3. 示例 5: 输入: "MCMXCIV" 输出: 1994 解释: M = 1000, CM = 900, XC = 90, IV = 4. 
*/ //解法,罗马转换,题目繁琐,个人比较讨厌,所以直接搬了国外大神一个解答 var order = [7]byte{'M', 'D', 'C', 'L', 'X', 'V', 'I'} var value = [7]int{1000, 500, 100, 50, 10, 5, 1} func romanToInt(s string) int { if "" == s { return 0 } return convert([]byte(s)) } func convert(num []byte) int { if nil == num || 0 == len(num) { return 0 } var idx int for i, c := range order { idx = bytes.IndexByte(num, c) if -1 == idx { continue } if idx != 0 { return value[i] - convert(num[:idx]) + convert(num[idx+1:]) } return value[i] + convert(num[1:]) } return 0 } func main() { }
Programs/013Roman to Integer/013Roman to Integer.go
0.605099
0.552781
013Roman to Integer.go
starcoder
package colorpicker import ( "image/color" "math" "fyne.io/fyne/v2" "fyne.io/fyne/v2/canvas" ) var ( markerFillColor = color.NRGBA{50, 50, 50, 120} markerStrokeColor = color.NRGBA{50, 50, 50, 200} ) type marker interface { fyne.CanvasObject position() fyne.Position setPosition(fyne.Position) object() fyne.CanvasObject } func setPositionX(m marker, x float32) { m.setPosition(fyne.NewPos(x, m.position().Y)) } func setPositionY(m marker, y float32) { m.setPosition(fyne.NewPos(m.position().X, y)) } type defaultMarker struct { *canvas.Circle center fyne.Position radius float32 } func newDefaultMarker(radius float32) marker { marker := &defaultMarker{ Circle: &canvas.Circle{ FillColor: markerFillColor, StrokeColor: markerStrokeColor, StrokeWidth: 1, }, radius: radius, } marker.setPosition(fyne.NewPos(0, 0)) return marker } func (m *defaultMarker) position() fyne.Position { return m.center } func (m *defaultMarker) setPosition(p fyne.Position) { m.center = p m.Position1 = fyne.NewPos(p.X-float32(m.radius), p.Y-float32(m.radius)) m.Position2 = fyne.NewPos(p.X+float32(m.radius), p.Y+float32(m.radius)) } func (m *defaultMarker) object() fyne.CanvasObject { return m.Circle } type barMarker interface { marker setPositionFromValue(v float32) calcValueFromPosition(p fyne.Position) float32 } type defaultBarMarker struct { marker } func newDefaultBarMarker(barWidth float32) barMarker { m := newDefaultMarker(barWidth / 2) return &defaultBarMarker{marker: m} } func (m *defaultBarMarker) setPositionFromValue(v float32) { panic("not implemented") } func (m *defaultBarMarker) calcValueFromPosition(p fyne.Position) float32 { panic("not implemented") } type circleBarMarker struct { *defaultMarker cx, cy float32 } func newCircleBarMarker(w, h float32, barWidth float32) *circleBarMarker { fw := float64(w) fh := float64(h) fr := barWidth / 2 marker := &circleBarMarker{ defaultMarker: newDefaultMarker(fr).(*defaultMarker), cx: w / 2, cy: h / 2, } markerCenter := 
fyne.NewPos(float32(math.Round(fw-float64(fr))), float32(math.Round(fh/2))) marker.defaultMarker.setPosition(markerCenter) return marker } func (m *circleBarMarker) setPosition(p fyne.Position) { v := newVectorFromPoints(float64(m.cx), float64(m.cy), float64(p.X), float64(p.Y)) nv := v.normalize() center := newVector(float64(m.cx), float64(m.cy)) markerCenter := center.add(nv.multiply(float64(m.cx - m.radius))).toPosition() m.defaultMarker.setPosition(markerCenter) } func (m *circleBarMarker) setPositionFromValue(v float32) { hue := v rad := float64(-2 * math.Pi * hue) center := newVector(float64(m.cx), float64(m.cy)) dir := newVector(1, 0).rotate(rad).multiply(float64(m.cx - m.radius)) markerCenter := center.add(dir).toPosition() m.defaultMarker.setPosition(markerCenter) } func (m *circleBarMarker) calcValueFromPosition(p fyne.Position) float32 { v := newVectorFromPoints(float64(m.cx), float64(m.cy), float64(p.X), float64(p.Y)) baseV := newVector(1, 0) rad := math.Acos(baseV.dot(v) / (v.norm() * baseV.norm())) if float64(p.Y-m.cy) >= 0 { rad = math.Pi*2 - rad } hue := rad / (math.Pi * 2) return float32(hue) }
marker.go
0.724968
0.433382
marker.go
starcoder
package statistics import ( "fmt" "io" "log" "github.com/gonum/floats" "github.com/gonum/stat" "github.com/kniren/gota/dataframe" "github.com/montanaflynn/stats" ) func Basics(b io.Reader) { // Create a dataframe from the CSV source. irisDF := dataframe.ReadCSV(b) // Get the float values from the "sepal_length" column as // we will be looking at the measures for this variable. sepalLength := irisDF.Col("petal_length").Float() // Calculate the Mean of the variable. meanVal := stat.Mean(sepalLength, nil) // Calculate the Mode of the variable. modeVal, modeCount := stat.Mode(sepalLength, nil) // Calculate the Median of the variable. medianVal, err := stats.Median(sepalLength) if err != nil { log.Fatal(err) } // Output the results to standard out. fmt.Printf("\nSepal Length Summary Statistics:\n") fmt.Printf("Mean value: %0.2f\n", meanVal) fmt.Printf("Mode value: %0.2f\n", modeVal) fmt.Printf("Mode count: %d\n", int(modeCount)) fmt.Printf("Median value: %0.2f\n\n", medianVal) } func Spread(b io.Reader) { // Create a dataframe from the CSV file. irisDF := dataframe.ReadCSV(b) // Get the float values from the "sepal_length" column as // we will be looking at the measures for this variable. sepalLength := irisDF.Col("petal_length").Float() // Calculate the Max of the variable. minVal := floats.Min(sepalLength) // Calculate the Max of the variable. maxVal := floats.Max(sepalLength) // Calculate the Median of the variable. rangeVal := maxVal - minVal // Calculate the variance of the variable. varianceVal := stat.Variance(sepalLength, nil) // Calculate the standard deviation of the variable. stdDevVal := stat.StdDev(sepalLength, nil) // Sort the values. inds := make([]int, len(sepalLength)) floats.Argsort(sepalLength, inds) // Get the Spread. quant25 := stat.Quantile(0.25, stat.Empirical, sepalLength, nil) quant50 := stat.Quantile(0.50, stat.Empirical, sepalLength, nil) quant75 := stat.Quantile(0.75, stat.Empirical, sepalLength, nil) // Output the results to standard out. 
fmt.Printf("\nSepal Length Summary Statistics:\n") fmt.Printf("Max value: %0.2f\n", maxVal) fmt.Printf("Min value: %0.2f\n", minVal) fmt.Printf("Range value: %0.2f\n", rangeVal) fmt.Printf("Variance value: %0.2f\n", varianceVal) fmt.Printf("Std Dev value: %0.2f\n", stdDevVal) fmt.Printf("25 Quantile: %0.2f\n", quant25) fmt.Printf("50 Quantile: %0.2f\n", quant50) fmt.Printf("75 Quantile: %0.2f\n\n", quant75) }
golang/machine-learning/statistics/basic.go
0.733643
0.41052
basic.go
starcoder
package model import ( "fmt" golog "log" "github.com/aunum/gold/pkg/v1/track" cgraph "github.com/aunum/goro/pkg/v1/common/graph" "github.com/aunum/goro/pkg/v1/layer" "github.com/aunum/log" g "gorgonia.org/gorgonia" ) // Model is a prediction model. type Model interface { // Compile the model. Compile(x InputOr, y *Input, opts ...Opt) error // Predict x. Predict(x g.Value) (prediction g.Value, err error) // Fit x to y. Fit(x ValueOr, y g.Value) error // FitBatch fits x to y as batches. FitBatch(x ValueOr, y g.Value) error // PredictBatch predicts x as a batch PredictBatch(x g.Value) (prediction g.Value, err error) // ResizeBatch resizes the batch graphs. ResizeBatch(n int) error // Visualize the model by graph name. Visualize(name string) // Graph returns the expression graph for the model. Graphs() map[string]*g.ExprGraph // X is the inputs to the model. X() InputOr // Y is the expected output of the model. Y() *Input // Learnables for the model. Learnables() g.Nodes } // Sequential model. type Sequential struct { // Chain of layers in the model. Chain *layer.Chain // Tracker of values. Tracker *track.Tracker noTracker bool logger *log.Logger name string x Inputs y *Input fwd *Input trainChain, trainBatchChain *layer.Chain onlineChain, onlineBatchChain *layer.Chain backwardChain *layer.Chain trainGraph, trainBatchGraph *g.ExprGraph onlineGraph, onlineBatchGraph *g.ExprGraph backwardGraph *g.ExprGraph xTrain, xTrainBatch Inputs xTrainFwd, xTrainBatchFwd *Input xOnline, xOnlineBatch Inputs xOnlineFwd, xOnlineBatchFwd *Input yTrain, yTrainBatch *Input trainPredVal, trainBatchPredVal g.Value onlinePredVal, onlineBatchPredVal g.Value loss Loss trainLoss, trainBatchLoss Loss metrics Metrics batchSize int optimizer g.Solver trainVM, trainBatchVM g.VM onlineVM, onlineBatchVM g.VM backwardVM g.VM vmOpts []g.VMOpt } // NewSequential returns a new sequential model. 
func NewSequential(name string) (*Sequential, error) { return &Sequential{ Chain: layer.NewChain(), name: name, batchSize: 32, metrics: AllMetrics, }, nil } // Opt is a model option. type Opt func(Model) // Metric tracked by the model. type Metric string const ( // TrainLossMetric is the metric for training loss. TrainLossMetric Metric = "train_loss" // TrainBatchLossMetric is the metric for batch training loss. TrainBatchLossMetric Metric = "train_batch_loss" ) // Metrics is a set of metric. type Metrics []Metric // Contains tells whether the set contains the given metric. func (m Metrics) Contains(metric Metric) bool { for _, mt := range m { if mt == metric { return true } } return false } // AllMetrics are all metrics. var AllMetrics = Metrics{TrainLossMetric, TrainBatchLossMetric} // WithMetrics sets the metrics that the model should track. // Defaults to AllMetrics. func WithMetrics(metrics ...Metric) func(Model) { return func(m Model) { switch t := m.(type) { case *Sequential: t.metrics = metrics default: log.Fatal("unknown model type") } } } // WithLoss uses a specific loss function with the model. // Defaults to MSE. func WithLoss(loss Loss) func(Model) { return func(m Model) { switch t := m.(type) { case *Sequential: t.loss = loss default: log.Fatal("unknown model type") } } } // WithOptimizer uses a specific optimizer function. // Defaults to Adam. func WithOptimizer(optimizer g.Solver) func(Model) { return func(m Model) { switch t := m.(type) { case *Sequential: t.optimizer = optimizer default: log.Fatal("unknown model type") } } } // WithTracker adds a tracker to the model, if not provided one will be created. func WithTracker(tracker *track.Tracker) func(Model) { return func(m Model) { switch t := m.(type) { case *Sequential: t.Tracker = tracker default: log.Fatal("unknown model type") } } } // WithoutTracker uses no tracking with the model. 
func WithoutTracker() func(Model) { return func(m Model) { switch t := m.(type) { case *Sequential: t.noTracker = true default: log.Fatal("unknown model type") } } } // WithBatchSize sets the batch size for the model. // Defaults to 32. func WithBatchSize(size int) func(Model) { return func(m Model) { switch t := m.(type) { case *Sequential: t.batchSize = size default: log.Fatal("unknown model type") } } } // WithGraphLogger adds a logger to the model which will print out the graph operations // as they occur. func WithGraphLogger(log *golog.Logger) func(Model) { return func(m Model) { switch t := m.(type) { case *Sequential: t.vmOpts = append(t.vmOpts, g.WithLogger(log)) default: log.Fatal("unknown model type") } } } // WithLogger adds a logger to the model. func WithLogger(logger *log.Logger) func(Model) { return func(m Model) { switch t := m.(type) { case *Sequential: t.logger = logger default: log.Fatal("unknown model type") } } } // AddLayer adds a layer. func (s *Sequential) AddLayer(layer layer.Config) { s.Chain.Add(layer) } // AddLayers adds a number of layer. func (s *Sequential) AddLayers(layers ...layer.Config) { for _, layer := range layers { s.Chain.Add(layer) } } // Fwd tells the model which input should be sent through the layer. // If not provided, the first input will be used. func (s *Sequential) Fwd(x *Input) { s.fwd = x } // Compile the model. 
func (s *Sequential) Compile(x InputOr, y *Input, opts ...Opt) error { s.x = x.Inputs() err := y.Validate() if err != nil { return err } s.y = y for _, opt := range opts { opt(s) } if s.logger == nil { s.logger = log.DefaultLogger } if s.loss == nil { s.loss = MSE } if s.optimizer == nil { s.optimizer = g.NewAdamSolver() } if s.Tracker == nil && !s.noTracker { tracker, err := track.NewTracker(track.WithLogger(s.logger)) if err != nil { return err } s.Tracker = tracker } if s.fwd == nil { s.fwd = x.Inputs()[0] err = s.fwd.Validate() if err != nil { return err } s.logger.Infof("setting forward for layers to input %q", s.fwd.Name()) } err = s.buildTrainGraph(s.x, s.y) if err != nil { return err } err = s.buildTrainBatchGraph(s.x, s.y) if err != nil { return err } err = s.buildOnlineGraph(s.x) if err != nil { return err } err = s.buildOnlineBatchGraph(s.x) if err != nil { return err } return nil } func (s *Sequential) buildTrainGraph(x Inputs, y *Input) (err error) { s.trainGraph = g.NewGraph() s.trainLoss = s.loss.CloneTo(s.trainGraph) for _, input := range x { if i, err := s.trainLoss.Inputs().Get(input.Name()); err == nil { s.xTrain = append(s.xTrain, i) continue } i := input.CloneTo(s.trainGraph) s.xTrain = append(s.xTrain, i) } s.xTrainFwd, err = s.xTrain.Get(s.fwd.Name()) if err != nil { return err } s.trainLoss = s.loss.CloneTo(s.trainGraph) s.yTrain = y.Clone() s.yTrain.Compile(s.trainGraph) s.trainChain = s.Chain.Clone() s.trainChain.Compile(s.trainGraph) prediction, err := s.trainChain.Fwd(s.xTrainFwd.Node()) if err != nil { return err } g.Read(prediction, &s.trainPredVal) loss, err := s.trainLoss.Compute(prediction, s.yTrain.Node()) if err != nil { return err } if s.metrics.Contains(TrainLossMetric) { if s.Tracker != nil { s.Tracker.TrackValue("train_loss", loss, track.WithNamespace(s.name)) } } _, err = g.Grad(loss, s.trainChain.Learnables()...) 
if err != nil { return err } vmOpts := []g.VMOpt{} copy(vmOpts, s.vmOpts) vmOpts = append(vmOpts, g.BindDualValues(s.trainChain.Learnables()...)) s.trainVM = g.NewTapeMachine(s.trainGraph, vmOpts...) return nil } func (s *Sequential) buildTrainBatchGraph(x Inputs, y *Input) (err error) { s.trainBatchGraph = g.NewGraph() s.trainBatchLoss = s.loss.CloneTo(s.trainBatchGraph, AsBatch(s.batchSize)) for _, input := range x { // TODO: need to validate input names for duplicates. if i, err := s.trainBatchLoss.Inputs().Get(input.Name()); err == nil { s.xTrainBatch = append(s.xTrainBatch, i) continue } i := input.CloneTo(s.trainBatchGraph, AsBatch(s.batchSize)) s.xTrainBatch = append(s.xTrainBatch, i) } s.xTrainBatchFwd, err = s.xTrainBatch.Get(NameAsBatch(s.fwd.Name())) if err != nil { return err } s.yTrainBatch = s.y.AsBatch(s.batchSize) s.yTrainBatch.Compile(s.trainBatchGraph) s.trainBatchChain = s.Chain.Clone() s.trainBatchChain.Compile(s.trainBatchGraph, layer.WithSharedChainLearnables(s.trainChain), layer.WithLayerOpts(layer.AsBatch())) prediction, err := s.trainBatchChain.Fwd(s.xTrainBatchFwd.Node()) if err != nil { return err } g.Read(prediction, &s.trainBatchPredVal) loss, err := s.trainBatchLoss.Compute(prediction, s.yTrainBatch.Node()) if err != nil { return err } if s.metrics.Contains(TrainBatchLossMetric) { if s.Tracker != nil { s.Tracker.TrackValue("train_batch_loss", loss, track.WithNamespace(s.name)) } } _, err = g.Grad(loss, s.trainBatchChain.Learnables()...) if err != nil { return err } vmOpts := []g.VMOpt{} copy(vmOpts, s.vmOpts) vmOpts = append(vmOpts, g.BindDualValues(s.trainBatchChain.Learnables()...)) s.trainBatchVM = g.NewTapeMachine(s.trainBatchGraph, vmOpts...) 
return nil } func (s *Sequential) buildOnlineGraph(x Inputs) (err error) { s.onlineGraph = g.NewGraph() s.xOnline = s.x.Clone() s.xOnline.Compile(s.onlineGraph) s.xOnlineFwd, err = s.xOnline.Get(s.fwd.Name()) if err != nil { return err } s.onlineChain = s.Chain.Clone() s.onlineChain.Compile(s.onlineGraph, layer.WithSharedChainLearnables(s.trainChain)) prediction, err := s.onlineChain.Fwd(s.xOnlineFwd.Node()) if err != nil { return err } g.Read(prediction, &s.onlinePredVal) vmOpts := []g.VMOpt{} copy(vmOpts, s.vmOpts) s.onlineVM = g.NewTapeMachine(s.onlineGraph, vmOpts...) return nil } func (s *Sequential) buildOnlineBatchGraph(x Inputs) (err error) { s.onlineBatchGraph = g.NewGraph() for _, input := range x { if input.Name() == s.fwd.Name() { s.xOnlineBatchFwd = input.AsBatch(s.batchSize) s.xOnlineBatchFwd.Compile(s.onlineBatchGraph) s.xOnlineBatch = append(s.xOnlineBatch, s.xOnlineBatchFwd) continue } i := input.CloneTo(s.onlineBatchGraph) s.xOnlineBatch = append(s.xOnlineBatch, i) } s.xOnlineBatchFwd, err = s.xOnlineBatch.Get(NameAsBatch(s.fwd.Name())) if err != nil { return err } s.onlineBatchChain = s.Chain.Clone() s.onlineBatchChain.Compile(s.onlineBatchGraph, layer.WithSharedChainLearnables(s.trainChain), layer.WithLayerOpts(layer.AsBatch())) prediction, err := s.onlineBatchChain.Fwd(s.xOnlineBatchFwd.Node()) if err != nil { return err } g.Read(prediction, &s.onlineBatchPredVal) vmOpts := []g.VMOpt{} copy(vmOpts, s.vmOpts) s.onlineBatchVM = g.NewTapeMachine(s.onlineBatchGraph, vmOpts...) return nil } // ResizeBatch will resize the batch graph. // Note: this is expensive as it recompiles the graph. func (s *Sequential) ResizeBatch(n int) (err error) { log.Debugf("resizing batch graphs to %d", n) s.batchSize = n s.xTrainBatch = Inputs{} s.xTrainBatchFwd = nil err = s.buildTrainBatchGraph(s.x, s.y) if err != nil { return } s.xOnlineBatch = Inputs{} s.xOnlineBatchFwd = nil return s.buildOnlineBatchGraph(s.x) } // Predict x. 
func (s *Sequential) Predict(x g.Value) (prediction g.Value, err error) { err = s.xOnlineFwd.Set(x) if err != nil { return prediction, err } err = s.onlineVM.RunAll() if err != nil { return prediction, err } prediction = s.onlinePredVal s.onlineVM.Reset() return } // PredictBatch predicts x as a batch. func (s *Sequential) PredictBatch(x g.Value) (prediction g.Value, err error) { err = s.xOnlineBatchFwd.Set(x) if err != nil { return prediction, err } err = s.onlineBatchVM.RunAll() if err != nil { return prediction, err } prediction = s.onlineBatchPredVal s.onlineBatchVM.Reset() return } // Fit x to y. func (s *Sequential) Fit(x ValueOr, y g.Value) error { err := s.yTrain.Set(y) if err != nil { return err } xVals := ValuesFrom(x) err = s.xTrain.Set(xVals) if err != nil { return err } err = s.trainVM.RunAll() if err != nil { return err } grads := g.NodesToValueGrads(s.trainChain.Learnables()) s.optimizer.Step(grads) s.trainVM.Reset() return nil } // FitBatch fits x to y as a batch. func (s *Sequential) FitBatch(x ValueOr, y g.Value) error { err := s.yTrainBatch.Set(y) if err != nil { return err } xVals := ValuesFrom(x) err = s.xTrainBatch.Set(xVals) if err != nil { return err } err = s.trainBatchVM.RunAll() if err != nil { return err } // log.Infovb("pred val", s.trainBatchPredVal) grads := g.NodesToValueGrads(s.trainBatchChain.Learnables()) s.optimizer.Step(grads) s.trainBatchVM.Reset() return nil } // Visualize the model by graph name. func (s *Sequential) Visualize(name string) { cgraph.Visualize(s.Graphs()[name]) } // Graphs returns the expression graphs for the model. func (s *Sequential) Graphs() map[string]*g.ExprGraph { return map[string]*g.ExprGraph{ "train": s.trainGraph, "trainBatch": s.trainBatchGraph, "online": s.onlineGraph, "onlineBatch": s.onlineBatchGraph, } } // X is is the input to the model. func (s *Sequential) X() InputOr { return s.x } // Y is is the output of the model. 
func (s *Sequential) Y() *Input { return s.y } // Learnables are the model learnables. func (s *Sequential) Learnables() g.Nodes { return s.trainChain.Learnables() } // CloneLearnablesTo another model. func (s *Sequential) CloneLearnablesTo(to *Sequential) error { desired := s.trainChain.Learnables() destination := to.trainChain.Learnables() if len(desired) != len(destination) { return fmt.Errorf("models must be identical to clone learnables") } for i, learnable := range destination { c := desired[i].Clone() err := g.Let(learnable, c.(*g.Node).Value()) if err != nil { return err } } new := to.trainChain.Learnables() shared := map[string]*layer.Chain{ "trainBatch": to.trainBatchChain, "online": to.onlineChain, "onlineBatch": to.onlineBatchChain, } for name, chain := range shared { s.logger.Debugv("chain", name) for i, learnable := range chain.Learnables() { err := g.Let(learnable, new[i].Value()) if err != nil { return err } s.logger.Debugvb(learnable.Name(), learnable.Value()) } } return nil } // SetLearnables sets learnables to model func (s *Sequential) SetLearnables(desired g.Nodes) error { destination := s.trainChain.Learnables() if len(desired) != len(destination) { return fmt.Errorf("cannot set learnables: number of desired nodes not equal to number of nodes in model") } for i, learnable := range destination { c := desired[i].Clone() err := g.Let(learnable, c.(*g.Node).Value()) if err != nil { return err } } new := s.trainChain.Learnables() shared := map[string]*layer.Chain{ "trainBatch": s.trainBatchChain, "online": s.onlineChain, "onlineBatch": s.onlineBatchChain, } for name, chain := range shared { s.logger.Debugv("chain", name) for i, learnable := range chain.Learnables() { err := g.Let(learnable, new[i].Value()) if err != nil { return err } s.logger.Debugvb(learnable.Name(), learnable.Value()) } } return nil } // Opts are optsion for a model type Opts struct { opts []Opt } // NewOpts returns a new set of options for a model. 
func NewOpts() *Opts { return &Opts{opts: []Opt{}} } // Add an option to the options. func (o *Opts) Add(opts ...Opt) { o.opts = append(o.opts, opts...) } // Values are the options. func (o *Opts) Values() []Opt { return o.opts }
pkg/v1/model/model.go
0.754734
0.436562
model.go
starcoder
// Package blake256 implements BLAKE-256 and BLAKE-224 hash functions (SHA-3 // candidate). package blake256 import "hash" // The block size of the hash algorithm in bytes. const BlockSize = 64 // The size of BLAKE-256 hash in bytes. const Size = 32 // The size of BLAKE-224 hash in bytes. const Size224 = 28 type digest struct { hashSize int // hash output size in bits (224 or 256) h [8]uint32 // current chain value s [4]uint32 // salt (zero by default) t uint64 // message bits counter nullt bool // special case for finalization: skip counter x [BlockSize]byte // buffer for data not yet compressed nx int // number of bytes in buffer } var ( // Initialization values. iv256 = [8]uint32{ 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19} iv224 = [8]uint32{ 0xC1059ED8, 0x367CD507, 0x3070DD17, 0xF70E5939, 0xFFC00B31, 0x68581511, 0x64F98FA7, 0xBEFA4FA4} pad = [64]byte{0x80} ) // Reset resets the state of digest. It leaves salt intact. func (d *digest) Reset() { if d.hashSize == 224 { d.h = iv224 } else { d.h = iv256 } d.t = 0 d.nx = 0 d.nullt = false } func (d *digest) Size() int { return d.hashSize >> 3 } func (d *digest) BlockSize() int { return BlockSize } func (d *digest) Write(p []byte) (nn int, err error) { nn = len(p) if d.nx > 0 { n := len(p) if n > BlockSize-d.nx { n = BlockSize - d.nx } d.nx += copy(d.x[d.nx:], p) if d.nx == BlockSize { block(d, d.x[:]) d.nx = 0 } p = p[n:] } if len(p) >= BlockSize { n := len(p) &^ (BlockSize - 1) block(d, p[:n]) p = p[n:] } if len(p) > 0 { d.nx = copy(d.x[:], p) } return } // Sum returns the calculated checksum. func (d0 *digest) Sum(in []byte) []byte { // Make a copy of d0 so that caller can keep writing and summing. d := *d0 sum := d.checkSum() if d.Size() == Size224 { return append(in, sum[:Size224]...) } return append(in, sum[:]...) 
} func (d *digest) checkSum() [Size]byte { nx := uint64(d.nx) l := d.t + nx<<3 var len [8]byte len[0] = byte(l >> 56) len[1] = byte(l >> 48) len[2] = byte(l >> 40) len[3] = byte(l >> 32) len[4] = byte(l >> 24) len[5] = byte(l >> 16) len[6] = byte(l >> 8) len[7] = byte(l) if nx == 55 { // One padding byte. d.t -= 8 if d.hashSize == 224 { d.Write([]byte{0x80}) } else { d.Write([]byte{0x81}) } } else { if nx < 55 { // Enough space to fill the block. if nx == 0 { d.nullt = true } d.t -= 440 - nx<<3 d.Write(pad[0 : 55-nx]) } else { // Need 2 compressions. d.t -= 512 - nx<<3 d.Write(pad[0 : 64-nx]) d.t -= 440 d.Write(pad[1:56]) d.nullt = true } if d.hashSize == 224 { d.Write([]byte{0x00}) } else { d.Write([]byte{0x01}) } d.t -= 8 } d.t -= 64 d.Write(len[:]) var out [Size]byte j := 0 for _, s := range d.h[:d.hashSize>>5] { out[j+0] = byte(s >> 24) out[j+1] = byte(s >> 16) out[j+2] = byte(s >> 8) out[j+3] = byte(s >> 0) j += 4 } return out } func (d *digest) setSalt(s []byte) { if len(s) != 16 { panic("salt length must be 16 bytes") } d.s[0] = uint32(s[0])<<24 | uint32(s[1])<<16 | uint32(s[2])<<8 | uint32(s[3]) d.s[1] = uint32(s[4])<<24 | uint32(s[5])<<16 | uint32(s[6])<<8 | uint32(s[7]) d.s[2] = uint32(s[8])<<24 | uint32(s[9])<<16 | uint32(s[10])<<8 | uint32(s[11]) d.s[3] = uint32(s[12])<<24 | uint32(s[13])<<16 | uint32(s[14])<<8 | uint32(s[15]) } // New returns a new hash.Hash computing the BLAKE-256 checksum. func New() hash.Hash { return &digest{ hashSize: 256, h: iv256, } } // NewSalt is like New but initializes salt with the given 16-byte slice. func NewSalt(salt []byte) hash.Hash { d := &digest{ hashSize: 256, h: iv256, } d.setSalt(salt) return d } // New224 returns a new hash.Hash computing the BLAKE-224 checksum. func New224() hash.Hash { return &digest{ hashSize: 224, h: iv224, } } // New224Salt is like New224 but initializes salt with the given 16-byte slice. 
func New224Salt(salt []byte) hash.Hash { d := &digest{ hashSize: 224, h: iv224, } d.setSalt(salt) return d } // Sum256 returns the BLAKE-256 checksum of the data. func Sum256(data []byte) [Size]byte { var d digest d.hashSize = 256 d.Reset() d.Write(data) return d.checkSum() } // Sum224 returns the BLAKE-224 checksum of the data. func Sum224(data []byte) (sum224 [Size224]byte) { var d digest d.hashSize = 224 d.Reset() d.Write(data) sum := d.checkSum() copy(sum224[:], sum[:Size224]) return }
crypto/blake256/blake256.go
0.643777
0.439206
blake256.go
starcoder
package models import ( i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization" ) // PrivilegedRoleSummary type PrivilegedRoleSummary struct { Entity // The number of users that have the role assigned and the role is activated. elevatedCount *int32 // The number of users that have the role assigned but the role is deactivated. managedCount *int32 // true if the role activation requires MFA. false if the role activation doesn't require MFA. mfaEnabled *bool // Possible values are: ok, bad. The value depends on the ratio of (managedCount / usersCount). If the ratio is less than a predefined threshold, ok is returned. Otherwise, bad is returned. status *RoleSummaryStatus // The number of users that are assigned with the role. usersCount *int32 } // NewPrivilegedRoleSummary instantiates a new privilegedRoleSummary and sets the default values. func NewPrivilegedRoleSummary()(*PrivilegedRoleSummary) { m := &PrivilegedRoleSummary{ Entity: *NewEntity(), } return m } // CreatePrivilegedRoleSummaryFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value func CreatePrivilegedRoleSummaryFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) { return NewPrivilegedRoleSummary(), nil } // GetElevatedCount gets the elevatedCount property value. The number of users that have the role assigned and the role is activated. 
func (m *PrivilegedRoleSummary) GetElevatedCount()(*int32) { if m == nil { return nil } else { return m.elevatedCount } } // GetFieldDeserializers the deserialization information for the current model func (m *PrivilegedRoleSummary) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) { res := m.Entity.GetFieldDeserializers() res["elevatedCount"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { val, err := n.GetInt32Value() if err != nil { return err } if val != nil { m.SetElevatedCount(val) } return nil } res["managedCount"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { val, err := n.GetInt32Value() if err != nil { return err } if val != nil { m.SetManagedCount(val) } return nil } res["mfaEnabled"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { val, err := n.GetBoolValue() if err != nil { return err } if val != nil { m.SetMfaEnabled(val) } return nil } res["status"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { val, err := n.GetEnumValue(ParseRoleSummaryStatus) if err != nil { return err } if val != nil { m.SetStatus(val.(*RoleSummaryStatus)) } return nil } res["usersCount"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { val, err := n.GetInt32Value() if err != nil { return err } if val != nil { m.SetUsersCount(val) } return nil } return res } // GetManagedCount gets the managedCount property value. The number of users that have the role assigned but the role is deactivated. func (m *PrivilegedRoleSummary) GetManagedCount()(*int32) { if m == nil { return nil } else { return m.managedCount } } // GetMfaEnabled gets the mfaEnabled property value. true if the role activation requires MFA. false if the role activation doesn't require MFA. 
func (m *PrivilegedRoleSummary) GetMfaEnabled()(*bool) { if m == nil { return nil } else { return m.mfaEnabled } } // GetStatus gets the status property value. Possible values are: ok, bad. The value depends on the ratio of (managedCount / usersCount). If the ratio is less than a predefined threshold, ok is returned. Otherwise, bad is returned. func (m *PrivilegedRoleSummary) GetStatus()(*RoleSummaryStatus) { if m == nil { return nil } else { return m.status } } // GetUsersCount gets the usersCount property value. The number of users that are assigned with the role. func (m *PrivilegedRoleSummary) GetUsersCount()(*int32) { if m == nil { return nil } else { return m.usersCount } } // Serialize serializes information the current object func (m *PrivilegedRoleSummary) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) { err := m.Entity.Serialize(writer) if err != nil { return err } { err = writer.WriteInt32Value("elevatedCount", m.GetElevatedCount()) if err != nil { return err } } { err = writer.WriteInt32Value("managedCount", m.GetManagedCount()) if err != nil { return err } } { err = writer.WriteBoolValue("mfaEnabled", m.GetMfaEnabled()) if err != nil { return err } } if m.GetStatus() != nil { cast := (*m.GetStatus()).String() err = writer.WriteStringValue("status", &cast) if err != nil { return err } } { err = writer.WriteInt32Value("usersCount", m.GetUsersCount()) if err != nil { return err } } return nil } // SetElevatedCount sets the elevatedCount property value. The number of users that have the role assigned and the role is activated. func (m *PrivilegedRoleSummary) SetElevatedCount(value *int32)() { if m != nil { m.elevatedCount = value } } // SetManagedCount sets the managedCount property value. The number of users that have the role assigned but the role is deactivated. 
func (m *PrivilegedRoleSummary) SetManagedCount(value *int32)() {
    if m == nil {
        return
    }
    m.managedCount = value
}
// SetMfaEnabled sets the mfaEnabled property value. true if the role activation requires MFA. false if the role activation doesn't require MFA.
func (m *PrivilegedRoleSummary) SetMfaEnabled(value *bool)() {
    if m == nil {
        return
    }
    m.mfaEnabled = value
}
// SetStatus sets the status property value. Possible values are: ok, bad. The value depends on the ratio of (managedCount / usersCount). If the ratio is less than a predefined threshold, ok is returned. Otherwise, bad is returned.
func (m *PrivilegedRoleSummary) SetStatus(value *RoleSummaryStatus)() {
    if m == nil {
        return
    }
    m.status = value
}
// SetUsersCount sets the usersCount property value. The number of users that are assigned with the role.
func (m *PrivilegedRoleSummary) SetUsersCount(value *int32)() {
    if m == nil {
        return
    }
    m.usersCount = value
}
models/privileged_role_summary.go
0.599485
0.416263
privileged_role_summary.go
starcoder
package gotetris

// Piece describes a tetromino: its identifying rune, the block coordinates of
// every rotation, a per-rotation/per-column cache of extreme y values used for
// collision checks, and the precomputed set of legal horizontal moves.
type Piece struct {
	Id          rune
	Coordinates [][][2]int
	LowestPoints [4][10]int
	MoveSet     [][2]int
}

// NewPiece builds a *Piece from its rotation coordinate sets, precomputing
// the per-column extreme points and the ordered move set.
// NOTE(review): the board is assumed to be 10 columns wide (hard-coded below);
// "lowest" keeps the minimum y per column — confirm against the board's y-axis
// convention.
func NewPiece(id rune, coordSets [][][2]int) *Piece {
	moveSet := make([][2]int, 0, 40)
	lowestPoints := new([4][10]int)
	// setPoints tracks which (rotation, column) entries have been written, so the
	// first y seen for a column is always recorded even if it is not smaller than 0.
	setPoints := new([4][10]bool)
	for i, rotation := range coordSets {
		// min/max track the leftmost/rightmost occupied columns of this rotation.
		max := rotation[0][0]
		min := max
		for _, coord := range rotation {
			x := coord[0]
			y := coord[1]
			// Calculate the range which the piece can move through (distance from walls)
			if x < min {
				min = x
			} else if x > max {
				max = x
			}
			// Cache the lowest points of the piece (for collisions)
			if lowestPoints[i][x] > y || !setPoints[i][x] {
				setPoints[i][x] = true
				lowestPoints[i][x] = y
			}
		}
		// min columns of slack on the left, (10 - max) on the right.
		moveRange := min + (10 - max)
		// The MoveSet is organised in such a way that moves closer to the center
		// are at lower indexes in the array.
		// Useful for a Depth-First search moves which require fewer keypresses
		// will be found first
		for move := 0; move < moveRange; move++ {
			if move <= min {
				moveSet = append(moveSet, [2]int{i, -move})
			}
			if move != 0 && move < (10-max) {
				moveSet = append(moveSet, [2]int{i, move})
			}
		}
		//TODO: Order rotations in a similar manner
	}
	return &Piece{Id: id, Coordinates: coordSets, LowestPoints: *lowestPoints, MoveSet: moveSet}
}

// PieceMap maps each tetromino rune to its precomputed Piece.
// Pieces with rotational symmetry (I, S, Z) list only their distinct rotations;
// O has a single rotation.
var PieceMap = map[rune]*Piece{
	'I': NewPiece('I', [][][2]int{
		{{3, 0}, {4, 0}, {5, 0}, {6, 0}},
		{{5, 0}, {5, 1}, {5, 2}, {5, 3}},
	}),
	'O': NewPiece('O', [][][2]int{
		{{4, 0}, {5, 0}, {4, 1}, {5, 1}},
	}),
	'J': NewPiece('J', [][][2]int{
		{{3, 0}, {4, 0}, {5, 0}, {3, 1}},
		{{4, 0}, {4, 1}, {4, 2}, {5, 2}},
		{{5, 0}, {3, 1}, {4, 1}, {5, 1}},
		{{4, 0}, {3, 0}, {4, 1}, {4, 2}},
	}),
	'L': NewPiece('L', [][][2]int{
		{{3, 0}, {4, 0}, {5, 0}, {5, 1}},
		{{4, 0}, {5, 0}, {4, 1}, {4, 2}},
		{{3, 0}, {3, 1}, {4, 1}, {5, 1}},
		{{4, 0}, {4, 1}, {4, 2}, {3, 2}},
	}),
	'S': NewPiece('S', [][][2]int{
		{{3, 0}, {4, 0}, {4, 1}, {5, 1}},
		{{5, 0}, {4, 1}, {5, 1}, {4, 2}},
	}),
	'Z': NewPiece('Z', [][][2]int{
		{{5, 0}, {4, 0}, {4, 1}, {3, 1}},
		{{4, 0}, {4, 1}, {5, 1}, {5, 2}},
	}),
	'T': NewPiece('T', [][][2]int{
		{{3, 0}, {4, 0}, {5, 0}, {4, 1}},
		{{4, 0}, {4, 1}, {5, 1}, {4, 2}},
		{{4, 0}, {3, 1}, {4, 1}, {5, 1}},
		{{4, 0}, {3, 1}, {4, 1}, {4, 2}},
	}),
}
piece.go
0.533397
0.54952
piece.go
starcoder
package testutil

import (
	"context"
	"reflect"

	"github.com/stretchr/testify/require"
)

// AssertReceive verifies that a channel returns a value before the given context closes, and writes into
// into out, which should be a pointer to the value type
func AssertReceive(ctx context.Context, t TestingT, channel interface{}, out interface{}, errorMessage string) {
	AssertReceiveFirst(t, channel, out, errorMessage, ctx.Done())
}

// AssertReceiveFirst verifies that a channel returns a value on the specified channel before the other channels,
// and writes the value into out, which should be a pointer to the value type.
// All channel arguments are passed as interface{} and validated via reflection
// so the helpers work with channels of any element type.
func AssertReceiveFirst(t TestingT, channel interface{}, out interface{}, errorMessage string, incorrectChannels ...interface{}) {
	chanValue := reflect.ValueOf(channel)
	outValue := reflect.ValueOf(out)
	// Validate the arguments up front: a receivable channel and a pointer whose
	// element type can hold the channel's element type.
	require.Equal(t, reflect.Chan, chanValue.Kind(), "incorrect argument: should pass channel to read from")
	require.Contains(t, []reflect.ChanDir{reflect.BothDir, reflect.RecvDir}, chanValue.Type().ChanDir(), "incorrect argument: should pass a receiving channel")
	require.Equal(t, reflect.Ptr, outValue.Kind(), "incorrect argument: should pass a pointer for out value")
	require.True(t, chanValue.Type().Elem().AssignableTo(outValue.Elem().Type()), "incorrect argument: out value is incorrect type")
	var incorrectSelectCases []reflect.SelectCase
	for _, incorrectChannel := range incorrectChannels {
		incorrectChanValue := reflect.ValueOf(incorrectChannel)
		require.Equal(t, reflect.Chan, incorrectChanValue.Kind(), "incorrect argument: should pass channel to read from")
		require.Contains(t, []reflect.ChanDir{reflect.BothDir, reflect.RecvDir}, incorrectChanValue.Type().ChanDir(), "incorrect argument: should pass a receiving channel")
		incorrectSelectCases = append(incorrectSelectCases, reflect.SelectCase{
			Dir:  reflect.SelectRecv,
			Chan: incorrectChanValue,
		})
	}
	// The expected channel is case 0; winning any other case fails the assertion.
	chosen, recv, recvOk := reflect.Select(append([]reflect.SelectCase{
		{
			Dir:  reflect.SelectRecv,
			Chan: chanValue,
		},
	}, incorrectSelectCases...))
	require.Equal(t, 0, chosen, errorMessage)
	// recvOk is false when the channel was closed rather than sent to.
	require.True(t, recvOk, errorMessage)
	outValue.Elem().Set(recv)
}

// AssertDoesReceive verifies that a channel returns some value before the given context closes
func AssertDoesReceive(ctx context.Context, t TestingT, channel interface{}, errorMessage string) {
	AssertDoesReceiveFirst(t, channel, errorMessage, ctx.Done())
}

// AssertDoesReceiveFirst asserts that the given channel receives a value before any of the other channels specified.
// Unlike AssertReceiveFirst it discards the received value and accepts a close as a receive.
func AssertDoesReceiveFirst(t TestingT, channel interface{}, errorMessage string, incorrectChannels ...interface{}) {
	chanValue := reflect.ValueOf(channel)
	require.Equal(t, reflect.Chan, chanValue.Kind(), "incorrect argument: should pass channel to read from")
	require.Contains(t, []reflect.ChanDir{reflect.BothDir, reflect.RecvDir}, chanValue.Type().ChanDir(), "incorrect argument: should pass a receiving channel")
	var incorrectSelectCases []reflect.SelectCase
	for _, incorrectChannel := range incorrectChannels {
		incorrectChanValue := reflect.ValueOf(incorrectChannel)
		require.Equal(t, reflect.Chan, incorrectChanValue.Kind(), "incorrect argument: should pass channel to read from")
		require.Contains(t, []reflect.ChanDir{reflect.BothDir, reflect.RecvDir}, incorrectChanValue.Type().ChanDir(), "incorrect argument: should pass a receiving channel")
		incorrectSelectCases = append(incorrectSelectCases, reflect.SelectCase{
			Dir:  reflect.SelectRecv,
			Chan: incorrectChanValue,
		})
	}
	chosen, _, _ := reflect.Select(append([]reflect.SelectCase{
		{
			Dir:  reflect.SelectRecv,
			Chan: chanValue,
		},
	}, incorrectSelectCases...))
	require.Equal(t, 0, chosen, errorMessage)
}

// AssertChannelEmpty verifies that a channel has no value currently.
// Implemented as a non-blocking receive: the default case must win.
func AssertChannelEmpty(t TestingT, channel interface{}, errorMessage string) {
	chanValue := reflect.ValueOf(channel)
	require.Equal(t, reflect.Chan, chanValue.Kind(), "incorrect argument: should pass channel to read from")
	require.Contains(t, []reflect.ChanDir{reflect.BothDir, reflect.RecvDir}, chanValue.Type().ChanDir(), "incorrect argument: should pass a receiving channel")
	chosen, _, _ := reflect.Select([]reflect.SelectCase{
		{
			Dir:  reflect.SelectRecv,
			Chan: chanValue,
		},
		{
			Dir: reflect.SelectDefault,
		},
	})
	// NOTE(review): argument order here is (actual, expected), the reverse of the
	// require.Equal(t, 0, chosen, ...) convention used above — harmless for NotEqual.
	require.NotEqual(t, chosen, 0, errorMessage)
}

// AssertSends attempts to send the given input value to the given channel before the given context closes
func AssertSends(ctx context.Context, t TestingT, channel interface{}, in interface{}, errorMessage string) {
	chanValue := reflect.ValueOf(channel)
	inValue := reflect.ValueOf(in)
	require.Equal(t, reflect.Chan, chanValue.Kind(), "incorrect argument: should pass channel to send to")
	require.Contains(t, []reflect.ChanDir{reflect.BothDir, reflect.SendDir}, chanValue.Type().ChanDir(), "incorrect argument: should pass a sending channel")
	require.True(t, inValue.Type().AssignableTo(chanValue.Type().Elem()), "incorrect argument: in value is incorrect type")
	// Race the send against the context; case 0 (the send) must win.
	chosen, _, _ := reflect.Select([]reflect.SelectCase{
		{
			Dir:  reflect.SelectSend,
			Chan: chanValue,
			Send: inValue,
		},
		{
			Dir:  reflect.SelectRecv,
			Chan: reflect.ValueOf(ctx.Done()),
		},
	})
	require.Equal(t, 0, chosen, errorMessage)
}
testutil/channelassertions.go
0.734215
0.422743
channelassertions.go
starcoder
package engine // Direction represents a query sort direction type Direction byte const ( // Ascending means going up, A-Z Ascending Direction = 1 << iota // Descending means reverse order, Z-A Descending ) // Condition represents a filter comparison operation // between a field and a value type Condition byte const ( // Equal if it should be the same Equal Condition = 1 << iota // LessThan if it should be smaller LessThan // LessThanOrEqual if it should be smaller or equal LessThanOrEqual // GreaterThan if it should be larger GreaterThan // GreaterThanOrEqual if it should be equal or greater than GreaterThanOrEqual ) type ( // Query represents a query specification for filtering // sorting, paging and limiting the data requested Query struct { Name string Offset int Limit int Filters []*Filter Orders []*Order } // QueryBuilder helps with query creation QueryBuilder interface { Filter(property string, value interface{}) QueryBuilder Order(property string, direction Direction) } // Filter represents a filter operation on a single field Filter struct { Property string Condition Condition Value interface{} } // Order represents a sort operation on a single field Order struct { Property string Direction Direction } ) // NewQuery creates a new database query spec. The name is what // the storage system should use to identify the types, usually // a table or collection name. 
func NewQuery(name string) *Query {
	q := Query{Name: name}
	return &q
}

// Filter appends a comparison filter to the query and returns the
// query itself so calls can be chained.
func (q *Query) Filter(property string, condition Condition, value interface{}) *Query {
	q.Filters = append(q.Filters, NewFilter(property, condition, value))
	return q
}

// Order appends a sort order to the query and returns the query
// itself so calls can be chained.
func (q *Query) Order(property string, direction Direction) *Query {
	q.Orders = append(q.Orders, NewOrder(property, direction))
	return q
}

// Slice sets the paging window (offset and limit) on the query and
// returns the query itself so calls can be chained.
func (q *Query) Slice(offset, limit int) *Query {
	q.Offset, q.Limit = offset, limit
	return q
}

// NewFilter creates a new property filter.
func NewFilter(property string, condition Condition, value interface{}) *Filter {
	f := Filter{
		Property:  property,
		Condition: condition,
		Value:     value,
	}
	return &f
}

// NewOrder creates a new query order.
func NewOrder(property string, direction Direction) *Order {
	o := Order{
		Property:  property,
		Direction: direction,
	}
	return &o
}
services/backend/wallet/pkg/engine/query.go
0.841728
0.469216
query.go
starcoder
package dataselect

import (
	"errors"
	"log"
	"sort"

	metricapi "github.com/kubernetes/dashboard/src/app/backend/integration/metric/api"
)

// GenericDataCell describes the interface of the data cell that contains all the necessary methods needed to perform
// complex data selection
// GenericDataSelect takes a list of these interfaces and performs selection operation.
// Therefore as long as the list is composed of GenericDataCells you can perform any data selection!
type DataCell interface {
	// GetProperty returns the named property of this data cell.
	// Value returned has to have Compare method which is required by Sort functionality of DataSelect.
	GetProperty(PropertyName) ComparableValue
}

// MetricDataCell extends interface of DataCells and additionally supports metric download.
type MetricDataCell interface {
	DataCell
	// GetResourceSelector returns ResourceSelector for this resource. The ResourceSelector can be used to get,
	// HeapsterSelector which in turn can be used to download metrics.
	GetResourceSelector() *metricapi.ResourceSelector
}

// ComparableValue hold any value that can be compared to its own kind.
type ComparableValue interface {
	// Compares self with other value. Returns 1 if other value is smaller, 0 if they are the same, -1 if other is larger.
	Compare(ComparableValue) int
	// Returns true if self value contains or is equal to other value, false otherwise.
	Contains(ComparableValue) bool
}

// DataSelector contains all the required data to perform data selection.
// It implements sort.Interface so its sortable under sort.Sort
// You can use its Select method to get selected GenericDataCell list.
type DataSelector struct {
	// GenericDataList hold generic data cells that are being selected.
	GenericDataList []DataCell
	// DataSelectQuery holds instructions for data select.
	DataSelectQuery *DataSelectQuery
	// CachedResources stores resources that may be needed during data selection process
	CachedResources *metricapi.CachedResources
	// CumulativeMetricsPromises is a list of promises holding aggregated metrics for resources in GenericDataList.
	// The metrics will be calculated after calling GetCumulativeMetrics method.
	CumulativeMetricsPromises metricapi.MetricPromises
	// MetricsPromises is a list of promises holding metrics for resources in GenericDataList.
	// The metrics will be calculated after calling GetMetrics method. Metric will not be
	// aggregated and can are used to display sparklines on pod list.
	MetricsPromises metricapi.MetricPromises
}

// Implementation of sort.Interface so that we can use built-in sort function (sort.Sort) for sorting SelectableData

// Len returns the length of data inside SelectableData.
func (self DataSelector) Len() int {
	return len(self.GenericDataList)
}

// Swap swaps 2 indices inside SelectableData.
func (self DataSelector) Swap(i, j int) {
	self.GenericDataList[i], self.GenericDataList[j] = self.GenericDataList[j], self.GenericDataList[i]
}

// Less reports whether element i should sort before element j, walking the
// sort-by list in priority order and honoring each entry's Ascending flag.
func (self DataSelector) Less(i, j int) bool {
	for _, sortBy := range self.DataSelectQuery.SortQuery.SortByList {
		a := self.GenericDataList[i].GetProperty(sortBy.Property)
		b := self.GenericDataList[j].GetProperty(sortBy.Property)
		// ignore sort completely if property name not found
		if a == nil || b == nil {
			break
		}
		cmp := a.Compare(b)
		if cmp == 0 {
			// values are the same. Just continue to next sortBy
			continue
		} else {
			// values different
			return (cmp == -1 && sortBy.Ascending) || (cmp == 1 && !sortBy.Ascending)
		}
	}
	return false
}

// Sort sorts the data inside as instructed by DataSelectQuery and returns itself to allow method chaining.
func (self *DataSelector) Sort() *DataSelector {
	sort.Sort(*self)
	return self
}

// Filter the data inside as instructed by DataSelectQuery and returns itself to allow method chaining.
// A cell must match every filter in FilterByList to be kept.
func (self *DataSelector) Filter() *DataSelector {
	filteredList := []DataCell{}
	for _, c := range self.GenericDataList {
		matches := true
		for _, filterBy := range self.DataSelectQuery.FilterQuery.FilterByList {
			v := c.GetProperty(filterBy.Property)
			if v == nil {
				matches = false
				// NOTE(review): `continue` keeps evaluating remaining filters even
				// though matches can no longer become true; `break` would be cheaper.
				continue
			}
			if !v.Contains(filterBy.Value) {
				matches = false
				continue
			}
		}
		if matches {
			filteredList = append(filteredList, c)
		}
	}
	self.GenericDataList = filteredList
	return self
}

// getMetrics starts one metric download per requested metric name for every
// cell that implements MetricDataCell, returning a promise list per metric.
func (self *DataSelector) getMetrics(metricClient metricapi.MetricClient) (
	[]metricapi.MetricPromises, error) {
	metricPromises := make([]metricapi.MetricPromises, 0)
	if metricClient == nil {
		return metricPromises, errors.New("No metric client provided. Skipping metrics.")
	}
	metricNames := self.DataSelectQuery.MetricQuery.MetricNames
	if metricNames == nil {
		return metricPromises, errors.New("No metrics specified. Skipping metrics.")
	}
	selectors := make([]metricapi.ResourceSelector, len(self.GenericDataList))
	for i, dataCell := range self.GenericDataList {
		// make sure data cells support metrics
		metricDataCell, ok := dataCell.(MetricDataCell)
		if !ok {
			// Skipped cells leave a zero-value selector at index i.
			log.Printf("Data cell does not implement MetricDataCell. Skipping. %v", dataCell)
			continue
		}
		selectors[i] = *metricDataCell.GetResourceSelector()
	}
	for _, metricName := range metricNames {
		promises := metricClient.DownloadMetric(selectors, metricName, self.CachedResources)
		metricPromises = append(metricPromises, promises)
	}
	return metricPromises, nil
}

// GetMetrics downloads metrics for data cells currently present in self.GenericDataList as instructed
// by MetricQuery and inserts resulting MetricPromises to self.MetricsPromises.
// Errors are logged, not returned, so the selection pipeline keeps chaining.
func (self *DataSelector) GetMetrics(metricClient metricapi.MetricClient) *DataSelector {
	metricPromisesList, err := self.getMetrics(metricClient)
	if err != nil {
		log.Print(err)
		return self
	}
	metricPromises := make(metricapi.MetricPromises, 0)
	for _, promises := range metricPromisesList {
		metricPromises = append(metricPromises, promises...)
	}
	self.MetricsPromises = metricPromises
	return self
}

// GetCumulativeMetrics downloads and aggregates metrics for data cells currently present in self.GenericDataList as instructed
// by MetricQuery and inserts resulting MetricPromises to self.CumulativeMetricsPromises.
func (self *DataSelector) GetCumulativeMetrics(metricClient metricapi.MetricClient) *DataSelector {
	metricPromisesList, err := self.getMetrics(metricClient)
	if err != nil {
		log.Print(err)
		return self
	}
	metricNames := self.DataSelectQuery.MetricQuery.MetricNames
	if metricNames == nil {
		log.Print("No metrics specified. Skipping metrics.")
		return self
	}
	aggregations := self.DataSelectQuery.MetricQuery.Aggregations
	if aggregations == nil {
		aggregations = metricapi.OnlyDefaultAggregation
	}
	metricPromises := make(metricapi.MetricPromises, 0)
	for i, metricName := range metricNames {
		promises := metricClient.AggregateMetrics(metricPromisesList[i], metricName, aggregations)
		metricPromises = append(metricPromises, promises...)
	}
	self.CumulativeMetricsPromises = metricPromises
	return self
}

// Paginate the data inside as instructed by DataSelectQuery and returns itself to allow method chaining.
func (self *DataSelector) Paginate() *DataSelector {
	pQuery := self.DataSelectQuery.PaginationQuery
	dataList := self.GenericDataList
	startIndex, endIndex := pQuery.GetPaginationSettings(len(dataList))

	// Return all items if provided settings do not meet requirements
	if !pQuery.IsValidPagination() {
		return self
	}
	// Return no items if requested page does not exist
	if !pQuery.IsPageAvailable(len(self.GenericDataList), startIndex) {
		self.GenericDataList = []DataCell{}
		return self
	}

	self.GenericDataList = dataList[startIndex:endIndex]
	return self
}

// GenericDataSelect takes a list of GenericDataCells and DataSelectQuery and returns selected data as instructed by dsQuery.
func GenericDataSelect(dataList []DataCell, dsQuery *DataSelectQuery) []DataCell {
	SelectableData := DataSelector{
		GenericDataList: dataList,
		DataSelectQuery: dsQuery,
	}
	return SelectableData.Sort().Paginate().GenericDataList
}

// GenericDataSelectWithFilter takes a list of GenericDataCells and DataSelectQuery and returns selected data as instructed by dsQuery.
// The second return value is the total number of items after filtering but before pagination.
func GenericDataSelectWithFilter(dataList []DataCell, dsQuery *DataSelectQuery) ([]DataCell, int) {
	SelectableData := DataSelector{
		GenericDataList: dataList,
		DataSelectQuery: dsQuery,
	}
	// Pipeline is Filter -> Sort -> CollectMetrics -> Paginate
	filtered := SelectableData.Filter()
	filteredTotal := len(filtered.GenericDataList)
	processed := filtered.Sort().Paginate()
	return processed.GenericDataList, filteredTotal
}

// GenericDataSelectWithMetrics takes a list of GenericDataCells and DataSelectQuery and returns selected data as instructed by dsQuery.
func GenericDataSelectWithMetrics(dataList []DataCell, dsQuery *DataSelectQuery,
	cachedResources *metricapi.CachedResources, metricClient metricapi.MetricClient) (
	[]DataCell, metricapi.MetricPromises) {
	SelectableData := DataSelector{
		GenericDataList: dataList,
		DataSelectQuery: dsQuery,
		CachedResources: cachedResources,
	}
	// Pipeline is Filter -> Sort -> CollectMetrics -> Paginate
	processed := SelectableData.Sort().GetCumulativeMetrics(metricClient).Paginate()
	return processed.GenericDataList, processed.CumulativeMetricsPromises
}

// GenericDataSelectWithFilterAndMetrics takes a list of GenericDataCells and DataSelectQuery and returns selected data as instructed by dsQuery.
func GenericDataSelectWithFilterAndMetrics(dataList []DataCell, dsQuery *DataSelectQuery,
	cachedResources *metricapi.CachedResources, metricClient metricapi.MetricClient) (
	[]DataCell, metricapi.MetricPromises, int) {
	SelectableData := DataSelector{
		GenericDataList: dataList,
		DataSelectQuery: dsQuery,
		CachedResources: cachedResources,
	}
	// Pipeline is Filter -> Sort -> CollectMetrics -> Paginate
	filtered := SelectableData.Filter()
	filteredTotal := len(filtered.GenericDataList)
	processed := filtered.Sort().GetCumulativeMetrics(metricClient).Paginate()
	return processed.GenericDataList, processed.CumulativeMetricsPromises, filteredTotal
}

// PodListMetrics returns metrics for every resource on the dataList without aggregating data.
func PodListMetrics(dataList []DataCell, dsQuery *DataSelectQuery,
	metricClient metricapi.MetricClient) metricapi.MetricPromises {
	selectableData := DataSelector{
		GenericDataList: dataList,
		DataSelectQuery: dsQuery,
		CachedResources: metricapi.NoResourceCache,
	}
	processed := selectableData.GetMetrics(metricClient)
	return processed.MetricsPromises
}
src/app/backend/resource/dataselect/dataselect.go
0.755907
0.424949
dataselect.go
starcoder
package vec2

import "math"

// T represents a two dimensional vector based on float64.
// Imported by other packages as vec2.T, therefore we don't repeat type name.
type T struct {
	X, Y float64
}

// I represents a two dimensional vector based on integers.
type I struct {
	X, Y int
}

// New creates a new *T from two coordinates
func New(x, y float64) *T {
	return &T{X: x, Y: y}
}

// NewI creates a new *I from two coordinates
func NewI(x, y int) *I {
	return &I{X: x, Y: y}
}

// UX is unit vector where X=1
func UX() *T {
	return &T{1.0, 0.0}
}

// UY is unit vector where Y=1
func UY() *T {
	return &T{0.0, 1.0}
}

// UXY is the vector where X=1 and Y=1 (note: its length is sqrt(2), not 1)
func UXY() *T {
	return &T{1.0, 1.0}
}

// Coords returns the single coordinates of vector
func (t *T) Coords() (float64, float64) {
	return t.X, t.Y
}

// Set overwrites both coordinates of the receiver.
func (t *T) Set(x, y float64) {
	t.X = x
	t.Y = y
}

// Copy returns an independent copy of the receiver.
func (t *T) Copy() *T {
	p := *t
	return &p
}

// Null reports whether the vector is the zero vector.
func (t *T) Null() bool {
	return t.X == 0 && t.Y == 0
}

// Add other to receiver. Other is unchanged.
// Receiver is returned for easy chaining.
func (t *T) Add(other *T) *T {
	t.X += other.X
	t.Y += other.Y
	return t
}

// Added returns a new *T which is the sum of receiver and other.
func (t *T) Added(other *T) *T {
	p := *t
	return p.Add(other)
}

// Sub subtracts other from receiver. Other is unchanged.
// Receiver is returned for easy chaining.
func (t *T) Sub(other *T) *T {
	t.X -= other.X
	t.Y -= other.Y
	return t
}

// Subed returns a new *T which is the difference of receiver and other.
func (t *T) Subed(other *T) *T {
	p := *t
	return p.Sub(other)
}

// Mul multiplies the scalar to receiver.
// Receiver is returned for easy chaining.
func (t *T) Mul(scalar float64) *T {
	t.X *= scalar
	t.Y *= scalar
	return t
}

// Muled returns a new *T which is receiver multiplied by scalar.
func (t *T) Muled(scalar float64) *T {
	p := *t
	return p.Mul(scalar)
}

// Invert vector.
// Receiver is returned for easy chaining.
func (t *T) Invert() *T { t.X = -t.X t.Y = -t.Y return t } // Inverted returns a new *T which is receiver inverted. func (t *T) Inverted() *T { p := *t return p.Invert() } // Length returns length of receiver. func (t *T) Length() float64 { return math.Sqrt(t.X*t.X + t.Y*t.Y) } // Normalize receiver. After calling Normalize, Length is always 1. func (t *T) Normalize() { if t.Length() != 0 { t.Mul(1 / t.Length()) } } // AsI converts a float64 based vector to an integer based one. func (t *T) AsI() *I { return &I{int(t.X), int(t.Y)} } // AsT converts an integer based vector to a float64 based one. func (i *I) AsT() *T { return &T{X: float64(i.X), Y: float64(i.Y)} }
vec2/vec2.go
0.936663
0.654384
vec2.go
starcoder
package ring // NTT performes the NTT transformation on the CRT coefficients a Polynomial, based on the target context. func (context *Context) NTT(p1, p2 *Poly) { for x := range context.Modulus { NTT(p1.Coeffs[x], p2.Coeffs[x], context.N, context.nttPsi[x], context.Modulus[x], context.mredParams[x], context.bredParams[x]) } } // InvNTT performes the inverse NTT transformation on the CRT coefficients of a polynomial, based on the target context. func (context *Context) InvNTT(p1, p2 *Poly) { for x := range context.Modulus { InvNTT(p1.Coeffs[x], p2.Coeffs[x], context.N, context.nttPsiInv[x], context.nttNInv[x], context.Modulus[x], context.mredParams[x]) } } // Buttefly computes X, Y = U + V*Psi, U - V*Psi mod Q. func Butterfly(U, V, Psi, Q, Qinv uint64) (X, Y uint64) { if U > 2*Q { U -= 2 * Q } V = MRedConstant(V, Psi, Q, Qinv) X = U + V Y = U + 2*Q - V return } // InvButterfly computes X, Y = U + V, (U - V) * Psi mod Q. func InvButterfly(U, V, Psi, Q, Qinv uint64) (X, Y uint64) { X = U + V if X > 2*Q { X -= 2 * Q } Y = MRedConstant(U+2*Q-V, Psi, Q, Qinv) // At the moment it is not possible to use MRedConstant if Q > 61 bits return } // NTT computes the NTT transformation on the input coefficients given the provided params. 
// NTT computes the forward number-theoretic transform of coeffs_in into
// coeffs_out (Cooley-Tukey style butterflies with lazy reduction, followed by
// a final exact Barrett reduction). N must match len(coeffs_in)/len(coeffs_out).
func NTT(coeffs_in, coeffs_out []uint64, N uint64, nttPsi []uint64, Q, mredParams uint64, bredParams []uint64) {
	var j1, j2, t uint64
	var F uint64

	// Copies the result of the first round of butterflies on p2 with approximate reduction
	t = N >> 1
	j2 = t - 1
	F = nttPsi[1]
	for j := uint64(0); j <= j2; j++ {
		coeffs_out[j], coeffs_out[j+t] = Butterfly(coeffs_in[j], coeffs_in[j+t], F, Q, mredParams)
	}

	// Continues the rest of the second to the n-1 butterflies on p2 with approximate reduction
	for m := uint64(2); m < N; m <<= 1 {
		t >>= 1
		for i := uint64(0); i < m; i++ {
			j1 = (i * t) << 1
			j2 = j1 + t - 1
			F = nttPsi[m+i]
			for j := j1; j <= j2; j++ {
				coeffs_out[j], coeffs_out[j+t] = Butterfly(coeffs_out[j], coeffs_out[j+t], F, Q, mredParams)
			}
		}
	}

	// Finishes with an exact reduction
	for i := uint64(0); i < N; i++ {
		coeffs_out[i] = BRedAdd(coeffs_out[i], Q, bredParams)
	}
}

// InvNTT computes the inverse number-theoretic transform of coeffs_in into
// coeffs_out (Gentleman-Sande style butterflies), folding the final N^-1
// scaling into the closing Montgomery multiplication by nttNInv.
func InvNTT(coeffs_in, coeffs_out []uint64, N uint64, nttPsiInv []uint64, nttNInv, Q, mredParams uint64) {
	var j1, j2, h, t uint64
	var F uint64

	// Copies the result of the first round of butterflies on p2 with approximate reduction
	t = 1
	j1 = 0
	h = N >> 1
	for i := uint64(0); i < h; i++ {
		// With t == 1 each inner loop body runs exactly once (j2 == j1).
		j2 = j1
		F = nttPsiInv[h+i]
		for j := j1; j <= j2; j++ {
			coeffs_out[j], coeffs_out[j+t] = InvButterfly(coeffs_in[j], coeffs_in[j+t], F, Q, mredParams)
		}
		j1 = j1 + (t << 1)
	}

	// Continues the rest of the second to the n-1 butterflies on p2 with approximate reduction
	t <<= 1
	for m := N >> 1; m > 1; m >>= 1 {
		j1 = 0
		h = m >> 1
		for i := uint64(0); i < h; i++ {
			j2 = j1 + t - 1
			F = nttPsiInv[h+i]
			for j := j1; j <= j2; j++ {
				coeffs_out[j], coeffs_out[j+t] = InvButterfly(coeffs_out[j], coeffs_out[j+t], F, Q, mredParams)
			}
			j1 = j1 + (t << 1)
		}
		t <<= 1
	}

	// Finishes with an exact reduction (and applies the N^-1 factor)
	for j := uint64(0); j < N; j++ {
		coeffs_out[j] = MRed(coeffs_out[j], nttNInv, Q, mredParams)
	}
}
ring/ntt.go
0.801781
0.54958
ntt.go
starcoder
package binp

import "encoding/binary"

// Printer type. Don't touch the internals.
type Printer struct {
	w []byte
}

// Create a new printer with empty output.
func Out() *Printer {
	return &Printer{[]byte{}}
}

// Create a new printer with output prefixed with the given byte slice.
func OutWith(b []byte) *Printer {
	return &Printer{b}
}

// Create a new printer with an empty slice with the capacity given below.
func OutCap(initialcap int) *Printer {
	return &Printer{make([]byte, 0, initialcap)}
}

// Output a byte.
func (p *Printer) Byte(d byte) *Printer {
	p.w = append(p.w, d)
	return p
}

// Output a byte, synonym for .Byte.
func (p *Printer) B8(d byte) *Printer {
	p.w = append(p.w, d)
	return p
}

// Output a byte, synonym for .Byte.
func (p *Printer) N8(d byte) *Printer {
	p.w = append(p.w, d)
	return p
}

// Output 2 native endian bytes.
// (This file is the little-endian build: bytes are emitted LSB first.)
func (p *Printer) N16(d uint16) *Printer {
	p.w = append(p.w, byte(d), byte(d>>8))
	return p
}

// Output 4 native endian bytes.
func (p *Printer) N32(d uint32) *Printer {
	p.w = append(p.w, byte(d), byte(d>>8), byte(d>>16), byte(d>>24))
	return p
}

// Output 8 native endian bytes.
func (p *Printer) N64(d uint64) *Printer {
	p.w = append(p.w, byte(d), byte(d>>8), byte(d>>16), byte(d>>24), byte(d>>32), byte(d>>40), byte(d>>48), byte(d>>56))
	return p
}

// z16 is a shared run of zero bytes used to pad in chunks of up to 16.
var z16 = make([]byte, 16)

// Align to boundary
func (p *Printer) Align(n int) *Printer {
	r := len(p.w) % n
	if r == 0 {
		return p
	}
	r = n - r
	for r > 0 {
		cur := r
		if cur > 16 {
			cur = 16
		}
		p.w = append(p.w, z16[:cur]...)
		r -= cur
	}
	return p
}

// Skip (zero-fill) some bytes.
func (p *Printer) Skip(n int) *Printer {
	for n > 0 {
		cur := n
		if cur > 16 {
			cur = 16
		}
		p.w = append(p.w, z16[:cur]...)
		n -= cur
	}
	return p
}

// Output a raw byte slice with no length prefix.
func (p *Printer) Bytes(d []byte) *Printer {
	p.w = append(p.w, d...)
	return p
}

// Output a raw string with no length prefix.
func (p *Printer) String(d string) *Printer {
	p.w = append(p.w, []byte(d)...)
	return p
}

// Output a string with a 4 byte native endian length prefix and no trailing null.
func (p *Printer) N32String(d string) *Printer {
	return p.N32(uint32(len(d))).String(d)
}

// Output bytes with a 4 byte native endian length prefix and no trailing null.
func (p *Printer) N32Bytes(d []byte) *Printer {
	return p.N32(uint32(len(d))).Bytes(d)
}

// Output a string with a 2 byte native endian length prefix and no trailing null.
// Panics if the string does not fit in 16 bits.
func (p *Printer) N16String(d string) *Printer {
	if len(d) > 0xffff {
		panic("binprinter: string too long")
	}
	return p.N16(uint16(len(d))).String(d)
}

// Output a string with a 1 byte length prefix and no trailing null.
// Panics if the string does not fit in 8 bits.
func (p *Printer) N8String(d string) *Printer {
	if len(d) > 0xff {
		panic("binprinter: string too long")
	}
	return p.Byte(byte(len(d))).String(d)
}

// Output a string terminated by a null-byte
func (p *Printer) String0(d string) *Printer {
	return p.String(d).Byte(0)
}

// Get the output as a byte slice.
func (p *Printer) Out() []byte {
	return p.w
}

// Start counting bytes for the length field in question.
func (p *Printer) LenStart(l *Len) *Printer {
	l.start = len(p.w)
	return p
}

// Add a 16 bit field at the current location that will be filled with the length.
// The placeholder is zero until LenDone patches it.
func (p *Printer) LenN16(l *Len) *Printer {
	l.ls = append(l.ls, ls{uint32(len(p.w)), 2 | lenMaskNative})
	return p.N16(0)
}

// Add a 32 bit field at the current location that will be filled with the length.
func (p *Printer) LenN32(l *Len) *Printer {
	l.ls = append(l.ls, ls{uint32(len(p.w)), 4 | lenMaskNative})
	return p.N32(0)
}

// Call LenDone for all the arguments
func (p *Printer) LensDone(ls ...*Len) *Printer {
	for _, l := range ls {
		p.LenDone(l)
	}
	return p
}

// Fill fields associated with this length with the current offset.
// LenDone patches every placeholder registered on l with the number of bytes
// written since the matching LenStart. The size field's mask selects width
// (2 or 4 bytes) and byte order.
// NOTE(review): the lenMaskBE cases have no producer in this file — presumably
// big-endian Len* variants live in a sibling build file; confirm.
func (p *Printer) LenDone(l *Len) *Printer {
	plen := len(p.w) - l.start
	for _, ls := range l.ls {
		switch ls.size {
		case 2 | lenMaskNative:
			NativeEndian.PutUint16(p.w[ls.offset:], uint16(plen))
		case 4 | lenMaskNative:
			NativeEndian.PutUint32(p.w[ls.offset:], uint32(plen))
		case 2 | lenMaskBE:
			binary.BigEndian.PutUint16(p.w[ls.offset:], uint16(plen))
		case 4 | lenMaskBE:
			binary.BigEndian.PutUint32(p.w[ls.offset:], uint32(plen))
		}
	}
	return p
}

// Type for handling length fields.
type Len struct {
	ls    []ls
	start int
}

// ls records one placeholder: its byte offset in the output and its
// width-plus-endianness tag.
type ls struct {
	offset uint32
	size   uint32
}

const (
	// Endianness tags OR-ed into the width stored in ls.size.
	lenMaskNative = 1 << 30
	lenMaskBE     = 1 << 31
)
binprinter_native_le.go
0.731059
0.444927
binprinter_native_le.go
starcoder
package neat

import (
	"bytes"
	"encoding/json"
	"fmt"
	"sort"

	// NOTE(review): dot-import pollutes the package namespace; consider a named
	// import if the errors helpers are referenced explicitly.
	. "github.com/rqme/errors"
)

// ExperimentSettings supplies the run-time configuration of an experiment.
type ExperimentSettings interface {
	Iterations() int
	Traits() Traits
	FitnessType() FitnessType
	ExperimentName() string
}

// Experiment provides the definition of how to solve the problem using NEAT
type Experiment struct {
	ExperimentSettings
	ctx Context

	// State
	population Population `neat:"state"` // persisted via the context's state map
	cache      map[int]Phenome
	best       Genome
	iteration  int
	stopped    bool
}

// SetContext wires the experiment to its context and registers the population
// as restorable state. It never fails; the error return keeps the interface.
func (e *Experiment) SetContext(x Context) error {
	e.ctx = x
	e.ctx.State()["population"] = &e.population
	return nil
}

// Context returns the experiment's context.
func (e Experiment) Context() Context {
	return e.ctx
}

// Population returns the current population.
func (e Experiment) Population() Population {
	return e.population
}

// Stopped reports whether the search terminated early.
func (e Experiment) Stopped() bool {
	return e.stopped
}

// Iteration returns the current iteration index.
func (e Experiment) Iteration() int {
	return e.iteration
}

// String returns a description of the experiment
func (e Experiment) String() string {
	return fmt.Sprintf("Experiment %s at iteration %d has best genome %d with fitness %f", e.ExperimentName(), e.iteration, e.best.ID, e.best.Fitness)
}

// Runs a configured experiment. If restoring, including just the configuration, this must be done
// prior to calling Run.
func Run(e *Experiment) error { // Ensure this is a valid experiment if e.Iterations() < 1 { return fmt.Errorf("Invalid value for Iterations: %d", e.Iterations()) } // Iterate the experiment for e.iteration = 0; e.iteration < e.Iterations(); e.iteration++ { //fmt.Println("iteration", e.iteration, "best", e.best.Fitness) // Reset the innovation history //e.mrk.Reset() // Advance the population if err := advance(e); err != nil { return fmt.Errorf("Could not advance the population: %v", err) } // Update the phenome cache if err := updateCache(e); err != nil { return fmt.Errorf("Couuld not update cache in the experiment: %v", err) } // Evaluate the population if stop, err := search(e); err != nil { return fmt.Errorf("Error evaluating the population: %v", err) } else if stop { e.stopped = true break } } // Take one last archive and return if err := e.ctx.Archiver().Archive(e.ctx); err != nil { return fmt.Errorf("Could not take last archive of experiment: %v", err) } if err := e.ctx.Visualizer().Visualize(e.population); err != nil { return fmt.Errorf("Could not visualize the experiment for the last time: %v", err) } return nil } // Advances the experiment to the next generation func advance(e *Experiment) error { curr := e.population next, err := e.ctx.Generator().Generate(curr) if err != nil { return err } if next.Generation > curr.Generation { if err = e.ctx.Visualizer().Visualize(e.population); err != nil { return err } if err = e.ctx.Archiver().Archive(e.ctx); err != nil { return err } if err = updateSettings(e, e.best); err != nil { return err } } e.population = next return nil } // Update the settings based on the traits of a genome func updateSettings(e *Experiment, g Genome) error { cnt := 0 b := bytes.NewBufferString("{") for t, trait := range e.Traits() { if trait.IsSetting { if cnt > 0 { b.WriteString(",\n") } b.WriteString(fmt.Sprintf(`"%s": %f`, trait.Name, g.Traits[t])) cnt += 1 } } b.WriteString("\n}") enc := json.NewEncoder(b) return enc.Encode(&e.ctx) } 
// Updates the cache of phenomes. Phenomes for genomes already present in the
// previous cache are reused; missing ones are decoded concurrently, one
// goroutine per genome, and collected over the pc channel.
func updateCache(e *Experiment) (err error) {
	var old map[int]Phenome
	if len(e.cache) == 0 {
		old = make(map[int]Phenome, 0)
	} else {
		old = e.cache
	}
	e.cache = make(map[int]Phenome, len(e.population.Genomes))
	errs := new(Errors)
	pc := make(chan Phenome)
	cnt := 0
	for _, g := range e.population.Genomes {
		if p, ok := old[g.ID]; ok {
			e.cache[g.ID] = p
		} else {
			cnt += 1
			// NOTE(review): errs.Add is called from multiple goroutines
			// concurrently — confirm that Errors is safe for concurrent use.
			go func(g Genome) {
				p, err := e.ctx.Decoder().Decode(g)
				if err != nil {
					errs.Add(fmt.Errorf("Unable to decode genome [%d]: %v", g.ID, err))
				}
				pc <- p
			}(g)
		}
	}
	// Drain exactly as many results as goroutines launched; failed decodes
	// send a nil phenome and are skipped.
	for i := 0; i < cnt; i++ {
		p := <-pc
		if p != nil {
			e.cache[p.ID()] = p
		}
	}
	return errs.Err()
}

// Searches the population and updates the genomes' fitness
func search(e *Experiment) (stop bool, err error) {
	// Map the genomes for convenience
	m := make(map[int]int, len(e.population.Genomes))
	for i, g := range e.population.Genomes {
		m[g.ID] = i
	}

	// Perform the search
	phenomes := make([]Phenome, 0, len(e.cache))
	for _, p := range e.cache {
		phenomes = append(phenomes, p)
	}
	// Hand the phenomes to the searcher/evaluator and run optional setup.
	for _, h := range []interface{}{e.ctx.Searcher(), e.ctx.Evaluator()} {
		if ph, ok := h.(Phenomable); ok {
			if err = ph.SetPhenomes(phenomes); err != nil {
				return
			}
		}
		if sh, ok := h.(Setupable); ok {
			if err = sh.Setup(); err != nil {
				return
			}
		}
	}
	var rs Results
	if rs, err = e.ctx.Searcher().Search(phenomes); err != nil {
		return
	}
	// Optional teardown, evaluator first.
	for _, h := range []interface{}{e.ctx.Evaluator(), e.ctx.Searcher()} {
		if th, ok := h.(Takedownable); ok {
			if err = th.Takedown(); err != nil {
				return
			}
		}
	}

	// Update the fitnesses
	var best Genome
	errs := new(Errors)
	// := make([]float64, len(e.population.Genomes))
	// TODO: make this concurrent
	for _, r := range rs {
		i := m[r.ID()]
		if err = r.Err(); err != nil {
			errs.Add(fmt.Errorf("Error updating fitness for genome [%d]: %v", r.ID(), r.Err()))
		}
		e.population.Genomes[i].Fitness = r.Fitness()
		if imp, ok := r.(Improvable); ok {
			e.population.Genomes[i].Improvement = imp.Improvement()
		} else {
			// Results that do not report improvement fall back to fitness.
			e.population.Genomes[i].Improvement = e.population.Genomes[i].Fitness
		}
		//fit[i] = e.population.Genomes[i].Fitness
		if e.population.Genomes[i].Fitness > best.Fitness {
			best = e.population.Genomes[i]
		}
		stop = stop || r.Stop()
	}

	// Update the best genome. For Absolute fitness the best is monotone;
	// otherwise the best of the current generation always wins.
	if errs.Err() == nil {
		if e.FitnessType() == Absolute {
			if best.Fitness > e.best.Fitness {
				e.best = best
			}
		} else {
			e.best = best
		}
	}

	// Leave the genomes sorted by their fitness descending
	sort.Sort(sort.Reverse(e.population.Genomes))
	return stop, errs.Err()
}
experiment.go
0.676192
0.445952
experiment.go
starcoder
// Package timeutil provides helpers for snapping a time.Time to the
// beginning or end of common calendar periods.
//
// NOTE(review): day/week/month boundaries are computed with fixed 24h
// duration arithmetic rather than AddDate, which can be off by an hour
// around DST transitions — confirm callers use UTC or DST-free zones.
package timeutil

import "time"

// BeginningOfMinute returns t truncated to the start of its minute.
func BeginningOfMinute(t time.Time) time.Time {
	return t.Truncate(time.Minute)
}

// BeginningOfHour returns t truncated to the start of its hour.
func BeginningOfHour(t time.Time) time.Time {
	return t.Truncate(time.Hour)
}

// BeginningOfDay returns t moved back to midnight of the same day.
func BeginningOfDay(t time.Time) time.Time {
	d := time.Duration(-t.Hour()) * time.Hour
	return BeginningOfHour(t).Add(d)
}

// BeginningOfWeek returns midnight of the Monday of t's week
// (weeks run Monday through Sunday).
func BeginningOfWeek(t time.Time) time.Time {
	t = BeginningOfDay(t)
	weekday := int(t.Weekday())
	// time.Weekday has Sunday == 0; remap so Monday is day 1 ... Sunday day 7.
	if weekday == 0 {
		weekday = 7
	}
	weekday = weekday - 1
	d := time.Duration(-weekday) * 24 * time.Hour
	return t.Add(d)
}

// BeginningOfMonth returns midnight on the first day of t's month.
func BeginningOfMonth(t time.Time) time.Time {
	t = BeginningOfDay(t)
	d := time.Duration(-int(t.Day())+1) * 24 * time.Hour
	return t.Add(d)
}

// BeginningOfQuarter returns midnight on the first day of t's quarter.
func BeginningOfQuarter(t time.Time) time.Time {
	month := BeginningOfMonth(t)
	offset := (int(month.Month()) - 1) % 3
	return month.AddDate(0, -offset, 0)
}

// BeginningOfYear returns midnight on January 1st of t's year.
func BeginningOfYear(t time.Time) time.Time {
	t = BeginningOfDay(t)
	d := time.Duration(-int(t.YearDay())+1) * 24 * time.Hour
	return t.Truncate(time.Hour).Add(d)
}

// EndOfMinute returns the last nanosecond of t's minute.
func EndOfMinute(t time.Time) time.Time {
	return BeginningOfMinute(t).Add(time.Minute - time.Nanosecond)
}

// EndOfHour returns the last nanosecond of t's hour.
func EndOfHour(t time.Time) time.Time {
	return BeginningOfHour(t).Add(time.Hour - time.Nanosecond)
}

// EndOfDay returns the last nanosecond of t's day.
func EndOfDay(t time.Time) time.Time {
	return BeginningOfDay(t).Add(24*time.Hour - time.Nanosecond)
}

// EndOfWeek returns the last nanosecond of t's week (Sunday night).
func EndOfWeek(t time.Time) time.Time {
	return BeginningOfWeek(t).AddDate(0, 0, 7).Add(-time.Nanosecond)
}

// EndOfMonth returns the last nanosecond of t's month.
func EndOfMonth(t time.Time) time.Time {
	return BeginningOfMonth(t).AddDate(0, 1, 0).Add(-time.Nanosecond)
}

// EndOfQuarter returns the last nanosecond of t's quarter.
func EndOfQuarter(t time.Time) time.Time {
	return BeginningOfQuarter(t).AddDate(0, 3, 0).Add(-time.Nanosecond)
}

// EndOfYear returns the last nanosecond of t's year.
func EndOfYear(t time.Time) time.Time {
	return BeginningOfYear(t).AddDate(1, 0, 0).Add(-time.Nanosecond)
}

// Monday returns midnight of the Monday of t's week.
func Monday(t time.Time) time.Time {
	t = BeginningOfDay(t)
	weekday := int(t.Weekday())
	if weekday == 0 {
		weekday = 7
	}
	d := time.Duration(-weekday+1) * 24 * time.Hour
	return t.Truncate(time.Hour).Add(d)
}

// Sunday returns midnight of the Sunday at (or following) t.
func Sunday(t time.Time) time.Time {
	t = BeginningOfDay(t)
	weekday := int(t.Weekday())
	if weekday == 0 {
		return t
	} else {
		d := time.Duration(7-weekday) * 24 * time.Hour
		return t.Truncate(time.Hour).Add(d)
	}
}

// EndOfSunday returns the last nanosecond of the Sunday at or after t.
func EndOfSunday(t time.Time) time.Time {
	return Sunday(t).Add(24*time.Hour - time.Nanosecond)
}

// WeekOfMonth returns the 1-based ISO-week index of t within its month.
func WeekOfMonth(t time.Time) int {
	_, firstWeek := BeginningOfMonth(t).ISOWeek()
	_, thisWeek := t.ISOWeek()
	return 1 + thisWeek - firstWeek
}
timeutil/util.go
0.79999
0.657923
util.go
starcoder
package vm // OP represents an opcode for the VM. Operations take 0 or 1 operands. type Op int const ( // Read a value of the operand type from the wire and put itin the frame Read Op = iota // Set the current target to the value of the operand type from the frame Set // Allocate a new frame and make the target the field with the operand index Enter // Move to the previous frame Exit // Set a flag to null this field on exit SetExitNull // Append a value to the current target and enter the new value AppendArray // Append a new key-value pair (where the key is the String value in the current frame) to the current target and enter the new value AppendMap // Set the value of the field at the operand index to it's default value SetDefault // Push the current address onto the call stack and move the PC to the operand address Call // Pop the top value frmm the call stack and set the PC to that address Return // Stop the VM. If the operand is greater than zero, look up the corresponding error message and return it Halt // Move the PC to the operand Jump // Evaluate whether the Long register is equal to the operand, and set the condition register to the result EvalEqual // Evaluate whether the Long register is greater than the operand, and set the condition register to the result EvalGreater // If the condition register is true, jump to the operand instruction CondJump // Set the Long register to the operand value SetLong // Add the operand value to the Long register AddLong // Multiply the operand value by the Long register MultLong // Push the current Long register value onto the loop stack PushLoop // Pop the top of the loop stack and store the value in the Long register PopLoop // Set the field with the target index to nil NullField ) func (o Op) String() string { switch o { case Read: return "read" case Set: return "set" case Enter: return "enter" case Exit: return "exit" case SetExitNull: return "set_exit_null" case AppendArray: return "append_array" case AppendMap: 
return "append_map" case Call: return "call" case Return: return "return" case Halt: return "halt" case Jump: return "jump" case EvalEqual: return "eval_equal" case EvalGreater: return "eval_greater" case CondJump: return "cond_jump" case AddLong: return "add_long" case MultLong: return "mult_long" case SetDefault: return "set_def" case PushLoop: return "push_loop" case PopLoop: return "pop_loop" case SetLong: return "set_long" } return "Unknown" }
v7/vm/op.go
0.599954
0.749706
op.go
starcoder
package linalg

import "math"

// a sparse vector of uint64->float64s; absent keys are implicitly zero
type IFVector map[uint64]float64

// Callback types for the functional-style helpers below.
type IFVectorEach func(key uint64, value float64)
type IFVectorReduce func(key uint64, value, lastValue float64) float64
type IFVectorMap func(key uint64, value float64) float64
type IFVectorFilter func(key uint64, value float64) bool

// NewIfVector returns an empty sparse vector.
func NewIfVector() IFVector {
	return make(IFVector)
}

// ForEach applies the callback to every stored entry (iteration order is
// map order, i.e. unspecified).
func (vec IFVector) ForEach(apply IFVectorEach) {
	for key, val := range vec {
		apply(key, val)
	}
}

// Reduce folds the stored entries into a single value starting from init.
func (vec IFVector) Reduce(init float64, reducer IFVectorReduce) float64 {
	for key, val := range vec {
		init = reducer(key, val, init)
	}
	return init
}

// Map replaces each stored value with mapper(key, value) in place and
// returns the (same, mutated) vector.
func (vec IFVector) Map(mapper IFVectorMap) IFVector {
	for key, val := range vec {
		vec[key] = mapper(key, val)
	}
	return vec
}

// Filter deletes, in place, every entry for which filter returns false.
func (vec IFVector) Filter(filter IFVectorFilter) {
	for key, val := range vec {
		keep := filter(key, val)
		if !keep {
			delete(vec, key)
		}
	}
}

// FilterMap combines Filter and Map in one in-place pass: kept entries are
// remapped, rejected entries are deleted.
func (vec IFVector) FilterMap(filter IFVectorFilter, mapper IFVectorMap) {
	for key, val := range vec {
		keep := filter(key, val)
		if keep {
			vec[key] = mapper(key, val)
		} else {
			delete(vec, key)
		}
	}
}

// Sum returns the sum of all stored values.
func (vec IFVector) Sum() float64 {
	return vec.Reduce(0, func(k uint64, v, last float64) float64 {
		return last + v
	})
}

// Max returns the largest stored value.
// NOTE(review): the fold starts at 0, so an empty or all-negative vector
// yields 0 — confirm this is intended (it matches treating absent keys as 0).
func (vec IFVector) Max() float64 {
	max := func(k uint64, v, last float64) float64 {
		return math.Max(v, last)
	}
	return vec.Reduce(0, max)
}

// L1Norm returns the sum of absolute values.
func (vec IFVector) L1Norm() float64 {
	l1norm := func(key uint64, curr, last float64) float64 {
		return math.Abs(curr) + last
	}
	return vec.Reduce(0, l1norm)
}

// L2Norm returns the Euclidean norm of the vector.
func (vec IFVector) L2Norm() float64 {
	l2norm := func(key uint64, curr, last float64) float64 {
		return (curr * curr) + last
	}
	return math.Sqrt(vec.Reduce(0, l2norm))
}

// Clone returns an independent copy of the vector.
func (vec IFVector) Clone() IFVector {
	clone := NewIfVector()
	vec.ForEach(func(key uint64, value float64) {
		clone[key] = value
	})
	return clone
}

// Dot computes the dot product of the two vectors
func (vec IFVector) Dot(multiplier IFVector) float64 {
	var sum float64
	// Iterate the smaller vector; missing keys in the larger read as 0.
	small, large := orderIFVectors(vec, multiplier)
	small.ForEach(func(key uint64, value float64) {
		sum += large[key] * value
	})
	return sum
}

// MultF scales every stored value by multiplier, in place.
func (vec IFVector) MultF(multiplier float64) {
	mult := func(key uint64, val float64) float64 {
		return multiplier * val
	}
	vec.Map(mult)
}

// Prod is an element-wise product between vectors.
// a.Prod(b) = [a[1] * b[1], a[2] * b[2], ...]
func (vec IFVector) Prod(multiplier IFVector) {
	mult := func(key uint64, val float64) float64 {
		return multiplier[key] * val
	}
	vec.Map(mult)
}

// DivF divides each element in the vector by the given value.
func (vec IFVector) DivF(divisor float64) {
	div := func(key uint64, dividend float64) float64 {
		return dividend / divisor
	}
	vec.Map(div)
}

// CosineSimilarity returns the cosine of the angle between the two vectors.
func (vec IFVector) CosineSimilarity(other IFVector) float64 {
	x, y := orderIFVectors(vec, other)
	return x.Dot(y) / (x.L2Norm() * y.L2Norm())
}

// orderIFVectors returns the two vectors ordered by entry count.
func orderIFVectors(a, b IFVector) (smaller, larger IFVector) {
	if len(a) < len(b) {
		return a, b
	} else {
		return b, a
	}
}
paraphrase/linalg/vector.go
0.844697
0.699934
vector.go
starcoder
package histogram

import (
	"image"

	"github.com/anthonynsimon/bild/clone"
)

// RGBAHistogram holds a sub-histogram per RGBA channel.
// Each channel histogram contains 256 bins (8-bit color depth per channel).
type RGBAHistogram struct {
	R Histogram
	G Histogram
	B Histogram
	A Histogram
}

// Histogram holds a variable length slice of bins, which keeps track of sample counts.
type Histogram struct {
	Bins []int
}

// Max returns the highest count found in the histogram bins.
// An empty histogram yields 0.
func (h *Histogram) Max() int {
	var max int
	if len(h.Bins) > 0 {
		max = h.Bins[0]
		for i := 1; i < len(h.Bins); i++ {
			if h.Bins[i] > max {
				max = h.Bins[i]
			}
		}
	}
	return max
}

// Min returns the lowest count found in the histogram bins.
// An empty histogram yields 0.
func (h *Histogram) Min() int {
	var min int
	if len(h.Bins) > 0 {
		min = h.Bins[0]
		for i := 1; i < len(h.Bins); i++ {
			if h.Bins[i] < min {
				min = h.Bins[i]
			}
		}
	}
	return min
}

// Cumulative returns a new Histogram in which each bin is the cumulative
// value of its previous bins
func (h *Histogram) Cumulative() *Histogram {
	binCount := len(h.Bins)
	out := Histogram{make([]int, binCount)}
	if binCount > 0 {
		out.Bins[0] = h.Bins[0]
	}
	for i := 1; i < binCount; i++ {
		out.Bins[i] = out.Bins[i-1] + h.Bins[i]
	}
	return &out
}

// Image returns a grayscale image representation of the Histogram.
// The width and height of the image will be equivalent to the number of Bins in the Histogram.
func (h *Histogram) Image() *image.Gray {
	dstW, dstH := len(h.Bins), len(h.Bins)
	dst := image.NewGray(image.Rect(0, 0, dstW, dstH))

	// Guard against division by zero for an all-empty histogram.
	max := h.Max()
	if max == 0 {
		max = 1
	}

	for x := 0; x < dstW; x++ {
		// Bar height normalized to the image height via fixed-point math.
		value := ((int(h.Bins[x]) << 16 / max) * dstH) >> 16
		// Fill from the bottom up
		for y := dstH - 1; y > dstH-value-1; y-- {
			dst.Pix[y*dst.Stride+x] = 0xFF
		}
	}
	return dst
}
// NewRGBAHistogram constructs a RGBAHistogram out of the provided image.
// A sub-histogram is created per RGBA channel with 256 bins each.
func NewRGBAHistogram(img image.Image) *RGBAHistogram {
	// Normalize the input to RGBA so Pix can be walked 4 bytes per pixel.
	src := clone.AsRGBA(img)

	binCount := 256
	r := Histogram{make([]int, binCount)}
	g := Histogram{make([]int, binCount)}
	b := Histogram{make([]int, binCount)}
	a := Histogram{make([]int, binCount)}

	for y := 0; y < src.Bounds().Dy(); y++ {
		for x := 0; x < src.Bounds().Dx(); x++ {
			pos := y*src.Stride + x*4
			r.Bins[src.Pix[pos+0]]++
			g.Bins[src.Pix[pos+1]]++
			b.Bins[src.Pix[pos+2]]++
			a.Bins[src.Pix[pos+3]]++
		}
	}
	return &RGBAHistogram{R: r, G: g, B: b, A: a}
}

// Cumulative returns a new RGBAHistogram in which each bin is the cumulative
// value of its previous bins per channel.
func (h *RGBAHistogram) Cumulative() *RGBAHistogram {
	binCount := len(h.R.Bins)

	r := Histogram{make([]int, binCount)}
	g := Histogram{make([]int, binCount)}
	b := Histogram{make([]int, binCount)}
	a := Histogram{make([]int, binCount)}
	out := RGBAHistogram{R: r, G: g, B: b, A: a}

	if binCount > 0 {
		out.R.Bins[0] = h.R.Bins[0]
		out.G.Bins[0] = h.G.Bins[0]
		out.B.Bins[0] = h.B.Bins[0]
		out.A.Bins[0] = h.A.Bins[0]
	}

	for i := 1; i < binCount; i++ {
		out.R.Bins[i] = out.R.Bins[i-1] + h.R.Bins[i]
		out.G.Bins[i] = out.G.Bins[i-1] + h.G.Bins[i]
		out.B.Bins[i] = out.B.Bins[i-1] + h.B.Bins[i]
		out.A.Bins[i] = out.A.Bins[i-1] + h.A.Bins[i]
	}

	return &out
}

// Image returns an RGBA image representation of the RGBAHistogram.
// An image width of 256 represents the 256 Bins per channel and the
// image height of 256 represents the max normalized histogram value per channel.
// Each RGB channel from the histogram is mapped to its corresponding channel in the image,
// so that for example if the red channel is extracted from the image, it corresponds to the
// red channel histogram.
func (h *RGBAHistogram) Image() *image.RGBA { if len(h.R.Bins) != 256 || len(h.G.Bins) != 256 || len(h.B.Bins) != 256 || len(h.A.Bins) != 256 { panic("RGBAHistogram bins length not equal to 256") } dstW, dstH := 256, 256 dst := image.NewRGBA(image.Rect(0, 0, dstW, dstH)) maxR := h.R.Max() if maxR == 0 { maxR = 1 } maxG := h.G.Max() if maxG == 0 { maxG = 1 } maxB := h.B.Max() if maxB == 0 { maxB = 1 } for x := 0; x < dstW; x++ { binHeightR := ((int(h.R.Bins[x]) << 16 / maxR) * dstH) >> 16 binHeightG := ((int(h.G.Bins[x]) << 16 / maxG) * dstH) >> 16 binHeightB := ((int(h.B.Bins[x]) << 16 / maxB) * dstH) >> 16 // Fill from the bottom up for y := dstH - 1; y >= 0; y-- { pos := y*dst.Stride + x*4 iy := dstH - 1 - y if iy < binHeightR { dst.Pix[pos+0] = 0xFF } if iy < binHeightG { dst.Pix[pos+1] = 0xFF } if iy < binHeightB { dst.Pix[pos+2] = 0xFF } dst.Pix[pos+3] = 0xFF } } return dst }
histogram/histogram.go
0.829906
0.718026
histogram.go
starcoder
// NOTE(review): this file appears to be auto-generated mlpack Go binding
// code; keep manual edits minimal.
package mlpack

/*
#cgo CFLAGS: -I./capi -Wall
#cgo LDFLAGS: -L. -lmlpack_go_hmm_train
#include <capi/hmm_train.h>
#include <stdlib.h>
*/
import "C"

// HmmTrainOptionalParam holds the optional parameters accepted by HmmTrain.
type HmmTrainOptionalParam struct {
	Batch      bool
	Gaussians  int
	InputModel *hmmModel
	LabelsFile string
	Seed       int
	States     int
	Tolerance  float64
	Type       string
	Verbose    bool
}

// HmmTrainOptions returns the optional parameters initialized to their
// documented default values.
func HmmTrainOptions() *HmmTrainOptionalParam {
	return &HmmTrainOptionalParam{
		Batch:      false,
		Gaussians:  0,
		InputModel: nil,
		LabelsFile: "",
		Seed:       0,
		States:     0,
		Tolerance:  1e-05,
		Type:       "gaussian",
		Verbose:    false,
	}
}

/*
  This program allows a Hidden Markov Model to be trained on labeled or
  unlabeled data.  It supports four types of HMMs: Discrete HMMs, Gaussian
  HMMs, GMM HMMs, or Diagonal GMM HMMs

  Either one input sequence can be specified (with "InputFile"), or, a file
  containing files in which input sequences can be found (when
  "InputFile" and "Batch" are used together).  In addition, labels can be
  provided in the file specified by "LabelsFile", and if "Batch" is used, the
  file given to "LabelsFile" should contain a list of files of labels
  corresponding to the sequences in the file given to "InputFile".

  The HMM is trained with the Baum-Welch algorithm if no labels are provided.
  The tolerance of the Baum-Welch algorithm can be set with the
  "Tolerance" option.  By default, the transition matrix is randomly initialized
  and the emission distributions are initialized to fit the extent of the data.

  Optionally, a pre-created HMM model can be used as a guess for the transition
  matrix and emission probabilities; this is specifiable with "OutputModel".

  Input parameters:

   - inputFile (string): File containing input observations.
   - Batch (bool): If true, input_file (and if passed, labels_file) are
        expected to contain a list of files to use as input observation
        sequences (and label sequences).
   - Gaussians (int): Number of gaussians in each GMM (necessary when type
        is 'gmm').  Default value 0.
   - InputModel (hmmModel): Pre-existing HMM model to initialize training
        with.
   - LabelsFile (string): Optional file of hidden states, used for labeled
        training.  Default value ''.
   - Seed (int): Random seed.  If 0, 'std::time(NULL)' is used.  Default
        value 0.
   - States (int): Number of hidden states in HMM (necessary, unless
        model_file is specified).  Default value 0.
   - Tolerance (float64): Tolerance of the Baum-Welch algorithm.  Default
        value 1e-05.
   - Type (string): Type of HMM: discrete | gaussian | diag_gmm | gmm.
        Default value 'gaussian'.
   - Verbose (bool): Display informational messages and the full list of
        parameters and timers at the end of execution.

  Output parameters:

   - outputModel (hmmModel): Output for trained HMM.

*/
func HmmTrain(inputFile string, param *HmmTrainOptionalParam) (hmmModel) {
	// Reset mlpack's global parameter/timer state before marshalling the
	// Go-side parameters across the cgo boundary.
	resetTimers()
	enableTimers()
	disableBacktrace()
	disableVerbose()
	restoreSettings("Hidden Markov Model (HMM) Training")

	// Detect if the parameter was passed; set if so.
	setParamString("input_file", inputFile)
	setPassed("input_file")

	// Detect if the parameter was passed; set if so.
	// (Options are only forwarded when they differ from their defaults.)
	if param.Batch != false {
		setParamBool("batch", param.Batch)
		setPassed("batch")
	}

	// Detect if the parameter was passed; set if so.
	if param.Gaussians != 0 {
		setParamInt("gaussians", param.Gaussians)
		setPassed("gaussians")
	}

	// Detect if the parameter was passed; set if so.
	if param.InputModel != nil {
		setHMMModel("input_model", param.InputModel)
		setPassed("input_model")
	}

	// Detect if the parameter was passed; set if so.
	if param.LabelsFile != "" {
		setParamString("labels_file", param.LabelsFile)
		setPassed("labels_file")
	}

	// Detect if the parameter was passed; set if so.
	if param.Seed != 0 {
		setParamInt("seed", param.Seed)
		setPassed("seed")
	}

	// Detect if the parameter was passed; set if so.
	if param.States != 0 {
		setParamInt("states", param.States)
		setPassed("states")
	}

	// Detect if the parameter was passed; set if so.
	if param.Tolerance != 1e-05 {
		setParamDouble("tolerance", param.Tolerance)
		setPassed("tolerance")
	}

	// Detect if the parameter was passed; set if so.
	if param.Type != "gaussian" {
		setParamString("type", param.Type)
		setPassed("type")
	}

	// Detect if the parameter was passed; set if so.
	if param.Verbose != false {
		setParamBool("verbose", param.Verbose)
		setPassed("verbose")
		enableVerbose()
	}

	// Mark all output options as passed.
	setPassed("output_model")

	// Call the mlpack program.
	// The cgo call reads/writes the global parameter state set above.
	C.mlpackHmmTrain()

	// Initialize result variable and get output.
	var outputModel hmmModel
	outputModel.getHMMModel("output_model")

	// Clear settings.
	clearSettings()

	// Return output(s).
	return outputModel
}
hmm_train.go
0.644225
0.455986
hmm_train.go
starcoder
package main

import (
	"fmt"
	"github.com/go-gl/gl/v4.1-core/gl"
	"github.com/go-gl/glfw/v3.3/glfw"
	"github.com/go-gl/mathgl/mgl32"
	"golang.org/x/image/font"
	"golang.org/x/image/font/sfnt"
	"golang.org/x/image/math/fixed"
	"io/ioutil"
	"math"
)

var (
	// Projection and view matrices shared across the renderer.
	ProjMat = mgl32.Ident4()
	VeiwMat = mgl32.Ident4()

	ProjMatVao uint32
	VeiwMatVao uint32
)

// Drawable is anything that can upload its vertex data and draw itself.
type Drawable interface {
	Draw()
	GenVao()
}

// Point is one renderable vertex.
type Point struct {
	// Position Vectors
	P mgl32.Vec3
	// Color Vectors
	C mgl32.Vec4
	// Normal Vectors
	N mgl32.Vec3
	// Texture Coords
	T mgl32.Vec2
	// Is this corner rounded
	Threshold float32
}

func (p *Point) X() float32 {
	return p.P[0]
}

func (p *Point) Y() float32 {
	return p.P[1]
}

func (p *Point) Z() float32 {
	return p.P[2]
}

// Dist returns the Euclidean distance between the two points in the XY
// plane (Z is ignored, as in the original implementation).
//
// Fixed: the Y term previously used the SUM of the coordinates and was
// cubed — (y1+y2)^3 — instead of the squared difference.
func (p *Point) Dist(p1 *Point) float32 {
	dx := p.X() - p1.X()
	dy := p.Y() - p1.Y()
	return float32(math.Sqrt(float64(dx*dx + dy*dy)))
}

/*
 Returns a point with x, y, z as its position with
 white color and normal in the positive z axis
*/
func P(x, y, z float32) *Point {
	return &Point{P: mgl32.Vec3{x, y, z},
		C: mgl32.Vec4{1, 1, 1, 1},
		N: mgl32.Vec3{0, 0, 1},
		T: mgl32.Vec2{0, 0},
	}
}

/*
 Returns a point with x, y, z as its position,
 r,g,b,a as red, green, blue and alpha respectively and
 normal in the positive z axis direction
*/
func PC(x, y, z, r, g, b, a float32) *Point {
	return &Point{P: mgl32.Vec3{x, y, z},
		C: mgl32.Vec4{r, g, b, a},
		N: mgl32.Vec3{0, 0, 1},
		T: mgl32.Vec2{0, 0},
	}
}

/*
 Returns a point with x, y, z as its position,
 r,g,b,a as red, green, blue and alpha respectively and
 normal in the direction of normal of i,j,k
*/
func PCN(x, y, z, r, g, b, a, i, j, k float32) *Point {
	return &Point{P: mgl32.Vec3{x, y, z},
		C: mgl32.Vec4{r, g, b, a},
		N: mgl32.Vec3{i, j, k}.Normalize(),
		T: mgl32.Vec2{0, 0},
	}
}

// PCNT builds a point from position, color, normal, and texture coords.
func PCNT(x, y, z, r, g, b, a, i, j, k, tx, ty float32) *Point {
	return &Point{P: mgl32.Vec3{x, y, z},
		C: mgl32.Vec4{r, g, b, a},
		N: mgl32.Vec3{i, j, k}.Normalize(),
		T: mgl32.Vec2{tx, ty},
	}
}

/* NOTE: This function returns a new Point with the
given position */ func (p *Point) SetP(x, y, z float32) *Point { return &Point{P: mgl32.Vec3{x, y, z}, C: p.C, N: p.N, T: p.T, } } /* NOTE: This function returns a new Point with the given Color */ func (p *Point) SetC(r, g, b, a float32) *Point { return &Point{P: p.P, C: mgl32.Vec4{r, g, b, a}, N: p.N, T: p.T, } } /* NOTE: This function returns a new Point with the given Normal */ func (p *Point) SetN(i, j, k float32) *Point { return &Point{P: p.P, C: p.C, N: mgl32.Vec3{i, j, k}, T: p.T, } } func (p *Point) SetT(x, y float32) *Point { return &Point{P: p.P, C: p.C, N: p.N, T: mgl32.Vec2{x, y}, } } /* Offsets all of the given points with the positional coords of the parent point NOTE: This function returns the new points */ func (p *Point) MassOffset(pts ...*Point) []*Point { Offseted := make([]*Point, len(pts)) for i, val := range pts { Offseted[i] = P(0, 0, 0).SetP(val.X()+p.X(), val.Y()+p.Y(), val.Z()+p.Y()) Offseted[i].C, Offseted[i].N = val.C, val.N } return Offseted } type Circle struct { // Center point determines the center of the circle // And the color of the center of the circle Center *Point Vao uint32 Vbo uint32 IsFilled bool ModelMat *mgl32.Mat4 // r is the complete radius of the circle // the alpha at r is 0 // t is threshold upto which the color of the circle // does not fade R, T float32 } func NewCircle(center *Point, r, t float32, isFilled bool, modelMat mgl32.Mat4) *Circle { return &Circle{ Center: center, IsFilled: isFilled, ModelMat: &modelMat, R: r, T: t, } } func (s *Circle) PointData() []byte { arr := []byte{} radius := s.R factor := 3 + math.Sqrt2/2 for i := 0; i < 3; i++ { x := radius * float32(math.Cos(math.Pi/2+float64(i)*2*math.Pi/3)*factor) * 1.1 y := radius * float32(math.Sin(math.Pi/2+float64(i)*2*math.Pi/3)*factor) * 1.1 floatBytes := Float32SlicetoBytes(PCNT( x, y, 1, s.Center.C[0], s.Center.C[1], s.Center.C[2], s.Center.C[3], s.Center.N[0], s.Center.N[1], s.Center.N[2], x, y, ).Arr()) fmt.Println(x, y) arr = append(arr, 
floatBytes...)
		// Append the fade threshold as the 13th float of this vertex.
		arr = append(arr, Float32SlicetoBytes([]float32{s.T})...)
	}
	return arr
}

// GenVao uploads the circle's vertex data to the GPU and records the
// resulting buffer and vertex-array handles on the Circle.
func (s *Circle) GenVao() {
	data := s.PointData()
	fmt.Println(float64(len(data)) / float64(pointByteSize))
	var vbo uint32
	// Generate the buffer for the Vertex data
	gl.GenBuffers(1, &vbo)
	gl.BindBuffer(gl.ARRAY_BUFFER, vbo)
	// Fill the buffer with the Points data in our shape
	gl.BufferData(gl.ARRAY_BUFFER, len(data), gl.Ptr(data), gl.STATIC_DRAW)

	var vao uint32
	// Generate our Vertex Array
	gl.GenVertexArrays(1, &vao)
	gl.BindVertexArray(vao)
	gl.BindBuffer(gl.ARRAY_BUFFER, vbo)
	// At index 0, Put all the Position data
	gl.EnableVertexAttribArray(0)
	gl.VertexAttribPointer(0, 3, gl.FLOAT, false, pointByteSize, nil)
	// At index 1, Put all the Color data
	gl.EnableVertexAttribArray(1)
	gl.VertexAttribPointer(1, 4, gl.FLOAT, false, pointByteSize, gl.PtrOffset(12))
	// At index 2, Put all the Normal's data
	gl.EnableVertexAttribArray(2)
	gl.VertexAttribPointer(2, 3, gl.FLOAT, false, pointByteSize, gl.PtrOffset(28))
	// At index 3, Put the texture coords
	gl.EnableVertexAttribArray(3)
	gl.VertexAttribPointer(3, 2, gl.FLOAT, false, pointByteSize, gl.PtrOffset(40))
	// At index 4, Put texture coordinate threshold after which color fades
	gl.EnableVertexAttribArray(4)
	gl.VertexAttribPointer(4, 1, gl.FLOAT, false, pointByteSize, gl.PtrOffset(48))

	// store the Vao and Vbo representatives in the shape
	s.Vbo = vbo
	s.Vao = vao
}

// Draw uploads the model matrix uniform, binds the circle's VAO, and issues
// a three-vertex draw call (see PointData).
func (s *Circle) Draw() {
	UpdateUniformMat4fv("model", program, &s.ModelMat[0])
	gl.BindVertexArray(s.Vao)
	gl.DrawArrays(gl.TRIANGLES, 0, 3)
}

// Ray is a set of model-transformed points interpreted according to Type.
type Ray struct {
	Pts  []*mgl32.Vec3
	Type uint8
}

// NewRay transforms each point by modelMat and bundles the results as a Ray.
func NewRay(RayType uint8, modelMat mgl32.Mat4, points ...mgl32.Vec3) *Ray {
	transformedPoints := make([]*mgl32.Vec3, len(points))
	for i, val := range points {
		transformedPoint := mgl32.TransformCoordinate(val, modelMat)
		transformedPoints[i] = &transformedPoint
	}
	return &Ray{
		Pts:  transformedPoints,
		Type: RayType,
	}
}

// Takes a shape and checks for collision with the ray r; if there is collision
// IsColliding is true, CollidingAt is where the collision happend and // s can only be of type TRIANGLES, TRIANGLE_STRIP, TRIANGLE_FAN func (r *Ray) PolyCollide(s *Shape) (IsColliding bool, CollidingAt []*mgl32.Vec3, CollTri [][3]*mgl32.Vec3) { triang := make([]mgl32.Vec3, len(s.Triangulated)) for i, v := range s.Triangulated { triang[i] = mgl32.TransformCoordinate(*v, s.ModelMat) } switch r.Type { case RAY_TYPE_CENTERED: InitVec := r.Pts[0] for i := 1; i < len(r.Pts); i++ { for j := 0; j < len(triang)/3; j++ { IsItColling, WhereIsIt := RayTriangleCollision([2]*mgl32.Vec3{InitVec, r.Pts[i]}, [3]*mgl32.Vec3{&triang[3*j], &triang[3*j+1], &triang[3*j+2]}, ) if !IsColliding { IsColliding = IsItColling CollidingAt = append(CollidingAt, &WhereIsIt) CollTri = append(CollTri, [3]*mgl32.Vec3{&triang[3*j], &triang[3*j+1], &triang[3*j+2]}) } } } } return IsColliding, CollidingAt, CollTri } type Shape struct { // Points making up the shape Pts []*Point ModelMat mgl32.Mat4 Vao uint32 Vbo uint32 Prog uint32 Type uint32 Primitives int32 Triangulated []*mgl32.Vec3 } func NewShape(mat mgl32.Mat4, prog uint32, pts ...*Point) *Shape { return &Shape{ Pts: pts, ModelMat: mat, Prog: prog, } } func (s *Shape) Triangulate() { var triang []*mgl32.Vec3 switch s.Type { case gl.TRIANGLES: triang = make([]*mgl32.Vec3, len(s.Pts)) for i, v := range s.Pts { triang[i] = &v.P } case gl.TRIANGLE_FAN: triang = make([]*mgl32.Vec3, (len(s.Pts)-2)*3) InitVec := s.Pts[0].P n := 1 for i := 0; i < len(triang)/3; i++ { triang[3*i] = &InitVec triang[3*i+1] = &s.Pts[n].P n++ triang[3*i+2] = &s.Pts[n].P } case gl.TRIANGLE_STRIP: triang = make([]*mgl32.Vec3, (len(s.Pts)-2)*3) var prevV, prevPrevV *mgl32.Vec3 prevPrevV = &s.Pts[0].P prevV = &s.Pts[1].P for i := 2; i < len(s.Pts); i++ { triang[(i-2)*3] = prevPrevV triang[(i-2)*3+1] = prevV triang[(i-2)*3+2] = &s.Pts[i].P prevPrevV = prevV prevV = &s.Pts[i].P } } s.Triangulated = triang } func (p *Point) Arr() []float32 { return []float32{ p.P[0], p.P[1], 
p.P[2], p.C[0], p.C[1], p.C[2], p.C[3], p.N[0], p.N[1], p.N[2], p.T[0], p.T[1], } } // Do not use this function frequently, // Instead use ModelMat to transform the shapes func (p *Point) ReScale(x, y, z float32) *Point { return &Point{ P: mgl32.Vec3{p.X() * x, p.Y() * y, p.Z() * z}, C: p.C, N: p.N, } } // Do not use this function frequently, // Instead use ModelMat to transform the shapes func (s *Shape) ReScale(x, y, z float32) *Shape { S := NewShape(mgl32.Ident4(), program) ps := make([]*Point, len(s.Pts)) for i, p := range s.Pts { ps[i] = p.ReScale(x, y, z) } S.Pts = ps return S } func (s *Shape) PointData() []byte { var data []byte for _, p := range s.Pts { dataFloat := make([]float32, 0) dataFloat = append(dataFloat, p.Arr()...) dataFloat = append(dataFloat, p.Threshold) data = append(data, Float32SlicetoBytes(dataFloat)...) } return data } func (s *Shape) TransformData() []float32 { var data []float32 for i, val := range s.ModelMat { data[i] = val } return data } func (s *Shape) GenVao() { floatBytes := s.PointData() fmt.Println(float64(len(floatBytes)) / float64(pointByteSize)) var vbo uint32 // Generate the buffer for the Vertex data gl.GenBuffers(1, &vbo) gl.BindBuffer(gl.ARRAY_BUFFER, vbo) // Fill the buffer with the Points data in our shape // 49bytes = Bytes of Position + Color + Normal + Texture + byte for roundedness gl.BufferData(gl.ARRAY_BUFFER, 49*len(s.Pts), gl.Ptr(floatBytes), gl.STATIC_DRAW) var vao uint32 // Generate our Vertex Array gl.GenVertexArrays(1, &vao) gl.BindVertexArray(vao) gl.BindBuffer(gl.ARRAY_BUFFER, vbo) // At index 0, Put all the Position data gl.EnableVertexAttribArray(0) gl.VertexAttribPointer(0, 3, gl.FLOAT, false, pointByteSize, nil) // At index 1, Put all the Color data gl.EnableVertexAttribArray(1) gl.VertexAttribPointer(1, 4, gl.FLOAT, false, pointByteSize, gl.PtrOffset(12)) // At index 2, Put all the Normal's data gl.EnableVertexAttribArray(2) gl.VertexAttribPointer(2, 3, gl.FLOAT, false, pointByteSize, 
gl.PtrOffset(28)) // At index 3, Put all the Texture Coords's data gl.EnableVertexAttribArray(3) gl.VertexAttribPointer(3, 2, gl.FLOAT, false, pointByteSize, gl.PtrOffset(40)) // At index 4, Put the texture coords threshold after which color fades gl.EnableVertexAttribArray(4) gl.VertexAttribPointer(4, 1, gl.FLOAT, false, pointByteSize, gl.PtrOffset(48)) // store the Vao and Vbo representatives in the shape s.Vbo = vbo s.Vao = vao } func (s *Shape) SetTypes(mode uint32) { s.Type = mode s.Primitives = int32(len(s.Pts)) } func (s *Shape) Free() { gl.DeleteBuffers(1, &s.Vao) gl.DeleteVertexArrays(1, &s.Vao) } func (s *Shape) Draw() { UpdateUniformMat4fv("model", program, &s.ModelMat[0]) gl.BindVertexArray(s.Vao) gl.DrawArrays(s.Type, 0, s.Primitives) } type Button struct { Win *glfw.Window Geometry *Shape Text string TextShape *Shape CB Callback } type Callback func(w *glfw.Window, MX, MY float64, click3D []*mgl32.Vec3, NearTri [][3]*mgl32.Vec3) type Font struct { GlyphMap map[rune]*Shape TtfFont *sfnt.Font OgScale fixed.Int26_6 } func NewButton(x1, y1, x2, y2 float32, w *glfw.Window, text string, cb Callback, font *Font) *Button { b := new(Button) b.Geometry = NewShape(mgl32.Ident4(), program, PC(x1, y1, 1, 1, 0, 1, 1), PC(x1, y2, 1, 1, 0, 1, 1), PC(y2, x1, 1, 1, 0, 1, 1), PC(x2, y2, 1, 1, 0, 1, 1), ) b.Geometry.SetTypes(gl.TRIANGLE_STRIP) b.Win = w b.Text = text b.CB = cb b.TextShape = TextToShape(font, text) b.TextShape.ModelMat = mgl32.Translate3D(x1-x2, (y1-y2)/2, 0) ShapePrint(b.Geometry) return b } func (b *Button) Draw() { b.Geometry.Draw() b.TextShape.Draw() } func (b *Button) GenVao() { b.Geometry.GenVao() b.TextShape.GenVao() } // This function creates a new Font to be used by TextToShape function // Supply the characters to load in runes // NOTE: This function is not very memory efficient, donot call this in loop func NewFont(path string, runes string, OgScale fixed.Int26_6) *Font { // Inittialize a new Font struct f := new(Font) f.OgScale = OgScale 
f.GlyphMap = make(map[rune]*Shape) // Read and parse the file provided fontFile, err := ioutil.ReadFile(path) orDie(err) ttFont, err := sfnt.Parse(fontFile) orDie(err) f.TtfFont = ttFont // If Default scale is 0, set it to a default value if f.OgScale == 0 { f.OgScale = fixed.I(64) } boundR, err := ttFont.Bounds(nil, f.OgScale, font.HintingNone) orDie(err) bound := boundR.Max.Sub(boundR.Min) maxX, maxY := bound.X.Round(), bound.Y.Round() // Get the glyphs from rune 0 to 512 and create shapes out of them // and store them in the Font struct for _, i := range runes { // Initialize a new glyph for rune i, with the provided scale and no hinting glyph := &sfnt.Buffer{} I, err := ttFont.GlyphIndex(glyph, rune(i)) orDie(err) segs, err := ttFont.LoadGlyph(glyph, I, f.OgScale, nil) // Add the glyph to Font if needed elesewhere f.GlyphMap[rune(i)] = NewShape(mgl32.Ident4(), program) // If the given rune has no shape in it, then give it a line // This happens in case of space, escape codes and invalid characters if len(segs) == 0 { f.GlyphMap[rune(i)].Pts = make([]*Point, 2) f.GlyphMap[rune(i)].Pts[0] = P(-1, -1, 1) f.GlyphMap[rune(i)].Pts[1] = P(1, -1, 1) } else { // Get the bounds of the glyph // Make a point to store the coords of SegmentOpMoveTo prevP := P(0, 0, 0) for _, val := range segs { // Scale its coords to -1 to 1 x0, y0 := -float32(val.Args[0].X.Round())/float32(maxX), -float32(val.Args[0].Y.Round())/float32(maxY) x1, y1 := -float32(val.Args[1].X.Round())/float32(maxX), -float32(val.Args[1].Y.Round())/float32(maxY) x2, y2 := -float32(val.Args[2].X.Round())/float32(maxX), -float32(val.Args[2].Y.Round())/float32(maxY) //fmt.Println(x1, y1) switch val.Op { case sfnt.SegmentOpMoveTo: prevP = P(x0, y0, 1) case sfnt.SegmentOpLineTo: f.GlyphMap[rune(i)].Pts = append(f.GlyphMap[rune(i)].Pts, P(prevP.X(), prevP.Y(), 1), P(x0, y0, 1)) prevP = P(x0, y0, 1) case sfnt.SegmentOpQuadTo: f.GlyphMap[rune(i)].Pts = append(f.GlyphMap[rune(i)].Pts, 
LineStripToSeg(BezCurve(8/float32(f.OgScale), P(prevP.X(), prevP.Y(), 1), P(x0, y0, 1), P(x1, y1, 1))...)...) prevP = P(x1, y1, 1) case sfnt.SegmentOpCubeTo: f.GlyphMap[rune(i)].Pts = append(f.GlyphMap[rune(i)].Pts, LineStripToSeg(CubicBezCurve(8/float32(f.OgScale), P(prevP.X(), prevP.Y(), 1), P(x0, y0, 1), P(x1, y1, 1), P(x2, y2, 1))...)...) prevP = P(x2, y2, 1) } } } f.GlyphMap[rune(i)].SetTypes(gl.LINES) // f.GlyphMap[rune(i)].GenVao() orDie(err) } return f }
structs.go
0.685739
0.48438
structs.go
starcoder
package iso20022 // Provides further details on the settlement of the instruction. type SettlementInstruction3 struct { // Agent through which the instructing agent will reimburse the instructed agent. // Usage: If InstructingAgent and InstructedAgent have the same reimbursement agent, then only InstructingReimbursementAgent must be used. InstructingReimbursementAgent *BranchAndFinancialInstitutionIdentification5 `xml:"InstgRmbrsmntAgt,omitempty"` // Unambiguous identification of the account of the instructing reimbursement agent account at its servicing agent in the payment chain. InstructingReimbursementAgentAccount *CashAccount24 `xml:"InstgRmbrsmntAgtAcct,omitempty"` // Agent at which the instructed agent will be reimbursed. // Usage: If InstructedReimbursementAgent contains a branch of the InstructedAgent, then the party in InstructedAgent will claim reimbursement from that branch/will be paid by that branch. // Usage: If InstructingAgent and InstructedAgent have the same reimbursement agent, then only InstructingReimbursementAgent must be used. InstructedReimbursementAgent *BranchAndFinancialInstitutionIdentification5 `xml:"InstdRmbrsmntAgt,omitempty"` // Unambiguous identification of the account of the instructed reimbursement agent account at its servicing agent in the payment chain. 
InstructedReimbursementAgentAccount *CashAccount24 `xml:"InstdRmbrsmntAgtAcct,omitempty"` } func (s *SettlementInstruction3) AddInstructingReimbursementAgent() *BranchAndFinancialInstitutionIdentification5 { s.InstructingReimbursementAgent = new(BranchAndFinancialInstitutionIdentification5) return s.InstructingReimbursementAgent } func (s *SettlementInstruction3) AddInstructingReimbursementAgentAccount() *CashAccount24 { s.InstructingReimbursementAgentAccount = new(CashAccount24) return s.InstructingReimbursementAgentAccount } func (s *SettlementInstruction3) AddInstructedReimbursementAgent() *BranchAndFinancialInstitutionIdentification5 { s.InstructedReimbursementAgent = new(BranchAndFinancialInstitutionIdentification5) return s.InstructedReimbursementAgent } func (s *SettlementInstruction3) AddInstructedReimbursementAgentAccount() *CashAccount24 { s.InstructedReimbursementAgentAccount = new(CashAccount24) return s.InstructedReimbursementAgentAccount }
SettlementInstruction3.go
0.688992
0.515742
SettlementInstruction3.go
starcoder
package vector import "C" import ( "math" "os" "unsafe" ) func F32Compare(f1, f2 float32, precision float64) int { res := f1 - f2 switch { case math.Abs(float64(res)) < precision: return 0 case res < 0: return -1 } // >0 return 1 } type F32Vector []float32 func MakeF32(ln int) F32Vector { return make(F32Vector, ln) } func CopyF32(vec F32Vector) F32Vector { res := make(F32Vector, 0, vec.Len()) return append(res, vec...) } func UnmarshalF32(ptr unsafe.Pointer, count int) F32Vector { var ( data = (*[1 << 30]C.float)(ptr)[:count:count] res = make(F32Vector, 0, count) ) for _, v := range data { res = append(res, float32(v)) } return res } func (v F32Vector) Len() int { return len(v) } func (v *F32Vector) Add(vec F32Vector) error { ref := (*F32Vector)(v) if (*v).Len() != vec.Len() { return os.ErrInvalid } for i := range *ref { (*ref)[i] += vec[i] } return nil } func (v *F32Vector) Mul(vec F32Vector) error { ref := (*F32Vector)(v) if (*v).Len() != vec.Len() { return os.ErrInvalid } for i := range *ref { (*ref)[i] *= vec[i] } return nil } func (v *F32Vector) Sub(vec F32Vector) error { ref := (*F32Vector)(v) if (*v).Len() != vec.Len() { return os.ErrInvalid } for i := range *ref { (*ref)[i] -= vec[i] } return nil } func (v *F32Vector) Pow() { ref := (*F32Vector)(v) for i := range *ref { (*ref)[i] *= (*ref)[i] } } func (v F32Vector) Sum() (res float32) { for _, value := range v { res += value } return } func (v F32Vector) Distance() float32 { var ( res float64 ) for _, value := range v { v := float64(value) res += v * v } return float32(math.Sqrt(res)) } func (v *F32Vector) Normalize(normalizer float32) { ref := (*F32Vector)(v) for i := range *ref { (*ref)[i] /= normalizer } } func IsF32Equal(vec1, vec2 F32Vector) bool { return IsF32EqualExt(vec1, vec2, F32_EPS_DEFAULT) } func IsF32EqualExt(vec1, vec2 F32Vector, precision float64) bool { if vec1.Len() != vec2.Len() { return false } for i := 0; i < vec1.Len(); i++ { if F32Compare(vec1[i], vec2[i], precision) != 0 { return 
false } } return true } func F32Mean(vectors ...F32Vector) (F32Vector, error) { if len(vectors) == 0 { return nil, os.ErrInvalid } result := MakeF32(vectors[0].Len()) for _, vec := range vectors { if err := result.Add(vec); err != nil { return nil, err } } result.Normalize(float32(result.Len())) return result, nil } func F32Dot(vectors ...F32Vector) float32 { if len(vectors) == 0 { return .0 } result := CopyF32(vectors[0]) for i := 1; i < len(vectors); i++ { if err := result.Mul(vectors[i]); err != nil { return .0 } } return result.Sum() }
vector/f32.go
0.697712
0.470372
f32.go
starcoder
package main import ( "bytes" "fmt" "net/url" "time" "github.com/gorilla/websocket" ) // TestPacket represents a single packet that will be sent to the server and is expected to be looped back. type TestPacket struct { Type int // The packet/message types are defined in RFC 6455 Payload []byte } // TestOptions contains all needed test parameters and values for a single connection test. type TestOptions struct { URL url.URL Packets []TestPacket } // TestResult contains all measurable values from a single connection test. type TestResult struct { TotalDuration time.Duration // Total duration of DoConnectionTest. ConnectLatency time.Duration // Duration to establish the websocket connection. FirstRoundtripLatency time.Duration // Roundtrip time of the first packet. This includes the duration it takes to send the message. FullRoundtripLatency time.Duration // Roundtrip time of all packets. This includes the duration it takes to send the messages. DisconnectLatency time.Duration // Duration for connection closure. } // DoConnectionTest connects to a given web-socket server. // It will send and receive (a) message(s), and check the received message for correctness. // This assumes that the server loops back any received message. func DoConnectionTest(opt TestOptions) (TestResult, error) { startTime := time.Now() res := TestResult{} // Open connection. c, _, err := websocket.DefaultDialer.Dial(opt.URL.String(), nil) if err != nil { return res, err } defer c.Close() res.ConnectLatency = time.Now().Sub(startTime) // Receive data and/or handle errors or disconnects. done := make(chan error, 1) received := make(chan struct{}) go func() { defer close(done) index := 0 for { mType, message, err := c.ReadMessage() if err != nil { // Ignore error if the connection closed due to normal closure. 
if closeErr, ok := err.(*websocket.CloseError); ok && closeErr.Code == websocket.CloseNormalClosure { return } done <- err return } // Check if the amount is not over the expected packet amount. if index >= len(opt.Packets) { done <- fmt.Errorf("Received more packets that expected") return } // Get next expected packet. expectedPacket := opt.Packets[index] index++ // Check if the packet type is correct. if expectedPacket.Type != mType { done <- fmt.Errorf("Received unexpected packet type") return } // Check if the payload is correct. if bytes.Compare(expectedPacket.Payload, message) != 0 { done <- fmt.Errorf("Received unexpected packet payload") return } // Measure roundtrip time of the first packet. if index == 1 { res.FirstRoundtripLatency = time.Now().Sub(startTime) - res.ConnectLatency } // Signal that everything expected has been received. But don't stop this listener yet. if index == len(opt.Packets) { close(received) } } }() // Send payload. for _, packet := range opt.Packets { err = c.WriteMessage(packet.Type, packet.Payload) if err != nil { return res, err } } // Wait either until all the data was received correctly, an error was encountered, or until a timeout is reached. select { case err := <-done: return res, err case <-received: res.FullRoundtripLatency = time.Now().Sub(startTime) - res.ConnectLatency case <-time.After(5 * time.Second): return res, fmt.Errorf("Receive timeout. Not all packets were received in time") } // Cleanly close connection. err = c.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) if err != nil { return res, err } // Wait either until the connection is closed by the server (as reaction to the close message), or until a timeout is reached. 
select { case err := <-done: res.DisconnectLatency = time.Now().Sub(startTime) - res.FullRoundtripLatency res.TotalDuration = res.ConnectLatency + res.FullRoundtripLatency + res.DisconnectLatency return res, err case <-time.After(5 * time.Second): return res, fmt.Errorf("Closure timeout") } }
StressTest/connection.go
0.649912
0.420481
connection.go
starcoder
package dtls /* DTLS messages are grouped into a series of message flights, according to the diagrams below. Although each flight of messages may consist of a number of messages, they should be viewed as monolithic for the purpose of timeout and retransmission. https://tools.ietf.org/html/rfc4347#section-4.2.4 Note: The flight4b and flight5b will be only used in session resumption. Client Server ------ ------ Waiting Flight 0 ClientHello --------> Flight 1 <------- HelloVerifyRequest Flight 2 ClientHello --------> Flight 3 ServerHello \ Certificate* \ ServerKeyExchange* Flight 4 CertificateRequest* / <-------- ServerHelloDone / ServerHello \ [ChangeCipherSpec] Flight 4b <-------- Finished / Certificate* \ ClientKeyExchange \ CertificateVerify* Flight 5 [ChangeCipherSpec] / Finished --------> / [ChangeCipherSpec] \ Flight 5b Finished --------> / [ChangeCipherSpec] \ Flight 6 <-------- Finished / */ type flightVal uint8 const ( flight0 flightVal = iota + 1 flight1 flight2 flight3 flight4 flight4b flight5 flight5b flight6 ) func (f flightVal) String() string { switch f { case flight0: return "Flight 0" case flight1: return "Flight 1" case flight2: return "Flight 2" case flight3: return "Flight 3" case flight4: return "Flight 4" case flight4b: return "Flight 4b" case flight5: return "Flight 5" case flight5b: return "Flight 5b" case flight6: return "Flight 6" default: return "Invalid Flight" } } func (f flightVal) isLastSendFlight() bool { return f == flight6 || f == flight5b } func (f flightVal) isLastRecvFlight() bool { return f == flight5 || f == flight4b }
flight.go
0.748812
0.467393
flight.go
starcoder
package basic_programming import ( "fmt" ) /* {a,e,i,o,u,A,E,I,O,U} Natural Language Understanding is the subdomain of Natural Language Processing where people used to design AI based applications have ability to understand the human languages. HashInclude Speech Processing team has a project named Virtual Assistant. For this project they appointed you as a data engineer (who has good knowledge of creating clean datasets by writing efficient code). As a data engineer your first task is to make vowel recognition dataset. In this task you have to find the presence of vowels in all possible substrings of the given string. For each given string you have to print the total number of vowels. Input First line contains an integer T, denoting the number of test cases. Each of the next lines contains a string, string contains both lower case and upper case . Output Print the vowel sum Answer for each test case should be printed in a new line. Input Constraints 1<=T<=10 1<=|S|<=100000 Input 1 baceb Output 16 Explanation First line is number of input string, In given example, string is "baceb" so the substrings will be like -"b, ba, bac, bace, a, ac, ace, aceb, c, ce, ceb, e, eb, baceb" now the number of vowels in each substring will be 0, 1, 1, 2, 1, 1, 2, 2, 0, 1, 1, 1, 1, 2 and the total number will be sum of all presence which is 16. */ /* Solution: Have counts that represent a list of substring vowel counts. Calculate substrings and on each iteration append a count if the new char is a vowel. At the end compute tha total sum of the counts slice. 
*/ func PrintCountVowels() { var N int var S string fmt.Scanf("%d", &N) for i := 0;i < N; i += 1 { fmt.Scanf("%s", &S) fmt.Printf("%d ", CountVowels(S)) } } func CountVowels(s string) int { runes := []rune(s) total := 0 curr := 0 counts := []int{} for i := 0;i < len(s); i += 1 { curr = 0 for j := i+1; j <= len(s); j += 1 { if isVowel(runes[j-1]) { curr += 1 } counts = append(counts, curr) } } for _, val := range counts { total += val } return total } func isVowel(word rune) bool { switch word { case 'a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U': return true default: return false } } func Substrings(s string) []string { result := []string{} seen := make(map[string]interface{}) for i := 0;i < len(s); i += 1 { for j := i+1; j <= len(s); j += 1 { sub := s[i:j] if _, ok := seen[sub]; !ok { result = append(result, sub) seen[sub] = struct {}{} } } } return result }
basic-programming/VowelRecognition.go
0.61231
0.555254
VowelRecognition.go
starcoder
package element const Conv = ` // ToMont converts z to Montgomery form // sets and returns z = z * r^2 func (z *{{.ElementName}}) ToMont() *{{.ElementName}} { return z.Mul(z, &rSquare) } // ToRegular returns z in regular form (doesn't mutate z) func (z {{.ElementName}}) ToRegular() {{.ElementName}} { return *z.FromMont() } // String returns the string form of an {{.ElementName}} in Montgomery form func (z *{{.ElementName}}) String() string { zz := *z zz.FromMont() if zz.IsUint64() { return strconv.FormatUint(zz[0], 10) } else { var zzNeg {{.ElementName}} zzNeg.Neg(z) zzNeg.FromMont() if zzNeg.IsUint64() { return "-" + strconv.FormatUint(zzNeg[0], 10) } } vv := bigIntPool.Get().(*big.Int) defer bigIntPool.Put(vv) return zz.ToBigInt(vv).String() } // ToBigInt returns z as a big.Int in Montgomery form func (z *{{.ElementName}}) ToBigInt(res *big.Int) *big.Int { var b [Limbs*8]byte {{- range $i := reverse .NbWordsIndexesFull}} {{- $j := mul $i 8}} {{- $k := sub $.NbWords 1}} {{- $k := sub $k $i}} {{- $jj := add $j 8}} binary.BigEndian.PutUint64(b[{{$j}}:{{$jj}}], z[{{$k}}]) {{- end}} return res.SetBytes(b[:]) } // ToBigIntRegular returns z as a big.Int in regular form func (z {{.ElementName}}) ToBigIntRegular(res *big.Int) *big.Int { z.FromMont() return z.ToBigInt(res) } // Bytes returns the regular (non montgomery) value // of z as a big-endian byte array. func (z *{{.ElementName}}) Bytes() (res [Limbs*8]byte) { _z := z.ToRegular() {{- range $i := reverse .NbWordsIndexesFull}} {{- $j := mul $i 8}} {{- $k := sub $.NbWords 1}} {{- $k := sub $k $i}} {{- $jj := add $j 8}} binary.BigEndian.PutUint64(res[{{$j}}:{{$jj}}], _z[{{$k}}]) {{- end}} return } // Marshal returns the regular (non montgomery) value // of z as a big-endian byte slice. func (z *{{.ElementName}}) Marshal() []byte { b := z.Bytes() return b[:] } // SetBytes interprets e as the bytes of a big-endian unsigned integer, // sets z to that value (in Montgomery form), and returns z. 
func (z *{{.ElementName}}) SetBytes(e []byte) *{{.ElementName}} { // get a big int from our pool vv := bigIntPool.Get().(*big.Int) vv.SetBytes(e) // set big int z.SetBigInt(vv) // put temporary object back in pool bigIntPool.Put(vv) return z } // SetBigInt sets z to v (regular form) and returns z in Montgomery form func (z *{{.ElementName}}) SetBigInt(v *big.Int) *{{.ElementName}} { z.SetZero() var zero big.Int // fast path c := v.Cmp(&_modulus) if c == 0 { // v == 0 return z } else if c != 1 && v.Cmp(&zero) != -1 { // 0 < v < q return z.setBigInt(v) } // get temporary big int from the pool vv := bigIntPool.Get().(*big.Int) // copy input + modular reduction vv.Set(v) vv.Mod(v, &_modulus) // set big int byte value z.setBigInt(vv) // release object into pool bigIntPool.Put(vv) return z } // setBigInt assumes 0 <= v < q func (z *{{.ElementName}}) setBigInt(v *big.Int) *{{.ElementName}} { vBits := v.Bits() if bits.UintSize == 64 { for i := 0; i < len(vBits); i++ { z[i] = uint64(vBits[i]) } } else { for i := 0; i < len(vBits); i++ { if i%2 == 0 { z[i/2] = uint64(vBits[i]) } else { z[i/2] |= uint64(vBits[i]) << 32 } } } return z.ToMont() } // SetString creates a big.Int with s (in base 10) and calls SetBigInt on z func (z *{{.ElementName}}) SetString( s string) *{{.ElementName}} { // get temporary big int from the pool vv := bigIntPool.Get().(*big.Int) if _, ok := vv.SetString(s, 10); !ok { panic("{{.ElementName}}.SetString failed -> can't parse number in base10 into a big.Int") } z.SetBigInt(vv) // release object into pool bigIntPool.Put(vv) return z } `
field/internal/templates/element/conv.go
0.838878
0.563618
conv.go
starcoder
package MatrixHelper import ( "fmt" "gonum.org/v1/gonum/mat" "gonum.org/v1/gonum/stat/distuv" "math" ) // PrintMatrix prints the given matrix onto the console with a nice format. func PrintMatrix(matrix mat.Matrix) { A := mat.Formatted(matrix, mat.Squeeze()) fmt.Printf("%4.6f", A) print("\n") } // AddScalar adds a scalar to each individual element. // and saves the result on the given matrix. func AddScalar(matrix mat.Dense, scalar float64) { r, c := matrix.Dims() scalarM := NewOnes(r, c) scalarM.Scale(scalar, scalarM) matrix.Add(scalarM, &matrix) } // NewOnes returns a new Ones matrix with the given dimensions. func NewOnes(r, c int) *mat.Dense { data := make([]float64, r*c) for i := 0; i < r*c; i++ { data[i] = 1 } return mat.NewDense(r, c, data) } // NewZeroes returns a new Zeroes matrix with the given dimensions. func NewZeroes(r, c int) *mat.Dense { data := make([]float64, r*c) for i := 0; i < r*c; i++ { data[i] = 0 } return mat.NewDense(r, c, data) } // RandomWeightArray returns a data array used to initialize of the specified size with random entries using uniform distribution. func RandomWeightArray(size int, lastLayerSize float64) []float64 { data := make([]float64, size) distribution := distuv.Uniform{ //Apparently this is a common type of distribution in neural networks Min: -1 / math.Sqrt(lastLayerSize), Max: 1 / math.Sqrt(lastLayerSize), } for i := 0; i < size; i++ { data[i] = distribution.Rand() } return data } // MulM wrapper for Mul in the Gonum Mat. Returns the product of the multiplication. func MulM(A, B mat.Matrix) mat.Matrix { var product mat.Dense product.Mul(A, B) return &product } // AddM wrapper for Add in the Gonum Mat. Returns the result of the addition. func AddM(A, B mat.Matrix) mat.Matrix { var sum mat.Dense sum.Add(A, B) return &sum } // ScaleM wrapper for Scale in the Gonum Mat. Returns the scaled matrix. 
func ScaleM(alpha float64, A mat.Matrix) mat.Matrix { var scaledM mat.Dense scaledM.Scale(alpha, A) return &scaledM } // ApplyM wrapper for Apply in the Gonum Mat. Returns the resulting matrix after applying a function to each element. func ApplyM(fn func(a, b int, c float64) float64, A mat.Matrix) mat.Matrix { var result mat.Dense result.Apply(fn, A) return &result }
RestaurantChatbot/MatrixHelper/Helper.go
0.851429
0.576125
Helper.go
starcoder
package godouble import ( "fmt" "reflect" ) //T is compatible with builtin testing.T type T interface { Errorf(format string, args ...interface{}) Fatalf(format string, args ...interface{}) Logf(format string, args ...interface{}) Helper() } //MatcherForMethod can be used to integrate a different matching framework type MatcherForMethod func(t T, m reflect.Method, chained MethodArgsMatcher, matchers ...interface{}) MethodArgsMatcher //ReturnsForMethod can be used to integrate a different return values framework type ReturnsForMethod func(t T, m reflect.Method, chained ReturnValues, returnValues ...interface{}) ReturnValues /* We could use this to determine what phase we are in as some methods are only valid in Setup Phase type TestPhase int const ( SetupPhase TestPhase = iota ExercisePhase VerifyPhase TeardownPhase ) */ /* A TestDouble is an object that can substitute for a concrete implementation of an interface in a 4 phase testing framework (Setup, Exercise, Verify, Teardown). Setup phase Expected method calls to the double can be configured as one of the following types. 1) Stub - Returns known values in response to calls against matching input arguments 2) Mock - A stub with pre-built expectations about the number and order of method invocations on matching calls 3) Spy - A stub that records calls as they execute 4) Fake - A substitute implementation for the method Exercise phase Any methods invoked on the double are sent to the first matching call that has been configured. If no matching call is available, the DefaultMethodCallType for this double is generated. Verify phase The Verify() method is used to confirm expectations on Mock methods have been met. Spies (and Fakes) have explicit methods to assert the number and order of method invocations on subsets of calls. 
*/ type TestDouble struct { t T methods map[string]*method defaultCall func(Method) MethodCall defaultReturnValues func(Method) ReturnValues forInterface reflect.Type trace bool matcher MatcherForMethod returns ReturnsForMethod } // Enable tracing of all received method calls (via T.Logf) func (d *TestDouble) EnableTrace() { d.trace = true } func (d *TestDouble) DisableTrace() { d.trace = false } /* SetDefaultCall allows caller to provide a function to decide whether to Stub, Mock, Spy or Fake a call that was not explicitly registered in Setup phase. the default function is a mock that never expects to be called. */ func (d *TestDouble) SetDefaultCall(defaultCall func(Method) MethodCall) { d.defaultCall = defaultCall } /* SetDefaultReturnValues allows a caller to provide a function to generate default return values for a Stub, Mock, or Spy that was not explicitly registered with ReturnValues during Setup. The default is to used zeroed values via reflection. */ func (d *TestDouble) SetDefaultReturnValues(defaultReturns func(Method) ReturnValues) { d.defaultReturnValues = defaultReturns } func (d *TestDouble) SetMatcherIntegration(forMethod MatcherForMethod) { d.matcher = forMethod } func (d *TestDouble) SetReturnValuesIntegration(forMethod ReturnsForMethod) { d.returns = forMethod } func (d *TestDouble) String() string { return fmt.Sprintf("DoubleFor(%v)", d.forInterface) } func (d *TestDouble) T() T { return d.t } //MethodCall is an abstract interface of specific call types, Stub, Mock, Spy and Fake type MethodCall interface { matches(args []interface{}) bool spy(args []interface{}) ([]interface{}, error) verify(T) } /* NewDouble Constructor for TestDouble called by specific implementation of test doubles. 
forInterface is expected to be the nil implementation of an interface - (*Iface)(nil) configurators are used to configure tracing and default behaviour for unregistered method calls and return values */ func NewDouble(t T, forInterface interface{}, configurators ...func(*TestDouble)) *TestDouble { doubleFor := reflect.TypeOf(forInterface) if doubleFor.Kind() != reflect.Ptr || doubleFor.Elem().Kind() != reflect.Interface { t.Fatalf("Expecting '%v' to be a pointer to nil interface", forInterface) } doubleFor = doubleFor.Elem() double := &TestDouble{ t: t, forInterface: doubleFor, methods: make(map[string]*method, doubleFor.NumMethod()), } for i := 0; i < doubleFor.NumMethod(); i++ { m := doubleFor.Method(i) double.methods[m.Name] = newMethod(double, m) } defaults(double) for _, c := range configurators { c(double) } if double.matcher == nil { t.Fatalf("%v need SetMatcherIntegration() configured", doubleFor) } if double.returns == nil || double.defaultReturnValues == nil { t.Fatalf("%v needs both SetReturnValuesIntegration and SetDefaultReturnValues configured", doubleFor) } if double.defaultCall == nil { t.Fatalf("%v needs SetDefaultCall configured", doubleFor) } return double } /* Stub adds and returns a StubbedMethodCall for methodName on TestDouble d Setup phase Configure Matcher and ReturnValues. By default a StubbedMethodCall matches any arguments and returns zero values for all outputs. Exercise Phase The first stub matching the invocation arguments will provide the output values. Verify Phase Nothing to verify */ func (d *TestDouble) Stub(methodName string) (stub StubbedMethodCall) { if m, found := d.methods[methodName]; found { m.mutex.Lock() defer m.mutex.Unlock() stub = m.Stub() m.addMethodCall(stub) } else { d.t.Fatalf("Cannot Stub non existent method %s for %v", methodName, d) } return } /* Mock adds and returns a MockedMethodCall for methodName on TestDouble d Setup Phase Configure Matcher, sequencing (After), and Return Values. 
Set Expectation on number of matching invocations. By default a MockedMethodCall matches any arguments, returns zero values for all outputs and expects exactly one invocation. Exercise Phase The first mock matching the invocation arguments and not yet Complete in terms of Expectation will provide the output values. Verify Phase (via call to a TestDouble.Verify() usually deferred immediately after the double is created) Will assert the Expectation is met. */ func (d *TestDouble) Mock(methodName string) (mock MockedMethodCall) { if m, found := d.methods[methodName]; found { m.mutex.Lock() defer m.mutex.Unlock() mock = m.Mock() m.addMethodCall(mock) } else { d.t.Fatalf("Cannot Mock non existent method %s for %v", methodName, d) } return } /* Spy records all calls to methodName. Setup Phase Configure ReturnValues. Calling Spy twice for the same method will return the same Value (ie there is only every one spy, and it will record methods that do not match any preceding Stub or Mock calls) Exercise Phase Matches and records all invocations. Verify Phase Can be called again to retrieve the spy for the method (eg to get a dynamically created default Spy). Extract subsets of RecordedCalls and then verify an Expectations on the number of calls in the subset. */ func (d *TestDouble) Spy(methodName string) (spy SpyMethodCall) { if m, found := d.methods[methodName]; found { m.mutex.Lock() defer m.mutex.Unlock() for _, methodCall := range m.calls { if call, isa := methodCall.(SpyMethodCall); isa { return call } } spy = m.Spy() m.addMethodCall(spy) } else { d.t.Fatalf("Cannot Spy on non existent method %s for %v", methodName, d) } return } /* Fake installs a user implementation for the method. Setup Phase Install the Fake implementation, which must match the signature of the method. Only one fake is installed for a method, and clobbers any other configured calls. Exercise Phase Invokes the fake function via reflection, and records the call as per Spy. 
Verify Phase Explicitly verify RecordedCalls as per Spy. */ func (d *TestDouble) Fake(methodName string, impl interface{}) (fake FakeMethodCall) { if m, found := d.methods[methodName]; found { m.mutex.Lock() defer m.mutex.Unlock() for _, methodCall := range m.calls { if call, isa := methodCall.(SpyMethodCall); isa { d.t.Fatalf("unreachable fake for %s.%s which has previously registered a spy (%v)", d, methodName, call) } } fake = m.Fake(impl) m.addMethodCall(fake) } else { d.t.Fatalf("Cannot Fake non existent method %v.%s", d, methodName) } return } func (d *TestDouble) Verify() { d.t.Helper() for _, method := range d.methods { for _, methodCall := range method.calls { methodCall.verify(d.t) } } } //Invoke is called by specialised mock implementations, and sometimes by Fake implementations //to record the invocation of a method. func (d *TestDouble) Invoke(methodName string, args ...interface{}) []interface{} { d.t.Helper() method, found := d.methods[methodName] if !found { d.t.Fatalf("Unexpected call to unknown methodName %T.%s", d, methodName) } return method.invoke(args) } type Verifiable interface { Verify() } //Verify is shorthand to Verify a set of TestDoubles func Verify(testDoubles ...Verifiable) { for _, td := range testDoubles { td.Verify() } }
godouble/double.go
0.699562
0.511656
double.go
starcoder
package rgbmatrix import ( "image" "image/draw" "image/gif" "io" "time" ) // ToolKit is a convinient set of function to operate with a led of Matrix type ToolKit struct { // Canvas is the Canvas wrapping the Matrix, if you want to instanciate // a ToolKit with a custom Canvas you can use directly the struct, // without calling NewToolKit Canvas *Canvas // Transform function if present is applied just before draw the image to // the Matrix, this is a small example: // tk.Transform = func(img image.Image) *image.NRGBA { // return imaging.Fill(img, 64, 96, imaging.Center, imaging.Lanczos) // } Transform func(img image.Image) *image.NRGBA } // NewToolKit returns a new ToolKit wrapping the given Matrix func NewToolKit(m Matrix) *ToolKit { return &ToolKit{ Canvas: NewCanvas(m), } } // PlayImage draws the given image during the given delay func (tk *ToolKit) PlayImage(i image.Image, delay time.Duration) error { start := time.Now() defer func() { time.Sleep(delay - time.Since(start)) }() if tk.Transform != nil { i = tk.Transform(i) } draw.Draw(tk.Canvas, tk.Canvas.Bounds(), i, image.ZP, draw.Over) return tk.Canvas.Render() } // PlayImages draws a sequence of images during the given delays, the len of // images should be equal to the len of delay. If loop is true the function // loops over images until a true is sent to the returned chan func (tk *ToolKit) PlayImages(images []image.Image, delay []time.Duration, loop int) chan bool { quit := make(chan bool, 0) go func() { l := len(images) i := 0 for { select { case <-quit: return default: tk.PlayImage(images[i], delay[i]) } i++ if i >= l { if loop == 0 { i = 0 continue } break } } }() return quit } // PlayGIF reads and draw a gif file from r. 
It use the contained images and // delays and loops over it, until a true is sent to the returned chan func (tk *ToolKit) PlayGIF(r io.Reader) (chan bool, error) { gif, err := gif.DecodeAll(r) if err != nil { return nil, err } delay := make([]time.Duration, len(gif.Delay)) images := make([]image.Image, len(gif.Image)) for i, image := range gif.Image { images[i] = image delay[i] = time.Millisecond * time.Duration(gif.Delay[i]) * 10 } return tk.PlayImages(images, delay, gif.LoopCount), nil } // Close close the toolkit and the inner canvas func (tk *ToolKit) Close() error { return tk.Canvas.Close() }
toolkit.go
0.707809
0.424472
toolkit.go
starcoder
package main import "fmt" func main() { t_parity() t_kronecker() t_identities() } func assert(x bool) { if !x { panic("assertion failed") } } /* e_ijk*e_ipq = k_jp*k_kq - k_kp*k_jq e_ijk*e_ijp = k_pk ijkpq are any combination of indices with value [1, 2, 3] ijk have to be independent of each other */ func t_identities() { fmt.Println("Testing identities") for i := 1; i <= 3; i++ { for j := 1; j <= 3; j++ { for k := 1; k <= 3; k++ { if i == j || i == k || j == k { continue } a := []int{i, j, k} for p := 1; p <= 3; p++ { for q := 1; q <= 3; q++ { b := []int{i, p, q} x := parity(a) * parity(b) y := kronecker(j, p)*kronecker(k, q) - kronecker(k, p)*kronecker(j, q) fmt.Println(i, j, k, k, q, "|", x, y) assert(x == y) b = []int{i, j, p} x = parity(a) * parity(b) y = kronecker(p, k) fmt.Println(i, j, k, p, "|", x, y) assert(x == y) } } } } } } func t_parity() { tab := [][]int{ {2, 3, 3}, {1, 1, 2}, {3, 2, 3}, {1, 1, 3}, {1, 2, 2}, {1, 3, 3}, {1, 2, 3}, {2, 3, 1}, {3, 1, 2}, {2, 1, 3}, {3, 2, 1}, {1, 3, 2}, } fmt.Println("Testing Parity") for i := range tab { fmt.Println(tab[i], parity(tab[i])) } fmt.Println() } func t_kronecker() { fmt.Println("Testing Kronecker") for i := 0; i < 3; i++ { for j := 0; j < 3; j++ { fmt.Println(i, j, "|", kronecker(i, j)) } } fmt.Println() } /* Also known as the permutation, alternating, or Levi-Civita symbol. We can calculate the parity by seeing how many swaps we need to get to the standard permutation Example: (3 2 1) -> (1 2 3) would take 1 swap (odd number so parity is -1) (2 3 1) -> (1 2 3) would take 2 swap (even number so parity is +1) (2 2 1) -> (1 2 3) is impossible (parity is 0) Common variable notion to permutation index: i = x = 1 j = y = 2 k = z = 3 Example: ijk = xyz = (1 2 3) */ func parity(a []int) int { b := append([]int{}, a...) 
p := 1 loop: for i := 0; i < len(b); i++ { for j := i; j < len(b); j++ { if b[j]-1 == i && i != j { b[i], b[j], p = b[j], b[i], -p if b[i] == b[j] { return 0 } continue loop } } if b[i]-1 != i { return 0 } } return p } /* A kronecker symbol is also commonly used as a index symbol o_ij = 1 if i == j o_ij = 0 if i != j Represents the diagonal indices in the matrix */ func kronecker(i, j int) int { if i == j { return 1 } return 0 }
math/permutation-symbol.go
0.54577
0.454654
permutation-symbol.go
starcoder
package bit import ( "bytes" "fmt" ) // A Set256 represents a set of integers in the range [0, 256). // It does so more efficiently than a Set of capacity 256. // For efficiency, the methods of Set256 perform no bounds checking on their // arguments. type Set256 struct { sets [4]Set64 } func (s *Set256) Add(n uint8) { s.sets[n/64].Add(n % 64) } func (s *Set256) Remove(n uint8) { s.sets[n/64].Remove(n % 64) } func (s *Set256) Contains(n uint8) bool { return s.sets[n/64].Contains(n % 64) } func (s *Set256) Empty() bool { return s.sets[0].Empty() && s.sets[1].Empty() && s.sets[2].Empty() && s.sets[3].Empty() } func (s *Set256) Clear() { s.sets[0].Clear() s.sets[1].Clear() s.sets[2].Clear() s.sets[3].Clear() } func (s *Set256) Size() int { return s.sets[0].Size() + s.sets[1].Size() + s.sets[2].Size() + s.sets[3].Size() } func (Set256) Capacity() int { return 256 } func (s1 *Set256) Equal(s2 *Set256) bool { return s1.sets[0] == s2.sets[0] && s1.sets[1] == s2.sets[1] && s1.sets[2] == s2.sets[2] && s1.sets[3] == s2.sets[3] } // Position returns the 0-based position of n in the set. If // the set is {3, 8, 15}, then the position of 8 is 1. // If n is not in the set, returns 0, false. // If not a member, return where it would go. // The second return value reports whether n is a member of b. 
func (b *Set256) Position(n uint8) (int, bool) { var pos int i := n / 64 switch i { case 1: pos = b.sets[0].Size() case 2: pos = b.sets[0].Size() + b.sets[1].Size() case 3: pos = b.sets[0].Size() + b.sets[1].Size() + b.sets[2].Size() } p, ok := b.sets[i].Position(n % 64) return pos + p, ok } // c = a intersect b // func (c *Set256) Intersect2(a, b *Set256) { // c.sets[0] = a.sets[0] & b.sets[0] // c.sets[1] = a.sets[1] & b.sets[1] // c.sets[2] = a.sets[2] & b.sets[2] // c.sets[3] = a.sets[3] & b.sets[3] // } // c cannot be one of sets func (c *Set256) IntersectN(bs []*Set256) { if len(bs) == 0 { c.Clear() return } for i := 0; i < len(c.sets); i++ { c.sets[i] = bs[0].sets[i] for _, s := range bs[1:] { c.sets[i].IntersectWith(s.sets[i]) } } } // Fill a with set elements, starting from start. // Return the number added. func (s *Set256) Elements(a []uint8, start uint8) int { if len(a) == 0 { return 0 } si := start / 64 n := elements(s.sets[si], a, start%64, si*64) for i := si + 1; i < 4; i++ { n += elements(s.sets[i], a[n:], 0, i*64) } return n } func elements(s Set64, a []uint8, start, high uint8) int { n := s.Elements(a, start) for i := 0; i < n; i++ { a[i] |= high } return n } func (s *Set256) Elements64(a []uint64, start uint8, high uint64) int { if len(a) == 0 { return 0 } si := start / 64 n := s.sets[si].Elements64(a, start%64, high|uint64(si*64)) for i := si + 1; i < 4; i++ { n += s.sets[i].Elements64(a[n:], 0, high|uint64(i*64)) } return n } func (s Set256) String() string { var a [256]uint64 n := s.Elements64(a[:], 0, 0) if n == 0 { return "{}" } // TODO: use strings.Builder var buf bytes.Buffer fmt.Fprintf(&buf, "{%d", a[0]) for _, e := range a[1:n] { fmt.Fprintf(&buf, ", %d", e) } fmt.Fprint(&buf, "}") return buf.String() } // For subber, used in node: func (s *Set256) add(e uint64) { s.Add(uint8(e)) } func (s *Set256) remove(e uint64) bool { s.Remove(uint8(e)) return s.Empty() } func (s *Set256) contains(e uint64) bool { return s.Contains(uint8(e)) } func 
(s *Set256) size() int { return s.Size() } func (s *Set256) memSize() uint64 { return memSize(*s) } func (s *Set256) elements(a []uint64, start, high uint64) int { return s.Elements64(a, uint8(start), high) } func (s *Set256) equalSub(b subber) bool { return s.Equal(b.(*Set256)) }
set256.go
0.683736
0.519278
set256.go
starcoder
package mat import ( "github.com/angelsolaorbaiceta/inkmath/nums" "github.com/angelsolaorbaiceta/inkmath/vec" ) /* A DenseMat is an implementation of a dense Matrix. Dense matrices allocate all the memory required to store every value. Every value which hasn't been explecitly set is zero. */ type DenseMat struct { rows, cols int data [][]float64 } /* MakeSquareDense creates a new dense matrix (strores zeroes) with the given dimension all filled with zeroes. */ func MakeSquareDense(size int) *DenseMat { return MakeDense(size, size) } /* MakeDense creates a new dense matrix (stores zeroes) with the given rows and columns filled with zeroes. */ func MakeDense(rows, cols int) *DenseMat { data := make([][]float64, rows) for i := 0; i < rows; i++ { data[i] = make([]float64, cols) } return &DenseMat{rows, cols, data} } // MakeDenseWithData creates a new matrix initialized with the given data. func MakeDenseWithData(rows, cols int, data []float64) *DenseMat { matrix := MakeDense(rows, cols) FillMatrixWithData(matrix, data) return matrix } // Rows returns the number of rows in the matrix. func (m DenseMat) Rows() int { return m.rows } // Cols returns the number of columns in the matrix. func (m DenseMat) Cols() int { return m.cols } // Value returns the value at a given row and column. func (m DenseMat) Value(row, col int) float64 { return m.data[row][col] } // NonZeroIndicesAtRow returns a slice with all non-zero elements indices for the given row. func (m DenseMat) NonZeroIndicesAtRow(row int) []int { indices := make([]int, 0) for i, val := range m.data[row] { if !nums.IsCloseToZero(val) { indices = append(indices, i) } } return indices } // TimesVector creates a new vector result of multiplying this matrix and a vector. 
func (m DenseMat) TimesVector(v vec.ReadOnlyVector) vec.ReadOnlyVector { if m.Cols() != v.Length() { panic("Can't multiply matrix vs vector due to size mismatch") } var ( result = vec.Make(m.rows) sum float64 ) for rowIndex := 0; rowIndex < m.Rows(); rowIndex++ { sum = 0.0 for colIndex := 0; colIndex < m.Cols(); colIndex++ { sum += m.data[rowIndex][colIndex] * v.Value(colIndex) } result.SetValue(rowIndex, sum) } return result } // TimesMatrix multiplies this matrix with other. func (m DenseMat) TimesMatrix(other ReadOnlyMatrix) ReadOnlyMatrix { if m.Cols() != other.Rows() { panic("Can't multiply matrices due to size mismatch") } var ( rows = m.Rows() cols = other.Cols() sum float64 result = MakeDense(rows, cols) ) for i := 0; i < rows; i++ { // cummulative sum of this.row x other.column for j := 0; j < cols; j++ { sum = 0.0 for k := 0; k < other.Rows(); k++ { sum += m.data[i][k] * other.Value(k, j) } result.data[i][j] = sum } } return result } // RowTimesVector returns the result of multiplying the row at the given index times the given vector. func (m DenseMat) RowTimesVector(row int, v vec.ReadOnlyVector) float64 { if m.Cols() != v.Length() { panic("Can't multiply matrix row with vector due to size mismatch") } var ( rowData = m.data[row] result = 0.0 ) for i := 0; i < m.Cols(); i++ { result += rowData[i] * v.Value(i) } return result }
mat/dense_matrix.go
0.851197
0.752217
dense_matrix.go
starcoder
package cityhash // Some primes between 2^63 and 2^64 for various uses. const ( k0 = uint64(0xc3a5c85c97cb3127) k1 = uint64(0xb492b66fbe98f273) k2 = uint64(0x9ae16a3b2f90404f) ) // Magic numbers for 32-bit hashing. Copied from Murmur3. const ( c1 = uint32(0xcc9e2d51) c2 = uint32(0x1b873593) ) // Hash64 returns a 64-bit hash for a slice of bytes. func Hash64(s []byte) uint64 { n := uint64(len(s)) if n <= 32 { if n <= 16 { return hash64Len0to16(s) } return hash64Len17to32(s) } else if n <= 64 { return hash64Len33to64(s) } // For strings over 64 bytes we hash the end first, and then as we loop we // keep 56 bytes of state: v, w, x, y, and z. x := fetch64(s[n-40:]) y := fetch64(s[n-16:]) + fetch64(s[n-56:]) z := hash64Len16(fetch64(s[n-48:])+n, fetch64(s[n-24:])) v1, v2 := weakHashLen32WithSeeds(s[n-64:], n, z) w1, w2 := weakHashLen32WithSeeds(s[n-32:], y+k1, x) x = x*k1 + fetch64(s) // Decrease n to the nearest multiple of 64, and operate on 64-byte chunks. n = (n - 1) &^ 63 for { x = ror64(x+y+v1+fetch64(s[8:]), 37) * k1 y = ror64(y+v2+fetch64(s[48:]), 42) * k1 x ^= w2 y += v1 + fetch64(s[40:]) z = ror64(z+w1, 33) * k1 v1, v2 = weakHashLen32WithSeeds(s, v2*k1, x+w1) w1, w2 = weakHashLen32WithSeeds(s[32:], z+w2, y+fetch64(s[16:])) z, x = x, z s = s[64:] n -= 64 if n == 0 { break } } return hash64Len16(hash64Len16(v1, w1)+shiftMix(y)*k1+z, hash64Len16(v2, w2)+x) } // Hash64WithSeed returns a 64-bit hash for s that includes seed. func Hash64WithSeed(s []byte, seed uint64) uint64 { return Hash64WithSeeds(s, k2, seed) } // Hash64WithSeeds returns a 64-bit hash for s that includes the two seed // values. func Hash64WithSeeds(s []byte, seed0, seed1 uint64) uint64 { return hash64Len16(Hash64(s)-seed0, seed1) } // Hash32 returns a 32-bit hash for s. 
func Hash32(s []byte) uint32 { n := uint32(len(s)) if n <= 24 { if n <= 12 { if n <= 4 { return hash32Len0to4(s) } return hash32Len5to12(s) } return hash32Len13to24(s) } // n > 24 h := n g := c1 * n f := g a0 := ror32(fetch32(s[n-4:])*c1, 17) * c2 a1 := ror32(fetch32(s[n-8:])*c1, 17) * c2 a2 := ror32(fetch32(s[n-16:])*c1, 17) * c2 a3 := ror32(fetch32(s[n-12:])*c1, 17) * c2 a4 := ror32(fetch32(s[n-20:])*c1, 17) * c2 const magic = 0xe6546b64 h ^= a0 h = ror32(h, 19) h = h*5 + magic h ^= a2 h = ror32(h, 19) h = h*5 + magic g ^= a1 g = ror32(g, 19) g = g*5 + magic g ^= a3 g = ror32(g, 19) g = g*5 + magic f += a4 f = ror32(f, 19) f = f*5 + magic for i := (n - 1) / 20; i != 0; i-- { a0 := ror32(fetch32(s)*c1, 17) * c2 a1 := fetch32(s[4:]) a2 := ror32(fetch32(s[8:])*c1, 17) * c2 a3 := ror32(fetch32(s[12:])*c1, 17) * c2 a4 := fetch32(s[16:]) h ^= a0 h = ror32(h, 18) h = h*5 + magic f += a1 f = ror32(f, 19) f = f * c1 g += a2 g = ror32(g, 18) g = g*5 + magic h ^= a3 + a1 h = ror32(h, 19) h = h*5 + magic g ^= a4 g = bswap32(g) * 5 h += a4 * 5 h = bswap32(h) f += a0 f, g, h = g, h, f // a.k.a. PERMUTE3 s = s[20:] } g = ror32(g, 11) * c1 g = ror32(g, 17) * c1 f = ror32(f, 11) * c1 f = ror32(f, 17) * c1 h = ror32(h+g, 19) h = h*5 + magic h = ror32(h, 17) * c1 h = ror32(h+f, 19) h = h*5 + magic h = ror32(h, 17) * c1 return h } // Hash128 returns a 128-bit hash value for s. func Hash128(s []byte) (lo, hi uint64) { if len(s) >= 16 { return Hash128WithSeed(s[16:], fetch64(s), fetch64(s[8:])+k0) } return Hash128WithSeed(s, k0, k1) } // Hash128WithSeed returns a 128-bit hash value for s that includes the given // 128-bit seed. func Hash128WithSeed(s []byte, seed0, seed1 uint64) (lo, hi uint64) { if len(s) < 128 { return cityMurmur(s, seed0, seed1) } // We expect len >= 128 to be the common case. // Keep 56 bytes of state: v, w, x, y, and z. 
n := uint64(len(s)) x := seed0 y := seed1 z := n * k1 v1 := ror64(y^k1, 49)*k1 + fetch64(s) v2 := ror64(v1, 42)*k1 + fetch64(s[8:]) w1 := ror64(y+z, 35)*k1 + x w2 := ror64(x+fetch64(s[88:]), 53) * k1 // This is the same inner loop as Hash64, manually unrolled. t := s for n >= 128 { // Iteration 1 x = ror64(x+y+v1+fetch64(t[8:]), 37) * k1 y = ror64(y+v2+fetch64(t[48:]), 42) * k1 x ^= w2 y += v1 + fetch64(t[40:]) z = ror64(z+w1, 33) * k1 v1, v2 = weakHashLen32WithSeeds(t, v2*k1, x+w1) w1, w2 = weakHashLen32WithSeeds(t[32:], z+w2, y+fetch64(t[16:])) x, z = z, x t = t[64:] // Iteration 2 x = ror64(x+y+v1+fetch64(t[8:]), 37) * k1 y = ror64(y+v2+fetch64(t[48:]), 42) * k1 x ^= w2 y += v1 + fetch64(t[40:]) z = ror64(z+w1, 33) * k1 v1, v2 = weakHashLen32WithSeeds(t, v2*k1, x+w1) w1, w2 = weakHashLen32WithSeeds(t[32:], z+w2, y+fetch64(t[16:])) x, z = z, x t = t[64:] n -= 128 } x += ror64(v1+z, 49) * k0 y = y*k0 + ror64(w2, 37) z = z*k0 + ror64(w1, 27) w1 *= 9 v1 *= k0 // Here, unlike in Hash64, we didn't do the tail block ahead of time. // We hash in 32-byte blocks working back-to-front, including as many bytes // as necessary from the chunk prior to t to ensure we have a whole number // of blocks. tail := s[len(s)-128:] for pos := 0; pos < int(n); pos += 32 { offset := len(tail) - pos - 32 block := tail[offset:] y = ror64(x+y, 42)*k0 + v2 w1 += fetch64(block[16:]) x = x*k0 + w1 z += w2 + fetch64(block) w2 += v1 v1, v2 = weakHashLen32WithSeeds(block, v1+z, v2) v1 *= k0 } // At this point our 56 bytes of state should contain more than // enough information for a strong 128-bit hash. We use two // different 56-byte-to-8-byte hashes to get a 16-byte final result. x = hash64Len16(x, v1) y = hash64Len16(y+z, w1) return hash64Len16(x+v2, w2) + y, hash64Len16(x+w2, y+v2) } // Hash128To64 returns a 64-bit hash value for an input of 128 bits. func Hash128To64(lo, hi uint64) uint64 { // Murmur-inspired hashing. 
const multiplier = 0x9ddfea08eb382d69 a := (lo ^ hi) * multiplier a ^= (a >> 47) b := (hi ^ a) * multiplier b ^= (b >> 47) b *= multiplier return b }
vendor/github.com/creachadair/cityhash/cityhash.go
0.722233
0.487551
cityhash.go
starcoder
package stats import ( "math" "reflect" "sort" "github.com/kelindar/binary" "github.com/kelindar/binary/sorted" ) // Sample represents a sample window type sample sorted.Int32s func (s sample) Len() int { return len(s) } func (s sample) Less(i, j int) bool { return s[i] < s[j] } func (s sample) Swap(i, j int) { s[i], s[j] = s[j], s[i] } // GetBinaryCodec retrieves a custom binary codec. func (s *sample) GetBinaryCodec() binary.Codec { return sorted.IntsCodecAs(reflect.TypeOf(sample{}), 4) } // StdDev returns the standard deviation of the sample. func (s sample) StdDev() float64 { return math.Sqrt(s.Variance()) } // Sum returns the sum of the sample. func (s sample) Sum() (sum int64) { for _, v := range s { sum += int64(v) } return } // Variance returns the variance of the sample. func (s sample) Variance() float64 { if 0 == len(s) { return 0.0 } m := s.Mean() var sum float64 for _, v := range s { d := float64(v) - m sum += d * d } return sum / float64(len(s)) } // Variance returns the mean of the sample. func (s sample) Mean() float64 { if 0 == len(s) { return 0.0 } return float64(s.Sum()) / float64(len(s)) } // Min returns the minimum value of the sample. func (s sample) Min() int { if 0 == len(s) { return 0 } var min int32 = math.MaxInt32 for _, v := range s { if min > v { min = v } } return int(min) } // Max returns the maximum value of the sample. func (s sample) Max() int { if 0 == len(s) { return 0 } var max int32 = math.MinInt32 for _, v := range s { if max < v { max = v } } return int(max) } // Quantiles returns a slice of arbitrary quantiles of the sample. 
func (s sample) Quantile(quantiles ...float64) []float64 { scores := make([]float64, len(quantiles)) size := len(s) if size > 0 { sort.Sort(s) for i, quantile := range quantiles { pos := (quantile / 100) * float64(size+1) if pos < 1.0 { scores[i] = float64(s[0]) } else if pos >= float64(size) { scores[i] = float64(s[size-1]) } else { lower := float64(s[int(pos)-1]) upper := float64(s[int(pos)]) scores[i] = lower + (pos-math.Floor(pos))*(upper-lower) } } } return scores } // Histogram creates a histogram with the bins provided. func (s sample) Histogram(bins []int) []Bin { // Get the current and next bin hist, index := binsFor(bins), 0 // Range through the sorted values sort.Sort(s) for _, v := range s { if v > hist[index].Upper { index++ } // Count hist[index].Count++ } return hist } // binsFor computes the bins for a given set of points func binsFor(points []int) []Bin { sort.Ints(points) if len(points) < 2 { return []Bin{{ Lower: math.MinInt32, Upper: math.MaxInt32, }} } arr := make([]Bin, 0, len(points)-1) for i := 0; i < len(points)-1; i++ { arr = append(arr, Bin{ Lower: int32(points[i]), Upper: int32(points[i+1]), }) } return arr } // Bin represents a bin of a histogram type Bin struct { Lower int32 // The lower bound of the bin //Center int32 // The center of the bin Upper int32 // The upper bound of the bin Count int // The number of elements in the bin }
sample.go
0.863794
0.488588
sample.go
starcoder
package graphql_models type BooleanFilter struct { IsTrue *bool `json:"isTrue"` IsFalse *bool `json:"isFalse"` IsNull *bool `json:"isNull"` } type ChangePasswordResponse struct { Ok bool `json:"ok"` } type FloatFilter struct { EqualTo *float64 `json:"equalTo"` NotEqualTo *float64 `json:"notEqualTo"` LessThan *float64 `json:"lessThan"` LessThanOrEqualTo *float64 `json:"lessThanOrEqualTo"` MoreThan *float64 `json:"moreThan"` MoreThanOrEqualTo *float64 `json:"moreThanOrEqualTo"` In []float64 `json:"in"` NotIn []float64 `json:"notIn"` } type IDFilter struct { EqualTo *string `json:"equalTo"` NotEqualTo *string `json:"notEqualTo"` In []string `json:"in"` NotIn []string `json:"notIn"` } type IntFilter struct { EqualTo *int `json:"equalTo"` NotEqualTo *int `json:"notEqualTo"` LessThan *int `json:"lessThan"` LessThanOrEqualTo *int `json:"lessThanOrEqualTo"` MoreThan *int `json:"moreThan"` MoreThanOrEqualTo *int `json:"moreThanOrEqualTo"` In []int `json:"in"` NotIn []int `json:"notIn"` } type LoginResponse struct { Token string `json:"token"` RefreshToken string `json:"refreshToken"` } type RefreshTokenResponse struct { Token string `json:"token"` } type Role struct { ID string `json:"id"` AccessLevel int `json:"accessLevel"` Name string `json:"name"` UpdatedAt *int `json:"updatedAt"` DeletedAt *int `json:"deletedAt"` CreatedAt *int `json:"createdAt"` Users []*User `json:"users"` } type RoleCreateInput struct { AccessLevel int `json:"accessLevel"` Name string `json:"name"` } type RoleDeletePayload struct { ID string `json:"id"` } type RoleFilter struct { Search *string `json:"search"` Where *RoleWhere `json:"where"` } type RolePagination struct { Limit int `json:"limit"` Page int `json:"page"` } type RolePayload struct { Role *Role `json:"role"` } type RoleUpdateInput struct { AccessLevel *int `json:"accessLevel"` Name *string `json:"name"` UpdatedAt *int `json:"updatedAt"` DeletedAt *int `json:"deletedAt"` CreatedAt *int `json:"createdAt"` } type RoleWhere struct { ID 
*IDFilter `json:"id"` AccessLevel *IntFilter `json:"accessLevel"` Name *StringFilter `json:"name"` UpdatedAt *IntFilter `json:"updatedAt"` DeletedAt *IntFilter `json:"deletedAt"` CreatedAt *IntFilter `json:"createdAt"` Users *UserWhere `json:"users"` Or *RoleWhere `json:"or"` And *RoleWhere `json:"and"` } type RolesCreateInput struct { Roles []*RoleCreateInput `json:"roles"` } type RolesDeletePayload struct { Ids []string `json:"ids"` } type RolesPayload struct { Roles []*Role `json:"roles"` } type RolesUpdatePayload struct { Ok bool `json:"ok"` } type StringFilter struct { EqualTo *string `json:"equalTo"` NotEqualTo *string `json:"notEqualTo"` In []string `json:"in"` NotIn []string `json:"notIn"` StartWith *string `json:"startWith"` NotStartWith *string `json:"notStartWith"` EndWith *string `json:"endWith"` NotEndWith *string `json:"notEndWith"` Contain *string `json:"contain"` NotContain *string `json:"notContain"` StartWithStrict *string `json:"startWithStrict"` NotStartWithStrict *string `json:"notStartWithStrict"` EndWithStrict *string `json:"endWithStrict"` NotEndWithStrict *string `json:"notEndWithStrict"` ContainStrict *string `json:"containStrict"` NotContainStrict *string `json:"notContainStrict"` } type User struct { ID string `json:"id"` FirstName *string `json:"firstName"` LastName *string `json:"lastName"` Username *string `json:"username"` Password *string `json:"password"` Email *string `json:"email"` Mobile *string `json:"mobile"` Phone *string `json:"phone"` Address *string `json:"address"` Active *bool `json:"active"` LastLogin *int `json:"lastLogin"` LastPasswordChange *int `json:"lastPasswordChange"` Token *string `json:"token"` Role *Role `json:"role"` CreatedAt *int `json:"createdAt"` DeletedAt *int `json:"deletedAt"` UpdatedAt *int `json:"updatedAt"` } type UserCreateInput struct { FirstName *string `json:"firstName"` LastName *string `json:"lastName"` Username *string `json:"username"` Password *string `json:"password"` Email *string 
`json:"email"` RoleID *string `json:"roleId"` } type UserDeletePayload struct { ID string `json:"id"` } type UserFilter struct { Search *string `json:"search"` Where *UserWhere `json:"where"` } type UserPagination struct { Limit int `json:"limit"` Page int `json:"page"` } type UserPayload struct { User *User `json:"user"` } type UserUpdateInput struct { FirstName *string `json:"firstName"` LastName *string `json:"lastName"` Mobile *string `json:"mobile"` Phone *string `json:"phone"` Address *string `json:"address"` } type UserUpdatePayload struct { Ok bool `json:"ok"` } type UserWhere struct { ID *IDFilter `json:"id"` FirstName *StringFilter `json:"firstName"` LastName *StringFilter `json:"lastName"` Username *StringFilter `json:"username"` Password *StringFilter `json:"password"` Email *StringFilter `json:"email"` Mobile *StringFilter `json:"mobile"` Phone *StringFilter `json:"phone"` Address *StringFilter `json:"address"` Active *BooleanFilter `json:"active"` LastLogin *IntFilter `json:"lastLogin"` LastPasswordChange *IntFilter `json:"lastPasswordChange"` Token *StringFilter `json:"token"` Role *RoleWhere `json:"role"` CreatedAt *IntFilter `json:"createdAt"` DeletedAt *IntFilter `json:"deletedAt"` UpdatedAt *IntFilter `json:"updatedAt"` Or *UserWhere `json:"or"` And *UserWhere `json:"and"` } type UsersCreateInput struct { Users []*UserCreateInput `json:"users"` } type UsersDeletePayload struct { Ids []string `json:"ids"` } type UsersPayload struct { Users []*User `json:"users"` Total int `json:"total"` }
graphql_models/generated_models.go
0.531209
0.401219
generated_models.go
starcoder
package tinygraph import ( "errors" "fmt" ) // MatrixType is log 2 of the cell size type MatrixType uint64 const ( // Bit is a single-bit cell Bit MatrixType = iota // TwoBit is a two-bit cell TwoBit // FourBit is a four-bit cell FourBit // Byte is an eight-bit cell Byte // SixteenBit is a sixteen-bit cell SixteenBit // ThirtyTwoBit is a thirty-two-bit cell ThirtyTwoBit // Long is a sixty-four-bit cell Long ) const ( // WordSize is the size of the word we will be using to store the matrix WordSize = uint64(64) // WordSizeMinusOne is word size minus one, for bitwise modulus WordSizeMinusOne = uint64(63) // WordSizeExp is log 2 of the word size WordSizeExp = uint64(6) // One is 1 One = uint64(1) ) var ( // ErrOutOfBounds is returned when a coordinate outside the bounds of the // matrix is requested or set ErrOutOfBounds = errors.New("Bit requested is outside the matrix bounds") _ Matrix = &ArrayMatrix{} ) // Matrix is a 2-dimensional square matrix. type Matrix interface { Set(i, j uint64) error Unset(i, j uint64) error SetBit(i, j, k uint64) error UnsetBit(i, j, k uint64) error Replace(i, j, k uint64) error Clear(i, j uint64) error Get(i, j uint64) (uint64, error) GetRow(i uint64) ([]uint64, error) SetRow(i uint64, row []uint64) error Copy() Matrix Transpose() Matrix } func New(mtype MatrixType, size uint64) Matrix { return NewArrayMatrix(mtype, size) } // ArrayMatrix is an implementation of Matrix that stores cells as // a 1-dimensional array of uint64s type ArrayMatrix struct { Words []uint64 Size uint64 LastIndex uint64 WordsPerRow uint64 MType MatrixType cellmask uint64 cellsize uint64 } // NewArrayMatrix creates a new matrix with a given cell size and given dimensions func NewArrayMatrix(mtype MatrixType, size uint64) Matrix { matrix := &ArrayMatrix{ Size: size, LastIndex: size - 1, MType: mtype, cellmask: (1 << (1 << mtype)) - 1, cellsize: 1 << mtype, } if mtype == Long { matrix.cellmask = ^uint64(0) } // calculate the number of words required to store a square 
matrix of size // rows with size mtype cells per row. Each row begins with a new uint64, // to avoid inefficient rebuilding of a row that starts in the middle of // a uint64. // Shift to get number of bits per row bitsPerRow := size << mtype // Ceiling division to get number of words per row matrix.WordsPerRow = ((bitsPerRow - 1) >> WordSizeExp) + 1 // Now multiply by size to get total number of words required to store the // matrix matrix.Words = make([]uint64, size*matrix.WordsPerRow) return matrix } // GetWordIndex returns the index of the word that contains the coordinate specified func (m *ArrayMatrix) GetWordIndex(i, j uint64) uint64 { return (i * m.WordsPerRow) + (j << m.MType >> WordSizeExp) } // Set sets the principal bit of the cell at the coordinates requested func (m *ArrayMatrix) Set(i, j uint64) error { if i > m.LastIndex || j > m.LastIndex { return ErrOutOfBounds } mask := One << (j << m.MType & WordSizeMinusOne) m.Words[m.GetWordIndex(i, j)] |= mask return nil } func (m *ArrayMatrix) Unset(i, j uint64) error { if i > m.LastIndex || j > m.LastIndex { return ErrOutOfBounds } mask := One << (j << m.MType & WordSizeMinusOne) m.Words[m.GetWordIndex(i, j)] &= ^mask return nil } func (m *ArrayMatrix) SetBit(i, j, k uint64) error { if i > m.LastIndex || j > m.LastIndex || k >= m.cellsize { return ErrOutOfBounds } mask := One << k << (j << m.MType & WordSizeMinusOne) m.Words[m.GetWordIndex(i, j)] |= mask return nil } func (m *ArrayMatrix) Replace(i, j, k uint64) error { if i > m.LastIndex || j > m.LastIndex { return ErrOutOfBounds } offset := (j << m.MType & WordSizeMinusOne) newcell := k << offset mask := m.cellmask << offset word := m.Words[m.GetWordIndex(i, j)] m.Words[m.GetWordIndex(i, j)] ^= (word ^ newcell) & mask return nil } func (m *ArrayMatrix) Clear(i, j uint64) error { return m.Replace(i, j, 0) } func (m *ArrayMatrix) UnsetBit(i, j, k uint64) error { if i > m.LastIndex || j > m.LastIndex || k >= m.cellsize { return ErrOutOfBounds } offset := (j 
<< m.MType & WordSizeMinusOne) mask := One << k << offset cell := (m.cellmask << offset) & ^mask m.Words[m.GetWordIndex(i, j)] &= cell return nil } // Get gets the cell at the coordinates requested func (m *ArrayMatrix) Get(i, j uint64) (uint64, error) { if i > m.LastIndex || j > m.LastIndex { return 0, ErrOutOfBounds } word := m.Words[m.GetWordIndex(i, j)] result := word >> (j << m.MType & WordSizeMinusOne) & m.cellmask return result, nil } func (m *ArrayMatrix) GetRow(i uint64) ([]uint64, error) { if i > m.LastIndex { return nil, ErrOutOfBounds } row := make([]uint64, m.WordsPerRow) idx := i * m.WordsPerRow copy(row, m.Words[idx:idx+m.WordsPerRow]) return row, nil } func (m *ArrayMatrix) SetRow(i uint64, row []uint64) error { if i > m.LastIndex || uint64(len(row)) != m.WordsPerRow { return ErrOutOfBounds } idx := i * m.WordsPerRow copy(m.Words[idx:idx+m.WordsPerRow], row) return nil } func (m *ArrayMatrix) Swap(i0, j0, i1, j1 uint64) error { if i0 > m.LastIndex || j0 > m.LastIndex || i1 > m.LastIndex || j1 > m.LastIndex { return ErrOutOfBounds } idx0 := m.GetWordIndex(i0, j0) idx1 := m.GetWordIndex(i1, j1) pos0 := j0 << m.MType & WordSizeMinusOne pos1 := j1 << m.MType & WordSizeMinusOne mask := ((m.Words[idx0] >> pos0) ^ (m.Words[idx1] >> pos1)) & m.cellmask m.Words[idx0] ^= (mask << pos0) m.Words[idx1] ^= (mask << pos1) return nil } func (m *ArrayMatrix) ReverseRow(i uint64) error { if i > m.LastIndex { return ErrOutOfBounds } row, _ := m.GetRow(i) n := make([]uint64, len(row)) for i, v := range row { mask := ^uint64(0) s := WordSize >> 1 for s >= m.cellsize { mask ^= (mask << s) v = ((v >> s) & mask) | ((v << s) & ^mask) s >>= 1 } n[m.WordsPerRow-1-uint64(i)] = v } offset := m.Size << m.MType & WordSizeMinusOne if offset > 0 { // Need to shift all the bits, since not evenly divisible by 64 var j uint64 mask := uint64(1) for t := uint64(0); t < offset; t++ { mask = (mask << 1) | 1 } current := n[0] >> (WordSize - offset) & mask for j = 1; j < m.WordsPerRow; j++ 
{ next := n[j] current |= next << offset n[j-1] = current current = next >> (WordSize - offset) & mask } n[m.WordsPerRow-1] = current } return m.SetRow(i, n) } // Transpose returns a view of the matrix with the axes transposed func (m *ArrayMatrix) Transpose() Matrix { return &TransposedArrayMatrix{m} } func (m *ArrayMatrix) Copy() Matrix { n := &ArrayMatrix{ Size: m.Size, LastIndex: m.LastIndex, MType: m.MType, WordsPerRow: m.WordsPerRow, cellmask: m.cellmask, cellsize: m.cellsize, } n.Words = make([]uint64, len(m.Words)) copy(n.Words, m.Words) return n } func logWord(s string, m MatrixType, i uint64) { fmt.Println(s) fmt.Printf(" %s", spaceformat(i, 1<<m)) fmt.Printf("\n") } func logRow(s string, m MatrixType, r []uint64) { fmt.Println(s) for _, i := range r { fmt.Printf(" %s", spaceformat(i, 1<<m)) } fmt.Printf("\n") } func spaceformat(n, m uint64) string { in := fmt.Sprintf("%064b", n) out := make([]byte, len(in)+(len(in)-2+int(in[0]/'0'))/4) var i, j, k uint64 for i, j, k = uint64(len(in)-1), uint64(len(out)-1), 0; ; i, j = i-1, j-1 { out[j] = in[i] if i == 0 { return string(out) } if k++; k == m { j, k = j-1, 0 out[j] = ' ' } } }
matrix.go
0.686055
0.571826
matrix.go
starcoder
package stats import ( "fmt" "github.com/onsi/gomega/types" ) type StatItem struct { Name string `json:"name"` Value interface{} `json:"value"` } type Stats struct { Stats []StatItem `json:"stats"` } func BeEqual(expected interface{}) types.GomegaMatcher { return &statMatcher{ expected: expected, predicate: &equalPredicate, } } func BeGreaterThanZero() types.GomegaMatcher { return &statMatcher{ predicate: &greaterThanZeroPredicate, } } func BeEqualZero() types.GomegaMatcher { return &statMatcher{ predicate: &equalZeroPredicate, } } var equalZero = func(stat StatItem) bool { return stat.Value.(float64) == 0 } var equalZeroPredicate = func(interface{}) func(item StatItem) bool { return equalZero } var greaterThanZero = func(stat StatItem) bool { return stat.Value.(float64) > 0 } var greaterThanZeroPredicate = func(interface{}) func(item StatItem) bool { return greaterThanZero } var equalPredicate = func(expected interface{}) func(item StatItem) bool { return func(stat StatItem) bool { return int(stat.Value.(float64)) == expected.(int) } } type statMatcher struct { expected interface{} predicate *func(interface{}) func(StatItem) bool } func (m *statMatcher) Match(actual interface{}) (success bool, err error) { stats, ok := actual.(*Stats) if !ok { return false, fmt.Errorf("BeEqual matcher expects a Stats") } if len(stats.Stats) == 0 { return false, fmt.Errorf("no stat found: %+q", stats) } if len(stats.Stats) > 1 { return false, fmt.Errorf("actual stats have more items than 1: %+q", stats) } return (*m.predicate)(m.expected)(stats.Stats[0]), nil } func (m *statMatcher) genFailureMessage(toBeOrNotToBe string, actual interface{}) (message string) { actualStats := actual.(*Stats) actualStat := actualStats.Stats[0] var expectation string switch m.predicate { case &equalPredicate: expectation = fmt.Sprintf("%s: %v %s equal %v", toBeOrNotToBe, actualStat.Name, actualStat.Value, m.expected) case &equalZeroPredicate: expectation = fmt.Sprintf("%s: %v %s equal 0", 
toBeOrNotToBe, actualStat.Name, actualStat.Value) case &greaterThanZeroPredicate: expectation = fmt.Sprintf("%s: %v %s greater than 0", actualStat.Name, actualStat.Value, toBeOrNotToBe) default: panic("unknown predicate") } return fmt.Sprintf("Expected %s", expectation) } func (m *statMatcher) FailureMessage(actual interface{}) (message string) { return m.genFailureMessage("to be", actual) } func (m *statMatcher) NegatedFailureMessage(actual interface{}) (message string) { return m.genFailureMessage("not to be", actual) }
test/framework/envoy_admin/stats/stats.go
0.783368
0.446314
stats.go
starcoder
package embedding import ( "bufio" "io" "strconv" "strings" "github.com/pkg/errors" "github.com/wujunfeng1/wego/pkg/embedding/embutil" ) type Embedding struct { Word string Dim int Vector []float64 Norm float64 } func (e Embedding) Validate() error { if e.Word == "" { return errors.New("Word is empty") } else if e.Dim == 0 || len(e.Vector) == 0 { return errors.Errorf("Dim of %s is zero", e.Word) } else if e.Dim != len(e.Vector) { return errors.Errorf("Dim and length of Vector must be same, Dim=%d, len(Vec)=%d", e.Dim, len(e.Vector)) } return nil } type Embeddings []Embedding func (embs Embeddings) Empty() bool { return len(embs) == 0 } func (embs Embeddings) Find(word string) (Embedding, bool) { for _, emb := range embs { if word == emb.Word { return emb, true } } return Embedding{}, false } func (embs Embeddings) Validate() error { if len(embs) > 0 { dim := embs[0].Dim for _, emb := range embs { if dim != emb.Dim { return errors.Errorf("dimension for all vectors must be the same: %d but got %d", dim, emb.Dim) } } } return nil } func Load(r io.Reader) (Embeddings, error) { var embs Embeddings if err := parse(r, func(emb Embedding) error { if err := emb.Validate(); err != nil { return err } embs = append(embs, emb) return nil }); err != nil { return nil, err } return embs, nil } func parse(r io.Reader, op func(Embedding) error) error { s := bufio.NewScanner(r) for s.Scan() { line := s.Text() if strings.HasPrefix(line, " ") { continue } emb, err := parseLine(line) if err != nil { return err } if err := op(emb); err != nil { return err } } if err := s.Err(); err != nil && err != io.EOF { return errors.Wrapf(err, "failed to scan") } return nil } func parseLine(line string) (Embedding, error) { slice := strings.Fields(line) if len(slice) < 2 { return Embedding{}, errors.New("Must be over 2 lenghth for word and vector elems") } word := slice[0] vector := slice[1:] dim := len(vector) vec := make([]float64, dim) for k, elem := range vector { val, err := 
strconv.ParseFloat(elem, 64) if err != nil { return Embedding{}, err } vec[k] = val } return Embedding{ Word: word, Dim: dim, Vector: vec, Norm: embutil.Norm(vec), }, nil }
pkg/embedding/embedding.go
0.66072
0.410047
embedding.go
starcoder
package parse import ( "go/ast" "strings" "github.com/ardnew/gosh/cmd/goshfun/util" ) // Argument represents an individual argument variable in the list of argument // variables of an individual function definition. type Argument struct { Name string Ref []Reference Type string } // NewArgument creates a new Argument by inspecting the parsed AST field. func NewArgument(field *ast.Field) *Argument { return (&Argument{ Name: "", Ref: []Reference{}, Type: "", }).Parse(field.Type) } // Parse constructs an Argument by traversing the AST construction. func (arg *Argument) Parse(expr ast.Expr) *Argument { switch t := expr.(type) { case *ast.Ident: arg.Type = t.Name return arg // base case; we stop recursion once we reach the type name. case *ast.ArrayType: arg.Ref = append(arg.Ref, RefArray) return arg.Parse(t.Elt) case *ast.Ellipsis: arg.Ref = append(arg.Ref, RefEllipses) return arg.Parse(t.Elt) case *ast.StarExpr: arg.Ref = append(arg.Ref, RefPointer) return arg.Parse(t.X) } // shouldn't reach here unless the Expr doesn't have an identifying type, // (which I believe is always a syntax error in Go), or we encountered an // unrecognized expression and is not currently supported. in either case, // this is interpreted as an error, and we cannot use this function. return nil } func (arg *Argument) String() string { return arg.ProtoSh() } // IsListRef returns whether or not the reference at index ri is one of the list // types (array or ellipses). func (arg *Argument) IsListRef(ri int) bool { return nil != arg && ri < len(arg.Ref) && (RefArray == arg.Ref[ri] || RefEllipses == arg.Ref[ri]) } // ProtoGo returns the signature used for this Argument value for the Go // interface. 
func (arg *Argument) ProtoGo() string { var sb strings.Builder if arg.Name != "" { sb.WriteString(arg.Name) sb.WriteRune(' ') } for _, ref := range arg.Ref { sb.WriteString(ref.Symbol()) } sb.WriteString(arg.Type) return sb.String() } // ProtoSh returns the signature used for this Argument value for the shell // interface. func (arg *Argument) ProtoSh() string { var sb strings.Builder for _, ref := range arg.Ref { switch ref { case RefArray, RefEllipses: sb.WriteString(RefEllipses.Symbol()) break } } if arg.Name != "" { sb.WriteString(arg.Name) } else { sb.WriteString(arg.Type) } return sb.String() } // Prototype returns the signature used for this Argument value for either the // shell interface or the Go interface. func (arg *Argument) Prototype(sh bool) string { if sh { return arg.ProtoSh() } return arg.ProtoGo() } // Declaration returns a representation of the type of this argument that can be // attached to a local variable identifier. func (arg *Argument) Declaration() string { var a string var hasList bool for _, ref := range arg.Ref { switch ref { case RefArray, RefEllipses: if hasList { // currently do not support list indirection break } a = a + RefArray.Symbol() hasList = true case RefPointer: a = a + ref.Symbol() } } return a + arg.Type } // Expression returns a string representation of the receiver arg suitable for // passing on as argument in a function call. func (arg *Argument) Expression() string { s := arg.Name // we only support a single level of indirection or enumeration... if len(arg.Ref) > 0 && RefEllipses == arg.Ref[0] { s += RefEllipses.Symbol() } return s } // Parser returns a slice of Go source code lines defining an anonymous function // that will parse a string into a variable whose type is identified by the // receiver arg's Type. 
func (arg *Argument) Parser(aName, pName, iName string, argPos, numArgs, reqArgs int) []string { ln, fn := util.Newliner{}, util.Newliner{} eName := aName + "Err" fName := "parse" + strings.Title(aName) iWidth := util.NumDigits(numArgs) ln.Addf("// -------------------------------------") ln.Addf("// %*d | %s -> %s", iWidth, argPos, aName, arg.Declaration()) ln.Addf("// -------------------------------------") ln.Addf("var %s %s", aName, arg.Declaration()) ln.Addf("var %s error", eName) ln.Addf("%s := func(input string) (%s, error) {", fName, arg.Type) switch arg.Type { case "rune": fn.Add("if len(input) > 0 {") fn.Add("\tr, _ := utf8.DecodeRuneInString(input)") fn.Add("\tif utf8.RuneError != r {") fn.Add("\t\treturn r, nil") fn.Add("\t}") fn.Add("\treturn utf8.RuneError, fmt.Errorf(\"invalid UTF-8 encoding: %s\", input)") fn.Add("}") fn.Add("return utf8.RuneError, fmt.Errorf(\"empty string (0 bytes)\")") case "string": // no conversion necessary fn.Add("return input, nil") case "error": fn.Add("return fmt.Errorf(\"%s\", input), nil") case "bool": fn.Add("b, err := strconv.ParseBool(input)") fn.Add("if nil != err {") fn.Add("\treturn false, err") fn.Add("}") fn.Add("return b, nil") case "byte": fn.Add("u, err := strconv.ParseUint(input, 0, 8)") fn.Add("if nil != err {") fn.Add("\treturn 0, err") fn.Add("}") fn.Add("return byte(u), nil") case "int": fn.Add("d, err := strconv.ParseInt(input, 0, strconv.IntSize)") fn.Add("if nil != err {") fn.Add("\treturn 0, err") fn.Add("}") fn.Add("return int(d), nil") case "int8": fn.Add("d, err := strconv.ParseInt(input, 0, 8)") fn.Add("if nil != err {") fn.Add("\treturn 0, err") fn.Add("}") fn.Add("return int8(d), nil") case "int16": fn.Add("d, err := strconv.ParseInt(input, 0, 16)") fn.Add("if nil != err {") fn.Add("\treturn 0, err") fn.Add("}") fn.Add("return int16(d), nil") case "int32": fn.Add("d, err := strconv.ParseInt(input, 0, 32)") fn.Add("if nil != err {") fn.Add("\treturn 0, err") fn.Add("}") fn.Add("return int32(d), 
nil") case "int64": fn.Add("d, err := strconv.ParseInt(input, 0, 64)") fn.Add("if nil != err {") fn.Add("\treturn 0, err") fn.Add("}") fn.Add("return int64(d), nil") case "uint": fn.Add("u, err := strconv.ParseUint(input, 0, strconv.IntSize)") fn.Add("if nil != err {") fn.Add("\treturn 0, err") fn.Add("}") fn.Add("return uint(u), nil") case "uint8": fn.Add("u, err := strconv.ParseUint(input, 0, 8)") fn.Add("if nil != err {") fn.Add("\treturn 0, err") fn.Add("}") fn.Add("return uint8(u), nil") case "uint16": fn.Add("u, err := strconv.ParseUint(input, 0, 16)") fn.Add("if nil != err {") fn.Add("\treturn 0, err") fn.Add("}") fn.Add("return uint16(u), nil") case "uint32": fn.Add("u, err := strconv.ParseUint(input, 0, 32)") fn.Add("if nil != err {") fn.Add("\treturn 0, err") fn.Add("}") fn.Add("return uint32(u), nil") case "uint64": fn.Add("u, err := strconv.ParseUint(input, 0, 64)") fn.Add("if nil != err {") fn.Add("\treturn 0, err") fn.Add("}") fn.Add("return uint64(u), nil") case "uintptr": fn.Add("u, err := strconv.ParseUint(input, 0, strconv.IntSize)") fn.Add("if nil != err {") fn.Add("\treturn 0, err") fn.Add("}") fn.Add("return uintptr(u), nil") case "float32": fn.Add("f, err := strconv.ParseFloat(input, 32)") fn.Add("if nil != err {") fn.Add("\treturn 0, err") fn.Add("}") fn.Add("return float32(f), nil") case "float64": fn.Add("f, err := strconv.ParseFloat(input, 64)") fn.Add("if nil != err {") fn.Add("\treturn 0, err") fn.Add("}") fn.Add("return float64(f), nil") case "complex64": fn.Add("i, err := strconv.ParseComplex(input, 64)") fn.Add("if nil != err {") fn.Add("\treturn 0, err") fn.Add("}") fn.Add("return complex64(i), nil") case "complex128": fn.Add("i, err := strconv.ParseComplex(input, 128)") fn.Add("if nil != err {") fn.Add("\treturn 0, err") fn.Add("}") fn.Add("return complex128(i), nil") default: fn.Add("return nil, nil") } for _, s := range fn { ln.Addf("\t%s", s) } ln.Add("}") if arg.IsListRef(0) { ln.Addf("%s = make(%s, len(%s)-%d)", aName, 
arg.Declaration(), pName, reqArgs) ln.Addf("for i := 0; i < len(%s)-%d; i++ {", pName, reqArgs) ln.Addf("\t%s[i], %s = %s(%s[%s])", aName, eName, fName, pName, iName) ln.Addf("\tif nil != %s {", eName) ln.Add("\t\tbreak") ln.Add("\t}") ln.Addf("\t%s++", iName) ln.Add("}") } else { ln.Addf("%s, %s = %s(%s[%s])", aName, eName, fName, pName, iName) ln.Addf("%s++", iName) } ln.Addf("if nil != %s {", eName) ln.Addf("\treturn nil, %s", eName) ln.Add("}") return ln }
cmd/goshfun/parse/argument.go
0.70619
0.468487
argument.go
starcoder
package fractal import ( "image" "image/color" "math/rand" "time" ) // Fractal is a general fractal representation. Have information about: // iterations, zoom value and centering point. type Fractal struct { Src *image.RGBA // Image to write fractal on. Iter float64 // Number of iterations to perform. Center complex128 // Point to center the fractal on. Zoom float64 // Zoom value. } // Gradient is a list of colors. type Gradient []color.Color var ( // PedagogicalGradient have a fixed transformation between colors for easier // visualization of divergence.s PedagogicalGradient = Gradient{ color.RGBA{0, 0, 0, 0xff}, // Black. color.RGBA{0xff, 0xf0, 0, 0xff}, // Yellow. color.RGBA{0, 0, 0xff, 0xff}, // Blue. color.RGBA{0, 0xff, 0, 0xff}, // Green. color.RGBA{0xff, 0, 0, 0xff}, // Red. } ) // NewRandomGradient creates a gradient of colors proportional to the number of // iterations. func NewRandomGradient(iterations float64) Gradient { seed := rand.New(rand.NewSource(int64(time.Now().Nanosecond()))) grad := make(Gradient, int64(iterations)) for n := range grad { grad[n] = randomColor(seed) } return grad } // randomColor returns a random RGB color from a random seed. func randomColor(seed *rand.Rand) color.RGBA { return color.RGBA{ uint8(seed.Intn(255)), uint8(seed.Intn(255)), uint8(seed.Intn(255)), 0xff} // No alpha. } // NewPrettyGradient creates a gradient of colors fading between purple and // white. The smoothness is proportional to the number of iterations func NewPrettyGradient(iterations float64) Gradient { grad := make(Gradient, int64(iterations)) var col color.Color for n := range grad { // val ranges from [0..255] val := uint8(float64(n) / float64(iterations) * 255) if int64(n) < int64(iterations/2) { col = color.RGBA{val * 2, 0x00, val * 2, 0xff} // Shade of purple. } else { col = color.RGBA{val, val, val, 0xff} // Shade of white. 
} grad[n] = col } return grad } // DivergenceToColor returns a color depending on the number of iterations it // took for the fractal to escape the fractal set. func (g Gradient) DivergenceToColor(escapedIn int) color.Color { return g[escapedIn%len(g)] } const ( Modulo int = iota IterationCount ) func (g *Gradient) Get(i, it, mode int) (float64, float64, float64) { switch mode { case Modulo: return g.modulo(i) case IterationCount: return g.iteration(i, it) default: return g.modulo(i) } } func (grad *Gradient) modulo(i int) (float64, float64, float64) { if i >= len(*grad) { i %= len(*grad) } r, g, b, _ := (*grad)[i].RGBA() return float64(r>>8) / 256, float64(g>>8) / 256, float64(b>>8) / 256 } var Keys = []int{ 0, 0, 0, } func (grad *Gradient) iteration(i, it int) (float64, float64, float64) { ranges := []float64{ 0.05, 0.15, 0.25, } key := 0 for rId := len(ranges) - 1; rId >= 0; rId-- { if float64(i)/float64(it) >= ranges[rId] { key = rId break } } Keys[key] += i r, g, b, _ := (*grad)[key].RGBA() return float64(r>>8) / 256, float64(g>>8) / 256, float64(b>>8) / 256 } func (g *Gradient) AddColor(c color.Color) { (*g) = append((*g), c) }
fractal/fractal.go
0.739234
0.483892
fractal.go
starcoder
package leetcode /* Given a collection of intervals, merge all overlapping intervals. Example 1: Input: intervals = [[1,3],[2,6],[8,10],[15,18]] Output: [[1,6],[8,10],[15,18]] Explanation: Since intervals [1,3] and [2,6] overlaps, merge them into [1,6]. Example 2: Input: intervals = [[1,4],[4,5]] Output: [[1,5]] Explanation: Intervals [1,4] and [4,5] are considered overlapping. NOTE: input types have been changed on April 15, 2019. Please reset to default code definition to get new method signature. Constraints: intervals[i][0] <= intervals[i][1] */ func merge(intervals [][]int) [][]int { if len(intervals) < 2 { return intervals } // 首先根据每个区间的起始位置的值进行升序排序 miQuickSort(intervals, 0, len(intervals)-1) var index int for i := 1; i < len(intervals); i++ { // 比较相邻两个区间,如果前区间的结束值大于等于后区间的起始值 // 则说明这两个区间有重合 // 区间合并:选出这两个重合区间起始值的最小值和结束值的最大值 // 这两个值就是合并后区间的起始值和结束值 // 然后用这个合并后的区间再和下一个区间进行重合判断 if intervals[index][1] >= intervals[i][0] { mi := min(intervals[index][0], intervals[i][0]) ma := max(intervals[index][1], intervals[i][1]) // 合并后的区间覆盖合并前的区间,直接复用传入的intervals的内存 intervals[index] = []int{mi, ma} } else { index++ // 没有重合 intervals[index] = intervals[i] } } return intervals[:index+1] } func min(a, b int) int { if a <= b { return a } return b } func max(a, b int) int { if a >= b { return a } return b } func miQuickSort(sli [][]int, lo, hi int) { if lo >= hi { return } p := miPartition(sli, lo, hi) miQuickSort(sli, lo, p-1) miQuickSort(sli, p+1, hi) } func miPartition(sli [][]int, lo, hi int) int { povit := sli[hi][0] index := lo for i := lo; i < hi; i++ { if sli[i][0] < povit { if i != index { sli[i], sli[index] = sli[index], sli[i] } index++ } } sli[index], sli[hi] = sli[hi], sli[index] return index } /* func main() { //intervals := [][]int{{0, 3}, {0, 1}, {0, 2}, {1, 9}, {2, 5}, {10, 11}, {12, 20}, {19, 20}} intervals := [][]int{{1, 9}, {2, 5}, {19, 20}, {10, 11}, {12, 20}, {0, 3}, {0, 1}, {0, 2}} nintervals := merge(intervals) for i := range nintervals { 
fmt.Println(nintervals[i][0], intervals[i][1]) } } */
go-impl/056-MergeIntervals.go
0.599485
0.473779
056-MergeIntervals.go
starcoder
package particles // IDOrder is an interface for mapping paritcles IDs to their 3D index into the // simulation grid. This interface also supports mutli-reoslution simulations // which are split up into "levels" of fixed resolution. type IDOrder interface { // IDToIndex converts an ID to its 3-index equivalent in the level's grid. // It also returns the resolution level of the particle. IDToIndex(id uint64) (idx [3]int, level int) // IndexToID converts a 3-index within a level to its ID. IndexToID(i [3]int, level int) uint64 // Levels returns the number of levels that this system of IDs uses. Levels() int // LevelOrigin returns a 3-index to the origin of a given level in units of // that resolution level. LevelOrigin(level int) [3]int // LevelSpan returns the 3-index representing the span of a given level in // units of that resolution level. LevelSpan(level int) [3]int // Ntot returns the total number of particles spanned by IDOrder. NTot() int64 } // Type assertions var ( _ IDOrder = &ZMajorUnigrid{ } ) // ZMajorUnigrid is the IDOrder of a z-major uniform-mass grid. This is the // ordering used by, e.g., 2LPTic and many other codes. See the IDOrder // interface for documentation of the methods. type ZMajorUnigrid struct { n int n64 uint64 } // NewZMajorUnigrid returns a z-major uniform density grid with width n on each // side. 
func NewZMajorUnigrid(n int) *ZMajorUnigrid { return &ZMajorUnigrid{ n, uint64(n) } } func (g *ZMajorUnigrid) IDToIndex(id uint64) (idx [3]int, level int) { return [3]int{ int(id / (g.n64 * g.n64)), int((id / g.n64) % g.n64), int(id % g.n64), }, 0 } func (g *ZMajorUnigrid) IDToLevel(id uint64) int { return 0 } func (g *ZMajorUnigrid) IndexToID(i [3]int, level int) uint64 { return uint64(i[2]) + uint64(i[1])*g.n64 + uint64(i[0])*g.n64*g.n64 } func (g *ZMajorUnigrid) Levels() int { return 1 } func (g *ZMajorUnigrid) LevelOrigin(level int) [3]int { return [3]int{ 0, 0, 0 } } func (g *ZMajorUnigrid) LevelSpan(level int) [3]int { return [3]int{ g.n, g.n, g.n } } func (g *ZMajorUnigrid) NTot() int64 { return int64(g.n64*g.n64*g.n64) }
lib/particles/id_order.go
0.780119
0.667771
id_order.go
starcoder
package main import ( "github.com/faiface/pixel" "github.com/faiface/pixel/pixelgl" "github.com/zergon321/cirno" ) type player struct { speed float64 jumpAcceleration float64 verticalSpeed float64 terminalSpeed float64 aim cirno.Vector bulletSpeed float64 bulletSprite *pixel.Sprite spawnedBullets []*electroBullet rect *cirno.Rectangle sprite *pixel.Sprite animation []*pixel.Sprite transform pixel.Matrix dead bool } func (p *player) update(win *pixelgl.Window, space *cirno.Space, deltaTime float64) error { movement := cirno.Zero() // Read movement inputs to change aim // and animation. if win.Pressed(pixelgl.KeyLeft) { movement.X-- p.aim = cirno.Left() p.sprite = p.animation[1] } if win.Pressed(pixelgl.KeyRight) { movement.X++ p.aim = cirno.Right() p.sprite = p.animation[0] } // If player shoots a bullet. if win.JustPressed(pixelgl.KeyZ) { bulletPos := p.rect.Center(). Add(p.aim.MultiplyByScalar(p.rect.Width() / 4)). Add(p.aim.MultiplyByScalar(p.bulletSprite.Frame().W() / 4)) bulletHitCircle, err := cirno.NewCircle(bulletPos, p.bulletSprite.Frame().W()/4) if err != nil { return err } bullet := &electroBullet{ spawner: p, hitCircle: bulletHitCircle, sprite: p.bulletSprite, direction: p.aim, speed: p.bulletSpeed, transform: pixel.IM.Moved(cirnoToPixel(bulletPos)), } bullet.transform = bullet.transform.Scaled(cirnoToPixel(bulletPos), 0.5) bullet.hitCircle.SetData(bullet) bullet.hitCircle.SetIdentity(electroBulletID) bullet.hitCircle.SetMask(beholderEyeID) err = space.Add(bullet.hitCircle) if err != nil { return err } p.spawnedBullets = append(p.spawnedBullets, bullet) } // Find out if player is grounded. 
leftRayOrigin := cirno.NewVector(p.rect.Center().X-p.rect.Width()/2, p.rect.Center().Y) rightRayOrigin := cirno.NewVector(p.rect.Center().X+p.rect.Width()/2, p.rect.Center().Y) leftShape, _, err := space.Raycast(leftRayOrigin, cirno.Down(), p.rect.Height()/2+4, p.rect.GetMask()) if err != nil { return err } rightShape, _, err := space.Raycast(rightRayOrigin, cirno.Down(), p.rect.Height()/2+4, p.rect.GetMask()) if err != nil { return err } grounded := leftShape != nil || rightShape != nil // Compute vertical speed. if grounded { if win.JustPressed(pixelgl.KeyUp) { p.verticalSpeed = p.jumpAcceleration } else { p.verticalSpeed = 0 } } else { p.verticalSpeed += gravity * deltaTime if p.verticalSpeed < p.terminalSpeed*deltaTime { p.verticalSpeed = p.terminalSpeed * deltaTime } } // Adjust movement with framerate. movement.X *= p.speed * deltaTime movement.Y = p.verticalSpeed if movement != cirno.Zero() { // Update player sprite. if movement.X > 0 { p.sprite = p.animation[1] } else if movement.X < 0 { p.sprite = p.animation[0] } shapes, err := space.WouldBeCollidedBy(p.rect, movement, 0) if err != nil { return err } // Resolve collision. if len(shapes) > 0 { pos, _, _, err := cirno.Approximate(p.rect, movement, 0, shapes, intensity, space.UseTags()) if err != nil { return err } movement = pos.Subtract(p.rect.Center()) } // Move sprite and hitbox. prev := p.rect.Center() p.rect.Move(movement) space.AdjustShapePosition(p.rect) p.transform = p.transform.Moved(cirnoToPixel(p.rect.Center().Subtract(prev))) _, err = space.Update(p.rect) if err != nil { return err } } // Check collision with bullets. shapes, err := space.CollidingWith(p.rect) if err != nil { return err } bulletShapes := shapes.FilterByIdentity(bloodBulletID) // If a bullet or more hit the player. if len(bulletShapes) > 0 { p.dead = true // Remove all the bullets that hit the player. 
for bulletShape := range bulletShapes { bullet := bulletShape.Data().(*bloodBullet) ind := -1 for i := range bullet.spawner.spawnedBullets { if bullet.spawner.spawnedBullets[i] == bullet { ind = i break } } bullet.spawner.spawnedBullets = append(bullet.spawner.spawnedBullets[:ind], bullet.spawner.spawnedBullets[ind+1:]...) err = space.Remove(bullet.hitLine) if err != nil { return err } } // Remove player's hitbox. err = space.Remove(p.rect) if err != nil { return err } } return nil } func (p *player) draw(target pixel.Target) { p.sprite.Draw(target, p.transform) }
examples/platformer/player.go
0.652131
0.461502
player.go
starcoder
package math import ( "github.com/jtejido/ggsl/specfunc" gomath "math" ) // Lower Incomplete Gamma func Ligamma(a, z float64) float64 { return gomath.Pow(z, a) * specfunc.Hyperg_1F1(a, a+1, -z) / a } // Inverse of the upper incomplete Gamma function func InverseRegularizedUpperIncompleteGamma(a, p float64) float64 { if gomath.IsNaN(a) || gomath.IsNaN(p) { return gomath.NaN() } if p < 0 || p > 1 || a <= 0 { panic("out of bounds") } x0 := gomath.MaxFloat64 yl := 0.0 x1 := 0.0 yh := 1.0 dithresh := 5.0 * machEp if p == 0 { return gomath.Inf(1) } if p == 1 { return 0.0 } // IgamC(a, x) - p = 0 d := 1.0 / (9.0 * a) y := 1.0 - d - Ndtri(p)*gomath.Sqrt(d) x := a * y * y * y lgm := specfunc.Lngamma(a) for i := 0; i < 10; i++ { if x > x0 || x < x1 { break } y = UpperIncompleteGamma(a, x) if y < yl || y > yh { break } if y < p { x0 = x yl = y } else { x1 = x yh = y } // Compute the derivative of the function at this point d = (a-1)*gomath.Log(x) - x - lgm if d < -maxLog { break } d = -gomath.Exp(d) // Compute the step to the next approximation of x d = (y - p) / d if gomath.Abs(d/x) < machEp { return x } x = x - d } d = 0.0625 if x0 == gomath.MaxFloat64 { if x <= 0 { x = 1 } for x0 == gomath.MaxFloat64 { x = (1 + d) * x y = UpperIncompleteGamma(a, x) if y < p { x0 = x yl = y break } d = d + d } } d = 0.5 dir := 0 for i := 0; i < 400; i++ { x = x1 + d*(x0-x1) y = UpperIncompleteGamma(a, x) lgm = (x0 - x1) / (x1 + x0) if gomath.Abs(lgm) < dithresh { break } lgm = (y - p) / p if gomath.Abs(lgm) < dithresh { break } if x <= 0 { break } if y >= p { x1 = x yh = y if dir < 0 { dir = 0 d = 0.5 } else if dir > 1 { d = 0.5*d + 0.5 } else { d = (p - yl) / (yh - yl) } dir++ } else { x0 = x yl = y if dir > 0 { dir = 0 d = 0.5 } else if dir < -1 { d = 0.5 * d } else { d = (p - yl) / (yh - yl) } dir-- } } return x } // Inverse of the lower incomplete Gamma function func InverseRegularizedLowerIncompleteGamma(a, y0 float64) float64 { if gomath.IsNaN(a) || gomath.IsNaN(y0) { return 
gomath.NaN() } if y0 < 0 || y0 > 1 || a <= 0 { panic("out of bounds") } xUpper := gomath.MaxFloat64 xLower := 0. yUpper := 1. yLower := 0. dithresh := 5.0 * machEp if y0 == 0. { return 0 } if y0 == 1 { return gomath.Inf(1) } y0 = 1 - y0 // Initial Guess d := 1 / (9 * a) y := 1 - d - (0.98 * gomath.Sqrt2 * gomath.Erfinv((2.0*y0)-1.0) * gomath.Sqrt(d)) x := a * y * y * y lgm := specfunc.Lngamma(a) for i := 0; i < 20; i++ { if x < xLower || x > xUpper { d = 0.0625 break } y = 1 - LowerIncompleteGamma(a, x) if y < yLower || y > yUpper { d = 0.0625 break } if y < y0 { xUpper = x yLower = y } else { xLower = x yUpper = y } d = ((a - 1) * gomath.Log(x)) - x - lgm if d < -709.78271289338399 { d = 0.0625 break } d = -gomath.Exp(d) d = (y - y0) / d if gomath.Abs(d/x) < machEp { return x } if (d > (x / 4)) && (y0 < 0.05) { // Naive heuristics for cases near the singularity d = x / 10 } x -= d } if xUpper == gomath.MaxFloat64 { if x <= 0 { x = 1 } for xUpper == gomath.MaxFloat64 { x = (1 + d) * x y = 1 - LowerIncompleteGamma(a, x) if y < y0 { xUpper = x yLower = y break } d = d + d } } dir := 0 d = 0.5 for i := 0; i < 400; i++ { x = xLower + (d * (xUpper - xLower)) y = 1 - LowerIncompleteGamma(a, x) lgm = (xUpper - xLower) / (xLower + xUpper) if gomath.Abs(lgm) < dithresh { return x } lgm = (y - y0) / y0 if gomath.Abs(lgm) < dithresh { return x } if x <= 0 { return 0 } if y >= y0 { xLower = x yUpper = y if dir < 0 { dir = 0 d = 0.5 } else { if dir > 1 { d = (0.5 * d) + 0.5 } else { d = (y0 - yLower) / (yUpper - yLower) } } dir = dir + 1 } else { xUpper = x yLower = y if dir > 0 { dir = 0 d = 0.5 } else { if dir < -1 { d = 0.5 * d } else { d = (y0 - yLower) / (yUpper - yLower) } } dir = dir - 1 } } return x } func InverseRegularizedIncompleteBeta(aa, bb, yy0 float64) float64 { var a, b, y0, d, y, x, x0, x1, lgm, yp, di, dithresh, yl, yh, xt float64 var i, rflg, dir, nflg int i = 0 if yy0 <= 0 { return 0 } if yy0 >= 1.0 { return 1 } x0 = 0.0 yl = 0.0 x1 = 1.0 yh = 1.0 nflg = 0 
if aa <= 1.0 || bb <= 1.0 { dithresh = 1.0e-6 rflg = 0 a = aa b = bb y0 = yy0 x = a / (a + b) y = specfunc.Beta_inc(a, b, x) goto ihalve } else { dithresh = 1.0e-4 } // Approximation to inverse function yp = -Ndtri(yy0) if yy0 > 0.5 { rflg = 1 a = bb b = aa y0 = 1.0 - yy0 yp = -yp } else { rflg = 0 a = aa b = bb y0 = yy0 } lgm = (yp*yp - 3.0) / 6.0 x = 2.0 / (1.0/(2.0*a-1.0) + 1.0/(2.0*b-1.0)) d = yp*gomath.Sqrt(x+lgm)/x - (1.0/(2.0*b-1.0)-1.0/(2.0*a-1.0))*(lgm+5.0/6.0-2.0/(3.0*x)) d = 2.0 * d if d < minLog { // mtherr("incbi", UNDERFLOW) x = 0 goto done } x = a / (a + b*gomath.Exp(d)) y = specfunc.Beta_inc(a, b, x) yp = (y - y0) / y0 if gomath.Abs(yp) < 0.2 { goto newt } /* Resort to interval halving if not close enough. */ ihalve: dir = 0 di = 0.5 for i = 0; i < 100; i++ { if i != 0 { x = x0 + di*(x1-x0) if x == 1.0 { x = 1.0 - machEp } if x == 0.0 { di = 0.5 x = x0 + di*(x1-x0) if x == 0.0 { // mtherr("incbi", UNDERFLOW) goto done } } y = specfunc.Beta_inc(a, b, x) yp = (x1 - x0) / (x1 + x0) if gomath.Abs(yp) < dithresh { goto newt } yp = (y - y0) / y0 if gomath.Abs(yp) < dithresh { goto newt } } if y < y0 { x0 = x yl = y if dir < 0 { dir = 0 di = 0.5 } else if dir > 3 { di = 1.0 - (1.0-di)*(1.0-di) } else if dir > 1 { di = 0.5*di + 0.5 } else { di = (y0 - y) / (yh - yl) } dir += 1 if x0 > 0.75 { if rflg == 1 { rflg = 0 a = aa b = bb y0 = yy0 } else { rflg = 1 a = bb b = aa y0 = 1.0 - yy0 } x = 1.0 - x y = specfunc.Beta_inc(a, b, x) x0 = 0.0 yl = 0.0 x1 = 1.0 yh = 1.0 goto ihalve } } else { x1 = x if rflg == 1 && x1 < machEp { x = 0.0 goto done } yh = y if dir > 0 { dir = 0 di = 0.5 } else if dir < -3 { di = di * di } else if dir < -1 { di = 0.5 * di } else { di = (y - y0) / (yh - yl) } dir -= 1 } } // mtherr("incbi", PLOSS) if x0 >= 1.0 { x = 1.0 - machEp goto done } if x <= 0.0 { // mtherr("incbi", UNDERFLOW) x = 0.0 goto done } newt: if nflg > 0 { goto done } nflg = 1 lgm = specfunc.Lngamma(a+b) - specfunc.Lngamma(a) - specfunc.Lngamma(b) for i = 0; i < 8; 
i++ { /* Compute the function at this point. */ if i != 0 { y = specfunc.Beta_inc(a, b, x) } if y < yl { x = x0 y = yl } else if y > yh { x = x1 y = yh } else if y < y0 { x0 = x yl = y } else { x1 = x yh = y } if x == 1.0 || x == 0.0 { break } /* Compute the derivative of the function at this point. */ d = (a-1.0)*gomath.Log(x) + (b-1.0)*gomath.Log(1.0-x) + lgm if d < minLog { goto done } if d > maxLog { break } d = gomath.Exp(d) /* Compute the step to the next approximation of x. */ d = (y - y0) / d xt = x - d if xt <= x0 { y = (x - x0) / (x1 - x0) xt = x0 + 0.5*y*(x-x0) if xt <= 0.0 { break } } if xt >= x1 { y = (x1 - x) / (x1 - x0) xt = x1 - 0.5*y*(x1-x) if xt >= 1.0 { break } } x = xt if gomath.Abs(d/x) < 128.0*machEp { goto done } } /* Did not converge. */ dithresh = 256.0 * machEp goto ihalve done: if rflg > 0 { if x <= machEp { x = 1.0 - machEp } else { x = 1.0 - x } } return (x) }
math/gamma.go
0.677901
0.592608
gamma.go
starcoder
package assertjson import ( "testing" "github.com/stretchr/testify/assert" ) // IsString asserts that the JSON node has a string value. func (node *AssertNode) IsString(msgAndArgs ...interface{}) { node.t.Helper() if node.exists() { assert.IsType(node.t, "", node.value, msgAndArgs...) } } // EqualToTheString asserts that the JSON node has a string value equals to the given value. func (node *AssertNode) EqualToTheString(expectedValue string, msgAndArgs ...interface{}) { node.t.Helper() if node.exists() { assert.IsType(node.t, "", node.value, msgAndArgs...) assert.Equal(node.t, expectedValue, node.value, msgAndArgs...) } } // Matches asserts that the JSON node has a string value that matches the regular expression. func (node *AssertNode) Matches(regexp string, msgAndArgs ...interface{}) { node.t.Helper() if node.exists() { assert.IsType(node.t, "", node.value, msgAndArgs...) assert.Regexp(node.t, regexp, node.value, msgAndArgs...) } } // DoesNotMatch asserts that the JSON node has a string value that does not match the regular expression. func (node *AssertNode) DoesNotMatch(regexp string, msgAndArgs ...interface{}) { node.t.Helper() if node.exists() { assert.IsType(node.t, "", node.value, msgAndArgs...) assert.NotRegexp(node.t, regexp, node.value, msgAndArgs...) } } // Contains asserts that the JSON node has a string value that contains a string. func (node *AssertNode) Contains(contain string, msgAndArgs ...interface{}) { node.t.Helper() if node.exists() { assert.Contains(node.t, node.value, contain, msgAndArgs...) } } // DoesNotContain asserts that the JSON node has a string value that does not contain a string. func (node *AssertNode) DoesNotContain(contain string, msgAndArgs ...interface{}) { node.t.Helper() if node.exists() { assert.NotContains(node.t, node.value, contain, msgAndArgs...) } } // IsStringWithLength asserts that the JSON node has a string value with length equal to the given value. 
func (node *AssertNode) IsStringWithLength(length int, msgAndArgs ...interface{}) { node.t.Helper() if node.exists() { assert.IsType(node.t, "", node.value, msgAndArgs...) assert.Equal(node.t, len(node.value.(string)), length, msgAndArgs...) } } // IsStringWithLengthInRange asserts that the JSON node has a string value with length in the given range. func (node *AssertNode) IsStringWithLengthInRange(min int, max int, msgAndArgs ...interface{}) { node.t.Helper() if node.exists() { assert.IsType(node.t, "", node.value, msgAndArgs...) assert.GreaterOrEqual(node.t, len(node.value.(string)), min, msgAndArgs...) assert.LessOrEqual(node.t, len(node.value.(string)), max, msgAndArgs...) } } // AssertString asserts that the JSON node has a string value and it is satisfied by the user function assertFunc. func (node *AssertNode) AssertString(assertFunc func(t testing.TB, value string)) { node.t.Helper() if node.exists() && assert.IsType(node.t, "", node.value) { assertFunc(node.t, node.value.(string)) } }
assertjson/string.go
0.647241
0.737938
string.go
starcoder
package advent import ( . "github.com/davidparks11/advent2021/internal/advent/day8" ) var _ Problem = &sevenSegmentSearch{} type sevenSegmentSearch struct { dailyProblem } func NewSevenSegmentSearch() Problem { return &sevenSegmentSearch{ dailyProblem{ day: 8, }, } } func (s *sevenSegmentSearch) Solve() interface{} { input := s.GetInputLines() var results []int results = append(results, s.uniqueSum(input)) results = append(results, s.outputSum(input)) return results } /* You barely reach the safety of the cave when the whale smashes into the cave mouth, collapsing it. Sensors indicate another exit to this cave at a much greater depth, so you have no choice but to press on. As your submarine slowly makes its way through the cave system, you notice that the four-digit seven-segment displays in your submarine are malfunctioning; they must have been damaged during the escape. You'll be in a lot of trouble without them, so you'd better figure out what's wrong. Each digit of a seven-segment display is rendered by turning on or off any of seven segments named a through g: 0: 1: 2: 3: 4: aaaa .... aaaa aaaa .... b c . c . c . c b c b c . c . c . c b c .... .... dddd dddd dddd e f . f e . . f . f e f . f e . . f . f gggg .... gggg gggg .... 5: 6: 7: 8: 9: aaaa aaaa aaaa aaaa aaaa b . b . . c b c b c b . b . . c b c b c dddd dddd .... dddd dddd . f e f . f e f . f . f e f . f e f . f gggg gggg .... gggg gggg So, to render a 1, only segments c and f would be turned on; the rest would be off. To render a 7, only segments a, c, and f would be turned on. The problem is that the signals which control the segments have been mixed up on each display. The submarine is still trying to display numbers by producing output on signal wires a through g, but those wires are connected to segments randomly. Worse, the wire/segment connections are mixed up separately for each four-digit display! (All of the digits within a display use the same connections, though.) 
So, you might know that only signal wires b and g are turned on, but that doesn't mean segments b and g are turned on: the only digit that uses two segments is 1, so it must mean segments c and f are meant to be on. With just that information, you still can't tell which wire (b/g) goes to which segment (c/f). For that, you'll need to collect more information. For each display, you watch the changing signals for a while, make a note of all ten unique signal patterns you see, and then write down a single four digit output value (your puzzle input). Using the signal patterns, you should be able to work out which pattern corresponds to which digit. For example, here is what you might see in a single entry in your notes: acedgfb cdfbe gcdfa fbcad dab cefabd cdfgeb eafb cagedb ab | cdfeb fcadb cdfeb cdbaf (The entry is wrapped here to two lines so it fits; in your notes, it will all be on a single line.) Each entry consists of ten unique signal patterns, a | delimiter, and finally the four digit output value. Within an entry, the same wire/segment connections are used (but you don't know what the connections actually are). The unique signal patterns correspond to the ten different ways the submarine tries to render a digit using the current wire/segment connections. Because 7 is the only digit that uses three segments, dab in the above example means that to render a 7, signal lines d, a, and b are on. Because 4 is the only digit that uses four segments, eafb means that to render a 4, signal lines e, a, f, and b are on. Using this information, you should be able to work out which combination of signal wires corresponds to each of the ten digits. Then, you can decode the four digit output value. Unfortunately, in the above example, all of the digits in the output value (cdfeb fcadb cdfeb cdbaf) use five segments and are more difficult to deduce. For now, focus on the easy digits. 
Consider this larger example: be cfbegad cbdgef fgaecd cgeb fdcge agebfd fecdb fabcd edb | fdgacbe cefdb cefbgd gcbe edbfga begcd cbg gc gcadebf fbgde acbgfd abcde gfcbed gfec | fcgedb cgb dgebacf gc fgaebd cg bdaec gdafb agbcfd gdcbef bgcad gfac gcb cdgabef | cg cg fdcagb cbg fbegcd cbd adcefb dageb afcb bc aefdc ecdab fgdeca fcdbega | efabcd cedba gadfec cb aecbfdg fbg gf bafeg dbefa fcge gcbea fcaegb dgceab fcbdga | gecf egdcabf bgf bfgea fgeab ca afcebg bdacfeg cfaedg gcfdb baec bfadeg bafgc acf | gebdcfa ecba ca fadegcb dbcfg fgd bdegcaf fgec aegbdf ecdfab fbedc dacgb gdcebf gf | cefg dcbef fcge gbcadfe bdfegc cbegaf gecbf dfcage bdacg ed bedf ced adcbefg gebcd | ed bcgafe cdgba cbgef egadfb cdbfeg cegd fecab cgb gbdefca cg fgcdab egfdb bfceg | gbdfcae bgc cg cgb gcafb gcf dcaebfg ecagb gf abcdeg gaef cafbge fdbac fegbdc | fgae cfgab fg bagce Because the digits 1, 4, 7, and 8 each use a unique number of segments, you should be able to tell which combinations of signals correspond to those digits. Counting only digits in the output values (the part after | on each line), in the above example, there are 26 instances of digits that use a unique number of segments (highlighted above). In the output values, how many times do digits 1, 4, 7, or 8 appear? */ func (s *sevenSegmentSearch) uniqueSum(input []string) int { signalPattens := ParseInput(input) distinctSigCount := 0 for _, p := range signalPattens { for _, o := range p.Outputs { if o.OneCount() == 2 || o.OneCount() == 3 || o.OneCount() == 4 || o.OneCount() == 7 { distinctSigCount++ } } } return distinctSigCount } /* Through a little deduction, you should now be able to determine the remaining digits. 
Consider again the first example above: acedgfb cdfbe gcdfa fbcad dab cefabd cdfgeb eafb cagedb ab | cdfeb fcadb cdfeb cdbaf After some careful analysis, the mapping between signal wires and segments only make sense in the following configuration: dddd e a e a ffff g b g b cccc So, the unique signal patterns would correspond to the following digits: acedgfb: 8 cdfbe: 5 gcdfa: 2 fbcad: 3 dab: 7 cefabd: 9 cdfgeb: 6 eafb: 4 cagedb: 0 ab: 1 Then, the four digits of the output value can be decoded: cdfeb: 5 fcadb: 3 cdfeb: 5 cdbaf: 3 Therefore, the output value for this entry is 5353. Following this same process for each entry in the second, larger example above, the output value of each entry can be determined: fdgacbe cefdb cefbgd gcbe: 8394 fcgedb cgb dgebacf gc: 9781 cg cg fdcagb cbg: 1197 efabcd cedba gadfec cb: 9361 gecf egdcabf bgf bfgea: 4873 gebdcfa ecba ca fadegcb: 8418 cefg dcbef fcge gbcadfe: 4548 ed bcgafe cdgba cbgef: 1625 gbdfcae bgc cg cgb: 8717 fgae cfgab fg bagce: 4315 Adding all of the output values in this larger example produces 61229. For each entry, determine all of the wire/segment connections and decode the four-digit output values. What do you get if you add up all of the output values? 
*/ func (s *sevenSegmentSearch) outputSum(input []string) int { entries := ParseInput(input) sumOutputs := 0 for _, e := range entries { var digits [10]Pattern for _, p := range e.Patterns { s.solveUniqueCases(&digits, p) } for _, p := range e.Patterns { s.inferOtherCases(&digits, p) } decodeOutput := 0 for _, o := range e.Outputs { decodeOutput = decodeOutput*10 + s.matchDigit(&digits, o) } sumOutputs += decodeOutput } return sumOutputs } func (s *sevenSegmentSearch) matchDigit(digits *[10]Pattern, p Pattern) int { for i, d := range digits { if d == p { return i } } return -1 } func (s *sevenSegmentSearch) solveUniqueCases(digits *[10]Pattern, p Pattern) { switch p.OneCount() { case 2: digits[1] = p case 3: digits[7] = p case 4: digits[4] = p case 7: digits[8] = p } } func (s *sevenSegmentSearch) inferOtherCases(digits *[10]Pattern, p Pattern) { //all non-unique cases have a one count of 5 or 6. Their number can be //found with the one count and unique cases switch p.OneCount() { case 5: if p|digits[4] == digits[8] { digits[2] = p } else if p&digits[7] == digits[7] { digits[3] = p } else { digits[5] = p } case 6: if p&digits[4] == digits[4] { digits[9] = p } else if p&digits[7] == digits[7] { digits[0] = p } else { digits[6] = p } } }
internal/advent/day8.go
0.68342
0.519399
day8.go
starcoder
package world import ( "ForestModel/entity" "ForestModel/util" "errors" ) //World represents the reified datastructure of the world itself type World struct { Entities []*entity.Entity Cells map[util.Point]*Cell size util.Rect } //Cell represents a containing subsection of forest type Cell struct { util.Rect Strata []*Stratum Substrate *entity.Soil Contents []entity.GenericInterface WPointer *World Position util.Point } //Stratum represents the properties of one of the stratified layers of biomedia within the modelled biome type Stratum struct { Humidity float32 Temperature float32 LightLevel float32 } //InitializeWorld allocates and generates a simulated world with the specified properties func InitializeWorld(size util.Rect, cellSize int) (World, error) { world := World{} world.size = size if size.Width%cellSize != 0 || size.Height%cellSize != 0 { return World{}, errors.New("World size not evenly divisible by subcell size") } for x := 0; x <= size.Width; x += cellSize { for y := 0; y <= size.Height; y += cellSize { world.Cells = map[util.Point]*Cell{} world.Cells[util.Point{int32(x), int32(y)}] = &Cell{ Substrate: entity.NewSoil(), Strata: []*Stratum{}, WPointer: &world, } } } return world, nil } //SetHumidity assigns all cell substrates/strata to the given moisture/humidity value func (w *World) SetHumidity(value float32) { for cell := range w.Cells { w.Cells[cell].Substrate.Humidity = value for stratum := range w.Cells[cell].Strata { w.Cells[cell].Strata[stratum].Humidity = value } } } //SetTemperature assigns all cell substrates/strata to the given temperature value func (w *World) SetTemperature(value float32) { for cell := range w.Cells { w.Cells[cell].Substrate.Temperature = value for stratum := range w.Cells[cell].Strata { w.Cells[cell].Strata[stratum].Temperature = value } } } //Simulate models a certain number of seconds of world time func (w *World) Simulate(ticks int) { temp := (func() map[util.Point]Cell { _map := map[util.Point]Cell{} for k, v := 
range w.Cells { _map[k] = *v } return _map })() for i := 0; i <= ticks; i++ { for cell := range temp { for entity := range temp[cell].Contents { temp[cell].Contents[entity].Simulate(w) } } } w.Cells = (func() map[util.Point]*Cell { _map := map[util.Point]*Cell{} for k, v := range w.Cells { _map[k] = v } return _map })() } //GetNeighbors returns the neighboring cells func (c *Cell) GetNeighbors() { neighbors := map[util.Point]*Cell{} }
world/world.go
0.730674
0.536981
world.go
starcoder
package testdata // GetRefundResponse example const GetRefundResponse = `{ "resource": "refund", "id": "re_4qqhO89gsT", "amount": { "currency": "EUR", "value": "5.95" }, "status": "pending", "createdAt": "2018-03-14T17:09:02.0Z", "description": "Order #33", "metadata": { "bookkeeping_id": 12345 }, "paymentId": "tr_WDqYK6vllg", "_links": { "self": { "href": "https://api.mollie.com/v2/payments/tr_WDqYK6vllg/refunds/re_4qqhO89gsT", "type": "application/hal+json" }, "payment": { "href": "https://api.mollie.com/v2/payments/tr_WDqYK6vllg", "type": "application/hal+json" }, "documentation": { "href": "https://docs.mollie.com/reference/v2/refunds-api/get-refund", "type": "text/html" } } }` // GetRefundListResponse example const GetRefundListResponse = `{ "count": 5, "_embedded": { "refunds": [ { "resource": "refund", "id": "re_4qqhO89gsT", "amount": { "currency": "EUR", "value": "5.95" }, "status": "pending", "createdAt": "2018-03-14T17:09:02.0Z", "description": "Order", "metadata": { "bookkeeping_id": 12345 }, "paymentId": "tr_WDqYK6vllg", "_links": { "self": { "href": "https://api.mollie.com/v2/payments/tr_WDqYK6vllg/refunds/re_4qqhO89gsT", "type": "application/hal+json" }, "payment": { "href": "https://api.mollie.com/v2/payments/tr_WDqYK6vllg", "type": "application/hal+json" }, "documentation": { "href": "https://docs.mollie.com/reference/v2/refunds-api/get-refund", "type": "text/html" } } }, { }, { } ] }, "_links": { "self": { "href": "https://api.mollie.com/v2/payments/tr_7UhSN1zuXS/refunds?limit=5", "type": "application/hal+json" }, "previous": null, "next": { "href": "https://api.mollie.com/v2/payments/tr_7UhSN1zuXS/refunds?from=re_APBiGPH2vV&limit=5", "type": "application/hal+json" }, "documentation": { "href": "https://docs.mollie.com/reference/v2/refunds-api/list-refunds", "type": "text/html" } } }`
testdata/refunds.go
0.667906
0.409752
refunds.go
starcoder
package gopriceoptions

import (
	"math"
)

// sqtwopi is sqrt(2*pi), the normalization constant of the standard normal
// density, computed once at package initialization.
var sqtwopi float64 = math.Sqrt(2 * math.Pi)

// IVPrecision is the absolute price tolerance at which BSImpliedVol's
// Newton-Raphson search is considered converged.
var IVPrecision = 0.00001

// PriceBlackScholes returns the Black-Scholes price of a European option.
// callType selects call (true) or put (false); underlying and strike are the
// spot and strike prices; timeToExpiration is in years; volatility,
// riskFreeInterest and dividend are annualized rates.
//
// NOTE(review): for timeToExpiration <= 0 this returns |S-K|, not the usual
// intrinsic value max(S-K, 0) / max(K-S, 0) — an expired out-of-the-money
// option prices as positive. Preserved as-is; confirm it is intended.
func PriceBlackScholes(callType bool, underlying float64, strike float64, timeToExpiration float64, volatility float64, riskFreeInterest float64, dividend float64) float64 {
	var sign float64
	if callType {
		if timeToExpiration <= 0 {
			return math.Abs(underlying - strike)
		}
		sign = 1
	} else {
		if timeToExpiration <= 0 {
			return math.Abs(strike - underlying)
		}
		sign = -1
	}
	// sign is always +1 or -1 here; the original's `if sign == 0` guard was
	// unreachable and has been removed.
	re := math.Exp(-riskFreeInterest * timeToExpiration) // risk-free discount factor
	qe := math.Exp(-dividend * timeToExpiration)         // dividend discount factor
	vt := volatility * (math.Sqrt(timeToExpiration))
	d1 := d1f(underlying, strike, timeToExpiration, volatility, riskFreeInterest, dividend, vt)
	d2 := d2f(d1, vt)
	// Multiplying d1/d2 by sign folds the put formula into the call formula.
	d1 = sign * d1
	d2 = sign * d2
	nd1 := Stdnorm.Cdf(d1)
	nd2 := Stdnorm.Cdf(d2)
	bsprice := sign * ((underlying * qe * nd1) - (strike * re * nd2))
	return bsprice
}

// d1f computes the Black-Scholes d1 term. volatilityWithExpiration is
// volatility*sqrt(T), passed in so callers can reuse it for d2.
func d1f(underlying float64, strike float64, timeToExpiration float64, volatility float64, riskFreeInterest float64, dividend float64, volatilityWithExpiration float64) float64 {
	d1 := math.Log(underlying/strike) + (timeToExpiration * (riskFreeInterest - dividend + ((volatility * volatility) * 0.5)))
	d1 = d1 / volatilityWithExpiration
	return d1
}

// d2f computes d2 = d1 - volatility*sqrt(T).
func d2f(d1 float64, volatilityWithExpiration float64) float64 {
	return d1 - volatilityWithExpiration
}

// d1pdff evaluates the standard normal density at d1 for the given inputs.
func d1pdff(underlying float64, strike float64, timeToExpiration float64, volatility float64, riskFreeInterest float64, dividend float64) float64 {
	vt := volatility * (math.Sqrt(timeToExpiration))
	d1 := d1f(underlying, strike, timeToExpiration, volatility, riskFreeInterest, dividend, vt)
	return math.Exp(-(d1*d1)*0.5) / sqtwopi
}

// BSDelta returns the dividend-discounted option delta dV/dS:
// e^{-qT}*N(d1) for calls and e^{-qT}*(N(d1)-1) for puts.
func BSDelta(callType bool, underlying float64, strike float64, timeToExpiration float64, volatility float64, riskFreeInterest float64, dividend float64) float64 {
	var zo float64
	if !callType {
		zo = -1 // shifts N(d1) to N(d1)-1 for the put delta
	} else {
		zo = 0
	}
	drq := math.Exp(-dividend * timeToExpiration)
	vt := volatility * (math.Sqrt(timeToExpiration))
	d1 := d1f(underlying, strike, timeToExpiration, volatility, riskFreeInterest, dividend, vt)
	cdfd1 := Stdnorm.Cdf(d1)
	delta := drq * (cdfd1 + zo)
	return delta
}

// BSVega returns vega scaled per 1% change in volatility (the 0.01 factor).
func BSVega(underlying float64, strike float64, timeToExpiration float64, volatility float64, riskFreeInterest float64, dividend float64) float64 {
	d1pdf := d1pdff(underlying, strike, timeToExpiration, volatility, riskFreeInterest, dividend)
	drq := math.Exp(-dividend * timeToExpiration)
	sqt := math.Sqrt(timeToExpiration)
	vega := (d1pdf) * drq * underlying * sqt * 0.01
	return vega
}

// BSGamma returns the option gamma d²V/dS² (same for calls and puts).
func BSGamma(underlying float64, strike float64, timeToExpiration float64, volatility float64, riskFreeInterest float64, dividend float64) float64 {
	drq := math.Exp(-dividend * timeToExpiration)
	drd := underlying * volatility * math.Sqrt(timeToExpiration)
	d1pdf := d1pdff(underlying, strike, timeToExpiration, volatility, riskFreeInterest, dividend)
	gamma := (drq / drd) * d1pdf
	return gamma
}

// BSTheta returns the option theta per calendar day (annual theta / 365).
func BSTheta(callType bool, underlying float64, strike float64, timeToExpiration float64, volatility float64, riskFreeInterest float64, dividend float64) float64 {
	var sign float64
	if !callType {
		sign = -1
	} else {
		sign = 1
	}
	sqt := math.Sqrt(timeToExpiration)
	drq := math.Exp(-dividend * timeToExpiration)
	dr := math.Exp(-riskFreeInterest * timeToExpiration)
	d1pdf := d1pdff(underlying, strike, timeToExpiration, volatility, riskFreeInterest, dividend)
	twosqt := 2 * sqt
	// p1: time-decay of the optionality; p2/p3: carry on strike and spot.
	p1 := -1 * ((underlying * volatility * drq) / twosqt) * d1pdf
	vt := volatility * (sqt)
	d1 := d1f(underlying, strike, timeToExpiration, volatility, riskFreeInterest, dividend, vt)
	d2 := d2f(d1, vt)
	var nd1, nd2 float64
	d1 = sign * d1
	d2 = sign * d2
	nd1 = Stdnorm.Cdf(d1)
	nd2 = Stdnorm.Cdf(d2)
	p2 := -sign * riskFreeInterest * strike * dr * nd2
	p3 := sign * dividend * underlying * drq * nd1
	theta := (p1 + p2 + p3) / 365
	return theta
}

// BSRho returns the option rho scaled per 1% change in the risk-free rate.
func BSRho(callType bool, underlying float64, strike float64, timeToExpiration float64, volatility float64, riskFreeInterest float64, dividend float64) float64 {
	var sign float64
	if !callType {
		sign = -1
	} else {
		sign = 1
	}
	dr := math.Exp(-riskFreeInterest * timeToExpiration)
	p1 := sign * (strike * timeToExpiration * dr) / 100
	vt := volatility * (math.Sqrt(timeToExpiration))
	d1 := d1f(underlying, strike, timeToExpiration, volatility, riskFreeInterest, dividend, vt)
	d2 := sign * d2f(d1, vt)
	nd2 := Stdnorm.Cdf(d2)
	rho := p1 * nd2
	return rho
}

// BSImpliedVol solves for the volatility that reproduces lastTradedPrice via
// Newton-Raphson, seeded with startAnchorVolatility. It returns NaN when the
// search does not converge within the iteration cap.
func BSImpliedVol(callType bool, lastTradedPrice float64, underlying float64, strike float64, timeToExpiration float64, startAnchorVolatility float64, riskFreeInterest float64, dividend float64) float64 {
	// !(x > 0) rather than x <= 0 so a NaN seed is also replaced with the
	// default 50% starting volatility (matches the original's semantics).
	if !(startAnchorVolatility > 0) {
		startAnchorVolatility = 0.5
	}
	errlim := IVPrecision
	// The original tracked the cap in two variables (maxl for the loop,
	// maxloops for the convergence test), both 100; unified into one const.
	const maxIter = 100
	dv := errlim + 1
	n := 0
	for ; math.Abs(dv) > errlim && n < maxIter; n++ {
		// Newton step: vol -= (price(vol) - target) / dPrice/dVol.
		difval := PriceBlackScholes(callType, underlying, strike, timeToExpiration, startAnchorVolatility, riskFreeInterest, dividend) - lastTradedPrice
		// BSVega is scaled per 1% of vol; divide by 0.01 to get dPrice/dVol.
		v1 := BSVega(underlying, strike, timeToExpiration, startAnchorVolatility, riskFreeInterest, dividend) / 0.01
		dv = difval / v1
		startAnchorVolatility = startAnchorVolatility - dv
	}
	if n < maxIter {
		return startAnchorVolatility
	}
	return math.NaN() // failed to converge
}
blacklike.go
0.72027
0.458349
blacklike.go
starcoder
package modern import ( "github.com/dotstart/identicons/library/identicons/icon/tiled" "github.com/dotstart/identicons/library/identicons/shape" ) var halfRectangleTile = tiled.Rect(0, 0, 0.5, 0.5) var tileTable = []tiled.Tile{ halfRectangleTile, // half rectangle tiled.Combined(halfRectangleTile, tiled.Flipped(halfRectangleTile, true, true)), // diagonally connected half rectangles tiled.Shape(shape.Vert(0, 0), shape.Vert(1, 0), shape.Vert(1, 1)), // triangle down-left tiled.Shape(shape.Vert(0, 1), shape.Vert(1, 1), shape.Vert(1, 0)), // triangle center tiled.Shape(shape.Vert(0, 0), shape.Vert(1, 1), shape.Vert(1, 0)), // triangle top-right tiled.Shape(shape.Vert(0, 0), shape.Vert(0, 1), shape.Vert(1, 0)), // triangle out tiled.Shape(shape.Vert(0, 0), shape.Vert(0, 1), shape.Vert(0.5, 0)), // steep triangle down tiled.Shape(shape.Vert(0, 0.5), shape.Vert(0, 1), shape.Vert(1, 1)), // steep triangle right tiled.Shape(shape.Vert(0.5, 1), shape.Vert(1, 1), shape.Vert(1, 0)), // steep triangle up tiled.Shape(shape.Vert(0, 0), shape.Vert(1, 0.5), shape.Vert(1, 0)), // steep triangle left tiled.Shape(shape.Vert(.5, 0), shape.Vert(0, .5), shape.Vert(.5, 1), shape.Vert(1, .5)), // diamond tiled.Circle(1), tiled.Circle(0.5), tiled.Donut(1, 0.75), tiled.Donut(0.5, 0.25), } var centerTileTable = append( tileTable, tiled.Shape(shape.Vert(0, 0), shape.Vert(0, 1), shape.Vert(0.5, 1), shape.Vert(0.25, 0.25), shape.Vert(1, 0.5), shape.Vert(1, 0)), // inverse star tiled.Shape(shape.Vert(0, 0), shape.Vert(0, 1), shape.Vert(1, 1), shape.Vert(1, 0), shape.Vert(0.25, 0), shape.Vert(0.25, 0.25), shape.Vert(0.75, 0.25), shape.Vert(0.5, 0.75), shape.Vert(0.25, 0.25), shape.Vert(0.25, 0)), // inverse triangle tiled.Shape(shape.Vert(0, 0), shape.Vert(0, 1), shape.Vert(1, 1), shape.Vert(1, 0), shape.Vert(0.27, 0.2), shape.Vert(0.8, 0.73), shape.Vert(0.73, 0.8), shape.Vert(0.2, 0.27), shape.Vert(0.27, 0.2), shape.Vert(0.27, 0)), // inverse diagonal bar )
library/identicons/icon/modern/tiles.go
0.535098
0.600393
tiles.go
starcoder
package main

// struct1 carries one compared field (A) and one field excluded from
// comparison via its struct tag.
type struct1 struct {
	A int
	// To ignore a field in a struct, just annotate it with testdiff:"ignore" like this:
	B int `testdiff:"ignore"`
}

// struct2 mixes an exported field, an unexported field, and a slice.
type struct2 struct {
	A int
	b int
	C []int
}

// struct3 wraps struct2 one level deep.
type struct3 struct {
	s2 struct2
}

// struct4 wraps struct3, giving two nesting levels above struct2.
type struct4 struct {
	s3 struct3
}

// struct5 holds a slice of the deeply nested struct4 values.
type struct5 struct {
	s4 []struct4
}

// newStruct4 builds a struct4 whose innermost struct2 has A == 1 plus the
// supplied unexported value and slice; it keeps the ex6-ex9 fixtures short.
func newStruct4(bField int, c []int) struct4 {
	return struct4{s3: struct3{s2: struct2{A: 1, b: bField, C: c}}}
}

// ex1 yields two struct1 values that differ only in the ignored field B.
func ex1() (struct1, struct1) {
	first := struct1{A: 1, B: 2}
	second := struct1{A: 1, B: 3}
	return first, second
}

// ex2 yields two struct2 values differing in the unexported field and the
// slice contents.
func ex2() (struct2, struct2) {
	first := struct2{A: 1, b: 2, C: []int{1}}
	second := struct2{A: 1, b: 3, C: []int{1, 1, 2}}
	return first, second
}

// ex3 yields slices of different lengths with different trailing content.
func ex3() ([]int, []int) {
	return []int{1, 2}, []int{1}
}

// ex4 yields slices where the longer one merely repeats the element.
func ex4() ([]int, []int) {
	return []int{1, 1}, []int{1}
}

// ex5 is ex4 with the longer slice on the right-hand side.
func ex5() ([]int, []int) {
	return []int{1}, []int{1, 1}
}

// ex6 yields deeply nested structs differing at the innermost level.
func ex6() (struct4, struct4) {
	return newStruct4(2, []int{1}), newStruct4(3, []int{1, 1, 2})
}

// ex7 yields struct5 values whose slices differ in length, with the changed
// element first on the right-hand side.
func ex7() (struct5, struct5) {
	first := struct5{s4: []struct4{newStruct4(2, []int{1})}}
	second := struct5{s4: []struct4{newStruct4(3, []int{1, 1, 2}), newStruct4(2, []int{1})}}
	return first, second
}

// ex8 is ex7 with the unchanged element first on the right-hand side.
func ex8() (struct5, struct5) {
	first := struct5{s4: []struct4{newStruct4(2, []int{1})}}
	second := struct5{s4: []struct4{newStruct4(2, []int{1}), newStruct4(3, []int{1, 1, 2})}}
	return first, second
}

// ex9 is ex8 with the first right-hand element's slice also extended.
func ex9() (struct5, struct5) {
	first := struct5{s4: []struct4{newStruct4(2, []int{1})}}
	second := struct5{s4: []struct4{newStruct4(2, []int{1, 1}), newStruct4(3, []int{1, 1, 2})}}
	return first, second
}
dev/RnD/messagediff/simple.go
0.547222
0.447702
simple.go
starcoder