code
stringlengths
114
1.05M
path
stringlengths
3
312
quality_prob
float64
0.5
0.99
learning_prob
float64
0.2
1
filename
stringlengths
3
168
kind
stringclasses
1 value
package healthz // Severity specifies the seriousness of a component type Severity int8 // Health specifies if a component is ready or not type Health int8 const ( // Major means this component's failour disruptes some major // functionalities of the system. Major Severity = 1 // Unspecified is the default value of Severity Unspecified Severity = 0 // Minor means this component's failour causes non-critical loss of some // functionalities of the system. Minor Severity = -1 // Redundant means there are multiple instances of this components ready to serve Redundant Health = 2 // Normal means there is at least one instance of this component ready to serve Normal Health = 1 // Unknown is the default value of Health Unknown Health = 0 // Warning means there are some problems with this component Warning Health = -1 // Error means that this components is unable to serve as expected Error Health = -2 ) var ( healthToTitle = make(map[Health]string) severityToTitle = make(map[Severity]string) ) func init() { healthToTitle[Redundant] = "Redundant" healthToTitle[Normal] = "Normal" healthToTitle[Unknown] = "Unknown" healthToTitle[Warning] = "Warning" healthToTitle[Error] = "Error" severityToTitle[Major] = "Major" severityToTitle[Minor] = "Minor" severityToTitle[Unspecified] = "Unspecified" } // GroupReport is a copied status of a group and its subcomponents type GroupReport struct { Name string Severity Severity OverallHealth Health Subcomponents []*GroupReport `json:",omitempty"` } // ComponentGroup represents a component or a group of components. You can set // health level of the group by calling `SetGroupHealth`, OR, by creating // subcomponents. Note that you can't mix these two mechanism, it will cause // panic! type ComponentGroup interface { // SetGroupHealth sets the health level of the specified component. 
SetGroupHealth(health Health) // RegisterSubcomponent creates a subcomponent if it wasn't registered // before, and sets the severity level of the subcomponent to the given // value. RegisterSubcomponent(name string, severity Severity) ComponentGroup // UnregisterSubcomponent removes the subcomponent from the group, and the // calculation of the `OverallHealth` UnregisterSubcomponent(name string) // OverallHealth is the specified value set by `SetGroupHealth`, or if the // this instance contains one or more subcomponents, it's the minimum value // of these two: // * Minimum health level of all the subcomponents with severity=Major // * 1 + Minimum health level of all the components with severity=Unspecified // If no Major or Unspecified component is registered, OverallHealth // returns Unknown. // Otherwise, if no Major component is registered, the result will be // capped at Normal OverallHealth() Health // GroupReport copies the current status of the group and its subcomponents, // and returns the copied object GroupReport() *GroupReport }
definitions.go
0.605799
0.470311
definitions.go
starcoder
package main import ( "fmt" "github.com/gord-project/gview" "strconv" "strings" "time" "github.com/gdamore/tcell/v2" ) const corporate = `Leverage agile frameworks to provide a robust synopsis for high level overviews. Iterative approaches to corporate strategy foster collaborative thinking to further the overall value proposition. Organically grow the holistic world view of disruptive innovation via workplace diversity and empowerment. Bring to the table win-win survival strategies to ensure proactive domination. At the end of the day, going forward, a new normal that has evolved from generation X is on the runway heading towards a streamlined cloud solution. User generated content in real-time will have multiple touchpoints for offshoring. Capitalize on low hanging fruit to identify a ballpark value added activity to beta test. Override the digital divide with additional clickthroughs from DevOps. Nanotechnology immersion along the information highway will close the loop on focusing solely on the bottom line. [yellow]Press Enter, then Tab/Backtab for word selections` func main() { app := gview.NewApplication() textView := gview.NewTextView(). SetDynamicColors(true). SetRegions(true). SetWordWrap(true). 
SetChangedFunc(func() { app.Draw() }) numSelections := 0 go func() { for _, word := range strings.Split(corporate, " ") { if word == "the" { word = "[#ff0000]the[white]" } if word == "to" { word = fmt.Sprintf(`["%d"]to[""]`, numSelections) numSelections++ } fmt.Fprintf(textView, "%s ", word) time.Sleep(200 * time.Millisecond) } }() textView.SetDoneFunc(func(key tcell.Key) { currentSelection := textView.GetHighlights() if key == tcell.KeyEnter { if len(currentSelection) > 0 { textView.Highlight() } else { textView.Highlight("0").ScrollToHighlight() } } else if len(currentSelection) > 0 { index, _ := strconv.Atoi(currentSelection[0]) if key == tcell.KeyTab { index = (index + 1) % numSelections } else if key == tcell.KeyBacktab { index = (index - 1 + numSelections) % numSelections } else { return } textView.Highlight(strconv.Itoa(index)).ScrollToHighlight() } }) textView.SetBorder(true) if err := app.SetRoot(textView, true).Run(); err != nil { panic(err) } }
demos/textview/main.go
0.53437
0.409221
main.go
starcoder
package collections import ( "encoding/binary" "hash/maphash" ) type hashKeyType = uint32 const bitsPerTrieDepth hashKeyType = 5 const sizeOfSlices hashKeyType = 32 const bitMask hashKeyType = sizeOfSlices - 1 type HashMap struct { seed maphash.Seed size int root HAMTNode } func NewHashMap() *HashMap { seed := maphash.MakeSeed() root := &SliceNode{ size: 0, data: make([]HAMTNode, sizeOfSlices), } return &HashMap{ seed: seed, size: 0, root: root, } } func (hashMap *HashMap) Get(key interface{}) (interface{}, bool) { hash := getHash(key, hashMap.seed) return hashMap.root.get(hash, 1, key) } func (hashMap *HashMap) Set(key interface{}, value interface{}) *HashMap { hash := getHash(key, hashMap.seed) newRoot, howManyAdded := hashMap.root.set(hash, 1, key, value) return &HashMap{ seed: hashMap.seed, size: hashMap.size + howManyAdded, root: newRoot, } } type HAMTNode interface { set(hash hashKeyType, depth hashKeyType, key interface{}, value interface{}) (HAMTNode, int) get(hash hashKeyType, depth hashKeyType, key interface{}) (interface{}, bool) } type SliceNode struct { size int data []HAMTNode } type KeyValueNode struct { originalHash hashKeyType key interface{} value interface{} } func (node *KeyValueNode) get(hash hashKeyType, depth hashKeyType, key interface{}) (interface{}, bool) { if hash != node.originalHash { return nil, false } if key != node.key { return nil, false } return node.value, true } func (node *KeyValueNode) set(hash hashKeyType, depth hashKeyType, key interface{}, value interface{}) (HAMTNode, int) { // Handle a Key overwrite. 
if node.originalHash == hash { if node.key == key { if node.value == value { return node, 0 } return &KeyValueNode{ originalHash: hash, key: key, value: value, }, 0 } else { panic("Need to support a hash collision node") } } data := make([]HAMTNode, sizeOfSlices) selfIndex := getIndexForHash(node.originalHash, depth) newIndex := getIndexForHash(hash, depth) if selfIndex != newIndex { data[selfIndex] = node data[newIndex] = &KeyValueNode{ originalHash: hash, key: key, value: value, } return &SliceNode{ data: data, size: 2, }, 1 } newNode, howManyAdded := node.set(hash, depth+1, key, value) data[selfIndex] = newNode return &SliceNode{ data: data, size: 1, }, howManyAdded } func (node *SliceNode) get(hash hashKeyType, depth hashKeyType, key interface{}) (interface{}, bool) { index := getIndexForHash(hash, depth) target := node.data[index] if target == nil { return nil, false } return target.get(hash, depth+1, key) } func (node *SliceNode) set(hash hashKeyType, depth hashKeyType, key interface{}, value interface{}) (HAMTNode, int) { index := getIndexForHash(hash, depth) target := node.data[index] if target == nil { return &SliceNode{ size: node.size + 1, data: cloneAndSet(node.data, index, &KeyValueNode{ originalHash: hash, key: key, value: value, }), }, 1 } newNode, howManyAdded := target.set(hash, depth+1, key, value) return &SliceNode{ size: node.size, data: cloneAndSet(node.data, index, newNode), }, howManyAdded } func getIndexForHash(hash hashKeyType, depth hashKeyType) hashKeyType { return hash >> depth * bitsPerTrieDepth & bitMask } func getHash(v interface{}, seed maphash.Seed) hashKeyType { var h maphash.Hash h.SetSeed(seed) switch v := v.(type) { case string: h.WriteString(v) case int32: buffer := make([]byte, 4) binary.LittleEndian.PutUint32(buffer, uint32(v)) h.Write(buffer) case uint32: buffer := make([]byte, 4) binary.LittleEndian.PutUint32(buffer, v) h.Write(buffer) case int64: buffer := make([]byte, 8) binary.LittleEndian.PutUint64(buffer, uint64(v)) 
h.Write(buffer) case uint64: buffer := make([]byte, 8) binary.LittleEndian.PutUint64(buffer, v) h.Write(buffer) default: panic(ErrUnhashableType) } return hashKeyType(h.Sum64()) } func cloneAndSet(data []HAMTNode, index hashKeyType, node HAMTNode) []HAMTNode { newSlice := make([]HAMTNode, sizeOfSlices) copy(newSlice, data) newSlice[index] = node return newSlice }
map.go
0.719876
0.44559
map.go
starcoder
package geojson import ( "encoding/json" "math" ) // DefaultSegments controls the number of segments output in the geometry created // by CircleGeom. var DefaultSegments float64 = 20 // EarthRadiusM is the approximate radius of the earth in meters const EarthRadiusM float64 = 6378137.0 // Haversine computes the distance in meters across the world's surface between two lat/lng coordinates. func Haversine(lonFrom float64, latFrom float64, lonTo float64, latTo float64) (distanceM float64) { var deltaLat = (latTo - latFrom) * (math.Pi / 180) var deltaLon = (lonTo - lonFrom) * (math.Pi / 180) var a = math.Sin(deltaLat/2)*math.Sin(deltaLat/2) + math.Cos(latFrom*(math.Pi/180))*math.Cos(latTo*(math.Pi/180))* math.Sin(deltaLon/2)*math.Sin(deltaLon/2) var c = 2 * math.Atan2(math.Sqrt(a), math.Sqrt(1-a)) distanceM = EarthRadiusM * c return } // CircleGeom outputs a GeoJSON geometry representing a circle of radius // radiusM meters centered at (cLat, cLng) func CircleGeom(cLat, cLng, radiusM float64) string { // Based on https://gist.github.com/mashbridge/7331812 var coords [][]float64 // Convert to radians cLat *= (2.0 * math.Pi) / 360.0 cLng *= (2.0 * math.Pi) / 360.0 // Distance along the "true course radial" // http://www.edwilliams.org/avform.htm#LL d := radiusM / EarthRadiusM f := func(p float64) []float64 { lat := math.Asin( math.Sin(cLat)*math.Cos(d) + math.Cos(cLat)*math.Sin(d)*math.Cos(p)) dLng := math.Atan2( math.Sin(p)*math.Sin(d)*math.Cos(cLat), math.Cos(d)-math.Sin(cLat)*math.Sin(lat)) lng := math.Mod( cLng-dLng+math.Pi, 2.0*math.Pi, ) - math.Pi // Convert back to degrees lat *= 360.0 / (2.0 * math.Pi) lng *= 360.0 / (2.0 * math.Pi) return []float64{lng, lat} } step := (2.0 * math.Pi) / DefaultSegments for p := 0.0; p > -2*math.Pi; p -= step { coords = append(coords, f(p)) } coords = append(coords, f(0)) js, _ := json.Marshal(map[string]interface{}{ "type": "Polygon", "coordinates": [][][]float64{coords}, }) return string(js) }
geojson/geojson.go
0.818882
0.583945
geojson.go
starcoder
package graph import ( i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e "time" i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55 "github.com/microsoft/kiota/abstractions/go/serialization" ) // AccessPackageAssignment type AccessPackageAssignment struct { Entity // Read-only. Nullable. Supports $filter (eq) on the id property and $expand query parameters. accessPackage *AccessPackage; // The Timestamp type represents date and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is 2014-01-01T00:00:00Z. expiredDateTime *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time; // When the access assignment is to be in place. Read-only. schedule *EntitlementManagementSchedule; // The state of the access package assignment. The possible values are: delivering, partiallyDelivered, delivered, expired, deliveryFailed, unknownFutureValue. Read-only. Supports $filter (eq). state *AccessPackageAssignmentState; // More information about the assignment lifecycle. Possible values include Delivering, Delivered, NearExpiry1DayNotificationTriggered, or ExpiredNotificationTriggered. Read-only. status *string; // The subject of the access package assignment. Read-only. Nullable. Supports $expand. Supports $filter (eq) on objectId. target *AccessPackageSubject; } // NewAccessPackageAssignment instantiates a new accessPackageAssignment and sets the default values. func NewAccessPackageAssignment()(*AccessPackageAssignment) { m := &AccessPackageAssignment{ Entity: *NewEntity(), } return m } // GetAccessPackage gets the accessPackage property value. Read-only. Nullable. Supports $filter (eq) on the id property and $expand query parameters. func (m *AccessPackageAssignment) GetAccessPackage()(*AccessPackage) { if m == nil { return nil } else { return m.accessPackage } } // GetExpiredDateTime gets the expiredDateTime property value. 
The Timestamp type represents date and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is 2014-01-01T00:00:00Z. func (m *AccessPackageAssignment) GetExpiredDateTime()(*i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time) { if m == nil { return nil } else { return m.expiredDateTime } } // GetSchedule gets the schedule property value. When the access assignment is to be in place. Read-only. func (m *AccessPackageAssignment) GetSchedule()(*EntitlementManagementSchedule) { if m == nil { return nil } else { return m.schedule } } // GetState gets the state property value. The state of the access package assignment. The possible values are: delivering, partiallyDelivered, delivered, expired, deliveryFailed, unknownFutureValue. Read-only. Supports $filter (eq). func (m *AccessPackageAssignment) GetState()(*AccessPackageAssignmentState) { if m == nil { return nil } else { return m.state } } // GetStatus gets the status property value. More information about the assignment lifecycle. Possible values include Delivering, Delivered, NearExpiry1DayNotificationTriggered, or ExpiredNotificationTriggered. Read-only. func (m *AccessPackageAssignment) GetStatus()(*string) { if m == nil { return nil } else { return m.status } } // GetTarget gets the target property value. The subject of the access package assignment. Read-only. Nullable. Supports $expand. Supports $filter (eq) on objectId. 
func (m *AccessPackageAssignment) GetTarget()(*AccessPackageSubject) { if m == nil { return nil } else { return m.target } } // GetFieldDeserializers the deserialization information for the current model func (m *AccessPackageAssignment) GetFieldDeserializers()(map[string]func(interface{}, i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode)(error)) { res := m.Entity.GetFieldDeserializers() res["accessPackage"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error { val, err := n.GetObjectValue(func () i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable { return NewAccessPackage() }) if err != nil { return err } if val != nil { m.SetAccessPackage(val.(*AccessPackage)) } return nil } res["expiredDateTime"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error { val, err := n.GetTimeValue() if err != nil { return err } if val != nil { m.SetExpiredDateTime(val) } return nil } res["schedule"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error { val, err := n.GetObjectValue(func () i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable { return NewEntitlementManagementSchedule() }) if err != nil { return err } if val != nil { m.SetSchedule(val.(*EntitlementManagementSchedule)) } return nil } res["state"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error { val, err := n.GetEnumValue(ParseAccessPackageAssignmentState) if err != nil { return err } if val != nil { m.SetState(val.(*AccessPackageAssignmentState)) } return nil } res["status"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error { val, err := n.GetStringValue() if err != nil { return err } if val != nil { m.SetStatus(val) } return nil } res["target"] = func (o interface{}, 
n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error { val, err := n.GetObjectValue(func () i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable { return NewAccessPackageSubject() }) if err != nil { return err } if val != nil { m.SetTarget(val.(*AccessPackageSubject)) } return nil } return res } func (m *AccessPackageAssignment) IsNil()(bool) { return m == nil } // Serialize serializes information the current object func (m *AccessPackageAssignment) Serialize(writer i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.SerializationWriter)(error) { err := m.Entity.Serialize(writer) if err != nil { return err } { err = writer.WriteObjectValue("accessPackage", m.GetAccessPackage()) if err != nil { return err } } { err = writer.WriteTimeValue("expiredDateTime", m.GetExpiredDateTime()) if err != nil { return err } } { err = writer.WriteObjectValue("schedule", m.GetSchedule()) if err != nil { return err } } if m.GetState() != nil { cast := (*m.GetState()).String() err = writer.WriteStringValue("state", &cast) if err != nil { return err } } { err = writer.WriteStringValue("status", m.GetStatus()) if err != nil { return err } } { err = writer.WriteObjectValue("target", m.GetTarget()) if err != nil { return err } } return nil } // SetAccessPackage sets the accessPackage property value. Read-only. Nullable. Supports $filter (eq) on the id property and $expand query parameters. func (m *AccessPackageAssignment) SetAccessPackage(value *AccessPackage)() { if m != nil { m.accessPackage = value } } // SetExpiredDateTime sets the expiredDateTime property value. The Timestamp type represents date and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is 2014-01-01T00:00:00Z. 
func (m *AccessPackageAssignment) SetExpiredDateTime(value *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time)() { if m != nil { m.expiredDateTime = value } } // SetSchedule sets the schedule property value. When the access assignment is to be in place. Read-only. func (m *AccessPackageAssignment) SetSchedule(value *EntitlementManagementSchedule)() { if m != nil { m.schedule = value } } // SetState sets the state property value. The state of the access package assignment. The possible values are: delivering, partiallyDelivered, delivered, expired, deliveryFailed, unknownFutureValue. Read-only. Supports $filter (eq). func (m *AccessPackageAssignment) SetState(value *AccessPackageAssignmentState)() { if m != nil { m.state = value } } // SetStatus sets the status property value. More information about the assignment lifecycle. Possible values include Delivering, Delivered, NearExpiry1DayNotificationTriggered, or ExpiredNotificationTriggered. Read-only. func (m *AccessPackageAssignment) SetStatus(value *string)() { if m != nil { m.status = value } } // SetTarget sets the target property value. The subject of the access package assignment. Read-only. Nullable. Supports $expand. Supports $filter (eq) on objectId. func (m *AccessPackageAssignment) SetTarget(value *AccessPackageSubject)() { if m != nil { m.target = value } }
models/microsoft/graph/access_package_assignment.go
0.693161
0.40698
access_package_assignment.go
starcoder
package ast import ( "fmt" ) // Type representing a comparison of two integers. type Comparison struct { left IntegerTerm right IntegerTerm operator func(left int, right int) bool operatorName string } func (c Comparison) Eval(state State) (bool, error) { l, err := c.left.Eval(state) if err != nil { return false, err } r, err := c.right.Eval(state) if err != nil { return false, err } return c.operator(l, r), nil } func (c Comparison) String() string { return fmt.Sprintf("%s{%v, %v}", c.operatorName, c.left, c.right) } // Returns an 'equal to' comparison between the two given terms func Equals(left IntegerTerm, right IntegerTerm) BooleanTerm { return &Comparison{left, right, equals, "equals"} } // Returns a 'not equal to' comparison between the two given terms func NotEquals(left IntegerTerm, right IntegerTerm) BooleanTerm { return &Comparison{left, right, notEquals, "notEquals"} } // Returns a 'less than' comparison between the two given terms func LessThan(left IntegerTerm, right IntegerTerm) BooleanTerm { return &Comparison{left, right, lessThan, "lessThan"} } // Returns a 'greater than' comparison between the two given terms func GreaterThan(left IntegerTerm, right IntegerTerm) BooleanTerm { return &Comparison{left, right, greaterThan, "greaterThan"} } // Returns a 'less than or equal to' comparison between the two given terms func LessThanEqual(left IntegerTerm, right IntegerTerm) BooleanTerm { return &Comparison{left, right, lessThanEqual, "lessThanEqual"} } // Returns a 'greater than or equal to' comparison between the two given terms func GreaterThanEqual(left IntegerTerm, right IntegerTerm) BooleanTerm { return &Comparison{left, right, greaterThanEqual, "greaterThanEqual"} } func equals(left int, right int) bool { return left == right } func notEquals(left int, right int) bool { return left != right } func lessThan(left int, right int) bool { return left < right } func greaterThan(left int, right int) bool { return left > right } func lessThanEqual(left 
int, right int) bool { return left <= right } func greaterThanEqual(left int, right int) bool { return left >= right }
ast/comparison.go
0.9271
0.512571
comparison.go
starcoder
package transport import ( "sync" "time" ) const ( // bdpLimit is the maximum value the flow control windows // will be increased to. bdpLimit = (1 << 20) * 4 // alpha is a constant factor used to keep a moving average // of RTTs. alpha = 0.9 // If the current bdp sample is greater than or equal to // our beta * our estimated bdp and the current bandwidth // sample is the maximum bandwidth observed so far, we // increase our bbp estimate by a factor of gamma. beta = 0.66 // To put our bdp to be smaller than or equal to twice the real BDP, // we should multiply our current sample with 4/3, however to round things out // we use 2 as the multiplication factor. gamma = 2 ) // Adding arbitrary data to ping so that its ack can be identified. // Easter-egg: what does the ping message say? var bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}} type bdpEstimator struct { // sentAt is the time when the ping was sent. sentAt time.Time mu sync.Mutex // bdp is the current bdp estimate. bdp uint32 // sample is the number of bytes received in one measurement cycle. sample uint32 // bwMax is the maximum bandwidth noted so far (bytes/sec). bwMax float64 // bool to keep track of the beginning of a new measurement cycle. isSent bool // Callback to update the window sizes. updateFlowControl func(n uint32) // sampleCount is the number of samples taken so far. sampleCount uint64 // round trip time (seconds) rtt float64 } // timesnap registers the time bdp ping was sent out so that // network rtt can be calculated when its ack is received. // It is called (by controller) when the bdpPing is // being written on the wire. func (b *bdpEstimator) timesnap(d [8]byte) { if bdpPing.data != d { return } b.sentAt = time.Now() } // add adds bytes to the current sample for calculating bdp. // It returns true only if a ping must be sent. This can be used // by the caller (handleData) to make decision about batching // a window update with it. 
func (b *bdpEstimator) add(n uint32) bool { b.mu.Lock() defer b.mu.Unlock() if b.bdp == bdpLimit { return false } if !b.isSent { b.isSent = true b.sample = n b.sentAt = time.Time{} b.sampleCount++ return true } b.sample += n return false } // calculate is called when an ack for a bdp ping is received. // Here we calculate the current bdp and bandwidth sample and // decide if the flow control windows should go up. func (b *bdpEstimator) calculate(d [8]byte) { // Check if the ping acked for was the bdp ping. if bdpPing.data != d { return } b.mu.Lock() rttSample := time.Since(b.sentAt).Seconds() if b.sampleCount < 10 { // Bootstrap rtt with an average of first 10 rtt samples. b.rtt += (rttSample - b.rtt) / float64(b.sampleCount) } else { // Heed to the recent past more. b.rtt += (rttSample - b.rtt) * float64(alpha) } b.isSent = false // The number of bytes accumulated so far in the sample is smaller // than or equal to 1.5 times the real BDP on a saturated connection. bwCurrent := float64(b.sample) / (b.rtt * float64(1.5)) if bwCurrent > b.bwMax { b.bwMax = bwCurrent } // If the current sample (which is smaller than or equal to the 1.5 times the real BDP) is // greater than or equal to 2/3rd our perceived bdp AND this is the maximum bandwidth seen so far, we // should update our perception of the network BDP. if float64(b.sample) >= beta*float64(b.bdp) && bwCurrent == b.bwMax && b.bdp != bdpLimit { sampleFloat := float64(b.sample) b.bdp = uint32(gamma * sampleFloat) if b.bdp > bdpLimit { b.bdp = bdpLimit } bdp := b.bdp b.mu.Unlock() b.updateFlowControl(bdp) return } b.mu.Unlock() }
vendor/google.golang.org/grpc/transport/bdp_estimator.go
0.796688
0.523908
bdp_estimator.go
starcoder
// Package sdnv packages implements the Self-Delimiting Numeric Values, // as per https://tools.ietf.org/html/rfc5050#section-4.1 package sdnv import ( "fmt" "io" "math/big" "math/bits" ) // MaxByteSize is the largest number of bytes a uint64 might be encoded into const MaxByteSize = 10 // ErrOverflow64 is the string sentinel value returned when overflowing // a 64-bit integer const ErrOverflow64 = "sdnv: byte sequence overflows a 64-bit integer" // Encode puts the given uint64 into the buffer, and return the number of // bytes used in the buffer. // Put panics if there is not enough space in the buffer. // Design can be found at: https://tools.ietf.org/html/rfc5050#section-4.1 func Encode(buf []byte, x uint64) (n int) { if x == 0 { buf[n] = 0x00 return n + 1 } n = (bits.Len64(x) - 1) / 7 for i := n; i >= 0; i-- { buf[i] = byte(x) & 0x7f if i != n { buf[i] |= 0x80 } x >>= 7 } return n + 1 } func encodeBig(buf []byte, in *big.Int) (n int) { bLen := in.BitLen() if bLen == 0 { buf[n] = 0x00 return n + 1 } x := big.NewInt(0).SetBytes(in.Bytes()) n = (bLen - 1) / 7 raw := x.Bytes() for i := n; i >= 0; i-- { buf[i] = raw[len(raw)-1] & 0x7f if i != n { buf[i] |= 0x80 } raw = x.Rsh(x, 7).Bytes() } return n + 1 } func WriteBytes(bw io.ByteWriter, x uint64) (n int, err error) { if x == 0 { return 1, bw.WriteByte(0x00) } n = (bits.Len64(x) - 1) / 7 for i := n; i >= 0; i-- { offset := uint(i * 7) b := byte(x>>offset) & 0x7f if i != 0 { b |= 0x80 } bw.WriteByte(b) } return n + 1, nil } func Write(w io.Writer, x uint64) (n int, err error) { if x == 0 { return w.Write([]byte{0x00}) } buf := make([]byte, MaxByteSize) size := Encode(buf, x) return w.Write(buf[:size]) } // Decode retrieves a uint64 value from the buffer, returning the uint64 and // the number of bytes consumed from the buffer. // Get panics if it runs out of bytes in the buffer before encountering // the delimiter byte. 
// Design can be found at: https://tools.ietf.org/html/rfc5050#section-4.1 func Decode(buf []byte) (x uint64, n int) { // TODO: Overflow like binary.Uvarint?!? for { x |= uint64(buf[n] & 0x7f) if buf[n] < 0x80 { return x, n + 1 } x <<= 7 n++ } } func decodeBig(buf []byte) (x *big.Int, n int) { x = big.NewInt(0) for { bVal := int64(buf[n] & 0x7f) x.Or(x, big.NewInt(bVal)) if buf[n] < 0x80 { return x, n + 1 } x.Lsh(x, 7) n++ } } // ReadBytes will read individual bytes on-demand as needed to fill data. // The io.EOF error will only be returned if zero bytes have been read. If // any have been read but an io.EOF is encountered, io.ErrUnexpectedEOF is // returned instead. // If the bytes indicate a number greater than can be held by a 64-bit // integer, the number of bytes read will be returned along with an error // containing the string value of ErrOverflow64. func ReadBytes(br io.ByteReader, data *uint64) (n int, err error) { var b0 byte // For overflow check for { b, err := br.ReadByte() if err == io.EOF && n > 0 { return n, io.ErrUnexpectedEOF } if err != nil { return n, err } if n == MaxByteSize-1 { // We're on the last possible byte, but it says to pull more if b >= 0x80 { return MaxByteSize, fmt.Errorf(ErrOverflow64) } // For a 10-byte the only acceptable value for the first // byte is 0x81. See the Note in RFC 5050 4.1 if b0 != 0x81 { return MaxByteSize, fmt.Errorf(ErrOverflow64) } } if n == 0 { b0 = b } *data |= uint64(b & 0x7f) if b < 0x80 { return n + 1, nil } *data <<= 7 n++ } } // Read will read individual bytes on-demand as needed to fill data. // The io.EOF error will only be returned if zero bytes have been read. If // any have been read but an io.EOF is encountered, io.ErrUnexpectedEOF is // returned instead. // If the bytes indicate a number greater than can be held by a 64-bit // integer, the number of bytes read will be returned along with an error // containing the string value of ErrOverflow64. 
func Read(r io.Reader, data *uint64) (n int, err error) { var b0 byte // For overflow check buf := make([]byte, 1) for { l, err := r.Read(buf) n += l if err == io.EOF && n > 0 { return n, io.ErrUnexpectedEOF } if err != nil { return n, err } if n == MaxByteSize { // We're on the last possible byte, but it says to pull more if buf[0] >= 0x80 { return MaxByteSize, fmt.Errorf(ErrOverflow64) } // For a 10-byte the only acceptable value for the first // byte is 0x81. See the Note in RFC 5050 4.1 if b0 != 0x81 { return MaxByteSize, fmt.Errorf(ErrOverflow64) } } if n == 1 { b0 = buf[0] } *data |= uint64(buf[0] & 0x7f) if buf[0] < 0x80 { return n, nil } *data <<= 7 } }
sdnv/codec.go
0.605216
0.452838
codec.go
starcoder
package headlessexperimental

// Sends a BeginFrame to the target and returns when the frame was completed. Optionally captures a
// screenshot from the resulting frame. Requires that the target was created with enabled
// BeginFrameControl. Designed for use with --run-all-compositor-stages-before-draw, see also
// https://goo.gl/3zHXhB for more background.
const BeginFrame = "HeadlessExperimental.beginFrame"

// BeginFrameParams holds the arguments for a HeadlessExperimental.beginFrame command.
type BeginFrameParams struct {
	// Timestamp of this BeginFrame in Renderer TimeTicks (milliseconds of uptime). If not set,
	// the current time will be used.
	FrameTimeTicks float64 `json:"frameTimeTicks,omitempty"`

	// The interval between BeginFrames that is reported to the compositor, in milliseconds.
	// Defaults to a 60 frames/second interval, i.e. about 16.666 milliseconds.
	Interval float64 `json:"interval,omitempty"`

	// Whether updates should not be committed and drawn onto the display. False by default. If
	// true, only side effects of the BeginFrame will be run, such as layout and animations, but
	// any visual updates may not be visible on the display or in screenshots.
	NoDisplayUpdates bool `json:"noDisplayUpdates,omitempty"`

	// If set, a screenshot of the frame will be captured and returned in the response. Otherwise,
	// no screenshot will be captured. Note that capturing a screenshot can fail, for example,
	// during renderer initialization. In such a case, no screenshot data will be returned.
	Screenshot ScreenshotParams `json:"screenshot,omitempty"`
}

// BeginFrameResult is the reply to a HeadlessExperimental.beginFrame command.
type BeginFrameResult struct {
	// Whether the BeginFrame resulted in damage and, thus, a new frame was committed to the
	// display. Reported for diagnostic uses, may be removed in the future.
	HasDamage bool `json:"hasDamage"`

	// Base64-encoded image data of the screenshot, if one was requested and successfully taken.
	ScreenshotData []byte `json:"screenshotData"`
}

// Disables headless events for the target.
const Disable = "HeadlessExperimental.disable"

// DisableParams holds the (empty) arguments for a HeadlessExperimental.disable command.
type DisableParams struct {
}

// DisableResult is the (empty) reply to a HeadlessExperimental.disable command.
type DisableResult struct {
}

// Enables headless events for the target.
const Enable = "HeadlessExperimental.enable"

// EnableParams holds the (empty) arguments for a HeadlessExperimental.enable command.
type EnableParams struct {
}

// EnableResult is the (empty) reply to a HeadlessExperimental.enable command.
type EnableResult struct {
}
protocol/headlessexperimental/method.go
0.743447
0.484319
method.go
starcoder
package table

import (
	"fmt"
	"io"
	"reflect"
	"strconv"
	"strings"
	"time"

	semver "github.com/cppforlife/go-semi-semantic/version"
	"github.com/dustin/go-humanize"
	"gopkg.in/yaml.v2"

	boshuifmt "github.com/cloudfoundry/bosh-cli/ui/fmt"
)

// NewValueString wraps a plain string as a table value.
func NewValueString(s string) ValueString { return ValueString{S: s} }

func (t ValueString) String() string { return t.S }
func (t ValueString) Value() Value   { return t }

// Compare orders string values lexicographically.
func (t ValueString) Compare(other Value) int {
	otherS := other.(ValueString).S
	switch {
	case t.S == otherS:
		return 0
	case t.S < otherS:
		return -1
	default:
		return 1
	}
}

// EmptyValue renders as an empty cell and compares equal to everything.
func (t EmptyValue) String() string { return "" }

func (t EmptyValue) Value() Value { return t }

func (t EmptyValue) Compare(Value) int { return 0 }

// NewValueStrings wraps a string slice; it renders one entry per line.
func NewValueStrings(s []string) ValueStrings { return ValueStrings{S: s} }

func (t ValueStrings) String() string { return strings.Join(t.S, "\n") }
func (t ValueStrings) Value() Value   { return t }

// Compare orders string-slice values by length only, not by content.
func (t ValueStrings) Compare(other Value) int {
	otherS := other.(ValueStrings).S
	switch {
	case len(t.S) == len(otherS):
		return 0
	case len(t.S) < len(otherS):
		return -1
	default:
		return 1
	}
}

// NewValueInt wraps an int as a table value.
func NewValueInt(i int) ValueInt { return ValueInt{I: i} }

func (t ValueInt) String() string { return strconv.Itoa(t.I) }
func (t ValueInt) Value() Value   { return t }

// Compare orders int values numerically.
func (t ValueInt) Compare(other Value) int {
	otherI := other.(ValueInt).I
	switch {
	case t.I == otherI:
		return 0
	case t.I < otherI:
		return -1
	default:
		return 1
	}
}

// NewValueBytes wraps a byte count; it renders in IEC units (KiB, MiB, ...).
func NewValueBytes(i uint64) ValueBytes { return ValueBytes{I: i} }

// NewValueMegaBytes wraps a count given in mebibytes, stored as bytes.
func NewValueMegaBytes(i uint64) ValueBytes { return ValueBytes{I: i * 1024 * 1024} }

func (t ValueBytes) String() string { return humanize.IBytes(t.I) }
func (t ValueBytes) Value() Value   { return t }

// Compare orders byte counts numerically.
func (t ValueBytes) Compare(other Value) int {
	otherI := other.(ValueBytes).I
	switch {
	case t.I == otherI:
		return 0
	case t.I < otherI:
		return -1
	default:
		return 1
	}
}

// NewValueTime wraps a time.Time as a table value.
func NewValueTime(t time.Time) ValueTime { return ValueTime{T: t} }

// String renders the time in the UI's full time format; the zero time
// renders as an empty cell.
func (t ValueTime) String() string {
	if t.T.IsZero() {
		return ""
	}
	return t.T.Format(boshuifmt.TimeFullFmt)
}

func (t ValueTime) Value() Value { return t }

// Compare orders times chronologically.
func (t ValueTime) Compare(other Value) int {
	otherT := other.(ValueTime).T
	switch {
	case t.T.Equal(otherT):
		return 0
	case t.T.Before(otherT):
		return -1
	default:
		return 1
	}
}

// NewValueBool wraps a bool as a table value.
func NewValueBool(b bool) ValueBool { return ValueBool{B: b} }

func (t ValueBool) String() string { return fmt.Sprintf("%t", t.B) }
func (t ValueBool) Value() Value   { return t }

// Compare orders bools with false before true.
func (t ValueBool) Compare(other Value) int {
	otherB := other.(ValueBool).B
	switch {
	case t.B == otherB:
		return 0
	case t.B == false && otherB == true:
		return -1
	default:
		return 1
	}
}

// NewValueVersion wraps a semantic version as a table value.
func NewValueVersion(v semver.Version) ValueVersion { return ValueVersion{V: v} }

func (t ValueVersion) String() string { return t.V.String() }
func (t ValueVersion) Value() Value   { return t }

// Compare delegates to semantic-version ordering.
func (t ValueVersion) Compare(other Value) int { return t.V.Compare(other.(ValueVersion).V) }

// NewValueError wraps an error as a table value; a nil error renders empty.
func NewValueError(e error) ValueError { return ValueError{E: e} }

func (t ValueError) String() string {
	if t.E != nil {
		return t.E.Error()
	}
	return ""
}

// NewValueInterface wraps an arbitrary value; it renders as YAML.
func NewValueInterface(i interface{}) ValueInterface { return ValueInterface{I: i} }

// String renders the wrapped value as trimmed YAML. Nil values and empty
// maps/slices render as an empty cell; serialization failures render a
// diagnostic string instead of returning an error.
func (t ValueInterface) String() string {
	if t.I == nil {
		return ""
	}

	val := reflect.ValueOf(t.I)
	if val.Kind() == reflect.Map && val.Len() == 0 {
		return ""
	} else if val.Kind() == reflect.Slice && val.Len() == 0 {
		return ""
	}

	bytes, err := yaml.Marshal(t.I)
	if err != nil {
		return fmt.Sprintf("<serialization error> : %#v", t.I)
	}

	return strings.TrimSpace(string(bytes))
}

func (t ValueInterface) Value() Value { return t }

// Compare is unsupported for interface values; sorting on such a column is a bug.
func (t ValueInterface) Compare(other Value) int { panic("Never called") }

func (t ValueError) Value() Value { return t }

// Compare is unsupported for error values; sorting on such a column is a bug.
func (t ValueError) Compare(other Value) int { panic("Never called") }

func (t ValueNone) String() string { return "" }
func (t ValueNone) Value() Value   { return t }

// Compare is unsupported for the none value; sorting on such a column is a bug.
func (t ValueNone) Compare(other Value) int { panic("Never called") }

// NewValueFmt wraps a value together with an error flag used for formatting.
func NewValueFmt(v Value, error bool) ValueFmt {
	return ValueFmt{V: v, Error: error}
}

func (t ValueFmt) String() string { return t.V.String() }
func (t ValueFmt) Value() Value   { return t.V }

// Compare is unsupported; ValueFmt is a presentation wrapper, not a sortable value.
func (t ValueFmt) Compare(other Value) int { panic("Never called") }

// Fprintf writes the formatted pattern to w, passing it through the
// wrapper's formatting func (if any) first.
func (t ValueFmt) Fprintf(w io.Writer, pattern string, rest ...interface{}) (int, error) {
	if t.Func == nil {
		return fmt.Fprintf(w, pattern, rest...)
	}
	return fmt.Fprintf(w, "%s", t.Func(pattern, rest...))
}

// NewValueSuffix wraps a value and appends a suffix to its rendering.
func NewValueSuffix(v Value, s string) ValueSuffix {
	return ValueSuffix{V: v, Suffix: s}
}

// String renders the wrapped value plus suffix; an empty value stays empty
// (the suffix is suppressed).
func (t ValueSuffix) String() string {
	str := t.V.String()
	if len(str) > 0 {
		return str + t.Suffix
	}
	return ""
}

func (t ValueSuffix) Value() Value { return t.V }

// Compare is unsupported; ValueSuffix is a presentation wrapper, not a sortable value.
func (t ValueSuffix) Compare(other Value) int { panic("Never called") }
ui/table/values.go
0.594669
0.439266
values.go
starcoder
package analyzer // CalcAverageMonthly caclulates average monthly open and close prices for each security func CalcAverageMonthly(tickerMap map[string][]DailyStockData) map[string][]AverageMonthlyPrices { tickerAveragesMap := make(map[string][]AverageMonthlyPrices) for ticker, dailyDatas := range tickerMap { averageMonthlyPrices := make([]AverageMonthlyPrices, 0) // group data for each ticker by month monthMap := make(map[string][]DailyStockData) for _, dailyData := range dailyDatas { // get month from date - 2017-01-03 becomes 2017-01 month := dailyData.Date[:7] stockDatas, ok := monthMap[month] if !ok { monthMap[month] = []DailyStockData{dailyData} } else { monthMap[month] = append(stockDatas, dailyData) } } // calculate averages for each month for month, datas := range monthMap { monthlyPrices := AverageMonthlyPrices{Month: month} sumOpen := 0.0 sumClose := 0.0 for _, data := range datas { sumOpen += data.Open sumClose += data.Close } monthlyPrices.AverageClose = sumClose / float64(len(datas)) monthlyPrices.AverageOpen = sumOpen / float64(len(datas)) averageMonthlyPrices = append(averageMonthlyPrices, monthlyPrices) } tickerAveragesMap[ticker] = averageMonthlyPrices } return tickerAveragesMap } // CalcMaxDailyProfit calculates the maximum daily profit for each security func CalcMaxDailyProfit(tickerMap map[string][]DailyStockData) []MaxDailyProfit { dailyProfits := make([]MaxDailyProfit, 0) for ticker, dailyDatas := range tickerMap { maxDailyProfit := MaxDailyProfit{Ticker: ticker} // find date that provides the maximum daily profit for buying high and selling low maxProfit := 0.0 var maxProfitDate string for _, dailyData := range dailyDatas { dailyProfit := dailyData.High - dailyData.Low if dailyProfit > maxProfit { maxProfit = dailyProfit maxProfitDate = dailyData.Date } } maxDailyProfit.Profit = maxProfit maxDailyProfit.Date = maxProfitDate dailyProfits = append(dailyProfits, maxDailyProfit) } return dailyProfits } // CalcBusiestDays calculates the days where 
a security's volume was over 10% greater than the average volume for that security func CalcBusiestDays(tickerMap map[string][]DailyStockData) map[string]BusiestDays { busiestDaysMap := make(map[string]BusiestDays) for ticker, dailyDatas := range tickerMap { var busiestDays BusiestDays // calculate average volume for this security volumeSum := 0.0 for _, dailyData := range dailyDatas { volumeSum += dailyData.Volume } busiestDays.AverageVolume = volumeSum / float64(len(dailyDatas)) // find days where volume is over 10% greater than the average volume days := make([]BusiestDay, 0) threshold := (.10 * busiestDays.AverageVolume) + busiestDays.AverageVolume for _, dailyData := range dailyDatas { if dailyData.Volume > threshold { day := BusiestDay{Date: dailyData.Date, Volume: dailyData.Volume} days = append(days, day) } } busiestDays.Days = days busiestDaysMap[ticker] = busiestDays } return busiestDaysMap } // CalcBiggestLoser caculates the security that had the most days where the closing price was less than the opening price func CalcBiggestLoser(tickerMap map[string][]DailyStockData) BiggestLoser { losingDaysMap := make(map[string]int64) // calculate number of loss days for each security for ticker, dailyDatas := range tickerMap { var numLossDays int64 for _, dailyData := range dailyDatas { if dailyData.Close < dailyData.Open { numLossDays++ } } losingDaysMap[ticker] = numLossDays } // determine which security had greatest number of loss days var maxLossDays int64 var maxLossTicker string for ticker, numLossDays := range losingDaysMap { if numLossDays > maxLossDays { maxLossDays = numLossDays maxLossTicker = ticker } } return BiggestLoser{Ticker: maxLossTicker, NumberDaysLoser: maxLossDays} }
analyzer/calculate.go
0.763043
0.745167
calculate.go
starcoder
package collector

import (
	"strconv"

	"github.com/prometheus/client_golang/prometheus"
)

// topicStats is a table of (value extractor, gauge vector) pairs: each entry
// knows how to read one metric out of a Topics stats record and which
// GaugeVec it is exported through.
type topicStats []struct {
	val func(topics *Topics) float64
	vec *prometheus.GaugeVec
}

// TopicStats creates a new stats collector which is able to
// expose the topic metrics of a nsqd node to Prometheus.
// Metrics are registered under "<namespace>_topic" with "topic" and
// "paused" labels.
func TopicStats(namespace string) StatsCollector {
	labels := []string{"topic", "paused"}
	namespace += "_topic"

	return topicStats{
		{
			val: func(t *Topics) float64 { return float64(len(t.Channels)) },
			vec: prometheus.NewGaugeVec(prometheus.GaugeOpts{
				Namespace: namespace,
				Name:      "channel_count",
				Help:      "Number of channels",
			}, labels),
		},
		{
			val: func(t *Topics) float64 { return float64(t.Depth) },
			vec: prometheus.NewGaugeVec(prometheus.GaugeOpts{
				Namespace: namespace,
				Name:      "depth",
				Help:      "Queue depth",
			}, labels),
		},
		{
			val: func(t *Topics) float64 { return float64(t.BackendDepth) },
			vec: prometheus.NewGaugeVec(prometheus.GaugeOpts{
				Namespace: namespace,
				Name:      "backend_depth",
				Help:      "Queue backend depth",
			}, labels),
		},
		{
			val: func(t *Topics) float64 { return getPercentile(t, 99) },
			vec: prometheus.NewGaugeVec(prometheus.GaugeOpts{
				Namespace: namespace,
				Name:      "e2e_latency_99_percentile",
				Help:      "Queue e2e latency 99th percentile",
			}, labels),
		},
		{
			val: func(t *Topics) float64 { return getPercentile(t, 95) },
			vec: prometheus.NewGaugeVec(prometheus.GaugeOpts{
				Namespace: namespace,
				Name:      "e2e_latency_95_percentile",
				Help:      "Queue e2e latency 95th percentile",
			}, labels),
		},
		{
			val: func(t *Topics) float64 { return float64(t.MessageCount) },
			vec: prometheus.NewGaugeVec(prometheus.GaugeOpts{
				Namespace: namespace,
				Name:      "message_count",
				Help:      "Queue message count",
			}, labels),
		},
	}
}

// set refreshes every gauge from a stats response, labelling each sample
// with the topic name and its paused state.
func (ts topicStats) set(s *StatsResponse) {
	for _, topic := range s.Topics {
		labels := prometheus.Labels{
			"topic":  topic.TopicName,
			"paused": strconv.FormatBool(topic.Paused),
		}

		for _, c := range ts {
			c.vec.With(labels).Set(c.val(topic))
		}
	}
}

// collect forwards all current gauge values to the Prometheus metric channel.
func (ts topicStats) collect(out chan<- prometheus.Metric) {
	for _, c := range ts {
		c.vec.Collect(out)
	}
}

// describe forwards all metric descriptions to the Prometheus describe channel.
func (ts topicStats) describe(ch chan<- *prometheus.Desc) {
	for _, c := range ts {
		c.vec.Describe(ch)
	}
}

// reset clears all recorded label combinations, dropping samples for topics
// that no longer exist.
func (ts topicStats) reset() {
	for _, c := range ts {
		c.vec.Reset()
	}
}
collector/stats_topic.go
0.70791
0.415492
stats_topic.go
starcoder
package function

import (
	"errors"
	"fmt"
	"kanzi"
	"kanzi/transform"
)

// Utility class to compress/decompress a data block
// Fast reversible block coder/decoder based on a pipeline of transformations:
// Forward: (Bijective) Burrows-Wheeler -> Move to Front -> Zero Run Length
// Inverse: Zero Run Length -> Move to Front -> (Bijective) Burrows-Wheeler
// The block size determines the balance between speed and compression ratio
// BWT stream format: Header (m bytes) Data (n bytes)
// Header: mode (8 bits) + BWT primary index (8, 16 or 24 bits)
// mode: bits 7-6 contain the size in bits of the primary index :
//           00: primary index size <=  6 bits (fits in mode byte)
//           01: primary index size <= 14 bits (1 extra byte)
//           10: primary index size <= 22 bits (2 extra bytes)
//           11: primary index size  > 22 bits (3 extra bytes)
//       bits 5-0 contain 6 most significant bits of primary index
// primary index: remaining bits (up to 3 bytes)
// Bijective BWT stream format: Data (n bytes)

const (
	GST_MODE_RAW        = 0
	GST_MODE_MTF        = 1
	GST_MODE_RANK       = 2
	GST_MODE_TIMESTAMP  = 3
	BWT_MAX_HEADER_SIZE = 4
	MAX_BLOCK_SIZE      = 256 * 1024 * 1024 // 30 bits
)

// BWTBlockCodec bundles a BWT/BWTS transform with an optional Global
// Structure Transform (GST) stage and zero run-length coding.
type BWTBlockCodec struct {
	transform kanzi.ByteTransform // the BWT or BWTS instance
	mode      int                 // one of the GST_MODE_* constants
	size      uint                // configured block size (0 = use input length)
	isBWT     bool                // true for BWT (needs primary-index header), false for BWTS
}

// Based on the mode, the forward transform is followed by a Global Structure
// Transform and ZRLT, else a raw transform is performed.
//
// NewBWTBlockCodec validates that tr implements both ByteTransform and
// Sizeable, that mode is a known GST mode, and that blockSize does not
// exceed the transform-specific maximum.
func NewBWTBlockCodec(tr interface{}, mode int, blockSize uint) (*BWTBlockCodec, error) {
	if tr == nil {
		return nil, errors.New("Invalid null transform parameter")
	}

	if _, isTransform := tr.(kanzi.ByteTransform); isTransform == false {
		return nil, errors.New("The transform must implement the ByteTransform interface")
	}

	if _, isSizeable := tr.(kanzi.Sizeable); isSizeable == false {
		return nil, errors.New("The transform must implement the Sizeable interface")
	}

	if mode != GST_MODE_RAW && mode != GST_MODE_MTF && mode != GST_MODE_RANK && mode != GST_MODE_TIMESTAMP {
		return nil, errors.New("Invalid GST mode parameter")
	}

	_, isBWT := tr.(*transform.BWT)

	this := new(BWTBlockCodec)
	this.mode = mode
	this.size = blockSize
	this.transform = tr.(kanzi.ByteTransform)
	this.isBWT = isBWT

	if blockSize > this.maxBlockSize() {
		transformName := "BWT"

		if this.isBWT == false {
			transformName = "BWTS"
		}

		errMsg := fmt.Sprintf("The max block size for the %v is %d", transformName, this.maxBlockSize())
		return nil, errors.New(errMsg)
	}

	return this, nil
}

// createGST instantiates the Global Structure Transform for the configured
// mode, or (nil, nil) for raw mode.
func (this *BWTBlockCodec) createGST(blockSize uint) (kanzi.ByteTransform, error) {
	// SBRT can perform MTFT but the dedicated class is faster
	if this.mode == GST_MODE_RAW {
		return nil, nil
	}

	if this.mode == GST_MODE_MTF {
		return transform.NewMTFT(blockSize)
	}

	return transform.NewSBRT(this.mode, blockSize)
}

// maxBlockSize returns the largest usable block size; for BWT, room is
// reserved for the primary-index header.
func (this *BWTBlockCodec) maxBlockSize() uint {
	maxSize := uint(MAX_BLOCK_SIZE)

	if this.isBWT == true {
		maxSize -= BWT_MAX_HEADER_SIZE
	}

	return maxSize
}

// Size returns the configured block size.
func (this *BWTBlockCodec) Size() uint {
	return this.size
}

// SetSize updates the block size, rejecting values above maxBlockSize.
func (this *BWTBlockCodec) SetSize(sz uint) bool {
	if sz > this.maxBlockSize() {
		return false
	}

	this.size = sz
	return true
}

// Return no error if the compression chain succeeded. In this case, the input data
// may be modified. If the compression failed, the input data is returned unmodified.
func (this *BWTBlockCodec) Forward(src, dst []byte) (uint, uint, error) {
	if src == nil {
		return 0, 0, errors.New("Input buffer cannot be null")
	}

	if dst == nil {
		return 0, 0, errors.New("Output buffer cannot be null")
	}

	if kanzi.SameByteSlices(src, dst, false) {
		return 0, 0, errors.New("Input and output buffers cannot be equal")
	}

	blockSize := this.size

	if blockSize == 0 {
		// Size 0 means "use the whole input buffer".
		blockSize = uint(len(src))

		if blockSize > this.maxBlockSize() {
			errMsg := fmt.Sprintf("Block size is %v, max value is %v", blockSize, this.maxBlockSize())
			return 0, 0, errors.New(errMsg)
		}
	} else if blockSize > uint(len(src)) {
		errMsg := fmt.Sprintf("Block size is %v, input buffer length is %v", blockSize, len(src))
		return 0, 0, errors.New(errMsg)
	}

	this.transform.(kanzi.Sizeable).SetSize(blockSize)

	// Apply forward Transform
	iIdx, oIdx, _ := this.transform.Forward(src, dst)

	headerSizeBytes := uint(0)
	pIndexSizeBits := uint(0)
	primaryIndex := uint(0)

	if this.isBWT {
		// Compute how many bits (and hence header bytes) the primary index needs.
		primaryIndex = this.transform.(*transform.BWT).PrimaryIndex()
		pIndexSizeBits = uint(6)

		for 1<<pIndexSizeBits <= primaryIndex {
			pIndexSizeBits++
		}

		headerSizeBytes = (2 + pIndexSizeBits + 7) >> 3
	}

	if this.mode != GST_MODE_RAW {
		// Apply Post Transform
		gst, err := this.createGST(blockSize)

		if err != nil {
			return 0, 0, err
		}

		gst.Forward(dst, src)

		if ZRLT, err := NewZRLT(blockSize); err == nil {
			// Apply Zero Run Length Encoding
			iIdx, oIdx, err = ZRLT.Forward(src, dst[headerSizeBytes:])

			if err != nil {
				// Compression failed, recover source data
				gst.Inverse(src, dst)
				this.transform.Inverse(dst, src)
				return 0, 0, err
			}
		}
	} else if headerSizeBytes > 0 {
		// Shift output data to leave space for header
		hs := int(headerSizeBytes)

		for i := int(blockSize - 1); i >= 0; i-- {
			dst[i+hs] = dst[i]
		}
	}

	if this.isBWT {
		oIdx += headerSizeBytes

		// Write block header (mode + primary index). See top of file for format
		shift := (headerSizeBytes - 1) << 3
		blockMode := (pIndexSizeBits + 1) >> 3
		blockMode = (blockMode << 6) | ((primaryIndex >> shift) & 0x3F)
		dst[0] = byte(blockMode)

		for i := uint(1); i < headerSizeBytes; i++ {
			shift -= 8
			dst[i] = byte(primaryIndex >> shift)
		}
	}

	return iIdx, oIdx, nil
}

// Inverse reverses the Forward pipeline: header parse, ZRLT decode, inverse
// GST, then the inverse (B)BWT. The compressed length is taken from this.size.
func (this *BWTBlockCodec) Inverse(src, dst []byte) (uint, uint, error) {
	compressedLength := this.size

	if compressedLength == 0 {
		return 0, 0, nil
	}

	primaryIndex := uint(0)
	blockSize := compressedLength
	headerSizeBytes := uint(0)
	srcIdx := uint(0)

	if this.isBWT {
		// Read block header (mode + primary index). See top of file for format
		blockMode := uint(src[0])
		headerSizeBytes = 1 + ((blockMode >> 6) & 0x03)

		if compressedLength < headerSizeBytes {
			return 0, 0, errors.New("Invalid compressed length in stream")
		}

		if compressedLength == 0 {
			return 0, 0, nil
		}

		compressedLength -= headerSizeBytes
		shift := (headerSizeBytes - 1) << 3
		primaryIndex = (blockMode & 0x3F) << shift
		blockSize = compressedLength
		srcIdx = headerSizeBytes

		// Extract BWT primary index
		for i := uint(1); i < headerSizeBytes; i++ {
			shift -= 8
			primaryIndex |= uint(src[i]) << shift
		}
	}

	if blockSize > this.maxBlockSize() {
		errMsg := fmt.Sprintf("Block size is %v, max value is %v", blockSize, this.maxBlockSize())
		return 0, 0, errors.New(errMsg)
	}

	if this.mode != GST_MODE_RAW {
		// Apply Zero Run Length Decoding
		ZRLT, err := NewZRLT(compressedLength)

		if err != nil {
			return 0, 0, err
		}

		iIdx, oIdx, err := ZRLT.Inverse(src[srcIdx:], dst)
		iIdx += headerSizeBytes

		if err != nil {
			return iIdx, oIdx, err
		}

		srcIdx = 0
		blockSize = oIdx

		// Apply inverse Pre Transform
		gst, err := this.createGST(blockSize)

		if err != nil {
			return 0, 0, err
		}

		gst.Inverse(dst, src)
	}

	if this.isBWT {
		this.transform.(*transform.BWT).SetPrimaryIndex(primaryIndex)
	}

	this.transform.(kanzi.Sizeable).SetSize(blockSize)

	// Apply inverse Transform
	return this.transform.Inverse(src[srcIdx:], dst)
}

// MaxEncodedLen bounds the output size for an input of srcLen bytes.
func (this BWTBlockCodec) MaxEncodedLen(srcLen int) int {
	// Return input buffer size + max header size
	// If forward() fails due to output buffer size, the block is returned
	// unmodified with an error
	if this.isBWT == true {
		return srcLen + 4
	}

	return srcLen
}
go/src/kanzi/function/BWTBlockCodec.go
0.724481
0.516535
BWTBlockCodec.go
starcoder
package gocarina

import (
	"fmt"
	"image"
	"image/png"
	"log"
	"os"
)

// Tile represents a lettered square from a Letterpress game board.
type Tile struct {
	Letter  rune        // the letter this tile represents, if known
	img     image.Image // the original tile image, prior to any scaling/downsampling
	Reduced image.Image // the tile in black and white, bounding-boxed, and scaled down
	Bounded image.Image // the bounded tile (used only for debugging)
}

// NewTile builds a Tile for letter from img and immediately reduces it
// (with a zero-pixel bounding-box border).
func NewTile(letter rune, img image.Image) (result *Tile) {
	result = &Tile{Letter: letter, img: img}
	result.reduce(0)
	return
}

// Reduce the tile by converting to monochrome, applying a bounding box, and scaling to match the given size.
// The resulting image will be stored in t.Reduced.
// border is the margin (in pixels) passed to the bounding-box search.
func (t *Tile) reduce(border int) {
	targetRect := image.Rect(0, 0, TileTargetWidth, TileTargetHeight)

	// Sanity-check the target rectangle dimensions; a mismatch is a
	// programmer error, so fail fast.
	if targetRect.Dx() != TileTargetWidth {
		log.Fatalf("expected targetRect.Dx() to be %d, got: %d", TileTargetWidth, targetRect.Dx())
	}

	if targetRect.Dy() != TileTargetHeight {
		log.Fatalf("expected targetRect.Dy() to be %d, got: %d", TileTargetHeight, targetRect.Dy())
	}

	src := BlackWhiteImage(t.img)

	// find the bounding box for the character
	bbox := BoundingBox(src, border)

	// Only apply the bounding box if it's above some % of the width/height of original tile.
	// This is to avoid pathological cases for skinny letters like "I", which
	// would otherwise result in completely black tiles when bounded.
	if bbox.Bounds().Dx() >= int(MinBoundingBoxPercent*float64(t.img.Bounds().Dx())) &&
		bbox.Bounds().Dy() >= int(MinBoundingBoxPercent*float64(t.img.Bounds().Dy())) {
		// Crop via SubImage; the concrete type returned by BlackWhiteImage is
		// assumed to support SubImage (a runtime type assertion enforces it).
		src = src.(interface {
			SubImage(r image.Rectangle) image.Image
		}).SubImage(bbox)
	} else {
		// enable only for debugging
		//log.Printf("rune: %c: skipping boundingbox: orig width: %d, boundbox width: %d", t.Letter, t.img.Bounds().Dx(), bbox.Dx())
	}

	t.Bounded = src
	t.Reduced = Scale(src, targetRect)

	// it's sometimes helpful to see a textual version of the reduced tile
	//log.Printf("\n%s\n", ImageToString(t.Reduced))

	if t.Reduced.Bounds().Dx() != TileTargetWidth {
		log.Fatalf("expected t.Reduced.Bounds().Dx() to be %d, got: %d", TileTargetWidth, t.Reduced.Bounds().Dx())
	}

	if t.Reduced.Bounds().Dy() != TileTargetHeight {
		log.Fatalf("expected t.Reduced.Bounds().Dy() to be %d, got: %d", TileTargetHeight, t.Reduced.Bounds().Dy())
	}
}

// Save the bounded tile. Only for debugging.
// SaveBoundedAndReduced writes PNGs of t.Bounded and t.Reduced under
// debug_output/; any I/O or encoding failure aborts the process.
func (t *Tile) SaveBoundedAndReduced() {
	saveImgToFile := func(file string, img image.Image) {
		toFile, err := os.Create(file)
		if err != nil {
			log.Fatal(err)
		}
		defer toFile.Close()

		err = png.Encode(toFile, img)
		if err != nil {
			log.Fatal(err)
		}
	}

	saveImgToFile(fmt.Sprintf("debug_output/bounded_%c.png", t.Letter), t.Bounded)
	saveImgToFile(fmt.Sprintf("debug_output/reduced_%c.png", t.Letter), t.Reduced)
}
tile.go
0.744749
0.538923
tile.go
starcoder
// NOTE(review): this file follows the Kiota code-generator style (hashed
// import alias, nil-receiver guards on every method); keep edits consistent
// with regeneration.
package graph

import (
	i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55 "github.com/microsoft/kiota/abstractions/go/serialization"
)

// BucketAggregationRange provides operations to call the query method.
type BucketAggregationRange struct {
	// Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
	additionalData map[string]interface{}
	// Defines the lower bound from which to compute the aggregation. This can be a numeric value or a string representation of a date using the YYYY-MM-DDTHH:mm:ss.sssZ format. Required.
	from *string
	// Defines the upper bound up to which to compute the aggregation. This can be a numeric value or a string representation of a date using the YYYY-MM-DDTHH:mm:ss.sssZ format. Required.
	to *string
}

// NewBucketAggregationRange instantiates a new bucketAggregationRange and sets the default values.
func NewBucketAggregationRange() *BucketAggregationRange {
	m := &BucketAggregationRange{}
	m.SetAdditionalData(make(map[string]interface{}))
	return m
}

// CreateBucketAggregationRangeFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value
func CreateBucketAggregationRangeFromDiscriminatorValue(parseNode i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) (i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable, error) {
	return NewBucketAggregationRange(), nil
}

// GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *BucketAggregationRange) GetAdditionalData() map[string]interface{} {
	if m == nil {
		return nil
	} else {
		return m.additionalData
	}
}

// GetFieldDeserializers the deserialization information for the current model
func (m *BucketAggregationRange) GetFieldDeserializers() map[string]func(interface{}, i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
	res := make(map[string]func(interface{}, i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error)
	res["from"] = func(o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
		val, err := n.GetStringValue()
		if err != nil {
			return err
		}
		if val != nil {
			m.SetFrom(val)
		}
		return nil
	}
	res["to"] = func(o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
		val, err := n.GetStringValue()
		if err != nil {
			return err
		}
		if val != nil {
			m.SetTo(val)
		}
		return nil
	}
	return res
}

// GetFrom gets the from property value. Defines the lower bound from which to compute the aggregation. This can be a numeric value or a string representation of a date using the YYYY-MM-DDTHH:mm:ss.sssZ format. Required.
func (m *BucketAggregationRange) GetFrom() *string {
	if m == nil {
		return nil
	} else {
		return m.from
	}
}

// GetTo gets the to property value. Defines the upper bound up to which to compute the aggregation. This can be a numeric value or a string representation of a date using the YYYY-MM-DDTHH:mm:ss.sssZ format. Required.
func (m *BucketAggregationRange) GetTo() *string {
	if m == nil {
		return nil
	} else {
		return m.to
	}
}

// IsNil reports whether the receiver is nil (used by the serialization layer).
func (m *BucketAggregationRange) IsNil() bool {
	return m == nil
}

// Serialize serializes information the current object
func (m *BucketAggregationRange) Serialize(writer i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.SerializationWriter) error {
	{
		err := writer.WriteStringValue("from", m.GetFrom())
		if err != nil {
			return err
		}
	}
	{
		err := writer.WriteStringValue("to", m.GetTo())
		if err != nil {
			return err
		}
	}
	{
		err := writer.WriteAdditionalData(m.GetAdditionalData())
		if err != nil {
			return err
		}
	}
	return nil
}

// SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *BucketAggregationRange) SetAdditionalData(value map[string]interface{}) {
	if m != nil {
		m.additionalData = value
	}
}

// SetFrom sets the from property value. Defines the lower bound from which to compute the aggregation. This can be a numeric value or a string representation of a date using the YYYY-MM-DDTHH:mm:ss.sssZ format. Required.
func (m *BucketAggregationRange) SetFrom(value *string) {
	if m != nil {
		m.from = value
	}
}

// SetTo sets the to property value. Defines the upper bound up to which to compute the aggregation. This can be a numeric value or a string representation of a date using the YYYY-MM-DDTHH:mm:ss.sssZ format. Required.
func (m *BucketAggregationRange) SetTo(value *string) {
	if m != nil {
		m.to = value
	}
}
models/microsoft/graph/bucket_aggregation_range.go
0.836655
0.439386
bucket_aggregation_range.go
starcoder
package bigquery

import (
	"io"

	bq "google.golang.org/api/bigquery/v2"
)

// GCSReference is a reference to one or more Google Cloud Storage objects, which together constitute
// an input or output to a BigQuery operation.
type GCSReference struct {
	// URIs refer to Google Cloud Storage objects.
	URIs []string

	FileConfig

	// DestinationFormat is the format to use when writing exported files.
	// Allowed values are: CSV, Avro, JSON. The default is CSV.
	// CSV is not supported for tables with nested or repeated fields.
	DestinationFormat DataFormat

	// Compression specifies the type of compression to apply when writing data
	// to Google Cloud Storage, or using this GCSReference as an ExternalData
	// source with CSV or JSON SourceFormat. Default is None.
	Compression Compression
}

// NewGCSReference constructs a reference to one or more Google Cloud Storage objects, which together constitute a data source or destination.
// In the simple case, a single URI in the form gs://bucket/object may refer to a single GCS object.
// Data may also be split into mutiple files, if multiple URIs or URIs containing wildcards are provided.
// Each URI may contain one '*' wildcard character, which (if present) must come after the bucket name.
// For more information about the treatment of wildcards and multiple URIs,
// see https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
func NewGCSReference(uri ...string) *GCSReference {
	ref := &GCSReference{URIs: uri}
	return ref
}

// Compression is the type of compression to apply when writing data to Google Cloud Storage.
type Compression string

const (
	// None applies no compression.
	None Compression = "NONE"
	// Gzip applies gzip compression.
	Gzip Compression = "GZIP"
)

// populateLoadConfig records this reference as the source of a load job.
// The io.Reader result is always nil: data comes from GCS, not from a
// client-side stream.
func (r *GCSReference) populateLoadConfig(lc *bq.JobConfigurationLoad) io.Reader {
	lc.SourceUris = r.URIs
	r.FileConfig.populateLoadConfig(lc)
	return nil
}

// toBQ converts this reference into the BigQuery API's external data
// configuration, copying the URI list so the API struct does not alias it.
func (r *GCSReference) toBQ() bq.ExternalDataConfiguration {
	uris := make([]string, 0, len(r.URIs))
	uris = append(uris, r.URIs...)

	cfg := bq.ExternalDataConfiguration{
		Compression: string(r.Compression),
		SourceUris:  uris,
	}
	r.FileConfig.populateExternalDataConfig(&cfg)
	return cfg
}
vendor/cloud.google.com/go/bigquery/gcs.go
0.685529
0.436142
gcs.go
starcoder
package column import ( "github.com/kelindar/bitmap" "github.com/kelindar/column/commit" ) // --------------------------- Float32s ---------------------------- // columnFloat32 represents a generic column type columnfloat32 struct { fill bitmap.Bitmap // The fill-list data []float32 // The actual values } // makeFloat32s creates a new vector for Float32s func makeFloat32s() Column { return &columnfloat32{ fill: make(bitmap.Bitmap, 0, 4), data: make([]float32, 0, 64), } } // Grow grows the size of the column until we have enough to store func (c *columnfloat32) Grow(idx uint32) { if idx < uint32(len(c.data)) { return } if idx < uint32(cap(c.data)) { c.fill.Grow(idx) c.data = c.data[:idx+1] return } c.fill.Grow(idx) clone := make([]float32, idx+1, capacityFor(idx+1)) copy(clone, c.data) c.data = clone } // Apply applies a set of operations to the column. func (c *columnfloat32) Apply(r *commit.Reader) { for r.Next() { switch r.Type { case commit.Put: c.fill[r.Offset>>6] |= 1 << (r.Offset & 0x3f) c.data[r.Offset] = r.Float32() // If this is an atomic increment/decrement, we need to change the operation to // the final value, since after this update an index needs to be recalculated. case commit.Add: c.fill[r.Offset>>6] |= 1 << (r.Offset & 0x3f) value := c.data[r.Offset] + r.Float32() c.data[r.Offset] = value r.SwapFloat32(value) case commit.Delete: c.fill.Remove(r.Index()) } } } // Contains checks whether the column has a value at a specified index. 
func (c *columnfloat32) Contains(idx uint32) bool { return c.fill.Contains(idx) } // Index returns the fill list for the column func (c *columnfloat32) Index() *bitmap.Bitmap { return &c.fill } // Value retrieves a value at a specified index func (c *columnfloat32) Value(idx uint32) (v interface{}, ok bool) { v = float32(0) if idx < uint32(len(c.data)) && c.fill.Contains(idx) { v, ok = c.data[idx], true } return } // LoadFloat64 retrieves a float64 value at a specified index func (c *columnfloat32) LoadFloat64(idx uint32) (v float64, ok bool) { if idx < uint32(len(c.data)) && c.fill.Contains(idx) { v, ok = float64(c.data[idx]), true } return } // LoadInt64 retrieves an int64 value at a specified index func (c *columnfloat32) LoadInt64(idx uint32) (v int64, ok bool) { if idx < uint32(len(c.data)) && c.fill.Contains(idx) { v, ok = int64(c.data[idx]), true } return } // LoadUint64 retrieves an uint64 value at a specified index func (c *columnfloat32) LoadUint64(idx uint32) (v uint64, ok bool) { if idx < uint32(len(c.data)) && c.fill.Contains(idx) { v, ok = uint64(c.data[idx]), true } return } // FilterFloat64 filters down the values based on the specified predicate. func (c *columnfloat32) FilterFloat64(offset uint32, index bitmap.Bitmap, predicate func(v float64) bool) { index.And(c.fill[offset>>6 : int(offset>>6)+len(index)]) index.Filter(func(idx uint32) bool { idx = offset + idx return idx < uint32(len(c.data)) && predicate(float64(c.data[idx])) }) } // FilterInt64 filters down the values based on the specified predicate. func (c *columnfloat32) FilterInt64(offset uint32, index bitmap.Bitmap, predicate func(v int64) bool) { index.And(c.fill[offset>>6 : int(offset>>6)+len(index)]) index.Filter(func(idx uint32) (match bool) { idx = offset + idx return idx < uint32(len(c.data)) && predicate(int64(c.data[idx])) }) } // FilterUint64 filters down the values based on the specified predicate. 
func (c *columnfloat32) FilterUint64(offset uint32, index bitmap.Bitmap, predicate func(v uint64) bool) { index.And(c.fill[offset>>6 : int(offset>>6)+len(index)]) index.Filter(func(idx uint32) (match bool) { idx = offset + idx return idx < uint32(len(c.data)) && predicate(uint64(c.data[idx])) }) } // --------------------------- Cursor Update ---------------------------- // SetFloat32 updates a column value for the current item. The actual operation // will be queued and executed once the current the transaction completes. func (cur *Cursor) SetFloat32(value float32) { cur.update.PutFloat32(commit.Put, cur.idx, value) } // AddFloat32 atomically increments/decrements the current value by the specified amount. Note // that this only works for numerical values and the type of the value must match. func (cur *Cursor) AddFloat32(amount float32) { cur.update.PutFloat32(commit.Add, cur.idx, amount) } // SetFloat32At updates a specified column value for the current item. The actual operation // will be queued and executed once the current the transaction completes. func (cur *Cursor) SetFloat32At(column string, value float32) { cur.txn.bufferFor(column).PutFloat32(commit.Put, cur.idx, value) } // AddFloat32At atomically increments/decrements the column value by the specified amount. Note // that this only works for numerical values and the type of the value must match. 
func (cur *Cursor) AddFloat32At(column string, amount float32) {
	cur.txn.bufferFor(column).PutFloat32(commit.Add, cur.idx, amount)
}

// --------------------------- Float64s ----------------------------

// columnfloat64 represents a generic column of float64 values
type columnfloat64 struct {
	fill bitmap.Bitmap // The fill-list
	data []float64     // The actual values
}

// makeFloat64s creates a new vector for Float64s
func makeFloat64s() Column {
	return &columnfloat64{
		fill: make(bitmap.Bitmap, 0, 4),
		data: make([]float64, 0, 64),
	}
}

// Grow grows the size of the column until we have enough to store the given index
func (c *columnfloat64) Grow(idx uint32) {
	if idx < uint32(len(c.data)) {
		return
	}
	if idx < uint32(cap(c.data)) {
		c.fill.Grow(idx)
		c.data = c.data[:idx+1]
		return
	}
	c.fill.Grow(idx)
	clone := make([]float64, idx+1, capacityFor(idx+1))
	copy(clone, c.data)
	c.data = clone
}

// Apply applies a set of operations to the column.
func (c *columnfloat64) Apply(r *commit.Reader) {
	for r.Next() {
		switch r.Type {
		case commit.Put:
			c.fill[r.Offset>>6] |= 1 << (r.Offset & 0x3f) // mark the slot as filled
			c.data[r.Offset] = r.Float64()

		// If this is an atomic increment/decrement, we need to change the operation to
		// the final value, since after this update an index needs to be recalculated.
		case commit.Add:
			c.fill[r.Offset>>6] |= 1 << (r.Offset & 0x3f) // mark the slot as filled
			value := c.data[r.Offset] + r.Float64()
			c.data[r.Offset] = value
			r.SwapFloat64(value)

		case commit.Delete:
			c.fill.Remove(r.Index())
		}
	}
}

// Contains checks whether the column has a value at a specified index.
func (c *columnfloat64) Contains(idx uint32) bool {
	return c.fill.Contains(idx)
}

// Index returns the fill list for the column
func (c *columnfloat64) Index() *bitmap.Bitmap {
	return &c.fill
}

// Value retrieves a value at a specified index
func (c *columnfloat64) Value(idx uint32) (v interface{}, ok bool) {
	v = float64(0)
	if idx < uint32(len(c.data)) && c.fill.Contains(idx) {
		v, ok = c.data[idx], true
	}
	return
}

// LoadFloat64 retrieves a float64 value at a specified index
func (c *columnfloat64) LoadFloat64(idx uint32) (v float64, ok bool) {
	if idx < uint32(len(c.data)) && c.fill.Contains(idx) {
		v, ok = float64(c.data[idx]), true
	}
	return
}

// LoadInt64 retrieves an int64 value at a specified index
func (c *columnfloat64) LoadInt64(idx uint32) (v int64, ok bool) {
	if idx < uint32(len(c.data)) && c.fill.Contains(idx) {
		v, ok = int64(c.data[idx]), true
	}
	return
}

// LoadUint64 retrieves a uint64 value at a specified index
func (c *columnfloat64) LoadUint64(idx uint32) (v uint64, ok bool) {
	if idx < uint32(len(c.data)) && c.fill.Contains(idx) {
		v, ok = uint64(c.data[idx]), true
	}
	return
}

// FilterFloat64 filters down the values based on the specified predicate.
// NOTE(review): offset is assumed to be a multiple of 64 (bitmap-word aligned),
// otherwise the offset>>6 slicing below misaligns — confirm with callers.
func (c *columnfloat64) FilterFloat64(offset uint32, index bitmap.Bitmap, predicate func(v float64) bool) {
	index.And(c.fill[offset>>6 : int(offset>>6)+len(index)])
	index.Filter(func(idx uint32) bool {
		idx = offset + idx
		return idx < uint32(len(c.data)) && predicate(float64(c.data[idx]))
	})
}

// FilterInt64 filters down the values based on the specified predicate.
func (c *columnfloat64) FilterInt64(offset uint32, index bitmap.Bitmap, predicate func(v int64) bool) {
	index.And(c.fill[offset>>6 : int(offset>>6)+len(index)])
	index.Filter(func(idx uint32) (match bool) {
		idx = offset + idx
		return idx < uint32(len(c.data)) && predicate(int64(c.data[idx]))
	})
}

// FilterUint64 filters down the values based on the specified predicate.
func (c *columnfloat64) FilterUint64(offset uint32, index bitmap.Bitmap, predicate func(v uint64) bool) {
	index.And(c.fill[offset>>6 : int(offset>>6)+len(index)])
	index.Filter(func(idx uint32) (match bool) {
		idx = offset + idx
		return idx < uint32(len(c.data)) && predicate(uint64(c.data[idx]))
	})
}

// --------------------------- Cursor Update ----------------------------

// SetFloat64 updates a column value for the current item. The actual operation
// will be queued and executed once the transaction completes.
func (cur *Cursor) SetFloat64(value float64) {
	cur.update.PutFloat64(commit.Put, cur.idx, value)
}

// AddFloat64 atomically increments/decrements the current value by the specified amount. Note
// that this only works for numerical values and the type of the value must match.
func (cur *Cursor) AddFloat64(amount float64) {
	cur.update.PutFloat64(commit.Add, cur.idx, amount)
}

// SetFloat64At updates a specified column value for the current item. The actual operation
// will be queued and executed once the transaction completes.
func (cur *Cursor) SetFloat64At(column string, value float64) {
	cur.txn.bufferFor(column).PutFloat64(commit.Put, cur.idx, value)
}

// AddFloat64At atomically increments/decrements the column value by the specified amount. Note
// that this only works for numerical values and the type of the value must match.
func (cur *Cursor) AddFloat64At(column string, amount float64) {
	cur.txn.bufferFor(column).PutFloat64(commit.Add, cur.idx, amount)
}

// --------------------------- Ints ----------------------------

// columnint represents a generic column of int values
type columnint struct {
	fill bitmap.Bitmap // The fill-list
	data []int         // The actual values
}

// makeInts creates a new vector for Ints
func makeInts() Column {
	return &columnint{
		fill: make(bitmap.Bitmap, 0, 4),
		data: make([]int, 0, 64),
	}
}

// Grow grows the size of the column until we have enough to store the given index
func (c *columnint) Grow(idx uint32) {
	if idx < uint32(len(c.data)) {
		return
	}
	if idx < uint32(cap(c.data)) {
		c.fill.Grow(idx)
		c.data = c.data[:idx+1]
		return
	}
	c.fill.Grow(idx)
	clone := make([]int, idx+1, capacityFor(idx+1))
	copy(clone, c.data)
	c.data = clone
}

// Apply applies a set of operations to the column.
func (c *columnint) Apply(r *commit.Reader) {
	for r.Next() {
		switch r.Type {
		case commit.Put:
			c.fill[r.Offset>>6] |= 1 << (r.Offset & 0x3f) // mark the slot as filled
			c.data[r.Offset] = r.Int()

		// If this is an atomic increment/decrement, we need to change the operation to
		// the final value, since after this update an index needs to be recalculated.
		case commit.Add:
			c.fill[r.Offset>>6] |= 1 << (r.Offset & 0x3f) // mark the slot as filled
			value := c.data[r.Offset] + r.Int()
			c.data[r.Offset] = value
			r.SwapInt(value)

		case commit.Delete:
			c.fill.Remove(r.Index())
		}
	}
}

// Contains checks whether the column has a value at a specified index.
func (c *columnint) Contains(idx uint32) bool {
	return c.fill.Contains(idx)
}

// Index returns the fill list for the column
func (c *columnint) Index() *bitmap.Bitmap {
	return &c.fill
}

// Value retrieves a value at a specified index
func (c *columnint) Value(idx uint32) (v interface{}, ok bool) {
	v = int(0)
	if idx < uint32(len(c.data)) && c.fill.Contains(idx) {
		v, ok = c.data[idx], true
	}
	return
}

// LoadFloat64 retrieves a float64 value at a specified index
func (c *columnint) LoadFloat64(idx uint32) (v float64, ok bool) {
	if idx < uint32(len(c.data)) && c.fill.Contains(idx) {
		v, ok = float64(c.data[idx]), true
	}
	return
}

// LoadInt64 retrieves an int64 value at a specified index
func (c *columnint) LoadInt64(idx uint32) (v int64, ok bool) {
	if idx < uint32(len(c.data)) && c.fill.Contains(idx) {
		v, ok = int64(c.data[idx]), true
	}
	return
}

// LoadUint64 retrieves a uint64 value at a specified index
func (c *columnint) LoadUint64(idx uint32) (v uint64, ok bool) {
	if idx < uint32(len(c.data)) && c.fill.Contains(idx) {
		v, ok = uint64(c.data[idx]), true
	}
	return
}

// FilterFloat64 filters down the values based on the specified predicate.
// NOTE(review): offset is assumed to be a multiple of 64 (bitmap-word aligned),
// otherwise the offset>>6 slicing below misaligns — confirm with callers.
func (c *columnint) FilterFloat64(offset uint32, index bitmap.Bitmap, predicate func(v float64) bool) {
	index.And(c.fill[offset>>6 : int(offset>>6)+len(index)])
	index.Filter(func(idx uint32) bool {
		idx = offset + idx
		return idx < uint32(len(c.data)) && predicate(float64(c.data[idx]))
	})
}

// FilterInt64 filters down the values based on the specified predicate.
func (c *columnint) FilterInt64(offset uint32, index bitmap.Bitmap, predicate func(v int64) bool) {
	index.And(c.fill[offset>>6 : int(offset>>6)+len(index)])
	index.Filter(func(idx uint32) (match bool) {
		idx = offset + idx
		return idx < uint32(len(c.data)) && predicate(int64(c.data[idx]))
	})
}

// FilterUint64 filters down the values based on the specified predicate.
func (c *columnint) FilterUint64(offset uint32, index bitmap.Bitmap, predicate func(v uint64) bool) {
	index.And(c.fill[offset>>6 : int(offset>>6)+len(index)])
	index.Filter(func(idx uint32) (match bool) {
		idx = offset + idx
		return idx < uint32(len(c.data)) && predicate(uint64(c.data[idx]))
	})
}

// --------------------------- Cursor Update ----------------------------

// SetInt updates a column value for the current item. The actual operation
// will be queued and executed once the transaction completes.
func (cur *Cursor) SetInt(value int) {
	cur.update.PutInt(commit.Put, cur.idx, value)
}

// AddInt atomically increments/decrements the current value by the specified amount. Note
// that this only works for numerical values and the type of the value must match.
func (cur *Cursor) AddInt(amount int) {
	cur.update.PutInt(commit.Add, cur.idx, amount)
}

// SetIntAt updates a specified column value for the current item. The actual operation
// will be queued and executed once the transaction completes.
func (cur *Cursor) SetIntAt(column string, value int) {
	cur.txn.bufferFor(column).PutInt(commit.Put, cur.idx, value)
}

// AddIntAt atomically increments/decrements the column value by the specified amount. Note
// that this only works for numerical values and the type of the value must match.
func (cur *Cursor) AddIntAt(column string, amount int) {
	cur.txn.bufferFor(column).PutInt(commit.Add, cur.idx, amount)
}

// --------------------------- Int16s ----------------------------

// columnint16 represents a generic column of int16 values
type columnint16 struct {
	fill bitmap.Bitmap // The fill-list
	data []int16       // The actual values
}

// makeInt16s creates a new vector for Int16s
func makeInt16s() Column {
	return &columnint16{
		fill: make(bitmap.Bitmap, 0, 4),
		data: make([]int16, 0, 64),
	}
}

// Grow grows the size of the column until we have enough to store the given index
func (c *columnint16) Grow(idx uint32) {
	if idx < uint32(len(c.data)) {
		return
	}
	if idx < uint32(cap(c.data)) {
		c.fill.Grow(idx)
		c.data = c.data[:idx+1]
		return
	}
	c.fill.Grow(idx)
	clone := make([]int16, idx+1, capacityFor(idx+1))
	copy(clone, c.data)
	c.data = clone
}

// Apply applies a set of operations to the column.
func (c *columnint16) Apply(r *commit.Reader) {
	for r.Next() {
		switch r.Type {
		case commit.Put:
			c.fill[r.Offset>>6] |= 1 << (r.Offset & 0x3f) // mark the slot as filled
			c.data[r.Offset] = r.Int16()

		// If this is an atomic increment/decrement, we need to change the operation to
		// the final value, since after this update an index needs to be recalculated.
		case commit.Add:
			c.fill[r.Offset>>6] |= 1 << (r.Offset & 0x3f) // mark the slot as filled
			value := c.data[r.Offset] + r.Int16()
			c.data[r.Offset] = value
			r.SwapInt16(value)

		case commit.Delete:
			c.fill.Remove(r.Index())
		}
	}
}

// Contains checks whether the column has a value at a specified index.
func (c *columnint16) Contains(idx uint32) bool {
	return c.fill.Contains(idx)
}

// Index returns the fill list for the column
func (c *columnint16) Index() *bitmap.Bitmap {
	return &c.fill
}

// Value retrieves a value at a specified index
func (c *columnint16) Value(idx uint32) (v interface{}, ok bool) {
	v = int16(0)
	if idx < uint32(len(c.data)) && c.fill.Contains(idx) {
		v, ok = c.data[idx], true
	}
	return
}

// LoadFloat64 retrieves a float64 value at a specified index
func (c *columnint16) LoadFloat64(idx uint32) (v float64, ok bool) {
	if idx < uint32(len(c.data)) && c.fill.Contains(idx) {
		v, ok = float64(c.data[idx]), true
	}
	return
}

// LoadInt64 retrieves an int64 value at a specified index
func (c *columnint16) LoadInt64(idx uint32) (v int64, ok bool) {
	if idx < uint32(len(c.data)) && c.fill.Contains(idx) {
		v, ok = int64(c.data[idx]), true
	}
	return
}

// LoadUint64 retrieves a uint64 value at a specified index
func (c *columnint16) LoadUint64(idx uint32) (v uint64, ok bool) {
	if idx < uint32(len(c.data)) && c.fill.Contains(idx) {
		v, ok = uint64(c.data[idx]), true
	}
	return
}

// FilterFloat64 filters down the values based on the specified predicate.
// NOTE(review): offset is assumed to be a multiple of 64 (bitmap-word aligned),
// otherwise the offset>>6 slicing below misaligns — confirm with callers.
func (c *columnint16) FilterFloat64(offset uint32, index bitmap.Bitmap, predicate func(v float64) bool) {
	index.And(c.fill[offset>>6 : int(offset>>6)+len(index)])
	index.Filter(func(idx uint32) bool {
		idx = offset + idx
		return idx < uint32(len(c.data)) && predicate(float64(c.data[idx]))
	})
}

// FilterInt64 filters down the values based on the specified predicate.
func (c *columnint16) FilterInt64(offset uint32, index bitmap.Bitmap, predicate func(v int64) bool) {
	index.And(c.fill[offset>>6 : int(offset>>6)+len(index)])
	index.Filter(func(idx uint32) (match bool) {
		idx = offset + idx
		return idx < uint32(len(c.data)) && predicate(int64(c.data[idx]))
	})
}

// FilterUint64 filters down the values based on the specified predicate.
func (c *columnint16) FilterUint64(offset uint32, index bitmap.Bitmap, predicate func(v uint64) bool) {
	index.And(c.fill[offset>>6 : int(offset>>6)+len(index)])
	index.Filter(func(idx uint32) (match bool) {
		idx = offset + idx
		return idx < uint32(len(c.data)) && predicate(uint64(c.data[idx]))
	})
}

// --------------------------- Cursor Update ----------------------------

// SetInt16 updates a column value for the current item. The actual operation
// will be queued and executed once the transaction completes.
func (cur *Cursor) SetInt16(value int16) {
	cur.update.PutInt16(commit.Put, cur.idx, value)
}

// AddInt16 atomically increments/decrements the current value by the specified amount. Note
// that this only works for numerical values and the type of the value must match.
func (cur *Cursor) AddInt16(amount int16) {
	cur.update.PutInt16(commit.Add, cur.idx, amount)
}

// SetInt16At updates a specified column value for the current item. The actual operation
// will be queued and executed once the transaction completes.
func (cur *Cursor) SetInt16At(column string, value int16) {
	cur.txn.bufferFor(column).PutInt16(commit.Put, cur.idx, value)
}

// AddInt16At atomically increments/decrements the column value by the specified amount. Note
// that this only works for numerical values and the type of the value must match.
func (cur *Cursor) AddInt16At(column string, amount int16) {
	cur.txn.bufferFor(column).PutInt16(commit.Add, cur.idx, amount)
}

// --------------------------- Int32s ----------------------------

// columnint32 represents a generic column of int32 values
type columnint32 struct {
	fill bitmap.Bitmap // The fill-list
	data []int32       // The actual values
}

// makeInt32s creates a new vector for Int32s
func makeInt32s() Column {
	return &columnint32{
		fill: make(bitmap.Bitmap, 0, 4),
		data: make([]int32, 0, 64),
	}
}

// Grow grows the size of the column until we have enough to store the given index
func (c *columnint32) Grow(idx uint32) {
	if idx < uint32(len(c.data)) {
		return
	}
	if idx < uint32(cap(c.data)) {
		c.fill.Grow(idx)
		c.data = c.data[:idx+1]
		return
	}
	c.fill.Grow(idx)
	clone := make([]int32, idx+1, capacityFor(idx+1))
	copy(clone, c.data)
	c.data = clone
}

// Apply applies a set of operations to the column.
func (c *columnint32) Apply(r *commit.Reader) {
	for r.Next() {
		switch r.Type {
		case commit.Put:
			c.fill[r.Offset>>6] |= 1 << (r.Offset & 0x3f) // mark the slot as filled
			c.data[r.Offset] = r.Int32()

		// If this is an atomic increment/decrement, we need to change the operation to
		// the final value, since after this update an index needs to be recalculated.
		case commit.Add:
			c.fill[r.Offset>>6] |= 1 << (r.Offset & 0x3f) // mark the slot as filled
			value := c.data[r.Offset] + r.Int32()
			c.data[r.Offset] = value
			r.SwapInt32(value)

		case commit.Delete:
			c.fill.Remove(r.Index())
		}
	}
}

// Contains checks whether the column has a value at a specified index.
func (c *columnint32) Contains(idx uint32) bool {
	return c.fill.Contains(idx)
}

// Index returns the fill list for the column
func (c *columnint32) Index() *bitmap.Bitmap {
	return &c.fill
}

// Value retrieves a value at a specified index
func (c *columnint32) Value(idx uint32) (v interface{}, ok bool) {
	v = int32(0)
	if idx < uint32(len(c.data)) && c.fill.Contains(idx) {
		v, ok = c.data[idx], true
	}
	return
}

// LoadFloat64 retrieves a float64 value at a specified index
func (c *columnint32) LoadFloat64(idx uint32) (v float64, ok bool) {
	if idx < uint32(len(c.data)) && c.fill.Contains(idx) {
		v, ok = float64(c.data[idx]), true
	}
	return
}

// LoadInt64 retrieves an int64 value at a specified index
func (c *columnint32) LoadInt64(idx uint32) (v int64, ok bool) {
	if idx < uint32(len(c.data)) && c.fill.Contains(idx) {
		v, ok = int64(c.data[idx]), true
	}
	return
}

// LoadUint64 retrieves a uint64 value at a specified index
func (c *columnint32) LoadUint64(idx uint32) (v uint64, ok bool) {
	if idx < uint32(len(c.data)) && c.fill.Contains(idx) {
		v, ok = uint64(c.data[idx]), true
	}
	return
}

// FilterFloat64 filters down the values based on the specified predicate.
// NOTE(review): offset is assumed to be a multiple of 64 (bitmap-word aligned),
// otherwise the offset>>6 slicing below misaligns — confirm with callers.
func (c *columnint32) FilterFloat64(offset uint32, index bitmap.Bitmap, predicate func(v float64) bool) {
	index.And(c.fill[offset>>6 : int(offset>>6)+len(index)])
	index.Filter(func(idx uint32) bool {
		idx = offset + idx
		return idx < uint32(len(c.data)) && predicate(float64(c.data[idx]))
	})
}

// FilterInt64 filters down the values based on the specified predicate.
func (c *columnint32) FilterInt64(offset uint32, index bitmap.Bitmap, predicate func(v int64) bool) {
	index.And(c.fill[offset>>6 : int(offset>>6)+len(index)])
	index.Filter(func(idx uint32) (match bool) {
		idx = offset + idx
		return idx < uint32(len(c.data)) && predicate(int64(c.data[idx]))
	})
}

// FilterUint64 filters down the values based on the specified predicate.
func (c *columnint32) FilterUint64(offset uint32, index bitmap.Bitmap, predicate func(v uint64) bool) {
	index.And(c.fill[offset>>6 : int(offset>>6)+len(index)])
	index.Filter(func(idx uint32) (match bool) {
		idx = offset + idx
		return idx < uint32(len(c.data)) && predicate(uint64(c.data[idx]))
	})
}

// --------------------------- Cursor Update ----------------------------

// SetInt32 updates a column value for the current item. The actual operation
// will be queued and executed once the transaction completes.
func (cur *Cursor) SetInt32(value int32) {
	cur.update.PutInt32(commit.Put, cur.idx, value)
}

// AddInt32 atomically increments/decrements the current value by the specified amount. Note
// that this only works for numerical values and the type of the value must match.
func (cur *Cursor) AddInt32(amount int32) {
	cur.update.PutInt32(commit.Add, cur.idx, amount)
}

// SetInt32At updates a specified column value for the current item. The actual operation
// will be queued and executed once the transaction completes.
func (cur *Cursor) SetInt32At(column string, value int32) {
	cur.txn.bufferFor(column).PutInt32(commit.Put, cur.idx, value)
}

// AddInt32At atomically increments/decrements the column value by the specified amount. Note
// that this only works for numerical values and the type of the value must match.
func (cur *Cursor) AddInt32At(column string, amount int32) {
	cur.txn.bufferFor(column).PutInt32(commit.Add, cur.idx, amount)
}

// --------------------------- Int64s ----------------------------

// columnint64 represents a generic column of int64 values
type columnint64 struct {
	fill bitmap.Bitmap // The fill-list
	data []int64       // The actual values
}

// makeInt64s creates a new vector for Int64s
func makeInt64s() Column {
	return &columnint64{
		fill: make(bitmap.Bitmap, 0, 4),
		data: make([]int64, 0, 64),
	}
}

// Grow grows the size of the column until we have enough to store the given index
func (c *columnint64) Grow(idx uint32) {
	if idx < uint32(len(c.data)) {
		return
	}
	if idx < uint32(cap(c.data)) {
		c.fill.Grow(idx)
		c.data = c.data[:idx+1]
		return
	}
	c.fill.Grow(idx)
	clone := make([]int64, idx+1, capacityFor(idx+1))
	copy(clone, c.data)
	c.data = clone
}

// Apply applies a set of operations to the column.
func (c *columnint64) Apply(r *commit.Reader) {
	for r.Next() {
		switch r.Type {
		case commit.Put:
			c.fill[r.Offset>>6] |= 1 << (r.Offset & 0x3f) // mark the slot as filled
			c.data[r.Offset] = r.Int64()

		// If this is an atomic increment/decrement, we need to change the operation to
		// the final value, since after this update an index needs to be recalculated.
		case commit.Add:
			c.fill[r.Offset>>6] |= 1 << (r.Offset & 0x3f) // mark the slot as filled
			value := c.data[r.Offset] + r.Int64()
			c.data[r.Offset] = value
			r.SwapInt64(value)

		case commit.Delete:
			c.fill.Remove(r.Index())
		}
	}
}

// Contains checks whether the column has a value at a specified index.
func (c *columnint64) Contains(idx uint32) bool {
	return c.fill.Contains(idx)
}

// Index returns the fill list for the column
func (c *columnint64) Index() *bitmap.Bitmap {
	return &c.fill
}

// Value retrieves a value at a specified index
func (c *columnint64) Value(idx uint32) (v interface{}, ok bool) {
	v = int64(0)
	if idx < uint32(len(c.data)) && c.fill.Contains(idx) {
		v, ok = c.data[idx], true
	}
	return
}

// LoadFloat64 retrieves a float64 value at a specified index
func (c *columnint64) LoadFloat64(idx uint32) (v float64, ok bool) {
	if idx < uint32(len(c.data)) && c.fill.Contains(idx) {
		v, ok = float64(c.data[idx]), true
	}
	return
}

// LoadInt64 retrieves an int64 value at a specified index
func (c *columnint64) LoadInt64(idx uint32) (v int64, ok bool) {
	if idx < uint32(len(c.data)) && c.fill.Contains(idx) {
		v, ok = int64(c.data[idx]), true
	}
	return
}

// LoadUint64 retrieves a uint64 value at a specified index
func (c *columnint64) LoadUint64(idx uint32) (v uint64, ok bool) {
	if idx < uint32(len(c.data)) && c.fill.Contains(idx) {
		v, ok = uint64(c.data[idx]), true
	}
	return
}

// FilterFloat64 filters down the values based on the specified predicate.
// NOTE(review): offset is assumed to be a multiple of 64 (bitmap-word aligned),
// otherwise the offset>>6 slicing below misaligns — confirm with callers.
func (c *columnint64) FilterFloat64(offset uint32, index bitmap.Bitmap, predicate func(v float64) bool) {
	index.And(c.fill[offset>>6 : int(offset>>6)+len(index)])
	index.Filter(func(idx uint32) bool {
		idx = offset + idx
		return idx < uint32(len(c.data)) && predicate(float64(c.data[idx]))
	})
}

// FilterInt64 filters down the values based on the specified predicate.
func (c *columnint64) FilterInt64(offset uint32, index bitmap.Bitmap, predicate func(v int64) bool) {
	index.And(c.fill[offset>>6 : int(offset>>6)+len(index)])
	index.Filter(func(idx uint32) (match bool) {
		idx = offset + idx
		return idx < uint32(len(c.data)) && predicate(int64(c.data[idx]))
	})
}

// FilterUint64 filters down the values based on the specified predicate.
func (c *columnint64) FilterUint64(offset uint32, index bitmap.Bitmap, predicate func(v uint64) bool) {
	index.And(c.fill[offset>>6 : int(offset>>6)+len(index)])
	index.Filter(func(idx uint32) (match bool) {
		idx = offset + idx
		return idx < uint32(len(c.data)) && predicate(uint64(c.data[idx]))
	})
}

// --------------------------- Cursor Update ----------------------------

// SetInt64 updates a column value for the current item. The actual operation
// will be queued and executed once the transaction completes.
func (cur *Cursor) SetInt64(value int64) {
	cur.update.PutInt64(commit.Put, cur.idx, value)
}

// AddInt64 atomically increments/decrements the current value by the specified amount. Note
// that this only works for numerical values and the type of the value must match.
func (cur *Cursor) AddInt64(amount int64) {
	cur.update.PutInt64(commit.Add, cur.idx, amount)
}

// SetInt64At updates a specified column value for the current item. The actual operation
// will be queued and executed once the transaction completes.
func (cur *Cursor) SetInt64At(column string, value int64) {
	cur.txn.bufferFor(column).PutInt64(commit.Put, cur.idx, value)
}

// AddInt64At atomically increments/decrements the column value by the specified amount. Note
// that this only works for numerical values and the type of the value must match.
func (cur *Cursor) AddInt64At(column string, amount int64) {
	cur.txn.bufferFor(column).PutInt64(commit.Add, cur.idx, amount)
}

// --------------------------- Uints ----------------------------

// columnuint represents a generic column of uint values
type columnuint struct {
	fill bitmap.Bitmap // The fill-list
	data []uint        // The actual values
}

// makeUints creates a new vector for Uints
func makeUints() Column {
	return &columnuint{
		fill: make(bitmap.Bitmap, 0, 4),
		data: make([]uint, 0, 64),
	}
}

// Grow grows the size of the column until we have enough to store the given index
func (c *columnuint) Grow(idx uint32) {
	if idx < uint32(len(c.data)) {
		return
	}
	if idx < uint32(cap(c.data)) {
		c.fill.Grow(idx)
		c.data = c.data[:idx+1]
		return
	}
	c.fill.Grow(idx)
	clone := make([]uint, idx+1, capacityFor(idx+1))
	copy(clone, c.data)
	c.data = clone
}

// Apply applies a set of operations to the column.
func (c *columnuint) Apply(r *commit.Reader) {
	for r.Next() {
		switch r.Type {
		case commit.Put:
			c.fill[r.Offset>>6] |= 1 << (r.Offset & 0x3f) // mark the slot as filled
			c.data[r.Offset] = r.Uint()

		// If this is an atomic increment/decrement, we need to change the operation to
		// the final value, since after this update an index needs to be recalculated.
		case commit.Add:
			c.fill[r.Offset>>6] |= 1 << (r.Offset & 0x3f) // mark the slot as filled
			value := c.data[r.Offset] + r.Uint()
			c.data[r.Offset] = value
			r.SwapUint(value)

		case commit.Delete:
			c.fill.Remove(r.Index())
		}
	}
}

// Contains checks whether the column has a value at a specified index.
func (c *columnuint) Contains(idx uint32) bool {
	return c.fill.Contains(idx)
}

// Index returns the fill list for the column
func (c *columnuint) Index() *bitmap.Bitmap {
	return &c.fill
}

// Value retrieves a value at a specified index
func (c *columnuint) Value(idx uint32) (v interface{}, ok bool) {
	v = uint(0)
	if idx < uint32(len(c.data)) && c.fill.Contains(idx) {
		v, ok = c.data[idx], true
	}
	return
}

// LoadFloat64 retrieves a float64 value at a specified index
func (c *columnuint) LoadFloat64(idx uint32) (v float64, ok bool) {
	if idx < uint32(len(c.data)) && c.fill.Contains(idx) {
		v, ok = float64(c.data[idx]), true
	}
	return
}

// LoadInt64 retrieves an int64 value at a specified index
func (c *columnuint) LoadInt64(idx uint32) (v int64, ok bool) {
	if idx < uint32(len(c.data)) && c.fill.Contains(idx) {
		v, ok = int64(c.data[idx]), true
	}
	return
}

// LoadUint64 retrieves a uint64 value at a specified index
func (c *columnuint) LoadUint64(idx uint32) (v uint64, ok bool) {
	if idx < uint32(len(c.data)) && c.fill.Contains(idx) {
		v, ok = uint64(c.data[idx]), true
	}
	return
}

// FilterFloat64 filters down the values based on the specified predicate.
// NOTE(review): offset is assumed to be a multiple of 64 (bitmap-word aligned),
// otherwise the offset>>6 slicing below misaligns — confirm with callers.
func (c *columnuint) FilterFloat64(offset uint32, index bitmap.Bitmap, predicate func(v float64) bool) {
	index.And(c.fill[offset>>6 : int(offset>>6)+len(index)])
	index.Filter(func(idx uint32) bool {
		idx = offset + idx
		return idx < uint32(len(c.data)) && predicate(float64(c.data[idx]))
	})
}

// FilterInt64 filters down the values based on the specified predicate.
func (c *columnuint) FilterInt64(offset uint32, index bitmap.Bitmap, predicate func(v int64) bool) {
	index.And(c.fill[offset>>6 : int(offset>>6)+len(index)])
	index.Filter(func(idx uint32) (match bool) {
		idx = offset + idx
		return idx < uint32(len(c.data)) && predicate(int64(c.data[idx]))
	})
}

// FilterUint64 filters down the values based on the specified predicate.
func (c *columnuint) FilterUint64(offset uint32, index bitmap.Bitmap, predicate func(v uint64) bool) {
	index.And(c.fill[offset>>6 : int(offset>>6)+len(index)])
	index.Filter(func(idx uint32) (match bool) {
		idx = offset + idx
		return idx < uint32(len(c.data)) && predicate(uint64(c.data[idx]))
	})
}

// --------------------------- Cursor Update ----------------------------

// SetUint updates a column value for the current item. The actual operation
// will be queued and executed once the transaction completes.
func (cur *Cursor) SetUint(value uint) {
	cur.update.PutUint(commit.Put, cur.idx, value)
}

// AddUint atomically increments/decrements the current value by the specified amount. Note
// that this only works for numerical values and the type of the value must match.
func (cur *Cursor) AddUint(amount uint) {
	cur.update.PutUint(commit.Add, cur.idx, amount)
}

// SetUintAt updates a specified column value for the current item. The actual operation
// will be queued and executed once the transaction completes.
func (cur *Cursor) SetUintAt(column string, value uint) {
	cur.txn.bufferFor(column).PutUint(commit.Put, cur.idx, value)
}

// AddUintAt atomically increments/decrements the column value by the specified amount. Note
// that this only works for numerical values and the type of the value must match.
func (cur *Cursor) AddUintAt(column string, amount uint) {
	cur.txn.bufferFor(column).PutUint(commit.Add, cur.idx, amount)
}

// --------------------------- Uint16s ----------------------------

// columnuint16 represents a generic column of uint16 values
type columnuint16 struct {
	fill bitmap.Bitmap // The fill-list
	data []uint16      // The actual values
}

// makeUint16s creates a new vector for Uint16s
func makeUint16s() Column {
	return &columnuint16{
		fill: make(bitmap.Bitmap, 0, 4),
		data: make([]uint16, 0, 64),
	}
}

// Grow grows the size of the column until we have enough to store the given index
func (c *columnuint16) Grow(idx uint32) {
	if idx < uint32(len(c.data)) {
		return
	}
	if idx < uint32(cap(c.data)) {
		c.fill.Grow(idx)
		c.data = c.data[:idx+1]
		return
	}
	c.fill.Grow(idx)
	clone := make([]uint16, idx+1, capacityFor(idx+1))
	copy(clone, c.data)
	c.data = clone
}

// Apply applies a set of operations to the column.
func (c *columnuint16) Apply(r *commit.Reader) {
	for r.Next() {
		switch r.Type {
		case commit.Put:
			c.fill[r.Offset>>6] |= 1 << (r.Offset & 0x3f) // mark the slot as filled
			c.data[r.Offset] = r.Uint16()

		// If this is an atomic increment/decrement, we need to change the operation to
		// the final value, since after this update an index needs to be recalculated.
		case commit.Add:
			c.fill[r.Offset>>6] |= 1 << (r.Offset & 0x3f) // mark the slot as filled
			value := c.data[r.Offset] + r.Uint16()
			c.data[r.Offset] = value
			r.SwapUint16(value)

		case commit.Delete:
			c.fill.Remove(r.Index())
		}
	}
}

// Contains checks whether the column has a value at a specified index.
func (c *columnuint16) Contains(idx uint32) bool {
	return c.fill.Contains(idx)
}

// Index returns the fill list for the column
func (c *columnuint16) Index() *bitmap.Bitmap {
	return &c.fill
}

// Value retrieves a value at a specified index
func (c *columnuint16) Value(idx uint32) (v interface{}, ok bool) {
	v = uint16(0)
	if idx < uint32(len(c.data)) && c.fill.Contains(idx) {
		v, ok = c.data[idx], true
	}
	return
}

// LoadFloat64 retrieves a float64 value at a specified index
func (c *columnuint16) LoadFloat64(idx uint32) (v float64, ok bool) {
	if idx < uint32(len(c.data)) && c.fill.Contains(idx) {
		v, ok = float64(c.data[idx]), true
	}
	return
}

// LoadInt64 retrieves an int64 value at a specified index
func (c *columnuint16) LoadInt64(idx uint32) (v int64, ok bool) {
	if idx < uint32(len(c.data)) && c.fill.Contains(idx) {
		v, ok = int64(c.data[idx]), true
	}
	return
}

// LoadUint64 retrieves a uint64 value at a specified index
func (c *columnuint16) LoadUint64(idx uint32) (v uint64, ok bool) {
	if idx < uint32(len(c.data)) && c.fill.Contains(idx) {
		v, ok = uint64(c.data[idx]), true
	}
	return
}

// FilterFloat64 filters down the values based on the specified predicate.
// NOTE(review): offset is assumed to be a multiple of 64 (bitmap-word aligned),
// otherwise the offset>>6 slicing below misaligns — confirm with callers.
func (c *columnuint16) FilterFloat64(offset uint32, index bitmap.Bitmap, predicate func(v float64) bool) {
	index.And(c.fill[offset>>6 : int(offset>>6)+len(index)])
	index.Filter(func(idx uint32) bool {
		idx = offset + idx
		return idx < uint32(len(c.data)) && predicate(float64(c.data[idx]))
	})
}

// FilterInt64 filters down the values based on the specified predicate.
func (c *columnuint16) FilterInt64(offset uint32, index bitmap.Bitmap, predicate func(v int64) bool) {
	index.And(c.fill[offset>>6 : int(offset>>6)+len(index)])
	index.Filter(func(idx uint32) (match bool) {
		idx = offset + idx
		return idx < uint32(len(c.data)) && predicate(int64(c.data[idx]))
	})
}

// FilterUint64 filters down the values based on the specified predicate.
func (c *columnuint16) FilterUint64(offset uint32, index bitmap.Bitmap, predicate func(v uint64) bool) {
	index.And(c.fill[offset>>6 : int(offset>>6)+len(index)])
	index.Filter(func(idx uint32) (match bool) {
		idx = offset + idx
		return idx < uint32(len(c.data)) && predicate(uint64(c.data[idx]))
	})
}

// --------------------------- Cursor Update ----------------------------

// SetUint16 updates a column value for the current item. The actual operation
// will be queued and executed once the transaction completes.
func (cur *Cursor) SetUint16(value uint16) {
	cur.update.PutUint16(commit.Put, cur.idx, value)
}

// AddUint16 atomically increments/decrements the current value by the specified amount. Note
// that this only works for numerical values and the type of the value must match.
func (cur *Cursor) AddUint16(amount uint16) {
	cur.update.PutUint16(commit.Add, cur.idx, amount)
}

// SetUint16At updates a specified column value for the current item. The actual operation
// will be queued and executed once the transaction completes.
func (cur *Cursor) SetUint16At(column string, value uint16) {
	cur.txn.bufferFor(column).PutUint16(commit.Put, cur.idx, value)
}

// AddUint16At atomically increments/decrements the column value by the specified amount. Note
// that this only works for numerical values and the type of the value must match.
func (cur *Cursor) AddUint16At(column string, amount uint16) { cur.txn.bufferFor(column).PutUint16(commit.Add, cur.idx, amount) } // --------------------------- Uint32s ---------------------------- // columnUint32 represents a generic column type columnuint32 struct { fill bitmap.Bitmap // The fill-list data []uint32 // The actual values } // makeUint32s creates a new vector for Uint32s func makeUint32s() Column { return &columnuint32{ fill: make(bitmap.Bitmap, 0, 4), data: make([]uint32, 0, 64), } } // Grow grows the size of the column until we have enough to store func (c *columnuint32) Grow(idx uint32) { if idx < uint32(len(c.data)) { return } if idx < uint32(cap(c.data)) { c.fill.Grow(idx) c.data = c.data[:idx+1] return } c.fill.Grow(idx) clone := make([]uint32, idx+1, capacityFor(idx+1)) copy(clone, c.data) c.data = clone } // Apply applies a set of operations to the column. func (c *columnuint32) Apply(r *commit.Reader) { for r.Next() { switch r.Type { case commit.Put: c.fill[r.Offset>>6] |= 1 << (r.Offset & 0x3f) c.data[r.Offset] = r.Uint32() // If this is an atomic increment/decrement, we need to change the operation to // the final value, since after this update an index needs to be recalculated. case commit.Add: c.fill[r.Offset>>6] |= 1 << (r.Offset & 0x3f) value := c.data[r.Offset] + r.Uint32() c.data[r.Offset] = value r.SwapUint32(value) case commit.Delete: c.fill.Remove(r.Index()) } } } // Contains checks whether the column has a value at a specified index. 
func (c *columnuint32) Contains(idx uint32) bool { return c.fill.Contains(idx) } // Index returns the fill list for the column func (c *columnuint32) Index() *bitmap.Bitmap { return &c.fill } // Value retrieves a value at a specified index func (c *columnuint32) Value(idx uint32) (v interface{}, ok bool) { v = uint32(0) if idx < uint32(len(c.data)) && c.fill.Contains(idx) { v, ok = c.data[idx], true } return } // LoadFloat64 retrieves a float64 value at a specified index func (c *columnuint32) LoadFloat64(idx uint32) (v float64, ok bool) { if idx < uint32(len(c.data)) && c.fill.Contains(idx) { v, ok = float64(c.data[idx]), true } return } // LoadInt64 retrieves an int64 value at a specified index func (c *columnuint32) LoadInt64(idx uint32) (v int64, ok bool) { if idx < uint32(len(c.data)) && c.fill.Contains(idx) { v, ok = int64(c.data[idx]), true } return } // LoadUint64 retrieves an uint64 value at a specified index func (c *columnuint32) LoadUint64(idx uint32) (v uint64, ok bool) { if idx < uint32(len(c.data)) && c.fill.Contains(idx) { v, ok = uint64(c.data[idx]), true } return } // FilterFloat64 filters down the values based on the specified predicate. func (c *columnuint32) FilterFloat64(offset uint32, index bitmap.Bitmap, predicate func(v float64) bool) { index.And(c.fill[offset>>6 : int(offset>>6)+len(index)]) index.Filter(func(idx uint32) bool { idx = offset + idx return idx < uint32(len(c.data)) && predicate(float64(c.data[idx])) }) } // FilterInt64 filters down the values based on the specified predicate. func (c *columnuint32) FilterInt64(offset uint32, index bitmap.Bitmap, predicate func(v int64) bool) { index.And(c.fill[offset>>6 : int(offset>>6)+len(index)]) index.Filter(func(idx uint32) (match bool) { idx = offset + idx return idx < uint32(len(c.data)) && predicate(int64(c.data[idx])) }) } // FilterUint64 filters down the values based on the specified predicate. 
func (c *columnuint32) FilterUint64(offset uint32, index bitmap.Bitmap, predicate func(v uint64) bool) { index.And(c.fill[offset>>6 : int(offset>>6)+len(index)]) index.Filter(func(idx uint32) (match bool) { idx = offset + idx return idx < uint32(len(c.data)) && predicate(uint64(c.data[idx])) }) } // --------------------------- Cursor Update ---------------------------- // SetUint32 updates a column value for the current item. The actual operation // will be queued and executed once the current the transaction completes. func (cur *Cursor) SetUint32(value uint32) { cur.update.PutUint32(commit.Put, cur.idx, value) } // AddUint32 atomically increments/decrements the current value by the specified amount. Note // that this only works for numerical values and the type of the value must match. func (cur *Cursor) AddUint32(amount uint32) { cur.update.PutUint32(commit.Add, cur.idx, amount) } // SetUint32At updates a specified column value for the current item. The actual operation // will be queued and executed once the current the transaction completes. func (cur *Cursor) SetUint32At(column string, value uint32) { cur.txn.bufferFor(column).PutUint32(commit.Put, cur.idx, value) } // AddUint32At atomically increments/decrements the column value by the specified amount. Note // that this only works for numerical values and the type of the value must match. 
func (cur *Cursor) AddUint32At(column string, amount uint32) { cur.txn.bufferFor(column).PutUint32(commit.Add, cur.idx, amount) } // --------------------------- Uint64s ---------------------------- // columnUint64 represents a generic column type columnuint64 struct { fill bitmap.Bitmap // The fill-list data []uint64 // The actual values } // makeUint64s creates a new vector for Uint64s func makeUint64s() Column { return &columnuint64{ fill: make(bitmap.Bitmap, 0, 4), data: make([]uint64, 0, 64), } } // Grow grows the size of the column until we have enough to store func (c *columnuint64) Grow(idx uint32) { if idx < uint32(len(c.data)) { return } if idx < uint32(cap(c.data)) { c.fill.Grow(idx) c.data = c.data[:idx+1] return } c.fill.Grow(idx) clone := make([]uint64, idx+1, capacityFor(idx+1)) copy(clone, c.data) c.data = clone } // Apply applies a set of operations to the column. func (c *columnuint64) Apply(r *commit.Reader) { for r.Next() { switch r.Type { case commit.Put: c.fill[r.Offset>>6] |= 1 << (r.Offset & 0x3f) c.data[r.Offset] = r.Uint64() // If this is an atomic increment/decrement, we need to change the operation to // the final value, since after this update an index needs to be recalculated. case commit.Add: c.fill[r.Offset>>6] |= 1 << (r.Offset & 0x3f) value := c.data[r.Offset] + r.Uint64() c.data[r.Offset] = value r.SwapUint64(value) case commit.Delete: c.fill.Remove(r.Index()) } } } // Contains checks whether the column has a value at a specified index. 
func (c *columnuint64) Contains(idx uint32) bool { return c.fill.Contains(idx) } // Index returns the fill list for the column func (c *columnuint64) Index() *bitmap.Bitmap { return &c.fill } // Value retrieves a value at a specified index func (c *columnuint64) Value(idx uint32) (v interface{}, ok bool) { v = uint64(0) if idx < uint32(len(c.data)) && c.fill.Contains(idx) { v, ok = c.data[idx], true } return } // LoadFloat64 retrieves a float64 value at a specified index func (c *columnuint64) LoadFloat64(idx uint32) (v float64, ok bool) { if idx < uint32(len(c.data)) && c.fill.Contains(idx) { v, ok = float64(c.data[idx]), true } return } // LoadInt64 retrieves an int64 value at a specified index func (c *columnuint64) LoadInt64(idx uint32) (v int64, ok bool) { if idx < uint32(len(c.data)) && c.fill.Contains(idx) { v, ok = int64(c.data[idx]), true } return } // LoadUint64 retrieves an uint64 value at a specified index func (c *columnuint64) LoadUint64(idx uint32) (v uint64, ok bool) { if idx < uint32(len(c.data)) && c.fill.Contains(idx) { v, ok = uint64(c.data[idx]), true } return } // FilterFloat64 filters down the values based on the specified predicate. func (c *columnuint64) FilterFloat64(offset uint32, index bitmap.Bitmap, predicate func(v float64) bool) { index.And(c.fill[offset>>6 : int(offset>>6)+len(index)]) index.Filter(func(idx uint32) bool { idx = offset + idx return idx < uint32(len(c.data)) && predicate(float64(c.data[idx])) }) } // FilterInt64 filters down the values based on the specified predicate. func (c *columnuint64) FilterInt64(offset uint32, index bitmap.Bitmap, predicate func(v int64) bool) { index.And(c.fill[offset>>6 : int(offset>>6)+len(index)]) index.Filter(func(idx uint32) (match bool) { idx = offset + idx return idx < uint32(len(c.data)) && predicate(int64(c.data[idx])) }) } // FilterUint64 filters down the values based on the specified predicate. 
func (c *columnuint64) FilterUint64(offset uint32, index bitmap.Bitmap, predicate func(v uint64) bool) { index.And(c.fill[offset>>6 : int(offset>>6)+len(index)]) index.Filter(func(idx uint32) (match bool) { idx = offset + idx return idx < uint32(len(c.data)) && predicate(uint64(c.data[idx])) }) } // --------------------------- Cursor Update ---------------------------- // SetUint64 updates a column value for the current item. The actual operation // will be queued and executed once the current the transaction completes. func (cur *Cursor) SetUint64(value uint64) { cur.update.PutUint64(commit.Put, cur.idx, value) } // AddUint64 atomically increments/decrements the current value by the specified amount. Note // that this only works for numerical values and the type of the value must match. func (cur *Cursor) AddUint64(amount uint64) { cur.update.PutUint64(commit.Add, cur.idx, amount) } // SetUint64At updates a specified column value for the current item. The actual operation // will be queued and executed once the current the transaction completes. func (cur *Cursor) SetUint64At(column string, value uint64) { cur.txn.bufferFor(column).PutUint64(commit.Put, cur.idx, value) } // AddUint64At atomically increments/decrements the column value by the specified amount. Note // that this only works for numerical values and the type of the value must match. func (cur *Cursor) AddUint64At(column string, amount uint64) { cur.txn.bufferFor(column).PutUint64(commit.Add, cur.idx, amount) }
column_numbers.go
0.784773
0.501343
column_numbers.go
starcoder
package client import ( "encoding/json" ) // IsFirstLoginResult struct for IsFirstLoginResult type IsFirstLoginResult struct { IsFirstLogin bool `json:"isFirstLogin"` } // NewIsFirstLoginResult instantiates a new IsFirstLoginResult object // This constructor will assign default values to properties that have it defined, // and makes sure properties required by API are set, but the set of arguments // will change when the set of required properties is changed func NewIsFirstLoginResult(isFirstLogin bool) *IsFirstLoginResult { this := IsFirstLoginResult{} this.IsFirstLogin = isFirstLogin return &this } // NewIsFirstLoginResultWithDefaults instantiates a new IsFirstLoginResult object // This constructor will only assign default values to properties that have it defined, // but it doesn't guarantee that properties required by API are set func NewIsFirstLoginResultWithDefaults() *IsFirstLoginResult { this := IsFirstLoginResult{} return &this } // GetIsFirstLogin returns the IsFirstLogin field value func (o *IsFirstLoginResult) GetIsFirstLogin() bool { if o == nil { var ret bool return ret } return o.IsFirstLogin } // GetIsFirstLoginOk returns a tuple with the IsFirstLogin field value // and a boolean to check if the value has been set. 
func (o *IsFirstLoginResult) GetIsFirstLoginOk() (*bool, bool) { if o == nil { return nil, false } return &o.IsFirstLogin, true } // SetIsFirstLogin sets field value func (o *IsFirstLoginResult) SetIsFirstLogin(v bool) { o.IsFirstLogin = v } func (o IsFirstLoginResult) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} if true { toSerialize["isFirstLogin"] = o.IsFirstLogin } return json.Marshal(toSerialize) } type NullableIsFirstLoginResult struct { value *IsFirstLoginResult isSet bool } func (v NullableIsFirstLoginResult) Get() *IsFirstLoginResult { return v.value } func (v *NullableIsFirstLoginResult) Set(val *IsFirstLoginResult) { v.value = val v.isSet = true } func (v NullableIsFirstLoginResult) IsSet() bool { return v.isSet } func (v *NullableIsFirstLoginResult) Unset() { v.value = nil v.isSet = false } func NewNullableIsFirstLoginResult(val *IsFirstLoginResult) *NullableIsFirstLoginResult { return &NullableIsFirstLoginResult{value: val, isSet: true} } func (v NullableIsFirstLoginResult) MarshalJSON() ([]byte, error) { return json.Marshal(v.value) } func (v *NullableIsFirstLoginResult) UnmarshalJSON(src []byte) error { v.isSet = true return json.Unmarshal(src, &v.value) }
client/model_is_first_login_result.go
0.63443
0.40031
model_is_first_login_result.go
starcoder
package main import "math" type Sphere struct { material Material transform Matrix } func MakeSphere() Sphere { return Sphere{ material: MakeMaterial(), transform: IdentityMatrix4, } } func MakeSphereTransformed(transform Matrix) Sphere { return Sphere{ material: MakeMaterial(), transform: transform, } } // Get the values of t at which the given ray intersects the sphere. func (s Sphere) Intersect(ray Ray) Intersections { // Apply the sphere's transformations by applying their inverse to the ray. ray = ray.Transform(s.transform.Inverted()) // Assume the sphere is at the origin. sphereToRay := ray.Origin.Subtract(MakePoint(0, 0, 0)) a := ray.Direction.Dot(ray.Direction) b := 2 * ray.Direction.Dot(sphereToRay) c := sphereToRay.Dot(sphereToRay) - 1 discriminant := (b * b) - (4 * a * c) if discriminant < 0 { return Intersections{} } discriminantRoot := math.Sqrt(discriminant) t1 := (-b - discriminantRoot) / (2 * a) t2 := (-b + discriminantRoot) / (2 * a) return Intersections{ MakeIntersection(t1, s), MakeIntersection(t2, s), } } // Get the material used by the sphere. func (s Sphere) Material() Material { return s.material } // Get the normal vector at a point on the surface of a sphere. This point is // given in world space (as opposed to object space). func (s Sphere) NormalAt(worldPoint Tuple) Tuple { objectPoint := s.transform.Inverted().TupleMultiply(worldPoint) // We're subtracting the origin of the sphere, which is always the origin in // object space. objectNormal := objectPoint.Subtract(MakePoint(0, 0, 0)) worldNormal := s.transform.Inverted().Transposed().TupleMultiply(objectNormal) // Since we should have ignored the 4th row and column of the matrix in the // computation above, the 4th row (which includes w for our tuple) may have // been messed with. To compensate for this, we manually set w to 0, which // represents a vector. worldNormal.W = 0 return worldNormal.Normalized() } // Get the sphere's transformation matrix. 
func (s Sphere) Transform() Matrix { return s.transform }
sphere.go
0.88355
0.456107
sphere.go
starcoder
package samples func init() { sampleDataBlindTransferOperation[3] = `{ "fee": { "amount": 1500000, "asset_id": "1.3.0" }, "inputs": [ { "commitment": "02718abb1076837e57c484cfdde4b4393ea9a13eeb279f2aaca3c6188aa69bde44", "owner": { "account_auths": [], "address_auths": [], "key_auths": [ [ "<KEY>", 1 ] ], "weight_threshold": 1 } }, { "commitment": "0321b67a372bb6671caa0b169ad011a3dcd2720df927e1e4a9bc4347160b1d3d1c", "owner": { "account_auths": [], "address_auths": [], "key_auths": [ [ "<KEY>", 1 ] ], "weight_threshold": 1 } }, { "commitment": "0338de06676093bf972fe4d586a8a9ec5e9ccfb20158a8f55423a9bb4c8c78c98a", "owner": { "account_auths": [], "address_auths": [], "key_auths": [ [ "BTS591sHpM8U5wbqficKnroneQ5wcbMNM9GReNFYiDY9QvuH3gYS3", 1 ] ], "weight_threshold": 1 } }, { "commitment": "0391da2125adeaed7d68d301e366ea6290aa7f485cd2fbede3013c22aaf13a6b65", "owner": { "account_auths": [], "address_auths": [], "key_auths": [ [ "<KEY>", 1 ] ], "weight_threshold": 1 } }, { "commitment": "0394b4ef1e1708a9f012cb0444b0e150111165c2eab6cc94dbe6baeaecb12652dd", "owner": { "account_auths": [], "address_auths": [], "key_auths": [ [ "<KEY>", 1 ] ], "weight_threshold": 1 } } ], "outputs": [ { "commitment": "02012fe0b061da824d45ae2dc370f3a75f41d0a31cf562b324cbb13040e5844abd", "owner": { "account_auths": [], "address_auths": [], "key_auths": [], "weight_threshold": 0 }, "range_proof": "" } ] }` } //end of file
gen/samples/blindtransferoperation_3.go
0.561816
0.427935
blindtransferoperation_3.go
starcoder
package encoder import ( "reflect" "regexp" "strings" "sync" yaml "gopkg.in/yaml.v3" ) const ( // HeadComment populates `yaml.Node` `HeadComment`. HeadComment = iota // LineComment populates `yaml.Node` `LineComment`. LineComment // FootComment populates `yaml.Node` `FootComment`. FootComment ) // Doc represents a struct documentation rendered from comments by docgen. type Doc struct { // Comments stores foot, line and head comments. Comments [3]string // Fields contains fields documentation if related item is a struct. Fields []Doc // Examples list of example values for the item. Examples []*Example // Values is only used to render valid values list in the documentation. Values []string // Description represents the full description for the item. Description string // Name represents struct name or field name. Name string // Type represents struct name or field type. Type string // Note is rendered as a note for the example in markdown file. Note string // AppearsIn describes back references for the type. AppearsIn []Appearance EnumFields []string PartDefinitions []KeyValue } type KeyValue struct { Key string Value string } // AddExample adds a new example snippet to the doc. func (d *Doc) AddExample(name string, value interface{}) { if d.Examples == nil { d.Examples = []*Example{} } d.Examples = append(d.Examples, &Example{ Name: name, value: value, }) } // Describe returns a field description. func (d *Doc) Describe(field string, short bool) string { desc := "" for _, f := range d.Fields { if f.Name == field { desc = f.Description } } if short { desc = strings.Split(desc, "\n")[0] } return desc } // Example represents one example snippet for a type. type Example struct { populate sync.Once Name string valueMutex sync.RWMutex value interface{} } // Populate populates example value. 
func (e *Example) Populate(index int) { e.populate.Do(func() { if reflect.TypeOf(e.value).Kind() != reflect.Ptr { return } v := reflect.ValueOf(e.value).Elem() defaultValue := getExample(v, getDoc(e.value), index) e.valueMutex.Lock() defer e.valueMutex.Unlock() if defaultValue != nil { v.Set(defaultValue.Convert(v.Type())) } populateNestedExamples(v, index) }) } // GetValue returns example value. func (e *Example) GetValue() interface{} { e.valueMutex.RLock() defer func() { e.valueMutex.RUnlock() }() return e.value } // GetName returns the name of the example func (e *Example) GetName() string { return e.Name } // Field gets field from the list of fields. func (d *Doc) Field(i int) *Doc { if i < len(d.Fields) { return &d.Fields[i] } return nil } // Appearance of a type in a different type. type Appearance struct { TypeName string FieldName string } // Documented is used to check if struct has any documentation defined for it. type Documented interface { // Doc requests documentation object. 
Doc() *Doc } func mergeDoc(a, b *Doc) *Doc { var res Doc if a != nil { res = *a } if b == nil { return &res } for i, comment := range b.Comments { if comment != "" { res.Comments[i] = comment } } if len(res.Examples) == 0 { res.Examples = b.Examples } return &res } func getDoc(in interface{}) *Doc { v := reflect.ValueOf(in) if v.Kind() == reflect.Ptr && v.IsNil() { in = reflect.New(v.Type().Elem()).Interface() } if d, ok := in.(Documented); ok { return d.Doc() } return nil } func addComments(node *yaml.Node, doc *Doc, comments ...int) { if doc != nil { dest := []*string{ &node.HeadComment, &node.LineComment, &node.FootComment, } if len(comments) == 0 { comments = []int{ HeadComment, LineComment, FootComment, } } for _, i := range comments { if doc.Comments[i] != "" { *dest[i] = doc.Comments[i] } } } } //nolint:gocyclo func renderExample(key string, doc *Doc, flags CommentsFlags) string { if doc == nil { return "" } examples := []string{} for i, e := range doc.Examples { v := reflect.ValueOf(e.GetValue()) if isEmpty(v) { continue } if v.Kind() != reflect.Ptr { v = reflect.Indirect(v) } defaultValue := v.Interface() e.Populate(i) node, err := toYamlNode(defaultValue, flags) if err != nil { continue } if key != "" { node, err = toYamlNode(map[string]*yaml.Node{ key: node, }, flags) if err != nil { continue } } if i == 0 && flags.enabled(CommentsDocs) { addComments(node, doc, HeadComment, LineComment) } // replace head comment with line comment if node.HeadComment == "" { node.HeadComment = node.LineComment } node.LineComment = "" if e.Name != "" { if node.HeadComment != "" { node.HeadComment += "\n\n" } node.HeadComment = node.HeadComment + e.Name + "\n" } data, err := yaml.Marshal(node) if err != nil { continue } if key == "" { // re-indent data = regexp.MustCompile(`(?m)^(.)`).ReplaceAll(data, []byte(" $1")) } else { // don't collapse comment data = regexp.MustCompile(`(?m)^#`).ReplaceAll(data, []byte("# #")) } examples = append(examples, string(data)) } return 
strings.Join(examples, "") } func getExample(v reflect.Value, doc *Doc, index int) *reflect.Value { if doc == nil || len(doc.Examples) == 0 { return nil } numExamples := len(doc.Examples) if index >= numExamples { index = numExamples - 1 } defaultValue := reflect.ValueOf(doc.Examples[index].GetValue()) if !isEmpty(defaultValue) { if v.Kind() != reflect.Ptr && defaultValue.Kind() == reflect.Ptr { defaultValue = defaultValue.Elem() } } return &defaultValue } //nolint:gocyclo func populateNestedExamples(v reflect.Value, index int) { //nolint:exhaustive switch v.Kind() { case reflect.Struct: doc := getDoc(v.Interface()) for i := 0; i < v.NumField(); i++ { field := v.Field(i) if !field.CanInterface() { continue } if doc != nil && i < len(doc.Fields) { defaultValue := getExample(field, doc.Field(i), index) if defaultValue != nil { field.Set(defaultValue.Convert(field.Type())) } } populateNestedExamples(field, index) } case reflect.Map: for _, key := range v.MapKeys() { populateNestedExamples(v.MapIndex(key), index) } case reflect.Slice: for i := 0; i < v.Len(); i++ { populateNestedExamples(v.Index(i), index) } } }
encoder/documentation.go
0.714927
0.403214
documentation.go
starcoder
package kinesisfirehoseevt import ( "encoding/json" "time" ) // OutputRecord represents the transformed Amazon Kinesis Firehose record type OutputRecord struct { // The record ID is passed from Amazon Kinesis Firehose to AWS Lambda // during the invocation. The transformed record must contain the same // record ID. Any mismatch between the ID of the original record and the // ID of the transformed record is treated as a data transformation // failure. RecordID string `json:"recordId"` // The status of the data transformation of the record. The possible // values are: // - "Ok": the record was transformed successfully. // - "Dropped": the record was dropped intentionally by your processing // logic. // - "ProcessingFailed": the record could not be transformed. // If a record has a status of "Ok" or "Dropped", // Amazon Kinesis firehose considers it successfully processed. // Otherwise, Amazon Kinesis Firehose considers it unsuccessfully // processed. Result string `json:"result"` // The transformed data payload, after base64-encoding. Data []byte `json:"data"` } // Output represents the result of the processing of Amazon Kinesis Firehose // input records. type Output struct { // Transformed Amazon Kinesis Firehose records. Records []*OutputRecord `json:"records"` } // InputRecord represents the unit of data of an Amazon Kinesis Firehose event. type InputRecord struct { // The unique identifier of the record passed from // Amazon Kinesis Firehose to AWS Lambda. RecordID string // The approximate time that the record was inserted into the // Amazon Kinesis Firehose delivery stream. This is set when a delivery // stream successfully receives and stores a record and is commonly // referred to as a server-side timestamp. It has millisecond precision // and there are no guarantees about its accuracy, or that it is always // increasing. For example, records in a specific // Amazon Kinesis Firehose delivery stream might have timestamps that // are out of order. 
ApproximateArrivalTimestamp time.Time // The data blob. The data in the blob is both opaque and immutable to // the Amazon Kinesis Firehose service, which does not inspect, // interpret, or change the data in the blob in any way. The data blob // consists of any kind of data and the total size must not exceed the // maximum record size (1 MB). // Data is automatically base64 encoded/decoded by the SDK. Data []byte } // Input represents an Amazon Kinesis Firehose delivery stream event and // provides contextual information. type Input struct { // The invocation ID. InvocationID string // The ARN of the Amazon Kinesis Firehose. DeliveryStreamARN string // The AWS region where the event originated. Region string // The list of Amazon Kinesis Firehose event records. Records []*InputRecord } // String returns the string representation. func (e *Input) String() string { s, _ := json.Marshal(e) return string(s) } // GoString returns the string representation. func (e *Input) GoString() string { return e.String() } // String returns the string representation. func (e *Output) String() string { s, _ := json.Marshal(e) return string(s) } // GoString returns the string representation. func (e *Output) GoString() string { return e.String() }
service/lambda/runtime/event/kinesisfirehoseevt/definition.go
0.821367
0.517083
definition.go
starcoder
package data import ( "math" ) // Space type Space struct { G float64 Entities []Entity } func NewSpace() *Space { return &Space{G: 6.674e-11} } func (space *Space) AddEntity(entity *Entity) { space.Entities = append(space.Entities, *entity) } func (space *Space) FindByName(name string) *Entity { for _, entity := range space.Entities { if entity.Name != name { continue } return &entity } return nil } func (space *Space) Step() { space.applyGravitationalForces() space.mergeCollidedEntities() } /** * Computes and applies gravitation forces between each entity */ func (space *Space) applyGravitationalForces() { for i := range space.Entities { e1 := &space.Entities[i] totalForce := ZeroVec2() for j := range space.Entities { if i == j { continue } e2 := &space.Entities[j] gravity := computeGravity(space.G, *e1, *e2) direction := UnitVector(&e1.Coords, &e2.Coords) force := direction.TimesScalar(gravity) totalForce = totalForce.Plus(&force) } acceleration := totalForce.Div(e1.Mass) e1.Velocity = e1.Velocity.Plus(&acceleration) e1.Coords = e1.Coords.Plus(&e1.Velocity.Point) } } func computeGravity(G float64, e1 Entity, e2 Entity) float64 { return G * (e1.Mass * e2.Mass) / math.Pow(e1.Coords.DistanceTo(&e2.Coords), 2.0) } /** * Merges existing entities with the provided ones by checking if position in space of two * entities is nearly the same. 
Combines their mass, radius and velocity vector */ func (space *Space) mergeCollidedEntities() { var currentEntities []*Entity for idx := range space.Entities { entity := space.Entities[idx] currentEntities = append(currentEntities, &entity) } var newEntities []*Entity for i := range currentEntities { for j := range currentEntities { e1 := currentEntities[i] e2 := currentEntities[j] if e1 == nil || e2 == nil || e1 == e2 { continue } // skip if entities are too far apart distance := math.Abs(e1.Coords.DistanceTo(&e2.Coords)) if distance >= e1.Radius+e2.Radius { continue } // add new entity mergedEntity := Merge(e1, e2) newEntities = append(newEntities, mergedEntity) // mark merged entities for removal currentEntities[i] = nil currentEntities[j] = nil } } space.Entities = nil // Add all entities that haven't merged for idx := range currentEntities { entity := currentEntities[idx] if entity == nil { continue } space.Entities = append(space.Entities, *entity) } // Add newly merged entities for idx := range newEntities { space.Entities = append(space.Entities, *newEntities[idx]) } }
internal/data/space.go
0.785432
0.461745
space.go
starcoder
// Package m32 math and gl math for floats32 it uses go-gl/mathgl/mgl32 for // certain things package m32 import ( "math" "github.com/go-gl/mathgl/mgl32" ) type ( vec2 = mgl32.Vec2 vec3 = mgl32.Vec3 vec4 = mgl32.Vec4 mat3 = mgl32.Mat3 mat4 = mgl32.Mat4 quat = mgl32.Quat ) /*var ( up = vec3{0, 1, 0} forward = vec3{0, 0, 1} )*/ // Up returns a up vector func Up() vec3 { return vec3{0, 1, 0} } // Down returns a down vector func Down() vec3 { return vec3{0, -1, 0} } // Forward returns a vector facing forward func Forward() vec3 { return vec3{0, 0, 1} } // Backward returns a vector facing backward func Backward() vec3 { return vec3{0, 0, -1} } // Left returns a vector pointing left func Left() vec3 { return vec3{-1, 0, 0} } // Right returns a vector pointing left func Right() vec3 { return vec3{1, 0, 0} } // Whatever I cast I will put the func here // Cos casts values to float64 and uses native math.Cos // to return the cosine of the radian argument x. func Cos(x float32) float32 { return float32(math.Cos(float64(x))) } // Sin casts values to float64 and uses native math.Sin // to return the sine of the radian argument x. func Sin(x float32) float32 { return float32(math.Sin(float64(x))) } // Sincos returns Sin(x), Cos(x). func Sincos(x float32) (float32, float32) { s, c := math.Sincos(float64(x)) return float32(s), float32(c) } // Hypot returns Sqrt(p*p + q*q), taking care to avoid unnecessary overflow and underflow. func Hypot(x, y float32) float32 { return float32(math.Hypot(float64(x), float64(y))) } // Atan2 returns the arc tangent of y/x, using the signs of the two to determine //the quadrant of the return value. func Atan2(y, x float32) float32 { return float32(math.Atan2(float64(y), float64(x))) } // Sqrt returns the square root of x. func Sqrt(x float32) float32 { return float32(math.Sqrt(float64(x))) } // Abs returns the absolute value of x. 
func Abs(x float32) float32 { return float32(math.Abs(float64(x))) } // Copysign returns a value with the magnitude of x and the sign of y. func Copysign(x, y float32) float32 { return float32(math.Copysign(float64(x), float64(y))) } // Max returns the greatest value between a or b func Max(a, b float32) float32 { if a > b { return a } return b } // Min returns the lowest value between a or b func Min(a, b float32) float32 { if a < b { return a } return b } // Ceil returns the .. ceil func Ceil(x float32) float32 { return float32(math.Ceil(float64(x))) } // Limit maintains v between min and max func Limit(v, mn, mx float32) float32 { if v < mn { return mn } else if v > mx { return mx } return v } // Cbrt returns the cube root of x. func Cbrt(x float32) float32 { return float32(math.Cbrt(float64(x))) }
m32/m32.go
0.857037
0.672273
m32.go
starcoder
package keras //Network defines a simple neural network architecture. type Network struct { InputNodes, HiddenNodes, OutputNodes int WeightsIh, WeightsHo, BiasO, BiasH Matrix LearningRate float64 } //Package network implements the // InitNetwork initializes the network with the number of nodes and the learning rate. func InitNetwork(InputNodes, HiddenNodes, OutputNodes int, lr float64) Network { WeightsIh := RandomMatrix(HiddenNodes, InputNodes) WeightsHo := RandomMatrix(OutputNodes, HiddenNodes) BiasO := RandomMatrix(OutputNodes, 1) BiasH := RandomMatrix(HiddenNodes, 1) return Network{InputNodes: InputNodes, HiddenNodes: HiddenNodes, OutputNodes: OutputNodes, WeightsIh: WeightsIh, WeightsHo: WeightsHo, BiasH: BiasH, BiasO: BiasO, LearningRate: lr, } } // Train performs the training. func (n *Network) Train(inputArray, targetArray []float64) { inputs := FromArray(inputArray) hidden := n.WeightsIh.Multiply(inputs) hidden.Add(n.BiasH) hidden.MapFunc(Sigmoid) output := n.WeightsHo.Multiply(hidden) output.Add(n.BiasO) output.MapFunc(Sigmoid) //Turn targets into targets := FromArray(targetArray) //Calculate error->still a matrix of values. 
absErrors := targets.Subtract(output) //Calculate gradient gradients := output.MapFunc(SigmoidPrime) gradients.Multiply(absErrors) gradients.ScalarAdition(n.LearningRate) //Derivatives devHidden := Transpose(hidden) WeightsHoDerivative := gradients.Multiply(devHidden) // Adjust the weights by deltas n.WeightsHo.Add(WeightsHoDerivative) n.BiasO.Add(gradients) // Calculate the hidden layer errors hiddenlayerError := Transpose(n.WeightsHo) hiddenErrors := hiddenlayerError.Multiply(absErrors) hiddenG := hidden.MapFunc(Sigmoid) hiddenG.Multiply(hiddenErrors) hiddenG.ScalarMultiplication(n.LearningRate) inputsTranspose := Transpose(inputs) weightIHDeltas := hiddenG.Multiply(inputsTranspose) n.WeightsIh.Add(weightIHDeltas) n.BiasH.Add(hiddenG) PrintByRows(output) PrintByRows(targets) } //Predict returns the model's prediction based on inputArray func (n *Network) Predict(inputArray []float64) []float64 { inputs := FromArray(inputArray) hidden := n.WeightsIh.Multiply(inputs) hidden.Add(n.BiasH) hidden.MapFunc(Sigmoid) output := n.WeightsHo.Multiply(hidden) output.Add(n.BiasO) output.MapFunc(Sigmoid) return ToArray(output) }
keras/neuralnetwork.go
0.883406
0.766665
neuralnetwork.go
starcoder
package named // ParamSet represents a set of parameters for a single query type ParamSet struct { // does this engine support named parameters? hasNamedSupport bool // the set of currently tracked named parameters namedParams map[string]Param // the locations of each of the named parameters namedLocs map[string][]int // a map of positions currently used positionToName map[int]string // argn keeps track of the last checked positional parameter used argn int } func (p *ParamSet) nextArgNum() int { for { if _, ok := p.positionToName[p.argn]; !ok { return p.argn } p.argn++ } } // Add adds a parameter to this set and returns the numbered location used for it func (p *ParamSet) Add(param Param) int { name := param.name existing, ok := p.namedParams[name] p.namedParams[name] = mergeParam(existing, param) if ok && p.hasNamedSupport { return p.namedLocs[name][0] } argn := p.nextArgNum() p.positionToName[argn] = name p.namedLocs[name] = append(p.namedLocs[name], argn) return argn } // FetchMerge fetches an indexed parameter, and merges `mergeP` into it // Returns: the merged parameter and whether it was a named parameter func (p *ParamSet) FetchMerge(idx int, mergeP Param) (param Param, isNamed bool) { name, exists := p.positionToName[idx] if !exists || name == "" { return mergeP, false } param, ok := p.namedParams[name] if !ok { return mergeP, false } return mergeParam(param, mergeP), true } // NewParamSet creates a set of parameters with the given list of already used positions func NewParamSet(positionsUsed map[int]bool, hasNamedSupport bool) *ParamSet { positionToName := make(map[int]string, len(positionsUsed)) for index, used := range positionsUsed { if !used { continue } // assume the previously used params have no name positionToName[index] = "" } return &ParamSet{ argn: 1, namedParams: make(map[string]Param), namedLocs: make(map[string][]int), hasNamedSupport: hasNamedSupport, positionToName: positionToName, } }
internal/sql/named/param_set.go
0.82994
0.40392
param_set.go
starcoder
package parsing import ( "errors" "fmt" "github.com/arnodel/golua/luastrings" "github.com/arnodel/golua/ops" "github.com/arnodel/golua/token" "github.com/arnodel/golua/ast" ) // Parser can parse lua statements or expressions type Parser struct { scanner Scanner } type Scanner interface { Scan() *token.Token ErrorMsg() string } type Error struct { Got *token.Token Expected string } func (e Error) Error() string { expected := e.Expected if e.Got.Type == token.INVALID { expected = "invalid token: " + expected } else if e.Got.Type == token.UNFINISHED { expected = "unexpected <eof>" } else if expected == "" { expected = "unexpected symbol" } else { expected = "expected " + expected } var tok string if e.Got.Type == token.EOF { tok = "<eof>" } else { tok = luastrings.Quote(string(e.Got.Lit), '\'') } return fmt.Sprintf("%d:%d: %s near %s", e.Got.Line, e.Got.Column, expected, tok) } // ParseExp takes in a function that returns tokens and builds an ExpNode for it // (or returns an error). func ParseExp(scanner Scanner) (exp ast.ExpNode, err error) { defer func() { if r := recover(); r != nil { exp = nil var ok bool err, ok = r.(error) if !ok { err = errors.New("Unknown error") } } }() parser := &Parser{scanner} var t *token.Token exp, t = parser.Exp(parser.Scan()) expectType(t, token.EOF, "<eof>") return } // ParseChunk takes in a function that returns tokens and builds a BlockStat for it // (or returns an error). func ParseChunk(scanner Scanner) (stat ast.BlockStat, err error) { defer func() { if r := recover(); r != nil { stat = ast.BlockStat{} var ok bool err, ok = r.(error) if !ok { err = errors.New("Unknown error") } } }() parser := &Parser{scanner} var t *token.Token stat, t = parser.Block(parser.Scan()) expectType(t, token.EOF, "<eof>") return } // Scan returns the next token. func (p *Parser) Scan() *token.Token { tok := p.scanner.Scan() if tok.Type == token.INVALID { panic(Error{Got: tok, Expected: p.scanner.ErrorMsg()}) } return tok } // Stat parses any statement. 
func (p *Parser) Stat(t *token.Token) (ast.Stat, *token.Token) { switch t.Type { case token.SgSemicolon: return ast.NewEmptyStat(t), p.Scan() case token.KwBreak: return ast.NewBreakStat(t), p.Scan() case token.KwGoto: dest := p.Scan() expectIdent(dest) return ast.NewGotoStat(t, ast.NewName(dest)), p.Scan() case token.KwDo: stat, closer := p.Block(p.Scan()) expectType(closer, token.KwEnd, "'end'") return stat, p.Scan() case token.KwWhile: cond, doTok := p.Exp(p.Scan()) expectType(doTok, token.KwDo, "'do'") body, endTok := p.Block(p.Scan()) expectType(endTok, token.KwEnd, "'end'") return ast.NewWhileStat(t, endTok, cond, body), p.Scan() case token.KwRepeat: body, untilTok := p.Block(p.Scan()) expectType(untilTok, token.KwUntil, "'until'") cond, next := p.Exp(p.Scan()) return ast.NewRepeatStat(t, body, cond), next case token.KwIf: return p.If(t) case token.KwFor: return p.For(t) case token.KwFunction: return p.FunctionStat(t) case token.KwLocal: return p.Local(t) case token.SgDoubleColon: name, t := p.Name(p.Scan()) expectType(t, token.SgDoubleColon, "'::'") return ast.NewLabelStat(name), p.Scan() default: var exp ast.ExpNode exp, t = p.PrefixExp(t) switch e := exp.(type) { case ast.Stat: // This is a function call return e, t case ast.Var: // This should be the start of 'varlist = explist' vars := []ast.Var{e} var pexp ast.ExpNode for t.Type == token.SgComma { pexp, t = p.PrefixExp(p.Scan()) if v, ok := pexp.(ast.Var); ok { vars = append(vars, v) } else { tokenError(t, "expected variable") } } expectType(t, token.SgAssign, "'='") exps, t := p.ExpList(p.Scan()) return ast.NewAssignStat(vars, exps), t default: tokenError(t, "") } } return nil, nil } // If parses an if / then / else statement. It assumes that t is the "if" // token. 
func (p *Parser) If(t *token.Token) (ast.IfStat, *token.Token) { cond, thenTok := p.Exp(p.Scan()) expectType(thenTok, token.KwThen, "'then'") thenBlock, endTok := p.Block(p.Scan()) ifStat := ast.NewIfStat(t, cond, thenBlock) for { switch endTok.Type { case token.KwElseIf: cond, thenTok = p.Exp(p.Scan()) expectType(thenTok, token.KwThen, "'then'") thenBlock, endTok = p.Block(p.Scan()) ifStat = ifStat.AddElseIf(cond, thenBlock) case token.KwEnd: return ifStat, p.Scan() case token.KwElse: elseBlock, elseTok := p.Block(p.Scan()) expectType(elseTok, token.KwEnd, "'end'") ifStat = ifStat.WithElse(endTok, elseBlock) return ifStat, p.Scan() default: tokenError(t, "'elseif' or 'end' or 'else'") } } } // For parses a for in / for = statement. It assumes that t is the "for" token. func (p *Parser) For(t *token.Token) (ast.Stat, *token.Token) { name, nextTok := p.Name(p.Scan()) if nextTok.Type == token.SgAssign { // Parse for Name = ... params := make([]ast.ExpNode, 3) params[0], nextTok = p.Exp(p.Scan()) expectType(nextTok, token.SgComma, "','") params[1], nextTok = p.Exp(p.Scan()) if nextTok.Type == token.SgComma { params[2], nextTok = p.Exp(p.Scan()) } else { params[2] = ast.NewInt(1) } expectType(nextTok, token.KwDo, "'do'") body, endTok := p.Block(p.Scan()) expectType(endTok, token.KwEnd, "'end'") forStat := ast.NewForStat(t, endTok, name, params, body) return forStat, p.Scan() } // Parse for namelist in explist ... 
names := []ast.Name{name} for nextTok.Type == token.SgComma { name, nextTok = p.Name(p.Scan()) names = append(names, name) } expected := "'in'" if len(names) == 1 { expected = "'=' or 'in'" } expectType(nextTok, token.KwIn, expected) exp, nextTok := p.Exp(p.Scan()) params := []ast.ExpNode{exp} for nextTok.Type == token.SgComma { exp, nextTok = p.Exp(p.Scan()) params = append(params, exp) } expectType(nextTok, token.KwDo, "'do'") body, endTok := p.Block(p.Scan()) expectType(endTok, token.KwEnd, "'end'") forInStat := ast.NewForInStat(t, endTok, names, params, body) return forInStat, p.Scan() } // Local parses a "local" statement (function definition of variable // declaration). It assumes that t is the "local" token. func (p *Parser) Local(*token.Token) (ast.Stat, *token.Token) { t := p.Scan() if t.Type == token.KwFunction { name, t := p.Name(p.Scan()) fx, t := p.FunctionDef(t) return ast.NewLocalFunctionStat(name, fx), t } // local namelist ['=' explist] nameAttrib, t := p.NameAttrib(t) nameAttribs := []ast.NameAttrib{nameAttrib} for t.Type == token.SgComma { nameAttrib, t = p.NameAttrib(p.Scan()) nameAttribs = append(nameAttribs, nameAttrib) } var values []ast.ExpNode if t.Type == token.SgAssign { values, t = p.ExpList(p.Scan()) } return ast.NewLocalStat(nameAttribs, values), t } // FunctionStat parses a function definition statement. It assumes that t is the // "function" token. func (p *Parser) FunctionStat(*token.Token) (ast.Stat, *token.Token) { name, t := p.Name(p.Scan()) var v ast.Var = name var method ast.Name for t.Type == token.SgDot { name, t = p.Name(p.Scan()) v = ast.NewIndexExp(v, name.AstString()) } if t.Type == token.SgColon { method, t = p.Name(p.Scan()) } fx, t := p.FunctionDef(t) return ast.NewFunctionStat(v, method, fx), t } // Block parses a block whose starting token (e.g. "do") has already been // consumed. Returns the token that closes the block (e.g. "end"). So the caller // should check that this is the right kind of closing token. 
func (p *Parser) Block(t *token.Token) (ast.BlockStat, *token.Token) { var stats []ast.Stat var next ast.Stat for { switch t.Type { case token.KwReturn: ret, t := p.Return(t) return ast.NewBlockStat(stats, ret), t case token.KwEnd, token.KwElse, token.KwElseIf, token.KwUntil, token.EOF: return ast.NewBlockStat(stats, nil), t default: next, t = p.Stat(t) stats = append(stats, next) } } } // Return parses a return statement. func (p *Parser) Return(*token.Token) ([]ast.ExpNode, *token.Token) { t := p.Scan() switch t.Type { case token.SgSemicolon: return []ast.ExpNode{}, p.Scan() case token.KwEnd, token.KwElse, token.KwElseIf, token.KwUntil, token.EOF: return []ast.ExpNode{}, t default: exps, t := p.ExpList(t) if t.Type == token.SgSemicolon { t = p.Scan() } return exps, t } } type item struct { exp ast.ExpNode op ops.Op tok *token.Token } func mergepop(stack []item, it item) ([]item, item) { i := len(stack) - 1 top := stack[i] top.exp = ast.NewBinOp(top.exp, it.op, it.tok, it.exp) return stack[:i], top } // Exp parses any expression. func (p *Parser) Exp(t *token.Token) (ast.ExpNode, *token.Token) { var exp ast.ExpNode exp, t = p.ShortExp(t) var op ops.Op var opTok *token.Token var stack []item last := item{exp: exp} for t.Type.IsBinOp() { op = binopMap[t.Type] opTok = t exp, t = p.ShortExp(p.Scan()) for len(stack) > 0 { pdiff := op.Precedence() - last.op.Precedence() if pdiff > 0 || (pdiff == 0 && op == ops.OpConcat) { break } stack, last = mergepop(stack, last) } stack = append(stack, last) last = item{exp: exp, op: op, tok: opTok} } // We are left with a stack of strictly increasing precedence for len(stack) > 0 { stack, last = mergepop(stack, last) } return last.exp, t } // ShortExp parses an expression which is either atomic, a unary operation, a // prefix expression or a power operation (right associatively composed). In // other words, any expression that doesn't contain a binary operator. 
func (p *Parser) ShortExp(t *token.Token) (ast.ExpNode, *token.Token) { var exp ast.ExpNode switch t.Type { case token.KwNil: exp, t = ast.NewNil(t), p.Scan() case token.KwTrue: exp, t = ast.True(t), p.Scan() case token.KwFalse: exp, t = ast.False(t), p.Scan() case token.NUMDEC, token.NUMHEX: n, err := ast.NewNumber(t) if err != nil { panic(err) } exp, t = n, p.Scan() case token.STRING: s, err := ast.NewString(t) if err != nil { panic(err) } exp, t = s, p.Scan() case token.LONGSTRING: exp, t = ast.NewLongString(t), p.Scan() case token.SgOpenBrace: exp, t = p.TableConstructor(t) case token.SgEtc: exp, t = ast.NewEtc(t), p.Scan() case token.KwFunction: exp, t = p.FunctionDef(p.Scan()) case token.SgMinus, token.KwNot, token.SgHash, token.SgTilde: // A unary operator! opTok := t exp, t = p.ShortExp(p.Scan()) exp = ast.NewUnOp(opTok, unopMap[opTok.Type], exp) default: exp, t = p.PrefixExp(t) } if t.Type == token.SgHat { var pow ast.ExpNode pow, t = p.ShortExp(p.Scan()) exp = ast.NewBinOp(exp, ops.OpPow, t, pow) } return exp, t } var unopMap = map[token.Type]ops.Op{ token.SgMinus: ops.OpNeg, token.KwNot: ops.OpNot, token.SgHash: ops.OpLen, token.SgTilde: ops.OpBitNot, } var binopMap = map[token.Type]ops.Op{ token.KwOr: ops.OpOr, token.KwAnd: ops.OpAnd, token.SgLess: ops.OpLt, token.SgLessEqual: ops.OpLeq, token.SgGreater: ops.OpGt, token.SgGreaterEqual: ops.OpGeq, token.SgEqual: ops.OpEq, token.SgNotEqual: ops.OpNeq, token.SgPipe: ops.OpBitOr, token.SgTilde: ops.OpBitXor, token.SgAmpersand: ops.OpBitAnd, token.SgShiftLeft: ops.OpShiftL, token.SgShiftRight: ops.OpShiftR, token.SgConcat: ops.OpConcat, token.SgPlus: ops.OpAdd, token.SgMinus: ops.OpSub, token.SgStar: ops.OpMul, token.SgSlash: ops.OpDiv, token.SgSlashSlash: ops.OpFloorDiv, token.SgPct: ops.OpMod, token.SgHat: ops.OpPow, } // FunctionDef parses a function definition expression. 
func (p *Parser) FunctionDef(startTok *token.Token) (ast.Function, *token.Token) { expectType(startTok, token.SgOpenBkt, "'('") t := p.Scan() var names []ast.Name hasEtc := false ParamsLoop: for { switch t.Type { case token.IDENT: names = append(names, ast.NewName(t)) t = p.Scan() if t.Type != token.SgComma { break ParamsLoop } t = p.Scan() case token.SgEtc: hasEtc = true t = p.Scan() break ParamsLoop case token.SgCloseBkt: break ParamsLoop default: tokenError(t, "") } } expectType(t, token.SgCloseBkt, "')'") body, endTok := p.Block(p.Scan()) expectType(endTok, token.KwEnd, "'end'") def := ast.NewFunction(startTok, endTok, ast.NewParList(names, hasEtc), body) return def, p.Scan() } // PrefixExp parses an expression made of a name or and expression in brackets // followed by zero or more indexing operations or function applications. func (p *Parser) PrefixExp(t *token.Token) (ast.ExpNode, *token.Token) { var exp ast.ExpNode switch t.Type { case token.SgOpenBkt: exp, t = p.Exp(p.Scan()) if f, ok := exp.(ast.FunctionCall); ok { exp = f.InBrackets() } expectType(t, token.SgCloseBkt, "')'") case token.IDENT: exp = ast.NewName(t) default: tokenError(t, "") } t = p.Scan() for { switch t.Type { case token.SgOpenSquareBkt: var idxExp ast.ExpNode idxExp, t = p.Exp(p.Scan()) expectType(t, token.SgCloseSquareBkt, "']'") t = p.Scan() exp = ast.NewIndexExp(exp, idxExp) case token.SgDot: var name ast.Name name, t = p.Name(p.Scan()) exp = ast.NewIndexExp(exp, name.AstString()) case token.SgColon: var name ast.Name var args []ast.ExpNode name, t = p.Name(p.Scan()) args, t = p.Args(t) if args == nil { tokenError(t, "expected function arguments") } exp = ast.NewFunctionCall(exp, name, args) default: var args []ast.ExpNode args, t = p.Args(t) if args == nil { return exp, t } exp = ast.NewFunctionCall(exp, ast.Name{}, args) } } } // Args parses the arguments of a function call. It returns nil rather than // panicking if it couldn't parse arguments. 
func (p *Parser) Args(t *token.Token) ([]ast.ExpNode, *token.Token) { switch t.Type { case token.SgOpenBkt: t = p.Scan() if t.Type == token.SgCloseBkt { return []ast.ExpNode{}, p.Scan() } args, t := p.ExpList(t) expectType(t, token.SgCloseBkt, "')'") return args, p.Scan() case token.SgOpenBrace: arg, t := p.TableConstructor(t) return []ast.ExpNode{arg}, t case token.STRING: arg, err := ast.NewString(t) if err != nil { panic(err) } return []ast.ExpNode{arg}, p.Scan() case token.LONGSTRING: return []ast.ExpNode{ast.NewLongString(t)}, p.Scan() } return nil, t } // ExpList parses a comma separated list of expressions. func (p *Parser) ExpList(t *token.Token) ([]ast.ExpNode, *token.Token) { var exp ast.ExpNode exp, t = p.Exp(t) exps := []ast.ExpNode{exp} for t.Type == token.SgComma { exp, t = p.Exp(p.Scan()) exps = append(exps, exp) } return exps, t } // TableConstructor parses a table constructor. func (p *Parser) TableConstructor(opTok *token.Token) (ast.TableConstructor, *token.Token) { t := p.Scan() var fields []ast.TableField if t.Type != token.SgCloseBrace { var field ast.TableField field, t = p.Field(t) fields = []ast.TableField{field} for t.Type == token.SgComma || t.Type == token.SgSemicolon { t = p.Scan() if t.Type == token.SgCloseBrace { break } field, t = p.Field(t) fields = append(fields, field) } } expectType(t, token.SgCloseBrace, "'}'") return ast.NewTableConstructor(opTok, t, fields), p.Scan() } // Field parses a table constructor field. 
func (p *Parser) Field(t *token.Token) (ast.TableField, *token.Token) { var key ast.ExpNode = ast.NoTableKey{} var val ast.ExpNode if t.Type == token.SgOpenSquareBkt { key, t = p.Exp(p.Scan()) expectType(t, token.SgCloseSquareBkt, "']'") expectType(p.Scan(), token.SgAssign, "'='") val, t = p.Exp(p.Scan()) } else { val, t = p.Exp(t) if t.Type == token.SgAssign { if name, ok := val.(ast.Name); !ok { tokenError(t, "") } else { key = name.AstString() val, t = p.Exp(p.Scan()) } } } return ast.NewTableField(key, val), t } // Name parses a name. func (p *Parser) Name(t *token.Token) (ast.Name, *token.Token) { expectIdent(t) return ast.NewName(t), p.Scan() } func (p *Parser) NameAttrib(t *token.Token) (ast.NameAttrib, *token.Token) { name, t := p.Name(t) attrib := ast.NoAttrib var attribName *ast.Name if t.Type == token.SgLess { attribTok := p.Scan() attribName = new(ast.Name) *attribName, t = p.Name(attribTok) switch attribName.Val { case "const": attrib = ast.ConstAttrib case "close": attrib = ast.CloseAttrib default: tokenError(attribTok, "'const' or 'close'") } expectType(t, token.SgGreater, "'>'") t = p.Scan() } return ast.NewNameAttrib(name, attribName, attrib), t } func expectIdent(t *token.Token) { expectType(t, token.IDENT, "name") } func expectType(t *token.Token, tp token.Type, expected string) { if t.Type != tp { panic(Error{Got: t, Expected: expected}) } } func tokenError(t *token.Token, expected string) { panic(Error{Got: t, Expected: expected}) }
parsing/parser.go
0.599954
0.490358
parser.go
starcoder
package vorbis import "errors" // A Decoder stores the information necessary to decode a vorbis steam. type Decoder struct { headerRead bool setupRead bool sampleRate int channels int Bitrate Bitrate blocksize [2]int CommentHeader codebooks []codebook floors []floor residues []residue mappings []mapping modes []mode overlap []float32 hasOverlap bool overlapShort bool windows [2][]float32 lookup [2]imdctLookup residueBuffer [][]float32 rawBuffer [][]float32 } // The Bitrate of a vorbis stream. // Some or all of the fields can be zero. type Bitrate struct { Nominal int Minimum int Maximum int } // The CommentHeader of a vorbis stream. type CommentHeader struct { Vendor string Comments []string } // SampleRate returns the sample rate of the vorbis stream. // This will be zero if the headers have not been read yet. func (d *Decoder) SampleRate() int { return d.sampleRate } // Channels returns the number of channels of the vorbis stream. // This will be zero if the headers have not been read yet. func (d *Decoder) Channels() int { return d.channels } // BufferSize returns the highest amount of data that can be decoded from a single packet. // The result is already multiplied with the number of channels. // This will be zero if the headers have not been read yet. func (d *Decoder) BufferSize() int { return d.blocksize[1] / 2 * d.channels } // IsHeader returns wether the packet is a vorbis header. func IsHeader(packet []byte) bool { return len(packet) > 6 && packet[0]&1 == 1 && packet[1] == 'v' && packet[2] == 'o' && packet[3] == 'r' && packet[4] == 'b' && packet[5] == 'i' && packet[6] == 's' } // ReadHeader reads a vorbis header. // Three headers (identification, comment, and setup) must be read before any samples can be decoded. 
func (d *Decoder) ReadHeader(header []byte) error { if !IsHeader(header) { return errors.New("vorbis: invalid header") } headerType := header[0] header = header[7:] switch headerType { case headerTypeIdentification: err := d.readIdentificationHeader(header) if err != nil { return err } d.headerRead = true case headerTypeComment: return d.readCommentHeader(header) case headerTypeSetup: err := d.readSetupHeader(header) if err != nil { return err } d.overlap = make([]float32, d.blocksize[1]*d.channels) d.setupRead = true default: return errors.New("vorbis: unknown header type") } return nil } // HeadersRead returns wether the headers necessary for decoding have been read. func (d *Decoder) HeadersRead() bool { return d.headerRead && d.setupRead } // Decode decodes a packet and returns the result as an interleaved float slice. // The number of samples decoded varies and can be zero, but will be at most BufferSize() func (d *Decoder) Decode(in []byte) ([]float32, error) { if !d.HeadersRead() { return nil, errors.New("vorbis: missing headers") } return d.decodePacket(newBitReader(in), nil) } // DecodeInto decodes a packet and stores the result in the given buffer. // The size of the buffer must be at least BufferSize(). // The method will always return a slice of the buffer or nil. func (d *Decoder) DecodeInto(in []byte, buffer []float32) ([]float32, error) { if !d.HeadersRead() { return nil, errors.New("vorbis: missing headers") } if len(buffer) < d.BufferSize() { return nil, errors.New("vorbis: buffer too short") } return d.decodePacket(newBitReader(in), buffer) } // Clear must be called between decoding two non-consecutive packets. func (d *Decoder) Clear() { d.hasOverlap = false }
vendor/github.com/jfreymuth/vorbis/vorbis.go
0.820073
0.478041
vorbis.go
starcoder
package brotli import "math" /* Copyright 2013 Google Inc. All Rights Reserved. Distributed under MIT license. See file LICENSE for detail or copy at https://opensource.org/licenses/MIT */ func initialEntropyCodesCommand(data []uint16, length uint, stride uint, num_histograms uint, histograms []histogramCommand) { var seed uint32 = 7 var block_length uint = length / num_histograms var i uint clearHistogramsCommand(histograms, num_histograms) for i = 0; i < num_histograms; i++ { var pos uint = length * i / num_histograms if i != 0 { pos += uint(myRand(&seed) % uint32(block_length)) } if pos+stride >= length { pos = length - stride - 1 } histogramAddVectorCommand(&histograms[i], data[pos:], stride) } } func randomSampleCommand(seed *uint32, data []uint16, length uint, stride uint, sample *histogramCommand) { var pos uint = 0 if stride >= length { stride = length } else { pos = uint(myRand(seed) % uint32(length-stride+1)) } histogramAddVectorCommand(sample, data[pos:], stride) } func refineEntropyCodesCommand(data []uint16, length uint, stride uint, num_histograms uint, histograms []histogramCommand) { var iters uint = kIterMulForRefining*length/stride + kMinItersForRefining var seed uint32 = 7 var iter uint iters = ((iters + num_histograms - 1) / num_histograms) * num_histograms for iter = 0; iter < iters; iter++ { var sample histogramCommand histogramClearCommand(&sample) randomSampleCommand(&seed, data, length, stride, &sample) histogramAddHistogramCommand(&histograms[iter%num_histograms], &sample) } } /* Assigns a block id from the range [0, num_histograms) to each data element in data[0..length) and fills in block_id[0..length) with the assigned values. Returns the number of blocks, i.e. one plus the number of block switches. 
*/ func findBlocksCommand(data []uint16, length uint, block_switch_bitcost float64, num_histograms uint, histograms []histogramCommand, insert_cost []float64, cost []float64, switch_signal []byte, block_id []byte) uint { var data_size uint = histogramDataSizeCommand() var bitmaplen uint = (num_histograms + 7) >> 3 var num_blocks uint = 1 var i uint var j uint assert(num_histograms <= 256) if num_histograms <= 1 { for i = 0; i < length; i++ { block_id[i] = 0 } return 1 } for i := 0; i < int(data_size*num_histograms); i++ { insert_cost[i] = 0 } for i = 0; i < num_histograms; i++ { insert_cost[i] = fastLog2(uint(uint32(histograms[i].total_count_))) } for i = data_size; i != 0; { i-- for j = 0; j < num_histograms; j++ { insert_cost[i*num_histograms+j] = insert_cost[j] - bitCost(uint(histograms[j].data_[i])) } } for i := 0; i < int(num_histograms); i++ { cost[i] = 0 } for i := 0; i < int(length*bitmaplen); i++ { switch_signal[i] = 0 } /* After each iteration of this loop, cost[k] will contain the difference between the minimum cost of arriving at the current byte position using entropy code k, and the minimum cost of arriving at the current byte position. This difference is capped at the block switch cost, and if it reaches block switch cost, it means that when we trace back from the last position, we need to switch here. */ for i = 0; i < length; i++ { var byte_ix uint = i var ix uint = byte_ix * bitmaplen var insert_cost_ix uint = uint(data[byte_ix]) * num_histograms var min_cost float64 = 1e99 var block_switch_cost float64 = block_switch_bitcost var k uint for k = 0; k < num_histograms; k++ { /* We are coding the symbol in data[byte_ix] with entropy code k. */ cost[k] += insert_cost[insert_cost_ix+k] if cost[k] < min_cost { min_cost = cost[k] block_id[byte_ix] = byte(k) } } /* More blocks for the beginning. 
*/ if byte_ix < 2000 { block_switch_cost *= 0.77 + 0.07*float64(byte_ix)/2000 } for k = 0; k < num_histograms; k++ { cost[k] -= min_cost if cost[k] >= block_switch_cost { var mask byte = byte(1 << (k & 7)) cost[k] = block_switch_cost assert(k>>3 < bitmaplen) switch_signal[ix+(k>>3)] |= mask /* Trace back from the last position and switch at the marked places. */ } } } { var byte_ix uint = length - 1 var ix uint = byte_ix * bitmaplen var cur_id byte = block_id[byte_ix] for byte_ix > 0 { var mask byte = byte(1 << (cur_id & 7)) assert(uint(cur_id)>>3 < bitmaplen) byte_ix-- ix -= bitmaplen if switch_signal[ix+uint(cur_id>>3)]&mask != 0 { if cur_id != block_id[byte_ix] { cur_id = block_id[byte_ix] num_blocks++ } } block_id[byte_ix] = cur_id } } return num_blocks } var remapBlockIdsCommand_kInvalidId uint16 = 256 func remapBlockIdsCommand(block_ids []byte, length uint, new_id []uint16, num_histograms uint) uint { var next_id uint16 = 0 var i uint for i = 0; i < num_histograms; i++ { new_id[i] = remapBlockIdsCommand_kInvalidId } for i = 0; i < length; i++ { assert(uint(block_ids[i]) < num_histograms) if new_id[block_ids[i]] == remapBlockIdsCommand_kInvalidId { new_id[block_ids[i]] = next_id next_id++ } } for i = 0; i < length; i++ { block_ids[i] = byte(new_id[block_ids[i]]) assert(uint(block_ids[i]) < num_histograms) } assert(uint(next_id) <= num_histograms) return uint(next_id) } func buildBlockHistogramsCommand(data []uint16, length uint, block_ids []byte, num_histograms uint, histograms []histogramCommand) { var i uint clearHistogramsCommand(histograms, num_histograms) for i = 0; i < length; i++ { histogramAddCommand(&histograms[block_ids[i]], uint(data[i])) } } var clusterBlocksCommand_kInvalidIndex uint32 = math.MaxUint32 func clusterBlocksCommand(data []uint16, length uint, num_blocks uint, block_ids []byte, split *blockSplit) { var histogram_symbols []uint32 = make([]uint32, num_blocks) var block_lengths []uint32 = make([]uint32, num_blocks) var 
expected_num_clusters uint = clustersPerBatch * (num_blocks + histogramsPerBatch - 1) / histogramsPerBatch var all_histograms_size uint = 0 var all_histograms_capacity uint = expected_num_clusters var all_histograms []histogramCommand = make([]histogramCommand, all_histograms_capacity) var cluster_size_size uint = 0 var cluster_size_capacity uint = expected_num_clusters var cluster_size []uint32 = make([]uint32, cluster_size_capacity) var num_clusters uint = 0 var histograms []histogramCommand = make([]histogramCommand, brotli_min_size_t(num_blocks, histogramsPerBatch)) var max_num_pairs uint = histogramsPerBatch * histogramsPerBatch / 2 var pairs_capacity uint = max_num_pairs + 1 var pairs []histogramPair = make([]histogramPair, pairs_capacity) var pos uint = 0 var clusters []uint32 var num_final_clusters uint var new_index []uint32 var i uint var sizes = [histogramsPerBatch]uint32{0} var new_clusters = [histogramsPerBatch]uint32{0} var symbols = [histogramsPerBatch]uint32{0} var remap = [histogramsPerBatch]uint32{0} for i := 0; i < int(num_blocks); i++ { block_lengths[i] = 0 } { var block_idx uint = 0 for i = 0; i < length; i++ { assert(block_idx < num_blocks) block_lengths[block_idx]++ if i+1 == length || block_ids[i] != block_ids[i+1] { block_idx++ } } assert(block_idx == num_blocks) } for i = 0; i < num_blocks; i += histogramsPerBatch { var num_to_combine uint = brotli_min_size_t(num_blocks-i, histogramsPerBatch) var num_new_clusters uint var j uint for j = 0; j < num_to_combine; j++ { var k uint histogramClearCommand(&histograms[j]) for k = 0; uint32(k) < block_lengths[i+j]; k++ { histogramAddCommand(&histograms[j], uint(data[pos])) pos++ } histograms[j].bit_cost_ = populationCostCommand(&histograms[j]) new_clusters[j] = uint32(j) symbols[j] = uint32(j) sizes[j] = 1 } num_new_clusters = histogramCombineCommand(histograms, sizes[:], symbols[:], new_clusters[:], []histogramPair(pairs), num_to_combine, num_to_combine, histogramsPerBatch, max_num_pairs) if 
all_histograms_capacity < (all_histograms_size + num_new_clusters) { var _new_size uint if all_histograms_capacity == 0 { _new_size = all_histograms_size + num_new_clusters } else { _new_size = all_histograms_capacity } var new_array []histogramCommand for _new_size < (all_histograms_size + num_new_clusters) { _new_size *= 2 } new_array = make([]histogramCommand, _new_size) if all_histograms_capacity != 0 { copy(new_array, all_histograms[:all_histograms_capacity]) } all_histograms = new_array all_histograms_capacity = _new_size } brotli_ensure_capacity_uint32_t(&cluster_size, &cluster_size_capacity, cluster_size_size+num_new_clusters) for j = 0; j < num_new_clusters; j++ { all_histograms[all_histograms_size] = histograms[new_clusters[j]] all_histograms_size++ cluster_size[cluster_size_size] = sizes[new_clusters[j]] cluster_size_size++ remap[new_clusters[j]] = uint32(j) } for j = 0; j < num_to_combine; j++ { histogram_symbols[i+j] = uint32(num_clusters) + remap[symbols[j]] } num_clusters += num_new_clusters assert(num_clusters == cluster_size_size) assert(num_clusters == all_histograms_size) } histograms = nil max_num_pairs = brotli_min_size_t(64*num_clusters, (num_clusters/2)*num_clusters) if pairs_capacity < max_num_pairs+1 { pairs = nil pairs = make([]histogramPair, (max_num_pairs + 1)) } clusters = make([]uint32, num_clusters) for i = 0; i < num_clusters; i++ { clusters[i] = uint32(i) } num_final_clusters = histogramCombineCommand(all_histograms, cluster_size, histogram_symbols, clusters, pairs, num_clusters, num_blocks, maxNumberOfBlockTypes, max_num_pairs) pairs = nil cluster_size = nil new_index = make([]uint32, num_clusters) for i = 0; i < num_clusters; i++ { new_index[i] = clusterBlocksCommand_kInvalidIndex } pos = 0 { var next_index uint32 = 0 for i = 0; i < num_blocks; i++ { var histo histogramCommand var j uint var best_out uint32 var best_bits float64 histogramClearCommand(&histo) for j = 0; uint32(j) < block_lengths[i]; j++ { 
histogramAddCommand(&histo, uint(data[pos])) pos++ } if i == 0 { best_out = histogram_symbols[0] } else { best_out = histogram_symbols[i-1] } best_bits = histogramBitCostDistanceCommand(&histo, &all_histograms[best_out]) for j = 0; j < num_final_clusters; j++ { var cur_bits float64 = histogramBitCostDistanceCommand(&histo, &all_histograms[clusters[j]]) if cur_bits < best_bits { best_bits = cur_bits best_out = clusters[j] } } histogram_symbols[i] = best_out if new_index[best_out] == clusterBlocksCommand_kInvalidIndex { new_index[best_out] = next_index next_index++ } } } clusters = nil all_histograms = nil brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, num_blocks) brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, num_blocks) { var cur_length uint32 = 0 var block_idx uint = 0 var max_type byte = 0 for i = 0; i < num_blocks; i++ { cur_length += block_lengths[i] if i+1 == num_blocks || histogram_symbols[i] != histogram_symbols[i+1] { var id byte = byte(new_index[histogram_symbols[i]]) split.types[block_idx] = id split.lengths[block_idx] = cur_length max_type = brotli_max_uint8_t(max_type, id) cur_length = 0 block_idx++ } } split.num_blocks = block_idx split.num_types = uint(max_type) + 1 } new_index = nil block_lengths = nil histogram_symbols = nil } func splitByteVectorCommand(data []uint16, literals_per_histogram uint, max_histograms uint, sampling_stride_length uint, block_switch_cost float64, params *encoderParams, split *blockSplit) { length := uint(len(data)) var data_size uint = histogramDataSizeCommand() var num_histograms uint = length/literals_per_histogram + 1 var histograms []histogramCommand if num_histograms > max_histograms { num_histograms = max_histograms } if length == 0 { split.num_types = 1 return } else if length < kMinLengthForBlockSplitting { brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, split.num_blocks+1) brotli_ensure_capacity_uint32_t(&split.lengths, 
&split.lengths_alloc_size, split.num_blocks+1) split.num_types = 1 split.types[split.num_blocks] = 0 split.lengths[split.num_blocks] = uint32(length) split.num_blocks++ return } histograms = make([]histogramCommand, num_histograms) /* Find good entropy codes. */ initialEntropyCodesCommand(data, length, sampling_stride_length, num_histograms, histograms) refineEntropyCodesCommand(data, length, sampling_stride_length, num_histograms, histograms) { var block_ids []byte = make([]byte, length) var num_blocks uint = 0 var bitmaplen uint = (num_histograms + 7) >> 3 var insert_cost []float64 = make([]float64, (data_size * num_histograms)) var cost []float64 = make([]float64, num_histograms) var switch_signal []byte = make([]byte, (length * bitmaplen)) var new_id []uint16 = make([]uint16, num_histograms) var iters uint if params.quality < hqZopflificationQuality { iters = 3 } else { iters = 10 } /* Find a good path through literals with the good entropy codes. */ var i uint for i = 0; i < iters; i++ { num_blocks = findBlocksCommand(data, length, block_switch_cost, num_histograms, histograms, insert_cost, cost, switch_signal, block_ids) num_histograms = remapBlockIdsCommand(block_ids, length, new_id, num_histograms) buildBlockHistogramsCommand(data, length, block_ids, num_histograms, histograms) } insert_cost = nil cost = nil switch_signal = nil new_id = nil histograms = nil clusterBlocksCommand(data, length, num_blocks, block_ids, split) block_ids = nil } }
vendor/github.com/andybalholm/brotli/block_splitter_command.go
0.734024
0.443962
block_splitter_command.go
starcoder
package histogram import ( "bytes" "encoding/csv" "encoding/json" "sort" "strconv" "github.com/goark/cov19data/ecode" "github.com/goark/cov19data/values" "github.com/goark/errs" ) //HistData is class of cases data record for histgram. type HistData struct { Period values.Period Cases float64 Deaths float64 } //New function creates a new HistData instance. func New(period values.Period, cases, deaths float64) *HistData { return &HistData{Period: period, Cases: cases, Deaths: deaths} } //Contains method returns true if scape of this contains date of parameter. func (h *HistData) Contains(dt values.Date) bool { if h == nil { return false } return h.Period.Contains(dt) } //AddCases method adds cases count in HistData func (h *HistData) AddCases(cases float64) *HistData { if h == nil { return nil } h.Cases += cases return h } //AddDeaths method adds deaths count in HistData func (h *HistData) AddDeaths(deaths float64) *HistData { if h == nil { return nil } h.Deaths += deaths return h } //NewList creates list of HistData. func NewList(p values.Period, step int) ([]*HistData, values.Period) { histList := []*HistData{} max := values.Period{} if p.IsZero() { return histList, max } if step < 1 { return histList, max } start := p.Start end := p.End next := end for { to := next next = to.AddDay(-step) from := next.AddDay(1) start = from histList = append(histList, New(values.NewPeriod(from, to), 0, 0)) if values.NewPeriod(from, to).Contains(p.Start) { break } } sort.Slice(histList, func(i, j int) bool { return histList[i].Period.End.Before(histList[j].Period.End) }) return histList, values.NewPeriod(start, end) } //AddData adds data into HistData list. func AddData(histList []*HistData, dt values.Date, cases, deaths json.Number) { for _, h := range histList { if h.Period.Contains(dt) { if n, err := cases.Float64(); err == nil { h.AddCases(n) } if n, err := deaths.Float64(); err == nil { h.AddDeaths(n) } return } } } //ExportHistCSV exports CSV string from list of HistData. 
func ExportCSV(data []*HistData) ([]byte, error) { if len(data) == 0 { return nil, errs.Wrap(ecode.ErrNoData) } buf := &bytes.Buffer{} cw := csv.NewWriter(buf) cw.Comma = ',' if err := cw.Write([]string{ "Date_from", "Date_to", "Cases", "Deaths", }); err != nil { return nil, errs.Wrap(err) } for _, d := range data { if err := cw.Write([]string{ d.Period.StringStart(), d.Period.StringEnd(), strconv.FormatFloat(d.Cases, 'f', -1, 64), strconv.FormatFloat(d.Deaths, 'f', -1, 64), }); err != nil { return nil, errs.Wrap(err) } } cw.Flush() return buf.Bytes(), nil } //ExportJSON function returns JSON string from list of HistData. func ExportJSON(data []*HistData) ([]byte, error) { if len(data) == 0 { return nil, errs.Wrap(ecode.ErrNoData) } return json.Marshal(data) } /* Copyright 2020-2021 Spiegel * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */
histogram/histogram.go
0.680772
0.474022
histogram.go
starcoder
package metadbtest import ( "testing" "time" "github.com/googleforgames/open-saves/internal/pkg/metadb" "github.com/stretchr/testify/assert" ) func assertTimestampsWithinDuration(t *testing.T, expected, actual *metadb.Timestamps, delta time.Duration, msgAndArgs ...interface{}) { t.Helper() assert.WithinDuration(t, expected.CreatedAt, actual.CreatedAt, delta, msgAndArgs...) assert.WithinDuration(t, expected.UpdatedAt, actual.UpdatedAt, delta, msgAndArgs...) assert.Equal(t, expected.Signature, actual.Signature, msgAndArgs...) } // AssertEqualStore is equivalent to // AssertEqualStoreWithinDuration(t, expected, actual, time.Duration(0), msgAndArgs...) func AssertEqualStore(t *testing.T, expected, actual *metadb.Store, msgAndArgs ...interface{}) { t.Helper() AssertEqualStoreWithinDuration(t, expected, actual, time.Duration(0), msgAndArgs...) } // AssertEqualStoreWithinDuration compares each field in metadb.Store and asserts the timestamps // are within delta. func AssertEqualStoreWithinDuration(t *testing.T, expected, actual *metadb.Store, delta time.Duration, msgAndArgs ...interface{}) { t.Helper() if expected == nil { assert.Nil(t, actual) return } if assert.NotNil(t, actual) { assert.Equal(t, expected.Key, actual.Key, msgAndArgs...) assert.Equal(t, expected.Name, actual.Name, msgAndArgs...) assert.Equal(t, expected.OwnerID, actual.OwnerID, msgAndArgs...) assert.ElementsMatch(t, expected.Tags, actual.Tags, msgAndArgs...) assertTimestampsWithinDuration(t, &expected.Timestamps, &actual.Timestamps, delta, msgAndArgs...) } } // AssertEqualRecord is equivalent to // AssertEqualRecordWithinDuration(t, expected, actual, time.Duration(0), msgAndArgs...) func AssertEqualRecord(t *testing.T, expected, actual *metadb.Record, msgAndArgs ...interface{}) { t.Helper() AssertEqualRecordWithinDuration(t, expected, actual, time.Duration(0), msgAndArgs...) } // AssertEqualRecordWithinDuration compares each field in metadb.Record and asserts the timestamps // are within delta. 
func AssertEqualRecordWithinDuration(t *testing.T, expected, actual *metadb.Record, delta time.Duration, msgAndArgs ...interface{}) { t.Helper() if expected == nil { assert.Nil(t, actual) return } if assert.NotNil(t, actual) { assert.Equal(t, expected.Key, actual.Key, msgAndArgs...) assert.Equal(t, expected.Blob, actual.Blob, msgAndArgs...) assert.Equal(t, expected.BlobSize, actual.BlobSize, msgAndArgs...) assert.Equal(t, expected.Properties, actual.Properties, msgAndArgs...) assert.ElementsMatch(t, expected.Tags, actual.Tags, msgAndArgs...) assert.Equal(t, expected.OwnerID, actual.OwnerID, msgAndArgs...) assertTimestampsWithinDuration(t, &expected.Timestamps, &actual.Timestamps, delta, msgAndArgs...) } } // AssertEqualBlobRef is equivalent to // AssertEqualBlobRefWithinDuration(t, expected, actual, time.Duration(0), msgAndArgs...) func AssertEqualBlobRef(t *testing.T, expected, actual *metadb.BlobRef, msgAndArgs ...interface{}) { t.Helper() AssertEqualBlobRefWithinDuration(t, expected, actual, time.Duration(0), msgAndArgs...) } // AssertEqualBlobRefWithinDuration compares each field in metadb.BlobRef and asserts the timestamps // are within delta. func AssertEqualBlobRefWithinDuration(t *testing.T, expected, actual *metadb.BlobRef, delta time.Duration, msgAndArgs ...interface{}) { t.Helper() if expected == nil { assert.Nil(t, actual) return } if assert.NotNil(t, actual) { assert.Equal(t, expected.Key, actual.Key, msgAndArgs...) assert.Equal(t, expected.RecordKey, actual.RecordKey, msgAndArgs...) assert.Equal(t, expected.Size, actual.Size, msgAndArgs...) assert.Equal(t, expected.Status, actual.Status, msgAndArgs...) assert.Equal(t, expected.StoreKey, actual.StoreKey, msgAndArgs...) assert.Equal(t, expected.RecordKey, actual.RecordKey, msgAndArgs...) assertTimestampsWithinDuration(t, &expected.Timestamps, &actual.Timestamps, delta, msgAndArgs...) } }
internal/pkg/metadb/metadbtest/metadbtest.go
0.71403
0.837354
metadbtest.go
starcoder
package genetic_algorithm import ( log "github.com/cihub/seelog" "math/rand" "sort" ) type MultiPointCrossover struct { crossPointsCount int chromConstr EmptyChromosomeConstructor canProduceCopiesOfParents bool } func NewMultiPointCrossover(chromConstr EmptyChromosomeConstructor, crossPointsCount int) *MultiPointCrossover { if crossPointsCount <= 0 { panic("crossPointsCount must be positive") } crossover := new(MultiPointCrossover) crossover.chromConstr = chromConstr crossover.crossPointsCount = crossPointsCount return crossover } func NewOnePointCrossover(chromConstr EmptyChromosomeConstructor) *MultiPointCrossover { return NewMultiPointCrossover(chromConstr, 1) } func NewTwoPointCrossover(chromConstr EmptyChromosomeConstructor) *MultiPointCrossover { return NewMultiPointCrossover(chromConstr, 2) } func (crossover *MultiPointCrossover) ParentsCount() int { return 2 } func (crossover *MultiPointCrossover) CanProduceCopiesOfParents(val bool) *MultiPointCrossover { crossover.canProduceCopiesOfParents = val return crossover } func (crossover *MultiPointCrossover) Crossover(parents Chromosomes) Chromosomes { if len(parents) != crossover.ParentsCount() { panic("Incorrect parents count") } p1 := parents[0] p2 := parents[1] genesLen := p1.Genes().Len() if genesLen != p2.Genes().Len() { panic("Crossover do not support different chromosome size") } crossover.checkGenesLen(genesLen) crossPointsList := crossover.chooseCrossPoints(genesLen) sort.Sort(sort.IntSlice(crossPointsList)) log.Tracef("Cross on %v\n", crossPointsList) c1, c2 := crossover.crossover(p1, p2, crossPointsList) return Chromosomes{c1, c2} } func (crossover *MultiPointCrossover) checkGenesLen(genesLen int) { possibleCrossPoints := genesLen + 1 if !crossover.canProduceCopiesOfParents && crossover.crossPointsCount <= 2 { if crossover.crossPointsCount == 1 { possibleCrossPoints -= 2 } else { possibleCrossPoints-- } } if possibleCrossPoints < crossover.crossPointsCount { panic("Chromosome too short") } } func 
(crossover *MultiPointCrossover) chooseCrossPoints(genesLen int) []int { if crossover.crossPointsCount == 1 { if crossover.canProduceCopiesOfParents { return []int{rand.Intn(genesLen + 1)} } else { return []int{rand.Intn(genesLen-1) + 1} } } else if crossover.crossPointsCount == 2 { p1, p2 := chooseTwoPointCrossSection(genesLen, crossover.canProduceCopiesOfParents) return []int{p1, p2} } return chooseDifferentRandomNumbers(crossover.crossPointsCount, genesLen+1) } func (crossover *MultiPointCrossover) crossover(p1, p2 ChromosomeInterface, crossPoints []int) (c1, c2 ChromosomeInterface) { p1genes := p1.Genes() p2genes := p2.Genes() genesLen := p1.Genes().Len() c1 = crossover.chromConstr(genesLen) c1genes := c1.Genes() c2 = crossover.chromConstr(genesLen) c2genes := c2.Genes() crossPoint := 0 for i := 0; i < len(crossPoints); i++ { crossPoint = crossPoints[i] start := 0 if i > 0 { start = crossPoints[i-1] } c1genes.Copy(p1genes, start, start, crossPoint) c2genes.Copy(p2genes, start, start, crossPoint) c1genes, c2genes = c2genes, c1genes } c1genes.Copy(p1genes, crossPoint, crossPoint, genesLen) c2genes.Copy(p2genes, crossPoint, crossPoint, genesLen) return }
crossover_multi_point.go
0.683947
0.405331
crossover_multi_point.go
starcoder
package noisey /* Copyright 2014, <NAME> <<EMAIL>> See the LICENSE file for more details. */ /* This module performs fractal Brownian motion which combines mulitple steps of a coherent noise generator, each with different frequency and amplitude. Reference material: * Overview: https://code.google.com/p/fractalterraingeneration/wiki/Fractional_Brownian_Motion * Libnoise's glossary: http://libnoise.sourceforge.net/glossary/ */ // FBMGenerator2D takes noise and makes fractal Brownian motion values. type FBMGenerator2D struct { NoiseMaker NoiseyGet2D // the interface FBMGenerator2D uses gets noise values Octaves int // the number of octaves to calculate on each Get() Persistence float64 // a multiplier that determines how quickly the amplitudes diminish for each successive octave Lacunarity float64 // a multiplier that determines how quickly the frequency increases for each successive octave Frequency float64 // the number of cycles per unit length } // NewFBMGenerator2D creates a new fractal Brownian motion generator state. A 'default' fBm // would have 1 octave, 0.5 persistence, 2.0 lacunarity and 1.0 frequency. func NewFBMGenerator2D(noise NoiseyGet2D, octaves int, persistence float64, lacunarity float64, frequency float64) (fbm FBMGenerator2D) { fbm.NoiseMaker = noise fbm.Octaves = octaves fbm.Persistence = persistence fbm.Lacunarity = lacunarity fbm.Frequency = frequency return } // Get2D calculates the noise value over the number of Octaves and other parameters // that scale the coordinates over each octave. func (fbm *FBMGenerator2D) Get2D(x float64, y float64) (v float64) { curPersistence := 1.0 x *= fbm.Frequency y *= fbm.Frequency for o := 0; o < fbm.Octaves; o++ { signal := fbm.NoiseMaker.Get2D(x, y) v += signal * curPersistence x *= fbm.Lacunarity y *= fbm.Lacunarity curPersistence *= fbm.Persistence } return } // FBMGenerator3D takes noise and makes fractal Brownian motion values. 
type FBMGenerator3D struct { NoiseMaker NoiseyGet3D // the interface FBMGenerator3D uses gets noise values Octaves int // the number of octaves to calculate on each Get() Persistence float64 // a multiplier that determines how quickly the amplitudes diminish for each successive octave Lacunarity float64 // a multiplier that determines how quickly the frequency increases for each successive octave Frequency float64 // the number of cycles per unit length } // NewFBMGenerator3D creates a new fractal Brownian motion generator state. A 'default' fBm // would have 1 octave, 0.5 persistence, 2.0 lacunarity and 1.0 frequency. func NewFBMGenerator3D(noise NoiseyGet3D, octaves int, persistence float64, lacunarity float64, frequency float64) (fbm FBMGenerator3D) { fbm.NoiseMaker = noise fbm.Octaves = octaves fbm.Persistence = persistence fbm.Lacunarity = lacunarity fbm.Frequency = frequency return } // Get3D calculates the noise value over the number of Octaves and other parameters // that scale the coordinates over each octave. func (fbm *FBMGenerator3D) Get3D(x float64, y float64, z float64) (v float64) { curPersistence := 1.0 x *= fbm.Frequency y *= fbm.Frequency z *= fbm.Frequency for o := 0; o < fbm.Octaves; o++ { signal := fbm.NoiseMaker.Get3D(x, y, z) v += signal * curPersistence x *= fbm.Lacunarity y *= fbm.Lacunarity z *= fbm.Lacunarity curPersistence *= fbm.Persistence } return v }
fbm.go
0.906173
0.560974
fbm.go
starcoder
package strings // Trie is a search struct. // https://algs4.cs.princeton.edu/52trie/TrieST.java.html type Trie struct { root *_TrieNode alphabet Alphabet size int } type _TrieNode struct { value interface{} nodes []*_TrieNode } func (trie *Trie) symbolsCount() int { return trie.alphabet.Size() } func (trie *Trie) newNode() *_TrieNode { return &_TrieNode{ value: nil, nodes: make([]*_TrieNode, trie.symbolsCount()), } } // NewTrie constructs trie instance. func NewTrie(alphabet Alphabet) *Trie { return &Trie{ root: nil, alphabet: alphabet, size: -1, } } // NewTrieASCII constructs trie with ASCII alphabet. func NewTrieASCII() *Trie { return NewTrie(ASCIIAlphabet) } func (trie *Trie) sizeCore(node *_TrieNode) int { if node == nil { return 0 } count := 0 if node.value != nil { count++ } for i := 0; i < trie.symbolsCount(); i++ { count += trie.sizeCore(node.nodes[i]) } return count } // Size returns amount of elements in a trie. func (trie *Trie) Size() int { if trie.size == -1 { trie.size = trie.sizeCore(trie.root) } return trie.size } func (trie *Trie) symbolToIdx(key []rune, idx int) int { return trie.alphabet.ToIndex(key[idx]) } func (trie *Trie) getCore(node *_TrieNode, key []rune, symbolIdx int) *_TrieNode { if node == nil { return nil } if symbolIdx == len(key) { return node } nodeIdx := trie.symbolToIdx(key, symbolIdx) return trie.getCore(node.nodes[nodeIdx], key, symbolIdx+1) } // Get finds a value for a key. func (trie *Trie) Get(key string) interface{} { node := trie.getCore(trie.root, []rune(key), 0) if node == nil { return nil } return node.value } func (trie *Trie) putCore(node *_TrieNode, key []rune, symbolIdx int, val interface{}) *_TrieNode { if node == nil { node = trie.newNode() } if symbolIdx == len(key) { node.value = val return node } nodeIdx := trie.symbolToIdx(key, symbolIdx) node.nodes[nodeIdx] = trie.putCore(node.nodes[nodeIdx], key, symbolIdx+1, val) return node } // Put sets a value for a key. 
func (trie *Trie) Put(key string, val interface{}) { trie.size = -1 trie.root = trie.putCore(trie.root, []rune(key), 0, val) } func (trie *Trie) delCore(node *_TrieNode, key []rune, symbolIdx int) *_TrieNode { if node == nil { return nil } if symbolIdx == len(key) { node.value = nil } else { nodeIdx := trie.symbolToIdx(key, symbolIdx) node.nodes[nodeIdx] = trie.delCore(node.nodes[nodeIdx], key, symbolIdx+1) } if node.value != nil { return node } for i := 0; i < trie.symbolsCount(); i++ { if node.nodes[i] != nil { return node } } return nil } // Del removes a key. func (trie *Trie) Del(key string) { trie.size = -1 trie.root = trie.delCore(trie.root, []rune(key), 0) } func (trie *Trie) idxToSymbol(idx int) rune { return trie.alphabet.ToSymbol(idx) } func (trie *Trie) keysWithPrefixCore(node *_TrieNode, prefix []rune, collection *[]string) { if node == nil { return } if node.value != nil { *collection = append(*collection, string(prefix)) } for i := 0; i < trie.symbolsCount(); i++ { trie.keysWithPrefixCore(node.nodes[i], append(prefix, trie.idxToSymbol(i)), collection) } } // KeysWithPrefix collects keys with *prefix*. func (trie *Trie) KeysWithPrefix(prefix string) []string { var collection []string trie.keysWithPrefixCore(trie.getCore(trie.root, []rune(prefix), 0), []rune(prefix), &collection) return collection } // Keys returns all keys. func (trie *Trie) Keys() []string { return trie.KeysWithPrefix("") } func (trie *Trie) keysThatMatchCore(node *_TrieNode, prefix []rune, pattern []rune, collection *[]string) { if node == nil { return } if len(prefix) == len(pattern) { if node.value != nil { *collection = append(*collection, string(prefix)) } return } nextSymbol := pattern[len(prefix)] for i := 0; i < trie.symbolsCount(); i++ { if nextSymbol == '.' || nextSymbol == trie.idxToSymbol(i) { trie.keysThatMatchCore(node.nodes[i], append(prefix, trie.idxToSymbol(i)), pattern, collection) } } } // KeysThatMatch collects keys matching *pattern*. 
func (trie *Trie) KeysThatMatch(pattern string) []string { var collection []string trie.keysThatMatchCore(trie.root, nil, []rune(pattern), &collection) return collection } func (trie *Trie) longestPrefixCore(node *_TrieNode, str []rune, symbolIdx int, length int) int { if node == nil { return length } if node.value != nil { length = symbolIdx } if symbolIdx == len(str) { return length } nodeIdx := trie.symbolToIdx(str, symbolIdx) return trie.longestPrefixCore(node.nodes[nodeIdx], str, symbolIdx+1, length) } // LongestPrefix returns longest key that is prefix for *str*. func (trie *Trie) LongestPrefix(str string) string { len := trie.longestPrefixCore(trie.root, []rune(str), 0, 0) return str[:len] }
strings/trie.go
0.778018
0.488039
trie.go
starcoder
package unicornify

import (
	. "github.com/drbrain/go-unicornify/unicornify/core"
	"github.com/drbrain/gopyrand"
	"image"
	"image/color"
	"math"
)

// GrassData holds the parameters of a whole grass field: seeding for the
// deterministic per-row RNG, geometry (horizon, blade heights), wind lean,
// and the two colors blades are blended between.
type GrassData struct {
	Seed       uint32 // base RNG seed for row 0
	RowSeedAdd uint32 // added once per row so each row gets its own RNG stream
	Horizon    float64
	BladeHeightFar, BladeHeightNear float64 // 0-1-based, relative to image width/height
	Wind           float64 // lean factor; sign picks the lean direction
	Color1, Color2 Color
	MinBottomY float64 // pixel; rows whose base is above this are skipped
}

// Randomize fills Seed, RowSeedAdd and Wind from the given RNG.
// Wind ends up in roughly [-0.8, 0.8).
func (d *GrassData) Randomize(rand *pyrand.Random) {
	r := rand.RandBits(64)
	d.Seed = r[0]
	d.RowSeedAdd = r[1]
	d.Wind = 1.6*rand.Random() - 0.8
}

// BladeData describes a single blade of grass in pixel coordinates.
type BladeData struct {
	BottomX, BottomY, Height, BottomWidth, TopWidth, CurveStrength float64 // pixel-based
	CurveStart, CurveEnd float64 // fraction of the blade over which the bend is applied
	Color                Color
	// ConstrainImage, when non-nil, acts as a mask: pixels that are fully
	// transparent in it are not painted.
	ConstrainImage *image.RGBA
}

// DrawGrass paints rows of grass blades onto img, far rows first, until a
// row's blades would start below the bottom of the image. Each row reseeds
// its own RNG (Seed + row*RowSeedAdd), so the field is fully deterministic
// for a given GrassData. If shadowImage is non-nil, blades rooted in dark
// shadow pixels are darkened accordingly.
//
// NOTE(review): wv is accepted but unused here — presumably kept for
// signature consistency with other draw functions; confirm before removing.
func DrawGrass(img *image.RGBA, d GrassData, wv WorldView, shadowImage *image.RGBA) {
	bd := BladeData{}
	fsize := float64(img.Bounds().Dy())
	// Loop condition reads the previous iteration's bd: stop once the last
	// row's blade tops are already past the image height. First iteration
	// always runs (bd is zero-valued).
	for row := uint32(0); bd.BottomY-bd.Height <= fsize; row++ {
		seed := d.Seed + row*d.RowSeedAdd
		rand := pyrand.NewRandom()
		rand.SeedFromUInt32(seed)
		rowf := float64(row) / 100.0
		distf := d.BladeHeightFar / d.BladeHeightNear
		// Quadratic blend so far rows (small rowf) are packed closer together.
		y := (1-distf)*rowf*rowf + distf*rowf
		baseSize := d.BladeHeightFar + rowf*(d.BladeHeightNear-d.BladeHeightFar)
		colstep := 0.2 * baseSize
		bottomY := fsize * (d.Horizon + y*(1-d.Horizon))
		if bottomY < d.MinBottomY {
			continue
		}
		// One blade per column step; all blade parameters are jittered from
		// the row RNG, so the draw order of rand.Random() calls is fixed.
		for col := 0.0; col <= 1; col += colstep {
			bd.BottomX = fsize * (col + baseSize*(rand.Random()*0.2-0.1))
			bd.BottomY = bottomY + fsize*baseSize*rand.Random()*0.3
			bd.Height = baseSize * fsize * (0.95 + rand.Random()*0.1)
			bd.BottomWidth = baseSize * fsize * (rand.Random()*0.04 + 0.1)
			bd.TopWidth = baseSize * fsize * (rand.Random() * 0.01)
			bd.CurveStrength = baseSize * fsize * (d.Wind + rand.Random()*0.2)
			bd.CurveStart = rand.Random() * 0.5
			bd.CurveEnd = 0.5 + rand.Random()*0.5
			bd.Color = MixColors(d.Color1, d.Color2, rand.Random())
			if shadowImage != nil {
				// Opaque, dark shadow pixel at the blade's root darkens the
				// whole blade proportionally to the shadow depth.
				s := shadowImage.RGBAAt(Round(bd.BottomX), Round(bd.BottomY))
				if s.A == 255 && s.R < 128 {
					bd.Color = Darken(bd.Color, uint8(128-s.R))
				}
			}
			DrawGrassBlade(img, bd)
		}
	}
}

// DrawGrassBlade paints one blade bottom-up as a stack of horizontal spans.
// The blade tapers from BottomWidth to TopWidth and bends sideways along a
// sine-based curve scaled by CurveStrength; one third of the blade (the side
// facing away from the bend) is drawn slightly darker for depth.
func DrawGrassBlade(img *image.RGBA, d BladeData) {
	for dy := 0; dy <= Round(d.Height); dy++ {
		f := float64(dy) / d.Height
		curveP := (d.CurveStart + f*(d.CurveEnd-d.CurveStart)) * math.Pi / 2
		// Offset of the curve relative to its value at the blade's base, so
		// the blade root stays anchored at BottomX.
		curve := math.Sin(curveP) - curveP - (math.Sin(d.CurveStart*math.Pi/2) - d.CurveStart*math.Pi/2)
		width := d.BottomWidth + f*(d.TopWidth-d.BottomWidth)
		left := Round(d.BottomX + curve*d.CurveStrength - width/2)
		right := Round(d.BottomX + curve*d.CurveStrength + width/2)
		y := Round(d.BottomY) - dy
		for x := left; x <= right; x++ {
			// Honor the constraint mask: skip fully transparent mask pixels.
			if d.ConstrainImage != nil && d.ConstrainImage.At(x, y).(color.RGBA).A == 0 {
				continue
			}
			thiscol := d.Color
			// Darken the third of the span on the concave side of the bend.
			if (d.CurveStrength < 0 && x >= left+(right-left)*2/3) || (d.CurveStrength >= 0 && x <= left+(right-left)*1/3) {
				thiscol = Darken(thiscol, 10)
			}
			img.SetRGBA(x, y, thiscol.ToRGBA())
		}
	}
}
unicornify/grass.go
0.612773
0.400515
grass.go
starcoder
package base

import "math"

const (
	// These values are established by empiricism with
	// tests (tradeoff: performance VS precision)
	NEWTON_ITERATIONS          = 4
	NEWTON_MIN_SLOPE           = 0.001
	SUBDIVISION_PRECISION      = 0.0000001
	SUBDIVISION_MAX_ITERATIONS = float64(10)

	// Number of precomputed x(t) samples used to seed the t-for-x lookup.
	kSplineTableSize = 11
	kSampleStepSize  = 1.0 / (kSplineTableSize - 1.0)
)

// aPoint, bPoint, cPoint are the coefficients of the cubic polynomial
// x(t) = ((a*t + b)*t + c)*t for one axis of a cubic Bezier whose end
// control points are pinned at 0 and 1.
func aPoint(aA1 float64, aA2 float64) float64 {
	return 1.0 - 3.0*aA2 + 3.0*aA1
}

func bPoint(aA1 float64, aA2 float64) float64 {
	return 3.0*aA2 - 6.0*aA1
}

func cPoint(aA1 float64) float64 {
	return 3.0 * aA1
}

// calcBezier returns x(t) given t, x1, and x2, or y(t) given t, y1, and y2.
func calcBezier(aT float64, aA1 float64, aA2 float64) float64 {
	return ((aPoint(aA1, aA2)*aT+bPoint(aA1, aA2))*aT + cPoint(aA1)) * aT
}

// getSlope returns dx/dt given t, x1, and x2, or dy/dt given t, y1, and y2.
func getSlope(aT float64, aA1 float64, aA2 float64) float64 {
	return 3.0*aPoint(aA1, aA2)*aT*aT + 2.0*bPoint(aA1, aA2)*aT + cPoint(aA1)
}

// binarySubdivide solves calcBezier(t, mX1, mX2) == aX for t by bisection on
// [aA, aB], stopping once the residual is within SUBDIVISION_PRECISION or
// SUBDIVISION_MAX_ITERATIONS halvings have been performed.
//
// BUG FIX: the previous version wrote `i = +1` (assignment, not increment)
// and tested the zero-initialized residual *before* the first bisection
// step, so the loop exited immediately and the function always returned 0.
// The reference algorithm is a do-while; this restores that shape.
func binarySubdivide(aX, aA, aB, mX1, mX2 float64) float64 {
	var currentX, currentT float64
	for i := 0.0; ; {
		currentT = aA + (aB-aA)/2.0
		currentX = calcBezier(currentT, mX1, mX2) - aX
		if currentX > 0.0 {
			aB = currentT
		} else {
			aA = currentT
		}
		i++
		if math.Abs(currentX) <= SUBDIVISION_PRECISION || i >= SUBDIVISION_MAX_ITERATIONS {
			break
		}
	}
	return currentT
}

// newtonRaphsonIterate refines an initial guess for t such that
// calcBezier(t, mX1, mX2) == aX, using a fixed number of Newton steps.
// A zero slope aborts early to avoid dividing by zero.
func newtonRaphsonIterate(aX, aGuessT, mX1, mX2 float64) float64 {
	for i := 0; i < NEWTON_ITERATIONS; i++ {
		currentSlope := getSlope(aGuessT, mX1, mX2)
		if currentSlope == 0.0 {
			return aGuessT
		}
		currentX := calcBezier(aGuessT, mX1, mX2) - aX
		aGuessT -= currentX / currentSlope
	}
	return aGuessT
}

// linearEasing is the identity easing curve, used when the control points
// describe a straight line.
func linearEasing(x float64) float64 {
	return x
}

// CurveFunc adapts a plain function to the Curve interface.
type CurveFunc func(t float64) float64

// Point evaluates the curve at t.
func (c CurveFunc) Point(t float64) float64 {
	return c(t)
}

// BezierTPoints builds an easing Curve from CSS-style cubic-bezier control
// points (x1,y1) and (x2,y2). The x components must lie in [0, 1] so that
// x(t) is invertible; violating that panics. The returned curve maps an
// input progress x in [0, 1] to the eased output y.
func BezierTPoints(x1, y1, x2, y2 float64) Curve {
	if !(0 <= x1 && x1 <= 1 && 0 <= x2 && x2 <= 1) {
		panic("bezier x values must be in [0, 1] range")
	}
	if x1 == y1 && x2 == y2 {
		// The curve degenerates to the identity; skip the solver entirely.
		return CurveFunc(linearEasing)
	}

	// Precompute x(t) at evenly spaced t to seed the inverse lookup.
	sampleValues := make([]float64, kSplineTableSize)
	for i := 0; i < kSplineTableSize; i++ {
		sampleValues[i] = calcBezier(float64(i)*kSampleStepSize, x1, x2)
	}

	getTForX := func(aX float64) float64 {
		// Find the sample interval containing aX.
		intervalStart := 0.0
		lastSample := kSplineTableSize - 1
		currentSample := 1
		for ; currentSample != lastSample && sampleValues[currentSample] <= aX; currentSample++ {
			intervalStart += kSampleStepSize
		}
		currentSample--

		// Interpolate to provide an initial guess for t.
		dist := (aX - sampleValues[currentSample]) /
			(sampleValues[currentSample+1] - sampleValues[currentSample])
		guessForT := intervalStart + dist*kSampleStepSize

		// Newton-Raphson when the slope is healthy; plain guess when the
		// slope is exactly flat; bisection fallback otherwise.
		initialSlope := getSlope(guessForT, x1, x2)
		if initialSlope >= NEWTON_MIN_SLOPE {
			return newtonRaphsonIterate(aX, guessForT, x1, x2)
		} else if initialSlope == 0.0 {
			return guessForT
		}
		return binarySubdivide(aX, intervalStart, intervalStart+kSampleStepSize, x1, x2)
	}

	return CurveFunc(func(x float64) float64 {
		// Endpoints are exact by definition; don't run the solver on them.
		if x == 0 || x == 1 {
			return x
		}
		return calcBezier(getTForX(x), y1, y2)
	})
}
pkg/styled/base/raphson-newton.go
0.761272
0.671898
raphson-newton.go
starcoder
package gotility

import (
	"fmt"
	"reflect"
	"regexp"
	"strings"
)

const (
	// NUMBER_REGEX is the regex for real number
	NUMBER_REGEX = "^[+-]?([0-9]+(\\.[0-9]*)?|\\.[0-9]+)([eE][+-]?[0-9]+)?$"
	// EMAIL_REGEX is the regex for email
	EMAIL_REGEX = "^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$"
	// URL_REGEX is the regex for url
	URL_REGEX = "^((((H|h)(T|t)|(F|f))(T|t)(P|p)((S|s)?))\\://)?(www.|[a-zA-Z0-9].)[a-zA-Z0-9\\-\\.]+\\.[a-zA-Z]{2,6}(\\:[0-9]{1,5})*(/($|[a-zA-Z0-9\\.\\,\\;\\?\\'\\\\+&amp;%\\$#\\=~_\\-]+))*$"
)

// The validators are compiled once at package init rather than on every
// call (the previous code re-ran regexp.MustCompile per invocation).
var (
	numberRegexp = regexp.MustCompile(NUMBER_REGEX)
	emailRegexp  = regexp.MustCompile(EMAIL_REGEX)
	urlRegexp    = regexp.MustCompile(URL_REGEX)
)

// IsNumber check whether the string is a number or not
func IsNumber(str string) bool {
	return numberRegexp.MatchString(str)
}

// IsEmail check whether the string is an email or not
func IsEmail(str string) bool {
	return emailRegexp.MatchString(str)
}

// IsUrl check whether the string is an url or not
func IsUrl(str string) bool {
	return urlRegexp.MatchString(str)
}

// IsTitleCase check whether the string is `title Case` or not
func IsTitleCase(str string) bool {
	return str == strings.Title(str)
}

// FlattenFloat64 transform 2D matrix of floating point numbers into 1D array
func FlattenFloat64(matrix [][]float64) []float64 {
	row := []float64{}
	for _, value := range matrix {
		row = append(row, value...)
	}
	return row
}

// FlattenInt transform 2D matrix of Integers into 1D array
func FlattenInt(matrix [][]int) []int {
	row := []int{}
	for _, value := range matrix {
		row = append(row, value...)
	}
	return row
}

// FlattenString transform 2D matrix of string into 1D array
func FlattenString(matrix [][]string) []string {
	row := []string{}
	for _, value := range matrix {
		row = append(row, value...)
	}
	return row
}

// ToMatrixInt transform 1D array Integers to 2D Matrix.
// It fails unless numRows is positive and divides len(row) evenly.
// Rows are copied, so mutating the result does not alias the input.
func ToMatrixInt(row []int, numRows int) ([][]int, error) {
	length := len(row)
	if numRows <= 0 || length%numRows != 0 {
		return nil, fmt.Errorf("division of row to matrix not possible. Invalid numRows : %d ", numRows)
	}
	numCols := length / numRows
	matrix := make([][]int, numRows)
	for i := 0; i < numRows; i += 1 {
		index := i * numCols
		matrix[i] = append(matrix[i], row[index:index+numCols]...)
	}
	return matrix, nil
}

// ToMatrixString transform 1D array string to 2D Matrix.
// It fails unless numRows is positive and divides len(row) evenly.
func ToMatrixString(row []string, numRows int) ([][]string, error) {
	length := len(row)
	if numRows <= 0 || length%numRows != 0 {
		return nil, fmt.Errorf("division of row to matrix not possible. Invalid numRows : %d ", numRows)
	}
	numCols := length / numRows
	matrix := make([][]string, numRows)
	for i := 0; i < numRows; i += 1 {
		index := i * numCols
		matrix[i] = append(matrix[i], row[index:index+numCols]...)
	}
	return matrix, nil
}

// ToMatrixFloat64 transform 1D array Floating point numbers to 2D Matrix.
// It fails unless numRows is positive and divides len(row) evenly.
func ToMatrixFloat64(row []float64, numRows int) ([][]float64, error) {
	length := len(row)
	if numRows <= 0 || length%numRows != 0 {
		return nil, fmt.Errorf("division of row to matrix not possible. Invalid numRows : %d ", numRows)
	}
	numCols := length / numRows
	matrix := make([][]float64, numRows)
	for i := 0; i < numRows; i += 1 {
		index := i * numCols
		matrix[i] = append(matrix[i], row[index:index+numCols]...)
	}
	return matrix, nil
}

// SumInt returns the sum of an integer array
func SumInt(row []int) int {
	sum := 0
	for _, value := range row {
		sum += value
	}
	return sum
}

// SumFloat64 returns the sum of a floating point array
func SumFloat64(row []float64) float64 {
	sum := 0.0
	for _, value := range row {
		sum += value
	}
	return sum
}

// MapInt takes an array of integer values and a function to be applied on that value.
// It returns an array with the mapped values
func MapInt(row []int, fn func(int) int) []int {
	mappedValues := []int{}
	for _, value := range row {
		mappedValues = append(mappedValues, fn(value))
	}
	return mappedValues
}

// MapFloat64 takes an array of float64 values and a function to be applied on that value.
// It returns an array with the mapped values
func MapFloat64(row []float64, fn func(float64) float64) []float64 {
	mappedValues := []float64{}
	for _, value := range row {
		mappedValues = append(mappedValues, fn(value))
	}
	return mappedValues
}

// MapString takes an array of string and a function to be applied on that value.
// It returns an array with the mapped values
func MapString(row []string, fn func(string) string) []string {
	mappedValues := []string{}
	for _, value := range row {
		mappedValues = append(mappedValues, fn(value))
	}
	return mappedValues
}

// sumInt64 sums a reflected slice of any signed integer kind into an int64.
func sumInt64(value reflect.Value) int64 {
	sum := int64(0)
	for i := 0; i < value.Len(); i += 1 {
		sum += value.Index(i).Int()
	}
	return sum
}

// sumFloat64 sums a reflected slice of any float kind into a float64.
func sumFloat64(value reflect.Value) float64 {
	sum := float64(0)
	for i := 0; i < value.Len(); i += 1 {
		sum += value.Index(i).Float()
	}
	return sum
}

// Sum returns the sum of the slice: int64 for integer element kinds,
// float64 for float element kinds, and an error for anything else.
// An empty slice sums to 0.
func Sum(row interface{}) (interface{}, error) {
	slice := reflect.ValueOf(row)
	if slice.Type().Kind() != reflect.Slice {
		return 0, fmt.Errorf("Expected slice, got: " + slice.Type().Kind().String())
	}
	if slice.Len() == 0 {
		return 0, nil
	}
	switch slice.Index(0).Type().Kind() {
	// reflect.Int16 was missing from the original kind list; it is now
	// accepted alongside the other signed integer kinds.
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return sumInt64(slice), nil
	case reflect.Float32, reflect.Float64:
		return sumFloat64(slice), nil
	default:
		return 0, fmt.Errorf("cannot sum the given slice")
	}
}

// checkSliceElement validates that row is a slice whose element kind matches
// element's kind. It returns the reflected slice; ok is false when the slice
// is empty (callers return -1, nil in that case).
func checkSliceElement(row interface{}, element interface{}) (slice reflect.Value, ok bool, err error) {
	slice = reflect.ValueOf(row)
	if slice.Type().Kind() != reflect.Slice {
		return slice, false, fmt.Errorf("Expected slice, got: " + slice.Type().Kind().String())
	}
	if slice.Len() == 0 {
		return slice, false, nil
	}
	sliceElementType := slice.Index(0).Type().Kind()
	elementType := reflect.ValueOf(element).Type().Kind()
	if sliceElementType != elementType {
		return slice, false, fmt.Errorf("Expected element to be " + sliceElementType.String() + ", got: " + elementType.String())
	}
	return slice, true, nil
}

// FindIndex returns the first index of the element in the given slice and if not found returns -1
func FindIndex(row interface{}, element interface{}) (int, error) {
	slice, ok, err := checkSliceElement(row, element)
	if !ok {
		return -1, err
	}
	for i := 0; i < slice.Len(); i += 1 {
		if reflect.DeepEqual(slice.Index(i).Interface(), element) {
			return i, nil
		}
	}
	return -1, nil
}

// FindLastIndex returns the last index of the element in the given slice and if not found returns -1
func FindLastIndex(row interface{}, element interface{}) (int, error) {
	slice, ok, err := checkSliceElement(row, element)
	if !ok {
		return -1, err
	}
	for i := slice.Len() - 1; i >= 0; i -= 1 {
		if reflect.DeepEqual(slice.Index(i).Interface(), element) {
			return i, nil
		}
	}
	return -1, nil
}

// GetKeys returns the keys as an unordered slice of the given map
func GetKeys(object interface{}) ([]interface{}, error) {
	Map := reflect.ValueOf(object)
	if Map.Type().Kind() != reflect.Map {
		return nil, fmt.Errorf("Expected map, got: " + Map.Type().Kind().String())
	}
	Keys := Map.MapKeys()
	KeysToReturn := []interface{}{}
	for i := 0; i < len(Keys); i += 1 {
		KeysToReturn = append(KeysToReturn, Keys[i].Interface())
	}
	return KeysToReturn, nil
}

// GetValues returns the values as an unordered slice of the given map
func GetValues(object interface{}) ([]interface{}, error) {
	Map := reflect.ValueOf(object)
	if Map.Type().Kind() != reflect.Map {
		return nil, fmt.Errorf("Expected map, got: " + Map.Type().Kind().String())
	}
	Keys := Map.MapKeys()
	ValuesToReturn := []interface{}{}
	for i := 0; i < len(Keys); i += 1 {
		ValuesToReturn = append(ValuesToReturn, Map.MapIndex(Keys[i]).Interface())
	}
	return ValuesToReturn, nil
}
utility.go
0.631253
0.40869
utility.go
starcoder
package lzma

// states defines the overall state count
const states = 12

// state maintains the full state of the operation encoding or decoding
// process: the recent match distances, all adaptive probability models,
// the sub-codecs, and the current state-machine value (0..states-1).
type state struct {
	rep         [4]uint32 // recent distances; presumably rep0..rep3 per the LZMA format — confirm
	isMatch     [states << maxPosBits]prob
	isRepG0Long [states << maxPosBits]prob
	isRep       [states]prob
	isRepG0     [states]prob
	isRepG1     [states]prob
	isRepG2     [states]prob
	litCodec    literalCodec
	lenCodec    lengthCodec
	repLenCodec lengthCodec
	distCodec   distCodec
	state       uint32 // state-machine value, always < states
	posBitMask  uint32 // (1 << Properties.PB) - 1; masks the position state
	Properties  Properties
}

// initProbSlice initializes a slice of probabilities.
func initProbSlice(p []prob) {
	for i := range p {
		p[i] = probInit
	}
}

// Reset sets all state information to the original values.
// Properties survive the reset; everything else (reps, state value,
// probability models, sub-codecs) is reinitialized from them.
func (s *state) Reset() {
	p := s.Properties
	*s = state{
		Properties: p,
		// dict: s.dict,
		posBitMask: (uint32(1) << uint(p.PB)) - 1,
	}
	initProbSlice(s.isMatch[:])
	initProbSlice(s.isRep[:])
	initProbSlice(s.isRepG0[:])
	initProbSlice(s.isRepG1[:])
	initProbSlice(s.isRepG2[:])
	initProbSlice(s.isRepG0Long[:])
	s.litCodec.init(p.LC, p.LP)
	s.lenCodec.init()
	s.repLenCodec.init()
	s.distCodec.init()
}

// initState initializes the state.
func initState(s *state, p Properties) {
	*s = state{Properties: p}
	s.Reset()
}

// newState creates a new state from the give Properties.
func newState(p Properties) *state {
	s := &state{Properties: p}
	s.Reset()
	return s
}

// deepcopy initializes s as a deep copy of the source. Arrays copy by
// assignment; the sub-codecs need their own deepcopy calls because they
// hold internal slices.
func (s *state) deepcopy(src *state) {
	if s == src {
		return
	}
	s.rep = src.rep
	s.isMatch = src.isMatch
	s.isRepG0Long = src.isRepG0Long
	s.isRep = src.isRep
	s.isRepG0 = src.isRepG0
	s.isRepG1 = src.isRepG1
	s.isRepG2 = src.isRepG2
	s.litCodec.deepcopy(&src.litCodec)
	s.lenCodec.deepcopy(&src.lenCodec)
	s.repLenCodec.deepcopy(&src.repLenCodec)
	s.distCodec.deepcopy(&src.distCodec)
	s.state = src.state
	s.posBitMask = src.posBitMask
	s.Properties = src.Properties
}

// cloneState creates a new clone of the give state.
func cloneState(src *state) *state {
	s := new(state)
	s.deepcopy(src)
	return s
}

// updateStateLiteral updates the state for a literal.
// The transitions (0-3 -> 0, 4-9 -> state-3, 10-11 -> state-6) follow the
// fixed LZMA state table.
func (s *state) updateStateLiteral() {
	switch {
	case s.state < 4:
		s.state = 0
		return
	case s.state < 10:
		s.state -= 3
		return
	}
	s.state -= 6
}

// updateStateMatch updates the state for a match.
func (s *state) updateStateMatch() {
	if s.state < 7 {
		s.state = 7
	} else {
		s.state = 10
	}
}

// updateStateRep updates the state for a repetition.
func (s *state) updateStateRep() {
	if s.state < 7 {
		s.state = 8
	} else {
		s.state = 11
	}
}

// updateStateShortRep updates the state for a short repetition.
func (s *state) updateStateShortRep() {
	if s.state < 7 {
		s.state = 9
	} else {
		s.state = 11
	}
}

// states computes the states of the operation codec: the raw state value,
// the position state derived from the dictionary head, and the combined
// index used by the isMatch/isRepG0Long tables.
func (s *state) states(dictHead int64) (state1, state2, posState uint32) {
	state1 = s.state
	posState = uint32(dictHead) & s.posBitMask
	state2 = (s.state << maxPosBits) | posState
	return
}

// litState computes the literal state from the previous byte and the
// dictionary head, using the LC (literal context) and LP (literal position)
// property bits.
func (s *state) litState(prev byte, dictHead int64) uint32 {
	lp, lc := uint(s.Properties.LP), uint(s.Properties.LC)
	litState := ((uint32(dictHead) & ((1 << lp) - 1)) << lc) |
		(uint32(prev) >> (8 - lc))
	return litState
}
vendor/github.com/ulikunitz/xz/lzma/state.go
0.542136
0.444625
state.go
starcoder
package wgs84 import ( "fmt" "math" ) type Locality struct { Name string Longitude, Latitude float64 Distance float64 Bearing float64 } type ByDistance []Locality func (a ByDistance) Len() int { return len(a) } func (a ByDistance) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a ByDistance) Less(i, j int) bool { return a[i].Distance < a[j].Distance } var nz = []struct { name string longitude, latitude float64 }{ {name: `Auckland`, longitude: 174.77, latitude: -36.85}, {name: `Cambridge`, longitude: 175.47, latitude: -37.88}, {name: `<NAME>`, longitude: 172.68, latitude: -34.43}, {name: `Hamilton`, longitude: 175.28, latitude: -37.78}, {name: `Kaitaia`, longitude: 173.27, latitude: -35.12}, {name: `Kawhia`, longitude: 174.82, latitude: -38.07}, {name: `Pukekohe`, longitude: 174.9, latitude: -37.2}, {name: `<NAME>`, longitude: 175.7, latitude: -37.53}, {name: `<NAME>`, longitude: 175.33, latitude: -38.02}, {name: `Thames`, longitude: 175.55, latitude: -37.15}, {name: `Whangamata`, longitude: 175.87, latitude: -37.22}, {name: `Whangarei`, longitude: 174.32, latitude: -35.72}, {name: `Whitianga`, longitude: 175.7, latitude: -36.82}, {name: `Murupara`, longitude: 176.7, latitude: -38.45}, {name: `Ohakune`, longitude: 175.42, latitude: -39.42}, {name: `Opotiki`, longitude: 177.28, latitude: -38.02}, {name: `Rotorua`, longitude: 176.23, latitude: -38.13}, {name: `Taihape`, longitude: 175.8, latitude: -39.68}, {name: `Taupo`, longitude: 176.08, latitude: -38.7}, {name: `Tauranga`, longitude: 176.17, latitude: -37.68}, {name: `Tokoroa`, longitude: 175.87, latitude: -38.23}, {name: `Turangi`, longitude: 175.8, latitude: -39}, {name: `Whakatane`, longitude: 176.98, latitude: -37.97}, {name: `<NAME>`, longitude: 177.18, latitude: -37.52}, {name: `Gisborne`, longitude: 178.02, latitude: -38.67}, {name: `Matawai`, longitude: 177.53, latitude: -38.35}, {name: `Ruatoria`, longitude: 178.32, latitude: -37.88}, {name: `<NAME>`, longitude: 178.37, latitude: -37.63}, {name: 
`<NAME>`, longitude: 177.68, latitude: -37.75}, {name: `<NAME>`, longitude: 178.32, latitude: -38.13}, {name: `<NAME>`, longitude: 178.3, latitude: -38.37}, {name: `Hastings`, longitude: 176.85, latitude: -39.65}, {name: `Napier`, longitude: 176.9, latitude: -39.5}, {name: `Waipukurau`, longitude: 176.55, latitude: -40}, {name: `Wairoa`, longitude: 177.42, latitude: -39.05}, {name: `Hawera`, longitude: 174.28, latitude: -39.58}, {name: `Mokau`, longitude: 174.62, latitude: -38.7}, {name: `<NAME>`, longitude: 174.07, latitude: -39.07}, {name: `Opunake`, longitude: 173.85, latitude: -39.45}, {name: `Stratford`, longitude: 174.28, latitude: -39.35}, {name: `Taumarunui`, longitude: 175.27, latitude: -38.88}, {name: `<NAME>`, longitude: 175.17, latitude: -38.33}, {name: `Waverley`, longitude: 174.63, latitude: -39.77}, {name: `Blenheim`, longitude: 173.95, latitude: -41.52}, {name: `Castlepoint`, longitude: 176.22, latitude: -40.9}, {name: `Dannevirke`, longitude: 176.1, latitude: -40.2}, {name: `Eketahuna`, longitude: 175.7, latitude: -40.65}, {name: `Feilding`, longitude: 175.57, latitude: -40.23}, {name: `<NAME>`, longitude: 173.83, latitude: -40.93}, {name: `Hunterville`, longitude: 175.57, latitude: -39.93}, {name: `Levin`, longitude: 175.28, latitude: -40.62}, {name: `Martinborough`, longitude: 175.45, latitude: -41.22}, {name: `Masterton`, longitude: 175.65, latitude: -40.95}, {name: `<NAME>`, longitude: 175.62, latitude: -40.37}, {name: `Paraparaumu`, longitude: 175, latitude: -40.92}, {name: `Picton`, longitude: 174, latitude: -41.3}, {name: `Pongaroa`, longitude: 176.18, latitude: -40.55}, {name: `Porangahau`, longitude: 176.62, latitude: -40.3}, {name: `Seddon`, longitude: 174.07, latitude: -41.67}, {name: `Wellington`, longitude: 174.77, latitude: -41.28}, {name: `<NAME>`, longitude: 174.91, latitude: -41.21}, {name: `<NAME>`, longitude: 175.07, latitude: -41.12}, {name: `Porirua`, longitude: 174.84, latitude: -41.13}, {name: `Whanganui`, longitude: 175.05, 
latitude: -39.93}, {name: `<NAME>`, longitude: 171.57, latitude: -42.95}, {name: `Collingwood`, longitude: 172.68, latitude: -40.68}, {name: `Greymouth`, longitude: 171.2, latitude: -42.45}, {name: `Haast`, longitude: 169.05, latitude: -43.88}, {name: `Hokitika`, longitude: 170.97, latitude: -42.72}, {name: `Karamea`, longitude: 172.12, latitude: -41.25}, {name: `Motueka`, longitude: 173.02, latitude: -41.12}, {name: `<NAME>`, longitude: 170.1, latitude: -43.73}, {name: `Murchison`, longitude: 172.33, latitude: -41.8}, {name: `Nelson`, longitude: 173.28, latitude: -41.27}, {name: `Reefton`, longitude: 171.87, latitude: -42.12}, {name: `<NAME>`, longitude: 172.85, latitude: -41.8}, {name: `Westport`, longitude: 171.6, latitude: -41.75}, {name: `Akaroa`, longitude: 172.97, latitude: -43.82}, {name: `Amberley`, longitude: 172.73, latitude: -43.17}, {name: `Ashburton`, longitude: 171.75, latitude: -43.9}, {name: `Cheviot`, longitude: 173.27, latitude: -42.82}, {name: `Christchurch`, longitude: 172.63, latitude: -43.53}, {name: `Culverden`, longitude: 172.85, latitude: -42.78}, {name: `Fairlie`, longitude: 170.83, latitude: -44.1}, {name: `Geraldine`, longitude: 171.23, latitude: -44.1}, {name: `<NAME>`, longitude: 172.83, latitude: -42.52}, {name: `Kaikoura`, longitude: 173.68, latitude: -42.4}, {name: `Methven`, longitude: 171.65, latitude: -43.63}, {name: `Oxford`, longitude: 172.2, latitude: -43.3}, {name: `Timaru`, longitude: 171.25, latitude: -44.4}, {name: `Twizel`, longitude: 170.1, latitude: -44.27}, {name: `Waimate`, longitude: 171.05, latitude: -44.73}, {name: `<NAME>`, longitude: 167.93, latitude: -44.68}, {name: `Queenstown`, longitude: 168.67, latitude: -45.03}, {name: `<NAME>`, longitude: 167.72, latitude: -45.42}, {name: `Alexandra`, longitude: 169.38, latitude: -45.25}, {name: `Balclutha`, longitude: 169.73, latitude: -46.23}, {name: `Dunedin`, longitude: 170.5, latitude: -45.88}, {name: `Gore`, longitude: 168.93, latitude: -46.1}, {name: 
`Invercargill`, longitude: 168.37, latitude: -46.42}, {name: `Lumsden`, longitude: 168.45, latitude: -45.73}, {name: `Oamaru`, longitude: 170.97, latitude: -45.1}, {name: `Palmerston`, longitude: 170.72, latitude: -45.48}, {name: `Ranfurly`, longitude: 170.1, latitude: -45.13}, {name: `Roxburgh`, longitude: 169.32, latitude: -45.55}, {name: `<NAME>`, longitude: 166.6, latitude: -48.02}, {name: `Tuatapere`, longitude: 167.68, latitude: -46.13}, {name: `Wanaka`, longitude: 169.13, latitude: -44.7}, } // ClosestNZ returns the closest New Zealand locality to the input point. // Locality.Bearing is from the Locality to the input point. func ClosestNZ(latitude, longitude float64) (Locality, error) { distance := math.MaxFloat64 var bearing float64 var closest int for i := range nz { d, b, err := DistanceBearing(nz[i].latitude, nz[i].longitude, latitude, longitude) if err != nil { return Locality{}, err } if d < distance { distance = d bearing = b closest = i } } return Locality{Longitude: nz[closest].longitude, Latitude: nz[closest].latitude, Name: nz[closest].name, Bearing: bearing, Distance: distance}, nil } // LocalitiesNZ returns New Zealand localities for the input point. // Locality.Bearing is from the Locality to the input point. func LocalitiesNZ(latitude, longitude float64) ([]Locality, error) { var l []Locality for i := range nz { d, b, err := DistanceBearing(nz[i].latitude, nz[i].longitude, latitude, longitude) if err != nil { return []Locality{}, err } l = append(l, Locality{Longitude: nz[i].longitude, Latitude: nz[i].latitude, Name: nz[i].name, Bearing: b, Distance: d}) } return l, nil } // Compass converts bearing (0-360) to a compass bearing name e.g., south-east. 
func Compass(bearing float64) string { switch { case bearing >= 337.5 && bearing <= 360: return "north" case bearing >= 0 && bearing <= 22.5: return "north" case bearing > 22.5 && bearing < 67.5: return "north-east" case bearing >= 67.5 && bearing <= 112.5: return "east" case bearing > 112.5 && bearing < 157.5: return "south-east" case bearing >= 157.5 && bearing <= 202.5: return "south" case bearing > 202.5 && bearing < 247.5: return "south-west" case bearing >= 247.5 && bearing <= 292.5: return "west" case bearing > 292.5 && bearing < 337.5: return "north-west" default: return "north" } } // CompassShort converts bearing (0-360) to a short compass bearing name e.g., N. func CompassShort(bearing float64) string { switch { case bearing >= 337.5 && bearing <= 360: return "N" case bearing >= 0 && bearing <= 22.5: return "N" case bearing > 22.5 && bearing < 67.5: return "NE" case bearing >= 67.5 && bearing <= 112.5: return "E" case bearing > 112.5 && bearing < 157.5: return "SE" case bearing >= 157.5 && bearing <= 202.5: return "S" case bearing > 202.5 && bearing < 247.5: return "SW" case bearing >= 247.5 && bearing <= 292.5: return "W" case bearing > 292.5 && bearing < 337.5: return "NW" default: return "N" } } func (l Locality) Description() string { if l.Distance < 5 { return "Within 5 km of " + l.Name } return fmt.Sprintf("%.f km %s of %s", math.Floor(l.Distance/5.0)*5, Compass(l.Bearing), l.Name) } func (l Locality) DescriptionShort() string { if l.Distance < 5 { return "Within 5 km of " + l.Name } return fmt.Sprintf("%.f km %s of %s", math.Floor(l.Distance/5.0)*5, CompassShort(l.Bearing), l.Name) }
vendor/github.com/GeoNet/kit/wgs84/localities.go
0.817793
0.663396
localities.go
starcoder
package GoPolygons

import (
	"math"
)

// Polygon is a closed polygon defined by its vertices plus a cached
// axis-aligned bounding rectangle used for fast rejection tests.
type Polygon struct {
	Points      []Point
	ContainRect Rect
}

// NewPolygon builds a Polygon from its vertices and computes the bounding
// rectangle in the same pass. The vertex slice is retained, not copied.
// NOTE(review): pts must be non-empty — pts[0] is read unconditionally.
func NewPolygon(pts []Point) *Polygon {
	left := pts[0].X
	right := pts[0].X
	top := pts[0].Y
	bottom := pts[0].Y
	for i := 0; i < len((pts)); i++ {
		left = math.Min(left, pts[i].X)
		right = math.Max(right, pts[i].X)
		top = math.Min(top, pts[i].Y)
		bottom = math.Max(bottom, pts[i].Y)
	}
	return &Polygon{Points: pts, ContainRect: NewRect(left, top, right, bottom)}
}

// ptInPolygonRayCasting reports whether *pt* lies inside the polygon using
// the ray-casting (crossing-number) method: a horizontal ray from the point
// is inside when it crosses the boundary an odd number of times. Points on
// a vertex or on an edge count as inside.
func (pgn *Polygon) ptInPolygonRayCasting(pt Point) bool {
	px := pt.X
	py := pt.Y
	flag := false
	ptCount := len(pgn.Points)
	i := 0
	j := ptCount - 1
	// Walk each edge (j, i), with j trailing i so the last edge closes the
	// polygon back to the first vertex.
	for i < ptCount {
		sx := pgn.Points[i].X
		sy := pgn.Points[i].Y
		tx := pgn.Points[j].X
		ty := pgn.Points[j].Y
		// Point coincides with a polygon vertex.
		if (sx == px && sy == py) || (tx == px && ty == py) {
			return true
		}
		// Edge endpoints straddle the horizontal line through the point
		// (the half-open comparison avoids double-counting shared vertices;
		// it also guarantees ty != sy below).
		if (sy < py && ty >= py) || (sy >= py && ty < py) {
			// X coordinate where the edge crosses that horizontal line.
			x := sx + (py-sy)*(tx-sx)/(ty-sy)
			// Point lies exactly on the edge.
			if x == px {
				return true
			}
			// Crossing strictly to the right of the point toggles parity.
			if x > px {
				flag = !flag
			}
		}
		j = i
		i++
	}
	// Odd number of crossings means the point is inside.
	return flag
}

// PtInPolygon reports whether *pt* is inside the polygon (boundary counts
// as inside).
func (pgn *Polygon) PtInPolygon(pt Point) bool {
	return pgn.ptInPolygonRayCasting(pt)
}

// ContainPolygon reports whether every vertex of *cpgn* lies inside pgn.
// The bounding rectangles must overlap for the test to run at all.
// NOTE(review): vertex containment alone does not prove full containment
// for concave polygons whose edges cross — confirm this limitation is
// acceptable to callers.
func (pgn *Polygon) ContainPolygon(cpgn Polygon) (ret bool) {
	if pgn.ContainRect.IsCross(cpgn.ContainRect) { // envelope boxes must intersect
		ret = true
		PointCount := len(cpgn.Points)
		for i := 0; i < PointCount; i++ {
			if !pgn.PtInPolygon(cpgn.Points[i]) {
				ret = false
				break
			}
		}
	}
	return ret
}

// IntersectPolygon reports whether the two polygons intersect: false when
// disjoint, true when they overlap. It tests, in order: bounding-box
// overlap, any vertex of one inside the other (both directions), and
// finally pairwise edge-segment intersection.
func (pgn *Polygon) IntersectPolygon(cpgn Polygon) bool {
	if !pgn.ContainRect.IsCross(cpgn.ContainRect) { // envelope boxes must intersect
		return false
	}
	// Any vertex of cpgn inside pgn?
	for i := 0; i < len(cpgn.Points); i++ {
		if pgn.PtInPolygon(cpgn.Points[i]) {
			return true
		}
	}
	// Any vertex of pgn inside cpgn?
	for i := 0; i < len(pgn.Points); i++ {
		if cpgn.PtInPolygon(pgn.Points[i]) {
			return true
		}
	}
	// No vertex containment: check every edge of pgn against every edge of
	// cpgn for a segment intersection.
	ptPgnCount := len(pgn.Points)
	ptCPgnCount := len(cpgn.Points)
	i := 0
	j := ptPgnCount - 1
	for i < ptPgnCount {
		sx := pgn.Points[i].X
		sy := pgn.Points[i].Y
		tx := pgn.Points[j].X
		ty := pgn.Points[j].Y
		line := NewLine(sx, sy, tx, ty)
		k := 0
		l := ptCPgnCount - 1
		for k < ptCPgnCount {
			csx := cpgn.Points[k].X
			csy := cpgn.Points[k].Y
			ctx := cpgn.Points[l].X
			cty := cpgn.Points[l].Y
			cline := NewLine(csx, csy, ctx, cty)
			if line.IsLineSegmentCross(cline) {
				return true
			}
			l = k
			k++
		}
		j = i
		i++
	}
	return false
}
polygon.go
0.767341
0.592077
polygon.go
starcoder
package editor

import (
	"fmt"
	"io"
)

// Help writes the editor's quick-reference key-binding table to w, one
// "key         description" line per entry (key left-padded to 12 columns).
// It stops at and returns the first write error, or nil on success.
func Help(w io.Writer) error {
	for _, x := range quickref {
		// Fprintf formats straight into w; no intermediate []byte needed.
		if _, err := fmt.Fprintf(w, "%-12s%s\n", x.k, x.v); err != nil {
			return err
		}
	}
	return nil
}

// quickref lists the supported key bindings and their one-line descriptions.
// Reference: Vim's quickref.txt.
var quickref = []struct{ k, v string }{
	{"h", "left"},
	{"l", "right"},
	{"0", "to first character in the line"},
	{"^", "to first non-blank character in the line"},
	{"$", "to the last character in the line"},
	{"|", "to column N"},
	{"f", "to the Nth occurrence of {char} to the right"},
	{"F", "to the Nth occurrence of {char} to the left"},
	{"t", "till before the Nth occurrence of {char} to the right"},
	// Fixed typo: was "till bl before ..." (stray "bl"), now matches the
	// wording of the `t` entry above.
	{"T", "till before the Nth occurrence of {char} to the left"},
	{"k", "go back history"},
	{"j", "go forward history"},
	{"-", "decrement the number at or after the cursor"},
	{"+", "increment the number at or after the cursor"},
	{"w", "N words forward"},
	{"W", "N blank-separated WORDs forward"},
	{"e", "forward to the end of the Nth word"},
	{"E", "forward to the end of the Nth blank-separated WORD"},
	{"b", "N words backward"},
	{"B", "N blank-separated WORDs backward"},
	{"ge", "backward to the end of the Nth word"},
	{"gE", "backward to the end of the Nth blank-separated WORD"},
	{"[(", "N times back to unclosed '('"},
	{"[{", "N times back to unclosed '{'"},
	{"])", "N times forward to unclosed ')'"},
	{"]}", "N times forward to unclosed '}'"},
	{"/", "search forward"},
	{"?", "search backward"},
	{"n", "repeat last search"},
	{"N", "repeat last search, in opposite direction"},
	{"a", "append text after the cursor"},
	{"A", "append text at the end of the line"},
	{"i", "insert text before the cursor"},
	{"I", "insert text before the first non-blank in the line"},
	{"gI", "insert text in column 1"},
	// insert mode...
	{"i_<Esc>", "end Insert mode, back to Normal mode"},
	{"i_CTRL-C", "like <Esc>"},
	{"i_CTRL-R", "insert the contents of a register"},
	{"i_CTRL-X", "complete the word before the cursor in various ways"},
	{"i_<BS>", "delete the character before the cursor"},
	{"i_CTRL-W", "delete word before the cursor"},
	{"i_CTRL-U", "delete all entered characters in the current line"},
	{"x", "delete N characters under and after the cursor"},
	{"<Del>", "delete N characters under and after the cursor"},
	{"X", "delete N characters before the cursor"},
	{"d", "delete the text that is moved over with {motion}"},
	{"v_d", "delete the highlighted text"},
	{"dd", "delete N lines"},
	{"D", "delete to the end of the line"},
	{":d[elete]", "delete N lines"},
	{"\"", "use register {char} for the next delete, yank, or put"},
	{"y", "yank the text moved over with {motion} into a register"},
	{"v_y", "yank the highlighted text into a register"},
	{"yy", "yank N lines into a register"},
	{"Y", "yank to the end of line into a register"},
	{"p", "put a register after the cursor position (N times)"},
	{"P", "put a register before the cursor position (N times)"},
	{"r", "replace N characters with {char}"},
	{"R", "enter Replace mode"},
	{"c", "change the text that is moved over with {motion}"},
	{"v_c", "change the highlighted text"},
	{"cc", "change N lines"},
	{"C", "change to the end of the line"},
	{"~", "switch case for N characters and advance cursor"},
	{"v_~", "switch case for highlighted text"},
	{"v_u", "make highlighted text lowercase"},
	{"v_U", "make highlighted text uppercase"},
	{"g~", "switch case for the text that is moved over with {motion}"},
	{"gu", "make the text that is moved over with {motion} lowercase"},
	{"gU", "make the text that is moved over with {motion} uppercase"},
	{"v", "start highlighting characters } move cursor and use"},
	{"V", "start highlighting linewise } operator to affect"},
	{"o", "exchange cursor position with start of highlighting"},
	{"v_v", "highlight characters or stop highlighting"},
	{"aw", `Select "a word"`},
	{"iw", `Select "inner word"`},
	{"aW", `Select "a |WORD|"`},
	{"iW", `Select "inner |WORD|"`},
	{"as", `Select "a sentence"`},
	{"is", `Select "inner sentence"`},
	{"ap", `Select "a paragraph"`},
	{"ip", `Select "inner paragraph"`},
	{"ab", `Select "a block" (from "[(" to "])")`},
	{"ib", `Select "inner block" (from "[(" to "])")`},
	{"aB", `Select "a Block" (from "[{" to "]}")`},
	{"iB", `Select "inner Block" (from "[{" to "]}")`},
	{"a>", `Select "a <> block"`},
	{"i>", `Select "inner <> block"`},
	{"at", `Select "a tag block" (from <aaa> to </aaa>)`},
	{"it", `Select "inner tag block" (from <aaa> to </aaa>)`},
	{"a'", `Select "a single quoted string"`},
	{"i'", `Select "inner single quoted string"`},
	{"a\"", `Select "a double quoted string"`},
	{"i\"", `Select "inner double quoted string"`},
	{"a`", `Select "a backward quoted string"`},
	{"i`", `Select "inner backward quoted string"`},
	{"q{a-z}", "record typed characters into register {a-z}"},
	{"q{A-Z}", "record typed characters, appended to register {a-z}"},
	{"q", "stop recording"},
	{":q[uit]", "exit"},
}
editor/help.go
0.569134
0.551332
help.go
starcoder
package heap

import (
	"errors"
)

// Direction selects whether the heap surfaces its minimum or its maximum
// element on Get and Extract operations.
type Direction int

const (
	// Ascending orders the heap as a min-heap: the root is the minimum.
	Ascending Direction = iota
	// Descending orders the heap as a max-heap: the root is the maximum.
	Descending
)

// IntHeap is a binary heap of ints. The zero value is an empty Ascending
// heap and is ready to use.
type IntHeap struct {
	// Direction defines whether the heap is ascending or descending.
	Direction Direction
	// values holds the tree in implicit-array form: the children of the
	// node at index i live at indexes 2i+1 and 2i+2.
	values []int
}

// Get returns the root element without removing it. Depending on Direction
// it is the minimum (Ascending) or the maximum (Descending). It returns an
// error when the heap is empty.
//
// Bug fix: this used to test `h.values == nil`, which panicked with an
// index-out-of-range on a heap that had been filled and then fully drained
// (Extract leaves a non-nil, zero-length slice).
func (h *IntHeap) Get() (int, error) {
	if len(h.values) == 0 {
		return 0, errors.New("No elements in heap")
	}
	return h.values[0], nil
}

// Extract removes and returns the root element, which is the minimum or the
// maximum of the stored values depending on Direction.
func (h *IntHeap) Extract() (int, error) {
	if len(h.values) == 0 {
		return 0, errors.New("No more elements in heap")
	}
	val := h.values[0]

	// Move the last element to the root, shrink the slice, then sift the
	// new root down until the heap invariant holds again.
	h.swap(0, len(h.values)-1)
	h.values = h.values[:len(h.values)-1]

	idx := 0
	for {
		left, right := h.childIndexes(idx)
		if left >= len(h.values) {
			// No children (right is always left+1), so sifting is done.
			return val, nil
		}
		// Pick the child that may legally become the new parent: the
		// smaller one for Ascending, the larger one for Descending.
		child := left
		if right < len(h.values) && !h.cmp(h.values[right], h.values[left]) {
			child = right
		}
		if h.invariant(child) {
			return val, nil
		}
		h.swap(idx, child)
		idx = child
	}
}

// parentIndex returns the index of the parent of item i (0 for the root).
func (h *IntHeap) parentIndex(i int) int {
	if i == 0 {
		return 0
	}
	return (i - 1) / 2
}

// childIndexes returns the pair of child indexes for parent i. It does not
// check whether these indexes actually fall within the heap.
func (h *IntHeap) childIndexes(i int) (left, right int) {
	left = i*2 + 1
	right = left + 1
	return
}

// Insert adds x to the heap and sifts it up to restore the invariant.
func (h *IntHeap) Insert(x int) error {
	h.values = append(h.values, x)
	idx := len(h.values) - 1
	for !h.invariant(idx) {
		h.swap(idx, h.parentIndex(idx))
		idx = h.parentIndex(idx)
	}
	return nil
}

// swap exchanges the i'th and j'th elements.
func (h *IntHeap) swap(i, j int) error {
	h.values[i], h.values[j] = h.values[j], h.values[i]
	return nil
}

// invariant reports whether the heap property holds between item i and its
// parent (trivially true for the root, whose parent index is itself).
func (h *IntHeap) invariant(i int) bool {
	return h.cmp(h.values[i], h.values[h.parentIndex(i)])
}

// cmp reports whether x may sit below y in the tree: x >= y for Ascending
// (min-heap) and x <= y for Descending (max-heap).
func (h *IntHeap) cmp(x, y int) bool {
	if h.Direction == Ascending {
		return x >= y
	}
	return y >= x
}
heap.go
0.780244
0.466238
heap.go
starcoder
package closure

import (
	"fmt"
	"math"
	"net/http"
	"sort"
	"strings"
)

// AsGenerator is an example of the idea of closures. The state of the
// function is sealed away (closed away) and it keeps that state even after the
// scope is destroyed. Think of an oyster 🦪 with a grain of sand, turning into
// a pearl. ⚪
func AsGenerator() func() int {
	startNum := 0
	// The returned function keeps a reference to startNum, so the counter
	// survives between calls even though AsGenerator has already returned.
	return func() int {
		startNum++
		return startNum
	}
}

// ForAccessingData shows we can access variables that do not belong to the
// inner function aka our closure.
func ForAccessingData() {
	notInClosure := "How does it know I exist?"
	func() {
		fmt.Println("This is a function, that is allowed to see local variables 👉", notInClosure)
	}()
	// NOTE(jay): This is just like how we can grab a global slice and change the
	// innards of it. -- `GlobalSlice[0] = "Some other value"` Because the
	// `GlobalSlice` is in scope of the function we can change it. Essentially
	// the "global" area has expanded for our closure allowing us access to the
	// `notInClosure` variable.
}

// NOTE(jay): This cannot be done outside of the 👆 above function. Closures
// are given special permission to access variables in the current scope.
// func doesNotWork() {
// 	fmt.Println("This is a function, that is allowed to see local variables 👉",
// 		notInClosureScope)
// }

// MyS is used for showing off examples with closures and accessing
// the values of MyS from within the closures.
type MyS struct {
	MyStr     string // arbitrary payload printed/served by the examples
	IsChanged bool   // demo flag; nothing in this file mutates it
}

// AsMiddleware is an example of not changing the existing function, but
// only adding new features to it. In this example we can make a logger. It
// will check what the values passed in are before and after the function call
// without interfering with the original function! Pretty neat 💯
func AsMiddleware(myFunc func(strct *MyS, n *int)) func(strct *MyS, n *int) {
	return func(strct *MyS, n *int) {
		fmt.Printf("this is a statement that happens **BEFORE** myFunc:\n"+
			"Here are the values before changing them: %+v and %d\n", strct, *n)
		myFunc(strct, n)
		fmt.Printf("this is a statement that happens **AFTER** myFunc:\n"+
			"Here are the values after changing them: %+v and %d\n", strct, *n)
	}
}

// ForAccessingMoreData is an example of feeding values to a function that
// does not accept that type or more values. This is very common with the
// `http.HandlerFunc` which is required to have exactly 2 parameters
// `http.ResponseWriter` and `*http.Request`. So how do we make a
// `http.HandlerFunc` have more parameters? Closures of course!
func ForAccessingMoreData(strct MyS) func(http.ResponseWriter, *http.Request) {
	// This func here 👇matches with this func here👆
	return func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, strct.MyStr)
	}
}

// ForStandardLibrary is an example of when the standard library will
// ask you to provide a closure in order to complete the functions parameters.
// In this we see two examples: `strings.Map` and `sort.Search`
func ForStandardLibrary() {
	rot13 := "pybfherf ner pbby"
	// NOTE(jay): Even though this is an anonymous function it's not a true
	// closure as we don't use any variables from outside of the inner functions
	// scope. We **could** do that and it would become a closure, but it is
	// important to notice we don't have to make an anonymous function a closure.
	mappedStr := strings.Map(func(r rune) rune {
		if r == ' ' {
			return r
		}
		r -= 13
		// We might go outside of alphabet range a-z, so we correct for it here.
		switch {
		case r < 'a':
			return r + 'z' - 'a' + 1
		case r > 'z':
			return r%'z' + 'a' - 1
		default:
			return r
		}
	}, rot13)
	fmt.Println(mappedStr)
	sortedInts := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
	indexAfter13 := sort.Search(len(sortedInts), func(i int) bool {
		return sortedInts[i] > 13
	})
	// NOTE(review): sort.Search expects a predicate that is false for a prefix
	// of the range and true for the rest. The `<` and `==` predicates below
	// break that contract, so their results are not meaningful binary-search
	// answers — presumably intentional for demonstration purposes; confirm.
	indexBefore13 := sort.Search(len(sortedInts), func(i int) bool {
		return sortedInts[i] < 13
	})
	indexOf13 := sort.Search(len(sortedInts), func(i int) bool {
		return sortedInts[i] == 13
	})
	fmt.Printf("indices found from Binary Search of 13:\nAfter: %d\n"+
		"Before: %d\nEqual: %d", indexAfter13, indexBefore13, indexOf13)
}

// AvoidCallbackHell is an example of how, in Go, there is no such thing
// as "Callback Hell" because you can always call work synchronously and if you
// want to do it asynchronously you would use a goroutine with `go` to do it.
// If you are unfamiliar with "Callback Hell" it's not important to learn about
// it. You won't experience it in Go because it's much more well designed!
func AvoidCallbackHell() {
	// result1 is DoWork1 as a closure
	result1 := func(args ...int) int {
		sum := 0
		for _, n := range args {
			sum += n
		}
		return sum
	}(1, 2, 3, 4, 5, 6, 7, 8)
	// result2 is DoWork2 as a closure
	result2 := func(sum int) float32 {
		crunchNums := math.Pi * 2
		return float32(sum) * float32(crunchNums)
	}(result1)
	// result3 is DoWork3 as a closure
	result3 := func(f float32) string {
		return fmt.Sprintf("%0.4f", f)
	}(result2)
	fmt.Printf("Some padding for final result:\n%14s", result3)
}

// DoWork1 is a global private closure. It creates the sum of several numbers.
var DoWork1 = func(args ...int) int {
	sum := 0
	for _, n := range args {
		sum += n
	}
	return sum
}

// DoWork2 acts like it's doing some heavy number crunching and using the value
// that it obtains from DoWork1.
var DoWork2 = func(sum int) float32 {
	crunchNums := math.Pi * 2
	return float32(sum) * float32(crunchNums)
}

// DoWork3 changes the float32 data type into a string with 4 places of
// precision.
var DoWork3 = func(f float32) string {
	return fmt.Sprintf("%0.4f", f)
}

// Gotcha shows that closures will grab from outside of their environment, but
// closures have no control of those variables having their values changed.
//
// NOTE(review): since Go 1.22 each `for` iteration gets its own loop
// variable, so on modern toolchains every closure here captures a distinct
// `gotcha`. The "all closures see the final value" behavior this example
// demonstrates only occurs with a pre-1.22 language version — confirm the
// module's `go` directive.
func Gotcha() {
	limit := 4
	funcs := make([]func(), limit)
	for gotcha := 0; gotcha < limit; gotcha++ {
		funcs[gotcha] = func() {
			fmt.Printf("number: %d and pointer: %p\n", gotcha, &gotcha)
		}
	}
	for i := 0; i < limit; i++ {
		funcs[i]()
	}
}

// GotchaFix shows a deeper understanding of closures and how we can avoid the
// very common beginner mistake of
func GotchaFix() {
	limit := 4
	funcs := make([]func(), limit)
	for gotcha := 0; gotcha < limit; gotcha++ {
		// NOTE(jay): this is known as shadowing. This is confusing and is normally
		// avoided if possible. The explanation is we are creating a new variable
		// scoped inside of the for loop and therefore this `gotcha` won't change
		// only the `gotcha` on the for loop scope will change.
		gotcha := gotcha
		// 👇 This also works and it's less confusing for the uninitiated. Again,
		// we are creating a new variable `gotcha2` inside of the for loop, that is
		// the same as the `gotcha` on the for loop scope.
		// gotcha2 := gotcha
		funcs[gotcha] = func() {
			fmt.Printf("number: %d and pointer: %p\n", gotcha, &gotcha)
		}
	}
	// Instead of using a closure we can use an anonymous function which has a
	// parameter passed in.
	funcs2 := make([]func(int), limit)
	for gotcha := 0; gotcha < limit; gotcha++ {
		funcs2[gotcha] = func(i int) {
			fmt.Printf("number: %d and pointer: %p\n", i, &i)
		}
	}
	fmt.Println("fix 1:")
	for i := 0; i < limit; i++ {
		funcs[i]()
	}
	fmt.Println("fix 2:")
	for i := 0; i < limit; i++ {
		funcs2[i](i)
	}
}
basics/completed/closure/closure.go
0.588416
0.439026
closure.go
starcoder
package lexers

import (
	. "github.com/johnsto/go-highlight"
	"strings"
)

// JSON is the lexer specification for JSON documents.
var JSON = Lexer{
	Name:      "json",
	MimeTypes: []string{"application/json"},
	Filenames: []string{"*.json"},
	States: StatesSpec{
		"root": {
			{Include: "value"},
		},
		"whitespace": {
			{Regexp: "\\s+", Type: Whitespace},
		},
		// literal matches a literal JSON value
		"literal": {
			{Regexp: "(true|false|null)", Type: Literal},
		},
		// number matches a JSON number
		"number": {
			// -123.456e+78
			{Regexp: "-?[0-9]+\\.?[0-9]*[eE][\\+\\-]?[0-9]+", Type: Number},
			// -123.456
			{Regexp: "-?[0-9]+\\.[0-9]+", Type: Number},
			// -123
			{Regexp: "-?[0-9]+", Type: Number},
		},
		// string matches a JSON string
		"string": {
			{Regexp: `(")(")`, SubTypes: []TokenType{Punctuation, Punctuation}},
			{Regexp: `(")((?:\\\"|[^\"])*?)(")`,
				SubTypes: []TokenType{Punctuation, String, Punctuation}},
		},
		// value matches any valid JSON value
		"value": {
			{Include: "whitespace"},
			{Include: "literal"},
			{Include: "number"},
			{Include: "string"},
			{Include: "array"},
			{Include: "object"},
		},
		// object matches the start of an object
		"object": {
			{Regexp: "{", Type: Punctuation, State: "objectKey"},
		},
		// objectKey matches a key within an object, or pops if the end of
		// the object has been reached
		"objectKey": {
			{Include: "whitespace"},
			{Regexp: `(")((?:\\\"|[^\"])*?)(")(\s*)(:)`,
				SubTypes: []TokenType{Punctuation, Attribute, Punctuation,
					Whitespace, Assignment},
				State: "objectValue"},
			{Regexp: "}", Type: Punctuation, State: "#pop"},
		},
		// objectValue matches a key value within an object, popping after
		// each element or when the object ends
		"objectValue": {
			{Include: "whitespace"},
			{Include: "value"},
			{Regexp: ",", Type: Punctuation, State: "#pop"},
			{Regexp: "}", Type: Punctuation, State: "#pop #pop"},
		},
		// array matches the start of an array
		"array": {
			{Regexp: "\\[", Type: Punctuation, State: "arrayValue"},
		},
		// arrayValue matches elements within an array and pops when the
		// array ends
		"arrayValue": {
			{Include: "whitespace"},
			{Include: "value"},
			{Regexp: ",", Type: Punctuation},
			{Regexp: "\\]", Type: Punctuation, State: "#pop"},
		},
	},
	Filters: []Filter{
		RemoveEmptiesFilter,
	},
	Formatter: &JSONFormatter{Indent: " "},
}

// JSONFormatter consumes a series of JSON tokens and emits additional tokens
// to produce indented, formatted output.
type JSONFormatter struct {
	// Indent is the string emitted once per indentation level.
	Indent string
}

// Filter returns an emitter wrapper that pretty-prints the token stream:
// source whitespace is dropped and replaced with newlines and indentation
// driven by the structural punctuation seen so far, before each resulting
// token is forwarded to emit.
//
// Fixes over the previous version: an unreachable `return nil` after the
// `return func(...)` statement was removed, the no-op `break` in the EOF
// case was dropped, and the fallthrough chains were collapsed into
// multi-value cases. Behavior is unchanged.
func (f *JSONFormatter) Filter(emit func(Token) error) func(Token) error {
	// indents records the current indentation level.
	indents := 0
	return func(token Token) error {
		indent := strings.Repeat(f.Indent, indents)

		// out collects the tokens to emit in place of this input token.
		var out []Token

		switch token.Type {
		case Whitespace:
			// Drop source whitespace; we synthesize our own below.
			return nil
		case Assignment:
			if token.Value == ":" {
				// Key separator: follow it with a single space.
				out = []Token{token, {Type: Whitespace, Value: " "}}
			} else {
				out = []Token{token}
			}
		case Punctuation:
			switch token.Value {
			case ",":
				// Element separator: newline plus current indentation.
				out = []Token{token,
					{Type: Whitespace, Value: "\n"},
					{Type: Whitespace, Value: indent}}
			case "{", "[":
				// Opening bracket: newline, then one level deeper.
				out = append(out, token)
				out = append(out, Token{Type: Whitespace, Value: "\n"})
				indents++
				indent = strings.Repeat(f.Indent, indents)
				out = append(out, Token{Type: Whitespace, Value: indent})
			case "}", "]":
				// Closing bracket: drop back one level before emitting it.
				out = append(out, Token{Type: Whitespace, Value: "\n"})
				indents--
				indent = strings.Repeat(f.Indent, indents)
				out = append(out, Token{Type: Whitespace, Value: indent})
				out = append(out, token)
			default:
				out = []Token{token}
			}
		case "":
			// EOF: nothing to emit.
		default:
			out = []Token{token}
		}

		// Attempt to emit each token, failing on the first error.
		for _, t := range out {
			if err := emit(t); err != nil {
				return err
			}
		}
		return nil
	}
}

func init() {
	Register(JSON.Name, JSON)
}
lexers/lexer_json.go
0.526343
0.517022
lexer_json.go
starcoder
package geom

/*
#include <liblwgeom.h>
#include <geos_c.h>
#include "lwgeom_geos.h"
#include "geos.h"
#include "geom.h"
*/
import "C"

import "errors"

// Buffer replaces the geometry with a buffer of the given width around it.
// 8 is the segment count passed to the C shim (per the GEOS buffer API,
// the number of segments approximating a quarter circle — confirm in geos.h).
//
// Bug fix: the old LWGEOM is now freed only after a buffer has been built
// successfully. Previously `defer C.lwgeom_free(lwg.LwGeom)` ran on every
// path, so on failure the receiver kept a dangling pointer to freed memory.
func (lwg *Geom) Buffer(width float64) error {
	bufferedGeom := C.buffer(lwg.LwGeom, C.double(width), C.int(8))
	if bufferedGeom == nil {
		// Leave the original geometry intact on failure.
		return errors.New("Error creating Buffer")
	}
	C.lwgeom_free(lwg.LwGeom)
	lwg.LwGeom = bufferedGeom
	return nil
}

// BufferWithParams creates a buffer around the geometry using a BufferParams
// object. As with Buffer, the old geometry is freed only on success.
func (lwg *Geom) BufferWithParams(params *BufferParams, width float64) error {
	bufferedGeom := C.buffer_with_params(lwg.LwGeom, C.double(width), params.CBufP)
	if bufferedGeom == nil {
		return errors.New("Error creating Buffer")
	}
	C.lwgeom_free(lwg.LwGeom)
	lwg.LwGeom = bufferedGeom
	return nil
}

// Union returns the union of the two geometries.
func (lwg *Geom) Union(g1 *Geom) (*Geom, error) {
	union := C.geos_union(lwg.LwGeom, g1.LwGeom)
	if union == nil {
		return nil, errors.New("Error in GEOS union operation")
	}
	return &Geom{
		LwGeom: union,
	}, nil
}

// Intersection finds the intersection of the two geometries.
func (lwg *Geom) Intersection(g1 *Geom) (*Geom, error) {
	intersection := C.geos_intersection(lwg.LwGeom, g1.LwGeom)
	if intersection == nil {
		return nil, errors.New("Error in GEOS intersection operation")
	}
	return &Geom{
		LwGeom: intersection,
	}, nil
}

// Intersects checks whether the geometry intersects another geometry.
func (lwg *Geom) Intersects(g1 *Geom) (bool, error) {
	intersects := C.geos_intersects(lwg.LwGeom, g1.LwGeom)
	if intersects == C.GEOS_EXCEPTION {
		return false, errors.New("Error in GEOS intersects operation")
	}
	return geosBoolResult(intersects)
}

// Disjoints reports whether the two geometries are spatially disjoint.
// Overlaps, Touches and Within all imply the geometries are NOT spatially
// disjoint; Disjoint implies false for spatial intersection.
func (lwg *Geom) Disjoints(g1 *Geom) (bool, error) {
	disjoints := C.geos_disjoints(lwg.LwGeom, g1.LwGeom)
	if disjoints == C.GEOS_EXCEPTION {
		return false, errors.New("Error in GEOS disjoint operation")
	}
	return geosBoolResult(disjoints)
}

// Touches returns TRUE if the only points in common between g1 and g2 lie in
// the union of the boundaries of g1 and g2.
//
// Bug fix: this previously called C.geos_disjoints (copy-paste error), so it
// answered the disjoint question rather than the touches question. Assumes
// the cgo shim exports geos_touches alongside the other predicates — confirm
// against geos.h.
func (lwg *Geom) Touches(g1 *Geom) (bool, error) {
	touches := C.geos_touches(lwg.LwGeom, g1.LwGeom)
	if touches == C.GEOS_EXCEPTION {
		return false, errors.New("Error in GEOS touches operation")
	}
	return geosBoolResult(touches)
}

// Within returns TRUE if geometry A is completely inside geometry B.
func (lwg *Geom) Within(g1 *Geom) (bool, error) {
	within := C.geos_within(lwg.LwGeom, g1.LwGeom)
	if within == C.GEOS_EXCEPTION {
		return false, errors.New("Error in GEOS within operation")
	}
	return geosBoolResult(within)
}

// Contains returns TRUE if geometry B is completely inside geometry A.
func (lwg *Geom) Contains(g1 *Geom) (bool, error) {
	contains := C.geos_contains(lwg.LwGeom, g1.LwGeom)
	if contains == C.GEOS_EXCEPTION {
		return false, errors.New("Error in GEOS contains operation")
	}
	return geosBoolResult(contains)
}

// Overlaps returns TRUE if the geometries share space but neither is
// completely contained in the other.
func (lwg *Geom) Overlaps(g1 *Geom) (bool, error) {
	overlaps := C.geos_overlaps(lwg.LwGeom, g1.LwGeom)
	if overlaps == C.GEOS_EXCEPTION {
		return false, errors.New("Error in GEOS overlaps operation")
	}
	return geosBoolResult(overlaps)
}

// GEOSEquals returns true if the DE-9IM intersection matrix for the two
// geometries is T*F**FFF* — i.e. a and b are topologically equal: their
// interiors intersect and no part of the interior or boundary of one
// geometry intersects the exterior of the other. Equivalent to
// Within && Contains.
func (lwg *Geom) GEOSEquals(g1 *Geom) (bool, error) {
	equals := C.geos_equals(lwg.LwGeom, g1.LwGeom)
	if equals == C.GEOS_EXCEPTION {
		return false, errors.New("Error in GEOS equals operation")
	}
	return geosBoolResult(equals)
}

// GEOSEqualsExact returns true if the two geometries are of the same type and
// their vertices corresponding by index are equal up to the given tolerance.
func (lwg *Geom) GEOSEqualsExact(g1 *Geom, tolerance float64) (bool, error) {
	eqExact := C.geos_equals_exact(lwg.LwGeom, g1.LwGeom, C.double(tolerance))
	if eqExact == C.GEOS_EXCEPTION {
		// Error message now names the actual operation (was "equals").
		return false, errors.New("Error in GEOS equals exact operation")
	}
	return geosBoolResult(eqExact)
}

// Covers returns true if this geometry covers the specified geometry:
// every point of the other geometry is a point of this geometry
// (DE-9IM matrix T*****FF*, *T****FF*, ***T**FF* or ****T*FF*).
// False if either geometry is empty. Covers is similar to Contains but more
// inclusive: it does not distinguish between points in the boundary and in
// the interior, and it is generally preferable to Contains.
func (lwg *Geom) Covers(g1 *Geom) (bool, error) {
	covers := C.geos_covers(lwg.LwGeom, g1.LwGeom)
	if covers == C.GEOS_EXCEPTION {
		// Error message now names the actual operation (was "equals").
		return false, errors.New("Error in GEOS covers operation")
	}
	return geosBoolResult(covers)
}

// CoveredBy tests whether this geometry is covered by the specified geometry:
// every point of this geometry is a point of the other
// (DE-9IM matrix [T*F**F***], [*TF**F***], [**FT*F***] or [**F*TF***]);
// the converse of Covers. False if either geometry is empty. Similar to
// Within but more inclusive.
func (lwg *Geom) CoveredBy(g1 *Geom) (bool, error) {
	coveredBy := C.geos_covered_by(lwg.LwGeom, g1.LwGeom)
	if coveredBy == C.GEOS_EXCEPTION {
		// Error message now names the actual operation (was "equals").
		return false, errors.New("Error in GEOS coveredBy operation")
	}
	return geosBoolResult(coveredBy)
}

// Crosses returns true if this geometry crosses the specified geometry.
func (lwg *Geom) Crosses(g1 *Geom) (bool, error) {
	crosses := C.geos_crosses(lwg.LwGeom, g1.LwGeom)
	if crosses == C.GEOS_EXCEPTION {
		// Error message now names the actual operation (was "equals").
		return false, errors.New("Error in GEOS crosses operation")
	}
	return geosBoolResult(crosses)
}
geom/geom.go
0.803482
0.47098
geom.go
starcoder
package asposeslidescloud import ( "encoding/json" ) // A bubble series. type IBubbleSeries interface { // Series type. getType() string setType(newValue string) // Series name. getName() string setName(newValue string) // True if each data marker in the series has a different color. getIsColorVaried() bool setIsColorVaried(newValue bool) // Invert solid color for the series. getInvertedSolidFillColor() string setInvertedSolidFillColor(newValue string) // True if curve smoothing is turned on. Applies only to line and scatter connected by lines charts. getSmooth() bool setSmooth(newValue bool) // True if the series is plotted on second value axis. getPlotOnSecondAxis() bool setPlotOnSecondAxis(newValue bool) // Series order. getOrder() int32 setOrder(newValue int32) // The number format for the series y values. getNumberFormatOfYValues() string setNumberFormatOfYValues(newValue string) // The number format for the series x values. getNumberFormatOfXValues() string setNumberFormatOfXValues(newValue string) // The number format for the series values. getNumberFormatOfValues() string setNumberFormatOfValues(newValue string) // The number format for the series bubble sizes. getNumberFormatOfBubbleSizes() string setNumberFormatOfBubbleSizes(newValue string) // True if the series shall invert its colors if the value is negative. Applies to bar, column and bubble series. getInvertIfNegative() bool setInvertIfNegative(newValue bool) // The distance of an open pie slice from the center of the pie chart is expressed as a percentage of the pie diameter. getExplosion() int32 setExplosion(newValue int32) // Series marker. getMarker() ISeriesMarker setMarker(newValue ISeriesMarker) // Fill properties set for the series. getFillFormat() IFillFormat setFillFormat(newValue IFillFormat) // Effect properties set for the series. getEffectFormat() IEffectFormat setEffectFormat(newValue IEffectFormat) // Line properties set for the series. 
getLineFormat() ILineFormat setLineFormat(newValue ILineFormat) // Data point type. getDataPointType() string setDataPointType(newValue string) // Gets or sets the values. getDataPoints() []IBubbleChartDataPoint setDataPoints(newValue []IBubbleChartDataPoint) } type BubbleSeries struct { // Series type. Type_ string `json:"Type,omitempty"` // Series name. Name string `json:"Name,omitempty"` // True if each data marker in the series has a different color. IsColorVaried bool `json:"IsColorVaried"` // Invert solid color for the series. InvertedSolidFillColor string `json:"InvertedSolidFillColor,omitempty"` // True if curve smoothing is turned on. Applies only to line and scatter connected by lines charts. Smooth bool `json:"Smooth"` // True if the series is plotted on second value axis. PlotOnSecondAxis bool `json:"PlotOnSecondAxis"` // Series order. Order int32 `json:"Order,omitempty"` // The number format for the series y values. NumberFormatOfYValues string `json:"NumberFormatOfYValues,omitempty"` // The number format for the series x values. NumberFormatOfXValues string `json:"NumberFormatOfXValues,omitempty"` // The number format for the series values. NumberFormatOfValues string `json:"NumberFormatOfValues,omitempty"` // The number format for the series bubble sizes. NumberFormatOfBubbleSizes string `json:"NumberFormatOfBubbleSizes,omitempty"` // True if the series shall invert its colors if the value is negative. Applies to bar, column and bubble series. InvertIfNegative bool `json:"InvertIfNegative"` // The distance of an open pie slice from the center of the pie chart is expressed as a percentage of the pie diameter. Explosion int32 `json:"Explosion,omitempty"` // Series marker. Marker ISeriesMarker `json:"Marker,omitempty"` // Fill properties set for the series. FillFormat IFillFormat `json:"FillFormat,omitempty"` // Effect properties set for the series. EffectFormat IEffectFormat `json:"EffectFormat,omitempty"` // Line properties set for the series. 
LineFormat ILineFormat `json:"LineFormat,omitempty"` // Data point type. DataPointType string `json:"DataPointType"` // Gets or sets the values. DataPoints []IBubbleChartDataPoint `json:"DataPoints,omitempty"` } func NewBubbleSeries() *BubbleSeries { instance := new(BubbleSeries) instance.Type_ = "" instance.DataPointType = "Bubble" return instance } func (this *BubbleSeries) getType() string { return this.Type_ } func (this *BubbleSeries) setType(newValue string) { this.Type_ = newValue } func (this *BubbleSeries) getName() string { return this.Name } func (this *BubbleSeries) setName(newValue string) { this.Name = newValue } func (this *BubbleSeries) getIsColorVaried() bool { return this.IsColorVaried } func (this *BubbleSeries) setIsColorVaried(newValue bool) { this.IsColorVaried = newValue } func (this *BubbleSeries) getInvertedSolidFillColor() string { return this.InvertedSolidFillColor } func (this *BubbleSeries) setInvertedSolidFillColor(newValue string) { this.InvertedSolidFillColor = newValue } func (this *BubbleSeries) getSmooth() bool { return this.Smooth } func (this *BubbleSeries) setSmooth(newValue bool) { this.Smooth = newValue } func (this *BubbleSeries) getPlotOnSecondAxis() bool { return this.PlotOnSecondAxis } func (this *BubbleSeries) setPlotOnSecondAxis(newValue bool) { this.PlotOnSecondAxis = newValue } func (this *BubbleSeries) getOrder() int32 { return this.Order } func (this *BubbleSeries) setOrder(newValue int32) { this.Order = newValue } func (this *BubbleSeries) getNumberFormatOfYValues() string { return this.NumberFormatOfYValues } func (this *BubbleSeries) setNumberFormatOfYValues(newValue string) { this.NumberFormatOfYValues = newValue } func (this *BubbleSeries) getNumberFormatOfXValues() string { return this.NumberFormatOfXValues } func (this *BubbleSeries) setNumberFormatOfXValues(newValue string) { this.NumberFormatOfXValues = newValue } func (this *BubbleSeries) getNumberFormatOfValues() string { return this.NumberFormatOfValues 
} func (this *BubbleSeries) setNumberFormatOfValues(newValue string) { this.NumberFormatOfValues = newValue } func (this *BubbleSeries) getNumberFormatOfBubbleSizes() string { return this.NumberFormatOfBubbleSizes } func (this *BubbleSeries) setNumberFormatOfBubbleSizes(newValue string) { this.NumberFormatOfBubbleSizes = newValue } func (this *BubbleSeries) getInvertIfNegative() bool { return this.InvertIfNegative } func (this *BubbleSeries) setInvertIfNegative(newValue bool) { this.InvertIfNegative = newValue } func (this *BubbleSeries) getExplosion() int32 { return this.Explosion } func (this *BubbleSeries) setExplosion(newValue int32) { this.Explosion = newValue } func (this *BubbleSeries) getMarker() ISeriesMarker { return this.Marker } func (this *BubbleSeries) setMarker(newValue ISeriesMarker) { this.Marker = newValue } func (this *BubbleSeries) getFillFormat() IFillFormat { return this.FillFormat } func (this *BubbleSeries) setFillFormat(newValue IFillFormat) { this.FillFormat = newValue } func (this *BubbleSeries) getEffectFormat() IEffectFormat { return this.EffectFormat } func (this *BubbleSeries) setEffectFormat(newValue IEffectFormat) { this.EffectFormat = newValue } func (this *BubbleSeries) getLineFormat() ILineFormat { return this.LineFormat } func (this *BubbleSeries) setLineFormat(newValue ILineFormat) { this.LineFormat = newValue } func (this *BubbleSeries) getDataPointType() string { return this.DataPointType } func (this *BubbleSeries) setDataPointType(newValue string) { this.DataPointType = newValue } func (this *BubbleSeries) getDataPoints() []IBubbleChartDataPoint { return this.DataPoints } func (this *BubbleSeries) setDataPoints(newValue []IBubbleChartDataPoint) { this.DataPoints = newValue } func (this *BubbleSeries) UnmarshalJSON(b []byte) error { var objMap map[string]*json.RawMessage err := json.Unmarshal(b, &objMap) if err != nil { return err } this.Type_ = "" if valType, ok := objMap["type"]; ok { if valType != nil { var valueForType 
string err = json.Unmarshal(*valType, &valueForType) if err != nil { var valueForTypeInt int32 err = json.Unmarshal(*valType, &valueForTypeInt) if err != nil { return err } this.Type_ = string(valueForTypeInt) } else { this.Type_ = valueForType } } } if valTypeCap, ok := objMap["Type"]; ok { if valTypeCap != nil { var valueForType string err = json.Unmarshal(*valTypeCap, &valueForType) if err != nil { var valueForTypeInt int32 err = json.Unmarshal(*valTypeCap, &valueForTypeInt) if err != nil { return err } this.Type_ = string(valueForTypeInt) } else { this.Type_ = valueForType } } } if valName, ok := objMap["name"]; ok { if valName != nil { var valueForName string err = json.Unmarshal(*valName, &valueForName) if err != nil { return err } this.Name = valueForName } } if valNameCap, ok := objMap["Name"]; ok { if valNameCap != nil { var valueForName string err = json.Unmarshal(*valNameCap, &valueForName) if err != nil { return err } this.Name = valueForName } } if valIsColorVaried, ok := objMap["isColorVaried"]; ok { if valIsColorVaried != nil { var valueForIsColorVaried bool err = json.Unmarshal(*valIsColorVaried, &valueForIsColorVaried) if err != nil { return err } this.IsColorVaried = valueForIsColorVaried } } if valIsColorVariedCap, ok := objMap["IsColorVaried"]; ok { if valIsColorVariedCap != nil { var valueForIsColorVaried bool err = json.Unmarshal(*valIsColorVariedCap, &valueForIsColorVaried) if err != nil { return err } this.IsColorVaried = valueForIsColorVaried } } if valInvertedSolidFillColor, ok := objMap["invertedSolidFillColor"]; ok { if valInvertedSolidFillColor != nil { var valueForInvertedSolidFillColor string err = json.Unmarshal(*valInvertedSolidFillColor, &valueForInvertedSolidFillColor) if err != nil { return err } this.InvertedSolidFillColor = valueForInvertedSolidFillColor } } if valInvertedSolidFillColorCap, ok := objMap["InvertedSolidFillColor"]; ok { if valInvertedSolidFillColorCap != nil { var valueForInvertedSolidFillColor string err = 
json.Unmarshal(*valInvertedSolidFillColorCap, &valueForInvertedSolidFillColor) if err != nil { return err } this.InvertedSolidFillColor = valueForInvertedSolidFillColor } } if valSmooth, ok := objMap["smooth"]; ok { if valSmooth != nil { var valueForSmooth bool err = json.Unmarshal(*valSmooth, &valueForSmooth) if err != nil { return err } this.Smooth = valueForSmooth } } if valSmoothCap, ok := objMap["Smooth"]; ok { if valSmoothCap != nil { var valueForSmooth bool err = json.Unmarshal(*valSmoothCap, &valueForSmooth) if err != nil { return err } this.Smooth = valueForSmooth } } if valPlotOnSecondAxis, ok := objMap["plotOnSecondAxis"]; ok { if valPlotOnSecondAxis != nil { var valueForPlotOnSecondAxis bool err = json.Unmarshal(*valPlotOnSecondAxis, &valueForPlotOnSecondAxis) if err != nil { return err } this.PlotOnSecondAxis = valueForPlotOnSecondAxis } } if valPlotOnSecondAxisCap, ok := objMap["PlotOnSecondAxis"]; ok { if valPlotOnSecondAxisCap != nil { var valueForPlotOnSecondAxis bool err = json.Unmarshal(*valPlotOnSecondAxisCap, &valueForPlotOnSecondAxis) if err != nil { return err } this.PlotOnSecondAxis = valueForPlotOnSecondAxis } } if valOrder, ok := objMap["order"]; ok { if valOrder != nil { var valueForOrder int32 err = json.Unmarshal(*valOrder, &valueForOrder) if err != nil { return err } this.Order = valueForOrder } } if valOrderCap, ok := objMap["Order"]; ok { if valOrderCap != nil { var valueForOrder int32 err = json.Unmarshal(*valOrderCap, &valueForOrder) if err != nil { return err } this.Order = valueForOrder } } if valNumberFormatOfYValues, ok := objMap["numberFormatOfYValues"]; ok { if valNumberFormatOfYValues != nil { var valueForNumberFormatOfYValues string err = json.Unmarshal(*valNumberFormatOfYValues, &valueForNumberFormatOfYValues) if err != nil { return err } this.NumberFormatOfYValues = valueForNumberFormatOfYValues } } if valNumberFormatOfYValuesCap, ok := objMap["NumberFormatOfYValues"]; ok { if valNumberFormatOfYValuesCap != nil { var 
valueForNumberFormatOfYValues string err = json.Unmarshal(*valNumberFormatOfYValuesCap, &valueForNumberFormatOfYValues) if err != nil { return err } this.NumberFormatOfYValues = valueForNumberFormatOfYValues } } if valNumberFormatOfXValues, ok := objMap["numberFormatOfXValues"]; ok { if valNumberFormatOfXValues != nil { var valueForNumberFormatOfXValues string err = json.Unmarshal(*valNumberFormatOfXValues, &valueForNumberFormatOfXValues) if err != nil { return err } this.NumberFormatOfXValues = valueForNumberFormatOfXValues } } if valNumberFormatOfXValuesCap, ok := objMap["NumberFormatOfXValues"]; ok { if valNumberFormatOfXValuesCap != nil { var valueForNumberFormatOfXValues string err = json.Unmarshal(*valNumberFormatOfXValuesCap, &valueForNumberFormatOfXValues) if err != nil { return err } this.NumberFormatOfXValues = valueForNumberFormatOfXValues } } if valNumberFormatOfValues, ok := objMap["numberFormatOfValues"]; ok { if valNumberFormatOfValues != nil { var valueForNumberFormatOfValues string err = json.Unmarshal(*valNumberFormatOfValues, &valueForNumberFormatOfValues) if err != nil { return err } this.NumberFormatOfValues = valueForNumberFormatOfValues } } if valNumberFormatOfValuesCap, ok := objMap["NumberFormatOfValues"]; ok { if valNumberFormatOfValuesCap != nil { var valueForNumberFormatOfValues string err = json.Unmarshal(*valNumberFormatOfValuesCap, &valueForNumberFormatOfValues) if err != nil { return err } this.NumberFormatOfValues = valueForNumberFormatOfValues } } if valNumberFormatOfBubbleSizes, ok := objMap["numberFormatOfBubbleSizes"]; ok { if valNumberFormatOfBubbleSizes != nil { var valueForNumberFormatOfBubbleSizes string err = json.Unmarshal(*valNumberFormatOfBubbleSizes, &valueForNumberFormatOfBubbleSizes) if err != nil { return err } this.NumberFormatOfBubbleSizes = valueForNumberFormatOfBubbleSizes } } if valNumberFormatOfBubbleSizesCap, ok := objMap["NumberFormatOfBubbleSizes"]; ok { if valNumberFormatOfBubbleSizesCap != nil { var 
valueForNumberFormatOfBubbleSizes string err = json.Unmarshal(*valNumberFormatOfBubbleSizesCap, &valueForNumberFormatOfBubbleSizes) if err != nil { return err } this.NumberFormatOfBubbleSizes = valueForNumberFormatOfBubbleSizes } } if valInvertIfNegative, ok := objMap["invertIfNegative"]; ok { if valInvertIfNegative != nil { var valueForInvertIfNegative bool err = json.Unmarshal(*valInvertIfNegative, &valueForInvertIfNegative) if err != nil { return err } this.InvertIfNegative = valueForInvertIfNegative } } if valInvertIfNegativeCap, ok := objMap["InvertIfNegative"]; ok { if valInvertIfNegativeCap != nil { var valueForInvertIfNegative bool err = json.Unmarshal(*valInvertIfNegativeCap, &valueForInvertIfNegative) if err != nil { return err } this.InvertIfNegative = valueForInvertIfNegative } } if valExplosion, ok := objMap["explosion"]; ok { if valExplosion != nil { var valueForExplosion int32 err = json.Unmarshal(*valExplosion, &valueForExplosion) if err != nil { return err } this.Explosion = valueForExplosion } } if valExplosionCap, ok := objMap["Explosion"]; ok { if valExplosionCap != nil { var valueForExplosion int32 err = json.Unmarshal(*valExplosionCap, &valueForExplosion) if err != nil { return err } this.Explosion = valueForExplosion } } if valMarker, ok := objMap["marker"]; ok { if valMarker != nil { var valueForMarker SeriesMarker err = json.Unmarshal(*valMarker, &valueForMarker) if err != nil { return err } this.Marker = &valueForMarker } } if valMarkerCap, ok := objMap["Marker"]; ok { if valMarkerCap != nil { var valueForMarker SeriesMarker err = json.Unmarshal(*valMarkerCap, &valueForMarker) if err != nil { return err } this.Marker = &valueForMarker } } if valFillFormat, ok := objMap["fillFormat"]; ok { if valFillFormat != nil { var valueForFillFormat FillFormat err = json.Unmarshal(*valFillFormat, &valueForFillFormat) if err != nil { return err } this.FillFormat = &valueForFillFormat } } if valFillFormatCap, ok := objMap["FillFormat"]; ok { if 
valFillFormatCap != nil { var valueForFillFormat FillFormat err = json.Unmarshal(*valFillFormatCap, &valueForFillFormat) if err != nil { return err } this.FillFormat = &valueForFillFormat } } if valEffectFormat, ok := objMap["effectFormat"]; ok { if valEffectFormat != nil { var valueForEffectFormat EffectFormat err = json.Unmarshal(*valEffectFormat, &valueForEffectFormat) if err != nil { return err } this.EffectFormat = &valueForEffectFormat } } if valEffectFormatCap, ok := objMap["EffectFormat"]; ok { if valEffectFormatCap != nil { var valueForEffectFormat EffectFormat err = json.Unmarshal(*valEffectFormatCap, &valueForEffectFormat) if err != nil { return err } this.EffectFormat = &valueForEffectFormat } } if valLineFormat, ok := objMap["lineFormat"]; ok { if valLineFormat != nil { var valueForLineFormat LineFormat err = json.Unmarshal(*valLineFormat, &valueForLineFormat) if err != nil { return err } this.LineFormat = &valueForLineFormat } } if valLineFormatCap, ok := objMap["LineFormat"]; ok { if valLineFormatCap != nil { var valueForLineFormat LineFormat err = json.Unmarshal(*valLineFormatCap, &valueForLineFormat) if err != nil { return err } this.LineFormat = &valueForLineFormat } } this.DataPointType = "Bubble" if valDataPointType, ok := objMap["dataPointType"]; ok { if valDataPointType != nil { var valueForDataPointType string err = json.Unmarshal(*valDataPointType, &valueForDataPointType) if err != nil { var valueForDataPointTypeInt int32 err = json.Unmarshal(*valDataPointType, &valueForDataPointTypeInt) if err != nil { return err } this.DataPointType = string(valueForDataPointTypeInt) } else { this.DataPointType = valueForDataPointType } } } if valDataPointTypeCap, ok := objMap["DataPointType"]; ok { if valDataPointTypeCap != nil { var valueForDataPointType string err = json.Unmarshal(*valDataPointTypeCap, &valueForDataPointType) if err != nil { var valueForDataPointTypeInt int32 err = json.Unmarshal(*valDataPointTypeCap, &valueForDataPointTypeInt) if err 
!= nil { return err } this.DataPointType = string(valueForDataPointTypeInt) } else { this.DataPointType = valueForDataPointType } } } if valDataPoints, ok := objMap["dataPoints"]; ok { if valDataPoints != nil { var valueForDataPoints []BubbleChartDataPoint err = json.Unmarshal(*valDataPoints, &valueForDataPoints) if err != nil { return err } valueForIDataPoints := make([]IBubbleChartDataPoint, len(valueForDataPoints)) for i, v := range valueForDataPoints { valueForIDataPoints[i] = IBubbleChartDataPoint(&v) } this.DataPoints = valueForIDataPoints } } if valDataPointsCap, ok := objMap["DataPoints"]; ok { if valDataPointsCap != nil { var valueForDataPoints []BubbleChartDataPoint err = json.Unmarshal(*valDataPointsCap, &valueForDataPoints) if err != nil { return err } valueForIDataPoints := make([]IBubbleChartDataPoint, len(valueForDataPoints)) for i, v := range valueForDataPoints { valueForIDataPoints[i] = IBubbleChartDataPoint(&v) } this.DataPoints = valueForIDataPoints } } return nil }
bubble_series.go
0.826747
0.504822
bubble_series.go
starcoder
package main import ( "github.com/SymnaTEC/go-adcpi" "github.com/buger/goterm" "os" "fmt" "time" "bufio" "strings" "flag" "strconv" "math/rand" ) /* This is the entry point of the application. When the program is run, this will be the first function that gets called. It is responsible for creating the connection to the muscle sensor and starting the plotting tools. */ func main() { // Clear the terminal goterm.Clear() // Load the settings from the command line LoadSettings() // Create a channel to connect the two threads, the data thread and the display thread channel := make(chan float64) // Start the background thread that reads the voltage data if Settings.Debug { go grabRandomData(channel) } else if Settings.Playback { go grabDataFromFile(channel) } else { go grabDataFromADCPI(channel) } // Receive the data from the background thread keys := []float64{} values := []float64{} x := 0 for v := range channel { // Append the new values to the general collection keys = append(keys, float64(x) * Settings.Interval) values = append(values, v) // Prepare a Table for the last x values data := &goterm.DataTable{} data.AddColumn("Time") data.AddColumn("Voltage") // Add the last x values from the value arrays to the table i := min(len(keys), Settings.Scale) for i > 0 { data.AddRow(keys[len(keys)-i], values[len(values)-i]) i-- } // Move the cursor to the beginning so we clear the console goterm.MoveCursor(0, 0) // Create a new chart chart := goterm.NewLineChart(Settings.Width, Settings.Height) chart.Flags = goterm.DRAW_RELATIVE // Draw the table using the chart fmt.Println(chart.Draw(data)) goterm.Flush() x++ } } /* A small helper function to return the smaller number */ func min(x int, y int) int { if x < y { return x } return y } /* This function queries the ADCPi extension board, and writes the voltage readout into the channel between this function and the plotting logic */ func grabDataFromADCPI(channel chan float64) { // Connect to the ADCPi adc := 
adcpi.ADCPI(byte(Settings.Address), 18) // Create the CSV file csv,err := os.Create(Settings.File) if err != nil { panic(err) } csv.WriteString("Time;Voltage") defer csv.Close() defer close(channel) // Counter x := 0 voltage := float64(0) // Create an infinite loop for true { voltage = adc.ReadVoltage(byte(Settings.Channel)) channel <- voltage csv.WriteString(fmt.Sprintf("\n%f;%f", float64(x) * Settings.Interval, voltage)) x++ // Converts our decimal value in seconds to an integer value in nanoseconds time.Sleep(time.Duration(Settings.Interval * 1000 * 1000 * 1000)) } } /* This function queries a previously created file, and writes the voltage readout into the channel between this function and the plotting logic */ func grabDataFromFile(channel chan float64) { // Load the file csv,err := os.Open(Settings.File) if err != nil { panic(err) } scan := bufio.NewReader(csv) defer close(channel) // Counter x := 0 voltage := float64(0) line := "" scan.ReadString(10) // Skip CSV declaration // Create an infinite loop for true { line, err = scan.ReadString(10) if line != "" { voltage, err = strconv.ParseFloat(strings.Replace(strings.Split(line, ";")[1], "\n", "", -1), 64) if err != nil { panic(err) } channel <- voltage x++ } // Converts our decimal value in seconds to an integer value in nanoseconds time.Sleep(time.Duration(Settings.Interval * 1000 * 1000 * 1000)) } } /* This function generates random voltage data and writes it into the channel between this function and the plotting logic */ func grabRandomData(channel chan float64) { // Create an infinite loop for true { // Random value between 0 and 5 channel <- rand.Float64() * 5 // Converts our decimal value in seconds to an integer value in nanoseconds time.Sleep(time.Duration(Settings.Interval * 1000 * 1000 * 1000)) } } /* A type that stores all settings. These settings are loaded through command line arguments. 
Example: $ plot --file=data.csv --address=0x68 --channel=1 $ plot --file=data.csv --playback */ type SettingsData struct { /* The file where the data from the muscle sensor will be stored. It should end with .csv, but any file extension is acceptable. If playback mode is enabled, the program will not store data in the file but load it. */ File string /* The I2C address of the interface we are connecting to. The default setting is 0x68 (so 104 in decimal notation). */ Address int /* The channel of the analog pin where the muscle sensor is connected. */ Channel int /* Whether the playback mode should be enabled. In playback mode, the application won't connect to the muscle sensor but load existing data and display it again. */ Playback bool /* The amount of seconds that passes between two measurements */ Interval float64 /* In debug mode, the program generates random data and plots that */ Debug bool /* Defines how many values should get plotted at the same time */ Scale int /* The width of the command line plot */ Width int /* The height of the command line plot */ Height int } /* The Instance of the Settings Storage */ var Settings SettingsData func LoadSettings() { Settings = SettingsData{} flag.StringVar(&(Settings.File), "file", "", "The file where the data from the muscle " + "sensor will be stored. If playback mode is enabled, the program will not store data in the file but load it.") flag.IntVar(&(Settings.Address), "address", 0x68, "The I2C address of the interface we " + "are connecting to.") flag.IntVar(&(Settings.Channel), "channel", 1, "The channel of the analog pin where the " + "muscle sensor is connected.") flag.BoolVar(&(Settings.Playback), "playback", false, "Whether the playback mode should be " + "enabled. 
In playback mode, the applications won't connect to the muscle sensor but load existing data and " + "display it again.") flag.Float64Var(&(Settings.Interval), "interval", 0.1, "The amount of seconds that passes " + "between two measurements") flag.BoolVar(&(Settings.Debug), "debug", false, "In debug mode, the program generates " + "random data and plots that") flag.IntVar(&(Settings.Scale), "scale", 20, "Defines how many values should get plotted " + "at the same time") flag.IntVar(&(Settings.Width), "width", goterm.Width(), "The width of the command line plot") flag.IntVar(&(Settings.Height), "height", goterm.Height(), "The height of the command line plot") flag.Parse() }
plot.go
0.702224
0.437643
plot.go
starcoder
package cmd import ( "time" ) var ( // Reference: https://en.wikipedia.org/wiki/List_of_UTC_time_offsets utcOffsets = []utcOffset{ {"UTC-12:00", "Y", -12 * time.Hour}, // 0 {"UTC-11:00", "X", -11 * time.Hour}, // 1 {"UTC-10:00", "W", -10 * time.Hour}, // 2 {"UTC-09:30", "V†", -(9*time.Hour + 30*time.Minute)}, // 3 {"UTC-09:00", "V", -9 * time.Hour}, // 4 {"UTC-08:00", "U", -8 * time.Hour}, // 5 {"UTC-07:00", "T", -7 * time.Hour}, // 6 {"UTC-06:00", "S", -6 * time.Hour}, // 7 {"UTC-05:00", "R", -5 * time.Hour}, // 8 {"UTC-04:00", "Q", -4 * time.Hour}, // 9 {"UTC-03:30", "P†", -(3*time.Hour + 30*time.Minute)}, // 10 {"UTC-03:00", "P", -3 * time.Hour}, // 11 {"UTC-02:00", "O", -2 * time.Hour}, // 12 {"UTC-01:00", "N", -1 * time.Hour}, // 13 {"UTC±00:00", "Z", 0}, // 14 {"UTC+01:00", "A", 1 * time.Hour}, // 15 {"UTC+02:00", "B", 2 * time.Hour}, // 16 {"UTC+03:00", "C", 3 * time.Hour}, // 17 {"UTC+03:30", "C†", 3*time.Hour + 30*time.Minute}, // 18 {"UTC+04:00", "D", 4 * time.Hour}, // 19 {"UTC+04:30", "D†", 4*time.Hour + 30*time.Minute}, // 20 {"UTC+05:00", "E", 5 * time.Hour}, // 21 {"UTC+05:30", "E†", 5*time.Hour + 30*time.Minute}, // 22 {"UTC+05:45", "E*", 5*time.Hour + 45*time.Minute}, // 23 {"UTC+06:00", "F", 6 * time.Hour}, // 24 {"UTC+06:30", "F†", 6*time.Hour + 30*time.Minute}, // 25 {"UTC+07:00", "G", 7 * time.Hour}, // 26 {"UTC+08:00", "H", 8 * time.Hour}, // 27 {"UTC+08:30", "H†", 8*time.Hour + 30*time.Minute}, // 28 {"UTC+08:45", "H*", 8*time.Hour + 45*time.Minute}, // 29 {"UTC+09:00", "I", 9 * time.Hour}, // 30 {"UTC+09:45", "I†", 9*time.Hour + 30*time.Minute}, // 31 {"UTC+10:00", "K", 10 * time.Hour}, // 32 {"UTC+10:30", "K†", 10*time.Hour + 30*time.Minute}, // 33 {"UTC+11:00", "L", 11 * time.Hour}, // 34 {"UTC+12:00", "M", 12 * time.Hour}, // 35 {"UTC+12:45", "M*", 12*time.Hour + 45*time.Minute}, // 36 {"UTC+13:00", "M†", 13 * time.Hour}, // 37 {"UTC+14:00", "M†", 14 * time.Hour}, // 38 } utcOffsetLocations []*time.Location ) func init() { for _, 
utcOffset := range utcOffsets { utcOffsetLocations = append(utcOffsetLocations, time.FixedZone(utcOffset.name, int(utcOffset.offset.Seconds()))) } } type utcOffset struct { name string nauticalName string offset time.Duration } func (u utcOffset) Name() string { return u.name } func (u utcOffset) NauticalName() string { return u.nauticalName } func (u utcOffset) Offset() time.Duration { return u.offset }
cmd/utc-offsets.go
0.549882
0.492554
utc-offsets.go
starcoder
package closest_pair import ( "math" "strconv" ) type point struct { x float64 y float64 } type Point interface { toString() string } func makePoint(x float64, y float64) *point { A := new(point) A.x = x A.y = y return A } func (A *point) toString() string { return "(" + strconv.FormatFloat(A.x, 'f', -1, 64) + "," + strconv.FormatFloat(A.y, 'f', -1, 64) + ")" } type pair struct { point1 point point2 point distance float64 } type Pair interface { calcDistance() float64 toString() string } func makePair(A point, B point) *pair { P := new(pair) P.point1 = A P.point2 = B P.distance = calcDistance(A, B) return P } func calcDistance(A point, B point) float64 { xdist := A.x - B.x ydist := A.y - B.y return math.Sqrt(float64(xdist*xdist + ydist*ydist)) } func (P *pair) toString() string { return P.point1.toString() + "-" + P.point2.toString() + "-" + strconv.FormatFloat(P.distance, 'f', -1, 64) } func DivideAndConquer(P []point) *pair { n := len(P) if n == 2 { return makePair(P[0], P[1]) } xP := make([]point, n) yP := make([]point, n) for i := 0; i < n; i++ { xP[i] = P[i] yP[i] = P[i] } _, pair := divideAndConquer(xP, yP) return pair } func divideAndConquer(xP, yP []point) (float64, *pair) { n := len(xP) if n <= 3 { p := BruteForce(xP) return p.distance, p } xL := xP[:n>>1] xR := xP[n>>1:] var yL, yR []point xMiddle := xL[0].x // yL ← { p ∈ yP : px ≤ xMiddle } // yR ← { p ∈ yP : px > xMiddle } for _, p := range yP { if p.x <= xMiddle { yL = append(yL, p) } else { yR = append(yL, p) } } // (dL, pairL) ← closestPair of (xL, yL) // (dR, pairR) ← closestPair of (xR, yR) dL, pairL := divideAndConquer(xL, yL) dR, pairR := divideAndConquer(xR, yR) dMin, pairMin := dR, pairR if dL < dR { dMin, pairMin = dL, pairL } var yS []point // yS ← { p ∈ yP : |xMiddle - px| < dMin } for i := 0; i < len(yP); i++ { if math.Abs(yP[i].x-xMiddle) < dMin { yS = append(yS, yP[i]) } } nS := len(yS) if nS > 1 { closestPair := pairMin for i := 1; i < nS-1; i++ { k := i + 1 for k <= nS && 
math.Abs(yS[k].y-yS[i].y) < dMin { tempPair := makePair(yS[k], yS[i]) if tempPair.distance < closestPair.distance { closestPair = tempPair } k++ } } return closestPair.distance, closestPair } else { return dMin, pairMin } } func BruteForce(P []point) *pair { n := len(P) if n < 2 { return nil } minPair := makePair(P[0], P[1]) tempPair := makePair(P[0], P[1]) min := minPair.distance for i := 0; i < n-1; i++ { tempPair.point1 = P[i] for j := 0; j < n; j++ { tempPair.point2 = P[j] if min > tempPair.distance { minPair = tempPair min = tempPair.distance } } } return minPair }
algorithms/maths/closest-pair/closest_pair.go
0.752831
0.500061
closest_pair.go
starcoder
// Package operator defines the operators usable in query conditions and
// a tree-shaped Condition model built from them.
package operator

// Operator condition type
type Operator string

const (
	// Tr Tree condition
	Tr Operator = "tr"

	// Query Selectors
	// ========== Comparison =============

	// Eq Matches values that are equal to a specified value
	Eq Operator = "eq"
	// Ne Matches all values that are not equal to a specified value
	Ne Operator = "ne"
	// Lt Matches values that are less than a specified value
	Lt Operator = "lt"
	// Lte Matches values that are less than or equal to a specified value
	Lte Operator = "lte"
	// Gt Matches values that are greater than a specified value
	Gt Operator = "gt"
	// Gte Matches values that are greater than or equal to a specified value
	Gte Operator = "gte"
	// In Matches any of the values specified in an array
	In Operator = "in"
	// Nin Matches none of the values specified in an array
	Nin Operator = "nin"
	// Con Matches values that contains some string
	Con Operator = "con"

	// ========== Logical =============

	// Not Inverts the effect of a query expression and returns documents that do not match the query expression
	Not Operator = "not"
	// And Joins query clauses with a logical AND returns all documents that match the conditions of both clauses
	And Operator = "and"
	// Or Joins query clauses with a logical OR returns all documents that match the conditions of either clause
	Or Operator = "or"
	// Nor Joins query clauses with a logical NOR returns all documents that fail to match both clauses
	Nor Operator = "nor"

	// ========== Element =============

	// Ext Matches documents that have the specified field
	Ext Operator = "exists"
	// Typ Selects documents if a field is of the specified type
	Typ Operator = "type"

	// ========== Pipeline in ChangeStream =========

	// Mat matches documents in change stream
	Mat Operator = "match"
)

var (
	// EmptyCondition is a Tr leaf carrying an empty map, i.e. "{}".
	EmptyCondition = NewLeafCondition(Tr, M{})
)

// M is a generic string-keyed map used as the payload of leaf conditions.
type M map[string]interface{}

// Update sets key to value in m and returns m so calls can be chained.
func (m M) Update(key string, value interface{}) M {
	m[key] = value
	return m
}

// Merge copies every key/value pair of additionM into m, overwriting
// existing keys.
func (m M) Merge(additionM M) {
	for key, value := range additionM {
		m[key] = value
	}
}

// Condition is a node of the condition tree: a leaf carries an Operator
// and a Value; a branch carries an Operator and child conditions.
// Building a condition is just the process of building this tree.
type Condition struct {
	Op       Operator
	Value    interface{}
	Children []*Condition
}

// NewLeafCondition creates a leaf condition holding operator t and value v.
func NewLeafCondition(t Operator, v interface{}) *Condition {
	return &Condition{
		Op:    t,
		Value: v,
	}
}

// NewBranchCondition creates a branch condition joining cons with operator t.
func NewBranchCondition(t Operator, cons ...*Condition) *Condition {
	newCondition := &Condition{
		Op: t,
	}
	newCondition.Children = append(newCondition.Children, cons...)
	return newCondition
}

// Combine reduces this node to a value: a leaf (no children) is handed
// to leafFunc as (Op, Value); a branch is handed to combineFunc as
// (Op, Children). Recursing into children is combineFunc's job.
func (c *Condition) Combine(leafFunc func(Operator, interface{}) interface{},
	combineFunc func(Operator, []*Condition) interface{}) interface{} {
	// leaf node
	if len(c.Children) == 0 {
		return leafFunc(c.Op, c.Value)
	}
	return combineFunc(c.Op, c.Children)
}
bcs-common/pkg/odm/operator/condition.go
0.698227
0.488527
condition.go
starcoder
// Package histogram implements a basic histogram to keep track of data
// distribution.
package histogram

import (
	"time"

	"v.io/v23/verror"
	"v.io/x/ref/lib/stats/counter"
	"v.io/x/ref/services/stats"
)

const pkgPath = "v.io/x/ref/lib/stats/histogram"

var (
	// errNoBucketForValue is returned by Add (via findBucket) when a
	// value falls outside every bucket's range.
	errNoBucketForValue = verror.Register(pkgPath+".errNoBucketForValue", verror.NoRetry, "{1:}{2:} no bucket for value{:_}")
)

// A Histogram accumulates values in the form of a histogram. The type of the
// values is int64, which is suitable for keeping track of things like RPC
// latency in milliseconds. New histogram objects should be obtained via the
// New() function.
type Histogram struct {
	opts    Options          // the parameters the histogram was built with
	buckets []bucketInternal // per-bucket counters, ordered by ascending lowBound
	count   *counter.Counter // number of values recorded
	sum     *counter.Counter // sum of all recorded values
	tracker *counter.Tracker // min/max tracking of recorded values
}

// Options contains the parameters that define the histogram's buckets.
type Options struct {
	// NumBuckets is the number of buckets.
	NumBuckets int
	// GrowthFactor is the growth factor of the buckets. A value of 0.1
	// indicates that bucket N+1 will be 10% larger than bucket N.
	GrowthFactor float64
	// SmallestBucketSize is the size of the first bucket. Bucket sizes are
	// rounded down to the nearest integer.
	SmallestBucketSize float64
	// MinValue is the lower bound of the first bucket.
	MinValue int64
}

// bucketInternal is the internal representation of a bucket, which includes a
// rate counter.
type bucketInternal struct {
	lowBound int64            // inclusive lower bound of this bucket
	count    *counter.Counter // number of values that landed in this bucket
}

// New returns a pointer to a new Histogram object that was created with the
// provided options.
func New(opts Options) *Histogram { if opts.NumBuckets == 0 { opts.NumBuckets = 32 } if opts.SmallestBucketSize == 0.0 { opts.SmallestBucketSize = 1.0 } h := Histogram{ opts: opts, buckets: make([]bucketInternal, opts.NumBuckets), count: counter.New(), sum: counter.New(), tracker: counter.NewTracker(), } low := opts.MinValue delta := opts.SmallestBucketSize for i := 0; i < opts.NumBuckets; i++ { h.buckets[i].lowBound = low h.buckets[i].count = counter.New() low += int64(delta) delta *= (1.0 + opts.GrowthFactor) } return &h } // Opts returns a copy of the options used to create the Histogram. func (h *Histogram) Opts() Options { return h.opts } // Add adds a value to the histogram. func (h *Histogram) Add(value int64) error { bucket, err := h.findBucket(value) if err != nil { return err } h.buckets[bucket].count.Incr(1) h.count.Incr(1) h.sum.Incr(value) h.tracker.Push(value) return nil } // LastUpdate returns the time at which the object was last updated. func (h *Histogram) LastUpdate() time.Time { return h.count.LastUpdate() } // Value returns the accumulated state of the histogram since it was created. func (h *Histogram) Value() stats.HistogramValue { b := make([]stats.HistogramBucket, len(h.buckets)) for i, v := range h.buckets { b[i] = stats.HistogramBucket{ LowBound: v.lowBound, Count: v.count.Value(), } } v := stats.HistogramValue{ Count: h.count.Value(), Sum: h.sum.Value(), Min: h.tracker.Min(), Max: h.tracker.Max(), Buckets: b, } return v } // Delta1h returns the change in the last hour. func (h *Histogram) Delta1h() stats.HistogramValue { b := make([]stats.HistogramBucket, len(h.buckets)) for i, v := range h.buckets { b[i] = stats.HistogramBucket{ LowBound: v.lowBound, Count: v.count.Delta1h(), } } v := stats.HistogramValue{ Count: h.count.Delta1h(), Sum: h.sum.Delta1h(), Min: h.tracker.Min1h(), Max: h.tracker.Max1h(), Buckets: b, } return v } // Delta10m returns the change in the last 10 minutes. 
func (h *Histogram) Delta10m() stats.HistogramValue {
	b := make([]stats.HistogramBucket, len(h.buckets))
	for i, v := range h.buckets {
		b[i] = stats.HistogramBucket{
			LowBound: v.lowBound,
			Count:    v.count.Delta10m(),
		}
	}
	v := stats.HistogramValue{
		Count:   h.count.Delta10m(),
		Sum:     h.sum.Delta10m(),
		Min:     h.tracker.Min10m(),
		Max:     h.tracker.Max10m(),
		Buckets: b,
	}
	return v
}

// Delta1m returns the change in the last minute.
func (h *Histogram) Delta1m() stats.HistogramValue {
	b := make([]stats.HistogramBucket, len(h.buckets))
	for i, v := range h.buckets {
		b[i] = stats.HistogramBucket{
			LowBound: v.lowBound,
			Count:    v.count.Delta1m(),
		}
	}
	v := stats.HistogramValue{
		Count:   h.count.Delta1m(),
		Sum:     h.sum.Delta1m(),
		Min:     h.tracker.Min1m(),
		Max:     h.tracker.Max1m(),
		Buckets: b,
	}
	return v
}

// findBucket does a binary search to find in which bucket the value goes.
func (h *Histogram) findBucket(value int64) (int, error) {
	lastBucket := len(h.buckets) - 1
	min, max := 0, lastBucket
	for max >= min {
		b := (min + max) / 2
		// value lands in bucket b when it is at or above b's lower
		// bound and below the next bucket's lower bound; the last
		// bucket has no upper bound.
		if value >= h.buckets[b].lowBound && (b == lastBucket || value < h.buckets[b+1].lowBound) {
			return b, nil
		}
		if value < h.buckets[b].lowBound {
			max = b - 1
			continue
		}
		min = b + 1
	}
	// Reached only when value is below the first bucket's lower bound
	// (or there are no buckets at all).
	return 0, verror.New(errNoBucketForValue, nil, value)
}
x/ref/lib/stats/histogram/histogram.go
0.736495
0.524029
histogram.go
starcoder
package main const ( content = ` # Awesome Go [![Build Status](https://travis-ci.org/avelino/awesome-go.svg?branch=master)](https://travis-ci.org/avelino/awesome-go) [![Awesome](https://cdn.rawgit.com/sindresorhus/awesome/d7305f38d29fed78fa85652e3a63e154dd8e8829/media/badge.svg)](https://github.com/sindresorhus/awesome) [![Slack Widget](https://s3.eu-central-1.amazonaws.com/ngtuna/join-us-on-slack.png)](http://gophers.slack.com/messages/awesome) A curated list of awesome Go frameworks, libraries and software. Inspired by [awesome-python](https://github.com/vinta/awesome-python). ### Contributing Please take a quick gander at the [contribution guidelines](https://github.com/avelino/awesome-go/blob/master/CONTRIBUTING.md) first. Thanks to all [contributors](https://github.com/avelino/awesome-go/graphs/contributors); you rock! #### *If you see a package or project here that is no longer maintained or is not a good fit, please submit a pull request to improve this file. Thank you!* ### Contents - [Awesome Go](#awesome-go) - [Audio and Music](#audio-and-music) - [Authentication and OAuth](#authentication-and-oauth) - [Command Line](#command-line) - [Configuration](#configuration) - [Continuous Integration](#continuous-integration) - [CSS Preprocessors](#css-preprocessors) - [Data Structures](#data-structures) - [Database](#database) - [Database Drivers](#database-drivers) - [Date and Time](#date-and-time) - [Distributed Systems](#distributed-systems) - [Email](#email) - [Embeddable Scripting Languages](#embeddable-scripting-languages) - [Files](#files) - [Financial](#financial) - [Forms](#forms) - [Game Development](#game-development) - [Generation and Generics](#generation-and-generics) - [Go Compilers](#go-compilers) - [Goroutines](#goroutines) - [GUI](#gui) - [Hardware](#hardware) - [Images](#images) - [IoT](#iot-internet-of-things) - [Logging](#logging) - [Machine Learning](#machine-learning) - [Messaging](#messaging) - [Miscellaneous](#miscellaneous) - [Natural 
Language Processing](#natural-language-processing) - [Networking](#networking) - [OpenGL](#opengl) - [ORM](#orm) - [Package Management](#package-management) - [Query Language](#query-language) - [Resource Embedding](#resource-embedding) - [Science and Data Analysis](#science-and-data-analysis) - [Security](#security) - [Serialization](#serialization) - [Template Engines](#template-engines) - [Testing](#testing) - [Text Processing](#text-processing) - [Third-party APIs](#third-party-apis) - [Utilities](#utilities) - [Validation](#validation) - [Version Control](#version-control) - [Video](#video) - [Web Frameworks](#web-frameworks) - [Middlewares](#middlewares) - [Actual middlewares](#actual-middlewares) - [Libraries for creating HTTP middlewares](#libraries-for-creating-http-middlewares) - [Routers](#routers) - [Windows](#windows) - [XML](#xml) - [Tools](#tools) - [Code Analysis](#code-analysis) - [Editor Plugins](#editor-plugins) - [Go Generate Tools](#go-generate-tools) - [Go Tools](#go-tools) - [Software Packages](#software-packages) - [DevOps Tools](#devops-tools) - [Other Software](#other-software) - [Server Applications](#server-applications) - [Resources](#resources) - [Benchmarks](#benchmarks) - [Conferences](#conferences) - [E-Books](#e-books) - [Gophers](#gophers) - [Meetups](#meetups) - [Twitter](#twitter) - [Websites](#websites) - [Tutorials](#tutorials) ## Audio and Music *Libraries for manipulating audio.* * [flac](https://github.com/eaburns/flac) - Native Go FLAC decoder. * [flac](https://github.com/mewkiz/flac) - Native Go FLAC decoder. * [gaad](https://github.com/Comcast/gaad) - Native Go AAC bitstream parser. * [go-sox](https://github.com/krig/go-sox) - libsox bindings for go. * [go_mediainfo](https://github.com/zhulik/go_mediainfo) - libmediainfo bindings for go. * [gosamplerate](https://github.com/dh1tw/gosamplerate) - libsamplerate bindings for go. 
* [id3v2](https://github.com/bogem/id3v2) - Fast and stable ID3 parsing and writing library for Go. * [malgo](https://github.com/gen2brain/malgo) - Mini audio library. * [mix](https://github.com/go-mix/mix) - Sequence-based Go-native audio mixer for music apps. * [mp3](https://github.com/tcolgate/mp3) - Native Go MP3 decoder. * [music-theory](https://github.com/go-music-theory/music-theory) - Music theory models in Go. * [PortAudio](https://github.com/gordonklaus/portaudio) - Go bindings for the PortAudio audio I/O library. * [portmidi](https://github.com/rakyll/portmidi) - Go bindings for PortMidi. * [taglib](https://github.com/wtolson/go-taglib) - Go bindings for taglib. * [vorbis](https://github.com/mccoyst/vorbis) - "Native" Go Vorbis decoder (uses CGO, but has no dependencies). * [waveform](https://github.com/mdlayher/waveform) - Go package capable of generating waveform images from audio streams. ## Authentication and OAuth *Libraries for implementing authentications schemes.* * [authboss](https://github.com/volatiletech/authboss) - Modular authentication system for the web. It tries to remove as much boilerplate and "hard things" as possible so that each time you start a new web project in Go, you can plug it in, configure, and start building your app without having to build an authentication system each time. * [casbin](https://github.com/hsluoyz/casbin) - Authorization library that supports access control models like ACL, RBAC, ABAC. * [cookiestxt](https://github.com/mengzhuo/cookiestxt) - provides parser of cookies.txt file format. * [Go-AWS-Auth](https://github.com/smartystreets/go-aws-auth) - AWS (Amazon Web Services) request signing library. * [go-jose](https://github.com/square/go-jose) - Fairly complete implementation of the JOSE working group's JSON Web Token, JSON Web Signatures, and JSON Web Encryption specs. 
* [go-oauth2-server](https://github.com/RichardKnop/go-oauth2-server) - Standalone, specification-compliant, OAuth2 server written in Golang. * [gologin](https://github.com/dghubble/gologin) - chainable handlers for login with OAuth1 and OAuth2 authentication providers. * [gorbac](https://github.com/mikespook/gorbac) - provides a lightweight role-based access control (RBAC) implementation in Golang. * [goth](https://github.com/markbates/goth) - provides a simple, clean, and idiomatic way to use OAuth and OAuth2. Handles multiple providers out of the box. * [httpauth](https://github.com/goji/httpauth) - HTTP Authentication middleware. * [jwt](https://github.com/robbert229/jwt) - Clean and easy to use implementation of JSON Web Tokens (JWT). * [jwt-auth](https://github.com/adam-hanna/jwt-auth) - JWT middleware for Golang http servers with many configuration options. * [jwt-go](https://github.com/dgrijalva/jwt-go) - Golang implementation of JSON Web Tokens (JWT). * [loginsrv](https://github.com/tarent/loginsrv) - JWT login microservice with plugable backends such as OAuth2 (Github), htpasswd, osiam. * [oauth2](https://github.com/golang/oauth2) - Successor of goauth2. Generic OAuth 2.0 package that comes with JWT, Google APIs, Compute Engine and App Engine support. * [osin](https://github.com/RangelReale/osin) - Golang OAuth2 server library. * [permissions2](https://github.com/xyproto/permissions2) - Library for keeping track of users, login states and permissions. Uses secure cookies and bcrypt. * [securecookie](https://github.com/chmike/securecookie) - Efficient secure cookie encoding/decoding. * [session](https://github.com/icza/session) - Go session management for web servers (including support for Google App Engine - GAE). * [sessiongate-go](https://github.com/f0rmiga/sessiongate-go) - Go session management using the SessionGate Redis module. 
* [sessions](https://github.com/adam-hanna/sessions) - Dead simple, highly performant, highly customizable sessions service for go http servers. * [yubigo](https://github.com/GeertJohan/yubigo) - Yubikey client package that provides a simple API to integrate the Yubico Yubikey into a go application. ## Command Line ### Standard CLI *Libraries for building standard or basic Command Line applications.* * [argparse](https://github.com/akamensky/argparse) - Command line argument parser inspired by Python's argparse module. * [argv](https://github.com/cosiner/argv) - Go library to split command line string as arguments array using the bash syntax. * [cli](https://github.com/mkideal/cli) - Feature-rich and easy to use command-line package based on golang struct tags. * [cli](https://github.com/teris-io/cli) - Simple and complete API for building command line interfaces in Go. * [cli-init](https://github.com/tcnksm/gcli) - The easy way to start building Golang command line applications. * [climax](http://github.com/tucnak/climax) - Alternative CLI with "human face", in spirit of Go command. * [cobra](https://github.com/spf13/cobra) - Commander for modern Go CLI interactions. * [complete](https://github.com/posener/complete) - Write bash completions in Go + Go command bash completion. * [docopt.go](https://github.com/docopt/docopt.go) - Command-line arguments parser that will make you smile. * [drive](https://github.com/odeke-em/drive) - Google Drive client for the commandline. * [env](https://github.com/codingconcepts/env) - Tag-based environment configuration for structs. * [flag](https://github.com/cosiner/flag) - Simple but powerful command line option parsing library for Go supporting subcommand. * [go-arg](https://github.com/alexflint/go-arg) - Struct-based argument parsing in Go. * [go-flags](https://github.com/jessevdk/go-flags) - go command line option parser. 
* [kingpin](https://github.com/alecthomas/kingpin) - Command line and flag parser supporting sub commands. * [liner](https://github.com/peterh/liner) - Go readline-like library for command-line interfaces. * [mitchellh/cli](https://github.com/mitchellh/cli) - Go library for implementing command-line interfaces. * [mow.cli](https://github.com/jawher/mow.cli) - Go library for building CLI applications with sophisticated flag and argument parsing and validation. * [pflag](https://github.com/spf13/pflag) - Drop-in replacement for Go's flag package, implementing POSIX/GNU-style --flags. * [readline](https://github.com/chzyer/readline) - Pure golang implementation that provides most features in GNU-Readline under MIT license. * [sflags](https://github.com/octago/sflags) - Struct based flags generator for flag, urfave/cli, pflag, cobra, kingpin and other libraries. * [ukautz/clif](https://github.com/ukautz/clif) - Small command line interface framework. * [urfave/cli](https://github.com/urfave/cli) - Simple, fast, and fun package for building command line apps in Go (formerly codegangsta/cli). * [wlog](https://github.com/dixonwille/wlog) - Simple logging interface that supports cross-platform color and concurrency. * [wmenu](https://github.com/dixonwille/wmenu) - Easy to use menu structure for cli applications that prompts users to make choices. ### Advanced Console UIs *Libraries for building Console Applications and Console User Interfaces.* * [aurora](https://github.com/logrusorgru/aurora) - ANSI terminal colors that supports fmt.Printf/Sprintf. * [chalk](https://github.com/ttacon/chalk) - Intuitive package for prettifying terminal/console output. * [color](https://github.com/fatih/color) - Versatile package for colored terminal output. * [colourize](https://github.com/TreyBastian/colourize) - Go library for ANSI colour text in terminals. * [go-ataman](https://github.com/workanator/go-ataman) - Go library for rendering ANSI colored text templates in terminals. 
* [go-colorable](https://github.com/mattn/go-colorable) - Colorable writer for windows. * [go-colortext](https://github.com/daviddengcn/go-colortext) - Go library for color output in terminals. * [go-isatty](https://github.com/mattn/go-isatty) - isatty for golang. * [gocui](https://github.com/jroimartin/gocui) - Minimalist Go library aimed at creating Console User Interfaces. * [gommon/color](https://github.com/labstack/gommon/tree/master/color) - Style terminal text. * [mpb](https://github.com/vbauerster/mpb) - Multi progress bar for terminal applications. * [progressbar](https://github.com/schollz/progressbar) - Basic thread-safe progress bar that works in every OS. * [termbox-go](https://github.com/nsf/termbox-go) - Termbox is a library for creating cross-platform text-based interfaces. * [termtables](https://github.com/apcera/termtables) - Go port of the Ruby library [terminal-tables](https://github.com/tj/terminal-table) for simple ASCII table generation as well as providing markdown and HTML output. * [termui](https://github.com/gizak/termui) - Go terminal dashboard based on **termbox-go** and inspired by [blessed-contrib](https://github.com/yaronn/blessed-contrib). * [tui-go](https://github.com/marcusolsson/tui-go) - Go UI library for building rich terminal applications. * [uilive](https://github.com/gosuri/uilive) - Library for updating terminal output in realtime. * [uiprogress](https://github.com/gosuri/uiprogress) - Flexible library to render progress bars in terminal applications. * [uitable](https://github.com/gosuri/uitable) - Library to improve readability in terminal apps using tabular data. ## Configuration *Libraries for configuration parsing.* * [config](https://github.com/olebedev/config) - JSON or YAML configuration wrapper with environment variables and flags parsing. * [configure](https://github.com/paked/configure) - Provides configuration through multiple sources, including JSON, flags and environment variables. 
* [env](https://github.com/caarlos0/env) - Parse environment variables to Go structs (with defaults). * [envcfg](https://github.com/tomazk/envcfg) - Un-marshaling environment variables to Go structs. * [envconf](https://github.com/ian-kent/envconf) - Configuration from environment. * [envconfig](https://github.com/vrischmann/envconfig) - Read your configuration from environment variables. * [envh](https://github.com/antham/envh) - Helpers to manage environment variables. * [gcfg](https://github.com/go-gcfg/gcfg) - read INI-style configuration files into Go structs; supports user-defined types and subsections. * [goConfig](https://github.com/crgimenes/goConfig) - Parses a struct as input and populates the fields of this struct with parameters from command line, environment variables and configuration file. * [godotenv](https://github.com/joho/godotenv) - Go port of Ruby's dotenv library (Loads environment variables from .env). * [gofigure](https://github.com/ian-kent/gofigure) - Go application configuration made easy. * [gone/jconf](https://github.com/One-com/gone/tree/master/jconf) - Modular JSON configuration. Keep you config structs along with the code they configure and delegate parsing to submodules without sacrificing full config serialization. * [hjson](https://github.com/hjson/hjson-go) - Human JSON, a configuration file format for humans. Relaxed syntax, fewer mistakes, more comments. * [ingo](https://github.com/schachmat/ingo) - Flags persisted in an ini-like config file. * [ini](https://github.com/go-ini/ini) - Go package to read and write INI files. * [joshbetz/config](https://github.com/joshbetz/config) - Small configuration library for Go that parses environment variables, JSON files, and reloads automatically on SIGHUP. * [mini](https://github.com/sasbury/mini) - Golang package for parsing ini-style configuration files. * [store](https://github.com/tucnak/store) - Lightweight configuration manager for Go. 
* [viper](https://github.com/spf13/viper) - Go configuration with fangs. * [xdg](https://github.com/OpenPeeDeeP/xdg) - Cross platform package that follows the [XDG Standard](https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html). ## Continuous Integration *Tools for help with continuous integration.* * [drone](https://github.com/drone/drone) - Drone is a Continuous Integration platform built on Docker, written in Go. * [goveralls](https://github.com/mattn/goveralls) - Go integration for Coveralls.io continuous code coverage tracking system. * [overalls](https://github.com/go-playground/overalls) - Multi-Package go project coverprofile for tools like goveralls. * [roveralls](https://github.com/LawrenceWoodman/roveralls) - Recursive coverage testing tool. ## CSS Preprocessors *Libraries for preprocessing CSS files.* * [c6](https://github.com/c9s/c6) - High performance SASS compatible-implementation compiler written in Go. * [gcss](https://github.com/yosssi/gcss) - Pure Go CSS Preprocessor. * [go-libsass](https://github.com/wellington/go-libsass) - Go wrapper to the 100% Sass compatible libsass project. ## Data Structures *Generic datastructures and algorithms in Go.* * [binpacker](https://github.com/zhuangsirui/binpacker) - Binary packer and unpacker helps user build custom binary stream. * [bit](https://github.com/yourbasic/bit) - Golang set data structure with bonus bit-twiddling functions. * [bitset](https://github.com/willf/bitset) - Go package implementing bitsets. * [bloom](https://github.com/zhenjl/bloom) - Bloom filters implemented in Go. * [bloom](https://github.com/yourbasic/bloom) - Golang Bloom filter implementation. * [boomfilters](https://github.com/tylertreat/BoomFilters) - Probabilistic data structures for processing continuous, unbounded streams. * [concurrent-writer](https://github.com/free/concurrent-writer) - Highly concurrent drop-in replacement for bufio.Writer. 
* [count-min-log](https://github.com/seiflotfy/count-min-log) - Go implementation Count-Min-Log sketch: Approximately counting with approximate counters (Like Count-Min sketch but using less memory). * [cuckoofilter](https://github.com/seiflotfy/cuckoofilter) - Cuckoo filter: a good alternative to a counting bloom filter implemented in Go. * [encoding](https://github.com/zhenjl/encoding) - Integer Compression Libraries for Go. * [go-adaptive-radix-tree](https://github.com/plar/go-adaptive-radix-tree) - Go implementation of Adaptive Radix Tree. * [go-datastructures](https://github.com/Workiva/go-datastructures) - Collection of useful, performant, and thread-safe data structures. * [go-ef](https://github.com/amallia/go-ef) - A Go implementation of the Elias-Fano encoding. * [go-geoindex](https://github.com/hailocab/go-geoindex) - In-memory geo index. * [go-rquad](https://github.com/aurelien-rainone/go-rquad) - Region quadtrees with efficient point location and neighbour finding. * [gods](https://github.com/emirpasic/gods) - Go Data Structures. Containers, Sets, Lists, Stacks, Maps, BidiMaps, Trees, HashSet etc. * [golang-set](https://github.com/deckarep/golang-set) - Thread-Safe and Non-Thread-Safe high-performance sets for Go. * [goset](https://github.com/zoumo/goset) - A useful Set collection implementation for Go. * [goskiplist](https://github.com/ryszard/goskiplist) - Skip list implementation in Go. * [gota](https://github.com/kniren/gota) - Implementation of dataframes, series, and data wrangling methods for Go. * [hilbert](https://github.com/google/hilbert) - Go package for mapping values to and from space-filling curves, such as Hilbert and Peano curves. * [hyperloglog](https://github.com/axiomhq/hyperloglog) - HyperLogLog implementation with Sparse, LogLog-Beta bias correction and TailCut space reduction. 
* [levenshtein](https://github.com/agext/levenshtein) - Levenshtein distance and similarity metrics with customizable edit costs and Winkler-like bonus for common prefix. * [levenshtein](https://github.com/agnivade/levenshtein) - Implementation to calculate levenshtein distance in Go. * [mafsa](https://github.com/smartystreets/mafsa) - MA-FSA implementation with Minimal Perfect Hashing. * [merkletree](https://github.com/cbergoon/merkletree) - Implementation of a merkle tree providing an efficient and secure verification of the contents of data structures. * [roaring](https://github.com/RoaringBitmap/roaring) - Go package implementing compressed bitsets. * [skiplist](https://github.com/gansidui/skiplist) - Skiplist implementation in Go. * [trie](https://github.com/derekparker/trie) - Trie implementation in Go. * [ttlcache](https://github.com/diegobernardes/ttlcache) - In-memory LRU string-interface{} map with expiration for golang. * [willf/bloom](https://github.com/willf/bloom) - Go package implementing Bloom filters. ## Database *Databases implemented in Go.* * [badger](https://github.com/dgraph-io/badger) - Fast key-value store in Go. * [BigCache](https://github.com/allegro/bigcache) - Efficient key/value cache for gigabytes of data. * [bolt](https://github.com/boltdb/bolt) - Low-level key/value database for Go. * [buntdb](https://github.com/tidwall/buntdb) - Fast, embeddable, in-memory key/value database for Go with custom indexing and spatial support. * [cache2go](https://github.com/muesli/cache2go) - In-memory key:value cache which supports automatic invalidation based on timeouts. * [cockroach](https://github.com/cockroachdb/cockroach) - Scalable, Geo-Replicated, Transactional Datastore. * [couchcache](https://github.com/codingsince1985/couchcache) - RESTful caching micro-service backed by Couchbase server. * [dgraph](https://github.com/dgraph-io/dgraph) - Scalable, Distributed, Low Latency, High Throughput Graph Database. 
* [diskv](https://github.com/peterbourgon/diskv) - Home-grown disk-backed key-value store. * [eliasdb](https://github.com/krotik/eliasdb) - Dependency-free, transactional graph database with REST API, phrase search and SQL-like query language. * [forestdb](https://github.com/couchbase/goforestdb) - Go bindings for ForestDB. * [GCache](https://github.com/bluele/gcache) - Cache library with support for expirable Cache, LFU, LRU and ARC. * [geocache](https://github.com/melihmucuk/geocache) - In-memory cache that is suitable for geolocation based applications. * [go-cache](https://github.com/pmylund/go-cache) - In-memory key:value store/cache (similar to Memcached) library for Go, suitable for single-machine applications. * [goleveldb](https://github.com/syndtr/goleveldb) - Implementation of the [LevelDB](https://github.com/google/leveldb) key/value database in Go. * [groupcache](https://github.com/golang/groupcache) - Groupcache is a caching and cache-filling library, intended as a replacement for memcached in many cases. * [influxdb](https://github.com/influxdb/influxdb) - Scalable datastore for metrics, events, and real-time analytics. * [jaeger](https://github.com/jaegertracing/jaeger) - A distributed tracing system. * [ledisdb](https://github.com/siddontang/ledisdb) - Ledisdb is a high performance NoSQL like Redis based on LevelDB. * [levigo](https://github.com/jmhodges/levigo) - Levigo is a Go wrapper for LevelDB. * [moss](https://github.com/couchbase/moss) - Moss is a simple LSM key-value storage engine written in 100% Go. * [piladb](https://github.com/fern4lvarez/piladb) - Lightweight RESTful database engine based on stack data structures. * [prometheus](https://github.com/prometheus/prometheus) - Monitoring system and time series database. * [rqlite](https://github.com/rqlite/rqlite) - The lightweight, distributed, relational database built on SQLite. * [Scribble](https://github.com/nanobox-io/golang-scribble) - Tiny flat file JSON store. 
* [tempdb](https://github.com/rafaeljesus/tempdb) - Key-value store for temporary items. * [tidb](https://github.com/pingcap/tidb) - TiDB is a distributed SQL database. Inspired by the design of Google F1. * [tiedot](https://github.com/HouzuoGuo/tiedot) - Your NoSQL database powered by Golang. * [Tile38](https://github.com/tidwall/tile38) - Geolocation DB with spatial index and realtime geofencing. *Database schema migration.* * [darwin](https://github.com/GuiaBolso/darwin) - Database schema evolution library for Go. * [go-fixtures](https://github.com/RichardKnop/go-fixtures) - Django style fixtures for Golang's excellent built-in database/sql library. * [gondolier](https://github.com/emvicom/gondolier) - Gondolier is a library to auto migrate database schemas using structs. * [goose](https://github.com/steinbacher/goose) - Database migration tool. You can manage your database's evolution by creating incremental SQL or Go scripts. * [gormigrate](https://github.com/go-gormigrate/gormigrate) - Database schema migration helper for Gorm ORM. * [migrate](https://github.com/mattes/migrate) - Database migrations. CLI and Golang library. * [pravasan](https://github.com/pravasan/pravasan) - Simple Migration tool - currently for MySQL but planning to soon support Postgres, SQLite, MongoDB, etc. * [soda](https://github.com/markbates/pop/tree/master/soda) - Database migration, creation, ORM, etc... for MySQL, PostgreSQL, and SQLite. * [sql-migrate](https://github.com/rubenv/sql-migrate) - Database migration tool. Allows embedding migrations into the application using go-bindata. *Database tools.* * [chproxy](https://github.com/Vertamedia/chproxy) - HTTP proxy for ClickHouse database. * [go-mysql](https://github.com/siddontang/go-mysql) - Go toolset to handle MySQL protocol and replication. * [go-mysql-elasticsearch](https://github.com/siddontang/go-mysql-elasticsearch) - Sync your MySQL data into Elasticsearch automatically. 
* [kingshard](https://github.com/flike/kingshard) - kingshard is a high performance proxy for MySQL powered by Golang. * [myreplication](https://github.com/2tvenom/myreplication) - MySQL binary log replication listener. Supports statement and row-based replication. * [orchestrator](https://github.com/github/orchestrator) - MySQL replication topology manager & visualizer. * [pgweb](https://github.com/sosedoff/pgweb) - Web-based PostgreSQL database browser. * [pREST](https://github.com/nuveo/prest) - Serve a RESTful API from any PostgreSQL database. * [rwdb](https://github.com/andizzle/rwdb) - rwdb provides read replica capability for a multiple database servers setup. * [vitess](https://github.com/youtube/vitess) - vitess provides servers and tools which facilitate scaling of MySQL databases for large scale web services. *SQL query builder, libraries for building and using SQL.* * [dat](https://github.com/mgutz/dat) - Go Postgres Data Access Toolkit. * [Dotsql](https://github.com/gchaincl/dotsql) - Go library that helps you keep sql files in one place and use them with ease. * [goqu](https://github.com/doug-martin/goqu) - Idiomatic SQL builder and query library. * [igor](https://github.com/galeone/igor) - Abstraction layer for PostgreSQL that supports advanced functionality and uses gorm-like syntax. * [ozzo-dbx](https://github.com/go-ozzo/ozzo-dbx) - Powerful data retrieval methods as well as DB-agnostic query building capabilities. * [scaneo](https://github.com/variadico/scaneo) - Generate Go code to convert database rows into arbitrary structs. * [sqrl](https://github.com/elgris/sqrl) - SQL query builder, fork of Squirrel with improved performance. * [Squirrel](https://github.com/Masterminds/squirrel) - Go library that helps you build SQL queries. * [xo](https://github.com/knq/xo) - Generate idiomatic Go code for databases based on existing schema definitions or custom queries supporting PostgreSQL, MySQL, SQLite, Oracle, and Microsoft SQL Server. 
## Database Drivers *Libraries for connecting and operating databases.* * Relational Databases * [avatica](https://github.com/Boostport/avatica) - Apache Phoenix/Avatica SQL driver for database/sql. * [bgc](https://github.com/viant/bgc) - Datastore Connectivity for BigQuery for go. * [firebirdsql](https://github.com/nakagami/firebirdsql) - Firebird RDBMS SQL driver for Go. * [go-adodb](https://github.com/mattn/go-adodb) - Microsoft ActiveX Object DataBase driver for go that uses database/sql. * [go-bqstreamer](https://github.com/rounds/go-bqstreamer) - BigQuery fast and concurrent stream insert. * [go-mssqldb](https://github.com/denisenkom/go-mssqldb) - Microsoft MSSQL driver for Go. * [go-oci8](https://github.com/mattn/go-oci8) - Oracle driver for go that uses database/sql. * [go-sql-driver/mysql](https://github.com/go-sql-driver/mysql) - MySQL driver for Go. * [go-sqlite3](https://github.com/mattn/go-sqlite3) - SQLite3 driver for go that uses database/sql. * [gofreetds](https://github.com/minus5/gofreetds) - Microsoft MSSQL driver. Go wrapper over [FreeTDS](http://www.freetds.org). * [pgx](https://github.com/jackc/pgx) - PostgreSQL driver supporting features beyond those exposed by database/sql. * [pq](https://github.com/lib/pq) - Pure Go Postgres driver for database/sql. * NoSQL Databases * [aerospike-client-go](https://github.com/aerospike/aerospike-client-go) - Aerospike client in Go language. * [arangolite](https://github.com/solher/arangolite) - Lightweight golang driver for ArangoDB. * [asc](https://github.com/viant/asc) - Datastore Connectivity for Aerospike for go. * [cayley](https://github.com/google/cayley) - Graph database with support for multiple backends. * [dsc](https://github.com/viant/dsc) - Datastore connectivity for SQL, NoSQL, structured files. * [dynago](https://github.com/underarmour/dynago) - Dynago is a principle of least surprise client for DynamoDB. * [go-couchbase](https://github.com/couchbase/go-couchbase) - Couchbase client in Go. 
* [go-couchdb](https://github.com/fjl/go-couchdb) - Yet another CouchDB HTTP API wrapper for Go. * [gocb](https://github.com/couchbase/gocb) - Official Couchbase Go SDK. * [gocql](http://gocql.github.io) - Go language driver for Apache Cassandra. * [gomemcache](https://github.com/bradfitz/gomemcache/) - memcache client library for the Go programming language. * [gorethink](https://github.com/dancannon/gorethink) - Go language driver for RethinkDB. * [goriak](https://github.com/zegl/goriak) - Go language driver for Riak KV. * [mgo](https://godoc.org/labix.org/v2/mgo) - MongoDB driver for the Go language that implements a rich and well tested selection of features under a very simple API following standard Go idioms. * [neo4j](https://github.com/cihangir/neo4j) - Neo4j Rest API Bindings for Golang. * [Neo4j-GO](https://github.com/davemeehan/Neo4j-GO) - Neo4j REST Client in golang. * [neoism](https://github.com/jmcvetta/neoism) - Neo4j client for Golang. * [redigo](https://github.com/garyburd/redigo) - Redigo is a Go client for the Redis database. * [redis](https://github.com/go-redis/redis) - Redis client for Golang. * [redis](https://github.com/hoisie/redis) - Simple, powerful Redis client for Go. * [redis](https://github.com/bsm/redeo) - Redis-protocol compatible TCP servers/services. * [xredis](https://github.com/shomali11/xredis) - Typesafe, customizable, clean & easy to use Redis client. * Search and Analytic Databases. * [bleve](https://github.com/blevesearch/bleve) - Modern text indexing library for go. * [elastic](https://github.com/olivere/elastic) - Elasticsearch client for Go. * [elasticsql](https://github.com/cch123/elasticsql) - Convert sql to elasticsearch dsl in Go. * [elastigo](https://github.com/mattbaird/elastigo) - Elasticsearch client library. * [goes](https://github.com/belogik/goes) - Library to interact with Elasticsearch. 
* [riot](https://github.com/go-ego/riot) - Go Open Source, Distributed, Simple and efficient Search Engine. * [skizze](https://github.com/seiflotfy/skizze) - probabilistic data-structures service and storage. ## Date and Time *Libraries for working with dates and times.* * [carbon](https://github.com/uniplaces/carbon) - Simple Time extension with a lot of util methods, ported from PHP Carbon library. * [date](https://github.com/rickb777/date) - Augments Time for working with dates, date ranges, time spans, periods, and time-of-day. * [dateparse](https://github.com/araddon/dateparse) - Parse dates without knowing the format in advance. * [durafmt](https://github.com/hako/durafmt) - Time duration formatting library for Go. * [feiertage](https://github.com/wlbr/feiertage) - Set of functions to calculate public holidays in Germany, incl. specialization on the states of Germany (Bundesländer). Things like Easter, Pentecost, Thanksgiving... * [go-persian-calendar](https://github.com/yaa110/go-persian-calendar) - The implementation of the Persian (Solar Hijri) Calendar in Go (golang). * [go-sunrise](https://github.com/nathan-osman/go-sunrise) - Calculate the sunrise and sunset times for a given location. * [goweek](https://github.com/grsmv/goweek) - Library for working with week entity in golang. * [now](https://github.com/jinzhu/now) - Now is a time toolkit for golang. * [NullTime](https://github.com/kirillDanshin/nulltime) - Nullable time.Time. * [timeutil](https://github.com/leekchan/timeutil) - Useful extensions (Timedelta, Strftime, ...) to the golang's time package. * [tuesday](https://github.com/osteele/tuesday) - Ruby-compatible Strftime function. ## Distributed Systems *Packages that help with building Distributed Systems.* * [celeriac](https://github.com/svcavallar/celeriac.v1) - Library for adding support for interacting and monitoring Celery workers, tasks and events in Go. * [digota](https://github.com/digota/digota) - grpc ecommerce microservice. 
* [drmaa](https://github.com/dgruber/drmaa) - Job submission library for cluster schedulers based on the DRMAA standard. * [emitter-io](https://github.com/emitter-io/emitter) - High performance, distributed, secure and low latency publish-subscribe platform built with MQTT, Websockets and love. * [flowgraph](https://github.com/vectaport/flowgraph) - MPI-style ready-send coordination layer. * [gleam](https://github.com/chrislusf/gleam) - Fast and scalable distributed map/reduce system written in pure Go and Luajit, combining Go's high concurrency with Luajit's high performance, runs standalone or distributed. * [glow](https://github.com/chrislusf/glow) - Easy-to-Use scalable distributed big data processing, Map-Reduce, DAG execution, all in pure Go. * [go-jump](https://github.com/dgryski/go-jump) - Port of Google's "Jump" Consistent Hash function. * [go-kit](https://github.com/go-kit/kit) - Microservice toolkit with support for service discovery, load balancing, pluggable transports, request tracking, etc. * [gorpc](https://github.com/valyala/gorpc) - Simple, fast and scalable RPC library for high load. * [grpc-go](https://github.com/grpc/grpc-go) - The Go language implementation of gRPC. HTTP/2 based RPC. * [hprose](https://github.com/hprose/hprose-golang) - Very newbility RPC Library, support 25+ languages now. * [jsonrpc](https://github.com/osamingo/jsonrpc) - The jsonrpc package helps implement of JSON-RPC 2.0. * [jsonrpc](https://github.com/ybbus/jsonrpc) - JSON-RPC 2.0 HTTP client implementation. * [KrakenD](https://github.com/devopsfaith/krakend) - Ultra performant API Gateway framework with middlewares. * [micro](https://github.com/micro/micro) - Pluggable microservice toolkit and distributed systems platform. * [NATS](https://github.com/nats-io/gnatsd) - Lightweight, high performance messaging system for microservices, IoT, and cloud native systems. 
* [raft](https://github.com/hashicorp/raft) - Golang implementation of the Raft consensus protocol, by HashiCorp. * [raft](https://github.com/coreos/etcd/tree/master/raft) - Go implementation of the Raft consensus protocol, by CoreOS. * [ringpop-go](https://github.com/uber/ringpop-go) - Scalable, fault-tolerant application-layer sharding for Go applications. * [rpcx](https://github.com/smallnest/rpcx) - Distributed pluggable RPC service framework like alibaba Dubbo. * [sleuth](https://github.com/ursiform/sleuth) - Library for master-less p2p auto-discovery and RPC between HTTP services (using [ZeroMQ](https://github.com/zeromq/libzmq)). * [tendermint](https://github.com/tendermint/tendermint) - High-performance middleware for transforming a state machine written in any programming language into a Byzantine Fault Tolerant replicated state machine using the Tendermint consensus and blockchain protocols. * [torrent](https://github.com/anacrolix/torrent) - BitTorrent client package. * [dht](https://godoc.org/github.com/anacrolix/dht) - BitTorrent Kademlia DHT implementation. * [go-peerflix](https://github.com/Sioro-Neoku/go-peerflix) - Video streaming torrent client. ## Email *Libraries that implement email creation and sending.* * [douceur](https://github.com/aymerick/douceur) - CSS inliner for your HTML emails. * [email](https://github.com/jordan-wright/email) - A robust and flexible email library for Go. * [go-dkim](https://github.com/toorop/go-dkim) - DKIM library, to sign & verify email. * [go-imap](https://github.com/emersion/go-imap) - IMAP library for clients and servers. * [go-message](https://github.com/emersion/go-message) - Streaming library for the Internet Message Format and mail messages. * [Gomail](https://github.com/go-gomail/gomail/) - Gomail is a very simple and powerful package to send emails. * [Hectane](https://github.com/hectane/hectane) - Lightweight SMTP client providing an HTTP API. 
* [hermes](https://github.com/matcornic/hermes) - Golang package that generates clean, responsive HTML e-mails. * [MailHog](https://github.com/mailhog/MailHog) - Email and SMTP testing with web and API interface. * [SendGrid](https://github.com/sendgrid/sendgrid-go) - SendGrid's Go library for sending email. * [smtp](https://github.com/mailhog/smtp) - SMTP server protocol state machine. ## Embeddable Scripting Languages *Embedding other languages inside your go code.* * [agora](https://github.com/PuerkitoBio/agora) - Dynamically typed, embeddable programming language in Go. * [anko](https://github.com/mattn/anko) - Scriptable interpreter written in Go. * [binder](https://github.com/alexeyco/binder) - Go to Lua binding library, based on [gopher-lua](https://github.com/yuin/gopher-lua). * [gisp](https://github.com/jcla1/gisp) - Simple LISP in Go. * [go-duktape](https://github.com/olebedev/go-duktape) - Duktape JavaScript engine bindings for Go. * [go-lua](https://github.com/Shopify/go-lua) - Port of the Lua 5.2 VM to pure Go. * [go-php](https://github.com/deuill/go-php) - PHP bindings for Go. * [go-python](https://github.com/sbinet/go-python) - naive go bindings to the CPython C-API. * [golua](https://github.com/aarzilli/golua) - Go bindings for Lua C API. * [gopher-lua](https://github.com/yuin/gopher-lua) - Lua 5.1 VM and compiler written in Go. * [ngaro](https://github.com/db47h/ngaro) - Embeddable Ngaro VM implementation enabling scripting in Retro. * [otto](https://github.com/robertkrimen/otto) - JavaScript interpreter written in Go. * [purl](https://github.com/ian-kent/purl) - Perl 5.18.2 embedded in Go. ## Files *Libraries for handling files and file systems.* * [afero](https://github.com/spf13/afero) - FileSystem Abstraction System for Go. * [go-csv-tag](https://github.com/artonge/go-csv-tag) - Load csv file using tag. * [go-gtfs](https://github.com/artonge/go-gtfs) - Load gtfs files in go. 
* [notify](https://github.com/rjeczalik/notify) - File system event notification library with simple API, similar to os/signal. * [skywalker](https://github.com/dixonwille/skywalker) - Package to allow one to concurrently go through a filesystem with ease. * [tarfs](https://github.com/posener/tarfs) - Implementation of the [FileSystem interface](https://godoc.org/github.com/kr/fs#FileSystem) for tar files. ## Financial *Packages for accounting and finance.* * [accounting](https://github.com/leekchan/accounting) - money and currency formatting for golang. * [decimal](https://github.com/shopspring/decimal) - Arbitrary-precision fixed-point decimal numbers. * [go-finance](https://github.com/FlashBoys/go-finance) - Comprehensive financial markets data in Go. * [go-money](https://github.com/rhymond/go-money) - Implementation of Fowler's Money pattern. * [ofxgo](https://github.com/aclindsa/ofxgo) - Query OFX servers and/or parse the responses (with example command-line client). * [vat](https://github.com/dannyvankooten/vat) - VAT number validation & EU VAT rates. ## Forms *Libraries for working with forms.* * [bind](https://github.com/robfig/bind) - Bind form data to any Go values. * [binding](https://github.com/mholt/binding) - Binds form and JSON data from net/http Request to struct. * [conform](https://github.com/leebenson/conform) - Keeps user input in check. Trims, sanitizes & scrubs data based on struct tags. * [form](https://github.com/go-playground/form) - Decodes url.Values into Go value(s) and Encodes Go value(s) into url.Values. Dual Array and Full map support. * [formam](https://github.com/monoculum/formam) - decode form's values into a struct. * [forms](https://github.com/albrow/forms) - Framework-agnostic library for parsing and validating form/JSON data which supports multipart forms and files. * [gorilla/csrf](https://github.com/gorilla/csrf) - CSRF protection for Go web applications & services. 
* [nosurf](https://github.com/justinas/nosurf) - CSRF protection middleware for Go. ## Game Development *Awesome game development libraries.* * [Azul3D](https://github.com/azul3d/engine) - 3D game engine written in Go. * [Ebiten](https://github.com/hajimehoshi/ebiten) - simple 2D game library in Go. * [engo](https://github.com/EngoEngine/engo) - Engo is an open-source 2D game engine written in Go. It follows the Entity-Component-System paradigm. * [GarageEngine](https://github.com/vova616/GarageEngine) - 2d game engine written in Go working on OpenGL. * [glop](https://github.com/runningwild/glop) - Glop (Game Library Of Power) is a fairly simple cross-platform game library. * [go-astar](https://github.com/beefsack/go-astar) - Go implementation of the A\* path finding algorithm. * [go-collada](https://github.com/GlenKelley/go-collada) - Go package for working with the Collada file format. * [go-sdl2](https://github.com/veandco/go-sdl2) - Go bindings for the [Simple DirectMedia Layer](https://www.libsdl.org/). * [go3d](https://github.com/ungerik/go3d) - Performance oriented 2D/3D math package for Go. * [gonet](https://github.com/xtaci/gonet) - Game server skeleton implemented with golang. * [goworld](https://github.com/xiaonanln/goworld) - Scalable game server engine, featuring space-entity framework and hot-swapping * [Leaf](https://github.com/name5566/leaf) - Lightweight game server framework. * [nano](https://github.com/lonnng/nano) - Lightweight, facility, high performance golang based game server framework * [Oak](https://github.com/oakmound/oak) - Pure Go game engine. * [Pixel](https://github.com/faiface/pixel) - Hand-crafted 2D game library in Go. * [raylib-go](https://github.com/gen2brain/raylib-go) - Go bindings for [raylib](http://www.raylib.com/), a simple and easy-to-use library to learn videogames programming. * [termloop](https://github.com/JoelOtter/termloop) - Terminal-based game engine for Go, built on top of Termbox. 
## Generation and Generics *Tools to enhance the language with features like generics via code generation.* * [efaceconv](https://github.com/t0pep0/efaceconv) - Code generation tool for high performance conversion from interface{} to immutable type without allocations. * [gen](https://github.com/clipperhouse/gen) - Code generation tool for ‘generics’-like functionality. * [go-enum](https://github.com/abice/go-enum) - Code generation for enums from code comments. * [go-linq](https://github.com/ahmetalpbalkan/go-linq) - .NET LINQ-like query methods for Go. * [goderive](https://github.com/awalterschulze/goderive) - Derives functions from input types. * [interfaces](https://github.com/rjeczalik/interfaces) - Command line tool for generating interface definitions. * [jennifer](https://github.com/dave/jennifer) - Generate arbitrary Go code without templates. * [pkgreflect](https://github.com/ungerik/pkgreflect) - Go preprocessor for package scoped reflection. ## Go Compilers *Tools for compiling Go to other languages.* * [gopherjs](https://github.com/gopherjs/gopherjs) - Compiler from Go to JavaScript. * [llgo](https://github.com/go-llvm/llgo) - LLVM-based compiler for Go. * [tardisgo](https://github.com/tardisgo/tardisgo) - Golang to Haxe to CPP/CSharp/Java/JavaScript transpiler. ## Goroutines *Tools for managing and working with Goroutines.* * [go-floc](https://github.com/workanator/go-floc) - Orchestrate goroutines with ease. * [go-flow](https://github.com/kamildrazkiewicz/go-flow) - Control goroutines execution order. * [GoSlaves](https://github.com/themester/GoSlaves) - Simple and Asynchronous Goroutine pool library. * [goworker](https://github.com/benmanns/goworker) - goworker is a Go-based background worker. * [grpool](https://github.com/ivpusic/grpool) - Lightweight Goroutine pool. * [parallel-fn](https://github.com/rafaeljesus/parallel-fn) - Run functions in parallel. 
* [pool](https://github.com/go-playground/pool) - Limited consumer goroutine or unlimited goroutine pool for easier goroutine handling and cancellation. * [semaphore](https://github.com/kamilsk/semaphore) - Semaphore pattern implementation with timeout of lock/unlock operations based on channel and context. * [semaphore](https://github.com/marusama/semaphore) - Fast resizable semaphore implementation based on CAS (faster than channel-based semaphore implementations). * [tunny](https://github.com/Jeffail/tunny) - Goroutine pool for golang. * [worker-pool](https://github.com/vardius/worker-pool) - Simple async worker pool in Go. * [workerpool](https://github.com/gammazero/workerpool) - Goroutine pool that limits the concurrency of task execution, not the number of tasks queued. ## GUI *Libraries for building GUI Applications.* *Toolkits* * [app](https://github.com/murlokswarm/app) - Package to create apps with GO, HTML and CSS. Supports: MacOS, Windows in progress. * [go-astilectron](https://github.com/asticode/go-astilectron) - Build cross platform GUI apps with GO and HTML/JS/CSS (powered by Electron). * [go-gtk](http://mattn.github.io/go-gtk/) - Go bindings for GTK. * [go-qml](https://github.com/go-qml/qml) - QML support for the Go language. * [go-sciter](https://github.com/sciter-sdk/go-sciter) - Go bindings for Sciter: the Embeddable HTML/CSS/script engine for modern desktop UI development. Cross platform. * [goqt](https://github.com/visualfc/goqt) - Golang bindings to the Qt cross-platform application framework. * [gotk3](https://github.com/gotk3/gotk3) - Go bindings for GTK3. * [gowd](https://github.com/dtylman/gowd) - Rapid and simple desktop UI development with GO, HTML, CSS and NW.js. Cross platform. * [qt](https://github.com/therecipe/qt) - Qt binding for Go (support for Windows / macOS / Linux / Android / iOS / Sailfish OS / Raspberry Pi). * [ui](https://github.com/andlabs/ui) - Platform-native GUI library for Go. Cross platform. 
* [walk](https://github.com/lxn/walk) - Windows application library kit for Go. * [webview](https://github.com/zserge/webview) - Cross-platform webview window with simple two-way JavaScript bindings (Windows / macOS / Linux). *Interaction* * [gosx-notifier](https://github.com/deckarep/gosx-notifier) - OSX Desktop Notifications library for Go. * [robotgo](https://github.com/go-vgo/robotgo) - Go Native cross-platform GUI system automation. Control the mouse, keyboard and other. * [systray](https://github.com/getlantern/systray) - Cross platform Go library to place an icon and menu in the notification area. * [trayhost](https://github.com/shurcooL/trayhost) - Cross-platform Go library to place an icon in the host operating system's taskbar. ## Hardware *Libraries, tools, and tutorials for interacting with hardware.* See [go-hardware](https://github.com/rakyll/go-hardware) for a comprehensive list. ## Images *Libraries for manipulating images.* * [bild](https://github.com/anthonynsimon/bild) - Collection of image processing algorithms in pure Go. * [bimg](https://github.com/h2non/bimg) - Small package for fast and efficient image processing using libvips. * [geopattern](https://github.com/pravj/geopattern) - Create beautiful generative image patterns from a string. * [gg](https://github.com/fogleman/gg) - 2D rendering in pure Go. * [gift](https://github.com/disintegration/gift) - Package of image processing filters. * [go-cairo](https://github.com/ungerik/go-cairo) - Go binding for the cairo graphics library. * [go-gd](https://github.com/bolknote/go-gd) - Go binding for GD library. * [go-nude](https://github.com/koyachi/go-nude) - Nudity detection with Go. * [go-opencv](https://github.com/lazywei/go-opencv) - Go bindings for OpenCV. * [go-webcolors](https://github.com/jyotiska/go-webcolors) - Port of webcolors library from Python to Go. * [gocv](https://github.com/hybridgroup/gocv) - Go package for computer vision using OpenCV 3.3+. 
* [govatar](https://github.com/o1egl/govatar) - Library and CMD tool for generating funny avatars. * [imagick](https://github.com/gographics/imagick) - Go binding to ImageMagick's MagickWand C API. * [imaginary](https://github.com/h2non/imaginary) - Fast and simple HTTP microservice for image resizing. * [imaging](https://github.com/disintegration/imaging) - Simple Go image processing package. * [img](https://github.com/hawx/img) - Selection of image manipulation tools. * [ln](https://github.com/fogleman/ln) - 3D line art rendering in Go. * [mpo](https://github.com/donatj/mpo) - Decoder and conversion tool for MPO 3D Photos. * [picfit](https://github.com/thoas/picfit) - An image resizing server written in Go. * [pt](https://github.com/fogleman/pt) - Path tracing engine written in Go. * [resize](https://github.com/nfnt/resize) - Image resizing for Go with common interpolation methods. * [rez](https://github.com/bamiaux/rez) - Image resizing in pure Go and SIMD. * [smartcrop](https://github.com/muesli/smartcrop) - Finds good crops for arbitrary images and crop sizes. * [svgo](https://github.com/ajstarks/svgo) - Go Language Library for SVG generation. * [tga](https://github.com/ftrvxmtrx/tga) - Package tga is a TARGA image format decoder/encoder. ## IoT (Internet of Things) *Libraries for programming devices of the IoT.* * [connectordb](https://github.com/connectordb/connectordb) - Open-Source Platform for Quantified Self & IoT. * [devices](https://github.com/goiot/devices) - Suite of libraries for IoT devices, experimental for x/exp/io. * [eywa](https://github.com/xcodersun/eywa) - Project Eywa is essentially a connection manager that keeps track of connected devices. * [flogo](https://github.com/tibcosoftware/flogo) - Project Flogo is an Open Source Framework for IoT Edge Apps & Integration. * [gatt](https://github.com/paypal/gatt) - Gatt is a Go package for building Bluetooth Low Energy peripherals. 
* [gobot](https://github.com/hybridgroup/gobot/) - Gobot is a framework for robotics, physical computing, and the Internet of Things. * [mainflux](https://github.com/Mainflux/mainflux) - Industrial IoT Messaging and Device Management Server. * [sensorbee](https://github.com/sensorbee/sensorbee) - Lightweight stream processing engine for IoT. ## Logging *Libraries for generating and working with log files.* * [distillog](https://github.com/amoghe/distillog) - distilled levelled logging (think of it as stdlib + log levels). * [glg](https://github.com/kpango/glg) - glg is simple and fast leveled logging library for Go. * [glog](https://github.com/golang/glog) - Leveled execution logs for Go. * [go-cronowriter](https://github.com/utahta/go-cronowriter) - Simple writer that rotate log files automatically based on current date and time, like cronolog. * [go-log](https://github.com/siddontang/go-log) - Log lib supports level and multi handlers. * [go-log](https://github.com/ian-kent/go-log) - Log4j implementation in Go. * [go-logger](https://github.com/apsdehal/go-logger) - Simple logger of Go Programs, with level handlers. * [gologger](https://github.com/sadlil/gologger) - Simple easy to use log lib for go, logs in Colored Console, Simple Console, File or Elasticsearch. * [gomol](https://github.com/aphistic/gomol) - Multiple-output, structured logging for Go with extensible logging outputs. * [gone/log](https://github.com/One-com/gone/tree/master/log) - Fast, extendable, full-featured, std-lib source compatible log library. * [journald](https://github.com/ssgreg/journald) - Go implementation of systemd Journal's native API for logging. * [log](https://github.com/apex/log) - Structured logging package for Go. * [log](https://github.com/go-playground/log) - Simple, configurable and scalable Structured Logging for Go. * [log](https://github.com/teris-io/log) - Structured log interface for Go cleanly separates logging facade from its implementation. 
* [log-voyage](https://github.com/firstrow/logvoyage) - Full-featured logging saas written in golang. * [log15](https://github.com/inconshreveable/log15) - Simple, powerful logging for Go. * [logdump](https://github.com/ewwwwwqm/logdump) - Package for multi-level logging. * [logex](https://github.com/chzyer/logex) - Golang log lib, supports tracking and level, wrap by standard log lib. * [logger](https://github.com/azer/logger) - Minimalistic logging library for Go. * [logo](https://github.com/mbndr/logo) - Golang logger to different configurable writers. * [logrus](https://github.com/Sirupsen/logrus) - Structured logger for Go. * [logrusly](https://github.com/sebest/logrusly) - [logrus](https://github.com/sirupsen/logrus) plug-in to send errors to a [Loggly](https://www.loggly.com/). * [logutils](https://github.com/hashicorp/logutils) - Utilities for slightly better logging in Go (Golang) extending the standard logger. * [logxi](https://github.com/mgutz/logxi) - 12-factor app logger that is fast and makes you happy. * [lumberjack](https://github.com/natefinch/lumberjack) - Simple rolling logger, implements io.WriteCloser. * [mlog](https://github.com/jbrodriguez/mlog) - Simple logging module for go, with 5 levels, an optional rotating logfile feature and stdout/stderr output. * [ozzo-log](https://github.com/go-ozzo/ozzo-log) - High performance logging supporting log severity, categorization, and filtering. Can send filtered log messages to various targets (e.g. console, network, mail). * [seelog](https://github.com/cihub/seelog) - Logging functionality with flexible dispatching, filtering, and formatting. * [spew](https://github.com/davecgh/go-spew) - Implements a deep pretty printer for Go data structures to aid in debugging. * [stdlog](https://github.com/alexcesaro/log) - Stdlog is an object-oriented library providing leveled logging. It is very useful for cron jobs. 
* [tail](https://github.com/hpcloud/tail) - Go package striving to emulate the features of the BSD tail program. * [xlog](https://github.com/xfxdev/xlog) - Plugin architecture and flexible log system for Go, with level ctrl, multiple log target and custom log format. * [xlog](https://github.com/rs/xlog) - Structured logger for net/context aware HTTP handlers with flexible dispatching. * [zap](https://github.com/uber-go/zap) - Fast, structured, leveled logging in Go. * [zerolog](https://github.com/rs/zerolog) - Zero-allocation JSON logger. ## Machine Learning *Libraries for Machine Learning.* * [bayesian](https://github.com/jbrukh/bayesian) - Naive Bayesian Classification for Golang. * [CloudForest](https://github.com/ryanbressler/CloudForest) - Fast, flexible, multi-threaded ensembles of decision trees for machine learning in pure Go. * [fonet](https://github.com/Fontinalis/fonet) - A Deep Neural Network library written in Go. * [gago](https://github.com/MaxHalford/gago) - Multi-population, flexible, parallel genetic algorithm. * [go-cluster](https://github.com/e-XpertSolutions/go-cluster) - Go implementation of the k-modes and k-prototypes clustering algorithms. * [go-fann](https://github.com/white-pony/go-fann) - Go bindings for Fast Artificial Neural Networks(FANN) library. * [go-galib](https://github.com/thoj/go-galib) - Genetic Algorithms library written in Go / golang. * [go-pr](https://github.com/daviddengcn/go-pr) - Pattern recognition package in Go lang. * [gobrain](https://github.com/goml/gobrain) - Neural Networks written in go. * [godist](https://github.com/e-dard/godist) - Various probability distributions, and associated methods. * [goga](https://github.com/tomcraven/goga) - Genetic algorithm library for Go. * [GoLearn](https://github.com/sjwhitworth/golearn) - General Machine Learning library for Go. * [golinear](https://github.com/danieldk/golinear) - liblinear bindings for Go. 
* [goml](https://github.com/cdipaolo/goml) - On-line Machine Learning in Go. * [goRecommend](https://github.com/timkaye11/goRecommend) - Recommendation Algorithms library written in Go. * [gorgonia](https://github.com/chewxy/gorgonia) - graph-based computational library like Theano for Go that provides primitives for building various machine learning and neural network algorithms. * [goscore](https://github.com/asafschers/goscore) - Go Scoring API for PMML. * [gosseract](https://github.com/otiai10/gosseract) - Go package for OCR (Optical Character Recognition), by using Tesseract C++ library. * [libsvm](https://github.com/datastream/libsvm) - libsvm golang version derived work based on LIBSVM 3.14. * [mlgo](https://github.com/NullHypothesis/mlgo) - This project aims to provide minimalistic machine learning algorithms in Go. * [neat](https://github.com/jinyeom/neat) - Plug-and-play, parallel Go framework for NeuroEvolution of Augmenting Topologies (NEAT). * [neural-go](https://github.com/schuyler/neural-go) - Multilayer perceptron network implemented in Go, with training via backpropagation. * [probab](https://github.com/ThePaw/probab) - Probability distribution functions. Bayesian inference. Written in pure Go. * [regommend](https://github.com/muesli/regommend) - Recommendation & collaborative filtering engine. * [shield](https://github.com/eaigner/shield) - Bayesian text classifier with flexible tokenizers and storage backends for Go. * [tfgo](https://github.com/galeone/tfgo) - Easy to use Tensorflow bindings: simplifies the usage of the official Tensorflow Go bindings. Define computational graphs in Go, load and execute models trained in Python. * [Varis](https://github.com/Xamber/Varis) - Golang Neural Network. ## Messaging *Libraries that implement messaging systems.* * [Centrifugo](https://github.com/centrifugal/centrifugo) - Real-time messaging (Websockets or SockJS) server in Go. * [dbus](https://github.com/godbus/dbus) - Native Go bindings for D-Bus. 
* [drone-line](https://github.com/appleboy/drone-line) - Sending [Line](https://business.line.me/en/services/bot) notifications using a binary, docker or Drone CI. * [emitter](https://github.com/olebedev/emitter) - Emits events using Go way, with wildcard, predicates, cancellation possibilities and many other good wins. * [event](https://github.com/agoalofalife/event) - Implementation of the pattern observer. * [EventBus](https://github.com/asaskevich/EventBus) - The lightweight event bus with async compatibility. * [gaurun-client](https://github.com/osamingo/gaurun-client) - Gaurun Client written in Go. * [Glue](https://github.com/desertbit/glue) - Robust Go and Javascript Socket Library (Alternative to Socket.io). * [go-notify](https://github.com/TheCreeper/go-notify) - Native implementation of the freedesktop notification spec. * [go-nsq](https://github.com/nsqio/go-nsq) - the official Go package for NSQ. * [go-socket.io](https://github.com/googollee/go-socket.io) - socket.io library for golang, a realtime application framework. * [go-vitotrol](https://github.com/maxatome/go-vitotrol) - Client library to Viessmann Vitotrol web service. * [Gollum](https://github.com/trivago/gollum) - A n:m multiplexer that gathers messages from different sources and broadcasts them to a set of destinations. * [golongpoll](https://github.com/jcuga/golongpoll) - HTTP longpoll server library that makes web pub-sub simple. * [goose](https://github.com/ian-kent/goose) - Server Sent Events in Go. * [gopush-cluster](https://github.com/Terry-Mao/gopush-cluster) - gopush-cluster is a go push server cluster. * [gorush](https://github.com/appleboy/gorush) - Push notification server using [APNs2](https://github.com/sideshow/apns2) and google [GCM](https://github.com/google/go-gcm). 
* [guble](https://github.com/smancke/guble) - Messaging server using push notifications (Google Firebase Cloud Messaging, Apple Push Notification services, SMS) as well as websockets, a REST API, featuring distributed operation and message-persistence. * [machinery](https://github.com/RichardKnop/machinery) - Asynchronous task queue/job queue based on distributed message passing. * [mangos](https://github.com/go-mangos/mangos) - Pure go implementation of the Nanomsg ("Scalable Protocols") with transport interoperability. * [melody](https://github.com/olahol/melody) - Minimalist framework for dealing with websocket sessions, includes broadcasting and automatic ping/pong handling. * [messagebus](https://github.com/vardius/message-bus) - messagebus is a Go simple async message bus, perfect for using as event bus when doing event sourcing, CQRS, DDD. * [NATS Go Client](https://github.com/nats-io/nats) - Lightweight and high performance publish-subscribe and distributed queueing messaging system - this is the Go library. * [nsq-event-bus](https://github.com/rafaeljesus/nsq-event-bus) - A tiny wrapper around NSQ topic and channel. * [oplog](https://github.com/dailymotion/oplog) - Generic oplog/replication system for REST APIs. * [pubsub](https://github.com/tuxychandru/pubsub) - Simple pubsub package for go. * [RapidMQ](https://github.com/sybrexsys/RapidMQ) - RapidMQ is a lightweight and reliable library for managing of the local messages queue. * [sarama](https://github.com/Shopify/sarama) - Go library for Apache Kafka. * [Uniqush-Push](https://github.com/uniqush/uniqush-push) - Redis backed unified push service for server-side notifications to mobile devices. * [zmq4](https://github.com/pebbe/zmq4) - Go interface to ZeroMQ version 4. Also available for [version 3](https://github.com/pebbe/zmq3) and [version 2](https://github.com/pebbe/zmq2). 
## Miscellaneous *These libraries were placed here because none of the other categories seemed to fit.* * [alice](https://github.com/magic003/alice) - Additive dependency injection container for Golang. * [antch](https://github.com/antchfx/antch) - A fast, powerful and extensible web crawling & scraping framework. * [archiver](https://github.com/mholt/archiver) - Library and command for making and extracting .zip and .tar.gz archives. * [autoflags](https://github.com/artyom/autoflags) - Go package to automatically define command line flags from struct fields. * [avgRating](https://github.com/kirillDanshin/avgRating) - Calculate average score and rating based on Wilson Score Equation. * [banner](https://github.com/dimiro1/banner) - Add beautiful banners into your Go applications. * [battery](https://github.com/distatus/battery) - Cross-platform, normalized battery information library. * [bitio](https://github.com/icza/bitio) - Highly optimized bit-level Reader and Writer for Go. * [browscap_go](https://github.com/digitalcrab/browscap_go) - GoLang Library for [Browser Capabilities Project](http://browscap.org/). * [captcha](https://github.com/steambap/captcha) - Package captcha provides an easy to use, unopinionated API for captcha generation. * [conv](https://github.com/cstockton/go-conv) - Package conv provides fast and intuitive conversions across Go types. * [datacounter](https://github.com/miolini/datacounter) - Go counters for readers/writer/http.ResponseWriter. * [errors](https://github.com/pkg/errors) - Package that provides simple error handling primitives. * [go-chat-bot](https://github.com/go-chat-bot/bot) - IRC, Slack & Telegram bot written in Go. * [go-commons-pool](https://github.com/jolestar/go-commons-pool) - Generic object pool for Golang. * [go-multierror](https://github.com/hashicorp/go-multierror) - Go (golang) package for representing a list of errors as a single error. 
* [go-openapi](https://github.com/go-openapi) - Collection of packages to parse and utilize open-api schemas. * [go-resiliency](https://github.com/eapache/go-resiliency) - Resiliency patterns for golang. * [go-sarah](https://github.com/oklahomer/go-sarah) - Framework to build bot for desired chat services including LINE, Slack, Gitter and more. * [go-unarr](https://github.com/gen2brain/go-unarr) - Decompression library for RAR, TAR, ZIP and 7z archives. * [go.uuid](https://github.com/satori/go.uuid) - Implementation of Universally Unique Identifier (UUID). Supported both creation and parsing of UUIDs. * [gofakeit](https://github.com/brianvoe/gofakeit) - Random data generator written in go. * [goid](https://github.com/jakehl/goid) - Generate and Parse RFC4122 compliant V4 UUIDs. * [gopsutil](https://github.com/shirou/gopsutil) - Cross-platform library for retrieving process and system utilization(CPU, Memory, Disks, etc). * [gosms](https://github.com/haxpax/gosms) - Your own local SMS gateway in Go that can be used to send SMS. * [gountries](https://github.com/pariz/gountries) - Package that exposes country and subdivision data. * [hanu](https://github.com/sbstjn/hanu) - Framework for writing Slack bots. * [health](https://github.com/dimiro1/health) - Easy to use, extensible health check library. * [healthcheck](https://github.com/etherlabsio/healthcheck) - An opinionated and concurrent health-check HTTP handler for RESTful services. * [hostutils](https://github.com/Wing924/hostutils) - A golang library for packing and unpacking FQDNs list. * [indigo](https://github.com/osamingo/indigo) - Distributed unique ID generator of using Sonyflake and encoded by Base58. * [jobs](https://github.com/albrow/jobs) - Persistent and flexible background jobs library. * [lk](https://github.com/hyperboloide/lk) - A simple licensing library for golang. * [margelet](https://github.com/zhulik/margelet) - Framework for building Telegram bots. 
* [persian](https://github.com/mavihq/persian) - Some utilities for Persian language in go.
* [secdl](https://github.com/xor-gate/secdl) - Lighttpd ModSecDownload algorithm ported to go to secure download urls.
* [shellwords](https://github.com/Wing924/shellwords) - A Golang library to manipulate strings according to the word parsing rules of the UNIX Bourne shell.
* [shortid](https://github.com/teris-io/shortid) - Distributed generation of super short, unique, non-sequential, URL friendly IDs.
* [slacker](https://github.com/shomali11/slacker) - Easy to use framework to create Slack bots.
* [stats](https://github.com/go-playground/stats) - Monitors Go MemStats + System stats such as Memory, Swap and CPU and sends via UDP anywhere you want for logging etc...
* [turtle](https://github.com/hackebrot/turtle) - Emojis for Go.
* [uuid](https://github.com/agext/uuid) - Generate, encode, and decode UUIDs v1 with fast or cryptographic-quality random node identifier.
* [VarHandler](https://github.com/azr/generators/tree/master/varhandler) - Generate boilerplate http input and output handling.
* [werr](https://github.com/txgruppi/werr) - Error Wrapper creates a wrapper for the error type in Go which captures the File, Line and Stack of where it was called.
* [xkg](https://github.com/go-xkg/xkg) - X Keyboard Grabber.
* [xstrings](https://github.com/huandu/xstrings) - Collection of useful string functions ported from other languages.

## Natural Language Processing

*Libraries for working with human languages.*

* [dpar](https://github.com/danieldk/dpar/) - Transition-based statistical dependency parser.
* [go-eco](https://github.com/ThePaw/go-eco) - Similarity, dissimilarity and distance matrices; diversity, equitability and inequality measures; species richness estimators; coenocline models.
* [go-i18n](https://github.com/nicksnyder/go-i18n/) - Package and an accompanying tool to work with localized text.
* [go-mystem](https://github.com/dveselov/mystem) - CGo bindings to Yandex.Mystem - russian morphology analyzer.
* [go-nlp](https://github.com/nuance/go-nlp) - Utilities for working with discrete probability distributions and other tools useful for doing NLP work.
* [go-stem](https://github.com/agonopol/go-stem) - Implementation of the porter stemming algorithm.
* [go-unidecode](https://github.com/mozillazg/go-unidecode) - ASCII transliterations of Unicode text.
* [go2vec](https://github.com/danieldk/go2vec) - Reader and utility functions for word2vec embeddings.
* [gojieba](https://github.com/yanyiwu/gojieba) - This is a Go implementation of [jieba](https://github.com/fxsjy/jieba) which is a Chinese word splitting algorithm.
* [golibstemmer](https://github.com/rjohnsondev/golibstemmer) - Go bindings for the snowball libstemmer library including porter 2.
* [gounidecode](https://github.com/fiam/gounidecode) - Unicode transliterator (also known as unidecode) for Go.
* [gse](https://github.com/go-ego/gse) - Go efficient text segmentation; support english, chinese, japanese and other.
* [icu](https://github.com/goodsign/icu) - Cgo binding for icu4c C library detection and conversion functions. Guaranteed compatibility with version 50.1.
* [libtextcat](https://github.com/goodsign/libtextcat) - Cgo binding for libtextcat C library. Guaranteed compatibility with version 2.2.
* [MMSEGO](https://github.com/awsong/MMSEGO) - This is a GO implementation of [MMSEG](http://technology.chtsai.org/mmseg/) which is a Chinese word splitting algorithm.
* [nlp](https://github.com/Shixzie/nlp) - Extract values from strings and fill your structs with nlp.
* [nlp](https://github.com/james-bowman/nlp) - Go Natural Language Processing library supporting LSA (Latent Semantic Analysis).
* [paicehusk](https://github.com/rookii/paicehusk) - Golang implementation of the Paice/Husk Stemming Algorithm.
* [petrovich](https://github.com/striker2000/petrovich) - Petrovich is the library which inflects Russian names to given grammatical case. * [porter](https://github.com/a2800276/porter) - This is a fairly straightforward port of Martin Porter's C implementation of the Porter stemming algorithm. * [porter2](https://github.com/zhenjl/porter2) - Really fast Porter 2 stemmer. * [prose](https://github.com/jdkato/prose) - Library for text processing that supports tokenization, part-of-speech tagging, named-entity extraction, and more. * [RAKE.go](https://github.com/Obaied/RAKE.go) - Go port of the Rapid Automatic Keyword Extraction Algorithm (RAKE). * [segment](https://github.com/blevesearch/segment) - Go library for performing Unicode Text Segmentation as described in [Unicode Standard Annex #29](http://www.unicode.org/reports/tr29/) * [sentences](https://github.com/neurosnap/sentences) - Sentence tokenizer: converts text into a list of sentences. * [shamoji](https://github.com/osamingo/shamoji) - The shamoji is word filtering package written in Go. * [snowball](https://github.com/goodsign/snowball) - Snowball stemmer port (cgo wrapper) for Go. Provides word stem extraction functionality [Snowball native](http://snowball.tartarus.org/). * [stemmer](https://github.com/dchest/stemmer) - Stemmer packages for Go programming language. Includes English and German stemmers. * [textcat](https://github.com/pebbe/textcat) - Go package for n-gram based text categorization, with support for utf-8 and raw text. * [whatlanggo](https://github.com/abadojack/whatlanggo) - Natural language detection package for Go. Supports 84 languages and 24 scripts (writing systems e.g. Latin, Cyrillic, etc). * [when](https://github.com/olebedev/when) - Natural EN and RU language date/time parser with pluggable rules. ## Networking *Libraries for working with various layers of the network.* * [arp](https://github.com/mdlayher/arp) - Package arp implements the ARP protocol, as described in RFC 826. 
* [buffstreams](https://github.com/stabbycutyou/buffstreams) - Streaming protocolbuffer data over TCP made easy. * [canopus](https://github.com/zubairhamed/canopus) - CoAP Client/Server implementation (RFC 7252). * [cidranger](https://github.com/yl2chen/cidranger) - Fast IP to CIDR lookup for Go. * [dhcp6](https://github.com/mdlayher/dhcp6) - Package dhcp6 implements a DHCPv6 server, as described in RFC 3315. * [dns](https://github.com/miekg/dns) - Go library for working with DNS. * [ether](https://github.com/songgao/ether) - Cross-platform Go package for sending and receiving ethernet frames. * [ethernet](https://github.com/mdlayher/ethernet) - Package ethernet implements marshaling and unmarshaling of IEEE 802.3 Ethernet II frames and IEEE 802.1Q VLAN tags. * [fasthttp](https://github.com/valyala/fasthttp) - Package fasthttp is a fast HTTP implementation for Go, up to 10 times faster than net/http. * [ftp](https://github.com/jlaffaye/ftp) - Package ftp implements a FTP client as described in [RFC 959](http://tools.ietf.org/html/rfc959). * [go-getter](https://github.com/hashicorp/go-getter) - Go library for downloading files or directories from various sources using a URL. * [go-stun](https://github.com/ccding/go-stun) - Go implementation of the STUN client (RFC 3489 and RFC 5389). * [gobgp](https://github.com/osrg/gobgp) - BGP implemented in the Go Programming Language. * [golibwireshark](https://github.com/sunwxg/golibwireshark) - Package golibwireshark use libwireshark library to decode pcap file and analyse dissection data. * [gopacket](https://github.com/google/gopacket) - Go library for packet processing with libpcap bindings. * [gopcap](https://github.com/akrennmair/gopcap) - Go wrapper for libpcap. * [goshark](https://github.com/sunwxg/goshark) - Package goshark use tshark to decode IP packet and create data struct to analyse packet. * [gosnmp](https://github.com/soniah/gosnmp) - Native Go library for performing SNMP actions. 
* [gotcp](https://github.com/gansidui/gotcp) - Go package for quickly writing tcp applications. * [grab](https://github.com/cavaliercoder/grab) - Go package for managing file downloads. * [graval](https://github.com/koofr/graval) - Experimental FTP server framework. * [jazigo](https://github.com/udhos/jazigo) - Jazigo is a tool written in Go for retrieving configuration for multiple network devices. * [kcp-go](https://github.com/xtaci/kcp-go) - KCP - Fast and Reliable ARQ Protocol. * [kcptun](https://github.com/xtaci/kcptun) - Extremely simple & fast udp tunnel based on KCP protocol. * [lhttp](https://github.com/fanux/lhttp) - Powerful websocket framework, build your IM server more easily. * [linkio](https://github.com/ian-kent/linkio) - Network link speed simulation for Reader/Writer interfaces. * [llb](https://github.com/kirillDanshin/llb) - It's a very simple but quick backend for proxy servers. Can be useful for fast redirection to predefined domain with zero memory allocation and fast response. * [mdns](https://github.com/hashicorp/mdns) - Simple mDNS (Multicast DNS) client/server library in Golang. * [mqttPaho](https://eclipse.org/paho/clients/golang/) - The Paho Go Client provides an MQTT client library for connection to MQTT brokers via TCP, TLS or WebSockets. * [portproxy](https://github.com/aybabtme/portproxy) - Simple TCP proxy which adds CORS support to API's which don't support it. * [publicip](https://github.com/polera/publicip) - Package publicip returns your public facing IPv4 address (internet egress). * [raw](https://github.com/mdlayher/raw) - Package raw enables reading and writing data at the device driver level for a network interface. * [sftp](https://github.com/pkg/sftp) - Package sftp implements the SSH File Transfer Protocol as described in https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt. * [ssh](https://github.com/gliderlabs/ssh) - Higher-level API for building SSH servers (wraps crypto/ssh). 
* [sslb](https://github.com/eduardonunesp/sslb) - It's a Super Simples Load Balancer, just a little project to achieve some kind of performance. * [stun](https://github.com/go-rtc/stun) - Go implementation of RFC 5389 STUN protocol. * [tcp_server](https://github.com/firstrow/tcp_server) - Go library for building tcp servers faster. * [utp](https://github.com/anacrolix/utp) - Go uTP micro transport protocol implementation. * [water](https://github.com/songgao/water) - Simple TUN/TAP library. * [winrm](https://github.com/masterzen/winrm) - Go WinRM client to remotely execute commands on Windows machines. * [xtcp](https://github.com/xfxdev/xtcp) - TCP Server Framework with simultaneous full duplex communication,graceful shutdown,custom protocol. * [YANNFF](https://github.com/intel-go/yanff) - Framework for rapid development of performant network functions for cloud and bare-metal. ## OpenGL *Libraries for using OpenGL in Go.* * [gl](https://github.com/go-gl/gl) - Go bindings for OpenGL (generated via glow). * [glfw](https://github.com/go-gl/glfw) - Go bindings for GLFW 3. * [goxjs/gl](https://github.com/goxjs/gl) - Go cross-platform OpenGL bindings (OS X, Linux, Windows, browsers, iOS, Android). * [goxjs/glfw](https://github.com/goxjs/glfw) - Go cross-platform glfw library for creating an OpenGL context and receiving events. * [mathgl](https://github.com/go-gl/mathgl) - Pure Go math package specialized for 3D math, with inspiration from GLM. ## ORM *Libraries that implement Object-Relational Mapping or datamapping techniques.* * [beego orm](https://github.com/astaxie/beego/tree/master/orm) - Powerful orm framework for go. Support: pq/mysql/sqlite3. * [go-pg](https://github.com/go-pg/pg) - PostgreSQL ORM with focus on PostgreSQL specific features and performance. * [go-queryset](https://github.com/jirfag/go-queryset) - 100% type-safe ORM with code generation and MySQL, PostgreSQL, Sqlite3, SQL Server support based on GORM. 
* [go-store](https://github.com/gosuri/go-store) - Simple and fast Redis backed key-value store library for Go.
* [gomodel](https://github.com/cosiner/gomodel) - Lightweight, fast, orm-like library that helps interact with the database.
* [GORM](https://github.com/jinzhu/gorm) - The fantastic ORM library for Golang, aims to be developer friendly.
* [gorp](https://github.com/go-gorp/gorp) - Go Relational Persistence, ORM-ish library for Go.
* [lore](https://github.com/abrahambotros/lore) - Simple and lightweight pseudo-ORM/pseudo-struct-mapping environment for Go.
* [Marlow](https://github.com/dadleyy/marlow) - Generated ORM from project structs for compile time safety assurances.
* [pop/soda](https://github.com/markbates/pop) - Database migration, creation, ORM, etc... for MySQL, PostgreSQL, and SQLite.
* [QBS](https://github.com/coocood/qbs) - Stands for Query By Struct. A Go ORM.
* [reform](https://github.com/go-reform/reform) - Better ORM for Go, based on non-empty interfaces and code generation.
* [SQLBoiler](https://github.com/volatiletech/sqlboiler) - ORM generator. Generate a featureful and blazing-fast ORM tailored to your database schema.
* [upper.io/db](https://github.com/upper/db) - Single interface for interacting with different data sources through the use of adapters that wrap mature database drivers.
* [Xorm](https://github.com/go-xorm/xorm) - Simple and powerful ORM for Go.
* [Zoom](https://github.com/albrow/zoom) - Blazing-fast datastore and querying engine built on Redis.

## Package Management

*Libraries for package and dependency management.*

* [dep](https://github.com/golang/dep) - Go dependency tool.
* [gigo](https://github.com/LyricalSecurity/gigo) - PIP-like dependency tool for golang, with support for private repositories and hashes.
* [glide](https://github.com/Masterminds/glide) - Manage your golang vendor and vendored packages with ease. Inspired by tools like Maven, Bundler, and Pip.
* [godep](https://github.com/tools/godep) - dependency tool for go, godep helps build packages reproducibly by fixing their dependencies. * [gom](https://github.com/mattn/gom) - Go Manager - bundle for go. * [goop](https://github.com/nitrous-io/goop) - Simple dependency manager for Go (golang), inspired by Bundler. * [gop](https://github.com/lunny/gop) - Build and manage your Go applications out of GOPATH * [gopm](https://github.com/gpmgo/gopm) - Go Package Manager. * [govendor](https://github.com/kardianos/govendor) - Go Package Manager. Go vendor tool that works with the standard vendor file. * [gpm](https://github.com/pote/gpm) - Barebones dependency manager for Go. * [gvt](https://github.com/FiloSottile/gvt) - gvt is a simple vendoring tool made for Go native vendoring (aka GO15VENDOREXPERIMENT), based on gb-vendor. * [johnny-deps](https://github.com/VividCortex/johnny-deps) - Minimal dependency version using Git. * [nut](https://github.com/jingweno/nut) - Vendor Go dependencies. * [VenGO](https://github.com/DamnWidget/VenGO) - create and manage exportable isolated go virtual environments. ## Query Language * [graphql](https://github.com/tmc/graphql) - graphql parser + utilities. * [graphql](https://github.com/sevki/graphql) - GraphQL implementation in go. * [graphql](https://github.com/neelance/graphql-go) - GraphQL server with a focus on ease of use. * [graphql-go](https://github.com/graphql-go/graphql) - Implementation of GraphQL for Go. * [jsonql](https://github.com/elgs/jsonql) - JSON query expression library in Golang. ## Resource Embedding * [esc](https://github.com/mjibson/esc) - Embeds files into Go programs and provides http.FileSystem interfaces to them. * [fileb0x](https://github.com/UnnoTed/fileb0x) - Simple tool to embed files in go with focus on "customization" and ease to use. * [go-embed](https://github.com/pyros2097/go-embed) - Generates go code to embed resource files into your library or executable. 
* [go-resources](https://github.com/omeid/go-resources) - Unfancy resources embedding with Go. * [go.rice](https://github.com/GeertJohan/go.rice) - go.rice is a Go package that makes working with resources such as html,js,css,images and templates very easy. * [statics](https://github.com/go-playground/statics) - Embeds static resources into go files for single binary compilation + works with http.FileSystem + symlinks. * [statik](https://github.com/rakyll/statik) - Embeds static files into a Go executable. * [templify](https://github.com/wlbr/templify) - Embed external template files into Go code to create single file binaries. * [vfsgen](https://github.com/shurcooL/vfsgen) - Generates a vfsdata.go file that statically implements the given virtual filesystem. ## Science and Data Analysis *Libraries for scientific computing and data analyzing.* * [blas](https://github.com/ziutek/blas) - Implementation of BLAS (Basic Linear Algebra Subprograms). * [chart](https://github.com/vdobler/chart) - Simple Chart Plotting library for Go. Supports many graphs types. * [evaler](https://github.com/soniah/evaler) - Simple floating point arithmetic expression evaluator. * [ewma](https://github.com/VividCortex/ewma) - Exponentially-weighted moving averages. * [geom](https://github.com/skelterjohn/geom) - 2D geometry for golang. * [go-dsp](https://github.com/mjibson/go-dsp) - Digital Signal Processing for Go. * [go-fn](https://github.com/ematvey/go-fn) - Mathematical functions written in Go language, that are not covered by math pkg. * [go-gt](https://github.com/ThePaw/go-gt) - Graph theory algorithms written in "Go" language. * [go.matrix](https://github.com/skelterjohn/go.matrix) - linear algebra for go (has been stalled). * [gocomplex](https://github.com/varver/gocomplex) - Complex number library for the Go programming language. 
* [goent](https://github.com/kzahedi/goent) - GO Implementation of Entropy Measures.
* [gofrac](https://github.com/anschelsc/gofrac) - (goinstallable) fractions library for go with support for basic arithmetic.
* [gohistogram](https://github.com/VividCortex/gohistogram) - Approximate histograms for data streams.
* [gonum/mat64](https://github.com/gonum/matrix) - The general purpose package for matrix computation. Package mat64 provides basic linear algebra operations for float64 matrices.
* [gonum/plot](https://github.com/gonum/plot) - gonum/plot provides an API for building and drawing plots in Go.
* [goraph](https://github.com/gyuho/goraph) - Pure Go graph theory library (data structure, algorithm visualization).
* [gosl](https://github.com/cpmech/gosl) - Go scientific library for linear algebra, FFT, geometry, NURBS, numerical methods, probabilities, optimisation, differential equations, and more.
* [gostat](https://github.com/ematvey/gostat) - Statistics library for the go language.
* [graph](https://github.com/yourbasic/graph) - Library of basic graph algorithms.
* [ode](https://github.com/ChristopherRabotin/ode) - Ordinary differential equation (ODE) solver which supports extended states and channel-based iteration stop conditions.
* [pagerank](https://github.com/alixaxel/pagerank) - Weighted PageRank algorithm implemented in Go.
* [PiHex](https://github.com/claygod/PiHex) - Implementation of the "Bailey-Borwein-Plouffe" algorithm for the hexadecimal number Pi.
* [sparse](https://github.com/james-bowman/sparse) - Go Sparse matrix formats for linear algebra supporting scientific and machine learning applications, compatible with gonum matrix libraries.
* [stats](https://github.com/montanaflynn/stats) - Statistics package with common functions missing from the Golang standard library.
* [streamtools](https://github.com/nytlabs/streamtools) - general purpose, graphical tool for dealing with streams of data.
* [vectormath](https://github.com/spate/vectormath) - Vectormath for Go, an adaptation of the scalar C functions from Sony's Vector Math library, as found in the Bullet-2.79 source code (currently inactive). ## Security *Libraries that are used to help make your application more secure.* * [acmetool](https://github.com/hlandau/acme) - ACME (Let's Encrypt) client tool with automatic renewal. * [autocert](https://godoc.org/golang.org/x/crypto/acme/autocert) - Auto provision Let's Encrypt certificates and start a TLS server. * [BadActor](https://github.com/jaredfolkins/badactor) - In-memory, application-driven jailer built in the spirit of fail2ban. * [go-yara](https://github.com/hillu/go-yara) - Go Bindings for [YARA](https://github.com/plusvic/yara), the "pattern matching swiss knife for malware researchers (and everyone else)". * [goSecretBoxPassword](https://github.com/dwin/goSecretBoxPassword) - A probably paranoid package for securely hashing and encrypting passwords. * [lego](https://github.com/xenolf/lego) - Pure Go ACME client library and CLI tool (for use with Let's Encrypt). * [memguard](https://github.com/awnumar/memguard) - A pure Go library for handling sensitive values in memory. * [nacl](https://github.com/kevinburke/nacl) - Go implementation of the NaCL set of API's. * [passlib](https://github.com/hlandau/passlib) - Futureproof password hashing library. * [secure](https://github.com/unrolled/secure) - HTTP middleware for Go that facilitates some quick security wins. * [simple-scrypt](https://github.com/elithrar/simple-scrypt) - Scrypt package with a simple, obvious API and automatic cost calibration built-in. * [ssh-vault](https://github.com/ssh-vault/ssh-vault) - encrypt/decrypt using ssh keys. ## Serialization *Libraries and tools for binary serialization.* * [asn1](https://github.com/PromonLogicalis/asn1) - Asn.1 BER and DER encoding library for golang. * [bambam](https://github.com/glycerine/bambam) - generator for Cap'n Proto schemas from go. 
* [colfer](https://github.com/pascaldekloe/colfer) - Code generation for the Colfer binary format. * [csvutil](https://github.com/jszwec/csvutil) - High Performance, idiomatic CSV record encoding and decoding to native Go structures. * [go-capnproto](https://github.com/glycerine/go-capnproto) - Cap'n Proto library and parser for go. * [go-codec](https://github.com/ugorji/go) - High Performance, feature-Rich, idiomatic encode, decode and rpc library for msgpack, cbor and json, with runtime-based OR code-generation support. * [gogoprotobuf](https://github.com/gogo/protobuf) - Protocol Buffers for Go with Gadgets. * [goprotobuf](https://github.com/golang/protobuf) - Go support, in the form of a library and protocol compiler plugin, for Google's protocol buffers. * [jsoniter](https://github.com/json-iterator/go) - High-performance 100% compatible drop-in replacement of "encoding/json". * [mapstructure](https://github.com/mitchellh/mapstructure) - Go library for decoding generic map values into native Go structures. * [php_session_decoder](https://github.com/yvasiyarov/php_session_decoder) - GoLang library for working with PHP session format and PHP Serialize/Unserialize functions. * [structomap](https://github.com/tuvistavie/structomap) - Library to easily and dynamically generate maps from static structures. ## Server Applications * [algernon](https://github.com/xyproto/algernon) - HTTP/2 web server with built-in support for Lua, Markdown, GCSS and Amber. * [Caddy](https://github.com/mholt/caddy) - Caddy is an alternative, HTTP/2 web server that's easy to configure and use. * [consul](https://www.consul.io/) - Consul is a tool for service discovery, monitoring and configuration. * [devd](https://github.com/cortesi/devd) - Local webserver for developers. * [etcd](https://github.com/coreos/etcd) - Highly-available key value store for shared configuration and service discovery. 
* [Fider](https://github.com/getfider/fider) - Fider is an open platform to collect and organize customer feedback.
* [minio](https://github.com/minio/minio) - Minio is a distributed object storage server.
* [nsq](http://nsq.io/) - A realtime distributed messaging platform.
* [yakvs](https://github.com/sci4me/yakvs) - Small, networked, in-memory key-value store.

## Template Engines

*Libraries and tools for templating and lexing.*

* [ace](https://github.com/yosssi/ace) - Ace is an HTML template engine for Go, inspired by Slim and Jade. Ace is a refinement of Gold.
* [amber](https://github.com/eknkc/amber) - Amber is an elegant templating engine for Go Programming Language. It is inspired from HAML and Jade.
* [damsel](https://github.com/dskinner/damsel) - Markup language featuring html outlining via css-selectors, extensible via pkg html/template and others.
* [ego](https://github.com/benbjohnson/ego) - Lightweight templating language that lets you write templates in Go. Templates are translated into Go and compiled.
* [fasttemplate](https://github.com/valyala/fasttemplate) - Simple and fast template engine. Substitutes template placeholders up to 10x faster than [text/template](http://golang.org/pkg/text/template/).
* [gofpdf](https://github.com/jung-kurt/gofpdf) - PDF document generator with high level support for text, drawing and images.
* [grender](https://github.com/dannyvankooten/grender) - small wrapper around html/template for file-based templates that support extending other template files.
* [hero](https://github.com/shiyanhui/hero) - Hero is a handy, fast and powerful go template engine.
* [jet](https://github.com/CloudyKit/jet) - Jet template engine.
* [kasia.go](https://github.com/ziutek/kasia.go) - Templating system for HTML and other text documents - go implementation.
* [liquid](https://github.com/osteele/liquid) - Go implementation of Shopify Liquid templates.
* [mustache](https://github.com/hoisie/mustache) - Go implementation of the Mustache template language. * [pongo2](https://github.com/flosch/pongo2) - Django-like template-engine for Go. * [quicktemplate](https://github.com/valyala/quicktemplate) - Fast, powerful, yet easy to use template engine. Converts templates into Go code and then compiles it. * [raymond](https://github.com/aymerick/raymond) - Complete handlebars implementation in Go. * [Razor](https://github.com/sipin/gorazor) - Razor view engine for Golang. * [Soy](https://github.com/robfig/soy) - Closure templates (aka Soy templates) for Go, following the [official spec](https://developers.google.com/closure/templates/). * [velvet](https://github.com/gobuffalo/velvet) - Complete handlebars implementation in Go. ## Testing *Libraries for testing codebases and generating test data.* * Testing Frameworks * [assert](https://github.com/go-playground/assert) - Basic Assertion Library used along side native go testing, with building blocks for custom assertions. * [badio](https://github.com/cavaliercoder/badio) - Extensions to Go's testing/iotest package. * [baloo](https://github.com/h2non/baloo) - Expressive and versatile end-to-end HTTP API testing made easy. * [bro](https://github.com/marioidival/bro) - Watch files in directory and run tests for them. * [cupaloy](https://github.com/bradleyjkemp/cupaloy) - Simple snapshot testing addon for your test framework. * [dbcleaner](https://github.com/khaiql/dbcleaner) - Clean database for testing purpose, inspired by database_cleaner in Ruby. * [dsunit](https://github.com/viant/dsunit) - Datastore testing for SQL, NoSQL, structured files. * [frisby](https://github.com/verdverm/frisby) - REST API testing framework. * [ginkgo](http://onsi.github.io/ginkgo/) - BDD Testing Framework for Go. * [go-carpet](https://github.com/msoap/go-carpet) - Tool for viewing test coverage in terminal. 
* [go-mutesting](https://github.com/zimmski/go-mutesting) - Mutation testing for Go source code. * [go-vcr](https://github.com/dnaeon/go-vcr) - Record and replay your HTTP interactions for fast, deterministic and accurate tests. * [goblin](https://github.com/franela/goblin) - Mocha like testing framework for Go. * [gocheck](http://labix.org/gocheck) - More advanced testing framework alternative to gotest. * [GoConvey](https://github.com/smartystreets/goconvey/) - BDD-style framework with web UI and live reload. * [godog](https://github.com/DATA-DOG/godog) - Cucumber or Behat like BDD framework for Go. * [gofight](https://github.com/appleboy/gofight) - API Handler Testing for Golang Router framework. * [gomega](http://onsi.github.io/gomega/) - Rspec like matcher/assertion library. * [GoSpec](https://github.com/orfjackal/gospec) - BDD-style testing framework for the Go programming language. * [gospecify](https://github.com/stesla/gospecify) - This provides a BDD syntax for testing your Go code. It should be familiar to anybody who has used libraries such as rspec. * [gosuite](https://github.com/pavlo/gosuite) - Brings lightweight test suites with setup/teardown facilities to testing by leveraging Go1.7's Subtests. * [Hamcrest](https://github.com/rdrdr/hamcrest) - fluent framework for declarative Matcher objects that, when applied to input values, produce self-describing results. * [httpexpect](https://github.com/gavv/httpexpect) - Concise, declarative, and easy to use end-to-end HTTP and REST API testing. * [restit](https://github.com/yookoala/restit) - Go micro framework to help writing RESTful API integration test. * [testfixtures](https://github.com/go-testfixtures/testfixtures) - A helper for Rails' like test fixtures to test database applications. * [Testify](https://github.com/stretchr/testify) - Sacred extension to the standard go testing package. * [wstest](https://github.com/posener/wstest) - Websocket client for unit-testing a websocket http.Handler.
* Mock * [counterfeiter](https://github.com/maxbrunsfeld/counterfeiter) - Tool for generating self-contained mock objects. * [go-sqlmock](https://github.com/DATA-DOG/go-sqlmock) - Mock SQL driver for testing database interactions. * [go-txdb](https://github.com/DATA-DOG/go-txdb) - Single transaction based database driver mainly for testing purposes. * [gock](https://github.com/h2non/gock) - Versatile HTTP mocking made easy. * [gomock](https://github.com/golang/mock) - Mocking framework for the Go programming language. * [govcr](https://github.com/seborama/govcr) - HTTP mock for Golang: record and replay HTTP interactions for offline testing. * [minimock](https://github.com/gojuno/minimock) - Mock generator for Go interfaces. * [mockhttp](https://github.com/tv42/mockhttp) - Mock object for Go http.ResponseWriter. * Fuzzing and delta-debugging/reducing/shrinking. * [go-fuzz](https://github.com/dvyukov/go-fuzz) - Randomized testing system. * [gofuzz](https://github.com/google/gofuzz) - Library for populating go objects with random values. * [Tavor](https://github.com/zimmski/tavor) - Generic fuzzing and delta-debugging framework. * Selenium and browser control tools. * [cdp](https://github.com/mafredri/cdp) - Type-safe bindings for the Chrome Debugging Protocol that can be used with browsers or other debug targets that implement it. * [chromedp](https://github.com/knq/chromedp) - Way to drive/test Chrome, Safari, Edge, Android Webviews, and other browsers supporting the Chrome Debugging Protocol. * [ggr](https://github.com/aandryashin/ggr) - Lightweight server that routes and proxies Selenium Webdriver requests to multiple Selenium hubs. * [selenoid](https://github.com/aandryashin/selenoid) - alternative Selenium hub server that launches browsers within containers. ## Text Processing *Libraries for parsing and manipulating texts.* * Specific Formats * [align](https://github.com/Guitarbum722/align) - A general purpose application that aligns text.
* [allot](https://github.com/sbstjn/allot) - Placeholder and wildcard text parsing for CLI tools and bots. * [bbConvert](https://github.com/CalebQ42/bbConvert) - Converts bbCode to HTML that allows you to add support for custom bbCode tags. * [blackfriday](https://github.com/russross/blackfriday) - Markdown processor in Go. * [bluemonday](https://github.com/microcosm-cc/bluemonday) - HTML Sanitizer. * [colly](https://github.com/asciimoo/colly) - Fast and Elegant Scraping Framework for Gophers * [doi](https://github.com/hscells/doi) - Document object identifier (doi) parser in Go. * [editorconfig-core-go](https://github.com/editorconfig/editorconfig-core-go) - Editorconfig file parser and manipulator for Go. * [enca](https://github.com/endeveit/enca) - Minimal cgo bindings for [libenca](http://cihar.com/software/enca/). * [genex](https://github.com/alixaxel/genex) - Count and expand Regular Expressions into all matching Strings. * [github_flavored_markdown](https://godoc.org/github.com/shurcooL/github_flavored_markdown) - GitHub Flavored Markdown renderer (using blackfriday) with fenced code block highlighting, clickable header anchor links. * [go-fixedwidth](https://github.com/ianlopshire/go-fixedwidth) - Fixed-width text formatting (encoder/decoder with reflection). * [go-humanize](https://github.com/dustin/go-humanize) - Formatters for time, numbers, and memory size to human readable format. * [go-nmea](https://github.com/adrianmo/go-nmea) - NMEA parser library for the Go language. * [go-pkg-rss](https://github.com/jteeuwen/go-pkg-rss) - This package reads RSS and Atom feeds and provides a caching mechanism that adheres to the feed specs. * [go-runewidth](https://github.com/mattn/go-runewidth) - Functions to get fixed width of the character or string. * [go-slugify](https://github.com/mozillazg/go-slugify) - Make pretty slug with multiple languages support. * [go-vcard](https://github.com/emersion/go-vcard) - Parse and format vCard. 
* [gofeed](https://github.com/mmcdole/gofeed) - Parse RSS and Atom feeds in Go. * [gographviz](https://github.com/awalterschulze/gographviz) - Parses the Graphviz DOT language. * [gommon/bytes](https://github.com/labstack/gommon/tree/master/bytes) - Format bytes to string. * [gonameparts](https://github.com/polera/gonameparts) - Parses human names into individual name parts. * [goq](https://github.com/andrewstuart/goq) - Declarative unmarshaling of HTML using struct tags with jQuery syntax (uses GoQuery). * [GoQuery](https://github.com/PuerkitoBio/goquery) - GoQuery brings a syntax and a set of features similar to jQuery to the Go language. * [goregen](https://github.com/zach-klippenstein/goregen) - Library for generating random strings from regular expressions. * [gotext](https://github.com/leonelquinteros/gotext) - GNU gettext utilities for Go. * [guesslanguage](https://github.com/endeveit/guesslanguage) - Functions to determine the natural language of a unicode text. * [inject](https://github.com/facebookgo/inject) - Package inject provides a reflect based injector. * [mxj](https://github.com/clbanning/mxj) - Encode / decode XML as JSON or map[string]interface{}; extract values with dot-notation paths and wildcards. Replaces x2j and j2x packages. * [sh](https://github.com/mvdan/sh) - Shell parser and formatter. * [slug](https://github.com/gosimple/slug) - URL-friendly slugify with multiple languages support. * [Slugify](https://github.com/avelino/slugify) - Go slugify application that handles string. * [toml](https://github.com/BurntSushi/toml) - TOML configuration format (encoder/decoder with reflection). * Utility * [gotabulate](https://github.com/bndr/gotabulate) - Easily pretty-print your tabular data with Go. * [kace](https://github.com/codemodus/kace) - Common case conversions covering common initialisms. * [parseargs-go](https://github.com/nproc/parseargs-go) - string argument parser that understands quotes and backslashes. 
* [parth](https://github.com/codemodus/parth) - URL path segmentation parsing. * [radix](https://github.com/yourbasic/radix) - fast string sorting algorithm. * [xj2go](https://github.com/stackerzzq/xj2go) - Convert xml or json to go struct. * [xurls](https://github.com/mvdan/xurls) - Extract urls from text. ## Third-party APIs *Libraries for accessing third party APIs.* * [amazon-product-advertising-api](https://github.com/ngs/go-amazon-product-advertising-api) - Go Client Library for [Amazon Product Advertising API](https://affiliate-program.amazon.com/gp/advertising/api/detail/main.html). * [anaconda](https://github.com/ChimeraCoder/anaconda) - Go client library for the Twitter 1.1 API. * [aws-sdk-go](https://github.com/aws/aws-sdk-go) - The official AWS SDK for the Go programming language. * [brewerydb](https://github.com/naegelejd/brewerydb) - Go library for accessing the BreweryDB API. * [cachet](https://github.com/andygrunwald/cachet) - Go client library for [Cachet (open source status page system)](https://cachethq.io/). * [circleci](https://github.com/jszwedko/go-circleci) - Go client library for interacting with CircleCI's API. * [clarifai](https://github.com/samuelcouch/clarifai) - Go client library for interfacing with the Clarifai API. * [discordgo](https://github.com/bwmarrin/discordgo) - Go bindings for the Discord Chat API. * [ethrpc](https://github.com/onrik/ethrpc) - Go bindings for Ethereum JSON RPC API. * [facebook](https://github.com/huandu/facebook) - Go Library that supports the Facebook Graph API. * [fcm](https://github.com/maddevsio/fcm) - Go library for Firebase Cloud Messaging. * [gads](https://github.com/emiddleton/gads) - Google Adwords Unofficial API. * [gami](https://github.com/bit4bit/gami) - Go library for Asterisk Manager Interface. * [gcm](https://github.com/Aorioli/gcm) - Go library for Google Cloud Messaging. 
* [geo-golang](https://github.com/codingsince1985/geo-golang) - Go Library to access [Google Maps](https://developers.google.com/maps/documentation/geocoding/intro), [MapQuest](http://open.mapquestapi.com/geocoding/), [Nominatim](http://open.mapquestapi.com/nominatim/), [OpenCage](http://geocoder.opencagedata.com/api.html), [HERE](https://developer.here.com/rest-apis/documentation/geocoder), [Bing](https://msdn.microsoft.com/en-us/library/ff701715.aspx), [Mapbox](https://www.mapbox.com/developers/api/geocoding/), and [OpenStreetMap](https://wiki.openstreetmap.org/wiki/Nominatim) geocoding / reverse geocoding APIs. * [github](https://github.com/google/go-github) - Go library for accessing the GitHub REST API v3. * [githubql](https://github.com/shurcooL/githubql) - Go library for accessing the GitHub GraphQL API v4. * [go-hacknews](https://github.com/PaulRosset/go-hacknews) - Tiny Go client for HackerNews API. * [go-imgur](https://github.com/koffeinsource/go-imgur) - Go client library for [imgur](https://imgur.com) * [go-jira](https://github.com/andygrunwald/go-jira) - Go client library for [Atlassian JIRA](https://www.atlassian.com/software/jira) * [go-marathon](https://github.com/gambol99/go-marathon) - Go library for interacting with Mesosphere's Marathon PAAS. * [go-myanimelist](https://github.com/nstratos/go-myanimelist) - Go client library for accessing the [MyAnimeList API](http://myanimelist.net/modules.php?go=api). * [go-sptrans](https://github.com/sergioaugrod/go-sptrans) - Go client library for the SPTrans Olho Vivo API. * [go-telegraph](https://github.com/toby3d/go-telegraph) - Telegraph publishing platform API client. * [go-tgbot](https://github.com/olebedev/go-tgbot) - Pure Golang Telegram Bot API wrapper, generated from swagger file, session-based router and middleware. 
* [go-trending](https://github.com/andygrunwald/go-trending) - Go library for accessing [trending repositories](https://github.com/trending) and [developers](https://github.com/trending/developers) at Github. * [go-twitch](https://github.com/knspriggs/go-twitch) - Go client for interacting with the Twitch v3 API. * [go-twitter](https://github.com/dghubble/go-twitter) - Go client library for the Twitter v1.1 APIs. * [go-unsplash](https://github.com/hbagdi/go-unsplash) - Go client library for the [Unsplash.com](https://unsplash.com) API. * [go-xkcd](https://github.com/nishanths/go-xkcd) - Go client for the xkcd API. * [goamz](https://github.com/mitchellh/goamz) - Popular fork of [goamz](https://launchpad.net/goamz) which adds some missing API calls to certain packages. * [golyrics](https://github.com/mamal72/golyrics) - Golyrics is a Go library to fetch music lyrics data from the Wikia website. * [GoMusicBrainz](https://github.com/michiwend/gomusicbrainz) - Go MusicBrainz WS2 client library. * [google](https://github.com/google/google-api-go-client) - Auto-generated Google APIs for Go. * [google-analytics](https://github.com/chonthu/go-google-analytics) - Simple wrapper for easy google analytics reporting. * [google-cloud](https://github.com/GoogleCloudPlatform/gcloud-golang) - Google Cloud APIs Go Client Library. * [google-email-audit-api](https://github.com/ngs/go-google-email-audit-api) - Go client library for [Google G Suite Email Audit API](https://developers.google.com/admin-sdk/email-audit/). * [gostorm](https://github.com/jsgilmore/gostorm) - GoStorm is a Go library that implements the communications protocol required to write Storm spouts and Bolts in Go that communicate with the Storm shells. * [govkbot](https://github.com/nikepan/govkbot) - Simple Go [VK](https://vk.com) bot library. * [hipchat](https://github.com/andybons/hipchat) - This project implements a golang client library for the Hipchat API. 
* [hipchat (xmpp)](https://github.com/daneharrigan/hipchat) - A golang package to communicate with HipChat over XMPP. * [Medium](https://github.com/Medium/medium-sdk-go) - Golang SDK for Medium's OAuth2 API. * [megos](https://github.com/andygrunwald/megos) - Client library for accessing an [Apache Mesos](http://mesos.apache.org/) cluster. * [micha](https://github.com/onrik/micha) - Go Library for Telegram bot api. * [minio-go](https://github.com/minio/minio-go) - Minio Go Library for Amazon S3 compatible cloud storage. * [mixpanel](https://github.com/dukex/mixpanel) - Mixpanel is a library for tracking events and sending Mixpanel profile updates to Mixpanel from your go applications. * [patreon-go](https://github.com/mxpv/patreon-go) - Go library for Patreon API. * [paypal](https://github.com/logpacker/paypalsdk) - Wrapper for PayPal payment API. * [playlyfe](https://github.com/playlyfe/playlyfe-go-sdk) - The Playlyfe Rest API Go SDK. * [pushover](https://github.com/gregdel/pushover) - Go wrapper for the Pushover API. * [rrdaclient](https://github.com/Omie/rrdaclient) - Go Library to access statdns.com API, which is in turn RRDA API. DNS Queries over HTTP. * [shopify](https://github.com/rapito/go-shopify) - Go Library to make CRUD request to the Shopify API. * [slack](https://github.com/nlopes/slack) - Slack API in Go. * [smite](https://github.com/sergiotapia/smitego) - Go package to wraps access to the Smite game API. * [spotify](https://github.com/rapito/go-spotify) - Go Library to access Spotify WEB API. * [steam](https://github.com/sostronk/go-steam) - Go Library to interact with Steam game servers. * [stripe](https://github.com/stripe/stripe-go) - Go client for the Stripe API. * [tbot](https://github.com/yanzay/tbot) - Telegram bot server with API similar to net/http. * [telebot](https://github.com/tucnak/telebot) - Telegram bot framework written in Go. * [telegram-bot-api](https://github.com/Syfaro/telegram-bot-api) - Simple and clean Telegram bot client. 
* [textbelt](https://github.com/dietsche/textbelt) - Go client for the textbelt.com txt messaging API. * [TheMovieDb](https://github.com/jbrodriguez/go-tmdb) - Simple golang package to communicate with [themoviedb.org](https://themoviedb.org). * [translate](https://github.com/poorny/translate) - Go online translation package. * [Trello](https://github.com/adlio/trello) - Go wrapper for the Trello API. * [tumblr](https://github.com/mattcunningham/gumblr) - Go wrapper for the Tumblr v2 API. * [webhooks](https://github.com/go-playground/webhooks) - Webhook receiver for GitHub and Bitbucket. * [zooz](https://github.com/gojuno/go-zooz) - Go client for the Zooz API. ## Utilities *General utilities and tools to make your life easier.* * [abutil](https://github.com/bahlo/abutil) - Collection of often-used Golang helpers. * [apm](https://github.com/topfreegames/apm) - Process manager for Golang applications with an HTTP API. * [boilr](https://github.com/tmrts/boilr) - Blazingly fast CLI tool for creating projects from boilerplate templates. * [chyle](https://github.com/antham/chyle) - Changelog generator using a git repository with multiple configuration possibilities. * [circuitbreaker](https://github.com/rubyist/circuitbreaker) - Circuit Breakers in Go. * [clockwerk](http://github.com/onatm/clockwerk) - Go package to schedule periodic jobs using a simple, fluent syntax. * [command](https://github.com/txgruppi/command) - Command pattern for Go with thread safe serial and parallel dispatcher. * [coop](https://github.com/rakyll/coop) - Cheat sheet for some of the common concurrent flows in Go. * [copy-pasta](https://github.com/jutkko/copy-pasta) - Universal multi-workstation clipboard that uses S3 like backend for the storage. * [ctop](https://github.com/bcicen/ctop) - [Top-like](http://ctop.sh) interface (e.g. htop) for container metrics. * [Death](https://github.com/vrecan/death) - Managing go application shutdown with signals. 
* [Deepcopier](https://github.com/ulule/deepcopier) - Simple struct copying for Go. * [delve](https://github.com/derekparker/delve) - Go debugger. * [dlog](https://github.com/kirillDanshin/dlog) - Compile-time controlled logger to make your release smaller without removing debug calls. * [ergo](https://github.com/cristianoliveira/ergo) - The management of multiple local services running over different ports made easy. * [evaluator](https://github.com/nullne/evaluator) - Evaluate an expression dynamically based on s-expression. It's simple and easy to extend. * [excelize](https://github.com/360EntSecGroup-Skylar/excelize) - Golang library for reading and writing Microsoft Excel™ (XLSX) files. * [fastlz](https://github.com/digitalcrab/fastlz) - Wrap over [FastLz](http://fastlz.org/) (free, open-source, portable real-time compression library) for GoLang. * [filetype](https://github.com/h2non/filetype) - Small package to infer the file type checking the magic numbers signature. * [filler](https://github.com/yaronsumel/filler) - small utility to fill structs using "fill" tag. * [fzf](https://github.com/junegunn/fzf) - Command-line fuzzy finder written in Go. * [generate](https://github.com/go-playground/generate) - runs go generate recursively on a specified path or environment variable and can filter by regex. * [gentleman](https://github.com/h2non/gentleman) - Full-featured plugin-driven HTTP client library. * [git-time-metric](https://github.com/git-time-metric/gtm) - Simple, seamless, lightweight time tracking for Git. * [GJSON](https://github.com/tidwall/gjson) - Get a JSON value with one line of code. * [go-astitodo](https://github.com/asticode/go-astitodo) - Parse TODOs in your GO code. * [go-bind-plugin](https://github.com/wendigo/go-bind-plugin) - go:generate tool for wrapping symbols exported by golang plugins (1.8 only).
* [go-cron](https://github.com/rk/go-cron) - Simple Cron library for go that can execute closures or functions at varying intervals, from once a second to once a year on a specific date and time. Primarily for web applications and long running daemons. * [go-debug](https://github.com/tj/go-debug) - Conditional debug logging for Golang libraries & applications. * [go-dry](https://github.com/ungerik/go-dry) - DRY (don't repeat yourself) package for Go. * [go-funk](https://github.com/thoas/go-funk) - Modern Go utility library which provides helpers (map, find, contains, filter, chunk, reverse, ...). * [go-httpheader](https://github.com/mozillazg/go-httpheader) - Go library for encoding structs into Header fields. * [go-rate](https://github.com/beefsack/go-rate) - Timed rate limiter for Go. * [go-respond](https://github.com/nicklaw5/go-respond) - Go package for handling common HTTP JSON responses. * [go-sitemap-generator](https://github.com/ikeikeikeike/go-sitemap-generator) - XML Sitemap generator written in Go. * [go-torch](https://github.com/uber/go-torch) - Stochastic flame graph profiler for Go programs. * [go-trigger](https://github.com/sadlil/go-trigger) - Go-lang global event triggerer, Register Events with an id and trigger the event from anywhere from your project. * [go-underscore](https://github.com/tobyhede/go-underscore) - Useful collection of helpfully functional Go collection utilities. * [goback](https://github.com/carlescere/goback) - Go simple exponential backoff package. * [godaemon](https://github.com/VividCortex/godaemon) - Utility to write daemons. * [godropbox](https://github.com/dropbox/godropbox) - Common libraries for writing Go services/applications from Dropbox. * [gohper](https://github.com/cosiner/gohper) - Various tools/modules help for development. * [gojq](https://github.com/elgs/gojq) - JSON query in Golang. * [gojson](https://github.com/ChimeraCoder/gojson) - Automatically generate Go (golang) struct definitions from example JSON. 
* [golarm](https://github.com/msempere/golarm) - Fire alarms with system events. * [golog](https://github.com/mlimaloureiro/golog) - Easy and lightweight CLI tool to time track your tasks. * [gopencils](https://github.com/bndr/gopencils) - Small and simple package to easily consume REST APIs. * [goplaceholder](https://github.com/michiwend/goplaceholder) - a small golang lib to generate placeholder images. * [goreleaser](https://github.com/goreleaser/goreleaser) - Deliver Go binaries as fast and easily as possible. * [goreporter](https://github.com/wgliang/goreporter) - Golang tool that does static analysis, unit testing, code review and generate code quality report. * [goreq](https://github.com/franela/goreq) - Minimal and simple request library for Go language. * [goreq](https://github.com/smallnest/goreq) - Enhanced simplified HTTP client based on gorequest. * [gorequest](https://github.com/parnurzeal/gorequest) - Simplified HTTP client with rich features for Go. * [goseaweedfs](https://github.com/linxGnu/goseaweedfs) - SeaweedFS client library with almost full features. * [gotenv](https://github.com/subosito/gotenv) - Load environment variables from .env or any io.Reader in Go. * [goxlsxwriter](https://github.com/fterrag/goxlsxwriter) - Golang bindings for libxlsxwriter for writing XLSX (Microsoft Excel) files. * [gpath](https://github.com/tenntenn/gpath) - Library to simplify access struct fields with Go's expression in reflection. * [grequests](https://github.com/levigross/grequests) - Elegant and simple net/http wrapper that follows Python's requests library. * [gron](https://github.com/roylee0704/gron) - Define time-based tasks using a simple Go API and Gron’s scheduler will run them accordingly. * [htcat](https://github.com/htcat/htcat) - Parallel and Pipelined HTTP GET Utility. * [httpcontrol](https://github.com/facebookgo/httpcontrol) - Package httpcontrol allows for HTTP transport level control around timeouts and retries. 
* [hub](https://github.com/github/hub) - wrap git commands with additional functionality to interact with github from the terminal. * [hystrix-go](https://github.com/afex/hystrix-go) - Implements Hystrix patterns of programmer-defined fallbacks aka circuit breaker. * [immortal](https://github.com/immortal/immortal) - *nix cross-platform (OS agnostic) supervisor. * [intrinsic](https://github.com/mengzhuo/intrinsic) - Use x86 SIMD without writing any assembly code. * [JobRunner](https://github.com/bamzi/jobrunner) - Smart and featureful cron job scheduler with job queuing and live monitoring built in. * [jsonapi-errors](https://github.com/AmuzaTkts/jsonapi-errors) - Go bindings based on the JSON API errors reference. * [jsonf](https://github.com/miolini/jsonf) - Console tool for highlighted formatting and struct query fetching JSON. * [jsongo](https://github.com/ricardolonga/jsongo) - Fluent API to make it easier to create Json objects. * [jsonhal](https://github.com/RichardKnop/jsonhal) - Simple Go package to make custom structs marshal into HAL compatible JSON responses. * [kazaam](https://github.com/Qntfy/kazaam) - API for arbitrary transformation of JSON documents. * [lrserver](https://github.com/jaschaephraim/lrserver) - LiveReload server for Go. * [mc](https://github.com/minio/mc) - Minio Client provides minimal tools to work with Amazon S3 compatible cloud storage and filesystems. * [mergo](https://github.com/imdario/mergo) - Helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. * [minify](https://github.com/tdewolff/minify) - Fast minifiers for HTML, CSS, JS, XML, JSON and SVG file formats. * [mmake](https://github.com/tj/mmake) - Modern Make. * [moldova](https://github.com/StabbyCutyou/moldova) - Utility for generating random data based on an input template. * [mp](https://github.com/sanbornm/mp) - Simple cli email parser. It currently takes stdin and outputs JSON. 
* [mssqlx](https://github.com/linxGnu/mssqlx) - Database client library, proxy for any master slave, master master structures. Lightweight and auto balancing in mind. * [multitick](https://github.com/VividCortex/multitick) - Multiplexor for aligned tickers. * [myhttp](https://github.com/inancgumus/myhttp) - Simple API to make HTTP GET requests with timeout support. * [netbug](https://github.com/e-dard/netbug) - Easy remote profiling of your services. * [ngrok](https://github.com/inconshreveable/ngrok) - Introspected tunnels to localhost. * [okrun](https://github.com/xta/okrun) - go run error steamroller. * [onecache](https://github.com/adelowo/onecache) - Caching library with support for multiple backend stores (Redis, Memcached, filesystem etc). * [panicparse](https://github.com/maruel/panicparse) - Groups similar goroutines and colorizes stack dump. * [peco](https://github.com/peco/peco) - Simplistic interactive filtering tool. * [pester](https://github.com/sethgrid/pester) - Go HTTP client calls with retries, backoff, and concurrency. * [pm](https://github.com/VividCortex/pm) - Process (i.e. goroutine) manager with an HTTP API. * [profile](https://github.com/pkg/profile) - Simple profiling support package for Go. * [rclient](https://github.com/zpatrick/rclient) - Readable, flexible, simple-to-use client for REST APIs. * [realize](https://github.com/tockins/realize) - Go build system with file watchers and live reload. Run, build and watch file changes with custom paths. * [repeat](https://github.com/ssgreg/repeat) - Go implementation of different backoff strategies useful for retrying operations and heartbeating. * [request](https://github.com/mozillazg/request) - Go HTTP Requests for Humans™. * [rerate](https://github.com/abo/rerate) - Redis-based rate counter and rate limiter for Go. * [rerun](https://github.com/ivpusic/rerun) - Recompiling and rerunning go apps when source changes. 
* [resty](https://github.com/go-resty/resty) - Simple HTTP and REST client for Go inspired by Ruby rest-client. * [retry](https://github.com/kamilsk/retry) - Functional mechanism based on context to perform actions repetitively until successful. * [robustly](https://github.com/VividCortex/robustly) - Runs functions resiliently, catching and restarting panics. * [scheduler](https://github.com/carlescere/scheduler) - Cronjobs scheduling made easy. * [sling](https://github.com/dghubble/sling) - Go HTTP requests builder for API clients. * [spinner](https://github.com/briandowns/spinner) - Go package to easily provide a terminal spinner with options. * [sqlx](https://github.com/jmoiron/sqlx) - provides a set of extensions on top of the excellent built-in database/sql package. * [Storm](https://github.com/asdine/storm) - Simple and powerful toolkit for BoltDB. * [structs](https://github.com/PumpkinSeed/structs) - Implement simple functions to manipulate structs. * [Task](https://github.com/go-task/task) - simple "Make" alternative. * [toolbox](https://github.com/viant/toolbox) - Slice, map, multimap, struct, function, data conversion utilities. Service router, macro evaluator, tokenizer. * [ugo](https://github.com/alxrm/ugo) - ugo is slice toolbox with concise syntax for Go. * [UNIS](https://github.com/esemplastic/unis) - Common Architecture™ for String Utilities in Go. * [usql](https://github.com/knq/usql) - usql is a universal command-line interface for SQL databases. * [util](https://github.com/shomali11/util) - Collection of useful utility functions. (strings, concurrency, manipulations, ...). * [wuzz](https://github.com/asciimoo/wuzz) - Interactive cli tool for HTTP inspection. * [xferspdy](https://github.com/monmohan/xferspdy) - Xferspdy provides binary diff and patch library in golang. * [xlsx](https://github.com/tealeg/xlsx) - Library to simplify reading the XML format used by recent version of Microsoft Excel in Go programs. 
## Validation *Libraries for validation.* * [govalidator](https://github.com/asaskevich/govalidator) - Validators and sanitizers for strings, numerics, slices and structs. * [govalidator](https://github.com/thedevsaddam/govalidator) - Validate Golang request data with simple rules. Highly inspired by Laravel's request validation. * [ozzo-validation](https://github.com/go-ozzo/ozzo-validation) - Supports validation of various data types (structs, strings, maps, slices, etc.) with configurable and extensible validation rules specified in usual code constructs instead of struct tags. * [validate](https://github.com/markbates/validate) - This package provides a framework for writing validations for Go applications. * [validator](https://github.com/go-playground/validator) - Go Struct and Field validation, including Cross Field, Cross Struct, Map, Slice and Array diving. ## Version Control *Libraries for version control.* * [gh](https://github.com/rjeczalik/gh) - Scriptable server and net/http middleware for GitHub Webhooks. * [git2go](https://github.com/libgit2/git2go) - Go bindings for libgit2. * [go-vcs](https://github.com/sourcegraph/go-vcs) - manipulate and inspect VCS repositories in Go. * [hgo](https://github.com/beyang/hgo) - Hgo is a collection of Go packages providing read-access to local Mercurial repositories. ## Video *Libraries for manipulating video.* * [gmf](https://github.com/3d0c/gmf) - Go bindings for FFmpeg av\* libraries. * [go-astisub](https://github.com/asticode/go-astisub) - Manipulate subtitles in GO (.srt, .stl, .ttml, .webvtt, .ssa/.ass, teletext, .smi, etc.). * [go-astits](https://github.com/asticode/go-astits) - Parse and demux MPEG Transport Streams (.ts) natively in GO. * [goav](https://github.com/giorgisio/goav) - Comprehensive Go bindings for FFmpeg. * [gst](https://github.com/ziutek/gst) - Go bindings for GStreamer. * [libgosubs](https://github.com/wargarblgarbl/libgosubs) - Subtitle format support for go. Supports .srt, .ttml, and .ass.
* [v4l](https://github.com/korandiz/v4l) - Video capture library for Linux, written in Go. ## Web Frameworks *Full stack web frameworks.* * [aah](https://aahframework.org) - Scalable, performant, rapid development Web framework for Go. * [Air](https://github.com/sheng/air) - Ideal RESTful web framework for Go. * [Beego](https://github.com/astaxie/beego) - beego is an open-source, high-performance web framework for the Go programming language. * [Buffalo](http://gobuffalo.io) - Bringing the productivity of Rails to Go! * [Echo](https://github.com/labstack/echo) - High performance, minimalist Go web framework. * [Fireball](https://github.com/zpatrick/fireball) - More "natural" feeling web framework. * [Florest](https://github.com/jabong/florest-core) - High-performance workflow based REST API framework. * [Gem](https://github.com/go-gem/gem) - Simple and fast web framework, friendly to REST API. * [Gin](https://github.com/gin-gonic/gin) - Gin is a web framework written in Go! It features a martini-like API with much better performance, up to 40 times faster. If you need performance and good productivity. * [Gizmo](https://github.com/NYTimes/gizmo) - Microservice toolkit used by the New York Times. * [go-json-rest](https://github.com/ant0ine/go-json-rest) - Quick and easy way to setup a RESTful JSON API. * [go-relax](https://github.com/codehack/go-relax) - Framework of pluggable components to build RESTful API's. * [go-rest](https://github.com/ungerik/go-rest) - Small and evil REST framework for Go. * [goa](https://github.com/raphael/goa) - Framework for developing microservices based on the design of Ruby's Praxis. * [Goat](https://github.com/bahlo/goat) - Minimalistic REST API server in Go. * [Golf](https://github.com/dinever/golf) - Golf is a fast, simple and lightweight micro-web framework for Go. It comes with powerful features and has no dependencies other than the Go Standard Library. 
* [Gondola](https://github.com/rainycape/gondola) - The web framework for writing faster sites, faster. * [gongular](https://github.com/mustafaakin/gongular) - Fast Go web framework with input mapping/validation and (DI) Dependency Injection. * [Macaron](https://github.com/go-macaron/macaron) - Macaron is a high productive and modular design web framework in Go. * [mango](https://github.com/paulbellamy/mango) - Mango is a modular web-application framework for Go, inspired by Rack, and PEP333. * [Microservice](https://github.com/claygod/microservice) - The framework for the creation of microservices, written in Golang. * [neo](https://github.com/ivpusic/neo) - Neo is minimal and fast Go Web Framework with extremely simple API. * [Resoursea](https://github.com/resoursea/api) - REST framework for quickly writing resource based services. * [REST Layer](http://rest-layer.io) - Framework to build REST/GraphQL API on top of databases with mostly configuration over code. * [Revel](https://github.com/revel/revel) - High-productivity web framework for the Go language. * [rex](https://github.com/goanywhere/rex) - Rex is a library for modular development built upon gorilla/mux, fully compatible with net/http. * [sawsij](https://github.com/jaybill/sawsij) - lightweight, open-source web framework for building high-performance, data-driven web applications. * [tango](https://github.com/lunny/tango) - Micro & pluggable web framework for Go. * [tigertonic](https://github.com/rcrowley/go-tigertonic) - Go framework for building JSON web services inspired by Dropwizard. * [traffic](https://github.com/pilu/traffic) - Sinatra inspired regexp/pattern mux and web framework for Go. * [utron](https://github.com/gernest/utron) - Lightweight MVC framework for Go(Golang). * [violetear](https://github.com/nbari/violetear) - Go HTTP router. * [YARF](https://github.com/yarf-framework/yarf) - Fast micro-framework designed to build REST APIs and web services in a fast and simple way. 
* [Zerver](https://github.com/cosiner/zerver) - Zerver is an expressive, modular, feature completed RESTful framework. ### Middlewares #### Actual middlewares * [CORS](https://github.com/rs/cors) - Easily add CORS capabilities to your API. * [formjson](https://github.com/rs/formjson) - Transparently handle JSON input as a standard form POST. * [Limiter](https://github.com/ulule/limiter) - Dead simple rate limit middleware for Go. * [Tollbooth](https://github.com/didip/tollbooth) - Rate limit HTTP request handler. * [XFF](https://github.com/sebest/xff) - Handle X-Forwarded-For header and friends. #### Libraries for creating HTTP middlewares * [alice](https://github.com/justinas/alice) - Painless middleware chaining for Go. * [catena](https://github.com/codemodus/catena) - http.Handler wrapper catenation (same API as "chain"). * [chain](https://github.com/codemodus/chain) - Handler wrapper chaining with scoped data (net/context-based "middleware"). * [go-wrap](https://github.com/go-on/wrap) - Small middlewares package for net/http. * [gores](https://github.com/alioygur/gores) - Go package that handles HTML, JSON, XML and etc. responses. Useful for RESTful APIs. * [interpose](https://github.com/carbocation/interpose) - Minimalist net/http middleware for golang. * [muxchain](https://github.com/stephens2424/muxchain) - Lightweight middleware for net/http. * [negroni](https://github.com/urfave/negroni) - Idiomatic HTTP middleware for Golang. * [render](https://github.com/unrolled/render) - Go package for easily rendering JSON, XML, and HTML template responses. * [renderer](https://github.com/thedevsaddam/renderer) - Simple, lightweight and faster response (JSON, JSONP, XML, YAML, HTML, File) rendering package for Go. * [rye](https://github.com/InVisionApp/rye) - Tiny Go middleware library (with canned Middlewares) that supports JWT, CORS, Statsd, and Go 1.7 context. 
* [stats](https://github.com/thoas/stats) - Go middleware that stores various information about your web application. * [Volatile](https://github.com/volatile/core) - Minimalist middleware stack promoting flexibility, good practices and clean code. ### Routers * [alien](https://github.com/gernest/alien) - Lightweight and fast http router from outer space. * [Bone](https://github.com/go-zoo/bone) - Lightning Fast HTTP Multiplexer. * [Bxog](https://github.com/claygod/Bxog) - Simple and fast HTTP router for Go. It works with routes of varying difficulty, length and nesting. And he knows how to create a URL from the received parameters. * [chi](https://github.com/go-chi/chi) - Small, fast and expressive HTTP router built on net/context. * [fasthttprouter](https://github.com/buaazp/fasthttprouter) - High performance router forked from httprouter. The first router fit for fasthttp. * [FastRouter](https://github.com/razonyang/fastrouter) - a fast, flexible HTTP router written in Go. * [gocraft/web](https://github.com/gocraft/web) - Mux and middleware package in Go. * [Goji](https://github.com/goji/goji) - Goji is a minimalistic and flexible HTTP request multiplexer with support for net/context. * [GoRouter](https://github.com/vardius/gorouter) - GoRouter is a Server/API micro framework, HTTP request router, multiplexer, mux that provides request router with middleware supporting net/context. * [gowww/router](https://github.com/gowww/router) - Lightning fast HTTP router fully compatible with the net/http.Handler interface. * [httprouter](https://github.com/julienschmidt/httprouter) - High performance router. Use this and the standard http handlers to form a very high performance web framework. * [httptreemux](https://github.com/dimfeld/httptreemux) - High-speed, flexible tree-based HTTP router for Go. Inspiration from httprouter. 
* [lars](https://github.com/go-playground/lars) - Is a lightweight, fast and extensible zero allocation HTTP router for Go used to create customizable frameworks. * [medeina](https://github.com/imdario/medeina) - Medeina is a HTTP routing tree based on HttpRouter, inspired by Roda and Cuba. * [mux](https://github.com/gorilla/mux) - Powerful URL router and dispatcher for golang. * [ozzo-routing](https://github.com/go-ozzo/ozzo-routing) - An extremely fast Go (golang) HTTP router that supports regular expression route matching. Comes with full support for building RESTful APIs. * [pat](https://github.com/bmizerany/pat) - Sinatra style pattern muxer for Go’s net/http library, by the author of Sinatra. * [pure](https://github.com/go-playground/pure) - Is a lightweight HTTP router that sticks to the std "net/http" implementation. * [Siesta](https://github.com/VividCortex/siesta) - Composable framework to write middleware and handlers. * [vestigo](https://github.com/husobee/vestigo) - Performant, stand-alone, HTTP compliant URL Router for go web applications. * [xmux](https://github.com/rs/xmux) - High performance muxer based on httprouter with net/context support. * [zeus](https://github.com/daryl/zeus) - Very simple and fast HTTP router for Go. ## Windows * [d3d9](https://github.com/gonutz/d3d9) - Go bindings for Direct3D9. * [go-ole](https://github.com/go-ole/go-ole) - Win32 OLE implementation for golang. ## XML *Libraries and tools for manipulating XML.* * [go-pkg-xmlx](https://github.com/jteeuwen/go-pkg-xmlx) - Extension to the standard Go XML package. Maintains a node tree that allows forward/backwards browsing and exposes some simple single/multi-node search functions. * [XML-Comp](https://github.com/xml-comp/xml-comp) - Simple command line XML comparer that generates diffs of folders, files and tags. * [xmlwriter](https://github.com/shabbyrobe/xmlwriter) - Procedural XML generation API based on libxml2's xmlwriter module. 
* [xpath](https://github.com/antchfx/xpath) - XPath package for Go. * [xquery](https://github.com/antchfx/xquery) - XQuery lets you extract data from HTML/XML documents using XPath expression. # Tools *Go software and plugins.* ## Code Analysis * [apicompat](https://github.com/bradleyfalzon/apicompat) - Checks recent changes to a Go project for backwards incompatible changes. * [dupl](https://github.com/mibk/dupl) - Tool for code clone detection. * [errcheck](https://github.com/kisielk/errcheck) - Errcheck is a program for checking for unchecked errors in Go programs. * [gcvis](https://github.com/davecheney/gcvis) - Visualise Go program GC trace data in real time. * [Go Metalinter](https://github.com/alecthomas/gometalinter) - Metalinter is a tool to automatically apply all static analysis tools and report their output in normalized form. * [go-checkstyle](https://github.com/qiniu/checkstyle) - checkstyle is a style check tool like java checkstyle. This tool was inspired by java checkstyle, golint. The style referred to some points in Go Code Review Comments. * [go-cleanarch](https://github.com/roblaszczak/go-cleanarch) - go-cleanarch was created to validate Clean Architecture rules, like The Dependency Rule and interaction between packages in your Go projects. * [go-outdated](https://github.com/firstrow/go-outdated) - Console application that displays outdated packages. * [goast-viewer](https://github.com/yuroyoro/goast-viewer) - Web based Golang AST visualizer. * [GoCover.io](http://gocover.io/) - GoCover.io offers the code coverage of any golang package as a service. * [goimports](https://godoc.org/golang.org/x/tools/cmd/goimports) - Tool to fix (add, remove) your Go imports automatically. * [GoLint](https://github.com/golang/lint) - Golint is a linter for Go source code. * [Golint online](http://go-lint.appspot.com/) - Lints online Go source files on GitHub, Bitbucket and Google Project Hosting using the golint package. 
* [goreturns](https://sourcegraph.com/github.com/sqs/goreturns) - Adds zero-value return statements to match the func return types. * [gosimple](https://github.com/dominikh/go-tools/tree/master/cmd/gosimple) - gosimple is a linter for Go source code that specialises on simplifying code. * [gostatus](https://github.com/shurcooL/gostatus) - Command line tool, shows the status of repositories that contain Go packages. * [interfacer](https://github.com/mvdan/interfacer) - Linter that suggests interface types. * [lint](https://github.com/surullabs/lint) - Run linters as part of go test. * [staticcheck](https://github.com/dominikh/go-tools/tree/master/cmd/staticcheck) - staticcheck is go vet on steroids, applying a ton of static analysis checks you might be used to from tools like ReSharper for C#. * [tarp](https://github.com/verygoodsoftwarenotvirus/tarp) - tarp finds functions and methods without direct unit tests in Go source code. * [unconvert](https://github.com/mdempsky/unconvert) - Remove unnecessary type conversions from Go source. * [unused](https://github.com/dominikh/go-tools/tree/master/cmd/unused) - unused checks Go code for unused constants, variables, functions and types. * [validate](https://github.com/mccoyst/validate) - Automatically validates struct fields with tags. ## Editor Plugins * [Go plugin for JetBrains IDEs](https://plugins.jetbrains.com/plugin/9568-go) - Go plugin for JetBrains IDEs. * [go-mode](https://github.com/dominikh/go-mode.el) - Go mode for GNU/Emacs. * [go-plus](https://github.com/joefitzgerald/go-plus) - Go (Golang) Package For Atom That Adds Autocomplete, Formatting, Syntax Checking, Linting and Vetting. * [Goclipse](https://github.com/GoClipse/goclipse) - Eclipse plugin for Go. * [gocode](https://github.com/nsf/gocode) - Autocompletion daemon for the Go programming language. 
* [GoSublime](https://github.com/DisposaBoy/GoSublime) - Golang plugin collection for the text editor SublimeText 2 providing code completion and other IDE-like features. * [velour](https://github.com/velour/velour) - IRC client for the acme editor. * [vim-compiler-go](https://github.com/rjohnsondev/vim-compiler-go) - Vim plugin to highlight syntax errors on save. * [vim-go](https://github.com/fatih/vim-go) - Go development plugin for Vim. * [vscode-go](https://github.com/Microsoft/vscode-go) - Extension for Visual Studio Code (VS Code) which provides support for the Go language. * [Watch](https://github.com/eaburns/Watch) - Runs a command in an acme win on file changes. ## Go Generate Tools * [generic](https://github.com/usk81/generic) - flexible data type for Go. * [genny](https://github.com/cheekybits/genny) - Elegant generics for Go. * [gonerics](http://github.com/bouk/gonerics) - Idiomatic Generics in Go. * [gotests](https://github.com/cweill/gotests) - Generate Go tests from your source code. * [re2dfa](https://github.com/opennota/re2dfa) - Transform regular expressions into finite state machines and output Go source code. ## Go Tools * [colorgo](https://github.com/songgao/colorgo) - Wrapper around go command for colorized go build output. * [depth](https://github.com/KyleBanks/depth) - Visualize dependency trees of any package by analyzing imports. * [gb](https://getgb.io/) - An easy to use project based build tool for the Go programming language. * [go-callvis](https://github.com/TrueFurby/go-callvis) - Visualize call graph of your Go program using dot format. * [go-pkg-complete](https://github.com/skelterjohn/go-pkg-complete) - Bash completion for go and wgo. * [go-swagger](https://github.com/go-swagger/go-swagger) - Swagger 2.0 implementation for go. Swagger is a simple yet powerful representation of your RESTful API. 
* [OctoLinker](https://github.com/OctoLinker/browser-extension) - Navigate through go files efficiently with the OctoLinker browser extension for GitHub. * [richgo](https://github.com/kyoh86/richgo) - Enrich go test outputs with text decorations. * [rts](https://github.com/galeone/rts) - RTS: response to struct. Generates Go structs from server responses. ## Software Packages *Software written in Go.* ### DevOps Tools * [aptly](https://github.com/smira/aptly) - aptly is a Debian repository management tool. * [aurora](https://github.com/xuri/aurora) - Cross-platform web-based Beanstalkd queue server console. * [awsenv](https://github.com/soniah/awsenv) - Small binary that loads Amazon (AWS) environment variables for a profile. * [Banshee](https://github.com/eleme/banshee) - Anomalies detection system for periodic metrics. * [Blast](https://github.com/dave/blast) - A simple tool for API load testing and batch jobs. * [bombardier](https://github.com/codesenberg/bombardier) - Fast cross-platform HTTP benchmarking tool. * [bosun](https://github.com/bosun-monitor/bosun) - Time Series Alerting Framework. * [dogo](https://github.com/liudng/dogo) - Monitoring changes in the source file and automatically compile and run (restart). * [drone-jenkins](https://github.com/appleboy/drone-jenkins) - Trigger downstream Jenkins jobs using a binary, docker or Drone CI. * [drone-scp](https://github.com/appleboy/drone-scp) - Copy files and artifacts via SSH using a binary, docker or Drone CI. * [Dropship](https://github.com/chrismckenzie/dropship) - Tool for deploying code via cdn. * [easyssh-proxy](https://github.com/appleboy/easyssh-proxy) - Golang package for easy remote execution through SSH and SCP downloading via ProxyCommand. * [Gitea](https://github.com/go-gitea/gitea) - Fork of Gogs, entirely community driven. * [Go Metrics](https://github.com/rcrowley/go-metrics) - Go port of Coda Hale's Metrics library: https://github.com/codahale/metrics. 
* [go-selfupdate](https://github.com/sanbornm/go-selfupdate) - Enable your Go applications to self update. * [gobrew](https://github.com/cryptojuice/gobrew) - gobrew lets you easily switch between multiple versions of go. * [godbg](https://github.com/sirnewton01/godbg) - Web-based gdb front-end application. * [Gogs](https://gogs.io/) - A Self Hosted Git Service in the Go Programming Language. * [gonative](https://github.com/inconshreveable/gonative) - Tool which creates a build of Go that can cross compile to all platforms while still using the Cgo-enabled versions of the stdlib packages. * [govvv](https://github.com/ahmetalpbalkan/govvv) - “go build” wrapper to easily add version information into Go binaries. * [gox](https://github.com/mitchellh/gox) - Dead simple, no frills Go cross compile tool. * [goxc](https://github.com/laher/goxc) - build tool for Go, with a focus on cross-compiling and packaging. * [grapes](https://github.com/yaronsumel/grapes) - Lightweight tool designed to distribute commands over ssh with ease. * [GVM](https://github.com/moovweb/gvm) - GVM provides an interface to manage Go versions. * [Hey](https://github.com/rakyll/hey) - Hey is a tiny program that sends some load to a web application. * [kala](https://github.com/ajvb/kala) - Simplistic, modern, and performant job scheduler. * [kubernetes](https://github.com/kubernetes/kubernetes) - Container Cluster Manager from Google. * [manssh](https://github.com/xwjdsh/manssh) - manssh is a command line tool for managing your ssh alias config easily. * [Moby](https://github.com/moby/moby) - Collaborative project for the container ecosystem to assemble container-based systems. * [Mora](https://github.com/emicklei/mora) - REST server for accessing MongoDB documents and meta data. * [ostent](https://github.com/ostrost/ostent) - collects and displays system metrics and optionally relays to Graphite and/or InfluxDB. 
* [Packer](https://github.com/mitchellh/packer) - Packer is a tool for creating identical machine images for multiple platforms from a single source configuration. * [Pewpew](https://github.com/bengadbois/pewpew) - Flexible HTTP command line stress tester. * [Rodent](https://github.com/alouche/rodent) - Rodent helps you manage Go versions, projects and track dependencies. * [s3gof3r](https://github.com/rlmcpherson/s3gof3r) - Small utility/library optimized for high speed transfer of large objects into and out of Amazon S3. * [Scaleway-cli](https://github.com/scaleway/scaleway-cli) - Manage BareMetal Servers from Command Line (as easily as with Docker). * [sg](https://github.com/ChristopherRabotin/sg) - Benchmarks a set of HTTP endpoints (like ab), with possibility to use the response code and data between each call for specific server stress based on its previous response. * [skm](https://github.com/TimothyYe/skm) - SKM is a simple and powerful SSH Keys Manager, it helps you to manage your multiple SSH keys easily! * [StatusOK](https://github.com/sanathp/statusok) - Monitor your Website and REST APIs. Get Notified through Slack, E-mail when your server is down or response time is more than expected. * [traefik](https://github.com/containous/traefik) - Reverse proxy and load balancer with support for multiple backends. * [Vegeta](https://github.com/tsenart/vegeta) - HTTP load testing tool and library. It's over 9000! * [webhook](https://github.com/adnanh/webhook) - Tool which allows user to create HTTP endpoints (hooks) that execute commands on the server. * [Wide](https://wide.b3log.org/login) - Web-based IDE for Teams using Golang. * [winrm-cli](https://github.com/masterzen/winrm-cli) - Cli tool to remotely execute commands on Windows machines. ### Other Software * [borg](https://github.com/crufter/borg) - Terminal based search engine for bash snippets. * [boxed](https://github.com/tejo/boxed) - Dropbox based blog engine. 
* [Cherry](https://github.com/rafael-santiago/cherry) - Tiny webchat server in Go. * [Circuit](https://github.com/gocircuit/circuit) - Circuit is a programmable platform-as-a-service (PaaS) and/or Infrastructure-as-a-Service (IaaS), for management, discovery, synchronization and orchestration of services and hosts comprising cloud applications. * [Comcast](https://github.com/tylertreat/Comcast) - Simulate bad network connections. * [confd](https://github.com/kelseyhightower/confd) - Manage local application configuration files using templates and data from etcd or consul. * [DDNS](https://github.com/skibish/ddns) - Personal DDNS client with Digital Ocean Networking DNS as backend. * [Docker](http://www.docker.com/) - Open platform for distributed applications for developers and sysadmins. * [Documize](https://github.com/documize/community) - Modern wiki software that integrates data from SaaS tools. * [fleet](https://github.com/coreos/fleet) - Distributed init System. * [Go Package Store](https://github.com/shurcooL/Go-Package-Store) - App that displays updates for the Go packages in your GOPATH. * [gocc](https://github.com/goccmack/gocc) - Gocc is a compiler kit for Go written in Go. * [GoDNS](https://github.com/timothyye/godns) - A dynamic DNS client tool, supports DNSPod & HE.net, written in Go. * [GoDocTooltip](https://github.com/diankong/GoDocTooltip) - Chrome extension for Go Doc sites, which shows function description as tooltip at function list. * [GoLand](https://jetbrains.com/go) - Full featured cross-platform Go IDE. * [Gor](https://github.com/buger/gor) - Http traffic replication tool, for replaying traffic from production to stage/dev environments in real-time. * [hugo](http://gohugo.io/) - Fast and Modern Static Website Engine. * [ide](https://github.com/thestrukture/ide) - Browser accessible IDE. Designed for Go with Go. 
* [ipe](https://github.com/dimiro1/ipe) - Open source Pusher server implementation compatible with Pusher client libraries written in GO. * [JayDiff](https://github.com/yazgazan/jaydiff) - JSON diff utility written in Go. * [Juju](https://jujucharms.com/) - Cloud-agnostic service deployment and orchestration - supports EC2, Azure, Openstack, MAAS and more. * [Leaps](https://github.com/jeffail/leaps) - Pair programming service using Operational Transforms. * [limetext](http://limetext.org/) - Lime Text is a powerful and elegant text editor primarily developed in Go that aims to be a Free and open-source software successor to Sublime Text. * [LiteIDE](https://github.com/visualfc/liteide) - LiteIDE is a simple, open source, cross-platform Go IDE. * [mockingjay](https://github.com/quii/mockingjay-server) - Fake HTTP servers and consumer driven contracts from one configuration file. You can also make the server randomly misbehave to help do more realistic performance tests. * [myLG](https://github.com/mehrdadrad/mylg) - Command Line Network Diagnostic tool written in Go. * [naclpipe](https://github.com/unix4fun/naclpipe) - Simple NaCL EC25519 based crypto pipe tool written in Go. * [nes](https://github.com/fogleman/nes) - Nintendo Entertainment System (NES) emulator written in Go. * [orange-cat](https://github.com/noraesae/orange-cat) - Markdown previewer written in Go. * [Orbit](https://github.com/gulien/orbit) - A simple tool for running commands and generating files from templates. * [peg](https://github.com/pointlander/peg) - Peg, Parsing Expression Grammar, is an implementation of a Packrat parser generator. * [Postman](https://github.com/zachlatta/postman) - Command-line utility for batch-sending email. * [restic](https://github.com/restic/restic) - De-duplicating backup program. 
* [rkt](https://github.com/coreos/rkt) - App Container runtime that integrates with init systems, is compatible with other container formats like Docker, and supports alternative execution engines like KVM. * [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Fast, Simple and Scalable Distributed File System with O(1) disk seek. * [shell2http](https://github.com/msoap/shell2http) - Executing shell commands via http server (for prototyping or remote control). * [snap](https://github.com/intelsdi-x/snap) - Powerful telemetry framework. * [Snitch](https://github.com/lucasgomide/snitch) - Simple way to notify your team and many tools when someone has deployed any application via Tsuru. * [Stack Up](https://github.com/pressly/sup) - Stack Up, a super simple deployment tool - just Unix - think of it like 'make' for a network of servers. * [syncthing](https://syncthing.net/) - Open, decentralized file synchronization tool and protocol. * [Tenyks](https://github.com/kyleterry/tenyks) - Service oriented IRC bot using Redis and JSON for messaging. * [toto](https://github.com/blogcin/ToTo) - Simple proxy server written in Go language, can be used together with browser. * [toxiproxy](https://github.com/shopify/toxiproxy) - Proxy to simulate network and system conditions for automated tests. * [tsuru](https://tsuru.io/) - Extensible and open source Platform as a Service software. * [vFlow](https://github.com/VerizonDigital/vflow) - High-performance, scalable and reliable IPFIX, sFlow and Netflow collector. * [websysd](https://github.com/ian-kent/websysd) - Web based process manager (like Marathon or Upstart). * [wellington](https://github.com/wellington/wellington) - Sass project management tool, extends the language with sprite functions (like Compass). # Resources *Where to discover new Go libraries.* ## Benchmarks * [autobench](https://github.com/davecheney/autobench) - Framework to compare the performance between different Go versions. 
* [go-benchmark-app](https://github.com/mrLSD/go-benchmark-app) - Powerful HTTP-benchmark tool mixed with Ab, Wrk, Siege tools. Gathering statistics and various parameters for benchmarks and comparison results. * [go-benchmarks](https://github.com/tylertreat/go-benchmarks) - Few miscellaneous Go microbenchmarks. Compare some language features to alternative approaches. * [go-http-routing-benchmark](https://github.com/julienschmidt/go-http-routing-benchmark) - Go HTTP request router benchmark and comparison. * [go-type-assertion-benchmark](https://github.com/hgfischer/go-type-assertion-benchmark) - Naive performance test of two ways to do type assertion in Go. * [go-web-framework-benchmark](https://github.com/smallnest/go-web-framework-benchmark) - Go web framework benchmark. * [go_serialization_benchmarks](https://github.com/alecthomas/go_serialization_benchmarks) - Benchmarks of Go serialization methods. * [gocostmodel](https://github.com/PuerkitoBio/gocostmodel) - Benchmarks of common basic operations for the Go language. * [golang-micro-benchmarks](https://github.com/amscanne/golang-micro-benchmarks) - Tiny collection of Go micro benchmarks. The intent is to compare some language features to others. * [golang-sql-benchmark](https://github.com/tyler-smith/golang-sql-benchmark) - Collection of benchmarks for popular Go database/SQL utilities. * [gospeed](https://github.com/feyeleanor/GoSpeed) - Go micro-benchmarks for calculating the speed of language constructs. * [kvbench](https://github.com/jimrobinson/kvbench) - Key/Value database benchmark. * [skynet](https://github.com/atemerev/skynet) - Skynet 1M threads microbenchmark. * [speedtest-resize](https://github.com/fawick/speedtest-resize) - Compare various Image resize algorithms for the Go language. 
## Conferences * [Capital Go](http://www.capitalgolang.com) - Washington, D.C., USA * [dotGo](http://www.dotgo.eu) - Paris, France * [GoCon](http://gocon.connpass.com/) - Tokyo, Japan * [GolangUK](http://golanguk.com/) - London, UK * [GopherChina](http://gopherchina.org) - Shanghai, China * [GopherCon](http://www.gophercon.com/) - Denver, USA * [GopherCon Brazil](https://gopherconbr.org) - Florianópolis, BR * [GopherCon Dubai](http://www.gophercon.ae/) - Dubai, UAE * [GopherCon India](http://www.gophercon.in/) - Pune, India * [GopherCon Singapore](https://gophercon.sg) - Mapletree Business City, Singapore * [GothamGo](http://gothamgo.com/) - New York City, USA ## E-Books * [A Go Developer's Notebook](https://leanpub.com/GoNotebook/read) * [An Introduction to Programming in Go](http://www.golang-book.com/) * [Build Web Application with Golang](https://www.gitbook.com/book/astaxie/build-web-application-with-golang/details) * [Building Web Apps With Go](https://www.gitbook.com/book/codegangsta/building-web-apps-with-go/details) * [Go Bootcamp](http://golangbootcamp.com) * [GoBooks](https://github.com/dariubs/GoBooks) - A curated list of Go books. 
* [Learning Go](https://www.miek.nl/downloads/Go/Learning-Go-latest.pdf) * [Network Programming With Go](https://jan.newmarch.name/go/) * [The Go Programming Language](http://www.gopl.io/) * [Web Application with Go the Anti-Textbook](https://github.com/thewhitetulip/web-dev-golang-anti-textbook/) ## Gophers * [Go-gopher-Vector](https://github.com/keygx/Go-gopher-Vector) - Go gopher Vector Data [.ai, .svg] * [gopher-logos](https://github.com/GolangUA/gopher-logos) - adorable gopher logos * [gopher-stickers](https://github.com/tenntenn/gopher-stickers) * [gopher-vector](https://github.com/golang-samples/gopher-vector) * [gophericons](https://github.com/shalakhin/gophericons) * [gopherize.me](https://github.com/matryer/gopherize.me) - Gopherize yourself * [gophers](https://github.com/ashleymcnamara/gophers) - Gopher artworks by <NAME> * [gophers](https://github.com/egonelbre/gophers) - Free gophers * [gophers](https://github.com/rogeralsing/gophers) - random gopher graphics ## Meetups * [Go Language NYC](https://www.meetup.com/golanguagenewyork/) * [Go London User Group](https://www.meetup.com/Go-London-User-Group/) * [Go Toronto](https://www.meetup.com/go-toronto/) * [Go User Group Atlanta](https://www.meetup.com/Go-Users-Group-Atlanta/) * [GoBridge, San Francisco, CA](https://www.meetup.com/gobridge/) * [GoJakarta](https://www.meetup.com/GoJakarta/) * [Golang Amsterdam](https://www.meetup.com/golang-amsterdam/) * [Golang Argentina](https://www.meetup.com/Golang-Argentina/) * [Golang Bangalore](https://www.meetup.com/Golang-Bangalore/) * [Golang Belo Horizonte - Brazil](https://www.meetup.com/go-belo-horizonte/) * [Golang Boston](https://www.meetup.com/bostongo/) * [Golang DC, Arlington, VA](https://www.meetup.com/Golang-DC/) * [Golang Israel](https://www.meetup.com/Go-Israel/) * [Golang Joinville - Brazil](https://www.meetup.com/Joinville-Go-Meetup/) * [Golang Lima - Peru](https://www.meetup.com/Golang-Peru/) * [Golang Lyon](https://www.meetup.com/Golang-Lyon/) * 
[Golang Melbourne](https://www.meetup.com/golang-mel/) * [Golang Mountain View](https://www.meetup.com/Golang-Mountain-View/) * [Golang New York](https://www.meetup.com/nycgolang/) * [Golang Paris](https://www.meetup.com/Golang-Paris/) * [Golang Pune](https://www.meetup.com/Golang-Pune/) * [Golang Singapore](https://www.meetup.com/golangsg/) * [Golang Stockholm](https://www.meetup.com/Go-Stockholm/) * [Golang São Paulo - Brazil](https://www.meetup.com/golangbr/) * [Golang Vancouver, BC](https://www.meetup.com/golangvan/) * [Golang Москва](https://www.meetup.com/Golang-Moscow/) * [Golang Питер](https://www.meetup.com/Golang-Peter/) * [Istanbul Golang](https://www.meetup.com/Istanbul-Golang/) * [Seattle Go Programmers](https://www.meetup.com/golang/) * [Ukrainian Golang User Groups](https://www.meetup.com/uagolang/) * [Utah Go User Group](https://www.meetup.com/utahgophers/) * [Women Who Go - San Francisco, CA](https://www.meetup.com/Women-Who-Go/) *Add the group of your city/country here (send **PR**)* ## Twitter * [@golang](https://twitter.com/golang) * [@golang_news](https://twitter.com/golang_news) * [@golangflow](https://twitter.com/golangflow) * [@golangweekly](https://twitter.com/golangweekly) ## Websites * [Awesome Go @LibHunt](https://go.libhunt.com) - Your go-to Go Toolbox. * [Awesome Remote Job](https://github.com/lukasz-madon/awesome-remote-job) - Curated list of awesome remote jobs. A lot of them are looking for Go hackers. * [awesome-awesomeness](https://github.com/bayandin/awesome-awesomeness) - List of other amazingly awesome lists. * [Flipboard - Go Magazine](https://flipboard.com/section/the-golang-magazine-bVP7nS) - Collection of Go articles and tutorials. * [Go Blog](http://blog.golang.org) - The official Go blog. * [Go Challenge](http://golang-challenge.org/) - Learn Go by solving problems and getting feedback from Go experts. * [Go Forum](https://forum.golangbridge.org) - Forum to discuss Go. 
* [Go In 5 Minutes](https://www.goin5minutes.com/) - 5 minute screencasts focused on getting one thing done. * [Go Projects](https://github.com/golang/go/wiki/Projects) - List of projects on the Go community wiki. * [Go Report Card](https://goreportcard.com) - A report card for your Go package. * [gocryforhelp](https://github.com/ninedraft/gocryforhelp) - Collection of Go projects that needs help. Good place to start your open-source way in Go. * [godoc.org](https://godoc.org/) - Documentation for open source Go packages. * [Golang Flow](http://golangflow.io) - Post Updates, News, Packages and more. * [Golang News](https://golangnews.com) - Links and news about Go programming. * [golang-graphics](https://github.com/mholt/golang-graphics) - Collection of Go images, graphics, and art. * [golang-nuts](https://groups.google.com/forum/#!forum/golang-nuts) - Go mailing list. * [Google Plus Community](https://plus.google.com/communities/114112804251407510571) - The Google+ community for #golang enthusiasts. * [Gopher Community Chat](https://invite.slack.golangbridge.org) - Join Our New Slack Community For Gophers ([Understand how it came](https://blog.gopheracademy.com/gophers-slack-community/)). * [gowalker.org](https://gowalker.org) - Go Project API documentation. * [r/Golang](https://www.reddit.com/r/golang) - News about Go. * [Trending Go repositories on GitHub today](https://github.com/trending?l=go) - Good place to find new Go libraries. ### Tutorials * [A Tour of Go](http://tour.golang.org/) - Interactive tour of Go. * [Build web application with Golang](https://github.com/astaxie/build-web-application-with-golang) - Golang ebook intro how to build a web app with golang. * [Building Go Web Applications and Microservices Using Gin](https://semaphoreci.com/community/tutorials/building-go-web-applications-and-microservices-using-gin) - Get familiar with Gin and find out how it can help you reduce boilerplate code and build a request handling pipeline. 
* [Go By Example](https://gobyexample.com/) - Hands-on introduction to Go using annotated example programs. * [Go Cheat Sheet](https://github.com/a8m/go-lang-cheat-sheet) - Go's reference card. * [Go database/sql tutorial](http://go-database-sql.org/) - Introduction to database/sql. * [Golangbot](https://golangbot.com/learn-golang-series/) - Tutorials to get started with programming in Go. * [How to Use Godog for Behavior-driven Development in Go](https://semaphoreci.com/community/tutorials/how-to-use-godog-for-behavior-driven-development-in-go) - Get started with Godog — a Behavior-driven development framework for building and testing Go applications. * [Working with Go](https://github.com/mkaz/working-with-go) - Intro to go for experienced programmers.` defaultDefs = `{ "brands": { "google":["google","angular","googlecloudplatform","googlechrome", "golang", "gwtproject", "zxing", "v8"], "twitter":["twbs", "twitter", "bower", "flightjs"], "facebook": ["facebook", "facebookarchive","boltsframework"], "github":["atom", "github"], "microsoft": ["microsoft", "dotnet", "aspnet", "exceptionless", "mono", "winjs"] }, "keywords":{ "node": ["node", "nodejs"], "jquery": ["jquery", "jq", "/^jq[\\-]?/"], "grunt": ["grunt", "gruntjs"], "angular": ["angular", "angularjs", "ng", "/^ng(?!inx)\\-]?/"], "ember": ["emberjs", "ember"], "meteor": ["meteor", "meteorjs"], "gulp": ["gulp"], "express": ["express", "expressjs"], "d3": ["d3"], "polymer": ["polymer"], "ionic": ["ionic"], "seajs": ["seajs"], "yeoman": ["yeoman"], "browserify": ["browserify"], "requirejs": ["requirejs"], "underscore": ["underscore", "underscorejs"], "modernizr": ["modernizr"], "phantom": ["phantom", "phantomjs"], "metalsmith": ["metalsmith"], "bootstrap": ["bootstrap"], "django": ["django"], "bottle": ["bottlepy", "bottle"], "web2py": ["web2py"], "webpy": ["webpy"], "flask": ["flask"], "ipython": ["ipython"], "fabric": ["fabric"], "celery": ["celery"], "language/python": ["python", "/^py/"], "language/ruby": 
["ruby"], "language/clojure": ["clojure"], "language/lisp": ["lisp"], "language/rust": ["rust"], "language/erlang": ["erlang"], "language/go": ["golang", "go"], "language/javascript": ["javascript", "js"], "language/clojure": ["coffeescript"], "language/php": ["php"], "language/perl": ["perl"], "language/swift": ["swift"], "language/css": ["css", "stylesheet"], "ios": ["ios"], "osx": ["osx"], "unix": ["unix"], "android": ["android"], "linux": ["linux"], "windows": ["windows"], "deprecated": ["deprecated"], "pdf": ["pdf"], "polyfill": ["polyfill"], "framework": ["framework"], "dropbox": ["dropbox"], "webkit": ["webkit"], "sql": ["sql"], "svg": ["svg"], "boilerplate": ["boilerplate", "seed"], "rails": ["rails", "rails3"], "vim": ["vim", "vi"], "git": ["git"], "backbone": ["backbone"], "docker": ["docker"], "emacs": ["emacs"], "redis": ["redis"], "chrome": ["chrome"], "sublime": ["sublime"], "vagrant": ["vagrant"], "wordpress": ["wordpress", "/^wp\\-/"], "youtube": ["youtube"], "apache": ["apache"], "jekyll": ["jekyll"], "puppet": ["puppet"], "sass": ["sass", "scss"], "nginx": ["nginx"], "markdown": ["markdown"], "elasticsearch": ["elasticsearch"], "chef": ["chef"], "mongodb": ["mongodb", "mongo"], "cordova": ["cordova"], "phonegap": ["phonegap"], "ansible": ["ansible"], "openshift": ["openshift"], "mysql": ["mysql"], "couchbase": ["couchbase"], "firebase": ["firebase"], "homebrew": ["homebrew"], "openstack": ["openstack"], "maven": ["maven"], "hadoop": ["hadoop"], "spark": ["spark"], "jasmine": ["jasmine"], "hubot": ["hubot"], "jruby": ["jruby"], "couchdb": ["couchdb"], "travis": ["travis"], "bash": ["bash"], "coreos": ["coreos"], "mustache": ["mustache"], "zsh": ["zsh"], "jenkins": ["jenkins"], "cassandra": ["cassandra"], "statsd": ["statsd"], "eclipse": ["eclipse"], "knockout": ["knockout"], "graphite": ["graphite"], "textmate": ["textmate"], "jed": ["jed"], "memcached": ["memcached"], "mesos": ["mesos"], "rabbitmq": ["rabbitmq"], "firefox": ["firefox", "ff"], 
"postgres": ["postgres", "postgresql"], "selenium": ["selenium"], "gems": ["gems", "rubygems"], "zeromq": ["zeromq", "zmq", "0mq"], "tmux": ["tmux"], "cyanogenmod": ["cyanogenmod"], "tornado": ["tornado"], "octopress": ["octopress"], "dokku": ["dokku"], "karma": ["karma"], "bitcoin": ["bitcoin"], "handlebars": ["handlebars"], "qt": ["qt"], "minecraft": ["minecraft"], "unity": ["unity"], "cocos2d": ["cocos2d"], "openssl": ["openssl"], "amqp": ["amqp"], "logstash": ["logstash"], "sqlite": ["sqlite"], "v8": ["v8"], "fuse": ["fuse"], "cocoa": ["cocoa"], "curl": ["curl"], "ffmpeg": ["ffmpeg"], "hhvm": ["hhvm"], "rake": ["rake"], "drupal": ["drupal"], "gevent": ["gevent"], "nagios": ["nagios"], "chromium": ["chromium"], "jenkinsci": ["jenkinsci"], "etcd": ["etcd"], "kubernetes": ["kubernetes"], "react": ["react", "reactjs"] } }` tree = `` )
examples/aho-corasick/data.go
0.798344
0.773259
data.go
starcoder
package render import ( geometry "basic-ray/pkg/geometry" "math" ) const BIAS = 0.00001 type Color [3]float64 type Photon struct { vector geometry.Vector rgb Color } type LightSource interface { GetPhoton(destination geometry.Point) Photon GetDistance(destination geometry.Point) float64 } type DeltaLight struct { Location geometry.Point RGB Color } type DirectionalLight struct { Direction geometry.Vector RGB Color } func (lightSource *DeltaLight) GetPhoton(destination geometry.Point) Photon { fallOff := math.Pow(lightSource.GetDistance(destination), 2) rgb := Color{ lightSource.RGB[0] / fallOff, lightSource.RGB[1] / fallOff, lightSource.RGB[2] / fallOff, } return Photon{vector: geometry.Normalize(geometry.CreateVector(destination, lightSource.Location)), rgb: rgb} } func (lightSource *DeltaLight) GetDistance(destination geometry.Point) float64 { return geometry.Distance(destination, lightSource.Location) } func (lightSource *DirectionalLight) GetPhoton(destination geometry.Point) Photon { return Photon{vector: geometry.Normalize(lightSource.Direction), rgb: lightSource.RGB} } func (lightSource *DirectionalLight) GetDistance(destination geometry.Point) float64 { return float64(math.Inf(1)) } func GetReflectiveVector(incedentVector geometry.Vector, triangle *geometry.Triangle) geometry.Vector { normalVector := triangle.GetNormal() reflectionVector := geometry.Subtract( incedentVector, geometry.ScalarProduct( geometry.ScalarProduct(normalVector, geometry.DotProduct(incedentVector, normalVector)), 2, ), ) return reflectionVector } func GetRefractedVector(incedentVector geometry.Vector, triangle *geometry.Triangle) geometry.Vector { normalVector := triangle.GetNormal() incedentVector = geometry.Normalize(incedentVector) c1 := geometry.DotProduct(normalVector, incedentVector) return geometry.Subtract(geometry.ScalarProduct(incedentVector, triangle.RefractionIndex), geometry.ScalarProduct(normalVector, (triangle.RefractionIndex*c1))) } func 
getDirectLightFromSingleSource(destination geometry.Point, triangles []*geometry.Triangle, lightSource LightSource) *Photon { lightDistance := lightSource.GetDistance(destination) photon := lightSource.GetPhoton(destination) ray := &geometry.Ray{Origin: destination, Vector: geometry.ScalarProduct(photon.vector, -1)} for _, triangle := range triangles { intersects := geometry.GetIntersection(ray, triangle) if intersects == nil { continue } collision := *intersects collisionDistance := geometry.Distance(destination, collision) if collisionDistance > lightDistance || collisionDistance < BIAS { continue } return nil } return &photon } func GetDirectLight(destination geometry.Point, triangles []*geometry.Triangle, lightSources []LightSource) []*Photon { photons := make([]*Photon, 0) var photon *Photon for _, lightSource := range lightSources { photon = getDirectLightFromSingleSource(destination, triangles, lightSource) if photon != nil { photons = append(photons, photon) } } return photons }
pkg/render/light.go
0.831417
0.737914
light.go
starcoder
package main import ( "image/png" "os" "github.com/adamcolton/geom/angle" "github.com/nfnt/resize" "github.com/adamcolton/geom/d3" "github.com/adamcolton/geom/d3/render/raytrace" "github.com/adamcolton/geom/d3/render/scene" "github.com/adamcolton/geom/d3/solid/mesh" ) func main() { s := &raytrace.Scene{ Depth: 3, RayMult: 4, Background: backgroundShader, Camera: scene.Camera{ Q: d3.Q{1, 0, 0, 0}, Angle: angle.Deg(30), Width: 500, Height: 500, }, } sf := s.NewFrame(3) sf.Add(getArrow(), d3.Identity(), arrowShader) sf.Add(getLight(), d3.Identity(), lightShader) sf.Add(getFloor(), d3.Identity(), floorShader) img := sf.Image() q := .75 rx := float64(s.Camera.Width) * q f, _ := os.Create("test.png") png.Encode(f, resize.Resize(uint(rx), 0, img, resize.Bilinear)) f.Close() } func getArrow() *mesh.TriangleMesh { f := []d3.Pt{ {0, 2, 10}, {1.5, 3.5, 10}, {3, 2, 10}, {2, 2, 10}, {2, 0, 10}, {1, 0, 10}, {1, 2, 10}, } f = d3.Translate(d3.V{-1.5, -1.0, 0}).T().T( d3.Rotation{ Angle: angle.Deg(90), Plane: d3.XY, }.T(), ).Pts(f) m, err := mesh.NewExtrusion(f). Extrude(d3.Translate(d3.V{0, 0, 1}).T()). Close(). 
TriangleMesh() if err != nil { panic(err) } return &m } func getLight() *mesh.TriangleMesh { z := -1.0 size := 0.3 return &mesh.TriangleMesh{ Pts: []d3.Pt{ {-size, -size, z}, {size, -size, z}, {size, size, z}, {-size, size, z}, }, Polygons: [][][3]uint32{ { {0, 1, 2}, {0, 2, 3}, }, }, } } func getFloor() *mesh.TriangleMesh { y := -1.5 size := 100.0 return &mesh.TriangleMesh{ Pts: []d3.Pt{ {-size, y, -size}, {size, y, -size}, {size, y, size}, {-size, y, size}, }, Polygons: [][][3]uint32{ { {0, 1, 2}, {0, 2, 3}, }, }, } } func backgroundShader(ctx *raytrace.Context) *raytrace.Material { return &raytrace.Material{ Color: &raytrace.Color{0.6, 0.6, 1.0}, Luminous: 1.0, Diffuse: angle.Deg(90), } } func arrowShader(ctx *raytrace.Context) *raytrace.Material { pt := ctx.Ray.Pt1(ctx.T) y := ((pt.Y + 1.5) / 4.0) return &raytrace.Material{ Color: &raytrace.Color{y, 0.5, 0.5}, Luminous: 0, Reflective: 0.8, Diffuse: angle.Deg(2), } } var lightMaterial = &raytrace.Material{ Color: &raytrace.Color{1.0, 1.0, 1.0}, Luminous: 1.0, Diffuse: angle.Deg(90), } func lightShader(ctx *raytrace.Context) *raytrace.Material { return lightMaterial } var ( c1 = &raytrace.Color{0.9, 0.9, 0.9} c2 = &raytrace.Color{0.1, 0.1, 0.1} ) func floorShader(ctx *raytrace.Context) *raytrace.Material { pt := ctx.Ray.Pt1(ctx.T) x, z := int(pt.X), int(pt.Z) c := c1 if (x^z)&1 == 1 { c = c2 } return &raytrace.Material{ Color: c, Reflective: 0.8, Diffuse: angle.Deg(45), } }
examples/raytrace/raytrace.go
0.631367
0.485173
raytrace.go
starcoder
package unionfind import ( "fmt" "strings" ) // Maintains sets and a number of connected elements. type UnionFind struct { sets map[interface{}]*set count int } type set struct { parent interface{} rank int } // New return an initialized UnionFind data structure. func New() *UnionFind { return &UnionFind{ sets: make(map[interface{}]*set), } } // MakeSet makes an independent set of one element. If called with multiple // arguments, an independent set for every element is made. func (u *UnionFind) MakeSet(a ...interface{}) { if len(a) == 0 { return } for _, elem := range a { if elem == nil { continue } // Skip already made sets. if _, ok := u.sets[elem]; ok { continue } u.sets[elem] = &set{parent: elem} u.count++ } } // Union merges two independent sets as one. The number of sets is decreased by 1. func (u *UnionFind) Union(x, y interface{}) { a := u.Find(x) b := u.Find(y) // Already connected. if a == b { return } // Weighting. switch { case u.sets[a].rank < u.sets[b].rank: u.sets[a].parent = b case u.sets[a].rank > u.sets[b].rank: u.sets[b].parent = a case u.sets[a].rank == u.sets[b].rank: u.sets[b].parent = a u.sets[a].rank++ } u.count-- } // Find returns the root element of the set. The root element is the same for // all elements within the same set. func (u UnionFind) Find(x interface{}) interface{} { if _, ok := u.sets[x]; !ok { panic(fmt.Sprintf("set %v hasn't been made yet with MakeSet", x)) } // The root. if u.sets[x].parent == x { return x } // Path compression. u.sets[x].parent = u.Find(u.sets[x].parent) return u.sets[x].parent } // Exists returns true if the element belongs to any set, false otherwise. func (u UnionFind) Exists(x interface{}) bool { if _, ok := u.sets[x]; ok { return true } else { return false } } // Connected returns true if the elements belong to the same set, // false otherwise. func (u UnionFind) Connected(x, y interface{}) bool { return u.Find(x) == u.Find(y) } // Count returns the number of independent sets. 
func (u UnionFind) Count() int { return u.count } func (u UnionFind) prepareDump() []string { m := make(map[interface{}][]interface{}) for k, v := range u.sets { parent := u.Find(v.parent) if _, ok := m[parent]; !ok { m[parent] = []interface{}{} } m[parent] = append(m[parent], k) } s := []string{} for _, e := range m { s = append(s, fmt.Sprintf("%v", e)) } return s } // String returns a text representation of a UnionFind data structure. func (u UnionFind) String() string { return strings.Join(u.prepareDump(), " ") }
unionfind.go
0.778313
0.46041
unionfind.go
starcoder
package contourmap import ( "image" "math" "sort" ) type ContourMap struct { W int // width of the contour map in pixels H int // height of the contour map in pixels Min float64 // minimum value contained in this contour map Max float64 // maximum value contained in this contour map grid []float64 } // FromFloat64s returns a new ContourMap for the provided 2D grid of values. // len(grid) must equal w * h. func FromFloat64s(w, h int, grid []float64) *ContourMap { min := math.Inf(1) max := math.Inf(-1) for _, x := range grid { if x == closed { continue } min = math.Min(min, x) max = math.Max(max, x) } return &ContourMap{w, h, min, max, grid} } // FromFloat64s returns a new ContourMap for the provided function. // The function will be called for all points x = [0, w) and y = [0, h) to // determine the Z value at each point. func FromFunction(w, h int, f Function) *ContourMap { grid := make([]float64, w*h) i := 0 for y := 0; y < h; y++ { for x := 0; x < w; x++ { grid[i] = f(x, y) i++ } } return FromFloat64s(w, h, grid) } // FromImage returns a new ContourMap for the provided image. The image is // converted to 16-bit grayscale and will have Z values mapped from // [0, 65535] to [0, 1]. 
func FromImage(im image.Image) *ContourMap { gray := imageToGray16(im) w := gray.Bounds().Size().X h := gray.Bounds().Size().Y grid := make([]float64, w*h) j := 0 for i := range grid { x := int(gray.Pix[j])<<8 | int(gray.Pix[j+1]) grid[i] = float64(x) / 0xffff j += 2 } return FromFloat64s(w, h, grid) } func (m *ContourMap) at(x, y int) float64 { return m.grid[y*m.W+x] } func (m *ContourMap) HistogramZs(numLevels int) []float64 { // compute histogram hist := make(map[float64]int) for _, v := range m.grid { hist[v]++ } // sort histogram keys keys := make([]float64, 0, len(hist)) for key := range hist { keys = append(keys, key) } sort.Float64s(keys) result := make([]float64, numLevels) numPixels := len(m.grid) for i := 0; i < numLevels; i++ { // compute number of pixels for this level t := (float64(i) + 0.5) / float64(numLevels) pixelCount := int(t * float64(numPixels)) // find z var total int for _, k := range keys { total += hist[k] if total >= pixelCount { result[i] = k break } } } return result } // Contours returns a list of contours that represent isolines at the specified // Z value. func (m *ContourMap) Contours(z float64) []Contour { return marchingSquares(m, m.W, m.H, z) } // Closed returns a new ContourMap that will ensure all Contours are closed // paths by following the border when they would normally stop at the edge // of the grid. func (m *ContourMap) Closed() *ContourMap { w := m.W + 2 h := m.H + 2 grid := make([]float64, w*h) for i := range grid { grid[i] = closed } for y := 0; y < m.H; y++ { i := (y+1)*w + 1 j := y * m.W copy(grid[i:], m.grid[j:j+m.W]) } return FromFloat64s(w, h, grid) }
contourmap.go
0.813238
0.518424
contourmap.go
starcoder
package gt import ( "bytes" "database/sql/driver" "fmt" ) /* Valid representations of `gt.Ter`. Other values are considered invalid and will cause panics. */ const ( TerNull Ter = 0 TerFalse Ter = 1 TerTrue Ter = 2 ) /* Shortcut: parses successfully or panics. Provided only for consistency with other types. Prefer constants such as `gt.TerNull`. */ func ParseTer(src string) (val Ter) { try(val.Parse(src)) return } /* Converts boolean to ternary: * false = gt.TerFalse * true = gt.TerTrue For inverse conversion, use `gt.Ter.LaxBool` or `gt.Ter.TryBool`. */ func BoolTer(val bool) Ter { if val { return TerTrue } return TerFalse } /* Converts boolean pointer to ternary: * nil = gt.TerNull * &false = gt.TerFalse * &true = gt.TerTrue For inverse conversion, use `gt.Ter.BoolPtr`. */ func BoolPtrTer(val *bool) Ter { if val == nil { return TerNull } return BoolTer(*val) } /* Ternary type / nullable boolean type. Similar to `*bool`, with various advantages. Has three states with the following representations: TerNull | 0 | "" in text | null in JSON | null in SQL TerFalse | 1 | "false" in text | false in JSON | false in SQL TerTrue | 2 | "true" in text | true in JSON | true in SQL Differences from `bool`: * 3 states rather than 2. * Nullable in JSON and SQL. * Zero value is empty/null rather than false. Differences from `*bool`: * More efficient: 1 byte, no heap indirection, no added GC pressure. * Safer: no nil pointer panics. * Zero value is considered empty in text. * Text encoding/decoding is reversible. Differences from `sql.NullBool`: * More efficient: 1 byte rather than 2. * Much easier to use. * Supports text. * Supports JSON. */ type Ter byte var ( _ = Encodable(Ter(0)) _ = Decodable((*Ter)(nil)) ) // Implement `gt.Zeroable`. Equivalent to `reflect.ValueOf(self).IsZero()`. func (self Ter) IsZero() bool { return self == TerNull } // Implement `gt.Nullable`. True if zero. func (self Ter) IsNull() bool { return self.IsZero() } // Implement `gt.Getter`. 
If zero, returns `nil`, otherwise returns `bool`. func (self Ter) Get() interface{} { if self.IsNull() { return nil } return self.LaxBool() } // Implement `gt.Setter`, using `.Scan`. Panics on error. func (self *Ter) Set(src interface{}) { try(self.Scan(src)) } // Implement `gt.Zeroer`, zeroing the receiver. func (self *Ter) Zero() { if self != nil { *self = TerNull } } /* Implement `fmt.Stringer`, using the following representations: * gt.TerNull = "" * gt.TerFalse = "false" * gt.TerTrue = "true" */ func (self Ter) String() string { switch self { case TerNull: return `` case TerFalse: return `false` case TerTrue: return `true` default: panic(self.invalid()) } } /* Implement `gt.Parser`. If the input is empty, zeroes the receiver. Otherwise expects the input to be "false" or "true". */ func (self *Ter) Parse(src string) (err error) { defer errParse(&err, src, `ternary`) switch src { case ``: *self = TerNull return nil case `false`: *self = TerFalse return nil case `true`: *self = TerTrue return nil default: return fmt.Errorf(`[gt] failed to parse ternary: expected empty string, "false", or "true", got %q`, src) } } // Implement `gt.Appender`, using the same representation as `.String`. func (self Ter) Append(buf []byte) []byte { return append(buf, self.String()...) } /* Implement `encoding.TextMarhaler`. If zero, returns nil. Otherwise returns the same representation as `.String`. */ func (self Ter) MarshalText() ([]byte, error) { if self.IsNull() { return nil, nil } return self.Append(nil), nil } // Implement `encoding.TextUnmarshaler`, using the same algorithm as `.Parse`. 
func (self *Ter) UnmarshalText(src []byte) error { return self.Parse(bytesString(src)) } /* Implement `json.Marshaler`, using the following representations: * gt.TerNull = []byte("null") * gt.TerFalse = []byte("false") * gt.TerTrue = []byte("true") */ func (self Ter) MarshalJSON() ([]byte, error) { switch self { case TerNull: return bytesNull, nil case TerFalse: return bytesFalse, nil case TerTrue: return bytesTrue, nil default: return nil, self.invalid() } } /* Implement `json.Unmarshaler`, using the following representations: * []byte(nil) = gt.TerNull * []byte("") = gt.TerNull * []byte("null") = gt.TerNull * []byte("false") = gt.TerFalse * []byte("true") = gt.TerTrue */ func (self *Ter) UnmarshalJSON(src []byte) error { if bytes.Equal(src, bytesNull) { self.Zero() return nil } return self.UnmarshalText(src) } // Implement `driver.Valuer`, using `.Get`. func (self Ter) Value() (driver.Value, error) { return self.Get(), nil } /* Implement `sql.Scanner`, converting an arbitrary input to `gt.Ter` and modifying the receiver. Acceptable inputs: * `nil` -> use `.Zero` * `string` -> use `.Parse` * `[]byte` -> use `.UnmarshalText` * `bool` -> use `.SetBool` * `*bool` -> use `.SetBoolPtr` * `Ter` -> assign * `gt.Getter` -> scan underlying value */ func (self *Ter) Scan(src interface{}) error { switch src := src.(type) { case nil: self.Zero() return nil case string: return self.Parse(src) case []byte: return self.UnmarshalText(src) case bool: self.SetBool(src) return nil case *bool: self.SetBoolPtr(src) return nil case Ter: *self = src return nil default: val, ok := get(src) if ok { return self.Scan(val) } return errScanType(self, src) } } func (self Ter) invalid() error { return fmt.Errorf(`[gt] unrecognized value of %[1]T: %[1]v`, self) } // Sets the receiver to the result of `gt.BoolTer`. func (self *Ter) SetBool(val bool) { *self = BoolTer(val) } // Sets the receiver to the result of `gt.BoolPtrTer`. 
func (self *Ter) SetBoolPtr(val *bool) { *self = BoolPtrTer(val) } /* Semi-inverse of `gt.BoolTer`. Permissive conversion, where anything untrue is considered false. Equivalent to `.IsTrue()`. */ func (self Ter) LaxBool() bool { return self.IsTrue() } /* Exact inverse of `gt.BoolTer`. If true or false, converts to a boolean, otherwise panics. */ func (self Ter) TryBool() bool { switch self { case TerNull: panic(errTerNullBool) case TerFalse: return false case TerTrue: return true default: panic(self.invalid()) } } /* Inverse of `gt.BoolPtrTer`. Converts to a boolean pointer: * gt.TerNull = nil * gt.TerFalse = &false * gt.TerTrue = &true The returned values are statically allocated and must never be modified. */ func (self Ter) BoolPtr() *bool { switch self { case TerNull: return nil case TerFalse: return ptrFalse case TerTrue: return ptrTrue default: panic(self.invalid()) } } /* Exact boolean equality. If the receiver is not true or false, this returns false regardless of the input. */ func (self Ter) EqBool(val bool) bool { if val { return self.IsTrue() } return self.IsFalse() } // Same as `== gt.TerTrue`. func (self Ter) IsTrue() bool { return self == TerTrue } // Same as `== gt.TerFalse`. func (self Ter) IsFalse() bool { return self == TerFalse } // Implement `fmt.GoStringer`, returning valid Go code representing this value. func (self Ter) GoString() string { switch self { case TerNull: return `gt.TerNull` case TerFalse: return `gt.TerFalse` case TerTrue: return `gt.TerTrue` default: return fmt.Sprintf(`gt.Ter(%v)`, byte(self)) } }
gt_ter.go
0.795102
0.584004
gt_ter.go
starcoder
package plotter import ( "fmt" ) var commandSeparator string func SetCommandSeparator(separator string) { commandSeparator = separator } func CommandSeparator() string { return commandSeparator } type Mode int //go:generate stringer -type=Mode const ( Absolute Mode = iota // Absolute used as a default value here Relative ) type Pen struct { x, y int } func NewPen(x, y float64) Pen { p := Pen{} p.SetPosition(x, y) return p } func (p *Pen) Up(coordinates ...float64) string { if len(coordinates) >= 2 { if len(coordinates)%2 != 0 { coordinates = append(coordinates, 0) } p.SetPosition( coordinates[len(coordinates)-2], coordinates[len(coordinates)-1], ) } return moveCommand("PU", coordinates...) } func (p *Pen) Down(coordinates ...float64) string { if len(coordinates) >= 2 { if len(coordinates)%2 != 0 { coordinates = append(coordinates, 0) } p.SetPosition( coordinates[len(coordinates)-2], coordinates[len(coordinates)-1], ) } return moveCommand("PD", coordinates...) } func (p *Pen) Absolute(coordinates ...float64) string { if len(coordinates) >= 2 { if len(coordinates)%2 != 0 { coordinates = append(coordinates, 0) } p.SetPosition( coordinates[len(coordinates)-2], coordinates[len(coordinates)-1], ) } return moveCommand("PA", coordinates...) } func (p *Pen) Relative(coordinates ...float64) string { if len(coordinates) >= 2 { if len(coordinates)%2 != 0 { coordinates = append(coordinates, 0) } p.SetPosition( p.X()+coordinates[len(coordinates)-2], p.Y()+coordinates[len(coordinates)-1], ) } return moveCommand("PR", coordinates...) } func (p *Pen) Move(mode Mode, coordinates ...[2]float64) string { output := "PU;" + CommandSeparator() switch mode { case Absolute: output += p.Absolute(coordinates...) case Relative: output += p.Relative(coordinates...) } return output } func (p *Pen) Line(mode Mode, coordinates ...[2]float64) string { output := "PD;" + CommandSeparator() switch mode { case Absolute: output += p.Absolute(coordinates...) 
case Relative: output += p.Relative(coordinates...) } return output } func (p *Pen) SetX(f float64) { p.x = floatToUnit(f) } func (p *Pen) SetY(f float64) { p.y = floatToUnit(f) } func (p *Pen) SetPosition(x, y float64) { p.SetX(x) p.SetY(y) } func (p Pen) X() float64 { return unitToFloat(p.x) } func (p Pen) Y() float64 { return unitToFloat(p.y) } func (p Pen) Position() (float64, float64) { return p.X(), p.Y() } func SelectPen(i int) string { return ConstructCommand("SP", i) } func ConstructCommand(command string, args ...int) string { for i, v := range args { command += fmt.Sprint(v) if i < len(args)-1 { command += "," } } command += ";" + commandSeparator return command } func moveCommand(command string, coordinates ...float64) string { unitCoordinates := floatToUnitSlice(coordinates...) s := ConstructCommand("PU", unitCoordinates...) return s }
plotter.go
0.59302
0.534855
plotter.go
starcoder
package schema import ( "reflect" "strings" "github.com/benpate/convert" "github.com/benpate/derp" "github.com/benpate/path" ) // Array represents an array data type within a JSON-Schema. type Array struct { Items Element Required bool } // Type returns the data type of this Schema func (array Array) Type() Type { return TypeArray } // Path returns sub-schemas of this array. func (array Array) Path(p path.Path) (Element, error) { if p.IsEmpty() { return array, nil } if index, _ := convert.IntOk(p.Head(), -1); index >= 0 { return array.Items.Path(p.Tail()) } return nil, derp.New(derp.CodeBadRequestError, "schema.Array.Path", "invalid array index", p) } // Default returns the default value for this schema element func (array Array) DefaultValue() interface{} { switch array.Items.Type() { case TypeAny: return make([]interface{}, 0) case TypeArray: return make([]interface{}, 0) case TypeBoolean: return make([]bool, 0) case TypeInteger: return make([]int, 0) case TypeNumber: return make([]float64, 0) case TypeObject: return make([]map[string]interface{}, 0) case TypeString: return make([]string, 0) default: return make([]interface{}, 0) } } func (array Array) Convert(value interface{}) interface{} { switch array.Items.Type() { case TypeAny: return convert.SliceOfInterface(value) case TypeArray: return convert.SliceOfInterface(value) case TypeBoolean: return convert.SliceOfBool(value) case TypeInteger: return convert.SliceOfInt(value) case TypeNumber: return convert.SliceOfFloat(value) case TypeObject: return convert.SliceOfMap(value) case TypeString: return convert.SliceOfString(value) default: return convert.SliceOfInterface(value) } } // Validate compares a generic data value using this Schema func (array Array) Validate(value interface{}) error { t := reflect.TypeOf(value) if (t.Kind() != reflect.Array) && (t.Kind() != reflect.Slice) { return ValidationError{Message: "must be an array"} } result := derp.NewCollector() v := reflect.ValueOf(value) length := v.Len() if 
array.Required && length == 0 { return ValidationError{Message: "field is required"} } for index := 0; index < length; index = index + 1 { item := v.Index(index).Interface() if err := array.Items.Validate(item); err != nil { result.Add(Rollup(err, convert.String(index))) } } return result.Error() } // MarshalMap populates object data into a map[string]interface{} func (array Array) MarshalMap() map[string]interface{} { return map[string]interface{}{ "type": array.Type(), "items": array.Items.MarshalMap(), } } // UnmarshalMap tries to populate this object using data from a map[string]interface{} func (array *Array) UnmarshalMap(data map[string]interface{}) error { var err error if convert.String(data["type"]) != "array" { return derp.New(500, "schema.Array.UnmarshalMap", "Data is not type 'array'", data) } array.Items, err = UnmarshalMap(data["items"]) array.Required = convert.Bool(data["required"]) return err } func (array Array) MarshalJavascript(b *strings.Builder) { }
array.go
0.78469
0.520496
array.go
starcoder
package bn256 import ( "crypto/rand" "errors" "io" "math/big" ) func randomK(r io.Reader) (k *big.Int, err error) { for { k, err = rand.Int(r, Order) if k.Sign() > 0 || err != nil { return } } } // G1 is an abstract cyclic group. The zero value is suitable for use as the // output of an operation, but cannot be used as an input. type G1 struct { P *curvePoint } // RandomG1 returns X and g₁ˣ where X is a random, non-zero number read from r. func RandomG1(r io.Reader) (*big.Int, *G1, error) { k, err := randomK(r) if err != nil { return nil, nil, err } return k, new(G1).ScalarBaseMult(k), nil } func (g *G1) String() string { return "bn256.G1" + g.P.String() } // ScalarBaseMult sets e to g*k where g is the generator of the group and then // returns e. func (e *G1) ScalarBaseMult(k *big.Int) *G1 { if e.P == nil { e.P = &curvePoint{} } e.P.Mul(curveGen, k) return e } // ScalarMult sets e to a*k and then returns e. func (e *G1) ScalarMult(a *G1, k *big.Int) *G1 { if e.P == nil { e.P = &curvePoint{} } e.P.Mul(a.P, k) return e } // Add sets e to a+b and then returns e. func (e *G1) Add(a, b *G1) *G1 { if e.P == nil { e.P = &curvePoint{} } e.P.Add(a.P, b.P) return e } // Neg sets e to -a and then returns e. func (e *G1) Neg(a *G1) *G1 { if e.P == nil { e.P = &curvePoint{} } e.P.Neg(a.P) return e } // Set sets e to a and then returns e. func (e *G1) Set(a *G1) *G1 { if e.P == nil { e.P = &curvePoint{} } e.P.Set(a.P) return e } // Marshal converts e to a byte slice. func (e *G1) Marshal() []byte { // Each value is a 256-bit number. const numBytes = 256 / 8 e.P.MakeAffine() ret := make([]byte, numBytes*2) if e.P.IsInfinity() { return ret } temp := &gfP{} montDecode(temp, &e.P.X) temp.Marshal(ret) montDecode(temp, &e.P.Y) temp.Marshal(ret[numBytes:]) return ret } // Unmarshal sets e to the result of converting the output of Marshal back into // a group element and then returns e. func (e *G1) Unmarshal(m []byte) ([]byte, error) { // Each value is a 256-bit number. 
const numBytes = 256 / 8 if len(m) < 2*numBytes { return nil, errors.New("bn256: not enough data") } // Unmarshal the points and check their caps if e.P == nil { e.P = &curvePoint{} } else { e.P.X, e.P.Y = gfP{0}, gfP{0} } var err error if err = e.P.X.Unmarshal(m); err != nil { return nil, err } if err = e.P.Y.Unmarshal(m[numBytes:]); err != nil { return nil, err } // Encode into Montgomery form and ensure it's on the curve montEncode(&e.P.X, &e.P.X) montEncode(&e.P.Y, &e.P.Y) zero := gfP{0} if e.P.X == zero && e.P.Y == zero { // This is the point at infinity. e.P.Y = *newGFp(1) e.P.Z = gfP{0} e.P.T = gfP{0} } else { e.P.Z = *newGFp(1) e.P.T = *newGFp(1) if !e.P.IsOnCurve() { return nil, errors.New("bn256: malformed point") } } return m[2*numBytes:], nil } // G2 is an abstract cyclic group. The zero value is suitable for use as the // output of an operation, but cannot be used as an input. type G2 struct { P *twistPoint } // RandomG2 returns X and g₂ˣ where X is a random, non-zero number read from r. func RandomG2(r io.Reader) (*big.Int, *G2, error) { k, err := randomK(r) if err != nil { return nil, nil, err } return k, new(G2).ScalarBaseMult(k), nil } func (e *G2) String() string { return "bn256.G2" + e.P.String() } // ScalarBaseMult sets e to g*k where g is the generator of the group and then // returns out. func (e *G2) ScalarBaseMult(k *big.Int) *G2 { if e.P == nil { e.P = &twistPoint{} } e.P.Mul(twistGen, k) return e } // ScalarMult sets e to a*k and then returns e. func (e *G2) ScalarMult(a *G2, k *big.Int) *G2 { if e.P == nil { e.P = &twistPoint{} } e.P.Mul(a.P, k) return e } // Add sets e to a+b and then returns e. func (e *G2) Add(a, b *G2) *G2 { if e.P == nil { e.P = &twistPoint{} } e.P.Add(a.P, b.P) return e } // Neg sets e to -a and then returns e. func (e *G2) Neg(a *G2) *G2 { if e.P == nil { e.P = &twistPoint{} } e.P.Neg(a.P) return e } // Set sets e to a and then returns e. 
func (e *G2) Set(a *G2) *G2 { if e.P == nil { e.P = &twistPoint{} } e.P.Set(a.P) return e } // Marshal converts e into a byte slice. func (e *G2) Marshal() []byte { // Each value is a 256-bit number. const numBytes = 256 / 8 if e.P == nil { e.P = &twistPoint{} } e.P.MakeAffine() ret := make([]byte, numBytes*4) if e.P.IsInfinity() { return ret } temp := &gfP{} montDecode(temp, &e.P.X.X) temp.Marshal(ret) montDecode(temp, &e.P.X.Y) temp.Marshal(ret[numBytes:]) montDecode(temp, &e.P.Y.X) temp.Marshal(ret[2*numBytes:]) montDecode(temp, &e.P.Y.Y) temp.Marshal(ret[3*numBytes:]) return ret } // Unmarshal sets e to the result of converting the output of Marshal back into // a group element and then returns e. func (e *G2) Unmarshal(m []byte) ([]byte, error) { // Each value is a 256-bit number. const numBytes = 256 / 8 if len(m) < 4*numBytes { return nil, errors.New("bn256: not enough data") } // Unmarshal the points and check their caps if e.P == nil { e.P = &twistPoint{} } var err error if err = e.P.X.X.Unmarshal(m); err != nil { return nil, err } if err = e.P.X.Y.Unmarshal(m[numBytes:]); err != nil { return nil, err } if err = e.P.Y.X.Unmarshal(m[2*numBytes:]); err != nil { return nil, err } if err = e.P.Y.Y.Unmarshal(m[3*numBytes:]); err != nil { return nil, err } // Encode into Montgomery form and ensure it's on the curve montEncode(&e.P.X.X, &e.P.X.X) montEncode(&e.P.X.Y, &e.P.X.Y) montEncode(&e.P.Y.X, &e.P.Y.X) montEncode(&e.P.Y.Y, &e.P.Y.Y) if e.P.X.IsZero() && e.P.Y.IsZero() { // This is the point at infinity. e.P.Y.SetOne() e.P.Z.SetZero() e.P.T.SetZero() } else { e.P.Z.SetOne() e.P.T.SetOne() if !e.P.IsOnCurve() { return nil, errors.New("bn256: malformed point") } } return m[4*numBytes:], nil } // GT is an abstract cyclic group. The zero value is suitable for use as the // output of an operation, but cannot be used as an input. type GT struct { P *gfP12 } // Pair calculates an Optimal Ate pairing. 
func Pair(g1 *G1, g2 *G2) *GT { return &GT{optimalAte(g2.P, g1.P)} } // PairingCheck calculates the Optimal Ate pairing for a set of points. func PairingCheck(a []*G1, b []*G2) bool { acc := new(gfP12) acc.SetOne() for i := 0; i < len(a); i++ { if a[i].P.IsInfinity() || b[i].P.IsInfinity() { continue } acc.Mul(acc, miller(b[i].P, a[i].P)) } return finalExponentiation(acc).IsOne() } // Miller applies Miller's algorithm, which is a bilinear function from the // source groups to F_p^12. Miller(g1, g2).Finalize() is equivalent to Pair(g1, // g2). func Miller(g1 *G1, g2 *G2) *GT { return &GT{miller(g2.P, g1.P)} } func (g *GT) String() string { return "bn256.GT" + g.P.String() } // ScalarMult sets e to a*k and then returns e. func (e *GT) ScalarMult(a *GT, k *big.Int) *GT { if e.P == nil { e.P = &gfP12{} } e.P.Exp(a.P, k) return e } // Add sets e to a+b and then returns e. func (e *GT) Add(a, b *GT) *GT { if e.P == nil { e.P = &gfP12{} } e.P.Mul(a.P, b.P) return e } // Neg sets e to -a and then returns e. func (e *GT) Neg(a *GT) *GT { if e.P == nil { e.P = &gfP12{} } e.P.Conjugate(a.P) return e } // Set sets e to a and then returns e. func (e *GT) Set(a *GT) *GT { if e.P == nil { e.P = &gfP12{} } e.P.Set(a.P) return e } // Finalize is a linear function from F_p^12 to GT. func (e *GT) Finalize() *GT { ret := finalExponentiation(e.P) e.P.Set(ret) return e } // Marshal converts e into a byte slice. func (e *GT) Marshal() []byte { // Each value is a 256-bit number. 
const numBytes = 256 / 8 ret := make([]byte, numBytes*12) temp := &gfP{} montDecode(temp, &e.P.X.X.X) temp.Marshal(ret) montDecode(temp, &e.P.X.X.Y) temp.Marshal(ret[numBytes:]) montDecode(temp, &e.P.X.Y.X) temp.Marshal(ret[2*numBytes:]) montDecode(temp, &e.P.X.Y.Y) temp.Marshal(ret[3*numBytes:]) montDecode(temp, &e.P.X.Z.X) temp.Marshal(ret[4*numBytes:]) montDecode(temp, &e.P.X.Z.Y) temp.Marshal(ret[5*numBytes:]) montDecode(temp, &e.P.Y.X.X) temp.Marshal(ret[6*numBytes:]) montDecode(temp, &e.P.Y.X.Y) temp.Marshal(ret[7*numBytes:]) montDecode(temp, &e.P.Y.Y.X) temp.Marshal(ret[8*numBytes:]) montDecode(temp, &e.P.Y.Y.Y) temp.Marshal(ret[9*numBytes:]) montDecode(temp, &e.P.Y.Z.X) temp.Marshal(ret[10*numBytes:]) montDecode(temp, &e.P.Y.Z.Y) temp.Marshal(ret[11*numBytes:]) return ret } // Unmarshal sets e to the result of converting the output of Marshal back into // a group element and then returns e. func (e *GT) Unmarshal(m []byte) ([]byte, error) { // Each value is a 256-bit number. const numBytes = 256 / 8 if len(m) < 12*numBytes { return nil, errors.New("bn256: not enough data") } if e.P == nil { e.P = &gfP12{} } var err error if err = e.P.X.X.X.Unmarshal(m); err != nil { return nil, err } if err = e.P.X.X.Y.Unmarshal(m[numBytes:]); err != nil { return nil, err } if err = e.P.X.Y.X.Unmarshal(m[2*numBytes:]); err != nil { return nil, err } if err = e.P.X.Y.Y.Unmarshal(m[3*numBytes:]); err != nil { return nil, err } if err = e.P.X.Z.X.Unmarshal(m[4*numBytes:]); err != nil { return nil, err } if err = e.P.X.Z.Y.Unmarshal(m[5*numBytes:]); err != nil { return nil, err } if err = e.P.Y.X.X.Unmarshal(m[6*numBytes:]); err != nil { return nil, err } if err = e.P.Y.X.Y.Unmarshal(m[7*numBytes:]); err != nil { return nil, err } if err = e.P.Y.Y.X.Unmarshal(m[8*numBytes:]); err != nil { return nil, err } if err = e.P.Y.Y.Y.Unmarshal(m[9*numBytes:]); err != nil { return nil, err } if err = e.P.Y.Z.X.Unmarshal(m[10*numBytes:]); err != nil { return nil, err } if err = 
e.P.Y.Z.Y.Unmarshal(m[11*numBytes:]); err != nil { return nil, err } montEncode(&e.P.X.X.X, &e.P.X.X.X) montEncode(&e.P.X.X.Y, &e.P.X.X.Y) montEncode(&e.P.X.Y.X, &e.P.X.Y.X) montEncode(&e.P.X.Y.Y, &e.P.X.Y.Y) montEncode(&e.P.X.Z.X, &e.P.X.Z.X) montEncode(&e.P.X.Z.Y, &e.P.X.Z.Y) montEncode(&e.P.Y.X.X, &e.P.Y.X.X) montEncode(&e.P.Y.X.Y, &e.P.Y.X.Y) montEncode(&e.P.Y.Y.X, &e.P.Y.Y.X) montEncode(&e.P.Y.Y.Y, &e.P.Y.Y.Y) montEncode(&e.P.Y.Z.X, &e.P.Y.Z.X) montEncode(&e.P.Y.Z.Y, &e.P.Y.Z.Y) return m[12*numBytes:], nil }
bn256.go
0.720958
0.452899
bn256.go
starcoder
package buffer import ( "errors" "math" "github.com/tidwall/geojson" "github.com/tidwall/geojson/geo" "github.com/tidwall/geojson/geometry" "github.com/tidwall/gjson" ) // TODO: detect of pole and antimeridian crossing and generate // valid multigeometries const bufferSteps = 15 // Simple performs a very simple buffer operation on a geojson object. func Simple(g geojson.Object, meters float64) (geojson.Object, error) { if meters <= 0 { return g, nil } if math.IsInf(meters, 0) || math.IsNaN(meters) { return g, errors.New("invalid meters") } switch g := g.(type) { case *geojson.Point: return bufferSimplePoint(g.Base(), meters), nil case *geojson.SimplePoint: return bufferSimplePoint(g.Base(), meters), nil case *geojson.MultiPoint: return bufferSimpleGeometries(g.Base(), meters) case *geojson.LineString: return bufferSimpleLineString(g, meters) case *geojson.MultiLineString: return bufferSimpleGeometries(g.Base(), meters) case *geojson.Polygon: return bufferSimplePolygon(g, meters) case *geojson.MultiPolygon: return bufferSimpleGeometries(g.Base(), meters) case *geojson.FeatureCollection: return bufferSimpleFeatures(g.Base(), meters) case *geojson.Feature: bg, err := Simple(g.Base(), meters) if err != nil { return nil, err } return geojson.NewFeature(bg, g.Members()), nil case *geojson.Circle: return Simple(g.Primative(), meters) case nil: return nil, errors.New("cannot buffer nil object") default: typ := gjson.Get(g.JSON(), "type").String() return nil, errors.New("cannot buffer " + typ + " type") } } func bufferSimplePoint(p geometry.Point, meters float64) *geojson.Polygon { meters = geo.NormalizeDistance(meters) points := make([]geometry.Point, 0, bufferSteps+1) // calc the four corners maxY, _ := geo.DestinationPoint(p.Y, p.X, meters, 0) _, maxX := geo.DestinationPoint(p.Y, p.X, meters, 90) minY, _ := geo.DestinationPoint(p.Y, p.X, meters, 180) _, minX := geo.DestinationPoint(p.Y, p.X, meters, 270) // use the half width of the lat and lon lons := (maxX - minX) / 2 
lats := (maxY - minY) / 2 // generate the circle polygon for th := 0.0; th <= 360.0; th += 360.0 / float64(bufferSteps) { radians := (math.Pi / 180) * th x := p.X + lons*math.Cos(radians) y := p.Y + lats*math.Sin(radians) points = append(points, geometry.Point{X: x, Y: y}) } // add last connecting point, make a total of steps+1 points = append(points, points[0]) poly := geojson.NewPolygon( geometry.NewPoly(points, nil, &geometry.IndexOptions{ Kind: geometry.None, }), ) return poly } func bufferSimpleGeometries(objs []geojson.Object, meters float64, ) (*geojson.GeometryCollection, error) { geoms := make([]geojson.Object, len(objs)) for i := 0; i < len(objs); i++ { g, err := Simple(objs[i], meters) if err != nil { return nil, err } geoms[i] = g } return geojson.NewGeometryCollection(geoms), nil } func bufferSimpleFeatures(objs []geojson.Object, meters float64, ) (*geojson.FeatureCollection, error) { geoms := make([]geojson.Object, len(objs)) for i := 0; i < len(objs); i++ { g, err := Simple(objs[i], meters) if err != nil { return nil, err } geoms[i] = g } return geojson.NewFeatureCollection(geoms), nil } // appendBufferSimpleSeries buffers a series and appends its parts to dst func appendBufferSimpleSeries(dst []geojson.Object, s geometry.Series, meters float64) []geojson.Object { nsegs := s.NumSegments() for i := 0; i < nsegs; i++ { dst = appendSimpleBufferSegment(dst, s.SegmentAt(i), meters, i == 0) } return dst } // appendSimpleBufferSegment buffers a segment and appends its parts to dst func appendSimpleBufferSegment(dst []geojson.Object, seg geometry.Segment, meters float64, first bool, ) []geojson.Object { if first { // endcap A dst = append(dst, bufferSimplePoint(seg.A, meters)) } // line polygon bear1 := geo.BearingTo(seg.A.Y, seg.A.X, seg.B.Y, seg.B.X) lat1, lon1 := geo.DestinationPoint(seg.A.Y, seg.A.X, meters, bear1-90) lat2, lon2 := geo.DestinationPoint(seg.A.Y, seg.A.X, meters, bear1+90) bear2 := geo.BearingTo(seg.B.Y, seg.B.X, seg.A.Y, seg.A.X) lat3, 
lon3 := geo.DestinationPoint(seg.B.Y, seg.B.X, meters, bear2-90) lat4, lon4 := geo.DestinationPoint(seg.B.Y, seg.B.X, meters, bear2+90) dst = append(dst, geojson.NewPolygon( geometry.NewPoly([]geometry.Point{ {X: lon1, Y: lat1}, {X: lon2, Y: lat2}, {X: lon3, Y: lat3}, {X: lon4, Y: lat4}, {X: lon1, Y: lat1}, }, nil, nil))) // endcap B dst = append(dst, bufferSimplePoint(seg.B, meters)) return dst } func bufferSimplePolygon(p *geojson.Polygon, meters float64, ) (*geojson.GeometryCollection, error) { var geoms []geojson.Object b := p.Base() geoms = appendBufferSimpleSeries(geoms, b.Exterior, meters) for _, hole := range b.Holes { geoms = appendBufferSimpleSeries(geoms, hole, meters) } geoms = append(geoms, p) return geojson.NewGeometryCollection(geoms), nil } func bufferSimpleLineString(l *geojson.LineString, meters float64, ) (*geojson.GeometryCollection, error) { geoms := appendBufferSimpleSeries(nil, l.Base(), meters) return geojson.NewGeometryCollection(geoms), nil }
internal/buffer/buffer.go
0.519765
0.502563
buffer.go
starcoder
package plot import ( "github.com/df-mc/dragonfly/server/block" "github.com/df-mc/dragonfly/server/block/cube" "github.com/df-mc/dragonfly/server/world" "github.com/go-gl/mathgl/mgl64" ) // Position represents the position of a plot. These positions are similar to chunk positions, in that they // do not represent absolute coordinates, but, instead, a coordinate based on the size of plots. type Position [2]int // PosFromBlockPos returns a Position that reflects the position of the plot present at that position. func PosFromBlockPos(pos cube.Pos, settings Settings) Position { fullPlotSize := pathWidth + boundaryWidth + settings.PlotWidth // Integers are truncated down, so negative numbers will be wrong. We need to account for those. if pos[0] < 0 && mod(int32(pos[0]), int32(fullPlotSize)) != 0 { // Negative number that would be truncated, causing the value to be one higher than required. pos[0] -= fullPlotSize } if pos[2] < 0 && mod(int32(pos[2]), int32(fullPlotSize)) != 0 { // Negative number that would be truncated, causing the value to be one higher than required. pos[2] -= fullPlotSize } return Position{pos[0] / fullPlotSize, pos[2] / fullPlotSize} } // Add adds a Position to the current Position and returns a new resulting Position. func (pos Position) Add(p Position) Position { return Position{pos[0] + p[0], pos[1] + p[1]} } // Hash creates a hash of the position and returns it. This hash is unique per Position and may be used to do // lookups in databases. func (pos Position) Hash() []byte { a, b := int32(pos[0]), int32(pos[1]) return []byte{ byte(a), byte(a >> 8), byte(a >> 16), byte(a >> 24), byte(b), byte(b >> 8), byte(b >> 16), byte(b >> 24), } } // Bounds returns the bounds of the Plot present at this position. Blocks may only be edited within these // block positions. 
func (pos Position) Bounds(settings Settings) (min, max cube.Pos) { fullPlotSize := pathWidth + boundaryWidth + settings.PlotWidth baseX, baseZ := pos[0]*fullPlotSize, pos[1]*fullPlotSize x, z := baseX+pathWidth+1, baseZ+pathWidth+1 return cube.Pos{x, 0, z}, cube.Pos{ baseX + fullPlotSize - 2, 255, baseZ + fullPlotSize - 2, } } // Absolute returns an absolute cube.Pos that holds the corner of the plot. func (pos Position) Absolute(settings Settings) cube.Pos { fullPlotSize := pathWidth + boundaryWidth + settings.PlotWidth baseX, baseZ := pos[0]*fullPlotSize, pos[1]*fullPlotSize return cube.Pos{baseX, 0, baseZ} } // TeleportPosition returns an absolute mgl64.Vec3 that can be used for teleporting the player. func (pos Position) TeleportPosition(settings Settings) mgl64.Vec3 { return pos.Absolute(settings).Add(cube.Pos{2, RoadHeight, 2}).Vec3Middle() } // Within checks if a cube.Pos is within the minimum and maximum cube.Pos passed. func Within(pos, min, max cube.Pos) bool { return (pos[0] >= min[0] && pos[0] <= max[0]) && (pos[1] >= min[1] && pos[1] <= max[1]) && (pos[2] >= min[2] && pos[2] <= max[2]) } // Reset resets the Plot at the Position in the world.World passed. The Settings are used to determine the // bounds of the plot. func (pos Position) Reset(w *world.World, settings Settings) { base := pos.Absolute(settings).Add(cube.Pos{pathWidth + 1, 0, pathWidth + 1}) w.BuildStructure(base, &resetter{settings: settings}) } // resetter is a world.Structure implements that handles the fast resetting of chunks. type resetter struct { settings Settings } // Dimensions returns the dimensions of a plot. func (r *resetter) Dimensions() [3]int { return [3]int{ r.settings.PlotWidth, 256, r.settings.PlotWidth, } } // At returns either dirt, the floor block or air, depending on the y value. 
func (r *resetter) At(_, y, _ int, _ func(x int, y int, z int) world.Block) (world.Block, world.Liquid) { switch { case y < 22: return block.Dirt{}, nil case y == 22: return r.settings.FloorBlock, nil default: return block.Air{}, nil } }
plot/pos.go
0.87035
0.666765
pos.go
starcoder
package main import ( "math" "github.com/pkg/errors" "github.com/prometheus/common/model" ) // Auditor is a struct for auditing prometheus queries type Auditor struct{} // Diff stores a difference between two queries type Diff struct { Series int Diff float64 // avg proportional diff across all series & samples sampleDiffs []float64 // proportional diffs as measured by x/control } // Audit audits two prometheus queries func (a *Auditor) Audit(control, x model.Value) (Diff, error) { if x.Type() == model.ValMatrix && control.Type() == model.ValMatrix { return a.auditMatrix(x.(model.Matrix), control.(model.Matrix)) } if x.Type() == model.ValVector && control.Type() == model.ValVector { return a.auditVector(x.(model.Vector), control.(model.Vector)) } return Diff{}, errors.Errorf("unsupported types for equality: got %s & %s", control.Type().String(), x.Type().String()) } func (a *Auditor) auditMatrix(x, y model.Matrix) (diff Diff, err error) { // different # of returned series if len(x) != len(y) { return diff, errors.Errorf("different # of series: control=%d, other=%d", len(x), len(y)) } for i := 0; i < len(x); i++ { xSeries, ySeries := x[i], y[i] if !xSeries.Metric.Equal(ySeries.Metric) { return diff, errors.Errorf("mismatched metrics: %v vs %v", xSeries.Metric, ySeries.Metric) } xVals, yVals := xSeries.Values, ySeries.Values if len(xVals) != len(yVals) { return diff, errors.Errorf( "mismatched number of samples for series %v. control=%d, other=%d", xSeries.Metric, len(xVals), len(yVals), ) } for j := 0; j < len(xVals); j++ { xSample, ySample := xVals[j], yVals[j] if xSample.Timestamp != ySample.Timestamp { return diff, errors.Errorf( "mismatched timestamp for %d sample of series %v. 
control=%d, other=%d", j, xSeries.Metric, xSample.Timestamp, ySample.Timestamp, ) } absDiff := math.Abs(float64(ySample.Value-xSample.Value)) / math.Abs(float64(xSample.Value)) // 0/0 -> no diff if math.IsNaN(absDiff) { absDiff = 0 } diff.sampleDiffs = append(diff.sampleDiffs, absDiff) } } diff.Series = len(x) var avgDiffProportion float64 for _, d := range diff.sampleDiffs { avgDiffProportion += d } diff.Diff = avgDiffProportion / float64(len(diff.sampleDiffs)) return diff, nil } func (a *Auditor) auditVector(x, y model.Vector) (Diff, error) { return Diff{}, errors.New("unimplemented") }
tools/query-audit/auditor.go
0.76454
0.587766
auditor.go
starcoder
package world import ( "bytes" "encoding/binary" "encoding/json" "fmt" "io" "log" "math" "github.com/danhale-git/mine/nbt" "github.com/danhale-git/nbt2json" ) const subChunkBlockCount = 4096 const chunkSize = 16 // subChunkData is the parsed data for one 16x16 subchunk. A palette including all block states in the subchunk is indexed // by a slice of integers (one for each block) to determine the state and block id for each block in the palette. type subChunkData struct { Blocks blockStorage WaterLogged blockStorage } type blockStorage struct { Indices []int // An index into the palette for each block in the sub chunk Palette []nbt.NBTTag // A palette of block types and states } // subChunkOrigin returns the origin of the chunk containing the given coordinates. This is the corner block with the // lowest x, y and z values. func subChunkOrigin(x, y, z, d int) struct{ x, y, z, d int } { return struct{ x, y, z, d int }{ int(math.Floor(float64(x) / 16)), int(math.Floor(float64(y) / 16)), int(math.Floor(float64(z) / 16)), d, } } // worldVoxelToSubChunk returns the coordinates relative to sub chunk origin, from the given world coordinates. func worldVoxelToSubChunk(x, y, z int) (sx, sy, sz int) { return x % chunkSize, y % chunkSize, z % chunkSize } // voxelToIndex returns the block storage index from the given sub chunk x y and z coordinates. func subChunkVoxelToIndex(x, y, z int) int { if x > 15 || y > 15 || z > 15 { log.Panicf("coordinates %d %d %d are invalid: sub chunk cooridnates may not exceed 0-15", x, y, z) } return y + z*16 + x*16*16 } // indexToVoxel returns the world x y z offset from the sub chunk root for the given block storage index. 
func subChunkIndexToVoxel(i int) (x, y, z int) { x = (i >> 8) & 15 y = i & 15 z = (i >> 4) & 15 return } func parseSubChunk(data []byte) (*subChunkData, error) { r := bytes.NewReader(data) s := subChunkData{} var version int8 if err := readLittleEndian(r, &version); err != nil { return nil, fmt.Errorf("reading version byte: %w", err) } var storageCount int8 switch version { case 1: storageCount = 1 case 8: if err := readLittleEndian(r, &storageCount); err != nil { return nil, fmt.Errorf("reading storage count: %w", err) } default: return nil, fmt.Errorf("unhandled subchunk block storage version: '%d'", version) } var err error s.Blocks.Indices, s.Blocks.Palette, err = parseBlockStorage(r) if err != nil { return nil, fmt.Errorf("parsing water logged: %s", err) } // https://minecraft.fandom.com/wiki/Bedrock_Edition_level_format // In the majority of cases, there is only one storage record. // A second record may be present to indicate block water-logging. switch storageCount { case 0: panic("block storage count is 0") case 1: // Block storage has already been parsed above case 2: // Parse second block storage as water logged if it exists s.WaterLogged.Indices, s.WaterLogged.Palette, err = parseBlockStorage(r) if err != nil { return nil, fmt.Errorf("parsing water logged: %s", err) } // Added some panicking here as the Minecraft level format seems changeable. 
if len(s.WaterLogged.Palette) > 2 { log.Panicf(` second block storage palette exceeded known max length of 2 found these states - %+v`, s.WaterLogged.Palette) } if len(s.WaterLogged.Palette) > 1 && s.WaterLogged.Palette[1].BlockID() != waterID { log.Panicf(` second block storage palette did not have '%s' at index 1 to indicate water logged blocks found id '%s' unexpectedly`, waterID, s.WaterLogged.Palette[1].BlockID()) } default: log.Panicf("unhandled storage count: %d", storageCount) } return &s, nil } func parseBlockStorage(r *bytes.Reader) ([]int, []nbt.NBTTag, error) { var indices []int var palette []nbt.NBTTag indices, err := stateIndices(r) if err != nil { return nil, nil, fmt.Errorf("parsing water logged indices: %s", err) } palette, err = statePalette(r) if err != nil { return nil, nil, fmt.Errorf("parsing nbt data: %s", err) } return indices, palette, nil } // stateIndices reads a single block storage record as the integer indices into the palette. It should be called // the number of times returned by blockStorageCount, after calling blockStorageCount. 
func stateIndices(r *bytes.Reader) ([]int, error) { var bitsPerBlockAndVersion byte if err := readLittleEndian(r, &bitsPerBlockAndVersion); err != nil { log.Fatalf("reading version byte: %s", err) } bitsPerBlock := int(bitsPerBlockAndVersion >> 1) storageVersion := int(bitsPerBlockAndVersion & 1) if storageVersion != 0 { return nil, fmt.Errorf("invalid block storage version %d: 0 is expected for save files", storageVersion) } blocksPerWord := int(math.Floor(32.0 / float64(bitsPerBlock))) wordCount := int(math.Ceil(subChunkBlockCount / float64(blocksPerWord))) indices := make([]int, subChunkBlockCount) i := 0 for w := 0; w < wordCount; w++ { var word int32 if err := readLittleEndian(r, &word); err != nil { return nil, fmt.Errorf("reading word %d from raw data: %s", w, err) } for b := 0; b < blocksPerWord && i < subChunkBlockCount; b++ { indices[i] = int((word >> ((i % blocksPerWord) * bitsPerBlock)) & ((1 << bitsPerBlock) - 1)) i++ } } return indices, nil } // statePalette reads the remainder of a subchunk record and returns a slice of tags. It should be called after blockStorageCount and // the resulting call(s) to stateIndices. func statePalette(r *bytes.Reader) ([]nbt.NBTTag, error) { var paletteSize int32 if err := readLittleEndian(r, &paletteSize); err != nil { return nil, fmt.Errorf("reading palette size bytes: %w", err) } j, err := nbt2json.ReadNbt2Json(r, "", int(paletteSize)) if err != nil { return nil, fmt.Errorf("calling nbt2json, %w", err) } nbtData := struct { NBT []nbt.NBTTag }{} if err := json.Unmarshal(j, &nbtData); err != nil { return nil, fmt.Errorf("unmarshaling json, %w", err) } if len(nbtData.NBT) != int(paletteSize) { return nil, fmt.Errorf("%d nbt records returned for palette size of %d", len(nbtData.NBT), paletteSize) } return nbtData.NBT, nil } func readLittleEndian(r io.Reader, data interface{}) error { return binary.Read(r, binary.ByteOrder(binary.LittleEndian), data) }
world/subchunk.go
0.68458
0.535888
subchunk.go
starcoder
package main import ( "math/rand" "github.com/gonum/plot" "github.com/gonum/plot/plotter" "github.com/gonum/plot/plotutil" "github.com/gonum/plot/vg" "github.com/JoshuaKolden/interp" ) func main() { rand.Seed(int64(0)) stepsplot, err := plot.New() if err != nil { panic(err) } easeout, err := plot.New() if err != nil { panic(err) } easein, err := plot.New() if err != nil { panic(err) } smoothease, err := plot.New() if err != nil { panic(err) } stepsplot.Title.Text = "Step Functions" stepsplot.X.Label.Text = "t" stepsplot.Y.Label.Text = "r" easeout.Title.Text = "EaseOut Functions" easeout.X.Label.Text = "t" easeout.Y.Label.Text = "r" easein.Title.Text = "EaseIn Functions" easein.X.Label.Text = "t" easein.Y.Label.Text = "r" smoothease.Title.Text = "Smoothstep vs Easein/out" smoothease.X.Label.Text = "t" smoothease.Y.Label.Text = "r" err = plotutil.AddLinePoints(stepsplot, "Smoothstep", smoothstepPoints(20), "Linearstep", linearstepPoints(20), "Step", stepPoints(20)) if err != nil { panic(err) } err = plotutil.AddLinePoints(easeout, "Linearstep", linearstepPoints(20), "Easeoutstep 2", easeoutPoints(20, 2), "Easeoutstep 3", easeoutPoints(20, 3), "Easeoutstep 4", easeoutPoints(20, 4), "Easeoutstep 5", easeoutPoints(20, 5)) if err != nil { panic(err) } err = plotutil.AddLinePoints(easein, "Linearstep", linearstepPoints(20), "Easeoutstep 2", easeinPoints(20, 2), "Easeoutstep 3", easeinPoints(20, 3), "Easeoutstep 4", easeinPoints(20, 4), "Easeoutstep 5", easeinPoints(20, 5)) if err != nil { panic(err) } err = plotutil.AddLinePoints(smoothease, "Linearstep", linearstepPoints(20), "Smoothstep", smoothstepPoints(20), "Easeinstep 2", easeinPoints(20, 2), "Easeoutstep 2", easeoutPoints(20, 2)) if err != nil { panic(err) } // Save the plot to a PNG file. if err := stepsplot.Save(4*vg.Inch, 4*vg.Inch, "steps.png"); err != nil { panic(err) } // Save the plot to a PNG file. 
if err := easeout.Save(4*vg.Inch, 4*vg.Inch, "easeout.png"); err != nil { panic(err) } // Save the plot to a PNG file. if err := easein.Save(4*vg.Inch, 4*vg.Inch, "easein.png"); err != nil { panic(err) } // Save the plot to a PNG file. if err := smoothease.Save(4*vg.Inch, 4*vg.Inch, "smoothease.png"); err != nil { panic(err) } } func easeinPoints(n int, exp float64) plotter.XYs { pts := make(plotter.XYs, n) for i := range pts { // range from 0.0 to 1.0 t := float64(i) / float64(n-1) pts[i].X = t pts[i].Y = interp.Easeinstep(t, exp) } return pts } func easeoutPoints(n int, exp float64) plotter.XYs { pts := make(plotter.XYs, n) for i := range pts { // range from 0.0 to 1.0 t := float64(i) / float64(n-1) pts[i].X = t pts[i].Y = interp.Easeoutstep(t, exp) } return pts } func stepPoints(n int) plotter.XYs { pts := make(plotter.XYs, n) for i := range pts { // range from 0.0 to 1.0 t := float64(i) / float64(n-1) pts[i].X = t pts[i].Y = interp.Step(t, 0.5) } return pts } func linearstepPoints(n int) plotter.XYs { pts := make(plotter.XYs, n) for i := range pts { // range from 0.0 to 1.0 t := float64(i) / float64(n-1) pts[i].X = t pts[i].Y = interp.Linearstep(t) } return pts } func smoothstepPoints(n int) plotter.XYs { pts := make(plotter.XYs, n) for i := range pts { // range from 0.0 to 1.0 t := float64(i) / float64(n-1) pts[i].X = t pts[i].Y = interp.Smoothstep(t) } return pts } // randomPoints returns some random x, y points. func randomPoints(n int) plotter.XYs { pts := make(plotter.XYs, n) for i := range pts { if i == 0 { pts[i].X = rand.Float64() } else { pts[i].X = pts[i-1].X + rand.Float64() } pts[i].Y = pts[i].X + 10*rand.Float64() } return pts }
cmd/plot/main.go
0.551815
0.416559
main.go
starcoder
package lm import ( "errors" "fmt" "github.com/alldroll/go-datastructures/rbtree" ) // NGramVectorBuilder is an entity that responses for building NGramVector type NGramVectorBuilder interface { // Put adds the given sequence of nGrams and count to model Put(nGrams []WordID, count WordCount) error // Build creates new instance of NGramVector Build() NGramVector } // ErrNGramOrderIsOutOfRange informs that the given NGrams is out of range for the given var ErrNGramOrderIsOutOfRange = errors.New("nGrams order is out of range") // nGramVectorBuilder implements NGramVectorBuilder interface type nGramVectorBuilder struct { parents []NGramVector tree rbtree.Tree } // NewNGramVectorBuilder creates new instance of NGramVectorBuilder func NewNGramVectorBuilder(parents []NGramVector) NGramVectorBuilder { return &nGramVectorBuilder{ parents: parents, tree: rbtree.New(), } } // Put adds the given sequence of nGrams and count to model func (m *nGramVectorBuilder) Put(nGrams []WordID, count WordCount) error { if len(nGrams) != len(m.parents)+1 { return ErrNGramOrderIsOutOfRange } parent := InvalidContextOffset for i, nGram := range nGrams { if i == len(nGrams)-1 { node := &nGramNode{ key: makeKey(nGram, parent), value: count, } prev := m.tree.Find(node) if prev != nil { (prev.(*nGramNode)).value += count } else { if _, err := m.tree.Insert(node); err != nil { return fmt.Errorf("failed to insert the node: %v", err) } } } else { parent = m.parents[i].GetContextOffset(nGram, parent) } } return nil } // Build creates new instance of NGramVector func (m *nGramVectorBuilder) Build() NGramVector { var node *nGramNode keys := make([]uint64, 0, m.tree.Len()) values := make([]WordCount, 0, m.tree.Len()) total := WordCount(0) for iter := m.tree.NewIterator(); iter.Next() != nil; { node = iter.Get().(*nGramNode) keys = append(keys, node.key) values = append(values, node.value) total += node.value } return &sortedArray{ keys: keys, values: values, total: total, } } // nGramNode represents 
tree node for the given nGram type nGramNode struct { key uint64 value WordCount } // Less tells is current elements is bigger than the other func (n *nGramNode) Less(other rbtree.Item) bool { return n.key < other.(*nGramNode).key }
pkg/lm/ngram_vector_builder.go
0.640074
0.487917
ngram_vector_builder.go
starcoder
package geographiclibgo

import "math"

// GeodesicLine represents a geodesic on an ellipsoid, emanating from
// (lat1, lon1) with azimuth azi1, on which positions can then be computed
// from either a distance [m] or an arc length [deg].
type GeodesicLine struct {
	tiny_  float64
	_A1m1  float64
	_A2m1  float64
	_A3c   float64
	_A4    float64
	_B11   float64
	_B21   float64
	_B31   float64
	_B41   float64
	_C1a   [_GEODESIC_ORDER + 1]float64
	_C1pa  [_GEODESIC_ORDER + 1]float64
	_C2a   [_GEODESIC_ORDER + 1]float64
	_C3a   [_GEODESIC_ORDER]float64
	_C4a   [_GEODESIC_ORDER]float64
	_b     float64
	_c2    float64
	_calp0 float64
	_csig1 float64
	_comg1 float64
	_ctau1 float64
	_dn1   float64
	_f1    float64
	_k2    float64
	_salp0 float64
	_somg1 float64
	_ssig1 float64
	_stau1 float64
	a13    float64
	a      float64
	azi1   float64
	calp1  float64
	caps   uint64
	f      float64
	lat1   float64
	lon1   float64
	s13    float64
	salp1  float64
}

// NewGeodesicLine creates a GeodesicLine, with `caps` of STANDARD | DISTANCE_IN.
func NewGeodesicLine(
	geod Geodesic,
	lat1, lon1, azi1 float64,
) GeodesicLine {
	// Specify default `caps`
	caps := STANDARD | DISTANCE_IN

	return new_geodesic_line_all_options(
		geod, lat1, lon1, azi1, caps, math.NaN(), math.NaN(),
	)
}

// NewGeodesicLineWithCapability is the same as NewGeodesicLine but the user
// specifies a `caps` field.
func NewGeodesicLineWithCapability(
	geod Geodesic,
	lat1, lon1, azi1 float64,
	caps uint64,
) GeodesicLine {
	return new_geodesic_line_all_options(
		geod, lat1, lon1, azi1, caps, math.NaN(), math.NaN(),
	)
}

// new_geodesic_line_all_options is the same as NewGeodesicLine but the user
// specifies a `caps` field.
// If you do not wish to specify `salp1` and/or `calp1`, set them as math.NaN().
func new_geodesic_line_all_options(
	geod Geodesic,
	lat1, lon1, azi1 float64,
	caps uint64,
	salp1, calp1 float64,
) GeodesicLine {
	// This was taken from geodesic, putting it here for convenience
	tiny_ := math.Sqrt(get_min_val())

	a := geod.a
	f := geod.f
	_b := geod.b
	_c2 := geod.c2
	_f1 := geod.f1
	// Always allow latitude, azimuth and unrolled longitude to be reported.
	caps |= LATITUDE | AZIMUTH | LONG_UNROLL

	if math.IsNaN(salp1) || math.IsNaN(calp1) {
		azi1 = ang_normalize(azi1)
		salp1, calp1 = sincosd(ang_round(azi1))
	}

	lat1 = lat_fix(lat1)

	sbet1, cbet1 := sincosd(ang_round(lat1))
	sbet1 *= _f1
	sbet1, cbet1 = norm(sbet1, cbet1)
	cbet1 = math.Max(tiny_, cbet1)
	_dn1 := math.Sqrt(1.0 + geod.ep2*sq(sbet1))
	_salp0 := salp1 * cbet1
	_calp0 := math.Hypot(calp1, salp1*sbet1)
	_ssig1 := sbet1
	_somg1 := _salp0 * sbet1

	var _csig1 float64
	if sbet1 != 0.0 || calp1 != 0.0 {
		_csig1 = cbet1 * calp1
	} else {
		_csig1 = 1.0
	}
	_comg1 := _csig1
	_ssig1, _csig1 = norm(_ssig1, _csig1)

	_k2 := sq(_calp0) * geod.ep2
	eps := _k2 / (2.0*(1.0+math.Sqrt(1.0+_k2)) + _k2)

	_A1m1 := 0.0
	var _C1a [_GEODESIC_ORDER + 1]float64
	_B11 := 0.0
	_stau1 := 0.0
	_ctau1 := 0.0

	if caps&_CAP_C1 != 0 {
		_A1m1 = a1m1f(eps, geod.GEODESIC_ORDER)
		c1f(eps, _C1a[:], int(geod.GEODESIC_ORDER))
		_B11 = sin_cos_series(true, _ssig1, _csig1, _C1a[:])
		s := math.Sin(_B11)
		c := math.Cos(_B11)
		_stau1 = _ssig1*c + _csig1*s
		_ctau1 = _csig1*c - _ssig1*s
	}

	var _C1pa [_GEODESIC_ORDER + 1]float64
	if caps&_CAP_C1p != 0 {
		c1pf(eps, _C1pa[:], int(geod.GEODESIC_ORDER))
	}

	_A2m1 := 0.0
	var _C2a [_GEODESIC_ORDER + 1]float64
	_B21 := 0.0
	if caps&_CAP_C2 != 0 {
		_A2m1 = a2m1f(eps, geod.GEODESIC_ORDER)
		c2f(eps, _C2a[:], int(geod.GEODESIC_ORDER))
		_B21 = sin_cos_series(true, _ssig1, _csig1, _C2a[:])
	}

	var _C3a [_GEODESIC_ORDER]float64
	_A3c := 0.0
	_B31 := 0.0
	if caps&_CAP_C3 != 0 {
		geod._C3f(eps, _C3a[:])
		_A3c = -f * _salp0 * geod._A3f(eps)
		_B31 = sin_cos_series(true, _ssig1, _csig1, _C3a[:])
	}

	var _C4a [_GEODESIC_ORDER]float64
	_A4 := 0.0
	_B41 := 0.0
	if caps&_CAP_C4 != 0 {
		geod._C4f(eps, _C4a[:])
		_A4 = sq(a) * _calp0 * _salp0 * geod.e2
		_B41 = sin_cos_series(false, _ssig1, _csig1, _C4a[:])
	}

	// s13/a13 (position of point 3) are unset until set_distance/set_arc.
	s13 := math.NaN()
	a13 := math.NaN()

	return GeodesicLine{
		tiny_:  tiny_,
		_A1m1:  _A1m1,
		_A2m1:  _A2m1,
		_A3c:   _A3c,
		_A4:    _A4,
		_B11:   _B11,
		_B21:   _B21,
		_B31:   _B31,
		_B41:   _B41,
		_C1a:   _C1a,
		_C1pa:  _C1pa,
		_comg1: _comg1,
		_C2a:   _C2a,
		_C3a:   _C3a,
		_C4a:   _C4a,
		_b:     _b,
		_c2:    _c2,
		_calp0: _calp0,
		_csig1: _csig1,
		_ctau1: _ctau1,
		_dn1:   _dn1,
		_f1:    _f1,
		_k2:    _k2,
		_salp0: _salp0,
		_somg1: _somg1,
		_ssig1: _ssig1,
		_stau1: _stau1,
		a:      a,
		a13:    a13,
		azi1:   azi1,
		calp1:  calp1,
		caps:   caps,
		f:      f,
		lat1:   lat1,
		lon1:   lon1,
		s13:    s13,
		salp1:  salp1,
	}
}

// _gen_position computes the position of point 2, given either a distance
// s12 [m] (arcmode=false) or an arc length a12 [deg] (arcmode=true).
// Quantities not requested via `outmask` (restricted to the line's caps)
// are returned as NaN.
func (g GeodesicLine) _gen_position(arcmode bool, s12_a12 float64, outmask uint64) (
	a12 float64,
	lat2 float64,
	lon2 float64,
	azi2 float64,
	s12 float64,
	m12 float64,
	M12 float64,
	M21 float64,
	S12 float64,
) {
	a12 = math.NaN()
	lat2 = math.NaN()
	lon2 = math.NaN()
	azi2 = math.NaN()
	s12 = math.NaN()
	m12 = math.NaN()
	M12 = math.NaN()
	M21 = math.NaN()
	S12 = math.NaN()

	outmask &= g.caps & OUT_MASK
	if !(arcmode || (g.caps&(OUT_MASK&DISTANCE_IN) != 0)) {
		// Distance-driven positioning requires the DISTANCE_IN capability.
		return a12, lat2, lon2, azi2, s12, m12, M12, M21, S12
	}

	B12 := 0.0
	AB1 := 0.0
	var sig12 float64
	var ssig12 float64
	var csig12 float64
	var ssig2 float64
	var csig2 float64

	if arcmode {
		sig12 = s12_a12 * DEG2RAD
		ssig12, csig12 = sincosd(s12_a12)
	} else {
		// tau12 = s12_a12 / (g._b * (1 + g._A1m1))
		tau12 := s12_a12 / (g._b * (1.0 + g._A1m1))
		s := math.Sin(tau12)
		c := math.Cos(tau12)
		B12 = -sin_cos_series(
			true,
			g._stau1*c+g._ctau1*s,
			g._ctau1*c-g._stau1*s,
			g._C1pa[:],
		)
		sig12 = tau12 - (B12 - g._B11)
		ssig12 = math.Sin(sig12)
		csig12 = math.Cos(sig12)
		if math.Abs(g.f) > 0.01 {
			// One Newton step to refine sig12 for very eccentric ellipsoids.
			ssig2 = g._ssig1*csig12 + g._csig1*ssig12
			csig2 = g._csig1*csig12 - g._ssig1*ssig12
			B12 = sin_cos_series(true, ssig2, csig2, g._C1a[:])
			serr := (1.0+g._A1m1)*(sig12+(B12-g._B11)) - s12_a12/g._b
			sig12 -= serr / math.Sqrt(1.0+g._k2*sq(ssig2))
			ssig12 = math.Sin(sig12)
			csig12 = math.Cos(sig12)
		}
	}

	ssig2 = g._ssig1*csig12 + g._csig1*ssig12
	csig2 = g._csig1*csig12 - g._ssig1*ssig12
	dn2 := math.Sqrt(1.0 + g._k2*sq(ssig2))
	if outmask&(DISTANCE|REDUCEDLENGTH|GEODESICSCALE) != 0 {
		if arcmode || math.Abs(g.f) > 0.01 {
			B12 = sin_cos_series(true, ssig2, csig2, g._C1a[:])
		}
		AB1 = (1.0 + g._A1m1) * (B12 - g._B11)
	}

	sbet2 := g._calp0 * ssig2
	cbet2 := math.Hypot(g._salp0, g._calp0*csig2)
	if cbet2 == 0.0 {
		// Point exactly at a pole: nudge off to avoid 0/0.
		cbet2 = g.tiny_
		csig2 = g.tiny_
	}
	salp2 := g._salp0
	calp2 := g._calp0 * csig2

	if outmask&DISTANCE != 0 {
		if arcmode {
			s12 = g._b * ((1.0+g._A1m1)*sig12 + AB1)
		} else {
			s12 = s12_a12
		}
	}

	if outmask&LONGITUDE != 0 {
		somg2 := g._salp0 * ssig2
		comg2 := csig2
		E := math.Copysign(1, g._salp0)
		var omg12 float64
		if outmask&LONG_UNROLL != 0 {
			omg12 = E * (sig12 - (math.Atan2(ssig2, csig2) - math.Atan2(g._ssig1, g._csig1)) + (math.Atan2((E*somg2), comg2) - math.Atan2((E*g._somg1), g._comg1)))
		} else {
			omg12 = math.Atan2((somg2*g._comg1 - comg2*g._somg1), (comg2*g._comg1 + somg2*g._somg1))
		}
		lam12 := omg12 + g._A3c*(sig12+(sin_cos_series(true, ssig2, csig2, g._C3a[:])-g._B31))
		lon12 := lam12 * RAD2DEG
		if outmask&LONG_UNROLL != 0 {
			lon2 = g.lon1 + lon12
		} else {
			lon2 = ang_normalize(
				ang_normalize(g.lon1) + ang_normalize(lon12),
			)
		}
	}

	if outmask&LATITUDE != 0 {
		lat2 = atan2_deg(sbet2, g._f1*cbet2)
	}

	if outmask&AZIMUTH != 0 {
		azi2 = atan2_deg(salp2, calp2)
	}

	if outmask&(REDUCEDLENGTH|GEODESICSCALE) != 0 {
		B22 := sin_cos_series(true, ssig2, csig2, g._C2a[:])
		AB2 := (1.0 + g._A2m1) * (B22 - g._B21)
		J12 := (g._A1m1-g._A2m1)*sig12 + (AB1 - AB2)
		if outmask&REDUCEDLENGTH != 0 {
			m12 = g._b * ((dn2*(g._csig1*ssig2) - g._dn1*(g._ssig1*csig2)) - g._csig1*csig2*J12)
		}
		if outmask&GEODESICSCALE != 0 {
			t := g._k2 * (ssig2 - g._ssig1) * (ssig2 + g._ssig1) / (g._dn1 + dn2)
			M12 = csig12 + (t*ssig2-csig2*J12)*g._ssig1/g._dn1
			M21 = csig12 - (t*g._ssig1-g._csig1*J12)*ssig2/dn2
		}
	}

	if outmask&AREA != 0 {
		B42 := sin_cos_series(false, ssig2, csig2, g._C4a[:])
		var salp12 float64
		var calp12 float64
		if g._calp0 == 0.0 || g._salp0 == 0.0 {
			// Meridional or equatorial geodesic: use the alp1/alp2 difference.
			salp12 = salp2*g.calp1 - calp2*g.salp1
			calp12 = calp2*g.calp1 + salp2*g.salp1
		} else {
			var to_mul float64
			if csig12 <= 0.0 {
				to_mul = g._csig1*(1.0-csig12) + ssig12*g._ssig1
			} else {
				to_mul = ssig12 * (g._csig1*ssig12/(1.0+csig12) + g._ssig1)
			}
			salp12 = g._calp0 * g._salp0 * to_mul
			calp12 = sq(g._salp0) + sq(g._calp0)*g._csig1*csig2
		}
		S12 = g._c2*math.Atan2(salp12, calp12) + g._A4*(B42-g._B41)
	}

	if arcmode {
		a12 = s12_a12
	} else {
		a12 = sig12 * RAD2DEG
	}
	return a12, lat2, lon2, azi2, s12, m12, M12, M21, S12
}

// PositionResultStandard holds the results of PositionStandard.
type PositionResultStandard struct {
	Lat1Deg   float64 // Latitude of point 1 [degrees]
	Lon1Deg   float64 // Longitude of point 1 [degrees]
	Azi1Deg   float64 // Azimuth of point 1 [degrees]
	Lat2Deg   float64 // Latitude of point 2 [degrees]
	Lon2Deg   float64 // Longitude of point 2 [degrees]
	Azi2Deg   float64 // Azimuth of point 2 [degrees]
	DistanceM float64 // Distance from point 1 to point 2 [meters]
}

// PositionStandard finds the position on the line given s12_m [meters].
// It uses the STANDARD capabilities, and returns a PositionResultStandard struct.
func (g GeodesicLine) PositionStandard(s12_m float64) PositionResultStandard {
	outmask := STANDARD
	_, lat2, lon2, azi2, _, _, _, _, _ := g._gen_position(false, s12_m, outmask)
	return PositionResultStandard{
		Lat1Deg:   g.lat1,
		Lon1Deg:   g.lon1,
		Azi1Deg:   g.azi1,
		Lat2Deg:   lat2,
		Lon2Deg:   lon2,
		Azi2Deg:   azi2,
		DistanceM: s12_m,
	}
}

// PositionResult holds the results of PositionWithCapabilities.
type PositionResult struct {
	Lat1Deg        float64 // Latitude of point 1 [degrees]
	Lon1Deg        float64 // Longitude of point 1 [degrees]
	Azi1Deg        float64 // Azimuth of point 1 [degrees]
	Lat2Deg        float64 // Latitude of point 2 [degrees]
	Lon2Deg        float64 // Longitude of point 2 [degrees]
	Azi2Deg        float64 // Azimuth of point 2 [degrees]
	DistanceM      float64 // Distance from point 1 to point 2 [meters]
	ArcLengthDeg   float64 // Arc length between point 1 and point 2 [degrees]
	ReducedLengthM float64 // Reduced length of the geodesic [meters]
	M12            float64 // Geodesic scale of point 2 relative to point 1 [dimensionless]
	M21            float64 // Geodesic scale of point 1 relative to point 2 [dimensionless]
	S12M2          float64 // Area under the geodesic [meters^2]
}

// PositionWithCapabilities finds the position on the line given s12_m [meters].
// It uses whatever capabilities are handed in. Any results not asked for with
// the capabilities will be math.NaN().
func (g GeodesicLine) PositionWithCapabilities(s12_m float64, capabilities uint64) PositionResult {
	a12, lat2, lon2, azi2, s12, m12, M12, M21, S12 := g._gen_position(false, s12_m, capabilities)

	// With LONG_UNROLL the longitude of point 1 is reported as given
	// (unwrapped); otherwise it is normalized to [-180, 180]. The previous
	// code had this condition inverted, which contradicted the LONG_UNROLL
	// semantics used for lon2 in _gen_position and in GeographicLib.
	outlon1 := g.lon1
	if capabilities&LONG_UNROLL == 0 {
		outlon1 = ang_normalize(g.lon1)
	}

	return PositionResult{
		Lat1Deg:        g.lat1,
		Lon1Deg:        outlon1,
		Azi1Deg:        g.azi1,
		Lat2Deg:        lat2,
		Lon2Deg:        lon2,
		Azi2Deg:        azi2,
		DistanceM:      s12,
		ArcLengthDeg:   a12,
		ReducedLengthM: m12,
		M12:            M12,
		M21:            M21,
		S12M2:          S12,
	}
}

// set_arc: specify the position of point 3 in terms of arc length `a13`, the
// spherical arc length from point 1 to point 3 in degrees.
func (g *GeodesicLine) set_arc(a13 float64) {
	g.a13 = a13
	_, _, _, _, g.s13, _, _, _, _ = g._gen_position(true, g.a13, DISTANCE)
}

// set_distance specifies the position of point 3 in terms of distance [m].
func (g *GeodesicLine) set_distance(s13_m float64) {
	g.s13 = s13_m
	g.a13, _, _, _, _, _, _, _, _ = g._gen_position(false, g.s13, 0)
}
geodesicline.go
0.702734
0.459986
geodesicline.go
starcoder
package creepto

import (
	"encoding/hex"
	"fmt"
	"github.com/syahrul12345/secp256k1/curve"
	"github.com/syahrul12345/secp256k1/fieldelement"
	"github.com/syahrul12345/secp256k1/utils"
	"math/big"
)

var (
	// Order is the group order n of secp256k1, hex-encoded with a "0x" prefix.
	Order string = "0xfffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141"
	// P is the secp256k1 field prime 2^256 - 2^32 - 977, hex-encoded WITHOUT a prefix.
	P string = big.NewInt(0).Sub(
		big.NewInt(0).Sub(
			big.NewInt(0).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0)),
			big.NewInt(0).Exp(big.NewInt(2), big.NewInt(32), big.NewInt(0))),
		big.NewInt(977)).Text(16)
)

// errorMessage is a trivial error implementation wrapping a message string.
type errorMessage struct {
	s string
}

func (e *errorMessage) Error() string {
	return e.s
}

// Point256 represents the public key on the elliptical curve.
type Point256 struct {
	X *fieldelement.FieldElement
	Y *fieldelement.FieldElement
	A fieldelement.FieldElement
	B fieldelement.FieldElement
}

// New256Point creates a Point256 representing a public key.
// Returns nil (after logging) if (x, y) is not a valid curve point.
func New256Point(x string, y string) *Point256 {
	newPoint, pointError := curve.NewPoint(x, y)
	if pointError != nil {
		fmt.Println(pointError)
		return nil
	}
	return &Point256{
		newPoint.X,
		newPoint.Y,
		newPoint.A,
		newPoint.B,
	}
}

// NormalTo256 will convert the given point to a Point256 type.
func NormalTo256(point *curve.Point) *Point256 {
	return &Point256{
		point.X,
		point.Y,
		point.A,
		point.B,
	}
}

// Mul will multiply point256 with a coefficient (scalar multiplication).
// The coefficient is reduced modulo the group order before multiplying.
func (point256 *Point256) Mul(coefficient string) (*curve.Point, error) {
	// Check if the coefficient is already hexed
	coeff := utils.ToBigInt(coefficient)
	modoBig, ok := big.NewInt(0).SetString(Order[2:], 16)
	if !ok {
		// NOTE(review): returns (nil, nil) on a parse failure of the constant
		// Order — callers get no error to act on; confirm this is intended.
		return nil, nil
	}
	// Sets the new coefficient
	coeff.Mod(coeff, modoBig)
	coeffString := "0x" + coeff.Text(16)
	// We create a normal point that can do the multiplication
	x := point256.X.Number
	y := point256.Y.Number
	// The point is known to lie on the curve, but the error is still checked.
	tempPoint, err := curve.NewPoint("0x"+x.Text(16), "0x"+y.Text(16))
	if err != nil {
		return nil, err
	}
	result, err2 := tempPoint.Mul(coeffString)
	if err2 != nil {
		return nil, err2
	}
	// Return and reconvert it to a point256
	return result, nil
}

// Verify will verify if the public key has signed the signature hash z,
// producing the enclosed signature (ECDSA verification):
//
//	u = z/s, v = r/s, R = uG + vP; valid iff R.x == r
func (point256 *Point256) Verify(signatureHash string, sig *Signature) (bool, error) {
	z := utils.ToBigInt(signatureHash)
	r := sig.R
	s := sig.S
	bigOrder := utils.ToBigInt(Order)
	// s^-1 computed via Fermat's little theorem: s^(n-2) mod n
	newOrder := big.NewInt(0).Sub(bigOrder, big.NewInt(2))
	sInv := big.NewInt(0).Exp(s, newOrder, bigOrder)
	//u = z/s
	u1 := big.NewInt(0).Mul(z, sInv)
	u := big.NewInt(0).Mod(u1, bigOrder)
	//v = r/s
	v1 := big.NewInt(0).Mul(r, sInv)
	v := big.NewInt(0).Mod(v1, bigOrder)
	//R(r,s) = uG + vP
	//Create G, the secp256k1 generator point
	G, _ := curve.NewPoint("0x79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798", "0x483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8")
	firstTerm, err := G.Mul("0x" + u.Text(16))
	if err != nil {
		return false, err
	}
	secondTerm, err := point256.Mul("0x" + v.Text(16))
	if err != nil {
		return false, err
	}
	R, err := firstTerm.Add(secondTerm)
	if err != nil {
		return false, err
	}
	res := R.X.Number.Cmp(r)
	if res == 0 {
		return true, nil
	}
	return false, &errorMessage{"The message cannot be verified. Wrong signature for the public key"}
}

// SEC will create the serialized public key for propagation.
// Takes a boolean parameter to determine if the output is compressed or not.
// NOTE(review): the hex coordinates are emitted via Text(16), which drops
// leading zeros — SEC encoding expects fixed 32-byte (64 hex char)
// coordinates; confirm whether padding is handled elsewhere.
func (point256 *Point256) SEC(compressed bool) string {
	if compressed {
		// Prefix 02 for even y, 03 for odd y.
		y := big.NewInt(0).Mod(point256.Y.Number, big.NewInt(2))
		if y.Cmp(big.NewInt(0)) == 0 {
			return "02" + point256.X.Number.Text(16)
		}
		return "03" + point256.X.Number.Text(16)
	}
	// Uncompressed: prefix 04 followed by both coordinates.
	xBytes := new(big.Int).SetBytes(point256.X.Number.Bytes())
	yBytes := new(big.Int).SetBytes(point256.Y.Number.Bytes())
	textX := xBytes.Text(16)
	textY := yBytes.Text(16)
	return "04" + textX + textY
}

// ParseSec will return the Point256 decoded from a SEC-encoded hex string.
// For compressed keys, the y coordinate is recovered from y^2 = x^3 + 7
// and the parity indicated by the 02/03 prefix.
func ParseSec(secString string) *Point256 {
	// Lets convert the string to bytes
	byteSec, _ := hex.DecodeString(secString)
	if byteSec[0] == 4 {
		// Uncompressed: both coordinates present.
		x := "0x" + hex.EncodeToString(byteSec[1:33])
		y := "0x" + hex.EncodeToString(byteSec[33:65])
		return New256Point(x, y)
	}
	var isEven bool
	if byteSec[0] == 2 {
		isEven = true
	} else {
		isEven = false
	}
	bytesString := "0x" + hex.EncodeToString(byteSec[1:])
	x := fieldelement.NewFieldElement(bytesString)
	a := fieldelement.NewFieldElement("0")
	b := fieldelement.NewFieldElement("7")
	// alpha = x^3 + 7 (secp256k1 has a = 0)
	alpha := x.Pow("3").Add(b)
	beta := alpha.Sqrt()
	var evenBeta fieldelement.FieldElement
	var oddBeta fieldelement.FieldElement
	// The two square roots are beta and P - beta; pick by parity.
	if big.NewInt(0).Mod(beta.Number, big.NewInt(2)).Cmp(big.NewInt(0)) == 0 {
		evenBeta = beta
		Pbig, _ := big.NewInt(0).SetString(P, 16)
		oddBeta = fieldelement.NewFieldElement(big.NewInt(0).Sub(Pbig, beta.Number).String())
	} else {
		Pbig, _ := big.NewInt(0).SetString(P, 16)
		evenBeta = fieldelement.NewFieldElement(big.NewInt(0).Sub(Pbig, beta.Number).String())
		oddBeta = beta
	}
	if isEven {
		return &Point256{
			&x,
			&evenBeta,
			a,
			b,
		}
	}
	return &Point256{
		&x,
		&oddBeta,
		a,
		b,
	}
}

// hash160 returns HASH160 (SHA-256 then RIPEMD-160) of the SEC encoding.
func (point256 *Point256) hash160(compressed bool) string {
	return utils.Hash160(point256.SEC(compressed))
}

// GetAddress will get the address from the pubkey.
// The version prefix is 0x6f for testnet and 0x00 for mainnet, followed by
// a Base58Check encoding of the HASH160.
func (point256 *Point256) GetAddress(compressed bool, testnet bool) string {
	hashedSEC := point256.hash160(compressed)
	var prefix string
	if testnet {
		prefix = "6f"
	} else {
		prefix = "00"
	}
	hashedWithPrefix := utils.Encode58CheckSum(prefix + hashedSEC)
	return hashedWithPrefix
}
creepto/creepto.go
0.633637
0.430866
creepto.go
starcoder
package mat32

import (
	"github.com/nlpodyssey/spago/pkg/mat32/internal/math32"
	"math"
)

// Float is the main float type for the mat32 package. It is an alias for float32.
type Float = float32

const (
	// SmallestNonzeroFloat corresponds to math.SmallestNonzeroFloat32.
	SmallestNonzeroFloat = Float(math.SmallestNonzeroFloat32)
	// Pi mathematical constant.
	Pi = Float(math.Pi)
)

// Pow returns x**y, the base-x exponential of y.
func Pow(x, y Float) Float {
	return math32.Pow(x, y)
}

// Cos returns the cosine of the radian argument x.
// Computed in float64 via the standard library, then truncated to Float.
func Cos(x Float) Float {
	return Float(math.Cos(float64(x)))
}

// Sin returns the sine of the radian argument x.
// Computed in float64 via the standard library, then truncated to Float.
func Sin(x Float) Float {
	return Float(math.Sin(float64(x)))
}

// Cosh returns the hyperbolic cosine of x.
func Cosh(x Float) Float {
	return Float(math.Cosh(float64(x)))
}

// Sinh returns the hyperbolic sine of x.
func Sinh(x Float) Float {
	return Float(math.Sinh(float64(x)))
}

// Exp returns e**x, the base-e exponential of x.
func Exp(x Float) Float {
	return math32.Exp(x)
}

// Abs returns the absolute value of x.
func Abs(x Float) Float {
	return math32.Abs(x)
}

// Sqrt returns the square root of x.
func Sqrt(x Float) Float {
	return math32.Sqrt(x)
}

// Log returns the natural logarithm of x.
func Log(x Float) Float {
	return math32.Log(x)
}

// Tan returns the tangent of the radian argument x.
func Tan(x Float) Float {
	return Float(math.Tan(float64(x)))
}

// Tanh returns the hyperbolic tangent of x.
func Tanh(x Float) Float {
	return math32.Tanh(x)
}

// Max returns the larger of x or y.
func Max(x, y Float) Float {
	return math32.Max(x, y)
}

// Inf returns positive infinity if sign >= 0, negative infinity if sign < 0.
func Inf(sign int) Float {
	return math32.Inf(sign)
}

// IsInf reports whether f is an infinity, according to sign.
func IsInf(f Float, sign int) bool {
	return math32.IsInf(f, sign)
}

// NaN returns an IEEE 754 "not-a-number" value.
func NaN() Float {
	return math32.NaN()
}

// Ceil returns the least integer value greater than or equal to x.
func Ceil(x Float) Float {
	return math32.Ceil(x)
}

// Floor returns the greatest integer value less than or equal to x.
func Floor(x Float) Float {
	return math32.Floor(x)
}

// Round returns the nearest integer, rounding half away from zero.
func Round(x Float) Float {
	return Float(math.Round(float64(x)))
}
pkg/mat32/mat32.go
0.917349
0.663001
mat32.go
starcoder
package dasel

import (
	"fmt"
	"reflect"
)

// propagate recursively propagates the given node's value up to the root node.
func propagate(n *Node) error {
	if n.Previous == nil {
		return nil
	}
	if err := propagateValue(n); err != nil {
		return fmt.Errorf("could not propagate value: %w", err)
	}
	return propagate(n.Previous)
}

// propagateValue sends the value of the current node up to the previous node
// in the chain, dispatching on the selector type.
func propagateValue(n *Node) error {
	if n.Previous == nil {
		return nil
	}
	switch n.Selector.Type {
	case "PROPERTY":
		return propagateValueProperty(n)
	case "INDEX":
		return propagateValueIndex(n)
	case "NEXT_AVAILABLE_INDEX":
		return propagateValueNextAvailableIndex(n)
	default:
		return &UnsupportedSelector{Selector: n.Selector.Type}
	}
}

// propagateValueProperty writes the current node's value into the parent map
// under the selector's property key.
func propagateValueProperty(n *Node) error {
	if !isValid(n.Previous.Value) {
		return &UnexpectedPreviousNilValue{Selector: n.Previous.Selector.Current}
	}
	value := unwrapValue(n.Previous.Value)
	if value.Kind() == reflect.Map {
		value.SetMapIndex(reflect.ValueOf(n.Selector.Property), n.Value)
		return nil
	}
	return &UnsupportedTypeForSelector{Selector: n.Selector, Value: n.Previous.Value}
}

// propagateValueIndex writes the current node's value into the parent slice at
// the selector's index; an out-of-range index appends instead.
func propagateValueIndex(n *Node) error {
	if !isValid(n.Previous.Value) {
		return &UnexpectedPreviousNilValue{Selector: n.Previous.Selector.Current}
	}
	value := unwrapValue(n.Previous.Value)
	if value.Kind() == reflect.Slice {
		if n.Selector.Index >= 0 && n.Selector.Index < value.Len() {
			value.Index(n.Selector.Index).Set(n.Value)
			return nil
		}
		n.Previous.setReflectValue(reflect.Append(value, n.Value))
		return nil
	}
	return &UnsupportedTypeForSelector{Selector: n.Selector, Value: value}
}

// propagateValueNextAvailableIndex appends the current node's value to the
// parent slice.
func propagateValueNextAvailableIndex(n *Node) error {
	if !isValid(n.Previous.Value) {
		return &UnexpectedPreviousNilValue{Selector: n.Previous.Selector.Current}
	}
	value := unwrapValue(n.Previous.Value)
	if value.Kind() == reflect.Slice {
		n.Previous.setReflectValue(reflect.Append(value, n.Value))
		return nil
	}
	return &UnsupportedTypeForSelector{Selector: n.Selector, Value: value}
}

// deleteFromParent deletes the given node from its parent, dispatching on the
// selector type.
func deleteFromParent(n *Node) error {
	if n.Previous == nil {
		return nil
	}
	switch n.Selector.Type {
	case "PROPERTY":
		return deleteFromParentProperty(n)
	case "INDEX":
		return deleteFromParentIndex(n)
	default:
		return &UnsupportedSelector{Selector: n.Selector.Type}
	}
}

// deleteFromParentProperty removes the current node's entry from the parent map.
func deleteFromParentProperty(n *Node) error {
	if !isValid(n.Previous.Value) {
		return &UnexpectedPreviousNilValue{Selector: n.Previous.Selector.Current}
	}
	value := unwrapValue(n.Previous.Value)
	if value.Kind() == reflect.Map {
		// Setting a zero Value deletes the key from the map.
		value.SetMapIndex(reflect.ValueOf(n.Selector.Property), reflect.Value{})
		return nil
	}
	return &UnsupportedTypeForSelector{Selector: n.Selector, Value: n.Previous.Value}
}

// deleteFromParentIndex marks the current node's element in the parent slice
// for deletion; the actual removal happens later in cleanupSliceDeletions.
func deleteFromParentIndex(n *Node) error {
	if !isValid(n.Previous.Value) {
		return &UnexpectedPreviousNilValue{Selector: n.Previous.Selector.Current}
	}
	value := unwrapValue(n.Previous.Value)
	if value.Kind() == reflect.Slice {
		if n.Selector.Index >= 0 && n.Selector.Index < value.Len() {
			// Mark this index for deletion.
			// We can't just rewrite the slice here in-case other selectors also target it.
			value.Index(n.Selector.Index).Set(getDeletePlaceholder(value.Index(n.Selector.Index)))
		}
		return nil
	}
	return &UnsupportedTypeForSelector{Selector: n.Selector, Value: value}
}

// cleanupSliceDeletions scans through the given reflect value and removes any
// invalid or delete-marked elements.
// Returns false if no modification was made.
func cleanupSliceDeletions(input reflect.Value) (reflect.Value, bool) {
	value := unwrapValue(input)
	if value.Kind() != reflect.Slice {
		return value, false
	}
	res := reflect.MakeSlice(value.Type(), 0, value.Len())
	invalidCount := 0
	for i := 0; i < value.Len(); i++ {
		item := value.Index(i)
		if !item.IsValid() || isDeletePlaceholder(item) {
			invalidCount++
			continue
		}
		res = reflect.Append(res, item)
	}
	if invalidCount == 0 {
		return value, false
	}
	return res, true
}

// Sentinel values used to mark slice elements as deleted until cleanup runs.
const deletePlaceholderKey = "dasel:delete:key"
const deletePlaceholder = "dasel:delete:me"

// getDeletePlaceholder returns a placeholder value shaped like the item it
// replaces (map, slice, or scalar) so the slice element type stays assignable.
func getDeletePlaceholder(item reflect.Value) reflect.Value {
	switch unwrapValue(item).Kind() {
	case reflect.Map:
		return reflect.ValueOf(map[string]interface{}{
			deletePlaceholderKey: deletePlaceholder,
		})
	case reflect.Slice:
		return reflect.ValueOf([]interface{}{deletePlaceholder})
	default:
		return reflect.ValueOf(deletePlaceholder)
	}
}

// isDeletePlaceholder reports whether the given value is one of the
// placeholders produced by getDeletePlaceholder.
func isDeletePlaceholder(item reflect.Value) bool {
	switch i := unwrapValue(item); i.Kind() {
	case reflect.Map:
		if val, ok := i.Interface().(map[string]interface{})[deletePlaceholderKey]; ok {
			if val == deletePlaceholder {
				return true
			}
		}
	case reflect.Slice:
		for _, val := range i.Interface().([]interface{}) {
			if val == deletePlaceholder {
				return true
			}
		}
	default:
		if val, ok := i.Interface().(string); ok {
			if val == deletePlaceholder {
				return true
			}
		}
	}
	return false
}
node_propagate.go
0.747155
0.418281
node_propagate.go
starcoder
package tart

// Developed by <NAME> and featured in Commodities
// magazine in 1980, the Commodity Channel Index (CCI) is
// a versatile indicator that can be used to identify a new
// trend or warn of extreme conditions. Lambert originally
// developed CCI to identify cyclical turns in commodities,
// but the indicator can be successfully applied to indices,
// ETFs, stocks and other securities. In general, CCI measures
// the current price level relative to an average price level
// over a given period of time. CCI is relatively high when
// prices are far above their average, but is relatively low
// when prices are far below their average. In this manner,
// CCI can be used to identify overbought and oversold levels.
// https://school.stockcharts.com/doku.php?id=technical_indicators:commodity_channel_index_cci
// https://www.investopedia.com/terms/c/commoditychannelindex.asp
// https://www.fidelity.com/learning-center/trading-investing/technical-analysis/technical-indicator-guide/cci
type Cci struct {
	n          int64 // look-back period
	initPeriod int64 // warm-up length: max of the SMA and Dev init periods
	avg        *Sma  // moving average of the typical price
	dev        *Dev  // mean deviation of the typical price
	sz         int64 // number of samples seen so far
}

// NewCci returns a streaming CCI indicator with period n.
func NewCci(n int64) *Cci {
	avg := NewSma(n)
	dev := NewDev(n)
	a := avg.InitPeriod()
	b := dev.InitPeriod()
	if a < b {
		a = b
	}
	return &Cci{
		n:          n,
		initPeriod: a,
		avg:        avg,
		dev:        dev,
		sz:         0,
	}
}

// Update feeds one bar's high, low and close and returns the current CCI
// value, computed from the typical price (h+l+c)/3. Returns 0 while the
// deviation is (almost) zero to avoid dividing by zero.
func (d *Cci) Update(h, l, c float64) float64 {
	d.sz++
	m := (h + l + c) / 3.0
	avg := d.avg.Update(m)
	dev := d.dev.Update(m)
	if almostZero(dev) {
		return 0
	}
	// 0.015 is Lambert's scaling constant.
	return (m - avg) / (0.015 * dev)
}

// InitPeriod returns the number of updates needed before output is meaningful.
func (d *Cci) InitPeriod() int64 {
	return d.initPeriod
}

// Valid reports whether enough samples have been seen for the output to be valid.
func (d *Cci) Valid() bool {
	return d.sz > d.initPeriod
}

// CciArr computes the Commodity Channel Index with period n over the given
// high/low/close series, returning one value per input bar. See the Cci type
// above for a description of the indicator and references.
func CciArr(h, l, c []float64, n int64) []float64 {
	out := make([]float64, len(h))

	d := NewCci(n)
	for i := 0; i < len(h); i++ {
		out[i] = d.Update(h[i], l[i], c[i])
	}

	return out
}
cci.go
0.630685
0.524029
cci.go
starcoder
package onshape

import (
	"encoding/json"
)

// BTPExpressionAs238AllOf struct for BTPExpressionAs238AllOf.
// NOTE(review): this file follows the OpenAPI-generator accessor pattern;
// if it is machine-generated, prefer regenerating over editing by hand.
type BTPExpressionAs238AllOf struct {
	BtType   *string         `json:"btType,omitempty"`
	Operand  *BTPExpression9 `json:"operand,omitempty"`
	TypeName *BTPTypeName290 `json:"typeName,omitempty"`
}

// NewBTPExpressionAs238AllOf instantiates a new BTPExpressionAs238AllOf object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewBTPExpressionAs238AllOf() *BTPExpressionAs238AllOf {
	this := BTPExpressionAs238AllOf{}
	return &this
}

// NewBTPExpressionAs238AllOfWithDefaults instantiates a new BTPExpressionAs238AllOf object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewBTPExpressionAs238AllOfWithDefaults() *BTPExpressionAs238AllOf {
	this := BTPExpressionAs238AllOf{}
	return &this
}

// GetBtType returns the BtType field value if set, zero value otherwise.
func (o *BTPExpressionAs238AllOf) GetBtType() string {
	if o == nil || o.BtType == nil {
		var ret string
		return ret
	}
	return *o.BtType
}

// GetBtTypeOk returns a tuple with the BtType field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTPExpressionAs238AllOf) GetBtTypeOk() (*string, bool) {
	if o == nil || o.BtType == nil {
		return nil, false
	}
	return o.BtType, true
}

// HasBtType returns a boolean if a field has been set.
func (o *BTPExpressionAs238AllOf) HasBtType() bool {
	if o != nil && o.BtType != nil {
		return true
	}

	return false
}

// SetBtType gets a reference to the given string and assigns it to the BtType field.
func (o *BTPExpressionAs238AllOf) SetBtType(v string) {
	o.BtType = &v
}

// GetOperand returns the Operand field value if set, zero value otherwise.
func (o *BTPExpressionAs238AllOf) GetOperand() BTPExpression9 {
	if o == nil || o.Operand == nil {
		var ret BTPExpression9
		return ret
	}
	return *o.Operand
}

// GetOperandOk returns a tuple with the Operand field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTPExpressionAs238AllOf) GetOperandOk() (*BTPExpression9, bool) {
	if o == nil || o.Operand == nil {
		return nil, false
	}
	return o.Operand, true
}

// HasOperand returns a boolean if a field has been set.
func (o *BTPExpressionAs238AllOf) HasOperand() bool {
	if o != nil && o.Operand != nil {
		return true
	}

	return false
}

// SetOperand gets a reference to the given BTPExpression9 and assigns it to the Operand field.
func (o *BTPExpressionAs238AllOf) SetOperand(v BTPExpression9) {
	o.Operand = &v
}

// GetTypeName returns the TypeName field value if set, zero value otherwise.
func (o *BTPExpressionAs238AllOf) GetTypeName() BTPTypeName290 {
	if o == nil || o.TypeName == nil {
		var ret BTPTypeName290
		return ret
	}
	return *o.TypeName
}

// GetTypeNameOk returns a tuple with the TypeName field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTPExpressionAs238AllOf) GetTypeNameOk() (*BTPTypeName290, bool) {
	if o == nil || o.TypeName == nil {
		return nil, false
	}
	return o.TypeName, true
}

// HasTypeName returns a boolean if a field has been set.
func (o *BTPExpressionAs238AllOf) HasTypeName() bool {
	if o != nil && o.TypeName != nil {
		return true
	}

	return false
}

// SetTypeName gets a reference to the given BTPTypeName290 and assigns it to the TypeName field.
func (o *BTPExpressionAs238AllOf) SetTypeName(v BTPTypeName290) {
	o.TypeName = &v
}

// MarshalJSON serializes only the fields that have been set (nil pointers are omitted).
func (o BTPExpressionAs238AllOf) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	if o.BtType != nil {
		toSerialize["btType"] = o.BtType
	}
	if o.Operand != nil {
		toSerialize["operand"] = o.Operand
	}
	if o.TypeName != nil {
		toSerialize["typeName"] = o.TypeName
	}
	return json.Marshal(toSerialize)
}

// NullableBTPExpressionAs238AllOf distinguishes an explicit null from an unset value.
type NullableBTPExpressionAs238AllOf struct {
	value *BTPExpressionAs238AllOf
	isSet bool
}

func (v NullableBTPExpressionAs238AllOf) Get() *BTPExpressionAs238AllOf {
	return v.value
}

func (v *NullableBTPExpressionAs238AllOf) Set(val *BTPExpressionAs238AllOf) {
	v.value = val
	v.isSet = true
}

func (v NullableBTPExpressionAs238AllOf) IsSet() bool {
	return v.isSet
}

func (v *NullableBTPExpressionAs238AllOf) Unset() {
	v.value = nil
	v.isSet = false
}

func NewNullableBTPExpressionAs238AllOf(val *BTPExpressionAs238AllOf) *NullableBTPExpressionAs238AllOf {
	return &NullableBTPExpressionAs238AllOf{value: val, isSet: true}
}

func (v NullableBTPExpressionAs238AllOf) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}

func (v *NullableBTPExpressionAs238AllOf) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
0.720663
0.499023
model_btp_expression_as_238_all_of.go
starcoder
package main import ( "bufio" "fmt" "os" "time" // My packages "github.com/rsdoiel/stngo" // Caltech Library packages "github.com/caltechlibrary/cli" ) var ( synopsis = ` %s is a standard timesheet notation parser. ` description = ` %s parses content in "Standard Timesheet Notation". By default it parse them into a tabular format but can also optionally parse them into a stream of JSON blobs. ` examples = ` This will parse the TimeSheet.txt file into a table. ` + "```" + ` %s < TimeSheet.txt ` + "```" + ` This will parse TimeSheet.txt file into a stream of JSON blobs. ` + "```" + ` %s -json < TimeSheet.txt ` + "```" + ` ` // Standard Options showHelp bool showLicense bool showVersion bool showExamples bool inputFName string outputFName string quiet bool generateMarkdown bool generateManPage bool // App Options asJSON bool ) func main() { // Configuration and command line interation app := cli.NewCli(stn.Version) appName := app.AppName() // Document expected parameters (non-option args) app.SetParams("[TIME_DESCRIPTION]") app.AddHelp("license", []byte(fmt.Sprintf(stn.LicenseText, appName, stn.Version))) app.AddHelp("synopsis", []byte(fmt.Sprintf(synopsis, appName))) app.AddHelp("description", []byte(fmt.Sprintf(description, appName))) app.AddHelp("examples", []byte(fmt.Sprintf(examples, appName, appName))) // Standard Options app.BoolVar(&showHelp, "h,help", false, "display help") app.BoolVar(&showLicense, "l,license", false, "display license") app.BoolVar(&showVersion, "v,version", false, "display version") app.BoolVar(&showExamples, "examples", false, "display example(s)") app.StringVar(&inputFName, "i,input", "", "input filename") app.StringVar(&outputFName, "o,output", "", "output filename") app.BoolVar(&quiet, "quiet", false, "suppress error messages") app.BoolVar(&generateMarkdown, "generate-markdown", false, "generate markdown documentation") app.BoolVar(&generateManPage, "generate-manpage", false, "generate man page") // App Options app.BoolVar(&asJSON, 
"j,json", false, "output JSON format") app.Parse() args := app.Args() // Setup IO var err error app.Eout = os.Stderr app.In, err = cli.Open(inputFName, os.Stdin) cli.ExitOnError(app.Eout, err, quiet) defer cli.CloseFile(inputFName, app.In) app.Out, err = cli.Create(outputFName, os.Stdout) cli.ExitOnError(app.Eout, err, quiet) defer cli.CloseFile(outputFName, app.Out) // Handle Options if generateMarkdown { app.GenerateMarkdown(app.Out) os.Exit(0) } if generateManPage { app.GenerateManPage(app.Out) os.Exit(0) } if showHelp || showExamples { if len(args) > 0 { fmt.Fprintln(app.Out, app.Help(args...)) } else if showExamples { fmt.Fprintln(app.Out, app.Help("examples")) } else { app.Usage(app.Out) } os.Exit(0) } if showLicense == true { fmt.Fprintln(app.Out, app.License()) os.Exit(0) } if showVersion == true { fmt.Fprintln(app.Out, app.Version()) os.Exit(0) } activeDate := time.Now().Format("2006-07-15") reader := bufio.NewReader(app.In) entryCnt := 0 lineNo := 1 if asJSON == true { fmt.Fprint(app.Out, "[") } for { line, err := reader.ReadString('\n') if err != nil { break } if stn.IsDateLine(line) == true { activeDate = stn.ParseDateLine(line) } else if stn.IsEntry(line) { entry, perr := stn.ParseEntry(activeDate, line) if perr != nil { fmt.Fprintf(app.Eout, "line %d: %v\n", lineNo, perr) } if asJSON == true { if entryCnt > 0 { fmt.Fprint(app.Out, ",") } fmt.Fprint(app.Out, entry.JSON()) entryCnt++ } else { fmt.Fprintln(app.Out, entry.String()) } } lineNo++ } if asJSON == true { fmt.Fprint(app.Out, "]") } }
cmd/stnparse/stnparse.go
0.536799
0.475666
stnparse.go
starcoder
package effect import ( "korok.io/korok/gfx" "korok.io/korok/math/f32" "korok.io/korok/math" ) // RadiusConfig used to configure the RadiusSimulator. type RadiusConfig struct { Config Radius Range Angle, AngleDelta Var } // RadiusSimulator works as the radius mode of the Cocos2D's particle-system. type RadiusSimulator struct { Pool LifeController RateController VisualController poseStart Channel_v2 colorDelta Channel_v4 sizeDelta Channel_f32 rot Channel_f32 rotDelta Channel_f32 angle Channel_f32 angleDelta Channel_f32 radius Channel_f32 radiusDelta Channel_f32 *RadiusConfig } func NewRadiusSimulator(cfg *RadiusConfig) *RadiusSimulator { r := &RadiusSimulator{Pool:Pool{Cap: cfg.Max}, RadiusConfig: cfg} r.Pool.AddChan(Life) r.Pool.AddChan(Position, PositionStart) r.Pool.AddChan(Color, ColorDelta) r.Pool.AddChan(Size, SizeDelta) r.Pool.AddChan(Rotation, RotationDelta) r.Pool.AddChan(Angle, AngleDelta) r.Pool.AddChan(Radius, RadiusDelta) return r } // prepare data func (r *RadiusSimulator) Initialize() { r.Pool.Initialize() r.Life = r.Field(Life).(Channel_f32) r.Position = r.Field(Position).(Channel_v2) r.poseStart = r.Field(PositionStart).(Channel_v2) r.Color = r.Field(Color).(Channel_v4) r.colorDelta = r.Field(ColorDelta).(Channel_v4) r.ParticleSize = r.Field(Size).(Channel_f32) r.sizeDelta = r.Field(SizeDelta).(Channel_f32) r.Rotation = r.Field(Rotation).(Channel_f32) r.rotDelta = r.Field(RotationDelta).(Channel_f32) r.angle = r.Field(Angle).(Channel_f32) r.angleDelta = r.Field(AngleDelta).(Channel_f32) r.radius = r.Field(Radius).(Channel_f32) r.radiusDelta = r.Field(RadiusDelta).(Channel_f32) // init controller r.RateController.Initialize(r.Duration, r.RadiusConfig.Rate) } func (r *RadiusSimulator) Simulate(dt float32) { if new := r.RateController.Rate(dt); new > 0 { r.newParticle(new) } n := int32(r.Live) r.Life.Sub(n, dt) r.angle.Integrate(n, r.angleDelta, dt) r.radius.Integrate(n, r.radiusDelta, dt) // 极坐标转换 for i := int32(0); i < n; i ++ { x := 
float32(math.Cos(r.angle[i])) * r.radius[i] y := float32(math.Sin(r.angle[i])) * r.radius[i] r.Position[i] = f32.Vec2{x, y} } r.Color.Integrate(n, r.colorDelta, dt) r.ParticleSize.Integrate(n, r.sizeDelta, dt) r.Rotation.Integrate(n, r.rotDelta, dt) // recycle dead particle r.GC(&r.Pool) } func (r *RadiusSimulator) newParticle(new int) { if (r.Live + new) > r.Cap { return } start := r.Live r.Live += new cfg := r.RadiusConfig for i := start; i < r.Live; i++ { r.Life[i] = cfg.Life.Random() invLife := 1/r.Life[i] r.Position[i] = f32.Vec2{cfg.X.Random(), cfg.Y.Random()} // Color var red, _g, b, a float32 = 0, 0, 0, 1 var redd, gd, bd, ad float32 if cfg.R.Used() { red, redd = cfg.R.RangeInit(invLife) } if cfg.G.Used() { _g, gd = cfg.G.RangeInit(invLife) } if cfg.B.Used() { b, bd = cfg.B.RangeInit(invLife) } if cfg.A.Used() { a, ad = cfg.A.RangeInit(invLife) } r.Color[i] = f32.Vec4{red, _g, b, a} r.colorDelta[i] = f32.Vec4{redd, gd, bd, ad} r.ParticleSize[i] = cfg.Size.Start.Random() if cfg.Size.Start != cfg.Size.End { r.sizeDelta[i] = (cfg.Size.End.Random() - r.ParticleSize[i]) * invLife } // rot r.Rotation[i] = cfg.Rot.Start.Random() if cfg.Rot.Start != cfg.Rot.End { r.rotDelta[i] = (cfg.Rot.End.Random() - r.Rotation[i]) * invLife } // start position r.poseStart[i] = r.Position[i] // radius r.radius[i] = cfg.Radius.Start.Random() if cfg.Radius.Start != cfg.Radius.End { r.radiusDelta[i] = (cfg.Radius.End.Random() - r.Rotation[i]) * invLife } // angle r.angle[i] = cfg.Angle.Random() r.angleDelta[i] = cfg.AngleDelta.Random() } } func (r *RadiusSimulator) Visualize(buf []gfx.PosTexColorVertex, tex gfx.Tex2D) { r.VisualController.Visualize(buf, tex, int(r.Live), r.Additive) } func (r *RadiusSimulator) Size() (live, cap int) { return r.Live, r.Cap }
effect/sim_radius.go
0.647687
0.436382
sim_radius.go
starcoder
package expr import ( "fmt" "math" "strconv" ) func intRawString(i int64) string { return strconv.FormatInt(i, 10) } func floatRawString(f float64) string { return fmt.Sprintf("%f", f) } func stringAdd(s1, s2 Value) Value { return Value{ kind: KindString, rawValue: s1.rawValue + s2.rawValue, } } func intAdd(v1, v2 Value) (Value, error) { return Int(v1.intValue + v2.intValue), nil } func intSub(v1, v2 Value) (Value, error) { return Int(v1.intValue - v2.intValue), nil } func intMul(v1, v2 Value) (Value, error) { return Int(v1.intValue * v2.intValue), nil } func intQuo(v1, v2 Value) (Value, error) { if v2.intValue == 0 { return Zero(), ErrDivideZero } return Int(v1.intValue / v2.intValue), nil } func intRem(v1, v2 Value) (Value, error) { if v2.intValue == 0 { return Zero(), ErrDivideZero } return Int(v1.intValue % v2.intValue), nil } func intPow(v1, v2 Value) (Value, error) { if v1.intValue == 0 { return Zero(), ErrPowOfZero } return Int(int64(math.Pow(float64(v1.intValue), float64(v2.intValue)))), nil } func floatAdd(v1, v2 Value) (Value, error) { return Float(v1.floatValue + v2.floatValue), nil } func floatSub(v1, v2 Value) (Value, error) { return Float(v1.floatValue - v2.floatValue), nil } func floatMul(v1, v2 Value) (Value, error) { return Float(v1.floatValue * v2.floatValue), nil } func floatQuo(v1, v2 Value) (Value, error) { return Float(v1.floatValue / v2.floatValue), nil } func floatRem(v1, v2 Value) (Value, error) { return Float(math.Remainder(v1.floatValue, v2.floatValue)), nil } func floatPow(v1, v2 Value) (Value, error) { if v1.floatValue == 0 { return Zero(), ErrPowOfZero } return Float(math.Pow(v1.floatValue, v2.floatValue)), nil } type binaryOpFunc func(Value, Value) (Value, error) func binaryOp(v1, v2 Value, iop, fop binaryOpFunc) (Value, error) { if v1.kind == KindString || v2.kind == KindString { return Zero(), ErrTypeMismatchForOp } if v1.kind == KindInvalid || v2.kind == KindInvalid { return Zero(), ErrUnsupportedType } if v1.kind == KindFloat { if 
v2.kind == KindInt { return fop(v1, Float(float64(v2.intValue))) } return fop(v1, v2) } else { if v2.kind == KindInt { return iop(v1, v2) } return fop(Float(float64(v1.intValue)), v2) } } type compareFunc func(Value, Value) Value func stringEq(v1, v2 Value) Value { return Bool(v1.rawValue == v2.rawValue) } func stringGt(v1, v2 Value) Value { return Bool(v1.rawValue > v2.rawValue) } func stringGe(v1, v2 Value) Value { return Bool(v1.rawValue >= v2.rawValue) } func intEq(v1, v2 Value) Value { return Bool(v1.intValue == v2.intValue) } func intGt(v1, v2 Value) Value { return Bool(v1.intValue > v2.intValue) } func intGe(v1, v2 Value) Value { return Bool(v1.intValue >= v2.intValue) } func floatEq(v1, v2 Value) Value { return Bool(v1.floatValue == v2.floatValue) } func floatGt(v1, v2 Value) Value { return Bool(v1.floatValue > v2.floatValue) } func floatGe(v1, v2 Value) Value { return Bool(v1.floatValue >= v2.floatValue) } func compare(v1, v2 Value, scmp, icmp, fcmp compareFunc) (Value, error) { switch v1.kind { case KindString: if v2.kind == KindString { return scmp(v1, v2), nil } return False(), ErrComparedTypesMismatch case KindInt: if v2.kind == KindInt { return icmp(v1, v2), nil } else if v2.kind == KindFloat { return fcmp(Float(float64(v1.intValue)), v2), nil } return False(), ErrComparedTypesMismatch case KindFloat: if v2.kind == KindInt { return fcmp(v1, Float(float64(v2.intValue))), nil } else if v2.kind == KindFloat { return fcmp(v1, v2), nil } return False(), ErrComparedTypesMismatch default: return False(), ErrUnsupportedType } }
vendor/github.com/mkideal/pkg/expr/internal.go
0.625209
0.422266
internal.go
starcoder
package Geo import ( "crypto/sha256" "fmt" "github.com/opensatelliteproject/SatHelperApp/ImageProcessor/Projector" "github.com/opensatelliteproject/SatHelperApp/XRIT" "math" "regexp" "strconv" ) type Converter struct { satelliteLongitude float64 // Satellite Longitude coff int // Column Offset loff int // Line Offset cfac float64 // Column Scaling Factor lfac float64 // Line Scaling Factor fixAspect bool // Fix Aspect Ratio if needed imageWidth int // Image Width aspectRatio float64 cropLeft int } // MakeGeoConverter Creates a new instance of GeoConverter // satelliteLongitude => Satellite longitude. // coff => Column Offset // loff => Line Offset // cfac => Column Scaling Factor // lfac => Line Scaling Factor // fixAspect => If the aspect ratio should be fixed for cutting image // imageWidth => Image Width in pixels func MakeGeoConverter(satelliteLongitude float64, coff, loff int, cfac, lfac float64, fixAspect bool, imageWidth int) Projector.ProjectionConverter { return &Converter{ satelliteLongitude: satelliteLongitude, coff: coff, loff: loff, cfac: cfac, lfac: lfac, fixAspect: fixAspect, imageWidth: imageWidth, aspectRatio: cfac / lfac, cropLeft: coff - int(math.Min(float64(imageWidth-coff), float64(coff))), } } // MakeGeoConverterFromXRIT Creates a new instance of GeoConverter from a XRIT File Header func MakeGeoConverterFromXRIT(xh *XRIT.Header) (Projector.ProjectionConverter, error) { x := regexp.MustCompile(`.*\((.*)\)`) regMatch := x.FindStringSubmatch(xh.ImageNavigationHeader.ProjectionName) if len(regMatch) < 2 { return nil, fmt.Errorf("cannot find projection lon at %s", xh.ImageNavigationHeader.ProjectionName) } lon, err := strconv.ParseFloat(regMatch[1], 64) if err != nil { return nil, err } if xh.ImageNavigationHeader == nil { return nil, fmt.Errorf("no image navigation header") } inh := xh.ImageNavigationHeader if xh.SegmentIdentificationHeader != nil && xh.SegmentIdentificationHeader.COMS1 { xh.ImageNavigationHeader.LineScalingFactor >>= 9 // Not 
sure why is needed xh.ImageNavigationHeader.LineScalingFactor -= 180000 } return MakeSimpleGeoConverter(lon, int(inh.ColumnOffset), int(inh.LineOffset), float64(inh.ColumnScalingFactor), float64(inh.LineScalingFactor)), nil } // MakeSimpleGeoConverter Creates a new instance of GeoConverter // Same as MakeGeoConverter but with fixAspect disabled and imageWidth = 0 // satelliteLongitude => Satellite longitude. // coff => Column Offset // loff => Line Offset // cfac => Column Scaling Factor // lfac => Line Scaling Factor func MakeSimpleGeoConverter(satelliteLongitude float64, coff, loff int, cfac, lfac float64) Projector.ProjectionConverter { return MakeGeoConverter(satelliteLongitude, coff, loff, cfac, lfac, false, 0) } // LatLon2XY Converts Latitude/Longitude to Pixel X/Y // lat => Latitude in Degrees // lon => Longitude in Degrees func (gc *Converter) LatLon2XY(lat, lon float64) (x, y int) { x, y = LonLat2XY(gc.satelliteLongitude, Deg2Rad(lon), Deg2Rad(lat), gc.coff, gc.cfac, gc.loff, gc.lfac) if gc.fixAspect { y = int(float64(y) * gc.aspectRatio) } return } // LatLon2XYf Converts Latitude/Longitude to Pixel X/Y (float64) // lat => Latitude in Degrees // lon => Longitude in Degrees func (gc *Converter) LatLon2XYf(lat, lon float64) (x, y float64) { x, y = LonLat2XYf(gc.satelliteLongitude, Deg2Rad(lon), Deg2Rad(lat), gc.coff, gc.cfac, gc.loff, gc.lfac) if gc.fixAspect { y *= gc.aspectRatio } return } // XY2LatLon Converts Pixel X/Y to Latitude/Longitude // lat => Latitude in Degrees // lon => Longitude in Degrees func (gc *Converter) XY2LatLon(x, y int) (lat, lon float64) { lat, lon = XY2LonLat(gc.satelliteLongitude, x, y, gc.coff, gc.cfac, gc.loff, gc.lfac) lat = Rad2Deg(lat) lon = Rad2Deg(lon) return } // region Getters // ColumnOffset returns the number of pixels that the image is offset from left func (gc *Converter) ColumnOffset() int { return gc.coff } // LineOffset returns the number of pixels that the image is offset from top func (gc *Converter) LineOffset() 
int { return gc.loff } // CropLeft returns the number of pixels that should be cropped func (gc *Converter) CropLeft() int { return gc.cropLeft } // MaxLatitude returns the Maximum Visible Latitude func (gc *Converter) MaxLatitude() float64 { return 79 } // MinLatitude returns Minimum Visible Latitude func (gc *Converter) MinLatitude() float64 { return -79 } // MaxLongitude returns Maximum visible Longitude func (gc *Converter) MaxLongitude() float64 { return gc.satelliteLongitude + 79 } // MinLongitude returns Minimum visible latitude func (gc *Converter) MinLongitude() float64 { return gc.satelliteLongitude - 79 } // LatitudeCoverage returns Coverage of the view in Latitude Degrees func (gc *Converter) LatitudeCoverage() float64 { return gc.MaxLatitude() - gc.MinLatitude() } // LongitudeCoverage returns Coverage of the view in Longitude Degrees func (gc *Converter) LongitudeCoverage() float64 { return gc.MaxLongitude() - gc.MinLongitude() } // TrimLongitude returns Longitude Trim parameter for removing artifacts on Reprojection (in degrees) func (gc *Converter) TrimLongitude() float64 { return 16 } // TrimLatitude returns Latitude Trim parameter for removing artifacts on Reprojection (in degrees) func (gc *Converter) TrimLatitude() float64 { return 16 } func (gc *Converter) Hash() string { s := fmt.Sprintf("%f%d%d%f%f%v%d%v", gc.satelliteLongitude, gc.coff, gc.loff, gc.lfac, gc.cfac, gc.fixAspect, gc.imageWidth, gc.cropLeft) h := sha256.New() _, _ = h.Write([]byte(s)) return fmt.Sprintf("%x", h.Sum(nil)) } // endregion
XRIT/Geo/GeoConverter.go
0.8156
0.446555
GeoConverter.go
starcoder
// A simple Turing machine simulator: the machine description (alphabet,
// states, start/accept states, initial tape, transitions) is read from the
// file named by os.Args[1], and every step is printed.
package main

import (
	"bufio"
	"fmt"
	"log"
	"os"
	"strconv"
	"strings"
)

// Condition is the input of a Transition: current state + symbol under the head.
type Condition struct {
	state  string
	symbol rune
}

func (c Condition) String() string {
	return "state=" + c.state + " symbol=" + strconv.QuoteRuneToASCII(c.symbol)
}

// Effect is the output of a Transition: next state, symbol to write, and
// head movement direction ("<" or ">").
type Effect struct {
	state     string
	symbol    rune
	direction string
}

func (c Effect) String() string {
	return "state=" + c.state + " symbol=" + strconv.QuoteRuneToASCII(c.symbol) + " direction=" + c.direction
}

// Transitions is the set of transition functions.
type Transitions map[Condition]Effect

// addTransition parses one "state symbol -> state symbol direction" line and
// adds it to transitions, validating states and symbols. It exits via
// log.Fatalf on any malformed or inconsistent input.
func addTransition(transitions Transitions, line string, alphabet []rune, states []string) Transitions {
	parts := strings.Split(line, " ")
	// Guard against short/empty tokens: the original indexed parts[5] and
	// []rune(parts[1])[0] unconditionally and panicked on malformed lines.
	if len(parts) < 6 || parts[1] == "" || parts[4] == "" {
		log.Fatalf("Malformed transition line: [%s]", line)
	}

	// the condition of the transition
	cond := Condition{state: parts[0], symbol: []rune(parts[1])[0]}
	if !stringInSlice(cond.state, states) {
		log.Fatalf("Unknown state: [%s]", cond.state)
	}
	if !runeInSlice(cond.symbol, alphabet) {
		// %q, not %s: a rune formatted with %s prints as "%!s(int32=...)".
		log.Fatalf("Unknown symbol! Got [%q]", cond.symbol)
	}
	if _, dup := transitions[cond]; dup {
		log.Fatalf("Duplicate transition! Got [%s] condition already", cond)
	}

	// the effect of the transition
	effect := Effect{state: parts[3], symbol: []rune(parts[4])[0], direction: parts[5]}
	if !stringInSlice(effect.state, states) {
		log.Fatalf("Unknown state: [%s]", effect.state)
	}
	if !runeInSlice(effect.symbol, alphabet) {
		log.Fatalf("Unknown symbol! Got [%q]", effect.symbol)
	}

	transitions[cond] = effect
	return transitions
}

// move moves the readHead in the direction specified in the effect.
func move(effect Effect, readHead int) int {
	switch effect.direction {
	case "<":
		return readHead - 1
	case ">":
		return readHead + 1
	default:
		log.Fatal("Bad direction")
	}
	return 0 // unreachable: log.Fatal exits
}

// printTape prints to the standard output the current state of the Turing
// Machine. zero is the number of padding whitespaces to prepend the symbol '|'.
func printTape(state string, readHead int, tape string, zero int) {
	rh := strings.Repeat(" ", zero) + "|"
	if readHead > 0 {
		rh = rh + strings.Repeat(" ", readHead-1) + "^"
	} else if readHead < 0 {
		rh = "^" + strings.Repeat(" ", -readHead-1) + rh
		tape = strings.Repeat(" ", -readHead) + tape
	}
	fmt.Printf("%s\n%s\n%s\n\n", state, tape, rh)
}

// runeInSlice returns true if the rune a is in the slice list.
func runeInSlice(a rune, list []rune) bool {
	for _, b := range list {
		if b == a {
			return true
		}
	}
	return false
}

// stringInSlice returns true if the string a is in the slice list.
func stringInSlice(a string, list []string) bool {
	for _, b := range list {
		if b == a {
			return true
		}
	}
	return false
}

// TuringMachine bundles the machine description parsed from the input file.
type TuringMachine struct {
	startState  string
	acceptState string
	tape        []rune
	transitions Transitions
}

// NewTuringMachine reads a machine description file. The line layout is:
// 0: alphabet ('_' is appended as the blank symbol), 1: states,
// 2: start state, 3: accept state, 4: initial tape, 5+: transitions.
func NewTuringMachine(fileName string) *TuringMachine {
	file, err := os.Open(fileName)
	if err != nil {
		// BUG FIX: the original Fatalf had no verb for err ("Failed to
		// open file", err), which go vet flags and which garbled the output.
		log.Fatalf("Failed to open file %s: %v", fileName, err)
	}
	defer file.Close()

	scanner := bufio.NewScanner(file)
	tm := &TuringMachine{transitions: Transitions{}}
	var alphabet []rune
	var states []string
	for i := 0; scanner.Scan(); i++ {
		line := scanner.Text()
		switch i {
		case 0:
			alphabet = []rune(line + "_")
		case 1:
			states = strings.Split(line, " ")
		case 2:
			tm.startState = line
			if !stringInSlice(tm.startState, states) {
				log.Fatalf("Unknown state: [%s]", tm.startState)
			}
		case 3:
			tm.acceptState = line
			if !stringInSlice(tm.acceptState, states) {
				log.Fatalf("Unknown state: [%s]", tm.acceptState)
			}
		case 4:
			tm.tape = []rune(line)
		default:
			tm.transitions = addTransition(tm.transitions, line, alphabet, states)
		}
	}
	if scanner.Err() != nil {
		log.Fatal(scanner.Err())
	}
	return tm
}

func main() {
	turingMachine := NewTuringMachine(os.Args[1])
	printTape(turingMachine.startState, 0, string(turingMachine.tape), 0)

	currentCondition, readHead, zero := Condition{state: turingMachine.startState, symbol: turingMachine.tape[0]}, 0, 0
	// Loops until the current state is the accepting state.
	for currentCondition.state != turingMachine.acceptState {
		effect := turingMachine.transitions[currentCondition]
		// Apply the step
		// 1. change current state
		currentCondition.state = effect.state
		// 2. write the symbol, growing the tape when the head is off either end
		if readHead < 0 {
			turingMachine.tape = append([]rune{effect.symbol}, turingMachine.tape...)
			zero++
		} else if readHead >= len(turingMachine.tape) {
			turingMachine.tape = append(turingMachine.tape, effect.symbol)
		} else {
			turingMachine.tape[readHead] = effect.symbol
		}
		readHead = move(effect, readHead)
		// 3. update the current symbol under the readHead ('_' = blank off-tape)
		if readHead < 0 || readHead >= len(turingMachine.tape) {
			currentCondition.symbol = '_'
		} else {
			currentCondition.symbol = turingMachine.tape[readHead]
		}
		printTape(currentCondition.state, readHead, string(turingMachine.tape), zero)
	}
}
208-hard/turing-machina.go
0.657868
0.427815
turing-machina.go
starcoder
package main

// Importing Necessary Packages
import (
	"fmt"
)

// getDigits returns the number of decimal digits of number (1 for zero).
// NOTE(review): negating math.MinInt64 overflows; inputs are assumed to be
// strictly greater than math.MinInt64.
func getDigits(number int64) uint {
	var ans uint
	if number == 0 {
		return 1
	}
	if number < 0 {
		number = -number
	}
	for number > 0 {
		ans++
		number = number / 10
	}
	return ans
}

// pow10 returns 10^n using pure integer arithmetic. The original code used
// int64(math.Pow(10, n)), which can round incorrectly for large exponents;
// integer multiplication is exact as long as the result fits in int64.
func pow10(n uint) int64 {
	result := int64(1)
	for ; n > 0; n-- {
		result *= 10
	}
	return result
}

// getHighAndLowDigits splits num into the part above and the part below the
// lowest `digits` decimal digits.
func getHighAndLowDigits(num int64, digits uint) (int64, int64) {
	divisor := pow10(digits)
	if num >= divisor {
		return num / divisor, num % divisor
	}
	return 0, num
}

// karatsuba multiplies x and y with the Karatsuba divide-and-conquer
// algorithm: split each operand at half the digit count, recurse on three
// sub-products (z0, z1, z2), and recombine.
// BUG FIX: the original applied the sign only in the final recombination, so
// small mixed-sign inputs that hit the base case (e.g. karatsuba(-3, 5))
// returned a positive product; the sign is now applied on every return path.
func karatsuba(x int64, y int64) int64 {
	if x == 0 || y == 0 {
		return 0
	}

	// The result is negative exactly when the operand signs differ.
	negative := (x < 0) != (y < 0)
	if x < 0 {
		x = -x
	}
	if y < 0 {
		y = -y
	}

	// Base case: single-digit operand — multiply directly.
	if x < 10 || y < 10 {
		if negative {
			return -(x * y)
		}
		return x * y
	}

	// Split both operands at half the larger digit count.
	halfDigits := getDigits(x) / 2
	if d := getDigits(y); d > getDigits(x) {
		halfDigits = d / 2
	}
	xHigh, xLow := getHighAndLowDigits(x, halfDigits)
	yHigh, yLow := getHighAndLowDigits(y, halfDigits)

	z0 := karatsuba(xLow, yLow)
	z1 := karatsuba(xLow+xHigh, yLow+yHigh)
	z2 := karatsuba(xHigh, yHigh)

	result := z2*pow10(2*halfDigits) + (z1-z2-z0)*pow10(halfDigits) + z0
	if negative {
		return -result
	}
	return result
}

// Main Function & Taking User Inputs
func main() {
	fmt.Println("Enter the first number: ")
	var first int64
	fmt.Scanln(&first)

	fmt.Println("Enter the second number: ")
	var second int64
	fmt.Scanln(&second)

	fmt.Println()
	fmt.Print("Result: ")
	fmt.Println(karatsuba(first, second))
}

/**
Enter the first number:
121547
Enter the second number:
1855324

Result: 225509066228

Enter the first number:
-8859460
Enter the second number:
1154486

Result: -10228122537560
*/
0.575111
0.632843
Karatsuba_Algorithm.go
starcoder
package parser import "fmt" type FunctionType struct { Signature []DataType Names []string ReturnType DataType } var functionTypes map[string]FunctionType // ParamStmt is the equivalent of [PARAM stmt.Name stmt.Type] type ParamStmt struct { *BasicStatement Name string Kind DataType } func (p *ParamStmt) Type() DataType { return PARAMETER } // FunctionBlock is the equivalent of [IFB stmt.Condition] stmt.Body [ELSE] stmt.Else [ENDIF], the stmt.Else may be nil type FunctionBlock struct { *BasicStatement Name string Signature FunctionType Return Statement Body []Statement } func (f *FunctionBlock) Type() DataType { return f.Return.Type() } func (f *FunctionBlock) Keywords() []string { return []string{"RETURN"} } func (f *FunctionBlock) EndSignature() []DataType { return []DataType{ANY | NULL} } func (f *FunctionBlock) End(_ string, args []Statement, statements []Statement) bool { f.Return = args[0] f.Body = statements f.Signature.ReturnType = f.Return.Type() functionTypes[f.Name] = f.Signature return true } var dataTypes = map[string]DataType{ "INT": INT, "FLOAT": FLOAT, "STRING": STRING, "ARRAY": ARRAY, } func SetupFunctions() { parsers["PARAM"] = StatementParser{ Parse: func(args []Statement, line int) (Statement, error) { name, ok := args[0].(*Data) if !ok { return nil, fmt.Errorf("line %d: parameter 1 to PARAM must be constant", line) } kind, ok := args[1].(*Data) if !ok { return nil, fmt.Errorf("line %d: parameter 2 to PARAM must be constant", line) } k, exists := dataTypes[kind.Data.(string)] if !exists { return nil, fmt.Errorf("line %d: parameter 2 to PARAM must be INT, FLOAT, STRING, or ARRAY", line) } return &ParamStmt{ BasicStatement: &BasicStatement{line: line}, Name: name.Data.(string), Kind: k, }, nil }, Signature: []DataType{IDENTIFIER, IDENTIFIER}, } blocks["FUNCTION"] = BlockParser{ Parse: func(args []Statement, line int) (Block, error) { sig := FunctionType{ Signature: make([]DataType, len(args)-1), Names: make([]string, len(args)-1), } for i, arg := 
range args[1:] { par, ok := arg.(*ParamStmt) if !ok { return nil, fmt.Errorf("line %d: parameters must be a PARAM", line) } sig.Signature[i] = par.Kind sig.Names[i] = par.Name } fn := &FunctionBlock{ BasicStatement: &BasicStatement{line: line}, Signature: sig, Name: args[0].(*Data).Data.(string), } functionTypes[fn.Name] = fn.Signature return fn, nil }, Signature: []DataType{IDENTIFIER, PARAMETER, VARIADIC}, } } // FunctionCallStmt is the equivalent of [stmt.Name stmt.Args...] type FunctionCallStmt struct { *BasicStatement Name string Args []Statement ReturnType DataType } func (f *FunctionCallStmt) Type() DataType { return f.ReturnType }
parser/functions.go
0.55254
0.486088
functions.go
starcoder
package dictionary // trieNode saves trie structure type trieNode struct { childMap map[rune]*trieNode isEnd bool } // Dictionary struct contains data and methods type Dictionary struct { root *trieNode wordCount int } // NewDictionary creates new instance of dictionary func NewDictionary() *Dictionary { return &Dictionary{ root: &trieNode{ childMap: map[rune]*trieNode{}, isEnd: false, }, wordCount: 0, } } // Size returns word count in dictionary func (pr *Dictionary) Size() int { return pr.wordCount } // Insert inserts new word in dictionary // returns false if this word already present func (pr *Dictionary) Insert(word string) bool { node := pr.root for _, ch := range word { if newNode, ok := node.childMap[ch]; ok { node = newNode } else { node.childMap[ch] = &trieNode{ childMap: map[rune]*trieNode{}, isEnd: false, } node = node.childMap[ch] } } if node.isEnd { return false } node.isEnd = true pr.wordCount++ return true } // InsertAll inserts all words in dictionary // returns number of words actually inserted func (pr *Dictionary) InsertAll(words []string) int { var res = 0 for _, word := range words { if pr.Insert(word) { res++ } } return res } // Retrieve retrieves all words in dictionary starting with prefix func (pr *Dictionary) Retrieve(prefix string) []string { node, depth := longestMatch(prefix, pr.root) if depth != len(prefix) { return []string{} } return allChild(prefix, node) } // Contains checks if given word is in dictionary func (pr *Dictionary) Contains(word string) bool { node, depth := longestMatch(word, pr.root) return depth == len(word) && node.isEnd } // Delete deletes given word from dictionary // returns false if word is'n in dictionary func (pr *Dictionary) Delete(word string) bool { node, depth := longestMatch(word, pr.root) if depth == len(word) && node.isEnd { node.isEnd = false return true } return false } func longestMatch(prefix string, root *trieNode) (*trieNode, int) { node := root var depth = 0 for _, ch := range prefix { if 
newNode, ok := node.childMap[ch]; ok { node = newNode depth++ } else { return node, depth } } return node, depth } func allChild(prefix string, node *trieNode) []string { var res []string if node.isEnd { res = append(res, prefix) } for ch, childNode := range node.childMap { newStr := prefix + string(ch) res = append(res, allChild(newStr, childNode)...) } return res }
dictionary/dictionary.go
0.700997
0.476823
dictionary.go
starcoder
package ion

import (
	"bytes"
	"io"

	"github.com/pkg/errors"
)

// This file contains binary parsers for List, SExp, Struct, and Annotation.
//
// BUGFIX applied throughout: the original used a single r.Read(data) call and
// treated a short read as a failure. io.Reader.Read is allowed to return fewer
// bytes than requested without an error (common on network streams), so these
// parsers now use io.ReadFull, which only returns successfully once the whole
// buffer is populated.

// parseBinaryList attempts to read and parse the entirety of the list whether
// it be a List (high == binaryTypeList) or SExp (high == binaryTypeSExp).
// ann carries any annotations already read for this value.
func parseBinaryList(ann []Symbol, high byte, lengthByte byte, r io.Reader) (Value, error) {
	// A zero length byte denotes a complete, empty container.
	if lengthByte == 0 && high == binaryTypeList {
		return List{annotations: ann, values: []Value{}}, nil
	}
	if lengthByte == 0 && high == binaryTypeSExp {
		return SExp{annotations: ann, values: []Value{}}, nil
	}

	numBytes, errLength := determineLength32(lengthByte, r)
	if errLength != nil {
		return nil, errors.WithMessage(errLength, "unable to parse length of list")
	}

	data := make([]byte, numBytes)
	if n, err := io.ReadFull(r, data); err != nil {
		return nil, errors.Errorf("unable to read list - read %d bytes of %d with err: %v", n, numBytes, err)
	}

	// Parse child values until the container's byte budget is exhausted.
	var values []Value
	dataReader := bytes.NewReader(data)
	for dataReader.Len() > 0 {
		value, err := parseNextBinaryValue(nil, dataReader)
		if err != nil {
			return nil, errors.WithMessage(err, "unable to parse list")
		}
		values = append(values, value)
	}

	if high == binaryTypeList {
		return List{annotations: ann, values: values}, nil
	}
	return SExp{annotations: ann, values: values}, nil
}

// parseBinaryStruct reads all of the symbol / value pairs and puts them
// into a Struct.
func parseBinaryStruct(ann []Symbol, lengthByte byte, r io.Reader) (Value, error) {
	if lengthByte == 0 {
		return Struct{annotations: ann, fields: []StructField{}}, nil
	}

	var numBytes uint32
	var errLength error
	// "When L is 1, the struct has at least one symbol/value pair, the length
	// field exists, and the field name integers are sorted in increasing order."
	if lengthByte == 1 {
		numBytes, errLength = readVarUInt32(r)
	} else {
		numBytes, errLength = determineLength32(lengthByte, r)
	}
	if errLength != nil {
		return nil, errors.WithMessage(errLength, "unable to parse length of struct")
	}

	data := make([]byte, numBytes)
	if n, err := io.ReadFull(r, data); err != nil {
		return nil, errors.Errorf("unable to read struct - read %d bytes of %d with err: %v", n, numBytes, err)
	}

	// Not having any fields isn't the same as being null, so differentiate
	// between the two by ensuring that fields isn't nil even if it's empty.
	fields := []StructField{}
	dataReader := bytes.NewReader(data)
	for dataReader.Len() > 0 {
		symbol, errSymbol := readVarUInt32(dataReader)
		if errSymbol != nil {
			return nil, errors.WithMessage(errSymbol, "unable to read struct field symbol")
		}
		value, errValue := parseNextBinaryValue(nil, dataReader)
		if errValue != nil {
			return nil, errors.WithMessage(errValue, "unable to read struct field value")
		}
		// Ignore padding.
		if value.Type() == TypePadding {
			continue
		}
		fields = append(fields, StructField{
			Symbol: Symbol{id: int32(symbol)},
			Value:  value,
		})
	}

	return Struct{annotations: ann, fields: fields}, nil
}

// parseBinaryAnnotation reads the annotation and the value that it is
// annotating. If the lengthByte is zero, then this is treated as the
// first byte of a Binary Version Marker.
func parseBinaryAnnotation(lengthByte byte, r io.Reader) (Value, error) {
	// 0xE as the high byte has two potential uses, one for annotations and one for the
	// start of the binary version marker. We are going to be optimistic and assume that
	// 0xE0 is for the BVM and all other values for the low nibble is for annotations.
	if lengthByte == 0 {
		return parseBinaryVersionMarker(r)
	}
	// At minimum: one byte of annotation length, one annotation symbol,
	// and one byte of wrapped value.
	if lengthByte < 3 {
		return nil, errors.Errorf("length must be at least 3 for an annotation wrapper, found %d", lengthByte)
	}

	numBytes, errLength := determineLength32(lengthByte, r)
	if errLength != nil {
		return nil, errors.WithMessage(errLength, "unable to parse length of annotation")
	}

	data := make([]byte, numBytes)
	if n, err := io.ReadFull(r, data); err != nil {
		return nil, errors.Errorf("unable to read annotation - read %d bytes of %d with err: %v", n, numBytes, err)
	}

	dataReader := bytes.NewReader(data)
	annLen, errAnnLen := readVarUInt16(dataReader)
	if errAnnLen != nil {
		return nil, errors.WithMessage(errAnnLen, "unable to determine annotation symbol length")
	}
	if annLen == 0 || uint32(annLen) >= numBytes {
		return nil, errors.Errorf("invalid lengths for annotation - field length is %d while annotation symbols length is %d", numBytes, annLen)
	}

	annData := make([]byte, annLen)
	// We've already verified lengths and are reading from an in-memory
	// bytes.Reader, so this is a plain copy with no error to catch.
	_, _ = dataReader.Read(annData)

	// Decode the run of annotation symbol IDs.
	annReader := bytes.NewReader(annData)
	var annotations []Symbol
	for annReader.Len() > 0 {
		symbol, errSymbol := readVarUInt32(annReader)
		if errSymbol != nil {
			return nil, errors.WithMessage(errSymbol, "unable to read annotation symbol")
		}
		annotations = append(annotations, Symbol{id: int32(symbol)})
	}

	// Since an annotation is a container for a single value there isn't a need to
	// pre-read the contents so that we know when to stop.
	value, errValue := parseNextBinaryValue(annotations, dataReader)
	if errValue != nil {
		return nil, errors.WithMessage(errValue, "unable to read annotation value")
	}
	if dataReader.Len() > 0 {
		return nil, errors.Errorf("annotation declared %d bytes but there are %d bytes left", numBytes, dataReader.Len())
	}

	if _, ok := value.(padding); ok {
		return nil, errors.New("annotation on padding is not legal")
	}

	return value, nil
}
ion/parse_binary_container.go
0.726037
0.499268
parse_binary_container.go
starcoder
package utils

import (
	"errors"
	"fmt"

	"github.com/onsi/gomega"
	"github.com/onsi/gomega/format"
	"github.com/onsi/gomega/types"
	"github.com/pivotal-cf/brokerapi"
)

// matchFailure renders the reason a nested gomega matcher rejected a value.
// Gomega matchers report a plain mismatch as (false, nil), so err may be nil
// here; the original code called err.Error() unconditionally, which panicked
// with a nil pointer dereference on every ordinary mismatch.
func matchFailure(err error) string {
	if err != nil {
		return err.Error()
	}
	return "(values differ)"
}

// BrokerAPIServiceMatcher is a gomega matcher that compares two
// brokerapi.Service values field by field, producing a targeted error for
// the first field that differs.
type BrokerAPIServiceMatcher struct {
	service brokerapi.Service
}

// EquivalentBrokerAPIService returns a matcher asserting that the actual
// value is a brokerapi.Service equivalent to the given one. Tags and Plans
// are compared as unordered collections.
func EquivalentBrokerAPIService(service brokerapi.Service) types.GomegaMatcher {
	return &BrokerAPIServiceMatcher{
		service: service,
	}
}

// Match implements types.GomegaMatcher. It returns (false, err) describing
// the first mismatching field, or delegates the final Plans comparison to a
// ConsistOf matcher.
func (m *BrokerAPIServiceMatcher) Match(actual interface{}) (success bool, err error) {
	service2, ok := actual.(brokerapi.Service)
	if !ok {
		return false, errors.New("Not a brokerapi.Service object")
	}

	if service2.Name != m.service.Name {
		return false, fmt.Errorf("Service names do not match, actual: %s, expected: %s", service2.Name, m.service.Name)
	}
	if service2.Description != m.service.Description {
		return false, fmt.Errorf("Service descriptions do not match, actual: %s, expected: %s", service2.Description, m.service.Description)
	}
	if service2.Bindable != m.service.Bindable {
		return false, fmt.Errorf("Service field Bindable do not match, actual: %t, expected: %t", service2.Bindable, m.service.Bindable)
	}

	metadataMatcher := gomega.Equal(m.service.Metadata)
	successful, err := metadataMatcher.Match(service2.Metadata)
	if !successful {
		return false, fmt.Errorf("Service Metadata do not match %s", matchFailure(err))
	}

	// ConsistOf treats a single slice argument as the expected element set,
	// so tag order is irrelevant.
	tagsMatcher := gomega.ConsistOf(m.service.Tags)
	successful, err = tagsMatcher.Match(service2.Tags)
	if !successful {
		return false, fmt.Errorf("Services tags do not match, %s", matchFailure(err))
	}

	if service2.PlanUpdatable != m.service.PlanUpdatable {
		return false, fmt.Errorf("Service field Plan Updatable do not match, actual: %t, expected: %t", service2.PlanUpdatable, m.service.PlanUpdatable)
	}
	if service2.DashboardClient != m.service.DashboardClient {
		// BUGFIX: the original printed the Metadata values here instead of
		// the DashboardClient values, making the failure message useless.
		return false, fmt.Errorf("Service DashboardClient do not match, actual: %+v, expected: %+v", service2.DashboardClient, m.service.DashboardClient)
	}

	// Compare plans as an unordered collection of equivalent-plan matchers.
	plans := []types.GomegaMatcher{}
	for _, plan := range m.service.Plans {
		p := EquivalentBrokerAPIPlan(plan)
		plans = append(plans, p)
	}
	plansMatcher := gomega.ConsistOf(plans)
	return plansMatcher.Match(service2.Plans)
}

// FailureMessage implements types.GomegaMatcher.
func (m *BrokerAPIServiceMatcher) FailureMessage(actual interface{}) (message string) {
	return format.Message(actual, "to be ", m.service)
}

// NegatedFailureMessage implements types.GomegaMatcher.
func (m *BrokerAPIServiceMatcher) NegatedFailureMessage(actual interface{}) (message string) {
	// The original wrapped the constant in fmt.Sprintf for no reason
	// (staticcheck S1039).
	return format.Message(actual, "to not be", m.service)
}

// BrokerAPIPlanMatcher is a gomega matcher that compares two
// brokerapi.ServicePlan values field by field.
type BrokerAPIPlanMatcher struct {
	plan brokerapi.ServicePlan
}

// EquivalentBrokerAPIPlan returns a matcher asserting that the actual value
// is a brokerapi.ServicePlan equivalent to the given one.
func EquivalentBrokerAPIPlan(plan brokerapi.ServicePlan) types.GomegaMatcher {
	return &BrokerAPIPlanMatcher{
		plan: plan,
	}
}

// Match implements types.GomegaMatcher for service plans.
func (m *BrokerAPIPlanMatcher) Match(actual interface{}) (success bool, err error) {
	plan2, ok := actual.(brokerapi.ServicePlan)
	if !ok {
		return false, errors.New("Not a brokerapi.Service object")
	}

	if plan2.Name != m.plan.Name {
		return false, fmt.Errorf("Plan names do not match, actual: %s, expected: %s", plan2.Name, m.plan.Name)
	}
	if plan2.Description != m.plan.Description {
		return false, fmt.Errorf("Plan descriptions do not match, actual: %s, expected: %s", plan2.Description, m.plan.Description)
	}

	freeMatcher := gomega.BeEquivalentTo(m.plan.Free)
	successful, err := freeMatcher.Match(plan2.Free)
	if !successful {
		return false, fmt.Errorf("Plan field Free do not match %s", matchFailure(err))
	}

	metadataMatcher := gomega.Equal(m.plan.Metadata)
	successful, err = metadataMatcher.Match(plan2.Metadata)
	if !successful {
		return false, fmt.Errorf("Plan Metadata do not match %s", matchFailure(err))
	}

	bindableMatcher := gomega.BeEquivalentTo(m.plan.Bindable)
	successful, err = bindableMatcher.Match(plan2.Bindable)
	if !successful {
		return false, fmt.Errorf("Plan field Bindable do not match %s", matchFailure(err))
	}

	schemaMatcher := gomega.Equal(m.plan.Schemas)
	successful, err = schemaMatcher.Match(plan2.Schemas)
	if !successful {
		return false, fmt.Errorf("Plan Schemas do not match, %s", matchFailure(err))
	}

	return true, nil
}

// FailureMessage implements types.GomegaMatcher.
func (m *BrokerAPIPlanMatcher) FailureMessage(actual interface{}) (message string) {
	return format.Message(actual, "to be ", m.plan)
}

// NegatedFailureMessage implements types.GomegaMatcher.
func (m *BrokerAPIPlanMatcher) NegatedFailureMessage(actual interface{}) (message string) {
	return format.Message(actual, "to not be", m.plan)
}
pkg/utils/brokerapi_matchers.go
0.679923
0.41941
brokerapi_matchers.go
starcoder
package docs import ( "bytes" "encoding/json" "strings" "github.com/alecthomas/template" "github.com/swaggo/swag" ) var doc = `{ "schemes": {{ marshal .Schemes }}, "swagger": "2.0", "info": { "description": "{{.Description}}", "title": "{{.Title}}", "contact": { "name": "<NAME>", "url": "https://github.com/jezerdave", "email": "<EMAIL>" }, "license": {}, "version": "{{.Version}}" }, "host": "{{.Host}}", "basePath": "{{.BasePath}}", "paths": { "/countries": { "get": { "description": "get current covid data", "produces": [ "application/json" ], "tags": [ "Worldometer" ], "summary": "Get all countries data", "responses": { "200": { "description": "OK", "schema": { "type": "array", "items": { "$ref": "#/definitions/worldometer.CountryStats" } } } } } }, "/countries/{country}": { "get": { "description": "find covid19 related data by country", "produces": [ "application/json" ], "tags": [ "Worldometer" ], "summary": "Find Country", "parameters": [ { "type": "string", "description": "country name / code (philippines/ph/608/phl)", "name": "country", "in": "path", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/worldometer.CountryStats" } }, "404": { "description": "Not Found", "schema": { "$ref": "#/definitions/rest.BasicError" } } } } }, "/doh/ph": { "get": { "description": "get doh philippines official data from https://ncovtracker.doh.gov.ph/", "produces": [ "application/json" ], "tags": [ "Unavailable" ], "summary": "GET DOH PHILIPPINES DATA", "responses": { "200": { "description": "OK", "schema": { "type": "array", "items": { "$ref": "#/definitions/philippines.StatsAttributes" } } } } } }, "/doh/ph/hospital-pui": { "get": { "description": "get doh philippines official data (HOSPITAL PUIs) from https://ncovtracker.doh.gov.ph/", "produces": [ "application/json" ], "tags": [ "Unavailable" ], "summary": "GET DOH PHILIPPINES DATA (HOSPITAL PUIs)", "responses": { "200": { "description": "OK", "schema": { "type": "array", 
"items": { "$ref": "#/definitions/philippines.HsPUIsAttributes" } } } } } }, "/histories": { "get": { "description": "get histories", "produces": [ "application/json" ], "tags": [ "World Health Organization (WHO)" ], "summary": "Get all countries historical data", "responses": { "200": { "description": "OK", "schema": { "type": "array", "items": { "$ref": "#/definitions/who.HistoryData" } } } } } }, "/histories/{country}": { "get": { "description": "find covid19 related historical data by country", "produces": [ "application/json" ], "tags": [ "World Health Organization (WHO)" ], "summary": "Find Country Histories", "parameters": [ { "type": "string", "description": "country name / code (philippines/ph/608/phl)", "name": "country", "in": "path", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/who.HistoryData" } }, "404": { "description": "Not Found", "schema": { "$ref": "#/definitions/rest.BasicError" } } } } }, "/states": { "get": { "description": "get current US-States covid data", "produces": [ "application/json" ], "tags": [ "Worldometer" ], "summary": "Get all US-States data", "responses": { "200": { "description": "OK", "schema": { "type": "array", "items": { "$ref": "#/definitions/worldometer.StatesStats" } } } } } }, "/states/{state}": { "get": { "description": "find covid19 related data by state", "produces": [ "application/json" ], "tags": [ "Worldometer" ], "summary": "Find State", "parameters": [ { "type": "string", "description": "State Name / State Abbreviation", "name": "state", "in": "path", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/worldometer.StatesStats" } }, "404": { "description": "Not Found", "schema": { "$ref": "#/definitions/rest.BasicError" } } } } } }, "definitions": { "jsons.Country": { "type": "object", "properties": { "cca2": { "type": "string" }, "cca3": { "type": "string" }, "ccn3": { "type": "string" }, "cioc": { "type": 
"string" }, "name": { "type": "object", "$ref": "#/definitions/jsons.CountryName" }, "tld": { "type": "array", "items": { "type": "string" } } } }, "jsons.CountryName": { "type": "object", "properties": { "common": { "type": "string" }, "official": { "type": "string" } } }, "philippines.HsPUIsAttributes": { "type": "object", "properties": { "ObjectId": { "type": "integer" }, "PUIs": { "type": "integer" }, "hf": { "type": "string" }, "latitude": { "type": "number" }, "longitude": { "type": "number" }, "region": { "type": "string" } } }, "philippines.StatsAttributes": { "type": "object", "properties": { "ObjectId": { "type": "integer" }, "PUIs": { "type": "object" }, "PUMs": { "type": "object" }, "confirmed": { "type": "integer" }, "day": { "type": "integer" }, "deaths": { "type": "integer" }, "recovered": { "type": "integer" }, "tests": { "type": "object" } } }, "rest.BasicError": { "type": "object", "properties": { "message": { "type": "string" } } }, "who.Data": { "type": "object", "properties": { "case_daily_increase": { "type": "number" }, "date": { "type": "number" }, "death_daily_increase": { "type": "number" }, "region": { "type": "string" }, "total_cases": { "type": "number" }, "total_deaths": { "type": "number" } } }, "who.HistoryData": { "type": "object", "properties": { "country": { "type": "string" }, "country_info": { "type": "object", "$ref": "#/definitions/jsons.Country" }, "data": { "type": "array", "items": { "$ref": "#/definitions/who.Data" } } } }, "worldometer.CountryStats": { "type": "object", "properties": { "active_cases": { "type": "integer" }, "cases_per_one_million": { "type": "number" }, "country": { "type": "string" }, "country_info": { "type": "object", "$ref": "#/definitions/jsons.Country" }, "deaths_per_one_million": { "type": "number" }, "new_cases": { "type": "integer" }, "new_deaths": { "type": "integer" }, "serious_critical": { "type": "integer" }, "tests_per_one_million": { "type": "number" }, "total_cases": { "type": "integer" }, 
"total_deaths": { "type": "integer" }, "total_recovered": { "type": "integer" }, "total_tests": { "type": "integer" } } }, "worldometer.StateInfo": { "type": "object", "properties": { "abbreviation": { "type": "string" }, "name": { "type": "string" } } }, "worldometer.StatesStats": { "type": "object", "properties": { "active_cases": { "type": "integer" }, "cases_per_one_million": { "type": "number" }, "deaths_per_one_million": { "type": "number" }, "new_cases": { "type": "integer" }, "new_deaths": { "type": "integer" }, "serious_critical": { "type": "integer" }, "state": { "type": "string" }, "state_info": { "type": "object", "$ref": "#/definitions/worldometer.StateInfo" }, "tests_per_one_million": { "type": "number" }, "total_cases": { "type": "integer" }, "total_deaths": { "type": "integer" }, "total_recovered": { "type": "integer" }, "total_tests": { "type": "integer" } } } } }` type swaggerInfo struct { Version string Host string BasePath string Schemes []string Title string Description string } // SwaggerInfo holds exported Swagger Info so clients can modify it var SwaggerInfo = swaggerInfo{ Version: "1.0", Host: "go-covid19.sideprojects.fun", BasePath: "/api/v1", Schemes: []string{"https"}, Title: "GO-COVID19 API", Description: "REST Api for covid-19 cases", } type s struct{} func (s *s) ReadDoc() string { sInfo := SwaggerInfo sInfo.Description = strings.Replace(sInfo.Description, "\n", "\\n", -1) t, err := template.New("swagger_info").Funcs(template.FuncMap{ "marshal": func(v interface{}) string { a, _ := json.Marshal(v) return string(a) }, }).Parse(doc) if err != nil { return doc } var tpl bytes.Buffer if err := t.Execute(&tpl, sInfo); err != nil { return doc } return tpl.String() } func init() { swag.Register(swag.Name, &s{}) }
docs/docs.go
0.542621
0.431944
docs.go
starcoder
package model type ( // Type is the data structure for storing the parsed values of schema string Type map[string]Collection // key is database name // Collection is a data structure for storing fields of schema Collection map[string]Fields // key is collection name // Fields is a data structure for storing the type of field Fields map[string]*FieldType // key is field name // FieldType stores information about a particular column in table FieldType struct { FieldName string `json:"fieldName"` IsFieldTypeRequired bool `json:"isFieldTypeRequired"` IsList bool `json:"isList"` Kind string `json:"kind"` Args *FieldArgs `json:"args"` // Directive string NestedObject Fields `json:"nestedObject"` IsPrimary bool `json:"isPrimary"` // For directives IsCreatedAt bool `json:"isCreatedAt"` IsUpdatedAt bool `json:"isUpdatedAt"` IsLinked bool `json:"isLinked"` IsForeign bool `json:"isForeign"` IsDefault bool `json:"isDefault"` IsAutoIncrement bool PrimaryKeyInfo *TableProperties `json:"primaryKeyInfo"` IndexInfo []*TableProperties `json:"indexInfo"` LinkedTable *TableProperties `json:"linkedTable"` JointTable *TableProperties `json:"jointTable"` Default interface{} `json:"default"` TypeIDSize int `json:"size"` } // FieldArgs are properties of the column FieldArgs struct { // Precision is used to hold precision information for data types Float // It represent number the digits to be stored Precision int `json:"precision"` // Scale is used to hold scale information for data types Float,Time,DateTime // It represent number the digits to be stored after decimal Scale int `json:"scale"` } // TableProperties are properties of the table TableProperties struct { // IsIndex tells us if this is an indexed column IsIndex bool `json:"isIndex"` // IsUnique tells us if this is an unique indexed column IsUnique bool `json:"isUnique"` From string To string Table string Field string OnDelete string DBType string Group string Sort string Order int ConstraintName string } ) const ( // TypeDate is 
variable used for Variable of type Date TypeDate string = "Date" // TypeTime is variable used for Variable of type Time TypeTime string = "Time" // TypeUUID is variable used for Variable of type UUID TypeUUID string = "UUID" // TypeInteger is variable used for Variable of type Integer TypeInteger string = "Integer" // TypeSmallInteger is variable used for Variable of type small int TypeSmallInteger string = "SmallInteger" // TypeBigInteger is variable used for Variable of type big int TypeBigInteger string = "BigInteger" // TypeChar is variable used for Variable of type characters with fixed size TypeChar string = "Char" // TypeVarChar is variable used for Variable of type characters with variable size TypeVarChar string = "Varchar" // TypeString is variable used for Variable of type String TypeString string = "String" // TypeFloat is a data type used for storing fractional values without specifying the precision, // the precision is set by the database TypeFloat string = "Float" // TypeDecimal is a data type used for storing fractional values in which the precision can be specified by the user TypeDecimal string = "Decimal" // TypeBoolean is variable used for Variable of type Boolean TypeBoolean string = "Boolean" // TypeDateTimeWithZone is variable used for Variable of type DateTime TypeDateTimeWithZone string = "DateTimeWithZone" // TypeDateTime is variable used for Variable of type DateTime TypeDateTime string = "DateTime" // TypeID is variable used for Variable of type ID TypeID string = "ID" // TypeJSON is variable used for Variable of type Jsonb TypeJSON string = "JSON" // DefaultCharacterSize is variable used for specifying size of sql type ID DefaultCharacterSize int = 100 // TypeObject is a string with value object TypeObject string = "Object" // TypeEnum is a variable type enum TypeEnum string = "Enum" // DirectiveUnique is used in schema module to add unique index DirectiveUnique string = "unique" // DirectiveIndex is used in schema module to add index 
DirectiveIndex string = "index" // DirectiveForeign is used in schema module to add foreign key DirectiveForeign string = "foreign" // DirectivePrimary is used in schema module to add primary key DirectivePrimary string = "primary" // DirectiveAutoIncrement is used in schema module to add primary key DirectiveAutoIncrement string = "autoIncrement" // DirectiveCreatedAt is used in schema module to specify the created location DirectiveCreatedAt string = "createdAt" // DirectiveUpdatedAt is used in schema module to add Updated location DirectiveUpdatedAt string = "updatedAt" // DirectiveLink is used in schema module to add link DirectiveLink string = "link" // DirectiveDefault is used to add default key DirectiveDefault string = "default" // DirectiveArgs is used in schema module to specify the created location DirectiveArgs string = "args" // DirectiveStringSize denotes the maximum allowable character for field type Char, Varchar, ID DirectiveStringSize string = "size" // DefaultIndexSort specifies default order of sorting DefaultIndexSort string = "asc" // DefaultIndexOrder specifies default order of order DefaultIndexOrder int = 1 // DefaultScale specifies the default scale to be used for sql column types float,date,datetime if not provided DefaultScale int = 10 // DefaultPrecision specifies the default precision to be used for sql column types float if not provided DefaultPrecision int = 38 // DefaultDateTimePrecision specifies the default precision to be used for sql column types datetime & time DefaultDateTimePrecision int = 6 ) // InspectorFieldType is the type for storing sql inspection information type InspectorFieldType struct { // TableSchema is the schema name for postgres & sqlserver. 
// it is the database name for mysql TableSchema string `db:"TABLE_SCHEMA"` TableName string `db:"TABLE_NAME"` ColumnName string `db:"COLUMN_NAME"` // FieldType is the data type of column FieldType string `db:"DATA_TYPE"` // FieldNull specifies whether the given column can be null or not // It can be either (NO) or (YES) FieldNull string `db:"IS_NULLABLE"` OrdinalPosition string `db:"ORDINAL_POSITION"` // FieldDefault specifies the default value of columns FieldDefault string `db:"DEFAULT"` // AutoIncrement specifies whether the column has auto increment constraint // It can be either (true) or (false) AutoIncrement string `db:"AUTO_INCREMENT"` VarcharSize int `db:"CHARACTER_MAXIMUM_LENGTH"` NumericScale int `db:"NUMERIC_SCALE"` NumericPrecision int `db:"NUMERIC_PRECISION"` DateTimePrecision int `db:"DATETIME_PRECISION"` ConstraintName string `db:"CONSTRAINT_NAME"` DeleteRule string `db:"DELETE_RULE"` RefTableSchema string `db:"REFERENCED_TABLE_SCHEMA"` RefTableName string `db:"REFERENCED_TABLE_NAME"` RefColumnName string `db:"REFERENCED_COLUMN_NAME"` } // IndexType is the type use to indexkey information of sql inspection type IndexType struct { TableSchema string `db:"TABLE_SCHEMA"` TableName string `db:"TABLE_NAME"` ColumnName string `db:"COLUMN_NAME"` IndexName string `db:"INDEX_NAME"` Order int `db:"SEQ_IN_INDEX"` // Sort can be either (asc) or (desc) Sort string `db:"SORT"` // IsUnique specifies whether the column has a unique index IsUnique bool `db:"IS_UNIQUE"` // IsPrimary specifies whether the column has a index IsPrimary bool `db:"IS_PRIMARY"` }
gateway/model/schema_type.go
0.715126
0.450178
schema_type.go
starcoder
package types import ( "bytes" "fmt" "github.com/davecgh/go-spew/spew" "math/big" "reflect" "testing" "github.com/DxChainNetwork/godx/common" ) var checkEqualityHandler map[reflect.Type]func(*testing.T, string, string, reflect.Value, reflect.Value) var dumper = spew.ConfigState{DisableMethods: true, Indent: " "} func init() { checkEqualityHandler = make(map[reflect.Type]func(*testing.T, string, string, reflect.Value, reflect.Value)) checkEqualityHandler[reflect.TypeOf(new(big.Int))] = checkBigIntEqual checkEqualityHandler[reflect.TypeOf([]byte{})] = checkByteSliceEqual checkEqualityHandler[reflect.TypeOf(common.Hash{})] = checkHashEqual checkEqualityHandler[reflect.TypeOf(common.Address{})] = checkAddressEqual checkEqualityHandler[reflect.TypeOf(txdata{})] = checkTxdataEqual checkEqualityHandler[reflect.TypeOf(Transaction{})] = checkTransactionEqual checkEqualityHandler[reflect.TypeOf(Log{})] = checkLogEqual checkEqualityHandler[reflect.TypeOf(Receipt{})] = checkReceiptEqual checkEqualityHandler[reflect.TypeOf(Header{})] = checkHeaderEqual checkEqualityHandler[reflect.TypeOf(Body{})] = checkBodyEqual checkEqualityHandler[reflect.TypeOf(Block{})] = checkBlockEqual } func CheckError(t *testing.T, dataName string, got error, wants ...error) { if (len(wants) == 0 || wants[0] == nil) && got != nil { t.Errorf("%s Got unexpected error.\nGot %s\nWant nil", dataName, got.Error()) } if (len(wants) != 0 && wants[0] != nil) && got == nil { t.Errorf("%s does not get expected error.\nGot nil\nWant %s", dataName, wants[0].Error()) } if len(wants) != 0 && wants[0] != nil && got != nil { match := false for _, want := range wants { if want == nil { continue } if want.Error() == got.Error() { match = true } } if !match { t.Errorf("%s does not get expected error.\nGot %s\nWant %s", dataName, got.Error(), wants[0].Error()) } } } func CheckEquality(t *testing.T, inputName, fieldName string, got, want interface{}) { va := reflect.ValueOf(got) vb := reflect.ValueOf(want) checkEquality(t, 
inputName, fieldName, va, vb) } // checkEquality Check equality for two variables a and b. If not equal, print the error message // testName: name of the test e.g TestMyFunction // inputName: name of the input data e.g OK // fieldName: name of the field e.g TxHash // This function depend on some predefined equal function. // Notice when comparing two structures, only the first field difference will be printed as error. func checkEquality(t *testing.T, inputName, fieldName string, got, want reflect.Value) { if got.Type() != want.Type() { t.Fatalf("%s.%s have uncomparable types\n%v / %v", inputName, fieldName, got.Type(), want.Type()) } if !got.CanInterface() || !want.CanInterface() { // unexported fields are not compared return } if got.Kind() == reflect.Ptr && got.Type() != reflect.TypeOf(new(big.Int)) { // Deal with ptr if got.IsNil() { got = reflect.New(got.Type()) } if want.IsNil() { want = reflect.New(want.Type()) } // If the pointer is the same, underlying struct must be equal. if got.Pointer() == want.Pointer() { return } checkEquality(t, inputName, fieldName, got.Elem(), want.Elem()) } else if got.Kind() == reflect.Slice && got.Type() != reflect.TypeOf([]byte{}) { // deal with slice if got.Len() != want.Len() { t.Fatalf("%s.%s have unexpected length\nGot %d\nWant %d", inputName, fieldName, got.Len(), want.Len()) } for i := 0; i != got.Len(); i++ { checkEquality(t, inputName, fieldName+fmt.Sprintf("[%d]", i), got.Index(i), want.Index(i)) } } else if fn, ok := checkEqualityHandler[got.Type()]; ok { // type comparison method is defined fn(t, inputName, fieldName, got, want) } else if got.Kind() == reflect.Struct { // By default struct check all public fields checkStructFullEqual(t, inputName, fieldName, got, want) } else { // Other types use reflect.DeepEqual to check equality. 
if !reflect.DeepEqual(got.Interface(), want.Interface()) { t.Fatalf("%s.%s unexpected value\nGot %sWant %s", inputName, fieldName, dumper.Sdump(got.Interface()), dumper.Sdump(want.Interface())) } } } func checkBigIntEqual(t *testing.T, inputName, fieldName string, got, want reflect.Value) { got.Interface() a := got.Interface().(*big.Int) b := want.Interface().(*big.Int) if a == nil { a = big.NewInt(0) } if b == nil { b = big.NewInt(0) } if a.Cmp(b) != 0 { t.Fatalf("%s.%s unexpected value\nGot %v\nWant %v", inputName, fieldName, a, b) } } // checkByteSliceEqual check whether two byte slices are equal. // If not equal, raise error. func checkByteSliceEqual(t *testing.T, inputName, fieldName string, got, want reflect.Value) { a := got.Interface().([]byte) b := want.Interface().([]byte) if a == nil { a = []byte{} } if b == nil { b = []byte{} } if !bytes.Equal(a, b) { t.Fatalf("%s.%s unexpected value\nGot %x\nWant %x", inputName, fieldName, a, b) } } func checkHashEqual(t *testing.T, inputName, fieldName string, got, want reflect.Value) { a := got.Interface().(common.Hash) b := want.Interface().(common.Hash) if a != b { t.Fatalf("%s.%s unexpected value\nGot %x\nWant %x", inputName, fieldName, a, b) } } func checkAddressEqual(t *testing.T, inputName, fieldName string, got, want reflect.Value) { a := got.Interface().(common.Address) b := want.Interface().(common.Address) if a != b { t.Fatalf("%s.%s unexpected value\nGot %x\nWant %x", inputName, fieldName, a, b) } } // checkStructFullEqual check got == want in all fields func checkStructFullEqual(t *testing.T, inputName, fieldName string, got, want reflect.Value) { checkStructEqual(t, inputName, fieldName, got, want, nil) } // checkStructEqual will check whether got equals want without skipFields func checkStructEqual(t *testing.T, inputName, fieldName string, got, want reflect.Value, skipField map[string]struct{}) { for i := 0; i != got.NumField(); i++ { field := got.Type().Field(i).Name if _, ok := skipField[field]; ok 
|| got.Field(i).CanInterface() { // The field is skipped continue } fa := got.Field(i) fb := want.Field(i) checkEquality(t, inputName, fieldName+"."+field, fa, fb) } } // checkTxdataEqual recursively call checkEquality to check equality for // AccountNonce, Price, GasLimit, Recipient, Amount, Payload, V, R, S field // Do not check the Hash value func checkTxdataEqual(t *testing.T, inputName, fieldName string, got, want reflect.Value) { skip := map[string]struct{}{"Hash": {}} checkStructEqual(t, inputName, fieldName, got, want, skip) } // Transaction just check for data func checkTransactionEqual(t *testing.T, inputName, fieldName string, got, want reflect.Value) { dataA := got.Interface().(Transaction) dataB := want.Interface().(Transaction) checkTxdataEqual(t, inputName, fieldName+".data", reflect.ValueOf(dataA.data), reflect.ValueOf(dataB.data)) } // For receipt, check all fields equals func checkReceiptEqual(t *testing.T, inputName, fieldName string, got, want reflect.Value) { checkStructFullEqual(t, inputName, fieldName, got, want) } // For log, compare all fields func checkLogEqual(t *testing.T, inputName, fieldName string, got, want reflect.Value) { checkStructFullEqual(t, inputName, fieldName, got, want) } func checkHeaderEqual(t *testing.T, inputName, fieldName string, got, want reflect.Value) { checkStructFullEqual(t, inputName, fieldName, got, want) } func checkBodyEqual(t *testing.T, inputName, fieldName string, got, want reflect.Value) { checkStructFullEqual(t, inputName, fieldName, got, want) } func checkBlockEqual(t *testing.T, inputName, fieldName string, got, want reflect.Value) { ba := got.Interface().(Block) bb := want.Interface().(Block) checkEquality(t, inputName, fieldName+".header", reflect.ValueOf(ba.header), reflect.ValueOf(bb.header)) checkEquality(t, inputName, fieldName+".uncles", reflect.ValueOf(ba.uncles), reflect.ValueOf(bb.uncles)) checkEquality(t, inputName, fieldName+".transactions", reflect.ValueOf(ba.transactions), 
reflect.ValueOf(bb.transactions)) checkEquality(t, inputName, fieldName+".td", reflect.ValueOf(ba.td), reflect.ValueOf(bb.td)) }
core/types/comparator.go
0.550124
0.48688
comparator.go
starcoder
package sorts // BubbleSort sorts array with Bubble Sort method func BubbleSort(array []int) []int { for i := 1; i < len(array); i++ { for j := 0; j < len(array)-i; j++ { if array[j] > array[j+1] { array[j], array[j+1] = array[j+1], array[j] } } } return array } // InsertSort sorts array with Insert Sort method func InsertSort(array []int) []int { for i := 0; i < len(array); i++ { for j := i; j > 0 && array[j-1] > array[j]; j-- { array[j-1], array[j] = array[j], array[j-1] } } return array } // ShellSort sorts array with Shell's Sort Method func ShellSort(array []int) []int { for gap := len(array) / 2; gap > 0; gap /= 2 { for i := gap; i < len(array); i++ { if array[i] < array[i-gap] { for j := i - gap; j >= 0 && array[j] > array[j+gap]; j -= gap { array[j], array[j+gap] = array[j+gap], array[j] } } } } return array } // SelectSort sorts array with Select Sort Method func SelectSort(array []int) []int { var min, max int for i := 0; i < len(array)/2; i++ { min = i max = len(array) - i - 1 for j := i; j < len(array)-i; j++ { if array[j] > array[max] { max = j continue } if array[j] < array[min] { min = j } } // Minimum array[i], array[min] = array[min], array[i] // Maximum if max == i { max = min } array[len(array)-i-1], array[max] = array[max], array[len(array)-i-1] } return array } // heapAdjust adjust array // The array must conform to a heap except array[pos], // adjust array[pos] to make sure array[i] >= array[2i+1] && array[i] >= array[2i+2]. 
func heapAdjust(array []int, pos int, length int) { var temp = array[pos] for i := pos*2 + 1; i < length; i = i*2 + 1 { if i+1 < length && array[i] < array[i+1] { i++ } if array[i] > temp { array[pos] = array[i] pos = i } else { break } array[i] = temp } } // HeapSort sorts array with Heap Sort Method func HeapSort(array []int) []int { // Build heap for i := len(array)/2 - 1; i >= 0; i-- { heapAdjust(array, i, len(array)) } // Sort for j := len(array) - 1; j > 0; j-- { array[j], array[0] = array[0], array[j] heapAdjust(array, 0, j) } return array } // QuickSort sorts array with Quick Sort Method func QuickSort(array []int, left int, right int) []int { if left < right { var pivot = array[left] var low = left var high = right for low < high { for low < high && array[high] >= pivot { high-- } array[low] = array[high] for low < high && array[low] <= pivot { low++ } array[high] = array[low] } array[low] = pivot QuickSort(array, left, low-1) QuickSort(array, low+1, right) } return array } // merge array func merge(array, aux []int, low, mid, high int) { for i := low; i <= high; i++ { aux[i] = array[i] } j := mid + 1 for k := low; k <= high; k++ { if low > mid { array[k] = aux[j] j++ } else if j > high { array[k] = aux[low] low++ } else if aux[low] > aux[j] { array[k] = aux[j] j++ } else { array[k] = aux[low] low++ } } } // mergeSortUTD merge array from up to down func mergeSortUTD(array, aux []int, low, high int) { if low >= high { return } mid := (low + high) / 2 mergeSortUTD(array, aux, low, mid) mergeSortUTD(array, aux, mid+1, high) merge(array, aux, low, mid, high) } // MergeSort sorts array with Merge Sort Method func MergeSort(array []int) []int { aux := make([]int, len(array)) mergeSortUTD(array, aux, 0, len(array)-1) return array }
golang/src/sorts/sorts.go
0.54819
0.666212
sorts.go
starcoder
package fingerprint // Get predefined DNS service providers // taken from https://github.com/indianajson/can-i-take-over-dns#dns-providers func Get() []DNS { return []DNS{ { Provider: "000Domains", Status: []int{1, 3}, Pattern: `^(fw)?ns[\d]\.000domains\.com$`, }, { Provider: "AWS Route 53", Status: []int{0}, Pattern: `^ns\-([\w]{4}\.awsdns\-[\w]{2}\.(co\.uk|org)|[\w]{3}\.awsdns\-[\w]{2}\.(com|net))$`, }, { Provider: "Microsoft Azure", Status: []int{1}, Pattern: `^ns(4\-[\w]{2}\.azure\-dns\.info|1\-[\w]{2}\.azure\-dns\.com|2\-[\w]{2}\.azure\-dns\.net|3\-[\w]{2}\.azure\-dns\.org)$`, }, { Provider: "Bizland", Status: []int{1}, Pattern: `^(clickme2?\.click2site\.com|ns[12]\.bizland\.com)$`, }, { Provider: "Cloudflare", Status: []int{2}, Pattern: `^[\w]+\.ns\.cloudflare\.com$`, }, { Provider: "DigitalOcean", Status: []int{1}, Pattern: `^ns[1-3]\.digitalocean\.com$`, }, { Provider: "DNSMadeEasy", Status: []int{1}, Pattern: `^ns[\w]{,2}\.dnsmade{2}asy\.com$`, }, { Provider: "DNSimple", Status: []int{1}, Pattern: `^ns[1-4]\.dnsimple\.com$`, }, { Provider: "Domain.com", Status: []int{1, 3}, Pattern: `^ns[1-2]\.domain\.com$`, }, { Provider: "DomainPeople", Status: []int{0}, Pattern: `^ns[1-2]\.domainpeople\.com$`, }, { Provider: "Dotster", Status: []int{1, 3}, Pattern: `^ns[12]\.(nameresolve|dotster)\.com$`, }, { Provider: "Dotster", Status: []int{1, 3}, Pattern: `^ns[12]\.(nameresolve|dotster)\.com$`, }, { Provider: "EasyDNS", Status: []int{1}, Pattern: `^dns(?:4\.easydns\.info|1\.easydns\.com|2\.easydns\.net|3\.easydns\.org)$`, }, { Provider: "Gandi.net", Status: []int{0}, Pattern: `^[\w]+\.dns\.gandi\.net$`, }, { Provider: "Google Cloud", Status: []int{1}, Pattern: `^ns\-cloud\-[\w]+\.go{2}gledomains\.com$`, }, { Provider: "Hover", Status: []int{0}, Pattern: `^ns[1-2]\.hover\.com$`, }, { Provider: "Hurricane Electric", Status: []int{1}, Pattern: `^ns[1-5]\.he\.net$`, }, { Provider: "Linode", Status: []int{1}, Pattern: `^ns[1-2]\.linode\.com$`, }, { Provider: 
"MediaTemple", Status: []int{0}, Pattern: `^ns[1-2]\.mediatemple\.net$`, }, { Provider: "MyDomain", Status: []int{1, 3}, Pattern: `^ns[1-2]\.mydomain\.com$`, }, { Provider: "Name.com", Status: []int{1, 3}, Pattern: `^ns[1-4][\w]+?\.name\.com$`, }, { Provider: "Network Solutions", Status: []int{0}, Pattern: `^ns[\w]+?\.worldnic\.com$`, }, { Provider: "NS1", Status: []int{1}, Pattern: `^dns[1-4]\.p[\d]{,2}\.nsone\.net$`, }, { Provider: "TierraNet", Status: []int{1}, Pattern: `^ns[1-2]\.domaindiscover\.com$`, }, { Provider: "Reg.ru", Status: []int{1, 3}, Pattern: `^ns[1-2]\.reg\.ru$`, }, { Provider: "UltraDNS", Status: []int{0}, Pattern: `^[psu]dns[\d]{,3}\.ultradns\.com$`, }, { Provider: "Yahoo Small Business", Status: []int{1, 3}, Pattern: `^yns[1-2]\.yahoo\.com$`, }, } }
pkg/fingerprint/fingerprint.go
0.521471
0.458167
fingerprint.go
starcoder
package carray import ( "reflect" ) type CArray struct { // count is the number of real data in ring buffer count int // cap is the cap of buf, and must be the power of two. cap int // buf is a ring-buffer structure with a fixed size. buf []interface{} // _type is the type of the buf's element which is determined when CArray is initialized. _type reflect.Type // front is the position of header element in buf. front int // tail is the position of tail element in buf. tail int // remove is the function whether to remove the element with data, return true if need removed. remove func (elem *(interface{}), data interface{}, arr *CArray) bool } func MakeCArray(cap int, remove func (elem *(interface{}), data interface{}, arr *CArray) bool) *CArray { if cap <= 0 { panic(plainError("MakeCArray: size must be a positive number")) } cap = LowPowerOfTwo(cap) return &CArray{ count: 0, cap: cap, buf: make([]interface{}, cap, cap), remove: remove, } } func (arr *CArray) Empty() bool { return arr.count == 0 } func (arr *CArray) Full() bool { return arr.count == arr.cap } func (arr *CArray) Front() interface{} { if arr.Empty() { return nil } return arr.buf[arr.front] } func (arr *CArray) Tail() interface{} { if arr.Empty() { return nil } return arr.buf[arr.tail] } func (arr *CArray) PushHeader(elem interface{}) bool { if !arr.checkElem(elem) { panic(plainError("CArray.PushHeader: type of elem is different from the first")) } if arr.Full() { return false } if !arr.Empty() { arr.front = arr.realIndex(arr.front - 1) } arr.buf[arr.front] = elem arr.count++ return true } func (arr *CArray) PushTail(elem interface{}) bool { if !arr.checkElem(elem) { panic(plainError("CArray.PushHeader: type of elem is different from the first")) } if arr.Full() { return false } if !arr.Empty() { arr.tail = arr.realIndex(arr.tail + 1) } arr.buf[arr.tail] = elem arr.count++ return true } func (arr *CArray) PopFront() interface{} { if arr.Empty() { return nil } defer func () { if !(arr.count == 1) { 
arr.front = arr.realIndex(arr.front + 1) } arr.count-- }() return arr.buf[arr.front] } func (arr *CArray) PopTail() interface{} { if arr.Empty() { return nil } defer func () { if !(arr.count == 1) { arr.tail = arr.realIndex(arr.tail - 1) } arr.count-- }() return arr.buf[arr.tail] } func (arr *CArray) Remove(data interface{}, front bool) (interface{}, bool) { if arr.Empty() { return nil, false } elem := arr.buf[arr.front] if !front { elem = arr.buf[arr.tail] } if arr.remove(&elem, data, arr) { if front { arr.PopFront() } else { arr.PopTail() } return elem, true } return nil, false } func (arr *CArray) realIndex(index int) int { return index & (arr.cap - 1) } func (arr *CArray) checkElem(elem interface{}) bool { if arr._type != nil && arr._type.Kind() != reflect.TypeOf(elem).Kind() { return false } if arr._type == nil { arr._type = reflect.TypeOf(elem) } return true }
carray.go
0.540196
0.422803
carray.go
starcoder
package ast type Assignment struct { baseNode Expression Node Value Node } func CreateAssignment(r *Range, expression Node, value Node) *Assignment { return &Assignment{ baseNode: baseNode{ nodeType: NodeTypeAssignment, nodeRange: r, }, Expression: expression, Value: value, } } type Increment struct { baseNode Expression Node } func CreateIncrement(r *Range, expression Node) *Increment { return &Increment{ baseNode: baseNode{ nodeType: NodeTypeIncrement, nodeRange: r, }, Expression: expression, } } type Decrement struct { baseNode Expression Node } func CreateDecrement(r *Range, expression Node) *Decrement { return &Decrement{ baseNode: baseNode{ nodeType: NodeTypeDecrement, nodeRange: r, }, Expression: expression, } } type LooseAssignment struct { baseNode Expression Node Value Node } func CreateLooseAssignment(r *Range, expression Node, value Node) *LooseAssignment { return &LooseAssignment{ baseNode: baseNode{ nodeType: NodeTypeLooseAssignment, nodeRange: r, }, Expression: expression, Value: value, } } type AdditionAssignment struct { baseNode Expression Node Value Node } func CreateAdditionAssignment(r *Range, expression Node, value Node) *AdditionAssignment { return &AdditionAssignment{ baseNode: baseNode{ nodeType: NodeTypeAdditionAssignment, nodeRange: r, }, Expression: expression, Value: value, } } type SubtractionAssignment struct { baseNode Expression Node Value Node } func CreateSubtractionAssignment(r *Range, expression Node, value Node) *SubtractionAssignment { return &SubtractionAssignment{ baseNode: baseNode{ nodeType: NodeTypeSubtractionAssignment, nodeRange: r, }, Expression: expression, Value: value, } } type MultiplicationAssignment struct { baseNode Expression Node Value Node } func CreateMultiplicationAssignment(r *Range, expression Node, value Node) *MultiplicationAssignment { return &MultiplicationAssignment{ baseNode: baseNode{ nodeType: NodeTypeMultiplicationAssignment, nodeRange: r, }, Expression: expression, Value: value, } } type 
DivisionAssignment struct { baseNode Expression Node Value Node } func CreateDivisionAssignment(r *Range, expression Node, value Node) *DivisionAssignment { return &DivisionAssignment{ baseNode: baseNode{ nodeType: NodeTypeDivisionAssignment, nodeRange: r, }, Expression: expression, Value: value, } } type ModuloAssignment struct { baseNode Expression Node Value Node } func CreateModuloAssignment(r *Range, expression Node, value Node) *ModuloAssignment { return &ModuloAssignment{ baseNode: baseNode{ nodeType: NodeTypeModuloAssignment, nodeRange: r, }, Expression: expression, Value: value, } }
ast/assignments.go
0.629775
0.636452
assignments.go
starcoder
package forminput import ( "math/big" ) // Enumerates data types that are supported as subscription input values. type InputDataType string type inputDataTypeValuesType struct { None InputDataType String InputDataType Number InputDataType Boolean InputDataType Guid InputDataType Uri InputDataType } var InputDataTypeValues = inputDataTypeValuesType{ // No data type is specified. None: "none", // Represents a textual value. String: "string", // Represents a numeric value. Number: "number", // Represents a value of true or false. Boolean: "boolean", // Represents a Guid. Guid: "guid", // Represents a URI. Uri: "uri", } // Describes an input for subscriptions. type InputDescriptor struct { // The ids of all inputs that the value of this input is dependent on. DependencyInputIds *[]string `json:"dependencyInputIds,omitempty"` // Description of what this input is used for Description *string `json:"description,omitempty"` // The group localized name to which this input belongs and can be shown as a header for the container that will include all the inputs in the group. GroupName *string `json:"groupName,omitempty"` // If true, the value information for this input is dynamic and should be fetched when the value of dependency inputs change. HasDynamicValueInformation *bool `json:"hasDynamicValueInformation,omitempty"` // Identifier for the subscription input Id *string `json:"id,omitempty"` // Mode in which the value of this input should be entered InputMode *InputMode `json:"inputMode,omitempty"` // Gets whether this input is confidential, such as for a password or application key IsConfidential *bool `json:"isConfidential,omitempty"` // Localized name which can be shown as a label for the subscription input Name *string `json:"name,omitempty"` // Custom properties for the input which can be used by the service provider Properties *map[string]interface{} `json:"properties,omitempty"` // Underlying data type for the input value. 
When this value is specified, InputMode, Validation and Values are optional. Type *string `json:"type,omitempty"` // Gets whether this input is included in the default generated action description. UseInDefaultDescription *bool `json:"useInDefaultDescription,omitempty"` // Information to use to validate this input's value Validation *InputValidation `json:"validation,omitempty"` // A hint for input value. It can be used in the UI as the input placeholder. ValueHint *string `json:"valueHint,omitempty"` // Information about possible values for this input Values *InputValues `json:"values,omitempty"` } // Defines a filter for subscription inputs. The filter matches a set of inputs if any (one or more) of the groups evaluates to true. type InputFilter struct { // Groups of input filter expressions. This filter matches a set of inputs if any (one or more) of the groups evaluates to true. Conditions *[]InputFilterCondition `json:"conditions,omitempty"` } // An expression which can be applied to filter a list of subscription inputs type InputFilterCondition struct { // Whether or not to do a case sensitive match CaseSensitive *bool `json:"caseSensitive,omitempty"` // The Id of the input to filter on InputId *string `json:"inputId,omitempty"` // The "expected" input value to compare with the actual input value InputValue *string `json:"inputValue,omitempty"` // The operator applied between the expected and actual input value Operator *InputFilterOperator `json:"operator,omitempty"` } type InputFilterOperator string type inputFilterOperatorValuesType struct { Equals InputFilterOperator NotEquals InputFilterOperator } var InputFilterOperatorValues = inputFilterOperatorValuesType{ Equals: "equals", NotEquals: "notEquals", } // Mode in which a subscription input should be entered (in a UI) type InputMode string type inputModeValuesType struct { None InputMode TextBox InputMode PasswordBox InputMode Combo InputMode RadioButtons InputMode CheckBox InputMode TextArea InputMode } 
var InputModeValues = inputModeValuesType{ // This input should not be shown in the UI None: "none", // An input text box should be shown TextBox: "textBox", // An password input box should be shown PasswordBox: "passwordBox", // A select/combo control should be shown Combo: "combo", // Radio buttons should be shown RadioButtons: "radioButtons", // Checkbox should be shown(for true/false values) CheckBox: "checkBox", // A multi-line text area should be shown TextArea: "textArea", } // Describes what values are valid for a subscription input type InputValidation struct { // Gets or sets the data data type to validate. DataType *InputDataType `json:"dataType,omitempty"` // Gets or sets if this is a required field. IsRequired *bool `json:"isRequired,omitempty"` // Gets or sets the maximum length of this descriptor. MaxLength *int `json:"maxLength,omitempty"` // Gets or sets the minimum value for this descriptor. MaxValue *big.Float `json:"maxValue,omitempty"` // Gets or sets the minimum length of this descriptor. MinLength *int `json:"minLength,omitempty"` // Gets or sets the minimum value for this descriptor. MinValue *big.Float `json:"minValue,omitempty"` // Gets or sets the pattern to validate. Pattern *string `json:"pattern,omitempty"` // Gets or sets the error on pattern mismatch. PatternMismatchErrorMessage *string `json:"patternMismatchErrorMessage,omitempty"` } // Information about a single value for an input type InputValue struct { // Any other data about this input Data *map[string]interface{} `json:"data,omitempty"` // The text to show for the display of this value DisplayValue *string `json:"displayValue,omitempty"` // The value to store for this input Value *string `json:"value,omitempty"` } // Information about the possible/allowed values for a given subscription input type InputValues struct { // The default value to use for this input DefaultValue *string `json:"defaultValue,omitempty"` // Errors encountered while computing dynamic values. 
Error *InputValuesError `json:"error,omitempty"` // The id of the input InputId *string `json:"inputId,omitempty"` // Should this input be disabled IsDisabled *bool `json:"isDisabled,omitempty"` // Should the value be restricted to one of the values in the PossibleValues (True) or are the values in PossibleValues just a suggestion (False) IsLimitedToPossibleValues *bool `json:"isLimitedToPossibleValues,omitempty"` // Should this input be made read-only IsReadOnly *bool `json:"isReadOnly,omitempty"` // Possible values that this input can take PossibleValues *[]InputValue `json:"possibleValues,omitempty"` } // Error information related to a subscription input value. type InputValuesError struct { // The error message. Message *string `json:"message,omitempty"` } type InputValuesQuery struct { CurrentValues *map[string]string `json:"currentValues,omitempty"` // The input values to return on input, and the result from the consumer on output. InputValues *[]InputValues `json:"inputValues,omitempty"` // Subscription containing information about the publisher/consumer and the current input values Resource interface{} `json:"resource,omitempty"` }
vendor/github.com/microsoft/azure-devops-go-api/azuredevops/forminput/models.go
0.756807
0.413063
models.go
starcoder
package resource import ( "fmt" "reflect" ) func modelComplexDeepEqual(model1 map[string]interface{}, model2 map[string]interface{}, equivalent bool) bool { var m1, m2 map[string]interface{} if len(model1) >= len(model2) { m1 = model1 m2 = model2 } else { m1 = model2 m2 = model1 } notFoundCount := 0 for k1, v1 := range m1 { if !equivalent || !isIgnoredEquivalencePropName(k1) { v2, found := m2[k1] if !found { if !isEmptyDynamicValue(v1) { return false } notFoundCount = notFoundCount + 1 } if found && !modelDeepEqual(v1, v2, equivalent) { return false } } } if notFoundCount > 0 { for k2, v2 := range m2 { _, found := m1[k2] if !found && !isEmptyDynamicValue(v2) { return false } } } return true } func modelCollectionDeepEqual(c1 []interface{}, c2 []interface{}, equivalent bool) bool { l := len(c1) if l != len(c2) { return false } if l > 0 { for pos, v1 := range c1 { if !modelDeepEqual(v1, c2[pos], equivalent) { return false } } } return true } func modelDeepEqual(v1 interface{}, v2 interface{}, equivalent bool) bool { if v1 == nil && v2 == nil { return true } else if (v1 == nil || v2 == nil) && isEmptyDynamicValue(v1) && isEmptyDynamicValue(v2) { return true } else { k := reflect.TypeOf(v1).Kind() if k != reflect.TypeOf(v2).Kind() { return false } switch k { case reflect.String, reflect.Float64, reflect.Bool: if v1 != v2 { return false } case reflect.Map: if !modelComplexDeepEqual(v1.(map[string]interface{}), v2.(map[string]interface{}), equivalent) { return false } case reflect.Slice: if !modelCollectionDeepEqual(v1.([]interface{}), v2.([]interface{}), equivalent) { return false } default: panic(fmt.Sprintf("Unhandled JSON type: %d", k)) } } return true } func isEmptyDynamicValue(value interface{}) bool { if value == nil { return true } kind := reflect.TypeOf(value).Kind() return (kind == reflect.Map && len(value.(map[string]interface{})) == 0) || (kind == reflect.Slice && len(value.([]interface{})) == 0) } func isIgnoredEquivalencePropName(propName string) bool { 
return propName == "id" || propName == "_id" }
resource/dynamic_model_util.go
0.514644
0.4165
dynamic_model_util.go
starcoder
package constant import ( "fmt" "math/big" "strconv" "subc/scan" ) // Type is a valid constant type. type Type int // Various types. const ( Unknown Type = iota Int String ) // Value represents a constant value. type Value interface { Type() Type // The type of the value. String() string // The value itself in form of a string. } type ( unknownVal struct{} int64Val int64 intVal struct{ val *big.Int } stringVal string ) func (unknownVal) Type() Type { return Unknown } func (int64Val) Type() Type { return Int } func (intVal) Type() Type { return Int } func (stringVal) Type() Type { return String } func (unknownVal) String() string { return "unknown" } func (x int64Val) String() string { return strconv.FormatInt(int64(x), 10) } func (x intVal) String() string { return x.val.String() } func (x stringVal) String() string { return strconv.Quote(string(x)) } var ( minInt64 = big.NewInt(-1 << 63) maxInt64 = big.NewInt(1<<63 - 1) ) func normInt(x *big.Int) Value { if minInt64.Cmp(x) <= 0 && x.Cmp(maxInt64) <= 0 { return int64Val(x.Int64()) } return intVal{x} } func is32bit(x int64) bool { const s = 32 return -1<<(s-1) <= x && x <= 1<<(s-1)-1 } func is63bit(x int64) bool { const s = 63 return -1<<(s-1) <= x && x <= 1<<(s-1)-1 } func truthInt(b bool) int64 { if b { return 1 } return 0 } // BinaryOp does a binary operation on two constant values. 
func BinaryOp(x Value, op scan.Type, y Value) (Value, error) { x, y = match(x, y) switch x := x.(type) { case unknownVal: return x, nil case int64Val: a := int64(x) b := int64(y.(int64Val)) var c int64 switch op { case scan.Plus: if !is63bit(a) || !is63bit(b) { return normInt(new(big.Int).Add(big.NewInt(a), big.NewInt(b))), nil } c = a + b case scan.Minus: if !is63bit(a) || !is63bit(b) { return normInt(new(big.Int).Sub(big.NewInt(a), big.NewInt(b))), nil } c = a - b case scan.Mul: if !is32bit(a) || !is32bit(b) { return normInt(new(big.Int).Mul(big.NewInt(a), big.NewInt(b))), nil } c = a * b case scan.Div: if b == 0 { return int64Val(0), fmt.Errorf("division by zero") } c = a / b case scan.Mod: if b == 0 { return int64Val(0), fmt.Errorf("modulo by zero") } c = a % b case scan.And: c = a & b case scan.Or: c = a | b case scan.Xor: c = a ^ b case scan.Eq, scan.Neq, scan.Lt, scan.Leq, scan.Gt, scan.Geq: c = truthInt(Compare(x, op, y)) case scan.Lsh, scan.Rsh: if b < 0 { return int64Val(0), fmt.Errorf("negative shift count") } return Shift(x, op, uint(b)) default: goto Error } return int64Val(c), nil case intVal: a := x.val b := y.(intVal).val var c big.Int switch op { case scan.Plus: c.Add(a, b) case scan.Minus: c.Sub(a, b) case scan.Mul: c.Mul(a, b) case scan.Div: if b.Sign() == 0 { return normInt(&c), fmt.Errorf("division by zero") } c.Quo(a, b) case scan.Mod: if b.Sign() == 0 { return normInt(&c), fmt.Errorf("modulus by zero") } c.Rem(a, b) case scan.And: c.And(a, b) case scan.Or: c.Or(a, b) case scan.Xor: c.Xor(a, b) case scan.Eq, scan.Neq, scan.Lt, scan.Leq, scan.Gt, scan.Geq: c.SetInt64(truthInt(Compare(x, op, y))) case scan.Lsh, scan.Rsh: if b.Int64() < 0 { return normInt(&c), fmt.Errorf("negative shift count") } return Shift(x, op, uint(b.Int64())) default: goto Error } return normInt(&c), nil } Error: panic(fmt.Sprintf("invalid binary operation %v %s %v", x, op, y)) } // UnaryOp does a unary operation on a unary expression. 
func UnaryOp(op scan.Type, y Value, prec uint) Value { switch op { case scan.Plus: switch y.(type) { case unknownVal, int64Val, intVal: return y } case scan.Minus: switch y := y.(type) { case unknownVal: return y case int64Val: if z := -y; z != y { return z // no overflow } return normInt(new(big.Int).Neg(big.NewInt(int64(y)))) case intVal: return normInt(new(big.Int).Neg(y.val)) } case scan.Negate: var z big.Int switch y := y.(type) { case unknownVal: return y case int64Val: z.Not(big.NewInt(int64(y))) case intVal: z.Not(y.val) default: goto Error } return normInt(&z) case scan.Not: switch y := y.(type) { case unknownVal: return y case int64Val: if y == 0 { return int64Val(1) } return int64Val(0) case intVal: z := new(big.Int).SetInt64(0) if y.val.Sign() == 0 { z.SetInt64(1) } return normInt(z) } } Error: panic(fmt.Sprintf("invalid unary operation %s%v", op, y)) } // ord the type size of a value to see if it needs to // be converted up or down. func ord(x Value) int { switch x.(type) { default: return 0 case int64Val: return 1 case intVal: return 2 } } // match casts the values into their proper types. func match(x, y Value) (Value, Value) { if ord(x) > ord(y) { y, x = match(y, x) return x, y } switch x := x.(type) { case unknownVal: return x, x case int64Val: switch y := y.(type) { case int64Val: return x, y case intVal: return intVal{big.NewInt(int64(x))}, y } case intVal: switch y := y.(type) { case intVal: return x, y } } panic(fmt.Errorf("unknown type: %T %T", x, y)) } // Shift applies a left or right shift on a constant value. 
func Shift(x Value, op scan.Type, s uint) (Value, error) { if s > 1024 { return int64Val(0), fmt.Errorf("shift count too large") } switch x := x.(type) { case unknownVal: return x, nil case int64Val: if s == 0 { return x, nil } switch op { case scan.Lsh: z := big.NewInt(int64(x)) return normInt(z.Lsh(z, s)), nil case scan.Rsh: return x >> s, nil } case intVal: if s == 0 { return x, nil } var z big.Int switch op { case scan.Lsh: return normInt(z.Lsh(x.val, s)), nil case scan.Rsh: return normInt(z.Rsh(x.val, s)), nil } } panic(fmt.Sprintf("invalid shift %v %s %d", x, op, s)) } // MakeFromLiteral creates a constant value from a string. func MakeFromLiteral(lit string, tok scan.Type, prec uint) Value { if prec != 0 { panic("limited precision not supported") } switch tok { case scan.Number: if x, err := strconv.ParseInt(lit, 0, 64); err == nil { return int64Val(x) } if x, ok := new(big.Int).SetString(lit, 0); ok { return intVal{x} } case scan.Rune: // special case because UnquoteChar fails on these if lit == "'\\'" { return int64Val('\\') } else if lit == "'''" { return int64Val('\'') } if n := len(lit); n >= 2 { if code, _, _, err := strconv.UnquoteChar(lit[1:n-1], '\''); err == nil { return int64Val(code) } } case scan.String: if s, err := strconv.Unquote(lit); err == nil { return stringVal(s) } } return nil } // cmpZero returns if a value <, >, <=, >=, or == to zero. func cmpZero(x int, op scan.Type) bool { switch op { case scan.Eq: return x == 0 case scan.Neq: return x != 0 case scan.Lt: return x < 0 case scan.Leq: return x <= 0 case scan.Gt: return x > 0 case scan.Geq: return x >= 0 } panic("unreachable") } // Compare compares two values. 
func Compare(x Value, op scan.Type, y Value) bool { x, y = match(x, y) switch x := x.(type) { case unknownVal: return false case int64Val: y := y.(int64Val) switch op { case scan.Eq: return x == y case scan.Neq: return x != y case scan.Lt: return x < y case scan.Leq: return x <= y case scan.Gt: return x > y case scan.Geq: return x >= y } case intVal: return cmpZero(x.val.Cmp(y.(intVal).val), op) } panic(fmt.Sprintf("invalid comparison %v %s %v", x, op, y)) } // MakeInt64 creates a constant value out of a int64. func MakeInt64(x int64) Value { return int64Val(x) } // MakeUint64 creates a constant value out of a uint64. func MakeUint64(x uint64) Value { return normInt(new(big.Int).SetUint64(x)) }
src/subc/constant/value.go
0.636014
0.441191
value.go
starcoder
package tiles import ( "math" ) // TileSize is a size of each tile in pixels const TileSize = 256 // Tile contains tile properties // Z,X,Y - tile coordinates according to OSM specs(see http://wiki.openstreetmap.org/wiki/Slippy_map_tilenames) // Bounding box - geographical coordinates of each side of tile type Tile struct { Z, X, Y int Lat float64 Lon float64 BoundingBox BoundingBox } // Tile2lon returns longitude of the tile top side func Tile2lon(x int, z int) float64 { return float64(x)/math.Pow(2.0, float64(z))*360.0 - 180.0 } // Tile2lat returns latitude of the tile left side func Tile2lat(y int, z int) float64 { n := math.Pi - (2.0*math.Pi*float64(y))/math.Pow(2.0, float64(z)) return math.Atan(math.Sinh(float64(n))) * 180 / math.Pi } // NewTile is a tile factory function func NewTile(x int, y int, z int) *Tile { return &Tile{ X: x, Y: y, Z: z, BoundingBox: BoundingBox{ North: Tile2lat(y, z), South: Tile2lat(y+1, z), West: Tile2lon(x, z), East: Tile2lon(x+1, z), }} } // Lon2TileX converts longitude into a tile X coordinate func (tile *Tile) Lon2TileX(zoom int, lonDeg float64) int { x := (lonDeg + 180.0) / 360.0 * (math.Exp2(float64(zoom))) return int(math.Floor(TileSize * (x - float64(tile.X)))) } // Lat2TileY converts latitude into a tile Y coordinate func (tile *Tile) Lat2TileY(zoom int, latDeg float64) int { y := (1.0 - math.Log(math.Tan(latDeg*math.Pi/180.0)+1.0/math.Cos(latDeg*math.Pi/180.0))/math.Pi) / 2.0 * (math.Exp2(float64(zoom))) return int(math.Floor(TileSize * (y - float64(tile.Y)))) } // Degrees2Pixels takes point latitude and longitude and returns pixel coordinates of point on some tile. // May return negative values as well as values outside of tile func (tile *Tile) Degrees2Pixels(lat, lon float64) (x int, y int) { return tile.Lon2TileX(tile.Z, lon), tile.Lat2TileY(tile.Z, lat) } // Contains takes point latitude and longitude and returns true if this point is present on this tile. 
func (tile *Tile) Contains(lat, lon float64) bool { return tile.BoundingBox.Contains(lat, lon) }
tiles/tile.go
0.897297
0.631481
tile.go
starcoder
package postgres import ( "fmt" "strings" "github.com/jackc/pgx/v4" "github.com/pkg/errors" "github.com/uncharted-distil/distil-compute/model" api "github.com/uncharted-distil/distil/api/model" "github.com/uncharted-distil/distil/api/postgres" ) const coordinateBuckets = 20 // CoordinateField defines behaviour for the coordinate field type. type CoordinateField struct { BasicField XCol string YCol string } // NewCoordinateField creates a new field for coordinate types. func NewCoordinateField(key string, storage *Storage, datasetName string, datasetStorageName string, xCol string, yCol string, label string, typ string, count string) *CoordinateField { count = getCountSQL(count) field := &CoordinateField{ BasicField: BasicField{ Key: key, Storage: storage, DatasetName: datasetName, DatasetStorageName: datasetStorageName, Label: label, Type: typ, Count: count, }, XCol: xCol, YCol: yCol, } return field } // FetchSummaryData pulls summary data from the database and builds a histogram. func (f *CoordinateField) FetchSummaryData(resultURI string, filterParams *api.FilterParams, extrema *api.Extrema, mode api.SummaryMode) (*api.VariableSummary, error) { var baseline *api.Histogram var filtered *api.Histogram var err error if resultURI == "" { baseline, err = f.fetchHistogram(nil, coordinateBuckets) if err != nil { return nil, err } if !filterParams.IsEmpty(true) { filtered, err = f.fetchHistogram(filterParams, coordinateBuckets) if err != nil { return nil, err } } } else { baseline, err = f.fetchHistogramByResult(resultURI, nil, coordinateBuckets) if err != nil { return nil, err } if !filterParams.IsEmpty(true) { filtered, err = f.fetchHistogramByResult(resultURI, filterParams, coordinateBuckets) if err != nil { return nil, err } } } return &api.VariableSummary{ Key: f.Key, Label: f.Label, Type: model.GeoCoordinateType, VarType: f.Type, Baseline: baseline, Filtered: filtered, Timeline: nil, }, nil } func (f *CoordinateField) fetchHistogram(filterParams *api.FilterParams, 
numBuckets int) (*api.Histogram, error) { // create the filter for the query. wheres := make([]string, 0) params := make([]interface{}, 0) wheres, params = f.Storage.buildFilteredQueryWhere(f.GetDatasetName(), wheres, params, "", filterParams) where := "" if len(wheres) > 0 { where = fmt.Sprintf("AND %s", strings.Join(wheres, " AND ")) } // treat each axis as a separate field for the purposes of query generation xField := NewNumericalField(f.Storage, f.DatasetName, f.DatasetStorageName, f.XCol, f.XCol, model.RealType, "") yField := NewNumericalField(f.Storage, f.DatasetName, f.DatasetStorageName, f.YCol, f.YCol, model.RealType, "") // get the extrema for each axis xExtrema, err := xField.fetchExtrema() if err != nil { return nil, err } yExtrema, err := yField.fetchExtrema() if err != nil { return nil, err } xNumBuckets, yNumBuckets := getEqualBivariateBuckets(numBuckets, xExtrema, yExtrema) // generate a histogram query for each xHistogramName, xBucketQuery, xHistogramQuery := xField.getHistogramAggQuery(xExtrema, xNumBuckets, "") yHistogramName, yBucketQuery, yHistogramQuery := yField.getHistogramAggQuery(yExtrema, yNumBuckets, "") // Get count by x & y query := fmt.Sprintf(`SELECT %s as bucket, CAST(%s as double precision) AS %s, %s as bucket, CAST(%s as double precision) AS %s, COUNT(%s) AS count FROM %s WHERE "%s" != 'NaN' AND "%s" != 'NaN' %s GROUP BY %s, %s ORDER BY %s, %s;`, xBucketQuery, xHistogramQuery, xHistogramName, yBucketQuery, yHistogramQuery, yHistogramName, f.Count, f.DatasetStorageName, f.XCol, f.YCol, where, xBucketQuery, yBucketQuery, xHistogramName, yHistogramName) // execute the postgres query res, err := f.Storage.client.Query(query, params...) 
if err != nil { return nil, errors.Wrap(err, "failed to fetch histograms for variable summaries from postgres") } if res != nil { defer res.Close() } histogram, err := f.parseHistogram(res, xExtrema, yExtrema, xNumBuckets, yNumBuckets) if err != nil { return nil, err } return histogram, nil } func (f *CoordinateField) fetchHistogramByResult(resultURI string, filterParams *api.FilterParams, numBuckets int) (*api.Histogram, error) { // get filter where / params wheres, params, err := f.Storage.buildResultQueryFilters(f.GetDatasetName(), f.DatasetStorageName, resultURI, filterParams, baseTableAlias) if err != nil { return nil, err } params = append(params, resultURI) where := "" if len(wheres) > 0 { where = fmt.Sprintf("AND %s", strings.Join(wheres, " AND ")) } // create a numerical field for each of X and Y xField := NewNumericalField(f.Storage, f.DatasetName, f.DatasetStorageName, f.XCol, f.XCol, model.RealType, "") yField := NewNumericalField(f.Storage, f.DatasetName, f.DatasetStorageName, f.YCol, f.YCol, model.RealType, "") // get the extrema for each xExtrema, err := xField.fetchExtrema() if err != nil { return nil, err } yExtrema, err := yField.fetchExtrema() if err != nil { return nil, err } xNumBuckets, yNumBuckets := getEqualBivariateBuckets(numBuckets, xExtrema, yExtrema) // create histograms given the the extrema xHistogramName, xBucketQuery, xHistogramQuery := xField.getHistogramAggQuery(xExtrema, xNumBuckets, baseTableAlias) yHistogramName, yBucketQuery, yHistogramQuery := yField.getHistogramAggQuery(yExtrema, yNumBuckets, baseTableAlias) // Get count by x & y query := fmt.Sprintf(` SELECT %s as bucket, CAST(%s as double precision) AS %s, %s as bucket, CAST(%s as double precision) AS %s, COUNT(%s) AS count FROM %s data INNER JOIN %s result ON data."%s" = result.index WHERE result.result_id = $%d AND "%s" != 'NaN' AND "%s" != 'NaN' %s GROUP BY %s, %s ORDER BY %s, %s;`, xBucketQuery, xHistogramQuery, xHistogramName, yBucketQuery, yHistogramQuery, 
yHistogramName, f.Count, f.DatasetStorageName, f.Storage.getResultTable(f.DatasetStorageName), model.D3MIndexFieldName, len(params), f.XCol, f.YCol, where, xBucketQuery, yBucketQuery, xHistogramName, yHistogramName) // execute the postgres query res, err := f.Storage.client.Query(query, params...) if err != nil { return nil, errors.Wrap(err, "failed to fetch histograms for variable summaries from postgres") } if res != nil { defer res.Close() } histogram, err := f.parseHistogram(res, xExtrema, yExtrema, xNumBuckets, yNumBuckets) if err != nil { return nil, err } return histogram, nil } func (f *CoordinateField) parseHistogram(rows pgx.Rows, xExtrema *api.Extrema, yExtrema *api.Extrema, xNumBuckets int, yNumBuckets int) (*api.Histogram, error) { // get histogram agg name histogramAggName := api.HistogramAggPrefix + f.Key // Parse bucket results. xInterval := xExtrema.GetBucketInterval(xNumBuckets) yInterval := yExtrema.GetBucketInterval(yNumBuckets) xRounded := xExtrema.GetBucketMinMax(xNumBuckets) yRounded := yExtrema.GetBucketMinMax(yNumBuckets) xBucketCount := int64(xExtrema.GetBucketCount(xNumBuckets)) yBucketCount := int64(yExtrema.GetBucketCount(yNumBuckets)) // initialize empty histogram structure // float representation of the data could cause very slight deviation // leading to boundaries not being perfectly aligned xBuckets := make([]*api.Bucket, xBucketCount) for i := int64(0); i < xBucketCount; i++ { yBuckets := make([]*api.Bucket, yBucketCount) for j := int64(0); j < yBucketCount; j++ { yVal := yRounded.Min + float64(j)*yInterval yBuckets[j] = &api.Bucket{ Key: fmt.Sprintf("%f", yVal), Count: 0, Buckets: nil, } } xVal := xRounded.Min + float64(i)*xInterval xBuckets[i] = &api.Bucket{ Key: fmt.Sprintf("%f", xVal), Count: 0, Buckets: yBuckets, } } for rows.Next() { var xBucketValue float64 var yBucketValue float64 var xBucket int64 var yBucket int64 var yRowBucketCount int64 err := rows.Scan(&xBucket, &xBucketValue, &yBucket, &yBucketValue, 
&yRowBucketCount) if err != nil { return nil, errors.Wrap(err, fmt.Sprintf("no %s histogram aggregation found", histogramAggName)) } // Due to float representation, sometimes the lowest value < // first bucket interval and so ends up in bucket -1. // Since the max can match the limit, an extra bucket may exist. // Add the value to the second to last bucket. if xBucket < 0 { xBucket = 0 } else if xBucket >= xBucketCount { xBucket = xBucketCount - 1 } if yBucket < 0 { yBucket = 0 } else if yBucket >= yBucketCount { yBucket = yBucketCount - 1 } xBuckets[xBucket].Buckets[yBucket].Count += yRowBucketCount } err := rows.Err() if err != nil { return nil, errors.Wrapf(err, "error reading data from postgres") } // assign histogram attributes return &api.Histogram{ Buckets: xBuckets, }, nil } // FetchPredictedSummaryData pulls predicted data from the result table and builds // the coordinate histogram for the field. func (f *CoordinateField) FetchPredictedSummaryData(resultURI string, datasetResult string, filterParams *api.FilterParams, extrema *api.Extrema, mode api.SummaryMode) (*api.VariableSummary, error) { return nil, fmt.Errorf("not implemented") } func (f *CoordinateField) getDefaultFilter(inverse bool) string { defaultValue := fmt.Sprintf("%v", postgres.DefaultPostgresValueFromD3MType(f.GetType())) // null comparison is done with "is" rather than "=" op := "=" if defaultValue == "NULL" { op = "is" } negate := "" if inverse { negate = "NOT" } return fmt.Sprintf("%s(\"%s\" %s %s AND \"%s\" %s %s)", negate, f.XCol, op, defaultValue, f.YCol, op, defaultValue) }
api/model/storage/postgres/coordinate.go
0.705278
0.415907
coordinate.go
starcoder
package align import ( "errors" "fmt" "strings" "github.com/biogo/biogo/align/matrix" ) type MatrixScores struct { SubMatrix [][]int GapOpen int GapExtend int K float64 Lambda float64 } // Reference : https://github.com/bbuchfink/diamond/blob/master/src/basic/score_matrix.cpp var ( AllMatrixScores = map[string]MatrixScores{ "blosum45_13_3": MatrixScores{SubMatrix: matrix.BLOSUM45, GapOpen: 13, GapExtend: 3, Lambda: 0.207, K: 0.049}, "blosum45_12_3": MatrixScores{SubMatrix: matrix.BLOSUM45, GapOpen: 12, GapExtend: 3, Lambda: 0.199, K: 0.039}, "blosum45_11_3": MatrixScores{SubMatrix: matrix.BLOSUM45, GapOpen: 11, GapExtend: 3, Lambda: 0.190, K: 0.031}, "blosum45_10_3": MatrixScores{SubMatrix: matrix.BLOSUM45, GapOpen: 10, GapExtend: 3, Lambda: 0.179, K: 0.023}, "blosum45_16_2": MatrixScores{SubMatrix: matrix.BLOSUM45, GapOpen: 16, GapExtend: 2, Lambda: 0.210, K: 0.051}, "blosum45_15_2": MatrixScores{SubMatrix: matrix.BLOSUM45, GapOpen: 15, GapExtend: 2, Lambda: 0.203, K: 0.041}, "blosum45_14_2": MatrixScores{SubMatrix: matrix.BLOSUM45, GapOpen: 14, GapExtend: 2, Lambda: 0.195, K: 0.032}, "blosum45_13_2": MatrixScores{SubMatrix: matrix.BLOSUM45, GapOpen: 13, GapExtend: 2, Lambda: 0.185, K: 0.024}, "blosum45_12_2": MatrixScores{SubMatrix: matrix.BLOSUM45, GapOpen: 12, GapExtend: 2, Lambda: 0.171, K: 0.016}, "blosum45_19_1": MatrixScores{SubMatrix: matrix.BLOSUM45, GapOpen: 19, GapExtend: 1, Lambda: 0.205, K: 0.040}, "blosum45_18_1": MatrixScores{SubMatrix: matrix.BLOSUM45, GapOpen: 18, GapExtend: 1, Lambda: 0.198, K: 0.032}, "blosum45_17_1": MatrixScores{SubMatrix: matrix.BLOSUM45, GapOpen: 17, GapExtend: 1, Lambda: 0.189, K: 0.024}, "blosum45_16_1": MatrixScores{SubMatrix: matrix.BLOSUM45, GapOpen: 16, GapExtend: 1, Lambda: 0.176, K: 0.016}, "blosum50_13_3": MatrixScores{SubMatrix: matrix.BLOSUM50, GapOpen: 13, GapExtend: 3, Lambda: 0.212, K: 0.063}, "blosum50_12_3": MatrixScores{SubMatrix: matrix.BLOSUM50, GapOpen: 12, GapExtend: 3, Lambda: 0.206, K: 0.055}, 
"blosum50_11_3": MatrixScores{SubMatrix: matrix.BLOSUM50, GapOpen: 11, GapExtend: 3, Lambda: 0.197, K: 0.042}, "blosum50_10_3": MatrixScores{SubMatrix: matrix.BLOSUM50, GapOpen: 10, GapExtend: 3, Lambda: 0.186, K: 0.031}, "blosum50_9_3": MatrixScores{SubMatrix: matrix.BLOSUM50, GapOpen: 9, GapExtend: 3, Lambda: 0.172, K: 0.022}, "blosum50_16_2": MatrixScores{SubMatrix: matrix.BLOSUM50, GapOpen: 16, GapExtend: 2, Lambda: 0.215, K: 0.066}, "blosum50_15_2": MatrixScores{SubMatrix: matrix.BLOSUM50, GapOpen: 15, GapExtend: 2, Lambda: 0.210, K: 0.058}, "blosum50_14_2": MatrixScores{SubMatrix: matrix.BLOSUM50, GapOpen: 14, GapExtend: 2, Lambda: 0.202, K: 0.045}, "blosum50_13_2": MatrixScores{SubMatrix: matrix.BLOSUM50, GapOpen: 13, GapExtend: 2, Lambda: 0.193, K: 0.035}, "blosum50_12_2": MatrixScores{SubMatrix: matrix.BLOSUM50, GapOpen: 12, GapExtend: 2, Lambda: 0.181, K: 0.025}, "blosum50_19_1": MatrixScores{SubMatrix: matrix.BLOSUM50, GapOpen: 19, GapExtend: 1, Lambda: 0.212, K: 0.057}, "blosum50_18_1": MatrixScores{SubMatrix: matrix.BLOSUM50, GapOpen: 18, GapExtend: 1, Lambda: 0.207, K: 0.050}, "blosum50_17_1": MatrixScores{SubMatrix: matrix.BLOSUM50, GapOpen: 17, GapExtend: 1, Lambda: 0.198, K: 0.037}, "blosum50_16_1": MatrixScores{SubMatrix: matrix.BLOSUM50, GapOpen: 16, GapExtend: 1, Lambda: 0.186, K: 0.025}, "blosum50_15_1": MatrixScores{SubMatrix: matrix.BLOSUM50, GapOpen: 15, GapExtend: 1, Lambda: 0.171, K: 0.015}, "blosum62_11_2": MatrixScores{SubMatrix: matrix.BLOSUM62, GapOpen: 11, GapExtend: 2, Lambda: 0.297, K: 0.082}, "blosum62_10_2": MatrixScores{SubMatrix: matrix.BLOSUM62, GapOpen: 10, GapExtend: 2, Lambda: 0.291, K: 0.075}, "blosum62_9_2": MatrixScores{SubMatrix: matrix.BLOSUM62, GapOpen: 9, GapExtend: 2, Lambda: 0.279, K: 0.058}, "blosum62_8_2": MatrixScores{SubMatrix: matrix.BLOSUM62, GapOpen: 8, GapExtend: 2, Lambda: 0.264, K: 0.045}, "blosum62_7_2": MatrixScores{SubMatrix: matrix.BLOSUM62, GapOpen: 7, GapExtend: 2, Lambda: 0.239, K: 0.027}, 
"blosum62_6_2": MatrixScores{SubMatrix: matrix.BLOSUM62, GapOpen: 6, GapExtend: 2, Lambda: 0.201, K: 0.012}, "blosum62_13_1": MatrixScores{SubMatrix: matrix.BLOSUM62, GapOpen: 13, GapExtend: 1, Lambda: 0.292, K: 0.071}, "blosum62_12_1": MatrixScores{SubMatrix: matrix.BLOSUM62, GapOpen: 12, GapExtend: 1, Lambda: 0.283, K: 0.059}, "blosum62_11_1": MatrixScores{SubMatrix: matrix.BLOSUM62, GapOpen: 11, GapExtend: 1, Lambda: 0.267, K: 0.041}, "blosum62_10_1": MatrixScores{SubMatrix: matrix.BLOSUM62, GapOpen: 10, GapExtend: 1, Lambda: 0.243, K: 0.024}, "blosum62_9_1": MatrixScores{SubMatrix: matrix.BLOSUM62, GapOpen: 9, GapExtend: 1, Lambda: 0.206, K: 0.010}, "blosum80_25_2": MatrixScores{SubMatrix: matrix.BLOSUM80, GapOpen: 25, GapExtend: 2, Lambda: 0.342, K: 0.17}, "blosum80_13_2": MatrixScores{SubMatrix: matrix.BLOSUM80, GapOpen: 13, GapExtend: 2, Lambda: 0.336, K: 0.15}, "blosum80_9_2": MatrixScores{SubMatrix: matrix.BLOSUM80, GapOpen: 9, GapExtend: 2, Lambda: 0.319, K: 0.11}, "blosum80_8_2": MatrixScores{SubMatrix: matrix.BLOSUM80, GapOpen: 8, GapExtend: 2, Lambda: 0.308, K: 0.090}, "blosum80_7_2": MatrixScores{SubMatrix: matrix.BLOSUM80, GapOpen: 7, GapExtend: 2, Lambda: 0.293, K: 0.070}, "blosum80_6_2": MatrixScores{SubMatrix: matrix.BLOSUM80, GapOpen: 6, GapExtend: 2, Lambda: 0.268, K: 0.045}, "blosum80_11_1": MatrixScores{SubMatrix: matrix.BLOSUM80, GapOpen: 11, GapExtend: 1, Lambda: 0.314, K: 0.095}, "blosum80_10_1": MatrixScores{SubMatrix: matrix.BLOSUM80, GapOpen: 10, GapExtend: 1, Lambda: 0.299, K: 0.071}, "blosum80_9_1": MatrixScores{SubMatrix: matrix.BLOSUM80, GapOpen: 9, GapExtend: 1, Lambda: 0.279, K: 0.048}, "blosum90_9_2": MatrixScores{SubMatrix: matrix.BLOSUM90, GapOpen: 9, GapExtend: 2, Lambda: 0.310, K: 0.12}, "blosum90_8_2": MatrixScores{SubMatrix: matrix.BLOSUM90, GapOpen: 8, GapExtend: 2, Lambda: 0.300, K: 0.099}, "blosum90_7_2": MatrixScores{SubMatrix: matrix.BLOSUM90, GapOpen: 7, GapExtend: 2, Lambda: 0.283, K: 0.072}, "blosum90_6_2": 
MatrixScores{SubMatrix: matrix.BLOSUM90, GapOpen: 6, GapExtend: 2, Lambda: 0.259, K: 0.048}, "blosum90_11_1": MatrixScores{SubMatrix: matrix.BLOSUM90, GapOpen: 11, GapExtend: 1, Lambda: 0.302, K: 0.093}, "blosum90_10_1": MatrixScores{SubMatrix: matrix.BLOSUM90, GapOpen: 10, GapExtend: 1, Lambda: 0.290, K: 0.075}, "blosum90_9_1": MatrixScores{SubMatrix: matrix.BLOSUM90, GapOpen: 9, GapExtend: 1, Lambda: 0.265, K: 0.044}, "pam250_15_3": MatrixScores{SubMatrix: matrix.PAM250, GapOpen: 15, GapExtend: 3, Lambda: 0.205, K: 0.049}, "pam250_14_3": MatrixScores{SubMatrix: matrix.PAM250, GapOpen: 14, GapExtend: 3, Lambda: 0.200, K: 0.043}, "pam250_13_3": MatrixScores{SubMatrix: matrix.PAM250, GapOpen: 13, GapExtend: 3, Lambda: 0.194, K: 0.036}, "pam250_12_3": MatrixScores{SubMatrix: matrix.PAM250, GapOpen: 12, GapExtend: 3, Lambda: 0.186, K: 0.029}, "pam250_11_3": MatrixScores{SubMatrix: matrix.PAM250, GapOpen: 11, GapExtend: 3, Lambda: 0.174, K: 0.020}, "pam250_17_2": MatrixScores{SubMatrix: matrix.PAM250, GapOpen: 17, GapExtend: 2, Lambda: 0.204, K: 0.047}, "pam250_16_2": MatrixScores{SubMatrix: matrix.PAM250, GapOpen: 16, GapExtend: 2, Lambda: 0.198, K: 0.038}, "pam250_15_2": MatrixScores{SubMatrix: matrix.PAM250, GapOpen: 15, GapExtend: 2, Lambda: 0.191, K: 0.031}, "pam250_14_2": MatrixScores{SubMatrix: matrix.PAM250, GapOpen: 14, GapExtend: 2, Lambda: 0.182, K: 0.024}, "pam250_13_2": MatrixScores{SubMatrix: matrix.PAM250, GapOpen: 13, GapExtend: 2, Lambda: 0.171, K: 0.017}, "pam250_21_1": MatrixScores{SubMatrix: matrix.PAM250, GapOpen: 21, GapExtend: 1, Lambda: 0.205, K: 0.045}, "pam250_20_1": MatrixScores{SubMatrix: matrix.PAM250, GapOpen: 20, GapExtend: 1, Lambda: 0.199, K: 0.037}, "pam250_19_1": MatrixScores{SubMatrix: matrix.PAM250, GapOpen: 19, GapExtend: 1, Lambda: 0.192, K: 0.029}, "pam250_18_1": MatrixScores{SubMatrix: matrix.PAM250, GapOpen: 18, GapExtend: 1, Lambda: 0.183, K: 0.021}, "pam250_17_1": MatrixScores{SubMatrix: matrix.PAM250, GapOpen: 17, GapExtend: 
1, Lambda: 0.171, K: 0.014}, "pam30_7_2": MatrixScores{SubMatrix: matrix.PAM30, GapOpen: 7, GapExtend: 2, Lambda: 0.305, K: 0.15}, "pam30_6_2": MatrixScores{SubMatrix: matrix.PAM30, GapOpen: 6, GapExtend: 2, Lambda: 0.287, K: 0.11}, "pam30_5_2": MatrixScores{SubMatrix: matrix.PAM30, GapOpen: 5, GapExtend: 2, Lambda: 0.264, K: 0.079}, "pam30_10_1": MatrixScores{SubMatrix: matrix.PAM30, GapOpen: 10, GapExtend: 1, Lambda: 0.309, K: 0.15}, "pam30_9_1": MatrixScores{SubMatrix: matrix.PAM30, GapOpen: 9, GapExtend: 1, Lambda: 0.294, K: 0.11}, "pam30_8_1": MatrixScores{SubMatrix: matrix.PAM30, GapOpen: 8, GapExtend: 1, Lambda: 0.270, K: 0.072}, "pam70_8_2": MatrixScores{SubMatrix: matrix.PAM70, GapOpen: 8, GapExtend: 2, Lambda: 0.301, K: 0.12}, "pam70_7_2": MatrixScores{SubMatrix: matrix.PAM70, GapOpen: 7, GapExtend: 2, Lambda: 0.286, K: 0.093}, "pam70_6_2": MatrixScores{SubMatrix: matrix.PAM70, GapOpen: 6, GapExtend: 2, Lambda: 0.264, K: 0.064}, "pam70_11_1": MatrixScores{SubMatrix: matrix.PAM70, GapOpen: 11, GapExtend: 1, Lambda: 0.305, K: 0.12}, "pam70_10_1": MatrixScores{SubMatrix: matrix.PAM70, GapOpen: 10, GapExtend: 1, Lambda: 0.291, K: 0.091}, "pam70_9_1": MatrixScores{SubMatrix: matrix.PAM70, GapOpen: 9, GapExtend: 1, Lambda: 0.270, K: 0.060}, } AAPosInMatrix = map[rune]int{'-': 0, 'A': 1, 'B': 2, 'C': 3, 'D': 4, 'E': 5, 'F': 6, 'G': 7, 'H': 8, 'I': 9, 'J': 10, 'K': 11, 'L': 12, 'M': 13, 'N': 14, 'P': 15, 'Q': 16, 'R': 17, 'S': 18, 'T': 19, 'V': 20, 'W': 21, 'X': 22, 'Y': 23, 'Z': 24, '*': 25} ) func GetMatrixScores(subMatrix string, gapOpen int, gapExtend int) (MatrixScores, error) { key := fmt.Sprintf("%s_%d_%d", strings.ToLower(subMatrix), gapOpen, gapExtend) if m, ok := AllMatrixScores[key]; ok { return m, nil } return MatrixScores{}, errors.New("No matrix found") } func GetAlnScoreAA(mScores MatrixScores, aa1 rune, aa2 rune) int { return mScores.SubMatrix[AAPosInMatrix[aa1]][AAPosInMatrix[aa2]] }
pkg/align/matrixScores.go
0.782247
0.560674
matrixScores.go
starcoder
package levels import ( mgl "github.com/go-gl/mathgl/mgl32" "github.com/inkyblackness/hacked/editor/graphics" "github.com/inkyblackness/hacked/editor/render" "github.com/inkyblackness/hacked/ui/opengl" ) var mapIconsVertexShaderSource = ` #version 150 precision mediump float; in vec3 vertexPosition; in vec3 uvPosition; uniform mat4 modelMatrix; uniform mat4 viewMatrix; uniform mat4 projectionMatrix; uniform vec4 uvLimit; out vec2 uv; void main(void) { gl_Position = projectionMatrix * viewMatrix * modelMatrix * vec4(vertexPosition, 1.0); uv = vec2(uvPosition.x * uvLimit.x, uvPosition.y * uvLimit.y); } ` var mapIconsFragmentShaderSource = ` #version 150 precision mediump float; uniform sampler2D palette; uniform sampler2D bitmap; in vec2 uv; out vec4 fragColor; void main(void) { vec4 pixel = texture(bitmap, uv); if (pixel.r > 0.0) { fragColor = texture(palette, vec2(pixel.r, 0.5)); } else { discard; } } ` // MapIcons is a renderable for simple bitmaps. type MapIcons struct { context *render.Context program uint32 vao *opengl.VertexArrayObject vertexPositionBuffer uint32 vertexPositionAttrib int32 uvPositionBuffer uint32 uvPositionAttrib int32 modelMatrixUniform opengl.Matrix4Uniform viewMatrixUniform opengl.Matrix4Uniform projectionMatrixUniform opengl.Matrix4Uniform uvLimitUniform opengl.Vector4Uniform paletteUniform int32 bitmapUniform int32 } type iconData struct { pos MapPosition texture *graphics.BitmapTexture } // NewMapIcons returns a new instance. 
func NewMapIcons(context *render.Context) *MapIcons { gl := context.OpenGL program, programErr := opengl.LinkNewStandardProgram(gl, mapIconsVertexShaderSource, mapIconsFragmentShaderSource) if programErr != nil { panic(opengl.NamedShaderError{Name: "MapIconsShader", Nested: programErr}) } renderable := &MapIcons{ context: context, program: program, vao: opengl.NewVertexArrayObject(gl, program), vertexPositionBuffer: gl.GenBuffers(1)[0], vertexPositionAttrib: gl.GetAttribLocation(program, "vertexPosition"), uvPositionBuffer: gl.GenBuffers(1)[0], uvPositionAttrib: gl.GetAttribLocation(program, "uvPosition"), modelMatrixUniform: opengl.Matrix4Uniform(gl.GetUniformLocation(program, "modelMatrix")), viewMatrixUniform: opengl.Matrix4Uniform(gl.GetUniformLocation(program, "viewMatrix")), projectionMatrixUniform: opengl.Matrix4Uniform(gl.GetUniformLocation(program, "projectionMatrix")), uvLimitUniform: opengl.Vector4Uniform(gl.GetUniformLocation(program, "uvLimit")), paletteUniform: gl.GetUniformLocation(program, "palette"), bitmapUniform: gl.GetUniformLocation(program, "bitmap")} { half := float32(0.5) var vertices = []float32{ -half, half, 0.0, half, half, 0.0, half, -half, 0.0, half, -half, 0.0, -half, -half, 0.0, -half, half, 0.0, } gl.BindBuffer(opengl.ARRAY_BUFFER, renderable.vertexPositionBuffer) gl.BufferData(opengl.ARRAY_BUFFER, len(vertices)*4, vertices, opengl.STATIC_DRAW) gl.BindBuffer(opengl.ARRAY_BUFFER, 0) } { var uv = []float32{ 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, } gl.BindBuffer(opengl.ARRAY_BUFFER, renderable.uvPositionBuffer) gl.BufferData(opengl.ARRAY_BUFFER, len(uv)*4, uv, opengl.STATIC_DRAW) gl.BindBuffer(opengl.ARRAY_BUFFER, 0) } renderable.vao.WithSetter(func(gl opengl.OpenGL) { gl.EnableVertexAttribArray(uint32(renderable.vertexPositionAttrib)) gl.BindBuffer(opengl.ARRAY_BUFFER, renderable.vertexPositionBuffer) gl.VertexAttribOffset(uint32(renderable.vertexPositionAttrib), 3, opengl.FLOAT, 
false, 0, 0) gl.EnableVertexAttribArray(uint32(renderable.uvPositionAttrib)) gl.BindBuffer(opengl.ARRAY_BUFFER, renderable.uvPositionBuffer) gl.VertexAttribOffset(uint32(renderable.uvPositionAttrib), 3, opengl.FLOAT, false, 0, 0) gl.BindBuffer(opengl.ARRAY_BUFFER, 0) }) return renderable } // Render renders the icons with their center at given position. func (renderable *MapIcons) Render(paletteTexture *graphics.PaletteTexture, iconSize float32, icons []iconData) { gl := renderable.context.OpenGL renderable.vao.OnShader(func() { renderable.viewMatrixUniform.Set(gl, renderable.context.ViewMatrix) renderable.projectionMatrixUniform.Set(gl, &renderable.context.ProjectionMatrix) textureUnit := int32(0) gl.ActiveTexture(opengl.TEXTURE0 + uint32(textureUnit)) gl.BindTexture(opengl.TEXTURE_2D, paletteTexture.Handle()) gl.Uniform1i(renderable.paletteUniform, textureUnit) textureUnit = 1 gl.ActiveTexture(opengl.TEXTURE0 + uint32(textureUnit)) gl.Uniform1i(renderable.bitmapUniform, textureUnit) for _, icon := range icons { x, y := float32(icon.pos.X), float32(icon.pos.Y) u, v := icon.texture.UV() width, height := renderable.limitedSize(iconSize, icon.texture) modelMatrix := mgl.Ident4(). Mul4(mgl.Translate3D(x, y, 0.0)). Mul4(mgl.Scale3D(width, height, 1.0)) renderable.modelMatrixUniform.Set(gl, &modelMatrix) uvLimit := [4]float32{u, v, 0, 0} renderable.uvLimitUniform.Set(gl, &uvLimit) gl.BindTexture(opengl.TEXTURE_2D, icon.texture.Handle()) gl.DrawArrays(opengl.TRIANGLES, 0, 6) } gl.BindTexture(opengl.TEXTURE_2D, 0) }) } func (renderable *MapIcons) limitedSize(iconSize float32, texture *graphics.BitmapTexture) (width, height float32) { width, height = texture.Size() referenceSize := float32(16.0) larger := width if larger < height { larger = height } if larger > referenceSize { ratio := referenceSize / larger width *= ratio height *= ratio } factor := iconSize / referenceSize width *= factor height *= factor return }
editor/levels/MapIcons.go
0.739799
0.555315
MapIcons.go
starcoder
package lister import ( "errors" "fmt" "regexp" "strconv" ) // ListRunesWithinRange returns the rune slice that contains letters within the specified range. func ListRunesWithinRange(from, to rune) ([]rune, error) { if from > to { return nil, errors.New("`from` is bigger than `to`: `from` must be smaller than `to`") } cur := from length := int(to - from + 1) rs := make([]rune, length) for i := 0; i < length; i, cur = i+1, cur+1 { rs[i] = cur } return rs, nil } // ListStringsWithinRange returns the string slice that contains letters within the specified range. func ListStringsWithinRange(from, to rune) ([]string, error) { if from > to { return nil, errors.New("`from` is bigger than `to`: `from` must be smaller than `to`") } cur := from length := int(to - from + 1) ss := make([]string, length) for i := 0; i < length; i, cur = i+1, cur+1 { ss[i] = string(cur) } return ss, nil } // ListRunesWithinRangeString returns the rune slice that contains letters within the specified range-string. // Range-string must have following format: `u{hex}-u{hex}`. The first hex is handled as `from` and the second one is handles as `to`. func ListRunesWithinRangeString(rangeString string) ([]rune, error) { from, to, err := extractRangeFromRangeString(rangeString) if err != nil { return nil, err } return ListRunesWithinRange(from, to) } // ListStringsWithinRangeString returns the string slice that contains letters within the specified range-string. // Range-string must have following format: `u{hex}-u{hex}`. The first hex is handled as `from` and the second one is handles as `to`. 
func ListStringsWithinRangeString(rangeString string) ([]string, error) { from, to, err := extractRangeFromRangeString(rangeString) if err != nil { return nil, err } return ListStringsWithinRange(from, to) } var rangeStringRe = regexp.MustCompile("^u{([0-9a-fA-F]+)}-u{([0-9a-fA-F]+)}$") func extractRangeFromRangeString(rangeString string) (rune, rune, error) { matched := rangeStringRe.FindStringSubmatch(rangeString) if matched == nil { return 0, 0, fmt.Errorf("given rangeString is invalid format (example: it should like be `%s`)", `u{3040}-u{309F}`) } from, _ := strconv.ParseUint(matched[1], 16, 64) to, _ := strconv.ParseUint(matched[2], 16, 64) return rune(from), rune(to), nil }
lister/range.go
0.843219
0.450178
range.go
starcoder
package binarytree // Tree represents a binary tree type Tree struct { Root *Node } // Iterator is a func that can iterate a tree type Iterator func(key Comparable, value interface{}) // Return a new empty binary tree func NewTree() *Tree { return &Tree{Root: nil} } // Add the supplied key and value to the tree. If the key already exists, the value will be overwritten. func (me *Tree) Set(key Comparable, value interface{}) { if me.Root == nil { me.Root = NewNodeKeyValue(key, value) } else { node := me.Root.Find(key) if node == nil { me.Root.Add(NewNodeKeyValue(key, value)) } else { node.Value = value } } } // Get the value associated with the supplied key. Return (true, value) if found, // (false, nil) if not. func (me *Tree) Get(key Comparable) (bool, interface{}) { node := me.GetNode(key) if node == nil { return false, nil } return true, node.Value } // Clear (Delete) the supplied key func (me *Tree) Clear(key Comparable) { if me.Root == nil { return } me.Root = me.Root.Remove(key) } // Get the node associated with the supplied key, or nil if not found func (me *Tree) GetNode(key Comparable) *Node { if me.Root == nil { return nil } return me.Root.Find(key) } // Return a deep copy of the tree. func (me *Tree) Copy() *Tree { newTree := NewTree() newTree.Root = me.Root if me.Root == nil { return newTree } newTree.Root = me.Root.Copy() return newTree } // Balance the tree. func (me *Tree) Balance() { if me.Root == nil { return } me.Root = me.Root.Balance() } // Return the value associated with the next smallest key than the supplied key. // If a smaller key exists, return (true, value), otherwise return (false, nil). func (me *Tree) Previous(key Comparable) (bool, Comparable, interface{}) { if me.Root == nil { return false, nil, nil } node := me.Root.Previous(key) if node == nil { return false, nil, nil } return true, node.Key, node.Value } // Return the value associated with the next largest key than the supplied key. 
// If a larger key exists, return (true, value), otherwise return (false, nil). func (me *Tree) Next(key Comparable) (bool, Comparable, interface{}) { if me.Root == nil { return false, nil, nil } node := me.Root.Next(key) if node == nil { return false, nil, nil } return true, node.Key, node.Value } // Return the first (lowest) key and value in the tree, or nil, nil if the tree is empty. func (me *Tree) First() (Comparable, interface{}) { if me.Root == nil { return nil, nil } node := me.Root.Minimum() return node.Key, node.Value } // Return the last (highest) key and value in the tree, or nil, nil if the tree is empty. func (me *Tree) Last() (Comparable, interface{}) { if me.Root == nil { return nil, nil } node := me.Root.Maximum() return node.Key, node.Value } // Iterate the tree with the function in the supplied direction func (me *Tree) Walk(iterator Iterator, forward bool) { if me.Root == nil { return } if forward { me.Root.WalkForward(func(node *Node) { iterator(node.Key, node.Value) }) } else { me.Root.WalkBackward(func(node *Node) { iterator(node.Key, node.Value) }) } } // Iterate the tree for all Nodes between the two keys, inclusive func (me *Tree) WalkRange(iterator func(key Comparable, value interface{}), from Comparable, to Comparable, forward bool) { if me.Root == nil { return } if forward { me.Root.WalkRangeForward(func(node *Node) { iterator(node.Key, node.Value) }, from, to) } else { me.Root.WalkRangeBackward(func(node *Node) { iterator(node.Key, node.Value) }, from, to) } }
tree.go
0.807157
0.417212
tree.go
starcoder
package fix16 import "math/bits" var ( Minimum = T{^0x7FFFFFFF} Maximum = T{0x7FFFFFFF} ) var ( Pi = T{205887} E = T{178145} Zero = T{} One = T{0x00010000} ) var Overflow = T{^0x7FFFFFFF} type T struct { f int32 } func Binary(a uint32) T { return T{int32(a)} } func Int(a int) T { return Int32(int32(a)) } func Int32(a int32) T { return T{a * int32(One.f)} } func Uint32(a uint32) T { return Int32(int32(a)) } func Int64(a int64) T { return Int32(int32(a)) } func Uint64(a uint64) T { return Uint32(uint32(a)) } func Float32(a float32) T { return Float64(float64(a)) } func Float64(a float64) T { tmp := a * float64(One.f) if tmp >= 0 { return T{int32(tmp + 0.5)} } return T{int32(tmp - 0.5)} } func (a T) Float32() float32 { return float32(a.Float64()) } func (a T) Float64() float64 { return float64(a.f) / float64(One.f) } func (a T) Binary() uint32 { return uint32(a.f) } func (a T) Int() int { return int(a.Int32()) } func (a T) Int32() int32 { if a.f >= 0 { return int32((a.f + (One.f >> 1)) / One.f) } return int32((a.f - (One.f >> 1)) / One.f) } func (a T) Uint32() uint32 { return uint32(a.Int32()) } func (a T) Int64() int64 { return int64(a.Int32()) } func (a T) Uint64() uint64 { return uint64(a.Int32()) } func (a T) Split() (T, T) { n := a.Negative() if n { a = a.Inv() } i := a.Floor() f := a.Sub(i) if n { i = i.Inv() } return i, f } func (a T) Add(b T) T { ua := uint32(a.f) ub := uint32(b.f) sum := ua + ub // Overflow can only happen if sign of a == sign of b, and then // it causes sign of sum != sign of a. if ((ua^ub)&0x80000000) == 0 && ((ua^sum)&0x80000000) != 0 { return Overflow } return Binary(sum) } func (a T) AddSaturate(b T) T { r := a.Add(b) if r == Overflow { if a.Negative() { return Minimum } else { return Maximum } } return r } func (a T) Sub(b T) T { ua := uint32(a.f) ub := uint32(b.f) diff := ua - ub // Overflow can only happen if sign of a == sign of b, and then // it causes sign of sum != sign of a. 
if ((ua^ub)&0x80000000) != 0 && ((ua^diff)&0x80000000) != 0 { return Overflow } return Binary(diff) } func (a T) SubSaturate(b T) T { r := a.Sub(b) if r == Overflow { if a.Negative() { return Minimum } else { return Maximum } } return r } func (a T) Mul(b T) T { product := int64(a.f) * int64(b.f) // The upper 17 bits should all be the same (the sign). upper := uint32(product >> 47) if product < 0 { if ^upper != 0 { return Overflow } // This adjustment is required in order to round -1/2 correctly. product-- } else if upper != 0 { return Overflow } result := int32(product >> 16) return T{result + int32((product&0x8000)>>15)} } func (a T) MulSaturate(b T) T { r := a.Mul(b) if r == Overflow { if a.Negative() == b.Negative() { return Minimum } else { return Maximum } } return r } func (a T) Div(b T) T { if b.f == 0 { return Minimum } remainder := uint32(-a.f) if a.f >= 0 { remainder = uint32(a.f) } divider := uint32(-b.f) if b.f >= 0 { divider = uint32(b.f) } quotient := uint32(0) bitPos := 17 // Kick-start the division a bit. // This improves speed in the worst-case scenarios where N and D are large // It gets a lower estimate for the result by N/(D >> 17 + 1). if divider&0xFFF00000 != 0 { shiftedDiv := (divider >> 17) + 1 quotient = remainder / shiftedDiv remainder -= uint32((uint64(quotient) * uint64(divider)) >> 17) } // If the divider is divisible by 2^n, take advantage of it. for divider&0xF == 0 && bitPos >= 4 { divider >>= 4 bitPos -= 4 } for remainder != 0 && bitPos >= 0 { // Shift remainder as much as we can without overflowing. shift := bits.LeadingZeros32(remainder) if shift > bitPos { shift = bitPos } remainder <<= uint32(shift) bitPos -= shift div := remainder / divider remainder = remainder % divider quotient += div << uint32(bitPos) if (div & (^(0xFFFFFFFF >> uint32(bitPos)))) != 0 { return Overflow } remainder <<= 1 bitPos-- } // Quotient is always positive so rounding is easy. quotient++ result := quotient >> 1 // Figure out the sign of the result. 
if (a.f^b.f)&(^0x7FFFFFFF) != 0 { if result == 0x80000000 { return Overflow } result = -result } return Binary(result) } func (a T) DivSaturate(b T) T { r := a.Div(b) if r == Overflow { if a.Negative() == b.Negative() { return Minimum } else { return Maximum } } return r } func (a T) Zero() bool { return a == Zero } func (a T) Negative() bool { return a.f < 0 } func (a T) Less(b T) bool { return a.f < b.f } func (a T) LEqual(b T) bool { return a.f <= b.f } func (a T) Inv() T { return T{-a.f} } func (a T) Mod(b T) T { return T{a.f % b.f} } func (a T) Abs() T { if a.f < 0 { return a.Inv() } return a } func (a T) Floor() T { return T{a.f & (^0x0000FFFF)} } func (a T) Ceil() T { var n int32 if a.f&0x0000FFFF != 0 { n = One.f } return T{(a.f & (^0x0000FFFF)) + n} } func (a T) Min(b T) T { if a.f < b.f { return a } return b } func (a T) Max(b T) T { if a.f > b.f { return a } return b } func (a T) Clamp(low, high T) T { return a.Max(low).Min(high) }
fix16.go
0.689619
0.424114
fix16.go
starcoder
package poly2tri import ( "math" ) type Point struct { X float64 Y float64 Edges []*Edge } func NewPoint(x, y float64) *Point { return &Point{ X: x, Y: y, Edges: make([]*Edge, 0), } } func (p *Point) GetX() float64 { return p.X } func (p *Point) GetY() float64 { return p.Y } func (p *Point) String() string { return XYString(p) } func (p *Point) ToJSON() string { panic("poly2tri:Point.ToJSON not implemented") } func (p *Point) Clone() *Point { return NewPoint(p.X, p.Y) } func (p *Point) SetZero() *Point { return p.Set(0, 0) } func (p *Point) Set(x, y float64) *Point { p.X = x p.Y = y return p } func (p *Point) Negate() *Point { p.X *= -1 p.Y *= -1 return p } func (p *Point) Add(n XYInterface) *Point { p.X += n.GetX() p.Y += n.GetY() return p } func (p *Point) Sub(n XYInterface) *Point { p.X -= n.GetX() p.Y -= n.GetY() return p } func (p *Point) Mul(n XYInterface) *Point { p.X *= n.GetX() p.Y *= n.GetY() return p } func (p *Point) Length() float64 { return math.Sqrt(p.X*p.X + p.Y*p.Y) } func (p *Point) Normalize() float64 { len := p.Length() p.X /= len p.Y /= len return len } func (p *Point) Equals(p2 XYInterface) bool { return XYEquals(p, p2) } func PointNegate(p *Point) *Point { return p.Clone().Negate() } func PointAdd(a, b *Point) *Point { return a.Clone().Add(b) } func PointSub(a, b *Point) *Point { return a.Clone().Sub(b) } func PointMul(a, b *Point) *Point { return a.Clone().Mul(b) } func PointCross(a, b *Point) { panic("poly2tri:Point.PointCross not implemented") } func PointString(a *Point) string { return a.String() } func PointCompare(a, b *Point) float64 { return XYCompare(a, b) } func PointEquals(a, b *Point) bool { return XYEquals(a, b) } func PointDot(a, b XYInterface) float64 { return a.GetX()*b.GetX() + a.GetY()*b.GetY() } type SortablePointsCollection []*Point func (c SortablePointsCollection) Len() int { return len(c) } func (c SortablePointsCollection) Swap(i, j int) { c[i], c[j] = c[j], c[i] } func (c SortablePointsCollection) Less(i, j int) bool 
{ return XYCompare(c[i], c[j]) < 0 }
vendor/github.com/ByteArena/poly2tri-go/point.go
0.8308
0.633467
point.go
starcoder