code
stringlengths
114
1.05M
path
stringlengths
3
312
quality_prob
float64
0.5
0.99
learning_prob
float64
0.2
1
filename
stringlengths
3
168
kind
stringclasses
1 value
package streaming import ( "github.com/alexandre-normand/glukit/app/apimodel" "github.com/alexandre-normand/glukit/app/container" "github.com/alexandre-normand/glukit/app/glukitio" "time" ) type ExerciseStreamer struct { head *container.ImmutableList startTime *time.Time wr glukitio.ExerciseBatchWriter d time.Duration } // NewExerciseStreamerDuration returns a new ExerciseStreamer whose buffer has the specified size. func NewExerciseStreamerDuration(wr glukitio.ExerciseBatchWriter, bufferDuration time.Duration) *ExerciseStreamer { return newExerciseStreamerDuration(nil, nil, wr, bufferDuration) } func newExerciseStreamerDuration(head *container.ImmutableList, startTime *time.Time, wr glukitio.ExerciseBatchWriter, bufferDuration time.Duration) *ExerciseStreamer { w := new(ExerciseStreamer) w.head = head w.startTime = startTime w.wr = wr w.d = bufferDuration return w } // WriteExercise writes a single Exercise into the buffer. func (b *ExerciseStreamer) WriteExercise(c apimodel.Exercise) (s *ExerciseStreamer, err error) { return b.WriteExercises([]apimodel.Exercise{c}) } // WriteExercises writes the contents of p into the buffer. // It returns the number of bytes written. // If nn < len(p), it also returns an error explaining // why the write is short. p must be sorted by time (oldest to most recent). 
func (b *ExerciseStreamer) WriteExercises(p []apimodel.Exercise) (s *ExerciseStreamer, err error) { s = newExerciseStreamerDuration(b.head, b.startTime, b.wr, b.d) if err != nil { return s, err } for i := range p { c := p[i] t := c.GetTime() truncatedTime := t.Truncate(s.d) if s.head == nil { s = newExerciseStreamerDuration(container.NewImmutableList(nil, c), &truncatedTime, s.wr, s.d) } else if t.Sub(*s.startTime) >= s.d { s, err = s.Flush() if err != nil { return s, err } s = newExerciseStreamerDuration(container.NewImmutableList(nil, c), &truncatedTime, s.wr, s.d) } else { s = newExerciseStreamerDuration(container.NewImmutableList(s.head, c), s.startTime, s.wr, s.d) } } return s, err } // Flush writes any buffered data to the underlying glukitio.Writer as a batch. func (b *ExerciseStreamer) Flush() (s *ExerciseStreamer, err error) { r, size := b.head.ReverseList() batch := ListToArrayOfExerciseReads(r, size) if len(batch) > 0 { innerWriter, err := b.wr.WriteExerciseBatch(batch) if err != nil { return nil, err } else { return newExerciseStreamerDuration(nil, nil, innerWriter, b.d), nil } } return newExerciseStreamerDuration(nil, nil, b.wr, b.d), nil } func ListToArrayOfExerciseReads(head *container.ImmutableList, size int) []apimodel.Exercise { r := make([]apimodel.Exercise, size) cursor := head for i := 0; i < size; i++ { r[i] = cursor.Value().(apimodel.Exercise) cursor = cursor.Next() } return r } // Close flushes the buffer and the inner writer to effectively ensure nothing is left // unwritten func (b *ExerciseStreamer) Close() (s *ExerciseStreamer, err error) { g, err := b.Flush() if err != nil { return g, err } innerWriter, err := g.wr.Flush() if err != nil { return newExerciseStreamerDuration(g.head, g.startTime, innerWriter, b.d), err } return newExerciseStreamerDuration(nil, nil, innerWriter, g.d), nil }
app/streaming/exercisestreamer.go
0.774669
0.438785
exercisestreamer.go
starcoder
// Package rlwe implements generic RLWE key types.
package rlwe

import (
	"math"
)

// SecretKey is a type for generic RLWE secret keys.
type SecretKey struct {
	Value PolyQP
}

// PublicKey is a type for generic RLWE public keys.
type PublicKey struct {
	Value [2]PolyQP
}

// SwitchingKey is a type for generic RLWE public switching keys.
type SwitchingKey struct {
	Value [][2]PolyQP
}

// RelinearizationKey is a type for generic RLWE public relinearization keys.
// It stores one switching key per relinearizable degree: the key at index i
// relinearizes a degree i+2 ciphertext back to degree i+1.
type RelinearizationKey struct {
	Keys []*SwitchingKey
}

// RotationKeySet is a type for storing generic RLWE public rotation keys,
// indexed by the galois element defining the automorphism.
type RotationKeySet struct {
	Keys map[uint64]*SwitchingKey
}

// EvaluationKey is the union of a relinearization key and a set of rotation
// keys.
type EvaluationKey struct {
	Rlk  *RelinearizationKey
	Rtks *RotationKeySet
}

// NewSecretKey generates a new SecretKey with zero values.
func NewSecretKey(params Parameters) *SecretKey {
	return &SecretKey{Value: params.RingQP().NewPoly()}
}

// NewPublicKey returns a new PublicKey with zero values.
func NewPublicKey(params Parameters) (pk *PublicKey) {
	return &PublicKey{Value: [2]PolyQP{params.RingQP().NewPoly(), params.RingQP().NewPoly()}}
}

// Equals checks two PublicKey struct for equality.
func (pk *PublicKey) Equals(other *PublicKey) bool {
	if pk == other {
		return true
	}
	if !pk.Value[0].Equals(other.Value[0]) {
		return false
	}
	return pk.Value[1].Equals(other.Value[1])
}

// NewRotationKeySet returns a new RotationKeySet with pre-allocated switching
// keys for each distinct galoisElement value.
func NewRotationKeySet(params Parameters, galoisElement []uint64) (rotKey *RotationKeySet) { rotKey = new(RotationKeySet) rotKey.Keys = make(map[uint64]*SwitchingKey, len(galoisElement)) for _, galEl := range galoisElement { rotKey.Keys[galEl] = NewSwitchingKey(params, params.QCount()-1, params.PCount()-1) } return } // GetRotationKey return the rotation key for the given galois element or nil if such key is not in the set. The // second argument is true iff the first one is non-nil. func (rtks *RotationKeySet) GetRotationKey(galoisEl uint64) (*SwitchingKey, bool) { if rtks.Keys == nil { return nil, false } rotKey, inSet := rtks.Keys[galoisEl] return rotKey, inSet } // NewSwitchingKey returns a new public switching key with pre-allocated zero-value func NewSwitchingKey(params Parameters, levelQ, levelP int) *SwitchingKey { decompSize := int(math.Ceil(float64(levelQ+1) / float64(levelP+1))) swk := new(SwitchingKey) swk.Value = make([][2]PolyQP, int(decompSize)) for i := 0; i < decompSize; i++ { swk.Value[i][0] = params.RingQP().NewPolyLvl(levelQ, levelP) swk.Value[i][1] = params.RingQP().NewPolyLvl(levelQ, levelP) } return swk } // NewRelinKey creates a new EvaluationKey with zero values. func NewRelinKey(params Parameters, maxRelinDegree int) (evakey *RelinearizationKey) { evakey = new(RelinearizationKey) evakey.Keys = make([]*SwitchingKey, maxRelinDegree) for d := 0; d < maxRelinDegree; d++ { evakey.Keys[d] = NewSwitchingKey(params, params.QCount()-1, params.PCount()-1) } return } // CopyNew creates a deep copy of the receiver secret key and returns it. func (sk *SecretKey) CopyNew() *SecretKey { if sk == nil { return nil } return &SecretKey{sk.Value.CopyNew()} } // CopyNew creates a deep copy of the receiver PublicKey and returns it. func (pk *PublicKey) CopyNew() *PublicKey { if pk == nil { return nil } return &PublicKey{[2]PolyQP{pk.Value[0].CopyNew(), pk.Value[1].CopyNew()}} } // Equals checks two RelinearizationKeys for equality. 
func (rlk *RelinearizationKey) Equals(other *RelinearizationKey) bool { if rlk == other { return true } if (rlk == nil) != (other == nil) { return false } if len(rlk.Keys) != len(other.Keys) { return false } for i := range rlk.Keys { if !rlk.Keys[i].Equals(other.Keys[i]) { return false } } return true } // CopyNew creates a deep copy of the receiver RelinearizationKey and returns it. func (rlk *RelinearizationKey) CopyNew() *RelinearizationKey { if rlk == nil || len(rlk.Keys) == 0 { return nil } rlkb := &RelinearizationKey{Keys: make([]*SwitchingKey, len(rlk.Keys))} for i, swk := range rlk.Keys { rlkb.Keys[i] = swk.CopyNew() } return rlkb } // Equals checks two SwitchingKeys for equality. func (swk *SwitchingKey) Equals(other *SwitchingKey) bool { if swk == other { return true } if (swk == nil) != (other == nil) { return false } if len(swk.Value) != len(other.Value) { return false } for i := range swk.Value { if !((&PublicKey{Value: swk.Value[i]}).Equals(&PublicKey{Value: other.Value[i]})) { return false } } return true } // CopyNew creates a deep copy of the receiver SwitchingKey and returns it. func (swk *SwitchingKey) CopyNew() *SwitchingKey { if swk == nil || len(swk.Value) == 0 { return nil } swkb := &SwitchingKey{Value: make([][2]PolyQP, len(swk.Value))} for i, el := range swk.Value { swkb.Value[i] = [2]PolyQP{el[0].CopyNew(), el[1].CopyNew()} } return swkb } // Equals checks to RotationKeySets for equality. func (rtks *RotationKeySet) Equals(other *RotationKeySet) bool { if rtks == other { return true } if (rtks == nil) || (other == nil) { return false } if len(rtks.Keys) != len(other.Keys) { return false } for galEl, otherKey := range other.Keys { if key, inSet := rtks.Keys[galEl]; !inSet || !otherKey.Equals(key) { return false } } return true } // Includes checks whether the receiver RotationKeySet includes the given other RotationKeySet. 
func (rtks *RotationKeySet) Includes(other *RotationKeySet) bool { if (rtks == nil) || (other == nil) { return false } for galEl := range other.Keys { if _, inSet := rtks.Keys[galEl]; !inSet { return false } } return true }
rlwe/keys.go
0.772616
0.421671
keys.go
starcoder
package main

import (
	"math"
	"math/rand"

	. "github.com/jakecoffman/cp"
	"github.com/jakecoffman/cp/examples"
)

const (
	// DENSITY converts a polygon area into a body mass.
	DENSITY = 1.0 / 10000.0

	// MAX_VERTEXES_PER_VORNOI caps the vertex count of each shattered fragment.
	MAX_VERTEXES_PER_VORNOI = 16
)

// WorleyContex carries the parameters of one shatter operation: the cell
// grid (cellSize, width, height) laid over the shape's bounding box bb,
// plus a seed for the per-cell pseudo-random points.
type WorleyContex struct {
	seed     uint32
	cellSize float64

	width, height int

	bb    BB
	focus Vector
}

// HashVect derives a deterministic pseudo-random point in (0,1)x(0,1)
// (inset by a 5% border) from the cell coordinates and the seed.
func HashVect(x, y, seed uint32) Vector {
	border := 0.05
	// Integer hash of the cell coordinates; low/high 16 bits become X/Y.
	var h uint32 = (x*1640531513 ^ y*2654435789) + seed

	return Vector{
		Lerp(border, 1.0-border, float64(h&0xFFFF)/0xFFFF),
		Lerp(border, 1.0-border, float64((h>>16)&0xFFFF)/0xFFFF),
	}
}

// WorleyPoint maps grid cell (i, j) to a world-space point: the jittered
// position HashVect produces, placed within the cell grid centered on bb.
func WorleyPoint(i, j int, context *WorleyContex) Vector {
	size := context.cellSize
	width := context.width
	height := context.height
	bb := context.bb

	fv := HashVect(uint32(i), uint32(j), context.seed)

	return Vector{
		Lerp(bb.L, bb.R, 0.5) + size*(float64(i)+fv.X-float64(width)*0.5),
		Lerp(bb.B, bb.T, 0.5) + size*(float64(j)+fv.Y-float64(height)*0.5),
	}
}

// ClipCell clips the polygon in verts[:count] against the half-plane
// bisecting center and the Worley point of cell (i, j), writing the result
// into clipped and returning its vertex count. If the other cell's point
// lies outside the shape, the polygon is copied through unchanged.
func ClipCell(shape *Shape, center Vector, i, j int, context *WorleyContex, verts []Vector, clipped []Vector, count int) int {
	other := WorleyPoint(i, j, context)
	if shape.PointQuery(other).Distance > 0 {
		// Other cell's point is outside the shape: no clipping needed.
		copy(clipped[:count], verts[:count])
		return count
	}

	// Half-plane through the midpoint of center/other, normal to their
	// difference; points with (v.n - dist) <= 0 are on center's side.
	n := other.Sub(center)
	dist := n.Dot(center.Lerp(other, 0.5))

	var clippedCount int
	// NOTE(review): i and j are reused below as polygon-edge indices, no
	// longer as cell coordinates.
	i = count - 1
	for j = 0; j < count; j++ {
		// Edge a->b: keep a if inside, and emit the intersection point
		// when the edge crosses the clipping plane.
		a := verts[i]
		aDist := a.Dot(n) - dist
		if aDist <= 0 {
			clipped[clippedCount] = a
			clippedCount++
		}

		b := verts[j]
		bDist := b.Dot(n) - dist
		if aDist*bDist < 0 {
			t := math.Abs(aDist) / (math.Abs(aDist) + math.Abs(bDist))
			clipped[clippedCount] = a.Lerp(b, t)
			clippedCount++
		}

		i = j
	}

	return clippedCount
}

// ShatterCell builds the fragment of shape belonging to grid cell
// (cellI, cellJ): it clips the shape's polygon against every other cell's
// half-plane, then adds the resulting piece to the space as a new body
// inheriting the original body's velocity at its centroid.
func ShatterCell(space *Space, shape *Shape, cell Vector, cellI, cellJ int, context *WorleyContex) {
	body := shape.Body()

	// ping/pong are the double buffer for successive clipping passes.
	ping := make([]Vector, MAX_VERTEXES_PER_VORNOI)
	pong := make([]Vector, MAX_VERTEXES_PER_VORNOI)

	poly := shape.Class.(*PolyShape)
	count := poly.Count()
	if count > MAX_VERTEXES_PER_VORNOI {
		count = MAX_VERTEXES_PER_VORNOI
	}

	// Seed the buffer with the polygon's vertices in world coordinates.
	for i := 0; i < count; i++ {
		ping[i] = body.LocalToWorld(poly.Vert(i))
	}

	for i := 0; i < context.width; i++ {
		for j := 0; j < context.height; j++ {
			// NOTE(review): shape.PointQuery(cell) is loop-invariant here
			// (cell does not change inside these loops); the caller already
			// checks it before calling ShatterCell.
			if !(i == cellI && j == cellJ) && shape.PointQuery(cell).Distance < 0 {
				count = ClipCell(shape, cell, i, j, context, ping, pong, count)
				copy(ping, pong)
			}
		}
	}

	centroid := CentroidForPoly(count, ping)
	mass := AreaForPoly(count, ping, 0) * DENSITY
	moment := MomentForPoly(mass, count, ping, centroid.Neg(), 0)

	newBody := space.AddBody(NewBody(mass, moment))
	newBody.SetPosition(centroid)
	// Fragment inherits the parent body's motion at the fragment's centroid.
	newBody.SetVelocityVector(body.VelocityAtWorldPoint(centroid))
	newBody.SetAngularVelocity(body.AngularVelocity())

	transform := NewTransformTranslate(centroid.Neg())
	newShape := space.AddShape(NewPolyShape(newBody, count, ping, transform, 0))
	newShape.SetFriction(shape.Friction())
}

// ShatterShape removes shape (and its body) from the space and replaces it
// with one fragment per grid cell whose Worley point lies inside the shape.
func ShatterShape(space *Space, shape *Shape, cellSize float64, focus Vector) {
	space.RemoveShape(shape)
	space.RemoveBody(shape.Body())

	bb := shape.BB()
	width := int((bb.R-bb.L)/cellSize) + 1
	height := int((bb.T-bb.B)/cellSize) + 1

	context := WorleyContex{rand.Uint32(), cellSize, width, height, bb, focus}

	for i := 0; i < context.width; i++ {
		for j := 0; j < context.height; j++ {
			cell := WorleyPoint(i, j, &context)
			if shape.PointQuery(cell).Distance < 0 {
				ShatterCell(space, shape, cell, i, j, &context)
			}
		}
	}
}

// main sets up the demo space: a static ground segment and one 200x200 box.
func main() {
	space := NewSpace()
	space.Iterations = 30
	space.SetGravity(Vector{0, -500})
	space.SleepTimeThreshold = 0.5
	space.SetCollisionSlop(0.5)

	shape := space.AddShape(NewSegment(space.StaticBody, Vector{-1000, -240}, Vector{1000, -240}, 0))
	shape.SetElasticity(1)
	shape.SetFriction(1)
	shape.SetFilter(examples.NotGrabbableFilter)

	width := 200.0
	height := 200.0
	mass := width * height * DENSITY
	moment := MomentForBox(mass, width, height)

	body := space.AddBody(NewBody(mass, moment))
	shape = space.AddShape(NewBox(body, width, height, 0))
	shape.SetFriction(0.6)

	examples.Main(space, 1.0/60.0, update, examples.DefaultDraw)
}

// update steps the simulation and, while the right mouse button is down,
// shatters the shape under the cursor (cells sized to 1/5 of its bounding
// box, but only when that is larger than 5 units).
func update(space *Space, dt float64) {
	space.Step(dt)

	if examples.RightDown {
		info := space.PointQueryNearest(examples.Mouse, 0, examples.GrabFilter)
		if info.Shape != nil {
			bb := info.Shape.BB()
			cellSize := math.Max(bb.R-bb.L, bb.T-bb.B) / 5.0
			if cellSize > 5.0 {
				ShatterShape(space, info.Shape, cellSize, examples.Mouse)
			}
		}
	}
}
examples/shatter/shatter.go
0.598547
0.474388
shatter.go
starcoder
package main func (ControllerDeployment) GetFieldDocString(fieldName string) string { switch fieldName { case "Affinity": return `Node affinity for pod assignment` case "ContainerSecurityContext": return "Container Security Context to be set on the controller component container\nref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/" case "DeploymentAnnotations": return `Annotations to add to the deployment` case "ExtraArgs": return `Optional additional arguments for controller` case "ExtraEnv": return `Optional additional environment variables for controller` case "NodeSelector": return `Node labels for pod assignment` case "PodAnnotations": return `Annotations to add to the pods` case "PodLabels": return `Annotations to add to the pods` case "ReplicaCount": return `Number of replicas` case "Resources": return `CPU/memory resource requests/limits for the pods` case "SecurityContext": return "Pod Security Context\nref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/" case "ServiceAccount": return `Service account configuration for controller` case "Tolerations": return `Node tolerations for pod assignment` default: return "" } } func (DeploymentStrategy) GetFieldDocString(fieldName string) string { switch fieldName { case "RollingUpdate": return `Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.` case "Type": return `Type of deployment. Can be "Recreate" or "RollingUpdate". 
Default is RollingUpdate.` default: return "" } } func (Image) GetFieldDocString(fieldName string) string { switch fieldName { case "Digest": return `Setting a digest will override any tag` case "PullPolicy": return `Image pull policy` case "Registry": return "You can manage a registry with\nExample:\n registry: quay.io\n repository: jetstack/cert-manager-controller" case "Repository": return `Image repository` case "Tag": return "Override the image tag to deploy by setting this variable.\nIf no value is set, the chart's appVersion will be used." default: return "" } } func (LeaderElection) GetFieldDocString(fieldName string) string { switch fieldName { case "LeaseDuration": return "The duration that non-leader candidates will wait after observing a\nleadership renewal until attempting to acquire leadership of a led but\nunrenewed leader slot. This is effectively the maximum duration that a\nleader can be stopped before it is replaced by another candidate." case "Namespace": return `Override the namespace used to store the ConfigMap for leader election` case "RenewDeadline": return "The interval between attempts by the acting master to renew a leadership\nslot before it stops leading. This must be less than or equal to the\nlease duration." case "RetryPeriod": return "The duration the clients should wait between attempting acquisition and\nrenewal of a leadership." 
default: return "" } } func (PodSecurityPolicy) GetFieldDocString(fieldName string) string { switch fieldName { case "Enabled": return "If `true`, create and use PodSecurityPolicy" case "UseAppArmor": return "If `true`, use Apparmor seccomp profile in PSP" default: return "" } } func (Probe) GetFieldDocString(fieldName string) string { switch fieldName { case "FailureThreshold": return `Minimum consecutive failures for the probe to be considered failed after having succeeded.` case "InitialDelaySeconds": return "Number of seconds after the container has started before liveness probes are initiated.\nref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" case "PeriodSeconds": return `How often (in seconds) to perform the probe.` case "SuccessThreshold": return `Minimum consecutive successes for the probe to be considered successful after having failed.` case "TimeoutSeconds": return "Number of seconds after which the probe times out.\nref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" default: return "" } } func (Prometheus) GetFieldDocString(fieldName string) string { switch fieldName { case "Enabled": return "If `true`, enable Prometheus monitoring" default: return "" } } func (RollingUpdateDeployment) GetFieldDocString(fieldName string) string { switch fieldName { case "MaxSurge": return "The maximum number of pods that can be scheduled above the desired number of\npods.\nValue can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).\nThis can not be 0 if MaxUnavailable is 0.\nAbsolute number is calculated from percentage by rounding up.\nDefaults to 25%.\nExample: when this is set to 30%, the new ReplicaSet can be scaled up immediately when\nthe rolling update starts, such that the total number of old and new pods do not exceed\n130% of desired pods. 
Once old pods have been killed,\nnew ReplicaSet can be scaled up further, ensuring that total number of pods running\nat any time during the update is at most 130% of desired pods." case "MaxUnavailable": return "The maximum number of pods that can be unavailable during the update.\nValue can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).\nAbsolute number is calculated from percentage by rounding down.\nThis can not be 0 if MaxSurge is 0.\nDefaults to 25%.\nExample: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods\nimmediately when the rolling update starts. Once new pods are ready, old ReplicaSet\ncan be scaled down further, followed by scaling up the new ReplicaSet, ensuring\nthat the total number of pods available at all times during the update is at\nleast 70% of desired pods." default: return "" } } func (ServiceAccount) GetFieldDocString(fieldName string) string { switch fieldName { case "Annotations": return `Annotations to add to the service account for the cert-manager controller` case "AutomountServiceAccountToken": return `Automount API credentials for the cert-manager service account` case "Create": return "If `true`, create a new service account for the cert-manager controller" case "Name": return "The name of the service account for the cert-manager controller to be used.\nIf not set and `serviceAccount.create` is `true`, a name is generated using\nthe fullname template" default: return "" } } func (Servicemonitor) GetFieldDocString(fieldName string) string { switch fieldName { case "Enabled": return `Enable Prometheus Operator ServiceMonitor monitoring` case "Interval": return `Prometheus scrape interval` case "Labels": return `Add custom labels to ServiceMonitor` case "Path": return `Prometheus scrape path` case "PrometheusInstance": return `Prometheus Instance definition` case "ScrapeTimeout": return `Prometheus scrape timeout` case "TargetPort": return `Prometheus scrape port` default: 
return "" } } func (Values) GetFieldDocString(fieldName string) string { switch fieldName { case "ClusterResourceNamespace": return "Override the namespace used to store DNS provider credentials etc. for ClusterIssuer\nresources. By default, the same namespace as cert-manager is deployed within is\nused. This namespace will not be automatically created by the Helm chart." case "FeatureGates": return `Comma separated list of feature gates that should be enabled on the` case "HttpProxy": return "Value of the `HTTP_PROXY` environment variable in the cert-manager pod" case "HttpsProxy": return "Value of the `HTTPS_PROXY` environment variable in the cert-manager pod" case "InstallCRDs": return "If true, CRD resources will be installed as part of the Helm chart.\nIf enabled, when uninstalling CRD resources will be deleted causing all\ninstalled custom resources to be DELETED." case "NoProxy": return "Value of the `NO_PROXY` environment variable in the cert-manager pod" case "PodDnsConfig": return `Optional cert-manager pod [DNS configurations](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pods-dns-config)` case "PodDnsPolicy": return "Optional cert-manager pod [DNS policy](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pods-dns-policy)\nUseful if you have a public and private DNS zone for\nthe same domain on Route 53. What follows is an example of ensuring\ncert-manager can access an ingress or DNS TXT records at all times.\n**NOTE:** This requires Kubernetes 1.10 or `CustomPodDNS` feature\ngate enabled for the cluster to work." 
case "ServiceLabels": return `Labels to add to the cert-manager controller service` case "VolumeMounts": return `Volume mounts to add to cert-manager` case "Volumes": return `Volumes to add to cert-manager` default: return "" } } func (Webhook) GetFieldDocString(fieldName string) string { switch fieldName { case "HostNetwork": return "Specifies if the webhook should be started in hostNetwork mode.\n\nRequired for use in some managed kubernetes clusters (such as AWS EKS) with custom\nCNI (such as calico), because control-plane managed by AWS cannot communicate\nwith pods' IP CIDR and admission webhooks are not working\n\nSince the default port for the webhook conflicts with kubelet on the host\nnetwork, `webhook.securePort` should be changed to an available port if\nrunning in hostNetwork mode." case "LoadBalancerIP": return "The specific load balancer IP to use (when `serviceType` is `LoadBalancer`)." case "MutatingWebhookConfigurationAnnotations": return `Annotations to add to the webhook MutatingWebhookConfiguration` case "SecurePort": return "The port that the webhook should listen on for requests.\nIn GKE private clusters, by default kubernetes apiservers are allowed to\ntalk to the cluster nodes only on 443 and 10250. so configuring\nsecurePort: 10250, will work out of the box without needing to add firewall\nrules or requiring NET_BIND_SERVICE capabilities to bind port numbers <1000" case "ServiceType": return "The type of the `Service`.\nSpecifies how the service should be handled. Useful if you want to expose the\nwebhook to outside of the cluster. In some cases, the control plane cannot\nreach internal services." case "TimeoutSeconds": return `Seconds the API server should wait the webhook to respond before treating the call as a failure.` case "Url": return "Overrides the mutating webhook and validating webhook so they reach the webhook\nservice using the `url` field instead of a service." 
case "ValidatingWebhookConfigurationAnnotations": return `Annotations to add to the webhook ValidatingWebhookConfiguration` default: return "" } }
example/values/comments_generated.go
0.85449
0.426202
comments_generated.go
starcoder
// Package add implements the rec.add command, // i.e. add specimen records. package add import ( "encoding/csv" "io" "os" "strconv" "strings" "time" "github.com/js-arias/biodv" "github.com/js-arias/biodv/cmdapp" "github.com/js-arias/biodv/geography" "github.com/js-arias/biodv/records" "github.com/pkg/errors" ) var cmd = &cmdapp.Command{ UsageLine: "rec.add [-g|--georef] [-l|--locatable] [<file>...]", Short: "add specimen records", Long: ` Command rec.add adds one or more records from the indicated files, or the standard input (if no file is defined) to the specimen records database. It assumes that the input file is a table with tab-delimited values. Recognized column names (and their accepted values) are: id the ID of the record. taxon name (or ID) of the taxon assigned to the specimen. catalog a catalog code, usually in the form <institution code>:<collection code>:<catalog number>. basis basis of record, it can be: unknown if the basis is unknown preserved if it is a preserved (museum) specimen fossil if it is a fossil (museum) specimen observation if the record is based on a human observation machine if the record is based on a machine sensor reading date the sampling date, it must be in the RFC3339 format, e.g. '2006-01-02T15:04:05Z07:00' country the country of the sample, a two letter ISO 3166-1 alpha-2 code. state the state, province, or a similar principal country subdivision. county a secondary country subdivision. locality the locality of the sampling. collector the person who collect the sample. z in flying or oceanic specimens, the distance to groud (depth as negative) when the sampling was made. latitude geographic latitude of the record. longitude geographic longitude of the record. geosource source of the georeference. validation validation of the georeference. uncertainty georeference uncertainty in meters. elevation elevation over sea level, in meters. reference a bibliographic reference. dataset source of the specimen record information. 
determiner the person who identified the specimen. organism the organism ID. stage the growth stage of the organism. sex sex of the organism. altitude in flying specimens, the altitude above ground when the observation was made. If no ID is defined, but a catalog code is given, then the catalog code will be used as the record ID. Other values are accepted and stored as given. Options are: -g --georef If set, only the records with a valid georefence will be added. -l --locatable If set, only records that can be locatable (i.e either georeferenced or with a complete description of the locality) will be stored. <file> One or more files to be processed by rec.add. If no file is given, the data will be read from the standard input. `, Run: run, } func init() { cmdapp.Add(cmd) } var georef bool var locatable bool func register(c *cmdapp.Command) { c.Flag.BoolVar(&georef, "georef", false, "") c.Flag.BoolVar(&georef, "g", false, "") c.Flag.BoolVar(&locatable, "locatable", false, "") c.Flag.BoolVar(&locatable, "l", false, "") } func run(c *cmdapp.Command, args []string) error { recs, err := records.Open("") if err != nil { return errors.Wrap(err, c.Name()) } if len(args) == 0 { args = append(args, "-") } for _, a := range args { if a == "-" { if err := read(recs, os.Stdin); err != nil { return errors.Wrapf(err, "%s: while reading from stdin", c.Name()) } continue } f, err := os.Open(a) if err != nil { return errors.Wrapf(err, "%s: unable to open %s", c.Name(), a) } err = read(recs, f) f.Close() if err != nil { return errors.Wrapf(err, "%s: while reading from %s", c.Name(), a) } } if err := recs.Commit(); err != nil { return errors.Wrap(err, c.Name()) } return nil } func read(recs *records.DB, in io.Reader) error { r := csv.NewReader(in) r.Comma = '\t' r.Comment = '#' // reads the header cols := make(map[string]int) head, err := r.Read() if err != nil { return errors.Wrap(err, "while reading header") } for i, h := range head { h = strings.ToLower(h) if _, dup := cols[h]; dup 
{ return errors.Errorf("column name %q repeated", h) } cols[h] = i } if _, ok := cols["taxon"]; !ok { return errors.New("a column called 'taxon' must be defined") } // reads the records for i := 1; ; i++ { row, err := r.Read() if err == io.EOF { return nil } if err != nil { return errors.Wrapf(err, "on row %d", i) } pt := geography.NewPosition() nm := biodv.TaxCanon(row[cols["taxon"]]) if nm == "" { continue } id := "" if c, ok := cols["id"]; ok { id = row[c] } basis := biodv.UnknownBasis if c, ok := cols["basis"]; ok { basis = biodv.GetBasis(row[c]) } lt := "" if c, ok := cols["latitude"]; ok { lt = row[c] } ln := "" if c, ok := cols["longitude"]; ok { ln = row[c] } if lt != "" && ln != "" { lat, err := strconv.ParseFloat(lt, 64) if err != nil { return errors.Wrapf(err, "on row %d, col 'latitude'", i) } lon, err := strconv.ParseFloat(ln, 64) if err != nil { return errors.Wrapf(err, "on row %d, col 'longitude'", i) } pt.Lat = lat pt.Lon = lon } if georef && !pt.IsValid() { continue } cat := "" if c, ok := cols["catalog"]; ok { cat = row[c] } vals := make(map[string]string) ev := biodv.CollectionEvent{} for h, c := range cols { switch h { case "taxon", "id", "catalog", "basis": case "date": if row[c] != "" { ev.Date, _ = time.Parse(time.RFC3339, row[c]) } case "country": if cn := geography.Country(row[c]); cn != "" { ev.Admin.Country = strings.ToUpper(row[c]) } case "state": ev.Admin.State = row[c] case "county": ev.Admin.County = row[c] case "locality": ev.Locality = row[c] case "collector": ev.Collector = row[c] case "z": z, _ := strconv.Atoi(row[c]) ev.Z = z case "latitude", "longitude": case "elevation": elv, _ := strconv.Atoi(row[c]) pt.Elevation = uint(elv) case "uncertainty": un, _ := strconv.Atoi(row[c]) pt.Uncertainty = uint(un) case "geosource": pt.Source = row[c] case "validation": pt.Validation = row[c] default: vals[h] = row[c] } } if locatable && !isLocatable(pt, ev) { continue } rec, err := recs.Add(nm, id, cat, basis, pt.Lat, pt.Lon) if err != nil { 
return errors.Wrapf(err, "on row %d", i) } rec.SetCollEvent(ev) rec.SetGeoRef(pt) for k, v := range vals { if err := rec.Set(k, v); err != nil { return errors.Wrapf(err, "on row %d, col '%s'", i, k) } } } } // IsLocatable returns true if the record is locatable. func isLocatable(pt geography.Position, ev biodv.CollectionEvent) bool { if pt.IsValid() { return true } if !geography.IsValidCode(ev.CountryCode()) { return false } if ev.Locality != "" { return true } return ev.State() != "" || ev.County() != "" }
cmd/biodv/internal/records/add/add.go
0.609175
0.427755
add.go
starcoder
package main import ( "fmt" "strings" "github.com/diamondburned/arikawa/v3/discord" "github.com/hhhapz/doc" ) const ( docLimit = 2800 defLimit = 1000 accentColor = 0x00ADD8 ) func pkgEmbed(pkg doc.Package, full bool) (discord.Embed, bool) { c, more := comment(pkg.Overview, 32, full) return discord.Embed{ Title: "Package " + pkg.Name, URL: "https://pkg.go.dev/" + pkg.URL, Description: fmt.Sprintf("**Types:** %d\n**Functions:** %d\n\n%s", len(pkg.Types), len(pkg.Functions), c), Color: accentColor, }, more } func typEmbed(pkg doc.Package, typ doc.Type, full bool) (discord.Embed, bool) { def, dMore := typdef(typ.Signature, full) c, cMore := comment(typ.Comment, len(def), full) return discord.Embed{ Title: fmt.Sprintf("%s: %s", pkg.Name, typ.Name), URL: fmt.Sprintf("https://pkg.go.dev/%s#%s", pkg.URL, typ.Name), Description: fmt.Sprintf("```go\n%s\n```\n%s", def, c), Color: accentColor, }, dMore || cMore } func fnEmbed(pkg doc.Package, fn doc.Function, full bool) (discord.Embed, bool) { def, dMore := typdef(fn.Signature, full) c, cMore := comment(fn.Comment, len(def), full) return discord.Embed{ Title: fmt.Sprintf("%s: %s", pkg.Name, fn.Name), URL: fmt.Sprintf("https://pkg.go.dev/%s#%s", pkg.URL, fn.Name), Description: fmt.Sprintf("```go\n%s\n```\n%s", def, c), Color: accentColor, }, dMore || cMore } func methodEmbed(pkg doc.Package, method doc.Method, full bool) (discord.Embed, bool) { def, dMore := typdef(method.Signature, full) c, cMore := comment(method.Comment, len(def), full) return discord.Embed{ Title: fmt.Sprintf("%s: %s.%s", pkg.Name, method.For, method.Name), URL: fmt.Sprintf("https://pkg.go.dev/%s#%s.%s", pkg.URL, method.For, method.Name), Description: fmt.Sprintf("```go\n%s\n```\n%s", def, c), Color: accentColor, }, dMore || cMore } func helpEmbed() discord.Embed { return discord.Embed{ Title: "Docs help", Description: `Dr-Docso is a bot to query Go documentation. The parsing is done using [hhhapz/doc](https://github.com/hhhapz/doc). 
Here are some example queries:` + "```md" + ` # Docs help /docs help # List aliases /docs alias # Search a module /docs query:fmt # Search a type /docs query:github.com/hhhapz/doc.package # Search a type method /docs query:github.com/hhhapz/doc searcher search # Many standard library types have aliases /docs query:http (-> net/http) ` + "```", Footer: &discord.EmbedFooter{Text: "Source Code: https://github.com/DiscordGophers/dr-docso"}, Color: accentColor, } } func aliasList(aliases map[string]string) discord.Embed { keys := make([]string, 0, len(aliases)) for k, v := range aliases { keys = append(keys, fmt.Sprintf("%s -> %s", k, v)) } desc := "*No aliases defined*" if len(keys) > 0 { desc = fmt.Sprintf("```fix\n%s```", strings.Join(keys, "\n")) } return discord.Embed{ Title: "Current aliases", Description: desc, Color: accentColor, } } func ignoreList(blacklist map[discord.Snowflake]struct{}) discord.Embed { keys := make([]string, 0, len(blacklist)) for k := range blacklist { keys = append(keys, fmt.Sprintf("- <@!%s>", k)) } desc := "*No ignores set*" if len(keys) > 0 { desc = strings.Join(keys, "\n") } return discord.Embed{ Title: "Ignored Users", Description: desc, Color: accentColor, } } func failEmbed(title, description string) discord.Embed { return discord.Embed{ Title: title, Description: description, Color: 0xEE0000, } }
embed.go
0.613931
0.526891
embed.go
starcoder
package measurement import ( "regexp" "strconv" "strings" "github.com/wayn3h0/gop/decimal" "github.com/wayn3h0/gop/errors" ) // VolumeUnit represents the unit of volume. type VolumeUnit int // Volume Units. const ( CubicMillimeter VolumeUnit = 1 // base CubicCentimeter VolumeUnit = 1000 CubicMeter VolumeUnit = 1000000000 ) var ( // default volume unit DefaultVolumeUnit = CubicMeter ) // Volume represents a volume information. type Volume struct { unit VolumeUnit value *decimal.Decimal } func (v *Volume) ensureInitialized() { if v.value == nil { v.unit = DefaultVolumeUnit v.value = new(decimal.Decimal) } } // Unit returns the unit of volume. func (v *Volume) Unit() VolumeUnit { return v.unit } // Value returns the float64 value of volume and a indicator whether the value is exact. func (v *Volume) Value() (float64, bool) { return v.value.Float64() } // IsZero reports whether the value is zero. func (v *Volume) IsZero() bool { v.ensureInitialized() return v.value.IsZero() } // String returns the formatted string. func (v *Volume) String() string { v.ensureInitialized() switch v.unit { case CubicMillimeter: return v.value.String() + "mm3" case CubicCentimeter: return v.value.String() + "cm3" case CubicMeter: return v.value.String() + "m3" } panic(errors.Newf("measurement: unknown volume unit `%d`", v.unit)) } // Copy set x to y and return x. func (x *Volume) Copy(y *Volume) *Volume { x.ensureInitialized() y.ensureInitialized() x.unit = y.unit x.value.Copy(y.value) return x } // Convert converts the volume with given unit. func (v *Volume) Convert(unit VolumeUnit) *Volume { v.ensureInitialized() if v.unit != unit { if !v.value.IsZero() { src := new(decimal.Decimal).SetInt64(int64(v.unit)) dst := new(decimal.Decimal).SetInt64(int64(unit)) v.value.Mul(src).Div(dst) } v.unit = unit } return v } // RoundUp rounds the value up to integer. 
func (v *Volume) RoundUp() *Volume { v.value.RoundUp(0) return v } // Cmp compares x and y and returns: // -1 if x < y // 0 if x == y // +1 if x > y func (x *Volume) Cmp(y *Volume) int { x.ensureInitialized() y.ensureInitialized() yval := new(Volume).Copy(y).Convert(x.unit).value return x.value.Cmp(yval) } // Add sets x to x+y and returns x. func (x *Volume) Add(y *Volume) *Volume { x.ensureInitialized() y.ensureInitialized() yval := new(Volume).Copy(y).Convert(x.unit).value x.value.Add(yval) return x } // Sub sets x to x-y and returns x. func (x *Volume) Sub(y *Volume) *Volume { x.ensureInitialized() y.ensureInitialized() yval := new(Volume).Copy(y).Convert(x.unit).value x.value.Sub(yval) return x } // Mul sets x to x*y and returns x. func (x *Volume) Mul(y float64) *Volume { x.ensureInitialized() x.value.Mul(decimal.New(y)) return x } // Div sets x to quotient of x/y and returns x. func (x *Volume) Div(y float64) *Volume { x.ensureInitialized() x.value.Div(decimal.New(y)) return x } // Weight returns the dimensional (volumetric) weight in kg. // Description: https://en.wikipedia.org/wiki/Dimensional_weight func (v *Volume) Weight(metricFactor int) *Weight { v.ensureInitialized() val := new(Volume).Copy(v).Convert(CubicCentimeter).value w := &Weight{ unit: Kilogram, value: val, } return w.Div(float64(metricFactor)) } // NewVolume returns a new volume. func NewVolume(value float64, unit VolumeUnit) (*Volume, error) { val := decimal.New(value) if val.Sign() < 0 { return nil, errors.Newf("measurement: volume value `%f' is invalid", value) } return &Volume{ unit: unit, value: val, }, nil } // MustNewVolume is similar to NewVolume but panics if has error. func MustNewVolume(value float64, unit VolumeUnit) *Volume { v, err := NewVolume(value, unit) if err != nil { panic(err) } return v } // NewVolumeFromDimensions returns a new volumes by dimensions. 
func NewVolumeFromDimensions(length, width, height *Dimension) (*Volume, error) { if length == nil || length.IsZero() { return nil, errors.New("measurement: length is invalid") } if width == nil || width.IsZero() { return nil, errors.New("measurement: width is invalid") } if height == nil || height.IsZero() { return nil, errors.New("measurement: height is invalid") } l := new(Dimension).Copy(length).Convert(Millimeter).value w := new(Dimension).Copy(width).Convert(Millimeter).value h := new(Dimension).Copy(height).Convert(Millimeter).value volume := &Volume{ unit: CubicMillimeter, value: l.Mul(w).Mul(h), } return volume.Convert(DefaultVolumeUnit), nil } // MustNewVolumeFromDimensions is similar to NewVolumeFromDimension but panics if has error. func MustNewVolumeFromDimensions(length, width, height *Dimension) *Volume { v, err := NewVolumeFromDimensions(length, width, height) if err != nil { panic(err) } return v } var ( _VolumePattern = regexp.MustCompile(`^(\d+\.?\d*)(\s*)(mm3|cm3|m3)?$`) ) // ParseVolume returns a new volume by parsing string. // Default: m3 func ParseVolume(str string) (*Volume, error) { s := strings.ToLower(strings.Replace(str, " ", "", -1)) matches := _VolumePattern.FindStringSubmatch(s) if len(matches) != 4 { return nil, errors.Newf("measurement: volume string %q is invalid", str) } value, _ := strconv.ParseFloat(matches[1], 64) var unit VolumeUnit switch matches[3] { case "mm3": unit = CubicMillimeter case "cm3": unit = CubicCentimeter case "m3": unit = CubicMeter default: unit = DefaultVolumeUnit } return NewVolume(value, unit) } // MustParseVolume is similar to ParseVolume but panics if has error. func MustParseVolume(str string) *Volume { v, err := ParseVolume(str) if err != nil { panic(err) } return v } // MarshalJSON marshals to JSON data. // Implements Marshaler interface. func (v *Volume) MarshalJSON() ([]byte, error) { v.ensureInitialized() return []byte(strconv.Quote(v.String())), nil } // UnmarshalJSON unmarshas from JSON data. 
// Implements Unmarshaler interface. func (v *Volume) UnmarshalJSON(data []byte) error { str, err := strconv.Unquote(string(data)) if err != nil { return err } v2, err := ParseVolume(str) if err != nil { return err } v.Copy(v2) return nil }
measurement/volume.go
0.867892
0.507324
volume.go
starcoder
package slice import ( "fmt" "math/rand" "sort" ) // Clone returns a copy of the slice. func (slice Slice[T]) Clone() Slice[T] { ns := make(Slice[T], len(slice)) copy(ns, slice) return ns } // Reduce applies a function against an initial and each element in the slice. func (slice Slice[T]) Reduce(f func(T, T) T, initial T) T { result := initial for _, v := range slice { result = f(result, v) } return result } // Any returns true if any element in the slice satisfies the predicate. func (slice Slice[T]) Any(f func(T) bool) bool { for _, v := range slice { if f(v) { return true } } return false } // All returns true if all elements in the slice satisfy the predicate. func (slice Slice[T]) All(f func(T) bool) bool { for _, v := range slice { if !f(v) { return false } } return true } // BinarySearch returns the index of the element in the slice, or -1 if not found. func (slice Slice[T]) BinarySearch(value T, f func(T, T) int) int { low := 0 high := len(slice) for low < high { mid := (low + high) / 2 switch f(slice[mid], value) { case 0: return mid case 1: high = mid case -1: low = mid + 1 } } return -1 } // Count returns the number of elements satisfy the predicate in the slice. func (slice Slice[T]) Count(f func(T) bool) int { count := 0 for _, v := range slice { if f(v) { count++ } } return count } // Filter returns a new slice containing all elements satisfy the predicate in the slice. func (slice Slice[T]) Filter(f func(T) bool) Slice[T] { result := make([]T, 0) for _, v := range slice { if f(v) { result = append(result, v) } } return result } // FilterIndex returns a new slice containing all elements satisfy the predicate with index in the slice. func (slice Slice[T]) FilterIndex(f func(int) bool) Slice[T] { result := make([]T, 0) for i, v := range slice { if f(i) { result = append(result, v) } } return result } // FirstOf returns the first element in the slice that satisfies the predicate. 
func (slice Slice[T]) FirstOf(f func(T) bool) (r T) { for _, v := range slice { if f(v) { return v } } return r } // FirstIndexOf returns the index of the first element in the slice that satisfies the predicate. func (slice Slice[T]) FirstIndexOf(f func(T) bool) int { for i, v := range slice { if f(v) { return i } } return -1 } // LastOf returns the last element in the slice that satisfies the predicate. func (slice Slice[T]) LastOf(f func(T) bool) (r T) { for i := len(slice) - 1; i >= 0; i-- { if f(slice[i]) { return slice[i] } } return r } // LastIndexOf returns the index of the last element in the slice that satisfies the predicate. func (slice Slice[T]) LastIndexOf(f func(T) bool) int { for i := len(slice) - 1; i >= 0; i-- { if f(slice[i]) { return i } } return -1 } // Map applies a function to each element in the slice. func (slice Slice[T]) Map(f func(int, T) T) Slice[T] { result := make([]T, len(slice)) for i, v := range slice { result[i] = f(i, v) } return result } // Reverse returns a new slice with elements in reverse order. func (slice Slice[T]) Reverse() Slice[T] { result := make([]T, len(slice)) for i, v := range slice { result[len(slice)-i-1] = v } return result } // Max returns the maximum element in the slice. func (slice Slice[T]) Max(f func(T, T) int) (r T) { if len(slice) == 0 { return r } r = slice[0] for _, v := range slice[1:] { if f(v, r) == -1 { r = v } } return r } // Min returns the minimum element in the slice. func (slice Slice[T]) Min(f func(T, T) int) (r T) { if len(slice) == 0 { return r } r = slice[0] for _, v := range slice[1:] { if f(v, r) == 1 { r = v } } return r } // Random returns a random element in the slice. func (slice Slice[T]) Random() T { return slice[rand.Intn(len(slice))] } // Shuffle returns itself with elements in random order. 
func (slice Slice[T]) Shuffle() Slice[T] { for i := len(slice) - 1; i > 0; i-- { j := rand.Intn(i + 1) slice[i], slice[j] = slice[j], slice[i] } return slice } // Sort returns a new slice with elements in sorted order. func (slice Slice[T]) Sort(f func(T, T) int) Slice[T] { result := make([]T, len(slice)) copy(result, slice) sort.Slice(result, func(i, j int) bool { return f(result[i], result[j]) == -1 }) return result } //Chunk returns a slice of slices in length of size. func (slice Slice[T]) Chunk(size int) []Slice[T] { result := make([]Slice[T], 0) for i := 0; i < len(slice); i += size { min := i + size if min > len(slice) { min = len(slice) } // check one more time result = append(result, slice[i:min]) } return result } // JoinToString returns a string with all elements joined by sep. func (slice Slice[T]) JoinToString(sep string) string { result := "" for i, v := range slice { if i != 0 { result += sep } result += fmt.Sprintf("%v", v) } return result } // Foreach calls the function f for each element in the slice. func (slice Slice[T]) Foreach(f func(T)) { for _, v := range slice { f(v) } }
collections/slice/methods.go
0.861567
0.517632
methods.go
starcoder
// Package main a sample match function that uses the GRPC harness to set up // the match making function as a service. This sample is a reference // to demonstrate the usage of the GRPC harness and should only be used as // a starting point for your match function. You will need to modify the // matchmaking logic in this function based on your game's requirements. package main import ( "fmt" "time" goHarness "open-match.dev/open-match/internal/harness/golang" "open-match.dev/open-match/internal/pb" ) func main() { // Invoke the harness to setup a GRPC service that handles requests to run the // match function. The harness itself queries open match for player pools for // the specified request and passes the pools to the match function to generate // proposals. goHarness.RunMatchFunction(&goHarness.FunctionSettings{ FunctionName: "simple-matchfunction", Func: makeMatches, }) } // makeMatches is where your custom matchmaking logic lives. func makeMatches(view *goHarness.MatchFunctionParams) []*pb.Match { // This simple match function does the following things // 1. Flatten the poolNameToTickets map into an array // 2. Fill in the rosters by iterating through the tickets array // 3. Create one single match candidate and return it. allTickets := make([]*pb.Ticket, 0) ticketOffset := 0 for _, tickets := range view.PoolNameToTickets { allTickets = append(allTickets, tickets...) } // This example does nothing but fills the rosters until they are full. 
for _, roster := range view.Rosters { rosterSize := len(roster.TicketId) view.Logger.Tracef("Filling roster: %s, roster size: %d", roster.Name, rosterSize) for rosterOffset := 0; rosterOffset < rosterSize; rosterOffset++ { roster.TicketId[rosterOffset] = allTickets[ticketOffset].Id ticketOffset++ } } return []*pb.Match{ { MatchId: fmt.Sprintf("profile-%s-time-%s", view.ProfileName, time.Now().Format("00:00")), MatchProfile: view.ProfileName, MatchFunction: "a-simple-matchfunction", Ticket: allTickets[:ticketOffset:ticketOffset], Roster: view.Rosters, Properties: view.Properties, }, } }
examples/functions/golang/simple/main.go
0.708213
0.474692
main.go
starcoder
package linear import ( "bytes" "fmt" "math" tolerancepkg "github.com/a-h/linear/tolerance" "github.com/a-h/round" ) // Vector represents an array of values. type Vector []float64 // NewVector creates a vector with the dimensions specified by the argument. func NewVector(values ...float64) Vector { return Vector(values) } func (v1 Vector) String() string { if len(v1) == 1 { return fmt.Sprintf("[%v]", v1[0]) } buf := bytes.NewBufferString("[") for i, p := range v1 { buf.WriteString(fmt.Sprintf("%v", p)) if i < len(v1)-1 { buf.WriteString(", ") } } buf.WriteString("]") return buf.String() } // Eq compares an input vector against the current vector. func (v1 Vector) Eq(v2 Vector) bool { return v1.EqWithinTolerance(v2, DefaultTolerance) } // EqWithinTolerance tests that a vector is equal, within a given tolerance. func (v1 Vector) EqWithinTolerance(v2 Vector, tolerance float64) bool { if len(v1) != len(v2) { return false } for i := 0; i < len(v2); i++ { if !tolerancepkg.IsWithin(v1[i], v2[i], tolerance) { return false } } return true } // Add adds the input vector to the current vector and returns a new vector. func (v1 Vector) Add(v2 Vector) (Vector, error) { if len(v1) != len(v2) { return Vector{}, fmt.Errorf("cannot add vectors together because they have different dimensions (%d and %d)", len(v1), len(v2)) } op := make([]float64, len(v1)) for i := 0; i < len(v1); i++ { op[i] = v1[i] + v2[i] } return Vector(op), nil } // Sub subtracts the input vector from the current vector and returns a new vector. func (v1 Vector) Sub(v2 Vector) (Vector, error) { if len(v1) != len(v2) { return Vector{}, fmt.Errorf("cannot subtract vectors because they have different dimensions (%d and %d)", len(v1), len(v2)) } op := make([]float64, len(v1)) for i := 0; i < len(v1); i++ { op[i] = v1[i] - v2[i] } return Vector(op), nil } // Mul muliplies the input vector and the current vector together and returns a new vector. 
func (v1 Vector) Mul(v2 Vector) (Vector, error) { if len(v1) != len(v2) { return Vector{}, fmt.Errorf("cannot multiply vectors because they have different dimensions (%d and %d)", len(v1), len(v2)) } op := make([]float64, len(v1)) for i := 0; i < len(v1); i++ { op[i] = v1[i] * v2[i] } return Vector(op), nil } // Scale muliplies the current vector by the scalar input and returns a new vector. func (v1 Vector) Scale(scalar float64) Vector { op := make([]float64, len(v1)) for i := 0; i < len(v1); i++ { op[i] = v1[i] * scalar } return Vector(op) } // Magnitude calculates the magnitude of the vector by calculating the square root of // the sum of each element squared. func (v1 Vector) Magnitude() float64 { var sumOfSquares float64 for _, v := range v1 { sumOfSquares += (v * v) } return math.Sqrt(sumOfSquares) } // Normalize normalizes the magnitude of a vector to 1 and returns a new vector. func (v1 Vector) Normalize() Vector { mag := v1.Magnitude() if mag == 0 { return Vector(make([]float64, len(v1))) // Return a vector of zeroes if the magnitude is zero. } return v1.Scale(float64(1.0) / mag) } // IsZeroVector returns true if all of the values in the vector are within tolerance of zero. func (v1 Vector) IsZeroVector() bool { for _, v := range v1 { if !tolerancepkg.IsWithin(v, 0, DefaultTolerance) { return false } } return true } // DotProduct calculates the dot product of the current vector and the input vector, or an error if the dimensions // of the vectors do not match. func (v1 Vector) DotProduct(v2 Vector) (float64, error) { var rv float64 if len(v1) != len(v2) { return rv, fmt.Errorf("cannot calculate the dot product of the vectors because they have different dimensions (%d and %d)", len(v1), len(v2)) } for i := 0; i < len(v1); i++ { rv += v1[i] * v2[i] } return rv, nil } // AngleBetween returns the angle (in radians) between the current vector and v2, or an error if the dimensions of // the vectors do not match. 
func (v1 Vector) AngleBetween(v2 Vector) (Radian, error) { dp, err := v1.DotProduct(v2) if err != nil { return 0, err } return Radian(math.Acos(dp / (v1.Magnitude() * v2.Magnitude()))), nil } // IsParallelTo calculates whether the current vector is parallel to the input vector by normalizing both // vectors, and comparing them. In the case that the func (v1 Vector) IsParallelTo(v2 Vector) (bool, error) { if len(v1) != len(v2) { return false, fmt.Errorf("cannot calculate whether the vectors are parallel because they have different dimensions (%d and %d)", len(v1), len(v2)) } if v1.IsZeroVector() || v2.IsZeroVector() { return true, nil } u1 := v1.Normalize() u2 := v2.Normalize() parallelAndSameDirection := u1.EqWithinTolerance(u2, DefaultTolerance) parallelAndOppositeDirection := func() bool { return u1.EqWithinTolerance(u2.Scale(-1), DefaultTolerance) } return parallelAndSameDirection || parallelAndOppositeDirection(), nil } // IsOrthogonalTo calculates whether the current vector is orthogonal to the input vector by calculating // the dot product. If the dot product is zero, then the vectors are orthogonal. func (v1 Vector) IsOrthogonalTo(v2 Vector) (bool, error) { f, err := v1.DotProduct(v2) if err != nil { return false, fmt.Errorf("error calculating whether the vectors are orthogonal: %v", err) } return tolerancepkg.IsWithin(f, 0, DefaultTolerance), nil } // Projection calculates the projection of the v2 vector onto the basis vector (v1) by calculating the unit vector of v1 and scaling it. 
func (v1 Vector) Projection(v2 Vector) (Vector, error) { unitVectorOfBasis := v1.Normalize() dotProduct, err := v2.DotProduct(unitVectorOfBasis) if err != nil { return Vector{}, fmt.Errorf("error projecting %v onto %v with error: %v", v2, v1, err) } return unitVectorOfBasis.Scale(dotProduct), nil } // ProjectionOrthogonalComponent calculates the projection of v2 onto the basis vector (v1) and uses that to calculate a component // which is orthogonal to the basis vector and perpendicular to v2. func (v1 Vector) ProjectionOrthogonalComponent(v2 Vector) (Vector, error) { projection, err := v1.Projection(v2) if err != nil { return Vector{}, err } return v2.Sub(projection) } // Round rounds the vector to the specified number of places. func (v1 Vector) Round(decimals int) Vector { op := make([]float64, len(v1)) for i := 0; i < len(v1); i++ { op[i] = round.ToEven(v1[i], decimals) } return Vector(op) } // CrossProduct calculates the cross product of the current vector and the input vector. The cross product // produces a vector which is: // - orthogonal to both v1 and v2. // - has a magnitude of the magnitude of v1 * the magnitude of v2 * the sine of the angle between v1 and v2 // v2 must be a vector with 3 dimensions. func (v1 Vector) CrossProduct(v2 Vector) (Vector, error) { if len(v1) != 3 { return Vector{}, fmt.Errorf("the basis vector has %d dimensions but must have 3 because cross products do not generalize to multiple dimensions", len(v1)) } if len(v2) != 3 { return Vector{}, fmt.Errorf("the input vector has %d dimensions but must have 3 because cross products do not generalize to multiple dimensions", len(v2)) } var a1, a2, a3 = v1[0], v1[1], v1[2] var b1, b2, b3 = v2[0], v2[1], v2[2] c1 := (a2 * b3) - (a3 * b2) c2 := (a3 * b1) - (a1 * b3) if c2 == -0 { c2 = 0 } c3 := (a1 * b2) - (a2 * b1) return NewVector(c1, c2, c3), nil } // AreaOfParallelogram calculates the area of a parallelogram spanned by the basis vector and input vector for 2D and 3D inputs. 
func (v1 Vector) AreaOfParallelogram(v2 Vector) (float64, error) { // Add a z dimension initialised to zero for 2D inputs. if len(v1) == 2 { v1 = NewVector(v1[0], v1[0], 0) } if len(v2) == 2 { v2 = NewVector(v2[0], v2[1], 0) } cp, err := v1.CrossProduct(v2) if err != nil { return 0.0, err } return cp.Magnitude(), nil } // AreaOfTriangle calculates the area of a parallelogram spanned by the basis vector and input vector for 2D and 3D inputs. func (v1 Vector) AreaOfTriangle(v2 Vector) (float64, error) { // Add a z dimension initialised to zero for 2D inputs. if len(v1) == 2 { v1 = NewVector(v1[0], v1[0], 0) } if len(v2) == 2 { v2 = NewVector(v2[0], v2[1], 0) } p, err := v1.AreaOfParallelogram(v2) if err != nil { return p, err } return p * 0.5, nil }
vector.go
0.888831
0.775095
vector.go
starcoder
package generator // yWing removes candidates. If a cell has two candidates (AB) and in a neighboring unit (box, row, or column) of AB is another cell containing AC and in a second neighboring unit of AB is a cell containing BC, then any cell that can be "seen" by AC and BC (in both neighborhoods of AC and BC) that contain C can have C removed. It returns true if it changes any cells. func (g *Grid) yWing(verbose uint) (res bool) { for _, u := range box.unit { for _, p := range u { // Traverse all cells, using box units for convenience. cell := *g.pt(p) if bitCount[cell] != 2 { continue } candidates := g.findYWingCandidates(p, 1) for c1i, p1 := range candidates { cell1 := *g.pt(p1) n1 := neighbors(p1) for c2i, p2 := range candidates { if c1i == c2i { continue } cell2 := *g.pt(p2) if bitCount[cell1|cell2] != 3 || cell&cell1|cell&cell2 != cell { continue } n2 := neighbors(p2) var overlap [9][9]bool for r := 0; r < rows; r++ { for c := 0; c < cols; c++ { overlap[r][c] = n1[r][c] && n2[r][c] } } overlap[p.r][p.c] = false for r := 0; r < rows; r++ { for c := 0; c < cols; c++ { if overlap[r][c] { bits := (cell1 | cell2) &^ cell if (&g.cells[r][c]).andNot(bits) { g.cellChange(&res, verbose, "yWing: %s, %s, %s causes clearing %s from (%d, %d)\n", p, p1, p2, bits, r, c) } } } } } } } } return } func (g *Grid) findYWingCandidates(curr point, overlap int) (res []point) { m := make(map[point]bool) for p := range g.findYWingCandidatesUnit(&box.unit[boxOfPoint(curr)], curr, overlap) { m[p] = true } for p := range g.findYWingCandidatesUnit(&col.unit[curr.c], curr, overlap) { m[p] = true } for p := range g.findYWingCandidatesUnit(&row.unit[curr.r], curr, overlap) { m[p] = true } for p := range m { res = append(res, p) } return } func (g *Grid) findYWingCandidatesUnit(u *[9]point, curr point, overlap int) map[point]bool { res := make(map[point]bool) cell := *g.pt(curr) for _, p := range u { if p == curr { continue } candidate := *g.pt(p) if bitCount[candidate] != 2 || 
bitCount[cell&candidate] != overlap { continue } res[p] = true } return res }
generator/yWing.go
0.795777
0.571946
yWing.go
starcoder
package ast import ( "fmt" "strings" "mal/ast/token" ) type AtomKind uint8 const ( Nil AtomKind = iota Bool Int Float String Keyword Vector Map ) type ( Node interface { fmt.Stringer Pos() token.Pos End() token.Pos } Comment struct { pos token.Pos Content string // No newline included } Symbol struct { pos token.Pos Content string } AtomSingle struct { // nil true false number string keyword pos token.Pos Kind AtomKind Content string } AtomContainer struct { // vector map pos token.Pos end token.Pos Kind AtomKind Elems []Node } List struct { end token.Pos Symbol *Symbol Elems []Node } ) func (c *Comment) Pos() token.Pos { return c.pos } func (c *Comment) End() token.Pos { n := len(c.Content) return token.Pos{ Offset: c.pos.Offset + n, Line: c.pos.Line, Column: c.pos.Column + n, } } func (c *Comment) String() string { return c.Content } func (s *Symbol) Pos() token.Pos { return s.pos } func (s *Symbol) End() token.Pos { n := len(s.Content) return token.Pos{ Offset: s.pos.Offset + n, Line: s.pos.Line, Column: s.pos.Column + n, } } func (s *Symbol) String() string { return s.Content } func (a *AtomSingle) Pos() token.Pos { return a.pos } func (a *AtomSingle) End() token.Pos { n := len(a.Content) return token.Pos{ Offset: a.pos.Offset + n, Line: a.pos.Line, Column: a.pos.Column + n, } } func (a *AtomSingle) String() string { if a.Kind == String { return fmt.Sprintf("%q", a.Content[1:len(a.Content)-1]) } return a.Content } func (a *AtomContainer) Pos() token.Pos { return a.pos } func (a *AtomContainer) End() token.Pos { return a.end } func (a *AtomContainer) String() string { n := len(a.Elems) elems := make([]string, n+2) switch a.Kind { case Vector: elems[0] = "[" elems[n+1] = "]" case Map: elems[0] = "{" elems[n+1] = "}" } for i, elem := range a.Elems { elems[i+1] = elem.String() } return fmt.Sprintf("%s%s%s", elems[0], strings.Join(elems[1:n+1], " "), elems[n+1]) } func (l *List) Pos() token.Pos { return l.Symbol.pos } func (l *List) End() token.Pos { return l.end } 
func (l *List) String() string { n := len(l.Elems) elems := make([]string, n) for i, elem := range l.Elems { elems[i] = elem.String() } macro := l.Symbol.Content if macro != "" { macro += " " } return fmt.Sprintf("(%s%s)", macro, strings.Join(elems, " ")) }
go/src/mal/ast/node.go
0.582135
0.438244
node.go
starcoder
package holiday import ( "time" "github.com/onwsk8r/gotime" ) // TradingHolidays are days the US stock markets are closed. // This list does not include the days the markets close early: on July 3, // the day before Thanksgiving, and Christmas Eve the markets close at 1pm ET. var TradingHolidays List = []Finder{ NYDay, MLKDay, PresidentsDay, GoodFriday, MemorialDay, IndependenceDay, LaborDay, Thanksgiving, ChristmasDay, } // FederalHolidays are days the US government takes off. // The USPS and banks tend to observe these holidays as well. This // list does not include Inauguration Day, as it is only a holiday // under very specific circumstances. var FederalHolidays List = []Finder{ NYDay, MLKDay, PresidentsDay, MemorialDay, IndependenceDay, LaborDay, ColumbusDay, VeteransDay, Thanksgiving, ChristmasDay, } // Finder is an interface for holiday calculation functions. Each function // should accept an optional integer parameter for the year and return the // date of the holiday. If a parameter is not specified, the function // should calculate the date of the holiday for the current year. // The returned time should be in local time (ie time.Local) with // zeroes for hours, minutes, seconds, and nanoseconds. type Finder func(year ...int) time.Time // List represents a list of holiday Finders type List []Finder // Contains checks if <date> exists in the List. func (l *List) Contains(date time.Time) bool { return CheckExact(date, l) } // Observes checks if <date> is an observed holiday. func (l *List) Observes(date time.Time) bool { return Check(date, l) } // Observed returns the observed date of a holiday. // Generally if a holiday falls on a Saturday it is observed the // preceeding Friday, and if it falls on a Sunday it is observed // the following Monday. The only exception to this rule is New // Year's Day, which is observed the following Monday when it // occurs on a Saturday. 
func Observed(holiday time.Time) time.Time { // NYE Exception if holiday.Day() == 1 && holiday.Month() == time.January && holiday.Weekday() == time.Saturday { holiday = holiday.Add(48 * time.Hour) } switch holiday.Weekday() { case time.Saturday: return holiday.Add(-24 * time.Hour) case time.Sunday: return holiday.Add(24 * time.Hour) } return holiday } // Check whether the given date is a work holiday func Check(date time.Time, against *List) bool { y := date.Year() for _, holiday := range *against { if gotime.DateEquals(date, Observed(holiday(y))) { return true } } return false } // CheckExact whether the given date is an actual holiday. // This function differs from the Check function in that it does not // consider observed holidays. func CheckExact(date time.Time, against *List) bool { y := date.Year() for _, holiday := range *against { if gotime.DateEquals(date, holiday(y)) { return true } } return false } // The Check function was adapted from the below SQL function // CREATE OR REPLACE FUNCTION public.is_holiday( // d date DEFAULT CURRENT_DATE) // RETURNS boolean // LANGUAGE 'plpgsql' // COST 1 // IMMUTABLE LEAKPROOF PARALLEL SAFE // AS $BODY$ // DECLARE // y integer; -- the current year // m integer; -- interim month for calculations // hdate date; -- interim date for holiday // BEGIN // y := EXTRACT(YEAR FROM d); // -- New Year's Day, observed the Monday after for weekends // hdate := make_date(y, 1, 1); // LOOP // EXIT WHEN EXTRACT(DOW FROM hdate) NOT IN (0,6); // hdate := hdate + 1; // END LOOP; // IF d = hdate THEN RETURN true; END IF; // -- <NAME> Day, 3rd Mon in Jan // m := 1; // hdate := make_date(y, m, nthwday(3,1,m,y)); // IF d = hdate THEN RETURN true; END IF; // -- President's Day, 3rd Mon in Feb // m := 2; // hdate := make_date(y, m, nthwday(3,1,m,y)); // IF d = hdate THEN RETURN true; END IF; // -- Good Friday, Good Luck! 
// hdate := when_is_easter(y) - 2; // IF d = hdate THEN RETURN true; END IF; // -- Memorial Day, last Monday in May // m := 5; // BEGIN // hdate := make_date(y, m, nthwday(5,1,m,y)); // EXCEPTION WHEN datetime_field_overflow THEN // hdate := make_date(y, m, nthwday(4,1,m,y)); // END; // IF d = hdate THEN RETURN true; END IF; // -- Independence Day, July 4th // hdate := make_date(y, 7, 4); // IF d = hdate THEN RETURN true; END IF; // -- Labor Day, 1st Mon in Sep // m := 9; // hdate := make_date(y, m, nthwday(1,1,m,y)); // IF d = hdate THEN RETURN true; END IF; // -- Thanksgiving, 4th Thu in Nov // m := 11; // hdate := make_date(y, m, nthwday(4,4,m,y)); // IF d = hdate THEN RETURN true; END IF; // -- Christmas, Dec 25th // hdate := make_date(y, 12, 25); // IF d = hdate THEN RETURN true; END IF; // RETURN false; // END // $BODY$;
holiday/holiday.go
0.620852
0.5526
holiday.go
starcoder
package storyboard import ( "github.com/wieku/danser-go/bmath" "github.com/go-gl/mathgl/mgl32" "unicode" "strings" "math" "github.com/wieku/danser-go/render/texture" "github.com/wieku/danser-go/render/batches" ) const ( storyboardArea = 640.0 * 480.0 maxLoad = 1.3328125 //480*480*(16/9)/(640*480) ) type color struct { R, G, B, A float64 } type Object interface { Update(time int64) Draw(time int64, batch *batches.SpriteBatch) GetLoad() float64 GetStartTime() int64 GetEndTime() int64 GetZIndex() int64 GetPosition() bmath.Vector2d SetPosition(vec bmath.Vector2d) GetScale() bmath.Vector2d SetScale(vec bmath.Vector2d) GetRotation() float64 SetRotation(rad float64) GetColor() mgl32.Vec3 SetColor(color mgl32.Vec3) GetAlpha() float64 SetAlpha(alpha float64) SetHFlip(on bool) SetVFlip(on bool) SetAdditive(on bool) } type Sprite struct { texture []*texture.TextureRegion frameDelay float64 loopForever bool currentFrame int transform *Transformations loopQueue []*Loop loopProcessed []*Loop startTime, endTime, zIndex int64 position bmath.Vector2d origin bmath.Vector2d scale bmath.Vector2d flip bmath.Vector2d rotation float64 color color dirty bool additive bool firstupdate bool } func cutWhites(text string) (string, int) { for i, c := range text { if unicode.IsLetter(c) || unicode.IsNumber(c) { return text[i:], i } } return text, 0 } func NewSprite(texture []*texture.TextureRegion, frameDelay float64, loopForever bool, zIndex int64, position bmath.Vector2d, origin bmath.Vector2d, subCommands []string) *Sprite { sprite := &Sprite{texture: texture, frameDelay: frameDelay, loopForever: loopForever, zIndex: zIndex, position: position, origin: origin, scale: bmath.NewVec2d(1, 1), flip: bmath.NewVec2d(1, 1), color: color{1, 1, 1, 1}} sprite.transform = NewTransformations(sprite) var currentLoop *Loop = nil loopDepth := -1 for _, subCommand := range subCommands { command := strings.Split(subCommand, ",") var removed int command[0], removed = cutWhites(command[0]) if command[0] == "T" 
{ continue } if removed == 1 { if currentLoop != nil { sprite.loopQueue = append(sprite.loopQueue, currentLoop) loopDepth = -1 } if command[0] != "L" { sprite.transform.Add(NewCommand(command)) } } if command[0] == "L" { currentLoop = NewLoop(command, sprite) loopDepth = removed + 1 } else if removed == loopDepth { currentLoop.Add(NewCommand(command)) } } if currentLoop != nil { sprite.loopQueue = append(sprite.loopQueue, currentLoop) loopDepth = -1 } sprite.transform.Finalize() sprite.startTime = sprite.transform.startTime sprite.endTime = sprite.transform.endTime for _, loop := range sprite.loopQueue { if loop.start < sprite.startTime { sprite.startTime = loop.start } if loop.end > sprite.endTime { sprite.endTime = loop.end } } return sprite } func (sprite *Sprite) Update(time int64) { sprite.currentFrame = 0 if len(sprite.texture) > 1 { frame := int(math.Floor(float64(time-sprite.startTime) / sprite.frameDelay)) if !sprite.loopForever { if frame >= len(sprite.texture) { frame = len(sprite.texture) - 1 } sprite.currentFrame = frame } else { sprite.currentFrame = frame % len(sprite.texture) } } sprite.transform.Update(time) for i := 0; i < len(sprite.loopQueue); i++ { c := sprite.loopQueue[i] if c.start <= time { sprite.loopProcessed = append(sprite.loopProcessed, c) sprite.loopQueue = append(sprite.loopQueue[:i], sprite.loopQueue[i+1:]...) i-- } } for i := 0; i < len(sprite.loopProcessed); i++ { c := sprite.loopProcessed[i] c.Update(time) if time > c.end { sprite.loopProcessed = append(sprite.loopProcessed[:i], sprite.loopProcessed[i+1:]...) 
i-- } } sprite.firstupdate = true } func (sprite *Sprite) Draw(time int64, batch *batches.SpriteBatch) { if !sprite.firstupdate || sprite.color.A < 0.01 { return } alpha := sprite.color.A if alpha > 1.001 { alpha -= math.Ceil(sprite.color.A) - 1 } batch.DrawStObject(sprite.position, sprite.origin, sprite.scale.Abs(), sprite.flip, sprite.rotation, mgl32.Vec4{float32(sprite.color.R), float32(sprite.color.G), float32(sprite.color.B), float32(alpha)}, sprite.additive, *sprite.texture[sprite.currentFrame], true) } func (sprite *Sprite) GetPosition() bmath.Vector2d { return sprite.position } func (sprite *Sprite) SetPosition(vec bmath.Vector2d) { sprite.position = vec sprite.dirty = true } func (sprite *Sprite) GetScale() bmath.Vector2d { return sprite.scale } func (sprite *Sprite) SetScale(vec bmath.Vector2d) { sprite.scale = vec sprite.dirty = true } func (sprite *Sprite) GetRotation() float64 { return sprite.rotation } func (sprite *Sprite) SetRotation(rad float64) { sprite.rotation = rad sprite.dirty = true } func (sprite *Sprite) GetColor() mgl32.Vec3 { return mgl32.Vec3{float32(sprite.color.R), float32(sprite.color.G), float32(sprite.color.B)} } func (sprite *Sprite) SetColor(color mgl32.Vec3) { sprite.color.R, sprite.color.G, sprite.color.B = float64(color[0]), float64(color[1]), float64(color[2]) sprite.dirty = true } func (sprite *Sprite) GetAlpha() float64 { return sprite.color.A } func (sprite *Sprite) SetAlpha(alpha float64) { sprite.color.A = alpha sprite.dirty = true } func (sprite *Sprite) SetHFlip(on bool) { j := 1.0 if on { j = -1 } sprite.flip.X = j sprite.dirty = true } func (sprite *Sprite) SetVFlip(on bool) { j := 1.0 if on { j = -1 } sprite.flip.Y = j sprite.dirty = true } func (sprite *Sprite) SetAdditive(on bool) { sprite.additive = on sprite.dirty = true } func (sprite *Sprite) GetStartTime() int64 { return sprite.startTime } func (sprite *Sprite) GetEndTime() int64 { return sprite.endTime } func (sprite *Sprite) GetZIndex() int64 { return 
sprite.zIndex } func (sprite *Sprite) GetLoad() float64 { if sprite.color.A >= 0.01 { return math.Min((float64(sprite.texture[0].Width)*sprite.scale.X*float64(sprite.texture[0].Height)*sprite.scale.Y)/storyboardArea, maxLoad) } return 0 }
storyboard/object.go
0.617628
0.474996
object.go
starcoder
package distuv import ( "math" "golang.org/x/exp/rand" "gonum.org/v1/gonum/mathext" ) // Poisson implements the Poisson distribution, a discrete probability distribution // that expresses the probability of a given number of events occurring in a fixed // interval. // The poisson distribution has density function: // f(k) = λ^k / k! e^(-λ) // For more information, see https://en.wikipedia.org/wiki/Poisson_distribution. type Poisson struct { // Lambda is the average number of events in an interval. // Lambda must be greater than 0. Lambda float64 Src rand.Source } // CDF computes the value of the cumulative distribution function at x. func (p Poisson) CDF(x float64) float64 { if x < 0 { return 0 } return mathext.GammaIncRegComp(math.Floor(x+1), p.Lambda) } // ExKurtosis returns the excess kurtosis of the distribution. func (p Poisson) ExKurtosis() float64 { return 1 / p.Lambda } // LogProb computes the natural logarithm of the value of the probability // density function at x. func (p Poisson) LogProb(x float64) float64 { if x < 0 || math.Floor(x) != x { return math.Inf(-1) } lg, _ := math.Lgamma(math.Floor(x) + 1) return x*math.Log(p.Lambda) - p.Lambda - lg } // Mean returns the mean of the probability distribution. func (p Poisson) Mean() float64 { return p.Lambda } // NumParameters returns the number of parameters in the distribution. func (Poisson) NumParameters() int { return 1 } // Prob computes the value of the probability density function at x. func (p Poisson) Prob(x float64) float64 { return math.Exp(p.LogProb(x)) } // Rand returns a random sample drawn from the distribution. func (p Poisson) Rand() float64 { // NUMERICAL RECIPES IN C: THE ART OF SCIENTIFIC COMPUTING (ISBN 0-521-43108-5) // p. 294 // <http://www.aip.de/groups/soe/local/numres/bookcpdf/c7-3.pdf> rnd := rand.ExpFloat64 var rng *rand.Rand if p.Src != nil { rng = rand.New(p.Src) rnd = rng.ExpFloat64 } if p.Lambda < 10.0 { // Use direct method. 
var em float64 t := 0.0 for { t += rnd() if t >= p.Lambda { break } em++ } return em } // Generate using: // <NAME>. "The transformed rejection method for generating Poisson // random variables." Insurance: Mathematics and Economics // 12.1 (1993): 39-45. // Algorithm PTRS rnd = rand.Float64 if rng != nil { rnd = rng.Float64 } b := 0.931 + 2.53*math.Sqrt(p.Lambda) a := -0.059 + 0.02483*b invalpha := 1.1239 + 1.1328/(b-3.4) vr := 0.9277 - 3.6224/(b-2) for { U := rnd() - 0.5 V := rnd() us := 0.5 - math.Abs(U) k := math.Floor((2*a/us+b)*U + p.Lambda + 0.43) if us >= 0.07 && V <= vr { return k } if k <= 0 || (us < 0.013 && V > us) { continue } lg, _ := math.Lgamma(k + 1) if math.Log(V*invalpha/(a/(us*us)+b)) <= k*math.Log(p.Lambda)-p.Lambda-lg { return k } } } // Skewness returns the skewness of the distribution. func (p Poisson) Skewness() float64 { return 1 / math.Sqrt(p.Lambda) } // StdDev returns the standard deviation of the probability distribution. func (p Poisson) StdDev() float64 { return math.Sqrt(p.Variance()) } // Survival returns the survival function (complementary CDF) at x. func (p Poisson) Survival(x float64) float64 { return 1 - p.CDF(x) } // Variance returns the variance of the probability distribution. func (p Poisson) Variance() float64 { return p.Lambda }
stat/distuv/poisson.go
0.903268
0.644756
poisson.go
starcoder
package tsm1 import ( "sort" "github.com/influxdata/platform/tsdb" ) // Array Cursors type floatArrayAscendingCursor struct { cache struct { values Values pos int } tsm struct { buf *tsdb.FloatArray values *tsdb.FloatArray pos int keyCursor *KeyCursor } end int64 res *tsdb.FloatArray } func newFloatArrayAscendingCursor() *floatArrayAscendingCursor { c := &floatArrayAscendingCursor{ res: tsdb.NewFloatArrayLen(MaxPointsPerBlock), } c.tsm.buf = tsdb.NewFloatArrayLen(MaxPointsPerBlock) return c } func (c *floatArrayAscendingCursor) reset(seek, end int64, cacheValues Values, tsmKeyCursor *KeyCursor) { c.end = end c.cache.values = cacheValues c.cache.pos = sort.Search(len(c.cache.values), func(i int) bool { return c.cache.values[i].UnixNano() >= seek }) c.tsm.keyCursor = tsmKeyCursor c.tsm.values, _ = c.tsm.keyCursor.ReadFloatArrayBlock(c.tsm.buf) c.tsm.pos = sort.Search(c.tsm.values.Len(), func(i int) bool { return c.tsm.values.Timestamps[i] >= seek }) } func (c *floatArrayAscendingCursor) Err() error { return nil } // close closes the cursor and any dependent cursors. func (c *floatArrayAscendingCursor) Close() { if c.tsm.keyCursor != nil { c.tsm.keyCursor.Close() c.tsm.keyCursor = nil } c.cache.values = nil c.tsm.values = nil } // Next returns the next key/value for the cursor. 
func (c *floatArrayAscendingCursor) Next() *tsdb.FloatArray { pos := 0 cvals := c.cache.values tvals := c.tsm.values c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] c.res.Values = c.res.Values[:cap(c.res.Values)] for pos < len(c.res.Timestamps) && c.tsm.pos < len(tvals.Timestamps) && c.cache.pos < len(cvals) { ckey := cvals[c.cache.pos].UnixNano() tkey := tvals.Timestamps[c.tsm.pos] if ckey == tkey { c.res.Timestamps[pos] = ckey c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).value c.cache.pos++ c.tsm.pos++ } else if ckey < tkey { c.res.Timestamps[pos] = ckey c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).value c.cache.pos++ } else { c.res.Timestamps[pos] = tkey c.res.Values[pos] = tvals.Values[c.tsm.pos] c.tsm.pos++ } pos++ if c.tsm.pos >= len(tvals.Timestamps) { tvals = c.nextTSM() } } if pos < len(c.res.Timestamps) { if c.tsm.pos < len(tvals.Timestamps) { if pos == 0 { // optimization: all points served from TSM data copy(c.res.Timestamps, tvals.Timestamps) pos += copy(c.res.Values, tvals.Values) c.nextTSM() } else { // copy as much as we can n := copy(c.res.Timestamps[pos:], tvals.Timestamps[c.tsm.pos:]) copy(c.res.Values[pos:], tvals.Values[c.tsm.pos:]) pos += n c.tsm.pos += n if c.tsm.pos >= len(tvals.Timestamps) { c.nextTSM() } } } if c.cache.pos < len(cvals) { // TSM was exhausted for pos < len(c.res.Timestamps) && c.cache.pos < len(cvals) { c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano() c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).value pos++ c.cache.pos++ } } } if pos > 0 && c.res.Timestamps[pos-1] > c.end { pos -= 2 for pos >= 0 && c.res.Timestamps[pos] > c.end { pos-- } pos++ } c.res.Timestamps = c.res.Timestamps[:pos] c.res.Values = c.res.Values[:pos] return c.res } func (c *floatArrayAscendingCursor) nextTSM() *tsdb.FloatArray { c.tsm.keyCursor.Next() c.tsm.values, _ = c.tsm.keyCursor.ReadFloatArrayBlock(c.tsm.buf) c.tsm.pos = 0 return c.tsm.values } type floatArrayDescendingCursor struct { cache struct { values 
Values pos int } tsm struct { buf *tsdb.FloatArray values *tsdb.FloatArray pos int keyCursor *KeyCursor } end int64 res *tsdb.FloatArray } func newFloatArrayDescendingCursor() *floatArrayDescendingCursor { c := &floatArrayDescendingCursor{ res: tsdb.NewFloatArrayLen(MaxPointsPerBlock), } c.tsm.buf = tsdb.NewFloatArrayLen(MaxPointsPerBlock) return c } func (c *floatArrayDescendingCursor) reset(seek, end int64, cacheValues Values, tsmKeyCursor *KeyCursor) { c.end = end c.cache.values = cacheValues if len(c.cache.values) > 0 { c.cache.pos = sort.Search(len(c.cache.values), func(i int) bool { return c.cache.values[i].UnixNano() >= seek }) if c.cache.pos == len(c.cache.values) { c.cache.pos-- } else if c.cache.values[c.cache.pos].UnixNano() != seek { c.cache.pos-- } } else { c.cache.pos = -1 } c.tsm.keyCursor = tsmKeyCursor c.tsm.values, _ = c.tsm.keyCursor.ReadFloatArrayBlock(c.tsm.buf) c.tsm.pos = sort.Search(c.tsm.values.Len(), func(i int) bool { return c.tsm.values.Timestamps[i] >= seek }) if c.tsm.values.Len() > 0 { if c.tsm.pos == c.tsm.values.Len() { c.tsm.pos-- } else if c.tsm.values.Timestamps[c.tsm.pos] != seek { c.tsm.pos-- } } else { c.tsm.pos = -1 } } func (c *floatArrayDescendingCursor) Err() error { return nil } func (c *floatArrayDescendingCursor) Close() { if c.tsm.keyCursor != nil { c.tsm.keyCursor.Close() c.tsm.keyCursor = nil } c.cache.values = nil c.tsm.values = nil } func (c *floatArrayDescendingCursor) Next() *tsdb.FloatArray { pos := 0 cvals := c.cache.values tvals := c.tsm.values c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] c.res.Values = c.res.Values[:cap(c.res.Values)] for pos < len(c.res.Timestamps) && c.tsm.pos >= 0 && c.cache.pos >= 0 { ckey := cvals[c.cache.pos].UnixNano() tkey := tvals.Timestamps[c.tsm.pos] if ckey == tkey { c.res.Timestamps[pos] = ckey c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).value c.cache.pos-- c.tsm.pos-- } else if ckey > tkey { c.res.Timestamps[pos] = ckey c.res.Values[pos] = 
cvals[c.cache.pos].(FloatValue).value c.cache.pos-- } else { c.res.Timestamps[pos] = tkey c.res.Values[pos] = tvals.Values[c.tsm.pos] c.tsm.pos-- } pos++ if c.tsm.pos < 0 { tvals = c.nextTSM() } } if pos < len(c.res.Timestamps) { // cache was exhausted if c.tsm.pos >= 0 { for pos < len(c.res.Timestamps) && c.tsm.pos >= 0 { c.res.Timestamps[pos] = tvals.Timestamps[c.tsm.pos] c.res.Values[pos] = tvals.Values[c.tsm.pos] pos++ c.tsm.pos-- if c.tsm.pos < 0 { tvals = c.nextTSM() } } } if c.cache.pos >= 0 { // TSM was exhausted for pos < len(c.res.Timestamps) && c.cache.pos >= 0 { c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano() c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).value pos++ c.cache.pos-- } } } if pos > 0 && c.res.Timestamps[pos-1] < c.end { pos -= 2 for pos >= 0 && c.res.Timestamps[pos] < c.end { pos-- } pos++ } c.res.Timestamps = c.res.Timestamps[:pos] c.res.Values = c.res.Values[:pos] return c.res } func (c *floatArrayDescendingCursor) nextTSM() *tsdb.FloatArray { c.tsm.keyCursor.Next() c.tsm.values, _ = c.tsm.keyCursor.ReadFloatArrayBlock(c.tsm.buf) c.tsm.pos = len(c.tsm.values.Timestamps) - 1 return c.tsm.values } type integerArrayAscendingCursor struct { cache struct { values Values pos int } tsm struct { buf *tsdb.IntegerArray values *tsdb.IntegerArray pos int keyCursor *KeyCursor } end int64 res *tsdb.IntegerArray } func newIntegerArrayAscendingCursor() *integerArrayAscendingCursor { c := &integerArrayAscendingCursor{ res: tsdb.NewIntegerArrayLen(MaxPointsPerBlock), } c.tsm.buf = tsdb.NewIntegerArrayLen(MaxPointsPerBlock) return c } func (c *integerArrayAscendingCursor) reset(seek, end int64, cacheValues Values, tsmKeyCursor *KeyCursor) { c.end = end c.cache.values = cacheValues c.cache.pos = sort.Search(len(c.cache.values), func(i int) bool { return c.cache.values[i].UnixNano() >= seek }) c.tsm.keyCursor = tsmKeyCursor c.tsm.values, _ = c.tsm.keyCursor.ReadIntegerArrayBlock(c.tsm.buf) c.tsm.pos = sort.Search(c.tsm.values.Len(), func(i int) 
bool { return c.tsm.values.Timestamps[i] >= seek }) } func (c *integerArrayAscendingCursor) Err() error { return nil } // close closes the cursor and any dependent cursors. func (c *integerArrayAscendingCursor) Close() { if c.tsm.keyCursor != nil { c.tsm.keyCursor.Close() c.tsm.keyCursor = nil } c.cache.values = nil c.tsm.values = nil } // Next returns the next key/value for the cursor. func (c *integerArrayAscendingCursor) Next() *tsdb.IntegerArray { pos := 0 cvals := c.cache.values tvals := c.tsm.values c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] c.res.Values = c.res.Values[:cap(c.res.Values)] for pos < len(c.res.Timestamps) && c.tsm.pos < len(tvals.Timestamps) && c.cache.pos < len(cvals) { ckey := cvals[c.cache.pos].UnixNano() tkey := tvals.Timestamps[c.tsm.pos] if ckey == tkey { c.res.Timestamps[pos] = ckey c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).value c.cache.pos++ c.tsm.pos++ } else if ckey < tkey { c.res.Timestamps[pos] = ckey c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).value c.cache.pos++ } else { c.res.Timestamps[pos] = tkey c.res.Values[pos] = tvals.Values[c.tsm.pos] c.tsm.pos++ } pos++ if c.tsm.pos >= len(tvals.Timestamps) { tvals = c.nextTSM() } } if pos < len(c.res.Timestamps) { if c.tsm.pos < len(tvals.Timestamps) { if pos == 0 { // optimization: all points served from TSM data copy(c.res.Timestamps, tvals.Timestamps) pos += copy(c.res.Values, tvals.Values) c.nextTSM() } else { // copy as much as we can n := copy(c.res.Timestamps[pos:], tvals.Timestamps[c.tsm.pos:]) copy(c.res.Values[pos:], tvals.Values[c.tsm.pos:]) pos += n c.tsm.pos += n if c.tsm.pos >= len(tvals.Timestamps) { c.nextTSM() } } } if c.cache.pos < len(cvals) { // TSM was exhausted for pos < len(c.res.Timestamps) && c.cache.pos < len(cvals) { c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano() c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).value pos++ c.cache.pos++ } } } if pos > 0 && c.res.Timestamps[pos-1] > c.end { pos -= 2 for pos >= 
0 && c.res.Timestamps[pos] > c.end { pos-- } pos++ } c.res.Timestamps = c.res.Timestamps[:pos] c.res.Values = c.res.Values[:pos] return c.res } func (c *integerArrayAscendingCursor) nextTSM() *tsdb.IntegerArray { c.tsm.keyCursor.Next() c.tsm.values, _ = c.tsm.keyCursor.ReadIntegerArrayBlock(c.tsm.buf) c.tsm.pos = 0 return c.tsm.values } type integerArrayDescendingCursor struct { cache struct { values Values pos int } tsm struct { buf *tsdb.IntegerArray values *tsdb.IntegerArray pos int keyCursor *KeyCursor } end int64 res *tsdb.IntegerArray } func newIntegerArrayDescendingCursor() *integerArrayDescendingCursor { c := &integerArrayDescendingCursor{ res: tsdb.NewIntegerArrayLen(MaxPointsPerBlock), } c.tsm.buf = tsdb.NewIntegerArrayLen(MaxPointsPerBlock) return c } func (c *integerArrayDescendingCursor) reset(seek, end int64, cacheValues Values, tsmKeyCursor *KeyCursor) { c.end = end c.cache.values = cacheValues if len(c.cache.values) > 0 { c.cache.pos = sort.Search(len(c.cache.values), func(i int) bool { return c.cache.values[i].UnixNano() >= seek }) if c.cache.pos == len(c.cache.values) { c.cache.pos-- } else if c.cache.values[c.cache.pos].UnixNano() != seek { c.cache.pos-- } } else { c.cache.pos = -1 } c.tsm.keyCursor = tsmKeyCursor c.tsm.values, _ = c.tsm.keyCursor.ReadIntegerArrayBlock(c.tsm.buf) c.tsm.pos = sort.Search(c.tsm.values.Len(), func(i int) bool { return c.tsm.values.Timestamps[i] >= seek }) if c.tsm.values.Len() > 0 { if c.tsm.pos == c.tsm.values.Len() { c.tsm.pos-- } else if c.tsm.values.Timestamps[c.tsm.pos] != seek { c.tsm.pos-- } } else { c.tsm.pos = -1 } } func (c *integerArrayDescendingCursor) Err() error { return nil } func (c *integerArrayDescendingCursor) Close() { if c.tsm.keyCursor != nil { c.tsm.keyCursor.Close() c.tsm.keyCursor = nil } c.cache.values = nil c.tsm.values = nil } func (c *integerArrayDescendingCursor) Next() *tsdb.IntegerArray { pos := 0 cvals := c.cache.values tvals := c.tsm.values c.res.Timestamps = 
c.res.Timestamps[:cap(c.res.Timestamps)] c.res.Values = c.res.Values[:cap(c.res.Values)] for pos < len(c.res.Timestamps) && c.tsm.pos >= 0 && c.cache.pos >= 0 { ckey := cvals[c.cache.pos].UnixNano() tkey := tvals.Timestamps[c.tsm.pos] if ckey == tkey { c.res.Timestamps[pos] = ckey c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).value c.cache.pos-- c.tsm.pos-- } else if ckey > tkey { c.res.Timestamps[pos] = ckey c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).value c.cache.pos-- } else { c.res.Timestamps[pos] = tkey c.res.Values[pos] = tvals.Values[c.tsm.pos] c.tsm.pos-- } pos++ if c.tsm.pos < 0 { tvals = c.nextTSM() } } if pos < len(c.res.Timestamps) { // cache was exhausted if c.tsm.pos >= 0 { for pos < len(c.res.Timestamps) && c.tsm.pos >= 0 { c.res.Timestamps[pos] = tvals.Timestamps[c.tsm.pos] c.res.Values[pos] = tvals.Values[c.tsm.pos] pos++ c.tsm.pos-- if c.tsm.pos < 0 { tvals = c.nextTSM() } } } if c.cache.pos >= 0 { // TSM was exhausted for pos < len(c.res.Timestamps) && c.cache.pos >= 0 { c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano() c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).value pos++ c.cache.pos-- } } } if pos > 0 && c.res.Timestamps[pos-1] < c.end { pos -= 2 for pos >= 0 && c.res.Timestamps[pos] < c.end { pos-- } pos++ } c.res.Timestamps = c.res.Timestamps[:pos] c.res.Values = c.res.Values[:pos] return c.res } func (c *integerArrayDescendingCursor) nextTSM() *tsdb.IntegerArray { c.tsm.keyCursor.Next() c.tsm.values, _ = c.tsm.keyCursor.ReadIntegerArrayBlock(c.tsm.buf) c.tsm.pos = len(c.tsm.values.Timestamps) - 1 return c.tsm.values } type unsignedArrayAscendingCursor struct { cache struct { values Values pos int } tsm struct { buf *tsdb.UnsignedArray values *tsdb.UnsignedArray pos int keyCursor *KeyCursor } end int64 res *tsdb.UnsignedArray } func newUnsignedArrayAscendingCursor() *unsignedArrayAscendingCursor { c := &unsignedArrayAscendingCursor{ res: tsdb.NewUnsignedArrayLen(MaxPointsPerBlock), } c.tsm.buf = 
tsdb.NewUnsignedArrayLen(MaxPointsPerBlock) return c } func (c *unsignedArrayAscendingCursor) reset(seek, end int64, cacheValues Values, tsmKeyCursor *KeyCursor) { c.end = end c.cache.values = cacheValues c.cache.pos = sort.Search(len(c.cache.values), func(i int) bool { return c.cache.values[i].UnixNano() >= seek }) c.tsm.keyCursor = tsmKeyCursor c.tsm.values, _ = c.tsm.keyCursor.ReadUnsignedArrayBlock(c.tsm.buf) c.tsm.pos = sort.Search(c.tsm.values.Len(), func(i int) bool { return c.tsm.values.Timestamps[i] >= seek }) } func (c *unsignedArrayAscendingCursor) Err() error { return nil } // close closes the cursor and any dependent cursors. func (c *unsignedArrayAscendingCursor) Close() { if c.tsm.keyCursor != nil { c.tsm.keyCursor.Close() c.tsm.keyCursor = nil } c.cache.values = nil c.tsm.values = nil } // Next returns the next key/value for the cursor. func (c *unsignedArrayAscendingCursor) Next() *tsdb.UnsignedArray { pos := 0 cvals := c.cache.values tvals := c.tsm.values c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] c.res.Values = c.res.Values[:cap(c.res.Values)] for pos < len(c.res.Timestamps) && c.tsm.pos < len(tvals.Timestamps) && c.cache.pos < len(cvals) { ckey := cvals[c.cache.pos].UnixNano() tkey := tvals.Timestamps[c.tsm.pos] if ckey == tkey { c.res.Timestamps[pos] = ckey c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).value c.cache.pos++ c.tsm.pos++ } else if ckey < tkey { c.res.Timestamps[pos] = ckey c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).value c.cache.pos++ } else { c.res.Timestamps[pos] = tkey c.res.Values[pos] = tvals.Values[c.tsm.pos] c.tsm.pos++ } pos++ if c.tsm.pos >= len(tvals.Timestamps) { tvals = c.nextTSM() } } if pos < len(c.res.Timestamps) { if c.tsm.pos < len(tvals.Timestamps) { if pos == 0 { // optimization: all points served from TSM data copy(c.res.Timestamps, tvals.Timestamps) pos += copy(c.res.Values, tvals.Values) c.nextTSM() } else { // copy as much as we can n := copy(c.res.Timestamps[pos:], 
tvals.Timestamps[c.tsm.pos:]) copy(c.res.Values[pos:], tvals.Values[c.tsm.pos:]) pos += n c.tsm.pos += n if c.tsm.pos >= len(tvals.Timestamps) { c.nextTSM() } } } if c.cache.pos < len(cvals) { // TSM was exhausted for pos < len(c.res.Timestamps) && c.cache.pos < len(cvals) { c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano() c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).value pos++ c.cache.pos++ } } } if pos > 0 && c.res.Timestamps[pos-1] > c.end { pos -= 2 for pos >= 0 && c.res.Timestamps[pos] > c.end { pos-- } pos++ } c.res.Timestamps = c.res.Timestamps[:pos] c.res.Values = c.res.Values[:pos] return c.res } func (c *unsignedArrayAscendingCursor) nextTSM() *tsdb.UnsignedArray { c.tsm.keyCursor.Next() c.tsm.values, _ = c.tsm.keyCursor.ReadUnsignedArrayBlock(c.tsm.buf) c.tsm.pos = 0 return c.tsm.values } type unsignedArrayDescendingCursor struct { cache struct { values Values pos int } tsm struct { buf *tsdb.UnsignedArray values *tsdb.UnsignedArray pos int keyCursor *KeyCursor } end int64 res *tsdb.UnsignedArray } func newUnsignedArrayDescendingCursor() *unsignedArrayDescendingCursor { c := &unsignedArrayDescendingCursor{ res: tsdb.NewUnsignedArrayLen(MaxPointsPerBlock), } c.tsm.buf = tsdb.NewUnsignedArrayLen(MaxPointsPerBlock) return c } func (c *unsignedArrayDescendingCursor) reset(seek, end int64, cacheValues Values, tsmKeyCursor *KeyCursor) { c.end = end c.cache.values = cacheValues if len(c.cache.values) > 0 { c.cache.pos = sort.Search(len(c.cache.values), func(i int) bool { return c.cache.values[i].UnixNano() >= seek }) if c.cache.pos == len(c.cache.values) { c.cache.pos-- } else if c.cache.values[c.cache.pos].UnixNano() != seek { c.cache.pos-- } } else { c.cache.pos = -1 } c.tsm.keyCursor = tsmKeyCursor c.tsm.values, _ = c.tsm.keyCursor.ReadUnsignedArrayBlock(c.tsm.buf) c.tsm.pos = sort.Search(c.tsm.values.Len(), func(i int) bool { return c.tsm.values.Timestamps[i] >= seek }) if c.tsm.values.Len() > 0 { if c.tsm.pos == c.tsm.values.Len() { 
c.tsm.pos-- } else if c.tsm.values.Timestamps[c.tsm.pos] != seek { c.tsm.pos-- } } else { c.tsm.pos = -1 } } func (c *unsignedArrayDescendingCursor) Err() error { return nil } func (c *unsignedArrayDescendingCursor) Close() { if c.tsm.keyCursor != nil { c.tsm.keyCursor.Close() c.tsm.keyCursor = nil } c.cache.values = nil c.tsm.values = nil } func (c *unsignedArrayDescendingCursor) Next() *tsdb.UnsignedArray { pos := 0 cvals := c.cache.values tvals := c.tsm.values c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] c.res.Values = c.res.Values[:cap(c.res.Values)] for pos < len(c.res.Timestamps) && c.tsm.pos >= 0 && c.cache.pos >= 0 { ckey := cvals[c.cache.pos].UnixNano() tkey := tvals.Timestamps[c.tsm.pos] if ckey == tkey { c.res.Timestamps[pos] = ckey c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).value c.cache.pos-- c.tsm.pos-- } else if ckey > tkey { c.res.Timestamps[pos] = ckey c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).value c.cache.pos-- } else { c.res.Timestamps[pos] = tkey c.res.Values[pos] = tvals.Values[c.tsm.pos] c.tsm.pos-- } pos++ if c.tsm.pos < 0 { tvals = c.nextTSM() } } if pos < len(c.res.Timestamps) { // cache was exhausted if c.tsm.pos >= 0 { for pos < len(c.res.Timestamps) && c.tsm.pos >= 0 { c.res.Timestamps[pos] = tvals.Timestamps[c.tsm.pos] c.res.Values[pos] = tvals.Values[c.tsm.pos] pos++ c.tsm.pos-- if c.tsm.pos < 0 { tvals = c.nextTSM() } } } if c.cache.pos >= 0 { // TSM was exhausted for pos < len(c.res.Timestamps) && c.cache.pos >= 0 { c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano() c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).value pos++ c.cache.pos-- } } } if pos > 0 && c.res.Timestamps[pos-1] < c.end { pos -= 2 for pos >= 0 && c.res.Timestamps[pos] < c.end { pos-- } pos++ } c.res.Timestamps = c.res.Timestamps[:pos] c.res.Values = c.res.Values[:pos] return c.res } func (c *unsignedArrayDescendingCursor) nextTSM() *tsdb.UnsignedArray { c.tsm.keyCursor.Next() c.tsm.values, _ = 
c.tsm.keyCursor.ReadUnsignedArrayBlock(c.tsm.buf) c.tsm.pos = len(c.tsm.values.Timestamps) - 1 return c.tsm.values } type stringArrayAscendingCursor struct { cache struct { values Values pos int } tsm struct { buf *tsdb.StringArray values *tsdb.StringArray pos int keyCursor *KeyCursor } end int64 res *tsdb.StringArray } func newStringArrayAscendingCursor() *stringArrayAscendingCursor { c := &stringArrayAscendingCursor{ res: tsdb.NewStringArrayLen(MaxPointsPerBlock), } c.tsm.buf = tsdb.NewStringArrayLen(MaxPointsPerBlock) return c } func (c *stringArrayAscendingCursor) reset(seek, end int64, cacheValues Values, tsmKeyCursor *KeyCursor) { c.end = end c.cache.values = cacheValues c.cache.pos = sort.Search(len(c.cache.values), func(i int) bool { return c.cache.values[i].UnixNano() >= seek }) c.tsm.keyCursor = tsmKeyCursor c.tsm.values, _ = c.tsm.keyCursor.ReadStringArrayBlock(c.tsm.buf) c.tsm.pos = sort.Search(c.tsm.values.Len(), func(i int) bool { return c.tsm.values.Timestamps[i] >= seek }) } func (c *stringArrayAscendingCursor) Err() error { return nil } // close closes the cursor and any dependent cursors. func (c *stringArrayAscendingCursor) Close() { if c.tsm.keyCursor != nil { c.tsm.keyCursor.Close() c.tsm.keyCursor = nil } c.cache.values = nil c.tsm.values = nil } // Next returns the next key/value for the cursor. 
func (c *stringArrayAscendingCursor) Next() *tsdb.StringArray { pos := 0 cvals := c.cache.values tvals := c.tsm.values c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] c.res.Values = c.res.Values[:cap(c.res.Values)] for pos < len(c.res.Timestamps) && c.tsm.pos < len(tvals.Timestamps) && c.cache.pos < len(cvals) { ckey := cvals[c.cache.pos].UnixNano() tkey := tvals.Timestamps[c.tsm.pos] if ckey == tkey { c.res.Timestamps[pos] = ckey c.res.Values[pos] = cvals[c.cache.pos].(StringValue).value c.cache.pos++ c.tsm.pos++ } else if ckey < tkey { c.res.Timestamps[pos] = ckey c.res.Values[pos] = cvals[c.cache.pos].(StringValue).value c.cache.pos++ } else { c.res.Timestamps[pos] = tkey c.res.Values[pos] = tvals.Values[c.tsm.pos] c.tsm.pos++ } pos++ if c.tsm.pos >= len(tvals.Timestamps) { tvals = c.nextTSM() } } if pos < len(c.res.Timestamps) { if c.tsm.pos < len(tvals.Timestamps) { if pos == 0 { // optimization: all points served from TSM data copy(c.res.Timestamps, tvals.Timestamps) pos += copy(c.res.Values, tvals.Values) c.nextTSM() } else { // copy as much as we can n := copy(c.res.Timestamps[pos:], tvals.Timestamps[c.tsm.pos:]) copy(c.res.Values[pos:], tvals.Values[c.tsm.pos:]) pos += n c.tsm.pos += n if c.tsm.pos >= len(tvals.Timestamps) { c.nextTSM() } } } if c.cache.pos < len(cvals) { // TSM was exhausted for pos < len(c.res.Timestamps) && c.cache.pos < len(cvals) { c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano() c.res.Values[pos] = cvals[c.cache.pos].(StringValue).value pos++ c.cache.pos++ } } } if pos > 0 && c.res.Timestamps[pos-1] > c.end { pos -= 2 for pos >= 0 && c.res.Timestamps[pos] > c.end { pos-- } pos++ } c.res.Timestamps = c.res.Timestamps[:pos] c.res.Values = c.res.Values[:pos] return c.res } func (c *stringArrayAscendingCursor) nextTSM() *tsdb.StringArray { c.tsm.keyCursor.Next() c.tsm.values, _ = c.tsm.keyCursor.ReadStringArrayBlock(c.tsm.buf) c.tsm.pos = 0 return c.tsm.values } type stringArrayDescendingCursor struct { cache struct { 
values Values pos int } tsm struct { buf *tsdb.StringArray values *tsdb.StringArray pos int keyCursor *KeyCursor } end int64 res *tsdb.StringArray } func newStringArrayDescendingCursor() *stringArrayDescendingCursor { c := &stringArrayDescendingCursor{ res: tsdb.NewStringArrayLen(MaxPointsPerBlock), } c.tsm.buf = tsdb.NewStringArrayLen(MaxPointsPerBlock) return c } func (c *stringArrayDescendingCursor) reset(seek, end int64, cacheValues Values, tsmKeyCursor *KeyCursor) { c.end = end c.cache.values = cacheValues if len(c.cache.values) > 0 { c.cache.pos = sort.Search(len(c.cache.values), func(i int) bool { return c.cache.values[i].UnixNano() >= seek }) if c.cache.pos == len(c.cache.values) { c.cache.pos-- } else if c.cache.values[c.cache.pos].UnixNano() != seek { c.cache.pos-- } } else { c.cache.pos = -1 } c.tsm.keyCursor = tsmKeyCursor c.tsm.values, _ = c.tsm.keyCursor.ReadStringArrayBlock(c.tsm.buf) c.tsm.pos = sort.Search(c.tsm.values.Len(), func(i int) bool { return c.tsm.values.Timestamps[i] >= seek }) if c.tsm.values.Len() > 0 { if c.tsm.pos == c.tsm.values.Len() { c.tsm.pos-- } else if c.tsm.values.Timestamps[c.tsm.pos] != seek { c.tsm.pos-- } } else { c.tsm.pos = -1 } } func (c *stringArrayDescendingCursor) Err() error { return nil } func (c *stringArrayDescendingCursor) Close() { if c.tsm.keyCursor != nil { c.tsm.keyCursor.Close() c.tsm.keyCursor = nil } c.cache.values = nil c.tsm.values = nil } func (c *stringArrayDescendingCursor) Next() *tsdb.StringArray { pos := 0 cvals := c.cache.values tvals := c.tsm.values c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] c.res.Values = c.res.Values[:cap(c.res.Values)] for pos < len(c.res.Timestamps) && c.tsm.pos >= 0 && c.cache.pos >= 0 { ckey := cvals[c.cache.pos].UnixNano() tkey := tvals.Timestamps[c.tsm.pos] if ckey == tkey { c.res.Timestamps[pos] = ckey c.res.Values[pos] = cvals[c.cache.pos].(StringValue).value c.cache.pos-- c.tsm.pos-- } else if ckey > tkey { c.res.Timestamps[pos] = ckey 
c.res.Values[pos] = cvals[c.cache.pos].(StringValue).value c.cache.pos-- } else { c.res.Timestamps[pos] = tkey c.res.Values[pos] = tvals.Values[c.tsm.pos] c.tsm.pos-- } pos++ if c.tsm.pos < 0 { tvals = c.nextTSM() } } if pos < len(c.res.Timestamps) { // cache was exhausted if c.tsm.pos >= 0 { for pos < len(c.res.Timestamps) && c.tsm.pos >= 0 { c.res.Timestamps[pos] = tvals.Timestamps[c.tsm.pos] c.res.Values[pos] = tvals.Values[c.tsm.pos] pos++ c.tsm.pos-- if c.tsm.pos < 0 { tvals = c.nextTSM() } } } if c.cache.pos >= 0 { // TSM was exhausted for pos < len(c.res.Timestamps) && c.cache.pos >= 0 { c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano() c.res.Values[pos] = cvals[c.cache.pos].(StringValue).value pos++ c.cache.pos-- } } } if pos > 0 && c.res.Timestamps[pos-1] < c.end { pos -= 2 for pos >= 0 && c.res.Timestamps[pos] < c.end { pos-- } pos++ } c.res.Timestamps = c.res.Timestamps[:pos] c.res.Values = c.res.Values[:pos] return c.res } func (c *stringArrayDescendingCursor) nextTSM() *tsdb.StringArray { c.tsm.keyCursor.Next() c.tsm.values, _ = c.tsm.keyCursor.ReadStringArrayBlock(c.tsm.buf) c.tsm.pos = len(c.tsm.values.Timestamps) - 1 return c.tsm.values } type booleanArrayAscendingCursor struct { cache struct { values Values pos int } tsm struct { buf *tsdb.BooleanArray values *tsdb.BooleanArray pos int keyCursor *KeyCursor } end int64 res *tsdb.BooleanArray } func newBooleanArrayAscendingCursor() *booleanArrayAscendingCursor { c := &booleanArrayAscendingCursor{ res: tsdb.NewBooleanArrayLen(MaxPointsPerBlock), } c.tsm.buf = tsdb.NewBooleanArrayLen(MaxPointsPerBlock) return c } func (c *booleanArrayAscendingCursor) reset(seek, end int64, cacheValues Values, tsmKeyCursor *KeyCursor) { c.end = end c.cache.values = cacheValues c.cache.pos = sort.Search(len(c.cache.values), func(i int) bool { return c.cache.values[i].UnixNano() >= seek }) c.tsm.keyCursor = tsmKeyCursor c.tsm.values, _ = c.tsm.keyCursor.ReadBooleanArrayBlock(c.tsm.buf) c.tsm.pos = 
sort.Search(c.tsm.values.Len(), func(i int) bool { return c.tsm.values.Timestamps[i] >= seek }) } func (c *booleanArrayAscendingCursor) Err() error { return nil } // close closes the cursor and any dependent cursors. func (c *booleanArrayAscendingCursor) Close() { if c.tsm.keyCursor != nil { c.tsm.keyCursor.Close() c.tsm.keyCursor = nil } c.cache.values = nil c.tsm.values = nil } // Next returns the next key/value for the cursor. func (c *booleanArrayAscendingCursor) Next() *tsdb.BooleanArray { pos := 0 cvals := c.cache.values tvals := c.tsm.values c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] c.res.Values = c.res.Values[:cap(c.res.Values)] for pos < len(c.res.Timestamps) && c.tsm.pos < len(tvals.Timestamps) && c.cache.pos < len(cvals) { ckey := cvals[c.cache.pos].UnixNano() tkey := tvals.Timestamps[c.tsm.pos] if ckey == tkey { c.res.Timestamps[pos] = ckey c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).value c.cache.pos++ c.tsm.pos++ } else if ckey < tkey { c.res.Timestamps[pos] = ckey c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).value c.cache.pos++ } else { c.res.Timestamps[pos] = tkey c.res.Values[pos] = tvals.Values[c.tsm.pos] c.tsm.pos++ } pos++ if c.tsm.pos >= len(tvals.Timestamps) { tvals = c.nextTSM() } } if pos < len(c.res.Timestamps) { if c.tsm.pos < len(tvals.Timestamps) { if pos == 0 { // optimization: all points served from TSM data copy(c.res.Timestamps, tvals.Timestamps) pos += copy(c.res.Values, tvals.Values) c.nextTSM() } else { // copy as much as we can n := copy(c.res.Timestamps[pos:], tvals.Timestamps[c.tsm.pos:]) copy(c.res.Values[pos:], tvals.Values[c.tsm.pos:]) pos += n c.tsm.pos += n if c.tsm.pos >= len(tvals.Timestamps) { c.nextTSM() } } } if c.cache.pos < len(cvals) { // TSM was exhausted for pos < len(c.res.Timestamps) && c.cache.pos < len(cvals) { c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano() c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).value pos++ c.cache.pos++ } } } if pos > 0 && 
c.res.Timestamps[pos-1] > c.end { pos -= 2 for pos >= 0 && c.res.Timestamps[pos] > c.end { pos-- } pos++ } c.res.Timestamps = c.res.Timestamps[:pos] c.res.Values = c.res.Values[:pos] return c.res } func (c *booleanArrayAscendingCursor) nextTSM() *tsdb.BooleanArray { c.tsm.keyCursor.Next() c.tsm.values, _ = c.tsm.keyCursor.ReadBooleanArrayBlock(c.tsm.buf) c.tsm.pos = 0 return c.tsm.values } type booleanArrayDescendingCursor struct { cache struct { values Values pos int } tsm struct { buf *tsdb.BooleanArray values *tsdb.BooleanArray pos int keyCursor *KeyCursor } end int64 res *tsdb.BooleanArray } func newBooleanArrayDescendingCursor() *booleanArrayDescendingCursor { c := &booleanArrayDescendingCursor{ res: tsdb.NewBooleanArrayLen(MaxPointsPerBlock), } c.tsm.buf = tsdb.NewBooleanArrayLen(MaxPointsPerBlock) return c } func (c *booleanArrayDescendingCursor) reset(seek, end int64, cacheValues Values, tsmKeyCursor *KeyCursor) { c.end = end c.cache.values = cacheValues if len(c.cache.values) > 0 { c.cache.pos = sort.Search(len(c.cache.values), func(i int) bool { return c.cache.values[i].UnixNano() >= seek }) if c.cache.pos == len(c.cache.values) { c.cache.pos-- } else if c.cache.values[c.cache.pos].UnixNano() != seek { c.cache.pos-- } } else { c.cache.pos = -1 } c.tsm.keyCursor = tsmKeyCursor c.tsm.values, _ = c.tsm.keyCursor.ReadBooleanArrayBlock(c.tsm.buf) c.tsm.pos = sort.Search(c.tsm.values.Len(), func(i int) bool { return c.tsm.values.Timestamps[i] >= seek }) if c.tsm.values.Len() > 0 { if c.tsm.pos == c.tsm.values.Len() { c.tsm.pos-- } else if c.tsm.values.Timestamps[c.tsm.pos] != seek { c.tsm.pos-- } } else { c.tsm.pos = -1 } } func (c *booleanArrayDescendingCursor) Err() error { return nil } func (c *booleanArrayDescendingCursor) Close() { if c.tsm.keyCursor != nil { c.tsm.keyCursor.Close() c.tsm.keyCursor = nil } c.cache.values = nil c.tsm.values = nil } func (c *booleanArrayDescendingCursor) Next() *tsdb.BooleanArray { pos := 0 cvals := c.cache.values tvals := 
c.tsm.values c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] c.res.Values = c.res.Values[:cap(c.res.Values)] for pos < len(c.res.Timestamps) && c.tsm.pos >= 0 && c.cache.pos >= 0 { ckey := cvals[c.cache.pos].UnixNano() tkey := tvals.Timestamps[c.tsm.pos] if ckey == tkey { c.res.Timestamps[pos] = ckey c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).value c.cache.pos-- c.tsm.pos-- } else if ckey > tkey { c.res.Timestamps[pos] = ckey c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).value c.cache.pos-- } else { c.res.Timestamps[pos] = tkey c.res.Values[pos] = tvals.Values[c.tsm.pos] c.tsm.pos-- } pos++ if c.tsm.pos < 0 { tvals = c.nextTSM() } } if pos < len(c.res.Timestamps) { // cache was exhausted if c.tsm.pos >= 0 { for pos < len(c.res.Timestamps) && c.tsm.pos >= 0 { c.res.Timestamps[pos] = tvals.Timestamps[c.tsm.pos] c.res.Values[pos] = tvals.Values[c.tsm.pos] pos++ c.tsm.pos-- if c.tsm.pos < 0 { tvals = c.nextTSM() } } } if c.cache.pos >= 0 { // TSM was exhausted for pos < len(c.res.Timestamps) && c.cache.pos >= 0 { c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano() c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).value pos++ c.cache.pos-- } } } if pos > 0 && c.res.Timestamps[pos-1] < c.end { pos -= 2 for pos >= 0 && c.res.Timestamps[pos] < c.end { pos-- } pos++ } c.res.Timestamps = c.res.Timestamps[:pos] c.res.Values = c.res.Values[:pos] return c.res } func (c *booleanArrayDescendingCursor) nextTSM() *tsdb.BooleanArray { c.tsm.keyCursor.Next() c.tsm.values, _ = c.tsm.keyCursor.ReadBooleanArrayBlock(c.tsm.buf) c.tsm.pos = len(c.tsm.values.Timestamps) - 1 return c.tsm.values }
tsdb/tsm1/array_cursor.gen.go
0.581065
0.476032
array_cursor.gen.go
starcoder
package main import ( "math" ) /** For clarity, the base computational `struct`s (per side aka 'option' and the total) will use the explicit 'PearsonsChiSq` naming convention. For simplicity, all `func`s will use the abbreviated `PCS` naming convention. */ const d20ChiTableLookup = 30.143 type DieConstantsBySides struct { NumberOfSides int ChiSqTableValue float64 // e.g. for a 5% significance error margin [X^2(degreesOfFreedom, 0.05)] OR X^2(#sides-1, 0.05) MinNumberOfRolls int // chi-square requires a minimum to be applicable: `TotalNumberOfRolls >== 5 * NumberOfSides` } /** Die Constant struct instances for conventional unconventionally sided die (and a d6 of course). */ var d4, d6, d8, d10, d12, d20 = initDieConstants() func initDieConstants() (d4, d6, d8, d10, d12, d20 DieConstantsBySides) { d4 = DieConstantsBySides{ NumberOfSides: 4, ChiSqTableValue: 7.815, // TODO MinNumberOfRolls: 20, // 5 rolls * 4 sides } d6 = DieConstantsBySides{ NumberOfSides: 6, ChiSqTableValue: 11.070, // TODO MinNumberOfRolls: 30, // 5 rolls * 6 sides } d8 = DieConstantsBySides{ NumberOfSides: 8, ChiSqTableValue: 14.067, // TODO MinNumberOfRolls: 40, // 5 rolls * 8 sides } d10 = DieConstantsBySides{ NumberOfSides: 10, ChiSqTableValue: 16.919, // TODO MinNumberOfRolls: 50, // 5 rolls * 10 sides } d12 = DieConstantsBySides{ NumberOfSides: 12, ChiSqTableValue: 19.675, // TODO MinNumberOfRolls: 60, // 5 rolls * 12 sides } d20 = DieConstantsBySides{ NumberOfSides: 20, ChiSqTableValue: 30.143, // TODO MinNumberOfRolls: 100, // 5 rolls * 20 sides } return } func GetDieConstantsBySides(numberOfSides int) (d DieConstantsBySides) { switch numberOfSides { case 4: d = d4 case 6: d = d6 case 8: d = d8 case 10: d = d10 case 12: d = d12 case 20: d = d20 default: d = d20 } return } /** Pearson's Chi-Square Test requires the following values for each die side option (`PearsonsChiSqOption`) and for each die as a whole (`ComputedPearsonsChiSqValues`). 
*/ type PearsonsChiSqOption struct { SideRollCount int `default:"0"` // Should be passed in each time Compute is called, but stored for display. ExpectedRollCount float64 `default:"0.0"` // Should be passed in each time Compute is called, but stored for display. Error float64 `default:"0.0"` // `SideRollCount - ExpectedRollCount` SquaredError float64 `default:"0.0"` // `math.Pow(Error, 2)` aka Error^2 } type ComputedPearsonsChiSqValues struct { DieConstants DieConstantsBySides OptionComputations []PearsonsChiSqOption BalanceThreshold float64 `default:"0.0"` // Threshold for the SumSquaredError. ExpectedRollsPerSide float64 `default:"0.0"` // `TotalRollCount/NumberOfSides` SumSquaredError float64 `default:"0.0"` // Total of all options SquaredError different from expected roll count. aka 'SSE' IsBalanced bool `default:"false"` // SSE <= BalanceThreshold } /** Methods for computing the associated Chi-Square values for each Side Option */ func (option *PearsonsChiSqOption) ComputeErrorAndSquaredError( currentSideRollCount int, expectedRollCount float64) (pcsqError float64, squaredError float64) { option.SideRollCount, option.ExpectedRollCount = currentSideRollCount, expectedRollCount option.Error = float64(option.SideRollCount) - option.ExpectedRollCount option.SquaredError = math.Pow(option.Error, 2) pcsqError, squaredError = option.Error, option.SquaredError return } /** Methods for Computing the Pearson's Chi-Square analysis */ func NewComputedPCSValues(numberOfSides int) (newCPCSValues *ComputedPearsonsChiSqValues) { newCPCSValues = &ComputedPearsonsChiSqValues{} newCPCSValues.DieConstants = GetDieConstantsBySides(numberOfSides) newCPCSValues.OptionComputations = make([]PearsonsChiSqOption, numberOfSides) for i := 0; i < numberOfSides; i++ { newCPCSValues.OptionComputations[i] = PearsonsChiSqOption{} } return } // Groups and calls all necessary compute functions. 
func (cpcsv *ComputedPearsonsChiSqValues) ComputePChSqValues( currentRollCountTotal int, counts []int) (isBalanced bool, sse float64, balanceThreshold float64) { cpcsv.ComputeExpectedRolls(currentRollCountTotal) cpcsv.ComputeBalanceThreshold() cpcsv.ComputeSumSquaredErrorIfMinRollCountMet(currentRollCountTotal, counts) isBalanced, sse, balanceThreshold = cpcsv.ComputeIsBalanced() return } func (cpcsv *ComputedPearsonsChiSqValues) ComputeExpectedRolls(currentRollCountTotal int) { cpcsv.ExpectedRollsPerSide = float64(currentRollCountTotal) / float64(cpcsv.DieConstants.NumberOfSides) } func (cpcsv *ComputedPearsonsChiSqValues) ComputeBalanceThreshold() { cpcsv.BalanceThreshold = cpcsv.ExpectedRollsPerSide * cpcsv.DieConstants.ChiSqTableValue } func (cpcsv *ComputedPearsonsChiSqValues) ComputeSumSquaredErrorIfMinRollCountMet(currentRollCountTotal int, counts []int) (isMinRollCountMet bool) { if cpcsv.DieConstants.MinNumberOfRolls <= currentRollCountTotal { newSSE := 0.0 for i, option := range cpcsv.OptionComputations { option.ComputeErrorAndSquaredError(counts[i], cpcsv.ExpectedRollsPerSide) newSSE += option.SquaredError } cpcsv.SumSquaredError = newSSE isMinRollCountMet = true return } isMinRollCountMet = false return } func (cpcsv *ComputedPearsonsChiSqValues) ComputeIsBalanced() (isBalanced bool, sse float64, balanceThreshold float64) { cpcsv.IsBalanced = cpcsv.SumSquaredError <= cpcsv.BalanceThreshold return cpcsv.IsBalanced, cpcsv.SumSquaredError, cpcsv.BalanceThreshold }
pearsons-chi-sq-compute.go
0.645679
0.407451
pearsons-chi-sq-compute.go
starcoder
package pc import ( "encoding/binary" "math" "github.com/seqsense/pcgol/mat" ) type binaryIterator struct { data []byte pos int stride int } func (i *binaryIterator) Incr() { i.pos += i.stride } func (i *binaryIterator) Len() int { return len(i.data) / i.stride } type Float32Iterator interface { Incr() IsValid() bool Float32() float32 SetFloat32(float32) Float32At(int) float32 Len() int } type Vec3Iterator interface { Vec3RandomAccessor Vec3ForwardIterator } type Vec3ForwardIterator interface { Vec3ConstForwardIterator SetVec3(mat.Vec3) } type Vec3ConstForwardIterator interface { Incr() IsValid() bool Vec3() mat.Vec3 } type binaryFloat32Iterator struct { binaryIterator } func (i *binaryFloat32Iterator) Float32() float32 { return math.Float32frombits( binary.LittleEndian.Uint32(i.binaryIterator.data[i.binaryIterator.pos : i.binaryIterator.pos+4]), ) } func (i *binaryFloat32Iterator) Float32At(j int) float32 { pos := i.binaryIterator.pos + i.stride*j return math.Float32frombits( binary.LittleEndian.Uint32(i.binaryIterator.data[pos : pos+4]), ) } func (i *binaryFloat32Iterator) SetFloat32(v float32) { b := math.Float32bits(v) binary.LittleEndian.PutUint32( i.binaryIterator.data[i.binaryIterator.pos:i.binaryIterator.pos+4], b, ) } func (i *binaryFloat32Iterator) IsValid() bool { return i.pos+4 <= len(i.data) } type float32Iterator struct { data []float32 pos int stride int } func (i *float32Iterator) Incr() { i.pos += i.stride } func (i *float32Iterator) IsValid() bool { return i.pos+1 <= len(i.data) } func (i *float32Iterator) Len() int { return len(i.data) / i.stride } func (i *float32Iterator) Float32() float32 { return i.data[i.pos] } func (i *float32Iterator) Float32At(j int) float32 { return i.data[i.pos+i.stride*j] } func (i *float32Iterator) SetFloat32(v float32) { i.data[i.pos] = v } func (i *float32Iterator) Vec3() mat.Vec3 { var ret mat.Vec3 copy(ret[:], i.data[i.pos:i.pos+3]) return ret } func (i *float32Iterator) Vec3At(j int) mat.Vec3 { pos := i.pos + 
i.stride*j var ret mat.Vec3 copy(ret[:], i.data[pos:pos+3]) return ret } func (i *float32Iterator) SetVec3(v mat.Vec3) { copy(i.data[i.pos:i.pos+3], v[:]) } type naiveVec3Iterator [3]Float32Iterator func (i naiveVec3Iterator) IsValid() bool { return i[0].IsValid() } func (i naiveVec3Iterator) Len() int { return i[0].Len() } func (i naiveVec3Iterator) Incr() { i[0].Incr() i[1].Incr() i[2].Incr() } func (i naiveVec3Iterator) Vec3() mat.Vec3 { return mat.Vec3{i[0].Float32(), i[1].Float32(), i[2].Float32()} } func (i naiveVec3Iterator) Vec3At(j int) mat.Vec3 { return mat.Vec3{i[0].Float32At(j), i[1].Float32At(j), i[2].Float32At(j)} } func (i naiveVec3Iterator) SetVec3(v mat.Vec3) { i[0].SetFloat32(v[0]) i[1].SetFloat32(v[1]) i[2].SetFloat32(v[2]) } type Uint32Iterator interface { Uint32RandomAccessor Incr() IsValid() bool Uint32() uint32 SetUint32(uint32) } type binaryUint32Iterator struct { binaryIterator } func (i *binaryUint32Iterator) Uint32() uint32 { return binary.LittleEndian.Uint32(i.binaryIterator.data[i.binaryIterator.pos : i.binaryIterator.pos+4]) } func (i *binaryUint32Iterator) Uint32At(j int) uint32 { pos := i.binaryIterator.pos + i.binaryIterator.stride*j return binary.LittleEndian.Uint32(i.binaryIterator.data[pos : pos+4]) } func (i *binaryUint32Iterator) SetUint32(v uint32) { binary.LittleEndian.PutUint32( i.binaryIterator.data[i.binaryIterator.pos:i.binaryIterator.pos+4], v, ) } func (i *binaryUint32Iterator) IsValid() bool { return i.pos+4 <= len(i.data) }
pc/iterator.go
0.738103
0.411702
iterator.go
starcoder
package mporous // scratchpad for TPM calculations var TPM struct { // other porous media data Sg float64 // sl: saturation of gas Pc float64 // pc: capillary pressure: pg - pl P float64 // p: averaged pressue of fluids in porous // n variables Ns float64 // Ns: volume fraction of solids Nf float64 // Nf: volume fraction of fluids: liquid + gas Nl float64 // Nl: volume fraction of liquid Ng float64 // Ng: volume fraction of gas // ρ (partial) variables Rhol float64 // ρl: partial density of liquid Rhog float64 // ρg: partial density of gas Rhos float64 // ρs: partial density of solids Rho float64 // ρ: partial density of mixture // conductivity and retention models variables Klr float64 // relative liquid conductivity Kgr float64 // relative gas conductivity Ccb float64 // liquid retention model = dsl/dpc Cl float64 // liquid compresssibility Cg float64 // gas compressibility // liquid balance related coefficients Cpl float64 // Cpl coefficient Cpg float64 // Cpg coefficient Cvs float64 // Cvs coefficient // gas balance related coefficients Dpl float64 // Dpl coefficient Dpg float64 // Dpg coefficient Dvs float64 // Dvs coefficient // liquid retention model derivative Ccd float64 // dCc/dpc } // Derivatives var D struct { // liquid conductivity klr_pl float64 // ∂klr/∂pl klr_pg float64 // ∂klr/∂pg // gas conductivity kgr_pl float64 // ∂kgr/∂pl kgr_pg float64 // ∂kgr/∂pg // mixture density rho_pl float64 // ∂ρ/∂pl rho_pg float64 // ∂ρ/∂pg // 1 liquid balance related coefficients Cpl_pl float64 // ∂Cpl/∂pl Cpl_pg float64 // ∂Cpl/∂pg Cpl_usM float64 // ∂Cpl/∂us multiplier // 2 liquid balance related coefficients Cpg_pl float64 // ∂Cpg/∂pl Cpg_pg float64 // ∂Cpg/∂pg Cpg_usM float64 // ∂Cpg/∂us multiplier // 3 liquid balance related coefficients Cvs_pl float64 // ∂Cvs/∂pl Cvs_pg float64 // ∂Cvs/∂pg // 1 gas balance related coefficients Dpl_pl float64 // ∂Dpl/∂pl Dpl_pg float64 // ∂Dpl/∂pg Dpl_usM float64 // ∂Dpl/∂us multiplier // 2 gas balance related coefficients 
Dpg_pl float64 // ∂Dpg/∂pl Dpg_pg float64 // ∂Dpg/∂pg Dpg_usM float64 // ∂Dpg/∂us multiplier // 3 gas balance related coefficients Dvs_pl float64 // ∂Dvs/∂pl Dvs_pg float64 // ∂Dvs/∂pg } // CalcLGS calculates TPM variables for models with liquid, gas and solid func CalcLGS(divus float64, sta *State, mdl *Model, derivs bool) (err error) { // other porous media data TPM.Sg = 1.0 - sta.Sl TPM.Pc = sta.Pg - sta.Pl TPM.P = sta.Sl*sta.Pl + TPM.Sg*sta.Pg // n variables TPM.Ns = (1.0 - divus) * sta.Ns0 TPM.Nf = 1.0 - TPM.Ns TPM.Nl = TPM.Nf * sta.Sl TPM.Ng = TPM.Nf * TPM.Sg // ρ (partial) variables TPM.Rhol = TPM.Nl * sta.RhoL TPM.Rhog = TPM.Ng * sta.RhoG TPM.Rhos = TPM.Ns * mdl.RhoS0 TPM.Rho = TPM.Rhol + TPM.Rhog + TPM.Rhos // conductivity and retention models variables TPM.Klr = mdl.Cnd.Klr(sta.Sl) TPM.Kgr = mdl.Cnd.Kgr(TPM.Sg) TPM.Ccb, err = mdl.Ccb(sta) if err != nil { return } TPM.Cl = mdl.Cl TPM.Cg = mdl.Cg // liquid balance related coefficients TPM.Cpl = TPM.Nf * (sta.Sl*TPM.Cl - sta.RhoL*TPM.Ccb) TPM.Cpg = TPM.Nf * sta.RhoL * TPM.Ccb TPM.Cvs = sta.Sl * sta.RhoL // gas balance related coefficients TPM.Dpl = TPM.Nf * sta.RhoG * TPM.Ccb TPM.Dpg = TPM.Nf * (TPM.Sg*TPM.Cg - sta.RhoG*TPM.Ccb) TPM.Dvs = TPM.Sg * sta.RhoG // derivatives if derivs { // auxiliary variables ns0, nf := sta.Ns0, TPM.Nf sl, sg := sta.Sl, TPM.Sg ρL, ρG := sta.RhoL, sta.RhoG Cl, Cg := mdl.Cl, mdl.Cg Ccb, Ccd := TPM.Ccb, TPM.Ccd // liquid conductivity D.klr_pl = -mdl.Cnd.DklrDsl(sl) * Ccb D.klr_pg = +mdl.Cnd.DklrDsl(sl) * Ccb // gas conductivity D.kgr_pl = +mdl.Cnd.DkgrDsg(sg) * Ccb D.kgr_pg = -mdl.Cnd.DkgrDsg(sg) * Ccb // liquid retention model TPM.Ccd, err = mdl.Ccd(sta) if err != nil { return } // mixture density D.rho_pl = nf * (sl*Cl - ρL*Ccb + ρG*Ccb) D.rho_pg = nf * (sg*Cg - ρG*Ccb + ρL*Ccb) // 1 liquid balance related coefficients D.Cpl_pl = nf * (ρL*Ccd - 2.0*Ccb*Cl) D.Cpl_pg = nf * (Ccb*Cl - ρL*Ccd) D.Cpl_usM = (sl*Cl - ρL*Ccb) * ns0 // 2 liquid balance related coefficients D.Cpg_pl = nf * 
(Cl*Ccb - ρL*Ccd) D.Cpg_pg = nf * ρL * Ccd D.Cpg_usM = ρL * Ccb * ns0 // 3 liquid balance related coefficients D.Cvs_pl = sl*Cl - Ccb*ρL D.Cvs_pg = Ccb * ρL // 1 gas balance related coefficients D.Dpl_pl = -nf * ρG * Ccd D.Dpl_pg = nf * (ρG*Ccd + Cg*Ccb) D.Dpl_usM = ρG * Ccb * ns0 // 2 gas balance related coefficients D.Dpg_pl = nf * (Ccb*Cg + ρG*Ccd) D.Dpg_pg = -nf * (ρG*Ccd + 2.0*Cg*Ccb) D.Dpg_usM = (sg*Cg - ρG*Ccb) * ns0 // 3 gas balance related coefficients D.Dvs_pl = Ccb * ρG D.Dvs_pg = sg*Cg - Ccb*ρG } return }
mporous/tpm.go
0.531696
0.732592
tpm.go
starcoder
package blockchain import ( "encoding/json" "fmt" "github.com/incognitochain/incognito-chain/common" ) type BeaconBlock struct { // AggregatedSig string `json:"AggregatedSig"` // R string `json:"R"` // ValidatorsIdx [][]int `json:"ValidatorsIdx"` //[0]: r | [1]:AggregatedSig // ProducerSig string `json:"ProducerSig"` ValidationData string `json:"ValidationData"` Body BeaconBody Header BeaconHeader } func (beaconBlock *BeaconBlock) GetVersion() int { return beaconBlock.Header.Version } func (beaconBlock *BeaconBlock) GetPrevHash() common.Hash { return beaconBlock.Header.PreviousBlockHash } func NewBeaconBlock() *BeaconBlock { return &BeaconBlock{} } func (beaconBlock *BeaconBlock) GetProposer() string { return beaconBlock.Header.Proposer } func (beaconBlock *BeaconBlock) GetProposeTime() int64 { return beaconBlock.Header.ProposeTime } func (beaconBlock *BeaconBlock) GetProduceTime() int64 { return beaconBlock.Header.Timestamp } func (beaconBlock BeaconBlock) Hash() *common.Hash { hash := beaconBlock.Header.Hash() return &hash } func (beaconBlock BeaconBlock) GetCurrentEpoch() uint64 { return beaconBlock.Header.Epoch } func (beaconBlock BeaconBlock) GetHeight() uint64 { return beaconBlock.Header.Height } func (beaconBlock BeaconBlock) GetShardID() int { return -1 } // func (beaconBlock *BeaconBlock) GetProducerPubKey() string { // return string(beaconBlock.Header.ProducerAddress.Pk) // } func (beaconBlock *BeaconBlock) UnmarshalJSON(data []byte) error { tempBeaconBlock := &struct { ValidationData string `json:"ValidationData"` Header BeaconHeader Body BeaconBody }{} err := json.Unmarshal(data, &tempBeaconBlock) if err != nil { return NewBlockChainError(UnmashallJsonShardBlockError, err) } // beaconBlock.AggregatedSig = tempBlk.AggregatedSig // beaconBlock.R = tempBlk.R // beaconBlock.ValidatorsIdx = tempBlk.ValidatorsIdx // beaconBlock.ProducerSig = tempBlk.ProducerSig beaconBlock.ValidationData = tempBeaconBlock.ValidationData beaconBlock.Header = 
tempBeaconBlock.Header beaconBlock.Body = tempBeaconBlock.Body return nil } func (beaconBlock *BeaconBlock) AddValidationField(validationData string) error { beaconBlock.ValidationData = validationData return nil } func (beaconBlock BeaconBlock) GetValidationField() string { return beaconBlock.ValidationData } func (beaconBlock BeaconBlock) GetRound() int { return beaconBlock.Header.Round } func (beaconBlock BeaconBlock) GetRoundKey() string { return fmt.Sprint(beaconBlock.Header.Height, "_", beaconBlock.Header.Round) } func (beaconBlock BeaconBlock) GetInstructions() [][]string { return beaconBlock.Body.Instructions } func (beaconBlock BeaconBlock) GetProducer() string { return beaconBlock.Header.Producer } func (beaconBlock BeaconBlock) GetProducerPubKeyStr() string { return beaconBlock.Header.ProducerPubKeyStr } func (beaconBlock BeaconBlock) GetConsensusType() string { return beaconBlock.Header.ConsensusType }
blockchain/beaconblock.go
0.58059
0.435121
beaconblock.go
starcoder
package gohome

import (
	"io"
	"strings"
)

const (
	// READ_ALL_BUFFER_SIZE is the chunk size used by ReadAll.
	READ_ALL_BUFFER_SIZE = 512 * 512
)

// ReadAll reads the entire content of a reader into a string. It uses a
// bigger buffer than the normal one so large inputs need fewer Read calls.
//
// Fixed: the original concatenated with str += string(buf[:n]) (quadratic)
// and allocated a fresh buffer on every iteration; a strings.Builder and a
// single reusable buffer give linear behavior with identical results.
func ReadAll(r io.Reader) (str string, err error) {
	var sb strings.Builder
	buf := make([]byte, READ_ALL_BUFFER_SIZE)
	for {
		var n int
		n, err = r.Read(buf)
		sb.Write(buf[:n])
		if err == io.EOF {
			// EOF is the normal termination condition, not an error.
			err = nil
			break
		}
		// Like the original, a (0, nil) read also terminates the loop.
		if err != nil || n == 0 {
			break
		}
	}
	return sb.String(), err
}

// Maxi returns the maximum value of a and b.
func Maxi(a, b int32) int32 {
	if a > b {
		return a
	}
	return b
}

// Mini returns the minimum value of a and b.
func Mini(a, b int32) int32 {
	if a < b {
		return a
	}
	return b
}

// Mesh3DVerticesToFloatArray converts a mesh 3d vertex array to a flat
// float array used by OpenGL.
//
// Fixed: the original launched one goroutine per vertex just to copy a
// handful of floats; the scheduling overhead dwarfs the copy. A plain
// loop produces the identical output.
func Mesh3DVerticesToFloatArray(vertices []Mesh3DVertex) (array []float32) {
	const NUM_FLOATS = MESH3DVERTEXSIZE / 4 // floats per vertex (4 bytes each)
	array = make([]float32, len(vertices)*NUM_FLOATS)
	for vi := range vertices {
		base := vi * NUM_FLOATS
		for i := 0; i < NUM_FLOATS; i++ {
			array[base+i] = vertices[vi][i]
		}
	}
	return
}

// Mesh2DVerticesToFloatArray converts a mesh 2d vertex array to a flat
// float array used by OpenGL.
func Mesh2DVerticesToFloatArray(vertices []Mesh2DVertex) (array []float32) {
	const NUM_FLOATS = MESH2DVERTEXSIZE / 4 // floats per vertex (4 bytes each)
	array = make([]float32, len(vertices)*NUM_FLOATS)
	for vi := range vertices {
		base := vi * NUM_FLOATS
		for i := 0; i < NUM_FLOATS; i++ {
			array[base+i] = vertices[vi][i]
		}
	}
	return
}

// Shape3DVerticesToFloatArray converts a shape 3d vertex array to a flat
// float array used by OpenGL.
//
// Fixed: the inner loop bound was the literal 3+4 (position + color)
// while the stride used NUM_FLOATS; using NUM_FLOATS for both keeps the
// loop consistent with its sibling converters. NOTE(review): this assumes
// SHAPE3DVERTEXSIZE/4 == 7, which the 3+4 literal implies — confirm.
func Shape3DVerticesToFloatArray(points []Shape3DVertex) (array []float32) {
	const NUM_FLOATS = SHAPE3DVERTEXSIZE / 4 // floats per vertex (4 bytes each)
	array = make([]float32, len(points)*NUM_FLOATS)
	for pi := range points {
		base := pi * NUM_FLOATS
		for j := 0; j < NUM_FLOATS; j++ {
			array[base+j] = points[pi][j]
		}
	}
	return
}

// Shape2DVerticesToFloatArray converts a shape 2d vertex array to a flat
// float array used by OpenGL.
func Shape2DVerticesToFloatArray(vertices []Shape2DVertex) (array []float32) {
	const NUM_FLOATS = SHAPE2DVERTEXSIZE / 4 // floats per vertex (4 bytes each)
	array = make([]float32, len(vertices)*NUM_FLOATS)
	for vi := range vertices {
		base := vi * NUM_FLOATS
		for i := 0; i < NUM_FLOATS; i++ {
			array[base+i] = vertices[vi][i]
		}
	}
	return
}

// EqualIgnoreCase returns whether one string equals the other ignoring
// the case. Only ASCII letters are folded, matching the original
// byte-wise comparison; multi-byte runes are compared exactly.
func EqualIgnoreCase(str1, str string) bool {
	if len(str1) != len(str) {
		return false
	}
	for i := 0; i < len(str1); i++ {
		a, b := str1[i], str[i]
		// Fold ASCII upper case to lower case on both sides, then compare.
		if 'A' <= a && a <= 'Z' {
			a += 'a' - 'A'
		}
		if 'A' <= b && b <= 'Z' {
			b += 'a' - 'A'
		}
		if a != b {
			return false
		}
	}
	return true
}

// GetFileExtension returns the file extension of a file name (without the
// dot), or "" when the name has no dot.
func GetFileExtension(file string) string {
	index := strings.LastIndex(file, ".")
	if index == -1 {
		return ""
	}
	return file[index+1:]
}

// GetFileFromPath returns the file name component of a slash-separated
// file path.
func GetFileFromPath(path string) string {
	if index := strings.LastIndex(path, "/"); index != -1 {
		return path[index+1:]
	}
	return path
}

// GetPathFromFile returns the directory component (with trailing slash)
// of a slash-separated file path, or "" when there is no directory.
func GetPathFromFile(path string) string {
	if index := strings.LastIndex(path, "/"); index != -1 {
		return path[:index+1]
	}
	return ""
}

// OpenFileWithPaths tries to open path relative to each prefix in paths,
// returning the first file that opens together with the name that worked.
// For each prefix it first tries the full relative path, then just the
// base file name.
func OpenFileWithPaths(path string, paths []string) (File, string, error) {
	var reader File
	var err error
	var filename string
	base := GetFileFromPath(path) // loop-invariant, hoisted
	for i := 0; i < len(paths); i++ {
		filename = paths[i] + path
		if reader, err = Framew.OpenFile(filename); err == nil {
			break
		} else if reader, err = Framew.OpenFile(paths[i] + base); err == nil {
			filename = paths[i] + base
			break
		}
	}
	return reader, filename, err
}
src/gohome/utils.go
0.631253
0.4436
utils.go
starcoder
package types

// difficulty.go defines the difficulty type and implements a few helper functions for
// manipulating the difficulty type.
//
// NOTE(review): this is vendored upstream code; only comments are touched
// here so the file stays byte-compatible with the rivine source.

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"math/big"

	"github.com/threefoldtech/rivine/pkg/encoding/rivbin"

	"github.com/threefoldtech/rivine/build"
	"github.com/threefoldtech/rivine/pkg/encoding/siabin"
)

type (
	// A Difficulty represents a number in number of blockstake times time in seconds
	// Normally the difficulty is the number of active blockstake times the
	// BlockFrequency. ex. If the number of active blockstake grows, the
	// difficulty will also increase to maintain the same BlockFrequency.
	Difficulty struct {
		i big.Int // backing integer; never negative by construction
	}
)

var (
	// ErrNegativeDifficulty is the error that is returned if performing an
	// operation results in a negative difficulty.
	ErrNegativeDifficulty = errors.New("negative difficulty not allowed")
)

// Big returns the value of c as a *big.Int. Importantly, it does not provide
// access to the c's internal big.Int object, only a copy.
func (c Difficulty) Big() *big.Int {
	return new(big.Int).Set(&c.i)
}

// NewDifficulty creates a Difficulty value from a big.Int. Undefined behavior
// occurs if a negative input is used.
// A negative input triggers build.Critical and leaves the zero Difficulty.
func NewDifficulty(b *big.Int) (d Difficulty) {
	if b.Sign() < 0 {
		build.Critical(ErrNegativeDifficulty)
	} else {
		d.i = *b
	}
	return
}

// Div64 returns a new Difficulty value z = c / y.
// Integer (truncating) division; y must be non-zero.
func (c Difficulty) Div64(y uint64) (z Difficulty) {
	z.i.Div(&c.i, new(big.Int).SetUint64(y))
	return
}

// Cmp compares two Difficulty values. The return value follows the convention
// of math/big.
func (c Difficulty) Cmp(y Difficulty) int {
	return c.i.Cmp(&y.i)
}

// MarshalJSON implements the json.Marshaler interface.
func (c Difficulty) MarshalJSON() ([]byte, error) {
	// Must enclosed the value in quotes; otherwise JS will convert it to a
	// double and lose precision.
	return []byte(`"` + c.String() + `"`), nil
}

// UnmarshalJSON implements the json.Unmarshaler interface. An error is
// returned if a negative number is provided.
func (c *Difficulty) UnmarshalJSON(b []byte) error {
	// UnmarshalJSON does not expect quotes
	b = bytes.Trim(b, `"`)
	err := c.i.UnmarshalJSON(b)
	if err != nil {
		return err
	}
	if c.i.Sign() < 0 {
		// Reset to zero so the receiver never holds a negative value,
		// then report the error to the caller.
		c.i = *big.NewInt(0)
		return ErrNegativeDifficulty
	}
	return nil
}

// MarshalSia implements the siabin.SiaMarshaler interface. It writes the
// byte-slice representation of the Difficulty's internal big.Int to w. Note
// that as the bytes of the big.Int correspond to the absolute value of the
// integer, there is no way to marshal a negative Difficulty.
func (c Difficulty) MarshalSia(w io.Writer) error {
	return siabin.WritePrefix(w, c.i.Bytes())
}

// UnmarshalSia implements the siabin.SiaUnmarshaler interface.
// The payload is limited to 256 bytes.
func (c *Difficulty) UnmarshalSia(r io.Reader) error {
	b, err := siabin.ReadPrefix(r, 256)
	if err != nil {
		return err
	}
	c.i.SetBytes(b)
	return nil
}

// MarshalRivine implements the rivbin.RivineMarshaler interface. It writes the
// byte-slice representation of the Difficulty's internal big.Int to w. Note
// that as the bytes of the big.Int correspond to the absolute value of the
// integer, there is no way to marshal a negative Difficulty.
func (c Difficulty) MarshalRivine(w io.Writer) error {
	return rivbin.WriteDataSlice(w, c.i.Bytes())
}

// UnmarshalRivine implements the rivbin.RivineMarshaler interface.
// The payload is limited to 256 bytes.
func (c *Difficulty) UnmarshalRivine(r io.Reader) error {
	b, err := rivbin.ReadDataSlice(r, 256)
	if err != nil {
		return err
	}
	c.i.SetBytes(b)
	return nil
}

// String implements the fmt.Stringer interface.
func (c Difficulty) String() string {
	return c.i.String()
}

// Scan implements the fmt.Scanner interface, allowing Difficulty values to be
// scanned from text. Negative scanned values are rejected with
// ErrNegativeDifficulty (note: the receiver keeps the scanned negative
// value in that case).
func (c *Difficulty) Scan(s fmt.ScanState, ch rune) error {
	err := c.i.Scan(s, ch)
	if err != nil {
		return err
	}
	if c.i.Sign() < 0 {
		return ErrNegativeDifficulty
	}
	return nil
}
vendor/github.com/threefoldtech/rivine/types/difficulty.go
0.770551
0.411998
difficulty.go
starcoder
package timeslotutil

import (
	"strconv"
	"time"
)

// shiftDate moves t into the past according to option:
//
//	0 -> 1 year back, 1 -> 1 month back, 2 -> 1 week back,
//	3 -> 1 day back,  4 -> unchanged (today).
//
// The boolean result reports whether option was recognized.
func shiftDate(option int, t time.Time) (time.Time, bool) {
	switch option {
	case 0:
		return t.AddDate(-1, 0, 0), true
	case 1:
		return t.AddDate(0, -1, 0), true
	case 2:
		return t.AddDate(0, 0, -7), true
	case 3:
		return t.AddDate(0, 0, -1), true
	case 4:
		return t, true
	default:
		return t, false
	}
}

// GetPreviousDate returns an int date number (YYYYMMDD, UTC) based on the
// passed-in time and option.
// input option: select how far in the past you would like to calculate
// options include: 0 (1 year), 1 (1 month), 2 (1 week), 3 (1 day), 4 (today)
// Unknown options yield 0 (matching the original switch with no default).
func GetPreviousDate(option int, now time.Time) int {
	t, ok := shiftDate(option, now)
	if !ok {
		return 0
	}
	// Atoi error ignored: the fixed layout always produces digits.
	duration, _ := strconv.Atoi(t.UTC().Format("20060102"))
	return duration
}

// GetPreviousWeekdayDate behaves like GetPreviousDate but backs weekend
// results up to the preceding Friday.
// NOTE(review): unlike GetPreviousDate this formats in the time's own
// location rather than UTC — preserved for backward compatibility.
func GetPreviousWeekdayDate(option int, now time.Time) int {
	t, ok := shiftDate(option, now)
	if !ok {
		return 0
	}
	duration, _ := strconv.Atoi(BackDateToWeekday(t).Format("20060102"))
	return duration
}

// BackDateBusinessDays steps t back by the given number of business days;
// weekend days are passed over without being counted.
func BackDateBusinessDays(t time.Time, days int) time.Time {
	for days > 0 {
		if t.Weekday() == time.Sunday || t.Weekday() == time.Saturday {
			t = t.AddDate(0, 0, -1)
		} else {
			t = t.AddDate(0, 0, -1)
			days--
		}
	}
	return t
}

// BackDateToWeekday returns the nearest weekday at or before t:
// Saturday maps to Friday, Sunday maps to the preceding Friday.
func BackDateToWeekday(t time.Time) time.Time {
	switch t.Weekday() {
	case time.Saturday:
		return t.AddDate(0, 0, -1)
	case time.Sunday:
		return t.AddDate(0, 0, -2)
	default:
		return t
	}
}

// GetPreviousDateMinusBusinessDaysString returns t minus the given number
// of business days, formatted as YYYYMMDD.
//
// Fixed: the previous implementation duplicated BackDateBusinessDays
// verbatim and emitted leftover debug output via fmt.Println on every
// call; it now delegates and stays silent.
func GetPreviousDateMinusBusinessDaysString(t time.Time, days int) string {
	return BackDateBusinessDays(t, days).Format("20060102")
}

// GetPreviousDateMinusDaysString returns now minus days, as YYYYMMDD.
func GetPreviousDateMinusDaysString(days int, now time.Time) string {
	return now.AddDate(0, 0, -days).Format("20060102")
}

// GetPreviousDateMinusMonthsString returns now minus months, as YYYYMMDD.
func GetPreviousDateMinusMonthsString(months int, now time.Time) string {
	return now.AddDate(0, -months, 0).Format("20060102")
}

// GetPreviousDateMinusYearsString returns now minus years, as YYYYMMDD.
func GetPreviousDateMinusYearsString(years int, now time.Time) string {
	return now.AddDate(-years, 0, 0).Format("20060102")
}

// GetDatePlusDaysString returns date plus days, as YYYYMMDD.
func GetDatePlusDaysString(days int, date time.Time) string {
	return date.AddDate(0, 0, days).Format("20060102")
}
pkg/timeslotutil/timeslotutil.go
0.58747
0.638863
timeslotutil.go
starcoder
package tsm1

// This file holds the type-specialized KeyCursor block readers
// (float/integer/unsigned/string/boolean). The five functions are
// deliberately kept in textual lockstep; they differ only in the array
// type, the Read*ArrayBlockAt call, and the stats counters.

import (
	"github.com/ivopetiz/influxdb/tsdb"
)

// ReadFloatArrayBlock reads the next block as a set of float values.
func (c *KeyCursor) ReadFloatArrayBlock(values *tsdb.FloatArray) (*tsdb.FloatArray, error) {
LOOP:
	// No matching blocks to decode
	if len(c.current) == 0 {
		values.Timestamps = values.Timestamps[:0]
		values.Values = values.Values[:0]
		return values, nil
	}

	// First block is the oldest block containing the points we're searching for.
	first := c.current[0]
	err := first.r.ReadFloatArrayBlockAt(&first.entry, values)
	if err != nil {
		return nil, err
	}
	if c.col != nil {
		c.col.GetCounter(floatBlocksDecodedCounter).Add(1)
		c.col.GetCounter(floatBlocksSizeCounter).Add(int64(first.entry.Size))
	}

	// Remove values we already read
	values.Exclude(first.readMin, first.readMax)

	// Remove any tombstones
	tombstones := first.r.TombstoneRange(c.key)
	excludeTombstonesFloatArray(tombstones, values)

	// If there are no values in this first block (all tombstoned or previously read) and
	// we have more potential blocks to search, try again with the next block.
	// (len(c.current) > 0 always holds here; the empty case returned above.)
	if values.Len() == 0 && len(c.current) > 0 {
		c.current = c.current[1:]
		goto LOOP
	}

	// Only one block with this key and time range so return it
	if len(c.current) == 1 {
		if values.Len() > 0 {
			first.markRead(values.MinTime(), values.MaxTime())
		}
		return values, nil
	}

	// Use the current block time range as our overlapping window
	minT, maxT := first.readMin, first.readMax
	if values.Len() > 0 {
		minT, maxT = values.MinTime(), values.MaxTime()
	}
	if c.ascending {
		// Blocks are ordered by generation, we may have values in the past in later blocks, if so,
		// expand the window to include the min time range to ensure values are returned in ascending
		// order
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			if cur.entry.MinTime < minT && !cur.read() {
				minT = cur.entry.MinTime
			}
		}

		// Find first block that overlaps our window
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() {
				// Shrink our window so it's the intersection of the first overlapping block and the
				// first block. We do this to minimize the region that overlaps and needs to
				// be merged.
				if cur.entry.MaxTime > maxT {
					maxT = cur.entry.MaxTime
				}
				values.Include(minT, maxT)
				break
			}
		}

		// Search the remaining blocks that overlap our window and append their values so we can
		// merge them.
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			// Skip this block if it doesn't contain points we're looking for or they have already been read
			if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() {
				cur.markRead(minT, maxT)
				continue
			}

			v := &tsdb.FloatArray{}
			err := cur.r.ReadFloatArrayBlockAt(&cur.entry, v)
			if err != nil {
				return nil, err
			}
			if c.col != nil {
				c.col.GetCounter(floatBlocksDecodedCounter).Add(1)
				c.col.GetCounter(floatBlocksSizeCounter).Add(int64(cur.entry.Size))
			}

			tombstones := cur.r.TombstoneRange(c.key)
			// Remove any tombstoned values
			excludeTombstonesFloatArray(tombstones, v)

			// Remove values we already read
			v.Exclude(cur.readMin, cur.readMax)

			if v.Len() > 0 {
				// Only use values in the overlapping window
				v.Include(minT, maxT)

				// Merge the remaining values with the existing
				values.Merge(v)
			}
			cur.markRead(minT, maxT)
		}
	} else {
		// Blocks are ordered by generation, we may have values in the past in later blocks, if so,
		// expand the window to include the max time range to ensure values are returned in descending
		// order
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			if cur.entry.MaxTime > maxT && !cur.read() {
				maxT = cur.entry.MaxTime
			}
		}

		// Find first block that overlaps our window
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() {
				// Shrink our window so it's the intersection of the first overlapping block and the
				// first block. We do this to minimize the region that overlaps and needs to
				// be merged.
				if cur.entry.MinTime < minT {
					minT = cur.entry.MinTime
				}
				values.Include(minT, maxT)
				break
			}
		}

		// Search the remaining blocks that overlap our window and append their values so we can
		// merge them.
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			// Skip this block if it doesn't contain points we're looking for or they have already been read
			if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() {
				cur.markRead(minT, maxT)
				continue
			}

			v := &tsdb.FloatArray{}
			err := cur.r.ReadFloatArrayBlockAt(&cur.entry, v)
			if err != nil {
				return nil, err
			}
			if c.col != nil {
				c.col.GetCounter(floatBlocksDecodedCounter).Add(1)
				c.col.GetCounter(floatBlocksSizeCounter).Add(int64(cur.entry.Size))
			}

			tombstones := cur.r.TombstoneRange(c.key)
			// Remove any tombstoned values
			excludeTombstonesFloatArray(tombstones, v)

			// Remove values we already read
			v.Exclude(cur.readMin, cur.readMax)

			// If the block we decoded should have all of its values included, mark it as read so we
			// don't use it again.
			if v.Len() > 0 {
				v.Include(minT, maxT)

				// Merge the remaining values with the existing
				// (descending: merge existing into the new block, then
				// replace the output in place)
				v.Merge(values)
				*values = *v
			}
			cur.markRead(minT, maxT)
		}
	}

	first.markRead(minT, maxT)

	return values, err
}

// excludeTombstonesFloatArray removes every tombstoned time range from values.
func excludeTombstonesFloatArray(t []TimeRange, values *tsdb.FloatArray) {
	for i := range t {
		values.Exclude(t[i].Min, t[i].Max)
	}
}

// ReadIntegerArrayBlock reads the next block as a set of integer values.
func (c *KeyCursor) ReadIntegerArrayBlock(values *tsdb.IntegerArray) (*tsdb.IntegerArray, error) {
LOOP:
	// No matching blocks to decode
	if len(c.current) == 0 {
		values.Timestamps = values.Timestamps[:0]
		values.Values = values.Values[:0]
		return values, nil
	}

	// First block is the oldest block containing the points we're searching for.
	first := c.current[0]
	err := first.r.ReadIntegerArrayBlockAt(&first.entry, values)
	if err != nil {
		return nil, err
	}
	if c.col != nil {
		c.col.GetCounter(integerBlocksDecodedCounter).Add(1)
		c.col.GetCounter(integerBlocksSizeCounter).Add(int64(first.entry.Size))
	}

	// Remove values we already read
	values.Exclude(first.readMin, first.readMax)

	// Remove any tombstones
	tombstones := first.r.TombstoneRange(c.key)
	excludeTombstonesIntegerArray(tombstones, values)

	// If there are no values in this first block (all tombstoned or previously read) and
	// we have more potential blocks to search, try again with the next block.
	if values.Len() == 0 && len(c.current) > 0 {
		c.current = c.current[1:]
		goto LOOP
	}

	// Only one block with this key and time range so return it
	if len(c.current) == 1 {
		if values.Len() > 0 {
			first.markRead(values.MinTime(), values.MaxTime())
		}
		return values, nil
	}

	// Use the current block time range as our overlapping window
	minT, maxT := first.readMin, first.readMax
	if values.Len() > 0 {
		minT, maxT = values.MinTime(), values.MaxTime()
	}
	if c.ascending {
		// Blocks are ordered by generation, we may have values in the past in later blocks, if so,
		// expand the window to include the min time range to ensure values are returned in ascending
		// order
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			if cur.entry.MinTime < minT && !cur.read() {
				minT = cur.entry.MinTime
			}
		}

		// Find first block that overlaps our window
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() {
				// Shrink our window so it's the intersection of the first overlapping block and the
				// first block. We do this to minimize the region that overlaps and needs to
				// be merged.
				if cur.entry.MaxTime > maxT {
					maxT = cur.entry.MaxTime
				}
				values.Include(minT, maxT)
				break
			}
		}

		// Search the remaining blocks that overlap our window and append their values so we can
		// merge them.
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			// Skip this block if it doesn't contain points we're looking for or they have already been read
			if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() {
				cur.markRead(minT, maxT)
				continue
			}

			v := &tsdb.IntegerArray{}
			err := cur.r.ReadIntegerArrayBlockAt(&cur.entry, v)
			if err != nil {
				return nil, err
			}
			if c.col != nil {
				c.col.GetCounter(integerBlocksDecodedCounter).Add(1)
				c.col.GetCounter(integerBlocksSizeCounter).Add(int64(cur.entry.Size))
			}

			tombstones := cur.r.TombstoneRange(c.key)
			// Remove any tombstoned values
			excludeTombstonesIntegerArray(tombstones, v)

			// Remove values we already read
			v.Exclude(cur.readMin, cur.readMax)

			if v.Len() > 0 {
				// Only use values in the overlapping window
				v.Include(minT, maxT)

				// Merge the remaining values with the existing
				values.Merge(v)
			}
			cur.markRead(minT, maxT)
		}
	} else {
		// Blocks are ordered by generation, we may have values in the past in later blocks, if so,
		// expand the window to include the max time range to ensure values are returned in descending
		// order
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			if cur.entry.MaxTime > maxT && !cur.read() {
				maxT = cur.entry.MaxTime
			}
		}

		// Find first block that overlaps our window
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() {
				// Shrink our window so it's the intersection of the first overlapping block and the
				// first block. We do this to minimize the region that overlaps and needs to
				// be merged.
				if cur.entry.MinTime < minT {
					minT = cur.entry.MinTime
				}
				values.Include(minT, maxT)
				break
			}
		}

		// Search the remaining blocks that overlap our window and append their values so we can
		// merge them.
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			// Skip this block if it doesn't contain points we're looking for or they have already been read
			if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() {
				cur.markRead(minT, maxT)
				continue
			}

			v := &tsdb.IntegerArray{}
			err := cur.r.ReadIntegerArrayBlockAt(&cur.entry, v)
			if err != nil {
				return nil, err
			}
			if c.col != nil {
				c.col.GetCounter(integerBlocksDecodedCounter).Add(1)
				c.col.GetCounter(integerBlocksSizeCounter).Add(int64(cur.entry.Size))
			}

			tombstones := cur.r.TombstoneRange(c.key)
			// Remove any tombstoned values
			excludeTombstonesIntegerArray(tombstones, v)

			// Remove values we already read
			v.Exclude(cur.readMin, cur.readMax)

			// If the block we decoded should have all of its values included, mark it as read so we
			// don't use it again.
			if v.Len() > 0 {
				v.Include(minT, maxT)

				// Merge the remaining values with the existing
				v.Merge(values)
				*values = *v
			}
			cur.markRead(minT, maxT)
		}
	}

	first.markRead(minT, maxT)

	return values, err
}

// excludeTombstonesIntegerArray removes every tombstoned time range from values.
func excludeTombstonesIntegerArray(t []TimeRange, values *tsdb.IntegerArray) {
	for i := range t {
		values.Exclude(t[i].Min, t[i].Max)
	}
}

// ReadUnsignedArrayBlock reads the next block as a set of unsigned values.
func (c *KeyCursor) ReadUnsignedArrayBlock(values *tsdb.UnsignedArray) (*tsdb.UnsignedArray, error) {
LOOP:
	// No matching blocks to decode
	if len(c.current) == 0 {
		values.Timestamps = values.Timestamps[:0]
		values.Values = values.Values[:0]
		return values, nil
	}

	// First block is the oldest block containing the points we're searching for.
	first := c.current[0]
	err := first.r.ReadUnsignedArrayBlockAt(&first.entry, values)
	if err != nil {
		return nil, err
	}
	if c.col != nil {
		c.col.GetCounter(unsignedBlocksDecodedCounter).Add(1)
		c.col.GetCounter(unsignedBlocksSizeCounter).Add(int64(first.entry.Size))
	}

	// Remove values we already read
	values.Exclude(first.readMin, first.readMax)

	// Remove any tombstones
	tombstones := first.r.TombstoneRange(c.key)
	excludeTombstonesUnsignedArray(tombstones, values)

	// If there are no values in this first block (all tombstoned or previously read) and
	// we have more potential blocks to search, try again with the next block.
	if values.Len() == 0 && len(c.current) > 0 {
		c.current = c.current[1:]
		goto LOOP
	}

	// Only one block with this key and time range so return it
	if len(c.current) == 1 {
		if values.Len() > 0 {
			first.markRead(values.MinTime(), values.MaxTime())
		}
		return values, nil
	}

	// Use the current block time range as our overlapping window
	minT, maxT := first.readMin, first.readMax
	if values.Len() > 0 {
		minT, maxT = values.MinTime(), values.MaxTime()
	}
	if c.ascending {
		// Blocks are ordered by generation, we may have values in the past in later blocks, if so,
		// expand the window to include the min time range to ensure values are returned in ascending
		// order
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			if cur.entry.MinTime < minT && !cur.read() {
				minT = cur.entry.MinTime
			}
		}

		// Find first block that overlaps our window
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() {
				// Shrink our window so it's the intersection of the first overlapping block and the
				// first block. We do this to minimize the region that overlaps and needs to
				// be merged.
				if cur.entry.MaxTime > maxT {
					maxT = cur.entry.MaxTime
				}
				values.Include(minT, maxT)
				break
			}
		}

		// Search the remaining blocks that overlap our window and append their values so we can
		// merge them.
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			// Skip this block if it doesn't contain points we're looking for or they have already been read
			if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() {
				cur.markRead(minT, maxT)
				continue
			}

			v := &tsdb.UnsignedArray{}
			err := cur.r.ReadUnsignedArrayBlockAt(&cur.entry, v)
			if err != nil {
				return nil, err
			}
			if c.col != nil {
				c.col.GetCounter(unsignedBlocksDecodedCounter).Add(1)
				c.col.GetCounter(unsignedBlocksSizeCounter).Add(int64(cur.entry.Size))
			}

			tombstones := cur.r.TombstoneRange(c.key)
			// Remove any tombstoned values
			excludeTombstonesUnsignedArray(tombstones, v)

			// Remove values we already read
			v.Exclude(cur.readMin, cur.readMax)

			if v.Len() > 0 {
				// Only use values in the overlapping window
				v.Include(minT, maxT)

				// Merge the remaining values with the existing
				values.Merge(v)
			}
			cur.markRead(minT, maxT)
		}
	} else {
		// Blocks are ordered by generation, we may have values in the past in later blocks, if so,
		// expand the window to include the max time range to ensure values are returned in descending
		// order
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			if cur.entry.MaxTime > maxT && !cur.read() {
				maxT = cur.entry.MaxTime
			}
		}

		// Find first block that overlaps our window
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() {
				// Shrink our window so it's the intersection of the first overlapping block and the
				// first block. We do this to minimize the region that overlaps and needs to
				// be merged.
				if cur.entry.MinTime < minT {
					minT = cur.entry.MinTime
				}
				values.Include(minT, maxT)
				break
			}
		}

		// Search the remaining blocks that overlap our window and append their values so we can
		// merge them.
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			// Skip this block if it doesn't contain points we're looking for or they have already been read
			if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() {
				cur.markRead(minT, maxT)
				continue
			}

			v := &tsdb.UnsignedArray{}
			err := cur.r.ReadUnsignedArrayBlockAt(&cur.entry, v)
			if err != nil {
				return nil, err
			}
			if c.col != nil {
				c.col.GetCounter(unsignedBlocksDecodedCounter).Add(1)
				c.col.GetCounter(unsignedBlocksSizeCounter).Add(int64(cur.entry.Size))
			}

			tombstones := cur.r.TombstoneRange(c.key)
			// Remove any tombstoned values
			excludeTombstonesUnsignedArray(tombstones, v)

			// Remove values we already read
			v.Exclude(cur.readMin, cur.readMax)

			// If the block we decoded should have all of its values included, mark it as read so we
			// don't use it again.
			if v.Len() > 0 {
				v.Include(minT, maxT)

				// Merge the remaining values with the existing
				v.Merge(values)
				*values = *v
			}
			cur.markRead(minT, maxT)
		}
	}

	first.markRead(minT, maxT)

	return values, err
}

// excludeTombstonesUnsignedArray removes every tombstoned time range from values.
func excludeTombstonesUnsignedArray(t []TimeRange, values *tsdb.UnsignedArray) {
	for i := range t {
		values.Exclude(t[i].Min, t[i].Max)
	}
}

// ReadStringArrayBlock reads the next block as a set of string values.
func (c *KeyCursor) ReadStringArrayBlock(values *tsdb.StringArray) (*tsdb.StringArray, error) {
LOOP:
	// No matching blocks to decode
	if len(c.current) == 0 {
		values.Timestamps = values.Timestamps[:0]
		values.Values = values.Values[:0]
		return values, nil
	}

	// First block is the oldest block containing the points we're searching for.
	first := c.current[0]
	err := first.r.ReadStringArrayBlockAt(&first.entry, values)
	if err != nil {
		return nil, err
	}
	if c.col != nil {
		c.col.GetCounter(stringBlocksDecodedCounter).Add(1)
		c.col.GetCounter(stringBlocksSizeCounter).Add(int64(first.entry.Size))
	}

	// Remove values we already read
	values.Exclude(first.readMin, first.readMax)

	// Remove any tombstones
	tombstones := first.r.TombstoneRange(c.key)
	excludeTombstonesStringArray(tombstones, values)

	// If there are no values in this first block (all tombstoned or previously read) and
	// we have more potential blocks to search, try again with the next block.
	if values.Len() == 0 && len(c.current) > 0 {
		c.current = c.current[1:]
		goto LOOP
	}

	// Only one block with this key and time range so return it
	if len(c.current) == 1 {
		if values.Len() > 0 {
			first.markRead(values.MinTime(), values.MaxTime())
		}
		return values, nil
	}

	// Use the current block time range as our overlapping window
	minT, maxT := first.readMin, first.readMax
	if values.Len() > 0 {
		minT, maxT = values.MinTime(), values.MaxTime()
	}
	if c.ascending {
		// Blocks are ordered by generation, we may have values in the past in later blocks, if so,
		// expand the window to include the min time range to ensure values are returned in ascending
		// order
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			if cur.entry.MinTime < minT && !cur.read() {
				minT = cur.entry.MinTime
			}
		}

		// Find first block that overlaps our window
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() {
				// Shrink our window so it's the intersection of the first overlapping block and the
				// first block. We do this to minimize the region that overlaps and needs to
				// be merged.
				if cur.entry.MaxTime > maxT {
					maxT = cur.entry.MaxTime
				}
				values.Include(minT, maxT)
				break
			}
		}

		// Search the remaining blocks that overlap our window and append their values so we can
		// merge them.
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			// Skip this block if it doesn't contain points we're looking for or they have already been read
			if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() {
				cur.markRead(minT, maxT)
				continue
			}

			v := &tsdb.StringArray{}
			err := cur.r.ReadStringArrayBlockAt(&cur.entry, v)
			if err != nil {
				return nil, err
			}
			if c.col != nil {
				c.col.GetCounter(stringBlocksDecodedCounter).Add(1)
				c.col.GetCounter(stringBlocksSizeCounter).Add(int64(cur.entry.Size))
			}

			tombstones := cur.r.TombstoneRange(c.key)
			// Remove any tombstoned values
			excludeTombstonesStringArray(tombstones, v)

			// Remove values we already read
			v.Exclude(cur.readMin, cur.readMax)

			if v.Len() > 0 {
				// Only use values in the overlapping window
				v.Include(minT, maxT)

				// Merge the remaining values with the existing
				values.Merge(v)
			}
			cur.markRead(minT, maxT)
		}
	} else {
		// Blocks are ordered by generation, we may have values in the past in later blocks, if so,
		// expand the window to include the max time range to ensure values are returned in descending
		// order
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			if cur.entry.MaxTime > maxT && !cur.read() {
				maxT = cur.entry.MaxTime
			}
		}

		// Find first block that overlaps our window
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() {
				// Shrink our window so it's the intersection of the first overlapping block and the
				// first block. We do this to minimize the region that overlaps and needs to
				// be merged.
				if cur.entry.MinTime < minT {
					minT = cur.entry.MinTime
				}
				values.Include(minT, maxT)
				break
			}
		}

		// Search the remaining blocks that overlap our window and append their values so we can
		// merge them.
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			// Skip this block if it doesn't contain points we're looking for or they have already been read
			if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() {
				cur.markRead(minT, maxT)
				continue
			}

			v := &tsdb.StringArray{}
			err := cur.r.ReadStringArrayBlockAt(&cur.entry, v)
			if err != nil {
				return nil, err
			}
			if c.col != nil {
				c.col.GetCounter(stringBlocksDecodedCounter).Add(1)
				c.col.GetCounter(stringBlocksSizeCounter).Add(int64(cur.entry.Size))
			}

			tombstones := cur.r.TombstoneRange(c.key)
			// Remove any tombstoned values
			excludeTombstonesStringArray(tombstones, v)

			// Remove values we already read
			v.Exclude(cur.readMin, cur.readMax)

			// If the block we decoded should have all of its values included, mark it as read so we
			// don't use it again.
			if v.Len() > 0 {
				v.Include(minT, maxT)

				// Merge the remaining values with the existing
				v.Merge(values)
				*values = *v
			}
			cur.markRead(minT, maxT)
		}
	}

	first.markRead(minT, maxT)

	return values, err
}

// excludeTombstonesStringArray removes every tombstoned time range from values.
func excludeTombstonesStringArray(t []TimeRange, values *tsdb.StringArray) {
	for i := range t {
		values.Exclude(t[i].Min, t[i].Max)
	}
}

// ReadBooleanArrayBlock reads the next block as a set of boolean values.
func (c *KeyCursor) ReadBooleanArrayBlock(values *tsdb.BooleanArray) (*tsdb.BooleanArray, error) {
LOOP:
	// No matching blocks to decode
	if len(c.current) == 0 {
		values.Timestamps = values.Timestamps[:0]
		values.Values = values.Values[:0]
		return values, nil
	}

	// First block is the oldest block containing the points we're searching for.
	first := c.current[0]
	err := first.r.ReadBooleanArrayBlockAt(&first.entry, values)
	if err != nil {
		return nil, err
	}
	if c.col != nil {
		c.col.GetCounter(booleanBlocksDecodedCounter).Add(1)
		c.col.GetCounter(booleanBlocksSizeCounter).Add(int64(first.entry.Size))
	}

	// Remove values we already read
	values.Exclude(first.readMin, first.readMax)

	// Remove any tombstones
	tombstones := first.r.TombstoneRange(c.key)
	excludeTombstonesBooleanArray(tombstones, values)

	// If there are no values in this first block (all tombstoned or previously read) and
	// we have more potential blocks to search, try again with the next block.
	if values.Len() == 0 && len(c.current) > 0 {
		c.current = c.current[1:]
		goto LOOP
	}

	// Only one block with this key and time range so return it
	if len(c.current) == 1 {
		if values.Len() > 0 {
			first.markRead(values.MinTime(), values.MaxTime())
		}
		return values, nil
	}

	// Use the current block time range as our overlapping window
	minT, maxT := first.readMin, first.readMax
	if values.Len() > 0 {
		minT, maxT = values.MinTime(), values.MaxTime()
	}
	if c.ascending {
		// Blocks are ordered by generation, we may have values in the past in later blocks, if so,
		// expand the window to include the min time range to ensure values are returned in ascending
		// order
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			if cur.entry.MinTime < minT && !cur.read() {
				minT = cur.entry.MinTime
			}
		}

		// Find first block that overlaps our window
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() {
				// Shrink our window so it's the intersection of the first overlapping block and the
				// first block. We do this to minimize the region that overlaps and needs to
				// be merged.
				if cur.entry.MaxTime > maxT {
					maxT = cur.entry.MaxTime
				}
				values.Include(minT, maxT)
				break
			}
		}

		// Search the remaining blocks that overlap our window and append their values so we can
		// merge them.
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			// Skip this block if it doesn't contain points we're looking for or they have already been read
			if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() {
				cur.markRead(minT, maxT)
				continue
			}

			v := &tsdb.BooleanArray{}
			err := cur.r.ReadBooleanArrayBlockAt(&cur.entry, v)
			if err != nil {
				return nil, err
			}
			if c.col != nil {
				c.col.GetCounter(booleanBlocksDecodedCounter).Add(1)
				c.col.GetCounter(booleanBlocksSizeCounter).Add(int64(cur.entry.Size))
			}

			tombstones := cur.r.TombstoneRange(c.key)
			// Remove any tombstoned values
			excludeTombstonesBooleanArray(tombstones, v)

			// Remove values we already read
			v.Exclude(cur.readMin, cur.readMax)

			if v.Len() > 0 {
				// Only use values in the overlapping window
				v.Include(minT, maxT)

				// Merge the remaining values with the existing
				values.Merge(v)
			}
			cur.markRead(minT, maxT)
		}
	} else {
		// Blocks are ordered by generation, we may have values in the past in later blocks, if so,
		// expand the window to include the max time range to ensure values are returned in descending
		// order
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			if cur.entry.MaxTime > maxT && !cur.read() {
				maxT = cur.entry.MaxTime
			}
		}

		// Find first block that overlaps our window
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() {
				// Shrink our window so it's the intersection of the first overlapping block and the
				// first block. We do this to minimize the region that overlaps and needs to
				// be merged.
				if cur.entry.MinTime < minT {
					minT = cur.entry.MinTime
				}
				values.Include(minT, maxT)
				break
			}
		}

		// Search the remaining blocks that overlap our window and append their values so we can
		// merge them.
for i := 1; i < len(c.current); i++ { cur := c.current[i] // Skip this block if it doesn't contain points we looking for or they have already been read if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() { cur.markRead(minT, maxT) continue } v := &tsdb.BooleanArray{} err := cur.r.ReadBooleanArrayBlockAt(&cur.entry, v) if err != nil { return nil, err } if c.col != nil { c.col.GetCounter(booleanBlocksDecodedCounter).Add(1) c.col.GetCounter(booleanBlocksSizeCounter).Add(int64(cur.entry.Size)) } tombstones := cur.r.TombstoneRange(c.key) // Remove any tombstoned values excludeTombstonesBooleanArray(tombstones, v) // Remove values we already read v.Exclude(cur.readMin, cur.readMax) // If the block we decoded should have all of it's values included, mark it as read so we // don't use it again. if v.Len() > 0 { v.Include(minT, maxT) // Merge the remaining values with the existing v.Merge(values) *values = *v } cur.markRead(minT, maxT) } } first.markRead(minT, maxT) return values, err } func excludeTombstonesBooleanArray(t []TimeRange, values *tsdb.BooleanArray) { for i := range t { values.Exclude(t[i].Min, t[i].Max) } }
tsdb/engine/tsm1/file_store_array.gen.go
0.549157
0.471953
file_store_array.gen.go
starcoder
package route

import (
	"fmt"
	"net/http"
	"regexp"
	"strings"
)

// matcher is the common interface implemented by all request matchers. A
// matcher decides whether an incoming request satisfies some condition
// (host, method, path, header value) and, on success, yields the match
// result that was associated with it via setMatch.
type matcher interface {
	match(*http.Request) *match
	setMatch(match *match)

	canMerge(matcher) bool
	merge(matcher) (matcher, error)

	canChain(matcher) bool
	chain(matcher) (matcher, error)
}

// hostTrieMatcher matches the request host using a trie. Hostnames are
// case-insensitive, so the pattern is lowercased up front.
func hostTrieMatcher(hostname string) (matcher, error) {
	return newTrieMatcher(strings.ToLower(hostname), &hostMapper{}, &match{})
}

// hostRegexpMatcher matches the request host against a regular expression.
func hostRegexpMatcher(hostname string) (matcher, error) {
	return newRegexpMatcher(strings.ToLower(hostname), &hostMapper{}, &match{})
}

// methodTrieMatcher matches the HTTP method using a trie.
func methodTrieMatcher(method string) (matcher, error) {
	return newTrieMatcher(method, &methodMapper{}, &match{})
}

// methodRegexpMatcher matches the HTTP method against a regular expression.
func methodRegexpMatcher(method string) (matcher, error) {
	return newRegexpMatcher(method, &methodMapper{}, &match{})
}

// pathTrieMatcher matches the request path using a trie.
func pathTrieMatcher(path string) (matcher, error) {
	return newTrieMatcher(path, &pathMapper{}, &match{})
}

// pathRegexpMatcher matches the request path against a regular expression.
func pathRegexpMatcher(path string) (matcher, error) {
	return newRegexpMatcher(path, &pathMapper{}, &match{})
}

// headerTrieMatcher matches the value of the named header using a trie.
func headerTrieMatcher(name, value string) (matcher, error) {
	return newTrieMatcher(value, &headerMapper{header: name}, &match{})
}

// headerRegexpMatcher matches the value of the named header against a
// regular expression.
func headerRegexpMatcher(name, value string) (matcher, error) {
	return newRegexpMatcher(value, &headerMapper{header: name}, &match{})
}

// match is the result attached to a matcher and returned on a successful
// match.
type match struct {
	val interface{}
}

// andMatcher matches a request only when both of its sub-matchers do.
type andMatcher struct {
	a matcher
	b matcher
}

// newAndMatcher combines two matchers. When the first matcher can chain
// the second, the chained matcher is returned instead of a generic
// andMatcher (chaining keeps matching a single traversal).
func newAndMatcher(a, b matcher) matcher {
	if a.canChain(b) {
		m, err := a.chain(b)
		if err == nil {
			return m
		}
	}
	return &andMatcher{
		a: a,
		b: b,
	}
}

// canChain always reports false: an andMatcher cannot chain further.
func (a *andMatcher) canChain(matcher) bool {
	return false
}

func (a *andMatcher) chain(matcher) (matcher, error) {
	return nil, fmt.Errorf("not supported")
}

func (a *andMatcher) String() string {
	return fmt.Sprintf("andMatcher(%v, %v)", a.a, a.b)
}

// setMatch propagates the match result to both sub-matchers.
func (a *andMatcher) setMatch(m *match) {
	a.a.setMatch(m)
	a.b.setMatch(m)
}

func (a *andMatcher) canMerge(o matcher) bool {
	return false
}

func (a *andMatcher) merge(o matcher) (matcher, error) {
	return nil, fmt.Errorf("Method not supported")
}

// match succeeds only when both sub-matchers succeed; the second
// sub-matcher's result is the one returned.
func (a *andMatcher) match(req *http.Request) *match {
	result := a.a.match(req)
	if result == nil {
		return nil
	}
	return a.b.match(req)
}

// regexpMatcher matches a request by extracting a string from it with a
// requestMapper and testing that string against a compiled regular
// expression.
//
// All methods use the receiver name m; the earlier version mixed r and m
// across methods of this type.
type regexpMatcher struct {
	// Uses this mapper to extract a string from a request to match against
	mapper requestMapper
	// Compiled regular expression
	expr *regexp.Regexp
	// match result
	result *match
}

func (m *regexpMatcher) canChain(matcher) bool {
	return false
}

func (m *regexpMatcher) chain(matcher) (matcher, error) {
	return nil, fmt.Errorf("not supported")
}

func (m *regexpMatcher) String() string {
	return fmt.Sprintf("regexpMatcher(%v)", m.expr)
}

func (m *regexpMatcher) setMatch(result *match) {
	m.result = result
}

// newRegexpMatcher compiles expr and returns a matcher that applies it to
// the string produced by mapper. The provided match result is returned on
// every successful match.
func newRegexpMatcher(expr string, mapper requestMapper, m *match) (matcher, error) {
	r, err := regexp.Compile(expr)
	if err != nil {
		return nil, fmt.Errorf("Bad regular expression: %s %s", expr, err)
	}
	return &regexpMatcher{expr: r, mapper: mapper, result: m}, nil
}

func (m *regexpMatcher) canMerge(matcher) bool {
	return false
}

func (m *regexpMatcher) merge(matcher) (matcher, error) {
	return nil, fmt.Errorf("Method not supported")
}

// match returns the stored result when the mapped request string matches
// the expression, nil otherwise.
func (m *regexpMatcher) match(req *http.Request) *match {
	if m.expr.MatchString(m.mapper.mapRequest(req)) {
		return m.result
	}
	return nil
}
vendor/github.com/vulcand/vulcand/Godeps/_workspace/src/github.com/vulcand/route/matcher.go
0.693369
0.411879
matcher.go
starcoder
package transactionpool

import (
	"encoding/json"

	"github.com/acejam/Sia/build"
	"github.com/acejam/Sia/encoding"
	"github.com/acejam/Sia/modules"
	"github.com/acejam/Sia/types"

	"github.com/coreos/bbolt"
	"gitlab.com/NebulousLabs/errors"
)

// database.go contains objects related to the layout of the transaction pool's
// database, as well as getters and setters. Logic for interacting with the
// database can be found in persist.go

// Buckets in the database.
var (
	// bucketBlockHeight holds the most recent block height seen by the
	// transaction pool.
	bucketBlockHeight = []byte("BlockHeight")

	// bucketConfirmedTransactions holds the ids of every transaction that has
	// been confirmed on the blockchain.
	bucketConfirmedTransactions = []byte("ConfirmedTransactions")

	// bucketFeeMedian stores all of the persist data relating to the fee
	// median.
	bucketFeeMedian = []byte("FeeMedian")

	// bucketRecentConsensusChange holds the most recent consensus change seen
	// by the transaction pool.
	bucketRecentConsensusChange = []byte("RecentConsensusChange")
)

// Explicitly named fields in the database.
var (
	// fieldBlockHeight is the field in bucketBlockHeight that holds the value of
	// the most recent block height.
	fieldBlockHeight = []byte("BlockHeight")

	// fieldFeeMedian is the fee median persist data stored in a fee median
	// field.
	fieldFeeMedian = []byte("FeeMedian")

	// fieldRecentBlockID is used to store the id of the most recent block seen
	// by the transaction pool.
	fieldRecentBlockID = []byte("RecentBlockID")

	// fieldRecentConsensusChange is the field in bucketRecentConsensusChange
	// that holds the value of the most recent consensus change.
	fieldRecentConsensusChange = []byte("RecentConsensusChange")
)

// Errors relating to the database.
var (
	// errNilConsensusChange is returned if there is no consensus change in the
	// database.
	errNilConsensusChange = errors.New("no consensus change found")

	// errNilFeeMedian is the message returned if a database does not find fee
	// median persistence.
	errNilFeeMedian = errors.New("no fee median found")

	// errNilRecentBlock is returned if there is no data stored in
	// fieldRecentBlockID.
	errNilRecentBlock = errors.New("no recent block found in the database")
)

// Complex objects that get stored in database fields.
type (
	// medianPersist is the json object that gets stored in the database so that
	// the transaction pool can persist its block based fee estimations.
	medianPersist struct {
		RecentMedians   []types.Currency
		RecentMedianFee types.Currency
	}
)

// deleteTransaction deletes a transaction from the list of confirmed
// transactions.
func (tp *TransactionPool) deleteTransaction(tx *bolt.Tx, id types.TransactionID) error {
	return tx.Bucket(bucketConfirmedTransactions).Delete(id[:])
}

// getBlockHeight returns the most recent block height from the database.
func (tp *TransactionPool) getBlockHeight(tx *bolt.Tx) (bh types.BlockHeight, err error) {
	err = encoding.Unmarshal(tx.Bucket(bucketBlockHeight).Get(fieldBlockHeight), &bh)
	return
}

// getFeeMedian will get the fee median struct stored in the database.
func (tp *TransactionPool) getFeeMedian(tx *bolt.Tx) (medianPersist, error) {
	// Read through the provided transaction. The previous implementation
	// ignored the tx parameter and read through tp.dbTx, which returned
	// stale data whenever a caller supplied a different transaction.
	medianBytes := tx.Bucket(bucketFeeMedian).Get(fieldFeeMedian)
	if medianBytes == nil {
		return medianPersist{}, errNilFeeMedian
	}

	var mp medianPersist
	err := json.Unmarshal(medianBytes, &mp)
	if err != nil {
		return medianPersist{}, build.ExtendErr("unable to unmarshal median data:", err)
	}
	return mp, nil
}

// getRecentBlockID will fetch the most recent block id and most recent parent
// id from the database.
func (tp *TransactionPool) getRecentBlockID(tx *bolt.Tx) (recentID types.BlockID, err error) {
	idBytes := tx.Bucket(bucketRecentConsensusChange).Get(fieldRecentBlockID)
	if idBytes == nil {
		return types.BlockID{}, errNilRecentBlock
	}
	copy(recentID[:], idBytes[:])
	// An all-zero id means the field was never populated with a real block.
	if recentID == (types.BlockID{}) {
		return types.BlockID{}, errNilRecentBlock
	}
	return recentID, nil
}

// getRecentConsensusChange returns the most recent consensus change from the
// database.
func (tp *TransactionPool) getRecentConsensusChange(tx *bolt.Tx) (cc modules.ConsensusChangeID, err error) {
	ccBytes := tx.Bucket(bucketRecentConsensusChange).Get(fieldRecentConsensusChange)
	if ccBytes == nil {
		return modules.ConsensusChangeID{}, errNilConsensusChange
	}
	copy(cc[:], ccBytes)
	return cc, nil
}

// putBlockHeight updates the transaction pool's block height, both the cached
// in-memory copy and the persisted value.
func (tp *TransactionPool) putBlockHeight(tx *bolt.Tx, height types.BlockHeight) error {
	tp.blockHeight = height
	return tx.Bucket(bucketBlockHeight).Put(fieldBlockHeight, encoding.Marshal(height))
}

// putFeeMedian puts a median fees object into the database.
func (tp *TransactionPool) putFeeMedian(tx *bolt.Tx, mp medianPersist) error {
	objBytes, err := json.Marshal(mp)
	if err != nil {
		return err
	}
	return tx.Bucket(bucketFeeMedian).Put(fieldFeeMedian, objBytes)
}

// putRecentBlockID will store the most recent block id and the parent id of
// that block in the database.
func (tp *TransactionPool) putRecentBlockID(tx *bolt.Tx, recentID types.BlockID) error {
	return tx.Bucket(bucketRecentConsensusChange).Put(fieldRecentBlockID, recentID[:])
}

// putRecentConsensusChange updates the most recent consensus change seen by
// the transaction pool.
func (tp *TransactionPool) putRecentConsensusChange(tx *bolt.Tx, cc modules.ConsensusChangeID) error {
	return tx.Bucket(bucketRecentConsensusChange).Put(fieldRecentConsensusChange, cc[:])
}

// putTransaction adds a transaction to the list of confirmed transactions.
func (tp *TransactionPool) putTransaction(tx *bolt.Tx, id types.TransactionID) error { return tx.Bucket(bucketConfirmedTransactions).Put(id[:], []byte{}) }
modules/transactionpool/database.go
0.640636
0.400398
database.go
starcoder
package graphson import ( "fmt" "io" "reflect" "unsafe" jsoniter "github.com/json-iterator/go" "github.com/modern-go/reflect2" ) // DecoratorOfSlice decorates a value encoder of a slice type. func (encodeExtension) DecoratorOfSlice(typ reflect2.Type, enc jsoniter.ValEncoder) jsoniter.ValEncoder { encoder := typeEncoder{ValEncoder: enc} sliceType := typ.(reflect2.SliceType) if sliceType.Elem().Kind() == reflect.Uint8 { encoder.Type = byteBufferType } else { encoder.Type = listType } return sliceEncoder{sliceType, encoder} } // DecoratorOfArray decorates a value encoder of an array type. func (encodeExtension) DecoratorOfArray(enc jsoniter.ValEncoder) jsoniter.ValEncoder { return typeEncoder{enc, listType} } // DecoderOfSlice returns a value decoder of a slice type. func (ext decodeExtension) DecoderOfSlice(typ reflect2.Type) jsoniter.ValDecoder { sliceType := typ.(reflect2.SliceType) elemType := sliceType.Elem() if elemType.Kind() == reflect.Uint8 { return nil } return sliceDecoder{ sliceType: sliceType, elemDec: ext.LazyDecoderOf(elemType), } } // DecoderOfArray returns a value decoder of an array type. func (ext decodeExtension) DecoderOfArray(typ reflect2.Type) jsoniter.ValDecoder { arrayType := typ.(reflect2.ArrayType) return arrayDecoder{ arrayType: arrayType, elemDec: ext.LazyDecoderOf(arrayType.Elem()), } } // DecoratorOfSlice decorates a value decoder of a slice type. func (ext decodeExtension) DecoratorOfSlice(typ reflect2.Type, dec jsoniter.ValDecoder) jsoniter.ValDecoder { if typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 { return typeDecoder{dec, byteBufferType} } return typeDecoder{dec, listType} } // DecoratorOfArray decorates a value decoder of an array type. 
func (ext decodeExtension) DecoratorOfArray(dec jsoniter.ValDecoder) jsoniter.ValDecoder { return typeDecoder{dec, listType} } type sliceEncoder struct { sliceType reflect2.SliceType jsoniter.ValEncoder } func (enc sliceEncoder) Encode(ptr unsafe.Pointer, stream *jsoniter.Stream) { if enc.sliceType.UnsafeIsNil(ptr) { stream.WriteNil() } else { enc.ValEncoder.Encode(ptr, stream) } } type sliceDecoder struct { sliceType reflect2.SliceType elemDec jsoniter.ValDecoder } func (dec sliceDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { dec.decode(ptr, iter) if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("decoding slice %s: %w", dec.sliceType, iter.Error) } } func (dec sliceDecoder) decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { sliceType := dec.sliceType if iter.ReadNil() { sliceType.UnsafeSetNil(ptr) return } sliceType.UnsafeSet(ptr, sliceType.UnsafeMakeSlice(0, 0)) var length int iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { idx := length length++ sliceType.UnsafeGrow(ptr, length) elem := sliceType.UnsafeGetIndex(ptr, idx) dec.elemDec.Decode(elem, iter) return iter.Error == nil }) } type arrayDecoder struct { arrayType reflect2.ArrayType elemDec jsoniter.ValDecoder } func (dec arrayDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { dec.decode(ptr, iter) if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("decoding array %s: %w", dec.arrayType, iter.Error) } } func (dec arrayDecoder) decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { var ( arrayType = dec.arrayType length int ) iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { if length < arrayType.Len() { idx := length length++ elem := arrayType.UnsafeGetIndex(ptr, idx) dec.elemDec.Decode(elem, iter) } else { iter.Skip() } return iter.Error == nil }) }
dialect/gremlin/encoding/graphson/slice.go
0.762689
0.408454
slice.go
starcoder
package main import ( "github.com/MattSwanson/raylib-go/physics" "github.com/MattSwanson/raylib-go/raylib" ) const ( velocity = 0.5 ) func main() { screenWidth := float32(800) screenHeight := float32(450) rl.SetConfigFlags(rl.FlagMsaa4xHint) rl.InitWindow(int32(screenWidth), int32(screenHeight), "Physac [raylib] - physics movement") // Physac logo drawing position logoX := int32(screenWidth) - rl.MeasureText("Physac", 30) - 10 logoY := int32(15) // Initialize physics and default physics bodies physics.Init() // Create floor and walls rectangle physics body floor := physics.NewBodyRectangle(rl.NewVector2(screenWidth/2, screenHeight), screenWidth, 100, 10) platformLeft := physics.NewBodyRectangle(rl.NewVector2(screenWidth*0.25, screenHeight*0.6), screenWidth*0.25, 10, 10) platformRight := physics.NewBodyRectangle(rl.NewVector2(screenWidth*0.75, screenHeight*0.6), screenWidth*0.25, 10, 10) wallLeft := physics.NewBodyRectangle(rl.NewVector2(-5, screenHeight/2), 10, screenHeight, 10) wallRight := physics.NewBodyRectangle(rl.NewVector2(screenWidth+5, screenHeight/2), 10, screenHeight, 10) // Disable dynamics to floor and walls physics bodies floor.Enabled = false platformLeft.Enabled = false platformRight.Enabled = false wallLeft.Enabled = false wallRight.Enabled = false // Create movement physics body body := physics.NewBodyRectangle(rl.NewVector2(screenWidth/2, screenHeight/2), 50, 50, 1) body.FreezeOrient = true // Constrain body rotation to avoid little collision torque amounts rl.SetTargetFPS(60) for !rl.WindowShouldClose() { // Update created physics objects physics.Update() if rl.IsKeyPressed(rl.KeyR) { // Reset physics input // Reset movement physics body position, velocity and rotation body.Position = rl.NewVector2(screenWidth/2, screenHeight/2) body.Velocity = rl.NewVector2(0, 0) body.SetRotation(0) } // Physics body creation inputs if rl.IsKeyDown(rl.KeyRight) { body.Velocity.X = velocity } else if rl.IsKeyDown(rl.KeyLeft) { body.Velocity.X = -velocity } if 
rl.IsKeyDown(rl.KeyUp) && body.IsGrounded { body.Velocity.Y = -velocity * 4 } rl.BeginDrawing() rl.ClearBackground(rl.Black) rl.DrawFPS(int32(screenWidth)-90, int32(screenHeight)-30) // Draw created physics bodies for i, body := range physics.GetBodies() { vertexCount := physics.GetShapeVerticesCount(i) for j := 0; j < vertexCount; j++ { // Get physics bodies shape vertices to draw lines // NOTE: GetShapeVertex() already calculates rotation transformations vertexA := body.GetShapeVertex(j) jj := 0 if j+1 < vertexCount { // Get next vertex or first to close the shape jj = j + 1 } vertexB := body.GetShapeVertex(jj) rl.DrawLineV(vertexA, vertexB, rl.Green) // Draw a line between two vertex positions } } rl.DrawText("Use 'ARROWS' to move player", 10, 10, 10, rl.White) rl.DrawText("Press 'R' to reset example", 10, 30, 10, rl.White) rl.DrawText("Physac", logoX, logoY, 30, rl.White) rl.DrawText("Powered by", logoX+50, logoY-7, 10, rl.White) rl.EndDrawing() } physics.Close() // Unitialize physics rl.CloseWindow() }
examples/physics/physac/movement/main.go
0.599133
0.463748
main.go
starcoder
package merkle import ( "bytes" amino "github.com/tendermint/go-amino" "github.com/okex/exchain/libs/tendermint/crypto/tmhash" "github.com/okex/exchain/libs/tendermint/libs/kv" ) // Merkle tree from a map. // Leaves are `hash(key) | hash(value)`. // Leaves are sorted before Merkle hashing. type simpleMap struct { kvs kv.Pairs sorted bool } func newSimpleMap() *simpleMap { return &simpleMap{ kvs: nil, sorted: false, } } // Set creates a kv pair of the key and the hash of the value, // and then appends it to simpleMap's kv pairs. func (sm *simpleMap) Set(key string, value []byte) { sm.sorted = false // The value is hashed, so you can // check for equality with a cached value (say) // and make a determination to fetch or not. vhash := tmhash.Sum(value) sm.kvs = append(sm.kvs, kv.Pair{ Key: []byte(key), Value: vhash, }) } // Hash Merkle root hash of items sorted by key // (UNSTABLE: and by value too if duplicate key). func (sm *simpleMap) Hash() []byte { sm.Sort() return hashKVPairs(sm.kvs) } func (sm *simpleMap) Sort() { if sm.sorted { return } sm.kvs.Sort() sm.sorted = true } // Returns a copy of sorted KVPairs. // NOTE these contain the hashed key and value. func (sm *simpleMap) KVPairs() kv.Pairs { sm.Sort() kvs := make(kv.Pairs, len(sm.kvs)) copy(kvs, sm.kvs) return kvs } //---------------------------------------- // A local extension to KVPair that can be hashed. // Key and value are length prefixed and concatenated, // then hashed. type KVPair kv.Pair // Bytes returns key || value, with both the // key and value length prefixed. func (kv KVPair) Bytes() []byte { var b bytes.Buffer err := amino.EncodeByteSliceToBuffer(&b, kv.Key) if err != nil { panic(err) } err = amino.EncodeByteSliceToBuffer(&b, kv.Value) if err != nil { panic(err) } return b.Bytes() } func hashKVPairs(kvs kv.Pairs) []byte { kvsH := make([][]byte, len(kvs)) for i, kvp := range kvs { kvsH[i] = KVPair(kvp).Bytes() } return SimpleHashFromByteSlices(kvsH) }
libs/tendermint/crypto/merkle/simple_map.go
0.760917
0.402803
simple_map.go
starcoder
package main import ( "math" "github.com/pkg/errors" "gonum.org/v1/gonum/stat/distuv" "gorgonia.org/tensor" ) type NN struct { hidden, final *tensor.Dense b0, b1 float64 } func New(input, hidden, output int) (retVal *NN) { r := make([]float64, hidden*input) r2 := make([]float64, hidden*output) fillRandom(r, float64(len(r))) fillRandom(r2, float64(len(r2))) hiddenT := tensor.New(tensor.WithShape(hidden, input), tensor.WithBacking(r)) finalT := tensor.New(tensor.WithShape(output, hidden), tensor.WithBacking(r2)) return &NN{ hidden: hiddenT, final: finalT, } } func (nn *NN) Predict(a tensor.Tensor) (int, error) { if a.Dims() != 1 { return -1, errors.New("Expected a vector") } var m maybe hidden := m.do(func() (tensor.Tensor, error) { return nn.hidden.MatVecMul(a) }) act0 := m.do(func() (tensor.Tensor, error) { return hidden.Apply(sigmoid, tensor.UseUnsafe()) }) final := m.do(func() (tensor.Tensor, error) { return tensor.MatVecMul(nn.final, act0) }) pred := m.do(func() (tensor.Tensor, error) { return final.Apply(sigmoid, tensor.UseUnsafe()) }) if m.err != nil { return -1, m.err } return argmax(pred.Data().([]float64)), nil } func (nn *NN) PredHid(a tensor.Tensor) (act0, pred tensor.Tensor, retVal int) { var m maybe hidden := m.do(func() (tensor.Tensor, error) { return nn.hidden.MatVecMul(a) }) act0 = m.do(func() (tensor.Tensor, error) { return hidden.Apply(sigmoid, tensor.UseUnsafe()) }) final := m.do(func() (tensor.Tensor, error) { return tensor.MatVecMul(nn.final, act0) }) pred = m.do(func() (tensor.Tensor, error) { return final.Apply(sigmoid, tensor.UseUnsafe()) }) if m.err != nil { return nil, nil, 0 } retVal = argmax(pred.Data().([]float64)) return } // X is the image, Y is a one hot vector func (nn *NN) Train(x, y tensor.Tensor, learnRate float64) (cost float64, err error) { // predict var m maybe m.do(func() (tensor.Tensor, error) { err := x.Reshape(x.Shape()[0], 1); return x, err }) m.do(func() (tensor.Tensor, error) { err := y.Reshape(10, 1); return y, err }) 
hidden := m.do(func() (tensor.Tensor, error) { return tensor.MatMul(nn.hidden, x) }) act0 := m.do(func() (tensor.Tensor, error) { return hidden.Apply(sigmoid, tensor.UseUnsafe()) }) final := m.do(func() (tensor.Tensor, error) { return tensor.MatMul(nn.final, act0) }) pred := m.do(func() (tensor.Tensor, error) { return final.Apply(sigmoid, tensor.UseUnsafe()) }) // log.Printf("pred %v, correct %v", argmax(pred.Data().([]float64)), argmax(y.Data().([]float64))) // backpropagation. outputErrors := m.do(func() (tensor.Tensor, error) { return tensor.Sub(y, pred) }) cost = sum(outputErrors.Data().([]float64)) hidErrs := m.do(func() (tensor.Tensor, error) { if err := nn.final.T(); err != nil { return nil, err } defer nn.final.UT() return tensor.MatMul(nn.final, outputErrors) }) if m.err != nil { return 0, m.err } dpred := m.do(func() (tensor.Tensor, error) { return pred.Apply(dsigmoid, tensor.UseUnsafe()) }) m.do(func() (tensor.Tensor, error) { return tensor.Mul(dpred, outputErrors, tensor.UseUnsafe()) }) // m.do(func() (tensor.Tensor, error) { err := act0.T(); return act0, err }) dpred_dfinal := m.do(func() (tensor.Tensor, error) { if err := act0.T(); err != nil { return nil, err } defer act0.UT() return tensor.MatMul(outputErrors, act0) }) dact0 := m.do(func() (tensor.Tensor, error) { return act0.Apply(dsigmoid) }) m.do(func() (tensor.Tensor, error) { return tensor.Mul(hidErrs, dact0, tensor.UseUnsafe()) }) m.do(func() (tensor.Tensor, error) { err := hidErrs.Reshape(hidErrs.Shape()[0], 1); return hidErrs, err }) // m.do(func() (tensor.Tensor, error) { err := x.T(); return x, err }) dcost_dhidden := m.do(func() (tensor.Tensor, error) { if err := x.T(); err != nil { return nil, err } defer x.UT() return tensor.MatMul(hidErrs, x) }) // gradient update m.do(func() (tensor.Tensor, error) { return tensor.Mul(dpred_dfinal, learnRate, tensor.UseUnsafe()) }) m.do(func() (tensor.Tensor, error) { return tensor.Mul(dcost_dhidden, learnRate, tensor.UseUnsafe()) }) m.do(func() 
(tensor.Tensor, error) { return tensor.Add(nn.final, dpred_dfinal, tensor.UseUnsafe()) }) m.do(func() (tensor.Tensor, error) { return tensor.Add(nn.hidden, dcost_dhidden, tensor.UseUnsafe()) }) return cost, m.err } func sigmoid(a float64) float64 { return 1 / (1 + math.Exp(-1*a)) } func dsigmoid(a float64) float64 { return (1 - a) * a } func onesLike(t tensor.Tensor) tensor.Tensor { retVal := t.Clone().(tensor.Tensor) data := retVal.Data().([]float64) for i := range data { data[i] = 1 } return retVal } func fillRandom(a []float64, v float64) { dist := distuv.Uniform{ Min: -1 / math.Sqrt(v), Max: 1 / math.Sqrt(v), } for i := range a { a[i] = dist.Rand() } } type maybe struct { err error } func (m *maybe) do(fn func() (tensor.Tensor, error)) tensor.Tensor { if m.err != nil { return nil } var retVal tensor.Tensor if retVal, m.err = fn(); m.err == nil { return retVal } m.err = errors.WithStack(m.err) return nil }
Chapter06/rawnn.go
0.727589
0.422028
rawnn.go
starcoder
package schulze import "sort" // Voting holds voting state in memory for a list of choices and provides // methods to vote, to export current voting state and to calculate the winner // using the Schulze method. type Voting struct { choices []string matrix [][]voteCount } // NewVoting initializes a new voting with provided choices. func NewVoting(choices ...string) *Voting { return &Voting{ choices: choices, matrix: makeVoteCountMatrix(len(choices)), } } // Ballot represents a single vote with ranked choices. Lowest number represents // the highest rank. Not all choices have to be ranked and multiple choices can // have the same rank. Ranks do not have to be in consecutive order. type Ballot map[string]int func (e *Voting) Vote(b Ballot) error { ranks, err := ballotRanks(b, e.choices) if err != nil { return err } for rank, choices1 := range ranks { rest := ranks[rank+1:] for _, i := range choices1 { for _, choices1 := range rest { for _, j := range choices1 { e.matrix[i][j]++ } } } } return nil } // VoteMatrix returns the state of the voting in a form of VoteMatrix with // pairwise number of votes. func (e *Voting) VoteMatrix() VoteMatrix { l := len(e.matrix) matrix := make(VoteMatrix, l) for i := 0; i < l; i++ { for j := 0; j < l; j++ { if _, ok := matrix[e.choices[i]]; !ok { matrix[e.choices[i]] = make(map[string]int, l) } matrix[e.choices[i]][e.choices[j]] = int(e.matrix[i][j]) } } return matrix } // Compute calculates a sorted list of choices with the total number of wins for // each of them. If there are multiple winners, tie boolean parameter is true. 
func (e *Voting) Compute() (scores []Score, tie bool) { return compute(e.matrix, e.choices) } func ballotRanks(b Ballot, choices []string) ([][]choiceIndex, error) { ballotRanks := make(map[int][]choiceIndex) rankedChoices := make(map[choiceIndex]struct{}) for o, rank := range b { index := getChoiceIndex(o, choices) if index < 0 { return nil, &UnknownChoiceError{o} } ballotRanks[rank] = append(ballotRanks[rank], index) rankedChoices[index] = struct{}{} } rankNumbers := make([]int, 0, len(ballotRanks)) for rank := range ballotRanks { rankNumbers = append(rankNumbers, rank) } sort.Slice(rankNumbers, func(i, j int) bool { return rankNumbers[i] < rankNumbers[j] }) ranks := make([][]choiceIndex, 0) for _, rankNumber := range rankNumbers { ranks = append(ranks, ballotRanks[rankNumber]) } unranked := make([]choiceIndex, 0) for i, l := choiceIndex(0), len(choices); int(i) < l; i++ { if _, ok := rankedChoices[i]; !ok { unranked = append(unranked, i) } } if len(unranked) > 0 { ranks = append(ranks, unranked) } return ranks, nil } type choiceIndex int func getChoiceIndex(choice string, choices []string) choiceIndex { for i, o := range choices { if o == choice { return choiceIndex(i) } } return -1 }
voting.go
0.765593
0.57093
voting.go
starcoder
package universe import ( "math" "github.com/influxdata/flux" "github.com/influxdata/flux/array" "github.com/influxdata/flux/codes" "github.com/influxdata/flux/execute" "github.com/influxdata/flux/internal/errors" "github.com/influxdata/flux/interpreter" "github.com/influxdata/flux/plan" "github.com/influxdata/flux/runtime" "github.com/influxdata/flux/semantic" ) const CovarianceKind = "covariance" type CovarianceOpSpec struct { PearsonCorrelation bool `json:"pearsonr"` ValueDst string `json:"valueDst"` Columns []string `json:"column"` } func init() { var covarianceSignature = runtime.MustLookupBuiltinType("universe", "covariance") runtime.RegisterPackageValue("universe", CovarianceKind, flux.MustValue(flux.FunctionValue(CovarianceKind, createCovarianceOpSpec, covarianceSignature))) flux.RegisterOpSpec(CovarianceKind, newCovarianceOp) plan.RegisterProcedureSpec(CovarianceKind, newCovarianceProcedure, CovarianceKind) execute.RegisterTransformation(CovarianceKind, createCovarianceTransformation) } func createCovarianceOpSpec(args flux.Arguments, a *flux.Administration) (flux.OperationSpec, error) { if err := a.AddParentFromArgs(args); err != nil { return nil, err } spec := new(CovarianceOpSpec) pearsonr, ok, err := args.GetBool("pearsonr") if err != nil { return nil, err } else if ok { spec.PearsonCorrelation = pearsonr } label, ok, err := args.GetString("valueDst") if err != nil { return nil, err } else if ok { spec.ValueDst = label } else { spec.ValueDst = execute.DefaultValueColLabel } if cols, err := args.GetRequiredArray("columns", semantic.String); err != nil { return nil, err } else { columns, err := interpreter.ToStringArray(cols) if err != nil { return nil, err } spec.Columns = columns } if len(spec.Columns) != 2 { return nil, errors.New(codes.Invalid, "must provide exactly two columns") } return spec, nil } func newCovarianceOp() flux.OperationSpec { return new(CovarianceOpSpec) } func (s *CovarianceOpSpec) Kind() flux.OperationKind { return CovarianceKind 
} type CovarianceProcedureSpec struct { plan.DefaultCost PearsonCorrelation bool ValueLabel string Columns []string } func newCovarianceProcedure(qs flux.OperationSpec, pa plan.Administration) (plan.ProcedureSpec, error) { spec, ok := qs.(*CovarianceOpSpec) if !ok { return nil, errors.Newf(codes.Internal, "invalid spec type %T", qs) } cs := CovarianceProcedureSpec{ PearsonCorrelation: spec.PearsonCorrelation, ValueLabel: spec.ValueDst, } cs.Columns = make([]string, len(spec.Columns)) copy(cs.Columns, spec.Columns) return &cs, nil } func (s *CovarianceProcedureSpec) Kind() plan.ProcedureKind { return CovarianceKind } func (s *CovarianceProcedureSpec) Copy() plan.ProcedureSpec { ns := new(CovarianceProcedureSpec) *ns = *s if s.Columns != nil { ns.Columns = make([]string, len(s.Columns)) copy(ns.Columns, s.Columns) } return ns } // TriggerSpec implements plan.TriggerAwareProcedureSpec func (s *CovarianceProcedureSpec) TriggerSpec() plan.TriggerSpec { return plan.NarrowTransformationTriggerSpec{} } type CovarianceTransformation struct { execute.ExecutionNode d execute.Dataset cache execute.TableBuilderCache spec CovarianceProcedureSpec n, xm1, ym1, xm2, ym2, xym2 float64 } func createCovarianceTransformation(id execute.DatasetID, mode execute.AccumulationMode, spec plan.ProcedureSpec, a execute.Administration) (execute.Transformation, execute.Dataset, error) { s, ok := spec.(*CovarianceProcedureSpec) if !ok { return nil, nil, errors.Newf(codes.Internal, "invalid spec type %T", spec) } cache := execute.NewTableBuilderCache(a.Allocator()) d := execute.NewDataset(id, mode, cache) t := NewCovarianceTransformation(d, cache, s) return t, d, nil } func NewCovarianceTransformation(d execute.Dataset, cache execute.TableBuilderCache, spec *CovarianceProcedureSpec) *CovarianceTransformation { return &CovarianceTransformation{ d: d, cache: cache, spec: *spec, } } func (t *CovarianceTransformation) RetractTable(id execute.DatasetID, key flux.GroupKey) error { return 
t.d.RetractTable(key) } func (t *CovarianceTransformation) Process(id execute.DatasetID, tbl flux.Table) error { cols := tbl.Cols() builder, created := t.cache.TableBuilder(tbl.Key()) if !created { return errors.Newf(codes.FailedPrecondition, "covariance found duplicate table with key: %v", tbl.Key()) } err := execute.AddTableKeyCols(tbl.Key(), builder) if err != nil { return err } valueIdx, err := builder.AddCol(flux.ColMeta{ Label: t.spec.ValueLabel, Type: flux.TFloat, }) if err != nil { return err } xIdx := execute.ColIdx(t.spec.Columns[0], cols) if xIdx < 0 { return errors.Newf(codes.FailedPrecondition, "specified column does not exist in table: %v", t.spec.Columns[0]) } yIdx := execute.ColIdx(t.spec.Columns[1], cols) if yIdx < 0 { return errors.Newf(codes.FailedPrecondition, "specified column does not exist in table: %v", t.spec.Columns[1]) } if cols[xIdx].Type != cols[yIdx].Type { return errors.New(codes.FailedPrecondition, "cannot compute the covariance between different types") } t.reset() err = tbl.Do(func(cr flux.ColReader) error { switch typ := cols[xIdx].Type; typ { case flux.TFloat: t.DoFloat(cr.Floats(xIdx), cr.Floats(yIdx)) default: return errors.Newf(codes.Invalid, "covariance does not support %v", typ) } return nil }) if err != nil { return err } if err := execute.AppendKeyValues(tbl.Key(), builder); err != nil { return err } return builder.AppendFloat(valueIdx, t.value()) } func (t *CovarianceTransformation) reset() { t.n = 0 t.xm1 = 0 t.ym1 = 0 t.xm2 = 0 t.ym2 = 0 t.xym2 = 0 } func (t *CovarianceTransformation) DoFloat(xs, ys *array.Float) { var xdelta, ydelta, xdelta2, ydelta2 float64 for i := 0; i < xs.Len(); i++ { if xs.IsNull(i) || ys.IsNull(i) { continue } x, y := xs.Value(i), ys.Value(i) t.n++ // Update means xdelta = x - t.xm1 ydelta = y - t.ym1 t.xm1 += xdelta / t.n t.ym1 += ydelta / t.n // Update variance sums xdelta2 = x - t.xm1 ydelta2 = y - t.ym1 t.xm2 += xdelta * xdelta2 t.ym2 += ydelta * ydelta2 // Update covariance sum // 
Covariance is symetric so we do not need to compute the yxm2 value. t.xym2 += xdelta * ydelta2 } } func (t *CovarianceTransformation) value() float64 { if t.n < 2 { return math.NaN() } if t.spec.PearsonCorrelation { return (t.xym2) / math.Sqrt(t.xm2*t.ym2) } return t.xym2 / (t.n - 1) } func (t *CovarianceTransformation) UpdateWatermark(id execute.DatasetID, mark execute.Time) error { return t.d.UpdateWatermark(mark) } func (t *CovarianceTransformation) UpdateProcessingTime(id execute.DatasetID, pt execute.Time) error { return t.d.UpdateProcessingTime(pt) } func (t *CovarianceTransformation) Finish(id execute.DatasetID, err error) { t.d.Finish(err) }
stdlib/universe/covariance.go
0.806815
0.402333
covariance.go
starcoder
// Package diff implements the Myers diff algorithm. package diff import "strings" // Sources: // https://blog.jcoglan.com/2017/02/17/the-myers-diff-algorithm-part-3/ // https://www.codeproject.com/Articles/42279/%2FArticles%2F42279%2FInvestigating-Myers-diff-algorithm-Part-1-of-2 type Op struct { Kind OpKind Content []string // content from b I1, I2 int // indices of the line in a J1 int // indices of the line in b, J2 implied by len(Content) } type OpKind int const ( Delete OpKind = iota Insert Equal ) func (k OpKind) String() string { switch k { case Delete: return "delete" case Insert: return "insert" case Equal: return "equal" default: panic("unknown operation kind") } } func ApplyEdits(a []string, operations []*Op) []string { var b []string var prevI2 int for _, op := range operations { // catch up to latest indices if op.I1-prevI2 > 0 { for _, c := range a[prevI2:op.I1] { b = append(b, c) } } switch op.Kind { case Equal, Insert: b = append(b, op.Content...) } prevI2 = op.I2 } // final catch up if len(a)-prevI2 > 0 { for _, c := range a[prevI2:len(a)] { b = append(b, c) } } return b } // Operations returns the list of operations to convert a into b, consolidating // operations for multiple lines and not including equal lines. 
func Operations(a, b []string) []*Op { trace, offset := shortestEditSequence(a, b) snakes := backtrack(trace, len(a), len(b), offset) M, N := len(a), len(b) var i int solution := make([]*Op, len(a)+len(b)) add := func(op *Op, i2, j2 int) { if op == nil { return } op.I2 = i2 if op.Kind == Insert { op.Content = b[op.J1:j2] } solution[i] = op i++ } x, y := 0, 0 for _, snake := range snakes { if len(snake) < 2 { continue } var op *Op // delete (horizontal) for snake[0]-snake[1] > x-y { if op == nil { op = &Op{ Kind: Delete, I1: x, J1: y, } } x++ if x == M { break } } add(op, x, y) op = nil // insert (vertical) for snake[0]-snake[1] < x-y { if op == nil { op = &Op{ Kind: Insert, I1: x, J1: y, } } y++ } add(op, x, y) op = nil // equal (diagonal) for x < snake[0] { x++ y++ } if x >= M && y >= N { break } } return solution[:i] } // backtrack uses the trace for the edit sequence computation and returns the // "snakes" that make up the solution. A "snake" is a single deletion or // insertion followed by zero or diagnonals. func backtrack(trace [][]int, x, y, offset int) [][]int { snakes := make([][]int, len(trace)) d := len(trace) - 1 for ; x > 0 && y > 0 && d > 0; d-- { V := trace[d] if len(V) == 0 { continue } snakes[d] = []int{x, y} k := x - y var kPrev int if k == -d || (k != d && V[k-1+offset] < V[k+1+offset]) { kPrev = k + 1 } else { kPrev = k - 1 } x = V[kPrev+offset] y = x - kPrev } if x < 0 || y < 0 { return snakes } snakes[d] = []int{x, y} return snakes } // shortestEditSequence returns the shortest edit sequence that converts a into b. func shortestEditSequence(a, b []string) ([][]int, int) { M, N := len(a), len(b) V := make([]int, 2*(N+M)+1) offset := N + M trace := make([][]int, N+M+1) // Iterate through the maximum possible length of the SES (N+M). for d := 0; d <= N+M; d++ { copyV := make([]int, len(V)) // k lines are represented by the equation y = x - k. We move in // increments of 2 because end points for even d are on even k lines. 
for k := -d; k <= d; k += 2 { // At each point, we either go down or to the right. We go down if // k == -d, and we go to the right if k == d. We also prioritize // the maximum x value, because we prefer deletions to insertions. var x int if k == -d || (k != d && V[k-1+offset] < V[k+1+offset]) { x = V[k+1+offset] // down } else { x = V[k-1+offset] + 1 // right } y := x - k // Diagonal moves while we have equal contents. for x < M && y < N && a[x] == b[y] { x++ y++ } V[k+offset] = x // Return if we've exceeded the maximum values. if x == M && y == N { // Makes sure to save the state of the array before returning. copy(copyV, V) trace[d] = copyV return trace, offset } } // Save the state of the array. copy(copyV, V) trace[d] = copyV } return nil, 0 } func SplitLines(text string) []string { lines := strings.SplitAfter(text, "\n") if lines[len(lines)-1] == "" { lines = lines[:len(lines)-1] } return lines }
tenant/vendor/golang.org/x/tools/internal/lsp/diff/diff.go
0.697712
0.468669
diff.go
starcoder
package file import ( "sync" "time" "github.com/elastic/beats/libbeat/logp" ) // States handles list of FileState. One must use NewStates to instantiate a // file states regisry. Using the zero-value is not safe. type States struct { sync.RWMutex // states store states []State // idx maps state IDs to state indexes for fast lookup and modifications. idx map[string]int } // NewStates generates a new states registry. func NewStates() *States { return &States{ states: nil, idx: map[string]int{}, } } // Update updates a state. If previous state didn't exist, new one is created func (s *States) Update(newState State) { s.UpdateWithTs(newState, time.Now()) } // UpdateWithTs updates a state, assigning the given timestamp. // If previous state didn't exist, new one is created func (s *States) UpdateWithTs(newState State, ts time.Time) { s.Lock() defer s.Unlock() id := newState.ID() index := s.findPrevious(id) newState.Timestamp = ts if index >= 0 { s.states[index] = newState } else { // No existing state found, add new one s.idx[id] = len(s.states) s.states = append(s.states, newState) logp.Debug("input", "New state added for %s", newState.Source) } } // FindPrevious lookups a registered state, that matching the new state. // Returns a zero-state if no match is found. func (s *States) FindPrevious(newState State) State { s.RLock() defer s.RUnlock() i := s.findPrevious(newState.ID()) if i < 0 { return State{} } return s.states[i] } // findPrevious returns the previous state for the file. // In case no previous state exists, index -1 is returned func (s *States) findPrevious(id string) int { if i, exists := s.idx[id]; exists { return i } return -1 } // Cleanup cleans up the state array. All states which are older then `older` are removed // The number of states that were cleaned up and number of states that can be // cleaned up in the future is returned. 
func (s *States) Cleanup() (int, int) { s.Lock() defer s.Unlock() currentTime := time.Now() statesBefore := len(s.states) numCanExpire := 0 L := len(s.states) for i := 0; i < L; { state := &s.states[i] canExpire := state.TTL > 0 expired := (canExpire && currentTime.Sub(state.Timestamp) > state.TTL) if state.TTL == 0 || expired { if !state.Finished { logp.Err("State for %s should have been dropped, but couldn't as state is not finished.", state.Source) i++ continue } delete(s.idx, state.ID()) logp.Debug("state", "State removed for %v because of older: %v", state.Source, state.TTL) L-- if L != i { s.states[i] = s.states[L] s.idx[s.states[i].ID()] = i } } else { i++ if canExpire { numCanExpire++ } } } s.states = s.states[:L] return statesBefore - L, numCanExpire } // Count returns number of states func (s *States) Count() int { s.RLock() defer s.RUnlock() return len(s.states) } // GetStates creates copy of the file states. func (s *States) GetStates() []State { s.RLock() defer s.RUnlock() newStates := make([]State, len(s.states)) copy(newStates, s.states) return newStates } // SetStates overwrites all internal states with the given states array func (s *States) SetStates(states []State) { s.Lock() defer s.Unlock() s.states = states // create new index s.idx = map[string]int{} for i := range states { s.idx[states[i].ID()] = i } } // Copy create a new copy of the states object func (s *States) Copy() *States { new := NewStates() new.SetStates(s.GetStates()) return new }
filebeat/input/file/states.go
0.642545
0.455865
states.go
starcoder
// Package flow contains a number of constructors for Flow nodes // that are convenient for testing. package flow import ( "net/url" "regexp" "github.com/grailbio/reflow" "github.com/grailbio/reflow/flow" "github.com/grailbio/reflow/values" ) // Exec constructs a new flow.OpExec node. func Exec(image, cmd string, resources reflow.Resources, deps ...*flow.Flow) *flow.Flow { return &flow.Flow{Op: flow.Exec, Deps: deps, Cmd: cmd, Image: image, Resources: resources} } // Intern constructs a new flow.OpIntern node. func Intern(rawurl string) *flow.Flow { u, err := url.Parse(rawurl) if err != nil { panic(err) } return &flow.Flow{Op: flow.Intern, URL: u} } // Extern constructs a new flow.Extern node. func Extern(rawurl string, dep *flow.Flow) *flow.Flow { u, err := url.Parse(rawurl) if err != nil { panic(err) } return &flow.Flow{Op: flow.Extern, Deps: []*flow.Flow{dep}, URL: u} } // Groupby constructs a new flow.Groupby node. func Groupby(re string, dep *flow.Flow) *flow.Flow { return &flow.Flow{Op: flow.Groupby, Deps: []*flow.Flow{dep}, Re: regexp.MustCompile(re)} } // Map constructs a new flow.Map node. func Map(fn func(*flow.Flow) *flow.Flow, dep *flow.Flow) *flow.Flow { f := &flow.Flow{Op: flow.Map, Deps: []*flow.Flow{dep}, MapFunc: fn} f.MapInit() return f } // Collect constructs a new flow.Collect node. func Collect(re, repl string, dep *flow.Flow) *flow.Flow { return &flow.Flow{Op: flow.Collect, Re: regexp.MustCompile(re), Repl: repl, Deps: []*flow.Flow{dep}} } // Merge constructs a new flow.Merge node. func Merge(deps ...*flow.Flow) *flow.Flow { return &flow.Flow{Op: flow.Merge, Deps: deps} } // Pullup constructs a new flow.Pullup node. func Pullup(deps ...*flow.Flow) *flow.Flow { return &flow.Flow{Op: flow.Pullup, Deps: deps} } // Val constructs a new flow.Val node. func Val(v reflow.Fileset) *flow.Flow { return &flow.Flow{Op: flow.Val, Value: values.T(v), State: flow.Done} } // Data constructs a new reflow.Data node. 
func Data(b []byte) *flow.Flow { return &flow.Flow{Op: flow.Data, Data: b} }
test/flow/constructor.go
0.771585
0.539105
constructor.go
starcoder
package ckks import ( "github.com/ldsec/lattigo/ring" "math/big" ) // GaloisGen is an integer of order N/2 modulo M and that spans Z_M with the integer -1. The j-th ring automorphism takes the root zeta to zeta^(5j). // Any other integer or order N/2 modulo M and congruent with 1 modulo 4 could be used instead. const GaloisGen uint64 = 5 // Context is a struct that contains all the elements required to instantiate the CKKS Scheme. This includes the parameters (polynomial degree, ciphertext modulus, // Gaussian sampler, polynomial contexts and other parameters required for the homomorphic operations). type Context struct { // Context parameters logN uint64 logQ uint64 scale float64 n uint64 maxSlots uint64 // Number of available levels levels uint64 bigintChain []*big.Int // Contexts contextQ *ring.Context contextP *ring.Context contextQP *ring.Context // Samplers gaussianSampler *ring.KYSampler // Rotation params galElConjugate uint64 galElRotColLeft []uint64 galElRotColRight []uint64 } // NewContext creates a new Context with the given parameters. It returns an error if one of the parameters would not ensure the // correctness of the scheme (but it does not check for security). 
func newContext(params *Parameters) (ckkscontext *Context) { if !params.isValid { panic("cannot newContext: parameters are invalid (check if the generation was done properly)") } var err error ckkscontext = new(Context) ckkscontext.logN = uint64(params.LogN) ckkscontext.n = 1 << uint64(params.LogN) ckkscontext.maxSlots = 1 << (uint64(params.LogN) - 1) ckkscontext.scale = params.Scale ckkscontext.levels = uint64(len(params.Qi)) N := ckkscontext.n ckkscontext.bigintChain = genBigIntChain(params.Qi) if ckkscontext.contextQ, err = ring.NewContextWithParams(N, params.Qi); err != nil { panic(err) } if len(params.Pi) != 0 { if ckkscontext.contextP, err = ring.NewContextWithParams(N, params.Pi); err != nil { panic(err) } } if ckkscontext.contextQP, err = ring.NewContextWithParams(N, append(params.Qi, params.Pi...)); err != nil { panic(err) } ckkscontext.gaussianSampler = ckkscontext.contextQP.NewKYSampler(params.Sigma, int(6*params.Sigma)) ckkscontext.galElRotColLeft = ring.GenGaloisParams(N, GaloisGen) ckkscontext.galElRotColRight = ring.GenGaloisParams(N, ring.ModExp(GaloisGen, 2*N-1, 2*N)) ckkscontext.galElConjugate = 2*N - 1 return ckkscontext }
ckks/ckks.go
0.683208
0.441011
ckks.go
starcoder
package ioutil import ( "bufio" "io" ) var ( nByte byte = 10 // the byte that corresponds to the '\n' rune. rByte byte = 13 // the byte that corresponds to the '\r' rune. ) // DelimitedReader reduces the custom delimiter to `\n`. type DelimitedReader struct { r *bufio.Reader delimiter []rune // Select can have upto 2 characters as delimiter. assignEmpty bool // Decides whether the next read byte should be discarded. } // NewDelimitedReader detects the custom delimiter and replaces with `\n`. func NewDelimitedReader(r io.Reader, delimiter []rune) *DelimitedReader { return &DelimitedReader{r: bufio.NewReader(r), delimiter: delimiter, assignEmpty: false} } // Reads and replaces the custom delimiter with `\n`. func (r *DelimitedReader) Read(p []byte) (n int, err error) { n, err = r.r.Read(p) if err != nil { return } for i, b := range p { if r.assignEmpty { swapAndNullify(p, i) r.assignEmpty = false continue } if b == rByte && rune(b) != r.delimiter[0] { // Replace the carriage returns with `\n`. // Mac styled csv will have `\r` as their record delimiter. p[i] = nByte } else if rune(b) == r.delimiter[0] { // Eg, `\r\n`,`ab`,`a` are valid delimiters if i+1 == len(p) && len(r.delimiter) > 1 { // If the first delimiter match falls on the boundary, // Peek the next byte and if it matches, discard it in the next byte read. if nextByte, nerr := r.r.Peek(1); nerr == nil { if rune(nextByte[0]) == r.delimiter[1] { p[i] = nByte // To Discard in the next read. r.assignEmpty = true } } } else if len(r.delimiter) > 1 && rune(p[i+1]) == r.delimiter[1] { // The second delimiter falls in the same chunk. p[i] = nByte r.assignEmpty = true } else if len(r.delimiter) == 1 { // Replace with `\n` incase of single charecter delimiter match. p[i] = nByte } } } return } // Occupy the first byte space and nullify the last byte. func swapAndNullify(p []byte, n int) { for i := n; i < len(p)-1; i++ { p[i] = p[i+1] } p[len(p)-1] = 0 }
pkg/ioutil/delimited-reader.go
0.638497
0.438364
delimited-reader.go
starcoder
package model import ( "sort" ) const SeparatorByte byte = 255 var ( emptyLabelSignature = hashNew() ) func LabelsToSignature(labels map[string]string) uint64 { if len(labels) == 0 { return emptyLabelSignature } labelNames := make([]string, 0, len(labels)) for labelName := range labels { labelNames = append(labelNames, labelName) } sort.Strings(labelNames) sum := hashNew() for _, labelName := range labelNames { sum = hashAdd(sum, labelName) sum = hashAddByte(sum, SeparatorByte) sum = hashAdd(sum, labels[labelName]) sum = hashAddByte(sum, SeparatorByte) } return sum } func labelSetToFingerprint(ls LabelSet) Fingerprint { if len(ls) == 0 { return Fingerprint(emptyLabelSignature) } labelNames := make(LabelNames, 0, len(ls)) for labelName := range ls { labelNames = append(labelNames, labelName) } sort.Sort(labelNames) sum := hashNew() for _, labelName := range labelNames { sum = hashAdd(sum, string(labelName)) sum = hashAddByte(sum, SeparatorByte) sum = hashAdd(sum, string(ls[labelName])) sum = hashAddByte(sum, SeparatorByte) } return Fingerprint(sum) } func labelSetToFastFingerprint(ls LabelSet) Fingerprint { if len(ls) == 0 { return Fingerprint(emptyLabelSignature) } var result uint64 for labelName, labelValue := range ls { sum := hashNew() sum = hashAdd(sum, string(labelName)) sum = hashAddByte(sum, SeparatorByte) sum = hashAdd(sum, string(labelValue)) result ^= sum } return Fingerprint(result) } func SignatureForLabels(m Metric, labels ...LabelName) uint64 { if len(labels) == 0 { return emptyLabelSignature } sort.Sort(LabelNames(labels)) sum := hashNew() for _, label := range labels { sum = hashAdd(sum, string(label)) sum = hashAddByte(sum, SeparatorByte) sum = hashAdd(sum, string(m[label])) sum = hashAddByte(sum, SeparatorByte) } return sum } func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 { if len(m) == 0 { return emptyLabelSignature } labelNames := make(LabelNames, 0, len(m)) for labelName := range m { if _, exclude := 
labels[labelName]; !exclude { labelNames = append(labelNames, labelName) } } if len(labelNames) == 0 { return emptyLabelSignature } sort.Sort(labelNames) sum := hashNew() for _, labelName := range labelNames { sum = hashAdd(sum, string(labelName)) sum = hashAddByte(sum, SeparatorByte) sum = hashAdd(sum, string(m[labelName])) sum = hashAddByte(sum, SeparatorByte) } return sum }
vendor/github.com/prometheus/common/model/signature.go
0.572245
0.428592
signature.go
starcoder
package network import ( "fmt" "github.com/yaricom/goNEAT/v2/neat" ) // Link is a connection from one node to another with an associated weight. // It can be marked as recurrent. type Link struct { // Weight of connection Weight float64 // NNode inputting into the link InNode *NNode // NNode that the link affects OutNode *NNode // If TRUE the link is recurrent IsRecurrent bool // If TRUE the link is time delayed IsTimeDelayed bool // Points to a trait of parameters for genetic creation Trait *neat.Trait /* ************ LEARNING PARAMETERS *********** */ // The following parameters are for use in neurons that learn through habituation, // sensitization, or Hebbian-type processes Params []float64 // The amount of weight adjustment AddedWeight float64 } // NewLink Creates new link with specified weight, input and output neurons connected recurrently or not. func NewLink(weight float64, inputNode, outNode *NNode, recurrent bool) *Link { link := newLink(weight) link.InNode = inputNode link.OutNode = outNode link.IsRecurrent = recurrent return link } // NewLinkWithTrait Creates new Link with specified Trait func NewLinkWithTrait(trait *neat.Trait, weight float64, inputNode, outNode *NNode, recurrent bool) *Link { link := newLink(weight) link.InNode = inputNode link.OutNode = outNode link.IsRecurrent = recurrent link.Trait = trait link.deriveTrait(trait) return link } // NewLinkCopy The copy constructor to create new link with parameters taken from provided ones and connecting specified nodes func NewLinkCopy(l *Link, inputNode, outNode *NNode) *Link { link := newLink(l.Weight) link.InNode = inputNode link.OutNode = outNode link.Trait = l.Trait link.deriveTrait(l.Trait) link.IsRecurrent = l.IsRecurrent return link } // The private default constructor func newLink(weight float64) *Link { return &Link{ Weight: weight, } } // IsEqualGenetically Checks if this link is genetically equal to provided one, i.e. connects nodes with the same IDs and has equal // recurrent flag. 
I.e. if both links represent the same Gene. func (l *Link) IsEqualGenetically(ol *Link) bool { sameInNode := l.InNode.Id == ol.InNode.Id sameOutNode := l.OutNode.Id == ol.OutNode.Id sameRecurrent := l.IsRecurrent == ol.IsRecurrent return sameInNode && sameOutNode && sameRecurrent } // The Link methods implementation func (l *Link) String() string { return fmt.Sprintf("[Link: (%s <-> %s), weight: %.3f, recurrent: %t, time delayed: %t]", l.InNode, l.OutNode, l.Weight, l.IsRecurrent, l.IsTimeDelayed) } // Copy trait parameters into this link's parameters func (l *Link) deriveTrait(t *neat.Trait) { if t != nil { l.Params = make([]float64, len(t.Params)) for i, p := range t.Params { l.Params[i] = p } } }
neat/network/link.go
0.675872
0.400779
link.go
starcoder
package metar // Routines to implement an on-demand data source for NOAA metar data, stored in DS // All 'Report' objects for the same UTC day stored into a singleton DS object, called a DayReport. // We store one such object per airport per day. // LookupOrFetch (via cron+ui:lookupHandler) will fetch new data from NOAA and store it, if needed. // Lookup will simply try to look it up, and fail if not found. // The main consumer of this stuff is prefetching by report context, which is used when // rendering resultsets, and also in various ui/ routines that pull the metar out of the context. // Given reporting load, I think the memcaching can be skipped. import( "fmt" "time" "golang.org/x/net/context" "github.com/skypies/util/date" "github.com/skypies/util/gcp/ds" sprovider "github.com/skypies/util/gcp/singleton" ) const DateFormat = "2006-01-02" var( DefaultStation = "KSFO" ErrDayReportUninitialized = fmt.Errorf("DayReport was uninitialized") ErrTimeNotInDayReport = fmt.Errorf("The time was not within the DayReport's UTC day") ErrNotFound = fmt.Errorf("No Metar record found") ) type DayReport struct { IcaoAirport string // E.g. "KSFO" Time time.Time // A time within the UTC day for this report Reports [24]Report // One report per UTC hour; must always have exactly 24 slots! 
} func NewDayReport() *DayReport { return &DayReport{} } func (dr *DayReport)IsInitialized() bool { return !dr.Time.IsZero() } // {{{ dr.String func (dr DayReport)String() string { str := "[" + dr.IcaoAirport + "] " if dr.Time.IsZero() { str += "t=0 {" } else { str += dr.Time.Format(DateFormat) + dr.Time.Format("MST")+ " {" } for _,r := range dr.Reports { if r.Raw == "" { str += " " } else { delta := StandardPressureInHg - r.AltimeterSettingInHg str += fmt.Sprintf("%c", pressureDeltaToRune(delta)) } } str += "}" return str } var downRunes = []rune{ 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't'} var upRunes = []rune{ 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T'} func pressureDeltaToRune(delta float64) rune { n := int(50.0 * delta) // typical range of delta: [-30,30] if n == 0 { return '.' } runeset := upRunes if n < 0 { runeset = downRunes n *= -1 } if n >= len(runeset) { n = len(runeset) -1 } return runeset[n] } // }}} // {{{ dr.Insert func (dr *DayReport)Insert(mr Report) error { if dr.Time.IsZero() { return ErrDayReportUninitialized } else if dr.Time.After(mr.Time) || dr.Time.AddDate(0,0,1).Before(mr.Time) { return ErrTimeNotInDayReport } dr.Reports[mr.Time.UTC().Hour()] = mr return nil } // }}} // {{{ dr.Lookup // Nil if nothing found func (dr *DayReport)Lookup(t time.Time) (*Report,error) { if dr.Time.IsZero() { return nil, ErrDayReportUninitialized } if dr.Time.After(t) || dr.Time.AddDate(0,0,1).Before(t) { return nil, ErrTimeNotInDayReport } h := t.UTC().Hour() if dr.Reports[h].Raw == "" { return nil, nil } return &dr.Reports[h],nil } // }}} // {{{ toMetarSingletonKey func toMetarSingletonKey(loc string, t time.Time) string { tstamp := date.TruncateToUTCDay(t).Format("2006-01-02") return fmt.Sprintf("metar:%s:%s", loc, tstamp) } // }}} // {{{ LookupDayReport // Pull an entire UTC day's worth of reports. 
func LookupDayReport(ctx context.Context, p ds.DatastoreProvider, loc string, t time.Time) (*DayReport, error) { sp := sprovider.NewProvider(p) dr := NewDayReport() t = t.UTC() key := toMetarSingletonKey(loc, t) err := sp.ReadSingleton(ctx, key, nil, dr) if err != nil { return nil,err } else if ! dr.IsInitialized() { return nil,ErrNotFound } else { return dr,nil } } // }}} // {{{ directLookup // This just looks up the relevant slot. No smarts about 56m past the hour. Use with care. func directLookup(ctx context.Context, p ds.DatastoreProvider, loc string, t time.Time) (*Report, error) { if dr,err := LookupDayReport(ctx, p, loc, t); err != nil { return nil, err } else if mr,err := dr.Lookup(t); err != nil { return nil,err } else if mr == nil { return nil,ErrNotFound } else { return mr,nil } } // }}} // {{{ LookupOrFetch func LookupOrFetch(ctx context.Context, p ds.DatastoreProvider, loc string, t time.Time) (*Report, error, string) { sp := sprovider.NewProvider(p) dr := NewDayReport() prevDr := NewDayReport() // when we're called for 00:00, we need to update 23:56 for prev day t = t.UTC() key := toMetarSingletonKey(loc, t) str := fmt.Sprintf("[LookupOrFetch] key=%s\n", key) err := sp.ReadSingleton(ctx, key, nil, dr) str += fmt.Sprintf("*** ReadSingleton\n* err : %v\n* dr : %s\n", err, dr) // Try to fetch previous day; ignore errors prevKey := toMetarSingletonKey(loc, t.AddDate(0,0,-1)) sp.ReadSingleton(ctx, prevKey, nil, prevDr) if prevDr.IsInitialized() { str += fmt.Sprintf("* prev: %s\n", prevDr) } shouldPersistChanges := false shouldPersistChangesToPrevDay := false if err != nil { str += fmt.Sprintf("*** DS lookup fail\n* err: %v\n", err) return nil,err,str } else if ! 
dr.IsInitialized() { str += fmt.Sprintf("*** DS lookup OK, but no day found\n") dr = NewDayReport() dr.IcaoAirport = loc dr.Time = date.TruncateToUTCDay(t) str += fmt.Sprintf("* fresh dr: %s\n", dr) shouldPersistChanges = true } else { str += fmt.Sprintf("*** DS lookup OK !\n* fetched dr: %s\n", dr) } str += fmt.Sprintf("*** dr.Lookup\n dr.Start= %s\n dr.End = %s\n t = %s\n", dr.Time, dr.Time.AddDate(0,0,1).Add(-1*time.Second), t.UTC()) mr,err := dr.Lookup(t) if err != nil { str += fmt.Sprintf("* err: %v\n", err) return nil,err,str } else if mr == nil { str += fmt.Sprintf("* dr.Lookup came up empty; going to NOAA\n") reps,err := fetchReportsFromNOAA(p.HTTPClient(ctx), loc, t.Add(-1*time.Hour), t.Add(time.Hour)) str += fmt.Sprintf("[fetchReportsFromNOAA]\n -- err=%v\n -- ar: %v\n", err, reps) for _,mr := range reps { if err := dr.Insert(mr); err == ErrTimeNotInDayReport { if prevDr.IsInitialized() { err2 := prevDr.Insert(mr) str += fmt.Sprintf(" -! %s {%s} %v\n", mr, prevDr, err2) shouldPersistChangesToPrevDay = true } } str += fmt.Sprintf(" -- %s {%s} %v\n", mr, dr, err) } shouldPersistChanges = true } str += fmt.Sprintf("*** final dr: %s\n", dr) str += fmt.Sprintf("* final mr: %s\n* shouldPersist: %v\n", mr, shouldPersistChanges) if shouldPersistChanges { if err := sp.WriteSingleton(ctx, key, nil, dr); err != nil { return nil,err,str } } if shouldPersistChangesToPrevDay { if err := sp.WriteSingleton(ctx, prevKey, nil, prevDr); err != nil { return nil,err,str } } return mr,nil,str } // }}} // {{{ LookupReport // This is the main API entrypoint. Does not fetch new data; only looks up from datastore. // Metar entries are published at 56m past the hour. Appengine hourly // cron entries can only run on the hour. The net result is that we // will *never* have a Metar entry for the current hour; we must // always lookup the previous hour. 
func LookupReport(ctx context.Context, p ds.DatastoreProvider, loc string, t time.Time) (*Report, error) { return directLookup(ctx, p, loc, t.Add(time.Hour * -1)) // See comment above, about the -1h } // }}} // {{{ LookupArchive // The actually used API entrypoint. Generates a metar archive for a timespan. func LookupArchive(ctx context.Context, p ds.DatastoreProvider, loc string, s,e time.Time) (*Archive, error) { ar := NewArchive() for _,t := range date.Timeslots(s.UTC(), e.UTC(), time.Hour) { mr,err := directLookup(ctx, p, loc, t) if err == ErrNotFound { continue } else if err != nil { return nil,err } else { ar.Add(*mr) } } return ar,nil } // }}} // {{{ -------------------------={ E N D }=---------------------------------- // Local variables: // folded-file: t // end: // }}}
metar/dayreport.go
0.540196
0.4575
dayreport.go
starcoder
package main import ( "image" "image/draw" "log" "os" "github.com/go-gl/gl/v2.1/gl" "github.com/paulmach/go.geo" ) // NewTexture loads texture from file. func NewTexture(file string) (texture uint32, bounds Rect) { imgFile, err := os.Open(file) if err != nil { log.Fatalf("texture %q not found on disk: %v\n", file, err) } img, _, err := image.Decode(imgFile) if err != nil { panic(err) } rgba := image.NewRGBA(img.Bounds()) if rgba.Stride != rgba.Rect.Size().X*4 { panic("unsupported stride") } draw.Draw(rgba, rgba.Bounds(), img, image.Point{0, 0}, draw.Src) //var texture uint32 // enable server-side GL capabilities gl.Enable(gl.TEXTURE_2D) // generate texture names gl.GenTextures(1, &texture) // bind a named texture to a texturing target gl.BindTexture(gl.TEXTURE_2D, texture) // set texture parameters // texture minifying function gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR) // texture magnification function gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR) // set the wrap parameter for texture coordinate s (x dimension) gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE) // set the wrap parameter for texture coordinate t (y dimension) gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE) // set the wrap parameter for texture coordinate r (z dimension) gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_R, gl.CLAMP_TO_EDGE) // specify a two-dimensional texture image gl.TexImage2D( gl.TEXTURE_2D, 0, gl.RGBA, int32(rgba.Rect.Size().X), int32(rgba.Rect.Size().Y), 0, gl.RGBA, gl.UNSIGNED_BYTE, gl.Ptr(rgba.Pix)) // disable server-side GL capabilities gl.Disable(gl.TEXTURE_2D) // translate image size into game units imgWidth := float32(img.Bounds().Max.X) / 100.0 imgHeight := float32(img.Bounds().Max.Y) / 100.0 bounds = Rect{ Left: Point{X: float32(0), Y: float32(0)}, Right: Point{X: imgWidth, Y: imgHeight}, } return texture, bounds } // DrawTexture draws texture into rectange. 
func DrawTexture(texture uint32, dst Rect) { gl.ActiveTexture(gl.TEXTURE0) gl.Enable(gl.TEXTURE_2D) gl.BindTexture(gl.TEXTURE_2D, texture) gl.Color4f(1, 1, 1, 1) gl.Begin(gl.QUADS) gl.TexCoord2f(0, 1) gl.Vertex2f(dst.Left.X, dst.Left.Y) gl.TexCoord2f(1, 1) gl.Vertex2f(dst.Right.X, dst.Left.Y) gl.TexCoord2f(1, 0) gl.Vertex2f(dst.Right.X, dst.Right.Y) gl.TexCoord2f(0, 0) gl.Vertex2f(dst.Left.X, dst.Right.Y) gl.End() gl.Disable(gl.TEXTURE_2D) } // CheckBoundaries checks violation of boundaries. func CheckBoundaries(rect Rect, boundaries ...Rect) (violated bool, violatedBound Rect) { for _, bound := range boundaries { p1 := geo.NewPath() pp := []geo.Point{ geo.Point{float64(rect.Left.X), float64(rect.Left.Y)}, geo.Point{float64(rect.Right.X), float64(rect.Left.Y)}, // top geo.Point{float64(rect.Right.X), float64(rect.Left.Y)}, geo.Point{float64(rect.Right.X), float64(rect.Right.Y)}, // right geo.Point{float64(rect.Right.X), float64(rect.Right.Y)}, geo.Point{float64(rect.Left.X), float64(rect.Right.Y)}, // bottom geo.Point{float64(rect.Left.X), float64(rect.Right.Y)}, geo.Point{float64(rect.Left.X), float64(rect.Left.Y)}, // left } p1.SetPoints(pp) p2 := geo.NewPath() pp = []geo.Point{ geo.Point{float64(bound.Left.X), float64(bound.Left.Y)}, geo.Point{float64(bound.Right.X), float64(bound.Left.Y)}, // top geo.Point{float64(bound.Right.X), float64(bound.Left.Y)}, geo.Point{float64(bound.Right.X), float64(bound.Right.Y)}, // right geo.Point{float64(bound.Right.X), float64(bound.Right.Y)}, geo.Point{float64(bound.Left.X), float64(bound.Right.Y)}, // bottom geo.Point{float64(bound.Left.X), float64(bound.Right.Y)}, geo.Point{float64(bound.Left.X), float64(bound.Left.Y)}, // left } p2.SetPoints(pp) if p1.Intersects(p2) { return true, bound } } return false, Rect{} } // IntToDigits returns array of digits from number func IntToDigits(number int64) []int { digits := make([]int, 0) if number == 0 { digits = append(digits, 0) } for number != 0 { digit := number % 10 number = 
number / 10 digits = append(digits, int(digit)) } for i, j := 0, len(digits)-1; i < j; i, j = i+1, j-1 { digits[i], digits[j] = digits[j], digits[i] } return digits }
utils.go
0.669529
0.417093
utils.go
starcoder
package method import ( "fmt" "math" ) /* Note: 1.Go does not have classes, but we can define methods on types such as struct and non-struct types. A method is a function with a special receiver argument. 2.We can only declare a method with a receiver whose type is defined in the same package as the method. 3.We can declare methods with pointer receivers. Methods with pointer receivers can modify the value to which the receiver points. Since methods often need to modify their receiver, pointer receivers are more common than value receivers. 4.With a value receiver, the Scale method operates on a copy of the original Vertex value. (This is the same behavior as for any other function argument.) 5.For the statement v.Scale(10), even though v is a value and not a pointer, the method with the pointer receiver is called automatically. Ifit is just a function with a pointer argument, we must pass a pointer. 6.Two reasons to use a pointer receiver: (1).The first is so that the method can modify the value that its receiver points to. (2).The second is to avoid copying the value on each method call. This can be more efficient if the receiver is a large struct, for example. */ type Vertex struct { X, Y float64 } func (v Vertex) Abs() float64 { return math.Sqrt(v.X*v.X + v.Y*v.Y) } func (v Vertex) ScaleValue(f float64) { v.X = v.X * f v.Y = v.Y * f } func (v *Vertex) Scale(f float64) { v.X = v.X * f v.Y = v.Y * f } func ScaleFunc(v *Vertex, f float64) { v.X = v.X * f v.Y = v.Y * f } func vertexMethod() { fmt.Println("=========Enter, MethodTest.vertexMethod()==========") defer fmt.Println("=========Exit, MethodTest.vertexMethod()==========") v := Vertex{5, 12} fmt.Println(v.Abs()) //13 v.Scale(10) fmt.Println(v.Abs()) //130, if the Scale receiver is not a pointer, the result is 13. 
v1 := &Vertex{3, 4} v1.Scale(10) fmt.Println(v1.Abs()) //50 //ScaleFunc(v, 10) //compile error: cannot use v (type Vertex) as type *Vertex in argument to ScaleFunc v2 := &Vertex{3, 4} v2.ScaleValue(10) //ScaleValue's receiver is value type, but we can use it with a pointer fmt.Println(v2.Abs()) //5 } func Test() { vertexMethod() }
golang/src/std/method/method.go
0.816333
0.569673
method.go
starcoder
package helper import ( "errors" "fmt" "strings" ) /** This Method will return a functionName along with column name. we did this so that we can handle multiple datatype. The implementation of each datatypes belongs to the helper package. Parameters: - colType: is the datatype of the column - colName: is the name given to the column - allowed: is the fixed set of values that are valid for the column This is used for enum and set colTypes. Returns: - The name of laravel version of functaion name (String) Example: - primaryKeyMethodNameGenerator("integer") */ func ColTypeSwitcher(colType string, colName string, allowed []string) (string, error) { colDataType := strings.Split(colType, "|") switch colDataType[0] { // TODO : Add more column types here case "unsignedBigInteger": return UnsignedBigInteger(colName) case "bigInteger": return BigInteger(colName) case "unsignedInteger": return UnsignedInteger(colName) case "integer": return Integer(colName) case "unsignedTinyInteger": return UnsignedTinyInteger(colName) case "tinyInteger": return TinyInteger(colName) case "unsignedMediumInteger": return UnsignedMediumInteger(colName) case "mediumInteger": return MediumInteger(colName) case "unsignedSmallInteger": return UnsignedSmallInteger(colName) case "smallInteger": return smallInteger(colName) case "string": return String(colName, colDataType) case "boolean": return Boolean(colName) case "char": return Char(colName, colDataType) case "date": return Date(colName, colDataType) case "double": return Double(colName, colDataType) case "float": return Float(colName, colDataType) case "enum": return Enum(colName, allowed) case "set": return Set(colName, allowed) case "dateTime": return DateTime(colName, colDataType) case "dateTimeTz": return DateTimeTz(colName, colDataType) case "decimal": return Decimal(colName, colDataType) case "geometry": return Geometry(colName) case "geometryCollection": return GeometryCollection(colName) case "ipAddress": return IpAddress(colName) case 
"json": return Json(colName) case "jsonb": return Jsonb(colName) case "lineString": return LineString(colName) case "longText": return LongText(colName) case "macAddress": return MacAddress(colName) case "morphs": return Morphs(colName) case "uuidMorphs": return UuidMorphs(colName) case "multiLineString": return MultiLineString(colName) case "multiPoint": return MultiPoint(colName) case "multiPolygon": return MultiPolygon(colName) case "nullableMorphs": return NullableMorphs(colName) case "nullableUuidMorphs": return NullableUuidMorphs(colName) case "point": return Point(colName) case "polygon": return Polygon(colName) case "softDelete": return SoftDeletes(colName, colDataType) case "softDeleteTz": return SoftDeletesTz(colName, colDataType) case "text": return Text(colName) case "time": return Time(colName, colDataType) case "timeTz": return TimeTz(colName, colDataType) case "timestamp": return Timestamp(colName, colDataType) case "timeStampTz": return TimestampTz(colName, colDataType) case "year": return Year(colName) case "binary": return Binary(colName) default: // TODO: Log this error and replace it with formatted error message. 
return "", errors.New("unsupported datatype") } } /** This Method will return a laravel version of the function name for the passed datatype Primary Key generation Parameters: - colType: Any Of the Input belonging to [ "integer", "mediumInteger", "smallInteger", "tinyInteger", "bigInteger" ] Returns: - The name of laravel version of functaion name (String) Example: - primaryKeyMethodNameGenerator("integer") */ func PrimaryKeyMethodNameGenerator(colType string) (string, error) { switch colType { case "integer": return "increments", nil case "mediumInteger": return "mediumIncrements", nil case "smallInteger": return "smallIncrements", nil case "tinyInteger": return "tinyIncrements", nil case "bigInteger": return "bigIncrements", nil default: return "", errors.New("type not supported or invalid inputs") } } func UnsignedBigInteger(colName string) (string, error) { return normalStringDataProcessor("unsignedBigInteger", colName), nil } func BigInteger(colName string) (string, error) { return normalStringDataProcessor("bigInteger", colName), nil } func UnsignedInteger(colName string) (string, error) { return normalStringDataProcessor("unsignedInteger", colName), nil } func Integer(colName string) (string, error) { return normalStringDataProcessor("integer", colName), nil } func UnsignedTinyInteger(colName string) (string, error) { return normalStringDataProcessor("unsignedTinyInteger", colName), nil } func TinyInteger(colName string) (string, error) { return normalStringDataProcessor("tinyInteger", colName), nil } func UnsignedMediumInteger(colName string) (string, error) { return normalStringDataProcessor("unsignedMediumInteger", colName), nil } func MediumInteger(colName string) (string, error) { return normalStringDataProcessor("mediumInteger", colName), nil } func UnsignedSmallInteger(colName string) (string, error) { return normalStringDataProcessor("unsignedSmallInteger", colName), nil } func smallInteger(colName string) (string, error) { return 
normalStringDataProcessor("smallInteger", colName), nil } func String(colName string, funcArgs []string) (string, error) { return multiParamColumnProcessor("string", colName, funcArgs), nil } func Boolean(colName string) (string, error) { return normalStringDataProcessor("boolean", colName), nil } func Char(colType string, dataType []string) (string, error) { return multiParamColumnProcessor("char", colType, dataType), nil } func Date(colName string, dataType []string) (string, error) { return normalStringDataProcessor("date", colName), nil } func Double(colType string, dataType []string) (string, error) { return multiParamColumnProcessor("double", colType, dataType), nil } func Float(colType string, dataType []string) (string, error) { return multiParamColumnProcessor("float", colType, dataType), nil } func Enum(colFunctionName string, allowed []string) (string, error) { return dataArrayProcessor(colFunctionName, allowed, "enum"), nil } func Set(colFunctionName string, allowed []string) (string, error) { return dataArrayProcessor(colFunctionName, allowed, "set"), nil } func Binary(colName string) (string, error) { return normalStringDataProcessor("binary", colName), nil } func DateTime(colName string, dataType []string) (string, error) { return multiParamColumnProcessor("dateTime", colName, dataType), nil } func DateTimeTz(colName string, dataType []string) (string, error) { return multiParamColumnProcessor("dateTimeTz", colName, dataType), nil } func Decimal(colName string, dataType []string) (string, error) { return multiParamColumnProcessor("decimal", colName, dataType), nil } func Geometry(colName string) (string, error) { return normalStringDataProcessor("geometry", colName), nil } func GeometryCollection(colName string) (string, error) { return normalStringDataProcessor("geometryCollection", colName), nil } func IpAddress(colName string) (string, error) { return normalStringDataProcessor("ipAddress", colName), nil } func Json(colName string) (string, error) 
{ return normalStringDataProcessor("json", colName), nil } func Jsonb(colName string) (string, error) { return normalStringDataProcessor("jsonb", colName), nil } func LineString(colName string) (string, error) { return normalStringDataProcessor("lineString", colName), nil } func LongText(colName string) (string, error) { return normalStringDataProcessor("longText", colName), nil } func MacAddress(colName string) (string, error) { return normalStringDataProcessor("macAddress", colName), nil } func Morphs(colName string) (string, error) { return normalStringDataProcessor("morphs", colName), nil } func UuidMorphs(colName string) (string, error) { return normalStringDataProcessor("uuidMorphs", colName), nil } func MultiLineString(colName string) (string, error) { return normalStringDataProcessor("multiLineString", colName), nil } func MultiPoint(colName string) (string, error) { return normalStringDataProcessor("multiPoint", colName), nil } func MultiPolygon(colName string) (string, error) { return normalStringDataProcessor("multiPolygon", colName), nil } func NullableMorphs(colName string) (string, error) { return normalStringDataProcessor("nullableMorphs", colName), nil } func NullableUuidMorphs(colName string) (string, error) { return normalStringDataProcessor("nullableUuidMorphs", colName), nil } func Point(colName string) (string, error) { return normalStringDataProcessor("point", colName), nil } func Polygon(colName string) (string, error) { return normalStringDataProcessor("polygon", colName), nil } func SoftDeletes(colName string, dataType []string) (string, error) { return multiParamColumnProcessor("softDeletes", colName, dataType), nil } func SoftDeletesTz(colName string, dataType []string) (string, error) { return multiParamColumnProcessor("softDeletesTz", colName, dataType), nil } func Text(colName string) (string, error) { return normalStringDataProcessor("text", colName), nil } func Time(colName string, dataType []string) (string, error) { return 
multiParamColumnProcessor("time", colName, dataType), nil } func TimeTz(colName string, dataType []string) (string, error) { return multiParamColumnProcessor("timeTz", colName, dataType), nil } func Timestamp(colName string, dataType []string) (string, error) { return multiParamColumnProcessor("timestamp", colName, dataType), nil } func TimestampTz(colName string, dataType []string) (string, error) { return multiParamColumnProcessor("timestampTz", colName, dataType), nil } func Year(colName string) (string, error) { return normalStringDataProcessor("year", colName), nil } // All Other datatype processor func handleAllowedKeywordsToString(allowed []string) string { bldr := "'" + strings.Join(allowed, "', '") + "'" return "[" + bldr + "]" } func normalStringDataProcessor(colFunctionName string, colName string) string { return fmt.Sprintf("%s('%s')", colFunctionName, colName) } func dataArrayProcessor(colName string, allowed []string, functionName string) string { if len(allowed) > 0 { return fmt.Sprintf("%s('%s', %s)", functionName, colName, handleAllowedKeywordsToString(allowed)) } return fmt.Sprintf("%s('%s')", functionName, colName) } func multiParamColumnProcessor(functionName string, colName string, args []string) string { if len(args) > 1 { return fmt.Sprintf("%s('%s', %s)", functionName, colName, args[1]) } return fmt.Sprintf("%s('%s')", functionName, colName) }
internal/impl/laravel/5.8/handler/helper/column_datatype.go
0.663669
0.611005
column_datatype.go
starcoder
package core import ( "math/rand" "git.maze.io/go/math32" ) type MaterialSample struct { Continue bool PDF float32 Weight Vector3 Scattered Vector3 } type Material interface { Sample(wi Vector3, eta0, eta1 float32) MaterialSample Scatter(ray *Ray, hitRecord *HitRecord, attenuation *Vector3, scattered *Ray) bool } type Lambertian struct { Albedo Vector3 } func NewLambertian(albedo Vector3) Lambertian { return Lambertian{albedo} } func (lambertian *Lambertian) Sample(wi Vector3, eta0, eta1 float32) MaterialSample { if wi.Z <= Epsilon32 { return MaterialSample{false, 0.0, Vector3{}, Vector3{}} } wm := RandomOnCosineHemiSphere(eta0, eta1) pdf := wi.Z / math32.Pi //PDF of cosine hemisphere return MaterialSample{true, pdf, lambertian.Albedo, wm} } func (material *Lambertian) Scatter(ray *Ray, hitRecord *HitRecord, attenuation *Vector3, scattered *Ray) bool { coordinate := NewCoordinate(hitRecord.Normal) n := RandomOnHemiSphere(rand.Float32(), rand.Float32()) *scattered = Ray{hitRecord.Position, coordinate.LocalToWorld(n)} *attenuation = material.Albedo return true } func project2(x, y float32, v Vector3) float32 { if Equal32(x, y) { return x*y } sn := 1.0 - v.Z*v.Z if sn <= Epsilon32 { return x*y } invSn := 1.0/sn cs2 := v.X*v.X*invSn sn2 := v.Y*v.Y*invSn return cs2*x*x + sn2*y*y } func ggx_NDF(m Vector3, alpha2 float32) float32 { d := m.Z denom := (d*d) * (alpha2 - 1.0) + 1.0 return alpha2/(math32.Pi * denom * denom) } func ggx_G1(v Vector3, alpha2 float32) float32 { dotNV := v.Z denom := math32.Sqrt(alpha2 + (1.0-alpha2)*dotNV*dotNV) + dotNV return 2.0 * dotNV/denom } func ggx_G2(wi, wo Vector3, alpha2 float32) float32 { dotNI := wi.Z dotNO := wo.Z denomI := dotNO * math32.Sqrt(alpha2 + (1.0 - alpha2) * dotNI*dotNI) denomO := dotNI * math32.Sqrt(alpha2 + (1.0 - alpha2) * dotNO*dotNO) return 2.0 * dotNI * dotNO / (denomI + denomO) } func ggx_VNDF(wo Vector3, roughness, eta0, eta1 float32) Vector3 { //1. 
Transform the view direction to the hemisphere configuration v := NormalizeVector3(Vector3{roughness*wo.X, roughness*wo.Y, wo.Z}) //2. Construct orthonormal bais var t1 Vector3 if v.Z < 0.999 { t1 = NormalizeVector3(CrossVector3(v, Vector3{0.0, 0.0, 1.0})) }else { t1 = Vector3{1.0, 0.0, 0.0} } t2 := CrossVector3(t1, v) //3. Make parameterization of the projected area /* a := 1.0/(1.0+v.Y) r := math32.Sqrt(eta0) var phi float32 var b float32 if eta1<a { phi = eta1/a * math32.Pi b = 1.0 }else{ phi = math32.Pi + (eta1-a)/(1.0-a) * math32.Pi; b = v.Z } p1 := r * math32.Cos(phi) p2 := r * math32.Sin(phi) * b */ r := math32.Sqrt(eta0) phi := (math32.Pi * 2.0) * eta1 p1 := r * math32.Cos(phi) p2 := r * math32.Sin(phi) a := 0.5 * (1.0 + v.Z) p2 = (1.0-a)*math32.Sqrt(1.0-p1*p1) + a*p2 //4. Reproject onto hemisphere t1 = MulVector3(p1, t1) t2 = MulVector3(p2, t2) v = MulVector3(math32.Sqrt(math32.Max(0.0, 1.0-p1*p1-p2*p2)), v) n := AddVector3(AddVector3(t1, t2), v) //5. Transform the normal back to the elipsoid configuration return NormalizeVector3(Vector3{roughness*n.X, roughness*n.Y, math32.Max(0.0, n.Z)}) } // https://hal.archives-ouvertes.fr/hal-01509746/document type Metal struct { Albedo Vector3 Roughness float32 RefIndex float32 } func (metal *Metal) Sample(wi Vector3, eta0, eta1 float32) MaterialSample { wm := ggx_VNDF(wi, metal.Roughness, eta0, eta1) wo := SubVector3(MulVector3(2.0*DotVector3(wi, wm), wm), wi) if 0.0 < wo.Z { f := Schlick(DotVector3(wo, wm), metal.RefIndex) g1 := ggx_G1(wi, metal.Roughness) g2 := ggx_G2(wo, wi, metal.Roughness*metal.Roughness) pdf := f * (g2/g1) return MaterialSample{true, pdf, metal.Albedo, wm} }else{ return MaterialSample{false, 0.0, metal.Albedo, wm} } /* alpha2 := metal.Roughness * metal.Roughness theta := math32.Acos(math32.Sqrt((1.0-eta0)/((alpha2-1.0)*eta0 + 1.0))) phi := 2.0 * math32.Pi * eta1 sinTheta := math32.Sin(theta) cosTheta := math32.Cos(theta) sinPhi := math32.Sin(phi) cosPhi := math32.Cos(phi) wm := 
Vector3{sinTheta*cosPhi, sinTheta*sinPhi, cosTheta} wo := SubVector3(MulVector3(2.0*DotVector3(wi, wm), wm), wi) if 0.0 < wo.Z && 0.0<DotVector3(wo, wm) { f := Schlick(DotVector3(wo, wm), metal.RefIndex) g := ggx_G2(wo, wi, alpha2) pdf := f * g * math32.Abs(DotVector3(wi,wm))/(wi.Z * wm.Z) return MaterialSample{true, pdf, metal.Albedo, wm} }else{ return MaterialSample{false, 0.0, metal.Albedo, wm} } */ } func (metal *Metal) Scatter(ray *Ray, hitRecord *HitRecord, attenuation *Vector3, scattered *Ray) bool { reflected := NormalizeVector3(Reflect(ray.Direction, hitRecord.Normal)) *scattered = Ray{hitRecord.Position, reflected} *attenuation = metal.Albedo return 0.0001 < DotVector3(scattered.Direction, hitRecord.Normal) } type Dielectric struct { Albedo Vector3 RefIndex float32 } func (dielectric *Dielectric) Sample(wi Vector3, eta0, eta1 float32) MaterialSample { if wi.Z <= Epsilon32 { return MaterialSample{false, 0.0, Vector3{}, Vector3{}} } normal := Vector3{0.0, 0.0, 1.0} direction := wi.Minus() reflected := NormalizeVector3(Reflect(direction, normal)) var niOverNt float32 var cosine float32 var n Vector3 if 0.0 < direction.Z { n = Vector3{0.0, 0.0, -1.0} niOverNt = dielectric.RefIndex cosine = dielectric.RefIndex * direction.Z }else{ n = Vector3{0.0, 0.0, 1.0} niOverNt = 1.0 / dielectric.RefIndex cosine = -direction.Z } var refracted Vector3 if !Refract(&refracted, direction, n, niOverNt) { return MaterialSample{true, 1.0, Vector3{1.0, 1.0, 1.0}, reflected} } reflectProb := Schlick(cosine, dielectric.RefIndex) if rand.Float32() < reflectProb { return MaterialSample{true, 1.0, Vector3{1.0, 1.0, 1.0}, reflected} }else{ return MaterialSample{true, 1.0, Vector3{1.0, 1.0, 1.0}, refracted} } } func (dielectric *Dielectric) Scatter(ray *Ray, hitRecord *HitRecord, attenuation *Vector3, scattered *Ray) bool { reflected := NormalizeVector3(Reflect(ray.Direction, hitRecord.Normal)) *attenuation = dielectric.Albedo var niOverNt float32 var cosine float32 var normal Vector3 
if 0.0 < DotVector3(ray.Direction, hitRecord.Normal) { normal = hitRecord.Normal.Minus() niOverNt = dielectric.RefIndex cosine = dielectric.RefIndex * DotVector3(ray.Direction, hitRecord.Normal) } else { normal = hitRecord.Normal niOverNt = 1.0 / dielectric.RefIndex cosine = -DotVector3(ray.Direction, hitRecord.Normal) } var refracted Vector3 if !Refract(&refracted, ray.Direction, normal, niOverNt) { *scattered = Ray{hitRecord.Position, reflected} return true } reflectProb := Schlick(cosine, dielectric.RefIndex) if rand.Float32() < reflectProb { *scattered = Ray{hitRecord.Position, reflected} }else{ *scattered = Ray{hitRecord.Position, refracted} } return true }
core/material.go
0.788135
0.533458
material.go
starcoder
package cuj import ( "context" "fmt" "chromiumos/tast/common/action" "chromiumos/tast/errors" "chromiumos/tast/local/bundles/cros/ui/cuj/volume" "chromiumos/tast/local/chrome/uiauto" "chromiumos/tast/local/power" "chromiumos/tast/testing" ) const ( // expectedBrightness indicates the default screen brightness. expectedBrightness = 80.00 // expectedVolumePercent indicates the percentage of maximum volume. expectedVolumePercent = 10 ) // InitializeSetting sets all initial settings to DUT before performing CUJ testing. func InitializeSetting(ctx context.Context) (action.Action, error) { setBrightnessNormal, err := SetScreenBrightness(ctx, expectedBrightness) if err != nil { return nil, err } setVolumeNormal, err := SetAudioVolume(ctx, expectedVolumePercent) if err != nil { return nil, err } return func(ctx context.Context) error { setBrightnessErr := setBrightnessNormal(ctx) setVolumeErr := setVolumeNormal(ctx) if setBrightnessErr != nil && setVolumeErr != nil { return errors.Errorf("failed to reset initial settings: failed to reset brightness setting - %v; failed to reset volume setting - %v", setBrightnessErr, setVolumeErr) } if setBrightnessErr != nil { return errors.Wrap(setBrightnessErr, "failed to reset brightness setting") } if setVolumeErr != nil { return errors.Wrap(setVolumeErr, "failed to reset volume setting") } return nil }, nil } // SetScreenBrightness sets the screen brightness to the expectedBrightness and returns a function that restores the original brightness. func SetScreenBrightness(ctx context.Context, expectedBrightness float64) (action.Action, error) { pm, err := power.NewPowerManager(ctx) if err != nil { return nil, errors.Wrap(err, "failed to create a PowerManager object") } originalBrightness, err := pm.GetScreenBrightnessPercent(ctx) if err != nil { return nil, errors.Wrap(err, "failed to get the original brightness") } testing.ContextLogf(ctx, "Setting brightness to %.2f%% as default. 
Current brightness: %.2f%%", expectedBrightness, originalBrightness) if err := pm.SetScreenBrightness(ctx, expectedBrightness); err != nil { return nil, errors.Wrap(err, "failed to set the screen brightness") } name := fmt.Sprintf("reset screen brightness to original brightness: %.2f%%", originalBrightness) return uiauto.NamedAction(name, func(ctx context.Context) error { return pm.SetScreenBrightness(ctx, originalBrightness) }), nil } // SetAudioVolume sets the audio volume to the expected percentage of the maximum volume and returns a function that restores the original volume. func SetAudioVolume(ctx context.Context, expectedVolumePercent int) (action.Action, error) { vh, err := volume.NewVolumeHelper(ctx) if err != nil { return nil, errors.Wrap(err, "failed to create a volumeHelper") } originalVolume, err := vh.GetVolume(ctx) if err != nil { return nil, errors.Wrap(err, "failed to get volume") } testing.ContextLogf(ctx, "Setting the audio volume to %d%% of the maximum volume. Current volume: %d", expectedVolumePercent, originalVolume) if err := vh.SetVolume(ctx, expectedVolumePercent); err != nil { return nil, errors.Wrap(err, "failed to set volume") } name := fmt.Sprintf("reset volume to original volume: %d", originalVolume) return uiauto.NamedAction(name, func(ctx context.Context) error { return vh.SetVolume(ctx, originalVolume) }), nil }
src/chromiumos/tast/local/bundles/cros/ui/cuj/setup.go
0.758421
0.447279
setup.go
starcoder
package fixtures const NoSecurityGroups = `{ "pagination": { "total_results": 0, "total_pages": 1, "first": { "href": "https://api.[your-domain.com]/v3/apps?page=1&per_page=10" }, "last": { "href": "https://api.[your-domain.com]/v3/apps?page=1&per_page=10" } }, "resources": [] }` const OneSecurityGroup = `{ "pagination": { "total_results": 1, "total_pages": 1, "first": { "href": "https://api.[your-domain.com]/v3/apps?page=1&per_page=10" }, "last": { "href": "https://api.[your-domain.com]/v3/apps?page=1&per_page=10" } }, "resources": [ { "guid": "b85a788e-671f-4549-814d-e34cdb2f539a", "created_at": "2020-02-20T17:42:08Z", "updated_at": "2020-02-20T17:42:08Z", "name": "my-group0", "globally_enabled": { "running": true, "staging": false }, "rules": [ { "protocol": "tcp", "destination": "10.10.10.0/24", "ports": "443,80,8080" }, { "protocol": "icmp", "destination": "10.10.10.0/24", "type": 8, "code": 0, "description": "Allow ping requests to private services" } ], "relationships": { "staging_spaces": { "data": [ { "guid": "space-guid-1" }, { "guid": "space-guid-2" } ] }, "running_spaces": { "data": [ { "guid": "space-guid-3" }, { "guid": "space-guid-4" } ] } }, "links": { "self": { "href": "https://api.example.org/v3/security_groups/b85a788e-671f-4549-814d-e34cdb2f539a" } } } ] }` const TwoSecurityGroups = `{ "pagination": { "total_results": 2, "total_pages": 1, "first": { "href": "https://api.[your-domain.com]/v3/apps?page=1&per_page=10" }, "last": { "href": "https://api.[your-domain.com]/v3/apps?page=1&per_page=10" } }, "resources": [ { "guid": "b85a788e-671f-4549-814d-e34cdb2f539a", "created_at": "2020-02-20T17:42:08Z", "updated_at": "2020-02-20T17:42:08Z", "name": "my-group0", "globally_enabled": { "running": true, "staging": false }, "rules": [ { "protocol": "tcp", "destination": "10.10.10.0/24", "ports": "443,80,8080" }, { "protocol": "icmp", "destination": "10.10.10.0/24", "type": 8, "code": 0, "description": "Allow ping requests to private services" } ], 
"relationships": { "staging_spaces": { "data": [ { "guid": "space-guid-1" }, { "guid": "space-guid-2" } ] }, "running_spaces": { "data": [] } }, "links": { "self": { "href": "https://api.example.org/v3/security_groups/b85a788e-671f-4549-814d-e34cdb2f539a" } } }, { "guid": "second-guid", "created_at": "2020-02-20T17:42:08Z", "updated_at": "2020-02-20T17:42:08Z", "name": "my-group2", "globally_enabled": { "running": false, "staging": true }, "rules": [ { "protocol": "tcp", "destination": "10.10.10.0/24", "ports": "53" } ], "relationships": { "staging_spaces": { "data": [ { "guid": "space-guid-1" } ] }, "running_spaces": { "data": [ { "guid": "space-guid-1" } ] } }, "links": { "self": { "href": "https://api.example.org/v3/security_groups/second-guid" } } } ] }`
src/code.cloudfoundry.org/policy-server/cc_client/fixtures/get_security_groups.go
0.611614
0.423935
get_security_groups.go
starcoder
package bitcoin import ( "bytes" "encoding/hex" "errors" "fmt" "io" ) const Hash20Size = 20 // Hash20 is a 20 byte integer in little endian format. type Hash20 [Hash20Size]byte func NewHash20(b []byte) (*Hash20, error) { if len(b) != Hash20Size { return nil, errors.New("Wrong byte length") } result := Hash20{} copy(result[:], b) return &result, nil } // NewHash20FromStr creates a little endian hash from a big endian string. func NewHash20FromStr(s string) (*Hash20, error) { if len(s) != 2*Hash20Size { return nil, fmt.Errorf("Wrong size hex for Hash20 : %d", len(s)) } b := make([]byte, Hash20Size) _, err := hex.Decode(b, []byte(s[:])) if err != nil { return nil, err } result := Hash20{} reverse20(result[:], b) return &result, nil } // NewHash20FromData creates a Hash20 by hashing the data with a Ripemd160(Sha256(b)) func NewHash20FromData(b []byte) (*Hash20, error) { return NewHash20(Ripemd160(Sha256(b))) } // Bytes returns the data for the hash. func (h Hash20) Bytes() []byte { return h[:] } // SetBytes sets the value of the hash. func (h *Hash20) SetBytes(b []byte) error { if len(b) != Hash20Size { return errors.New("Wrong byte length") } copy(h[:], b) return nil } // String returns the hex for the hash. func (h *Hash20) String() string { var r [Hash20Size]byte reverse20(r[:], h[:]) return fmt.Sprintf("%x", r[:]) } // Equal returns true if the parameter has the same value. func (h *Hash20) Equal(o *Hash20) bool { if h == nil { return o == nil } if o == nil { return false } return bytes.Equal(h[:], o[:]) } // Serialize writes the hash into a writer. func (h *Hash20) Serialize(w io.Writer) error { _, err := w.Write(h[:]) return err } // Deserialize reads a hash from a reader. func DeserializeHash20(r io.Reader) (*Hash20, error) { result := Hash20{} _, err := r.Read(result[:]) if err != nil { return nil, err } return &result, err } // MarshalJSON converts to json. 
func (h *Hash20) MarshalJSON() ([]byte, error) { var r [Hash20Size]byte reverse20(r[:], h[:]) return []byte(fmt.Sprintf("\"%x\"", r[:])), nil } // UnmarshalJSON converts from json. func (h *Hash20) UnmarshalJSON(data []byte) error { if len(data) != (2*Hash20Size)+2 { return fmt.Errorf("Wrong size hex for Hash20 : %d", len(data)-2) } b := make([]byte, Hash20Size) _, err := hex.Decode(b, data[1:len(data)-1]) if err != nil { return err } reverse20(h[:], b) return nil } // Scan converts from a database column. func (h *Hash20) Scan(data interface{}) error { b, ok := data.([]byte) if !ok { return errors.New("Hash20 db column not bytes") } return h.SetBytes(b) } func reverse20(h, rh []byte) { i := Hash20Size - 1 for _, b := range rh[:] { h[i] = b i-- } }
pkg/bitcoin/hash20.go
0.807157
0.40157
hash20.go
starcoder
package config import "fmt" // LookupValues is a map of field names to field values for an individual row of a lookup. type LookupValues map[string]string // valuesForLookupFields returns a list of strings representing a LookupRow in the context of a LookupFields object. func (lookupValues LookupValues) valuesForLookupFields(lookupFields LookupFields) []string { fieldValues := make([]string, len(lookupFields)) for i, lookupField := range lookupFields { lookupValue, ok := lookupValues[lookupField.Name] if !ok { lookupValue = "" } fieldValues[i] = lookupValue } return fieldValues } // validateForLookupFields returns an error if LookupValues is not valid in the context of LookupFields. // A LookupValues object has no way to validate itself without such a context. func (lookupValues LookupValues) validateForLookupFields(lookupFields LookupFields) error { // check that at least one field is set if len(lookupValues) == 0 { return fmt.Errorf("LookupValues is empty") } // check that all set fields are available for fieldName := range lookupValues { if !lookupFields.hasFieldName(fieldName) { return fmt.Errorf("field %q not in LookupFields %v", fieldName, lookupFields) } } // check that all required fields are set for _, lookupField := range lookupFields { _, lookupFieldSet := lookupValues[lookupField.Name] if lookupField.Required && !lookupFieldSet { return fmt.Errorf("field %q is required, but not set in LookupValues %v", lookupField.Name, lookupValues) } } return nil } // withDefaultLookupValues returns a new LookupValues object with defaults applied. func (lookupValues LookupValues) withDefaultLookupValues(defaults LookupValues) LookupValues { withDefaults := defaults for fieldName, fieldValue := range lookupValues { withDefaults[fieldName] = fieldValue } return withDefaults } // hasFieldName returns a boolean indicating if fieldName is present. 
func (lookupValues LookupValues) hasFieldName(fieldName string) bool { if _, ok := lookupValues[fieldName]; ok { return true } return false } // hasFieldNames returns a boolean indicating if all fieldNames are present. func (lookupValues LookupValues) hasFieldNames(fieldNames []string) bool { for _, fieldName := range fieldNames { if !lookupValues.hasFieldName(fieldName) { return false } } return true }
internal/splunkconfig/config/lookupvalues.go
0.827445
0.449332
lookupvalues.go
starcoder
package types import "fmt" type TxType uint64 //Never change the order of these types. //If any types are deleted use a placeholder to prevent index number from changing. const ( TxTypeUnknown = TxType(iota) TxTypeIdentityCreate TxTypeTokenAccountCreate TxTypeTokenTx TxTypeDataChainCreate TxTypeDataEntry //per 256 btes TxTypeScratchChainCreate TxTypeScratchEntry //per 256 bytes TxTypeTokenCreate TxTypeKeyUpdate //update keys on the keychain the identity TxTypeMultisigTx TxTypeStateQuery //sends a query to a chain and returns its state information TxTypeCreateSigSpec TxTypeCreateSigSpecGroup TxTypeAddCredits TxTypeUpdateKeyPage //The Following are only valid for DC & BVC use: any other source of this message will be rejected TxTypeSyntheticIdentityCreate = TxType(iota + 0x20) TxTypeSyntheticCreateChain TxTypeSyntheticTokenAccountCreate TxTypeSyntheticTokenDeposit TxTypeSyntheticTxResponse TxTypeSyntheticDepositCredits TxTypeBvcSubmission TxTypeSyntheticGenesis TxTypeDataStore //Data Store can only be sent and thus authorized by an authority node TxTypeAdminVote txTypeSyntheticBase = TxTypeSyntheticIdentityCreate ) // Enum value map for TxType. 
var TxTypeValue = map[string]TxType{} func init() { all := []TxType{ TxTypeUnknown, TxTypeIdentityCreate, TxTypeTokenAccountCreate, TxTypeTokenTx, TxTypeDataChainCreate, TxTypeDataEntry, TxTypeScratchChainCreate, TxTypeScratchEntry, TxTypeTokenCreate, TxTypeKeyUpdate, TxTypeMultisigTx, TxTypeStateQuery, TxTypeCreateSigSpec, TxTypeCreateSigSpec, TxTypeCreateSigSpecGroup, TxTypeAddCredits, TxTypeUpdateKeyPage, TxTypeSyntheticIdentityCreate, TxTypeSyntheticCreateChain, TxTypeSyntheticTokenAccountCreate, TxTypeSyntheticTokenDeposit, TxTypeSyntheticTxResponse, TxTypeSyntheticDepositCredits, TxTypeBvcSubmission, TxTypeSyntheticGenesis, TxTypeDataStore, TxTypeAdminVote, } for _, t := range all { TxTypeValue[t.Name()] = t } } //Name will return the name of the type func (t TxType) Name() string { switch t { case TxTypeUnknown: return "unknown" case TxTypeIdentityCreate: return "identityCreate" case TxTypeTokenAccountCreate: return "tokenAccountCreate" case TxTypeTokenTx: return "tokenTx" case TxTypeDataChainCreate: return "dataChainCreate" case TxTypeDataEntry: return "dataEntry" case TxTypeScratchChainCreate: return "scratchChainCreate" case TxTypeScratchEntry: return "scratchEntry" case TxTypeTokenCreate: return "tokenCreate" case TxTypeKeyUpdate: return "keyUpdate" case TxTypeMultisigTx: return "multisigTx" case TxTypeStateQuery: return "stateQuery" case TxTypeCreateSigSpec: return "createSigSpec" case TxTypeCreateSigSpecGroup: return "createSigSpecGroup" case TxTypeAddCredits: return "addCredits" case TxTypeUpdateKeyPage: return "updateKeyPage" case TxTypeSyntheticIdentityCreate: return "syntheticIdentityCreate" case TxTypeSyntheticCreateChain: return "syntheticCreateChain" case TxTypeSyntheticTokenAccountCreate: return "syntheticTokenAccountCreate" case TxTypeSyntheticTokenDeposit: return "syntheticTokenDeposit" case TxTypeSyntheticTxResponse: return "syntheticTxResponse" case TxTypeSyntheticDepositCredits: return "syntheticDepositCredits" case TxTypeBvcSubmission: 
return "bvcSubmission" case TxTypeSyntheticGenesis: return "syntheticGenesis" case TxTypeDataStore: return "dataStore" case TxTypeAdminVote: return "adminVote" default: return fmt.Sprintf("TxType:%d", t) } } //SetType will set the type based on the string name submitted func (t *TxType) SetType(s string) { *t = TxTypeValue[s] } //AsUint64 casts as a uint64 func (t TxType) AsUint64() uint64 { return uint64(t) } func (t TxType) String() string { return t.Name() } func (t TxType) IsSynthetic() bool { return t >= txTypeSyntheticBase }
types/transaction_types.go
0.531209
0.452052
transaction_types.go
starcoder
package iso20022 // Set of elements used to provide further means of referencing a payment transaction. type PaymentIdentification3 struct { // Unique identification, as assigned by an instructing party for an instructed party, to unambiguously identify the instruction. // // Usage: The instruction identification is a point to point reference that can be used between the instructing party and the instructed party to refer to the individual instruction. It can be included in several messages related to the instruction. InstructionIdentification *Max35Text `xml:"InstrId,omitempty"` // Unique identification, as assigned by the initiating party, to unambiguously identify the transaction. This identification is passed on, unchanged, throughout the entire end-to-end chain. // // Usage: The end-to-end identification can be used for reconciliation or to link tasks relating to the transaction. It can be included in several messages related to the transaction. // // Usage: In case there are technical limitations to pass on multiple references, the end-to-end identification must be passed on throughout the entire end-to-end chain. EndToEndIdentification *Max35Text `xml:"EndToEndId"` // Unique identification, as assigned by the first instructing agent, to unambiguously identify the transaction that is passed on, unchanged, throughout the entire interbank chain. // Usage: The transaction identification can be used for reconciliation, tracking or to link tasks relating to the transaction on the interbank level. // Usage: The instructing agent has to make sure that the transaction identification is unique for a pre-agreed period. TransactionIdentification *Max35Text `xml:"TxId"` // Unique reference, as assigned by a clearing system, to unambiguously identify the instruction. 
ClearingSystemReference *Max35Text `xml:"ClrSysRef,omitempty"` } func (p *PaymentIdentification3) SetInstructionIdentification(value string) { p.InstructionIdentification = (*Max35Text)(&value) } func (p *PaymentIdentification3) SetEndToEndIdentification(value string) { p.EndToEndIdentification = (*Max35Text)(&value) } func (p *PaymentIdentification3) SetTransactionIdentification(value string) { p.TransactionIdentification = (*Max35Text)(&value) } func (p *PaymentIdentification3) SetClearingSystemReference(value string) { p.ClearingSystemReference = (*Max35Text)(&value) }
PaymentIdentification3.go
0.790611
0.546375
PaymentIdentification3.go
starcoder
package trie type TrieBuilder struct { words []string optimize bool } func NewTrie() *TrieBuilder { return &TrieBuilder{ optimize: true, } } // AddWord adds new word to list of words we will be searching for func (tb *TrieBuilder) AddWord(word string) *TrieBuilder { if len(word) != 0 { tb.words = append(tb.words, word) } return tb } // AddWord adds new word to list of words we will be searching for func (tb *TrieBuilder) WithWords(words ...string) *TrieBuilder { for _, word := range words { if len(word) != 0 { tb.words = append(tb.words, word) } } return tb } func (tb *TrieBuilder) Optimize(optimize bool) *TrieBuilder { tb.optimize = optimize return tb } // Build returns build search trie func (tb *TrieBuilder) Build() *Trie { var trie *Trie = &Trie{ root: newNode(), words: tb.words, } //Build our trie for indx, word := range trie.words { insertWord(trie, word, indx) } if tb.optimize { tb.optimizeSkiping(trie) } return trie } func insertWord(trie *Trie, word string, indx int) { currentNode := trie.root for _, char := range []byte(word) { if node := currentNode.FindChild(char); node != nil { currentNode = node } else { // Missing node, create one and add it to children. currentNode = currentNode.AddChild(char) } } //Last visited node is end of our word currentNode.Word = indx } // Calculates skipping, might take long time func (tb *TrieBuilder) optimizeSkiping(trie *Trie) { // Calculate for regular words. for _, word := range trie.words { calculateForWord(trie, word) } } func calculateForWord(trie *Trie, word string) { charBytes := []byte(word) //Small words are not interesting if len(charBytes) <= 1 { return } calculatedSkips := make([]int, len(charBytes)) for j := len(charBytes); j >= 2; j -= 1 { charactersToSkip := 1 for i := 1; i < j-1; i += 1 { sliceLength := j - i matchedBytes, matched := lookup(trie, charBytes[i:j]) // When matched is true: we found submatch along the way. 
// When matchedBytes == sliceLength, but matched is false: we can safely say that there is another word depeper in tree if matched || matchedBytes == sliceLength { // Skip by our current offset charactersToSkip = i break } else { // No equivalent of this path in our trie. // We can definetly skip first byte. charactersToSkip = i + 1 } } calculatedSkips[j-1] = charactersToSkip // There is no point to look further down. if charactersToSkip == 1 { for indx := 0; indx < j; indx += 1 { calculatedSkips[indx] = 1 } break } } applySkips(trie, charBytes, calculatedSkips) } func lookup(trie *Trie, part []byte) (int, bool) { matched := false currentNode := trie.root i := 0 for ; i < len(part); i++ { if node := currentNode.FindChild(part[i]); node != nil { if node.Word != -1 { matched = true } currentNode = node } else { break } } return i, matched } func applySkips(trie *Trie, word []byte, skips []int) { currentNode := trie.root for i := 0; i < len(word); i++ { if node := currentNode.FindChild(word[i]); node != nil { node.Skip = skips[i] currentNode = node } else { break } } }
trie_builder.go
0.709523
0.41941
trie_builder.go
starcoder
package wkb import ( "bytes" "encoding/binary" "fmt" "io" "github.com/airmap/tegola" ) // geometry types // http://edndoc.esri.com/arcsde/9.1/general_topics/wkb_representation.htm const ( GeoPoint uint32 = 1 GeoLineString = 2 GeoPolygon = 3 GeoMultiPoint = 4 GeoMultiLineString = 5 GeoMultiPolygon = 6 GeoGeometryCollection = 7 ) // Geometry describes a basic Geometry type that can decode it's self. type Geometry interface { Decode(bom binary.ByteOrder, r io.Reader) error Type() uint32 } func decodeByteOrderType(r io.Reader) (byteOrder binary.ByteOrder, typ uint32, err error) { var bom = make([]byte, 1, 1) // the bom is the first byte if _, err = r.Read(bom); err != nil { return byteOrder, typ, err } if bom[0] == 0 { byteOrder = binary.BigEndian } else { byteOrder = binary.LittleEndian } // Reading the type which is 4 bytes err = binary.Read(r, byteOrder, &typ) return byteOrder, typ, err } func encode(bom binary.ByteOrder, geometry tegola.Geometry) (data []interface{}) { if bom == binary.LittleEndian { data = append(data, byte(1)) } else { data = append(data, byte(0)) } switch geo := geometry.(type) { default: return nil case tegola.Point: data = append(data, GeoPoint) data = append(data, geo.X(), geo.Y()) return data case tegola.MultiPoint: data = append(data, GeoMultiPoint) pts := geo.Points() if len(pts) == 0 { return data } for _, p := range pts { pd := encode(bom, p) if pd == nil { return nil } data = append(data, pd...) } return data case tegola.LineString: data = append(data, GeoLineString) pts := geo.Subpoints() data = append(data, uint32(len(pts))) // Number of points in the line string for i := range pts { data = append(data, pts[i]) // The points. } return data case tegola.MultiLine: data = append(data, GeoMultiLineString) lns := geo.Lines() data = append(data, uint32(len(lns))) // Number of lines in the Multi line string for _, l := range lns { ld := encode(bom, l) if ld == nil { return nil } data = append(data, ld...) 
} return data case tegola.Polygon: data = append(data, GeoPolygon) lns := geo.Sublines() data = append(data, uint32(len(lns))) // Number of rings in the polygon for i := range lns { pts := lns[i].Subpoints() data = append(data, uint32(len(pts))) // Number of points in the ring for i := range pts { data = append(data, pts[i]) // The points in the ring } } return data case tegola.MultiPolygon: data = append(data, GeoMultiPolygon) pls := geo.Polygons() data = append(data, uint32(len(pls))) // Number of Polygons in the Multi. for _, p := range pls { pd := encode(bom, p) if pd == nil { return nil } data = append(data, pd...) } return data case tegola.Collection: data = append(data, GeoGeometryCollection) geometries := geo.Geometries() data = append(data, uint32(len(geometries))) // Number of Geometries for _, g := range geometries { gd := encode(bom, g) if gd == nil { return nil } data = append(data, gd...) } return data } } // Encode will encode the given Geometry as a binary representation with the given // byte order, and write it to the provided io.Writer. func Encode(w io.Writer, bom binary.ByteOrder, geometry tegola.Geometry) error { data := encode(bom, geometry) if data == nil { return fmt.Errorf("Unabled to encode %v", geometry) } return binary.Write(w, bom, data) } // WKB casts a tegola.Geometry to a wkb.Geometry type. // NOTE: Not sure if this is needed. I'm actually wondering if wkb types are even // needed, It seems like they could just be aliases to basic types, with additional // methods on them. -gdey func WKB(geometry tegola.Geometry) (geo Geometry, err error) { switch geo := geometry.(type) { case tegola.Point: p := NewPoint(geo.X(), geo.Y()) return &p, nil case tegola.Point3: // Not supported. 
case tegola.LineString: l := LineString{} for _, p := range geo.Subpoints() { l = append(l, NewPoint(p.X(), p.Y())) } return &l, nil case tegola.MultiLine: ml := MultiLineString{} for _, l := range geo.Lines() { g, err := WKB(l) if err != nil { return nil, err } lg, ok := g.(*LineString) if !ok { return nil, fmt.Errorf("Was not able to convert to LineString: %v", lg) } ml = append(ml, *lg) } return &ml, nil case tegola.Polygon: p := Polygon{} for _, l := range geo.Sublines() { g, err := WKB(l) if err != nil { return nil, err } lg, ok := g.(*LineString) if !ok { return nil, fmt.Errorf("Was not able to convert to LineString: %v", lg) } p = append(p, *lg) } return &p, nil case tegola.MultiPolygon: mp := MultiPolygon{} for _, p := range geo.Polygons() { g, err := WKB(p) if err != nil { return nil, err } pg, ok := g.(*Polygon) if !ok { return nil, fmt.Errorf("Was not able to convert to Polygon: %v", g) } mp = append(mp, *pg) } return &mp, nil case tegola.Collection: col := Collection{} for _, c := range geo.Geometries() { g, err := WKB(c) if err != nil { return nil, err } cg, ok := g.(Geometry) if !ok { return nil, fmt.Errorf("Was not able to convert to a Geometry type: %v", cg) } col = append(col, cg) } return &col, nil } return nil, fmt.Errorf("Not supported") } // DecodeBytes will decode the type into a Geometry func DecodeBytes(b []byte) (geo Geometry, err error) { buff := bytes.NewReader(b) return Decode(buff) } // Decode is the main function that given a io.Reader will attempt to decode the // Geometry from the byte stream. 
func Decode(r io.Reader) (geo Geometry, err error) { byteOrder, typ, err := decodeByteOrderType(r) if err != nil { return nil, err } switch typ { case GeoPoint: geo = new(Point) case GeoMultiPoint: geo = new(MultiPoint) case GeoLineString: geo = new(LineString) case GeoMultiLineString: geo = new(MultiLineString) case GeoPolygon: geo = new(Polygon) case GeoMultiPolygon: geo = new(MultiPolygon) case GeoGeometryCollection: geo = new(Collection) default: return nil, fmt.Errorf("Unknown Geometry! %v", typ) } if err := geo.Decode(byteOrder, r); err != nil { return nil, err } return geo, nil }
wkb/wkb.go
0.661048
0.407628
wkb.go
starcoder
// Package time provide common time and date operation common method. package timeutil import ( "strings" "time" ) const ( DEFAULTTIMEFORMAT = "2006-01-02 15:04:05" TIMEFORMAT = "20060102150405" ) // get current time func GetNowTime() time.Time { return time.Now() } // Gets the formatted string of the time func TimeFormat(time *time.Time, format string) string { var datePatterns = []string{ // year "Y", "2006", // A full numeric representation of a year, 4 digits Examples: 1999 or 2003 "y", "06", //A two digit representation of a year Examples: 99 or 03 // month "m", "01", // Numeric representation of a month, with leading zeros 01 through 12 "n", "1", // Numeric representation of a month, without leading zeros 1 through 12 "M", "Jan", // A short textual representation of a month, three letters Jan through Dec "F", "January", // A full textual representation of a month, such as January or March January through December // day "d", "02", // Day of the month, 2 digits with leading zeros 01 to 31 "j", "2", // Day of the month without leading zeros 1 to 31 // week "D", "Mon", // A textual representation of a day, three letters Mon through Sun "l", "Monday", // A full textual representation of the day of the week Sunday through Saturday // time "g", "3", // 12-hour format of an hour without leading zeros 1 through 12 "G", "15", // 24-hour format of an hour without leading zeros 0 through 23 "h", "03", // 12-hour format of an hour with leading zeros 01 through 12 "H", "15", // 24-hour format of an hour with leading zeros 00 through 23 "a", "pm", // Lowercase Ante meridiem and Post meridiem am or pm "A", "PM", // Uppercase Ante meridiem and Post meridiem AM or PM "i", "04", // Minutes with leading zeros 00 to 59 "s", "05", // Seconds, with leading zeros 00 through 59 // time zone "T", "MST", "P", "-07:00", "O", "-0700", // RFC 2822 "r", "Mon, 02 Jan 2006 15:04:05 -0700", } replacer := strings.NewReplacer(datePatterns...) 
format = replacer.Replace(format) return time.Format(format) } func StringFormatTime(timeLayout string) int64 { theTime, _ := time.Parse(DEFAULTTIMEFORMAT, timeLayout) timeUnix := theTime.Unix() return timeUnix } //Generating cron expressions by date time func GetCronStr(date time.Time) string { dateFormat := "05 04 15 02 01 ?" format := date.Format(dateFormat) return format } //Get time string func GetTimeString(t time.Time, timeLayout string) string { return t.Format(timeLayout) } // Timestamp to Seconds func GetTimeUnix(t time.Time) int64 { return t.Unix() } // Timestamp to milliseconds func GetTimeMills(t time.Time) int64 { return t.UnixNano() / 1e6 } // Time stamp to time func GetTimeByInt(t1 int64) time.Time { return time.Unix(t1, 0) } // Compare the two time sizes func CompareTime(t1, t2 time.Time) bool { return t1.Before(t2) } // How many hours is the difference between the two func GetHourDiffer(startTime, endTime string) float32 { var hour float32 t1, err := time.ParseInLocation(DEFAULTTIMEFORMAT, startTime, time.Local) t2, err := time.ParseInLocation(TIMEFORMAT, endTime, time.Local) if err == nil && CompareTime(t1, t2) { diff := GetTimeUnix(t2) - GetTimeUnix(t1) hour = float32(diff) / 3600 return hour } return hour } // Judge whether the current time is on the hour func CheckHours() bool { _, m, s := GetNowTime().Clock() if m == s && m == 0 && s == 0 { return true } return false }
time/timeutil.go
0.783243
0.514827
timeutil.go
starcoder
package dist import ( "github.com/jesand/stats" "math" ) // An ID for a particular outcome in a space type Outcome int // A set of outcomes for some probability measure. type Space interface { // Ask whether the space is the same as some other space Equals(other Space) bool } // Methods contained by spaces over real values type RealLikeSpace interface { // The infimum (min) value in the space, or negative infinity Inf() float64 // The supremum (max) value in the space, or positive infinity Sup() float64 } // A subset of the reals type RealSpace interface { Space RealLikeSpace } // The space of reals greater than zero type positiveRealSpace struct{} // The canonical instance of positiveRealSpace var PositiveRealSpace positiveRealSpace // The infimum (min) value in the space, or negative infinity func (sp positiveRealSpace) Inf() float64 { return 0 } // The supremum (max) value in the space, or positive infinity func (sp positiveRealSpace) Sup() float64 { return math.Inf(+1) } // Ask whether the space is the same as some other space func (sp positiveRealSpace) Equals(other Space) bool { if _, ok := other.(*positiveRealSpace); ok { return true } else if _, ok := other.(positiveRealSpace); ok { return true } return false } // Create a new RealIntervalSpace with the specified bounds func NewRealIntervalSpace(min, max float64) *RealIntervalSpace { return &RealIntervalSpace{ Min: min, Max: max, } } // A subset of the reals on a continuous closed interval type RealIntervalSpace struct { Min, Max float64 } // The infimum (min) value in the space, or negative infinity func (space RealIntervalSpace) Inf() float64 { return space.Min } // The supremum (max) value in the space, or positive infinity func (space RealIntervalSpace) Sup() float64 { return space.Max } // Ask whether the space is the same as some other space func (sp RealIntervalSpace) Equals(other Space) bool { ris, ok := other.(*RealIntervalSpace) if !ok { ri, ok := other.(RealIntervalSpace) if !ok { return false } 
else { ris = &ri } } return sp.Min == ris.Min && sp.Max == ris.Max } // The space of all reals var AllRealSpace = RealIntervalSpace{Min: math.Inf(-1), Max: math.Inf(+1)} // The canonical unit interval space var UnitIntervalSpace = RealIntervalSpace{Min: 0, Max: 1} // A sample space over a discrete set type DiscreteSpace interface { // Every discrete space is a space Space // Returns the number of outcomes in the space if finite, and // returns -1 if infinite. Size() int } // A discrete subset of the reals type DiscreteRealSpace interface { DiscreteSpace RealLikeSpace // The real value of an outcome F64Value(outcome Outcome) float64 // The outcome corresponding to a real value Outcome(value float64) Outcome } // A sample space over boolean outcomes type booleanSpace struct{} // The canonical instance of booleanSpace var BooleanSpace booleanSpace // The infimum (min) value in the space, or negative infinity func (sp booleanSpace) Inf() float64 { return 0 } // The supremum (max) value in the space, or positive infinity func (sp booleanSpace) Sup() float64 { return 1 } // Ask whether the space is the same as some other space func (sp booleanSpace) Equals(other Space) bool { if _, ok := other.(*booleanSpace); ok { return true } else if _, ok := other.(booleanSpace); ok { return true } return false } // Return the cardinality of the space func (sp booleanSpace) Size() int { return 2 } // The real value of an outcome func (sp booleanSpace) F64Value(outcome Outcome) float64 { if outcome == 0 { return 0.0 } else { return 1.0 } } // The outcome corresponding to a real value func (sp booleanSpace) Outcome(value float64) Outcome { if value == 0.0 { return 0 } else { return 1 } } // Return the specified outcome as a boolean func (sp booleanSpace) BoolValue(outcome Outcome) bool { return outcome != 0 } // Return the outcome corresponding to the provided boolean value func (sp booleanSpace) BoolOutcome(value bool) Outcome { if value { return 1 } else { return 0 } } // A discrete 
space over arbitrary objects type DiscreteObjectSpace struct { // The objects which the space is over Objects []interface{} } // Ask whether the space is the same as some other space func (sp DiscreteObjectSpace) Equals(other Space) bool { var sp2 *DiscreteObjectSpace if s, ok := other.(*DiscreteObjectSpace); ok { sp2 = s } else if s, ok := other.(DiscreteObjectSpace); ok { sp2 = &s } else { return false } if len(sp.Objects) != len(sp2.Objects) { return false } for i, v := range sp.Objects { if v != sp2.Objects[i] { return false } } return true } // Returns the number of outcomes in the space if finite, and // returns -1 if infinite. func (sp DiscreteObjectSpace) Size() int { return len(sp.Objects) } func (sp DiscreteObjectSpace) Outcome(val interface{}) Outcome { for i, v := range sp.Objects { if v == val { return Outcome(i) } } panic(stats.ErrfValNotInDomain(val)) } func (sp DiscreteObjectSpace) Value(outcome Outcome) interface{} { if int(outcome) < 0 || int(outcome) >= len(sp.Objects) { panic(stats.ErrfNotInDomain(int(outcome))) } return sp.Objects[int(outcome)] }
dist/space.go
0.847179
0.507446
space.go
starcoder
package color import ( "fmt" "math" ) const ( epsilon float64 = 216.0 / 24389.0 kappa float64 = 24389.0 / 27.0 ) // 3 компонента для определения цветов type Point [3]float64 type Matrix [9]float64 func (p Point) mulMatrix(m *Matrix) (result Point) { result[0] = p[0]*m[0] + p[1]*m[1] + p[2]*m[2] result[1] = p[0]*m[3] + p[1]*m[4] + p[2]*m[5] result[2] = p[0]*m[6] + p[1]*m[7] + p[2]*m[8] return } func sqr(v float64) float64 { return v * v } func (rgb RGBColor) String() string { return fmt.Sprintf("RGB:{%d %d %d}", rgb.R(), rgb.G(), rgb.B()) } func (lab LabColor) String() string { return fmt.Sprintf("Lab:{%4.7f %4.7f %4.7f}", lab[0], lab[1], lab[2]) } func (xyz XYZColor) String() string { return fmt.Sprintf("XYZ:{%4.7f %4.7f %4.7f}", xyz[0], xyz[1], xyz[2]) } //Observer = 2°, Illuminant = D65 // Излучатель и матрицы конвертации задают параметры конвертации между цветовыми пространствами RGB и XYZ. По умолчанию установлены на преобразование в пространство sRGB с излучателем D65 и наблюдателем под углов в 2 градуса var ( // цветовые координаты излучателя белого света D65 в пространстве XYZ WhitePoint = [3]float64{0.95047, 1.0, 1.08883} // матрица конвертации из пространства sRGB в пространство XYZ ConvertMatrix = Matrix{ 0.412383, 0.357585, 0.18048, 0.212635, 0.71517, 0.072192, 0.01933, 0.119195, 0.950528, } // матрица конвертации из пространства XYZ в пространство sRGB InverseMatrix = Matrix{ 3.24103, -1.53741, -0.49862, -0.969242, 1.87596, 0.041555, 0.055632, -0.203979, 1.05698, } ) // коэффициэнт масштабирования RGB var RGBScale = 255.0 // RGBColor определяет цвет в пространстве RGB, хранящийся в виде 3-х значений в диапазоне от 0 до 1 type RGBColor Point // R возвращает Red-компоненту RGB-цвета в диапазоне от 0 до RGBScale func (rgb RGBColor) R() int { return int(rgb[0] * RGBScale) } // G возвращает Green-компоненту RGB-цвета в диапазоне от 0 до RGBScale func (rgb RGBColor) G() int { return int(rgb[1] * RGBScale) } // B возвращает Blie-компоненту RGB-цвета в 
диапазоне от 0 до RGBScale func (rgb RGBColor) B() int { return int(rgb[2] * RGBScale) } // NewRGB создает RGB-цвет на основе 3-х компонент со значениями от 0 до RGBScale func NewRGB(r, g, b int) (rgb RGBColor) { rgb[0] = float64(r) / RGBScale rgb[1] = float64(g) / RGBScale rgb[2] = float64(b) / RGBScale return } // RGB конвертирует цвет в пространство RGB func (rgb RGBColor) RGB() RGBColor { return rgb } // XYZ конвертирует цвет в пространство XYZ func (rgb RGBColor) XYZ() XYZColor { // linearization f := func(n float64) float64 { if n > 0.04045 { return math.Exp(2.4 * math.Log((n+0.055)/1.055)) } else { return n / 12.92 } } p := Point{f(rgb[0]), f(rgb[1]), f(rgb[2])}.mulMatrix(&ConvertMatrix) return XYZColor(p) } // Lab конвертирует цвет в пространство Lab func (rgb RGBColor) Lab() LabColor { return rgb.XYZ().Lab() } type XYZColor Point // RGB конвертирует цвет в пространство RGB func (xyz XYZColor) RGB() RGBColor { // unlinearization f := func(n float64) float64 { if n > 0.0031308 { return 1.055*math.Exp((1/2.4)*math.Log(n)) - 0.055 } else { return 12.92 * n } } p := Point(xyz).mulMatrix(&InverseMatrix) p[0] = f(p[0]) p[1] = f(p[1]) p[2] = f(p[2]) return RGBColor(p) } // XYZ конвертирует цвет в пространство XYZ func (xyz XYZColor) XYZ() XYZColor { return xyz } // Lab конвертирует цвет в пространство Lab func (xyz XYZColor) Lab() LabColor { f := func(n float64) float64 { if n > epsilon { return math.Pow(n, 1.0/3.0) } else { return (kappa*n + 16.0) / 116.0 } } x := f(xyz[0] / WhitePoint[0]) y := f(xyz[1] / WhitePoint[1]) z := f(xyz[2] / WhitePoint[2]) return LabColor{ (116.0 * y) - 16.0, 500.0 * (x - y), 200.0 * (y - z), } } type LabColor Point func (lab LabColor) L() float64 { return lab[0] } func (lab LabColor) A() float64 { return lab[1] } func (lab LabColor) B() float64 { return lab[2] } // RGB конвертирует цвет в пространство RGB func (lab LabColor) RGB() RGBColor { return lab.XYZ().RGB() } // XYZ конвертирует цвет в пространство XYZ func (lab LabColor) XYZ() 
XYZColor { f := func(n float64) float64 { if n3 := n * n * n; n3 > epsilon { return n3 } else { return (n*116.0 - 16.0) / kappa } } y := (lab[0] + 16.0) / 116.0 x := 0.002*lab[1] + y z := y - 0.005*lab[2] return XYZColor{ f(x) * WhitePoint[0], f(y) * WhitePoint[1], f(z) * WhitePoint[2], } } // Lab конвертирует цвет в пространство Lab func (lab LabColor) Lab() LabColor { return lab } func (lab LabColor) LCh() LChColor { c := math.Sqrt(sqr(lab[0]) + sqr(lab[2])) h := 180.0 * math.Atan2(lab[2], lab[1]) / math.Pi if h < 0.0 { h += 360.0 } return LChColor{lab[0], c, h} } type LChColor Point // RGB конвертирует цвет в пространство RGB func (lch LChColor) RGB() RGBColor { return lch.Lab().XYZ().RGB() } // XYZ конвертирует цвет в пространство XYZ func (lch LChColor) XYZ() XYZColor { return lch.Lab().XYZ() } // Lab конвертирует цвет в пространство Lab func (lch LChColor) Lab() LabColor { a := lch[1] * math.Cos(lch[2]*math.Pi/180.0) b := lch[1] * math.Sin(lch[2]*math.Pi/180.0) return LabColor{lch[0], a, b} } type Colorer interface { RGB() RGBColor XYZ() XYZColor Lab() LabColor } func ToRGB(color Colorer) RGBColor { switch c := color.(type) { case XYZColor: return c.RGB() case LabColor: return c.XYZ().RGB() case LChColor: return c.Lab().XYZ().RGB() } return color.(RGBColor) } type Comparer interface { Compare(c1, c2 Colorer) float64 } type CIE76 struct{} var DeltaCIE76 CIE76 // DeltaCIE76.Compare computes the CIE76 color difference. // This is just Euclidean distance in Lab space, and therefore quite fast, // though it exhibits perceptual uniformity issues especially in the blue and desaturated regions. func (CIE76) Compare(c1, c2 Colorer) float64 { lab1 := c1.Lab() lab2 := c2.Lab() return math.Sqrt(sqr(lab1[0]-lab2[0]) + sqr(lab1[1]-lab2[1]) + sqr(lab1[2]-lab2[2])) } type CIE94 struct { // struct for weighting factors for CIE94 ΔE calculation. KL, KC, Kh, K1, K2 float64 } // KLCH94GraphicArts are the weighting factors for CIE94 used for most uses except textiles. 
var DeltaCIE94GraphicArts = CIE94{1, 1, 1, 0.045, 0.015} // KLCH94Textiles are the weighting factors for CIE94 used for textiles. var DeltaCIE94Textiles = CIE94{2, 1, 1, 0.048, 0.014} // DeltaCIE94.Compare computes the CIE94 color difference of two L*a*b* colors. // This is a distance calculation with the addition of weighting factors specified by klch. func (cie94 CIE94) Compare(c1, c2 Colorer) float64 { lab1 := c1.Lab() lab2 := c2.Lab() dL := sqr(lab1.L() - lab2.L()) c1ab := math.Sqrt(sqr(lab1.A()) + sqr(lab1.B())) c2ab := math.Sqrt(sqr(lab2.A()) + sqr(lab2.B())) dC := sqr(c1ab - c2ab) dH := sqr(lab1.A()-lab2.A()) + sqr(lab1.B()-lab2.B()) - dC sC := 1.0 + cie94.K1*c1ab sH := 1.0 + cie94.K2*c1ab return math.Sqrt( dL/sqr(cie94.KL) + dC/sqr(cie94.KC*sC) + dH/sqr(cie94.Kh*sH)) } type CIE2000 struct { KL, KC, Kh float64 } // KLCHDefault is the most commonly used set of weighting parameters for CIEDE2000 var DeltaCIE2000 = CIE2000{1, 1, 1} // CIE2000 computes the CIEDE2000 delta-E for two L*a*b* space color coordinates // klch is for configuring the weighting factors, but this almost always should be KLCHDefault // Note that this implementation will exhibit slightly different behavior around the discontinuities // of the function (these are grey colors) compared to Java and most C runtimes. 
The golang atan // function has different accuracy characteristics compared to most Unix platforms and Java Strict math func (cie2k CIE2000) Compare(col1, col2 Colorer) float64 { lab1 := col1.Lab() lab2 := col2.Lab() lBarPrime := (lab1.L() + lab2.L()) * 0.5 c1 := math.Sqrt(lab1.A()*lab1.A() + lab1.B()*lab1.B()) c2 := math.Sqrt(lab2.A()*lab2.A() + lab2.B()*lab2.B()) cBar := (c1 + c2) * 0.5 cBar7 := cBar * cBar * cBar cBar7 *= cBar7 * cBar g := 0.5 * (1.0 - math.Sqrt(cBar7/(cBar7+6103515625.0))) // 25**7 a1Prime := (1.0 + g) * lab1.A() a2Prime := (1.0 + g) * lab2.A() c1Prime := math.Sqrt(a1Prime*a1Prime + lab1.B()*lab1.B()) c2Prime := math.Sqrt(a2Prime*a2Prime + lab2.B()*lab2.B()) cBarPrime := (c1Prime + c2Prime) * 0.5 h1Prime := math.Atan2(lab1.B(), a1Prime) if h1Prime < 0 { h1Prime += 2 * math.Pi } h2Prime := math.Atan2(lab2.B(), a2Prime) if h2Prime < 0 { h2Prime += 2 * math.Pi } hBarPrime := (h1Prime + h2Prime) * 0.5 dhPrime := h2Prime - h1Prime if math.Abs(dhPrime) > math.Pi { hBarPrime += math.Pi if h2Prime <= h1Prime { dhPrime += 2 * math.Pi } else { dhPrime -= 2 * math.Pi } } t := 1.0 - 0.17*math.Cos(hBarPrime-math.Pi/6) + 0.24*math.Cos(2.0*hBarPrime) + 0.32*math.Cos(3.0*hBarPrime+math.Pi/30) - 0.20*math.Cos(4.0*hBarPrime-63.0*math.Pi/180) dLPrime := lab2.L() - lab1.L() dCPrime := c2Prime - c1Prime dHPrime := 2.0 * math.Sqrt(c1Prime*c2Prime) * math.Sin(dhPrime/2.0) lBarPrimeM50Sqr := lBarPrime - 50.0 lBarPrimeM50Sqr *= lBarPrimeM50Sqr sL := 1.0 + (0.015*lBarPrimeM50Sqr)/math.Sqrt(20.0+lBarPrimeM50Sqr) sC := 1.0 + 0.045*cBarPrime sH := 1.0 + 0.015*cBarPrime*t hBarPrimeM := (180/math.Pi*hBarPrime - 275.0) / 25.0 dTheta := math.Pi / 6 * math.Exp(-hBarPrimeM*hBarPrimeM) cBarPrime7 := cBarPrime * cBarPrime * cBarPrime cBarPrime7 *= cBarPrime7 * cBarPrime rC := math.Sqrt(cBarPrime7 / (cBarPrime7 + 6103515625.0)) rT := -2.0 * rC * math.Sin(2.0*dTheta) return math.Sqrt( sqr(dLPrime/(cie2k.KL*sL)) + sqr(dCPrime/(cie2k.KC*sC)) + sqr(dHPrime/(cie2k.Kh*sH)) + 
(dCPrime/(cie2k.KC*sC))*(dHPrime/(cie2k.Kh*sH))*rT) } func Approximate(color Colorer, comparer Comparer) (float64, int) { best := 0 bestdist := 10000000.0 for i, applicant := range XtermLabPalette { if dist := comparer.Compare(color, applicant); dist < bestdist { best, bestdist = i, dist } } return bestdist, best + 17 } //color = round(36 * (r * 5) + 6 * (g * 5) + (b * 5) + 16) func HackApproximate(color Colorer) int { c := color.RGB() return int(36*(c[0]*5)+6*(c[1]*5)+c[2]*5) + 17 } var ( XtermRGBPalette [240]RGBColor XtermLabPalette [240]LabColor ) func init() { // calculate xterm 240 color RGB palette // calculate 6x6x6 color cube cubeLevels := [6]int{0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff} for r := 0; r < 6; r++ { for g := 0; g < 6; g++ { for b := 0; b < 6; b++ { XtermRGBPalette[r*36+g*6+b] = NewRGB(cubeLevels[r], cubeLevels[g], cubeLevels[b]) } } } // calculate grayscale ramp for i := 0; i < 24; i++ { XtermRGBPalette[i+216] = NewRGB(0x08+i*0xA, 0x08+i*0xA, 0x08+i*0xA) } // calculate xterm 240 color Lab palette for i, color := range XtermRGBPalette { XtermLabPalette[i] = color.Lab() } }
color/color.go
0.641871
0.47457
color.go
starcoder
package client

// V1beta1ApiServiceSpec contains information for locating and communicating with a server.
// Only https is supported, though you are able to disable certificate verification.
type V1beta1ApiServiceSpec struct {
	// CaBundle is a PEM encoded CA bundle which will be used to validate an API server's serving certificate.
	CaBundle string `json:"caBundle"`

	// Group is the API group name this server hosts.
	Group string `json:"group,omitempty"`

	// GroupPriorityMinimum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMinimum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s.
	GroupPriorityMinimum int32 `json:"groupPriorityMinimum"`

	// InsecureSkipTLSVerify disables TLS certificate verification when communicating with this server. This is strongly discouraged. You should use the CABundle instead.
	InsecureSkipTLSVerify bool `json:"insecureSkipTLSVerify,omitempty"`

	// Service is a reference to the service for this API server. It must communicate on port 443. If the Service is nil, that means the handling for the API groupversion is handled locally on this server. The call will simply delegate to the normal handler chain to be fulfilled.
	Service *ApiregistrationV1beta1ServiceReference `json:"service"`

	// Version is the API version this server hosts. For example, "v1".
	Version string `json:"version,omitempty"`

	// VersionPriority controls the ordering of this API version inside of its group. Must be greater than zero. The primary sort is based on VersionPriority, ordered highest to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) Since it's inside of a group, the number can be small, probably in the 10s.
	VersionPriority int32 `json:"versionPriority"`
}
vendor/github.com/kubernetes-client/go/kubernetes/client/v1beta1_api_service_spec.go
0.726037
0.419351
v1beta1_api_service_spec.go
starcoder
package skate import "strings" type runestring []rune // A safe way to index a runestring. It will return a null rune if you try // to index outside of the bounds of the runestring. func (r *runestring) SafeAt(pos int) rune { if pos < 0 || pos >= len(*r) { return 0 } else { return (*r)[pos] } } // A safe way to obtain a substring of a runestring. It will return a null // string ("") if you index somewhere outside its bounds. func (r *runestring) SafeSubstr(pos int, length int) string { if pos < 0 || pos > len(*r) || (pos+length) > len(*r) { return "" } else { return string((*r)[pos : pos+length]) } } // Delete characters at positions pos. It will do nothing if you provide // an index outside the bounds of the runestring. func (r *runestring) Del(pos ...int) { for _, i := range pos { if i >= 0 && i <= len(*r) { *r = append((*r)[:i], (*r)[i+1:]...) } } } // A helper to determine if any substrings exist within the given runestring. func (r *runestring) Contains(start int, length int, criteria ...string) bool { substring := r.SafeSubstr(start, length) for _, c := range criteria { if substring == c { return true } } return false } func cleanInput(input string) string { return strings.ToUpper(strings.TrimSpace(input)) } func isVowelNoY(c rune) bool { switch c { case 'A', 'E', 'I', 'O', 'U': return true default: return false } } // NYSIIS computes the NYSIIS phonetic encoding of the input string. It is a // modification of the traditional Soundex algorithm. func NYSIIS(s1 string) string { cleans1 := runestring(cleanInput(s1)) input := runestring(make([]rune, 0, len(s1))) // The output can't be larger than the string itself output := runestring(make([]rune, 0, len(s1))) // 0. Remove all non-ASCII characters for _, v := range cleans1 { if v >= 65 && v <= 90 { input = append(input, v) } } if len(input) == 0 { return "" } // 1. 
Transcoding first characters switch input[0] { case 'M': if input.SafeSubstr(0, 3) == "MAC" { // MAC -> MCC input[1] = 'C' } case 'K': if input.SafeSubstr(0, 2) == "KN" { // KN -> NN input[0] = 'N' } else { // K -> C input[0] = 'C' } case 'P': next := input.SafeAt(1) if next == 'H' { // PH -> FF input[0] = 'F' input[1] = 'F' } else if next == 'F' { // PF -> FF input[0] = 'F' } case 'S': if input.SafeSubstr(0, 3) == "SCH" { input[1] = 'S' input[2] = 'S' } } // 2. Transcoding last characters switch input.SafeSubstr(len(input)-2, 2) { case "EE", "IE": // EE, IE -> Y input.Del(len(input) - 2) input[len(input)-1] = 'Y' case "DT", "RT", "RD", "NT", "ND": // DT, RT, RD, NT, ND -> D input.Del(len(input) - 2) input[len(input)-1] = 'D' } // 3. First character of key = first character of name output = append(output, input[0]) last := input[0] for i := 1; i < len(input); i++ { c := input[i] switch c { case 'A', 'I', 'O', 'U': // A, E, I, O, U -> A (E is separate) input[i] = 'A' case 'E': // EV -> AF, else A if input.SafeAt(i+1) == 'V' { input[i+1] = 'F' } input[i] = 'A' case 'Q': // Q -> G input[i] = 'G' case 'Z': // Z -> S input[i] = 'S' case 'M': // M -> N input[i] = 'N' case 'K': // KN -> N, else K -> C if input.SafeAt(i+1) == 'N' { input.Del(i) } else { input[i] = 'C' } case 'S': // SCH -> SSS if input.SafeSubstr(i, 3) == "SCH" { input[i+1] = 'S' input[i+2] = 'S' } case 'P': // PH -> FF if input.SafeAt(i+1) == 'H' { input[i] = 'F' input[i+1] = 'F' } case 'H': // H -> $(previous character) if previous character or // next character is a non-vowel prev := input.SafeAt(i - 1) next := input.SafeAt(i + 1) if !isVowelNoY(prev) || !isVowelNoY(next) { input[i] = prev } case 'W': prev := input.SafeAt(i - 1) if isVowelNoY(prev) { input[i] = prev } } if input[i] != last && input[i] != 0 { output = append(output, input[i]) } last = input[i] } // have to be careful here because we've already added the first // key value if len(output) > 1 { // remove trailing s if 
output.SafeAt(len(output)-1) == 'S' { output.Del(len(output) - 1) } // trailing AY -> Y if len(output) > 2 && output.SafeSubstr(len(output)-2, 2) == "AY" { output.Del(len(output) - 2) } // trailing A -> remove it if output.SafeAt(len(output)-1) == 'A' { output.Del(len(output) - 1) } } if len(output) > 6 { return string(output[0:6]) } else { return string(output) } }
nysiis.go
0.547222
0.447098
nysiis.go
starcoder
package bls377

// GT target group of the pairing
type GT = e12

// lineEvaluation holds the three e2 coefficients of a line evaluated at a G1 point.
type lineEvaluation struct {
	r0 e2
	r1 e2
	r2 e2
}

// FinalExponentiation computes the final expo x**(p**6-1)(p**2+1)(p**4 - p**2 +1)/r.
// Any additional _z arguments are multiplied into z before exponentiating.
func FinalExponentiation(z *GT, _z ...*GT) GT {
	var result GT
	result.Set(z)
	for _, e := range _z {
		result.Mul(&result, e)
	}
	result.FinalExponentiation(&result)
	return result
}

// FinalExponentiation sets z to the final expo x**((p**12 - 1)/r), returns z.
func (z *GT) FinalExponentiation(x *GT) *GT {
	// Addition-chain schedule from https://eprint.iacr.org/2016/130.pdf
	var result GT
	result.Set(x)

	// memalloc: scratch registers for the hard part
	var t [6]GT

	// easy part: x**((p**6 - 1)(p**2 + 1))
	t[0].Conjugate(&result)
	result.Inverse(&result)
	t[0].Mul(&t[0], &result)
	result.FrobeniusSquare(&t[0]).
		Mul(&result, &t[0])

	// hard part (up to permutation)
	t[0].InverseUnitary(&result).Square(&t[0])
	t[5].expt(&result)
	t[1].CyclotomicSquare(&t[5])
	t[3].Mul(&t[0], &t[5])
	t[0].expt(&t[3])
	t[2].expt(&t[0])
	t[4].expt(&t[2])
	t[4].Mul(&t[1], &t[4])
	t[1].expt(&t[4])
	t[3].InverseUnitary(&t[3])
	t[1].Mul(&t[3], &t[1])
	t[1].Mul(&t[1], &result)
	t[0].Mul(&t[0], &result)
	t[0].FrobeniusCube(&t[0])
	t[3].InverseUnitary(&result)
	t[4].Mul(&t[3], &t[4])
	t[4].Frobenius(&t[4])
	t[5].Mul(&t[2], &t[5])
	t[5].FrobeniusSquare(&t[5])
	t[5].Mul(&t[5], &t[0])
	t[5].Mul(&t[5], &t[4])
	t[5].Mul(&t[5], &t[1])
	result.Set(&t[5])
	z.Set(&result)
	return z
}

// MillerLoop Miller loop. Line evaluations are precomputed concurrently by
// preCompute and consumed in order; the buffered channel paces production.
func MillerLoop(P G1Affine, Q G2Affine) *GT {
	var result GT
	result.SetOne()

	// the pairing of a point at infinity is 1
	if P.IsInfinity() || Q.IsInfinity() {
		return &result
	}

	ch := make(chan struct{}, 20)

	var evaluations [69]lineEvaluation
	go preCompute(&evaluations, &Q, &P, ch)

	j := 0
	for i := len(loopCounter) - 2; i >= 0; i-- {
		result.Square(&result)
		<-ch // wait until evaluations[j] is ready
		result.mulAssign(&evaluations[j])
		j++
		if loopCounter[i] == 1 {
			<-ch
			result.mulAssign(&evaluations[j])
			j++
		}
	}

	return &result
}

// lineEval computes the evaluation of the line through Q, R (on the twist) at P.
// Q, R are in jacobian coordinates.
func lineEval(Q, R *G2Jac, P *G1Affine, result *lineEvaluation) {
	// converts _Q and _R to projective coords
	var _Q, _R g2Proj
	_Q.FromJacobian(Q)
	_R.FromJacobian(R)

	result.r1.Mul(&_Q.y, &_R.z)
	result.r0.Mul(&_Q.z, &_R.x)
	result.r2.Mul(&_Q.x, &_R.y)

	_Q.z.Mul(&_Q.z, &_R.y)
	_Q.x.Mul(&_Q.x, &_R.z)
	_Q.y.Mul(&_Q.y, &_R.x)

	result.r1.Sub(&result.r1, &_Q.z)
	result.r0.Sub(&result.r0, &_Q.x)
	result.r2.Sub(&result.r2, &_Q.y)

	// scale by the affine coordinates of P
	result.r1.MulByElement(&result.r1, &P.X)
	result.r0.MulByElement(&result.r0, &P.Y)
}

// mulAssign multiplies z by the sparse GT element described by l.
func (z *GT) mulAssign(l *lineEvaluation) *GT {
	var a, b, c GT
	a.MulByVW(z, &l.r1)
	b.MulByV(z, &l.r0)
	c.MulByV2W(z, &l.r2)
	z.Add(&a, &b).Add(z, &c)
	return z
}

// preCompute precomputes the line evaluations used during the Miller loop.
// It signals on ch after each evaluation is written so MillerLoop can consume
// them in lockstep.
func preCompute(evaluations *[69]lineEvaluation, Q *G2Affine, P *G1Affine, ch chan struct{}) {
	var Q1, Q2, Qbuf G2Jac
	Q1.FromAffine(Q)
	Q2.FromAffine(Q)
	Qbuf.FromAffine(Q)

	j := 0

	for i := len(loopCounter) - 2; i >= 0; i-- {
		Q1.Set(&Q2)
		Q2.Double(&Q1).Neg(&Q2)
		lineEval(&Q1, &Q2, P, &evaluations[j]) // f(P), div(f) = 2(Q1)+(-2Q2)-3(O)
		ch <- struct{}{}
		Q2.Neg(&Q2)
		j++

		if loopCounter[i] == 1 {
			lineEval(&Q2, &Qbuf, P, &evaluations[j]) // f(P), div(f) = (Q2)+(Q)+(-Q2-Q)-3(O)
			ch <- struct{}{}
			Q2.AddMixed(Q)
			j++
		}
	}
	close(ch)
}

// MulByVW set z to x*(y*v*w) and return z
// here y*v*w means the GT element with C1.B1=y and all other components 0
func (z *GT) MulByVW(x *GT, y *e2) *GT {
	var result GT
	var yNR e2
	yNR.MulByNonResidue(y)
	result.C0.B0.Mul(&x.C1.B1, &yNR)
	result.C0.B1.Mul(&x.C1.B2, &yNR)
	result.C0.B2.Mul(&x.C1.B0, y)
	result.C1.B0.Mul(&x.C0.B2, &yNR)
	result.C1.B1.Mul(&x.C0.B0, y)
	result.C1.B2.Mul(&x.C0.B1, y)
	z.Set(&result)
	return z
}

// MulByV set z to x*(y*v) and return z
// here y*v means the GT element with C0.B1=y and all other components 0
func (z *GT) MulByV(x *GT, y *e2) *GT {
	var result GT
	var yNR e2
	yNR.MulByNonResidue(y)
	result.C0.B0.Mul(&x.C0.B2, &yNR)
	result.C0.B1.Mul(&x.C0.B0, y)
	result.C0.B2.Mul(&x.C0.B1, y)
	result.C1.B0.Mul(&x.C1.B2, &yNR)
	result.C1.B1.Mul(&x.C1.B0, y)
	result.C1.B2.Mul(&x.C1.B1, y)
	z.Set(&result)
	return z
}
// MulByV2W set z to x*(y*v^2*w) and return z
// here y*v^2*w means the GT element with C1.B2=y and all other components 0
func (z *GT) MulByV2W(x *GT, y *e2) *GT {
	var result GT
	var yNR e2
	yNR.MulByNonResidue(y)
	result.C0.B0.Mul(&x.C1.B0, &yNR)
	result.C0.B1.Mul(&x.C1.B1, &yNR)
	result.C0.B2.Mul(&x.C1.B2, &yNR)
	result.C1.B0.Mul(&x.C0.B1, &yNR)
	result.C1.B1.Mul(&x.C0.B2, &yNR)
	result.C1.B2.Mul(&x.C0.B0, y)
	z.Set(&result)
	return z
}

// expt set z to x^t in GT and return z
func (z *GT) expt(x *GT) *GT {
	// const tAbsVal uint64 = 9586122913090633729
	// tAbsVal in binary: 1000010100001000110000000000000000000000000000000000000000000001
	// drop the low 46 bits (all 0 except the least significant bit): 100001010000100011 = 136227
	// Shortest addition chains can be found at https://wwwhomes.uni-bielefeld.de/achim/addition_chain.html

	var result, x33 GT

	// a shortest addition chain for 136227
	result.Set(x)                    // 0                1
	result.CyclotomicSquare(&result) // 1( 0)            2
	result.CyclotomicSquare(&result) // 2( 1)            4
	result.CyclotomicSquare(&result) // 3( 2)            8
	result.CyclotomicSquare(&result) // 4( 3)           16
	result.CyclotomicSquare(&result) // 5( 4)           32
	result.Mul(&result, x)           // 6( 5, 0)        33
	x33.Set(&result)                 // save x33 for step 14
	result.CyclotomicSquare(&result) // 7( 6)           66
	result.CyclotomicSquare(&result) // 8( 7)          132
	result.CyclotomicSquare(&result) // 9( 8)          264
	result.CyclotomicSquare(&result) // 10( 9)         528
	result.CyclotomicSquare(&result) // 11(10)        1056
	result.CyclotomicSquare(&result) // 12(11)        2112
	result.CyclotomicSquare(&result) // 13(12)        4224
	result.Mul(&result, &x33)        // 14(13, 6)     4257
	result.CyclotomicSquare(&result) // 15(14)        8514
	result.CyclotomicSquare(&result) // 16(15)       17028
	result.CyclotomicSquare(&result) // 17(16)       34056
	result.CyclotomicSquare(&result) // 18(17)       68112
	result.Mul(&result, x)           // 19(18, 0)    68113
	result.CyclotomicSquare(&result) // 20(19)      136226
	result.Mul(&result, x)           // 21(20, 0)   136227

	// the remaining 46 bits: square 46 times, then multiply by x for the
	// low set bit of tAbsVal
	for i := 0; i < 46; i++ {
		result.CyclotomicSquare(&result)
	}
	result.Mul(&result, x)

	z.Set(&result)
	return z
}
bls377/pairing.go
0.72952
0.466359
pairing.go
starcoder
package date

import (
	"database/sql/driver"
	"fmt"
	"time"
)

// Date represents a calendar day (and thus has day precision). It's stored as
// the number of days since the epoch date 1970-01-01. It can be negative, to
// represent dates before the epoch. Because it's a date, it doesn't exist
// within any particular timezone.
type Date int

// FromTime creates a Date by truncating away the time of day portion of a
// time.Time.
func FromTime(t time.Time) Date {
	// Rebuild the timestamp at UTC midnight of t's calendar day so the
	// division by 86400 seconds below yields a whole number of days.
	t = time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, time.UTC)
	return Date(t.Unix() / (60 * 60 * 24))
}

// FromString creates a date from its ISO8601 (YYYY-MM-DD) representation.
func FromString(str string) (Date, error) {
	t, err := time.Parse("2006-01-02", str)
	if err != nil {
		return 0, err
	}
	return FromTime(t), nil
}

// MustFromString creates a date from its ISO8601 (YYYY-MM-DD) representation.
// It panics if str is not in the right format.
func MustFromString(str string) Date {
	d, err := FromString(str)
	if err != nil {
		panic(err)
	}
	return d
}

// Today gives today's date (in the process's local timezone).
func Today() Date {
	return FromTime(time.Now())
}

// Yesterday gives yesterday's date.
func Yesterday() Date {
	return Today() - 1
}

// Tomorrow gives tomorrow's date.
func Tomorrow() Date {
	return Today() + 1
}

// TodayIn gives today's date in given timezone.
func TodayIn(loc *time.Location) Date {
	return FromTime(time.Now().In(loc))
}

// YesterdayIn gives yesterday's date in given timezone.
func YesterdayIn(loc *time.Location) Date {
	return TodayIn(loc) - 1
}

// TomorrowIn gives tomorrow's date in given timezone.
func TomorrowIn(loc *time.Location) Date {
	return TodayIn(loc) + 1
}

// String returns the ISO8601 representation (YYYY-MM-DD).
func (d Date) String() string {
	t := d.Time()
	return fmt.Sprintf("%04d-%02d-%02d", t.Year(), t.Month(), t.Day())
}

// Time returns a time.Time at midnight at the start of the date in the UTC
// timezone.
func (d Date) Time() time.Time {
	return d.TimeIn(time.UTC)
}

// TimeIn returns a time.Time at midnight at the start of the date in the
// Location specified.
func (d Date) TimeIn(loc *time.Location) time.Time {
	t := time.Date(1970, time.January, 1, 0, 0, 0, 0, loc)
	return t.AddDate(0, 0, int(d))
}

// AddMonths adds the number of specified months to create a new date. Dates are
// normalized in the same way as `AddDate` in the `time` package.
func (d Date) AddMonths(months int) Date {
	return FromTime(d.Time().AddDate(0, months, 0))
}

// AddYears adds the number of specified years to create a new date. Dates are
// normalized in the same way as `AddDate` in the `time` package.
func (d Date) AddYears(years int) Date {
	return FromTime(d.Time().AddDate(years, 0, 0))
}

// AddDays adds the number of specified days to create a new date. Since Dates
// are just integers representing the number of days since 1970-01-01, the
// usual `+` operator can be used instead.
func (d Date) AddDays(days int) Date {
	return d + Date(days)
}

// StartOfMonth gives the date that is the 1st day of the current month.
func (d Date) StartOfMonth() Date {
	t := d.Time()
	return FromTime(time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, time.UTC))
}

// EndOfMonth gives the date that is the last day of the current month.
func (d Date) EndOfMonth() Date {
	return d.StartOfMonth().AddMonths(1).AddDays(-1)
}

// DaysInMonth gives the number of days in the current month.
func (d Date) DaysInMonth() int {
	return d.EndOfMonth().Day()
}

// StartOfQuarter gives the date that is the 1st day of the current quarter
// (starting in Jan, Apr, Jul, or Oct).
func (d Date) StartOfQuarter() Date {
	t := d.Time()
	m := t.Month()
	// walk backwards to the first month of the quarter
	for !startOfQuarterMonth(m) {
		m--
	}
	return FromTime(time.Date(t.Year(), m, 1, 0, 0, 0, 0, time.UTC))
}

// StartOfNextQuarter gives the date that is the 1st day of the next quarter
// (starting in Jan, Apr, Jul, or Oct).
func (d Date) StartOfNextQuarter() Date {
	t := d.Time()
	m := t.Month()
	y := t.Year()
	// walk forwards (wrapping the year at December) to the next quarter month
	for {
		if m == time.December {
			m = time.January
			y++
		} else {
			m++
		}
		if startOfQuarterMonth(m) {
			break
		}
	}
	return FromTime(time.Date(y, m, 1, 0, 0, 0, 0, time.UTC))
}

// startOfQuarterMonth reports whether m is Jan, Apr, Jul, or Oct.
func startOfQuarterMonth(m time.Month) bool {
	return (m-1)%3 == 0
}

// Day gives the day of the month (1-31).
func (d Date) Day() int {
	return d.Time().Day()
}

// Month gives the month the date is in.
func (d Date) Month() time.Month {
	return d.Time().Month()
}

// Year gives the year the date is in.
func (d Date) Year() int {
	return d.Time().Year()
}

// Weekday gives the day of the week that the date falls on.
func (d Date) Weekday() time.Weekday {
	return d.Time().Weekday()
}

// YearDay gives how many days into the year the date is (1-365).
func (d Date) YearDay() int {
	return d.Time().YearDay()
}

// MarshalJSON marshals the date into a JSON string in ISO8601 format.
func (d Date) MarshalJSON() ([]byte, error) {
	return []byte(`"` + d.String() + `"`), nil
}

// UnmarshalJSON unmarshals a JSON string in the ISO8601 format into a date.
func (d *Date) UnmarshalJSON(p []byte) error {
	if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' {
		return fmt.Errorf("could not unmarshal JSON into Date: value is not a string")
	}
	var err error
	*d, err = FromString(string(p)[1 : len(p)-1])
	return err
}

// Scan implements the sql.Scanner interface, allowing the sql package to read
// sql dates into Date.
func (d *Date) Scan(src interface{}) error {
	t, ok := src.(time.Time)
	if !ok {
		return fmt.Errorf("can not scan as Date: %T", src)
	}
	*d = FromTime(t)
	return nil
}

// Value implements the driver.Valuer interface, allowing sql drivers to send
// Dates to sql databases.
func (d Date) Value() (driver.Value, error) {
	return d.String(), nil
}
date.go
0.853165
0.54153
date.go
starcoder
package goTrie import "unicode/utf8" // Trie defines a Trie node representation type Trie struct { end bool letters map[rune]*Trie children uint32 } // New initializes new Trie object with attributes default values func New() *Trie { return &Trie{ end: false, letters: make(map[rune]*Trie), children: 0, } } // Children returns number of different suffixes, starting from this node. func (t *Trie) Children() uint32 { if t == nil { return uint32(0) } return t.children } // IsWord returns true if the node is also indicator for a whole word, // false otherwise func (t *Trie) IsWord() bool { if t == nil { return false } return t.end } // Add adds a string to the Trie datastructure func (t *Trie) Add(s string) bool { if t == nil { return false } if len(s) == 0 { t.end = true return true } var childNode = t for pos := 0; pos < len(s); { letter, size := utf8.DecodeRuneInString(s[pos:]) pos += size if _, ok := childNode.letters[letter]; !ok { childNode.letters[letter] = New() } childNode = childNode.letters[letter] // TODO(marek): Currently adding same word twice will count it twice. // Perhaps it's not what we really want. childNode.children++ } childNode.end = true return true } // Get checks if the string is a word stored in the Trie datastructure. func (t *Trie) Get(s string) *Trie { if t == nil { return nil } if len(s) == 0 { return t } var childNode = t var ok bool for pos := 0; pos < len(s); { letter, size := utf8.DecodeRuneInString(s[pos:]) pos += size if childNode, ok = childNode.letters[letter]; !ok { return nil } } return childNode } // Has looks for a word and return True if the word is present, false otherwise. 
func (t *Trie) Has(s string) bool { if t == nil { return false } node := t.Get(s) return node != nil && node.IsWord() } // GetWordsFromPrefix returns list of words starting with provided prefix func (t *Trie) GetWordsFromPrefix(s string) []string { result := make([]string, 0, 1) if t == nil { return result } node := t.Get(s) if node == nil { return result } result = node.getWordsFromPrefix(s) return result } // getWordsFromPrefix is internally used by GetWordsFromPrefix method. // It calls itself recursively and adds a word if the checked node is marked as end of the word func (t *Trie) getWordsFromPrefix(prefix string) []string { result := make([]string, 0, 0) if t.IsWord() { result = append(result, prefix) } for k, v := range t.letters { subresult := v.getWordsFromPrefix(prefix + string(k)) result = append(result, subresult...) } return result }
trie.go
0.549882
0.456046
trie.go
starcoder
package fuzzy import ( "math" "sort" "strings" "unicode/utf8" ) // Ratio computes a score of how close two unicode strings are // based on their Levenshtein edit distance. // Returns an integer score [0,100], higher score indicates // that strings are closer. func Ratio(s1, s2 string) int { return int(round(100 * floatRatio([]rune(s1), []rune(s2)))) } // PartialRatio computes a score of how close a string is with // the most similar substring from another string. // Order of arguments does not matter. // Returns an integer score [0,100], higher score indicates // that the string and substring are closer. func PartialRatio(s1, s2 string) int { shorter, longer := []rune(s1), []rune(s2) if len(shorter) > len(longer) { longer, shorter = shorter, longer } matchingBlocks := getMatchingBlocks(shorter, longer) bestScore := 0.0 for _, block := range matchingBlocks { longStart := block.dpos - block.spos if longStart < 0 { longStart = 0 } longEnd := longStart + len(shorter) if longEnd > len(longer) { longEnd = len(longer) } longSubStr := longer[longStart:longEnd] r := floatRatio(shorter, longSubStr) if r > .995 { return 100 } else if r > bestScore { bestScore = r } } return int(round(100 * bestScore)) } func floatRatio(chrs1, chrs2 []rune) float64 { lenSum := len(chrs1) + len(chrs2) if lenSum == 0 { return 0.0 } editDistance := optimizedEditDistance(chrs1, chrs2, 1) return float64(lenSum-editDistance) / float64(lenSum) } // QRatio computes a score similar to Ratio, except both strings are trimmed, // cleansed of non-ASCII characters, and case-standardized. func QRatio(s1, s2 string) int { return quickRatioHelper(s1, s2, true) } // UQRatio computes a score similar to Ratio, except both strings are trimmed // and case-standardized. 
func UQRatio(s1, s2 string) int { return quickRatioHelper(s1, s2, false) } func quickRatioHelper(s1, s2 string, asciiOnly bool) int { c1 := Cleanse(s1, asciiOnly) c2 := Cleanse(s2, asciiOnly) if len(c1) == 0 || len(c2) == 0 { return 0 } return Ratio(c1, c2) } // WRatio computes a score with the following steps: // 1. Cleanse both strings, remove non-ASCII characters. // 2. Take Ratio as baseline score. // 3. Run a few heuristics to determine whether partial ratios // should be taken. // 4. If partial ratios were determined to be necessary, // compute PartialRatio, PartialTokenSetRatio, and PartialTokenSortRatio. // Otherwise, compute TokenSortRatio and TokenSetRatio. // 5. Return the max of all computed ratios. func WRatio(s1, s2 string) int { return weightedRatioHelper(s1, s2, true) } // UWRatio computes a score similar to WRatio, except non-ASCII // characters are allowed. func UWRatio(s1, s2 string) int { return weightedRatioHelper(s1, s2, false) } func weightedRatioHelper(s1, s2 string, asciiOnly bool) int { c1 := Cleanse(s1, asciiOnly) c2 := Cleanse(s2, asciiOnly) if len(c1) == 0 || len(c2) == 0 { return 0 } unbaseScale := .95 partialScale := .9 baseScore := float64(Ratio(c1, c2)) lengthRatio := float64(utf8.RuneCountInString(c1)) / float64(utf8.RuneCountInString(c2)) if lengthRatio < 1 { lengthRatio = 1 / lengthRatio } tryPartial := true if lengthRatio < 1.5 { tryPartial = false } if lengthRatio > 8 { partialScale = .6 } if tryPartial { partialScore := float64(PartialRatio(c1, c2)) * partialScale tokenSortScore := float64(PartialTokenSortRatio(c1, c2, asciiOnly, false)) * unbaseScale * partialScale tokenSetScore := float64(PartialTokenSetRatio(c1, c2, asciiOnly, false)) * unbaseScale * partialScale return int(round(max(baseScore, partialScore, tokenSortScore, tokenSetScore))) } tokenSortScore := float64(TokenSortRatio(c1, c2, asciiOnly, false)) * unbaseScale tokenSetScore := float64(TokenSetRatio(c1, c2, asciiOnly, false)) * unbaseScale return 
int(round(max(baseScore, tokenSortScore, tokenSetScore))) } func max(args ...float64) float64 { maxVal := args[0] for _, arg := range args { if arg > maxVal { maxVal = arg } } return maxVal } // TokenSortRatio computes a score similar to Ratio, except tokens // are sorted and (optionally) cleansed prior to comparison. func TokenSortRatio(s1, s2 string, opts ...bool) int { return tokenSortRatioHelper(s1, s2, false, opts...) } // PartialTokenSortRatio computes a score similar to PartialRatio, except tokens // are sorted and (optionally) cleansed prior to comparison. func PartialTokenSortRatio(s1, s2 string, opts ...bool) int { return tokenSortRatioHelper(s1, s2, true, opts...) } func tokenSortRatioHelper(s1, s2 string, partial bool, opts ...bool) int { asciiOnly, cleanse := false, false for i, val := range opts { switch i { case 0: asciiOnly = val case 1: cleanse = val } } sorted1 := tokenSort(s1, asciiOnly, cleanse) sorted2 := tokenSort(s2, asciiOnly, cleanse) if partial { return PartialRatio(sorted1, sorted2) } return Ratio(sorted1, sorted2) } func tokenSort(s string, asciiOnly, cleanse bool) string { if cleanse { s = Cleanse(s, asciiOnly) } else if asciiOnly { s = ASCIIOnly(s) } tokens := strings.Fields(s) sort.Strings(tokens) return strings.Join(tokens, " ") } // TokenSetRatio extracts tokens from each input string, adds // them to a set, construct strings of the form // <sorted intersection><sorted remainder>, takes the ratios // of those two strings, and returns the max. func TokenSetRatio(s1, s2 string, opts ...bool) int { return tokenSetRatioHelper(s1, s2, false, opts...) } // PartialTokenSetRatio extracts tokens from each input string, adds // them to a set, construct two strings of the form // <sorted intersection><sorted remainder>, takes the partial ratios // of those two strings, and returns the max. func PartialTokenSetRatio(s1, s2 string, opts ...bool) int { return tokenSetRatioHelper(s1, s2, true, opts...) 
} func tokenSetRatioHelper(s1, s2 string, partial bool, opts ...bool) int { asciiOnly, cleanse := false, false for i, val := range opts { switch i { case 0: asciiOnly = val case 1: cleanse = val } } if cleanse { s1 = Cleanse(s1, asciiOnly) s2 = Cleanse(s2, asciiOnly) } else if asciiOnly { s1 = ASCIIOnly(s1) s2 = ASCIIOnly(s2) } if len(s1) == 0 || len(s2) == 0 { return 0 } set1 := NewStringSet(strings.Fields(s1)) set2 := NewStringSet(strings.Fields(s2)) intersection := set1.Intersect(set2).ToSlice() diff1to2 := set1.Difference(set2).ToSlice() diff2to1 := set2.Difference(set1).ToSlice() sort.Strings(intersection) sort.Strings(diff1to2) sort.Strings(diff2to1) sortedIntersect := strings.TrimSpace(strings.Join(intersection, " ")) combined1to2 := strings.TrimSpace(sortedIntersect + " " + strings.Join(diff1to2, " ")) combined2to1 := strings.TrimSpace(sortedIntersect + " " + strings.Join(diff2to1, " ")) var ratioFunction func(string, string) int if partial { ratioFunction = PartialRatio } else { ratioFunction = Ratio } score := ratioFunction(sortedIntersect, combined1to2) if alt1 := ratioFunction(sortedIntersect, combined2to1); alt1 > score { score = alt1 } if alt2 := ratioFunction(combined1to2, combined2to1); alt2 > score { score = alt2 } return score } func round(x float64) float64 { if x < 0 { return math.Ceil(x - 0.5) } return math.Floor(x + 0.5) }
vendor/github.com/paul-mannino/go-fuzzywuzzy/fuzz.go
0.837188
0.465327
fuzz.go
starcoder
package rexfile

import (
	"math"
	"sort"

	"github.com/go-gl/mathgl/mgl32"
)

// NewCylinder returns a new cylinder mesh with the given radius and height
// (meters), plus a default material. id and matID become the mesh and
// material identifiers; the mesh references the material through matID.
func NewCylinder(id, matID uint64, radius float32, height float32) (Mesh, Material) {
	const numberOfSegments = 16

	mesh := Mesh{
		ID:         id,
		Name:       "Cylinder",
		Coords:     getCylinderCoords(radius, height, numberOfSegments),
		Triangles:  getCylinderTriangles(numberOfSegments),
		MaterialID: matID,
	}
	mat := NewMaterial(matID)
	mat.KdRgb = mgl32.Vec3{0.9, 0.7, 0.1} // default yellow-ish diffuse colour
	return mesh, mat
}

// getCylinderCoords builds the vertex list: the first numberOfSegments
// entries form the bottom circle (y = 0) and the next numberOfSegments
// entries the top circle (y = height), in matching angular order.
func getCylinderCoords(radius float32, height float32, numberOfSegments int) []mgl32.Vec3 {
	coords := make([]mgl32.Vec3, numberOfSegments*2)
	// The angular step is loop-invariant; hoisted out of the loop
	// (it was recomputed on every iteration).
	angle := (2 * math.Pi) / float64(numberOfSegments)
	for i := 0; i < numberOfSegments; i++ {
		coords[i] = mgl32.Vec3{
			radius * float32(math.Sin(float64(i)*angle)),
			0,
			radius * float32(math.Cos(float64(i)*angle)),
		}
		// Top-ring vertex directly above the bottom-ring one.
		coords[i+numberOfSegments] = mgl32.Vec3{
			coords[i].X(),
			height,
			coords[i].Z(),
		}
	}
	return coords
}

// getCylinderTriangles builds the index list: 2*n wall triangles plus
// 2*(n-2) cap triangles (n-2 per cap for the bottom and top polygons).
func getCylinderTriangles(numberOfSegments int) []Triangle {
	baseShapeVertexRange := numberOfSegments
	wallTriangles := make([]Triangle, baseShapeVertexRange*2)
	ti := 0
	for i := 0; i < baseShapeVertexRange; i++ {
		// Quad (a,b,c,d) between bottom-ring vertices (b,c) and top-ring
		// vertices (a,d), wrapping around at the last segment.
		a := i + baseShapeVertexRange
		b := i
		c := i + 1
		d := i + 1 + baseShapeVertexRange
		if d > numberOfSegments*2-1 {
			d = baseShapeVertexRange
			c = 0
		}
		wallTriangles[ti].V0 = uint32(a)
		wallTriangles[ti].V1 = uint32(b)
		wallTriangles[ti].V2 = uint32(c)
		ti++
		wallTriangles[ti].V0 = uint32(c)
		wallTriangles[ti].V1 = uint32(d)
		wallTriangles[ti].V2 = uint32(a)
		ti++
	}

	// Triangulate the caps by repeatedly pairing adjacent remaining ring
	// vertices: each pass emits one bottom and one top triangle per pair and
	// keeps every other vertex, until fewer than three vertices remain.
	capTriangles := make([]Triangle, baseShapeVertexRange*2-4)
	currentValidVertices := make([]int, baseShapeVertexRange)
	for i := range currentValidVertices {
		currentValidVertices[i] = i
	}
	ti = 0
	for len(currentValidVertices) > 2 {
		sort.Ints(currentValidVertices)
		var nextValidVertices []int
		for i := 0; i < len(currentValidVertices)-1; i += 2 {
			aIndex := 0
			if i+2 < len(currentValidVertices) {
				aIndex = i + 2
			}
			a := currentValidVertices[aIndex]
			b := currentValidVertices[i+1]
			c := currentValidVertices[i]
			// add bottom tri
			capTriangles[ti].V0 = uint32(a)
			capTriangles[ti].V1 = uint32(b)
			capTriangles[ti].V2 = uint32(c)
			ti++
			// add top tri (offset into the top ring, reversed winding)
			capTriangles[ti].V0 = uint32(c + baseShapeVertexRange)
			capTriangles[ti].V1 = uint32(b + baseShapeVertexRange)
			capTriangles[ti].V2 = uint32(a + baseShapeVertexRange)
			ti++
			// keep list of unique remaining vertices
			if !contains(nextValidVertices, a) {
				nextValidVertices = append(nextValidVertices, a)
			}
			if !contains(nextValidVertices, c) {
				nextValidVertices = append(nextValidVertices, c)
			}
		}
		currentValidVertices = nextValidVertices
	}
	return append(wallTriangles, capTriangles...)
}

// contains reports whether val occurs in arr (linear scan; the slices here
// hold at most numberOfSegments entries, so this is cheap).
func contains(arr []int, val int) bool {
	for _, av := range arr {
		if av == val {
			return true
		}
	}
	return false
}
encoding/rexfile/cylinder.go
0.688049
0.521288
cylinder.go
starcoder
package txn

import (
	"fmt"
	"strings"

	"github.com/pingcap/tipocket/pkg/elle/core"
)

// FilterExType is a predication on a cycle case
type FilterExType = func(cycleCase *core.CycleExplainerResult) bool

// CycleAnomalySpecType specifies different anomalies
type CycleAnomalySpecType struct {
	// A set of relationships which must intersect with every edge in the cycle.
	Rels map[core.Rel]struct{}
	// A set of relationships which must intersect with the first edge in the cycle.
	FirstRel core.Rel
	// A set of relationships which must intersect with remaining edges.
	RestRels map[core.Rel]struct{}
	// A predicate over a cycle explanation. We use this to restrict cycles to e.g. *just* G2 instead of G-single.
	FilterEx FilterExType
	// A predicate over a cycle
	With core.CyclePredicate
}

// CycleAnomalySpecs defines anomaly specs, keyed by anomaly name ("G0",
// "G1c", ...). Populated in init below.
var CycleAnomalySpecs map[string]CycleAnomalySpecType

// CycleTypeNames is the set of all cycle anomaly names, i.e. the keys of
// CycleAnomalySpecs plus the nonadjacent process/realtime variants.
var CycleTypeNames map[string]struct{}

// UnknownAnomalyTypes is the set of "could not analyze" outcomes that are
// reported but do not correspond to a concrete cycle anomaly.
var UnknownAnomalyTypes map[string]struct{}

// RealtimeAnalysisTypes saves types involving realtime edges.
var RealtimeAnalysisTypes map[string]struct{}

// ProcessAnalysisTypes saves types involving process edges
var ProcessAnalysisTypes map[string]struct{}

// fromRels builds a spec whose Rels set must intersect every cycle edge.
func fromRels(rels ...core.Rel) CycleAnomalySpecType {
	return fromRelsWithFilter(nil, rels...)
}

// fromRelsAndWith is fromRels plus a whole-cycle predicate.
func fromRelsAndWith(with core.CyclePredicate, rels ...core.Rel) CycleAnomalySpecType {
	r := fromRels(rels...)
	r.With = with
	return r
}

// fromRelsWithFilter builds a spec from an edge-relationship set and an
// optional predicate over the cycle explanation.
func fromRelsWithFilter(filter FilterExType, rels ...core.Rel) CycleAnomalySpecType {
	relsSet := map[core.Rel]struct{}{}
	for _, v := range rels {
		relsSet[v] = struct{}{}
	}
	return CycleAnomalySpecType{
		Rels:     relsSet,
		FilterEx: filter,
	}
}

// fromFirstRelAndRest constrains the first cycle edge separately from the
// remaining edges.
func fromFirstRelAndRest(first core.Rel, rests ...core.Rel) CycleAnomalySpecType {
	return fromFirstRelAndRestWithFilter(nil, first, rests...)
}

// fromFirstRelAndRestWithFilter is fromFirstRelAndRest plus an optional
// predicate over the cycle explanation.
func fromFirstRelAndRestWithFilter(filter FilterExType, first core.Rel, rests ...core.Rel) CycleAnomalySpecType {
	restSet := map[core.Rel]struct{}{}
	for _, v := range rests {
		restSet[v] = struct{}{}
	}
	return CycleAnomalySpecType{
		FirstRel: first,
		RestRels: restSet,
		FilterEx: filter,
	}
}

// buildFilterExByType returns a filter accepting only cycle explanations
// whose classified type equals required.
func buildFilterExByType(required string) FilterExType {
	return func(cr *core.CycleExplainerResult) bool {
		return string(cr.Typ) == required
	}
}

// nonadjacentRW ensures that no :rw is next to another by testing successive edge types.
// In addition, we ensure that the first edge in the cycle is not an rw.
// And we need more than one rw edge for this to count, otherwise it's G-single
func nonadjacentRW(trace []core.CycleTrace) bool {
	if len(trace) < 2 {
		return false
	}
	// ensure that the first edge in the cycle is not an rw
	// (seeding lastIsRw with true makes an rw first edge fail immediately)
	lastIsRw := true
	rwCount := 0
	for _, path := range trace {
		// An edge counts as rw only when rw is its sole relationship.
		rw := len(path.Rels) == 1 && path.Rels[0] == core.RW
		if lastIsRw && rw {
			return false
		}
		if rw {
			rwCount++
		}
		lastIsRw = rw
	}
	return rwCount > 1
}

// init populates the anomaly-spec tables and derives the name sets used by
// the analysis (process/realtime membership is decided by substring match).
func init() {
	CycleAnomalySpecs = map[string]CycleAnomalySpecType{
		"G0":            fromRels(core.WW),
		"G1c":           fromFirstRelAndRest(core.WR, core.WW, core.WR),
		"G-single":      fromFirstRelAndRest(core.RW, core.WW, core.WR),
		"G-nonadjacent": fromRelsAndWith(nonadjacentRW, core.WW, core.WR, core.RW),
		"G2-item":       fromFirstRelAndRestWithFilter(buildFilterExByType("G2-item"), core.RW, core.WR, core.RW, core.WW),

		"G0-process":       fromRelsWithFilter(buildFilterExByType("G0-process"), core.WW, core.Process),
		"G1c-process":      fromFirstRelAndRestWithFilter(buildFilterExByType("G1c-process"), core.WR, core.WW, core.WR, core.Process),
		"G-single-process": fromFirstRelAndRestWithFilter(buildFilterExByType("G-single-process"), core.RW, core.WW, core.WR, core.Process),
		"G2-item-process":  fromFirstRelAndRestWithFilter(buildFilterExByType("G2-item-process"), core.RW, core.WW, core.WR, core.RW, core.Process),

		// realtime
		"G0-realtime":       fromRelsWithFilter(buildFilterExByType("G0-realtime"), core.WW, core.Realtime),
		"G1c-realtime":      fromFirstRelAndRestWithFilter(buildFilterExByType("G1c-realtime"), core.WR, core.WW, core.WR, core.Realtime),
		"G-single-realtime": fromFirstRelAndRestWithFilter(buildFilterExByType("G-single-realtime"), core.RW, core.WW, core.WR, core.Realtime),
		"G2-item-realtime":  fromFirstRelAndRestWithFilter(buildFilterExByType("G2-item-realtime"), core.RW, core.WW, core.WR, core.Realtime, core.RW),
	}

	// The nonadjacent process/realtime variants have no spec of their own
	// but are still legal cycle type names.
	CycleTypeNames = map[string]struct{}{
		"G-nonadjacent-process":  {},
		"G-nonadjacent-realtime": {},
	}
	for k := range CycleAnomalySpecs {
		CycleTypeNames[k] = struct{}{}
	}

	UnknownAnomalyTypes = map[string]struct{}{
		"empty-transaction-graph": {},
		"cycle-search-timeout":    {},
	}

	ProcessAnalysisTypes = map[string]struct{}{}
	RealtimeAnalysisTypes = map[string]struct{}{}
	for k := range CycleTypeNames {
		if strings.Contains(k, "process") {
			ProcessAnalysisTypes[k] = struct{}{}
		}
		if strings.Contains(k, "realtime") {
			RealtimeAnalysisTypes[k] = struct{}{}
		}
	}
}

// CycleExplainerWrapper is a ICycleExplainer, it's also a wrapper for core.
type CycleExplainerWrapper struct{}

// ExplainCycle ...
func (c CycleExplainerWrapper) ExplainCycle(pairExplainer core.DataExplainer, circle core.Circle) core.CycleExplainerResult { ce := core.CycleExplainer{} ex := ce.ExplainCycle(pairExplainer, circle) steps := ex.Steps typeFrequencies := make(map[core.DependType]int) for _, step := range ex.Steps { t := step.Result.Type() if _, ok := typeFrequencies[t]; !ok { typeFrequencies[t] = 0 } typeFrequencies[t]++ } realtime := typeFrequencies[core.RealtimeDepend] process := typeFrequencies[core.ProcessDepend] ww := typeFrequencies[core.WWDepend] wr := typeFrequencies[core.WRDepend] rw := typeFrequencies[core.RWDepend] var rwAdj bool var lastType = steps[len(steps)-1].Result.Type() for _, step := range steps { if lastType == core.RWDepend && step.Result.Type() == core.RWDepend { rwAdj = true break } lastType = step.Result.Type() } var dataDepType string if rw == 1 { dataDepType = "G-single" } else if 1 < rw { if rwAdj { dataDepType = "G2-item" } else { dataDepType = "G-nonadjacent" } } else if 0 < wr { dataDepType = "G1c" } else if 0 < ww { dataDepType = "G0" } else { panic(fmt.Sprintf("Don't know how to classify: %+v", ex)) } var subtype string if 0 < realtime { subtype = "-realtime" } else if 0 < process { subtype = "-process" } return core.CycleExplainerResult{ Circle: ex.Circle, Steps: ex.Steps, Typ: fmt.Sprintf("%s%s", dataDepType, subtype), } } // RenderCycleExplanation ... func (c CycleExplainerWrapper) RenderCycleExplanation(explainer core.DataExplainer, cr core.CycleExplainerResult) string { exp := core.CycleExplainer{} return exp.RenderCycleExplanation(explainer, cr) } // AdditionalGraphs determines what additional graphs we'll need to consider for this analysis. 
func AdditionalGraphs(opts Opts) []core.Analyzer { ats := reportableAnomalyTypes(opts.ConsistencyModels, opts.Anomalies) var graphFn core.Analyzer if hasIntersection(ats, RealtimeAnalysisTypes) { graphFn = core.RealtimeGraph } else if hasIntersection(ats, ProcessAnalysisTypes) { graphFn = core.ProcessGraph } else { return opts.AdditionalGraphs } return append(opts.AdditionalGraphs, graphFn) } // Anomalies worth reporting on, even if they don't cause the test to fail. func reportableAnomalyTypes(cm []core.ConsistencyModelName, anomalies []string) map[string]struct{} { dest := prohibitedAnomalyTypes(cm, anomalies) union(dest, UnknownAnomalyTypes) return dest } func prohibitedAnomalyTypes(cm []core.ConsistencyModelName, anomalies []string) map[string]struct{} { if cm == nil { cm = append(cm, "strict-serializable") } a1 := core.AllAnomaliesImplying(anomalies) a2 := core.AnomaliesProhibitedBy(cm) dest := compactAnomalies(a1...) union(dest, compactAnomalies(a2...)) return dest } func compactAnomalies(anomalies ...string) map[string]struct{} { ret := map[string]struct{}{} for _, v := range anomalies { ret[v] = struct{}{} } return ret } func union(dest map[string]struct{}, unionSrc map[string]struct{}) { for k := range unionSrc { dest[k] = struct{}{} } } func hasIntersection(m1, m2 map[string]struct{}) bool { for k := range m2 { _, e := m1[k] if e { return true } } return false }
pkg/elle/txn/cycle.go
0.652574
0.42656
cycle.go
starcoder
package main

import (
	"fmt"
	"math"

	"gonum.org/v1/plot"
	"gonum.org/v1/plot/plotter"
	"gonum.org/v1/plot/plotutil"
	"gonum.org/v1/plot/vg"
)

// G is the gravitational constant (m^3 kg^-1 s^-2).
const G = 6.674e-11

// simulationSteps is the number of integration steps recorded and plotted.
// The integrator uses an implicit time step of one unit per step.
const simulationSteps = 500000

// Satellite represents one particle: its mass, position, and the force and
// velocity accumulated by the integrator.
type Satellite struct {
	Mass, radius            float64
	Coords, Force, Velocity Vector
	Name                    string
}

// System holds information about the entire system; only one System value is
// expected to exist.
type System struct {
	TotalMass  float64
	MassCentre Vector
	Satellites []Satellite
}

// Vector is a 2D vector.
type Vector struct {
	X, Y float64
}

// Initialise recalculates system-wide quantities (currently the centre of
// mass); intended to be called at the start of an iteration.
func (ps *System) Initialise() {
	ps.CentreOfMass()
}

// CentreOfMass recomputes the system's total mass and mass-weighted centre.
// TotalMass is reset before accumulating: the previous version summed into
// the stale total, so repeated calls inflated the mass and skewed the
// centre — that was a bug.
func (ps *System) CentreOfMass() {
	ps.TotalMass = 0
	xmass := 0.0
	ymass := 0.0
	for i := 0; i < len(ps.Satellites); i++ {
		ps.TotalMass += ps.Satellites[i].Mass
		xmass += ps.Satellites[i].Mass * ps.Satellites[i].Coords.X
		ymass += ps.Satellites[i].Mass * ps.Satellites[i].Coords.Y
	}
	ps.MassCentre = Vector{X: xmass / ps.TotalMass, Y: ymass / ps.TotalMass}
}

// Magnitude returns the Euclidean length of vec.
func (vec Vector) Magnitude() float64 {
	return math.Sqrt(vec.X*vec.X + vec.Y*vec.Y)
}

// RelUnitVector returns the unit vector pointing from vec1 towards vec2.
// NOTE(review): divides by zero when vec1 == vec2; callers avoid this by
// never pairing a satellite with itself.
func RelUnitVector(vec1 Vector, vec2 Vector) Vector {
	rel := Vector{X: vec2.X - vec1.X, Y: vec2.Y - vec1.Y}
	mag := rel.Magnitude()
	return Vector{X: rel.X / mag, Y: rel.Y / mag}
}

// GravitationalForces recomputes the net Newtonian gravitational force on
// every satellite from all the others.
func (ps *System) GravitationalForces() {
	for i, el := range ps.Satellites {
		el.Force = Vector{X: 0, Y: 0}
		for j, obj := range ps.Satellites {
			if i != j {
				// F = G*m1*m2 / r^2, directed along the unit vector
				// towards obj. (x*x replaces math.Pow(x, 2) — cheaper,
				// same value.)
				dx := obj.Coords.X - el.Coords.X
				dy := obj.Coords.Y - el.Coords.Y
				force := (G * el.Mass * obj.Mass) / (dx*dx + dy*dy)
				rel := RelUnitVector(el.Coords, obj.Coords)
				el.Force.X += force * rel.X
				el.Force.Y += force * rel.Y
				ps.Satellites[i] = el
			}
		}
	}
}

// Accelerate integrates force into velocity (dt = 1).
func (ps *System) Accelerate() {
	for i, el := range ps.Satellites {
		el.Velocity.X += el.Force.X / el.Mass
		el.Velocity.Y += el.Force.Y / el.Mass
		ps.Satellites[i] = el
	}
}

// Move integrates velocity into position (dt = 1).
func (ps *System) Move() {
	for i, el := range ps.Satellites {
		el.Coords.X += el.Velocity.X
		el.Coords.Y += el.Velocity.Y
		ps.Satellites[i] = el
	}
}

// TotalMomentum returns the vector sum of each satellite's momentum; useful
// for eyeballing conservation.
func (ps System) TotalMomentum() Vector {
	momentum := Vector{X: 0, Y: 0}
	for _, el := range ps.Satellites {
		momentum.X += el.Velocity.X * el.Mass
		momentum.Y += el.Velocity.Y * el.Mass
	}
	return momentum
}

// Update advances the simulation one step (forces, then velocities, then
// positions — semi-implicit Euler) and logs total momentum plus satellite
// 1's state.
func (ps *System) Update() {
	ps.GravitationalForces()
	ps.Accelerate()
	ps.Move()
	fmt.Println(ps.TotalMomentum(), ps.Satellites[1].Velocity, ps.Satellites[1].Force)
}

// Plot runs the simulation for simulationSteps steps, recording every
// satellite's position, then renders all trajectories to points.png.
func (ps System) Plot() {
	points := make([]plotter.XYs, len(ps.Satellites))
	for i := range points {
		points[i] = make(plotter.XYs, simulationSteps)
	}
	pl, err := plot.New()
	if err != nil {
		panic(err)
	}
	pl.X.Min = 0
	pl.Y.Min = 0
	pl.X.Max = 0
	pl.Y.Max = 0
	for i := 0; i < simulationSteps; i++ {
		ps.Update()
		for j, el := range ps.Satellites {
			points[j][i].X = el.Coords.X
			points[j][i].Y = el.Coords.Y
			// Grow the axis limits with a 20% margin as bodies wander.
			if points[j][i].X > pl.X.Max {
				pl.X.Max = 1.2 * points[j][i].X
			}
			if points[j][i].X < pl.X.Min {
				pl.X.Min = 1.2 * points[j][i].X
			}
			if points[j][i].Y > pl.Y.Max {
				pl.Y.Max = 1.2 * points[j][i].Y
			}
			if points[j][i].Y < pl.Y.Min {
				pl.Y.Min = 1.2 * points[j][i].Y
			}
		}
	}
	pl.Title.Text = "Satellites and shit"
	pl.X.Label.Text = "X"
	pl.Y.Label.Text = "Y"
	for i, el := range ps.Satellites {
		err = plotutil.AddLinePoints(pl, el.Name, points[i])
		if err != nil {
			panic(err)
		}
	}
	pl.Add(plotter.NewGrid())
	if err := pl.Save(20*vg.Inch, 20*vg.Inch, "points.png"); err != nil {
		panic(err)
	}
}

// main sets up a Sun/Earth/Moon system and plots its evolution.
func main() {
	sat1 := Satellite{Mass: 1.9891e+30, Coords: Vector{X: -149.6e+9, Y: 0}, Velocity: Vector{X: 0, Y: 0}, Name: "Sun"}                 //sun
	sat2 := Satellite{Mass: 5.972e+24, Coords: Vector{X: 0, Y: 0}, Velocity: Vector{X: 0, Y: 30000}, Name: "Earth"}                   //earth
	sat3 := Satellite{Mass: 7.3476731e+22, Coords: Vector{X: 384403000, Y: 0}, Velocity: Vector{X: 0, Y: 31000}, Name: "Moon"}        //moon
	sys := System{Satellites: []Satellite{sat1, sat2, sat3}}
	sys.Plot()
}
Orbits/Orbits.go
0.609524
0.526099
Orbits.go
starcoder
Package flexconfig provides a uniform interface to retrieve configuration properties, independent of how or where the properties are specified. Files, environment variables, command line arguments, and a configuration store can be used alone or in combination with each other to define the configuration for the application. An application can retrieve and set property values using a key with a canonical form. The canonical form is all lowercase with hierarchical fields separated by dots. For example, animal.bear.polar.habitat Initialization of a configuration is done by an application near the beginning of its main function by calling NewFlexibleConfiguration. The application can specify several parameters about how and where configuration properties will be obtained from. Configuration sources include (in priority order, lowest to highest): - directories on the local file system - environment variables - command line arguments - configuration store The static configuration sources (not including the configuration store) are read from lowest priority to highest priority, creating configuration properties from data found in the sources. If the same canonical property key is set in a data source read later, its value will override a value from a data source read earlier. Configuration directory names are derived from the ApplicationName in the ConfigurationParameters. A non-empty ApplicationName indicates that file-based properties will be searched for. Multiple directories are searched as described under ConfigurationParameters. The subset of files read in these directories is controlled by AcceptedFileSuffixes, where the suffix ".conf" is used if none are specified. The contents of the files may have formats that include JSON, YAML, and INI. 
Even if the application has been compiled with a value for ApplicationName, it is possible to override the behavior of searching for configuration files and specify a single configuration file through the use of a special command line argument or a special environment variable.

The command line argument --flexconfig.configuration.file.location specifies the file from which to read the static configuration and stops the search for files in the standard configuration locations. The environment variable FLEXCONFIG_CONFIGURATION_FILE_LOCATION can be used instead to specify the location of the single configuration file. If both the command line argument and the environment variable are set, the value of the command line argument will be used.

Hierarchical properties (multiple fields separated by dots) are defined by parsing JSON and YAML files. Arrays defined in these files result in property names that include fields consisting of digits. For example, the YAML file:

    myapp:
      plugins:
      - name: foo
        loglevel: debug
      - name: bar
        server:
          address: 192.168.1.1

will result in the following properties in the configuration:

    myapp.plugins.0.name
    myapp.plugins.0.loglevel
    myapp.plugins.1.name
    myapp.plugins.1.server.address

Environment variables will be searched if EnvironmentVariablePrefixes includes non-empty members. The members specify prefixes for environment variable names. For instance, specifying a prefix of "SUN" will match both

    SUNSHINE_DAILY
    SUN_MICROSYSTEMS

whereas, specifying "SUN_" will only match

    SUN_MICROSYSTEMS

Environment variable names are converted into the canonical form before storing in the configuration.

Command line arguments are checked for property definitions without the application needing to manage arguments beyond calling NewFlexibleConfiguration. Any argument beginning with a double dash (--) and being all lowercase is used to create configuration properties.
Arguments are checked up to the point where all arguments have been read, or an argument consisting entirely of "--" is found. The argument name matches the canonical form for names while the value can be anything. For example, given the following commandline: hello -v --smiley.face=true -n --happy.day -- --dont.check.sig=true "Hi!" will result in the following properties being defined in the configuration: smiley.face: true Note, the '=' separating name and value is required with no intermediate spaces. Single or double quotes can enclose the value of a property. Accessing the configuration to obtain property values will consult the configuration store first, if it has been configured. If this results in an error or an empty value, the in-memory configuration read from files, env vars, and the command line, is consulted. */ package flexconfig
doc.go
0.838217
0.794704
doc.go
starcoder
package chesseract

import (
	"fmt"
)

// init registers this rule set under the name "Chesseract" so it can be
// constructed through the generic RegisterRuleSet lookup.
func init() {
	RegisterRuleSet("Chesseract", func() RuleSet { return Chesseract{} })
}

// A position4D represents a position on the hyper-board
// (four axes, each holding coordinates 0..5).
type position4D [4]int

// String renders the position in algebraic-like notation: axis 0 as a
// letter 'a'..'f', axis 1 as a digit 1..6, axis 2 as a letter 'm'..'r',
// axis 3 as a digit 1..6.
func (p position4D) String() string {
	return fmt.Sprintf("%c%d%c%d", 'a'+rune(p[0]), p[1]+1, 'm'+rune(p[2]), p[3]+1)
}

// Equals reports whether q is a position4D with identical coordinates.
func (p position4D) Equals(q Position) bool {
	if q0, ok := q.(position4D); ok {
		return p == q0
	}
	return false
}

// CellColour colours cells by coordinate parity, generalizing the 2D
// chequerboard pattern: even coordinate sums are BLACK, odd are WHITE.
func (p position4D) CellColour() Colour {
	if (p[0]+p[1]+p[2]+p[3])%2 == 0 {
		return BLACK
	} else {
		return WHITE
	}
}

// WorldPosition maps the 4D cell to 3D render coordinates.
// NOTE(review): the scale factors (3.6 vertical spacing per axis-2 board,
// 9 horizontal spacing per axis-3 board) look like renderer layout
// constants — confirm against the rendering code.
func (p position4D) WorldPosition() (x, y, z float32) {
	y = (float32(p[2]) - 1.5) * 3.6
	x = float32(p[0]) - 2.5
	z = (float32(p[1]) - 2.5) + 9*(float32(p[3])-2.5)
	return
}

// Chesseract implements the rule set for 6x6x6x6 four-dimensional chess.
type Chesseract struct{}

// String returns the rule set's registered name.
func (Chesseract) String() string { return "Chesseract" }

// PlayerColours lists the participating colours; WHITE moves first
// (see DefaultBoard's Turn field).
func (Chesseract) PlayerColours() []Colour { return []Colour{WHITE, BLACK} }

// DefaultBoard returns the initial piece layout: white occupies the low
// axis-1/axis-3 boards, black mirrors it on the high boards; white to move.
func (Chesseract) DefaultBoard() Board {
	return Board{
		Pieces: []Piece{
			// White pieces
			{KING, WHITE, position4D{3, 0, 2, 0}},
			{QUEEN, WHITE, position4D{2, 0, 2, 0}},
			{BISHOP, WHITE, position4D{1, 0, 2, 0}},
			{BISHOP, WHITE, position4D{2, 1, 2, 0}},
			{BISHOP, WHITE, position4D{3, 1, 2, 0}},
			{BISHOP, WHITE, position4D{4, 0, 2, 0}},
			{BISHOP, WHITE, position4D{2, 0, 2, 1}},
			{BISHOP, WHITE, position4D{3, 0, 2, 1}},
			{KNIGHT, WHITE, position4D{2, 0, 1, 0}},
			{KNIGHT, WHITE, position4D{3, 0, 1, 0}},
			{KNIGHT, WHITE, position4D{2, 0, 3, 0}},
			{KNIGHT, WHITE, position4D{3, 0, 3, 0}},
			{ROOK, WHITE, position4D{2, 0, 0, 0}},
			{ROOK, WHITE, position4D{3, 0, 0, 0}},
			{ROOK, WHITE, position4D{2, 0, 4, 0}},
			{ROOK, WHITE, position4D{3, 0, 4, 0}},
			{PAWN, WHITE, position4D{0, 0, 2, 0}},
			{PAWN, WHITE, position4D{1, 1, 2, 0}},
			{PAWN, WHITE, position4D{2, 2, 2, 0}},
			{PAWN, WHITE, position4D{3, 2, 2, 0}},
			{PAWN, WHITE, position4D{4, 1, 2, 0}},
			{PAWN, WHITE, position4D{5, 0, 2, 0}},
			{PAWN, WHITE, position4D{1, 0, 1, 0}},
			{PAWN, WHITE, position4D{2, 1, 1, 0}},
			{PAWN, WHITE, position4D{3, 1, 1, 0}},
			{PAWN, WHITE, position4D{4, 0, 1, 0}},
			{PAWN, WHITE, position4D{1, 0, 0, 0}},
			{PAWN, WHITE, position4D{2, 1, 0, 0}},
			{PAWN, WHITE, position4D{3, 1, 0, 0}},
			{PAWN, WHITE, position4D{4, 0, 0, 0}},
			{PAWN, WHITE, position4D{1, 0, 3, 0}},
			{PAWN, WHITE, position4D{2, 1, 3, 0}},
			{PAWN, WHITE, position4D{3, 1, 3, 0}},
			{PAWN, WHITE, position4D{4, 0, 3, 0}},
			{PAWN, WHITE, position4D{1, 0, 4, 0}},
			{PAWN, WHITE, position4D{2, 1, 4, 0}},
			{PAWN, WHITE, position4D{3, 1, 4, 0}},
			{PAWN, WHITE, position4D{4, 0, 4, 0}},
			{PAWN, WHITE, position4D{2, 0, 5, 0}},
			{PAWN, WHITE, position4D{3, 0, 5, 0}},
			{PAWN, WHITE, position4D{1, 0, 2, 1}},
			{PAWN, WHITE, position4D{2, 1, 2, 1}},
			{PAWN, WHITE, position4D{3, 1, 2, 1}},
			{PAWN, WHITE, position4D{4, 0, 2, 1}},
			{PAWN, WHITE, position4D{2, 0, 2, 2}},
			{PAWN, WHITE, position4D{3, 0, 2, 2}},
			{PAWN, WHITE, position4D{2, 0, 0, 1}},
			{PAWN, WHITE, position4D{3, 0, 0, 1}},
			{PAWN, WHITE, position4D{2, 0, 1, 1}},
			{PAWN, WHITE, position4D{3, 0, 1, 1}},
			{PAWN, WHITE, position4D{2, 0, 3, 1}},
			{PAWN, WHITE, position4D{3, 0, 3, 1}},
			{PAWN, WHITE, position4D{2, 0, 4, 1}},
			{PAWN, WHITE, position4D{3, 0, 4, 1}},

			// Black pieces
			{KING, BLACK, position4D{3, 5, 3, 5}},
			{QUEEN, BLACK, position4D{2, 5, 3, 5}},
			{BISHOP, BLACK, position4D{1, 5, 3, 5}},
			{BISHOP, BLACK, position4D{2, 4, 3, 5}},
			{BISHOP, BLACK, position4D{3, 4, 3, 5}},
			{BISHOP, BLACK, position4D{4, 5, 3, 5}},
			{BISHOP, BLACK, position4D{2, 5, 3, 4}},
			{BISHOP, BLACK, position4D{3, 5, 3, 4}},
			{KNIGHT, BLACK, position4D{2, 5, 4, 5}},
			{KNIGHT, BLACK, position4D{3, 5, 4, 5}},
			{KNIGHT, BLACK, position4D{2, 5, 2, 5}},
			{KNIGHT, BLACK, position4D{3, 5, 2, 5}},
			{ROOK, BLACK, position4D{2, 5, 5, 5}},
			{ROOK, BLACK, position4D{3, 5, 5, 5}},
			{ROOK, BLACK, position4D{2, 5, 1, 5}},
			{ROOK, BLACK, position4D{3, 5, 1, 5}},
			{PAWN, BLACK, position4D{0, 5, 3, 5}},
			{PAWN, BLACK, position4D{1, 4, 3, 5}},
			{PAWN, BLACK, position4D{2, 3, 3, 5}},
			{PAWN, BLACK, position4D{3, 3, 3, 5}},
			{PAWN, BLACK, position4D{4, 4, 3, 5}},
			{PAWN, BLACK, position4D{5, 5, 3, 5}},
			{PAWN, BLACK, position4D{1, 5, 4, 5}},
			{PAWN, BLACK, position4D{2, 4, 4, 5}},
			{PAWN, BLACK, position4D{3, 4, 4, 5}},
			{PAWN, BLACK, position4D{4, 5, 4, 5}},
			{PAWN, BLACK, position4D{1, 5, 5, 5}},
			{PAWN, BLACK, position4D{2, 4, 5, 5}},
			{PAWN, BLACK, position4D{3, 4, 5, 5}},
			{PAWN, BLACK, position4D{4, 5, 5, 5}},
			{PAWN, BLACK, position4D{1, 5, 2, 5}},
			{PAWN, BLACK, position4D{2, 4, 2, 5}},
			{PAWN, BLACK, position4D{3, 4, 2, 5}},
			{PAWN, BLACK, position4D{4, 5, 2, 5}},
			{PAWN, BLACK, position4D{1, 5, 1, 5}},
			{PAWN, BLACK, position4D{2, 4, 1, 5}},
			{PAWN, BLACK, position4D{3, 4, 1, 5}},
			{PAWN, BLACK, position4D{4, 5, 1, 5}},
			{PAWN, BLACK, position4D{2, 5, 0, 5}},
			{PAWN, BLACK, position4D{3, 5, 0, 5}},
			{PAWN, BLACK, position4D{1, 5, 3, 4}},
			{PAWN, BLACK, position4D{2, 4, 3, 4}},
			{PAWN, BLACK, position4D{3, 4, 3, 4}},
			{PAWN, BLACK, position4D{4, 5, 3, 4}},
			{PAWN, BLACK, position4D{2, 5, 3, 3}},
			{PAWN, BLACK, position4D{3, 5, 3, 3}},
			{PAWN, BLACK, position4D{2, 5, 5, 4}},
			{PAWN, BLACK, position4D{3, 5, 5, 4}},
			{PAWN, BLACK, position4D{2, 5, 4, 4}},
			{PAWN, BLACK, position4D{3, 5, 4, 4}},
			{PAWN, BLACK, position4D{2, 5, 2, 4}},
			{PAWN, BLACK, position4D{3, 5, 2, 4}},
			{PAWN, BLACK, position4D{2, 5, 1, 4}},
			{PAWN, BLACK, position4D{3, 5, 1, 4}},
		},
		Turn: WHITE,
	}
}

// AllPositions enumerates all 6^4 = 1296 cells; the element at index
// 216*w + 36*z + 6*y + x holds position {x, y, z, w}.
func (Chesseract) AllPositions() []Position {
	rv := make([]Position, 6*6*6*6)
	for x := 0; x < 6; x++ {
		for y := 0; y < 6; y++ {
			for z := 0; z < 6; z++ {
				for w := 0; w < 6; w++ {
					rv[216*w+36*z+6*y+x] = position4D{x, y, z, w}
				}
			}
		}
	}
	return rv
}

// ParsePosition parses the 4-character form produced by position4D.String
// (letter a-f, digit 1-6, letter m-r, digit 1-6), returning
// errInvalidFormat for anything else.
// NOTE(review): i are byte offsets; for the expected 4 ASCII characters they
// are exactly 0..3. Multibyte runes produce out-of-range values and are
// rejected by the range check below.
func (Chesseract) ParsePosition(s string) (Position, error) {
	if len(s) != 4 {
		return invalidPosition{}, errInvalidFormat
	}
	rv := position4D{}
	for i, r := range s {
		if i == 0 {
			rv[i] = int(r - 'a')
		} else if i == 2 {
			rv[i] = int(r - 'm')
		} else {
			rv[i] = int(r - '1')
		}
		if rv[i] < 0 || rv[i] >= 6 {
			return invalidPosition{}, errInvalidFormat
		}
	}
	return rv, nil
}

// CanMove reports whether piece may legally move to pos on board.
// Movement rules are not implemented yet, so every move is rejected.
func (Chesseract) CanMove(board Board, piece Piece, pos Position) bool {
	// TODO
	return false
}

// ApplyMove validates and applies move, returning the resulting board with
// the turn passed to the other player. Note: since CanMove currently always
// returns false, every move is rejected with errIllegalMove.
func (rs Chesseract) ApplyMove(board Board, move Move) (Board, error) {
	piece, ok := board.At(move.From)
	if !ok {
		return Board{}, errIllegalMove
	}
	if !rs.CanMove(board, piece, move.To) {
		return Board{}, errIllegalMove
	}
	newBoard := board.movePiece(move)
	// TODO: pawn promotion
	// TODO: castling
	// TODO: check if this results in the player being in check. Reject with errIllegalMove if it does.
	if newBoard.Turn == BLACK {
		newBoard.Turn = WHITE
	} else {
		newBoard.Turn = BLACK
	}
	return newBoard, nil
}
chesseract/hyperboard.go
0.662469
0.548008
hyperboard.go
starcoder
package onshape import ( "encoding/json" ) // BTExportTessellatedEdgesBody890 struct for BTExportTessellatedEdgesBody890 type BTExportTessellatedEdgesBody890 struct { BTExportTessellatedBody3398 BtType *string `json:"btType,omitempty"` Edges *[]BTExportTessellatedEdgesEdge1364 `json:"edges,omitempty"` } // NewBTExportTessellatedEdgesBody890 instantiates a new BTExportTessellatedEdgesBody890 object // This constructor will assign default values to properties that have it defined, // and makes sure properties required by API are set, but the set of arguments // will change when the set of required properties is changed func NewBTExportTessellatedEdgesBody890() *BTExportTessellatedEdgesBody890 { this := BTExportTessellatedEdgesBody890{} return &this } // NewBTExportTessellatedEdgesBody890WithDefaults instantiates a new BTExportTessellatedEdgesBody890 object // This constructor will only assign default values to properties that have it defined, // but it doesn't guarantee that properties required by API are set func NewBTExportTessellatedEdgesBody890WithDefaults() *BTExportTessellatedEdgesBody890 { this := BTExportTessellatedEdgesBody890{} return &this } // GetBtType returns the BtType field value if set, zero value otherwise. func (o *BTExportTessellatedEdgesBody890) GetBtType() string { if o == nil || o.BtType == nil { var ret string return ret } return *o.BtType } // GetBtTypeOk returns a tuple with the BtType field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *BTExportTessellatedEdgesBody890) GetBtTypeOk() (*string, bool) { if o == nil || o.BtType == nil { return nil, false } return o.BtType, true } // HasBtType returns a boolean if a field has been set. func (o *BTExportTessellatedEdgesBody890) HasBtType() bool { if o != nil && o.BtType != nil { return true } return false } // SetBtType gets a reference to the given string and assigns it to the BtType field. 
func (o *BTExportTessellatedEdgesBody890) SetBtType(v string) {
	o.BtType = &v
}

// GetEdges returns the Edges field value if set, zero value otherwise.
func (o *BTExportTessellatedEdgesBody890) GetEdges() []BTExportTessellatedEdgesEdge1364 {
	if o == nil || o.Edges == nil {
		var ret []BTExportTessellatedEdgesEdge1364
		return ret
	}
	return *o.Edges
}

// GetEdgesOk returns a tuple with the Edges field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTExportTessellatedEdgesBody890) GetEdgesOk() (*[]BTExportTessellatedEdgesEdge1364, bool) {
	if o == nil || o.Edges == nil {
		return nil, false
	}
	return o.Edges, true
}

// HasEdges returns a boolean if a field has been set.
func (o *BTExportTessellatedEdgesBody890) HasEdges() bool {
	if o != nil && o.Edges != nil {
		return true
	}

	return false
}

// SetEdges gets a reference to the given []BTExportTessellatedEdgesEdge1364 and assigns it to the Edges field.
func (o *BTExportTessellatedEdgesBody890) SetEdges(v []BTExportTessellatedEdgesEdge1364) {
	o.Edges = &v
}

// MarshalJSON flattens the embedded BTExportTessellatedBody3398 parent by
// marshalling it and merging the result into a single map, then overlays
// this type's own optional fields.
func (o BTExportTessellatedEdgesBody890) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	serializedBTExportTessellatedBody3398, errBTExportTessellatedBody3398 := json.Marshal(o.BTExportTessellatedBody3398)
	if errBTExportTessellatedBody3398 != nil {
		return []byte{}, errBTExportTessellatedBody3398
	}
	errBTExportTessellatedBody3398 = json.Unmarshal([]byte(serializedBTExportTessellatedBody3398), &toSerialize)
	if errBTExportTessellatedBody3398 != nil {
		return []byte{}, errBTExportTessellatedBody3398
	}
	if o.BtType != nil {
		toSerialize["btType"] = o.BtType
	}
	if o.Edges != nil {
		toSerialize["edges"] = o.Edges
	}
	return json.Marshal(toSerialize)
}

// NullableBTExportTessellatedEdgesBody890 distinguishes "explicitly set
// (possibly to null)" from "absent" for JSON round-tripping.
type NullableBTExportTessellatedEdgesBody890 struct {
	value *BTExportTessellatedEdgesBody890
	isSet bool
}

// Get returns the wrapped value (may be nil).
func (v NullableBTExportTessellatedEdgesBody890) Get() *BTExportTessellatedEdgesBody890 {
	return v.value
}

// Set stores val and marks the wrapper as set.
func (v *NullableBTExportTessellatedEdgesBody890) Set(val *BTExportTessellatedEdgesBody890) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether Set or UnmarshalJSON has been called.
func (v NullableBTExportTessellatedEdgesBody890) IsSet() bool {
	return v.isSet
}

// Unset clears the value and the set flag.
func (v *NullableBTExportTessellatedEdgesBody890) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullableBTExportTessellatedEdgesBody890 wraps val in a set wrapper.
func NewNullableBTExportTessellatedEdgesBody890(val *BTExportTessellatedEdgesBody890) *NullableBTExportTessellatedEdgesBody890 {
	return &NullableBTExportTessellatedEdgesBody890{value: val, isSet: true}
}

// MarshalJSON encodes the wrapped value (nil encodes as JSON null).
func (v NullableBTExportTessellatedEdgesBody890) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}

// UnmarshalJSON decodes src into the wrapped value.
// NOTE(review): isSet is flipped to true before Unmarshal's error is
// known, so a failed decode still reports IsSet() — this matches the
// stock OpenAPI generator template; confirm acceptable.
func (v *NullableBTExportTessellatedEdgesBody890) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
onshape/model_bt_export_tessellated_edges_body_890.go
0.643441
0.583559
model_bt_export_tessellated_edges_body_890.go
starcoder
package vida

import (
	"fmt"
	"math/rand"
	"strings"
	"time"
	"unsafe"
)

// Record is a non-ordered structured {property:value} data type.
type Record struct {
	Properties Namespace
}

// Interface SelectorOperator.

// SelectorGet returns the value stored under field, or a
// NameNotDefinedInCompoundDataType error when the field is absent.
func (record Record) SelectorGet(field string) (Value, error) {
	if value, ok := record.Properties[field]; ok {
		return value, nil
	}
	return nil, NameNotDefinedInCompoundDataType(field, record)
}

// SelectorSet stores value under property, creating or overwriting it.
func (record Record) SelectorSet(property string, value Value) error {
	record.Properties[property] = value
	return nil
}

// Interface Value

func (record Record) TypeName() string {
	return "Record"
}

// Description renders the record as "{ key:value ... }".  Map iteration
// order is random, so the ordering of pairs is not stable.
func (record Record) Description() string {
	var builder strings.Builder
	builder.WriteString("{ ")
	for key, value := range record.Properties {
		builder.WriteString(fmt.Sprintf("%v:%v ", key, value.Description()))
	}
	builder.WriteString("}")
	return builder.String()
}

// Equals reports deep equality: the other value must be a Record with
// the same keys and pairwise-equal values.
func (record Record) Equals(other Value) bool {
	if value, ok := other.(Record); ok {
		// NOTE(review): record and value are distinct local copies, so
		// their addresses can never compare equal; this identity fast
		// path is dead code kept for fidelity with the original.
		if unsafe.Pointer(&record) == unsafe.Pointer(&value) {
			return true
		}
		if len(record.Properties) != len(value.Properties) {
			return false
		}
		for k, v := range record.Properties {
			if val, ok := value.Properties[k]; !(ok && v.Equals(val)) {
				return false
			}
		}
		return true
	}
	return false
}

// BinaryOp implements record + record, producing a new record holding
// the union of both property sets.  On key conflicts the receiver's
// value wins, matching the semantics of recordMerged.  Any other
// operator or operand type is a type error.
func (record Record) BinaryOp(op byte, rhs Value) (Value, error) {
	switch rhs := rhs.(type) {
	case Record:
		switch op {
		case TKAdd:
			// Build the union in a separate variable.  The previous code
			// shadowed the receiver with the result, so the second loop
			// copied the new record onto itself and the receiver's
			// properties were silently dropped.
			merged := Record{Properties: make(Namespace)}
			for key, value := range rhs.Properties {
				merged.Properties[key] = value
			}
			for key, value := range record.Properties {
				merged.Properties[key] = value
			}
			return merged, nil
		default:
			return nil, TypeErrorInBinaryOperator(KindDescription[op], record, rhs)
		}
	default:
		return nil, TypeErrorInBinaryOperator(KindDescription[op], record, rhs)
	}
}

// PrefixOp is always a type error: records support no prefix operators.
func (record Record) PrefixOp(op byte) (Value, error) {
	return nil, TypeErrorInPrefixOperator(KindDescription[op], record)
}

func (record Record) IsIterable() bool {
	return true
}

func (record Record) MakeIterator() Iterator {
	return NewRecordIterator(record, false)
}

func (record Record) IsHashable() bool {
	return false
}

func (record Record) MakeHashKey() HashKey {
	return HashKey{}
}

func (record Record) IsValueSemantics() bool {
	return false
}

func (record Record) HasMethods() bool {
	return true
}

// GetMethod looks a method up in the record's own properties.
func (record Record) GetMethod(name string) (Value, bool, error) {
	if method, ok := record.Properties[name]; ok {
		return method, true, nil
	}
	return nil, false, MethodNotDefined(name, record)
}

// Clone returns a deep copy of the record.
func (record Record) Clone() Value {
	rec, _ := recordClone(record)
	return rec
}

// Interface Subscript Operator.

// SubscriptGet implements record[key] for string keys.
func (record Record) SubscriptGet(index Value) (Value, error) {
	switch key := index.(type) {
	case *String:
		if value, ok := record.Properties[key.Value]; ok {
			return value, nil
		} else {
			return nil, NameNotDefinedInCompoundDataType(key.Value, record)
		}
	default:
		return nil, RecordPropertyError(key)
	}
}

// SubscriptSet implements record[key] = value.  It assumes index is a
// *String and will panic otherwise.
func (record Record) SubscriptSet(index, value Value) error {
	property := index.(*String).Value
	record.Properties[property] = value
	return nil
}

// loadRecord registers the built-in Record module in the fiber's namespace.
func (fiber *Fiber) loadRecord() {
	modName := "Record"
	module := GModule{Name: modName, Namespace: make(Namespace)}
	module.Namespace["isEmpty"] = GFunction{Name: "isEmpty", Value: recordIsEmpty}
	module.Namespace["length"] = GFunction{Name: "length", Value: recordLength}
	module.Namespace["clear"] = GFunction{Name: "clear", Value: recordClear}
	module.Namespace["randomElement"] = GFunction{Name: "randomElement", Value: recordRandomElement}
	module.Namespace["remove"] = GFunction{Name: "remove", Value: recordRemoveProperty}
	module.Namespace["addPair"] = GFunction{Name: "addPair", Value: recordAddProperty}
	module.Namespace["keys"] = GFunction{Name: "keys", Value: recordGetKeys}
	module.Namespace["clone"] = GFunction{Name: "clone", Value: recordClone}
	module.Namespace["merged"] = GFunction{Name: "merged", Value: recordMerged}
	module.Namespace["merge"] = GFunction{Name: "merge", Value: recordMerge}
	module.Namespace["difference"] = GFunction{Name: "difference", Value: recordDifference}
	module.Namespace["contains"] = GFunction{Name: "contains", Value: recordContains}
	module.Namespace["makeIterator"] = GFunction{Name: "makeIterator", Value: recordMakeIterator}
	module.Namespace["prettyPrint"] = GFunction{Name: "prettyPrint", Value: recordPrettyPrint}
	fiber.module.namespace[modName] = module
}

// recordIsEmpty reports whether the record has no properties.
func recordIsEmpty(args ...Value) (Value, error) {
	if len(args) == 1 {
		if record, ok := args[0].(Record); ok {
			return Bool(len(record.Properties) == 0), nil
		}
		return nil, fmt.Errorf("expected a record as first argument")
	}
	// Fixed: this builtin requires exactly 1 argument (the original
	// message claimed 0).
	return nil, fmt.Errorf("expected %v argument and got %v", 1, len(args))
}

// recordLength returns the number of properties in the record.
func recordLength(args ...Value) (Value, error) {
	if len(args) == 1 {
		if record, ok := args[0].(Record); ok {
			return Int(len(record.Properties)), nil
		}
		return nil, fmt.Errorf("expected a record as first argument")
	}
	return nil, fmt.Errorf("expected %v argument and got %v", 1, len(args))
}

// recordClear removes every property from the record in place.
func recordClear(args ...Value) (Value, error) {
	if len(args) == 1 {
		if record, ok := args[0].(Record); ok {
			for k := range record.Properties {
				delete(record.Properties, k)
			}
			return NilValue, nil
		}
		return nil, fmt.Errorf("expected a record as first argument")
	}
	return nil, fmt.Errorf("expected %v argument and got %v", 1, len(args))
}

// recordRandomElement returns the value of a uniformly random property,
// or nil for an empty record.
func recordRandomElement(args ...Value) (Value, error) {
	if len(args) == 1 {
		if record, ok := args[0].(Record); ok {
			length := int64(len(record.Properties))
			if length == 0 {
				return NilValue, nil
			}
			// NOTE(review): reseeding on every call is unnecessary and
			// rand.Seed is deprecated since Go 1.20; consider seeding
			// once at startup.
			rand.Seed(time.Now().UnixNano())
			randomIndex := rand.Int63n(length)
			index := int64(0)
			for key := range record.Properties {
				if randomIndex == index {
					return record.Properties[key], nil
				}
				index++
			}
			return NilValue, nil
		}
		return nil, fmt.Errorf("expected a record as first argument")
	}
	return nil, fmt.Errorf("expected %v argument and got %v", 1, len(args))
}

// recordAddProperty sets record[name] = value and returns the value.
func recordAddProperty(args ...Value) (Value, error) {
	if len(args) == 3 {
		if record, okRecord := args[0].(Record); okRecord {
			if attribute, okAttribute := args[1].(*String); okAttribute {
				record.Properties[attribute.Value] = args[2]
				return args[2], nil
			}
			return nil, fmt.Errorf("expected a string as second argument")
		}
		return nil, fmt.Errorf("expected a record as first argument")
	}
	return nil, fmt.Errorf("expected %v arguments and got %v", 3, len(args))
}

// recordRemoveProperty deletes record[name]; missing names are a no-op.
func recordRemoveProperty(args ...Value) (Value, error) {
	if len(args) == 2 {
		if record, okRecord := args[0].(Record); okRecord {
			if attribute, okAttribute := args[1].(*String); okAttribute {
				delete(record.Properties, attribute.Value)
				return NilValue, nil
			}
			return nil, fmt.Errorf("expected a string as second argument")
		}
		return nil, fmt.Errorf("expected a record as first argument")
	}
	return nil, fmt.Errorf("expected %v arguments and got %v", 2, len(args))
}

// recordGetKeys returns the record's property names as a list of strings.
func recordGetKeys(args ...Value) (Value, error) {
	if len(args) == 1 {
		if record, ok := args[0].(Record); ok {
			keys := make([]Value, 0, len(record.Properties))
			for k := range record.Properties {
				keys = append(keys, &String{Value: k})
			}
			return &List{Elements: keys}, nil
		}
		return nil, fmt.Errorf("expected a record as first argument")
	}
	return nil, fmt.Errorf("expected %v argument and got %v", 1, len(args))
}

// recordClone returns a deep copy: every value is itself Clone()d.
func recordClone(args ...Value) (Value, error) {
	if len(args) == 1 {
		if record, ok := args[0].(Record); ok {
			properties := make(Namespace)
			for k, v := range record.Properties {
				properties[k] = v.Clone()
			}
			return Record{Properties: properties}, nil
		}
		return nil, fmt.Errorf("expected a record as first argument")
	}
	return nil, fmt.Errorf("expected %v argument and got %v", 1, len(args))
}

// recordMerged returns a new record with the union of both arguments;
// on conflicts the first argument's value wins.
func recordMerged(args ...Value) (Value, error) {
	if len(args) == 2 {
		if lhs, ok := args[0].(Record); ok {
			if rhs, ok := args[1].(Record); ok {
				newMap := make(Namespace)
				for key, value := range rhs.Properties {
					newMap[key] = value
				}
				for key, value := range lhs.Properties {
					newMap[key] = value
				}
				return Record{Properties: newMap}, nil
			}
			return nil, fmt.Errorf("expected a record as second argument")
		}
		return nil, fmt.Errorf("expected a record as first argument")
	}
	return nil, fmt.Errorf("expected %v arguments and got %v", 2, len(args))
}

// recordMerge copies missing keys from the second record into the first
// in place; existing keys in the destination are preserved.
func recordMerge(args ...Value) (Value, error) {
	if len(args) == 2 {
		if dest, ok := args[0].(Record); ok {
			if source, ok := args[1].(Record); ok {
				for key, value := range source.Properties {
					if _, exists := dest.Properties[key]; !exists {
						dest.Properties[key] = value
					}
				}
				return NilValue, nil
			}
			return nil, fmt.Errorf("expected a record as second argument")
		}
		return nil, fmt.Errorf("expected a record as first argument")
	}
	return nil, fmt.Errorf("expected %v arguments and got %v", 2, len(args))
}

// recordDifference returns a new record with the first argument's pairs
// whose keys do not appear in the second argument.
func recordDifference(args ...Value) (Value, error) {
	if len(args) == 2 {
		if lhs, ok := args[0].(Record); ok {
			if rhs, ok := args[1].(Record); ok {
				newMap := make(Namespace)
				for key, value := range lhs.Properties {
					if _, ok := rhs.Properties[key]; !ok {
						newMap[key] = value
					}
				}
				return Record{Properties: newMap}, nil
			}
			return nil, fmt.Errorf("expected a record as second argument")
		}
		return nil, fmt.Errorf("expected a record as first argument")
	}
	return nil, fmt.Errorf("expected %v arguments and got %v", 2, len(args))
}

// recordContains reports whether the record has a property of the given name.
func recordContains(args ...Value) (Value, error) {
	if len(args) == 2 {
		if record, okRecord := args[0].(Record); okRecord {
			if attribute, okAttribute := args[1].(*String); okAttribute {
				var contains bool
				_, contains = record.Properties[attribute.Value]
				return Bool(contains), nil
			}
			return nil, fmt.Errorf("expected a string as second argument")
		}
		return nil, fmt.Errorf("expected a record as first argument")
	}
	return nil, fmt.Errorf("expected %v arguments and got %v", 2, len(args))
}

// recordPrettyPrint writes an indented rendering of the record to stdout.
func recordPrettyPrint(args ...Value) (Value, error) {
	if len(args) == 1 {
		if record, ok := args[0].(Record); ok {
			prettyRecord(record, 1)
			return NilValue, nil
		}
		return nil, fmt.Errorf("expected a record as argument")
	}
	return nil, fmt.Errorf("expected %v argument and got %v", 1, len(args))
}

// recordMakeIterator returns a fresh iterator over the record's pairs.
func recordMakeIterator(args ...Value) (Value, error) {
	if len(args) == 1 {
		if record, ok := args[0].(Record); ok {
			return NewRecordIterator(record, false), nil
		}
		return nil, fmt.Errorf("expected a record as argument")
	}
	return nil, fmt.Errorf("expected %v argument and got %v", 1, len(args))
}

// spaces returns level spaces for pretty-print indentation.
func spaces(level int) string {
	return strings.Repeat(" ", level)
}

// prettyRecord prints a record with nested indentation.
func prettyRecord(record Record, level int) {
	fmt.Printf("{\n")
	for key, value := range record.Properties {
		fmt.Printf("%v%v : ", spaces(level+1), key)
		switch v := value.(type) {
		case *String:
			fmt.Printf("\"%v\",\n", v)
		case *List:
			prettyList(v, level+2)
		case Map:
			prettyMap(v, level+2)
		case Record:
			prettyRecord(v, level+2)
		default:
			fmt.Printf("%v,\n", v)
		}
	}
	fmt.Printf("%v},\n", spaces(level-1))
}

// prettyList prints a list with nested indentation.
func prettyList(xs *List, level int) {
	fmt.Printf("\n%v[\n", spaces(level-1))
	for _, value := range xs.Elements {
		fmt.Printf("%v", spaces(level+1))
		switch v := value.(type) {
		case *String:
			fmt.Printf("\"%v\",\n", v)
		case *List:
			prettyList(v, level+2)
		case Map:
			prettyMap(v, level+2)
		case Record:
			prettyRecord(v, level+2)
		default:
			fmt.Printf("%v,\n", v)
		}
	}
	fmt.Printf("%v],\n", spaces(level-1))
}

// prettyMap prints a map with nested indentation.
func prettyMap(m Map, level int) {
	fmt.Printf("\n%v[\n", spaces(level-1))
	for _, value := range m {
		fmt.Printf("%v%v : ", spaces(level+1), value.key)
		switch v := (value.value).(type) {
		case *String:
			fmt.Printf("\"%v\",\n", v)
		case *List:
			prettyList(v, level+2)
		case Map:
			prettyMap(v, level+2)
		case Record:
			prettyRecord(v, level+2)
		default:
			fmt.Printf("%v,\n", v)
		}
	}
	fmt.Printf("%v],\n", spaces(level-1))
}
vida/record.go
0.626696
0.468304
record.go
starcoder
package quantile

import (
	"fmt"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/aggregators"
)

// Quantile keeps per-series aggregation state and, on each flush period,
// emits the configured quantiles of every numeric field it has seen.
type Quantile struct {
	Quantiles     []float64 `toml:"quantiles"`
	Compression   float64   `toml:"compression"`
	AlgorithmType string    `toml:"algorithm"`

	newAlgorithm newAlgorithmFunc

	cache    map[uint64]aggregate
	suffixes []string
}

// aggregate holds the running per-field algorithm state for one metric
// series (keyed by the metric's hash ID).
type aggregate struct {
	name   string
	fields map[string]algorithm
	tags   map[string]string
}

// newAlgorithmFunc constructs a quantile-estimation algorithm; the
// compression parameter is only meaningful for t-digest.
type newAlgorithmFunc func(compression float64) (algorithm, error)

var sampleConfig = `
  ## General Aggregator Arguments:
  ## The period on which to flush & clear the aggregator.
  period = "30s"

  ## If true, the original metric will be dropped by the
  ## aggregator and will not get sent to the output plugins.
  drop_original = false

  ## Quantiles to output in the range [0,1]
  # quantiles = [0.25, 0.5, 0.75]

  ## Type of aggregation algorithm
  ## Supported are:
  ##  "t-digest" -- approximation using centroids, can cope with large number of samples
  ##  "exact R7" -- exact computation also used by Excel or NumPy (Hyndman & Fan 1996 R7)
  ##  "exact R8" -- exact computation (Hyndman & Fan 1996 R8)
  ## NOTE: Do not use "exact" algorithms with large number of samples
  ##       to not impair performance or memory consumption!
  # algorithm = "t-digest"

  ## Compression for approximation (t-digest). The value needs to be
  ## greater or equal to 1.0. Smaller values will result in more
  ## performance but less accuracy.
  # compression = 100.0
`

// SampleConfig returns the example configuration snippet.
func (q *Quantile) SampleConfig() string {
	return sampleConfig
}

// Description returns a one-line summary of the aggregator.
func (q *Quantile) Description() string {
	return "Keep the aggregate quantiles of each metric passing through."
}

// Add folds the metric's convertible (numeric) fields into the cached
// per-series algorithm state, creating the state on first sight of a
// series.  Fields that appear only after the series was first cached
// are ignored (matching existing behavior).
func (q *Quantile) Add(in telegraf.Metric) {
	id := in.HashID()
	if cached, ok := q.cache[id]; ok {
		fields := in.Fields()
		for k, algo := range cached.fields {
			if field, ok := fields[k]; ok {
				if v, isconvertible := convert(field); isconvertible {
					algo.Add(v)
				}
			}
		}
		return
	}

	// New metric, setup cache and init algorithm
	a := aggregate{
		name:   in.Name(),
		tags:   in.Tags(),
		fields: make(map[string]algorithm),
	}
	for k, field := range in.Fields() {
		if v, isconvertible := convert(field); isconvertible {
			// This should never error out as we tested it in Init()
			algo, _ := q.newAlgorithm(q.Compression)
			algo.Add(v)
			a.fields[k] = algo
		}
	}
	q.cache[id] = a
}

// Push emits, for every cached series, one field per (input field,
// quantile) pair using the precomputed suffixes.
func (q *Quantile) Push(acc telegraf.Accumulator) {
	for _, aggregate := range q.cache {
		fields := map[string]interface{}{}
		for k, algo := range aggregate.fields {
			for i, qtl := range q.Quantiles {
				fields[k+q.suffixes[i]] = algo.Quantile(qtl)
			}
		}
		acc.AddFields(aggregate.name, fields, aggregate.tags)
	}
}

// Reset discards all cached series state at the end of a period.
func (q *Quantile) Reset() {
	q.cache = make(map[uint64]aggregate)
}

// convert coerces a field value to float64, reporting whether the
// underlying type was numeric.
func convert(in interface{}) (float64, bool) {
	switch v := in.(type) {
	case float64:
		return v, true
	case int64:
		return float64(v), true
	case uint64:
		return float64(v), true
	default:
		return 0, false
	}
}

// Init validates the configuration, selects the algorithm, and
// precomputes the output field suffixes.
func (q *Quantile) Init() error {
	switch q.AlgorithmType {
	case "t-digest", "":
		q.newAlgorithm = newTDigest
	case "exact R7":
		q.newAlgorithm = newExactR7
	case "exact R8":
		q.newAlgorithm = newExactR8
	default:
		return fmt.Errorf("unknown algorithm type %q", q.AlgorithmType)
	}
	if _, err := q.newAlgorithm(q.Compression); err != nil {
		return fmt.Errorf("cannot create %q algorithm: %v", q.AlgorithmType, err)
	}

	if len(q.Quantiles) == 0 {
		q.Quantiles = []float64{0.25, 0.5, 0.75}
	}

	duplicates := make(map[float64]bool)
	usedSuffixes := make(map[string]float64, len(q.Quantiles))
	q.suffixes = make([]string, len(q.Quantiles))
	for i, qtl := range q.Quantiles {
		if qtl < 0.0 || qtl > 1.0 {
			return fmt.Errorf("quantile %v out of range", qtl)
		}
		if _, found := duplicates[qtl]; found {
			return fmt.Errorf("duplicate quantile %v", qtl)
		}
		duplicates[qtl] = true

		// The suffix keeps only two decimal places, so distinct
		// quantiles such as 0.999 and 0.99 would previously map to the
		// same output field and silently overwrite each other; reject
		// such collisions explicitly.
		suffix := fmt.Sprintf("_%03d", int(qtl*100.0))
		if other, found := usedSuffixes[suffix]; found {
			return fmt.Errorf("quantiles %v and %v map to the same field suffix %q", other, qtl, suffix)
		}
		usedSuffixes[suffix] = qtl
		q.suffixes[i] = suffix
	}

	q.Reset()
	return nil
}

func init() {
	aggregators.Add("quantile", func() telegraf.Aggregator {
		return &Quantile{Compression: 100}
	})
}
plugins/aggregators/quantile/quantile.go
0.813053
0.476458
quantile.go
starcoder
package device // A Weatherlink device is simulated by guessing what commands were // requested based on the packet sizes. It's not perfect but is a // convenient way to allow low level protocol testing. import ( "io" "math/rand" "time" "github.com/ebarkie/weatherlink/data" ) // Sim represents a simulted Weatherlink device. type Sim struct { l data.Loop // Current loop packet state nextLoopType int // Loop type to send next (so they are interleaved) // lastWrite and readsSinceWrite are used by ReadFull() to determine // what's expected to be read. This is simple and avoids implementing // a state machine. lastWrite []byte readsSinceWrite int } // Dial initializes the state of a simulated Weatherlink device. func (s *Sim) Dial(addr string) error { // Starting loop values which will pass typical QC processes. s.l.Bar.Altimeter = 29.0 s.l.Bar.SeaLevel = 29.0 s.l.Bar.Station = 29.0 s.l.OutHumidity = 50 s.l.OutTemp = 65.0 s.l.Wind.Cur.Speed = 3 return nil } // Close closes the simulated Weatherlink device. func (s *Sim) Close() error { s.l = data.Loop{} s.nextLoopType = 0 return nil } // Flush flushes the input buffers of the simulated Weatherlink device. func (Sim) Flush() error { return nil } // Read reads up to the size of the provided byte buffer from the // simulated Weatherlink device. func (Sim) Read([]byte) (int, error) { return 0, io.ErrUnexpectedEOF } // ReadFull reads the full size of the provided byte buffer from the // simulted Weatherlink device. 
func (s *Sim) ReadFull(b []byte) (n int, err error) { const ack = 0x06 // Acknowledge s.readsSinceWrite++ var p []byte switch { case len(b) == 1: // Command ack p = []byte{ack} case len(b) == 6 && s.readsSinceWrite < 2: // Command OK p = []byte("\n\rOK\n\r") case string(s.lastWrite) == "GETTIME\n": ct := data.ConsTime(time.Now()) p, err = ct.MarshalBinary() case string(s.lastWrite) == "NVER\n": fv := data.FirmVer("1.73") p, err = fv.MarshalText() case string(s.lastWrite) == "TEST\n": p = []byte("\n\rTEST\n\r") case string(s.lastWrite) == "VER\n": ft := data.FirmTime(time.Date(2002, time.April, 24, 0, 0, 0, 0, time.UTC)) p, err = ft.MarshalText() case len(b) == 99: // LPS 3 x // Interleave loop types. s.l.LoopType = s.nextLoopType + 1 s.nextLoopType = (s.nextLoopType + 1) % 2 // Make observation values wander around like they would on a // real station. s.l.Bar.Altimeter = wander(s.l.Bar.Altimeter, 0.01) s.l.Bar.SeaLevel = wander(s.l.Bar.SeaLevel, 0.01) s.l.Bar.Station = wander(s.l.Bar.Station, 0.01) s.l.OutHumidity = int(wander(float64(s.l.OutHumidity), 1)) s.l.OutTemp = wander(s.l.OutTemp, 0.5) s.l.Wind.Cur.Speed = int(wander(float64(s.l.Wind.Cur.Speed), 1)) s.l.LoopType = s.nextLoopType + 1 s.nextLoopType = (s.nextLoopType + 1) % 2 p, err = s.l.MarshalBinary() // Create 2s delay between packets. time.Sleep(2 * time.Second) default: return 0, io.ErrUnexpectedEOF } n = copy(b, p) return } // Write simulates a write of the byte buffer. func (s *Sim) Write(b []byte) (int, error) { s.lastWrite = b s.readsSinceWrite = 0 return len(b), nil } // wander takes a value and randomly adds +/- step or zero. func wander(v, step float64) float64 { rand.Seed(int64(time.Now().Nanosecond())) return v + float64(rand.Intn(3)-1)*step }
internal/device/sim.go
0.695441
0.51501
sim.go
starcoder
package vm

import (
	"crypto/sha256"

	"github.com/Dipper-Protocol/x/vm/common"
	math2 "github.com/Dipper-Protocol/x/vm/common/math"

	"math/big"

	"golang.org/x/crypto/ripemd160"

	ethsecp256k1 "github.com/ethereum/go-ethereum/crypto/secp256k1"

	sdk "github.com/Dipper-Protocol/types"

	"github.com/tendermint/tendermint/crypto"
)

// PrecompiledContract is the basic interface for native Go contracts. The implementation
// requires a deterministic gas count based on the input size of the Run method of the
// contract.
type PrecompiledContract interface {
	RequiredGas(input []byte) uint64 // RequiredPrice calculates the contract gas use
	Run(input []byte) ([]byte, error) // Run runs the precompiled contract
}

// PrecompiledContracts contains the default set of pre-compiled contracts used in the Istanbul release.
// Keyed by the bech32/hex string form of the conventional low addresses 0x01..0x05;
// the bn256 and blake2 precompiles are intentionally disabled here.
var PrecompiledContracts = map[string]PrecompiledContract{
	(sdk.BytesToAddress([]byte{1})).String(): &ecrecover{},
	(sdk.BytesToAddress([]byte{2})).String(): &sha256hash{},
	(sdk.BytesToAddress([]byte{3})).String(): &ripemd160hash{},
	(sdk.BytesToAddress([]byte{4})).String(): &dataCopy{},
	(sdk.BytesToAddress([]byte{5})).String(): &bigModExp{},
	//(sdk.BytesToAddress([]byte{6})).String(): &bn256Add{},
	//(sdk.BytesToAddress([]byte{7})).String(): &bn256ScalarMul{},
	//(sdk.BytesToAddress([]byte{8})).String(): &bn256Pairing{},
	//(sdk.BytesToAddress([]byte{9})).String(): &blake2F{},
}

var (
	// true32Byte is returned if the bn256 pairing check succeeds.
	true32Byte = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}

	// false32Byte is returned if the bn256 pairing check fails.
	false32Byte = make([]byte, 32)
)

// RunPrecompiledContract runs and evaluates the output of a precompiled contract.
func RunPrecompiledContract(p PrecompiledContract, input []byte, contract *Contract) (ret []byte, err error) {
	// Charge the caller's gas meter up front; only execute when the
	// full fee can be paid.
	gas := p.RequiredGas(input)
	if contract.UseGas(gas) {
		return p.Run(input)
	}
	return nil, ErrOutOfGas
}

var (
	// secp256k1N is the order of the secp256k1 curve group;
	// secp256k1halfN is N/2, used to reject high-s (malleable)
	// signatures.
	secp256k1N, _  = new(big.Int).SetString("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", 16)
	secp256k1halfN = new(big.Int).Div(secp256k1N, big.NewInt(2))
)

// ValidateSignatureValues verifies whether the signature values are valid with
// the given chain rules. The v value is assumed to be either 0 or 1.
func ValidateSignatureValues(v byte, r, s *big.Int) bool {
	if r.Cmp(common.Big1) < 0 || s.Cmp(common.Big1) < 0 {
		return false
	}
	// reject upper range of s values (ECDSA malleability)
	// see discussion in secp256k1/libsecp256k1/include/secp256k1.h
	// NOTE(review): upstream geth gates this half-N check on the
	// homestead flag; here it is unconditional — confirm intended.
	if s.Cmp(secp256k1halfN) > 0 {
		return false
	}
	// Frontier: allow s to be in full N range
	return r.Cmp(secp256k1N) < 0 && s.Cmp(secp256k1N) < 0 && (v == 0 || v == 1)
}

// Ecrecover returns the uncompressed public key that created the given signature.
func Ecrecover(hash, sig []byte) ([]byte, error) {
	return ethsecp256k1.RecoverPubkey(hash, sig)
}

// ECRECOVER implemented as a native contract.
type ecrecover struct{}

func (c *ecrecover) RequiredGas(input []byte) uint64 {
	return EcrecoverGas
}

func (c *ecrecover) Run(input []byte) ([]byte, error) {
	const ecRecoverInputLength = 128

	input = common.RightPadBytes(input, ecRecoverInputLength)
	// "input" is (hash, v, r, s), each 32 bytes
	// but for ecrecover we want (r, s, v)

	r := new(big.Int).SetBytes(input[64:96])
	s := new(big.Int).SetBytes(input[96:128])
	v := input[63] - 27

	// tighter sig s values input homestead only apply to tx sigs
	// Invalid signatures return empty output (no error) per EVM rules.
	if !allZero(input[32:64]) || !ValidateSignatureValues(v, r, s) {
		return nil, nil
	}
	// We must make sure not to modify the 'input', so placing the 'v' along with
	// the signature needs to be done on a new allocation
	sig := make([]byte, 65)
	copy(sig, input[64:128])
	sig[64] = v // v needs to be at the end for libsecp256k1
	pubKey, err := Ecrecover(input[:32], sig)
	// make sure the public key is a valid one
	if err != nil {
		return nil, nil
	}

	// the first byte of pubkey is bitcoin heritage
	// NOTE(review): upstream Ethereum derives the address with
	// Keccak-256; tendermint's crypto.Sha256 is used here — confirm
	// this is intentional for this chain.
	return common.LeftPadBytes(crypto.Sha256(pubKey[1:])[12:], 32), nil
}

// SHA256 implemented as a native contract.
type sha256hash struct{}

// RequiredGas returns the gas required to execute the pre-compiled contract.
//
// This method does not require any overflow checking as the input size gas costs
// required for anything significant is so high it's impossible to pay for.
func (c *sha256hash) RequiredGas(input []byte) uint64 {
	return uint64(len(input)+31)/32*Sha256PerWordGas + Sha256BaseGas
}

func (c *sha256hash) Run(input []byte) ([]byte, error) {
	h := sha256.Sum256(input)
	return h[:], nil
}

// RIPEMD160 implemented as a native contract.
type ripemd160hash struct{}

// RequiredGas returns the gas required to execute the pre-compiled contract.
func (c *ripemd160hash) RequiredGas(input []byte) uint64 {
	return uint64(len(input)+31)/32*Ripemd160PerWordGas + Ripemd160BaseGas
}

func (c *ripemd160hash) Run(input []byte) ([]byte, error) {
	ripemd := ripemd160.New()
	ripemd.Write(input)
	// RIPEMD-160 digests are 20 bytes; left-pad to a 32-byte EVM word.
	return common.LeftPadBytes(ripemd.Sum(nil), 32), nil
}

// data copy implemented as a native contract.
type dataCopy struct{}

// RequiredGas returns the gas required to execute the pre-compiled contract.
func (c *dataCopy) RequiredGas(input []byte) uint64 {
	return uint64(len(input)+31)/32*IdentityPerWordGas + IdentityBaseGas
}

func (c *dataCopy) Run(in []byte) ([]byte, error) {
	return in, nil
}

// bigModExp implements a native big integer exponential modular operation.
type bigModExp struct{}

// RequiredGas returns the gas required to execute the pre-compiled contract.
// The formula follows EIP-198: cost scales with the square of
// max(modLen, baseLen) and with the adjusted exponent length.
func (c *bigModExp) RequiredGas(input []byte) uint64 {
	var (
		baseLen = new(big.Int).SetBytes(getData(input, 0, 32))
		expLen  = new(big.Int).SetBytes(getData(input, 32, 32))
		modLen  = new(big.Int).SetBytes(getData(input, 64, 32))
	)
	// Skip the 96-byte length header; the operands follow.
	if len(input) > 96 {
		input = input[96:]
	} else {
		input = input[:0]
	}
	// Retrieve the head 32 bytes of exp for the adjusted exponent length
	var expHead *big.Int
	if big.NewInt(int64(len(input))).Cmp(baseLen) <= 0 {
		expHead = new(big.Int)
	} else {
		if expLen.Cmp(common.Big32) > 0 {
			expHead = new(big.Int).SetBytes(getData(input, baseLen.Uint64(), 32))
		} else {
			expHead = new(big.Int).SetBytes(getData(input, baseLen.Uint64(), expLen.Uint64()))
		}
	}
	// Calculate the adjusted exponent length
	var msb int
	if bitlen := expHead.BitLen(); bitlen > 0 {
		msb = bitlen - 1
	}
	adjExpLen := new(big.Int)
	if expLen.Cmp(common.Big32) > 0 {
		adjExpLen.Sub(expLen, common.Big32)
		adjExpLen.Mul(common.Big8, adjExpLen)
	}
	adjExpLen.Add(adjExpLen, big.NewInt(int64(msb)))

	// Calculate the gas cost of the operation (piecewise-quadratic in
	// the larger of base/modulus length, per EIP-198).
	gas := new(big.Int).Set(math2.BigMax(modLen, baseLen))
	switch {
	case gas.Cmp(common.Big64) <= 0:
		gas.Mul(gas, gas)
	case gas.Cmp(common.Big1024) <= 0:
		gas = new(big.Int).Add(
			new(big.Int).Div(new(big.Int).Mul(gas, gas), common.Big4),
			new(big.Int).Sub(new(big.Int).Mul(common.Big96, gas), common.Big3072),
		)
	default:
		gas = new(big.Int).Add(
			new(big.Int).Div(new(big.Int).Mul(gas, gas), common.Big16),
			new(big.Int).Sub(new(big.Int).Mul(common.Big480, gas), common.Big199680),
		)
	}
	gas.Mul(gas, math2.BigMax(adjExpLen, common.Big1))
	gas.Div(gas, new(big.Int).SetUint64(ModExpQuadCoeffDiv))

	// Clamp to the maximum representable gas on overflow.
	if gas.BitLen() > 64 {
		return math2.MaxUint64
	}
	return gas.Uint64()
}

// Run computes base^exp mod modulus over the EIP-198 input layout:
// three 32-byte big-endian lengths followed by the packed operands.
// The result is left-padded to the modulus length.
func (c *bigModExp) Run(input []byte) ([]byte, error) {
	var (
		baseLen = new(big.Int).SetBytes(getData(input, 0, 32)).Uint64()
		expLen  = new(big.Int).SetBytes(getData(input, 32, 32)).Uint64()
		modLen  = new(big.Int).SetBytes(getData(input, 64, 32)).Uint64()
	)
	if len(input) > 96 {
		input = input[96:]
	} else {
		input = input[:0]
	}
	// Handle a special case when both the base and mod length is zero
	if baseLen == 0 && modLen == 0 {
		return []byte{}, nil
	}
	// Retrieve the operands and execute the exponentiation
	var (
		base = new(big.Int).SetBytes(getData(input, 0, baseLen))
		exp  = new(big.Int).SetBytes(getData(input, baseLen, expLen))
		mod  = new(big.Int).SetBytes(getData(input, baseLen+expLen, modLen))
	)
	if mod.BitLen() == 0 {
		// Modulo 0 is undefined, return zero
		return common.LeftPadBytes([]byte{}, int(modLen)), nil
	}
	return common.LeftPadBytes(base.Exp(base, exp, mod).Bytes(), int(modLen)), nil
}
x/vm/contracts.go
0.648132
0.424591
contracts.go
starcoder
package vit import ( "fmt" "github.com/omniskop/vitrum/vit/script" ) // Component describes a generic vit component type Component interface { DefineProperty(name string, vitType string, expression string, position *PositionRange) error // Creates a new property. On failure it returns either a RedeclarationError or UnknownTypeError. DefineEnum(Enumeration) bool Property(name string) (Value, bool) // returns the property with the given name, and a boolean indicating whether the property exists MustProperty(name string) Value // same as Property but panics if the property doesn't exist SetProperty(name string, value interface{}, position *PositionRange) bool // sets the property with the given name to the given value and returns a boolean indicating whether the property exists ResolveVariable(name string) (interface{}, bool) // searches the scope for a variable with the given name. Returns either an expression or a component. The boolean indicates wether the variable was found. ResolveID(id string) (Component, bool) // Recursively searches the children for a component with the given id. It does not check itself, only it's children! AddChild(Component) // Adds the given component as a child and also set's their parent to this component Children() []Component // Returns all children of this component SetParent(Component) // Sets the parent of this component to the given component ID() string // Returns the id of this component String() string // Returns a short string representation of this component UpdateExpressions() (int, ErrorGroup) // Recursively reevaluate all expressions that got dirty. Returns the number of reevaluated expression (includes potential failed ones) As(*Component) bool // Returns true if this component is of the same type as the given parameter. It also changes the parameter to point to this component. RootC() *Root // returns the root of this component Finish() error // Finishes the component instantiation. 
Should only be called by components that embed this one. } func FinishComponent(comp Component) error { return comp.Finish() } type Instantiator interface { Instantiate() (Component, error) } type Enumeration struct { Name string Embedded bool Values map[string]int Position PositionRange } func (e Enumeration) ResolveVariable(name string) (interface{}, bool) { if value, ok := e.Values[name]; ok { return value, true } return nil, false } type AbstractComponent interface { script.VariableSource Instantiate(string, ComponentContainer) (Component, error) Name() string // Static values } // ComponentContainer holds a list of abstract components type ComponentContainer struct { Global map[string]AbstractComponent // globally defined componets Local map[string]AbstractComponent // components specific to the current document } func NewComponentContainer() ComponentContainer { return ComponentContainer{ Global: make(map[string]AbstractComponent), Local: make(map[string]AbstractComponent), } } // Returns a new ComponentContainer carrying over the global components from this one. func (c ComponentContainer) JustGlobal() ComponentContainer { return ComponentContainer{ Global: c.Global, Local: make(map[string]AbstractComponent), } } // Returns a new ComponentContainer using this local components as global ones. func (c ComponentContainer) ToGlobal() ComponentContainer { return ComponentContainer{ Global: c.Local, Local: make(map[string]AbstractComponent), } } func (c ComponentContainer) Set(name string, comp AbstractComponent) { c.Local[name] = comp } func (c ComponentContainer) Get(names string) (AbstractComponent, bool) { src, ok := c.Local[names] if !ok { src, ok = c.Global[names] } return src, ok } // ErrorGroup contains a list of multiple error and may be used whenever multiple errors may occur without the need to fail immediately. // To check if an error actually occurred use the method 'Failed'. type ErrorGroup struct { Errors []error } // Add the error to the list. 
If err is nil it won't be added. func (e *ErrorGroup) Add(err error) { if err != nil { e.Errors = append(e.Errors, err) } } // AddGroup adds all errors of another group to this one. It doesn't matter if the other group is empty or not. func (e *ErrorGroup) AddGroup(group ErrorGroup) { if !group.Failed() { return } e.Errors = append(e.Errors, group.Errors...) } // Failed returns true if the group contains at least one error. func (e *ErrorGroup) Failed() bool { return len(e.Errors) > 0 } // Error implements the error interface. It does not actually return any of the errors itself, but just a short information about the amount of errors. func (e ErrorGroup) Error() string { if !e.Failed() { return "no errors" } return fmt.Sprintf("group with %d errors", len(e.Errors)) } func (e ErrorGroup) Is(target error) bool { _, ok := target.(ErrorGroup) return ok } type RedeclarationError struct { PropertyName string PreviousDefinition PositionRange } func (e RedeclarationError) Error() string { return fmt.Sprintf("property %q is already declared. (Previous declaration at %s)", e.PropertyName, e.PreviousDefinition.String()) } func (e RedeclarationError) Is(target error) bool { _, ok := target.(RedeclarationError) return ok } type UnknownTypeError struct { TypeName string } func (e UnknownTypeError) Error() string { return fmt.Sprintf("unknown type '%s'", e.TypeName) } func (e UnknownTypeError) Is(target error) bool { _, ok := target.(UnknownTypeError) return ok }
vit/vit.go
0.816699
0.452113
vit.go
starcoder
package main import ( "../aoc" "fmt" "log" "strings" ) var Active = "#" var Inactive = "." type Grid3D struct { Points map[aoc.Point3D]string MinX, MaxX, MinY, MaxY, MinZ, MaxZ int Neighbors map[aoc.Point3D][]aoc.Point3D } func NewGrid() *Grid3D { var grid Grid3D grid.Points = make(map[aoc.Point3D]string) grid.Neighbors = make(map[aoc.Point3D][]aoc.Point3D) return &grid } func (g *Grid3D) GetNeighbors(p aoc.Point3D) []aoc.Point3D { n, ok := g.Neighbors[p] if !ok { // Not already in the map, so generate the neighbors n = p.Neighbors() g.Neighbors[p] = n } return n } func (g *Grid3D) CountActiveNeighbors(p aoc.Point3D) int { result := 0 for _, n := range g.GetNeighbors(p) { if g.Points[n] == Active { result += 1 } } return result } func (g *Grid3D) CountActiveCells() int { result := 0 for _, s := range g.Points { if s == Active { result++ } } return result } func loadGrid3D(filename string) (*Grid3D, error) { lines, err := aoc.ReadFileOfStrings(filename) if err != nil { return nil, err } grid := NewGrid() maxX := 0 maxY := 0 for y, line := range lines { if y > maxY { maxY = y } for x, state := range strings.Split(line, "") { point := aoc.Point3D{X: x, Y: y} grid.Points[point] = state if x > maxX { maxX = x } } } grid.MaxX = maxX grid.MaxY = maxY return grid, nil } func RunCycle(grid *Grid3D) *Grid3D { result := NewGrid() result.MinX = grid.MinX - 1 result.MaxX = grid.MaxX + 1 result.MinY = grid.MinY - 1 result.MaxY = grid.MaxY + 1 result.MinZ = grid.MinZ - 1 result.MaxZ = grid.MaxZ + 1 result.Neighbors = grid.Neighbors for x := result.MinX; x <= result.MaxX; x++ { for y := result.MinY; y <= result.MaxY; y++ { for z := result.MinZ; z <= result.MaxZ; z++ { loc := aoc.Point3D{X: x, Y: y, Z: z} //log.Printf("%v: %s", loc, grid.Points[loc]) activeNeighbors := grid.CountActiveNeighbors(loc) state := grid.Points[loc] if state == Active { if activeNeighbors == 2 || activeNeighbors == 3 { result.Points[loc] = Active } else { result.Points[loc] = Inactive } } else { if 
activeNeighbors == 3 { result.Points[loc] = Active } else { result.Points[loc] = Inactive } } } } } return result } func part1(filename string) (int, error) { grid, err := loadGrid3D(filename) if err != nil { return 0, err } for i := 0; i < 6; i++ { grid = RunCycle(grid) } return grid.CountActiveCells(), nil } type Grid4D struct { Points map[aoc.Point4D]string Min aoc.Point4D Max aoc.Point4D Neighbors map[aoc.Point4D][]aoc.Point4D } func NewGrid4D() *Grid4D { var grid Grid4D grid.Points = make(map[aoc.Point4D]string) grid.Neighbors = make(map[aoc.Point4D][]aoc.Point4D) return &grid } func (g *Grid4D) GetNeighbors(p aoc.Point4D) []aoc.Point4D { n, ok := g.Neighbors[p] if !ok { // Not already in the map, so generate the neighbors n = p.Neighbors() g.Neighbors[p] = n } return n } func (g *Grid4D) CountActiveNeighbors(p aoc.Point4D) int { result := 0 for _, n := range g.GetNeighbors(p) { if g.Points[n] == Active { result += 1 } } return result } func (g *Grid4D) CountActiveCells() int { result := 0 for _, s := range g.Points { if s == Active { result++ } } return result } func loadGrid4D(filename string) (*Grid4D, error) { lines, err := aoc.ReadFileOfStrings(filename) if err != nil { return nil, err } grid := NewGrid4D() maxX := 0 maxY := 0 for y, line := range lines { if y > maxY { maxY = y } for x, state := range strings.Split(line, "") { point := aoc.Point4D{X: x, Y: y} grid.Points[point] = state if x > maxX { maxX = x } } } grid.Max.X = maxX grid.Max.Y = maxY return grid, nil } func RunCycle4D(grid *Grid4D) *Grid4D { result := NewGrid4D() result.Min.X = grid.Min.X - 1 result.Max.X = grid.Max.X + 1 result.Min.Y = grid.Min.Y - 1 result.Max.Y = grid.Max.Y + 1 result.Min.Z = grid.Min.Z - 1 result.Max.Z = grid.Max.Z + 1 result.Min.W = grid.Min.W - 1 result.Max.W = grid.Max.W + 1 result.Neighbors = grid.Neighbors for x := result.Min.X; x <= result.Max.X; x++ { for y := result.Min.Y; y <= result.Max.Y; y++ { for z := result.Min.Z; z <= result.Max.Z; z++ { 
//log.Printf("%v: %s", loc, grid.Points[loc]) for w := result.Min.W; w <= result.Max.W; w++ { loc := aoc.Point4D{X: x, Y: y, Z: z, W: w} activeNeighbors := grid.CountActiveNeighbors(loc) state := grid.Points[loc] if state == Active { if activeNeighbors == 2 || activeNeighbors == 3 { result.Points[loc] = Active } else { result.Points[loc] = Inactive } } else { if activeNeighbors == 3 { result.Points[loc] = Active } else { result.Points[loc] = Inactive } } } } } } return result } func part2(filename string) (int, error) { grid, err := loadGrid4D(filename) if err != nil { return 0, err } for i := 0; i < 6; i++ { grid = RunCycle4D(grid) //active := grid.CountActiveCells() //log.Printf("Cycle %d: %d active cells", i + 1, active) } return grid.CountActiveCells(), nil } func main() { fmt.Println("Hello, World!") filename := "input.txt" filename = "test-input1.txt" //filename = "test-input2.txt" p1, err := part1(filename) if err != nil { log.Fatal(err) } fmt.Printf("Part 1: %d\n", p1) p2, err := part2(filename) if err != nil { log.Fatal(err) } fmt.Printf("Part 2: %d\n", p2) }
2020/day17/main.go
0.529507
0.552359
main.go
starcoder
package ai import ( "github.com/xwjdsh/2048-ai/grid" ) type AI struct { // Grid is 4x4 grid. Grid *grid.Grid // Active is true represent need to select a direction to move, else represent computer need fill a number("2" or "4") into grid. Active bool } var directions = []grid.Direction{ grid.UP, grid.LEFT, grid.DOWN, grid.RIGHT, } // The chance is 10% about fill "4" into grid and 90% fill "2" in the 2048 game. var expectMap = map[int]float64{ 2: 0.9, 4: 0.1, } var ( // There are three model weight matrix, represents three formation for 2048 game, it from internet. // The evaluate function is simple and crude, so actually it's not stable. // If you feel interesting in evaluation function, you can read https://github.com/ovolve/2048-AI project source code. model1 = [][]int{ {16, 15, 14, 13}, {9, 10, 11, 12}, {8, 7, 6, 5}, {1, 2, 3, 4}, } model2 = [][]int{ {16, 15, 12, 4}, {14, 13, 11, 3}, {10, 9, 8, 2}, {7, 6, 5, 1}, } model3 = [][]int{ {16, 15, 14, 4}, {13, 12, 11, 3}, {10, 9, 8, 2}, {7, 6, 5, 1}, } ) // Search method compute each could move direction score result by expect search algorithm func (a *AI) Search() grid.Direction { var ( bestDire = grid.NONE bestScore float64 ) // depth value depending on grid's max value. dept := a.deptSelect() for _, dire := range directions { newGrid := a.Grid.Clone() if newGrid.Move(dire) { // Could move. // Active is false represent computer should fill number to grid now. 
newAI := &AI{Grid: newGrid, Active: false} if newScore := newAI.expectSearch(dept); newScore > bestScore { bestDire = dire bestScore = newScore } } } return bestDire } // expect search implements func (a *AI) expectSearch(dept int) float64 { if dept == 0 { return float64(a.score()) } var score float64 if a.Active { for _, d := range directions { newGrid := a.Grid.Clone() if newGrid.Move(d) { newAI := &AI{Grid: newGrid, Active: false} if newScore := newAI.expectSearch(dept - 1); newScore > score { score = newScore } } } } else { // computer fill a number to grid now, it will try each vacant point with "2" or "4" points := a.Grid.VacantPoints() for k, v := range expectMap { for _, point := range points { newGrid := a.Grid.Clone() newGrid.Data[point.X][point.Y] = k // Change active, select a direction to move now. newAI := &AI{Grid: newGrid, Active: true} newScore := newAI.expectSearch(dept - 1) score += float64(newScore) * v } } score /= float64(len(points)) } return score } // score method evaluate a grid func (a *AI) score() int { result := make([]int, 24) for x := 0; x < 4; x++ { for y := 0; y < 4; y++ { if value := a.Grid.Data[x][y]; value != 0 { // get eight result(rotate and flip grid) for each model, modelScore(0, x, y, value, model1, &result) modelScore(1, x, y, value, model2, &result) modelScore(2, x, y, value, model3, &result) } } } // get max score in above 24 result, apply best formation var max int for _, v := range result { if v > max { max = v } } return max } // get eight result(rotate and flip grid) for each model func modelScore(index, x, y, value int, model [][]int, result *[]int) { start := index * 8 r := *result r[start] += value * model[x][y] r[start+1] += value * model[x][3-y] r[start+2] += value * model[y][x] r[start+3] += value * model[3-y][x] r[start+4] += value * model[3-x][3-y] r[start+5] += value * model[3-x][y] r[start+6] += value * model[y][3-x] r[start+7] += value * model[3-y][3-x] } // the return value is search depth, it depending on 
grid's max value // the max value larger and depth larger, this will takes more calculations and make move became slowly but maybe have a better score result. func (a *AI) deptSelect() int { dept := 4 max := a.Grid.Max() if max >= 2048 { dept = 6 } else if max >= 1024 { dept = 5 } return dept }
ai/ai.go
0.636692
0.460835
ai.go
starcoder
package sqlx import ( "database/sql" "github.com/tietang/sqlx/reflectx" "reflect" "upper.io/db.v3" ) type hasConvertValues interface { ConvertValues(values []interface{}) []interface{} } // fetchRow receives a *sql.Rows value and tries to map all the rows into a // single struct given by the pointer `dst`. func fetchRow(rows *sql.Rows, dst interface{}) error { var columns []string var err error dstv := reflect.ValueOf(dst) if dstv.IsNil() || dstv.Kind() != reflect.Ptr { return ErrExpectingPointer } itemV := dstv.Elem() if columns, err = rows.Columns(); err != nil { return err } reset(dst) next := rows.Next() if next == false { if err = rows.Err(); err != nil { return err } return db.ErrNoMoreRows } itemT := itemV.Type() item, err := fetchResult(rows, itemT, columns) if err != nil { return err } if itemT.Kind() == reflect.Ptr { itemV.Set(item) } else { itemV.Set(reflect.Indirect(item)) } return nil } // fetchRows receives a *sql.Rows value and tries to map all the rows into a // slice of structs given by the pointer `dst`. func fetchRows(rows *sql.Rows, dst interface{}) error { var err error defer rows.Close() // Destination. 
dstv := reflect.ValueOf(dst) if dstv.IsNil() || dstv.Kind() != reflect.Ptr { return ErrExpectingPointer } if dstv.Elem().Kind() != reflect.Slice { return ErrExpectingSlicePointer } if dstv.Kind() != reflect.Ptr || dstv.Elem().Kind() != reflect.Slice || dstv.IsNil() { return ErrExpectingSliceMapStruct } var columns []string if columns, err = rows.Columns(); err != nil { return err } slicev := dstv.Elem() itemT := slicev.Type().Elem() reset(dst) for rows.Next() { item, err := fetchResult(rows, itemT, columns) if err != nil { return err } if itemT.Kind() == reflect.Ptr { slicev = reflect.Append(slicev, item) } else { slicev = reflect.Append(slicev, reflect.Indirect(item)) } } dstv.Elem().Set(slicev) return rows.Err() } func fetchResult(rows *sql.Rows, itemT reflect.Type, columns []string) (reflect.Value, error) { var item reflect.Value var err error objT := itemT switch objT.Kind() { case reflect.Map: item = reflect.MakeMap(objT) case reflect.Struct: item = reflect.New(objT) case reflect.Ptr: objT = itemT.Elem() if objT.Kind() != reflect.Struct { return item, ErrExpectingMapOrStruct } item = reflect.New(objT) default: return item, ErrExpectingMapOrStruct } switch objT.Kind() { case reflect.Struct: values := make([]interface{}, len(columns)) typeMap := mapper().TypeMap(itemT) fieldMap := typeMap.Names for i, k := range columns { fi, ok := fieldMap[k] if !ok { values[i] = new(interface{}) continue } // Check for deprecated jsonb tag. 
if _, hasJSONBTag := fi.Options["jsonb"]; hasJSONBTag { return item, errDeprecatedJSONBTag } f := reflectx.FieldByIndexes(item, fi.Index) values[i] = f.Addr().Interface() } if err = rows.Scan(values...); err != nil { return item, err } case reflect.Map: columns, err := rows.Columns() if err != nil { return item, err } values := make([]interface{}, len(columns)) for i := range values { if itemT.Elem().Kind() == reflect.Interface { values[i] = new(interface{}) } else { values[i] = reflect.New(itemT.Elem()).Interface() } } if err = rows.Scan(values...); err != nil { return item, err } for i, column := range columns { item.SetMapIndex(reflect.ValueOf(column), reflect.Indirect(reflect.ValueOf(values[i]))) } } return item, nil } func reset(data interface{}) error { // Resetting element. v := reflect.ValueOf(data).Elem() t := v.Type() var z reflect.Value switch v.Kind() { case reflect.Slice: z = reflect.MakeSlice(t, 0, v.Cap()) default: z = reflect.Zero(t) } v.Set(z) return nil }
fetch.go
0.722233
0.409693
fetch.go
starcoder
This file contains abstractions of certain complex map types that are used often throughout summarize.go. They also have some often-used methods such as getting just the keys, or getting the key-value pairs as a map. */ package summarize import ( "sort" ) // map[string][]failure type alias // failuresGroup maps strings to failure slices. type failuresGroup map[string][]failure // failuresGroupPair is a representation of a failuresGroup key-value mapping as a two-element // struct. type failuresGroupPair struct { Key string `json:"key"` Failures []failure `json:"failures"` } // keys provides the failuresGroup's keys as a string slice. func (fg *failuresGroup) keys() []string { result := make([]string, 0, len(*fg)) for key := range *fg { result = append(result, key) } return result } // asSlice returns the failuresGroup as a failuresGroupPair slice. func (fg *failuresGroup) asSlice() []failuresGroupPair { result := make([]failuresGroupPair, 0, len(*fg)) for str, failures := range *fg { result = append(result, failuresGroupPair{str, failures}) } return result } // sortByMostFailures returns a failuresGroupPair slice sorted by the number of failures in each // pair, descending. If the number of failures is the same for two pairs, they are sorted alphabetically // by their keys. func (fg *failuresGroup) sortByMostFailures() []failuresGroupPair { result := fg.asSlice() // Sort the slice. sort.Slice(result, func(i, j int) bool { iFailures := len(result[i].Failures) jFailures := len(result[j].Failures) if iFailures == jFailures { return result[i].Key < result[j].Key } return iFailures > jFailures }) return result } // equal determines whether this failuresGroup is deeply equal to another failuresGroup. 
func (a *failuresGroup) equal(b *failuresGroup) bool { // First check the length to deal with different-length maps if len(*a) != len(*b) { return false } for key, failuresA := range *a { // Make sure the other map contains the same keys if failuresB, ok := (*b)[key]; ok { // Check lengths if len(failuresA) != len(failuresB) { return false } // Compare the failures slices for i := range failuresA { if failuresA[i] != failuresB[i] { return false } } } else { // The other map is missing a key return false } } return true } // map[string]failuresGroup type alias, which is really a map[string]map[string][]failure type alias // nestedFailuresGroups maps strings to failuresGroup instances. type nestedFailuresGroups map[string]failuresGroup // nestedFailuresGroupsPair is a representation of a nestedFailuresGroups key-value mapping as a // two-element struct. type nestedFailuresGroupsPair struct { Key string `json:"key"` Group failuresGroup `json:"group"` } // keys provides the nestedFailuresGroups's keys as a string slice. func (nfg *nestedFailuresGroups) keys() []string { result := make([]string, len(*nfg)) iter := 0 for key := range *nfg { result[iter] = key iter++ } return result } // asSlice returns the nestedFailuresGroups as a nestedFailuresGroupsPair slice. func (nfg *nestedFailuresGroups) asSlice() []nestedFailuresGroupsPair { result := make([]nestedFailuresGroupsPair, len(*nfg)) iter := 0 for str, group := range *nfg { result[iter] = nestedFailuresGroupsPair{str, group} iter++ } return result } // sortByMostAggregatedFailures returns a nestedFailuresGroupsPair slice sorted by the aggregate // number of failures across all failure slices in each failuresGroup, descending. If the aggregate // number of failures is the same for two pairs, they are sorted alphabetically by their keys. 
func (nfg *nestedFailuresGroups) sortByMostAggregatedFailures() []nestedFailuresGroupsPair { result := nfg.asSlice() // Pre-compute the aggregate failures for each element of result so that the less // function doesn't have to compute it on every compare. // aggregates maps nestedFailuresGroups strings to number of aggregate failures across all of // their failure slices. aggregates := make(map[string]int, len(*nfg)) for str, fg := range *nfg { aggregate := 0 for _, group := range fg { aggregate += len(group) } aggregates[str] = aggregate } // Sort the slice. sort.Slice(result, func(i, j int) bool { if aggregates[result[i].Key] == aggregates[result[j].Key] { return result[i].Key < result[j].Key } return aggregates[result[i].Key] > aggregates[result[j].Key] }) return result } // equal determines whether this nestedFailuresGroups object is deeply equal to another nestedFailuresGroups object. func (a *nestedFailuresGroups) equal(b *nestedFailuresGroups) bool { // First check the length to deal with different-length maps if len(*a) != len(*b) { return false } for key, failuresGroupA := range *a { // Make sure the other map contains the same keys if failuresGroupB, ok := (*b)[key]; ok { if !failuresGroupA.equal(&failuresGroupB) { return false } } else { // The other map is missing a key return false } } return true }
triage/summarize/map_abstractions.go
0.888451
0.427337
map_abstractions.go
starcoder
package kv import "github.com/vjeantet/bitfan/processors/doc" func (p *processor) Doc() *doc.Processor { return &doc.Processor{ Name: "kv", ImportPath: "github.com/vjeantet/bitfan/processors/filter-kv", Doc: "This filter helps automatically parse messages (or specific event fields)\nwhich are of the foo=bar variety.", DocShort: "Parses key-value pairs", Options: &doc.ProcessorOptions{ Doc: "", Options: []*doc.ProcessorOption{ &doc.ProcessorOption{ Name: "processors.CommonOptions", Alias: ",squash", Doc: "", Required: false, Type: "processors.CommonOptions", DefaultValue: nil, PossibleValues: []string{}, ExampleLS: "", }, &doc.ProcessorOption{ Name: "AllowDuplicateValues", Alias: "allow_duplicate_values", Doc: "A bool option for removing duplicate key/value pairs.\nWhen set to false, only one unique key/value pair will be preserved.\nFor example, consider a source like from=me from=me.\n[from] will map to an Array with two elements: [\"me\", \"me\"].\nto only keep unique key/value pairs, you could use this configuration\n```\nkv {\n allow_duplicate_values => false\n}\n```", Required: false, Type: "bool", DefaultValue: "true", PossibleValues: []string{}, ExampleLS: "", }, &doc.ProcessorOption{ Name: "DefaultKeys", Alias: "default_keys", Doc: "A hash specifying the default keys and their values which should be added\nto the event in case these keys do not exist in the source field being parsed.\n\nExample\n```\nkv {\n default_keys => { \"from\"=> \"<EMAIL>\",\n \"to\"=> \"default@dev.null\" }\n}\n```", Required: false, Type: "hash", DefaultValue: nil, PossibleValues: []string{}, ExampleLS: "", }, &doc.ProcessorOption{ Name: "ExcludeKeys", Alias: "exclude_keys", Doc: "An array specifying the parsed keys which should not be added to the event.\n\nBy default no keys will be excluded.\n\nFor example, consider a source like Hey, from=<abc>, to=def foo=bar.\n\nTo exclude from and to, but retain the foo key, you could use this configuration:\n```\nkv {\n exclude_keys => [ 
\"from\", \"to\" ]\n}\n```", Required: false, Type: "array", DefaultValue: nil, PossibleValues: []string{}, ExampleLS: "", }, &doc.ProcessorOption{ Name: "FieldSplit", Alias: "field_split", Doc: "A string of characters to use as delimiters for parsing out key-value pairs.\n\nThese characters form a regex character class and thus you must escape special regex characters like [ or ] using \\.\n#### Example with URL Query Strings\nFor example, to split out the args from a url query string such as ?pin=12345~0&d=123&e=foo@<EMAIL>.com&oq=bobo&ss=12345:\n```\n kv {\n field_split => \"&?\"\n }\n```\nThe above splits on both & and ? characters, giving you the following fields:\n\n* pin: 12345~0\n* d: 123\n* e: foo@bar.com\n* oq: bobo\n* ss: 12345", Required: false, Type: "string", DefaultValue: nil, PossibleValues: []string{}, ExampleLS: "", }, &doc.ProcessorOption{ Name: "IncludeBrackets", Alias: "include_brackets", Doc: "A boolean specifying whether to include brackets as value wrappers (the default is true)\n```\nkv {\n include_brackets => true\n}\n```\nFor example, the result of this line: bracketsone=(hello world) bracketstwo=[hello world]\nwill be:\n\n* bracketsone: hello world\n* bracketstwo: hello world\n\ninstead of:\n\n* bracketsone: (hello\n* bracketstwo: [hello", Required: false, Type: "bool", DefaultValue: nil, PossibleValues: []string{}, ExampleLS: "", }, &doc.ProcessorOption{ Name: "IncludeKeys", Alias: "include_keys", Doc: "An array specifying the parsed keys which should be added to the event. By default all keys will be added.\n\nFor example, consider a source like Hey, from=<abc>, to=def foo=bar. 
To include from and to, but exclude the foo key, you could use this configuration:\n```\nkv {\n include_keys => [ \"from\", \"to\" ]\n}\n```", Required: false, Type: "array", DefaultValue: nil, PossibleValues: []string{}, ExampleLS: "", }, &doc.ProcessorOption{ Name: "Prefix", Alias: "", Doc: "A string to prepend to all of the extracted keys.\n\nFor example, to prepend arg_ to all keys:\n```\nkv {\n prefix => \"arg_\" }\n}\n```", Required: false, Type: "string", DefaultValue: nil, PossibleValues: []string{}, ExampleLS: "", }, &doc.ProcessorOption{ Name: "Recursive", Alias: "", Doc: "A boolean specifying whether to drill down into values and recursively get more key-value pairs from it. The extra key-value pairs will be stored as subkeys of the root key.\n\nDefault is not to recursive values.\n```\nkv {\n recursive => \"true\"\n}\n```", Required: false, Type: "bool", DefaultValue: nil, PossibleValues: []string{}, ExampleLS: "", }, &doc.ProcessorOption{ Name: "Source", Alias: "", Doc: "The field to perform key=value searching on\n\nFor example, to process the not_the_message field:\n```\nkv { source => \"not_the_message\" }\n```", Required: false, Type: "string", DefaultValue: nil, PossibleValues: []string{}, ExampleLS: "", }, &doc.ProcessorOption{ Name: "Target", Alias: "", Doc: "The name of the container to put all of the key-value pairs into.\n\nIf this setting is omitted, fields will be written to the root of the event, as individual fields.\n\nFor example, to place all keys into the event field kv:\n```\nkv { target => \"kv\" }\n```", Required: false, Type: "string", DefaultValue: nil, PossibleValues: []string{}, ExampleLS: "", }, &doc.ProcessorOption{ Name: "Trimkey", Alias: "trim_key", Doc: "A string of characters to trim from the key. 
This is useful if your keys are wrapped in brackets or start with space.\n\nFor example, to strip < > [ ] and , characters from keys:\n```\nkv {\n trimkey => \"<>[],\"\n}\n```", Required: false, Type: "string", DefaultValue: nil, PossibleValues: []string{}, ExampleLS: "", }, &doc.ProcessorOption{ Name: "TrimValue", Alias: "trim_value", Doc: "Constants used for transform check A string of characters to trim from the value. This is useful if your values are wrapped in brackets or are terminated with commas (like postfix logs).\n\nThese characters form a regex character class and thus you must escape special regex characters like [ or ] using \\.\n\nOnly leading and trailing characters are trimed from the value.\n\nFor example, to trim <, >, [, ] and , characters from values:\n\n```\nfilter {\n kv {\n trim_value => \"<>\\[\\],\"\n }\n}\n```", Required: false, Type: "string", DefaultValue: nil, PossibleValues: []string{}, ExampleLS: "", }, &doc.ProcessorOption{ Name: "ValueSplit", Alias: "value_split", Doc: "A string of characters to use as delimiters for identifying key-value relations.\n\nThese characters form a regex character class and thus you must escape special regex characters like [ or ] using \\.\n\nFor example, to identify key-values such as key1:value1 key2:value2:\n```\n{ kv { value_split => \":\" }\n```", Required: false, Type: "string", DefaultValue: nil, PossibleValues: []string{}, ExampleLS: "", }, }, }, Ports: []*doc.ProcessorPort{ &doc.ProcessorPort{ Default: true, Name: "PORT_SUCCESS", Number: 0, Doc: "", }, }, } }
processors/filter-kv/docdoc.go
0.788827
0.691771
docdoc.go
starcoder
package mahalanobis

import (
	"errors"
	"github.com/skelterjohn/go.matrix"
	"math"
)

// MeanVector returns the per-dimension mean of a set of points as a column vector.
// points.Rows() = dimensions. points.Cols() = number of points.
func MeanVector(points *matrix.DenseMatrix) *matrix.DenseMatrix {
	n := points.Cols()
	mean := matrix.Zeros(points.Rows(), 1)
	for row := 0; row < points.Rows(); row++ {
		total := 0.0
		for col := 0; col < n; col++ {
			total += points.Get(row, col)
		}
		mean.Set(row, 0, total/float64(n))
	}
	return mean
}

// sample_covariance_matrix computes the sample covariance (divisor N-1) of the
// points around the supplied mean vector.
func sample_covariance_matrix(points, mean *matrix.DenseMatrix) *matrix.DenseMatrix {
	dim := points.Rows()
	n := points.Cols()
	cov := matrix.Zeros(dim, dim)
	for row := 0; row < dim; row++ {
		// The covariance matrix is symmetric: compute only the upper
		// triangle and mirror each value across the diagonal.
		for col := row; col < dim; col++ {
			// TODO in go routines ?
			acc := 0.0
			for k := 0; k < n; k++ {
				acc += (points.Get(row, k) - mean.Get(row, 0)) * (points.Get(col, k) - mean.Get(col, 0))
			}
			// this is the sample covariance, divide by (N - 1)
			v := acc / float64(n-1)
			cov.Set(row, col, v)
			cov.Set(col, row, v)
		}
	}
	return cov
}

// CovarianceMatrix returns the covariance matrix for this set of points
// (sample covariance is used).
// points.Rows() = dimensions. points.Cols() = number of points.
func CovarianceMatrix(points *matrix.DenseMatrix) *matrix.DenseMatrix {
	return sample_covariance_matrix(points, MeanVector(points))
}

// DistanceSquare returns the square of the Mahalanobis distance between target
// and the distribution described by points.
// points.Rows() = dimensions. points.Cols() = number of points.
// target.Cols = 1
func DistanceSquare(points, target *matrix.DenseMatrix) (float64, error) {
	// TODO support multiple points for target, and return a matrix of distances
	if points.Rows() != target.Rows() {
		return -1, errors.New("target does not have the same dimension than points")
	}
	mean := MeanVector(points)
	diff := target.Copy()
	diff.SubtractDense(mean)
	inverse, err := sample_covariance_matrix(points, mean).Inverse()
	if err != nil {
		return -1, err
	}
	step, err := inverse.TimesDense(diff)
	if err != nil {
		return -1, err
	}
	// (target - mean)^T * inv(cov) * (target - mean) is a 1x1 matrix.
	result, err := diff.Transpose().TimesDense(step)
	if err != nil {
		return -1, err
	}
	return result.Get(0, 0), nil
}

// Distance returns the Mahalanobis distance between target and the
// distribution described by points.
// points.Rows() = dimensions. points.Cols() = number of points.
// target.Cols = 1
func Distance(points, target *matrix.DenseMatrix) (float64, error) {
	squared, err := DistanceSquare(points, target)
	if err != nil {
		return -1, err
	}
	return math.Sqrt(squared), nil
}
mahalanobis.go
0.708717
0.584242
mahalanobis.go
starcoder
package unit

import (
	"math"

	"gitlab.com/alephledger/consensus-go/pkg/gomel"
)

// unitInDag is a unit that is already inside the dag, and has all its properties precomputed and cached.
// It uses forking heights to optimize AboveWithinProc calls.
type unitInDag struct {
	gomel.Unit
	// forkingHeight caches the highest height up to which this unit's chain of
	// predecessors is known to be fork-free (math.MaxInt32 when no fork is known).
	forkingHeight int
}

// Embed transforms the given unit into unitInDag and computes forking height.
// The returned unit overrides AboveWithinProc method to use that forking height.
func Embed(u gomel.Unit, dag gomel.Dag) gomel.Unit {
	result := &unitInDag{u, math.MaxInt32}
	result.computeForkingHeight(dag)
	return result
}

// AboveWithinProc reports whether u is above v among units of the same creator.
// When both units are unitInDag instances and v lies at or below their common
// forking height, the answer is decided from heights alone without walking the dag.
func (u *unitInDag) AboveWithinProc(v gomel.Unit) bool {
	if u.Height() < v.Height() || u.Creator() != v.Creator() {
		return false
	}
	if vInDag, ok := v.(*unitInDag); ok && v.Height() <= commonForkingHeight(u, vInDag) {
		return true
	}
	// Either we have a fork or a different type of unit, either way no optimization is possible.
	return u.Unit.AboveWithinProc(v)
}

// computeForkingHeight derives and caches u.forkingHeight from the dag's current
// maximal units for u's creator and from u's predecessor (if it is a unitInDag).
func (u *unitInDag) computeForkingHeight(dag gomel.Dag) {
	// this implementation works as long as there is no race for writing/reading to dag.maxUnits, i.e.
	// as long as units created by one process are added atomically
	if gomel.Dealing(u) {
		if len(dag.MaximalUnitsPerProcess().Get(u.Creator())) > 0 {
			// this is a forking dealing unit
			u.forkingHeight = -1
		} else {
			// no other unit by this creator yet: no fork known.
			u.forkingHeight = math.MaxInt32
		}
		return
	}
	if predecessor, ok := gomel.Predecessor(u).(*unitInDag); ok {
		// Check whether the predecessor is still a maximal unit of this creator;
		// if it is, u extends the tip and inherits the predecessor's forking height.
		found := false
		for _, v := range dag.MaximalUnitsPerProcess().Get(u.Creator()) {
			if v == predecessor {
				found = true
				break
			}
		}
		if found {
			u.forkingHeight = predecessor.forkingHeight
		} else {
			// there is already a unit that has 'predecessor' as a predecessor, hence u is a fork
			if predecessor.forkingHeight < predecessor.Height() {
				u.forkingHeight = predecessor.forkingHeight
			} else {
				u.forkingHeight = predecessor.Height()
			}
		}
	}
	// NOTE(review): if the predecessor is not a *unitInDag, forkingHeight keeps
	// its initial math.MaxInt32 value — confirm this is the intended fallback.
}

// commonForkingHeight returns the smaller of the two units' cached forking heights.
func commonForkingHeight(u, v *unitInDag) int {
	if u.forkingHeight < v.forkingHeight {
		return u.forkingHeight
	}
	return v.forkingHeight
}
pkg/unit/unit_in_dag.go
0.681197
0.454351
unit_in_dag.go
starcoder
package chunk

import (
	"bytes"
	"fmt"
	"io"
)

// Chunk provides meta information as well as access to its contained blocks.
type Chunk struct {
	// Fragmented tells whether the chunk should be serialized with a directory.
	// Fragmented chunks can have zero, one, or more blocks.
	// Unfragmented chunks always have exactly one block.
	Fragmented bool
	// ContentType describes how the block data shall be interpreted.
	ContentType ContentType
	// Compressed tells whether the data shall be serialized in compressed form.
	Compressed bool
	// BlockProvider is the keeper of original block data.
	// This provider will be referred to if no other data was explicitly set.
	BlockProvider BlockProvider

	blockLimit int
	blocks     map[int][]byte
}

// BlockCount returns the number of available blocks in the chunk.
// Unfragmented chunks will always have exactly one block.
func (c Chunk) BlockCount() int {
	n := c.providerBlockCount()
	if n < c.blockLimit {
		n = c.blockLimit
	}
	return n
}

// providerBlockCount reports how many blocks the underlying provider offers,
// or zero when no provider is set.
func (c Chunk) providerBlockCount() int {
	if c.BlockProvider == nil {
		return 0
	}
	return c.BlockProvider.BlockCount()
}

// Block returns the reader for the identified block.
// Each call returns a new reader instance.
// Data provided by this reader is always uncompressed.
func (c Chunk) Block(index int) (io.Reader, error) {
	if c.blocks != nil {
		if data, ok := c.blocks[index]; ok {
			// Explicitly set data takes precedence over the provider.
			return bytes.NewReader(data), nil
		}
		if c.providerBlockCount() <= index {
			// Index is beyond the provider's range: serve an empty block.
			return bytes.NewReader(nil), nil
		}
	}
	if c.BlockProvider == nil {
		return nil, fmt.Errorf("no blocks available")
	}
	return c.BlockProvider.Block(index)
}

// SetBlock registers new data for a block.
// For any block set this way, the block provider of this chunk will no longer be queried.
func (c *Chunk) SetBlock(index int, data []byte) {
	if index < 0 {
		panic(fmt.Errorf("index must be a non-negative value"))
	}
	c.ensureBlockMap()
	c.blocks[index] = data
	if index >= c.blockLimit {
		c.blockLimit = index + 1
	}
}

// ensureBlockMap lazily initializes the map holding explicitly set blocks.
func (c *Chunk) ensureBlockMap() {
	if c.blocks == nil {
		c.blocks = make(map[int][]byte)
	}
}
chunk/Chunk.go
0.735547
0.450722
Chunk.go
starcoder
package wot

import (
	"time"
)

// MediaTypeThingDescription is the IANA media type for a Thing Description document.
const MediaTypeThingDescription = "application/td+json"

/*
This file has go models for Web Of Things (WoT) Things Description following :
https://www.w3.org/TR/2019/CR-wot-thing-description-20191106/
(W3C Candidate Recommendation 6 November 2019)
*/

type any = interface{}

// ThingDescription is the structured data describing a Thing
type ThingDescription struct {
	// JSON-LD keyword to define short-hand names called terms that are used throughout a TD document.
	Context any `json:"@context"`
	// JSON-LD keyword to label the object with semantic tags (or types).
	Type any `json:"@type,omitempty"`
	// Identifier of the Thing in form of a URI [RFC3986] (e.g., stable URI, temporary and mutable URI, URI with local IP address, URN, etc.).
	ID AnyURI `json:"id,omitempty"`
	// Provides a human-readable title (e.g., display a text for UI representation) based on a default language.
	Title string `json:"title"`
	// Provides multi-language human-readable titles (e.g., display a text for UI representation in different languages).
	Titles map[string]string `json:"titles,omitempty"`
	// Provides additional (human-readable) information based on a default language
	Description string `json:"description,omitempty"`
	// Can be used to support (human-readable) information in different languages.
	Descriptions map[string]string `json:"descriptions,omitempty"`
	// Provides version information.
	Version *VersionInfo `json:"version,omitempty"`
	// Provides information when the TD instance was created.
	// NOTE(review): omitempty has no effect on time.Time (a struct is never
	// "empty" for encoding/json) — a zero Created is serialized; confirm intent.
	Created time.Time `json:"created,omitempty"`
	// Provides information when the TD instance was last modified.
	Modified time.Time `json:"modified,omitempty"`
	// Provides information about the TD maintainer as URI scheme (e.g., mailto [RFC6068], tel [RFC3966], https).
	Support AnyURI `json:"support,omitempty"`
	/*
		Define the base URI that is used for all relative URI references throughout a TD document.
		In TD instances, all relative URIs are resolved relative to the base URI using the algorithm
		defined in [RFC3986]. base does not affect the URIs used in @context and the IRIs used within
		Linked Data [LINKED-DATA] graphs that are relevant when semantic processing is applied to TD instances.
	*/
	Base string `json:"base,omitempty"`
	// All Property-based Interaction Affordances of the Thing.
	Properties map[string]PropertyAffordance `json:"properties,omitempty"`
	// All Action-based Interaction Affordances of the Thing.
	Actions map[string]ActionAffordance `json:"actions,omitempty"`
	// All Event-based Interaction Affordances of the Thing.
	Events map[string]EventAffordance `json:"events,omitempty"`
	// Provides Web links to arbitrary resources that relate to the specified Thing Description.
	Links []Link `json:"links,omitempty"`
	// Set of form hypermedia controls that describe how an operation can be performed.
	// Forms are serializations of Protocol Bindings. In this version of TD, all operations that can be
	// described at the Thing level are concerning how to interact with the Thing's Properties
	// collectively at once.
	Forms []Form `json:"forms,omitempty"`
	// Set of security definition names, chosen from those defined in securityDefinitions.
	// These must all be satisfied for access to resources
	Security any `json:"security"`
	// Set of named security configurations (definitions only). Not actually applied unless names are
	// used in a security name-value pair.
	SecurityDefinitions map[string]SecurityScheme `json:"securityDefinitions"`
}

/*
InteractionAffordance is metadata of a Thing that shows the possible choices to Consumers, thereby
suggesting how Consumers may interact with the Thing. There are many types of potential affordances,
but W3C WoT defines three types of Interaction Affordances: Properties, Actions, and Events.
*/
type InteractionAffordance struct {
	// JSON-LD keyword to label the object with semantic tags (or types).
	Type any `json:"@type,omitempty"`
	// Provides a human-readable title (e.g., display a text for UI representation) based on a default language.
	Title string `json:"title,omitempty"`
	// Provides multi-language human-readable titles (e.g., display a text for UI representation in different languages).
	Titles map[string]string `json:"titles,omitempty"`
	// Provides additional (human-readable) information based on a default language
	Description string `json:"description,omitempty"`
	// Can be used to support (human-readable) information in different languages.
	Descriptions map[string]string `json:"descriptions,omitempty"`
	/*
		Set of form hypermedia controls that describe how an operation can be performed.
		Forms are serializations of Protocol Bindings.
		When a Form instance is within an ActionAffordance instance, the value assigned to op MUST be invokeaction.
		When a Form instance is within an EventAffordance instance, the value assigned to op MUST be either
		subscribeevent, unsubscribeevent, or both terms within an Array.
		When a Form instance is within a PropertyAffordance instance, the value assigned to op MUST be one of
		readproperty, writeproperty, observeproperty, unobserveproperty or an Array containing a combination
		of these terms.
	*/
	Forms []Form `json:"forms"`
	// Define URI template variables as collection based on DataSchema declarations.
	UriVariables map[string]DataSchema `json:"uriVariables,omitempty"`
}

/*
PropertyAffordance is an Interaction Affordance that exposes state of the Thing. This state can then
be retrieved (read) and optionally updated (write). Things can also choose to make Properties
observable by pushing the new state after a change.
*/
type PropertyAffordance struct {
	InteractionAffordance
	DataSchema
	// A hint that the property state may be observed (pushed on change).
	Observable bool `json:"observable,omitempty"`
}

/*
ActionAffordance is an Interaction Affordance that allows to invoke a function of the Thing, which
manipulates state (e.g., toggling a lamp on or off) or triggers a process on the Thing (e.g., dim a
lamp over time).
*/
type ActionAffordance struct {
	InteractionAffordance
	// Used to define the input data schema of the Action.
	Input DataSchema `json:"input,omitempty"`
	// Used to define the output data schema of the Action.
	Output DataSchema `json:"output,omitempty"`
	// Signals if the Action is safe (=true) or not. Used to signal if there is no internal state
	// (cf. resource state) changed when invoking an Action. In that case responses can be cached as example.
	Safe bool `json:"safe"` // default: false
	// Indicates whether the Action is idempotent (=true) or not. Informs whether the Action can be
	// called repeatedly with the same result, if present, based on the same input.
	Idempotent bool `json:"idempotent"` // default: false
}

/*
EventAffordance is an Interaction Affordance that describes an event source, which asynchronously
pushes event data to Consumers (e.g., overheating alerts).
*/
type EventAffordance struct {
	InteractionAffordance
	// Defines data that needs to be passed upon subscription, e.g., filters or message format for setting up Webhooks.
	Subscription DataSchema `json:"subscription,omitempty"`
	// Defines the data schema of the Event instance messages pushed by the Thing.
	Data DataSchema `json:"data,omitempty"`
	// Defines any data that needs to be passed to cancel a subscription, e.g., a specific message to remove a Webhook.
	// Fixed: the JSON member name is "cancellation" per the TD specification (was mistakenly "optional").
	Cancellation DataSchema `json:"cancellation,omitempty"`
}

/*
Form can be viewed as a statement of "To perform an operation type operation on form context, make a
request method request to submission target" where the optional form fields may further describe the
required request. In Thing Descriptions, the form context is the surrounding Object, such as
Properties, Actions, and Events or the Thing itself for meta-interactions.
*/
type Form struct {
	/*
		Indicates the semantic intention of performing the operation(s) described by the form.
		For example, the Property interaction allows get and set operations. The protocol binding may
		contain a form for the get operation and a different form for the set operation. The op attribute
		indicates which form is for which and allows the client to select the correct form for the
		operation required. op can be assigned one or more interaction verb(s) each representing a
		semantic intention of an operation. It can be one of: readproperty, writeproperty,
		observeproperty, unobserveproperty, invokeaction, subscribeevent, unsubscribeevent,
		readallproperties, writeallproperties, readmultipleproperties, or writemultipleproperties.
		a. When a Form instance is within an ActionAffordance instance, the value assigned to op MUST be invokeaction.
		b. When a Form instance is within an EventAffordance instance, the value assigned to op MUST be
		   either subscribeevent, unsubscribeevent, or both terms within an Array.
		c. When a Form instance is within a PropertyAffordance instance, the value assigned to op MUST be
		   one of readproperty, writeproperty, observeproperty, unobserveproperty or an Array containing
		   a combination of these terms.
	*/
	Op any `json:"op"`
	// Target IRI of a link or submission target of a form.
	Href AnyURI `json:"href"`
	// Assign a content type based on a media type (e.g., text/plain) and potential parameters
	// (e.g., charset=utf-8) for the media type [RFC2046].
	ContentType string `json:"contentType"` // default: "application/json"
	// Content coding values indicate an encoding transformation that has been or can be applied to a
	// representation. Content codings are primarily used to allow a representation to be compressed or
	// otherwise usefully transformed without losing the identity of its underlying media type and
	// without loss of information. Examples of content coding include "gzip", "deflate", etc.
	// Possible values for the contentCoding property can be found, e.g., in
	// https://www.iana.org/assignments/http-parameters/http-parameters.xhtml#content-coding
	ContentCoding string `json:"contentCoding,omitempty"`
	// Indicates the exact mechanism by which an interaction will be accomplished for a given protocol
	// when there are multiple options. For example, for HTTP and Events, it indicates which of several
	// available mechanisms should be used for asynchronous notifications such as long polling
	// (longpoll), WebSub [websub] (websub), Server-Sent Events [eventsource] (sse). Please note that
	// there is no restriction on the subprotocol selection and other mechanisms can also be announced
	// by this subprotocol term.
	SubProtocol string `json:"subprotocol,omitempty"`
	// Set of security definition names, chosen from those defined in securityDefinitions.
	// These must all be satisfied for access to resources.
	Security any `json:"security,omitempty"`
	// Set of authorization scope identifiers provided as an array. These are provided in tokens
	// returned by an authorization server and associated with forms in order to identify what
	// resources a client may access and how. The values associated with a form should be chosen from
	// those defined in an OAuth2SecurityScheme active on that form.
	Scopes any `json:"scopes,omitempty"`
	// This optional term can be used if, e.g., the output communication metadata differ from input
	// metadata (e.g., output contentType differ from the input contentType). The response name
	// contains metadata that is only valid for the response messages.
	Response *ExpectedResponse `json:"response,omitempty"`
}

/*
Link can be viewed as a statement of the form "link context has a relation type resource at link
target", where the optional target attributes may further describe the resource.
*/
type Link struct {
	// Target IRI of a link or submission target of a form.
	Href AnyURI `json:"href"`
	// Target attribute providing a hint indicating what the media type (RFC2046) of the result of
	// dereferencing the link should be.
	Type string `json:"type,omitempty"`
	// A link relation type identifies the semantics of a link.
	Rel string `json:"rel,omitempty"`
	// Overrides the link context (by default the Thing itself identified by its id) with the given URI or IRI.
	Anchor AnyURI `json:"anchor,omitempty"`
}

// SecurityScheme is a named security configuration; the concrete scheme-specific
// fields are provided by the embedded scheme structs below.
type SecurityScheme struct {
	// JSON-LD keyword to label the object with semantic tags (or types).
	Type any `json:"@type,omitempty"`
	// Identification of the security mechanism being configured.
	// e.g. nosec, basic, cert, digest, bearer, pop, psk, public, oauth2, or apikey
	Scheme string `json:"scheme"`
	// Provides additional (human-readable) information based on a default language
	Description string `json:"description,omitempty"`
	// Can be used to support (human-readable) information in different languages.
	Descriptions map[string]string `json:"descriptions,omitempty"`
	// URI of the proxy server this security configuration provides access to. If not given, the
	// corresponding security configuration is for the endpoint.
	Proxy AnyURI `json:"proxy,omitempty"`

	*BasicSecurityScheme
	*DigestSecurityScheme
	*APIKeySecurityScheme
	*BearerSecurityScheme
	*CertSecurityScheme
	*PSKSecurityScheme
	*PublicSecurityScheme
	*PoPSecurityScheme
	*OAuth2SecurityScheme
}

// DataSchema is metadata that describes the data format used. It can be used for
// validation (a subset of JSON Schema terms).
type DataSchema struct {
	// JSON-LD keyword to label the object with semantic tags (or types)
	Type any `json:"@type,omitempty"`
	// Const corresponds to the JSON schema field "const".
	Const any `json:"const,omitempty"`
	// Provides additional (human-readable) information based on a default language.
	Description string `json:"description,omitempty"`
	// Can be used to support (human-readable) information in different languages.
	// NOTE(review): sibling types declare Descriptions as map[string]string; this string type looks
	// inconsistent but is kept to avoid breaking callers — confirm and migrate deliberately.
	Descriptions string `json:"descriptions,omitempty"`
	// Restricted set of values provided as an array.
	Enum []any `json:"enum,omitempty"`
	// Allows validation based on a format pattern such as "date-time", "email", "uri", etc. (Also see below.)
	Format string `json:"format,omitempty"`
	// OneOf corresponds to the JSON schema field "oneOf".
	OneOf []DataSchema `json:"oneOf,omitempty"`
	// ReadOnly corresponds to the JSON schema field "readOnly".
	ReadOnly bool `json:"readOnly,omitempty"`
	// Provides a human-readable title (e.g., display a text for UI representation) based on a default language.
	Title string `json:"title,omitempty"`
	// Provides multi-language human-readable titles (e.g., display a text for UI representation in different languages).
	// NOTE(review): sibling types declare Titles as map[string]string; []string kept for compatibility — confirm.
	Titles []string `json:"titles,omitempty"`
	// Assignment of JSON-based data types compatible with JSON Schema (one of boolean, integer,
	// number, string, object, array, or null).
	// DataType corresponds to the JSON schema field "type".
	DataType string `json:"type,omitempty"`
	// Unit corresponds to the JSON schema field "unit".
	Unit string `json:"unit,omitempty"`
	// Boolean value that is a hint to indicate whether a property interaction / value is write only
	// (=true) or not (=false).
	WriteOnly bool `json:"writeOnly,omitempty"`

	// Metadata describing data of type Array. This Subclass is indicated by the value array assigned
	// to type in DataSchema instances.
	*ArraySchema
	// Metadata describing data of type number. This Subclass is indicated by the value number
	// assigned to type in DataSchema instances.
	*NumberSchema
	// Metadata describing data of type object. This Subclass is indicated by the value object
	// assigned to type in DataSchema instances.
	*ObjectSchema
}

// DataSchemaDataTypeEnumValues are the values allowed for DataSchema.DataType.
var DataSchemaDataTypeEnumValues = []string{
	"boolean",
	"integer",
	"number",
	"string",
	"object",
	"array",
	"null",
}

// ArraySchema holds metadata describing data of type array.
type ArraySchema struct {
	// Used to define the characteristics of an array.
	Items any `json:"items,omitempty"`
	// Defines the maximum number of items that have to be in the array.
	MaxItems *int `json:"maxItems,omitempty"`
	// Defines the minimum number of items that have to be in the array.
	MinItems *int `json:"minItems,omitempty"`
}

// NumberSchema specifies both float and double.
type NumberSchema struct {
	// Specifies a maximum numeric value. Only applicable for associated number or integer types.
	Maximum *any `json:"maximum,omitempty"`
	// Specifies a minimum numeric value. Only applicable for associated number or integer types.
	Minimum *any `json:"minimum,omitempty"`
}

// ObjectSchema holds metadata describing data of type object.
type ObjectSchema struct {
	// Data schema nested definitions.
	Properties map[string]DataSchema `json:"properties,omitempty"`
	// Required corresponds to the JSON schema field "required".
	Required []string `json:"required,omitempty"`
}

// AnyURI is a URI [RFC3986] represented as a string.
type AnyURI = string

/*
ExpectedResponse is communication metadata describing the expected response message.
*/
type ExpectedResponse struct {
	ContentType string `json:"contentType,omitempty"`
}

/*
VersionInfo is metadata of a Thing that provides version information about the TD document. If
required, additional version information such as firmware and hardware version (term definitions
outside of the TD namespace) can be extended via the TD Context Extension mechanism.
It is recommended that the values within instances of the VersionInfo Class follow the semantic
versioning pattern (https://semver.org/), where a sequence of three numbers separated by a dot
indicates the major version, minor version, and patch version, respectively.
*/
type VersionInfo struct {
	// Provides a version indicator of this TD instance.
	Instance string `json:"instance"`
}

/*
BasicSecurityScheme is a Basic Authentication [RFC7617] security configuration identified by the
Vocabulary Term basic (i.e., "scheme": "basic"), using an unencrypted username and password. This
scheme should be used with some other security mechanism providing confidentiality, for example, TLS.
*/
type BasicSecurityScheme struct {
	// Specifies the location of security authentication information.
	In string `json:"in"` // default: header
	// Name for query, header, or cookie parameters.
	Name string `json:"name,omitempty"`
}

/*
DigestSecurityScheme is a Digest Access Authentication [RFC7616] security configuration identified by
the Vocabulary Term digest (i.e., "scheme": "digest"). This scheme is similar to basic authentication
but with added features to avoid man-in-the-middle attacks.
*/
type DigestSecurityScheme struct {
	// Specifies the location of security authentication information.
	In string `json:"in"` // default: header
	// Name for query, header, or cookie parameters.
	Name string `json:"name,omitempty"`
	// Quality of protection.
	Qop string `json:"qop,omitempty"` // default: auth
}

/*
APIKeySecurityScheme is an API key authentication security configuration identified by the Vocabulary
Term apikey (i.e., "scheme": "apikey"). This is for the case where the access token is opaque and is
not using a standard token format.
*/
type APIKeySecurityScheme struct {
	// Specifies the location of security authentication information.
	In string `json:"in"` // default: header
	// Name for query, header, or cookie parameters.
	Name string `json:"name,omitempty"`
}

/*
BearerSecurityScheme is a Bearer Token [RFC6750] security configuration identified by the Vocabulary
Term bearer (i.e., "scheme": "bearer") for situations where bearer tokens are used independently of
OAuth2. If the oauth2 scheme is specified it is not generally necessary to specify this scheme as
well as it is implied. For format, the value jwt indicates conformance with [RFC7519], jws indicates
conformance with [RFC7797], cwt indicates conformance with [RFC8392], and jwe indicates conformance
with [RFC7516], with values for alg interpreted consistently with those standards. Other formats and
algorithms for bearer tokens MAY be specified in vocabulary extensions.
*/
type BearerSecurityScheme struct {
	// Specifies the location of security authentication information.
	In string `json:"in"` // default: header
	// Name for query, header, or cookie parameters.
	Name string `json:"name,omitempty"`
	// URI of the authorization server.
	Authorization AnyURI `json:"authorization,omitempty"`
	// Encoding, encryption, or digest algorithm.
	Alg string `json:"alg"` // default: ES256
	// Specifies format of security authentication information.
	Format string `json:"format"` // default: jwt
}

/*
CertSecurityScheme is a certificate-based asymmetric key security configuration conformant with
[X509V3] identified by the Vocabulary Term cert (i.e., "scheme": "cert").
*/
type CertSecurityScheme struct {
	// Identifier providing information which can be used for selection or confirmation.
	Identity string `json:"identity,omitempty"`
}

/*
PSKSecurityScheme is a pre-shared key authentication security configuration identified by the
Vocabulary Term psk (i.e., "scheme": "psk").
*/
type PSKSecurityScheme struct {
	// Identifier providing information which can be used for selection or confirmation.
	Identity string `json:"identity,omitempty"`
}

/*
PublicSecurityScheme is a raw public key asymmetric key security configuration identified by the
Vocabulary Term public (i.e., "scheme": "public").
*/
type PublicSecurityScheme struct {
	// Identifier providing information which can be used for selection or confirmation.
	Identity string `json:"identity,omitempty"`
}

/*
PoPSecurityScheme is a proof-of-possession (PoP) token authentication security configuration
identified by the Vocabulary Term pop (i.e., "scheme": "pop"). Here jwt indicates conformance with
[RFC7519], jws indicates conformance with [RFC7797], cwt indicates conformance with [RFC8392], and
jwe indicates conformance with [RFC7516], with values for alg interpreted consistently with those
standards. Other formats and algorithms for PoP tokens MAY be specified in vocabulary extensions.
*/
type PoPSecurityScheme struct {
	// Specifies the location of security authentication information.
	In string `json:"in"` // default: header
	// Name for query, header, or cookie parameters.
	Name string `json:"name,omitempty"`
	// Encoding, encryption, or digest algorithm.
	Alg string `json:"alg"` // default: ES256
	// Specifies format of security authentication information.
	Format string `json:"format"` // default: jwt
	// URI of the authorization server.
	Authorization AnyURI `json:"authorization,omitempty"`
}

/*
OAuth2SecurityScheme is an OAuth2 authentication security configuration for systems conformant with
[RFC6749] and [RFC8252], identified by the Vocabulary Term oauth2 (i.e., "scheme": "oauth2"). For the
implicit flow authorization MUST be included. For the password and client flows token MUST be
included. For the code flow both authorization and token MUST be included. If no scopes are defined
in the SecurityScheme then they are considered to be empty.
*/
type OAuth2SecurityScheme struct {
	// URI of the authorization server.
	Authorization AnyURI `json:"authorization,omitempty"`
	// URI of the token server.
	Token AnyURI `json:"token,omitempty"`
	// URI of the refresh server.
	Refresh AnyURI `json:"refresh,omitempty"`
	// Set of authorization scope identifiers provided as an array. These are provided in tokens
	// returned by an authorization server and associated with forms in order to identify what
	// resources a client may access and how. The values associated with a form should be chosen from
	// those defined in an OAuth2SecurityScheme active on that form.
	Scopes any `json:"scopes,omitempty"`
	// Authorization flow.
	Flow string `json:"flow"`
}
wot/thing_description.go
0.817647
0.465387
thing_description.go
starcoder
package physics

import (
	"time"

	"github.com/g3n/engine/experimental/physics"
	"github.com/g3n/engine/experimental/physics/object"
	"github.com/g3n/engine/geometry"
	"github.com/g3n/engine/gls"
	"github.com/g3n/engine/graphic"
	"github.com/g3n/engine/light"
	"github.com/g3n/engine/material"
	"github.com/g3n/engine/math32"
	"github.com/g3n/engine/texture"
	"github.com/g3n/engine/util/helper"
	"github.com/g3n/engine/window"

	"github.com/g3n/g3nd/app"
)

func init() {
	app.DemoMap["physics-experimental.spheres2"] = &PhysicsSpheres2{}
}

// PhysicsSpheres2 is a demo that drops several textured spheres onto a static
// floor using the experimental physics engine; extra spheres can be thrown
// from the camera with the space bar.
type PhysicsSpheres2 struct {
	app        *app.App
	sim        *physics.Simulation
	sphereGeom *geometry.Geometry
	matSphere  *material.Standard
}

// Start is called once at the start of the demo.
// It builds the scene (lights, floor, spheres), creates the physics simulation
// and registers the rigid bodies.
func (t *PhysicsSpheres2) Start(a *app.App) {
	t.app = a

	// Subscribe to key events
	a.Subscribe(window.OnKeyRepeat, t.onKey)
	a.Subscribe(window.OnKeyDown, t.onKey)

	// Create axes helper
	axes := helper.NewAxes(1)
	a.Scene().Add(axes)

	pl := light.NewPoint(math32.NewColor("white"), 1.0)
	pl.SetPosition(1, 0, 1)
	a.Scene().Add(pl)

	// Add directional light from top
	l2 := light.NewDirectional(&math32.Color{1, 1, 1}, 0.3)
	l2.SetPosition(0, 0.1, 0)
	a.Scene().Add(l2)

	// Add directional light from top
	l3 := light.NewDirectional(&math32.Color{1, 1, 1}, 0.3)
	l3.SetPosition(0.1, 0, 0.1)
	a.Scene().Add(l3)

	t.sim = physics.NewSimulation(a.Scene())
	gravity := physics.NewConstantForceField(&math32.Vector3{0, -0.98, 0})
	// //gravity := physics.NewAttractorForceField(&math32.Vector3{0.1,1,0}, 1)
	t.sim.AddForceField(gravity)

	// Creates sphere 1
	t.sphereGeom = geometry.NewSphere(0.1, 16, 8)

	// Load the ground texture.
	// Fixed: the error must be checked BEFORE using texG — on a failed load
	// texG is nil and the SetRepeat/SetWrap calls would dereference nil.
	texfileG := a.DirData() + "/images/ground2.jpg"
	texG, err := texture.NewTexture2DFromImage(texfileG)
	if err != nil {
		a.Log().Fatal("Error loading texture: %s", err)
	}
	texG.SetRepeat(10, 10)
	texG.SetWrapS(gls.REPEAT)
	texG.SetWrapT(gls.REPEAT)

	mat := material.NewStandard(&math32.Color{1, 1, 1})
	mat.SetTransparent(true)
	mat.SetOpacity(0.5)
	mat.AddTexture(texG)
	//mat.SetWireframe(true)
	//sphere1 := graphic.NewMesh(sphereGeom, mat)
	//a.Scene().Add(sphere1)
	//t.rb = object.NewBody(sphere1)
	//t.sim.AddBody(t.rb, "Sphere1")

	// Static floor the spheres land on.
	floorGeom := geometry.NewBox(10, 0.5, 10)
	floor := graphic.NewMesh(floorGeom, mat)
	floor.SetPosition(3, -0.2, 0)
	a.Scene().Add(floor)
	floorBody := object.NewBody(floor)
	floorBody.SetBodyType(object.Static)
	t.sim.AddBody(floorBody, "Floor")

	// Creates texture 3
	texfile := a.DirData() + "/images/uvgrid.jpg"
	tex3, err := texture.NewTexture2DFromImage(texfile)
	if err != nil {
		a.Log().Fatal("Error loading texture: %s", err)
	}
	//tex3.SetFlipY(false)

	// Creates sphere 3
	t.matSphere = material.NewStandard(&math32.Color{1, 1, 1})
	t.matSphere.AddTexture(tex3)

	sphere2 := graphic.NewMesh(t.sphereGeom, t.matSphere)
	sphere2.SetPosition(0, 1, -0.02)
	a.Scene().Add(sphere2)
	rb2 := object.NewBody(sphere2)
	t.sim.AddBody(rb2, "Sphere2")

	sphere3 := graphic.NewMesh(t.sphereGeom, t.matSphere)
	sphere3.SetPosition(0.05, 1.2, 0.05)
	a.Scene().Add(sphere3)
	rb3 := object.NewBody(sphere3)
	t.sim.AddBody(rb3, "Sphere3")

	sphere4 := graphic.NewMesh(t.sphereGeom, t.matSphere)
	sphere4.SetPosition(-0.05, 1.4, 0)
	a.Scene().Add(sphere4)
	rb4 := object.NewBody(sphere4)
	t.sim.AddBody(rb4, "Sphere4")
}

// ThrowBall spawns a new sphere at the camera position and launches it toward
// the orbit-control target.
func (t *PhysicsSpheres2) ThrowBall() {
	camPos := t.app.Camera().Position()
	camTarget := t.app.Orbit().Target()
	throwDir := math32.NewVec3().SubVectors(&camTarget, &camPos).SetLength(3)
	sphere := graphic.NewMesh(t.sphereGeom, t.matSphere)
	sphere.SetPositionVec(&camPos)
	t.app.Scene().Add(sphere)
	rb := object.NewBody(sphere)
	rb.SetVelocity(throwDir)
	// NOTE(review): name "Sphere4" duplicates the last sphere added in Start —
	// kept as-is since body naming semantics are owned by the simulation.
	t.sim.AddBody(rb, "Sphere4")
}

// onKey handles keyboard input: P toggles pause, O single-steps the simulation,
// space throws a ball.
func (t *PhysicsSpheres2) onKey(evname string, ev interface{}) {
	kev := ev.(*window.KeyEvent)
	switch kev.Key {
	case window.KeyP:
		t.sim.SetPaused(!t.sim.Paused())
	case window.KeyO:
		t.sim.SetPaused(false)
		t.sim.Step(0.016)
		t.sim.SetPaused(true)
	case window.KeySpace:
		t.ThrowBall()
	case window.Key1:
		// TODO
	case window.Key2:
		// TODO
	}
}

// Update is called every frame.
func (t *PhysicsSpheres2) Update(a *app.App, deltaTime time.Duration) {
	t.sim.Step(float32(deltaTime.Seconds()))
}

// Cleanup is called once at the end of the demo.
func (t *PhysicsSpheres2) Cleanup(a *app.App) {}
demos/experimental/physics/spheres2.go
0.578567
0.436622
spheres2.go
starcoder
package trees import ( "errors" "github.com/TectusDreamlab/go-common-utils/datastructure/shared" ) // BTree defines a B-Tree type BTree struct { Root *BTreeNode // root has min 2 children if it's not leaf. size int order int comparator shared.Comparator } // BTreeNode defines a B-Tree Node type BTreeNode struct { Keys []interface{} // The separation keys, for non-leaf node, count = count(children) - 1 Values []interface{} Children []*BTreeNode // The childrens, maximum at order Parent *BTreeNode } // NewBTree creates a new B-Tree func NewBTree(order int, comparator shared.Comparator) *BTree { if order < 3 { panic("the order must be at least 3") } return &BTree{nil, 0, order, comparator} } // Put puts an key (index) and it's value into the B-Tree, if the key exists, update the value func (b *BTree) Put(key, value interface{}) { if b.Root == nil { b.Root = &BTreeNode{[]interface{}{key}, []interface{}{value}, []*BTreeNode{}, nil} b.size++ return } if b.insert(b.Root, key, value) { b.size++ } return } // Search searchs the key (index) to get the value from the B-Tree func (b *BTree) Search(key interface{}) (node *BTreeNode, index int, err error) { if b.Root == nil { return nil, -1, errors.New("empty tree") } return b.search(b.Root, key) } // Delete deletes the key (index) and its value from the B-Tree func (b *BTree) Delete(key interface{}) (value interface{}, err error) { if b.Root == nil { return nil, errors.New("empty tree") } return b.delete(b.Root, key) } // GetSize gets the total number of values. func (b *BTree) GetSize() int { return b.size } // GetHeight gets the height of the B-Tree. 
func (b *BTree) GetHeight() int { node := b.Root height := 0 for ; node != nil && len(node.Children) > 0; node = node.Children[0] { height++ } return height } // Clear clears the B-Tree func (b *BTree) Clear() { b.Root = nil b.size = 0 } func (b *BTree) insert(root *BTreeNode, key, value interface{}) (inserted bool) { return true } func (b *BTree) search(root *BTreeNode, key interface{}) (node *BTreeNode, index int, err error) { node = root for node != nil { // Let's binary search through the keys, to find the value or locate the sub-stree to search from if not found var found bool index, found = shared.BinarySearch(node.Keys, key, b.comparator) // Found if found { return } if !b.isLeaf(node) { node = node.Children[index+1] } } return nil, -1, errors.New("not found") } func (b *BTree) delete(root *BTreeNode, key interface{}) (value interface{}, err error) { return nil, nil } func (b *BTree) isLeaf(node *BTreeNode) bool { return len(node.Children) == 0 } func (b *BTree) isFull(node *BTreeNode) bool { return len(node.Children) == b.order } func (b *BTree) minChildrenPerInternalNode() int { return (b.order + 1) / 2 } func (b *BTree) maxKeysPerNode() int { return b.order - 1 } func (b *BTree) minKeysPerInternalNode() int { return b.minChildrenPerInternalNode() - 1 }
datastructure/trees/b_tree.go
0.720467
0.453443
b_tree.go
starcoder
package simat import ( "fmt" "github.com/emer/etable/etensor" "github.com/emer/etable/metric" ) // Tensor computes a similarity / distance matrix on tensor // using given metric function. Outer-most dimension ("rows") is // used as "indexical" dimension and all other dimensions within that // are compared. // Results go in smat which is ensured to have proper square 2D shape // (rows * rows). func Tensor(smat etensor.Tensor, a etensor.Tensor, mfun metric.Func64) error { rows := a.Dim(0) nd := a.NumDims() if nd < 2 || rows == 0 { return fmt.Errorf("simat.Tensor: must have 2 or more dims and rows != 0") } ln := a.Len() sz := ln / rows sshp := []int{rows, rows} smat.SetShape(sshp, nil, nil) av := make([]float64, sz) bv := make([]float64, sz) ardim := []int{0} brdim := []int{0} sdim := []int{0, 0} for ai := 0; ai < rows; ai++ { ardim[0] = ai sdim[0] = ai ar := a.SubSpace(ardim) ar.Floats(&av) for bi := 0; bi <= ai; bi++ { // lower diag brdim[0] = bi sdim[1] = bi br := a.SubSpace(brdim) br.Floats(&bv) sv := mfun(av, bv) smat.SetFloat(sdim, sv) } } // now fill in upper diagonal with values from lower diagonal // note: assumes symmetric distance function fdim := []int{0, 0} for ai := 0; ai < rows; ai++ { sdim[0] = ai fdim[1] = ai for bi := ai + 1; bi < rows; bi++ { // upper diag fdim[0] = bi sdim[1] = bi sv := smat.FloatVal(fdim) smat.SetFloat(sdim, sv) } } return nil } // Tensors computes a similarity / distance matrix on two tensors // using given metric function. Outer-most dimension ("rows") is // used as "indexical" dimension and all other dimensions within that // are compared. Resulting reduced 2D shape of two tensors must be // the same (returns error if not). 
// Rows of smat = a, cols = b func Tensors(smat etensor.Tensor, a, b etensor.Tensor, mfun metric.Func64) error { arows := a.Dim(0) and := a.NumDims() brows := b.Dim(0) bnd := b.NumDims() if and < 2 || bnd < 2 || arows == 0 || brows == 0 { return fmt.Errorf("simat.Tensors: must have 2 or more dims and rows != 0") } alen := a.Len() asz := alen / arows blen := b.Len() bsz := blen / brows if asz != bsz { return fmt.Errorf("simat.Tensors: size of inner dimensions must be same") } sshp := []int{arows, brows} smat.SetShape(sshp, nil, []string{"a", "b"}) av := make([]float64, asz) bv := make([]float64, bsz) ardim := []int{0} brdim := []int{0} sdim := []int{0, 0} for ai := 0; ai < arows; ai++ { ardim[0] = ai sdim[0] = ai ar := a.SubSpace(ardim) ar.Floats(&av) for bi := 0; bi < brows; bi++ { brdim[0] = bi sdim[1] = bi br := b.SubSpace(brdim) br.Floats(&bv) sv := mfun(av, bv) smat.SetFloat(sdim, sv) } } return nil } // TensorStd computes a similarity / distance matrix on tensor // using given Std metric function. Outer-most dimension ("rows") is // used as "indexical" dimension and all other dimensions within that // are compared. // Results go in smat which is ensured to have proper square 2D shape // (rows * rows). // This Std version is usable e.g., in Python where the func cannot be passed. func TensorStd(smat etensor.Tensor, a etensor.Tensor, met metric.StdMetrics) error { return Tensor(smat, a, metric.StdFunc64(met)) } // TensorsStd computes a similarity / distance matrix on two tensors // using given Std metric function. Outer-most dimension ("rows") is // used as "indexical" dimension and all other dimensions within that // are compared. Resulting reduced 2D shape of two tensors must be // the same (returns error if not). // Rows of smat = a, cols = b // This Std version is usable e.g., in Python where the func cannot be passed. func TensorsStd(smat etensor.Tensor, a, b etensor.Tensor, met metric.StdMetrics) error { return Tensors(smat, a, b, metric.StdFunc64(met)) }
simat/tensor.go
0.755457
0.620593
tensor.go
starcoder
package engine import ( "github.com/alexandre-normand/glukit/app/apimodel" "github.com/alexandre-normand/glukit/app/model" "github.com/alexandre-normand/glukit/app/store" "github.com/alexandre-normand/glukit/app/util" "context" "google.golang.org/appengine/log" "google.golang.org/appengine/taskqueue" "math" "time" ) const ( // The multiplier applied to any deviation from the target, on the low spectrum (i.e. anything less than 83) LOW_MULTIPLIER = 1 // The multiplier applied to any deviation from the target, on the high spectrum (i.e. anything above 83) HIGH_MULTIPLIER = 2 // Glukit score calculation period GLUKIT_SCORE_PERIOD = 7 // One period of reads minus on day for potential data gaps READS_REQUIREMENT = 288 * (GLUKIT_SCORE_PERIOD - 1) // The current Glukit scoring version SCORING_VERSION = 1 // The max number of days to look back when starting a new batch of calculation MAX_CALCULATION_DAYS_TO_LOOK_BACK = 30 ) // January 1st, 2014 var A1C_CALCULATION_START = time.Unix(1388534400, 0) // CalculateGlukitScore computes the GlukitScore for a given user. This is done in a few steps: // 1. Get the latest GLUKIT_SCORE_PERIOD days of reads // 2. For the most recent reads up to READS_REQUIREMENT, calculate the individual score // contribution and add it to the GlukitScore. // 3. If we had enough reads to satisfy the requirements, we return the sum of // all individual score contributions. 
func CalculateGlukitScore(context context.Context, glukitUser *model.GlukitUser, endOfPeriod time.Time) (glukitScore *model.GlukitScore, err error) { // Get the last period's worth of reads upperBound := util.GetMidnightUTCBefore(endOfPeriod) lowerBound := upperBound.AddDate(0, 0, -1*GLUKIT_SCORE_PERIOD) score := model.UNDEFINED_SCORE_VALUE log.Debugf(context, "Getting reads for glukit score calculation from [%s] to [%s]", lowerBound, upperBound) if reads, err := store.GetGlucoseReads(context, glukitUser.Email, lowerBound, upperBound); err != nil { return &model.UNDEFINED_SCORE, err } else { // We might want to do some interpolation of missing reads at some point but for now, we'll only use // actual values. Since we know we'll have gaps in a 2 weeks window because of sensor warm-ups, let's // just normalize by stopping after the equivalent of full 14 days of reads (assuming most people won't have // more than 2 days worth of missing data) readCount := 0 score = 0 for i := 0; i < len(reads) && i < READS_REQUIREMENT; i++ { score = score + int64(CalculateIndividualReadScoreWeight(context, reads[i])) readCount = readCount + 1 } log.Infof(context, "Readcount of [%d] used for glukit score calculation of [%d]", readCount, score) if readCount < READS_REQUIREMENT { log.Infof(context, "Received only [%d] but required [%d] to calculate valid GlukitScore", readCount, READS_REQUIREMENT) return &model.UNDEFINED_SCORE, nil } } if score == model.UNDEFINED_SCORE_VALUE { glukitScore = &model.UNDEFINED_SCORE } else { glukitScore = &model.GlukitScore{ Value: score, LowerBound: lowerBound, UpperBound: upperBound, CalculatedOn: time.Now(), ScoringVersion: SCORING_VERSION} } return glukitScore, nil } // An individual score is either 0 if it's straight on perfection (83) or it's the deviation from 83 weighted // by whether it's high (multiplier of 2) or lower (multiplier of 1) func CalculateIndividualReadScoreWeight(context context.Context, read apimodel.GlucoseRead) 
(weightedScoreContribution float64) { weightedScoreContribution = 0. convertedValue, err := read.GetNormalizedValue(apimodel.MG_PER_DL) if err != nil { util.Propagate(err) } value := float64(convertedValue) if value > model.TARGET_GLUCOSE_VALUE { weightedScoreContribution = (value - model.TARGET_GLUCOSE_VALUE) * HIGH_MULTIPLIER } else if value < model.TARGET_GLUCOSE_VALUE { weightedScoreContribution = -(value - model.TARGET_GLUCOSE_VALUE) * LOW_MULTIPLIER } return weightedScoreContribution } // CalculateUserFacingScore maps an internal GlukitScore to a user facing value (should be between 0 and 100) func CalculateUserFacingScore(internal model.GlukitScore) (external *int64) { if internal.Value == model.UNDEFINED_SCORE_VALUE { return nil } else { internalAsFloat := float64(internal.Value) externalAsFloat := 100 + 1.043e-9*math.Pow(internalAsFloat, 2) + 6.517e-22*math.Pow(internalAsFloat, 4) - 0.0003676*internalAsFloat - 1.434e-15*math.Pow(internalAsFloat, 3) externalAsInt := int64(externalAsFloat) return &externalAsInt } } // StartGlukitScoreBatch tries to calculate glukit scores for any week following the most recent calculated score func StartGlukitScoreBatch(context context.Context, glukitUser *model.GlukitUser) (err error) { lowerBoundOfLastScore := glukitUser.MostRecentScore.LowerBound // Calculate our minimum allowed lower bound since we don't want to incur the cost of too many reads when // a user is coming back from a long absence minLowerBound := time.Now().AddDate(0, 0, -1*MAX_CALCULATION_DAYS_TO_LOOK_BACK) // Set the lower bound to one day after the last lower bound lowerBound := lowerBoundOfLastScore.AddDate(0, 0, -1*GLUKIT_SCORE_PERIOD+1) if lowerBound.Before(minLowerBound) { lowerBound = minLowerBound } // Kick off the first chunk of glukit score calculation task, err := RunGlukitScoreCalculationChunk.Task(glukitUser.Email, lowerBound) if err != nil { log.Criticalf(context, "Couldn't schedule the next execution of [%s] for user [%s]. 
"+ "This breaks batch calculation of glukit scores for that user!: %v", GLUKIT_SCORE_BATCH_CALCULATION_FUNCTION_NAME, glukitUser.Email, err) } taskqueue.Add(context, task, BATCH_CALCULATION_QUEUE_NAME) log.Infof(context, "Queued up first chunk of glukit score calculation for user [%s] and lowerBound [%s]", glukitUser.Email, lowerBound.Format(util.TIMEFORMAT)) return nil } // StartA1CCalculationBatch tries to calculate a1c estimates for any week following the most recent calculated glukit score (a hack, we should have the most recent // a1c calculation date) func StartA1CCalculationBatch(context context.Context, glukitUser *model.GlukitUser) (err error) { lowerBoundOfLastA1C := glukitUser.MostRecentA1C.LowerBound // Uninitialized, default to January 1st, 2014 if lowerBoundOfLastA1C.Before(A1C_CALCULATION_START) { lowerBoundOfLastA1C = A1C_CALCULATION_START } // Calculate our minimum allowed lower bound since we don't want to incur the cost of too many reads when // a user is coming back from a long absence minLowerBound := time.Now().AddDate(0, 0, -1*MAX_CALCULATION_DAYS_TO_LOOK_BACK) // Set the lower bound to one day after the last lower bound lowerBound := lowerBoundOfLastA1C.AddDate(0, 0, -1*A1C_ESTIMATION_SCORE_PERIOD+1) if lowerBound.Before(minLowerBound) { lowerBound = minLowerBound } // Kick off the first chunk of glukit score calculation task, err := RunA1CCalculationChunk.Task(glukitUser.Email, lowerBound) if err != nil { log.Criticalf(context, "Couldn't schedule the next execution of [%s] for user [%s]. "+ "This breaks batch calculation of a1c estimates scores for that user!: %v", A1C_BATCH_CALCULATION_FUNCTION_NAME, glukitUser.Email, err) } taskqueue.Add(context, task, BATCH_CALCULATION_QUEUE_NAME) log.Infof(context, "Queued up first chunk of a1c calculation for user [%s] and lowerBound [%s]", glukitUser.Email, lowerBound.Format(util.TIMEFORMAT)) return nil }
app/engine/engine.go
0.602997
0.512937
engine.go
starcoder
package imagex type IPixelChanImage interface { Max() (maxX, maxY int) NumberChanel() int } //------------------------------- type PixelChanImage struct { C []uint8 Width, Height int NumChan int } func (p *PixelChanImage) Max() (maxX, maxY int) { return p.Width, p.Height } func (p *PixelChanImage) NumberChanel() int { return p.NumChan } func (p *PixelChanImage) ChanValueAt(x, y int, chanNum int) uint8 { index := p.getIndex(x, y) return p.C[index+chanNum] } func (p *PixelChanImage) ChanValuesAt(x, y int) []uint8 { index := p.getIndex(x, y) return p.C[index : index+p.NumChan : index+p.NumChan] } func (p *PixelChanImage) PixelAt(x, y int) uint32 { values := p.ChanValuesAt(x, y) var rs uint32 = 0 for i := 0; i < p.NumChan; i-- { rs = rs | (uint32(values[i]) << (uint32(p.NumChan - 1 - i)) * 8) } return rs } func (p *PixelChanImage) SetChanValue(x, y int, chanNum int, value uint8) { index := p.getIndex(x, y) p.C[index+chanNum] = value } func (p *PixelChanImage) SetChanValues(x, y int, value ...uint8) { index := p.getIndex(x, y) for i := 0; i < p.NumChan && i < len(value); i++ { p.C[index+1] = value[i] } } func (p *PixelChanImage) getIndex(x, y int) int { return 4 * (y*p.Width + x) } //---------------------------------------- type Pixel64ChanImage struct { C []uint16 Width, Height int NumChan int } func (p *Pixel64ChanImage) Max() (maxX, maxY int) { return p.Width, p.Height } func (p *Pixel64ChanImage) NumberChanel() int { return p.NumChan } func (p *Pixel64ChanImage) ChanValueAt(x, y int, chanNum int) uint16 { index := p.getIndex(x, y) return p.C[index+chanNum] } func (p *Pixel64ChanImage) ChanValuesAt(x, y int) []uint16 { index := p.getIndex(x, y) return p.C[index : index+p.NumChan : index+p.NumChan] } func (p *Pixel64ChanImage) PixelAt(x, y int) uint32 { values := p.ChanValuesAt(x, y) var rs uint32 = 0 for i := 0; i < p.NumChan; i-- { rs = rs | (uint32(values[i]) << (uint32(p.NumChan - 1 - i)) * 8) } return rs } func (p *Pixel64ChanImage) SetChanValue(x, y int, chanNum 
int, value uint16) { index := p.getIndex(x, y) p.C[index+chanNum] = value } func (p *Pixel64ChanImage) SetChanValues(x, y int, value ...uint16) { index := p.getIndex(x, y) for i := 0; i < p.NumChan && i < len(value); i++ { p.C[index+1] = value[i] } } func (p *Pixel64ChanImage) getIndex(x, y int) int { return 4 * (y*p.Width + x) } //--------------------------------------------- type GrayChanImage struct { PixelChanImage } func (p *GrayChanImage) SetGray(x, y int, gray uint8) { p.SetChanValue(x, y, 0, gray) } func (p *GrayChanImage) GrayAt(x, y int) uint8 { return p.ChanValueAt(x, y, 0) } //--------------------------------------------- type Gray16ChanImage struct { Pixel64ChanImage } func (p *Gray16ChanImage) SetGray16(x, y int, gray uint16) { p.SetChanValue(x, y, 0, gray) } func (p *Gray16ChanImage) Gray16At(x, y int) uint16 { return p.ChanValueAt(x, y, 0) } //--------------------------------------------- type RGBChanImage struct { PixelChanImage } func (p *RGBChanImage) SetRGB(x, y int, R, G, B uint8) { p.SetChanValues(x, y, R, G, B) } func (p *RGBChanImage) SetPixel(x, y int, pixel uint32) { R := uint8((pixel & 0xff0000) >> 16) G := uint8((pixel & 0x00ff00) >> 8) B := uint8(pixel & 0x0000ff) p.SetChanValues(x, y, R, G, B) } func (p *RGBChanImage) RGBAt(x, y int) (R, G, B uint8) { values := p.ChanValuesAt(x, y) return values[0], values[1], values[2] } func (p *RGBChanImage) PixelAt(x, y int) (pixel uint32) { values := p.ChanValuesAt(x, y) return (uint32(values[0]) << 16) | (uint32(values[1]) << 8) | uint32(values[2]) } //--------------------------------------------- type RGB64ChanImage struct { Pixel64ChanImage } func (p *RGB64ChanImage) SetRGB64(x, y int, R, G, B uint16) { p.SetChanValues(x, y, R, G, B) } func (p *RGB64ChanImage) SetPixel64(x, y int, pixel uint64) { R := uint16((pixel & 0xffff00000000) >> 32) G := uint16((pixel & 0x0000ffff0000) >> 16) B := uint16(pixel & 0x00000000ffff) p.SetChanValues(x, y, R, G, B) } func (p *RGB64ChanImage) RGB64At(x, y int) 
(R, G, B uint16) { values := p.ChanValuesAt(x, y) return values[0], values[1], values[2] } func (p *RGB64ChanImage) Pixel64At(x, y int) (pixel uint64) { values := p.ChanValuesAt(x, y) return (uint64(values[0]) << 32) | (uint64(values[1]) << 16) | uint64(values[2]) } //--------------------------------------------- type ARGBChanImage struct { PixelChanImage } func (p *ARGBChanImage) SetARGB(x, y int, A, R, G, B uint8) { p.SetChanValues(x, y, A, R, G, B) } func (p *ARGBChanImage) SetPixel(x, y int, pixel uint32) { A := uint8((pixel & 0xff000000) >> 24) R := uint8((pixel & 0x00ff0000) >> 16) G := uint8((pixel & 0x0000ff00) >> 8) B := uint8(pixel & 0x000000ff) p.SetChanValues(x, y, A, R, G, B) } func (p *ARGBChanImage) ARGBAt(x, y int) (A, R, G, B uint8) { values := p.ChanValuesAt(x, y) return values[0], values[1], values[2], values[3] } func (p *ARGBChanImage) PixelAt(x, y int) (pixel uint32) { values := p.ChanValuesAt(x, y) return (uint32(values[0]) << 24) | (uint32(values[1]) << 16) | (uint32(values[2]) << 8) | uint32(values[3]) } //--------------------------------------------- type ARGB64ChanImage struct { Pixel64ChanImage } func (p *ARGB64ChanImage) SetARGB64(x, y int, A, R, G, B uint16) { p.SetChanValues(x, y, A, R, G, B) } func (p *ARGB64ChanImage) SetPixel64(x, y int, pixel uint64) { A := uint16((pixel & 0xffff000000000000) >> 48) R := uint16((pixel & 0x0000ffff00000000) >> 32) G := uint16((pixel & 0x00000000ffff0000) >> 16) B := uint16(pixel & 0x000000000000ffff) p.SetChanValues(x, y, A, R, G, B) } func (p *ARGB64ChanImage) ARGB64At(x, y int) (A, R, G, B uint16) { values := p.ChanValuesAt(x, y) return values[0], values[1], values[2], values[3] } func (p *ARGB64ChanImage) Pixel64At(x, y int) (pixel uint64) { values := p.ChanValuesAt(x, y) return (uint64(values[0]) << 48) | (uint64(values[1]) << 32) | (uint64(values[2]) << 16) | uint64(values[3]) }
imagex/pixel_chan.go
0.776157
0.575767
pixel_chan.go
starcoder
package proto import ( "github.com/pkg/errors" "github.com/umbracle/fastrlp" "github.com/wavesplatform/gowaves/pkg/crypto" "github.com/wavesplatform/gowaves/pkg/util/common" ) const ( // EthereumHashSize is the expected length of the hash in bytes EthereumHashSize = 32 ) // EthereumHash represents the 32 byte Keccak256 hash of arbitrary data. type EthereumHash [EthereumHashSize]byte // Keccak256EthereumHash calculates and returns the Keccak256 hash of the input data, // converting it to an EthereumHash data structure. func Keccak256EthereumHash(data []byte) EthereumHash { return EthereumHash(crypto.MustKeccak256(data)) } // BytesToEthereumHash sets b to hash. // If b is larger than len(h), b will be cropped from the left. func BytesToEthereumHash(b []byte) EthereumHash { var h EthereumHash h.SetBytes(b) return h } // Bytes converts the fixed-length byte array of the EthereumHash to a slice of bytes. func (h EthereumHash) Bytes() []byte { return h[:] } func (h EthereumHash) MarshalJSON() ([]byte, error) { return common.ToHexJSON(h[:]), nil } func (h *EthereumHash) UnmarshalJSON(bytes []byte) error { hashBytes, err := common.FromHexJSON(bytes, EthereumHashSize, "EthereumHash") if err != nil { return errors.Wrap(err, "failed to unmarshal EthereumHash from JSON") } *h = BytesToEthereumHash(hashBytes) return err } // Bytes converts the fixed-length byte array of the EthereumHash to a slice of bytes. // If *EthereumAddress == nil copy returns nil. func (h *EthereumHash) tryToBytes() []byte { if h == nil { return nil } return h.Bytes() } // String implements the stringer interface and is used also by the logger when // doing full logging into a file. func (h EthereumHash) String() string { return h.Hex() } // Hex converts a hash to a hex string. func (h EthereumHash) Hex() string { return EncodeToHexString(h[:]) } // SetBytes sets the hash to the value of b. // If b is larger than len(h), b will be cropped from the left. 
func (h *EthereumHash) SetBytes(b []byte) { if len(b) > len(h) { b = b[len(b)-EthereumHashSize:] } copy(h[EthereumHashSize-len(b):], b) } func (h *EthereumHash) unmarshalFromFastRLP(val *fastrlp.Value) error { if err := val.GetHash(h[:]); err != nil { return errors.Wrap(err, "failed to unmarshal EthereumHash from fastRLP value") } return nil } func (h *EthereumHash) marshalToFastRLP(arena *fastrlp.Arena) *fastrlp.Value { return arena.NewBytes(h.tryToBytes()) } func unmarshalHashesFromFastRLP(value *fastrlp.Value) ([]EthereumHash, error) { elems, err := value.GetElems() if err != nil { return nil, errors.Wrap(err, "failed to get elements array") } hashes := make([]EthereumHash, 0, len(elems)) for _, elem := range elems { var h EthereumHash if err := h.unmarshalFromFastRLP(elem); err != nil { return nil, errors.Wrap(err, "failed to unmarshal EthereumHash from fastRLP value") } hashes = append(hashes, h) } return hashes, nil } func marshalHashesToFastRLP(arena *fastrlp.Arena, hashes []EthereumHash) *fastrlp.Value { array := arena.NewArray() for _, h := range hashes { val := h.marshalToFastRLP(arena) array.Set(val) } return array }
pkg/proto/eth_hash.go
0.727104
0.414129
eth_hash.go
starcoder
package timecode import ( "errors" "regexp" "strconv" "time" ) // TimecodeRegex is the pattern for a valid SMPTE timecode var TimecodeRegex = regexp.MustCompile(`^(\d\d)(:|;)(\d\d)(:|;)(\d\d)(:|;)(\d\d)$`) // Parse parses a timecode from a string, and treats it using the provided frame rate value func Parse(timecode string, rate Rate) (*Timecode, error) { // Match it against the regular expression match := TimecodeRegex.FindStringSubmatch(timecode) if match == nil { return nil, errors.New("invalid timecode format") } // Get the components hours, _ := strconv.ParseInt(match[1], 10, 64) minutes, _ := strconv.ParseInt(match[3], 10, 64) seconds, _ := strconv.ParseInt(match[5], 10, 64) frames, _ := strconv.ParseInt(match[7], 10, 64) // Combine the components return FromComponents(Components{ hours, minutes, seconds, frames, }, rate) } func FromComponents(components Components, rate Rate) (*Timecode, error) { // If the rate is drop frame, we need to check that the provided frame // isn't a dropped frame, which needs to be rounded to the nearest // valid frame timecode if rate.DropFrame { // If it's a dropped frame if (components.Minutes%10 > 0) && (components.Seconds == 0) && (components.Frames == 0 || components.Frames == 1) { // Move to the next valid frame in sequence components.Frames = 2 } } // Count up the total number of frames totalSeconds := (((components.Hours * 60) + components.Minutes) * 60) + components.Seconds totalFrames := totalSeconds*rate.Num + components.Frames // Count the number of dropped frames if rate.DropFrame { totalFrames -= CountDroppedFrames(components.Minutes) } // Return the timecode with the total frames return &Timecode{ frame: totalFrames, rate: rate, }, nil } func FromFrame(frame int64, rate Rate) *Timecode { return &Timecode{ frame, rate, } } func FromPresentationTime(presentationTime time.Duration, rate Rate) *Timecode { return &Timecode{ frame: int64(presentationTime / rate.PlaybackFrameDuration()), rate: rate, } }
parse.go
0.800029
0.427337
parse.go
starcoder
package terminal

import (
	"github.com/cimomo/portfolio-go/pkg/portfolio"
	"github.com/gdamore/tcell"
	"github.com/rivo/tview"
)

// PortfolioViewer displays real-time portfolio data
type PortfolioViewer struct {
	portfolio *portfolio.Portfolio // data source rendered by Draw
	table     *tview.Table         // terminal table widget holding the grid
}

// NewPortfolioViewer returns a new viewer for the real-time portfolio data
func NewPortfolioViewer(portfolio *portfolio.Portfolio) *PortfolioViewer {
	return &PortfolioViewer{
		portfolio: portfolio,
		table:     tview.NewTable().SetBorders(false),
	}
}

// Reload updates the portfolio data object
func (viewer *PortfolioViewer) Reload(portfolio *portfolio.Portfolio) {
	viewer.portfolio = portfolio
}

// Draw fetches the latest portfolio data and refreshes the viewer.
// The table is fully cleared and re-populated: header row first, then one
// row per holding plus a TOTAL row.
func (viewer *PortfolioViewer) Draw() {
	viewer.table.Clear()
	viewer.drawHeader()
	viewer.drawPortfolio()
}

// drawHeader writes the 13 column labels into row 0. The first two columns
// (symbol, class) are left-aligned; all numeric columns are right-aligned.
func (viewer *PortfolioViewer) drawHeader() {
	var cell *tview.TableCell
	header := []string{
		"SYMBOL", "CLASS", "QUANTITY", "PRICE", "WATCH", "1-DAY CHANGE$", "1-DAY CHANGE%", "VALUE",
		"1-Day VALUE CHANGE$", "UNREALIZED$", "UNREALIZED%", "ALLOCATION", "TARGET",
	}
	for c := 0; c < len(header); c++ {
		cell = tview.NewTableCell(header[c]).SetTextColor(tcell.ColorYellow).SetBackgroundColor(tcell.ColorDarkSlateGray).SetAttributes(tcell.AttrBold)
		if c < 2 {
			cell.SetAlign(tview.AlignLeft)
		} else {
			cell.SetAlign((tview.AlignRight))
		}
		viewer.table.SetCell(0, c, cell)
	}
}

// drawPortfolio renders one row per holding (in the portfolio's symbol
// order) followed by a TOTAL summary row. Column indices must stay in sync
// with the header defined in drawHeader:
//   0 symbol, 1 class, 2 quantity, 3 price, 4 watch, 5 1-day change$,
//   6 1-day change%, 7 value, 8 1-day value change$, 9 unrealized$,
//   10 unrealized%, 11 allocation, 12 target allocation.
func (viewer *PortfolioViewer) drawPortfolio() {
	port := viewer.portfolio
	holdings := port.Holdings
	r := 1
	for _, symbol := range port.Symbols {
		holding := holdings[symbol]
		setString(viewer.table, symbol, r, 0, tcell.ColorWhite, tview.AlignLeft)
		setString(viewer.table, string(holding.Asset.Subclass), r, 1, tcell.ColorWhite, tview.AlignLeft)
		setQuantity(viewer.table, holding.Quantity, r, 2, tview.AlignCenter)
		// Price is colored relative to the watch value; the watch column
		// itself is only shown when non-zero.
		setDollarAmountAgainstWatch(viewer.table, holding.Quote.RegularMarketPrice, holding.Watch, r, 3)
		setNonZeroDollarAmount(viewer.table, holding.Watch, r, 4, tcell.ColorWhite)
		setDollarChange(viewer.table, holding.Quote.RegularMarketChange, r, 5)
		setPercentChange(viewer.table, holding.Quote.RegularMarketChangePercent, r, 6)
		setDollarAmount(viewer.table, holding.Status.Value, r, 7, tcell.ColorWhite)
		// 1-day value change = per-share change scaled by position size.
		setDollarChange(viewer.table, holding.Quote.RegularMarketChange*holding.Quantity, r, 8)
		setDollarChange(viewer.table, holding.Status.Unrealized, r, 9)
		setPercentChange(viewer.table, holding.Status.UnrealizedPercent, r, 10)
		setPercent(viewer.table, port.Status.Allocation[symbol], r, 11, tcell.ColorWhite)
		setPercent(viewer.table, port.TargetAllocation[symbol], r, 12, tcell.ColorWhite)
		r++
	}

	// TOTAL row: columns 1-5 are intentionally left blank; allocation and
	// target always sum to 100%.
	setString(viewer.table, "TOTAL", r, 0, tcell.ColorYellow, tview.AlignLeft)
	setPercentChange(viewer.table, port.Status.RegularMarketChangePercent, r, 6)
	setDollarAmount(viewer.table, port.Status.Value, r, 7, tcell.ColorYellow)
	setDollarChange(viewer.table, port.Status.RegularMarketChange, r, 8)
	setDollarChange(viewer.table, port.Status.Unrealized, r, 9)
	setPercentChange(viewer.table, port.Status.UnrealizedPercent, r, 10)
	setPercent(viewer.table, 100.0, r, 11, tcell.ColorYellow)
	setPercent(viewer.table, 100.0, r, 12, tcell.ColorYellow)
}
pkg/terminal/portfolio.go
0.595728
0.47591
portfolio.go
starcoder
package mafsa

import (
	"encoding/binary"
	"sort"
)

// Encoder is a type which can encode a BuildTree into a byte slice
// which can be written to a file.
type Encoder struct {
	queue   []*BuildTreeNode // BFS queue of nodes whose edges still need encoding
	counter int              // next free word index; used to assign node positions (bytePos)
}

// Encode serializes a BuildTree t into a byte slice.
func (e *Encoder) Encode(t *BuildTree) ([]byte, error) {
	e.queue = []*BuildTreeNode{}
	// Root's edges occupy words 1..len(root.Edges); children are laid out
	// breadth-first starting right after them.
	e.counter = len(t.Root.Edges) + 1

	// First "word" (fixed-length entry) is a null entry
	// that specifies the file format:
	// First byte indicates the flag scheme (basically a file format verison number)
	// Second byte is word length in bytes (at least 4)
	// Third byte is char length in bytes
	// Fourth byte is pointer length in bytes
	// Note: Word length (the first byte)
	// must be exactly Second byte + 1 (flags) + Fourth byte
	// Any leftover bytes in this first word are zero
	data := []byte{0x01, 0x06, 0x01, 0x04}
	for i := len(data); i < int(data[1]); i++ {
		data = append(data, 0x00)
	}

	// Breadth-first traversal: encode the root's edges, then repeatedly pop
	// a node off the queue and encode its edges. Edge words for a node are
	// contiguous, so a parent's pointer (bytePos) addresses the first one.
	data = e.encodeEdges(t.Root, data)
	for len(e.queue) > 0 {
		// Pop first item off the queue
		top := e.queue[0]
		e.queue = e.queue[1:]

		// Recursively marshal child nodes
		data = e.encodeEdges(top, data)
	}

	return data, nil
}

// encodeEdges encodes the edges going out of node into bytes which are appended
// to data. The modified byte slice is returned.
// Each edge word is: char bytes + 1 flag byte + pointer bytes, where the
// pointer is the word index of the child's first edge (0 for leaf children).
func (e *Encoder) encodeEdges(node *BuildTreeNode, data []byte) []byte {
	// We want deterministic output for testing purposes,
	// so we need to order the keys of the edges map.
	edgeKeys := sortEdgeKeys(node)

	for i := 0; i < len(edgeKeys); i++ {
		child := node.Edges[edgeKeys[i]]

		// NOTE(review): the header declares a 1-byte char width, but a
		// non-ASCII rune encodes to multiple UTF-8 bytes here, which would
		// break the fixed word length — confirm inputs are ASCII-only.
		word := []byte(string(edgeKeys[i]))

		var flags byte
		if child.final {
			flags |= 0x01 // end of word
		}
		if i == len(edgeKeys)-1 {
			flags |= 0x02 // end of node (last child outgoing from this node)
		}

		word = append(word, flags)

		// If bytePos is 0, we haven't encoded this edge yet
		if child.bytePos == 0 {
			if len(child.Edges) > 0 {
				// Reserve a contiguous run of word slots for the child's edges.
				child.bytePos = e.counter
				e.counter += len(child.Edges)
			}
			e.queue = append(e.queue, child)
		}

		pointer := child.bytePos

		pointerBytes := make([]byte, int(data[3]))
		switch len(pointerBytes) {
		case 2:
			binary.BigEndian.PutUint16(pointerBytes, uint16(pointer))
		case 4:
			binary.BigEndian.PutUint32(pointerBytes, uint32(pointer))
		case 8:
			binary.BigEndian.PutUint64(pointerBytes, uint64(pointer))
		}

		word = append(word, pointerBytes...)
		data = append(data, word...)
	}

	return data
}

// sortEdgeKeys returns a sorted list of the keys
// of the map containing outgoing edges.
func sortEdgeKeys(node *BuildTreeNode) []rune {
	edgeKeys := make(runeSlice, 0, len(node.Edges))
	for char := range node.Edges {
		edgeKeys = append(edgeKeys, char)
	}
	sort.Sort(edgeKeys)
	return []rune(edgeKeys)
}

// runeSlice implements sort.Interface over a slice of runes.
type runeSlice []rune

func (s runeSlice) Len() int           { return len(s) }
func (s runeSlice) Less(i, j int) bool { return s[i] < s[j] }
func (s runeSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
encoder.go
0.750461
0.548129
encoder.go
starcoder
package utils

import (
	"errors"
	"gopkg.in/yaml.v2"
	"reflect"
	"strconv"
)

// GetAsBool parses a string to a bool or returns the bool if a bool is
// passed in. Any other type — or an unparsable string — yields
// defaultValue; only the string case reports an error.
func GetAsBool(value interface{}, defaultValue bool) (result bool, err error) {
	result = defaultValue
	switch v := value.(type) {
	case string:
		parsed, parseErr := strconv.ParseBool(v)
		if parseErr != nil {
			err = errors.New("Failed to convert value to a bool. Falling back to default")
		} else {
			result = parsed
		}
	case bool:
		result = v
	}
	return
}

// GetAsFloat parses a string to a float or returns the float if a float is
// passed in. Any other type yields defaultValue silently.
func GetAsFloat(value interface{}, defaultValue float64) (result float64, err error) {
	result = defaultValue
	switch v := value.(type) {
	case string:
		parsed, parseErr := strconv.ParseFloat(v, 64)
		if parseErr != nil {
			// Fixed: message used to say "bool" for a float conversion.
			err = errors.New("Failed to convert value to a float. Falling back to default")
		} else {
			result = parsed
		}
	case float64:
		result = v
	}
	return
}

// GetAsInt parses a string/float to an int or returns the int if an int
// (of any width) is passed in. Floats are truncated toward zero.
func GetAsInt(value interface{}, defaultValue int) (result int, err error) {
	result = defaultValue
	switch v := value.(type) {
	case string:
		parsed, parseErr := strconv.ParseInt(v, 10, 64)
		if parseErr == nil {
			result = int(parsed)
		} else {
			err = errors.New("Failed to convert value to an int")
		}
	case int:
		result = v
	case int32:
		result = int(v)
	case int64:
		result = int(v)
	case float64:
		result = int(v)
	}
	return
}

// GetAsString parses an int/float to a string or returns the string if a
// string is passed in. Unsupported types yield "".
func GetAsString(value interface{}) (result string) {
	switch v := value.(type) {
	case string:
		result = v
	case int:
		result = strconv.Itoa(v)
	case float64:
		result = strconv.FormatFloat(v, 'G', -1, 64)
	}
	return
}

// GetAsMap parses a YAML string, a map[string]interface{}, or a
// map[string]string into a map[string]string. On any error an empty
// (non-nil) map is returned alongside the error.
func GetAsMap(value interface{}) (result map[string]string, err error) {
	result = make(map[string]string)
	switch v := value.(type) {
	case string:
		if e := yaml.Unmarshal([]byte(v), &result); e != nil {
			err = errors.New("Failed to convert value to a map")
		}
	case map[string]interface{}:
		for k, item := range v {
			result[k] = GetAsString(item)
		}
	case map[string]string:
		result = v
	default:
		// Fixed: missing space between "got" and the type name.
		// NOTE(review): reflect.Type.Name() is empty for unnamed types
		// such as maps/slices — consider String() if richer messages matter.
		err = errors.New("Expected a string but got " + reflect.TypeOf(value).Name())
	}
	return
}

// GetAsSlice parses a YAML array string, a []string, or a []interface{}
// into a []string. On any error an empty (non-nil) slice is returned
// alongside the error.
func GetAsSlice(value interface{}) (result []string, err error) {
	result = []string{}
	switch realValue := value.(type) {
	case string:
		if e := yaml.Unmarshal([]byte(realValue), &result); e != nil {
			// Fixed: missing spaces around the offending value.
			err = errors.New("Failed to convert string: " + realValue + " to a []string")
		}
	case []string:
		result = realValue
	case []interface{}:
		result = make([]string, len(realValue))
		for i, item := range realValue {
			result[i] = GetAsString(item)
		}
	default:
		// Fixed: missing space between "got" and the type name.
		err = errors.New("Expected a string array but got " + reflect.TypeOf(realValue).Name() + ". Returning empty slice!")
	}
	return
}
utils/utils.go
0.668231
0.435481
utils.go
starcoder
package imagext

import (
	"image"
	"image/color"
	"image/gif"
	"image/jpeg"
	"image/png"
	"os"
	"path/filepath"
)

// Gray converts values r (red), g (green) and b (blue)
// to a value of gray and returns it.
// Fixed-point Rec.709-style weights (1742+5859+591 = 8192 = 2^13),
// i.e. approximately 0.2126*R + 0.7152*G + 0.0722*B.
func Gray(r, g, b uint8) uint8 {
	return uint8((uint(r)*1742 + uint(g)*5859 + uint(b)*591) >> 13)
}

// NewGray converts img to a new grayscale image and returns it.
// Result differs depending on image format: each known concrete image
// type gets a specialized fast path; anything else falls back to the
// generic color-model conversion. A nil or empty image yields an empty
// (0x0) grayscale image.
func NewGray(img image.Image) *image.Gray {
	var gray *image.Gray
	if img != nil {
		bounds := img.Bounds()
		xMin := bounds.Min.X
		xMax := bounds.Max.X
		yMin := bounds.Min.Y
		yMax := bounds.Max.Y
		width := xMax - xMin
		height := yMax - yMin
		if width*height > 0 {
			gray = image.NewGray(image.Rect(0, 0, width, height))
			switch imgStruct := img.(type) {
			case *image.RGBA:
				toGrayImageRGBA(imgStruct.Pix, imgStruct.Stride, xMin, xMax, yMin, yMax, gray)
			case *image.RGBA64:
				toGrayImageRGBA64(imgStruct.Pix, imgStruct.Stride, xMin, xMax, yMin, yMax, gray)
			case *image.Alpha:
				toGrayImageAlpha(imgStruct.Pix, imgStruct.Stride, xMin, xMax, yMin, yMax, gray)
			case *image.Alpha16:
				toGrayImageAlpha16(imgStruct.Pix, imgStruct.Stride, xMin, xMax, yMin, yMax, gray)
			case *image.CMYK:
				toGrayImageCMYK(imgStruct.Pix, imgStruct.Stride, xMin, xMax, yMin, yMax, gray)
			case *image.Gray:
				toGrayImageGray(imgStruct.Pix, imgStruct.Stride, xMin, xMax, yMin, yMax, gray)
			case *image.Gray16:
				toGrayImageGray16(imgStruct.Pix, imgStruct.Stride, xMin, xMax, yMin, yMax, gray)
			case *image.NRGBA:
				toGrayImageNRGBA(imgStruct.Pix, imgStruct.Stride, xMin, xMax, yMin, yMax, gray)
			case *image.NRGBA64:
				toGrayImageNRGBA64(imgStruct.Pix, imgStruct.Stride, xMin, xMax, yMin, yMax, gray)
			case *image.Paletted:
				toGrayImagePaletted(imgStruct.Pix, imgStruct.Palette, imgStruct.Stride, xMin, xMax, yMin, yMax, gray)
			default:
				toGrayImageGeneric(img, xMin, xMax, yMin, yMax, gray)
			}
		} else {
			gray = image.NewGray(image.Rect(0, 0, 0, 0))
		}
	} else {
		gray = image.NewGray(image.Rect(0, 0, 0, 0))
	}
	return gray
}

// LoadImage reads image from file and returns it. The format is picked
// by file extension, falling back to format sniffing via image.Decode.
// NOTE(review): open and decode errors are swallowed and a nil image is
// returned — callers must nil-check the result.
func LoadImage(path string) image.Image {
	var img image.Image
	if len(path) > 0 {
		file, err := os.Open(path)
		if err == nil {
			defer file.Close()
			ext := filepath.Ext(path)
			if ext == ".jpg" || ext == ".jpeg" {
				img, _ = jpeg.Decode(file)
			} else if ext == ".png" || ext == ".apng" {
				img, _ = png.Decode(file)
			} else if ext == ".gif" {
				img, _ = gif.Decode(file)
			} else {
				img, _, _ = image.Decode(file)
			}
		}
	}
	return img
}

// SaveImage saves image to file. Format is recognized from
// extension in path. Default is PNG. An empty path is a no-op
// returning nil.
func SaveImage(path string, img image.Image) error {
	if len(path) > 0 {
		file, err := os.Create(path)
		if err == nil {
			defer file.Close()
			ext := filepath.Ext(path)
			if ext == ".jpg" || ext == ".jpeg" {
				// Fixed: keyed composite literal (go vet flags {100}).
				opt := jpeg.Options{Quality: 100}
				err = jpeg.Encode(file, img, &opt)
			} else if ext == ".gif" {
				err = gif.Encode(file, img, nil)
			} else {
				err = png.Encode(file, img)
			}
		}
		return err
	}
	return nil
}

// ToMonochrome converts image to black and white in place.
// Pixels below threshold become 0, the rest 255; a higher threshold
// therefore means a darker image.
func ToMonochrome(img *image.Gray, threshold uint8) {
	if (img.Rect.Max.X-img.Rect.Min.X)*(img.Rect.Max.Y-img.Rect.Min.Y) > 0 {
		for i, gray := range img.Pix {
			if gray < threshold {
				img.Pix[i] = 0
			} else {
				img.Pix[i] = 255
			}
		}
	}
}

// ToMedian sets pixel values to the median in an area of size*size,
// in place. Median value of {9, 5, 17} is 9. Out-of-image neighborhood
// rows are treated as white (255).
func ToMedian(img *image.Gray, size uint) {
	if (img.Rect.Max.X-img.Rect.Min.X)*(img.Rect.Max.Y-img.Rect.Min.Y) > 0 && size > 1 {
		lenImg := img.Rect.Max.X - img.Rect.Min.X
		hist := make([]uint8, 256)
		histZero := make([]uint8, 256)
		offLines := int(size) / 2
		lines := newLines(img, int(size))
		limit := img.Rect.Max.Y - offLines
		idxLastLine := len(lines) - 1
		for y := img.Rect.Min.Y; y < limit; y++ {
			offImg := (y - img.Rect.Min.Y) * img.Stride
			offImgNew := offImg + offLines*img.Stride
			// Load the row `offLines` below the current one into the
			// recycled last buffer, then rotate it to the front.
			copy(lines[idxLastLine][offLines:], img.Pix[offImgNew:offImgNew+lenImg:offImgNew+lenImg])
			shiftLines(lines, lines[idxLastLine])
			for x := img.Rect.Min.X; x < img.Rect.Max.X; x++ {
				fillHistogram(hist, histZero, lines, int(size), x)
				img.Pix[offImg+x-img.Rect.Min.X] = median(hist)
			}
		}
		// Bottom margin: feed white rows as the window slides off the image.
		whiteLine := lines[idxLastLine]
		setArrayValues(whiteLine, 255)
		// NOTE(review): this tail loop advances x and y together (one pixel
		// per row, diagonally), unlike ToAvarage's nested loops — confirm
		// this is intentional and not a truncated nested loop.
		for y, x := limit, img.Rect.Min.X; x < img.Rect.Max.X && y < img.Rect.Max.Y; x, y = x+1, y+1 {
			offImg := (y - img.Rect.Min.Y) * img.Stride
			shiftLines(lines, whiteLine)
			fillHistogram(hist, histZero, lines, int(size), x)
			img.Pix[offImg+x-img.Rect.Min.X] = median(hist)
		}
	}
}

// ToAvarage sets pixel values to the average in an area of size*size,
// in place. Average of {9, 5, 16} is 10. Out-of-image neighborhood
// rows are treated as white (255).
func ToAvarage(img *image.Gray, size uint) {
	if (img.Rect.Max.X-img.Rect.Min.X)*(img.Rect.Max.Y-img.Rect.Min.Y) > 0 && size > 1 {
		lenImg := img.Rect.Max.X - img.Rect.Min.X
		hist := make([]uint8, 256)
		histZero := make([]uint8, 256)
		offLines := int(size) / 2
		lines := newLines(img, int(size))
		limit := img.Rect.Max.Y - offLines
		idxLastLine := len(lines) - 1
		for y := img.Rect.Min.Y; y < limit; y++ {
			offImg := (y - img.Rect.Min.Y) * img.Stride
			offImgNew := offImg + offLines*img.Stride
			copy(lines[idxLastLine][offLines:], img.Pix[offImgNew:offImgNew+lenImg:offImgNew+lenImg])
			shiftLines(lines, lines[idxLastLine])
			for x := img.Rect.Min.X; x < img.Rect.Max.X; x++ {
				fillHistogram(hist, histZero, lines, int(size), x)
				img.Pix[offImg+x-img.Rect.Min.X] = avarage(hist, int(size))
			}
		}
		// Bottom margin: feed white rows as the window slides off the image.
		whiteLine := lines[idxLastLine]
		setArrayValues(whiteLine, 255)
		for y := limit; y < img.Rect.Max.Y; y++ {
			offImg := (y - img.Rect.Min.Y) * img.Stride
			shiftLines(lines, whiteLine)
			for x := img.Rect.Min.X; x < img.Rect.Max.X; x++ {
				fillHistogram(hist, histZero, lines, int(size), x)
				img.Pix[offImg+x-img.Rect.Min.X] = avarage(hist, int(size))
			}
		}
	}
}

// setArrayValues fills array with value.
func setArrayValues(array []uint8, value uint8) {
	for i := range array {
		array[i] = value
	}
}

// newLines allocates the ring of row buffers used by the sliding-window
// filters. Each buffer is the image width plus size-1 pixels of white
// horizontal padding; the rows above the image start are white too.
func newLines(img *image.Gray, size int) [][]uint8 {
	lines := make([][]uint8, size)
	lenImg := img.Rect.Max.X - img.Rect.Min.X
	lenLine := lenImg + size - 1
	offLines := size / 2
	for i := range lines {
		lines[i] = make([]uint8, lenLine)
		if i == 0 {
			setArrayValues(lines[0], 255)
		} else {
			copy(lines[i], lines[0])
		}
	}
	// Preload the bottom half of the window with the first image rows.
	for i, y := offLines-1, img.Rect.Min.Y; i >= 0 && y < img.Rect.Max.Y; i, y = i-1, y+1 {
		offImg := (y - img.Rect.Min.Y) * img.Stride
		copy(lines[i][offLines:], img.Pix[offImg:offImg+lenImg:offImg+lenImg])
	}
	return lines
}

// shiftLines pushes line0 to the front, shifting the rest down one slot.
func shiftLines(lines [][]uint8, line0 []uint8) {
	copy(lines[1:], lines[0:len(lines)-1])
	lines[0] = line0
}

// fillHistogram resets hist (via histZero) and counts the pixel values in
// the size*size window whose left edge is at column x.
// NOTE(review): x is an image coordinate but indexes the padded line
// buffers directly — correct only while Rect.Min.X is 0; confirm callers.
func fillHistogram(hist, histZero []uint8, lines [][]uint8, size, x int) {
	copy(hist, histZero)
	for _, line := range lines {
		for j := 0; j < size; j++ {
			idx := line[x+j]
			hist[idx]++
		}
	}
}

// sum returns the sum of values.
func sum(values []uint8) uint {
	var sum uint
	for _, v := range values {
		sum += uint(v)
	}
	return sum
}

// median returns the approximate median value of the histogram by binary
// partitioning: the bin where the left and right population counts balance.
func median(hist []uint8) uint8 {
	var leftSumPrev uint
	var rightSumPrev uint
	left := 0
	right := len(hist)
	for left < right {
		middle := (left + right) / 2
		if left != middle {
			leftSum := sum(hist[left:middle:middle])
			rightSum := sum(hist[middle:right:right])
			if leftSumPrev+leftSum > rightSum+rightSumPrev {
				right = middle
				rightSumPrev += rightSum
			} else if leftSumPrev+leftSum < rightSum+rightSumPrev {
				left = middle
				leftSumPrev += leftSum
			} else {
				left = middle
			}
		} else {
			return uint8(left)
		}
	}
	return 0
}

// avarage returns the mean pixel value of a size*size window histogram.
func avarage(hist []uint8, size int) uint8 {
	var sum uint
	for i, v := range hist {
		sum += (uint(i) * uint(v))
	}
	sum /= uint(size * size)
	return uint8(sum)
}

// cmykToGray converts CMYK channel values (0-255 each) to grayscale.
// Uses Rec.601 weights (0.299*R + 0.587*G + 0.114*B; 2449+4809+934 = 8192).
// NOTE(review): this differs from Gray(), which uses Rec.709-style weights.
func cmykToGray(c, m, y, k uint) uint8 {
	kDiff := 255 - k
	r := ((k * c >> 8) + kDiff) - c
	g := ((k * m >> 8) + kDiff) - m
	b := ((k * y >> 8) + kDiff) - y
	// 0.299 * R + 0.587 * G + 0.114 * B
	return uint8((uint(r)*2449 + uint(g)*4809 + uint(b)*934) >> 13)
}

// toGrayImageRGBA converts 8-bit RGBA pixel data to grayscale (alpha ignored).
func toGrayImageRGBA(pix []uint8, stride, xMin, xMax, yMin, yMax int, gray *image.Gray) {
	i := 0
	for y := yMin; y < yMax; y++ {
		for x := xMin; x < xMax; x++ {
			offset := (y-yMin)*stride + (x-xMin)*4
			r := pix[offset]
			g := pix[offset+1]
			b := pix[offset+2]
			gray.Pix[i] = Gray(r, g, b)
			i++
		}
	}
}

// toGrayImageRGBA64 converts 16-bit RGBA pixel data to grayscale using the
// low byte of each big-endian channel.
func toGrayImageRGBA64(pix []uint8, stride, xMin, xMax, yMin, yMax int, gray *image.Gray) {
	i := 0
	for y := yMin; y < yMax; y++ {
		for x := xMin; x < xMax; x++ {
			offset := (y-yMin)*stride + (x-xMin)*8
			r := pix[offset+1]
			g := pix[offset+3]
			b := pix[offset+5]
			gray.Pix[i] = Gray(r, g, b)
			i++
		}
	}
}

// toGrayImageAlpha maps alpha to inverted grayscale (opaque -> black).
func toGrayImageAlpha(pix []uint8, stride, xMin, xMax, yMin, yMax int, gray *image.Gray) {
	i := 0
	for y := yMin; y < yMax; y++ {
		for x := xMin; x < xMax; x++ {
			offset := (y-yMin)*stride + (x-xMin)*1
			a := pix[offset]
			gray.Pix[i] = 255 - a
			i++
		}
	}
}

// toGrayImageAlpha16 maps 16-bit alpha to inverted grayscale using the
// low byte of the big-endian value.
func toGrayImageAlpha16(pix []uint8, stride, xMin, xMax, yMin, yMax int, gray *image.Gray) {
	i := 0
	for y := yMin; y < yMax; y++ {
		for x := xMin; x < xMax; x++ {
			offset := (y-yMin)*stride + (x-xMin)*2
			a := pix[offset+1]
			gray.Pix[i] = 255 - a
			i++
		}
	}
}

// toGrayImageCMYK converts CMYK pixel data to grayscale via cmykToGray.
func toGrayImageCMYK(pix []uint8, stride, xMin, xMax, yMin, yMax int, gray *image.Gray) {
	i := 0
	for y := yMin; y < yMax; y++ {
		for x := xMin; x < xMax; x++ {
			offset := (y-yMin)*stride + (x-xMin)*4
			c := pix[offset]
			m := pix[offset+1]
			z := pix[offset+2]
			k := pix[offset+3]
			gray.Pix[i] = cmykToGray(uint(c), uint(m), uint(z), uint(k))
			i++
		}
	}
}

// toGrayImageGray copies grayscale rows directly.
func toGrayImageGray(pix []uint8, stride, xMin, xMax, yMin, yMax int, gray *image.Gray) {
	length := xMax - xMin
	i := 0
	for y := yMin; y < yMax; y++ {
		offset := (y-yMin)*stride + xMin
		copy(gray.Pix[i:i+length:i+length], pix[offset:offset+length:offset+length])
		i += length
	}
}

// toGrayImageGray16 keeps the low byte of each big-endian 16-bit gray value.
func toGrayImageGray16(pix []uint8, stride, xMin, xMax, yMin, yMax int, gray *image.Gray) {
	i := 0
	for y := yMin; y < yMax; y++ {
		for x := xMin; x < xMax; x++ {
			offset := (y-yMin)*stride + (x-xMin)*2
			gray.Pix[i] = pix[offset+1]
			i++
		}
	}
}

// toGrayImageNRGBA converts non-premultiplied 8-bit RGBA data (alpha ignored).
func toGrayImageNRGBA(pix []uint8, stride, xMin, xMax, yMin, yMax int, gray *image.Gray) {
	i := 0
	for y := yMin; y < yMax; y++ {
		for x := xMin; x < xMax; x++ {
			offset := (y-yMin)*stride + (x-xMin)*4
			r := pix[offset]
			g := pix[offset+1]
			b := pix[offset+2]
			gray.Pix[i] = Gray(r, g, b)
			i++
		}
	}
}

// toGrayImageNRGBA64 converts non-premultiplied 16-bit RGBA data using the
// low byte of each big-endian channel.
func toGrayImageNRGBA64(pix []uint8, stride, xMin, xMax, yMin, yMax int, gray *image.Gray) {
	i := 0
	for y := yMin; y < yMax; y++ {
		for x := xMin; x < xMax; x++ {
			offset := (y-yMin)*stride + (x-xMin)*8
			r := pix[offset+1]
			g := pix[offset+3]
			b := pix[offset+5]
			gray.Pix[i] = Gray(r, g, b)
			i++
		}
	}
}

// toGrayImagePaletted converts paletted pixel data to grayscale.
// Fixed: the original looked up palette[offset] — indexing the palette by
// pixel offset instead of the pixel's palette index — which returns the
// wrong color and panics on any image with more pixels than palette
// entries. The palette index stored in pix[offset] is the correct lookup.
func toGrayImagePaletted(pix []uint8, palette []color.Color, stride, xMin, xMax, yMin, yMax int, gray *image.Gray) {
	i := 0
	for y := yMin; y < yMax; y++ {
		for x := xMin; x < xMax; x++ {
			offset := (y-yMin)*stride + (x-xMin)*1
			grayColor := color.GrayModel.Convert(palette[pix[offset]])
			r, _, _, _ := grayColor.RGBA()
			gray.Pix[i] = uint8(r >> 8)
			i++
		}
	}
}

// toGrayImageGeneric converts any image.Image via its color model.
func toGrayImageGeneric(img image.Image, xMin, xMax, yMin, yMax int, gray *image.Gray) {
	i := 0
	for y := yMin; y < yMax; y++ {
		for x := xMin; x < xMax; x++ {
			grayColor := color.GrayModel.Convert(img.At(x, y))
			r, _, _, _ := grayColor.RGBA()
			gray.Pix[i] = uint8(r >> 8)
			i++
		}
	}
}
imagext.go
0.652906
0.430566
imagext.go
starcoder
package entity

import (
	"math/rand"

	"github.com/relnod/evo/pkg/math64"
	"github.com/relnod/evo/pkg/math64/collision"
)

// InitPopulation initializes a population with a given count and a world size.
func InitPopulation(count, width, height int) []*Creature {
	creatures := make([]*Creature, count)
	for i := range creatures {
		// Cubing the uniform draws skews radii toward small creatures.
		r := rand.Float64()*rand.Float64()*rand.Float64()*10 + 2.0
		creatures[i] = NewCreature(randomPosition(creatures, width, height, r), r)
	}
	return creatures
}

// randomPosition returns a new random position in the world that is free.
// A position is free if it won't collide with any creature placed so far.
func randomPosition(creatures []*Creature, width, height int, radius float64) math64.Vec2 {
retry:
	for {
		// Keep the candidate fully inside the world bounds.
		candidate := math64.Vec2{
			X: rand.Float64()*(float64(width)-(2*radius)) + radius,
			Y: rand.Float64()*(float64(height)-(2*radius)) + radius,
		}
		for _, other := range creatures {
			if other == nil {
				continue
			}
			if collision.CircleCircle(&other.Pos, other.Radius, &candidate, radius) {
				// Overlap found: roll a fresh candidate.
				continue retry
			}
		}
		return candidate
	}
}

// PopulationUpdater implements the evo.EntityUpdater.
type PopulationUpdater struct {
	animalStats  *DeathStats // deaths recorded for creatures with a brain
	plantStats   *DeathStats // deaths recorded for brainless creatures
	collectStats bool        // gate for recording death statistics
}

// NewPopulationUpdater returns a new population updater.
func NewPopulationUpdater() *PopulationUpdater {
	return &PopulationUpdater{
		animalStats:  &DeathStats{},
		plantStats:   &DeathStats{},
		collectStats: true,
	}
}

// UpdatePopulation updates all entities.
// Also adds new child entities and removes dead ones.
func (p *PopulationUpdater) UpdatePopulation(creatures []*Creature) []*Creature { var remove []int for i, c := range creatures { c.Update() if !c.Alive { if p.collectStats { if c.Brain == nil { p.plantStats.Add(c) } else { p.animalStats.Add(c) } } remove = append(remove, i) continue } if c.State == StateBreading { c.State = StateAdult c.LastBread = c.Age c.Energy -= c.Radius for i := 0; i < rand.Intn(int(1/(c.Radius*c.Radius*c.Radius*c.Radius)*100)+1)+1; i++ { child := c.NewChild() if c.Energy-child.Energy > 0 { c.Energy -= child.Energy creatures = append(creatures, child) } } } } for _, i := range remove { creatures = RemoveEntity(creatures, i) } return creatures } // AnimalStats returns the death stats for animals. func (p *PopulationUpdater) AnimalStats() *DeathStats { return p.animalStats } // PlantStats returns the death stats for plants. func (p *PopulationUpdater) PlantStats() *DeathStats { return p.plantStats } // ClearStats resets the internal stats counters. func (p *PopulationUpdater) ClearStats() { p.plantStats.Clear() p.animalStats.Clear() } // RemoveEntity removes an entity at a given index. func RemoveEntity(creatures []*Creature, i int) []*Creature { if i+1 >= len(creatures) { return creatures[:i] } return append(creatures[:i], creatures[i+1:]...) }
pkg/entity/population.go
0.737725
0.455199
population.go
starcoder
package astutil

import (
	"fmt"

	"github.com/mewkiz/pkg/errutil"
	"github.com/mewmew/uc/ast"
)

// Walk traverses the given parse tree, calling f(n) for each node n in the
// tree, in a bottom-up traversal.
func Walk(node ast.Node, f func(ast.Node) error) error {
	// before is a no-op; only the after (bottom-up) callback does work.
	nop := func(n ast.Node) error { return nil }
	return WalkBeforeAfter(node, nop, f)
}

// WalkBeforeAfter traverses the given parse tree, calling before(n) before
// traversing the node's children, and after(n) afterwards, in a bottom-up
// traversal.
//
// Typed-nil nodes (a non-nil interface holding a nil pointer) fall through
// their case's nil guard and are silently skipped; an untyped nil matches
// the explicit nil case. Unknown node types panic.
func WalkBeforeAfter(node ast.Node, before, after func(ast.Node) error) error {
	switch n := node.(type) {
	// Source file.
	case *ast.File:
		if n != nil {
			return walkFile(n, before, after)
		}
	// Declarations.
	case *ast.FuncDecl:
		if n != nil {
			return walkFuncDecl(n, before, after)
		}
	case *ast.VarDecl:
		if n != nil {
			return walkVarDecl(n, before, after)
		}
	case *ast.TypeDef:
		if n != nil {
			return walkTypeDef(n, before, after)
		}
	// Statements.
	case *ast.BlockStmt:
		if n != nil {
			return walkBlockStmt(n, before, after)
		}
	case *ast.EmptyStmt:
		if n != nil {
			return walkEmptyStmt(n, before, after)
		}
	case *ast.ExprStmt:
		if n != nil {
			return walkExprStmt(n, before, after)
		}
	case *ast.IfStmt:
		if n != nil {
			return walkIfStmt(n, before, after)
		}
	case *ast.ReturnStmt:
		if n != nil {
			return walkReturnStmt(n, before, after)
		}
	case *ast.WhileStmt:
		if n != nil {
			return walkWhileStmt(n, before, after)
		}
	// Expressions.
	case *ast.BasicLit:
		if n != nil {
			return walkBasicLit(n, before, after)
		}
	case *ast.BinaryExpr:
		if n != nil {
			return walkBinaryExpr(n, before, after)
		}
	case *ast.CallExpr:
		if n != nil {
			return walkCallExpr(n, before, after)
		}
	case *ast.Ident:
		if n != nil {
			return walkIdent(n, before, after)
		}
	case *ast.IndexExpr:
		if n != nil {
			return walkIndexExpr(n, before, after)
		}
	case *ast.ParenExpr:
		if n != nil {
			return walkParenExpr(n, before, after)
		}
	case *ast.UnaryExpr:
		if n != nil {
			return walkUnaryExpr(n, before, after)
		}
	// Types.
	case *ast.ArrayType:
		if n != nil {
			return walkArrayType(n, before, after)
		}
	case *ast.FuncType:
		if n != nil {
			return walkFuncType(n, before, after)
		}
	case nil:
		// Nothing to do.
		return nil
	default:
		panic(fmt.Sprintf("support for walking node of type %T not yet implemented", node))
	}
	// Reached only via a typed-nil node: treated as an empty subtree.
	return nil
}

// === [ Source file ] ===

// walkFile walks the parse tree of the given source file in depth first order.
func walkFile(file *ast.File, before, after func(ast.Node) error) error {
	if err := before(file); err != nil {
		return errutil.Err(err)
	}
	for _, decl := range file.Decls {
		if err := WalkBeforeAfter(decl, before, after); err != nil {
			return errutil.Err(err)
		}
	}
	if err := after(file); err != nil {
		return errutil.Err(err)
	}
	return nil
}

// === [ Top-level declarations ] ===

// walkFuncDecl walks the parse tree of the given function declaration in depth
// first order.
func walkFuncDecl(decl *ast.FuncDecl, before, after func(ast.Node) error) error {
	if err := before(decl); err != nil {
		return errutil.Err(err)
	}
	if err := WalkBeforeAfter(decl.FuncName, before, after); err != nil {
		return errutil.Err(err)
	}
	if err := WalkBeforeAfter(decl.FuncType, before, after); err != nil {
		return errutil.Err(err)
	}
	if err := WalkBeforeAfter(decl.Body, before, after); err != nil {
		return errutil.Err(err)
	}
	if err := after(decl); err != nil {
		return errutil.Err(err)
	}
	return nil
}

// walkVarDecl walks the parse tree of the given variable declaration in depth
// first order.
func walkVarDecl(decl *ast.VarDecl, before, after func(ast.Node) error) error { if err := before(decl); err != nil { return errutil.Err(err) } if err := WalkBeforeAfter(decl.VarType, before, after); err != nil { return errutil.Err(err) } if err := WalkBeforeAfter(decl.VarName, before, after); err != nil { return errutil.Err(err) } if err := WalkBeforeAfter(decl.Val, before, after); err != nil { return errutil.Err(err) } if err := after(decl); err != nil { return errutil.Err(err) } return nil } // walkTypeDef walks the parse tree of the given type declaration in depth first // order. func walkTypeDef(def *ast.TypeDef, before, after func(ast.Node) error) error { if err := before(def); err != nil { return errutil.Err(err) } if err := WalkBeforeAfter(def.DeclType, before, after); err != nil { return errutil.Err(err) } if err := WalkBeforeAfter(def.TypeName, before, after); err != nil { return errutil.Err(err) } if err := after(def); err != nil { return errutil.Err(err) } return nil } // === [ Statements ] === // walkBlockStmt walks the parse tree of the given block statement in depth // first order. func walkBlockStmt(block *ast.BlockStmt, before, after func(ast.Node) error) error { if err := before(block); err != nil { return errutil.Err(err) } for _, item := range block.Items { if err := WalkBeforeAfter(item, before, after); err != nil { return errutil.Err(err) } } if err := after(block); err != nil { return errutil.Err(err) } return nil } // walkEmptyStmt walks the parse tree of the given empty statement in depth // first order. func walkEmptyStmt(stmt *ast.EmptyStmt, before, after func(ast.Node) error) error { if err := before(stmt); err != nil { return errutil.Err(err) } if err := after(stmt); err != nil { return errutil.Err(err) } return nil } // walkExprStmt walks the parse tree of the given expression statement in depth // first order. 
func walkExprStmt(stmt *ast.ExprStmt, before, after func(ast.Node) error) error { if err := before(stmt); err != nil { return errutil.Err(err) } if err := WalkBeforeAfter(stmt.X, before, after); err != nil { return errutil.Err(err) } if err := after(stmt); err != nil { return errutil.Err(err) } return nil } // walkIfStmt walks the parse tree of the given if statement in depth first // order. func walkIfStmt(stmt *ast.IfStmt, before, after func(ast.Node) error) error { if err := before(stmt); err != nil { return errutil.Err(err) } if err := WalkBeforeAfter(stmt.Cond, before, after); err != nil { return errutil.Err(err) } if err := WalkBeforeAfter(stmt.Body, before, after); err != nil { return errutil.Err(err) } if err := WalkBeforeAfter(stmt.Else, before, after); err != nil { return errutil.Err(err) } if err := after(stmt); err != nil { return errutil.Err(err) } return nil } // walkReturnStmt walks the parse tree of the given return statement in depth // first order. func walkReturnStmt(stmt *ast.ReturnStmt, before, after func(ast.Node) error) error { if err := before(stmt); err != nil { return errutil.Err(err) } if err := WalkBeforeAfter(stmt.Result, before, after); err != nil { return errutil.Err(err) } if err := after(stmt); err != nil { return errutil.Err(err) } return nil } // walkWhileStmt walks the parse tree of the given while statement in depth // first order. func walkWhileStmt(stmt *ast.WhileStmt, before, after func(ast.Node) error) error { if err := before(stmt); err != nil { return errutil.Err(err) } if err := WalkBeforeAfter(stmt.Cond, before, after); err != nil { return errutil.Err(err) } if err := WalkBeforeAfter(stmt.Body, before, after); err != nil { return errutil.Err(err) } if err := after(stmt); err != nil { return errutil.Err(err) } return nil } // === [ Expressions ] === // walkBasicLit walks the parse tree of the given basic literal expression in // depth first order. 
func walkBasicLit(lit *ast.BasicLit, before, after func(ast.Node) error) error { if err := before(lit); err != nil { return errutil.Err(err) } if err := after(lit); err != nil { return errutil.Err(err) } return nil } // walkBinaryExpr walks the parse tree of the given binary expression in depth // first order. func walkBinaryExpr(expr *ast.BinaryExpr, before, after func(ast.Node) error) error { if err := before(expr); err != nil { return errutil.Err(err) } if err := WalkBeforeAfter(expr.X, before, after); err != nil { return errutil.Err(err) } if err := WalkBeforeAfter(expr.Y, before, after); err != nil { return errutil.Err(err) } if err := after(expr); err != nil { return errutil.Err(err) } return nil } // walkCallExpr walks the parse tree of the given call expression in depth first // order. func walkCallExpr(call *ast.CallExpr, before, after func(ast.Node) error) error { if err := before(call); err != nil { return errutil.Err(err) } if err := WalkBeforeAfter(call.Name, before, after); err != nil { return errutil.Err(err) } for _, arg := range call.Args { if err := WalkBeforeAfter(arg, before, after); err != nil { return errutil.Err(err) } } if err := after(call); err != nil { return errutil.Err(err) } return nil } // walkIdent walks the parse tree of the given identifier expression in depth // first order. func walkIdent(ident *ast.Ident, before, after func(ast.Node) error) error { if err := before(ident); err != nil { return errutil.Err(err) } if err := after(ident); err != nil { return errutil.Err(err) } return nil } // walkIndexExpr walks the parse tree of the given index expression in depth // first order. 
func walkIndexExpr(expr *ast.IndexExpr, before, after func(ast.Node) error) error { if err := before(expr); err != nil { return errutil.Err(err) } if err := WalkBeforeAfter(expr.Name, before, after); err != nil { return errutil.Err(err) } if err := WalkBeforeAfter(expr.Index, before, after); err != nil { return errutil.Err(err) } if err := after(expr); err != nil { return errutil.Err(err) } return nil } // walkParenExpr walks the parse tree of the given parenthesized expression in // depth first order. func walkParenExpr(expr *ast.ParenExpr, before, after func(ast.Node) error) error { if err := before(expr); err != nil { return errutil.Err(err) } if err := WalkBeforeAfter(expr.X, before, after); err != nil { return errutil.Err(err) } if err := after(expr); err != nil { return errutil.Err(err) } return nil } // walkUnaryExpr walks the parse tree of the given unary expression in depth // first order. func walkUnaryExpr(expr *ast.UnaryExpr, before, after func(ast.Node) error) error { if err := before(expr); err != nil { return errutil.Err(err) } if err := WalkBeforeAfter(expr.X, before, after); err != nil { return errutil.Err(err) } if err := after(expr); err != nil { return errutil.Err(err) } return nil } // === [ Types ] === // walkArrayType walks the parse tree of the given array type in depth first // order. func walkArrayType(arr *ast.ArrayType, before, after func(ast.Node) error) error { if err := before(arr); err != nil { return errutil.Err(err) } if err := WalkBeforeAfter(arr.Elem, before, after); err != nil { return errutil.Err(err) } if err := after(arr); err != nil { return errutil.Err(err) } return nil } // walkFuncType walks the parse tree of the given function signature in depth // first order. 
func walkFuncType(fn *ast.FuncType, before, after func(ast.Node) error) error { if err := before(fn); err != nil { return errutil.Err(err) } if err := WalkBeforeAfter(fn.Result, before, after); err != nil { return errutil.Err(err) } for _, param := range fn.Params { if err := WalkBeforeAfter(param, before, after); err != nil { return errutil.Err(err) } } if err := after(fn); err != nil { return errutil.Err(err) } return nil }
ast/astutil/walk.go
0.668988
0.423041
walk.go
starcoder
package responses type ScoringBreakdown2016 struct { Red ScoringBreakdownAlliance2016 `mapstructure:"red"` Blue ScoringBreakdownAlliance2016 `mapstructure:"blue"` } type ScoringBreakdownAlliance2016 struct { AutoPoints int `mapstructure:"autoPoints"` TeleopPoints int `mapstructure:"teleopPoints"` BreachPoints int `mapstructure:"breachPoints"` FoulPoints int `mapstructure:"foulPoints"` CapturePoints int `mapstructure:"capturePoints"` AdjustPoints int `mapstructure:"adjustPoints"` TotalPoints int `mapstructure:"totalPoints"` Robot1Auto string `mapstructure:"robot1Auto"` Robot2Auto string `mapstructure:"robot2Auto"` Robot3Auto string `mapstructure:"robot3Auto"` AutoReachPoints int `mapstructure:"autoReachPoints"` AutoCrossingPoints int `mapstructure:"autoCrossingPoints"` AutoBouldersLow int `mapstructure:"autoBouldersLow"` AutoBouldersHigh int `mapstructure:"autoBouldersHigh"` AutoBouldersPoints int `mapstructure:"autoBouldersPoints"` TeleopCrossingPoints int `mapstructure:"teleopCrossingPoints"` TeleopBouldersLow int `mapstructure:"teleopBouldersLow"` TeleopBouldersHigh int `mapstructure:"teleopBouldersHigh"` TeleopBouldersPoints int `mapstructure:"teleopBouldersPoints"` TeleopDefensesBreached bool `mapstructure:"teleopDefensesBreached"` TeleopChallengePoints int `mapstructure:"teleopChallengePoints"` TeleopScalePoints int `mapstructure:"teleopScalePoints"` TeleopTowerCaptured int `mapstructure:"teleopTowerCaptured"` TowerFaceA string `mapstructure:"towerFaceA"` TowerFaceB string `mapstructure:"towerFaceB"` TowerFaceC string `mapstructure:"towerFaceC"` TowerEndStrength int `mapstructure:"towerEndStrength"` TechnicalFoulCount int `mapstructure:"techFoulCount"` FoulCount int `mapstructure:"foulCount"` Position2 string `mapstructure:"position2"` Position3 string `mapstructure:"position3"` Position4 string `mapstructure:"position4"` Position5 string `mapstructure:"position5"` Position1Crossings int `mapstructure:"position1crossings"` Position2Crossings int 
// NOTE(review): this chunk opens mid-struct — the tag below belongs to a
// field whose declaration (and the enclosing 2016 per-alliance struct's
// header) sits above the visible range. Only the tail is reproduced here.
`mapstructure:"position2crossings"`
	Position3Crossings int `mapstructure:"position3crossings"`
	Position4Crossings int `mapstructure:"position4crossings"`
	Position5Crossings int `mapstructure:"position5crossings"`
}

// ScoringBreakdown2017 holds the per-alliance score breakdowns for a 2017
// (Steamworks) match, decoded via mapstructure from the upstream payload.
type ScoringBreakdown2017 struct {
	Red  ScoringBreakdownAlliance2017 `mapstructure:"red"`
	Blue ScoringBreakdownAlliance2017 `mapstructure:"blue"`
}

// ScoringBreakdownAlliance2017 is one alliance's 2017 score breakdown.
type ScoringBreakdownAlliance2017 struct {
	AutoPoints                int    `mapstructure:"autoPoints"`
	TeleopPoints              int    `mapstructure:"teleopPoints"`
	FoulPoints                int    `mapstructure:"foulPoints"`
	AdjustPoints              int    `mapstructure:"adjustPoints"`
	TotalPoints               int    `mapstructure:"totalPoints"`
	Robot1Auto                string `mapstructure:"robot1Auto"`
	Robot2Auto                string `mapstructure:"robot2Auto"`
	Robot3Auto                string `mapstructure:"robot3Auto"`
	Rotor1Auto                bool   `mapstructure:"rotor1Auto"`
	Rotor2Auto                bool   `mapstructure:"rotor2Auto"`
	AutoFuelLow               int    `mapstructure:"autoFuelLow"`
	AutoFuelHigh              int    `mapstructure:"autoFuelHigh"`
	AutoMobilityPoints        int    `mapstructure:"autoMobilityPoints"`
	AutoRotorPoints           int    `mapstructure:"autoRotorPoints"`
	AutoFuelPoints            int    `mapstructure:"autoFuelPoints"`
	TeleopFuelPoints          int    `mapstructure:"teleopFuelPoints"`
	TeleopFuelLow             int    `mapstructure:"teleopFuelLow"`
	TeleopFuelHigh            int    `mapstructure:"teleopFuelHigh"`
	TeleopRotorPoints         int    `mapstructure:"teleopRotorPoints"`
	KpaRankingPointAchieved   bool   `mapstructure:"kPaRankingPointAchieved"`
	TeleopTakeoffPoints       int    `mapstructure:"teleopTakeoffPoints"`
	KpaBonusPoints            int    `mapstructure:"kPaBonusPoints"`
	RotorBonusPoints          int    `mapstructure:"rotorBonusPoints"`
	Rotor1Engaged             bool   `mapstructure:"rotor1Engaged"`
	Rotor2Engaged             bool   `mapstructure:"rotor2Engaged"`
	Rotor3Engaged             bool   `mapstructure:"rotor3Engaged"`
	Rotor4Engaged             bool   `mapstructure:"rotor4Engaged"`
	RotorRankingPointAchieved bool   `mapstructure:"rotorRankingPointAchieved"`
	TechnicalFoulCount        int    `mapstructure:"techFoulCount"`
	FoulCount                 int    `mapstructure:"foulCount"`
	TouchpadNear              string `mapstructure:"touchpadNear"`
	TouchpadMiddle            string `mapstructure:"touchpadMiddle"`
	TouchpadFar               string `mapstructure:"touchpadFar"`
}

// ScoringBreakdown2018 holds the per-alliance score breakdowns for a 2018
// (Power Up) match.
type ScoringBreakdown2018 struct {
	Red  ScoringBreakdownAlliance2018 `mapstructure:"red"`
	Blue ScoringBreakdownAlliance2018 `mapstructure:"blue"`
}

// ScoringBreakdownAlliance2018 is one alliance's 2018 score breakdown.
type ScoringBreakdownAlliance2018 struct {
	AdjustPoints               int     `mapstructure:"adjustPoints"`
	AutoOwnershipPoints        int     `mapstructure:"autoOwnershipPoints"`
	AutoPoints                 int     `mapstructure:"autoPoints"`
	AutoQuestRankingPoint      bool    `mapstructure:"autoQuestRankingPoint"`
	AutoRobot1                 string  `mapstructure:"autoRobot1"`
	AutoRobot2                 string  `mapstructure:"autoRobot2"`
	AutoRobot3                 string  `mapstructure:"autoRobot3"`
	AutoRunPoints              int     `mapstructure:"autoRunPoints"`
	AutoScaleOwnershipSeconds  float64 `mapstructure:"autoScaleOwnershipSec"`
	AutoSwitchAtZero           bool    `mapstructure:"autoSwitchAtZero"`
	AutoSwitchOwnershipSeconds float64 `mapstructure:"autoSwitchOwnershipSec"`
	EndgamePoints              int     `mapstructure:"endgamePoints"`
	EndgameRobot1              string  `mapstructure:"endgameRobot1"`
	EndgameRobot2              string  `mapstructure:"endgameRobot2"`
	EndgameRobot3              string  `mapstructure:"endgameRobot3"`
	FaceTheBossRankingPoint    bool    `mapstructure:"faceTheBossRankingPoint"`
	FoulCount                  int     `mapstructure:"foulCount"`
	FoulPoints                 int     `mapstructure:"foulPoints"`
	RankingPoints              int     `mapstructure:"rp"`
	TechnicalFoulCount         int     `mapstructure:"techFoulCount"`
	TeleopOwnershipPoints      int     `mapstructure:"teleopOwnershipPoints"`
	TeleopPoints               int     `mapstructure:"teleopPoints"`
	TeleopScaleBoostSeconds    float64 `mapstructure:"teleopScaleBoostSec"`
	TeleopScaleForceSeconds    float64 `mapstructure:"teleopScaleForceSec"`
	// NOTE(review): tag lacks "Scale" — the sibling Sec tags all include it,
	// so the upstream key is presumably "teleopScaleOwnershipSec"; verify
	// against an actual API payload before changing.
	TeleopScaleOwnershipSeconds  float64 `mapstructure:"teleopOwnershipSec"`
	TeleopSwitchBoostSeconds     float64 `mapstructure:"teleopSwitchBoostSec"`
	TeleopSwitchForceSeconds     float64 `mapstructure:"teleopSwitchForceSec"`
	TeleopSwitchOwnershipSeconds float64 `mapstructure:"teleopSwitchOwnershipSec"`
	TotalPoints                  int     `mapstructure:"totalPoints"`
	VaultBoostPlayed             int     `mapstructure:"vaultBoostPlayed"`
	VaultBoostTotal              int     `mapstructure:"vaultBoostTotal"`
	VaultForcePlayed             int     `mapstructure:"vaultForcePlayed"`
	VaultForceTotal              int     `mapstructure:"vaultForceTotal"`
	VaultLevitatePlayed          int     `mapstructure:"vaultLevitatePlayed"`
	VaultLevitateTotal           int     `mapstructure:"vaultLevitateTotal"`
	VaultPoints                  int     `mapstructure:"vaultPoints"`
	TbaGameData                  string  `mapstructure:"tba_gameData"`
}

// ScoringBreakdown2019 holds the per-alliance score breakdowns for a 2019
// (Deep Space) match.
type ScoringBreakdown2019 struct {
	Red  ScoringBreakdownAlliance2019 `mapstructure:"red"`
	Blue ScoringBreakdownAlliance2019 `mapstructure:"blue"`
}

// ScoringBreakdownAlliance2019 is one alliance's 2019 score breakdown.
type ScoringBreakdownAlliance2019 struct {
	AdjustPoints               int    `mapstructure:"adjustPoints"`
	AutoPoints                 int    `mapstructure:"autoPoints"`
	Bay1                       string `mapstructure:"bay1"`
	Bay2                       string `mapstructure:"bay2"`
	Bay3                       string `mapstructure:"bay3"`
	Bay4                       string `mapstructure:"bay4"`
	Bay5                       string `mapstructure:"bay5"`
	Bay6                       string `mapstructure:"bay6"`
	Bay7                       string `mapstructure:"bay7"`
	Bay8                       string `mapstructure:"bay8"`
	CargoPoints                int    `mapstructure:"cargoPoints"`
	CompleteRocketRankingPoint bool   `mapstructure:"completeRocketRankingPoint"`
	CompletedRocketFar         bool   `mapstructure:"completedRocketFar"`
	CompletedRocketNear        bool   `mapstructure:"completedRocketNear"`
	EndgameRobot1              string `mapstructure:"endgameRobot1"`
	EndgameRobot2              string `mapstructure:"endgameRobot2"`
	EndgameRobot3              string `mapstructure:"endgameRobot3"`
	FoulCount                  int    `mapstructure:"foulCount"`
	FoulPoints                 int    `mapstructure:"foulPoints"`
	HabClimbPoints             int    `mapstructure:"habClimbPoints"`
	HabDockingRankingPoint     bool   `mapstructure:"habDockingRankingPoint"`
	HabLineRobot1              string `mapstructure:"habLineRobot1"`
	HabLineRobot2              string `mapstructure:"habLineRobot2"`
	HabLineRobot3              string `mapstructure:"habLineRobot3"`
	HatchPanelPoints           int    `mapstructure:"hatchPanelPoints"`
	LowLeftRocketFar           string `mapstructure:"lowLeftRocketFar"`
	LowLeftRocketNear          string `mapstructure:"lowLeftRocketNear"`
	LowRightRocketFar          string `mapstructure:"lowRightRocketFar"`
	LowRightRocketNear         string `mapstructure:"lowRightRocketNear"`
	MidLeftRocketFar           string `mapstructure:"midLeftRocketFar"`
	MidLeftRocketNear          string `mapstructure:"midLeftRocketNear"`
	// NOTE(review): field name says "Rocker" while the tag says "Rocket" —
	// likely a typo in the Go name, but renaming the exported field would
	// break callers; leaving as-is and flagging instead.
	MidRightRockerFar    string `mapstructure:"midRightRocketFar"`
	MidRightRocketNear   string `mapstructure:"midRightRocketNear"`
	PreMatchBay1         string `mapstructure:"preMatchBay1"`
	PreMatchBay2         string `mapstructure:"preMatchBay2"`
	PreMatchBay3         string `mapstructure:"preMatchBay3"`
	PreMatchBay6         string `mapstructure:"preMatchBay6"`
	PreMatchBay7         string `mapstructure:"preMatchBay7"`
	PreMatchBay8         string `mapstructure:"preMatchBay8"`
	PreMatchRobot1       string `mapstructure:"preMatchLevelRobot1"`
	PreMatchRobot2       string `mapstructure:"preMatchLevelRobot2"`
	PreMatchRobot3       string `mapstructure:"preMatchLevelRobot3"`
	RankingPoints        int    `mapstructure:"rp"`
	SandStormBonusPoints int    `mapstructure:"sandStormBonusPoints"`
	TechnicalFoulCount   int    `mapstructure:"techFoulCount"`
	TeleopPoints         int    `mapstructure:"teleopPoints"`
	TopLeftRocketFar     string `mapstructure:"topLeftRocketFar"`
	TopLeftRocketNear    string `mapstructure:"topLeftRocketNear"`
	TopRightRocketFar    string `mapstructure:"topRightRocketFar"`
	TopRightRocketNear   string `mapstructure:"topRightRocketNear"`
	TotalPoints          int    `mapstructure:"totalPoints"`
}

// ScoringBreakdown2020 holds the per-alliance score breakdowns for a 2020
// (Infinite Recharge) match.
type ScoringBreakdown2020 struct {
	Red  ScoringBreakdown2020Alliance `mapstructure:"red"`
	Blue ScoringBreakdown2020Alliance `mapstructure:"blue"`
}

// ScoringBreakdown2020Alliance is one alliance's 2020 score breakdown.
type ScoringBreakdown2020Alliance struct {
	InitLineRobot1                string `mapstructure:"initLineRobot1"`
	EndgameRobot1                 string `mapstructure:"endgameRobot1"`
	InitLineRobot2                string `mapstructure:"initLineRobot2"`
	EndgameRobot2                 string `mapstructure:"endgameRobot2"`
	InitLineRobot3                string `mapstructure:"initLineRobot3"`
	EndgameRobot3                 string `mapstructure:"endgameRobot3"`
	AutoCellsBottom               int    `mapstructure:"autoCellsBottom"`
	AutoCellsOuter                int    `mapstructure:"autoCellsOuter"`
	AutoCellsInner                int    `mapstructure:"autoCellsInner"`
	TeleopCellsBottom             int    `mapstructure:"teleopCellsBottom"`
	TeleopCellsOuter              int    `mapstructure:"teleopCellsOuter"`
	TeleopCellsInner              int    `mapstructure:"teleopCellsInner"`
	Stage1Activated               bool   `mapstructure:"stage1Activated"`
	Stage2Activated               bool   `mapstructure:"stage2Activated"`
	Stage3Activated               bool   `mapstructure:"stage3Activated"`
	Stage3TargetColor             string `mapstructure:"stage3TargetColor"`
	EndgameRungIsLevel            string `mapstructure:"endgameRungIsLevel"`
	AutoInitLinePoints            int    `mapstructure:"autoInitLinePoints"`
	AutoCellPoints                int    `mapstructure:"autoCellPoints"`
	AutoPoints                    int    `mapstructure:"autoPoints"`
	TeleopCellPoints              int    `mapstructure:"teleopCellPoints"`
	ControlPanelPoints            int    `mapstructure:"controlPanelPoints"`
	EndgamePoints                 int    `mapstructure:"endgamePoints"`
	TeleopPoints                  int    `mapstructure:"teleopPoints"`
	ShieldOperationalRankingPoint bool   `mapstructure:"shieldOperationalRankingPoint"`
	ShieldEnergizedRankingPoint   bool   `mapstructure:"shieldEnergizedRankingPoint"`
	FoulCount                     int    `mapstructure:"foulCount"`
	TechFoulCount                 int    `mapstructure:"techFoulCount"`
	AdjustPoints                  int    `mapstructure:"adjustPoints"`
	FoulPoints                    int    `mapstructure:"foulPoints"`
	RankingPoints                 int    `mapstructure:"rp"`
	// NOTE(review): "totlaPoints" looks like a typo for "totalPoints" (every
	// other year uses "totalPoints"), but if the upstream API really emits
	// the misspelled key this tag is load-bearing — confirm against a live
	// payload before fixing.
	TotalPoints int `mapstructure:"totlaPoints"`
}
responses/scoring.go
0.657978
0.430896
scoring.go
starcoder
// Package orbplot renders orbits and orbital positions onto gonum plots.
package orbplot

import (
	"image/color"
	"math"

	"github.com/emilyselwood/orbcalc/orbcore"
	"github.com/emilyselwood/orbcalc/orbdata"
	"gonum.org/v1/plot"
	"gonum.org/v1/plot/plotter"
	"gonum.org/v1/plot/vg"
	"gonum.org/v1/plot/vg/draw"
)

// PlotSolarSystemLines plots the major planets of the solar system on the provided plot.
// The Sun is always drawn at the origin; legend controls per-orbit legend entries.
func PlotSolarSystemLines(p *plot.Plot, legend bool) error {
	if err := PlotFullOrbitLines(p, orbdata.SolarSystem, RainbowList(len(orbdata.SolarSystem)), legend); err != nil {
		return err
	}
	return PlotSun(p)
}

// PlotInnerSolarSystemLines plots the major planets of the inner solar system on the provided plot.
func PlotInnerSolarSystemLines(p *plot.Plot, legend bool) error {
	if err := PlotFullOrbitLines(p, orbdata.InnerSolarSystem, RainbowList(len(orbdata.InnerSolarSystem)), legend); err != nil {
		return err
	}
	return PlotSun(p)
}

// PlotOuterSolarSystemLines plots the major planets of the outer solar system on the provided plot.
func PlotOuterSolarSystemLines(p *plot.Plot, legend bool) error {
	if err := PlotFullOrbitLines(p, orbdata.OuterSolarSystem, RainbowList(len(orbdata.OuterSolarSystem)), legend); err != nil {
		return err
	}
	return PlotSun(p)
}

// PlotFullOrbitLines plots the full orbits of the provided orbits on the plot.
// colors must have at least len(orbits) entries; orbits[i] is drawn in colors[i].
func PlotFullOrbitLines(p *plot.Plot, orbits []orbcore.Orbit, colors []color.RGBA, legend bool) error {
	for i, orb := range orbits {
		if err := PlotFullOrbitLine(p, orb, colors[i], legend); err != nil {
			return err
		}
	}
	return nil
}

// PlotFullOrbitLine takes a plot and orbit and draws a line for its full orbit
// in the provided color. When legend is true the orbit's ID is added to the
// plot legend.
func PlotFullOrbitLine(p *plot.Plot, orb orbcore.Orbit, c color.RGBA, legend bool) error {
	// Propagate the orbit through one full revolution to get the line's points.
	result := propogate(&orb)
	l, err := plotter.NewLine(PositionToPointsXY(result))
	if err != nil {
		return err
	}
	l.LineStyle.Width = vg.Points(1)
	l.LineStyle.Color = c
	p.Add(l)
	if legend {
		p.Legend.Add(orb.ID, l)
	}
	return nil
}

// PlotPoints renders a point for each orbit object paired with the color list.
// NOTE(review): the legend parameter is currently unused here, unlike the
// line-plotting functions above — confirm whether legend entries were intended.
func PlotPoints(p *plot.Plot, orb []*orbcore.Orbit, c []color.RGBA, legend bool) error {
	result := make([]*orbcore.Position, len(orb))
	for i, d := range orb {
		result[i] = orbcore.OrbitToPosition(d)
	}
	scatter, err := plotter.NewScatter(PositionToPointsXY(result))
	if err != nil {
		return err
	}
	// Per-point glyph style: only the color is set; the glyph shape and
	// radius keep their zero values.
	scatter.GlyphStyleFunc = func(i int) draw.GlyphStyle {
		var result draw.GlyphStyle
		result.Color = c[i]
		return result
	}
	p.Add(scatter)
	return nil
}

// PositionToPointsXY Converts many Position objects to something that can be
// plotted. Only the X and Y components are used; Z is dropped (2D projection).
func PositionToPointsXY(rows []*orbcore.Position) plotter.XYs {
	pts := make(plotter.XYs, len(rows))
	for i := range pts {
		pts[i].X = rows[i].X
		pts[i].Y = rows[i].Y
	}
	return pts
}

// PlotSun adds a yellow dot at the origin.
func PlotSun(p *plot.Plot) error {
	pts := make(plotter.XYs, 1)
	pts[0].X = 0
	pts[0].Y = 0
	points, err := plotter.NewScatter(pts)
	if err != nil {
		return err
	}
	points.Color = color.RGBA{
		R: 255,
		G: 255,
		B: 0,
		A: 255,
	}
	points.Radius = 2
	points.Shape = draw.CircleGlyph{}
	p.Add(points)
	return nil
}

// propogate (sic) samples one full revolution of the orbit in 366 steps and
// converts each sampled orbit state into a Cartesian position.
func propogate(orb *orbcore.Orbit) []*orbcore.Position {
	steps := orbcore.MeanMotionFullOrbit(orb, 366)
	result := make([]*orbcore.Position, len(steps))
	for i, d := range steps {
		result[i] = orbcore.OrbitToPosition(d)
	}
	return result
}

// RainbowList returns a list of colours, one per step, spanning the rainbow.
func RainbowList(numOfSteps int) []color.RGBA {
	result := make([]color.RGBA, numOfSteps)
	for i := 0; i < numOfSteps; i++ {
		result[i] = Rainbow(numOfSteps, i)
	}
	return result
}

// Rainbow generates a colour range that roughly matches the rainbow colour spectrum.
func Rainbow(numOfSteps, step int) color.RGBA { var r, g, b float64 h := float64(step) / float64(numOfSteps) i := math.Floor(h * 6) f := h*6 - i q := 1 - f os := math.Remainder(i, 6) switch os { case 0: r = 255 g = f * 255 b = 0 case 1: r = q * 255 g = 255 b = 0 case 2: r = 0 g = 255 b = f * 255 case 3: r = 0 g = q * 255 b = 255 case 4: r = f * 255 g = 0 b = 255 case 5: r = 255 g = 0 b = q * 255 } return color.RGBA{ R: uint8(r), G: uint8(g), B: uint8(b), A: 255, } }
orbplot/plotting.go
0.854536
0.494263
plotting.go
starcoder
// Package intsets provides integer set implementations.
package intsets

import (
	"sort"
)

// Naive is a naive intsets representation.
// Invariant: vals is kept sorted ascending with no duplicates (maintained by
// Insert, UnionWith, IntersectionWith and DifferenceWith).
type Naive struct {
	vals []int
}

// Copy sets s to the value of x.
func (s *Naive) Copy(x *Naive) {
	n := len(x.vals)
	s.ensure(n)
	s.vals = s.vals[:n]
	copy(s.vals, x.vals)
}

// AppendTo appends the entries to dst and returns the response.
func (s *Naive) AppendTo(dst []int) []int {
	return append(dst, s.vals...)
}

// IsEmpty reports whether the set s is empty.
func (s *Naive) IsEmpty() bool {
	return s.Len() == 0
}

// Len returns the number of elements in the set s.
func (s *Naive) Len() int {
	return len(s.vals)
}

// Clear clears the set, retaining the backing array for reuse.
func (s *Naive) Clear() {
	s.vals = s.vals[:0]
}

// Insert adds x to the set s, keeping vals sorted. It reports whether the
// set grew (false when x was already present).
func (s *Naive) Insert(x int) bool {
	if pos := sort.SearchInts(s.vals, x); pos < len(s.vals) {
		if s.vals[pos] == x {
			return false
		}
		// Shift the tail right by one and drop x into its sorted slot.
		s.vals = append(s.vals, 0)
		copy(s.vals[pos+1:], s.vals[pos:])
		s.vals[pos] = x
	} else {
		// x is larger than everything present: append at the end.
		s.vals = append(s.vals, x)
	}
	return true
}

// UnionWith sets s to the union s ∪ x, and reports whether s grew.
func (s *Naive) UnionWith(x *Naive) bool {
	if s == x {
		return false
	}
	on := len(s.vals)
	// Concatenate, sort, then compact duplicates in place.
	s.vals = append(s.vals, x.vals...)
	sn := len(s.vals)
	if on == sn {
		return false
	}
	sort.Ints(s.vals)
	// p is the index of the last kept (unique) element; equal neighbours
	// are skipped, strictly greater ones are swapped forward next to p.
	p := 0
	for i := 1; i < sn; i++ {
		if s.vals[p] >= s.vals[i] {
			continue
		}
		p++
		if p < i {
			s.vals[p], s.vals[i] = s.vals[i], s.vals[p]
		}
	}
	s.vals = s.vals[:p+1]
	return p+1 > on
}

// IntersectionWith sets s to the intersection s ∩ x.
func (s *Naive) IntersectionWith(x *Naive) {
	if s == x {
		return
	}
	// Two-pointer walk over both sorted slices; p is the write cursor for
	// elements found in both sets, compacted in place.
	p, i, j := 0, 0, 0
	sn, xn := len(s.vals), len(x.vals)
	for i < sn && j < xn {
		switch {
		case s.vals[i] < x.vals[j]:
			i++
		case s.vals[i] > x.vals[j]:
			j++
		case p < i:
			// Common element not yet in its compacted slot: move it,
			// then fall through to advance all three cursors.
			s.vals[p], s.vals[i] = s.vals[i], s.vals[p]
			fallthrough
		default:
			p, i, j = p+1, i+1, j+1
		}
	}
	s.vals = s.vals[:p]
}

// DifferenceWith sets s to the difference s ∖ x.
func (s *Naive) DifferenceWith(x *Naive) {
	// s ∖ s is empty.
	if s == x {
		s.Clear()
		return
	}
	// Two-pointer walk over both sorted slices; p is the write cursor for
	// elements of s that are not in x, compacted in place.
	p, i, j := 0, 0, 0
	sn, xn := len(s.vals), len(x.vals)
	for i < sn && j < xn {
		switch {
		case s.vals[i] < x.vals[j]:
			// Element only in s: keep it.
			if p < i {
				s.vals[p], s.vals[i] = s.vals[i], s.vals[p]
			}
			p, i = p+1, i+1
		case s.vals[i] > x.vals[j]:
			j++
		default:
			// Element in both: drop it from s.
			i, j = i+1, j+1
		}
	}
	// x is exhausted: everything remaining in s is kept.
	for p < sn && i < sn {
		s.vals[p], s.vals[i] = s.vals[i], s.vals[p]
		p, i = p+1, i+1
	}
	s.vals = s.vals[:p]
}

// ensure grows the backing array so that it can hold at least n elements,
// extending both capacity and length as needed. Newly exposed slots hold
// zeroes (or stale values within the old capacity) and are expected to be
// overwritten by the caller, as Copy does.
func (s *Naive) ensure(n int) {
	cp := cap(s.vals)
	if delta := n - cp; delta > 0 {
		s.vals = append(s.vals[:cp], make([]int, delta)...)
	}
	if len(s.vals) < n {
		s.vals = s.vals[:n]
	}
}
internal/intsets/naive.go
0.68784
0.530966
naive.go
starcoder
// Package dprotocol implements decoding of the D protocol wire format.
package dprotocol

import (
	"encoding/binary"
	"fmt"
	"time"
)

// Packet represents a D protocol packet.
type Packet struct {
	// Header is the packet header.
	Header Header
	// EventID is the event ID.
	EventID EventID
	// EventInformation contains additional, event-dependent information.
	EventInformation uint8
	// GPSFlags contains the GPS flags.
	GPSFlags GPSFlags
	// Time is the time the snapshot was recorded.
	// Does not indicate snapshot sending time.
	// Snapshot recorded time starts running from zero if the device doesn't have
	// GPS fix at startup and it is updated to correct time when GPS fix is received.
	Time time.Time
	// GPSTime is the time when GPS position data was recorded.
	GPSTime time.Time
	// LatitudeMicroDegrees is the GPS latitude in millionths of a degree.
	// Southbound is negative, northbound is positive.
	// NOTE(review): the doc above describes signed values but the field is
	// uint32 (decoded straight from BigEndian.Uint32) — callers presumably
	// reinterpret the bits; confirm whether int32 was intended.
	LatitudeMicroDegrees uint32
	// LongitudeMicroDegrees is the GPS longitude in millionths of a degree.
	// Westbound is negative, eastbound is positive.
	// NOTE(review): same signedness question as LatitudeMicroDegrees.
	LongitudeMicroDegrees uint32
	// NumSatellites is the number of visible satellites.
	NumSatellites uint8
	// SpeedKph is the current speed in km/h.
	// Note: Wrap-around in speeds over 255km/h.
	SpeedKph uint8
	// MaximumSpeedKph is the maximum detected speed since last event in km/h.
	MaximumSpeedKph uint8
	// HeadingHalfDegrees is the vehicle heading in degrees / 2. Multiply value by 2 to get degrees.
	// For example, 260 degrees is sent as a value of 130.
	// 0 or 360 degrees equals heading to north.
	HeadingHalfDegrees uint8
	// DigitalInputs contains the digital input status.
	DigitalInputs DigitalInputs
	// AnalogInput1VoltageMilliVolts is the voltage of analog input AD1 in millivolts.
	AnalogInput1VoltageMilliVolts uint16
	// AnalogInput2VoltageMilliVolts is the voltage of analog input AD2 in millivolts.
	AnalogInput2VoltageMilliVolts uint16
	// AnalogInput3VoltageMilliVolts is the voltage of analog input AD3 in millivolts.
	AnalogInput3VoltageMilliVolts uint16
	// AnalogInput4VoltageMilliVolts is the voltage of analog input AD4 in millivolts.
	AnalogInput4VoltageMilliVolts uint16
	// MainPowerVoltageMilliVolts is the voltage of main power in millivolts.
	MainPowerVoltageMilliVolts uint16
	// ExternalBatteryVoltageMilliVolts is the voltage of external battery in millivolts.
	ExternalBatteryVoltageMilliVolts uint16
	// PulseCounter1Rate is the latest pulse rate of counter channel 1.
	// Unit depends on configuration. Default is pulses per second (PPS).
	PulseCounter1Rate uint16
	// PulseCounter1 is the counter for pulse counter channel 1.
	// Unit depends on configuration. Default is number of pulses.
	PulseCounter1 uint32
	// PulseCounter2Rate is the latest pulse rate of counter channel 2.
	// Unit depends on configuration. Default is pulses per second (PPS).
	PulseCounter2Rate uint16
	// PulseCounter2 is the counter for pulse counter channel 2.
	// Unit depends on configuration. Default is number of pulses.
	PulseCounter2 uint32
	// Trip1DistanceMetres is the distance travelled in meters since trip meter was reset.
	Trip1DistanceMetres uint32
	// Trip2DistanceMetres is the distance travelled in meters since trip meter was reset.
	Trip2DistanceMetres uint32
	// OutputStatus contains the status of outputs.
	OutputStatus OutputStatus
	// IButtonKeyID is the iButton key ID without family code or checksum.
	// iButton key ID is all zeroes if driver was not logged in at the time the snapshot was recorded.
	IButtonKeyID uint64
	// DriverLogKeypad is the Driver Log Keypad (DLKP) / 3PAD button pressed state.
	DriverLogKeypad DriverLogKeypad
	// GPSAltitudeMetres is the GPS altitude in meters. Antenna height above/below mean sea level.
	GPSAltitudeMetres int16
	// SnapshotCounter is the transport-based snapshot counter.
	// Counter is incremented for every snapshot at message formatting time, and it is destination specific.
	// Note that formatting time is not send time.
	SnapshotCounter uint16
	// StateFlags contains the state flags.
	StateFlags StateFlags
	// UserDefinedFlags contains the user-definable flag bits.
	UserDefinedFlags uint32
	// CellInfoFlags contains the cell info.
	CellInfoFlags CellInfoFlags
	// LocationAreaIdentity contains the Location Area Identity (LAI).
	LocationAreaIdentity uint32
	// LocationAreaCode contains the Location Area Code (LAC).
	LocationAreaCode uint16
	// GSMCellID is the GSM cell ID.
	GSMCellID uint32
	// RSSI is the Received Signal Strength Indicator (RSSI).
	RSSI int8
	// EventSpecificBytes contains the event-specific bytes.
	EventSpecificBytes []byte
}

// indexOfSnapshot is the byte offset where the snapshot payload begins,
// immediately after the fixed packet header.
const indexOfSnapshot = lengthOfPacketHeader

// snapshot field lengths, in bytes, in wire order. UnmarshalBinary advances
// its cursor by these amounts as it decodes each (optional) field.
const (
	lengthOfEventID                         = 1
	lengthOfEventInformation                = 1
	lengthOfGPSFlags                        = 1
	lengthOfTime                            = 4
	lengthOfGPSTime                         = 4
	lengthOfLatitude                        = 4
	lengthOfLongitude                       = 4
	lengthOfNumSatellites                   = 1
	lengthOfSpeed                           = 1
	lengthOfMaximumSpeed                    = 1
	lengthOfHeading                         = 1
	lengthOfDigitalInputs                   = 1
	lengthOfAnalogInput1                    = 2
	lengthOfAnalogInput2                    = 2
	lengthOfAnalogInput3                    = 2
	lengthOfAnalogInput4                    = 2
	lengthOfMainPower                       = 2
	lengthOfExternalBattery                 = 2
	lengthOfPulseCounter1Rate               = 2
	lengthOfPulseCounter1                   = 4
	lengthOfPulseCounter2Rate               = 2
	lengthOfPulseCounter2                   = 4
	lengthOfTrip1Distance                   = 4
	lengthOfTrip2Distance                   = 4
	lengthOfOutputStatus                    = 1
	lengthOfIButtonKeyID                    = 6
	lengthOfDriverLogKeypadState            = 1
	lengthOfGPSAltitude                     = 2
	lengthOfSnapshotCounter                 = 2
	lengthOfStateFlags                      = 4
	lengthOfUserDefinedFlags                = 4
	lengthOfCellInfoFlags                   = 1
	lengthOfLocationAreaIdentity            = 3
	lengthOfLocationAreaCode                = 2
	lengthOfGSMCellID                       = 4
	lengthOfReceivedSignalStrengthIndicator = 1
	lengthOfExtendedDigitalInputs           = 2
)

// UnmarshalBinary unmarshals the packet from the provided bytes.
func (p *Packet) UnmarshalBinary(b []byte) error {
	// Decode the fixed header first; its field selectors determine which of
	// the optional snapshot fields below are present on the wire.
	if err := p.Header.UnmarshalBinary(b); err != nil {
		return fmt.Errorf("unmarshal packet: %w", err)
	}
	// Single up-front bounds check: SnapshotLength() is the total snapshot
	// size implied by the selector bits, so the per-field reads below
	// cannot overrun b.
	if len(b) < lengthOfPacketHeader+p.Header.FieldSelectors.SnapshotLength() {
		return fmt.Errorf("unmarshal packet: insufficient data for selectors: %v", p.Header.FieldSelectors)
	}
	// i is the read cursor; the decode order below must match the wire
	// layout exactly. EventID and EventInformation are always present.
	i := indexOfSnapshot
	p.EventID = EventID(b[i])
	i += lengthOfEventID
	p.EventInformation = b[i]
	i += lengthOfEventInformation
	if p.Header.FieldSelectors.Has(FieldSelectorGPSFlags) {
		p.GPSFlags = GPSFlags(b[i])
		i += lengthOfGPSFlags
	}
	if p.Header.FieldSelectors.Has(FieldSelectorTime) {
		// Timestamps are 32-bit big-endian Unix seconds, interpreted as UTC.
		p.Time = time.Unix(int64(binary.BigEndian.Uint32(b[i:])), 0).UTC()
		i += lengthOfTime
	}
	if p.Header.FieldSelectors.Has(FieldSelectorGPS) {
		p.GPSTime = time.Unix(int64(binary.BigEndian.Uint32(b[i:])), 0).UTC()
		i += lengthOfGPSTime
		p.LatitudeMicroDegrees = binary.BigEndian.Uint32(b[i:])
		i += lengthOfLatitude
		p.LongitudeMicroDegrees = binary.BigEndian.Uint32(b[i:])
		i += lengthOfLongitude
		p.NumSatellites = b[i]
		i += lengthOfNumSatellites
	}
	if p.Header.FieldSelectors.Has(FieldSelectorGPSSpeed) {
		p.SpeedKph = b[i]
		i += lengthOfSpeed
		p.MaximumSpeedKph = b[i]
		i += lengthOfMaximumSpeed
		p.HeadingHalfDegrees = b[i]
		i += lengthOfHeading
	}
	// The IO selector covers two fields at different wire offsets: the
	// digital inputs here, and the output status further down.
	if p.Header.FieldSelectors.Has(FieldSelectorIO) {
		p.DigitalInputs = DigitalInputs(b[i])
		i += lengthOfDigitalInputs
	}
	if p.Header.FieldSelectors.Has(FieldSelectorAnalogInput) {
		p.AnalogInput1VoltageMilliVolts = binary.BigEndian.Uint16(b[i:])
		i += lengthOfAnalogInput1
		p.AnalogInput2VoltageMilliVolts = binary.BigEndian.Uint16(b[i:])
		i += lengthOfAnalogInput2
		p.AnalogInput3VoltageMilliVolts = binary.BigEndian.Uint16(b[i:])
		i += lengthOfAnalogInput3
		p.AnalogInput4VoltageMilliVolts = binary.BigEndian.Uint16(b[i:])
		i += lengthOfAnalogInput4
	}
	if p.Header.FieldSelectors.Has(FieldSelectorPower) {
		p.MainPowerVoltageMilliVolts = binary.BigEndian.Uint16(b[i:])
		i += lengthOfMainPower
		p.ExternalBatteryVoltageMilliVolts = binary.BigEndian.Uint16(b[i:])
		i += lengthOfExternalBattery
	}
	if p.Header.FieldSelectors.Has(FieldSelectorPulseCounter1) {
		p.PulseCounter1Rate = binary.BigEndian.Uint16(b[i:])
		i += lengthOfPulseCounter1Rate
		p.PulseCounter1 = binary.BigEndian.Uint32(b[i:])
		i += lengthOfPulseCounter1
	}
	if p.Header.FieldSelectors.Has(FieldSelectorPulseCounter2) {
		p.PulseCounter2Rate = binary.BigEndian.Uint16(b[i:])
		i += lengthOfPulseCounter2Rate
		p.PulseCounter2 = binary.BigEndian.Uint32(b[i:])
		i += lengthOfPulseCounter2
	}
	if p.Header.FieldSelectors.Has(FieldSelectorTrip1) {
		p.Trip1DistanceMetres = binary.BigEndian.Uint32(b[i:])
		i += lengthOfTrip1Distance
	}
	if p.Header.FieldSelectors.Has(FieldSelectorTrip2) {
		p.Trip2DistanceMetres = binary.BigEndian.Uint32(b[i:])
		i += lengthOfTrip2Distance
	}
	// Second half of the IO selector (see above).
	if p.Header.FieldSelectors.Has(FieldSelectorIO) {
		p.OutputStatus = OutputStatus(b[i])
		i += lengthOfOutputStatus
	}
	if p.Header.FieldSelectors.Has(FieldSelectorIButton) {
		// 48-bit big-endian ID, widened into a uint64.
		p.IButtonKeyID = bigEndianUint48(b[i:])
		i += lengthOfIButtonKeyID
	}
	if p.Header.FieldSelectors.Has(FieldSelectorDriverLogKeypad) {
		p.DriverLogKeypad = DriverLogKeypad(b[i])
		i += lengthOfDriverLogKeypadState
	}
	if p.Header.FieldSelectors.Has(FieldSelectorGPSExtras) {
		// Altitude is signed (may be below sea level).
		p.GPSAltitudeMetres = int16(binary.BigEndian.Uint16(b[i:]))
		i += lengthOfGPSAltitude
	}
	if p.Header.FieldSelectors.Has(FieldSelectorSnapshotCounter) {
		p.SnapshotCounter = binary.BigEndian.Uint16(b[i:])
		i += lengthOfSnapshotCounter
	}
	if p.Header.FieldSelectors.Has(FieldSelectorFlags) {
		p.StateFlags = StateFlags(binary.BigEndian.Uint32(b[i:]))
		i += lengthOfStateFlags
		p.UserDefinedFlags = binary.BigEndian.Uint32(b[i:])
		i += lengthOfUserDefinedFlags
	}
	if p.Header.FieldSelectors.Has(FieldSelectorCellInfo) {
		p.CellInfoFlags = CellInfoFlags(b[i])
		i += lengthOfCellInfoFlags
		// LAI is a 24-bit big-endian value.
		p.LocationAreaIdentity = bigEndianUint24(b[i:])
		i += lengthOfLocationAreaIdentity
		p.LocationAreaCode = binary.BigEndian.Uint16(b[i:])
		i += lengthOfLocationAreaCode
		p.GSMCellID = binary.BigEndian.Uint32(b[i:])
		i += lengthOfGSMCellID
		p.RSSI = int8(b[i])
		i += lengthOfReceivedSignalStrengthIndicator
	}
	if p.Header.FieldSelectors.Has(FieldSelectorExtendedDigitalInputs) {
		// The 16-bit extended form supersedes the 1-byte DigitalInputs
		// decoded earlier when both selectors are set.
		p.DigitalInputs = DigitalInputs(binary.BigEndian.Uint16(b[i:]))
		i += lengthOfExtendedDigitalInputs
	}
	if p.Header.FieldSelectors.Has(FieldSelectorEventSpecificBytes) {
		p.EventSpecificBytes = b[i:] // remainder of packet
	}
	return nil
}
dprotocol/packet.go
0.646237
0.567997
packet.go
starcoder
package iso20022 // Chain of parties involved in the settlement of a transaction, including receipts and deliveries, book transfers, treasury deals, or other activities, resulting in the movement of a security or amount of money from one account to another. type SettlementParties13 struct { // First party in the settlement chain. In a plain vanilla settlement, it is the Central Securities Depository where the counterparty requests to receive the financial instrument or from where the counterparty delivers the financial instruments. Depository *PartyIdentification46 `xml:"Dpstry,omitempty"` // Party that, in a settlement chain interacts with the depository. Party1 *PartyIdentificationAndAccount44 `xml:"Pty1,omitempty"` // Party that, in a settlement chain interacts with the party 1. Party2 *PartyIdentificationAndAccount44 `xml:"Pty2,omitempty"` // Party that, in a settlement chain interacts with the party 2. Party3 *PartyIdentificationAndAccount44 `xml:"Pty3,omitempty"` // Party that, in a settlement chain interacts with the party 3. Party4 *PartyIdentificationAndAccount44 `xml:"Pty4,omitempty"` // Party that, in a settlement chain interacts with the party 4. 
Party5 *PartyIdentificationAndAccount44 `xml:"Pty5,omitempty"` } func (s *SettlementParties13) AddDepository() *PartyIdentification46 { s.Depository = new(PartyIdentification46) return s.Depository } func (s *SettlementParties13) AddParty1() *PartyIdentificationAndAccount44 { s.Party1 = new(PartyIdentificationAndAccount44) return s.Party1 } func (s *SettlementParties13) AddParty2() *PartyIdentificationAndAccount44 { s.Party2 = new(PartyIdentificationAndAccount44) return s.Party2 } func (s *SettlementParties13) AddParty3() *PartyIdentificationAndAccount44 { s.Party3 = new(PartyIdentificationAndAccount44) return s.Party3 } func (s *SettlementParties13) AddParty4() *PartyIdentificationAndAccount44 { s.Party4 = new(PartyIdentificationAndAccount44) return s.Party4 } func (s *SettlementParties13) AddParty5() *PartyIdentificationAndAccount44 { s.Party5 = new(PartyIdentificationAndAccount44) return s.Party5 }
SettlementParties13.go
0.683631
0.499573
SettlementParties13.go
starcoder
// Package types defines the request/response payload shapes for the log
// search estimated-usage API.
package types

// AutoCompleteValueSyncDefinition is a single label/value autocomplete entry.
type AutoCompleteValueSyncDefinition struct {
	// The label of the autocomplete value.
	Label string `json:"label"`
	// The value of the autocomplete value.
	Value string `json:"value"`
}

// EstimatedUsageDetails reports the scan cost of running a query.
type EstimatedUsageDetails struct {
	// Amount of data scanned in bytes, to run the query.
	DataScannedInBytes int64 `json:"dataScannedInBytes,omitempty"`
}

// EstimatedUsageDetailsWithTier reports the scan cost for a single data tier.
type EstimatedUsageDetailsWithTier struct {
	// Name of the data tier. Supported Values are Continuous, Frequent, Infrequent
	Tier string `json:"tier,omitempty"`
	// Amount of data scanned in bytes, to run the query.
	DataScannedInBytes int64 `json:"dataScannedInBytes,omitempty"`
}

// LogSearchEstimatedUsageByTierDefinition is the per-tier estimated usage
// response.
type LogSearchEstimatedUsageByTierDefinition struct {
	// Time zone to get the estimated usage details. Follow the format in the
	// [IANA Time Zone Database](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List).
	Timezone string `json:"timezone"`
	// Per-tier usage entries.
	EstimatedUsageDetails []EstimatedUsageDetailsWithTier `json:"estimatedUsageDetails"`
}

// LogSearchEstimatedUsageDefinition is the aggregate estimated usage response.
type LogSearchEstimatedUsageDefinition struct {
	// Time zone to get the estimated usage details. Follow the format in the
	// [IANA Time Zone Database](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List).
	Timezone string `json:"timezone"`
	// Aggregate usage details.
	EstimatedUsageDetails *EstimatedUsageDetails `json:"estimatedUsageDetails"`
}

// LogSearchEstimatedUsageRequest is the v1 estimated-usage request body.
type LogSearchEstimatedUsageRequest struct {
	// Define the parsing mode to scan the JSON format log messages. Possible
	// values are: 1. `AutoParse` 2. `Manual` In AutoParse mode, the system
	// automatically figures out fields to parse based on the search query.
	// While in the Manual mode, no fields are parsed out automatically. For
	// more information see [Dynamic Parsing](https://help.sumologic.com/?cid=0011).
	ParsingMode string `json:"parsingMode,omitempty"`
	// Time zone to get the estimated usage details. Follow the format in the
	// [IANA Time Zone Database](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List).
	Timezone string `json:"timezone"`
}

// LogSearchEstimatedUsageRequestV2 is the v2 estimated-usage request body,
// carrying the full query definition alongside the time zone.
type LogSearchEstimatedUsageRequestV2 struct {
	// Query to perform.
	QueryString string `json:"queryString"`
	// Time range over which to estimate usage.
	TimeRange *ResolvableTimeRange `json:"timeRange"`
	// This has the value `true` if the search is to be run by receipt time
	// and `false` if it is to be run by message time.
	RunByReceiptTime bool `json:"runByReceiptTime,omitempty"`
	// Definition of the query parameters.
	QueryParameters []QueryParameterSyncDefinition `json:"queryParameters,omitempty"`
	// Time zone to get the estimated usage details. Follow the format in the
	// [IANA Time Zone Database](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List).
	Timezone string `json:"timezone"`
}

// ParameterAutoCompleteSyncDefinition configures how autocomplete values are
// sourced for a query parameter (inline values or a lookup file).
type ParameterAutoCompleteSyncDefinition struct {
	// The autocomplete parameter type. Supported values are:
	// 1. `SKIP_AUTOCOMPLETE` 2. `CSV_AUTOCOMPLETE` 3. `AUTOCOMPLETE_KEY`
	// 4. `VALUE_ONLY_AUTOCOMPLETE` 5. `VALUE_ONLY_LOOKUP_AUTOCOMPLETE`
	// 6. `LABEL_VALUE_LOOKUP_AUTOCOMPLETE`
	AutoCompleteType string `json:"autoCompleteType"`
	// The autocomplete key to be used to fetch autocomplete values.
	AutoCompleteKey string `json:"autoCompleteKey,omitempty"`
	// The array of values of the corresponding autocomplete parameter.
	AutoCompleteValues []AutoCompleteValueSyncDefinition `json:"autoCompleteValues,omitempty"`
	// The lookup file to use as a source for autocomplete values.
	LookupFileName string `json:"lookupFileName,omitempty"`
	// The column from the lookup file to use for autocomplete labels.
	LookupLabelColumn string `json:"lookupLabelColumn,omitempty"`
	// The column from the lookup file to fill the actual value when a
	// particular label is selected.
	LookupValueColumn string `json:"lookupValueColumn,omitempty"`
}

// QueryParameterSyncDefinition defines a single named query parameter.
type QueryParameterSyncDefinition struct {
	// The name of the parameter.
	Name string `json:"name"`
	// The label of the parameter.
	Label string `json:"label"`
	// A description of the parameter.
	Description string `json:"description"`
	// The data type of the parameter. Supported values are:
	// 1. `NUMBER` 2. `STRING` 3. `QUERY_FRAGMENT` 4. `SEARCH_KEYWORD`
	DataType string `json:"dataType"`
	// A value for the parameter. Should be compatible with the type set in
	// dataType field.
	Value string `json:"value"`
	// Optional autocomplete configuration for this parameter.
	AutoComplete *ParameterAutoCompleteSyncDefinition `json:"autoComplete"`
}
service/cip/types/log_searches_estimated_usage_types.go
0.711631
0.574305
log_searches_estimated_usage_types.go
starcoder