code
stringlengths
114
1.05M
path
stringlengths
3
312
quality_prob
float64
0.5
0.99
learning_prob
float64
0.2
1
filename
stringlengths
3
168
kind
stringclasses
1 value
package g2d

import (
	"github.com/angelsolaorbaiceta/inkgeom/nums"
)

// A Segment is a straight line defined between two points.
type Segment struct {
	start, end *Point
}

// MakeSegment creates a new segment defined between the given start and end points.
func MakeSegment(start, end *Point) *Segment {
	return &Segment{start, end}
}

// MakeSegmentFromCoords creates a new segment from the projections of the start and end points.
func MakeSegmentFromCoords(startX, startY, endX, endY float64) *Segment {
	return MakeSegment(
		MakePoint(startX, startY),
		MakePoint(endX, endY),
	)
}

// Start returns the segment's start point.
func (s *Segment) Start() *Point {
	return s.start
}

// End returns the segment's end point.
func (s *Segment) End() *Point {
	return s.end
}

// Length computes the total length of the segment.
func (s *Segment) Length() float64 {
	return s.start.DistanceTo(s.end)
}

/* <-- Methods --> */

// LengthBetween computes the length of a portion of the segment between two given t values.
func (s *Segment) LengthBetween(startT, endT nums.TParam) float64 {
	return s.Length() * startT.DistanceTo(endT)
}

// PointAt computes an intermediate point in the segment.
// The t parameter ranges over [nums.MinT, nums.MaxT]; the point is obtained by
// linear interpolation of the start and end projections.
func (s *Segment) PointAt(t nums.TParam) *Point {
	var (
		minTVal = nums.MinT.Value()
		maxTVal = nums.MaxT.Value()
	)

	return MakePoint(
		nums.LinInterpol(minTVal, s.start.x, maxTVal, s.end.x, t.Value()),
		nums.LinInterpol(minTVal, s.start.y, maxTVal, s.end.y, t.Value()),
	)
}

/*
DirectionVersor computes the versor which points in the advancing direction of
the segment's [start -> end].
*/
func (s *Segment) DirectionVersor() *Vector {
	return s.start.VectorTo(s.end).ToVersor()
}

// NormalVersor computes the versor perpendicular to the direction versor of the segment.
func (s *Segment) NormalVersor() *Vector {
	return s.DirectionVersor().Perpendicular()
}

/*
RefFrame returns the reference frame of the segment. The reference frame's i
versor points in the direction of the direction versor.
*/
func (s *Segment) RefFrame() *RefFrame {
	return MakeRefFrameWithIVersor(s.DirectionVersor())
}
g2d/segment.go
0.85186
0.592166
segment.go
starcoder
package filter

import (
	"fmt"

	"gorm.io/gorm"
	"goyave.dev/goyave/v4/helper"
)

// Operator used by filters to build the SQL query.
// The operator function modifies the GORM statement (most of the time by adding
// a WHERE condition) then returns the modified statement.
// Operators may need arguments (e.g. "$eq", equals needs a value to compare the field to);
// RequiredArguments define the minimum number of arguments a client must send in order to
// use this operator in a filter. RequiredArguments is checked during Filter parsing.
type Operator struct {
	Function          func(tx *gorm.DB, filter *Filter, column string) *gorm.DB
	RequiredArguments uint8
}

var (
	// Operators definitions. The key is the query representation of the operator, (e.g. "$eq").
	Operators = map[string]*Operator{
		"$eq":  {Function: basicComparison("="), RequiredArguments: 1},
		"$ne":  {Function: basicComparison("<>"), RequiredArguments: 1},
		"$gt":  {Function: basicComparison(">"), RequiredArguments: 1},
		"$lt":  {Function: basicComparison("<"), RequiredArguments: 1},
		"$gte": {Function: basicComparison(">="), RequiredArguments: 1},
		"$lte": {Function: basicComparison("<="), RequiredArguments: 1},
		// Prefix match: LIKE "arg%". The argument is escaped so LIKE
		// metacharacters in it are matched literally.
		"$starts": {
			Function: func(tx *gorm.DB, filter *Filter, column string) *gorm.DB {
				query := column + " LIKE ?"
				value := helper.EscapeLike(filter.Args[0]) + "%"
				return filter.Where(tx, query, value)
			},
			RequiredArguments: 1,
		},
		// Suffix match: LIKE "%arg".
		"$ends": {
			Function: func(tx *gorm.DB, filter *Filter, column string) *gorm.DB {
				query := column + " LIKE ?"
				value := "%" + helper.EscapeLike(filter.Args[0])
				return filter.Where(tx, query, value)
			},
			RequiredArguments: 1,
		},
		// Substring match: LIKE "%arg%".
		"$cont": {
			Function: func(tx *gorm.DB, filter *Filter, column string) *gorm.DB {
				query := column + " LIKE ?"
				value := "%" + helper.EscapeLike(filter.Args[0]) + "%"
				return filter.Where(tx, query, value)
			},
			RequiredArguments: 1,
		},
		// Substring exclusion: NOT LIKE "%arg%".
		"$excl": {
			Function: func(tx *gorm.DB, filter *Filter, column string) *gorm.DB {
				query := column + " NOT LIKE ?"
				value := "%" + helper.EscapeLike(filter.Args[0]) + "%"
				return filter.Where(tx, query, value)
			},
			RequiredArguments: 1,
		},
		// Multi-value comparisons receive the whole Args slice as a single
		// bind parameter; GORM expands it into the IN (...) list.
		"$in":    {Function: multiComparison("IN"), RequiredArguments: 1},
		"$notin": {Function: multiComparison("NOT IN"), RequiredArguments: 1},
		"$isnull": {
			Function: func(tx *gorm.DB, filter *Filter, column string) *gorm.DB {
				return filter.Where(tx, column+" IS NULL")
			},
			RequiredArguments: 0,
		},
		"$notnull": {
			Function: func(tx *gorm.DB, filter *Filter, column string) *gorm.DB {
				return filter.Where(tx, column+" IS NOT NULL")
			},
			RequiredArguments: 0,
		},
		"$between": {
			Function: func(tx *gorm.DB, filter *Filter, column string) *gorm.DB {
				query := column + " BETWEEN ? AND ?"
				return filter.Where(tx, query, filter.Args[0], filter.Args[1])
			},
			RequiredArguments: 2,
		},
	}
)

// basicComparison builds an operator function producing "<column> <op> ?"
// bound to the filter's first argument.
func basicComparison(op string) func(tx *gorm.DB, filter *Filter, column string) *gorm.DB {
	return func(tx *gorm.DB, filter *Filter, column string) *gorm.DB {
		query := fmt.Sprintf("%s %s ?", column, op)
		return filter.Where(tx, query, filter.Args[0])
	}
}

// multiComparison builds an operator function producing "<column> <op> ?"
// bound to the entire argument slice (for IN / NOT IN).
func multiComparison(op string) func(tx *gorm.DB, filter *Filter, column string) *gorm.DB {
	return func(tx *gorm.DB, filter *Filter, column string) *gorm.DB {
		query := fmt.Sprintf("%s %s ?", column, op)
		return filter.Where(tx, query, filter.Args)
	}
}
operator.go
0.695545
0.453504
operator.go
starcoder
package envelopes

import (
	"bytes"
	"errors"
	"fmt"
	"math/big"
	"regexp"
	"sort"
	"strings"
)

// AssetType is a character code the uniquely identifies a type of asset. For currencies, that is a three-letter code.
// For securities like stocks, it will be the set of characters that are used to trade shares of those stocks.
// For instance:
//  United States Dollar -> USD
//  Microsoft Stock Shares -> MSFT
type AssetType string

const (
	// DefaultAsset is the label that will be used when parsing a balance given as just a number i.e. with no label.
	DefaultAsset AssetType = "USD"
)

// Exchange represents the known conversion rates from one asset to another. For example an instance of Exchange may
// contain the rates needed to get all types TO United States Dollars, from a host of other types of assets, like Euros,
// shares of stock, etc.
type Exchange map[AssetType]float64

// ErrUnknownAsset indicates that an asset was requested that is not present.
type ErrUnknownAsset AssetType

func (e ErrUnknownAsset) Error() string {
	return fmt.Sprintf("could not find AssetType %s", AssetType(e))
}

// Balance captures an amount of USD pennies.
type Balance map[AssetType]*big.Rat

// zero is the comparison baseline used to detect inconsequential components.
var zero = big.NewRat(0, 100)

// Add combines two balances, summing any shared components, and including unmatched components without further
// processing.
// Returns a new instance of a Balance, without modifying the two original balances.
func (b Balance) Add(other Balance) Balance {
	sum := make(Balance, len(b))

	// Track the keys of `other` that have no counterpart in `b`.
	unseen := make(map[AssetType]struct{}, len(other))
	for key := range other {
		unseen[key] = struct{}{}
	}

	for key, bMag := range b {
		if otherMag, ok := other[key]; ok {
			delete(unseen, key)
			newMag := &big.Rat{}
			sum[key] = newMag.Add(bMag, otherMag)
		} else {
			// NOTE(review): this is a shallow copy of big.Rat — it shares the
			// underlying word slices with the source. Safe only as long as the
			// source Rats are never mutated in place; verify callers.
			clone := *bMag
			sum[key] = &clone
		}
	}

	for key := range unseen {
		clone := *other[key]
		sum[key] = &clone
	}

	return sum
}

// Sub combines two balances, however all of the parameters magnitudes are treated as if they are inverted.
// See Add for more behavior details.
func (b Balance) Sub(other Balance) Balance {
	sum := make(Balance, len(b))

	unseen := make(map[AssetType]struct{}, len(other))
	for key := range other {
		unseen[key] = struct{}{}
	}

	for key, bMag := range b {
		if otherMag, ok := other[key]; ok {
			delete(unseen, key)
			newMag := &big.Rat{}
			sum[key] = newMag.Sub(bMag, otherMag)
		} else {
			clone := *bMag
			sum[key] = &clone
		}
	}

	// Components only present in `other` appear negated in the result.
	for key := range unseen {
		var negated big.Rat
		sum[key] = negated.Neg(other[key])
	}

	return sum
}

// Equal determines whether two balances are comprised of the same mix of assets with the same magnitude assigned to
// each. Zero-magnitude components are ignored, and a nil Balance equals a Balance whose components are all zero.
func (b Balance) Equal(other Balance) bool {
	bNonZero := b.nonZeroBalances()
	if bNonZero == 0 && other == nil {
		return true
	} else if other == nil {
		return false
	}

	if bNonZero != other.nonZeroBalances() {
		return false
	}

	for id, mag := range b {
		if mag.Cmp(zero) == 0 {
			continue
		}
		if otherMag, ok := other[id]; !ok || mag.Cmp(otherMag) != 0 {
			return false
		}
	}
	return true
}

// nonZeroBalances counts the components of the balance whose magnitude is not zero.
func (b Balance) nonZeroBalances() uint {
	count := uint(0)
	for _, mag := range b {
		if mag.Cmp(zero) != 0 {
			count++
		}
	}
	return count
}

// Negate inverts the sign of each entry in a balance.
func (b Balance) Negate() Balance {
	retval := make(Balance, len(b))
	for key, value := range b {
		var negated big.Rat
		retval[key] = negated.Neg(value)
	}
	return retval
}

// Scale multiplies each entry in a Balance by a constant amount. This may be useful for dividing
// a balance into shares, for example.
// NOTE(review): big.Rat.SetFloat64 returns nil when s is NaN or ±Inf, which would
// make the Mul below panic — confirm callers never pass non-finite values.
func (b Balance) Scale(s float64) Balance {
	retval := make(Balance, len(b))
	t := new(big.Rat).SetFloat64(s)
	for key, value := range b {
		var scaled big.Rat
		retval[key] = scaled.Mul(value, t)
	}
	return retval
}

// Normalize finds the total value of a Balance, but expresses the answer as a scalar instead of a multi-component
// Balance. Every component must have a rate in the given Exchange, otherwise ErrUnknownAsset is returned.
func (b Balance) Normalize(rates Exchange) (*big.Rat, error) {
	sum := new(big.Rat)
	var scaled big.Rat
	for k, v := range b {
		if rawRate, ok := rates[k]; ok {
			rate := new(big.Rat).SetFloat64(rawRate)
			scaled.Mul(v, rate)
			sum.Add(sum, &scaled)
		} else {
			return nil, ErrUnknownAsset(k)
		}
	}
	return sum, nil
}

// String renders the balance as colon-separated "ASSET magnitude" clauses,
// sorted by asset code — the inverse of the format ParseBalanceWithDefault
// accepts (which splits on ":").
func (b Balance) String() string {
	const defaultResult = "USD 0.00"
	const precision = 3

	if len(b) == 1 {
		// When there's only a single asset type - we don't want to pare or deal with extra allocations.
		for k := range b {
			return fmt.Sprintf("%s %s", k, b[k].FloatString(precision))
		}
	} else if len(b) > 1 {
		// When there are multiple asset types, we want to remove unnecessary components
		b.pare()
		if len(b) == 0 {
			// Just like the default case below, if there are multiple asset types that should all fall away, skip
			// all further processing.
			return defaultResult
		}

		keys := make([]string, 0, len(b))
		for key := range b {
			keys = append(keys, string(key))
		}
		sort.Strings(keys)

		buf := &bytes.Buffer{}
		for i := range keys {
			fmt.Fprintf(buf, "%s %s:", keys[i], b[AssetType(keys[i])].FloatString(precision))
		}
		// Drop the trailing ":" left by the loop above.
		buf.Truncate(buf.Len() - 1)
		return buf.String()
	}

	// In the default case, where balance is zero because there are no assets, we want to continue keeping the same IDs
	// that were previously generated. Because previously a zero balance specifically meant that there were zero USD, to
	// preserve the existing persist package's behavior without any breaking changes, we must assume the value
	// "USD 0.00" here.
	return defaultResult
}

var (
	// balancePattern matches one "LABEL magnitude" clause per line; the label is
	// optional (see ParseBalanceWithDefault) and the magnitude may carry a sign,
	// thousands separators, and a decimal part.
	balancePattern = regexp.MustCompile(`(?m:^\s*(?P<id>[^\s\-\d]+?)??\s*(?P<magnitude>-?(?:[\d]*|(?:\d{1,3}(?:,\d{3})+))(?:\.\d+)?)$)`)
)

// ParseBalanceWithDefault extracts information about a balance from text. Any line items that do not have a label are
// treated as the specified default asset type.
// Lines with the same asset type are summed together.
func ParseBalanceWithDefault(raw []byte, def AssetType) (Balance, error) {
	var created Balance
	const noMatchText = "unable to find balance in text"

	// Clauses are separated by ":", matching the format produced by String.
	clauses := strings.Split(string(raw), ":")
	for _, clause := range clauses {
		matches := balancePattern.FindAllStringSubmatch(clause, -1)
		if len(matches) == 0 {
			return nil, errors.New(noMatchText)
		}

		for _, match := range matches {
			// The magnitude group can match empty; skip those non-matches.
			if len(match[0]) == 0 {
				continue
			}
			id := AssetType(match[1])
			rawMagnitude := match[2]
			rawMagnitude = strings.Replace(string(rawMagnitude), ",", "", -1)
			rehydrated := new(big.Rat)
			if err := rehydrated.UnmarshalText([]byte(rawMagnitude)); err != nil {
				return nil, err
			}
			if id == "" {
				id = def
			}
			if created == nil {
				created = make(Balance)
			}
			if existing, ok := created[id]; ok {
				// Accumulates in place: the map entry is the receiver.
				created[id].Add(existing, rehydrated)
			} else {
				created[id] = rehydrated
			}
		}

		var err error
		if created == nil {
			err = errors.New(noMatchText)
			return nil, err
		}
	}
	return created, nil
}

// ParseBalance converts between a string representation of an amount of dollars
// into an int64 number of cents.
func ParseBalance(raw []byte) (result Balance, err error) {
	return ParseBalanceWithDefault(raw, DefaultAsset)
}

// pare removes components of a balance that are inconsequential - i.e. magnitude of zero.
func (b Balance) pare() {
	for k, v := range b {
		if v.Cmp(zero) == 0 {
			delete(b, k)
		}
	}
}
balance.go
0.766818
0.531513
balance.go
starcoder
package layers

import (
	"github.com/google/gopacket"
)

// TLSHandshakeType defines the type of data after the TLS Record.
type TLSHandshakeType uint8

// TLSHandshakeType known values.
const (
	TLSHandshakeUnknown     TLSHandshakeType = 0
	TLSHandshakeClientHello TLSHandshakeType = 1
	TLSHandshakeServerHello TLSHandshakeType = 2
)

// TLSHandshakeProtocol holds the fields extracted from a handshake message.
type TLSHandshakeProtocol struct {
	HandshakeType TLSHandshakeType
	ServerName    string
}

// TLSHandshakeRecord defines the structure of a Handshake Record
type TLSHandshakeRecord struct {
	TLSRecordHeader
	TLSHandshakeProtocol
}

// decodeFromBytes decodes the slice into the TLS struct. Only ClientHello
// messages are inspected; the goal is to extract the SNI hostname (extension
// type 0), which is stored in t.ServerName. All other message types, and
// ClientHellos without an SNI extension, return nil with no fields set beyond
// the record header.
//
// NOTE(review): the offset arithmetic below indexes `data` without verifying
// its length first (only one `current > len(data)` check exists, after the
// compression methods). A truncated or malformed record can make this panic
// with an out-of-range index — confirm the caller guarantees a complete
// record, or add bounds checks.
func (t *TLSHandshakeRecord) decodeFromBytes(h TLSRecordHeader, data []byte, df gopacket.DecodeFeedback) error {
	// TLS Record Header
	t.ContentType = h.ContentType
	t.Version = h.Version
	t.Length = h.Length

	current := 0
	current = current + 1
	// Only ClientHello (type 0x1) is decoded.
	if data[0] != 0x1 {
		return nil
	}
	t.HandshakeType = TLSHandshakeClientHello

	// Skip over another length
	current += 3
	// Skip over protocolversion
	current += 2
	// Skip over random number
	current += 4 + 28
	// Skip over session ID
	sessionIDLength := int(data[current])
	current += 1
	current += sessionIDLength

	// Skip over cipher suites (2-byte length prefix).
	cipherSuiteLength := (int(data[current]) << 8) + int(data[current+1])
	current += 2
	current += cipherSuiteLength

	// Skip over compression methods (1-byte length prefix).
	compressionMethodLength := int(data[current])
	current += 1
	current += compressionMethodLength

	if current > len(data) {
		return nil
	}

	// Skip over the total extensions length; then walk extension by extension.
	current += 2
	hostname := ""
	for current < len(data) && hostname == "" {
		extensionType := (int(data[current]) << 8) + int(data[current+1])
		current += 2

		extensionDataLength := (int(data[current]) << 8) + int(data[current+1])
		current += 2

		// Extension type 0 is server_name (SNI).
		if extensionType == 0 {

			// Skip over number of names as we're assuming there's just one
			current += 2

			nameType := data[current]
			current += 1
			if nameType != 0 {
				return nil
			}
			nameLen := (int(data[current]) << 8) + int(data[current+1])
			current += 2
			hostname = string(data[current : current+nameLen])
		}

		current += extensionDataLength
	}
	if hostname == "" {
		return nil
	}
	t.ServerName = hostname
	return nil
}
layers/tls_handshake.go
0.615435
0.429429
tls_handshake.go
starcoder
package gophertags import ( "crypto/rand" "math/big" "math/bits" r255 "github.com/gtank/ristretto255" "golang.org/x/crypto/sha3" ) // SecretKey is the secret key held by the ultimate recipient of the messages. // It is used to derive public keys and detection keys for distribution. // Internally, it's a vector of Ristretto255 scalars (the detection key) and Ristretto255 elements (the public key). type SecretKey struct { sk []*r255.Scalar pk []*r255.Element } // PublicKey is the public key that will be used to send messages to the recipient. type PublicKey struct { internal []*r255.Element } // DetectionKey is given to the adversarial mailbox to test inbound messages for a given recipient. // Detection keys have an inherent false positive rate set at construction. type DetectionKey struct { internal []*r255.Scalar } type Flag struct { u *r255.Element y *r255.Scalar ciphertexts *big.Int // as bitvec } // NewSecretKey constructs a secret key with a maximum false positive rate of 2^-gamma. func NewSecretKey(gamma int) *SecretKey { key := &SecretKey{ sk: make([]*r255.Scalar, gamma), pk: make([]*r255.Element, gamma), } randBytes := make([]byte, 64) for i := 0; i < gamma; i++ { n, err := rand.Read(randBytes) if n != 64 || err != nil { // If you aren't getting randomness, there's no way the rest of this is going to work. // TODO: It would be good to add a function that takes a custom reader for more predictable testing. panic("panic! at the keygen") } key.sk[i] = r255.NewScalar().FromUniformBytes(randBytes) key.pk[i] = r255.NewElement().ScalarBaseMult(key.sk[i]) } return key } // PublicKey returns a deep copy of the secret key's associated public key. 
func (sk *SecretKey) PublicKey() *PublicKey { // Language Wars Episode 2: The Lack of the Clones // TODO: https://github.com/gtank/ristretto255/issues/35 pkCopy := make([]*r255.Element, len(sk.pk)) for i := 0; i < len(pkCopy); i++ { byteRepr := sk.pk[i].Encode(nil) pkCopy[i] = r255.NewElement() _ = pkCopy[i].Decode(byteRepr) } return &PublicKey{internal: pkCopy} } // ExtractDetectionKey produces a detection key with false positive rate 0 <= 2^-n <= 2^-gamma. // Internally, it's a copy of the first n scalars in the secret key. func (sk *SecretKey) ExtractDetectionKey(n int) *DetectionKey { secrets := make([]*r255.Scalar, n) for i := 0; i < n; i++ { byteRepr := sk.sk[i].Encode(nil) secrets[i] = r255.NewScalar() _ = secrets[i].Decode(byteRepr) } return &DetectionKey{internal: secrets} } // hashG3Bit implements H: G^3 -> {0,1} in a manner consistent with the Rust crate `fuzzytags` func hashG3ToBit(rB, rH, zB *r255.Element) uint { digest := sha3.New256() digest.Write(rB.Encode(nil)) digest.Write(rH.Encode(nil)) digest.Write(zB.Encode(nil)) return uint(digest.Sum(nil)[0] & 0x01) } // hashGVecToScalar hashes a Ristretto element and a bit vector of ciphertexts to a // Ristretto scalar in a manner consistent with the Rust crate `fuzzytags`. func hashGVecToScalar(u *r255.Element, bitVec *big.Int) *r255.Scalar { // TODO: Recall enough big.Int internals to use Bytes() or FillBytes() here? // Pack bits into byte slice of necessary size, implicitly zero-padded to nearest byte. byteRepr := make([]byte, 0, bitVec.BitLen()+7/8) for _, word := range bitVec.Bits() { for i := 0; i < bits.UintSize; i += 8 { if len(byteRepr) >= cap(byteRepr) { break } byteRepr = append(byteRepr, byte(word)) word >>= 8 } } digest := sha3.Sum512(u.Encode(byteRepr)) return r255.NewScalar().FromUniformBytes(digest[:]) } // GenerateFlag creates a randomized flag ciphertext for the given public key. 
func (pk *PublicKey) GenerateFlag() *Flag { uniformBytes := make([]byte, 128) _, err := rand.Read(uniformBytes) if err != nil { panic("error sampling scalar entropy") } // Random group elements r := r255.NewScalar().FromUniformBytes(uniformBytes[0:64]) z := r255.NewScalar().FromUniformBytes(uniformBytes[64:128]) u := r255.NewElement().ScalarBaseMult(r) w := r255.NewElement().ScalarBaseMult(z) // TODO need to double check that this actually behaves like I think it does. Specifically check padding. bitVec := new(big.Int) for i, H := range pk.internal { rH := r255.NewElement().ScalarMult(r, H) c := hashG3ToBit(u, rH, w) ^ 0x01 bitVec.SetBit(bitVec, i, c) } m := hashGVecToScalar(u, bitVec) // y = 1/r * (z - m) y := r255.NewScalar().Invert(r) y.Multiply(y, z.Subtract(z, m)) // smashes z return &Flag{u, y, bitVec} } // Test returns true if the given flag matches the detection key. func (dk *DetectionKey) Test(f *Flag) bool { // Thanks to <NAME> and <NAME>, without whom I would also // have written a universal tag bug here. See // https://git.openprivacy.ca/openprivacy/fuzzytags/commit/e19b99112e3fe70cb92b09db9595d3e05ef26f7c if f.u.Equal(r255.NewElement()) == 1 || f.y.Equal(r255.NewScalar()) == 1 { return false } m := hashGVecToScalar(f.u, f.ciphertexts) scalars := []*r255.Scalar{m, f.y} elements := []*r255.Element{r255.NewElement().Base(), f.u} w := r255.NewElement().MultiScalarMult(scalars, elements) var pass uint = 0x01 for i, x_i := range dk.internal { xU := r255.NewElement().ScalarMult(x_i, f.u) k := hashG3ToBit(f.u, xU, w) b := k ^ f.ciphertexts.Bit(i) pass = pass & b } if pass == 0x01 { return true } else { return false } }
tags.go
0.579757
0.418935
tags.go
starcoder
package dag

import (
	"github.com/goombaio/dag"

	"github.com/darkowlzz/operator-toolkit/operator/v1/operand"
)

// OperandDAG is a directed acyclic graph representation of the opereand
// dependencies. This is used to resolve the dependencies of the operands on
// each other and find an optimal execution path.
type OperandDAG struct {
	*dag.DAG
}

// NewOperandDAG builds a DAG from the given operands: one vertex per operand
// (keyed by name), and one edge from each dependency to its dependent.
func NewOperandDAG(operands []operand.Operand) (*OperandDAG, error) {
	od := &OperandDAG{DAG: dag.NewDAG()}

	// Create vertices for all the operands.
	for _, op := range operands {
		v := dag.NewVertex(op.Name(), op)
		if err := od.AddVertex(v); err != nil {
			return nil, err
		}
	}

	// Create edges between the vertices based on the operand's depends on
	// property.
	for _, op := range operands {
		headVertex, err := od.GetVertex(op.Name())
		if err != nil {
			return nil, err
		}
		// Connect the operand to all the vertices it depends on.
		for _, dep := range op.Requires() {
			tailVertex, err := od.GetVertex(dep)
			if err != nil {
				return nil, err
			}
			if err := od.AddEdge(tailVertex, headVertex); err != nil {
				return nil, err
			}
		}
	}

	return od, nil
}

// Order groups the operands by execution step: result[i] holds all operands
// that can run concurrently at step i, once everything in earlier steps has
// completed.
func (od *OperandDAG) Order() (operand.OperandOrder, error) {
	soln, steps, err := od.solve()
	if err != nil {
		return nil, err
	}
	result := make([][]operand.Operand, steps)
	for name, step := range soln {
		v, verr := od.GetVertex(name)
		if verr != nil {
			return result, verr
		}
		result[step] = append(result[step], v.Value.(operand.Operand))
	}
	return result, nil
}

// solve solves the graph traversal in DAG with steps. Returns a map containing
// vertex name with step number and total number of steps in the solution.
func (od *OperandDAG) solve() (map[string]int, int, error) {
	order := map[string]int{}

	// Start from root.
	roots := od.SourceVertices()

	// Init order step and roots.
	step := 0
	newRoots := roots
	var err error
	// Advance level by level until no unprocessed vertices remain.
	for len(newRoots) > 0 {
		newRoots, err = od.solveStep(step, newRoots, order)
		if err != nil {
			return nil, step, err
		}
		step++
	}

	return order, step, nil
}

// solveStep takes a step number, current roots and an order, and returns new
// current roots and updates the order. A vertex is assigned the current step
// only once all of its predecessors already appear in the order.
func (od *OperandDAG) solveStep(step int, currentRoots []*dag.Vertex, order map[string]int) ([]*dag.Vertex, error) {
	newRoots := []*dag.Vertex{}

	for _, c := range currentRoots {
		// Check if the current root exists in the order.
		if _, exists := order[c.ID]; !exists {
			// Check if the predecessors exists in the order. If not, skip,
			// else, add to order.
			pp, perr := od.Predecessors(c)
			if perr != nil {
				return nil, perr
			}
			if len(pp) == 0 {
				// If no predecessor, add to order, it's the root.
				order[c.ID] = step
				var serr error
				newRoots, serr = od.addSuccessorsToNewRoots(c, newRoots)
				if serr != nil {
					return nil, serr
				}
				continue
			}

			satisfied := true
			for _, p := range pp {
				if _, exists := order[p.ID]; !exists {
					// An unordered predecessor defers this vertex to a later step.
					satisfied = false
				}
			}

			// Satisfied, then add to order.
			if satisfied {
				order[c.ID] = step
			}
		}

		// Get successors and add to new roots.
		var serr error
		newRoots, serr = od.addSuccessorsToNewRoots(c, newRoots)
		if serr != nil {
			return nil, serr
		}
	}

	return newRoots, nil
}

// addSuccessorsToNewRoots takes a vertex, fetches its successors and adds the
// successors to the newRoots list. This is used to create a list of all the
// adjacent vertices at the same level in the graph.
func (od *OperandDAG) addSuccessorsToNewRoots(v *dag.Vertex, newRoots []*dag.Vertex) ([]*dag.Vertex, error) {
	ss, serr := od.Successors(v)
	if serr != nil {
		return nil, serr
	}
	// Add to root if not exists.
	for _, s := range ss {
		if !od.vertexExists(newRoots, s) {
			newRoots = append(newRoots, s)
		}
	}
	return newRoots, nil
}

// vertexExists reports whether target is already present in vs (compared by ID).
func (od *OperandDAG) vertexExists(vs []*dag.Vertex, target *dag.Vertex) bool {
	for _, v := range vs {
		if v.ID == target.ID {
			return true
		}
	}
	return false
}
operator/v1/dag/dag.go
0.746786
0.557243
dag.go
starcoder
package compare

import "fmt"

// Path represents a path from the root of an object hierarchy
type Path []Fragment

// Fragment is an entry in a Path
type Fragment struct {
	// Operation holds the operation occurring at this level in the path.
	Operation interface{}
	// Reference holds the entry in the reference hierarchy to apply the operation to
	Reference interface{}
	// Value holds the equivalent entry in the value hierarchy to apply the operation to
	Value interface{}
}

// MemberOp is the fragment operation type for member comparisons.
type MemberOp string

// IndexOp is the fragment operation type for array / slice index comparisons.
type IndexOp int

// EntryOp is the fragment operation type for map entry comparisons.
type EntryOp struct {
	Key interface{} // The map key
}

// LengthOp is the fragment operation type for array / slice length comparisons.
type LengthOp string

// TypeOp is the fragment operation type for type comparisons.
type TypeOp string

// NilOp is the fragment operation type for nil-equality comparisons.
type NilOp string

// MissingOp is the fragment operation type for absent entries in arrays,
// slices and maps.
type MissingOp string

// Singleton operation values used by the Path builder methods below.
const (
	Length = LengthOp("·length")
	Type   = TypeOp("·type")
	Nil    = NilOp("nil")
	Key    = MissingOp("key")
)

// Format renders a member access as ".name". (fmt.Fprint inserts no space
// between adjacent string operands, so this prints without a separator.)
func (m MemberOp) Format(f fmt.State, r rune) {
	fmt.Fprint(f, ".", string(m))
}

// Format renders an index access as "[i]".
func (i IndexOp) Format(f fmt.State, r rune) {
	fmt.Fprintf(f, "[%v]", int(i))
}

// Format renders a map entry access as "[key]".
func (e EntryOp) Format(f fmt.State, r rune) {
	fmt.Fprintf(f, "[%v]", e.Key)
}

// with returns a copy of p with one extra fragment appended; p itself is
// never mutated, so derived paths can share a common prefix safely.
func (p Path) with(op, reference, value interface{}) Path {
	r := make(Path, len(p)+1)
	copy(r, p)
	r[len(p)] = Fragment{op, reference, value}
	return r
}

// Member returns a new Path with a member access fragment appended.
func (p Path) Member(name string, reference, value interface{}) Path {
	return p.with(MemberOp(name), reference, value)
}

// Length returns a new Path with a length query fragment appended.
func (p Path) Length(reference, value interface{}) Path {
	return p.with(Length, reference, value)
}

// Type returns a new Path with a type query fragment appended.
func (p Path) Type(reference, value interface{}) Path {
	return p.with(Type, reference, value)
}

// Nil returns a new Path with a nil query fragment appended.
func (p Path) Nil(reference, value interface{}) Path {
	return p.with(Nil, reference, value)
}

// Missing returns a new Path with a missing value fragment appended.
func (p Path) Missing(reference, value interface{}) Path {
	return p.with(Key, reference, value)
}

// Index returns a new Path with an array/slice index fragment appended.
func (p Path) Index(i int, reference, value interface{}) Path {
	return p.with(IndexOp(i), reference, value)
}

// Entry returns a new Path with a map entry fragment appended.
func (p Path) Entry(key, reference, value interface{}) Path {
	return p.with(EntryOp{key}, reference, value)
}

// Diff returns a new Path with a terminal diff fragment appended.
func (p Path) Diff(reference, value interface{}) Path {
	return p.with(nil, reference, value)
}

// Format renders the path as a difference report: the last fragment's
// operation and both values, followed by the access chain that led there.
func (p Path) Format(f fmt.State, r rune) {
	if len(p) == 0 {
		return
	}
	last := p[len(p)-1]
	remains := p[:len(p)-1]
	if last.Operation != nil {
		fmt.Fprint(f, last.Operation, " ")
	}
	fmt.Fprint(f, "⟦", last.Reference, "⟧ != ⟦", last.Value, "⟧")
	if len(remains) > 0 {
		fmt.Fprint(f, " for v")
		for _, e := range remains {
			fmt.Fprint(f, e.Operation)
		}
	}
}
core/data/compare/path.go
0.765769
0.544014
path.go
starcoder
package resolv

import "fmt"

/*A Space represents a collection that holds Shapes for collision detection in the same common space. A Space is arbitrarily large -
you can use one Space for a single level, room, or area in your game, or split it up if it makes more sense for your game design.
Technically, a Space is just a slice of Shapes. Spaces fulfill the required functions for Shapes, which means you can also use them
as compound shapes themselves. In these cases, the first Shape is the "root" or pivot from which attempts to move the Shape will be
focused. In other words, Space.SetXY(40, 40) will move all Shapes in the Space in such a way that the first Shape will be at 40, 40,
and all other Shapes retain their original spacing relative to it.*/
type Space []Shape

// NewSpace creates a new Space for shapes to exist in and be tested against in.
func NewSpace() *Space {
	sp := &Space{}
	return sp
}

// Add adds the designated Shapes to the Space. You cannot add the Space to itself.
func (sp *Space) Add(shapes ...Shape) {
	for _, shape := range shapes {
		if shape == sp {
			panic(fmt.Sprintf("ERROR! Space %s cannot add itself!", shape))
		}
		*sp = append(*sp, shape)
	}
}

// Remove removes the designated Shapes from the Space.
func (sp *Space) Remove(shapes ...Shape) {
	for _, shape := range shapes {
		for deleteIndex, s := range *sp {
			if s == shape {
				// Nil the slot before splicing so the removed Shape isn't
				// retained by the backing array.
				s := *sp
				s[deleteIndex] = nil
				s = append(s[:deleteIndex], s[deleteIndex+1:]...)
				*sp = s
				break
			}
		}
	}
}

// Clear "resets" the Space, cleaning out the Space of references to Shapes.
func (sp *Space) Clear() {
	*sp = make(Space, 0)
}

// IsColliding returns whether the provided Shape is colliding with something in this Space.
func (sp *Space) IsColliding(shape Shape) bool {
	for _, other := range *sp {
		if other != shape {
			if shape.IsColliding(other) {
				return true
			}
		}
	}
	return false
}

// GetCollidingShapes returns a Space comprised of Shapes that collide with the checking Shape.
func (sp *Space) GetCollidingShapes(shape Shape) *Space {
	newSpace := NewSpace()
	for _, other := range *sp {
		if other != shape {
			if shape.IsColliding(other) {
				newSpace.Add(other)
			}
		}
	}
	return newSpace
}

// Resolve runs Resolve() using the checking Shape, checking against all other Shapes in the Space. The first Collision
// that returns true is the Collision that gets returned.
func (sp *Space) Resolve(checkingShape Shape, deltaX, deltaY int32) Collision {
	res := Collision{}
	for _, other := range *sp {
		if other != checkingShape && checkingShape.WouldBeColliding(other, int32(deltaX), int32(deltaY)) {
			res = Resolve(checkingShape, other, deltaX, deltaY)
			if res.Colliding() {
				break
			}
		}
	}
	return res
}

// Filter filters out a Space, returning a new Space comprised of Shapes that return true for the boolean function you provide.
// This can be used to focus on a set of object for collision testing or resolution, or lower the number of Shapes to test
// by filtering some out beforehand.
func (sp *Space) Filter(filterFunc func(Shape) bool) *Space {
	subSpace := NewSpace()
	for _, shape := range *sp {
		if filterFunc(shape) {
			subSpace.Add(shape)
		}
	}
	return subSpace
}

// FilterByTags filters a Space out, creating a new Space that has just the Shapes that have all of the specified tags.
func (sp *Space) FilterByTags(tags ...string) *Space {
	return sp.Filter(func(s Shape) bool {
		if s.HasTags(tags...) {
			return true
		}
		return false
	})
}

// FilterOutByTags filters a Space out, creating a new Space that has just the Shapes that don't have all of the specified tags.
func (sp *Space) FilterOutByTags(tags ...string) *Space {
	return sp.Filter(func(s Shape) bool {
		if s.HasTags(tags...) {
			return false
		}
		return true
	})
}

// Contains returns true if the Shape provided exists within the Space.
func (sp *Space) Contains(shape Shape) bool {
	for _, s := range *sp {
		if s == shape {
			return true
		}
	}
	return false
}

// String returns a space-separated rendering of the Shapes in the Space.
func (sp *Space) String() string {
	str := ""
	for _, s := range *sp {
		str += fmt.Sprintf("%v ", s)
	}
	return str
}

/*
-----------------------------
-- SPACE-SHAPE FUNCTIONS --
-----------------------------

These functions allows a Space to fulfill the contract of a Shape as well, thereby allowing them to serve as easy-use compound
Shapes themselves. Functions that should logically function on all Shapes within a Space do that, while functions that return
singular values look at the first shape as a "root" of sorts.
*/

// WouldBeColliding returns true if any of the Shapes within the Space would be colliding should they move along the delta
// X and Y values provided (dx and dy).
// NOTE(review): when `other` is itself a member of this Space, the early
// `return false` stops the scan entirely, so members AFTER it in the slice
// are never tested — confirm whether this was meant to be a `continue`.
func (sp *Space) WouldBeColliding(other Shape, dx, dy int32) bool {
	for _, shape := range *sp {
		if shape == other {
			return false
		}
		if shape.WouldBeColliding(other, dx, dy) {
			return true
		}
	}
	return false
}

// GetTags returns the tag list of the first Shape within the Space. If there are no Shapes within the Space,
// it returns an empty array of string type.
func (sp *Space) GetTags() []string {
	if len(*sp) > 0 {
		return (*sp)[0].GetTags()
	}
	return []string{}
}

// AddTags sets the provided tags on all Shapes contained within the Space.
func (sp *Space) AddTags(tags ...string) {
	for _, shape := range *sp {
		shape.AddTags(tags...)
	}
}

// RemoveTags removes the provided tags from all Shapes contained within the Space.
func (sp *Space) RemoveTags(tags ...string) {
	for _, shape := range *sp {
		shape.RemoveTags(tags...)
	}
}

// ClearTags removes all tags from all Shapes within the Space.
func (sp *Space) ClearTags() {
	for _, shape := range *sp {
		shape.ClearTags()
	}
}

// HasTags returns true if all of the Shapes contained within the Space have the tags specified.
func (sp *Space) HasTags(tags ...string) bool {
	for _, shape := range *sp {
		if !shape.HasTags(tags...) {
			return false
		}
	}
	return true
}

// GetData returns the pointer to the object contained in the Data field of the first Shape within the Space. If there aren't
// any Shapes within the Space, it returns nil.
func (sp *Space) GetData() interface{} {
	if len(*sp) > 0 {
		return (*sp)[0].GetData()
	}
	return nil
}

// SetData sets the pointer provided to the Data field of all Shapes within the Space.
func (sp *Space) SetData(data interface{}) {
	for _, shape := range *sp {
		shape.SetData(data)
	}
}

// GetXY returns the X and Y position of the first Shape in the Space. If there aren't any Shapes within the Space, it
// returns 0, 0.
func (sp *Space) GetXY() (int32, int32) {
	if len(*sp) > 0 {
		return (*sp)[0].GetXY()
	}
	return 0, 0
}

// SetXY sets the X and Y position of all Shapes within the Space to the position provided using the first Shape's position as
// reference. Basically, it moves the first Shape within the Space to the target location and then moves all other Shapes
// by the same delta movement.
func (sp *Space) SetXY(x, y int32) {
	if len(*sp) > 0 {
		dx, dy := (*sp)[0].GetXY()
		dx = x - dx
		dy = y - dy
		for _, shape := range *sp {
			shape.Move(dx, dy)
		}
	}
}

// Move moves all Shapes in the Space by the displacement provided.
func (sp *Space) Move(dx, dy int32) {
	for _, shape := range *sp {
		shape.Move(dx, dy)
	}
}

// Length returns the length of the Space (number of Shapes contained within the Space). This is a convenience function, standing in for len(*space).
func (sp *Space) Length() int {
	return len(*sp)
}

// Get allows you to get a Shape by index from the Space easily. This is a convenience function, standing in for (*space)[index].
func (sp *Space) Get(index int) Shape {
	return (*sp)[index]
}
resolv/space.go
0.884133
0.632616
space.go
starcoder
package iso20022 // Specifies rates related to a corporate action option. type CorporateActionRate47 struct { // Rate proposed in a remarketing of variable rate notes. ProposedRate *PercentageRate `xml:"PropsdRate,omitempty"` // Rate of allowed over-subscription. OversubscriptionRate *RateAndAmountFormat5Choice `xml:"OvrsbcptRate,omitempty"` // Requested tax rate in case of breakdown of tax rate, for example, used for adjustment of tax rate. This is the new requested applicable rate. RequestedTaxationRate []*RateAndAmountFormat21Choice `xml:"ReqdTaxtnRate,omitempty"` // Requested rate at which the income will be withheld by the jurisdiction in which the income was originally paid, for which relief at source and/or reclaim may be possible. RequestedWithholdingOfForeignTax []*RateAndAmountFormat21Choice `xml:"ReqdWhldgOfFrgnTax,omitempty"` // Requested rate at which the income will be withheld by the jurisdiction in which the account owner is located, for which relief at source and/or reclaim may be possible. 
RequestedWithholdingOfLocalTax []*RateAndAmountFormat21Choice `xml:"ReqdWhldgOfLclTax,omitempty"` } func (c *CorporateActionRate47) SetProposedRate(value string) { c.ProposedRate = (*PercentageRate)(&value) } func (c *CorporateActionRate47) AddOversubscriptionRate() *RateAndAmountFormat5Choice { c.OversubscriptionRate = new(RateAndAmountFormat5Choice) return c.OversubscriptionRate } func (c *CorporateActionRate47) AddRequestedTaxationRate() *RateAndAmountFormat21Choice { newValue := new (RateAndAmountFormat21Choice) c.RequestedTaxationRate = append(c.RequestedTaxationRate, newValue) return newValue } func (c *CorporateActionRate47) AddRequestedWithholdingOfForeignTax() *RateAndAmountFormat21Choice { newValue := new (RateAndAmountFormat21Choice) c.RequestedWithholdingOfForeignTax = append(c.RequestedWithholdingOfForeignTax, newValue) return newValue } func (c *CorporateActionRate47) AddRequestedWithholdingOfLocalTax() *RateAndAmountFormat21Choice { newValue := new (RateAndAmountFormat21Choice) c.RequestedWithholdingOfLocalTax = append(c.RequestedWithholdingOfLocalTax, newValue) return newValue }
CorporateActionRate47.go
0.787073
0.459015
CorporateActionRate47.go
starcoder
package square // Defines the parameters that can be included in the body of a request to the [Charge](#endpoint-charge) endpoint. Deprecated - recommend using [CreatePayment](#endpoint-payments-createpayment) type ChargeRequest struct { // A value you specify that uniquely identifies this transaction among transactions you've created. If you're unsure whether a particular transaction succeeded, you can reattempt it with the same idempotency key without worrying about double-charging the buyer. See [Idempotency keys](#idempotencykeys) for more information. IdempotencyKey string `json:"idempotency_key"` AmountMoney *Money `json:"amount_money"` // A nonce generated from the `SqPaymentForm` that represents the card to charge. The application that provides a nonce to this endpoint must be the _same application_ that generated the nonce with the `SqPaymentForm`. Otherwise, the nonce is invalid. Do not provide a value for this field if you provide a value for `customer_card_id`. CardNonce string `json:"card_nonce,omitempty"` // The ID of the customer card on file to charge. Do not provide a value for this field if you provide a value for `card_nonce`. If you provide this value, you _must_ also provide a value for `customer_id`. CustomerCardId string `json:"customer_card_id,omitempty"` // If `true`, the request will only perform an Auth on the provided card. You can then later perform either a Capture (with the `CaptureTransaction` or a Void (with the `VoidTransaction`. Default value: `false` DelayCapture bool `json:"delay_capture,omitempty"` // An optional ID you can associate with the transaction for your own purposes (such as to associate the transaction with an entity ID in your own database). This value cannot exceed 40 characters. ReferenceId string `json:"reference_id,omitempty"` // An optional note to associate with the transaction. This value cannot exceed 60 characters. 
Note string `json:"note,omitempty"` // The ID of the customer to associate this transaction with. This field is required if you provide a value for `customer_card_id`, and optional otherwise. CustomerId string `json:"customer_id,omitempty"` BillingAddress *Address `json:"billing_address,omitempty"` ShippingAddress *Address `json:"shipping_address,omitempty"` // The buyer's email address, if available. This value is optional, but this transaction is ineligible for chargeback protection if it is not provided. BuyerEmailAddress string `json:"buyer_email_address,omitempty"` // The ID of the order to associate with this transaction. If you provide this value, the `amount_money` value of your request must __exactly match__ the value of the order's `total_money` field. OrderId string `json:"order_id,omitempty"` // The basic primitive of multi-party transaction. The value is optional. The transaction facilitated by you can be split from here. If you provide this value, the `amount_money` value in your additional_recipients must not be more than 90% of the `amount_money` value in the charge request. The `location_id` must be the valid location of the app owner merchant. This field requires the `PAYMENTS_WRITE_ADDITIONAL_RECIPIENTS` OAuth permission. This field is currently not supported in sandbox. AdditionalRecipients []AdditionalRecipient `json:"additional_recipients,omitempty"` // A token generated by SqPaymentForm's verifyBuyer() that represents customer's device info and 3ds challenge result. VerificationToken string `json:"verification_token,omitempty"` }
square/model_charge_request.go
0.787319
0.455259
model_charge_request.go
starcoder
package game import ( "github.com/oakmound/lowrez17/game/forceSpace" "github.com/oakmound/oak/collision" "github.com/oakmound/oak/physics" "github.com/oakmound/oak/render" ) // We keep track of all anchors and walls, and run an initialization on said anchors and walls // once they are all tracked var ( anchors = []physics.Vector{} walls = []physics.Vector{} fans = []physics.Vector{} lowDamageWalls = []physics.Vector{} highDamageWalls = []physics.Vector{} ) func addTo(vs *[]physics.Vector) func(x, y int, r render.Renderable) { return func(x, y int, r render.Renderable) { *vs = append(*vs, physics.NewVector(float64(x)*tileDimf64, float64(y)*tileDimf64)) } } func startupWalls() { for _, w := range walls { addWall(w, collision.Label(Blocked)) } for _, w := range lowDamageWalls { addWall(w, collision.Label(LowDamage)) } for _, w := range highDamageWalls { addWall(w, collision.Label(HighDamage)) } } func addWall(w physics.Vector, label collision.Label) { // Find the minimum distance anchor to this wall minDist := w.Distance(anchors[0]) minV := anchors[0] for i := 1; i < len(anchors); i++ { dist := w.Distance(anchors[i]) if minDist > dist { minDist = dist minV = anchors[i] } } // Initialize a directional collision space pointing toward the nearby anchor ds := forceSpace.NewDirectionSpace(collision.NewLabeledSpace(w.X(), w.Y(), tileDimf64, tileDimf64, label), physics.NewForceVector(w.Sub(minV).Normalize(), 10)) collision.Add(ds.Space) tileSpaces = append(tileSpaces, ds.Space) } func startupFans() { if len(fans) > 1 { f := fans[0] fans = fans[1:] for len(fans) > 0 { // Find a close by second fan dist := f.Distance(fans[0]) minFan := 0 for i := 1; i < len(fans); i++ { d := f.Distance(fans[i]) if d < dist { minFan = i dist = d } } // set the direction of the fan to be towards that close second fan ds := forceSpace.NewDirectionSpace(collision.NewLabeledSpace(f.X()-tileDimf64, f.Y()-tileDimf64, tileDimf64*3, tileDimf64*3, collision.Label(PressureFan)), 
physics.NewForceVector(f.Sub(fans[minFan]).Normalize(), 1)) collision.Add(ds.Space) tileSpaces = append(tileSpaces, ds.Space) // update current fan, reduce length of list fans[0], fans[minFan] = fans[minFan], fans[0] f = fans[0] fans = fans[1:] } } }
game/walls.go
0.518302
0.4081
walls.go
starcoder
package sudoku import ( "fmt" "math/rand" "time" ) // Get a Sudoku square (9x9) based on complexity sent in the input func newSudoku(numbersToFill int) Sudoku { var mySudoku Sudoku var randNum int for i := 0; i < RowLength; i++ { // generate a brand new Row with all zeros and append to Sudoku mySudoku = append(mySudoku, _newRow(numbersToFill)) // Append random numbers on specific number of places on this Row // numbersToFill decides the complexity, i.e. how many numbers are prefilled. for j := 0; j < numbersToFill; j++ { _colIndex := genRandomNumber(ColLength) randNum = mySudoku._genUniqueRandomNumber(i, _colIndex) mySudoku[i][_colIndex] = randNum } } return mySudoku } func _newRow(numbersToFill int) Row { Row := Row{} // Append zero on all slots for i := 0; i < ColLength; i++ { Row = append(Row, 0) } return Row } // Get a random number integer with a fresh source every time this function is called func genRandomNumber(maxNumber int) int { // Get a random number source with a fresh new seed every time this function is called source := rand.NewSource(time.Now().UnixNano()) r := rand.New(source) randNum := r.Intn(maxNumber) return randNum } func (r Row) print() { for i, col := range r { fmt.Println(i, col) } } // Generate a unique number that is NOT already present in the Row func (s Sudoku) _genUniqueRandomNumber(rowID int, colID int) int { var randNum int fmt.Println("Row: ", rowID) fmt.Println("col: ", colID) // Keep generating a random number until it satisfies Sudoku criteria iterationNum := 0 for { iterationNum++ fmt.Println("iteration: ", iterationNum) randNum = genRandomNumber(ColLength + 1) if !s.isPresent(randNum, rowID, colID) { break } } return randNum } /* Validate whether the number to be filled satisfies the following criteria: 1) It must not be present in any column of the same Row 2) It must not be present in the same column across other rRws 3) It must not be present in the bounding box (3x3) to which the (rowID, colID) belongs */ func (s Sudoku) 
isPresent(numToFill int, rowID int, colID int) bool { // Loop through the Sudoku by each Row for _rowInRex, Row := range s { // Loop through the Sudoku Row by each column for _colIndex, _colValue := range Row { // If the number to fill is already present in any column of the current Row if numToFill == _colValue && _rowInRex == rowID { return true } // If the number to fill is already present in the same column id of any Row if numToFill == _colValue && _colIndex == colID { return true } // If the number to fill is already present in the 3x3 bounded box to which // (colId, rowID) belongs to switch { case colID <= 2: switch { case rowID <= 2: // top left bounding box if _isPresentBoundingBox(2, 2, _rowInRex, _colIndex, numToFill, _colValue) { return true } break case rowID <= 5: // middle left bounding box if _isPresentBoundingBox(5, 2, _rowInRex, _colIndex, numToFill, _colValue) { return true } break case rowID <= 8: // down left bounding box if _isPresentBoundingBox(8, 2, _rowInRex, _colIndex, numToFill, _colValue) { return true } break } break case colID <= 5: switch { case rowID <= 2: // top middle bounding box if _isPresentBoundingBox(2, 5, _rowInRex, _colIndex, numToFill, _colValue) { return true } break case rowID <= 5: // middle middle bounding box if _isPresentBoundingBox(5, 5, _rowInRex, _colIndex, numToFill, _colValue) { return true } break case rowID <= 8: // down middle bounding box if _isPresentBoundingBox(8, 5, _rowInRex, _colIndex, numToFill, _colValue) { return true } break } break case colID <= 8: switch { case rowID <= 2: // top right bounding box if _isPresentBoundingBox(2, 8, _rowInRex, _colIndex, numToFill, _colValue) { return true } break case rowID <= 5: // middle right bounding box if _isPresentBoundingBox(5, 8, _rowInRex, _colIndex, numToFill, _colValue) { return true } break case rowID <= 8: // down right bounding box if _isPresentBoundingBox(8, 8, _rowInRex, _colIndex, numToFill, _colValue) { return true } break } break } } } return false 
} func _isPresentBoundingBox(rowBoundRry int, colBoundary int, rowInRex int, colIndex int, numToFill int, colValue int) bool { var rowLowerBoundRry int var rowUpperBoundRry int var colLowerBoundary int var colUpperBoundary int switch { case rowBoundRry <= 2 && colBoundary <= 2: rowLowerBoundRry = 0 rowUpperBoundRry = 2 colLowerBoundary = 0 colUpperBoundary = 2 break case rowBoundRry <= 5 && colBoundary <= 2: rowLowerBoundRry = 3 rowUpperBoundRry = 5 colLowerBoundary = 0 colUpperBoundary = 2 break case rowBoundRry <= 8 && colBoundary <= 2: rowLowerBoundRry = 6 rowUpperBoundRry = 8 colLowerBoundary = 0 colUpperBoundary = 2 break case rowBoundRry <= 2 && colBoundary <= 5: rowLowerBoundRry = 0 rowUpperBoundRry = 2 colLowerBoundary = 3 colUpperBoundary = 5 break case rowBoundRry <= 5 && colBoundary <= 5: rowLowerBoundRry = 3 rowUpperBoundRry = 5 colLowerBoundary = 3 colUpperBoundary = 5 break case rowBoundRry <= 8 && colBoundary <= 5: rowLowerBoundRry = 6 rowUpperBoundRry = 8 colLowerBoundary = 3 colUpperBoundary = 5 break case rowBoundRry <= 2 && colBoundary <= 8: rowLowerBoundRry = 0 rowUpperBoundRry = 2 colLowerBoundary = 6 colUpperBoundary = 8 break case rowBoundRry <= 5 && colBoundary <= 8: rowLowerBoundRry = 3 rowUpperBoundRry = 5 colLowerBoundary = 6 colUpperBoundary = 8 break case rowBoundRry <= 8 && colBoundary <= 8: rowLowerBoundRry = 6 rowUpperBoundRry = 8 colLowerBoundary = 6 colUpperBoundary = 8 break } if rowInRex >= rowLowerBoundRry && rowInRex <= rowUpperBoundRry && colIndex >= colLowerBoundary && colIndex <= colUpperBoundary && colValue == numToFill { return true } return false }
sudoku/sudokugen.go
0.591251
0.430806
sudokugen.go
starcoder
package gene import ( "errors" "github.com/vertgenlab/gonomics/dna" ) type Feature int32 type MutationType byte const ( Intron Feature = -1 UtrThree Feature = -3 UtrFive Feature = -5 // All positive values refer to cDNA position Silent MutationType = 0 Missense MutationType = 1 Nonsense MutationType = 2 Frameshift MutationType = 3 Intergenic MutationType = 4 Intronic MutationType = 5 Splice MutationType = 6 // +/- 1-2 of E-I boundary FarSplice MutationType = 7 // +/- 3-10 of E-I boundary DisruptStart MutationType = 8 DisruptStop MutationType = 9 InFrameInsertion MutationType = 10 InFrameDeletion MutationType = 11 ) // Gene is a processed version of a gtf record that enables easy // traversal and manipulation of genes on the genomic and mRNA levels. type Gene struct { id string // Identifier for the transcript the Gene is derived from. In GTF this is the GeneID field. symbol string // Human readable gene symbol startPos int // Genomic start position of the gene. This should use the coordinate system of the reference fasta, rather than Gene internal genomic coordinates posStrand bool // True if gene is on the positive strand, False if on negative strand. cdsStarts []int // The start position of each CDS. This value is stored in Gene genomic coordinates (slice index of genomeSeq) cdsEnds []int // The end position of each CDS. This value is stored in Gene genomic coordinates (slice index of genomeSeq) genomeSeq []dna.Base // The genomic sequence of the gene from 5' to 3'. NOTE: This field is reverse complemented relative to reference file if the gene is on the negative strand cdnaSeq []dna.Base // The cDNA sequence of the gene from 5' to 3'. codingSeq subSeq // The coding sequence of the gene from 5' to 3'. This field is a sub-slice of cdnaSeq. utrFive subSeq // The sequence of the 5'UTR from 5' to 3'. This field is a sub-slice of cdnaSeq. utrThree subSeq // The sequence of the 3'UTR from 5' to 3'. This field is a sub-slice of cdnaSeq. 
protSeq []dna.AminoAcid // The polypeptide sequence resulting from the cDNA. featureArray []Feature // FeatureArray is a slice with len(featureArray) = len(genomeSeq). The index of featureArray corresponds to the same index of genomeSeq. featureArray denotes the features listed above as negative values, or the cDNA pos using all values >= 0. orig goGeneBackup // Copy of initial Gene state to enable the Reset() function. changeLog []diff // Log of any mutations that have been performed on the Gene to enable the Reset() function. } // subSeq stores a pointer to a portion of a larger sequence with defined start and end points type subSeq struct { start int // base zero, closed start position in cdnaSeq end int // base zero, open end position in cdnaSeq seq []dna.Base // sub-slice of cdnaSeq } // goGeneBackup stores the initial state of a GoGene to enable Reset() functionality. type goGeneBackup struct { startPos int cdsStarts []int cdsEnds []int genomeSeq []dna.Base cdnaSeq []dna.Base codingSeq subSeq utrFive subSeq utrThree subSeq featureArray []Feature } // diff acts as an entry in a changelog listing how the sequence has been manipulated. type diff struct { genomePos int removed []dna.Base added []dna.Base } // EffectPrediction outputs the effects of a mutation on the cDNA and protein sequences. type EffectPrediction struct { Consequence MutationType // Classification of mutation (see above for values) CdnaPos int // Base-zero position in the cDNA CdnaDist int // Distance from nearest CDS. Zero if in a CDS, >0 if 3' of CDS, <0 if 5' of CDS. AaPos int // Base-zero position of first changed amino acid AaRef []dna.AminoAcid // Slice of Ref amino acids (removed from protein) AaAlt []dna.AminoAcid // Slice of Alt amino acids (added to protein) StopDist int // Distance to stop codon. This value is filled if and only if it changes as a result of the mutation. Value is -1 if unchanged and -2 if no stop is hit before the end of the mRNA. 
NOT YET IMPLEMENTED FOR POINT MUTATIONS THAT DISRUPT THE STOP CODON } var ( ErrNonACGTBase = errors.New("alt base must be A, C, T, or G") ErrNegativeInputValue = errors.New("genomePos must be positive") ErrInputPosNotInGene = errors.New("input genomePos is not in the gene") ErrInvalidRange = errors.New("genomeStartPos must be less than genomeEndPos") ) // GenomicToCdna converts genomic coordinates to cDNA coordinates. The return format is c.100+10 (HGVS). // The first int return is the nearest position in the coding sequence in cDNA coordinates. // The second int return is the distance from the nearest coding exon // (>0 if 5' of cds; <0 if 3' of cds, ==0 if inside coding sequence, ties break to <0). // Input and output positions are zero-based func GenomicPosToCdna(g *Gene, genomePos int) (int, int, error) { var queryPos int if g.posStrand { // Positive Strand queryPos = genomePos - g.startPos } else { // Negative Strand queryPos = g.startPos - genomePos } feature := g.featureArray[queryPos] switch feature { case Intron: var forwardOffset, reverseOffset int = 1, -1 for { if g.featureArray[queryPos+reverseOffset] > 0 { // Note: the offset values are returned with their sign flipped as we are returning // the offset distance FROM the cds, not the offset distance TO the cds return int(g.featureArray[queryPos+reverseOffset]), reverseOffset * -1, nil } if g.featureArray[queryPos+forwardOffset] > 0 { return int(g.featureArray[queryPos+forwardOffset]), forwardOffset * -1, nil } forwardOffset++ reverseOffset-- if queryPos+forwardOffset > len(g.featureArray) || queryPos+reverseOffset < 0 { return 0, 0, errors.New("no coding sequence could be found") } } case UtrThree: var reverseOffset int = -1 for g.featureArray[queryPos+reverseOffset] < 0 { reverseOffset-- if queryPos+reverseOffset < 0 { return 0, 0, errors.New("no coding sequence found before 3'UTR") } } return int(g.featureArray[queryPos+reverseOffset]), reverseOffset * -1, nil case UtrFive: var forwardOffset int = 1 
for g.featureArray[queryPos+forwardOffset] < 0 { forwardOffset++ if queryPos+forwardOffset > len(g.featureArray) { return 0, 0, errors.New("no coding sequence found after 5'UTR") } } return int(g.featureArray[queryPos+forwardOffset]), forwardOffset * -1, nil default: return int(feature), 0, nil } } // CodingPosToGenomic converts cDna coordinates to genomic coordinates // Input and output positions are zero-based func CodingPosToGenomic(g *Gene, cdnaPos int) (int, error) { if cdnaPos < 0 { return 0, errors.New("input CDS position must be positive") } if cdnaPos > len(g.codingSeq.seq)-1 { return 0, errors.New("input position is greater than the length of the CDS") } var searchStartPos int = g.cdsStarts[0] for _, val := range g.cdsStarts { if int(g.featureArray[val]) > cdnaPos { break } searchStartPos = val } if g.posStrand { // Positive Strand return searchStartPos + (cdnaPos - int(g.featureArray[searchStartPos])) + g.startPos, nil } else { // Negative Strand return g.startPos - (searchStartPos + (cdnaPos - int(g.featureArray[searchStartPos]))), nil } } func CdnaPosToCodon(g *Gene, cdnaPos int) (dna.Codon, error) { var answer dna.Codon if cdnaPos < 0 { return answer, errors.New("input cDNA position must be positive") } if cdnaPos > len(g.codingSeq.seq)-1 { return answer, errors.New("input position is greater than the length of the cDNA") } switch cdnaPos % 3 { case 0: return dna.BasesToCodons(g.codingSeq.seq[cdnaPos : cdnaPos+3])[0], nil case 1: return dna.BasesToCodons(g.codingSeq.seq[cdnaPos-1 : cdnaPos+2])[0], nil case 2: return dna.BasesToCodons(g.codingSeq.seq[cdnaPos-2 : cdnaPos+1])[0], nil default: // never used return answer, errors.New("problem determining frame") } }
gene/gene.go
0.614278
0.623348
gene.go
starcoder
package main import "fmt" // Given a 2D board containing 'X' and 'O' (the letter O), // capture all regions surrounded by 'X'. // A region is captured by flipping all 'O's into 'X's in that surrounded region. // Example: // X X X X // X O O X // X X O X // X O X X // After running your function, the board should be: // X X X X // X X X X // X X X X // X O X X func solve(board [][]byte) { h := len(board) if h <= 1 { return } w := len(board[0]) var dfs func(x, y int) dfs = func(x, y int) { board[y][x] = '*' if y > 0 && board[y-1][x] == 'O' { dfs(x, y-1) } if x < w-1 && board[y][x+1] == 'O' { dfs(x+1, y) } if y < h-1 && board[y+1][x] == 'O' { dfs(x, y+1) } if x > 0 && board[y][x-1] == 'O' { dfs(x-1, y) } } for y, row := range board { for x, val := range row { if val == 'O' && (y == 0 || y == h-1 || x == 0 || x == w-1) { dfs(x, y) } } } for y, row := range board { for x, val := range row { if val == '*' { board[y][x] = 'O' } else if val == 'O' { board[y][x] = 'X' } } } } func test(board [][]byte, expect [][]byte) { solve(board) pass := true for i := 0; i < len(expect); i++ { for j := 0; j < len(expect[i]); j++ { if expect[i][j] != board[i][j] { pass = false break } } } if !pass { fmt.Printf("fail! 
expect %v, got %v\n", expect, board) } else { fmt.Println("pass") } } func main() { test( [][]byte{ []byte{'O', 'O'}, []byte{'O', 'O'}, }, [][]byte{ []byte{'O', 'O'}, []byte{'O', 'O'}, }, ) test( [][]byte{ []byte{'O', 'O', 'O'}, []byte{'O', 'O', 'O'}, []byte{'O', 'O', 'O'}, }, [][]byte{ []byte{'O', 'O', 'O'}, []byte{'O', 'O', 'O'}, []byte{'O', 'O', 'O'}, }, ) test( [][]byte{ []byte{'X', 'X', 'X', 'X'}, []byte{'X', 'O', 'O', 'X'}, []byte{'X', 'X', 'O', 'X'}, []byte{'X', 'O', 'X', 'X'}, }, [][]byte{ []byte{'X', 'X', 'X', 'X'}, []byte{'X', 'X', 'X', 'X'}, []byte{'X', 'X', 'X', 'X'}, []byte{'X', 'O', 'X', 'X'}, }, ) solve([][]byte{ []byte{'O', 'X', 'X', 'O', 'X'}, []byte{'X', 'O', 'O', 'X', 'O'}, []byte{'X', 'O', 'X', 'O', 'X'}, []byte{'O', 'X', 'O', 'O', 'O'}, []byte{'X', 'X', 'O', 'X', 'O'}, }) }
130.surrounded-regions/main.go
0.61682
0.522689
main.go
starcoder
package learners import ( "github.com/PaddlePaddle/PaddleDTX/dai/mpc/learners/linear_reg_vl" "github.com/PaddlePaddle/PaddleDTX/dai/mpc/learners/logic_reg_vl" pbCom "github.com/PaddlePaddle/PaddleDTX/dai/protos/common" pb "github.com/PaddlePaddle/PaddleDTX/dai/protos/mpc" ) // Learner is assigned with a specific algorithm and data used for training a model // participates in the multi-parts-calculation during training process type Learner interface { // Advance does calculation with local data and communicates with other nodes in cluster to train a model step by step // When we implement the method, we should pay attention to performance in case it takes a long time and blocks the client. // payload could be resolved by Learner defined by specific algorithm Advance(payload []byte) (*pb.TrainResponse, error) } // RpcHandler used to request remote mpc-node type RpcHandler interface { StepTrain(req *pb.TrainRequest, peerName string) (*pb.TrainResponse, error) // StepTrainWithRetry sends training message to remote mpc-node // retries 2 times at most // inteSec indicates the interval between retry requests, in seconds StepTrainWithRetry(req *pb.TrainRequest, peerName string, times int, inteSec int64) (*pb.TrainResponse, error) } // ResultHandler handles final result which is successful or failed // Should be called when learning finished type ResultHandler interface { SaveResult(*pbCom.TrainTaskResult) } // LiveEvaluator performs staged evaluation during training. // The basic steps of LiveEvaluator: // Divide the dataset in the way of proportional random division. // Initiate a learner for evaluation with training part. // Train the model, and pause training when the pause round is reached, // and instantiate the staged model for validation, // then, calculate the evaluation metric scores with prediction result obtained on the validation set. // Repeat Train-Pause-validate until the stop signal is received. 
type LiveEvaluator interface { // Trigger triggers model evaluation. // The parameter contains two types of messages. // One is to set the learner for evaluation with training set and start it. // The other is to drive the learner to continue training. When the conditions are met(reaching pause round), // stop training and instantiate the model for validation. Trigger(*pb.LiveEvaluationTriggerMsg) error } // NewLearner returns a Learner defined by algorithm and training samples // id is the assigned id for Learner // address indicates local mpc-node // algo is the assigned algorithm for learner // parties are other learners who participates in MPC, assigned with mpc-node address usually // rpc is used to request remote mpc-node // rh handles final result which is successful or failed // params are parameters for training model // samplesFile contains samples for training model // le is an LiveEvaluator, and LiveEvaluation should be performed by learner if it is assigned without nil func NewLearner(id string, address string, algo pbCom.Algorithm, params *pbCom.TrainParams, samplesFile []byte, parties []string, rpc RpcHandler, rh ResultHandler, le LiveEvaluator) (Learner, error) { if pbCom.Algorithm_LINEAR_REGRESSION_VL == algo { return linear_reg_vl.NewLearner(id, address, params, samplesFile, parties, rpc, rh, le) } else { // pbCom.Algorithm_LOGIC_REGRESSION_VL return logic_reg_vl.NewLearner(id, address, params, samplesFile, parties, rpc, rh, le) } } // NewLearner returns a Learner defined by algorithm and training samples, but doesn't run it // id is the assigned id for Learner // address indicates local mpc-node // algo is the assigned algorithm for learner // parties are other learners who participates in MPC, assigned with mpc-node address usually // rpc is used to request remote mpc-node // rh handles final result which is successful or failed // params are parameters for training model func NewLearnerWithoutSamples(id string, address string, algo 
pbCom.Algorithm, params *pbCom.TrainParams, parties []string, rpc RpcHandler, rh ResultHandler) (Learner, error) { if pbCom.Algorithm_LINEAR_REGRESSION_VL == algo { return linear_reg_vl.NewLearnerWithoutSamples(id, address, params, parties, rpc, rh) } else { // pbCom.Algorithm_LOGIC_REGRESSION_VL return logic_reg_vl.NewLearnerWithoutSamples(id, address, params, parties, rpc, rh) } }
dai/mpc/learners/learners.go
0.611962
0.511656
learners.go
starcoder
package model // Accessdown. Use Layer 2 mode to bridge the packets sent to this service if it is marked as DOWN. If the service is DOWN, and this parameter is disabled, the packets are dropped.<br>Default value: NO<br>Possible values = YES, NO. // All. Display both user-configured and dynamically learned services. // Appflowlog. Enable logging of AppFlow information for the specified service group.<br>Default value: ENABLED<br>Possible values = ENABLED, DISABLED. // Autoscale. Auto scale option for a servicegroup.<br>Default value: DISABLED<br>Possible values = DISABLED, DNS, POLICY. // Cacheable. Use the transparent cache redirection virtual server to forward requests to the cache server.<br>Note: Do not specify this parameter if you set the Cache Type parameter.<br>Default value: NO<br>Possible values = YES, NO. // Cachetype. Cache type supported by the cache server.<br>Possible values = TRANSPARENT, REVERSE, FORWARD. // Cip. Before forwarding a request to the service, insert an HTTP header with the clients IPv4 or IPv6 address as its value. Used if the server needs the clients IP address for security, accounting, or other purposes, and setting the Use Source IP parameter is not a viable option.<br>Possible values = ENABLED, DISABLED. // Cipheader. Name for the HTTP header whose value must be set to the IP address of the client. Used with the Client IP parameter. If you set the Client IP parameter, and you do not specify a name for the header, the appliance uses the header name specified for the global Client IP Header parameter (the cipHeader parameter in the set ns param CLI command or the Client IP Header parameter in the Configure HTTP Parameters dialog box at System ;gt; Settings ;gt; Change HTTP parameters). If the global Client IP Header parameter is not specified, the appliance inserts a header with the name "client-ip.".<br>Minimum length = 1. // Cka. Enable client keep-alive for the service group.<br>Possible values = YES, NO. // Cleartextport. 
Port to which clear text data must be sent after the appliance decrypts incoming SSL traffic. Applicable to transparent SSL services.<br>Minimum value = 1. // Clmonowner. Tells the mon owner of the service. // Clmonview. Tells the view id of the monitoring owner. // Clttimeout. Time, in seconds, after which to terminate an idle client connection.<br>Minimum value = 0<br>Maximum value = 31536000. // Cmp. Enable compression for the service.<br>Possible values = YES, NO. // Comment. Any information about the service group. // Customserverid. The identifier for this IP:Port pair. Used when the persistency type is set to Custom Server ID.<br>Default value: "None". // Delay. Time, in seconds, allocated for a shutdown of the services in the service group. During this period, new requests are sent to the service only for clients who already have persistent sessions on the appliance. Requests from new clients are load balanced among other available services. After the delay time expires, no requests are sent to the service, and the service is marked as unavailable (OUT OF SERVICE). // Dnsprofilename. Name of the DNS profile to be associated with the service. DNS profile properties will applied to the transactions processed by a service. This parameter is valid only for ADNS and ADNS-TCP services.<br>Minimum length = 1<br>Maximum length = 127. // Downstateflush. Flush all active transactions associated with a service whose state transitions from UP to DOWN. Do not enable this option for applications that must complete their transactions.<br>Default value: ENABLED<br>Possible values = ENABLED, DISABLED. // Dup_State. Added this field for getting state value from table.<br>Possible values = ENABLED, DISABLED. // Dup_Weight. weight of the monitor that is bound to servicegroup.<br>Minimum value = 1. // Graceful. 
Shut down gracefully, not accepting any new connections, and disabling the service when all of its connections are closed.<br>Default value: NO<br>Possible values = YES, NO. // Groupcount. Servicegroup Count. // Gslb. The GSLB option for the corresponding virtual server.<br>Possible values = REMOTE, LOCAL. // Hashid. A numerical identifier that can be used by hash based load balancing methods. Must be unique for each service.<br>Minimum value = 1. // Healthmonitor. Monitor the health of this service. Available settings function as follows:<br>YES - Send probes to check the health of the service.<br>NO - Do not send probes to check the health of the service. With the NO option, the appliance shows the service as UP at all times.<br>Default value: YES<br>Possible values = YES, NO. // Httpprofilename. Name of the HTTP profile that contains HTTP configuration settings for the service group.<br>Minimum length = 1<br>Maximum length = 127. // Includemembers. Display the members of the listed service groups in addition to their settings. Can be specified when no service group name is provided in the command. In that case, the details displayed for each service group are identical to the details displayed when a service group name is provided, except that bound monitors are not displayed. // Internal. Display only dynamically learned services. // Ip. IP Address. // Ip. IP to assign to the service.<br>Minimum length = 1. // Ipaddress. The new IP address of the service. // Lastresponse. The string form of monstatcode. // Maxbandwidth. Maximum bandwidth, in Kbps, allocated for all the services in the service group.<br>Minimum value = 0<br>Maximum value = 4294967287. // Maxclient. Maximum number of simultaneous open connections for the service group.<br>Minimum value = 0<br>Maximum value = 4294967294. // Maxreq. Maximum number of requests that can be sent on a persistent connection to the service group. 
<br>Note: Connection requests beyond this value are rejected.<br>Minimum value = 0<br>Maximum value = 65535. // Memberport. member port. // Monconnectionclose. Close monitoring connections by sending the service a connection termination message with the specified bit set.<br>Default value: NONE<br>Possible values = RESET, FIN. // Monitor_Name_Svc. Name of the monitor bound to the service group. Used to assign a weight to the monitor.<br>Minimum length = 1. // Monitor_State. The running state of the monitor on this service.<br>Possible values = UP, DOWN, UNKNOWN, BUSY, OUT OF SERVICE, GOING OUT OF SERVICE, DOWN WHEN GOING OUT OF SERVICE, NS_EMPTY_STR, Unknown, DISABLED. // Monstatcode. The code indicating the monitor response. // Monstatparam1. First parameter for use with message code. // Monstatparam2. Second parameter for use with message code. // Monstatparam3. Third parameter for use with message code. // Monthreshold. Minimum sum of weights of the monitors that are bound to this service. Used to determine whether to mark a service as UP or DOWN.<br>Minimum value = 0<br>Maximum value = 65535. // Name. Name for the service. Must begin with an ASCII alphabetic or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters. Cannot be changed after the service has been created.<br>Minimum length = 1. // Netprofile. Network profile for the service group.<br>Minimum length = 1<br>Maximum length = 127. // Newname. New name for the service group.<br>Minimum length = 1. // Numofconnections. This will tell the number of client side connections are still open. // Oracleserverversion. Oracle server version.<br>Default value: 10G<br>Possible values = 10G, 11G. // Pathmonitor. Path monitoring for clustering.<br>Possible values = YES, NO. // Pathmonitorindv. Individual Path monitoring decisions.<br>Possible values = YES, NO. // Policyname. 
The name of the policyname for which this service is bound. // Port. Port number of the service.<br>Range 1 - 65535<br>* in CLI is represented as 65535 in NITRO API. // Port. Server port number.<br>Range 1 - 65535<br>* in CLI is represented as 65535 in NITRO API. // Processlocal. By turning on this option packets destined to a service in a cluster will not under go any steering. Turn this option for single packet request response mode or when the upstream device is performing a proper RSS for connection based distribution.<br>Default value: DISABLED<br>Possible values = ENABLED, DISABLED. // Publicip. public ip. // Publicport. public port.<br>Minimum value = 1<br>Range 1 - 65535<br>* in CLI is represented as 65535 in NITRO API. // Responsetime. Response time of this monitor. // Riseapbrstatsmsgcode. The code indicating the rise apbr status. // Riseapbrstatsmsgcode2. The code indicating other rise stats. // Rtspsessionidremap. Enable RTSP session ID mapping for the service group.<br>Default value: OFF<br>Possible values = ON, OFF. // Sc. State of the SureConnect feature for the service group.<br>Default value: OFF<br>Possible values = ON, OFF. // Serverid. The identifier for the service. This is used when the persistency type is set to Custom Server ID. // Servername. Name of the server to which to bind the service group.<br>Minimum length = 1. // Serviceconftype. The configuration type of the service group. // Servicegroupeffectivestate. Indicates the effective servicegroup state based on the state of the bound service items.If all services are UP the effective state is UP, if all are DOWN its DOWN,if all are OFS its OFS.If atleast one serviceis UP and rest are either DOWN or OFS, the effective state is PARTIAL-UP.If atleast one bound service is DOWN and rest are OFS the effective state is PARTIAL DOWN.<br>Possible values = UP, DOWN, OUT OF SERVICE, PARTIAL-UP, PARTIAL-DOWN. // Servicegroupname. Name of the service group. 
Must begin with an ASCII alphabetic or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters. Can be changed after the name is created.<br>Minimum length = 1. // Serviceipstr. This field has been intorduced to show the dbs services ip. // Servicetype. Protocol used to exchange data with the service.<br>Possible values = HTTP, FTP, TCP, UDP, SSL, SSL_BRIDGE, SSL_TCP, DTLS, NNTP, RPCSVR, DNS, ADNS, SNMP, RTSP, DHCPRA, ANY, SIP_UDP, SIP_TCP, SIP_SSL, DNS_TCP, ADNS_TCP, MYSQL, MSSQL, ORACLE, RADIUS, RADIUSListener, RDP, DIAMETER, SSL_DIAMETER, TFTP, SMPP, PPTP, GRE, SYSLOGTCP, SYSLOGUDP, FIX, SSL_FIX, USER_TCP, USER_SSL_TCP. // Sp. Enable surge protection for the service group.<br>Default value: OFF<br>Possible values = ON, OFF. // State. Initial state of the service group.<br>Default value: ENABLED<br>Possible values = ENABLED, DISABLED. // Statechangetimemsec. Time when last state change occurred. Milliseconds part. // Stateupdatereason. Checks state update reason on the secondary node. // Svrstate. The state of the service.<br>Possible values = UP, DOWN, UNKNOWN, BUSY, OUT OF SERVICE, GOING OUT OF SERVICE, DOWN WHEN GOING OUT OF SERVICE, NS_EMPTY_STR, Unknown, DISABLED. // Svrtimeout. Time, in seconds, after which to terminate an idle server connection.<br>Minimum value = 0<br>Maximum value = 31536000. // Tcpb. Enable TCP buffering for the service group.<br>Possible values = YES, NO. // Tcpprofilename. Name of the TCP profile that contains TCP configuration settings for the service group.<br>Minimum length = 1<br>Maximum length = 127. // Td. Integer value that uniquely identifies the traffic domain in which you want to configure the entity. If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID of 0.<br>Minimum value = 0<br>Maximum value = 4094. // Useproxyport. 
Use the proxy port as the source port when initiating connections with the server. With the NO setting, the client-side connection port is used as the source port for the server-side connection. <br>Note: This parameter is available only when the Use Source IP (USIP) parameter is set to YES.<br>Possible values = YES, NO. // Usip. Use clients IP address as the source IP address when initiating connection to the server. With the NO setting, which is the default, a mapped IP (MIP) address or subnet IP (SNIP) address is used as the source IP address to initiate server side connections.<br>Possible values = YES, NO. // Value. SSL Status.<br>Possible values = Certkey not bound, SSL feature disabled. // Weight. Weight to assign to the servers in the service group. Specifies the capacity of the servers relative to the other servers in the load balancing configuration. The higher the weight, the higher the percentage of requests sent to the service.<br>Minimum value = 1<br>Maximum value = 100. // ServicegroupAdd defines add request. type ServicegroupAdd struct { Servicegroup ServicegroupAddBody `json:"servicegroup"` } // ServicegroupAddBody body to add object. 
type ServicegroupAddBody struct { Appflowlog string `json:"appflowlog,omitempty"` Autoscale string `json:"autoscale,omitempty"` Cacheable string `json:"cacheable,omitempty"` Cachetype string `json:"cachetype,omitempty"` Cip string `json:"cip,omitempty"` Cipheader string `json:"cipheader,omitempty"` Cka string `json:"cka,omitempty"` Clttimeout int `json:"clttimeout,omitempty"` Cmp string `json:"cmp,omitempty"` Comment string `json:"comment,omitempty"` Downstateflush string `json:"downstateflush,omitempty"` Healthmonitor string `json:"healthmonitor,omitempty"` Httpprofilename string `json:"httpprofilename,omitempty"` Maxbandwidth string `json:"maxbandwidth,omitempty"` Maxclient int `json:"maxclient,omitempty"` Maxreq string `json:"maxreq,omitempty"` Memberport int `json:"memberport,omitempty"` Monconnectionclose string `json:"monconnectionclose,omitempty"` Monthreshold string `json:"monthreshold,omitempty"` Netprofile string `json:"netprofile,omitempty"` Pathmonitor string `json:"pathmonitor,omitempty"` Pathmonitorindv string `json:"pathmonitorindv,omitempty"` Rtspsessionidremap string `json:"rtspsessionidremap,omitempty"` Sc string `json:"sc,omitempty"` Servicegroupname string `json:"servicegroupname"` Servicetype string `json:"servicetype"` Sp string `json:"sp,omitempty"` State string `json:"state,omitempty"` Svrtimeout int `json:"svrtimeout,omitempty"` Tcpb string `json:"tcpb,omitempty"` Tcpprofilename string `json:"tcpprofilename,omitempty"` Td string `json:"td,omitempty"` Useproxyport string `json:"useproxyport,omitempty"` Usip string `json:"usip,omitempty"` } // ServicegroupUpdateBody body to update object. 
type ServicegroupUpdateBody struct { Appflowlog string `json:"appflowlog,omitempty"` Cacheable string `json:"cacheable,omitempty"` Cip string `json:"cip,omitempty"` Cipheader string `json:"cipheader,omitempty"` Cka string `json:"cka,omitempty"` Clttimeout int `json:"clttimeout,omitempty"` Cmp string `json:"cmp,omitempty"` Comment string `json:"comment,omitempty"` Customserverid string `json:"customserverid,omitempty"` Downstateflush string `json:"downstateflush,omitempty"` DupWeight int `json:"dup_weight,omitempty"` Hashid string `json:"hashid,omitempty"` Healthmonitor string `json:"healthmonitor,omitempty"` Httpprofilename string `json:"httpprofilename,omitempty"` Maxbandwidth string `json:"maxbandwidth,omitempty"` Maxclient string `json:"maxclient,omitempty"` Maxreq int `json:"maxreq,omitempty"` Monconnectionclose string `json:"monconnectionclose,omitempty"` MonitorNameSvc string `json:"monitor_name_svc,omitempty"` Monthreshold string `json:"monthreshold,omitempty"` Netprofile string `json:"netprofile,omitempty"` Pathmonitor string `json:"pathmonitor,omitempty"` Pathmonitorindv string `json:"pathmonitorindv,omitempty"` Port int `json:"port,omitempty"` Rtspsessionidremap string `json:"rtspsessionidremap,omitempty"` Sc string `json:"sc,omitempty"` Serverid int `json:"serverid,omitempty"` Servername string `json:"servername,omitempty"` Servicegroupname string `json:"servicegroupname"` Sp string `json:"sp,omitempty"` Svrtimeout int `json:"svrtimeout,omitempty"` Tcpb string `json:"tcpb,omitempty"` Tcpprofilename string `json:"tcpprofilename,omitempty"` Useproxyport string `json:"useproxyport,omitempty"` Usip string `json:"usip,omitempty"` Weight int `json:"weight,omitempty"` } // ServicegroupEnableBody body for enabling object. type ServicegroupEnableBody struct { Port int `json:"port,omitempty"` Servername string `json:"servername,omitempty"` Servicegroupname string `json:"servicegroupname"` } // ServicegroupDisableBody body for disabling object. 
type ServicegroupDisableBody struct { Servicegroupname string `json:"servicegroupname"` Servername string `json:"servername,omitempty"` Port int `json:"port,omitempty"` Delay int `json:"delay,omitempty"` Graceful string `json:"graceful,omitempty"` } // ServicegroupRenameBody body to rename object. type ServicegroupRenameBody struct { Servicegroupname string `json:"servicegroupname"` Newname string `json:"newname"` } // ServicegroupUpdate defines update request. type ServicegroupUpdate struct { Servicegroup ServicegroupUpdateBody `json:"servicegroup"` } // ServicegroupEnable defines enable request. type ServicegroupEnable struct { Servicegroup ServicegroupEnableBody `json:"servicegroup"` } // ServicegroupDisable defines disable request. type ServicegroupDisable struct { Servicegroup ServicegroupDisableBody `json:"servicegroup"` } // ServicegroupRename defines rename request. type ServicegroupRename struct { Servicegroup ServicegroupRenameBody `json:"servicegroup"` } // ServicegroupWrapper wraps the object and serves as default response. type ServicegroupWrapper struct { Errorcode int `json:"errorcode,omitempty"` Message string `json:"message,omitempty"` Severity string `json:"severity,omitempty"` Servicegroup []Servicegroup `json:"servicegroup"` } // Servicegroup describes the object. 
// Field semantics are documented in the file-header comment. This is the
// read/response shape, so it includes state and statistics fields that do not
// appear in the request bodies above.
type Servicegroup struct {
	Appflowlog         string `json:"appflowlog,omitempty"`
	Autoscale          string `json:"autoscale,omitempty"`
	Cacheable          string `json:"cacheable,omitempty"`
	Cachetype          string `json:"cachetype,omitempty"`
	Cip                string `json:"cip,omitempty"`
	Cipheader          string `json:"cipheader,omitempty"`
	Cka                string `json:"cka,omitempty"`
	Clmonowner         string `json:"clmonowner,omitempty"`
	Clmonview          string `json:"clmonview,omitempty"`
	Clttimeout         int    `json:"clttimeout,omitempty"`
	Cmp                string `json:"cmp,omitempty"`
	Comment            string `json:"comment,omitempty"`
	Customserverid     string `json:"customserverid,omitempty"`
	Delay              int    `json:"delay,omitempty"`
	Downstateflush     string `json:"downstateflush,omitempty"`
	Graceful           string `json:"graceful,omitempty"`
	Groupcount         string `json:"groupcount,omitempty"`
	Hashid             string `json:"hashid,omitempty"`
	Healthmonitor      string `json:"healthmonitor,omitempty"`
	Httpprofilename    string `json:"httpprofilename,omitempty"`
	Includemembers     bool   `json:"includemembers,omitempty"`
	IP                 string `json:"ip,omitempty"`
	Maxbandwidth       string `json:"maxbandwidth,omitempty"`
	Maxclient          string `json:"maxclient,omitempty"`
	Maxreq             string `json:"maxreq,omitempty"`
	Memberport         int    `json:"memberport,omitempty"`
	Monconnectionclose string `json:"monconnectionclose,omitempty"`
	Monstatcode        int    `json:"monstatcode,omitempty"`
	Monstatparam1      int    `json:"monstatparam1,omitempty"`
	Monstatparam2      int    `json:"monstatparam2,omitempty"`
	Monstatparam3      int    `json:"monstatparam3,omitempty"`
	Monthreshold       string `json:"monthreshold,omitempty"`
	Netprofile         string `json:"netprofile,omitempty"`
	Numofconnections   int    `json:"numofconnections,omitempty"`
	Pathmonitor        string `json:"pathmonitor,omitempty"`
	Pathmonitorindv    string `json:"pathmonitorindv,omitempty"`
	Port               int    `json:"port,omitempty"`
	Riseapbrstatsmsgcode  int `json:"riseapbrstatsmsgcode,omitempty"`
	Riseapbrstatsmsgcode2 int `json:"riseapbrstatsmsgcode2,omitempty"`
	Rtspsessionidremap string `json:"rtspsessionidremap,omitempty"`
	Sc                 string `json:"sc,omitempty"`
	Serverid           int    `json:"serverid,omitempty"`
	Servername         string `json:"servername,omitempty"`
	// NOTE(review): Serviceconftpye looks like a typo'd duplicate of
	// Serviceconftype (both bool, tags differ only by the transposed letters).
	// It is kept because removing an exported field would break callers and
	// change the JSON shape — confirm against the NITRO schema before deleting.
	Serviceconftpye            bool   `json:"serviceconftpye,omitempty"`
	Serviceconftype            bool   `json:"serviceconftype,omitempty"`
	Servicegroupeffectivestate string `json:"servicegroupeffectivestate,omitempty"`
	Servicegroupname           string `json:"servicegroupname"`
	Serviceipstr               string `json:"serviceipstr,omitempty"`
	Servicetype                string `json:"servicetype"`
	Sp                         string `json:"sp,omitempty"`
	State                      string `json:"state"`
	Statechangetimemsec        string `json:"statechangetimemsec,omitempty"`
	Stateupdatereason          string `json:"stateupdatereason,omitempty"`
	Svrstate                   string `json:"svrstate,omitempty"`
	Svrtimeout                 int    `json:"svrtimeout,omitempty"`
	Tcpb                       string `json:"tcpb,omitempty"`
	Tcpprofilename             string `json:"tcpprofilename,omitempty"`
	Td                         string `json:"td,omitempty"`
	Timesincelaststatechange   int    `json:"timesincelaststatechange,omitempty"`
	Useproxyport               string `json:"useproxyport,omitempty"`
	Usip                       string `json:"usip,omitempty"`
	Value                      string `json:"value,omitempty"`
	Weight                     int    `json:"weight,omitempty"`
}
model/servicegroup.go
0.636127
0.448306
servicegroup.go
starcoder
package geom

import "fmt"

// ApplyToPoints applies the given function to each point in the geometry and
// any sub geometries, returning a new transformed geometry.
//
// f receives one point's coordinates and must return at least two values
// (x, y); extra returned coordinates are ignored. The first error from f, a
// short coordinate result, or a geometry of an unknown concrete type aborts
// the walk with a non-nil error.
func ApplyToPoints(geometry Geometry, f func(coords ...float64) ([]float64, error)) (Geometry, error) {
	switch geo := geometry.(type) {
	default:
		return nil, fmt.Errorf("unknown Geometry: %T", geometry)

	case Point:
		c, err := f(geo.X(), geo.Y())
		if err != nil {
			return nil, err
		}
		if len(c) < 2 {
			return nil, fmt.Errorf("function did not return minimum number of coordinates got %v expected 2", len(c))
		}
		return Point{c[0], c[1]}, nil

	case MultiPoint:
		pts := make(MultiPoint, len(geo))
		for i, pt := range geo {
			c, err := f(pt[:]...)
			if err != nil {
				return nil, err
			}
			if len(c) < 2 {
				return nil, fmt.Errorf("function did not return minimum number of coordinates got %v expected 2", len(c))
			}
			pts[i][0], pts[i][1] = c[0], c[1]
		}
		return pts, nil

	case LineString:
		line := make(LineString, len(geo))
		for i, pt := range geo {
			c, err := f(pt[:]...)
			if err != nil {
				return nil, err
			}
			if len(c) < 2 {
				return nil, fmt.Errorf("function did not return minimum number of coordinates got %v expected 2", len(c))
			}
			line[i][0], line[i][1] = c[0], c[1]
		}
		return line, nil

	case MultiLineString:
		lines := make(MultiLineString, len(geo))
		for i, line := range geo {
			// Recurse; we get a Geometry interface back.
			linei, err := ApplyToPoints(LineString(line), f)
			if err != nil {
				return nil, fmt.Errorf("got error converting line(%v) of multiline: %v", i, err)
			}
			// Recover the concrete value. A failed assertion here is a
			// programmer error: the recursive call always returns a LineString
			// for a LineString input.
			linev, ok := linei.(LineString)
			if !ok {
				panic("we did not get the conversion we were expecting")
			}
			lines[i] = linev
		}
		return lines, nil

	case Polygon:
		poly := make(Polygon, len(geo))
		for i, line := range geo {
			// Each ring of the polygon is transformed as a LineString.
			linei, err := ApplyToPoints(LineString(line), f)
			if err != nil {
				return nil, fmt.Errorf("got error converting line(%v) of polygon: %v", i, err)
			}
			linev, ok := linei.(LineString)
			if !ok {
				panic("we did not get the conversion we were expecting")
			}
			poly[i] = linev
		}
		return poly, nil

	case MultiPolygon:
		mpoly := make(MultiPolygon, len(geo))
		for i, poly := range geo {
			polyi, err := ApplyToPoints(Polygon(poly), f)
			if err != nil {
				return nil, fmt.Errorf("got error converting poly(%v) of multipolygon: %v", i, err)
			}
			polyv, ok := polyi.(Polygon)
			if !ok {
				panic("we did not get the conversion we were expecting")
			}
			mpoly[i] = polyv
		}
		return mpoly, nil
	}
}

// Clone returns a deep clone of the Geometry. An unknown concrete geometry
// type yields a non-nil error.
func Clone(geometry Geometry) (Geometry, error) {
	switch geo := geometry.(type) {
	default:
		return nil, fmt.Errorf("unknown Geometry: %T", geometry)

	case Point:
		return Point{geo.X(), geo.Y()}, nil

	case MultiPoint:
		// Elements are plain value points, so a shallow copy of the slice is a
		// deep clone.
		pts := make(MultiPoint, len(geo))
		copy(pts, geo)
		return pts, nil

	case LineString:
		line := make(LineString, len(geo))
		copy(line, geo)
		return line, nil

	case MultiLineString:
		lines := make(MultiLineString, len(geo))
		for i, line := range geo {
			// Recurse; we get a Geometry interface back.
			linei, err := Clone(LineString(line))
			if err != nil {
				return nil, fmt.Errorf("got error converting line(%v) of multiline: %v", i, err)
			}
			linev, ok := linei.(LineString)
			if !ok {
				panic("we did not get the conversion we were expecting")
			}
			lines[i] = linev
		}
		return lines, nil

	case Polygon:
		poly := make(Polygon, len(geo))
		for i, line := range geo {
			// Each ring is cloned as a LineString.
			linei, err := Clone(LineString(line))
			if err != nil {
				return nil, fmt.Errorf("got error converting line(%v) of polygon: %v", i, err)
			}
			linev, ok := linei.(LineString)
			if !ok {
				panic("we did not get the conversion we were expecting")
			}
			poly[i] = linev
		}
		return poly, nil

	case MultiPolygon:
		mpoly := make(MultiPolygon, len(geo))
		for i, poly := range geo {
			polyi, err := Clone(Polygon(poly))
			if err != nil {
				return nil, fmt.Errorf("got error converting polygon(%v) of multipolygon: %v", i, err)
			}
			polyv, ok := polyi.(Polygon)
			if !ok {
				panic("we did not get the conversion we were expecting")
			}
			mpoly[i] = polyv
		}
		return mpoly, nil
	}
}
utils.go
0.779112
0.638046
utils.go
starcoder
package slices

import "fmt"

// slice capacity vs slice length: https://tour.golang.org/moretypes/11
// The length of a slice is the number of elements it contains; the capacity is
// the number of elements in the underlying array, counting from the first
// element of the slice.

// Slices demonstrates basic slice operations: creation with make, index
// assignment, append, slicing expressions, and three element-removal patterns.
func Slices() {
	// A slice with length 2 and capacity 100.
	grades := make([]string, 2, 100)

	// Elements within the length can be assigned through their index.
	grades[0] = "khkhkhkhkhkhkhkh"
	grades[1] = "a7a"
	fmt.Printf("%v, %T \n", grades, grades)
	fmt.Printf("grades slice length: %v, %T \n", len(grades), len(grades))
	fmt.Printf("grades slice capacity: %v, %T \n", cap(grades), cap(grades))

	// append grows the length; the capacity of 100 already has room.
	grades = append(grades, "wOw")
	fmt.Printf("%v, %T \n", grades, grades)
	fmt.Printf("grades slice length: %v, %T \n", len(grades), len(grades))
	fmt.Printf("grades slice capacity: %v, %T \n", cap(grades), cap(grades))

	femaleStudents := []string{"Shery", "Nefertiti", "Mirna", "Marina", "Isis", "Teti"}
	maleStudents := []string{"Abanoub", "Bakhom", "Fam", "Ramses", "Kamus", "Ahmus"}

	// Slicing expressions: s[low:high] covers indices low through high-1.
	everyone := maleStudents[:]   // view of all elements
	fromThird := maleStudents[2:] // from the 3rd element to the end
	firstFour := maleStudents[:4] // the first 4 elements
	middle := maleStudents[2:5]   // 3rd through 5th element
	fifth := maleStudents[4]      // plain indexing: the 5th element itself

	// Removal patterns:
	femaleStudents = femaleStudents[1:]                                // drop the first element
	femaleStudents = femaleStudents[:len(femaleStudents)-1]            // drop the last element
	femaleStudents = append(femaleStudents[:2], femaleStudents[3:]...) // drop index 2 ("Marina")

	fmt.Printf("%v, %T \n", femaleStudents, femaleStudents)
	fmt.Printf("femaleStudents slice length: %v, %T \n", len(femaleStudents), len(femaleStudents))
	fmt.Printf("femaleStudents slice capacity: %v, %T \n", cap(femaleStudents), cap(femaleStudents))

	fmt.Printf("%v, %T \n", maleStudents, maleStudents)
	fmt.Printf("%v, %T \n", everyone, everyone)
	fmt.Printf("%v, %T \n", fromThird, fromThird)
	fmt.Printf("%v, %T \n", firstFour, firstFour)
	fmt.Printf("%v, %T \n", middle, middle)
	fmt.Printf("%v, %T \n", fifth, fifth)
}
slices/slices.go
0.60778
0.435121
slices.go
starcoder
package table

import (
	"reflect"

	"github.com/aclements/go-gg/generic"
)

// Pivot converts rows of g into columns. label and value must name
// columns in g, and the label column must have type []string. Pivot
// returns a Grouping with a new column named after each distinct
// value in the label column, where the values in that column
// correspond to the values from the value column. All other columns
// (besides label and value) are copied to the output. If, for a given
// column in an output row, no input row has that column in the label
// column, the output cell will have the zero value for its type.
func Pivot(g Grouping, label, value string) Grouping {
	// Find all unique values of label. These are the new columns.
	// lset maps a label to its output-column index; labels preserves
	// first-seen order.
	labels := []string{}
	lset := map[string]int{}
	for _, gid := range g.Tables() {
		for _, l := range g.Table(gid).MustColumn(label).([]string) {
			if _, ok := lset[l]; !ok {
				lset[l] = len(lset)
				labels = append(labels, l)
			}
		}
	}

	// Get all columns that are not label or value. These identify an
	// output row.
	groupCols := []string{}
	for _, col := range g.Columns() {
		if col != label && col != value {
			groupCols = append(groupCols, col)
		}
	}

	return MapTables(g, func(_ GroupID, t *Table) *Table {
		var nt Builder

		// Group by all other columns. Each group in gg
		// becomes an output row.
		gg := GroupBy(t, groupCols...)

		// Copy grouped-by values: one representative value (index 0) per
		// group, built reflectively so any column type works.
		for _, groupCol := range groupCols {
			cv := reflect.MakeSlice(reflect.TypeOf(t.Column(groupCol)), len(gg.Tables()), len(gg.Tables()))
			for i, gid := range gg.Tables() {
				sub := gg.Table(gid)
				cv.Index(i).Set(reflect.ValueOf(sub.Column(groupCol)).Index(0))
			}
			nt.Add(groupCol, cv.Interface())
		}

		// Initialize new columns. MakeSlice zero-fills, which supplies the
		// documented zero value for labels missing from a row.
		newCols := make([]reflect.Value, len(lset))
		vt := reflect.TypeOf(t.MustColumn(value))
		for i := range newCols {
			newCols[i] = reflect.MakeSlice(vt, len(gg.Tables()), len(gg.Tables()))
		}

		// Fill in new columns. Row i (group i) gets, in column lset[l],
		// the value paired with label l in that group.
		for i, gid := range gg.Tables() {
			sub := gg.Table(gid)
			vcol := reflect.ValueOf(sub.MustColumn(value))
			for j, l := range sub.MustColumn(label).([]string) {
				val := vcol.Index(j)
				newCols[lset[l]].Index(i).Set(val)
			}
		}

		// Add new columns to output table.
		for i, newCol := range newCols {
			nt.Add(labels[i], newCol.Interface())
		}

		return nt.Done()
	})
}

// Unpivot converts columns of g into rows. The returned Grouping
// consists of the columns of g *not* listed in cols, plus two columns
// named by the label and value arguments. For each input row in g,
// the returned Grouping will have len(cols) output rows. The i'th
// such output row corresponds to column cols[i] in the input row. The
// label column will contain the name of the unpivoted column,
// cols[i], and the value column will contain that column's value from
// the input row. The values of all other columns in the input row
// will be repeated across the output rows. All columns in cols must
// have the same type.
func Unpivot(g Grouping, label, value string, cols ...string) Grouping {
	if len(cols) == 0 {
		panic("Unpivot requires at least 1 column")
	}

	// Set of column names being unpivoted, for quick membership tests.
	colSet := map[string]bool{}
	for _, col := range cols {
		colSet[col] = true
	}

	return MapTables(g, func(_ GroupID, t *Table) *Table {
		var nt Builder

		// Repeat all other columns len(cols) times: input row i expands to
		// output rows i*len(cols) .. i*len(cols)+len(cols)-1.
		ntlen := t.Len() * len(cols)
		for _, name := range t.Columns() {
			if colSet[name] || name == label || name == value {
				continue
			}
			col := reflect.ValueOf(t.Column(name))
			ncol := reflect.MakeSlice(col.Type(), ntlen, ntlen)
			for i, l := 0, col.Len(); i < l; i++ {
				v := col.Index(i)
				for j := range cols {
					ncol.Index(i*len(cols) + j).Set(v)
				}
			}
			nt.Add(name, ncol.Interface())
		}

		// Get input columns, checking that they all share one type
		// (required so they can be merged into a single value column).
		var vt reflect.Type
		colvs := make([]reflect.Value, len(cols))
		for i, col := range cols {
			colvs[i] = reflect.ValueOf(t.MustColumn(col))
			if i == 0 {
				vt = colvs[i].Type()
			} else if vt != colvs[i].Type() {
				panic(&generic.TypeError{vt, colvs[i].Type(), "; cannot Unpivot columns with different types"})
			}
		}

		// Create label and value columns: for each input row, the label
		// column repeats cols in order and the value column interleaves
		// the corresponding cell values.
		lcol := make([]string, 0, ntlen)
		vcol := reflect.MakeSlice(vt, ntlen, ntlen)
		for i := 0; i < t.Len(); i++ {
			lcol = append(lcol, cols...)
			for j, colv := range colvs {
				vcol.Index(i*len(cols) + j).Set(colv.Index(i))
			}
		}
		nt.Add(label, lcol).Add(value, vcol.Interface())

		return nt.Done()
	})
}
vendor/github.com/aclements/go-gg/table/pivot.go
0.706494
0.57081
pivot.go
starcoder
package iso20022 // Specifies rates of a corporate action. type CorporateActionRate3 struct { // Annual rate of a financial instrument. Interest *RateAndAmountFormat3Choice `xml:"Intrst,omitempty"` // Percentage of securities the offeror/issuer will purchase or redeem under the terms of the event. This can be a number or the term "any and all". PercentageSought *RateFormat5Choice `xml:"PctgSght,omitempty"` // Index rate related to the interest rate of the forthcoming interest payment. RelatedIndex *RateFormat2Choice `xml:"RltdIndx,omitempty"` // Margin allowed over or under a given rate. Spread *RateFormat2Choice `xml:"Sprd,omitempty"` // Acceptable price increment used for submitting a bid. BidInterval *RateAndAmountFormat3Choice `xml:"BidIntrvl,omitempty"` // Factor used to calculate the value of the outstanding principal of the financial instrument (for factored securities) until the next redemption (factor) date. PreviousFactor *RateFormat3Choice `xml:"PrvsFctr,omitempty"` // Factor used to calculate the value of the outstanding principal of the financial instrument (for factored securities) that will applicable after the redemption (factor) date. NextFactor *RateFormat3Choice `xml:"NxtFctr,omitempty"` // Rate of discount for securities purchased through a reinvestment scheme as compared to the current market price of security. 
ReinvestmentDiscountRateToMarket *RateFormat2Choice `xml:"RinvstmtDscntRateToMkt,omitempty"` } func (c *CorporateActionRate3) AddInterest() *RateAndAmountFormat3Choice { c.Interest = new(RateAndAmountFormat3Choice) return c.Interest } func (c *CorporateActionRate3) AddPercentageSought() *RateFormat5Choice { c.PercentageSought = new(RateFormat5Choice) return c.PercentageSought } func (c *CorporateActionRate3) AddRelatedIndex() *RateFormat2Choice { c.RelatedIndex = new(RateFormat2Choice) return c.RelatedIndex } func (c *CorporateActionRate3) AddSpread() *RateFormat2Choice { c.Spread = new(RateFormat2Choice) return c.Spread } func (c *CorporateActionRate3) AddBidInterval() *RateAndAmountFormat3Choice { c.BidInterval = new(RateAndAmountFormat3Choice) return c.BidInterval } func (c *CorporateActionRate3) AddPreviousFactor() *RateFormat3Choice { c.PreviousFactor = new(RateFormat3Choice) return c.PreviousFactor } func (c *CorporateActionRate3) AddNextFactor() *RateFormat3Choice { c.NextFactor = new(RateFormat3Choice) return c.NextFactor } func (c *CorporateActionRate3) AddReinvestmentDiscountRateToMarket() *RateFormat2Choice { c.ReinvestmentDiscountRateToMarket = new(RateFormat2Choice) return c.ReinvestmentDiscountRateToMarket }
CorporateActionRate3.go
0.824638
0.574335
CorporateActionRate3.go
starcoder
package tinystat import ( "math" "gonum.org/v1/gonum/stat" "gonum.org/v1/gonum/stat/distuv" ) // A Summary is a statistical summary of a normally distributed data set. type Summary struct { N float64 // N is the number of measurements in the set. Mean float64 // Mean is the arithmetic mean of the measurements. Variance float64 // Variance is the sample variance of the data set. } // StdDev returns the standard deviation of the sample. func (s *Summary) StdDev() float64 { return math.Sqrt(s.Variance) } // StdErr returns the standard error of the sample. func (s *Summary) StdErr() float64 { return stat.StdErr(s.StdDev(), s.N) } // Summarize analyzes the given data set and returns a Summary. func Summarize(data []float64) Summary { m, v := stat.MeanVariance(data, nil) return Summary{Mean: m, Variance: v, N: float64(len(data))} } // Difference represents the statistical difference between two Summary values. type Difference struct { // Effect is the absolute difference between the samples' means. Effect float64 // EffectSize is the difference in means between the two samples, normalized for variance. // Technically, this is Cohen's d. EffectSize float64 // CriticalValue is the minimum allowed Effect at the given confidence level. CriticalValue float64 // PValue is the p-value for the test: the probability that accepting the results of this test // will be a Type 1 error, in which the null hypothesis (i.e. there is no difference between the // means of the two samples) will be rejected when it is in fact true. PValue float64 // Alpha is the significance level of the test. It is the maximum allowed value of the p-value. Alpha float64 // Beta is the probability of a Type 2 error: the probability that the null hypothesis will be // retained despite it not being true. Beta float64 } // Significant returns true if the difference is statistically significant. 
func (d Difference) Significant() bool { return d.Effect > d.CriticalValue } // Compare returns the statistical difference between the two summaries using a two-tailed Welch's // t-test. The confidence level must be in the range (0, 100). func Compare(control, experiment Summary, confidence float64) Difference { if 0 >= confidence || 1 >= confidence { panic("confidence must be between 0 and 1") } a, b := control, experiment // Calculate the significance level. alpha := 1 - (confidence / 100) // Calculate the degrees of freedom. nu := math.Pow(a.Variance/a.N+b.Variance/b.N, 2) / (math.Pow(a.Variance, 2)/(math.Pow(a.N, 2)*(a.N-1)) + math.Pow(b.Variance, 2)/(math.Pow(b.N, 2)*(b.N-1))) // Create a Student's T distribution with location of 0, a scale of 1, and a shape of the number // of degrees of freedom in the test. studentsT := distuv.StudentsT{Mu: 0, Sigma: 1, Nu: nu} // Calculate the hypothetical two-tailed t-value for the given significance level. tHyp := studentsT.Quantile(1 - (alpha / tails)) // Calculate the absolute difference between the means of the two samples. d := math.Abs(a.Mean - b.Mean) // Calculate the standard error. s := math.Sqrt(a.Variance/a.N + b.Variance/b.N) // Calculate the experimental t-value. tExp := d / s // Calculate the p-value given the experimental t-value. p := studentsT.CDF(-tExp) * tails // Calculate the critical value. cv := tHyp * s // Calculate the standard deviation using mean variance. sd := math.Sqrt((a.Variance + b.Variance) / 2) // Calculate Cohen's d for the effect size. cd := d / sd // Create a standard normal distribution. stdNormal := distuv.UnitNormal // Calculate the statistical power. z := d / (sd * math.Sqrt(1/a.N+1/b.N)) za := stdNormal.Quantile(1 - alpha/tails) beta := stdNormal.CDF(z-za) - stdNormal.CDF(-z-za) return Difference{ Effect: d, CriticalValue: cv, EffectSize: cd, PValue: p, Alpha: alpha, Beta: beta, } } // tails is the number of distribution tails used to determine significance. 
In this case, we always // use a two-tailed test because our null hypothesis is that the samples are not different. const tails = 2
tinystat.go
0.933764
0.812607
tinystat.go
starcoder
package geotools import ( "bytes" "encoding/binary" "encoding/hex" "fmt" "googlemaps.github.io/maps" ) const ( defaultSRID = 4326 // https://en.wikipedia.org/wiki/World_Geodetic_System ) type pointEWKB struct { Endiness byte Type uint32 SRID uint32 X float64 Y float64 } // LatLnger defines a struct that can convert itself to cartesian coordinates. type LatLnger interface { LatLng() []float64 } // LatLng implements LatLnger type LatLng maps.LatLng // LatLng returns an array of [lat, lon] func (l LatLng) LatLng() []float64 { return []float64{l.Lng, l.Lat} } // Point is a standard GeoJSON 2d Point with x,y coordinates type Point struct { Type string `json:"type"` Coordinates []float64 `json:"coordinates"` } // NewPoint creates a GeoJSON 2d point. func NewPoint(x, y float64) *Point { p := Point{Type: "point", Coordinates: []float64{x, y}} return &p } // NewPointFromLatLng creates a cartesian point that represents the given // geographic coordinates. func NewPointFromLatLng(lat, lng float64) *Point { return NewPoint(lng, lat) } // LatLng implements LatLngr. func (p Point) LatLng() []float64 { return []float64{p.Coordinates[1], p.Coordinates[0]} } // MarshalDB prepares the point to be stored. func (p Point) MarshalDB() (interface{}, error) { return p.WKT(), nil } // UnmarshalDB converts an stored point into a Point struct. func (p *Point) UnmarshalDB(v interface{}) error { s := string(v.([]byte)) b, err := hex.DecodeString(s) if err != nil { return err } buf := bytes.NewReader(b) var ewkb pointEWKB err = binary.Read(buf, binary.LittleEndian, &ewkb) if err != nil { return err } p.Type = "point" p.Coordinates = []float64{ewkb.X, ewkb.Y} return nil } // WKT implements Geometry. func (p Point) WKT() string { return fmt.Sprintf("SRID=%d;POINT(%0.6f %0.6f)", defaultSRID, p.Coordinates[0], p.Coordinates[1]) } // String returns a text representation of the point. func (p Point) String() string { return p.WKT() } // PointFromLatLng converts a latLng to a cartesian Point. 
func PointFromLatLng(latlon LatLnger) *Point { l := latlon.LatLng() x, y := l[1], l[0] return NewPoint(x, y) } // LatLngFromPoint converts a Point to a LatLng. func LatLngFromPoint(p Point) *LatLng { return &LatLng{Lat: p.Coordinates[1], Lng: p.Coordinates[0]} } // InstagramLocation is an object representing the location information returned by the Instagram API type InstagramLocation struct { Latitude float64 Longitude float64 ID string Name string } // LatLng implements LatLnger func (l InstagramLocation) LatLng() []float64 { return []float64{l.Latitude, l.Longitude} } // FacebookLocation is an object representing the location information returned // by the Facebook API type FacebookLocation InstagramLocation // TwitterLocation is an object representing the location information returned // by the Twitter API (a GeoJSON Point) type TwitterLocation Point // Geometry defines WKT, which provides a text representation of a vector. type Geometry interface { WKT() string } // Envelope is a GeoJSON like shape where coordinates contains [[left, top], // [right, bottom]] type Envelope struct { Type string `json:"type"` Coordinates [][]float64 `json:"coordinates"` } // NewEnvelope creates an envelope. func NewEnvelope(left, top, right, bottom float64) *Envelope { e := Envelope{Type: "envelope", Coordinates: [][]float64{[]float64{left, top}, []float64{right, bottom}}} return &e } // WKT implements Geometry. func (e Envelope) WKT() string { l, t := e.Coordinates[0][0], e.Coordinates[0][1] r, b := e.Coordinates[1][0], e.Coordinates[1][1] return fmt.Sprintf("POLYGON((%0.6f %0.6f, %0.6f %0.6f, %0.6f %0.6f, %0.6f %0.6f, %0.6f %0.6f))", l, t, l, b, r, b, r, t, l, t) } // MarshalDB implements db.Marshaler func (e Envelope) MarshalDB() (interface{}, error) { return e.WKT(), nil } // Implements fmt.Stringer func (e Envelope) String() string { return e.WKT() }
shapes.go
0.859987
0.474875
shapes.go
starcoder
package geoutil import ( "encoding/json" "fmt" "regexp" "strings" "github.com/PerformLine/go-stockutil/rxutil" "github.com/PerformLine/go-stockutil/typeutil" ) var rxDistanceExtract = regexp.MustCompile(`^(?P<number>\d+(?:\.\d+)?)\s*(?P<unit>[\w\s]+)\W*$`) type Distance float64 const ( Meter = 1 Kilometer = 1000 Foot = 0.3048 Yard = 0.9144 Mile = 1609.344 NauticalMile = 1852 ) var DistanceDisplayUnit = MeasurementSystem(Imperial) func MustParseDistance(in interface{}) Distance { if distance, err := ParseDistance(in); err == nil { return distance } else { panic(`invalid distance: ` + err.Error()) } } func ParseDistance(in interface{}) (Distance, error) { if typeutil.IsZero(in) { return 0, nil } if match := rxutil.Match(rxDistanceExtract, strings.TrimSpace(fmt.Sprintf("%v", in))); match != nil { if v := typeutil.V(match.Group(`number`)).Float(); v >= 0 { unit := match.Group(`unit`) unit = strings.TrimSpace(unit) unit = strings.ToLower(unit) unit = strings.TrimSuffix(unit, `s`) switch unit { case `meter`, `m`: return Distance(v), nil case `kilometer`, `km`: return Distance(v) * Kilometer, nil case `mile`, `mi`: return Distance(v) * Mile, nil case `feet`, `foot`, `ft`: return Distance(v) * Foot, nil case `yard`, `yd`: return Distance(v) * Yard, nil case `nm`, `nautical mile`: return Distance(v) * NauticalMile, nil default: return 0, fmt.Errorf("Unrecognized distance unit %q", unit) } } else { return 0, fmt.Errorf("Unable to extract number from distance value") } } else if v := typeutil.V(in).Float(); v >= 0 { return Distance(v), nil } else { return 0, fmt.Errorf("unable to parse distance value") } } func (self Distance) Within(other Distance) bool { return (self <= other) } func (self Distance) Beyond(other Distance) bool { return (self > other) } func (self Distance) Equal(other Distance) bool { return (self == other) } func (self Distance) MetricString() string { switch { case self >= 10*Kilometer: return fmt.Sprintf("%.0f kilometers", self/Kilometer) case self > 
Kilometer: return fmt.Sprintf("%.1f kilometers", self/Kilometer) case self == Kilometer: return fmt.Sprintf("%.0f kilometer", self/Kilometer) default: return fmt.Sprintf("%.0f meters", self) } } func (self Distance) ImperialString() string { switch { case self >= 5*Mile: return fmt.Sprintf("%.0f miles", self/Mile) case self >= (0.9*Mile) && self <= (1.1*Mile): return fmt.Sprintf("%.0f mile", self/Mile) case self >= (0.1 * Mile): return fmt.Sprintf("%.1f miles", self/Mile) default: return fmt.Sprintf("%.0f feet", self/Foot) } } func (self Distance) MarshalJSON() ([]byte, error) { return json.Marshal(map[string]interface{}{ `value`: float64(self), `units`: map[string]interface{}{ `default`: self.String(), `imperial`: self.ImperialString(), `metric`: self.MetricString(), }, }) } func (self *Distance) UnmarshalJSON(data []byte) error { if typeutil.IsScalar(string(data)) { var v float64 if err := json.Unmarshal(data, &v); err == nil { *self = Distance(v) return nil } else { return err } } else { var in map[string]interface{} if err := json.Unmarshal(data, &in); err == nil { *self = Distance(typeutil.V(in[`value`]).Float()) return nil } else { return err } } } func (self Distance) String() string { switch DistanceDisplayUnit { case Metric: return self.MetricString() case Imperial: return self.ImperialString() } return fmt.Sprintf("%f meters", self) }
geoutil/distance.go
0.709523
0.411406
distance.go
starcoder
package v1alpha1 import ( internalinterfaces "kubeform.dev/provider-aws-api/client/informers/externalversions/internalinterfaces" ) // Interface provides access to all the informers in this group version. type Interface interface { // ByteMatchSets returns a ByteMatchSetInformer. ByteMatchSets() ByteMatchSetInformer // GeoMatchSets returns a GeoMatchSetInformer. GeoMatchSets() GeoMatchSetInformer // Ipsets returns a IpsetInformer. Ipsets() IpsetInformer // RateBasedRules returns a RateBasedRuleInformer. RateBasedRules() RateBasedRuleInformer // RegexMatchSets returns a RegexMatchSetInformer. RegexMatchSets() RegexMatchSetInformer // RegexPatternSets returns a RegexPatternSetInformer. RegexPatternSets() RegexPatternSetInformer // Rules returns a RuleInformer. Rules() RuleInformer // RuleGroups returns a RuleGroupInformer. RuleGroups() RuleGroupInformer // SizeConstraintSets returns a SizeConstraintSetInformer. SizeConstraintSets() SizeConstraintSetInformer // SqlInjectionMatchSets returns a SqlInjectionMatchSetInformer. SqlInjectionMatchSets() SqlInjectionMatchSetInformer // WebACLs returns a WebACLInformer. WebACLs() WebACLInformer // WebACLAssociations returns a WebACLAssociationInformer. WebACLAssociations() WebACLAssociationInformer // XssMatchSets returns a XssMatchSetInformer. XssMatchSets() XssMatchSetInformer } type version struct { factory internalinterfaces.SharedInformerFactory namespace string tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // ByteMatchSets returns a ByteMatchSetInformer. 
func (v *version) ByteMatchSets() ByteMatchSetInformer { return &byteMatchSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // GeoMatchSets returns a GeoMatchSetInformer. func (v *version) GeoMatchSets() GeoMatchSetInformer { return &geoMatchSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // Ipsets returns a IpsetInformer. func (v *version) Ipsets() IpsetInformer { return &ipsetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // RateBasedRules returns a RateBasedRuleInformer. func (v *version) RateBasedRules() RateBasedRuleInformer { return &rateBasedRuleInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // RegexMatchSets returns a RegexMatchSetInformer. func (v *version) RegexMatchSets() RegexMatchSetInformer { return &regexMatchSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // RegexPatternSets returns a RegexPatternSetInformer. func (v *version) RegexPatternSets() RegexPatternSetInformer { return &regexPatternSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // Rules returns a RuleInformer. func (v *version) Rules() RuleInformer { return &ruleInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // RuleGroups returns a RuleGroupInformer. func (v *version) RuleGroups() RuleGroupInformer { return &ruleGroupInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // SizeConstraintSets returns a SizeConstraintSetInformer. func (v *version) SizeConstraintSets() SizeConstraintSetInformer { return &sizeConstraintSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // SqlInjectionMatchSets returns a SqlInjectionMatchSetInformer. 
func (v *version) SqlInjectionMatchSets() SqlInjectionMatchSetInformer { return &sqlInjectionMatchSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // WebACLs returns a WebACLInformer. func (v *version) WebACLs() WebACLInformer { return &webACLInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // WebACLAssociations returns a WebACLAssociationInformer. func (v *version) WebACLAssociations() WebACLAssociationInformer { return &webACLAssociationInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // XssMatchSets returns a XssMatchSetInformer. func (v *version) XssMatchSets() XssMatchSetInformer { return &xssMatchSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} }
client/informers/externalversions/wafregional/v1alpha1/interface.go
0.80651
0.40116
interface.go
starcoder
package util type Set struct { values map[string]struct{} } // Create a new empty set func NewSet() *Set { set := &Set{} set.values = make(map[string]struct{}) return set } // Create a new set from the array func CreateSet(values []string) *Set { set := &Set{} set.values = make(map[string]struct{}) for _, value := range values { set.add(value) } return set } // Create a copy of the set func (s *Set) Copy() *Set { set := NewSet() for value, _ := range s.values { set.values[value] = struct{}{} } return set } // Subtract the subset from the set func (s *Set) Subtract(subset *Set) { // Iterate over each element in the set to see if it's in the subset for value := range s.values { if _, ok := subset.values[value]; ok { delete(s.values, value) } } } // Add a value to the set. Returns true if the value was added, false if it already exists. func (s *Set) Add(newValue string) bool { if _, ok := s.values[newValue]; !ok { s.add(newValue) return true } // The value is already in the set return false } // Add a value to the set. Returns true if the value was added, false if it already exists. 
func (s *Set) Remove(oldValue string) bool { if _, ok := s.values[oldValue]; ok { delete(s.values, oldValue) return true } // The value is not in the set return false } // Add the value to the set func (s *Set) add(value string) { s.values[value] = struct{}{} } // Check whether a value is already contained in the set func (s *Set) Contains(value string) bool { _, ok := s.values[value] return ok } // Iterate over the items in the set func (s *Set) Iter() <-chan string { channel := make(chan string) go func() { for value := range s.values { channel <- value } close(channel) }() return channel } // Get the count of items in the set func (s *Set) Count() int { return len(s.values) } // Add other set items func (s *Set) AddSet(other *Set) { for value := range other.Iter() { s.add(value) } } // Add multiple items more efficiently func (s *Set) AddMultiple(values []string) { for _, value := range values { s.add(value) } } // Check if two sets contain the same elements func (s *Set) Equals(other *Set) bool { if s.Count() != other.Count() { return false } for value := range s.Iter() { if !other.Contains(value) { return false } } return true } // Convert the set to an array func (s *Set) ToSlice() []string { values := []string{} for value := range s.values { values = append(values, value) } return values } // find items in the left slice that are not in the right slice func SetDifference(left, right []string) *Set { result := NewSet() for _, leftItem := range left { foundItem := false // search for the left item in the right set for _, rightItem := range right { if leftItem == rightItem { foundItem = true break } } if !foundItem { result.Add(leftItem) } } return result }
pkg/util/set.go
0.764892
0.452113
set.go
starcoder
package events import ( "bytes" "fmt" "strings" "text/template" "github.com/Masterminds/sprig" ) // MarkdownRenderer renders responses as markdown. type MarkdownRenderer struct{} // CommonData is data that all responses have. type CommonData struct { Command string Verbose bool Log string } // ErrData is data about an error response. type ErrData struct { Error string CommonData } // FailureData is data about a failure response. type FailureData struct { Failure string CommonData } // ResultData is data about a successful response. type ResultData struct { Results []ProjectResultTmplData CommonData } type ProjectResultTmplData struct { Workspace string RepoRelDir string Rendered string } // Render formats the data into a markdown string. // nolint: interfacer func (m *MarkdownRenderer) Render(res CommandResult, cmdName CommandName, log string, verbose bool) string { commandStr := strings.Title(cmdName.String()) common := CommonData{commandStr, verbose, log} if res.Error != nil { return m.renderTemplate(errWithLogTmpl, ErrData{res.Error.Error(), common}) } if res.Failure != "" { return m.renderTemplate(failureWithLogTmpl, FailureData{res.Failure, common}) } return m.renderProjectResults(res.ProjectResults, common) } func (m *MarkdownRenderer) renderProjectResults(results []ProjectResult, common CommonData) string { var resultsTmplData []ProjectResultTmplData for _, result := range results { resultData := ProjectResultTmplData{ Workspace: result.Workspace, RepoRelDir: result.RepoRelDir, } if result.Error != nil { resultData.Rendered = m.renderTemplate(errTmpl, struct { Command string Error string }{ Command: common.Command, Error: result.Error.Error(), }) } else if result.Failure != "" { resultData.Rendered = m.renderTemplate(failureTmpl, struct { Command string Failure string }{ Command: common.Command, Failure: result.Failure, }) } else if result.PlanSuccess != nil { resultData.Rendered = m.renderTemplate(planSuccessTmpl, *result.PlanSuccess) } else if 
result.ApplySuccess != "" { resultData.Rendered = m.renderTemplate(applySuccessTmpl, struct{ Output string }{result.ApplySuccess}) } else { resultData.Rendered = "Found no template. This is a bug!" } resultsTmplData = append(resultsTmplData, resultData) } var tmpl *template.Template if len(resultsTmplData) == 1 { tmpl = singleProjectTmpl } else { tmpl = multiProjectTmpl } return m.renderTemplate(tmpl, ResultData{resultsTmplData, common}) } func (m *MarkdownRenderer) renderTemplate(tmpl *template.Template, data interface{}) string { buf := &bytes.Buffer{} if err := tmpl.Execute(buf, data); err != nil { return fmt.Sprintf("Failed to render template, this is a bug: %v", err) } return buf.String() } var singleProjectTmpl = template.Must(template.New("").Parse("{{$result := index .Results 0}}Ran {{.Command}} in dir: `{{$result.RepoRelDir}}` workspace: `{{$result.Workspace}}`\n\n{{$result.Rendered}}\n" + logTmpl)) var multiProjectTmpl = template.Must(template.New("").Funcs(sprig.TxtFuncMap()).Parse( "Ran {{.Command}} for {{ len .Results }} projects:\n" + "{{ range $result := .Results }}" + "1. workspace: `{{$result.Workspace}}` dir: `{{$result.RepoRelDir}}`\n" + "{{end}}\n" + "{{ range $i, $result := .Results }}" + "### {{add $i 1}}. 
workspace: `{{$result.Workspace}}` dir: `{{$result.RepoRelDir}}`\n" + "{{$result.Rendered}}\n" + "---\n{{end}}" + logTmpl)) var planSuccessTmpl = template.Must(template.New("").Parse( "```diff\n" + "{{.TerraformOutput}}\n" + "```\n\n" + "* To **discard** this plan click [here]({{.LockURL}}).")) var applySuccessTmpl = template.Must(template.New("").Parse( "```diff\n" + "{{.Output}}\n" + "```")) var errTmplText = "**{{.Command}} Error**\n" + "```\n" + "{{.Error}}\n" + "```\n" var errTmpl = template.Must(template.New("").Parse(errTmplText)) var errWithLogTmpl = template.Must(template.New("").Parse(errTmplText + logTmpl)) var failureTmplText = "**{{.Command}} Failed**: {{.Failure}}\n" var failureTmpl = template.Must(template.New("").Parse(failureTmplText)) var failureWithLogTmpl = template.Must(template.New("").Parse(failureTmplText + logTmpl)) var logTmpl = "{{if .Verbose}}\n<details><summary>Log</summary>\n <p>\n\n```\n{{.Log}}```\n</p></details>{{end}}\n"
server/events/markdown_renderer.go
0.570571
0.404155
markdown_renderer.go
starcoder
package scimark2 import ( "math" "math/rand" "time" ) func fft_num_flops(N int) float64 { Nd := float64(N) logN := float64(fft_log2(N)) return (5.0*Nd-2)*logN + 2*(Nd+1) } func fft_transform(data []float64) { fft_transform_internal(data, -1) } func fft_inverse(data []float64) { fft_transform_internal(data, +1) // Normalize nd := len(data) n := nd / 2 norm := 1.0 / float64(n) for i := 0; i < nd; i++ { data[i] *= norm } } func fft_test(data []float64) float64 { nd := len(data) copy := make([]float64, nd) for iii := 0; iii < (nd); iii++ { copy[(0)+iii] = data[(0)+iii] } fft_transform(data) fft_inverse(data) diff := .0 for i := 0; i < nd; i++ { d := data[i] - copy[i] diff += (d * d) } return math.Sqrt(diff / float64(nd)) } func fft_makeRandom(n int) []float64 { rand.Seed(time.Now().Unix()) nd := 2 * n data := make([]float64, nd) for i := 0; i < nd; i++ { data[i] = rand.Float64() } return data } func fft_log2(n int) int { log := uint32(0) for k := 1; k < n; k *= 2 { log++ } if n != (1 << log) { return -1 //, errors.New("FFT: Data length is not a power of 2!: " + string(n)) } return int(log) } func fft_transform_internal(data []float64, direction int) { if len(data) == 0 { return } n := len(data) / 2 if n == 1 { return } // Identity operation! logn := fft_log2(n) /* bit reverse the input data for decimation in time algorithm */ fft_bitreverse(data) /* apply fft recursion */ /* this loop executed log2(N) times */ dual := 1 for bit := 0; bit < logn; bit++ { w_real := 1.0 w_imag := 0.0 theta := 2.0 * float64(direction) * math.Pi / (2.0 * float64(dual)) s := math.Sin(theta) t := math.Sin(theta / 2.0) s2 := 2.0 * t * t /* a = 0 */ for b := 0; b < n; b += 2 * dual { i := 2 * b j := 2 * (b + dual) wd_real := data[j] wd_imag := data[j+1] data[j] = data[i] - wd_real data[j+1] = data[i+1] - wd_imag data[i] += wd_real data[i+1] += wd_imag } /* a = 1 .. 
(dual-1) */ for a := 1; a < dual; a++ { /* trignometric recurrence for w-> exp(i theta) w */ { tmp_real := w_real - s*w_imag - s2*w_real tmp_imag := w_imag + s*w_real - s2*w_imag w_real = tmp_real w_imag = tmp_imag } for b := 0; b < n; b += 2 * dual { i := 2 * (b + a) j := 2 * (b + a + dual) z1_real := data[j] z1_imag := data[j+1] wd_real := w_real*z1_real - w_imag*z1_imag wd_imag := w_real*z1_imag + w_imag*z1_real data[j] = data[i] - wd_real data[j+1] = data[i+1] - wd_imag data[i] += wd_real data[i+1] += wd_imag } } dual *= 2 } } func fft_bitreverse(data []float64) { /* This is the Goldrader bit-reversal algorithm */ n := len(data) / 2 nm1 := n - 1 i := 0 j := 0 for ; i < nm1; i++ { ii := i << 1 jj := j << 1 k := n >> 1 if i < j { tmp_real := data[ii] tmp_imag := data[ii+1] data[ii] = data[jj] data[ii+1] = data[jj+1] data[jj] = tmp_real data[jj+1] = tmp_imag } for k <= j { j -= k k >>= 1 } j += k } }
src/go/entityfx/scimark2/fft.go
0.54819
0.401805
fft.go
starcoder
package hashdict import ( "math/bits" "github.com/peterzeller/go-fun/dict" "github.com/peterzeller/go-fun/iterable" "github.com/peterzeller/go-fun/reducer" "github.com/peterzeller/go-fun/zero" ) // a sparse array with at most 32 entries type sparseArray[T any] struct { // bitmap has a bit value of 1 where the array has an element bitmap uint32 values []T } func newSparseArray[T any](values ...dict.Entry[int, T]) (res sparseArray[T]) { res.values = make([]T, len(values)) i := 0 reducer.ApplySlice(values, reducer.Sorted(func(a, b dict.Entry[int, T]) bool { return a.Key < b.Key }, reducer.Do(func(e dict.Entry[int, T]) { res.bitmap = res.bitmap | (1 << e.Key) res.values[i] = e.Value i++ }))) return } func newSparseArraySorted[T any](values ...dict.Entry[int, T]) (res sparseArray[T]) { res.values = make([]T, len(values)) for i, e := range values { res.bitmap = res.bitmap | (1 << e.Key) res.values[i] = e.Value } return } func (a sparseArray[T]) get(i int) (T, bool) { mask := uint32(1) << i if mask&a.bitmap == 0 { return zero.Value[T](), false } // count the numbers of bits in the bitmap that are smaller than mask to get the real index realIndex := bits.OnesCount32(uint32(a.bitmap & (mask - 1))) return a.values[realIndex], true } func (a sparseArray[T]) set(i int, value T) (res sparseArray[T]) { res.bitmap = a.bitmap mask := uint32(1) << i if res.bitmap&mask == 0 { // value does not exist yet res.bitmap = res.bitmap | mask realIndex := bits.OnesCount32(uint32(a.bitmap & (mask - 1))) newValues := make([]T, len(a.values)+1) for i := 0; i < realIndex; i++ { newValues[i] = a.values[i] } newValues[realIndex] = value for i := realIndex; i < len(a.values); i++ { newValues[i+1] = a.values[i] } res.values = newValues } else { // overwrite existing value newValues := make([]T, len(a.values)) for i, v := range a.values { newValues[i] = v } realIndex := bits.OnesCount32(uint32(a.bitmap & (mask - 1))) newValues[realIndex] = value res.values = newValues } return } func (a 
sparseArray[T]) remove(i int) sparseArray[T] { mask := uint32(1) << i if a.bitmap&mask == 0 { // removed index does not exist -> unchanged return a } var res sparseArray[T] // remove from bitmap res.bitmap = a.bitmap & (^mask) // remove from array realIndex := bits.OnesCount32(uint32(a.bitmap & (mask - 1))) res.values = append(append(res.values, a.values[:realIndex]...), a.values[realIndex+1:]...) return res } func (a sparseArray[T]) size() int { return len(a.values) } func sparseArrayFilterMap[A, B any](a sparseArray[A], f func(int, A) (B, bool)) sparseArray[B] { bitmap := uint32(0) values := make([]B, 0) j := 0 for i := 0; i < 32; i++ { mask := uint32(1) << i if a.bitmap&mask == 0 { continue } newV, keep := f(i, a.values[j]) j++ if keep { bitmap = bitmap | mask values = append(values, newV) } } return sparseArray[B]{ bitmap: bitmap, values: values, } } func (a sparseArray[T]) Iterator() iterable.Iterator[dict.Entry[int, T]] { i := 0 return iterable.Fun[dict.Entry[int, T]](func() (dict.Entry[int, T], bool) { for i < 32 { if v, ok := a.get(i); ok { res := dict.Entry[int, T]{Key: i, Value: v} i++ return res, true } i++ } return zero.Value[dict.Entry[int, T]](), false }) }
dict/hashdict/sparese-array.go
0.625095
0.577078
sparese-array.go
starcoder
package gruff import ( "fmt" "github.com/jinzhu/gorm" ) /* * Arguments are described in detail in the Canonical Debate White Paper: https://github.com/canonical-debate-lab/paper#312_Argument An Argument connects a Claim to another Claim or Argument That is: a Claim can be used as an ARGUMENT to either prove or disprove the truth of a claim, or to modify the relevance or impact of another argument. The TYPE of the argument indicates how the claim (or CLAIM) is being used: PRO TRUTH: The Claim is a claim that is being used to prove the truth of another claim Ex: "The defendant was in Cincinatti on the date of the murder" CON TRUTH: The Claim is used as evidence against another claim Ex: "The defendant was hospitalized on the date of the murder" PRO RELEVANCE: The Claim is being used to show that another Argument is relevant and/or important Ex: "The murder occurred in Cincinatti" Ex: "This argument clearly shows that the defendant has no alibi" CON RELEVANCE: The Claim is being used to show that another Argument is irrelevant and/or unimportant Ex: "The murder occurred in the same hospital in which the defendant was hospitalized" Ex: "There is no evidence that the defendant ever left their room" A quick explanation of the fields: Claim: The Debate (or claim) that is being used as the basis of the argument Target Claim: The "parent" Claim against which a pro/con truth argument is being made Target Argument: In the case of a relevance or impact argument, the argument to which it refers Strength: The strength of an Argument is a combination of the Truth of its underlying Claim, and the Relevance Score. 
It is a cached value derived from the Flat popular votes, as described here: https://github.com/canonical-debate-lab/paper#33311_Flat_Scores and here: https://github.com/canonical-debate-lab/paper#33323_Popular_Vote StrengthRU: The roll-up score, similar to Strength, but rolled up to Level 1 as described here: https://github.com/canonical-debate-lab/paper#33312_Rollup_Scores To help understand the difference between relevance and impact arguments, imagine an argument is a bullet: Impact is the size of your bullet Relevance is how well you hit your target (note that because this difference is subtle enough to be difficult to separate one from the other, the two concepts are reflected together in a single score called Relevance) Scoring: Truth: 1.0 = definitely true; 0.5 = equal chance true or false; 0.0 = definitely false. "The world is flat" should have a 0.000000000000000001 truth score. Relevance: 1.0 = Completely on-topic and important; 0.5 = Circumstantial or somewhat relevant; 0.01 = Totally off-point, should be ignored Strength: 1.0 = This argument is definitely the most important argument for this side - no need to read any others; 0.5 = This is one more argument to consider; 0.01 = Probably not even worth including in the discussion */ const DEFAULT_ARGUMENT_SCORE float32 = 1.00 type Argument struct { VersionedModel TargetClaimID *string `json:"targetClaimId,omitempty"` TargetClaim *Claim `json:"targetClaim,omitempty" transient:"true"` TargetArgumentID *string `json:"targetArgId,omitempty"` TargetArgument *Argument `json:"targetArg,omitempty" transient:"true"` ClaimID string `json:"claimId"` Claim *Claim `json:"claim,omitempty" transient:"true"` Title string `json:"title" valid:"length(3|1000)"` Negation string `json:"negation"` Question string `json:"question"` Description string `json:"desc" valid:"length(3|4000)"` Note string `json:"note"` Pro bool `json:"pro"` Relevance float32 `json:"relevance"` Str float32 `json:"strength"` ProArgs []Argument 
`json:"proargs" transient:"true"` ConArgs []Argument `json:"conargs" transient:"true"` } // ArangoObject interface func (a Argument) CollectionName() string { return "arguments" } func (a Argument) ArangoKey() string { return a.Key } func (a Argument) ArangoID() string { return fmt.Sprintf("%s/%s", a.CollectionName(), a.ArangoKey()) } func (a Argument) DefaultQueryParameters() ArangoQueryParameters { return DEFAULT_QUERY_PARAMETERS } func (a *Argument) Create(ctx *ServerContext) Error { var target ArangoObject if a.TargetClaimID != nil { claim := Claim{} claim.ID = *a.TargetClaimID if err := claim.Load(ctx); err != nil { ctx.Rollback() return NewBusinessError(err.Error()) } // TODO: Test if claim.MultiPremise { ctx.Rollback() return NewBusinessError("Multi-premise claims can't have their own arguments. Arguments should be added directly to one of their premises.") } // TODO: Test if a.ClaimID == claim.ID { ctx.Rollback() return NewBusinessError("A claim cannot be used as an argument for or against itself. 
That's called \"Begging the Question\".") } // TODO: Test if err := claim.ValidateForUpdate(Updates{}); err != nil { ctx.Rollback() return err } target = &claim } else if a.TargetArgumentID != nil { arg := Argument{} arg.ID = *a.TargetArgumentID if err := arg.Load(ctx); err != nil { ctx.Rollback() return NewBusinessError(err.Error()) } // TODO: Test if err := arg.ValidateForUpdate(Updates{}); err != nil { ctx.Rollback() return err } target = &arg } var baseClaim Claim if a.ClaimID == "" { // Need to create a Base Claim for this Argument with the same title and description baseClaim = Claim{ Title: a.Title, Description: a.Description, Negation: a.Negation, Question: a.Question, Note: a.Note, } if err := baseClaim.Create(ctx); err != nil { ctx.Rollback() return err } a.ClaimID = baseClaim.ID } else { baseClaim.ID = a.ClaimID if err := baseClaim.Load(ctx); err != nil { ctx.Rollback() return err } } a.Relevance = DEFAULT_ARGUMENT_SCORE a.Str = a.Relevance * baseClaim.Truth if err := CreateArangoObject(ctx, a); err != nil { ctx.Rollback() return err } edge := BaseClaimEdge{Edge: Edge{ From: a.ArangoID(), To: baseClaim.ArangoID(), }} if err := edge.Create(ctx); err != nil { ctx.Rollback() return err } inf := Inference{Edge: Edge{ From: target.ArangoID(), To: a.ArangoID(), }} if err := inf.Create(ctx); err != nil { ctx.Rollback() return err } return nil } func (a *Argument) Update(ctx *ServerContext, updates Updates) Error { return UpdateArangoObject(ctx, a, updates) } func (a *Argument) version(ctx *ServerContext, updates Updates) Error { oldVersion := *a // Don't use the standard Delete method because it deletes arguments, too if err := oldVersion.performDelete(ctx); err != nil { ctx.Rollback() return err } if err := a.Create(ctx); err != nil { ctx.Rollback() return err } // Find all edges going to old ver, make copy to new ver // The Inference edge is created during the Create method inference, err := oldVersion.Inference(ctx) if err != nil { ctx.Rollback() return err 
} if err := inference.Delete(ctx); err != nil { ctx.Rollback() return err } // Arguments inferences, err := oldVersion.Inferences(ctx) if err != nil { ctx.Rollback() return err } for _, edge := range inferences { newEdge := Inference{Edge: Edge{ From: a.ArangoID(), To: edge.To, }} if err := newEdge.Create(ctx); err != nil { ctx.Rollback() return err } if err := edge.Delete(ctx); err != nil { ctx.Rollback() return err } } // Base Claim edge baseClaimEdge, err := oldVersion.BaseClaimEdge(ctx) if err != nil { ctx.Rollback() return err } newBaseClaimEdge := BaseClaimEdge{Edge: Edge{ To: baseClaimEdge.To, From: a.ArangoID(), }} if err := newBaseClaimEdge.Create(ctx); err != nil { ctx.Rollback() return err } if err := baseClaimEdge.Delete(ctx); err != nil { ctx.Rollback() return err } // TODO: Links // UserScores // TODO: Do this as a bulk operation // TODO: Test userScores, err := oldVersion.UserScores(ctx) if err != nil { ctx.Rollback() return err } for _, edge := range userScores { newEdge := UserScore{ Edge: Edge{ From: edge.From, To: a.ArangoID(), }, Score: edge.Score, } if err := newEdge.Create(ctx); err != nil { ctx.Rollback() return err } } if err := a.UpdateScore(ctx); err != nil { ctx.Rollback() return err } return nil } func (a *Argument) Delete(ctx *ServerContext) Error { // TODO: test if err := a.performDelete(ctx); err != nil { ctx.Rollback() return err } // Find all edges going to old ver, make copy to new ver inference, err := a.Inference(ctx) if err != nil { ctx.Rollback() return err } if err := inference.Delete(ctx); err != nil { ctx.Rollback() return err } // Base Claim edge baseClaimEdge, err := a.BaseClaimEdge(ctx) if err != nil { ctx.Rollback() return err } if err := baseClaimEdge.Delete(ctx); err != nil { ctx.Rollback() return err } // Arguments // WARNING: could create an infinite loop of deletions args, err := a.Arguments(ctx) if err != nil { ctx.Rollback() return err } for _, arg := range args { if err := arg.Delete(ctx); err != nil { 
ctx.Rollback() return err } } return nil } // Execute the delete action without verifications or deleting args func (a *Argument) performDelete(ctx *ServerContext) Error { // TODO: test if err := DeleteArangoObject(ctx, a); err != nil { ctx.Rollback() return err } // UserScores // TODO: Test filter := "obj._to == @arg" bindVars := BindVars{ "arg": a.ArangoID(), } if err := DeleteArangoObjects(ctx, UserScore{}.CollectionName(), filter, bindVars); err != nil { ctx.Rollback() return err } return nil } // Restrictor // TODO: Test // TODO: Call in CRUD and other methods func (a Argument) UserCanView(ctx *ServerContext) (bool, Error) { return true, nil } func (a Argument) UserCanCreate(ctx *ServerContext) (bool, Error) { return ctx.UserLoggedIn(), nil } func (a Argument) UserCanUpdate(ctx *ServerContext, updates Updates) (bool, Error) { return a.UserCanDelete(ctx) } func (a Argument) UserCanDelete(ctx *ServerContext) (bool, Error) { u := ctx.UserContext if u.Curator { return true, nil } return a.CreatedByID == u.ArangoID(), nil } // Validator func (a Argument) ValidateForCreate() Error { if err := a.ValidateField("title"); err != nil { return err } if err := a.ValidateField("desc"); err != nil { return err } if err := a.ValidateIDs(); err != nil { return err } return nil } func (a Argument) ValidateForUpdate(updates Updates) Error { if a.DeletedAt != nil { return NewBusinessError("An argument that has already been deleted, or has a newer version, cannot be modified.") } if err := SetJsonValuesOnStruct(&a, updates, false); err != nil { return err } return a.ValidateForCreate() } func (a Argument) ValidateForDelete() Error { if a.DeletedAt != nil { return NewBusinessError("This argument has already been deleted or versioned.") } return nil } func (a Argument) ValidateField(f string) Error { err := ValidateStructField(a, f) return err } func (a Argument) ValidateIDs() Error { if a.ClaimID == "" { return NewBusinessError("claimId: non zero value required;") } if 
a.TargetClaimID == nil && a.TargetArgumentID == nil { return NewBusinessError("An Argument must have a target Claim or target Argument ID") } if a.TargetClaimID != nil && a.TargetArgumentID != nil { return NewBusinessError("An Argument can have only one target Claim or target Argument ID") } return nil } // Loader func (a *Argument) Load(ctx *ServerContext) Error { var err Error if a.ID != "" { bindVars := BindVars{ "id": a.ID, } query := fmt.Sprintf(`FOR obj IN %s FILTER obj.id == @id %s SORT obj.start DESC LIMIT 1 RETURN obj`, a.CollectionName(), a.DateFilter(bindVars)) err = FindArangoObject(ctx, query, bindVars, a) } else if a.ArangoKey() != "" { err = LoadArangoObject(ctx, a, a.ArangoKey()) } else { err = NewBusinessError("There is no key or id for this Argument.") } return err } func (a *Argument) LoadFull(ctx *ServerContext) Error { queryAt := a.QueryAt if err := a.Load(ctx); err != nil { return err } a.QueryAt = queryAt args, err := a.Arguments(ctx) if err != nil { return err } var proArgs, conArgs []Argument for _, arg := range args { bc := Claim{} bc.ID = arg.ClaimID bc.QueryAt = a.QueryDate() if err := bc.Load(ctx); err != nil { return err } bc.QueryAt = nil arg.Claim = &bc if arg.Pro { proArgs = append(proArgs, arg) } else { conArgs = append(conArgs, arg) } } a.ProArgs = proArgs a.ConArgs = conArgs baseClaim := Claim{} baseClaim.ID = a.ClaimID baseClaim.QueryAt = a.QueryDate() if err = baseClaim.LoadFull(ctx); err != nil { return err } baseClaim.QueryAt = nil a.Claim = &baseClaim return nil } // Scorer func (a *Argument) Score(ctx *ServerContext) (float32, Error) { if a.QueryAt == nil { return a.Relevance, nil } return a.scoreAt(ctx) } func (a *Argument) UpdateScore(ctx *ServerContext) Error { // TODO: not on deleted - validate for update a.QueryAt = nil score, err := a.scoreAt(ctx) if err != nil { return err } claim := Claim{} claim.ID = a.ClaimID if err := claim.Load(ctx); err != nil { return err } truth, err := claim.Score(ctx) if err != nil { return 
err } strength := score * truth updates := Updates{ "relevance": score, "strength": strength, } col, grr := ctx.Arango.CollectionFor(a) if grr != nil { return grr } if _, err := col.UpdateDocument(ctx.Context, a.ArangoKey(), updates); err != nil { return NewServerError(err.Error()) } a.Relevance = score a.Str = strength return nil } func (a *Argument) scoreAt(ctx *ServerContext) (float32, Error) { var score float32 results := map[string]interface{}{} bindVars := BindVars{ "argument": a.ID, } query := fmt.Sprintf(`FOR obj IN %s FOR a IN %s FILTER obj._to == a._id AND a.id == @argument %s COLLECT AGGREGATE num = COUNT(obj), score = AVG(obj.score) RETURN { num, score }`, UserScore{}.CollectionName(), a.CollectionName(), a.DateFilter(bindVars)) db := ctx.Arango.DB cursor, err := db.Query(ctx.Context, query, bindVars) defer CloseCursor(cursor) if err != nil { return score, NewServerError(err.Error()) } _, err = cursor.ReadDocument(ctx.Context, &results) if err != nil { return score, NewServerError(err.Error()) } if val, ok := results["score"].(float64); ok { score = float32(val) } if score == 0.0 { if count, ok := results["num"].(float64); ok { if count == 0.0 { score = DEFAULT_ARGUMENT_SCORE } } } return score, nil } func (a *Argument) Strength(ctx *ServerContext) (float32, Error) { if a.QueryAt == nil { return a.Str, nil } relevance, err := a.Score(ctx) if err != nil { return 0.0, err } claim := Claim{} claim.ID = a.ClaimID if err := claim.Load(ctx); err != nil { return 0.0, err } truth, err := claim.Score(ctx) if err != nil { return 0.0, err } return relevance * truth, nil } func (a Argument) UserScores(ctx *ServerContext) ([]UserScore, Error) { edges := []UserScore{} bindVars := BindVars{ "to": a.ArangoID(), } query := fmt.Sprintf(`FOR obj IN %s FILTER obj._to == @to %s SORT obj.start RETURN obj`, UserScore{}.CollectionName(), a.DateFilter(bindVars)) err := FindArangoObjects(ctx, query, bindVars, &edges) return edges, err } // Business methods func (a Argument) 
AddArgument(ctx *ServerContext, arg Argument) Error { // TODO: test updates := Updates{} if err := a.ValidateForUpdate(updates); err != nil { return err } // TODO: Test can, err := a.UserCanUpdate(ctx, updates) if err != nil { return err } if !can { return NewPermissionError("You do not have permission to modify this item") } edge := Inference{Edge: Edge{ From: a.ArangoID(), To: arg.ArangoID(), }} if err := edge.Create(ctx); err != nil { ctx.Rollback() return err } return nil } func (a Argument) Arguments(ctx *ServerContext) ([]Argument, Error) { args := []Argument{} bindVars := BindVars{ "arg": a.ArangoID(), } query := fmt.Sprintf(`FOR obj IN %s FOR a IN %s FILTER obj._to == a._id AND obj._from == @arg %s SORT a.start ASC RETURN a`, Inference{}.CollectionName(), Argument{}.CollectionName(), a.DateFilter(bindVars), ) err := FindArangoObjects(ctx, query, bindVars, &args) return args, err } func (a Argument) Inferences(ctx *ServerContext) ([]Inference, Error) { edges := []Inference{} bindVars := BindVars{ "from": a.ArangoID(), } query := fmt.Sprintf(`FOR obj IN %s FILTER obj._from == @from %s RETURN obj`, Inference{}.CollectionName(), a.DateFilter(bindVars)) err := FindArangoObjects(ctx, query, bindVars, &edges) return edges, err } func (a Argument) Inference(ctx *ServerContext) (Inference, Error) { edge := Inference{} query := fmt.Sprintf("FOR e IN %s FILTER e._to == @to LIMIT 1 RETURN e", edge.CollectionName()) bindVars := BindVars{ "to": a.ArangoID(), } err := FindArangoObject(ctx, query, bindVars, &edge) return edge, err } func (a Argument) BaseClaimEdge(ctx *ServerContext) (BaseClaimEdge, Error) { edge := BaseClaimEdge{} query := fmt.Sprintf("FOR e IN %s FILTER e._from == @from LIMIT 1 RETURN e", edge.CollectionName()) bindVars := BindVars{ "from": a.ArangoID(), } err := FindArangoObject(ctx, query, bindVars, &edge) return edge, err } func (a *Argument) LoadTarget(ctx *ServerContext) Error { var err Error if a.TargetClaimID != nil { t := Claim{} t.ID = 
*a.TargetClaimID t.QueryAt = a.QueryAt err = t.Load(ctx) a.TargetClaim = &t } else if a.TargetArgumentID != nil { t := Argument{} t.ID = *a.TargetArgumentID t.QueryAt = a.QueryAt err = t.Load(ctx) a.TargetArgument = &t } return err } // Curation // TODO: Test func (a *Argument) MoveTo(ctx *ServerContext, target ArangoObject, pro bool) Error { // Create a new version with the new target id updates := Updates{ "pro": pro, } if claim, ok := target.(*Claim); ok { if claim.ID == a.ClaimID { return NewBusinessError("An argument cannot be moved to its own base claim") } if a.TargetClaimID != nil && *a.TargetClaimID == claim.ID && a.Pro == pro { // Ignore the request return nil } updates["targetClaimId"] = claim.ID if a.TargetArgumentID != nil { updates["targetArgId"] = nil } } else if arg, ok := target.(*Argument); ok { if arg.TargetArgumentID != nil && *arg.TargetArgumentID == a.ID { return NewBusinessError("An argument cannot be moved to one of its own arguments") } if a.TargetArgumentID != nil && *a.TargetArgumentID == arg.ID && a.Pro == pro { // Ignore the request return nil } if a.TargetClaimID != nil { updates["targetClaimId"] = nil } updates["targetArgId"] = arg.ID } else { ctx.Rollback() return NewServerError("Target must be either a claim or another argument") } if err := a.Update(ctx, updates); err != nil { ctx.Rollback() return err } // Point the (new) inference to the new target inference, err := a.Inference(ctx) if err != nil { ctx.Rollback() return err } updates = Updates{ "_from": target.ArangoID(), } if err := UpdateArangoObject(ctx, &inference, updates); err != nil { ctx.Rollback() return err } // TODO: Handle/invalidate scores // TODO: re-evalute relevance of arguments return nil } // Scopes func OrderByBestArgument(db *gorm.DB) *gorm.DB { return db.Joins("LEFT JOIN claims c ON c.id = arguments.claim_id"). Order("(arguments.strength * c.truth) DESC") }
gruff/argument.go
0.57821
0.65379
argument.go
starcoder
// Package zero provides facilities for efficiently zeroing Go values. package zero import ( "reflect" "sync" "unsafe" ) var cache sync.Map // map[reflect.Type]func(ptr uintptr, n int) // Slice zeroes the elements 0 <= i < v.Len() of the provided slice. // Slice panics if the value is not a slice. f func Slice(v interface{}) { SliceValue(reflect.ValueOf(v)) } // SliceValue zeroes the elements 0 <= i < v.Len() of the provided slice // value. Slice panics if the value is not a slice. f func SliceValue(v reflect.Value) { if v.Kind() != reflect.Slice { panic("zero.Slice: called on non-slice value") } Unsafe(v.Type().Elem(), v.Pointer(), v.Len()) } // Unsafe zeroes n elements starting at the address ptr. Elements // must of type t. func Unsafe(t reflect.Type, ptr uintptr, n int) { zi, ok := cache.Load(t) if !ok { zi, _ = cache.LoadOrStore(t, slice(t)) } z := zi.(func(ptr uintptr, n int)) z(ptr, n) } func slice(elem reflect.Type) func(ptr uintptr, n int) { switch kind := elem.Kind(); { case isValueType(elem): return sliceValue(elem) case kind == reflect.String: return func(ptr uintptr, n int) { h := reflect.SliceHeader{Data: ptr, Len: n, Cap: n} strs := *(*[]string)(unsafe.Pointer(&h)) for i := range strs { strs[i] = "" } } case kind == reflect.Slice: return func(ptr uintptr, n int) { h := reflect.SliceHeader{Data: ptr, Len: n, Cap: n} slices := *(*[]reflect.SliceHeader)(unsafe.Pointer(&h)) for i := range slices { *(*unsafe.Pointer)(unsafe.Pointer(&slices[i].Data)) = unsafe.Pointer(uintptr(0)) slices[i].Len = 0 slices[i].Cap = 0 } } case kind == reflect.Ptr: return func(ptr uintptr, n int) { h := reflect.SliceHeader{Data: ptr, Len: n, Cap: n} ps := *(*[]unsafe.Pointer)(unsafe.Pointer(&h)) for i := range ps { ps[i] = unsafe.Pointer(uintptr(0)) } } default: // Slow case: use reflection API. 
zero := reflect.Zero(elem) sliceType := reflect.SliceOf(elem) return func(ptr uintptr, n int) { h := reflect.SliceHeader{Data: ptr, Len: n, Cap: n} v := reflect.Indirect(reflect.NewAt(sliceType, unsafe.Pointer(&h))) for i := 0; i < v.Len(); i++ { v.Index(i).Set(zero) } } } } func sliceValue(elem reflect.Type) func(ptr uintptr, n int) { switch size := elem.Size(); size { case 8: return func(ptr uintptr, n int) { h := reflect.SliceHeader{Data: ptr, Len: n, Cap: n} vs := *(*[]int64)(unsafe.Pointer(&h)) for i := range vs { vs[i] = 0 } } case 4: return func(ptr uintptr, n int) { h := reflect.SliceHeader{Data: ptr, Len: n, Cap: n} vs := *(*[]int32)(unsafe.Pointer(&h)) for i := range vs { vs[i] = 0 } } case 2: return func(ptr uintptr, n int) { h := reflect.SliceHeader{Data: ptr, Len: n, Cap: n} vs := *(*[]int16)(unsafe.Pointer(&h)) for i := range vs { vs[i] = 0 } } case 1: return func(ptr uintptr, n int) { h := reflect.SliceHeader{Data: ptr, Len: n, Cap: n} vs := *(*[]int8)(unsafe.Pointer(&h)) for i := range vs { vs[i] = 0 } } default: // Slow case: reinterpret to []byte, and set that. Note that the // compiler should be able to optimize this too. In this case // it's always a value type, so this is always safe to do. return func(ptr uintptr, n int) { var h reflect.SliceHeader h.Data = ptr h.Len = int(size) * n h.Cap = h.Len b := *(*[]byte)(unsafe.Pointer(&h)) for i := range b { b[i] = 0 } } } } func isValueType(t reflect.Type) bool { switch t.Kind() { case reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: return true case reflect.Array: return isValueType(t.Elem()) case reflect.Struct: for i := 0; i < t.NumField(); i++ { if !isValueType(t.Field(i).Type) { return false } } return true } return false }
internal/zero/zero.go
0.586996
0.583144
zero.go
starcoder
package main import ( "fmt" "github.com/go-gl/gl" glmath "github.com/go-gl/mathgl/mgl64" "math" ) const ( red = iota blue = iota green = iota ) type Block struct { active bool blockType int } func (block *Block) draw() { if block.active { gl.Begin(gl.QUADS) switch block.blockType { case red: gl.Color3f(1.0, 0.0, 0.0) case green: gl.Color3f(0.0, 1.0, 0.0) case blue: gl.Color3f(0.0, 0.0, 1.0) } // when looking down the z axis: // front face gl.Normal3d(0.0, 0.0, -1.0) gl.Vertex3f(1.0, 0.0, 0.0) gl.Vertex3f(1.0, 1.0, 0.0) gl.Vertex3f(0.0, 1.0, 0.0) gl.Vertex3f(0.0, 0.0, 0.0) // back face gl.Normal3d(0.0, 0.0, -1.0) gl.Vertex3f(1.0, 0.0, 1.0) gl.Vertex3f(1.0, 1.0, 1.0) gl.Vertex3f(0.0, 1.0, 1.0) gl.Vertex3f(0.0, 0.0, 1.0) // right face gl.Normal3d(1.0, 0.0, 1.0) gl.Vertex3f(1.0, 0.0, 0.0) gl.Vertex3f(1.0, 1.0, 0.0) gl.Vertex3f(1.0, 1.0, 1.0) gl.Vertex3f(1.0, 0.0, 1.0) // left face gl.Normal3d(-1.0, 0.0, 1.0) gl.Vertex3f(0.0, 0.0, 1.0) gl.Vertex3f(0.0, 1.0, 1.0) gl.Vertex3f(0.0, 1.0, 0.0) gl.Vertex3f(0.0, 0.0, 0.0) // top face gl.Normal3d(0.0, 1.0, 0.0) gl.Vertex3f(1.0, 1.0, 1.0) gl.Vertex3f(1.0, 1.0, 0.0) gl.Vertex3f(0.0, 1.0, 0.0) gl.Vertex3f(0.0, 1.0, 1.0) // bottom face gl.Normal3d(0.0, -1.0, 0.0) gl.Vertex3f(1.0, 0.0, 0.0) gl.Vertex3f(1.0, 0.0, 1.0) gl.Vertex3f(0.0, 0.0, 1.0) gl.Vertex3f(0.0, 0.0, 0.0) gl.End() } } func (block *Block) lighting() bool { return false } const ( chunkWidth = 16 chunkHeight = 16 chunkDepth = 16 ) type Chunk struct { blocks [chunkWidth][chunkWidth][chunkWidth]Block position ChunkCoordinate } func (chunk *Chunk) draw() { gl.Color4f(1.0, 1.0, 1.0, 1.0) for x := 0; x < chunkWidth; x++ { for y := 0; y < chunkHeight; y++ { for z := 0; z < chunkDepth; z++ { gl.PushMatrix() position := chunk.position.toVector() gl.Translated(position.X(), position.Y(), position.Z()) gl.PushMatrix() gl.Translated(float64(x), float64(y), float64(z)) block := chunk.blocks[x][y][z] block.draw() gl.PopMatrix() gl.PopMatrix() } } } } func (chunk *Chunk) lighting() 
bool { return false } const ( DefaultRenderDistance = 1 ) type ChunkManager struct { listManager *DisplayListManager chunks map[string]*Chunk ids map[string]uint renderDistance int lastPosition ChunkCoordinate } type ChunkCoordinate []int func (coord *ChunkCoordinate) Id() string { return fmt.Sprintf("%v:%v:%v", coord.X(), coord.Y(), coord.Z()) } func (coord ChunkCoordinate) X() int { return coord[0] } func (coord ChunkCoordinate) Y() int { return coord[1] } func (coord ChunkCoordinate) Z() int { return coord[2] } func idsFromCoords(coords []ChunkCoordinate) []string { ids := make([]string, len(coords), len(coords)) for i, coord := range coords { ids[i] = coord.Id() } return ids } func (coord ChunkCoordinate) Equals(other ChunkCoordinate) bool { return coord.X() == other.X() && coord.Y() == other.Y() && coord.Z() == other.Z() } func NewChunkManager(listManager *DisplayListManager) *ChunkManager { return &ChunkManager{ listManager: listManager, chunks: make(map[string]*Chunk), ids: make(map[string]uint), renderDistance: DefaultRenderDistance, lastPosition: nil, } } func (manager *ChunkManager) update(position glmath.Vec3) { chunkCoordinate := toChunkCoordinates(position) if manager.lastPosition == nil || !chunkCoordinate.Equals(manager.lastPosition) { chunksToLoad := chunksWithinDistance(chunkCoordinate, manager.renderDistance) for _, chunkCoord := range chunksToLoad { id := chunkCoord.Id() _, ok := manager.chunks[id] if !ok { chunk := generateChunk(chunkCoord) manager.chunks[id] = chunk listId := manager.listManager.add(chunk) manager.ids[id] = listId } } if manager.lastPosition != nil { chunksToDestroy := chunksWithinDistance(manager.lastPosition, manager.renderDistance) chunkIdsToLoad := idsFromCoords(chunksToLoad) for _, toDestroy := range chunksToDestroy { id := toDestroy.Id() if !stringInSlice(id, chunkIdsToLoad) { delete(manager.chunks, id) listId := manager.ids[id] manager.listManager.remove(listId) delete(manager.ids, id) } } } manager.lastPosition = 
chunkCoordinate } } func stringInSlice(a string, list []string) bool { for _, b := range list { if b == a { return true } } return false } func chunksWithinDistance(position ChunkCoordinate, distance int) []ChunkCoordinate { chunks := make([]ChunkCoordinate, 0, distance*distance*distance) for x := -distance; x <= distance; x++ { for z := -distance; z <= distance; z++ { chunks = append(chunks, ChunkCoordinate{position.X() + x, position.Y(), position.Z() + z}) } } return chunks } func toChunkCoordinates(position glmath.Vec3) ChunkCoordinate { return ChunkCoordinate{floorInt(position.X() / chunkWidth), floorInt(position.Y() / chunkHeight), floorInt(position.Z() / chunkDepth)} } func (position *ChunkCoordinate) toVector() glmath.Vec3 { return glmath.Vec3{float64(position.X() * chunkWidth), float64(position.Y() * chunkHeight), float64(position.Z() * chunkDepth)} } func floorInt(x float64) int { return int(math.Floor(x)) }
model.go
0.592195
0.499451
model.go
starcoder
package dht import ( "machine" "time" ) type device struct { pin machine.Pin measurements DeviceType temperature int16 humidity uint16 } func (t *device) Temperature() int16 { return t.temperature } func (t *device) TemperatureFloat(scale TemperatureScale) float32 { return scale.convertToFloat(t.temperature) } func (t *device) Humidity() uint16 { return t.humidity } func (t *device) HumidityFloat() float32 { return float32(t.humidity) / 10. } func initiateCommunication(p machine.Pin) { // Send low signal to the device p.Configure(machine.PinConfig{Mode: machine.PinOutput}) p.Low() time.Sleep(startingLow) // Set pin to high and wait for reply p.High() p.Configure(machine.PinConfig{Mode: machine.PinInput}) } func (t *device) ReadMeasurements() error { // initial waiting state := powerUp(t.pin) defer t.pin.Set(state) return t.read() } func (t *device) read() error { // initialize loop variables bufferData := [5]byte{} buf := bufferData[:] signalsData := [80]uint16{} signals := signalsData[:] initiateCommunication(t.pin) err := waitForDataTransmission(t.pin) if err != nil { return err } t.receiveSignals(signals) err = t.extractData(signals[:], buf) if err != nil { return err } if !isValid(buf[:]) { return checksumError } t.temperature, t.humidity = t.measurements.extractData(buf) return nil } func (t *device) receiveSignals(result []uint16) { i := uint8(0) machine.UART1.Interrupt.Disable() defer machine.UART1.Interrupt.Enable() for ; i < 40; i++ { result[i*2] = expectChange(t.pin, false) result[i*2+1] = expectChange(t.pin, true) } } func (t *device) extractData(signals []uint16, buf []uint8) error { for i := uint8(0); i < 40; i++ { lowCycle := signals[i*2] highCycle := signals[i*2+1] if lowCycle == timeout || highCycle == timeout { return noDataError } byteN := i >> 3 buf[byteN] <<= 1 if highCycle > lowCycle { buf[byteN] |= 1 } } return nil } func waitForDataTransmission(p machine.Pin) error { // wait for thermometer to pull down if expectChange(p, true) == timeout { 
return noSignalError } //wait for thermometer to pull up if expectChange(p, false) == timeout { return noSignalError } // wait for thermometer to pull down and start sending the data if expectChange(p, true) == timeout { return noSignalError } return nil } type Device interface { ReadMeasurements() error Temperature() int16 TemperatureFloat(scale TemperatureScale) float32 Humidity() uint16 HumidityFloat() float32 } func New(p machine.Pin, deviceType DeviceType) Device { return &device{ pin: p, measurements: deviceType, temperature: 0, humidity: 0, } }
dht/thermometer.go
0.687315
0.4474
thermometer.go
starcoder
package trie import ( "unicode/utf8" ) // Trie represents a trie and contains a pointer to the root node type Trie struct { Root *Node `json:"root,omitempty"` } // Node represents a single node in a trie, usually representing a character type Node struct { Data string `json:"data,omitempty"` Prefix string `json:"prefix,omitempty"` IsEnd bool `json:"is_end,omitempty"` Children map[string]*Node `json:"children,omitempty"` Key string `json:"key,omitempty"` Val string `json:"val,omitempty"` } // Init creates a Trie pre-initialized with a pointer to the root Node func Init() *Trie { trie := &Trie{ Root: &Node{ Data: "", Prefix: "", IsEnd: false, Children: make(map[string]*Node), Key: "", Val: "", }, } return trie } // Update adds a key, value pair to the Trie // It functions similarly to a key, value pair in a map // Once all of the letters of the key have been exhausted, the Val attribute of // the leaf node in the trie is updated with the value v provided. func (trie *Trie) Update(k string, v string) { currentNode := trie.Root lenRunes := utf8.RuneCountInString(k) for idx := 0; idx < lenRunes; idx++ { s := string(k[idx]) if _, ok := currentNode.Children[s]; !ok { child := &Node{ Data: s, Prefix: currentNode.Prefix + currentNode.Data, IsEnd: false, Children: make(map[string]*Node), Key: currentNode.Prefix + currentNode.Data, Val: "", } currentNode.Children[s] = child } currentNode = currentNode.Children[s] } currentNode.Key = currentNode.Prefix + currentNode.Data currentNode.Val = v currentNode.IsEnd = true } // Search attempts to retrieve the corresponding value for the key provided // It provides a similar experience to referencing a key, value pair in a map // where both the string value and a bool are returned if the key is found; // otherwise, it returns an empty string and false. 
func (trie *Trie) Search(k string) (s string, ok bool) { currentNode := trie.Root lenRunes := utf8.RuneCountInString(k) for idx := 0; idx < lenRunes; idx++ { s := string(k[idx]) if _, ok := currentNode.Children[s]; ok { currentNode = currentNode.Children[s] } else { return "", false } } currentNode.Key = currentNode.Prefix + currentNode.Data if currentNode.IsEnd { s = currentNode.Val } return s, true } // longestPrefixNode returns the node that contains the longest prefix // available in the trie of the argument k // It provides a similar experience to referencing a key, value pair in a map // where both the node value and a bool are returned if the key is found; // otherwise, it returns an empty string and false. func (trie *Trie) longestPrefixNode(k string) (n *Node, ok bool) { lenRunes := utf8.RuneCountInString(k) if lenRunes < 1 { return &Node{}, false } currentNode := trie.Root for idx := 0; idx < lenRunes; idx++ { s := string(k[idx]) if _, ok := currentNode.Children[s]; ok { currentNode = currentNode.Children[s] } else if idx == 0 { return &Node{}, false } else { return currentNode, true } } return currentNode, true } // LongestPrefix returns the key, value pair of the longest prefix // available in the trie of the argument s. // If there is no path in the trie that contains a prefix of the // argument s, then empty strings are returned for the key and value func (trie *Trie) LongestPrefix(s string) (k string, v string) { if n, ok := trie.longestPrefixNode(s); !ok { return "", "" } else { return n.Key, n.Val } }
trie/trie.go
0.680348
0.545467
trie.go
starcoder
package lexer

// regularExpressionOperator enumerates the three regex operators this
// compiler understands: union (|), implicit concatenation, and Kleene star.
type regularExpressionOperator int

const (
	union regularExpressionOperator = iota
	concat
	star
)

// length returns how many input characters the operator itself consumes:
// 1 for '|' and '*', 0 for concatenation (which has no written symbol).
func (r regularExpressionOperator) length() int {
	switch r {
	case union:
		return 1
	case star:
		return 1
	default:
		return 0
	}
}

// RegularExpression represents the string representation of regular expressions. It has methods for
// regular expression operations and compilation.
type RegularExpression string

// getCharacters splits the expression into logical characters. A '/' acts as
// an escape: it and the following byte form a single two-byte character.
// NOTE(review): a trailing unescaped '/' would index r[i+1] out of range —
// presumably inputs are pre-validated; confirm with callers.
func (r RegularExpression) getCharacters() []string {
	characters := make([]string, 0, 100)
	for i := 0; i < len(r); {
		if r[i] == '/' {
			characters = append(characters, string(r[i])+string(r[i+1]))
			i += 2
		} else {
			characters = append(characters, string(r[i]))
			i++
		}
	}
	return characters
}

// isValid reports whether the parentheses in the expression are balanced,
// using a stack of '(' markers.
func (r RegularExpression) isValid() bool {
	s := make(stack, 0, 10)
	for _, currentCharacter := range r.getCharacters() {
		switch currentCharacter {
		case "(":
			s.push('(')
		case ")":
			if s.empty() {
				return false
			}
			s.pop()
		}
	}
	return s.empty()
}

// getMatchingParenIndex returns the byte index of the ')' matching the
// leading '(' of the expression, or -1 when the expression does not start
// with '(' (or no match is found). The running byte length accounts for
// two-byte escaped characters.
func (r RegularExpression) getMatchingParenIndex() int {
	characters := r.getCharacters()
	if characters[0] != "(" {
		return -1
	}
	s := make(stack, 0, 10)
	stringLengthSoFar := 0
	for _, character := range characters {
		stringLengthSoFar += len(character)
		switch character {
		case "(":
			s.push('(')
		case ")":
			s.pop()
		}
		if s.empty() {
			return stringLengthSoFar - 1
		}
	}
	return -1
}

// trimParenthesis strips one outer pair of parentheses, if the expression
// both starts with '(' and ends with ')'; otherwise it returns r unchanged.
// NOTE(review): this does not check the two parens actually match each other
// (e.g. "(a)(b)") — presumably callers only pass a single parenthesized
// group; verify.
func (r RegularExpression) trimParenthesis() RegularExpression {
	characters := r.getCharacters()
	if characters[0] != "(" {
		return r
	}
	if characters[len(characters)-1] != ")" {
		return r
	}
	return r[1 : len(r)-1]
}

// getFirstOperand returns the leading operand: either the first logical
// character, or the whole leading parenthesized group.
func (r RegularExpression) getFirstOperand() RegularExpression {
	characters := r.getCharacters()
	if characters[0] != "(" {
		return RegularExpression(characters[0])
	}
	return RegularExpression(r[:r.getMatchingParenIndex()+1])
}

// getOperator inspects the byte right after the first operand: '|' means
// union, '*' means star, anything else is an implicit concatenation.
func (r RegularExpression) getOperator() regularExpressionOperator {
	operatorIndex := len(r.getFirstOperand())
	switch r[operatorIndex] {
	case '|':
		return union
	case '*':
		return star
	default:
		return concat
	}
}

// getSecondOperand returns everything after the first operand and the
// operator symbol (if any).
func (r RegularExpression) getSecondOperand() RegularExpression {
	secondOperandIndex := len(r.getFirstOperand()) + r.getOperator().length()
	return RegularExpression(r[secondOperandIndex:])
}

// compile recursively translates the expression into an NFA (Thompson-style
// construction): single characters become primitive automata, and the three
// operators are applied via applyStar / combineUsingUnion / combineUsingConcat.
func (r RegularExpression) compile() nondeterministicFiniteAutomata {
	characters := r.getCharacters()
	if len(characters) == 0 {
		// Empty expression: automaton accepting the empty string.
		var f nondeterministicFiniteAutomata
		f.init("")
		return f
	}
	if len(characters) == 1 {
		// Single character: a primitive automaton. For an escaped character
		// ("/x", length 2) the label is the character after the escape.
		var f nondeterministicFiniteAutomata
		character := characters[0]
		switch len(character) {
		case 2:
			f.init(transitionLabel(character[1]))
		case 1:
			f.init(transitionLabel(character[0]))
		}
		return f
	}
	firstOperand := r.getFirstOperand()
	switch r.getOperator() {
	case star:
		firstOperandAutomata := firstOperand.trimParenthesis().compile()
		firstOperandAutomata.applyStar()
		return firstOperandAutomata
	case union:
		firstOperandAutomata := firstOperand.trimParenthesis().compile()
		secondOperand := r.getSecondOperand()
		secondOperandAutomata := secondOperand.trimParenthesis().compile()
		firstOperandAutomata.combineUsingUnion(&secondOperandAutomata)
		return firstOperandAutomata
	default:
		firstOperandAutomata := firstOperand.trimParenthesis().compile()
		secondOperand := r.getSecondOperand()
		secondOperandAutomata := secondOperand.trimParenthesis().compile()
		firstOperandAutomata.combineUsingConcat(&secondOperandAutomata)
		return firstOperandAutomata
	}
}
lexer/regular_expression.go
0.697403
0.503723
regular_expression.go
starcoder
package data

import (
	"reflect"
)

// Sequence is an ordered collection of values.
type Sequence []interface{}

// Map is an unordered collection of values associated with key values.
type Map map[interface{}]interface{}

// AsBool converts an untyped value to a boolean.
// Only a genuine bool succeeds; named bool types are not unwrapped.
func AsBool(data interface{}) (b bool, ok bool) {
	b, ok = data.(bool)
	return
}

// AsFloat converts an untyped value to a floating-point number.
// Any float, signed, or unsigned integer kind is accepted (this includes
// named types, since the check is on reflect.Kind).
func AsFloat(data interface{}) (f float64, ok bool) {
	val := reflect.ValueOf(data)
	ok = true
	switch val.Kind() {
	case reflect.Float32, reflect.Float64:
		f = val.Float()
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		f = float64(val.Int())
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		f = float64(val.Uint())
	default:
		ok = false
	}
	return
}

// AsUint converts an untyped value to an unsigned integer.
// Delegates to AsUint64; on 32-bit platforms values above MaxUint32 truncate.
func AsUint(data interface{}) (i uint, ok bool) {
	i64, ok := AsUint64(data)
	if ok {
		i = uint(i64)
	}
	return
}

// AsUint64 converts an untyped value to a uint64.
// Signed integers are accepted only when non-negative; negative values
// report ok=false rather than wrapping.
func AsUint64(data interface{}) (i uint64, ok bool) {
	val := reflect.ValueOf(data)
	ok = true
	switch val.Kind() {
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		i = val.Uint()
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		if val.Int() >= 0 {
			i = uint64(val.Int())
		} else {
			// Refuse to wrap a negative value into unsigned range.
			ok = false
		}
	default:
		ok = false
	}
	return
}

// AsInt converts an untyped value to a signed integer.
// Delegates to AsInt64; on 32-bit platforms out-of-range values truncate.
func AsInt(data interface{}) (i int, ok bool) {
	i64, ok := AsInt64(data)
	if ok {
		i = int(i64)
	}
	return
}

// AsInt64 converts an untyped value to a int64.
func AsInt64(data interface{}) (i int64, ok bool) { val := reflect.ValueOf(data) ok = true switch val.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: i = val.Int() case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: i = int64(val.Uint()) default: ok = false } return } // AsSequence converts an untyped value to a sequence of values. func AsSequence(data interface{}) (seq Sequence, ok bool) { switch s := data.(type) { case []interface{}: seq, ok = Sequence(s), true case Sequence: seq, ok = s, true } return } // AsMap converts an untyped value to a map of values. func AsMap(data interface{}) (m Map, ok bool) { switch d := data.(type) { case map[interface{}]interface{}: m, ok = Map(d), true case Map: m, ok = d, true } return } // HasKeys returns whether a given map contains all of the keys given. func (m Map) HasKeys(keys ...interface{}) bool { for _, k := range keys { if _, found := m[k]; !found { return false } } return true } // CopyMap creates a shallow copy of a map. func (m Map) Copy() (clone Map) { clone = make(Map, len(m)) for k, v := range m { clone[k] = v } return } // SetDefault adds a new key to a map if the key isn't already present, and // returns the latest value for the key. func (m Map) SetDefault(k, d interface{}) (v interface{}) { v, ok := m[k] if !ok { m[k] = d v = d } return }
data/values.go
0.7874
0.571109
values.go
starcoder
package main

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/theatlasroom/advent-of-code/go/utils"
)

/*
--- Day 3: Binary Diagnostic ---

The diagnostic report is a list of equally-sized binary numbers.

Part 1: the gamma rate takes, per bit position, the most common bit across
all numbers; the epsilon rate takes the least common bit (i.e. gamma's
bitwise complement within the report's width). Power consumption is
gamma * epsilon, in decimal.

Part 2: the oxygen generator rating keeps, per bit position, the numbers
with the most common bit (ties keep '1'); the CO2 scrubber rating keeps the
least common (ties keep '0'); filtering repeats bit by bit until one number
remains. Life support rating is the product of the two.
*/

// generateMask returns a uint32 with the low maskLength bits set
// (e.g. maskLength=5 -> 0b11111), used to complement gamma within the
// report's bit width.
func generateMask(maskLength int) uint32 {
	return binaryStringAsUint32(strings.Repeat("1", maskLength))
}

// flipBits bitwise-negates v and masks the result down to the report width.
// Given v = 1100100010 and a 12-bit mask, returns 0011011101 (within 12 bits).
func flipBits(v uint32, mask uint32) uint32 {
	return ^v & mask
}

// binaryStringAsUint32 parses a base-2 string into a uint32, returning 0 on
// any parse error.
func binaryStringAsUint32(s string) uint32 {
	v, err := strconv.ParseUint(s, 2, 32)
	if err != nil {
		return 0
	}
	return uint32(v)
}

// calculatePower derives gamma from the per-position one-counts and returns
// gamma * epsilon.
func calculatePower(counts []int, threshold int, mask uint32) uint32 {
	gamma := binaryStringAsUint32(generateRates(counts, threshold, mask))
	epsilon := flipBits(gamma, mask)
	return gamma * epsilon
}

// generateRates builds gamma's binary string: '1' where more than threshold
// inputs had a one in that position, else '0'. The mask parameter is unused
// but kept for signature compatibility.
func generateRates(counts []int, threshold int, mask uint32) string {
	var b strings.Builder
	for _, c := range counts {
		if c > threshold {
			b.WriteByte('1')
		} else {
			b.WriteByte('0')
		}
	}
	return b.String()
}

type lifeSupportRating struct {
	Zeros, Ones []string
}

type lifeSupportComparatorFn = func(lfr lifeSupportRating) []string

// matchCriteria recursively filters input bit by bit, keeping the partition
// chosen by comparator, until a single candidate remains.
func matchCriteria(input []string, bit int, comparator lifeSupportComparatorFn) string {
	if len(input) == 0 {
		return ""
	}
	if len(input) == 1 {
		return input[0]
	}
	// Robustness fix: with duplicate report lines the recursion previously
	// ran past the last bit and panicked; settle for the first candidate.
	if bit >= len(input[0]) {
		return input[0]
	}
	var zeros, ones []string
	for _, str := range input {
		if str[bit] == '1' {
			ones = append(ones, str)
		} else {
			zeros = append(zeros, str)
		}
	}
	rating := lifeSupportRating{Zeros: zeros, Ones: ones}
	return matchCriteria(comparator(rating), bit+1, comparator)
}

// part2 computes and prints the life support rating (oxygen * CO2).
func part2(data []string) {
	oxygen := matchCriteria(data, 0, func(lfr lifeSupportRating) []string {
		// Most common bit wins; ties keep the ones.
		if len(lfr.Ones) >= len(lfr.Zeros) {
			return lfr.Ones
		}
		return lfr.Zeros
	})
	c02 := matchCriteria(data, 0, func(lfr lifeSupportRating) []string {
		// Least common bit wins; ties keep the zeros.
		if len(lfr.Ones) < len(lfr.Zeros) {
			return lfr.Ones
		}
		return lfr.Zeros
	})
	fmt.Printf("Part 2: life support rating %v\n", binaryStringAsUint32(oxygen)*binaryStringAsUint32(c02))
}

// part1 computes and prints the power consumption.
func part1(data []string) {
	if len(data) == 0 {
		fmt.Println("Part 1: no diagnostic data")
		return
	}
	// Generalization: derive the bit width from the input instead of the
	// previously hard-coded 12, so the example (5-bit) input works too.
	size := len(data[0])
	mask := generateMask(size)
	threshold := len(data) / 2
	diagnostics := make([]int, size)
	for _, str := range data {
		for i, c := range str {
			if c == '1' {
				diagnostics[i]++
			}
		}
	}
	power := calculatePower(diagnostics, threshold, mask)
	fmt.Printf("Part 1: power consumption %d\n", power)
}

func main() {
	cfg := utils.BannerConfig{Year: 2021, Day: 3}
	utils.Banner(cfg)
	// Read all the numbers
	input := utils.LoadData("3.txt")
	part1(input)
	part2(input)
}
go/2021/3.go
0.761006
0.691868
3.go
starcoder
package types

import (
	"errors"
	"fmt"
	"reflect"
	"strconv"
)

// This package coerces arbitrary interface{} values into concrete types.
// Shared contract for all numeric converters: nil and unconvertible values
// yield the zero value; bools map to 1/0; strings are parsed first as
// integers, then (on failure) as floats.
//
// The previous version duplicated a ~60-line type switch per target type;
// the switches are now consolidated into the Int64/Uint64/Float64 cores,
// with narrower types truncating the core's result (identical semantics
// for all in-range inputs).

// Byte converts an untyped value to a byte (alias of Uint8).
func Byte(value interface{}) byte {
	return Uint8(value)
}

// Int converts an untyped value to an int via the 32-bit conversion,
// preserving the original truncation behavior.
func Int(value interface{}) int {
	return int(Int32(value))
}

// Int64 is the signed-integer conversion core.
func Int64(value interface{}) int64 {
	if value == nil {
		return 0
	}
	switch v := value.(type) {
	case bool:
		if v {
			return 1
		}
		return 0
	case int:
		return int64(v)
	case int8:
		return int64(v)
	case int16:
		return int64(v)
	case int32:
		return int64(v)
	case int64:
		return v
	case uint:
		return int64(v)
	case uint8:
		return int64(v)
	case uint16:
		return int64(v)
	case uint32:
		return int64(v)
	case uint64:
		return int64(v)
	case float32:
		return int64(v)
	case float64:
		return int64(v)
	case string:
		if n, err := strconv.ParseInt(v, 10, 64); err == nil {
			return n
		}
		if f, err := strconv.ParseFloat(v, 64); err == nil {
			return int64(f)
		}
	}
	return 0
}

// Int8 converts an untyped value to an int8 (truncating).
func Int8(value interface{}) int8 {
	return int8(Int64(value))
}

// Int16 converts an untyped value to an int16 (truncating).
func Int16(value interface{}) int16 {
	return int16(Int64(value))
}

// Int32 converts an untyped value to an int32, discarding any conversion
// error reported by Int32Value.
func Int32(value interface{}) int32 {
	v, _ := Int32Value(value)
	return v
}

// Uint64 is the unsigned-integer conversion core.
//
// Fix: decimal strings above MaxInt64 previously failed ParseInt and fell
// back to a lossy float parse (returning 0 or a rounded value); they are
// now parsed exactly with ParseUint. Negative strings still go through
// ParseInt first so they wrap the same way negative ints do.
func Uint64(value interface{}) uint64 {
	if value == nil {
		return 0
	}
	switch v := value.(type) {
	case bool:
		if v {
			return 1
		}
		return 0
	case int:
		return uint64(v)
	case int8:
		return uint64(v)
	case int16:
		return uint64(v)
	case int32:
		return uint64(v)
	case int64:
		return uint64(v)
	case uint:
		return uint64(v)
	case uint8:
		return uint64(v)
	case uint16:
		return uint64(v)
	case uint32:
		return uint64(v)
	case uint64:
		return v
	case float32:
		return uint64(v)
	case float64:
		return uint64(v)
	case string:
		if n, err := strconv.ParseInt(v, 10, 64); err == nil {
			return uint64(n)
		}
		if n, err := strconv.ParseUint(v, 10, 64); err == nil {
			return n
		}
		if f, err := strconv.ParseFloat(v, 64); err == nil {
			return uint64(f)
		}
	}
	return 0
}

// Uint converts an untyped value to a uint (truncating).
func Uint(value interface{}) uint {
	return uint(Uint64(value))
}

// Uint8 converts an untyped value to a uint8 (truncating).
func Uint8(value interface{}) uint8 {
	return uint8(Uint64(value))
}

// Uint16 converts an untyped value to a uint16 (truncating).
func Uint16(value interface{}) uint16 {
	return uint16(Uint64(value))
}

// Uint32 converts an untyped value to a uint32 (truncating).
func Uint32(value interface{}) uint32 {
	return uint32(Uint64(value))
}

// Int32Value converts an untyped value to an int32, returning an error when
// the value is nil or a string that parses neither as integer nor float.
// Strings are parsed with bitSize 32, as before.
func Int32Value(value interface{}) (int32, error) {
	if value == nil {
		return 0, errors.New("value should not be nil")
	}
	switch v := value.(type) {
	case bool:
		if v {
			return 1, nil
		}
		return 0, nil
	case int:
		return int32(v), nil
	case int8:
		return int32(v), nil
	case int16:
		return int32(v), nil
	case int32:
		return v, nil
	case int64:
		return int32(v), nil
	case uint:
		return int32(v), nil
	case uint8:
		return int32(v), nil
	case uint16:
		return int32(v), nil
	case uint32:
		return int32(v), nil
	case uint64:
		return int32(v), nil
	case float32:
		return int32(v), nil
	case float64:
		return int32(v), nil
	case string:
		if n, err := strconv.ParseInt(v, 10, 32); err == nil {
			return int32(n), nil
		}
		f, err := strconv.ParseFloat(v, 32)
		if err != nil {
			return 0, err
		}
		return int32(f), nil
	}
	return 0, nil
}

// Float64 is the floating-point conversion core.
func Float64(value interface{}) float64 {
	if value == nil {
		return 0
	}
	switch v := value.(type) {
	case bool:
		if v {
			return 1
		}
		return 0
	case int:
		return float64(v)
	case int8:
		return float64(v)
	case int16:
		return float64(v)
	case int32:
		return float64(v)
	case int64:
		return float64(v)
	case uint:
		return float64(v)
	case uint8:
		return float64(v)
	case uint16:
		return float64(v)
	case uint32:
		return float64(v)
	case uint64:
		return float64(v)
	case float32:
		return float64(v)
	case float64:
		return v
	case string:
		if f, err := strconv.ParseFloat(v, 64); err == nil {
			return f
		}
	}
	return 0
}

// Float32 converts an untyped value to a float32. Strings are parsed with
// bitSize 32 directly (not via Float64) to preserve single-rounding.
func Float32(value interface{}) float32 {
	if s, ok := value.(string); ok {
		f, err := strconv.ParseFloat(s, 32)
		if err != nil {
			return 0
		}
		return float32(f)
	}
	return float32(Float64(value))
}

// Bool converts an untyped value to a bool: bool kinds are returned as-is,
// anything else is true when its signed-integer conversion is positive.
func Bool(value interface{}) bool {
	if value == nil {
		return false
	}
	if reflect.TypeOf(value).Kind() == reflect.Bool {
		return value.(bool)
	}
	return Int64(value) > 0
}

// String converts an untyped value to a string: strings pass through,
// integer types format with %d, everything else with %#v.
func String(value interface{}) string {
	if value == nil {
		return ""
	}
	if s, ok := value.(string); ok {
		return s
	}
	switch value.(type) {
	case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
		return fmt.Sprintf("%d", value)
	}
	return fmt.Sprintf("%#v", value)
}

// Compare reports whether value1 > value2, converting value2 to value1's
// type first. A nil value1 always compares false.
// NOTE(review): the bool case also compares via Int, so true > false —
// presumably intended; confirm with callers.
func Compare(value1 interface{}, value2 interface{}) bool {
	if value1 == nil {
		return false
	}
	switch v := value1.(type) {
	case bool:
		return Int(v) > Int(value2)
	case int:
		return Int(v) > Int(value2)
	case int8:
		return Int8(v) > Int8(value2)
	case int16:
		return Int16(v) > Int16(value2)
	case int32:
		return Int32(v) > Int32(value2)
	case int64:
		return Int64(v) > Int64(value2)
	case uint:
		return Uint(v) > Uint(value2)
	case uint8:
		return Uint8(v) > Uint8(value2)
	case uint16:
		return Uint16(v) > Uint16(value2)
	case uint32:
		return Uint32(v) > Uint32(value2)
	case uint64:
		return Uint64(v) > Uint64(value2)
	case float32:
		return Float32(v) > Float32(value2)
	case float64:
		return Float64(v) > Float64(value2)
	case string:
		return String(v) > String(value2)
	}
	return String(value1) > String(value2)
}

// IsNumber reports whether value is a built-in numeric type.
func IsNumber(value interface{}) bool {
	switch value.(type) {
	case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64:
		return true
	}
	return false
}

// IsInteger reports whether value is a built-in integer type.
func IsInteger(value interface{}) bool {
	switch value.(type) {
	case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
		return true
	}
	return false
}

// IsFloat reports whether value is a built-in floating-point type.
func IsFloat(value interface{}) bool {
	switch value.(type) {
	case float32, float64:
		return true
	}
	return false
}

// IsNil reports whether value is nil, or a nil-able kind holding nil.
//
// Fix: the previous version called reflect.Value.IsNil unconditionally,
// which panics for non-nilable kinds (e.g. IsNil(5)). Such values now
// simply report false.
func IsNil(value interface{}) bool {
	if value == nil {
		return true
	}
	rv := reflect.ValueOf(value)
	switch rv.Kind() {
	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
		return rv.IsNil()
	}
	return false
}

// Slice converts one slice type into another, coercing each element with
// this package's converters. Elements whose target kind is unsupported are
// silently skipped (as before).
func Slice(fromSlice interface{}, toSliceType reflect.Type) (interface{}, error) {
	if fromSlice == nil {
		return nil, errors.New("'fromSlice' should not be nil")
	}
	fromValue := reflect.ValueOf(fromSlice)
	if fromValue.Kind() != reflect.Slice {
		return nil, errors.New("'fromSlice' should be slice")
	}
	if toSliceType.Kind() != reflect.Slice {
		return nil, errors.New("'toSliceType' should be slice")
	}
	out := reflect.Indirect(reflect.New(toSliceType))
	toElemKind := toSliceType.Elem().Kind()
	for i := 0; i < fromValue.Len(); i++ {
		elemVar := fromValue.Index(i).Interface()
		var converted interface{}
		switch toElemKind {
		case reflect.Int:
			converted = Int(elemVar)
		case reflect.Int8:
			converted = Int8(elemVar)
		case reflect.Int16:
			converted = Int16(elemVar)
		case reflect.Int32:
			converted = Int32(elemVar)
		case reflect.Int64:
			converted = Int64(elemVar)
		case reflect.Uint:
			converted = Uint(elemVar)
		case reflect.Uint8:
			converted = Uint8(elemVar)
		case reflect.Uint16:
			converted = Uint16(elemVar)
		case reflect.Uint32:
			converted = Uint32(elemVar)
		case reflect.Uint64:
			converted = Uint64(elemVar)
		case reflect.Bool:
			converted = Bool(elemVar)
		case reflect.Float32:
			converted = Float32(elemVar)
		case reflect.Float64:
			converted = Float64(elemVar)
		case reflect.String:
			converted = String(elemVar)
		default:
			continue
		}
		out = reflect.Append(out, reflect.ValueOf(converted))
	}
	return out.Interface(), nil
}
internal/utils/types/types.go
0.513181
0.412175
types.go
starcoder
package tpe

import (
	"math"

	"gonum.org/v1/gonum/floats"
)

// ParzenEstimatorParams holds the parameters of ParzenEstimator.
type ParzenEstimatorParams struct {
	ConsiderPrior     bool
	ConsiderMagicClip bool
	ConsiderEndpoints bool
	Weights           FuncWeights
	PriorWeight       float64 // optional; only read when ConsiderPrior is true
}

// ParzenEstimator is a surrogate model for TPE: a weighted mixture of
// Gaussians with means Mus and widths Sigmas over the search interval.
type ParzenEstimator struct {
	Weights []float64
	Mus     []float64
	Sigmas  []float64
}

// buildEstimator derives the (normalized weights, sorted means, sigmas)
// triple of the Gaussian mixture from the observed values mus on the
// interval [low, high]. When params.ConsiderPrior is set, one extra prior
// component centered at the interval midpoint is inserted at its sorted
// position (priorPos) with weight params.PriorWeight and sigma high-low.
func buildEstimator(
	mus []float64,
	low float64,
	high float64,
	params ParzenEstimatorParams,
) ([]float64, []float64, []float64) {
	considerPrior := params.ConsiderPrior
	priorWeight := params.PriorWeight
	considerMagicClip := params.ConsiderMagicClip
	considerEndpoints := params.ConsiderEndpoints
	weightsFunc := params.Weights

	var sortedWeights []float64
	var sortedMus []float64
	var sigma []float64
	var order []int    // permutation that sorts mus ascending
	var priorPos int   // index of the prior component within sortedMus
	var priorSigma float64

	if considerPrior {
		// The prior component sits at the midpoint of the interval with a
		// width equal to the full interval.
		priorMu := 0.5 * (low + high)
		priorSigma = 1.0 * (high - low)
		if len(mus) == 0 {
			// No observations: the mixture is just the prior.
			sortedMus = []float64{priorMu}
			sigma = []float64{priorSigma}
			priorPos = 0
			order = make([]int, 0)
		} else {
			// Sort the observations and splice the prior mean into place.
			order = make([]int, len(mus))
			floats.Argsort(mus, order)
			priorPos = location(choice(mus, order), priorMu)
			sortedMus = make([]float64, 0, len(mus)+1)
			sortedMus = append(sortedMus, choice(mus, order[:priorPos])...)
			sortedMus = append(sortedMus, priorMu)
			sortedMus = append(sortedMus, choice(mus, order[priorPos:])...)
		}
	} else {
		order = make([]int, len(mus))
		floats.Argsort(mus, order)
		sortedMus = choice(mus, order)
	}
	// Decide the sigmas: each component's width is the larger of its gaps to
	// its left and right neighbors, with low/high acting as virtual
	// neighbors at the ends.
	if len(mus) > 0 {
		// NOTE(review): append(sortedMus, high) may write into sortedMus'
		// spare capacity when choice() returns a slice with extra capacity —
		// confirm choice() always returns an exactly-sized slice.
		lowSortedMusHigh := append(sortedMus, high)
		lowSortedMusHigh = append([]float64{low}, lowSortedMusHigh...)
		l := len(lowSortedMusHigh)
		sigma = make([]float64, l)
		for i := 0; i < l-2; i++ {
			sigma[i+1] = math.Max(lowSortedMusHigh[i+1]-lowSortedMusHigh[i], lowSortedMusHigh[i+2]-lowSortedMusHigh[i+1])
		}
		if !considerEndpoints && len(lowSortedMusHigh) > 2 {
			// Ignore the virtual low/high endpoints when sizing the first
			// and last real components.
			sigma[1] = lowSortedMusHigh[2] - lowSortedMusHigh[1]
			sigma[l-2] = lowSortedMusHigh[l-2] - lowSortedMusHigh[l-3]
		}
		// Drop the entries for the two virtual endpoints.
		sigma = sigma[1 : l-1]
	}
	// Decide the weights: per-observation weights from weightsFunc, with the
	// prior's weight spliced in at priorPos, then normalized to sum to 1.
	unsortedWeights := weightsFunc(len(mus))
	if considerPrior {
		sortedWeights = make([]float64, 0, len(sortedMus))
		sortedWeights = append(sortedWeights, choice(unsortedWeights, order[:priorPos])...)
		sortedWeights = append(sortedWeights, priorWeight)
		sortedWeights = append(sortedWeights, choice(unsortedWeights, order[priorPos:])...)
	} else {
		sortedWeights = choice(unsortedWeights, order)
	}
	sumSortedWeights := floats.Sum(sortedWeights)
	for i := range sortedWeights {
		sortedWeights[i] /= sumSortedWeights
	}
	// We adjust the range of the 'sigma' according to the 'consider_magic_clip' flag.
	maxSigma := 1.0 * (high - low)
	var minSigma float64
	if considerMagicClip {
		minSigma = 1.0 * (high - low) / math.Min(100.0, 1.0+float64(len(sortedMus)))
	} else {
		minSigma = eps
	}
	clip(sigma, minSigma, maxSigma)
	if considerPrior {
		// The prior keeps its full width regardless of clipping.
		sigma[priorPos] = priorSigma
	}
	return sortedWeights, sortedMus, sigma
}

// NewParzenEstimator returns the parzen estimator object.
func NewParzenEstimator(mus []float64, low, high float64, params ParzenEstimatorParams) *ParzenEstimator {
	estimator := &ParzenEstimator{
		Weights: nil,
		Mus:     nil,
		Sigmas:  nil,
	}
	sWeights, sMus, sigma := buildEstimator(mus, low, high, params)
	estimator.Weights = sWeights
	estimator.Mus = sMus
	estimator.Sigmas = sigma
	return estimator
}
tpe/parzen_estimator.go
0.7181
0.452234
parzen_estimator.go
starcoder
package dice

import (
	"fmt"
	"math/rand"
	"strings"
	"time"
)

// rng is the package-wide randomness source, seeded once at startup.
var rng = rand.New(rand.NewSource(time.Now().UnixNano()))

// Dice represents a set of 1 type of dice, i.e: 3d20 OR 2d4 OR 1d6
type Dice struct {
	number, sides int
}

// NewDice takes the common notation "nds" where n is the number of dice and s
// is the number of sides; i.e 1d6 and returns a new Dice set. Returns error if
// s is not a valid dice string
func NewDice(s string) (*Dice, error) {
	count, faces, err := strToVal(s)
	if err != nil {
		return new(Dice), err
	}
	return &Dice{number: count, sides: faces}, nil
}

// Add adds n die to a single set
func (d *Dice) Add(n int) {
	d.number += n
}

// Remove removes n die from a single set to a minimum of 1
func (d *Dice) Remove(n int) {
	d.number -= n
	if d.number < 1 {
		d.number = 1
	}
}

// Min returns the minimume possible roll
func (d *Dice) Min() int {
	return d.number
}

// Max returns the maximum possible roll
func (d *Dice) Max() int {
	return d.number * d.sides
}

// Roll all dice in set and return the aggregate result and an array of
// individual results
func (d *Dice) Roll() (int, []int) {
	rolls := make([]int, 0, d.number)
	total := 0
	for i := 0; i < d.number; i++ {
		face := rng.Intn(d.sides) + 1
		total += face
		rolls = append(rolls, face)
	}
	return total, rolls
}

// String satisfies the Stringer interface for Dice
func (d *Dice) String() string {
	return fmt.Sprintf("%dd%d", d.number, d.sides)
}

// Bag is a collection of different types of Dice; i.e [3d20, 2d4, 1d6]
type Bag struct {
	dice []*Dice
}

// NewBag returns a new Bag object. A bag can be created with a collection of
// dice specified in string form for convenience, and comma-separated lists
// are accepted too. I.e b := NewBag("2d20", "1d6", "8d8").
// Returns error if any item in dice is not a valid dice string
func NewBag(dice ...string) (*Bag, error) {
	bag := new(Bag)
	for _, arg := range dice {
		// Each argument may itself be a comma-separated list.
		for _, one := range strings.Split(arg, ",") {
			if err := bag.Add(one); err != nil {
				return bag, err
			}
		}
	}
	return bag, nil
}

// Add puts more dice in the bag, adding to existing sets where possible.
// Returns error if s is not a valid dice string
func (b *Bag) Add(s string) error {
	fresh, err := NewDice(s)
	if err != nil {
		return err
	}
	// Merge into an existing set with the same number of sides, if any.
	for _, existing := range b.dice {
		if existing.sides == fresh.sides {
			existing.number += fresh.number
			return nil
		}
	}
	b.dice = append(b.dice, fresh)
	return nil
}

// Remove reduces the number of dice by the specified s string if s exists in
// the bag (to a minimum of 0). Returns error if s is not a valid dice string
func (b *Bag) Remove(s string) error {
	count, faces, err := strToVal(s)
	if err != nil {
		return err
	}
	for _, set := range b.dice {
		if set.sides != faces {
			continue
		}
		set.number -= count
		if set.number < 0 {
			set.number = 0
		}
		break
	}
	return nil
}

// Min returns the minimum possible roll
func (b *Bag) Min() int {
	sum := 0
	for _, set := range b.dice {
		sum += set.Min()
	}
	return sum
}

// Max returns the maximum possible roll
func (b *Bag) Max() int {
	sum := 0
	for _, set := range b.dice {
		sum += set.Max()
	}
	return sum
}

// Roll returns aggregate rolls of all Dice in the bag and a map set of results
func (b *Bag) Roll() (int, map[string][]int) {
	total, results := 0, make(map[string][]int)
	for _, set := range b.dice {
		subtotal, rolls := set.Roll()
		total += subtotal
		results[set.String()] = rolls
	}
	return total, results
}

// String satisfies the Stringer interface for Bags
func (b *Bag) String() string {
	parts := make([]string, len(b.dice))
	for i, set := range b.dice {
		parts[i] = fmt.Sprint(set)
	}
	return strings.Join(parts, ", ")
}

// strToVal parses "nds" notation and returns the number of dice and sides;
// both must be at least 1.
func strToVal(a string) (number, sides int, err error) {
	if _, err = fmt.Sscanf(a, "%dd%d", &number, &sides); err != nil || number < 1 || sides < 1 {
		return number, sides, fmt.Errorf("%s is not a valid dice string", a)
	}
	return number, sides, nil
}
dice.go
0.8059
0.417212
dice.go
starcoder
package generators import ( "math" "github.com/go-gl/gl/v4.5-core/gl" ) // These shape generators define vertices and indices that represent // unit-sized vector shapes. These shapes are then loaded into an Atlas. // GenerateUnitHLineVectorShape builds a horizontal unit length line. func GenerateUnitHLineVectorShape() (vertices []float32, indices []uint32, mode int) { vertices = []float32{ -0.5, 0.0, 0.0, 0.5, 0.0, 0.0, } indices = []uint32{ 0, 1, } return vertices, indices, gl.LINES } // GenerateUnitVLineVectorShape builds a vertical unit length line. func GenerateUnitVLineVectorShape() (vertices []float32, indices []uint32, mode int) { vertices = []float32{ 0.0, -0.5, 0.0, 0.0, 0.5, 0.0, } indices = []uint32{ 0, 1, } return vertices, indices, gl.LINES } // GenerateUnitRectangleVectorShape builds a rectangle unit length line. func GenerateUnitRectangleVectorShape(centered bool, forFilling bool) (vertices []float32, indices []uint32, mode int) { if centered { vertices = []float32{ -0.5, -0.5, 0.0, 0.5, -0.5, 0.0, 0.5, 0.5, 0.0, -0.5, 0.5, 0.0, } } else { vertices = []float32{ 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, } } // These indices can be used with the same vertices. You don't need // separate vertex arrays. if forFilling { indices = []uint32{ 0, 1, 2, 0, 2, 3, } mode = gl.TRIANGLES } else { indices = []uint32{ 0, 1, 2, 3, // CCW } mode = gl.LINE_LOOP } return vertices, indices, mode } // GenerateUnitTriangleVectorShape builds a triangle with equal length sides. 
func GenerateUnitTriangleVectorShape(forFilling bool) (vertices []float32, indices []uint32, mode int) { vertices = []float32{ -0.5, -0.5, 0.0, 0.5, -0.5, 0.0, 0.0, math.Pi / 10.0, 0.0, } indices = []uint32{ 0, 1, 2, } if forFilling { mode = gl.TRIANGLES } else { mode = gl.LINE_LOOP } return vertices, indices, mode } // GenerateUnitPlusVectorShape builds a plus-sign of unit length func GenerateUnitPlusVectorShape() (vertices []float32, indices []uint32, mode int) { vertices = []float32{ -0.5, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0, -0.5, 0.0, 0.0, 0.5, 0.0, } indices = []uint32{ 0, 1, 2, 3, } return vertices, indices, gl.LINES } // GenerateUnitZBarVectorShape builds a "Z" shape for testing purposes. func GenerateUnitZBarVectorShape(forFilling bool) (vertices []float32, indices []uint32, mode int) { vertices = []float32{ -0.1, -0.5, 0.0, 0.5, -0.5, 0.0, 0.5, -0.4, 0.0, 0.1, -0.4, 0.0, 0.1, 0.5, 0.0, -0.5, 0.5, 0.0, -0.5, 0.4, 0.0, -0.1, 0.4, 0.0, } // These indices can be used with the same vertices. You don't need // separate vertex arrays. if forFilling { indices = []uint32{ 0, 1, 2, 2, 3, 0, 0, 3, 4, 0, 4, 7, 6, 7, 4, 6, 4, 5, } mode = gl.TRIANGLES } else { indices = []uint32{ 0, 1, 2, 3, 4, 5, 6, 7, // CCW } mode = gl.LINE_LOOP } return vertices, indices, mode } // GenerateUnitCircleVectorShape builds a circle with radius 0.5. 
func GenerateUnitCircleVectorShape(segments int, forFilling bool) (vertices []float32, indices []uint32, mode int) { radius := 0.5 // diameter of 1.0 step := math.Pi / float64(segments) index := uint32(0) if forFilling { // Filled circles have a center point for the Fan fill algorithm vertices = append(vertices, 0.0, 0.0, 0.0) // Reference the center point indices = append(indices, 0) index++ mode = gl.TRIANGLE_FAN } else { mode = gl.LINE_LOOP } angle := 0.0 for i := 0; i <= 2*segments; i++ { x := math.Cos(angle) * radius y := math.Sin(angle) * radius vertices = append(vertices, float32(x), float32(y), 0.0) indices = append(indices, index) index++ angle += step } return vertices, indices, mode } // GenerateUnitArcVectorShape builds a arc/pie with radius 0.5. func GenerateUnitArcVectorShape(startAngle, endAngle float64, segments int, forFilling bool) (vertices []float32, indices []uint32, mode int) { radius := 0.5 // diameter of 1.0 step := (endAngle - startAngle) / float64(segments) index := uint32(0) vertices = append(vertices, 0.0, 0.0, 0.0) // Reference the center point indices = append(indices, 0) if forFilling { mode = gl.TRIANGLE_FAN } else { mode = gl.LINE_LOOP } index++ for i := startAngle; i <= startAngle+endAngle; i += step { x := math.Cos(i) * radius y := math.Sin(i) * radius vertices = append(vertices, float32(x), float32(y), 0.0) indices = append(indices, index) index++ } return vertices, indices, mode }
extras/generators/vector_shape_builder.go
0.819099
0.626238
vector_shape_builder.go
starcoder
package es_array import ( "github.com/watermint/toolbox/essentials/collections/es_number" "github.com/watermint/toolbox/essentials/collections/es_value" "os" "sort" ) type Array interface { // Returns first entry of the array. // Returns Null es_value.Value (not nil) when the array is empty. First() es_value.Value // Returns last entry of the array. // Returns Null es_value.Value (not nil) when the array is empty. Last() es_value.Value // Returns first n entries. Left(n int) Array // Returns first elements while the f returns true. LeftWhile(f func(v es_value.Value) bool) Array // Returns last n entries. Right(n int) Array // Returns last elements while the f returns true. RightWhile(f func(v es_value.Value) bool) Array // Returns size of the array. Size() int // Reverse order Reverse() Array // Returns true if an array is empty. IsEmpty() bool // Counts the number of entries in the array which satisfy a condition. Count(f func(v es_value.Value) bool) int // Returns an entry of given index. // Returns Null es_value.Value (not nil) when the index is out of range. At(i int) es_value.Value // Return entries in array. Entries() []es_value.Value // Returns unique values Unique() Array // Returns joined array of other and this instance. Append(other Array) Array // Returns unique intersect entries of other and this array. Intersection(other Array) Array // Returns unique union entries of other and this array. Union(other Array) Array // Returns an array removing all occurrences of entries in other. 
Diff(other Array) Array // Returns sorted array Sort() Array // Returns array in []string AsStringArray() []string // Returns array in []Number AsNumberArray() []es_number.Number // Returns array in []interface{} AsInterfaceArray() []interface{} // es_value.Value#Hash and es_value.Value map HashMap() map[string]es_value.Value // Create a new array containing the values returned by the function Map(f func(v es_value.Value) es_value.Value) Array // For each values Each(f func(v es_value.Value)) } func Empty() Array { return &arrayImpl{ entries: make([]interface{}, 0), } } func NewByString(entries ...string) Array { vals := make([]interface{}, len(entries)) for i, entry := range entries { vals[i] = entry } return &arrayImpl{ entries: vals, } } func NewByInterface(entries ...interface{}) Array { if entries == nil { return Empty() } return &arrayImpl{ entries: entries, } } func NewByFileInfo(entries ...os.FileInfo) Array { vals := make([]interface{}, len(entries)) for i, entry := range entries { vals[i] = entry } return &arrayImpl{ entries: vals, } } func NewByValue(entries ...es_value.Value) Array { vals := make([]interface{}, len(entries)) for i, entry := range entries { vals[i] = entry } return &arrayImpl{ entries: vals, } } func NewByHashValueMap(entries map[string]es_value.Value) Array { vals := make([]interface{}, 0) for _, entry := range entries { vals = append(vals, entry) } return &arrayImpl{ entries: vals, } } type arrayImpl struct { entries []interface{} } func (z arrayImpl) Each(f func(v es_value.Value)) { for _, e := range z.entries { f(es_value.New(e)) } } func (z arrayImpl) Reverse() Array { entries := make([]es_value.Value, 0) for i := len(z.entries) - 1; i >= 0; i-- { v := es_value.New(z.entries[i]) entries = append(entries, v) } return NewByValue(entries...) 
} func (z arrayImpl) LeftWhile(f func(v es_value.Value) bool) Array { entries := make([]es_value.Value, 0) for i := 0; i < len(z.entries); i++ { v := es_value.New(z.entries[i]) if !f(v) { break } entries = append(entries, v) } return NewByValue(entries...) } func (z arrayImpl) RightWhile(f func(v es_value.Value) bool) Array { entries := make([]es_value.Value, 0) for i := len(z.entries) - 1; i >= 0; i-- { v := es_value.New(z.entries[i]) if !f(v) { break } entries = append(entries, v) } return NewByValue(entries...).Reverse() } func (z arrayImpl) Left(n int) Array { ne := es_number.Min(len(z.entries), n) entries := make([]es_value.Value, 0) for i := 0; i < ne.Int(); i++ { entries = append(entries, es_value.New(z.entries[i])) } return NewByValue(entries...) } func (z arrayImpl) Right(n int) Array { le := len(z.entries) ne := es_number.Min(le, n) entries := make([]es_value.Value, 0) for i := le - ne.Int(); i < le; i++ { entries = append(entries, es_value.New(z.entries[i])) } return NewByValue(entries...) 
} func (z arrayImpl) IsEmpty() bool { return len(z.entries) < 1 } func (z arrayImpl) Count(f func(v es_value.Value) bool) int { count := 0 for _, entry := range z.entries { if f(es_value.New(entry)) { count++ } } return count } func (z arrayImpl) First() es_value.Value { if len(z.entries) < 1 { return es_value.Null() } return es_value.New(z.entries[0]) } func (z arrayImpl) Last() es_value.Value { n := len(z.entries) if n < 1 { return es_value.Null() } return es_value.New(z.entries[n-1]) } func (z arrayImpl) Size() int { return len(z.entries) } func (z arrayImpl) At(i int) es_value.Value { n := len(z.entries) if i < 0 || i <= n { return es_value.Null() } return es_value.New(z.entries[i]) } func (z arrayImpl) Entries() []es_value.Value { entries := make([]es_value.Value, len(z.entries)) for i, entry := range z.entries { entries[i] = es_value.New(entry) } return entries } func (z arrayImpl) HashMap() map[string]es_value.Value { em := make(map[string]es_value.Value) for _, entry := range z.Entries() { em[entry.Hash()] = entry } return em } func (z arrayImpl) Unique() Array { em := z.HashMap() vals := make([]es_value.Value, 0) for _, v := range em { vals = append(vals, v) } return NewByValue(vals...) } func (z arrayImpl) Append(other Array) Array { entries := z.Entries() entries = append(entries, other.Entries()...) return NewByValue(entries...) } func (z arrayImpl) Intersection(other Array) Array { em1 := z.HashMap() em2 := other.HashMap() ema := make([]es_value.Value, 0) for k, e := range em1 { if _, ok := em2[k]; ok { ema = append(ema, e) } } return NewByValue(ema...) 
} func (z arrayImpl) Union(other Array) Array { em := z.HashMap() em2 := other.HashMap() for k, v := range em2 { em[k] = v } return NewByHashValueMap(em) } func (z arrayImpl) Diff(other Array) Array { em := z.HashMap() em2 := other.HashMap() for k := range em2 { delete(em, k) } return NewByHashValueMap(em) } func (z arrayImpl) Sort() Array { entries := z.Entries() sort.SliceStable(entries, func(i, j int) bool { return entries[i].Compare(entries[j]) < 0 }) return NewByValue(entries...) } func (z arrayImpl) AsStringArray() []string { entries := make([]string, len(z.entries)) for i, entry := range z.entries { entries[i] = es_value.New(entry).String() } return entries } func (z arrayImpl) AsNumberArray() []es_number.Number { entries := make([]es_number.Number, len(z.entries)) for i, entry := range z.entries { entries[i] = es_value.New(entry).AsNumber() } return entries } func (z arrayImpl) AsInterfaceArray() []interface{} { return z.entries } func (z arrayImpl) Map(f func(v es_value.Value) es_value.Value) Array { entries := make([]es_value.Value, len(z.entries)) for i, v := range z.entries { entries[i] = f(es_value.New(v)) } return NewByValue(entries...) }
essentials/collections/es_array/array.go
0.792384
0.490663
array.go
starcoder
package num import ( "github.com/cpmech/gosl/fun" "github.com/cpmech/gosl/la" ) // LineSolver finds the scalar λ that zeroes or minimizes f(x+λ⋅n) type LineSolver struct { // configuration UseDeriv bool // use Jacobian function [default = true if Jfcn is provided] // internal ffcn fun.Sv // scalar function of vector: y = f({x}) Jfcn fun.Vv // vector function of vector: {J} = df/d{x} @ {x} [optional / may be nil] y la.Vector // {y} = {x} + λ⋅{n} dfdx la.Vector // derivative df/d{x} bracket *Bracket // bracket solver *Brent // scalar minimizer // Stat NumFeval int // number of function evaluations NumJeval int // number of Jacobian evaluations // pointers x la.Vector // starting point n la.Vector // direction } // NewLineSolver returns a new LineSolver object // size -- length(x) // ffcn -- scalar function of vector: y = f({x}) // Jfcn -- vector function of vector: {J} = df/d{x} @ {x} [optional / may be nil] func NewLineSolver(size int, ffcn fun.Sv, Jfcn fun.Vv) (o *LineSolver) { o = new(LineSolver) o.ffcn = ffcn o.Jfcn = Jfcn o.y = la.NewVector(size) o.dfdx = la.NewVector(size) o.bracket = NewBracket(o.G) o.solver = NewBrent(o.G, o.H) if Jfcn != nil { o.UseDeriv = true } return } // Root finds the scalar λ that zeroes f(x+λ⋅n) func (o *LineSolver) Root(x, n la.Vector) (λ float64) { o.Set(x, n) λ = o.solver.Root(0, 1) o.NumFeval = o.solver.NumFeval + o.bracket.NumFeval o.NumJeval = o.solver.NumJeval return } // Min finds the scalar λ that minimizes f(x+λ⋅n) func (o *LineSolver) Min(x, n la.Vector) (λ float64) { o.Set(x, n) λmin, _, λmax, _, _, _ := o.bracket.Min(0, 1) if o.UseDeriv { λ = o.solver.MinUseD(λmin, λmax) } else { λ = o.solver.Min(λmin, λmax) } o.NumFeval = o.solver.NumFeval + o.bracket.NumFeval o.NumJeval = o.solver.NumJeval return } // MinUpdateX finds the scalar λ that minimizes f(x+λ⋅n), updates x and returns fmin = f({x}) // Input: // x -- initial point // n -- direction // Output: // λ -- scale parameter // x -- x @ minimum // fmin -- f({x}) func (o 
*LineSolver) MinUpdateX(x, n la.Vector) (λ, fmin float64) { λ = o.Min(x, n) la.VecAdd(o.x, 1, x, λ, n) // x := x + λ⋅n fmin = o.ffcn(o.x) o.NumFeval++ return } // Set sets x and n vectors as required by G(λ) and H(λ) functions func (o *LineSolver) Set(x, n la.Vector) { o.x = x o.n = n } // G implements g(λ) := f({y}(λ)) where {y}(λ) := {x} + λ⋅{n} func (o *LineSolver) G(λ float64) float64 { la.VecAdd(o.y, 1, o.x, λ, o.n) // xpn := x + λ⋅n return o.ffcn(o.y) } // H implements h(λ) = dg/dλ = df/d{y} ⋅ d{y}/dλ where {y} == {x} + λ⋅{n} func (o *LineSolver) H(λ float64) float64 { la.VecAdd(o.y, 1, o.x, λ, o.n) // y := x + λ⋅n o.Jfcn(o.dfdx, o.y) // dfdx @ y return la.VecDot(o.dfdx, o.n) // dfdx ⋅ n }
num/linesolver.go
0.706596
0.552359
linesolver.go
starcoder
package pt import "math" type Cube struct { Min Vector Max Vector Material Material Box Box } func NewCube(min, max Vector, material Material) *Cube { box := Box{min, max} return &Cube{min, max, material, box} } func (c *Cube) Compile() { } func (c *Cube) BoundingBox() Box { return c.Box } func (c *Cube) Intersect(r Ray) Hit { n := c.Min.Sub(r.Origin).Div(r.Direction) f := c.Max.Sub(r.Origin).Div(r.Direction) n, f = n.Min(f), n.Max(f) t0 := math.Max(math.Max(n.X, n.Y), n.Z) t1 := math.Min(math.Min(f.X, f.Y), f.Z) if t0 > 0 && t0 < t1 { return Hit{c, t0, nil} } return NoHit } func (c *Cube) UV(p Vector) Vector { p = p.Sub(c.Min).Div(c.Max.Sub(c.Min)) return Vector{p.X, p.Z, 0} } func (c *Cube) MaterialAt(p Vector) Material { return c.Material } func (c *Cube) NormalAt(p Vector) Vector { switch { case p.X < c.Min.X+EPS: return Vector{-1, 0, 0} case p.X > c.Max.X-EPS: return Vector{1, 0, 0} case p.Y < c.Min.Y+EPS: return Vector{0, -1, 0} case p.Y > c.Max.Y-EPS: return Vector{0, 1, 0} case p.Z < c.Min.Z+EPS: return Vector{0, 0, -1} case p.Z > c.Max.Z-EPS: return Vector{0, 0, 1} } return Vector{0, 1, 0} } func (c *Cube) Mesh() *Mesh { a := c.Min b := c.Max z := Vector{} m := c.Material v000 := Vector{a.X, a.Y, a.Z} v001 := Vector{a.X, a.Y, b.Z} v010 := Vector{a.X, b.Y, a.Z} v011 := Vector{a.X, b.Y, b.Z} v100 := Vector{b.X, a.Y, a.Z} v101 := Vector{b.X, a.Y, b.Z} v110 := Vector{b.X, b.Y, a.Z} v111 := Vector{b.X, b.Y, b.Z} triangles := []*Triangle{ NewTriangle(v000, v100, v110, z, z, z, m), NewTriangle(v000, v110, v010, z, z, z, m), NewTriangle(v001, v101, v111, z, z, z, m), NewTriangle(v001, v111, v011, z, z, z, m), NewTriangle(v000, v100, v101, z, z, z, m), NewTriangle(v000, v101, v001, z, z, z, m), NewTriangle(v010, v110, v111, z, z, z, m), NewTriangle(v010, v111, v011, z, z, z, m), NewTriangle(v000, v010, v011, z, z, z, m), NewTriangle(v000, v011, v001, z, z, z, m), NewTriangle(v100, v110, v111, z, z, z, m), NewTriangle(v100, v111, v101, z, z, z, m), } return 
NewMesh(triangles) }
pt/cube.go
0.795857
0.456834
cube.go
starcoder
package generator import ( "fmt" "strings" ) type ( group struct { name string unit } unit [rows][cols]point uint128 struct { ms, ls uint64 } ) var ( box = group{name: "box"} // These are all of the coordinates in a box (first dimension). col = group{name: "col"} // These are all of the coordinates in a column (first dimension). row = group{name: "row"} // These are all of the coordinates in a row (first dimension). influence [rows][cols]uint128 // These bit masks contain a 1 for locations that the point "shadows" (can see). ) func init() { for r := zero; r < rows; r++ { for c := zero; c < cols; c++ { p := point{r, c} box.unit[boxOf(r, c)][r%3*3+c%3] = p col.unit[c][r] = p row.unit[r][c] = p } } for r := zero; r < rows; r++ { for c := zero; c < cols; c++ { for _, p := range box.unit[boxOf(r, c)] { if p.r == r && p.c == c { continue } bit := p.r*9 + p.c if bit < 64 { influence[r][c].ls |= (1 << bit) } else { influence[r][c].ms |= (1 << (bit - 64)) } } for _, p := range col.unit[c] { if p.r == r && p.c == c { continue } bit := p.r*9 + p.c if bit < 64 { influence[r][c].ls |= (1 << bit) } else { influence[r][c].ms |= (1 << (bit - 64)) } } for _, p := range row.unit[r] { if p.r == r && p.c == c { continue } bit := p.r*9 + p.c if bit < 64 { influence[r][c].ls |= (1 << bit) } else { influence[r][c].ms |= (1 << (bit - 64)) } } } } } func (u uint128) and(other uint128) uint128 { return uint128{u.ms & other.ms, u.ls & other.ls} } func (u uint128) process(f func(uint8, uint8)) { for r := zero; r < rows; r++ { for c := zero; c < cols; c++ { bit := r*9 + c if bit < 64 { if u.ls&(1<<bit) != 0 { f(r, c) } } else { if u.ms&(1<<(bit-64)) != 0 { f(r, c) } } } } } func (u uint128) String() string { var b strings.Builder for r := zero; r < rows; r++ { for c := zero; c < cols; c++ { bit := r*9 + c if bit < 64 { if u.ls&(1<<bit) != 0 { fmt.Fprint(&b, "1") } else { fmt.Fprint(&b, "0") } } else { if u.ms&(1<<(bit-64)) != 0 { fmt.Fprint(&b, "1") } else { fmt.Fprint(&b, "0") } } } 
fmt.Fprintln(&b) } return b.String() //fmt.Sprintf("%64.64b%64.64b", u.ms, u.ls) } func (u *uint128) unset(p point) *uint128 { bit := p.r*9 + p.c if bit < 64 { u.ls &^= 1 << bit } else { u.ms &^= 1 << (bit - 64) } return u } func boxOf(r, c uint8) uint8 { return r/3*3 + c/3 } func boxOfPoint(p point) uint8 { return boxOf(p.r, p.c) }
generator/group.go
0.535341
0.511229
group.go
starcoder
package sgf import ( "strconv" "github.com/dgf/gotv/model" ) type decode func(g *model.Game, p string) // see https://en.wikipedia.org/wiki/Smart_Game_Format#About_the_format var decoder = map[string]decode{ // Add Black: locations of Black stones to be placed on the board prior to the first move //"AB": func(g *model.Game, p string) {}, // Annotations: name of the person commenting the game. //"AN": func(g *model.Game, p string) {}, // Application: application that was used to create the SGF file (e.g. CGOban2,...). //"AP": func(g *model.Game, p string) {}, // Add White: locations of White stones to be placed on the board prior to the first move. //"AW": func(g *model.Game, p string) {}, // Black Rank: rank of the Black player. "BR": func(g *model.Game, p string) { g.Black.Rank = model.ToRank(p) }, // Black Team: name of the Black team. "BT": func(g *model.Game, p string) { g.Black.Name = p }, // Copyright: copyright information. //"CP": func(g *model.Game, p string) {}, // Date: date of the game. "DT": func(g *model.Game, p string) { g.Date = model.ToDate(p) }, // Event: name of the event (e.g. 58th Honinbō Title Match). //"EV": func(g *model.Game, p string) {}, // File format: version of SGF specification governing this SGF file. //"FF": func(g *model.Game, p string) {}, // Game: type of game represented by this SGF file. A property value of 1 refers to Go. //"GM": func(g *model.Game, p string) {}, // Game Name: name of the game record. "GN": func(g *model.Game, p string) { g.Name = p }, // Handicap: the number of handicap stones given to Black. Placement of the handicap stones are set using the AB property. "HA": func(g *model.Game, p string) { h, _ := strconv.Atoi(p) g.Handicap = model.Handicap(h) }, // Komi: komi. "KM": func(g *model.Game, p string) { k, _ := strconv.ParseFloat(p, 64) g.Komi = model.Komi(k) }, // Opening: information about the opening (Fuseki), rarely used in any file. //"ON": func(g *model.Game, p string) {}, // Overtime: overtime system. 
//"OT": func(g *model.Game, p string) {}, // Black Name: name of the black player. "PB": func(g *model.Game, p string) { g.Black.Name = p }, // Player: color of player to start. "PC": func(g *model.Game, p string) {}, // Place: place where the game was played (e.g.: Tokyo). "PL": func(g *model.Game, p string) { g.Place = p }, // White Name: name of the white player. "PW": func(g *model.Game, p string) { g.White.Name = p }, // Result: result, usually in the format "B+R" (Black wins by resign) or "B+3.5" (black wins by 3.5). "RE": func(g *model.Game, p string) { g.Result = p }, // Round: round (e.g.: 5th game). //"RO": func(g *model.Game, p string) {}, // Rules: ruleset (e.g.: Japanese). //"RU": func(g *model.Game, p string) {}, // Source: source of the SGF file. //"SO": func(g *model.Game, p string) {}, // Size: size of the board, non-square boards are not supported. "SZ": func(g *model.Game, p string) { s, _ := strconv.Atoi(p) g.Size = model.Size(s) }, // Time limit: time limit in seconds. //"TM": func(g *model.Game, p string) {}, // User: name of the person who created the SGF file. //"US": func(g *model.Game, p string) {}, // White Rank: rank of the White player. "WR": func(g *model.Game, p string) { g.White.Rank = model.ToRank(p) }, // White Team: name of the White team. "WT": func(g *model.Game, p string) { if len(g.White.Name) == 0 { g.White.Name = p } }, }
sgf/decode.go
0.599954
0.402392
decode.go
starcoder
通过提高精度来生成更多级别的分形。使用四种不同精度类型的数字实现相同的分形:complex64、complex128、big.Float和big.Rat。(后面两种类型在math/big包声明。Float是有指定限精度的浮点数;Rat是无限精度的有理数。)它们间的性能和内存使用对比如何?当渲染图可见时缩放的级别是多少? */ // ref: https://github.com/torbiak/gopl/blob/master/ex3.8/main.go package main import ( "image" "image/color" "image/png" "math/cmplx" "os" "time" "fmt" "math/big" ) func main() { const ( xmin, ymin, xmax, ymax = -2, -2, +2, +2 width, height = 1024, 1024 ) start := time.Now() png1, _ := os.Create("./mandelbrot1.png") img := image.NewRGBA(image.Rect(0, 0, width, height)) for py := 0; py < height; py++ { y := float64(py)/height*(ymax-ymin) + ymin for px := 0; px < width; px++ { x := float64(px)/width*(xmax-xmin) + xmin var z complex128 z = complex(x, y) // Image point (px, py) represents complex value z. img.Set(px, py, mandelbrot128(z)) } } png.Encode(png1, img) // NOTE: ignoring errors fmt.Printf("elapsed %f\n", time.Since(start).Seconds()) start = time.Now() png2, _ := os.Create("./mandelbrot2.png") img = image.NewRGBA(image.Rect(0, 0, width, height)) for py := 0; py < height; py++ { y := float64(py)/height*(ymax-ymin) + ymin for px := 0; px < width; px++ { x := float64(px)/width*(xmax-xmin) + xmin z := complex(x, y) // Image point (px, py) represents complex value z. img.Set(px, py, mandelbrot64(z)) } } png.Encode(png2, img) // NOTE: ignoring errors fmt.Printf("elapsed %f\n", time.Since(start).Seconds()) start = time.Now() png3, _ := os.Create("./mandelbrot3.png") img = image.NewRGBA(image.Rect(0, 0, width, height)) for py := 0; py < height; py++ { y := float64(py)/height*(ymax-ymin) + ymin for px := 0; px < width; px++ { x := float64(px)/width*(xmax-xmin) + xmin z := complex(x, y) // Image point (px, py) represents complex value z. 
img.Set(px, py, mandelbrotBigFloat(z)) } } png.Encode(png3, img) // NOTE: ignoring errors fmt.Printf("elapsed %f\n", time.Since(start).Seconds()) start = time.Now() png4, _ := os.Create("./mandelbrot4.png") img = image.NewRGBA(image.Rect(0, 0, width, height)) for py := 0; py < height; py++ { y := float64(py)/height*(ymax-ymin) + ymin for px := 0; px < width; px++ { x := float64(px)/width*(xmax-xmin) + xmin z := complex(x, y) // Image point (px, py) represents complex value z. img.Set(px, py, mandelbrotRat(z)) } } png.Encode(png4, img) // NOTE: ignoring errors fmt.Printf("elapsed %f\n", time.Since(start).Seconds()) defer png1.Close() defer png2.Close() defer png3.Close() defer png4.Close() } func mandelbrot128(z complex128) color.Color { const iterations = 200 const contrast = 15 var v complex128 for n := uint8(0); n < iterations; n++ { v = v*v + z if cmplx.Abs(v) > 2 { return color.Gray{255 - contrast*n} } } return color.Black } func mandelbrot64(z complex128) color.Color { const iterations = 200 const contrast = 15 var v complex64 for n := uint8(0); n < iterations; n++ { v = v*v + complex64(z) if cmplx.Abs(complex128(v)) > 2 { return color.Gray{255 - contrast*n} } } return color.Black } func mandelbrotBigFloat(z complex128) color.Color { const iterations = 200 const contrast = 15 zR := (&big.Float{}).SetFloat64(real(z)) zI := (&big.Float{}).SetFloat64(imag(z)) var vR, vI = &big.Float{}, &big.Float{} for n := uint8(0); n < iterations; n++ { // (r+i)^2 = r^2 + 2ri + i^2 vR2, vI2 := &big.Float{}, &big.Float{} vR2.Mul(vR, vR).Sub(vR2, (&big.Float{}).Mul(vI, vI)).Add(vR2, zR) vI2.Mul(vR, vI).Mul(vI2, big.NewFloat(2)).Add(vI2, zI) vR, vI = vR2, vI2 squareSum := &big.Float{} squareSum.Mul(vR, vR).Add(squareSum, (&big.Float{}).Mul(vI, vI)) if squareSum.Cmp(big.NewFloat(4)) == 1 { return color.Gray{255 - contrast*n} } } return color.Black } func mandelbrotRat(z complex128) color.Color { // 有生之年 const iterations = 20 const contrast = 15 zR := 
(&big.Rat{}).SetFloat64(real(z)) zI := (&big.Rat{}).SetFloat64(imag(z)) var vR, vI = &big.Rat{}, &big.Rat{} for n := uint8(0); n < iterations; n++ { // (r+i)^2 = r^2 + 2ri + i^2 vR2, vI2 := &big.Rat{}, &big.Rat{} vR2.Mul(vR, vR).Sub(vR2, (&big.Rat{}).Mul(vI, vI)).Add(vR2, zR) vI2.Mul(vR, vI).Mul(vI2, big.NewRat(2, 1)).Add(vI2, zI) vR, vI = vR2, vI2 squareSum := &big.Rat{} squareSum.Mul(vR, vR).Add(squareSum, (&big.Rat{}).Mul(vI, vI)) if squareSum.Cmp(big.NewRat(4, 1)) == 1 { return color.Gray{255 - contrast*n} } } return color.Black }
ch3/ex3.8/main.go
0.610221
0.709126
main.go
starcoder
package powersoftau import ( "bytes" "encoding/binary" "github.com/FiloSottile/powersoftau/bls12" "github.com/FiloSottile/powersoftau/internal/chacha20" ) /* The Rust hash_to_g2 implementation, which we have to match to pass verification, uses the Rand trait as implemented by ChaChaRng. Here is a reversed spec: 1. Split the first 32 bytes of the digest into 8 uint32, reverse their byte order and use the result as a ChaCha20 key [hash_to_g2] [read_u32::<BigEndian>] [ChaChaRng::from_seed] 2. Pick a random field element x = c0 + c1 * u [Fq2::Rand] 2.1. Pick a random c0 [Fq::Rand] 2.1.1. Extract 12 random little-endian uint32 from the ChaCha20 RNG, arrange them into little-endian pairs as uint64 and interpret those in little-endian order as a 384-bit number [FqRepr::Rand] [Rng::next_u64] [ChaChaRng::next_u32] The resulting big-endian byte order is like this: ... 19 18 17 16 23 22 21 20 11 10 9 8 15 14 13 12 3 2 1 0 7 6 5 4 2.1.2. Mask away the 3 top bits [FqRepr::Rand] 2.1.3. If the result is not lower than the field modulus [Fq::is_valid], go back to 2.1.1 2.1.4. Perform a Montgomery reduction [Fq::into_repr] [G2Uncompressed::from_affine] [Fq::mont_reduce] 2.2. Pick a random c1, like in 2.1 3. Pick a random flag by extracting a little-endian uint32 from the RNG and checking if the LSB is 1 [bool::Rand] 4. Compute y [G2Affine::get_point_from_x] 4.1. Compute ±y = sqrt(x^3 + b) 4.2. If no square root exists, go back to 2 4.3. Select the higher (modulo the field modulus) of ±y if the flag at 3 is set, the lower otherwise 5. Scale p = (x, y) by the curve cofactor [G2Affine::scale_by_cofactor] 5.1. Perform the scalar multiplication cofactor×p 6. If p is zero (the point at infinity) go back to 2 7. 
Return p [G2::Rand] */ func HashToG2(digest []byte) *bls12.EP2 { var key [32]byte for i := 0; i < 32; i += 4 { k := binary.LittleEndian.Uint32(digest[i:]) binary.BigEndian.PutUint32(key[i:], k) } rng := chacha20.NewRng(&key) p := bls12.NewEP2() for { c0 := extractFieldElement(rng) c1 := extractFieldElement(rng) greater := extractBool(rng) // Use point deserialization instead of reimplementing lexicographic y ordering. buf := make([]byte, bls12.G2CompressedSize) copy(buf, c1[:]) copy(buf[48:], c0[:]) buf[0] |= 1 << 7 // serializationCompressed if greater { buf[0] |= 1 << 5 // serializationBigY } p, err := p.DecodeCompressed(buf) if err != nil { continue } p.ScaleByCofactor() if p.IsZero() { continue } return p } } var fqModulus = [48]byte{0x1a, 0x01, 0x11, 0xea, 0x39, 0x7f, 0xe6, 0x9a, 0x4b, 0x1b, 0xa7, 0xb6, 0x43, 0x4b, 0xac, 0xd7, 0x64, 0x77, 0x4b, 0x84, 0xf3, 0x85, 0x12, 0xbf, 0x67, 0x30, 0xd2, 0xa0, 0xf6, 0xb0, 0xf6, 0x24, 0x1e, 0xab, 0xff, 0xfe, 0xb1, 0x53, 0xff, 0xff, 0xb9, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xaa, 0xab} func extractFieldElement(rng *chacha20.Rng) [48]byte { for { var res [48]byte for i := 48 - 8; i >= 0; i -= 8 { binary.BigEndian.PutUint32(res[i:], rng.ReadUint32()) binary.BigEndian.PutUint32(res[i+4:], rng.ReadUint32()) } res[0] &= 0xff >> 3 if bytes.Compare(res[:], fqModulus[:]) >= 0 { continue } bls12.FqMontgomeryReduce(res[:]) return res } } func extractBool(rng *chacha20.Rng) bool { x := rng.ReadUint32() return x&1 == 1 }
powersoftau/hash_to_g2.go
0.680454
0.558688
hash_to_g2.go
starcoder
package predictor import ( "image" "image/png" "os" "runtime" "runtime/debug" "github.com/pkg/errors" imagetypes "github.com/rai-project/image/types" tf "github.com/tensorflow/tensorflow/tensorflow/go" "github.com/tensorflow/tensorflow/tensorflow/go/op" gotensor "gorgonia.org/tensor" ) func disableFrameworkAutoTuning() { os.Setenv("TF_CUDNN_USE_AUTOTUNE", "0") } func makeTensorFromGoTensors(in0 []*gotensor.Dense) (*tf.Tensor, error) { if len(in0) < 1 { return nil, errors.New("no dense tensor in input") } fst := in0[0] joined, err := fst.Concat(0, in0[1:]...) if err != nil { return nil, errors.Wrap(err, "unable to concat tensors") } joined.Reshape(append([]int{len(in0)}, fst.Shape()...)...) shape := make([]int64, len(joined.Shape())) for ii, s := range joined.Shape() { shape[ii] = int64(s) } switch t := in0[0].Dtype(); t { case gotensor.Uint8: return flattenedUint8ToTensor(joined.Data().([]uint8), shape) case gotensor.Uint16: return flattenedUint16ToTensor(joined.Data().([]uint16), shape) case gotensor.Uint32: return flattenedUint32ToTensor(joined.Data().([]uint32), shape) case gotensor.Int8: return flattenedInt8ToTensor(joined.Data().([]int8), shape) case gotensor.Int16: return flattenedInt16ToTensor(joined.Data().([]int16), shape) case gotensor.Int32: return flattenedInt32ToTensor(joined.Data().([]int32), shape) case gotensor.Float32: return flattenedFloat32ToTensor(joined.Data().([]float32), shape) case gotensor.Float64: return flattenedFloat64ToTensor(joined.Data().([]float64), shape) default: return nil, errors.Errorf("invalid element datatype %v", t) } } func reshapeTensorFloats(data [][]float32, shape []int64) (*tf.Tensor, error) { N, H, W, C := shape[0], shape[1], shape[2], shape[3] tensor := make([][][][]float32, N) for n := int64(0); n < N; n++ { ndata := data[n] tn := make([][][]float32, H) for h := int64(0); h < H; h++ { th := make([][]float32, W) for w := int64(0); w < W; w++ { offset := C * (W*h + w) tw := ndata[offset : offset+C] th[w] = tw } tn[h] = 
th } tensor[n] = tn } return tf.NewTensor(tensor) } func reshapeTensorBytes(data [][]uint8, shape []int64) (*tf.Tensor, error) { N, H, W, C := shape[0], shape[1], shape[2], shape[3] tensor := make([][][][]uint8, N) for n := int64(0); n < N; n++ { ndata := data[n] tn := make([][][]uint8, H) for h := int64(0); h < H; h++ { th := make([][]uint8, W) for w := int64(0); w < W; w++ { offset := C * (W*h + w) tw := ndata[offset : offset+C] th[w] = tw } tn[h] = th } tensor[n] = tn } return tf.NewTensor(tensor) } func decodeJpegGraph() (graph *tf.Graph, input, output tf.Output, err error) { s := op.NewScope() input = op.Placeholder(s, tf.String) output = op.ExpandDims(s, op.DecodeJpeg(s, input, op.DecodeJpegChannels(3)), op.Const(s.SubScope("make_batch"), int32(0))) graph, err = s.Finalize() return graph, input, output, err } func makeTensorFromRawImageBytes(b []byte) (*tf.Tensor, error) { // DecodeJpeg uses a scalar String-valued tensor as input. tensor, err := tf.NewTensor(string(b)) if err != nil { return nil, err } // Creates a tensorflow graph to decode the jpeg image graph, input, output, err := decodeJpegGraph() if err != nil { return nil, err } // Execute that graph to decode this one image session, err := tf.NewSession(graph, nil) if err != nil { return nil, err } defer session.Close() normalized, err := session.Run( map[tf.Output]*tf.Tensor{input: tensor}, []tf.Output{output}, nil) if err != nil { return nil, err } return normalized[0], nil } func toPng(filePath string, imgByte []byte, bounds image.Rectangle) { img := imagetypes.NewRGBImage(bounds) copy(img.Pix, imgByte) out, _ := os.Create(filePath) defer out.Close() err := png.Encode(out, img.ToRGBAImage()) if err != nil { log.Println(err) } } func zeros(height, width, channels int) [][][]float32 { rows := make([][][]float32, height) for ii := range rows { columns := make([][]float32, width) for jj := range columns { columns[jj] = make([]float32, channels) } rows[ii] = columns } return rows } func forceGC() { 
runtime.GC() debug.FreeOSMemory() } func makeUniformImage() [][][][]float32 { images := make([][][][]float32, 10) width := 1000 height := 1000 for ii := range images { sl := make([][][]float32, height) for jj := range sl { el := make([][]float32, width) for kk := range el { el[kk] = []float32{1, 0, 1} } sl[jj] = el } images[ii] = sl } return images }
predictor/utils.go
0.633864
0.435781
utils.go
starcoder
package eval import ( "fmt" "github.com/dikaeinstein/monkey/ast" "github.com/dikaeinstein/monkey/object" "github.com/dikaeinstein/monkey/token" ) // Eval evaluates walks the code by walking the parsed AST //gocyclo:ignore func Eval(node ast.Node, env *object.Environment) object.Object { switch node := node.(type) { // Statements case *ast.Program: return evalStatements(node.Statements, env) case *ast.ExpressionStatement: return Eval(node.Expression, env) case *ast.BlockStatement: return evalStatements(node.Statements, env) case *ast.LetStatement: val := Eval(node.Value, env) if object.IsError(val) { return val } env.Set(node.Name.Value, val) return nil case *ast.ReturnStatement: return Eval(node.ReturnValue, env) // Expressions case *ast.IntegerLiteral: return object.Integer(node.Value) case *ast.StringLiteral: return object.String(node.Value) case *ast.Boolean: return object.Boolean(node.Value) case *ast.PrefixExpression: return evalPrefixExpression(node, env) case *ast.InfixExpression: return evalInfixExpression(node, env) case *ast.IfExpression: return evalIfExpression(node, env) case *ast.Identifier: return evalIdentifier(node, env) case *ast.FunctionLiteral: return evalFunction(node, env) case *ast.CallExpression: return evalCallExpression(node, env) case *ast.ArrayLiteral: return evalArrayLiteral(node, env) case *ast.HashLiteral: return evalHashLiteral(node, env) case *ast.IndexExpression: left := Eval(node.Left, env) if object.IsError(left) { return left } index := Eval(node.Index, env) if object.IsError(index) { return index } return evalIndexExpression(left, index) default: return nil } } func evalStatements(statements []ast.Statement, env *object.Environment) object.Object { var result object.Object for _, stmt := range statements { if _, ok := stmt.(*ast.ReturnStatement); ok { return Eval(stmt, env) } result = Eval(stmt, env) if object.IsError(result) { return result } } return result } func evalPrefixExpression(node *ast.PrefixExpression, env 
*object.Environment) object.Object { right := Eval(node.Right, env) if object.IsError(right) { return right } switch node.Operator { case string(token.BANG): return evalBangOperatorExpression(right) case string(token.MINUS): return evalMinusPrefixOperatorExpression(right) default: return newError("unknown operator: %s%s", node.Operator, right.Type()) } } func evalInfixExpression(node *ast.InfixExpression, env *object.Environment) object.Object { left := Eval(node.Left, env) if object.IsError(left) { return left } right := Eval(node.Right, env) if object.IsError(right) { return right } switch { case left.Type() == object.BOOLEAN && right.Type() == object.BOOLEAN: return evalBooleanInfixExpression(node.Operator, left, right) case left.Type() == object.INTEGER && right.Type() == object.INTEGER: return evalIntegerInfixExpression(node.Operator, left, right) case left.Type() == object.STRING && right.Type() == object.STRING: return evalStringInfixExpression(node.Operator, left, right) case left.Type() != right.Type(): return newError("type mismatch: %s %s %s", left.Type(), node.Operator, right.Type()) default: return newError("unknown operator: %s %s %s", left.Type(), node.Operator, right.Type()) } } func evalIntegerInfixExpression(operator string, left, right object.Object) object.Object { lVal := int64(left.(object.Integer)) rVal := int64(right.(object.Integer)) switch operator { case string(token.PLUS): return object.Integer(lVal + rVal) case string(token.MINUS): return object.Integer(lVal - rVal) case string(token.ASTERISK): return object.Integer(lVal * rVal) case string(token.SLASH): return object.Integer(lVal / rVal) case string(token.EQ): return object.Boolean(lVal == rVal) case string(token.NotEQ): return object.Boolean(lVal != rVal) case string(token.GT): return object.Boolean(lVal > rVal) case string(token.LT): return object.Boolean(lVal < rVal) default: return newError("unknown operator: %s %s %s", left.Type(), operator, right.Type()) } } func 
evalBooleanInfixExpression(operator string, left, right object.Object) object.Object { lVal := bool(left.(object.Boolean)) rVal := bool(right.(object.Boolean)) switch operator { case string(token.EQ): return object.Boolean(lVal == rVal) case string(token.NotEQ): return object.Boolean(lVal != rVal) default: return newError("unknown operator: %s %s %s", left.Type(), operator, right.Type()) } } func evalStringInfixExpression(operator string, left, right object.Object) object.Object { lVal := left.(object.String) rVal := right.(object.String) if operator != string(token.PLUS) { return newError("unknown operator: %s %s %s", left.Type(), operator, right.Type()) } return lVal + rVal } func evalBangOperatorExpression(right object.Object) object.Object { switch right := right.(type) { case object.Boolean: return object.Boolean(!bool(right)) case object.Null: return object.Boolean(true) default: return object.Boolean(false) } } func evalMinusPrefixOperatorExpression(right object.Object) object.Object { intVal, ok := right.(object.Integer) if !ok { return newError("unknown operator: -%s", right.Type()) } return object.Integer(-int64(intVal)) } func evalIfExpression(node *ast.IfExpression, env *object.Environment) object.Object { condition := Eval(node.Condition, env) if object.IsError(condition) { return condition } if isTruthy(condition) { return Eval(node.Consequence, env) } else if node.Alternative != nil { return Eval(node.Alternative, env) } else { return object.NullValue() } } // evalIdentifier resolve names in this order: (local, enclosing, global, builtin) func evalIdentifier(node *ast.Identifier, env *object.Environment) object.Object { if val, ok := env.Get(node.Value); ok { return val } if val, ok := builtins[node.Value]; ok { return val } return newError("identifier not found: " + node.Value) } func evalFunction(node *ast.FunctionLiteral, env *object.Environment) object.Object { return &object.Function{ Parameters: node.Parameters, Body: node.Body, Env: env, } } 
func evalExpressions(exps []ast.Expression, env *object.Environment) []object.Object { var result []object.Object for _, e := range exps { evaluated := Eval(e, env) if object.IsError(evaluated) { return []object.Object{evaluated} } result = append(result, evaluated) } return result } func evalCallExpression(node *ast.CallExpression, env *object.Environment) object.Object { if node.Function.TokenLiteral() == "quote" { return quote(node.Arguments[0], env) } fn := Eval(node.Function, env) if object.IsError(fn) { return fn } args := evalExpressions(node.Arguments, env) if len(args) == 1 && object.IsError(args[0]) { return args[0] } return applyFunction(fn, args) } func applyFunction(fn object.Object, args []object.Object) object.Object { switch fn := fn.(type) { case *object.Function: env := object.NewEnclosedEnvironment(fn.Env) // bind arguments to parameters in the function stack frame a.k.a scope for i, arg := range args { ident := fn.Parameters[i] env.Set(ident.Value, arg) } return evalStatements(fn.Body.Statements, env) case object.BuiltInFunction: // use function already defined with host lang(Go) return fn(args...) 
default: return newError("not a function: %s", fn.Type()) } } func evalArrayLiteral(node *ast.ArrayLiteral, env *object.Environment) object.Object { elements := evalExpressions(node.Elements, env) if len(elements) == 1 && object.IsError(elements[0]) { return elements[0] } return &object.Array{Elements: elements} } func evalIndexExpression(left, index object.Object) object.Object { switch { case left.Type() == object.ARRAY && index.Type() == object.INTEGER: return evalArrayIndexExpression(left, index) case left.Type() == object.HASH: return evalHashIndexExpression(left, index) default: return newError("index operator not supported: %s", left.Type()) } } func evalArrayIndexExpression(left, index object.Object) object.Object { array := left.(*object.Array) idx := index.(object.Integer) max := int64(len(array.Elements) - 1) if idx < 0 || int64(idx) > max { return object.NullValue() } return array.Elements[idx] } func evalHashLiteral(node *ast.HashLiteral, env *object.Environment) object.Object { h := &object.Hash{Pairs: make(map[object.String]object.Object)} for k, v := range node.Pairs { kk := evalHashKey(Eval(k, env)) if object.IsError(kk) { return kk } key := kk.(object.String) value := Eval(v, env) if object.IsError(value) { return value } h.Pairs[key] = value } return h } func evalHashKey(key object.Object) object.Object { switch key := key.(type) { case object.String: return key case object.Integer: return object.String(key.Inspect()) case object.Boolean: return object.String(key.Inspect()) default: return newError("unusable as hash key: %s", key.Type()) } } func evalHashIndexExpression(left, index object.Object) object.Object { hash := left.(*object.Hash) kk := evalHashKey(index) if object.IsError(kk) { return kk } key := kk.(object.String) val, ok := hash.Pairs[key] if !ok { return object.NullValue() } return val } func quote(node ast.Node, env *object.Environment) object.Object { node = evalUnquoteCalls(node, env) return &object.Quote{Node: node} } func 
evalUnquoteCalls(node ast.Node, env *object.Environment) ast.Node { return ast.Modify(node, func(node ast.Node) ast.Node { if !isUnquotedCall(node) { return node } call, ok := node.(*ast.CallExpression) if !ok { return node } if len(call.Arguments) != 1 { return node } return convertObjectToASTNode(Eval(call.Arguments[0], env)) }) } func isUnquotedCall(node ast.Node) bool { exp, ok := node.(*ast.CallExpression) if !ok { return false } return exp.Function.TokenLiteral() == "unquote" } func convertObjectToASTNode(obj object.Object) ast.Node { switch obj := obj.(type) { case object.Integer: t := token.Token{ Type: token.INT, Literal: fmt.Sprintf("%d", obj), } return &ast.IntegerLiteral{Token: t, Value: int64(obj)} case object.Boolean: var t token.Token if obj { t = token.Token{Type: token.TRUE, Literal: "true"} } else { t = token.Token{Type: token.FALSE, Literal: "false"} } return &ast.Boolean{Token: t, Value: bool(obj)} case *object.Quote: return obj.Node default: return nil } } func isTruthy(obj object.Object) bool { switch obj := obj.(type) { case object.Boolean: return bool(obj) case object.Null: return false case object.Integer: return int64(obj) != 0 default: return true } } func newError(format string, a ...interface{}) object.Error { return object.Error(fmt.Sprintf(format, a...)) }
eval/eval.go
0.694613
0.435181
eval.go
starcoder
package set import ( . "github.com/noxer/nox/dot" "github.com/noxer/nox/maps" "github.com/noxer/nox/slice" ) // Set offers set functionality. A set is a data structure that only contains unique, unsorted values. type Set[T comparable] map[T]struct{} // New creates a new set from a list of values. func New[T comparable](from ...T) Set[T] { s := make(Set[T], len(from)) for _, e := range from { s[e] = struct{}{} } return s } // Union takes two sets and returns a new set that contains all values from // both sets. func Union[T comparable](a, b Set[T]) Set[T] { u := make(Set[T]) for k := range b { u[k] = struct{}{} } for k := range a { u[k] = struct{}{} } return u } // Intersection takes two sets and returns a new set containing only the values // found in both sets. func Intersection[T comparable](a, b Set[T]) Set[T] { u := make(Set[T]) for k := range a { if _, ok := b[k]; ok { u[k] = struct{}{} } } return u } // Len returns the number of elements in a Set. func (s Set[T]) Len() int { return len(s) } // Any returns any element from the set (useful if the set has only one value). func (s Set[T]) Any() T { for k := range s { return k } return Default[T]() } // Put an element into the set. func (s Set[T]) Put(e T) { s[e] = struct{}{} } // Has checks if the set contains a certain element. func (s Set[T]) Has(e T) bool { _, ok := s[e] return ok } // Delete removes an element from the set. func (s Set[T]) Delete(e T) { delete(s, e) } // Add a second set to the first one. This is like Union but the elements are // added to the set this message was called on. func (s Set[T]) Add(o Set[T]) { for k := range o { s[k] = struct{}{} } } // Substract removes all elements in set o from this set. func (s Set[T]) Subtract(o Set[T]) { for k := range o { delete(s, k) } } // Slice returns the unsorted list of elements of this set. func (s Set[T]) Slice() []T { return maps.Keys(s) } // Enumerate creates an enumerable from this set. 
func (s Set[T]) Enumerate() Enumerable[T] { return slice.Enumerate(s.Slice()) }
set/set.go
0.708213
0.409693
set.go
starcoder
// Package cc describes Creative Commons software licenses. package cc import "github.com/creachadair/lice/licenses" func init() { licenses.Register(licenses.License{ Name: "Creative Commons CC0", Slug: "cc0", URL: "https://creativecommons.org/publicdomain/zero/1.0/legalcode", Text: cc0text, PerFile: cc0file, }) } const cc0text = ` This software is released to the public domain by {{.Author}} under the terms of Creative Commons CC0. -- Statement of Purpose The laws of most jurisdictions throughout the world automatically confer exclusive Copyright and Related Rights (defined below) upon the creator and subsequent owner(s) (each and all, an "owner") of an original work of authorship and/or a database (each, a "Work"). Certain owners wish to permanently relinquish those rights to a Work for the purpose of contributing to a commons of creative, cultural and scientific works ("Commons") that the public can reliably and without fear of later claims of infringement build upon, modify, incorporate in other works, reuse and redistribute as freely as possible in any form whatsoever and for any purposes, including without limitation commercial purposes. These owners may contribute to the Commons to promote the ideal of a free culture and the further production of creative, cultural and scientific works, or to gain reputation or greater distribution for their Work in part through the use and efforts of others. For these and/or other purposes and motivations, and without any expectation of additional consideration or compensation, the person associating CC0 with a Work (the "Affirmer"), to the extent that he or she is an owner of Copyright and Related Rights in the Work, voluntarily elects to apply CC0 to the Work and publicly distribute the Work under its terms, with knowledge of his or her Copyright and Related Rights in the Work and the meaning and intended legal effect of CC0 on those rights. 1. Copyright and Related Rights. 
A Work made available under CC0 may be protected by copyright and related or neighboring rights ("Copyright and Related Rights"). Copyright and Related Rights include, but are not limited to, the following: the right to reproduce, adapt, distribute, perform, display, communicate, and translate a Work; moral rights retained by the original author(s) and/or performer(s); publicity and privacy rights pertaining to a person's image or likeness depicted in a Work; rights protecting against unfair competition in regards to a Work, subject to the limitations in paragraph 4(a), below; rights protecting the extraction, dissemination, use and reuse of data in a Work; database rights (such as those arising under Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, and under any national implementation thereof, including any amended or successor version of such directive); and other similar, equivalent or corresponding rights throughout the world based on applicable law or treaty, and any national implementations thereof. 2. Waiver. To the greatest extent permitted by, but not in contravention of, applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and unconditionally waives, abandons, and surrenders all of Affirmer's Copyright and Related Rights and associated claims and causes of action, whether now known or unknown (including existing as well as future claims and causes of action), in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "Waiver"). 
Affirmer makes the Waiver for the benefit of each member of the public at large and to the detriment of Affirmer's heirs and successors, fully intending that such Waiver shall not be subject to revocation, rescission, cancellation, termination, or any other legal or equitable action to disrupt the quiet enjoyment of the Work by the public as contemplated by Affirmer's express Statement of Purpose. 3. Public License Fallback. Should any part of the Waiver for any reason be judged legally invalid or ineffective under applicable law, then the Waiver shall be preserved to the maximum extent permitted taking into account Affirmer's express Statement of Purpose. In addition, to the extent the Waiver is so judged Affirmer hereby grants to each affected person a royalty-free, non transferable, non sublicensable, non exclusive, irrevocable and unconditional license to exercise Affirmer's Copyright and Related Rights in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "License"). The License shall be deemed effective as of the date CC0 was applied by Affirmer to the Work. Should any part of the License for any reason be judged legally invalid or ineffective under applicable law, such partial invalidity or ineffectiveness shall not invalidate the remainder of the License, and in such case Affirmer hereby affirms that he or she will not (i) exercise any of his or her remaining Copyright and Related Rights in the Work or (ii) assert any associated claims and causes of action with respect to the Work, in either case contrary to Affirmer's express Statement of Purpose. 4. Limitations and Disclaimers. 
No trademark or patent rights held by Affirmer are waived, abandoned, surrendered, licensed or otherwise affected by this document. Affirmer offers the Work as-is and makes no representations or warranties of any kind concerning the Work, express, implied, statutory or otherwise, including without limitation warranties of title, merchantability, fitness for a particular purpose, non infringement, or the absence of latent or other defects, accuracy, or the present or absence of errors, whether or not discoverable, all to the greatest extent permissible under applicable law. Affirmer disclaims responsibility for clearing rights of other persons that may apply to the Work or any use thereof, including without limitation any person's Copyright and Related Rights in the Work. Further, Affirmer disclaims responsibility for obtaining any necessary consents, permissions or other rights required for any use of the Work. Affirmer understands and acknowledges that Creative Commons is not a party to this document and has no duty or obligation with respect to this CC0 or use of the Work. ` const cc0file = ` This file is released under the Creative Commons CC0 license. See https://creativecommons.org/publicdomain/zero/1.0/legalcode. `
licenses/cc/cc0.go
0.617628
0.633566
cc0.go
starcoder
package metadata const AtaSmartAttributeDisplayTypeRaw = "raw" const AtaSmartAttributeDisplayTypeNormalized = "normalized" const AtaSmartAttributeDisplayTypeTransformed = "transformed" type AtaAttributeMetadata struct { ID int64 `json:"-"` DisplayName string `json:"-"` Ideal string `json:"ideal"` Critical bool `json:"critical"` Description string `json:"description"` Transform func(int, int64, string) int64 `json:"-"` //this should be a method to extract/tranform the normalized or raw data to a chartable format. Str TransformValueUnit string `json:"transform_value_unit,omitempty"` ObservedThresholds []ObservedThreshold `json:"observed_thresholds,omitempty"` //these thresholds must match the DisplayType DisplayType string `json:"display_type"` //"raw" "normalized" or "transformed" } const ObservedThresholdIdealLow = "low" const ObservedThresholdIdealHigh = "high" type ObservedThreshold struct { Low int64 `json:"low"` //threshold (row/normalized data) boundary low value High int64 `json:"high"` //threshold (row/normalized data) boundary high value AnnualFailureRate float64 `json:"annual_failure_rate"` //error rate % ErrorInterval []float64 `json:"error_interval"` } var AtaMetadata = map[int]AtaAttributeMetadata{ 1: { ID: 1, DisplayName: "Read Error Rate", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: ObservedThresholdIdealLow, Critical: false, Description: "(Vendor specific raw value.) Stores data related to the rate of hardware read errors that occurred when reading data from a disk surface. 
The raw value has different structure for different vendors and is often not meaningful as a decimal number.", ObservedThresholds: []ObservedThreshold{ { Low: 80, High: 95, AnnualFailureRate: 0.8879749768303985, ErrorInterval: []float64{0.682344353388663, 1.136105732920724}, }, { Low: 95, High: 110, AnnualFailureRate: 0.034155719633986996, ErrorInterval: []float64{0.030188482024981093, 0.038499386872354435}, }, { Low: 110, High: 125, AnnualFailureRate: 0.06390002135229157, ErrorInterval: []float64{0.05852004676110847, 0.06964160930553712}, }, { Low: 125, High: 140, AnnualFailureRate: 0, ErrorInterval: []float64{0, 0}, }, { Low: 140, High: 155, AnnualFailureRate: 0, ErrorInterval: []float64{0, 0}, }, { Low: 155, High: 170, AnnualFailureRate: 0, ErrorInterval: []float64{0, 0}, }, { Low: 170, High: 185, AnnualFailureRate: 0, ErrorInterval: []float64{0, 0}, }, { Low: 185, High: 200, AnnualFailureRate: 0.044823775021490854, ErrorInterval: []float64{0.032022762038723306, 0.06103725943096589}, }, }, }, 2: { ID: 2, DisplayName: "Throughput Performance", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: ObservedThresholdIdealHigh, Critical: false, Description: "Overall (general) throughput performance of a hard disk drive. 
If the value of this attribute is decreasing there is a high probability that there is a problem with the disk.", }, 3: { ID: 3, DisplayName: "Spin-Up Time", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: ObservedThresholdIdealLow, Critical: false, Description: "Average time of spindle spin up (from zero RPM to fully operational [milliseconds]).", ObservedThresholds: []ObservedThreshold{ { Low: 78, High: 96, AnnualFailureRate: 0.11452195377351217, ErrorInterval: []float64{0.10591837762295722, 0.12363823501915781}, }, { Low: 96, High: 114, AnnualFailureRate: 0.040274562840558074, ErrorInterval: []float64{0.03465055611002801, 0.046551312468303144}, }, { Low: 114, High: 132, AnnualFailureRate: 0.009100406705780476, ErrorInterval: []float64{0.006530608971356785, 0.012345729280075591}, }, { Low: 132, High: 150, AnnualFailureRate: 0.008561351734020232, ErrorInterval: []float64{0.004273795939256936, 0.015318623141355509}, }, { Low: 150, High: 168, AnnualFailureRate: 0.015780508262068848, ErrorInterval: []float64{0.005123888078524015, 0.03682644215646287}, }, { Low: 168, High: 186, AnnualFailureRate: 0.05262688124794024, ErrorInterval: []float64{0.0325768689524594, 0.08044577830285578}, }, { Low: 186, High: 204, AnnualFailureRate: 0.01957419424036038, ErrorInterval: []float64{0.0023705257325185624, 0.0707087198669825}, }, { Low: 204, High: 222, AnnualFailureRate: 0.026050959960031404, ErrorInterval: []float64{0.0006595532020744994, 0.1451466588889228}, }, }, }, 4: { ID: 4, DisplayName: "Start/Stop Count", DisplayType: AtaSmartAttributeDisplayTypeRaw, Ideal: "", Critical: false, Description: "A tally of spindle start/stop cycles. 
The spindle turns on, and hence the count is increased, both when the hard disk is turned on after having before been turned entirely off (disconnected from power source) and when the hard disk returns from having previously been put to sleep mode.", ObservedThresholds: []ObservedThreshold{ { Low: 0, High: 13, AnnualFailureRate: 0.01989335424860646, ErrorInterval: []float64{0.016596548909440657, 0.023653263230617408}, }, { Low: 13, High: 26, AnnualFailureRate: 0.03776935438256488, ErrorInterval: []float64{0.03310396052098642, 0.04290806173460437}, }, { Low: 26, High: 39, AnnualFailureRate: 0.11022223828187004, ErrorInterval: []float64{0.09655110535164119, 0.12528657238811672}, }, { Low: 39, High: 52, AnnualFailureRate: 0.16289995457762474, ErrorInterval: []float64{0.13926541653588131, 0.18939614504497515}, }, { Low: 52, High: 65, AnnualFailureRate: 0.19358212432279714, ErrorInterval: []float64{0.15864522253849073, 0.23392418181765526}, }, { Low: 65, High: 78, AnnualFailureRate: 0.1157094940074447, ErrorInterval: []float64{0.07861898732346269, 0.16424039052527728}, }, { Low: 78, High: 91, AnnualFailureRate: 0.12262136155304391, ErrorInterval: []float64{0.0670382394080032, 0.20573780888032978}, }, { Low: 91, High: 104, AnnualFailureRate: 0, ErrorInterval: []float64{0, 0}, }, }, }, 5: { ID: 5, DisplayName: "Reallocated Sectors Count", DisplayType: AtaSmartAttributeDisplayTypeRaw, Ideal: ObservedThresholdIdealLow, Critical: true, Description: "Count of reallocated sectors. The raw value represents a count of the bad sectors that have been found and remapped.Thus, the higher the attribute value, the more sectors the drive has had to reallocate. 
This value is primarily used as a metric of the life expectancy of the drive; a drive which has had any reallocations at all is significantly more likely to fail in the immediate months.", ObservedThresholds: []ObservedThreshold{ { Low: 0, High: 0, AnnualFailureRate: 0.025169175350572493, ErrorInterval: []float64{0.022768612038746357, 0.027753988579272894}, }, { Low: 1, High: 4, AnnualFailureRate: 0.027432608477803388, ErrorInterval: []float64{0.010067283827589948, 0.05970923963096652}, }, { Low: 4, High: 16, AnnualFailureRate: 0.07501976284584981, ErrorInterval: []float64{0.039944864177334186, 0.12828607921150972}, }, { Low: 16, High: 70, AnnualFailureRate: 0.23589260654405794, ErrorInterval: []float64{0.1643078435800227, 0.32806951196017664}, }, { Low: 70, High: 260, AnnualFailureRate: 0.36193219378600433, ErrorInterval: []float64{0.2608488901774093, 0.4892271827875412}, }, { Low: 260, High: 1100, AnnualFailureRate: 0.5676621428968173, ErrorInterval: []float64{0.4527895568499355, 0.702804359408436}, }, { Low: 1100, High: 4500, AnnualFailureRate: 1.5028253400346423, ErrorInterval: []float64{1.2681757596263297, 1.768305221795894}, }, { Low: 4500, High: 17000, AnnualFailureRate: 2.0659987547404763, ErrorInterval: []float64{1.6809790460512237, 2.512808045182302}, }, { Low: 17000, High: 70000, AnnualFailureRate: 1.7755385684503124, ErrorInterval: []float64{1.2796520259849835, 2.400012341226441}, }, }, }, 6: { ID: 6, DisplayName: "Read Channel Margin", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "Margin of a channel while reading data. The function of this attribute is not specified.", }, 7: { ID: 7, DisplayName: "Seek Error Rate", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "(Vendor specific raw value.) Rate of seek errors of the magnetic heads. If there is a partial failure in the mechanical positioning system, then seek errors will arise. 
Such a failure may be due to numerous factors, such as damage to a servo, or thermal widening of the hard disk. The raw value has different structure for different vendors and is often not meaningful as a decimal number.", ObservedThresholds: []ObservedThreshold{ { Low: 58, High: 76, AnnualFailureRate: 0.2040131025936549, ErrorInterval: []float64{0.17032852883286412, 0.2424096283327138}, }, { Low: 76, High: 94, AnnualFailureRate: 0.08725919610118257, ErrorInterval: []float64{0.08077138510999876, 0.09412943212007528}, }, { Low: 94, High: 112, AnnualFailureRate: 0.01087335627722523, ErrorInterval: []float64{0.008732197944943352, 0.013380600544561905}, }, { Low: 112, High: 130, AnnualFailureRate: 0, ErrorInterval: []float64{0, 0}, }, { Low: 130, High: 148, AnnualFailureRate: 0, ErrorInterval: []float64{0, 0}, }, { Low: 148, High: 166, AnnualFailureRate: 0, ErrorInterval: []float64{0, 0}, }, { Low: 166, High: 184, AnnualFailureRate: 0, ErrorInterval: []float64{0, 0}, }, { Low: 184, High: 202, AnnualFailureRate: 0.05316285755900475, ErrorInterval: []float64{0.03370069132942804, 0.07977038905848267}, }, }, }, 8: { ID: 8, DisplayName: "Seek Time Performance", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: ObservedThresholdIdealHigh, Critical: false, Description: "Average performance of seek operations of the magnetic heads. If this attribute is decreasing, it is a sign of problems in the mechanical subsystem.", }, 9: { ID: 9, DisplayName: "Power-On Hours", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "Count of hours in power-on state. The raw value of this attribute shows total count of hours (or minutes, or seconds, depending on manufacturer) in power-on state. By default, the total expected lifetime of a hard disk in perfect condition is defined as 5 years (running every day and night on all days). This is equal to 1825 days in 24/7 mode or 43800 hours. 
On some pre-2005 drives, this raw value may advance erratically and/or \"wrap around\" (reset to zero periodically).", }, 10: { ID: 10, DisplayName: "Spin Retry Count", DisplayType: AtaSmartAttributeDisplayTypeRaw, Ideal: ObservedThresholdIdealLow, Critical: true, Description: "Count of retry of spin start attempts. This attribute stores a total count of the spin start attempts to reach the fully operational speed (under the condition that the first attempt was unsuccessful). An increase of this attribute value is a sign of problems in the hard disk mechanical subsystem.", ObservedThresholds: []ObservedThreshold{ { Low: 0, High: 0, AnnualFailureRate: 0.05459827163896099, ErrorInterval: []float64{0.05113785787727033, 0.05823122757702782}, }, { //TODO: using fake data from attribute 11. Not enough data, but critical and correlated with failure. Low: 0, High: 80, AnnualFailureRate: 0.5555555555555556, ErrorInterval: []float64{0.014065448880161053, 3.095357439410498}, }, }, }, 11: { ID: 11, DisplayName: "Recalibration Retries or Calibration Retry Count", DisplayType: AtaSmartAttributeDisplayTypeRaw, Ideal: ObservedThresholdIdealLow, Critical: false, Description: "This attribute indicates the count that recalibration was requested (under the condition that the first attempt was unsuccessful). 
An increase of this attribute value is a sign of problems in the hard disk mechanical subsystem.", ObservedThresholds: []ObservedThreshold{ { Low: 0, High: 0, AnnualFailureRate: 0.04658866433672694, ErrorInterval: []float64{0.03357701137320878, 0.06297433993055492}, }, { Low: 0, High: 80, AnnualFailureRate: 0.5555555555555556, ErrorInterval: []float64{0.014065448880161053, 3.095357439410498}, }, { Low: 80, High: 160, AnnualFailureRate: 0, ErrorInterval: []float64{0, 0}, }, { Low: 160, High: 240, AnnualFailureRate: 0, ErrorInterval: []float64{0, 0}, }, { Low: 240, High: 320, AnnualFailureRate: 0, ErrorInterval: []float64{0, 0}, }, { Low: 320, High: 400, AnnualFailureRate: 0, ErrorInterval: []float64{0, 0}, }, { Low: 400, High: 480, AnnualFailureRate: 0, ErrorInterval: []float64{0, 0}, }, { Low: 480, High: 560, AnnualFailureRate: 0, ErrorInterval: []float64{0, 0}, }, }, }, 12: { ID: 12, DisplayName: "Power Cycle Count", DisplayType: AtaSmartAttributeDisplayTypeRaw, Ideal: ObservedThresholdIdealLow, Critical: false, Description: "This attribute indicates the count of full hard disk power on/off cycles.", ObservedThresholds: []ObservedThreshold{ { Low: 0, High: 13, AnnualFailureRate: 0.019835987118930823, ErrorInterval: []float64{0.016560870164523494, 0.023569242386797896}, }, { Low: 13, High: 26, AnnualFailureRate: 0.038210930067894826, ErrorInterval: []float64{0.03353859179329295, 0.0433520775718649}, }, { Low: 26, High: 39, AnnualFailureRate: 0.11053528307302571, ErrorInterval: []float64{0.09671061589521368, 0.1257816678419765}, }, { Low: 39, High: 52, AnnualFailureRate: 0.16831189443375036, ErrorInterval: []float64{0.1440976510675928, 0.19543066007594895}, }, { Low: 52, High: 65, AnnualFailureRate: 0.20630344262550107, ErrorInterval: []float64{0.1693965932069108, 0.2488633537247856}, }, { Low: 65, High: 78, AnnualFailureRate: 0.1030972634140512, ErrorInterval: []float64{0.06734655535304743, 0.15106137807407605}, }, { Low: 78, High: 91, AnnualFailureRate: 
0.12354840389522469, ErrorInterval: []float64{0.06578432170016109, 0.21127153335749593}, }, }, }, 13: { ID: 13, DisplayName: "Soft Read Error Rate", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: ObservedThresholdIdealLow, Critical: false, Description: "Uncorrected read errors reported to the operating system.", }, 22: { ID: 22, DisplayName: "Current Helium Level", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: ObservedThresholdIdealHigh, Critical: false, Description: "Specific to He8 drives from HGST. This value measures the helium inside of the drive specific to this manufacturer. It is a pre-fail attribute that trips once the drive detects that the internal environment is out of specification.", }, 170: { ID: 170, DisplayName: "Available Reserved Space", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "See attribute E8.", }, 171: { ID: 171, DisplayName: "SSD Program Fail Count", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "(Kingston) The total number of flash program operation failures since the drive was deployed.[33] Identical to attribute 181.", }, 172: { ID: 172, DisplayName: "SSD Erase Fail Count", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "(Kingston) Counts the number of flash erase failures. This attribute returns the total number of Flash erase operation failures since the drive was deployed. This attribute is identical to attribute 182.", }, 173: { ID: 173, DisplayName: "SSD Wear Leveling Count", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "Counts the maximum worst erase count on any block.", }, 174: { ID: 174, DisplayName: "Unexpected Power Loss Count", Ideal: "", Critical: false, Description: "Also known as \"Power-off Retract Count\" per conventional HDD terminology. 
Raw value reports the number of unclean shutdowns, cumulative over the life of an SSD, where an \"unclean shutdown\" is the removal of power without STANDBY IMMEDIATE as the last command (regardless of PLI activity using capacitor power). Normalized value is always 100.", }, 175: { ID: 175, DisplayName: "Power Loss Protection Failure", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "Last test result as microseconds to discharge cap, saturated at its maximum value. Also logs minutes since last test and lifetime number of tests. Raw value contains the following data: Bytes 0-1: Last test result as microseconds to discharge cap, saturates at max value. Test result expected in range 25 <= result <= 5000000, lower indicates specific error code. Bytes 2-3: Minutes since last test, saturates at max value.Bytes 4-5: Lifetime number of tests, not incremented on power cycle, saturates at max value. Normalized value is set to one on test failure or 11 if the capacitor has been tested in an excessive temperature condition, otherwise 100.", }, 176: { ID: 176, DisplayName: "Erase Fail Count", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "S.M.A.R.T. parameter indicates a number of flash erase command failures.", }, 177: { ID: 177, DisplayName: "Wear Range Delta", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "Delta between most-worn and least-worn Flash blocks. It describes how good/bad the wearleveling of the SSD works on a more technical way. 
", }, 179: { ID: 179, DisplayName: "Used Reserved Block Count Total", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "Pre-Fail attribute used at least in Samsung devices.", }, 180: { ID: 180, DisplayName: "Unused Reserved Block Count Total", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "\"Pre-Fail\" attribute used at least in HP devices. ", }, 181: { ID: 181, DisplayName: "Program Fail Count Total", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "Total number of Flash program operation failures since the drive was deployed.", }, 182: { ID: 182, DisplayName: "Erase Fail Count", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "\"Pre-Fail\" Attribute used at least in Samsung devices.", }, 183: { ID: 183, DisplayName: "SATA Downshift Error Count or Runtime Bad Block", DisplayType: AtaSmartAttributeDisplayTypeRaw, Ideal: ObservedThresholdIdealLow, Critical: false, Description: "Western Digital, Samsung or Seagate attribute: Either the number of downshifts of link speed (e.g. from 6Gbit/s to 3Gbit/s) or the total number of data blocks with detected, uncorrectable errors encountered during normal operation. 
Although degradation of this parameter can be an indicator of drive aging and/or potential electromechanical problems, it does not directly indicate imminent drive failure.", ObservedThresholds: []ObservedThreshold{ { Low: 0, High: 0, AnnualFailureRate: 0.09084549203210031, ErrorInterval: []float64{0.08344373475686712, 0.09872777224842152}, }, { Low: 1, High: 2, AnnualFailureRate: 0.05756065656498585, ErrorInterval: []float64{0.04657000847949464, 0.07036491775108872}, }, { Low: 2, High: 4, AnnualFailureRate: 0.6193088626208925, ErrorInterval: []float64{0.41784508895529787, 0.8841019099092139}, }, { Low: 4, High: 8, AnnualFailureRate: 0.5533447034299792, ErrorInterval: []float64{0.31628430884775033, 0.8985971312402635}, }, { Low: 8, High: 16, AnnualFailureRate: 0.3882388694727245, ErrorInterval: []float64{0.21225380267814295, 0.6513988534774338}, }, { Low: 16, High: 35, AnnualFailureRate: 0.37116708385481856, ErrorInterval: []float64{0.19763084005134446, 0.6347070173754686}, }, { Low: 35, High: 70, AnnualFailureRate: 0.2561146752205292, ErrorInterval: []float64{0.10297138269895259, 0.5276941165819332}, }, { Low: 70, High: 130, AnnualFailureRate: 0.40299684542586756, ErrorInterval: []float64{0.16202563309223209, 0.8303275247667772}, }, { Low: 130, High: 260, AnnualFailureRate: 0, ErrorInterval: []float64{0, 0}, }, }, }, 184: { ID: 184, DisplayName: "End-to-End error", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: ObservedThresholdIdealLow, Critical: true, Description: "This attribute is a part of Hewlett-Packard\"s SMART IV technology, as well as part of other vendors\" IO Error Detection and Correction schemas, and it contains a count of parity errors which occur in the data path to the media via the drive\"s cache RAM", ObservedThresholds: []ObservedThreshold{ { Low: 93, High: 94, AnnualFailureRate: 1.631212012870933, ErrorInterval: []float64{1.055634407303844, 2.407990716767714}, }, { Low: 94, High: 95, AnnualFailureRate: 0, ErrorInterval: 
[]float64{0, 0}, }, { Low: 95, High: 96, AnnualFailureRate: 0, ErrorInterval: []float64{0, 0}, }, { Low: 96, High: 97, AnnualFailureRate: 0, ErrorInterval: []float64{0, 0}, }, { Low: 97, High: 97, AnnualFailureRate: 0, ErrorInterval: []float64{0, 0}, }, { Low: 97, High: 98, AnnualFailureRate: 1.8069306930693072, ErrorInterval: []float64{0.04574752432804858, 10.067573453924245}, }, { Low: 98, High: 99, AnnualFailureRate: 0.8371559633027523, ErrorInterval: []float64{0.10138347095016888, 3.0240951820174824}, }, { Low: 99, High: 100, AnnualFailureRate: 0.09334816849865138, ErrorInterval: []float64{0.08689499010435861, 0.10015372448181788}, }, }, }, 185: { ID: 185, DisplayName: "Head Stability", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "Western Digital attribute.", }, 186: { ID: 186, DisplayName: "Induced Op-Vibration Detection", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "Western Digital attribute.", }, 187: { ID: 187, DisplayName: "Reported Uncorrectable Errors", DisplayType: AtaSmartAttributeDisplayTypeRaw, Ideal: ObservedThresholdIdealLow, Critical: true, Description: "The count of errors that could not be recovered using hardware ECC (see attribute 195).", ObservedThresholds: []ObservedThreshold{ { Low: 0, High: 0, AnnualFailureRate: 0.028130798308190524, ErrorInterval: []float64{0.024487830609364304, 0.032162944988161336}, }, { Low: 1, High: 1, AnnualFailureRate: 0.33877621175661743, ErrorInterval: []float64{0.22325565823630591, 0.4929016016666955}, }, { Low: 1, High: 3, AnnualFailureRate: 0.24064820598237213, ErrorInterval: []float64{0.14488594021076606, 0.3758019832614595}, }, { Low: 3, High: 6, AnnualFailureRate: 0.5014425058387142, ErrorInterval: []float64{0.3062941096766342, 0.7744372808405151}, }, { Low: 6, High: 11, AnnualFailureRate: 0.38007108544136836, ErrorInterval: []float64{0.2989500188963677, 0.4764223967570595}, }, { Low: 11, High: 20, 
AnnualFailureRate: 0.5346094598348444, ErrorInterval: []float64{0.40595137663302483, 0.6911066985735377}, }, { Low: 20, High: 35, AnnualFailureRate: 0.8428063943161636, ErrorInterval: []float64{0.6504601819243522, 1.0742259350903411}, }, { Low: 35, High: 65, AnnualFailureRate: 1.4429071005017484, ErrorInterval: []float64{1.1405581860945952, 1.8008133631629157}, }, { Low: 65, High: 120, AnnualFailureRate: 1.6190935390549661, ErrorInterval: []float64{1.0263664163011208, 2.4294352761068576}, }, }, }, 188: { ID: 188, DisplayName: "Command Timeout", DisplayType: AtaSmartAttributeDisplayTypeRaw, Ideal: ObservedThresholdIdealLow, Critical: true, Description: "The count of aborted operations due to HDD timeout. Normally this attribute value should be equal to zero.", ObservedThresholds: []ObservedThreshold{ { Low: 0, High: 0, AnnualFailureRate: 0.024893587674442153, ErrorInterval: []float64{0.020857343769186413, 0.0294830350167543}, }, { Low: 0, High: 13, AnnualFailureRate: 0.10044174089362015, ErrorInterval: []float64{0.0812633664077498, 0.1227848196758574}, }, { Low: 13, High: 26, AnnualFailureRate: 0.334030592234279, ErrorInterval: []float64{0.2523231196342665, 0.4337665082489293}, }, { Low: 26, High: 39, AnnualFailureRate: 0.36724705400842445, ErrorInterval: []float64{0.30398009356575617, 0.4397986538328568}, }, { Low: 39, High: 52, AnnualFailureRate: 0.29848155926978354, ErrorInterval: []float64{0.2509254838615984, 0.35242890006477073}, }, { Low: 52, High: 65, AnnualFailureRate: 0.2203079701535098, ErrorInterval: []float64{0.18366082845676174, 0.26212468677179274}, }, { Low: 65, High: 78, AnnualFailureRate: 0.3018169948863018, ErrorInterval: []float64{0.23779746376787655, 0.37776897542831006}, }, { Low: 78, High: 91, AnnualFailureRate: 0.32854928239235887, ErrorInterval: []float64{0.2301118782147336, 0.4548506948185028}, }, { Low: 91, High: 104, AnnualFailureRate: 0.28488916640649387, ErrorInterval: []float64{0.1366154288236293, 0.5239213202729072}, }, }, }, 189: { 
ID: 189, DisplayName: "High Fly Writes", DisplayType: AtaSmartAttributeDisplayTypeRaw, Ideal: ObservedThresholdIdealLow, Critical: false, Description: "HDD manufacturers implement a flying height sensor that attempts to provide additional protections for write operations by detecting when a recording head is flying outside its normal operating range. If an unsafe fly height condition is encountered, the write process is stopped, and the information is rewritten or reallocated to a safe region of the hard drive. This attribute indicates the count of these errors detected over the lifetime of the drive.", ObservedThresholds: []ObservedThreshold{ { Low: 0, High: 0, AnnualFailureRate: 0.09070551401946862, ErrorInterval: []float64{0.08018892683853401, 0.10221801211956287}, }, { Low: 1, High: 2, AnnualFailureRate: 0.0844336097370013, ErrorInterval: []float64{0.07299813695315267, 0.09715235540340669}, }, { Low: 2, High: 5, AnnualFailureRate: 0.07943219628781906, ErrorInterval: []float64{0.06552176680630226, 0.09542233189887633}, }, { Low: 5, High: 13, AnnualFailureRate: 0.09208847603893404, ErrorInterval: []float64{0.07385765060838133, 0.11345557807163456}, }, { Low: 13, High: 30, AnnualFailureRate: 0.18161161650924224, ErrorInterval: []float64{0.13858879602902988, 0.23377015012749933}, }, { Low: 30, High: 70, AnnualFailureRate: 0.2678117886102384, ErrorInterval: []float64{0.19044036194841887, 0.36610753129699186}, }, { Low: 70, High: 150, AnnualFailureRate: 0.26126480798826107, ErrorInterval: []float64{0.15958733218826962, 0.4035023060905559}, }, { Low: 150, High: 350, AnnualFailureRate: 0.11337164155924832, ErrorInterval: []float64{0.030889956621649995, 0.2902764300762812}, }, }, }, 190: { ID: 190, DisplayName: "Temperature Difference", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "Value is equal to (100-temp. °C), allowing manufacturer to set a minimum threshold which corresponds to a maximum temperature. 
This also follows the convention of 100 being a best-case value and lower values being undesirable. However, some older drives may instead report raw Temperature (identical to 0xC2) or Temperature minus 50 here.", }, 191: { ID: 191, DisplayName: "G-sense Error Rate", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: ObservedThresholdIdealLow, Critical: false, Description: "The count of errors resulting from externally induced shock and vibration. ", }, 192: { ID: 192, DisplayName: "Power-off Retract Count", DisplayType: AtaSmartAttributeDisplayTypeRaw, Ideal: ObservedThresholdIdealLow, Critical: false, Description: "Number of power-off or emergency retract cycles.", ObservedThresholds: []ObservedThreshold{ { Low: 1, High: 2, AnnualFailureRate: 0.02861098445412803, ErrorInterval: []float64{0.022345416230915037, 0.036088863823297186}, }, { Low: 2, High: 6, AnnualFailureRate: 0.0738571777154862, ErrorInterval: []float64{0.06406927746420421, 0.0847175264009771}, }, { Low: 6, High: 16, AnnualFailureRate: 0.11970378206823593, ErrorInterval: []float64{0.10830059875098269, 0.13198105985656441}, }, { Low: 16, High: 40, AnnualFailureRate: 0.027266868552620425, ErrorInterval: []float64{0.021131448605713823, 0.03462795920968522}, }, { Low: 40, High: 100, AnnualFailureRate: 0.011741682974559688, ErrorInterval: []float64{0.00430899071133239, 0.025556700631152028}, }, { Low: 100, High: 250, AnnualFailureRate: 0.012659940134091309, ErrorInterval: []float64{0.00607093338127348, 0.023282080653656938}, }, { Low: 250, High: 650, AnnualFailureRate: 0.01634692899031039, ErrorInterval: []float64{0.009522688540043157, 0.026173016865409605}, }, { Low: 650, High: 1600, AnnualFailureRate: 0.005190074354440066, ErrorInterval: []float64{0.0025908664180103293, 0.009286476666453648}, }, }, }, 193: { ID: 193, DisplayName: "Load Cycle Count", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: ObservedThresholdIdealLow, Critical: false, Description: "Count of load/unload cycles 
into head landing zone position.[45] Some drives use 225 (0xE1) for Load Cycle Count instead.", }, 194: { ID: 194, DisplayName: "Temperature", DisplayType: AtaSmartAttributeDisplayTypeTransformed, Ideal: ObservedThresholdIdealLow, Critical: false, Description: "Indicates the device temperature, if the appropriate sensor is fitted. Lowest byte of the raw value contains the exact temperature value (Celsius degrees).", Transform: func(normValue int, rawValue int64, rawString string) int64 { return rawValue & 0b11111111 }, TransformValueUnit: "°C", }, 195: { ID: 195, DisplayName: "Hardware ECC Recovered", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "(Vendor-specific raw value.) The raw value has different structure for different vendors and is often not meaningful as a decimal number.", ObservedThresholds: []ObservedThreshold{ { Low: 12, High: 24, AnnualFailureRate: 0.31472916829975706, ErrorInterval: []float64{0.15711166685282174, 0.5631374192486645}, }, { Low: 24, High: 36, AnnualFailureRate: 0.15250310197260136, ErrorInterval: []float64{0.10497611828070175, 0.21417105521823687}, }, { Low: 36, High: 48, AnnualFailureRate: 0.2193119102723874, ErrorInterval: []float64{0.16475385681835103, 0.28615447006525274}, }, { Low: 48, High: 60, AnnualFailureRate: 0.05672658497265746, ErrorInterval: []float64{0.043182904776447234, 0.07317316161437043}, }, { Low: 60, High: 72, AnnualFailureRate: 0, ErrorInterval: []float64{0, 0}, }, { Low: 72, High: 84, AnnualFailureRate: 0, ErrorInterval: []float64{0, 0}, }, { Low: 84, High: 96, AnnualFailureRate: 0, ErrorInterval: []float64{0, 0}, }, { Low: 96, High: 108, AnnualFailureRate: 0.04074570216566197, ErrorInterval: []float64{0.001031591863615295, 0.22702052218047528}, }, }, }, 196: { ID: 196, DisplayName: "Reallocation Event Count", DisplayType: AtaSmartAttributeDisplayTypeRaw, Ideal: ObservedThresholdIdealLow, Critical: true, Description: "Count of remap operations. 
The raw value of this attribute shows the total count of attempts to transfer data from reallocated sectors to a spare area. Both successful and unsuccessful attempts are counted.", ObservedThresholds: []ObservedThreshold{ { Low: 0, High: 0, AnnualFailureRate: 0.007389855800729792, ErrorInterval: []float64{0.005652654139732716, 0.009492578928212054}, }, { Low: 1, High: 1, AnnualFailureRate: 0.026558331312151347, ErrorInterval: []float64{0.005476966404484466, 0.07761471429677293}, }, { Low: 1, High: 2, AnnualFailureRate: 0.02471894893674658, ErrorInterval: []float64{0.0006258296027540169, 0.13772516847438018}, }, { Low: 2, High: 4, AnnualFailureRate: 0.03200912040691046, ErrorInterval: []float64{0.0008104007642081744, 0.17834340416493005}, }, { Low: 4, High: 7, AnnualFailureRate: 0.043078012510326925, ErrorInterval: []float64{0.001090640849081295, 0.24001532369794615}, }, { Low: 7, High: 11, AnnualFailureRate: 0.033843300880853036, ErrorInterval: []float64{0.0008568381932559863, 0.18856280368036135}, }, { Low: 11, High: 17, AnnualFailureRate: 0.16979376647542252, ErrorInterval: []float64{0.035015556653263225, 0.49620943874336304}, }, { Low: 17, High: 27, AnnualFailureRate: 0.059042381106438044, ErrorInterval: []float64{0.0014948236677880642, 0.32896309247698113}, }, { Low: 27, High: 45, AnnualFailureRate: 0.24701105346266636, ErrorInterval: []float64{0.050939617608142244, 0.721871118983972}, }, }, }, 197: { ID: 197, DisplayName: "Current Pending Sector Count", DisplayType: AtaSmartAttributeDisplayTypeRaw, Ideal: ObservedThresholdIdealLow, Critical: true, Description: "Count of \"unstable\" sectors (waiting to be remapped, because of unrecoverable read errors). If an unstable sector is subsequently read successfully, the sector is remapped and this value is decreased. 
Read errors on a sector will not remap the sector immediately (since the correct value cannot be read and so the value to remap is not known, and also it might become readable later); instead, the drive firmware remembers that the sector needs to be remapped, and will remap it the next time it\"s written.", ObservedThresholds: []ObservedThreshold{ { Low: 0, High: 0, AnnualFailureRate: 0.025540791394761345, ErrorInterval: []float64{0.023161777231213983, 0.02809784482748174}, }, { Low: 1, High: 2, AnnualFailureRate: 0.34196613799103254, ErrorInterval: []float64{0.22723401523750225, 0.4942362818474496}, }, { Low: 2, High: 6, AnnualFailureRate: 0.6823772508117681, ErrorInterval: []float64{0.41083568090070416, 1.0656166047061635}, }, { Low: 6, High: 16, AnnualFailureRate: 0.6108100007493069, ErrorInterval: []float64{0.47336936083368364, 0.7757071095273286}, }, { Low: 16, High: 40, AnnualFailureRate: 0.9564879341127684, ErrorInterval: []float64{0.7701044196378299, 1.174355230793638}, }, { Low: 40, High: 100, AnnualFailureRate: 1.6519989942167461, ErrorInterval: []float64{1.328402276482456, 2.0305872327541317}, }, { Low: 100, High: 250, AnnualFailureRate: 2.5137741046831956, ErrorInterval: []float64{1.9772427971560862, 3.1510376077891613}, }, { Low: 250, High: 650, AnnualFailureRate: 3.3203378817413904, ErrorInterval: []float64{2.5883662702274406, 4.195047163573006}, }, { Low: 650, High: 1600, AnnualFailureRate: 3.133047210300429, ErrorInterval: []float64{1.1497731080460096, 6.819324775707182}, }, }, }, 198: { ID: 198, DisplayName: "(Offline) Uncorrectable Sector Count", DisplayType: AtaSmartAttributeDisplayTypeRaw, Ideal: ObservedThresholdIdealLow, Critical: true, Description: "The total count of uncorrectable errors when reading/writing a sector. 
A rise in the value of this attribute indicates defects of the disk surface and/or problems in the mechanical subsystem.", ObservedThresholds: []ObservedThreshold{ { Low: 0, High: 0, AnnualFailureRate: 0.028675322159886437, ErrorInterval: []float64{0.026159385510707116, 0.03136793218577656}, }, { Low: 0, High: 2, AnnualFailureRate: 0.8135764944275583, ErrorInterval: []float64{0.40613445471964466, 1.4557130815309443}, }, { Low: 2, High: 4, AnnualFailureRate: 1.1173469387755102, ErrorInterval: []float64{0.5773494680315332, 1.9517802404552516}, }, { Low: 4, High: 6, AnnualFailureRate: 1.3558692421991083, ErrorInterval: []float64{0.4402470522980859, 3.1641465148237544}, }, { Low: 6, High: 8, AnnualFailureRate: 0.7324414715719062, ErrorInterval: []float64{0.15104704003805655, 2.140504796291604}, }, { Low: 8, High: 10, AnnualFailureRate: 0.5777213677766163, ErrorInterval: []float64{0.43275294849366835, 0.7556737733062419}, }, { Low: 10, High: 12, AnnualFailureRate: 1.7464114832535886, ErrorInterval: []float64{0.47583835092536914, 4.471507017371231}, }, { Low: 12, High: 14, AnnualFailureRate: 2.6449275362318843, ErrorInterval: []float64{0.3203129951758959, 9.554387676519005}, }, { Low: 14, High: 16, AnnualFailureRate: 0.796943231441048, ErrorInterval: []float64{0.5519063550198366, 1.113648286331181}, }, }, }, 199: { ID: 199, DisplayName: "UltraDMA CRC Error Count", DisplayType: AtaSmartAttributeDisplayTypeRaw, Ideal: ObservedThresholdIdealLow, Critical: false, Description: "The count of errors in data transfer via the interface cable as determined by ICRC (Interface Cyclic Redundancy Check).", ObservedThresholds: []ObservedThreshold{ { Low: 0, High: 1, AnnualFailureRate: 0.04068379316116366, ErrorInterval: []float64{0.037534031558106425, 0.04402730201866553}, }, { Low: 1, High: 2, AnnualFailureRate: 0.1513481259734218, ErrorInterval: []float64{0.12037165605991791, 0.18786293065527596}, }, { Low: 2, High: 4, AnnualFailureRate: 0.16849758722418978, ErrorInterval: 
[]float64{0.12976367397863445, 0.2151676572000481}, }, { Low: 4, High: 8, AnnualFailureRate: 0.15385127340491614, ErrorInterval: []float64{0.10887431782430312, 0.21117289306426648}, }, { Low: 8, High: 16, AnnualFailureRate: 0.14882894050104387, ErrorInterval: []float64{0.09631424312463635, 0.2197008753522735}, }, { Low: 16, High: 35, AnnualFailureRate: 0.20878219917249793, ErrorInterval: []float64{0.14086447304552446, 0.29804957135975}, }, { Low: 35, High: 70, AnnualFailureRate: 0.13742940270409038, ErrorInterval: []float64{0.06860426267470295, 0.24589916335290812}, }, { Low: 70, High: 130, AnnualFailureRate: 0.22336578581363, ErrorInterval: []float64{0.11150339549604707, 0.39966309081252904}, }, { Low: 130, High: 260, AnnualFailureRate: 0.18277416124186283, ErrorInterval: []float64{0.07890890989692058, 0.3601379610272007}, }, }, }, 200: { ID: 200, DisplayName: "Multi-Zone Error Rate", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: ObservedThresholdIdealLow, Critical: false, Description: "The count of errors found when writing a sector. 
The higher the value, the worse the disk\"s mechanical condition is.", }, 201: { ID: 201, DisplayName: "Soft Read Error Rate", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: ObservedThresholdIdealLow, Critical: true, Description: "Count indicates the number of uncorrectable software read errors.", }, 202: { ID: 202, DisplayName: "Data Address Mark errors", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: ObservedThresholdIdealLow, Critical: false, Description: "Count of Data Address Mark errors (or vendor-specific).", }, 203: { ID: 203, DisplayName: "Run Out Cancel", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: ObservedThresholdIdealLow, Critical: false, Description: "The number of errors caused by incorrect checksum during the error correction.", }, 204: { ID: 204, DisplayName: "Soft ECC Correction", Ideal: ObservedThresholdIdealLow, Critical: false, Description: "Count of errors corrected by the internal error correction software.", }, 205: { ID: 205, DisplayName: "Thermal Asperity Rate", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: ObservedThresholdIdealLow, Critical: false, Description: "Count of errors due to high temperature.", }, 206: { ID: 206, DisplayName: "Flying Height", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "Height of heads above the disk surface. 
If too low, head crash is more likely; if too high, read/write errors are more likely.", }, 207: { ID: 207, DisplayName: "Spin High Current", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: ObservedThresholdIdealLow, Critical: false, Description: "Amount of surge current used to spin up the drive.", }, 208: { ID: 208, DisplayName: "Spin Buzz", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "Count of buzz routines needed to spin up the drive due to insufficient power.", }, 209: { ID: 209, DisplayName: "Offline Seek Performance", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "Drive\"s seek performance during its internal tests.", }, 210: { ID: 210, DisplayName: "Vibration During Write", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "Found in Maxtor 6B200M0 200GB and Maxtor 2R015H1 15GB disks.", }, 211: { ID: 211, DisplayName: "Vibration During Write", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "A recording of a vibration encountered during write operations.", }, 212: { ID: 212, DisplayName: "Shock During Write", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "A recording of shock encountered during write operations.", }, 220: { ID: 220, DisplayName: "Disk Shift", Ideal: ObservedThresholdIdealLow, DisplayType: AtaSmartAttributeDisplayTypeNormalized, Critical: false, Description: "Distance the disk has shifted relative to the spindle (usually due to shock or temperature). 
Unit of measure is unknown.", }, 221: { ID: 221, DisplayName: "G-Sense Error Rate", Ideal: ObservedThresholdIdealLow, DisplayType: AtaSmartAttributeDisplayTypeNormalized, Critical: false, Description: "The count of errors resulting from externally induced shock and vibration.", }, 222: { ID: 222, DisplayName: "Loaded Hours", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "Time spent operating under data load (movement of magnetic head armature).", }, 223: { ID: 223, DisplayName: "Load/Unload Retry Count", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "Count of times head changes position.", }, 224: { ID: 224, DisplayName: "Load Friction", Ideal: ObservedThresholdIdealLow, DisplayType: AtaSmartAttributeDisplayTypeNormalized, Critical: false, Description: "Resistance caused by friction in mechanical parts while operating.", }, 225: { ID: 225, DisplayName: "Load/Unload Cycle Count", Ideal: ObservedThresholdIdealLow, DisplayType: AtaSmartAttributeDisplayTypeNormalized, Critical: false, Description: "Total count of load cycles Some drives use 193 (0xC1) for Load Cycle Count instead. See Description for 193 for significance of this number. 
", }, 226: { ID: 226, DisplayName: "Load \"In\"-time", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "Total time of loading on the magnetic heads actuator (time not spent in parking area).", }, 227: { ID: 227, DisplayName: "Torque Amplification Count", Ideal: ObservedThresholdIdealLow, Critical: false, Description: "Count of attempts to compensate for platter speed variations.[66]", }, 228: { ID: 228, DisplayName: "Power-Off Retract Cycle", Ideal: ObservedThresholdIdealLow, Critical: false, Description: "The number of power-off cycles which are counted whenever there is a \"retract event\" and the heads are loaded off of the media such as when the machine is powered down, put to sleep, or is idle.", }, 230: { ID: 230, DisplayName: "GMR Head Amplitude ", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "Amplitude of \"thrashing\" (repetitive head moving motions between operations).", }, 231: { ID: 231, DisplayName: "Life Left", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "Indicates the approximate SSD life left, in terms of program/erase cycles or available reserved blocks. A normalized value of 100 represents a new drive, with a threshold value at 10 indicating a need for replacement. A value of 0 may mean that the drive is operating in read-only mode to allow data recovery.", }, 232: { ID: 232, DisplayName: "Endurance Remaining", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "Number of physical erase cycles completed on the SSD as a percentage of the maximum physical erase cycles the drive is designed to endure.", }, 233: { ID: 233, DisplayName: "Media Wearout Indicator", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "Intel SSDs report a normalized value from 100, a new drive, to a minimum of 1. 
It decreases while the NAND erase cycles increase from 0 to the maximum-rated cycles.", }, 234: { ID: 234, DisplayName: "Average erase count", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "Decoded as: byte 0-1-2 = average erase count (big endian) and byte 3-4-5 = max erase count (big endian).", }, 235: { ID: 235, DisplayName: "Good Block Count", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "Decoded as: byte 0-1-2 = good block count (big endian) and byte 3-4 = system (free) block count.", }, 240: { ID: 240, DisplayName: "Head Flying Hours", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "Time spent during the positioning of the drive heads.[15][71] Some Fujitsu drives report the count of link resets during a data transfer.", }, 241: { ID: 241, DisplayName: "Total LBAs Written", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "Total count of LBAs written.", }, 242: { ID: 242, DisplayName: "Total LBAs Read", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "Total count of LBAs read.Some S.M.A.R.T. utilities will report a negative number for the raw value since in reality it has 48 bits rather than 32.", }, 243: { ID: 243, DisplayName: "Total LBAs Written Expanded", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "The upper 5 bytes of the 12-byte total number of LBAs written to the device. The lower 7 byte value is located at attribute 0xF1.", }, 244: { ID: 244, DisplayName: "Total LBAs Read Expanded", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "The upper 5 bytes of the 12-byte total number of LBAs read from the device. 
The lower 7 byte value is located at attribute 0xF2.", }, 249: { ID: 249, DisplayName: "NAND Writes (1GiB)", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "Total NAND Writes. Raw value reports the number of writes to NAND in 1 GB increments.", }, 250: { ID: 250, DisplayName: "Read Error Retry Rate", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: ObservedThresholdIdealLow, Critical: false, Description: "Count of errors while reading from a disk.", }, 251: { ID: 251, DisplayName: "Minimum Spares Remaining", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "The Minimum Spares Remaining attribute indicates the number of remaining spare blocks as a percentage of the total number of spare blocks available.", }, 252: { ID: 252, DisplayName: "Newly Added Bad Flash Block", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: "", Critical: false, Description: "The Newly Added Bad Flash Block attribute indicates the total number of bad flash blocks the drive detected since it was first initialized in manufacturing.", }, 254: { ID: 254, DisplayName: "Free Fall Protection", DisplayType: AtaSmartAttributeDisplayTypeNormalized, Ideal: ObservedThresholdIdealLow, Critical: false, Description: "Count of \"Free Fall Events\" detected.", }, }
webapp/backend/pkg/metadata/ata_attribute_metadata.go
0.71602
0.426142
ata_attribute_metadata.go
starcoder
package exchange import "github.com/mattkanwisher/cryptofiend/currency/pair" // ILimits provides information about the limits placed by an exchange on numbers representing // order/trade price and amount. type ILimits interface { // Returns max number of decimal places allowed in the trade price for the given currency pair, // -1 should be used to indicate this value isn't defined. GetPriceDecimalPlaces(p pair.CurrencyPair) int32 // Returns max number of decimal places allowed in the trade amount for the given currency pair, // -1 should be used to indicate this value isn't defined. GetAmountDecimalPlaces(p pair.CurrencyPair) int32 // Returns the minimum trade amount for the given currency pair. GetMinAmount(p pair.CurrencyPair) float64 // Returns the minimum trade total (the amount multiplied by the price, denominated in the // price currency). GetMinTotal(p pair.CurrencyPair) float64 } // DefaultExchangeLimits provides reasonable defaults for exchanges that don't bother specifying // this kind of information in their API docs. type DefaultExchangeLimits struct{} // GetPriceDecimalPlaces Returns max number of decimal places allowed in the trade price for // the given currency pair, -1 should be used to indicate this value isn't defined. func (l *DefaultExchangeLimits) GetPriceDecimalPlaces(p pair.CurrencyPair) int32 { return 8 } // GetAmountDecimalPlaces returns max number of decimal places allowed in the trade amount for // the given currency pair, -1 should be used to indicate this value isn't defined. func (l *DefaultExchangeLimits) GetAmountDecimalPlaces(p pair.CurrencyPair) int32 { return 8 } // GetMinAmount returns the minimum trade amount for the given currency pair. func (l *DefaultExchangeLimits) GetMinAmount(p pair.CurrencyPair) float64 { return 0.00000001 } // GetMinTotal returns the minimum trade total (amount * price) for the given currency pair. func (l *DefaultExchangeLimits) GetMinTotal(p pair.CurrencyPair) float64 { return 0 }
exchanges/exchange_limits.go
0.840423
0.408336
exchange_limits.go
starcoder
package pid import ( "math" ) type Pid struct { integral float64 last float64 max_output float64 kp float64 ki float64 kd float64 gain float64 Scale_kp float64 Scale_ki float64 Scale_kd float64 Scale_gain float64 } // the computation factors are predefind after running some PID tuning and use to create a PID object // During running the parameters can be trimmed or adjusted using the scaling factors // which are by default set to 1 // gain is used be used to adjust the overall output to within the range used by the actuator. // Scale_gain can be used to trim the runtime sensitivity func MakePid(kp, ki, kd, gain, max_output float64) *Pid { p := Pid{ integral: 0, last: 0, max_output: max_output, kp: kp, ki: ki, kd: kd, gain: gain, Scale_kp: 1.0, Scale_ki: 1.0, Scale_kd: 1.0, Scale_gain: 1.0, } return &p } func (p *Pid) Reset() { p.integral = 0 } // Compute the Actuating Signal from the error term // The error term is supplied externally and is typically the command signal (or Set Point ) // minus the Process Variable (feed back value). // This is done by the application since Sp-Pv but in some cases may need conditioning for example // compass substrations should be based +/- 180 after substraction. // The result is the actuator value. // To assist with scaling the paramenters scaling variables may be set. 
This makes it easier to use // user friendly values for settings // -pv is used instead of sp-pv as avoids spiking if set point changed // The assumption is a constant calculation rate func (p *Pid) Compute(sp_pv, pv float64) float64 { proportional := sp_pv * p.kp * p.Scale_kp i_in := sp_pv * p.ki * p.Scale_ki d_inc := -pv * p.kd * p.Scale_kd p.integral += i_in differential := d_inc - p.last p.last = d_inc as := (p.integral + proportional + differential) * p.gain * p.Scale_gain // Integral latch up protection // Prevent further integration if max output is achieved and addition to integral is same sign if math.Abs(as) > p.max_output{ if same_sign(i_in, as){ p.integral -= i_in } } return as } func same_sign(x, y float64) bool { if x >= 0.0 && y >= 0.0 { return true } else if x < 0.0 && y <0.0 { return true } return false }
pid/pid.go
0.786787
0.484136
pid.go
starcoder
package hitables import ( "math" "github.com/go-gl/mathgl/mgl64" "github.com/markzuber/zgotrace/raytrace" "github.com/markzuber/zgotrace/raytrace/vectorextensions" ) type RotateY struct { hitable raytrace.Hitable angle float64 sintheta float64 costheta float64 boundingbox *raytrace.AABB } func NewRotateY(hitable raytrace.Hitable, angle float64) raytrace.Hitable { radians := math.Pi / 180.0 * angle sinTheta := math.Sin(radians) cosTheta := math.Cos(radians) box := hitable.GetBoundingBox(0.0, 1.0) min := []float64{math.MaxFloat64, math.MaxFloat64, math.MaxFloat64} max := []float64{-math.MaxFloat64, -math.MaxFloat64, -math.MaxFloat64} for i := 0; i < 2; i++ { dubi := float64(i) for j := 0; j < 2; j++ { dubj := float64(j) for k := 0; k < 2; k++ { dubk := float64(k) x := (dubi * box.Max().X()) + ((1.0 - dubi) * box.Min().X()) y := (dubj * box.Max().Y()) + ((1.0 - dubj) * box.Min().Y()) z := (dubk * box.Max().Z()) + ((1.0 - dubk) * box.Min().Z()) newx := (cosTheta * x) + (sinTheta * z) newz := (-sinTheta * x) + (cosTheta * z) tester := []float64{newx, y, newz} for c := 0; c < 3; c++ { if tester[c] > max[c] { max[c] = tester[c] } if tester[c] < min[c] { min[c] = tester[c] } } } } } boundingBox := raytrace.NewAABB(mgl64.Vec3{min[0], min[1], min[2]}, mgl64.Vec3{max[0], max[1], max[2]}) r := &RotateY{hitable, angle, sinTheta, cosTheta, boundingBox} return r } func (s *RotateY) Hit(ray *raytrace.Ray, tMin float64, tMax float64) *raytrace.HitRecord { origin := []float64{ray.Origin().X(), ray.Origin().Y(), ray.Origin().Z()} dir := []float64{ray.Direction().X(), ray.Direction().Y(), ray.Direction().Z()} origin[0] = (s.costheta * ray.Origin().X()) - (s.sintheta * ray.Origin().Z()) origin[2] = (s.sintheta * ray.Origin().X()) + (s.costheta * ray.Origin().Z()) dir[0] = (s.costheta * ray.Direction().X()) - (s.sintheta * ray.Direction().Z()) dir[2] = (s.sintheta * ray.Direction().X()) + (s.costheta * ray.Direction().Z()) rotatedRay := raytrace.NewRay(mgl64.Vec3{origin[0], origin[1], 
origin[2]}, mgl64.Vec3{dir[0], dir[1], dir[2]}) hitRecord := s.hitable.Hit(rotatedRay, tMin, tMax) if hitRecord == nil { return nil } p := []float64{hitRecord.P().X(), hitRecord.P().Y(), hitRecord.P().Z()} normal := []float64{hitRecord.Normal().X(), hitRecord.Normal().Y(), hitRecord.Normal().Z()} p[0] = (s.costheta * hitRecord.P().X()) + (s.sintheta * hitRecord.P().Z()) p[2] = (-s.sintheta * hitRecord.P().X()) + (s.costheta * hitRecord.P().Z()) normal[0] = (s.costheta * hitRecord.Normal().X()) + (s.sintheta * hitRecord.Normal().Z()) normal[2] = (-s.sintheta * hitRecord.Normal().X()) + (s.costheta * hitRecord.Normal().Z()) return raytrace.NewHitRecord(hitRecord.T(), mgl64.Vec3{p[0], p[1], p[2]}, mgl64.Vec3{normal[0], normal[1], normal[2]}, hitRecord.UvCoords(), hitRecord.Material()) } func (s *RotateY) GetBoundingBox(t0 float64, t1 float64) *raytrace.AABB { return s.boundingbox } func (s *RotateY) GetPdfValue(origin mgl64.Vec3, v mgl64.Vec3) float64 { return 1.0 } func (s *RotateY) Random(origin mgl64.Vec3) mgl64.Vec3 { return vectorextensions.UnitX() }
raytrace/hitables/rotatey.go
0.774839
0.41478
rotatey.go
starcoder
// Package movie holds the schema descriptors (labels, column names, edge
// names, and table metadata) for the movie entity.
//
// NOTE(review): the layout matches ent-generated schema metadata — confirm
// this file is code-generated before editing it by hand.
package movie

const (
	// Label holds the string label denoting the movie type in the database.
	Label = "movie"
	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// FieldTitle holds the string denoting the title field in the database.
	FieldTitle = "title"
	// FieldOriginalTitle holds the string denoting the original_title field in the database.
	FieldOriginalTitle = "original_title"
	// FieldLanguages holds the string denoting the languages field in the database.
	FieldLanguages = "languages"
	// FieldReleaseDate holds the string denoting the release_date field in the database.
	FieldReleaseDate = "release_date"
	// FieldPlot holds the string denoting the plot field in the database.
	FieldPlot = "plot"
	// FieldDuration holds the string denoting the duration field in the database.
	FieldDuration = "duration"
	// FieldWatched holds the string denoting the watched field in the database.
	FieldWatched = "watched"
	// EdgeRatings holds the string denoting the ratings edge name in mutations.
	EdgeRatings = "ratings"
	// EdgePoster holds the string denoting the poster edge name in mutations.
	EdgePoster = "poster"
	// EdgeFanart holds the string denoting the fanart edge name in mutations.
	EdgeFanart = "fanart"
	// EdgeCast holds the string denoting the cast edge name in mutations.
	EdgeCast = "cast"
	// EdgeDirectors holds the string denoting the directors edge name in mutations.
	EdgeDirectors = "directors"
	// EdgeWriters holds the string denoting the writers edge name in mutations.
	EdgeWriters = "writers"
	// EdgeGenres holds the string denoting the genres edge name in mutations.
	EdgeGenres = "genres"
	// EdgeCountries holds the string denoting the countries edge name in mutations.
	EdgeCountries = "countries"
	// Table holds the table name of the movie in the database.
	Table = "movies"
	// RatingsTable is the table that holds the ratings relation/edge.
	RatingsTable = "ratings"
	// RatingsInverseTable is the table name for the Rating entity.
	// It exists in this package in order to avoid circular dependency with the "rating" package.
	RatingsInverseTable = "ratings"
	// RatingsColumn is the table column denoting the ratings relation/edge.
	RatingsColumn = "movie_ratings"
	// PosterTable is the table that holds the poster relation/edge.
	PosterTable = "movies"
	// PosterInverseTable is the table name for the Picture entity.
	// It exists in this package in order to avoid circular dependency with the "picture" package.
	PosterInverseTable = "pictures"
	// PosterColumn is the table column denoting the poster relation/edge.
	PosterColumn = "movie_poster"
	// FanartTable is the table that holds the fanart relation/edge.
	FanartTable = "pictures"
	// FanartInverseTable is the table name for the Picture entity.
	// It exists in this package in order to avoid circular dependency with the "picture" package.
	FanartInverseTable = "pictures"
	// FanartColumn is the table column denoting the fanart relation/edge.
	FanartColumn = "movie_fanart"
	// CastTable is the table that holds the cast relation/edge. The primary key declared below.
	CastTable = "movie_cast"
	// CastInverseTable is the table name for the Artist entity.
	// It exists in this package in order to avoid circular dependency with the "artist" package.
	CastInverseTable = "artists"
	// DirectorsTable is the table that holds the directors relation/edge. The primary key declared below.
	DirectorsTable = "movie_directors"
	// DirectorsInverseTable is the table name for the Artist entity.
	// It exists in this package in order to avoid circular dependency with the "artist" package.
	DirectorsInverseTable = "artists"
	// WritersTable is the table that holds the writers relation/edge. The primary key declared below.
	WritersTable = "movie_writers"
	// WritersInverseTable is the table name for the Artist entity.
	// It exists in this package in order to avoid circular dependency with the "artist" package.
	WritersInverseTable = "artists"
	// GenresTable is the table that holds the genres relation/edge. The primary key declared below.
	GenresTable = "movie_genre_movies"
	// GenresInverseTable is the table name for the MovieGenre entity.
	// It exists in this package in order to avoid circular dependency with the "moviegenre" package.
	GenresInverseTable = "movie_genres"
	// CountriesTable is the table that holds the countries relation/edge. The primary key declared below.
	CountriesTable = "country_movies"
	// CountriesInverseTable is the table name for the Country entity.
	// It exists in this package in order to avoid circular dependency with the "country" package.
	CountriesInverseTable = "countries"
)

// Columns holds all SQL columns for movie fields.
var Columns = []string{
	FieldID,
	FieldTitle,
	FieldOriginalTitle,
	FieldLanguages,
	FieldReleaseDate,
	FieldPlot,
	FieldDuration,
	FieldWatched,
}

// ForeignKeys holds the SQL foreign-keys that are owned by the "movies"
// table and are not defined as standalone fields in the schema.
var ForeignKeys = []string{
	"movie_poster",
}

var (
	// CastPrimaryKey is the pair of table columns denoting the
	// primary key for the cast relation (M2M).
	CastPrimaryKey = []string{"movie_id", "artist_id"}
	// DirectorsPrimaryKey is the pair of table columns denoting the
	// primary key for the directors relation (M2M).
	DirectorsPrimaryKey = []string{"movie_id", "artist_id"}
	// WritersPrimaryKey is the pair of table columns denoting the
	// primary key for the writers relation (M2M).
	WritersPrimaryKey = []string{"movie_id", "artist_id"}
	// GenresPrimaryKey is the pair of table columns denoting the
	// primary key for the genres relation (M2M).
	GenresPrimaryKey = []string{"movie_genre_id", "movie_id"}
	// CountriesPrimaryKey is the pair of table columns denoting the
	// primary key for the countries relation (M2M).
	CountriesPrimaryKey = []string{"country_id", "movie_id"}
)

// ValidColumn reports if the column name is valid (part of the table columns).
// Both schema fields (Columns) and owned foreign keys (ForeignKeys) count as valid.
func ValidColumn(column string) bool {
	for i := range Columns {
		if column == Columns[i] {
			return true
		}
	}
	for i := range ForeignKeys {
		if column == ForeignKeys[i] {
			return true
		}
	}
	return false
}

var (
	// DefaultWatched holds the default value on creation for the "watched" field.
	DefaultWatched bool
)
src/ent/movie/movie.go
0.500732
0.430088
movie.go
starcoder
package primitives import ( "github.com/alexandreLamarre/Golang-Ray-Tracing-Renderer/pkg/algebra" "github.com/alexandreLamarre/Golang-Ray-Tracing-Renderer/pkg/canvas" "math" ) //SmoothTriangle datastructure that handles calculations for SmoothTriangle Shape type SmoothTriangle struct { Parent Shape transform *algebra.Matrix material *canvas.Material p1 *algebra.Vector p2 *algebra.Vector p3 *algebra.Vector e1 *algebra.Vector e2 *algebra.Vector n1 *algebra.Vector n2 *algebra.Vector n3 *algebra.Vector } //NewSmoothTriangle Initializer for fully specificed Smooth Triangle Shape // n1, n2, n3 are the normals used for the vertices p1, p2, p3 func NewSmoothTriangle(p1, p2, p3, n1, n2, n3 *algebra.Vector) *SmoothTriangle { e1, err := p2.Subtract(p1) if err != nil { panic(err) } e2, err := p3.Subtract(p1) if err != nil { panic(err) } normal, err := algebra.CrossProduct(e2, e1) if err != nil { panic(err) } normal, err = normal.Normalize() if err != nil { panic(err) } return &SmoothTriangle{ p1: p1, p2: p2, p3: p3, e1: e1, e2: e2, material: canvas.NewDefaultMaterial(), transform: algebra.IdentityMatrix(4), Parent: nil, n1: n1, n2: n2, n3: n3, } } //NewDefaultSmoothTriangle Initializer for Smooth Triangle Shape and Infer Normals at vertices // based on cross products func NewDefaultSmoothTriangle(p1, p2, p3 *algebra.Vector) *SmoothTriangle { a := p1 b := p2 c := p3 ab, err := b.Subtract(a) if err != nil { panic(err) } ac, err := c.Subtract(a) if err != nil { panic(err) } abXac, err := algebra.CrossProduct(ab, ac) if err != nil { panic(err) } cross1, err := algebra.CrossProduct(abXac, ab) if err != nil { panic(err) } cross1 = cross1.MultScalar(ac.Magnitude2()) cross2, err := algebra.CrossProduct(ac, abXac) cross2 = cross2.MultScalar(ab.Magnitude2()) toCircumCenter, err := cross1.Add(cross2) if err != nil { panic(err) } toCircumCenter = toCircumCenter.DivideScalar(2 * abXac.Magnitude2()) circumcenter, err := a.Add(toCircumCenter) if err != nil { panic(err) } //next we calculate 
the vector normals of this sphere passing through each vertex n1, err := p1.Subtract(circumcenter) if err != nil { panic(err) } n1, err = n1.Normalize() if err != nil { panic(err) } n2, err := p2.Subtract(circumcenter) if err != nil { panic(err) } n2, err = n2.Normalize() if err != nil { panic(err) } n3, err := p3.Subtract(circumcenter) if err != nil { panic(err) } n3, err = n3.Normalize() if err != nil { panic(err) } return &SmoothTriangle{ p1: a, p2: b, p3: c, e1: ab, e2: ac, material: canvas.NewDefaultMaterial(), transform: algebra.IdentityMatrix(4), Parent: nil, n1: n1, n2: n2, n3: n3, } } //Shape Interface Methods //GetTransform Getter for transform of SmoothTriangle Shape, interface method func (t *SmoothTriangle) GetTransform() *algebra.Matrix { return t.transform } //GetMaterial Getter for material of SmoothTriangle Shape, interface method func (t *SmoothTriangle) GetMaterial() *canvas.Material { return t.material } //GetParent Getter for parent of SmoothTriangle Shape, interface method func (t *SmoothTriangle) GetParent() Shape { return t.Parent } //GetBounds Getter for bounds of SmoothTriangle Shape, interface method func (t *SmoothTriangle) GetBounds() (*algebra.Vector, *algebra.Vector) { var xMin = math.Inf(1) var yMin = math.Inf(1) var zMin = math.Inf(1) var xMax = math.Inf(-1) var yMax = math.Inf(-1) var zMax = math.Inf(-1) points := []*algebra.Vector{t.p1, t.p2, t.p3} for _, p := range points { xMin = math.Min(p.Get()[0], xMin) yMin = math.Min(p.Get()[1], yMin) zMin = math.Min(p.Get()[2], zMin) xMax = math.Max(p.Get()[0], xMax) yMax = math.Max(p.Get()[1], yMax) zMax = math.Max(p.Get()[2], zMax) } return algebra.NewPoint(xMin, yMin, zMin), algebra.NewPoint(xMax, yMax, zMax) } //SetTransform Setter for SmoothTriangle Shape transform, interface method func (t *SmoothTriangle) SetTransform(m *algebra.Matrix) { if len(m.Get()) != 4 || len(m.Get()[0]) != 4 { panic(algebra.ExpectedDimension(4)) } t.transform = m } //SetMaterial Setter for SmoothTriangle 
Shape material, interface method func (t *SmoothTriangle) SetMaterial(m *canvas.Material) { t.material = m } //SetParent Setter for SmoothTriangle Shape parent, interface method func (t *SmoothTriangle) SetParent(s Shape) { t.Parent = s } //LocalIntersect Intersect implementation for SmoothTriangle Shape, interface method func (t *SmoothTriangle) LocalIntersect(r *algebra.Ray) ([]*Intersection, bool) { xs := make([]*Intersection, 0, 0) direction := r.Get()["direction"] dirCrossProduct, err := algebra.CrossProduct(direction, t.e2) if err != nil { panic(err) } det, err := algebra.DotProduct(t.e1, dirCrossProduct) if err != nil { panic(err) } if math.Abs(det) <= algebra.EPSILON { return xs, false } origin := r.Get()["origin"] f := 1.0 / det p1ToOrigin, err := origin.Subtract(t.p1) if err != nil { panic(err) } u, err := algebra.DotProduct(p1ToOrigin, dirCrossProduct) if err != nil { panic(err) } u *= f if u < 0 || u > 1 { return xs, false } originCross, err := algebra.CrossProduct(p1ToOrigin, t.e1) if err != nil { panic(err) } v, err := algebra.DotProduct(direction, originCross) v *= f if err != nil { panic(err) } if v < 0 || (u+v) > 1 { return xs, false } pos, err := algebra.DotProduct(t.e2, originCross) if err != nil { panic(err) } pos *= f i := NewIntersection(t, pos) i.SetUV(u, v) xs = append(xs, i) return xs, true } //LocalNormalAt normal interpolation method for SmoothTriangle Shape, interface method func (t *SmoothTriangle) LocalNormalAt(p *algebra.Vector, hit *Intersection) (*algebra.Vector, error) { temp, err := t.n2.MultScalar(hit.U).Add(t.n3.MultScalar(hit.V)) if err != nil { panic(err) } temp, err = temp.Add(t.n1.MultScalar(1 - hit.U - hit.V)) return temp, nil }
pkg/geometry/primitives/smoothTriangle.go
0.661704
0.670096
smoothTriangle.go
starcoder
package reactor import ( "aoc2021/pkg/io" "aoc2021/pkg/numbers" "strconv" "strings" ) type Cuboid struct { from, to numbers.Vector3 toggle bool } func (c *Cuboid) Volume() int { return (c.to.X - c.from.X + 1) * (c.to.Y - c.from.Y + 1) * (c.to.Z - c.from.Z + 1) } func (c *Cuboid) Intersect(other Cuboid) *Cuboid { if c.from.X > other.to.X || c.to.X < other.from.X || c.from.Y > other.to.Y || c.to.Y < other.from.Y || c.from.Z > other.to.Z || c.to.Z < other.from.Z { return nil } return &Cuboid{ from: numbers.Vector3{ X: numbers.Max(c.from.X, other.from.X), Y: numbers.Max(c.from.Y, other.from.Y), Z: numbers.Max(c.from.Z, other.from.Z), }, to: numbers.Vector3{ X: numbers.Min(c.to.X, other.to.X), Y: numbers.Min(c.to.Y, other.to.Y), Z: numbers.Min(c.to.Z, other.to.Z), }, } } type Reactor []Cuboid type Instruction = Cuboid type Instructions []Instruction func LoadInstructions(file string) Instructions { var is Instructions for _, line := range io.ReadLines(file) { var i Instruction var parts []string if line[1] == 'n' { i.toggle = true parts = strings.SplitN(line[3:], ",", 3) } else { parts = strings.SplitN(line[4:], ",", 3) } xparts := strings.SplitN(parts[0][2:], "..", 2) yparts := strings.SplitN(parts[1][2:], "..", 2) zparts := strings.SplitN(parts[2][2:], "..", 2) i.from.X, _ = strconv.Atoi(xparts[0]) i.to.X, _ = strconv.Atoi(xparts[1]) i.from.Y, _ = strconv.Atoi(yparts[0]) i.to.Y, _ = strconv.Atoi(yparts[1]) i.from.Z, _ = strconv.Atoi(zparts[0]) i.to.Z, _ = strconv.Atoi(zparts[1]) is = append(is, i) } return is } func (r *Reactor) Apply(instructions Instructions, init bool) { for _, instruction := range instructions { // Ignore non-initialization steps? 
if init && (instruction.from.X < -50 || instruction.to.X > 50 || instruction.from.Y < -50 || instruction.to.Y > 50 || instruction.from.Z < -50 || instruction.to.Z > 50) { continue } // Save current cuboid count to avoid // handling newly added ones rcount := len(*r) // If this is turning things on, add it if instruction.toggle { *r = append(*r, instruction) } // Look for overlaps with previous cuboids for i := 0; i < rcount; i++ { intersection := instruction.Intersect((*r)[i]) if intersection != nil { // Negate the instruction to account for the // overlap (e.g. avoid adding/removing twice) intersection.toggle = !(*r)[i].toggle *r = append(*r, *intersection) } } } } func (r *Reactor) GetCount() int { count := 0 for _, c := range *r { if c.toggle { count += c.Volume() } else { count -= c.Volume() } } return count }
pkg/reactor/reactor.go
0.508788
0.405566
reactor.go
starcoder
package inventory func (x *Accelerator) Clone() *Accelerator { if x == nil { return nil } return &Accelerator{ AcceleratorType: x.AcceleratorType, } } func (x *Accelerator) Equal(c *Accelerator) bool { return x.GetAcceleratorType() == c.GetAcceleratorType() } func (x *BladeCapacity) Clone() *BladeCapacity { if x == nil { return nil } var accelerators []*Accelerator = nil if x.Accelerators != nil { accelerators = make([]*Accelerator, len(x.Accelerators)) for i, a := range x.Accelerators { accelerators[i] = a.Clone() } } return &BladeCapacity{ Cores: x.Cores, MemoryInMb: x.MemoryInMb, DiskInGb: x.DiskInGb, NetworkBandwidthInMbps: x.NetworkBandwidthInMbps, Arch: x.Arch, Accelerators: accelerators, } } func (x *BladeCapacity) Equal(c *BladeCapacity) bool { if x == nil || c == nil { return x == c } accelMatch := true if x.Accelerators == nil || c.Accelerators == nil { accelMatch = x.Accelerators == nil && c.Accelerators == nil } else { accelMatch = len(x.GetAccelerators()) == len(c.GetAccelerators()) for i := 0; accelMatch && i < len(x.Accelerators); i++ { accelMatch = accelMatch && x.Accelerators[i].Equal(c.Accelerators[i]) } } return accelMatch && x.GetArch() == c.GetArch() && x.GetCores() == c.GetCores() && x.GetDiskInGb() == c.GetDiskInGb() && x.GetMemoryInMb() == c.GetMemoryInMb() && x.GetNetworkBandwidthInMbps() == c.GetNetworkBandwidthInMbps() } func (x *Usage) Clone() *Usage { if x == nil { return nil } return &Usage{ InstanceId: x.GetInstanceId(), State: x.GetState(), Consumed: x.GetConsumed().Clone(), } } func (x *Usage) Equal(y *Usage) bool { if x == nil || y == nil { return x == y } return x.GetInstanceId() == y.GetInstanceId() && x.GetState() == y.GetState() && x.GetConsumed() == y.GetConsumed() } func (x *Usage_Consumption) Clone() *Usage_Consumption { if x == nil { return nil } return &Usage_Consumption{ Cores: x.GetCores(), MemoryInMb: x.GetMemoryInMb(), DiskInGb: x.GetDiskInGb(), NetworkBandwidthInMbps: x.GetNetworkBandwidthInMbps(), } } func (x 
*Usage_Consumption) Equal(y *Usage_Consumption) bool { if x == nil || y == nil { return x == y } return x.GetCores() == y.GetCores() && x.GetMemoryInMb() == y.GetMemoryInMb() && x.GetDiskInGb() == y.GetDiskInGb() && x.GetNetworkBandwidthInMbps() == y.GetNetworkBandwidthInMbps() }
simulation/pkg/protos/inventory/capacity.Helpers.go
0.654895
0.540803
capacity.Helpers.go
starcoder
package factory import ( "github.com/Yiling-J/carrier/examples/ent_recipe/ent" "context" ) type EntRecipeIngredientMutator struct { Ingredient *ent.Ingredient IngredientID int Quantity float32 Unit string } type entRecipeIngredientMutation struct { ingredientType int ingredientFunc func(ctx context.Context, i *EntRecipeIngredientMutator, c int, creator *ent.RecipeIngredientCreate) error ingredientIDType int ingredientIDFunc func(ctx context.Context, i *EntRecipeIngredientMutator, c int, creator *ent.RecipeIngredientCreate) error quantityType int quantityFunc func(ctx context.Context, i *EntRecipeIngredientMutator, c int, creator *ent.RecipeIngredientCreate) error unitType int unitFunc func(ctx context.Context, i *EntRecipeIngredientMutator, c int, creator *ent.RecipeIngredientCreate) error afterCreateFunc func(ctx context.Context, i *ent.RecipeIngredient) error } type EntRecipeIngredientMetaFactory struct { mutation entRecipeIngredientMutation } type entRecipeIngredientTrait struct { mutation entRecipeIngredientMutation updates []func(m *entRecipeIngredientMutation) } func EntRecipeIngredientTrait() *entRecipeIngredientTrait { return &entRecipeIngredientTrait{} } func (*entRecipeIngredientMutation) afterCreateMutateFunc(fn func(ctx context.Context, i *ent.RecipeIngredient) error) func(m *entRecipeIngredientMutation) { return func(m *entRecipeIngredientMutation) { m.afterCreateFunc = fn } } func (*entRecipeIngredientMutation) ingredientSequenceMutateFunc(fn func(ctx context.Context, i int) (*ent.Ingredient, error)) func(m *entRecipeIngredientMutation) { return func(m *entRecipeIngredientMutation) { m.ingredientType = TypeSequence m.ingredientFunc = func(ctx context.Context, i *EntRecipeIngredientMutator, c int, creator *ent.RecipeIngredientCreate) error { if fn == nil { return nil } value, err := fn(ctx, c) if err != nil { return err } creator.SetIngredient(value) i.Ingredient = value return nil } } } func (*entRecipeIngredientMutation) ingredientLazyMutateFunc(fn 
func(ctx context.Context, i *EntRecipeIngredientMutator) (*ent.Ingredient, error)) func(m *entRecipeIngredientMutation) { return func(m *entRecipeIngredientMutation) { m.ingredientType = TypeLazy m.ingredientFunc = func(ctx context.Context, i *EntRecipeIngredientMutator, c int, creator *ent.RecipeIngredientCreate) error { if fn == nil { return nil } value, err := fn(ctx, i) if err != nil { return err } creator.SetIngredient(value) i.Ingredient = value return nil } } } func (*entRecipeIngredientMutation) ingredientDefaultMutateFunc(v *ent.Ingredient) func(m *entRecipeIngredientMutation) { return func(m *entRecipeIngredientMutation) { m.ingredientType = TypeDefault m.ingredientFunc = func(ctx context.Context, i *EntRecipeIngredientMutator, c int, creator *ent.RecipeIngredientCreate) error { creator.SetIngredient(v) i.Ingredient = v return nil } } } func (*entRecipeIngredientMutation) ingredientFactoryMutateFunc(fn func(ctx context.Context) (*ent.Ingredient, error)) func(m *entRecipeIngredientMutation) { return func(m *entRecipeIngredientMutation) { m.ingredientType = TypeFactory m.ingredientFunc = func(ctx context.Context, i *EntRecipeIngredientMutator, c int, creator *ent.RecipeIngredientCreate) error { if fn == nil { return nil } value, err := fn(ctx) if err != nil { return err } creator.SetIngredient(value) i.Ingredient = value return nil } } } func (f *EntRecipeIngredientMetaFactory) SetIngredientSequence(fn func(ctx context.Context, i int) (*ent.Ingredient, error)) *EntRecipeIngredientMetaFactory { f.mutation.ingredientSequenceMutateFunc(fn)(&f.mutation) return f } func (f *EntRecipeIngredientMetaFactory) SetIngredientLazy(fn func(ctx context.Context, i *EntRecipeIngredientMutator) (*ent.Ingredient, error)) *EntRecipeIngredientMetaFactory { f.mutation.ingredientLazyMutateFunc(fn)(&f.mutation) return f } func (f *EntRecipeIngredientMetaFactory) SetIngredientDefault(v *ent.Ingredient) *EntRecipeIngredientMetaFactory { 
f.mutation.ingredientDefaultMutateFunc(v)(&f.mutation) return f } func (f *EntRecipeIngredientMetaFactory) SetIngredientFactory(fn func(ctx context.Context) (*ent.Ingredient, error)) *EntRecipeIngredientMetaFactory { f.mutation.ingredientFactoryMutateFunc(fn)(&f.mutation) return f } func (t *entRecipeIngredientTrait) SetIngredientSequence(fn func(ctx context.Context, i int) (*ent.Ingredient, error)) *entRecipeIngredientTrait { t.updates = append(t.updates, t.mutation.ingredientSequenceMutateFunc(fn)) return t } func (t *entRecipeIngredientTrait) SetIngredientLazy(fn func(ctx context.Context, i *EntRecipeIngredientMutator) (*ent.Ingredient, error)) *entRecipeIngredientTrait { t.updates = append(t.updates, t.mutation.ingredientLazyMutateFunc(fn)) return t } func (t *entRecipeIngredientTrait) SetIngredientDefault(v *ent.Ingredient) *entRecipeIngredientTrait { t.updates = append(t.updates, t.mutation.ingredientDefaultMutateFunc(v)) return t } func (t *entRecipeIngredientTrait) SetIngredientFactory(fn func(ctx context.Context) (*ent.Ingredient, error)) *entRecipeIngredientTrait { t.updates = append(t.updates, t.mutation.ingredientFactoryMutateFunc(fn)) return t } func (*entRecipeIngredientMutation) ingredientIDSequenceMutateFunc(fn func(ctx context.Context, i int) (int, error)) func(m *entRecipeIngredientMutation) { return func(m *entRecipeIngredientMutation) { m.ingredientIDType = TypeSequence m.ingredientIDFunc = func(ctx context.Context, i *EntRecipeIngredientMutator, c int, creator *ent.RecipeIngredientCreate) error { if fn == nil { return nil } value, err := fn(ctx, c) if err != nil { return err } creator.SetIngredientID(value) i.IngredientID = value return nil } } } func (*entRecipeIngredientMutation) ingredientIDLazyMutateFunc(fn func(ctx context.Context, i *EntRecipeIngredientMutator) (int, error)) func(m *entRecipeIngredientMutation) { return func(m *entRecipeIngredientMutation) { m.ingredientIDType = TypeLazy m.ingredientIDFunc = func(ctx context.Context, i 
*EntRecipeIngredientMutator, c int, creator *ent.RecipeIngredientCreate) error { if fn == nil { return nil } value, err := fn(ctx, i) if err != nil { return err } creator.SetIngredientID(value) i.IngredientID = value return nil } } } func (*entRecipeIngredientMutation) ingredientIDDefaultMutateFunc(v int) func(m *entRecipeIngredientMutation) { return func(m *entRecipeIngredientMutation) { m.ingredientIDType = TypeDefault m.ingredientIDFunc = func(ctx context.Context, i *EntRecipeIngredientMutator, c int, creator *ent.RecipeIngredientCreate) error { creator.SetIngredientID(v) i.IngredientID = v return nil } } } func (*entRecipeIngredientMutation) ingredientIDFactoryMutateFunc(fn func(ctx context.Context) (int, error)) func(m *entRecipeIngredientMutation) { return func(m *entRecipeIngredientMutation) { m.ingredientIDType = TypeFactory m.ingredientIDFunc = func(ctx context.Context, i *EntRecipeIngredientMutator, c int, creator *ent.RecipeIngredientCreate) error { if fn == nil { return nil } value, err := fn(ctx) if err != nil { return err } creator.SetIngredientID(value) i.IngredientID = value return nil } } } func (f *EntRecipeIngredientMetaFactory) SetIngredientIDSequence(fn func(ctx context.Context, i int) (int, error)) *EntRecipeIngredientMetaFactory { f.mutation.ingredientIDSequenceMutateFunc(fn)(&f.mutation) return f } func (f *EntRecipeIngredientMetaFactory) SetIngredientIDLazy(fn func(ctx context.Context, i *EntRecipeIngredientMutator) (int, error)) *EntRecipeIngredientMetaFactory { f.mutation.ingredientIDLazyMutateFunc(fn)(&f.mutation) return f } func (f *EntRecipeIngredientMetaFactory) SetIngredientIDDefault(v int) *EntRecipeIngredientMetaFactory { f.mutation.ingredientIDDefaultMutateFunc(v)(&f.mutation) return f } func (f *EntRecipeIngredientMetaFactory) SetIngredientIDFactory(fn func(ctx context.Context) (int, error)) *EntRecipeIngredientMetaFactory { f.mutation.ingredientIDFactoryMutateFunc(fn)(&f.mutation) return f } func (t *entRecipeIngredientTrait) 
SetIngredientIDSequence(fn func(ctx context.Context, i int) (int, error)) *entRecipeIngredientTrait { t.updates = append(t.updates, t.mutation.ingredientIDSequenceMutateFunc(fn)) return t } func (t *entRecipeIngredientTrait) SetIngredientIDLazy(fn func(ctx context.Context, i *EntRecipeIngredientMutator) (int, error)) *entRecipeIngredientTrait { t.updates = append(t.updates, t.mutation.ingredientIDLazyMutateFunc(fn)) return t } func (t *entRecipeIngredientTrait) SetIngredientIDDefault(v int) *entRecipeIngredientTrait { t.updates = append(t.updates, t.mutation.ingredientIDDefaultMutateFunc(v)) return t } func (t *entRecipeIngredientTrait) SetIngredientIDFactory(fn func(ctx context.Context) (int, error)) *entRecipeIngredientTrait { t.updates = append(t.updates, t.mutation.ingredientIDFactoryMutateFunc(fn)) return t } func (*entRecipeIngredientMutation) quantitySequenceMutateFunc(fn func(ctx context.Context, i int) (float32, error)) func(m *entRecipeIngredientMutation) { return func(m *entRecipeIngredientMutation) { m.quantityType = TypeSequence m.quantityFunc = func(ctx context.Context, i *EntRecipeIngredientMutator, c int, creator *ent.RecipeIngredientCreate) error { if fn == nil { return nil } value, err := fn(ctx, c) if err != nil { return err } creator.SetQuantity(value) i.Quantity = value return nil } } } func (*entRecipeIngredientMutation) quantityLazyMutateFunc(fn func(ctx context.Context, i *EntRecipeIngredientMutator) (float32, error)) func(m *entRecipeIngredientMutation) { return func(m *entRecipeIngredientMutation) { m.quantityType = TypeLazy m.quantityFunc = func(ctx context.Context, i *EntRecipeIngredientMutator, c int, creator *ent.RecipeIngredientCreate) error { if fn == nil { return nil } value, err := fn(ctx, i) if err != nil { return err } creator.SetQuantity(value) i.Quantity = value return nil } } } func (*entRecipeIngredientMutation) quantityDefaultMutateFunc(v float32) func(m *entRecipeIngredientMutation) { return func(m 
*entRecipeIngredientMutation) { m.quantityType = TypeDefault m.quantityFunc = func(ctx context.Context, i *EntRecipeIngredientMutator, c int, creator *ent.RecipeIngredientCreate) error { creator.SetQuantity(v) i.Quantity = v return nil } } } func (*entRecipeIngredientMutation) quantityFactoryMutateFunc(fn func(ctx context.Context) (float32, error)) func(m *entRecipeIngredientMutation) { return func(m *entRecipeIngredientMutation) { m.quantityType = TypeFactory m.quantityFunc = func(ctx context.Context, i *EntRecipeIngredientMutator, c int, creator *ent.RecipeIngredientCreate) error { if fn == nil { return nil } value, err := fn(ctx) if err != nil { return err } creator.SetQuantity(value) i.Quantity = value return nil } } } func (f *EntRecipeIngredientMetaFactory) SetQuantitySequence(fn func(ctx context.Context, i int) (float32, error)) *EntRecipeIngredientMetaFactory { f.mutation.quantitySequenceMutateFunc(fn)(&f.mutation) return f } func (f *EntRecipeIngredientMetaFactory) SetQuantityLazy(fn func(ctx context.Context, i *EntRecipeIngredientMutator) (float32, error)) *EntRecipeIngredientMetaFactory { f.mutation.quantityLazyMutateFunc(fn)(&f.mutation) return f } func (f *EntRecipeIngredientMetaFactory) SetQuantityDefault(v float32) *EntRecipeIngredientMetaFactory { f.mutation.quantityDefaultMutateFunc(v)(&f.mutation) return f } func (f *EntRecipeIngredientMetaFactory) SetQuantityFactory(fn func(ctx context.Context) (float32, error)) *EntRecipeIngredientMetaFactory { f.mutation.quantityFactoryMutateFunc(fn)(&f.mutation) return f } func (t *entRecipeIngredientTrait) SetQuantitySequence(fn func(ctx context.Context, i int) (float32, error)) *entRecipeIngredientTrait { t.updates = append(t.updates, t.mutation.quantitySequenceMutateFunc(fn)) return t } func (t *entRecipeIngredientTrait) SetQuantityLazy(fn func(ctx context.Context, i *EntRecipeIngredientMutator) (float32, error)) *entRecipeIngredientTrait { t.updates = append(t.updates, 
t.mutation.quantityLazyMutateFunc(fn)) return t } func (t *entRecipeIngredientTrait) SetQuantityDefault(v float32) *entRecipeIngredientTrait { t.updates = append(t.updates, t.mutation.quantityDefaultMutateFunc(v)) return t } func (t *entRecipeIngredientTrait) SetQuantityFactory(fn func(ctx context.Context) (float32, error)) *entRecipeIngredientTrait { t.updates = append(t.updates, t.mutation.quantityFactoryMutateFunc(fn)) return t } func (*entRecipeIngredientMutation) unitSequenceMutateFunc(fn func(ctx context.Context, i int) (string, error)) func(m *entRecipeIngredientMutation) { return func(m *entRecipeIngredientMutation) { m.unitType = TypeSequence m.unitFunc = func(ctx context.Context, i *EntRecipeIngredientMutator, c int, creator *ent.RecipeIngredientCreate) error { if fn == nil { return nil } value, err := fn(ctx, c) if err != nil { return err } creator.SetUnit(value) i.Unit = value return nil } } } func (*entRecipeIngredientMutation) unitLazyMutateFunc(fn func(ctx context.Context, i *EntRecipeIngredientMutator) (string, error)) func(m *entRecipeIngredientMutation) { return func(m *entRecipeIngredientMutation) { m.unitType = TypeLazy m.unitFunc = func(ctx context.Context, i *EntRecipeIngredientMutator, c int, creator *ent.RecipeIngredientCreate) error { if fn == nil { return nil } value, err := fn(ctx, i) if err != nil { return err } creator.SetUnit(value) i.Unit = value return nil } } } func (*entRecipeIngredientMutation) unitDefaultMutateFunc(v string) func(m *entRecipeIngredientMutation) { return func(m *entRecipeIngredientMutation) { m.unitType = TypeDefault m.unitFunc = func(ctx context.Context, i *EntRecipeIngredientMutator, c int, creator *ent.RecipeIngredientCreate) error { creator.SetUnit(v) i.Unit = v return nil } } } func (*entRecipeIngredientMutation) unitFactoryMutateFunc(fn func(ctx context.Context) (string, error)) func(m *entRecipeIngredientMutation) { return func(m *entRecipeIngredientMutation) { m.unitType = TypeFactory m.unitFunc = func(ctx 
context.Context, i *EntRecipeIngredientMutator, c int, creator *ent.RecipeIngredientCreate) error { if fn == nil { return nil } value, err := fn(ctx) if err != nil { return err } creator.SetUnit(value) i.Unit = value return nil } } } func (f *EntRecipeIngredientMetaFactory) SetUnitSequence(fn func(ctx context.Context, i int) (string, error)) *EntRecipeIngredientMetaFactory { f.mutation.unitSequenceMutateFunc(fn)(&f.mutation) return f } func (f *EntRecipeIngredientMetaFactory) SetUnitLazy(fn func(ctx context.Context, i *EntRecipeIngredientMutator) (string, error)) *EntRecipeIngredientMetaFactory { f.mutation.unitLazyMutateFunc(fn)(&f.mutation) return f } func (f *EntRecipeIngredientMetaFactory) SetUnitDefault(v string) *EntRecipeIngredientMetaFactory { f.mutation.unitDefaultMutateFunc(v)(&f.mutation) return f } func (f *EntRecipeIngredientMetaFactory) SetUnitFactory(fn func(ctx context.Context) (string, error)) *EntRecipeIngredientMetaFactory { f.mutation.unitFactoryMutateFunc(fn)(&f.mutation) return f } func (t *entRecipeIngredientTrait) SetUnitSequence(fn func(ctx context.Context, i int) (string, error)) *entRecipeIngredientTrait { t.updates = append(t.updates, t.mutation.unitSequenceMutateFunc(fn)) return t } func (t *entRecipeIngredientTrait) SetUnitLazy(fn func(ctx context.Context, i *EntRecipeIngredientMutator) (string, error)) *entRecipeIngredientTrait { t.updates = append(t.updates, t.mutation.unitLazyMutateFunc(fn)) return t } func (t *entRecipeIngredientTrait) SetUnitDefault(v string) *entRecipeIngredientTrait { t.updates = append(t.updates, t.mutation.unitDefaultMutateFunc(v)) return t } func (t *entRecipeIngredientTrait) SetUnitFactory(fn func(ctx context.Context) (string, error)) *entRecipeIngredientTrait { t.updates = append(t.updates, t.mutation.unitFactoryMutateFunc(fn)) return t } func (f *EntRecipeIngredientMetaFactory) SetAfterCreateFunc(fn func(ctx context.Context, i *ent.RecipeIngredient) error) *EntRecipeIngredientMetaFactory { 
f.mutation.afterCreateFunc = fn return f } func (t *entRecipeIngredientTrait) SetAfterCreateFunc(fn func(ctx context.Context, i *ent.RecipeIngredient) error) *entRecipeIngredientTrait { t.updates = append(t.updates, t.mutation.afterCreateMutateFunc(fn)) return t } func (f *EntRecipeIngredientMetaFactory) Build() *EntRecipeIngredientFactory { return &EntRecipeIngredientFactory{meta: *f, counter: &Counter{}} } type EntRecipeIngredientFactory struct { meta EntRecipeIngredientMetaFactory counter *Counter client *ent.Client } func (f *EntRecipeIngredientFactory) SetIngredient(i *ent.Ingredient) *EntRecipeIngredientBuilder { builder := &EntRecipeIngredientBuilder{mutation: f.meta.mutation, counter: f.counter, factory: f} builder.SetIngredient(i) builder.client = f.client return builder } func (f *EntRecipeIngredientFactory) SetIngredientID(i int) *EntRecipeIngredientBuilder { builder := &EntRecipeIngredientBuilder{mutation: f.meta.mutation, counter: f.counter, factory: f} builder.SetIngredientID(i) builder.client = f.client return builder } func (f *EntRecipeIngredientFactory) SetQuantity(i float32) *EntRecipeIngredientBuilder { builder := &EntRecipeIngredientBuilder{mutation: f.meta.mutation, counter: f.counter, factory: f} builder.SetQuantity(i) builder.client = f.client return builder } func (f *EntRecipeIngredientFactory) SetUnit(i string) *EntRecipeIngredientBuilder { builder := &EntRecipeIngredientBuilder{mutation: f.meta.mutation, counter: f.counter, factory: f} builder.SetUnit(i) builder.client = f.client return builder } func (f *EntRecipeIngredientFactory) Create(ctx context.Context) (*ent.RecipeIngredient, error) { builder := &EntRecipeIngredientBuilder{mutation: f.meta.mutation, counter: f.counter, factory: f} builder.client = f.client return builder.Create(ctx) } func (f *EntRecipeIngredientFactory) CreateV(ctx context.Context) (ent.RecipeIngredient, error) { builder := &EntRecipeIngredientBuilder{mutation: f.meta.mutation, counter: f.counter, factory: f} 
builder.client = f.client return builder.CreateV(ctx) } func (f *EntRecipeIngredientFactory) CreateBatch(ctx context.Context, n int) ([]*ent.RecipeIngredient, error) { builder := &EntRecipeIngredientBuilder{mutation: f.meta.mutation, counter: f.counter, factory: f} builder.client = f.client return builder.CreateBatch(ctx, n) } func (f *EntRecipeIngredientFactory) CreateBatchV(ctx context.Context, n int) ([]ent.RecipeIngredient, error) { builder := &EntRecipeIngredientBuilder{mutation: f.meta.mutation, counter: f.counter, factory: f} builder.client = f.client return builder.CreateBatchV(ctx, n) } func (f *EntRecipeIngredientFactory) Client(c *ent.Client) *EntRecipeIngredientFactory { f.client = c return f } type EntRecipeIngredientBuilder struct { factory *EntRecipeIngredientFactory mutation entRecipeIngredientMutation counter *Counter ingredientOverride *ent.Ingredient ingredientOverriden bool ingredientIDOverride int ingredientIDOverriden bool quantityOverride float32 quantityOverriden bool unitOverride string unitOverriden bool client *ent.Client } func (b *EntRecipeIngredientBuilder) Client(c *ent.Client) *EntRecipeIngredientBuilder { b.client = c return b } func (b *EntRecipeIngredientBuilder) SetIngredient(i *ent.Ingredient) *EntRecipeIngredientBuilder { b.ingredientOverride = i b.ingredientOverriden = true return b } func (b *EntRecipeIngredientBuilder) SetIngredientID(i int) *EntRecipeIngredientBuilder { b.ingredientIDOverride = i b.ingredientIDOverriden = true return b } func (b *EntRecipeIngredientBuilder) SetQuantity(i float32) *EntRecipeIngredientBuilder { b.quantityOverride = i b.quantityOverriden = true return b } func (b *EntRecipeIngredientBuilder) SetUnit(i string) *EntRecipeIngredientBuilder { b.unitOverride = i b.unitOverriden = true return b } func (b *EntRecipeIngredientBuilder) CreateV(ctx context.Context) (ent.RecipeIngredient, error) { var d ent.RecipeIngredient p, err := b.Create(ctx) if err == nil { d = *p } return d, err } func (b 
*EntRecipeIngredientBuilder) Create(ctx context.Context) (*ent.RecipeIngredient, error) { var preSlice = []func(ctx context.Context, i *EntRecipeIngredientMutator, c int, creator *ent.RecipeIngredientCreate) error{} var lazySlice = []func(ctx context.Context, i *EntRecipeIngredientMutator, c int, creator *ent.RecipeIngredientCreate) error{} var postSlice = []func(ctx context.Context, i *ent.RecipeIngredient, c int, creator *ent.RecipeIngredientCreate) error{} index := b.counter.Get() _ = index client := b.client entBuilder := client.RecipeIngredient.Create() if b.ingredientOverriden { preSlice = append(preSlice, func(ctx context.Context, i *EntRecipeIngredientMutator, c int, creator *ent.RecipeIngredientCreate) error { value := b.ingredientOverride creator.SetIngredient(value) i.Ingredient = value return nil }) } else { switch b.mutation.ingredientType { case TypeDefault: preSlice = append(preSlice, b.mutation.ingredientFunc) case TypeLazy: lazySlice = append(lazySlice, b.mutation.ingredientFunc) case TypeSequence: preSlice = append(preSlice, b.mutation.ingredientFunc) case TypeFactory: preSlice = append(preSlice, b.mutation.ingredientFunc) } } if b.ingredientIDOverriden { preSlice = append(preSlice, func(ctx context.Context, i *EntRecipeIngredientMutator, c int, creator *ent.RecipeIngredientCreate) error { value := b.ingredientIDOverride creator.SetIngredientID(value) i.IngredientID = value return nil }) } else { switch b.mutation.ingredientIDType { case TypeDefault: preSlice = append(preSlice, b.mutation.ingredientIDFunc) case TypeLazy: lazySlice = append(lazySlice, b.mutation.ingredientIDFunc) case TypeSequence: preSlice = append(preSlice, b.mutation.ingredientIDFunc) case TypeFactory: preSlice = append(preSlice, b.mutation.ingredientIDFunc) } } if b.quantityOverriden { preSlice = append(preSlice, func(ctx context.Context, i *EntRecipeIngredientMutator, c int, creator *ent.RecipeIngredientCreate) error { value := b.quantityOverride creator.SetQuantity(value) 
i.Quantity = value return nil }) } else { switch b.mutation.quantityType { case TypeDefault: preSlice = append(preSlice, b.mutation.quantityFunc) case TypeLazy: lazySlice = append(lazySlice, b.mutation.quantityFunc) case TypeSequence: preSlice = append(preSlice, b.mutation.quantityFunc) case TypeFactory: preSlice = append(preSlice, b.mutation.quantityFunc) } } if b.unitOverriden { preSlice = append(preSlice, func(ctx context.Context, i *EntRecipeIngredientMutator, c int, creator *ent.RecipeIngredientCreate) error { value := b.unitOverride creator.SetUnit(value) i.Unit = value return nil }) } else { switch b.mutation.unitType { case TypeDefault: preSlice = append(preSlice, b.mutation.unitFunc) case TypeLazy: lazySlice = append(lazySlice, b.mutation.unitFunc) case TypeSequence: preSlice = append(preSlice, b.mutation.unitFunc) case TypeFactory: preSlice = append(preSlice, b.mutation.unitFunc) } } v := &EntRecipeIngredientMutator{} for _, f := range preSlice { err := f(ctx, v, index, entBuilder) if err != nil { return nil, err } } for _, f := range lazySlice { err := f(ctx, v, index, entBuilder) if err != nil { return nil, err } } new, err := entBuilder.Save(ctx) if err != nil { return nil, err } if b.mutation.afterCreateFunc != nil { err := b.mutation.afterCreateFunc(ctx, new) if err != nil { return nil, err } } for _, f := range postSlice { err := f(ctx, new, index, entBuilder) if err != nil { return nil, err } } return new, nil } func (b *EntRecipeIngredientBuilder) CreateBatch(ctx context.Context, n int) ([]*ent.RecipeIngredient, error) { var results []*ent.RecipeIngredient for i := 0; i < n; i++ { d, err := b.Create(ctx) if err != nil { return results, err } results = append(results, d) } return results, nil } func (b *EntRecipeIngredientBuilder) CreateBatchV(ctx context.Context, n int) ([]ent.RecipeIngredient, error) { var results []ent.RecipeIngredient for i := 0; i < n; i++ { d, err := b.CreateV(ctx) if err != nil { return results, err } results = 
append(results, d) } return results, nil }
examples/ent_recipe/carrier/factory/ent_recipeingredient.go
0.506103
0.455562
ent_recipeingredient.go
starcoder
package _542_01_Matrix /* https://leetcode.com/problems/01-matrix/ Given a matrix consists of 0 and 1, find the distance of the nearest 0 for each cell. The distance between two adjacent cells is 1. Example 1: Input: 0 0 0 0 1 0 0 0 0 Output: 0 0 0 0 1 0 0 0 0 Example 2: Input: 0 0 0 0 1 0 1 1 1 Output: 0 0 0 0 1 0 1 2 1 Note: The number of elements of the given matrix will not exceed 10,000. There are at least one 0 in the given matrix. The cells are adjacent in only four directions: up, down, left and right. */ /* * init output matrix by: set all possible 0 by scannin input matrix, others - to -1 * take "0" cells of output matix as starting points for bfs solution * add them to the Queue * pulling Queue while not empty * check neighbours, if -1 then make it (current cell count + 1) and add them to Queue */ func updateMatrix(matrix [][]int) [][]int { if len(matrix) == 0 { return matrix } q := [][]int{} push := func(x,y int) { q = append(q,[]int{x,y}) } pull := func() (int,int) { if len(q) == 0 { return -1,-1 } out := q[0] q = q[1:] return out[0],out[1] } out := make([][]int,len(matrix)) for i := range matrix { out[i] = make([]int,len(matrix[i])) for j := range matrix[i] { if matrix[i][j] == 0 { out[i][j] = 0 push(i,j) } else { out[i][j] = -1 } } } for len(q) > 0 { i,j := pull() if i + 1 < len(out) && out[i+1][j] == -1 { out[i+1][j] = out[i][j] + 1 push(i+1,j) } if i > 0 && out[i-1][j] == -1 { out[i-1][j] = out[i][j] + 1 push(i-1,j) } if j + 1 < len(out[i]) && out[i][j+1] == -1 { out[i][j+1] = out[i][j] + 1 push(i,j+1) } if j > 0 && out[i][j-1] == -1 { out[i][j-1] = out[i][j] + 1 push(i,j-1) } } return out }
542_01_Matrix/solution.go
0.865423
0.602032
solution.go
starcoder
package aws import ( "github.com/infracost/infracost/internal/resources" "github.com/infracost/infracost/internal/schema" "github.com/infracost/infracost/internal/usage" "github.com/shopspring/decimal" ) type Route53Record struct { Address string IsAlias bool MonthlyLatencyBasedQueries *int64 `infracost_usage:"monthly_latency_based_queries"` MonthlyGeoQueries *int64 `infracost_usage:"monthly_geo_queries"` MonthlyStandardQueries *int64 `infracost_usage:"monthly_standard_queries"` } var Route53RecordUsageSchema = []*schema.UsageItem{ {Key: "monthly_latency_based_queries", ValueType: schema.Int64, DefaultValue: 0}, {Key: "monthly_geo_queries", ValueType: schema.Int64, DefaultValue: 0}, {Key: "monthly_standard_queries", ValueType: schema.Int64, DefaultValue: 0}, } func (r *Route53Record) PopulateUsage(u *schema.UsageData) { resources.PopulateArgsWithUsage(r, u) } func (r *Route53Record) BuildResource() *schema.Resource { if r.IsAlias { return &schema.Resource{ Name: r.Address, NoPrice: true, IsSkipped: true, UsageSchema: Route53RecordUsageSchema, } } costComponents := []*schema.CostComponent{} limits := []int{1000000000} var numbOfStdQueries *decimal.Decimal if r.MonthlyStandardQueries != nil { numbOfStdQueries = decimalPtr(decimal.NewFromInt(*r.MonthlyStandardQueries)) stdQueriesTiers := usage.CalculateTierBuckets(*numbOfStdQueries, limits) if stdQueriesTiers[0].GreaterThan(decimal.Zero) { costComponents = append(costComponents, queriesCostComponent("Standard queries (first 1B)", "DNS-Queries", "0", &stdQueriesTiers[0])) } if stdQueriesTiers[1].GreaterThan(decimal.Zero) { costComponents = append(costComponents, queriesCostComponent("Standard queries (over 1B)", "DNS-Queries", "1000000000", &stdQueriesTiers[1])) } } else { var unknown *decimal.Decimal costComponents = append(costComponents, queriesCostComponent("Standard queries (first 1B)", "DNS-Queries", "0", unknown)) } var numbOfLBRQueries *decimal.Decimal if r.MonthlyLatencyBasedQueries != nil { numbOfLBRQueries = 
decimalPtr(decimal.NewFromInt(*r.MonthlyLatencyBasedQueries)) lbrQueriesTiers := usage.CalculateTierBuckets(*numbOfLBRQueries, limits) if lbrQueriesTiers[0].GreaterThan(decimal.Zero) { costComponents = append(costComponents, queriesCostComponent("Latency based routing queries (first 1B)", "LBR-Queries", "0", &lbrQueriesTiers[0])) } if lbrQueriesTiers[1].GreaterThan(decimal.Zero) { costComponents = append(costComponents, queriesCostComponent("Latency based routing queries (over 1B)", "LBR-Queries", "1000000000", &lbrQueriesTiers[1])) } } else { var unknown *decimal.Decimal costComponents = append(costComponents, queriesCostComponent("Latency based routing queries (first 1B)", "LBR-Queries", "0", unknown)) } var numbOfGeoQueries *decimal.Decimal if r.MonthlyGeoQueries != nil { numbOfGeoQueries = decimalPtr(decimal.NewFromInt(*r.MonthlyGeoQueries)) geoQueriesTiers := usage.CalculateTierBuckets(*numbOfGeoQueries, limits) if geoQueriesTiers[0].GreaterThan(decimal.Zero) { costComponents = append(costComponents, queriesCostComponent("Geo DNS queries (first 1B)", "Geo-Queries", "0", &geoQueriesTiers[0])) } if geoQueriesTiers[1].GreaterThan(decimal.Zero) { costComponents = append(costComponents, queriesCostComponent("Geo DNS queries (over 1B)", "Geo-Queries", "1000000000", &geoQueriesTiers[1])) } } else { var unknown *decimal.Decimal costComponents = append(costComponents, queriesCostComponent("Geo DNS queries (first 1B)", "Geo-Queries", "0", unknown)) } return &schema.Resource{ Name: r.Address, CostComponents: costComponents, UsageSchema: Route53RecordUsageSchema, } } func queriesCostComponent(displayName string, usageType string, usageTier string, quantity *decimal.Decimal) *schema.CostComponent { return &schema.CostComponent{ Name: displayName, Unit: "1M queries", UnitMultiplier: decimal.NewFromInt(1000000), MonthlyQuantity: quantity, ProductFilter: &schema.ProductFilter{ VendorName: strPtr("aws"), Service: strPtr("AmazonRoute53"), ProductFamily: strPtr("DNS Query"), 
AttributeFilters: []*schema.AttributeFilter{ {Key: "usagetype", Value: &usageType}, }, }, PriceFilter: &schema.PriceFilter{ StartUsageAmount: strPtr(usageTier), }, } }
internal/resources/aws/route53_record.go
0.57081
0.441312
route53_record.go
starcoder
// Package pkg models quorum systems as boolean expressions over nodes.
package pkg

import (
	"fmt"
	"github.com/lanl/clp"
	"math"
	"math/bits"
	"reflect"
	"sort"
	"strings"
)

// ExprSet describes a set of Expr.
type ExprSet = map[Expr]bool

// NodeSet describes a set of Node.
type NodeSet = map[Node]bool

// ExprOperator wraps the Add and Multiply methods needed to build a quorum from a set of Node.
type ExprOperator interface {
	// Add aggregates a Node to an Expr with a logical Or (a ∨ b) and
	// returns the resulting Or operation.
	Add(expr Expr) Or
	// Multiply aggregates a Node to an Expr with a logical And (a ∧ b) and
	// returns the resulting And operation.
	Multiply(expr Expr) And
}

// ExprGetter wraps some methods to retrieve the Expr.
type ExprGetter interface {
	// GetExprs returns a []Expr representing the Expr.
	GetExprs() []Expr
}

// NodeGetter wraps the method for getting the NodeSet from an Expr.
type NodeGetter interface {
	// GetNodes returns a NodeSet with the nodes contained in an Expr.
	GetNodes() NodeSet
}

// NumLeavesGetter wraps the method for getting the number of leaves in an Expr.
type NumLeavesGetter interface {
	// NumLeaves returns the number of leaves in an Expr. e.g. ( a + b ) * a results in 3 leaves.
	NumLeaves() uint
}

// DualOperator wraps a basic Dual method.
type DualOperator interface {
	// Dual method returns the logic Dual of an Expr. The Dual of a boolean Expr is the Expr one obtains
	// by interchanging addition and multiplication and interchanging 0’s and 1’s.
	// see: https://www.cs.fsu.edu/~lacher/courses/MAD3105/lectures/s4_1boolfn.pdf
	Dual() Expr
}

// ResilienceCalculator wraps the method for calculating the resilience of a quorum.
type ResilienceCalculator interface {
	// Resilience returns the resilience of an Expr.
	Resilience() uint
}

// MinFailuresCalculator wraps the method for calculating the minimum failure.
type MinFailuresCalculator interface {
	// MinFailures returns the number of minimum failures for an Expr.
	MinFailures() uint
}

// DuplicateChecker wraps the method for checking if an Expr contains a duplicate.
type DuplicateChecker interface {
	// DupFree reports whether every leaf of the Expr is a distinct node.
	DupFree() bool
}

// Quorum wraps the methods for calculating a quorum from an Expr and to check if an ExprSet is a valid Quorum.
type Quorum interface {
	// Quorums returns a chan exposing the quorums derived from an Expr.
	Quorums() chan ExprSet
	// IsQuorum returns true if the ExprSet is a quorum otherwise it returns false.
	IsQuorum(set ExprSet) bool
}

// Expr represents a logic expression between nodes or other expressions and its own methods.
type Expr interface {
	Quorum
	ExprOperator
	DualOperator
	ExprGetter
	NodeGetter
	NumLeavesGetter
	DuplicateChecker
	MinFailuresCalculator
	ResilienceCalculator
	fmt.Stringer
}

// Node represents a node in an Expr.
// Capacities and latency are pointers so a Node remains usable as a map key
// while still carrying optional metadata; constructors below populate them.
type Node struct {
	Name          string
	ReadCapacity  *uint
	WriteCapacity *uint
	Latency       *uint
}

// NewNode defines a new node with a name.
// Read and write capacities default to 1; Latency is left nil.
func NewNode(name string) Node {
	node := Node{}
	node.Name = name
	initialValue := uint(1)
	node.ReadCapacity = &initialValue
	node.WriteCapacity = &initialValue
	return node
}

// NewNodeWithCapacityAndLatency defines a new node with a name, read and write capacities and a latency.
func NewNodeWithCapacityAndLatency(name string, readCapacity uint, writeCapacity uint, latency uint) Node {
	node := Node{}
	node.Name = name
	node.ReadCapacity = &readCapacity
	node.WriteCapacity = &writeCapacity
	node.Latency = &latency
	return node
}

// NewNodeWithCapacity defines a new node with a name, a read and a write capacity.
// Latency is left nil.
func NewNodeWithCapacity(name string, readCapacity uint, writeCapacity uint) Node {
	node := Node{}
	node.Name = name
	node.ReadCapacity = &readCapacity
	node.WriteCapacity = &writeCapacity
	return node
}

// NewNodeWithLatency defines a new node with a name and a latency.
func NewNodeWithLatency(name string, latency uint) Node {
	node := Node{}
	node.Name = name
	// Read/write capacities default to 1, matching NewNode.
	initialValue := uint(1)
	node.ReadCapacity = &initialValue
	node.WriteCapacity = &initialValue
	node.Latency = &latency
	return node
}

// Add combines this node with expr under a logical Or.
func (n Node) Add(expr Expr) Or { return mergeWithOr(n, expr) }

// Multiply combines this node with expr under a logical And.
func (n Node) Multiply(expr Expr) And { return mergeWithAnd(n, expr) }

// Quorums yields the single quorum of a node: the node itself.
func (n Node) Quorums() chan ExprSet {
	chnl := make(chan ExprSet)
	go func() {
		chnl <- ExprSet{n: true}
		close(chnl)
	}()
	return chnl
}

// IsQuorum reports whether xs contains this node (compared by String()).
func (n Node) IsQuorum(xs ExprSet) bool {
	for k := range xs {
		if n.String() == k.String() {
			return true
		}
	}
	return false
}

// GetNodes returns the singleton set containing this node.
func (n Node) GetNodes() NodeSet { return NodeSet{n: true} }

// NumLeaves of a node is always 1.
func (n Node) NumLeaves() uint { return 1 }

// MinFailures of a single node is 1: losing the node loses the quorum.
func (n Node) MinFailures() uint { return 1 }

// Resilience returns the number of failures the expression can tolerate.
// For duplicate-free expressions it is MinFailures-1; otherwise it falls
// back to a minimum-hitting-set computation over all quorums.
func (n Node) Resilience() uint {
	if n.DupFree() {
		return n.MinFailures() - 1
	}
	qs := make([]ExprSet, 0)
	for q := range n.Quorums() {
		qs = append(qs, q)
	}
	return minHittingSet(qs) - 1.0
}

// DupFree reports whether every leaf is a distinct node.
func (n Node) DupFree() bool { return uint(len(n.GetNodes())) == n.NumLeaves() }

func (n Node) String() string { return n.Name }

// GetType returns a tag identifying the concrete expression type.
func (n Node) GetType() string { return "GetNodeByName" }

func (n Node) GetExprs() []Expr { return []Expr{n} }

// Dual of a single node is the node itself.
func (n Node) Dual() Expr { return n }

// Or represents a logical Or expression between other nodes or expressions.
type Or struct {
	Es []Expr
}

func (e Or) Add(rhs Expr) Or { return mergeWithOr(e, rhs) }

func (e Or) Multiply(rhs Expr) And { return mergeWithAnd(e, rhs) }

// Quorums yields one quorum per sub-expression.
// NOTE(review): only the FIRST quorum of each sub-expression is received
// (`<-es.Quorums()` reads a single value); if a sub-expression produces
// several quorums, the rest are dropped and its sender goroutine blocks
// forever on the unbuffered channel — looks like a bug/leak, confirm intent.
func (e Or) Quorums() chan ExprSet {
	chnl := make(chan ExprSet)
	go func() {
		for _, es := range e.Es {
			tmp := <-es.Quorums()
			chnl <- tmp
		}
		// Ensure that at the end of the loop we close the channel!
		close(chnl)
	}()
	return chnl
}

// IsQuorum: for an Or, it suffices that ANY sub-expression is satisfied.
func (e Or) IsQuorum(xs ExprSet) bool {
	var found = false
	for _, es := range e.Es {
		if es.IsQuorum(xs) {
			found = true
			return found
		}
	}
	return found
}

// GetNodes returns the union of the nodes of all sub-expressions.
func (e Or) GetNodes() NodeSet {
	var final = make(NodeSet)
	for _, es := range e.Es {
		for n := range es.GetNodes() {
			final[n] = true
		}
	}
	return final
}

// NumLeaves sums the leaves of all sub-expressions.
func (e Or) NumLeaves() uint {
	total := uint(0)
	for _, es := range e.Es {
		total += es.NumLeaves()
	}
	return total
}

// MinFailures of an Or is the sum over sub-expressions: every disjunct
// must fail for the whole Or to fail.
func (e Or) MinFailures() uint {
	total := uint(0)
	for _, es := range e.Es {
		total += es.MinFailures()
	}
	return total
}

// Resilience: MinFailures-1 when duplicate-free, else min hitting set.
func (e Or) Resilience() uint {
	if e.DupFree() {
		return e.MinFailures() - 1
	}
	qs := make([]ExprSet, 0)
	for q := range e.Quorums() {
		qs = append(qs, q)
	}
	return minHittingSet(qs) - 1.0
}

func (e Or) DupFree() bool { return uint(len(e.GetNodes())) == e.NumLeaves() }

// String renders the disjunction as "(a + b + ...)".
func (e Or) String() string {
	if len(e.Es) == 0 {
		return "()"
	}
	var sb strings.Builder
	sb.WriteString("(")
	sb.WriteString(e.Es[0].String())
	for _, v := range e.Es[1:] {
		sb.WriteString(" + ")
		sb.WriteString(v.String())
	}
	sb.WriteString(")")
	return sb.String()
}

func (e Or) GetType() string { return "Or" }

func (e Or) GetExprs() []Expr { return e.Es }

// Dual of an Or is the And of the duals of its sub-expressions.
func (e Or) Dual() Expr {
	dualExprs := make([]Expr, 0)
	for _, es := range e.Es {
		dualExprs = append(dualExprs, es.Dual())
	}
	return And{Es: dualExprs}
}

// And represents a logical And expression between other nodes or expressions.
type And struct {
	Es []Expr
}

func (e And) Add(rhs Expr) Or { return mergeWithOr(e, rhs) }

func (e And) Multiply(rhs Expr) And { return mergeWithAnd(e, rhs) }

// Quorums yields the cartesian product of the sub-expressions' quorums:
// a quorum of an And needs one quorum from EVERY conjunct.
// All sub-quorums are drained eagerly before the producer goroutine starts.
func (e And) Quorums() chan ExprSet {
	chnl := make(chan ExprSet)
	flatQuorums := make([][]interface{}, 0)
	for _, es := range e.Es {
		quorums := make([]interface{}, 0)
		for q := range es.Quorums() {
			quorums = append(quorums, q)
		}
		flatQuorums = append(flatQuorums, quorums)
	}
	go func() {
		for _, sets := range product(flatQuorums...) {
			set := make(ExprSet)
			for _, t := range sets {
				set = mergeExprSets(set, t.(ExprSet))
			}
			chnl <- set
		}
		// Ensure that at the end of the loop we close the channel!
		close(chnl)
	}()
	return chnl
}

// IsQuorum: for an And, EVERY sub-expression must be satisfied.
func (e And) IsQuorum(xs ExprSet) bool {
	var found = true
	for _, es := range e.Es {
		if !es.IsQuorum(xs) {
			found = false
			return found
		}
	}
	return found
}

// GetNodes returns the union of the nodes of all sub-expressions.
func (e And) GetNodes() NodeSet {
	var final = make(NodeSet)
	for _, es := range e.Es {
		for n := range es.GetNodes() {
			final[n] = true
		}
	}
	return final
}

// NumLeaves sums the leaves of all sub-expressions.
func (e And) NumLeaves() uint {
	total := uint(0)
	for _, es := range e.Es {
		total += es.NumLeaves()
	}
	return total
}

// MinFailures of an And is the minimum over sub-expressions: failing the
// weakest conjunct fails the whole conjunction.
func (e And) MinFailures() uint {
	var exprs = e.Es
	var min = exprs[0].MinFailures()
	for _, expr := range exprs {
		if min > expr.MinFailures() {
			min = expr.MinFailures()
		}
	}
	return min
}

// Resilience: MinFailures-1 when duplicate-free, else min hitting set.
func (e And) Resilience() uint {
	if e.DupFree() {
		return e.MinFailures() - 1
	}
	qs := make([]ExprSet, 0)
	for q := range e.Quorums() {
		qs = append(qs, q)
	}
	return minHittingSet(qs) - 1
}

func (e And) DupFree() bool { return uint(len(e.GetNodes())) == e.NumLeaves() }

// String renders the conjunction as "(a * b * ...)".
func (e And) String() string {
	if len(e.Es) == 0 {
		return "()"
	}
	var sb strings.Builder
	sb.WriteString("(")
	sb.WriteString(e.Es[0].String())
	for _, v := range e.Es[1:] {
		sb.WriteString(" * ")
		sb.WriteString(v.String())
	}
	sb.WriteString(")")
	return sb.String()
}

func (e And) GetType() string { return "And" }

func (e And) GetExprs() []Expr { return e.Es }

// Dual of an And is the Or of the duals of its sub-expressions.
func (e And) Dual() Expr {
	dualExprs := make([]Expr, 0)
	for _, es := range e.Es {
		dualExprs = append(dualExprs, es.Dual())
	}
	return Or{Es: dualExprs}
}

// Choose represents a logical "K of N" threshold expression over Es.
type Choose struct {
	Es []Expr
	K  int
}

// NewChoose validates k and builds a threshold expression.
// Degenerate thresholds are normalized: k==1 becomes an Or, k==len(es)
// becomes an And.
func NewChoose(k int, es []Expr) (Expr, error) {
	if len(es) == 0 {
		return Choose{}, fmt.Errorf("no expressions provided")
	}
	if !(1 <= k && k <= len(es)) {
		return Choose{}, fmt.Errorf("k must be in the range [1, len(es)]")
	}
	if k == 1 {
		return Or{Es: es}, nil
	}
	if k == len(es) {
		return And{Es: es}, nil
	}
	// NOTE(review): this range check is unreachable — the `!(1 <= k && ...)`
	// guard above already rejected every k it could catch.
	if k <= 0 || k > len(es) {
		return Choose{}, fmt.Errorf("k must be in the range [1, %d]", len(es))
	}
	return Choose{Es: es, K: k}, nil
}

func (e Choose) Add(rhs Expr) Or { return mergeWithOr(e, rhs) }

func (e Choose) Multiply(rhs Expr) And { return mergeWithAnd(e, rhs) }

// Quorums yields, for every K-subset of the sub-expressions, the cartesian
// product of that subset's quorums. All sets are materialized up front and
// then streamed on the returned channel.
func (e Choose) Quorums() chan ExprSet {
	chnl := make(chan ExprSet)
	sets := make([]ExprSet, 0)
	for _, combo := range combinations(e.Es, uint(e.K)) {
		combinedQuorums := make([][]interface{}, 0)
		for _, c := range combo {
			quorums := make([]interface{}, 0)
			for q := range c.Quorums() {
				quorums = append(quorums, q)
			}
			combinedQuorums = append(combinedQuorums, quorums)
		}
		for _, s := range product(combinedQuorums...) {
			set := make(ExprSet)
			for _, t := range s {
				set = mergeExprSets(set, t.(ExprSet))
			}
			sets = append(sets, set)
		}
	}
	go func() {
		for _, set := range sets {
			chnl <- set
		}
		close(chnl)
	}()
	return chnl
}

// IsQuorum: at least K sub-expressions must be satisfied by xs.
func (e Choose) IsQuorum(xs ExprSet) bool {
	sum := 0
	for _, es := range e.Es {
		if es.IsQuorum(xs) {
			sum += 1
		}
	}
	return sum >= e.K
}

// GetNodes returns the union of the nodes of all sub-expressions.
func (e Choose) GetNodes() NodeSet {
	var final = make(NodeSet)
	for _, es := range e.Es {
		for n := range es.GetNodes() {
			final[n] = true
		}
	}
	return final
}

// NumLeaves sums the leaves of all sub-expressions.
func (e Choose) NumLeaves() uint {
	total := uint(0)
	for _, es := range e.Es {
		total += es.NumLeaves()
	}
	return total
}

// MinFailures sums the (len(Es)-K+1) smallest sub-expression failure
// counts: killing that many sub-expressions leaves fewer than K alive.
func (e Choose) MinFailures() uint {
	var exprs = e.Es
	var subFailures []int
	for _, expr := range exprs {
		subFailures = append(subFailures, int(expr.MinFailures()))
	}
	sort.Ints(subFailures)
	sortedSubset := subFailures[:len(subFailures)-e.K+1]
	total := 0
	for _, v := range sortedSubset {
		total += v
	}
	return uint(total)
}

// Resilience: MinFailures-1 when duplicate-free, else min hitting set.
func (e Choose) Resilience() uint {
	if e.DupFree() {
		return e.MinFailures() - 1
	}
	qs := make([]ExprSet, 0)
	for q := range e.Quorums() {
		qs = append(qs, q)
	}
	return minHittingSet(qs) - 1.0
}

func (e Choose) DupFree() bool { return uint(len(e.GetNodes())) == e.NumLeaves() }

// String renders the sub-expressions joined by " * ".
// NOTE(review): this is identical to And.String(), so the threshold K is
// invisible in the output — confirm whether a choose-specific notation
// was intended.
func (e Choose) String() string {
	if len(e.Es) == 0 {
		return "()"
	}
	var sb strings.Builder
	sb.WriteString("(")
	sb.WriteString(e.Es[0].String())
	for _, v := range e.Es[1:] {
		sb.WriteString(" * ")
		sb.WriteString(v.String())
	}
	sb.WriteString(")")
	return sb.String()
}

func (e Choose) GetType() string { return "Choose" }

func (e Choose) GetExprs() []Expr { return e.Es }

// Dual of "K of N" is "(N-K+1) of N" over the duals of the sub-expressions.
func (e Choose) Dual() Expr {
	dualExprs := make([]Expr, 0)
	for _, es := range e.Es {
		dualExprs = append(dualExprs, es.Dual())
	}
	return Choose{Es: dualExprs, K: len(e.Es) - e.K + 1}
}

// mergeWithOr returns an Or expression between two input expressions,
// flattening operands that are already Or expressions.
// NOTE(review): reflect.Type.String() returns the package-qualified name
// (e.g. "pkg.Or"), so the `String() == "Or"` comparisons on rhs can never
// match; Name() was presumably intended on both sides. The result is still
// a logically equivalent (but nested rather than flattened) Or.
func mergeWithOr(lhs Expr, rhs Expr) Or {
	if reflect.TypeOf(lhs).Name() == "Or" && reflect.TypeOf(rhs).String() == "Or" {
		return Or{append(lhs.GetExprs(), rhs.GetExprs()...)}
	} else if reflect.TypeOf(lhs).Name() == "Or" {
		return Or{append(lhs.GetExprs(), rhs)}
	} else if reflect.TypeOf(rhs).String() == "Or" {
		return Or{append([]Expr{lhs}, rhs.GetExprs()...)}
	} else {
		return Or{[]Expr{lhs, rhs}}
	}
}

// mergeWithAnd returns an And expression between two input expressions,
// flattening operands that are already And expressions.
// NOTE(review): same Name()/String() mismatch as in mergeWithOr.
func mergeWithAnd(lhs Expr, rhs Expr) And {
	if reflect.TypeOf(lhs).Name() == "And" && reflect.TypeOf(rhs).String() == "And" {
		return And{append(lhs.GetExprs(), rhs.GetExprs()...)}
	} else if reflect.TypeOf(lhs).Name() == "And" {
		return And{append(lhs.GetExprs(), rhs)}
	} else if reflect.TypeOf(rhs).String() == "And" {
		return And{append([]Expr{lhs}, rhs.GetExprs()...)}
	} else {
		return And{[]Expr{lhs, rhs}}
	}
}

// mergeExprSets returns the union of multiple ExprSet values.
func mergeExprSets(maps ...ExprSet) ExprSet {
	result := make(ExprSet)
	for _, m := range maps {
		for k, v := range m {
			result[k] = v
		}
	}
	return result
}

// product returns the cartesian product between a list of inputs.
// product returns the cartesian product of the given sets, enumerated in
// odometer order (the rightmost index varies fastest).
// NOTE(review): calling product() with zero sets panics on ix[0]; all
// callers in this package pass at least one set.
func product(sets ...[]interface{}) [][]interface{} {
	result := make([][]interface{}, 0)
	// nextIndex advances the multi-index like an odometer: increment the
	// last position, carrying left whenever a position overflows its set.
	nextIndex := func(ix []int, lens func(i int) int) {
		for j := len(ix) - 1; j >= 0; j-- {
			ix[j]++
			if j == 0 || ix[j] < lens(j) {
				return
			}
			ix[j] = 0
		}
	}
	lens := func(i int) int { return len(sets[i]) }
	// The loop ends once the leftmost index overflows, i.e. after every
	// combination has been visited exactly once.
	for ix := make([]int, len(sets)); ix[0] < lens(0); nextIndex(ix, lens) {
		var r []interface{}
		for j, k := range ix {
			r = append(r, sets[j][k])
		}
		result = append(result, r)
	}
	return result
}

// combinations returns the size-n subsets of set, enumerated via bitmasks
// (each set bit of subsetBits selects one element).
// n is clamped to len(set); with n == 0 every non-empty subset is returned
// because the popcount filter is skipped.
func combinations(set []Expr, n uint) (subsets [][]Expr) {
	length := len(set)
	if n > uint(len(set)) {
		n = uint(len(set))
	}
	for subsetBits := 1; subsetBits < (1 << length); subsetBits++ {
		// Keep only masks whose popcount matches the requested size.
		if n > 0 && bits.OnesCount(uint(subsetBits)) != int(n) {
			continue
		}
		var ss []Expr
		for object := 0; object < length; object++ {
			if (subsetBits>>object)&1 == 1 {
				ss = append(ss, set[object])
			}
		}
		subsets = append(subsets, ss)
	}
	return subsets
}

// setToArr given an input ExprSet returns an []Expr.
// setToArr converts an ExprSet into an []Expr (iteration order is the map's,
// i.e. unspecified).
func setToArr(input ExprSet) []Expr {
	result := make([]Expr, 0)
	for k := range input {
		result = append(result, k)
	}
	return result
}

// minHittingSet computes the size of a minimum hitting set over the given
// quorums by solving a linear program with the CLP simplex solver: one 0..1
// variable per distinct element, and one >=1 covering constraint per quorum.
// NOTE(review): the LP columns are continuous (a relaxation of the integer
// hitting-set problem) and the column solution is rounded per variable, so
// the result is an approximation of the true minimum; a non-Optimal solver
// status is only printed, not returned — confirm both are acceptable.
func minHittingSet(quorums []ExprSet) uint {
	keys := make([]Expr, 0)
	def := lpDefinition{}
	def.Vars = make([]float64, 0)
	def.Constraints = make([][2]float64, 0)
	def.Objectives = make([][]float64, 0)
	simp := clp.NewSimplex()
	// Collect the distinct elements across all quorums, preserving first-seen
	// order in keys.
	uniqueKeys := make(map[Expr]float64)
	for _, xs := range quorums {
		for k := range xs {
			if _, exists := uniqueKeys[k]; !exists {
				keys = append(keys, k)
			}
			uniqueKeys[k] = 1.0
		}
	}
	// Objective: minimize the sum of all variables (coefficient 1 each).
	for range keys {
		def.Vars = append(def.Vars, 1.0)
	}
	// Each variable is bounded to [0, 1].
	for range keys {
		constr := [2]float64{0, 1}
		def.Constraints = append(def.Constraints, constr)
	}
	// One row per quorum: the sum of the variables of its members must be
	// at least 1 (row bounds [1, +Inf]).
	for _, xs := range quorums {
		obj := make([]float64, 0)
		obj = append(obj, 1)
		for _, k := range keys {
			if _, exists := xs[k]; exists {
				obj = append(obj, 1)
			} else {
				obj = append(obj, 0)
			}
		}
		obj = append(obj, math.Inf(1))
		def.Objectives = append(def.Objectives, obj)
	}
	// Set up the optimization problem.
	simp.EasyLoadDenseProblem(
		def.Vars,
		def.Constraints,
		def.Objectives)
	simp.SetOptimizationDirection(clp.Minimize)
	// Solve the optimization problem.
	status := simp.Primal(clp.NoValuesPass, clp.NoStartFinishOptions)
	soln := simp.PrimalColumnSolution()
	if status != clp.Optimal {
		fmt.Println("Error")
	}
	// Round each column value and add it up to get the hitting-set size.
	result := uint(0)
	for _, v := range soln {
		result += uint(math.Round(v))
	}
	return result
}
pkg/expr.go
0.692538
0.442757
expr.go
starcoder
package geo3d

import (
	"fmt"
	"strconv"
	"strings"

	"aoc/internal/util"
)

// Transform is a 3D rotation matrix, stored row-major as 9 ints.
type Transform [9]int

// Rotations contains all possible 3D rotations with increments of 90 degrees.
// Courtesy: https://www.euclideanspace.com/maths/algebra/matrix/transforms/examples/index.htm
var Rotations = []Transform{
	{1, 0, 0, 0, 1, 0, 0, 0, 1}, // identity
	{0, 0, 1, 0, 1, 0, -1, 0, 0},
	{-1, 0, 0, 0, 1, 0, 0, 0, -1},
	{0, 0, -1, 0, 1, 0, 1, 0, 0},
	{0, -1, 0, 1, 0, 0, 0, 0, 1},
	{0, 0, 1, 1, 0, 0, 0, 1, 0},
	{0, 1, 0, 1, 0, 0, 0, 0, -1},
	{0, 0, -1, 1, 0, 0, 0, -1, 0},
	{0, 1, 0, -1, 0, 0, 0, 0, 1},
	{0, 0, 1, -1, 0, 0, 0, -1, 0},
	{0, -1, 0, -1, 0, 0, 0, 0, -1},
	{0, 0, -1, -1, 0, 0, 0, 1, 0},
	{1, 0, 0, 0, 0, -1, 0, 1, 0},
	{0, 1, 0, 0, 0, -1, -1, 0, 0},
	{-1, 0, 0, 0, 0, -1, 0, -1, 0},
	{0, -1, 0, 0, 0, -1, 1, 0, 0},
	{1, 0, 0, 0, -1, 0, 0, 0, -1},
	{0, 0, -1, 0, -1, 0, -1, 0, 0},
	{-1, 0, 0, 0, -1, 0, 0, 0, 1},
	{0, 0, 1, 0, -1, 0, 1, 0, 0},
	{1, 0, 0, 0, 0, 1, 0, -1, 0},
	{0, -1, 0, 0, 0, 1, -1, 0, 0},
	{-1, 0, 0, 0, 0, 1, 0, 1, 0},
	{0, 1, 0, 0, 0, 1, 1, 0, 0},
}

// Pos is an integer point in 3D space.
type Pos struct {
	X, Y, Z int
}

// NewPos builds a Pos from its three coordinates.
func NewPos(x, y, z int) Pos {
	return Pos{X: x, Y: y, Z: z}
}

// ParsePos parses a position from the comma-separated form "x,y,z".
func ParsePos(s string) (p Pos, err error) {
	tokens := strings.Split(s, ",")
	if len(tokens) != 3 {
		return Pos{}, fmt.Errorf("invalid position: %s", s)
	}
	p.X, err = strconv.Atoi(tokens[0])
	if err != nil {
		return Pos{}, fmt.Errorf("invalid position: %s", s)
	}
	p.Y, err = strconv.Atoi(tokens[1])
	if err != nil {
		return Pos{}, fmt.Errorf("invalid position: %s", s)
	}
	p.Z, err = strconv.Atoi(tokens[2])
	if err != nil {
		return Pos{}, fmt.Errorf("invalid position: %s", s)
	}
	return
}

// Add returns the component-wise sum p + other.
func (p Pos) Add(other Pos) Pos {
	return Pos{
		X: p.X + other.X,
		Y: p.Y + other.Y,
		Z: p.Z + other.Z,
	}
}

// Sub returns the component-wise difference p - other.
func (p Pos) Sub(other Pos) Pos {
	return Pos{
		X: p.X - other.X,
		Y: p.Y - other.Y,
		Z: p.Z - other.Z,
	}
}

// Transform applies the rotation matrix t to p (matrix-vector product).
func (p Pos) Transform(t Transform) Pos {
	return Pos{
		X: p.X*t[0] + p.Y*t[1] + p.Z*t[2],
		Y: p.X*t[3] + p.Y*t[4] + p.Z*t[5],
		Z: p.X*t[6] + p.Y*t[7] + p.Z*t[8],
	}
}

// In reports whether p lies inside the axis-aligned cube c (bounds inclusive).
func (p Pos) In(c Cube) bool {
	if p.X < c.A.X || p.Y < c.A.Y || p.Z < c.A.Z {
		return false
	}
	if p.X > c.B.X || p.Y > c.B.Y || p.Z > c.B.Z {
		return false
	}
	return true
}

// Manhattan returns the Manhattan distance of p from the origin.
func (p Pos) Manhattan() int {
	return util.Abs(p.X) + util.Abs(p.Y) + util.Abs(p.Z)
}

// String renders p as "<x,y,z>".
func (p Pos) String() string {
	return fmt.Sprintf("<%d,%d,%d>", p.X, p.Y, p.Z)
}
go/internal/geo3d/pos.go
0.629091
0.594904
pos.go
starcoder
// Package database provides MySQL-flavored DATE/DATETIME/TIME value types
// and their string conversions, including the MySQL "zero" values.
package database

import (
	"errors"
	"fmt"
	"strconv"
	"strings"
	"time"
)

// Date is the struct for MySQL DATE type
type Date struct {
	Year       int16
	Month, Day byte
}

// String renders the date as "YYYY-MM-DD" (zero date prints "0000-00-00").
func (dd Date) String() string {
	return fmt.Sprintf("%04d-%02d-%02d", dd.Year, dd.Month, dd.Day)
}

// IsZero is the function for true if date is 0000-00-00
func (dd Date) IsZero() bool {
	return dd.Day == 0 && dd.Month == 0 && dd.Year == 0
}

// Time is the function for converts Date to time.Time using loc location.
// Converts MySQL zero to time.Time zero.
func (dd Date) Time(loc *time.Location) (t time.Time) {
	if !dd.IsZero() {
		t = time.Date(
			int(dd.Year), time.Month(dd.Month), int(dd.Day),
			0, 0, 0, 0,
			loc,
		)
	}
	return
}

// Localtime is the function for converts Date to time.Time using Local location.
// Converts MySQL zero to time.Time zero.
func (dd Date) Localtime() time.Time {
	return dd.Time(time.Local)
}

// ParseDate is the function for convert string date in format YYYY-MM-DD to Date.
// Leading and trailing spaces are ignored. The MySQL zero date "0000-00-00"
// parses to the zero Date. Per-month day upper bounds are enforced, but
// February 29 is always accepted (no leap-year check), and month/day 0 are
// allowed as MySQL permits them.
func ParseDate(str string) (dd Date, err error) {
	str = strings.TrimSpace(str)
	if str == "0000-00-00" {
		return
	}
	var (
		y, m, d int
	)
	// Fixed layout: exactly 10 chars with dashes at positions 4 and 7.
	if len(str) != 10 || str[4] != '-' || str[7] != '-' {
		goto invalid
	}
	if y, err = strconv.Atoi(str[0:4]); err != nil {
		return
	}
	if m, err = strconv.Atoi(str[5:7]); err != nil {
		return
	}
	if m < 0 || m > 12 { // MySQL permits month == 0
		goto invalid
	}
	if d, err = strconv.Atoi(str[8:10]); err != nil {
		return
	}
	if d < 0 { // MySQL permits day == 0
		goto invalid
	}
	switch m {
	case 1, 3, 5, 7, 8, 10, 12:
		if d > 31 {
			goto invalid
		}
	case 4, 6, 9, 11:
		if d > 30 {
			goto invalid
		}
	case 2:
		if d > 29 {
			goto invalid
		}
	}
	dd.Year = int16(y)
	dd.Month = byte(m)
	dd.Day = byte(d)
	return

invalid:
	err = errors.New("Invalid MySQL DATE string: " + str)
	return
}

// TimeFormat is the standard MySQL datetime format (Go reference layout).
const TimeFormat = "2006-01-02 15:04:05.000000000"

// TimeString returns t as string in MySQL format. Converts time.Time zero to MySQL zero.
func TimeString(t time.Time) string { if t.IsZero() { return "0000-00-00 00:00:00" } if t.Nanosecond() == 0 { return t.Format(TimeFormat[:19]) } return t.Format(TimeFormat) } // ParseTime returns parses string datetime in TimeFormat using loc location. // Converts MySQL zero to time.Time zero. func ParseTime(str string, loc *time.Location) (t time.Time, err error) { str = strings.TrimSpace(str) format := TimeFormat[:19] switch len(str) { case 10: if str == "0000-00-00" { return } format = format[:10] case 19: if str == "0000-00-00 00:00:00" { return } } // Don't expect 0000-00-00 00:00:00.0+ t, err = time.ParseInLocation(format, str, loc) return } // DurationString returns d as string in MySQL format Convert time.Duration to string representation of mysql.TIME func DurationString(d time.Duration) string { sign := 1 if d < 0 { sign = -1 d = -d } ns := int(d % 1e9) d /= 1e9 sec := int(d % 60) d /= 60 min := int(d % 60) hour := int(d/60) * sign if ns == 0 { return fmt.Sprintf("%d:%02d:%02d", hour, min, sec) } return fmt.Sprintf("%d:%02d:%02d.%09d", hour, min, sec, ns) } // ParseDuration Parse duration from MySQL string format [+-]H+:MM:SS[.UUUUUUUUU]. // Leading and trailing spaces are ignored. If format is invalid returns nil. 
func ParseDuration(str string) (dur time.Duration, err error) { str = strings.TrimSpace(str) orig := str // Check sign sign := int64(1) switch str[0] { case '-': sign = -1 fallthrough case '+': str = str[1:] } var i, d int64 // Find houre if nn := strings.IndexRune(str, ':'); nn != -1 { if i, err = strconv.ParseInt(str[0:nn], 10, 64); err != nil { return } d = i * 3600 str = str[nn+1:] } else { goto invalid } if len(str) != 5 && len(str) != 15 || str[2] != ':' { goto invalid } if i, err = strconv.ParseInt(str[0:2], 10, 64); err != nil { return } if i < 0 || i > 59 { goto invalid } d += i * 60 if i, err = strconv.ParseInt(str[3:5], 10, 64); err != nil { return } if i < 0 || i > 59 { goto invalid } d += i d *= 1e9 if len(str) == 15 { if str[5] != '.' { goto invalid } if i, err = strconv.ParseInt(str[6:15], 10, 64); err != nil { return } d += i } dur = time.Duration(d * sign) return invalid: err = errors.New("invalid MySQL TIME string: " + orig) return } // Timestamp is ... type Timestamp struct { time.Time } func (t Timestamp) String() string { return TimeString(t.Time) }
database/types.go
0.689933
0.547162
types.go
starcoder
package gofa

// Earth Rotation and Sidereal Time

/*
Ee00 Equation of the equinoxes, IAU 2000

The equation of the equinoxes, compatible with IAU 2000 resolutions,
given the nutation in longitude and the mean obliquity.

Given:
    date1,date2  float64   TT as a 2-part Julian Date (Note 1)
    epsa         float64   mean obliquity (Note 2)
    dpsi         float64   nutation in longitude (Note 3)

Returned (function value):
    float64    equation of the equinoxes (Note 4)

Notes:

 1) The TT date date1+date2 is a Julian Date, apportioned in any
    convenient way between the two arguments.  For example,
    JD(TT)=2450123.7 could be expressed in any of these ways, among
    others:

           date1          date2

        2450123.7           0.0       (JD method)
        2451545.0       -1421.3       (J2000 method)
        2400000.5       50123.2       (MJD method)
        2450123.5           0.2       (date & time method)

    The JD method is the most natural and convenient to use in cases
    where the loss of several decimal digits of resolution is
    acceptable.  The J2000 method is best matched to the way the
    argument is handled internally and will deliver the optimum
    resolution.  The MJD method and the date & time methods are both
    good compromises between resolution and convenience.

 2) The obliquity, in radians, is mean of date.

 3) The result, which is in radians, operates in the following sense:

       Greenwich apparent ST = GMST + equation of the equinoxes

 4) The result is compatible with the IAU 2000 resolutions.  For
    further details, see IERS Conventions 2003 and Capitaine et al.
    (2002).

Called:
    Eect00     equation of the equinoxes complementary terms

References:

    Capitaine, N., Wallace, P.T. and McCarthy, D.D., "Expressions to
    implement the IAU 2000 definition of UT1", Astronomy &
    Astrophysics, 406, 1135-1149 (2003)
    [NOTE(review): author names restored from the standard SOFA
    documentation; the extracted source had anonymized placeholders.
    Same applies to the other references below.]

    McCarthy, D.D., Petit, G. (eds.), IERS Conventions (2003),
    IERS Technical Note No. 32, BKG (2004)
*/
func Ee00(date1, date2 float64, epsa, dpsi float64) float64 {
    var ee float64

    /* Equation of the equinoxes: classical term plus complementary terms. */
    ee = dpsi*cos(epsa) + Eect00(date1, date2)

    return ee
}

/*
Ee00a Equation of the equinoxes, IAU 2000A

Equation of the equinoxes, compatible with IAU 2000 resolutions.

Given:
    date1,date2  float64   TT as a 2-part Julian Date (Note 1)

Returned (function value):
    float64    equation of the equinoxes (Note 2)

Notes:

 1) The TT date date1+date2 is a Julian Date, apportioned in any
    convenient way between the two arguments.  For example,
    JD(TT)=2450123.7 could be expressed in any of these ways, among
    others:

           date1          date2

        2450123.7           0.0       (JD method)
        2451545.0       -1421.3       (J2000 method)
        2400000.5       50123.2       (MJD method)
        2450123.5           0.2       (date & time method)

    The JD method is the most natural and convenient to use in cases
    where the loss of several decimal digits of resolution is
    acceptable.  The J2000 method is best matched to the way the
    argument is handled internally and will deliver the optimum
    resolution.  The MJD method and the date & time methods are both
    good compromises between resolution and convenience.

 2) The result, which is in radians, operates in the following sense:

       Greenwich apparent ST = GMST + equation of the equinoxes

 3) The result is compatible with the IAU 2000 resolutions.  For
    further details, see IERS Conventions 2003 and Capitaine et al.
    (2002).

Called:
    Pr00       IAU 2000 precession adjustments
    Obl80      mean obliquity, IAU 1980
    Nut00a     nutation, IAU 2000A
    Ee00       equation of the equinoxes, IAU 2000

References:

    Capitaine, N., Wallace, P.T. and McCarthy, D.D., "Expressions to
    implement the IAU 2000 definition of UT1", Astronomy &
    Astrophysics, 406, 1135-1149 (2003).

    McCarthy, D.D., Petit, G. (eds.), IERS Conventions (2003),
    IERS Technical Note No. 32, BKG (2004).
*/
func Ee00a(date1, date2 float64) float64 {
    var dpsipr, depspr, epsa, dpsi, deps, ee float64

    /* IAU 2000 precession-rate adjustments. */
    Pr00(date1, date2, &dpsipr, &depspr)

    /* Mean obliquity, consistent with IAU 2000 precession-nutation. */
    epsa = Obl80(date1, date2) + depspr

    /* Nutation in longitude (full IAU 2000A series). */
    Nut00a(date1, date2, &dpsi, &deps)

    /* Equation of the equinoxes. */
    ee = Ee00(date1, date2, epsa, dpsi)

    return ee
}

/*
Ee00b Equation of the equinoxes, IAU 2000B

Equation of the equinoxes, compatible with IAU 2000 resolutions but
using the truncated nutation model IAU 2000B.

Given:
    date1,date2  float64   TT as a 2-part Julian Date (Note 1)

Returned (function value):
    float64    equation of the equinoxes (Note 2)

Notes:

 1) The TT date date1+date2 is a Julian Date, apportioned in any
    convenient way between the two arguments.  For example,
    JD(TT)=2450123.7 could be expressed in any of these ways, among
    others:

           date1          date2

        2450123.7           0.0       (JD method)
        2451545.0       -1421.3       (J2000 method)
        2400000.5       50123.2       (MJD method)
        2450123.5           0.2       (date & time method)

    The JD method is the most natural and convenient to use in cases
    where the loss of several decimal digits of resolution is
    acceptable.  The J2000 method is best matched to the way the
    argument is handled internally and will deliver the optimum
    resolution.  The MJD method and the date & time methods are both
    good compromises between resolution and convenience.

 2) The result, which is in radians, operates in the following sense:

       Greenwich apparent ST = GMST + equation of the equinoxes

 3) The result is compatible with the IAU 2000 resolutions except
    that accuracy has been compromised (1 mas) for the sake of speed.
    For further details, see McCarthy & Luzum (2003), IERS
    Conventions 2003 and Capitaine et al. (2003).

Called:
    Pr00       IAU 2000 precession adjustments
    Obl80      mean obliquity, IAU 1980
    Nut00b     nutation, IAU 2000B
    Ee00       equation of the equinoxes, IAU 2000

References:

    Capitaine, N., Wallace, P.T. and McCarthy, D.D., "Expressions to
    implement the IAU 2000 definition of UT1", Astronomy &
    Astrophysics, 406, 1135-1149 (2003)

    McCarthy, D.D. & Luzum, B.J., "An abridged model of the
    precession-nutation of the celestial pole", Celestial Mechanics &
    Dynamical Astronomy, 85, 37-49 (2003)

    McCarthy, D.D., Petit, G. (eds.), IERS Conventions (2003),
    IERS Technical Note No. 32, BKG (2004)
*/
func Ee00b(date1, date2 float64) float64 {
    var dpsipr, depspr, epsa, dpsi, deps, ee float64

    /* IAU 2000 precession-rate adjustments. */
    Pr00(date1, date2, &dpsipr, &depspr)

    /* Mean obliquity, consistent with IAU 2000 precession-nutation. */
    epsa = Obl80(date1, date2) + depspr

    /* Nutation in longitude (truncated IAU 2000B series). */
    Nut00b(date1, date2, &dpsi, &deps)

    /* Equation of the equinoxes. */
    ee = Ee00(date1, date2, epsa, dpsi)

    return ee
}

/*
Ee06a Equation of the equinoxes, IAU 2006/2000A

Equation of the equinoxes, compatible with IAU 2000 resolutions and
IAU 2006/2000A precession-nutation.

Given:
    date1,date2  float64   TT as a 2-part Julian Date (Note 1)

Returned (function value):
    float64    equation of the equinoxes (Note 2)

Notes:

 1) The TT date date1+date2 is a Julian Date, apportioned in any
    convenient way between the two arguments.  For example,
    JD(TT)=2450123.7 could be expressed in any of these ways, among
    others:

           date1          date2

        2450123.7           0.0       (JD method)
        2451545.0       -1421.3       (J2000 method)
        2400000.5       50123.2       (MJD method)
        2450123.5           0.2       (date & time method)

    The JD method is the most natural and convenient to use in cases
    where the loss of several decimal digits of resolution is
    acceptable.  The J2000 method is best matched to the way the
    argument is handled internally and will deliver the optimum
    resolution.  The MJD method and the date & time methods are both
    good compromises between resolution and convenience.

 2) The result, which is in radians, operates in the following sense:

       Greenwich apparent ST = GMST + equation of the equinoxes

Called:
    Anpm       normalize angle into range +/- pi
    Gst06a     Greenwich apparent sidereal time, IAU 2006/2000A
    Gmst06     Greenwich mean sidereal time, IAU 2006

Reference:

    McCarthy, D.D., Petit, G. (eds.), 2004, IERS Conventions (2003),
    IERS Technical Note No. 32, BKG
*/
func Ee06a(date1, date2 float64) float64 {
    var gst06a, gmst06, ee float64

    /* Apparent and mean sidereal times. */
    /* The UT1 epoch is irrelevant here: the Earth-rotation-angle      */
    /* contribution cancels in the GAST-GMST difference, so 0.0,0.0    */
    /* is passed for the UT1 date parts.                               */
    gst06a = Gst06a(0.0, 0.0, date1, date2)
    gmst06 = Gmst06(0.0, 0.0, date1, date2)

    /* Equation of the equinoxes. */
    ee = Anpm(gst06a - gmst06)

    return ee
}

/*
Eect00 Equation of the equinoxes complementary terms, consistent with
IAU 2000 resolutions.

Given:
    date1,date2  float64   TT as a 2-part Julian Date (Note 1)

Returned (function value):
    float64    complementary terms (Note 2)

Notes:

 1) The TT date date1+date2 is a Julian Date, apportioned in any
    convenient way between the two arguments.  For example,
    JD(TT)=2450123.7 could be expressed in any of these ways, among
    others:

           date1          date2

        2450123.7           0.0       (JD method)
        2451545.0       -1421.3       (J2000 method)
        2400000.5       50123.2       (MJD method)
        2450123.5           0.2       (date & time method)

    The JD method is the most natural and convenient to use in cases
    where the loss of several decimal digits of resolution is
    acceptable.  The J2000 method is best matched to the way the
    argument is handled internally and will deliver the optimum
    resolution.  The MJD method and the date & time methods are both
    good compromises between resolution and convenience.

 2) The "complementary terms" are part of the equation of the
    equinoxes (EE), classically the difference between apparent and
    mean Sidereal Time:

       GAST = GMST + EE

    with:

       EE = dpsi * cos(eps)

    where dpsi is the nutation in longitude and eps is the obliquity
    of date.  However, if the rotation of the Earth were constant in
    an inertial frame the classical formulation would lead to
    apparent irregularities in the UT1 timescale traceable to side-
    effects of precession-nutation.  In order to eliminate these
    effects from UT1, "complementary terms" were introduced in 1994
    (IAU, 1994) and took effect from 1997 (Capitaine and Gontier,
    1993):

       GAST = GMST + CT + EE

    By convention, the complementary terms are included as part of
    the equation of the equinoxes rather than as part of the mean
    Sidereal Time.  This slightly compromises the "geometrical"
    interpretation of mean sidereal time but is otherwise
    inconsequential.

    The present function computes CT in the above expression,
    compatible with IAU 2000 resolutions (Capitaine et al., 2002,
    and IERS Conventions 2003).

Called:
    Fal03      mean anomaly of the Moon
    Falp03     mean anomaly of the Sun
    Faf03      mean argument of the latitude of the Moon
    Fad03      mean elongation of the Moon from the Sun
    Faom03     mean longitude of the Moon's ascending node
    Fave03     mean longitude of Venus
    Fae03      mean longitude of Earth
    Fapa03     general accumulated precession in longitude

References:

    Capitaine, N. & Gontier, A.-M., Astron.Astrophys., 275,
    645-650 (1993)

    Capitaine, N., Wallace, P.T. and McCarthy, D.D., "Expressions to
    implement the IAU 2000 definition of UT1", Astron.Astrophys.,
    406, 1135-1149 (2003)

    IAU Resolution C7, Recommendation 3 (1994)

    McCarthy, D.D., Petit, G. (eds.), IERS Conventions (2003), IERS
    Technical Note No. 32, BKG (2004)
*/
func Eect00(date1, date2 float64) float64 {
    /* Time since J2000.0, in Julian centuries */
    var t float64

    /* Miscellaneous */
    var i, j int
    var a, s0, s1 float64

    /* Fundamental arguments (only the first 8 slots are used) */
    var fa [14]float64

    /* Returned value. */
    var eect float64

    /* ----------------------------------------- */
    /* The series for the EE complementary terms */
    /* ----------------------------------------- */

    type TERM struct {
        nfa  [8]int  /* coefficients of l,l',F,D,Om,LVe,LE,pA */
        s, c float64 /* sine and cosine coefficients */
    }

    /* Terms of order t^0 */
    e0 := []TERM{
        /* 1-10 */
        {[8]int{0, 0, 0, 0, 1, 0, 0, 0}, 2640.96e-6, -0.39e-6},
        {[8]int{0, 0, 0, 0, 2, 0, 0, 0}, 63.52e-6, -0.02e-6},
        {[8]int{0, 0, 2, -2, 3, 0, 0, 0}, 11.75e-6, 0.01e-6},
        {[8]int{0, 0, 2, -2, 1, 0, 0, 0}, 11.21e-6, 0.01e-6},
        {[8]int{0, 0, 2, -2, 2, 0, 0, 0}, -4.55e-6, 0.00e-6},
        {[8]int{0, 0, 2, 0, 3, 0, 0, 0}, 2.02e-6, 0.00e-6},
        {[8]int{0, 0, 2, 0, 1, 0, 0, 0}, 1.98e-6, 0.00e-6},
        {[8]int{0, 0, 0, 0, 3, 0, 0, 0}, -1.72e-6, 0.00e-6},
        {[8]int{0, 1, 0, 0, 1, 0, 0, 0}, -1.41e-6, -0.01e-6},
        {[8]int{0, 1, 0, 0, -1, 0, 0, 0}, -1.26e-6, -0.01e-6},

        /* 11-20 */
        {[8]int{1, 0, 0, 0, -1, 0, 0, 0}, -0.63e-6, 0.00e-6},
        {[8]int{1, 0, 0, 0, 1, 0, 0, 0}, -0.63e-6, 0.00e-6},
        {[8]int{0, 1, 2, -2, 3, 0, 0, 0}, 0.46e-6, 0.00e-6},
        {[8]int{0, 1, 2, -2, 1, 0, 0, 0}, 0.45e-6, 0.00e-6},
        {[8]int{0, 0, 4, -4, 4, 0, 0, 0}, 0.36e-6, 0.00e-6},
        {[8]int{0, 0, 1, -1, 1, -8, 12, 0}, -0.24e-6, -0.12e-6},
        {[8]int{0, 0, 2, 0, 0, 0, 0, 0}, 0.32e-6, 0.00e-6},
        {[8]int{0, 0, 2, 0, 2, 0, 0, 0}, 0.28e-6, 0.00e-6},
        {[8]int{1, 0, 2, 0, 3, 0, 0, 0}, 0.27e-6, 0.00e-6},
        {[8]int{1, 0, 2, 0, 1, 0, 0, 0}, 0.26e-6, 0.00e-6},

        /* 21-30 */
        {[8]int{0, 0, 2, -2, 0, 0, 0, 0}, -0.21e-6, 0.00e-6},
        {[8]int{0, 1, -2, 2, -3, 0, 0, 0}, 0.19e-6, 0.00e-6},
        {[8]int{0, 1, -2, 2, -1, 0, 0, 0}, 0.18e-6, 0.00e-6},
        {[8]int{0, 0, 0, 0, 0, 8, -13, -1}, -0.10e-6, 0.05e-6},
        {[8]int{0, 0, 0, 2, 0, 0, 0, 0}, 0.15e-6, 0.00e-6},
        {[8]int{2, 0, -2, 0, -1, 0, 0, 0}, -0.14e-6, 0.00e-6},
        {[8]int{1, 0, 0, -2, 1, 0, 0, 0}, 0.14e-6, 0.00e-6},
        {[8]int{0, 1, 2, -2, 2, 0, 0, 0}, -0.14e-6, 0.00e-6},
        {[8]int{1, 0, 0, -2, -1, 0, 0, 0}, 0.14e-6, 0.00e-6},
        {[8]int{0, 0, 4, -2, 4, 0, 0, 0}, 0.13e-6, 0.00e-6},

        /* 31-33 */
        {[8]int{0, 0, 2, -2, 4, 0, 0, 0}, -0.11e-6, 0.00e-6},
        {[8]int{1, 0, -2, 0, -3, 0, 0, 0}, 0.11e-6, 0.00e-6},
        {[8]int{1, 0, -2, 0, -1, 0, 0, 0}, 0.11e-6, 0.00e-6},
    }

    /* Terms of order t^1 */
    e1 := []TERM{
        {[8]int{0, 0, 0, 0, 1, 0, 0, 0}, -0.87e-6, 0.00e-6},
    }

    /* Number of terms in the series */
    NE0 := len(e0)
    NE1 := len(e1)

    /* ------------------------------------------------------------------ */

    /* Interval between fundamental epoch J2000.0 and current date (JC). */
    t = ((date1 - DJ00) + date2) / DJC

    /* Fundamental Arguments (from IERS Conventions 2003) */

    /* Mean anomaly of the Moon. */
    fa[0] = Fal03(t)

    /* Mean anomaly of the Sun. */
    fa[1] = Falp03(t)

    /* Mean longitude of the Moon minus that of the ascending node. */
    fa[2] = Faf03(t)

    /* Mean elongation of the Moon from the Sun. */
    fa[3] = Fad03(t)

    /* Mean longitude of the ascending node of the Moon. */
    fa[4] = Faom03(t)

    /* Mean longitude of Venus. */
    fa[5] = Fave03(t)

    /* Mean longitude of Earth. */
    fa[6] = Fae03(t)

    /* General precession in longitude. */
    fa[7] = Fapa03(t)

    /* Evaluate the EE complementary terms. */
    /* Summed smallest-first (reverse order) to limit rounding error. */
    s0 = 0.0
    s1 = 0.0

    for i = NE0 - 1; i >= 0; i-- {
        a = 0.0
        for j = 0; j < 8; j++ {
            a += float64(e0[i].nfa[j]) * fa[j]
        }
        s0 += e0[i].s*sin(a) + e0[i].c*cos(a)
    }

    for i = NE1 - 1; i >= 0; i-- {
        a = 0.0
        for j = 0; j < 8; j++ {
            a += float64(e1[i].nfa[j]) * fa[j]
        }
        s1 += e1[i].s*sin(a) + e1[i].c*cos(a)
    }

    /* Convert from arcseconds to radians. */
    eect = (s0 + s1*t) * DAS2R

    return eect
}

/*
Eqeq94 Equation of the equinoxes, IAU 1994

Equation of the equinoxes, IAU 1994 model.

Given:
    date1,date2  float64   TDB date (Note 1)

Returned (function value):
    float64    equation of the equinoxes (Note 2)

Notes:

 1) The date date1+date2 is a Julian Date, apportioned in any
    convenient way between the two arguments.  For example,
    JD(TT)=2450123.7 could be expressed in any of these ways, among
    others:

           date1          date2

        2450123.7           0.0       (JD method)
        2451545.0       -1421.3       (J2000 method)
        2400000.5       50123.2       (MJD method)
        2450123.5           0.2       (date & time method)

    The JD method is the most natural and convenient to use in cases
    where the loss of several decimal digits of resolution is
    acceptable.  The J2000 method is best matched to the way the
    argument is handled internally and will deliver the optimum
    resolution.  The MJD method and the date & time methods are both
    good compromises between resolution and convenience.

 2) The result, which is in radians, operates in the following sense:

       Greenwich apparent ST = GMST + equation of the equinoxes

Called:
    Anpm       normalize angle into range +/- pi
    Nut80      nutation, IAU 1980
    Obl80      mean obliquity, IAU 1980

References:

    IAU Resolution C7, Recommendation 3 (1994).

    Capitaine, N. & Gontier, A.-M., 1993, Astron.Astrophys., 275,
    645-650.
*/
func Eqeq94(date1, date2 float64) float64 {
    var t, om, dpsi, deps, eps0, ee float64

    /* Interval between fundamental epoch J2000.0 and given date (JC). */
    t = ((date1 - DJ00) + date2) / DJC

    /* Longitude of the mean ascending node of the lunar orbit on the */
    /* ecliptic, measured from the mean equinox of date.              */
    om = Anpm((450160.280+(-482890.539+
        (7.455+0.008*t)*t)*t)*DAS2R + fmod(-5.0*t, 1.0)*D2PI)

    /* Nutation components and mean obliquity. */
    Nut80(date1, date2, &dpsi, &deps)
    eps0 = Obl80(date1, date2)

    /* Equation of the equinoxes. */
    ee = dpsi*cos(eps0) + DAS2R*(0.00264*sin(om)+0.000063*sin(om+om))

    return ee
}

/*
Era00 Earth Rotation Angle, IAU 2000

Given:
    dj1,dj2  float64   UT1 as a 2-part Julian Date (see note)

Returned (function value):
    float64   Earth rotation angle (radians), range 0-2pi

Notes:

 1) The UT1 date dj1+dj2 is a Julian Date, apportioned in any
    convenient way between the arguments dj1 and dj2.  For example,
    JD(UT1)=2450123.7 could be expressed in any of these ways, among
    others:

            dj1            dj2

        2450123.7           0.0       (JD method)
        2451545.0       -1421.3       (J2000 method)
        2400000.5       50123.2       (MJD method)
        2450123.5           0.2       (date & time method)

    The JD method is the most natural and convenient to use in cases
    where the loss of several decimal digits of resolution is
    acceptable.  The J2000 and MJD methods are good compromises
    between resolution and convenience.  The date & time method is
    best matched to the algorithm used:  maximum precision is
    delivered when the dj1 argument is for 0hrs UT1 on the day in
    question and the dj2 argument lies in the range 0 to 1, or vice
    versa.

 2) The algorithm is adapted from Expression 22 of Capitaine et al.
    2000.  The time argument has been expressed in days directly,
    and, to retain precision, integer contributions have been
    eliminated.  The same formulation is given in IERS Conventions
    (2003), Chap. 5, Eq. 14.

Called:
    Anp        normalize angle into range 0 to 2pi

References:

    Capitaine N., Guinot B. and McCarthy D.D., 2000, Astron.
    Astrophys., 355, 398-405.

    McCarthy, D.D., Petit, G. (eds.), IERS Conventions (2003),
    IERS Technical Note No. 32, BKG (2004)
*/
func Era00(dj1, dj2 float64) float64 {
    var d1, d2, t, f, theta float64

    /* Days since fundamental epoch. */
    /* Order the two date parts so the smaller is used for the        */
    /* fractional-part extraction below, preserving precision.        */
    if dj1 < dj2 {
        d1 = dj1
        d2 = dj2
    } else {
        d1 = dj2
        d2 = dj1
    }
    t = d1 + (d2 - DJ00)

    /* Fractional part of T (days). */
    f = fmod(d1, 1.0) + fmod(d2, 1.0)

    /* Earth rotation angle at this UT1. */
    theta = Anp(D2PI * (f + 0.7790572732640 + 0.00273781191135448*t))

    return theta
}

/*
Gmst00 Greenwich Mean Sidereal Time, IAU 2000

Greenwich mean sidereal time (model consistent with IAU 2000
resolutions).

Given:
    uta,utb  float64   UT1 as a 2-part Julian Date (Notes 1,2)
    tta,ttb  float64   TT as a 2-part Julian Date (Notes 1,2)

Returned (function value):
    float64   Greenwich mean sidereal time (radians)

Notes:

 1) The UT1 and TT dates uta+utb and tta+ttb respectively, are both
    Julian Dates, apportioned in any convenient way between the
    argument pairs.  For example, JD(UT1)=2450123.7 could be
    expressed in any of these ways, among others:

           Part A         Part B

        2450123.7           0.0       (JD method)
        2451545.0       -1421.3       (J2000 method)
        2400000.5       50123.2       (MJD method)
        2450123.5           0.2       (date & time method)

    The JD method is the most natural and convenient to use in cases
    where the loss of several decimal digits of resolution is
    acceptable (in the case of UT;  the TT is not at all critical in
    this respect).  The J2000 and MJD methods are good compromises
    between resolution and convenience.  For UT, the date & time
    method is best matched to the algorithm that is used by the Earth
    Rotation Angle function, called internally:  maximum precision is
    delivered when the uta argument is for 0hrs UT1 on the day in
    question and the utb argument lies in the range 0 to 1, or vice
    versa.

 2) Both UT1 and TT are required, UT1 to predict the Earth rotation
    and TT to predict the effects of precession.  If UT1 is used for
    both purposes, errors of order 100 microarcseconds result.

 3) This GMST is compatible with the IAU 2000 resolutions and must be
    used only in conjunction with other IAU 2000 compatible
    components such as precession-nutation and equation of the
    equinoxes.

 4) The result is returned in the range 0 to 2pi.

 5) The algorithm is from Capitaine et al. (2003) and IERS
    Conventions 2003.

Called:
    Era00      Earth rotation angle, IAU 2000
    Anp        normalize angle into range 0 to 2pi

References:

    Capitaine, N., Wallace, P.T. and McCarthy, D.D., "Expressions to
    implement the IAU 2000 definition of UT1", Astronomy &
    Astrophysics, 406, 1135-1149 (2003)

    McCarthy, D.D., Petit, G. (eds.), IERS Conventions (2003),
    IERS Technical Note No. 32, BKG (2004)
*/
func Gmst00(uta, utb float64, tta, ttb float64) float64 {
    var t, gmst float64

    /* TT Julian centuries since J2000.0. */
    t = ((tta - DJ00) + ttb) / DJC

    /* Greenwich Mean Sidereal Time, IAU 2000:                         */
    /* Earth rotation angle plus the precession polynomial in t,       */
    /* coefficients in arcseconds (converted via DAS2R).               */
    gmst = Anp(Era00(uta, utb) +
        (0.014506+
            (4612.15739966+
                (1.39667721+
                    (-0.00009344+
                        (0.00001882)*t)*t)*t)*t)*DAS2R)

    return gmst
}

/*
Gmst06 Greenwich Mean Sidereal Time, IAU 2006

Greenwich mean sidereal time (consistent with IAU 2006 precession).

Given:
    uta,utb  float64   UT1 as a 2-part Julian Date (Notes 1,2)
    tta,ttb  float64   TT as a 2-part Julian Date (Notes 1,2)

Returned (function value):
    float64   Greenwich mean sidereal time (radians)

Notes:

 1) The UT1 and TT dates uta+utb and tta+ttb respectively, are both
    Julian Dates, apportioned in any convenient way between the
    argument pairs.  For example, JD=2450123.7 could be expressed in
    any of these ways, among others:

           Part A        Part B

        2450123.7           0.0       (JD method)
        2451545.0       -1421.3       (J2000 method)
        2400000.5       50123.2       (MJD method)
        2450123.5           0.2       (date & time method)

    The JD method is the most natural and convenient to use in cases
    where the loss of several decimal digits of resolution is
    acceptable (in the case of UT;  the TT is not at all critical in
    this respect).  The J2000 and MJD methods are good compromises
    between resolution and convenience.  For UT, the date & time
    method is best matched to the algorithm that is used by the Earth
    rotation angle function, called internally:  maximum precision is
    delivered when the uta argument is for 0hrs UT1 on the day in
    question and the utb argument lies in the range 0 to 1, or vice
    versa.

 2) Both UT1 and TT are required, UT1 to predict the Earth rotation
    and TT to predict the effects of precession.  If UT1 is used for
    both purposes, errors of order 100 microarcseconds result.

 3) This GMST is compatible with the IAU 2006 precession and must not
    be used with other precession models.

 4) The result is returned in the range 0 to 2pi.

Called:
    Era00      Earth rotation angle, IAU 2000
    Anp        normalize angle into range 0 to 2pi

Reference:

    Capitaine, N., Wallace, P.T. & Chapront, J., 2005,
    Astron.Astrophys. 432, 355
*/
func Gmst06(uta, utb float64, tta, ttb float64) float64 {
    var t, gmst float64

    /* TT Julian centuries since J2000.0. */
    t = ((tta - DJ00) + ttb) / DJC

    /* Greenwich mean sidereal time, IAU 2006:                         */
    /* Earth rotation angle plus the IAU 2006 precession polynomial,   */
    /* coefficients in arcseconds (converted via DAS2R).               */
    gmst = Anp(Era00(uta, utb) +
        (0.014506+
            (4612.156534+
                (1.3915817+
                    (-0.00000044+
                        (-0.000029956+
                            (-0.0000000368)*t)*t)*t)*t)*t)*DAS2R)

    return gmst
}

/*
Gmst82 Greenwich Mean Sidereal Time, IAU 1982

Universal Time to Greenwich mean sidereal time (IAU 1982 model).

Given:
    dj1,dj2  float64   UT1 Julian Date (see note)

Returned (function value):
    float64   Greenwich mean sidereal time (radians)

Notes:

 1) The UT1 date dj1+dj2 is a Julian Date, apportioned in any
    convenient way between the arguments dj1 and dj2.  For example,
    JD(UT1)=2450123.7 could be expressed in any of these ways, among
    others:

            dj1            dj2

        2450123.7          0        (JD method)
        2451545        -1421.3      (J2000 method)
        2400000.5      50123.2      (MJD method)
        2450123.5          0.2      (date & time method)

    The JD method is the most natural and convenient to use in cases
    where the loss of several decimal digits of resolution is
    acceptable.  The J2000 and MJD methods are good compromises
    between resolution and convenience.  The date & time method is
    best matched to the algorithm used:  maximum accuracy (or, at
    least, minimum noise) is delivered when the dj1 argument is for
    0hrs UT1 on the day in question and the dj2 argument lies in the
    range 0 to 1, or vice versa.

 2) The algorithm is based on the IAU 1982 expression.  This is
    always described as giving the GMST at 0 hours UT1.  In fact, it
    gives the difference between the GMST and the UT, the steady
    4-minutes-per-day drawing-ahead of ST with respect to UT.  When
    whole days are ignored, the expression happens to equal the GMST
    at 0 hours UT1 each day.

 3) In this function, the entire UT1 (the sum of the two arguments
    dj1 and dj2) is used directly as the argument for the standard
    formula, the constant term of which is adjusted by 12 hours to
    take account of the noon phasing of Julian Date.  The UT1 is then
    added, but omitting whole days to conserve accuracy.

Called:
    Anp        normalize angle into range 0 to 2pi

References:

    Transactions of the International Astronomical Union,
    XVIII B, 67 (1983).

    Aoki et al., Astron.Astrophys., 105, 359-361 (1982).
*/
func Gmst82(dj1, dj2 float64) float64 {
    /* Coefficients of IAU 1982 GMST-UT1 model */
    A := 24110.54841 - DAYSEC/2.0
    B := 8640184.812866
    C := 0.093104
    D := -6.2e-6

    /* The first constant, A, has to be adjusted by 12 hours because the */
    /* UT1 is supplied as a Julian date, which begins at noon.           */

    var d1, d2, t, f, gmst float64

    /* Julian centuries since fundamental epoch. */
    if dj1 < dj2 {
        d1 = dj1
        d2 = dj2
    } else {
        d1 = dj2
        d2 = dj1
    }
    t = (d1 + (d2 - DJ00)) / DJC

    /* Fractional part of JD(UT1), in seconds. */
    f = DAYSEC * (fmod(d1, 1.0) + fmod(d2, 1.0))

    /* GMST at this UT1. */
    gmst = Anp(DS2R * ((A + (B+(C+D*t)*t)*t) + f))

    return gmst
}

/*
Gst00a Greenwich Apparent Sidereal Time, IAU 2000A

Greenwich apparent sidereal time (consistent with IAU 2000
resolutions).

Given:
    uta,utb  float64   UT1 as a 2-part Julian Date (Notes 1,2)
    tta,ttb  float64   TT as a 2-part Julian Date (Notes 1,2)

Returned (function value):
    float64   Greenwich apparent sidereal time (radians)

Notes:

 1) The UT1 and TT dates uta+utb and tta+ttb respectively, are both
    Julian Dates, apportioned in any convenient way between the
    argument pairs.  For example, JD(UT1)=2450123.7 could be
    expressed in any of these ways, among others:

            uta            utb

        2450123.7           0.0       (JD method)
        2451545.0       -1421.3       (J2000 method)
        2400000.5       50123.2       (MJD method)
        2450123.5           0.2       (date & time method)

    The JD method is the most natural and convenient to use in cases
    where the loss of several decimal digits of resolution is
    acceptable (in the case of UT;  the TT is not at all critical in
    this respect).  The J2000 and MJD methods are good compromises
    between resolution and convenience.  For UT, the date & time
    method is best matched to the algorithm that is used by the Earth
    Rotation Angle function, called internally:  maximum precision is
    delivered when the uta argument is for 0hrs UT1 on the day in
    question and the utb argument lies in the range 0 to 1, or vice
    versa.

 2) Both UT1 and TT are required, UT1 to predict the Earth rotation
    and TT to predict the effects of precession-nutation.  If UT1 is
    used for both purposes, errors of order 100 microarcseconds
    result.

 3) This GAST is compatible with the IAU 2000 resolutions and must be
    used only in conjunction with other IAU 2000 compatible
    components such as precession-nutation.

 4) The result is returned in the range 0 to 2pi.

 5) The algorithm is from Capitaine et al. (2003) and IERS
    Conventions 2003.

Called:
    Gmst00     Greenwich mean sidereal time, IAU 2000
    Ee00a      equation of the equinoxes, IAU 2000A
    Anp        normalize angle into range 0 to 2pi

References:

    Capitaine, N., Wallace, P.T. and McCarthy, D.D., "Expressions to
    implement the IAU 2000 definition of UT1", Astronomy &
    Astrophysics, 406, 1135-1149 (2003)

    McCarthy, D.D., Petit, G. (eds.), IERS Conventions (2003),
    IERS Technical Note No. 32, BKG (2004)
*/
func Gst00a(uta, utb float64, tta, ttb float64) float64 {
    var gmst00, ee00a, gst float64

    gmst00 = Gmst00(uta, utb, tta, ttb)
    ee00a = Ee00a(tta, ttb)
    gst = Anp(gmst00 + ee00a)

    return gst
}

/*
Gst00b Greenwich Apparent Sidereal Time, IAU 2000B

Greenwich apparent sidereal time (consistent with IAU 2000
resolutions but using the truncated nutation model IAU 2000B).

Given:
    uta,utb  float64   UT1 as a 2-part Julian Date (Notes 1,2)

Returned (function value):
    float64   Greenwich apparent sidereal time (radians)

Notes:

 1) The UT1 date uta+utb is a Julian Date, apportioned in any
    convenient way between the argument pair.  For example,
    JD(UT1)=2450123.7 could be expressed in any of these ways, among
    others:

            uta            utb

        2450123.7           0.0       (JD method)
        2451545.0       -1421.3       (J2000 method)
        2400000.5       50123.2       (MJD method)
        2450123.5           0.2       (date & time method)

    The JD method is the most natural and convenient to use in cases
    where the loss of several decimal digits of resolution is
    acceptable.  The J2000 and MJD methods are good compromises
    between resolution and convenience.  For UT, the date & time
    method is best matched to the algorithm that is used by the Earth
    Rotation Angle function, called internally:  maximum precision is
    delivered when the uta argument is for 0hrs UT1 on the day in
    question and the utb argument lies in the range 0 to 1, or vice
    versa.

 2) The result is compatible with the IAU 2000 resolutions, except
    that accuracy has been compromised for the sake of speed and
    convenience in two respects:

    . UT is used instead of TDB (or TT) to compute the precession
      component of GMST and the equation of the equinoxes.  This
      results in errors of order 0.1 mas at present.

    . The IAU 2000B abridged nutation model (McCarthy & Luzum, 2003)
      is used, introducing errors of up to 1 mas.

 3) This GAST is compatible with the IAU 2000 resolutions and must be
    used only in conjunction with other IAU 2000 compatible
    components such as precession-nutation.

 4) The result is returned in the range 0 to 2pi.

 5) The algorithm is from Capitaine et al. (2003) and IERS
    Conventions 2003.

Called:
    Gmst00     Greenwich mean sidereal time, IAU 2000
    Ee00b      equation of the equinoxes, IAU 2000B
    Anp        normalize angle into range 0 to 2pi

References:

    Capitaine, N., Wallace, P.T. and McCarthy, D.D., "Expressions to
    implement the IAU 2000 definition of UT1", Astronomy &
    Astrophysics, 406, 1135-1149 (2003)

    McCarthy, D.D. & Luzum, B.J., "An abridged model of the
    precession-nutation of the celestial pole", Celestial Mechanics &
    Dynamical Astronomy, 85, 37-49 (2003)

    McCarthy, D.D., Petit, G. (eds.), IERS Conventions (2003),
    IERS Technical Note No. 32, BKG (2004)
*/
func Gst00b(uta, utb float64) float64 {
    var gmst00, ee00b, gst float64

    /* UT1 is deliberately reused as the TT argument (see Note 2). */
    gmst00 = Gmst00(uta, utb, uta, utb)
    ee00b = Ee00b(uta, utb)
    gst = Anp(gmst00 + ee00b)

    return gst
}

/*
Gst06 Greenwich Apparent Sidereal Time, IAU 2006 given NPB matrix

Given:
    uta,utb  float64        UT1 as a 2-part Julian Date (Notes 1,2)
    tta,ttb  float64        TT as a 2-part Julian Date (Notes 1,2)
    rnpb     [3][3]float64  nutation x precession x bias matrix

Returned (function value):
    float64   Greenwich apparent sidereal time (radians)

Notes:

 1) The UT1 and TT dates uta+utb and tta+ttb respectively, are both
    Julian Dates, apportioned in any convenient way between the
    argument pairs.  For example, JD(UT1)=2450123.7 could be
    expressed in any of these ways, among others:

            uta            utb

        2450123.7           0.0       (JD method)
        2451545.0       -1421.3       (J2000 method)
        2400000.5       50123.2       (MJD method)
        2450123.5           0.2       (date & time method)

    The JD method is the most natural and convenient to use in cases
    where the loss of several decimal digits of resolution is
    acceptable (in the case of UT;  the TT is not at all critical in
    this respect).  The J2000 and MJD methods are good compromises
    between resolution and convenience.  For UT, the date & time
    method is best matched to the algorithm that is used by the Earth
    rotation angle function, called internally:  maximum precision is
    delivered when the uta argument is for 0hrs UT1 on the day in
    question and the utb argument lies in the range 0 to 1, or vice
    versa.

 2) Both UT1 and TT are required, UT1 to predict the Earth rotation
    and TT to predict the effects of precession-nutation.  If UT1 is
    used for both purposes, errors of order 100 microarcseconds
    result.

 3) Although the function uses the IAU 2006 series for s+XY/2, it is
    otherwise independent of the precession-nutation model and can in
    practice be used with any equinox-based NPB matrix.

 4) The result is returned in the range 0 to 2pi.

Called:
    Bpn2xy     extract CIP X,Y coordinates from NPB matrix
    S06        the CIO locator s, given X,Y, IAU 2006
    Anp        normalize angle into range 0 to 2pi
    Era00      Earth rotation angle, IAU 2000
    Eors       equation of the origins, given NPB matrix and s

Reference:

    Wallace, P.T. & Capitaine, N., 2006, Astron.Astrophys. 459, 981
*/
func Gst06(uta, utb float64, tta, ttb float64, rnpb [3][3]float64) float64 {
    var x, y, s, era, eors, gst float64

    /* Extract CIP coordinates. */
    Bpn2xy(rnpb, &x, &y)

    /* The CIO locator, s. */
    s = S06(tta, ttb, x, y)

    /* Greenwich apparent sidereal time. */
    era = Era00(uta, utb)
    eors = Eors(rnpb, s)
    gst = Anp(era - eors)

    return gst
}

/*
Gst06a Greenwich Apparent Sidereal Time, IAU 2006/2000A

Greenwich apparent sidereal time (consistent with IAU 2000 and 2006
resolutions).

Given:
    uta,utb  float64   UT1 as a 2-part Julian Date (Notes 1,2)
    tta,ttb  float64   TT as a 2-part Julian Date (Notes 1,2)

Returned (function value):
    float64   Greenwich apparent sidereal time (radians)

Notes:

 1) The UT1 and TT dates uta+utb and tta+ttb respectively, are both
    Julian Dates, apportioned in any convenient way between the
    argument pairs.  For example, JD(UT1)=2450123.7 could be
    expressed in any of these ways, among others:

            uta            utb

        2450123.7           0.0       (JD method)
        2451545.0       -1421.3       (J2000 method)
        2400000.5       50123.2       (MJD method)
        2450123.5           0.2       (date & time method)

    The JD method is the most natural and convenient to use in cases
    where the loss of several decimal digits of resolution is
    acceptable (in the case of UT;  the TT is not at all critical in
    this respect).  The J2000 and MJD methods are good compromises
    between resolution and convenience.  For UT, the date & time
    method is best matched to the algorithm that is used by the Earth
    rotation angle function, called internally:  maximum precision is
    delivered when the uta argument is for 0hrs UT1 on the day in
    question and the utb argument lies in the range 0 to 1, or vice
    versa.

 2) Both UT1 and TT are required, UT1 to predict the Earth rotation
    and TT to predict the effects of precession-nutation.  If UT1 is
    used for both purposes, errors of order 100 microarcseconds
    result.

 3) This GAST is compatible with the IAU 2000/2006 resolutions and
    must be used only in conjunction with IAU 2006 precession and
    IAU 2000A nutation.

 4) The result is returned in the range 0 to 2pi.

Called:
    Pnm06a     classical NPB matrix, IAU 2006/2000A
    Gst06      Greenwich apparent ST, IAU 2006, given NPB matrix

Reference:

    Wallace, P.T. & Capitaine, N., 2006, Astron.Astrophys. 459, 981
*/
func Gst06a(uta, utb float64, tta, ttb float64) float64 {
    var rnpb [3][3]float64
    var gst float64

    /* Classical nutation x precession x bias matrix, IAU 2000A. */
    Pnm06a(tta, ttb, &rnpb)

    /* Greenwich apparent sidereal time. */
    gst = Gst06(uta, utb, tta, ttb, rnpb)

    return gst
}

/*
Gst94 Greenwich Apparent Sidereal Time, IAU 1994

Greenwich apparent sidereal time (consistent with IAU 1982/94
resolutions).

Given:
    uta,utb  float64   UT1 as a 2-part Julian Date (Notes 1,2)

Returned (function value):
    float64   Greenwich apparent sidereal time (radians)

Notes:

 1) The UT1 date uta+utb is a Julian Date, apportioned in any
    convenient way between the argument pair.  For example,
    JD(UT1)=2450123.7 could be expressed in any of these ways, among
    others:

            uta            utb

        2450123.7           0.0       (JD method)
        2451545.0       -1421.3       (J2000 method)
        2400000.5       50123.2       (MJD method)
        2450123.5           0.2       (date & time method)

    The JD method is the most natural and convenient to use in cases
    where the loss of several decimal digits of resolution is
    acceptable.  The J2000 and MJD methods are good compromises
    between resolution and convenience.  For UT, the date & time
    method is best matched to the algorithm that is used by the Earth
    Rotation Angle function, called internally:  maximum precision is
    delivered when the uta argument is for 0hrs UT1 on the day in
    question and the utb argument lies in the range 0 to 1, or vice
    versa.

 2) The result is compatible with the IAU 1982 and 1994 resolutions,
    except that accuracy has been compromised for the sake of
    convenience in that UT is used instead of TDB (or TT) to compute
    the equation of the equinoxes.

 3) This GAST must be used only in conjunction with contemporaneous
    IAU standards such as 1976 precession, 1980 obliquity and 1982
    nutation.  It is not compatible with the IAU 2000 resolutions.

 4) The result is returned in the range 0 to 2pi.

Called:
    Gmst82     Greenwich mean sidereal time, IAU 1982
    Eqeq94     equation of the equinoxes, IAU 1994
    Anp        normalize angle into range 0 to 2pi

References:

    Explanatory Supplement to the Astronomical Almanac,
    P. Kenneth Seidelmann (ed), University Science Books (1992)

    IAU Resolution C7, Recommendation 3 (1994)
*/
func Gst94(uta, utb float64) float64 {
    var gmst82, eqeq94, gst float64

    gmst82 = Gmst82(uta, utb)
    eqeq94 = Eqeq94(uta, utb)
    gst = Anp(gmst82 + eqeq94)

    return gst
}
erast.go
0.908696
0.788217
erast.go
starcoder
package tuple type Tuple2[T1 any, T2 any] struct { v1 T1 v2 T2 } func N2[T1 any, T2 any](v1 T1, v2 T2) Tuple2[T1, T2] { return Tuple2[T1, T2]{ v1: v1, v2: v2, } } func (t Tuple2[T1, T2]) T1() T1 { return t.v1 } func (t Tuple2[T1, T2]) T2() T2 { return t.v2 } type Tuple3[T1 any, T2 any, T3 any] struct { v1 T1 v2 T2 v3 T3 } func N3[T1 any, T2 any, T3 any](v1 T1, v2 T2, v3 T3) Tuple3[T1, T2, T3] { return Tuple3[T1, T2, T3]{ v1: v1, v2: v2, v3: v3, } } func (t Tuple3[T1, T2, T3]) T1() T1 { return t.v1 } func (t Tuple3[T1, T2, T3]) T2() T2 { return t.v2 } func (t Tuple3[T1, T2, T3]) T3() T3 { return t.v3 } type Tuple4[T1 any, T2 any, T3 any, T4 any] struct { v1 T1 v2 T2 v3 T3 v4 T4 } func N4[T1 any, T2 any, T3 any, T4 any](v1 T1, v2 T2, v3 T3, v4 T4) Tuple4[T1, T2, T3, T4] { return Tuple4[T1, T2, T3, T4]{ v1: v1, v2: v2, v3: v3, v4: v4, } } func (t Tuple4[T1, T2, T3, T4]) T1() T1 { return t.v1 } func (t Tuple4[T1, T2, T3, T4]) T2() T2 { return t.v2 } func (t Tuple4[T1, T2, T3, T4]) T3() T3 { return t.v3 } func (t Tuple4[T1, T2, T3, T4]) T4() T4 { return t.v4 } type Tuple5[T1 any, T2 any, T3 any, T4 any, T5 any] struct { v1 T1 v2 T2 v3 T3 v4 T4 v5 T5 } func N5[T1 any, T2 any, T3 any, T4 any, T5 any](v1 T1, v2 T2, v3 T3, v4 T4, v5 T5) Tuple5[T1, T2, T3, T4, T5] { return Tuple5[T1, T2, T3, T4, T5]{ v1: v1, v2: v2, v3: v3, v4: v4, v5: v5, } } func (t Tuple5[T1, T2, T3, T4, T5]) T1() T1 { return t.v1 } func (t Tuple5[T1, T2, T3, T4, T5]) T2() T2 { return t.v2 } func (t Tuple5[T1, T2, T3, T4, T5]) T3() T3 { return t.v3 } func (t Tuple5[T1, T2, T3, T4, T5]) T4() T4 { return t.v4 } func (t Tuple5[T1, T2, T3, T4, T5]) T5() T5 { return t.v5 } type Tuple6[T1 any, T2 any, T3 any, T4 any, T5 any, T6 any] struct { v1 T1 v2 T2 v3 T3 v4 T4 v5 T5 v6 T6 } func N6[T1 any, T2 any, T3 any, T4 any, T5 any, T6 any](v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6) Tuple6[T1, T2, T3, T4, T5, T6] { return Tuple6[T1, T2, T3, T4, T5, T6]{ v1: v1, v2: v2, v3: v3, v4: v4, v5: v5, v6: v6, } } func (t 
Tuple6[T1, T2, T3, T4, T5, T6]) T1() T1 { return t.v1 } func (t Tuple6[T1, T2, T3, T4, T5, T6]) T2() T2 { return t.v2 } func (t Tuple6[T1, T2, T3, T4, T5, T6]) T3() T3 { return t.v3 } func (t Tuple6[T1, T2, T3, T4, T5, T6]) T4() T4 { return t.v4 } func (t Tuple6[T1, T2, T3, T4, T5, T6]) T5() T5 { return t.v5 } func (t Tuple6[T1, T2, T3, T4, T5, T6]) T6() T6 { return t.v6 } type Tuple7[T1 any, T2 any, T3 any, T4 any, T5 any, T6 any, T7 any] struct { v1 T1 v2 T2 v3 T3 v4 T4 v5 T5 v6 T6 v7 T7 } func N7[T1 any, T2 any, T3 any, T4 any, T5 any, T6 any, T7 any](v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7) Tuple7[T1, T2, T3, T4, T5, T6, T7] { return Tuple7[T1, T2, T3, T4, T5, T6, T7]{ v1: v1, v2: v2, v3: v3, v4: v4, v5: v5, v6: v6, v7: v7, } } func (t Tuple7[T1, T2, T3, T4, T5, T6, T7]) T1() T1 { return t.v1 } func (t Tuple7[T1, T2, T3, T4, T5, T6, T7]) T2() T2 { return t.v2 } func (t Tuple7[T1, T2, T3, T4, T5, T6, T7]) T3() T3 { return t.v3 } func (t Tuple7[T1, T2, T3, T4, T5, T6, T7]) T4() T4 { return t.v4 } func (t Tuple7[T1, T2, T3, T4, T5, T6, T7]) T5() T5 { return t.v5 } func (t Tuple7[T1, T2, T3, T4, T5, T6, T7]) T6() T6 { return t.v6 } func (t Tuple7[T1, T2, T3, T4, T5, T6, T7]) T7() T7 { return t.v7 } type Tuple8[T1 any, T2 any, T3 any, T4 any, T5 any, T6 any, T7 any, T8 any] struct { v1 T1 v2 T2 v3 T3 v4 T4 v5 T5 v6 T6 v7 T7 v8 T8 } func N8[T1 any, T2 any, T3 any, T4 any, T5 any, T6 any, T7 any, T8 any](v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7, v8 T8) Tuple8[T1, T2, T3, T4, T5, T6, T7, T8] { return Tuple8[T1, T2, T3, T4, T5, T6, T7, T8]{ v1: v1, v2: v2, v3: v3, v4: v4, v5: v5, v6: v6, v7: v7, v8: v8, } } func (t Tuple8[T1, T2, T3, T4, T5, T6, T7, T8]) T1() T1 { return t.v1 } func (t Tuple8[T1, T2, T3, T4, T5, T6, T7, T8]) T2() T2 { return t.v2 } func (t Tuple8[T1, T2, T3, T4, T5, T6, T7, T8]) T3() T3 { return t.v3 } func (t Tuple8[T1, T2, T3, T4, T5, T6, T7, T8]) T4() T4 { return t.v4 } func (t Tuple8[T1, T2, T3, T4, T5, T6, T7, T8]) T5() T5 { return 
t.v5 } func (t Tuple8[T1, T2, T3, T4, T5, T6, T7, T8]) T6() T6 { return t.v6 } func (t Tuple8[T1, T2, T3, T4, T5, T6, T7, T8]) T7() T7 { return t.v7 } func (t Tuple8[T1, T2, T3, T4, T5, T6, T7, T8]) T8() T8 { return t.v8 } type Tuple9[T1 any, T2 any, T3 any, T4 any, T5 any, T6 any, T7 any, T8 any, T9 any] struct { v1 T1 v2 T2 v3 T3 v4 T4 v5 T5 v6 T6 v7 T7 v8 T8 v9 T9 } func N9[T1 any, T2 any, T3 any, T4 any, T5 any, T6 any, T7 any, T8 any, T9 any](v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7, v8 T8, v9 T9) Tuple9[T1, T2, T3, T4, T5, T6, T7, T8, T9] { return Tuple9[T1, T2, T3, T4, T5, T6, T7, T8, T9]{ v1: v1, v2: v2, v3: v3, v4: v4, v5: v5, v6: v6, v7: v7, v8: v8, v9: v9, } } func (t Tuple9[T1, T2, T3, T4, T5, T6, T7, T8, T9]) T1() T1 { return t.v1 } func (t Tuple9[T1, T2, T3, T4, T5, T6, T7, T8, T9]) T2() T2 { return t.v2 } func (t Tuple9[T1, T2, T3, T4, T5, T6, T7, T8, T9]) T3() T3 { return t.v3 } func (t Tuple9[T1, T2, T3, T4, T5, T6, T7, T8, T9]) T4() T4 { return t.v4 } func (t Tuple9[T1, T2, T3, T4, T5, T6, T7, T8, T9]) T5() T5 { return t.v5 } func (t Tuple9[T1, T2, T3, T4, T5, T6, T7, T8, T9]) T6() T6 { return t.v6 } func (t Tuple9[T1, T2, T3, T4, T5, T6, T7, T8, T9]) T7() T7 { return t.v7 } func (t Tuple9[T1, T2, T3, T4, T5, T6, T7, T8, T9]) T8() T8 { return t.v8 } func (t Tuple9[T1, T2, T3, T4, T5, T6, T7, T8, T9]) T9() T9 { return t.v9 } type Tuple10[T1 any, T2 any, T3 any, T4 any, T5 any, T6 any, T7 any, T8 any, T9 any, T10 any] struct { v1 T1 v2 T2 v3 T3 v4 T4 v5 T5 v6 T6 v7 T7 v8 T8 v9 T9 v10 T10 } func N10[T1 any, T2 any, T3 any, T4 any, T5 any, T6 any, T7 any, T8 any, T9 any, T10 any](v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7, v8 T8, v9 T9, v10 T10) Tuple10[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10] { return Tuple10[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10]{ v1: v1, v2: v2, v3: v3, v4: v4, v5: v5, v6: v6, v7: v7, v8: v8, v9: v9, v10: v10, } } func (t Tuple10[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10]) T1() T1 { return t.v1 } func (t Tuple10[T1, T2, 
T3, T4, T5, T6, T7, T8, T9, T10]) T2() T2 { return t.v2 } func (t Tuple10[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10]) T3() T3 { return t.v3 } func (t Tuple10[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10]) T4() T4 { return t.v4 } func (t Tuple10[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10]) T5() T5 { return t.v5 } func (t Tuple10[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10]) T6() T6 { return t.v6 } func (t Tuple10[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10]) T7() T7 { return t.v7 } func (t Tuple10[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10]) T8() T8 { return t.v8 } func (t Tuple10[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10]) T9() T9 { return t.v9 } func (t Tuple10[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10]) T10() T10 { return t.v10 }
types/tuple/tuple.go
0.613815
0.517876
tuple.go
starcoder
package aoc2021

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/simonski/aoc/utils"
)

/*
--- Day 4: Giant Squid ---

Bingo is played on a set of boards, each a 5x5 grid of numbers.  Numbers
are drawn one at a time and marked on every board on which they appear;
a board wins as soon as any complete row or column is marked (diagonals
do not count).  A winning board's score is the sum of its unmarked
numbers multiplied by the number that was just called.

Part 1: the score of the first board to win.
Part 2: the score of the last board to win.
*/

// Y2021D04P1 solves part 1 for the test data and then the puzzle data.
func (app *Application) Y2021D04P1() {
	playGameD1(DAY_2021_04_TEST_DATA)
	playGameD1(DAY_2021_04_DATA)
}

// Game holds the draw order and the set of boards in play.
type Game struct {
	numbersToCall []int
	boards        []*Board
}

// PlayFirst calls the numbers in order and returns the first board to
// complete, together with the number that completed it.  If no board
// ever completes it returns (nil, -1).
func (game *Game) PlayFirst() (*Board, int) {
	for _, numberToCall := range game.numbersToCall {
		for _, board := range game.boards {
			board.Call(numberToCall)
			if board.IsComplete() {
				return board, numberToCall
			}
		}
	}
	return nil, -1
}

// PlayLast calls every number and returns the board that completed last,
// together with the number that completed it.  Already-complete boards
// stop receiving calls so their winning number is preserved.
func (game *Game) PlayLast() (*Board, int) {
	lastWinningBoard := game.boards[0]
	lastNumberToCall := -1
	for _, numberToCall := range game.numbersToCall {
		for _, board := range game.boards {
			if board.IsComplete() {
				continue
			}
			board.Call(numberToCall)
			if board.IsComplete() {
				lastWinningBoard = board
				lastNumberToCall = numberToCall
			}
		}
	}
	return lastWinningBoard, lastNumberToCall
}

// NewGame parses the puzzle input: line 0 is the comma-separated draw
// order; boards follow, each 5 lines long and separated by a blank line.
func NewGame(data string) *Game {
	lines := strings.Split(data, "\n")
	numbersToCall := utils.SplitDataToListOfInts(lines[0], ",")
	boards := make([]*Board, 0)
	// Skip the blank line after the draw order, then read a board every
	// 6 lines (5 board rows + 1 separator).
	for index := 2; index < len(lines); index += 6 {
		boards = append(boards, NewBoard(lines[index:index+5]))
	}
	return &Game{boards: boards, numbersToCall: numbersToCall}
}

// Board is a 5x5 bingo board plus a parallel grid of marked flags.
type Board struct {
	values [5][5]int
	marked [5][5]bool
}

// Call marks the first cell holding number and reports whether a cell
// was marked.
func (board *Board) Call(number int) bool {
	for row := 0; row < 5; row++ {
		for col := 0; col < 5; col++ {
			if board.values[row][col] == number {
				board.marked[row][col] = true
				return true
			}
		}
	}
	return false
}

// IsComplete reports whether any full row or column is marked.
// Diagonals do not count.
func (board *Board) IsComplete() bool {
	for row := 0; row < 5; row++ {
		complete := true
		for col := 0; col < 5; col++ {
			if !board.marked[row][col] {
				complete = false
				break
			}
		}
		if complete {
			return true
		}
	}
	for col := 0; col < 5; col++ {
		complete := true
		for row := 0; row < 5; row++ {
			if !board.marked[row][col] {
				complete = false
				break
			}
		}
		if complete {
			return true
		}
	}
	return false
}

// SumOfUnmarked returns the sum of every unmarked cell, used to score a
// winning board.
func (board *Board) SumOfUnmarked() int {
	total := 0
	for row := 0; row < 5; row++ {
		for col := 0; col < 5; col++ {
			if !board.marked[row][col] {
				total += board.values[row][col]
			}
		}
	}
	return total
}

// Debug returns the board's values as five space-separated rows, one per
// line.
func (b *Board) Debug() string {
	result := ""
	for row := 0; row < 5; row++ {
		for col := 0; col < 5; col++ {
			result += fmt.Sprintf("%v ", b.values[row][col])
		}
		result += "\n"
	}
	return result
}

// NewBoard parses five whitespace-separated rows of numbers into a Board.
func NewBoard(data []string) *Board {
	b := Board{}
	for row, line := range data {
		// strings.Fields splits on any run of whitespace, which copes
		// with the right-aligned single-digit columns in the input and
		// replaces the previous multi-step ReplaceAll normalization.
		for col, number := range strings.Fields(line) {
			value, _ := strconv.Atoi(number)
			b.values[row][col] = value
		}
	}
	return &b
}

// playGameD1 plays part 1 on the given input and prints the score of
// the first winning board.
func playGameD1(data string) {
	game := NewGame(data)
	winningBoard, lastNumber := game.PlayFirst()
	score := winningBoard.SumOfUnmarked() * lastNumber
	// Fixed message typo: "store" -> "score".
	fmt.Printf("The winning score is %v\n", score)
}

// playGameD2 plays part 2 on the given input and prints the score of
// the last winning board.
func playGameD2(data string) {
	game := NewGame(data)
	winningBoard, lastNumber := game.PlayLast()
	score := winningBoard.SumOfUnmarked() * lastNumber
	// Fixed message typo: "store" -> "score".
	fmt.Printf("The last winning score is %v\n", score)
}

// Y2021D04P2 solves part 2 for the test data and then the puzzle data.
func (app *Application) Y2021D04P2() {
	playGameD2(DAY_2021_04_TEST_DATA)
	playGameD2(DAY_2021_04_DATA)
}

// rename and uncomment this to the year and day in question once complete for a gold star!
// func (app *Application) Y20XXDXXP1Render() {
// }

// rename and uncomment this to the year and day in question once complete for a gold star!
// func (app *Application) Y20XXDXXP2Render() {
// }

// Y2021D04 is the reflected entry point: it runs both parts.
func (app *Application) Y2021D04() {
	app.Y2021D04P1()
	app.Y2021D04P2()
}
app/aoc2021/aoc2021_04.go
0.531696
0.472562
aoc2021_04.go
starcoder
package token //This file contains legacy code, //With updates this file will keep shrinking until being eliminated //Type rappresent a type of token type Type int //Types definition //They can rapresent the rapresented char but it is not mandatory const ( //TypeUndefined is an undefined token TypeUndefined = iota //TypeBold is an bold starting/ending token TypeBold = '*' //TypeItalic is an italic starting/ending token TypeItalic = '/' //TypeNewLine is a newline TypeNewLine = '\n' //TypeTab is a tab TypeTab = '\t' //TypeHeader is the token used in an header TypeHeader = '+' //TypeLess is the token used for subtitle and for divisor TypeLess = '-' //TypeEqual is the token used for title TypeEqual = '=' TypeSBracketOpen = '[' TypeSBracketClose = ']' TypeEscape = '\\' //TypeText is a text token TypeText = -1 TypePipe = '|' TypeQuote = '"' TypeAt = '@' TypeParagraphHeader TypeParagraphText TypeParagraphDivisor TypeParagraphTitle TypeParagraphSubtitle TypeParagraphList TypeCheckBox ) //WhitespaceEscape is a slice of types that can be escaped from the EscapeToken var WhitespaceEscape = []Type{ TypeBold, TypeItalic, TypeTab, TypeLess, } //Defaults Type Method //Type returns the type of the Token func (t BoldToken) Type() Type { return TypeBold } //Type returns the type of the Token func (t ItalicToken) Type() Type { return TypeItalic } //Type returns the type of the Token func (t NewLineToken) Type() Type { return TypeNewLine } //Type returns the type of the Token func (t TabToken) Type() Type { return TypeTab } //Type returns the type of the Token func (t HeaderToken) Type() Type { return TypeHeader } //Type returns the type of the Token func (t LessToken) Type() Type { return TypeLess } //Type returns the type of the Token func (t EqualToken) Type() Type { return TypeEqual } //Type returns the type of the Token func (t TextToken) Type() Type { return TypeText } //Type returns the type of the Token func (p HeaderParagraph) Type() Type { return TypeParagraphHeader } //Type 
returns the type of the Token func (p TextParagraph) Type() Type { return TypeParagraphText } //Type returns the type of the Token func (p DivisorParagraph) Type() Type { return TypeParagraphDivisor } //Type returns the type of the Token func (p TitleParagraph) Type() Type { return TypeParagraphTitle } //Type returns the type of the Token func (p SubtitleParagraph) Type() Type { return TypeParagraphSubtitle } //Type returns the type of the Token func (p ListParagraph) Type() Type { return TypeParagraphList } //Type returns the type of the Token func (p SBracketOpenToken) Type() Type { return TypeSBracketOpen } //Type returns the type of the Token func (p SBracketCloseToken) Type() Type { return TypeSBracketClose } //Type returns the type of the Token func (p CheckBoxToken) Type() Type { return TypeCheckBox } //Type returns the type of the Token func (p EscapeToken) Type() Type { return TypeEscape } //Type returns the type of the Token func (p PipeToken) Type() Type { return TypePipe } //Type returns the type of the Token func (p QuoteToken) Type() Type { return TypeQuote } //Type returns the type of the Token func (p AtToken) Type() Type { return TypeAt }
token/type.go
0.628635
0.442998
type.go
starcoder
package maybe

import (
	"errors"
	"fmt"
	"reflect"
)

// AoAoX implements the Maybe monad for a 2-D slice of empty interfaces.
// An AoAoX is 'valid' when it holds such a slice and 'invalid' when it
// holds an error.  The zero value is invalid: Unbox() reports it as such.
type AoAoX struct {
	just [][]interface{}
	err  error
}

// NewAoAoX wraps a 2-D slice or an error: a non-nil e yields
// ErrAoAoX(e), otherwise JustAoAoX(x).
func NewAoAoX(x [][]interface{}, e error) AoAoX {
	if e != nil {
		return ErrAoAoX(e)
	}
	return JustAoAoX(x)
}

var errAoAoXNotSlice = errors.New("NewAoAoXFromSlice called with non-slice-of-slices")

// NewAoAoXFromSlice builds an AoAoX from an arbitrary slice of slices.
// A non-nil e yields ErrAoAoX(e); a value that is not a slice of slices
// yields ErrAoAoX(errAoAoXNotSlice); otherwise each inner slice is
// converted to []interface{} and the result wrapped with JustAoAoX.
func NewAoAoXFromSlice(x interface{}, e error) AoAoX {
	if e != nil {
		return ErrAoAoX(e)
	}
	if x == nil || reflect.TypeOf(x).Kind() != reflect.Slice {
		return ErrAoAoX(errAoAoXNotSlice)
	}
	outer := reflect.ValueOf(x)
	rows := make([][]interface{}, outer.Len())
	for i := range rows {
		row, err := NewAoXFromSlice(outer.Index(i).Interface(), nil).Unbox()
		if err != nil {
			return ErrAoAoX(errAoAoXNotSlice)
		}
		rows[i] = row
	}
	return JustAoAoX(rows)
}

// JustAoAoX constructs a valid AoAoX.
func JustAoAoX(x [][]interface{}) AoAoX {
	return AoAoX{just: x}
}

// ErrAoAoX constructs an invalid AoAoX.
func ErrAoAoX(e error) AoAoX {
	return AoAoX{err: e}
}

// IsErr reports whether the AoAoX is invalid (no slice, or an error set).
func (a AoAoX) IsErr() bool {
	return a.just == nil || a.err != nil
}

// Bind applies f to the contained slice; an invalid AoAoX is returned
// unchanged.
func (a AoAoX) Bind(f func(x [][]interface{}) AoAoX) AoAoX {
	if a.IsErr() {
		return a
	}
	return f(a.just)
}

// Join maps each row through f (which reduces a row to a single X) and
// collects the results into an AoX.  The first row-level error aborts
// and is propagated.
func (a AoAoX) Join(f func(x []interface{}) X) AoX {
	if a.IsErr() {
		return ErrAoX(a.err)
	}
	joined := make([]interface{}, len(a.just))
	for i, row := range a.just {
		v, err := f(row).Unbox()
		if err != nil {
			return ErrAoX(err)
		}
		joined[i] = v
	}
	return JustAoX(joined)
}

// Flatten concatenates every row into a single 1-D AoX.
func (a AoAoX) Flatten() AoX {
	if a.IsErr() {
		return ErrAoX(a.err)
	}
	total := 0
	for _, row := range a.just {
		total += len(row)
	}
	flat := make([]interface{}, 0, total)
	for _, row := range a.just {
		flat = append(flat, row...)
	}
	return JustAoX(flat)
}

// Map applies f to each row and gathers the results into a new AoAoX.
// An invalid receiver, or any invalid AoX produced by f, makes the
// result invalid.
func (a AoAoX) Map(f func(x []interface{}) AoX) AoAoX {
	if a.IsErr() {
		return a
	}
	rows := make([][]interface{}, len(a.just))
	for i, row := range a.just {
		mapped, err := f(row).Unbox()
		if err != nil {
			return ErrAoAoX(err)
		}
		rows[i] = mapped
	}
	return JustAoAoX(rows)
}

// String renders the value for debugging.
func (a AoAoX) String() string {
	if a.IsErr() {
		return fmt.Sprintf("Err %v", a.err)
	}
	return fmt.Sprintf("Just %v", a.just)
}

// Unbox returns the contained slice or error; the zero value yields a
// dedicated "zero-value AoAoX" error.
func (a AoAoX) Unbox() ([][]interface{}, error) {
	if a.just == nil && a.err == nil {
		return nil, errors.New("zero-value AoAoX")
	}
	return a.just, a.err
}
aoaox.go
0.728941
0.439026
aoaox.go
starcoder
package statext

import (
	"github.com/argusdusty/gofft"
	"golang.org/x/exp/rand"
	"gonum.org/v1/gonum/stat/sampleuv"
	"math"
)

// PoissonBinomial represents a random variable whose value is the sum of
// independent Bernoulli trials that are not necessarily identically distributed.
// The value of entries in P must be between 0 and 1.
// More information at https://en.wikipedia.org/wiki/Poisson_binomial_distribution.
type PoissonBinomial struct {
	// p holds the per-trial success probabilities.
	p []float64
	// dim is never written or read in this chunk — TODO confirm it is
	// used elsewhere or remove it.
	dim int
	// src is the random source passed to the constructor.
	// NOTE(review): src is stored but Rand() passes nil to
	// sampleuv.NewWeighted — presumably src was meant to be used there;
	// verify against callers.
	src rand.Source
	// pmf[k] is P(X == k); cdf[k] is P(X <= k).  Both are precomputed
	// in the constructor.
	pmf []float64
	cdf []float64
}

// NewPoissonBinomial creates a new Poisson binomial distribution with the given parameters p.
// NewPoissonBinomial will panic if len(p) == 0, or if any p is < 0 or > 1.
func NewPoissonBinomial(p []float64, src rand.Source) PoissonBinomial {
	if len(p) == 0 {
		panic("poisson binomial: zero dimensional input")
	}
	for _, v := range p {
		if v < 0 {
			panic("poisson binomial: prob less than 0")
		} else if v > 1 {
			panic("poisson binomial: prob greater than 1")
		}
	}
	dist := PoissonBinomial{
		p:   p,
		src: src,
	}
	// Precompute the pmf first; computeCdf reads dist.pmf.
	dist.pmf = dist.computePmf()
	dist.cdf = dist.computeCdf()
	return dist
}

// computePmf computes the pmf of the Poisson binomial distribution
// Running time: O(N*log(N)^2)
func (p PoissonBinomial) computePmf() []float64 {
	// Handle the small cases quickly
	// (hand-expanded products of the per-trial polynomials (1-p_i) + p_i*x).
	switch len(p.p) {
	case 1:
		return []float64{1 - p.p[0], p.p[0]}
	case 2:
		p0 := p.p[0]
		p1 := p.p[1]
		return []float64{(1 - p0) * (1 - p1), (1-p0)*p1 + p0*(1-p1), p0 * p1}
	case 3:
		p0 := p.p[0]
		p1 := p.p[1]
		p2 := p.p[2]
		return []float64{(1 - p0) * (1 - p1) * (1 - p2), (1-p0)*(1-p1)*p2 + (1-p0)*p1*(1-p2) + p0*(1-p1)*(1-p2), p0*p1*(1-p2) + p0*(1-p1)*p2 + (1-p0)*p1*p2, p0 * p1 * p2}
	case 4:
		p0 := p.p[0]
		p1 := p.p[1]
		p2 := p.p[2]
		p3 := p.p[3]
		return []float64{(1 - p0) * (1 - p1) * (1 - p2) * (1 - p3), (1-p0)*(1-p1)*(1-p2)*p3 + (1-p0)*(1-p1)*p2*(1-p3) + (1-p0)*p1*(1-p2)*(1-p3) + p0*(1-p1)*(1-p2)*(1-p3), (1-p0)*(1-p1)*p2*p3 + (1-p0)*p1*(1-p2)*p3 + p0*(1-p1)*(1-p2)*p3 + (1-p0)*p1*p2*(1-p3) + p0*(1-p1)*p2*(1-p3) + p0*p1*(1-p2)*(1-p3), (1-p0)*p1*p2*p3 + p0*(1-p1)*p2*p3 + p0*p1*(1-p2)*p3 + p0*p1*p2*(1-p3), p0 * p1 * p2 * p3}
	}
	// General case: multiply all the degree-1 polynomials together with
	// a batched FFT convolution.
	m := 4 // Starting block size
	N := len(p.p) + 1
	// Number of probability arrays to convolve
	n := gofft.NextPow2(N)
	// Working space
	data := make([]complex128, n*m)
	for i, x := range p.p {
		// Initialize arrays to [1-x, x, 0, 0]
		data[i*m] = complex(1-x, 0)
		data[i*m+1] = complex(x, 0)
	}
	for i := N - 1; i < n; i++ {
		// "zero"-pad out to next power of 2
		// Using arrays of [1, 0, 0, 0]
		data[i*m] = 1
	}
	// Do the FFT convolutions
	err := gofft.FastMultiConvolve(data, m, true)
	if err != nil {
		panic(err)
	}
	// Only the first N entries are meaningful pmf values.
	pmf := gofft.Complex128ToFloat64Array(data[:N])
	return pmf
}

// computeCdf builds the running prefix sum of the precomputed pmf, so
// cdf[k] = P(X <= k).  Must be called after computePmf.
func (p PoissonBinomial) computeCdf() []float64 {
	cdf := make([]float64, len(p.pmf))
	var t float64
	for i := 0; i < len(p.pmf); i++ {
		t += p.pmf[i]
		cdf[i] = t
	}
	return cdf
}

// CDF computes the value of the cumulative distribution function at x.
func (p PoissonBinomial) CDF(x float64) float64 {
	if x < 0 {
		return 0
	}
	if x <= float64(len(p.p)) {
		// int(x) truncates toward zero, which is the correct floor for
		// this discrete distribution on non-negative x.
		return p.cdf[int(x)]
	}
	return 1
}

// ExKurtosis returns the excess kurtosis of the distribution.
func (p PoissonBinomial) ExKurtosis() float64 {
	var exkurtosis float64
	for _, prob := range p.p {
		exkurtosis += (1 - 6*(1-prob)*prob) * (1 - prob) * prob
	}
	// Normalize by sigma^4.
	exkurtosis /= math.Pow(p.StdDev(), 4)
	return exkurtosis
}

// LogProb computes the natural logarithm of the value of the probability
// density function at x.
// Non-integer or out-of-support x yields -Inf.
func (p PoissonBinomial) LogProb(x float64) float64 {
	if x < 0 || x > float64(len(p.p)) || math.Floor(x) != x {
		return math.Inf(-1)
	}
	return math.Log(p.Prob(x))
}

// Mean returns the mean of the probability distribution.
func (p PoissonBinomial) Mean() float64 {
	var mean float64
	for _, prob := range p.p {
		mean += prob
	}
	return mean
}

// NumParameters returns the number of parameters in the distribution.
func (p PoissonBinomial) NumParameters() int {
	return len(p.p)
}

// Prob computes the value of the probability density function at x.
// Non-integer or out-of-support x yields 0.
func (p PoissonBinomial) Prob(x float64) float64 {
	if x < 0 || x > float64(len(p.p)) || math.Floor(x) != x {
		return 0
	}
	return p.pmf[int(x)]
}

// Rand returns a random sample drawn from the distribution.
func (p PoissonBinomial) Rand() float64 {
	// NOTE(review): passes nil rather than p.src as the random source —
	// confirm whether the stored source was intended here.
	idx, _ := sampleuv.NewWeighted(p.pmf, nil).Take()
	return float64(idx)
}

// Skewness returns the skewness of the distribution.
func (p PoissonBinomial) Skewness() float64 {
	var skewness float64
	for _, prob := range p.p {
		skewness += (1 - 2*prob) * (1 - prob) * prob
	}
	// Normalize by sigma^3.
	skewness /= math.Pow(p.StdDev(), 3)
	return skewness
}

// StdDev returns the standard deviation of the probability distribution.
func (p PoissonBinomial) StdDev() float64 {
	return math.Sqrt(p.Variance())
}

// Survival returns the survival function (complementary CDF) at x.
func (p PoissonBinomial) Survival(x float64) float64 {
	return 1 - p.CDF(x)
}

// Variance returns the variance of the probability distribution.
func (p PoissonBinomial) Variance() float64 {
	var variance float64
	for _, prob := range p.p {
		variance += (1 - prob) * prob
	}
	return variance
}
poissonbinomial.go
0.794863
0.530845
poissonbinomial.go
starcoder
package values

import (
	"fmt"
	"reflect"
	"strings"
)

// StringSliceValue wraps a []string and offers assertion-style queries
// over its contents.
type StringSliceValue struct {
	value []string
}

// IsEqualTo reports whether the wrapped slice deep-equals expected.
func (s StringSliceValue) IsEqualTo(expected interface{}) bool {
	return reflect.DeepEqual(s.value, expected)
}

// IsEmpty reports whether the slice has no elements.
func (s StringSliceValue) IsEmpty() bool {
	return len(s.value) == 0
}

// IsNotEmpty reports whether the slice has at least one element.
func (s StringSliceValue) IsNotEmpty() bool {
	return !s.IsEmpty()
}

// HasSize reports whether the slice has exactly length elements.
func (s StringSliceValue) HasSize(length int) bool {
	return len(s.value) == length
}

// Size returns the number of elements in the slice.
func (s StringSliceValue) Size() int {
	return len(s.value)
}

// contains reports whether element occurs anywhere in the slice.
func (s StringSliceValue) contains(element string) bool {
	for _, candidate := range s.value {
		if candidate == element {
			return true
		}
	}
	return false
}

// Contains reports whether the slice holds the given element (string)
// or every given element ([]string).  Any other argument type yields
// false.
func (s StringSliceValue) Contains(elements interface{}) bool {
	switch wanted := elements.(type) {
	case string:
		return s.contains(wanted)
	case []string:
		for _, w := range wanted {
			if !s.contains(w) {
				return false
			}
		}
		return true
	default:
		return false
	}
}

// DoesNotContain is the negation of Contains.
func (s StringSliceValue) DoesNotContain(elements interface{}) bool {
	return !s.Contains(elements)
}

// ContainsOnly reports whether the slice holds exactly the given
// element(s): the sizes must match and every expected element must be
// present.
func (s StringSliceValue) ContainsOnly(elements interface{}) bool {
	switch wanted := elements.(type) {
	case string:
		return s.HasSize(1) && s.contains(wanted)
	case []string:
		return s.HasSize(len(wanted)) && s.Contains(elements)
	default:
		return false
	}
}

// Value exposes the wrapped slice.
func (s StringSliceValue) Value() interface{} {
	return s.value
}

// NewStringSliceValue wraps the given []string; any other argument type
// panics.
func NewStringSliceValue(value interface{}) StringSliceValue {
	v, ok := value.([]string)
	if !ok {
		panic(fmt.Sprintf("expected string slice value type but got %T type", value))
	}
	return StringSliceValue{value: v}
}

// String renders the elements comma-joined.
func (s StringSliceValue) String() string {
	return strings.Join(s.value, ",")
}
internal/pkg/values/string_slice_value.go
0.842378
0.547404
string_slice_value.go
starcoder
package rel type ExistsNode FunctionNode func NewExistsNode(v Visitable) *ExistsNode { return &ExistsNode{ Expressions: []Visitable{v}, } } func (node ExistsNode) Desc() *DescendingNode { return orderingDesc(node) } func (node ExistsNode) Asc() *AscendingNode { return orderingAsc(node) } func (node ExistsNode) Eq(visitable Visitable) *EqualityNode { return predicationEq(node, visitable) } func (node ExistsNode) EqAny(visitables ...Visitable) *GroupingNode { return predicationEqAny(node, visitables...) } func (node ExistsNode) EqAll(visitables ...Visitable) *GroupingNode { return predicationEqAll(node, visitables...) } func (node ExistsNode) Lt(visitable Visitable) *LessThanNode { return predicationLt(node, visitable) } func (node ExistsNode) LtAny(visitables ...Visitable) *GroupingNode { return predicationLtAny(node, visitables...) } func (node ExistsNode) LtAll(visitables ...Visitable) *GroupingNode { return predicationLtAll(node, visitables...) } func (node ExistsNode) LtEq(visitable Visitable) *LessThanOrEqualNode { return predicationLtEq(node, visitable) } func (node ExistsNode) LtEqAny(visitables ...Visitable) *GroupingNode { return predicationLtEqAny(node, visitables...) } func (node ExistsNode) LtEqAll(visitables ...Visitable) *GroupingNode { return predicationLtEqAll(node, visitables...) } func (node ExistsNode) Gt(visitable Visitable) *GreaterThanNode { return predicationGt(node, visitable) } func (node ExistsNode) GtAny(visitables ...Visitable) *GroupingNode { return predicationGtAny(node, visitables...) } func (node ExistsNode) GtAll(visitables ...Visitable) *GroupingNode { return predicationGtAll(node, visitables...) } func (node ExistsNode) GtEq(visitable Visitable) *GreaterThanOrEqualNode { return predicationGtEq(node, visitable) } func (node ExistsNode) GtEqAny(visitables ...Visitable) *GroupingNode { return predicationGtEqAny(node, visitables...) 
} func (node ExistsNode) GtEqAll(visitables ...Visitable) *GroupingNode { return predicationGtEqAll(node, visitables...) } func (node ExistsNode) Count() *CountNode { return predicationCount(node) } func (node ExistsNode) Extract(literal SqlLiteralNode) *ExtractNode { return predicationExtract(node, literal) } func (node ExistsNode) As(literal SqlLiteralNode) *AsNode { return aliasPredicationAs(node, literal) } func (node ExistsNode) In(visitables []Visitable) Visitable { return predicationIn(node, visitables) } func (node ExistsNode) InAny(visitableslices ...[]Visitable) Visitable { return predicationInAny(node, visitableslices...) } func (node ExistsNode) InAll(visitableslices ...[]Visitable) Visitable { return predicationInAll(node, visitableslices...) } func (node ExistsNode) NotIn(visitables []Visitable) Visitable { return predicationNotIn(node, visitables) } func (node ExistsNode) NotInAny(visitableslices ...[]Visitable) Visitable { return predicationNotInAny(node, visitableslices...) } func (node ExistsNode) NotInAll(visitableslices ...[]Visitable) Visitable { return predicationNotInAll(node, visitableslices...) } func (node ExistsNode) NotEq(visitable Visitable) *NotEqualNode { return predicationNotEq(node, visitable) } func (node ExistsNode) NotEqAny(visitables ...Visitable) *GroupingNode { return predicationNotEqAny(node, visitables...) } func (node ExistsNode) NotEqAll(visitables ...Visitable) *GroupingNode { return predicationNotEqAll(node, visitables...) } func (node ExistsNode) DoesNotMatch(literal SqlLiteralNode) *DoesNotMatchNode { return predicationDoesNotMatch(node, literal) } func (node ExistsNode) DoesNotMatchAny(literals ...SqlLiteralNode) *GroupingNode { return predicationDoesNotMatchAny(node, literals...) } func (node ExistsNode) DoesNotMatchAll(literals ...SqlLiteralNode) *GroupingNode { return predicationDoesNotMatchAll(node, literals...) 
} func (node ExistsNode) Matches(literal SqlLiteralNode) *MatchesNode { return predicationMatches(node, literal) } func (node ExistsNode) MatchesAny(literals ...SqlLiteralNode) *GroupingNode { return predicationMatchesAny(node, literals...) } func (node ExistsNode) MatchesAll(literals ...SqlLiteralNode) *GroupingNode { return predicationMatchesAll(node, literals...) }
exists_node.go
0.756717
0.630116
exists_node.go
starcoder
Copyright 2019 The Go Authors. All rights reserved.
Use of this source code is governed by a BSD-style license that can be
found in the NOTICE.md file.
*/

package remote

// region is an HTTP-range-request-compliant range.
// "b" is the beginning byte of the range and "e" is the end.
// "e" must be inclusive, matching HTTP's range expression.
type region struct{ b, e int64 }

// size returns the number of bytes covered by the (inclusive) range.
func (c region) size() int64 {
	return c.e - c.b + 1
}

// superRegion returns the smallest single region covering every region
// in regs. regs must be non-empty (regs[0] is read unconditionally).
func superRegion(regs []region) region {
	s := regs[0]
	for _, reg := range regs {
		if reg.b < s.b {
			s.b = reg.b
		}
		if reg.e > s.e {
			s.e = reg.e
		}
	}
	return s
}

// regionSet is a set of regions
type regionSet struct {
	rs []region // must be kept sorted
}

// add attempts to merge r into rs.rs, squashing the regions as
// small as possible. This operation takes O(n).
// TODO: more efficient way to do it.
func (rs *regionSet) add(r region) {
	// Iterate over the sorted region slice from the tail.
	// a) When an overlap occurs, adjust `r` to fully contain the current region
	//    `l` and remove `l` from the region slice.
	// b) Once l.e becomes less than r.b, no overlap will occur again. So immediately
	//    insert `r`, which fully contains all overlapped regions, into the region slice.
	//    Here, `r` is inserted into the region slice keeping it sorted, without
	//    overlapping any regions.
	// *) If any `l` contains `r`, we don't need to do anything so return immediately.
	for i := len(rs.rs) - 1; i >= 0; i-- {
		l := &rs.rs[i]

		// *) l contains r
		if l.b <= r.b && r.e <= l.e {
			return
		}

		// a) r overlaps l on the right (or is adjacent, hence l.e+1), so grow r
		//    leftward to absorb l and remove l from the region slice.
		if l.b <= r.b && r.b <= l.e+1 && l.e <= r.e {
			r.b = l.b
			rs.rs = append(rs.rs[:i], rs.rs[i+1:]...)
			continue
		}

		// a) r overlaps l on the left (or is adjacent, hence r.e+1), so grow r
		//    rightward to absorb l and remove l from the region slice.
		if r.b <= l.b && l.b <= r.e+1 && r.e <= l.e {
			r.e = l.e
			rs.rs = append(rs.rs[:i], rs.rs[i+1:]...)
			continue
		}

		// a) r fully contains l; just drop l.
		if r.b <= l.b && l.e <= r.e {
			rs.rs = append(rs.rs[:i], rs.rs[i+1:]...)
			continue
		}

		// b) No overlap will occur after this iteration. Insert r into the
		//    region slice immediately (just after position i, keeping order).
		if l.e < r.b {
			rs.rs = append(rs.rs[:i+1], append([]region{r}, rs.rs[i+1:]...)...)
			return
		}

		// No overlap occurs yet. See the next region.
	}

	// r is the lowest region among the regions in the slice; prepend it.
	rs.rs = append([]region{r}, rs.rs...)
}

// totalSize returns the sum of the sizes of all regions in the set.
func (rs *regionSet) totalSize() int64 {
	var sz int64
	for _, f := range rs.rs {
		sz += f.size()
	}
	return sz
}
fs/remote/util.go
0.683314
0.469338
util.go
starcoder
package quickunion

import (
	"fmt"

	"github.com/ivanlemeshev/algorithms-go/unionfind"
)

// WeightedQuickUnion is an implementation of the union–find data type. This
// implementation uses weighted quick union by size (without path compression).
// The constructor takes O(n), where n is the number of elements. The union and
// find operations take O(log n) time in the worst case. The count operation
// takes O(1) time.
type WeightedQuickUnion struct {
	parent []int // parent[i] is the parent of element i
	size   []int // size[i] is the number of elements in the tree rooted at i
	count  int   // number of disjoint sets
}

// New initializes an empty union-find data structure with n elements from 0
// through n-1. Initially, each element is in its own set.
func New(n int) unionfind.UnionFind {
	parent := make([]int, n)
	size := make([]int, n)
	for i := 0; i < n; i++ {
		parent[i] = i
		// BUG FIX: each singleton tree has size 1, not i. Initializing
		// size[i] = i made element 0's tree weightless and broke the
		// "attach smaller tree under larger" invariant.
		size[i] = 1
	}
	return &WeightedQuickUnion{
		parent: parent,
		size:   size,
		count:  n,
	}
}

// Union merges the set containing element p with the set containing element q.
// It returns an error when either index is out of range.
func (wqu *WeightedQuickUnion) Union(p, q int) error {
	if err := wqu.validate(p); err != nil {
		return err
	}
	if err := wqu.validate(q); err != nil {
		return err
	}

	rootP := wqu.find(p)
	rootQ := wqu.find(q)

	// p and q are already in the same component.
	if rootP == rootQ {
		return nil
	}

	// Make the smaller root point to the larger one to keep trees shallow.
	if wqu.size[rootP] < wqu.size[rootQ] {
		wqu.parent[rootP] = rootQ
		wqu.size[rootQ] += wqu.size[rootP]
	} else {
		wqu.parent[rootQ] = rootP
		wqu.size[rootP] += wqu.size[rootQ]
	}
	wqu.count--
	return nil
}

// Find returns the canonical element of the set containing element p,
// or an error when p is out of range.
func (wqu *WeightedQuickUnion) Find(p int) (int, error) {
	if err := wqu.validate(p); err != nil {
		return 0, err
	}
	return wqu.find(p), nil
}

// IsConnected returns true if the two elements are in the same set.
func (wqu *WeightedQuickUnion) IsConnected(p, q int) (bool, error) {
	if err := wqu.validate(p); err != nil {
		return false, err
	}
	if err := wqu.validate(q); err != nil {
		return false, err
	}
	rootP := wqu.find(p)
	rootQ := wqu.find(q)
	return rootP == rootQ, nil
}

// Count returns the number of disjoint sets.
func (wqu *WeightedQuickUnion) Count() int {
	return wqu.count
}

// validate returns an error unless p is a valid element index.
func (wqu *WeightedQuickUnion) validate(p int) error {
	n := len(wqu.parent)
	if p < 0 || p >= n {
		return fmt.Errorf("index %d is not between 0 and %d", p, n-1)
	}
	return nil
}

// find returns the canonical element (root) of the set containing element p
// by walking parent links to the root.
func (wqu *WeightedQuickUnion) find(p int) int {
	for p != wqu.parent[p] {
		p = wqu.parent[p]
	}
	return p
}
unionfind/weightedquickunion/weightedquickunion.go
0.823044
0.537041
weightedquickunion.go
starcoder
func (r receiver) identifier(parameters) (return(s)){...}. 6. Function as a expression 7. function return type, means function returning a function type. As function are normal return types like Int in GOLANG Q: how to read (r reciever ) identifier(parameter) -- Identfier(parameter) func have access to the value of type receiver AGAIN SAY : A func have access to the value(r) of receiver type. https://play.golang.org/p/JnjLU9enrW5 in above code */ package main import ( "fmt" ) func main() { fmt.Println("hello") reciepe() /*func calling semantics func (r receiver) identifier (paramters)(retrun (s){...code}) -----(called paramter attached to the type) Q :now the difference between paramter and argument? A: When calling function like // In GO everything is call by value only that's it */ // firstreciepe("<NAME>") // ingred1, ingred2 := secondreciepe("Ans : sarso ", "palak") // fmt.Println(ingred1) // fmt.Println(ingred2) fmt.Println(secondreciepe("Ans : sarso ", "palak")) // // Variadic paramter func var mealgood bool var newpricelist []int xi := []int{5, 10, 15, 20} sum, mealgood, newpricelist := pricerangeeofmeal(xi...) //xi...is called unfurling of slice // here pricerangeeofmeal is looking for int but xi is slice of int, whereas (x ...int) is expecting //unlimited number of int. So we need to unfurl the slice like (x ...int) where we are calling the //pricerangeeofmeal like in above line sum, mealgood, newpricelist := pricerangeeofmeal(xi...) pricemenu := []string{"veg", "non-veg"} pricemenufunc("myprice") pricemenufunc("veg:", "non-veg", "jains") fmt.Println(pricemenu) fmt.Println() fmt.Println(sum, mealgood) for i, v := range newpricelist { fmt.Println(i, v) } //unfurling of array //Assigning a function to a variable, called function expression: x := func(y int) { fmt.Println("Assigning a function to a variable", y) } x(1980) // Func returning a func type. SO declare a func with func return type. // Function are first class citizen :-) -- //1. 
function can be return type //2. function can be assigned to a variable //3. function can be passed as an argument to a function fmt.Printf("%T\n", bar()) fmt.Printf("%T", bar()()) fmt.Println("\nz :", bar()()) /* SO CALLBACK is pass a func as an argument. Prog: create a function that sums even numbers from slice*/ ii := []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} s := sum(ii...) fmt.Println(s) } func sum(xii ...int) int { //evenlist []int s := 0 var ii []int for _, v := range xii { if v%2 == 0 { s += v ii = append(ii, v) } } fmt.Println(ii) return s } func bar() func() int { return func() int { return 111 } } func pricemenufunc(s string, x ...string) int { sums := 0 fmt.Println(s) fmt.Println(len(x)) fmt.Println(cap(x)) return sums } func pricerangeeofmeal(x ...int) (int, bool, []int) { var sum int for i, v := range x { fmt.Println(i, v) sum += v } var newpricelist []int = append(x, 50) fmt.Println("Sum of all meals :", sum) return sum, true, newpricelist } func reciepe() { fmt.Println("I am in reciepe") } func firstreciepe(a string) { fmt.Println("my", a) } func secondreciepe(firstingred string, secondingred string) (string, bool) { a := fmt.Sprint(firstingred, secondingred, " the two main ingredient of sarsoo da saag") b := true return a, b }
28_kk-src/Functions/Func.go
0.588889
0.450057
Func.go
starcoder
package griddlersolver import "github.com/yinonavraham/go-griddlersolver/grid" // Solution is an iterator over states - from the initial (empty) state to the last solved state type Solution interface { // Next returns the next state in the solution and an indicator whether there are more states Next() (state grid.Grid, hasNext bool) } type solution struct { problem Problem } func (s *solution) Next() (state grid.Grid, hasNext bool) { return nil, false } func (s *solution) updateProbabilities(g grid.Grid, rowCount int, colCount int, rowPossibleSolutions []possibleSolutions, colPossibleSolutions []possibleSolutions) (hasNext bool) { changed := false for r := 0; r < rowCount; r++ { for c := 0; c < colCount; c++ { value := g.GetCell(r, c) if value == grid.FullCellValue || value == grid.EmptyCellValue { continue } changed = true // - rows trueCount := 0 falseCount := 0 for i := 0; i < len(rowPossibleSolutions[r]); i++ { if rowPossibleSolutions[r][i][c] == true { trueCount++ } else { falseCount++ } } if trueCount == 0 { g.(grid.MutableGrid).SetCell(r, c, grid.EmptyCellValue) continue } if falseCount == 0 { g.(grid.MutableGrid).SetCell(r, c, grid.FullCellValue) continue } intermediateValue := intermediateCellValue{} intermediateValue.fullPossibilities = trueCount intermediateValue.emptyPossibilities = falseCount g.(grid.MutableGrid).SetCell(r, c, intermediateValue) // - columns trueCount = 0 falseCount = 0 for i := 0; i < len(colPossibleSolutions[c]); i++ { if colPossibleSolutions[c][i][r] == true { trueCount++ } else { falseCount++ } } if trueCount == 0 { intermediateValue.fullPossibilities = 0 } intermediateValue.fullPossibilities += trueCount if falseCount == 0 { intermediateValue.emptyPossibilities = 0 } intermediateValue.emptyPossibilities += falseCount g.(grid.MutableGrid).SetCell(r, c, intermediateValue) } } return changed } func (s *solution) removeFalseSolutions(g grid.Grid, rowCount int, colCount int, rowPossibleSolutions []possibleSolutions, 
colPossibleSolutions []possibleSolutions) (hasNext bool) { changed := false for r := 0; r < rowCount; r++ { for c := 0; c < colCount; c++ { value := g.GetCell(r, c) if value == grid.ZeroCellValue { continue } expected := false if v, ok := value.(intermediateCellValue); ok { if v.emptyPossibilities != 0 && v.fullPossibilities != 0 { continue } expected = v.fullPossibilities != 0 } else { expected = value == grid.FullCellValue } // - rows var toRemove []int for i := 0; i < len(rowPossibleSolutions[r]); i++ { if rowPossibleSolutions[r][i][c] != expected { toRemove = append(toRemove, i) } } for i := len(toRemove)-1; i >= 0; i-- { rowPossibleSolutions[r] = removeAt(rowPossibleSolutions[r], i) } // - columns toRemove = []int{} for i := 0; i < len(colPossibleSolutions[c]); i++ { if colPossibleSolutions[c][i][r] != expected { toRemove = append(toRemove, i) } } for i := len(toRemove)-1; i >= 0; i-- { colPossibleSolutions[c] = removeAt(colPossibleSolutions[c], i) } changed = true } } return changed } type possibleSolutions []possibleSolution type possibleSolution []bool func (s possibleSolution) Clone() possibleSolution { clone := make(possibleSolution, len(s)) for _, v := range s { clone = append(clone, v) } return clone } func removeAt(s possibleSolutions, index int) possibleSolutions { s1 := make(possibleSolutions, len(s)-1) for i := 0; i < len(s); i++ { if i == index { continue } s1 = append(s1, s[i]) } return s1 } func calcLinePossibleSolutions(def Definition, size int) possibleSolutions { solutions := possibleSolutions{} var solution = make(possibleSolution, size) for i := 0; i < size; i++ { solution[i] = false } calcRecursive(&solutions, solution, def, size, 0, 0) return solutions } func calcTotalLength(def Definition, part int) int { total := 0 for i := part; i < len(def); i++ { if i > part { total += 1 // Add space } total += def[i] } return total } func calcRecursive(solutions *possibleSolutions, solution possibleSolution, def Definition, size int, location int, 
part int) bool { // If no more parts to lay - add the solution and return true if part >= len(def) { *solutions = append(*solutions, solution.Clone()) return true } // If the remaining parts cannot fit in the current location and size - return false var defTotalLength = calcTotalLength(def, part) if defTotalLength > (size - location) { return false } // For each location from current location to size for i := location; i < (size - defTotalLength + 1); i++ { // - Place the current part and continue recursively to the next part for j := location; j < size; j++ { solution[j] = false } for j := i; j < i+def[part]; j++ { solution[j] = true } result := calcRecursive(solutions, solution, def, size, i+def[part]+1, part+1) // - If the return value is false - break if result == false { break } } return true } type intermediateCellValue struct { fullPossibilities int emptyPossibilities int } func (v intermediateCellValue) Value() float32 { return float32(v.fullPossibilities) / float32(v.fullPossibilities+v.emptyPossibilities) }
solution.go
0.71113
0.470554
solution.go
starcoder
package display var Bits2 = &Ind2Bit{ Threshold1_5: 0.15, Threshold2: 0.3, Threshold2_5: 0.45, Threshold3: 0.55, Threshold3_5: 0.7, Threshold4: 0.8, } type Ind2Bit struct { Threshold1_5 float64 Threshold2 float64 Threshold2_5 float64 Threshold3 float64 Threshold3_5 float64 Threshold4 float64 } type Indexizer interface { Indexize(intens float64, posx, posy int) byte } // Color2Bit calculates 2-bit color index for input intensity (clamped to 0-1). // To introduce lighting, add offset and/or multiply intensity by scale factor. func (i *Ind2Bit) Indexize(intens float64, posx, posy int) byte { var color byte var dither bool switch { case intens >= i.Threshold4: color = 4 case intens >= i.Threshold3_5: color = 4 dither = true case intens >= i.Threshold3: color = 3 case intens >= i.Threshold2_5: color = 3 dither = true case intens >= i.Threshold2: color = 2 case intens >= i.Threshold1_5: color = 2 dither = true default: color = 1 } if dither && (posx%2 == posy%2) { color-- } return color } func TriShaderIndexed(iim IndexedImage, tx0, ty0, tx1, ty1, tx2, ty2 int, lightOffset, lightScale float64) func(o *TriangleShaderOpts) { tx0m := float64(tx0) tx1m := float64(tx1) tx2m := float64(tx2) ty0m := float64(ty0) ty1m := float64(ty1) ty2m := float64(ty2) return func(o *TriangleShaderOpts) { cx := int(tx0m*o.W0 + tx1m*o.W1 + tx2m*o.W2) cy := int(ty0m*o.W0 + ty1m*o.W1 + ty2m*o.W2) c := iim.Pixels[cx+cy*iim.Width] if c == 0 { return } in := o.Lights.Light(c, int(o.X), int(o.Y)) col := o.Indexizer.Indexize(in, int(o.X), int(o.Y)) o.Buffer[o.BufferOffset] = col } } func PerspTriShaderIndexed(iim IndexedImage, tx0, ty0, tx1, ty1, tx2, ty2 int, z0, z1, z2 float64) func(o *TriangleShaderOpts) { tx0m := float64(tx0) * z0 tx1m := float64(tx1) * z1 tx2m := float64(tx2) * z2 ty0m := float64(ty0) * z0 ty1m := float64(ty1) * z1 ty2m := float64(ty2) * z2 return func(o *TriangleShaderOpts) { rz := 1 / (z0*o.W0 + z1*o.W1 + z2*o.W2) cx := int((tx0m*o.W0 + tx1m*o.W1 + tx2m*o.W2) * rz) cy := 
int((ty0m*o.W0 + ty1m*o.W1 + ty2m*o.W2) * rz) c := iim.Pixels[cx+cy*iim.Width] if c == 0 { return } in := o.Lights.Light(c, int(o.X), int(o.Y)) col := o.Indexizer.Indexize(in, int(o.X), int(o.Y)) o.Buffer[o.BufferOffset] = col } } func RectShaderIndexed(iim IndexedImage, tx0, ty0, tx1, ty1 int) func(o *RectangleShaderOpts) { tw := float64(tx1-tx0) + 1 th := float64(ty1-ty0) + 1 return func(o *RectangleShaderOpts) { cx := int(float64(tw*o.Px)) + tx0 cy := int(float64(th*o.Py)) + ty0 c := iim.Pixels[cx+cy*iim.Width] if c == 0 { return } in := o.Lights.Light(c, int(o.X), int(o.Y)) col := o.Indexizer.Indexize(in, int(o.X), int(o.Y)) o.Buffer[o.BufferOffset] = col } }
pkg/display/bit-shader.go
0.66072
0.470128
bit-shader.go
starcoder
package main

import (
	"fmt"
	// The "math" package is new. We need to use it because it contains a very
	// accurate value for Pi
	"math"
)

func main() {
	// First the variables for the Earths radius and time of the ISS orbit.
	// These are floating point types to keeps things accurate - we want to
	// know what the numbers are after the decimal point. This also avoids
	// having to do any conversions from integer to floating point.
	var earthsRadiusInKm float64
	var timeOfIssOrbitInHrs float64

	// The ISS altitude is 412.5 km, so it needs a floating point variable
	// too. (FIX: the original left the type off, which does not compile.)
	var issAverageAltitudeInKm float64

	// Set the value for the Earths radius
	earthsRadiusInKm = 6378.0

	// Set the value for the ISS orbit time in hours
	timeOfIssOrbitInHrs = 1.545

	// Set the value of the ISS's average altitude.
	// (FIX: the original assignment had no value, which does not compile.)
	issAverageAltitudeInKm = 412.5

	// Before you can work out the speed you need to work out the total
	// distance of the orbit of the ISS. We need a variable to store the
	// answer in.
	var issOrbitalDistanceInKm float64

	// If we assume the orbit is a circle then the circumference of the circle is
	//   2 * pi * the circle radius
	// Remember that the radius of the orbit is the earths radius plus the altitude,
	// so we have to work this out.
	// The "math.Pi" bit is just how you access the value of Pi in the math package
	issOrbitalDistanceInKm = 2 * math.Pi * (earthsRadiusInKm + issAverageAltitudeInKm)

	// We need another variable to store the speed of the ISS orbit in km/h
	var issSpeedInKmPerHr float64

	// Now we can work out the speed in km per hour. The speed is just the
	// distance of the orbit divided by the time the orbit takes.
	issSpeedInKmPerHr = issOrbitalDistanceInKm / timeOfIssOrbitInHrs

	// Now we can print the results!
	fmt.Print("The distance of one orbit of the ISS is ")
	fmt.Print(issOrbitalDistanceInKm)
	fmt.Println("km")
	fmt.Print("The speed of the ISS in orbit is ")
	fmt.Print(issSpeedInKmPerHr)
	fmt.Println("km/h")

	// Your Turn
	// Now you know how to calculate the orbital speed of the ISS
	// see if you can calculate the orbital speed of the Hubble Space
	// Telescope.
	// Hint: You need new variables for the altitude, orbital distance and
	// orbital speed of the Hubble Space Telescope. But the maths is the same,
	// you just need to change the variable names.

	// ....Add your new code below this line....

	// Your Turn Again
	// Now do the same for the Astra 1KR satellite to work out its speed.

	// ....Add your new code below this line.....

	// Bonus Points
	// See if you can work out the orbital speed of a GPS satellite.

	// ....Add your new code below this line....
}
0.572364
0.516717
orbits.go
starcoder
package sim import ( "github.com/flowmatters/openwater-core/data" ) const ( DIMP_PARAMETER int = 0 DIMP_CELL int = 1 DIMI_CELL int = 0 DIMI_INPUT int = 1 DIMI_TIMESTEP int = 2 DIMS_CELL int = 0 DIMS_STATE int = 1 DIMO_CELL int = 0 DIMO_OUTPUT int = 1 DIMO_TIMESTEP int = 2 ) type ModelDescription struct { Parameters []ParameterDescription States []string Inputs []string Outputs []string Dimensions []string } type ParameterDescription struct { Name string Default float64 Description string Range [2]float64 RangeOpen [2]bool Units string Dimensions []string } type TimeSteppingModel interface { Description() ModelDescription InitialiseDimensions(dims []int) FindDimensions(params data.ND2Float64) []int ApplyParameters(params data.ND2Float64) InitialiseStates(n int) data.ND2Float64 Run(inputs data.ND3Float64, states data.ND2Float64, outputs data.ND3Float64) } type Hotstartable interface { GetStates() []float64 SetStates(states []float64) } type Series data.ND1Float64 //type InputSet data.ND2Float64 //type OutputSet data.ND3Float64 //type StateSet data.ND2Float64 type RunResults struct { Outputs data.ND3Float64 States data.ND2Float64 } func DescribeParameters(names []string) []ParameterDescription { result := make([]ParameterDescription, len(names)) for i, val := range names { result[i] = NewParameter(val) } return result } func DescribeParameter(name string, defaultValue float64, description string, paramRange []float64, units string, dims []string) ParameterDescription { var result ParameterDescription result.Name = name result.Default = defaultValue result.Description = description result.Range[0] = paramRange[0] result.Range[1] = paramRange[1] result.Units = units result.Dimensions = dims return result } func NewParameter(name string) ParameterDescription { dummyRange := make([]float64, 2) return DescribeParameter(name, 0, "", dummyRange, "",[]string{}) } func InitialiseOutputs(model TimeSteppingModel, nTimeSteps int, nCells int) data.ND3Float64 { return 
data.NewArray3DFloat64(nCells, len(model.Description().Outputs), nTimeSteps) } /* How do we want it run? * Provide inputs for a given window of time * Receive outputs for corresponding window of time * Receive initial states, return final states * How is time specified? Implicitly? * How to specify linked models (conceptually and spatially) * Generic? or * Specific to a model problem (eg a Dyanmic Sednet stragegy? * */
sim/runnable.go
0.553747
0.432543
runnable.go
starcoder
package geometry import "github.com/gopherd/three/core" type Plane struct { Normal core.Vector3 Constant core.Float } func (plane *Plane) Set(normal core.Vector3, constant core.Float) *Plane { plane.Normal = normal plane.Constant = constant return plane } func (plane *Plane) SetComponents(x, y, z, w core.Float) *Plane { plane.Normal.Set(x, y, z) plane.Constant = w return plane } func (plane *Plane) SetFromNormalAndCoplanarPoint(normal, point core.Vector3) *Plane { plane.Normal = normal plane.Constant = -point.Dot(normal) return plane } func (plane *Plane) SetFromCoplanarPoints(a, b, c core.Vector3) *Plane { var normal = c.Sub(b).Cross(a.Sub(b)).Normalize() plane.SetFromNormalAndCoplanarPoint(normal, a) return plane } func (plane *Plane) Normalize() *Plane { var inverseNormalLength = 1.0 / plane.Normal.Length() plane.Normal.Mul(inverseNormalLength) plane.Constant *= inverseNormalLength return plane } func (plane *Plane) Negate() *Plane { plane.Constant *= -1 plane.Normal = plane.Normal.Mul(-1) return plane } func (plane Plane) DistanceToPoint(point core.Vector3) core.Float { return plane.Normal.Dot(point) + plane.Constant } func (plane Plane) DistanceToSphere(sphere Sphere3) core.Float { return plane.DistanceToPoint(sphere.Center) - sphere.Radius } func (plane Plane) ProjectPoint(point core.Vector3) core.Vector3 { return plane.Normal.Mul(-plane.DistanceToPoint(point)).Add(point) } func (plane Plane) IntersectLine(line Line3) (p core.Vector3, ok bool) { var direction = line.Direction() var denominator = plane.Normal.Dot(direction) if denominator == 0 { // line is coplanar, return origin if plane.DistanceToPoint(line.Start) == 0 { return line.Start, true } return } var t = -(line.Start.Dot(plane.Normal) + plane.Constant) / denominator if t < 0 || t > 1 { return } return direction.Mul(t).Add(line.Start), true } func (plane Plane) IntersectsLine(line Line3) bool { var startSign = plane.DistanceToPoint(line.Start) var endSign = plane.DistanceToPoint(line.End) return 
(startSign < 0 && endSign > 0) || (endSign < 0 && startSign > 0) } func (plane Plane) CoplanarPoint() core.Vector3 { return plane.Normal.Mul(-plane.Constant) }
geometry/plane.go
0.868896
0.751032
plane.go
starcoder
package note

import "fmt"

// SpecialType classifies the kinds of notes the playback system understands.
type SpecialType int

const (
	SpecialTypeEmpty = SpecialType(iota)
	SpecialTypeRelease
	SpecialTypeStop
	SpecialTypeNormal
	SpecialTypeStopOrRelease
	SpecialTypeInvalid
)

// Note is a note or special effect related to the channel's voice playback system
type Note interface {
	fmt.Stringer
	Type() SpecialType
}

type baseNote struct{}

// EmptyNote is a special note effect that specifies no change in the current voice settings
type EmptyNote baseNote

func (n EmptyNote) String() string    { return "..." }
func (n EmptyNote) Type() SpecialType { return SpecialTypeEmpty }

// ReleaseNote is a special note effect that releases the currently playing voice (note-off)
type ReleaseNote baseNote

func (n ReleaseNote) String() string    { return "===" }
func (n ReleaseNote) Type() SpecialType { return SpecialTypeRelease }

// StopNote is a special note effect that stops the currently playing voice (note-cut)
type StopNote baseNote

func (n StopNote) String() string    { return "^^^" }
func (n StopNote) Type() SpecialType { return SpecialTypeStop }

// Normal is a standard note, which is a combination of key and octave
type Normal Semitone

func (n Normal) String() string {
	st := Semitone(n)
	return st.Key().String() + st.Octave().String()
}
func (n Normal) Type() SpecialType { return SpecialTypeNormal }

// StopOrReleaseNote is a special note effect that denotes an S3M-style Stop note
// NOTE: ST3 treats a "stop" note like a combination of release (note-off) and stop (note-cut)
// For PCM, it is a stop, but for OPL2, it is a release
type StopOrReleaseNote baseNote

func (n StopOrReleaseNote) String() string    { return "^^." }
func (n StopOrReleaseNote) Type() SpecialType { return SpecialTypeStopOrRelease }

// InvalidNote is a special note effect representing an unparseable/invalid note
type InvalidNote baseNote

func (n InvalidNote) String() string    { return "???" }
func (n InvalidNote) Type() SpecialType { return SpecialTypeInvalid }

// CoalesceNoteSemitone will coalesce a note and an included semitone value.
// A special note (note-off, fade-out, etc.) takes precedence over the
// semitone passed in, but the semitone may override a normal note's value.
func CoalesceNoteSemitone(n Note, s Semitone) Note {
	if s == UnchangedSemitone || IsSpecial(n) {
		return n
	}
	return Normal(s)
}

// IsRelease returns true if the note is a release (Note-Off).
func IsRelease(n Note) bool { return Type(n) == SpecialTypeRelease }

// IsStop returns true if the note is a stop (Note-Cut).
func IsStop(n Note) bool { return Type(n) == SpecialTypeStop }

// IsEmpty returns true if the note is nil or empty.
func IsEmpty(n Note) bool { return Type(n) == SpecialTypeEmpty }

// IsInvalid returns true if the note is invalid in any way.
func IsInvalid(n Note) bool { return Type(n) == SpecialTypeInvalid }

// IsSpecial returns true if the note is non-nil and anything other than a
// normal note. (A nil note is NOT special.)
func IsSpecial(n Note) bool {
	return n != nil && n.Type() != SpecialTypeNormal
}

// Type returns the SpecialType of the note; a nil note counts as empty.
func Type(n Note) SpecialType {
	if n == nil {
		return SpecialTypeEmpty
	}
	return n.Type()
}

// String renders the note; a nil note renders as an empty note.
func String(n Note) string {
	if n == nil {
		return EmptyNote{}.String()
	}
	return n.String()
}
0.817938
0.420838
note.go
starcoder
package compression

import (
	"context"
)

// EncodedFrame contains the streams of one compressed frame.
type EncodedFrame struct {
	Bitstream  []byte
	Maskstream []byte
}

// tileDelta holds the per-pixel changes of one tile; unchanged pixels stay 0.
type tileDelta [PixelPerTile]byte

// frameDelta holds the tile deltas of one frame, in row-major tile order.
type frameDelta struct {
	tiles []tileDelta
}

// SceneEncoder encodes an entire scene of bitmaps sharing the same palette.
type SceneEncoder struct {
	hTiles     int // number of tile columns
	vTiles     int // number of tile rows
	lineStride int // bytes per pixel row (== width)
	tileStride int // bytes per tile row of pixels
	lastFrame  []byte       // previous frame, for delta computation
	deltas     []frameDelta // accumulated per-frame tile deltas
}

// NewSceneEncoder returns a new instance for frames of the given pixel size.
func NewSceneEncoder(width, height int) *SceneEncoder {
	e := &SceneEncoder{
		hTiles:     width / TileSideLength,
		vTiles:     height / TileSideLength,
		lineStride: width,
	}
	e.tileStride = e.lineStride * TileSideLength
	e.lastFrame = make([]byte, e.vTiles*TileSideLength*e.lineStride)
	return e
}

// AddFrame registers a further frame to the scene. The frame must have the
// exact pixel count implied by the constructor's dimensions.
func (e *SceneEncoder) AddFrame(frame []byte) error {
	if len(frame) != len(e.lastFrame) {
		return errInvalidFrameSize
	}
	var delta frameDelta
	// The first frame has no predecessor, so every pixel is a change.
	isFirstFrame := len(e.deltas) == 0
	vStart := 0
	for vTile := 0; vTile < e.vTiles; vTile++ {
		tileStart := vStart
		for hTile := 0; hTile < e.hTiles; hTile++ {
			delta.tiles = append(delta.tiles, e.deltaTile(isFirstFrame, tileStart, frame))
			tileStart += TileSideLength
		}
		vStart += e.tileStride
	}
	e.deltas = append(e.deltas, delta)
	copy(e.lastFrame, frame)
	return nil
}

// deltaTile extracts one tile's changed pixels relative to the previous
// frame; on the first frame every pixel is taken as changed.
func (e *SceneEncoder) deltaTile(isFirstFrame bool, offset int, frame []byte) tileDelta {
	var delta tileDelta
	for y := 0; y < TileSideLength; y++ {
		start := offset + (y * e.lineStride)
		for x := 0; x < TileSideLength; x++ {
			pixel := frame[start+x]
			if isFirstFrame || (pixel != e.lastFrame[start+x]) {
				delta[y*TileSideLength+x] = pixel
			}
		}
	}
	return delta
}

// Encode processes all the previously registered frames and creates the necessary components for decoding.
func (e *SceneEncoder) Encode(ctx context.Context) (
	words []ControlWord, paletteLookupBuffer []byte, frames []EncodedFrame, err error) {
	var wordSequencer ControlWordSequencer
	tileColorOpsPerFrame := make([][]TileColorOp, len(e.deltas))

	paletteLookup, err := e.createPaletteLookup(ctx)
	if err != nil {
		return
	}
	paletteLookupBuffer = paletteLookup.Buffer()
	// The offset field encoding limits the lookup buffer to 17 bits.
	if len(paletteLookupBuffer) > 0x1FFFF {
		err = paletteLookupTooBigError{Size: len(paletteLookupBuffer)}
		return
	}

	frames = make([]EncodedFrame, len(e.deltas))
	for frameIndex := 0; frameIndex < len(e.deltas); frameIndex++ {
		var maskstreamWriter MaskstreamWriter
		outFrame := &frames[frameIndex]
		delta := e.deltas[frameIndex]
		// lastOp tracks the previous op to allow CtrlRepeatPrevious; it
		// starts as an unmatchable sentinel.
		lastOp := TileColorOp{Type: CtrlUnknown}
		for tileIndex, tile := range delta.tiles {
			var op TileColorOp
			paletteIndex, pal, mask := paletteLookup.Lookup(tile)
			palSize := len(pal)
			// Choose the cheapest op that can reproduce this tile:
			// skip for all-unchanged, static 2-color fills for solid or
			// exact alternating patterns, otherwise masked fills with
			// progressively larger palettes (and mask words of 2/4/6/8
			// bytes respectively).
			switch {
			case palSize == 1 && (pal[0] == 0x00):
				op.Type = CtrlSkip
			case palSize == 1:
				op.Type = CtrlColorTile2ColorsStatic
				op.Offset = uint32(pal[0])<<8 | uint32(pal[0])
			case palSize == 2 && mask == 0xAAAA && (pal[0] != 0x00) && (pal[1] != 0x00):
				op.Type = CtrlColorTile2ColorsStatic
				op.Offset = uint32(pal[1])<<8 | uint32(pal[0])
			case palSize == 2 && mask == 0x5555 && (pal[0] != 0x00) && (pal[1] != 0x00):
				op.Type = CtrlColorTile2ColorsStatic
				op.Offset = uint32(pal[0])<<8 | uint32(pal[1])
			case palSize <= 2:
				op.Type = CtrlColorTile2ColorsMasked
				if palSize == 2 {
					op.Offset = uint32(pal[1])
					op.Offset <<= 8
				}
				if palSize > 0 {
					op.Offset |= uint32(pal[0])
				}
				_ = maskstreamWriter.Write(2, mask)
			case palSize <= 4:
				op.Type = CtrlColorTile4ColorsMasked
				op.Offset = uint32(paletteIndex)
				_ = maskstreamWriter.Write(4, mask)
			case palSize <= 8:
				op.Type = CtrlColorTile8ColorsMasked
				op.Offset = uint32(paletteIndex)
				_ = maskstreamWriter.Write(6, mask)
			default:
				op.Type = CtrlColorTile16ColorsMasked
				op.Offset = uint32(paletteIndex)
				_ = maskstreamWriter.Write(8, mask)
			}
			// Collapse runs of identical ops into repeats, but never
			// across a row boundary and never for skips.
			if op.Type != CtrlSkip && (tileIndex%e.hTiles) != 0 && lastOp == op {
				op = TileColorOp{Type: CtrlRepeatPrevious}
			} else {
				lastOp = op
			}
			err = wordSequencer.Add(op)
			if err != nil {
				return nil, nil, nil, err
			}
			tileColorOpsPerFrame[frameIndex] = append(tileColorOpsPerFrame[frameIndex], op)
		}
		outFrame.Maskstream = maskstreamWriter.Buffer
	}

	wordSequence, err := wordSequencer.Sequence()
	if err != nil {
		return nil, nil, nil, err
	}
	wordSequence.HTiles = uint32(e.hTiles)
	words = wordSequence.ControlWords()
	for frameIndex, ops := range tileColorOpsPerFrame {
		frames[frameIndex].Bitstream, err = wordSequence.BitstreamFor(ops)
		if err != nil {
			return nil, nil, nil, err
		}
	}
	return
}

// createPaletteLookup builds the shared palette lookup from every tile delta
// of every registered frame.
func (e *SceneEncoder) createPaletteLookup(ctx context.Context) (PaletteLookup, error) {
	var paletteLookupGenerator PaletteLookupGenerator
	for _, delta := range e.deltas {
		for _, tile := range delta.tiles {
			paletteLookupGenerator.Add(tile)
		}
	}
	return paletteLookupGenerator.Generate(ctx)
}
ss1/content/movie/internal/compression/SceneEncoder.go
0.71721
0.46478
SceneEncoder.go
starcoder
package shape import ( "fmt" "math" "github.com/fogleman/gg" ) // Rectangle represents a rectangular shape. type Rectangle struct { X1, Y1 int X2, Y2 int } func NewRectangle() *Rectangle { return &Rectangle{} } func (r *Rectangle) Init(plane *Plane) { rnd := plane.Rnd r.X1 = rnd.Intn(plane.W) r.Y1 = rnd.Intn(plane.H) r.X2 = clampInt(r.X1+rnd.Intn(32)+1, 0, plane.W-1) r.Y2 = clampInt(r.Y1+rnd.Intn(32)+1, 0, plane.H-1) r.mutateImpl(plane, 1.0, 2, ActionAny) } func (r *Rectangle) bounds() (x1, y1, x2, y2 int) { x1, y1 = r.X1, r.Y1 x2, y2 = r.X2, r.Y2 if x1 > x2 { x1, x2 = x2, x1 } if y1 > y2 { y1, y2 = y2, y1 } return } func (r *Rectangle) Draw(dc *gg.Context, temp float64) { x1, y1, x2, y2 := r.bounds() dc.DrawRectangle(float64(x1), float64(y1), float64(x2-x1+1), float64(y2-y1+1)) dc.Fill() } func (r *Rectangle) SVG(attrs string) string { x1, y1, x2, y2 := r.bounds() w := x2 - x1 + 1 h := y2 - y1 + 1 return fmt.Sprintf( "<rect %s x=\"%d\" y=\"%d\" width=\"%d\" height=\"%d\" />", attrs, x1, y1, w, h) } func (r *Rectangle) Copy() Shape { a := *r return &a } func (r *Rectangle) Mutate(plane *Plane, temp float64) { r.mutateImpl(plane, temp, 10, ActionAny) } func (r *Rectangle) mutateImpl(plane *Plane, temp float64, rollback int, actions ActionType) { if actions == ActionNone { return } const R = math.Pi / 4.0 w := plane.W h := plane.H rnd := plane.Rnd scale := 16 * temp save := *r for { switch rnd.Intn(5) { case 0: // Mutate if (actions & ActionMutate) == 0 { continue } a := int(rnd.NormFloat64() * scale) b := int(rnd.NormFloat64() * scale) r.X1 = clampInt(r.X1+a, 0, w-1) r.Y1 = clampInt(r.Y1+b, 0, h-1) case 1: if (actions & ActionMutate) == 0 { continue } a := int(rnd.NormFloat64() * scale) b := int(rnd.NormFloat64() * scale) r.X2 = clampInt(r.X2+a, 0, w-1) r.Y2 = clampInt(r.Y2+b, 0, h-1) case 2: // Translate if (actions & ActionTranslate) == 0 { continue } a := int(rnd.NormFloat64() * scale) b := int(rnd.NormFloat64() * scale) r.X1 = clampInt(r.X1+a, 0, w-1) r.Y1 = 
clampInt(r.Y1+b, 0, h-1) r.X2 = clampInt(r.X2+a, 0, w-1) r.Y2 = clampInt(r.Y2+b, 0, h-1) case 3: // Move if (actions & ActionTranslate) == 0 { continue } a := int(rnd.NormFloat64() * scale) r.X1 = clampInt(r.X1+a, 0, w-1) r.Y1 = clampInt(r.Y1+a, 0, h-1) r.X2 = clampInt(r.X2+a, 0, w-1) r.Y2 = clampInt(r.Y2+a, 0, h-1) } if r.Valid() { break } if rollback > 0 { *r = save rollback -= 1 } } } func (r *Rectangle) Valid() bool { a, b := r.X1-r.X2, r.Y1-r.Y2 if a < 0 { a = -a } if b < 0 { b = -b } return a > 2 && b > 2 } func (r *Rectangle) Rasterize(rc *RasterContext) []Scanline { x1, y1, x2, y2 := r.bounds() lines := rc.Lines[:0] for y := y1; y <= y2; y++ { lines = append(lines, Scanline{y, x1, x2, 0xffff}) } return lines } // RotatedRectangle represents a rotated rectangular shape type RotatedRectangle struct { X, Y int Sx, Sy int // Angle of rotation of the rectangle. Angle int } func NewRotatedRectangle() *RotatedRectangle { return &RotatedRectangle{} } func (r *RotatedRectangle) Init(plane *Plane) { rnd := plane.Rnd r.X = rnd.Intn(plane.W) r.Y = rnd.Intn(plane.H) r.Sx = rnd.Intn(32) + 1 r.Sy = rnd.Intn(32) + 1 r.Angle = rnd.Intn(360) r.mutateImpl(plane, 1.0, 1, ActionAny) } func (r *RotatedRectangle) Draw(dc *gg.Context, scale float64) { sx, sy := float64(r.Sx), float64(r.Sy) dc.Push() dc.Translate(float64(r.X), float64(r.Y)) dc.Rotate(radians(float64(r.Angle))) dc.DrawRectangle(-sx/2, -sy/2, sx, sy) dc.Pop() dc.Fill() } func (r *RotatedRectangle) SVG(attrs string) string { return fmt.Sprintf( "<g transform=\"translate(%d %d) rotate(%d) scale(%d %d)\"><rect %s x=\"-0.5\" y=\"-0.5\" width=\"1\" height=\"1\" /></g>", r.X, r.Y, r.Angle, r.Sx, r.Sy, attrs) } func (r *RotatedRectangle) Copy() Shape { a := *r return &a } func (r *RotatedRectangle) Mutate(plane *Plane, temp float64) { r.mutateImpl(plane, temp, 10, ActionAny) } func (r *RotatedRectangle) mutateImpl(plane *Plane, temp float64, rollback int, actions ActionType) { if actions == ActionNone { return } w := 
plane.W h := plane.H rnd := plane.Rnd scale := 16 * temp save := *r for { a := int(rnd.NormFloat64() * scale) b := int(rnd.NormFloat64() * scale) switch rnd.Intn(3) { case 0: // Move Origin if (actions & ActionTranslate) == 0 { continue } r.X = clampInt(r.X+a, 0, w-1) r.Y = clampInt(r.Y+b, 0, h-1) case 1: // Resize if (actions & ActionScale) == 0 { continue } r.Sx = clampInt(r.Sx+a, 1, w-1) r.Sy = clampInt(r.Sy+b, 1, h-1) case 2: // Rotate if (actions & ActionRotate) == 0 { continue } r.Angle = r.Angle + a + a } if r.Valid() { break } if rollback > 0 { *r = save rollback -= 1 } } } func (r *RotatedRectangle) Valid() bool { a, b := r.Sx, r.Sy if a < b { a, b = b, a } aspect := float64(a) / float64(b) return aspect <= 5 } func (r *RotatedRectangle) Rasterize(rc *RasterContext) []Scanline { w := rc.W h := rc.H sx, sy := float64(r.Sx), float64(r.Sy) angle := radians(float64(r.Angle)) rx1, ry1 := rotate(-sx/2, -sy/2, angle) rx2, ry2 := rotate(sx/2, -sy/2, angle) rx3, ry3 := rotate(sx/2, sy/2, angle) rx4, ry4 := rotate(-sx/2, sy/2, angle) x1, y1 := int(rx1)+r.X, int(ry1)+r.Y x2, y2 := int(rx2)+r.X, int(ry2)+r.Y x3, y3 := int(rx3)+r.X, int(ry3)+r.Y x4, y4 := int(rx4)+r.X, int(ry4)+r.Y miny := minInt(y1, minInt(y2, minInt(y3, y4))) maxy := maxInt(y1, maxInt(y2, maxInt(y3, y4))) n := maxy - miny + 1 min := make([]int, n) max := make([]int, n) for i := range min { min[i] = w } xs := []int{x1, x2, x3, x4, x1} ys := []int{y1, y2, y3, y4, y1} // TODO: this could be better probably for i := 0; i < 4; i++ { x, y := float64(xs[i]), float64(ys[i]) dx, dy := float64(xs[i+1]-xs[i]), float64(ys[i+1]-ys[i]) count := int(math.Sqrt(dx*dx+dy*dy)) * 2 for j := 0; j < count; j++ { t := float64(j) / float64(count-1) xi := int(x + dx*t) yi := int(y+dy*t) - miny min[yi] = minInt(min[yi], xi) max[yi] = maxInt(max[yi], xi) } } lines := rc.Lines[:0] for i := 0; i < n; i++ { y := miny + i if y < 0 || y >= h { continue } a := maxInt(min[i], 0) b := minInt(max[i], w-1) if b >= a { lines = 
append(lines, Scanline{y, a, b, 0xffff}) } } return lines }
primitive/shape/rectangle.go
0.86674
0.650772
rectangle.go
starcoder
package entity import ( "github.com/go-gl/mathgl/mgl32" ) // Transform Represents the transformation of an entity in // a 3-dimensional space: position, rotation and scale. // Note: Rotation is measured in degrees type Transform struct { Position mgl32.Vec3 Rotation mgl32.Vec3 Scale mgl32.Vec3 prevPosition mgl32.Vec3 prevRotation mgl32.Vec3 prevScale mgl32.Vec3 matrix mgl32.Mat4 quat mgl32.Quat } // TransformationMatrix computes object transformation matrix func (transform *Transform) TransformationMatrix() mgl32.Mat4 { if !transform.Position.ApproxEqual(transform.prevPosition) || !transform.Rotation.ApproxEqual(transform.prevRotation) || !transform.Scale.ApproxEqual(transform.prevScale) { transform.quat = mgl32.QuatIdent() // Scale of 0 is invalid if transform.Scale.X() == 0 || transform.Scale.Y() == 0 || transform.Scale.Z() == 0 { transform.Scale = mgl32.Vec3{1, 1, 1} } //Translate translation := mgl32.Translate3D(transform.Position.X(), transform.Position.Y(), transform.Position.Z()) // rotate // IMPORTANT. 
Source engine has Y and Z axis switched rotation := mgl32.Ident4() rotation = transform.rotateAroundAxis(rotation, mgl32.Vec3{1, 0, 0}, mgl32.DegToRad(transform.Rotation.X())) rotation = transform.rotateAroundAxis(rotation, mgl32.Vec3{0, 1, 0}, mgl32.DegToRad(transform.Rotation.Z())) rotation = transform.rotateAroundAxis(rotation, mgl32.Vec3{0, 0, 1}, mgl32.DegToRad(transform.Rotation.Y())) //@TODO ROTATIONS // scale scale := mgl32.Scale3D(transform.Scale.X(), transform.Scale.Y(), transform.Scale.Z()) transform.prevPosition = transform.Position transform.prevRotation = transform.Rotation transform.prevScale = transform.Scale transform.matrix = translation.Mul4(rotation).Mul4(scale) } return transform.matrix } // rotateAroundAxis rotates a matrix around a given axis func (transform *Transform) rotateAroundAxis(matrix mgl32.Mat4, axis mgl32.Vec3, angle float32) mgl32.Mat4 { q1 := mgl32.QuatRotate(angle, axis) transform.quat = transform.quat.Mul(q1) return matrix.Mul4(q1.Mat4()) }
entity/transform.go
0.751466
0.749866
transform.go
starcoder
package dfs import ( "fmt" "github.com/wangyoucao577/algorithms_practice/graph" ) type dfsTopologicalSort struct { Dfs sorted []graph.NodeID acyclic bool } // NewTopologicalSort execute the DFS search on a directed acyclic graph, // traversing all nodes to get topological sort func NewTopologicalSort(g graph.Graph) ([]graph.NodeID, error) { if !g.Directed() { return nil, fmt.Errorf("It's not a Directed Graph") } // Initialize dfsContext := dfsTopologicalSort{ Dfs{0, []dfsTree{}, nodeAttrArray{}, edgeAttrArray{}}, []graph.NodeID{}, false} dfsContext.initialize(g) // DFS g.ControllableIterateAllNodes(func(k graph.NodeID) graph.IterateControl { if dfsContext.acyclic { return graph.BreakIterate } if dfsContext.nodesAttr[k].nodeColor == white { dfsContext.forest = append(dfsContext.forest, dfsTree{k}) //record a tree's root // execute one tree search dfsContext.stackBasedVisit(g, k) } return graph.ContinueIterate }) if dfsContext.acyclic { return nil, fmt.Errorf("It's not a Directed Acyclic Graph") } //reversing in-place for i, j := 0, len(dfsContext.sorted)-1; i < j; i, j = i+1, j-1 { dfsContext.sorted[i], dfsContext.sorted[j] = dfsContext.sorted[j], dfsContext.sorted[i] } return dfsContext.sorted, nil } func (d *dfsTopologicalSort) stackBasedVisit(g graph.Graph, root graph.NodeID) { d.time++ d.nodesAttr[root].nodeColor = gray d.nodesAttr[root].timestampD = d.time var stack = []graph.NodeID{} stack = append(stack, root) for len(stack) > 0 { if d.acyclic { return //found acyclic in the graph, no need to continue search } currNode := stack[len(stack)-1] newWhiteNodeFound := false g.ControllableIterateAdjacencyNodes(currNode, func(v graph.NodeID) graph.IterateControl { edge := graph.EdgeID{From: currNode, To: v} if d.nodesAttr[v].nodeColor == white { newWhiteNodeFound = true d.nodesAttr[v].parent = currNode d.time++ d.nodesAttr[v].nodeColor = gray d.nodesAttr[v].timestampD = d.time stack = append(stack, v) // push stack: push to the end // set attr for edge 
d.edgesAttr[edge].t = branch return graph.BreakIterate } else if d.nodesAttr[v].nodeColor == gray { // backward edge, means it's not a directed acyclic graph // so that we can exit the search since we'll not be able to find the topological sort d.acyclic = true return graph.BreakIterate } return graph.ContinueIterate }) if !newWhiteNodeFound { d.time++ d.nodesAttr[currNode].nodeColor = black d.nodesAttr[currNode].timestampF = d.time d.sorted = append(d.sorted, currNode) stack = stack[:len(stack)-1] // pop from stack } } }
dfs/topological_sort.go
0.652241
0.407422
topological_sort.go
starcoder
package reflecth import ( "go/token" "reflect" ) // UnaryOp performs unary operation <op><y> as Go language specification describes. // Supported operations: + - ^ ! & <- . // If operation cannot be performed then error will be returned. // Special note for token.AND (&) operation: if passed y is not addressable (reflect.Value.CanAddr) then new variable will be created with the same as y type and value and address of new variable will be returned. func UnaryOp(op token.Token, y reflect.Value) (r reflect.Value, err error) { switch op { case token.SUB: return unaryOpSub(y) case token.XOR: return unaryOpXor(y) case token.NOT: return unaryOpNot(y) case token.AND: return unaryOpAnd(y) case token.ARROW: return unaryOpArrow(y) case token.ADD: if k := y.Kind(); IsAnyInt(k) || IsFloat(k) || IsComplex(k) { return y, nil } fallthrough default: return reflect.Value{}, unaryOpError(y, op) } } func unaryOpAnd(x reflect.Value) (r reflect.Value, err error) { if x.CanAddr() { return x.Addr(), nil } r = reflect.New(x.Type()) r.Elem().Set(x) return } func unaryOpSub(x reflect.Value) (r reflect.Value, err error) { r = reflect.New(x.Type()).Elem() switch k := x.Kind(); { case IsInt(k): r.SetInt(-x.Int()) // looks like overflow correct (see tests) case IsFloat(k): r.SetFloat(-x.Float()) case IsComplex(k): r.SetComplex(-x.Complex()) default: return reflect.Value{}, unaryOpError(x, token.SUB) } return } func unaryOpNot(x reflect.Value) (r reflect.Value, err error) { if k := x.Kind(); k != reflect.Bool { return reflect.Value{}, unaryOpError(x, token.NOT) } r = reflect.New(x.Type()).Elem() r.SetBool(!x.Bool()) return } func unaryOpXor(x reflect.Value) (r reflect.Value, err error) { r = reflect.New(x.Type()).Elem() switch k := x.Kind(); k { case reflect.Int: r.SetInt(int64(^int(x.Int()))) case reflect.Int8: r.SetInt(int64(^int8(x.Int()))) case reflect.Int16: r.SetInt(int64(^int16(x.Int()))) case reflect.Int32: r.SetInt(int64(^int32(x.Int()))) case reflect.Int64: r.SetInt(^x.Int()) case 
reflect.Uint: r.SetUint(uint64(^uint(x.Uint()))) case reflect.Uint8: r.SetUint(uint64(^uint8(x.Uint()))) case reflect.Uint16: r.SetUint(uint64(^uint16(x.Uint()))) case reflect.Uint32: r.SetUint(uint64(^uint32(x.Uint()))) case reflect.Uint64: r.SetUint(^x.Uint()) default: return reflect.Value{}, unaryOpError(x, token.XOR) } return } func unaryOpArrow(x reflect.Value) (r reflect.Value, err error) { if x.Kind() != reflect.Chan { return reflect.Value{}, unaryOpInvalidReceiverError(x, token.ARROW) } switch dir := x.Type().ChanDir(); dir { case reflect.RecvDir, reflect.BothDir: // nothing to do default: return reflect.Value{}, unaryOpReceiveFromSendOnlyError(x, token.ARROW) } r, _ = x.Recv() return }
back/vendor/github.com/apaxa-go/helper/reflecth/op-unary.go
0.700485
0.589864
op-unary.go
starcoder
package point import ( "github.com/ClickHouse-Ninja/ninjato/src/atime" ) const magicNumber = 146 func New(label string, value float64) *Point { return &Point{ Label: label, Value: value, timestamp: int32(atime.Now().Unix()), magicNumber: magicNumber, } } type ( Tags = map[string]string Fields = map[string]float64 ) type Point struct { Label string Value float64 tags TagsPair fields FieldsPair timestamp int32 magicNumber uint8 } func (p *Point) WithTags(tags Tags) *Point { if cap(p.tags.keys) == 0 { p.tags.keys = make([]string, 0, len(tags)) p.tags.values = make([]string, 0, len(tags)) } for k, v := range tags { p.tags.keys = append(p.tags.keys, k) p.tags.values = append(p.tags.values, v) } return p } func (p *Point) WithFields(fields Fields) *Point { if cap(p.fields.keys) == 0 { p.fields.keys = make([]string, 0, len(fields)) p.fields.values = make([]float64, 0, len(fields)) } for k, v := range fields { p.fields.keys = append(p.fields.keys, k) p.fields.values = append(p.fields.values, v) } return p } func (p *Point) Timestamp() int32 { return p.timestamp } func (p *Point) IsValid() bool { return p.magicNumber == magicNumber && p.timestamp != 0 } func (p *Point) Tags() *TagsPair { return &p.tags } func (p *Point) Fields() *FieldsPair { return &p.fields } type TagsPair struct { keys []string values []string } func (t TagsPair) Len() int { return len(t.keys) } func (t TagsPair) Less(i, j int) bool { return t.keys[i] < t.keys[j] } func (t TagsPair) Swap(i, j int) { t.keys[i], t.keys[j] = t.keys[j], t.keys[i] t.values[i], t.values[j] = t.values[j], t.values[i] } func (t *TagsPair) Keys() []string { return t.keys } func (t *TagsPair) Values() []string { return t.values } type FieldsPair struct { keys []string values []float64 } func (f FieldsPair) Len() int { return len(f.keys) } func (f FieldsPair) Less(i, j int) bool { return f.keys[i] < f.keys[j] } func (f FieldsPair) Swap(i, j int) { f.keys[i], f.keys[j] = f.keys[j], f.keys[i] f.values[i], f.values[j] = f.values[j], 
f.values[i] } func (f *FieldsPair) Keys() []string { return f.keys } func (f *FieldsPair) Values() []float64 { return f.values }
src/point/point.go
0.646349
0.474936
point.go
starcoder
package images import ( "fmt" "io" "reflect" "github.com/mitchellh/mapstructure" "github.com/rackspace/gophercloud" "github.com/rackspace/gophercloud/pagination" ) // Image model // Does not include the literal image data; just metadata. // returned by listing images, and by fetching a specific image. type Image struct { // ID is the image UUID ID string // Name is the human-readable display name for the image. Name string // Status is the image status. It can be "queued" or "active" // See imageservice/v2/images/type.go Status ImageStatus // Tags is a list of image tags. Tags are arbitrarily defined strings // attached to an image. Tags []string // ContainerFormat is the format of the container. // Valid values are ami, ari, aki, bare, and ovf. ContainerFormat string `mapstructure:"container_format"` // DiskFormat is the format of the disk. // If set, valid values are ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso. DiskFormat string `mapstructure:"disk_format"` // MinDiskGigabytes is the amount of disk space in GB that is required to boot the image. MinDiskGigabytes int `mapstructure:"min_disk"` // MinRAMMegabytes [optional] is the amount of RAM in MB that is required to boot the image. MinRAMMegabytes int `mapstructure:"min_ram"` // Owner is the tenant the image belongs to. Owner string // Protected is whether the image is deletable or not. Protected bool // Visibility defines who can see/use the image. Visibility ImageVisibility // Checksum is the checksum of the data that's associated with the image Checksum string // SizeBytes is the size of the data that's associated with the image. SizeBytes int `mapstructure:"size"` // Metadata is a set of metadata associated with the image. // Image metadata allow for meaningfully define the image properties // and tags. See http://docs.openstack.org/developer/glance/metadefs-concepts.html. Metadata map[string]string // Properties is a set of key-value pairs, if any, that are associated with the image. 
Properties map[string]string // CreatedDate is the date when the image has been created. CreatedDate string `mapstructure:"created_at"` // LastUpdate is the date when the last change has been made to the image or it's properties. LastUpdate string `mapstructure:"updated_at"` // File is the trailing path after the glance endpoint that represent the location // of the image or the path to retrieve it. File string `mapstructure:"file"` // Schema is the path to the JSON-schema that represent the image or image entity. Schema string `mapstructure:"schema"` } type commonResult struct { gophercloud.Result } // Extract interprets any commonResult as an Image. func (c commonResult) Extract() (*Image, error) { if c.Err != nil { return nil, c.Err } var image *Image err := mapstructure.Decode(c.Result.Body, &image) return image, err } // CreateResult represents the result of a Create operation type CreateResult struct { commonResult } // UpdateResult represents the result of an Update operation type UpdateResult struct { commonResult } // GetResult represents the result of a Get operation type GetResult struct { commonResult } //DeleteResult model type DeleteResult struct { gophercloud.Result } // PutImageDataResult is model put image respose type PutImageDataResult struct { gophercloud.Result } // GetImageDataResult model for image response type GetImageDataResult struct { gophercloud.Result } // Extract builds images model from io.Reader func (g GetImageDataResult) Extract() (io.Reader, error) { if r, ok := g.Body.(io.Reader); ok { return r, nil } return nil, fmt.Errorf("Expected io.Reader but got: %T(%#v)", g.Body, g.Body) } // ImagePage represents page type ImagePage struct { pagination.LinkedPageBase } // IsEmpty returns true if a page contains no Images results. 
func (page ImagePage) IsEmpty() (bool, error) { images, err := ExtractImages(page) if err != nil { return true, err } return len(images) == 0, nil } // NextPageURL uses the response's embedded link reference to navigate to the next page of results. func (page ImagePage) NextPageURL() (string, error) { type resp struct { Next string `mapstructure:"next"` } var r resp err := mapstructure.Decode(page.Body, &r) if err != nil { return "", err } return nextPageURL(page.URL.String(), r.Next), nil } func toMapFromString(from reflect.Kind, to reflect.Kind, data interface{}) (interface{}, error) { if (from == reflect.String) && (to == reflect.Map) { return map[string]interface{}{}, nil } return data, nil } // ExtractImages interprets the results of a single page from a List() call, producing a slice of Image entities. func ExtractImages(page pagination.Page) ([]Image, error) { casted := page.(ImagePage).Body var response struct { Images []Image `mapstructure:"images"` } config := &mapstructure.DecoderConfig{ DecodeHook: toMapFromString, Result: &response, } decoder, err := mapstructure.NewDecoder(config) if err != nil { return nil, err } err = decoder.Decode(casted) if err != nil { fmt.Printf("Error happened %v \n", err) } return response.Images, err }
vendor/github.com/rackspace/gophercloud/openstack/imageservice/v2/images/results.go
0.716417
0.401424
results.go
starcoder
An Image Copy algorithm. Supports both, single threaded and multithreaded operations. */ package copier import ( "image" //"image/color" "image/draw" "errors" ) var ESizeMismatch = errors.New("Size-Mismatch") func iterate(r image.Rectangle, pchan chan <- image.Point) { xb,yb,xe,ye := r.Min.X,r.Min.Y,r.Max.X,r.Max.Y for x := xb; x<xe; x++ { for y := yb; y<ye; y++ { pchan <- image.Point{x,y} } } close(pchan) } func copyf(dst draw.Image, src image.Image, pchan <- chan image.Point, qchan chan <- int) { for pt := range pchan { c := src.At(pt.X,pt.Y) dst.Set(pt.X,pt.Y,c) } qchan <- 1 } /* Copies an Image using one thread. */ func Copy(dst draw.Image, src image.Image, blind bool) error { dR,sR := dst.Bounds(),src.Bounds() if !(blind||dR.Eq(sR)) { return ESizeMismatch } pchan := make(chan image.Point,16) qchan := make(chan int,1) go iterate(sR,pchan) copyf(dst,src,pchan,qchan) return nil } /* Copies an Image using multiple threads. The images should be thread-safe. */ func CopyMT(dst draw.Image, src image.Image, blind bool, nthreads int) error { dR,sR := dst.Bounds(),src.Bounds() if !(blind||dR.Eq(sR)) { return ESizeMismatch } pchan := make(chan image.Point,nthreads) qchan := make(chan int,nthreads) go iterate(sR,pchan) for i:=0; i<nthreads; i++ { go copyf(dst,src,pchan,qchan) } for i:=0; i<nthreads; i++ { <- qchan } return nil } type Operator interface{ Operate(pt image.Point) } func operateF(o Operator, pchan <- chan image.Point, qchan chan <- int) { for pt := range pchan { o.Operate(pt) } qchan <- 1 } /* Performs the Task 'o' on every Pixel within the Rectangle 'r'. */ func Operate(o Operator, r image.Rectangle) { pchan := make(chan image.Point,16) qchan := make(chan int,1) go iterate(r,pchan) operateF(o,pchan,qchan) } /* Performs the Task 'o' on every Pixel within the Rectangle 'r'. This implementation uses multiple threads, so the Task should be threadsafe. 
*/ func OperateMT(o Operator, r image.Rectangle, nthreads int) { pchan := make(chan image.Point,nthreads) qchan := make(chan int,nthreads) go iterate(r,pchan) for i:=0; i<nthreads; i++ { go operateF(o,pchan,qchan) } for i:=0; i<nthreads; i++ { <- qchan } }
copier/copier.go
0.595728
0.548432
copier.go
starcoder
package slice /* Apply a function to slice Sample usage: multiply each element by 2 l := []int64{0, 7, 8} slice.ApplyToInt64Slice(l, func(i int64) int64 { return i * 2 }) fmt.Println(l) Expected out: [0, 14, 16] */ // ApplyStrings ... Apply a function to each element in string slice func ApplyStrings(l []string, applyFunc func(s string) string) { for i, s := range l { l[i] = applyFunc(s) } } // ApplyInt64s ... Apply a function to each element in int64 slice func ApplyInt64s(l []int64, applyFunc func(n int64) int64) { for i, num := range l { l[i] = applyFunc(num) } } // ApplyInt32s ... Apply a function to each element in int32 slice func ApplyInt32s(l []int32, applyFunc func(n int32) int32) { for i, num := range l { l[i] = applyFunc(num) } } // ApplyUint64s ... Apply a function to each element in uint64 slice func ApplyUint64s(l []uint64, applyFunc func(n uint64) uint64) { for i, num := range l { l[i] = applyFunc(num) } } // ApplyUint32s ... Apply a function to each element in uint32 slice func ApplyUint32s(l []uint32, applyFunc func(n uint32) uint32) { for i, num := range l { l[i] = applyFunc(num) } } /* Apply a index function to slice Sample usage: multiply each element by 2 if it is in odd-value position l := []int64{0, 7, 8} slice.ApplyIdxToInt64Slice(l, func(i int, num int64) int64 { if i%2 == 0 { return num * 2 } return num }) fmt.Println(l) Expected out: [0, 7, 16] */ // IApplyStrings ... Apply an index-related function to each element in string slice func IApplyStrings(l []string, applyFunc func(i int, s string) string) { for i, s := range l { l[i] = applyFunc(i, s) } } // IApplyInt64s ... Apply an index-related function to each element in int64 slice func IApplyInt64s(l []int64, applyFunc func(i int, n int64) int64) { for i, num := range l { l[i] = applyFunc(i, num) } } // IApplyInt32s ... 
Apply an index-related function to each element in int32 slice func IApplyInt32s(l []int32, applyFunc func(i int, n int32) int32) { for i, num := range l { l[i] = applyFunc(i, num) } } // IApplyUint64s ... Apply an index-related function to each element in uint64 slice func IApplyUint64s(l []uint64, applyFunc func(i int, n uint64) uint64) { for i, num := range l { l[i] = applyFunc(i, num) } } // IApplyUint32s ... Apply an index-related function to each element in uint32 slice func IApplyUint32s(l []uint32, applyFunc func(i int, n uint32) uint32) { for i, num := range l { l[i] = applyFunc(i, num) } }
slice/apply.go
0.745306
0.427695
apply.go
starcoder
package berlingo // NodeType declares the type of a node (re-usable characteristics) type NodeType struct { Name string Points int Soldiers_Per_Turn int } // Map represents the map and the nodes in it type Map struct { Game *Game Directed bool NodeTypes map[string]*NodeType Nodes map[int]*Node // Lazy caches freeNodes []*Node ownedNodes []*Node enemyNodes []*Node controlledNodes []*Node } // NewMap initializes a new map func NewMap(game *Game) (m *Map, err error) { request := game.Request m = &Map{ Game: game, Directed: request.Infos.Directed, NodeTypes: make(map[string]*NodeType), Nodes: make(map[int]*Node), } for _, rt := range request.Map.Types { m.NodeTypes[rt.Name] = &NodeType{ Name: rt.Name, Points: rt.Points, Soldiers_Per_Turn: rt.Soldiers_Per_Turn, } } for _, rn := range request.Map.Nodes { node := NewNode(m) node.Id = rn.Id node.Type = m.NodeTypes[rn.Type] m.Nodes[rn.Id] = node } for _, rs := range request.State { node := m.Nodes[rs.Node_Id] node.Player_Id = rs.Player_Id node.Number_Of_Soldiers = rs.Number_Of_Soldiers node.reset() } for _, rp := range request.Map.Paths { m.Nodes[rp.From].linkTo(m.Nodes[rp.To]) if m.Directed == false { m.Nodes[rp.To].linkTo(m.Nodes[rp.From]) } } return m, nil } // FreeNodes returns an array of nodes on this map that are free func (m *Map) FreeNodes() []*Node { if m.freeNodes != nil { return m.freeNodes } m.freeNodes = make([]*Node, 0, len(m.Nodes)/2) for _, node := range m.Nodes { if node.IsFree() { m.freeNodes = append(m.freeNodes, node) } } return m.freeNodes } // OwnedNodes returns an array of nodes on this map that are owned func (m *Map) OwnedNodes() []*Node { if m.ownedNodes != nil { return m.ownedNodes } m.ownedNodes = make([]*Node, 0, len(m.Nodes)/2) for _, node := range m.Nodes { if node.IsOwned() { m.ownedNodes = append(m.ownedNodes, node) } } return m.ownedNodes } // EnemyNodes returns an array of nodes on this map that are enemy nodes func (m *Map) EnemyNodes() []*Node { if m.enemyNodes != nil { return 
m.enemyNodes } m.enemyNodes = make([]*Node, 0, len(m.Nodes)/2) for _, node := range m.Nodes { if node.IsEnemy() { m.enemyNodes = append(m.enemyNodes, node) } } return m.enemyNodes } // ControlledNodes returns an array of nodes on this map that are controlled by the current player func (m *Map) ControlledNodes() []*Node { if m.controlledNodes != nil { return m.controlledNodes } m.controlledNodes = make([]*Node, 0, len(m.Nodes)/2) for _, node := range m.Nodes { if node.IsControlled() { m.controlledNodes = append(m.controlledNodes, node) } } return m.controlledNodes }
map.go
0.604049
0.516595
map.go
starcoder
package pattern

import (
	"math"

	"github.com/calbim/ray-tracer/src/color"
	"github.com/calbim/ray-tracer/src/matrix"
	"github.com/calbim/ray-tracer/src/tuple"
)

//Pattern is the interface implemented by all color patterns. It exposes the
//pattern-space transform and the color at a point (the point is expected to
//already be in pattern space; see AtObject).
type Pattern interface {
	GetTransform() *matrix.Matrix
	SetTransform(m *matrix.Matrix)
	PatternAt(point tuple.Tuple) *color.Color
}

//Stripe pattern: alternates colors a and b in unit-wide bands along X.
type Stripe struct {
	a         color.Color
	b         color.Color
	Transform *matrix.Matrix
}

//Gradient pattern: linearly blends from a to b over each unit of X.
type Gradient struct {
	a         color.Color
	b         color.Color
	Transform *matrix.Matrix
}

//Ring pattern: alternates a and b in concentric rings in the XZ plane.
type Ring struct {
	a         color.Color
	b         color.Color
	Transform *matrix.Matrix
}

//Checkers pattern: 3D checkerboard alternating a and b per unit cube.
type Checkers struct {
	a         color.Color
	b         color.Color
	Transform *matrix.Matrix
}

//RadialGradient pattern: blends from a to b with radial distance in XZ.
type RadialGradient struct {
	a         color.Color
	b         color.Color
	Transform *matrix.Matrix
}

//Object struct holds the world transform of the shape a pattern is applied to.
type Object struct {
	Transform *matrix.Matrix
}

//NewObject returns a new object with the identity transform.
func NewObject() Object {
	return Object{
		Transform: matrix.Identity,
	}
}

//SetTransform sets an object's transform
func (o *Object) SetTransform(transform *matrix.Matrix) {
	o.Transform = transform
}

//NewStripe returns a stripe pattern with the identity transform.
func NewStripe(a color.Color, b color.Color) *Stripe {
	return &Stripe{a: a, b: b, Transform: matrix.Identity}
}

//GetTransform returns a stripe's transformation matrix
func (p *Stripe) GetTransform() *matrix.Matrix {
	return p.Transform
}

//SetTransform sets a stripe's transformation matrix
func (p *Stripe) SetTransform(m *matrix.Matrix) {
	p.Transform = m
}

//PatternAt returns the color of a stripe at a point: a when floor(X) is
//even, b otherwise. (Go's % yields negative remainders for negative bands,
//but the == 0 test still alternates correctly.)
func (p *Stripe) PatternAt(point tuple.Tuple) *color.Color {
	if int(math.Floor(point.X))%2 == 0 {
		return &p.a
	}
	return &p.b
}

//NewGradient returns a gradient pattern with the identity transform.
func NewGradient(a color.Color, b color.Color) *Gradient {
	return &Gradient{a: a, b: b, Transform: matrix.Identity}
}

//GetTransform returns a gradient's transformation matrix
func (p *Gradient) GetTransform() *matrix.Matrix {
	return p.Transform
}

//SetTransform sets a gradient's transformation matrix
func (p *Gradient) SetTransform(m *matrix.Matrix) {
	p.Transform = m
}

//PatternAt returns the color of a gradient at a point, interpolating from
//a to b by the fractional part of X.
func (p *Gradient) PatternAt(point tuple.Tuple) *color.Color {
	diff := p.b.Subtract(p.a)
	c := p.a.Add(diff.Multiply(point.X - math.Floor(point.X)))
	return &c
}

//NewRing returns a ring pattern with the identity transform.
func NewRing(a color.Color, b color.Color) *Ring {
	return &Ring{a: a, b: b, Transform: matrix.Identity}
}

//GetTransform returns a ring's transformation matrix
func (p *Ring) GetTransform() *matrix.Matrix {
	return p.Transform
}

//SetTransform sets a ring's transformation matrix
func (p *Ring) SetTransform(m *matrix.Matrix) {
	p.Transform = m
}

//PatternAt returns the color of a ring at a point: a when the floored
//radial distance in the XZ plane is even, b otherwise.
func (p *Ring) PatternAt(point tuple.Tuple) *color.Color {
	v := int(math.Floor(math.Sqrt(point.X*point.X + point.Z*point.Z)))
	if v%2 == 0 {
		return &p.a
	}
	return &p.b
}

//NewCheckers returns a checkers pattern with the identity transform.
func NewCheckers(a color.Color, b color.Color) *Checkers {
	return &Checkers{a: a, b: b, Transform: matrix.Identity}
}

//GetTransform returns a checkers transformation matrix
func (p *Checkers) GetTransform() *matrix.Matrix {
	return p.Transform
}

//SetTransform sets a checkers transformation matrix
func (p *Checkers) SetTransform(m *matrix.Matrix) {
	p.Transform = m
}

//PatternAt returns the color of a checkers pattern at a point: a when the
//sum of the floored coordinates is even, b otherwise.
func (p *Checkers) PatternAt(point tuple.Tuple) *color.Color {
	v := int(math.Floor(point.X) + math.Floor(point.Y) + math.Floor(point.Z))
	if v%2 == 0 {
		return &p.a
	}
	return &p.b
}

//NewRadialGradient returns a radial gradient pattern with the identity transform.
func NewRadialGradient(a color.Color, b color.Color) *RadialGradient {
	return &RadialGradient{a: a, b: b, Transform: matrix.Identity}
}

//GetTransform returns a radial gradient's transformation matrix
func (p *RadialGradient) GetTransform() *matrix.Matrix {
	return p.Transform
}

//SetTransform sets a radial gradient's transformation matrix
func (p *RadialGradient) SetTransform(m *matrix.Matrix) {
	p.Transform = m
}

//PatternAt returns the color of a radial gradient at a point, interpolating
//from a to b by the fractional part of the XZ radial distance.
func (p *RadialGradient) PatternAt(point tuple.Tuple) *color.Color {
	v := math.Sqrt(point.X*point.X + point.Z*point.Z)
	diff := p.b.Subtract(p.a)
	c := p.a.Add(diff.Multiply(v - math.Floor(v)))
	return &c
}

//AtObject returns the color of a pattern at a world-space point on an
//object: the point is converted to object space, then to pattern space,
//before sampling.
//NOTE(review): both Inverse() errors are silently discarded — presumably the
//transforms are always invertible here; confirm, or non-invertible transforms
//will sample with a zero/garbage matrix.
func AtObject(p Pattern, o Object, point tuple.Tuple) *color.Color {
	oInv, _ := o.Transform.Inverse()
	point = oInv.MultiplyTuple(point)
	pInv, _ := p.GetTransform().Inverse()
	point = pInv.MultiplyTuple(point)
	return p.PatternAt(point)
}
src/pattern/pattern.go
0.898869
0.618204
pattern.go
starcoder
package graph

import (
	tf "github.com/tensorflow/tensorflow/tensorflow/go"
)

// DataType holds the type for a scalar value. E.g., one slot in a tensor.
type DataType tf.DataType

const (
	Unknown    DataType = DataType(0)
	Float      DataType = DataType(tf.Float)
	Double     DataType = DataType(tf.Double)
	Int32      DataType = DataType(tf.Int32)
	Uint32     DataType = DataType(tf.Uint32)
	Uint8      DataType = DataType(tf.Uint8)
	Int16      DataType = DataType(tf.Int16)
	Int8       DataType = DataType(tf.Int8)
	String     DataType = DataType(tf.String)
	Complex64  DataType = DataType(tf.Complex64)
	Complex    DataType = DataType(tf.Complex)
	Int64      DataType = DataType(tf.Int64)
	Uint64     DataType = DataType(tf.Uint64)
	Bool       DataType = DataType(tf.Bool)
	Qint8      DataType = DataType(tf.Qint8)
	Quint8     DataType = DataType(tf.Quint8)
	Qint32     DataType = DataType(tf.Qint32)
	Bfloat16   DataType = DataType(tf.Bfloat16)
	Qint16     DataType = DataType(tf.Qint16)
	Quint16    DataType = DataType(tf.Quint16)
	Uint16     DataType = DataType(tf.Uint16)
	Complex128 DataType = DataType(tf.Complex128)
	Half       DataType = DataType(tf.Half)
)

// String returns the name of the data type, or "Unknown" for unrecognized
// values. It is the inverse of GetDataType.
// NOTE(review): there is no Complex case — presumably tf.Complex shares
// tf.Complex64's value (a duplicate case would not compile); confirm against
// the tensorflow/go bindings.
func (d DataType) String() string {
	switch d {
	case Float:
		return "Float"
	case Double:
		return "Double"
	case Int32:
		return "Int32"
	case Uint32:
		return "Uint32"
	case Uint8:
		return "Uint8"
	case Int16:
		return "Int16"
	case Int8:
		return "Int8"
	case String:
		return "String"
	case Complex64:
		return "Complex64"
	case Int64:
		return "Int64"
	case Uint64:
		return "Uint64"
	case Bool:
		return "Bool"
	case Qint8:
		return "Qint8"
	case Quint8:
		return "Quint8"
	case Qint32:
		return "Qint32"
	case Bfloat16:
		return "Bfloat16"
	case Qint16:
		return "Qint16"
	case Quint16:
		return "Quint16"
	case Uint16:
		return "Uint16"
	case Complex128:
		return "Complex128"
	case Half:
		return "Half"
	}
	return "Unknown"
}

// ByteCount returns a per-element byte count for the data type, or 0 for
// unrecognized values.
// NOTE(review): several entries are double or quadruple the nominal
// in-memory widths (Float→8 rather than 4, Double→16, Complex64→32,
// Complex128→64, Int32/Uint32/Qint32→8, Int64/Uint64→16) while the 8- and
// 16-bit types use their nominal sizes. This may be an intentional
// overhead/estimation factor — confirm before using these values as exact
// element widths.
func (d DataType) ByteCount() int {
	switch d {
	case Float:
		return 8
	case Double:
		return 16
	case Int32:
		return 8
	case Uint32:
		return 8
	case Uint8:
		return 1
	case Int16:
		return 2
	case Int8:
		return 1
	case String:
		return 1
	case Complex64:
		return 32
	case Int64:
		return 16
	case Uint64:
		return 16
	case Bool:
		return 1
	case Qint8:
		return 1
	case Quint8:
		return 1
	case Qint32:
		return 8
	case Bfloat16:
		return 2
	case Qint16:
		return 2
	case Quint16:
		return 2
	case Uint16:
		return 2
	case Complex128:
		return 64
	case Half:
		return 2
	}
	return 0
}

// GetDataType maps a type name (as produced by String) back to its
// DataType, returning Unknown for unrecognized names. The extra "Complex"
// case makes it a superset of String's output.
func GetDataType(s string) DataType {
	switch s {
	case "Float":
		return Float
	case "Double":
		return Double
	case "Int32":
		return Int32
	case "Uint32":
		return Uint32
	case "Uint8":
		return Uint8
	case "Int16":
		return Int16
	case "Int8":
		return Int8
	case "String":
		return String
	case "Complex64":
		return Complex64
	case "Complex":
		return Complex
	case "Int64":
		return Int64
	case "Uint64":
		return Uint64
	case "Bool":
		return Bool
	case "Qint8":
		return Qint8
	case "Quint8":
		return Quint8
	case "Qint32":
		return Qint32
	case "Bfloat16":
		return Bfloat16
	case "Qint16":
		return Qint16
	case "Quint16":
		return Quint16
	case "Uint16":
		return Uint16
	case "Complex128":
		return Complex128
	case "Half":
		return Half
	}
	return Unknown
}
graph/type.go
0.62498
0.751192
type.go
starcoder
package gm64

import (
	"fmt"
	"math"
)

// Vec2 is a 2-component float64 vector.
type Vec2 [2]float64

// Len returns the Euclidean length of the vector.
func (v Vec2) Len() float64 {
	return math.Hypot(v[0], v[1])
}

// Normalize returns the unit vector in the direction of v.
// A zero vector yields non-finite components.
func (v Vec2) Normalize() Vec2 {
	l := 1.0 / v.Len()
	return Vec2{v[0] * l, v[1] * l}
}

// Dot returns the dot product of v1 and v2.
func (v1 Vec2) Dot(v2 Vec2) float64 {
	return v1[0]*v2[0] + v1[1]*v2[1]
}

// Cross returns the scalar (z-component) cross product of v1 and v2.
func (v1 Vec2) Cross(v2 Vec2) float64 {
	return v1[0]*v2[1] - v1[1]*v2[0]
}

// Add returns the component-wise sum v1 + v2.
func (v1 Vec2) Add(v2 Vec2) Vec2 {
	return Vec2{v1[0] + v2[0], v1[1] + v2[1]}
}

// Sub returns the component-wise difference v1 - v2.
func (v1 Vec2) Sub(v2 Vec2) Vec2 {
	return Vec2{v1[0] - v2[0], v1[1] - v2[1]}
}

// Mul returns v scaled by c.
func (v Vec2) Mul(c float64) Vec2 {
	return Vec2{v[0] * c, v[1] * c}
}

// Elem returns the components of v.
func (v Vec2) Elem() (x, y float64) {
	return v[0], v[1]
}

// Vec3 is a 3-component float64 vector.
type Vec3 [3]float64

// Len returns the Euclidean length of the vector.
func (v Vec3) Len() float64 {
	return math.Sqrt(v[0]*v[0] + v[1]*v[1] + v[2]*v[2])
}

// Normalize returns the unit vector in the direction of v.
// A zero vector yields non-finite components.
func (v Vec3) Normalize() Vec3 {
	l := 1.0 / v.Len()
	return Vec3{v[0] * l, v[1] * l, v[2] * l}
}

// Dot returns the dot product of v1 and v2.
func (v1 Vec3) Dot(v2 Vec3) float64 {
	return v1[0]*v2[0] + v1[1]*v2[1] + v1[2]*v2[2]
}

// Cross returns the vector cross product v1 × v2.
// Bug fix: the z component was previously computed as
// v1[0]*v2[1] - v2[1]*v1[0], which is identically zero; the correct
// determinant term is v1[0]*v2[1] - v1[1]*v2[0].
func (v1 Vec3) Cross(v2 Vec3) Vec3 {
	return Vec3{
		v1[1]*v2[2] - v1[2]*v2[1],
		v1[2]*v2[0] - v1[0]*v2[2],
		v1[0]*v2[1] - v1[1]*v2[0],
	}
}

// Add returns the component-wise sum v1 + v2.
func (v1 Vec3) Add(v2 Vec3) Vec3 {
	return Vec3{v1[0] + v2[0], v1[1] + v2[1], v1[2] + v2[2]}
}

// Sub returns the component-wise difference v1 - v2.
func (v1 Vec3) Sub(v2 Vec3) Vec3 {
	return Vec3{v1[0] - v2[0], v1[1] - v2[1], v1[2] - v2[2]}
}

// Mul returns v scaled by c.
func (v Vec3) Mul(c float64) Vec3 {
	return Vec3{v[0] * c, v[1] * c, v[2] * c}
}

// Elem returns the components of v.
func (v Vec3) Elem() (x, y, z float64) {
	return v[0], v[1], v[2]
}

// Vec4 is a 4-component float64 vector.
type Vec4 [4]float64

// Len returns the Euclidean length of the vector.
func (v Vec4) Len() float64 {
	return math.Sqrt(v[0]*v[0] + v[1]*v[1] + v[2]*v[2] + v[3]*v[3])
}

// Normalize returns the unit vector in the direction of v.
// A zero vector yields non-finite components.
func (v Vec4) Normalize() Vec4 {
	l := 1.0 / v.Len()
	return Vec4{v[0] * l, v[1] * l, v[2] * l, v[3] * l}
}

// Dot returns the dot product of v1 and v2.
func (v1 Vec4) Dot(v2 Vec4) float64 {
	return v1[0]*v2[0] + v1[1]*v2[1] + v1[2]*v2[2] + v1[3]*v2[3]
}

// Add returns the component-wise sum v1 + v2.
func (v1 Vec4) Add(v2 Vec4) Vec4 {
	return Vec4{v1[0] + v2[0], v1[1] + v2[1], v1[2] + v2[2], v1[3] + v2[3]}
}

// Sub returns the component-wise difference v1 - v2.
func (v1 Vec4) Sub(v2 Vec4) Vec4 {
	return Vec4{v1[0] - v2[0], v1[1] - v2[1], v1[2] - v2[2], v1[3] - v2[3]}
}

// Mul returns v scaled by c.
func (v Vec4) Mul(c float64) Vec4 {
	return Vec4{v[0] * c, v[1] * c, v[2] * c, v[3] * c}
}

// Elem returns the components of v.
func (v Vec4) Elem() (x, y, z, w float64) {
	return v[0], v[1], v[2], v[3]
}

// Vec is a dynamically sized float64 vector of length N.
type Vec struct {
	N    int
	Data []float64
}

// NewVec returns a constructor for vectors of length n. The constructor
// accepts up to n initial values (the remainder is zero-filled) and panics
// if given more. NewVec itself panics if n is not positive.
func NewVec(n int) func(data ...float64) *Vec {
	if n <= 0 {
		err := fmt.Errorf("the n parameter must be positive (got %d)", n)
		panic(err)
	}
	ctor := func(data ...float64) *Vec {
		if len(data) > n {
			err := fmt.Errorf("the number of input values must not be greater than n (%d)", n)
			panic(err)
		}
		o := &Vec{
			N:    n,
			Data: make([]float64, n),
		}
		copy(o.Data, data)
		return o
	}
	return ctor
}

// Copy returns a deep copy of v.
func (v *Vec) Copy() *Vec {
	cp := &Vec{
		N:    v.N,
		Data: make([]float64, v.N),
	}
	copy(cp.Data, v.Data)
	return cp
}

// Add returns the component-wise sum v1 + v2.
// It panics when the operands differ in size.
func (v1 *Vec) Add(v2 *Vec) *Vec {
	if v1.N != v2.N {
		err := fmt.Errorf(
			"the first and second vectors have different sizes (got %d and %d)",
			v1.N, v2.N,
		)
		panic(err)
	}
	o := &Vec{
		N:    v1.N,
		Data: make([]float64, v1.N),
	}
	for i := 0; i < o.N; i++ {
		o.Data[i] = v1.Data[i] + v2.Data[i]
	}
	return o
}

// Sub returns the component-wise difference v1 - v2.
// It panics when the operands differ in size.
func (v1 *Vec) Sub(v2 *Vec) *Vec {
	if v1.N != v2.N {
		err := fmt.Errorf(
			"the first and second vectors have different sizes (got %d and %d)",
			v1.N, v2.N,
		)
		panic(err)
	}
	o := &Vec{
		N:    v1.N,
		Data: make([]float64, v1.N),
	}
	for i := 0; i < o.N; i++ {
		o.Data[i] = v1.Data[i] - v2.Data[i]
	}
	return o
}

// Mul returns v scaled by c.
func (v *Vec) Mul(c float64) *Vec {
	o := &Vec{
		N:    v.N,
		Data: make([]float64, v.N),
	}
	for i := 0; i < o.N; i++ {
		o.Data[i] = v.Data[i] * c
	}
	return o
}

// Len returns the Euclidean length of the vector.
func (v *Vec) Len() float64 {
	sum := float64(0)
	for i := 0; i < v.N; i++ {
		sum += v.Data[i] * v.Data[i]
	}
	return math.Sqrt(sum)
}

// Normalize returns the unit vector in the direction of v.
// A zero vector yields non-finite components.
func (v *Vec) Normalize() *Vec {
	o := &Vec{
		N:    v.N,
		Data: make([]float64, v.N),
	}
	l := 1.0 / v.Len()
	for i := 0; i < o.N; i++ {
		o.Data[i] = v.Data[i] * l
	}
	return o
}

// Dot returns the dot product of v1 and v2.
// It panics when the operands differ in size.
func (v1 *Vec) Dot(v2 *Vec) float64 {
	if v1.N != v2.N {
		err := fmt.Errorf(
			"the first and second vectors have different sizes (got %d and %d)",
			v1.N, v2.N,
		)
		panic(err)
	}
	sum := float64(0)
	for i := 0; i < v1.N; i++ {
		sum += v1.Data[i] * v2.Data[i]
	}
	return sum
}

// Row returns row i of the matrix as a Vec. It panics on a negative or
// out-of-range index.
func (m *Mat) Row(i int) *Vec {
	if i < 0 {
		err := fmt.Errorf("the i parameter must be non-negative (got %d)", i)
		panic(err)
	}
	if i >= m.M {
		err := fmt.Errorf("trying to get a row out of matrix bounds (got row index %d, while matrix has only %d rows)", i, m.M)
		panic(err)
	}
	return NewVec(m.N)(m.Data[i*m.N : (i+1)*m.N]...)
}

// Col returns column j of the matrix as a Vec. It panics on a negative or
// out-of-range index.
func (m *Mat) Col(j int) *Vec {
	if j < 0 {
		err := fmt.Errorf("the j parameter must be non-negative (got %d)", j)
		panic(err)
	}
	if j >= m.N {
		err := fmt.Errorf("trying to get a column out of matrix bounds (got column index %d, while matrix has only %d columns)", j, m.N)
		panic(err)
	}
	col := make([]float64, m.M)
	for i := 0; i < m.M; i++ {
		col[i] = m.Data[j+i*m.N]
	}
	return NewVec(m.M)(col...)
}

// String returns the vector's data formatted as a slice.
func (v *Vec) String() string {
	return fmt.Sprint(v.Data)
}
gm64/vec.go
0.746601
0.588682
vec.go
starcoder
package boxmodel

import (
	"github.com/adamcolton/geom/d2"
	"github.com/adamcolton/geom/d2/affine"
	"github.com/adamcolton/geom/d2/curve/line"
	"github.com/adamcolton/geom/d2/shape"
	"github.com/adamcolton/geom/d2/shape/box"
)

// BoxModel represents a shape that has been encoded as a set of boxes.
type BoxModel interface {
	// Inside returns the number of boxes inside the shape.
	Inside() int
	// InsideCursor returns a cursor that will iterate over all the boxes
	// inside the shape.
	InsideCursor() (Iterator, box.Box, bool)
	// Perimeter returns the number of boxes that contain the perimeter.
	Perimeter() int
	// PerimeterCursor returns a cursor that will iterate over all the boxes
	// on the perimeter of the shape.
	PerimeterCursor() (Iterator, box.Box, bool)
	// Outside returns the number of boxes outside the shape.
	Outside() int
	// OutsideCursor returns a cursor that will iterate over all the boxes
	// outside the shape.
	OutsideCursor() (Iterator, box.Box, bool)
	// Area is an approximation of the area of the shape. It is the sum of all
	// the boxes inside the shape and half the area of the boxes on the
	// perimeter.
	Area() float64
	// SignedArea is the same as Area.
	SignedArea() float64
	// Centroid is the center of mass of the shape.
	Centroid() d2.Pt
	// tree exposes the underlying quadtree to the package.
	tree() *tree
}

// Iterator iterates over a collection of boxes
type Iterator interface {
	Next() (b box.Box, done bool)
}

// New returns a BoxModel representing the shape, built by recursively
// subdividing the shape's bounding box to the given depth and classifying
// each box as inside, outside, or on the perimeter.
func New(s shape.Shape, depth int) BoxModel {
	b := box.New(s.BoundingBox())
	t := &tree{
		start: firstParent,
		// Capacity heuristic: 4 nodes per level of subdivision.
		nodes: make([]children, 1, 1<<(depth+2)),
		depth: depth,
		// h and v span the bounding box horizontally and vertically; they
		// parameterize box positions during subdivision.
		h: line.Line{
			T0: b[0],
			D:  d2.V{b[1].X - b[0].X, 0},
		},
		v: line.Line{
			T0: b[0],
			D:  d2.V{0, b[1].Y - b[0].Y},
		},
	}
	root := t.root()
	// scan subdivides to the requested depth; tag classifies the boxes.
	root.scan(s, depth)
	root.tag(s)
	// Accumulate area and centroid; the centroid is weighted over the
	// inside + perimeter boxes.
	sm := &sum{
		centroid: affine.NewWeighted(root.inside + root.perimeter),
	}
	root.sum(sm)
	// sm.area appears to be a fraction of the bounding box — scaled here to
	// absolute units. (NOTE(review): confirm against the tree/sum internals,
	// which are defined elsewhere in this package.)
	root.area = sm.area * b.Area()
	root.centroid = sm.centroid.Centroid()
	return root.tree
}
d2/shape/boxmodel/boxmodel.go
0.740831
0.519521
boxmodel.go
starcoder
package carbon

import (
	"time"
)

// CreateFromTimestamp creates a Carbon instance from a given timestamp with second.
// The optional timezone (last wins) only sets the instance's location.
func (c Carbon) CreateFromTimestamp(timestamp int64, timezone ...string) Carbon {
	if len(timezone) > 0 {
		c.loc, c.Error = getLocationByTimezone(timezone[len(timezone)-1])
	}
	if c.Error != nil {
		return c
	}
	c.time = time.Unix(timestamp, 0)
	return c
}

// CreateFromTimestamp creates a Carbon instance from a given timestamp with second.
func CreateFromTimestamp(timestamp int64, timezone ...string) Carbon {
	return NewCarbon().CreateFromTimestamp(timestamp, timezone...)
}

// CreateFromTimestampMilli creates a Carbon instance from a given timestamp with millisecond.
// The millisecond remainder is converted to nanoseconds for time.Unix.
func (c Carbon) CreateFromTimestampMilli(timestamp int64, timezone ...string) Carbon {
	if len(timezone) > 0 {
		c.loc, c.Error = getLocationByTimezone(timezone[len(timezone)-1])
	}
	if c.Error != nil {
		return c
	}
	c.time = time.Unix(timestamp/1e3, (timestamp%1e3)*1e6)
	return c
}

// CreateFromTimestampMilli creates a Carbon instance from a given timestamp with millisecond.
func CreateFromTimestampMilli(timestamp int64, timezone ...string) Carbon {
	return NewCarbon().CreateFromTimestampMilli(timestamp, timezone...)
}

// CreateFromTimestampMicro creates a Carbon instance from a given timestamp with microsecond.
func (c Carbon) CreateFromTimestampMicro(timestamp int64, timezone ...string) Carbon {
	if len(timezone) > 0 {
		c.loc, c.Error = getLocationByTimezone(timezone[len(timezone)-1])
	}
	if c.Error != nil {
		return c
	}
	c.time = time.Unix(timestamp/1e6, (timestamp%1e6)*1e3)
	return c
}

// CreateFromTimestampMicro creates a Carbon instance from a given timestamp with microsecond.
func CreateFromTimestampMicro(timestamp int64, timezone ...string) Carbon {
	return NewCarbon().CreateFromTimestampMicro(timestamp, timezone...)
}

// CreateFromTimestampNano creates a Carbon instance from a given timestamp with nanosecond.
func (c Carbon) CreateFromTimestampNano(timestamp int64, timezone ...string) Carbon {
	if len(timezone) > 0 {
		c.loc, c.Error = getLocationByTimezone(timezone[len(timezone)-1])
	}
	if c.Error != nil {
		return c
	}
	c.time = time.Unix(timestamp/1e9, timestamp%1e9)
	return c
}

// CreateFromTimestampNano creates a Carbon instance from a given timestamp with nanosecond.
func CreateFromTimestampNano(timestamp int64, timezone ...string) Carbon {
	return NewCarbon().CreateFromTimestampNano(timestamp, timezone...)
}

// CreateFromDateTime creates a Carbon instance from a given date and time.
// The nanosecond component is taken from the current moment.
func (c Carbon) CreateFromDateTime(year, month, day, hour, minute, second int, timezone ...string) Carbon {
	now := c.Now(timezone...)
	return c.create(year, month, day, hour, minute, second, now.Nanosecond(), timezone...)
}

// CreateFromDateTime creates a Carbon instance from a given date and time.
func CreateFromDateTime(year, month, day, hour, minute, second int, timezone ...string) Carbon {
	return NewCarbon().CreateFromDateTime(year, month, day, hour, minute, second, timezone...)
}

// CreateFromDateTimeMilli creates a Carbon instance from a given date and time with millisecond.
func (c Carbon) CreateFromDateTimeMilli(year, month, day, hour, minute, second, millisecond int, timezone ...string) Carbon {
	return c.create(year, month, day, hour, minute, second, millisecond*1e6, timezone...)
}

// CreateFromDateTimeMilli creates a Carbon instance from a given date and time with millisecond.
func CreateFromDateTimeMilli(year, month, day, hour, minute, second, millisecond int, timezone ...string) Carbon {
	return NewCarbon().CreateFromDateTimeMilli(year, month, day, hour, minute, second, millisecond, timezone...)
}

// CreateFromDateTimeMicro creates a Carbon instance from a given date and time with microsecond.
func (c Carbon) CreateFromDateTimeMicro(year, month, day, hour, minute, second, microsecond int, timezone ...string) Carbon {
	return c.create(year, month, day, hour, minute, second, microsecond*1e3, timezone...)
}

// CreateFromDateTimeMicro creates a Carbon instance from a given date and time with microsecond.
func CreateFromDateTimeMicro(year, month, day, hour, minute, second, microsecond int, timezone ...string) Carbon {
	return NewCarbon().CreateFromDateTimeMicro(year, month, day, hour, minute, second, microsecond, timezone...)
}

// CreateFromDateTimeNano creates a Carbon instance from a given date and time with nanosecond.
func (c Carbon) CreateFromDateTimeNano(year, month, day, hour, minute, second, nanosecond int, timezone ...string) Carbon {
	return c.create(year, month, day, hour, minute, second, nanosecond, timezone...)
}

// CreateFromDateTimeNano creates a Carbon instance from a given date and time with nanosecond.
func CreateFromDateTimeNano(year, month, day, hour, minute, second, nanosecond int, timezone ...string) Carbon {
	return NewCarbon().CreateFromDateTimeNano(year, month, day, hour, minute, second, nanosecond, timezone...)
}

// CreateFromDate creates a Carbon instance from a given date.
// The time-of-day (and nanosecond) components are taken from the current moment.
func (c Carbon) CreateFromDate(year, month, day int, timezone ...string) Carbon {
	now := c.Now(timezone...)
	hour, minute, second := now.Time()
	return c.create(year, month, day, hour, minute, second, now.Nanosecond(), timezone...)
}

// CreateFromDate creates a Carbon instance from a given date.
func CreateFromDate(year, month, day int, timezone ...string) Carbon {
	return NewCarbon().CreateFromDate(year, month, day, timezone...)
}

// CreateFromTime creates a Carbon instance from a given time.
// The date (and nanosecond) components are taken from the current moment.
func (c Carbon) CreateFromTime(hour, minute, second int, timezone ...string) Carbon {
	now := c.Now(timezone...)
	year, month, day := now.Date()
	return c.create(year, month, day, hour, minute, second, now.Nanosecond(), timezone...)
}

// CreateFromTime creates a Carbon instance from a given time.
func CreateFromTime(hour, minute, second int, timezone ...string) Carbon {
	return NewCarbon().CreateFromTime(hour, minute, second, timezone...)
}

// create builds a Carbon instance from a full date and time with nanosecond,
// applying the optional timezone (last wins) first so time.Date uses the
// requested location.
func (c Carbon) create(year, month, day, hour, minute, second, nanosecond int, timezone ...string) Carbon {
	if len(timezone) > 0 {
		c.loc, c.Error = getLocationByTimezone(timezone[len(timezone)-1])
	}
	if c.Error != nil {
		return c
	}
	c.time = time.Date(year, time.Month(month), day, hour, minute, second, nanosecond, c.loc)
	return c
}
creator.go
0.692226
0.557364
creator.go
starcoder
package vat

import (
	"fmt"
	"regexp"
	"sort"
)

// ErrInvalidVATRegion implements the error interface and is returned when
// ValidInRegion fails to find the requested region. Blank "" regions will not
// fail the region lookup.
type ErrInvalidVATRegion struct {
	region string
}

// Error reports the invalid region along with the full list of valid regions.
func (e ErrInvalidVATRegion) Error() string {
	return fmt.Sprintf("invalid vat region: %s. valid regions are (case-insensitive): %q", e.region, validRegions)
}

// regionPatterns maps a region code to the regular expressions a VAT number
// may match for that region. A number is valid for a region if it matches any
// of the region's patterns.
// NOTE(review): some character classes contain literal '|', '(' and ')'
// (e.g. `[0-5|9]` in CY, `[(A-H)|(J-N)|(P-Z)]` in FR/IE) — inside a class
// these match the characters themselves, not alternation/grouping. They are
// almost certainly meant as plain ranges; confirm against the official
// per-country VAT formats before tightening, since changing them alters
// which inputs validate.
var regionPatterns = map[string][]*regexp.Regexp{
	"AT":  []*regexp.Regexp{regexp.MustCompile(`^(AT)?U(\d{8})$`)},
	"BE":  []*regexp.Regexp{regexp.MustCompile(`^(BE)?(0?\d{9})$`)},
	"BG":  []*regexp.Regexp{regexp.MustCompile(`^(BG)?(\d{9,10})$`)},
	"CHE": []*regexp.Regexp{regexp.MustCompile(`^(CHE)?(\d{9})(MWST)?$`)},
	"CY":  []*regexp.Regexp{regexp.MustCompile(`^(CY)?([0-5|9]\d{7}[A-Z])$`)},
	"CZ":  []*regexp.Regexp{regexp.MustCompile(`^(CZ)?(\d{8,10})(\d{3})?$`)},
	"DE":  []*regexp.Regexp{regexp.MustCompile(`^(DE)?([1-9]\d{8})$`)},
	"DK":  []*regexp.Regexp{regexp.MustCompile(`^(DK)?(\d{8})$`)},
	"EE":  []*regexp.Regexp{regexp.MustCompile(`^(EE)?(10\d{7})$`)},
	"EL":  []*regexp.Regexp{regexp.MustCompile(`^(EL)?(\d{9})$`)},
	// Greece uses the EL prefix, so GR shares EL's pattern.
	"GR": []*regexp.Regexp{regexp.MustCompile(`^(EL)?(\d{9})$`)},
	"ES": []*regexp.Regexp{
		regexp.MustCompile(`^(ES)?([A-Z]\d{8})$`),
		regexp.MustCompile(`^(ES)?([A-H|N-S|W]\d{7}[A-J])$`),
		regexp.MustCompile(`^(ES)?([0-9|Y|Z]\d{7}[A-Z])$`),
		regexp.MustCompile(`^(ES)?([K|L|M|X]\d{7}[A-Z])$`),
	},
	"FI": []*regexp.Regexp{regexp.MustCompile(`^(FI)?(\d{8})$`)},
	"FR": []*regexp.Regexp{
		regexp.MustCompile(`^(FR)?(\d{11})$`),
		regexp.MustCompile(`^(FR)?([(A-H)|(J-N)|(P-Z)]\d{10})$`),
		regexp.MustCompile(`^(FR)?(\d[(A-H)|(J-N)|(P-Z)]\d{9})$`),
		regexp.MustCompile(`^(FR)?([(A-H)|(J-N)|(P-Z)]{2}\d{9})$`),
	},
	"GB": []*regexp.Regexp{
		regexp.MustCompile(`^(GB)?(\d{9})$`),
		regexp.MustCompile(`^(GB)?(\d{12})$`),
		regexp.MustCompile(`^(GB)?(GD\d{3})$`),
		regexp.MustCompile(`^(GB)?(HA\d{3})$`),
	},
	"HR": []*regexp.Regexp{
		regexp.MustCompile(`^(HR)?(\d{11})$`),
	},
	"HU": []*regexp.Regexp{
		regexp.MustCompile(`^(HU)?(\d{8})$`),
	},
	"IE": []*regexp.Regexp{
		regexp.MustCompile(`^(IE)?(\d{7}[A-W])(W)?$`),
		regexp.MustCompile(`^(IE)?([7-9][A-Z\*\+)]\d{5}[A-W])$`),
		regexp.MustCompile(`^(IE)?(\d{7}[A-W][AH])$`),
	},
	"IT": []*regexp.Regexp{
		regexp.MustCompile(`^(IT)?(\d{11})$`),
	},
	"LV": []*regexp.Regexp{
		regexp.MustCompile(`^(LV)?(\d{11})$`),
	},
	"LT": []*regexp.Regexp{
		regexp.MustCompile(`^(LT)?(\d{9}|\d{12})$`),
	},
	"LU": []*regexp.Regexp{
		regexp.MustCompile(`^(LU)?(\d{8})$`),
	},
	"MT": []*regexp.Regexp{
		regexp.MustCompile(`^(MT)?([1-9]\d{7})$`),
	},
	"NL": []*regexp.Regexp{
		regexp.MustCompile(`^(NL)?(\d{9})B\d{2}$`),
	},
	"NO": []*regexp.Regexp{
		regexp.MustCompile(`^(NO)?(\d{9})$`),
	},
	"PL": []*regexp.Regexp{
		regexp.MustCompile(`^(PL)?(\d{10})$`),
	},
	"PT": []*regexp.Regexp{
		regexp.MustCompile(`^(PT)?(\d{9})$`),
	},
	"RO": []*regexp.Regexp{
		regexp.MustCompile(`^(RO)?([1-9]\d{1,9})$`),
	},
	"RU": []*regexp.Regexp{
		regexp.MustCompile(`^(RU)?(\d{10}|\d{12})$`),
	},
	"RS": []*regexp.Regexp{
		regexp.MustCompile(`^(RS)?(\d{9})$`),
	},
	"SI": []*regexp.Regexp{
		regexp.MustCompile(`^(SI)?([1-9]\d{7})$`),
	},
	"SK": []*regexp.Regexp{
		regexp.MustCompile(`^(SK)?([1-9]\d[(2-4)|(6-9)]\d{7})$`),
	},
	"SE": []*regexp.Regexp{
		regexp.MustCompile(`^(SE)?(\d{10}01)$`),
	},
	"EU": []*regexp.Regexp{
		regexp.MustCompile(`^(EU)?(\d{9})$`),
	},
}

// validRegions lists every supported region code; it is used in error
// messages.
var validRegions []string

func init() {
	for k := range regionPatterns {
		validRegions = append(validRegions, k)
	}
	// Map iteration order is random, which made ErrInvalidVATRegion's
	// message nondeterministic; sort so the error text is stable.
	sort.Strings(validRegions)
}
region.go
0.541894
0.628379
region.go
starcoder
package compiler type nfa2dfa struct { // The reference NFA. nfa *NFA // Map of subset key to subset. subsetMap map[string]*intset // All subsets in order of creation. subsetList []*intset // Maps subset key to map of symbols to destination subset key. trans map[string]map[int]string subsetToDFA map[string]int startSubset *intset closurePending []int } func NFAToDFA(nfa *NFA) *DFA { estDFAStateCount := nfa.StateCount() * 2 n2d := &nfa2dfa{ nfa: nfa, subsetMap: make(map[string]*intset, estDFAStateCount), trans: make(map[string]map[int]string, estDFAStateCount), subsetToDFA: make(map[string]int), } n2d.startSubset = &intset{} n2d.startSubset.Add(0) n2d.closure(n2d.startSubset) n2d.subsetMap[n2d.startSubset.Key()] = n2d.startSubset n2d.subsetList = append(n2d.subsetList, n2d.startSubset) pending := []*intset{n2d.startSubset} for len(pending) > 0 { var subset *intset subset, pending = pending[len(pending)-1], pending[:len(pending)-1] syms := n2d.symbols(subset) for _, sym := range syms { destSubset := n2d.move(subset, sym) n2d.closure(destSubset) if _, ok := n2d.subsetMap[destSubset.Key()]; !ok { n2d.subsetMap[destSubset.Key()] = destSubset n2d.subsetList = append(n2d.subsetList, destSubset) pending = append(pending, destSubset) } if n2d.trans[subset.Key()] == nil { n2d.trans[subset.Key()] = make(map[int]string) } n2d.trans[subset.Key()][sym] = destSubset.Key() } } return n2d.buildDFA() } func (n2d *nfa2dfa) buildDFA() *DFA { dfa := &DFA{States: make([]DFAState, 0, len(n2d.subsetMap))} buildDFAState := func(subset *intset) { dfaS := DFAState{ NFAStates: make([]int, 0, subset.Len()), } subset.ForEach(func(nfaState int) { dfaS.NFAStates = append(dfaS.NFAStates, nfaState) dfaS.Accepting = dfaS.Accepting || n2d.nfa.States[nfaState].Accepting }) dfa.States = append(dfa.States, dfaS) n2d.subsetToDFA[subset.Key()] = len(dfa.States) - 1 } for _, subset := range n2d.subsetList { buildDFAState(subset) } for subset, dfaStateIndex := range n2d.subsetToDFA { dfaState := 
&dfa.States[dfaStateIndex] for sym, destSubsetKey := range n2d.trans[subset] { if dfaState.Trans == nil { dfaState.Trans = make(map[int]int) } destDFAS := n2d.subsetToDFA[n2d.subsetMap[destSubsetKey].Key()] dfaState.Trans[sym] = destDFAS } } return dfa } func (n2d *nfa2dfa) closure(s *intset) { pending := n2d.closurePending pending = pending[:0] s.ForEach(func(state int) { pending = append(pending, state) }) for len(pending) > 0 { var state int state, pending = pending[len(pending)-1], pending[:len(pending)-1] toStates := n2d.nfa.TransitionsFor(state, Epsilon) for _, toState := range toStates { if !s.Has(toState) { s.Add(toState) pending = append(pending, toState) } } } } func (n2d *nfa2dfa) move(stateSet *intset, sym int) *intset { destStates := &intset{} stateSet.ForEach(func(state int) { for _, destState := range n2d.nfa.States[state].Trans[sym] { destStates.Add(destState) } }) return destStates } func (n2d *nfa2dfa) symbols(s *intset) []int { syms := &intset{} s.ForEach(func(state int) { for sym, _ := range n2d.nfa.States[state].Trans { if sym != Epsilon { syms.Add(sym) } } }) return syms.Items() }
compiler/nfa2dfa.go
0.531696
0.415136
nfa2dfa.go
starcoder
package csv import ( "encoding/csv" "fmt" "io" "sort" "strconv" "github.com/attic-labs/noms/go/d" "github.com/attic-labs/noms/go/types" ) // StringToKind maps names of valid NomsKinds (e.g. Bool, Number, etc) to their associated types.NomsKind var StringToKind = func(kindMap map[types.NomsKind]string) map[string]types.NomsKind { m := map[string]types.NomsKind{} for k, v := range kindMap { m[v] = k } return m }(types.KindToString) // StringsToKinds looks up each element of strs in the StringToKind map and returns a slice of answers func StringsToKinds(strs []string) KindSlice { kinds := make(KindSlice, len(strs)) for i, str := range strs { k, ok := StringToKind[str] if !ok { d.Panic("StringToKind[%s] failed", str) } kinds[i] = k } return kinds } // KindsToStrings looks up each element of kinds in the types.KindToString map and returns a slice of answers func KindsToStrings(kinds KindSlice) []string { strs := make([]string, len(kinds)) for i, k := range kinds { strs[i] = k.String() } return strs } //EscapeStructFieldFromCSV removes special characters and replaces spaces with camelCasing (camel case turns to camelCase) func EscapeStructFieldFromCSV(input string) string { if types.IsValidStructFieldName(input) { return input } return types.CamelCaseFieldName(input) } // MakeStructTemplateFromHeaders creates a struct type from the headers using |kinds| as the type of each field. If |kinds| is empty, default to strings. 
func MakeStructTemplateFromHeaders(headers []string, structName string, kinds KindSlice) (temp types.StructTemplate, fieldOrder []int, kindMap []types.NomsKind) { useStringType := len(kinds) == 0 d.PanicIfFalse(useStringType || len(headers) == len(kinds)) fieldMap := make(map[string]types.NomsKind, len(headers)) origOrder := make(map[string]int, len(headers)) fieldNames := make(sort.StringSlice, len(headers)) for i, key := range headers { fn := EscapeStructFieldFromCSV(key) origOrder[fn] = i kind := types.StringKind if !useStringType { kind = kinds[i] } _, ok := fieldMap[fn] if ok { d.Panic(`Duplicate field name "%s"`, key) } fieldMap[fn] = kind fieldNames[i] = fn } sort.Sort(fieldNames) kindMap = make([]types.NomsKind, len(fieldMap)) fieldOrder = make([]int, len(fieldMap)) for i, fn := range fieldNames { kindMap[i] = fieldMap[fn] fieldOrder[origOrder[fn]] = i } temp = types.MakeStructTemplate(structName, fieldNames) return } // ReadToList takes a CSV reader and reads data into a typed List of structs. // Each row gets read into a struct named structName, described by headers. If // the original data contained headers it is expected that the input reader has // already read those and are pointing at the first data row. // If kinds is non-empty, it will be used to type the fields in the generated // structs; otherwise, they will be left as string-fields. // In addition to the list, ReadToList returns the typeDef of the structs in the // list. func ReadToList(r *csv.Reader, structName string, headers []string, kinds KindSlice, vrw types.ValueReadWriter, limit uint64) (l types.List) { temp, fieldOrder, kindMap := MakeStructTemplateFromHeaders(headers, structName, kinds) valueChan := make(chan types.Value, 128) // TODO: Make this a function param? 
listChan := types.NewStreamingList(vrw, valueChan) cnt := uint64(0) for { row, err := r.Read() if cnt >= limit || err == io.EOF { close(valueChan) break } else if err != nil { panic(err) } cnt++ fields := readFieldsFromRow(row, headers, fieldOrder, kindMap) valueChan <- temp.NewStruct(fields) } return <-listChan } type column struct { ch chan types.Value list <-chan types.List zeroValue types.Value hdr string } // ReadToColumnar takes a CSV reader and reads data from each column into a // separate list. Values from columns in each successive row are appended to the // column-specific lists whose type is described by headers. Finally, a new // "Columnar" struct is created that consists of one field for each column and // each field contains a list of values. // If the original data contained headers it is expected that the input reader // has already read those and are pointing at the first data row. // If kinds is non-empty, it will be used to type the fields in the generated // structs; otherwise, they will be left as string-fields. // In addition to the list, ReadToList returns the typeDef of the structs in the // list. func ReadToColumnar(r *csv.Reader, structName string, headers []string, kinds KindSlice, vrw types.ValueReadWriter, limit uint64) (s types.Struct) { valueChan := make(chan types.Value, 128) // TODO: Make this a function param? 
cols := []column{} fieldOrder := []int{} for i, hdr := range headers { ch := make(chan types.Value, 1024) cols = append(cols, column{ ch: ch, list: types.NewStreamingList(vrw, ch), hdr: hdr, }) fieldOrder = append(fieldOrder, i) } cnt := uint64(0) for { row, err := r.Read() if cnt >= limit || err == io.EOF { close(valueChan) break } else if err != nil { panic(err) } cnt++ fields := readFieldsFromRow(row, headers, fieldOrder, kinds) for i, v := range fields { cols[i].ch <- v } } sd := types.StructData{} for _, col := range cols { close(col.ch) r := vrw.WriteValue(<-col.list) sd[col.hdr] = r } return types.NewStruct("Columnar", sd) } // getFieldIndexByHeaderName takes the collection of headers and the name to search for and returns the index of name within the headers or -1 if not found func getFieldIndexByHeaderName(headers []string, name string) int { for i, header := range headers { if header == name { return i } } return -1 } // getPkIndices takes collection of primary keys as strings and determines if they are integers, if so then use those ints as the indices, otherwise it looks up the strings in the headers to find the indices; returning the collection of int indices representing the primary keys maintaining the order of strPks to the return collection func getPkIndices(strPks []string, headers []string) []int { result := make([]int, len(strPks)) for i, pk := range strPks { pkIdx, ok := strconv.Atoi(pk) if ok == nil { result[i] = pkIdx } else { result[i] = getFieldIndexByHeaderName(headers, pk) } if result[i] < 0 { d.Chk.Fail(fmt.Sprintf("Invalid pk: %v", pk)) } } return result } func readFieldsFromRow(row []string, headers []string, fieldOrder []int, kindMap []types.NomsKind) types.ValueSlice { fields := make(types.ValueSlice, len(headers)) for i, v := range row { if i < len(headers) { fieldOrigIndex := fieldOrder[i] val, err := StringToValue(v, kindMap[fieldOrigIndex]) if err != nil { d.Chk.Fail(fmt.Sprintf("Error parsing value for column '%s': %s", 
headers[i], err)) } fields[fieldOrigIndex] = val } } return fields } // primaryKeyValuesFromFields extracts the values of the primaryKey fields into // array. The values are in the user-specified order. This function returns 2 // objects: // 1) a ValueSlice containing the first n-1 keys. // 2) a single Value which will be used as the key in the leaf map created by // GraphBuilder func primaryKeyValuesFromFields(fields types.ValueSlice, fieldOrder, pkIndices []int) (types.ValueSlice, types.Value) { numPrimaryKeys := len(pkIndices) if numPrimaryKeys == 1 { return nil, fields[fieldOrder[pkIndices[0]]] } keys := make(types.ValueSlice, numPrimaryKeys-1) var value types.Value for i, idx := range pkIndices { k := fields[fieldOrder[idx]] if i < numPrimaryKeys-1 { keys[i] = k } else { value = k } } return keys, value } // ReadToMap takes a CSV reader and reads data into a typed Map of structs. Each // row gets read into a struct named structName, described by headers. If the // original data contained headers it is expected that the input reader has // already read those and are pointing at the first data row. // If kinds is non-empty, it will be used to type the fields in the generated // structs; otherwise, they will be left as string-fields. 
func ReadToMap(r *csv.Reader, structName string, headersRaw []string, primaryKeys []string, kinds KindSlice, vrw types.ValueReadWriter, limit uint64) types.Map { temp, fieldOrder, kindMap := MakeStructTemplateFromHeaders(headersRaw, structName, kinds) pkIndices := getPkIndices(primaryKeys, headersRaw) d.Chk.True(len(pkIndices) >= 1, "No primary key defined when reading into map") gb := types.NewGraphBuilder(vrw, types.MapKind) cnt := uint64(0) for { row, err := r.Read() if cnt >= limit || err == io.EOF { break } else if err != nil { panic(err) } cnt++ fields := readFieldsFromRow(row, headersRaw, fieldOrder, kindMap) graphKeys, mapKey := primaryKeyValuesFromFields(fields, fieldOrder, pkIndices) st := temp.NewStruct(fields) gb.MapSet(graphKeys, mapKey, st) } return gb.Build().(types.Map) }
samples/go/csv/read.go
0.562177
0.427098
read.go
starcoder