code
stringlengths
114
1.05M
path
stringlengths
3
312
quality_prob
float64
0.5
0.99
learning_prob
float64
0.2
1
filename
stringlengths
3
168
kind
stringclasses
1 value
package unnecessary_data_transfer

import (
	"sort"

	"github.com/threagile/threagile/model"
)

// Category returns the static risk-category metadata for the
// "Unnecessary Data Transfer" built-in rule.
func Category() model.RiskCategory {
	return model.RiskCategory{
		Id:    "unnecessary-data-transfer",
		Title: "Unnecessary Data Transfer",
		// NOTE(review): the original Description mixed Portuguese and English
		// mid-sentence; restored to consistent English.
		Description: "When a technical asset sends or receives data assets, which it neither processes or stores this is " +
			"an indicator for unnecessarily transferred data (or for an incomplete model). When the unnecessarily " +
			"transferred data assets are sensitive, this poses an unnecessary risk of an increased attack surface.",
		Impact:     "If this risk is unmitigated, attackers might be able to target unnecessarily transferred data.",
		ASVS:       "V1 - Architecture, Design and Threat Modeling Requirements",
		CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html",
		Action:     "Attack Surface Reduction",
		Mitigation: "Try to avoid sending or receiving sensitive data assets which are not required (i.e. neither " +
			"processed or stored) by the involved technical asset.",
		Check:    "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
		Function: model.Architecture,
		STRIDE:   model.ElevationOfPrivilege,
		DetectionLogic: "In-scope technical assets sending or receiving sensitive data assets which are neither processed nor " +
			"stored by the technical asset are flagged with this risk. The risk rating (low or medium) depends on the " +
			"confidentiality, integrity, and availability rating of the technical asset. Monitoring data is exempted from this risk.",
		RiskAssessment: "The risk assessment is depending on the confidentiality and integrity rating of the transferred data asset " +
			"either " + model.LowSeverity.String() + " or " + model.MediumSeverity.String() + ".",
		FalsePositives: "Technical assets missing the model entries of either processing or storing the mentioned data assets " +
			"can be considered as false positives (incomplete models) after individual review. These should then be addressed by " +
			"completing the model so that all necessary data assets are processed and/or stored by the technical asset involved.",
		ModelFailurePossibleReason: true,
		CWE:                        1008,
	}
}

// SupportedTags returns the model tags this rule reacts to (none).
func SupportedTags() []string {
	return []string{}
}

// GenerateRisks walks every in-scope technical asset's outgoing and incoming
// communication links and flags sensitive data assets the asset transfers
// but neither processes nor stores.
func GenerateRisks() []model.Risk {
	risks := make([]model.Risk, 0)
	for _, id := range model.SortedTechnicalAssetIDs() {
		technicalAsset := model.ParsedModelRoot.TechnicalAssets[id]
		if technicalAsset.OutOfScope {
			continue
		}
		// outgoing data flows
		for _, outgoingDataFlow := range technicalAsset.CommunicationLinks {
			targetAsset := model.ParsedModelRoot.TechnicalAssets[outgoingDataFlow.TargetId]
			if targetAsset.Technology.IsUnnecessaryDataTolerated() {
				continue
			}
			risks = checkRisksAgainstTechnicalAsset(risks, technicalAsset, outgoingDataFlow, false)
		}
		// incoming data flows (sorted for deterministic risk ordering)
		commLinks := model.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id]
		sort.Sort(model.ByTechnicalCommunicationLinkIdSort(commLinks))
		for _, incomingDataFlow := range commLinks {
			sourceAsset := model.ParsedModelRoot.TechnicalAssets[incomingDataFlow.SourceId]
			if sourceAsset.Technology.IsUnnecessaryDataTolerated() {
				continue
			}
			risks = checkRisksAgainstTechnicalAsset(risks, technicalAsset, incomingDataFlow, true)
		}
	}
	return risks
}

// checkRisksAgainstTechnicalAsset flags data assets transferred over dataFlow
// (both directions) that technicalAsset neither processes nor stores.
// inverseDirection is true when technicalAsset is the flow's target, so the
// communication partner is the flow's source instead of its target.
func checkRisksAgainstTechnicalAsset(risks []model.Risk, technicalAsset model.TechnicalAsset,
	dataFlow model.CommunicationLink, inverseDirection bool) []model.Risk {
	// The sent and received lists get identical treatment; the original
	// duplicated this loop body verbatim — deduplicated into a helper.
	risks = checkTransferredDataAssets(risks, technicalAsset, dataFlow, dataFlow.DataAssetsSent, inverseDirection)
	risks = checkTransferredDataAssets(risks, technicalAsset, dataFlow, dataFlow.DataAssetsReceived, inverseDirection)
	return risks
}

// checkTransferredDataAssets appends a risk for each listed data asset that
// technicalAsset neither processes nor stores, provided the asset is at least
// confidential or critical to integrity. Duplicate synthetic IDs are skipped.
func checkTransferredDataAssets(risks []model.Risk, technicalAsset model.TechnicalAsset,
	dataFlow model.CommunicationLink, dataAssetIds []string, inverseDirection bool) []model.Risk {
	for _, transferredDataAssetId := range dataAssetIds {
		if technicalAsset.ProcessesOrStoresDataAsset(transferredDataAssetId) {
			continue
		}
		transferredDataAsset := model.ParsedModelRoot.DataAssets[transferredDataAssetId]
		if transferredDataAsset.Confidentiality >= model.Confidential ||
			transferredDataAsset.Integrity >= model.Critical {
			commPartnerId := dataFlow.TargetId
			if inverseDirection {
				commPartnerId = dataFlow.SourceId
			}
			commPartnerAsset := model.ParsedModelRoot.TechnicalAssets[commPartnerId]
			risk := createRisk(technicalAsset, transferredDataAsset, commPartnerAsset)
			if isNewRisk(risks, risk) {
				risks = append(risks, risk)
			}
		}
	}
	return risks
}

// isNewRisk reports whether no risk with the same SyntheticId is already present.
func isNewRisk(risks []model.Risk, risk model.Risk) bool {
	for _, check := range risks {
		if check.SyntheticId == risk.SyntheticId {
			return false
		}
	}
	return true
}

// createRisk builds the concrete risk entry. Impact is medium when the data
// asset is strictly confidential or mission critical, low otherwise.
func createRisk(technicalAsset model.TechnicalAsset, dataAssetTransferred model.DataAsset,
	commPartnerAsset model.TechnicalAsset) model.Risk {
	moreRisky := dataAssetTransferred.Confidentiality == model.StrictlyConfidential ||
		dataAssetTransferred.Integrity == model.MissionCritical
	impact := model.LowImpact
	if moreRisky {
		impact = model.MediumImpact
	}
	title := "<b>Unnecessary Data Transfer</b> of <b>" + dataAssetTransferred.Title + "</b> data at <b>" + technicalAsset.Title + "</b> " +
		"from/to <b>" + commPartnerAsset.Title + "</b>"
	risk := model.Risk{
		Category:                     Category(),
		Severity:                     model.CalculateSeverity(model.Unlikely, impact),
		ExploitationLikelihood:       model.Unlikely,
		ExploitationImpact:           impact,
		Title:                        title,
		MostRelevantTechnicalAssetId: technicalAsset.Id,
		MostRelevantDataAssetId:      dataAssetTransferred.Id,
		DataBreachProbability:        model.Improbable,
		DataBreachTechnicalAssetIDs:  []string{technicalAsset.Id},
	}
	risk.SyntheticId = risk.Category.Id + "@" + dataAssetTransferred.Id + "@" + technicalAsset.Id + "@" + commPartnerAsset.Id
	return risk
}
risks/built-in/unnecessary-data-transfer/unnecessary-data-transfer-rule.go
0.585338
0.504089
unnecessary-data-transfer-rule.go
starcoder
package cache

import (
	"sync"

	"github.com/disgoorg/snowflake/v2"
)

// GroupedFilterFunc is used to filter grouped cached entities.
type GroupedFilterFunc[T any] func(groupID snowflake.ID, entity T) bool

// GroupedCache is a simple key value store grouped by a snowflake.ID. The key is always a snowflake.ID.
// The cache provides a simple way to store and retrieve entities. But is not guaranteed to be thread safe as this depends on the underlying implementation.
type GroupedCache[T any] interface {
	// Get returns a copy of the entity with the given groupID and ID and a bool whether it was found or not.
	Get(groupID snowflake.ID, id snowflake.ID) (T, bool)

	// Put stores the given entity with the given groupID and ID as key. If the entity is already present, it will be overwritten.
	Put(groupID snowflake.ID, id snowflake.ID, entity T)

	// Remove removes the entity with the given groupID and ID as key and returns a copy of the entity and a bool whether it was removed or not.
	Remove(groupID snowflake.ID, id snowflake.ID) (T, bool)

	// RemoveAll removes all entities in the given groupID.
	RemoveAll(groupID snowflake.ID)

	// RemoveIf removes all entities that pass the given GroupedFilterFunc.
	RemoveIf(filterFunc GroupedFilterFunc[T])

	// Len returns the total number of entities in the cache.
	Len() int

	// GroupLen returns the number of entities in the cache within the groupID.
	GroupLen(groupID snowflake.ID) int

	// All returns a copy of all entities in the cache.
	All() map[snowflake.ID][]T

	// GroupAll returns a copy of all entities in a specific group.
	GroupAll(groupID snowflake.ID) []T

	// MapAll returns a copy of all entities in the cache as a map.
	MapAll() map[snowflake.ID]map[snowflake.ID]T

	// MapGroupAll returns a copy of all entities in a specific group as a map.
	MapGroupAll(groupID snowflake.ID) map[snowflake.ID]T

	// FindFirst returns the first entity that passes the given GroupedFilterFunc.
	FindFirst(cacheFindFunc GroupedFilterFunc[T]) (T, bool)

	// GroupFindFirst returns the first entity that passes the given GroupedFilterFunc within the groupID.
	GroupFindFirst(groupID snowflake.ID, cacheFindFunc GroupedFilterFunc[T]) (T, bool)

	// FindAll returns all entities that pass the given GroupedFilterFunc.
	FindAll(cacheFindFunc GroupedFilterFunc[T]) []T

	// GroupFindAll returns all entities that pass the given GroupedFilterFunc within the groupID.
	GroupFindAll(groupID snowflake.ID, cacheFindFunc GroupedFilterFunc[T]) []T

	// ForEach calls the given function for each entity in the cache.
	ForEach(func(groupID snowflake.ID, entity T))

	// GroupForEach calls the given function for each entity in the cache within the groupID.
	GroupForEach(groupID snowflake.ID, forEachFunc func(entity T))
}

// Compile-time check that defaultGroupedCache satisfies GroupedCache.
var _ GroupedCache[any] = (*defaultGroupedCache[any])(nil)

// NewGroupedCache returns a new default GroupedCache with the provided flags, neededFlags and policy.
func NewGroupedCache[T any](flags Flags, neededFlags Flags, policy Policy[T]) GroupedCache[T] {
	return &defaultGroupedCache[T]{
		flags:       flags,
		neededFlags: neededFlags,
		policy:      policy,
		cache:       make(map[snowflake.ID]map[snowflake.ID]T),
	}
}

// defaultGroupedCache is the default map-of-maps GroupedCache implementation,
// guarded by an RWMutex for concurrent use.
type defaultGroupedCache[T any] struct {
	mu          sync.RWMutex
	flags       Flags
	neededFlags Flags
	policy      Policy[T]
	cache       map[snowflake.ID]map[snowflake.ID]T
}

// Get returns the entity stored under (groupID, id) and whether it was found.
func (c *defaultGroupedCache[T]) Get(groupID snowflake.ID, id snowflake.ID) (T, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	if groupEntities, ok := c.cache[groupID]; ok {
		if entity, ok := groupEntities[id]; ok {
			return entity, true
		}
	}
	var entity T
	return entity, false
}

// Put stores entity under (groupID, id). The write is silently dropped when
// the required cache flags are missing or when the policy rejects the entity.
func (c *defaultGroupedCache[T]) Put(groupID snowflake.ID, id snowflake.ID, entity T) {
	if c.neededFlags != FlagsNone && c.flags.Missing(c.neededFlags) {
		return
	}
	if c.policy != nil && !c.policy(entity) {
		return
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	// Lazily initialize so the zero value of defaultGroupedCache still works.
	if c.cache == nil {
		c.cache = make(map[snowflake.ID]map[snowflake.ID]T)
	}
	if groupEntities, ok := c.cache[groupID]; ok {
		groupEntities[id] = entity
	} else {
		groupEntities = make(map[snowflake.ID]T)
		groupEntities[id] = entity
		c.cache[groupID] = groupEntities
	}
}

// Remove deletes and returns the entity stored under (groupID, id).
// NOTE: the inner entity/ok shadow the named results; on a miss the final
// naked return yields the zero value and false.
func (c *defaultGroupedCache[T]) Remove(groupID snowflake.ID, id snowflake.ID) (entity T, ok bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if groupEntities, ok := c.cache[groupID]; ok {
		if entity, ok := groupEntities[id]; ok {
			delete(groupEntities, id)
			return entity, ok
		}
	}
	ok = false
	return
}

// RemoveAll drops the entire group.
func (c *defaultGroupedCache[T]) RemoveAll(groupID snowflake.ID) {
	c.mu.Lock()
	defer c.mu.Unlock()
	delete(c.cache, groupID)
}

// RemoveIf deletes every entity for which filterFunc returns true.
func (c *defaultGroupedCache[T]) RemoveIf(filterFunc GroupedFilterFunc[T]) {
	c.mu.Lock()
	defer c.mu.Unlock()
	for groupID := range c.cache {
		for id, entity := range c.cache[groupID] {
			if filterFunc(groupID, entity) {
				delete(c.cache[groupID], id)
			}
		}
	}
}

// Len returns the total number of entities across all groups.
func (c *defaultGroupedCache[T]) Len() int {
	var totalLen int
	c.mu.RLock()
	defer c.mu.RUnlock()
	for _, groupEntities := range c.cache {
		totalLen += len(groupEntities)
	}
	return totalLen
}

// GroupLen returns the number of entities within one group.
func (c *defaultGroupedCache[T]) GroupLen(groupID snowflake.ID) int {
	c.mu.RLock()
	defer c.mu.RUnlock()
	if groupEntities, ok := c.cache[groupID]; ok {
		return len(groupEntities)
	}
	return 0
}

// All returns a snapshot of every entity, grouped, as slices.
func (c *defaultGroupedCache[T]) All() map[snowflake.ID][]T {
	c.mu.RLock()
	defer c.mu.RUnlock()
	all := make(map[snowflake.ID][]T)
	for groupID, groupEntities := range c.cache {
		all[groupID] = make([]T, 0, len(groupEntities))
		for _, entity := range groupEntities {
			all[groupID] = append(all[groupID], entity)
		}
	}
	return all
}

// GroupAll returns a snapshot slice of one group, or nil if the group is absent.
func (c *defaultGroupedCache[T]) GroupAll(groupID snowflake.ID) []T {
	c.mu.RLock()
	defer c.mu.RUnlock()
	groupEntities, ok := c.cache[groupID]
	if !ok {
		return nil
	}
	all := make([]T, 0, len(groupEntities))
	for _, entity := range groupEntities {
		all = append(all, entity)
	}
	return all
}

// MapAll returns a snapshot of the whole cache keyed by group and entity ID.
func (c *defaultGroupedCache[T]) MapAll() map[snowflake.ID]map[snowflake.ID]T {
	c.mu.RLock()
	defer c.mu.RUnlock()
	all := make(map[snowflake.ID]map[snowflake.ID]T, len(c.cache))
	for groupID, groupEntities := range c.cache {
		all[groupID] = make(map[snowflake.ID]T, len(groupEntities))
		for entityID, entity := range groupEntities {
			all[groupID][entityID] = entity
		}
	}
	return all
}

// MapGroupAll returns a snapshot map of one group, or nil if the group is absent.
func (c *defaultGroupedCache[T]) MapGroupAll(groupID snowflake.ID) map[snowflake.ID]T {
	c.mu.RLock()
	defer c.mu.RUnlock()
	groupEntities, ok := c.cache[groupID]
	if !ok {
		return nil
	}
	all := make(map[snowflake.ID]T, len(groupEntities))
	for entityID, entity := range groupEntities {
		all[entityID] = entity
	}
	return all
}

// FindFirst returns some entity matching cacheFindFunc. Map iteration order is
// random, so "first" is arbitrary when several entities match.
func (c *defaultGroupedCache[T]) FindFirst(cacheFindFunc GroupedFilterFunc[T]) (T, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	for groupID, groupEntities := range c.cache {
		for _, entity := range groupEntities {
			if cacheFindFunc(groupID, entity) {
				return entity, true
			}
		}
	}
	var entity T
	return entity, false
}

// GroupFindFirst is FindFirst restricted to one group.
func (c *defaultGroupedCache[T]) GroupFindFirst(groupID snowflake.ID, cacheFindFunc GroupedFilterFunc[T]) (T, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	for _, entity := range c.cache[groupID] {
		if cacheFindFunc(groupID, entity) {
			return entity, true
		}
	}
	var entity T
	return entity, false
}

// FindAll returns every entity matching cacheFindFunc.
func (c *defaultGroupedCache[T]) FindAll(cacheFindFunc GroupedFilterFunc[T]) []T {
	c.mu.RLock()
	defer c.mu.RUnlock()
	all := make([]T, 0)
	for groupID, groupEntities := range c.cache {
		for _, entity := range groupEntities {
			if cacheFindFunc(groupID, entity) {
				all = append(all, entity)
			}
		}
	}
	return all
}

// GroupFindAll is FindAll restricted to one group.
func (c *defaultGroupedCache[T]) GroupFindAll(groupID snowflake.ID, cacheFindFunc GroupedFilterFunc[T]) []T {
	c.mu.RLock()
	defer c.mu.RUnlock()
	all := make([]T, 0)
	for _, entity := range c.cache[groupID] {
		if cacheFindFunc(groupID, entity) {
			all = append(all, entity)
		}
	}
	return all
}

// ForEach invokes forEachFunc for every entity while holding the read lock;
// forEachFunc must not call back into the cache's write methods.
func (c *defaultGroupedCache[T]) ForEach(forEachFunc func(groupID snowflake.ID, entity T)) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	for groupID, groupEntities := range c.cache {
		for _, entity := range groupEntities {
			forEachFunc(groupID, entity)
		}
	}
}

// GroupForEach is ForEach restricted to one group.
func (c *defaultGroupedCache[T]) GroupForEach(groupID snowflake.ID, forEachFunc func(entity T)) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	for _, entity := range c.cache[groupID] {
		forEachFunc(entity)
	}
}
cache/grouped_cache.go
0.682468
0.463566
grouped_cache.go
starcoder
package qrcode

import (
	"github.com/yeqown/go-qrcode/v2/matrix"
)

// maskPatternModulo selects one of the eight QR mask patterns.
// mask Pattern ref to: https://www.thonky.com/qr-code-tutorial/mask-patterns
type maskPatternModulo uint32

// NOTE(review): the original const comments had x and y swapped for modulo1,
// modulo2 and modulo4 relative to the actual moduloNFunc implementations
// below; the comments here match the code.
const (
	// modulo0 (x+y) mod 2 == 0
	modulo0 maskPatternModulo = iota
	// modulo1 (y) mod 2 == 0
	modulo1
	// modulo2 (x) mod 3 == 0
	modulo2
	// modulo3 (x+y) mod 3 == 0
	modulo3
	// modulo4 (floor(x/3) + floor(y/2)) mod 2 == 0
	modulo4
	// modulo5 ((x*y) mod 2) + ((x*y) mod 3) == 0
	modulo5
	// modulo6 (((x*y) mod 2) + ((x*y) mod 3)) mod 2 == 0
	modulo6
	// modulo7 (((x+y) mod 2) + ((x*y) mod 3)) mod 2 == 0
	modulo7
)

// mask holds a working copy of the symbol matrix plus the chosen pattern.
type mask struct {
	mat      *matrix.Matrix    // matrix (a copy of the caller's matrix)
	mode     maskPatternModulo // mode
	moduloFn moduloFunc        // masking function for the chosen mode
}

// newMask copies mat, resolves the modulo function for mode and applies the mask.
func newMask(mat *matrix.Matrix, mode maskPatternModulo) *mask {
	m := &mask{
		mat:      mat.Copy(),
		mode:     mode,
		moduloFn: getModuloFunc(mode),
	}
	m.masking()
	return m
}

// moduloFunc decides whether the module at (x, y) should be flipped.
type moduloFunc func(int, int) bool

// getModuloFunc maps a maskPatternModulo to its implementation;
// returns nil for an unknown mode.
func getModuloFunc(mode maskPatternModulo) (f moduloFunc) {
	f = nil
	switch mode {
	case modulo0:
		f = modulo0Func
	case modulo1:
		f = modulo1Func
	case modulo2:
		f = modulo2Func
	case modulo3:
		f = modulo3Func
	case modulo4:
		f = modulo4Func
	case modulo5:
		f = modulo5Func
	case modulo6:
		f = modulo6Func
	case modulo7:
		f = modulo7Func
	}
	return
}

// masking applies the modulo function to every data module of the copy.
// Cells that are no longer StateInit (already-placed function modules) are
// reset to StateInit — presumably so the scorer only sees data modules;
// TODO(review): confirm that intent against the caller.
func (m *mask) masking() {
	moduloFn := m.moduloFn
	if moduloFn == nil {
		panic("impossible panic, contact maintainer plz")
	}
	m.mat.Iterate(matrix.COLUMN, func(x, y int, s matrix.State) {
		// skip the function modules
		if state, _ := m.mat.Get(x, y); state != matrix.StateInit {
			_ = m.mat.Set(x, y, matrix.StateInit)
			return
		}
		if moduloFn(x, y) {
			_ = m.mat.Set(x, y, matrix.StateTrue)
		} else {
			_ = m.mat.Set(x, y, matrix.StateFalse)
		}
	})
}

// modulo0Func for maskPattern function
// modulo0 (x+y) mod 2 == 0
func modulo0Func(x, y int) bool {
	return (x+y)%2 == 0
}

// modulo1Func for maskPattern function
// modulo1 (y) mod 2 == 0
func modulo1Func(x, y int) bool {
	return y%2 == 0
}

// modulo2Func for maskPattern function
// modulo2 (x) mod 3 == 0
func modulo2Func(x, y int) bool {
	return x%3 == 0
}

// modulo3Func for maskPattern function
// modulo3 (x+y) mod 3 == 0
func modulo3Func(x, y int) bool {
	return (x+y)%3 == 0
}

// modulo4Func for maskPattern function
// modulo4 (floor(x/3) + floor(y/2)) mod 2 == 0
func modulo4Func(x, y int) bool {
	return (x/3+y/2)%2 == 0
}

// modulo5Func for maskPattern function
// modulo5 (x * y) mod 2 + (x * y) mod 3 == 0
func modulo5Func(x, y int) bool {
	return (x*y)%2+(x*y)%3 == 0
}

// modulo6Func for maskPattern function
// modulo6 (((x*y) mod 2) + ((x*y) mod 3)) mod 2 == 0
func modulo6Func(x, y int) bool {
	return ((x*y)%2+(x*y)%3)%2 == 0
}

// modulo7Func for maskPattern function
// modulo7 (((x+y) mod 2) + ((x*y) mod 3)) mod 2 == 0
func modulo7Func(x, y int) bool {
	return ((x+y)%2+(x*y)%3)%2 == 0
}
mask.go
0.707809
0.468243
mask.go
starcoder
package resolv

import (
	"github.com/ClessLi/2d-game-engin/core/render"
	"github.com/ClessLi/2d-game-engin/resource"
	"github.com/go-gl/mathgl/mgl32"
)

// Shape is a basic interface that describes a Shape that can be passed to collision testing and resolution functions and
// exist in the same Space.
type Shape interface {
	IsColliding(Shape) bool
	WouldBeColliding(Shape, int32, int32) bool
	GetTags() []string
	ClearTags()
	AddTags(...string)
	RemoveTags(...string)
	HasTags(...string) bool
	GetData() interface{}
	SetData(interface{})
	GetXY() (int32, int32)
	GetXY2() (int32, int32)
	SetXY(int32, int32)
	Move(int32, int32)
	Draw(*render.SpriteRenderer)
	GetFriction() float32
	SetFriction(float32)
	GetMaxSpd() float32
	SetMaxSpd(float32)
	GetSpd() (float32, float32)
	SetSpd(float32, float32)
}

// BasicShape isn't to be used directly; it just has some basic functions and data, common to all structs that embed it, like
// position and tags. It is embedded in other Shapes.
type BasicShape struct {
	X, Y int32        // position of the shape
	tags []string     // free-form tags used by collision filtering
	Data interface{}  // arbitrary user data attached to the shape
	Texture *resource.Texture2D // sprite texture used when drawing
	rotate float32    // draw rotation
	color *mgl32.Vec3 // draw tint color
	IsXReverse bool   // true when the shape faces backwards horizontally
	friction float32  // movement friction value
	multiple float32  // scale multiplier (not used in this file's visible code)
}

// GetTags returns a reference to the string array representing the tags on the BasicShape.
func (b *BasicShape) GetTags() []string {
	return b.tags
}

// AddTags adds the specified tags to the BasicShape.
func (b *BasicShape) AddTags(tags ...string) {
	if b.tags == nil {
		b.tags = []string{}
	}
	b.tags = append(b.tags, tags...)
}

// RemoveTags removes the specified tags from the BasicShape.
// Iterates backwards so removing an element does not skip its neighbor.
func (b *BasicShape) RemoveTags(tags ...string) {
	for _, t := range tags {
		for i := len(b.tags) - 1; i >= 0; i-- {
			if t == b.tags[i] {
				b.tags = append(b.tags[:i], b.tags[i+1:]...)
			}
		}
	}
}

// ClearTags clears the tags active on the BasicShape.
func (b *BasicShape) ClearTags() {
	b.tags = []string{}
}

// HasTags returns true if the Shape has all of the tags provided.
func (b *BasicShape) HasTags(tags ...string) bool { hasTags := true for _, t1 := range tags { found := false for _, shapeTag := range b.tags { if t1 == shapeTag { found = true continue } } if !found { hasTags = false break } } return hasTags } // GetData returns the data on the Shape. func (b *BasicShape) GetData() interface{} { return b.Data } // SetData sets the data on the Shape. func (b *BasicShape) SetData(data interface{}) { b.Data = data } // GetXY returns the position of the Shape. func (b *BasicShape) GetXY() (int32, int32) { return b.X, b.Y } // SetXY sets the position of the Shape. func (b *BasicShape) SetXY(x, y int32) { b.X = x b.Y = y } // Move moves the Shape by the delta X and Y values provided. func (b *BasicShape) Move(x, y int32) { b.X += x b.Y += y } // ReverseX, BasicShape 类方向转换为水平向后的方法 func (b *BasicShape) ReverseX() { b.IsXReverse = true } // ForWordX, BasicShape 类方向转换为水平向前的方法 func (b *BasicShape) ForWordX() { b.IsXReverse = false } // GetFriction, BasicShape 类获取 friction 的方法, Shape.GetFriction() float32 的实现 // 返回值: // float32 类型 func (b *BasicShape) GetFriction() float32 { return b.friction } // SetFriction, BasicShape 类设置 friction 的方法, Shape.SetFriction(float32) 的实现 // 参数: // friction: 阻力值 func (b *BasicShape) SetFriction(friction float32) { b.friction = friction } // NewBasicShape, BasicShape 类的实例初始化函数 func NewBasicShape(x, y int32, texture *resource.Texture2D, rotate float32, color *mgl32.Vec3, friction, multiple float32) *BasicShape { return &BasicShape{ X: x, Y: y, tags: nil, Data: nil, Texture: texture, rotate: rotate, color: color, IsXReverse: false, friction: friction, multiple: multiple, } }
core/resolv/shape.go
0.682785
0.512815
shape.go
starcoder
package day39

import (
	"math"
	"strings"
)

// Coord represents a coordinate in the game of life.
type Coord struct {
	X, Y int64
}

// GameOfLife represents the game state: the set of live cells plus an
// exclusive bounding box (min/max sit one cell outside the live area).
// '*' represents a live cell and '.' represents a dead cell.
type GameOfLife struct {
	living                 map[Coord]struct{}
	minX, minY, maxX, maxY int64
}

// NewGameOfLife parses a starting board.
// The given board's lower left coordinate will be 0,0.
func NewGameOfLife(board string) *GameOfLife {
	game := &GameOfLife{living: map[Coord]struct{}{}}
	lines := strings.Split(board, "\n")
	game.maxY = int64(len(lines))
	for row := 0; row < len(lines); row++ {
		// Rows are stored bottom-up, so the last text line is y == 0.
		for col, ch := range lines[len(lines)-1-row] {
			if ch != '*' {
				continue
			}
			game.living[Coord{int64(col), int64(row)}] = struct{}{}
			if int64(col) >= game.maxX {
				game.maxX = int64(col) + 1
			}
		}
	}
	game.minX, game.minY = -1, -1
	return game
}

// String returns the string representation of the board now.
func (gol *GameOfLife) String() string {
	var out strings.Builder
	for row := gol.maxY - 1; row > gol.minY; row-- {
		for col := gol.minX + 1; col < gol.maxX; col++ {
			cell := '.'
			if _, alive := gol.living[Coord{col, row}]; alive {
				cell = '*'
			}
			out.WriteRune(cell) // nolint: gosec
		}
		out.WriteRune('\n') // nolint: gosec
	}
	return out.String()
}

// Step executes a single step of the simulation.
func (gol *GameOfLife) Step() {
	var dying, born []Coord
	for cell := range gol.living {
		if gol.willDie(cell) {
			dying = append(dying, cell)
		}
		born = append(born, gol.willBeBorn(cell)...)
	}
	for _, cell := range dying {
		delete(gol.living, cell)
	}
	for _, cell := range born {
		gol.living[cell] = struct{}{}
	}
	// Recompute the exclusive bounding box from scratch.
	gol.minX, gol.minY = math.MaxInt64, math.MaxInt64
	gol.maxX, gol.maxY = math.MinInt64, math.MinInt64
	for cell := range gol.living {
		if cell.X <= gol.minX {
			gol.minX = cell.X - 1
		}
		if cell.X >= gol.maxX {
			gol.maxX = cell.X + 1
		}
		if cell.Y <= gol.minY {
			gol.minY = cell.Y - 1
		}
		if cell.Y >= gol.maxY {
			gol.maxY = cell.Y + 1
		}
	}
}

// livingNeighbors counts the live cells among the eight neighbors of cell.
func (gol *GameOfLife) livingNeighbors(cell Coord) int {
	count := 0
	for dx := int64(-1); dx <= 1; dx++ {
		for dy := int64(-1); dy <= 1; dy++ {
			if dx == 0 && dy == 0 {
				continue
			}
			if _, alive := gol.living[Coord{cell.X + dx, cell.Y + dy}]; alive {
				count++
			}
		}
	}
	return count
}

// willBeBorn returns the dead neighbors of cell that have exactly three
// living neighbors and will therefore come alive next step.
func (gol *GameOfLife) willBeBorn(cell Coord) []Coord {
	var born []Coord
	seen := map[Coord]struct{}{}
	for dx := int64(-1); dx <= 1; dx++ {
		for dy := int64(-1); dy <= 1; dy++ {
			if dx == 0 && dy == 0 {
				continue
			}
			neighbor := Coord{cell.X + dx, cell.Y + dy}
			if _, alive := gol.living[neighbor]; alive {
				continue
			}
			if _, done := seen[neighbor]; done {
				continue
			}
			seen[neighbor] = struct{}{}
			if gol.livingNeighbors(neighbor) == 3 {
				born = append(born, neighbor)
			}
		}
	}
	return born
}

// willDie reports whether a live cell dies of under- or over-population.
func (gol *GameOfLife) willDie(cell Coord) bool {
	alive := gol.livingNeighbors(cell)
	return alive < 2 || alive > 3
}
day39/problem.go
0.745861
0.463505
problem.go
starcoder
package encoder

import (
	"encoding/binary"
	"errors"
	"hash/crc32"
)

// Entry layout: crc(4) | timestamp(4) | keysize(4) | valuesize(4) | key | value.
// The crc covers everything after itself (bytes 4..end).

// EncodeEntry takes in a key, value and timestamp and then creates a buffer containing
// all of the data from that. This data is appended to a datafile.
func EncodeEntry(key []byte, value []byte, ts uint32) []byte {
	// the header contains the first 16 bytes denoting the crc, timestamp, keysize
	// value size and then followed by the lengths of key, and value.
	buffer := make([]byte, 16)
	binary.LittleEndian.PutUint32(buffer[4:8], ts)
	binary.LittleEndian.PutUint32(buffer[8:12], uint32(len(key)))
	binary.LittleEndian.PutUint32(buffer[12:16], uint32(len(value)))
	buffer = append(buffer, key...)
	buffer = append(buffer, value...)
	// the crc is computed over everything after the crc field itself.
	crc := crc32.ChecksumIEEE(buffer[4:])
	binary.LittleEndian.PutUint32(buffer[:4], crc)
	return buffer
}

// DecodeEntryMeta decodes a byte buffer of length at least 16 and returns the
// crc, timestamp, key size and value size of the entry.
func DecodeEntryMeta(data []byte) (uint32, uint32, uint32, uint32) {
	// BUG FIX: the crc lives in bytes 0..4 (see EncodeEntry); the original
	// read data[4:8] and therefore returned the timestamp as the crc.
	crc := binary.LittleEndian.Uint32(data[:4])
	timestamp := binary.LittleEndian.Uint32(data[4:8])
	ksize := binary.LittleEndian.Uint32(data[8:12])
	vsize := binary.LittleEndian.Uint32(data[12:16])
	return crc, timestamp, ksize, vsize
}

// DecodeEntryValue takes in some data and decodes the value from the data,
// verifying the stored crc32 checksum.
func DecodeEntryValue(data []byte) ([]byte, error) {
	ksize := binary.LittleEndian.Uint32(data[8:12])
	// FIX: the value size is a 4-byte field at 12..16; the original sliced
	// data[12:20] (Uint32 only reads the first 4 bytes, but the over-slice was
	// misleading and panicked on 16..19-byte buffers).
	vsize := binary.LittleEndian.Uint32(data[12:16])
	value := make([]byte, vsize)
	// copy the value from the buffer
	copy(value, data[(16+ksize):(16+ksize+vsize)])
	c32 := binary.LittleEndian.Uint32(data[:4])
	if crc32.ChecksumIEEE(data[4:]) != c32 {
		return nil, errors.New("the crc32 checksum doesn't match")
	}
	return value, nil
}

// DecodeHintMeta takes in a buffer of length 20 and parses hint metadata from it.
// It also expects the buffer to be 20 bytes long otherwise a panic will happen.
func DecodeHintMeta(metaBuffer []byte) (uint32, uint32, uint32, int64) {
	// Hint layout: timestamp(4) | keysize(4) | valuesize(4) | offset(8) | key.
	timestamp := binary.LittleEndian.Uint32(metaBuffer[:4])
	ksize := binary.LittleEndian.Uint32(metaBuffer[4:8])
	vsize := binary.LittleEndian.Uint32(metaBuffer[8:12])
	offset := binary.LittleEndian.Uint64(metaBuffer[12:20])
	return timestamp, ksize, vsize, int64(offset)
}

// DecodeAll decodes a full entry and returns its timestamp, key size, value
// size, key and value, verifying the stored crc32 checksum.
func DecodeAll(data []byte) (uint32, uint32, uint32, []byte, []byte, error) {
	// BUG FIX: the fixed header is 16 bytes, not 20. The original required
	// len(data) >= 20 and therefore rejected valid entries whose key+value
	// total fewer than 4 bytes (e.g. a 1-byte key with an empty value).
	if len(data) < 16 {
		return 0, 0, 0, nil, nil, errors.New("too few bytes to properly read")
	}
	timestamp := binary.LittleEndian.Uint32(data[4:8])
	ksize := binary.LittleEndian.Uint32(data[8:12])
	vsize := binary.LittleEndian.Uint32(data[12:16])
	// Also verify the buffer actually holds the declared key and value so the
	// copies below cannot read out of range (uint64 math avoids overflow).
	if uint64(len(data)) < 16+uint64(ksize)+uint64(vsize) {
		return 0, 0, 0, nil, nil, errors.New("too few bytes to properly read")
	}
	key := make([]byte, ksize)
	value := make([]byte, vsize)
	copy(key, data[16:16+ksize])
	copy(value, data[16+ksize:16+ksize+vsize])
	crc := binary.LittleEndian.Uint32(data[0:4])
	if crc32.ChecksumIEEE(data[4:]) != crc {
		return 0, 0, 0, nil, nil, errors.New("the crc32 checksum doesn't match")
	}
	return timestamp, ksize, vsize, key, value, nil
}

// EncodeHint takes in all of the data contained in hints and returns a byte buffer
// that contains all of it: timestamp(4) | keysize(4) | valuesize(4) | offset(8) | key.
func EncodeHint(timestamp, vsize uint32, offset int64, key []byte) []byte {
	buffer := make([]byte, 20)
	binary.LittleEndian.PutUint32(buffer[0:4], timestamp)
	binary.LittleEndian.PutUint32(buffer[4:8], uint32(len(key)))
	binary.LittleEndian.PutUint32(buffer[8:12], vsize)
	binary.LittleEndian.PutUint64(buffer[12:20], uint64(offset))
	buffer = append(buffer, key...)
	return buffer
}

// DecodeHint returns all of the information stored in a hint entry and lastly it also
// returns the amount of bytes read, so that scanning through the values works better.
func DecodeHint(buffer []byte) (uint32, uint32, int64, []byte, uint32) {
	// A hint record needs at least its 20-byte fixed header.
	if len(buffer) < 20 {
		return 0, 0, 0, nil, 0
	}
	var (
		stamp  = binary.LittleEndian.Uint32(buffer[0:4])
		keyLen = binary.LittleEndian.Uint32(buffer[4:8])
		valLen = binary.LittleEndian.Uint32(buffer[8:12])
		pos    = binary.LittleEndian.Uint64(buffer[12:20])
	)
	// The key is returned as a view into buffer (no copy), matching the
	// original behavior; bytes read = header + key length.
	return stamp, valLen, int64(pos), buffer[20 : 20+keyLen], 20 + keyLen
}
encoder/encoder.go
0.8067
0.421373
encoder.go
starcoder
package math import "fmt" import "regexp" import "strconv" import "math" type Color struct { R float64 G float64 B float64 } type HSL struct { H float64 S float64 L float64 } func NewColor( r float64, g float64, b float64) *Color { color := &Color{} color.R = r color.G = g color.B = b return color } func NewColorHex( value int32 ) *Color { color := &Color{} color.SetHex(value) return color } func NewColorString( value string ) *Color { color := &Color{} color.SetStyle( value ) return color } func (color *Color) Set( value interface{} ) *Color { if v, ok := value.(*Color); ok { color.Copy(v) } else if v, ok := value.(int); ok { color.SetHex(int32(v)) } else if v, ok := value.(string); ok { color.SetStyle(v) } return color } func (color *Color) SetScalar( scalar float64 ) { color.R = scalar color.G = scalar color.B = scalar } func (color *Color) SetHex( hex int32) *Color { color.R = float64( hex >> 16 & 255 ) / 255.0 color.G = float64( hex >> 8 & 255 ) / 255.0 color.B = float64( hex & 255 ) / 255.0 return color } func (color *Color) SetRGB( r float64, g float64, b float64) *Color { color.R = r; color.G = g; color.B = b; return color } func hue2rgb( p float64, q float64, t float64) float64 { if t < 0 { t += 1 } if t > 1 { t -= 1 } if t < 1.0 / 6.0 { return p + ( q - p ) * 6.0 * t } if t < 1.0 / 2.0 { return q } if t < 2.0 / 3.0 { return p + ( q - p ) * 6.0 * ( 2.0 / 3.0 - t ) } return p } func (color *Color) SetHSL( h float64, s float64, l float64) { h = EuclideanModulo(h, 1) s = Clamp(s, 0, 1) l = Clamp(l, 0, 1) if s == 0 { color.R = 1 color.G = 1 color.B = 1 } else { p := l + s - ( l * s ) if l <= 0.5 { p = l * ( 1 + s ) } q := ( 2 * l ) - p color.R = hue2rgb( q, p, h + 1.0 / 3.0) color.G = hue2rgb( q, p, h) color.B = hue2rgb( q, p, h - 1.0 / 3.0) } } func (color *Color) SetStyle( style string) { rgbhslReg := regexp.MustCompile(`^((?:rgb|hsl)a?)\(\s*([^\)]*)\)`) if rgbhslReg.MatchString( style ) { group := rgbhslReg.FindStringSubmatch(style) name := group[1] 
components := group[2] switch( name ) { case "rgb", "rgba": rgb1Reg := regexp.MustCompile(`^(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*(,\s*([0-9]*\.?[0-9]+)\s*)?$`) if rgb1Reg.MatchString( components ) { colorArray := rgb1Reg.FindStringSubmatch( components ) r_value, _ := strconv.ParseInt( colorArray[1], 10, 32) color.R = math.Min( 255, float64(r_value) ) / 255 g_value, _ := strconv.ParseInt( colorArray[2], 10, 32) color.G = math.Min( 255, float64(g_value) ) / 255 b_value, _ := strconv.ParseInt( colorArray[3], 10, 32) color.B = math.Min( 255, float64(b_value) ) / 255 return } rgb2Reg := regexp.MustCompile(`^(\d+)\%\s*,\s*(\d+)\%\s*,\s*(\d+)\%\s*(,\s*([0-9]*\.?[0-9]+)\s*)?$`) if rgb2Reg.MatchString( components ) { colorArray := rgb2Reg.FindStringSubmatch( components ) r_value, _ := strconv.ParseInt( colorArray[1], 10, 32) color.R = math.Min( 100, float64(r_value) ) / 100.0 g_value, _ := strconv.ParseInt( colorArray[2], 10, 32) color.G = math.Min( 100, float64(g_value) ) / 100.0 b_value, _ := strconv.ParseInt( colorArray[3], 10, 32) color.B = math.Min( 100, float64(b_value) ) / 100.0 return } break; case "hsl", "hsla": hslReg := regexp.MustCompile(`^([0-9]*\.?[0-9]+)\s*,\s*(\d+)\%\s*,\s*(\d+)\%\s*(,\s*([0-9]*\.?[0-9]+)\s*)?`) if hslReg.MatchString( components ) { colorArray := hslReg.FindStringSubmatch( components ) h_value, _ := strconv.ParseFloat( colorArray[1], 64 ) h := h_value / 360 s_value, _ := strconv.ParseInt( colorArray[2], 10, 32) s := float64(s_value) / 100 l_value, _ := strconv.ParseInt( colorArray[3], 10, 32) l := float64(l_value) / 100 color.SetHSL(h, s, l); return } } } else { colorCodeReg := regexp.MustCompile(`^\#([A-Fa-f0-9]+)$`) if colorCodeReg.MatchString( style ) { colorArray := colorCodeReg.FindStringSubmatch( style ) hex := colorArray[1] size := len(hex) if( size == 3 ) { r_value, _ := strconv.ParseInt( string(hex[0]) + string(hex[0]), 16, 32 ) color.R = float64(r_value) / 255.0 g_value, _ := strconv.ParseInt( string(hex[1]) + string(hex[1]), 16, 32 ) 
color.G = float64(g_value) / 255.0 b_value, _ := strconv.ParseInt( string(hex[2]) + string(hex[2]), 16, 32 ) color.B = float64(b_value) / 255.0 return } else if size == 6 { r_value, _ := strconv.ParseInt( string(hex[0]) + string(hex[1]), 16, 32 ) color.R = float64(r_value) / 255.0 g_value, _ := strconv.ParseInt( string(hex[2]) + string(hex[3]), 16, 32 ) color.G = float64(g_value) / 255.0 b_value, _ := strconv.ParseInt( string(hex[4]) + string(hex[5]), 16, 32 ) color.B = float64(b_value) / 255.0 return } } } if len(style) > 0 { if ColorKeywords[style] != 0 { hex := ColorKeywords[style] color.SetHex(hex); } // TODO else 後のエラー処理 } } func (color *Color) Clone() *Color { c := &Color{} c.Copy(color) return c } func (color *Color) Copy( color2 *Color ) *Color { color.R = color2.R color.G = color2.G color.B = color2.B return color } func (color *Color) CopyGammaToLinear( color2 *Color, gammaFactor float64) *Color { color.R = math.Pow( color2.R, gammaFactor ) color.G = math.Pow( color2.G, gammaFactor ) color.B = math.Pow( color2.B, gammaFactor ) return color } func (color *Color) CopyLinearToGamma( color2 *Color, gammaFactor float64) *Color { safeInverse := 1.0 if gammaFactor > 0 { safeInverse = 1.0 / gammaFactor } color.R = math.Pow( color2.R, safeInverse) color.G = math.Pow( color2.G, safeInverse) color.B = math.Pow( color2.B, safeInverse) return color } func (color *Color) ConvertGammaToLinear() *Color { r := color.R g := color.G b := color.B color.R = r * r color.G = g * g color.B = b * b return color } func (color *Color) ConvertLinearToGamma() *Color { color.R = math.Sqrt(color.R) color.G = math.Sqrt(color.G) color.B = math.Sqrt(color.B) return color } func (color *Color) GetHex() int32 { return int32(color.R * 255) << 16 ^ int32( color.G * 255 ) << 8 ^ int32( color.B * 255 ) << 0 } func (color *Color) GetHexString() string { return fmt.Sprintf("%06x", color.GetHex()) } func (color *Color) GetHSL() *HSL { hsl := &HSL{} r := color.R g := color.G b := color.B max := 
math.Max( r, math.Max( g, b) ) min := math.Min( r, math.Min( g, b) ) hue := 0.0 saturation := 0.0 lightness := ( min + max ) / 2.0 if min != max { delta := max - min saturation = delta / ( 2 - max - min ) if lightness <= 0.5 { saturation = delta / ( max + min ) } switch max { case r: hue = ( g - b )/ delta if g < b { hue += 6 } case g: hue = ( b - r ) / delta + 2 case b: hue = ( r - g ) / delta + 4 } hue /= 6 } hsl.H = hue hsl.S = saturation hsl.L = lightness return hsl } func (color *Color) GetStyle() string { return fmt.Sprintf("rgb(%d,%d,%d)",int32(color.R*255), int32(color.G*255), int32(color.B*255)) } func (color *Color) OffsetHSL( h, s, l float64) *Color { var hsl = color.GetHSL() hsl.H += h hsl.S += s hsl.L += l color.SetHSL(hsl.H, hsl.S, hsl.L) return color } func (color *Color) Lerp( color2 *Color, alpha float64) *Color { color.R += ( color2.R - color.R ) * alpha color.G += ( color2.G - color.G ) * alpha color.B += ( color2.B - color.B ) * alpha return color } func (color *Color) Add( color2 *Color) *Color { color.R += color2.R color.G += color2.G color.B += color2.B return color } func (color *Color) AddColors( color2 , color3 *Color) *Color { color.R += color2.R + color3.R color.G += color2.G + color3.G color.B += color2.B + color3.B return color } func (color *Color) AddScalar( s float64) *Color { color.R += s color.G += s color.B += s return color } func (color *Color) Multiply( color2 *Color) *Color { color.R *= color2.R color.G *= color2.G color.B *= color2.B return color } func (color *Color) MultiplyScalar( scalar float64 ) *Color { color.R *= scalar color.G *= scalar color.B *= scalar return color } func (color *Color) Equals( color2 *Color) bool { return (color.R == color2.R) && (color.G == color2.G) && (color.B == color2.B) } func (color *Color) FromArray( array []float64) { color.R = array[0] color.G = array[1] color.B = array[2] } func (color *Color) ToArray( array []float64, offset int ) []float64 { array[ offset ] = color.R array[ offset+1 ] 
= color.G array[ offset+2 ] = color.B return array } var ColorKeywords map[string]int32 = map[string]int32 {"aliceblue": 0xF0F8FF, "antiquewhite": 0xFAEBD7, "aqua": 0x00FFFF, "aquamarine": 0x7FFFD4, "azure": 0xF0FFFF, "beige": 0xF5F5DC, "bisque": 0xFFE4C4, "black": 0x000000, "blanchedalmond": 0xFFEBCD, "blue": 0x0000FF, "blueviolet": 0x8A2BE2, "brown": 0xA52A2A, "burlywood": 0xDEB887, "cadetblue": 0x5F9EA0, "chartreuse": 0x7FFF00, "chocolate": 0xD2691E, "coral": 0xFF7F50, "cornflowerblue": 0x6495ED, "cornsilk": 0xFFF8DC, "crimson": 0xDC143C, "cyan": 0x00FFFF, "darkblue": 0x00008B, "darkcyan": 0x008B8B, "darkgoldenrod": 0xB8860B, "darkgray": 0xA9A9A9, "darkgreen": 0x006400, "darkgrey": 0xA9A9A9, "darkkhaki": 0xBDB76B, "darkmagenta": 0x8B008B, "darkolivegreen": 0x556B2F, "darkorange": 0xFF8C00, "darkorchid": 0x9932CC, "darkred": 0x8B0000, "darksalmon": 0xE9967A, "darkseagreen": 0x8FBC8F, "darkslateblue": 0x483D8B, "darkslategray": 0x2F4F4F, "darkslategrey": 0x2F4F4F, "darkturquoise": 0x00CED1, "darkviolet": 0x9400D3, "deeppink": 0xFF1493, "deepskyblue": 0x00BFFF, "dimgray": 0x696969, "dimgrey": 0x696969, "dodgerblue": 0x1E90FF, "firebrick": 0xB22222, "floralwhite": 0xFFFAF0, "forestgreen": 0x228B22, "fuchsia": 0xFF00FF, "gainsboro": 0xDCDCDC, "ghostwhite": 0xF8F8FF, "gold": 0xFFD700, "goldenrod": 0xDAA520, "gray": 0x808080, "green": 0x008000, "greenyellow": 0xADFF2F, "grey": 0x808080, "honeydew": 0xF0FFF0, "hotpink": 0xFF69B4, "indianred": 0xCD5C5C, "indigo": 0x4B0082, "ivory": 0xFFFFF0, "khaki": 0xF0E68C, "lavender": 0xE6E6FA, "lavenderblush": 0xFFF0F5, "lawngreen": 0x7CFC00, "lemonchiffon": 0xFFFACD, "lightblue": 0xADD8E6, "lightcoral": 0xF08080, "lightcyan": 0xE0FFFF, "lightgoldenrodyellow": 0xFAFAD2, "lightgray": 0xD3D3D3, "lightgreen": 0x90EE90, "lightgrey": 0xD3D3D3, "lightpink": 0xFFB6C1, "lightsalmon": 0xFFA07A, "lightseagreen": 0x20B2AA, "lightskyblue": 0x87CEFA, "lightslategray": 0x778899, "lightslategrey": 0x778899, "lightsteelblue": 0xB0C4DE, 
"lightyellow": 0xFFFFE0, "lime": 0x00FF00, "limegreen": 0x32CD32, "linen": 0xFAF0E6, "magenta": 0xFF00FF, "maroon": 0x800000, "mediumaquamarine": 0x66CDAA, "mediumblue": 0x0000CD, "mediumorchid": 0xBA55D3, "mediumpurple": 0x9370DB, "mediumseagreen": 0x3CB371, "mediumslateblue": 0x7B68EE, "mediumspringgreen": 0x00FA9A, "mediumturquoise": 0x48D1CC, "mediumvioletred": 0xC71585, "midnightblue": 0x191970, "mintcream": 0xF5FFFA, "mistyrose": 0xFFE4E1, "moccasin": 0xFFE4B5, "navajowhite": 0xFFDEAD, "navy": 0x000080, "oldlace": 0xFDF5E6, "olive": 0x808000, "olivedrab": 0x6B8E23, "orange": 0xFFA500, "orangered": 0xFF4500, "orchid": 0xDA70D6, "palegoldenrod": 0xEEE8AA, "palegreen": 0x98FB98, "paleturquoise": 0xAFEEEE, "palevioletred": 0xDB7093, "papayawhip": 0xFFEFD5, "peachpuff": 0xFFDAB9, "peru": 0xCD853F, "pink": 0xFFC0CB, "plum": 0xDDA0DD, "powderblue": 0xB0E0E6, "purple": 0x800080, "red": 0xFF0000, "rosybrown": 0xBC8F8F, "royalblue": 0x4169E1, "saddlebrown": 0x8B4513, "salmon": 0xFA8072, "sandybrown": 0xF4A460, "seagreen": 0x2E8B57, "seashell": 0xFFF5EE, "sienna": 0xA0522D, "silver": 0xC0C0C0, "skyblue": 0x87CEEB, "slateblue": 0x6A5ACD, "slategray": 0x708090, "slategrey": 0x708090, "snow": 0xFFFAFA, "springgreen": 0x00FF7F, "steelblue": 0x4682B4, "tan": 0xD2B48C, "teal": 0x008080, "thistle": 0xD8BFD8, "tomato": 0xFF6347, "turquoise": 0x40E0D0, "violet": 0xEE82EE, "wheat": 0xF5DEB3, "white": 0xFFFFFF, "whitesmoke": 0xF5F5F5, "yellow": 0xFFFF00, "yellowgreen": 0x9ACD32 };
math/color.go
0.673299
0.455138
color.go
starcoder
package three import ( "math" ) const ( // DefaultOrder : DefaultOrder = "xyz" ) // RotationOrders : var RotationOrders = []string{"XYZ", "YZX", "ZXY", "XZY", "YXZ", "ZYX"} var _matrix = NewMatrix4() var _quaternion = NewQuaternion(0, 0, 0, 1) // NewEuler : func NewEuler(x, y, z float64, order string) *Euler { if order == "" { order = DefaultOrder } return &Euler{x, y, z, order, nil} } // Euler : type Euler struct { _x float64 _y float64 _z float64 _order string _onChangeCallback onChangeCallback } // X : func (e Euler) X() float64 { return e._x } // SetX : func (e Euler) SetX(value float64) { e._x = value e._onChangeCallback() } // Y : func (e Euler) Y() float64 { return e._y } // SetY : func (e Euler) SetY(value float64) { e._y = value e._onChangeCallback() } // Z : func (e Euler) Z() float64 { return e._z } // SetZ : func (e Euler) SetZ(value float64) { e._z = value e._onChangeCallback() } // Order : func (e Euler) Order() string { return e._order } // SetOrder : func (e Euler) SetOrder(value string) { e._order = value e._onChangeCallback() } // Set : func (e Euler) Set(x, y, z float64, order string) *Euler { e._x = x e._y = y e._z = z if order != "" { e._order = order } e._onChangeCallback() return &e } // Clone : func (e Euler) Clone() *Euler { return NewEuler(e._x, e._y, e._z, e._order) } // Copy : func (e Euler) Copy(euler Euler) *Euler { e._x = euler._x e._y = euler._y e._z = euler._z e._order = euler._order e._onChangeCallback() return &e } // SetFromRotationMatrix : func (e Euler) SetFromRotationMatrix(m Matrix4, order string, update bool) *Euler { clamp := Clamp // assumes the upper 3x3 of m is a pure rotation matrix (i.e, unscaled) te := m.Elements m11, m12, m13 := te[0], te[4], te[8] m21, m22, m23 := te[1], te[5], te[9] m31, m32, m33 := te[2], te[6], te[10] if order == "" { order = e._order } switch order { default: panic("THREE.Euler: .setFromRotationMatrix() encountered an unknown order: " + order) case "XYZ": e._y = math.Asin(clamp(m13, -1, 1)) if 
math.Abs(m13) < 0.9999999 { e._x = math.Atan2(-m23, m33) e._z = math.Atan2(-m12, m11) } else { e._x = math.Atan2(m32, m22) e._z = 0 } case "YXZ": e._x = math.Asin(-clamp(m23, -1, 1)) if math.Abs(m23) < 0.9999999 { e._y = math.Atan2(m13, m33) e._z = math.Atan2(m21, m22) } else { e._y = math.Atan2(-m31, m11) e._z = 0 } case "ZXY": e._x = math.Asin(clamp(m32, -1, 1)) if math.Abs(m32) < 0.9999999 { e._y = math.Atan2(-m31, m33) e._z = math.Atan2(-m12, m22) } else { e._y = 0 e._z = math.Atan2(m21, m11) } case "ZYX": e._y = math.Asin(-clamp(m31, -1, 1)) if math.Abs(m31) < 0.9999999 { e._x = math.Atan2(m32, m33) e._z = math.Atan2(m21, m11) } else { e._x = 0 e._z = math.Atan2(-m12, m22) } case "YZX": e._z = math.Asin(clamp(m21, -1, 1)) if math.Abs(m21) < 0.9999999 { e._x = math.Atan2(-m23, m22) e._y = math.Atan2(-m31, m11) } else { e._x = 0 e._y = math.Atan2(m13, m33) } case "XZY": e._z = math.Asin(-clamp(m12, -1, 1)) if math.Abs(m12) < 0.9999999 { e._x = math.Atan2(m32, m22) e._y = math.Atan2(m13, m11) } else { e._x = math.Atan2(-m23, m33) e._y = 0 } } e._order = order if update { e._onChangeCallback() } return &e } // SetFromQuaternion : func (e Euler) SetFromQuaternion(q Quaternion, order string, update bool) *Euler { _matrix.MakeRotationFromQuaternion(q) return e.SetFromRotationMatrix(*_matrix, order, update) } // SetFromVector3 : func (e Euler) SetFromVector3(v Vector3, order string) *Euler { if order == "" { order = e._order } return e.Set(v.X, v.Y, v.Z, order) } // Reorder : func (e Euler) Reorder(newOrder string) *Euler { // WARNING: e discards revolution information -bhouston _quaternion.SetFromEuler(e, false) return e.SetFromQuaternion(*_quaternion, newOrder, true) } // Equals : func (e Euler) Equals(euler Euler) bool { return (euler._x == e._x) && (euler._y == e._y) && (euler._z == e._z) && (euler._order == e._order) } // FromArray : func (e Euler) FromArray(array []float64, order string) *Euler { if len(array) < 3 { panic("array length should be greater than 3") 
} e._x = array[0] e._y = array[1] e._z = array[2] if order != "" { e._order = order } e._onChangeCallback() return &e } // ToArray : func (e Euler) ToArray(array []float64, offset int) ([]float64, string) { if len(array) < offset+3 { panic("array length should be greater than offset+3") } array[offset] = e._x array[offset+1] = e._y array[offset+2] = e._z return array, e._order } // ToVector3 : func (e Euler) ToVector3(optionalResult Vector3) *Vector3 { return optionalResult.Set(e._x, e._y, e._z) } func (e Euler) _onChange(callback onChangeCallback) *Euler { e._onChangeCallback = callback return &e }
server/three/euler.go
0.729038
0.560373
euler.go
starcoder
package tags import ( "fmt" "strings" ) // SEP is the seperator to split the string into tags var SEP string = "," // Tags allows the managements of tags in a single string be comma seperation type Tags string // Add the tags in the string seperated by SEP func (t *Tags) Add(tags string) { t.AddSlice(strings.Split(tags, SEP)) } // AddSlice adds the strings in tags to the Tags func (t *Tags) AddSlice(tags []string) error { collect := make(map[string]struct{}, 0) for _, tag := range tags { tag = strings.TrimSpace(tag) if strings.Contains(tag, SEP) { return fmt.Errorf("An tag cannot contain the sepereator") } if tag != "" { collect[tag] = struct{}{} } } for _, tag := range t.AsSlice() { collect[tag] = struct{}{} } *t = Tags(strings.Join(setToSlice(collect), SEP)) return nil } // AsSlice returns the tags in Tags as a slice of tags func (t *Tags) AsSlice() []string { if len(string(*t)) > 0 { return strings.Split(string(*t), SEP) } return []string{} } // Count returns the number of tags func (t *Tags) Count() int { return len(t.AsSlice()) } // String returns the Tags as a string func (t *Tags) String() string { return string(*t) } // Contains checks if the given tag is in Tags func (t *Tags) Contains(tag string) bool { for _, x := range t.AsSlice() { if x == tag { return true } } return false } // Remove removes the tags (seperated by SEP) from Tags func (t *Tags) Remove(tags string) { t.RemoveSlice(strings.Split(tags, SEP)) } // RemoveSlice removes the tags given in the slice from Tags func (t *Tags) RemoveSlice(tags []string) { collect := make(map[string]struct{}, 0) for _, tag := range t.AsSlice() { collect[tag] = struct{}{} } for _, tag := range tags { tag = strings.TrimSpace(tag) delete(collect, tag) } *t = Tags(strings.Join(setToSlice(collect), SEP)) } // Clear removes all tags from Tags func (t *Tags) Clear() { *t = Tags("") } func setToSlice(set map[string]struct{}) []string { slice := make([]string, 0, len(set)) for x := range set { slice = append(slice, x) } 
return slice }
tags.go
0.641759
0.468122
tags.go
starcoder
package mock import ( "crypto/rand" "encoding/binary" "math" ) // RandInt Returns an int64 between min and max. func RandInt(min, max int64) int64 { off := min size := max - min return int64(randUint64()%uint64(size)) + off } // RandIntStep Returns an int64 whose step distance between min and max is step. func RandIntStep(min, max, step int64) int64 { off := min sub := max - min size := sub / step return int64(randUint64()%uint64(size))*step + off } // RandUint Returns an uint64 between min and max. func RandUint(min, max uint64) uint64 { off := min size := max - min return randUint64()%size + off } // RandUintStep Returns an uint64 whose step distance between min and max is step. func RandUintStep(min, max, step uint64) uint64 { off := min sub := max - min size := sub / step return (randUint64()%size)*step + off } // RandFloat Returns an float64 between min and max. func RandFloat(min, max float64) float64 { off := min size := max - min return randFloat64()*size + off } // RandFloatStep Returns an float64 whose step distance between min and max is step. 
func RandFloatStep(min, max, step float64) float64 { off := min sub := max - min size := int64(sub / step) return float64(randUint64()%uint64(size))*step + off } func randUint64() uint64 { var buf [8]byte rand.Read(buf[:]) return binary.BigEndian.Uint64(buf[:]) } func randFloat64() float64 { f := float64(randUint64()>>1) / (1 << 63) if f == 1 { return randFloat64() } return f } func compareInt(a, b int64) (min, max int64) { if a < b { return a, b } return b, a } func compareUint(a, b uint64) (min, max uint64) { if a < b { return a, b } return b, a } func compareFloat(a, b float64) (min, max float64) { if a < b { return a, b } return b, a } func maxUint(bit int) uint64 { switch bit { default: return 0 case 8: return math.MaxUint8 case 16: return math.MaxUint16 case 32: return math.MaxUint32 case 64: return math.MaxUint64 } } func maxInt(bit int) int64 { switch bit { default: return 0 case 8: return math.MaxInt8 case 16: return math.MaxInt16 case 32: return math.MaxInt32 case 64: return math.MaxInt64 } } func minInt(bit int) int64 { switch bit { default: return 0 case 8: return math.MinInt8 case 16: return math.MinInt16 case 32: return math.MinInt32 case 64: return math.MinInt64 } } func maxFloat(bit int) float64 { switch bit { default: return 0 case 32: return math.MaxFloat32 case 64: return math.MaxFloat64 } } func minFloat(bit int) float64 { switch bit { default: return 0 case 32: return -math.MaxFloat32 case 64: return -math.MaxFloat64 } }
mock_number.go
0.778228
0.416915
mock_number.go
starcoder
package hmath import ( "fmt" "github.com/barnex/fmath" ) type Mat3 [9]float32 func (mat3 *Mat3) Pointer() *[9]float32 { return (*[9]float32)(mat3) } func (mat3 *Mat3) Slice() []float32 { return mat3[:] } func (mat3 *Mat3) String() string { return fmt.Sprintf("[%f,%f,%f,\n %f,%f,%f,\n %f,%f,%f,]", mat3[0], mat3[3], mat3[6], mat3[1], mat3[4], mat3[7], mat3[2], mat3[5], mat3[8]) } //mat3[0] mat3[1] mat3[2] //mat3[3] mat3[4] mat3[5] //mat3[6] mat3[7] mat3[8] func (mat3 *Mat3) SetAt(row int, column int, value float32) { selection := 0 switch row { case 0: selection = 0 case 1: selection = 3 case 2: selection = 6 } selection = selection + column mat3[selection] = value } func (mat3 *Mat3) GetAt(row int, column int) float32 { selection := 0 switch row { case 0: selection = 0 case 1: selection = 3 case 2: selection = 6 } selection = selection + column return mat3[selection] } func Mat3Identity() Mat3 { return Mat3{ 1, 0, 0, 0, 1, 0, 0, 0, 1} } func Mat3Translate(v Vec2) Mat3 { return Mat3{ 1, 0, 0, 0, 1, 0, v[0], v[1], 1} } func Mat3Scale(v Vec2) Mat3 { return Mat3{ v[0], 0, 0, 0, v[1], 0, 0, 0, 1} } func Mat3Rotate(radians float32) Mat3 { s, c := fmath.Sincos(radians) return Mat3{ c, s, 0, -s, c, 0, 0, 0, 1} } func (mat3 Mat3) Mul(m2 Mat3) Mat3 { return Mat3{ mat3[0]*m2[0] + mat3[1]*m2[3] + mat3[2]*m2[6], mat3[0]*m2[1] + mat3[1]*m2[4] + mat3[2]*m2[7], mat3[0]*m2[2] + mat3[1]*m2[5] + mat3[2]*m2[8], mat3[3]*m2[0] + mat3[4]*m2[3] + mat3[5]*m2[6], mat3[3]*m2[1] + mat3[4]*m2[4] + mat3[5]*m2[7], mat3[3]*m2[2] + mat3[4]*m2[5] + mat3[5]*m2[8], mat3[6]*m2[0] + mat3[7]*m2[3] + mat3[8]*m2[6], mat3[6]*m2[1] + mat3[7]*m2[4] + mat3[8]*m2[7], mat3[6]*m2[2] + mat3[7]*m2[5] + mat3[8]*m2[8]} } func (mat3 Mat3) Invert() Mat3 { identity := 1.0 / (mat3[0]*mat3[4]*mat3[8] + mat3[3]*mat3[7]*mat3[2] + mat3[6]*mat3[1]*mat3[5] - mat3[6]*mat3[4]*mat3[2] - mat3[3]*mat3[1]*mat3[8] - mat3[0]*mat3[7]*mat3[5]) return Mat3{ (mat3[4]*mat3[8] - mat3[5]*mat3[7]) * identity, (mat3[2]*mat3[7] - 
mat3[1]*mat3[8]) * identity, (mat3[1]*mat3[5] - mat3[2]*mat3[4]) * identity, (mat3[5]*mat3[6] - mat3[3]*mat3[8]) * identity, (mat3[0]*mat3[8] - mat3[2]*mat3[6]) * identity, (mat3[2]*mat3[3] - mat3[0]*mat3[5]) * identity, (mat3[3]*mat3[7] - mat3[4]*mat3[6]) * identity, (mat3[1]*mat3[6] - mat3[0]*mat3[7]) * identity, (mat3[0]*mat3[4] - mat3[1]*mat3[3]) * identity} }
code/pkg/hmath/mat3.go
0.587588
0.689541
mat3.go
starcoder
package frequency import . "github.com/deinspanjer/units/unit" // Frequency represents a SI unit of frequency (in hertz, Hz) type Frequency Unit // ... const ( // SI Yoctohertz = Hertz * 1e-24 Zeptohertz = Hertz * 1e-21 Attohertz = Hertz * 1e-18 Femtohertz = Hertz * 1e-15 Picohertz = Hertz * 1e-12 Nanohertz = Hertz * 1e-9 Microhertz = Hertz * 1e-6 Millihertz = Hertz * 1e-3 Centihertz = Hertz * 1e-2 Decihertz = Hertz * 1e-1 Hertz Frequency = 1e0 Decahertz = Hertz * 1e1 Hectohertz = Hertz * 1e2 Kilohertz = Hertz * 1e3 Megahertz = Hertz * 1e6 Gigahertz = Hertz * 1e9 Terahertz = Hertz * 1e12 Petahertz = Hertz * 1e15 Exahertz = Hertz * 1e18 Zettahertz = Hertz * 1e21 Yottahertz = Hertz * 1e24 ) // Yoctohertz returns the frequency in yHz func (f Frequency) Yoctohertz() float64 { return float64(f / Yoctohertz) } // Zeptohertz returns the frequency in zHz func (f Frequency) Zeptohertz() float64 { return float64(f / Zeptohertz) } // Attohertz returns the frequency in aHz func (f Frequency) Attohertz() float64 { return float64(f / Attohertz) } // Femtohertz returns the frequency in fHz func (f Frequency) Femtohertz() float64 { return float64(f / Femtohertz) } // Picohertz returns the frequency in pHz func (f Frequency) Picohertz() float64 { return float64(f / Picohertz) } // Nanohertz returns the frequency in nHz func (f Frequency) Nanohertz() float64 { return float64(f / Nanohertz) } // Microhertz returns the frequency in µHz func (f Frequency) Microhertz() float64 { return float64(f / Microhertz) } // Millihertz returns the frequency in mHz func (f Frequency) Millihertz() float64 { return float64(f / Millihertz) } // Centihertz returns the frequency in cHz func (f Frequency) Centihertz() float64 { return float64(f / Centihertz) } // Decihertz returns the frequency in dHz func (f Frequency) Decihertz() float64 { return float64(f / Decihertz) } // Hertz returns the frequency in Hz func (f Frequency) Hertz() float64 { return float64(f) } // Decahertz returns the frequency in 
daHz func (f Frequency) Decahertz() float64 { return float64(f / Decahertz) } // Hectohertz returns the frequency in hHz func (f Frequency) Hectohertz() float64 { return float64(f / Hectohertz) } // Kilohertz returns the frequency in kHz func (f Frequency) Kilohertz() float64 { return float64(f / Kilohertz) } // Megahertz returns the frequency in MHz func (f Frequency) Megahertz() float64 { return float64(f / Megahertz) } // Gigahertz returns the frequency in GHz func (f Frequency) Gigahertz() float64 { return float64(f / Gigahertz) } // Terahertz returns the frequency in THz func (f Frequency) Terahertz() float64 { return float64(f / Terahertz) } // Petahertz returns the frequency in PHz func (f Frequency) Petahertz() float64 { return float64(f / Petahertz) } // Exahertz returns the frequency in EHz func (f Frequency) Exahertz() float64 { return float64(f / Exahertz) } // Zettahertz returns the frequency in ZHz func (f Frequency) Zettahertz() float64 { return float64(f / Zettahertz) } // Yottahertz returns the frequency in YHz func (f Frequency) Yottahertz() float64 { return float64(f / Yottahertz) }
frequency/frequency.go
0.86129
0.497559
frequency.go
starcoder
package graph import ( "fmt" "github.com/gonum/graph" "github.com/goulash/pacman" "github.com/goulash/pacman/aur" ) // Node implements graph.Node. type Node struct { id int pacman.AnyPackage } // ID returns the unique (within the graph) ID of the node. func (n *Node) ID() int { return n.id } // IsFromAUR returns whether the node comes from AUR. func (n *Node) IsFromAUR() bool { _, ok := n.AnyPackage.(*aur.Package) return ok } // AllDepends returns a (newly created) string slice of the installation // and make dependencies of this package. func (n *Node) AllDepends() []string { deps := make([]string, 0, n.NumAllDepends()) deps = append(deps, n.PkgDepends()...) deps = append(deps, n.PkgMakeDepends()...) return deps } // NumDepends returns the number of make and installation dependencies the package has. func (n *Node) NumAllDepends() int { return len(n.PkgDepends()) + len(n.PkgMakeDepends()) } func (n *Node) String() string { return n.PkgName() } // Edge implements the graph.Edge interface. type Edge struct { from *Node to *Node } // From returns the node that has the dependency. func (e *Edge) From() graph.Node { return e.from } // To returns the depdency that the from node has. func (e *Edge) To() graph.Node { return e.to } // Weight returns zero, because depdencies are not weighted. func (e *Edge) Weight() float64 { return 0.0 } // IsFromAUR returns true if the dependency needs to be fetched from AUR. func (e *Edge) IsFromAUR() bool { return e.to.IsFromAUR() } func (e *Edge) String() string { return fmt.Sprintf("%s -> %s", e.from, e.to) } // Graph implements graph.Graph. type Graph struct { names map[string]*Node nodes []graph.Node nodeIDs map[int]graph.Node edgesFrom map[int][]graph.Node edgesTo map[int][]graph.Node edges map[int]map[int]graph.Edge nextID int } // NewGraph returns a new graph. 
func NewGraph() *Graph { return &Graph{ names: make(map[string]*Node), nodes: make([]graph.Node, 0), nodeIDs: make(map[int]graph.Node), edgesFrom: make(map[int][]graph.Node), edgesTo: make(map[int][]graph.Node), edges: make(map[int]map[int]graph.Edge), nextID: 0, } } // Has returns whether the node exists within the graph. func (g *Graph) Has(n graph.Node) bool { _, ok := g.nodeIDs[n.ID()] return ok } // HasName returns whether the package with the given name exists within the // graph. func (g *Graph) HasName(name string) bool { _, ok := g.names[name] return ok } // NodeWithName returns the node with the given name, or nil. func (g *Graph) NodeWithName(name string) *Node { return g.names[name] } // Nodes returns all the nodes in the graph. func (g *Graph) Nodes() []graph.Node { return g.nodes } // From returns all nodes that can be reached directly from the given node. func (g *Graph) From(v graph.Node) []graph.Node { return g.edgesFrom[v.ID()] } // To returns all nodes that can reach directly to the given node. func (g *Graph) To(v graph.Node) []graph.Node { return g.edgesTo[v.ID()] } // HasEdgeBetween returns whether an edge exists between nodes u and v // without considering direction. func (g *Graph) HasEdgeBetween(u, v graph.Node) bool { return g.HasEdgeFromTo(u, v) || g.HasEdgeFromTo(v, u) } // HasEdgeFromTo returns whether an edge exists in the graph from u to v. func (g *Graph) HasEdgeFromTo(u, v graph.Node) bool { for _, n := range g.edgesFrom[u.ID()] { if n == v { return true } } return false } // Edge returns the edge from u to v if such an edge exists and nil // otherwise. The node v must be directly reachable from u as defined // by the From method. func (g *Graph) Edge(u, v graph.Node) graph.Edge { return g.edges[u.ID()][v.ID()] } // NewNodeID returns a unique ID for a new node. func (g *Graph) NewNodeID() int { g.nextID++ return g.nextID } // NewNode returns a new node. 
func (g *Graph) NewNode(pkg pacman.AnyPackage) *Node { return &Node{ id: g.NewNodeID(), AnyPackage: pkg, } } // AddNode adds the node and initializes data structures but does nothing else. func (g *Graph) AddNode(v graph.Node) { // Checking preconditions: n, ok := v.(*Node) if !ok { panic("only accept our own nodes") } if g.HasName(n.PkgName()) { panic("package name already in graph") } if g.Has(v) { panic("node id already here") } g.names[n.PkgName()] = n g.nodes = append(g.nodes, n) id := n.ID() g.nodeIDs[id] = n g.edgesFrom[id] = make([]graph.Node, 0, n.NumAllDepends()) g.edgesTo[id] = make([]graph.Node, 0) g.edges[id] = make(map[int]graph.Edge) } // AddEdgeFromTo adds an edge betwewen the two nodes. func (g *Graph) AddEdgeFromTo(u, v graph.Node) { uid, vid := u.ID(), v.ID() g.edges[uid][vid] = &Edge{from: u.(*Node), to: v.(*Node)} g.edgesFrom[uid] = append(g.edgesFrom[uid], u) g.edgesTo[vid] = append(g.edgesTo[vid], v) }
vendor/github.com/goulash/pacman/graph/graph.go
0.799442
0.475118
graph.go
starcoder
package cirno import ( "fmt" "math" ) // NormalTo returns the normal from the given circle // to the other shape. func (circle *Circle) NormalTo(shape Shape) (Vector, error) { if shape == nil { return Zero(), fmt.Errorf("the shape is nil") } switch other := shape.(type) { case *Circle: return circle.NormalToCircle(other) case *Line: return circle.NormalToLine(other) case *Rectangle: return circle.NormalToRectangle(other) } return Zero(), fmt.Errorf("unknown shape type") } // NormalTo returns the normal from the given rectangle // to the other shape. func (rect *Rectangle) NormalTo(shape Shape) (Vector, error) { if shape == nil { return Zero(), fmt.Errorf("the shape is nil") } switch other := shape.(type) { case *Circle: return rect.NormalToCircle(other) case *Line: return rect.NormalToLine(other) case *Rectangle: return rect.NormalToRectangle(other) } return Zero(), fmt.Errorf("unknown shape type") } // NormalTo returns the normal from the given line to // the other shape. func (line *Line) NormalTo(shape Shape) (Vector, error) { if shape == nil { return Zero(), fmt.Errorf("the shape is nil") } switch other := shape.(type) { case *Circle: return line.NormalToCircle(other) case *Line: return line.NormalToLine(other) case *Rectangle: return line.NormalToRectangle(other) } return Zero(), fmt.Errorf("unknown shape type") } // NormalToCircle returns the normal from the given circle // to the other circle. func (circle *Circle) NormalToCircle(other *Circle) (Vector, error) { if other == nil { return Zero(), fmt.Errorf("the other circle is nil") } return other.center.Subtract(circle.center).Normalize() } // NormalToRectangle returns the normal from the given circle // to the rectangle. func (circle *Circle) NormalToRectangle(rect *Rectangle) (Vector, error) { if rect == nil { return Zero(), fmt.Errorf("the rectangle is nil") } // Transform the circle center coordinates from the world space // to the rectangle's local space. 
t := circle.center.Subtract(rect.center) theta := -rect.angle t = t.Rotate(theta) localCircle := &Circle{ center: t, radius: circle.radius, } localRect := &Rectangle{ center: NewVector(0, 0), extents: NewVector(rect.Width()/2, rect.Height()/2), xAxis: NewVector(1, 0), yAxis: NewVector(0, 1), } closestPoint := localCircle.center // Find the point of the rectangle which is closest to // the center of the circle. if closestPoint.X < localRect.Min().X { closestPoint.X = localRect.Min().X } else if closestPoint.X > localRect.Max().X { closestPoint.X = localRect.Max().X } if closestPoint.Y < localRect.Min().Y { closestPoint.Y = localRect.Min().Y } else if closestPoint.Y > localRect.Max().Y { closestPoint.Y = localRect.Max().Y } closestPoint = closestPoint.Rotate(-theta).Add(rect.center) normal, err := closestPoint.Subtract(circle.center).Normalize() if err != nil { return Zero(), err } return normal, nil } // NormalToLine returns the normal from the given circle // to the line. func (circle *Circle) NormalToLine(line *Line) (Vector, error) { if line == nil { return Zero(), fmt.Errorf("the line is nil") } closestPoint := line.ProjectPoint(circle.center) if !line.ContainsPoint(closestPoint) { cp := line.P().Subtract(circle.Center()) cq := line.Q().Subtract(circle.Center()) if cp.SquaredMagnitude() < cq.SquaredMagnitude() { closestPoint = line.P() } else { closestPoint = line.Q() } } normal, err := closestPoint.Subtract(circle.center).Normalize() if err != nil { return Zero(), err } if math.IsNaN(normal.X) { normal.X = 0.0 } if math.IsNaN(normal.Y) { normal.Y = 0.0 } return normal, nil } // NormalToCircle returns the normal from the given line // to the circle. 
// NormalToCircle returns the normal from the given line
// to the circle. It is the negation of the circle-to-line normal.
func (line *Line) NormalToCircle(circle *Circle) (Vector, error) {
	if circle == nil {
		return Zero(), fmt.Errorf("the circle is nil")
	}
	normalToLine, err := circle.NormalToLine(line)
	if err != nil {
		return Zero(), err
	}
	return normalToLine.MultiplyByScalar(-1), nil
}

// NormalToLine returns the normal from the given line
// to the other line.
func (line *Line) NormalToLine(other *Line) (Vector, error) {
	if line == nil {
		return Zero(), fmt.Errorf("the line is nil")
	}
	// NOTE(review): other is dereferenced below without a nil check —
	// confirm callers never pass a nil other line.
	normal := Zero()
	pRightOfLine, err := line.isPointRightOfLine(other.p)
	if err != nil {
		return Zero(), err
	}
	qRightOfLine, err := line.isPointRightOfLine(other.q)
	if err != nil {
		return Zero(), err
	}
	// If both endpoints of the other line lie on the same side of this
	// line, the normal is taken from the other line's endpoint relative
	// to its projection onto this line; otherwise the segments straddle
	// one another and the direction comes from this line's endpoint
	// projected onto the other line.
	if pRightOfLine == qRightOfLine {
		pointProj := line.ProjectPoint(other.p)
		normal, err = other.p.Subtract(pointProj).Normalize()
		if err != nil {
			return Zero(), err
		}
	} else {
		pointProj := other.ProjectPoint(line.p)
		normal, err = pointProj.Subtract(line.p).Normalize()
		if err != nil {
			return Zero(), err
		}
	}
	return normal, nil
}

// NormalToRectangle returns the normal from the given line
// to the rectangle. It is the negation of the rectangle-to-line normal.
func (line *Line) NormalToRectangle(rect *Rectangle) (Vector, error) {
	if rect == nil {
		return Zero(), fmt.Errorf("the rectangle is nil")
	}
	normalToLine, err := rect.NormalToLine(line)
	if err != nil {
		return Zero(), err
	}
	return normalToLine.MultiplyByScalar(-1), nil
}

// NormalToCircle returns the normal from the given rectangle
// to the circle. It is the negation of the circle-to-rectangle normal.
func (rect *Rectangle) NormalToCircle(circle *Circle) (Vector, error) {
	if circle == nil {
		return Zero(), fmt.Errorf("the circle is nil")
	}
	normalToRect, err := circle.NormalToRectangle(rect)
	if err != nil {
		return Zero(), err
	}
	return normalToRect.MultiplyByScalar(-1), nil
}

// NormalToLine returns the normal between the given rectangle
// and the line.
// NormalToLine returns the normal between the given rectangle
// and the line. It runs a separating-axis test over the rectangle's two
// axes and the line's own axis (plus its perpendicular), then orients
// the chosen axis so it points from the rectangle towards the line.
func (rect *Rectangle) NormalToLine(line *Line) (Vector, error) {
	if line == nil {
		return Zero(), fmt.Errorf("the line is nil")
	}
	lineAxisX, err := line.q.Subtract(line.Center()).Normalize()
	if err != nil {
		return Zero(), err
	}
	// NOTE(review): assumes Rotate takes degrees — confirm against Vector.Rotate.
	lineAxisY := lineAxisX.Rotate(90)
	lineExtent := line.Length() / 2
	// t is the vector from the rectangle's center to the line's center.
	t := line.Center().Subtract(rect.center)
	// The segment has zero extent along lineAxisY, so only its lineAxisX
	// half-extent contributes to the projections onto the rectangle axes.
	sepAx := math.Abs(Dot(t, rect.xAxis)) > rect.extents.X+
		math.Abs(Dot(lineAxisX.MultiplyByScalar(lineExtent), rect.xAxis))
	sepAy := math.Abs(Dot(t, rect.yAxis)) > rect.extents.Y+
		math.Abs(Dot(lineAxisX.MultiplyByScalar(lineExtent), rect.yAxis))
	sepLineX := math.Abs(Dot(t, lineAxisX)) > lineExtent+
		math.Abs(Dot(rect.xAxis.MultiplyByScalar(rect.extents.X), lineAxisX))+
		math.Abs(Dot(rect.yAxis.MultiplyByScalar(rect.extents.Y), lineAxisX))
	// No lineExtent term here: the segment projects to a point on its
	// own perpendicular axis.
	sepLineY := math.Abs(Dot(t, lineAxisY)) >
		math.Abs(Dot(rect.xAxis.MultiplyByScalar(rect.extents.X), lineAxisY))+
		math.Abs(Dot(rect.yAxis.MultiplyByScalar(rect.extents.Y), lineAxisY))
	var normal Vector
	// Use the first separating axis found; flip it depending on which
	// side of a line through the shape's center the other shape lies.
	if sepAx {
		normal = rect.xAxis
		sepLine, err := NewLine(rect.center, rect.center.Add(rect.yAxis))
		if err != nil {
			return Zero(), err
		}
		if sepLine.Orientation(line.Center()) < 0 {
			normal = normal.MultiplyByScalar(-1)
		}
	} else if sepAy {
		normal = rect.yAxis
		sepLine, err := NewLine(rect.center, rect.center.Add(rect.xAxis))
		if err != nil {
			return Zero(), err
		}
		if sepLine.Orientation(line.Center()) > 0 {
			normal = normal.MultiplyByScalar(-1)
		}
	} else if sepLineX {
		normal = lineAxisX
		sepLine, err := NewLine(line.Center(), line.Center().Add(lineAxisY))
		if err != nil {
			return Zero(), err
		}
		if sepLine.Orientation(rect.center) > 0 {
			normal = normal.MultiplyByScalar(-1)
		}
	} else if sepLineY {
		normal = lineAxisY
		sepLine, err := NewLine(line.Center(), line.Center().Add(lineAxisX))
		if err != nil {
			return Zero(), err
		}
		if sepLine.Orientation(rect.center) < 0 {
			normal = normal.MultiplyByScalar(-1)
		}
	}
	// If no separating axis exists (the shapes overlap), normal remains
	// the zero vector.
	return normal, nil
}

// NormalToRectangle returns the normal from the given rectangle to
// the other rectangle.
func (rect *Rectangle) NormalToRectangle(other *Rectangle) (Vector, error) { if other == nil { return Zero(), fmt.Errorf("the rectangle is nil") } // A vector from the center of rectangle A to the center of rectangle B. t := other.center.Subtract(rect.center) // Check if Ax is parallel to the separating axis and hence the separating axis exists. sepAx := math.Abs(Dot(t, rect.xAxis)) > rect.extents.X+ math.Abs(Dot(other.xAxis.MultiplyByScalar(other.extents.X), rect.xAxis))+ math.Abs(Dot(other.yAxis.MultiplyByScalar(other.extents.Y), rect.xAxis)) // Check if Ay is parallel to the separating axis and hence the separating axis exists. sepAy := math.Abs(Dot(t, rect.yAxis)) > rect.extents.Y+ math.Abs(Dot(other.xAxis.MultiplyByScalar(other.extents.X), rect.yAxis))+ math.Abs(Dot(other.yAxis.MultiplyByScalar(other.extents.Y), rect.yAxis)) // Check if Bx is parallel to the separating axis and hence the separating axis exists. sepBx := math.Abs(Dot(t, other.xAxis)) > other.extents.X+ math.Abs(Dot(rect.xAxis.MultiplyByScalar(rect.extents.X), other.xAxis))+ math.Abs(Dot(rect.yAxis.MultiplyByScalar(rect.extents.Y), other.xAxis)) // Check if By is parallel to the separating axis and hence the separating axis exists. 
sepBy := math.Abs(Dot(t, other.yAxis)) > other.extents.Y+ math.Abs(Dot(rect.xAxis.MultiplyByScalar(rect.extents.X), other.yAxis))+ math.Abs(Dot(rect.yAxis.MultiplyByScalar(rect.extents.Y), other.yAxis)) var normal Vector if sepAx { normal = rect.xAxis sepLine, err := NewLine(rect.center, rect.center.Add(rect.yAxis)) if err != nil { return Zero(), err } if sepLine.Orientation(other.center) < 0 { normal = normal.MultiplyByScalar(-1) } } else if sepAy { normal = rect.yAxis sepLine, err := NewLine(rect.center, rect.center.Add(rect.xAxis)) if err != nil { return Zero(), err } if sepLine.Orientation(other.center) > 0 { normal = normal.MultiplyByScalar(-1) } } else if sepBx { normal = other.xAxis sepLine, err := NewLine(other.center, other.center.Add(other.yAxis)) if err != nil { return Zero(), err } if sepLine.Orientation(rect.center) > 0 { normal = normal.MultiplyByScalar(-1) } } else if sepBy { normal = other.yAxis sepLine, err := NewLine(other.center, other.center.Add(other.xAxis)) if err != nil { return Zero(), err } if sepLine.Orientation(rect.center) < 0 { normal = normal.MultiplyByScalar(-1) } } return normal, nil }
normal.go
0.921605
0.716095
normal.go
starcoder
package base import ( "strconv" sdkTypes "github.com/cosmos/cosmos-sdk/types" "github.com/persistenceOne/persistenceSDK/constants/errors" "github.com/persistenceOne/persistenceSDK/schema/types" "github.com/persistenceOne/persistenceSDK/utilities/meta" ) var _, _ types.Data = (*Data_HeightData)(nil), (*HeightData)(nil) func (heightData Data_HeightData) Compare(data types.Data) int { compareHeightData, Error := heightDataFromInterface(data) if Error != nil { panic(Error) } return heightData.HeightData.Value.Compare(&compareHeightData.HeightData.Value) } func (heightData Data_HeightData) String() string { return strconv.FormatInt(heightData.HeightData.Value.Get(), 10) } func (heightData Data_HeightData) GetTypeID() types.ID { return NewID("H") } func (heightData Data_HeightData) ZeroValue() types.Data { return NewHeightData(NewHeight(0)) } func (heightData Data_HeightData) GenerateHashID() types.ID { if heightData.Compare(heightData.ZeroValue()) == 0 { return NewID("") } return NewID(meta.Hash(strconv.FormatInt(heightData.HeightData.Value.Get(), 10))) } func (heightData Data_HeightData) AsAccAddress() (sdkTypes.AccAddress, error) { zeroValue, _ := Data_AccAddressData{}.ZeroValue().AsAccAddress() return zeroValue, errors.IncorrectFormat } func (heightData Data_HeightData) AsListData() (types.ListData, error) { zeroValue, _ := Data_ListData{}.ZeroValue().AsListData() return zeroValue, errors.IncorrectFormat } func (heightData Data_HeightData) AsString() (string, error) { zeroValue, _ := Data_StringData{}.ZeroValue().AsString() return zeroValue, errors.IncorrectFormat } func (heightData Data_HeightData) AsDec() (sdkTypes.Dec, error) { zeroValue, _ := Data_DecData{}.ZeroValue().AsDec() return zeroValue, errors.IncorrectFormat } func (heightData Data_HeightData) AsHeight() (types.Height, error) { return &heightData.HeightData.Value, nil } func (heightData Data_HeightData) AsID() (types.ID, error) { zeroValue, _ := Data_IdData{}.ZeroValue().AsID() return zeroValue, 
errors.IncorrectFormat } func (heightData Data_HeightData) Get() interface{} { return heightData.HeightData.Value } func (heightData Data_HeightData) Unmarshal(dAtA []byte) error { return heightData.HeightData.Unmarshal(dAtA) } func (heightData *Data_HeightData) Reset() { *heightData = Data_HeightData{} } func (*Data_HeightData) ProtoMessage() {} func heightDataFromInterface(data types.Data) (Data_HeightData, error) { switch value := data.(type) { case *Data_HeightData: return *value, nil default: return Data_HeightData{}, errors.MetaDataError } } func NewHeightData(value types.Height) *Data_HeightData { height := *NewHeight(value.Get()) return &Data_HeightData{ HeightData: &HeightData{ Value: height, }, } } func ReadHeightData(dataString string) (types.Data, error) { if dataString == "" { return Data_HeightData{}.ZeroValue(), nil } height, Error := strconv.ParseInt(dataString, 10, 64) if Error != nil { return nil, Error } return NewHeightData(NewHeight(height)), nil } func (heightData HeightData) Compare(data types.Data) int { compareHeightData, Error := dummyHeightDataFromInterface(data) if Error != nil { panic(Error) } return heightData.Value.Compare(&compareHeightData.Value) } func (heightData HeightData) String() string { return strconv.FormatInt(heightData.Value.Get(), 10) } func (heightData HeightData) GetTypeID() types.ID { return NewID("H") } func (heightData HeightData) ZeroValue() types.Data { return NewHeightData(NewHeight(0)) } func (heightData HeightData) GenerateHashID() types.ID { if heightData.Compare(heightData.ZeroValue()) == 0 { return NewID("") } return NewID(meta.Hash(strconv.FormatInt(heightData.Value.Get(), 10))) } func (heightData HeightData) AsAccAddress() (sdkTypes.AccAddress, error) { zeroValue, _ := AccAddressData{}.ZeroValue().AsAccAddress() return zeroValue, errors.IncorrectFormat } func (heightData HeightData) AsListData() (types.ListData, error) { zeroValue, _ := ListData{}.ZeroValue().AsListData() return zeroValue, 
errors.IncorrectFormat } func (heightData HeightData) AsString() (string, error) { zeroValue, _ := StringData{}.ZeroValue().AsString() return zeroValue, errors.IncorrectFormat } func (heightData HeightData) AsDec() (sdkTypes.Dec, error) { zeroValue, _ := DecData{}.ZeroValue().AsDec() return zeroValue, errors.IncorrectFormat } func (heightData HeightData) AsHeight() (types.Height, error) { return &heightData.Value, nil } func (heightData HeightData) AsID() (types.ID, error) { zeroValue, _ := IDData{}.ZeroValue().AsID() return zeroValue, errors.IncorrectFormat } func (heightData HeightData) Get() interface{} { return heightData.Value } func dummyHeightDataFromInterface(data types.Data) (HeightData, error) { switch value := data.(type) { case *HeightData: return *value, nil default: return HeightData{}, errors.MetaDataError } }
schema/types/base/heightData.go
0.68941
0.490175
heightData.go
starcoder
Package bytecode provides requisite structs and utility functions to build and validate unlinked and unlinked bytecode objects for the ethpm v2 manifest. Information about these objects can be found here http://ethpm.github.io/ethpm-spec/package-spec.html#the-bytecode-object */ package bytecode import ( "encoding/json" "errors" "fmt" "strings" "github.com/ethpm/ethpm-go/pkg/ethregexlib" liblink "github.com/ethpm/ethpm-go/pkg/librarylink" ) // StandardJSONBC is the bytecode or deployedBytecode object from a compiler's // standard JSON output object type StandardJSONBC struct { LinkReferences map[string]map[string][]map[string]int `json:"linkReferences,omitempty"` Object string `json:"object,omitempty"` } // UnlinkedBytecode A bytecode object for unlinked bytecode. type UnlinkedBytecode struct { Bytecode string `json:"bytecode,omitempty"` LinkReferences []*liblink.LinkReference `json:"link_references,omitempty"` } // LinkedBytecode A bytecode object for linked bytecode type LinkedBytecode struct { Bytecode string `json:"bytecode,omitempty"` LinkDependencies []*liblink.LinkValue `json:"link_dependencies,omitempty"` LinkReferences []*liblink.LinkReference `json:"link_references,omitempty"` } // Build takes a compiler standard output bytecode object as a json string // and builds the UnlinkedBytecode struct func (ub *UnlinkedBytecode) Build(jsonstring string) (err error) { var s *StandardJSONBC var contractcount int if err = json.Unmarshal([]byte(jsonstring), &s); err != nil { err = fmt.Errorf("Error parsing standard json bytecode object: '%v'", err) return } if s == nil { err = errors.New("No unlinked bytecode received in json string") return } for k := range s.LinkReferences { contractcount += len(s.LinkReferences[k]) } ub.LinkReferences = make([]*liblink.LinkReference, contractcount) contractcount = 0 for k := range s.LinkReferences { for z, v := range s.LinkReferences[k] { ub.LinkReferences[contractcount] = &liblink.LinkReference{} 
ub.LinkReferences[contractcount].Build(z, v) s.Object = addLinkRefZeros(s.Object, ub.LinkReferences[contractcount]) contractcount++ } } ub.Bytecode = s.Object return } func addLinkRefZeros(bytecode string, lr *liblink.LinkReference) string { l := lr.Length * 2 zeros := strings.Repeat("0", l) for _, x := range lr.Offsets { spot := x * 2 bytecode = bytecode[:spot] + zeros + bytecode[spot+l:] } return bytecode } // Validate with UnlinkedBytecode ensures the UnlinkedBytecode object conforms to the standard // described here https://ethpm.github.io/ethpm-spec/package-spec.html#bytecode func (ub *UnlinkedBytecode) Validate() (err error) { if (ub.Bytecode == "") || (ub.Bytecode == "0x") { err = errors.New("bytecode empty and is a required field") return } if retErr := ethregexlib.CheckBytecode(ub.Bytecode); retErr != nil { err = fmt.Errorf("unlinked_bytecode:bytecode error '%v'", retErr) return } if retErr := checkLinkReferences(ub.Bytecode, ub.LinkReferences); retErr != nil { err = retErr } return } // Build the linked bytecode string and create a LinkedBytecode struct. Each // dependency and reference for LinkedBytecode should be added through the // AddLinkDependencies and AddLinkReference utility functions. func (lb *LinkedBytecode) Build(bc string) (err error) { lb.Bytecode = bc return } // AddLinkDependencies will take an array of LinkValue objects and add them // to the LinkedBytecode object. This function is not currently built into // any compiler or deployment workflow. func (lb *LinkedBytecode) AddLinkDependencies(lv []*liblink.LinkValue) { if len(lb.LinkDependencies) == 0 { lb.LinkDependencies = make([]*liblink.LinkValue, len(lv)) } for i, v := range lv { lb.LinkDependencies[i] = v } return } // AddLinkReference will take an array of LinkReference objects and add them // to the LinkedBytecode object. This function is not currently built into // any compiler or deployment workflow. 
func (lb *LinkedBytecode) AddLinkReference(lr []*liblink.LinkReference) { if len(lb.LinkReferences) == 0 { lb.LinkReferences = make([]*liblink.LinkReference, len(lr)) } for i, v := range lr { lb.LinkReferences[i] = v } return } // Validate with LinkedBytecode ensures the LinkedBytecode object conforms to the standard // described here https://ethpm.github.io/ethpm-spec/package-spec.html#bytecode func (lb *LinkedBytecode) Validate(dependencyLengths map[string]int) (err error) { if (lb.Bytecode == "") || (lb.Bytecode == "0x") { err = errors.New("bytecode empty and is a required field") return } if retErr := ethregexlib.CheckBytecode(lb.Bytecode); retErr != nil { err = fmt.Errorf("linked_bytecode:bytecode error '%v'", retErr) return } if retErr := checkLinkReferences(lb.Bytecode, lb.LinkReferences); retErr != nil { err = retErr return } if retErr := checkLinkDependencies(lb.Bytecode, lb.LinkDependencies, dependencyLengths); retErr != nil { err = retErr } return } // checkLinkReferences validates each of the link references against the bytecode func checkLinkReferences(bc string, lr []*liblink.LinkReference) (err error) { length := len(bc) OuterLoop: for k, v := range lr { if retErr := v.Validate(); retErr != nil { err = fmt.Errorf("link_reference at position '%v' returned the following error: "+ "%v+", k, retErr) break } for i, z := range v.Offsets { if (z + v.Length) >= ((length - 2) / 2) { err = fmt.Errorf("link_reference at position '%v' has invalid length for offset "+ "at postion %v. 
Offset '%v' plus '%v' is out of bounds for the bytecode.", k, i, z, v.Length) break OuterLoop } } } return } // checkLinkDependencies validates each of the link dependencies against the link references func checkLinkDependencies(bc string, lv []*liblink.LinkValue, depLengths map[string]int) (err error) { length := len(bc) OuterLoop: for k, v := range lv { if retErr := v.Validate(depLengths); retErr != nil { err = fmt.Errorf("link_dependency at position '%v' returned the following error: "+ "%v+", k, retErr) break } for i, z := range v.Offsets { if v.Type == "literal" { depLength := (len(v.Value) - 2) / 2 if (z + depLength) >= ((length - 2) / 2) { err = fmt.Errorf("link_dependency at position '%v' has invalid length for offset "+ "at postion %v. Offset '%v' plus '%v' (byte length of value '%v') is out of bounds "+ "for the bytecode.", k, i, z, depLength, v.Value) break OuterLoop } } if (z + depLengths[v.Value]) >= ((length - 2) / 2) { err = fmt.Errorf("link_dependency at position '%v' has invalid length for offset "+ "at postion %v. Offset '%v' plus '%v' (byte length of dependency '%v') is out of bounds "+ "for the bytecode.", k, i, z, depLengths[v.Value], v.Value) break OuterLoop } } } return }
pkg/bytecode/bytecode.go
0.750278
0.479138
bytecode.go
starcoder
package iso20022 // Reversal card transaction. type CardTransaction7 struct { // TTransaction type of the transaction to be reversed. // It correspond partially to the ISO 8583, field number 3. TransactionType *CardPaymentServiceType7Code `xml:"TxTp"` // Identification of the reconciliation period between the acquirer and the issuer or their respective agents. Reconciliation *TransactionIdentifier2 `xml:"Rcncltn,omitempty"` // Date and time of the transaction transported unchanged by the acquirer from the card acceptor to the issuer. // It correspond to the ISO 8583 field number 12. AcceptorTransactionDateTime *ISODateTime `xml:"AccptrTxDtTm"` // Unique transaction identification generated by the acceptor or the acquirer transported unchanged by the acquirer from the card acceptor to the issuer. It is used to assist locating the original source information. Eventually it could be included in the cardholder statement. It corresponds to ISO 8583, field 37 and CAPE data element TransactionIdentification/TransactionReference. AcceptorTransactionIdentification *Max35Text `xml:"AccptrTxId"` // Number generated by the transaction Initiator to assist in identifying a transaction uniquely. This value remains unchanged for all messages within a message pair exchange, for instance an initiation/response. It corresponds to the ISO 8583 field number 11. InitiatorTransactionIdentification *Max35Text `xml:"InitrTxId"` // Unique identification to match transactions throughout their life cycle (for example, authorisation to financial presentment, financial presentment to chargeback). It shall contain the same value in all messages throughout a transaction’s life cycle. It corresponds partially to ISO 8583:2003 field number 21. TransactionLifeCycleIdentification *Max35Text `xml:"TxLifeCyclId,omitempty"` // Life cycle transaction sequence number when multiple authorisations are performed for the same presentment. // It corresponds partially to ISO 8583:2003 field number 21. 
TransactionLifeCycleSequenceNumber *Number `xml:"TxLifeCyclSeqNb,omitempty"` // Total number of transactions under the same life cycle transaction sequence number. TransactionLifeCycleSequenceCounter *Number `xml:"TxLifeCyclSeqCntr,omitempty"` // Data supplied by an acquirer at clearing time to assist in identifying the original transaction in subsequent messages, for example when researching retrievals and chargebacks. It corresponds to ISO 8583:2003 field number 31, acquirer reference number. AcquirerTransactionReference *Max35NumericText `xml:"AcqrrTxRef,omitempty"` // Data supplied by a card issuer in response messages or in issuer generated messages, that the acquirer may be required to be provided in subsequent messages. It corresponds to ISO 8583:93 and 2003 field number 95. CardIssuerReferenceData *Max140Text `xml:"CardIssrRefData,omitempty"` // Identification of the original transaction. // It corresponds to ISO 8583 field number 90 for the version 87, and field number 56 for the other versions. OriginalTransaction *CardTransaction3 `xml:"OrgnlTx,omitempty"` // Details of the card transaction. TransactionDetails *CardTransactionDetail5 `xml:"TxDtls"` // Response to the reversal. 
TransactionResponse *ResponseType2 `xml:"TxRspn,omitempty"` } func (c *CardTransaction7) SetTransactionType(value string) { c.TransactionType = (*CardPaymentServiceType7Code)(&value) } func (c *CardTransaction7) AddReconciliation() *TransactionIdentifier2 { c.Reconciliation = new(TransactionIdentifier2) return c.Reconciliation } func (c *CardTransaction7) SetAcceptorTransactionDateTime(value string) { c.AcceptorTransactionDateTime = (*ISODateTime)(&value) } func (c *CardTransaction7) SetAcceptorTransactionIdentification(value string) { c.AcceptorTransactionIdentification = (*Max35Text)(&value) } func (c *CardTransaction7) SetInitiatorTransactionIdentification(value string) { c.InitiatorTransactionIdentification = (*Max35Text)(&value) } func (c *CardTransaction7) SetTransactionLifeCycleIdentification(value string) { c.TransactionLifeCycleIdentification = (*Max35Text)(&value) } func (c *CardTransaction7) SetTransactionLifeCycleSequenceNumber(value string) { c.TransactionLifeCycleSequenceNumber = (*Number)(&value) } func (c *CardTransaction7) SetTransactionLifeCycleSequenceCounter(value string) { c.TransactionLifeCycleSequenceCounter = (*Number)(&value) } func (c *CardTransaction7) SetAcquirerTransactionReference(value string) { c.AcquirerTransactionReference = (*Max35NumericText)(&value) } func (c *CardTransaction7) SetCardIssuerReferenceData(value string) { c.CardIssuerReferenceData = (*Max140Text)(&value) } func (c *CardTransaction7) AddOriginalTransaction() *CardTransaction3 { c.OriginalTransaction = new(CardTransaction3) return c.OriginalTransaction } func (c *CardTransaction7) AddTransactionDetails() *CardTransactionDetail5 { c.TransactionDetails = new(CardTransactionDetail5) return c.TransactionDetails } func (c *CardTransaction7) AddTransactionResponse() *ResponseType2 { c.TransactionResponse = new(ResponseType2) return c.TransactionResponse }
CardTransaction7.go
0.879768
0.423041
CardTransaction7.go
starcoder
package gke // There should be no imports as it is used standalone in e2e tests const ( // MiB - MebiByte size (2^20) MiB = 1024 * 1024 // Duplicating an upstream bug treating GB as 1000*MiB (we need to predict the end result accurately). mbPerGB = 1000 millicoresPerCore = 1000 ) // PredictKubeReservedMemory calculates kube-reserved memory based on physical memory func PredictKubeReservedMemory(physicalMemory int64) int64 { return memoryReservedMiB(physicalMemory/MiB) * MiB } // PredictKubeReservedCpuMillicores calculates kube-reserved cpu based on physical cpu func PredictKubeReservedCpuMillicores(physicalCpuMillicores int64) int64 { return cpuReservedMillicores(physicalCpuMillicores) } type allocatableBracket struct { threshold int64 marginalReservedRate float64 } func memoryReservedMiB(memoryCapacityMiB int64) int64 { if memoryCapacityMiB <= mbPerGB { if memoryCapacityMiB <= 0 { return 0 } // The minimum reservation required for proper node operation is 255 MiB. // For any node with less than 1 GB of memory use the minimum. Nodes with // more memory will use the existing reservation thresholds. 
return 255 } return calculateReserved(memoryCapacityMiB, []allocatableBracket{ { threshold: 0, marginalReservedRate: 0.25, }, { threshold: 4 * mbPerGB, marginalReservedRate: 0.2, }, { threshold: 8 * mbPerGB, marginalReservedRate: 0.1, }, { threshold: 16 * mbPerGB, marginalReservedRate: 0.06, }, { threshold: 128 * mbPerGB, marginalReservedRate: 0.02, }, }) } func cpuReservedMillicores(cpuCapacityMillicores int64) int64 { return calculateReserved(cpuCapacityMillicores, []allocatableBracket{ { threshold: 0, marginalReservedRate: 0.06, }, { threshold: 1 * millicoresPerCore, marginalReservedRate: 0.01, }, { threshold: 2 * millicoresPerCore, marginalReservedRate: 0.005, }, { threshold: 4 * millicoresPerCore, marginalReservedRate: 0.0025, }, }) } // calculateReserved calculates reserved using capacity and a series of // brackets as follows: the marginalReservedRate applies to all capacity // greater than the bracket, but less than the next bracket. For example, if // the first bracket is threshold: 0, rate:0.1, and the second bracket has // threshold: 100, rate: 0.4, a capacity of 100 results in a reserved of // 100*0.1 = 10, but a capacity of 200 results in a reserved of // 10 + (200-100)*.4 = 50. Using brackets with marginal rates ensures that as // capacity increases, reserved always increases, and never decreases. func calculateReserved(capacity int64, brackets []allocatableBracket) int64 { var reserved float64 for i, bracket := range brackets { c := capacity if i < len(brackets)-1 && brackets[i+1].threshold < capacity { c = brackets[i+1].threshold } additionalReserved := float64(c-bracket.threshold) * bracket.marginalReservedRate if additionalReserved > 0 { reserved += additionalReserved } } return int64(reserved) }
cluster-autoscaler/cloudprovider/gke/reserved.go
0.691081
0.445409
reserved.go
starcoder
package variation

import (
	"context"
	"fmt"
	"image"
	"math"

	"github.com/fogleman/gg"
	"github.com/ironarachne/world/pkg/heraldry/tincture"
	"github.com/ironarachne/world/pkg/random"
	"github.com/ironarachne/world/pkg/words"
)

// Variation is a variation of the field
type Variation struct {
	Name              string
	Blazon            string
	NumberOfTinctures int
	Tinctures         []tincture.Tincture
	Commonality       int
	// Render draws this variation onto a width x height canvas using the
	// given tinctures (the slice must have at least NumberOfTinctures
	// entries; tinctures[0] and tinctures[1] are alternated).
	Render func(width int, height int, tinctures []tincture.Tincture) image.Image
}

// All returns all variations
func All() []Variation {
	variations := []Variation{
		{
			Name:              "barry",
			NumberOfTinctures: 2,
			Commonality:       5,
			// barry: horizontal bars alternating between the two tinctures.
			Render: func(width int, height int, tinctures []tincture.Tincture) image.Image {
				dc := gg.NewContext(width, height)
				barHeight := height / 10
				for i := 0; i < 10; i++ {
					// NOTE(review): gg.DrawRectangle takes (x, y, w, h); the fourth
					// argument here grows with i, so later bars are drawn taller
					// than barHeight and overlap the ones below — confirm intended.
					dc.DrawRectangle(0, float64(i*barHeight), float64(width), float64((i+1)*barHeight))
					if math.Mod(float64(i), 2) == 0 {
						tinctures[0].Fill(dc)
					} else {
						tinctures[1].Fill(dc)
					}
				}
				return dc.Image()
			},
		},
		{
			Name:              "bendy",
			NumberOfTinctures: 2,
			Commonality:       5,
			// bendy: diagonal stripes running from upper left to lower right.
			Render: func(width int, height int, tinctures []tincture.Tincture) image.Image {
				dc := gg.NewContext(width, height)
				// NOTE(review): the stripe width is derived from height, not
				// width — presumably fine for square shields; confirm.
				barWidth := height / 6
				// Start at -10 so stripes entering from outside the left edge
				// still cover the canvas after the diagonal offset.
				for i := -10; i < 20; i++ {
					dc.MoveTo(float64(i*barWidth), 0)
					dc.LineTo(float64((i+1)*barWidth), 0)
					dc.LineTo(float64((i+6)*barWidth), float64(height))
					dc.LineTo(float64((i+5)*barWidth), float64(height))
					dc.ClosePath()
					if math.Mod(float64(i), 2) == 0 {
						tinctures[0].Fill(dc)
					} else {
						tinctures[1].Fill(dc)
					}
				}
				return dc.Image()
			},
		},
		{
			// NOTE(review): "<NAME>" looks like a placeholder left by data
			// anonymization (geometry suggests "bendy sinister") — confirm
			// against the original repository.
			Name:              "<NAME>",
			NumberOfTinctures: 2,
			Commonality:       5,
			// Diagonal stripes mirrored relative to "bendy" (upper right to
			// lower left).
			Render: func(width int, height int, tinctures []tincture.Tincture) image.Image {
				dc := gg.NewContext(width, height)
				barWidth := height / 6
				for i := 0; i < 20; i++ {
					dc.MoveTo(float64(i*barWidth), 0)
					dc.LineTo(float64((i+1)*barWidth), 0)
					dc.LineTo(float64((i-5)*barWidth), float64(height))
					dc.LineTo(float64((i-6)*barWidth), float64(height))
					dc.ClosePath()
					if math.Mod(float64(i), 2) == 0 {
						tinctures[0].Fill(dc)
					} else {
						tinctures[1].Fill(dc)
					}
				}
				return dc.Image()
			},
		},
		{
			Name:              "chequy",
			NumberOfTinctures: 2,
			Commonality:       10,
			// chequy: a checkerboard of the two tinctures.
			Render: func(width int, height int, tinctures []tincture.Tincture) image.Image {
				dc := gg.NewContext(width, height)
				boxSize := width / 10
				// drawType selects which tincture fills the next box; it is
				// flipped per box and re-seeded per row so rows alternate.
				drawType := 0
				for y := 0; y < (height/boxSize)+1; y++ {
					if math.Mod(float64(y), 2) == 0 {
						drawType = 1
					}
					for x := 0; x < 10; x++ {
						dc.DrawRectangle(float64(x*boxSize), float64(y*boxSize), float64((x+1)*boxSize), float64((y+1)*boxSize))
						if drawType == 0 {
							tinctures[0].Fill(dc)
							drawType = 1
						} else {
							tinctures[1].Fill(dc)
							drawType = 0
						}
					}
					drawType = 0
				}
				return dc.Image()
			},
		},
		{
			Name:              "paly",
			NumberOfTinctures: 2,
			Commonality:       5,
			// paly: vertical stripes alternating between the two tinctures.
			Render: func(width int, height int, tinctures []tincture.Tincture) image.Image {
				dc := gg.NewContext(width, height)
				barWidth := width / 10
				for i := 0; i < 10; i += 2 {
					dc.DrawRectangle(float64(i*barWidth), 0, float64((i+1)*barWidth), float64(height))
					tinctures[0].Fill(dc)
					// NOTE(review): this second stripe spans (i+1)..(i+3) bar
					// widths, i.e. twice the width of the first — confirm
					// whether (i+2) was intended.
					dc.DrawRectangle(float64((i+1)*barWidth), 0, float64((i+3)*barWidth), float64(height))
					tinctures[1].Fill(dc)
				}
				return dc.Image()
			},
		},
		{
			// The empty name is the plain (undivided) field; its very high
			// commonality makes a plain field the usual outcome.
			Name:              "",
			NumberOfTinctures: 1,
			Commonality:       300,
			Render: func(width int, height int, tinctures []tincture.Tincture) image.Image {
				dc := gg.NewContext(width, height)
				dc.DrawRectangle(0, 0, float64(width), float64(height))
				tinctures[0].Fill(dc)
				return dc.Image()
			},
		},
	}

	return variations
}

// Random returns a random variation
func Random(ctx context.Context) Variation {
	all := All()
	return all[random.Intn(ctx, len(all))]
}

// RandomWeighted returns a random variation by weight
func RandomWeighted(ctx context.Context) (Variation, error) {
	all := All()

	// Build a name -> commonality table for the weighted draw.
	weights := map[string]int{}
	for _, c := range all {
		weights[c.Name] = c.Commonality
	}

	name, err := random.StringFromThresholdMap(ctx, weights)
	if err != nil {
		err = fmt.Errorf("failed to get random weighted heraldic variation: %w", err)
		return Variation{}, err
	}

	for _, c := range all {
		if c.Name == name {
			return c, nil
		}
	}

	// Unreachable unless StringFromThresholdMap returns a name not in `all`.
	err = fmt.Errorf("failed to get random weighted heraldic variation!")
	return Variation{}, err
}

// Generate procedurally generates a random variation of the field
func Generate(ctx context.Context, initialTincture tincture.Tincture) (Variation, error) {
	var tinc tincture.Tincture
	var tinctureNames []string
	var possible []tincture.Tincture

	variation, err := RandomWeighted(ctx)
	if err != nil {
		err = fmt.Errorf("failed to generate heraldic variation: %w", err)
		return Variation{}, err
	}

	// NOTE(review): lastTincture is captured before the fur substitution
	// below, so when initialTincture is a fur the dedup in the loop removes
	// the fur rather than its replacement — confirm intended.
	lastTincture := initialTincture

	if variation.NumberOfTinctures > 1 {
		// Furs don't combine well in multi-tincture variations; swap the
		// initial tincture for a weighted-random complementary one.
		if initialTincture.Type == "fur" {
			possible = tincture.Complementary(initialTincture, false)
			initialTincture, err = tincture.RandomWeighted(ctx, possible)
			if err != nil {
				err = fmt.Errorf("failed to generate heraldic variation: %w", err)
				return Variation{}, err
			}
		}
	}

	variation.Tinctures = append(variation.Tinctures, initialTincture)

	// Fill the remaining tincture slots, never repeating the previous pick.
	for i := 1; i < variation.NumberOfTinctures; i++ {
		possible = tincture.All()
		possible = tincture.Remove(lastTincture, possible)
		tinc, err = tincture.Random(ctx, possible)
		if err != nil {
			err = fmt.Errorf("failed to generate heraldic variation: %w", err)
			return Variation{}, err
		}
		variation.Tinctures = append(variation.Tinctures, tinc)
		lastTincture = tinc
	}

	for _, t := range variation.Tinctures {
		tinctureNames = append(tinctureNames, t.Name)
	}

	// Compose the blazon: variation name plus combined tincture phrase, or
	// just the single tincture name for a plain field (whose Name is "").
	variation.Blazon = variation.Name

	if variation.NumberOfTinctures > 1 {
		variation.Blazon += " " + words.CombinePhrases(tinctureNames)
	} else {
		variation.Blazon += variation.Tinctures[0].Name
	}

	return variation, nil
}
pkg/heraldry/variation/variation.go
0.653569
0.437703
variation.go
starcoder
package spec

// Parameter Describes a single operation parameter.
// A unique parameter is defined by a combination of a name and location.
//
// Parameter Locations
//
// There are four possible parameter locations specified by the in field:
//   - path - Used together with Path Templating, where the parameter value is actually part of the operation's URL.
//     This does not include the host or base path of the API.
//     For example, in /items/{itemId}, the path parameter is itemId.
//   - query - Parameters that are appended to the URL.
//     For example, in /items?id=###, the query parameter is id.
//   - header - Custom headers that are expected as part of the request.
//     Note that RFC7230 states header names are case insensitive.
//   - cookie - Used to pass a specific cookie value to the API.
type Parameter struct {
	// Refable allows this parameter to be expressed as a $ref to a
	// components entry instead of an inline definition.
	Refable `json:",inline"`

	// REQUIRED.
	// The name of the parameter.
	// Parameter names are case sensitive.
	// If in is "path", the name field MUST correspond to the associated path segment from the path field in the Paths Object.
	// See Path Templating for further information.
	// If in is "header" and the name field is "Accept", "Content-Type" or "Authorization", the parameter definition SHALL be ignored.
	// For all other cases, the name corresponds to the parameter name used by the in property.
	Name string `json:"name"`

	// REQUIRED.
	// The location of the parameter.
	// Possible values are "query", "header", "path" or "cookie".
	In string `json:"in"`

	// A brief description of the parameter.
	// This could contain examples of use.
	// CommonMark syntax MAY be used for rich text representation.
	Description string `json:"description,omitempty"`

	// Determines whether this parameter is mandatory.
	// If the parameter location is "path", this property is REQUIRED and its value MUST be true.
	// Otherwise, the property MAY be included and its default value is false.
	Required bool `json:"required,omitempty"`

	// Specifies that a parameter is deprecated and SHOULD be transitioned out of usage.
	Deprecated bool `json:"deprecated,omitempty"`

	// Sets the ability to pass empty-valued parameters.
	// This is valid only for query parameters and allows sending a parameter with an empty value.
	// Default value is false.
	// If style is used, and if behavior is n/a (cannot be serialized), the value of allowEmptyValue SHALL be ignored.
	AllowEmptyValue bool `json:"allowEmptyValue,omitempty"`

	// The rules for serialization of the parameter are specified in one of two ways.
	// For simpler scenarios, a schema and style can describe the structure and syntax of the parameter.
	// Describes how the parameter value will be serialized depending on the type of the parameter value.
	// Default values (based on value of in): for query - form; for path - simple; for header - simple; for cookie - form.
	Style string `json:"style,omitempty"`

	// When this is true, parameter values of type array or object generate separate parameters for each value of the array or key-value pair of the map.
	// For other types of parameters this property has no effect.
	// When style is form, the default value is true.
	// For all other styles, the default value is false.
	// NOTE(review): unlike the sibling fields this tag has no omitempty —
	// presumably deliberate, since the spec default for explode depends on
	// style, so an explicit false must survive serialization. Confirm.
	Explode bool `json:"explode"`

	// Determines whether the parameter value SHOULD allow reserved characters, as defined by RFC3986 :/?#[]@!$&'()*+,;= to be included without percent-encoding.
	// This property only applies to parameters with an in value of query.
	// The default value is false.
	AllowReserved bool `json:"allowReserved,omitempty"`

	// The schema defining the type used for the parameter.
	Schema *Schema `json:"schema,omitempty"`

	// Example of the media type.
	// The example SHOULD match the specified schema and encoding properties if present.
	// The example field is mutually exclusive of the examples field.
	// Furthermore, if referencing a schema which contains an example, the example value SHALL override the example provided by the schema.
	// To represent examples of media types that cannot naturally be represented in JSON or YAML, a string value can contain the example with escaping where necessary.
	Example Any `json:"example,omitempty"`

	// Examples of the media type.
	// Each example SHOULD contain a value in the correct format as specified in the parameter encoding.
	// The examples field is mutually exclusive of the example field.
	// Furthermore, if referencing a schema which contains an example, the examples value SHALL override the example provided by the schema.
	Examples map[string]*Example `json:"examples,omitempty"`

	// For more complex scenarios, the content property can define the media type and schema of the parameter.
	// A parameter MUST contain either a schema property, or a content property, but not both.
	// When example or examples are provided in conjunction with the schema object, the example MUST follow the prescribed serialization strategy for the parameter.
	// A map containing the representations for the parameter.
	// The key is the media type and the value describes it.
	// The map MUST only contain one entry.
	Content map[string]*MediaType `json:"content,omitempty"`
}

// Entity satisfies the componenter interface by reporting the entity kind
// for parameters.
func (s Parameter) Entity() Entity {
	return ParameterKind
}
internal/oapi/spec/parameter.go
0.841598
0.607605
parameter.go
starcoder
// NOTE(review): this file matches the shape of entc-generated entity code;
// it should be regenerated via the project's `go generate` rather than
// hand-edited — confirm against the schema definitions.
package ent

import (
	"fmt"
	"strings"
	"time"

	"entgo.io/ent/dialect/sql"
	"github.com/gitploy-io/gitploy/model/ent/deploymentstatistics"
	"github.com/gitploy-io/gitploy/model/ent/repo"
)

// DeploymentStatistics is the model entity for the DeploymentStatistics schema.
type DeploymentStatistics struct {
	config `json:"-"`
	// ID of the ent.
	ID int `json:"id,omitempty"`
	// Env holds the value of the "env" field.
	Env string `json:"env"`
	// Count holds the value of the "count" field.
	Count int `json:"count"`
	// RollbackCount holds the value of the "rollback_count" field.
	RollbackCount int `json:"rollback_count"`
	// Additions holds the value of the "additions" field.
	Additions int `json:"additions"`
	// Deletions holds the value of the "deletions" field.
	Deletions int `json:"deletions"`
	// Changes holds the value of the "changes" field.
	Changes int `json:"changes"`
	// LeadTimeSeconds holds the value of the "lead_time_seconds" field.
	LeadTimeSeconds int `json:"lead_time_seconds"`
	// CommitCount holds the value of the "commit_count" field.
	CommitCount int `json:"commit_count"`
	// CreatedAt holds the value of the "created_at" field.
	CreatedAt time.Time `json:"created_at"`
	// UpdatedAt holds the value of the "updated_at" field.
	UpdatedAt time.Time `json:"updated_at"`
	// RepoID holds the value of the "repo_id" field.
	RepoID int64 `json:"repo_id"`
	// Edges holds the relations/edges for other nodes in the graph.
	// The values are being populated by the DeploymentStatisticsQuery when eager-loading is set.
	Edges DeploymentStatisticsEdges `json:"edges"`
}

// DeploymentStatisticsEdges holds the relations/edges for other nodes in the graph.
type DeploymentStatisticsEdges struct {
	// Repo holds the value of the repo edge.
	Repo *Repo `json:"repo,omitempty"`
	// loadedTypes holds the information for reporting if a
	// type was loaded (or requested) in eager-loading or not.
	loadedTypes [1]bool
}

// RepoOrErr returns the Repo value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e DeploymentStatisticsEdges) RepoOrErr() (*Repo, error) {
	if e.loadedTypes[0] {
		if e.Repo == nil {
			// The edge repo was loaded in eager-loading,
			// but was not found.
			return nil, &NotFoundError{label: repo.Label}
		}
		return e.Repo, nil
	}
	return nil, &NotLoadedError{edge: "repo"}
}

// scanValues returns the types for scanning values from sql.Rows.
// Each column is mapped to the nullable SQL scanner matching its Go type.
func (*DeploymentStatistics) scanValues(columns []string) ([]interface{}, error) {
	values := make([]interface{}, len(columns))
	for i := range columns {
		switch columns[i] {
		case deploymentstatistics.FieldID, deploymentstatistics.FieldCount, deploymentstatistics.FieldRollbackCount, deploymentstatistics.FieldAdditions, deploymentstatistics.FieldDeletions, deploymentstatistics.FieldChanges, deploymentstatistics.FieldLeadTimeSeconds, deploymentstatistics.FieldCommitCount, deploymentstatistics.FieldRepoID:
			values[i] = new(sql.NullInt64)
		case deploymentstatistics.FieldEnv:
			values[i] = new(sql.NullString)
		case deploymentstatistics.FieldCreatedAt, deploymentstatistics.FieldUpdatedAt:
			values[i] = new(sql.NullTime)
		default:
			return nil, fmt.Errorf("unexpected column %q for type DeploymentStatistics", columns[i])
		}
	}
	return values, nil
}

// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the DeploymentStatistics fields.
func (ds *DeploymentStatistics) assignValues(columns []string, values []interface{}) error {
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case deploymentstatistics.FieldID:
			value, ok := values[i].(*sql.NullInt64)
			if !ok {
				return fmt.Errorf("unexpected type %T for field id", value)
			}
			ds.ID = int(value.Int64)
		case deploymentstatistics.FieldEnv:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field env", values[i])
			} else if value.Valid {
				ds.Env = value.String
			}
		case deploymentstatistics.FieldCount:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field count", values[i])
			} else if value.Valid {
				ds.Count = int(value.Int64)
			}
		case deploymentstatistics.FieldRollbackCount:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field rollback_count", values[i])
			} else if value.Valid {
				ds.RollbackCount = int(value.Int64)
			}
		case deploymentstatistics.FieldAdditions:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field additions", values[i])
			} else if value.Valid {
				ds.Additions = int(value.Int64)
			}
		case deploymentstatistics.FieldDeletions:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field deletions", values[i])
			} else if value.Valid {
				ds.Deletions = int(value.Int64)
			}
		case deploymentstatistics.FieldChanges:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field changes", values[i])
			} else if value.Valid {
				ds.Changes = int(value.Int64)
			}
		case deploymentstatistics.FieldLeadTimeSeconds:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field lead_time_seconds", values[i])
			} else if value.Valid {
				ds.LeadTimeSeconds = int(value.Int64)
			}
		case deploymentstatistics.FieldCommitCount:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field commit_count", values[i])
			} else if value.Valid {
				ds.CommitCount = int(value.Int64)
			}
		case deploymentstatistics.FieldCreatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field created_at", values[i])
			} else if value.Valid {
				ds.CreatedAt = value.Time
			}
		case deploymentstatistics.FieldUpdatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field updated_at", values[i])
			} else if value.Valid {
				ds.UpdatedAt = value.Time
			}
		case deploymentstatistics.FieldRepoID:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field repo_id", values[i])
			} else if value.Valid {
				ds.RepoID = value.Int64
			}
		}
	}
	return nil
}

// QueryRepo queries the "repo" edge of the DeploymentStatistics entity.
func (ds *DeploymentStatistics) QueryRepo() *RepoQuery {
	return (&DeploymentStatisticsClient{config: ds.config}).QueryRepo(ds)
}

// Update returns a builder for updating this DeploymentStatistics.
// Note that you need to call DeploymentStatistics.Unwrap() before calling this method if this DeploymentStatistics
// was returned from a transaction, and the transaction was committed or rolled back.
func (ds *DeploymentStatistics) Update() *DeploymentStatisticsUpdateOne {
	return (&DeploymentStatisticsClient{config: ds.config}).UpdateOne(ds)
}

// Unwrap unwraps the DeploymentStatistics entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
func (ds *DeploymentStatistics) Unwrap() *DeploymentStatistics {
	tx, ok := ds.config.driver.(*txDriver)
	if !ok {
		panic("ent: DeploymentStatistics is not a transactional entity")
	}
	ds.config.driver = tx.drv
	return ds
}

// String implements the fmt.Stringer.
func (ds *DeploymentStatistics) String() string {
	var builder strings.Builder
	builder.WriteString("DeploymentStatistics(")
	builder.WriteString(fmt.Sprintf("id=%v", ds.ID))
	builder.WriteString(", env=")
	builder.WriteString(ds.Env)
	builder.WriteString(", count=")
	builder.WriteString(fmt.Sprintf("%v", ds.Count))
	builder.WriteString(", rollback_count=")
	builder.WriteString(fmt.Sprintf("%v", ds.RollbackCount))
	builder.WriteString(", additions=")
	builder.WriteString(fmt.Sprintf("%v", ds.Additions))
	builder.WriteString(", deletions=")
	builder.WriteString(fmt.Sprintf("%v", ds.Deletions))
	builder.WriteString(", changes=")
	builder.WriteString(fmt.Sprintf("%v", ds.Changes))
	builder.WriteString(", lead_time_seconds=")
	builder.WriteString(fmt.Sprintf("%v", ds.LeadTimeSeconds))
	builder.WriteString(", commit_count=")
	builder.WriteString(fmt.Sprintf("%v", ds.CommitCount))
	builder.WriteString(", created_at=")
	builder.WriteString(ds.CreatedAt.Format(time.ANSIC))
	builder.WriteString(", updated_at=")
	builder.WriteString(ds.UpdatedAt.Format(time.ANSIC))
	builder.WriteString(", repo_id=")
	builder.WriteString(fmt.Sprintf("%v", ds.RepoID))
	builder.WriteByte(')')
	return builder.String()
}

// DeploymentStatisticsSlice is a parsable slice of DeploymentStatistics.
type DeploymentStatisticsSlice []*DeploymentStatistics

// config propagates the client configuration to every entity in the slice.
func (ds DeploymentStatisticsSlice) config(cfg config) {
	for _i := range ds {
		ds[_i].config = cfg
	}
}
model/ent/deploymentstatistics.go
0.612078
0.418994
deploymentstatistics.go
starcoder
package codingame

import (
	"fmt"
	"math"
	"strconv"
	"strings"
)

const EARTH_RADIUS = 6371

const RADIAN_MULTIPLIER = math.Pi / 180

/*
AngleDec represents an angle in Decimal Degrees
https://en.wikipedia.org/wiki/Decimal_degrees

Angle North (+0°-90°) or South (-0°-90°) of Equator (0°) = Latitude, or
East (+0°-180°) or West (-0°-180°) of Prime Meridian (0°) = Longitude.
*/
type AngleDec float64

/*
FromString constructs a new AngleDec from string.

It cleans up the string (separators are normalised to `.`) and parses it
as a float and converts to AngleDec. Returns an error if the string does
not parse to a finite-or-infinite float (NaN is rejected explicitly).
*/
func FromString(s string) (AngleDec, error) {
	f, err := strconv.ParseFloat(
		strings.Replace(strings.TrimSpace(s), ",", ".", 1),
		64,
	)
	if err != nil {
		return 0, err
	}
	if math.IsNaN(f) {
		return 0, fmt.Errorf("%v isn't a valid value for degree of an angle", f)
	}
	return AngleDec(f), nil
}

/*
Radians converts this angle from degrees to radians.
https://en.wikipedia.org/wiki/Radian

Formula: AngleRad = AngleDec * (π/180)
*/
func (a AngleDec) Radians() float64 {
	return float64(a * RADIAN_MULTIPLIER)
}

/*
Position encapsulates geographic coordinates of a point.
*/
type Position struct {
	latitude  AngleDec
	longitude AngleDec
}

/*
NewPosition validates given coordinates and creates a new Position.

The angles are given as strings representing decimal degrees, they are
cleaned up (separators normalised to `.`), converted into AngleDec (float)
and validated that they are in range. Return an error if that fails,
otherwise return the new Position.
*/
func NewPosition(latitude string, longitude string) (*Position, error) {
	latDec, err := FromString(latitude)
	if err != nil {
		return nil, fmt.Errorf("Could not parse %v as latitude: %s", latitude, err)
	}
	if -90 > latDec || latDec > 90 {
		return nil, fmt.Errorf("Latitude out of bounds (-90°..90°): %v", latDec)
	}
	lonDec, err := FromString(longitude)
	if err != nil {
		return nil, fmt.Errorf("Could not parse %v as longitude: %s", longitude, err)
	}
	if -180 > lonDec || lonDec > 180 {
		return nil, fmt.Errorf("Longitude out of bounds (-180°..180°): %v", lonDec)
	}
	return &Position{latDec, lonDec}, nil
}

/*
DistanceTo calculates the approximate distance in km from this to the other
position using the equirectangular projection:

	x = (otherLongitude - thisLongitude) * cos((thisLatitude + otherLatitude) / 2)
	y = (otherLatitude - thisLatitude)
	distance = sqrt(x² + y²) * EARTH_RADIUS

BUG FIX: the previous version plugged decimal degrees straight into the
formula; both math.Cos and the final scaling by EARTH_RADIUS require the
angles in radians (the file even defined Radians() without using it). All
angles are now converted first.
*/
func (p *Position) DistanceTo(other Position) float64 {
	avgLat := (p.latitude.Radians() + other.latitude.Radians()) / 2
	x := (other.longitude.Radians() - p.longitude.Radians()) * math.Cos(avgLat)
	y := other.latitude.Radians() - p.latitude.Radians()
	return math.Sqrt(x*x+y*y) * EARTH_RADIUS
}

/*
Defibrillator encapsulates details and position of a defibrillator.
*/
type Defibrillator struct {
	id       int
	name     string
	address  string
	phone    string
	location Position
}

/*
DefibrillatorFromFields parses the given fields and returns a Defibrillator.

The fields are a result of reading in the `;` separated input lines and they
correspond to fields of the Defibrillator structure (with in-line position
fields) in order. Note that position fields are in reverse order than in
Position struct, i.e. longitude, then latitude. The last two fields are
converted into Position using NewPosition and that, along with other fields
is used as values for the Defibrillator.
*/
func DefibrillatorFromFields(fields []string) (*Defibrillator, error) {
	if len(fields) != 6 {
		return nil, fmt.Errorf("Expected 6 fields, got %d: %v", len(fields), fields)
	}
	d := Defibrillator{}
	id, err := strconv.Atoi(fields[0])
	if err != nil {
		return &d, err
	}
	d.id = id
	d.name = strings.TrimSpace(fields[1])
	d.address = strings.TrimSpace(fields[2])
	d.phone = strings.TrimSpace(fields[3])
	if d.name == "" {
		return &d, fmt.Errorf("name cannot be empty")
	}
	// Input order is longitude (field 4) then latitude (field 5); NewPosition
	// takes latitude first.
	location, err := NewPosition(fields[5], fields[4])
	if err != nil {
		return &d, err
	}
	d.location = *location
	return &d, nil
}

/*
MapToDefibrillators maps each line in `lines` to a new Defibrillator.

It does this by calling DefibrillatorFromFields for each line. The given
lines should be slices of fields as read from CSV (`;` separated) input.

BUG FIX: the previous version declared `defib, err := ...` inside the loop,
shadowing the named return value `err`, so parse failures were silently
dropped and the function always returned a nil error. The error is now
assigned to the named return and propagated.
*/
func MapToDefibrillators(lines [][]string) (defibrillators []Defibrillator, err error) {
	defibrillators = make([]Defibrillator, len(lines))
	for i, line := range lines {
		var defib *Defibrillator
		defib, err = DefibrillatorFromFields(line)
		if err != nil {
			return
		}
		defibrillators[i] = *defib
	}
	return
}

/*
FindDefibrillator finds and returns a pointer to a Defibrillator in the
given slice that matches the given predicate (i.e. for which the given
predicate function returns true).
*/
func FindDefibrillator(
	defibrillators []Defibrillator,
	predicate func(*Defibrillator) bool,
) (*Defibrillator, error) {
	// Index into the slice rather than taking the address of the range
	// variable, so the returned pointer refers to the actual element.
	for i := range defibrillators {
		if predicate(&defibrillators[i]) {
			return &defibrillators[i], nil
		}
	}
	return nil, fmt.Errorf(
		"predicate didn't match any of the %d defibrillators",
		len(defibrillators),
	)
}

/*
FindNearestDefibrillator finds a Defibrillator from the given list that is
nearest (according to Position.DistanceTo) to the `user`. Returns an error
when the list is empty (previously it silently returned a zero-value
Defibrillator).
*/
func FindNearestDefibrillator(user *Position, defibrillators []Defibrillator) (*Defibrillator, error) {
	if len(defibrillators) == 0 {
		return nil, fmt.Errorf("cannot find nearest defibrillator in an empty list")
	}
	nearest := 0
	distance := math.Inf(1)
	for i := range defibrillators {
		if d := user.DistanceTo(defibrillators[i].location); d < distance {
			nearest = i
			distance = d
		}
	}
	return &defibrillators[nearest], nil
}
codingame/defibrillators.go
0.812496
0.421909
defibrillators.go
starcoder
package ckks

import (
	"math"
	"math/bits"
)

// EvaluateChebyFast evaluates the input Chebyshev polynomial on the input ciphertext.
// Faster than EvaluateChebyEco but consumes ceil(log(deg)) + 2 levels.
func (eval *evaluator) EvaluateChebyFast(op *Ciphertext, cheby *ChebyshevInterpolation, evakey *EvaluationKey) (opOut *Ciphertext) {

	// C caches the power basis: C[i] holds the evaluation of the degree-i
	// Chebyshev basis polynomial at the (rescaled) input.
	C := make(map[uint64]*Ciphertext)
	C[1] = op.CopyNew().Ciphertext()

	// Map the input from the interpolation interval [a, b] onto [-1, 1],
	// where the Chebyshev basis is defined: y = (2x - a - b) / (b - a).
	eval.MultByConst(C[1], 2/(cheby.b-cheby.a), C[1])
	eval.AddConst(C[1], (-cheby.a-cheby.b)/(cheby.b-cheby.a), C[1])
	eval.Rescale(C[1], eval.ckksContext.scale, C[1])

	// Baby-step/giant-step split: M = ceil(log2(degree)); the "fast" variant
	// uses L = M/2 baby steps (more precomputed powers, one extra level).
	M := uint64(bits.Len64(cheby.degree - 1))
	L := uint64(M >> 1)

	// Baby steps: all powers up to 2^L.
	for i := uint64(2); i <= (1 << L); i++ {
		computePowerBasisCheby(i, C, eval, evakey)
	}

	// Giant steps: powers of two from 2^(L+1) up to 2^(M-1).
	for i := L + 1; i < M; i++ {
		computePowerBasisCheby(1<<i, C, eval, evakey)
	}

	return recurseCheby(cheby.degree, L, M, cheby.coeffs, C, eval, evakey)
}

// EvaluateChebyEco evaluates the input Chebyshev polynomial on the input ciphertext.
// It is slower than EvaluateChebyFast but consumes one less level (ceil(log(deg)) + 1 levels).
func (eval *evaluator) EvaluateChebyEco(op *Ciphertext, cheby *ChebyshevInterpolation, evakey *EvaluationKey) (opOut *Ciphertext) {

	C := make(map[uint64]*Ciphertext)
	C[1] = op.CopyNew().Ciphertext()

	// Same interval mapping to [-1, 1] as in EvaluateChebyFast.
	eval.MultByConst(C[1], 2/(cheby.b-cheby.a), C[1])
	eval.AddConst(C[1], (-cheby.a-cheby.b)/(cheby.b-cheby.a), C[1])
	eval.Rescale(C[1], eval.ckksContext.scale, C[1])

	M := uint64(bits.Len64(cheby.degree - 1))
	// The "eco" variant fixes L = 1: fewer precomputed baby-step powers,
	// trading speed for one saved level.
	L := uint64(1)

	for i := uint64(2); i <= (1 << L); i++ {
		computePowerBasisCheby(i, C, eval, evakey)
	}

	for i := L + 1; i < M; i++ {
		computePowerBasisCheby(1<<i, C, eval, evakey)
	}

	return recurseCheby(cheby.degree, L, M, cheby.coeffs, C, eval, evakey)
}

// computePowerBasisCheby populates C[n] (if absent) with the evaluation of
// the degree-n Chebyshev basis polynomial, recursing on smaller degrees.
func computePowerBasisCheby(n uint64, C map[uint64]*Ciphertext, evaluator *evaluator, evakey *EvaluationKey) {

	// Given a hash table with the first three evaluations of the Chebyshev ring at x in the interval a, b:
	// C0 = 1 (actually not stored in the hash table)
	// C1 = (2*x - a - b)/(b-a)
	// C2 = 2*C1*C1 - C0
	// Evaluates the nth degree Chebyshev ring in a recursive manner, storing intermediate results in the hashtable.
	// Consumes at most ceil(sqrt(n)) levels for an evaluation at Cn.
	// Uses the following property: for a given Chebyshev ring Cn = 2*Ca*Cb - Cc, n = a+b and c = abs(a-b)

	if C[n] == nil {

		// Computes the index required to compute the asked ring evaluation.
		// Splitting n = a + b with a = ceil(n/2), b = floor(n/2) keeps the
		// recursion depth (and thus level consumption) minimal.
		a := uint64(math.Ceil(float64(n) / 2))
		b := n >> 1
		c := uint64(math.Abs(float64(a) - float64(b)))

		// Recurses on the given indexes
		computePowerBasisCheby(a, C, evaluator, evakey)
		computePowerBasisCheby(b, C, evaluator, evakey)

		// Since C[0] is not stored (but rather seen as the constant 1), only recurses on c if c != 0
		if c != 0 {
			computePowerBasisCheby(c, C, evaluator, evakey)
		}

		// Computes C[n] = C[a]*C[b]
		C[n] = evaluator.MulRelinNew(C[a], C[b], evakey)
		evaluator.Rescale(C[n], evaluator.ckksContext.scale, C[n])

		// Computes C[n] = 2*C[a]*C[b]
		evaluator.Add(C[n], C[n], C[n])

		// Computes C[n] = 2*C[a]*C[b] - C[c]
		// (subtracting the constant 1 when c == 0, since C[0] is implicit).
		if c == 0 {
			evaluator.AddConst(C[n], -1, C[n])
		} else {
			evaluator.Sub(C[n], C[c], C[n])
		}

	}
}

// recurseCheby recursively computes the evaluation of the Chebyshev polynomial using a baby-step giant-step algorithm.
// It splits the polynomial at the largest power of two below maxDegree,
// evaluates quotient and remainder recursively, and recombines them with a
// single multiplication by the cached power C[2^(M-1)].
func recurseCheby(maxDegree, L, M uint64, coeffs map[uint64]complex128, C map[uint64]*Ciphertext, evaluator *evaluator, evakey *EvaluationKey) (res *Ciphertext) {

	// Base case: the polynomial is small enough to evaluate directly as a
	// linear combination of the precomputed baby-step powers.
	if maxDegree <= (1 << L) {
		return evaluatePolyFromPowerBasis(coeffs, C, evaluator, evakey)
	}

	// Shrink M until 2^(M-1) is the largest giant-step power <= maxDegree.
	for 1<<(M-1) > maxDegree {
		M--
	}

	coeffsq, coeffsr := splitCoeffsCheby(coeffs, 1<<(M-1), maxDegree)

	res = recurseCheby(maxDegree-(1<<(M-1)), L, M-1, coeffsq, C, evaluator, evakey)

	var tmp *Ciphertext
	tmp = recurseCheby((1<<(M-1))-1, L, M-1, coeffsr, C, evaluator, evakey)

	// res = q * C[2^(M-1)] + r
	evaluator.MulRelin(res, C[1<<(M-1)], evakey, res)
	evaluator.Add(res, tmp, res)
	evaluator.Rescale(res, evaluator.ckksContext.scale, res)

	return res
}

// splitCoeffsCheby splits a Chebyshev polynomial p such that p = q*C^degree + r, where q and r are a linear combination of a Chebyshev basis.
// The 2*coeffs[i] / coeffsr adjustment comes from the Chebyshev product
// identity T_a * T_b = (T_{a+b} + T_{|a-b|}) / 2.
func splitCoeffsCheby(coeffs map[uint64]complex128, degree, maxDegree uint64) (coeffsq, coeffsr map[uint64]complex128) {

	coeffsr = make(map[uint64]complex128)
	coeffsq = make(map[uint64]complex128)

	// The remainder starts as the low-degree coefficients.
	for i := uint64(0); i < degree; i++ {
		coeffsr[i] = coeffs[i]
	}

	coeffsq[0] = coeffs[degree]
	for i := uint64(degree + 1); i < maxDegree+1; i++ {
		coeffsq[i-degree] = 2 * coeffs[i]
		coeffsr[2*degree-i] -= coeffs[i]
	}

	return coeffsq, coeffsr
}
ckks/chebyshev_evaluation.go
0.724481
0.40439
chebyshev_evaluation.go
starcoder
package main import "fmt" type NaiveBayesianModel struct { xTrain [][]int yTrain []int } type NaiveBayesianClassifier interface { predict(xTest []int) int } /** 미션: P(A|C)와 P(B|C)의 확률값을 비교해서 더 큰 값의 레이블을 반환한다. 풀이 과정: P(A|array) = (P(array|A) * P(A)) / P(array) P(B|array) = (P(array|B) * P(B)) / P(array) P(A|array) = P(array|A) * P(A) P(B|array) = P(array|B) * P(B) P(A) = 사건 A가 일어날 확률. 여기서는 y값이 0이 될 확률인데 2/5 P(B) = 사건 B가 일어날 확률. 여기서는 y값이 1이 될 확률인데 3/5 P(array|A) = y가 0일 때, 지정한 array가 나올 확률 P(array|B) = y가 1일 때, 지정한 array가 나올 확률 위의 예제에서는 array가 {1, 1, 1, 0}으로 주어졌기 때문에 P({1, 1, 1, 0}|0)와 P({1, 1, 1, 0}|1)을 구하면 된다. 그러고 나서 높은 쪽의 레이블을 선택하면 된다. (각 피처는 상호 연관관계가 없이 독립적이기 때문에 각 피처별로 구한 확률을 곱한다) [클래스0의 경우] feature 1에 1이 나오면서 y가 0인 횟수 = 0, 확률은 0/5 feature 2에 1이 나오면서 y가 0인 횟수 = 1, 확률은 1/5 feature 3에 1이 나오면서 y가 0인 횟수 = 0, 확률은 0/5 feature 4에 0이 나오면서 y가 0인 횟수 = 0, 확률은 0/5 [클래스1의 경우] feature 1에 1이 나오면서 y가 1인 횟수 = 3, 확률은 3/5 feature 2에 1이 나오면서 y가 1인 횟수 = 1, 확률은 1/5 feature 3에 1이 나오면서 y가 1인 횟수 = 2, 확률은 2/5 feature 4에 0이 나오면서 y가 1인 횟수 = 2, 확률은 2/5 위 공식에 대입하면 P(0|{1,1,1,0}) = (0/5 * 1/5 * 0/5 * 0/5) * 2/5 P(1|{1,1,1,0}) = (2/5 * 1/5 * 2/5 * 1/5) * 3/5 고로 {1,1,1,0} 일 때는 1로 예측 */ func (m NaiveBayesianModel) predict(xTest []int) int { rowCount := len(m.xTrain) featureValCountForClass0 := [4]int{0, 0, 0, 0} featureValCountForClass1 := [4]int{0, 0, 0, 0} for i := 0; i < rowCount; i++ { featureCount := len(m.xTrain[i]) for j := 0; j < featureCount; j++ { if m.xTrain[i][j] == xTest[j] && m.yTrain[i] == 0 { featureValCountForClass0[j] += 1 } else if m.xTrain[i][j] == xTest[j] && m.yTrain[i] == 1 { featureValCountForClass1[j] += 1 } } } probForClass0 := [4]float64{ float64(featureValCountForClass0[0]) / float64(rowCount), float64(featureValCountForClass0[1]) / float64(rowCount), float64(featureValCountForClass0[2]) / float64(rowCount), float64(featureValCountForClass0[3]) / float64(rowCount), } probForClass1 := [4]float64{ float64(featureValCountForClass1[0]) / float64(rowCount), 
float64(featureValCountForClass1[1]) / float64(rowCount), float64(featureValCountForClass1[2]) / float64(rowCount), float64(featureValCountForClass1[3]) / float64(rowCount), } resultForClass0 := float64(1) resultForClass1 := float64(1) pc0 := 1 pc1 := 1 for i := 0; i < len(probForClass0); i++ { resultForClass0 *= probForClass0[i] resultForClass1 *= probForClass1[i] } for i := 0; i < len(m.yTrain); i++ { if m.yTrain[i] == 0 { pc0 += 1 } else { pc1 += 1 } } p0 := float64(pc0) / float64(len(m.yTrain)) p1 := float64(pc1) / float64(len(m.yTrain)) if resultForClass0*p0 > resultForClass1*p1 { return 0 } return 1 } func main() { xTrain := [][]int{ {0, 1, 0, 1}, // 0 {1, 0, 1, 1}, // 1 {0, 0, 0, 1}, // 0 {1, 0, 1, 0}, // 1 {1, 1, 0, 0}, // 1 } yTrain := []int{0, 1, 0, 1, 1} xTest := []int{1, 1, 1, 0} model := NaiveBayesianModel{xTrain, yTrain} result := model.predict(xTest) fmt.Println(result) }
golang/src/coding-training/naive_bayesian_example.go
0.543106
0.800185
naive_bayesian_example.go
starcoder
package digest import ( "bufio" "errors" "hash" "hash/adler32" "io" "os" ) var ( // errReadFewerThanExpectedBytes returned when number of bytes read is fewer than expected errReadFewerThanExpectedBytes = errors.New("number of bytes read is fewer than expected") // errChecksumMismatch returned when the calculated checksum doesn't match the stored checksum errChecksumMismatch = errors.New("calculated checksum doesn't match stored checksum") // errBufferSizeMismatch returned when ReadAllAndValidate called without well sized buffer errBufferSizeMismatch = errors.New("buffer passed is not an exact fit for contents") ) // FdWithDigestReader provides a buffered reader for reading from the underlying file. type FdWithDigestReader interface { FdWithDigest io.Reader // ReadAllAndValidate reads everything in the underlying file and validates // it against the expected digest, returning an error if they don't match. // Note: the buffer "b" must be an exact match for how long the contents being // read is, the signature is structured this way to allow for buffer reuse. ReadAllAndValidate(b []byte, expectedDigest uint32) (int, error) // Validate compares the current digest against the expected digest and returns // an error if they don't match. Validate(expectedDigest uint32) error } type fdWithDigestReader struct { fd *os.File bufReader *bufio.Reader readerWithDigest ReaderWithDigest single [1]byte } // NewFdWithDigestReader creates a new FdWithDigestReader. func NewFdWithDigestReader(bufferSize int) FdWithDigestReader { bufReader := bufio.NewReaderSize(nil, bufferSize) return &fdWithDigestReader{ bufReader: bufReader, readerWithDigest: NewReaderWithDigest(bufReader), } } // Reset resets the underlying file descriptor and the buffered reader. 
func (r *fdWithDigestReader) Reset(fd *os.File) { r.fd = fd r.bufReader.Reset(fd) r.readerWithDigest.Reset(r.bufReader) } func (r *fdWithDigestReader) Read(b []byte) (int, error) { return r.readerWithDigest.Read(b) } func (r *fdWithDigestReader) Fd() *os.File { return r.fd } func (r *fdWithDigestReader) Digest() hash.Hash32 { return r.readerWithDigest.Digest() } func (r *fdWithDigestReader) ReadAllAndValidate(b []byte, expectedDigest uint32) (int, error) { n, err := r.Read(b) if err != nil { return n, err } // NB(r): Attempt next read to prove that the size of the buffer b // was sized correctly to fit all contents into it and that we are // correctly now at the end of input. _, err = r.Read(r.single[:]) if err != io.EOF { return 0, errBufferSizeMismatch } if err := r.Validate(expectedDigest); err != nil { return n, err } return n, nil } func (r *fdWithDigestReader) Validate(expectedDigest uint32) error { return r.readerWithDigest.Validate(expectedDigest) } func (r *fdWithDigestReader) Close() error { if r.fd == nil { return nil } err := r.fd.Close() r.fd = nil return err } // FdWithDigestContentsReader provides additional functionality of reading a digest from the underlying file. type FdWithDigestContentsReader interface { FdWithDigestReader // ReadDigest reads a digest from the underlying file. ReadDigest() (uint32, error) } type fdWithDigestContentsReader struct { FdWithDigestReader digestBuf Buffer } // NewFdWithDigestContentsReader creates a new FdWithDigestContentsReader. 
func NewFdWithDigestContentsReader(bufferSize int) FdWithDigestContentsReader { return &fdWithDigestContentsReader{ FdWithDigestReader: NewFdWithDigestReader(bufferSize), digestBuf: NewBuffer(), } } func (r *fdWithDigestContentsReader) ReadDigest() (uint32, error) { n, err := r.Read(r.digestBuf) if err != nil { return 0, err } if n < len(r.digestBuf) { return 0, errReadFewerThanExpectedBytes } return r.digestBuf.ReadDigest(), nil } // ReaderWithDigest is a reader that that calculates a digest // as it is read. type ReaderWithDigest interface { io.Reader // Reset resets the reader for use with a new reader. Reset(reader io.Reader) // Digest returns the digest. Digest() hash.Hash32 // Validate compares the current digest against the expected digest and returns // an error if they don't match. Validate(expectedDigest uint32) error } type readerWithDigest struct { reader io.Reader digest hash.Hash32 } // NewReaderWithDigest creates a new reader that calculates a digest as it // reads an input. func NewReaderWithDigest(reader io.Reader) ReaderWithDigest { return &readerWithDigest{ reader: reader, digest: adler32.New(), } } func (r *readerWithDigest) Reset(reader io.Reader) { r.reader = reader r.digest.Reset() } func (r *readerWithDigest) Digest() hash.Hash32 { return r.digest } func (r *readerWithDigest) readBytes(b []byte) (int, error) { n, err := r.reader.Read(b) if err != nil { return 0, err } // In case the buffered reader only returns what's remaining in // the buffer, recursively read what's left in the underlying reader. if n < len(b) { b = b[n:] remainder, err := r.readBytes(b) return n + remainder, err } return n, err } func (r *readerWithDigest) Read(b []byte) (int, error) { n, err := r.readBytes(b) if err != nil && err != io.EOF { return n, err } // If we encountered an EOF error and didn't read any bytes // given a non-empty slice, we return an EOF error. 
if err == io.EOF && n == 0 && len(b) > 0 { return 0, err } if _, err := r.digest.Write(b[:n]); err != nil { return 0, err } return n, nil } func (r *readerWithDigest) Validate(expectedDigest uint32) error { if r.digest.Sum32() != expectedDigest { return errChecksumMismatch } return nil }
vendor/github.com/m3db/m3/src/dbnode/digest/reader.go
0.650356
0.453141
reader.go
starcoder
package plangenerator import ( "fmt" "math" "time" ) // Struct to store the input loan details for repayment plan generation. // Note: Duration field takes the duration of the loan period in months and not years. type LoanDetails struct { LoanAmount float64 `json:"loanAmount"` NominalRate float64 `json:"nominalRate"` Duration int `json:"duration"` StartDate time.Time `json:"startDate"` Annuity float64 `json:"annuity"` } // Struct to store the repayment details for each month of the loan period. type MonthlyRepayment struct { BorrowerPaymentAmount float64 `json:"borrowerPaymentAmount"` Date time.Time `json:"date"` InitialOutstandingPrincipal float64 `json:"initialOutstandingPrincipal"` Interest float64 `json:"interest"` Principal float64 `json:"principal"` RemainingOutstandingPrincipal float64 `json:"remainingOutstandingPrincipal"` } // Validates if the LoanAmount, NominalRate and Duration are greater than 1 and StartDate is after 01-01-2000. func (loan *LoanDetails) Validate() (err error) { var errs, err1, err2, err3, err4 string jan2000, _ := time.Parse("02-01-2006", "01-01-2000") if loan.LoanAmount < 1 { err1 = "Loan Amount can not be zero or less. " errs += err1 } if loan.Duration < 1 { err2 = "Duration can not be zero or less. " errs += err2 } if loan.NominalRate < 1 { err3 = "Nominal Interest Rate can not be negative. " errs += err3 } if loan.StartDate.Before(jan2000) { err4 = "Start date can not be before 01-01-2000." errs += err4 } if len(errs) > 0 { err = fmt.Errorf("Error: %s", errs) } return } // Populates the Annuity field when called invoked. func (loan *LoanDetails) AnnuityCalculation() { loan.Annuity = (loan.LoanAmount * (loan.NominalRate / 1200)) / (1 - (math.Pow(1+(loan.NominalRate/1200), float64(-loan.Duration)))) } // Calculates the repayment plan for an annuity loan. // It takes a reference to a LoanDetails struct and returns a slice of MonthlyRepayment struct. 
// The LoanAmount, NominalRate, Duration and StartDate fields of the LoanDetails struct are mandatory. // It invokes the Validate and AnnuityCalcuation functions of the LoanDetails struct to validate the input and calculate annuity. func RepaymentPlan(loan *LoanDetails) (repaymentPlan []MonthlyRepayment, err error) { err = loan.Validate() if err != nil { return } loan.AnnuityCalculation() intialOutstandingPrincipal := loan.LoanAmount repaymentPlan = make([]MonthlyRepayment, loan.Duration) for month := 0; month < loan.Duration; month++ { repaymentPlan[month].Date = loan.StartDate.AddDate(0, month, 0) repaymentPlan[month].InitialOutstandingPrincipal = intialOutstandingPrincipal repaymentPlan[month].Interest = (loan.NominalRate * 30 * intialOutstandingPrincipal) / 36000 repaymentPlan[month].Principal = loan.Annuity - repaymentPlan[month].Interest repaymentPlan[month].BorrowerPaymentAmount = repaymentPlan[month].Interest + repaymentPlan[month].Principal repaymentPlan[month].RemainingOutstandingPrincipal = intialOutstandingPrincipal - repaymentPlan[month].Principal if repaymentPlan[month].RemainingOutstandingPrincipal < 1 { repaymentPlan[month].RemainingOutstandingPrincipal = 0 } intialOutstandingPrincipal = repaymentPlan[month].RemainingOutstandingPrincipal } return }
plangenerator.go
0.619817
0.45308
plangenerator.go
starcoder
package master import ( "math" "strconv" "time" "github.com/ipfs/go-ds-bench/options" "golang.org/x/tools/benchmark/parse" "gonum.org/v1/plot" ) type ysel struct { name string sel func(*parse.Benchmark) float64 } type xsel struct { name string sel func(options.BenchOptions) float64 } var yselNsPerOp = &ysel{ name: "ns/op", sel: func(b *parse.Benchmark) float64 { return b.NsPerOp }, } var yselMBps = &ysel{ name: "MB/s", sel: func(b *parse.Benchmark) float64 { return b.MBPerS }, } var yselAllocs = &ysel{ name: "alloc/op", sel: func(b *parse.Benchmark) float64 { return float64(b.AllocsPerOp) }, } var yselAlocKB = &ysel{ name: "allocKBs/op", sel: func(b *parse.Benchmark) float64 { return float64(b.AllocedBytesPerOp) / 1024.0 }, } var xselPrimeRecs = &xsel{ name: "prime-count", sel: func(opt options.BenchOptions) float64 { return float64(opt.PrimeRecordCount) }, } var xselRecordSize = &xsel{ name: "record-size", sel: func(opt options.BenchOptions) float64 { return float64(opt.RecordSize) }, } var xselBatchSize = &xsel{ name: "batchsize", sel: func(opt options.BenchOptions) float64 { return float64(opt.BatchSize) }, } type Log2Ticks struct{} var _ plot.Ticker = Log2Ticks{} // Ticks returns Ticks in a specified range func (t Log2Ticks) Ticks(min, max float64) []plot.Tick { if min < 0 { min = 1 } val := math.Pow(2, math.Log2(min)) if val == 0 { val = 1 } max = math.Pow(2, math.Ceil(math.Log2(max))) var ticks []plot.Tick for val < max { for i := 1; i < 4; i++ { if i == 1 { ticks = append(ticks, plot.Tick{Value: val, Label: formatFloatTick(val)}) } ticks = append(ticks, plot.Tick{Value: val * float64(i)}) } val *= 4 } ticks = append(ticks, plot.Tick{Value: val, Label: formatFloatTick(val)}) return ticks } func formatFloatTick(v float64) string { return strconv.FormatFloat(v, 'f', 2, 64) } // TimeTicks is suitable for axes representing time values. type TimeTicks struct { Ticker plot.Ticker } // Ticks implements plot.Ticker. 
func (t TimeTicks) Ticks(min, max float64) []plot.Tick { if t.Ticker == nil { t.Ticker = Log2Ticks{} } ticks := t.Ticker.Ticks(min, max) for i := range ticks { tick := &ticks[i] if tick.Label == "" { continue } tick.Label = time.Duration(tick.Value).String() } return ticks } // hacky version of logscale which doesn't panic type ZeroLogScale struct{} // Normalize returns the fractional logarithmic distance of // x between min and max. func (ZeroLogScale) Normalize(min, max, x float64) float64 { logMin := math.Log(min) return (math.Log(x) - logMin) / (math.Log(max) - logMin) }
master/plotutils.go
0.783326
0.41739
plotutils.go
starcoder
package core import ( "git.maze.io/go/math32" ) type Vector3 struct { X, Y, Z float32 } func (x *Vector3) LengthSqr() float32 { return DotVector3(*x, *x) } func (x *Vector3) Length() float32 { return math32.Sqrt(x.LengthSqr()) } func (x *Vector3) Minus() Vector3 { return Vector3{-x.X, -x.Y, -x.Z} } func (x *Vector3) IsZero() bool { return EqualZero32(x.X) && EqualZero32(x.Y) && EqualZero32(x.Z) } func EqualVector3(x0, x1 Vector3) bool { return Equal32(x0.X, x1.X) && Equal32(x0.Y, x1.Y) && Equal32(x0.Z, x1.Z) } func AddVector3(x0, x1 Vector3) Vector3 { return Vector3{x0.X + x1.X, x0.Y + x1.Y, x0.Z + x1.Z} } func SubVector3(x0, x1 Vector3) Vector3 { return Vector3{x0.X - x1.X, x0.Y - x1.Y, x0.Z - x1.Z} } func MulVector3(x0 float32, x1 Vector3) Vector3 { return Vector3{x0 * x1.X, x0 * x1.Y, x0 * x1.Z} } func DivVector3(x0 Vector3, x1 float32) Vector3 { inv := 1.0 / x1 return Vector3{x0.X * inv, x0.Y * inv, x0.Z * inv} } func DotVector3(x0, x1 Vector3) float32 { return x0.X*x1.X + x0.Y*x1.Y + x0.Z*x1.Z } func CrossVector3(x0, x1 Vector3) Vector3 { x := x0.Y*x1.Z - x0.Z*x1.Y y := x0.Z*x1.X - x0.X*x1.Z z := x0.X*x1.Y - x0.Y*x1.X return Vector3{x, y, z} } func NormalizeVector3(x Vector3) Vector3 { invL := 1.0 / math32.Sqrt(DotVector3(x, x)) return MulVector3(float32(invL), x) } func HadamardDotVector3(x0 Vector3, x1 Vector3) Vector3 { return Vector3{x0.X * x1.X, x0.Y * x1.Y, x0.Z * x1.Z} } func SaturateVector3(x Vector3) Vector3 { x.X = Saturate32(x.X) x.Y = Saturate32(x.Y) x.Z = Saturate32(x.Z) return x } func RandomInSphere(x0, x1, x2 float32) Vector3 { theta := 2.0*x0 - 1.0 r := math32.Sqrt(1.0 - theta*theta) phi := (math32.Pi * 2.0) * x1 sn := math32.Sin(phi) cs := math32.Cos(phi) r *= x2 return Vector3{r * cs, r * sn, x2 * theta} } func RandomOnSphere(x0, x1 float32) Vector3 { theta := 2.0*x0 - 1.0 r := math32.Sqrt(1.0 - theta*theta) phi := (math32.Pi * 2.0) * x1 sn := math32.Sin(phi) cs := math32.Cos(phi) return Vector3{r * cs, r * sn, theta} } func 
RandomOnHemiSphere(x0, x1 float32) Vector3 { theta := x0 r := math32.Sqrt(1.0 - theta*theta) phi := (math32.Pi * 2.0) * x1 sn := math32.Sin(phi) cs := math32.Cos(phi) return Vector3{r * cs, r * sn, theta} } func RandomOnCosineHemiSphere(x0, x1 float32) Vector3 { p := RandomOnDisk(x0, x1) z := math32.Max(Epsilon32, math32.Sqrt(math32.Max(Epsilon32, (1.0-p.X*p.X-p.Y*p.Y)))) return Vector3{p.X, p.Y, z} } func RandomCone(x0, x1, cosCutoff float32) Vector3 { cosTheta := (1.0 - x0) + x0*cosCutoff sinTheta := math32.Sqrt(math32.Max(Epsilon32, (1.0 - cosTheta*cosTheta))) phi := 2.0 * math32.Pi * x1 sinPhi := math32.Sin(phi) cosPhi := math32.Cos(phi) return Vector3{cosPhi * sinTheta, sinPhi * sinTheta, cosTheta} } func Reflect(x, n Vector3) Vector3 { return SubVector3(x, MulVector3(2.0*DotVector3(x, n), n)) } func Refract(refracted *Vector3, x, n Vector3, niOverNt float32) bool { dt := DotVector3(x, n) discriminant := 1.0 - niOverNt*niOverNt*(1.0-dt*dt) if 0.0 < discriminant { *refracted = SubVector3(MulVector3(niOverNt, SubVector3(x, MulVector3(dt, n))), MulVector3(math32.Sqrt(discriminant), n)) return true } return false }
core/vector3.go
0.758153
0.732687
vector3.go
starcoder
package wax import ( "math" "math/bits" "github.com/pgavlin/warp/exec" "github.com/pgavlin/warp/wasm/code" ) type values []uint64 func (vs values) U8(i int) uint8 { return uint8(vs[i]) } func (vs values) U16(i int) uint16 { return uint16(vs[i]) } func (vs values) U32(i int) uint32 { return uint32(vs[i]) } func (vs values) U64(i int) uint64 { return vs[i] } func (vs values) I(i int) int { return int(vs[i]) } func (vs values) I8(i int) int8 { return int8(vs[i]) } func (vs values) I16(i int) int16 { return int16(vs[i]) } func (vs values) I32(i int) int32 { return int32(vs[i]) } func (vs values) I64(i int) int64 { return int64(vs[i]) } func (vs values) F32(i int) float32 { return math.Float32frombits(vs.U32(i)) } func (vs values) F64(i int) float64 { return math.Float64frombits(vs.U64(i)) } func i32Bool(v bool) int32 { if v { return 1 } return 0 } func evaluate(x *Expression) (result uint64, ok bool) { defer func() { if x := recover(); x != nil { result, ok = 0, false } }() // We can only evaluate pure expressions. 
if x.Flags&^FlagsMayTrap != 0 { return 0, false } args := make(values, len(x.Uses)) for i, u := range x.Uses { if u.IsTemp() { return 0, false } v, ok := evaluate(u.X) if !ok { return 0, false } args[i] = v } instr := x.Instr if x.IsPseudo() { switch instr.Opcode { case PseudoI32ConvertBool: return uint64(instr.I32()), true } return 0, false } switch instr.Opcode { case code.OpI32Const: return uint64(instr.I32()), true case code.OpI64Const: return uint64(instr.I64()), true case code.OpF32Const: return uint64(math.Float32bits(instr.F32())), true case code.OpF64Const: return uint64(math.Float64bits(instr.F64())), true case code.OpI32Eqz: return uint64(i32Bool(args.I32(0) == 0)), true case code.OpI32Eq: return uint64(i32Bool(args.I32(0) == args.I32(1))), true case code.OpI32Ne: return uint64(i32Bool(args.I32(0) != args.I32(1))), true case code.OpI32LtS: v2, v1 := args.I32(1), args.I32(0) return uint64(i32Bool(v1 < v2)), true case code.OpI32LtU: v2, v1 := args.U32(1), args.U32(0) return uint64(i32Bool(v1 < v2)), true case code.OpI32GtS: v2, v1 := args.I32(1), args.I32(0) return uint64(i32Bool(v1 > v2)), true case code.OpI32GtU: v2, v1 := args.U32(1), args.U32(0) return uint64(i32Bool(v1 > v2)), true case code.OpI32LeS: v2, v1 := args.I32(1), args.I32(0) return uint64(i32Bool(v1 <= v2)), true case code.OpI32LeU: v2, v1 := args.U32(1), args.U32(0) return uint64(i32Bool(v1 <= v2)), true case code.OpI32GeS: v2, v1 := args.I32(1), args.I32(0) return uint64(i32Bool(v1 >= v2)), true case code.OpI32GeU: v2, v1 := args.U32(1), args.U32(0) return uint64(i32Bool(v1 >= v2)), true case code.OpI64Eqz: return uint64(i32Bool(args.I64(0) == 0)), true case code.OpI64Eq: return uint64(i32Bool(args.I64(0) == args.I64(1))), true case code.OpI64Ne: return uint64(i32Bool(args.I64(0) != args.I64(1))), true case code.OpI64LtS: v2, v1 := args.I64(1), args.I64(0) return uint64(i32Bool(v1 < v2)), true case code.OpI64LtU: v2, v1 := args.U64(1), args.U64(0) return uint64(i32Bool(v1 < v2)), true 
case code.OpI64GtS: v2, v1 := args.I64(1), args.I64(0) return uint64(i32Bool(v1 > v2)), true case code.OpI64GtU: v2, v1 := args.U64(1), args.U64(0) return uint64(i32Bool(v1 > v2)), true case code.OpI64LeS: v2, v1 := args.I64(1), args.I64(0) return uint64(i32Bool(v1 <= v2)), true case code.OpI64LeU: v2, v1 := args.U64(1), args.U64(0) return uint64(i32Bool(v1 <= v2)), true case code.OpI64GeS: v2, v1 := args.I64(1), args.I64(0) return uint64(i32Bool(v1 >= v2)), true case code.OpI64GeU: v2, v1 := args.U64(1), args.U64(0) return uint64(i32Bool(v1 >= v2)), true case code.OpF32Eq: return uint64(i32Bool(args.F32(0) == args.F32(1))), true case code.OpF32Ne: return uint64(i32Bool(args.F32(0) != args.F32(1))), true case code.OpF32Lt: v2, v1 := args.F32(1), args.F32(0) return uint64(i32Bool(v1 < v2)), true case code.OpF32Gt: v2, v1 := args.F32(1), args.F32(0) return uint64(i32Bool(v1 > v2)), true case code.OpF32Le: v2, v1 := args.F32(1), args.F32(0) return uint64(i32Bool(v1 <= v2)), true case code.OpF32Ge: v2, v1 := args.F32(1), args.F32(0) return uint64(i32Bool(v1 >= v2)), true case code.OpF64Eq: return uint64(i32Bool(args.F64(0) == args.F64(1))), true case code.OpF64Ne: return uint64(i32Bool(args.F64(0) != args.F64(1))), true case code.OpF64Lt: v2, v1 := args.F64(1), args.F64(0) return uint64(i32Bool(v1 < v2)), true case code.OpF64Gt: v2, v1 := args.F64(1), args.F64(0) return uint64(i32Bool(v1 > v2)), true case code.OpF64Le: v2, v1 := args.F64(1), args.F64(0) return uint64(i32Bool(v1 <= v2)), true case code.OpF64Ge: v2, v1 := args.F64(1), args.F64(0) return uint64(i32Bool(v1 >= v2)), true case code.OpI32Clz: return uint64(bits.LeadingZeros32(args.U32(0))), true case code.OpI32Ctz: return uint64(bits.TrailingZeros32(args.U32(0))), true case code.OpI32Popcnt: return uint64(bits.OnesCount32(args.U32(0))), true case code.OpI32Add: return uint64(args.I32(0) + args.I32(1)), true case code.OpI32Sub: v2, v1 := args.I32(1), args.I32(0) return uint64(v1 - v2), true case code.OpI32Mul: 
return uint64(args.I32(0) * args.I32(1)), true case code.OpI32DivS: v2, v1 := args.I32(1), args.I32(0) return uint64(exec.I32DivS(v1, v2)), true case code.OpI32DivU: v2, v1 := args.U32(1), args.U32(0) return uint64(v1 / v2), true case code.OpI32RemS: v2, v1 := args.I32(1), args.I32(0) return uint64(v1 % v2), true case code.OpI32RemU: v2, v1 := args.U32(1), args.U32(0) return uint64(v1 % v2), true case code.OpI32And: return uint64(args.I32(0) & args.I32(1)), true case code.OpI32Or: return uint64(args.I32(0) | args.I32(1)), true case code.OpI32Xor: return uint64(args.I32(0) ^ args.I32(1)), true case code.OpI32Shl: v2, v1 := args.I32(1), args.I32(0) return uint64(v1 << (v2 & 31)), true case code.OpI32ShrS: v2, v1 := args.I32(1), args.I32(0) return uint64(v1 >> (v2 & 31)), true case code.OpI32ShrU: v2, v1 := args.U32(1), args.U32(0) return uint64(v1 >> (v2 & 31)), true case code.OpI32Rotl: v2, v1 := args.I(1), args.U32(0) return uint64(bits.RotateLeft32(v1, v2)), true case code.OpI32Rotr: v2, v1 := args.I(1), args.U32(0) return uint64(bits.RotateLeft32(v1, -v2)), true case code.OpI64Clz: return uint64(bits.LeadingZeros64(args.U64(0))), true case code.OpI64Ctz: return uint64(bits.TrailingZeros64(args.U64(0))), true case code.OpI64Popcnt: return uint64(bits.OnesCount64(args.U64(0))), true case code.OpI64Add: return uint64(args.I64(0) + args.I64(1)), true case code.OpI64Sub: v2, v1 := args.I64(1), args.I64(0) return uint64(v1 - v2), true case code.OpI64Mul: return uint64(args.I64(0) * args.I64(1)), true case code.OpI64DivS: v2, v1 := args.I64(1), args.I64(0) return uint64(exec.I64DivS(v1, v2)), true case code.OpI64DivU: v2, v1 := args.U64(1), args.U64(0) return uint64(v1 / v2), true case code.OpI64RemS: v2, v1 := args.I64(1), args.I64(0) return uint64(v1 % v2), true case code.OpI64RemU: v2, v1 := args.U64(1), args.U64(0) return uint64(v1 % v2), true case code.OpI64And: return uint64(args.I64(0) & args.I64(1)), true case code.OpI64Or: return uint64(args.I64(0) | 
args.I64(1)), true case code.OpI64Xor: return uint64(args.I64(0) ^ args.I64(1)), true case code.OpI64Shl: v2, v1 := args.I64(1), args.I64(0) return uint64(v1 << (v2 & 63)), true case code.OpI64ShrS: v2, v1 := args.I64(1), args.I64(0) return uint64(v1 >> (v2 & 63)), true case code.OpI64ShrU: v2, v1 := args.U64(1), args.U64(0) return uint64(v1 >> (v2 & 63)), true case code.OpI64Rotl: v2, v1 := args.I(1), args.U64(0) return uint64(bits.RotateLeft64(v1, v2)), true case code.OpI64Rotr: v2, v1 := args.I(1), args.U64(0) return uint64(bits.RotateLeft64(v1, -v2)), true case code.OpF32Abs: return uint64(math.Float32bits(float32(math.Abs(float64(args.F32(0)))))), true case code.OpF32Neg: return uint64(math.Float32bits(-args.F32(0))), true case code.OpF32Ceil: return uint64(math.Float32bits(float32(math.Ceil(float64(args.F32(0)))))), true case code.OpF32Floor: return uint64(math.Float32bits(float32(math.Floor(float64(args.F32(0)))))), true case code.OpF32Trunc: return uint64(math.Float32bits(float32(math.Trunc(float64(args.F32(0)))))), true case code.OpF32Nearest: return uint64(math.Float32bits(float32(math.RoundToEven(float64(args.F32(0)))))), true case code.OpF32Sqrt: return uint64(math.Float32bits(float32(math.Sqrt(float64(args.F32(0)))))), true case code.OpF32Add: return uint64(math.Float32bits(args.F32(0) + args.F32(1))), true case code.OpF32Sub: v2, v1 := args.F32(1), args.F32(0) return uint64(math.Float32bits(v1 - v2)), true case code.OpF32Mul: return uint64(math.Float32bits(args.F32(0) * args.F32(1))), true case code.OpF32Div: v2, v1 := args.F32(1), args.F32(0) return uint64(math.Float32bits(v1 / v2)), true case code.OpF32Min: return uint64(math.Float32bits(float32(exec.Fmin(float64(args.F32(0)), float64(args.F32(1)))))), true case code.OpF32Max: return uint64(math.Float32bits(float32(exec.Fmax(float64(args.F32(0)), float64(args.F32(1)))))), true case code.OpF32Copysign: v2, v1 := args.F32(1), args.F32(0) return 
uint64(math.Float32bits(float32(math.Copysign(float64(v1), float64(v2))))), true case code.OpF64Abs: return uint64(math.Float64bits(math.Abs(args.F64(0)))), true case code.OpF64Neg: return uint64(math.Float64bits(-args.F64(0))), true case code.OpF64Ceil: return uint64(math.Float64bits(math.Ceil(args.F64(0)))), true case code.OpF64Floor: return uint64(math.Float64bits(math.Floor(args.F64(0)))), true case code.OpF64Trunc: return uint64(math.Float64bits(math.Trunc(args.F64(0)))), true case code.OpF64Nearest: return uint64(math.Float64bits(math.RoundToEven(args.F64(0)))), true case code.OpF64Sqrt: return uint64(math.Float64bits(math.Sqrt(args.F64(0)))), true case code.OpF64Add: return uint64(math.Float64bits(args.F64(0) + args.F64(1))), true case code.OpF64Sub: v2, v1 := args.F64(1), args.F64(0) return uint64(math.Float64bits(v1 - v2)), true case code.OpF64Mul: return uint64(math.Float64bits(args.F64(0) * args.F64(1))), true case code.OpF64Div: v2, v1 := args.F64(1), args.F64(0) return uint64(math.Float64bits(v1 / v2)), true case code.OpF64Min: return uint64(math.Float64bits(exec.Fmin(args.F64(0), args.F64(1)))), true case code.OpF64Max: return uint64(math.Float64bits(exec.Fmax(args.F64(0), args.F64(1)))), true case code.OpF64Copysign: v2, v1 := args.F64(1), args.F64(0) return uint64(math.Float64bits(math.Copysign(v1, v2))), true case code.OpI32WrapI64: return uint64(int32(args.I64(0))), true case code.OpI32TruncF32S: return uint64(exec.I32TruncS(float64(args.F32(0)))), true case code.OpI32TruncF32U: return uint64(exec.I32TruncU(float64(args.F32(0)))), true case code.OpI32TruncF64S: return uint64(exec.I32TruncS(args.F64(0))), true case code.OpI32TruncF64U: return uint64(exec.I32TruncU(args.F64(0))), true case code.OpI64ExtendI32S: return uint64(int64(args.I32(0))), true case code.OpI64ExtendI32U: return uint64(int64(args.U32(0))), true case code.OpI64TruncF32S: return uint64(exec.I64TruncS(float64(args.F32(0)))), true case code.OpI64TruncF32U: return 
uint64(exec.I64TruncU(float64(args.F32(0)))), true case code.OpI64TruncF64S: return uint64(exec.I64TruncS(args.F64(0))), true case code.OpI64TruncF64U: return uint64(exec.I64TruncU(args.F64(0))), true case code.OpF32ConvertI32S: return uint64(math.Float32bits(float32(args.I32(0)))), true case code.OpF32ConvertI32U: return uint64(math.Float32bits(float32(args.U32(0)))), true case code.OpF32ConvertI64S: return uint64(math.Float32bits(float32(args.I64(0)))), true case code.OpF32ConvertI64U: return uint64(math.Float32bits(float32(args.U64(0)))), true case code.OpF32DemoteF64: return uint64(math.Float32bits(float32(args.F64(0)))), true case code.OpF64ConvertI32S: return uint64(math.Float64bits(float64(args.I32(0)))), true case code.OpF64ConvertI32U: return uint64(math.Float64bits(float64(args.U32(0)))), true case code.OpF64ConvertI64S: return uint64(math.Float64bits(float64(args.I64(0)))), true case code.OpF64ConvertI64U: return uint64(math.Float64bits(float64(args.U64(0)))), true case code.OpF64PromoteF32: return uint64(math.Float64bits(float64(args.F32(0)))), true case code.OpI32ReinterpretF32: return uint64(math.Float32bits(args.F32(0))), true case code.OpI64ReinterpretF64: return uint64(math.Float64bits(args.F64(0))), true case code.OpF32ReinterpretI32: return uint64(args.U32(0)), true case code.OpF64ReinterpretI64: return uint64(args.U64(0)), true case code.OpI32Extend8S: return uint64(int32(int8(args.I32(0)))), true case code.OpI32Extend16S: return uint64(int32(int16(args.I32(0)))), true case code.OpI64Extend8S: return uint64(int64(int8(args.I64(0)))), true case code.OpI64Extend16S: return uint64(int64(int16(args.I64(0)))), true case code.OpI64Extend32S: return uint64(int64(int32(args.I64(0)))), true case code.OpPrefix: switch instr.Immediate { case code.OpI32TruncSatF32S: return uint64(exec.I32TruncSatS(float64(args.F32(0)))), true case code.OpI32TruncSatF32U: return uint64(exec.I32TruncSatU(float64(args.F32(0)))), true case code.OpI32TruncSatF64S: return 
uint64(exec.I32TruncSatS(args.F64(0))), true case code.OpI32TruncSatF64U: return uint64(exec.I32TruncSatU(args.F64(0))), true case code.OpI64TruncSatF32S: return uint64(exec.I64TruncSatS(float64(args.F32(0)))), true case code.OpI64TruncSatF32U: return uint64(exec.I64TruncSatU(float64(args.F32(0)))), true case code.OpI64TruncSatF64S: return uint64(exec.I64TruncSatS(args.F64(0))), true case code.OpI64TruncSatF64U: return uint64(exec.I64TruncSatU(args.F64(0))), true } } return 0, false }
compiler/wax/evaluate.go
0.523908
0.459622
evaluate.go
starcoder
package graphics import ( "encoding/json" "fmt" "github.com/stnma7e/betuol/common" "github.com/stnma7e/betuol/component" "github.com/stnma7e/betuol/event" "github.com/stnma7e/betuol/graphics" "github.com/stnma7e/betuol/math" "github.com/stnma7e/betuol/res" ) // GraphicsManager is a component manager used to visualize the game onscreen. // It uses multiple GraphicsHandlers to render the world in a variety of ways. type GraphicsManager struct { em *event.EventManager rm *res.ResourceManager sm component.SceneManager justForcedARender bool graphicsHandlersLink []chan *common.Vector modellink []chan graphics.ModelTransfer deletelink []chan component.GOiD resizelink []chan bool errorlink chan error cam *math.Frustum compList *common.Vector } // MakeGraphicsManager returns a pointer to a GraphicsManager. func MakeGraphicsManager(em *event.EventManager, rm *res.ResourceManager, sm component.SceneManager) *GraphicsManager { gm := &GraphicsManager{ em, rm, sm, false, make([]chan *common.Vector, 1), make([]chan graphics.ModelTransfer, 1), make([]chan component.GOiD, 1), make([]chan bool, 1), make(chan error), math.MakeFrustum(0.1, 100, 90, 1/1), common.MakeVector(), } target, eye, up := math.Vec3{0, 0, 0}, math.Vec3{0, 6, -12}, math.Vec3{0, 1, 0} gm.cam.LookAt(target, eye, up) for i := range gm.graphicsHandlersLink { gm.graphicsHandlersLink[i] = make(chan *common.Vector) } for i := range gm.modellink { gm.modellink[i] = make(chan graphics.ModelTransfer) } for i := range gm.deletelink { gm.deletelink[i] = make(chan component.GOiD) } for i := range gm.resizelink { gm.resizelink[i] = make(chan bool) } go gm.RunGraphicsHandlerFunc(gm.graphicsHandlersLink[0], gm.modellink[0], gm.deletelink[0], gm.resizelink[0], gm.TextHandlerFunc) return gm } func (gm *GraphicsManager) handleClosedGraphicsHandler(indexOfClosedHandler int) { r := recover() if r != nil { common.LogErr.Printf("a graphics handler might have closed. deleting the handler now. 
recovered: %s", r) gm.graphicsHandlersLink[indexOfClosedHandler] = nil gm.resizelink[indexOfClosedHandler] = nil gm.deletelink[indexOfClosedHandler] = nil } } // Tick calls the Tick function of each GraphicsHandler in the manager's list. // If any Tick functions return false, then GraphicsManager.Tick returns false. func (gm *GraphicsManager) Tick(delta float64, sm component.SceneManager) { gm.sm = sm if gm.justForcedARender { gm.justForcedARender = false return } compsToSend, errs := gm.RenderAll(sm) if errs != nil { errArray := errs.Array() if errArray != nil && len(errArray) > 0 { for i := range errArray { common.LogErr.Print(errArray[i].(error)) } } } gm.Render(compsToSend) } // ForceRender sends a resize message to all of the handlers, signaling a redraw. func (gm *GraphicsManager) ForceRender(compsToSend *common.Vector) { handlerIndex := 0 defer gm.handleClosedGraphicsHandler(handlerIndex) gm.Render(compsToSend) for handlerIndex = range gm.resizelink { if gm.resizelink[handlerIndex] == nil { continue } gm.resizelink[handlerIndex] <- true } gm.justForcedARender = true } // Render sends a new list of components to be rendered to the graphics handlers. func (gm *GraphicsManager) Render(compsToSend *common.Vector) { handlerIndex := 0 defer gm.handleClosedGraphicsHandler(handlerIndex) //common.LogInfo.Println(compsToSend) for handlerIndex = range gm.graphicsHandlersLink { if gm.graphicsHandlersLink[handlerIndex] == nil { continue } gm.graphicsHandlersLink[handlerIndex] <- compsToSend } } // RenderAllFromPerspective returns a list of all the game objects with graphics components that can be seen from the perspective of a single game object, id. 
func (gm *GraphicsManager) RenderAllFromPerspective(id component.GOiD, sm component.SceneManager) (*common.Vector, *common.Vector) { errs := common.MakeVector() compsToSend := common.MakeVector() comps := gm.compList.Array() perspLoc, err := sm.GetObjectLocation(id) if err != nil { errs.Insert(fmt.Errorf("requesting location from scene manager failed in perspective render, error %s", err.Error())) return nil, errs } compsNearPerspective := sm.GetObjectsInLocationRadius(perspLoc, 5.0).Array() for i := range comps { if comps[i] == nil { continue } if comps[i].(component.GOiD) == id || comps[i].(component.GOiD) == 0 { continue } for j := range compsNearPerspective { if comps[i].(component.GOiD) == compsNearPerspective[j].(component.GOiD) { compsToSend.Insert(comps[i].(component.GOiD)) } } } return compsToSend, errs } // RenderAll returns a list of all of the game objects with graphics components. func (gm *GraphicsManager) RenderAll(sm component.SceneManager) (*common.Vector, *common.Vector) { errs := common.MakeVector() compsToSend := common.MakeVector() comps := gm.compList.Array() for i := range comps { if comps[i] == nil { continue } compsToSend.Insert(comps[i].(component.GOiD)) } return compsToSend, errs } // JsonCreate extracts creation data from a byte array of json text to pass to CreateComponent. func (gm *GraphicsManager) JsonCreate(id component.GOiD, compData []byte) error { obj := graphics.GraphicsComponent{} err := json.Unmarshal(compData, &obj) if err != nil { return fmt.Errorf("failed to unmarshal graphics component, error: %s", err.Error()) } gm.CreateComponent(id, obj) return nil } // Uses extracted data from higher level component creation functions and initializes a graphics component based on the id passed through. // The function calls the LoadModel function of each GraphicsHandler in the manager's list. 
func (gm *GraphicsManager) CreateComponent(id component.GOiD, gc graphics.GraphicsComponent) error { for i := range gm.modellink { gm.modellink[i] <- graphics.ModelTransfer{id, gc} err := <-gm.errorlink if err != nil { return fmt.Errorf("failed to create model with GraphicsHandler #%d, error: %s", i, err.Error()) } } gm.compList.Insert(id) return fmt.Errorf("failed to create model with GraphicsHandler #%d, error: %s", id, "heop") return nil } // DeleteComponent implements the component.ComponentManager interface and deletes graphics component data from the manager. // The function calls the DeleteModel function of each GraphicsHandler in the manager's list. func (gm *GraphicsManager) DeleteComponent(id component.GOiD) { comps := gm.compList.Array() for i := range comps { if comps[i] == id { gm.compList.Erase(i) } } for i := range gm.modellink { if gm.deletelink[i] == nil { continue } gm.deletelink[i] <- id } } // RegisterGraphicsHandler addeds a GraphicsHandlerFunc to the manager's list to be included on all subsequent render and query function calls. func (gm *GraphicsManager) RegisterGraphicsHandler(handler GraphicsHandlerFunc) { // resize arrays // make(chan) for all the channels // launch go routine with gm.RunGraphicsHandlerFunc() }
component/graphics/manager.go
0.624637
0.415907
manager.go
starcoder
package arithmetic import "fmt" func (parser Parser) Expression(input string) (float64, error) { check, value := parser.m19([]rune(input), 0) if check.Ok { return value, nil } var zero float64 return zero, fmt.Errorf("%s", check.Explain()) } func NewParser(input string) Parser { return Parser{ input: []rune(input), wherem13: map[int]Result{}, whatm13: map[int]float64{}, wherem18: map[int]Result{}, whatm18: map[int]float64{}, wherem9: map[int]Result{}, whatm9: map[int]float64{}, wherem12: map[int]Result{}, whatm12: map[int]float64{}, wherem16: map[int]Result{}, whatm16: map[int]struct { V0 float64 V1 string V2 float64 }{}, wherem2: map[int]Result{}, whatm2: map[int]float64{}, wherem0: map[int]Result{}, whatm0: map[int]float64{}, wherem3: map[int]Result{}, whatm3: map[int]float64{}, wherem10: map[int]Result{}, whatm10: map[int]string{}, wherem11: map[int]Result{}, whatm11: map[int]float64{}, wherem19: map[int]Result{}, whatm19: map[int]float64{}, wherem4: map[int]Result{}, whatm4: map[int]string{}, wherem7: map[int]Result{}, whatm7: map[int]string{}, wherem8: map[int]Result{}, whatm8: map[int]float64{}, wherem15: map[int]Result{}, whatm15: map[int]string{}, wherem17: map[int]Result{}, whatm17: map[int]float64{}, wherem14: map[int]Result{}, whatm14: map[int]float64{}, wherem1: map[int]Result{}, whatm1: map[int]string{}, wherem5: map[int]Result{}, whatm5: map[int]float64{}, wherem6: map[int]Result{}, whatm6: map[int]float64{}, } } type Parser struct { input []rune // Internal memoization tables wherem13 map[int]Result whatm13 map[int]float64 wherem18 map[int]Result whatm18 map[int]float64 wherem2 map[int]Result whatm2 map[int]float64 wherem0 map[int]Result whatm0 map[int]float64 wherem3 map[int]Result whatm3 map[int]float64 wherem9 map[int]Result whatm9 map[int]float64 wherem12 map[int]Result whatm12 map[int]float64 wherem16 map[int]Result whatm16 map[int]struct { V0 float64 V1 string V2 float64 } wherem4 map[int]Result whatm4 map[int]string wherem7 map[int]Result 
whatm7 map[int]string wherem8 map[int]Result whatm8 map[int]float64 wherem10 map[int]Result whatm10 map[int]string wherem11 map[int]Result whatm11 map[int]float64 wherem19 map[int]Result whatm19 map[int]float64 wherem1 map[int]Result whatm1 map[int]string wherem5 map[int]Result whatm5 map[int]float64 wherem6 map[int]Result whatm6 map[int]float64 wherem15 map[int]Result whatm15 map[int]string wherem17 map[int]Result whatm17 map[int]float64 wherem14 map[int]Result whatm14 map[int]float64 } // Below is the internal generated parse structure. // It's not very efficient right now, but is accomplishes parsing in linear time. // Currently, there's no way to parse multiple inputs, due to the fact that the // state of the parse is stored in global variables. type Result struct { Ok bool At int Expected []Reject } type Reject interface { Reason() string } func (r Result) Explain() string { if r.Ok { return fmt.Sprintf("Okay: %d characters parsed", r.At) } s := "Failed to parse. Expected at " + fmt.Sprintf("%d", r.At) + " one of:" for _, v := range r.Expected { s += "\n\t" + v.Reason() } return s } type Expected struct { Token string } func (e Expected) Reason() string { return fmt.Sprintf("%q", e.Token) } func Failure(tokens ...Reject) Result { return Result{ Ok: false, Expected: tokens, } } func FailureCombined(first []Reject, second []Reject) Result { return Result{ Ok: false, Expected: append(append([]Reject{}, first...), second...), } } func Success(at int) Result { return Result{ Ok: true, At: at, } } type Exclude struct { Message string } func (e Exclude) Reason() string { return fmt.Sprintf("but not %s", e.Message) } func (parser Parser) m0(input []rune, here int) (Result, float64) { return parser.m2(input, here) } var wherem1 = map[int]Result{} var whatm1 = map[int]string{} func (parser Parser) m1(input []rune, here int) (Result, string) { if result, ok := parser.wherem1[here]; ok { return result, parser.whatm1[here] } result, value := parser.dm1(input, here) 
parser.wherem1[here] = result parser.whatm1[here] = value return result, value } // "one" func (parser Parser) dm1(input []rune, here int) (Result, string) { if here+3 > len(input) || string(input[here:here+3]) != "one" { return Failure(Expected{Token: "one"}), "" } return Success(here + 3), "one" } var wherem10 = map[int]Result{} var whatm10 = map[int]string{} func (parser Parser) m10(input []rune, here int) (Result, string) { if result, ok := parser.wherem10[here]; ok { return result, parser.whatm10[here] } result, value := parser.dm10(input, here) parser.wherem10[here] = result parser.whatm10[here] = value return result, value } // "four" func (parser Parser) dm10(input []rune, here int) (Result, string) { if here+4 > len(input) || string(input[here:here+4]) != "four" { return Failure(Expected{Token: "four"}), "" } return Success(here + 4), "four" } var wherem11 = map[int]Result{} var whatm11 = map[int]float64{} func (parser Parser) m11(input []rune, here int) (Result, float64) { if result, ok := parser.wherem11[here]; ok { return result, parser.whatm11[here] } result, value := parser.dm11(input, here) parser.wherem11[here] = result parser.whatm11[here] = value return result, value } // string go { float64 } func (parser Parser) dm11(input []rune, here int) (Result, float64) { check, value := parser.m10(input, here) if !check.Ok { var zero float64 return check, zero } answer := func(arg string) float64 { return 4 }(value) return check, answer } func (parser Parser) m12(input []rune, here int) (Result, float64) { return parser.m13(input, here) } var wherem13 = map[int]Result{} var whatm13 = map[int]float64{} func (parser Parser) m13(input []rune, here int) (Result, float64) { if result, ok := parser.wherem13[here]; ok { return result, parser.whatm13[here] } result, value := parser.dm13(input, here) parser.wherem13[here] = result parser.whatm13[here] = value return result, value } // (root one / root two / root three / root four) func (parser Parser) dm13(input 
[]rune, here int) (Result, float64) { notes := []Reject{} if next, value := parser.m0(input, here); next.Ok { return next, value } else { notes = append(notes, next.Expected...) } if next, value := parser.m3(input, here); next.Ok { return next, value } else { notes = append(notes, next.Expected...) } if next, value := parser.m6(input, here); next.Ok { return next, value } else { notes = append(notes, next.Expected...) } if next, value := parser.m9(input, here); next.Ok { return next, value } else { notes = append(notes, next.Expected...) } var zero float64 return Failure(notes...), zero } func (parser Parser) m14(input []rune, here int) (Result, float64) { return parser.m18(input, here) } var wherem15 = map[int]Result{} var whatm15 = map[int]string{} func (parser Parser) m15(input []rune, here int) (Result, string) { if result, ok := parser.wherem15[here]; ok { return result, parser.whatm15[here] } result, value := parser.dm15(input, here) parser.wherem15[here] = result parser.whatm15[here] = value return result, value } // "+" func (parser Parser) dm15(input []rune, here int) (Result, string) { if here+1 > len(input) || string(input[here:here+1]) != "+" { return Failure(Expected{Token: "+"}), "" } return Success(here + 1), "+" } var wherem16 = map[int]Result{} var whatm16 = map[int]struct { V0 float64 V1 string V2 float64 }{} func (parser Parser) m16(input []rune, here int) (Result, struct { V0 float64 V1 string V2 float64 }) { if result, ok := parser.wherem16[here]; ok { return result, parser.whatm16[here] } result, value := parser.dm16(input, here) parser.wherem16[here] = result parser.whatm16[here] = value return result, value } // root number "+" root sum func (parser Parser) dm16(input []rune, here int) (Result, struct { V0 float64 V1 string V2 float64 }) { result := struct { V0 float64 V1 string V2 float64 }{} if next, value := parser.m12(input, here); next.Ok { here = next.At result.V0 = value } else { return next, struct { V0 float64 V1 string V2 float64 
}{} } if next, value := parser.m15(input, here); next.Ok { here = next.At result.V1 = value } else { return next, struct { V0 float64 V1 string V2 float64 }{} } if next, value := parser.m14(input, here); next.Ok { here = next.At result.V2 = value } else { return next, struct { V0 float64 V1 string V2 float64 }{} } return Success(here), result } var wherem17 = map[int]Result{} var whatm17 = map[int]float64{} func (parser Parser) m17(input []rune, here int) (Result, float64) { if result, ok := parser.wherem17[here]; ok { return result, parser.whatm17[here] } result, value := parser.dm17(input, here) parser.wherem17[here] = result parser.whatm17[here] = value return result, value } // struct{V0 float64;V1 string;V2 float64;} go { float64 } func (parser Parser) dm17(input []rune, here int) (Result, float64) { check, value := parser.m16(input, here) if !check.Ok { var zero float64 return check, zero } answer := func(arg struct { V0 float64 V1 string V2 float64 }) float64 { return arg.V0 + arg.V2 }(value) return check, answer } var wherem18 = map[int]Result{} var whatm18 = map[int]float64{} func (parser Parser) m18(input []rune, here int) (Result, float64) { if result, ok := parser.wherem18[here]; ok { return result, parser.whatm18[here] } result, value := parser.dm18(input, here) parser.wherem18[here] = result parser.whatm18[here] = value return result, value } // (struct{V0 float64;V1 string;V2 float64;} go { float64 } / root number) func (parser Parser) dm18(input []rune, here int) (Result, float64) { notes := []Reject{} if next, value := parser.m17(input, here); next.Ok { return next, value } else { notes = append(notes, next.Expected...) } if next, value := parser.m12(input, here); next.Ok { return next, value } else { notes = append(notes, next.Expected...) 
} var zero float64 return Failure(notes...), zero } func (parser Parser) m19(input []rune, here int) (Result, float64) { return parser.m14(input, here) } var wherem2 = map[int]Result{} var whatm2 = map[int]float64{} func (parser Parser) m2(input []rune, here int) (Result, float64) { if result, ok := parser.wherem2[here]; ok { return result, parser.whatm2[here] } result, value := parser.dm2(input, here) parser.wherem2[here] = result parser.whatm2[here] = value return result, value } // string go { float64 } func (parser Parser) dm2(input []rune, here int) (Result, float64) { check, value := parser.m1(input, here) if !check.Ok { var zero float64 return check, zero } answer := func(arg string) float64 { return 1 }(value) return check, answer } func (parser Parser) m3(input []rune, here int) (Result, float64) { return parser.m5(input, here) } var wherem4 = map[int]Result{} var whatm4 = map[int]string{} func (parser Parser) m4(input []rune, here int) (Result, string) { if result, ok := parser.wherem4[here]; ok { return result, parser.whatm4[here] } result, value := parser.dm4(input, here) parser.wherem4[here] = result parser.whatm4[here] = value return result, value } // "two" func (parser Parser) dm4(input []rune, here int) (Result, string) { if here+3 > len(input) || string(input[here:here+3]) != "two" { return Failure(Expected{Token: "two"}), "" } return Success(here + 3), "two" } var wherem5 = map[int]Result{} var whatm5 = map[int]float64{} func (parser Parser) m5(input []rune, here int) (Result, float64) { if result, ok := parser.wherem5[here]; ok { return result, parser.whatm5[here] } result, value := parser.dm5(input, here) parser.wherem5[here] = result parser.whatm5[here] = value return result, value } // string go { float64 } func (parser Parser) dm5(input []rune, here int) (Result, float64) { check, value := parser.m4(input, here) if !check.Ok { var zero float64 return check, zero } answer := func(arg string) float64 { return 2 }(value) return check, answer } 
func (parser Parser) m6(input []rune, here int) (Result, float64) { return parser.m8(input, here) } var wherem7 = map[int]Result{} var whatm7 = map[int]string{} func (parser Parser) m7(input []rune, here int) (Result, string) { if result, ok := parser.wherem7[here]; ok { return result, parser.whatm7[here] } result, value := parser.dm7(input, here) parser.wherem7[here] = result parser.whatm7[here] = value return result, value } // "three" func (parser Parser) dm7(input []rune, here int) (Result, string) { if here+5 > len(input) || string(input[here:here+5]) != "three" { return Failure(Expected{Token: "three"}), "" } return Success(here + 5), "three" } var wherem8 = map[int]Result{} var whatm8 = map[int]float64{} func (parser Parser) m8(input []rune, here int) (Result, float64) { if result, ok := parser.wherem8[here]; ok { return result, parser.whatm8[here] } result, value := parser.dm8(input, here) parser.wherem8[here] = result parser.whatm8[here] = value return result, value } // string go { float64 } func (parser Parser) dm8(input []rune, here int) (Result, float64) { check, value := parser.m7(input, here) if !check.Ok { var zero float64 return check, zero } answer := func(arg string) float64 { return 3 }(value) return check, answer } func (parser Parser) m9(input []rune, here int) (Result, float64) { return parser.m11(input, here) }
example/arithmetic/parse.go
0.576542
0.414958
parse.go
starcoder
package donna

import (
	`bytes`
	`regexp`
)

// Flag masks for the packed Move representation (layout documented below).
const (
	isCapture   = 0x00F00000
	isPromo     = 0x0F000000
	isCastle    = 0x10000000
	isEnpassant = 0x20000000
)

// Bits 00:00:00:FF => Source square (0 .. 63).
// Bits 00:00:FF:00 => Destination square (0 .. 63).
// Bits 00:0F:00:00 => Piece making the move.
// Bits 00:F0:00:00 => Captured piece if any.
// Bits 0F:00:00:00 => Promoted piece if any.
// Bits F0:00:00:00 => Castle and en-passant flags.
type Move uint32

// NewMove packs a plain move from one square to another, recording the
// captured piece — including a pawn taken en-passant — if any.
func NewMove(p *Position, from, to int) Move {
	piece, capture := p.pieces[from], p.pieces[to]

	if p.enpassant != 0 && to == int(p.enpassant) && piece.isPawn() {
		capture = pawn(piece.color() ^ 1)
	}

	return Move(from | (to << 8) | (int(piece) << 16) | (int(capture) << 20))
}

// NewPawnMove builds a pawn move, tagging a double push as en-passant
// when an enemy pawn could capture on the skipped square.
func NewPawnMove(p *Position, square, target int) Move {
	if abs(square - target) == 16 {
		// Check if pawn jump causes en-passant. This is done by verifying
		// whether enemy pawns occupy squares adjacent to the target square.
		pawns := p.outposts[pawn(p.color ^ 1)]
		if pawns & maskIsolated[col(target)] & maskRank[row(target)] != 0 {
			return NewEnpassant(p, square, target)
		}
	}

	return NewMove(p, square, target)
}

// NewEnpassant builds an en-passant capture move.
func NewEnpassant(p *Position, from, to int) Move {
	return Move(from | (to << 8) | (int(p.pieces[from]) << 16) | isEnpassant)
}

// NewCastle builds a castling move for the piece standing on from.
func NewCastle(p *Position, from, to int) Move {
	return Move(from | (to << 8) | (int(p.pieces[from]) << 16) | isCastle)
}

// NewPromotion returns the four possible promotions (queen, rook, bishop,
// knight) for a pawn advancing from square to target.
func NewPromotion(p *Position, square, target int) (Move, Move, Move, Move) {
	return NewMove(p, square, target).promote(Queen),
		NewMove(p, square, target).promote(Rook),
		NewMove(p, square, target).promote(Bishop),
		NewMove(p, square, target).promote(Knight)
}

// NewMoveFromNotation decodes a string in coordinate notation and returns a
// move. The string is expected to be either 4 or 5 characters long (with
// promotion).
func NewMoveFromNotation(p *Position, e2e4 string) Move {
	from := square(int(e2e4[1] - '1'), int(e2e4[0] - 'a'))
	to := square(int(e2e4[3] - '1'), int(e2e4[2] - 'a'))

	// Check if this is a castle.
	if p.pieces[from].isKing() && abs(from - to) == 2 {
		return NewCastle(p, from, to)
	}

	// Special handling for pawn pushes because they might cause en-passant
	// and result in promotion.
	if p.pieces[from].isPawn() {
		move := NewPawnMove(p, from, to)
		if len(e2e4) > 4 {
			switch e2e4[4] {
			case 'q', 'Q':
				move = move.promote(Queen)
			case 'r', 'R':
				move = move.promote(Rook)
			case 'b', 'B':
				move = move.promote(Bishop)
			case 'n', 'N':
				move = move.promote(Knight)
			}
		}
		return move
	}

	return NewMove(p, from, to)
}

// NewMoveFromString decodes a string in long algebraic notation and returns a
// move. All invalid moves are discarded and returned as Move(0).
// NOTE(review): the regexp is recompiled on every call — could be hoisted to
// package scope.
func NewMoveFromString(p *Position, e2e4 string) (move Move, validMoves []Move) {
	re := regexp.MustCompile(`([KkQqRrBbNn]?)([a-h])([1-8])[-x]?([a-h])([1-8])([QqRrBbNn]?)\+?[!\?]{0,2}`)
	matches := re.FindStringSubmatch(e2e4)

	// Before returning the move make sure it is valid in current position.
	// (Deferred, so it also vets the castle and full-match paths below.)
	defer func() {
		gen := NewMoveGen(p).generateAllMoves().validOnly()
		validMoves = gen.allMoves()
		if move != Move(0) && !gen.amongValid(move) {
			move = Move(0)
		}
	}()

	if len(matches) == 7 { // Full regex match.
		if letter := matches[1]; letter != `` {
			var piece Piece
			// Validate optional piece character to make sure the actual piece it
			// represents is there.
			switch letter {
			case `K`, `k`:
				piece = king(p.color)
			case `Q`, `q`:
				piece = queen(p.color)
			case `R`, `r`:
				piece = rook(p.color)
			case `B`, `b`:
				piece = bishop(p.color)
			case `N`, `n`:
				piece = knight(p.color)
			}
			square := square(int(matches[3][0] - '1'), int(matches[2][0] - 'a'))
			if p.pieces[square] != piece {
				move = Move(0)
				return
			}
		}
		move = NewMoveFromNotation(p, matches[2] + matches[3] + matches[4] + matches[5] + matches[6])
		return
	}

	// Special castle move notation.
	if e2e4 == `0-0` || e2e4 == `0-0-0` {
		kingside, queenside := p.canCastle(p.color)
		if e2e4 == `0-0` && kingside {
			from, to := int(p.king[p.color]), G1 + int(p.color) * A8
			move = NewCastle(p, from, to)
			return
		}
		if e2e4 == `0-0-0` && queenside {
			from, to := int(p.king[p.color]), C1 + int(p.color) * A8
			move = NewCastle(p, from, to)
			return
		}
	}

	return
}

// from extracts the source square (bits 0-7).
func (m Move) from() int {
	return int(m & 0xFF)
}

// to extracts the destination square (bits 8-15).
func (m Move) to() int {
	return int((m >> 8) & 0xFF)
}

// piece extracts the moving piece (bits 16-19).
func (m Move) piece() Piece {
	return Piece((m >> 16) & 0x0F)
}

// color extracts the moving side from the piece's low bit.
func (m Move) color() uint8 {
	return uint8((m >> 16) & 1)
}

// capture extracts the captured piece, if any (bits 20-23).
func (m Move) capture() Piece {
	return Piece((m >> 20) & 0x0F)
}

// split unpacks source, destination, mover, and capture in one call.
func (m Move) split() (from, to int, piece, capture Piece) {
	return int(m & 0xFF), int((m >> 8) & 0xFF), Piece((m >> 16) & 0x0F), Piece((m >> 20) & 0x0F)
}

// promo extracts the promotion piece, if any (bits 24-27).
func (m Move) promo() Piece {
	return Piece((m >> 24) & 0x0F)
}

// promote stamps the promotion piece of the given kind, colored to match
// the moving side.
func (m Move) promote(kind int) Move {
	piece := Piece(kind | int(m.color()))

	return m | Move(int(piece) << 24)
}

// value scores a capture based on most valuable victim/least valuable attacker.
func (m Move) value() int {
	return pieceValue[m.capture()] - m.piece().kind()
}

// isCastle reports whether the castle flag is set.
func (m Move) isCastle() bool {
	return m & isCastle != 0
}

// isCapture reports whether any capture bits are set.
func (m Move) isCapture() bool {
	return m & isCapture != 0
}

// isEnpassant reports whether the en-passant flag is set.
func (m Move) isEnpassant() bool {
	return m & isEnpassant != 0
}

// isPromo reports whether any promotion bits are set.
func (m Move) isPromo() bool {
	return m & isPromo != 0
}

// isQuiet returns true if the move doesn't change material balance.
func (m Move) isQuiet() bool {
	return m & (isCapture | isPromo) == 0
}

// Returns string representation of the move in long coordinate notation as
// expected by UCI, ex. `g1f3`, `e4d5` or `h7h8q`.
func (m Move) notation() string { var buffer bytes.Buffer from, to, _, _ := m.split() buffer.WriteByte(byte(col(from)) + 'a') buffer.WriteByte(byte(row(from)) + '1') buffer.WriteByte(byte(col(to)) + 'a') buffer.WriteByte(byte(row(to)) + '1') if m & isPromo != 0 { buffer.WriteByte(m.promo().char() + 32) } return buffer.String() } // Returns string representation of the move in long algebraic notation using // ASCII characters only. func (m Move) str() (str string) { if engine.fancy { defer func() { engine.fancy = true }() engine.fancy = false } return m.String() } // By default the move is represented in long algebraic notation utilizing fancy // UTF-8 engine setting. For example: `♘g1-f3` (fancy), `e4xd5` or `h7-h8Q`. // This notation is used in tests, REPL, and when showing principal variation. func (m Move) String() (str string) { var buffer bytes.Buffer from, to, piece, capture := m.split() if m.isCastle() { if to > from { return `0-0` } return `0-0-0` } if !piece.isPawn() { if engine.fancy { // Figurine notation is more readable with extra space. buffer.WriteString(piece.String() + ` `) } else { buffer.WriteByte(piece.char()) } } buffer.WriteByte(byte(col(from)) + 'a') buffer.WriteByte(byte(row(from)) + '1') if capture == 0 { buffer.WriteByte('-') } else { buffer.WriteByte('x') } buffer.WriteByte(byte(col(to)) + 'a') buffer.WriteByte(byte(row(to)) + '1') if m & isPromo != 0 { buffer.WriteByte(m.promo().char()) } return buffer.String() }
move.go
0.768038
0.594787
move.go
starcoder
package jsonlogic import ( "bytes" "encoding/json" "fmt" "reflect" "strconv" "strings" "github.com/dariubs/percent" "github.com/spf13/cast" "github.com/buger/jsonparser" ) // Errors var ( ErrInvalidOperation = "invalid operation: %s" ) // Operators holds any operators var Operators = make(map[string]func(rule string, data string) (result interface{})) // Run is an alias to Apply without data func Run(rule string) (res interface{}, errs error) { return Apply(rule, ``) } // Apply is the entry function to parse rule and optional data func Apply(rule string, data string) (res interface{}, errs error) { // Ensure data is object if data == `` { data = `{}` } // Unicode & data = strings.ReplaceAll(data, `\u0026`, `&`) // Must be an object to start process result, err := ParseOperator(rule, data) if err != nil { return false, err } return result, nil } // ParseOperator takes in the json rule and data and attempts to parse func ParseOperator(rule string, data string) (result interface{}, err error) { err = jsonparser.ObjectEach([]byte(rule), func(key []byte, value []byte, dataType jsonparser.ValueType, offset int) error { switch dataType { case jsonparser.String: result = RunOperator(string(key), "\""+string(value)+"\"", data) default: result = RunOperator(string(key), string(value), data) } return nil }) if err != nil { return false, fmt.Errorf(ErrInvalidOperation, err) } return result, nil } // GetValues will attempt to recursively resolve all values for a given operator func GetValues(rule string, data string) (results []interface{}) { ruleValue, dataType, _, _ := jsonparser.Get([]byte(rule)) switch dataType { case jsonparser.Object: res, _ := ParseOperator(string(ruleValue), data) results = append(results, res) case jsonparser.Array: jsonparser.ArrayEach([]byte(ruleValue), func(value []byte, dataType jsonparser.ValueType, offset int, err error) { switch dataType { case jsonparser.Array: m := make([]interface{}, 0) json.Unmarshal(value, &m) results = append(results, m) 
case jsonparser.Object: res, _ := ParseOperator(string(value), data) results = append(results, res) case jsonparser.String: results = append(results, cast.ToString(value)) case jsonparser.Number: results = append(results, cast.ToFloat64(cast.ToString(value))) case jsonparser.Boolean: results = append(results, cast.ToBool(string(value))) case jsonparser.Null: results = append(results, value) } }) case jsonparser.Number: results = append(results, cast.ToFloat64(string(ruleValue))) case jsonparser.String: // Remove the quotes we added so we could detect string type rule = rule[1 : len(rule)-1] value, dataType, _, _ := jsonparser.Get([]byte(data), rule) if len(value) > 0 { results = append(results, rule) switch dataType { case jsonparser.String: results = append(results, cast.ToString(value)) case jsonparser.Number: results = append(results, cast.ToFloat64(cast.ToString(value))) case jsonparser.Boolean: results = append(results, cast.ToBool(value)) case jsonparser.Null: results = append(results, value) } } else { // No data was found so we just append the rule and move on results = append(results, rule) } default: return nil } return results } // AddOperator allows for custom operators to be used func AddOperator(key string, cb func(rule string, data string) (result interface{})) { Operators[key] = cb } // RunOperator determines what function to run against the passed rule and data func RunOperator(key string, rule string, data string) (result interface{}) { values := GetValues(rule, data) switch key { // Accessing Data case "var": var fallback interface{} if len(values) > 1 { fallback = values[1] } else { fallback = nil } result = Var(values[0], fallback, data) // TODO missing case "missing": result = Missing(values, data) case "missing_some": break // TODO missing_some // Logic and Boolean Operations case "?": case "if": // TOFIX basically the "success" value is showing false when it should be showing true // result = If(values[0], values[1], values[2]) result = 
If(values) case "==": result = SoftEqual(cast.ToString(values[0]), cast.ToString(values[1])) case "===": result = HardEqual(values[0], values[1]) case "!=": result = NotSoftEqual(cast.ToString(values[0]), cast.ToString(values[1])) case "!==": result = NotHardEqual(values[0], values[1]) case "!": result = NotTruthy(values) case "!!": result = Truthy(values) case "or": result = Or(values) case "and": result = And(values) // Numeric Operations case ">": result = More(cast.ToFloat64(values[0]), cast.ToFloat64(values[1])) case ">=": result = MoreEqual(cast.ToString(values[0]), cast.ToString(values[1])) case "<": // Test for exclusive between result = false if len(values) > 2 && IsNumeric(values[0]) && IsNumeric(values[1]) && IsNumeric(values[2]) { result = LessBetween(cast.ToFloat64(values[0]), cast.ToFloat64(values[1]), cast.ToFloat64(values[2])) } else if IsNumeric(values[0]) && IsNumeric(values[1]) { result = Less(cast.ToFloat64(values[0]), cast.ToFloat64(values[1])) } case "<=": // Test for inclusive between result = false if len(values) > 2 && IsNumeric(values[0]) && IsNumeric(values[1]) && IsNumeric(values[2]) { result = LessEqualBetween(cast.ToFloat64(values[0]), cast.ToFloat64(values[1]), cast.ToFloat64(values[2])) } else if IsNumeric(values[0]) && IsNumeric(values[1]) { result = LessEqual(cast.ToFloat64(values[0]), cast.ToFloat64(values[1])) } case "max": result = Max(values) case "min": result = Min(values) case "+": result = Plus(values) case "-": result = Minus(values) case "*": result = Multiply(values) case "/": result = Divide(cast.ToFloat64(values[0]), cast.ToFloat64(values[1])) case "%": result = Percentage(cast.ToInt(values[0]), cast.ToInt(values[1])) // String Operations case "cat": result = Cat(values) case "in": result = In(values) case "substr": if len(values) > 2 { result = Substr(cast.ToString(values[0]), cast.ToInt(values[1]), cast.ToInt(values[2])) } else { result = Substr(cast.ToString(values[0]), cast.ToInt(values[1]), 0) } case "merge": 
result = Merge(values) // TODO All, None and Some http://jsonlogic.com/operations.html#all-none-and-some case "all": break case "some": break case "none": break // TODO Map, Reduce and Filter http://jsonlogic.com/operations.html#map-reduce-and-filter case "map": break case "reduce": break case "filter": break // Miscellaneous case "log": result = Log(cast.ToString(values[0])) } // Check against any custom operators for index, operation := range Operators { if key == index { result = operation(rule, data) } } return result } func IsNumeric(s interface{}) bool { _, err := strconv.ParseFloat(cast.ToString(s), 64) return err == nil } func Missing(a []interface{}, data string) interface{} { result := make([]interface{}, 0) for i := 0; i < len(a); i++ { _, dataType, _, _ := jsonparser.Get([]byte(data), cast.ToString(a[i])) if dataType == jsonparser.NotExist { result = append(result, a[i]) } } return result } func Merge(a []interface{}) interface{} { result := make([]interface{}, 0) for i := 0; i < len(a); i++ { array, _ := isArray(a[i]) if array { item := a[i].([]interface{}) for x := 0; x < len(item); x++ { result = append(result, item[x]) } } else { result = append(result, a[i]) } } return result } func Substr(a string, position int, length int) string { start := 0 end := len(a) if position < 0 { start = end + position } else { start = position } if length < 0 { end = end + length } else if length > 0 { end = position + length } return a[start:end] } func In(a []interface{}) bool { array, _ := isArray(a[1]) if array { items := a[1].([]interface{}) result := false for i := 0; i < len(items); i++ { if strings.Contains(cast.ToString(items[i]), cast.ToString(a[0])) && a[0] != nil { result = true } } return result } return strings.Contains(cast.ToString(a[1]), cast.ToString(a[0])) } // Cat implements the 'cat' conditional returning all the values merged together. 
func Cat(values []interface{}) string {
	buffer := new(bytes.Buffer)
	for _, v := range values {
		buffer.WriteString(cast.ToString(v))
	}
	return buffer.String()
}

// Max implements the 'max' conditional returning the maximum value from an array
// of values. Every element is coerced to float64 first; an empty input yields 0.
func Max(values []interface{}) (max float64) {
	if len(values) == 0 {
		return 0
	}
	max = cast.ToFloat64(values[0])
	for _, v := range values {
		val := cast.ToFloat64(v)
		if val > max {
			max = val
		}
	}
	return max
}

// Min implements the 'min' conditional returning the minimum value from an array
// of values. Every element is coerced to float64 first; an empty input yields 0.
func Min(values []interface{}) (min float64) {
	if len(values) == 0 {
		return 0
	}
	min = cast.ToFloat64(values[0])
	for _, v := range values {
		val := cast.ToFloat64(v)
		if val < min {
			min = val
		}
	}
	return min
}

// Log implements the 'log' operator, which prints the value to the terminal
// (stdout) and always evaluates to nil.
func Log(a string) interface{} {
	fmt.Println(a)
	return nil
}

// Plus implements the '+' operator with JS-style type coercion: every operand
// is cast to float64 and summed.
func Plus(a []interface{}) interface{} {
	result := 0.0
	for _, v := range a {
		result = result + cast.ToFloat64(v)
	}
	return result
}

// Minus implements the '-' operator with JS-style type coercion. A single
// operand is negated; otherwise the remaining operands are subtracted from the first.
func Minus(a []interface{}) interface{} {
	result := cast.ToFloat64(a[0])
	if len(a) < 2 {
		result = -1 * cast.ToFloat64(a[0])
	} else {
		for i, v := range a {
			if i != 0 {
				result = result - cast.ToFloat64(v)
			}
		}
	}
	return result
}

// Multiply implements the '*' operator with JS-style type coercion.
// (Comment fixed: it previously claimed to implement '-'.)
func Multiply(a []interface{}) interface{} {
	result := 1.0
	for _, v := range a {
		result = result * cast.ToFloat64(v)
	}
	return result
}

// Divide implements the '/' operator with JS-style type coercion.
// (Comment fixed: it previously claimed to implement '-'.)
// NOTE(review): division by zero yields ±Inf/NaN, mirroring JavaScript — confirm intended.
func Divide(a float64, b float64) interface{} {
	return a / b
}

// SoftEqual implements the '==' operator by comparing the string forms of the
// operands (JS-style type coercion).
func SoftEqual(a string, b string) bool {
	return a == b
}

// HardEqual implements the '===' operator: operands are equal only when both
// their types (as reported by GetType) and their values match.
func HardEqual(a ...interface{}) bool { if GetType(a[0]) != GetType(a[1]) { return false } if a[0] == a[1] { return true } return false } // NotSoftEqual implements the '!=' operator, which does type JS-style coertion. func NotSoftEqual(a string, b string) bool { return !SoftEqual(a, b) } // NotHardEqual implements the '!==' operator, which does type JS-style coertion. func NotHardEqual(a ...interface{}) bool { return !HardEqual(a[0], a[1]) } // More implements the '>' operator with JS-style type coertion. func More(a float64, b float64) bool { return LessEqual(b, a) } // MoreEqual implements the '>=' operator with JS-style type coertion. func MoreEqual(a string, b string) bool { return Less(cast.ToFloat64(b), cast.ToFloat64(a)) || SoftEqual(a, b) } // Less implements the '<' operator however checks against 3 values to test that one value is between but not equal to two others. func LessBetween(a float64, b float64, c float64) bool { leftCheck := Less(a, b) rightCheck := Less(b, c) if leftCheck && rightCheck { return true } return false } // Less implements the '<' operator with JS-style type coertion. func Less(a float64, b float64) bool { return a < b } // Less implements the '<' operator however checks against 3 values to test that one value is between two others. func LessEqualBetween(a float64, b float64, c float64) bool { leftCheck := LessEqual(a, b) rightCheck := LessEqual(b, c) if leftCheck && rightCheck { return true } return false } // LessEqual implements the '<=' operator with JS-style type coertion. func LessEqual(a float64, b float64) bool { return a <= b } // NotTruthy implements the '!' operator with JS-style type coertion. func NotTruthy(a interface{}) bool { return !Truthy(a) } // Truthy implements the '!!' operator with JS-style type coertion. func Truthy(a interface{}) bool { valid, length := isArray(a) if valid && length == 0 { return true } return cast.ToBool(a) } // Percentage implements the '%' operator, which does type JS-style coertion. 
Returns float64. func Percentage(a int, b int) float64 { return percent.PercentOf(a, b) } // And implements the 'and' conditional requiring all bubbled up bools to be true. func And(values []interface{}) bool { result := true for _, res := range values { if res == false { result = false } } return result } // Or implements the 'or' conditional requiring at least one of the bubbled up bools to be true. func Or(values []interface{}) bool { result := false for _, res := range values { if res == true { result = true } } return result } // If implements the 'if' conditional where if the first value is true, the second value is returned, otherwise the third. // func If(conditional interface{}, success interface{}, fail interface{}) interface{} { func If(conditions []interface{}) interface{} { var result interface{} lastElement := conditions[len(conditions)-1] isTrue := false for i := 0; i < len(conditions); i++ { if (i + 1) < len(conditions) { value := conditions[i+1] if cast.ToBool(conditions[i]) { result = value isTrue = true } } i++ } if isTrue { return result } return lastElement } // Var implements the 'var' operator, which grabs value from passed data and has a fallback. func Var(rules interface{}, fallback interface{}, data string) (value interface{}) { ruleType := GetType(rules) rule := "" switch ruleType { case 1: case 2: rule = "[" + cast.ToString(rules) + "]" default: rule = cast.ToString(rules) } if cast.ToString(rules) == "" { dataValue, dataType, _, _ := jsonparser.Get([]byte(data)) if dataType != jsonparser.NotExist { value = TranslateType(dataValue, dataType) } } else { key := strings.Split(rule, ".") dataValue, dataType, _, _ := jsonparser.Get([]byte(data), key...) value = TranslateType(dataValue, dataType) if value == nil { value = fallback } } if value == "" { value = data } return value } // GetType returns an int to map against type so we can see if we are dealing with a specific type of data or an object operation. 
// GetType maps a value's dynamic type to a small int code:
// 1 = int, 2 = float64, 3 = string, 4 = bool, 0 = anything else
// (objects, arrays, nil, other numeric widths, ...).
func GetType(a interface{}) int {
	switch a.(type) {
	case int:
		return 1
	case float64:
		return 2
	case string:
		return 3
	case bool:
		return 4
	default:
		// It could be an object or array.
		// NOTE(review): this debug print goes to stdout on every unknown type —
		// consider removing or routing through a logger.
		fmt.Println("Don't know what this is")
		return 0
	}
}

// TranslateType takes the dataType returned by jsonparser together with its raw
// []byte payload and returns the value converted to a Go type. Strings come back
// as string, numbers as float64, and unrecognized types as nil.
// NOTE(review): Boolean and Null are returned as their raw string forms
// ("true"/"false"/"null") rather than bool/nil — confirm callers expect strings here.
func TranslateType(data []byte, dataType jsonparser.ValueType) interface{} {
	switch dataType {
	case jsonparser.String:
		return string(data)
	case jsonparser.Number:
		numberString := cast.ToString(data)
		numberFloat := cast.ToFloat64(numberString)
		return numberFloat
	case jsonparser.Boolean:
		return string(data)
	case jsonparser.Null:
		return string(data)
	}
	return nil
}

// isArray reports whether args is a Go array or slice, and if so also returns
// its length (0 otherwise).
func isArray(args interface{}) (valid bool, length int) {
	val := reflect.ValueOf(args)
	if val.Kind() == reflect.Array {
		return true, val.Len()
	} else if val.Kind() == reflect.Slice {
		return true, val.Len()
	} else {
		return false, 0
	}
}
jsonlogic.go
0.546133
0.414217
jsonlogic.go
starcoder
one another.*/
package eq

// Generic returns true if two arrays are the same type and have the same values
// and false otherwise. Handled types: []byte, []int, []string, []float32,
// []float64, []uint32, []uint64, []int32, []int64, [][3]float32, [][3]float64.
// Any other type — or a type mismatch between x and y — yields false.
// (Doc fixed: the original list omitted the []int, []int32 and []int64 cases,
// which are handled below.)
func Generic(x, y interface{}) bool {
	switch xx := x.(type) {
	case []byte:
		yy, ok := y.([]byte)
		if !ok {
			return false
		}
		return Bytes(xx, yy)
	case []int:
		yy, ok := y.([]int)
		if !ok {
			return false
		}
		return Ints(xx, yy)
	case []string:
		yy, ok := y.([]string)
		if !ok {
			return false
		}
		return Strings(xx, yy)
	case []float32:
		yy, ok := y.([]float32)
		if !ok {
			return false
		}
		return Float32s(xx, yy)
	case []float64:
		yy, ok := y.([]float64)
		if !ok {
			return false
		}
		return Float64s(xx, yy)
	case []uint32:
		yy, ok := y.([]uint32)
		if !ok {
			return false
		}
		return Uint32s(xx, yy)
	case []uint64:
		yy, ok := y.([]uint64)
		if !ok {
			return false
		}
		return Uint64s(xx, yy)
	case []int32:
		yy, ok := y.([]int32)
		if !ok {
			return false
		}
		return Int32s(xx, yy)
	case []int64:
		yy, ok := y.([]int64)
		if !ok {
			return false
		}
		return Int64s(xx, yy)
	case [][3]float32:
		yy, ok := y.([][3]float32)
		if !ok {
			return false
		}
		return Vec32s(xx, yy)
	case [][3]float64:
		yy, ok := y.([][3]float64)
		if !ok {
			return false
		}
		return Vec64s(xx, yy)
	default:
		return false
	}
	// Unreachable: every switch branch above returns (default included).
	return false
}

// Strings returns true if two []string arrays are the same and false otherwise.
func Strings(x, y []string) bool {
	if len(x) != len(y) {
		return false
	}
	for i := range x {
		if x[i] != y[i] {
			return false
		}
	}
	return true
}

// Bytes returns true if two []byte arrays are the same and false otherwise.
func Bytes(x, y []byte) bool {
	if len(x) != len(y) {
		return false
	}
	for i := range x {
		if x[i] != y[i] {
			return false
		}
	}
	return true
}

// Ints returns true if two []int arrays are the same and false otherwise.
func Ints(x, y []int) bool {
	if len(x) != len(y) {
		return false
	}
	for i := range x {
		if x[i] != y[i] {
			return false
		}
	}
	return true
}

// Uint32s returns true if two []uint32 arrays are the same and false otherwise.
func Uint32s(x, y []uint32) bool {
	if len(x) != len(y) {
		return false
	}
	for i, v := range x {
		if v != y[i] {
			return false
		}
	}
	return true
}

// Uint64s reports whether two []uint64 slices hold identical elements in identical order.
func Uint64s(x, y []uint64) bool {
	if len(x) != len(y) {
		return false
	}
	for i, v := range x {
		if v != y[i] {
			return false
		}
	}
	return true
}

// Int32s reports whether two []int32 slices hold identical elements in identical order.
func Int32s(x, y []int32) bool {
	if len(x) != len(y) {
		return false
	}
	for i, v := range x {
		if v != y[i] {
			return false
		}
	}
	return true
}

// Int64s reports whether two []int64 slices hold identical elements in identical order.
func Int64s(x, y []int64) bool {
	if len(x) != len(y) {
		return false
	}
	for i, v := range x {
		if v != y[i] {
			return false
		}
	}
	return true
}

// Float32s reports whether two []float32 slices hold identical elements in
// identical order (exact comparison, no epsilon).
func Float32s(x, y []float32) bool {
	if len(x) != len(y) {
		return false
	}
	for i, v := range x {
		if v != y[i] {
			return false
		}
	}
	return true
}

// Float64s reports whether two []float64 slices hold identical elements in
// identical order (exact comparison, no epsilon).
func Float64s(x, y []float64) bool {
	if len(x) != len(y) {
		return false
	}
	for i, v := range x {
		if v != y[i] {
			return false
		}
	}
	return true
}

// Vec32s reports whether two [][3]float32 slices hold identical vectors in identical order.
func Vec32s(x, y [][3]float32) bool {
	if len(x) != len(y) {
		return false
	}
	for i, v := range x {
		if v != y[i] {
			return false
		}
	}
	return true
}

// Vec64s reports whether two [][3]float64 slices hold identical vectors in identical order.
func Vec64s(x, y [][3]float64) bool {
	if len(x) != len(y) {
		return false
	}
	for i, v := range x {
		if v != y[i] {
			return false
		}
	}
	return true
}

// Float32sEps reports whether two []float32 slices are elementwise within eps of one another.
func Float32sEps(x, y []float32, eps float32) bool {
	if len(x) != len(y) {
		return false
	}
	// Accept only when every element satisfies y[i]-eps <= x[i] <= y[i]+eps.
	// The comparisons use the same float operations as before so rounding
	// behavior is unchanged.
	for i, v := range x {
		if v+eps < y[i] || v-eps > y[i] {
			return false
		}
	}
	return true
}

// Float64sEps returns true if the two []float64 arrays are within eps of one
// another and false otherwise.
func Float64sEps(x, y []float64, eps float64) bool {
	if len(x) != len(y) {
		return false
	}
	for i, v := range x {
		if v+eps < y[i] || v-eps > y[i] {
			return false
		}
	}
	return true
}
lib/eq/eq.go
0.666714
0.455259
eq.go
starcoder
package network_manager

// enum NMCapability: NMCapability names the numbers in the Capabilities property. Capabilities are positive numbers. They are part of stable API and a certain capability number is guaranteed not to change.

// NM_CAPABILITY_TEAM Teams can be managed. This means the team device plugin is loaded.
const NM_CAPABILITY_TEAM uint32 = 1

// NM_CAPABILITY_OVS OpenVSwitch can be managed. This means the OVS device plugin is loaded. Since: 1.24
const NM_CAPABILITY_OVS uint32 = 2

// enum NMState: NMState values indicate the current overall networking state.

// NM_STATE_UNKNOWN Networking state is unknown. This indicates a daemon error that makes it unable to reasonably assess the state. In such event the applications are expected to assume Internet connectivity might be present and not disable controls that require network access. The graphical shells may hide the network accessibility indicator altogether since no meaningful status indication can be provided.
const NM_STATE_UNKNOWN uint32 = 0

// NM_STATE_ASLEEP Networking is not enabled, the system is being suspended or resumed from suspend.
const NM_STATE_ASLEEP uint32 = 10

// NM_STATE_DISCONNECTED There is no active network connection. The graphical shell should indicate no network connectivity and the applications should not attempt to access the network.
const NM_STATE_DISCONNECTED uint32 = 20

// NM_STATE_DISCONNECTING Network connections are being cleaned up. The applications should tear down their network sessions.
const NM_STATE_DISCONNECTING uint32 = 30

// NM_STATE_CONNECTING A network connection is being started. The graphical shell should indicate the network is being connected while the applications should still make no attempts to connect the network.
const NM_STATE_CONNECTING uint32 = 40

// NM_STATE_CONNECTED_LOCAL There is only local IPv4 and/or IPv6 connectivity, but no default route to access the Internet. The graphical shell should indicate no network connectivity.
const NM_STATE_CONNECTED_LOCAL uint32 = 50

// NM_STATE_CONNECTED_SITE There is only site-wide IPv4 and/or IPv6 connectivity. This means a default route is available, but the Internet connectivity check (see "Connectivity" property) did not succeed. The graphical shell should indicate limited network connectivity.
const NM_STATE_CONNECTED_SITE uint32 = 60

// NM_STATE_CONNECTED_GLOBAL There is global IPv4 and/or IPv6 Internet connectivity. This means the Internet connectivity check succeeded, the graphical shell should indicate full network connectivity.
const NM_STATE_CONNECTED_GLOBAL uint32 = 70

// enum NMConnectivityState

// NM_CONNECTIVITY_UNKNOWN Network connectivity is unknown. This means the connectivity checks are disabled (e.g. on server installations) or has not run yet. The graphical shell should assume the Internet connection might be available and not present a captive portal window.
const NM_CONNECTIVITY_UNKNOWN uint32 = 0

// NM_CONNECTIVITY_NONE The host is not connected to any network. There's no active connection that contains a default route to the internet and thus it makes no sense to even attempt a connectivity check. The graphical shell should use this state to indicate the network connection is unavailable.
const NM_CONNECTIVITY_NONE uint32 = 1

// NM_CONNECTIVITY_PORTAL The Internet connection is hijacked by a captive portal gateway. The graphical shell may open a sandboxed web browser window (because the captive portals typically attempt a man-in-the-middle attacks against the https connections) for the purpose of authenticating to a gateway and retrigger the connectivity check with CheckConnectivity() when the browser window is dismissed.
const NM_CONNECTIVITY_PORTAL uint32 = 2

// NM_CONNECTIVITY_LIMITED The host is connected to a network, does not appear to be able to reach the full Internet, but a captive portal has not been detected.
const NM_CONNECTIVITY_LIMITED uint32 = 3

// NM_CONNECTIVITY_FULL The host is connected to a network, and appears to be able to reach the full Internet.
const NM_CONNECTIVITY_FULL uint32 = 4

// enum NMDeviceType: NMDeviceType values indicate the type of hardware represented by a device object.

// NM_DEVICE_TYPE_UNKNOWN unknown device
const NM_DEVICE_TYPE_UNKNOWN uint32 = 0

// NM_DEVICE_TYPE_GENERIC generic support for unrecognized device types
const NM_DEVICE_TYPE_GENERIC uint32 = 14

// NM_DEVICE_TYPE_ETHERNET a wired ethernet device
const NM_DEVICE_TYPE_ETHERNET uint32 = 1

// NM_DEVICE_TYPE_WIFI an 802.11 Wi-Fi device
const NM_DEVICE_TYPE_WIFI uint32 = 2

// NM_DEVICE_TYPE_UNUSED1 not used
const NM_DEVICE_TYPE_UNUSED1 uint32 = 3

// NM_DEVICE_TYPE_UNUSED2 not used
const NM_DEVICE_TYPE_UNUSED2 uint32 = 4

// NM_DEVICE_TYPE_BT a Bluetooth device supporting PAN or DUN access protocols
const NM_DEVICE_TYPE_BT uint32 = 5

// NM_DEVICE_TYPE_OLPC_MESH an OLPC XO mesh networking device
const NM_DEVICE_TYPE_OLPC_MESH uint32 = 6

// NM_DEVICE_TYPE_WIMAX an 802.16e Mobile WiMAX broadband device
const NM_DEVICE_TYPE_WIMAX uint32 = 7

// NM_DEVICE_TYPE_MODEM a modem supporting analog telephone, CDMA/EVDO, GSM/UMTS, or LTE network access protocols
const NM_DEVICE_TYPE_MODEM uint32 = 8

// NM_DEVICE_TYPE_INFINIBAND an IP-over-InfiniBand device
const NM_DEVICE_TYPE_INFINIBAND uint32 = 9

// NM_DEVICE_TYPE_BOND a bond master interface
const NM_DEVICE_TYPE_BOND uint32 = 10

// NM_DEVICE_TYPE_VLAN an 802.1Q VLAN interface
const NM_DEVICE_TYPE_VLAN uint32 = 11

// NM_DEVICE_TYPE_ADSL ADSL modem
const NM_DEVICE_TYPE_ADSL uint32 = 12

// NM_DEVICE_TYPE_BRIDGE a bridge master interface
const NM_DEVICE_TYPE_BRIDGE uint32 = 13

// NM_DEVICE_TYPE_TEAM a team master interface
const NM_DEVICE_TYPE_TEAM uint32 = 15

// NM_DEVICE_TYPE_TUN a TUN or TAP interface
const NM_DEVICE_TYPE_TUN uint32 = 16

// NM_DEVICE_TYPE_IP_TUNNEL a IP tunnel interface
const NM_DEVICE_TYPE_IP_TUNNEL uint32 = 17

// NM_DEVICE_TYPE_MACVLAN a MACVLAN interface
const NM_DEVICE_TYPE_MACVLAN uint32 = 18

// NM_DEVICE_TYPE_VXLAN a VXLAN interface
const NM_DEVICE_TYPE_VXLAN uint32 = 19

// NM_DEVICE_TYPE_VETH a VETH interface
const NM_DEVICE_TYPE_VETH uint32 = 20

// NM_DEVICE_TYPE_MACSEC a MACsec interface
const NM_DEVICE_TYPE_MACSEC uint32 = 21

// NM_DEVICE_TYPE_DUMMY a dummy interface
const NM_DEVICE_TYPE_DUMMY uint32 = 22

// NM_DEVICE_TYPE_PPP a PPP interface
const NM_DEVICE_TYPE_PPP uint32 = 23

// NM_DEVICE_TYPE_OVS_INTERFACE a Open vSwitch interface
const NM_DEVICE_TYPE_OVS_INTERFACE uint32 = 24

// NM_DEVICE_TYPE_OVS_PORT a Open vSwitch port
const NM_DEVICE_TYPE_OVS_PORT uint32 = 25

// NM_DEVICE_TYPE_OVS_BRIDGE a Open vSwitch bridge
const NM_DEVICE_TYPE_OVS_BRIDGE uint32 = 26

// NM_DEVICE_TYPE_WPAN a IEEE 802.15.4 (WPAN) MAC Layer Device
const NM_DEVICE_TYPE_WPAN uint32 = 27

// NM_DEVICE_TYPE_6LOWPAN 6LoWPAN interface
const NM_DEVICE_TYPE_6LOWPAN uint32 = 28

// NM_DEVICE_TYPE_WIREGUARD a WireGuard interface
const NM_DEVICE_TYPE_WIREGUARD uint32 = 29

// NM_DEVICE_TYPE_WIFI_P2P an 802.11 Wi-Fi P2P device (Since: 1.16)
const NM_DEVICE_TYPE_WIFI_P2P uint32 = 30

// NM_DEVICE_TYPE_VRF A VRF (Virtual Routing and Forwarding) interface (Since: 1.24)
const NM_DEVICE_TYPE_VRF uint32 = 31

// enum NMDeviceCapabilities: General device capability flags.

// NM_DEVICE_CAP_NONE device has no special capabilities
const NM_DEVICE_CAP_NONE uint32 = 0x00000000

// NM_DEVICE_CAP_NM_SUPPORTED NetworkManager supports this device
const NM_DEVICE_CAP_NM_SUPPORTED uint32 = 0x00000001

// NM_DEVICE_CAP_CARRIER_DETECT this device can indicate carrier status
const NM_DEVICE_CAP_CARRIER_DETECT uint32 = 0x00000002

// NM_DEVICE_CAP_IS_SOFTWARE this device is a software device
const NM_DEVICE_CAP_IS_SOFTWARE uint32 = 0x00000004

// NM_DEVICE_CAP_SRIOV this device supports single-root I/O virtualization
const NM_DEVICE_CAP_SRIOV uint32 = 0x00000008

// enum NMDeviceWifiCapabilities: 802.11 specific device encryption and authentication capabilities.

// NM_WIFI_DEVICE_CAP_NONE device has no encryption/authentication capabilities
const NM_WIFI_DEVICE_CAP_NONE uint32 = 0x00000000

// NM_WIFI_DEVICE_CAP_CIPHER_WEP40 device supports 40/64-bit WEP encryption
const NM_WIFI_DEVICE_CAP_CIPHER_WEP40 uint32 = 0x00000001

// NM_WIFI_DEVICE_CAP_CIPHER_WEP104 device supports 104/128-bit WEP encryption
const NM_WIFI_DEVICE_CAP_CIPHER_WEP104 uint32 = 0x00000002

// NM_WIFI_DEVICE_CAP_CIPHER_TKIP device supports TKIP encryption
const NM_WIFI_DEVICE_CAP_CIPHER_TKIP uint32 = 0x00000004

// NM_WIFI_DEVICE_CAP_CIPHER_CCMP device supports AES/CCMP encryption
const NM_WIFI_DEVICE_CAP_CIPHER_CCMP uint32 = 0x00000008

// NM_WIFI_DEVICE_CAP_WPA device supports WPA1 authentication
const NM_WIFI_DEVICE_CAP_WPA uint32 = 0x00000010

// NM_WIFI_DEVICE_CAP_RSN device supports WPA2/RSN authentication
const NM_WIFI_DEVICE_CAP_RSN uint32 = 0x00000020

// NM_WIFI_DEVICE_CAP_AP device supports Access Point mode
const NM_WIFI_DEVICE_CAP_AP uint32 = 0x00000040

// NM_WIFI_DEVICE_CAP_ADHOC device supports Ad-Hoc mode
const NM_WIFI_DEVICE_CAP_ADHOC uint32 = 0x00000080

// NM_WIFI_DEVICE_CAP_FREQ_VALID device reports frequency capabilities
const NM_WIFI_DEVICE_CAP_FREQ_VALID uint32 = 0x00000100

// NM_WIFI_DEVICE_CAP_FREQ_2GHZ device supports 2.4GHz frequencies
const NM_WIFI_DEVICE_CAP_FREQ_2GHZ uint32 = 0x00000200

// NM_WIFI_DEVICE_CAP_FREQ_5GHZ device supports 5GHz frequencies
const NM_WIFI_DEVICE_CAP_FREQ_5GHZ uint32 = 0x00000400

// NM_WIFI_DEVICE_CAP_MESH device supports acting as a mesh point. Since: 1.20.
const NM_WIFI_DEVICE_CAP_MESH uint32 = 0x00001000

// NM_WIFI_DEVICE_CAP_IBSS_RSN device supports WPA2/RSN in an IBSS network. Since: 1.22.
const NM_WIFI_DEVICE_CAP_IBSS_RSN uint32 = 0x00002000

// enum NM80211ApFlags: 802.11 access point flags.

// NM_802_11_AP_FLAGS_NONE access point has no special capabilities
const NM_802_11_AP_FLAGS_NONE uint32 = 0x00000000

// NM_802_11_AP_FLAGS_PRIVACY access point requires authentication and encryption (usually means WEP)
const NM_802_11_AP_FLAGS_PRIVACY uint32 = 0x00000001

// NM_802_11_AP_FLAGS_WPS access point supports some WPS method
const NM_802_11_AP_FLAGS_WPS uint32 = 0x00000002

// NM_802_11_AP_FLAGS_WPS_PBC access point supports push-button WPS
const NM_802_11_AP_FLAGS_WPS_PBC uint32 = 0x00000004

// NM_802_11_AP_FLAGS_WPS_PIN access point supports PIN-based WPS
const NM_802_11_AP_FLAGS_WPS_PIN uint32 = 0x00000008

// enum NM80211ApSecurityFlags: 802.11 access point security and authentication flags. These flags describe the current security requirements of an access point as determined from the access point's beacon.

// NM_802_11_AP_SEC_NONE the access point has no special security requirements
const NM_802_11_AP_SEC_NONE uint32 = 0x00000000

// NM_802_11_AP_SEC_PAIR_WEP40 40/64-bit WEP is supported for pairwise/unicast encryption
const NM_802_11_AP_SEC_PAIR_WEP40 uint32 = 0x00000001

// NM_802_11_AP_SEC_PAIR_WEP104 104/128-bit WEP is supported for pairwise/unicast encryption
const NM_802_11_AP_SEC_PAIR_WEP104 uint32 = 0x00000002

// NM_802_11_AP_SEC_PAIR_TKIP TKIP is supported for pairwise/unicast encryption
const NM_802_11_AP_SEC_PAIR_TKIP uint32 = 0x00000004

// NM_802_11_AP_SEC_PAIR_CCMP AES/CCMP is supported for pairwise/unicast encryption
const NM_802_11_AP_SEC_PAIR_CCMP uint32 = 0x00000008

// NM_802_11_AP_SEC_GROUP_WEP40 40/64-bit WEP is supported for group/broadcast encryption
const NM_802_11_AP_SEC_GROUP_WEP40 uint32 = 0x00000010

// NM_802_11_AP_SEC_GROUP_WEP104 104/128-bit WEP is supported for group/broadcast encryption
const NM_802_11_AP_SEC_GROUP_WEP104 uint32 = 0x00000020

// NM_802_11_AP_SEC_GROUP_TKIP TKIP is supported for group/broadcast encryption
const NM_802_11_AP_SEC_GROUP_TKIP uint32 = 0x00000040

// NM_802_11_AP_SEC_GROUP_CCMP AES/CCMP is supported for group/broadcast encryption
const NM_802_11_AP_SEC_GROUP_CCMP uint32 = 0x00000080

// NM_802_11_AP_SEC_KEY_MGMT_PSK WPA/RSN Pre-Shared Key encryption is supported
const NM_802_11_AP_SEC_KEY_MGMT_PSK uint32 = 0x00000100

// NM_802_11_AP_SEC_KEY_MGMT_802_1X 802.1x authentication and key management is supported
const NM_802_11_AP_SEC_KEY_MGMT_802_1X uint32 = 0x00000200

// NM_802_11_AP_SEC_KEY_MGMT_SAE WPA/RSN Simultaneous Authentication of Equals is supported
const NM_802_11_AP_SEC_KEY_MGMT_SAE uint32 = 0x00000400

// NM_802_11_AP_SEC_KEY_MGMT_OWE WPA/RSN Opportunistic Wireless Encryption is supported
const NM_802_11_AP_SEC_KEY_MGMT_OWE uint32 = 0x00000800

// NM_802_11_AP_SEC_KEY_MGMT_OWE_TM WPA/RSN Opportunistic Wireless Encryption transition mode is supported. Since: 1.26.
const NM_802_11_AP_SEC_KEY_MGMT_OWE_TM uint32 = 0x00001000

// enum NM80211Mode: Indicates the 802.11 mode an access point or device is currently in.

// NM_802_11_MODE_UNKNOWN the device or access point mode is unknown
const NM_802_11_MODE_UNKNOWN uint32 = 0

// NM_802_11_MODE_ADHOC for both devices and access point objects, indicates the object is part of an Ad-Hoc 802.11 network without a central coordinating access point.
const NM_802_11_MODE_ADHOC uint32 = 1

// NM_802_11_MODE_INFRA the device or access point is in infrastructure mode. For devices, this indicates the device is an 802.11 client/station. For access point objects, this indicates the object is an access point that provides connectivity to clients.
const NM_802_11_MODE_INFRA uint32 = 2

// NM_802_11_MODE_AP the device is an access point/hotspot. Not valid for access point objects; used only for hotspot mode on the local machine.
const NM_802_11_MODE_AP uint32 = 3

// NM_802_11_MODE_MESH the device is a 802.11s mesh point. Since: 1.20.
const NM_802_11_MODE_MESH uint32 = 4

// enum NMBluetoothCapabilities: NMBluetoothCapabilities values indicate the usable capabilities of a Bluetooth device.

// NM_BT_CAPABILITY_NONE device has no usable capabilities
const NM_BT_CAPABILITY_NONE uint32 = 0x00000000

// NM_BT_CAPABILITY_DUN device provides Dial-Up Networking capability
const NM_BT_CAPABILITY_DUN uint32 = 0x00000001

// NM_BT_CAPABILITY_NAP device provides Network Access Point capability
const NM_BT_CAPABILITY_NAP uint32 = 0x00000002

// enum NMDeviceModemCapabilities: NMDeviceModemCapabilities values indicate the generic radio access technology families a modem device supports. For more information on the specific access technologies the device supports use the ModemManager D-Bus API.

// NM_DEVICE_MODEM_CAPABILITY_NONE modem has no usable capabilities
const NM_DEVICE_MODEM_CAPABILITY_NONE uint32 = 0x00000000

// NM_DEVICE_MODEM_CAPABILITY_POTS modem uses the analog wired telephone network and is not a wireless/cellular device
const NM_DEVICE_MODEM_CAPABILITY_POTS uint32 = 0x00000001

// NM_DEVICE_MODEM_CAPABILITY_CDMA_EVDO modem supports at least one of CDMA 1xRTT, EVDO revision 0, EVDO revision A, or EVDO revision B
const NM_DEVICE_MODEM_CAPABILITY_CDMA_EVDO uint32 = 0x00000002

// NM_DEVICE_MODEM_CAPABILITY_GSM_UMTS modem supports at least one of GSM, GPRS, EDGE, UMTS, HSDPA, HSUPA, or HSPA+ packet switched data capability
const NM_DEVICE_MODEM_CAPABILITY_GSM_UMTS uint32 = 0x00000004

// NM_DEVICE_MODEM_CAPABILITY_LTE modem has LTE data capability
const NM_DEVICE_MODEM_CAPABILITY_LTE uint32 = 0x00000008

// enum NMWimaxNspNetworkType: WiMAX network type.

// NM_WIMAX_NSP_NETWORK_TYPE_UNKNOWN unknown network type
const NM_WIMAX_NSP_NETWORK_TYPE_UNKNOWN uint32 = 0

// NM_WIMAX_NSP_NETWORK_TYPE_HOME home network
const NM_WIMAX_NSP_NETWORK_TYPE_HOME uint32 = 1

// NM_WIMAX_NSP_NETWORK_TYPE_PARTNER partner network
const NM_WIMAX_NSP_NETWORK_TYPE_PARTNER uint32 = 2

// NM_WIMAX_NSP_NETWORK_TYPE_ROAMING_PARTNER roaming partner network
const NM_WIMAX_NSP_NETWORK_TYPE_ROAMING_PARTNER uint32 = 3

// enum NMDeviceState

// NM_DEVICE_STATE_UNKNOWN the device's state is unknown
const NM_DEVICE_STATE_UNKNOWN uint32 = 0

// NM_DEVICE_STATE_UNMANAGED the device is recognized, but not managed by NetworkManager
const NM_DEVICE_STATE_UNMANAGED uint32 = 10

// NM_DEVICE_STATE_UNAVAILABLE the device is managed by NetworkManager, but is not available for use. Reasons may include the wireless switched off, missing firmware, no ethernet carrier, missing supplicant or modem manager, etc.
const NM_DEVICE_STATE_UNAVAILABLE uint32 = 20

// NM_DEVICE_STATE_DISCONNECTED the device can be activated, but is currently idle and not connected to a network.
const NM_DEVICE_STATE_DISCONNECTED uint32 = 30

// NM_DEVICE_STATE_PREPARE the device is preparing the connection to the network. This may include operations like changing the MAC address, setting physical link properties, and anything else required to connect to the requested network.
const NM_DEVICE_STATE_PREPARE uint32 = 40

// NM_DEVICE_STATE_CONFIG the device is connecting to the requested network. This may include operations like associating with the Wi-Fi AP, dialing the modem, connecting to the remote Bluetooth device, etc.
const NM_DEVICE_STATE_CONFIG uint32 = 50

// NM_DEVICE_STATE_NEED_AUTH the device requires more information to continue connecting to the requested network. This includes secrets like WiFi passphrases, login passwords, PIN codes, etc.
const NM_DEVICE_STATE_NEED_AUTH uint32 = 60

// NM_DEVICE_STATE_IP_CONFIG the device is requesting IPv4 and/or IPv6 addresses and routing information from the network.
const NM_DEVICE_STATE_IP_CONFIG uint32 = 70

// NM_DEVICE_STATE_IP_CHECK the device is checking whether further action is required for the requested network connection. This may include checking whether only local network access is available, whether a captive portal is blocking access to the Internet, etc.
const NM_DEVICE_STATE_IP_CHECK uint32 = 80

// NM_DEVICE_STATE_SECONDARIES the device is waiting for a secondary connection (like a VPN) which must activated before the device can be activated
const NM_DEVICE_STATE_SECONDARIES uint32 = 90

// NM_DEVICE_STATE_ACTIVATED the device has a network connection, either local or global.
const NM_DEVICE_STATE_ACTIVATED uint32 = 100

// NM_DEVICE_STATE_DEACTIVATING a disconnection from the current network connection was requested, and the device is cleaning up resources used for that connection. The network connection may still be valid.
const NM_DEVICE_STATE_DEACTIVATING uint32 = 110

// NM_DEVICE_STATE_FAILED the device failed to connect to the requested network and is cleaning up the connection request
const NM_DEVICE_STATE_FAILED uint32 = 120

// enum NMDeviceStateReason: Device state change reason codes

// NM_DEVICE_STATE_REASON_NONE No reason given
const NM_DEVICE_STATE_REASON_NONE uint32 = 0

// NM_DEVICE_STATE_REASON_UNKNOWN Unknown error
const NM_DEVICE_STATE_REASON_UNKNOWN uint32 = 1

// NM_DEVICE_STATE_REASON_NOW_MANAGED Device is now managed
const NM_DEVICE_STATE_REASON_NOW_MANAGED uint32 = 2

// NM_DEVICE_STATE_REASON_NOW_UNMANAGED Device is now unmanaged
const NM_DEVICE_STATE_REASON_NOW_UNMANAGED uint32 = 3

// NM_DEVICE_STATE_REASON_CONFIG_FAILED The device could not be readied for configuration
const NM_DEVICE_STATE_REASON_CONFIG_FAILED uint32 = 4

// NM_DEVICE_STATE_REASON_IP_CONFIG_UNAVAILABLE IP configuration could not be reserved (no available address, timeout, etc)
const NM_DEVICE_STATE_REASON_IP_CONFIG_UNAVAILABLE uint32 = 5

// NM_DEVICE_STATE_REASON_IP_CONFIG_EXPIRED The IP config is no longer valid
const NM_DEVICE_STATE_REASON_IP_CONFIG_EXPIRED uint32 = 6

// NM_DEVICE_STATE_REASON_NO_SECRETS Secrets were required, but not provided
const NM_DEVICE_STATE_REASON_NO_SECRETS uint32 = 7

// NM_DEVICE_STATE_REASON_SUPPLICANT_DISCONNECT 802.1x supplicant disconnected
const NM_DEVICE_STATE_REASON_SUPPLICANT_DISCONNECT uint32 = 8

// NM_DEVICE_STATE_REASON_SUPPLICANT_CONFIG_FAILED 802.1x supplicant configuration failed
const NM_DEVICE_STATE_REASON_SUPPLICANT_CONFIG_FAILED uint32 = 9

// NM_DEVICE_STATE_REASON_SUPPLICANT_FAILED 802.1x supplicant failed
const NM_DEVICE_STATE_REASON_SUPPLICANT_FAILED uint32 = 10

// NM_DEVICE_STATE_REASON_SUPPLICANT_TIMEOUT 802.1x supplicant took too long to authenticate
const NM_DEVICE_STATE_REASON_SUPPLICANT_TIMEOUT uint32 = 11

// NM_DEVICE_STATE_REASON_PPP_START_FAILED PPP service failed to start
const NM_DEVICE_STATE_REASON_PPP_START_FAILED uint32 = 12

// NM_DEVICE_STATE_REASON_PPP_DISCONNECT PPP service disconnected
const NM_DEVICE_STATE_REASON_PPP_DISCONNECT uint32 = 13

// NM_DEVICE_STATE_REASON_PPP_FAILED PPP failed
const NM_DEVICE_STATE_REASON_PPP_FAILED uint32 = 14

// NM_DEVICE_STATE_REASON_DHCP_START_FAILED DHCP client failed to start
const NM_DEVICE_STATE_REASON_DHCP_START_FAILED uint32 = 15

// NM_DEVICE_STATE_REASON_DHCP_ERROR DHCP client error
const NM_DEVICE_STATE_REASON_DHCP_ERROR uint32 = 16

// NM_DEVICE_STATE_REASON_DHCP_FAILED DHCP client failed
const NM_DEVICE_STATE_REASON_DHCP_FAILED uint32 = 17

// NM_DEVICE_STATE_REASON_SHARED_START_FAILED Shared connection service failed to start
const NM_DEVICE_STATE_REASON_SHARED_START_FAILED uint32 = 18

// NM_DEVICE_STATE_REASON_SHARED_FAILED Shared connection service failed
const NM_DEVICE_STATE_REASON_SHARED_FAILED uint32 = 19

// NM_DEVICE_STATE_REASON_AUTOIP_START_FAILED AutoIP service failed to start
const NM_DEVICE_STATE_REASON_AUTOIP_START_FAILED uint32 = 20

// NM_DEVICE_STATE_REASON_AUTOIP_ERROR AutoIP service error
const NM_DEVICE_STATE_REASON_AUTOIP_ERROR uint32 = 21

// NM_DEVICE_STATE_REASON_AUTOIP_FAILED AutoIP service failed
const NM_DEVICE_STATE_REASON_AUTOIP_FAILED uint32 = 22

// NM_DEVICE_STATE_REASON_MODEM_BUSY The line is busy
const NM_DEVICE_STATE_REASON_MODEM_BUSY uint32 = 23

// NM_DEVICE_STATE_REASON_MODEM_NO_DIAL_TONE No dial tone
const NM_DEVICE_STATE_REASON_MODEM_NO_DIAL_TONE uint32 = 24

// NM_DEVICE_STATE_REASON_MODEM_NO_CARRIER No carrier could be established
const NM_DEVICE_STATE_REASON_MODEM_NO_CARRIER uint32 = 25

// NM_DEVICE_STATE_REASON_MODEM_DIAL_TIMEOUT The dialing request timed out
const NM_DEVICE_STATE_REASON_MODEM_DIAL_TIMEOUT uint32 = 26

// NM_DEVICE_STATE_REASON_MODEM_DIAL_FAILED The dialing attempt failed
const NM_DEVICE_STATE_REASON_MODEM_DIAL_FAILED uint32 = 27

// NM_DEVICE_STATE_REASON_MODEM_INIT_FAILED Modem initialization failed
const NM_DEVICE_STATE_REASON_MODEM_INIT_FAILED uint32 = 28

//
NM_DEVICE_STATE_REASON_GSM_APN_FAILED Failed to select the specified APN const NM_DEVICE_STATE_REASON_GSM_APN_FAILED uint32 = 29 // NM_DEVICE_STATE_REASON_GSM_REGISTRATION_NOT_SEARCHING Not searching for networks const NM_DEVICE_STATE_REASON_GSM_REGISTRATION_NOT_SEARCHING uint32 = 30 // NM_DEVICE_STATE_REASON_GSM_REGISTRATION_DENIED Network registration denied const NM_DEVICE_STATE_REASON_GSM_REGISTRATION_DENIED uint32 = 31 // NM_DEVICE_STATE_REASON_GSM_REGISTRATION_TIMEOUT Network registration timed out const NM_DEVICE_STATE_REASON_GSM_REGISTRATION_TIMEOUT uint32 = 32 // NM_DEVICE_STATE_REASON_GSM_REGISTRATION_FAILED Failed to register with the requested network const NM_DEVICE_STATE_REASON_GSM_REGISTRATION_FAILED uint32 = 33 // NM_DEVICE_STATE_REASON_GSM_PIN_CHECK_FAILED PIN check failed const NM_DEVICE_STATE_REASON_GSM_PIN_CHECK_FAILED uint32 = 34 // NM_DEVICE_STATE_REASON_FIRMWARE_MISSING Necessary firmware for the device may be missing const NM_DEVICE_STATE_REASON_FIRMWARE_MISSING uint32 = 35 // NM_DEVICE_STATE_REASON_REMOVED The device was removed const NM_DEVICE_STATE_REASON_REMOVED uint32 = 36 // NM_DEVICE_STATE_REASON_SLEEPING NetworkManager went to sleep const NM_DEVICE_STATE_REASON_SLEEPING uint32 = 37 // NM_DEVICE_STATE_REASON_CONNECTION_REMOVED The device&#39;s active connection disappeared const NM_DEVICE_STATE_REASON_CONNECTION_REMOVED uint32 = 38 // NM_DEVICE_STATE_REASON_USER_REQUESTED Device disconnected by user or client const NM_DEVICE_STATE_REASON_USER_REQUESTED uint32 = 39 // NM_DEVICE_STATE_REASON_CARRIER Carrier/link changed const NM_DEVICE_STATE_REASON_CARRIER uint32 = 40 // NM_DEVICE_STATE_REASON_CONNECTION_ASSUMED The device&#39;s existing connection was assumed const NM_DEVICE_STATE_REASON_CONNECTION_ASSUMED uint32 = 41 // NM_DEVICE_STATE_REASON_SUPPLICANT_AVAILABLE The supplicant is now available const NM_DEVICE_STATE_REASON_SUPPLICANT_AVAILABLE uint32 = 42 // NM_DEVICE_STATE_REASON_MODEM_NOT_FOUND The modem could not be found const 
NM_DEVICE_STATE_REASON_MODEM_NOT_FOUND uint32 = 43 // NM_DEVICE_STATE_REASON_BT_FAILED The Bluetooth connection failed or timed out const NM_DEVICE_STATE_REASON_BT_FAILED uint32 = 44 // NM_DEVICE_STATE_REASON_GSM_SIM_NOT_INSERTED GSM Modem&#39;s SIM Card not inserted const NM_DEVICE_STATE_REASON_GSM_SIM_NOT_INSERTED uint32 = 45 // NM_DEVICE_STATE_REASON_GSM_SIM_PIN_REQUIRED GSM Modem&#39;s SIM Pin required const NM_DEVICE_STATE_REASON_GSM_SIM_PIN_REQUIRED uint32 = 46 // NM_DEVICE_STATE_REASON_GSM_SIM_PUK_REQUIRED GSM Modem&#39;s SIM Puk required const NM_DEVICE_STATE_REASON_GSM_SIM_PUK_REQUIRED uint32 = 47 // NM_DEVICE_STATE_REASON_GSM_SIM_WRONG GSM Modem&#39;s SIM wrong const NM_DEVICE_STATE_REASON_GSM_SIM_WRONG uint32 = 48 // NM_DEVICE_STATE_REASON_INFINIBAND_MODE InfiniBand device does not support connected mode const NM_DEVICE_STATE_REASON_INFINIBAND_MODE uint32 = 49 // NM_DEVICE_STATE_REASON_DEPENDENCY_FAILED A dependency of the connection failed const NM_DEVICE_STATE_REASON_DEPENDENCY_FAILED uint32 = 50 // NM_DEVICE_STATE_REASON_BR2684_FAILED Problem with the RFC 2684 Ethernet over ADSL bridge const NM_DEVICE_STATE_REASON_BR2684_FAILED uint32 = 51 // NM_DEVICE_STATE_REASON_MODEM_MANAGER_UNAVAILABLE ModemManager not running const NM_DEVICE_STATE_REASON_MODEM_MANAGER_UNAVAILABLE uint32 = 52 // NM_DEVICE_STATE_REASON_SSID_NOT_FOUND The Wi-Fi network could not be found const NM_DEVICE_STATE_REASON_SSID_NOT_FOUND uint32 = 53 // NM_DEVICE_STATE_REASON_SECONDARY_CONNECTION_FAILED A secondary connection of the base connection failed const NM_DEVICE_STATE_REASON_SECONDARY_CONNECTION_FAILED uint32 = 54 // NM_DEVICE_STATE_REASON_DCB_FCOE_FAILED DCB or FCoE setup failed const NM_DEVICE_STATE_REASON_DCB_FCOE_FAILED uint32 = 55 // NM_DEVICE_STATE_REASON_TEAMD_CONTROL_FAILED teamd control failed const NM_DEVICE_STATE_REASON_TEAMD_CONTROL_FAILED uint32 = 56 // NM_DEVICE_STATE_REASON_MODEM_FAILED Modem failed or no longer available const NM_DEVICE_STATE_REASON_MODEM_FAILED 
uint32 = 57 // NM_DEVICE_STATE_REASON_MODEM_AVAILABLE Modem now ready and available const NM_DEVICE_STATE_REASON_MODEM_AVAILABLE uint32 = 58 // NM_DEVICE_STATE_REASON_SIM_PIN_INCORRECT SIM PIN was incorrect const NM_DEVICE_STATE_REASON_SIM_PIN_INCORRECT uint32 = 59 // NM_DEVICE_STATE_REASON_NEW_ACTIVATION New connection activation was enqueued const NM_DEVICE_STATE_REASON_NEW_ACTIVATION uint32 = 60 // NM_DEVICE_STATE_REASON_PARENT_CHANGED the device&#39;s parent changed const NM_DEVICE_STATE_REASON_PARENT_CHANGED uint32 = 61 // NM_DEVICE_STATE_REASON_PARENT_MANAGED_CHANGED the device parent&#39;s management changed const NM_DEVICE_STATE_REASON_PARENT_MANAGED_CHANGED uint32 = 62 // NM_DEVICE_STATE_REASON_OVSDB_FAILED problem communicating with Open vSwitch database const NM_DEVICE_STATE_REASON_OVSDB_FAILED uint32 = 63 // NM_DEVICE_STATE_REASON_IP_ADDRESS_DUPLICATE a duplicate IP address was detected const NM_DEVICE_STATE_REASON_IP_ADDRESS_DUPLICATE uint32 = 64 // NM_DEVICE_STATE_REASON_IP_METHOD_UNSUPPORTED The selected IP method is not supported const NM_DEVICE_STATE_REASON_IP_METHOD_UNSUPPORTED uint32 = 65 // NM_DEVICE_STATE_REASON_SRIOV_CONFIGURATION_FAILED configuration of SR-IOV parameters failed const NM_DEVICE_STATE_REASON_SRIOV_CONFIGURATION_FAILED uint32 = 66 // NM_DEVICE_STATE_REASON_PEER_NOT_FOUND The Wi-Fi P2P peer could not be found const NM_DEVICE_STATE_REASON_PEER_NOT_FOUND uint32 = 67 // enum NMMetered The NMMetered enum has two different purposes: one is to configure &#34;connection.metered&#34; setting of a connection profile in NMSettingConnection, and the other is to express the actual metered state of the NMDevice at a given moment. 
// NM_METERED_UNKNOWN The metered status is unknown const NM_METERED_UNKNOWN uint32 = 0 // NM_METERED_YES Metered, the value was explicitly configured const NM_METERED_YES uint32 = 1 // NM_METERED_NO Not metered, the value was explicitly configured const NM_METERED_NO uint32 = 2 // NM_METERED_GUESS_YES Metered, the value was guessed const NM_METERED_GUESS_YES uint32 = 3 // NM_METERED_GUESS_NO Not metered, the value was guessed const NM_METERED_GUESS_NO uint32 = 4 // enum NMConnectionMultiConnect Since: 1.14 // NM_CONNECTION_MULTI_CONNECT_DEFAULT indicates that the per-connection setting is unspecified. In this case, it will fallback to the default value, which is %NM_CONNECTION_MULTI_CONNECT_SINGLE. const NM_CONNECTION_MULTI_CONNECT_DEFAULT uint32 = 0 // NM_CONNECTION_MULTI_CONNECT_SINGLE the connection profile can only be active once at each moment. Activating a profile that is already active, will first deactivate it. const NM_CONNECTION_MULTI_CONNECT_SINGLE uint32 = 1 // NM_CONNECTION_MULTI_CONNECT_MANUAL_MULTIPLE the profile can be manually activated multiple times on different devices. However, regarding autoconnect, the profile will autoconnect only if it is currently not connected otherwise. const NM_CONNECTION_MULTI_CONNECT_MANUAL_MULTIPLE uint32 = 2 // NM_CONNECTION_MULTI_CONNECT_MULTIPLE the profile can autoactivate and be manually activated multiple times together. const NM_CONNECTION_MULTI_CONNECT_MULTIPLE uint32 = 3 // enum NMActiveConnectionState NMActiveConnectionState values indicate the state of a connection to a specific network while it is starting, connected, or disconnecting from that network. 
// NM_ACTIVE_CONNECTION_STATE_UNKNOWN the state of the connection is unknown const NM_ACTIVE_CONNECTION_STATE_UNKNOWN uint32 = 0 // NM_ACTIVE_CONNECTION_STATE_ACTIVATING a network connection is being prepared const NM_ACTIVE_CONNECTION_STATE_ACTIVATING uint32 = 1 // NM_ACTIVE_CONNECTION_STATE_ACTIVATED there is a connection to the network const NM_ACTIVE_CONNECTION_STATE_ACTIVATED uint32 = 2 // NM_ACTIVE_CONNECTION_STATE_DEACTIVATING the network connection is being torn down and cleaned up const NM_ACTIVE_CONNECTION_STATE_DEACTIVATING uint32 = 3 // NM_ACTIVE_CONNECTION_STATE_DEACTIVATED the network connection is disconnected and will be removed const NM_ACTIVE_CONNECTION_STATE_DEACTIVATED uint32 = 4 // enum NMActiveConnectionStateReason Active connection state reasons. // NM_ACTIVE_CONNECTION_STATE_REASON_UNKNOWN The reason for the active connection state change is unknown. const NM_ACTIVE_CONNECTION_STATE_REASON_UNKNOWN uint32 = 0 // NM_ACTIVE_CONNECTION_STATE_REASON_NONE No reason was given for the active connection state change. const NM_ACTIVE_CONNECTION_STATE_REASON_NONE uint32 = 1 // NM_ACTIVE_CONNECTION_STATE_REASON_USER_DISCONNECTED The active connection changed state because the user disconnected it. const NM_ACTIVE_CONNECTION_STATE_REASON_USER_DISCONNECTED uint32 = 2 // NM_ACTIVE_CONNECTION_STATE_REASON_DEVICE_DISCONNECTED The active connection changed state because the device it was using was disconnected. const NM_ACTIVE_CONNECTION_STATE_REASON_DEVICE_DISCONNECTED uint32 = 3 // NM_ACTIVE_CONNECTION_STATE_REASON_SERVICE_STOPPED The service providing the VPN connection was stopped. const NM_ACTIVE_CONNECTION_STATE_REASON_SERVICE_STOPPED uint32 = 4 // NM_ACTIVE_CONNECTION_STATE_REASON_IP_CONFIG_INVALID The IP config of the active connection was invalid. const NM_ACTIVE_CONNECTION_STATE_REASON_IP_CONFIG_INVALID uint32 = 5 // NM_ACTIVE_CONNECTION_STATE_REASON_CONNECT_TIMEOUT The connection attempt to the VPN service timed out. 
const NM_ACTIVE_CONNECTION_STATE_REASON_CONNECT_TIMEOUT uint32 = 6 // NM_ACTIVE_CONNECTION_STATE_REASON_SERVICE_START_TIMEOUT A timeout occurred while starting the service providing the VPN connection. const NM_ACTIVE_CONNECTION_STATE_REASON_SERVICE_START_TIMEOUT uint32 = 7 // NM_ACTIVE_CONNECTION_STATE_REASON_SERVICE_START_FAILED Starting the service providing the VPN connection failed. const NM_ACTIVE_CONNECTION_STATE_REASON_SERVICE_START_FAILED uint32 = 8 // NM_ACTIVE_CONNECTION_STATE_REASON_NO_SECRETS Necessary secrets for the connection were not provided. const NM_ACTIVE_CONNECTION_STATE_REASON_NO_SECRETS uint32 = 9 // NM_ACTIVE_CONNECTION_STATE_REASON_LOGIN_FAILED Authentication to the server failed. const NM_ACTIVE_CONNECTION_STATE_REASON_LOGIN_FAILED uint32 = 10 // NM_ACTIVE_CONNECTION_STATE_REASON_CONNECTION_REMOVED The connection was deleted from settings. const NM_ACTIVE_CONNECTION_STATE_REASON_CONNECTION_REMOVED uint32 = 11 // NM_ACTIVE_CONNECTION_STATE_REASON_DEPENDENCY_FAILED Master connection of this connection failed to activate. const NM_ACTIVE_CONNECTION_STATE_REASON_DEPENDENCY_FAILED uint32 = 12 // NM_ACTIVE_CONNECTION_STATE_REASON_DEVICE_REALIZE_FAILED Could not create the software device link. const NM_ACTIVE_CONNECTION_STATE_REASON_DEVICE_REALIZE_FAILED uint32 = 13 // NM_ACTIVE_CONNECTION_STATE_REASON_DEVICE_REMOVED The device this connection depended on disappeared. const NM_ACTIVE_CONNECTION_STATE_REASON_DEVICE_REMOVED uint32 = 14 // enum NMSecretAgentGetSecretsFlags NMSecretAgentGetSecretsFlags values modify the behavior of a GetSecrets request. // NM_SECRET_AGENT_GET_SECRETS_FLAG_NONE no special behavior; by default no user interaction is allowed and requests for secrets are fulfilled from persistent storage, or if no secrets are available an error is returned. 
const NM_SECRET_AGENT_GET_SECRETS_FLAG_NONE uint32 = 0x0 // NM_SECRET_AGENT_GET_SECRETS_FLAG_ALLOW_INTERACTION allows the request to interact with the user, possibly prompting via UI for secrets if any are required, or if none are found in persistent storage. const NM_SECRET_AGENT_GET_SECRETS_FLAG_ALLOW_INTERACTION uint32 = 0x1 // NM_SECRET_AGENT_GET_SECRETS_FLAG_REQUEST_NEW explicitly prompt for new secrets from the user. This flag signals that NetworkManager thinks any existing secrets are invalid or wrong. This flag implies that interaction is allowed. const NM_SECRET_AGENT_GET_SECRETS_FLAG_REQUEST_NEW uint32 = 0x2 // NM_SECRET_AGENT_GET_SECRETS_FLAG_USER_REQUESTED set if the request was initiated by user-requested action via the D-Bus interface, as opposed to automatically initiated by NetworkManager in response to (for example) scan results or carrier changes. const NM_SECRET_AGENT_GET_SECRETS_FLAG_USER_REQUESTED uint32 = 0x4 // NM_SECRET_AGENT_GET_SECRETS_FLAG_WPS_PBC_ACTIVE indicates that WPS enrollment is active with PBC method. The agent may suggest that the user pushes a button on the router instead of supplying a PSK. const NM_SECRET_AGENT_GET_SECRETS_FLAG_WPS_PBC_ACTIVE uint32 = 0x8 // NM_SECRET_AGENT_GET_SECRETS_FLAG_ONLY_SYSTEM Internal flag, not part of the D-Bus API. const NM_SECRET_AGENT_GET_SECRETS_FLAG_ONLY_SYSTEM uint32 = 0x80000000 // NM_SECRET_AGENT_GET_SECRETS_FLAG_NO_ERRORS Internal flag, not part of the D-Bus API. const NM_SECRET_AGENT_GET_SECRETS_FLAG_NO_ERRORS uint32 = 0x40000000 // enum NMSecretAgentCapabilities NMSecretAgentCapabilities indicate various capabilities of the agent. // NM_SECRET_AGENT_CAPABILITY_NONE the agent supports no special capabilities const NM_SECRET_AGENT_CAPABILITY_NONE uint32 = 0x0 // NM_SECRET_AGENT_CAPABILITY_VPN_HINTS the agent supports passing hints to VPN plugin authentication dialogs. const NM_SECRET_AGENT_CAPABILITY_VPN_HINTS uint32 = 0x1 // enum NMIPTunnelMode The tunneling mode. 
// NM_IP_TUNNEL_MODE_UNKNOWN Unknown/unset tunnel mode const NM_IP_TUNNEL_MODE_UNKNOWN uint32 = 0 // NM_IP_TUNNEL_MODE_IPIP IP in IP tunnel const NM_IP_TUNNEL_MODE_IPIP uint32 = 1 // NM_IP_TUNNEL_MODE_GRE GRE tunnel const NM_IP_TUNNEL_MODE_GRE uint32 = 2 // NM_IP_TUNNEL_MODE_SIT SIT tunnel const NM_IP_TUNNEL_MODE_SIT uint32 = 3 // NM_IP_TUNNEL_MODE_ISATAP ISATAP tunnel const NM_IP_TUNNEL_MODE_ISATAP uint32 = 4 // NM_IP_TUNNEL_MODE_VTI VTI tunnel const NM_IP_TUNNEL_MODE_VTI uint32 = 5 // NM_IP_TUNNEL_MODE_IP6IP6 IPv6 in IPv6 tunnel const NM_IP_TUNNEL_MODE_IP6IP6 uint32 = 6 // NM_IP_TUNNEL_MODE_IPIP6 IPv4 in IPv6 tunnel const NM_IP_TUNNEL_MODE_IPIP6 uint32 = 7 // NM_IP_TUNNEL_MODE_IP6GRE IPv6 GRE tunnel const NM_IP_TUNNEL_MODE_IP6GRE uint32 = 8 // NM_IP_TUNNEL_MODE_VTI6 IPv6 VTI tunnel const NM_IP_TUNNEL_MODE_VTI6 uint32 = 9 // NM_IP_TUNNEL_MODE_GRETAP GRETAP tunnel const NM_IP_TUNNEL_MODE_GRETAP uint32 = 10 // NM_IP_TUNNEL_MODE_IP6GRETAP IPv6 GRETAP tunnel const NM_IP_TUNNEL_MODE_IP6GRETAP uint32 = 11 // enum NMCheckpointCreateFlags The flags for CheckpointCreate call // NM_CHECKPOINT_CREATE_FLAG_NONE no flags const NM_CHECKPOINT_CREATE_FLAG_NONE uint32 = 0 // NM_CHECKPOINT_CREATE_FLAG_DESTROY_ALL when creating a new checkpoint, destroy all existing ones. const NM_CHECKPOINT_CREATE_FLAG_DESTROY_ALL uint32 = 0x01 // NM_CHECKPOINT_CREATE_FLAG_DELETE_NEW_CONNECTIONS upon rollback, delete any new connection added after the checkpoint (Since: 1.6) const NM_CHECKPOINT_CREATE_FLAG_DELETE_NEW_CONNECTIONS uint32 = 0x02 // NM_CHECKPOINT_CREATE_FLAG_DISCONNECT_NEW_DEVICES upon rollback, disconnect any new device appeared after the checkpoint (Since: 1.6) const NM_CHECKPOINT_CREATE_FLAG_DISCONNECT_NEW_DEVICES uint32 = 0x04 // NM_CHECKPOINT_CREATE_FLAG_ALLOW_OVERLAPPING by default, creating a checkpoint fails if there are already existing checkoints that reference the same devices. 
With this flag, creation of such checkpoints is allowed, however, if an older checkpoint that references overlapping devices gets rolled back, it will automatically destroy this checkpoint during rollback. This allows to create several overlapping checkpoints in parallel, and rollback to them at will. With the special case that rolling back to an older checkpoint will invalidate all overlapping younger checkpoints. This opts-in that the checkpoint can be automatically destroyed by the rollback of an older checkpoint. (Since: 1.12) const NM_CHECKPOINT_CREATE_FLAG_ALLOW_OVERLAPPING uint32 = 0x08 // enum NMRollbackResult The result of a checkpoint Rollback() operation for a specific device. // NM_ROLLBACK_RESULT_OK the rollback succeeded. const NM_ROLLBACK_RESULT_OK uint32 = 0 // NM_ROLLBACK_RESULT_ERR_NO_DEVICE the device no longer exists. const NM_ROLLBACK_RESULT_ERR_NO_DEVICE uint32 = 1 // NM_ROLLBACK_RESULT_ERR_DEVICE_UNMANAGED the device is now unmanaged. const NM_ROLLBACK_RESULT_ERR_DEVICE_UNMANAGED uint32 = 2 // NM_ROLLBACK_RESULT_ERR_FAILED other errors during rollback. const NM_ROLLBACK_RESULT_ERR_FAILED uint32 = 3 // enum NMSettingsConnectionFlags Flags describing the current activation state. // NM_SETTINGS_CONNECTION_FLAG_NONE an alias for numeric zero, no flags set. const NM_SETTINGS_CONNECTION_FLAG_NONE uint32 = 0 // NM_SETTINGS_CONNECTION_FLAG_UNSAVED the connection is not saved to disk. That either means, that the connection is in-memory only and currently is not backed by a file. Or, that the connection is backed by a file, but has modifications in-memory that were not persisted to disk. const NM_SETTINGS_CONNECTION_FLAG_UNSAVED uint32 = 0x01 // NM_SETTINGS_CONNECTION_FLAG_NM_GENERATED A connection is &#34;nm-generated&#34; if it was generated by NetworkManger. If the connection gets modified or saved by the user, the flag gets cleared. A nm-generated is also unsaved and has no backing file as it is in-memory only. 
const NM_SETTINGS_CONNECTION_FLAG_NM_GENERATED uint32 = 0x02 // NM_SETTINGS_CONNECTION_FLAG_VOLATILE The connection will be deleted when it disconnects. That is for in-memory connections (unsaved), which are currently active but deleted on disconnect. Volatile connections are always unsaved, but they are also no backing file on disk and are entirely in-memory only. const NM_SETTINGS_CONNECTION_FLAG_VOLATILE uint32 = 0x04 // NM_SETTINGS_CONNECTION_FLAG_EXTERNAL the profile was generated to represent an external configuration of a networking device. Since: 1.26 const NM_SETTINGS_CONNECTION_FLAG_EXTERNAL uint32 = 0x08 // enum NMActivationStateFlags Flags describing the current activation state. // NM_ACTIVATION_STATE_FLAG_NONE an alias for numeric zero, no flags set. const NM_ACTIVATION_STATE_FLAG_NONE uint32 = 0 // NM_ACTIVATION_STATE_FLAG_IS_MASTER the device is a master. const NM_ACTIVATION_STATE_FLAG_IS_MASTER uint32 = 0x1 // NM_ACTIVATION_STATE_FLAG_IS_SLAVE the device is a slave. const NM_ACTIVATION_STATE_FLAG_IS_SLAVE uint32 = 0x2 // NM_ACTIVATION_STATE_FLAG_LAYER2_READY layer2 is activated and ready. const NM_ACTIVATION_STATE_FLAG_LAYER2_READY uint32 = 0x4 // NM_ACTIVATION_STATE_FLAG_IP4_READY IPv4 setting is completed. const NM_ACTIVATION_STATE_FLAG_IP4_READY uint32 = 0x8 // NM_ACTIVATION_STATE_FLAG_IP6_READY IPv6 setting is completed. const NM_ACTIVATION_STATE_FLAG_IP6_READY uint32 = 0x10 // NM_ACTIVATION_STATE_FLAG_MASTER_HAS_SLAVES The master has any slave devices attached. This only makes sense if the device is a master. const NM_ACTIVATION_STATE_FLAG_MASTER_HAS_SLAVES uint32 = 0x20 // NM_ACTIVATION_STATE_FLAG_LIFETIME_BOUND_TO_PROFILE_VISIBILITY the lifetime of the activation is bound to the visilibity of the connection profile, which in turn depends on &#34;connection.permissions&#34; and whether a session for the user exists. 
Since: 1.16 const NM_ACTIVATION_STATE_FLAG_LIFETIME_BOUND_TO_PROFILE_VISIBILITY uint32 = 0x40 // NM_ACTIVATION_STATE_FLAG_EXTERNAL the active connection was generated to represent an external configuration of a networking device. Since: 1.26 const NM_ACTIVATION_STATE_FLAG_EXTERNAL uint32 = 0x80 // enum NMSettingsAddConnection2Flags Numeric flags for the &#34;flags&#34; argument of AddConnection2() D-Bus API. // NM_SETTINGS_ADD_CONNECTION2_FLAG_NONE an alias for numeric zero, no flags set. const NM_SETTINGS_ADD_CONNECTION2_FLAG_NONE uint32 = 0 // NM_SETTINGS_ADD_CONNECTION2_FLAG_TO_DISK to persist the connection to disk. const NM_SETTINGS_ADD_CONNECTION2_FLAG_TO_DISK uint32 = 0x1 // NM_SETTINGS_ADD_CONNECTION2_FLAG_IN_MEMORY to make the connection in-memory only. const NM_SETTINGS_ADD_CONNECTION2_FLAG_IN_MEMORY uint32 = 0x2 // NM_SETTINGS_ADD_CONNECTION2_FLAG_BLOCK_AUTOCONNECT usually, when the connection has autoconnect enabled and gets added, it becomes eligible to autoconnect right away. Setting this flag, disables autoconnect until the connection is manually activated. const NM_SETTINGS_ADD_CONNECTION2_FLAG_BLOCK_AUTOCONNECT uint32 = 0x20 // enum NMSettingsUpdate2Flags Since: 1.12 // NM_SETTINGS_UPDATE2_FLAG_NONE an alias for numeric zero, no flags set. const NM_SETTINGS_UPDATE2_FLAG_NONE uint32 = 0 // NM_SETTINGS_UPDATE2_FLAG_TO_DISK to persist the connection to disk. const NM_SETTINGS_UPDATE2_FLAG_TO_DISK uint32 = 0x1 // NM_SETTINGS_UPDATE2_FLAG_IN_MEMORY makes the profile in-memory. Note that such profiles are stored in keyfile format under /run. If the file is already in-memory, the file in /run is updated in-place. Otherwise, the previous storage for the profile is left unchanged on disk, and the in-memory copy shadows it. Note that the original filename of the previous persistent storage (if any) is remembered. That means, when later persisting the profile again to disk, the file on disk will be overwritten again. 
Likewise, when finally deleting the profile, both the storage from /run and persistent storage are deleted (or if the persistent storage does not allow deletion, and nmmeta file is written to mark the UUID as deleted). const NM_SETTINGS_UPDATE2_FLAG_IN_MEMORY uint32 = 0x2 // NM_SETTINGS_UPDATE2_FLAG_IN_MEMORY_DETACHED this is almost the same as %NM_SETTINGS_UPDATE2_FLAG_IN_MEMORY, with one difference: when later deleting the profile, the original profile will not be deleted. Instead a nmmeta file is written to /run to indicate that the profile is gone. Note that if such a nmmeta tombstone file exists and hides a file in persistant storage, then when re-adding the profile with the same UUID, then the original storage is taken over again. const NM_SETTINGS_UPDATE2_FLAG_IN_MEMORY_DETACHED uint32 = 0x4 // NM_SETTINGS_UPDATE2_FLAG_IN_MEMORY_ONLY this is like %NM_SETTINGS_UPDATE2_FLAG_IN_MEMORY, but if the connection has a corresponding file on persistent storage, the file will be deleted right away. If the profile is later again persisted to disk, a new, unused filename will be chosen. const NM_SETTINGS_UPDATE2_FLAG_IN_MEMORY_ONLY uint32 = 0x8 // NM_SETTINGS_UPDATE2_FLAG_VOLATILE This can be specified with either %NM_SETTINGS_UPDATE2_FLAG_IN_MEMORY, %NM_SETTINGS_UPDATE2_FLAG_IN_MEMORY_DETACHED or %NM_SETTINGS_UPDATE2_FLAG_IN_MEMORY_ONLY. After making the connection in-memory only, the connection is marked as volatile. That means, if the connection is currently not active it will be deleted right away. Otherwise, it is marked to for deletion once the connection deactivates. A volatile connection cannot autoactivate again (because it&#39;s about to be deleted), but a manual activation will clear the volatile flag. const NM_SETTINGS_UPDATE2_FLAG_VOLATILE uint32 = 0x10 // NM_SETTINGS_UPDATE2_FLAG_BLOCK_AUTOCONNECT usually, when the connection has autoconnect enabled and is modified, it becomes eligible to autoconnect right away. 
Setting this flag, disables autoconnect until the connection is manually activated. const NM_SETTINGS_UPDATE2_FLAG_BLOCK_AUTOCONNECT uint32 = 0x20 // NM_SETTINGS_UPDATE2_FLAG_NO_REAPPLY when a profile gets modified that is currently active, then these changes don&#39;t take effect for the active device unless the profile gets reactivated or the configuration reapplied. There are two exceptions: by default &#34;connection.zone&#34; and &#34;connection.metered&#34; properties take effect immediately. Specify this flag to prevent these properties to take effect, so that the change is restricted to modify the profile. Since: 1.20. const NM_SETTINGS_UPDATE2_FLAG_NO_REAPPLY uint32 = 0x40 // enum NMTernary An boolean value that can be overridden by a default. // NM_TERNARY_DEFAULT use the globally-configured default value. const NM_TERNARY_DEFAULT int8 = -1 // NM_TERNARY_FALSE the option is disabled. const NM_TERNARY_FALSE int8 = 0 // NM_TERNARY_TRUE the option is enabled. const NM_TERNARY_TRUE int8 = 1 // enum NMManagerReloadFlags Flags for the manager Reload() call. // NM_MANAGER_RELOAD_FLAG_NONE an alias for numeric zero, no flags set. This reloads everything that is supported and is identical to a SIGHUP. const NM_MANAGER_RELOAD_FLAG_NONE uint32 = 0 // NM_MANAGER_RELOAD_FLAG_CONF reload the NetworkManager.conf configuration from disk. Note that this does not include connections, which can be reloaded via Setting&#39;s ReloadConnections(). const NM_MANAGER_RELOAD_FLAG_CONF uint32 = 0x1 // NM_MANAGER_RELOAD_FLAG_DNS_RC update DNS configuration, which usually involves writing /etc/resolv.conf anew. const NM_MANAGER_RELOAD_FLAG_DNS_RC uint32 = 0x2 // NM_MANAGER_RELOAD_FLAG_DNS_FULL means to restart the DNS plugin. This is for example useful when using dnsmasq plugin, which uses additional configuration in /etc/NetworkManager/dnsmasq.d. If you edit those files, you can restart the DNS plugin. This action shortly interrupts name resolution. 
const NM_MANAGER_RELOAD_FLAG_DNS_FULL uint32 = 0x4 // NM_MANAGER_RELOAD_FLAG_ALL all flags. const NM_MANAGER_RELOAD_FLAG_ALL uint32 = 0x7 // enum NMDeviceInterfaceFlags Flags for a network interface. // NM_DEVICE_INTERFACE_FLAG_NONE an alias for numeric zero, no flags set. const NM_DEVICE_INTERFACE_FLAG_NONE uint32 = 0 // NM_DEVICE_INTERFACE_FLAG_UP the interface is enabled from the administrative point of view. Corresponds to kernel IFF_UP. const NM_DEVICE_INTERFACE_FLAG_UP uint32 = 0x1 // NM_DEVICE_INTERFACE_FLAG_LOWER_UP the physical link is up. Corresponds to kernel IFF_LOWER_UP. const NM_DEVICE_INTERFACE_FLAG_LOWER_UP uint32 = 0x2 // NM_DEVICE_INTERFACE_FLAG_CARRIER the interface has carrier. In most cases this is equal to the value of @NM_DEVICE_INTERFACE_FLAG_LOWER_UP. However some devices have a non-standard carrier detection mechanism. const NM_DEVICE_INTERFACE_FLAG_CARRIER uint32 = 0x10000 // enum NMClientPermission NMClientPermission values indicate various permissions that NetworkManager clients can obtain to perform certain tasks on behalf of the current user. 
// NM_CLIENT_PERMISSION_NONE unknown or no permission const NM_CLIENT_PERMISSION_NONE uint32 = 0 // NM_CLIENT_PERMISSION_ENABLE_DISABLE_NETWORK controls whether networking can be globally enabled or disabled const NM_CLIENT_PERMISSION_ENABLE_DISABLE_NETWORK uint32 = 1 // NM_CLIENT_PERMISSION_ENABLE_DISABLE_WIFI controls whether Wi-Fi can be globally enabled or disabled const NM_CLIENT_PERMISSION_ENABLE_DISABLE_WIFI uint32 = 2 // NM_CLIENT_PERMISSION_ENABLE_DISABLE_WWAN controls whether WWAN (3G) can be globally enabled or disabled const NM_CLIENT_PERMISSION_ENABLE_DISABLE_WWAN uint32 = 3 // NM_CLIENT_PERMISSION_ENABLE_DISABLE_WIMAX controls whether WiMAX can be globally enabled or disabled const NM_CLIENT_PERMISSION_ENABLE_DISABLE_WIMAX uint32 = 4 // NM_CLIENT_PERMISSION_SLEEP_WAKE controls whether the client can ask NetworkManager to sleep and wake const NM_CLIENT_PERMISSION_SLEEP_WAKE uint32 = 5 // NM_CLIENT_PERMISSION_NETWORK_CONTROL controls whether networking connections can be started, stopped, and changed const NM_CLIENT_PERMISSION_NETWORK_CONTROL uint32 = 6 // NM_CLIENT_PERMISSION_WIFI_SHARE_PROTECTED controls whether a password protected Wi-Fi hotspot can be created const NM_CLIENT_PERMISSION_WIFI_SHARE_PROTECTED uint32 = 7 // NM_CLIENT_PERMISSION_WIFI_SHARE_OPEN controls whether an open Wi-Fi hotspot can be created const NM_CLIENT_PERMISSION_WIFI_SHARE_OPEN uint32 = 8 // NM_CLIENT_PERMISSION_SETTINGS_MODIFY_SYSTEM controls whether connections that are available to all users can be modified const NM_CLIENT_PERMISSION_SETTINGS_MODIFY_SYSTEM uint32 = 9 // NM_CLIENT_PERMISSION_SETTINGS_MODIFY_OWN controls whether connections owned by the current user can be modified const NM_CLIENT_PERMISSION_SETTINGS_MODIFY_OWN uint32 = 10 // NM_CLIENT_PERMISSION_SETTINGS_MODIFY_HOSTNAME controls whether the persistent hostname can be changed const NM_CLIENT_PERMISSION_SETTINGS_MODIFY_HOSTNAME uint32 = 11 // NM_CLIENT_PERMISSION_SETTINGS_MODIFY_GLOBAL_DNS modify persistent 
global DNS configuration const NM_CLIENT_PERMISSION_SETTINGS_MODIFY_GLOBAL_DNS uint32 = 12 // NM_CLIENT_PERMISSION_RELOAD controls access to Reload. const NM_CLIENT_PERMISSION_RELOAD uint32 = 13 // NM_CLIENT_PERMISSION_CHECKPOINT_ROLLBACK permission to create checkpoints. const NM_CLIENT_PERMISSION_CHECKPOINT_ROLLBACK uint32 = 14 // NM_CLIENT_PERMISSION_ENABLE_DISABLE_STATISTICS controls whether device statistics can be globally enabled or disabled const NM_CLIENT_PERMISSION_ENABLE_DISABLE_STATISTICS uint32 = 15 // NM_CLIENT_PERMISSION_ENABLE_DISABLE_CONNECTIVITY_CHECK controls whether connectivity check can be enabled or disabled const NM_CLIENT_PERMISSION_ENABLE_DISABLE_CONNECTIVITY_CHECK uint32 = 16 // NM_CLIENT_PERMISSION_WIFI_SCAN controls whether wifi scans can be performed const NM_CLIENT_PERMISSION_WIFI_SCAN uint32 = 17 // enum NMClientPermissionResult NMClientPermissionResult values indicate what authorizations and permissions the user requires to obtain a given NMClientPermission // NM_CLIENT_PERMISSION_RESULT_UNKNOWN unknown or no authorization const NM_CLIENT_PERMISSION_RESULT_UNKNOWN uint32 = 0 // NM_CLIENT_PERMISSION_RESULT_YES the permission is available const NM_CLIENT_PERMISSION_RESULT_YES uint32 = 1 // NM_CLIENT_PERMISSION_RESULT_AUTH authorization is necessary before the permission is available const NM_CLIENT_PERMISSION_RESULT_AUTH uint32 = 2 // NM_CLIENT_PERMISSION_RESULT_NO permission to perform the operation is denied by system policy const NM_CLIENT_PERMISSION_RESULT_NO uint32 = 3
enum.go
0.538498
0.604866
enum.go
starcoder
package monitoring const MonitoringGrafanaDBCROResourcesJSON = `{ "annotations": { "list": [ { "builtIn": 1, "datasource": "-- Grafana --", "enable": true, "hide": true, "iconColor": "rgba(0, 211, 255, 1)", "name": "Annotations & Alerts", "type": "dashboard" } ] }, "editable": true, "gnetId": null, "graphTooltip": 0, "id": 23, "iteration": 1614258659586, "links": [], "panels": [ { "datasource": "Prometheus", "fieldConfig": { "defaults": { "custom": {}, "mappings": [ { "from": "", "id": 0, "text": "Up", "to": "", "type": 1, "value": "1" }, { "from": "", "id": 1, "text": "Down", "to": "", "type": 1, "value": "0" } ], "thresholds": { "mode": "absolute", "steps": [ { "color": "red", "value": null }, { "color": "green", "value": 1 } ] } }, "overrides": [] }, "gridPos": { "h": 5, "w": 8, "x": 0, "y": 0 }, "id": 24, "options": { "colorMode": "value", "graphMode": "none", "justifyMode": "auto", "orientation": "auto", "reduceOptions": { "calcs": [ "lastNotNull" ], "fields": "", "values": false }, "textMode": "value_and_name" }, "pluginVersion": "7.2.0", "targets": [ { "expr": "cro_postgres_available", "interval": "", "legendFormat": "{{productName}}", "refId": "A" } ], "timeFrom": null, "timeShift": null, "title": "Postgres Connection", "type": "stat" }, { "datasource": "Prometheus", "fieldConfig": { "defaults": { "custom": {}, "mappings": [ { "from": "", "id": 0, "text": "Up", "to": "", "type": 1, "value": "1" }, { "from": "", "id": 1, "text": "Down", "to": "", "type": 1, "value": "0" } ], "thresholds": { "mode": "absolute", "steps": [ { "color": "red", "value": null }, { "color": "green", "value": 1 } ] } }, "overrides": [] }, "gridPos": { "h": 5, "w": 8, "x": 8, "y": 0 }, "id": 23, "options": { "colorMode": "value", "graphMode": "none", "justifyMode": "auto", "orientation": "auto", "reduceOptions": { "calcs": [ "lastNotNull" ], "fields": "", "values": false }, "textMode": "value_and_name" }, "pluginVersion": "7.2.0", "targets": [ { "expr": "cro_redis_available", 
"interval": "", "legendFormat": "{{resourceID}}", "refId": "A" } ], "timeFrom": null, "timeShift": null, "title": "Redis Connection", "type": "stat" }, { "collapsed": true, "datasource": "Prometheus", "gridPos": { "h": 1, "w": 24, "x": 0, "y": 5 }, "id": 5, "panels": [ { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "Prometheus", "fieldConfig": { "defaults": { "custom": { "align": null }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "red", "value": 80 } ] } }, "overrides": [] }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 10, "w": 24, "x": 0, "y": 6 }, "hiddenSeries": false, "id": 2, "legend": { "alignAsTable": false, "avg": false, "current": false, "max": false, "min": false, "rightSide": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, "nullPointMode": "null", "percentage": false, "pluginVersion": "7.1.1", "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "cro_postgres_cpu_utilization_average", "format": "time_series", "hide": false, "interval": "", "legendFormat": "{{productName}}", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "Postgres CPU Utilization", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "transformations": [], "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } } ], "title": "Postgres CPU", "type": "row" }, { "collapsed": true, "datasource": "Prometheus", "gridPos": { "h": 1, "w": 24, "x": 0, "y": 6 }, "id": 12, "panels": [ { 
"datasource": "Prometheus", "fieldConfig": { "defaults": { "custom": { "align": null }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null } ] }, "unit": "bytes" }, "overrides": [ { "matcher": { "id": "byName", "options": "Value #B" }, "properties": [ { "id": "custom.width", "value": 392 } ] }, { "matcher": { "id": "byName", "options": "Allocated" }, "properties": [ { "id": "custom.width", "value": 292 } ] }, { "matcher": { "id": "byName", "options": "Allocated" }, "properties": [ { "id": "unit", "value": "bytes" } ] }, { "matcher": { "id": "byName", "options": "Free" }, "properties": [ { "id": "unit", "value": "bytes" } ] } ] }, "gridPos": { "h": 5, "w": 24, "x": 0, "y": 7 }, "id": 14, "options": { "showHeader": true, "sortBy": [ { "desc": true, "displayName": "Allocated" } ] }, "pluginVersion": "7.1.1", "targets": [ { "expr": "cro_postgres_max_memory*1048576", "format": "table", "instant": true, "interval": "", "legendFormat": "", "refId": "B" }, { "expr": "cro_postgres_freeable_memory_average", "format": "table", "instant": true, "interval": "", "legendFormat": "", "refId": "A" } ], "timeFrom": null, "timeShift": null, "title": "Postgres Memory", "transformations": [ { "id": "seriesToColumns", "options": { "byField": "productName" } }, { "id": "organize", "options": { "excludeByName": { "Time": true, "__name__": true, "clusterID": true, "endpoint": true, "exported_namespace": true, "instance": true, "instanceID": true, "job": true, "namespace": true, "pod": true, "resourceID": true, "service": true, "strategy": true }, "indexByName": { "Time": 1, "Value #A": 15, "Value #B": 14, "__name__": 2, "clusterID": 3, "endpoint": 4, "exported_namespace": 5, "instance": 6, "instanceID": 7, "job": 8, "namespace": 9, "pod": 10, "productName": 0, "resourceID": 11, "service": 12, "strategy": 13 }, "renameByName": { "Value #A": "Free", "Value #B": "Allocated", "productName": "Product" } } }, { "id": "calculateField", "options": { 
"alias": "Used", "binary": { "left": "Allocated", "operator": "-", "reducer": "sum", "right": "Free" }, "mode": "binary", "reduce": { "reducer": "diff" }, "replaceFields": false } } ], "type": "table" }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "Prometheus", "fieldConfig": { "defaults": { "custom": {}, "unit": "bytes" }, "overrides": [] }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 7, "w": 24, "x": 0, "y": 12 }, "hiddenSeries": false, "id": 10, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, "nullPointMode": "null", "percentage": false, "pluginVersion": "7.1.1", "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "cro_postgres_freeable_memory_average", "instant": false, "interval": "", "legendFormat": "{{productName}}", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "Postgres Free Memory", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "bytes", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } } ], "title": "Postgres Memory", "type": "row" }, { "collapsed": true, "datasource": "Prometheus", "gridPos": { "h": 1, "w": 24, "x": 0, "y": 7 }, "id": 7, "panels": [ { "datasource": "Prometheus", "fieldConfig": { "defaults": { "custom": { "align": null }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null } ] }, "unit": "bytes" }, "overrides": [ { "matcher": { "id": "byName", "options": "Free" }, "properties": [ { 
"id": "custom.width", "value": 483 } ] }, { "matcher": { "id": "byName", "options": "Product" }, "properties": [ { "id": "custom.width", "value": 532 } ] } ] }, "gridPos": { "h": 5, "w": 24, "x": 0, "y": 8 }, "id": 25, "options": { "showHeader": true, "sortBy": [] }, "pluginVersion": "7.1.1", "targets": [ { "expr": "cro_postgres_current_allocated_storage", "format": "table", "instant": true, "interval": "", "legendFormat": "", "refId": "A" }, { "expr": "cro_postgres_free_storage_average", "format": "table", "instant": true, "interval": "", "legendFormat": "", "refId": "B" } ], "timeFrom": null, "timeShift": null, "title": "Postgres Storage", "transformations": [ { "id": "seriesToColumns", "options": { "byField": "productName" } }, { "id": "organize", "options": { "excludeByName": { "Time": true, "__name__": true, "clusterID": true, "endpoint": true, "exported_namespace": true, "instance": true, "instanceID": true, "job": true, "namespace": true, "pod": true, "resourceID": true, "service": true, "strategy": true }, "indexByName": { "Time": 1, "Value #A": 15, "Value #B": 14, "__name__": 2, "clusterID": 3, "endpoint": 4, "exported_namespace": 5, "instance": 6, "instanceID": 7, "job": 8, "namespace": 9, "pod": 10, "productName": 0, "resourceID": 11, "service": 12, "strategy": 13 }, "renameByName": { "Value #A": "Allocated", "Value #B": "Free", "productName": "Product" } } }, { "id": "calculateField", "options": { "mode": "reduceRow", "reduce": { "reducer": "diff" } } }, { "id": "organize", "options": { "excludeByName": {}, "indexByName": {}, "renameByName": { "Difference": "Used" } } } ], "type": "table" }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "Prometheus", "fieldConfig": { "defaults": { "custom": { "align": null }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "red", "value": 80 } ] }, "unit": "bytes" }, "overrides": [] }, "fill": 1, "fillGradient": 0, 
"gridPos": { "h": 9, "w": 24, "x": 0, "y": 13 }, "hiddenSeries": false, "id": 3, "legend": { "alignAsTable": false, "avg": false, "current": false, "max": false, "min": false, "rightSide": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, "nullPointMode": "null", "percentage": false, "pluginVersion": "7.1.1", "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "cro_postgres_free_storage_average", "format": "time_series", "hide": false, "interval": "", "intervalFactor": 1, "legendFormat": "{{productName}}", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "Postgres Free Storage", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "transformations": [], "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "bytes", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } } ], "title": "Postgres Storage", "type": "row" }, { "collapsed": true, "datasource": "Prometheus", "gridPos": { "h": 1, "w": 24, "x": 0, "y": 8 }, "id": 19, "panels": [ { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "Prometheus", "fieldConfig": { "defaults": { "custom": {} }, "overrides": [] }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 8, "w": 24, "x": 0, "y": 9 }, "hiddenSeries": false, "id": 17, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, "nullPointMode": "null", "percentage": false, "pluginVersion": "7.1.1", "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": 
false, "steppedLine": false, "targets": [ { "expr": "cro_redis_cpu_utilization_average", "interval": "", "legendFormat": "{{resourceID}}", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "Redis CPU Utilization", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "percent", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } } ], "title": "Redis CPU", "type": "row" }, { "collapsed": false, "datasource": "Prometheus", "gridPos": { "h": 1, "w": 24, "x": 0, "y": 9 }, "id": 21, "panels": [], "title": "Redis Memory", "type": "row" }, { "datasource": "Prometheus", "fieldConfig": { "defaults": { "custom": { "align": null, "filterable": false }, "mappings": [ { "from": "", "id": 0, "text": "Up", "to": "", "type": 1, "value": "1" }, { "from": "", "id": 1, "text": "Down", "to": "", "type": 1, "value": "0" } ], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "red", "value": 80 } ] }, "unit": "bytes" }, "overrides": [ { "matcher": { "id": "byName", "options": "Usage" }, "properties": [ { "id": "unit", "value": "percent" } ] }, { "matcher": { "id": "byName", "options": "Free" }, "properties": [ { "id": "custom.width", "value": 402 } ] } ] }, "gridPos": { "h": 6, "w": 24, "x": 0, "y": 10 }, "id": 28, "options": { "showHeader": true, "sortBy": [ { "desc": false, "displayName": "Usage" } ] }, "pluginVersion": "7.2.0", "targets": [ { "expr": "cro_redis_freeable_memory_average", "format": "table", "instant": true, "interval": "", "legendFormat": "", "refId": "A" }, { "expr": "cro_redis_memory_usage_percentage_average", "format": "table", "instant": true, "interval": "", 
"legendFormat": "", "refId": "B" } ], "timeFrom": null, "timeShift": null, "title": "Redis Memory", "transformations": [ { "id": "seriesToColumns", "options": { "byField": "resourceID" } }, { "id": "organize", "options": { "excludeByName": { "Time": true, "__name__": true, "clusterID": true, "endpoint": true, "exported_namespace": true, "instance": true, "instanceID": true, "job": true, "namespace": true, "pod": true, "productName": true, "service": true, "strategy": true }, "indexByName": {}, "renameByName": { "Value #A": "Free", "Value #B": "Usage" } } } ], "type": "table" }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "Prometheus", "fieldConfig": { "defaults": { "custom": {} }, "overrides": [] }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 8, "w": 24, "x": 0, "y": 16 }, "hiddenSeries": false, "id": 16, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, "pluginVersion": "7.2.0", "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "cro_redis_memory_usage_percentage_average", "interval": "", "legendFormat": "{{resourceID}}", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "Redis Memory Usage", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "percent", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, 
"datasource": "Prometheus", "fieldConfig": { "defaults": { "custom": {}, "unit": "bytes" }, "overrides": [] }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 8, "w": 24, "x": 0, "y": 24 }, "hiddenSeries": false, "id": 26, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, "pluginVersion": "7.2.0", "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "cro_redis_freeable_memory_average", "interval": "", "legendFormat": "{{resourceID}}", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "Redis Freeable Memory", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "bytes", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } } ], "refresh": false, "schemaVersion": 26, "style": "dark", "tags": [], "templating": { "list": [ { "datasource": "Prometheus", "filters": [], "hide": 0, "label": "", "name": "Filters", "skipUrlSync": false, "type": "adhoc" } ] }, "time": { "from": "now-6h", "to": "now" }, "timepicker": { "refresh_intervals": [ "10s", "30s", "1m", "5m", "15m", "30m", "1h", "2h", "1d" ] }, "timezone": "", "title": "CRO Resources", "uid": "OMFxtSyGk", "version": 2 }`
pkg/products/monitoring/dashboards/croResources.go
0.561335
0.445168
croResources.go
starcoder
package types import ( "io" "math" "github.com/lyraproj/puppet-evaluator/errors" "github.com/lyraproj/puppet-evaluator/eval" ) type CollectionType struct { size *IntegerType } var Collection_Type eval.ObjectType func init() { Collection_Type = newObjectType(`Pcore::CollectionType`, `Pcore::AnyType { attributes => { 'size_type' => { type => Type[Integer], value => Integer[0] } } }`, func(ctx eval.Context, args []eval.Value) eval.Value { return NewCollectionType2(args...) }) } func DefaultCollectionType() *CollectionType { return collectionType_DEFAULT } func NewCollectionType(size *IntegerType) *CollectionType { if size == nil || *size == *IntegerType_POSITIVE { return DefaultCollectionType() } return &CollectionType{size} } func NewCollectionType2(args ...eval.Value) *CollectionType { switch len(args) { case 0: return DefaultCollectionType() case 1: arg := args[0] size, ok := arg.(*IntegerType) if !ok { sz, ok := toInt(arg) if !ok { if _, ok := arg.(*DefaultValue); !ok { panic(NewIllegalArgumentType2(`Collection[]`, 0, `Variant[Integer, Default, Type[Integer]]`, arg)) } sz = 0 } size = NewIntegerType(sz, math.MaxInt64) } return NewCollectionType(size) case 2: arg := args[0] min, ok := toInt(arg) if !ok { if _, ok := arg.(*DefaultValue); !ok { panic(NewIllegalArgumentType2(`Collection[]`, 0, `Variant[Integer, Default]`, arg)) } min = 0 } arg = args[1] max, ok := toInt(arg) if !ok { if _, ok := arg.(*DefaultValue); !ok { panic(NewIllegalArgumentType2(`Collection[]`, 1, `Variant[Integer, Default]`, arg)) } max = math.MaxInt64 } return NewCollectionType(NewIntegerType(min, max)) default: panic(errors.NewIllegalArgumentCount(`Collection[]`, `0 - 2`, len(args))) } } func (t *CollectionType) Accept(v eval.Visitor, g eval.Guard) { v(t) t.size.Accept(v, g) } func (t *CollectionType) Default() eval.Type { return collectionType_DEFAULT } func (t *CollectionType) Equals(o interface{}, g eval.Guard) bool { if ot, ok := o.(*CollectionType); ok { return t.size.Equals(ot.size, g) 
} return false } func (t *CollectionType) Generic() eval.Type { return collectionType_DEFAULT } func (t *CollectionType) Get(key string) (eval.Value, bool) { switch key { case `size_type`: if t.size == nil { return eval.UNDEF, true } return t.size, true default: return nil, false } } func (t *CollectionType) IsAssignable(o eval.Type, g eval.Guard) bool { var osz *IntegerType switch o.(type) { case *CollectionType: osz = o.(*CollectionType).size case *ArrayType: osz = o.(*ArrayType).size case *HashType: osz = o.(*HashType).size case *TupleType: osz = o.(*TupleType).givenOrActualSize case *StructType: n := int64(len(o.(*StructType).elements)) osz = NewIntegerType(n, n) default: return false } return t.size.IsAssignable(osz, g) } func (t *CollectionType) IsInstance(o eval.Value, g eval.Guard) bool { return t.IsAssignable(o.PType(), g) } func (t *CollectionType) MetaType() eval.ObjectType { return Collection_Type } func (t *CollectionType) Name() string { return `Collection` } func (t *CollectionType) Parameters() []eval.Value { if *t.size == *IntegerType_POSITIVE { return eval.EMPTY_VALUES } return t.size.SizeParameters() } func (t *CollectionType) CanSerializeAsString() bool { return true } func (t *CollectionType) SerializationString() string { return t.String() } func (t *CollectionType) Size() *IntegerType { return t.size } func (t *CollectionType) String() string { return eval.ToString2(t, NONE) } func (t *CollectionType) ToString(b io.Writer, s eval.FormatContext, g eval.RDetect) { TypeToString(t, b, s, g) } func (t *CollectionType) PType() eval.Type { return &TypeType{t} } var collectionType_DEFAULT = &CollectionType{IntegerType_POSITIVE}
types/collectiontype.go
0.634656
0.407864
collectiontype.go
starcoder
package economist import ( "github.com/coschain/contentos-go/common/constants" . "github.com/coschain/contentos-go/dandelion" "github.com/coschain/contentos-go/prototype" "github.com/stretchr/testify/assert" "math/big" "testing" ) type DappTester struct { acc0,acc1,acc2,acc3,acc4 *DandelionAccount } func (tester *DappTester) Test(t *testing.T, d *Dandelion) { tester.acc0 = d.Account("actor0") tester.acc1 = d.Account("actor1") tester.acc2 = d.Account("actor2") tester.acc3 = d.Account("actor3") tester.acc4 = d.Account("actor4") a := assert.New(t) registerBlockProducer(tester.acc4, t) const VEST = 1000 a.NoError(tester.acc0.SendTrx(TransferToVest(tester.acc0.Name, tester.acc0.Name, VEST, ""))) t.Run("normal self 100%", d.Test(tester.normal1)) t.Run("normal self 50%", d.Test(tester.normal2)) t.Run("normal other 100%", d.Test(tester.normal3)) t.Run("normal self and other half-and-half", d.Test(tester.normal4)) t.Run("normal three people", d.Test(tester.normal5)) t.Run("normal reply dapp two people", d.Test(tester.normal6)) t.Run("normal post and reply dapp", d.Test(tester.normal7)) } func (tester *DappTester) normal1(t *testing.T, d *Dandelion) { a := assert.New(t) const POST = 1 beneficiary := []map[string]int{{tester.acc0.Name: 10000}} a.NoError(tester.acc0.SendTrxAndProduceBlock(Post(POST, tester.acc0.Name, "title", "content", []string{"1"}, beneficiary))) acc0vest0 := d.Account(tester.acc0.Name).GetVest().Value a.NoError(tester.acc1.SendTrx(Vote(tester.acc1.Name, POST))) a.NoError(d.ProduceBlocks(constants.PostCashOutDelayBlock - 2)) // dapp reward dappWeight := StringToBigInt(d.Post(POST).GetWeightedVp()) a.NotEqual(dappWeight.Uint64(), int64(0)) globalDappReward := new(big.Int).SetUint64(d.GlobalProps().GetPoolDappRewards().Value) bigDappWvp := StringToBigInt(d.GlobalProps().GetWeightedVpsDapp()) decayedDappWeight := bigDecay(bigDappWvp) exceptNextBlockDappWeightedVps := decayedDappWeight.Add(decayedDappWeight, dappWeight) nextBlockGlobalDappReward := 
globalDappReward.Add(globalDappReward, new(big.Int).SetUint64(perBlockDappReward(d))) dappReward := ProportionAlgorithm(dappWeight, exceptNextBlockDappWeightedVps, nextBlockGlobalDappReward) exceptGlobalClaimRewardAfterCashout := d.GlobalProps().GetClaimedDappRewards().Add(&prototype.Vest{Value: dappReward.Uint64()}) exceptGlobalRewardAfterCashout := &prototype.Vest{ Value: new(big.Int).Sub(nextBlockGlobalDappReward, dappReward).Uint64()} // post reward postWeight := StringToBigInt(d.Post(POST).GetWeightedVp()) a.NotEqual( postWeight.Int64(), int64(0) ) globalPostReward := new(big.Int).SetUint64(d.GlobalProps().GetPoolPostRewards().Value) bigTotalPostWeight := StringToBigInt(d.GlobalProps().GetWeightedVpsPost()) decayedPostWeight := bigDecay(bigTotalPostWeight) exceptNextBlockPostWeightedVps := decayedPostWeight.Add(decayedPostWeight, postWeight) nextBlockGlobalPostReward := globalPostReward.Add(globalPostReward, new(big.Int).SetUint64(perBlockPostReward(d))) postReward := ProportionAlgorithm(postWeight, exceptNextBlockPostWeightedVps, nextBlockGlobalPostReward) reward := new(big.Int).Add(postReward, dappReward) a.NoError(d.ProduceBlocks(1)) a.Equal(d.GlobalProps().GetWeightedVpsDapp(), exceptNextBlockDappWeightedVps.String()) acc0vest1 := d.Account(tester.acc0.Name).GetVest().Value a.NotZero(reward.Uint64()) a.Equal(reward.Uint64(), acc0vest1 - acc0vest0) a.Equal(d.Post(POST).GetDappRewards().Value, dappReward.Uint64()) a.Equal(d.GlobalProps().GetClaimedDappRewards(), exceptGlobalClaimRewardAfterCashout) a.Equal(d.GlobalProps().GetPoolDappRewards(), exceptGlobalRewardAfterCashout) // make all post/test has been cashouted a.NoError(d.ProduceBlocks(constants.PostCashOutDelayBlock)) } func (tester *DappTester) normal2(t *testing.T, d *Dandelion) { a := assert.New(t) const BLOCKS = 100 const POST = 2 beneficiary := []map[string]int{{tester.acc0.Name: 5000}} a.NoError(tester.acc0.SendTrxAndProduceBlock(Post(POST, tester.acc0.Name, "title", "content", []string{"2"}, 
beneficiary))) acc0vest0 := d.Account(tester.acc0.Name).GetVest().Value a.NoError(tester.acc1.SendTrx(Vote(tester.acc1.Name, POST))) a.NoError(d.ProduceBlocks(constants.PostCashOutDelayBlock - 2)) // dapp reward dappWeight := StringToBigInt(d.Post(POST).GetWeightedVp()) dappWeight = ProportionAlgorithm(new(big.Int).SetUint64(5000), new(big.Int).SetUint64(constants.PERCENT), dappWeight) a.NotEqual(dappWeight.Uint64(), int64(0)) globalDappReward := new(big.Int).SetUint64(d.GlobalProps().GetPoolDappRewards().Value) bigDappWvp := StringToBigInt(d.GlobalProps().GetWeightedVpsDapp()) decayedDappWeight := bigDecay(bigDappWvp) exceptNextBlockDappWeightedVps := decayedDappWeight.Add(decayedDappWeight, dappWeight) nextBlockGlobalDappReward := globalDappReward.Add(globalDappReward, new(big.Int).SetUint64(perBlockDappReward(d))) dappReward := ProportionAlgorithm(dappWeight, exceptNextBlockDappWeightedVps, nextBlockGlobalDappReward) exceptGlobalClaimRewardAfterCashout := d.GlobalProps().GetClaimedDappRewards().Add(&prototype.Vest{Value: dappReward.Uint64()}) exceptGlobalRewardAfterCashout := &prototype.Vest{ Value: new(big.Int).Sub(nextBlockGlobalDappReward, dappReward).Uint64()} // post reward postWeight := StringToBigInt(d.Post(POST).GetWeightedVp()) a.NotEqual( postWeight.Int64(), int64(0) ) globalPostReward := new(big.Int).SetUint64(d.GlobalProps().GetPoolPostRewards().Value) bigTotalPostWeight := StringToBigInt(d.GlobalProps().GetWeightedVpsPost()) decayedPostWeight := bigDecay(bigTotalPostWeight) exceptNextBlockPostWeightedVps := decayedPostWeight.Add(decayedPostWeight, postWeight) nextBlockGlobalPostReward := globalPostReward.Add(globalPostReward, new(big.Int).SetUint64(perBlockPostReward(d))) postReward := ProportionAlgorithm(postWeight, exceptNextBlockPostWeightedVps, nextBlockGlobalPostReward) reward := new(big.Int).Add(postReward, dappReward) a.NoError(d.ProduceBlocks(1)) a.Equal(d.GlobalProps().GetWeightedVpsDapp(), exceptNextBlockDappWeightedVps.String()) acc0vest1 
:= d.Account(tester.acc0.Name).GetVest().Value a.NotZero(reward.Uint64()) a.Equal(reward.Uint64(), acc0vest1 - acc0vest0) a.Equal(d.Post(POST).GetDappRewards().Value, dappReward.Uint64()) a.Equal(d.GlobalProps().GetClaimedDappRewards(), exceptGlobalClaimRewardAfterCashout) a.Equal(d.GlobalProps().GetPoolDappRewards(), exceptGlobalRewardAfterCashout) // make all post/test has been cashouted a.NoError(d.ProduceBlocks(constants.PostCashOutDelayBlock)) } func (tester *DappTester) normal3(t *testing.T, d *Dandelion) { a := assert.New(t) const POST = 3 beneficiary := []map[string]int{{tester.acc2.Name: 10000}} a.NoError(tester.acc0.SendTrxAndProduceBlock(Post(POST, tester.acc0.Name, "title", "content", []string{"3"}, beneficiary))) acc0vest0 := d.Account(tester.acc2.Name).GetVest().Value a.NoError(tester.acc1.SendTrx(Vote(tester.acc1.Name, POST))) a.NoError(d.ProduceBlocks(constants.PostCashOutDelayBlock - 2)) // 100% dappWeight := StringToBigInt(d.Post(POST).GetWeightedVp()) a.NotEqual(dappWeight.Uint64(), int64(0)) globalDappReward := new(big.Int).SetUint64(d.GlobalProps().GetPoolDappRewards().Value) bigDappWvp := StringToBigInt(d.GlobalProps().GetWeightedVpsDapp()) decayedDappWeight := bigDecay(bigDappWvp) exceptNextBlockDappWeightedVps := decayedDappWeight.Add(decayedDappWeight, dappWeight) nextBlockGlobalDappReward := globalDappReward.Add(globalDappReward, new(big.Int).SetUint64(perBlockDappReward(d))) dappReward := ProportionAlgorithm(dappWeight, exceptNextBlockDappWeightedVps, nextBlockGlobalDappReward) exceptGlobalClaimRewardAfterCashout := d.GlobalProps().GetClaimedDappRewards().Add(&prototype.Vest{Value: dappReward.Uint64()}) exceptGlobalRewardAfterCashout := &prototype.Vest{ Value: new(big.Int).Sub(nextBlockGlobalDappReward, dappReward).Uint64()} a.NoError(d.ProduceBlocks(1)) a.Equal(d.GlobalProps().GetWeightedVpsDapp(), exceptNextBlockDappWeightedVps.String()) acc0vest1 := d.Account(tester.acc2.Name).GetVest().Value a.NotZero(dappReward.Uint64()) 
a.Equal(dappReward.Uint64(), acc0vest1 - acc0vest0) a.Equal(d.Post(POST).GetDappRewards().Value, dappReward.Uint64()) a.Equal(d.GlobalProps().GetClaimedDappRewards(), exceptGlobalClaimRewardAfterCashout) a.Equal(d.GlobalProps().GetPoolDappRewards(), exceptGlobalRewardAfterCashout) a.NoError(d.ProduceBlocks(constants.PostCashOutDelayBlock)) } func (tester *DappTester) normal4(t *testing.T, d *Dandelion) { a := assert.New(t) const POST = 4 beneficiary := []map[string]int{{tester.acc0.Name: 5000}, {tester.acc2.Name: 5000}} a.NoError(tester.acc0.SendTrxAndProduceBlock(Post(POST, tester.acc0.Name, "title", "content", []string{"1"}, beneficiary))) acc0vest0 := d.Account(tester.acc0.Name).GetVest().Value acc1vest0 := d.Account(tester.acc2.Name).GetVest().Value a.NoError(tester.acc1.SendTrx(Vote(tester.acc1.Name, POST))) a.NoError(d.ProduceBlocks(constants.PostCashOutDelayBlock - 2)) // dapp reward dappWeight := StringToBigInt(d.Post(POST).GetWeightedVp()) acc0DappWeight := ProportionAlgorithm(new(big.Int).SetUint64(5000), new(big.Int).SetUint64(constants.PERCENT), dappWeight) acc2DappWeight := ProportionAlgorithm(new(big.Int).SetUint64(5000), new(big.Int).SetUint64(constants.PERCENT), dappWeight) dappWeight = new(big.Int).Add(acc0DappWeight, acc2DappWeight) globalDappReward := new(big.Int).SetUint64(d.GlobalProps().GetPoolDappRewards().Value) bigDappWvp := StringToBigInt(d.GlobalProps().GetWeightedVpsDapp()) decayedDappWeight := bigDecay(bigDappWvp) exceptNextBlockDappWeightedVps := decayedDappWeight.Add(decayedDappWeight, dappWeight) nextBlockGlobalDappReward := globalDappReward.Add(globalDappReward, new(big.Int).SetUint64(perBlockDappReward(d))) acc0DappReward := ProportionAlgorithm(acc0DappWeight, exceptNextBlockDappWeightedVps, nextBlockGlobalDappReward) acc2DappReward := ProportionAlgorithm(acc2DappWeight, exceptNextBlockDappWeightedVps, nextBlockGlobalDappReward) dappReward := new(big.Int).Add(acc0DappReward, acc2DappReward) exceptGlobalClaimRewardAfterCashout := 
d.GlobalProps().GetClaimedDappRewards().Add(&prototype.Vest{Value: dappReward.Uint64()}) exceptGlobalRewardAfterCashout := &prototype.Vest{ Value: new(big.Int).Sub(nextBlockGlobalDappReward, dappReward).Uint64()} // post reward postWeight := StringToBigInt(d.Post(POST).GetWeightedVp()) a.NotEqual( postWeight.Int64(), int64(0) ) globalPostReward := new(big.Int).SetUint64(d.GlobalProps().GetPoolPostRewards().Value) bigTotalPostWeight := StringToBigInt(d.GlobalProps().GetWeightedVpsPost()) decayedPostWeight := bigDecay(bigTotalPostWeight) exceptNextBlockPostWeightedVps := decayedPostWeight.Add(decayedPostWeight, postWeight) nextBlockGlobalPostReward := globalPostReward.Add(globalPostReward, new(big.Int).SetUint64(perBlockPostReward(d))) postReward := ProportionAlgorithm(postWeight, exceptNextBlockPostWeightedVps, nextBlockGlobalPostReward) acc0Reward := new(big.Int).Add(postReward, acc0DappReward) acc2Reward := acc2DappReward acc0acc2DappReward := new(big.Int).Add(acc0DappReward, acc0DappReward) a.NoError(d.ProduceBlocks(1)) a.Equal(d.GlobalProps().GetWeightedVpsDapp(), exceptNextBlockDappWeightedVps.String()) acc0vest1 := d.Account(tester.acc0.Name).GetVest().Value acc1vest1 := d.Account(tester.acc2.Name).GetVest().Value a.NotZero(acc0Reward.Uint64()) a.NotZero(acc2Reward.Uint64()) a.Equal(acc0Reward.Uint64(), acc0vest1 - acc0vest0) a.Equal(acc2Reward.Uint64(), acc1vest1 - acc1vest0) a.Equal(d.Post(POST).GetDappRewards().Value, acc0acc2DappReward.Uint64()) a.Equal(d.GlobalProps().GetClaimedDappRewards(), exceptGlobalClaimRewardAfterCashout) a.Equal(d.GlobalProps().GetPoolDappRewards(), exceptGlobalRewardAfterCashout) // make all post/test has been cashouted a.NoError(d.ProduceBlocks(constants.PostCashOutDelayBlock)) } func (tester *DappTester) normal5(t *testing.T, d *Dandelion) { a := assert.New(t) const POST = 5 beneficiary := []map[string]int{{tester.acc0.Name: 5000}, {tester.acc2.Name: 2000}, {tester.acc3.Name: 2000}} 
a.NoError(tester.acc0.SendTrxAndProduceBlock(Post(POST, tester.acc0.Name, "title", "content", []string{"5"}, beneficiary))) acc0vest0 := d.Account(tester.acc0.Name).GetVest().Value acc1vest0 := d.Account(tester.acc2.Name).GetVest().Value acc2vest0 := d.Account(tester.acc3.Name).GetVest().Value a.NoError(tester.acc1.SendTrx(Vote(tester.acc1.Name, POST))) a.NoError(d.ProduceBlocks(constants.PostCashOutDelayBlock - 2)) // dapp reward dappWeight := StringToBigInt(d.Post(POST).GetWeightedVp()) acc0DappWeight := ProportionAlgorithm(new(big.Int).SetUint64(5000), new(big.Int).SetUint64(constants.PERCENT), dappWeight) acc2DappWeight := ProportionAlgorithm(new(big.Int).SetUint64(2000), new(big.Int).SetUint64(constants.PERCENT), dappWeight) acc3DappWeight := ProportionAlgorithm(new(big.Int).SetUint64(2000), new(big.Int).SetUint64(constants.PERCENT), dappWeight) dappWeight = new(big.Int).Add(acc0DappWeight, acc2DappWeight) dappWeight.Add(dappWeight, acc3DappWeight) globalDappReward := new(big.Int).SetUint64(d.GlobalProps().GetPoolDappRewards().Value) bigDappWvp := StringToBigInt(d.GlobalProps().GetWeightedVpsDapp()) decayedDappWeight := bigDecay(bigDappWvp) exceptNextBlockDappWeightedVps := decayedDappWeight.Add(decayedDappWeight, dappWeight) nextBlockGlobalDappReward := globalDappReward.Add(globalDappReward, new(big.Int).SetUint64(perBlockDappReward(d))) acc0DappReward := ProportionAlgorithm(acc0DappWeight, exceptNextBlockDappWeightedVps, nextBlockGlobalDappReward) acc2DappReward := ProportionAlgorithm(acc2DappWeight, exceptNextBlockDappWeightedVps, nextBlockGlobalDappReward) acc3DappReward := ProportionAlgorithm(acc3DappWeight, exceptNextBlockDappWeightedVps, nextBlockGlobalDappReward) dappReward := new(big.Int).Add(acc0DappReward, acc2DappReward) dappReward.Add(dappReward, acc3DappReward) exceptGlobalClaimRewardAfterCashout := d.GlobalProps().GetClaimedDappRewards().Add(&prototype.Vest{Value: dappReward.Uint64()}) exceptGlobalRewardAfterCashout := &prototype.Vest{ Value: 
new(big.Int).Sub(nextBlockGlobalDappReward, dappReward).Uint64()} // post reward postWeight := StringToBigInt(d.Post(POST).GetWeightedVp()) a.NotEqual( postWeight.Int64(), int64(0) ) globalPostReward := new(big.Int).SetUint64(d.GlobalProps().GetPoolPostRewards().Value) bigTotalPostWeight := StringToBigInt(d.GlobalProps().GetWeightedVpsPost()) decayedPostWeight := bigDecay(bigTotalPostWeight) exceptNextBlockPostWeightedVps := decayedPostWeight.Add(decayedPostWeight, postWeight) nextBlockGlobalPostReward := globalPostReward.Add(globalPostReward, new(big.Int).SetUint64(perBlockPostReward(d))) postReward := ProportionAlgorithm(postWeight, exceptNextBlockPostWeightedVps, nextBlockGlobalPostReward) acc0Reward := new(big.Int).Add(postReward, acc0DappReward) acc2Reward := acc2DappReward acc3Reward := acc3DappReward allAccDappReward := new(big.Int).Add(acc0DappReward, acc2DappReward) allAccDappReward.Add(allAccDappReward, acc3DappReward) a.NoError(d.ProduceBlocks(1)) a.Equal(d.GlobalProps().GetWeightedVpsDapp(), exceptNextBlockDappWeightedVps.String()) acc0vest1 := d.Account(tester.acc0.Name).GetVest().Value acc1vest1 := d.Account(tester.acc2.Name).GetVest().Value acc2vest1 := d.Account(tester.acc3.Name).GetVest().Value a.NotZero(acc0Reward.Uint64()) a.NotZero(acc2Reward.Uint64()) a.Equal(acc0Reward.Uint64(), acc0vest1 - acc0vest0) a.Equal(acc2Reward.Uint64(), acc1vest1 - acc1vest0) a.Equal(acc3Reward.Uint64(), acc2vest1 - acc2vest0) a.Equal(d.Post(POST).GetDappRewards().Value, allAccDappReward.Uint64()) a.Equal(d.GlobalProps().GetClaimedDappRewards(), exceptGlobalClaimRewardAfterCashout) a.Equal(d.GlobalProps().GetPoolDappRewards(), exceptGlobalRewardAfterCashout) // make all post/test has been cashouted a.NoError(d.ProduceBlocks(constants.PostCashOutDelayBlock)) } // reply dapp func (tester *DappTester) normal6(t *testing.T, d *Dandelion) { a := assert.New(t) const POST = 6 const REPLY = 7 beneficiary := []map[string]int{{tester.acc2.Name: 2000}, {tester.acc3.Name: 2000}} 
a.NoError(tester.acc0.SendTrxAndProduceBlock(Post(POST, tester.acc0.Name, "title", "content", []string{"5"}, nil))) a.NoError(tester.acc0.SendTrxAndProduceBlock(Reply(REPLY, POST, tester.acc0.Name, "content", beneficiary))) acc1vest0 := d.Account(tester.acc2.Name).GetVest().Value acc2vest0 := d.Account(tester.acc3.Name).GetVest().Value a.NoError(tester.acc1.SendTrx(Vote(tester.acc1.Name, REPLY))) a.NoError(d.ProduceBlocks(constants.PostCashOutDelayBlock - 2)) // dapp reward dappWeight := StringToBigInt(d.Post(REPLY).GetWeightedVp()) acc2DappWeight := ProportionAlgorithm(new(big.Int).SetUint64(2000), new(big.Int).SetUint64(constants.PERCENT), dappWeight) acc3DappWeight := ProportionAlgorithm(new(big.Int).SetUint64(2000), new(big.Int).SetUint64(constants.PERCENT), dappWeight) // reply dapp equalize acc2DappWeightEqualize := ProportionAlgorithm(new(big.Int).SetUint64(constants.RewardRateReply), new(big.Int).SetUint64(constants.RewardRateAuthor), acc2DappWeight) acc3DappWeightEqualize := ProportionAlgorithm(new(big.Int).SetUint64(constants.RewardRateReply), new(big.Int).SetUint64(constants.RewardRateAuthor), acc3DappWeight) dappWeight = new(big.Int).Add(acc2DappWeightEqualize, acc3DappWeightEqualize) globalDappReward := new(big.Int).SetUint64(d.GlobalProps().GetPoolDappRewards().Value) bigDappWvp := StringToBigInt(d.GlobalProps().GetWeightedVpsDapp()) decayedDappWeight := bigDecay(bigDappWvp) exceptNextBlockDappWeightedVps := decayedDappWeight.Add(decayedDappWeight, dappWeight) nextBlockGlobalDappReward := globalDappReward.Add(globalDappReward, new(big.Int).SetUint64(perBlockDappReward(d))) acc2DappReward := ProportionAlgorithm(acc2DappWeightEqualize, exceptNextBlockDappWeightedVps, nextBlockGlobalDappReward) acc3DappReward := ProportionAlgorithm(acc3DappWeightEqualize, exceptNextBlockDappWeightedVps, nextBlockGlobalDappReward) dappReward := new(big.Int).Add(acc2DappReward, acc3DappReward) exceptGlobalClaimRewardAfterCashout := 
d.GlobalProps().GetClaimedDappRewards().Add(&prototype.Vest{Value: dappReward.Uint64()}) exceptGlobalRewardAfterCashout := &prototype.Vest{ Value: new(big.Int).Sub(nextBlockGlobalDappReward, dappReward).Uint64()} acc2Reward := acc2DappReward acc3Reward := acc3DappReward allAccDappReward := new(big.Int).Add(acc2DappReward, acc3DappReward) a.NoError(d.ProduceBlocks(1)) a.Equal(d.GlobalProps().GetWeightedVpsDapp(), exceptNextBlockDappWeightedVps.String()) acc1vest1 := d.Account(tester.acc2.Name).GetVest().Value acc2vest1 := d.Account(tester.acc3.Name).GetVest().Value a.NotZero(acc2Reward.Uint64()) a.Equal(acc2Reward.Uint64(), acc1vest1 - acc1vest0) a.Equal(acc3Reward.Uint64(), acc2vest1 - acc2vest0) a.Equal(d.Post(REPLY).GetDappRewards().Value, allAccDappReward.Uint64()) a.Equal(d.GlobalProps().GetClaimedDappRewards(), exceptGlobalClaimRewardAfterCashout) a.Equal(d.GlobalProps().GetPoolDappRewards(), exceptGlobalRewardAfterCashout) // make all post/test has been cashouted a.NoError(d.ProduceBlocks(constants.PostCashOutDelayBlock)) } func (tester *DappTester) normal7(t *testing.T, d *Dandelion) { a := assert.New(t) const POST1 = 8 const POST2 = 9 const REPLY = 10 postBeneficiary := []map[string]int{{tester.acc2.Name: 10000}} replyBeneficiary := []map[string]int{{tester.acc3.Name: 10000}} a.NoError(tester.acc0.SendTrxAndProduceBlock(Post(POST1, tester.acc0.Name, "title", "content", []string{"5"}, nil))) a.NoError(tester.acc0.SendTrx(Post(POST2, tester.acc0.Name, "title", "content", []string{"6"}, postBeneficiary))) a.NoError(tester.acc1.SendTrx(Reply(REPLY, POST1, tester.acc1.Name, "content", replyBeneficiary))) a.NoError(d.ProduceBlocks(1)) acc2vest0 := d.Account(tester.acc2.Name).GetVest().Value acc3vest0 := d.Account(tester.acc3.Name).GetVest().Value a.NoError(tester.acc1.SendTrx(Vote(tester.acc1.Name, POST2))) a.NoError(tester.acc0.SendTrx(Vote(tester.acc0.Name, REPLY))) a.NoError(d.ProduceBlocks(constants.PostCashOutDelayBlock - 2)) post1DappWeight := 
StringToBigInt(d.Post(POST2).GetWeightedVp()) replyDappWeight := StringToBigInt(d.Post(REPLY).GetWeightedVp()) // dapp reward acc2DappWeight := ProportionAlgorithm(new(big.Int).SetUint64(10000), new(big.Int).SetUint64(constants.PERCENT), post1DappWeight) acc3DappWeight := ProportionAlgorithm(new(big.Int).SetUint64(10000), new(big.Int).SetUint64(constants.PERCENT), replyDappWeight) // reply dapp equalize acc2DappWeightEqualize := acc2DappWeight acc3DappWeightEqualize := ProportionAlgorithm(new(big.Int).SetUint64(constants.RewardRateReply), new(big.Int).SetUint64(constants.RewardRateAuthor), acc3DappWeight) dappWeight := new(big.Int).Add(acc2DappWeightEqualize, acc3DappWeightEqualize) globalDappReward := new(big.Int).SetUint64(d.GlobalProps().GetPoolDappRewards().Value) bigDappWvp := StringToBigInt(d.GlobalProps().GetWeightedVpsDapp()) decayedDappWeight := bigDecay(bigDappWvp) exceptNextBlockDappWeightedVps := decayedDappWeight.Add(decayedDappWeight, dappWeight) nextBlockGlobalDappReward := globalDappReward.Add(globalDappReward, new(big.Int).SetUint64(perBlockDappReward(d))) acc2DappReward := ProportionAlgorithm(acc2DappWeightEqualize, exceptNextBlockDappWeightedVps, nextBlockGlobalDappReward) acc3DappReward := ProportionAlgorithm(acc3DappWeightEqualize, exceptNextBlockDappWeightedVps, nextBlockGlobalDappReward) dappReward := new(big.Int).Add(acc2DappReward, acc3DappReward) exceptGlobalClaimRewardAfterCashout := d.GlobalProps().GetClaimedDappRewards().Add(&prototype.Vest{Value: dappReward.Uint64()}) exceptGlobalRewardAfterCashout := &prototype.Vest{ Value: new(big.Int).Sub(nextBlockGlobalDappReward, dappReward).Uint64()} acc2Reward := acc2DappReward acc3Reward := acc3DappReward a.NoError(d.ProduceBlocks(1)) a.Equal(d.GlobalProps().GetWeightedVpsDapp(), exceptNextBlockDappWeightedVps.String()) acc2vest1 := d.Account(tester.acc2.Name).GetVest().Value acc3vest1 := d.Account(tester.acc3.Name).GetVest().Value a.NotZero(acc2Reward.Uint64()) a.Equal(acc2Reward.Uint64(), 
acc2vest1 - acc2vest0) a.Equal(acc3Reward.Uint64(), acc3vest1 - acc3vest0) a.Equal(d.GlobalProps().GetClaimedDappRewards(), exceptGlobalClaimRewardAfterCashout) a.Equal(d.GlobalProps().GetPoolDappRewards(), exceptGlobalRewardAfterCashout) // make all post/test has been cashouted a.NoError(d.ProduceBlocks(constants.PostCashOutDelayBlock)) }
tests/economist/dapp.go
0.518059
0.403684
dapp.go
starcoder
package dp

import (
	"math"
	"sort"
)

// Round is a helper for converting weights and scores from []float64 to
// []int by scaling each value by 10^n and truncating (flooring) the result.
// For example, Round(dollars, 2) incorporates cents and Round(dollars, 0)
// ignores (truncates) change. This may inherently alter the units of the
// solution's max value.
func Round(floats []float64, n int) (ints []int) {
	ints = make([]int, len(floats))
	// Hoist the scale factor out of the loop; it is loop-invariant.
	scale := math.Pow(10, float64(n))
	for i := range floats {
		ints[i] = int(math.Floor(floats[i] * scale))
	}
	return
}

// Parameters holds the inputs for the 0/1 knapsack DP algorithm: the scores
// and weights, aligned by index, and the maximum weight the knapsack can
// hold. All scores and weights must be integer-valued.
type Parameters struct {
	Scores    []int
	Weights   []int
	MaxWeight int

	// v[i][w] is the best score using the first i items within budget w;
	// keep[j][w] records whether item j is taken at budget w, and is used
	// to reconstruct the chosen item set.
	v    [][]int
	keep [][]int
}

// reset (re)allocates the DP tables so Max may be called repeatedly on the
// same Parameters value.
func (p *Parameters) reset() {
	n := len(p.Scores)
	W := p.MaxWeight
	p.v = make([][]int, n+1)
	p.v[0] = make([]int, W+1) // zero items -> score 0 for every budget
	for i := 1; i <= n; i++ {
		p.v[i] = make([]int, W+1)
		for j := 0; j <= W; j++ {
			// "Not yet computed" sentinel. The original used
			// int(math.MinInt64), which fails to compile on 32-bit
			// platforms (constant overflows int); math.MinInt is correct
			// on every platform. Max overwrites every cell before reading.
			p.v[i][j] = math.MinInt
		}
	}
	p.keep = make([][]int, n)
	for i := 0; i < n; i++ {
		p.keep[i] = make([]int, W+1)
	}
}

// evalAt computes the best score considering the first i items (1..n) at
// weight budget w, assuming row i-1 of the table is already complete. It
// marks keep[i-1][w] when taking item i-1 improves the score.
func (p *Parameters) evalAt(i, w int) int {
	// i counts up to which item to consider 1..n
	// j counts index for weight of item 0..n-1
	j := i - 1
	prev := p.v[i-1][w]
	// check if the knapsack can even hold just this item
	if w < p.Weights[j] {
		return prev
	}
	alt := p.Scores[j] + p.v[i-1][w-p.Weights[j]]
	if alt > prev {
		p.keep[j][w] = 1
		return alt
	}
	return prev
}

// Max is the main function to perform the optimization algorithm. It returns
// the optimal value (max score total) along with a sorted array of indices
// for the chosen items making up the optimal solution.
func Max(p *Parameters) (optVal int, soln []int) {
	p.reset()
	n := len(p.Scores)
	W := p.MaxWeight
	for i := 1; i <= n; i++ {
		for w := 0; w <= W; w++ {
			p.v[i][w] = p.evalAt(i, w)
		}
	}
	// Walk the keep table backwards from the full budget to recover the
	// chosen items, shrinking the remaining budget as each item is taken.
	soln = make([]int, 0)
	K := W
	for i := n - 1; i >= 0; i-- {
		if p.keep[i][K] == 1 {
			soln = append(soln, i)
			K = K - p.Weights[i]
		}
	}
	sort.Ints(soln)
	optVal = p.v[n][W]
	return
}
dp/dp.go
0.739328
0.41253
dp.go
starcoder
package util

import (
	"fmt"
	"time"

	"github.com/jinzhu/now"
)

// Now is the func used for system time within gtm
// This allows for manipulating system time during testing
var Now = func() time.Time { return time.Now() }

// DateRange creates predefined date ranges and validates if dates are within the range.
// A zero Start means "no lower bound"; a zero End means "no upper bound".
type DateRange struct {
	Start time.Time
	End   time.Time
}

// IsSet returns true if the date range has a starting and/or ending date
func (d DateRange) IsSet() bool {
	return !d.Start.IsZero() || !d.End.IsZero()
}

// String returns a date range as a string
func (d DateRange) String() string {
	return fmt.Sprintf("%s - %s", d.Start.Format(time.UnixDate), d.End.Format(time.UnixDate))
}

// Within determines if a date is within the date range.
// Both endpoints are inclusive; a range with neither bound set matches
// nothing (returns false for every t).
func (d DateRange) Within(t time.Time) bool {
	switch {
	case !d.Start.IsZero() && !d.End.IsZero():
		return t.Equal(d.Start) || t.Equal(d.End) || (t.After(d.Start) && t.Before(d.End))
	case !d.Start.IsZero():
		return t.Equal(d.Start) || t.After(d.Start)
	case !d.End.IsZero():
		return t.Equal(d.End) || t.Before(d.End)
	default:
		return false
	}
}

// AfterNow returns a date range ending n days in the past.
// Only End is set, so Within matches everything at or before the end of
// that day. NOTE(review): despite the name, this is an open-started,
// upper-bounded range — confirm against callers.
func AfterNow(n int) DateRange {
	end := now.New(Now()).EndOfDay().AddDate(0, 0, -n)
	return DateRange{End: end}
}

// TodayRange returns a date range for today
func TodayRange() DateRange {
	// The local variable shadows the imported "now" package within this
	// function body; the same pattern is used throughout this file.
	now := now.New(Now())
	start := now.BeginningOfDay()
	end := now.EndOfDay()
	return DateRange{Start: start, End: end}
}

// YesterdayRange returns a date range for yesterday
func YesterdayRange() DateRange {
	now := now.New(Now())
	start := now.BeginningOfDay().AddDate(0, 0, -1)
	// End of yesterday: one day after start, minus a nanosecond.
	end := start.AddDate(0, 0, 1).Add(-time.Nanosecond)
	return DateRange{Start: start, End: end}
}

// ThisWeekRange returns a date range for this week
func ThisWeekRange() DateRange {
	now := now.New(Now())
	start := now.BeginningOfWeek()
	end := now.EndOfWeek()
	return DateRange{End: end, Start: start}
}

// LastWeekRange returns a date for last week
func LastWeekRange() DateRange {
	now := now.New(Now())
	start := now.BeginningOfWeek().AddDate(0, 0, -7)
	end := start.AddDate(0, 0, 7).Add(-time.Nanosecond)
	return DateRange{End: end, Start: start}
}

// ThisMonthRange returns a date range for this month
func ThisMonthRange() DateRange {
	now := now.New(Now())
	start := now.BeginningOfMonth()
	end := now.EndOfMonth()
	return DateRange{End: end, Start: start}
}

// LastMonthRange returns a date range for last month
func LastMonthRange() DateRange {
	now := now.New(Now())
	start := now.BeginningOfMonth().AddDate(0, -1, 0)
	end := start.AddDate(0, 1, 0).Add(-time.Nanosecond)
	return DateRange{End: end, Start: start}
}

// ThisYearRange returns a date range for this year
func ThisYearRange() DateRange {
	now := now.New(Now())
	start := now.BeginningOfYear()
	end := now.EndOfYear()
	return DateRange{End: end, Start: start}
}

// LastYearRange returns a date range for last year
func LastYearRange() DateRange {
	now := now.New(Now())
	start := now.BeginningOfYear().AddDate(-1, 0, 0)
	end := start.AddDate(1, 0, 0).Add(-time.Nanosecond)
	return DateRange{End: end, Start: start}
}
util/date.go
0.793666
0.487429
date.go
starcoder
// Package tree contains the data generator to build the tree benchmark data. package tree import ( "fmt" "math" "github.com/google/badwolf/tools/benchmark/generator" "github.com/google/badwolf/triple" "github.com/google/badwolf/triple/node" "github.com/google/badwolf/triple/predicate" ) // treeGenerator generates data modeled after a tree structure. type treeGenerator struct { branch int nodeType *node.Type predicate *predicate.Predicate } // New creates a new tree generator. The triples are generated using breadth // search first. All predicates are immutable and use the predicate // `"parent_of"@[]`.` func New(branch int) (generator.Generator, error) { if branch < 1 { return nil, fmt.Errorf("invalid branch factor %d", branch) } nt, err := node.NewType("/tn") if err != nil { return nil, err } p, err := predicate.NewImmutable("parent_of") if err != nil { return nil, err } return &treeGenerator{ branch: branch, nodeType: nt, predicate: p, }, nil } // newNode returns a new node for the given identifier. func (t *treeGenerator) newNode(branch int, parentID string) (*node.Node, error) { tid := fmt.Sprintf("%d/%s", branch, parentID) if parentID == "" { tid = fmt.Sprintf("%d", branch) } id, err := node.NewID(tid) if err != nil { return nil, err } return node.NewNode(t.nodeType, id), nil } // newTriple creates a new triple given the parent and the descendant as an object. func (t *treeGenerator) newTriple(parent, descendant *node.Node) (*triple.Triple, error) { return triple.New(parent, t.predicate, triple.NewNodeObject(descendant)) } // recurse generated the triple by recursing while there are still triples // left to generate. 
func (t *treeGenerator) recurse(parent *node.Node, left *int, currentDepth, maxDepth int, trpls []*triple.Triple) ([]*triple.Triple, error) { if *left < 1 { return trpls, nil } for i, last := 0, *left <= t.branch; i < t.branch; i++ { offspring, err := t.newNode(i, parent.ID().String()) if err != nil { return trpls, err } trpl, err := t.newTriple(parent, offspring) if err != nil { return trpls, err } trpls = append(trpls, trpl) (*left)-- if *left < 1 { break } if currentDepth < maxDepth && !last { ntrpls, err := t.recurse(offspring, left, currentDepth+1, maxDepth, trpls) if err != nil { return ntrpls, err } trpls = ntrpls } if *left < 1 { break } } return trpls, nil } // Generates the requested number of triples. func (t *treeGenerator) Generate(n int) ([]*triple.Triple, error) { var trpls []*triple.Triple if n <= 0 { return trpls, nil } root, err := t.newNode(0, "") if err != nil { return nil, err } depth := int(math.Log(float64(n)) / math.Log(float64(t.branch))) ntrpls, err := t.recurse(root, &n, 0, depth, trpls) if err != nil { return nil, err } return ntrpls, nil }
tools/benchmark/generator/tree/tree.go
0.786746
0.466724
tree.go
starcoder
package ztype import ( zserio "github.com/woven-planet/go-zserio" ) // BitSizeOfDescriptor returns the bit size of a descriptor. func bitSizeOfDescriptor(packingNode *zserio.PackingContextNode, bitPosition int) (int, error) { endBitPosition := bitPosition if packingNode.HasContext() { endBitPosition += packingNode.BitSizeOfDescriptor() } else { for _, childNode := range packingNode.GetChildren() { delta, err := bitSizeOfDescriptor(childNode, endBitPosition) if err != nil { return 0, err } endBitPosition += delta } } return endBitPosition - bitPosition, nil } // OffsetMethod is a function used to set/check bit offsets in the buffer. type OffsetMethod func(int, int64) // Array allows representing arrays of any type and serialize them to the zserio format. type Array[T any, Y IArrayTraits[T]] struct { // ArrayTraits are the array traits used. ArrayTraits Y // RawArray is a reference to the raw array. RawArray []T // IsAuto specifies if the array size is automatically calculated. IsAuto bool // IsPacked specifies if the array is packed. IsPacked bool // FixedSize is the size of the array, if the array is of fixed size FixedSize int // The node used by this array for packing PackedContext *zserio.PackingContextNode // SetOffsetMethod is an optional function to set the offset to the buffer. setOffsetMethod OffsetMethod checkOffsetMethod OffsetMethod } // Size returns the number of elements in an array. func (array *Array[T, Y]) Size() int { return len(array.RawArray) } // ZserioBitSize returns the total size of the unpacked array in bits. 
func (array *Array[T, Y]) ZserioBitSize(bitPosition int) (int, error) { endBitPosition := bitPosition size := array.Size() if array.IsAuto { delta, err := SignedBitSize(int64(size), 4) if err != nil { return 0, err } endBitPosition += delta } if array.ArrayTraits.BitSizeOfIsConstant() && size > 0 { var dummy T elementSize := array.ArrayTraits.BitSizeOf(dummy, 0) if array.setOffsetMethod != nil { endBitPosition += size * elementSize } else { // all elements are spaced in the same way endBitPosition = alignTo(8, endBitPosition) endBitPosition += elementSize + (size-1)*alignTo(8, elementSize) } } else { for _, element := range array.RawArray { if array.setOffsetMethod != nil { endBitPosition = alignTo(8, endBitPosition) } endBitPosition += array.ArrayTraits.BitSizeOf(element, endBitPosition) } } return endBitPosition - bitPosition, nil } // BitSizeOfPacked returns the total size of the packed array in bits. func (array *Array[T, Y]) ZserioBitSizePacked(bitPosition int) (int, error) { endBitPosition := bitPosition size := array.Size() if array.IsAuto { delta, err := SignedBitSize(int64(size), 4) if err != nil { return 0, err } endBitPosition += delta } if size > 0 { delta, err := bitSizeOfDescriptor(array.PackedContext, endBitPosition) if err != nil { return 0, err } endBitPosition += delta for _, element := range array.RawArray { if array.setOffsetMethod != nil { endBitPosition = alignTo(8, endBitPosition) } delta, err := array.ArrayTraits.PackedTraits().BitSizeOf(array.PackedContext, endBitPosition, element) if err != nil { return 0, err } endBitPosition += delta } } return endBitPosition - bitPosition, nil } // Clone does a deep copy of the array. 
func (array *Array[T, Y]) Clone() zserio.ZserioType { clone := Array[T, Y]{ ArrayTraits: array.ArrayTraits, RawArray: array.RawArray, IsAuto: array.IsAuto, IsPacked: array.IsPacked, FixedSize: array.FixedSize, PackedContext: array.PackedContext, setOffsetMethod: array.setOffsetMethod, checkOffsetMethod: array.checkOffsetMethod, } return &clone }
ztype/array.go
0.753829
0.475423
array.go
starcoder
package builtin

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
)

const SecondsInHour = 60 * 60
const SecondsInDay = 24 * SecondsInHour

// NetworkPolicy bundles the epoch-timing parameters of the network together
// with the expected leader count per epoch.
type NetworkPolicy struct {
	// The duration of a chain epoch.
	// Motivation: It guarantees that a block is propagated and WinningPoSt can be successfully done in time all supported miners.
	// Usage: It is used for deriving epoch-denominated periods that are more naturally expressed in clock time.
	epochDurationSeconds uint64
	epochsInHour         abi.ChainEpoch
	epochsInDay          abi.ChainEpoch
	epochsInYear         abi.ChainEpoch

	// Expected number of block quality in an epoch (e.g. 1 block with block quality 5, or 5 blocks with quality 1)
	// Motivation: It ensures that there is enough on-chain throughput
	// Usage: It is used to calculate the block reward.
	ExpectedLeadersPerEpoch int64
}

// MakeNetworkPolicy builds a NetworkPolicy, panicking if the epoch duration
// does not evenly divide one hour (the derived epochsIn* fields rely on
// exact integer division).
func MakeNetworkPolicy(epochDurationSeconds uint64, expectedLeadersPerEpoch int64) NetworkPolicy {
	if SecondsInHour%epochDurationSeconds != 0 {
		// This even division is an assumption that other code might unwittingly make.
		// Don't rely on it on purpose, though.
		// While we're pretty sure everything will still work fine, we're safer maintaining this invariant anyway.
		panic(fmt.Sprintf("epoch duration %d does not evenly divide one hour (%d)", epochDurationSeconds, SecondsInHour))
	}
	n := NetworkPolicy{
		ExpectedLeadersPerEpoch: expectedLeadersPerEpoch,
	}
	n.SetEpochDurationSeconds(epochDurationSeconds)
	return n
}

// SetEpochDurationSeconds recomputes the derived epochsIn* fields.
// NOTE(review): unlike MakeNetworkPolicy this does not enforce the
// divides-one-hour invariant, so calling it directly with a non-divisor
// silently truncates epochsInHour — confirm whether the check should be
// repeated here.
func (np *NetworkPolicy) SetEpochDurationSeconds(epochDurationSeconds uint64) {
	np.epochDurationSeconds = epochDurationSeconds
	np.epochsInHour = SecondsInHour / abi.ChainEpoch(np.epochDurationSeconds)
	np.epochsInDay = 24 * np.epochsInHour
	np.epochsInYear = 365 * np.epochsInDay
}

// EpochDurationSeconds returns the configured epoch duration in seconds.
func (np NetworkPolicy) EpochDurationSeconds() uint64 {
	return np.epochDurationSeconds
}

// EpochsInHour returns the number of epochs per hour.
func (np NetworkPolicy) EpochsInHour() abi.ChainEpoch {
	return np.epochsInHour
}

// EpochsInDay returns the number of epochs per day.
func (np NetworkPolicy) EpochsInDay() abi.ChainEpoch {
	return np.epochsInDay
}

// EpochsInYear returns the number of epochs per (365-day) year.
func (np NetworkPolicy) EpochsInYear() abi.ChainEpoch {
	return np.epochsInYear
}

var DefaultNetworkPolicy = MakeNetworkPolicy(30, 5)

// CurrentNetworkPolicy is mutable package-level state; the accessor
// functions below read whatever it holds at call time.
var CurrentNetworkPolicy = DefaultNetworkPolicy

func EpochDurationSeconds() uint64 {
	return CurrentNetworkPolicy.EpochDurationSeconds()
}

func EpochsInHour() abi.ChainEpoch {
	return CurrentNetworkPolicy.EpochsInHour()
}

func EpochsInDay() abi.ChainEpoch {
	return CurrentNetworkPolicy.EpochsInDay()
}

func EpochsInYear() abi.ChainEpoch {
	return CurrentNetworkPolicy.EpochsInYear()
}

func ExpectedLeadersPerEpoch() int64 {
	return CurrentNetworkPolicy.ExpectedLeadersPerEpoch
}

// Number of token units in an abstract "FIL" token.
// The network works purely in the indivisible token amounts. This constant converts to a fixed decimal with more
// human-friendly scale.
var TokenPrecision = big.NewIntUnsigned(1_000_000_000_000_000_000)

// The maximum supply of Filecoin that will ever exist (in token units)
var TotalFilecoin = big.Mul(big.NewIntUnsigned(2_000_000_000), TokenPrecision)

// Quality multiplier for committed capacity (no deals) in a sector
var QualityBaseMultiplier = big.NewInt(10)

// Quality multiplier for unverified deals in a sector
var DealWeightMultiplier = big.NewInt(10)

// Quality multiplier for verified deals in a sector
var VerifiedDealWeightMultiplier = big.NewInt(100)

// Precision used for making QA power calculations
const SectorQualityPrecision = 20

// 1 NanoFIL
var OneNanoFIL = big.NewInt(1_000_000_000)
actors/builtin/network.go
0.794943
0.529263
network.go
starcoder
package geocoding

import (
	"fmt"
	"log"

	"bitbucket.org/kleinnic74/photos/domain/gps"
)

// entry associates an arbitrary payload with its bounding rectangle.
type entry struct {
	bounds gps.Rect
	data   interface{}
}

// ResultFunc receives each matching payload together with its bounds.
type ResultFunc func(interface{}, gps.Rect)

// quadtree is a region quadtree over gps.Rect entries. Nodes split once
// they exceed capacity, down to maxDepth levels.
type quadtree struct {
	count    int
	Bounds   gps.Rect
	root     *node
	capacity int
	maxDepth int
}

// node is one cell of the quadtree. quads[0] == nil means "not subdivided";
// depth counts DOWN towards 0 (0 = may not split further).
type node struct {
	bounds   gps.Rect
	quads    [4]*node
	entries  []entry
	capacity int
	depth    int
}

// Visitor walks the tree structure, e.g. for debugging or rendering.
type Visitor interface {
	Begin(bounds gps.Rect)
	Level(depth int, bounds gps.Rect)
	Object(bounds gps.Rect)
	End()
}

// NewQuadTree creates an empty tree covering bounds with default
// capacity 20 and maximum depth 10.
func NewQuadTree(bounds gps.Rect) *quadtree {
	return &quadtree{Bounds: bounds, capacity: 20, maxDepth: 10}
}

// InsertRect adds payload o with bounding rectangle r.
// Panics when r lies outside the tree's declared Bounds.
func (qt *quadtree) InsertRect(r gps.Rect, o interface{}) {
	if !qt.Bounds.FullyContains(r) {
		// NOTE(review): "vounds" is a typo for "bounds" in this panic message.
		panic(fmt.Sprintf("Rect %v not in vounds %v", r, qt.Bounds))
	}
	if qt.root == nil {
		qt.root = newNode(r, qt.capacity, qt.maxDepth)
	} else {
		if !qt.root.FullyContains(r) {
			// Grow the root upward until it encloses r.
			qt.root = qt.root.grow(r)
		}
	}
	qt.root.add(r, o)
	qt.count++
}

// Find returns the payloads of all entries whose bounds contain p.
func (qt *quadtree) Find(p gps.Point) (result []interface{}) {
	if qt.root == nil {
		return
	}
	qt.root.findFunc(p, func(o interface{}, _ gps.Rect) {
		result = append(result, o)
	})
	return
}

// Visit walks the whole tree with v.
// NOTE(review): dereferences qt.root without a nil check — panics on an
// empty tree, unlike Find/FindFunc.
func (qt *quadtree) Visit(v Visitor) {
	v.Begin(qt.root.bounds)
	qt.root.visit(v)
	v.End()
}

// FindFunc invokes f for every entry whose bounds contain p.
func (qt *quadtree) FindFunc(p gps.Point, f ResultFunc) {
	if qt.root == nil {
		return
	}
	qt.root.findFunc(p, f)
	return
}

// newNode creates a quadtree cell.
// NOTE(review): logs on every node creation — noisy debug output in a hot
// allocation path; consider removing or demoting.
func newNode(bounds gps.Rect, capacity int, depth int) *node {
	log.Printf("New QuadTree node: %v", bounds)
	return &node{bounds: bounds, capacity: capacity, depth: depth}
}

// FullyContains reports whether r lies completely inside this cell.
func (n *node) FullyContains(r gps.Rect) bool {
	return n.bounds.FullyContains(r)
}

// add stores (r, o) in this cell, splitting and delegating to a child
// quadrant when over capacity. Entries straddling quadrant boundaries
// remain on this node (choose returns -1).
func (n *node) add(r gps.Rect, o interface{}) {
	e := entry{r, o}
	if n.quads[0] == nil {
		// Not subdivided yet
		if len(n.entries) < n.capacity || n.depth == 0 {
			n.entries = append(n.entries, e)
			return
		}
		n.split()
	}
	quad := n.choose(r)
	switch quad {
	case -1:
		n.entries = append(n.entries, e)
	default:
		n.quads[quad].add(r, o)
	}
}

// split subdivides the cell into four quadrants and redistributes entries
// that fit entirely within a single quadrant.
func (n *node) split() {
	hw, hh := n.bounds.HalfSize()
	log.Printf("Splitting to [%f/%f]", n.bounds[0]+hw,
		n.bounds[1]+hh)
	n.quads[0] = newNode(gps.RectFrom(n.bounds[0], n.bounds[1], n.bounds[0]+hw, n.bounds[1]+hh), n.capacity, n.depth-1)
	n.quads[1] = newNode(gps.RectFrom(n.bounds[0], n.bounds[1]+hh, n.bounds[0]+hw, n.bounds[3]), n.capacity, n.depth-1)
	n.quads[2] = newNode(gps.RectFrom(n.bounds[0]+hw, n.bounds[1], n.bounds[2], n.bounds[1]+hh), n.capacity, n.depth-1)
	n.quads[3] = newNode(gps.RectFrom(n.bounds[0]+hw, n.bounds[1]+hh, n.bounds[2], n.bounds[3]), n.capacity, n.depth-1)
	entries := n.entries
	n.entries = nil
	for _, e := range entries {
		quad := n.choose(e.bounds)
		switch quad {
		case -1:
			// Does not fit in any quadrant
			n.entries = append(n.entries, e)
		default:
			n.quads[quad].add(e.bounds, e.data)
		}
	}
}

// grow repeatedly doubles the tree's extent, making the current root one
// quadrant of a new, larger root, until r is fully contained. The growth
// direction (left/below) is picked so the new space leans towards r.
func (n *node) grow(r gps.Rect) *node {
	root := n
	for !root.FullyContains(r) {
		var xmin, ymin float64
		dx0, dx1 := root.bounds.X0()-r.X0(), r.X1()-root.bounds.X1()
		// previousIndex is the quadrant of the new root that the old root
		// becomes (2*left + below, matching choose/findFunc's ordering).
		var previousIndex int
		left := dx0 > dx1
		if left {
			xmin = root.bounds.X0() - root.bounds.W()
			previousIndex += 2
		} else {
			xmin = root.bounds.X0()
		}
		dy0, dy1 := root.bounds.Y0()-r.Y0(), r.Y1()-root.bounds.Y1()
		below := dy0 > dy1
		if below {
			ymin = root.bounds.Y0() - root.bounds.H()
			previousIndex += 1
		} else {
			ymin = root.bounds.Y0()
		}
		newRoot := newNode(gps.RectPointSize(xmin, ymin, root.bounds.W()*2, root.bounds.H()*2), n.capacity, root.depth+1)
		for i := 0; i < 4; i++ {
			if i == previousIndex {
				newRoot.quads[i] = root
			} else {
				dx := float64(i/2) * root.bounds.W()
				dy := float64(i%2) * root.bounds.H()
				r := gps.RectPointSize(xmin+dx, ymin+dy, root.bounds.W(), root.bounds.H())
				newRoot.quads[i] = newNode(r, n.capacity, root.depth)
			}
		}
		root = newRoot
	}
	return root
}

// choose returns the index of the (existing) child quadrant that fully
// contains r, or -1 when r straddles a boundary. Must only be called on a
// subdivided node.
func (n *node) choose(r gps.Rect) int {
	for i := 0; i < 4; i++ {
		if n.quads[i].bounds.FullyContains(r) {
			return i
		}
	}
	return -1
}

// findFunc invokes f for each entry at this node whose bounds contain p,
// then descends into the single child quadrant containing p.
func (n *node) findFunc(p gps.Point, f ResultFunc) {
	if !p.In(n.bounds) {
		return
	}
	for _, e := range n.entries {
		if p.In(e.bounds) {
			f(e.data, e.bounds)
		}
	}
	if n.quads[0] != nil {
		// Pick the quadrant by comparing p against the cell's center;
		// index encoding mirrors grow(): +2 for the right half, +1 for the
		// upper half.
		quad := 0
		dx, dy :=
			p.X()-(n.bounds[0]+n.bounds.W()/2), p.Y()-(n.bounds[1]+n.bounds.H()/2)
		if dx > 0 {
			quad += 2
		}
		if dy > 0 {
			quad++
		}
		n.quads[quad].findFunc(p, f)
	}
	return
}

// visit reports this node's level and objects, then recurses into all
// quadrants.
func (n *node) visit(v Visitor) {
	v.Level(n.depth, n.bounds)
	for _, e := range n.entries {
		v.Object(e.bounds)
	}
	if n.quads[0] != nil {
		for i := range n.quads {
			n.quads[i].visit(v)
		}
	}
}
geocoding/quadtree.go
0.593374
0.400749
quadtree.go
starcoder
package tree type Node struct { data int left, right *Node flag bool } func NewNode(value int) *Node { return &Node{ data: value, left: nil, right: nil, flag: false, } } type BinarySearchTree struct { root *Node count uint } func NewBinarySearchTree(root *Node) *BinarySearchTree { return &BinarySearchTree{root: root, count: 1} } func (b *BinarySearchTree) Search(value int) bool { if b.count == 0 { return false } if search(b.root, value) != nil { return true } return false } func search(root *Node, value int) *Node { if root == nil { return nil } if root.data == value { return root } else { if root.data < value { search(root.left, value) } else { search(root.right, value) } } return nil } func (b *BinarySearchTree) Insert(value int) { insert(b.root, value) b.count++ } func insert(root *Node, value int) { if value == root.data { return } else { if value < root.data { if root.left == nil { root.left = NewNode(value) return } else { insert(root.left, value) } } else { if root.right == nil { root.right = NewNode(value) return } else { insert(root.right, value) } } } } func (b *BinarySearchTree) Delete(value int) bool { if b.count == 0 { return false } target := search(b.root, value) if target != nil { target.flag = true return true } return false } func (b *BinarySearchTree) Max() *Node { if b.count == 0 { return nil } child := b.root.right for child.right != nil { child = child.right } return child } func (b *BinarySearchTree) Min() *Node { if b.count == 0 { return nil } child := b.root.left for child.left != nil { child = child.left } return child } func (b *BinarySearchTree) PreOrder() []int { if b.count == 0 { return nil } res := make([]int, 0) preOrder(b.root, &res) return res } func preOrder(root *Node, res *[]int) { *res = append(*res, root.data) if root.left != nil { preOrder(root.left, res) } if root.right != nil { preOrder(root.right, res) } } func (b *BinarySearchTree) InOrder() []int { if b.count == 0 { return nil } res := make([]int, 0) inOrder(b.root, 
&res) return res } func inOrder(root *Node, res *[]int) { if root.left != nil { inOrder(root.left, res) } *res = append(*res, root.data) if root.right != nil { inOrder(root.right, res) } } func (b *BinarySearchTree) PostOrder() []int { if b.count == 0 { return nil } res := make([]int, 0) postOrder(b.root, &res) return res } func postOrder(root *Node, res *[]int) { if root.left != nil { postOrder(root.left, res) } if root.right != nil { postOrder(root.right, res) } *res = append(*res, root.data) }
tree/binarysearchtree.go
0.624637
0.424889
binarysearchtree.go
starcoder
// Package goval: value-type descriptors. The repetitive Type
// implementations below follow a generated-file pattern (type.gen.go);
// prefer regenerating over hand-editing.
package goval

import "reflect"

// Numeric type IDs, one per supported value type; returned by Type.ID().
const (
	TypeNil = iota
	TypeBool
	TypeInt8
	TypeInt16
	TypeInt32
	TypeInt64
	TypeUint8
	TypeUint16
	TypeUint32
	TypeUint64
	TypeFloat32
	TypeFloat64
	TypeComplex64
	TypeComplex128
	TypeString
	TypeBytes
)

var (
	// ValTypes exposes one shared, ready-made instance of every Type
	// implementation, keyed by name.
	ValTypes = struct {
		Nil        Type
		Bool       Type
		Int8       Type
		Int16      Type
		Int32      Type
		Int64      Type
		Uint8      Type
		Uint16     Type
		Uint32     Type
		Uint64     Type
		Float32    Type
		Float64    Type
		Complex64  Type
		Complex128 Type
		String     Type
		Bytes      Type
	}{
		Nil:        &NilType{},
		Bool:       &BoolType{},
		Int8:       &Int8Type{},
		Int16:      &Int16Type{},
		Int32:      &Int32Type{},
		Int64:      &Int64Type{},
		Uint8:      &Uint8Type{},
		Uint16:     &Uint16Type{},
		Uint32:     &Uint32Type{},
		Uint64:     &Uint64Type{},
		Float32:    &Float32Type{},
		Float64:    &Float64Type{},
		Complex64:  &Complex64Type{},
		Complex128: &Complex128Type{},
		String:     &StringType{},
		Bytes:      &BytesType{},
	}
)

// Type describes one of the supported value types. Equality is by ID.
type Type interface {
	Equal(x Type) bool
	ID() int
	Kind() reflect.Kind
	Name() string
	String() string
	GoString() string
}

type NilType struct{}

func (t NilType) Equal(x Type) bool  { return t.ID() == x.ID() }
func (t NilType) ID() int            { return TypeNil }
func (t NilType) Kind() reflect.Kind { return reflect.Invalid }
func (t NilType) Name() string       { return "Nil" }
func (t NilType) String() string     { return "Nil" }
func (t NilType) GoString() string   { return "Type:Nil" }

type BoolType struct{}

func (t BoolType) Equal(x Type) bool  { return t.ID() == x.ID() }
func (t BoolType) ID() int            { return TypeBool }
func (t BoolType) Kind() reflect.Kind { return reflect.Bool }
func (t BoolType) Name() string       { return "Bool" }
func (t BoolType) String() string     { return "Bool" }
func (t BoolType) GoString() string   { return "Type:Bool" }
func (t BoolType) BitSize() int       { return 1 }

type Int8Type struct{}

func (t Int8Type) Equal(x Type) bool  { return t.ID() == x.ID() }
func (t Int8Type) ID() int            { return TypeInt8 }
func (t Int8Type) Kind() reflect.Kind { return reflect.Int8 }
func (t Int8Type) Name() string       { return "Int8" }
func (t Int8Type) String() string     { return "Int8" }
func (t Int8Type) GoString() string   { return "Type:Int8" }
func (t Int8Type) BitSize() int       { return 8 }

type Int16Type struct{}

func (t Int16Type) Equal(x Type) bool  { return t.ID() == x.ID() }
func (t Int16Type) ID() int            { return TypeInt16 }
func (t Int16Type) Kind() reflect.Kind { return reflect.Int16 }
func (t Int16Type) Name() string       { return "Int16" }
func (t Int16Type) String() string     { return "Int16" }
func (t Int16Type) GoString() string   { return "Type:Int16" }
func (t Int16Type) BitSize() int       { return 16 }

type Int32Type struct{}

func (t Int32Type) Equal(x Type) bool  { return t.ID() == x.ID() }
func (t Int32Type) ID() int            { return TypeInt32 }
func (t Int32Type) Kind() reflect.Kind { return reflect.Int32 }
func (t Int32Type) Name() string       { return "Int32" }
func (t Int32Type) String() string     { return "Int32" }
func (t Int32Type) GoString() string   { return "Type:Int32" }
func (t Int32Type) BitSize() int       { return 32 }

type Int64Type struct{}

func (t Int64Type) Equal(x Type) bool  { return t.ID() == x.ID() }
func (t Int64Type) ID() int            { return TypeInt64 }
func (t Int64Type) Kind() reflect.Kind { return reflect.Int64 }
func (t Int64Type) Name() string       { return "Int64" }
func (t Int64Type) String() string     { return "Int64" }
func (t Int64Type) GoString() string   { return "Type:Int64" }
func (t Int64Type) BitSize() int       { return 64 }

type Uint8Type struct{}

func (t Uint8Type) Equal(x Type) bool  { return t.ID() == x.ID() }
func (t Uint8Type) ID() int            { return TypeUint8 }
func (t Uint8Type) Kind() reflect.Kind { return reflect.Uint8 }
func (t Uint8Type) Name() string       { return "Uint8" }
func (t Uint8Type) String() string     { return "Uint8" }
func (t Uint8Type) GoString() string   { return "Type:Uint8" }
func (t Uint8Type) BitSize() int       { return 8 }

type Uint16Type struct{}

func (t Uint16Type) Equal(x Type) bool  { return t.ID() == x.ID() }
func (t Uint16Type) ID() int            { return TypeUint16 }
func (t Uint16Type) Kind() reflect.Kind { return reflect.Uint16 }
func (t Uint16Type) Name() string       { return "Uint16" }
func (t Uint16Type) String() string     { return "Uint16" }
func (t Uint16Type) GoString() string   { return "Type:Uint16" }
func (t Uint16Type) BitSize() int       { return 16 }

type Uint32Type struct{}

func (t Uint32Type) Equal(x Type) bool  { return t.ID() == x.ID() }
func (t Uint32Type) ID() int            { return TypeUint32 }
func (t Uint32Type) Kind() reflect.Kind { return reflect.Uint32 }
func (t Uint32Type) Name() string       { return "Uint32" }
func (t Uint32Type) String() string     { return "Uint32" }
func (t Uint32Type) GoString() string   { return "Type:Uint32" }
func (t Uint32Type) BitSize() int       { return 32 }

type Uint64Type struct{}

func (t Uint64Type) Equal(x Type) bool  { return t.ID() == x.ID() }
func (t Uint64Type) ID() int            { return TypeUint64 }
func (t Uint64Type) Kind() reflect.Kind { return reflect.Uint64 }
func (t Uint64Type) Name() string       { return "Uint64" }
func (t Uint64Type) String() string     { return "Uint64" }
func (t Uint64Type) GoString() string   { return "Type:Uint64" }
func (t Uint64Type) BitSize() int       { return 64 }

type Float32Type struct{}

func (t Float32Type) Equal(x Type) bool  { return t.ID() == x.ID() }
func (t Float32Type) ID() int            { return TypeFloat32 }
func (t Float32Type) Kind() reflect.Kind { return reflect.Float32 }
func (t Float32Type) Name() string       { return "Float32" }
func (t Float32Type) String() string     { return "Float32" }
func (t Float32Type) GoString() string   { return "Type:Float32" }
func (t Float32Type) BitSize() int       { return 32 }

type Float64Type struct{}

func (t Float64Type) Equal(x Type) bool  { return t.ID() == x.ID() }
func (t Float64Type) ID() int            { return TypeFloat64 }
func (t Float64Type) Kind() reflect.Kind { return reflect.Float64 }
func (t Float64Type) Name() string       { return "Float64" }
func (t Float64Type) String() string     { return "Float64" }
func (t Float64Type) GoString() string   { return "Type:Float64" }
func (t Float64Type) BitSize() int       { return 64 }

// NOTE(review): Complex64/Complex128 report BitSize 32/64, which looks like
// the size of one component rather than the whole value (Go's complex64 is
// 64 bits, complex128 is 128) — confirm the intended semantics before
// relying on BitSize for these two types.
type Complex64Type struct{}

func (t Complex64Type) Equal(x Type) bool  { return t.ID() == x.ID() }
func (t Complex64Type) ID() int            { return TypeComplex64 }
func (t Complex64Type) Kind() reflect.Kind { return reflect.Complex64 }
func (t Complex64Type) Name() string       { return "Complex64" }
func (t Complex64Type) String() string     { return "Complex64" }
func (t Complex64Type) GoString() string   { return "Type:Complex64" }
func (t Complex64Type) BitSize() int       { return 32 }

type Complex128Type struct{}

func (t Complex128Type) Equal(x Type) bool  { return t.ID() == x.ID() }
func (t Complex128Type) ID() int            { return TypeComplex128 }
func (t Complex128Type) Kind() reflect.Kind { return reflect.Complex128 }
func (t Complex128Type) Name() string       { return "Complex128" }
func (t Complex128Type) String() string     { return "Complex128" }
func (t Complex128Type) GoString() string   { return "Type:Complex128" }
func (t Complex128Type) BitSize() int       { return 64 }

type StringType struct{}

func (t StringType) Equal(x Type) bool  { return t.ID() == x.ID() }
func (t StringType) ID() int            { return TypeString }
func (t StringType) Kind() reflect.Kind { return reflect.String }
func (t StringType) Name() string       { return "String" }
func (t StringType) String() string     { return "String" }
func (t StringType) GoString() string   { return "Type:String" }

type BytesType struct{}

func (t BytesType) Equal(x Type) bool  { return t.ID() == x.ID() }
func (t BytesType) ID() int            { return TypeBytes }
func (t BytesType) Kind() reflect.Kind { return reflect.Slice }
func (t BytesType) Name() string       { return "Bytes" }
func (t BytesType) String() string     { return "Bytes" }
func (t BytesType) GoString() string   { return "Type:Bytes" }
type.gen.go
0.523664
0.540136
type.gen.go
starcoder
package spiral_memory

import (
	"math"
)

// DIM is the edge length of the square scratch matrix used for neighbour
// sums; OFFSET recenters spiral coordinates so (0, 0) lands in its middle.
var DIM = 2000
var OFFSET = DIM / 2

// square is one cell of the spiral: its 1-based index, its Manhattan
// distance from the center, and its neighbour-sum weight.
type square struct {
	index    uint64
	distance uint64
	weight   uint64
}

type spiral []square

type matrix []uint64

// GetIndex maps centered spiral coordinates (x, y) to a flat index into a
// DIM×DIM matrix.
func GetIndex(x int, y int) uint64 {
	return uint64((OFFSET+x)*DIM + (OFFSET + y))
}

// GetVal reads the weight stored at (x, y).
func GetVal(mat matrix, x int, y int) uint64 {
	return mat[GetIndex(x, y)]
}

// SetVal stores val at (x, y) and returns the (shared) matrix.
func SetVal(mat matrix, x int, y int, val uint64) matrix {
	mat[GetIndex(x, y)] = val
	return mat
}

// ComputeWeight sums the already-filled weights of the 8 neighbours of
// (x, y); unvisited cells contribute 0.
func ComputeWeight(mat matrix, x int, y int) uint64 {
	// root square? just return 1
	if x == 0 && y == 0 {
		return 1
	} else {
		var sum uint64 = 0
		sum += GetVal(mat, x+1, y)   // right
		sum += GetVal(mat, x+1, y+1) // top right
		sum += GetVal(mat, x, y+1)   // top
		sum += GetVal(mat, x-1, y+1) // top left
		sum += GetVal(mat, x-1, y)   // left
		sum += GetVal(mat, x-1, y-1) // bottom left
		sum += GetVal(mat, x, y-1)   // bottom
		sum += GetVal(mat, x+1, y-1) // bottom right
		return sum
	}
}

// solution taken from https://stackoverflow.com/a/398302/610979 and adapted to Go
// constructs a Spiral that's large enough to contain val (actually, it's a little
// bit larger, since we always construct a full nxn Spiral)
func Spiral(val uint64) []square {
	var dim = uint64(math.Ceil(math.Sqrt(float64(val))))
	var res = make([]square, dim*dim+1)
	// 2D matrix to keep track of sums in squares
	var sums = make([]uint64, DIM*DIM) // arbitrary: hopefully, 4M entries will suffice
	var x = 0
	var y = 0
	var dx = 0
	var dy = -1
	for i := uint64(1); i <= val; i++ {
		var weight = ComputeWeight(sums, x, y)
		SetVal(sums, x, y, weight)
		// distance is the Manhattan distance of the cell from the center.
		res[i] = square{i, uint64(math.Abs(float64(x)) + math.Abs(float64(y))), weight}
		// Turn the walk direction at the spiral's corners.
		if (x == y) || ((x < 0) && (x == -y)) || ((x > 0) && (x == 1-y)) {
			dx, dy = -dy, dx
		}
		x, y = x+dx, y+dy
	}
	return res
}

// PrintSpiral dumps the raw values for debugging.
// NOTE(review): the parameter type is []uint64, but the only (commented-out)
// call site in DistanceInSpiralMemory passes the []square produced by
// Spiral — the signature and intended use disagree; confirm which is wanted.
func PrintSpiral(spiral []uint64) {
	for i := 0; i < len(spiral); i++ {
		print(spiral[i])
	}
}

// DistanceInSpiralMemory returns the Manhattan distance of the given
// 1-based square index from the center (AoC 2017 day 3, part 1).
func DistanceInSpiralMemory(square uint64) uint64 {
	var spiral = Spiral(square)
	//PrintSpiral(spiral)
	return spiral[square].distance
}

// compute the sum of adjacent squares up to the given cell
func SumInSpiralMemory(square uint64) uint64 {
	var spiral = Spiral(square)
	return spiral[square].weight
}

// FirstSumInSpiralMemoryGreaterThanThreshold returns the first
// neighbour-sum weight that exceeds threshold (AoC 2017 day 3, part 2),
// or 0 if none is found within the first million squares.
func FirstSumInSpiralMemoryGreaterThanThreshold(threshold uint64) uint64 {
	var size uint64 = 1000000 // should be enough
	var spiral = Spiral(size)
	for i := uint64(1); i <= size; i++ {
		if spiral[i].weight > threshold {
			return spiral[i].weight
		}
	}
	return 0;
}
2017/03-spiral_memory/spiral_memory.go
0.722625
0.594816
spiral_memory.go
starcoder
package hdkeys

import (
	"crypto/sha256"
	"fmt"
	"math/big"

	"github.com/runeaune/bitcoin-crypto/bitecdsa"
	"github.com/runeaune/bitcoin-crypto/bitelliptic"
	"golang.org/x/crypto/ripemd160"
)

// point is an affine (x, y) point on the secp256k1 curve.
type point struct {
	x, y *big.Int
}

// RIPEMD160Hash computes RIPEMD160(SHA256(data)) — Bitcoin's HASH160.
func RIPEMD160Hash(data []byte) []byte {
	first := sha256.Sum256(data)
	hasher := ripemd160.New()
	hasher.Write(first[:])
	return hasher.Sum(nil)
}

// ser256 serializes key as a fixed 32-byte big-endian integer,
// left-padding with zeros.
func ser256(key *big.Int) []byte {
	b := key.Bytes()
	if len(b) == 32 {
		return b
	}
	// Pad short data with leading zeros.
	padding := 32 - len(b)
	data := make([]byte, 32)
	copy(data[0+padding:32], b)
	return data
}

// parse256 is the inverse of ser256; it panics on any length other than 32.
func parse256(b []byte) *big.Int {
	if len(b) != 32 {
		panic(fmt.Sprintf("Data \"%x\" isn't 32 bytes.", b))
	}
	return new(big.Int).SetBytes(b)
}

// SerializeCompact encodes p in the 33-byte compressed form:
// a 0x02/0x03 parity prefix (even/odd y) followed by x.
func SerializeCompact(p point) []byte {
	data := make([]byte, 33)
	if p.y.Bit(0) == 0 {
		// y is even.
		data[0] = 0x02
	} else {
		data[0] = 0x03
	}
	b := p.x.Bytes()
	// Pad short data with leading zeros.
	padding := 32 - len(b)
	copy(data[1+padding:33], b)
	return data
}

// ParseCompact decodes a 33-byte compressed point, recovering y from x via
// y² = x³ + B and the prefix's parity bit. The square root is computed as
// y2^((P+1)/4) mod P, which is valid because secp256k1's P ≡ 3 (mod 4).
// Panics on wrong length.
func ParseCompact(b []byte) point {
	if len(b) != 33 {
		panic(fmt.Sprintf("Data \"%x\" isn't 33 bytes.", b))
	}
	curve := bitelliptic.S256()
	// y = sqrt(x^3 + B) mod P
	x := new(big.Int).SetBytes(b[1:33])
	x3 := new(big.Int).Mul(x, x)
	x3.Mul(x3, x)
	y2 := new(big.Int).Add(x3, curve.B)
	y2.Mod(y2, curve.P)
	// sqrt(a) = a^((P+1)/4)
	e := big.NewInt(1)
	e = e.Add(e, curve.P)
	e = e.Div(e, big.NewInt(4))
	y := y2.Exp(y2, e, curve.P)
	switch b[0] {
	case 0x02:
		// y should be even.
		if y.Bit(0) == 1 {
			y = y.Sub(curve.P, y)
		}
	case 0x03:
		// y should be odd.
		if y.Bit(0) == 0 {
			y = y.Sub(curve.P, y)
		}
	default:
		// TODO consider panicking if functions is private.
		// NOTE(review): an unknown prefix silently yields the zero-value
		// point {nil, nil}; callers must not dereference it.
		return point{}
	}
	return point{x, y}
}

// addInts returns (a + b) mod N.
// NOTE(review): big.Int.Add writes into its receiver, so this MUTATES a
// and returns it — confirm callers do not rely on a being unchanged.
func addInts(a, b *big.Int) *big.Int {
	i := a.Add(a, b)
	i.Mod(i, bitelliptic.S256().N)
	return i
}

// privateToPublic returns the public point d·G for private scalar d.
func privateToPublic(d *big.Int) point {
	key := bitecdsa.NewKeyFromInt(bitelliptic.S256(), d)
	return point{key.X, key.Y}
}

// addPoints returns the elliptic-curve sum a + b.
func addPoints(a, b point) point {
	x, y := bitelliptic.S256().Add(a.x, a.y, b.x, b.y)
	return point{x, y}
}

// ParseUncompressed parses a 65 bytes uncompressed public address into a (X,Y)
// point on the curve.
func ParseUncompressed(d []byte) (*big.Int, *big.Int, error) {
	if len(d) != 65 {
		return nil, nil, fmt.Errorf("Input has wrong length %d (expected 65).", len(d))
	}
	if d[0] != 0x04 {
		return nil, nil, fmt.Errorf("Input has wrong prefix 0x%x (expected 0x04).", d[0])
	}
	return new(big.Int).SetBytes(d[1:33]), new(big.Int).SetBytes(d[33:65]), nil
}

// SerializeUncompressed serializes a point on the curve into a 65 byte
// long byte array.
func SerializeUncompressed(p point) []byte {
	X := p.x.Bytes()
	Y := p.y.Bytes()
	// Pad leading zeros for short integers.
	paddingX := 32 - len(X)
	paddingY := 32 - len(Y)
	b := make([]byte, 65)
	b[0] = 0x04
	copy(b[1+paddingX:33], X)
	copy(b[33+paddingY:65], Y)
	return b
}

// keyIsValid reports whether i is a valid private key scalar: 0 < i < N.
func keyIsValid(i *big.Int) bool {
	if i.Sign() != 0 && i.Cmp(bitelliptic.S256().N) < 0 {
		return true
	}
	return false
}
util.go
0.550607
0.44903
util.go
starcoder
package day10

import (
	"fmt"
	"math/big"
	"sort"
	"strings"

	"github.com/OctaviPascual/AdventOfCode2019/util"
)

const (
	asteroid = '#'
	empty    = '.'
)

// Day holds the data needed to solve part one and part two
type Day struct {
	asteroidMap [][]bool
}

type position struct {
	// instead of working with the coordinates suggested in the statement, we use regular matrix coordinates
	// (that is, (i=0, j=0) is the top left position, (i=0, j=1) is the position to its right)
	i, j int
}

// monitoringStation is a candidate station location plus the laser state
// used for part two.
type monitoringStation struct {
	position                position
	asteroidMap             [][]bool
	positionsGroupedByAngle [][]position
	vaporizedAsteroids      []position
}

// NewDay returns a new Day that solves part one and two for the given input
func NewDay(input string) (*Day, error) {
	asteroidMap, err := parseAsteroidMap(input)
	if err != nil {
		return nil, err
	}
	return &Day{
		asteroidMap: asteroidMap,
	}, nil
}

// SolvePartOne solves part one
func (d Day) SolvePartOne() (string, error) {
	bestMonitoringStation := findBestMonitoringStationLocation(d.asteroidMap)
	return fmt.Sprintf("%d", bestMonitoringStation.asteroidsDetected()), nil
}

// SolvePartTwo solves part two
func (d Day) SolvePartTwo() (string, error) {
	bestMonitoringStation := findBestMonitoringStationLocation(d.asteroidMap)
	bestMonitoringStation.computePositionsGroupedByAngle()
	bestMonitoringStation.runLaser()
	twoHundredthVaporizedAsteroid := bestMonitoringStation.vaporizedAsteroids[200-1]
	// The puzzle answer is 100*X + Y; in matrix coordinates the statement's
	// X is the column j and Y is the row i, hence i + j*100.
	return fmt.Sprintf("%d", twoHundredthVaporizedAsteroid.i+twoHundredthVaporizedAsteroid.j*100), nil
}

// parseAsteroidMap converts the '#'/'.' grid into a boolean matrix
// (true = asteroid), rejecting any other character.
func parseAsteroidMap(input string) ([][]bool, error) {
	lines := strings.Split(input, "\n")
	asteroidMap := make([][]bool, len(lines))
	for i, line := range lines {
		asteroidMap[i] = make([]bool, len(line))
		for j, position := range line {
			switch position {
			case empty:
				asteroidMap[i][j] = false
			case asteroid:
				asteroidMap[i][j] = true
			default:
				return nil, fmt.Errorf("invalid position %c", position)
			}
		}
	}
	return asteroidMap, nil
}

// findBestMonitoringStationLocation tries every asteroid as a station and
// keeps the one detecting the most other asteroids.
func findBestMonitoringStationLocation(asteroidMap [][]bool) monitoringStation {
	rows := len(asteroidMap)
	cols := len(asteroidMap[0])
	maxAsteroidsDetected := 0
	var bestMonitoringStation monitoringStation
	for i := 0; i < rows; i++ {
		for j := 0; j < cols; j++ {
			if asteroidMap[i][j] {
				monitoringStation := monitoringStation{
					position:    position{i, j},
					asteroidMap: asteroidMap,
				}
				asteroidsDetected := monitoringStation.asteroidsDetected()
				if asteroidsDetected > maxAsteroidsDetected {
					maxAsteroidsDetected = asteroidsDetected
					bestMonitoringStation = monitoringStation
				}
			}
		}
	}
	return bestMonitoringStation
}

// asteroidsDetected counts asteroids with an unobstructed line of sight
// from the station.
func (ms monitoringStation) asteroidsDetected() int {
	rows := len(ms.asteroidMap)
	cols := len(ms.asteroidMap[0])
	asteroidsDetected := 0
	for i := 0; i < rows; i++ {
		for j := 0; j < cols; j++ {
			if ms.asteroidMap[i][j] && ms.hasDirectLineOfSight(position{i, j}) {
				asteroidsDetected++
			}
		}
	}
	return asteroidsDetected
}

// hasDirectLineOfSight reports whether no asteroid lies strictly between
// the station and p along the reduced (di/gcd, dj/gcd) direction vector.
func (ms monitoringStation) hasDirectLineOfSight(p position) bool {
	if p == ms.position {
		return false
	}
	// (di, dj) is a difference vector between the position p and the monitoring station ms
	di := p.i - ms.position.i
	dj := p.j - ms.position.j
	var iStep, jStep, steps int
	gcd := util.GCD(util.Abs(di), util.Abs(dj))
	// NOTE(review): the gcd == 0 branch assumes util.GCD can return 0 for
	// axis-aligned vectors and uses the signed di/dj as the step count
	// (negative for up/left) — confirm util.GCD's contract.
	if gcd == 0 {
		if di == 0 {
			iStep = 0
			jStep = 1
			steps = dj
		} else {
			iStep = 1
			jStep = 0
			steps = di
		}
	} else {
		iStep = di / gcd
		jStep = dj / gcd
		steps = gcd
	}
	// check all positions in direct line of sight between monitoring station ms and position p
	for k := 1; k < steps; k++ {
		if ms.asteroidMap[ms.position.i+k*iStep][ms.position.j+k*jStep] {
			return false
		}
	}
	return true
}

// computePositionsGroupedByAngle buckets every other grid position by the
// pseudo-angle of its direction from the station, sorted so the laser
// sweep order matches increasing pseudo-angle (up first, then clockwise).
func (ms *monitoringStation) computePositionsGroupedByAngle() {
	rows := len(ms.asteroidMap)
	cols := len(ms.asteroidMap[0])
	type enrichedPosition struct {
		position    position
		pseudoAngle *big.Rat
	}
	enrichedPositions := make([]enrichedPosition, 0, rows*cols-1)
	for i := 0; i < rows; i++ {
		for j := 0; j < cols; j++ {
			position := position{i, j}
			if position != ms.position {
				di := position.i - ms.position.i
				dj := position.j - ms.position.j
				pseudoAngle := pseudoAngle(di, dj)
				enrichedPosition := enrichedPosition{
					position:    position,
					pseudoAngle: pseudoAngle,
				}
				enrichedPositions = append(enrichedPositions, enrichedPosition)
			}
		}
	}
	sort.Slice(enrichedPositions, func(i, j int) bool {
		return enrichedPositions[i].pseudoAngle.Cmp(enrichedPositions[j].pseudoAngle) < 0
	})
	// group positions that have the same pseudo angle in same bucket
	i := 0
	j := 0
	for i < len(enrichedPositions) {
		ms.positionsGroupedByAngle = append(ms.positionsGroupedByAngle, make([]position, 0))
		ms.positionsGroupedByAngle[j] = append(ms.positionsGroupedByAngle[j], enrichedPositions[i].position)
		pseudoAngle := enrichedPositions[i].pseudoAngle
		i++
		for i < len(enrichedPositions) && enrichedPositions[i].pseudoAngle.Cmp(pseudoAngle) == 0 {
			ms.positionsGroupedByAngle[j] = append(ms.positionsGroupedByAngle[j], enrichedPositions[i].position)
			i++
		}
		j++
	}
}

// runLaser sweeps the angle buckets in order, vaporizing the nearest
// visible asteroid per bucket per rotation, recording the order in
// vaporizedAsteroids.
// NOTE(review): the loop stops after vaporizing only the initially-visible
// count (asteroidsDetected), so asteroids revealed by later vaporizations
// are never processed — sufficient for this puzzle, which only needs the
// 200th vaporized asteroid.
func (ms *monitoringStation) runLaser() []position {
	totalAsteroids := ms.asteroidsDetected()
	i := 0
	for totalAsteroids > 0 {
		for _, position := range ms.positionsGroupedByAngle[i] {
			if ms.asteroidMap[position.i][position.j] && ms.hasDirectLineOfSight(position) {
				ms.asteroidMap[position.i][position.j] = false
				ms.vaporizedAsteroids = append(ms.vaporizedAsteroids, position)
				totalAsteroids--
				break
			}
		}
		i = (i + 1) % len(ms.positionsGroupedByAngle)
	}
	return nil
}

// pseudoAngle returns a number from the range [-2 .. 2] which is monotonic in the angle the vector (di, dj)
// makes against the x axis
// https://stackoverflow.com/q/16542042
func pseudoAngle(di, dj int) *big.Rat {
	r := big.NewRat(int64(di), int64(util.Abs(di)+util.Abs(dj)))
	if dj < 0 {
		return r.Sub(big.NewRat(1, 1), r)
	}
	return r.Sub(r, big.NewRat(1, 1))
}
0.70253
0.458591
day10.go
starcoder
package runtime_data_area import ( "math" "github.com/Frederick-S/jvmgo/runtime_data_area/heap" ) type OperandStack struct { size uint operands []Variable } func newOperandStack(maxStackSize uint) *OperandStack { if maxStackSize > 0 { return &OperandStack{ operands: make([]Variable, maxStackSize), } } return nil } func (operandStack *OperandStack) PushBooleanValue(value bool) { if value { operandStack.PushIntegerValue(1) } else { operandStack.PushIntegerValue(0) } } func (operandStack *OperandStack) PopBooleanValue() bool { return operandStack.PopIntegerValue() == 1 } func (operandStack *OperandStack) PushIntegerValue(value int32) { operandStack.operands[operandStack.size].numericalValue = value operandStack.size++ } func (operandStack *OperandStack) PopIntegerValue() int32 { operandStack.size-- return operandStack.operands[operandStack.size].numericalValue } func (operandStack *OperandStack) PushFloatValue(value float32) { float32Bits := math.Float32bits(value) operandStack.operands[operandStack.size].numericalValue = int32(float32Bits) operandStack.size++ } func (operandStack *OperandStack) PopFloatValue() float32 { operandStack.size-- float32Bits := uint32(operandStack.operands[operandStack.size].numericalValue) return math.Float32frombits(float32Bits) } func (operandStack *OperandStack) PushLongValue(value int64) { operandStack.operands[operandStack.size].numericalValue = int32(value) operandStack.operands[operandStack.size+1].numericalValue = int32(value >> 32) operandStack.size += 2 } func (operandStack *OperandStack) PopLongValue() int64 { operandStack.size -= 2 low := uint32(operandStack.operands[operandStack.size].numericalValue) high := uint32(operandStack.operands[operandStack.size+1].numericalValue) return int64(high)<<32 | int64(low) } func (operandStack *OperandStack) PushDoubleValue(value float64) { float64Bits := math.Float64bits(value) operandStack.PushLongValue(int64(float64Bits)) } func (operandStack *OperandStack) PopDoubleValue() float64 { 
float64Bits := uint64(operandStack.PopLongValue()) return math.Float64frombits(float64Bits) } func (operandStack *OperandStack) PushReferenceValue(referenceValue *heap.Object) { operandStack.operands[operandStack.size].referenceValue = referenceValue operandStack.size++ } func (operandStack *OperandStack) PopReferenceValue() *heap.Object { operandStack.size-- referenceValue := operandStack.operands[operandStack.size].referenceValue operandStack.operands[operandStack.size].referenceValue = nil return referenceValue } func (operandStack *OperandStack) PushOperand(variable Variable) { operandStack.operands[operandStack.size] = variable operandStack.size++ } func (operandStack *OperandStack) PopOperand() Variable { operandStack.size-- return operandStack.operands[operandStack.size] } func (operandStack *OperandStack) GetReferenceValueBelowTop(n uint) *heap.Object { return operandStack.operands[operandStack.size-1-n].referenceValue } func (operandStack *OperandStack) Clear() { operandStack.size = 0 for i := range operandStack.operands { operandStack.operands[i].referenceValue = nil } }
runtime_data_area/operand_stack.go
0.692538
0.448668
operand_stack.go
starcoder
package atacdemultiplexutils import ( "bufio" "os" "strings" "github.com/biogo/store/interval" "time" "fmt" "strconv" "github.com/jinzhu/copier" ) //IntInterval Integer-specific intervals type IntInterval struct { Start, End int UID uintptr Payload interface{} } //PeakIntervalTreeObject Peak IntervalTree Object type PeakIntervalTreeObject struct { Chrintervaldict map[string]*interval.IntTree Intervalmapping map[uintptr]string Peakiddict *map[string]uint } //Overlap rule for two Interval func (i IntInterval) Overlap(b interval.IntRange) bool { // Search for intersection return i.End >= b.Start && i.Start <= b.End } //ID Return the ID of Interval func (i IntInterval) ID() uintptr { return i.UID } //Range Return the range of Interval func (i IntInterval) Range() interval.IntRange { return interval.IntRange{i.Start, i.End} } //String Return the string re[ of Interval func (i IntInterval) String() string { return fmt.Sprintf("(%d, %d) id: %d ####\n", i.Start, i.End, i.ID()) } //Peak Descriptor of a peak as string slice type Peak struct{ Slice [3]string Start, End int } //SymbolType Descriptor of a symbol: either a string or int slice type SymbolType struct { SymbolPos []int SymbolStr string } //StringToPeak Convert Peak string to peak func (peak * Peak) StringToPeak(str string) { var err1, err2 error split := strings.Split(str, "\t") (*peak).Start, err1 = strconv.Atoi(split[1]) (*peak).End, err2 = strconv.Atoi(split[2]) if err1 != nil || err2 != nil { panic(fmt.Sprintf( "Error when converting Peak: %s cannot be used as int ####\n", str)) } (*peak).Slice[0] = split[0] (*peak).Slice[1] = split[1] (*peak).Slice[2] = split[2] } //StringToPeakWithPos Convert Peak string to peak func (peak * Peak) StringToPeakWithPos(str string, refPos [3]int) { var err1, err2 error split := strings.Split(str, "\t") (*peak).Start, err1 = strconv.Atoi(split[refPos[1]]) (*peak).End, err2 = strconv.Atoi(split[refPos[2]]) if err1 != nil || err2 != nil { panic(fmt.Sprintf( "Error when converting 
Peak: %s cannot be used as int ####\n", str)) } (*peak).Slice[0] = split[refPos[0]] (*peak).Slice[1] = split[refPos[1]] (*peak).Slice[2] = split[refPos[2]] } //StringToPeakWithPosAndStart Convert Peak string to peak func (peak * Peak) StringToPeakWithPosAndStart(str string, refPosList []int, start int) { var refPos [3]int if len(refPosList) < start + 3 { panic(fmt.Sprintf("Size error with refPosList: %d and start %d", refPosList, start)) } refPos[0] = refPosList[0 + start] refPos[1] = refPosList[1 + start] refPos[2] = refPosList[2 + start] (*peak).StringToPeakWithPos(str, refPos) } //SplitToPeak Convert string split to peak func (peak * Peak) SplitToPeak(split []string) { var err1, err2 error (*peak).Start, err1 = strconv.Atoi(split[1]) (*peak).End, err2 = strconv.Atoi(split[2]) if err1 != nil || err2 != nil { panic(fmt.Sprintf( "Error when converting Peak: %s cannot be used as int ####\n", split)) } (*peak).Slice[0] = split[0] (*peak).Slice[1] = split[1] (*peak).Slice[2] = split[2] } /*PeakToString Convert Peak to string*/ func (peak * Peak) PeakToString() (peakstr string) { return fmt.Sprintf("%s\t%s\t%s", (*peak).Slice[0], (*peak).Slice[1], (*peak).Slice[2]) } /*Chr return the chromosome of the peak */ func (peak * Peak) Chr() (chr string) { return (*peak).Slice[0] } //StringToPeakNoCheck Convert Peak string to peak func (peak * Peak) StringToPeakNoCheck(str string) { split := strings.Split(str, "\t") (*peak).Slice[0] = split[0] (*peak).Slice[1] = split[1] (*peak).Slice[2] = split[2] } /*PEAKIDDICT peak ID<->pos */ var PEAKIDDICT map[string]uint /*CHRINTERVALDICT chr ID <-> interval tree */ var CHRINTERVALDICT map[string]*interval.IntTree /*CHRINTERVALDICTTHREAD threadNB -> chr ID -> pos */ var CHRINTERVALDICTTHREAD map[int]map[string]*interval.IntTree /*INTERVALMAPPING peak ID pos <->pos */ var INTERVALMAPPING map[uintptr]string /*PEAKSYMBOLDICT map[peak]symbol */ var PEAKSYMBOLDICT map[Peak][]string /*PEAKSCOREDICT dict containing score for ref peaks*/ var 
PEAKSCOREDICT map[Peak]float64 /*LoadSymbolFile peaksymbolfile, peakfile Filename*/ func LoadSymbolFile(peaksymbolfile, peakfile Filename) { var scannerPeak *bufio.Scanner var filePeak *os.File var split []string var peakl Peak var symbol string PEAKSYMBOLDICT = make(map[Peak][]string) if peaksymbolfile == "" { return } isOption1 := true if peakfile == "" { isOption1 = false } else { scannerPeak, filePeak = peakfile.ReturnReader(0) defer CloseFile(filePeak) } scanner, file := peaksymbolfile.ReturnReader(0) defer CloseFile(file) for scanner.Scan() { split = strings.Split(scanner.Text(), "\t") if len(split) == 4 { isOption1 = false } if !isOption1 && len(split) != 4 { panic(fmt.Sprintf( "Error line %s from symbol file should be <symbol>\t<chromosome>\t<start>\t<stop>\n", split)) } symbol = split[0] if isOption1 { scannerPeak.Scan() peakl.StringToPeak(scannerPeak.Text()) } else { peakl.SplitToPeak(split) } PEAKSYMBOLDICT[peakl] = append(PEAKSYMBOLDICT[peakl], symbol) } } /*LoadRefBedFileWithSymbol peaksymbolfile, peakfile Filename*/ func LoadRefBedFileWithSymbol(peaksymbolfile Filename) { symbol := SymbolType{} symbol.SymbolPos = []int{3} loadRefBedFileWithSymbol(peaksymbolfile, "\t", symbol, []int{0, 1, 2}, -1) } /*LoadRefCustomFileWithSymbol peaksymbolfile, peakfile Filename*/ func LoadRefCustomFileWithSymbol( peaksymbolfile Filename, sep string, symbol SymbolType, refPos []int, scorefiltercolumns int) { loadRefBedFileWithSymbol(peaksymbolfile, sep, symbol, refPos, scorefiltercolumns) } /*CheckIfPeakPosIsMutltipleOf3 check if list is multiple of 3 */ func CheckIfPeakPosIsMutltipleOf3(peakPos []int) (numberOfPeaks int) { if len(peakPos) % 3 != 0 { panic(fmt.Sprintf( "peakPos %d from should b a multiple of 3", peakPos)) } numberOfPeaks = len(peakPos) / 3 return numberOfPeaks } /*loadRefBedFileWithSymbol peaksymbolfile, peakfile Filename scorefiltercolumns is used only if positive or null and is used to keep only the top scored symbol */ func loadRefBedFileWithSymbol( 
peaksymbolfile Filename, sep string, symbol SymbolType, peakPos []int, scorefiltercolumns int) { var peakl Peak var symbolSlice, split, peaksplit []string var pos, i int var symbolStr string var peakPosTriplet [3]int var score, score2 float64 var err error var isInside bool if scorefiltercolumns > -1 { PEAKSCOREDICT = make(map[Peak]float64) } symbolSlice = make([]string, len(symbol.SymbolPos)) peaksplit = make([]string, 3) PEAKSYMBOLDICT = make(map[Peak][]string) maxPeakPos := MaxIntList(append(peakPos[:], symbol.SymbolPos...)) numberOfPeaks := CheckIfPeakPosIsMutltipleOf3(peakPos) scanner, file := peaksymbolfile.ReturnReader(0) defer CloseFile(file) for scanner.Scan() { split = strings.Split(scanner.Text(), sep) if split[0][0] == '#' { continue } if len(split) < maxPeakPos { panic(fmt.Sprintf( "Error line %s from symbol file should have at least enough fields as decribe in pos index: %d\n", split, peakPos)) } for peakNb := 0; peakNb < numberOfPeaks; peakNb++ { peakPosTriplet[0] = peakPos[0 + 3 * peakNb] peakPosTriplet[1] = peakPos[1 + 3 * peakNb] peakPosTriplet[2] = peakPos[2 + 3 * peakNb] for i, pos = range symbol.SymbolPos { if len(split) < pos { panic(fmt.Sprintf( "Index out of range for loadRefBedFileWithSymbol func line: %s file: %s", split, peaksymbolfile.String())) } symbolSlice[i] = split[pos] } for i, pos = range peakPosTriplet { if len(split) < pos { panic(fmt.Sprintf( "Index out of range (2) for loadRefBedFileWithSymbol func line: %s file: %s", split, peaksymbolfile.String())) } peaksplit[i] = split[pos] } peakl.SplitToPeak(peaksplit) if symbol.SymbolStr != "" { symbolStr = symbol.SymbolStr } else { symbolStr = strings.Join(symbolSlice, sep) } //scorefiltercolumns is used only if positive or null and is used to keep only the top scored symbol if scorefiltercolumns > -1 { if len(split) < scorefiltercolumns { panic(fmt.Sprintf("Line: %s cannot be splitted in more than %d part to collect score", split, scorefiltercolumns)) } score, err = 
strconv.ParseFloat(split[scorefiltercolumns], 64) Check(err) if score2, isInside = PEAKSCOREDICT[peakl];!isInside && score > score2 { PEAKSCOREDICT[peakl] = score PEAKSYMBOLDICT[peakl] = []string{symbolStr} } } else { PEAKSYMBOLDICT[peakl] = append(PEAKSYMBOLDICT[peakl], symbolStr) } } } } /*CreatePeakIntervalTreeCustom ...*/ func CreatePeakIntervalTreeCustom(peakPos []int, sep string) { createPeakIntervalTree(peakPos, sep, false) } /*CreatePeakIntervalTree ...*/ func CreatePeakIntervalTree() { createPeakIntervalTree([]int{0, 1, 2}, "\t", false) } /*createPeakIntervalTree ...*/ func createPeakIntervalTree(peakPos []int, sep string, verbose bool) { var split []string var chroStr string var start, end int var err error var isInside bool tStart := time.Now() CHRINTERVALDICT = make(map[string]*interval.IntTree) INTERVALMAPPING = make(map[uintptr]string) numberOfPeaks := CheckIfPeakPosIsMutltipleOf3(peakPos) maxPeakPos := MaxIntList(peakPos) for key, pos := range PEAKIDDICT { split = strings.Split(key, sep) if len(split) < maxPeakPos { panic(fmt.Sprintf( "Error from createPeakIntervalTree. 
Line %s from symbol file should have at least enough fields as decribe in pos index: %d\n", split, peakPos)) } for peakNb := 0; peakNb < numberOfPeaks; peakNb++ { chroStr = split[peakPos[0 + 3 * peakNb]] start, err = strconv.Atoi(split[peakPos[1 + 3 * peakNb]]) Check(err) end, err = strconv.Atoi(strings.Trim(split[peakPos[2 + 3 * peakNb]], "\n")) Check(err) inter := IntInterval{ Start: start, End: end} inter.UID = uintptr(uintptr(pos)) if _, isInside = CHRINTERVALDICT[chroStr];!isInside { CHRINTERVALDICT[chroStr] = &interval.IntTree{} } err = CHRINTERVALDICT[chroStr].Insert(inter, false) Check(err) INTERVALMAPPING[inter.ID()] = key } } tDiff := time.Since(tStart) if verbose { fmt.Printf("Create peak index done in time: %f s \n", tDiff.Seconds()) } } /*createPeakIntervalTreeObject create a peak intervall dict object*/ func createPeakIntervalTreeObject(peakiddict map[string]uint, peakPos []int, verbose bool) ( intervalObject PeakIntervalTreeObject) { var chroStr string var err error var isInside bool var peak Peak var peakPosTriplet [3]int numberOfPeaks := CheckIfPeakPosIsMutltipleOf3(peakPos) tStart := time.Now() intervalObject.Chrintervaldict = make(map[string]*interval.IntTree) intervalObject.Intervalmapping = make(map[uintptr]string) intervalObject.Peakiddict = &peakiddict for key, pos := range peakiddict { for peakNb := 0; peakNb < numberOfPeaks; peakNb++ { peakPosTriplet[0] = peakPos[0 + 3 * peakNb] peakPosTriplet[1] = peakPos[1 + 3 * peakNb] peakPosTriplet[2] = peakPos[2 + 3 * peakNb] peak.StringToPeakWithPos(key, peakPosTriplet) chroStr = peak.Chr() int := IntInterval{ Start: peak.Start, End: peak.End} int.UID = uintptr(uintptr(pos)) if _, isInside = intervalObject.Chrintervaldict[chroStr];!isInside { intervalObject.Chrintervaldict[chroStr] = &interval.IntTree{} } err = intervalObject.Chrintervaldict[chroStr].Insert(int, false) Check(err) intervalObject.Intervalmapping[int.ID()] = peak.PeakToString() } } tDiff := time.Since(tStart) if verbose { 
fmt.Printf("Create peak index done in time: %f s \n", tDiff.Seconds()) } return intervalObject } /*CreatePeakIntervalTreeObjectFromFile create a peak intervall dict object*/ func CreatePeakIntervalTreeObjectFromFile(bedfile Filename, sep string, peakPos []int) ( intervalObject PeakIntervalTreeObject) { peakiddict := loadPeaksDictCustom(bedfile, sep, peakPos) intervalObject = createPeakIntervalTreeObject(peakiddict, peakPos, false) return intervalObject } /*LoadPeaksDict load peak file return map[string]int*/ func LoadPeaksDict(fname Filename) (peakiddict map[string]uint) { peakiddict = make(map[string]uint) loadPeaks(fname, peakiddict, "\t", []int{0, 1, 2}, false, true, -1, make(map[uint]string)) return peakiddict } /*loadPeaksDictCustom load peak file return map[string]int*/ func loadPeaksDictCustom(fname Filename, sep string, peakPos []int) ( peakiddict map[string]uint) { peakiddict = make(map[string]uint) loadPeaks(fname, peakiddict, sep, peakPos, false, true, -1, make(map[uint]string)) return peakiddict } /*LoadPeaks load peak file globally*/ func LoadPeaks(fname Filename, trim bool, keepLine bool) int { PEAKIDDICT = make(map[string]uint) return loadPeaks(fname, PEAKIDDICT, "\t", []int{0, 1, 2}, trim, keepLine, -1, make(map[uint]string)) } /*LoadPeaksCustom load peak file globally*/ func LoadPeaksCustom(fname Filename, sep string, peakPos []int) int { PEAKIDDICT = make(map[string]uint) return loadPeaks(fname, PEAKIDDICT, sep, peakPos, false, true, -1, make(map[uint]string) ) } /*loadPeaks load peak file globally*/ func loadPeaks(fname Filename, peakiddict map[string]uint, sep string, peakPos []int, trim bool, keepLine bool, directionCol int, directionDict map[uint]string) (totNbPeaks int) { var scanner *bufio.Scanner var file *os.File var line string var isInside bool var split, split2 []string scanner, file = fname.ReturnReader(0) defer CloseFile(file) count := uint(0) max := MaxIntList(peakPos) nbPeaks := CheckIfPeakPosIsMutltipleOf3(peakPos) for 
scanner.Scan() { line = scanner.Text() if trim { line = strings.TrimPrefix(line, "chr") } if line[0] == '#' { continue } checkIfLineCanBeSplitIntoPeaks(line, sep, peakPos, max, nbPeaks) if !keepLine || directionCol > -1 { split = strings.Split(line, sep) } if !keepLine { split2 = []string{} for _, pos := range peakPos { split2 = append(split2, split[pos]) } line = strings.Join(split2, sep) } if _, isInside = peakiddict[line];!isInside { peakiddict[line] = count if directionCol > -1 { if len(split) <= directionCol { panic(fmt.Sprintf("Error when loading peak file %s! line split %s is out of range for sequence orientation (col nb %d)!", fname, line, directionCol)) } directionDict[count] = split[directionCol] } count++ } } return int(count) } func checkIfLineCanBeSplitIntoPeaks(line, sep string, peakPos []int, peakMax, nbPeaks int) { var err1, err2 error split := strings.Split(line, sep) if len(split) < peakMax { panic(fmt.Sprintf( "line: %s cannot be splitted in more than %d with separator: %s to match peak position: %d", line, peakMax, sep, peakPos)) } for peakNb := 0; peakNb < nbPeaks; peakNb++ { _, err1 = strconv.Atoi(split[1 + 3 * peakNb]) _, err2 = strconv.Atoi(split[2 + 3 * peakNb]) if err1 != nil || err2 != nil { panic(fmt.Sprintf( "line: %s cannot be converted into peak Position: %d", line, peakPos)) } } } /*LoadPeaksAndTrimAndReturnOrienation load peak fil, return peak peak id trimmed for "chr" -> dict and return Orientation dict (i.e. 
the sense of the peak) */ func LoadPeaksAndTrimandReturnOrientation(fname Filename, orientationColID int) (nbPeaks int, orientationDict map[uint]string) { PEAKIDDICT = make(map[string]uint) orientationDict = make(map[uint]string) nbPeaks = loadPeaks(fname, PEAKIDDICT, "\t", []int{0, 1, 2}, true, false, orientationColID, orientationDict) return nbPeaks, orientationDict } /*LoadPeaksSubset load peak file but using only a subset of peaks and return peak peak id -> dict*/ func LoadPeaksSubset(fname Filename, firstPeak, lastPeak int) { var scanner *bufio.Scanner var file *os.File peaknb := -1 scanner, file = fname.ReturnReader(0) defer CloseFile(file) var count uint PEAKIDDICT = make(map[string]uint) count = 0 for scanner.Scan() { peaknb++ if peaknb < firstPeak { continue } if peaknb >= lastPeak { break } line := scanner.Text() line = strings.Join(strings.Split(line, "\t")[:3], "\t") PEAKIDDICT[line] = count count++ } } /*InitIntervalDictsThreading Init interval dict threading map by copying the interval map for each trheads*/ func InitIntervalDictsThreading(threadnb int) { CHRINTERVALDICTTHREAD = make(map[int]map[string]*interval.IntTree) for i := 0;i< threadnb;i++ { CHRINTERVALDICTTHREAD[i] = make(map[string]*interval.IntTree) for key, tree := range CHRINTERVALDICT { CHRINTERVALDICTTHREAD[i][key] = &interval.IntTree{} err := copier.Copy(CHRINTERVALDICTTHREAD[i][key], tree) Check(err) } } } /*MaxIntList int give the max of list */ func MaxIntList(intlist []int) (max int) { for _, pos := range intlist { if pos > max { max = pos } } return max }
ATACdemultiplexUtils/intervalUtils.go
0.578091
0.490114
intervalUtils.go
starcoder
package chapter06 // BFS I find that BFS is better explained at // [Red Blob Games](https://www.redblobgames.com/pathfinding/a-star/introduction.html). With // lovely animations. But this one wasn't too bad either. // This implementation will use the simple map based search from the book. // Also note, that this implementation does not work if you happened to start from the seller. // Since the starting point is actually not in the queue. It starts with visiting the vertices // immediately. Also, this doesn't return the path in any format, it just says it found the // thing you are looking for. There is no proof that the path is the shortest. func BFS(graph map[string][]string, name string) bool { queue := graph[name] seen := map[string]struct{}{} var current string for len(queue) > 0 { current, queue = queue[0], queue[1:] if _, ok := seen[current]; !ok { if isSeller(current) { // found the seller return true } seen[current] = struct{}{} queue = append(queue, graph[current]...) } } return false } // dummy function which denotes when to stop the search. A seller simply // has a name which ends with an `m`. func isSeller(current string) bool { return current[len(current)-1] == 'm' } // GenericShortestPath will use the graph implementation from the type proposal here: // https://go.googlesource.com/proposal/+/refs/heads/master/design/43651-type-parameters.md#mutually-referencing-type-parameters // NodeConstraint is the type constraint for graph nodes: // they must have an `Edges` method that returns the Edges // that connect to this Node. type NodeConstraint[Edge any] interface { Edges() []Edge } // EdgeConstraint is the type constraint for graph edges: // they must have a `Nodes` method that returns the two Nodes // that this edge connects. type EdgeConstraint[Node any] interface { Nodes() (from, to Node) } // Graph is a graph composed of nodes and edges. 
type Graph[Node NodeConstraint[Edge], Edge EdgeConstraint[Node]] struct { Nodes []Node } // New returns a new graph given a list of nodes. func New[Node NodeConstraint[Edge], Edge EdgeConstraint[Node]](nodes []Node) *Graph[Node, Edge] { // need to implement something here. return &Graph[Node, Edge]{ Nodes: nodes, } } // GraphNode is a node in a graph which fulfills the type constraint of nodes. type GraphNode struct { Value string GraphEdges []*GraphEdge } // GraphEdge is an edge in a graph which fulfills the type constraint of edges. type GraphEdge struct { From *GraphNode To *GraphNode } func (g *GraphNode) Edges() []*GraphEdge { return g.GraphEdges } func (g *GraphEdge) Nodes() (*GraphNode, *GraphNode) { return g.From, g.To } // BFS returns the shortest path between two nodes, // as a list of edges. Eq is used to determine parity between nodes. The Eq functions makes // this function generic enough instead of trying to shoehorn some other `comparable` type // into `Node`. // Note, this would be much more user-friendly, if it would return a map of `cameFrom`s which // then could be traversed back to find the shortest path, rather than returning a slice of // edges which is difficult to follow back. func (g *Graph[Node, Edge]) BFS(from, to Node, eq func(self, other Node) bool) []Edge { queue := []Node{from} var path []Edge // this should be a map for O(1) recall, but in that case I would have to also get a function // which returns a unique identifier for the nodes. But since I already have an Eq function // I can use that. var seen []Node var current Node for len(queue) > 0 { current, queue = queue[0], queue[1:] edges := current.Edges() // For each edge, gather the nodes. The edges contain from -> to syntax, // so we ignore the `from` one. We are only interested in the `to`. 
for _, edge := range edges { path = append(path, edge) _, dest := edge.Nodes() if eq(dest, to) { return path } visited := false for _, s := range seen { if eq(s, dest) { visited = true break } } if !visited { seen = append(seen, dest) queue = append(queue, dest) } } } return nil }
chapter06/bfs.go
0.815049
0.598107
bfs.go
starcoder
package main

import "fmt"

// Coord4 represents an integral coordinate in 4-dimensional space.
type Coord4 struct {
	x int
	y int
	z int
	w int
}

// doDeltas4 enumerates the 80 offsets to a cell's 4D neighbors
// (3^4 combinations minus the zero offset).
func doDeltas4() []Coord4 {
	coords := make([]Coord4, 0, 80)
	for x := -1; x <= 1; x++ {
		for y := -1; y <= 1; y++ {
			for z := -1; z <= 1; z++ {
				for w := -1; w <= 1; w++ {
					if x == 0 && y == 0 && z == 0 && w == 0 {
						continue
					}
					coords = append(coords, Coord4{x, y, z, w})
				}
			}
		}
	}
	return coords
}

var neighborDeltas4 = doDeltas4()

const bits15 = 0x7FFF
const bit16 = 0x8000

// Packed4 defines the type of a packed coordinate: four 16-bit
// sign/magnitude fields (15 magnitude bits + 1 sign bit) in a uint64.
type Packed4 uint64

// bits16 encodes a signed int into one 16-bit sign/magnitude field.
func bits16(n int) Packed4 {
	if n < 0 {
		return bit16 | Packed4(bits15&(-n))
	}
	return Packed4(bits15 & n)
}

// toInt4 decodes one 16-bit sign/magnitude field back into an int.
func toInt4(n Packed4) int {
	if n&bit16 != 0 {
		return int(-(n & bits15))
	}
	return int(n & bits15)
}

// Pack calculates a unique value (provided that no coordinate
// goes past 15 bits of magnitude).
func (c Coord4) Pack() Packed4 {
	return bits16(c.w)<<48 | bits16(c.z)<<32 | bits16(c.y)<<16 | bits16(c.x)
}

// Unpack4 takes apart a packed value and puts it back into the
// coordinate.
func (p Packed4) Unpack4() Coord4 {
	return Coord4{
		x: toInt4(p),
		y: toInt4(p >> 16),
		z: toInt4(p >> 32),
		w: toInt4(p >> 48),
	}
}

// Board4 represents our "energy system": a sparse map of active cells.
type Board4 struct {
	grid map[Packed4]bool
}

// Active returns the state of a single cell. A missing key reads as the
// zero value false, so no comma-ok existence check is needed.
func (b *Board4) Active(c Coord4) bool {
	return b.grid[c.Pack()]
}

// NeighborActive returns the state of the cell at offset d from c.
func (b *Board4) NeighborActive(c Coord4, d Coord4) bool {
	c.x += d.x
	c.y += d.y
	c.z += d.z
	c.w += d.w
	return b.grid[c.Pack()]
}

// CountActive gets the total number of active cells on the board.
// Entries may exist with a false value, so they are filtered.
func (b *Board4) CountActive() int {
	total := 0
	for _, state := range b.grid {
		if state {
			total++
		}
	}
	return total
}

// CountNeighbors returns the number of active neighbors for a grid coordinate.
func (b *Board4) CountNeighbors(coord Coord4) int {
	total := 0
	for _, neighbor := range neighborDeltas4 {
		if b.NeighborActive(coord, neighbor) {
			total++
		}
	}
	return total
}

// Neighborhood returns a collection of all of the possible
// cells that need to be inspected for a given generation: every stored
// cell plus its 80 neighbors, keyed by packed coordinate to deduplicate.
func (b *Board4) Neighborhood() map[Packed4]Coord4 {
	possibles := make(map[Packed4]Coord4)
	for pcoord := range b.grid {
		for _, n := range neighborDeltas4 {
			coord := pcoord.Unpack4()
			coord.x += n.x
			coord.y += n.y
			coord.z += n.z
			coord.w += n.w
			possibles[coord.Pack()] = coord
		}
	}
	return possibles
}

// Generation iterates a single generation into a new board using the
// standard rules: an active cell survives with 2 or 3 active neighbors,
// an inactive cell becomes active with exactly 3.
func (b *Board4) Generation() *Board4 {
	nextBoard := NewBoard4()
	for pcoord, coord := range b.Neighborhood() {
		neighbors := b.CountNeighbors(coord)
		if b.Active(coord) {
			if neighbors == 2 || neighbors == 3 {
				nextBoard.grid[pcoord] = true
			}
		} else {
			if neighbors == 3 {
				nextBoard.grid[pcoord] = true
			}
		}
	}
	return nextBoard
}

// printActives dumps every stored cell coordinate (debug helper).
func (b *Board4) printActives() {
	for pcoord := range b.grid {
		fmt.Println(pcoord.Unpack4())
	}
}

// NewBoard4 builds an empty board.
func NewBoard4() *Board4 {
	return &Board4{
		grid: make(map[Packed4]bool),
	}
}

// ParseBoard4 builds a new board from an input text: '#' marks an
// active cell at (col, row, 0, 0).
func ParseBoard4(lines []string) *Board4 {
	board := NewBoard4()
	for row, line := range lines {
		for col, ch := range line {
			if ch == '#' {
				coord := Coord4{col, row, 0, 0}
				board.grid[coord.Pack()] = true
			}
		}
	}
	return board
}
day17/space4.go
0.729616
0.414247
space4.go
starcoder
package hash

import (
	"math/rand"

	"github.com/sachaservan/vec"
)

/*
Construct a higher dimensional lattice as a direct product of copies of the leech lattice
For example, with 2 copies we have:
1) view a 48-dimensional vector as the concatenation of 2 24-dimensional vectors
2) Find the closest leech lattice point to each 24-dimensional vector, and then concatenate the result.
The error increases, but only by a factor of sqrt(2).
This reduces the error from dimensionality reduction, at the cost of error in the lattice
Adding more tables/multiprobes can reduce lattice error, but not dimensionality reduction error
So this is a valuable trade off
Permuting coordinates does not change distances and this can make sure the coordinates are chosen at random
*/

// MultiLatticeHash hashes a dim-dimensional vector by splitting it into
// `copies` contiguous spans (after a fixed random coordinate permutation),
// hashing each span with its own sub-lattice hash, and compressing the
// concatenated result with a universal hash.
type MultiLatticeHash struct {
	Hashes      []*LatticeHash // one sub-lattice hash per span
	Permutation []int          // fixed random permutation applied to input coordinates
	Spans       [][2]int       // half-open [start, end) index ranges into the permuted vector
	UHash       *UniversalHash // final compression of the concatenated lattice point
}

// NewMultiLatticeHash builds a hash over dim dimensions split into
// `copies` sub-lattices; width and max are forwarded to each LatticeHash.
// NOTE(review): the universal hash is sized copies*24 — presumably 24 is
// the leech lattice dimension of each sub-hash output, so this assumes
// each LatticeHash emits a 24-coordinate point regardless of its input
// span size; confirm against NewLatticeHash.
func NewMultiLatticeHash(dim, copies int, width, max float64) *MultiLatticeHash {
	m := &MultiLatticeHash{}
	m.Permutation = rand.Perm(dim)
	m.Hashes = make([]*LatticeHash, copies)
	m.Spans = Spans(dim, copies)
	for i := 0; i < copies; i++ {
		// each sub-hash covers exactly its span's width in dimensions
		m.Hashes[i] = NewLatticeHash(m.Spans[i][1]-m.Spans[i][0], width, max)
	}
	m.UHash = NewUniversalHash(copies * 24)
	return m
}

// Spans divides the input dimension `total` into `numSpans` contiguous
// half-open ranges as evenly as possible: the first total%numSpans spans
// get one extra coordinate.
func Spans(total int, numSpans int) [][2]int {
	Spans := make([][2]int, numSpans)
	start := 0
	skip := total / numSpans
	extra := total % numSpans
	for i := range Spans {
		end := start + skip
		if extra > 0 {
			end++
			extra--
		}
		Spans[i] = [2]int{start, end}
		start = end
	}
	return Spans
}

// Hash returns the final uint64 hash of v: the concatenated nearest
// lattice points compressed through the universal hash.
func (m *MultiLatticeHash) Hash(v *vec.Vec) uint64 {
	h, _ := m.HashWithDist(v)
	return m.UHash.Hash(h)
}

// HashWithDist permutes v, hashes each span with its sub-lattice, and
// returns the concatenated lattice point along with the summed
// per-span distances (the total error introduced by the lattice).
func (m *MultiLatticeHash) HashWithDist(v *vec.Vec) (*vec.Vec, float64) {
	// apply the fixed coordinate permutation
	permuted := make([]float64, v.Size())
	for i := range permuted {
		permuted[i] = v.Coord(m.Permutation[i])
	}

	totalDist := 0.0
	totalHash := make([]float64, 0)
	for i := range m.Hashes {
		hash, dist := m.Hashes[i].HashWithDist(vec.NewVec(permuted[m.Spans[i][0]:m.Spans[i][1]]))
		totalHash = append(totalHash, hash.Coords...)
		totalDist += dist
	}
	return vec.NewVec(totalHash), totalDist
}

// MultiProbeHashWithDist returns the `probes` closest combined lattice
// points to v and their distances. We have to iterate through each of
// the closest points of the sublattices to find the closest combined
// points; NewDistanceSearchQueue merges the per-span candidate lists by
// total distance.
func (m *MultiLatticeHash) MultiProbeHashWithDist(v *vec.Vec, probes int) ([]*vec.Vec, []float64) {
	// apply the same fixed coordinate permutation as HashWithDist
	permuted := make([]float64, v.Size())
	for i := range permuted {
		permuted[i] = v.Coord(m.Permutation[i])
	}

	// Hashes[i][j] = j-th closest lattice point of sub-lattice i;
	// sources[i][j] wraps its candidate index and distance for the queue
	Hashes := make([][]*vec.Vec, len(m.Hashes))
	sources := make([][]*Element, len(m.Hashes))
	for i := range m.Hashes {
		var distances []float64
		Hashes[i], distances = m.Hashes[i].MultiProbeHashWithDist(vec.NewVec(permuted[m.Spans[i][0]:m.Spans[i][1]]), probes)
		sources[i] = make([]*Element, len(distances))
		for j, d := range distances {
			sources[i][j] = &Element{coords: []int{j}, distance: d}
		}
	}

	// merge the per-span candidates into the `probes` best combinations
	d := NewDistanceSearchQueue(probes, sources)
	c := d.Search()

	// assemble each combination: e.coords is assumed to hold one chosen
	// candidate index per sub-lattice, in span order — TODO confirm
	// against DistanceSearchQueue
	output := make([]*vec.Vec, probes)
	distances := make([]float64, probes)
	for k, e := range c {
		hash := make([]float64, 0)
		for j := range e.coords {
			hash = append(hash, Hashes[j][e.coords[j]].Coords...)
		}
		distances[k] = e.distance
		output[k] = vec.NewVec(hash)
	}
	return output, distances
}

// MultiHash returns the uint64 hashes of the `probes` closest combined
// lattice points to v.
func (m *MultiLatticeHash) MultiHash(v *vec.Vec, probes int) []uint64 {
	vs, _ := m.MultiProbeHashWithDist(v, probes)
	Hashes := make([]uint64, probes)
	for i := range vs {
		Hashes[i] = m.UHash.Hash(vs[i])
	}
	return Hashes
}
hash/multilattice_hash.go
0.80479
0.673245
multilattice_hash.go
starcoder
package main import ( rl "goray/raylib" "math" "runtime" ) var ( G = 400 PLAYER_JUMP_SPD = float32(350.0) PLAYER_HOR_SPD = float32(200.0) ) type Player struct { Position rl.Vector2 Speed float32 CanJump bool } type EnvItem struct { Rect rl.Rectangle Blocking bool Color rl.Color } func init() { runtime.LockOSThread() } func main() { screenWidth := int32(800) screenHeight := int32(450) rl.InitWindow(screenWidth, screenHeight, "raylib [core] example - 2d camera") defer rl.CloseWindow() player := Player{ Position: rl.NewVector2(400, 280), Speed: 0, CanJump: false, } envItems := []*EnvItem{ { Rect: rl.NewRectangle(0, 0, 1000, 400), Blocking: false, Color: rl.LightGray, }, { Rect: rl.NewRectangle(0, 400, 1000, 200), Blocking: true, Color: rl.Gray, }, { Rect: rl.NewRectangle(300, 200, 400, 10), Blocking: true, Color: rl.Gray, }, { Rect: rl.NewRectangle(250, 300, 100, 10), Blocking: true, Color: rl.Gray, }, { Rect: rl.NewRectangle(650, 300, 100, 10), Blocking: true, Color: rl.Gray, }, } camera := rl.NewCamera2D( rl.NewVector2(float32(screenWidth/2), float32(screenHeight/2)), player.Position, 0, 1.0, ) cameraOption := 0 cameraDescriptions := []string{ "Follow player center", "Follow player center, but clamp to map edges", "Follow player center; smoothed", "Follow player center horizontally; updateplayer center vertically after landing", "Player push camera on getting too close to screen edge", } rl.SetTargetFPS(60) for !rl.WindowShouldClose() { deltaTime := rl.GetFrameTime() UpdatePlayer(&player, envItems, deltaTime) camera.Zoom += float32(rl.GetMouseWheelMove()) * 0.05 if camera.Zoom > 3.0 { camera.Zoom = 3.0 } else if camera.Zoom < 0.25 { camera.Zoom = 0.25 } if rl.IsKeyPressed(int32(rl.KEY_R)) { camera.Zoom = 1.0 player.Position = rl.NewVector2(400, 280) } if rl.IsKeyPressed(int32(rl.KEY_C)) { cameraOption = (cameraOption + 1) % 5 } switch { case cameraOption == 0: UpdateCameraCenter(&camera, &player, screenWidth, screenHeight) case cameraOption == 1: 
UpdateCameraCenterInsideMap(&camera, &player, envItems, screenWidth, screenHeight) case cameraOption == 2: UpdateCameraCenterSmoothFollow(&camera, &player, deltaTime, screenWidth, screenHeight) case cameraOption == 3: UpdateCameraEvenOutOnLanding(&camera, &player, deltaTime, screenWidth, screenHeight) case cameraOption == 4: UpdateCameraPlayerBoundsPush(&camera, &player, screenWidth, screenHeight) } rl.BeginDrawing() rl.ClearBackground(rl.LightGray) rl.BeginMode2D(camera) for _, item := range envItems { rl.DrawRectangleRec(item.Rect, item.Color) } playerRect := rl.NewRectangle(player.Position.X-20, player.Position.Y-40, 40, 40) rl.DrawRectangleRec(playerRect, rl.Red) rl.EndMode2D() rl.DrawText("Controls:", 20, 20, 10, rl.Black) rl.DrawText("- Right/Left to move", 40, 40, 10, rl.DarkGray) rl.DrawText("- Space to jump", 40, 60, 10, rl.DarkGray) rl.DrawText("- Mouse Wheel to Zoom in-out, R to reset zoom", 40, 80, 10, rl.DarkGray) rl.DrawText("- C to change camera mode", 40, 100, 10, rl.DarkGray) rl.DrawText("Current camera mode:", 20, 120, 10, rl.Black) rl.DrawText(cameraDescriptions[cameraOption], 40, 140, 10, rl.DarkGray) rl.EndDrawing() } } func UpdatePlayer(player *Player, envItems []*EnvItem, delta float32) { if rl.IsKeyDown(int32(rl.KEY_LEFT)) { player.Position.X -= PLAYER_HOR_SPD * delta } if rl.IsKeyDown(int32(rl.KEY_RIGHT)) { player.Position.X += PLAYER_HOR_SPD * delta } if rl.IsKeyDown(int32(rl.KEY_SPACE)) && player.CanJump { player.Speed = -PLAYER_JUMP_SPD player.CanJump = false } hitObstacle := false for _, ei := range envItems { if ei.Blocking && ei.Rect.X <= player.Position.X && ei.Rect.X+ei.Rect.Width >= player.Position.X && ei.Rect.Y >= player.Position.Y && ei.Rect.Y < player.Position.Y+player.Speed*delta { hitObstacle = true player.Speed = 0.0 player.Position.Y = ei.Rect.Y } } if !hitObstacle { player.Position.Y += player.Speed * delta player.Speed += float32(G) * delta player.CanJump = false } else { player.CanJump = true } } func 
UpdateCameraCenter(camera *rl.Camera2D, player *Player, width int32, height int32) { camera.Offset = rl.NewVector2(float32(width/2), float32(height/2)) camera.Target = player.Position } func UpdateCameraCenterInsideMap(camera *rl.Camera2D, player *Player, envItems []*EnvItem, width int32, height int32) { camera.Offset = rl.NewVector2(float32(width/2), float32(height/2)) minX, minY, maxX, maxY := 1000.0, 1000.0, -1000.0, -1000.0 for _, ei := range envItems { minX = math.Min(float64(ei.Rect.X), minX) maxX = math.Max(float64(ei.Rect.X+ei.Rect.Width), maxX) minY = math.Min(float64(ei.Rect.Y), minY) maxY = math.Max(float64(ei.Rect.Y+ei.Rect.Height), maxY) } max := rl.GetWorldToScreen2D(rl.NewVector2(float32(maxX), float32(maxY)), *camera) min := rl.GetWorldToScreen2D(rl.NewVector2(float32(minX), float32(minY)), *camera) if max.X < float32(width) { camera.Offset.X = float32(width - (int32(max.X) - width/2)) } if max.Y < float32(height) { camera.Offset.Y = float32(height - (int32(max.Y) - height/2)) } if min.X > 0 { camera.Offset.X = float32(width/2) - min.X } if min.Y > 0 { camera.Offset.Y = float32(height/2) - min.Y } } func UpdateCameraCenterSmoothFollow(camera *rl.Camera2D, player *Player, delta float32, width int32, height int32) { minSpeed := 30.0 minEffectLength := 10.0 fractionSpeed := 0.8 camera.Offset = rl.NewVector2(float32(width/2), float32(height/2)) diff := rl.Vector2Subtract(player.Position, camera.Target) length := rl.Vector2Length(diff) if length > float32(minEffectLength) { speed := math.Max(fractionSpeed*float64(length), minSpeed) camera.Target = rl.Vector2Add(camera.Target, rl.Vector2Scale(diff, float32(speed)*delta/length)) } } func UpdateCameraEvenOutOnLanding(camera *rl.Camera2D, player *Player, delta float32, width int32, height int32) { evenOutSpeed := float32(700) eveningOut := false evenOutTarget := float32(0) camera.Offset = rl.NewVector2(float32(width/2), float32(height/2)) camera.Target.X = player.Position.X if eveningOut { if evenOutTarget > 
camera.Target.Y { camera.Target.Y += evenOutSpeed * delta if camera.Target.Y > evenOutTarget { camera.Target.Y = evenOutTarget eveningOut = false } } else { camera.Target.Y -= evenOutSpeed * delta if camera.Target.Y < evenOutTarget { camera.Target.Y = evenOutTarget eveningOut = false } } } else { if (player.CanJump && player.Speed == 0) && (player.Position.Y != camera.Target.Y) { eveningOut = true evenOutTarget = player.Position.Y } } } func UpdateCameraPlayerBoundsPush(camera *rl.Camera2D, player *Player, width int32, height int32) { bbox := rl.NewVector2(0.2, 0.2) bboxWorldMin := rl.GetScreenToWorld2D( rl.NewVector2( (1.0-bbox.X)*0.5*float32(width), (1-bbox.Y)*0.5*float32(height), ), *camera, ) bboxWorldMax := rl.GetScreenToWorld2D( rl.NewVector2( (1.0-bbox.X)*0.5*float32(width), (1-bbox.Y)*0.5*float32(height), ), *camera, ) camera.Offset = rl.NewVector2( (1.0-bbox.X)*0.5*float32(width), (1-bbox.Y)*0.5*float32(height), ) if player.Position.X < bboxWorldMin.X { camera.Target.X = player.Position.X } if player.Position.Y < bboxWorldMin.Y { camera.Target.Y = player.Position.Y } if player.Position.X > bboxWorldMax.Y { camera.Target.X = bboxWorldMin.X + (player.Position.X - bboxWorldMax.X) } if player.Position.Y > bboxWorldMax.Y { camera.Target.Y = bboxWorldMin.Y + (player.Position.Y - bboxWorldMax.Y) } }
examples/core/2d_camera_platformer/2d_camera_platformer.go
0.573917
0.4575
2d_camera_platformer.go
starcoder
package main

import (
	"errors"
	"fmt"
	"log"
	"strconv"
	"strings"
)

// rover is a vehicle on the plateau: a grid position plus a compass heading.
type rover struct {
	x         int
	y         int
	direction byte
}

// String renders the rover as "X Y D", e.g. "1 3 N".
func (r *rover) String() string {
	return fmt.Sprintf("%d %d %s", r.x, r.y, string(r.direction))
}

// directionNode records the headings reached by turning left or right.
type directionNode struct {
	left  byte
	right byte
}

// compass directions
const (
	north byte = 'N'
	east  byte = 'E'
	south byte = 'S'
	west  byte = 'W'
)

// directives
const (
	left  byte = 'L'
	right byte = 'R'
	move  byte = 'M'
)

// upper-right coordinates of the plateau (lower-left is implicitly 0 0)
var (
	xCoordinateLimit = 0
	yCoordinateLimit = 0
)

// directionNodeList maps each heading to its left/right neighbours.
// Initialized as a composite literal instead of in init(): same contents,
// no ordering concerns, and it can never be observed uninitialized.
var directionNodeList = map[byte]directionNode{
	north: {left: west, right: east},
	east:  {left: north, right: south},
	south: {left: east, right: west},
	west:  {left: south, right: north},
}

// directive applies a single instruction (L, R or M) to the rover.
// Moves that would leave the plateau are reported as errors.
func directive(r *rover, d byte) error {
	switch d {
	case left:
		r.direction = directionNodeList[r.direction].left
	case right:
		r.direction = directionNodeList[r.direction].right
	case move:
		switch r.direction {
		case north:
			r.y++
		case east:
			r.x++
		case south:
			r.y--
		case west:
			r.x--
		}
		// BUG FIX: also reject negative coordinates — moving south or west
		// off the plateau was previously accepted silently.
		if r.x < 0 || r.x > xCoordinateLimit {
			return fmt.Errorf("x coordinate pointed to (%d) a point outside the plateau", r.x)
		}
		if r.y < 0 || r.y > yCoordinateLimit {
			return fmt.Errorf("y coordinate pointed to (%d) a point outside the plateau", r.y)
		}
	default:
		return fmt.Errorf("unexpected directive: %s", string(d))
	}
	return nil
}

// parseUpperRightCoordinates parses a raw "X Y" coordinates string into the
// plateau's upper-right corner.
func parseUpperRightCoordinates(rawCoordinates string) (x int, y int, err error) {
	upperRightCoordinates := strings.Split(rawCoordinates, " ")
	// (The original validated the length twice with identical conditions;
	// one check suffices.)
	if len(upperRightCoordinates) != 2 {
		return 0, 0, errors.New("failed to parse upper-right coordinates")
	}

	// upper right coordinates value for x
	x, err = strconv.Atoi(upperRightCoordinates[0])
	if err != nil {
		return 0, 0, err
	}

	// upper right coordinates value for y
	y, err = strconv.Atoi(upperRightCoordinates[1])
	if err != nil {
		return 0, 0, err
	}
	return x, y, nil
}

// parseRoverPosition parses a rover's "X Y D" position line.
func parseRoverPosition(rawPosition string) (*rover, error) {
	var err error
	position := strings.Split(rawPosition, " ")
	if len(position) != 3 {
		return nil, errors.New("failed to set rover position, position data should contain X, Y coordinate and direction (for example: \"3 2 W\")")
	}

	r := &rover{}

	// rover's x coordinate
	r.x, err = strconv.Atoi(position[0])
	if err != nil {
		return nil, err
	}

	// rover's y coordinate
	r.y, err = strconv.Atoi(position[1])
	if err != nil {
		return nil, err
	}

	// rover's heading: exactly one of N, E, S, W
	direction := position[2]
	if len(direction) != 1 {
		return nil, errors.New("failed to parse rover's direction")
	}
	switch direction[0] {
	case north, east, south, west:
		r.direction = direction[0]
	default:
		return nil, fmt.Errorf("unexpected value for direction: %s", direction)
	}
	return r, nil
}

// Run is the root function of the program. It expects five newline-separated
// lines: plateau size, first rover position, first rover directives, second
// rover position, second rover directives; it returns both final rovers.
func Run(input string) (firstRover *rover, secondRover *rover, err error) {
	parseInput := strings.Split(input, "\n")
	if len(parseInput) != 5 {
		return nil, nil, errors.New("error occurred while parsing input")
	}

	xCoordinateLimit, yCoordinateLimit, err = parseUpperRightCoordinates(parseInput[0])
	if err != nil {
		return nil, nil, fmt.Errorf("error occurred while parsing upper right coordinates (%w)", err)
	}

	// set first rover's position
	firstRover, err = parseRoverPosition(parseInput[1])
	if err != nil {
		return nil, nil, fmt.Errorf("error occurred while parsing first rover position (%w)", err)
	}

	// first rover's directives
	for _, d := range []byte(parseInput[2]) {
		if err = directive(firstRover, d); err != nil {
			return nil, nil, fmt.Errorf("error occurred while applying directive (%w)", err)
		}
	}

	// set second rover's position
	// BUG FIX: this error message previously said "first rover".
	secondRover, err = parseRoverPosition(parseInput[3])
	if err != nil {
		return nil, nil, fmt.Errorf("error occurred while parsing second rover position (%w)", err)
	}

	// second rover's directives
	for _, d := range []byte(parseInput[4]) {
		if err = directive(secondRover, d); err != nil {
			return nil, nil, fmt.Errorf("error occurred while applying directive (%w)", err)
		}
	}
	return firstRover, secondRover, nil
}

func main() {
	input := "5 5\n1 2 N\nLMLMLMLMM\n3 3 E\nMMRMMRMRRM"
	firstRover, secondRover, err := Run(input)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("Result of First Rover: \"%s\"\n", firstRover.String())
	fmt.Printf("Result of Second Rover: \"%s\"\n", secondRover.String())
}
main.go
0.627381
0.405625
main.go
starcoder
package assert

import (
	"errors"
	"reflect"
	"runtime/debug"
	"strconv"
	"strings"
	"testing"
)

var (
	// errorInterface is the reflect.Type of the built-in error interface,
	// used by IsOk to detect error values.
	errorInterface = reflect.TypeOf((*error)(nil)).Elem()
)

// Matcher holds the current state of the assertion.
type Matcher struct {
	t      *testing.T
	actual interface{}
	match  bool
}

// With creates a new Matcher with the current test reporter.
func With(t *testing.T) *Matcher {
	m := new(Matcher)
	m.t = t
	return m
}

// That specifies the actual value under test.
func (m *Matcher) That(actual interface{}) *Matcher {
	if m.t == nil {
		panic("Use With(*testing.T) to initialize Matcher")
	}
	m.actual = actual
	return m
}

// ThatPanics asserts that invoking actual panics.
func (m *Matcher) ThatPanics(actual func()) {
	defer func() {
		// recover returns nil when no panic occurred — that is the failure.
		if r := recover(); r == nil {
			m.t.Errorf("[%s] Did not panic.", testLine())
			m.match = false
		}
	}()
	m.match = true
	actual()
}

// IsNil verifies the tested value is `nil`.
func (m *Matcher) IsNil() *Matcher {
	if m.match = reflect.TypeOf(m.actual) == nil; !m.match {
		m.t.Errorf("[%s] is not nil", testLine())
	}
	return m
}

// IsNotNil verifies the tested value is not `nil`.
func (m *Matcher) IsNotNil() *Matcher {
	if m.match = reflect.TypeOf(m.actual) != nil; !m.match {
		m.t.Errorf("[%s] is nil", testLine())
	}
	return m
}

// IsEmpty matches an empty string.
func (m *Matcher) IsEmpty() *Matcher {
	v := reflect.ValueOf(m.actual)
	if m.match = v.IsValid() && v.Kind() == reflect.String && len(v.String()) == 0; !m.match {
		m.t.Errorf("[%s] is not empty", testLine())
	}
	return m
}

// IsNotEmpty matches a non-empty string.
func (m *Matcher) IsNotEmpty() *Matcher {
	v := reflect.ValueOf(m.actual)
	if m.match = v.IsValid() && v.Kind() == reflect.String && len(v.String()) > 0; !m.match {
		m.t.Errorf("[%s] is empty", testLine())
	}
	return m
}

// IsOk expects the actual value to be nil or a type that is not an error.
func (m *Matcher) IsOk() *Matcher {
	t := reflect.TypeOf(m.actual)
	if m.match = t == nil || !t.Implements(errorInterface); !m.match {
		m.t.Errorf("[%s] is not ok", testLine())
	}
	return m
}

// IsEqualTo verifies that the actual value captured in `That()` is equal to
// the expected value.
func (m *Matcher) IsEqualTo(expected interface{}) *Matcher {
	m.match = false
	av := reflect.ValueOf(m.actual)
	ev := reflect.ValueOf(expected)

	// Edge condition: both values are nil. The `IsNil` matcher should be
	// used instead of IsEqualTo(), but we don't want to fail the test over
	// semantics.
	if reflect.TypeOf(m.actual) == nil && reflect.TypeOf(expected) == nil {
		m.match = true
		return m
	}

	// Both values must be valid and of comparable kinds.
	if av.IsValid() && ev.IsValid() {
		ak, err := basicKind(av)
		if err != nil {
			m.t.Error(err)
			return m
		}
		ek, err := basicKind(ev)
		if err != nil {
			m.t.Error(err)
			return m
		}
		if ak != ek {
			m.t.Errorf("[%s] %s", testLine(), errBadComparison)
			return m
		}
		m.match = reflect.DeepEqual(m.actual, expected)
	}
	if !m.match {
		m.t.Errorf("[%s] expected:<[%s]> but was <[%s]>", testLine(), stringValue(ev), stringValue(av))
	}
	return m
}

// IsGreaterThan matches if the actual value is greater than the expected value.
func (m *Matcher) IsGreaterThan(expected interface{}) *Matcher {
	k, err := typeCheck(m.actual, expected)
	if err != nil {
		m.match = false
		m.t.Error(err)
	} else {
		av := reflect.ValueOf(m.actual)
		ev := reflect.ValueOf(expected)
		switch k {
		case floatKind:
			m.match = av.Float() > ev.Float()
		case intKind:
			m.match = av.Int() > ev.Int()
		case uintKind:
			m.match = av.Uint() > ev.Uint()
		default:
			m.match = false
			m.t.Error(errBadType)
		}
		if !m.match {
			m.t.Errorf("[%s] expected: Greater Than <[%s]> but was <[%s]>", testLine(), stringValue(ev), stringValue(av))
		}
	}
	return m
}

// typeCheck verifies both operands are non-nil and of the same basic kind,
// returning that kind for use in ordered comparisons.
func typeCheck(actual interface{}, expected interface{}) (kind, error) {
	if reflect.TypeOf(actual) == nil {
		return invalidKind, errors.New("Actual value was nil")
	}
	if reflect.TypeOf(expected) == nil {
		return invalidKind, errors.New("Expected value was nil")
	}
	av := reflect.ValueOf(actual)
	ev := reflect.ValueOf(expected)
	ak, err := basicKind(av)
	if err != nil {
		return invalidKind, errors.New("Actual " + err.Error())
	}
	ek, err := basicKind(ev)
	if err != nil {
		return invalidKind, errors.New("Expected " + err.Error())
	}
	if ak != ek {
		return invalidKind, errBadComparison
	}
	return ak, nil
}

// stringValue uses reflection to convert a `reflect.Value` to a string for
// use in error messages.
func stringValue(rv reflect.Value) string {
	// BUG FIX: a zero reflect.Value (e.g. reflect.ValueOf(nil)) has Kind
	// Invalid; the switch below would previously panic via the default case
	// whenever a nil was compared against a non-nil value.
	if !rv.IsValid() {
		return "<nil>"
	}
	switch rv.Kind() {
	case reflect.Bool:
		return strconv.FormatBool(rv.Bool())
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return strconv.FormatInt(rv.Int(), 10)
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return strconv.FormatUint(rv.Uint(), 10)
	case reflect.Float32:
		return strconv.FormatFloat(rv.Float(), 'g', -1, 32)
	case reflect.Float64:
		return strconv.FormatFloat(rv.Float(), 'g', -1, 64)
	case reflect.Complex64:
		c := rv.Complex()
		return "(" + strconv.FormatFloat(real(c), 'g', -1, 32) + "," + strconv.FormatFloat(imag(c), 'g', -1, 32) + ")"
	case reflect.Complex128:
		c := rv.Complex()
		return "(" + strconv.FormatFloat(real(c), 'g', -1, 64) + "," + strconv.FormatFloat(imag(c), 'g', -1, 64) + ")"
	case reflect.String:
		return rv.String()
	default:
		// All of the types basicKind admits are handled above, so this
		// should never be reached.
		panic(errBadType)
	}
}

// testLine returns the source line the unit test was run from, extracted
// from the goroutine stack (the frame just above testing.tRunner).
func testLine() string {
	lines := strings.Split(string(debug.Stack()), "\n")
	var source int
	for i, s := range lines {
		if strings.HasPrefix(s, "testing.tRunner") {
			source = i - 1
		}
	}
	line := lines[source]
	// `end` was previously named `len`, shadowing the builtin.
	end := len(line)
	if index := strings.LastIndex(line, " +"); index >= 0 {
		end = index
	}
	if index := strings.LastIndex(line, "/"); index >= 0 {
		line = line[index+1 : end]
	} else if index := strings.LastIndex(line, "\\"); index >= 0 {
		line = line[index+1 : end]
	}
	return line
}

// The following is lifted from https://golang.org/src/text/template/funcs.go
// None of this is available outside of the package, so we're reproducing it.

// Errors returned when comparisons go bad.
var (
	errBadComparisonType = errors.New("invalid type for comparison")
	errBadComparison     = errors.New("incompatible types for comparison")
	errBadType           = errors.New("invalid type")
)

// These are the basic types, distilled from the variety of more specific types.
type kind int

const (
	invalidKind kind = iota
	boolKind
	complexKind
	intKind
	floatKind
	stringKind
	uintKind
	sliceKind
)

// basicKind simplifies the type down to the particular class to which it belongs.
func basicKind(v reflect.Value) (kind, error) {
	switch v.Kind() {
	case reflect.Bool:
		return boolKind, nil
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return intKind, nil
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return uintKind, nil
	case reflect.Float32, reflect.Float64:
		return floatKind, nil
	case reflect.Complex64, reflect.Complex128:
		return complexKind, nil
	case reflect.String:
		return stringKind, nil
	case reflect.Slice:
		return sliceKind, nil
	}
	return invalidKind, errBadComparisonType
}
matcher.go
0.744192
0.486514
matcher.go
starcoder
package value

import (
	"math/big"
	"unicode/utf8"
)

// Unary operators.

// To avoid initialization cycles when we refer to the ops from inside
// themselves, we use an init function to initialize the ops.

// unaryBigIntOp applies the op to a BigInt.
// The result is written into a fresh zero-valued BigInt and then passed
// through shrink (presumably normalizing to the smallest representation —
// confirm against shrink's definition elsewhere in the package).
func unaryBigIntOp(c Context, op func(Context, *big.Int, *big.Int) *big.Int, v Value) Value {
	i := v.(BigInt)
	z := bigInt64(0)
	op(c, z.Int, i.Int)
	return z.shrink()
}

// bigIntWrap adapts a Context-free *big.Int operation (such as the methods of
// math/big) to the Context-taking signature expected by unaryBigIntOp; the
// Context argument is ignored.
func bigIntWrap(op func(*big.Int, *big.Int) *big.Int) func(Context, *big.Int, *big.Int) *big.Int {
	return func(_ Context, u *big.Int, v *big.Int) *big.Int {
		return op(u, v)
	}
}

// unaryBigRatOp applies the op to a BigRat.
func unaryBigRatOp(op func(*big.Rat, *big.Rat) *big.Rat, v Value) Value {
	i := v.(BigRat)
	z := bigRatInt64(0)
	op(z.Rat, i.Rat)
	return z.shrink()
}

// unaryBigFloatOp applies the op to a BigFloat.
// The destination float is created with the Context's configuration
// (which presumably fixes its precision — confirm in bigFloatInt64).
func unaryBigFloatOp(c Context, op func(Context, *big.Float, *big.Float) *big.Float, v Value) Value {
	i := v.(BigFloat)
	z := bigFloatInt64(c.Config(), 0)
	op(c, z.Float, i.Float)
	return z.shrink()
}

// bigFloatWrap adapts a Context-free *big.Float operation to the
// Context-taking signature expected by unaryBigFloatOp; the Context
// argument is ignored.
func bigFloatWrap(op func(*big.Float, *big.Float) *big.Float) func(Context, *big.Float, *big.Float) *big.Float {
	return func(_ Context, u *big.Float, v *big.Float) *big.Float {
		return op(u, v)
	}
}

// bigIntRand sets a to a random number in [origin, origin+b].
// It draws from the Context's random source and offsets by the configured
// index origin.
func bigIntRand(c Context, a, b *big.Int) *big.Int {
	a.Rand(c.Config().Random(), b)
	return a.Add(a, c.Config().BigOrigin())
}

// self is the identity operation: it returns its operand unchanged.
func self(c Context, v Value) Value {
	return v
}

// vectorSelf promotes v to type Vector.
// v must be a scalar; receiving a Vector or *Matrix here is an internal
// error (Errorf is the package's error-reporting mechanism).
func vectorSelf(c Context, v Value) Value {
	switch v.(type) {
	case Vector:
		Errorf("internal error: vectorSelf of vector")
	case *Matrix:
		Errorf("internal error: vectorSelf of matrix")
	}
	return NewVector([]Value{v})
}

// floatSelf promotes v to type BigFloat.
/* floatSelf promotes Int/BigInt/BigRat to BigFloat (a BigFloat passes through unchanged); text renders a value's string form as a vector of Chars (one Char per rune); IvyEval is injected by package run to break a dependency cycle; factorial computes n! via big.Int.MulRange, rejecting negatives. init (continuing on the following lines) builds the unary-operator table: "?" (roll, honoring the configured origin), "+" (identity on every type), "-" (negate), and the start of "/" (reciprocal). */
func floatSelf(c Context, v Value) Value { conf := c.Config() switch v := v.(type) { case Int: return v.toType(conf, bigFloatType) case BigInt: return v.toType(conf, bigFloatType) case BigRat: return v.toType(conf, bigFloatType) case BigFloat: return v } Errorf("internal error: floatSelf of non-number") return nil } // text returns a vector of Chars holding the string representation // of the value. func text(c Context, v Value) Value { str := v.Sprint(c.Config()) elem := make([]Value, utf8.RuneCountInString(str)) for i, r := range str { elem[i] = Char(r) } return NewVector(elem) } // Implemented in package run, handled as a func to avoid a dependency loop. var IvyEval func(context Context, s string) Value var UnaryOps = make(map[string]UnaryOp) func factorial(n int64) *big.Int { if n < 0 { Errorf("negative value %d for factorial", n) } if n == 0 { return big.NewInt(1) } fac := new(big.Int) fac.MulRange(1, n) return fac } func init() { ops := []*unaryOp{ { name: "?", elementwise: true, fn: [numType]unaryFn{ intType: func(c Context, v Value) Value { i := int64(v.(Int)) if i <= 0 { Errorf("illegal roll value %v", v) } return Int(c.Config().Origin()) + Int(c.Config().Random().Int63n(i)) }, bigIntType: func(c Context, v Value) Value { if v.(BigInt).Sign() <= 0 { Errorf("illegal roll value %v", v) } return unaryBigIntOp(c, bigIntRand, v) }, }, }, { name: "+", fn: [numType]unaryFn{ intType: self, bigIntType: self, bigRatType: self, bigFloatType: self, vectorType: self, matrixType: self, }, }, { name: "-", elementwise: true, fn: [numType]unaryFn{ intType: func(c Context, v Value) Value { return -v.(Int) }, bigIntType: func(c Context, v Value) Value { return unaryBigIntOp(c, bigIntWrap((*big.Int).Neg), v) }, bigRatType: func(c Context, v Value) Value { return unaryBigRatOp((*big.Rat).Neg, v) }, bigFloatType: func(c Context, v Value) Value { return unaryBigFloatOp(c, bigFloatWrap((*big.Float).Neg), v) }, }, }, { name: "/", elementwise: true, fn: [numType]unaryFn{ intType:
/* init table continued: "/" (reciprocal; zero is only reachable in the Int case — BigInt/BigRat/BigFloat operands cannot be zero here per the inline comments), "sgn" (sign as -1/0/1), "!" (factorial, Int only), and the start of "^" (bitwise not, implemented for big ints as XOR with -1). */
func(c Context, v Value) Value { i := int64(v.(Int)) if i == 0 { Errorf("division by zero") } return BigRat{ Rat: big.NewRat(0, 1).SetFrac64(1, i), }.shrink() }, bigIntType: func(c Context, v Value) Value { // Zero division cannot happen for unary. return BigRat{ Rat: big.NewRat(0, 1).SetFrac(bigOne.Int, v.(BigInt).Int), }.shrink() }, bigRatType: func(c Context, v Value) Value { // Zero division cannot happen for unary. r := v.(BigRat) return BigRat{ Rat: big.NewRat(0, 1).SetFrac(r.Denom(), r.Num()), }.shrink() }, bigFloatType: func(c Context, v Value) Value { // Zero division cannot happen for unary. f := v.(BigFloat) one := new(big.Float).SetPrec(c.Config().FloatPrec()).SetInt64(1) return BigFloat{ Float: one.Quo(one, f.Float), }.shrink() }, }, }, { name: "sgn", elementwise: true, fn: [numType]unaryFn{ intType: func(c Context, v Value) Value { i := int64(v.(Int)) if i > 0 { return one } if i < 0 { return minusOne } return zero }, bigIntType: func(c Context, v Value) Value { return Int(v.(BigInt).Sign()) }, bigRatType: func(c Context, v Value) Value { return Int(v.(BigRat).Sign()) }, bigFloatType: func(c Context, v Value) Value { return Int(v.(BigFloat).Sign()) }, }, }, { name: "!", elementwise: true, fn: [numType]unaryFn{ intType: func(c Context, v Value) Value { return BigInt{factorial(int64(v.(Int)))}.shrink() }, }, }, { name: "^", elementwise: true, fn: [numType]unaryFn{ intType: func(c Context, v Value) Value { return ^v.(Int) }, bigIntType: func(c Context, v Value) Value { // Lots of ways to do this, here's one.
/* init table continued: end of "^", "not" (logical negation to 0/1 across all numeric types), "abs", and "floor" (for rationals: truncate |x| then step away from zero when negative; for floats: take the truncated big.Int and adjust by the rounding accuracy — the panic branch guards the can't-happen IsInt case). */
return BigInt{Int: bigInt64(0).Xor(v.(BigInt).Int, bigMinusOne.Int)}.shrink() }, }, }, { name: "not", elementwise: true, fn: [numType]unaryFn{ intType: func(c Context, v Value) Value { if v.(Int) == 0 { return one } return zero }, bigIntType: func(c Context, v Value) Value { if v.(BigInt).Sign() == 0 { return one } return zero }, bigRatType: func(c Context, v Value) Value { if v.(BigRat).Sign() == 0 { return one } return zero }, bigFloatType: func(c Context, v Value) Value { if v.(BigFloat).Sign() == 0 { return one } return zero }, }, }, { name: "abs", elementwise: true, fn: [numType]unaryFn{ intType: func(c Context, v Value) Value { i := v.(Int) if i < 0 { i = -i } return i }, bigIntType: func(c Context, v Value) Value { return unaryBigIntOp(c, bigIntWrap((*big.Int).Abs), v) }, bigRatType: func(c Context, v Value) Value { return unaryBigRatOp((*big.Rat).Abs, v) }, bigFloatType: func(c Context, v Value) Value { return unaryBigFloatOp(c, bigFloatWrap((*big.Float).Abs), v) }, }, }, { name: "floor", elementwise: true, fn: [numType]unaryFn{ intType: func(c Context, v Value) Value { return v }, bigIntType: func(c Context, v Value) Value { return v }, bigRatType: func(c Context, v Value) Value { i := v.(BigRat) if i.IsInt() { // It can't be an integer, which means we must move up or down. panic("min: is int") } positive := i.Sign() >= 0 if !positive { j := bigRatInt64(0) j.Abs(i.Rat) i = j } z := bigInt64(0) z.Quo(i.Num(), i.Denom()) if !positive { z.Add(z.Int, bigOne.Int) z.Neg(z.Int) } return z.shrink() }, bigFloatType: func(c Context, v Value) Value { f := v.(BigFloat) if f.Float.IsInf() { Errorf("floor of %s", v.Sprint(c.Config())) } i, acc := f.Int(nil) switch acc { case big.Exact, big.Below: // Done.
/* init table continued: "ceil" (mirror of floor, stepping toward +inf), "iota" (index vector 1..n honoring the configured origin; rejects negatives and values above maxInt), "rho" (shape: 0 for scalars, length for vectors, the shape vector for matrices), "," (ravel: scalars become 1-vectors, matrices flatten to a copy of their data), and the start of "up" (grade up). */
case big.Above: i.Sub(i, bigOne.Int) } return BigInt{i}.shrink() }, }, }, { name: "ceil", elementwise: true, fn: [numType]unaryFn{ intType: func(c Context, v Value) Value { return v }, bigIntType: func(c Context, v Value) Value { return v }, bigRatType: func(c Context, v Value) Value { i := v.(BigRat) if i.IsInt() { // It can't be an integer, which means we must move up or down. panic("max: is int") } positive := i.Sign() >= 0 if !positive { j := bigRatInt64(0) j.Abs(i.Rat) i = j } z := bigInt64(0) z.Quo(i.Num(), i.Denom()) if positive { z.Add(z.Int, bigOne.Int) } else { z.Neg(z.Int) } return z.shrink() }, bigFloatType: func(c Context, v Value) Value { f := v.(BigFloat) if f.Float.IsInf() { Errorf("ceil of %s", v.Sprint(c.Config())) } i, acc := f.Int(nil) switch acc { case big.Exact, big.Above: // Done case big.Below: i.Add(i, bigOne.Int) } return BigInt{i}.shrink() }, }, }, { name: "iota", fn: [numType]unaryFn{ intType: func(c Context, v Value) Value { i := v.(Int) if i < 0 || maxInt < i { Errorf("bad iota %d", i) } if i == 0 { return Vector{} } n := make([]Value, i) for k := range n { n[k] = Int(k + c.Config().Origin()) } return NewVector(n) }, }, }, { name: "rho", fn: [numType]unaryFn{ intType: func(c Context, v Value) Value { return Int(0) }, charType: func(c Context, v Value) Value { return Int(0) }, bigIntType: func(c Context, v Value) Value { return Int(0) }, bigRatType: func(c Context, v Value) Value { return Int(0) }, bigFloatType: func(c Context, v Value) Value { return Int(0) }, vectorType: func(c Context, v Value) Value { return Int(len(v.(Vector))) }, matrixType: func(c Context, v Value) Value { return NewIntVector(v.(*Matrix).shape) }, }, }, { name: ",", fn: [numType]unaryFn{ intType: vectorSelf, charType: vectorSelf, bigIntType: vectorSelf, bigRatType: vectorSelf, bigFloatType: vectorSelf, vectorType: self, matrixType: func(c Context, v Value) Value { return v.(*Matrix).data.Copy() }, }, }, { name: "up", fn: [numType]unaryFn{ intType: self, charType:
/* init table continued: "up"/"down" (grade up / grade down — down is grade up reversed in place; scalars pass through), "rot" (reverse a copied vector, or reverse each last-axis row of a copied matrix; rank-1 matrices rejected), "flip" (reverse along the first axis by swapping whole element blocks), and the start of "transp" (transpose; a plain vector is just copied). */
self, bigIntType: self, bigRatType: self, bigFloatType: self, vectorType: func(c Context, v Value) Value { if c == nil { panic("NIL IN gradeUP") } return v.(Vector).grade(c) }, }, }, { name: "down", fn: [numType]unaryFn{ intType: self, charType: self, bigIntType: self, bigRatType: self, bigFloatType: self, vectorType: func(c Context, v Value) Value { x := v.(Vector).grade(c) for i, j := 0, len(x)-1; i < j; i, j = i+1, j-1 { x[i], x[j] = x[j], x[i] } return x }, }, }, { name: "rot", fn: [numType]unaryFn{ intType: self, charType: self, bigIntType: self, bigRatType: self, bigFloatType: self, vectorType: func(c Context, v Value) Value { x := v.(Vector).Copy() for i, j := 0, len(x)-1; i < j; i, j = i+1, j-1 { x[i], x[j] = x[j], x[i] } return x }, matrixType: func(c Context, v Value) Value { m := v.(*Matrix).Copy() if m.Rank() == 0 { return m } if m.Rank() == 1 { Errorf("rot: matrix is vector") } size := int(m.Size()) ncols := m.shape[m.Rank()-1] x := m.data for index := 0; index <= size-ncols; index += ncols { for i, j := 0, ncols-1; i < j; i, j = i+1, j-1 { x[index+i], x[index+j] = x[index+j], x[index+i] } } return m }, }, }, { name: "flip", fn: [numType]unaryFn{ intType: self, charType: self, bigIntType: self, bigRatType: self, bigFloatType: self, vectorType: func(c Context, v Value) Value { return c.EvalUnary("rot", v) }, matrixType: func(c Context, v Value) Value { m := v.(*Matrix).Copy() if m.Rank() == 0 { return m } if m.Rank() == 1 { Errorf("flip: matrix is vector") } elemSize := int(m.ElemSize()) size := int(m.Size()) x := m.data lo := 0 hi := size - elemSize for lo < hi { for i := 0; i < elemSize; i++ { x[lo+i], x[hi+i] = x[hi+i], x[lo+i] } lo += elemSize hi -= elemSize } return m }, }, }, { name: "transp", fn: [numType]unaryFn{ intType: self, charType: self, bigIntType: self, bigRatType: self, bigFloatType: self, vectorType: func(c Context, v Value) Value { return v.(Vector).Copy() }, matrixType: func(c Context, v Value) Value { m := v.(*Matrix) if m.Rank() ==
/* init table continued: end of "transp" (rank-1 matrices rejected, otherwise delegate to m.transpose), then the elementwise math wrappers "cos", "log", "sin", "tan", "asin", "acos" — each dispatches every numeric type to a helper (cos, logn, sin, ...) implemented elsewhere in the package. */
1 { Errorf("transp: matrix is vector") } return m.transpose() }, }, }, { name: "cos", elementwise: true, fn: [numType]unaryFn{ intType: func(c Context, v Value) Value { return cos(c, v) }, bigIntType: func(c Context, v Value) Value { return cos(c, v) }, bigRatType: func(c Context, v Value) Value { return cos(c, v) }, bigFloatType: func(c Context, v Value) Value { return cos(c, v) }, }, }, { name: "log", elementwise: true, fn: [numType]unaryFn{ intType: func(c Context, v Value) Value { return logn(c, v) }, bigIntType: func(c Context, v Value) Value { return logn(c, v) }, bigRatType: func(c Context, v Value) Value { return logn(c, v) }, bigFloatType: func(c Context, v Value) Value { return logn(c, v) }, }, }, { name: "sin", elementwise: true, fn: [numType]unaryFn{ intType: func(c Context, v Value) Value { return sin(c, v) }, bigIntType: func(c Context, v Value) Value { return sin(c, v) }, bigRatType: func(c Context, v Value) Value { return sin(c, v) }, bigFloatType: func(c Context, v Value) Value { return sin(c, v) }, }, }, { name: "tan", elementwise: true, fn: [numType]unaryFn{ intType: func(c Context, v Value) Value { return tan(c, v) }, bigIntType: func(c Context, v Value) Value { return tan(c, v) }, bigRatType: func(c Context, v Value) Value { return tan(c, v) }, bigFloatType: func(c Context, v Value) Value { return tan(c, v) }, }, }, { name: "asin", elementwise: true, fn: [numType]unaryFn{ intType: func(c Context, v Value) Value { return asin(c, v) }, bigIntType: func(c Context, v Value) Value { return asin(c, v) }, bigRatType: func(c Context, v Value) Value { return asin(c, v) }, bigFloatType: func(c Context, v Value) Value { return asin(c, v) }, }, }, { name: "acos", elementwise: true, fn: [numType]unaryFn{ intType: func(c Context, v Value) Value { return acos(c, v) }, bigIntType: func(c Context, v Value) Value { return acos(c, v) }, bigRatType: func(c Context, v Value) Value { return acos(c, v) }, bigFloatType: func(c Context, v Value) Value { return acos(c,
/* init table continued: "atan", "**" (exponential), "sqrt", "char"/"code" (Int<->Char conversions; char validates the rune), "text" (string form of any value, via the text helper above), and the start of "ivy" (evaluate a Char or all-Char Vector as ivy source through the injected IvyEval). */
v) }, }, }, { name: "atan", elementwise: true, fn: [numType]unaryFn{ intType: func(c Context, v Value) Value { return atan(c, v) }, bigIntType: func(c Context, v Value) Value { return atan(c, v) }, bigRatType: func(c Context, v Value) Value { return atan(c, v) }, bigFloatType: func(c Context, v Value) Value { return atan(c, v) }, }, }, { name: "**", elementwise: true, fn: [numType]unaryFn{ intType: func(c Context, v Value) Value { return exp(c, v) }, bigIntType: func(c Context, v Value) Value { return exp(c, v) }, bigRatType: func(c Context, v Value) Value { return exp(c, v) }, bigFloatType: func(c Context, v Value) Value { return exp(c, v) }, }, }, { name: "sqrt", elementwise: true, fn: [numType]unaryFn{ intType: func(c Context, v Value) Value { return sqrt(c, v) }, bigIntType: func(c Context, v Value) Value { return sqrt(c, v) }, bigRatType: func(c Context, v Value) Value { return sqrt(c, v) }, bigFloatType: func(c Context, v Value) Value { return sqrt(c, v) }, }, }, { name: "char", elementwise: true, fn: [numType]unaryFn{ intType: func(c Context, v Value) Value { return Char(v.(Int)).validate() }, }, }, { name: "code", elementwise: true, fn: [numType]unaryFn{ charType: func(c Context, v Value) Value { return Int(v.(Char)) }, }, }, { name: "text", fn: [numType]unaryFn{ intType: func(c Context, v Value) Value { return text(c, v) }, bigIntType: func(c Context, v Value) Value { return text(c, v) }, bigRatType: func(c Context, v Value) Value { return text(c, v) }, bigFloatType: func(c Context, v Value) Value { return text(c, v) }, vectorType: func(c Context, v Value) Value { return text(c, v) }, matrixType: func(c Context, v Value) Value { return text(c, v) }, }, }, { name: "ivy", fn: [numType]unaryFn{ charType: func(c Context, v Value) Value { char := v.(Char) return IvyEval(c, string(char)) }, vectorType: func(c Context, v Value) Value { text := v.(Vector) if !text.AllChars() { Errorf("ivy: value is not a vector of char") } return IvyEval(c,
/* end of init: close the "ivy" entry, register "float" (promote each numeric type to BigFloat via floatSelf), then install every op into the package-level UnaryOps map keyed by operator name. */
text.makeString(c.Config(), false)) }, }, }, { name: "float", elementwise: true, fn: [numType]unaryFn{ intType: floatSelf, bigIntType: floatSelf, bigRatType: floatSelf, bigFloatType: floatSelf, }, }, } for _, op := range ops { UnaryOps[op.name] = op } }
value/unary.go
0.698741
0.493531
unary.go
starcoder
/* Package command: "consul rtt" estimates the round-trip time between two nodes from Consul's network-coordinate model. Help returns the usage text (the raw backtick string plus BaseCommand's shared help). Run (continuing on the following lines) parses the -wan flag, requires one or two node names, and builds the HTTP API client plus its coordinate endpoint. */
package command import ( "fmt" "strings" "github.com/hashicorp/serf/coordinate" ) // RTTCommand is a Command implementation that allows users to query the // estimated round trip time between nodes using network coordinates. type RTTCommand struct { BaseCommand } func (c *RTTCommand) Help() string { helpText := ` Usage: consul rtt [options] node1 [node2] Estimates the round trip time between two nodes using Consul's network coordinate model of the cluster. At least one node name is required. If the second node name isn't given, it is set to the agent's node name. Note that these are node names as known to Consul as "consul members" would show, not IP addresses. By default, the two nodes are assumed to be nodes in the local datacenter and the LAN coordinates are used. If the -wan option is given, then the WAN coordinates are used, and the node names must be suffixed by a period and the datacenter (eg. "myserver.dc1"). It is not possible to measure between LAN coordinates and WAN coordinates because they are maintained by independent Serf gossip areas, so they are not compatible. ` + c.BaseCommand.Help() return strings.TrimSpace(helpText) } func (c *RTTCommand) Run(args []string) int { var wan bool f := c.BaseCommand.NewFlagSet(c) f.BoolVar(&wan, "wan", false, "Use WAN coordinates instead of LAN coordinates.") if err := c.BaseCommand.Parse(args); err != nil { return 1 } // They must provide at least one node. nodes := f.Args() if len(nodes) < 1 || len(nodes) > 2 { c.UI.Error("One or two node names must be specified") c.UI.Error("") c.UI.Error(c.Help()) return 1 } // Create and test the HTTP client. client, err := c.BaseCommand.HTTPClient() if err != nil { c.UI.Error(fmt.Sprintf("Error connecting to Consul agent: %s", err)) return 1 } coordClient := client.Coordinate() var source string var coord1, coord2 *coordinate.Coordinate if wan { source = "WAN" // Default the second node to the agent if none was given.
/* Run continued — WAN path: default the missing second node to "<agent node>.<agent dc>", require both names in node.datacenter form, then scan every datacenter's coordinate entries. Coordinates are only comparable within the same Serf area, so a match jumps to SHOW_RTT only once both nodes are found AND their area IDs agree; otherwise both coordinates are nilled out to force the not-found error below. LAN path: default the second node to the agent's node name and fetch all LAN coordinate entries. */
if len(nodes) < 2 { agent := client.Agent() self, err := agent.Self() if err != nil { c.UI.Error(fmt.Sprintf("Unable to look up agent info: %s", err)) return 1 } node, dc := self["Config"]["NodeName"], self["Config"]["Datacenter"] nodes = append(nodes, fmt.Sprintf("%s.%s", node, dc)) } // Parse the input nodes. parts1 := strings.Split(nodes[0], ".") parts2 := strings.Split(nodes[1], ".") if len(parts1) != 2 || len(parts2) != 2 { c.UI.Error("Node names must be specified as <node name>.<datacenter> with -wan") return 1 } node1, dc1 := parts1[0], parts1[1] node2, dc2 := parts2[0], parts2[1] // Pull all the WAN coordinates. dcs, err := coordClient.Datacenters() if err != nil { c.UI.Error(fmt.Sprintf("Error getting coordinates: %s", err)) return 1 } // See if the requested nodes are in there. We only compare // coordinates in the same areas. var area1, area2 string for _, dc := range dcs { for _, entry := range dc.Coordinates { if dc.Datacenter == dc1 && entry.Node == node1 { area1 = dc.AreaID coord1 = entry.Coord } if dc.Datacenter == dc2 && entry.Node == node2 { area2 = dc.AreaID coord2 = entry.Coord } if area1 == area2 && coord1 != nil && coord2 != nil { goto SHOW_RTT } } } // Nil out the coordinates so we don't display across areas if // we didn't find anything. coord1, coord2 = nil, nil } else { source = "LAN" // Default the second node to the agent if none was given. if len(nodes) < 2 { agent := client.Agent() node, err := agent.NodeName() if err != nil { c.UI.Error(fmt.Sprintf("Unable to look up agent info: %s", err)) return 1 } nodes = append(nodes, node) } // Pull all the LAN coordinates. entries, _, err := coordClient.Nodes(nil) if err != nil { c.UI.Error(fmt.Sprintf("Error getting coordinates: %s", err)) return 1 } // See if the requested nodes are in there.
/* Run continued — locate both requested LAN coordinates (early exit via SHOW_RTT once both found), report an error naming whichever node has no coordinate, and finally print the estimated RTT converted to milliseconds along with the coordinate source (LAN/WAN). Synopsis returns the one-line command description. */
for _, entry := range entries { if entry.Node == nodes[0] { coord1 = entry.Coord } if entry.Node == nodes[1] { coord2 = entry.Coord } if coord1 != nil && coord2 != nil { goto SHOW_RTT } } } // Make sure we found both coordinates. if coord1 == nil { c.UI.Error(fmt.Sprintf("Could not find a coordinate for node %q", nodes[0])) return 1 } if coord2 == nil { c.UI.Error(fmt.Sprintf("Could not find a coordinate for node %q", nodes[1])) return 1 } SHOW_RTT: // Report the round trip time. dist := fmt.Sprintf("%.3f ms", coord1.DistanceTo(coord2).Seconds()*1000.0) c.UI.Output(fmt.Sprintf("Estimated %s <-> %s rtt: %s (using %s coordinates)", nodes[0], nodes[1], dist, source)) return 0 } func (c *RTTCommand) Synopsis() string { return "Estimates network round trip time between nodes" }
vendor/github.com/hashicorp/consul/command/rtt.go
0.746416
0.407982
rtt.go
starcoder
package metric

import (
	"github.com/zchee/goimportz/internal/event/keys"
	"github.com/zchee/goimportz/internal/event/label"
)

// Scalar represents the construction information for a scalar metric.
type Scalar struct {
	// Name is the unique name of this metric.
	Name string
	// Description can be used by observers to describe the metric to users.
	Description string
	// Keys is the set of labels that collectively describe rows of the metric.
	Keys []label.Key
}

// HistogramInt64 represents the construction information for an int64 histogram metric.
type HistogramInt64 struct {
	// Name is the unique name of this metric.
	Name string
	// Description can be used by observers to describe the metric to users.
	Description string
	// Keys is the set of labels that collectively describe rows of the metric.
	Keys []label.Key
	// Buckets holds the inclusive upper bound of each bucket in the histogram.
	Buckets []int64
}

// HistogramFloat64 represents the construction information for an float64 histogram metric.
type HistogramFloat64 struct {
	// Name is the unique name of this metric.
	Name string
	// Description can be used by observers to describe the metric to users.
	Description string
	// Keys is the set of labels that collectively describe rows of the metric.
	Keys []label.Key
	// Buckets holds the inclusive upper bound of each bucket in the histogram.
	Buckets []float64
}

// Count creates a new metric based on the Scalar information that counts
// the number of times the supplied int64 measure is set.
// Metrics of this type will use Int64Data.
func (info Scalar) Count(e *Config, key label.Key) {
	// NOTE(review): unlike the Sum/Latest constructors below, key is
	// deliberately left nil on the data — a count presumably only needs the
	// event occurrence, not the label's value; confirm against Int64Data.count.
	data := &Int64Data{Info: &info, key: nil}
	e.subscribe(key, data.count)
}

// SumInt64 creates a new metric based on the Scalar information that sums all
// the values recorded on the int64 measure.
// Metrics of this type will use Int64Data.
func (info Scalar) SumInt64(e *Config, key *keys.Int64) {
	data := &Int64Data{Info: &info, key: key}
	e.subscribe(key, data.sum)
}

// LatestInt64 creates a new metric based on the Scalar information that tracks
// the most recent value recorded on the int64 measure.
// Metrics of this type will use Int64Data (with IsGauge set).
func (info Scalar) LatestInt64(e *Config, key *keys.Int64) {
	data := &Int64Data{Info: &info, IsGauge: true, key: key}
	e.subscribe(key, data.latest)
}

// SumFloat64 creates a new metric based on the Scalar information that sums all
// the values recorded on the float64 measure.
// Metrics of this type will use Float64Data.
func (info Scalar) SumFloat64(e *Config, key *keys.Float64) {
	data := &Float64Data{Info: &info, key: key}
	e.subscribe(key, data.sum)
}

// LatestFloat64 creates a new metric based on the Scalar information that tracks
// the most recent value recorded on the float64 measure.
// Metrics of this type will use Float64Data (with IsGauge set).
func (info Scalar) LatestFloat64(e *Config, key *keys.Float64) {
	data := &Float64Data{Info: &info, IsGauge: true, key: key}
	e.subscribe(key, data.latest)
}

// Record creates a new metric based on the HistogramInt64 information that
// tracks the bucketized counts of values recorded on the int64 measure.
// Metrics of this type will use HistogramInt64Data.
func (info HistogramInt64) Record(e *Config, key *keys.Int64) {
	data := &HistogramInt64Data{Info: &info, key: key}
	e.subscribe(key, data.record)
}

// Record creates a new metric based on the HistogramFloat64 information that
// tracks the bucketized counts of values recorded on the float64 measure.
// Metrics of this type will use HistogramFloat64Data.
func (info HistogramFloat64) Record(e *Config, key *keys.Float64) {
	data := &HistogramFloat64Data{Info: &info, key: key}
	e.subscribe(key, data.record)
}
internal/event/export/metric/info.go
0.850934
0.556701
info.go
starcoder
package SetSimilaritySearch

import "math"

// min returns the smaller of two ints.
func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

// max returns the larger of two ints.
func max(a, b int) int {
	if a > b {
		return a
	}
	return b
}

// intersectionSize computes the number of overlaps of two transformed sets
// (sorted integers) with a linear merge walk.
//
// BUG FIX: the elements are compared directly instead of via the difference
// s1[i]-s2[j]; the subtraction can overflow for values near the int extremes,
// flipping the sign of the comparison and corrupting the walk.
func intersectionSize(s1, s2 []int) int {
	var i, j int
	var overlap int
	for i < len(s1) && j < len(s2) {
		switch {
		case s1[i] == s2[j]:
			overlap++
			i++
			j++
		case s1[i] < s2[j]:
			i++
		default: // s1[i] > s2[j]
			j++
		}
	}
	return overlap
}

// function is the signature shared by all set-similarity measures.
type function func([]int, []int) float64

// jaccard computes the Jaccard similarity of two transformed sets
// (|A∩B| / |A∪B|); two empty sets score 0 by convention.
func jaccard(s1, s2 []int) float64 {
	if len(s1) == 0 && len(s2) == 0 {
		return 0.0
	}
	intersectionSize := intersectionSize(s1, s2)
	return float64(intersectionSize) / float64(len(s1)+len(s2)-intersectionSize)
}

// containment computes the Containment of s1 in s2 -- the fraction of s1
// being found in s2. An empty s1 scores 0 by convention.
func containment(s1, s2 []int) float64 {
	if len(s1) == 0 {
		return 0.0
	}
	intersectionSize := intersectionSize(s1, s2)
	return float64(intersectionSize) / float64(len(s1))
}

// cosine computes the cosine similarity of two transformed sets
// (|A∩B| / sqrt(|A|*|B|)); two empty sets score 0 by convention.
func cosine(s1, s2 []int) float64 {
	if len(s1) == 0 && len(s2) == 0 {
		return 0.0
	}
	intersectionSize := intersectionSize(s1, s2)
	return float64(intersectionSize) / math.Sqrt(float64(len(s1)*len(s2)))
}

// overlapThresholdFunction maps a set size x and a similarity threshold t to
// the minimum overlap required for the similarity to reach t.
type overlapThresholdFunction func(int, float64) int

// x is the set size
// t is the Jaccard threshold
func jaccardOverlapThresholdFunc(x int, t float64) int {
	return max(1, int(float64(x)*t))
}

var jaccardOverlapIndexThresholdFunc = jaccardOverlapThresholdFunc

func cosineOverlapThresholdFunc(x int, t float64) int {
	return int(math.Sqrt(float64(x)) * t)
}

var cosineOverlapIndexThresholdFunc = cosineOverlapThresholdFunc

// This is used for query only.
func containmentOverlapThresholdFunc(x int, t float64) int {
	return max(1, int(float64(x)*t))
}

func containmentOverlapIndexThresholdFunc(x int, t float64) int {
	return 1
}

// positionFilter reports whether, given positions p1/p2 into s1/s2, the
// remaining suffixes can still reach similarity threshold t.
type positionFilter func([]int, []int, int, int, float64) bool

func jaccardPositionFilter(s1, s2 []int, p1, p2 int, t float64) bool {
	l1, l2 := len(s1), len(s2)
	return float64(min(l1-p1, l2-p2))/float64(max(l1, l2)) >= t
}

func containmentPositionFilter(s1, s2 []int, p1, p2 int, t float64) bool {
	l1, l2 := len(s1), len(s2)
	return float64(min(l1-p1, l2-p2))/float64(l1) >= t
}

func cosinePositionFilter(s1, s2 []int, p1, p2 int, t float64) bool {
	l1, l2 := len(s1), len(s2)
	return float64(min(l1-p1, l2-p2))/math.Sqrt(float64(max(l1, l2))) >= t
}

// Lookup tables keyed by similarity-function name.
var similarityFuncs = map[string]function{
	"jaccard":     jaccard,
	"containment": containment,
	"cosine":      cosine,
}

var overlapThresholdFuncs = map[string]overlapThresholdFunction{
	"jaccard":     jaccardOverlapThresholdFunc,
	"containment": containmentOverlapThresholdFunc,
	"cosine":      cosineOverlapThresholdFunc,
}

var overlapIndexThresholdFuncs = map[string]overlapThresholdFunction{
	"jaccard":     jaccardOverlapIndexThresholdFunc,
	"containment": containmentOverlapIndexThresholdFunc,
	"cosine":      cosineOverlapIndexThresholdFunc,
}

var positionFilterFuncs = map[string]positionFilter{
	"jaccard":     jaccardPositionFilter,
	"containment": containmentPositionFilter,
	"cosine":      cosinePositionFilter,
}

// symmetricSimilarityFuncs records which measures satisfy f(a,b) == f(b,a).
var symmetricSimilarityFuncs = map[string]bool{
	"jaccard":     true,
	"containment": false,
	"cosine":      true,
}
similarity.go
0.814311
0.443962
similarity.go
starcoder
/* Package condition: the "metadata" condition checks a metadata key of a message part against one of several operators. init registers the TypeMetadata constructor together with its embedded markdown documentation; this line covers the enum, equals, equals_cs, exists, greater_than and has_prefix operator descriptions. The description is a runtime string and must not be reflowed. */
package condition import ( "errors" "fmt" "regexp" "strconv" "strings" "github.com/Jeffail/benthos/lib/log" "github.com/Jeffail/benthos/lib/metrics" "github.com/Jeffail/benthos/lib/types" radix "github.com/armon/go-radix" "github.com/spf13/cast" ) //------------------------------------------------------------------------------ func init() { Constructors[TypeMetadata] = TypeSpec{ constructor: NewMetadata, description: ` Metadata is a condition that checks metadata keys of a message part against an operator from the following list: ### ` + "`enum`" + ` Checks whether the contents of a metadata key matches one of the defined enum values. ` + "```yaml" + ` metadata: operator: enum part: 0 key: foo arg: - bar - baz - qux - quux ` + "```" + ` ### ` + "`equals`" + ` Checks whether the contents of a metadata key matches an argument. This operator is case insensitive. ` + "```yaml" + ` metadata: operator: equals part: 0 key: foo arg: bar ` + "```" + ` ### ` + "`equals_cs`" + ` Checks whether the contents of a metadata key matches an argument. This operator is case sensitive. ` + "```yaml" + ` metadata: operator: equals_cs part: 0 key: foo arg: BAR ` + "```" + ` ### ` + "`exists`" + ` Checks whether a metadata key exists. ` + "```yaml" + ` metadata: operator: exists part: 0 key: foo ` + "```" + ` ### ` + "`greater_than`" + ` Checks whether the contents of a metadata key, parsed as a floating point number, is greater than an argument. Returns false if the metadata value cannot be parsed into a number. ` + "```yaml" + ` metadata: operator: greater_than part: 0 key: foo arg: 3 ` + "```" + ` ### ` + "`has_prefix`" + ` Checks whether the contents of a metadata key match one of the provided prefixes. The arg field can either be a singular prefix string or a list of prefixes.
/* Documentation continued: less_than, regexp_partial and regexp_exact operator descriptions (NOTE(review): the regexp_exact example's yaml still reads "operator: regexp_partial" — a doc-string defect that cannot be corrected here without changing a runtime string). Then: ErrInvalidMetadataOperator (returned for unknown operator names), the MetadataConfig struct (operator / part index / key / free-form arg), and the doc comment for NewMetadataConfig, whose body begins on the next (truncated) line. */
` + "```yaml" + ` metadata: operator: has_prefix part: 0 key: foo arg: - foo - bar - baz ` + "```" + ` ### ` + "`less_than`" + ` Checks whether the contents of a metadata key, parsed as a floating point number, is less than an argument. Returns false if the metadata value cannot be parsed into a number. ` + "```yaml" + ` metadata: operator: less_than part: 0 key: foo arg: 3 ` + "```" + ` ### ` + "`regexp_partial`" + ` Checks whether any section of the contents of a metadata key matches a regular expression (RE2 syntax). ` + "```yaml" + ` metadata: operator: regexp_partial part: 0 key: foo arg: "1[a-z]2" ` + "```" + ` ### ` + "`regexp_exact`" + ` Checks whether the contents of a metadata key exactly matches a regular expression (RE2 syntax). ` + "```yaml" + ` metadata: operator: regexp_partial part: 0 key: foo arg: "1[a-z]2" ` + "```" + ` `, } } //------------------------------------------------------------------------------ // Errors for the metadata condition. var ( ErrInvalidMetadataOperator = errors.New("invalid metadata operator type") ) // MetadataConfig is a configuration struct containing fields for the metadata // condition. type MetadataConfig struct { Operator string `json:"operator" yaml:"operator"` Part int `json:"part" yaml:"part"` Key string `json:"key" yaml:"key"` Arg interface{} `json:"arg" yaml:"arg"` } // NewMetadataConfig returns a MetadataConfig with default values.
func NewMetadataConfig() MetadataConfig { return MetadataConfig{ Operator: "equals_cs", Part: 0, Key: "", Arg: "", } } //------------------------------------------------------------------------------ type metadataOperator func(md types.Metadata) bool func metadataEnumOperator(key string, arg interface{}) (metadataOperator, error) { entries, err := cast.ToStringSliceE(arg) if err != nil { return nil, fmt.Errorf("failed to parse argument as string slice: %v", err) } tree := radix.New() for _, entry := range entries { tree.Insert(entry, struct{}{}) } return func(md types.Metadata) bool { _, ok := tree.Get(md.Get(key)) return ok }, nil } func metadataEqualsCSOperator(key string, arg interface{}) (metadataOperator, error) { argStr, err := cast.ToStringE(arg) if err != nil { return nil, fmt.Errorf("failed to parse argument as string: %v", err) } return func(md types.Metadata) bool { return md.Get(key) == argStr }, nil } func metadataEqualsOperator(key string, arg interface{}) (metadataOperator, error) { argStr, err := cast.ToStringE(arg) if err != nil { return nil, fmt.Errorf("failed to parse argument as string: %v", err) } return func(md types.Metadata) bool { return strings.ToLower(md.Get(key)) == strings.ToLower(argStr) }, nil } func metadataExistsOperator(key string) metadataOperator { return func(md types.Metadata) bool { return len(md.Get(key)) > 0 } } func metadataGreaterThanOperator(key string, arg interface{}) (metadataOperator, error) { v, err := cast.ToFloat64E(arg) if err != nil { return nil, fmt.Errorf("failed to parse argument as float64: %v", err) } return func(md types.Metadata) bool { val, verr := strconv.ParseFloat(md.Get(key), 10) if verr != nil { return false } return val > v }, nil } func metadataHasPrefixOperator(key string, arg interface{}) (metadataOperator, error) { if prefix, ok := arg.(string); ok { return func(md types.Metadata) bool { return strings.HasPrefix(md.Get(key), prefix) }, nil } entries, err := cast.ToStringSliceE(arg) if err != nil 
{ return nil, fmt.Errorf("failed to parse argument as string or string slice: %v", err) } tree := radix.New() for _, entry := range entries { tree.Insert(entry, struct{}{}) } return func(md types.Metadata) bool { _, _, ok := tree.LongestPrefix(md.Get(key)) return ok }, nil } func metadataLessThanOperator(key string, arg interface{}) (metadataOperator, error) { v, err := cast.ToFloat64E(arg) if err != nil { return nil, fmt.Errorf("failed to parse argument as float64: %v", err) } return func(md types.Metadata) bool { val, verr := strconv.ParseFloat(md.Get(key), 10) if verr != nil { return false } return val < v }, nil } func metadataRegexpPartialOperator(key string, arg interface{}) (metadataOperator, error) { argStr, err := cast.ToStringE(arg) if err != nil { return nil, fmt.Errorf("failed to parse argument as string: %v", err) } compiled, err := regexp.Compile(argStr) if err != nil { return nil, err } return func(md types.Metadata) bool { return compiled.MatchString(md.Get(key)) }, nil } func metadataRegexpExactOperator(key string, arg interface{}) (metadataOperator, error) { argStr, err := cast.ToStringE(arg) if err != nil { return nil, fmt.Errorf("failed to parse argument as string: %v", err) } compiled, err := regexp.Compile(argStr) if err != nil { return nil, err } return func(md types.Metadata) bool { val := md.Get(key) return len(compiled.FindString(val)) == len(val) }, nil } func strToMetadataOperator(str, key string, arg interface{}) (metadataOperator, error) { switch str { case "enum": return metadataEnumOperator(key, arg) case "equals": return metadataEqualsOperator(key, arg) case "equals_cs": return metadataEqualsCSOperator(key, arg) case "exists": return metadataExistsOperator(key), nil case "greater_than": return metadataGreaterThanOperator(key, arg) case "has_prefix": return metadataHasPrefixOperator(key, arg) case "less_than": return metadataLessThanOperator(key, arg) case "regexp_partial": return metadataRegexpPartialOperator(key, arg) case 
"regexp_exact": return metadataRegexpExactOperator(key, arg) } return nil, ErrInvalidMetadataOperator } //------------------------------------------------------------------------------ // Metadata is a condition that checks message text against logical operators. type Metadata struct { stats metrics.Type operator metadataOperator part int mCount metrics.StatCounter mTrue metrics.StatCounter mFalse metrics.StatCounter } // NewMetadata returns a Metadata condition. func NewMetadata( conf Config, mgr types.Manager, log log.Modular, stats metrics.Type, ) (Type, error) { op, err := strToMetadataOperator(conf.Metadata.Operator, conf.Metadata.Key, conf.Metadata.Arg) if err != nil { return nil, fmt.Errorf("operator '%v': %v", conf.Metadata.Operator, err) } return &Metadata{ stats: stats, operator: op, part: conf.Metadata.Part, mCount: stats.GetCounter("count"), mTrue: stats.GetCounter("true"), mFalse: stats.GetCounter("false"), }, nil } //------------------------------------------------------------------------------ // Check attempts to check a message part against a configured condition. func (c *Metadata) Check(msg types.Message) bool { c.mCount.Incr(1) index := c.part lParts := msg.Len() if lParts == 0 { c.mFalse.Incr(1) return false } res := c.operator(msg.Get(index).Metadata()) if res { c.mTrue.Incr(1) } else { c.mFalse.Incr(1) } return res } //------------------------------------------------------------------------------
lib/condition/metadata.go
0.792625
0.759315
metadata.go
starcoder
package wsm

import (
	"color-thief/argsort"
	"color-thief/wu"
	"math"
)

// Histogram encoding constants: each 8-bit RGB channel is reduced to HistBits
// bits, giving HistSize buckets in total.
const (
	HistBits = 5
	Shift    = 8 - HistBits
	HistSize = 1 << (3 * HistBits)
)

// encode image pixels to 1d histogram with weight proportion to its frequency
// normalize by the total number of pixels
func getHistogram(src [][3]int, size float64, pixels *[HistSize][3]float64, hist *[HistSize]float64) {
	var ind, r, g, b, i int
	var inr, ing, inb int
	for i = range src {
		r = src[i][0]
		g = src[i][1]
		b = src[i][2]
		// Reduce each channel to HistBits bits and pack into a bucket index.
		inr = r >> Shift
		ing = g >> Shift
		inb = b >> Shift
		ind = (inr << (2 * HistBits)) + (ing << HistBits) + inb
		// The last pixel hashed to a bucket becomes its representative color.
		pixels[ind][0], pixels[ind][1], pixels[ind][2] = float64(r), float64(g), float64(b)
		hist[ind]++
	}
	// normalize weight by the number of pixels in the image
	for i = 0; i < HistSize; i++ {
		hist[i] /= size
	}
}

// WSM quantizes src to a palette of k colors using a weighted k-means over the
// encoded histogram, seeded with the result of Wu color quantization. The
// returned palette is ordered by descending cluster weight.
func WSM(src [][3]int, k int) [][3]int {
	// variables
	var centroids [][3]float64      // centroid list with size of k
	var d []float64                 // distance matrix
	var m []int                     // distance rank matrix
	var hist [HistSize]float64      // image encoded histogram
	var pixels [HistSize][3]float64 // encoded unique pixels
	var p2c [HistSize]int           // pointer to centroid index
	var cR, cG, cB, cW, cSize []float64 // use when computing new centroids
	var nR, nG, nB float64          // new centroid r,g,b
	var palette [][3]int            // palette container
	var cPix [3]float64             // pixel with float
	var pix [3]int                  // pixel with int
	var rank []int                  // palette usage count
	var dist, minDist, prevDist float64
	var loss, tempLoss float64
	var size, w float64
	var iter, i, j int
	var p, t int

	// get histogram
	size = float64(len(src))
	getHistogram(src, size, &pixels, &hist)

	// init cluster centers based on wu color quantization result
	palette = wu.QuantWu(src, k)

	// cannot produce enough color, create palette using color scheme
	// NOTE(review): despite the comment, this currently just returns the
	// short palette unchanged — confirm whether a fallback scheme was
	// intended here.
	if len(palette) < k {
		return palette
	}

	// init centroids
	centroids = make([][3]float64, k)
	for i, pix = range palette {
		centroids[i][0], centroids[i][1], centroids[i][2] = float64(pix[0]), float64(pix[1]), float64(pix[2])
	}

	// random assign centroids to each pixels
	for i = 0; i < HistSize; i++ {
		if hist[i] == 0 {
			continue
		}
		p2c[i] = i % k
	}

	loss = 1e6
	d = make([]float64, k*k)
	m = make([]int, k*k)
	cR = make([]float64, k)
	cG = make([]float64, k)
	cB = make([]float64, k)
	cW = make([]float64, k)
	cSize = make([]float64, k)

	// default 100 iterations for k-means
	for iter = 0; iter < 100; iter++ {
		// compute distance matrix
		for i = 0; i < k; i++ {
			for j = i + 1; j < k; j++ {
				dist = distance(&centroids[i], &centroids[j])
				d[i*k+j], d[j*k+i] = dist, dist
			}
		}

		// Construct a K × K matrix M in which row i is a permutation of 1, 2, . . . , K that
		// represents the clusters in increasing order of distance of their centers from c_i
		for i = 0; i < k; i++ {
			rank = argsort.Quicksort(d[i*k : i*k+k])
			copy(m[i*k:i*k+k], rank)
		}

		// Assignment step: move each occupied bucket to its nearest centroid,
		// visiting candidate centroids in increasing center-to-center distance.
		for i, w = range hist {
			if w == 0 {
				continue
			}
			p = p2c[i]
			cPix = pixels[i]
			dist = distance(&cPix, &centroids[p])
			minDist, prevDist = dist, dist
			for j = 1; j < k; j++ {
				t = m[p*k+j]
				// NOTE(review): with non-squared Euclidean distances the
				// classic elimination bound is 2*d(x, c_p); 4* is a more
				// conservative cutoff — confirm intended.
				if d[p*k+t] >= 4*prevDist {
					break // There can be no other closer center. Stop checking
				}
				dist = distance(&cPix, &centroids[t])
				if dist <= minDist {
					minDist = dist
					p2c[i] = t
				}
			}
		}

		// reset matrix
		for i = 0; i < k; i++ {
			cR[i], cG[i], cB[i], cW[i], cSize[i] = 0, 0, 0, 0, 0
		}

		// recalculate the cluster centers
		for i, w = range hist {
			if w == 0 {
				continue
			}
			p = p2c[i]
			cR[p] += pixels[i][0] * w // r
			cG[p] += pixels[i][1] * w // g
			cB[p] += pixels[i][2] * w // b
			cW[p] += w
			cSize[p] += w * size
		}

		// compute new center value (weighted mean of assigned buckets)
		for i = 0; i < k; i++ {
			nR = cR[i] / cW[i]
			nG = cG[i] / cW[i]
			nB = cB[i] / cW[i]
			centroids[i][0], centroids[i][1], centroids[i][2] = nR, nG, nB
		}

		// compute loss
		tempLoss = 0
		for i, w = range hist {
			if w == 0 {
				continue
			}
			p = p2c[i]
			cPix = pixels[i]
			dist = distance(&cPix, &centroids[p])
			tempLoss += dist
		}
		// Stop once the loss has converged (improvement below 1e-3).
		if loss-tempLoss < 1e-3 {
			break
		}
		loss = tempLoss
	}

	// Emit centroids ordered by descending cluster weight.
	rank = argsort.Quicksort(cSize)
	for i = 0; i < k; i++ {
		cPix = centroids[rank[k-1-i]]
		palette[i][0], palette[i][1], palette[i][2] = int(cPix[0]), int(cPix[1]), int(cPix[2])
	}
	return palette
}

// distance returns the Euclidean distance between two RGB points.
func distance(p1, p2 *[3]float64) float64 {
	dist := (p1[0]-p2[0])*(p1[0]-p2[0]) + (p1[1]-p2[1])*(p1[1]-p2[1]) + (p1[2]-p2[2])*(p1[2]-p2[2])
	return math.Sqrt(dist)
}
wsm/wsm.go
0.605682
0.442938
wsm.go
starcoder
package day17

import (
	"fmt"
	"io"

	"github.com/mjm/advent-of-code-2019/pkg/point"
)

// Canvas is an infinitely paintable 2D surface.
type Canvas struct {
	paint     map[point.Point2D]int
	minCorner point.Point2D
	maxCorner point.Point2D
}

// NewCanvas creates an empty canvas.
func NewCanvas() *Canvas {
	return &Canvas{paint: map[point.Point2D]int{}}
}

// Paint paints a point with a color, extending the bounds of the canvas if needed.
func (c *Canvas) Paint(p point.Point2D, color int) {
	c.paint[p] = color
	c.adjustSizeIfNeeded(p)
}

// At gets the color painted at a point.
func (c *Canvas) At(p point.Point2D) int {
	return c.paint[p]
}

// Count returns the number of points that have been painted at all.
func (c *Canvas) Count() int {
	return len(c.paint)
}

// CountColor returns the number of points painted with the given color.
func (c *Canvas) CountColor(color int) int {
	total := 0
	for _, painted := range c.paint {
		if painted == color {
			total++
		}
	}
	return total
}

// Draw calls the provided drawFn for every square on the canvas, regardless
// of whether it has been painted to.
func (c *Canvas) Draw(drawFn func(int, int, int)) {
	w, h := c.Width(), c.Height()
	for y := 0; y < h; y++ {
		for x := 0; x < w; x++ {
			// Translate from canvas-relative to absolute coordinates.
			p := point.Point2D{X: x + c.minCorner.X, Y: y + c.minCorner.Y}
			drawFn(x, y, c.paint[p])
		}
	}
}

// Width returns the width of the canvas.
func (c *Canvas) Width() int { return c.maxCorner.X - c.minCorner.X + 1 }

// Height returns the height of the canvas.
func (c *Canvas) Height() int { return c.maxCorner.Y - c.minCorner.Y + 1 }

// adjustSizeIfNeeded grows the bounding box so that it includes p.
func (c *Canvas) adjustSizeIfNeeded(p point.Point2D) {
	if p.X < c.minCorner.X {
		c.minCorner.X = p.X
	}
	if p.Y < c.minCorner.Y {
		c.minCorner.Y = p.Y
	}
	if p.X > c.maxCorner.X {
		c.maxCorner.X = p.X
	}
	if p.Y > c.maxCorner.Y {
		c.maxCorner.Y = p.Y
	}
}

// PrintTo prints the image on the canvas to the given writer.
func (c *Canvas) PrintTo(w io.Writer) {
	for y := c.minCorner.Y; y <= c.maxCorner.Y; y++ {
		for x := c.minCorner.X; x <= c.maxCorner.X; x++ {
			// Zero (unpainted) squares render as a blank space.
			if val := c.paint[point.Point2D{X: x, Y: y}]; val == 0 {
				fmt.Fprint(w, " ")
			} else {
				fmt.Fprintf(w, "%c", rune(val))
			}
		}
		fmt.Fprintln(w)
	}
}
day17/canvas.go
0.762866
0.436022
canvas.go
starcoder
// Life contains all data about the current game.
type Life struct {
	Generation int // number of generations simulated so far
	Width      int
	Height     int
	data       [2][]uint8 // double buffer of cells; 0 dead, 1 alive
	readBuf    int        // index of the buffer currently being read
}

// New returns a new Life structure with every cell dead.
func New(width, height int) *Life {
	n := width * height
	return &Life{
		Generation: 0,
		data:       [2][]uint8{make([]uint8, n, n), make([]uint8, n, n)},
		Width:      width,
		Height:     height,
	}
}

// Randomize fills the life grid randomly and resets the generation counter.
func (l *Life) Randomize() {
	rand.Seed(time.Now().UTC().UnixNano())
	buf := l.data[l.readBuf]
	for i := range buf {
		buf[i] = uint8(rand.Uint32() & 1)
	}
	l.Generation = 0
}

// readVal returns the value at the given x, y coordinate on the read buffer.
func (l *Life) readVal(x, y int) uint8 {
	return l.data[l.readBuf][y*l.Width+x]
}

// Steps runs a number of steps.
func (l *Life) Steps(n int) {
	for ; n > 0; n-- {
		l.Step()
	}
}

// wrap shifts v by d with toroidal wraparound on [0, limit).
func wrap(v, d, limit int) int {
	v += d
	if v < 0 {
		return v + limit
	}
	if v >= limit {
		return v - limit
	}
	return v
}

// Step runs a single generation, writing the result into the inactive buffer
// and then flipping the buffers.
func (l *Life) Step() {
	l.Generation++
	writeBuf := l.data[1-l.readBuf]

	for y := 0; y < l.Height; y++ {
		yUp := wrap(y, -1, l.Height)
		yDown := wrap(y, 1, l.Height)
		for x := 0; x < l.Width; x++ {
			xLeft := wrap(x, -1, l.Width)
			xRight := wrap(x, 1, l.Width)

			// Sum the eight toroidal neighbors.
			neighbors := l.readVal(xLeft, yUp) + l.readVal(x, yUp) + l.readVal(xRight, yUp) +
				l.readVal(xLeft, y) + l.readVal(xRight, y) +
				l.readVal(xLeft, yDown) + l.readVal(x, yDown) + l.readVal(xRight, yDown)

			// Conway's rules: a dead cell is born with exactly 3 neighbors;
			// a live cell survives with 2 or 3.
			alive := l.readVal(x, y) != 0
			var next uint8
			if neighbors == 3 || (alive && neighbors == 2) {
				next = 1
			}
			writeBuf[y*l.Width+x] = next
		}
	}

	// Pageflip: the freshly written buffer becomes readable.
	l.readBuf = 1 - l.readBuf
}

// Get returns a reference to the current buffer. Treat this as read-only!
func (l *Life) Get() []uint8 {
	return l.data[l.readBuf]
}
life/life.go
0.598899
0.426979
life.go
starcoder
package parquet

import (
	"github.com/segmentio/parquet-go/deprecated"
	"github.com/segmentio/parquet-go/encoding/plain"
	"github.com/segmentio/parquet-go/format"
	"github.com/segmentio/parquet-go/internal/bits"
)

// The *PageIndex types below expose a single in-memory page as a one-page
// column index: NumPages is always 1, pages carry no nulls, and the page's
// min/max bounds are serialized with the plain encoding. IsAscending and
// IsDescending derive from comparing the page's bounds.

type booleanPageIndex struct{ page *booleanPage }

func (index booleanPageIndex) NumPages() int       { return 1 }
func (index booleanPageIndex) NullCount(int) int64 { return 0 }
func (index booleanPageIndex) NullPage(int) bool   { return false }
func (index booleanPageIndex) MinValue(int) []byte { return plain.Boolean(index.page.min()) }
func (index booleanPageIndex) MaxValue(int) []byte { return plain.Boolean(index.page.max()) }
func (index booleanPageIndex) IsAscending() bool   { return compareBool(index.page.bounds()) < 0 }
func (index booleanPageIndex) IsDescending() bool  { return compareBool(index.page.bounds()) > 0 }

type int32PageIndex struct{ page *int32Page }

func (index int32PageIndex) NumPages() int       { return 1 }
func (index int32PageIndex) NullCount(int) int64 { return 0 }
func (index int32PageIndex) NullPage(int) bool   { return false }
func (index int32PageIndex) MinValue(int) []byte { return plain.Int32(index.page.min()) }
func (index int32PageIndex) MaxValue(int) []byte { return plain.Int32(index.page.max()) }
func (index int32PageIndex) IsAscending() bool   { return compareInt32(index.page.bounds()) < 0 }
func (index int32PageIndex) IsDescending() bool  { return compareInt32(index.page.bounds()) > 0 }

type int64PageIndex struct{ page *int64Page }

func (index int64PageIndex) NumPages() int       { return 1 }
func (index int64PageIndex) NullCount(int) int64 { return 0 }
func (index int64PageIndex) NullPage(int) bool   { return false }
func (index int64PageIndex) MinValue(int) []byte { return plain.Int64(index.page.min()) }
func (index int64PageIndex) MaxValue(int) []byte { return plain.Int64(index.page.max()) }
func (index int64PageIndex) IsAscending() bool   { return compareInt64(index.page.bounds()) < 0 }
func (index int64PageIndex) IsDescending() bool  { return compareInt64(index.page.bounds()) > 0 }

type int96PageIndex struct{ page *int96Page }

func (index int96PageIndex) NumPages() int       { return 1 }
func (index int96PageIndex) NullCount(int) int64 { return 0 }
func (index int96PageIndex) NullPage(int) bool   { return false }
func (index int96PageIndex) MinValue(int) []byte { return plain.Int96(index.page.min()) }
func (index int96PageIndex) MaxValue(int) []byte { return plain.Int96(index.page.max()) }
func (index int96PageIndex) IsAscending() bool   { return compareInt96(index.page.bounds()) < 0 }
func (index int96PageIndex) IsDescending() bool  { return compareInt96(index.page.bounds()) > 0 }

type floatPageIndex struct{ page *floatPage }

func (index floatPageIndex) NumPages() int       { return 1 }
func (index floatPageIndex) NullCount(int) int64 { return 0 }
func (index floatPageIndex) NullPage(int) bool   { return false }
func (index floatPageIndex) MinValue(int) []byte { return plain.Float(index.page.min()) }
func (index floatPageIndex) MaxValue(int) []byte { return plain.Float(index.page.max()) }
func (index floatPageIndex) IsAscending() bool   { return compareFloat32(index.page.bounds()) < 0 }
func (index floatPageIndex) IsDescending() bool  { return compareFloat32(index.page.bounds()) > 0 }

type doublePageIndex struct{ page *doublePage }

func (index doublePageIndex) NumPages() int       { return 1 }
func (index doublePageIndex) NullCount(int) int64 { return 0 }
func (index doublePageIndex) NullPage(int) bool   { return false }
func (index doublePageIndex) MinValue(int) []byte { return plain.Double(index.page.min()) }
func (index doublePageIndex) MaxValue(int) []byte { return plain.Double(index.page.max()) }
func (index doublePageIndex) IsAscending() bool   { return compareFloat64(index.page.bounds()) < 0 }
func (index doublePageIndex) IsDescending() bool  { return compareFloat64(index.page.bounds()) > 0 }

// NOTE(review): unlike the signed variants above, the page field here is held
// by value rather than by pointer — confirm this is intentional.
type uint32PageIndex struct{ page uint32Page }

func (index uint32PageIndex) NumPages() int       { return 1 }
func (index uint32PageIndex) NullCount(int) int64 { return 0 }
func (index uint32PageIndex) NullPage(int) bool   { return false }
func (index uint32PageIndex) MinValue(int) []byte { return plain.Int32(int32(index.page.min())) }
func (index uint32PageIndex) MaxValue(int) []byte { return plain.Int32(int32(index.page.max())) }
func (index uint32PageIndex) IsAscending() bool   { return compareUint32(index.page.bounds()) < 0 }
func (index uint32PageIndex) IsDescending() bool  { return compareUint32(index.page.bounds()) > 0 }

type uint64PageIndex struct{ page uint64Page }

func (index uint64PageIndex) NumPages() int       { return 1 }
func (index uint64PageIndex) NullCount(int) int64 { return 0 }
func (index uint64PageIndex) NullPage(int) bool   { return false }
func (index uint64PageIndex) MinValue(int) []byte { return plain.Int64(int64(index.page.min())) }
func (index uint64PageIndex) MaxValue(int) []byte { return plain.Int64(int64(index.page.max())) }
func (index uint64PageIndex) IsAscending() bool   { return compareUint64(index.page.bounds()) < 0 }
func (index uint64PageIndex) IsDescending() bool  { return compareUint64(index.page.bounds()) > 0 }

// The *ColumnIndexer types below accumulate per-page min/max bounds as pages
// are indexed, then serialize them into a format.ColumnIndex. Reset reuses the
// slices' capacity; ColumnIndex encodes the bounds as fixed-length byte arrays
// of the type's width and reports the ordering of the accumulated bounds.

type booleanColumnIndexer struct {
	baseColumnIndexer
	minValues []bool
	maxValues []bool
}

func newBooleanColumnIndexer() *booleanColumnIndexer {
	return new(booleanColumnIndexer)
}

func (i *booleanColumnIndexer) Reset() {
	i.reset()
	i.minValues = i.minValues[:0]
	i.maxValues = i.maxValues[:0]
}

func (i *booleanColumnIndexer) IndexPage(numValues, numNulls int64, min, max Value) {
	i.observe(numValues, numNulls)
	i.minValues = append(i.minValues, min.Boolean())
	i.maxValues = append(i.maxValues, max.Boolean())
}

func (i *booleanColumnIndexer) ColumnIndex() format.ColumnIndex {
	return i.columnIndex(
		splitFixedLenByteArrayList(1, bits.BoolToBytes(i.minValues)),
		splitFixedLenByteArrayList(1, bits.BoolToBytes(i.maxValues)),
		bits.OrderOfBool(i.minValues),
		bits.OrderOfBool(i.maxValues),
	)
}

type int32ColumnIndexer struct {
	baseColumnIndexer
	minValues []int32
	maxValues []int32
}

func newInt32ColumnIndexer() *int32ColumnIndexer {
	return new(int32ColumnIndexer)
}

func (i *int32ColumnIndexer) Reset() {
	i.reset()
	i.minValues = i.minValues[:0]
	i.maxValues = i.maxValues[:0]
}

func (i *int32ColumnIndexer) IndexPage(numValues, numNulls int64, min, max Value) {
	i.observe(numValues, numNulls)
	i.minValues = append(i.minValues, min.Int32())
	i.maxValues = append(i.maxValues, max.Int32())
}

func (i *int32ColumnIndexer) ColumnIndex() format.ColumnIndex {
	return i.columnIndex(
		splitFixedLenByteArrayList(4, bits.Int32ToBytes(i.minValues)),
		splitFixedLenByteArrayList(4, bits.Int32ToBytes(i.maxValues)),
		bits.OrderOfInt32(i.minValues),
		bits.OrderOfInt32(i.maxValues),
	)
}

type int64ColumnIndexer struct {
	baseColumnIndexer
	minValues []int64
	maxValues []int64
}

func newInt64ColumnIndexer() *int64ColumnIndexer {
	return new(int64ColumnIndexer)
}

func (i *int64ColumnIndexer) Reset() {
	i.reset()
	i.minValues = i.minValues[:0]
	i.maxValues = i.maxValues[:0]
}

func (i *int64ColumnIndexer) IndexPage(numValues, numNulls int64, min, max Value) {
	i.observe(numValues, numNulls)
	i.minValues = append(i.minValues, min.Int64())
	i.maxValues = append(i.maxValues, max.Int64())
}

func (i *int64ColumnIndexer) ColumnIndex() format.ColumnIndex {
	return i.columnIndex(
		splitFixedLenByteArrayList(8, bits.Int64ToBytes(i.minValues)),
		splitFixedLenByteArrayList(8, bits.Int64ToBytes(i.maxValues)),
		bits.OrderOfInt64(i.minValues),
		bits.OrderOfInt64(i.maxValues),
	)
}

type int96ColumnIndexer struct {
	baseColumnIndexer
	minValues []deprecated.Int96
	maxValues []deprecated.Int96
}

func newInt96ColumnIndexer() *int96ColumnIndexer {
	return new(int96ColumnIndexer)
}

func (i *int96ColumnIndexer) Reset() {
	i.reset()
	i.minValues = i.minValues[:0]
	i.maxValues = i.maxValues[:0]
}

func (i *int96ColumnIndexer) IndexPage(numValues, numNulls int64, min, max Value) {
	i.observe(numValues, numNulls)
	i.minValues = append(i.minValues, min.Int96())
	i.maxValues = append(i.maxValues, max.Int96())
}

func (i *int96ColumnIndexer) ColumnIndex() format.ColumnIndex {
	return i.columnIndex(
		splitFixedLenByteArrayList(12, deprecated.Int96ToBytes(i.minValues)),
		splitFixedLenByteArrayList(12, deprecated.Int96ToBytes(i.maxValues)),
		deprecated.OrderOfInt96(i.minValues),
		deprecated.OrderOfInt96(i.maxValues),
	)
}

type floatColumnIndexer struct {
	baseColumnIndexer
	minValues []float32
	maxValues []float32
}

func newFloatColumnIndexer() *floatColumnIndexer {
	return new(floatColumnIndexer)
}

func (i *floatColumnIndexer) Reset() {
	i.reset()
	i.minValues = i.minValues[:0]
	i.maxValues = i.maxValues[:0]
}

func (i *floatColumnIndexer) IndexPage(numValues, numNulls int64, min, max Value) {
	i.observe(numValues, numNulls)
	i.minValues = append(i.minValues, min.Float())
	i.maxValues = append(i.maxValues, max.Float())
}

func (i *floatColumnIndexer) ColumnIndex() format.ColumnIndex {
	return i.columnIndex(
		splitFixedLenByteArrayList(4, bits.Float32ToBytes(i.minValues)),
		splitFixedLenByteArrayList(4, bits.Float32ToBytes(i.maxValues)),
		bits.OrderOfFloat32(i.minValues),
		bits.OrderOfFloat32(i.maxValues),
	)
}

type doubleColumnIndexer struct {
	baseColumnIndexer
	minValues []float64
	maxValues []float64
}

func newDoubleColumnIndexer() *doubleColumnIndexer {
	return new(doubleColumnIndexer)
}

func (i *doubleColumnIndexer) Reset() {
	i.reset()
	i.minValues = i.minValues[:0]
	i.maxValues = i.maxValues[:0]
}

func (i *doubleColumnIndexer) IndexPage(numValues, numNulls int64, min, max Value) {
	i.observe(numValues, numNulls)
	i.minValues = append(i.minValues, min.Double())
	i.maxValues = append(i.maxValues, max.Double())
}

func (i *doubleColumnIndexer) ColumnIndex() format.ColumnIndex {
	return i.columnIndex(
		splitFixedLenByteArrayList(8, bits.Float64ToBytes(i.minValues)),
		splitFixedLenByteArrayList(8, bits.Float64ToBytes(i.maxValues)),
		bits.OrderOfFloat64(i.minValues),
		bits.OrderOfFloat64(i.maxValues),
	)
}

// The unsigned indexers embed the corresponding signed indexer for storage and
// only override ColumnIndex to reinterpret and order the values as unsigned.

type uint32ColumnIndexer struct{ *int32ColumnIndexer }

func newUint32ColumnIndexer() uint32ColumnIndexer {
	return uint32ColumnIndexer{newInt32ColumnIndexer()}
}

func (i uint32ColumnIndexer) ColumnIndex() format.ColumnIndex {
	minValues := bits.Int32ToUint32(i.minValues)
	maxValues := bits.Int32ToUint32(i.maxValues)
	return i.columnIndex(
		splitFixedLenByteArrayList(4, bits.Uint32ToBytes(minValues)),
		splitFixedLenByteArrayList(4, bits.Uint32ToBytes(maxValues)),
		bits.OrderOfUint32(minValues),
		bits.OrderOfUint32(maxValues),
	)
}

type uint64ColumnIndexer struct{ *int64ColumnIndexer }

func newUint64ColumnIndexer() uint64ColumnIndexer {
	return uint64ColumnIndexer{newInt64ColumnIndexer()}
}

func (i uint64ColumnIndexer) ColumnIndex() format.ColumnIndex {
	minValues := bits.Int64ToUint64(i.minValues)
	maxValues := bits.Int64ToUint64(i.maxValues)
	return i.columnIndex(
		splitFixedLenByteArrayList(8, bits.Uint64ToBytes(minValues)),
		splitFixedLenByteArrayList(8, bits.Uint64ToBytes(maxValues)),
		bits.OrderOfUint64(minValues),
		bits.OrderOfUint64(maxValues),
	)
}
column_index_default.go
0.583559
0.423577
column_index_default.go
starcoder
package interactors

import (
	"fmt"

	"github.com/CESARBR/knot-babeltower/pkg/thing/entities"
	"github.com/go-playground/validator"
)

// schemaType describes the valid valueType and unit for a sensor typeID. Each
// field holds either an int (a single exact value) or an interval (an
// inclusive range of valid values).
type schemaType struct {
	valueType interface{}
	unit      interface{}
}

// interval is an inclusive [min, max] range.
type interval struct {
	min int
	max int
}

// rules reference table: https://knot-devel.cesar.org.br/doc/thing/unit-type-value.html
var rules = map[int]schemaType{
	0x0000: {valueType: 4, unit: 0},              // RAW => NONE
	0x0001: {valueType: 1, unit: interval{1, 3}}, // INT => VOLTAGE
	0x0002: {valueType: 1, unit: interval{1, 2}}, // INT => CURRENT
	0x0003: {valueType: 1, unit: 1},              // INT => RESISTENCE
	0x0004: {valueType: 1, unit: interval{1, 3}}, // INT => POWER
	0x0005: {valueType: 1, unit: interval{1, 3}}, // INT => TEMPERATURE
	0x0006: {valueType: 1, unit: 1},              // INT => RELATIVE_HUMIDITY
	0x0007: {valueType: 1, unit: interval{1, 3}}, // INT => LUMINOSITY
	0x0008: {valueType: 1, unit: interval{1, 3}}, // INT => TIME
	0x0009: {valueType: 1, unit: interval{1, 4}}, // INT => MASS
	0x000A: {valueType: 1, unit: interval{1, 3}}, // INT => PRESSURE
	0x000B: {valueType: 1, unit: interval{1, 4}}, // INT => DISTANCE
	0x000C: {valueType: 2, unit: interval{1, 2}}, // FLOAT => ANGLE
	0x000D: {valueType: 2, unit: interval{1, 4}}, // FLOAT => VOLUME
	0x000E: {valueType: 2, unit: interval{1, 3}}, // FLOAT => AREA
	0x000F: {valueType: 2, unit: 1},              // FLOAT => RAIN
	0x0010: {valueType: 2, unit: 1},              // FLOAT => DENSITY
	0x0011: {valueType: 2, unit: 1},              // FLOAT => LATITUDE
	0x0012: {valueType: 2, unit: 1},              // FLOAT => LONGITUDE
	0x0013: {valueType: 1, unit: interval{1, 4}}, // INT => SPEED
	0x0014: {valueType: 2, unit: interval{1, 6}}, // FLOAT => VOLUMEFLOW
	0x0015: {valueType: 1, unit: interval{1, 6}}, // INT => ENERGY
	0xFFF0: {valueType: 3, unit: 0},              // BOOL => PRESENCE
	0xFFF1: {valueType: 3, unit: 0},              // BOOL => SWITCH
	0xFFF2: {valueType: 4, unit: 0},              // RAW => COMMAND
	0xFF10: {valueType: 1, unit: 0},              // INT => ANALOG
	0xFFFF: {valueType: 4, unit: 0},              // RAW => INVALID
}

// UpdateSchema validates the new sensor schema list and updates it on the
// thing's service. The client is always notified of the outcome, including
// validation and proxy errors.
func (i *ThingInteractor) UpdateSchema(authorization, thingID string, schemaList []entities.Schema) error {
	if authorization == "" {
		return ErrAuthNotProvided
	}
	if thingID == "" {
		return ErrIDNotProvided
	}
	if schemaList == nil {
		return ErrSchemaNotProvided
	}

	if !i.isValidSchema(schemaList) {
		err := i.notifyClient(thingID, schemaList, ErrSchemaInvalid)
		return err
	}
	i.logger.Info("updateSchema: schema validated")

	err := i.thingProxy.UpdateSchema(authorization, thingID, schemaList)
	if err != nil {
		sendErr := i.notifyClient(thingID, schemaList, err)
		return sendErr
	}
	i.logger.Info("updateSchema: schema updated")

	err = i.notifyClient(thingID, schemaList, err)
	if err != nil {
		// TODO: handle error when publishing message to queue.
		return err
	}
	i.logger.Info("updateSchema: message sent to client")

	return nil
}

// isValidSchema reports whether every entry in schemaList passes the
// typeID/valueType/unit rules.
func (i *ThingInteractor) isValidSchema(schemaList []entities.Schema) bool {
	validate := validator.New()
	validate.RegisterStructValidation(schemaValidation, entities.Schema{})
	for _, schema := range schemaList {
		err := validate.Struct(schema)
		if err != nil {
			return false
		}
	}
	return true
}

// notifyClient publishes the update result (schema plus optional error) back
// to the client, wrapping any publish failure around the original error.
func (i *ThingInteractor) notifyClient(thingID string, schemaList []entities.Schema, err error) error {
	sendErr := i.publisher.PublishUpdatedSchema(thingID, schemaList, err)
	if sendErr != nil {
		if err != nil {
			return fmt.Errorf("error sending response to client: %v: %w", sendErr, err)
		}
		return fmt.Errorf("error sending response to client: %w", sendErr)
	}
	return err
}

// schemaValidation validates a single schema entry against the rules table.
func schemaValidation(sl validator.StructLevel) {
	schema := sl.Current().Interface().(entities.Schema)
	typeID := schema.TypeID

	// Valid typeIDs are 0x0000-0x0015, 0xFFF0-0xFFF2 and 0xFF10 (see rules;
	// 0xFFFF is deliberately rejected as INVALID).
	// BUGFIX: the previous upper bound of 15 (0x000F) wrongly rejected the
	// defined typeIDs 0x0010-0x0015 (DENSITY through ENERGY).
	if (typeID < 0 || 0x0015 < typeID) && (typeID < 0xfff0 || 0xfff2 < typeID) && typeID != 0xff10 {
		sl.ReportError(schema, "schema", "Type ID", "typeID", "false")
		return
	}

	if !isValidValueType(schema.TypeID, schema.ValueType) {
		sl.ReportError(schema, "schema", "Value Type", "valueType", "false")
		return
	}

	if !isValidUnit(schema.TypeID, schema.Unit) {
		sl.ReportError(schema, "schema", "Unit", "unit", "false")
	}
}

// isValidValueType reports whether valueType is allowed for typeID.
func isValidValueType(typeID, valueType int) bool {
	t := rules[typeID].valueType
	if t == nil {
		return false
	}
	switch v := t.(type) {
	case int:
		return valueType == v
	case interval:
		return v.min <= valueType && valueType <= v.max
	}
	return true
}

// isValidUnit reports whether unit is allowed for typeID.
func isValidUnit(typeID, unit int) bool {
	u := rules[typeID].unit
	if u == nil {
		return false
	}
	switch v := u.(type) {
	case int:
		return unit == v
	case interval:
		return v.min <= unit && unit <= v.max
	}
	return true
}
pkg/thing/interactors/update_schema.go
0.579995
0.433742
update_schema.go
starcoder
// GetAssetDetailsByAssetSymbolRISC Crypto Type Data
type GetAssetDetailsByAssetSymbolRISC struct {
	// Represents the percentage of the asset's current price against the its price from 1 hour ago.
	Var1HourPriceChangeInPercentage string `json:"1HourPriceChangeInPercentage"`
	// Represents the percentage of the asset's current price against the its price from 1 week ago.
	Var1WeekPriceChangeInPercentage string `json:"1WeekPriceChangeInPercentage"`
	// Represents the percentage of the asset's current price against the its price from 24 hours ago.
	Var24HoursPriceChangeInPercentage string `json:"24HoursPriceChangeInPercentage"`
	// Represents the trading volume of the asset for the time frame of 24 hours.
	Var24HoursTradingVolume string `json:"24HoursTradingVolume"`
	// Defines the type of the supported asset. This could be either "crypto" or "fiat".
	AssetType string `json:"assetType"`
	// Represents the amount of the asset that is circulating on the market and in public hands.
	CirculatingSupply string `json:"circulatingSupply"`
	// Defines the total market value of the asset's circulating supply in USD.
	MarketCapInUSD string `json:"marketCapInUSD"`
	// Represents the maximum amount of all coins of a specific asset that will ever exist in its lifetime.
	MaxSupply string `json:"maxSupply"`
}

// NewGetAssetDetailsByAssetSymbolRISC instantiates a new
// GetAssetDetailsByAssetSymbolRISC object with every required property set.
func NewGetAssetDetailsByAssetSymbolRISC(var1HourPriceChangeInPercentage string, var1WeekPriceChangeInPercentage string, var24HoursPriceChangeInPercentage string, var24HoursTradingVolume string, assetType string, circulatingSupply string, marketCapInUSD string, maxSupply string) *GetAssetDetailsByAssetSymbolRISC {
	return &GetAssetDetailsByAssetSymbolRISC{
		Var1HourPriceChangeInPercentage:   var1HourPriceChangeInPercentage,
		Var1WeekPriceChangeInPercentage:   var1WeekPriceChangeInPercentage,
		Var24HoursPriceChangeInPercentage: var24HoursPriceChangeInPercentage,
		Var24HoursTradingVolume:           var24HoursTradingVolume,
		AssetType:                         assetType,
		CirculatingSupply:                 circulatingSupply,
		MarketCapInUSD:                    marketCapInUSD,
		MaxSupply:                         maxSupply,
	}
}

// NewGetAssetDetailsByAssetSymbolRISCWithDefaults instantiates a new
// GetAssetDetailsByAssetSymbolRISC object with zero values; it does not
// guarantee that properties required by the API are set.
func NewGetAssetDetailsByAssetSymbolRISCWithDefaults() *GetAssetDetailsByAssetSymbolRISC {
	return &GetAssetDetailsByAssetSymbolRISC{}
}

// GetVar1HourPriceChangeInPercentage returns the Var1HourPriceChangeInPercentage field value.
func (o *GetAssetDetailsByAssetSymbolRISC) GetVar1HourPriceChangeInPercentage() string {
	if o == nil {
		return ""
	}
	return o.Var1HourPriceChangeInPercentage
}

// GetVar1HourPriceChangeInPercentageOk returns a pointer to the field value
// and true, or nil and false when the receiver is nil.
func (o *GetAssetDetailsByAssetSymbolRISC) GetVar1HourPriceChangeInPercentageOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Var1HourPriceChangeInPercentage, true
}

// SetVar1HourPriceChangeInPercentage sets field value.
func (o *GetAssetDetailsByAssetSymbolRISC) SetVar1HourPriceChangeInPercentage(v string) {
	o.Var1HourPriceChangeInPercentage = v
}

// GetVar1WeekPriceChangeInPercentage returns the Var1WeekPriceChangeInPercentage field value.
func (o *GetAssetDetailsByAssetSymbolRISC) GetVar1WeekPriceChangeInPercentage() string {
	if o == nil {
		return ""
	}
	return o.Var1WeekPriceChangeInPercentage
}

// GetVar1WeekPriceChangeInPercentageOk returns a pointer to the field value
// and true, or nil and false when the receiver is nil.
func (o *GetAssetDetailsByAssetSymbolRISC) GetVar1WeekPriceChangeInPercentageOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Var1WeekPriceChangeInPercentage, true
}

// SetVar1WeekPriceChangeInPercentage sets field value.
func (o *GetAssetDetailsByAssetSymbolRISC) SetVar1WeekPriceChangeInPercentage(v string) {
	o.Var1WeekPriceChangeInPercentage = v
}

// GetVar24HoursPriceChangeInPercentage returns the Var24HoursPriceChangeInPercentage field value.
func (o *GetAssetDetailsByAssetSymbolRISC) GetVar24HoursPriceChangeInPercentage() string {
	if o == nil {
		return ""
	}
	return o.Var24HoursPriceChangeInPercentage
}

// GetVar24HoursPriceChangeInPercentageOk returns a pointer to the field value
// and true, or nil and false when the receiver is nil.
func (o *GetAssetDetailsByAssetSymbolRISC) GetVar24HoursPriceChangeInPercentageOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Var24HoursPriceChangeInPercentage, true
}

// SetVar24HoursPriceChangeInPercentage sets field value.
func (o *GetAssetDetailsByAssetSymbolRISC) SetVar24HoursPriceChangeInPercentage(v string) {
	o.Var24HoursPriceChangeInPercentage = v
}

// GetVar24HoursTradingVolume returns the Var24HoursTradingVolume field value.
func (o *GetAssetDetailsByAssetSymbolRISC) GetVar24HoursTradingVolume() string {
	if o == nil {
		return ""
	}
	return o.Var24HoursTradingVolume
}

// GetVar24HoursTradingVolumeOk returns a pointer to the field value and true,
// or nil and false when the receiver is nil.
func (o *GetAssetDetailsByAssetSymbolRISC) GetVar24HoursTradingVolumeOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Var24HoursTradingVolume, true
}

// SetVar24HoursTradingVolume sets field value.
func (o *GetAssetDetailsByAssetSymbolRISC) SetVar24HoursTradingVolume(v string) {
	o.Var24HoursTradingVolume = v
}

// GetAssetType returns the AssetType field value.
func (o *GetAssetDetailsByAssetSymbolRISC) GetAssetType() string {
	if o == nil {
		return ""
	}
	return o.AssetType
}

// GetAssetTypeOk returns a pointer to the field value and true, or nil and
// false when the receiver is nil.
func (o *GetAssetDetailsByAssetSymbolRISC) GetAssetTypeOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.AssetType, true
}

// SetAssetType sets field value.
func (o *GetAssetDetailsByAssetSymbolRISC) SetAssetType(v string) {
	o.AssetType = v
}

// GetCirculatingSupply returns the CirculatingSupply field value.
func (o *GetAssetDetailsByAssetSymbolRISC) GetCirculatingSupply() string {
	if o == nil {
		return ""
	}
	return o.CirculatingSupply
}

// GetCirculatingSupplyOk returns a tuple with the CirculatingSupply field value
// and a boolean to check if the value has been set.
func (o *GetAssetDetailsByAssetSymbolRISC) GetCirculatingSupplyOk() (*string, bool) { if o == nil { return nil, false } return &o.CirculatingSupply, true } // SetCirculatingSupply sets field value func (o *GetAssetDetailsByAssetSymbolRISC) SetCirculatingSupply(v string) { o.CirculatingSupply = v } // GetMarketCapInUSD returns the MarketCapInUSD field value func (o *GetAssetDetailsByAssetSymbolRISC) GetMarketCapInUSD() string { if o == nil { var ret string return ret } return o.MarketCapInUSD } // GetMarketCapInUSDOk returns a tuple with the MarketCapInUSD field value // and a boolean to check if the value has been set. func (o *GetAssetDetailsByAssetSymbolRISC) GetMarketCapInUSDOk() (*string, bool) { if o == nil { return nil, false } return &o.MarketCapInUSD, true } // SetMarketCapInUSD sets field value func (o *GetAssetDetailsByAssetSymbolRISC) SetMarketCapInUSD(v string) { o.MarketCapInUSD = v } // GetMaxSupply returns the MaxSupply field value func (o *GetAssetDetailsByAssetSymbolRISC) GetMaxSupply() string { if o == nil { var ret string return ret } return o.MaxSupply } // GetMaxSupplyOk returns a tuple with the MaxSupply field value // and a boolean to check if the value has been set. 
func (o *GetAssetDetailsByAssetSymbolRISC) GetMaxSupplyOk() (*string, bool) { if o == nil { return nil, false } return &o.MaxSupply, true } // SetMaxSupply sets field value func (o *GetAssetDetailsByAssetSymbolRISC) SetMaxSupply(v string) { o.MaxSupply = v } func (o GetAssetDetailsByAssetSymbolRISC) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} if true { toSerialize["1HourPriceChangeInPercentage"] = o.Var1HourPriceChangeInPercentage } if true { toSerialize["1WeekPriceChangeInPercentage"] = o.Var1WeekPriceChangeInPercentage } if true { toSerialize["24HoursPriceChangeInPercentage"] = o.Var24HoursPriceChangeInPercentage } if true { toSerialize["24HoursTradingVolume"] = o.Var24HoursTradingVolume } if true { toSerialize["assetType"] = o.AssetType } if true { toSerialize["circulatingSupply"] = o.CirculatingSupply } if true { toSerialize["marketCapInUSD"] = o.MarketCapInUSD } if true { toSerialize["maxSupply"] = o.MaxSupply } return json.Marshal(toSerialize) } type NullableGetAssetDetailsByAssetSymbolRISC struct { value *GetAssetDetailsByAssetSymbolRISC isSet bool } func (v NullableGetAssetDetailsByAssetSymbolRISC) Get() *GetAssetDetailsByAssetSymbolRISC { return v.value } func (v *NullableGetAssetDetailsByAssetSymbolRISC) Set(val *GetAssetDetailsByAssetSymbolRISC) { v.value = val v.isSet = true } func (v NullableGetAssetDetailsByAssetSymbolRISC) IsSet() bool { return v.isSet } func (v *NullableGetAssetDetailsByAssetSymbolRISC) Unset() { v.value = nil v.isSet = false } func NewNullableGetAssetDetailsByAssetSymbolRISC(val *GetAssetDetailsByAssetSymbolRISC) *NullableGetAssetDetailsByAssetSymbolRISC { return &NullableGetAssetDetailsByAssetSymbolRISC{value: val, isSet: true} } func (v NullableGetAssetDetailsByAssetSymbolRISC) MarshalJSON() ([]byte, error) { return json.Marshal(v.value) } func (v *NullableGetAssetDetailsByAssetSymbolRISC) UnmarshalJSON(src []byte) error { v.isSet = true return json.Unmarshal(src, &v.value) }
model_get_asset_details_by_asset_symbol_risc.go
0.799677
0.403332
model_get_asset_details_by_asset_symbol_risc.go
starcoder
package config

// NOTE(review): this file appears to be auto-generated from the config struct
// definitions (a docgen-style table) — confirm the generator before editing by hand.

// DocField describes the documentation of one field of a configuration struct.
type DocField struct {
	// Name is the Go field name within the config struct.
	Name string
	// Type is the field's Go type rendered as a string, e.g. "Duration" or "[]string".
	Type string
	// Comment is the doc comment attached to the field; empty when the field is undocumented.
	Comment string
}

// Doc maps a configuration struct name (e.g. "FullNode", "StorageMiner") to
// the ordered list of documentation entries for its fields.
var Doc = map[string][]DocField{
	"API": []DocField{
		{Name: "ListenAddress", Type: "string", Comment: `Binding address for the Lotus API`},
		{Name: "RemoteListenAddress", Type: "string", Comment: ``},
		{Name: "Timeout", Type: "Duration", Comment: ``},
	},
	"Backup": []DocField{
		{Name: "DisableMetadataLog", Type: "bool", Comment: `Note that in case of metadata corruption it might be much harder to recover your node if metadata log is disabled`},
	},
	"BatchFeeConfig": []DocField{
		{Name: "Base", Type: "types.FIL", Comment: ``},
		{Name: "PerSector", Type: "types.FIL", Comment: ``},
	},
	"Chainstore": []DocField{
		{Name: "EnableSplitstore", Type: "bool", Comment: ``},
		{Name: "Splitstore", Type: "Splitstore", Comment: ``},
	},
	"Client": []DocField{
		{Name: "UseIpfs", Type: "bool", Comment: ``},
		{Name: "IpfsOnlineMode", Type: "bool", Comment: ``},
		{Name: "IpfsMAddr", Type: "string", Comment: ``},
		{Name: "IpfsUseForRetrieval", Type: "bool", Comment: ``},
		{Name: "SimultaneousTransfersForStorage", Type: "uint64", Comment: `The maximum number of simultaneous data transfers between the client and storage providers for storage deals`},
		{Name: "SimultaneousTransfersForRetrieval", Type: "uint64", Comment: `The maximum number of simultaneous data transfers between the client and storage providers for retrieval deals`},
		{Name: "OffChainRetrieval", Type: "bool", Comment: `Require that retrievals perform no on-chain operations.
Paid retrievals without existing payment channels with available funds will fail instead of automatically performing on-chain operations.`},
	},
	"Common": []DocField{
		{Name: "API", Type: "API", Comment: ``},
		{Name: "Backup", Type: "Backup", Comment: ``},
		{Name: "Logging", Type: "Logging", Comment: ``},
		{Name: "Libp2p", Type: "Libp2p", Comment: ``},
		{Name: "Pubsub", Type: "Pubsub", Comment: ``},
	},
	"DAGStoreConfig": []DocField{
		{Name: "RootDir", Type: "string", Comment: `Path to the dagstore root directory. This directory contains three subdirectories, which can be symlinked to alternative locations if need be: - ./transients: caches unsealed deals that have been fetched from the storage subsystem for serving retrievals. - ./indices: stores shard indices. - ./datastore: holds the KV store tracking the state of every shard known to the DAG store. Default value: <LOTUS_MARKETS_PATH>/dagstore (split deployment) or <LOTUS_MINER_PATH>/dagstore (monolith deployment)`},
		{Name: "MaxConcurrentIndex", Type: "int", Comment: `The maximum amount of indexing jobs that can run simultaneously. 0 means unlimited. Default value: 5.`},
		{Name: "MaxConcurrentReadyFetches", Type: "int", Comment: `The maximum amount of unsealed deals that can be fetched simultaneously from the storage subsystem. 0 means unlimited. Default value: 0 (unlimited).`},
		{Name: "MaxConcurrentUnseals", Type: "int", Comment: `The maximum amount of unseals that can be processed simultaneously from the storage subsystem. 0 means unlimited. Default value: 0 (unlimited).`},
		{Name: "MaxConcurrencyStorageCalls", Type: "int", Comment: `The maximum number of simultaneous inflight API calls to the storage subsystem. Default value: 100.`},
		{Name: "GCInterval", Type: "Duration", Comment: `The time between calls to periodic dagstore GC, in time.Duration string representation, e.g. 1m, 5m, 1h.
Default value: 1 minute.`},
	},
	"DealmakingConfig": []DocField{
		{Name: "ConsiderOnlineStorageDeals", Type: "bool", Comment: `When enabled, the miner can accept online deals`},
		{Name: "ConsiderOfflineStorageDeals", Type: "bool", Comment: `When enabled, the miner can accept offline deals`},
		{Name: "ConsiderOnlineRetrievalDeals", Type: "bool", Comment: `When enabled, the miner can accept retrieval deals`},
		{Name: "ConsiderOfflineRetrievalDeals", Type: "bool", Comment: `When enabled, the miner can accept offline retrieval deals`},
		{Name: "ConsiderVerifiedStorageDeals", Type: "bool", Comment: `When enabled, the miner can accept verified deals`},
		{Name: "ConsiderUnverifiedStorageDeals", Type: "bool", Comment: `When enabled, the miner can accept unverified deals`},
		{Name: "PieceCidBlocklist", Type: "[]cid.Cid", Comment: `A list of Data CIDs to reject when making deals`},
		{Name: "ExpectedSealDuration", Type: "Duration", Comment: `Maximum expected amount of time getting the deal into a sealed sector will take This includes the time the deal will need to get transferred and published before being assigned to a sector`},
		{Name: "MaxDealStartDelay", Type: "Duration", Comment: `Maximum amount of time proposed deal StartEpoch can be in future`},
		{Name: "PublishMsgPeriod", Type: "Duration", Comment: `When a deal is ready to publish, the amount of time to wait for more deals to be ready to publish before publishing them all as a batch`},
		{Name: "MaxDealsPerPublishMsg", Type: "uint64", Comment: `The maximum number of deals to include in a single PublishStorageDeals message`},
		{Name: "MaxProviderCollateralMultiplier", Type: "uint64", Comment: `The maximum collateral that the provider will put up against a deal, as a multiplier of the minimum collateral bound`},
		{Name: "MaxStagingDealsBytes", Type: "int64", Comment: `The maximum allowed disk usage size in bytes of staging deals not yet passed to the sealing node by the markets service.
0 is unlimited.`},
		{Name: "SimultaneousTransfersForStorage", Type: "uint64", Comment: `The maximum number of parallel online data transfers for storage deals`},
		{Name: "SimultaneousTransfersForStoragePerClient", Type: "uint64", Comment: `The maximum number of simultaneous data transfers from any single client for storage deals. Unset by default (0), and values higher than SimultaneousTransfersForStorage will have no effect; i.e. the total number of simultaneous data transfers across all storage clients is bound by SimultaneousTransfersForStorage regardless of this number.`},
		{Name: "SimultaneousTransfersForRetrieval", Type: "uint64", Comment: `The maximum number of parallel online data transfers for retrieval deals`},
		{Name: "StartEpochSealingBuffer", Type: "uint64", Comment: `Minimum start epoch buffer to give time for sealing of sector with deal.`},
		{Name: "Filter", Type: "string", Comment: `A command used for fine-grained evaluation of storage deals see https://docs.filecoin.io/mine/lotus/miner-configuration/#using-filters-for-fine-grained-storage-and-retrieval-deal-acceptance for more details`},
		{Name: "RetrievalFilter", Type: "string", Comment: `A command used for fine-grained evaluation of retrieval deals see https://docs.filecoin.io/mine/lotus/miner-configuration/#using-filters-for-fine-grained-storage-and-retrieval-deal-acceptance for more details`},
		{Name: "RetrievalPricing", Type: "*RetrievalPricing", Comment: ``},
	},
	"FeeConfig": []DocField{
		{Name: "DefaultMaxFee", Type: "types.FIL", Comment: ``},
	},
	"FullNode": []DocField{
		{Name: "Client", Type: "Client", Comment: ``},
		{Name: "Wallet", Type: "Wallet", Comment: ``},
		{Name: "Fees", Type: "FeeConfig", Comment: ``},
		{Name: "Chainstore", Type: "Chainstore", Comment: ``},
	},
	"IndexProviderConfig": []DocField{
		{Name: "Enable", Type: "bool", Comment: `Enable set whether to enable indexing announcement to the network and expose endpoints that allow indexer nodes to process
announcements. Enabled by default.`},
		{Name: "EntriesCacheCapacity", Type: "int", Comment: `EntriesCacheCapacity sets the maximum capacity to use for caching the indexing advertisement entries. Defaults to 1024 if not specified. The cache is evicted using LRU policy. The maximum storage used by the cache is a factor of EntriesCacheCapacity, EntriesChunkSize and the length of multihashes being advertised. For example, advertising 128-bit long multihashes with the default EntriesCacheCapacity, and EntriesChunkSize means the cache size can grow to 256MiB when full.`},
		{Name: "EntriesChunkSize", Type: "int", Comment: `EntriesChunkSize sets the maximum number of multihashes to include in a single entries chunk. Defaults to 16384 if not specified. Note that chunks are chained together for indexing advertisements that include more multihashes than the configured EntriesChunkSize.`},
		{Name: "TopicName", Type: "string", Comment: `TopicName sets the topic name on which the changes to the advertised content are announced. If not explicitly specified, the topic name is automatically inferred from the network name in following format: '/indexer/ingest/<network-name>' Defaults to empty, which implies the topic name is inferred from network name.`},
		{Name: "PurgeCacheOnStart", Type: "bool", Comment: `PurgeCacheOnStart sets whether to clear any cached entries chunks when the provider engine starts. By default, the cache is rehydrated from previously cached entries stored in datastore if any is present.`},
	},
	"Libp2p": []DocField{
		{Name: "ListenAddresses", Type: "[]string", Comment: `Binding address for the libp2p host - 0 means random port. Format: multiaddress; see https://multiformats.io/multiaddr/`},
		{Name: "AnnounceAddresses", Type: "[]string", Comment: `Addresses to explicitally announce to other peers.
If not specified, all interface addresses are announced Format: multiaddress`},
		{Name: "NoAnnounceAddresses", Type: "[]string", Comment: `Addresses to not announce Format: multiaddress`},
		{Name: "BootstrapPeers", Type: "[]string", Comment: ``},
		{Name: "ProtectedPeers", Type: "[]string", Comment: ``},
		{Name: "DisableNatPortMap", Type: "bool", Comment: `When not disabled (default), lotus asks NAT devices (e.g., routers), to open up an external port and forward it to the port lotus is running on. When this works (i.e., when your router supports NAT port forwarding), it makes the local lotus node accessible from the public internet`},
		{Name: "ConnMgrLow", Type: "uint", Comment: `ConnMgrLow is the number of connections that the basic connection manager will trim down to.`},
		{Name: "ConnMgrHigh", Type: "uint", Comment: `ConnMgrHigh is the number of connections that, when exceeded, will trigger a connection GC operation. Note: protected/recently formed connections don't count towards this limit.`},
		{Name: "ConnMgrGrace", Type: "Duration", Comment: `ConnMgrGrace is a time duration that new connections are immune from being closed by the connection manager.`},
	},
	"Logging": []DocField{
		{Name: "SubsystemLevels", Type: "map[string]string", Comment: `SubsystemLevels specify per-subsystem log levels`},
	},
	"MinerAddressConfig": []DocField{
		{Name: "PreCommitControl", Type: "[]string", Comment: `Addresses to send PreCommit messages from`},
		{Name: "CommitControl", Type: "[]string", Comment: `Addresses to send Commit messages from`},
		{Name: "TerminateControl", Type: "[]string", Comment: ``},
		{Name: "DealPublishControl", Type: "[]string", Comment: ``},
		{Name: "DisableOwnerFallback", Type: "bool", Comment: `DisableOwnerFallback disables usage of the owner address for messages sent automatically`},
		{Name: "DisableWorkerFallback", Type: "bool", Comment: `DisableWorkerFallback disables usage of the worker address for messages sent automatically,
if control addresses are configured. A control address that doesn't have enough funds will still be chosen over the worker address if this flag is set.`},
	},
	"MinerFeeConfig": []DocField{
		{Name: "MaxPreCommitGasFee", Type: "types.FIL", Comment: ``},
		{Name: "MaxCommitGasFee", Type: "types.FIL", Comment: ``},
		{Name: "MaxPreCommitBatchGasFee", Type: "BatchFeeConfig", Comment: `maxBatchFee = maxBase + maxPerSector * nSectors`},
		{Name: "MaxCommitBatchGasFee", Type: "BatchFeeConfig", Comment: ``},
		{Name: "MaxTerminateGasFee", Type: "types.FIL", Comment: ``},
		{Name: "MaxWindowPoStGasFee", Type: "types.FIL", Comment: `WindowPoSt is a high-value operation, so the default fee should be high.`},
		{Name: "MaxPublishDealsFee", Type: "types.FIL", Comment: ``},
		{Name: "MaxMarketBalanceAddFee", Type: "types.FIL", Comment: ``},
	},
	"MinerSubsystemConfig": []DocField{
		{Name: "EnableMining", Type: "bool", Comment: ``},
		{Name: "EnableSealing", Type: "bool", Comment: ``},
		{Name: "EnableSectorStorage", Type: "bool", Comment: ``},
		{Name: "EnableMarkets", Type: "bool", Comment: ``},
		{Name: "SealerApiInfo", Type: "string", Comment: ``},
		{Name: "SectorIndexApiInfo", Type: "string", Comment: ``},
	},
	"ProvingConfig": []DocField{
		{Name: "ParallelCheckLimit", Type: "int", Comment: `Maximum number of sector checks to run in parallel. (0 = unlimited)`},
	},
	"Pubsub": []DocField{
		{Name: "Bootstrapper", Type: "bool", Comment: `Run the node in bootstrap-node mode`},
		{Name: "DirectPeers", Type: "[]string", Comment: `DirectPeers specifies peers with direct peering agreements. These peers are connected outside of the mesh, with all (valid) message unconditionally forwarded to them. The router will maintain open connections to these peers. Note that the peering agreement should be reciprocal with direct peers symmetrically configured at both ends.
Type: Array of multiaddress peerinfo strings, must include peerid (/p2p/12D3K...`},
		{Name: "IPColocationWhitelist", Type: "[]string", Comment: ``},
		{Name: "RemoteTracer", Type: "string", Comment: ``},
	},
	"RetrievalPricing": []DocField{
		{Name: "Strategy", Type: "string", Comment: ``},
		{Name: "Default", Type: "*RetrievalPricingDefault", Comment: ``},
		{Name: "External", Type: "*RetrievalPricingExternal", Comment: ``},
	},
	"RetrievalPricingDefault": []DocField{
		{Name: "VerifiedDealsFreeTransfer", Type: "bool", Comment: `VerifiedDealsFreeTransfer configures zero fees for data transfer for a retrieval deal of a payloadCid that belongs to a verified storage deal. This parameter is ONLY applicable if the retrieval pricing policy strategy has been configured to "default". default value is true`},
	},
	"RetrievalPricingExternal": []DocField{
		{Name: "Path", Type: "string", Comment: `Path of the external script that will be run to price a retrieval deal. This parameter is ONLY applicable if the retrieval pricing policy strategy has been configured to "external".`},
	},
	"SealerConfig": []DocField{
		{Name: "ParallelFetchLimit", Type: "int", Comment: ``},
		{Name: "AllowAddPiece", Type: "bool", Comment: `Local worker config`},
		{Name: "AllowPreCommit1", Type: "bool", Comment: ``},
		{Name: "AllowPreCommit2", Type: "bool", Comment: ``},
		{Name: "AllowCommit", Type: "bool", Comment: ``},
		{Name: "AllowUnseal", Type: "bool", Comment: ``},
		{Name: "AllowReplicaUpdate", Type: "bool", Comment: ``},
		{Name: "AllowProveReplicaUpdate2", Type: "bool", Comment: ``},
		{Name: "AllowRegenSectorKey", Type: "bool", Comment: ``},
		{Name: "ResourceFiltering", Type: "sectorstorage.ResourceFilteringStrategy", Comment: `ResourceFiltering instructs the system which resource filtering strategy to use when evaluating tasks against this worker.
An empty value defaults to "hardware".`},
	},
	"SealingConfig": []DocField{
		{Name: "MaxWaitDealsSectors", Type: "uint64", Comment: `Upper bound on how many sectors can be waiting for more deals to be packed in it before it begins sealing at any given time. If the miner is accepting multiple deals in parallel, up to MaxWaitDealsSectors of new sectors will be created. If more than MaxWaitDealsSectors deals are accepted in parallel, only MaxWaitDealsSectors deals will be processed in parallel Note that setting this number too high in relation to deal ingestion rate may result in poor sector packing efficiency 0 = no limit`},
		{Name: "MaxSealingSectors", Type: "uint64", Comment: `Upper bound on how many sectors can be sealing+upgrading at the same time when creating new CC sectors (0 = unlimited)`},
		{Name: "MaxSealingSectorsForDeals", Type: "uint64", Comment: `Upper bound on how many sectors can be sealing+upgrading at the same time when creating new sectors with deals (0 = unlimited)`},
		{Name: "PreferNewSectorsForDeals", Type: "bool", Comment: `Prefer creating new sectors even if there are sectors Available for upgrading. This setting combined with MaxUpgradingSectors set to a value higher than MaxSealingSectorsForDeals makes it possible to use fast sector upgrades to handle high volumes of storage deals, while still using the simple sealing flow when the volume of storage deals is lower.`},
		{Name: "MaxUpgradingSectors", Type: "uint64", Comment: `Upper bound on how many sectors can be sealing+upgrading at the same time when upgrading CC sectors with deals (0 = MaxSealingSectorsForDeals)`},
		{Name: "CommittedCapacitySectorLifetime", Type: "Duration", Comment: `CommittedCapacitySectorLifetime is the duration a Committed Capacity (CC) sector will live before it must be extended or converted into sector containing deals before it is terminated.
Value must be between 180-540 days inclusive`},
		{Name: "WaitDealsDelay", Type: "Duration", Comment: `Period of time that a newly created sector will wait for more deals to be packed in to before it starts to seal. Sectors which are fully filled will start sealing immediately`},
		{Name: "AlwaysKeepUnsealedCopy", Type: "bool", Comment: `Whether to keep unsealed copies of deal data regardless of whether the client requested that. This lets the miner avoid the relatively high cost of unsealing the data later, at the cost of more storage space`},
		{Name: "FinalizeEarly", Type: "bool", Comment: `Run sector finalization before submitting sector proof to the chain`},
		{Name: "MakeNewSectorForDeals", Type: "bool", Comment: `Whether new sectors are created to pack incoming deals When this is set to false no new sectors will be created for sealing incoming deals This is useful for forcing all deals to be assigned as snap deals to sectors marked for upgrade`},
		{Name: "MakeCCSectorsAvailable", Type: "bool", Comment: `After sealing CC sectors, make them available for upgrading with deals`},
		{Name: "CollateralFromMinerBalance", Type: "bool", Comment: `Whether to use available miner balance for sector collateral instead of sending it with each message`},
		{Name: "AvailableBalanceBuffer", Type: "types.FIL", Comment: `Minimum available balance to keep in the miner actor before sending it with messages`},
		{Name: "DisableCollateralFallback", Type: "bool", Comment: `Don't send collateral with messages even if there is no available balance in the miner actor`},
		{Name: "BatchPreCommits", Type: "bool", Comment: `enable / disable precommit batching (takes effect after nv13)`},
		{Name: "MaxPreCommitBatch", Type: "int", Comment: `maximum precommit batch size - batches will be sent immediately above this size`},
		{Name: "PreCommitBatchWait", Type: "Duration", Comment: `how long to wait before submitting a batch after crossing the minimum batch size`},
		{Name: "PreCommitBatchSlack", Type: "Duration", Comment: `time buffer for forceful batch submission before sectors/deal in batch would start expiring`},
		{Name: "AggregateCommits", Type: "bool", Comment: `enable / disable commit aggregation (takes effect after nv13)`},
		{Name: "MinCommitBatch", Type: "int", Comment: `maximum batched commit size - batches will be sent immediately above this size`},
		{Name: "MaxCommitBatch", Type: "int", Comment: ``},
		{Name: "CommitBatchWait", Type: "Duration", Comment: `how long to wait before submitting a batch after crossing the minimum batch size`},
		{Name: "CommitBatchSlack", Type: "Duration", Comment: `time buffer for forceful batch submission before sectors/deals in batch would start expiring`},
		{Name: "BatchPreCommitAboveBaseFee", Type: "types.FIL", Comment: `network BaseFee below which to stop doing precommit batching, instead sending precommit messages to the chain individually`},
		{Name: "AggregateAboveBaseFee", Type: "types.FIL", Comment: `network BaseFee below which to stop doing commit aggregation, instead submitting proofs to the chain individually`},
		{Name: "TerminateBatchMax", Type: "uint64", Comment: ``},
		{Name: "TerminateBatchMin", Type: "uint64", Comment: ``},
		{Name: "TerminateBatchWait", Type: "Duration", Comment: ``},
	},
	"Splitstore": []DocField{
		{Name: "ColdStoreType", Type: "string", Comment: `ColdStoreType specifies the type of the coldstore. It can be "universal" (default) or "discard" for discarding cold blocks.`},
		{Name: "HotStoreType", Type: "string", Comment: `HotStoreType specifies the type of the hotstore. Only currently supported value is "badger".`},
		{Name: "MarkSetType", Type: "string", Comment: `MarkSetType specifies the type of the markset.
It can be "map" for in memory marking or "badger" (default) for on-disk marking.`},
		{Name: "HotStoreMessageRetention", Type: "uint64", Comment: `HotStoreMessageRetention specifies the retention policy for messages, in finalities beyond the compaction boundary; default is 0.`},
		{Name: "HotStoreFullGCFrequency", Type: "uint64", Comment: `HotStoreFullGCFrequency specifies how often to perform a full (moving) GC on the hotstore. A value of 0 disables, while a value 1 will do full GC in every compaction. Default is 20 (about once a week).`},
	},
	"StorageMiner": []DocField{
		{Name: "Subsystems", Type: "MinerSubsystemConfig", Comment: ``},
		{Name: "Dealmaking", Type: "DealmakingConfig", Comment: ``},
		{Name: "IndexProvider", Type: "IndexProviderConfig", Comment: ``},
		{Name: "Proving", Type: "ProvingConfig", Comment: ``},
		{Name: "Sealing", Type: "SealingConfig", Comment: ``},
		{Name: "Storage", Type: "SealerConfig", Comment: ``},
		{Name: "Fees", Type: "MinerFeeConfig", Comment: ``},
		{Name: "Addresses", Type: "MinerAddressConfig", Comment: ``},
		{Name: "DAGStore", Type: "DAGStoreConfig", Comment: ``},
	},
	"Wallet": []DocField{
		{Name: "RemoteBackend", Type: "string", Comment: ``},
		{Name: "EnableLedger", Type: "bool", Comment: ``},
		{Name: "DisableLocal", Type: "bool", Comment: ``},
	},
}
node/config/doc_gen.go
0.583915
0.485722
doc_gen.go
starcoder
package internal import ( "log" "reflect" ) func ConformWithWarning(actual interface{}, expectation interface{}, shallwarn bool) bool { if IsArray(actual) && IsArray(expectation) { actualArr, _ := actual.([]interface{}) expectationArr, _ := expectation.([]interface{}) return ConformArray(actualArr, expectationArr, shallwarn) } if IsObject(actual) && IsObject(expectation) { actualObj, _ := actual.(map[string]interface{}) expectedObj, _ := expectation.(map[string]interface{}) return ConformObject(actualObj, expectedObj, shallwarn) } if IsPrimitive(actual) && IsPrimitive(expectation) { return actual == expectation } panic("UNKNOWN TYPE: Actual: " + reflect.TypeOf(actual).Name() + " Expected: " + reflect.TypeOf(expectation).Name()) } func Conform(actual interface{}, expectation interface{}) bool { if IsArray(actual) && IsArray(expectation) { actualArr, _ := actual.([]interface{}) expectationArr, _ := expectation.([]interface{}) return ConformArray(actualArr, expectationArr, true) } if IsObject(actual) && IsObject(expectation) { actualObj, _ := actual.(map[string]interface{}) expectedObj, _ := expectation.(map[string]interface{}) return ConformObject(actualObj, expectedObj, true) } if IsPrimitive(actual) && IsPrimitive(expectation) { return actual == expectation } panic("UNKNOWN TYPE: Actual: " + reflect.TypeOf(actual).Name() + " Expected: " + reflect.TypeOf(expectation).Name()) } func ConformArray(actual []interface{}, expected []interface{}, shallWarn bool) bool { expectedLen := len(expected) seen := make(map[int]bool) for i := 0; i < expectedLen; i++ { eValue := expected[i] if !findAnyConformObject(eValue, actual, seen) && seen[i] == false { seen[i] = true return false } } return true } // {name: "aa"} == {name: "bb"} func ConformObject(actual map[string]interface{}, expected map[string]interface{}, warn bool) bool { actualType := reflect.TypeOf(actual) typesEqual := actualType == reflect.TypeOf(expected) if !typesEqual { return false } for key := range expected { 
actualValue, hasActualKey := actual[key] expectedValue, hasExpectedKey := expected[key] if hasExpectedKey && !hasActualKey { if warn { log.Print("Key difference at key", key) } return false } if !HaveSameType(actualValue, expectedValue) { if warn { log.Print("Type difference at key ", key) } return false } if IsPrimitive(actualValue) && IsPrimitive(expectedValue) { if !EqualPrimitive(actualValue, expectedValue) { if warn { log.Print("Value difference at key: ", key) } return false } } if IsObject(actualValue) && IsObject(expectedValue) { actualChildObject, _ := actualValue.(map[string]interface{}) expectedChildObject, _ := expectedValue.(map[string]interface{}) if !ConformObject(actualChildObject, expectedChildObject, warn) { if warn { log.Print("Object difference at key ", key) } return false } } if IsArray(actualValue) && IsArray(expectedValue) { actualChildObject, _ := actualValue.([]interface{}) expectedChildObject, _ := expectedValue.([]interface{}) if !ConformArray(actualChildObject, expectedChildObject, warn) { if warn { log.Print("Array difference at key ", key) } return false } } } return true } func findAnyConformObject(wanted interface{}, actual []interface{}, seen map[int]bool) bool { for i := 0; i < len(actual); i++ { if ConformWithWarning(actual[i], wanted, false) && !seen[i] { seen[i] = true return true } } log.Print("No conformed object found") return false }
internal/conform.go
0.527073
0.570481
conform.go
starcoder
package grpc import ( "fmt" "strconv" log "github.com/sirupsen/logrus" "github.com/c9s/bbgo/pkg/bbgo" "github.com/c9s/bbgo/pkg/fixedpoint" "github.com/c9s/bbgo/pkg/pb" "github.com/c9s/bbgo/pkg/types" ) func toSubscriptions(sub *pb.Subscription) (types.Subscription, error) { switch sub.Channel { case pb.Channel_TRADE: return types.Subscription{ Symbol: sub.Symbol, Channel: types.MarketTradeChannel, }, nil case pb.Channel_BOOK: return types.Subscription{ Symbol: sub.Symbol, Channel: types.BookChannel, Options: types.SubscribeOptions{ Depth: types.Depth(sub.Depth), }, }, nil case pb.Channel_KLINE: return types.Subscription{ Symbol: sub.Symbol, Channel: types.KLineChannel, Options: types.SubscribeOptions{ Interval: sub.Interval, }, }, nil } return types.Subscription{}, fmt.Errorf("unsupported subscription channel: %s", sub.Channel) } func transPriceVolume(srcPvs types.PriceVolumeSlice) (pvs []*pb.PriceVolume) { for _, srcPv := range srcPvs { pvs = append(pvs, &pb.PriceVolume{ Price: srcPv.Price.String(), Volume: srcPv.Volume.String(), }) } return pvs } func transBook(session *bbgo.ExchangeSession, book types.SliceOrderBook, event pb.Event) *pb.MarketData { return &pb.MarketData{ Session: session.Name, Exchange: session.ExchangeName.String(), Symbol: book.Symbol, Channel: pb.Channel_BOOK, Event: event, Depth: &pb.Depth{ Exchange: session.ExchangeName.String(), Symbol: book.Symbol, Asks: transPriceVolume(book.Asks), Bids: transPriceVolume(book.Bids), }, } } func toOrderType(orderType pb.OrderType) types.OrderType { switch orderType { case pb.OrderType_MARKET: return types.OrderTypeMarket case pb.OrderType_LIMIT: return types.OrderTypeLimit } log.Warnf("unexpected order type: %v", orderType) return types.OrderTypeLimit } func toSide(side pb.Side) types.SideType { switch side { case pb.Side_BUY: return types.SideTypeBuy case pb.Side_SELL: return types.SideTypeSell } log.Warnf("unexpected side type: %v", side) return types.SideTypeBuy } func toSubmitOrders(pbOrders 
[]*pb.SubmitOrder) (submitOrders []types.SubmitOrder) { for _, pbOrder := range pbOrders { submitOrders = append(submitOrders, types.SubmitOrder{ ClientOrderID: pbOrder.ClientOrderId, Symbol: pbOrder.Symbol, Side: toSide(pbOrder.Side), Type: toOrderType(pbOrder.OrderType), Price: fixedpoint.MustNewFromString(pbOrder.Price), Quantity: fixedpoint.MustNewFromString(pbOrder.Quantity), StopPrice: fixedpoint.MustNewFromString(pbOrder.StopPrice), TimeInForce: "", }) } return submitOrders } func transBalances(session *bbgo.ExchangeSession, balances types.BalanceMap) (pbBalances []*pb.Balance) { for _, b := range balances { pbBalances = append(pbBalances, &pb.Balance{ Exchange: session.ExchangeName.String(), Currency: b.Currency, Available: b.Available.String(), Locked: b.Locked.String(), }) } return pbBalances } func transTrade(session *bbgo.ExchangeSession, trade types.Trade) *pb.Trade { return &pb.Trade{ Session: session.Name, Exchange: trade.Exchange.String(), Symbol: trade.Symbol, Id: strconv.FormatUint(trade.ID, 10), Price: trade.Price.String(), Quantity: trade.Quantity.String(), CreatedAt: trade.Time.UnixMilli(), Side: transSide(trade.Side), FeeCurrency: trade.FeeCurrency, Fee: trade.Fee.String(), Maker: trade.IsMaker, } } func transMarketTrade(session *bbgo.ExchangeSession, marketTrade types.Trade) *pb.MarketData { return &pb.MarketData{ Session: session.Name, Exchange: session.ExchangeName.String(), Symbol: marketTrade.Symbol, Channel: pb.Channel_TRADE, Event: pb.Event_UPDATE, Trades: []*pb.Trade{ { Exchange: marketTrade.Exchange.String(), Symbol: marketTrade.Symbol, Id: strconv.FormatUint(marketTrade.ID, 10), Price: marketTrade.Price.String(), Quantity: marketTrade.Quantity.String(), CreatedAt: marketTrade.Time.UnixMilli(), Side: transSide(marketTrade.Side), FeeCurrency: marketTrade.FeeCurrency, Fee: marketTrade.Fee.String(), Maker: marketTrade.IsMaker, }, }, } } func transSide(side types.SideType) pb.Side { switch side { case types.SideTypeBuy: return pb.Side_BUY 
case types.SideTypeSell: return pb.Side_SELL } return pb.Side_SELL } func transOrderType(orderType types.OrderType) pb.OrderType { switch orderType { case types.OrderTypeLimit: return pb.OrderType_LIMIT case types.OrderTypeMarket: return pb.OrderType_MARKET case types.OrderTypeStopLimit: return pb.OrderType_STOP_LIMIT case types.OrderTypeStopMarket: return pb.OrderType_STOP_MARKET } return pb.OrderType_LIMIT } func transOrder(session *bbgo.ExchangeSession, order types.Order) *pb.Order { return &pb.Order{ Exchange: order.Exchange.String(), Symbol: order.Symbol, Id: strconv.FormatUint(order.OrderID, 10), Side: transSide(order.Side), OrderType: transOrderType(order.Type), Price: order.Price.String(), StopPrice: order.StopPrice.String(), Status: string(order.Status), CreatedAt: order.CreationTime.UnixMilli(), Quantity: order.Quantity.String(), ExecutedQuantity: order.ExecutedQuantity.String(), ClientOrderId: order.ClientOrderID, GroupId: int64(order.GroupID), } } func transKLine(session *bbgo.ExchangeSession, kline types.KLine) *pb.KLine { return &pb.KLine{ Session: session.Name, Exchange: kline.Exchange.String(), Symbol: kline.Symbol, Open: kline.Open.String(), High: kline.High.String(), Low: kline.Low.String(), Close: kline.Close.String(), Volume: kline.Volume.String(), QuoteVolume: kline.QuoteVolume.String(), StartTime: kline.StartTime.UnixMilli(), EndTime: kline.StartTime.UnixMilli(), Closed: kline.Closed, } } func transKLineResponse(session *bbgo.ExchangeSession, kline types.KLine) *pb.MarketData { return &pb.MarketData{ Session: session.Name, Exchange: kline.Exchange.String(), Symbol: kline.Symbol, Channel: pb.Channel_KLINE, Event: pb.Event_UPDATE, Kline: transKLine(session, kline), SubscribedAt: 0, } }
pkg/grpc/convert.go
0.599485
0.407098
convert.go
starcoder
Package planbuilder allows you to build execution plans that describe how to fulfill a query that may span multiple keyspaces or shards. The main entry points for this package are Build and BuildFromStmt. */ package planbuilder /* The main strategy of the planbuilder is to push down as much of the work as possible down to the vttablets. The special primitive for doing this is route, which can execute any SQL on a single shard (or scatter). Any work that cannot be done by a single route is stitched together by VTGate using relational primitives. If stitching is not possible using existing primitives, then an "unsupported" error is returned. If a query is split into multiple parts, like a cross-shard join, the latter parts may carry references to the former parts. If this happens, the primitive specifies how to build these cross-shard references as "join variables" that will essentially be sent in as bind vars during execution. For example: select ... from a join b on b.col = a.col will be executed as: select ... a.col from a (produce "a_col" from a.col) select ... from b where b.col = :a_col The central design element for analyzing queries and building plans is the symbol table (symtab). This data structure evolves as a query is analyzed. Therefore, searches are not repeatable. To resolve this, search results are persisted inside the ColName as 'Metadata', and reused as needed. The plan is built in two phases. In the first phase (break-up and push-down), the query is broken into smaller parts and pushed down into various primitives. In the second phase (wire-up), external references are wired up using bind vars, and the individual ASTs are converted into actual queries. In current architecture, VTGate does not know the underlying MySQL schema. Due to this, we assume that any qualified or implicit column reference of a table is valid and we rely on the underlying vttablet/MySQL to eventually validate such references. 
Every 'builder' primitive must satisfy the builder interface. This allows the planbuilder to outsource primitive-specific handling into those implementations. Variable naming: The AST, planbuilder and engine are three different worlds that use overloaded names that are contextually similar, but different. For example a join is: Join is the AST node that represents the SQL construct join is a builder in the current package Join is a primitive in the engine package In order to disambiguate, we'll use the 'a' prefix for AST vars, and the 'e' prefix for engine vars. So, 'ajoin' would be of type *sqlparser.Join, and 'ejoin' would be of type *engine.Join. For the planbuilder join we'll use 'jb'. */
go/vt/vtgate/planbuilder/doc.go
0.74512
0.776072
doc.go
starcoder
package gfx

import "math"

// SignedDistance holds 2D signed distance functions based on
// https://iquilezles.org/www/articles/distfunctions2d/distfunctions2d.htm
type SignedDistance struct {
	Vec
}

// SignedDistanceFunc is a func that takes a SignedDistance and returns a float64.
type SignedDistanceFunc func(SignedDistance) float64

// CircleFunc creates a SignedDistanceFunc for a circle with the given radius.
func (SignedDistance) CircleFunc(r float64) SignedDistanceFunc {
	return func(sd SignedDistance) float64 {
		return sd.Circle(r)
	}
}

// LineFunc creates a SignedDistanceFunc for a line with the given start and end.
func (SignedDistance) LineFunc(a, b Vec) SignedDistanceFunc {
	return func(sd SignedDistance) float64 {
		return sd.Line(a, b)
	}
}

// RectangleFunc creates a SignedDistanceFunc for a rectangle with the given size.
func (SignedDistance) RectangleFunc(b Vec) SignedDistanceFunc {
	return func(sd SignedDistance) float64 {
		return sd.Rectangle(b)
	}
}

// RhombusFunc creates a SignedDistanceFunc for a rhombus with the given size.
func (SignedDistance) RhombusFunc(b Vec) SignedDistanceFunc {
	return func(sd SignedDistance) float64 {
		return sd.Rhombus(b)
	}
}

// EquilateralTriangleFunc creates a SignedDistanceFunc for an equilateral triangle with the given size.
func (SignedDistance) EquilateralTriangleFunc(s float64) SignedDistanceFunc {
	return func(sd SignedDistance) float64 {
		return sd.EquilateralTriangle(s)
	}
}

// IsoscelesTriangleFunc creates a SignedDistanceFunc for an isosceles triangle with the given size.
func (SignedDistance) IsoscelesTriangleFunc(q Vec) SignedDistanceFunc {
	return func(sd SignedDistance) float64 {
		return sd.IsoscelesTriangle(q)
	}
}

// Circle primitive: signed distance to a circle of radius r centered at the origin.
func (sd SignedDistance) Circle(r float64) float64 {
	return sd.Len() - r
}

// Line primitive: distance to the line segment from a to b.
func (sd SignedDistance) Line(a, b Vec) float64 {
	pa, ba := sd.Sub(a), b.Sub(a)
	// c is the normalized projection of the point onto the segment, clamped
	// to [0, 1] so the closest point stays within the segment.
	c := Clamp(pa.Dot(ba)/ba.Dot(ba), 0.0, 1.0)
	return pa.Sub(ba.Scaled(c)).Len()
}

// Rectangle primitive: signed distance to an axis-aligned box with half-size b.
func (sd SignedDistance) Rectangle(b Vec) float64 {
	d := sd.Abs().Sub(b)
	// Outside distance plus (negative) inside distance.
	return d.Max(ZV).Len() + math.Min(math.Max(d.X, d.Y), 0)
}

// Rhombus primitive: signed distance to a rhombus with half-diagonals b.
// NOTE(review): the x term stands in for the ndot(b-2p, b) expression from the
// reference article via Vec.Normal — confirm against gfx.Vec.Normal semantics.
func (sd SignedDistance) Rhombus(b Vec) float64 {
	q := sd.Abs()
	x := (-2*q.Normal().Dot(b.Normal()) + b.Normal().Dot(b.Normal())) / b.Dot(b)
	h := Clamp(x, -1.0, 1.0)
	d := q.Sub(b.Scaled(0.5).ScaledXY(V(1.0-h, 1.0+h))).Len()
	return d * Sign(q.X*b.Y+q.Y*b.X-b.X*b.Y)
}

// EquilateralTriangle primitive: signed distance to an equilateral triangle
// with half-width s.
func (sd SignedDistance) EquilateralTriangle(s float64) float64 {
	k := math.Sqrt(3)
	p := sd.Vec
	// Exploit symmetry: fold into the right half-plane, shift to one edge.
	p.X = math.Abs(p.X) - s
	p.Y = p.Y + s/k
	if p.X+k*p.Y > 0.0 {
		p = V(p.X-k*p.Y, -k*p.X-p.Y).Scaled(0.5)
	}
	p.X -= Clamp(p.X, -2.0, 0.0)
	return -p.Len() * Sign(p.Y)
}

// IsoscelesTriangle primitive: signed distance to an isosceles triangle with
// apex at the origin; q holds the half-width and height.
func (sd SignedDistance) IsoscelesTriangle(q Vec) float64 {
	p := sd.Vec
	p.X = math.Abs(p.X)
	// a: closest point on the slanted edge; b: closest point on the base.
	a := p.Sub(q.Scaled(Clamp(p.Dot(q)/q.Dot(q), 0.0, 1.0)))
	b := p.Sub(q.ScaledXY(V(Clamp(p.X/q.X, 0.0, 1.0), 1.0)))
	s := -Sign(q.Y)
	d := V(a.Dot(a), s*(p.X*q.Y-p.Y*q.X)).Min(V(b.Dot(b), s*(p.Y-q.Y)))
	return -math.Sqrt(d.X) * Sign(d.Y)
}

// Rounded signed distance function shape: grows the shape v by radius r.
func (sd SignedDistance) Rounded(v, r float64) float64 {
	return v - r
}

// Annular signed distance function shape: turns the shape v into a ring of
// thickness r.
func (sd SignedDistance) Annular(v, r float64) float64 {
	return math.Abs(v) - r
}

// OpUnion basic boolean operation for union.
func (sd SignedDistance) OpUnion(x, y float64) float64 {
	return math.Min(x, y)
}

// OpSubtraction basic boolean operation for subtraction.
func (sd SignedDistance) OpSubtraction(x, y float64) float64 {
	return math.Max(-x, y)
}

// OpIntersection basic boolean operation for intersection.
func (sd SignedDistance) OpIntersection(x, y float64) float64 {
	return math.Max(x, y)
}

// OpSmoothUnion smooth operation for union; k controls the blend radius.
func (sd SignedDistance) OpSmoothUnion(x, y, k float64) float64 {
	h := Clamp(0.5+0.5*(y-x)/k, 0.0, 1.0)
	return Lerp(y, x, h) - k*h*(1.0-h)
}

// OpSmoothSubtraction smooth operation for subtraction; k controls the blend radius.
func (sd SignedDistance) OpSmoothSubtraction(x, y, k float64) float64 {
	h := Clamp(0.5-0.5*(y+x)/k, 0.0, 1.0)
	return Lerp(y, -x, h) + k*h*(1.0-h)
}

// OpSmoothIntersection smooth operation for intersection; k controls the blend radius.
func (sd SignedDistance) OpSmoothIntersection(x, y, k float64) float64 {
	h := Clamp(0.5-0.5*(y-x)/k, 0.0, 1.0)
	return Lerp(y, x, h) + k*h*(1.0-h)
}

// OpSymX symmetry operation for X: mirrors sdf across the Y axis.
func (sd SignedDistance) OpSymX(sdf SignedDistanceFunc) float64 {
	sd.X = math.Abs(sd.X)
	return sdf(sd)
}

// OpSymY symmetry operation for Y: mirrors sdf across the X axis.
func (sd SignedDistance) OpSymY(sdf SignedDistanceFunc) float64 {
	sd.Y = math.Abs(sd.Y)
	return sdf(sd)
}

// OpSymXY symmetry operation for X and Y: mirrors sdf across both axes.
func (sd SignedDistance) OpSymXY(sdf SignedDistanceFunc) float64 {
	sd.X = math.Abs(sd.X)
	sd.Y = math.Abs(sd.Y)
	return sdf(sd)
}

// OpRepeat repeats based on the given c vector (tile size per axis).
func (sd SignedDistance) OpRepeat(c Vec, sdf SignedDistanceFunc) float64 {
	q := sd.Mod(c).Sub(c.Scaled(0.5))
	return sdf(SignedDistance{q})
}

// OpMoved moves result of sdf by the given delta.
// (Relative to the identity matrix)
func (sd SignedDistance) OpMoved(d Vec, sdf SignedDistanceFunc) float64 {
	return sd.OpTx(IM.Moved(d), sdf)
}

// OpTx translates using the given matrix.
func (sd SignedDistance) OpTx(t Matrix, sdf SignedDistanceFunc) float64 {
	return sdf(SignedDistance{t.Unproject(sd.Vec)})
}
vendor/github.com/peterhellberg/gfx/signed_distance.go
0.914262
0.759894
signed_distance.go
starcoder
package date

// PatternTranslation are the map of regexs in different languages.
//
// BUG FIXES applied to the patterns below:
//   - fr: "mecredi" -> "mercredi", and a stray trailing apostrophe removed
//     from "aujourd'hui'" (both prevented those words from matching).
//   - de: month alternation had "juli" twice and was missing "juni".
//   - nl: "(vab )?" -> "(van )?" (Dutch for "of").
var PatternTranslation = map[string]PatternTranslations{
	"en": {
		DateRegex: `(of )?(the )?((after )?tomorrow|((today|tonight)|(next )?(monday|tuesday|wednesday|thursday|friday|saturday|sunday))|(\d{2}|\d)(th|rd|st|nd)? (of )?(january|february|march|april|may|june|july|august|september|october|november|december)|((\d{2}|\d)/(\d{2}|\d)))`,
		TimeRegex: `(at )?(\d{2}|\d)(:\d{2}|\d)?( )?(pm|am|p\.m|a\.m)`,
	},
	"de": {
		DateRegex: `(von )?(das )?((nach )?morgen|((heute|abends)|(nächsten )?(montag|dienstag|mittwoch|donnerstag|freitag|samstag|sonntag))|(\d{2}|\d)(th|rd|st|nd)? (of )?(januar|februar|märz|april|mai|juni|juli|august|september|oktober|november|dezember)|((\d{2}|\d)/(\d{2}|\d)))`,
		TimeRegex: `(um )?(\d{2}|\d)(:\d{2}|\d)?( )?(pm|am|p\.m|a\.m)`,
	},
	"fr": {
		DateRegex: `(le )?(après )?demain|((aujourd'hui|ce soir)|(lundi|mardi|mercredi|jeudi|vendredi|samedi|dimanche)( prochain)?|(\d{2}|\d) (janvier|février|mars|avril|mai|juin|juillet|août|septembre|octobre|novembre|décembre)|((\d{2}|\d)/(\d{2}|\d)))`,
		TimeRegex: `(à )?(\d{2}|\d)(:\d{2}|\d)?( )?(pm|am|p\.m|a\.m)`,
	},
	"es": {
		DateRegex: `(el )?((pasado )?mañana|((hoy|esta noche)|(el )?(proximo )?(lunes|martes|miercoles|jueves|viernes|sabado|domingo))|(\d{2}|\d) (de )?(enero|febrero|marzo|abril|mayo|junio|julio|agosto|septiembre|octubre|noviembre|diciembre)|((\d{2}|\d)/(\d{2}|\d)))`,
		TimeRegex: `(a )?(las )?(\d{2}|\d)(:\d{2}|\d)?( )?(de )?(la )?(pm|am|p\.m|a\.m|tarde|mañana)`,
	},
	"ca": {
		DateRegex: `(el )?((després )?(de )?demà|((avui|aquesta nit)|(el )?(proper )?(dilluns|dimarts|dimecres|dijous|divendres|dissabte|diumenge))|(\d{2}|\d) (de )?(gener|febrer|març|abril|maig|juny|juliol|agost|setembre|octubre|novembre|desembre)|((\d{2}|\d)/(\d{2}|\d)))`,
		TimeRegex: `(a )?(les )?(\d{2}|\d)(:\d{2}|\d)?( )?(pm|am|p\.m|a\.m)`,
	},
	"nl": {
		DateRegex: `(van )?(de )?((na )?morgen|((vandaag|vanavond)|(volgende )?(maandag|dinsdag|woensdag|donderdag|vrijdag|zaterdag|zondag))|(\d{2}|\d)(te|de)? (van )?(januari|februari|maart|april|mei|juni|juli|augustus|september|oktober|november|december)|((\d{2}|\d)/(\d{2}|\d)))`,
		TimeRegex: `(om )?(\d{2}|\d)(:\d{2}|\d)?( )?(pm|am|p\.m|a\.m)`,
	},
}

// PatternTranslations are the translations of the regexs for dates.
type PatternTranslations struct {
	DateRegex string
	TimeRegex string
}

// SearchTime returns the given sentence with the found date and time removed,
// together with the parsed date. If no date is found, it returns the sentence
// unchanged and a date 24 hours from now (not an empty date — the original
// doc comment was wrong about this).
func SearchTime(locale, sentence string) (string, time.Time) {
	_time := RuleTime(sentence)
	// Set the time to 12am if no time has been found
	if _time == (time.Time{}) {
		_time = time.Date(0, 0, 0, 12, 0, 0, 0, time.UTC)
	}

	for _, rule := range rules {
		date := rule(locale, sentence)

		// If the current rule found a date, merge it with the parsed
		// time-of-day and strip both from the sentence.
		if date != (time.Time{}) {
			date = time.Date(date.Year(), date.Month(), date.Day(), _time.Hour(), _time.Minute(), 0, 0, time.UTC)

			sentence = DeleteTimes(locale, sentence)
			return DeleteDates(locale, sentence), date
		}
	}

	return sentence, time.Now().Add(time.Hour * 24)
}

// DeleteDates removes the dates of the given sentence and returns it.
func DeleteDates(locale, sentence string) string {
	// Create a regex to match the patterns of dates to remove them.
	datePatterns := regexp.MustCompile(PatternTranslation[locale].DateRegex)

	// Replace the dates by empty string
	sentence = datePatterns.ReplaceAllString(sentence, "")

	// Trim the spaces and return
	return strings.TrimSpace(sentence)
}

// DeleteTimes removes the times of the given sentence and returns it.
func DeleteTimes(locale, sentence string) string {
	// Create a regex to match the patterns of times to remove them.
	timePatterns := regexp.MustCompile(PatternTranslation[locale].TimeRegex)

	// Replace the times by empty string
	sentence = timePatterns.ReplaceAllString(sentence, "")

	// Trim the spaces and return
	return strings.TrimSpace(sentence)
}
language/date/date.go
0.595493
0.5816
date.go
starcoder
package chip8

import (
	termbox "github.com/nsf/termbox-go"
)

const (
	GraphicsWidth  = 64 // Pixels
	GraphicsHeight = 32 // Pixels
)

// Display represents the output display for the CHIP-8 graphics array.
type Display interface {
	// Render should render the current graphics array to the display.
	Render(*Graphics) error
}

// DisplayFunc is an adapter that allows an ordinary function to be used as a
// Display.
type DisplayFunc func(*Graphics) error

// Render implements Display by calling f itself.
func (f DisplayFunc) Render(g *Graphics) error {
	return f(g)
}

// NullDisplay is an implementation of the Display interface that does nothing.
var NullDisplay = DisplayFunc(func(*Graphics) error {
	return nil
})

// Graphics represents the graphics array for the CHIP-8.
type Graphics struct {
	// The raw pixels of the graphics array.
	Pixels [GraphicsWidth * GraphicsHeight]byte

	// The display to render to. The nil value is the DefaultDisplay.
	Display
}

// WriteSprite XOR-draws a sprite (one byte per 8-pixel row) to the graphics
// array starting at coordinate x, y, wrapping at the screen edges.
// If there is a collision, WriteSprite returns true.
// (The comment previously named this method "DrawSprite".)
func (g *Graphics) WriteSprite(sprite []byte, x, y byte) (collision bool) {
	n := len(sprite)

	for yl := 0; yl < n; yl++ {
		// A row of sprite data.
		r := sprite[yl]

		for xl := 0; xl < 8; xl++ {
			// This represents a mask for the bit that we
			// care about for this coordinate.
			i := 0x80 >> byte(xl)

			// Whether the bit is set or not.
			on := (r & byte(i)) == byte(i)

			// The X position for this pixel, wrapped into [0, GraphicsWidth).
			xp := uint16(x) + uint16(xl)
			for xp >= GraphicsWidth {
				xp = xp - GraphicsWidth
			}

			// The Y position for this pixel, wrapped into [0, GraphicsHeight).
			yp := uint16(y) + uint16(yl)
			for yp >= GraphicsHeight {
				yp = yp - GraphicsHeight
			}

			if g.Set(xp, yp, on) {
				collision = true
			}
		}
	}

	return
}

// Clear clears the display.
func (g *Graphics) Clear() {
	g.EachPixel(func(_, _ uint16, addr int) {
		g.Pixels[addr] = 0
	})
}

// Draw draws the graphics array to the Display.
func (g *Graphics) Draw() error {
	return g.display().Render(g)
}

// EachPixel yields each pixel in the graphics array to fn.
func (g *Graphics) EachPixel(fn func(x, y uint16, addr int)) { for y := 0; y < GraphicsHeight-1; y++ { for x := 0; x < GraphicsWidth-1; x++ { a := y*GraphicsWidth + x fn(uint16(x), uint16(y), a) } } } // Set turns the pixel at the given coordinates on or off. If there's a // collision, it returns true. func (g *Graphics) Set(x, y uint16, on bool) (collision bool) { a := x + y*GraphicsWidth if g.Pixels[a] == 0x01 { collision = true } var v byte if on { v = 0x01 } g.Pixels[a] = g.Pixels[a] ^ v return } func (g *Graphics) display() Display { if g.Display == nil { return DefaultDisplay } return g.Display } // termboxInit initializes termbox with appropriate settings. This should be // called before using the TermboxDisplay and TermboxKeypad. func termboxInit(bg termbox.Attribute) error { if err := termbox.Init(); err != nil { return err } termbox.HideCursor() if err := termbox.Clear(bg, bg); err != nil { return err } return termbox.Flush() } // TermboxDisplay is an implementation of the Display interface that renders // the graphics array to the terminal. type TermboxDisplay struct { fg, bg termbox.Attribute } // NewTermboxDisplay returns a new TermboxDisplay instance. func NewTermboxDisplay(fg, bg termbox.Attribute) (*TermboxDisplay, error) { return &TermboxDisplay{ fg: fg, bg: bg, }, termboxInit(bg) } // Render renders the graphics array to the terminal using Termbox. func (d *TermboxDisplay) Render(g *Graphics) error { g.EachPixel(func(x, y uint16, addr int) { v := ' ' if g.Pixels[addr] == 0x01 { v = '█' } termbox.SetCell( int(x), int(y), v, d.fg, d.bg, ) }) return termbox.Flush() } func (d *TermboxDisplay) Close() { termbox.Close() }
graphics.go
0.835651
0.479991
graphics.go
starcoder
package plaid import ( "encoding/json" ) // NetPay An object representing information about the net pay amount on the paystub. type NetPay struct { // Raw amount of the net pay for the pay period CurrentAmount NullableFloat32 `json:"current_amount,omitempty"` // Description of the net pay Description NullableString `json:"description,omitempty"` // The ISO-4217 currency code of the net pay. Always `null` if `unofficial_currency_code` is non-null. IsoCurrencyCode NullableString `json:"iso_currency_code,omitempty"` // The unofficial currency code associated with the net pay. Always `null` if `iso_currency_code` is non-`null`. Unofficial currency codes are used for currencies that do not have official ISO currency codes, such as cryptocurrencies and the currencies of certain countries. See the [currency code schema](https://plaid.com/docs/api/accounts#currency-code-schema) for a full listing of supported `iso_currency_code`s. UnofficialCurrencyCode NullableString `json:"unofficial_currency_code,omitempty"` // The year-to-date amount of the net pay YtdAmount NullableFloat32 `json:"ytd_amount,omitempty"` Total *Total `json:"total,omitempty"` AdditionalProperties map[string]interface{} } type _NetPay NetPay // NewNetPay instantiates a new NetPay object // This constructor will assign default values to properties that have it defined, // and makes sure properties required by API are set, but the set of arguments // will change when the set of required properties is changed func NewNetPay() *NetPay { this := NetPay{} return &this } // NewNetPayWithDefaults instantiates a new NetPay object // This constructor will only assign default values to properties that have it defined, // but it doesn't guarantee that properties required by API are set func NewNetPayWithDefaults() *NetPay { this := NetPay{} return &this } // GetCurrentAmount returns the CurrentAmount field value if set, zero value otherwise (both if not set or set to explicit null). 
func (o *NetPay) GetCurrentAmount() float32 { if o == nil || o.CurrentAmount.Get() == nil { var ret float32 return ret } return *o.CurrentAmount.Get() } // GetCurrentAmountOk returns a tuple with the CurrentAmount field value if set, nil otherwise // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned func (o *NetPay) GetCurrentAmountOk() (*float32, bool) { if o == nil { return nil, false } return o.CurrentAmount.Get(), o.CurrentAmount.IsSet() } // HasCurrentAmount returns a boolean if a field has been set. func (o *NetPay) HasCurrentAmount() bool { if o != nil && o.CurrentAmount.IsSet() { return true } return false } // SetCurrentAmount gets a reference to the given NullableFloat32 and assigns it to the CurrentAmount field. func (o *NetPay) SetCurrentAmount(v float32) { o.CurrentAmount.Set(&v) } // SetCurrentAmountNil sets the value for CurrentAmount to be an explicit nil func (o *NetPay) SetCurrentAmountNil() { o.CurrentAmount.Set(nil) } // UnsetCurrentAmount ensures that no value is present for CurrentAmount, not even an explicit nil func (o *NetPay) UnsetCurrentAmount() { o.CurrentAmount.Unset() } // GetDescription returns the Description field value if set, zero value otherwise (both if not set or set to explicit null). func (o *NetPay) GetDescription() string { if o == nil || o.Description.Get() == nil { var ret string return ret } return *o.Description.Get() } // GetDescriptionOk returns a tuple with the Description field value if set, nil otherwise // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned func (o *NetPay) GetDescriptionOk() (*string, bool) { if o == nil { return nil, false } return o.Description.Get(), o.Description.IsSet() } // HasDescription returns a boolean if a field has been set. 
func (o *NetPay) HasDescription() bool { if o != nil && o.Description.IsSet() { return true } return false } // SetDescription gets a reference to the given NullableString and assigns it to the Description field. func (o *NetPay) SetDescription(v string) { o.Description.Set(&v) } // SetDescriptionNil sets the value for Description to be an explicit nil func (o *NetPay) SetDescriptionNil() { o.Description.Set(nil) } // UnsetDescription ensures that no value is present for Description, not even an explicit nil func (o *NetPay) UnsetDescription() { o.Description.Unset() } // GetIsoCurrencyCode returns the IsoCurrencyCode field value if set, zero value otherwise (both if not set or set to explicit null). func (o *NetPay) GetIsoCurrencyCode() string { if o == nil || o.IsoCurrencyCode.Get() == nil { var ret string return ret } return *o.IsoCurrencyCode.Get() } // GetIsoCurrencyCodeOk returns a tuple with the IsoCurrencyCode field value if set, nil otherwise // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned func (o *NetPay) GetIsoCurrencyCodeOk() (*string, bool) { if o == nil { return nil, false } return o.IsoCurrencyCode.Get(), o.IsoCurrencyCode.IsSet() } // HasIsoCurrencyCode returns a boolean if a field has been set. func (o *NetPay) HasIsoCurrencyCode() bool { if o != nil && o.IsoCurrencyCode.IsSet() { return true } return false } // SetIsoCurrencyCode gets a reference to the given NullableString and assigns it to the IsoCurrencyCode field. 
func (o *NetPay) SetIsoCurrencyCode(v string) { o.IsoCurrencyCode.Set(&v) } // SetIsoCurrencyCodeNil sets the value for IsoCurrencyCode to be an explicit nil func (o *NetPay) SetIsoCurrencyCodeNil() { o.IsoCurrencyCode.Set(nil) } // UnsetIsoCurrencyCode ensures that no value is present for IsoCurrencyCode, not even an explicit nil func (o *NetPay) UnsetIsoCurrencyCode() { o.IsoCurrencyCode.Unset() } // GetUnofficialCurrencyCode returns the UnofficialCurrencyCode field value if set, zero value otherwise (both if not set or set to explicit null). func (o *NetPay) GetUnofficialCurrencyCode() string { if o == nil || o.UnofficialCurrencyCode.Get() == nil { var ret string return ret } return *o.UnofficialCurrencyCode.Get() } // GetUnofficialCurrencyCodeOk returns a tuple with the UnofficialCurrencyCode field value if set, nil otherwise // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned func (o *NetPay) GetUnofficialCurrencyCodeOk() (*string, bool) { if o == nil { return nil, false } return o.UnofficialCurrencyCode.Get(), o.UnofficialCurrencyCode.IsSet() } // HasUnofficialCurrencyCode returns a boolean if a field has been set. func (o *NetPay) HasUnofficialCurrencyCode() bool { if o != nil && o.UnofficialCurrencyCode.IsSet() { return true } return false } // SetUnofficialCurrencyCode gets a reference to the given NullableString and assigns it to the UnofficialCurrencyCode field. 
func (o *NetPay) SetUnofficialCurrencyCode(v string) { o.UnofficialCurrencyCode.Set(&v) } // SetUnofficialCurrencyCodeNil sets the value for UnofficialCurrencyCode to be an explicit nil func (o *NetPay) SetUnofficialCurrencyCodeNil() { o.UnofficialCurrencyCode.Set(nil) } // UnsetUnofficialCurrencyCode ensures that no value is present for UnofficialCurrencyCode, not even an explicit nil func (o *NetPay) UnsetUnofficialCurrencyCode() { o.UnofficialCurrencyCode.Unset() } // GetYtdAmount returns the YtdAmount field value if set, zero value otherwise (both if not set or set to explicit null). func (o *NetPay) GetYtdAmount() float32 { if o == nil || o.YtdAmount.Get() == nil { var ret float32 return ret } return *o.YtdAmount.Get() } // GetYtdAmountOk returns a tuple with the YtdAmount field value if set, nil otherwise // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned func (o *NetPay) GetYtdAmountOk() (*float32, bool) { if o == nil { return nil, false } return o.YtdAmount.Get(), o.YtdAmount.IsSet() } // HasYtdAmount returns a boolean if a field has been set. func (o *NetPay) HasYtdAmount() bool { if o != nil && o.YtdAmount.IsSet() { return true } return false } // SetYtdAmount gets a reference to the given NullableFloat32 and assigns it to the YtdAmount field. func (o *NetPay) SetYtdAmount(v float32) { o.YtdAmount.Set(&v) } // SetYtdAmountNil sets the value for YtdAmount to be an explicit nil func (o *NetPay) SetYtdAmountNil() { o.YtdAmount.Set(nil) } // UnsetYtdAmount ensures that no value is present for YtdAmount, not even an explicit nil func (o *NetPay) UnsetYtdAmount() { o.YtdAmount.Unset() } // GetTotal returns the Total field value if set, zero value otherwise. 
func (o *NetPay) GetTotal() Total { if o == nil || o.Total == nil { var ret Total return ret } return *o.Total } // GetTotalOk returns a tuple with the Total field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *NetPay) GetTotalOk() (*Total, bool) { if o == nil || o.Total == nil { return nil, false } return o.Total, true } // HasTotal returns a boolean if a field has been set. func (o *NetPay) HasTotal() bool { if o != nil && o.Total != nil { return true } return false } // SetTotal gets a reference to the given Total and assigns it to the Total field. func (o *NetPay) SetTotal(v Total) { o.Total = &v } func (o NetPay) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} if o.CurrentAmount.IsSet() { toSerialize["current_amount"] = o.CurrentAmount.Get() } if o.Description.IsSet() { toSerialize["description"] = o.Description.Get() } if o.IsoCurrencyCode.IsSet() { toSerialize["iso_currency_code"] = o.IsoCurrencyCode.Get() } if o.UnofficialCurrencyCode.IsSet() { toSerialize["unofficial_currency_code"] = o.UnofficialCurrencyCode.Get() } if o.YtdAmount.IsSet() { toSerialize["ytd_amount"] = o.YtdAmount.Get() } if o.Total != nil { toSerialize["total"] = o.Total } for key, value := range o.AdditionalProperties { toSerialize[key] = value } return json.Marshal(toSerialize) } func (o *NetPay) UnmarshalJSON(bytes []byte) (err error) { varNetPay := _NetPay{} if err = json.Unmarshal(bytes, &varNetPay); err == nil { *o = NetPay(varNetPay) } additionalProperties := make(map[string]interface{}) if err = json.Unmarshal(bytes, &additionalProperties); err == nil { delete(additionalProperties, "current_amount") delete(additionalProperties, "description") delete(additionalProperties, "iso_currency_code") delete(additionalProperties, "unofficial_currency_code") delete(additionalProperties, "ytd_amount") delete(additionalProperties, "total") o.AdditionalProperties = additionalProperties } return err } type NullableNetPay struct { 
value *NetPay isSet bool } func (v NullableNetPay) Get() *NetPay { return v.value } func (v *NullableNetPay) Set(val *NetPay) { v.value = val v.isSet = true } func (v NullableNetPay) IsSet() bool { return v.isSet } func (v *NullableNetPay) Unset() { v.value = nil v.isSet = false } func NewNullableNetPay(val *NetPay) *NullableNetPay { return &NullableNetPay{value: val, isSet: true} } func (v NullableNetPay) MarshalJSON() ([]byte, error) { return json.Marshal(v.value) } func (v *NullableNetPay) UnmarshalJSON(src []byte) error { v.isSet = true return json.Unmarshal(src, &v.value) }
plaid/model_net_pay.go
0.837055
0.470128
model_net_pay.go
starcoder
package blockchain import ( "crypto/sha256" "errors" "fmt" "log" ) type ( // Chain is an array of Blocks Chain struct { Links []Block } ) // GetBlock returns a block by the index func (c Chain) GetBlock(index int) Block { return c.Links[index] } // GetLatestBlock returns the newest block func (c Chain) GetLatestBlock() Block { if len(c.Links) > 0 { return c.Links[len(c.Links)-1] } return c.Links[0] } // GetBlockLength returns the block length func (c Chain) GetBlockLength() int { return len(c.Links) } // CreateHash returns a hash for a given string func (c Chain) CreateHash(hashString string) string { h := sha256.New() h.Write([]byte(hashString)) return fmt.Sprintf("%x", h.Sum(nil)) } // CalculateHashForBlock returns a hash for a given block func (c Chain) CalculateHashForBlock(b Block) string { return c.CreateHash(b.GetHashableString()) } // IsValidBlock compares 2 blocks func (c Chain) IsValidBlock(b1 Block, b2 Block) (bool, error) { if b2.Index == 0 { return true, nil } if b1.PreviousHash != b2.Hash { log.Printf("block 1: %s and block 2: %s", b1.PreviousHash, b2.Hash) return false, errors.New("invalid previoushash") } else if c.CalculateHashForBlock(b1) != b1.Hash { return false, fmt.Errorf("invalid hash: %s %s", c.CalculateHashForBlock(b1), b1.Hash) } return true, nil } // IsValidNewBlock checks that the block being added is valid func (c Chain) IsValidNewBlock(b1 Block, b2 Block) (bool, error) { if (b1.Index <= b2.Index) || (b1.Index != b2.Index+1) { return false, errors.New("invalid index") } return c.IsValidBlock(b1, b2) } // CheckValidity returns the status for the chain func (c Chain) CheckValidity() (bool, error) { for _, b := range c.Links { // we made it to the end if b.Index-1 <= 0 { return true, nil } pass, err := c.IsValidBlock(b, c.GetBlock(b.Index-1)) if err != nil { return false, err } if !pass { return false, fmt.Errorf("invalid block at %d with hash %s", b.Index, b.Hash) } } return true, nil } // AddBlock adds a new block to the chain func (c 
*Chain) AddBlock(b Block) (Block, error) { pass, err := c.IsValidNewBlock(b, c.GetLatestBlock()) if err != nil { return b, err } if pass { // c.Links[b.Index] = b c.Links = append(c.Links, b) return b, nil } return b, errors.New("failed to add new block") } // CreateNewBlock adds a block to the chain given some data func (c *Chain) CreateNewBlock(data string) (Block, error) { newBlock := c.GetLatestBlock().GenerateChild(data) return c.AddBlock(newBlock) }
chain.go
0.83152
0.488405
chain.go
starcoder
package ln import "math" type Cylinder struct { Radius float64 Z0, Z1 float64 } func NewCylinder(radius, z0, z1 float64) *Cylinder { return &Cylinder{radius, z0, z1} } func (c *Cylinder) Compile() { } func (c *Cylinder) BoundingBox() Box { r := c.Radius return Box{Vector{-r, -r, c.Z0}, Vector{r, r, c.Z1}} } func (c *Cylinder) Contains(v Vector, f float64) bool { xy := Vector{v.X, v.Y, 0} if xy.Length() > c.Radius+f { return false } return v.Z >= c.Z0-f && v.Z <= c.Z1+f } func (shape *Cylinder) Intersect(ray Ray) Hit { r := shape.Radius o := ray.Origin d := ray.Direction a := d.X*d.X + d.Y*d.Y b := 2*o.X*d.X + 2*o.Y*d.Y c := o.X*o.X + o.Y*o.Y - r*r q := b*b - 4*a*c if q < 0 { return NoHit } s := math.Sqrt(q) t := 1e30 t0 := (-b + s) / (2 * a) if 1e-6 < t0 && t0 < t { z := o.Z + t0*d.Z if shape.Z0 < z && z < shape.Z1 { t = t0 } } t1 := (-b - s) / (2 * a) if 1e-6 < t1 && t1 < t { z := o.Z + t1*d.Z if shape.Z0 < z && z < shape.Z1 { t = t1 } } if d.Z != 0 { t3 := (shape.Z0 - o.Z) / d.Z if 1e-6 < t3 && t3 < t { z := o.Add(d.MulScalar(t3)) z.Z = 0 if z.LengthSquared() < r * r { t = t3 } } t4 := (shape.Z1 - o.Z) / d.Z if 1e-6 < t4 && t4 < t { z := o.Add(d.MulScalar(t4)) z.Z = 0 if z.LengthSquared() < r * r { t = t4 } } } if t < 1e30 { return Hit{shape, t} } else { return NoHit } } func (c *Cylinder) Paths() Paths { var result Paths for a := 0; a < 360; a += 10 { x := c.Radius * math.Cos(Radians(float64(a))) y := c.Radius * math.Sin(Radians(float64(a))) result = append(result, Path{{x, y, c.Z0}, {x, y, c.Z1}}) } return result } type OutlineCylinder struct { Cylinder Eye Vector Up Vector } func NewOutlineCylinder(eye, up Vector, radius, z0, z1 float64) *OutlineCylinder { cylinder := NewCylinder(radius, z0, z1) return &OutlineCylinder{*cylinder, eye, up} } func (c *OutlineCylinder) Paths() Paths { center := Vector{0, 0, c.Z0} hyp := center.Sub(c.Eye).Length() opp := c.Radius theta := math.Asin(opp / hyp) adj := opp / math.Tan(theta) d := math.Cos(theta) * adj // r := 
math.Sin(theta) * adj w := center.Sub(c.Eye).Normalize() u := w.Cross(c.Up).Normalize() c0 := c.Eye.Add(w.MulScalar(d)) a0 := c0.Add(u.MulScalar(c.Radius * 1.01)) b0 := c0.Add(u.MulScalar(-c.Radius * 1.01)) center = Vector{0, 0, c.Z1} hyp = center.Sub(c.Eye).Length() opp = c.Radius theta = math.Asin(opp / hyp) adj = opp / math.Tan(theta) d = math.Cos(theta) * adj // r = math.Sin(theta) * adj w = center.Sub(c.Eye).Normalize() u = w.Cross(c.Up).Normalize() c1 := c.Eye.Add(w.MulScalar(d)) a1 := c1.Add(u.MulScalar(c.Radius * 1.01)) b1 := c1.Add(u.MulScalar(-c.Radius * 1.01)) var p0, p1 Path for a := 0; a < 360; a++ { x := c.Radius * math.Cos(Radians(float64(a))) y := c.Radius * math.Sin(Radians(float64(a))) p0 = append(p0, Vector{x, y, c.Z0}) p1 = append(p1, Vector{x, y, c.Z1}) } return Paths{ p0, p1, {{a0.X, a0.Y, c.Z0}, {a1.X, a1.Y, c.Z1}}, {{b0.X, b0.Y, c.Z0}, {b1.X, b1.Y, c.Z1}}, } } func NewTransformedOutlineCylinder(eye, up, v0, v1 Vector, radius float64) Shape { d := v1.Sub(v0) z := d.Length() a := math.Acos(d.Normalize().Dot(up)) m := Translate(v0) if a != 0 { u := d.Cross(up).Normalize() m = Rotate(u, a).Translate(v0) } c := NewOutlineCylinder(m.Inverse().MulPosition(eye), up, radius, 0, z) return NewTransformedShape(c, m) }
ln/cylinder.go
0.788257
0.473536
cylinder.go
starcoder
package stats import ( "runtime" "sync" "sync/atomic" hist "github.com/samaritan-proxy/circonusllhist" ) var ( defaultSupportedQuantiles = []float64{0.0, 0.25, 0.5, 0.9, 0.95, 0.99, 1.0} ) // HistogramStatistics holds the computed statistic of a histogram. type HistogramStatistics struct { *hist.Histogram mu sync.RWMutex // supported quantiles qs []float64 // computed quantile values qvals []float64 } func newHistogramStatistics(h *hist.Histogram) *HistogramStatistics { qs := defaultSupportedQuantiles qvals, _ := h.ApproxQuantile(qs) return &HistogramStatistics{ Histogram: h, qs: qs, qvals: qvals, } } // SupportedQuantiles returns the supported quantiles. func (hs *HistogramStatistics) SupportedQuantiles() []float64 { return hs.qs } // ComputedQuantiles returns the computed quantile values during the period. func (hs *HistogramStatistics) ComputedQuantiles() []float64 { if len(hs.qvals) == 0 { return make([]float64, len(hs.qs)) } return hs.qvals } // A Histogram records values one at a time. type Histogram struct { metric store *Store sampleCount uint64 rawHistCount uint64 rawHists []*hist.Histogram itlHist *hist.Histogram // interval hist cumHist *hist.Histogram // cumulative hist } func NewHistogram(store *Store, name, tagExtractedName string, tags []*Tag) *Histogram { h := &Histogram{ store: store, metric: newMetric(name, tagExtractedName, tags), itlHist: hist.NewNoLocks(), cumHist: hist.New(), rawHistCount: uint64(runtime.GOMAXPROCS(0)), } h.rawHists = make([]*hist.Histogram, h.rawHistCount) for i := uint64(0); i < h.rawHistCount; i++ { h.rawHists[i] = hist.New() } return h } // Record records a value to the Histogram. 
func (h *Histogram) Record(val uint64) { rawHist := h.rawHists[atomic.AddUint64(&h.sampleCount, 1)%h.rawHistCount] rawHist.RecordIntScale(int64(val), 0) if h.store != nil { h.store.deliverHistogramSampleToSinks(h, val) } h.markUsed() } func (h *Histogram) refreshIntervalStatistic() { // merge all raw hists merged := hist.NewNoLocks() for _, rawHist := range h.rawHists { merged.Merge(rawHist) } h.itlHist = merged.Copy() h.cumHist.Merge(merged) } // IntervalStatistics returns the interval statistics of Histogram. func (h *Histogram) IntervalStatistics() *HistogramStatistics { return newHistogramStatistics(h.itlHist) }
vendor/github.com/kirk91/stats/histogram.go
0.777975
0.447823
histogram.go
starcoder
package gorm import ( "context" "fmt" "reflect" "strings" "github.com/golang/protobuf/jsonpb" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/protoc-gen-go/generator" "github.com/infobloxopen/atlas-app-toolkit/query" "github.com/infobloxopen/atlas-app-toolkit/rpc/resource" ) // FilterStringToGorm is a shortcut to parse a filter string using default FilteringParser implementation // and call FilteringToGorm on the returned filtering expression. func FilterStringToGorm(ctx context.Context, filter string, obj interface{}, pb proto.Message) (string, []interface{}, map[string]struct{}, error) { f, err := query.ParseFiltering(filter) if err != nil { return "", nil, nil, err } return FilteringToGorm(ctx, f, obj, pb) } // FilteringToGorm returns GORM Plain SQL representation of the filtering expression. func FilteringToGorm(ctx context.Context, m *query.Filtering, obj interface{}, pb proto.Message) (string, []interface{}, map[string]struct{}, error) { if m == nil || m.Root == nil { return "", nil, nil, nil } switch r := m.Root.(type) { case *query.Filtering_Operator: return LogicalOperatorToGorm(ctx, r.Operator, obj, pb) case *query.Filtering_StringCondition: return StringConditionToGorm(ctx, r.StringCondition, obj, pb) case *query.Filtering_NumberCondition: return NumberConditionToGorm(ctx, r.NumberCondition, obj, pb) case *query.Filtering_NullCondition: return NullConditionToGorm(ctx, r.NullCondition, obj, pb) case *query.Filtering_NumberArrayCondition: return NumberArrayConditionToGorm(ctx, r.NumberArrayCondition, obj, pb) case *query.Filtering_StringArrayCondition: return StringArrayConditionToGorm(ctx, r.StringArrayCondition, obj, pb) default: return "", nil, nil, fmt.Errorf("%T type is not supported in Filtering", r) } } // LogicalOperatorToGorm returns GORM Plain SQL representation of the logical operator. 
func LogicalOperatorToGorm(ctx context.Context, lop *query.LogicalOperator, obj interface{}, pb proto.Message) (string, []interface{}, map[string]struct{}, error) { var lres string var largs []interface{} var lAssocToJoin map[string]struct{} var err error switch l := lop.Left.(type) { case *query.LogicalOperator_LeftOperator: lres, largs, lAssocToJoin, err = LogicalOperatorToGorm(ctx, l.LeftOperator, obj, pb) case *query.LogicalOperator_LeftStringCondition: lres, largs, lAssocToJoin, err = StringConditionToGorm(ctx, l.LeftStringCondition, obj, pb) case *query.LogicalOperator_LeftNumberCondition: lres, largs, lAssocToJoin, err = NumberConditionToGorm(ctx, l.LeftNumberCondition, obj, pb) case *query.LogicalOperator_LeftNullCondition: lres, largs, lAssocToJoin, err = NullConditionToGorm(ctx, l.LeftNullCondition, obj, pb) case *query.LogicalOperator_LeftNumberArrayCondition: lres, largs, lAssocToJoin, err = NumberArrayConditionToGorm(ctx, l.LeftNumberArrayCondition, obj, pb) case *query.LogicalOperator_LeftStringArrayCondition: lres, largs, lAssocToJoin, err = StringArrayConditionToGorm(ctx, l.LeftStringArrayCondition, obj, pb) default: return "", nil, nil, fmt.Errorf("%T type is not supported in Filtering", l) } if err != nil { return "", nil, nil, err } var rres string var rargs []interface{} var rAssocToJoin map[string]struct{} switch r := lop.Right.(type) { case *query.LogicalOperator_RightOperator: rres, rargs, rAssocToJoin, err = LogicalOperatorToGorm(ctx, r.RightOperator, obj, pb) case *query.LogicalOperator_RightStringCondition: rres, rargs, rAssocToJoin, err = StringConditionToGorm(ctx, r.RightStringCondition, obj, pb) case *query.LogicalOperator_RightNumberCondition: rres, rargs, rAssocToJoin, err = NumberConditionToGorm(ctx, r.RightNumberCondition, obj, pb) case *query.LogicalOperator_RightNullCondition: rres, rargs, rAssocToJoin, err = NullConditionToGorm(ctx, r.RightNullCondition, obj, pb) case *query.LogicalOperator_RightNumberArrayCondition: rres, rargs, 
rAssocToJoin, err = NumberArrayConditionToGorm(ctx, r.RightNumberArrayCondition, obj, pb) case *query.LogicalOperator_RightStringArrayCondition: rres, rargs, rAssocToJoin, err = StringArrayConditionToGorm(ctx, r.RightStringArrayCondition, obj, pb) default: return "", nil, nil, fmt.Errorf("%T type is not supported in Filtering", r) } if err != nil { return "", nil, nil, err } if lAssocToJoin == nil && rAssocToJoin != nil { lAssocToJoin = make(map[string]struct{}) } for k := range rAssocToJoin { lAssocToJoin[k] = struct{}{} } var o string switch lop.Type { case query.LogicalOperator_AND: o = "AND" case query.LogicalOperator_OR: o = "OR" } var neg string if lop.IsNegative { neg = "NOT" } return fmt.Sprintf("%s(%s %s %s)", neg, lres, o, rres), append(largs, rargs...), lAssocToJoin, nil } // StringConditionToGorm returns GORM Plain SQL representation of the string condition. func StringConditionToGorm(ctx context.Context, c *query.StringCondition, obj interface{}, pb proto.Message) (string, []interface{}, map[string]struct{}, error) { var assocToJoin map[string]struct{} dbName, assoc, err := HandleFieldPath(ctx, c.FieldPath, obj) if err != nil { return "", nil, nil, err } if assoc != "" { assocToJoin = make(map[string]struct{}) assocToJoin[assoc] = struct{}{} } var o string switch c.Type { case query.StringCondition_EQ, query.StringCondition_IEQ: o = "=" case query.StringCondition_MATCH: o = "REGEXP" case query.StringCondition_GT: o = ">" case query.StringCondition_GE: o = ">=" case query.StringCondition_LT: o = "<" case query.StringCondition_LE: o = "<=" } var neg string if c.IsNegative { neg = "NOT" } var value interface{} if v, err := processStringCondition(ctx, c.FieldPath, c.Value, pb); err != nil { value = c.Value } else { value = v } if c.Type == query.StringCondition_IEQ { return insensitiveCaseStringConditionToGorm(neg, dbName, o), []interface{}{value}, assocToJoin, nil } return fmt.Sprintf("%s(%s %s ?)", neg, dbName, o), []interface{}{value}, assocToJoin, nil 
} func insensitiveCaseStringConditionToGorm(neg, dbName, operator string) string { return fmt.Sprintf("%s(lower(%s) %s lower(?))", neg, dbName, operator) } func processStringCondition(ctx context.Context, fieldPath []string, value string, pb proto.Message) (interface{}, error) { objType := indirectType(reflect.TypeOf(pb)) pathLength := len(fieldPath) for i, part := range fieldPath { sf, ok := objType.FieldByName(generator.CamelCase(part)) if !ok { return nil, fmt.Errorf("Cannot find field %s in %s", part, objType) } if i < pathLength-1 { objType = indirectType(sf.Type) if !isProtoMessage(objType) { return nil, fmt.Errorf("%s: non-last field of %s field path should be a proto message", objType, fieldPath) } } else { if isIdentifier(indirectType(sf.Type)) { id := &resource.Identifier{} if err := jsonpb.UnmarshalString(fmt.Sprintf("\"%s\"", value), id); err != nil { return nil, err } newPb := reflect.New(objType) v := newPb.Elem().FieldByName(generator.CamelCase(part)) v.Set(reflect.ValueOf(id)) toOrm := newPb.MethodByName("ToORM") if !toOrm.IsValid() { return nil, fmt.Errorf("ToORM method cannot be found for %s", objType) } res := toOrm.Call([]reflect.Value{reflect.ValueOf(ctx)}) if len(res) != 2 { return nil, fmt.Errorf("ToORM signature of %s is unknown", objType) } orm := res[0] err := res[1] if !err.IsNil() { if tErr, ok := err.Interface().(error); ok { return nil, tErr } else { return nil, fmt.Errorf("ToOrm second return value of %s is expected to be error", objType) } } ormId := orm.FieldByName(generator.CamelCase(part)) if !ormId.IsValid() { return nil, fmt.Errorf("Cannot find field %s in %s", part, objType) } return reflect.Indirect(ormId).Interface(), nil } } } return value, nil } // NumberConditionToGorm returns GORM Plain SQL representation of the number condition. 
func NumberConditionToGorm(ctx context.Context, c *query.NumberCondition, obj interface{}, pb proto.Message) (string, []interface{}, map[string]struct{}, error) { var assocToJoin map[string]struct{} dbName, assoc, err := HandleFieldPath(ctx, c.FieldPath, obj) if err != nil { return "", nil, nil, err } if assoc != "" { assocToJoin = make(map[string]struct{}) assocToJoin[assoc] = struct{}{} } var neg string if c.IsNegative { neg = "NOT" } if c.Type == query.NumberCondition_CONTAINS { return fmt.Sprintf("%s(%s & ?>0)", neg, dbName), []interface{}{c.Value}, assocToJoin, nil } var o string switch c.Type { case query.NumberCondition_EQ: o = "=" case query.NumberCondition_GT: o = ">" case query.NumberCondition_GE: o = ">=" case query.NumberCondition_LT: o = "<" case query.NumberCondition_LE: o = "<=" } return fmt.Sprintf("%s(%s %s ?)", neg, dbName, o), []interface{}{c.Value}, assocToJoin, nil } // NullConditionToGorm returns GORM Plain SQL representation of the null condition. func NullConditionToGorm(ctx context.Context, c *query.NullCondition, obj interface{}, pb proto.Message) (string, []interface{}, map[string]struct{}, error) { var assocToJoin map[string]struct{} dbName, assoc, err := HandleFieldPath(ctx, c.FieldPath, obj) if err != nil { return "", nil, nil, err } if assoc != "" { assocToJoin = make(map[string]struct{}) assocToJoin[assoc] = struct{}{} } o := "IS NULL" var neg string if c.IsNegative { neg = "NOT" } return fmt.Sprintf("%s(%s %s)", neg, dbName, o), nil, assocToJoin, nil } func NumberArrayConditionToGorm(ctx context.Context, c *query.NumberArrayCondition, obj interface{}, pb proto.Message) (string, []interface{}, map[string]struct{}, error) { var assocToJoin map[string]struct{} dbName, assoc, err := HandleFieldPath(ctx, c.FieldPath, obj) if err != nil { return "", nil, nil, err } if assoc != "" { assocToJoin = make(map[string]struct{}) assocToJoin[assoc] = struct{}{} } o := "IN" var neg string if c.IsNegative { neg = "NOT" } placeholder := "" values := 
make([]interface{}, 0, len(c.Values)) for _, val := range c.Values { placeholder += "?, " values = append(values, val) } return fmt.Sprintf("(%s %s %s (%s))", dbName, neg, o, strings.TrimSuffix(placeholder, ", ")), values, assocToJoin, nil } func StringArrayConditionToGorm(ctx context.Context, c *query.StringArrayCondition, obj interface{}, pb proto.Message) (string, []interface{}, map[string]struct{}, error) { var assocToJoin map[string]struct{} dbName, assoc, err := HandleFieldPath(ctx, c.FieldPath, obj) if err != nil { return "", nil, nil, err } if assoc != "" { assocToJoin = make(map[string]struct{}) assocToJoin[assoc] = struct{}{} } o := "IN" var neg string if c.IsNegative { neg = "NOT" } values := make([]interface{}, 0, len(c.Values)) placeholder := "" for _, str := range c.Values { placeholder += "?, " if val, err := processStringCondition(ctx, c.FieldPath, str, pb); err == nil { values = append(values, val) continue } values = append(values, str) } return fmt.Sprintf("(%s %s %s (%s))", dbName, neg, o, strings.TrimSuffix(placeholder, ", ")), values, assocToJoin, nil }
gorm/filtering.go
0.668015
0.416619
filtering.go
starcoder
package gtk3Import import ( "bytes" "fmt" "sort" "strings" ) var pangoEscapeChar = [][]string{{"<", "&lt;", string([]byte{0x15})}, {"&", "&amp;", string([]byte{0x16})}} var markupType = map[string][]string{ "bold": {"<b>", "</b>"}, "bld": {"<b>", "</b>"}, // Bold "big": {"<big>", "</big>"}, // Makes font relatively larger, equivalent to <span size="larger"> "small": {"<small>", "</small>"}, "sml": {"<small>", "</small>"}, // Makes font relatively smaller, equivalent to <span size="smaller"> "italic": {"<i>", "</i>"}, "ita": {"<i>", "</i>"}, // Italic "subscript": {"<sub>", "</sub>"}, "sub": {"<sub>", "</sub>"}, // Subscript "supscript": {"<sup>", "</sup>"}, "sup": {"<sup>", "</sup>"}, // Superscript "monospace": {"<tt>", "</tt>"}, "msp": {"<tt>", "</tt>"}, // Monospace font "font_family": {`<span font_family="`, `">`, `</span>`}, "ffy": {`<span font_family="`, `">`, `</span>`}, // A font family name /* A font description string, such as "Sans Italic 12". See pango_font_description_from_string() for a description of the format of the string representation. Note that any other span attributes will override this description. So if you have "Sans Italic" and also a style="normal" attribute, you will get Sans normal, not italic.*/ "font": {`<span font="`, `">`, `</span>`}, "fnt": {`<span font="`, `">`, `</span>`}, /* Font size in 1024ths of a point, or one of the absolute sizes 'xx-small', 'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large' or one of the relative sizes 'smaller' or 'larger'. 
If you want to specify a absolute size, it's usually easier to take advantage of the ability to specify a partial font description using 'font'; you can use font='12.5' rather than size='12800'.*/ "font_size": {`<span font_size="`, `">`, `</span>`}, "fsz": {`<span font_size="`, `">`, `</span>`}, "strike": {"<s>", "</s>"}, "stk": {"<s>", "</s>"}, // Strikethrough "strikethrough_color": {`<span strikethrough="true" strikethrough_color="`, `">`, `</span>`}, // 'true' or 'false' whether to strike through the text "stc": {`<span strikethrough="true" strikethrough_color="`, `">`, `</span>`}, // An RGB color specification such as '#00FF00' or a color name such as 'blue'. "underline": {"<u>", "</u>"}, "und": {"<u>", "</u>"}, // Underline "underline_color": {`<span underline="`, `" underline_color="`, `">`, `</span>`}, // One of 'none', 'single', 'double', 'low', 'error' "udc": {`<span underline="`, `" underline_color="`, `">`, `</span>`}, // An RGB color specification such as '#00FF00' or a color name such as 'red'. "foreground": {`<span foreground="`, `">`, `</span>`}, "fgc": {`<span foreground="`, `">`, `</span>`}, // An RGB color specification such as '#00FF00' or a color name such as 'red'. "background": {`<span background="`, `">`, `</span>`}, "bgc": {`<span background="`, `">`, `</span>`}, // An RGB color specification such as '#00FF00' or a color name such as 'red'. "fgalpha": {`<span fgalpha="`, `">`, `</span>`}, "fga": {`<span fgalpha="`, `">`, `</span>`}, // An alpha value for the background color, either a plain integer between 1 and 65536 or a percentage value like '50%'. "bgalpha": {`<span bgalpha="`, `">`, `</span>`}, "bga": {`<span bgalpha="`, `">`, `</span>`}, // An alpha value for the background color, either a plain integer between 1 and 65536 or a percentage value like '50%'. "url": {`<a href="`, `">`, `</a>`}, // Url clickable 1st arg: adress "font_style": {`<span font_style="`, `">`, `</span>`}, // font_style: One of 'normal', 'oblique', 'italic'. 
N.b: 'oblique' seems to be the same as 'italic' "fst": {`<span font_style="`, `">`, `</span>`}, // font_style: One of 'normal', 'oblique', 'italic'. So, look really useless ... "font_variant": {`<span font_variant="`, `">`, `</span>`}, "fvt": {`<span font_variant="`, `">`, `</span>`}, // One of 'normal' or 'smallcaps' "font_stretch": {`<span font_stretch="`, `">`, `</span>`}, "fsh": {`<span font_stretch="`, `">`, `</span>`}, // One of 'ultracondensed', 'extracondensed', 'condensed', 'semicondensed', 'normal', 'semiexpanded', 'expanded', 'extraexpanded', 'ultraexpanded' "font_weight": {`<span font_weight="`, `">`, `</span>`}, "wgt": {`<span font_weight="`, `">`, `</span>`}, // One of 'ultralight', 'light', 'normal', 'bold', 'ultrabold', 'heavy', or a numeric weight } type PangoMarkup struct { InString string OutString string OutStringSl []string markPositions [][]int markTypes [][]string } func (pm *PangoMarkup) Init(inString string) { // Object initialisation/cleaning pm.InString = inString pm.markPositions = [][]int{} pm.markTypes = [][]string{} pm.OutString = "" } // Add multiples positions, (where markup is applied) func (pm *PangoMarkup) AddPosition(pos ...[]int) { pm.markPositions = append(pm.markPositions, pos...) } // Add multiples markup types, (the style applied at given positions) func (pm *PangoMarkup) AddTypes(mType ...[]string) { pm.markTypes = append(pm.markTypes, mType...) } // Apply multiples pango markups to the whole text. func (pm *PangoMarkup) Markup() string { pm.prepare() text := pm.InString for _, mType := range pm.markTypes { text = markup(text, mType...) } pm.OutString = text pm.finalize() return pm.OutString } // Apply multiples pango markups to the whole text. func (pm *PangoMarkup) MarkupSeparate() (outStrSl []string) { pm.prepare() text := pm.InString for _, mType := range pm.markTypes { outStrSl = append(outStrSl, markupSeparate(text, mType...)...) 
} copy(pm.OutStringSl, outStrSl) pm.finalizeSeparate() return pm.OutStringSl } // Apply multiples pango markups to text at specified positions given into 2d slices. func (pm *PangoMarkup) MarkupAtPos() string { var eol = [][]byte{{0x0D, 0x0A}, {0x0D}, {0x0A}} var actEol string var multiMarks []string pm.prepare() // Sorting slice to get positions from the last to the first, (preserve positions in string) sort.SliceStable(pm.markPositions, func(i, j int) bool { return pm.markPositions[i][0] > pm.markPositions[j][0] }) pm.OutString = pm.InString for _, pos := range pm.markPositions { prefix := pm.OutString[:pos[0]] toMark := pm.OutString[pos[0]:pos[1]] suffix := pm.OutString[pos[1]:] multiMarks = []string{toMark} for idx, val := range eol { if bytes.Contains([]byte(toMark), val) { actEol = string(eol[idx]) multiMarks = strings.Split(toMark, actEol) } } for idx, _ := range multiMarks { for _, mType := range pm.markTypes { multiMarks[idx] = markup(multiMarks[idx], mType...) } } pm.OutString = prefix + strings.Join(multiMarks, actEol) + suffix } pm.finalize() return pm.OutString } // Prepare string with special characters to be marked ("<", "&") func (pm *PangoMarkup) prepare() { pm.InString = strings.Replace(pm.InString, pangoEscapeChar[1][0], pangoEscapeChar[1][2], -1) pm.InString = strings.Replace(pm.InString, pangoEscapeChar[0][0], pangoEscapeChar[0][2], -1) } // Escape special characters after marking ("<", "&") func (pm *PangoMarkup) finalize() { pm.OutString = strings.Replace(pm.OutString, pangoEscapeChar[1][2], pangoEscapeChar[1][1], -1) pm.OutString = strings.Replace(pm.OutString, pangoEscapeChar[0][2], pangoEscapeChar[0][1], -1) } // Escape special characters after marking ("<", "&"), SEPARATE version func (pm *PangoMarkup) finalizeSeparate() { for idx, _ := range pm.OutStringSl { pm.OutStringSl[idx] = strings.Replace(pm.OutStringSl[idx], pangoEscapeChar[1][2], pangoEscapeChar[1][1], -1) pm.OutStringSl[idx] = strings.Replace(pm.OutStringSl[idx], 
pangoEscapeChar[0][2], pangoEscapeChar[0][1], -1) } } // Apply pango markup format to text. They can be combined. func markup(text string, mType ...string) string { switch len(mType) { case 1: // i.e: markup("display", "sub") return fmt.Sprint(markupType[mType[0]][0], text, markupType[mType[0]][1]) case 2: // i.e: markup("display", "stc", "red") return fmt.Sprint(markupType[mType[0]][0], mType[1], markupType[mType[0]][1], text, markupType[mType[0]][2]) case 3: // i.e: markup("display", "stc", "double", "red") return fmt.Sprint(markupType[mType[0]][0], mType[1], markupType[mType[0]][1], mType[2], markupType[mType[0]][2], text, markupType[mType[0]][3]) default: return fmt.Sprint("Markup type error: ", mType) } } // Apply pango markup format to text. They can be combined. // Output will give []sting containing each markup sections separatly. func markupSeparate(text string, mType ...string) []string { switch len(mType) { case 1: // i.e: markup("display", "sub") return []string{markupType[mType[0]][0], text, markupType[mType[0]][1]} case 2: // i.e: markup("display", "stc", "red") return []string{markupType[mType[0]][0], mType[1], markupType[mType[0]][1], text, markupType[mType[0]][2]} case 3: // i.e: markup("display", "stc", "double", "red") return []string{markupType[mType[0]][0], mType[1], markupType[mType[0]][1], mType[2], markupType[mType[0]][2], text, markupType[mType[0]][3]} default: return []string{fmt.Sprint("Markup type error: ", mType)} } } // type PangoColor struct { // Black string // Brown string // White string // Red string // Green string // Blue string // Cyan string // Magenta string // Purple string // Turquoise string // Violet string // Darkred string // Darkgreen string // Darkblue string // Darkgray string // Darkcyan string // Lightblue string // Lightgray string // Lightgreen string // Lightturquoise string // Lightred string // Lightyellow string // } // func (pc *PangoColor) Init() { // // Colors initialisation // pc.Black = "#000000" // 
pc.Brown = "#7C2020" // pc.White = "#FFFFFF" // pc.Red = "#FF2222" // pc.Green = "#22BB22" // pc.Blue = "#0044FF" // pc.Cyan = "#14FFFA" // pc.Magenta = "#D72D6C" // pc.Purple = "#8B0037" // pc.Turquoise = "#009187" // pc.Violet = "#7F00FF" // pc.Darkred = "#300000" // pc.Darkgreen = "#003000" // pc.Darkblue = "#000030" // pc.Darkcyan = "#003333" // pc.Darkgray = "#303030" // pc.Lightturquoise = "#80FFE7" // pc.Lightblue = "#ADD8E6" // pc.Lightgray = "#E4DDDD" // pc.Lightgreen = "#87FF87" // pc.Lightred = "#FF6666" // pc.Lightyellow = "#FFFF6F" // }
vendor/github.com/hfmrow/gtk3Import/pango/pangoMarkupBinder.go
0.734215
0.543833
pangoMarkupBinder.go
starcoder
// Package no provides holiday definitions for Norway.
package no

import (
	"time"

	"github.com/devechelon/cal/v2"
	"github.com/devechelon/cal/v2/aa"
)

var (
	// FoersteNyttaarsdag represents New Year's Day on 1-Jan
	FoersteNyttaarsdag = aa.NewYear.Clone(&cal.Holiday{Name: "Første nyttårsdag", Type: cal.ObservancePublic})

	// Skjaertorsdag represents Maundy Thursday on the Thursday before Easter
	Skjaertorsdag = aa.MaundyThursday.Clone(&cal.Holiday{Name: "Skjærtorsdag", Type: cal.ObservancePublic})

	// Langfredag represents Good Friday on the Friday before Easter
	Langfredag = aa.GoodFriday.Clone(&cal.Holiday{Name: "Langfredag", Type: cal.ObservancePublic})

	// AndrePaaskedag represents Easter Monday on the day after Easter
	AndrePaaskedag = aa.EasterMonday.Clone(&cal.Holiday{Name: "Andre påskedag", Type: cal.ObservancePublic})

	// Arbeiderenesdag represents Labour Day on 1-May
	Arbeiderenesdag = aa.WorkersDay.Clone(&cal.Holiday{Name: "Arbeidernes dag", Type: cal.ObservancePublic})

	// Grunnlovsdag represents Constitution Day on 17-May.
	// Defined inline rather than cloned from package aa because it is
	// specific to Norway and has no cross-country base definition.
	Grunnlovsdag = &cal.Holiday{
		Name:  "Grunnlovsdag",
		Type:  cal.ObservancePublic,
		Month: time.May,
		Day:   17,
		Func:  cal.CalcDayOfMonth,
	}

	// Kristihimmelfartsdag represents Ascension Day on the 39th day after Easter
	Kristihimmelfartsdag = aa.AscensionDay.Clone(&cal.Holiday{Name: "Kristi Himmelfartsdag", Type: cal.ObservancePublic})

	// AndrePinsedag represents Pentecost Monday on the day after Pentecost (50 days after Easter)
	AndrePinsedag = aa.PentecostMonday.Clone(&cal.Holiday{Name: "Andre pinsedag", Type: cal.ObservancePublic})

	// FoersteJuledag represents Christmas Day on 25-Dec
	FoersteJuledag = aa.ChristmasDay.Clone(&cal.Holiday{Name: "Første juledag", Type: cal.ObservancePublic})

	// AndreJuledag represents the second day of Christmas on 26-Dec
	AndreJuledag = aa.ChristmasDay2.Clone(&cal.Holiday{Name: "Andre juledag", Type: cal.ObservancePublic})

	// Holidays provides a list of the standard national holidays
	Holidays = []*cal.Holiday{
		FoersteNyttaarsdag,
		Skjaertorsdag,
		Langfredag,
		AndrePaaskedag,
		Arbeiderenesdag,
		Grunnlovsdag,
		Kristihimmelfartsdag,
		AndrePinsedag,
		FoersteJuledag,
		AndreJuledag,
	}
)
v2/no/no_holidays.go
0.50293
0.403861
no_holidays.go
starcoder
package tsm1

import (
	"sync"
	"sync/atomic"

	"github.com/influxdata/influxql"
)

// entry is a set of values and some metadata.
type entry struct {
	// Tracks the number of values in the entry. Must always be accessed via
	// atomic; must be 8b aligned (kept as the first field so atomic access
	// stays aligned on 32-bit platforms).
	n int64

	mu     sync.RWMutex
	values Values // All stored values.

	// The type of values stored. Read only so doesn't need to be protected by mu.
	vtype byte
}

// newEntryValues returns a new instance of entry with the given values. If the
// values are not valid, an error is returned.
func newEntryValues(values []Value) (*entry, error) {
	e := &entry{
		values: make(Values, 0, len(values)),
		n:      int64(len(values)),
	}
	e.values = append(e.values, values...)

	// No values, don't check types and ordering
	if len(values) == 0 {
		return e, nil
	}

	et := valueType(values[0])
	for _, v := range values {
		// Make sure all the values are the same type
		if et != valueType(v) {
			return nil, errFieldTypeConflict
		}
	}

	// Set the type of values stored.
	e.vtype = et

	return e, nil
}

// add adds the given values to the entry.
func (e *entry) add(values []Value) error {
	if len(values) == 0 {
		return nil // Nothing to do.
	}

	// Are any of the new values the wrong type?
	// NOTE(review): e.vtype is read here before taking e.mu — this appears to
	// rely on vtype being effectively write-once (set in newEntryValues or
	// under the lock below); confirm no concurrent first-write race exists.
	if e.vtype != 0 {
		for _, v := range values {
			if e.vtype != valueType(v) {
				return errFieldTypeConflict
			}
		}
	}

	// entry currently has no values, so add the new ones and we're done.
	e.mu.Lock()
	if len(e.values) == 0 {
		// NOTE(review): this aliases the caller's slice rather than copying;
		// callers must not mutate `values` after passing it to add.
		e.values = values
		atomic.StoreInt64(&e.n, int64(len(e.values)))
		e.vtype = valueType(values[0])
		e.mu.Unlock()
		return nil
	}

	// Append the new values to the existing ones...
	e.values = append(e.values, values...)
	atomic.StoreInt64(&e.n, int64(len(e.values)))
	e.mu.Unlock()
	return nil
}

// deduplicate sorts and orders the entry's values. If values are already deduped and sorted,
// the function does no work and simply returns.
func (e *entry) deduplicate() {
	e.mu.Lock()
	defer e.mu.Unlock()

	// Zero or one value is trivially deduplicated and sorted.
	if len(e.values) <= 1 {
		return
	}
	e.values = e.values.Deduplicate()
	atomic.StoreInt64(&e.n, int64(len(e.values)))
}

// count returns the number of values in this entry.
// Lock-free: reads the atomically maintained counter rather than len(e.values).
func (e *entry) count() int {
	return int(atomic.LoadInt64(&e.n))
}

// filter removes all values with timestamps between min and max inclusive.
func (e *entry) filter(min, max int64) {
	e.mu.Lock()
	// Deduplicate first so Exclude operates on sorted, unique values.
	if len(e.values) > 1 {
		e.values = e.values.Deduplicate()
	}
	e.values = e.values.Exclude(min, max)
	atomic.StoreInt64(&e.n, int64(len(e.values)))
	e.mu.Unlock()
}

// size returns the size of this entry in bytes.
func (e *entry) size() int {
	e.mu.RLock()
	sz := e.values.Size()
	e.mu.RUnlock()
	return sz
}

// InfluxQLType returns for the entry the data type of its values.
func (e *entry) InfluxQLType() (influxql.DataType, error) {
	e.mu.RLock()
	defer e.mu.RUnlock()
	return e.values.InfluxQLType()
}
tsdb/tsm1/cache_entry.go
0.648689
0.560253
cache_entry.go
starcoder
package progress

import (
	"fmt"
)

// UnitsNotationPosition determines units position relative of tracker value.
type UnitsNotationPosition int

// Supported unit positions relative to tracker value;
// default: UnitsNotationPositionBefore
const (
	UnitsNotationPositionBefore UnitsNotationPosition = iota
	UnitsNotationPositionAfter
)

// Units defines the "type" of the value being tracked by the Tracker.
type Units struct {
	// Notation is the symbol prepended/appended to the formatted value.
	Notation string
	// NotationPosition selects which side of the value Notation goes on.
	NotationPosition UnitsNotationPosition
	// Formatter converts the raw value to a human-readable string;
	// when nil, Sprint falls back to FormatNumber.
	Formatter func(value int64) string
}

var (
	// UnitsDefault doesn't define any units. The value will be treated as any
	// other number.
	UnitsDefault = Units{
		Notation:  "",
		Formatter: FormatNumber,
	}

	// UnitsBytes defines the value as a storage unit. Values will be converted
	// and printed in one of these forms: B, KB, MB, GB, TB, PB
	UnitsBytes = Units{
		Notation:  "",
		Formatter: FormatBytes,
	}

	// UnitsCurrencyDollar defines the value as a Dollar amount. Values will be
	// converted and printed in one of these forms: $x.yz, $x.yzK, $x.yzM,
	// $x.yzB, $x.yzT
	UnitsCurrencyDollar = Units{
		Notation:  "$",
		Formatter: FormatNumber,
	}

	// UnitsCurrencyEuro defines the value as a Euro amount. Values will be
	// converted and printed in one of these forms: ₠x.yz, ₠x.yzK, ₠x.yzM,
	// ₠x.yzB, ₠x.yzT
	// NOTE(review): the notation is U+20A0 (EURO-CURRENCY SIGN "₠"), not
	// U+20AC (EURO SIGN "€") — confirm whether this is intentional.
	UnitsCurrencyEuro = Units{
		Notation:  "₠",
		Formatter: FormatNumber,
	}

	// UnitsCurrencyPound defines the value as a Pound amount. Values will be
	// converted and printed in one of these forms: £x.yz, £x.yzK, £x.yzM,
	// £x.yzB, £x.yzT
	UnitsCurrencyPound = Units{
		Notation:  "£",
		Formatter: FormatNumber,
	}
)

// Sprint prints the value as defined by the Units.
func (tu Units) Sprint(value int64) string { formatter := tu.Formatter if formatter == nil { formatter = FormatNumber } formattedValue := formatter(value) switch tu.NotationPosition { case UnitsNotationPositionAfter: return formattedValue + tu.Notation default: // UnitsNotationPositionBefore return tu.Notation + formattedValue } } // FormatBytes formats the given value as a "Byte". func FormatBytes(value int64) string { if value < 1000 { return fmt.Sprintf("%dB", value) } else if value < 1000000 { return fmt.Sprintf("%.2fKB", float64(value)/1000.0) } else if value < 1000000000 { return fmt.Sprintf("%.2fMB", float64(value)/1000000.0) } else if value < 1000000000000 { return fmt.Sprintf("%.2fGB", float64(value)/1000000000.0) } else if value < 1000000000000000 { return fmt.Sprintf("%.2fTB", float64(value)/1000000000000.0) } return fmt.Sprintf("%.2fPB", float64(value)/1000000000000000.0) } // FormatNumber formats the given value as a "regular number". func FormatNumber(value int64) string { if value < 1000 { return fmt.Sprintf("%d", value) } else if value < 1000000 { return fmt.Sprintf("%.2fK", float64(value)/1000.0) } else if value < 1000000000 { return fmt.Sprintf("%.2fM", float64(value)/1000000.0) } else if value < 1000000000000 { return fmt.Sprintf("%.2fB", float64(value)/1000000000.0) } else if value < 1000000000000000 { return fmt.Sprintf("%.2fT", float64(value)/1000000000000.0) } return fmt.Sprintf("%.2fQ", float64(value)/1000000000000000.0) }
progress/units.go
0.733165
0.659021
units.go
starcoder
package galtonwatson

import (
	"sort"

	"golang.org/x/exp/rand"
	"gonum.org/v1/gonum/stat/distuv"

	"github.com/tommyreddad/galtonwatson/stat/dist"
	"github.com/tommyreddad/galtonwatson/tree"
)

// GaltonWatson implements a conditioned Galton-Watson tree generator. The
// conditioned Galton-Watson tree is specified by an offspring distribution
// and a node count.
type GaltonWatson struct {
	// n is the number of nodes in the tree.
	n uint32

	// rander is an object implementing the method Rand which generates random
	// samples from the offspring distribution.
	rander distuv.Rander

	// offspringProb is the probability mass function of the offspring distribution.
	offspringProb []float64

	// backingMultinomial samples offspring counts when the tree was built from
	// an explicit probability mass function (see NewGaltonWatson).
	backingMultinomial *dist.Multinomial

	// src is the random source for multinomial sampling and shuffling.
	src rand.Source
}

// NewGaltonWatsonFromRander returns a GaltonWatson which represents a
// conditioned Galton-Watson tree of a given size `n`, whose offspring
// distribution can be generated by the given `rander`.
func NewGaltonWatsonFromRander(n uint32, rander distuv.Rander, src rand.Source) *GaltonWatson {
	return &GaltonWatson{
		n:      n,
		rander: rander,
		src:    src,
	}
}

// NewGaltonWatson returns a GaltonWatson which represents a conditioned
// Galton-Watson tree of a given size `n`, whose offspring distribution is a
// given probability mass function `offspringProb`.
func NewGaltonWatson(n uint32, offspringProb []float64, src rand.Source) *GaltonWatson {
	backingMultinomial := &dist.Multinomial{
		N:            n,
		CategoryProb: offspringProb,
		Src:          src,
	}
	return &GaltonWatson{
		n:                  n,
		offspringProb:      offspringProb,
		backingMultinomial: backingMultinomial,
		src:                src,
	}
}

// generateXiFromRander will generate the Xi array of offspring counts based on
// a given Rander. Includes specific generators for special Randers.
//
// NOTE(review): the Poisson and Geometric fast paths sample via
// distuv.UnitUniform (the package-global source) rather than gw.src, so runs
// are not reproducible from gw.src alone — confirm whether this is intended.
func (gw *GaltonWatson) generateXiFromRander(rander distuv.Rander) []uint32 {
	Xi := make([]uint32, gw.n)
	switch rander.(type) {
	case *distuv.Poisson:
		// The conditional Poisson distribution here is distributed as
		// Multinomial(n-1; 1/n, ..., 1/n).
		for i := uint32(0); i < gw.n-1; i++ {
			u := uint32(distuv.UnitUniform.Rand() * float64(gw.n))
			Xi[u]++
		}
	case *dist.Geometric:
		// The conditional geometric distribution here is distributed uniformly
		// on the discrete simplex (k_1, ..., k_n) such that k_1 + ... + k_n = n - 1.
		spaces := make([]int, gw.n+1)
		// Place n-1 spacers in the n spaces between and on either end of the integers 1, 2, ..., n-1.
		// The consecutive spaces represent the offspring count.
		// e.g., for n=4,
		//    1 || 2 3 |
		// This spacing corresponds to
		//    Xi[0] = 1, Xi[2] = 0, Xi[3] = 2, Xi[4] = 0
		for i := uint32(0); i < gw.n-1; i++ {
			spaces[i] = int(distuv.UnitUniform.Rand() * float64(gw.n))
		}
		// Add some extra spaces on the ends for convenience.
		spaces[gw.n-1] = 0
		spaces[gw.n] = int(gw.n - 1)
		sort.Ints(spaces)
		for i := uint32(0); i < gw.n; i++ {
			Xi[i] = uint32(spaces[i+1] - spaces[i])
		}
	default:
		// Default to rejection method. This is the slowest and most naive option:
		// resample the whole vector until the offspring counts sum to n-1.
		for total := uint32(0); total != gw.n-1; {
			total = 0
			for i := uint32(0); i < gw.n; i++ {
				Xi[i] = uint32(rander.Rand())
				total += Xi[i]
			}
		}
	}
	return Xi
}

// Xi is the array of the number of offspring of the nodes in a Galton-Watson
// tree observed when traversed in breadth-first or depth-first order. The
// number of these offspring can be viewed as a conditioned multinomial random
// variable.
func (gw *GaltonWatson) generateXiFromMultinomial(m *dist.Multinomial) []uint32 {
	// Generate multinomial conditionally upon the total sum being n-1, by the rejection method.
	var mCond map[uint32]uint32
	for total := uint32(0); total != gw.n-1; {
		mCond = m.Rand()
		total = 0
		for k, v := range mCond {
			// mCond maps offspring count k -> number of nodes v with that count,
			// so the total number of children is sum over k of k*v.
			total += k * v
		}
	}

	// Compute the first pass at the Xi array of DFS-order offspring.
	Xi := make([]uint32, gw.n)
	{
		i := 0
		for k, v := range mCond {
			for j := uint32(0); j < v; j++ {
				Xi[i] = k
				i++
			}
		}
	}
	// Randomize the ordering of the offspring counts.
	rand.Shuffle(int(gw.n), func(i, j int) {
		Xi[i], Xi[j] = Xi[j], Xi[i]
	})
	return Xi
}

// S is the array of the sequence of queue sizes observed when traversing a
// Galton-Watson tree in breadth-first order. Also returns the index of the
// first minimum of S, used for the cyclic rotation below.
func (gw *GaltonWatson) generateSFromXi(Xi []uint32) ([]int32, uint32) {
	S := make([]int32, gw.n+1)
	S[0] = 1
	minIndex := uint32(1)
	for i := uint32(1); i < gw.n+1; i++ {
		S[i] = S[i-1] + int32(Xi[i-1]) - 1
		if S[i] < S[minIndex] {
			minIndex = i
		}
	}
	return S, minIndex
}

// Rand returns a random sample drawn from the distribution. The generation
// algorithm is due to Devroye (2011), see: http://luc.devroye.org/gw-simulation.pdf.
func (gw *GaltonWatson) Rand() *tree.Node {
	var Xi []uint32
	if gw.rander != nil {
		// Prioritize the rander if specified.
		Xi = gw.generateXiFromRander(gw.rander)
	} else {
		// If no rander is specified, the offspringProb must be present.
		Xi = gw.generateXiFromMultinomial(gw.backingMultinomial)
	}

	// Compute the S array and keep track of the first minimum encounter.
	_, minIndex := gw.generateSFromXi(Xi)

	// Rotate Xi according to Dvoretzky-Motzkin.
	Xi = append(Xi[minIndex:gw.n], Xi[0:minIndex]...)

	// Build the tree using the correct Xi in DFS order.
	rootNode := tree.New(0)
	{
		nodeCount := 0
		traversalCount := 0
		currNode := rootNode
		dfsStack := []*tree.Node{currNode}
		for len(dfsStack) > 0 {
			currNode = dfsStack[len(dfsStack)-1]
			dfsStack = dfsStack[:len(dfsStack)-1]
			for i := uint32(0); i < Xi[traversalCount]; i++ {
				nodeCount++
				newNode := tree.New(nodeCount)
				currNode.AppendChild(newNode)
				dfsStack = append(dfsStack, newNode)
			}
			traversalCount++
		}
	}
	return rootNode
}
galtonwatson.go
0.754192
0.472014
galtonwatson.go
starcoder
package primitives

import (
	"fmt"
	"github.com/zimmski/tavor/token"
	"reflect"
)

// Pointer implements a general pointer token which references a token
type Pointer struct {
	token  token.Token  // currently referenced token (may be nil)
	typ    reflect.Type // required type for any token assigned via Set
	cloned bool         // whether the referenced subtree was cloned since the last Clone
}

// NewPointer returns a new instance of a Pointer token and sets the token reference type to the token's type
func NewPointer(tok token.Token) *Pointer {
	return &Pointer{
		token: tok,
		typ:   reflect.TypeOf(tok).Elem(),
	}
}

// NewEmptyPointer returns a new instance of a Pointer token with a token reference type but without a referenced token
func NewEmptyPointer(typ interface{}) *Pointer {
	return &Pointer{
		token: nil,
		typ:   reflect.TypeOf(typ).Elem(),
	}
}

// NewTokenPointer returns a new instance of a Pointer token with the token reference type Token but without a referenced token
func NewTokenPointer(tok token.Token) *Pointer {
	// A nil *token.Token is used solely to obtain the token.Token interface type.
	var tokenType *token.Token

	return &Pointer{
		token: tok,
		typ:   reflect.TypeOf(tokenType).Elem(),
	}
}

// Set sets the referenced token which must conform to the pointers token reference type
func (p *Pointer) Set(o token.Token) error {
	if o == nil {
		p.token = nil
		p.cloned = true

		return nil
	}

	oType := reflect.TypeOf(o)

	// NOTE(review): when p.typ is not an interface kind, the right-hand
	// operand is always false, so the whole condition is false and ANY token
	// is accepted even if it is not assignable to p.typ — confirm whether the
	// intended check is `!AssignableTo && (Kind != Interface || !Implements)`.
	if !oType.AssignableTo(p.typ) && (p.typ.Kind() == reflect.Interface && !oType.Implements(p.typ)) {
		return fmt.Errorf("does not implement type %s", p.typ)
	}

	p.token = o

	return nil
}

// Token interface methods

// Clone returns a copy of the token and all its children
func (p *Pointer) Clone() token.Token {
	return &Pointer{
		token:  p.token, // do not clone further
		typ:    p.typ,
		cloned: false,
	}
}

// cloneOnFirstUse lazily clones the referenced subtree the first time the
// pointer is actually used after a Clone, stopping at nested Pointers.
func (p *Pointer) cloneOnFirstUse() {
	if !p.cloned && p.token != nil {
		// clone everything on first use until we hit pointers
		if _, ok := p.token.(*Pointer); !ok {
			p.token = p.token.Clone()
		}

		p.cloned = true
	}
}

// Parse tries to parse the token beginning from the current position in the parser data.
// If the parsing is successful the error argument is nil and the next current position after the token is returned.
func (p *Pointer) Parse(pars *token.InternalParser, cur int) (int, []error) {
	panic("Pointer token is not allowed during internal parsing")
}

// Permutation sets a specific permutation for this token
func (p *Pointer) Permutation(i uint) error {
	permutations := p.Permutations()

	if i < 1 || i > permutations {
		return &token.PermutationError{
			Type: token.PermutationErrorIndexOutOfBound,
		}
	}

	// do nothing

	return nil
}

// Permutations returns the number of permutations for this token
func (p *Pointer) Permutations() uint {
	p.cloneOnFirstUse()

	return 1
}

// PermutationsAll returns the number of all possible permutations for this token including its children
func (p *Pointer) PermutationsAll() uint {
	p.cloneOnFirstUse()

	if p.token == nil {
		panic("Pointer token does not have a referencing token")
	}

	return p.token.PermutationsAll()
}

func (p *Pointer) String() string {
	if p.token == nil {
		panic("Pointer token does not have a referencing token")
	}

	return p.token.String()
}

// ForwardToken interface methods

// Get returns the current referenced token
func (p *Pointer) Get() token.Token {
	p.cloneOnFirstUse()

	return p.token
}

// InternalGet returns the current referenced internal token
func (p *Pointer) InternalGet() token.Token {
	return p.token
}

// InternalLogicalRemove removes the referenced internal token and returns the replacement for the current token or nil if the current token should be removed.
func (p *Pointer) InternalLogicalRemove(tok token.Token) token.Token {
	if p.token == tok {
		return nil
	}

	return p
}

// InternalReplace replaces an old with a new internal token if it is referenced by this token. The error return argument is not nil, if the replacement is not suitable.
func (p *Pointer) InternalReplace(oldToken, newToken token.Token) error {
	if p.token == oldToken {
		p.token = newToken
	}

	return nil
}

// BooleanExpression interface methods

/*func (p *Pointer) Evaluate() bool {
	if tok, ok := p.token.(conditions.BooleanExpression); ok {
		return tok.Evaluate()
	} else {
		panic(fmt.Errorf("TODO token %p(%#v) is not a BooleanExpression", p.token, p.token))
	}
}*/

// Minimize interface methods

// Minimize tries to minimize itself and returns a token if it was successful, or nil if there was nothing to minimize
func (p *Pointer) Minimize() token.Token {
	// Never ever _EVER_ minimize a pointer since it is normally there for a reason
	return nil
}

// Resolve interface methods

// Resolve returns the token which is referenced by the token, or a path of tokens
func (p *Pointer) Resolve() token.Token {
	var ok bool

	po := p

	// Follow chained pointers until a non-pointer token is reached.
	for {
		c := po.InternalGet()

		po, ok = c.(*Pointer)
		if !ok {
			return c
		}
	}
}
token/primitives/pointer.go
0.784113
0.439086
pointer.go
starcoder
package datadog

import (
	"encoding/json"
)

// MetricIngestedIndexedVolumeAttributes Object containing the definition of a metric's ingested and indexed volume.
// NOTE(review): this file follows OpenAPI-generator conventions and is
// presumably machine-generated — confirm before hand-editing.
type MetricIngestedIndexedVolumeAttributes struct {
	// Indexed volume for the given metric.
	IndexedVolume *int64 `json:"indexed_volume,omitempty"`
	// Ingested volume for the given metric.
	IngestedVolume *int64 `json:"ingested_volume,omitempty"`
}

// NewMetricIngestedIndexedVolumeAttributes instantiates a new MetricIngestedIndexedVolumeAttributes object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewMetricIngestedIndexedVolumeAttributes() *MetricIngestedIndexedVolumeAttributes {
	this := MetricIngestedIndexedVolumeAttributes{}
	return &this
}

// NewMetricIngestedIndexedVolumeAttributesWithDefaults instantiates a new MetricIngestedIndexedVolumeAttributes object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewMetricIngestedIndexedVolumeAttributesWithDefaults() *MetricIngestedIndexedVolumeAttributes {
	this := MetricIngestedIndexedVolumeAttributes{}
	return &this
}

// GetIndexedVolume returns the IndexedVolume field value if set, zero value otherwise.
func (o *MetricIngestedIndexedVolumeAttributes) GetIndexedVolume() int64 {
	if o == nil || o.IndexedVolume == nil {
		var ret int64
		return ret
	}
	return *o.IndexedVolume
}

// GetIndexedVolumeOk returns a tuple with the IndexedVolume field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *MetricIngestedIndexedVolumeAttributes) GetIndexedVolumeOk() (*int64, bool) {
	if o == nil || o.IndexedVolume == nil {
		return nil, false
	}
	return o.IndexedVolume, true
}

// HasIndexedVolume returns a boolean if a field has been set.
func (o *MetricIngestedIndexedVolumeAttributes) HasIndexedVolume() bool { if o != nil && o.IndexedVolume != nil { return true } return false } // SetIndexedVolume gets a reference to the given int64 and assigns it to the IndexedVolume field. func (o *MetricIngestedIndexedVolumeAttributes) SetIndexedVolume(v int64) { o.IndexedVolume = &v } // GetIngestedVolume returns the IngestedVolume field value if set, zero value otherwise. func (o *MetricIngestedIndexedVolumeAttributes) GetIngestedVolume() int64 { if o == nil || o.IngestedVolume == nil { var ret int64 return ret } return *o.IngestedVolume } // GetIngestedVolumeOk returns a tuple with the IngestedVolume field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *MetricIngestedIndexedVolumeAttributes) GetIngestedVolumeOk() (*int64, bool) { if o == nil || o.IngestedVolume == nil { return nil, false } return o.IngestedVolume, true } // HasIngestedVolume returns a boolean if a field has been set. func (o *MetricIngestedIndexedVolumeAttributes) HasIngestedVolume() bool { if o != nil && o.IngestedVolume != nil { return true } return false } // SetIngestedVolume gets a reference to the given int64 and assigns it to the IngestedVolume field. 
func (o *MetricIngestedIndexedVolumeAttributes) SetIngestedVolume(v int64) { o.IngestedVolume = &v } func (o MetricIngestedIndexedVolumeAttributes) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} if o.IndexedVolume != nil { toSerialize["indexed_volume"] = o.IndexedVolume } if o.IngestedVolume != nil { toSerialize["ingested_volume"] = o.IngestedVolume } return json.Marshal(toSerialize) } type NullableMetricIngestedIndexedVolumeAttributes struct { value *MetricIngestedIndexedVolumeAttributes isSet bool } func (v NullableMetricIngestedIndexedVolumeAttributes) Get() *MetricIngestedIndexedVolumeAttributes { return v.value } func (v *NullableMetricIngestedIndexedVolumeAttributes) Set(val *MetricIngestedIndexedVolumeAttributes) { v.value = val v.isSet = true } func (v NullableMetricIngestedIndexedVolumeAttributes) IsSet() bool { return v.isSet } func (v *NullableMetricIngestedIndexedVolumeAttributes) Unset() { v.value = nil v.isSet = false } func NewNullableMetricIngestedIndexedVolumeAttributes(val *MetricIngestedIndexedVolumeAttributes) *NullableMetricIngestedIndexedVolumeAttributes { return &NullableMetricIngestedIndexedVolumeAttributes{value: val, isSet: true} } func (v NullableMetricIngestedIndexedVolumeAttributes) MarshalJSON() ([]byte, error) { return json.Marshal(v.value) } func (v *NullableMetricIngestedIndexedVolumeAttributes) UnmarshalJSON(src []byte) error { v.isSet = true return json.Unmarshal(src, &v.value) }
api/v2/datadog/model_metric_ingested_indexed_volume_attributes.go
0.844922
0.66182
model_metric_ingested_indexed_volume_attributes.go
starcoder
package insteon

import (
	"fmt"
	"sync"
)

// SwitchConfig contains the HouseCode and UnitCode for a switch's
// X10 configuration
type SwitchConfig struct {
	// HouseCode is the X10 house code of the switch or dimmer
	HouseCode int

	// UnitCode is the X10 unit code of the switch or dimmer
	UnitCode int
}

// UnmarshalBinary takes the given byte buffer and unmarshals it into
// the receiver
func (sc *SwitchConfig) UnmarshalBinary(buf []byte) error {
	if len(buf) < 14 {
		return ErrBufferTooShort
	}
	// House and unit codes live at fixed offsets 4 and 5 of the 14-byte record.
	sc.HouseCode = int(buf[4])
	sc.UnitCode = int(buf[5])
	return nil
}

// MarshalBinary will convert the receiver into a serialized byte buffer
func (sc *SwitchConfig) MarshalBinary() ([]byte, error) {
	buf := make([]byte, 14)
	buf[4] = byte(sc.HouseCode)
	buf[5] = byte(sc.UnitCode)
	return buf, nil
}

// LightFlags are the operating flags for a switch or dimmer
type LightFlags [5]byte

// ProgramLock indicates if the Program Lock flag is set
func (lf LightFlags) ProgramLock() bool { return lf[0]&01 == 0x01 }

// TxLED indicates whether the status LED will flash when Insteon traffic is received
func (lf LightFlags) TxLED() bool { return lf[0]&0x02 == 0x02 }

// ResumeDim indicates if the switch will return to the previous on level or
// will return to the default on level
func (lf LightFlags) ResumeDim() bool { return lf[0]&0x04 == 0x04 }

// LED indicates if the status LED is enabled
func (lf LightFlags) LED() bool { return lf[3]&0x10 == 0x10 }

// LoadSense indicates if the device should activate when a load is
// added
func (lf LightFlags) LoadSense() bool { return lf[4]&0x20 == 0x20 }

// DBDelta indicates the number of changes that have been written to the all-link
// database
func (lf LightFlags) DBDelta() int { return int(lf[1]) }

// SNR indicates the current signal-to-noise ratio
func (lf LightFlags) SNR() int { return int(lf[2]) }

// X10Enabled indicates if the device will respond to X10 commands.
// Note the inverted sense: the bit set means X10 is DISABLED.
func (lf LightFlags) X10Enabled() bool { return lf[4]&0x01 != 0x01 }

// ErrorBlink enables the device to blink the status LED when errors occur
// TODO: Confirm this description is correct
func (lf LightFlags) ErrorBlink() bool { return lf[4]&0x02 == 0x02 }

// CleanupReport enables sending All-link cleanup reports
// TODO: Confirm this description is correct
func (lf LightFlags) CleanupReport() bool { return lf[4]&0x04 == 0x04 }

// LightState tracks the last known on-level of a switch or dimmer.
type LightState struct {
	Level int
}

// Switch controls a physical on/off switch on the Insteon network.
type Switch struct {
	Device
	bus   Bus
	info  DeviceInfo
	state LightState
	mu    sync.Mutex // guards state, updated from bus callbacks
}

// NewSwitch will return an initialized switch object that controls
// a physical switch on the network
func NewSwitch(device Device, bus Bus, info DeviceInfo) *Switch {
	sd := &Switch{Device: device, bus: bus, info: info}
	// Track local state from broadcast all-link on/off messages.
	sd.On(And(AllLinkMatcher(), CmdMatcher(CmdLightOn)), sd.onTurnOn)
	sd.On(And(AllLinkMatcher(), CmdMatcher(CmdLightOff)), sd.onTurnOff)
	return sd
}

// Status sends a LightStatusRequest to determine the device's current
// level. For switched devices this is either 0 or 255, dimmable devices
// will be the current dim level between 0 and 255
func (sd *Switch) Status() (level int, err error) {
	ack, err := sd.Send(CmdLightStatusRequest, nil)
	if err == nil {
		level = ack.Command2()
	}
	return level, err
}

// String returns a human-readable identifier including the device address.
func (sd *Switch) String() string {
	return fmt.Sprintf("Switch (%s)", sd.info.Address)
}

// Config requests and returns the switch's X10 configuration.
func (sd *Switch) Config() (config SwitchConfig, err error) {
	// SEE Dimmer.Config() notes for explanation of D1 and D2 (payload[0] and payload[1])
	rx := sd.Subscribe(And(Not(AckMatcher()), CmdMatcher(CmdExtendedGetSet)))
	defer sd.Unsubscribe(rx)
	msg, err := sd.Publish(&Message{Command: CmdExtendedGetSet, Payload: []byte{0x01, 0x00}})
	if err == nil {
		msg, err = ReadWithTimeout(rx, sd.bus.Config().Timeout(true))
		if err == nil {
			err = config.UnmarshalBinary(msg.Payload)
		}
	}
	return config, err
}

// OperatingFlags queries the device's operating-flag groups one
// sub-command at a time and packs the replies into a LightFlags array.
// NOTE(review): Status reads ack.Command2() while this method reads
// ack.Command.Command2() — confirm both accessors are equivalent.
func (sd *Switch) OperatingFlags() (flags LightFlags, err error) {
	commands := []Command{
		CmdGetOperatingFlags.SubCommand(0x01),
		CmdGetOperatingFlags.SubCommand(0x02),
		CmdGetOperatingFlags.SubCommand(0x04),
		CmdGetOperatingFlags.SubCommand(0x10),
		CmdGetOperatingFlags.SubCommand(0x20),
	}

	var ack *Message
	// Stop at the first error; flags gathered so far are returned as-is.
	for i := 0; i < len(commands) && err == nil; i++ {
		ack, err = sd.Publish(&Message{Command: commands[i]})
		if err == nil {
			flags[i] = byte(ack.Command.Command2())
		}
	}
	return
}

// SetLoadSense enables or disables load-sense on the device.
func (sd *Switch) SetLoadSense(loadsense bool) error {
	if loadsense {
		return sd.SendCommand(CmdEnableLoadSense, make([]byte, 14))
	}
	return sd.SendCommand(CmdDisableLoadSense, make([]byte, 14))
}

// SetBacklight enables or disables the status LED.
func (sd *Switch) SetBacklight(light bool) error {
	if light {
		return sd.SendCommand(CmdEnableLED, make([]byte, 14))
	}
	return sd.SendCommand(CmdDisableLED, make([]byte, 14))
}

// onTurnOff is the bus callback that records the device turning off.
func (sd *Switch) onTurnOff(msg *Message) {
	sd.mu.Lock()
	defer sd.mu.Unlock()
	Log.Debugf("%s turned off", sd.info.Address)
	sd.state.Level = 0
}

// TurnOff commands the physical switch to turn off.
func (sd *Switch) TurnOff() error {
	return sd.SendCommand(CmdLightOff, nil)
}

// onTurnOn is the bus callback that records the device's new on-level.
func (sd *Switch) onTurnOn(msg *Message) {
	sd.mu.Lock()
	defer sd.mu.Unlock()
	sd.state.Level = int(msg.Command.Command2())
	Log.Debugf("%s turned on to level %d", sd.info.Address, sd.state.Level)
}

// TurnOn commands the physical switch to turn on to the given level.
func (sd *Switch) TurnOn(level int) error {
	return sd.SendCommand(CmdLightOn.SubCommand(level), nil)
}

// Address returns the Insteon address of the device.
func (sd *Switch) Address() Address {
	return sd.info.Address
}
switch.go
0.599837
0.404684
switch.go
starcoder
package units

import (
	"math"
)

// magnitude describes one metric (SI) prefix: its symbol, its spelled-out
// prefix, and the power of ten it represents.
type magnitude struct {
	Symbol string
	Prefix string
	Power  float64
}

var mags = map[string]magnitude{
	"exa":   magnitude{"E", "exa", 18.0},
	"peta":  magnitude{"P", "peta", 15.0},
	"tera":  magnitude{"T", "tera", 12.0},
	"giga":  magnitude{"G", "giga", 9.0},
	"mega":  magnitude{"M", "mega", 6.0},
	"kilo":  magnitude{"k", "kilo", 3.0},
	"hecto": magnitude{"h", "hecto", 2.0},
	"deca":  magnitude{"da", "deca", 1.0},
	"deci":  magnitude{"d", "deci", -1.0},
	"centi": magnitude{"c", "centi", -2.0},
	"milli": magnitude{"m", "milli", -3.0},
	"micro": magnitude{"μ", "micro", -6.0},
	"nano":  magnitude{"n", "nano", -9.0},
	"pico":  magnitude{"p", "pico", -12.0},
	"femto": magnitude{"f", "femto", -15.0},
	"atto":  magnitude{"a", "atto", -18.0},
}

// Magnitude prefix methods create and return a new Unit, while automatically registering
// conversions to and from the provided base Unit

// Exa returns b scaled by 10^18.
func Exa(b Unit, o ...UnitOption) Unit {
	return mags["exa"].makeUnit(b, o...)
}

// Peta returns b scaled by 10^15.
func Peta(b Unit, o ...UnitOption) Unit {
	return mags["peta"].makeUnit(b, o...)
}

// Tera returns b scaled by 10^12.
func Tera(b Unit, o ...UnitOption) Unit {
	return mags["tera"].makeUnit(b, o...)
}

// Giga returns b scaled by 10^9.
func Giga(b Unit, o ...UnitOption) Unit {
	return mags["giga"].makeUnit(b, o...)
}

// Mega returns b scaled by 10^6.
func Mega(b Unit, o ...UnitOption) Unit {
	return mags["mega"].makeUnit(b, o...)
}

// Kilo returns b scaled by 10^3.
func Kilo(b Unit, o ...UnitOption) Unit {
	return mags["kilo"].makeUnit(b, o...)
}

// Hecto returns b scaled by 10^2.
func Hecto(b Unit, o ...UnitOption) Unit {
	return mags["hecto"].makeUnit(b, o...)
}

// Deca returns b scaled by 10^1.
func Deca(b Unit, o ...UnitOption) Unit {
	return mags["deca"].makeUnit(b, o...)
}

// Deci returns b scaled by 10^-1.
func Deci(b Unit, o ...UnitOption) Unit {
	return mags["deci"].makeUnit(b, o...)
}

// Centi returns b scaled by 10^-2.
func Centi(b Unit, o ...UnitOption) Unit {
	return mags["centi"].makeUnit(b, o...)
}

// Milli returns b scaled by 10^-3.
func Milli(b Unit, o ...UnitOption) Unit {
	return mags["milli"].makeUnit(b, o...)
}

// Micro returns b scaled by 10^-6.
func Micro(b Unit, o ...UnitOption) Unit {
	return mags["micro"].makeUnit(b, o...)
}

// Nano returns b scaled by 10^-9.
func Nano(b Unit, o ...UnitOption) Unit {
	return mags["nano"].makeUnit(b, o...)
}

// Pico returns b scaled by 10^-12.
func Pico(b Unit, o ...UnitOption) Unit {
	return mags["pico"].makeUnit(b, o...)
}

// Femto returns b scaled by 10^-15.
func Femto(b Unit, o ...UnitOption) Unit {
	return mags["femto"].makeUnit(b, o...)
}

// Atto returns b scaled by 10^-18.
func Atto(b Unit, o ...UnitOption) Unit {
	return mags["atto"].makeUnit(b, o...)
}

// Create magnitude unit and conversion given a base unit
func (mag magnitude) makeUnit(base Unit, addOpts ...UnitOption) Unit {
	name := mag.Prefix + base.Name
	symbol := mag.Symbol + base.Symbol

	// set system to metric by default
	opts := []UnitOption{SI}

	// create prefixed aliases if needed
	for _, alias := range base.aliases {
		magAlias := mag.Prefix + alias
		opts = append(opts, UnitOptionAliases(magAlias))
	}

	// append any supplemental options
	for _, opt := range addOpts {
		opts = append(opts, opt)
	}

	// append quantity name opt
	opts = append(opts, UnitOptionQuantity(base.Quantity))

	u := NewUnit(name, symbol, opts...)

	// only create conversions to and from base unit
	ratio := 1.0 * math.Pow(10.0, mag.Power)
	NewRatioConversion(u, base, ratio)

	return u
}
metric.go
0.687525
0.546254
metric.go
starcoder
package springweb import "math" var ArmResist float64 = 1e-3 var SpringResist float64 = 1e-3 type Arm struct { K, w, InitAngle, PrevAngle, prevAngleUnrest float64 Rotations int } type Spring struct { To *Node K,Distance,prevDistance float64 FromArm, ToArm Arm } type Node struct { X, Y, R, M float64 VelocityX, VelocityY float64 Angle, wAvgSum float64 Springs []Spring } func (arm *Arm) Prepare() { arm.PrevAngle = arm.InitAngle arm.Rotations = 0 } func (s *Spring) Prepare() { s.FromArm.Prepare() s.ToArm.Prepare() } func (node *Node) Prepare() { node.VelocityX = 0 node.VelocityY = 0 node.avgRotationsPrepare() for j, _ := range node.Springs { node.Springs[j].Prepare() } } func distanceXY(xDiff, yDiff float64) float64 { return math.Sqrt(math.Pow(xDiff, 2) + math.Pow(yDiff, 2)) } func distance(a, b *Node) float64 { return distanceXY(a.X-b.X, a.Y-b.Y) } func (node *Node) angle(to *Node) float64 { return math.Atan2(to.Y-node.Y, to.X-node.X) } func NewNode(x, y, r, m float64) Node { return Node{x, y, r, m, 0, 0, 0, 0, nil} } func (node *Node) NewSpring(to *Node, k, a float64) { d := distance(node, to) node.Springs = append(node.Springs, Spring{to, k, d, d, Arm{a, 0, node.angle(to), 0, 0, 0}, Arm{a, 0, to.angle(node), 0, 0, 0}}) } func (node *Node) accelerate(forceX, forceY, duration float64) { w := duration / node.M node.VelocityX += forceX * w node.VelocityY += forceY * w } func (s *Spring) bounce(node *Node, duration float64) { xDiff := s.To.X - node.X yDiff := s.To.Y - node.Y actualDistance := distanceXY(xDiff, yDiff) xDiffN := xDiff / actualDistance yDiffN := yDiff / actualDistance contractF := s.K * (actualDistance - s.Distance) distIncr := actualDistance - s.prevDistance s.prevDistance = actualDistance if distIncr > 0 { contractF += SpringResist } else if distIncr < -0 { contractF -= SpringResist } forceX := xDiffN * contractF forceY := yDiffN * contractF impactDepth := (node.R + s.To.R) - actualDistance if impactDepth > 0 { refDepth := math.Min(node.R, s.To.R) 
elasticF := s.K * s.Distance * impactDepth / refDepth forceX -= xDiffN * elasticF forceY -= yDiffN * elasticF } node.accelerate(forceX, forceY, duration) s.To.accelerate(-forceX, -forceY, duration) } func (arm *Arm) updateAngle(angle float64) { diff := angle - arm.PrevAngle arm.PrevAngle = angle if diff > math.Pi { arm.Rotations-- } if diff < -math.Pi { arm.Rotations++ } } func (arm *Arm) Angle() float64 { return arm.PrevAngle + float64(arm.Rotations)*math.Pi*2 } func (node *Node) torque(arm *Arm, to *Node, duration float64) { d := distance(node, to) arm.w = arm.K / d Angle := arm.Angle() restAngle := arm.InitAngle + node.Angle angleUnrest := Angle - restAngle unrestIncr := angleUnrest - arm.prevAngleUnrest arm.prevAngleUnrest = angleUnrest if unrestIncr > 0 { angleUnrest += ArmResist*d } else if unrestIncr < -0 { angleUnrest -= ArmResist*d } normalizeAndTorqueF := angleUnrest * arm.w / d forceX := (to.Y - node.Y) * normalizeAndTorqueF forceY := (node.X - to.X) * normalizeAndTorqueF node.accelerate(-forceX, -forceY, duration) to.accelerate(forceX, forceY, duration) } func (s *Spring) torque(node *Node, duration float64) { node.torque(&s.FromArm, s.To, duration) s.To.torque(&s.ToArm, node, duration) } func (node *Node) move(duration float64) { dMove := duration * distanceXY(node.VelocityX, node.VelocityY) rMove := node.R * .6 if dMove > rMove { velocityCap := rMove / dMove node.VelocityX *= velocityCap node.VelocityY *= velocityCap } node.X += node.VelocityX * duration node.Y += node.VelocityY * duration } func (node *Node) avgRotationsPrepare() { node.Angle = 0 node.wAvgSum = 0 } func avgRotations(nodes []Node) { iLast := len(nodes) - 1 for iForward, _ := range nodes { i := iLast - iForward n := &nodes[i] for j, _ := range n.Springs { s := &n.Springs[j] t := s.To s.FromArm.updateAngle(n.angle(t)) s.ToArm.updateAngle(t.angle(n)) n.Angle += (s.FromArm.Angle() - s.FromArm.InitAngle) * s.FromArm.w t.Angle += (s.ToArm.Angle() - s.ToArm.InitAngle) * s.ToArm.w n.wAvgSum 
+= s.FromArm.w t.wAvgSum += s.ToArm.w } n.Angle /= n.wAvgSum } } func StepsPrepare(nodes []Node) { for i, _ := range nodes { nodes[i].Prepare() } } func Step(nodes []Node, duration float64) { iLast := len(nodes) - 1 for iForward, _ := range nodes { i := iLast - iForward n := &nodes[i] for j, _ := range n.Springs { s := &n.Springs[j] s.bounce(n, duration) s.torque(n, duration) } n.move(duration) n.avgRotationsPrepare() } avgRotations(nodes) }
springweb.go
0.721841
0.476092
springweb.go
starcoder
package goutils import( "time"; ) /* int alias for uint byte alias for uint8 rune alias for int32 */ // Compare a,b: // -1 , if a < b // 0 , if a == b // 1 , if a > b type TypeComparator func(a, b interface{}) int // Compare strings func StringComparator(a, b interface{}) int { s1 := a.(string) s2 := b.(string) min := len(s2) if len(s1) < len(s2) { min = len(s1) } diff := 0 for i := 0; i < min && diff == 0; i++ { diff = int(s1[i]) - int(s2[i]) } if diff == 0 { diff = len(s1) - len(s2) } if diff < 0 { return -1 } if diff > 0 { return 1 } return 0 } // Compare int func IntComparator(a, b interface{}) int { aAsserted := a.(int) bAsserted := b.(int) switch { case aAsserted > bAsserted: return 1 case aAsserted < bAsserted: return -1 default: return 0 } } // Compare int8 func Int8Comparator(a, b interface{}) int { aAsserted := a.(int8) bAsserted := b.(int8) switch { case aAsserted > bAsserted: return 1 case aAsserted < bAsserted: return -1 default: return 0 } } // Compare int16 func Int16Comparator(a, b interface{}) int { aAsserted := a.(int16) bAsserted := b.(int16) switch { case aAsserted > bAsserted: return 1 case aAsserted < bAsserted: return -1 default: return 0 } } // Compare int32 func Int32Comparator(a, b interface{}) int { aAsserted := a.(int32) bAsserted := b.(int32) switch { case aAsserted > bAsserted: return 1 case aAsserted < bAsserted: return -1 default: return 0 } } // Compare int64 func Int64Comparator(a, b interface{}) int { aAsserted := a.(int64) bAsserted := b.(int64) switch { case aAsserted > bAsserted: return 1 case aAsserted < bAsserted: return -1 default: return 0 } } // Compare uint func UIntComparator(a, b interface{}) int { aAsserted := a.(uint) bAsserted := b.(uint) switch { case aAsserted > bAsserted: return 1 case aAsserted < bAsserted: return -1 default: return 0 } } // Compare uint8 func UInt8Comparator(a, b interface{}) int { aAsserted := a.(uint8) bAsserted := b.(uint8) switch { case aAsserted > bAsserted: return 1 case aAsserted < 
bAsserted: return -1 default: return 0 } } // Compare uint16 func UInt16Comparator(a, b interface{}) int { aAsserted := a.(uint16) bAsserted := b.(uint16) switch { case aAsserted > bAsserted: return 1 case aAsserted < bAsserted: return -1 default: return 0 } } // Compare uint32 func UInt32Comparator(a, b interface{}) int { aAsserted := a.(uint32) bAsserted := b.(uint32) switch { case aAsserted > bAsserted: return 1 case aAsserted < bAsserted: return -1 default: return 0 } } // Compare uint64 func UInt64Comparator(a, b interface{}) int { aAsserted := a.(uint64) bAsserted := b.(uint64) switch { case aAsserted > bAsserted: return 1 case aAsserted < bAsserted: return -1 default: return 0 } } // Compare float32 func Float32Comparator(a, b interface{}) int { aAsserted := a.(float32) bAsserted := b.(float32) switch { case aAsserted > bAsserted: return 1 case aAsserted < bAsserted: return -1 default: return 0 } } // Compare float64 func Float64Comparator(a, b interface{}) int { aAsserted := a.(float64) bAsserted := b.(float64) switch { case aAsserted > bAsserted: return 1 case aAsserted < bAsserted: return -1 default: return 0 } } // Compare byte func ByteComparator(a, b interface{}) int { aAsserted := a.(byte) bAsserted := b.(byte) switch { case aAsserted > bAsserted: return 1 case aAsserted < bAsserted: return -1 default: return 0 } } // Compare rune (char with int32) func RuneComparator(a, b interface{}) int { aAsserted := a.(rune) bAsserted := b.(rune) switch { case aAsserted > bAsserted: return 1 case aAsserted < bAsserted: return -1 default: return 0 } } // Compare Time func TimeComparator(a, b interface{}) int { aAsserted := a.(time.Time) bAsserted := b.(time.Time) switch { case aAsserted.After(bAsserted): return 1 case aAsserted.Before(bAsserted): return -1 default: return 0 } }
goutils_typecomparator_functions.go
0.657318
0.438665
goutils_typecomparator_functions.go
starcoder
//nolint:unparam // golangci-linter package bls12381 import ( "errors" "math" "math/big" ) // PointG2 is type for point in G2. // PointG2 is both used for Affine and Jacobian point representation. // If z is equal to one the point is considered as in affine form. type PointG2 [3]fe2 // Set copies valeus of one point to another. func (p *PointG2) Set(p2 *PointG2) *PointG2 { p[0].set(&p2[0]) p[1].set(&p2[1]) p[2].set(&p2[2]) return p } // Zero returns G2 point in point at infinity representation func (p *PointG2) Zero() *PointG2 { p[0].zero() p[1].one() p[2].zero() return p } type tempG2 struct { t [9]*fe2 } // G2 is struct for G2 group. type G2 struct { f *fp2 tempG2 } // NewG2 constructs a new G2 instance. func NewG2() *G2 { return newG2(nil) } func newG2(f *fp2) *G2 { if f == nil { f = newFp2() } t := newTempG2() return &G2{f, t} } func newTempG2() tempG2 { t := [9]*fe2{} for i := 0; i < 9; i++ { t[i] = &fe2{} } return tempG2{t} } // Q returns group order in big.Int. func (g *G2) Q() *big.Int { return new(big.Int).Set(q) } func (g *G2) fromBytesUnchecked(in []byte) (*PointG2, error) { p0, err := g.f.fromBytes(in[:96]) if err != nil { return nil, err } p1, err := g.f.fromBytes(in[96:]) if err != nil { return nil, err } p2 := new(fe2).one() return &PointG2{*p0, *p1, *p2}, nil } // FromBytes constructs a new point given uncompressed byte input. // FromBytes does not take zcash flags into account. // Byte input expected to be larger than 96 bytes. // First 192 bytes should be concatenation of x and y values // Point (0, 0) is considered as infinity. 
func (g *G2) FromBytes(in []byte) (*PointG2, error) { if len(in) != 192 { return nil, errors.New("input string should be equal or larger than 192") } p0, err := g.f.fromBytes(in[:96]) if err != nil { return nil, err } p1, err := g.f.fromBytes(in[96:]) if err != nil { return nil, err } // check if given input points to infinity if p0.isZero() && p1.isZero() { return g.Zero(), nil } p2 := new(fe2).one() p := &PointG2{*p0, *p1, *p2} if !g.IsOnCurve(p) { return nil, errors.New("point is not on curve") } return p, nil } // DecodePoint given encoded (x, y) coordinates in 256 bytes returns a valid G1 Point. func (g *G2) DecodePoint(in []byte) (*PointG2, error) { if len(in) != 256 { return nil, errors.New("invalid g2 point length") } pointBytes := make([]byte, 192) x0Bytes, err := decodeFieldElement(in[:64]) if err != nil { return nil, err } x1Bytes, err := decodeFieldElement(in[64:128]) if err != nil { return nil, err } y0Bytes, err := decodeFieldElement(in[128:192]) if err != nil { return nil, err } y1Bytes, err := decodeFieldElement(in[192:]) if err != nil { return nil, err } copy(pointBytes[:48], x1Bytes) copy(pointBytes[48:96], x0Bytes) copy(pointBytes[96:144], y1Bytes) copy(pointBytes[144:192], y0Bytes) return g.FromBytes(pointBytes) } // ToBytes serializes a point into bytes in uncompressed form, // does not take zcash flags into account, // returns (0, 0) if point is infinity. func (g *G2) ToBytes(p *PointG2) []byte { out := make([]byte, 192) if g.IsZero(p) { return out } g.Affine(p) copy(out[:96], g.f.toBytes(&p[0])) copy(out[96:], g.f.toBytes(&p[1])) return out } // EncodePoint encodes a point into 256 bytes. 
func (g *G2) EncodePoint(p *PointG2) []byte { // outRaw is 96 bytes outRaw := g.ToBytes(p) out := make([]byte, 256) // encode x copy(out[16:16+48], outRaw[48:96]) copy(out[80:80+48], outRaw[:48]) // encode y copy(out[144:144+48], outRaw[144:]) copy(out[208:208+48], outRaw[96:144]) return out } // New creates a new G2 Point which is equal to zero in other words point at infinity. func (g *G2) New() *PointG2 { return new(PointG2).Zero() } // Zero returns a new G2 Point which is equal to point at infinity. func (g *G2) Zero() *PointG2 { return new(PointG2).Zero() } // One returns a new G2 Point which is equal to generator point. func (g *G2) One() *PointG2 { p := &PointG2{} return p.Set(&g2One) } // IsZero returns true if given point is equal to zero. func (g *G2) IsZero(p *PointG2) bool { return p[2].isZero() } // Equal checks if given two G2 point is equal in their affine form. func (g *G2) Equal(p1, p2 *PointG2) bool { if g.IsZero(p1) { return g.IsZero(p2) } if g.IsZero(p2) { return g.IsZero(p1) } t := g.t g.f.square(t[0], &p1[2]) g.f.square(t[1], &p2[2]) g.f.mul(t[2], t[0], &p2[0]) g.f.mul(t[3], t[1], &p1[0]) g.f.mul(t[0], t[0], &p1[2]) g.f.mul(t[1], t[1], &p2[2]) g.f.mul(t[1], t[1], &p1[1]) g.f.mul(t[0], t[0], &p2[1]) return t[0].equal(t[1]) && t[2].equal(t[3]) } // InCorrectSubgroup checks whether given point is in correct subgroup. func (g *G2) InCorrectSubgroup(p *PointG2) bool { tmp := &PointG2{} g.MulScalar(tmp, p, q) return g.IsZero(tmp) } // IsOnCurve checks a G2 point is on curve. func (g *G2) IsOnCurve(p *PointG2) bool { if g.IsZero(p) { return true } t := g.t g.f.square(t[0], &p[1]) g.f.square(t[1], &p[0]) g.f.mul(t[1], t[1], &p[0]) g.f.square(t[2], &p[2]) g.f.square(t[3], t[2]) g.f.mul(t[2], t[2], t[3]) g.f.mul(t[2], b2, t[2]) g.f.add(t[1], t[1], t[2]) return t[0].equal(t[1]) } // IsAffine checks a G2 point whether it is in affine form. func (g *G2) IsAffine(p *PointG2) bool { return p[2].isOne() } // Affine calculates affine form of given G2 point. 
func (g *G2) Affine(p *PointG2) *PointG2 { if g.IsZero(p) { return p } if !g.IsAffine(p) { t := g.t g.f.inverse(t[0], &p[2]) g.f.square(t[1], t[0]) g.f.mul(&p[0], &p[0], t[1]) g.f.mul(t[0], t[0], t[1]) g.f.mul(&p[1], &p[1], t[0]) p[2].one() } return p } // Add adds two G2 points p1, p2 and assigns the result to point at first argument. func (g *G2) Add(r, p1, p2 *PointG2) *PointG2 { // http://www.hyperelliptic.org/EFD/gp/auto-shortw-jacobian-0.html#addition-add-2007-bl if g.IsZero(p1) { return r.Set(p2) } if g.IsZero(p2) { return r.Set(p1) } t := g.t g.f.square(t[7], &p1[2]) g.f.mul(t[1], &p2[0], t[7]) g.f.mul(t[2], &p1[2], t[7]) g.f.mul(t[0], &p2[1], t[2]) g.f.square(t[8], &p2[2]) g.f.mul(t[3], &p1[0], t[8]) g.f.mul(t[4], &p2[2], t[8]) g.f.mul(t[2], &p1[1], t[4]) if t[1].equal(t[3]) { if t[0].equal(t[2]) { return g.Double(r, p1) } else { return r.Zero() } } g.f.sub(t[1], t[1], t[3]) g.f.double(t[4], t[1]) g.f.square(t[4], t[4]) g.f.mul(t[5], t[1], t[4]) g.f.sub(t[0], t[0], t[2]) g.f.double(t[0], t[0]) g.f.square(t[6], t[0]) g.f.sub(t[6], t[6], t[5]) g.f.mul(t[3], t[3], t[4]) g.f.double(t[4], t[3]) g.f.sub(&r[0], t[6], t[4]) g.f.sub(t[4], t[3], &r[0]) g.f.mul(t[6], t[2], t[5]) g.f.double(t[6], t[6]) g.f.mul(t[0], t[0], t[4]) g.f.sub(&r[1], t[0], t[6]) g.f.add(t[0], &p1[2], &p2[2]) g.f.square(t[0], t[0]) g.f.sub(t[0], t[0], t[7]) g.f.sub(t[0], t[0], t[8]) g.f.mul(&r[2], t[0], t[1]) return r } // Double doubles a G2 point p and assigns the result to the point at first argument. 
func (g *G2) Double(r, p *PointG2) *PointG2 { // http://www.hyperelliptic.org/EFD/gp/auto-shortw-jacobian-0.html#doubling-dbl-2009-l if g.IsZero(p) { return r.Set(p) } t := g.t g.f.square(t[0], &p[0]) g.f.square(t[1], &p[1]) g.f.square(t[2], t[1]) g.f.add(t[1], &p[0], t[1]) g.f.square(t[1], t[1]) g.f.sub(t[1], t[1], t[0]) g.f.sub(t[1], t[1], t[2]) g.f.double(t[1], t[1]) g.f.double(t[3], t[0]) g.f.add(t[0], t[3], t[0]) g.f.square(t[4], t[0]) g.f.double(t[3], t[1]) g.f.sub(&r[0], t[4], t[3]) g.f.sub(t[1], t[1], &r[0]) g.f.double(t[2], t[2]) g.f.double(t[2], t[2]) g.f.double(t[2], t[2]) g.f.mul(t[0], t[0], t[1]) g.f.sub(t[1], t[0], t[2]) g.f.mul(t[0], &p[1], &p[2]) r[1].set(t[1]) g.f.double(&r[2], t[0]) return r } // Neg negates a G2 point p and assigns the result to the point at first argument. func (g *G2) Neg(r, p *PointG2) *PointG2 { r[0].set(&p[0]) g.f.neg(&r[1], &p[1]) r[2].set(&p[2]) return r } // Sub subtracts two G2 points p1, p2 and assigns the result to point at first argument. func (g *G2) Sub(c, a, b *PointG2) *PointG2 { d := &PointG2{} g.Neg(d, b) g.Add(c, a, d) return c } // MulScalar multiplies a point by given scalar value in big.Int and assigns the result to point at first argument. func (g *G2) MulScalar(c, p *PointG2, e *big.Int) *PointG2 { q, n := &PointG2{}, &PointG2{} n.Set(p) l := e.BitLen() for i := 0; i < l; i++ { if e.Bit(i) == 1 { g.Add(q, q, n) } g.Double(n, n) } return c.Set(q) } // ClearCofactor maps given a G2 point to correct subgroup func (g *G2) ClearCofactor(p *PointG2) { g.MulScalar(p, p, cofactorEFFG2) } // MultiExp calculates multi exponentiation. Given pairs of G2 point and scalar values // (P_0, e_0), (P_1, e_1), ... (P_n, e_n) calculates r = e_0 * P_0 + e_1 * P_1 + ... + e_n * P_n // Length of points and scalars are expected to be equal, otherwise an error is returned. // Result is assigned to point at first argument. 
func (g *G2) MultiExp(r *PointG2, points []*PointG2, powers []*big.Int) (*PointG2, error) { if len(points) != len(powers) { return nil, errors.New("point and scalar vectors should be in same length") } var c uint32 = 3 if len(powers) >= 32 { c = uint32(math.Ceil(math.Log10(float64(len(powers))))) } bucketSize, numBits := (1<<c)-1, uint32(g.Q().BitLen()) windows := make([]*PointG2, numBits/c+1) bucket := make([]*PointG2, bucketSize) acc, sum := g.New(), g.New() for i := 0; i < bucketSize; i++ { bucket[i] = g.New() } mask := (uint64(1) << c) - 1 j := 0 var cur uint32 for cur <= numBits { acc.Zero() bucket = make([]*PointG2, (1<<c)-1) for i := 0; i < len(bucket); i++ { bucket[i] = g.New() } for i := 0; i < len(powers); i++ { s0 := powers[i].Uint64() index := uint(s0 & mask) if index != 0 { g.Add(bucket[index-1], bucket[index-1], points[i]) } powers[i] = new(big.Int).Rsh(powers[i], uint(c)) } sum.Zero() for i := len(bucket) - 1; i >= 0; i-- { g.Add(sum, sum, bucket[i]) g.Add(acc, acc, sum) } windows[j] = g.New() windows[j].Set(acc) j++ cur += c } acc.Zero() for i := len(windows) - 1; i >= 0; i-- { for j := uint32(0); j < c; j++ { g.Double(acc, acc) } g.Add(acc, acc, windows[i]) } return r.Set(acc), nil } // MapToCurve given a byte slice returns a valid G2 point. // This mapping function implements the Simplified Shallue-van de Woestijne-Ulas method. // https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-05#section-6.6.2 // Input byte slice should be a valid field element, otherwise an error is returned. func (g *G2) MapToCurve(in []byte) (*PointG2, error) { fp2 := g.f u, err := fp2.fromBytes(in) if err != nil { return nil, err } x, y := swuMapG2(fp2, u) isogenyMapG2(fp2, x, y) z := new(fe2).one() q := &PointG2{*x, *y, *z} g.ClearCofactor(q) return g.Affine(q), nil }
plugin/dapp/evm/executor/vm/common/crypto/bls12381/g2.go
0.562657
0.436502
g2.go
starcoder
package spec import ( "strings" "sync/atomic" "testing" "time" "github.com/256dpi/gomqtt/client" "github.com/256dpi/gomqtt/packet" "github.com/256dpi/gomqtt/transport" "github.com/256dpi/gomqtt/transport/flow" "github.com/stretchr/testify/assert" ) // PublishSubscribeTest tests the broker for basic pub sub support. func PublishSubscribeTest(t *testing.T, config *Config, pub, sub string, subQOS, pubQOS, recQOS packet.QOS) { c := client.New() wait := make(chan struct{}) c.Callback = func(msg *packet.Message, err error) error { assert.NoError(t, err) assert.Equal(t, pub, msg.Topic) assert.Equal(t, testPayload, msg.Payload) assert.Equal(t, recQOS, msg.QOS) assert.False(t, msg.Retain) close(wait) return nil } cf, err := c.Connect(client.NewConfig(config.URL)) assert.NoError(t, err) assert.NoError(t, cf.Wait(10*time.Second)) assert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode()) assert.False(t, cf.SessionPresent()) sf, err := c.Subscribe(sub, subQOS) assert.NoError(t, err) assert.NoError(t, sf.Wait(10*time.Second)) assert.Equal(t, []packet.QOS{subQOS}, sf.ReturnCodes()) pf, err := c.Publish(pub, testPayload, pubQOS, false) assert.NoError(t, err) assert.NoError(t, pf.Wait(10*time.Second)) safeReceive(wait) time.Sleep(config.NoMessageWait) err = c.Disconnect() assert.NoError(t, err) } // UnsubscribeTest tests the broker for unsubscribe support. 
func UnsubscribeTest(t *testing.T, config *Config, topic string, qos packet.QOS) { c := client.New() wait := make(chan struct{}) c.Callback = func(msg *packet.Message, err error) error { assert.NoError(t, err) assert.Equal(t, topic+"/2", msg.Topic) assert.Equal(t, testPayload, msg.Payload) assert.Equal(t, qos, msg.QOS) assert.False(t, msg.Retain) close(wait) return nil } cf, err := c.Connect(client.NewConfig(config.URL)) assert.NoError(t, err) assert.NoError(t, cf.Wait(10*time.Second)) assert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode()) assert.False(t, cf.SessionPresent()) sf, err := c.Subscribe(topic+"/1", qos) assert.NoError(t, err) assert.NoError(t, sf.Wait(10*time.Second)) assert.Equal(t, []packet.QOS{qos}, sf.ReturnCodes()) sf, err = c.Subscribe(topic+"/2", qos) assert.NoError(t, err) assert.NoError(t, sf.Wait(10*time.Second)) assert.Equal(t, []packet.QOS{qos}, sf.ReturnCodes()) uf, err := c.Unsubscribe(topic + "/1") assert.NoError(t, err) assert.NoError(t, uf.Wait(10*time.Second)) pf, err := c.Publish(topic+"/1", testPayload, qos, false) assert.NoError(t, err) assert.NoError(t, pf.Wait(10*time.Second)) pf, err = c.Publish(topic+"/2", testPayload, qos, false) assert.NoError(t, err) assert.NoError(t, pf.Wait(10*time.Second)) safeReceive(wait) time.Sleep(config.NoMessageWait) err = c.Disconnect() assert.NoError(t, err) } // UnsubscribeNotExistingSubscriptionTest tests the broker for allowing // unsubscribing not existing topics. 
func UnsubscribeNotExistingSubscriptionTest(t *testing.T, config *Config, topic string) { c := client.New() c.Callback = func(msg *packet.Message, err error) error { assert.Fail(t, "should not be called") return nil } cf, err := c.Connect(client.NewConfig(config.URL)) assert.NoError(t, err) assert.NoError(t, cf.Wait(10*time.Second)) assert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode()) assert.False(t, cf.SessionPresent()) uf, err := c.Unsubscribe(topic) assert.NoError(t, err) assert.NoError(t, uf.Wait(10*time.Second)) time.Sleep(config.NoMessageWait) err = c.Disconnect() assert.NoError(t, err) } // UnsubscribeOverlappingSubscriptions tests the broker for properly unsubscribing // overlapping topics. func UnsubscribeOverlappingSubscriptions(t *testing.T, config *Config, topic string) { c := client.New() wait := make(chan struct{}) c.Callback = func(msg *packet.Message, err error) error { assert.NoError(t, err) assert.Equal(t, topic+"/foo", msg.Topic) assert.Equal(t, testPayload, msg.Payload) assert.Equal(t, packet.QOS(0), msg.QOS) assert.False(t, msg.Retain) close(wait) return nil } cf, err := c.Connect(client.NewConfig(config.URL)) assert.NoError(t, err) assert.NoError(t, cf.Wait(10*time.Second)) assert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode()) assert.False(t, cf.SessionPresent()) sf, err := c.Subscribe(topic+"/#", 0) assert.NoError(t, err) assert.NoError(t, sf.Wait(10*time.Second)) assert.Equal(t, []packet.QOS{0}, sf.ReturnCodes()) sf, err = c.Subscribe(topic+"/+", 0) assert.NoError(t, err) assert.NoError(t, sf.Wait(10*time.Second)) assert.Equal(t, []packet.QOS{0}, sf.ReturnCodes()) uf, err := c.Unsubscribe(topic + "/#") assert.NoError(t, err) assert.NoError(t, uf.Wait(10*time.Second)) pf, err := c.Publish(topic+"/foo", testPayload, 0, false) assert.NoError(t, err) assert.NoError(t, pf.Wait(10*time.Second)) safeReceive(wait) time.Sleep(config.NoMessageWait) err = c.Disconnect() assert.NoError(t, err) } // SubscriptionUpgradeTest tests the broker 
for properly upgrading subscriptions, func SubscriptionUpgradeTest(t *testing.T, config *Config, topic string, from, to packet.QOS) { c := client.New() wait := make(chan struct{}) c.Callback = func(msg *packet.Message, err error) error { assert.NoError(t, err) assert.Equal(t, topic, msg.Topic) assert.Equal(t, testPayload, msg.Payload) assert.Equal(t, to, msg.QOS) assert.False(t, msg.Retain) close(wait) return nil } cf, err := c.Connect(client.NewConfig(config.URL)) assert.NoError(t, err) assert.NoError(t, cf.Wait(10*time.Second)) assert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode()) assert.False(t, cf.SessionPresent()) sf, err := c.Subscribe(topic, from) assert.NoError(t, err) assert.NoError(t, sf.Wait(10*time.Second)) assert.Equal(t, []packet.QOS{from}, sf.ReturnCodes()) sf, err = c.Subscribe(topic, to) assert.NoError(t, err) assert.NoError(t, sf.Wait(10*time.Second)) assert.Equal(t, []packet.QOS{to}, sf.ReturnCodes()) pf, err := c.Publish(topic, testPayload, to, false) assert.NoError(t, err) assert.NoError(t, pf.Wait(10*time.Second)) safeReceive(wait) err = c.Disconnect() assert.NoError(t, err) } // OverlappingSubscriptionsTest tests the broker for properly handling overlapping // subscriptions. 
func OverlappingSubscriptionsTest(t *testing.T, config *Config, pub, sub string) { c := client.New() wait := make(chan struct{}) c.Callback = func(msg *packet.Message, err error) error { assert.NoError(t, err) assert.Equal(t, pub, msg.Topic) assert.Equal(t, testPayload, msg.Payload) assert.Equal(t, packet.QOS(0), msg.QOS) assert.False(t, msg.Retain) close(wait) return nil } cf, err := c.Connect(client.NewConfig(config.URL)) assert.NoError(t, err) assert.NoError(t, cf.Wait(10*time.Second)) assert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode()) assert.False(t, cf.SessionPresent()) sf, err := c.Subscribe(sub, 0) assert.NoError(t, err) assert.NoError(t, sf.Wait(10*time.Second)) assert.Equal(t, []packet.QOS{0}, sf.ReturnCodes()) sf, err = c.Subscribe(pub, 0) assert.NoError(t, err) assert.NoError(t, sf.Wait(10*time.Second)) assert.Equal(t, []packet.QOS{0}, sf.ReturnCodes()) pf, err := c.Publish(pub, testPayload, 0, false) assert.NoError(t, err) assert.NoError(t, pf.Wait(10*time.Second)) safeReceive(wait) time.Sleep(config.NoMessageWait) err = c.Disconnect() assert.NoError(t, err) } // MultipleSubscriptionTest tests the broker for properly handling multiple // subscriptions. 
func MultipleSubscriptionTest(t *testing.T, config *Config, topic string) { c := client.New() wait := make(chan struct{}) c.Callback = func(msg *packet.Message, err error) error { assert.NoError(t, err) assert.Equal(t, topic+"/3", msg.Topic) assert.Equal(t, testPayload, msg.Payload) assert.Equal(t, packet.QOS(2), msg.QOS) assert.False(t, msg.Retain) close(wait) return nil } cf, err := c.Connect(client.NewConfig(config.URL)) assert.NoError(t, err) assert.NoError(t, cf.Wait(10*time.Second)) assert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode()) assert.False(t, cf.SessionPresent()) subs := []packet.Subscription{ {Topic: topic + "/1", QOS: 0}, {Topic: topic + "/2", QOS: 1}, {Topic: topic + "/3", QOS: 2}, } sf, err := c.SubscribeMultiple(subs) assert.NoError(t, err) assert.NoError(t, sf.Wait(10*time.Second)) assert.Equal(t, []packet.QOS{0, 1, 2}, sf.ReturnCodes()) pf, err := c.Publish(topic+"/3", testPayload, 2, false) assert.NoError(t, err) assert.NoError(t, pf.Wait(10*time.Second)) safeReceive(wait) time.Sleep(config.NoMessageWait) err = c.Disconnect() assert.NoError(t, err) } // DuplicateSubscriptionTest tests the broker for properly handling duplicate // subscriptions. 
func DuplicateSubscriptionTest(t *testing.T, config *Config, topic string) { c := client.New() wait := make(chan struct{}) c.Callback = func(msg *packet.Message, err error) error { assert.NoError(t, err) assert.Equal(t, topic, msg.Topic) assert.Equal(t, testPayload, msg.Payload) assert.Equal(t, packet.QOS(1), msg.QOS) assert.False(t, msg.Retain) close(wait) return nil } cf, err := c.Connect(client.NewConfig(config.URL)) assert.NoError(t, err) assert.NoError(t, cf.Wait(10*time.Second)) assert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode()) assert.False(t, cf.SessionPresent()) subs := []packet.Subscription{ {Topic: topic, QOS: 0}, {Topic: topic, QOS: 1}, } sf, err := c.SubscribeMultiple(subs) assert.NoError(t, err) assert.NoError(t, sf.Wait(10*time.Second)) assert.Equal(t, []packet.QOS{0, 1}, sf.ReturnCodes()) pf, err := c.Publish(topic, testPayload, 1, false) assert.NoError(t, err) assert.NoError(t, pf.Wait(10*time.Second)) safeReceive(wait) time.Sleep(config.NoMessageWait) err = c.Disconnect() assert.NoError(t, err) } // IsolatedSubscriptionTest tests the broker for properly isolating subscriptions. 
func IsolatedSubscriptionTest(t *testing.T, config *Config, topic string) { c := client.New() wait := make(chan struct{}) c.Callback = func(msg *packet.Message, err error) error { assert.NoError(t, err) assert.Equal(t, topic+"/foo", msg.Topic) assert.Equal(t, testPayload, msg.Payload) assert.Equal(t, packet.QOS(0), msg.QOS) assert.False(t, msg.Retain) close(wait) return nil } cf, err := c.Connect(client.NewConfig(config.URL)) assert.NoError(t, err) assert.NoError(t, cf.Wait(10*time.Second)) assert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode()) assert.False(t, cf.SessionPresent()) sf, err := c.Subscribe(topic+"/foo", 0) assert.NoError(t, err) assert.NoError(t, sf.Wait(10*time.Second)) assert.Equal(t, []packet.QOS{0}, sf.ReturnCodes()) pf, err := c.Publish(topic, testPayload, 0, false) assert.NoError(t, err) assert.NoError(t, pf.Wait(10*time.Second)) pf, err = c.Publish(topic+"/bar", testPayload, 0, false) assert.NoError(t, err) assert.NoError(t, pf.Wait(10*time.Second)) pf, err = c.Publish(topic+"/baz", testPayload, 0, false) assert.NoError(t, err) assert.NoError(t, pf.Wait(10*time.Second)) pf, err = c.Publish(topic+"/foo", testPayload, 0, false) assert.NoError(t, err) assert.NoError(t, pf.Wait(10*time.Second)) safeReceive(wait) time.Sleep(config.NoMessageWait) err = c.Disconnect() assert.NoError(t, err) } // WillTest tests the broker for supporting will messages. 
func WillTest(t *testing.T, config *Config, topic string, sub, pub packet.QOS) { clientWithWill := client.New() opts := client.NewConfig(config.URL) opts.WillMessage = &packet.Message{ Topic: topic, Payload: testPayload, QOS: pub, } cf, err := clientWithWill.Connect(opts) assert.NoError(t, err) assert.NoError(t, cf.Wait(10*time.Second)) assert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode()) assert.False(t, cf.SessionPresent()) clientReceivingWill := client.New() wait := make(chan struct{}) clientReceivingWill.Callback = func(msg *packet.Message, err error) error { assert.NoError(t, err) assert.Equal(t, topic, msg.Topic) assert.Equal(t, testPayload, msg.Payload) assert.Equal(t, sub, msg.QOS) assert.False(t, msg.Retain) close(wait) return nil } cf, err = clientReceivingWill.Connect(client.NewConfig(config.URL)) assert.NoError(t, err) assert.NoError(t, cf.Wait(10*time.Second)) assert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode()) assert.False(t, cf.SessionPresent()) sf, err := clientReceivingWill.Subscribe(topic, sub) assert.NoError(t, err) assert.NoError(t, sf.Wait(10*time.Second)) assert.Equal(t, []packet.QOS{sub}, sf.ReturnCodes()) err = clientWithWill.Close() assert.NoError(t, err) safeReceive(wait) time.Sleep(config.NoMessageWait) err = clientReceivingWill.Disconnect() assert.NoError(t, err) } // CleanWillTest tests the broker for properly handling will messages on a clean // disconnect. 
func CleanWillTest(t *testing.T, config *Config, topic string) { clientWithWill := client.New() opts := client.NewConfig(config.URL) opts.WillMessage = &packet.Message{ Topic: topic, Payload: testPayload, QOS: 0, } cf, err := clientWithWill.Connect(opts) assert.NoError(t, err) assert.NoError(t, cf.Wait(10*time.Second)) assert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode()) assert.False(t, cf.SessionPresent()) nonReceiver := client.New() nonReceiver.Callback = func(msg *packet.Message, err error) error { assert.Fail(t, "should not be called") return nil } cf, err = nonReceiver.Connect(client.NewConfig(config.URL)) assert.NoError(t, err) assert.NoError(t, cf.Wait(10*time.Second)) assert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode()) assert.False(t, cf.SessionPresent()) sf, err := nonReceiver.Subscribe(topic, 0) assert.NoError(t, err) assert.NoError(t, sf.Wait(10*time.Second)) assert.Equal(t, []packet.QOS{0}, sf.ReturnCodes()) err = clientWithWill.Disconnect() assert.NoError(t, err) time.Sleep(config.NoMessageWait) err = nonReceiver.Disconnect() assert.NoError(t, err) } // KeepAliveTest tests the broker for proper keep alive support. 
func KeepAliveTest(t *testing.T, config *Config) { opts := client.NewConfig(config.URL) opts.KeepAlive = "2s" // mosquitto fails with a keep alive of 1s c := client.New() var reqCounter int32 var respCounter int32 c.Logger = func(message string) { if strings.Contains(message, "Pingreq") { atomic.AddInt32(&reqCounter, 1) } else if strings.Contains(message, "Pingresp") { atomic.AddInt32(&respCounter, 1) } } cf, err := c.Connect(opts) assert.NoError(t, err) assert.NoError(t, cf.Wait(10*time.Second)) assert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode()) assert.False(t, cf.SessionPresent()) time.Sleep(4500 * time.Millisecond) err = c.Disconnect() assert.NoError(t, err) assert.Equal(t, int32(2), atomic.LoadInt32(&reqCounter)) assert.Equal(t, int32(2), atomic.LoadInt32(&respCounter)) } // KeepAliveTimeoutTest tests the broker for proper keep alive timeout detection // support. func KeepAliveTimeoutTest(t *testing.T, config *Config) { username, password := config.usernamePassword() connect := packet.NewConnect() connect.KeepAlive = 1 connect.Username = username connect.Password = password connack := packet.NewConnack() c := flow.New(). Send(connect). Receive(connack). End() conn, err := transport.Dial(config.URL) assert.NoError(t, err) assert.NotNil(t, conn) err = c.Test(conn) assert.NoError(t, err) } // UnexpectedPubrelTest tests the broker for proper handling of unexpected pubrel // packets. func UnexpectedPubrelTest(t *testing.T, config *Config) { username, password := config.usernamePassword() connect := packet.NewConnect() connect.Username = username connect.Password = password connack := packet.NewConnack() pubrel := packet.NewPubrel() pubrel.ID = 42 pubcomp := packet.NewPubcomp() pubcomp.ID = 42 c := flow.New(). Send(connect). Receive(connack). Send(pubrel). Receive(pubcomp). Send(&packet.Disconnect{}). End() conn, err := transport.Dial(config.URL) assert.NoError(t, err) assert.NotNil(t, conn) err = c.Test(conn) assert.NoError(t, err) }
spec/base.go
0.543833
0.551091
base.go
starcoder
package continuous import ( "github.com/jtejido/stats" "github.com/jtejido/stats/err" "math" ) // Benktander type I distribution (Benktander-Gibrat Distribution) // https://en.wikipedia.org/wiki/Benktander_type_I_distribution type BenktanderType1 struct { a, b float64 } func NewBenktanderType1(a, b float64) (*BenktanderType1, error) { if a <= 0 || b <= 0 { return nil, err.Invalid() } return &BenktanderType1{a, b}, nil } func (bfk *BenktanderType1) String() string { return "BenktanderType1: Parameters - " + bfk.Parameters().String() + ", Support(x) - " + bfk.Support().String() } // a ∈ (0,∞) // b ∈ (0,∞) func (bfk *BenktanderType1) Parameters() stats.Limits { return stats.Limits{ "a": stats.Interval{0, math.Inf(1), true, true}, "b": stats.Interval{0, math.Inf(1), true, true}, } } // x ∈ [1,∞) func (bfk *BenktanderType1) Support() stats.Interval { return stats.Interval{1, math.Inf(1), false, true} } func (bfk *BenktanderType1) Probability(x float64) float64 { if bfk.Support().IsWithinInterval(x) { a := 1 + ((2. * bfk.b * math.Log(x)) / bfk.a) b := 1 + bfk.a + 2.*bfk.b*math.Log(x) return ((a * b) - ((2. * bfk.b) / bfk.a)) * math.Pow(x, -(2.+bfk.a+bfk.b*math.Log(x))) } return 0 } func (bfk *BenktanderType1) Distribution(x float64) float64 { if bfk.Support().IsWithinInterval(x) { return 1. - (1.+((2.*bfk.b)/bfk.a)*math.Log(x))*math.Pow(x, -(bfk.a+1.+bfk.b*math.Log(x))) } return 0 } func (bfk *BenktanderType1) Mean() float64 { return 1. 
+ (1 / bfk.a) } func (bfk *BenktanderType1) Variance() float64 { m1 := bfk.rm(1) m2 := bfk.rm(2) return -(m1 * m1) + m2 } func (bfk *BenktanderType1) Skewness() float64 { m1 := bfk.rm(1) m2 := bfk.rm(2) m3 := bfk.rm(3) return (m1*(2*(m1*m1)-3*m2) + m3) / math.Pow(m2-(m1*m1), 3./2) } func (bfk *BenktanderType1) ExKurtosis() float64 { m1 := bfk.rm(1) m2 := bfk.rm(2) m3 := bfk.rm(3) m4 := bfk.rm(4) return (-3*(m1*m1*m1*m1) + 6*(m1*m1)*m2 - 4*m1*m3 + m4 - 3*((m2-(m1*m1))*(m2-(m1*m1)))) / ((m2 - (m1 * m1)) * (m2 - (m1 * m1))) } func (bfk *BenktanderType1) rm(r float64) float64 { sqrtb := math.Sqrt(bfk.b) sqrtpi := math.Sqrt(math.Pi) n1 := math.Pow(1+bfk.a-r, 2) d1 := 4 * bfk.b en1d1 := math.Exp(n1 / d1) n2 := 1 + bfk.a - r d2 := 2 * sqrtb erfcn2d2 := math.Erfc(n2 / d2) num := 2*bfk.a*sqrtb + 2*sqrtb*r - en1d1*sqrtpi*r*erfcn2d2 + en1d1*sqrtpi*(r*r)*erfcn2d2 denom := 2 * bfk.a * sqrtb return num / denom }
dist/continuous/benktander_type_1.go
0.713432
0.452475
benktander_type_1.go
starcoder
package algorithm import ( "reflect" ) // Getter can get a value at 'i'th position. type Getter interface { Get(i int) interface{} } // Lesser compares two elements. type Lesser interface { Less(i, j int) bool } // Lenner has a length type Lenner interface { Len() int } // Swapper can swap type Swapper interface { Swap(i, j int) } // LenSwapper is both a Lenner and a Swapper type LenSwapper interface { Lenner Swapper } // LessSwapper is both a Lesser and a Swapper type LessSwapper interface { Lesser Swapper } // GetLenner is both a Getter and a Lenner type GetLenner interface { Getter Lenner } // GetSwapper is both a Getter and a Swapper type GetSwapper interface { Swapper Getter } // GetLenSwapper is a Getter, a Lenner, and a Swapper type GetLenSwapper interface { Getter Lenner Swapper } // LenLessSwapper is both a Lenner, a Lesser and a Swapper type LenLessSwapper interface { Lenner Lesser Swapper } func transformPred(pred func(v interface{}) bool, g Getter) func(i int) bool { return func(i int) bool { return pred(g.Get(i)) } } // Reverse reverse a given container. func Reverse(ls LenSwapper) { ReverseRange(ls, 0, ls.Len()) } // ReverseRange reverse a given container within a range [begin, end). func ReverseRange(s Swapper, begin, end int) { reverseImpl(s.Swap, begin, end) } // ReverseSlice is a slice version of Reverse. func ReverseSlice(slice interface{}) { rv := reflect.ValueOf(slice) swap := reflect.Swapper(slice) length := rv.Len() reverseImpl(swap, 0, length) } func reverseImpl(swap func(i, j int), begin, end int) { mid := (begin + end) / 2 for i := 0; i < mid-begin; i++ { swap(begin+i, end-1-i) } } // Rotate rotate elements in such a way that the value at 'middle' becomes the first element. func Rotate(ls LenSwapper, middle int) int { return RotateRange(ls, 0, middle, ls.Len()) } // RotateRange rotates elements in a given container within a range [begin, end) // It returns a index to the value previously at 'begin'. 
func RotateRange(s Swapper, begin, middle, end int) int { return rotateImpl(s.Swap, begin, middle, end) } // RotateSlice is a Rotate function with a slice. func RotateSlice(slice interface{}, middle int) int { return rotateImpl(reflect.Swapper(slice), 0, middle, reflect.ValueOf(slice).Len()) } func rotateImpl(swap func(i, j int), begin, middle, end int) int { if begin > middle || middle > end { return begin } reverseImpl(swap, begin, middle) reverseImpl(swap, middle, end) reverseImpl(swap, begin, end) return end - middle + begin } // StablePartition partitions in two groups. func StablePartition(gls GetLenSwapper, pred func(v interface{}) bool) int { return StablePartitionRange(gls, 0, gls.Len(), pred) } // StablePartitionRange partitions in two groups. func StablePartitionRange(gs GetSwapper, begin, end int, pred func(v interface{}) bool) int { return stablePartitionSliceImpl(gs.Swap, begin, end, transformPred(pred, gs)) } // StablePartitionSlice is a Rotate function with a slice. func StablePartitionSlice(slice interface{}, pred func(i int) bool) int { return stablePartitionSliceImpl(reflect.Swapper(slice), 0, reflect.ValueOf(slice).Len(), pred) } func stablePartitionSliceImpl(swap func(i, j int), begin, end int, pred func(i int) bool) int { if len := end - begin; len == 0 { return begin } else if len == 1 { if pred(begin) { return begin + 1 } return begin } else { middle := (begin + end) / 2 return rotateImpl(swap, stablePartitionSliceImpl(swap, begin, middle, pred), middle, stablePartitionSliceImpl(swap, middle, end, pred)) } } // AllOf returns true only if all elements meet a given condition. func AllOf(gl GetLenner, pred func(v interface{}) bool) bool { return AllOfRange(gl, 0, gl.Len(), pred) } // AllOfRange returns true only if all elements meet a given condition. 
func AllOfRange(g Getter, begin, end int, pred func(v interface{}) bool) bool { return allOfImpl(begin, end, transformPred(pred, g)) } // AllOfSlice returns true only if all elements in a given slice meet a given condition. func AllOfSlice(slice interface{}, pred func(i int) bool) bool { return allOfImpl(0, reflect.ValueOf(slice).Len(), pred) } func allOfImpl(begin, end int, pred func(i int) bool) bool { for i := begin; i < end; i++ { if !pred(i) { return false } } return true } // NoneOf returns true only if no element meets a given condition func NoneOf(gl GetLenner, pred func(v interface{}) bool) bool { return NoneOfRange(gl, 0, gl.Len(), pred) } // NoneOfRange returns true only if no element meets a given condition func NoneOfRange(g Getter, begin, end int, pred func(v interface{}) bool) bool { return noneOfImpl(begin, end, transformPred(pred, g)) } // NoneOfSlice returns true only if no element in a given slice meets a given condition. func NoneOfSlice(slice interface{}, pred func(i int) bool) bool { return noneOfImpl(0, reflect.ValueOf(slice).Len(), pred) } func noneOfImpl(begin, end int, pred func(i int) bool) bool { for i := begin; i < end; i++ { if pred(i) { return false } } return true } // AnyOf returns true if any element meet a given condition func AnyOf(gl GetLenner, pred func(v interface{}) bool) bool { return AnyOfRange(gl, 0, gl.Len(), pred) } // AnyOfRange returns true if any element meet a given condition func AnyOfRange(g Getter, begin, end int, pred func(v interface{}) bool) bool { return anyOfImpl(begin, end, transformPred(pred, g)) } // AnyOfSlice returns true if any element in a given slice meets a given condition. 
func AnyOfSlice(slice interface{}, pred func(i int) bool) bool { return anyOfImpl(0, reflect.ValueOf(slice).Len(), pred) } func anyOfImpl(begin, end int, pred func(i int) bool) bool { for i := begin; i < end; i++ { if pred(i) { return true } } return false } // NthElement rearranges a slice in such a way that the element at nth(k) position is // the element that would occur in that position if slice is sorted. // All of the other elements before nth position is less than or equal to the new nth element. func NthElement(lls LenLessSwapper, k int) { NthElementRange(lls, 0, lls.Len(), k) } // NthElementRange rearranges a range [begin, end) in such a way that the element at // nth(k) position is the element that would occur in that position if a range is sorted. // All of the other elements in a range before nth position is less than or equal to the new nth element. func NthElementRange(ls LessSwapper, begin, end, k int) { nthElementSliceImpl(ls.Swap, ls.Less, begin, end, k) } // NthElementSlice rearranges a slice in such a way that the element at nth(k) position is // the element that would occur in that position if slice is sorted. // All of the other elements before nth position is less than or equal to the new nth element. 
func NthElementSlice(slice interface{}, less func(i, j int) bool, k int) { nthElementSliceImpl(reflect.Swapper(slice), less, 0, reflect.ValueOf(slice).Len(), k) } func nthElementSliceImpl(swap func(i, j int), less func(i, j int) bool, begin, end, k int) { if begin+1 >= end { return } pidx := begin pidx = partitionSliceImpl(swap, less, begin, end, pidx) if k == pidx { return } else if k < pidx { nthElementSliceImpl(swap, less, begin, pidx, k) return } else { nthElementSliceImpl(swap, less, pidx+1, end, k) return } } func partitionSliceImpl(swap func(i, j int), less func(i, j int) bool, begin, end, pidx int) int { swap(end-1, pidx) pidx = end - 1 sidx := begin for i := begin; i < end-1; i++ { if less(i, pidx) { swap(sidx, i) sidx++ } } swap(end-1, sidx) return sidx }
algorithm/algorithm.go
0.78374
0.445891
algorithm.go
starcoder
package data import "fmt" // FrameInputConverter is a type to support building a Frame while also // doing conversion as data is added to the Frame. type FrameInputConverter struct { Frame *Frame fieldConverters []FieldConverter } // A FieldConverter is a type to support building Frame fields of a different // type than one's input data. type FieldConverter struct { // OutputFieldType is the type of Field that will be created. OutputFieldType FieldType // Converter is a conversion function that is called when setting Field values with a FrameInputConverter. // Care must be taken that the type returned by the conversion function matches the member type of the FieldType, // and that the input type matches the expected input type for the Converter function, or panics can occur. // If the Converter is nil, no conversion is performed when calling methods to set values. Converter Converter } // Converter is a function type for converting values in a Frame. It is the consumers responsibility // to the check the underlying interface types of the input and return types to avoid panics. type Converter func(v interface{}) (interface{}, error) // NewFrameInputConverter returns a FrameInputConverter which is used to create a Frame from data // that needs value conversions. The FrameInputConverter will create a new Frame with fields // based on the FieldConverters' OutputFieldTypes of length rowLen. func NewFrameInputConverter(fieldConvs []FieldConverter, rowLen int) (*FrameInputConverter, error) { fTypes := make([]FieldType, len(fieldConvs)) for i, fc := range fieldConvs { fTypes[i] = fc.OutputFieldType } f := NewFrameOfFieldTypes("", rowLen, fTypes...) return &FrameInputConverter{ Frame: f, fieldConverters: fieldConvs, }, nil } // Set sets val a FieldIdx and rowIdx of the frame. If the corresponding FieldConverter's // Converter is not nil, then the Converter function is called before setting the value (otherwise Frame.Set is called directly). 
// If an error is returned from the Converter function this function returns that error. // Like Frame.Set and Field.Set, it will panic if fieldIdx or rowIdx are out of range. func (fcb *FrameInputConverter) Set(fieldIdx, rowIdx int, val interface{}) error { if fcb.fieldConverters[fieldIdx].Converter == nil { fcb.Frame.Set(fieldIdx, rowIdx, val) return nil } convertedVal, err := fcb.fieldConverters[fieldIdx].Converter(val) if err != nil { return err } fcb.Frame.Set(fieldIdx, rowIdx, convertedVal) return nil } var asStringConverter Converter = func(v interface{}) (interface{}, error) { return fmt.Sprintf("%v", v), nil } // AsStringFieldConverter will always return a string a regardless of the input. // This is done with fmt.Sprintf which uses reflection. var AsStringFieldConverter = FieldConverter{ OutputFieldType: FieldTypeString, Converter: asStringConverter, }
vendor/github.com/grafana/grafana-plugin-sdk-go/data/conversion_input.go
0.708515
0.598987
conversion_input.go
starcoder
package common import ( "image/color" "engo.io/engo" "engo.io/gl" ) type TriangleType uint8 const ( // Indicates a Triangle where two sides have equal length TriangleIsosceles TriangleType = iota // Indicates a Triangles where one angle is at 90 degrees TriangleRight ) // Triangle is a basic triangular form; the "point" of the triangle is pointing to the top type Triangle struct { TriangleType TriangleType BorderWidth float32 BorderColor color.Color } func (Triangle) Texture() *gl.Texture { return nil } func (Triangle) Width() float32 { return 0 } func (Triangle) Height() float32 { return 0 } func (Triangle) View() (float32, float32, float32, float32) { return 0, 0, 1, 1 } func (Triangle) Close() {} // Rectangle is a basic rectangular form; the dimensions are controlled via the `SpaceComponent`. type Rectangle struct { BorderWidth float32 BorderColor color.Color } func (Rectangle) Texture() *gl.Texture { return nil } func (Rectangle) Width() float32 { return 0 } func (Rectangle) Height() float32 { return 0 } func (Rectangle) View() (float32, float32, float32, float32) { return 0, 0, 1, 1 } func (Rectangle) Close() {} // Circle is a basic circular form; the dimensions / radius are controlled via the `SpaceComponent`. // This was made possible by the shared knowledge of <NAME> (@hydroflame). type Circle struct { BorderWidth float32 BorderColor color.Color } func (Circle) Texture() *gl.Texture { return nil } func (Circle) Width() float32 { return 0 } func (Circle) Height() float32 { return 0 } func (Circle) View() (float32, float32, float32, float32) { return 0, 0, 1, 1 } func (Circle) Close() {} // ComplexTriangles is a complex form, made out of triangles. type ComplexTriangles struct { // Points are the points the form is made of. They should be defined on a scale from 0 to 1, where (0, 0) starts // at the top-left of the area (as defined by the `SpaceComponent`. // You should use a multitude of 3 points, because each triangle is defined by defining 3 points. 
Points []engo.Point // BorderWidth indicates the width of the border, around EACH of the Triangles it is made out of BorderWidth float32 // BorderColor indicates the color of the border, around EACH of the Triangles it is made out of BorderColor color.Color } func (ComplexTriangles) Texture() *gl.Texture { return nil } func (ComplexTriangles) Width() float32 { return 0 } func (ComplexTriangles) Height() float32 { return 0 } func (ComplexTriangles) View() (float32, float32, float32, float32) { return 0, 0, 1, 1 } func (ComplexTriangles) Close() {}
common/render_shapes.go
0.837421
0.660515
render_shapes.go
starcoder
package container import "github.com/nwillc/genfuncs" var _ Queue[int] = (*Deque[int])(nil) // Deque is a doubly ended implementation of Queue with default behavior of a Fifo but provides left and right access. // Employs a List for storage. type Deque[T any] struct { list *List[T] } // NewDeque creates a Deque containing any provided elements. func NewDeque[T any](t ...T) (degue *Deque[T]) { degue = &Deque[T]{list: NewList[T]()} degue.AddAll(t...) return degue } // Add an element to the right of the Deque. func (d *Deque[T]) Add(t T) { d.list.Add(t) } // AddAll elements to the right of the Deque. func (d *Deque[T]) AddAll(t ...T) { d.list.AddAll(t...) } // AddLeft an element to the left of the Deque. func (d *Deque[T]) AddLeft(t T) { d.list.AddLeft(t) } // AddRight an element to the right of the Deque. func (d *Deque[T]) AddRight(t T) { d.list.AddRight(t) } // Len reports the length of the Deque. func (d *Deque[T]) Len() (length int) { length = d.list.Len() return length } // Peek returns the left most element in the Deque without removing it. func (d *Deque[T]) Peek() (value T) { value = d.PeekLeft() return value } // PeekLeft returns the left most element in the Deque without removing it. func (d *Deque[T]) PeekLeft() (value T) { if d.Len() == 0 { panic(genfuncs.NoSuchElement) } value = d.list.PeekLeft().Value return value } // PeekRight returns the right most element in the Deque without removing it. func (d *Deque[T]) PeekRight() (value T) { if d.Len() == 0 { panic(genfuncs.NoSuchElement) } value = d.list.PeekRight().Value return value } // Remove and return the left most element in the Deque. func (d *Deque[T]) Remove() (value T) { value = d.RemoveLeft() return value } // RemoveLeft and return the left most element in the Deque. func (d *Deque[T]) RemoveLeft() (value T) { if d.Len() == 0 { panic(genfuncs.NoSuchElement) } e := d.list.PeekLeft() value = d.list.Remove(e) return value } // RemoveRight and return the right most element in the Deque. 
func (d *Deque[T]) RemoveRight() (value T) { if d.Len() == 0 { panic(genfuncs.NoSuchElement) } e := d.list.PeekRight() value = d.list.Remove(e) return value } // Values in the Deque returned in a new GSlice. func (d *Deque[T]) Values() (values GSlice[T]) { values = d.list.Values() return values }
container/deque.go
0.782496
0.521167
deque.go
starcoder
package fptower import ( "github.com/consensys/gnark-crypto/ecc/bls12-377/fp" ) // Mul sets z to the E2-product of x,y, returns z func (z *E2) Mul(x, y *E2) *E2 { var a, b, c fp.Element a.Add(&x.A0, &x.A1) b.Add(&y.A0, &y.A1) a.Mul(&a, &b) b.Mul(&x.A0, &y.A0) c.Mul(&x.A1, &y.A1) z.A1.Sub(&a, &b).Sub(&z.A1, &c) z.A0.Double(&c).Double(&z.A0).AddAssign(&c).Add(&z.A0, &b) return z } // Square sets z to the E2-product of x,x returns z func (z *E2) Square(x *E2) *E2 { //algo 22 https://eprint.iacr.org/2010/354.pdf var c0, c2 fp.Element c2.Double(&x.A1).Double(&c2).AddAssign(&x.A1).AddAssign(&x.A0) c0.Add(&x.A0, &x.A1) c0.Mul(&c0, &c2) // (x1+x2)*(x1+(u**2)x2) z.A1.Mul(&x.A0, &x.A1).Double(&z.A1) z.A0.Sub(&c0, &z.A1).SubAssign(&z.A1).SubAssign(&z.A1) return z } // MulByNonResidue multiplies a E2 by (0,1) func (z *E2) MulByNonResidue(x *E2) *E2 { a := x.A0 b := x.A1 // fetching x.A1 in the function below is slower z.A0.Double(&b).Double(&z.A0).Add(&z.A0, &b) z.A1 = a return z } // MulByNonResidueInv multiplies a E2 by (0,1)^{-1} func (z *E2) MulByNonResidueInv(x *E2) *E2 { //z.A1.MulByNonResidueInv(&x.A0) a := x.A1 fiveinv := fp.Element{ 330620507644336508, 9878087358076053079, 11461392860540703536, 6973035786057818995, 8846909097162646007, 104838758629667239, } z.A1.Mul(&x.A0, &fiveinv) z.A0 = a return z } // Inverse sets z to the E2-inverse of x, returns z func (z *E2) Inverse(x *E2) *E2 { // Algorithm 8 from https://eprint.iacr.org/2010/354.pdf //var a, b, t0, t1, tmp fp.Element var t0, t1, tmp fp.Element a := &x.A0 // creating the buffers a, b is faster than querying &x.A0, &x.A1 in the functions call below b := &x.A1 t0.Square(a) t1.Square(b) tmp.Double(&t1).Double(&tmp).Add(&tmp, &t1) t0.Sub(&t0, &tmp) t1.Inverse(&t0) z.A0.Mul(a, &t1) z.A1.Mul(b, &t1).Neg(&z.A1) return z } // norm sets x to the norm of z func (z *E2) norm(x *fp.Element) { var tmp fp.Element x.Square(&z.A1) tmp.Double(x).Double(&tmp).Add(&tmp, x) x.Square(&z.A0).Sub(x, &tmp) }
ecc/bls12-377/internal/fptower/e2_bls377.go
0.644561
0.461805
e2_bls377.go
starcoder
package models import ( "fmt" "strings" grpcapi "github.com/SKF/proto/v2/hierarchy" ) // InspectionPoint - holds parameters for inspection point type InspectionPoint struct { // Type of value to record ValueType ValueType `json:"valueType" example:"numeric" swaggertype:"string" enums:"numeric,single_choice,multi_choice,unknown"` // Unit of the value recorded, in case of numeric inspection NumericUnit string `json:"unit" example:"bar"` // Possible answers for single_choice and multi_choice inspections Answers Answers `json:"answers" swaggertype:"array,string" example:"first,second"` // Type of visualization in Enlight Centre VisualizationType VisualizationType `json:"visualizationType" swaggertype:"string" example:"visualization_circular_gauge" enums:"visualization_none,visualization_circular_gauge,visualization_level_gauge"` VisualizationMinValue string `json:"visualizationMinValue" example:"3"` VisualizationMaxValue string `json:"visualizationMaxValue" example:"13"` } type ValueType string const ( ValueTypeNumeric ValueType = "numeric" ValueTypeSingleChoice ValueType = "single_choice" ValueTypeMultiChoice ValueType = "multi_choice" ValueTypeUnknown ValueType = "unknown" ) var valueTypes = []ValueType{ ValueTypeNumeric, ValueTypeSingleChoice, ValueTypeMultiChoice, } func ParseInspectionType(inspectionType string) ValueType { switch inspectionType { case "numeric": return ValueTypeNumeric case "single_choice": return ValueTypeSingleChoice case "multi_choice": return ValueTypeMultiChoice default: return ValueTypeUnknown } } func (t ValueType) String() string { return string(t) } func (t ValueType) Validate() error { for _, valueType := range valueTypes { if t == valueType { return nil } } return fmt.Errorf("'%s' is not a valid value type", t) } type Answers []string func (as Answers) Array() []string { return []string(as) } func (as Answers) Validate(valueType ValueType) error { if len(as) < 1 { return fmt.Errorf("ValueType is %s, there need to be at least 1 
answers", valueType) } for _, answer := range as { if answer == "" { return fmt.Errorf("'%s' is not a valid answer", answer) } } return nil } // VisualizationType - defines visualization type when value type is numeric type VisualizationType string // Constants for VisualizationType const ( VisualizationTypeNone VisualizationType = "visualization_none" VisualizationTypeCircularGauge VisualizationType = "visualization_circular_gauge" VisualizationTypeLevelGauge VisualizationType = "visualization_level_gauge" ) // Array fpr VisualizationType constants var visualizationTypes = []VisualizationType{ VisualizationTypeNone, VisualizationTypeCircularGauge, VisualizationTypeLevelGauge, } // String - stringifies VisualizationType func (t VisualizationType) String() string { return string(t) } // Validate - validates VisualizationType func (t VisualizationType) Validate() error { for _, visualizationType := range visualizationTypes { if t == visualizationType { return nil } } return fmt.Errorf("'%s' is not a valid visualization type", t) } func (p InspectionPoint) Validate() error { if err := p.ValueType.Validate(); err != nil { return err } if p.ValueType == ValueTypeNumeric { if p.NumericUnit == "" { return fmt.Errorf("ValueType is numeric, numeric unit cannot be empty string") } // Default to none if p.VisualizationType == "" { p.VisualizationType = VisualizationTypeNone } if err := p.VisualizationType.Validate(); err != nil { return err } if p.VisualizationType != VisualizationTypeNone && (p.VisualizationMinValue == "" || p.VisualizationMaxValue == "") { return fmt.Errorf("ValueType is numeric and VisualizationType: \"%s\", minValue: \"%s\", maxValue: \"%s\" - min/maxValue cannot be empty", p.VisualizationType.String(), p.VisualizationMinValue, p.VisualizationMaxValue) } } if p.ValueType == ValueTypeSingleChoice || p.ValueType == ValueTypeMultiChoice { if err := p.Answers.Validate(p.ValueType); err != nil { return err } } return nil } func (p InspectionPoint) ToGRPC() 
*grpcapi.InspectionPoint { return &grpcapi.InspectionPoint{ ValueType: grpcapi.ValueType(grpcapi.ValueType_value[strings.ToUpper(p.ValueType.String())]), NumericUnit: p.NumericUnit, Answers: p.Answers.Array(), VisualizationType: grpcapi.VisualizationType(grpcapi.VisualizationType_value[strings.ToUpper(p.VisualizationType.String())]), VisualizationMinValue: p.VisualizationMinValue, VisualizationMaxValue: p.VisualizationMaxValue, } } func (p *InspectionPoint) FromGRPC(inspectPoint grpcapi.InspectionPoint) { p.ValueType = ValueType(strings.ToLower(inspectPoint.ValueType.String())) p.NumericUnit = inspectPoint.NumericUnit p.Answers = Answers(inspectPoint.Answers) p.VisualizationType = VisualizationType(strings.ToLower(inspectPoint.VisualizationType.String())) p.VisualizationMinValue = inspectPoint.VisualizationMinValue p.VisualizationMaxValue = inspectPoint.VisualizationMaxValue }
v2/services/hierarchy/models/inspection_point.go
0.691185
0.477554
inspection_point.go
starcoder