// NOTE: a non-Go dataset/table extraction header was removed from this position.
package search_query_injection
import (
"github.com/threagile/threagile/model"
)
// Category returns the (Portuguese-localized) risk-category metadata for
// search-query injection against search-engine servers.
func Category() model.RiskCategory {
	return model.RiskCategory{
		Id:    "search-query-injection",
		Title: "Search-Query Injection",
		// typo fix below: "exemploo" -> "exemplo o"
		Description: "Quando um servidor de mecanismo de pesquisa é acessado, os riscos de injeção de consulta de pesquisa podem surgir." +
			"<br><br>Veja por exemplo o <a href=\"https://github.com/veracode-research/solr-injection\">https://github.com/veracode-research/solr-injection</a> e " +
			"<a href=\"https://github.com/veracode-research/solr-injection/blob/master/slides/DEFCON-27-Michael-Stepankin-Apache-Solr-Injection.pdf\">https://github.com/veracode-research/solr-injection/blob/master/slides/DEFCON-27-Michael-Stepankin-Apache-Solr-Injection.pdf</a> " +
			"Para mais detalhes (aqui relacionados ao Solr, mas em geral, mostrando o tópico das injeções de consulta de pesquisa).",
		Impact: "Se este risco permanecer desconhecido, os invasores podem ser capazes de ler mais dados do índice de pesquisa e " +
			"eventualmente, escalar ainda mais para uma penetração mais profunda no sistema por meio de execuções de código.",
		ASVS:       "V5 - Validation, Sanitization and Encoding Verification Requirements",
		CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Injection_Prevention_Cheat_Sheet.html",
		Action:     "Search-Query Injection Prevention",
		// grammar fix below: "o consulta" -> "a consulta" ("consulta" is feminine)
		Mitigation: "Tente usar bibliotecas que codifiquem corretamente os metacaracteres de consulta de pesquisa em pesquisas e não exponha a " +
			"consulta não filtrada para o chamador. " +
			"Quando um produto de terceiros é usado em vez de um software desenvolvido sob medida, verifique se o produto aplica a atenuação adequada e garanta um nível de patch razoável.",
		Check:          "As recomendações do cheat sheet e do ASVS/CSVS referenciado são aplicadas?",
		Function:       model.Development,
		STRIDE:         model.Tampering,
		DetectionLogic: "Clientes dentro do escopo acessando servidores de mecanismo de pesquisa por meio de protocolos de acesso de pesquisa típicos.",
		RiskAssessment: "A classificação de risco depende da sensibilidade do próprio servidor do mecanismo de pesquisa e dos ativos de dados processados ou armazenados.",
		FalsePositives: "As consultas do motor do servidor por valores de pesquisa que não consistem em partes controláveis pelo chamador podem ser consideradas " +
			"como falsos positivos após revisão individual.",
		ModelFailurePossibleReason: false,
		CWE:                        74, // CWE-74: Injection
	}
}
// GenerateRisks inspects every search-engine / search-index asset in the
// parsed model and flags each in-scope client that talks to it over HTTP(S)
// or a binary protocol as a potential search-query-injection source.
func GenerateRisks() []model.Risk {
	risks := make([]model.Risk, 0)
	for _, id := range model.SortedTechnicalAssetIDs() {
		target := model.ParsedModelRoot.TechnicalAssets[id]
		if target.Technology != model.SearchEngine && target.Technology != model.SearchIndex {
			continue
		}
		for _, flow := range model.IncomingTechnicalCommunicationLinksMappedByTargetId[target.Id] {
			// Out-of-scope callers are not assessed.
			if model.ParsedModelRoot.TechnicalAssets[flow.SourceId].OutOfScope {
				continue
			}
			switch flow.Protocol {
			case model.HTTP, model.HTTPS, model.BINARY, model.BINARY_encrypted:
				// DevOps-only usage lowers the exploitation likelihood one notch.
				likelihood := model.VeryLikely
				if flow.Usage == model.DevOps {
					likelihood = model.Likely
				}
				risks = append(risks, createRisk(target, flow, likelihood))
			}
		}
	}
	return risks
}
// SupportedTags returns the model tags this rule reacts to; the
// search-query-injection rule is not tag-driven, so the list is empty.
func SupportedTags() []string {
	return make([]string, 0)
}
func createRisk(technicalAsset model.TechnicalAsset, incomingFlow model.CommunicationLink, likelihood model.RiskExploitationLikelihood) model.Risk {
caller := model.ParsedModelRoot.TechnicalAssets[incomingFlow.SourceId]
title := "<b>Search Query Injection</b> risk at <b>" + caller.Title + "</b> against search engine server <b>" + technicalAsset.Title + "</b>" +
" via <b>" + incomingFlow.Title + "</b>"
impact := model.MediumImpact
if technicalAsset.HighestConfidentiality() == model.StrictlyConfidential || technicalAsset.HighestIntegrity() == model.MissionCritical {
impact = model.HighImpact
} else if technicalAsset.HighestConfidentiality() <= model.Internal && technicalAsset.HighestIntegrity() == model.Operational {
impact = model.LowImpact
}
risk := model.Risk{
Category: Category(),
Severity: model.CalculateSeverity(likelihood, impact),
ExploitationLikelihood: likelihood,
ExploitationImpact: impact,
Title: title,
MostRelevantTechnicalAssetId: caller.Id,
MostRelevantCommunicationLinkId: incomingFlow.Id,
DataBreachProbability: model.Probable,
DataBreachTechnicalAssetIDs: []string{technicalAsset.Id},
}
risk.SyntheticId = risk.Category.Id + "@" + caller.Id + "@" + technicalAsset.Id + "@" + incomingFlow.Id
return risk
} | risks/built-in/search-query-injection/search-query-injection-rule.go | 0.573559 | 0.469703 | search-query-injection-rule.go | starcoder |
package hyperneat
import (
"sort"
"github.com/klokare/evo"
"github.com/klokare/evo/neat"
)
// Constants for the output index of the encoded CPPN substrate.
const (
	Weight int = iota // index of the connection-weight output
	Bias              // index of the bias output (used for hidden and output nodes only)
	LEO               // index of the output-enabled check (LEO) output
)
// Seeder creates the seed population geared towards Cppns. Each encoded substrate will have 8
// inputs, one for each dimension of the source and target nodes, and 3 outputs: output weight,
// output enabled check (LEO), and bias value. The bias is used for hidden and output nodes only.
type Seeder struct {
	NumTraits      int     // number of traits forwarded to the underlying NEAT seeder
	DisconnectRate float64 // disconnect rate forwarded to the underlying NEAT seeder
	// NOTE(review): OutputActivation is never read in Seed below — the NEAT
	// seeder is always given evo.InverseAbs. Confirm whether this field
	// should be wired through instead.
	OutputActivation  evo.Activation
	SeedLocalityLayer bool // add a locality hidden node for the layer dimension
	SeedLocalityX     bool // add a locality hidden node for the x dimension
	SeedLocalityY     bool // add a locality hidden node for the y dimension
	SeedLocalityZ     bool // add a locality hidden node for the z dimension
}
// Seed returns the seed genome for a HyperNEAT setup. If SeedLocality<Dim> is set to true then a
// node is added and connected to the appropriate inputs and the LEO output.
func (s Seeder) Seed() (g evo.Genome, err error) {
	// Create the seed genome using the NEAT seeder.
	// 8 inputs (source and target coordinates) and 3 outputs (weight, bias, LEO).
	ns := neat.Seeder{
		NumInputs:      8,
		NumOutputs:     3,
		NumTraits:      s.NumTraits,
		DisconnectRate: s.DisconnectRate,
		// need a function that gives us [-1,1].
		// NOTE(review): s.OutputActivation is ignored here — confirm intent.
		OutputActivation: evo.InverseAbs,
	}
	if g, err = ns.Seed(); err != nil {
		return
	}
	// Add locality: optionally insert one hidden node per coordinate
	// dimension, wired from the matching source/target input pair.
	enc := g.Encoded
	for i := 0; i < 4; i++ {
		// Skip if locality is not desired for this dimension
		// (i = 0..3 presumably maps to layer, x, y, z — matches the field names).
		switch i {
		case 0:
			if !s.SeedLocalityLayer {
				continue
			}
		case 1:
			if !s.SeedLocalityX {
				continue
			}
		case 2:
			if !s.SeedLocalityY {
				continue
			}
		case 3:
			if !s.SeedLocalityZ {
				continue
			}
		}
		// Create the new node, positioned between the paired inputs (i and i+4)
		// and enc.Nodes[10] — presumably the LEO output given 8 inputs + 3
		// outputs; TODO confirm the node ordering produced by neat.Seeder.
		x0 := enc.Nodes[i].Position
		x1 := enc.Nodes[i+4].Position
		n := evo.Node{
			Position: evo.Midpoint(evo.Midpoint(x0, x1), enc.Nodes[10].Position),
			Neuron:   evo.Hidden,
			// Gauss activation — presumably chosen so the node responds most
			// strongly when source and target coordinates coincide (locality);
			// confirm against the HyperNEAT-LEO literature.
			Activation: evo.Gauss,
		}
		enc.Nodes = append(enc.Nodes, n)
		// Add the connections: both paired inputs feed the new node, which
		// feeds the output at (layer 1.0, x 1.0) — assumed to be LEO; verify.
		enc.Conns = append(enc.Conns,
			evo.Conn{
				Source: evo.Position{Layer: 0.0, X: float64(i) / 7.0},
				Target: n.Position,
				Enabled: true,
			},
			evo.Conn{
				Source: evo.Position{Layer: 0.0, X: float64(i+4) / 7.0},
				Target: n.Position,
				Enabled: true,
			},
			evo.Conn{
				Source: n.Position,
				Target: evo.Position{Layer: 1.0, X: 1.0},
				Enabled: true,
			},
		)
	}
	// Ensure sorted substrate and return
	sort.Slice(enc.Nodes, func(i, j int) bool { return enc.Nodes[i].Compare(enc.Nodes[j]) < 0 })
	sort.Slice(enc.Conns, func(i, j int) bool { return enc.Conns[i].Compare(enc.Conns[j]) < 0 })
	g.Encoded = enc
	return
}
package main
// MinHeap is a binary min-heap of int64 values backed by a slice.
// The smallest element is always at index 0.
type MinHeap struct {
	items []int64
}

// GetLeftChildIndex returns the slice index of the left child of parentIndex.
func (m *MinHeap) GetLeftChildIndex(parentIndex int64) int64 {
	return 2*parentIndex + 1
}

// GetRightChildIndex returns the slice index of the right child of parentIndex.
func (m *MinHeap) GetRightChildIndex(parentIndex int64) int64 {
	return 2*parentIndex + 2
}

// HasLeftChild reports whether the node at index has a left child.
func (m *MinHeap) HasLeftChild(index int64) bool {
	return m.GetLeftChildIndex(index) < int64(len(m.items))
}

// HasRightChild reports whether the node at index has a right child.
func (m *MinHeap) HasRightChild(index int64) bool {
	return m.GetRightChildIndex(index) < int64(len(m.items))
}

// LeftChild returns the value of the left child of the node at index.
// It panics if the child does not exist.
func (m *MinHeap) LeftChild(index int64) int64 {
	return m.items[m.GetLeftChildIndex(index)]
}

// RightChild returns the value of the right child of the node at index.
// It panics if the child does not exist.
func (m *MinHeap) RightChild(index int64) int64 {
	return m.items[m.GetRightChildIndex(index)]
}

// GetParentIndex returns the slice index of the parent of childIndex.
func (m *MinHeap) GetParentIndex(childIndex int64) int64 {
	return (childIndex - 1) / 2
}

// HasParent reports whether the node at index has a parent, i.e. is not the root.
// Bug fix: the previous check `GetParentIndex(index) >= 0` returned true for
// the root because (0-1)/2 truncates to 0 in Go.
func (m *MinHeap) HasParent(index int64) bool {
	return index > 0
}

// Parent returns the value of the parent of the node at index.
// It panics if index is the root.
func (m *MinHeap) Parent(index int64) int64 {
	return m.items[m.GetParentIndex(index)]
}

// Swap exchanges the elements at index1 and index2.
func (m *MinHeap) Swap(index1 int64, index2 int64) {
	m.items[index1], m.items[index2] = m.items[index2], m.items[index1]
}

// Peak returns the smallest element without removing it.
// It panics if the heap is empty. ("Peak" is kept for compatibility; the
// conventional name is "Peek".)
func (m *MinHeap) Peak() int64 {
	return m.items[0]
}

// DeleteRoot removes and returns the smallest element.
// It panics if the heap is empty.
func (m *MinHeap) DeleteRoot() int64 {
	item := m.items[0]
	m.items[0] = m.items[len(m.items)-1]
	m.items = m.items[:len(m.items)-1]
	m.HeapifyDown()
	return item
}

// Delete removes the first occurrence of itemToDelete and returns it.
// The heap is left unchanged if the value is not present.
//
// Bug fixes over the previous version:
//   - a missing value no longer removes the root (index used to default to 0);
//   - after moving the last element into the freed slot, the heap is repaired
//     by sifting both up and down from that slot — sifting down from the root
//     alone could leave the heap property violated elsewhere.
func (m *MinHeap) Delete(itemToDelete int64) int64 {
	index := int64(-1)
	for i, v := range m.items {
		if v == itemToDelete {
			index = int64(i)
			break
		}
	}
	if index == -1 {
		// Not present (or heap empty): nothing to do.
		return itemToDelete
	}
	last := int64(len(m.items)) - 1
	m.items[index] = m.items[last]
	m.items = m.items[:last]
	if index < last {
		// The freed slot now holds the former last element; re-position it.
		m.heapifyUpFrom(index)
		m.heapifyDownFrom(index)
	}
	return itemToDelete
}

// Add inserts item into the heap.
func (m *MinHeap) Add(item int64) {
	m.items = append(m.items, item)
	m.HeapifyUp()
}

// HeapifyUp restores the heap property by sifting the last element up.
func (m *MinHeap) HeapifyUp() {
	m.heapifyUpFrom(int64(len(m.items) - 1))
}

// heapifyUpFrom sifts the element at index toward the root until its parent
// is no larger than it.
func (m *MinHeap) heapifyUpFrom(index int64) {
	for m.HasParent(index) && m.Parent(index) > m.items[index] {
		m.Swap(m.GetParentIndex(index), index)
		index = m.GetParentIndex(index)
	}
}

// HeapifyDown restores the heap property by sifting the root down.
func (m *MinHeap) HeapifyDown() {
	m.heapifyDownFrom(0)
}

// heapifyDownFrom sifts the element at index down, always swapping with the
// smaller child, until both children are no smaller than it.
func (m *MinHeap) heapifyDownFrom(index int64) {
	for m.HasLeftChild(index) {
		smallerChildIndex := m.GetLeftChildIndex(index)
		if m.HasRightChild(index) && m.RightChild(index) < m.LeftChild(index) {
			smallerChildIndex = m.GetRightChildIndex(index)
		}
		if m.items[index] < m.items[smallerChildIndex] {
			break
		}
		m.Swap(index, smallerChildIndex)
		index = smallerChildIndex
	}
}
// main runs only the second demo case; case1 is kept as an additional manual
// check but is not invoked.
func main() {
	case2()
}

// case2 builds a three-element heap, deletes two values, and prints whether
// the remaining root equals 10 (expected output: true).
func case2() {
	h := MinHeap{}
	h.Add(10)
	h.Add(9)
	h.Add(3)
	h.Delete(9)
	h.Delete(3)
	println(h.Peak() == 10)
}

// case1 builds a six-element heap, removes the root and the value 3, and
// prints whether the new root equals 5 (expected output: true).
// NOTE(review): currently unreferenced by main.
func case1() {
	h := MinHeap{}
	h.Add(5)
	h.Add(3)
	h.Add(6)
	h.Add(9)
	h.Add(8)
	h.Add(1)
	h.DeleteRoot()
	h.Delete(3)
	println(h.Peak() == 5)
}
package trivyscanner
import (
"math"
"github.com/aquasecurity/trivy-db/pkg/types"
"github.com/klustair/cvssv3"
log "github.com/sirupsen/logrus"
cvssv2 "github.com/umisama/go-cvss"
)
// Cvss is a normalized view of a vulnerability's CVSS data: at most one v2
// and one v3 vector (with the vendor that supplied it and its parsed
// metrics/scores), plus the overall Score and Version actually selected.
type Cvss struct {
	V2 struct {
		Vector string `json:"vector"` // raw CVSSv2 vector string
		Vendor string `json:"vendor"` // vendor that supplied the vector (e.g. "nvd", "redhat")
		Scores struct {
			Base          float64 `json:"base"`
			Temporal      float64 `json:"temporal"`
			Environmental float64 `json:"environmental"`
		} `json:"scores"`
		Metrics cvssv2.Vectors `json:"metrics"` // parsed vector components
	} `json:"v2"`
	V3 struct {
		Vector string `json:"vector"` // raw CVSSv3 vector string
		Vendor string `json:"vendor"` // vendor that supplied the vector
		Scores struct {
			Base          float64 `json:"base"`
			Temporal      float64 `json:"temporal"`
			Environmental float64 `json:"environmental"`
			//Impact float64 `json:"impact"`
			//Exploitability float64 `json:"exploitability"`
		} `json:"scores"`
		Metrics map[string]string `json:"metrics"` // parsed vector components
	} `json:"v3"`
	Score   float64 `json:"score"`   // overall score taken from the selected vendor entry
	Version float64 `json:"version"` // 2.0 or 3.0; 0.0 when no CVSS data is available
}
/*
"CVSS": {
"nvd": {
"V2Vector": "AV:N/AC:L/Au:N/C:P/I:P/A:P",
"V3Vector": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H",
"V2Score": 7.5,
"V3Score": 9.8
},
"redhat": {
"V3Vector": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H",
"V3Score": 9.8
}
},
*/
/*
{
"nvd": {
"V2Score": 5,
"V3Score": 7.5,
"V2Vector": "AV:N/AC:L/Au:N/C:N/I:P/A:N",
"V3Vector": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:H/A:N",
"provider": "nvd",
"V2Vector_metrics": {
"A": "N",
"C": "N",
"I": "P",
"AC": "L",
"AV": "N",
"Au": "N"
},
"V3Vector_metrics": {
"A": "N",
"C": "N",
"I": "H",
"S": "U",
"AC": "L",
"AV": "N",
"MA": "N",
"MC": "N",
"MI": "H",
"PR": "N",
"UI": "N",
"MAC": "L",
"MAV": "N",
"MPR": "N",
"MUI": "N"
},
"V2Vector_base_score": "5.0",
"V3Vector_base_score": "7.5",
"V3Vector_modified_esc": "3.9",
"V3Vector_modified_isc": "3.6"
},
"redhat": {
"V2Score": 1.9,
"V2Vector": "AV:L/AC:M/Au:N/C:N/I:P/A:N",
"provider": "redhat",
"V2Vector_metrics": {
"A": "N",
"C": "N",
"I": "P",
"AC": "M",
"AV": "L",
"Au": "N"
},
"V2Vector_base_score": "1.9"
}
}
*/
// NewCVSS selects the best available CVSS vector from the vendor map and
// parses it into a normalized Cvss value.
//
// Bug fix: the previous implementation ranged over the map and returned on
// the first iteration, so the outcome depended on Go's randomized map
// iteration order. Selection is now deterministic, matching the priority
// implied by the original if/else-if chain:
// v3 from "nvd" > v3 from "redhat" > v2 from "nvd" > v2 from "redhat".
func NewCVSS(CVSS types.VendorCVSS) *Cvss {
	for _, vendor := range []string{"nvd", "redhat"} {
		if v, ok := CVSS[vendor]; ok && v.V3Vector != "" {
			return new(Cvss).parseV3(v, vendor)
		}
	}
	for _, vendor := range []string{"nvd", "redhat"} {
		if v, ok := CVSS[vendor]; ok && v.V2Vector != "" {
			return new(Cvss).parseV2(v, vendor)
		}
	}
	// No usable vector from a known vendor: record one of the vendors that
	// was present (if any) so the gap is visible downstream, mirroring the
	// original "vendor:<name>" placeholder.
	for vendor := range CVSS {
		cvss := new(Cvss)
		cvss.empty("vendor:" + vendor)
		return cvss
	}
	cvss := new(Cvss)
	cvss.empty("empty")
	return cvss
}
// empty marks cvss as containing no usable CVSS data: all scores stay 0 and
// the reason ("empty" or "vendor:<name>") is recorded in the V2 vendor field.
func (cvss *Cvss) empty(vendor string) *Cvss {
	cvss.V2.Vendor = vendor
	cvss.V2.Scores.Base = 0.0
	cvss.Version = 0.0
	cvss.Score = 0.0
	return cvss
}
// parseV2 fills cvss from a CVSSv2 vector: the vector string is parsed for
// its metrics and temporal/environmental scores, while the base score comes
// from the vendor-supplied value.
func (cvss *Cvss) parseV2(v types.CVSS, vendor string) *Cvss {
	//log.Debugf("CVSS: %s %s %s", v.V2Vector, v.V2Score, vendor) //Too much Logoutput
	cvss.V2.Vector = v.V2Vector
	cvss.V2.Vendor = vendor
	v2Vector, err := cvssv2.ParseVectors("(" + v.V2Vector + ")")
	if err != nil {
		log.Infof("Failed to parse CVSSv2 vector: %s", err)
	}
	// Temporal/environmental scores are optional and may be NaN — guard both.
	if v2Vector.HasTemporalVectors() && !math.IsNaN(v2Vector.TemporalScore()) {
		cvss.V2.Scores.Temporal = v2Vector.TemporalScore()
	}
	if v2Vector.HasEnvironmentalVectors() && !math.IsNaN(v2Vector.EnvironmentalScore()) {
		cvss.V2.Scores.Environmental = v2Vector.EnvironmentalScore()
	}
	cvss.V2.Metrics = v2Vector
	// Trust the vendor-supplied base score. (The previous assignment from
	// v2Vector.BaseScore() was immediately overwritten here and has been
	// removed as a dead store.)
	cvss.V2.Scores.Base = v.V2Score
	cvss.Version = 2.0
	// Bug fix: mirror parseV3, which sets the overall Score; previously the
	// v2-only path left Score at 0.
	cvss.Score = v.V2Score
	return cvss
}
// parseV3 fills cvss from a CVSSv3 vector: the vector string is parsed for
// its metrics and base/temporal/environmental scores, while the overall
// Score comes from the vendor-supplied value.
func (cvss *Cvss) parseV3(v types.CVSS, vendor string) *Cvss {
	//log.Debugf("CVSS: %s %s %s", v.V3Vector, v.V3Score, vendor) //Too much Logoutput
	cvss.V3.Vector = v.V3Vector
	cvss.V3.Vendor = vendor
	v3Vector, err := cvssv3.ParseVector(v.V3Vector)
	if err != nil {
		log.Infof("Failed to parse CVSSv3 vector: %s", err)
		// NOTE(review): on parse failure v3Vector is still used below —
		// confirm the cvssv3 library returns a usable zero value here.
	}
	// NOTE(review): unlike parseV2, the temporal/environmental scores are not
	// guarded against NaN — confirm cvssv3 never returns NaN.
	cvss.V3.Scores.Base = v3Vector.BaseScore()
	cvss.V3.Scores.Environmental = v3Vector.EnvironmentalScore()
	cvss.V3.Scores.Temporal = v3Vector.TemporalScore()
	cvss.V3.Metrics = v3Vector
	cvss.Version = 3.0
	cvss.Score = v.V3Score
	return cvss
}
// Package descriptions provides the descriptions as used by the graphql endpoint for Weaviate
package descriptions
// Local: descriptions for the local (single-instance) Fetch queries.
const (
	LocalFetch    = "Fetch Beacons that are similar to a specified concept from the Objects subsets on a Weaviate network"
	LocalFetchObj = "An object used to perform a Fuzzy Fetch to search for Objects and Actions similar to a specified concept on a Weaviate network"
)

// Descriptions for the local Fetch query variants.
const (
	LocalFetchObjects = "Perform a Fuzzy Fetch to Fetch Beacons similar to a specified concept on a Weaviate network from the Objects subset"
	LocalFetchFuzzy   = "Perform a Fuzzy Fetch to Fetch Beacons similar to a specified concept on a Weaviate network from both the Objects subsets"
)

// Descriptions for the fields of a local Fetch result.
const (
	LocalFetchBeacon     = "A Beacon result from a local Weaviate Local Fetch query"
	LocalFetchClassName  = "The class name of the result from a local Weaviate Local Fetch query"
	LocalFetchCertainty  = "The degree of similarity on a scale of 0-1 between the Beacon's characteristics and the provided concept"
	LocalFetchActionsObj = "An object used to Fetch Beacons from the Actions subset of the dataset"
)

// Descriptions for the fields of a local Fetch Fuzzy result.
const (
	LocalFetchFuzzyBeacon    = "A Beacon result from a local Weaviate Fetch Fuzzy query from both the Objects subsets"
	LocalFetchFuzzyClassName = "Class name of the result from a local Weaviate Fetch Fuzzy query from both the Objects subsets"
	LocalFetchFuzzyCertainty = "The degree of similarity on a scale of 0-1 between the Beacon's characteristics and the provided concept"
	LocalFetchFuzzyObj       = "An object used to Fetch Beacons from both the Objects subsets"
)

// NETWORK: descriptions for the network-wide (federated) Fetch queries.
const (
	NetworkFetch    = "Fetch Beacons that are similar to a specified concept from the Objects subsets on a Weaviate network"
	NetworkFetchObj = "An object used to perform a Fuzzy Fetch to search for Objects similar to a specified concept on a Weaviate network"
)

// Description for the network Fetch Fuzzy query.
const (
	NetworkFetchFuzzy = "Perform a Fuzzy Fetch to Fetch Beacons similar to a specified concept on a Weaviate network from both the Objects subsets"
)

// Descriptions for the fields of a network Fetch Fuzzy result.
const (
	NetworkFetchFuzzyClassName = "The class name of the result from a network Weaviate Fetch Fuzzy query from both the Objects subsets"
	NetworkFetchFuzzyBeacon    = "A Beacon result from a network Weaviate Fetch Fuzzy query from both the Objects subsets"
	NetworkFetchFuzzyCertainty = "The degree of similarity on a scale of 0-1 between the Beacon's characteristics and the provided concept"
	NetworkFetchFuzzyObj       = "An object used to Fetch Beacons from both the Objects subsets"
)
package copypasta
import (
. "fmt"
"math/bits"
)
/*
标准库 "math/bits" 包含了位运算常用的函数,如二进制中 1 的个数、二进制表示的长度等
注意:bits.Len(0) 返回的是 0 而不是 1
bits.Len(x) 相当于 int(Log2(x)+eps)+1
或者说 2^(Len(x)-1) <= x < 2^Len(x)
TIPS: & 和 | 在区间求和上具有单调性;^ 的区间求和见 strings.go 中的 trie.maxXor
** 代码和题目见下面的 bitOpTrick 和 bitOpTrickCnt
常用等式(若改变了计算的顺序,注意优先级!)
a|b = (a^b) + (a&b) a^b = (a|b) - (a&b)
a+b = (a|b) + (a&b)
= (a&b)*2 + (a^b)
= (a|b)*2 - (a^b)
相关题目
https://codeforces.com/problemset/problem/1325/D
https://atcoder.jp/contests/abc050/tasks/arc066_b
结合律:(a&b)^(a&c) = a&(b^c) 其他符号类似
相关题目 https://leetcode-cn.com/contest/weekly-contest-237/problems/find-xor-sum-of-all-pairs-bitwise-and/
运算符优先级 https://golang.org/ref/spec#Operators
Precedence Operator
5 * / % << >> & &^
4 + - | ^
3 == != < <= > >=
2 &&
1 ||
一些子集的枚举算法见 search.go
S∪{i}: S|1<<i
S\{i}: S&^(1<<i)
构造 2^n-1,即 n 个 1 的另一种方法: ^(-1<<n)
检测是否只有一个 1:x&(x-1) == 0
https://oeis.org/A060142 每一段连续 0 的长度均为偶数的数:如 100110000100
Ordered set S defined by these rules: 0 is in S and if x is in S then 2x+1 and 4x are in S
0, 1, 3, 4, 7, 9, 12, 15, 16, 19, 25, 28, 31, 33, 36, 39, 48, 51, 57, 60, 63, 64, 67, 73, 76, 79, 97, 100
https://oeis.org/A086747 Baum-Sweet sequence
相关题目:蒙德里安的梦想 https://www.acwing.com/problem/content/293/
https://oeis.org/A048004 最长连续 1 为 k 的长为 n 的二进制串的个数
相关题目:https://codeforces.com/problemset/problem/1027/E
https://oeis.org/A047778 Concatenation of first n numbers in binary, converted to base 10
相关题目:https://leetcode-cn.com/contest/weekly-contest-218/problems/concatenation-of-consecutive-binary-numbers/
钱珀瑙恩数 Champernowne constant https://en.wikipedia.org/wiki/Champernowne_constant
https://oeis.org/A072339
Any number n can be written (in two ways, one with m even and one with m odd) in the form n = 2^k_1 - 2^k_2 + 2^k_3 - ... + 2^k_m
where the signs alternate and k_1 > k_2 > k_3 > ... >k_m >= 0; sequence gives minimal value of m
https://codeforces.com/problemset/problem/1617/E
异或和相关
https://oeis.org/A003987 异或矩阵
https://oeis.org/A003815 异或和 i a(0)=0, a(4n+1)=1, a(4n+2)=4n+3, a(4n+3)=0, a(4n+4)=4n+4
相关题目 https://codeforces.com/problemset/problem/1493/E
https://codeforces.com/problemset/problem/460/D
https://oeis.org/A145768 异或和 i*i
https://oeis.org/A126084 异或和 质数
https://oeis.org/A018252 异或和 合数?
https://oeis.org/A072594 异或和 质因数分解 是积性函数 a(p^k)=p*(k&1)
https://oeis.org/A072595 满足 A072594(n)=0 的数
https://oeis.org/A178910 异或和 因子
https://oeis.org/A178911 满足 A178910(n)=n 的数 Perfex number
https://oeis.org/A038712 a(n) = n^(n-1) = 1, 3, 1, 7, 1, 3, 1, 15, 1, ...
https://oeis.org/A080277 A038712 的前缀和 => a(n) = n + 2*a(n/2)
二进制长度
https://oeis.org/A029837 Binary order of n: log_2(n) rounded up to next integer
https://oeis.org/A001855 A029837 的前缀和
https://oeis.org/A070939 a(0)=1, a(n)=bits.Len(n)
https://oeis.org/A083652 A070939 的前缀和
OnesCount 相当于二进制的 digsum
https://oeis.org/A000120 wt(n) = OnesCount(n)
https://oeis.org/A000788 前缀和 a(0) = 0, a(2n) = a(n)+a(n-1)+n, a(2n+1) = 2a(n)+n+1
https://oeis.org/A121853 前缀积 https://www.luogu.com.cn/problem/P4317
https://oeis.org/A092391 n+OnesCount(n)
https://oeis.org/A010061 二进制自我数/哥伦比亚数(A092391 的补集)
https://oeis.org/A011371 n-OnesCount(n) Also highest power of 2 dividing n!
a(n) = floor(n/2) + a(floor(n/2))
这同时是前 n 个数的质因子分解的 2 的幂次之和
https://oeis.org/A027868 Number of trailing zeros in n!; highest power of 5 dividing n!
a(n) = (n-A053824(n))/4, 其中 A053824(n) = Sum of digits of (n written in base 5)
推广至任意数:n! 的质因子分解中,p 的幂次为 (n-digsum_p(n))/(p-1),其中 digsum_p(n) 表示 n 的 p 进制的数位和
https://oeis.org/A245788 n*OnesCount(n)
https://oeis.org/A049445 OnesCount(n)|n
- n/OnesCount(n)
https://oeis.org/A199238 n%OnesCount(n)
https://oeis.org/A010062 a(0)=1, a(n+1)=a(n)+OnesCount(a(n))
https://oeis.org/A096303 从 n 出发不断执行 n+=OnesCount(n),直到 n 在 A010062 中,所需要的迭代次数
Number of iterations of n -> n + (number of 1's in binary representation of n) needed for the trajectory of n to join the trajectory of A010062
https://oeis.org/A229743 Positions of records
https://oeis.org/A229744 Values of records
相关题目 https://www.luogu.com.cn/problem/P5891 https://class.luogu.com.cn/classroom/lgr66
https://oeis.org/A180094 Number of steps to reach 0 or 1, starting with n and applying the map k -> (number of 1's in binary expansion of k) repeatedly
https://oeis.org/A023416 Number of 0's in binary expansion of n
a(n) = a(n/2) + 1 - n&1
https://oeis.org/A059015 A023416 的前缀和
十进制 digsum
一点点数学 https://codeforces.com/problemset/problem/817/C
https://oeis.org/A007953 digsum(n)
https://oeis.org/A062028 n+digsum(n) 质数 https://oeis.org/A047791 合数 https://oeis.org/A107743
https://oeis.org/A003052 自我数/哥伦比亚数 Self number / Colombian number
https://en.wikipedia.org/wiki/Self_number
1, 3, 5, 7, 9, 20, 31, 42, 53, 64, 75, 86, 97, 108, ...
https://oeis.org/A006378 自我质数 Self primes
https://oeis.org/A066568 n-digsum(n)
https://oeis.org/A057147 n*digsum(n)
https://oeis.org/A005349 digsum(n)|n Niven (or Harshad) numbers
https://oeis.org/A065877 digsum(n)∤n Non-Niven (or non-Harshad) numbers
https://oeis.org/A001101 Moran numbers: n such that (n / digsum(n)) is prime
https://oeis.org/A016052 a(1)=3, a(n+1)=a(n)+digsum(a(n))
https://oeis.org/A051885 Smallest number whose digsum = n
int64(n%9+1) * int64(math.Pow10(n/9)) - 1
相关题目 https://codeforces.com/contest/1373/problem/E
https://oeis.org/A077196 Smallest possible sum of the digits of a multiple of n https://oeis.org/A077194 https://oeis.org/A077195
相关题目(0-1 最短路)https://atcoder.jp/contests/arc084/tasks/arc084_b
https://oeis.org/A118137 digsum(n)+digsum(n+1)
https://oeis.org/A003132 Sum of squares of digits of n
https://oeis.org/A003621 Number of iterations until n reaches 1 or 4 under x goes to sum of squares of digits map
https://oeis.org/A055012 Sum of cubes of digits of n
https://oeis.org/A055013 Sum of 4th powers of digits of n
https://oeis.org/A055014 Sum of 5th powers of digits of n
https://oeis.org/A055015 Sum of 6th powers of digits of n
相关题目 https://www.luogu.com.cn/problem/P1660
https://oeis.org/A031286 Additive persistence: number of summations of digits needed to obtain a single digit (the additive digital root)
https://oeis.org/A031346 Multiplicative persistence: number of iterations of "multiply digits" needed to reach a number < 10
回文数
https://oeis.org/A002113 十进制回文数
https://oeis.org/A043269 digsum(A002113(n))
https://oeis.org/A070199 Number of palindromes of length <= n
https://oeis.org/A002779 回文平方数
https://oeis.org/A002778 Numbers whose square is a palindrome
https://oeis.org/A002781 回文立方数
https://oeis.org/A002780 Numbers whose cube is a palindrome
https://oeis.org/A002385 回文素数
https://en.wikipedia.org/wiki/Palindromic_prime
https://oeis.org/A006567 反素数 emirp (primes whose reversal is a different prime)
https://en.wikipedia.org/wiki/Emirp
https://oeis.org/A003459 绝对素数/可交换素数 Absolute primes (or permutable primes): every permutation of the digits is a prime
https://en.wikipedia.org/wiki/Permutable_prime
https://oeis.org/A007500 Primes whose reversal in base 10 is also prime
https://oeis.org/A006995 二进制回文数
https://oeis.org/A007632 既是二进制回文数又是十进制回文数
https://oeis.org/A090994 Number of meaningful differential operations of the n-th order on the space R^9
a(k+5) = a(k+4) + 4*a(k+3) - 3*a(k+2) - 3*a(k+1) + a(k)
相关题目 LC1215/双周赛10C https://leetcode-cn.com/contest/biweekly-contest-10/problems/stepping-numbers/
套路题 https://codeforces.com/problemset/problem/1415/D
按位归纳 https://codeforces.com/problemset/problem/925/C
*/
// Bitset
// Modeled on the C++ standard library implementation:
// https://gcc.gnu.org/onlinedocs/libstdc++/libstdc++-html-USERS-3.4/bitset-source.html
// If a method must not modify b and should return a modified copy instead,
// start the method with b = append(Bitset(nil), b...) and return b.
// Application: https://codeforces.com/problemset/problem/33/D (can also be solved with LCA)
const _w = bits.UintSize

// NewBitset returns a bitset able to hold bits 0..n.
func NewBitset(n int) Bitset { return make(Bitset, n/_w+1) } // (n+_w-1)/_w would be the tight word count

// Bitset stores bits packed into machine words; bit p lives in word p/_w at position p%_w.
type Bitset []uint

func (b Bitset) Has(p int) bool { return b[p/_w]&(1<<(p%_w)) != 0 } // get
func (b Bitset) Flip(p int)     { b[p/_w] ^= 1 << (p % _w) }
func (b Bitset) Set(p int)      { b[p/_w] |= 1 << (p % _w) }  // set bit p to 1
func (b Bitset) Reset(p int)    { b[p/_w] &^= 1 << (p % _w) } // set bit p to 0

// Lsh shifts the whole bitset left by k bits, in place.
// Application: https://leetcode-cn.com/problems/minimize-the-difference-between-target-and-chosen-elements/submissions/
func (b Bitset) Lsh(k int) {
	if k == 0 {
		return
	}
	shift, offset := k/_w, k%_w
	if shift >= len(b) {
		// Everything is shifted out: clear all words.
		for i := range b {
			b[i] = 0
		}
		return
	}
	if offset == 0 {
		// Fast path
		copy(b[shift:], b)
	} else {
		// Combine the two source words that contribute to each destination word.
		for i := len(b) - 1; i > shift; i-- {
			b[i] = b[i-shift]<<offset | b[i-shift-1]>>(_w-offset)
		}
		b[shift] = b[0] << offset
	}
	for i := 0; i < shift; i++ {
		b[i] = 0
	}
}

// Rsh shifts the whole bitset right by k bits, in place.
func (b Bitset) Rsh(k int) {
	if k == 0 {
		return
	}
	shift, offset := k/_w, k%_w
	if shift >= len(b) {
		for i := range b {
			b[i] = 0
		}
		return
	}
	lim := len(b) - 1 - shift
	if offset == 0 {
		// Fast path
		copy(b, b[shift:])
	} else {
		for i := 0; i < lim; i++ {
			b[i] = b[i+shift]>>offset | b[i+shift+1]<<(_w-offset)
		}
		// Note: when mixing Lsh and Rsh calls, beware of stray 1 bits beyond
		// position n affecting the result.
		b[lim] = b[len(b)-1] >> offset
	}
	for i := lim + 1; i < len(b); i++ {
		b[i] = 0
	}
}

// OnesCount returns the number of set bits.
func (b Bitset) OnesCount() (c int) {
	for _, v := range b {
		c += bits.OnesCount(v)
	}
	return
}

// Foreach visits the position of every set bit in increasing order.
// If a range restriction is needed, f can return p < n (the original note).
func (b Bitset) Foreach(f func(p int) (Break bool)) {
	for i, v := range b {
		for ; v > 0; v &= v - 1 { // v &= v-1 clears the lowest set bit
			j := i*_w | bits.TrailingZeros(v)
			if f(j) {
				return
			}
		}
	}
}

// Index0 returns the index of the first 0 bit; if none exists within the
// allocated words, it returns a position not smaller than n.
func (b Bitset) Index0() int {
	for i, v := range b {
		if ^v != 0 {
			return i*_w | bits.TrailingZeros(^v)
		}
	}
	return len(b) * _w
}

// Index1 returns the index of the first 1 bit; if none exists, it returns a
// position not smaller than n (like C++ _Find_first).
func (b Bitset) Index1() int {
	for i, v := range b {
		if v != 0 {
			return i*_w | bits.TrailingZeros(v)
		}
	}
	return len(b) * _w
}

// Next1 returns the index of the first 1 bit at an index strictly greater
// than p; if none exists, it returns a position not smaller than n
// (like C++ _Find_next).
func (b Bitset) Next1(p int) int {
	p++ // make bound inclusive
	if i := p / _w; i < len(b) {
		v := b[i] & (^uint(0) << (p % _w)) // mask off bits below bound
		if v != 0 {
			return i*_w | bits.TrailingZeros(v)
		}
		for i++; i < len(b); i++ {
			if b[i] != 0 {
				return i*_w | bits.TrailingZeros(b[i])
			}
		}
	}
	return len(b) * _w
}

// All0 reports whether every bit in [l,r] is 0.
// https://codeforces.com/contest/1107/problem/D (the standard approach is a 2D prefix sum)
func (b Bitset) All0(l, r int) bool {
	i := l / _w
	if i == r/_w {
		// l and r fall in the same word.
		return b[i]>>(l%_w)&(1<<(r-l+1)-1) == 0
	}
	if b[i]>>(l%_w) != 0 {
		return false
	}
	for i++; i < r/_w; i++ {
		if b[i] != 0 {
			return false
		}
	}
	return b[r/_w]&(1<<(r%_w+1)-1) == 0
}

// All1 reports whether every bit in [l,r] is 1.
func (b Bitset) All1(l, r int) bool {
	i := l / _w
	if i == r/_w {
		// l and r fall in the same word.
		mask := 1<<(r-l+1) - 1
		return b[i]>>(l%_w)&mask == mask
	}
	if ^(b[i] | (1<<(l%_w) - 1)) != 0 {
		return false
	}
	for i++; i < r/_w; i++ {
		if ^b[i] != 0 {
			return false
		}
	}
	mask := 1<<(r%_w+1) - 1
	return b[r/_w]&mask == mask
}

// The following methods require both bitsets to have the same length.

// Equals reports whether b and c hold identical bits.
func (b Bitset) Equals(c Bitset) bool {
	for i, v := range b {
		if v != c[i] {
			return false
		}
	}
	return true
}

// HasSubset reports whether every bit set in c is also set in b.
func (b Bitset) HasSubset(c Bitset) bool {
	for i, v := range b {
		if v|c[i] != v {
			return false
		}
	}
	return true
}

// Merge ORs the elements of c into b.
func (b Bitset) Merge(c Bitset) {
	for i, v := range c {
		b[i] |= v
	}
}
// Note: bit-trick techniques for subset enumeration live in search.go.
// bitsCollection is a grab-bag of bit-manipulation snippets kept for
// competitive-programming reference; none of the closures escape.
func bitsCollection() {
	// uses the identity -v = ^v+1
	lowbit := func(v int64) int64 { return v & -v }
	// 1,2,4,8,...
	isPow2 := func(v int64) bool { return v > 0 && v&(v-1) == 0 }
	// has two adjacent 1 bits — yes: https://oeis.org/A004780  no: https://oeis.org/A003714
	hasAdjacentOnes := func(v uint) bool { return v>>1&v > 0 }
	// has two adjacent 0 bits (ignoring leading zeros) — yes: https://oeis.org/A004753  no: http://oeis.org/A003754
	hasAdjacentZeros := func(v uint) bool {
		v |= v >> 1 // if there are no adjacent 0s, v becomes all 1s
		return v&(v+1) > 0
	}
	// bits31 returns the 31 low bits of v, most significant first.
	bits31 := func(v int) []byte {
		bits := make([]byte, 31)
		for i := range bits {
			bits[i] = byte(v >> (30 - i) & 1)
		}
		return bits
	}
	_bits31 := func(v int) string { return Sprintf("%031b", v) }
	_bits32 := func(v uint) string { return Sprintf("%032b", v) }
	// leastXor returns the smallest non-negative x satisfying n^x >= m
	// https://codeforces.com/problemset/problem/1554/C
	leastXor := func(n, m int) (res int) {
		for i := 29; i >= 0; i-- { // 29 for 1e9
			bn, bm := n>>i&1, m>>i&1
			if bn == 1 && bm == 0 { // the remaining bits can all be 0
				break
			}
			if bn == 0 && bm == 1 { // a 1 must be placed here
				res |= 1 << i
			}
			// when bn == bm, place a 0
		}
		return
	}
	// logTrick: over all subarrays of a, return every possible value of op(subarray elements)
	// exploits the monotonicity of the operation
	// complexity O(f * n * logU), where f is the cost of op(x,y), n = len(a), U = max element of a
	// |: LC898/weekly contest 100C https://leetcode-cn.com/contest/weekly-contest-100/problems/bitwise-ors-of-subarrays/
	// &: LC1521/weekly contest 198D https://leetcode-cn.com/contest/weekly-contest-198/problems/find-a-value-of-a-mysterious-function-closest-to-target/
	// GCD: https://codeforces.com/edu/course/2/lesson/9/2/practice/contest/307093/problem/G
	// https://codeforces.com/problemset/problem/475/D (see bitOpTrickCnt below)
	bitOpTrick := func(a []int, op func(x, y int) int) map[int]bool {
		ans := map[int]bool{} // distinct values of op over some subarray
		set := []int{}
		for _, v := range a {
			for i, w := range set {
				set[i] = op(w, v)
			}
			set = append(set, v)
			// dedupe
			k := 0
			for _, w := range set[1:] {
				if set[k] != w {
					k++
					set[k] = w
				}
			}
			set = set[:k+1]
			for _, w := range set {
				// do w...
				ans[w] = true
			}
		}
		return ans
	}
	// advanced: over all subarrays of a, return every value of op(subarray elements)
	// together with its number of occurrences
	// https://codeforces.com/problemset/problem/475/D
	// combined with a monotonic stack https://codeforces.com/problemset/problem/875/D
	// CERC13, purple-book example 10-29, UVa 1642 https://onlinejudge.org/index.php?option=com_onlinejudge&Itemid=8&category=825&page=show_problem&problem=4517
	bitOpTrickCnt := func(a []int, op func(x, y int) int) map[int]int64 {
		cnt := map[int]int64{}
		type result struct{ v, l, r int } // [l,r)
		set := []result{}
		for i, v := range a {
			for j, p := range set {
				set[j].v = op(p.v, v)
			}
			set = append(set, result{v, i, i + 1})
			// dedupe, merging the index ranges of equal values
			k := 0
			for _, q := range set[1:] {
				if set[k].v != q.v {
					k++
					set[k] = q
				} else {
					set[k].r = q.r
				}
			}
			set = set[:k+1]
			// Now [0,i] is partitioned into len(set) half-open intervals: for every
			// j in [set[k].l,set[k].r), op over the subarray [j,i] equals set[k].v.
			for _, p := range set {
				// do p... [l,r)
				cnt[p.v] += int64(p.r - p.l)
			}
		}
		return cnt
	}
	// (continuing the idea above) now consider multiplication.
	// Problem: given an array a of positive integers, count the subarrays whose sum equals their product.
	// For each right endpoint, count the number of valid left endpoints.
	// Key idea: for any qualifying subarray, its product cannot exceed sum(a).
	// Since a product changes only by at least doubling, a fixed right endpoint yields
	// at most O(log(sum(a))) distinct products, and — because all elements are
	// positive — at most O(log(sum(a))) candidate left endpoints as well.
	// So after appending each new number we dedupe and drop intervals whose product
	// exceeds sum(a), which solves the problem by brute force.
	// Note: the same derivation bounds the total number of answers by O(n*log(sum(a))).
	// https://www.dotcpp.com/oj/problem2622.html
	countSumEqMul := func(a []int) (ans int) {
		tot := 0
		for _, v := range a {
			tot += v
		}
		// all prefix sums are distinct (elements are positive)
		posS := map[int]int{0: 0} // int64
		sum := 0
		type result struct{ v, l, r int }
		muls := []result{}
		for i, v := range a {
			sum += v
			for j := range muls {
				muls[j].v *= v
			}
			muls = append(muls, result{v, i, i + 1})
			// dedupe, merging the index ranges of equal products
			k := 0
			for _, q := range muls[1:] {
				if muls[k].v != q.v {
					k++
					muls[k] = q
				} else {
					muls[k].r = q.r
				}
			}
			muls = muls[:k+1]
			// drop products exceeding tot, keeping at most O(log(tot)) entries in muls
			for muls[0].v > tot {
				muls = muls[1:]
			}
			// Now [0,i] is partitioned into len(muls) half-open intervals: for every
			// j in [muls[k].l,muls[k].r), the product over [j,i] is muls[k].v.
			for _, p := range muls {
				// check whether the prefix-sum index of the matching left endpoint lies in range
				if pos, has := posS[sum-p.v]; has && p.l <= pos && pos < p.r {
					ans++
				}
			}
			posS[sum] = i + 1
		}
		return
	}
	// zeroXorSum3 finds three distinct numbers in [l,r] whose XOR is 0.
	// To make max-min as small as possible, construct (x, y, z) = (b*2-1, b*3-1, b*3), b=2^k.
	// Related problem: https://codeforces.com/problemset/problem/460/D
	zeroXorSum3 := func(l, r int64) []int64 {
		for b := int64(1); b*3 <= r; b <<= 1 {
			if x, y, z := b*2-1, b*3-1, b*3; l <= x && z <= r {
				return []int64{x, y, z}
			}
		}
		return nil
	}
	// local min helper
	min := func(a, b int) int {
		if a < b {
			return a
		}
		return b
	}
	// maxXorWithLimit finds two numbers A, B in [low,high] whose XOR is as large
	// as possible without exceeding limit.
	// The return values satisfy A <= B.
	// Complexity O(log(high)).
	maxXorWithLimit := func(low, high, limit int) (int, int) {
		n := bits.Len(uint(high ^ low))
		maxXor := 1<<n - 1
		mid := high&^maxXor | 1<<(n-1)
		if limit >= maxXor { // unconstrained; related problem https://codeforces.com/problemset/problem/276/D
			return mid - 1, mid
		}
		if limit >= 1<<(n-1) { // can A and B differ at bit n-1 and still yield a valid answer?
			a, b := mid&(mid-1), mid
			for i := n - 2; i >= 0; i-- {
				bt := 1 << i
				if limit&bt > 0 { // a=1,b=0 always beats a=0,b=1
					a |= bt
				} else if high&(bt<<1-1) > ^low&(bt<<1-1) { // the high side is larger: both take 1
					if high&bt == 0 { // b cannot take 1
						goto next
					}
					a |= bt
					b |= bt
				} else { // the low side is larger: both take 0
					if low&bt > 0 { // a cannot take 0
						goto next
					}
				}
				if (a^low)&bt > 0 { // a is no longer constrained by low
					a |= limit & (bt - 1)
					break
				}
				if (b^high)&bt > 0 { // b is no longer constrained by high
					a |= bt - 1
					b |= ^limit & (bt - 1)
					break
				}
			}
			return a, b
		}
		// A and B must agree at bit n-1.
	next:
		f := func(high int) (int, int) {
			n := bits.Len(uint(high ^ mid))
			maxXor := min(1<<n-1, limit)
			// only when maxXor is 0 must the two return values be equal
			if maxXor == 0 {
				return mid, mid
			}
			// put the highest bit of maxXor on B, the rest on A
			mb := 1 << (bits.Len(uint(maxXor)) - 1)
			return mid | maxXor&^mb, mid | mb
		}
		if high-mid > mid-1-low { // choose the longer side
			return f(high)
		}
		a, b := f(2*mid - 1 - low) // mirror to the high side
		return 2*mid - 1 - b, 2*mid - 1 - a
	}
	_ = []interface{}{lowbit, isPow2, hasAdjacentOnes, hasAdjacentZeros, bits31, _bits31, _bits32, leastXor, bitOpTrick, bitOpTrickCnt, countSumEqMul, zeroXorSum3, maxXorWithLimit}
}
// https://halfrost.com/go_s2_de_bruijn/
// LC137 https://leetcode-cn.com/problems/single-number-ii/
// Every element appears exactly three times except one, which appears once;
// singleNumber returns that unique element.
//
// It tracks two bit masks:
//   seenOnce  - bits whose count so far is congruent to 1 (mod 3)
//   seenTwice - bits whose count so far is congruent to 2 (mod 3)
// A bit seen a third time is cleared from both masks, so after the scan
// seenOnce holds exactly the bits of the unique element.
func singleNumber(a []int) int {
	seenOnce, seenTwice := 0, 0
	for _, v := range a {
		seenOnce = (seenOnce ^ v) &^ seenTwice
		seenTwice = (seenTwice ^ v) &^ seenOnce
	}
	return seenOnce
}
package container
import (
"log"
"reflect"
)
// CreateInvocable builds an Invocable from a reflect.Type, which must describe
// a function or a struct; any other type is logged and yields nil.
func CreateInvocable(bindingType reflect.Type) *Invocable {
	ok, kind := isInvocable(bindingType)
	if !ok {
		log.Printf("type passed to CreateInvocable (%s) is not an invocable type(function or struct)", bindingType.String())
		return nil
	}
	return &Invocable{bindingType: bindingType, typeOfBinding: kind}
}
// CreateInvocableFunction wraps a function reference directly, skipping the
// need to get/resolve the type separately. Non-function values are logged and
// yield nil.
func CreateInvocableFunction(function any) *Invocable {
	t := getType(function)
	ok, kind := isInvocable(t)
	if !ok {
		log.Printf("type passed to CreateInvocableFunction is not an invocable type(function or struct)")
		return nil
	}
	return &Invocable{
		instance:       getVal(function),
		bindingType:    t,
		typeOfBinding:  kind,
		isInstantiated: true,
	}
}
// CreateInvocableStruct wraps a struct reference directly, skipping the need
// to get/resolve the type separately. Non-struct values are logged and yield
// nil.
func CreateInvocableStruct(structRef any) *Invocable {
	t := getType(structRef)
	ok, kind := isInvocable(t)
	if !ok {
		log.Printf("type passed to CreateInvocableStruct (%s) is not an invocable type(function or struct)", t.String())
		return nil
	}
	return &Invocable{
		instance:       getVal(structRef),
		bindingType:    t,
		typeOfBinding:  kind,
		isInstantiated: true,
	}
}
// Invocable wraps a bound function or struct type so it can be instantiated
// and invoked with arguments resolved from a container.
type Invocable struct {
	// bindingType is the reflected type of the bound function or struct.
	bindingType reflect.Type
	// typeOfBinding is either "func" or "struct".
	typeOfBinding string
	// instance is the instantiated (or caller-supplied) reflect value.
	instance reflect.Value
	// isInstantiated records whether instance currently holds a usable value.
	isInstantiated bool
}
// instantiate creates the underlying value according to the binding kind.
func (invocable *Invocable) instantiate() {
	switch invocable.typeOfBinding {
	case "func":
		invocable.instantiateFunction()
	case "struct":
		invocable.instantiateStruct()
	}
}
// instantiateFunction creates a fresh value for the bound function type and
// marks the Invocable as instantiated. A non-function binding is logged and
// left unchanged.
//
// NOTE(review): reflect.New(t) yields a *T — here a pointer to a nil func —
// whereas CreateInvocableFunction stores the callable value itself. Confirm
// callers never rely on this path to produce something invocable via
// instance.Call.
func (invocable *Invocable) instantiateFunction() {
	if invocable.typeOfBinding != "func" {
		log.Printf("Cannot Instantiate type of %s. instantiateFunction() can only instantiate functions.", invocable.bindingType.String())
		return
	}
	invocable.instance = reflect.New(invocable.bindingType)
	invocable.isInstantiated = true
}
// instantiateStruct creates a fresh zero value for the bound struct type
// (as a *struct via reflect.New) and marks the Invocable as instantiated.
// A non-struct binding is logged and left unchanged.
func (invocable *Invocable) instantiateStruct() {
	if invocable.typeOfBinding != "struct" {
		log.Printf("Cannot Instantiate type of %s. instantiateStruct() can only instantiate structs.", invocable.bindingType.String())
		return
	}
	invocable.instance = reflect.New(invocable.bindingType)
	invocable.isInstantiated = true
}
// InstantiateStructAndFill ensures the bound struct is instantiated and then
// resolves its fields from the container, returning the filled value.
func (invocable *Invocable) InstantiateStructAndFill(container *ContainerInstance) reflect.Value {
	if !invocable.isInstantiated {
		invocable.instantiate()
	}
	return container.resolveStructFields(invocable.bindingType, invocable.instance)
}
// InstantiateWith instantiates the bound struct, fills its fields with values
// from the container, and returns the result as an interface value.
func (invocable *Invocable) InstantiateWith(container *ContainerInstance) any {
	return invocable.InstantiateStructAndFill(container).Interface()
}
// CallMethodByNameWith calls the named method on the bound struct, resolving
// the method's arguments from the passed parameters and the container.
// It panics when the Invocable does not wrap a struct.
func (invocable *Invocable) CallMethodByNameWith(methodName string, container *ContainerInstance, parameters ...any) []reflect.Value {
	if invocable.typeOfBinding != "struct" {
		panic("CallMethodByNameWith is only usable when the Invocable instance is created with a struct.")
	}
	// InstantiateStructAndFill instantiates on demand, so the previous
	// separate isInstantiated/instantiate step here was redundant.
	structInstance := invocable.InstantiateStructAndFill(container)
	method := structInstance.MethodByName(methodName)
	return method.Call(container.ResolveFunctionArgs(method, parameters...))
}
// CallMethodByNameWithArgInterceptor is like CallMethodByNameWith but passes
// each resolved argument through the provided interceptor first.
// It panics when the Invocable does not wrap a struct.
func (invocable *Invocable) CallMethodByNameWithArgInterceptor(methodName string, container *ContainerInstance, interceptor FuncArgResolverInterceptor, parameters ...any) []reflect.Value {
	if invocable.typeOfBinding != "struct" {
		// Bug fix: the message previously named CallMethodByNameWith.
		panic("CallMethodByNameWithArgInterceptor is only usable when the Invocable instance is created with a struct.")
	}
	// InstantiateStructAndFill instantiates on demand, so no separate
	// instantiation step is needed here.
	structInstance := invocable.InstantiateStructAndFill(container)
	method := structInstance.MethodByName(methodName)
	return method.Call(container.ResolveFunctionArgsWithInterceptor(method, interceptor, parameters...))
}
// CallMethodWith - Call the method and assign its parameters from the passed parameters & container
func (invocable *Invocable) CallMethodWith(container *ContainerInstance, parameters ...any) []reflect.Value {
if !invocable.isInstantiated {
invocable.instantiate()
}
return invocable.instance.Call(
container.ResolveFunctionArgs(invocable.instance, parameters...),
)
} | invocable.go | 0.614047 | 0.509032 | invocable.go | starcoder |
package datastructure
import (
"errors"
"fmt"
"reflect"
)
// CircularQueue implements a ring-buffer queue backed by a slice.
// One slot is always kept unused to distinguish full from empty, so the
// actual capacity is size - 1.
type CircularQueue[T any] struct {
	data  []T // backing storage, len == size
	front int // index of the first element
	rear  int // index one past the last element
	size  int // len(data); usable capacity is size-1
}

// NewCircularQueue returns an empty CircularQueue whose backing slice has the
// given size (usable capacity size-1).
func NewCircularQueue[T any](size int) *CircularQueue[T] {
	data := make([]T, size)
	return &CircularQueue[T]{data: data, front: 0, rear: 0, size: size}
}

// Data returns the queued elements in order from front to back.
func (q *CircularQueue[T]) Data() []T {
	data := []T{}
	front := q.front
	rear := q.rear
	if front <= rear {
		return q.data[front:rear]
	}
	// Wrapped: stitch the tail segment and the head segment together.
	data = append(data, q.data[front:]...)
	data = append(data, q.data[0:rear]...)
	return data
}

// Length returns the number of elements currently in the queue.
func (q *CircularQueue[T]) Length() int {
	if q.size == 0 {
		return 0
	}
	return (q.rear - q.front + q.size) % q.size
}

// IsEmpty reports whether the queue holds no elements.
func (q *CircularQueue[T]) IsEmpty() bool {
	return q.front == q.rear
}

// IsFull reports whether the queue is at capacity (size-1 elements).
func (q *CircularQueue[T]) IsFull() bool {
	return (q.rear+1)%q.size == q.front
}

// Front returns the first element without removing it.
// Like the original implementation, calling it on an empty queue returns the
// zero-value content of the front slot.
func (q *CircularQueue[T]) Front() T {
	return q.data[q.front]
}

// Back returns the last element without removing it, accounting for wrap.
func (q *CircularQueue[T]) Back() T {
	if q.rear-1 >= 0 {
		return q.data[q.rear-1]
	}
	return q.data[q.size-1]
}

// EnQueue appends value at the back of the queue; it returns an error when
// the queue is full.
func (q *CircularQueue[T]) EnQueue(value T) error {
	if q.IsFull() {
		// Error string normalized (was "queue is full!") for consistency
		// with DeQueue and Go error-string conventions.
		return errors.New("queue is full")
	}
	q.data[q.rear] = value
	q.rear = (q.rear + 1) % q.size
	return nil
}

// DeQueue removes and returns the front element; it returns an error when the
// queue is empty. The vacated slot is zeroed so it does not retain the value.
func (q *CircularQueue[T]) DeQueue() (*T, error) {
	if q.IsEmpty() {
		return nil, errors.New("queue is empty")
	}
	headItem := q.data[q.front]
	var zero T
	q.data[q.front] = zero
	q.front = (q.front + 1) % q.size
	return &headItem, nil
}

// Clear removes all elements while keeping the backing storage and capacity,
// so the queue remains usable afterwards.
// Bug fix: the previous version zeroed size and dropped the backing slice,
// which made any later EnQueue panic (division by zero in IsFull and an
// out-of-range index into the empty slice).
func (q *CircularQueue[T]) Clear() {
	q.data = make([]T, q.size)
	q.front = 0
	q.rear = 0
}

// Contain reports whether value is currently in the queue.
// Bug fix: the previous version scanned the raw backing slice, so stale or
// zero-valued empty slots produced false positives (e.g. Contain(0) was true
// on an empty CircularQueue[int]). Only the logical contents are examined now.
func (q *CircularQueue[T]) Contain(value T) bool {
	for _, v := range q.Data() {
		if reflect.DeepEqual(v, value) {
			return true
		}
	}
	return false
}

// Print writes the queue's internal state to standard output.
func (q *CircularQueue[T]) Print() {
	fmt.Printf("%+v\n", q)
}
package emacs
import (
"fmt"
"math"
"time"
)
// #include "wrappers.h"
import "C"
// Time is a type with underlying type time.Time that knows how to convert
// itself from and to an Emacs time value.
type Time time.Time

// String formats the time as a string. It delegates to time.Time.String.
func (t Time) String() string { return time.Time(t).String() }
// Emacs returns an Emacs timestamp as a pair (ticks . hz) or a quadruple
// (high low μs ps) in the same format as the Emacs function current-time.
// The conversion itself is performed by the C module API via makeTime.
func (t Time) Emacs(e Env) (Value, error) {
	x := time.Time(t)
	return e.makeTime(x.Unix(), x.Nanosecond())
}
// FromEmacs sets *t to the Go equivalent of the Emacs time value in v. v can
// be any time value: nil (for the current time), a number of seconds since the
// epoch, a pair (ticks . hz), a pair (high low), a triple (high low μs), or a
// quadruple (high low μs ps). The picosecond value is truncated to nanosecond
// precision. If the Emacs time value would overflow a Go time, FromEmacs
// returns an error and leaves *t untouched.
func (t *Time) FromEmacs(e Env, v Value) error {
	parsed, err := e.Time(v)
	if err == nil {
		*t = Time(parsed)
	}
	return err
}
// Time returns the Go equivalent of the Emacs time value in v. v can be any
// time value: nil (for the current time), a number of seconds since the epoch,
// a pair (ticks . hz), a pair (high low), a triple (high low μs), or a
// quadruple (high low μs ps). The picosecond value is truncated to nanosecond
// precision. If the Emacs time value would overflow a Go time, Time returns
// an error.
func (e Env) Time(v Value) (time.Time, error) {
	s, ns, err := e.extractTime(v)
	// Note: a time.Time built from s/ns is returned even when err is non-nil;
	// callers must check the error before using the value.
	return time.Unix(s, int64(ns)), err
}
// Duration is a type with underlying type time.Duration that knows how to
// convert itself from and to an Emacs time value.
type Duration time.Duration

// String formats the duration as a string. It delegates to time.Duration.String.
func (d Duration) String() string { return time.Duration(d).String() }
// Emacs returns an Emacs timestamp as a pair (ticks . hz) or a quadruple
// (high low μs ps) in the same format as the Emacs function current-time.
func (d Duration) Emacs(e Env) (Value, error) {
	x := time.Duration(d)
	s, ns := int64(x/time.Second), int(x%time.Second)
	// Go's integer division truncates toward zero, so for negative durations
	// normalize to a floored second count with ns in [0, 1e9).
	if ns < 0 {
		s--
		ns += int(time.Second)
	}
	return e.makeTime(s, ns)
}
// FromEmacs sets *d to the Go equivalent of the Emacs time value in v,
// interpreted as a duration. v can be any time value: nil (for the current
// time), a number of seconds, a pair (ticks . hz), a pair (high low), a triple
// (high low μs), or a quadruple (high low μs ps). The picosecond value is
// truncated to nanosecond precision. If the Emacs time value would overflow a
// Go duration, FromEmacs returns an error and leaves *d untouched.
func (d *Duration) FromEmacs(e Env, v Value) error {
	parsed, err := e.Duration(v)
	if err == nil {
		*d = Duration(parsed)
	}
	return err
}
// Duration returns the Go equivalent of the Emacs time value in v, interpreted
// as a duration. v can be any time value: nil (for the current time), a
// number of seconds, a pair (ticks . hz) a pair (high low), a triple
// (high low μs), or a quadruple (high low μs ps). The picosecond value is
// truncated to nanosecond precision. If the Emacs time value would overflow a
// Go duration, Duration returns an error.
func (e Env) Duration(v Value) (time.Duration, error) {
	s, ns, err := e.extractTime(v)
	if err != nil {
		return 0, err
	}
	// Reject second counts whose conversion to nanoseconds would overflow
	// int64, the representation of time.Duration.
	if s <= math.MinInt64/int64(time.Second) || s >= math.MaxInt64/int64(time.Second) {
		return 0, OverflowError(fmt.Sprintf("%d.%09ds", s, ns))
	}
	return time.Duration(s)*time.Second + time.Duration(ns), nil
}
// makeTime builds an Emacs time value from Unix seconds and nanoseconds via
// the C module API, checking the result for a pending nonlocal exit.
func (e Env) makeTime(s int64, ns int) (Value, error) {
	return e.checkValue(C.phst_emacs_make_time(e.raw(), C.struct_timespec{C.time_t(s), C.long(ns)}))
}
func (e Env) extractTime(v Value) (s int64, ns int, err error) {
r := C.phst_emacs_extract_time(e.raw(), v.r)
return int64(r.value.tv_sec), int(r.value.tv_nsec), e.check(r.base)
} | time.go | 0.835249 | 0.541409 | time.go | starcoder |
package goalgorithms
// hoarePartition partitions a[left:right) in place using Hoare's scheme and
// returns an index j such that every element of a[left..j] is <= every
// element of a[j+1..right-1], with left <= j <= right-2, which guarantees
// progress when recursing on [left, j+1) and [j+1, right).
//
// Bug fix: the previous version compared before advancing the scan indices,
// which loops forever on ranges of equal elements (e.g. sorting [1,1] never
// terminated). The middle element is now swapped to the front and the loops
// advance before comparing, the classic terminating form of Hoare's scheme.
func hoarePartition(a []int, left, right int) int {
	// Keep the middle element as the pivot choice, but move it to the front
	// so the pivot-first scan below is guaranteed to make progress.
	mid := left + (right-left)/2
	a[left], a[mid] = a[mid], a[left]
	p := a[left]
	i := left - 1
	j := right
	for {
		for {
			i++
			if a[i] >= p {
				break
			}
		}
		for {
			j--
			if a[j] <= p {
				break
			}
		}
		if i >= j {
			return j
		}
		a[i], a[j] = a[j], a[i]
	}
}

// quickSortHoare recursively sorts the half-open range a[left:right).
// Bug fix: the partition index is now included in the left recursion
// ([left, j+1)); the previous recursion skipped index p entirely, which is
// only valid for Lomuto-style partitions that place the pivot in its final
// position.
func quickSortHoare(a []int, left, right int) {
	if right-left < 2 {
		return
	}
	j := hoarePartition(a, left, right)
	quickSortHoare(a, left, j+1)
	quickSortHoare(a, j+1, right)
}

// QuickSortHoare performs an in-place sort of an int slice in ascending order
// using quicksort with Hoare partitioning.
// Worst case time complexity: O(n^2)
// Average time complexity: O(n log(n))
// Worst case space complexity: O(n) (recursion depth)
func QuickSortHoare(a []int) {
	quickSortHoare(a, 0, len(a))
}
// max returns the larger of its two arguments.
func max(a, b int) int {
	if a > b {
		return a
	}
	return b
}
// medianOfThree returns the median of v1, v2 and v3. The slice parameter is
// unused but kept so existing call sites remain valid.
//
// Bug fix: the previous version used strict comparisons only, so any tie
// (e.g. 2, 2, 3) fell through to v3 even when v3 was not a median.
func medianOfThree(a []int, v1, v2, v3 int) int {
	if (v1 >= v2 && v1 <= v3) || (v1 >= v3 && v1 <= v2) {
		return v1
	}
	if (v2 >= v1 && v2 <= v3) || (v2 >= v3 && v2 <= v1) {
		return v2
	}
	return v3
}

// hoarePartitionM3 partitions a[left:right) in place using Hoare's scheme
// with a median-of-three pivot (first, middle and last element).
//
// Bug fix: the previous version compared before advancing the scan indices,
// which loops forever on ranges of equal elements (e.g. [1,1]). An occurrence
// of the median is now swapped to the front so the pivot-first scan below
// always terminates and returns j with left <= j <= right-2.
func hoarePartitionM3(a []int, left, right int) int {
	mid := left + (right-left)/2
	med := medianOfThree(a, a[left], a[mid], a[right-1])
	switch {
	case a[mid] == med:
		a[left], a[mid] = a[mid], a[left]
	case a[right-1] == med:
		a[left], a[right-1] = a[right-1], a[left]
	}
	p := a[left]
	i := left - 1
	j := right
	for {
		for {
			i++
			if a[i] >= p {
				break
			}
		}
		for {
			j--
			if a[j] <= p {
				break
			}
		}
		if i >= j {
			return j
		}
		a[i], a[j] = a[j], a[i]
	}
}

// quickSortHoareM3 recursively sorts the half-open range a[left:right).
// Bug fix: the partition index is now included in the left recursion
// ([left, j+1)); the previous recursion skipped index p entirely, which is
// only valid for Lomuto-style partitions.
func quickSortHoareM3(a []int, left, right int) {
	if right-left < 2 {
		return
	}
	j := hoarePartitionM3(a, left, right)
	quickSortHoareM3(a, left, j+1)
	quickSortHoareM3(a, j+1, right)
}

// QuickSortHoareM3 performs an in-place sort of an int slice in ascending
// order using Hoare partitioning with median-of-three pivot selection, which
// avoids quadratic behavior on already-sorted input.
// Average time complexity: O(n log(n)); worst case O(n^2).
// Worst case space complexity: O(n) (recursion depth).
func QuickSortHoareM3(a []int) {
	quickSortHoareM3(a, 0, len(a))
}
// lomutoPartition partitions a[left:right) in place around the pivot value
// a[right-1] using Lomuto's scheme and returns the pivot's final index.
func lomutoPartition(a []int, left, right int) int {
	pivot := a[right-1]
	store := left
	for scan := left; scan < right-1; scan++ {
		if a[scan] < pivot {
			a[store], a[scan] = a[scan], a[store]
			store++
		}
	}
	a[store], a[right-1] = a[right-1], a[store]
	return store
}
// quickSortLomuto recursively sorts the half-open range a[left:right).
func quickSortLomuto(a []int, left, right int) {
	if right-left >= 2 {
		p := lomutoPartition(a, left, right)
		quickSortLomuto(a, left, p)
		quickSortLomuto(a, p+1, right)
	}
}
// QuickSortLomuto performs in-place sort of int slice in ascending order using Lomuto partitioning.
// Worst case time compexity: O(n^2)
// Average time compexity: O(n log(n))
// Worst case space compexity: O(n)
func QuickSortLomuto(a []int) {
quickSortLomuto(a, 0, len(a))
} | sort/quick.go | 0.790813 | 0.642334 | quick.go | starcoder |
package main
import (
"fmt"
"strconv"
"github.com/jung-kurt/gofpdf"
)
// main is a gofpdf walkthrough: it creates a letter-sized PDF, demonstrates
// text, cells, shapes, images and multi-line cells, overlays a coordinate
// grid, and writes the result to hello.pdf.
func main() {
	// Change papersize to letter because 'Murica is special!
	pdf := gofpdf.New(gofpdf.OrientationPortrait, gofpdf.UnitPoint, gofpdf.PageSizeLetter, "")
	// Get page size. It depends on the unit defined in New.
	w, h := pdf.GetPageSize()
	fmt.Println(w, h)
	pdf.AddPage()
	pdf.MoveTo(0, 0)
	pdf.SetFont("Courier", "B", 16)
	pdf.SetTextColor(255, 0, 0)
	// To get the line height based on font size (which is important for going
	// to the next lines), use pdf.GetFontSize()
	// GetFontSize returns the size of the current font in points followed by the
	// size in the unit of measure specified in New(). The second value can be used
	// as a line height value in drawing operations.
	spt, lpt := pdf.GetFontSize()
	fmt.Printf("font size: 16, fontsize-point: %f, fontsize-unit: %f\n", spt, lpt)
	// So if we want the next line to be visible but at the top of the page,
	// we need to start it from lpt.
	pdf.Text(0, lpt, "Line one in red")
	// To get to next line, we can use MoveTo.
	pdf.MoveTo(0, lpt*2)
	// To get the coordinates we can do this.
	curX, curY := pdf.GetXY()
	// Modify and render next line.
	pdf.SetTextColor(0, 255, 0)
	pdf.Text(curX, curY, "Text two")
	// We can also use cells. Cells are like textboxes.
	pdf.MoveTo(0, lpt*2)
	curX, curY = pdf.GetXY()
	pdf.SetTextColor(0, 0, 255)
	pdf.Cell(curX, curY, "Cell line in blue")
	// Draw color is the color of the lines (e.g. edges of shapes.)
	pdf.SetDrawColor(0, 255, 0)
	// Fill color is the color inside the edges.
	pdf.SetFillColor(0, 0, 255)
	// Create a rectangle.
	// F in style means fill, D in style means draw.
	// We already set draw and fill color.
	pdf.Rect(100, 100, 100, 100, "FD")
	// Create arbitrary looking shapes with polygon.
	// Point type is
	// type PointType struct {
	//	X, Y float64
	// }
	// Order is important, last point is connected back to the first point.
	// This creates a square.
	points := []gofpdf.PointType{
		{140, 140},
		{240, 140},
		{240, 240},
		{140, 240},
	}
	pdf.Polygon(points, "FD")
	// Add images.
	// First we need to create imageoptions.
	// type ImageOptions struct {
	//	ImageType string
	//	ReadDpi bool
	//	AllowNegativePosition bool
	// }
	// ImageType: Type of image (e.g. jpeg, jpg, gif, png). If empty, uses extension.
	// ReadDpi: If true, read the DPI from image. Default is false.
	// AllowNegativePosition: Did not understand what it does, keeping at false.
	opt1 := gofpdf.ImageOptions{
		ImageType:             "jpg",
		ReadDpi:               true,
		AllowNegativePosition: false,
	}
	// Images can have links, if internal, create a link with AddLink() and put
	// the resulting number in the parameter one before last, otherwise pass as
	// 0 and put the external link URL in the last parameter.
	pdf.ImageOptions("img/gopher1.jpg", 240, 80, 0, 0, true, opt1, 0, "https://example.net")
	pdf.MoveTo(60, 270)
	// Multicell creates a text cell that wraps and has a specific dimension.
	// In this case we are creating 140 width. Second parameter is height of each line.
	// So we set it to line height for current font size.
	// borderStr specifies (4th param)
	pdf.MultiCell(
		140, // Width of cell.
		lpt, // Height of each line, so it's set to line height for current font size.
		"This is a long text that is supposed to wrap around the cell, more text and text", // Text
		"0", // how the cell border will be drawn. An empty string
		// indicates no border, "1" indicates a full border, and one or more of "L",
		// "T", "R" and "B" indicate the left, top, right and bottom sides of the
		// border.
		"LT", // alignStr specifies how the text is to be positioned within the cell.
		// Horizontal alignment is controlled by including "L", "C" or "R" (left,
		// center, right) in alignStr. Vertical alignment is controlled by including
		// "T", "M", "B" or "A" (top, middle, bottom, baseline) in alignStr. The default
		// alignment is left middle.
		false, // fill is true to paint the cell background or false to leave it transparent.
	)
	// Overlay a labeled coordinate grid to make absolute positioning easier.
	drawGrid(pdf)
	if err := pdf.OutputFileAndClose("hello.pdf"); err != nil {
		panic(err)
	}
}
// drawGrid draws a grid in the PDF for better navigation.
func drawGrid(pdf *gofpdf.Fpdf) {
w, h := pdf.GetPageSize()
pdf.SetFont("courier", "B", 10)
pdf.SetTextColor(80, 80, 80)
pdf.SetDrawColor(125, 125, 125)
_, lpt := pdf.GetFontSize()
for x := 0.0; x < w; x += (w / 20.0) {
pdf.Line(x, 0, x, h)
pdf.Text(x, lpt, strconv.Itoa(int(x)))
}
for y := 0.0; y < h; y += (h / 20.0) {
pdf.Line(0, y, w, y)
pdf.Text(lpt, y, strconv.Itoa(int(y)))
}
} | gophercises/20-pdf/learn-gofpdf/main.go | 0.607081 | 0.407333 | main.go | starcoder |
package sequence_flow
import (
"fmt"
"bpxe.org/pkg/bpmn"
"bpxe.org/pkg/errors"
)
// SequenceFlow pairs a bpmn.SequenceFlow with the definitions it belongs to,
// so that its source and target flow nodes can be resolved by id.
type SequenceFlow struct {
	*bpmn.SequenceFlow
	// definitions is the document used to resolve node references.
	definitions *bpmn.Definitions
}
// Make constructs a SequenceFlow value wrapping the given flow element and
// its containing definitions.
func Make(sequenceFlow *bpmn.SequenceFlow, definitions *bpmn.Definitions) SequenceFlow {
	return SequenceFlow{SequenceFlow: sequenceFlow, definitions: definitions}
}
// New constructs a SequenceFlow and returns a pointer to it.
func New(sequenceFlow *bpmn.SequenceFlow, definitions *bpmn.Definitions) *SequenceFlow {
	flow := Make(sequenceFlow, definitions)
	return &flow
}
// resolveId looks up the flow node with the given id. It first locates the
// process owning this sequence flow (by scanning every process for a sequence
// flow whose id matches our own) and then searches that process for a flow
// node with the requested id.
func (sequenceFlow *SequenceFlow) resolveId(id *string) (result bpmn.FlowNodeInterface, err error) {
	ownId, present := sequenceFlow.SequenceFlow.Id()
	if !present {
		err = errors.InvalidStateError{
			Expected: "SequenceFlow to have an FlowNodeId",
			Actual:   "FlowNodeId is not present",
		}
		return
	}
	// Find the process that contains this sequence flow.
	var process *bpmn.Process
	for i := range *sequenceFlow.definitions.Processes() {
		proc := &(*sequenceFlow.definitions.Processes())[i]
		sequenceFlows := proc.SequenceFlows()
		for j := range *sequenceFlows {
			if idPtr, present := (*sequenceFlows)[j].Id(); present {
				if *idPtr == *ownId {
					process = proc
				}
			}
		}
	}
	if process == nil {
		err = errors.NotFoundError{
			Expected: fmt.Sprintf("sequence flow with ID %s", *ownId),
		}
		return
	}
	// Search the owning process for a flow node with the requested id.
	if flowNode, found := process.FindBy(
		bpmn.ExactId(*id).
			And(bpmn.ElementInterface((*bpmn.FlowNodeInterface)(nil)))); found {
		result = flowNode.(bpmn.FlowNodeInterface)
	} else {
		err = errors.NotFoundError{Expected: fmt.Sprintf("flow node with ID %s", *id)}
	}
	return
}
// Source resolves and returns the flow node this sequence flow originates
// from (its sourceRef).
func (sequenceFlow *SequenceFlow) Source() (bpmn.FlowNodeInterface, error) {
	return sequenceFlow.resolveId(sequenceFlow.SequenceFlow.SourceRef())
}
// Target resolves and returns the flow node this sequence flow points at
// (its targetRef).
func (sequenceFlow *SequenceFlow) Target() (bpmn.FlowNodeInterface, error) {
	return sequenceFlow.resolveId(sequenceFlow.SequenceFlow.TargetRef())
}
func (sequenceFlow *SequenceFlow) TargetIndex() (index int, err error) {
var target bpmn.FlowNodeInterface
target, err = sequenceFlow.Target()
if err != nil {
return
}
// ownId is present since Target() already checked for this
ownId, _ := sequenceFlow.SequenceFlow.Id()
incomings := target.Incomings()
for i := range *incomings {
if (*incomings)[i] == *ownId {
index = i
return
}
}
err = errors.NotFoundError{Expected: fmt.Sprintf("matching incoming for %s", *ownId)}
return
} | pkg/sequence_flow/sequence_flow.go | 0.548674 | 0.494385 | sequence_flow.go | starcoder |
package did
import (
"fmt"
"strings"
"github.com/di-wu/parser/ast"
)
// Argument describes the argument types of a Field.
type Argument struct {
	// Name only serves documentation purposes and has no semantic
	// significance; it is nil for unnamed arguments.
	Name *string
	// Data is the data type of the argument.
	Data Data
}
// convertArgument maps an AST argument node onto an Argument. A node with a
// single child carries only a data type; with more children, the first child
// holds the argument name.
func convertArgument(n *ast.Node) Argument {
	arg := Argument{Data: convertData(n.LastChild)}
	if len(n.Children()) > 1 {
		arg.Name = &n.FirstChild.Value
	}
	return arg
}
// String renders the argument as "name : type", or just the type when the
// argument is unnamed.
func (a Argument) String() string {
	if a.Name == nil {
		return a.Data.String()
	}
	return fmt.Sprintf("%s : %s", *a.Name, a.Data.String())
}
// Func indicates the function’s signature (argument and results types, annotations),
// and values of this type are references to functions with that signature.
type Func struct {
	// ArgTypes is the list of parameters.
	ArgTypes Tuple
	// ResTypes is the list of results.
	ResTypes Tuple
	// Annotation indicates an (optional) invocation method; nil when absent.
	Annotation *FuncAnnotation
}
// convertFunc maps an AST function-type node onto a Func. The node's children
// are positional: the argument tuple, the result tuple, and an optional
// annotation. Any further child is unexpected and causes a panic.
func convertFunc(n *ast.Node) Func {
	var f Func
	for i, n := range n.Children() {
		switch i {
		case 0:
			f.ArgTypes = convertTuple(n)
		case 1:
			f.ResTypes = convertTuple(n)
		case 2:
			ann := FuncAnnotation(n.Value)
			f.Annotation = &ann
		default:
			panic(n)
		}
	}
	return f
}
// String renders the signature as "args -> results", appending the annotation
// when one is present.
func (f Func) String() string {
	out := f.ArgTypes.String() + " -> " + f.ResTypes.String()
	if f.Annotation != nil {
		out += " " + string(*f.Annotation)
	}
	return out
}
// FuncAnnotation represents a function annotation; its defined values are
// AnnOneWay and AnnQuery.
type FuncAnnotation string

const (
	// AnnOneWay indicates that this function returns no response, intended for
	// fire-and-forget scenarios.
	AnnOneWay FuncAnnotation = "oneway"
	// AnnQuery indicates that the referenced function is a query method, meaning
	// it does not alter the state of its canister, and that it can be invoked
	// using the cheaper “query call” mechanism.
	AnnQuery FuncAnnotation = "query"
)
// Tuple represents a (possibly empty) ordered list of arguments.
type Tuple []Argument
// convertTuple converts each child of the AST node into an Argument and
// collects them; a node without children yields a nil Tuple.
func convertTuple(n *ast.Node) Tuple {
	var t Tuple
	for _, child := range n.Children() {
		t = append(t, convertArgument(child))
	}
	return t
}
func (t Tuple) String() string {
if len(t) == 1 {
s := t[0].String()
if strings.Contains(s, " ") {
return "(" + s + ")"
}
return s
}
s := "("
for i, a := range t {
s += a.String()
if i != len(t)-1 {
s += ", "
}
}
return s + ")"
} | did/func.go | 0.708717 | 0.414306 | func.go | starcoder |
package builds
import (
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
buildapi "github.com/openshift/origin/pkg/build/api"
exutil "github.com/openshift/origin/test/extended/util"
eximages "github.com/openshift/origin/test/extended/images"
)
// This spec builds the test image twice — once via an S2I BuildConfig and
// once via a Docker BuildConfig — waits for each build to complete, and then
// verifies the produced image carries the expected io.openshift.build.*
// Docker labels.
// NOTE(review): the two It blocks are near-identical; a helper parameterized
// by the build fixture path would remove the duplication.
var _ = Describe("default: Check S2I and Docker build image for proper labels", func() {
	defer GinkgoRecover()
	var (
		// Fixture paths for the shared image stream and the two build configs.
		imageStreamFixture = exutil.FixturePath("..", "integration", "fixtures", "test-image-stream.json")
		stiBuildFixture    = exutil.FixturePath("fixtures", "test-sti-build.json")
		dockerBuildFixture = exutil.FixturePath("fixtures", "test-docker-build.json")
		oc                 = exutil.NewCLI("build-sti-env", exutil.KubeConfigPath())
	)
	Describe("S2I build from a template", func() {
		It(fmt.Sprintf("should create a image from %q template with proper Docker labels", stiBuildFixture), func() {
			oc.SetOutputDir(exutil.TestContext.OutputDir)
			By(fmt.Sprintf("calling oc create -f %q", imageStreamFixture))
			err := oc.Run("create").Args("-f", imageStreamFixture).Execute()
			Expect(err).NotTo(HaveOccurred())
			By(fmt.Sprintf("calling oc create -f %q", stiBuildFixture))
			err = oc.Run("create").Args("-f", stiBuildFixture).Execute()
			Expect(err).NotTo(HaveOccurred())
			By("starting a test build")
			buildName, err := oc.Run("start-build").Args("test").Output()
			Expect(err).NotTo(HaveOccurred())
			By("expecting the S2I build is in Complete phase")
			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), buildName,
				// The build passed
				func(b *buildapi.Build) bool {
					return b.Name == buildName && b.Status.Phase == buildapi.BuildPhaseComplete
				},
				// The build failed
				func(b *buildapi.Build) bool {
					if b.Name != buildName {
						return false
					}
					return b.Status.Phase == buildapi.BuildPhaseFailed || b.Status.Phase == buildapi.BuildPhaseError
				},
			)
			Expect(err).NotTo(HaveOccurred())
			By("getting the Docker image reference from ImageStream")
			imageRef, err := exutil.GetDockerImageReference(oc.REST().ImageStreams(oc.Namespace()), "test", "latest")
			Expect(err).NotTo(HaveOccurred())
			imageLabels, err := eximages.GetImageLabels(oc.REST().ImageStreamImages(oc.Namespace()), "test", imageRef)
			Expect(err).NotTo(HaveOccurred())
			By("inspecting the new image for proper Docker labels")
			err = expectOpenShiftLabels(imageLabels)
			Expect(err).NotTo(HaveOccurred())
		})
	})
	Describe("Docker build from a template", func() {
		It(fmt.Sprintf("should create a image from %q template with proper Docker labels", dockerBuildFixture), func() {
			oc.SetOutputDir(exutil.TestContext.OutputDir)
			By(fmt.Sprintf("calling oc create -f %q", imageStreamFixture))
			err := oc.Run("create").Args("-f", imageStreamFixture).Execute()
			Expect(err).NotTo(HaveOccurred())
			By(fmt.Sprintf("calling oc create -f %q", dockerBuildFixture))
			err = oc.Run("create").Args("-f", dockerBuildFixture).Execute()
			Expect(err).NotTo(HaveOccurred())
			By("starting a test build")
			buildName, err := oc.Run("start-build").Args("test").Output()
			Expect(err).NotTo(HaveOccurred())
			By("expecting the Docker build is in Complete phase")
			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), buildName,
				// The build passed
				func(b *buildapi.Build) bool {
					return b.Name == buildName && b.Status.Phase == buildapi.BuildPhaseComplete
				},
				// The build failed
				func(b *buildapi.Build) bool {
					if b.Name != buildName {
						return false
					}
					return b.Status.Phase == buildapi.BuildPhaseFailed || b.Status.Phase == buildapi.BuildPhaseError
				},
			)
			Expect(err).NotTo(HaveOccurred())
			By("getting the Docker image reference from ImageStream")
			imageRef, err := exutil.GetDockerImageReference(oc.REST().ImageStreams(oc.Namespace()), "test", "latest")
			Expect(err).NotTo(HaveOccurred())
			imageLabels, err := eximages.GetImageLabels(oc.REST().ImageStreamImages(oc.Namespace()), "test", imageRef)
			Expect(err).NotTo(HaveOccurred())
			By("inspecting the new image for proper Docker labels")
			err = expectOpenShiftLabels(imageLabels)
			Expect(err).NotTo(HaveOccurred())
		})
	})
})
// expectOpenShiftLabels verifies that a built Docker image carries all of the
// io.openshift.build.* labels OpenShift is expected to attach, treating a
// missing key and an empty value the same way. It returns an error naming the
// first absent label, or nil when all are present.
func expectOpenShiftLabels(labels map[string]string) error {
	expectedLabels := []string{
		"io.openshift.build.commit.author",
		"io.openshift.build.commit.date",
		"io.openshift.build.commit.id",
		"io.openshift.build.commit.ref",
		"io.openshift.build.commit.message",
		"io.openshift.build.source-location",
		"io.openshift.build.source-context-dir",
	}
	for _, label := range expectedLabels {
		if labels[label] == "" {
			// Message fix: "Builded" -> "built", lowercased per Go
			// error-string convention.
			return fmt.Errorf("built image doesn't contain proper Docker image labels. Missing %q label", label)
		}
	}
	return nil
}
package main
import (
"fmt"
"strings"
"github.com/zyedidia/micro/cmd/micro/optionprovider"
"github.com/zyedidia/tcell"
)
// OptionProvider is the signature of a function which returns all of the available options, potentially using the prefix
// data. For example, given input "abc\nab", start offset 4 and end offset 5, then the prefix is "ab", and the result
// should be the option "abc".
// Logger provides logging. Can be satisfied with t.Logf for tests, or LogToMessenger.
// startOffsetDelta lets a provider shift the reported completion start point
// relative to the startOffset it was given.
type OptionProvider func(logger func(s string, values ...interface{}), buffer []byte, startOffset, endOffset int) (options []optionprovider.Option, startOffsetDelta int, err error)
// ContentSetter is the signature of a function which allows the content of a cell to be set.
// It mirrors the parameters of tcell's SetContent.
type ContentSetter func(x int, y int, mainc rune, combc []rune, style tcell.Style)
// CompleterEnabledFlagFromView returns a closure reporting whether the
// "autocomplete" buffer setting exists, is a boolean, and is true.
func CompleterEnabledFlagFromView(v *View) func() bool {
	return func() bool {
		enabled, ok := v.Buf.Settings["autocomplete"].(bool)
		return ok && enabled
	}
}
// CurrentBytesAndOffsetFromView returns a closure yielding the buffer's full
// contents and the byte offset of the cursor within them.
func CurrentBytesAndOffsetFromView(v *View) func() (bytes []byte, offset int) {
	return func() ([]byte, int) {
		return v.Buf.Buffer(false).Bytes(), ByteOffset(v.Cursor.Loc, v.Buf)
	}
}
// LocationOffsetFromView returns a closure that converts a buffer location
// into its byte offset within the view's buffer.
func LocationOffsetFromView(v *View) func(Loc) (offset int) {
	return func(l Loc) int {
		return ByteOffset(l, v.Buf)
	}
}
// CurrentLocationFromView returns a closure yielding the cursor's current
// location in the view.
func CurrentLocationFromView(v *View) func() Loc {
	return func() Loc { return v.Cursor.Loc }
}
// ReplaceFromBuffer returns a closure that replaces the text between two
// locations in the buffer and moves the cursor to the end of the insertion.
func ReplaceFromBuffer(buf *Buffer) func(from, to Loc, with string) {
	return func(from, to Loc, with string) {
		LogToMessenger()("replacing from %v to %v with %s", from, to, with)
		buf.Replace(from, to, with)
		// NOTE(review): the new cursor X assumes the replacement stays on one
		// line (from.X + len(with) on row from.Y) — confirm callers never
		// insert newlines here.
		buf.Cursor.GotoLoc(Loc{X: from.X + len(with), Y: from.Y})
	}
}
// ContentSetterForView returns a ContentSetter that translates document
// coordinates into screen coordinates for the view before drawing: y is
// shifted by the top visible line, and x by the horizontal scroll plus the
// width of the line-number gutter.
func ContentSetterForView(v *View) ContentSetter {
	return func(x int, y int, mainc rune, combc []rune, style tcell.Style) {
		targetY := y - v.Topline
		targetX := x + v.leftCol + v.lineNumOffset
		screen.SetContent(targetX, targetY, mainc, combc, style)
	}
}
// LogToMessenger returns a printf-style logging function that appends to the
// global messenger's log.
func LogToMessenger() func(s string, values ...interface{}) {
	return func(format string, values ...interface{}) {
		messenger.AddLog(fmt.Sprintf(format, values...))
	}
}
// Completer provides as-you-type code completion: it tracks activation state,
// the current suggestion list, and the hooks used to read from and draw to
// the view it completes for.
type Completer struct {
	// Active is the state which determines whether the completer is active (visible) or not.
	Active bool
	// X stores the X position of the suggestion box
	X int
	// Y stores the Y position of the suggestion box
	Y int
	// Options stores the current list of suggestions.
	Options []optionprovider.Option
	// ActiveIndex stores the index of the active option (the one that will be selected).
	ActiveIndex int
	// Activators are insertions that start autocomplete, e.g. a "." or an opening bracket "(".
	Activators map[rune]int
	// Deactivators are insertions that stop autocomplete, e.g. a closing bracket, or a semicolon.
	Deactivators []rune
	// Provider is the provider of completion options, e.g. gocode, or another provider such as a language server.
	Provider OptionProvider
	// Logger is where log messages are written via fmt.Sprintf.
	Logger func(s string, values ...interface{})
	// CurrentBytesAndOffset is a function which returns the bytes and the current offset position from the current view.
	CurrentBytesAndOffset func() (bytes []byte, offset int)
	// CurrentLocation is a function which returns the current location of the cursor.
	CurrentLocation func() Loc
	// LocationOffset is a function which returns the offset of a given location.
	LocationOffset func(Loc) int
	// Replacer is a function which replaces text.
	Replacer func(from, to Loc, with string)
	// Setter is a function which draws to the console at a given location.
	Setter ContentSetter
	// OptionStyleInactive is the style for completer options which are not currently highlighted.
	OptionStyleInactive tcell.Style
	// OptionStyleActive is the style for completer options which are currently highlighted.
	OptionStyleActive tcell.Style
	// Enabled determines whether the view has the enabled flag set or not.
	Enabled func() bool
	// PreviousLocation stores the last known location of the cursor.
	PreviousLocation Loc
}
// defaultActivators sets whether the character should start autocompletion. The value of zero means that
// the character itself is not included in the replacement, -1 means that it is.
// Letters activate with -1 so that the letter just typed becomes part of the completed word;
// "." and "(" activate with 0 so completion starts after them.
var defaultActivators = map[rune]int{
	'.': 0,
	'(': 0,
	'a': -1, 'b': -1, 'c': -1, 'd': -1, 'e': -1, 'f': -1, 'g': -1, 'h': -1, 'i': -1, 'j': -1, 'k': -1, 'l': -1, 'm': -1, 'n': -1, 'o': -1, 'p': -1, 'q': -1, 'r': -1, 's': -1, 't': -1, 'u': -1, 'v': -1, 'w': -1, 'x': -1, 'y': -1, 'z': -1,
	'A': -1, 'B': -1, 'C': -1, 'D': -1, 'E': -1, 'F': -1, 'G': -1, 'H': -1, 'I': -1, 'J': -1, 'K': -1, 'L': -1, 'M': -1, 'N': -1, 'O': -1, 'P': -1, 'Q': -1, 'R': -1, 'S': -1, 'T': -1, 'U': -1, 'V': -1, 'W': -1, 'X': -1, 'Y': -1, 'Z': -1,
}
// defaultDeactivators are the runes that dismiss the completion menu when typed.
const defaultDeactivators = "), \n."
// NewCompleterForView creates a new autocompleter with defaults for writing to the console.
// The option provider is chosen by file extension: gocode for .go files, a generic
// word-based provider otherwise.
func NewCompleterForView(v *View) *Completer {
	var provider OptionProvider
	// Load the provider based on filename.
	fileName := v.Buf.GetName()
	if strings.HasSuffix(fileName, ".go") {
		provider = optionprovider.GoCode
	} else {
		provider = optionprovider.Generic
	}
	// If no matching provider was found, we can't autocomplete.
	// NOTE(review): provider is always assigned by the if/else above, so this
	// check only matters if optionprovider.Generic can itself be nil — confirm.
	if provider == nil {
		provider = optionprovider.Noop
	}
	return NewCompleter(defaultActivators, []rune(defaultDeactivators),
		provider,
		LogToMessenger(),
		CurrentBytesAndOffsetFromView(v),
		CurrentLocationFromView(v),
		LocationOffsetFromView(v),
		ReplaceFromBuffer(v.Buf),
		ContentSetterForView(v),
		colorscheme["default"].Reverse(true),
		colorscheme["default"],
		CompleterEnabledFlagFromView(v),
	)
}
// NewCompleter creates a new completer with all options exposed. See NewCompleterForView for more common usage.
// Each argument maps one-to-one onto the identically named Completer field; see the
// Completer struct documentation for the meaning of each.
func NewCompleter(activators map[rune]int,
	deactivators []rune,
	provider OptionProvider,
	logger func(s string, values ...interface{}),
	currentBytesAndOffset func() (bytes []byte, offset int),
	currentLocation func() Loc,
	locationOffset func(Loc) int,
	replacer func(from, to Loc, with string),
	setter ContentSetter,
	optionStyleInactive tcell.Style,
	optionStyleActive tcell.Style,
	enabled func() bool) *Completer {
	return &Completer{
		Activators:            activators,
		Deactivators:          deactivators,
		Provider:              provider,
		Logger:                logger,
		CurrentBytesAndOffset: currentBytesAndOffset,
		CurrentLocation:       currentLocation,
		LocationOffset:        locationOffset,
		Replacer:              replacer,
		Setter:                setter,
		OptionStyleInactive:   optionStyleInactive,
		OptionStyleActive:     optionStyleActive,
		Enabled:               enabled,
	}
}
// Process handles incoming events from the view and starts looking up via autocomplete.
// It deactivates on Deactivator runes, activates on Activator runes (recording the
// completion start position), and while active refreshes Options from the Provider.
func (c *Completer) Process(r rune) error {
	if !c.Enabled() {
		return nil
	}
	if c.Provider == nil {
		return nil
	}
	// Hide the autocomplete view if needed.
	if c.Active && containsRune(c.Deactivators, r) {
		c.Logger("completer.Process: deactivating, because received %v", string(r))
		c.Active = false
	}
	if !c.Active {
		// Check to work out whether we should activate the autocomplete.
		if indexAdjustment, ok := c.Activators[r]; ok {
			c.Logger("completer.Process: activating, because received %v", string(r))
			c.Active = true
			currentLocation := c.CurrentLocation()
			c.PreviousLocation = currentLocation
			// indexAdjustment is 0 or -1 (see defaultActivators): -1 includes
			// the activating rune itself in the range that will be replaced.
			c.X, c.Y = currentLocation.X+indexAdjustment, currentLocation.Y
			c.Logger("completer.Process: SetStartPosition to %d, %d", c.X, c.Y)
		}
	}
	if !c.Active {
		// We're not active.
		return nil
	}
	// Get options.
	//TODO: We only need the answer by the time Display is called, so we could let the rest of the
	// program continue until we're ready to receive the value by using a go routine or channel.
	bytes, currentOffset := c.CurrentBytesAndOffset()
	startOffset := c.LocationOffset(Loc{X: c.X, Y: c.Y})
	options, delta, err := c.Provider(c.Logger, bytes, startOffset, currentOffset)
	if err != nil {
		return err
	}
	// The provider may shift the completion start position by delta columns.
	c.X += delta
	c.Options = options
	c.ActiveIndex = 0
	// If there are no options, just deactivate.
	if len(options) == 0 {
		c.Logger("completer.Process: Deactivating because there are no options")
		c.Active = false
	}
	return err
}
// HandleEvent handles incoming key presses if the completer is active.
// It returns true if it took over the key action, or false if it didn't.
// Up/Down move the highlighted option, Esc dismisses the menu, and
// Tab/Enter insert the highlighted option via Replacer.
func (c *Completer) HandleEvent(key tcell.Key) bool {
	if !c.Enabled() {
		c.Logger("completer.HandleEvent: not enabled")
		return false
	}
	if !c.Active {
		c.Logger("completer.HandleEvent: not active")
		return false
	}
	// Handle selecting various options in the list. Go switch cases do not
	// fall through, so the redundant break statements have been removed
	// (staticcheck S1023).
	switch key {
	case tcell.KeyUp:
		if c.ActiveIndex > 0 {
			c.ActiveIndex--
		}
	case tcell.KeyDown:
		if c.ActiveIndex < len(c.Options)-1 {
			c.ActiveIndex++
		}
	case tcell.KeyEsc:
		c.Active = false
	case tcell.KeyTab, tcell.KeyEnter:
		// Complete the text from the recorded start position to the cursor.
		if toUse, ok := getOption(c.ActiveIndex, c.Options); ok {
			c.Replacer(Loc{X: c.X, Y: c.Y}, c.CurrentLocation(), toUse)
		}
		c.Active = false
	default:
		// Not part of the keys that the autocomplete menu handles.
		return false
	}
	// The completer handled the key.
	return true
}
// getOption returns the text of the option at index i. Indices past the end
// report ok == false; negative indices are clamped to the first option.
func getOption(i int, options []optionprovider.Option) (toUse string, ok bool) {
	if len(options) == 0 || i >= len(options) {
		return "", false
	}
	if i < 0 {
		i = 0
	}
	return options[i].Text(), true
}
// DeactivateIfOutOfBounds for example, if duplicating lines or backspacing past the start of the completion.
// Disables autocomplete if we've switched lines (e.g. by duplicating a line, or
// moving the cursor away), moved more than one column since the last check, or
// moved before the completion start.
func (c *Completer) DeactivateIfOutOfBounds() {
	if !c.Active {
		return
	}
	cur := c.CurrentLocation()
	beforeStart := cur.X < c.X
	movedMoreThanOneXSinceLastCheck := distance(c.PreviousLocation.X, cur.X) > 1
	// Fix: the arguments were previously passed in the opposite order to the
	// "Previous loc ... current loc ..." labels, and the log prefix misspelled
	// the receiver as "completed".
	c.Logger("completer.DeactivateIfOutOfBounds: Previous loc %v, current loc %v, distance: %v", c.PreviousLocation, cur, distance(c.PreviousLocation.X, cur.X))
	movedLine := cur.Y != c.Y
	if beforeStart || movedMoreThanOneXSinceLastCheck || movedLine {
		c.Logger("completer.DeactivateIfOutOfBounds: deactivating")
		c.Active = false
	}
	c.PreviousLocation = cur
}
// distance returns the absolute difference between a and b.
func distance(a, b int) int {
	if a < b {
		return b - a
	}
	return a - b
}
// Display the suggestion box.
// Draws one row per option starting on the line below the cursor, highlighting
// the currently selected option and padding every row to the widest option.
func (c *Completer) Display() {
	if !c.Enabled() {
		c.Logger("completer.Display: not enabled")
		return
	}
	if !c.Active {
		return
	}
	c.Logger("completer.Display: showing %d options", len(c.Options))
	width := getWidth(c.Options)
	start := c.CurrentLocation()
	for iy, o := range c.Options {
		y := start.Y + iy + 1 // +1 to draw a line below the cursor.
		// If it's active, show it differently.
		style := c.OptionStyleInactive
		if c.ActiveIndex == iy {
			style = c.OptionStyleActive
		}
		// Draw the runes. padRight pads with NUL runes — presumably the
		// screen renders those as blanks; confirm against the Setter target.
		for ix, r := range padRight(o.Text(), width+1) {
			x := start.X + ix
			c.Setter(x, y, r, nil, style)
		}
	}
}
// getWidth returns the length in bytes of the longest option text, or zero
// when there are no options.
func getWidth(options []optionprovider.Option) (max int) {
	for _, option := range options {
		if width := len(option.Text()); width > max {
			max = width
		}
	}
	return max
}
// padRight extends s to at least minSize bytes by appending NUL bytes;
// strings already at least minSize long are returned unchanged.
func padRight(s string, minSize int) string {
	if pad := minSize - len(s); pad > 0 {
		// make yields zeroed bytes, so the suffix is pad NUL characters.
		return s + string(make([]byte, pad))
	}
	return s
}
// containsRune reports whether r appears anywhere in array.
func containsRune(array []rune, r rune) bool {
	for _, candidate := range array {
		if candidate == r {
			return true
		}
	}
	return false
}
package ds
// Heap vs Binary tree:
// - Heap is implemented as a array so no memory overhead. For each node in the binary tree two pointers are allocated.
// - **Average** insertion in heap is O(1) comparing to O(logn).
// - Heap only supports min operation and is simpler than binary tree.
// - Heap is easier to implement, balancing red-black trees is annoying.
// - Creating heap is O(N) while creating binary tree is O(n*logn)
// Detailed summary here https://stackoverflow.com/questions/6147242/heap-vs-binary-search-tree-bst
// Heap is a slice-backed binary heap. Compare orders elements: for a min-heap
// it reports a < b, for a max-heap a > b. Len tracks the number of live
// elements; Values may retain spare, already-allocated slots past Len.
type Heap struct {
	Values  []int
	Len     int
	Compare func(int, int) bool
}

// NewMinHeap returns an empty heap whose root is the smallest element.
func NewMinHeap() *Heap {
	return &Heap{
		Values:  []int{},
		Compare: func(a, b int) bool { return a < b },
	}
}

// NewMaxHeap returns an empty heap whose root is the largest element.
func NewMaxHeap() *Heap {
	return &Heap{
		Values:  []int{},
		Compare: func(a, b int) bool { return a > b },
	}
}

// Min returns the root element without removing it (the minimum for a
// min-heap, the maximum for a max-heap). It reports false with -1 when the
// heap is empty.
func (h *Heap) Min() (bool, int) {
	if h.Len == 0 {
		return false, -1
	}
	return true, h.Values[0]
}

// ExtractMin removes and returns the root element in O(log n). It reports
// false with -1 when the heap is empty.
func (h *Heap) ExtractMin() (bool, int) {
	if h.Len == 0 {
		return false, -1
	}
	root := h.Values[0]
	// Promote the last element to the root, clear its old slot, then restore
	// the heap property by sifting the new root down.
	h.Values[0] = h.Values[h.Len-1]
	h.Values[h.Len-1] = -1
	h.Len--
	h.siftDown()
	return true, root
}

// Insert adds value to the heap in O(log n), reusing any spare slice slot
// left behind by earlier extractions before growing the slice.
func (h *Heap) Insert(value int) {
	if h.Len < len(h.Values) {
		h.Values[h.Len] = value
	} else {
		h.Values = append(h.Values, value)
	}
	h.Len++
	h.siftUp()
}

// siftUp restores the heap property by bubbling the most recently inserted
// element toward the root while it compares ahead of its parent.
func (h *Heap) siftUp() {
	for child := h.Len - 1; child > 0; {
		parent := (child - 1) / 2
		if !h.Compare(h.Values[child], h.Values[parent]) {
			return
		}
		h.Values[child], h.Values[parent] = h.Values[parent], h.Values[child]
		child = parent
	}
}

// siftDown restores the heap property by pushing the root toward the leaves,
// swapping with the better-ordered child at each level.
func (h *Heap) siftDown() {
	pos := 0
	for {
		best := h.Values[pos]
		target := -1
		if left := 2*pos + 1; left < h.Len && !h.Compare(best, h.Values[left]) {
			best = h.Values[left]
			target = left
		}
		if right := 2*pos + 2; right < h.Len && !h.Compare(best, h.Values[right]) {
			target = right
		}
		if target < 0 {
			return
		}
		h.Values[pos], h.Values[target] = h.Values[target], h.Values[pos]
		pos = target
	}
}
package gen
import (
pschema "github.com/pulumi/pulumi/pkg/v3/codegen/schema"
v1 "k8s.io/api/core/v1"
)
// serviceSpec overlays kubernetes:core/v1:ServiceSpec so that its "type"
// property accepts either a plain string or the ServiceSpecType enum.
var serviceSpec = pschema.ComplexTypeSpec{
	ObjectTypeSpec: pschema.ObjectTypeSpec{
		Properties: map[string]pschema.PropertySpec{
			"type": {
				TypeSpec: pschema.TypeSpec{
					OneOf: []pschema.TypeSpec{
						{Type: "string"},
						{Ref: "#/types/kubernetes:core/v1:ServiceSpecType"},
					},
				},
			},
		},
	},
}
// serviceSpecType is a string enum listing the valid Kubernetes Service
// types, sourced from the core/v1 API constants.
var serviceSpecType = pschema.ComplexTypeSpec{
	ObjectTypeSpec: pschema.ObjectTypeSpec{
		Type: "string",
	},
	Enum: []pschema.EnumValueSpec{
		{Value: v1.ServiceTypeExternalName},
		{Value: v1.ServiceTypeClusterIP},
		{Value: v1.ServiceTypeNodePort},
		{Value: v1.ServiceTypeLoadBalancer},
	},
}
// helmV3Release is the schema overlay describing the helm.sh/v3:Release
// object type (BETA). Its property set mirrors helmV3ReleaseResource below;
// keep the two declarations in sync when editing either.
var helmV3Release = pschema.ComplexTypeSpec{
	ObjectTypeSpec: pschema.ObjectTypeSpec{
		Description: "A Release is an instance of a chart running in a Kubernetes cluster.\nA Chart is a Helm package. It contains all of the resource definitions necessary to run an application, tool, or service inside of a Kubernetes cluster.\nNote - Helm Release is currently in BETA and may change. Use in production environment is discouraged.",
		Properties: map[string]pschema.PropertySpec{
			"name": {
				TypeSpec: pschema.TypeSpec{
					Type: "string",
				},
				Description: "Release name.",
			},
			"repositoryOpts": {
				TypeSpec: pschema.TypeSpec{
					Ref: "#/types/kubernetes:helm.sh/v3:RepositoryOpts",
				},
				Description: "Specification defining the Helm chart repository to use.",
			},
			"chart": {
				TypeSpec: pschema.TypeSpec{
					Type: "string",
				},
				Description: "Chart name to be installed. A path may be used.",
			},
			"version": {
				TypeSpec: pschema.TypeSpec{
					Type: "string",
				},
				Description: "Specify the exact chart version to install. If this is not specified, the latest version is installed.",
			},
			"devel": {
				TypeSpec: pschema.TypeSpec{
					Type: "boolean",
				},
				Description: "Use chart development versions, too. Equivalent to version '>0.0.0-0'. If `version` is set, this is ignored.",
			},
			"valueYamlFiles": {
				TypeSpec: pschema.TypeSpec{
					Type: "array",
					Items: &pschema.TypeSpec{
						Ref: "pulumi.json#/Asset",
					},
				},
				Description: "List of assets (raw yaml files). Content is read and merged with values. Not yet supported.",
			},
			"values": {
				TypeSpec: pschema.TypeSpec{
					Type: "object",
					AdditionalProperties: &pschema.TypeSpec{
						Ref: "pulumi.json#/Any",
					},
				},
				Description: "Custom values set for the release.",
			},
			"manifest": {
				TypeSpec: pschema.TypeSpec{
					Type: "object",
					AdditionalProperties: &pschema.TypeSpec{
						Ref: "pulumi.json#/Any",
					},
				},
				Description: "The rendered manifests as JSON. Not yet supported.",
			},
			"resourceNames": {
				TypeSpec: pschema.TypeSpec{
					Type: "object",
					AdditionalProperties: &pschema.TypeSpec{
						Type: "array",
						Items: &pschema.TypeSpec{
							Type: "string",
						},
					},
				},
				Description: "Names of resources created by the release grouped by \"kind/version\".",
			},
			"namespace": {
				TypeSpec: pschema.TypeSpec{
					Type: "string",
				},
				Description: "Namespace to install the release into.",
			},
			"verify": {
				TypeSpec: pschema.TypeSpec{
					Type: "boolean",
				},
				Description: "Verify the package before installing it.",
			},
			"keyring": {
				TypeSpec: pschema.TypeSpec{
					Type: "string",
				},
				Description: "Location of public keys used for verification. Used only if `verify` is true",
			},
			"timeout": {
				TypeSpec: pschema.TypeSpec{
					Type: "integer",
				},
				Description: "Time in seconds to wait for any individual kubernetes operation.",
			},
			"disableWebhooks": {
				TypeSpec: pschema.TypeSpec{
					Type: "boolean",
				},
				Description: "Prevent hooks from running.",
			},
			"disableCRDHooks": {
				TypeSpec: pschema.TypeSpec{
					Type: "boolean",
				},
				Description: "Prevent CRD hooks from, running, but run other hooks. See helm install --no-crd-hook",
			},
			"reuseValues": {
				TypeSpec: pschema.TypeSpec{
					Type: "boolean",
				},
				Description: "When upgrading, reuse the last release's values and merge in any overrides. If 'resetValues' is specified, this is ignored",
			},
			"resetValues": {
				TypeSpec: pschema.TypeSpec{
					Type: "boolean",
				},
				Description: "When upgrading, reset the values to the ones built into the chart.",
			},
			"forceUpdate": {
				TypeSpec: pschema.TypeSpec{
					Type: "boolean",
				},
				Description: "Force resource update through delete/recreate if needed.",
			},
			"recreatePods": {
				TypeSpec: pschema.TypeSpec{
					Type: "boolean",
				},
				Description: "Perform pods restart during upgrade/rollback.",
			},
			"cleanupOnFail": {
				TypeSpec: pschema.TypeSpec{
					Type: "boolean",
				},
				Description: "Allow deletion of new resources created in this upgrade when upgrade fails.",
			},
			"maxHistory": {
				TypeSpec: pschema.TypeSpec{
					Type: "integer",
				},
				Description: "Limit the maximum number of revisions saved per release. Use 0 for no limit.",
			},
			"atomic": {
				TypeSpec: pschema.TypeSpec{
					Type: "boolean",
				},
				Description: "If set, installation process purges chart on fail. `skipAwait` will be disabled automatically if atomic is used.",
			},
			"skipCrds": {
				TypeSpec: pschema.TypeSpec{
					Type: "boolean",
				},
				Description: "If set, no CRDs will be installed. By default, CRDs are installed if not already present.",
			},
			"renderSubchartNotes": {
				TypeSpec: pschema.TypeSpec{
					Type: "boolean",
				},
				Description: "If set, render subchart notes along with the parent.",
			},
			"disableOpenapiValidation": {
				TypeSpec: pschema.TypeSpec{
					Type: "boolean",
				},
				Description: "If set, the installation process will not validate rendered templates against the Kubernetes OpenAPI Schema",
			},
			"skipAwait": {
				TypeSpec: pschema.TypeSpec{
					Type: "boolean",
				},
				Description: "By default, the provider waits until all resources are in a ready state before marking the release as successful. Setting this to true will skip such await logic.",
			},
			"waitForJobs": {
				TypeSpec: pschema.TypeSpec{
					Type: "boolean",
				},
				Description: "Will wait until all Jobs have been completed before marking the release as successful. This is ignored if `skipAwait` is enabled.",
			},
			"dependencyUpdate": {
				TypeSpec: pschema.TypeSpec{
					Type: "boolean",
				},
				Description: "Run helm dependency update before installing the chart.",
			},
			"replace": {
				TypeSpec: pschema.TypeSpec{
					Type: "boolean",
				},
				Description: "Re-use the given name, even if that name is already used. This is unsafe in production",
			},
			"description": {
				TypeSpec: pschema.TypeSpec{
					Type: "string",
				},
				Description: "Add a custom description",
			},
			"createNamespace": {
				TypeSpec: pschema.TypeSpec{
					Type: "boolean",
				},
				Description: "Create the namespace if it does not exist.",
			},
			"postrender": {
				TypeSpec: pschema.TypeSpec{
					Type: "string",
				},
				Description: "Postrender command to run.",
			},
			"lint": {
				TypeSpec: pschema.TypeSpec{
					Type: "boolean",
				},
				Description: "Run helm lint when planning.",
			},
			"status": {
				TypeSpec: pschema.TypeSpec{
					Ref: "#/types/kubernetes:helm.sh/v3:ReleaseStatus",
				},
				Description: "Status of the deployed release.",
			},
		},
		Type: "object",
		Required: []string{
			"chart",
			"repositoryOpts",
			"values",
			"status",
		},
		Language: map[string]pschema.RawMessage{
			"nodejs": rawMessage(map[string][]string{
				"requiredOutputs": {
					"name",
					"repositoryOpts",
					"chart",
					"version",
					"devel",
					"values",
					// NOTE(review): "set" is not declared in Properties above — confirm
					// it is intentionally listed as a required output.
					"set",
					"manifest",
					"namespace",
					"verify",
					"keyring",
					"timeout",
					"disableWebhooks",
					"disableCRDHooks",
					"reuseValues",
					"resetValues",
					"forceUpdate",
					"recreatePods",
					"cleanupOnFail",
					"maxHistory",
					"atomic",
					"skipCrds",
					"renderSubchartNotes",
					"disableOpenapiValidation",
					"skipAwait",
					"waitForJobs",
					"dependencyUpdate",
					"replace",
					"description",
					"createNamespace",
					"postrender",
					"lint",
					"status",
				},
			}),
		},
	},
}
// helmV3RepoOpts is the schema overlay for helm.sh/v3:RepositoryOpts, the
// settings block identifying a Helm chart repository and its credentials.
var helmV3RepoOpts = pschema.ComplexTypeSpec{
	ObjectTypeSpec: pschema.ObjectTypeSpec{
		Description: "Specification defining the Helm chart repository to use.",
		Properties: map[string]pschema.PropertySpec{
			"repo": {
				TypeSpec: pschema.TypeSpec{
					Type: "string",
				},
				Description: "Repository where to locate the requested chart. If is a URL the chart is installed without installing the repository.",
			},
			"keyFile": { // TODO: Content or file
				TypeSpec: pschema.TypeSpec{
					Type: "string",
				},
				Description: "The repository's cert key file",
			},
			"certFile": { // TODO: Content or file
				TypeSpec: pschema.TypeSpec{
					Type: "string",
				},
				Description: "The repository's cert file",
			},
			"caFile": {
				TypeSpec: pschema.TypeSpec{
					Type: "string",
				},
				Description: "The Repository's CA File",
			},
			"username": {
				TypeSpec: pschema.TypeSpec{
					Type: "string",
				},
				Description: "Username for HTTP basic authentication",
			},
			"password": {
				TypeSpec: pschema.TypeSpec{
					Type: "string",
				},
				// Marked secret so SDKs treat the value as sensitive output.
				Secret:      true,
				Description: "Password for HTTP basic authentication",
			},
		},
		Language: map[string]pschema.RawMessage{
			"nodejs": rawMessage(map[string][]string{
				"requiredOutputs": {
					"repo",
					"keyFile",
					"certFile",
					"caFile",
					"username",
					"password",
				}}),
		},
		Type: "object",
	},
}
// helmV3ReleaseStatus is the schema overlay for helm.sh/v3:ReleaseStatus,
// the read-only status block reported for a deployed Helm release.
var helmV3ReleaseStatus = pschema.ComplexTypeSpec{
	ObjectTypeSpec: pschema.ObjectTypeSpec{
		Required: []string{"status"},
		Properties: map[string]pschema.PropertySpec{
			"name": {
				TypeSpec: pschema.TypeSpec{
					Type: "string",
				},
				Description: "Name is the name of the release.",
			},
			"revision": {
				TypeSpec: pschema.TypeSpec{
					Type: "integer",
				},
				Description: "Version is an int32 which represents the version of the release.",
			},
			"namespace": {
				TypeSpec: pschema.TypeSpec{
					Type: "string",
				},
				Description: "Namespace is the kubernetes namespace of the release.",
			},
			"chart": {
				TypeSpec: pschema.TypeSpec{
					Type: "string",
				},
				Description: "The name of the chart.",
			},
			"version": {
				TypeSpec: pschema.TypeSpec{
					Type: "string",
				},
				Description: "A SemVer 2 conformant version string of the chart.",
			},
			"appVersion": {
				TypeSpec: pschema.TypeSpec{
					Type: "string",
				},
				Description: "The version number of the application being deployed.",
			},
			"status": {
				TypeSpec: pschema.TypeSpec{
					Type: "string",
				},
				Description: "Status of the release.",
			},
		},
		Language: map[string]pschema.RawMessage{
			"nodejs": rawMessage(map[string][]string{
				"requiredOutputs": {
					"name",
					"revision",
					"namespace",
					"chart",
					"version",
					"appVersion",
					"values",
					"status",
				}}),
		},
		Type: "object",
	},
}
// kubeClientSettings is the schema overlay for kubernetes:index:KubeClientSettings,
// the provider-level tuning knobs for the Kubernetes API client. Both
// properties can also be supplied through environment variables.
var kubeClientSettings = pschema.ComplexTypeSpec{
	ObjectTypeSpec: pschema.ObjectTypeSpec{
		Description: "Options for tuning the Kubernetes client used by a Provider.",
		Properties: map[string]pschema.PropertySpec{
			"burst": {
				Description: "Maximum burst for throttle. Default value is 10.",
				TypeSpec:    pschema.TypeSpec{Type: "integer"},
				DefaultInfo: &pschema.DefaultSpec{
					Environment: []string{
						"PULUMI_K8S_CLIENT_BURST",
					},
				},
			},
			"qps": {
				Description: "Maximum queries per second (QPS) to the API server from this client. Default value is 5.",
				TypeSpec:    pschema.TypeSpec{Type: "number"},
				DefaultInfo: &pschema.DefaultSpec{
					Environment: []string{
						"PULUMI_K8S_CLIENT_QPS",
					},
				},
			},
		},
		Type: "object",
	},
}
// helmReleaseSettings is the schema overlay for kubernetes:index:HelmReleaseSettings,
// the provider-level configuration for the (BETA) Helm Release resource. Every
// property can also be supplied through the listed environment variable.
var helmReleaseSettings = pschema.ComplexTypeSpec{
	ObjectTypeSpec: pschema.ObjectTypeSpec{
		Description: "BETA FEATURE - Options to configure the Helm Release resource.",
		Properties: map[string]pschema.PropertySpec{
			"driver": {
				DefaultInfo: &pschema.DefaultSpec{
					Environment: []string{
						"PULUMI_K8S_HELM_DRIVER",
					},
				},
				Description: "The backend storage driver for Helm. Values are: configmap, secret, memory, sql.",
				TypeSpec:    pschema.TypeSpec{Type: "string"},
			},
			"pluginsPath": {
				DefaultInfo: &pschema.DefaultSpec{
					Environment: []string{
						"PULUMI_K8S_HELM_PLUGINS_PATH",
					},
				},
				Description: "The path to the helm plugins directory.",
				TypeSpec:    pschema.TypeSpec{Type: "string"},
			},
			"registryConfigPath": {
				DefaultInfo: &pschema.DefaultSpec{
					Environment: []string{
						"PULUMI_K8S_HELM_REGISTRY_CONFIG_PATH",
					},
				},
				Description: "The path to the registry config file.",
				TypeSpec:    pschema.TypeSpec{Type: "string"},
			},
			"repositoryConfigPath": {
				DefaultInfo: &pschema.DefaultSpec{
					Environment: []string{
						"PULUMI_K8S_HELM_REPOSITORY_CONFIG_PATH",
					},
				},
				Description: "The path to the file containing repository names and URLs.",
				TypeSpec:    pschema.TypeSpec{Type: "string"},
			},
			"repositoryCache": {
				DefaultInfo: &pschema.DefaultSpec{
					Environment: []string{
						"PULUMI_K8S_HELM_REPOSITORY_CACHE",
					},
				},
				Description: "The path to the file containing cached repository indexes.",
				TypeSpec:    pschema.TypeSpec{Type: "string"},
			},
			"suppressBetaWarning": {
				DefaultInfo: &pschema.DefaultSpec{
					Environment: []string{
						"PULUMI_K8S_SUPPRESS_HELM_RELEASE_BETA_WARNING",
					},
				},
				Description: "While Helm Release provider is in beta, by default 'pulumi up' will log a warning if the resource is used. If present and set to \"true\", this warning is omitted.",
				TypeSpec:    pschema.TypeSpec{Type: "boolean"},
			},
		},
		Type: "object",
	},
}
// helmV3ReleaseResource is the resource overlay for helm.sh/v3:Release. Its
// output Properties and InputProperties duplicate the helmV3Release object
// type above (plus the internal "compat" input); keep them in sync when
// editing either declaration.
var helmV3ReleaseResource = pschema.ResourceSpec{
	ObjectTypeSpec: pschema.ObjectTypeSpec{
		Description: "A Release is an instance of a chart running in a Kubernetes cluster.\n\nA Chart is a Helm package. It contains all of the resource definitions necessary to run an application, tool, or service inside of a Kubernetes cluster.",
		Properties: map[string]pschema.PropertySpec{
			"name": {
				TypeSpec: pschema.TypeSpec{
					Type: "string",
				},
				Description: "Release name.",
			},
			"repositoryOpts": {
				TypeSpec: pschema.TypeSpec{
					Ref: "#/types/kubernetes:helm.sh/v3:RepositoryOpts",
				},
				Description: "Specification defining the Helm chart repository to use.",
			},
			"chart": {
				TypeSpec: pschema.TypeSpec{
					Type: "string",
				},
				Description: "Chart name to be installed. A path may be used.",
			},
			"version": {
				TypeSpec: pschema.TypeSpec{
					Type: "string",
				},
				Description: "Specify the exact chart version to install. If this is not specified, the latest version is installed.",
			},
			"devel": {
				TypeSpec: pschema.TypeSpec{
					Type: "boolean",
				},
				Description: "Use chart development versions, too. Equivalent to version '>0.0.0-0'. If `version` is set, this is ignored.",
			},
			"valueYamlFiles": {
				TypeSpec: pschema.TypeSpec{
					Type: "array",
					Items: &pschema.TypeSpec{
						Ref: "pulumi.json#/Asset",
					},
				},
				Description: "List of assets (raw yaml files). Content is read and merged with values. Not yet supported.",
			},
			"values": {
				TypeSpec: pschema.TypeSpec{
					Type: "object",
					AdditionalProperties: &pschema.TypeSpec{
						Ref: "pulumi.json#/Any",
					},
				},
				Description: "Custom values set for the release.",
			},
			"manifest": {
				TypeSpec: pschema.TypeSpec{
					Type: "object",
					AdditionalProperties: &pschema.TypeSpec{
						Ref: "pulumi.json#/Any",
					},
				},
				Description: "The rendered manifests as JSON. Not yet supported.",
			},
			"resourceNames": {
				TypeSpec: pschema.TypeSpec{
					Type: "object",
					AdditionalProperties: &pschema.TypeSpec{
						Type: "array",
						Items: &pschema.TypeSpec{
							Type: "string",
						},
					},
				},
				Description: "Names of resources created by the release grouped by \"kind/version\".",
			},
			"namespace": {
				TypeSpec: pschema.TypeSpec{
					Type: "string",
				},
				Description: "Namespace to install the release into.",
			},
			"verify": {
				TypeSpec: pschema.TypeSpec{
					Type: "boolean",
				},
				Description: "Verify the package before installing it.",
			},
			"keyring": {
				TypeSpec: pschema.TypeSpec{
					Type: "string",
				},
				Description: "Location of public keys used for verification. Used only if `verify` is true",
			},
			"timeout": {
				TypeSpec: pschema.TypeSpec{
					Type: "integer",
				},
				Description: "Time in seconds to wait for any individual kubernetes operation.",
			},
			"disableWebhooks": {
				TypeSpec: pschema.TypeSpec{
					Type: "boolean",
				},
				Description: "Prevent hooks from running.",
			},
			"disableCRDHooks": {
				TypeSpec: pschema.TypeSpec{
					Type: "boolean",
				},
				Description: "Prevent CRD hooks from, running, but run other hooks. See helm install --no-crd-hook",
			},
			"reuseValues": {
				TypeSpec: pschema.TypeSpec{
					Type: "boolean",
				},
				Description: "When upgrading, reuse the last release's values and merge in any overrides. If 'resetValues' is specified, this is ignored",
			},
			"resetValues": {
				TypeSpec: pschema.TypeSpec{
					Type: "boolean",
				},
				Description: "When upgrading, reset the values to the ones built into the chart.",
			},
			"forceUpdate": {
				TypeSpec: pschema.TypeSpec{
					Type: "boolean",
				},
				Description: "Force resource update through delete/recreate if needed.",
			},
			"recreatePods": {
				TypeSpec: pschema.TypeSpec{
					Type: "boolean",
				},
				Description: "Perform pods restart during upgrade/rollback.",
			},
			"cleanupOnFail": {
				TypeSpec: pschema.TypeSpec{
					Type: "boolean",
				},
				Description: "Allow deletion of new resources created in this upgrade when upgrade fails.",
			},
			"maxHistory": {
				TypeSpec: pschema.TypeSpec{
					Type: "integer",
				},
				Description: "Limit the maximum number of revisions saved per release. Use 0 for no limit.",
			},
			"atomic": {
				TypeSpec: pschema.TypeSpec{
					Type: "boolean",
				},
				Description: "If set, installation process purges chart on fail. `skipAwait` will be disabled automatically if atomic is used.",
			},
			"skipCrds": {
				TypeSpec: pschema.TypeSpec{
					Type: "boolean",
				},
				Description: "If set, no CRDs will be installed. By default, CRDs are installed if not already present.",
			},
			"renderSubchartNotes": {
				TypeSpec: pschema.TypeSpec{
					Type: "boolean",
				},
				Description: "If set, render subchart notes along with the parent.",
			},
			"disableOpenapiValidation": {
				TypeSpec: pschema.TypeSpec{
					Type: "boolean",
				},
				Description: "If set, the installation process will not validate rendered templates against the Kubernetes OpenAPI Schema",
			},
			"skipAwait": {
				TypeSpec: pschema.TypeSpec{
					Type: "boolean",
				},
				Description: "By default, the provider waits until all resources are in a ready state before marking the release as successful. Setting this to true will skip such await logic.",
			},
			"waitForJobs": {
				TypeSpec: pschema.TypeSpec{
					Type: "boolean",
				},
				Description: "Will wait until all Jobs have been completed before marking the release as successful. This is ignored if `skipAwait` is enabled.",
			},
			"dependencyUpdate": {
				TypeSpec: pschema.TypeSpec{
					Type: "boolean",
				},
				Description: "Run helm dependency update before installing the chart.",
			},
			"replace": {
				TypeSpec: pschema.TypeSpec{
					Type: "boolean",
				},
				Description: "Re-use the given name, even if that name is already used. This is unsafe in production",
			},
			"description": {
				TypeSpec: pschema.TypeSpec{
					Type: "string",
				},
				Description: "Add a custom description",
			},
			"createNamespace": {
				TypeSpec: pschema.TypeSpec{
					Type: "boolean",
				},
				Description: "Create the namespace if it does not exist.",
			},
			"postrender": {
				TypeSpec: pschema.TypeSpec{
					Type: "string",
				},
				Description: "Postrender command to run.",
			},
			"lint": {
				TypeSpec: pschema.TypeSpec{
					Type: "boolean",
				},
				Description: "Run helm lint when planning.",
			},
			"status": {
				TypeSpec: pschema.TypeSpec{
					Ref: "#/types/kubernetes:helm.sh/v3:ReleaseStatus",
				},
				Description: "Status of the deployed release.",
			},
		},
		Type: "object",
		Required: []string{
			"chart",
			"repositoryOpts",
			"status",
		},
		Language: map[string]pschema.RawMessage{
			"nodejs": rawMessage(map[string][]string{
				"requiredOutputs": {
					"name",
					"repositoryOpts",
					"chart",
					"version",
					"devel",
					"values",
					// NOTE(review): "set" is not declared in Properties above — confirm
					// it is intentionally listed as a required output.
					"set",
					"manifest",
					"namespace",
					"verify",
					"keyring",
					"timeout",
					"disableWebhooks",
					"disableCRDHooks",
					"reuseValues",
					"resetValues",
					"forceUpdate",
					"recreatePods",
					"cleanupOnFail",
					"maxHistory",
					"atomic",
					"skipCrds",
					"renderSubchartNotes",
					"disableOpenapiValidation",
					"skipAwait",
					"waitForJobs",
					"dependencyUpdate",
					"replace",
					"description",
					"createNamespace",
					"postrender",
					"lint",
					"status",
				},
			}),
		},
	},
	InputProperties: map[string]pschema.PropertySpec{
		"name": {
			TypeSpec: pschema.TypeSpec{
				Type: "string",
			},
			Description: "Release name.",
		},
		"repositoryOpts": {
			TypeSpec: pschema.TypeSpec{
				Ref: "#/types/kubernetes:helm.sh/v3:RepositoryOpts",
			},
			Description: "Specification defining the Helm chart repository to use.",
		},
		"chart": {
			TypeSpec: pschema.TypeSpec{
				Type: "string",
			},
			Description: "Chart name to be installed. A path may be used.",
		},
		"version": {
			TypeSpec: pschema.TypeSpec{
				Type: "string",
			},
			Description: "Specify the exact chart version to install. If this is not specified, the latest version is installed.",
		},
		"devel": {
			TypeSpec: pschema.TypeSpec{
				Type: "boolean",
			},
			Description: "Use chart development versions, too. Equivalent to version '>0.0.0-0'. If `version` is set, this is ignored.",
		},
		"valueYamlFiles": {
			TypeSpec: pschema.TypeSpec{
				Type: "array",
				Items: &pschema.TypeSpec{
					Ref: "pulumi.json#/Asset",
				},
			},
			Description: "List of assets (raw yaml files). Content is read and merged with values. Not yet supported.",
		},
		"values": {
			TypeSpec: pschema.TypeSpec{
				Type: "object",
				AdditionalProperties: &pschema.TypeSpec{
					Ref: "pulumi.json#/Any",
				},
			},
			Description: "Custom values set for the release.",
		},
		"manifest": {
			TypeSpec: pschema.TypeSpec{
				Type: "object",
				AdditionalProperties: &pschema.TypeSpec{
					Ref: "pulumi.json#/Any",
				},
			},
			Description: "The rendered manifests as JSON. Not yet supported.",
		},
		"resourceNames": {
			TypeSpec: pschema.TypeSpec{
				Type: "object",
				AdditionalProperties: &pschema.TypeSpec{
					Type: "array",
					Items: &pschema.TypeSpec{
						Type: "string",
					},
				},
			},
			Description: "Names of resources created by the release grouped by \"kind/version\".",
		},
		"namespace": {
			TypeSpec: pschema.TypeSpec{
				Type: "string",
			},
			Description: "Namespace to install the release into.",
		},
		"verify": {
			TypeSpec: pschema.TypeSpec{
				Type: "boolean",
			},
			Description: "Verify the package before installing it.",
		},
		"keyring": {
			TypeSpec: pschema.TypeSpec{
				Type: "string",
			},
			Description: "Location of public keys used for verification. Used only if `verify` is true",
		},
		"timeout": {
			TypeSpec: pschema.TypeSpec{
				Type: "integer",
			},
			Description: "Time in seconds to wait for any individual kubernetes operation.",
		},
		"disableWebhooks": {
			TypeSpec: pschema.TypeSpec{
				Type: "boolean",
			},
			Description: "Prevent hooks from running.",
		},
		"disableCRDHooks": {
			TypeSpec: pschema.TypeSpec{
				Type: "boolean",
			},
			Description: "Prevent CRD hooks from, running, but run other hooks. See helm install --no-crd-hook",
		},
		"reuseValues": {
			TypeSpec: pschema.TypeSpec{
				Type: "boolean",
			},
			Description: "When upgrading, reuse the last release's values and merge in any overrides. If 'resetValues' is specified, this is ignored",
		},
		"resetValues": {
			TypeSpec: pschema.TypeSpec{
				Type: "boolean",
			},
			Description: "When upgrading, reset the values to the ones built into the chart.",
		},
		"forceUpdate": {
			TypeSpec: pschema.TypeSpec{
				Type: "boolean",
			},
			Description: "Force resource update through delete/recreate if needed.",
		},
		"recreatePods": {
			TypeSpec: pschema.TypeSpec{
				Type: "boolean",
			},
			Description: "Perform pods restart during upgrade/rollback.",
		},
		"cleanupOnFail": {
			TypeSpec: pschema.TypeSpec{
				Type: "boolean",
			},
			Description: "Allow deletion of new resources created in this upgrade when upgrade fails.",
		},
		"maxHistory": {
			TypeSpec: pschema.TypeSpec{
				Type: "integer",
			},
			Description: "Limit the maximum number of revisions saved per release. Use 0 for no limit.",
		},
		"atomic": {
			TypeSpec: pschema.TypeSpec{
				Type: "boolean",
			},
			Description: "If set, installation process purges chart on fail. `skipAwait` will be disabled automatically if atomic is used.",
		},
		"skipCrds": {
			TypeSpec: pschema.TypeSpec{
				Type: "boolean",
			},
			Description: "If set, no CRDs will be installed. By default, CRDs are installed if not already present.",
		},
		"renderSubchartNotes": {
			TypeSpec: pschema.TypeSpec{
				Type: "boolean",
			},
			Description: "If set, render subchart notes along with the parent.",
		},
		"disableOpenapiValidation": {
			TypeSpec: pschema.TypeSpec{
				Type: "boolean",
			},
			Description: "If set, the installation process will not validate rendered templates against the Kubernetes OpenAPI Schema",
		},
		"skipAwait": {
			TypeSpec: pschema.TypeSpec{
				Type: "boolean",
			},
			Description: "By default, the provider waits until all resources are in a ready state before marking the release as successful. Setting this to true will skip such await logic.",
		},
		"waitForJobs": {
			TypeSpec: pschema.TypeSpec{
				Type: "boolean",
			},
			Description: "Will wait until all Jobs have been completed before marking the release as successful. This is ignored if `skipAwait` is enabled.",
		},
		"dependencyUpdate": {
			TypeSpec: pschema.TypeSpec{
				Type: "boolean",
			},
			Description: "Run helm dependency update before installing the chart.",
		},
		"replace": {
			TypeSpec: pschema.TypeSpec{
				Type: "boolean",
			},
			Description: "Re-use the given name, even if that name is already used. This is unsafe in production",
		},
		"description": {
			TypeSpec: pschema.TypeSpec{
				Type: "string",
			},
			Description: "Add a custom description",
		},
		"createNamespace": {
			TypeSpec: pschema.TypeSpec{
				Type: "boolean",
			},
			Description: "Create the namespace if it does not exist.",
		},
		"postrender": {
			TypeSpec: pschema.TypeSpec{
				Type: "string",
			},
			Description: "Postrender command to run.",
		},
		"lint": {
			TypeSpec: pschema.TypeSpec{
				Type: "boolean",
			},
			Description: "Run helm lint when planning.",
		},
		// Internal compatibility marker; always the constant "true".
		"compat": {
			TypeSpec: pschema.TypeSpec{
				Type: "string",
			},
			Const: "true",
		},
	},
	RequiredInputs: []string{
		"chart",
		"repositoryOpts",
	},
}
// init registers the hand-written overlay definitions, keyed by Pulumi type
// token, so the schema generator can merge them into the generated schema.
// Type overlays replace/augment generated type specs; resource overlays do
// the same for resource specs. (Presumably consumed by the generator in this
// package — confirm against the overlay maps' declarations.)
func init() {
	typeOverlays["kubernetes:core/v1:ServiceSpec"] = serviceSpec
	typeOverlays["kubernetes:core/v1:ServiceSpecType"] = serviceSpecType
	typeOverlays["kubernetes:helm.sh/v3:Release"] = helmV3Release
	typeOverlays["kubernetes:helm.sh/v3:RepositoryOpts"] = helmV3RepoOpts
	typeOverlays["kubernetes:helm.sh/v3:ReleaseStatus"] = helmV3ReleaseStatus
	typeOverlays["kubernetes:index:KubeClientSettings"] = kubeClientSettings
	typeOverlays["kubernetes:index:HelmReleaseSettings"] = helmReleaseSettings
	resourceOverlays["kubernetes:helm.sh/v3:Release"] = helmV3ReleaseResource
}
package grid
import (
"math/rand"
"time"
)
// dense is a corridor-only skirmish grid. It is a Prim's maze where the
// dead ends have been eliminated. Additionally each side of the grid is
// guaranteed to have one exit to the level exterior.
type dense struct {
	grid // superclass grid: supplies cells, Size, neighbours, cellSlice.
}
// Generate produces a skirmish-friendly level of the given dimensions:
// a Prim's maze whose dead ends are knocked out, with outside exits
// chopped into each side of the grid when necessary.
func (d *dense) Generate(width, depth int) Grid {
	maze := &primMaze{}
	maze.Generate(width, depth)
	d.cells = maze.cells
	random := rand.New(rand.NewSource(time.Now().UTC().UnixNano()))

	// Visit every cell exactly once in uniformly random order, removing
	// dead ends as we go. Swap-remove makes each removal O(1) instead of
	// the previous O(n) slice splice, so the traversal is O(n) overall;
	// the visiting order remains uniformly random.
	candidates := d.cellSlice()
	for last := len(candidates) - 1; last >= 0; last-- {
		index := random.Intn(last + 1)
		d.fixDeadEnd(random, candidates[index])
		candidates[index] = candidates[last]
	}
	d.ensureExits(random)
	return d
}
// fixDeadEnd knocks a hole in one of the surrounding walls when the given
// cell is a dead-end passage (a passage enclosed by three or more walls).
func (d *dense) fixDeadEnd(random *rand.Rand, u *cell) {
	if u.isWall {
		return // walls themselves cannot be dead ends.
	}
	walls := d.neighbours(u, allWalls)
	if len(walls) <= 2 {
		return // two or fewer surrounding walls: not a dead end.
	}
	walls[random.Intn(len(walls))].isWall = allPassages
}
// ensureExits guarantees at least one exit to the level exterior on each
// of the four sides of the grid. The corner cells are never considered.
func (d *dense) ensureExits(random *rand.Rand) {
	xmax, ymax := d.Size()

	// Collect the still-solid wall cells of each border, corners excluded.
	var top, bottom, right, left []*cell
	for x := 1; x < xmax-1; x++ {
		if c := d.cells[x][ymax-1]; c.isWall {
			top = append(top, c)
		}
		if c := d.cells[x][0]; c.isWall {
			bottom = append(bottom, c)
		}
	}
	for y := 1; y < ymax-1; y++ {
		if c := d.cells[xmax-1][y]; c.isWall {
			right = append(right, c)
		}
		if c := d.cells[0][y]; c.isWall {
			left = append(left, c)
		}
	}

	// A side only needs chopping when every non-corner cell is a wall.
	d.ensureExit(random, bottom, xmax-2)
	d.ensureExit(random, top, xmax-2)
	d.ensureExit(random, left, ymax-2)
	d.ensureExit(random, right, ymax-2)
}
// ensureExit chops a hole in the given side. Sometimes the hole chopped
// can be a dead-end. Chopping an additional hole in the holes neighbouring
// walls guarantees an exit.
func (d *dense) ensureExit(random *rand.Rand, side []*cell, max int) {
if len(side) == max {
index := random.Intn(len(side))
u := side[index]
u.isWall = allPassages
// ensure the chop gets into the grid by chopping again if necessary.
walls := d.neighbours(u, allWalls)
if len(walls) == 3 {
wallIndex := random.Intn(len(walls))
u := walls[wallIndex]
u.isWall = allPassages
}
}
} | interfaces/vu/src/grid/dense.go | 0.613584 | 0.54359 | dense.go | starcoder |
package abcsort
import "sort"
// Sorter provides functionality to easily sort slices using a custom alphabet.
// A sorter is safe for concurrent use (its fields are only read after New).
type Sorter struct {
	// Alphabet is the custom alphabet the sorter orders by.
	Alphabet string
	// Weights maps runes of the alphabet to ordering weights (built by
	// Weights); used for "normal", case-sensitive sorting.
	Weights map[rune]int
	// WeightsFold is the Unicode-folded variant (built by WeightsFold);
	// used for case-insensitive (fold) sorting via LessFold.
	WeightsFold map[rune]int
}
// New returns a new Sorter that will use the given custom alphabet when
// sorting. Both the normal and the fold weight tables are precomputed here
// so individual sorts do not have to rebuild them.
func New(alphabet string) *Sorter {
	s := &Sorter{Alphabet: alphabet}
	s.Weights = Weights(alphabet)
	s.WeightsFold = WeightsFold(alphabet)
	return s
}
// Strings sorts a string slice in place using the sorter's custom alphabet.
func (s *Sorter) Strings(ss []string) {
	data := &StringSlice{Slice: ss, Weights: s.Weights}
	sort.Sort(data)
}
// StringsFold sorts a string slice in place under Unicode folding
// (case-insensitive) using the sorter's custom alphabet.
//
// The fold weight table (WeightsFold) must be passed here:
// StringFoldSlice.Less delegates to LessFold, which expects fold weights —
// the same pairing SliceFold uses. Previously the case-sensitive Weights
// table was passed, so folded sorting compared with the wrong weights.
func (s *Sorter) StringsFold(ss []string) {
	sort.Sort(&StringFoldSlice{Slice: ss, Weights: s.WeightsFold})
}
// Slice sorts a slice using the sorter's custom alphabet.
// getField is a function that must return the string value (e.g. a field)
// of the element at the ith index.
func (s *Sorter) Slice(slice interface{}, getField func(i int) string) {
	less := func(i, j int) bool {
		return Less(getField(i), getField(j), s.Weights)
	}
	sort.Slice(slice, less)
}
// SliceFold sorts a slice under Unicode folding (case-insensitive).
// getField is a function that must return the string value (e.g. a field)
// of the element at the ith index.
func (s *Sorter) SliceFold(slice interface{}, getField func(i int) string) {
	less := func(i, j int) bool {
		return LessFold(getField(i), getField(j), s.WeightsFold)
	}
	sort.Slice(slice, less)
}
// StringSlice is a helper struct that implements sort.Interface for a
// []string ordered by custom alphabet weights (via the package-level Less).
type StringSlice struct {
	Slice   []string
	Weights map[rune]int // case-sensitive weight table (Sorter.Weights)
}

// Len, Less and Swap implement sort.Interface.
func (ss *StringSlice) Len() int           { return len(ss.Slice) }
func (ss *StringSlice) Less(i, j int) bool { return Less(ss.Slice[i], ss.Slice[j], ss.Weights) }
func (ss *StringSlice) Swap(i, j int)      { ss.Slice[i], ss.Slice[j] = ss.Slice[j], ss.Slice[i] }
// StringFoldSlice is a helper struct that implements sort.Interface for a
// []string compared under Unicode folding. Because Less delegates to
// LessFold, the Weights field is expected to hold the fold weight table
// (Sorter.WeightsFold) — see Sorter.SliceFold for the canonical pairing.
type StringFoldSlice struct {
	Slice   []string
	Weights map[rune]int // fold weight table (Sorter.WeightsFold)
}

// Len, Less and Swap implement sort.Interface.
func (ss *StringFoldSlice) Len() int           { return len(ss.Slice) }
func (ss *StringFoldSlice) Less(i, j int) bool { return LessFold(ss.Slice[i], ss.Slice[j], ss.Weights) }
func (ss *StringFoldSlice) Swap(i, j int)      { ss.Slice[i], ss.Slice[j] = ss.Slice[j], ss.Slice[i] }
package interpolation ; import ( "math" ; "github.com/sjbog/math_tools" )
/* HIROSHI AKIMA "A method of smooth curve fitting" 1969
Summary
This method is based on a piecewise function composed of a set of polynomials, each of degree three, at most, and applicable to successive intervals of the given points.
Method assumes that the slope of the curve at each given point is determined locally by the coordinates of five points, with the point in question as a center point, and two points on each side of it ( section 1 ).
A polynomial of degree three representing a portion of the curve between a pair of given points is determined by the coordinates of and the slopes at the two points ( section 2 ).
Since the slope of the curve must thus be determined also at the end points of the curve, estimation of two more points is necessary at each end point ( section 3 ).
Details
1) We assume that the slope t of the curve at point 3 is determined by :
t = ( | m4 - m3 | * m2 + | m2 - m1 | * m3 ) / ( | m4 - m3 | + | m2 - m1 | )
where m1, m2, m3, and m4 are the slopes of line segments 12, 23, 34, and 45 respectively.
Note : m1 [ 12 ] = ( y2 - y1 ) / ( x2 - x1 )
1.1) Special case if m1 == m2 && m3 == m4 :
t = ( m2 + m3 ) / 2
2) Since we have four conditions for determining the polynomial for an interval between two points ( x1, y1 ) and ( x2, y2 ), we assume that the curve between a pair of points can be expressed by a polynomial of, at most, degree three.
The polynomial, though uniquely determined, can be written in several ways.
As an example we shall give the following form:
y = p0 + p1( x - x1 ) + p2( x - x1 )^2 + p3( x - x1 )^3
where
p0 = y1
p1 = t1
p2 = [ 3( y2 - y1 ) / ( x2 - x1 ) - 2* t1 - t2 ] / ( x2 - x1 )
p3 = [ t1 + t2 - 2( y2 - y1 ) / ( x2 - x1 ) ] / ( x2 - x1 )^2
t1 and t2 are the slopes at the two points
3) At each end of the curve, two more points have to be estimated from the given points.
We assume for this purpose that the end point (x3, y3) and two adjacent given points (x2, y2) and (x1, y1), together with two more points (x4, y4) and (x5, y5) to be estimated, lie on a curve expressed by :
y = g0 + g1( x - x3 ) + g2( x - x3 )^2
where the g's are constants. Assuming that x5 - x3 == x4 - x2 == x3 - x1, we can determine the ordinates y4 and y5.
( y5 - y4 ) / ( x5 - x4 ) - ( y4 - y3 ) / ( x4 - x3 ) =
( y4 - y3 ) / ( x4 - x3 ) - ( y3 - y2 ) / ( x3 - x2 ) =
( y3 - y2 ) / ( x3 - x2 ) - ( y2 - y1 ) / ( x2 - x1 )
--------
x5 - x3 == x4 - x2 =>
x2 = x3 + x4 - x5
x5 = x3 + x4 - x2
x4 - x2 == x3 - x1 =>
x1 = x2 + x3 - x4
x4 = x2 + x3 - x1
Interpolation is better on mirror edge points
x5 - x3 == x3 - x1 =>
x1 = 2 * x3 - x5
x5 = 2 * x3 - x1
--------
( y5 - y4 ) / ( x5 - x4 ) - ( y4 - y3 ) / ( x4 - x3 ) = ( y4 - y3 ) / ( x4 - x3 ) - ( y3 - y2 ) / ( x3 - x2 )
=>
y2 = y3 - ( x3 - x2 ) * ( 2 * ( y4 - y3 ) / ( x4 - x3 ) - ( y5 - y4 ) / ( x5 - x4 ) )
y5 = y4 + ( x5 - x4 ) * ( 2 * ( y4 - y3 ) / ( x4 - x3 ) - ( y3 - y2 ) / ( x3 - x2 ) )
( y4 - y3 ) / ( x4 - x3 ) - ( y3 - y2 ) / ( x3 - x2 ) = ( y3 - y2 ) / ( x3 - x2 ) - ( y2 - y1 ) / ( x2 - x1 )
=>
y1 = y2 - ( x2 - x1 ) * ( 2 * ( y3 - y2 ) / ( x3 - x2 ) - ( y4 - y3 ) / ( x4 - x3 ) )
y4 = y3 + ( x4 - x3 ) * ( 2 * ( y3 - y2 ) / ( x3 - x2 ) - ( y2 - y1 ) / ( x2 - x1 ) )
Interpolation is better on mirror edge points
( y5 - y4 ) / ( x5 - x4 ) - ( y4 - y3 ) / ( x4 - x3 ) = ( y3 - y2 ) / ( x3 - x2 ) - ( y2 - y1 ) / ( x2 - x1 )
=>
y1 = y2 - ( x2 - x1 ) * ( ( y3 - y2 ) / ( x3 - x2 ) + ( y4 - y3 ) / ( x4 - x3 ) - ( y5 - y4 ) / ( x5 - x4 ) )
y5 = y4 + ( x5 - x4 ) * ( ( y3 - y2 ) / ( x3 - x2 ) + ( y4 - y3 ) / ( x4 - x3 ) - ( y2 - y1 ) / ( x2 - x1 ) )
Or using Lagrange polynomial ( http://mathworld.wolfram.com/LagrangeInterpolatingPolynomial.html )
y2 = y3 * ( x2 - x4 ) * ( x2 - x5 ) / ( ( x3 - x4 ) * ( x3 - x5 ) ) +
y4 * ( x2 - x3 ) * ( x2 - x5 ) / ( ( x4 - x3 ) * ( x4 - x5 ) ) +
y5 * ( x2 - x3 ) * ( x2 - x4 ) / ( ( x5 - x3 ) * ( x5 - x4 ) )
y1 = y2 * ( x1 - x3 ) * ( x1 - x4 ) / ( ( x2 - x3 ) * ( x2 - x4 ) ) +
y3 * ( x1 - x2 ) * ( x1 - x4 ) / ( ( x3 - x2 ) * ( x3 - x4 ) ) +
y4 * ( x1 - x2 ) * ( x1 - x3 ) / ( ( x4 - x2 ) * ( x4 - x3 ) )
*/
/* Akima interpolation and smooth curve fitting
   Computes the curve coefficients for the interval where x lies : x1 <= x <= x2
   Method requires at least 5 data points, err might also indicate that x is out of bounds.
   Each entry of data_points is [x, y]; the points are assumed sorted by
   ascending x — the bounds check and the interval search below rely on it
   (TODO confirm with callers).
*/
func Akima_interval_curve ( data_points * [][] float64, x float64 ) ( interval_curve * Akima_curve, err error ) {
	var points_len = uint ( len ( * data_points ) )
	if points_len < 5 ||
		// Range Error: x below the first or above the last data point.
		x < ( * data_points ) [ 0 ][ 0 ] ||
		x > ( * data_points ) [ points_len -1 ][ 0 ] {
		return interval_curve, math_tools.Arg_range_error ()
	}
	var (
		// Interval points where x lies : x1 <= x <= x2
		y1, y2 float64
		x1, x2 float64
		i uint
		// Slopes ( 5 point ) of interval points
		t1, t2 float64
	)
	// [ Double side search ] Find the control points where x belongs:
	// i_x2 sweeps up from the start while i_x1 sweeps down from the end.
	// On exit, i is the index of the interval's RIGHT point (x <= data[i].x).
	// Note: this for-clause relies on Go's automatic semicolon insertion
	// at the line breaks.
	for i_x1, i_x2 := uint ( points_len -2 ), uint ( 1 )
		i_x2 < points_len
		i_x1, i_x2 = i_x1 -1, i_x2 +1 {
		i = i_x2
		x2 = ( * data_points ) [ i_x2 ][ 0 ]
		if x <= x2 { break }
		x1 = ( * data_points ) [ i_x1 ][ 0 ]
		if x >= x1 {
			i = i_x1 +1 ; break // right point is the one after i_x1.
		}
	}
	// Right interval point and its five-point slope (section 1).
	x2, y2 = ( * data_points ) [ i ][ 0 ] , ( * data_points ) [ i ][ 1 ]
	t2 = slope_five_point ( data_points, points_len, i )
	i -- // step to the LEFT interval point.
	x1, y1 = ( * data_points ) [ i ][ 0 ] , ( * data_points ) [ i ][ 1 ]
	t1 = slope_five_point ( data_points, points_len, i )
	// See section 2 : y = p0 + p1( x - x1 ) + p2( x - x1 )^2 + p3( x - x1 )^3
	interval_curve = & Akima_curve {
		X1 : x1, X2 : x2,
		T1 : t1, T2 : t2, Index_x1 : i,
	}
	interval_curve.set_coefficients ( y1, y2 ) ; return
}
// ----------------------------------------
/* Akima_curve is the smoothing cubic for one interval
   x1 <= x <= x2
   of the data points. T1/T2 are the five-point slopes at the two interval
   ends (see slope_five_point). */
type Akima_curve struct {
	// Index_x1 is the index of the interval's left data point.
	Index_x1 uint
	X1, X2, T1, T2 float64
	// Coefficients for the polynomial y; p1 is not stored because p1 == T1
	// (section 2 of the method description).
	p0, p2, p3 float64
}
// Point evaluates the interval polynomial at x (interval bounds are NOT
// checked by this method):
//
//	y = p0 + p1(x-x1) + p2(x-x1)^2 + p3(x-x1)^3
//
// where p1 is the stored left-end slope T1; t1 and t2 are the five-point
// slopes of the two interval points.
func ( self * Akima_curve ) Point ( x float64 ) float64 {
	dx := x - self.X1
	dx2 := dx * dx
	return self.p0 + self.p3*dx*dx2 +
		self.T1*dx + self.p2*dx2
}
// Equal reports whether the two curves describe the same cubic over the
// same interval.
//
// Fix: the constant coefficient p0 (the ordinate at X1) is now compared as
// well; previously two curves with identical shape but a different vertical
// offset compared as equal. Index_x1 is deliberately not compared — it is
// positional metadata, not part of the polynomial (TODO confirm no caller
// relied on the old offset-insensitive comparison).
func ( self * Akima_curve ) Equal ( other * Akima_curve ) bool {
	return self.X1 == other.X1 && self.X2 == other.X2 &&
		self.T1 == other.T1 && self.T2 == other.T2 &&
		self.p0 == other.p0 &&
		self.p2 == other.p2 && self.p3 == other.p3
}
/* Next_curve computes a curve for the next interval ( Index_x1 +1 ).
   Reuses the shared boundary: next.X1, next.T1 = self.X2, self.T2.
   The data-points argument signals that the result is recomputed on every
   call and that the points must not change between calls.
   Returns nil if this interval is the last one. */
func ( self * Akima_curve ) Next_curve ( data_points * [][] float64 ) ( next * Akima_curve ) {
	points := *data_points
	points_len := uint(len(points))
	i2 := self.Index_x1 + 2 // data index of the new right point.
	if i2 >= points_len {
		return nil
	}
	next = &Akima_curve{
		Index_x1: self.Index_x1 + 1,
		X1:       self.X2, // shared boundary with this curve.
		T1:       self.T2,
		X2:       points[i2][0],
		T2:       slope_five_point(data_points, points_len, i2),
	}
	next.set_coefficients(points[next.Index_x1][1], points[i2][1])
	return next
}
/* Prev_curve computes a curve for the previous interval ( Index_x1 -1 ).
   Reuses the shared boundary: prev.X2, prev.T2 = self.X1, self.T1.
   Returns nil if this interval is the first one. */
func ( self * Akima_curve ) Prev_curve ( data_points * [][] float64 ) ( prev * Akima_curve ) {
	if self.Index_x1 == 0 {
		return nil // care: uint 0-1 would wrap around.
	}
	points := *data_points
	points_len := uint(len(points))
	i0 := self.Index_x1 - 1 // data index of the new left point.
	prev = &Akima_curve{
		Index_x1: i0,
		X1:       points[i0][0],
		T1:       slope_five_point(data_points, points_len, i0),
		X2:       self.X1, // shared boundary with this curve.
		T2:       self.T1,
	}
	prev.set_coefficients(points[i0][1], points[self.Index_x1][1])
	return prev
}
// set_coefficients fills the polynomial coefficients for this interval
// from the end-point ordinates (section 2 of the method description):
//
//	p0 = y1
//	p2 = [ 3( y2 - y1 ) / ( x2 - x1 ) - 2*t1 - t2 ] / ( x2 - x1 )
//	p3 = [ t1 + t2 - 2( y2 - y1 ) / ( x2 - x1 ) ] / ( x2 - x1 )^2
func ( self * Akima_curve ) set_coefficients ( y1, y2 float64 ) {
	dx := self.X2 - self.X1
	slope := (y2 - y1) / dx       // chord slope m of the interval.
	tSum := self.T1 + self.T2
	self.p0 = y1
	self.p2 = (3*slope - self.T1 - tSum) / dx
	self.p3 = (tSum - 2*slope) / (dx * dx)
}
// slope_five_point estimates the curve slope at data point i using the
// five-point rule of section 1: the point itself (index 3 of 5) plus two
// neighbours on each side. Near the data boundaries (i in {0, 1} or the
// last two indices) the missing neighbours are extrapolated per section 3,
// using the "mirror edge point" variant for the outermost estimate.
// Callers must guarantee points_len >= 5 and i < points_len.
func slope_five_point ( data_points * [][] float64, points_len, i uint ) float64 {
	// 2 point Slopes
	var m12, m23, m34, m45 , x1, x2, x3, x4, x5 , y1, y2, y3, y4, y5 float64
	x3, y3 = ( * data_points ) [ i ][ 0 ] , ( * data_points ) [ i ][ 1 ]
	// Left edge: the two right-hand neighbours are real data.
	if i == 0 || i == 1 {
		x4, y4 = ( * data_points ) [ i +1 ][ 0 ] , ( * data_points ) [ i +1 ][ 1 ]
		x5, y5 = ( * data_points ) [ i +2 ][ 0 ] , ( * data_points ) [ i +2 ][ 1 ]
		m34, m45 = ( y4 - y3 ) / ( x4 - x3 ) , ( y5 - y4 ) / ( x5 - x4 )
	} else
	// Right edge: the two left-hand neighbours are real data.
	if i +1 == points_len || i +2 == points_len {
		x1, y1 = ( * data_points ) [ i -2 ][ 0 ] , ( * data_points ) [ i -2 ][ 1 ]
		x2, y2 = ( * data_points ) [ i -1 ][ 0 ] , ( * data_points ) [ i -1 ][ 1 ]
		m12, m23 = ( y2 - y1 ) / ( x2 - x1 ) , ( y3 - y2 ) / ( x3 - x2 )
	}
	switch i {
	case 0 :
		// First point: extrapolate BOTH left neighbours (section 3).
		x2 = x3 + x4 - x5
		y2 = y3 - ( x3 - x2 ) * ( 2 * m34 - m45 )
		m23 = ( y3 - y2 ) / ( x3 - x2 )
		x1 = 2 * x3 - x5 // mirror edge point.
		y1 = y2 - ( x2 - x1 ) * ( 2 * m23 - m34 )
		m12 = ( y2 - y1 ) / ( x2 - x1 )
	case 1 :
		// Second point: only the outermost left neighbour is extrapolated.
		x2, y2 = ( * data_points ) [ i -1 ][ 0 ] , ( * data_points ) [ i -1 ][ 1 ]
		m23 = ( y3 - y2 ) / ( x3 - x2 )
		x1 = 2 * x3 - x5 // mirror edge point.
		y1 = y2 - ( x2 - x1 ) * ( 2 * m23 - m34 )
		m12 = ( y2 - y1 ) / ( x2 - x1 )
	case points_len -2 :
		// Second-to-last point: extrapolate the outermost right neighbour.
		x4, y4 = ( * data_points ) [ i +1 ][ 0 ] , ( * data_points ) [ i +1 ][ 1 ]
		m34 = ( y4 - y3 ) / ( x4 - x3 )
		x5 = 2 * x3 - x1 // mirror edge point.
		y5 = y4 + ( x5 - x4 ) * ( 2 * m34 - m23 )
		m45 = ( y5 - y4 ) / ( x5 - x4 )
	case points_len -1 :
		// Last point: extrapolate BOTH right neighbours (section 3).
		x4 = x3 + x2 - x1
		y4 = y3 + ( x4 - x3 ) * ( 2 * m23 - m12 )
		m34 = ( y4 - y3 ) / ( x4 - x3 )
		x5 = 2 * x3 - x1 // mirror edge point.
		y5 = y4 + ( x5 - x4 ) * ( 2 * m34 - m23 )
		m45 = ( y5 - y4 ) / ( x5 - x4 )
	default :
		// Interior point: all five points are real data.
		x1, x2 = ( * data_points ) [ i -2 ][ 0 ] , ( * data_points ) [ i -1 ][ 0 ]
		y1, y2 = ( * data_points ) [ i -2 ][ 1 ] , ( * data_points ) [ i -1 ][ 1 ]
		x5, x4 = ( * data_points ) [ i +2 ][ 0 ] , ( * data_points ) [ i +1 ][ 0 ]
		y5, y4 = ( * data_points ) [ i +2 ][ 1 ] , ( * data_points ) [ i +1 ][ 1 ]
		m12, m23, m34, m45 = ( y2 - y1 ) / ( x2 - x1 ) , ( y3 - y2 ) / ( x3 - x2 ) , ( y4 - y3 ) / ( x4 - x3 ) , ( y5 - y4 ) / ( x5 - x4 )
	}
	// Section 1: special case when both outer slope pairs are parallel
	// (the weighted formula would be 0/0), otherwise the weighted mean.
	if m12 == m23 && m34 == m45 { return ( m23 + m34 ) / 2.0
	} else {
		var (
			m45_minus_m34 = math.Abs ( m45 - m34 )
			m23_minus_m12 = math.Abs ( m23 - m12 )
		)
		return ( m45_minus_m34 * m23 + m23_minus_m12 * m34 ) / ( m45_minus_m34 + m23_minus_m12 )
	}
}
package header
/**
 * The Expires header field gives the relative time after which the message
 * (or content) expires. The precise meaning of this is method dependent.
 * The expiration time in an INVITE does not affect the duration of the
 * actual session that may result from the invitation. Session description
 * protocols may offer the ability to express time limits on the session
 * duration, however.
 * The value of this field is an integral number of seconds (in decimal)
 * between 0 and (2**32)-1, measured from the receipt of the request. Malformed
 * values SHOULD be treated as equivalent to 3600.
 * <p>
 * This interface represents the Expires entity-header. The ExpiresHeader is
 * optional in both REGISTER and INVITE Requests.
 * <ul>
 * <li>REGISTER - When a client sends a REGISTER request, it MAY suggest an
 * expiration interval that indicates how long the client would like the
 * registration to be valid. There are two ways in which a client can suggest
 * an expiration interval for a binding: through an Expires header field or an
 * "expires" Contact header parameter. The latter allows expiration intervals
 * to be suggested on a per-binding basis when more than one binding is given
 * in a single REGISTER request, whereas the former suggests an expiration
 * interval for all Contact header field values that do not contain the
 * "expires" parameter.
 * <li> INVITE - The UAC MAY add an Expires header field to limit the validity
 * of the invitation. If the time indicated in the Expires header field is
 * reached and no final answer for the INVITE has been received, the UAC core
 * SHOULD generate a CANCEL request for the INVITE.
 * </ul>
 * Example:<br>
 * <code>Expires: 5</code>
 */
type ExpiresHeader interface {
	Header // embeds the generic SIP header behaviour.

	/**
	 * Sets the relative expires value (in seconds) of the ExpiresHeader.
	 * The expires value MUST be between zero and (2**31)-1.
	 *
	 * @param expires - the new expires value of this ExpiresHeader
	 * @throws InvalidArgumentException if the supplied value is less than
	 * zero (returned as the error result, per Go convention).
	 *
	 */
	SetExpires(expires int) (InvalidArgumentException error)

	/**
	 * Gets the expires value of the ExpiresHeader, in seconds. This
	 * expires value is relative time.
	 *
	 * @return the expires value of the ExpiresHeader.
	 *
	 */
	GetExpires() int
}
package tensor3
// methods with a matrix parameter can come with 'T' prefixed version, meaning the result is post transposed, or a "T" suffix meaning the parameter is transposed before the operation. adding transpose(s) is a no extra cost operation.

// TProduct sets m to the transpose of the matrix product m·m2, i.e.
// m = (m·m2)ᵀ. The tuple assignment evaluates every right-hand product
// from the pre-assignment values, so no temporary matrix is needed; the
// transpose comes free from listing the destinations column-first
// (m[0].x, m[1].x, m[2].x, ...).
// NOTE(review): vectorUnscale presumably rescales each row after the
// fixed-point multiplies (package scaledint) — confirm its contract.
func (m *Matrix) TProduct(m2 Matrix) {
	m[0].x, m[1].x, m[2].x, m[0].y, m[1].y, m[2].y, m[0].z, m[1].z, m[2].z =
		m[0].x*m2[0].x+m[0].y*m2[1].x+m[0].z*m2[2].x, m[0].x*m2[0].y+m[0].y*m2[1].y+m[0].z*m2[2].y, m[0].x*m2[0].z+m[0].y*m2[1].z+m[0].z*m2[2].z,
		m[1].x*m2[0].x+m[1].y*m2[1].x+m[1].z*m2[2].x, m[1].x*m2[0].y+m[1].y*m2[1].y+m[1].z*m2[2].y, m[1].x*m2[0].z+m[1].y*m2[1].z+m[1].z*m2[2].z,
		m[2].x*m2[0].x+m[2].y*m2[1].x+m[2].z*m2[2].x, m[2].x*m2[0].y+m[2].y*m2[1].y+m[2].z*m2[2].y, m[2].x*m2[0].z+m[2].y*m2[1].z+m[2].z*m2[2].z
	vectorUnscale(&m[0])
	vectorUnscale(&m[1])
	vectorUnscale(&m[2])
}
// ProductT sets m to the product with the transposed parameter:
// m = m·m2ᵀ. Each entry is the dot product of a row of m with a row of
// m2 (row·row ≡ row·column-of-transpose); destinations are row-major.
// NOTE(review): vectorUnscale presumably rescales each row after the
// fixed-point multiplies (package scaledint) — confirm its contract.
func (m *Matrix) ProductT(m2 Matrix) {
	m[0].x, m[0].y, m[0].z, m[1].x, m[1].y, m[1].z, m[2].x, m[2].y, m[2].z =
		m[0].x*m2[0].x+m[0].y*m2[0].y+m[0].z*m2[0].z, m[0].x*m2[1].x+m[0].y*m2[1].y+m[0].z*m2[1].z, m[0].x*m2[2].x+m[0].y*m2[2].y+m[0].z*m2[2].z,
		m[1].x*m2[0].x+m[1].y*m2[0].y+m[1].z*m2[0].z, m[1].x*m2[1].x+m[1].y*m2[1].y+m[1].z*m2[1].z, m[1].x*m2[2].x+m[1].y*m2[2].y+m[1].z*m2[2].z,
		m[2].x*m2[0].x+m[2].y*m2[0].y+m[2].z*m2[0].z, m[2].x*m2[1].x+m[2].y*m2[1].y+m[2].z*m2[1].z, m[2].x*m2[2].x+m[2].y*m2[2].y+m[2].z*m2[2].z
	vectorUnscale(&m[0])
	vectorUnscale(&m[1])
	vectorUnscale(&m[2])
}
// TProductT combines both transposes: m = (m·m2ᵀ)ᵀ. The right-hand
// entries are identical to ProductT (row-of-m · row-of-m2); only the
// destination order is column-first, which applies the post-transpose.
// NOTE(review): vectorUnscale presumably rescales each row after the
// fixed-point multiplies (package scaledint) — confirm its contract.
func (m *Matrix) TProductT(m2 Matrix) {
	m[0].x, m[1].x, m[2].x, m[0].y, m[1].y, m[2].y, m[0].z, m[1].z, m[2].z =
		m[0].x*m2[0].x+m[0].y*m2[0].y+m[0].z*m2[0].z, m[0].x*m2[1].x+m[0].y*m2[1].y+m[0].z*m2[1].z, m[0].x*m2[2].x+m[0].y*m2[2].y+m[0].z*m2[2].z,
		m[1].x*m2[0].x+m[1].y*m2[0].y+m[1].z*m2[0].z, m[1].x*m2[1].x+m[1].y*m2[1].y+m[1].z*m2[1].z, m[1].x*m2[2].x+m[1].y*m2[2].y+m[1].z*m2[2].z,
		m[2].x*m2[0].x+m[2].y*m2[0].y+m[2].z*m2[0].z, m[2].x*m2[1].x+m[2].y*m2[1].y+m[2].z*m2[1].z, m[2].x*m2[2].x+m[2].y*m2[2].y+m[2].z*m2[2].z
	vectorUnscale(&m[0])
	vectorUnscale(&m[1])
	vectorUnscale(&m[2])
}
// TProductRight multiplies with the parameter on the LEFT and stores the
// transpose: m = (m2·m)ᵀ. Each entry is a row of m2 dotted with a column
// of m; destinations are column-first to apply the post-transpose.
// NOTE(review): vectorUnscale presumably rescales each row after the
// fixed-point multiplies (package scaledint) — confirm its contract.
func (m *Matrix) TProductRight(m2 Matrix) {
	m[0].x, m[1].x, m[2].x, m[0].y, m[1].y, m[2].y, m[0].z, m[1].z, m[2].z =
		m2[0].x*m[0].x+m2[0].y*m[1].x+m2[0].z*m[2].x, m2[0].x*m[0].y+m2[0].y*m[1].y+m2[0].z*m[2].y, m2[0].x*m[0].z+m2[0].y*m[1].z+m2[0].z*m[2].z,
		m2[1].x*m[0].x+m2[1].y*m[1].x+m2[1].z*m[2].x, m2[1].x*m[0].y+m2[1].y*m[1].y+m2[1].z*m[2].y, m2[1].x*m[0].z+m2[1].y*m[1].z+m2[1].z*m[2].z,
		m2[2].x*m[0].x+m2[2].y*m[1].x+m2[2].z*m[2].x, m2[2].x*m[0].y+m2[2].y*m[1].y+m2[2].z*m[2].y, m2[2].x*m[0].z+m2[2].y*m[1].z+m2[2].z*m[2].z
	vectorUnscale(&m[0])
	vectorUnscale(&m[1])
	vectorUnscale(&m[2])
}
// TProductRightT multiplies the parameter on the left by the transposed
// receiver and stores the transpose: m = (m2·mᵀ)ᵀ. Each right-hand entry
// is a row of m2 dotted with a row of m; destinations are column-first.
// NOTE(review): vectorUnscale presumably rescales each row after the
// fixed-point multiplies (package scaledint) — confirm its contract.
func (m *Matrix) TProductRightT(m2 Matrix) {
	m[0].x, m[1].x, m[2].x, m[0].y, m[1].y, m[2].y, m[0].z, m[1].z, m[2].z =
		m2[0].x*m[0].x+m2[0].y*m[0].y+m2[0].z*m[0].z, m2[0].x*m[1].x+m2[0].y*m[1].y+m2[0].z*m[1].z, m2[0].x*m[2].x+m2[0].y*m[2].y+m2[0].z*m[2].z,
		m2[1].x*m[0].x+m2[1].y*m[0].y+m2[1].z*m[0].z, m2[1].x*m[1].x+m2[1].y*m[1].y+m2[1].z*m[1].z, m2[1].x*m[2].x+m2[1].y*m[2].y+m2[1].z*m[2].z,
		m2[2].x*m[0].x+m2[2].y*m[0].y+m2[2].z*m[0].z, m2[2].x*m[1].x+m2[2].y*m[1].y+m2[2].z*m[1].z, m2[2].x*m[2].x+m2[2].y*m[2].y+m2[2].z*m[2].z
	vectorUnscale(&m[0])
	vectorUnscale(&m[1])
	vectorUnscale(&m[2])
}
// ProductRightT multiplies the parameter on the left by the transposed
// receiver: m = m2·mᵀ. Each entry is a row of m2 dotted with a row of m;
// destinations are row-major (no post-transpose).
// NOTE(review): vectorUnscale presumably rescales each row after the
// fixed-point multiplies (package scaledint) — confirm its contract.
func (m *Matrix) ProductRightT(m2 Matrix) {
	m[0].x, m[0].y, m[0].z, m[1].x, m[1].y, m[1].z, m[2].x, m[2].y, m[2].z =
		m2[0].x*m[0].x+m2[0].y*m[0].y+m2[0].z*m[0].z, m2[0].x*m[1].x+m2[0].y*m[1].y+m2[0].z*m[1].z, m2[0].x*m[2].x+m2[0].y*m[2].y+m2[0].z*m[2].z,
		m2[1].x*m[0].x+m2[1].y*m[0].y+m2[1].z*m[0].z, m2[1].x*m[1].x+m2[1].y*m[1].y+m2[1].z*m[1].z, m2[1].x*m[2].x+m2[1].y*m[2].y+m2[1].z*m[2].z,
		m2[2].x*m[0].x+m2[2].y*m[0].y+m2[2].z*m[0].z, m2[2].x*m[1].x+m2[2].y*m[1].y+m2[2].z*m[1].z, m2[2].x*m[2].x+m2[2].y*m[2].y+m2[2].z*m[2].z
	vectorUnscale(&m[0])
	vectorUnscale(&m[1])
	vectorUnscale(&m[2])
}
package kafka
// metricsData is embedded JSON configuration mapping Kafka JMX MBean name
// patterns to metric names, labels and types ("GAUGE"/"COUNTER").
// NOTE(review): the format matches the Prometheus JMX exporter's "rules"
// config, where the first matching pattern wins — so the specific
// kafka.server patterns precede the generic kafka.* catch-alls. Confirm
// against the consumer of this blob before reordering anything.
var metricsData = []byte(`{
  "metrics": {
    "lowercaseOutputName": true,
    "rules": [
      {
        "labels": {
          "clientId": "$3",
          "partition": "$5",
          "topic": "$4"
        },
        "name": "kafka_server_$1_$2",
        "pattern": "kafka.server<type=(.+), name=(.+), clientId=(.+), topic=(.+), partition=(.*)><>Value",
        "type": "GAUGE"
      },
      {
        "labels": {
          "broker": "$4:$5",
          "clientId": "$3"
        },
        "name": "kafka_server_$1_$2",
        "pattern": "kafka.server<type=(.+), name=(.+), clientId=(.+), brokerHost=(.+), brokerPort=(.+)><>Value",
        "type": "GAUGE"
      },
      {
        "labels": {
          "cipher": "$5",
          "listener": "$2",
          "networkProcessor": "$3",
          "protocol": "$4"
        },
        "name": "kafka_server_$1_connections_tls_info",
        "pattern": "kafka.server<type=(.+), cipher=(.+), protocol=(.+), listener=(.+), networkProcessor=(.+)><>connections",
        "type": "GAUGE"
      },
      {
        "labels": {
          "clientSoftwareName": "$2",
          "clientSoftwareVersion": "$3",
          "listener": "$4",
          "networkProcessor": "$5"
        },
        "name": "kafka_server_$1_connections_software",
        "pattern": "kafka.server<type=(.+), clientSoftwareName=(.+), clientSoftwareVersion=(.+), listener=(.+), networkProcessor=(.+)><>connections",
        "type": "GAUGE"
      },
      {
        "labels": {
          "listener": "$2",
          "networkProcessor": "$3"
        },
        "name": "kafka_server_$1_$4",
        "pattern": "kafka.server<type=(.+), listener=(.+), networkProcessor=(.+)><>(.+):",
        "type": "GAUGE"
      },
      {
        "labels": {
          "listener": "$2",
          "networkProcessor": "$3"
        },
        "name": "kafka_server_$1_$4",
        "pattern": "kafka.server<type=(.+), listener=(.+), networkProcessor=(.+)><>(.+)",
        "type": "GAUGE"
      },
      {
        "name": "kafka_$1_$2_$3_percent",
        "pattern": "kafka.(\\w+)<type=(.+), name=(.+)Percent\\w*><>MeanRate",
        "type": "GAUGE"
      },
      {
        "name": "kafka_$1_$2_$3_percent",
        "pattern": "kafka.(\\w+)<type=(.+), name=(.+)Percent\\w*><>Value",
        "type": "GAUGE"
      },
      {
        "labels": {
          "$4": "$5"
        },
        "name": "kafka_$1_$2_$3_percent",
        "pattern": "kafka.(\\w+)<type=(.+), name=(.+)Percent\\w*, (.+)=(.+)><>Value",
        "type": "GAUGE"
      },
      {
        "labels": {
          "$4": "$5",
          "$6": "$7"
        },
        "name": "kafka_$1_$2_$3_total",
        "pattern": "kafka.(\\w+)<type=(.+), name=(.+)PerSec\\w*, (.+)=(.+), (.+)=(.+)><>Count",
        "type": "COUNTER"
      },
      {
        "labels": {
          "$4": "$5"
        },
        "name": "kafka_$1_$2_$3_total",
        "pattern": "kafka.(\\w+)<type=(.+), name=(.+)PerSec\\w*, (.+)=(.+)><>Count",
        "type": "COUNTER"
      },
      {
        "name": "kafka_$1_$2_$3_total",
        "pattern": "kafka.(\\w+)<type=(.+), name=(.+)PerSec\\w*><>Count",
        "type": "COUNTER"
      },
      {
        "labels": {
          "$4": "$5",
          "$6": "$7"
        },
        "name": "kafka_$1_$2_$3",
        "pattern": "kafka.(\\w+)<type=(.+), name=(.+), (.+)=(.+), (.+)=(.+)><>Value",
        "type": "GAUGE"
      },
      {
        "labels": {
          "$4": "$5"
        },
        "name": "kafka_$1_$2_$3",
        "pattern": "kafka.(\\w+)<type=(.+), name=(.+), (.+)=(.+)><>Value",
        "type": "GAUGE"
      },
      {
        "name": "kafka_$1_$2_$3",
        "pattern": "kafka.(\\w+)<type=(.+), name=(.+)><>Value",
        "type": "GAUGE"
      },
      {
        "labels": {
          "$4": "$5",
          "$6": "$7"
        },
        "name": "kafka_$1_$2_$3_count",
        "pattern": "kafka.(\\w+)<type=(.+), name=(.+), (.+)=(.+), (.+)=(.+)><>Count",
        "type": "COUNTER"
      },
      {
        "labels": {
          "$4": "$5",
          "$6": "$7",
          "quantile": "0.$8"
        },
        "name": "kafka_$1_$2_$3",
        "pattern": "kafka.(\\w+)<type=(.+), name=(.+), (.+)=(.*), (.+)=(.+)><>(\\d+)thPercentile",
        "type": "GAUGE"
      },
      {
        "labels": {
          "$4": "$5"
        },
        "name": "kafka_$1_$2_$3_count",
        "pattern": "kafka.(\\w+)<type=(.+), name=(.+), (.+)=(.+)><>Count",
        "type": "COUNTER"
      },
      {
        "labels": {
          "$4": "$5",
          "quantile": "0.$6"
        },
        "name": "kafka_$1_$2_$3",
        "pattern": "kafka.(\\w+)<type=(.+), name=(.+), (.+)=(.*)><>(\\d+)thPercentile",
        "type": "GAUGE"
      },
      {
        "name": "kafka_$1_$2_$3_count",
        "pattern": "kafka.(\\w+)<type=(.+), name=(.+)><>Count",
        "type": "COUNTER"
      },
      {
        "labels": {
          "quantile": "0.$4"
        },
        "name": "kafka_$1_$2_$3",
        "pattern": "kafka.(\\w+)<type=(.+), name=(.+)><>(\\d+)thPercentile",
        "type": "GAUGE"
      }
    ]
  }
}`)
package timex
import (
"time"
)
// Commonly used layout strings for time.Format / time.Parse (reference
// time "Mon Jan 2 15:04:05 MST 2006").
const SimpleTime = "2006-01-02 15:04:05"          // second precision, e.g. "2021-04-03 10:45:12"
const SimpleTimeMills = "2006-01-02 15:04:05.000" // millisecond precision
const SimpleDate = "2006-01-02"                   // date only
// OfEpochMills converts a Unix epoch timestamp in milliseconds to a
// time.Time (negative values, i.e. instants before the epoch, are handled).
func OfEpochMills(millis int64) time.Time {
	secs, rem := millis/1000, millis%1000
	// time.Unix normalizes a negative nanosecond remainder, so this is
	// exact for pre-epoch instants too.
	return time.Unix(secs, rem*int64(time.Millisecond))
}
// OfEpochSeconds converts a Unix epoch timestamp in whole seconds to a
// time.Time (the nanosecond component is zero).
func OfEpochSeconds(secs int64) time.Time {
	return time.Unix(secs, 0)
}
// ToEpochMills convert time to unix epoch millis
func ToEpochMills(t time.Time) int64 {
return t.Unix()*1000 + int64(t.Nanosecond()/1000_000)
}
// EpochMills returns the current wall-clock time as a Unix epoch
// timestamp in milliseconds (delegates to ToEpochMills(time.Now())).
func EpochMills() int64 {
	return ToEpochMills(time.Now())
}
// ParseLocal parses a time string with the given layout, interpreting
// zone-less values in the local Location (time.Local) instead of UTC.
func ParseLocal(layout string, value string) (time.Time, error) {
	return time.ParseInLocation(layout, value, time.Local)
}
// MustParse parse time string with layout. If parse failed, panic
func MustParse(layout string, value string) time.Time {
t, err := time.Parse(layout, value)
if err != nil {
panic(err)
}
return t
}
// MustParseLocal parses value with the given layout in the local Location
// and panics when parsing fails. Intended for literals where a malformed
// value is a programmer error.
func MustParseLocal(layout string, value string) time.Time {
	parsed, err := ParseLocal(layout, value)
	if err != nil {
		panic(err)
	}
	return parsed
}
// Date create a time, at the beginning of day.
func Date(year int, month time.Month, day int, loc *time.Location) time.Time {
return time.Date(year, month, day, 0, 0, 0, 0, loc)
}
// LocalDate create a time, at the beginning of day, at local time zone.
func LocalDate(year int, month time.Month, day int) time.Time {
return time.Date(year, month, day, 0, 0, 0, 0, time.Local)
}
// TruncateToMinute returns the beginning time of the minute.
// NOTE(review): Truncate operates on absolute (UTC-based) time; this is
// correct for every zone whose UTC offset is a whole number of minutes,
// but historical LMT offsets include seconds — confirm whether such
// inputs can occur here.
func TruncateToMinute(t time.Time) time.Time {
	return t.Truncate(time.Minute)
}
// TruncateToHour return the beginning time of the hour.
func TruncateToHour(t time.Time) time.Time {
return t.Truncate(time.Hour)
}
// TruncateToDay return the beginning time of the day.
func TruncateToDay(t time.Time) time.Time {
year, month, day := t.Date()
return time.Date(year, month, day, 0, 0, 0, 0, t.Location())
}
// TruncateToMonth return the beginning time of the month.
func TruncateToMonth(t time.Time) time.Time {
year, month, _ := t.Date()
return time.Date(year, month, 1, 0, 0, 0, 0, t.Location())
}
// TruncateToYear return the beginning time of the month.
func TruncateToYear(t time.Time) time.Time {
year, _, _ := t.Date()
return time.Date(year, time.January, 1, 0, 0, 0, 0, t.Location())
}
// TruncateToWeek return the beginning time of the week(Sunday).
func TruncateToWeek(t time.Time) time.Time {
weekday := t.Weekday()
t = t.AddDate(0, 0, int(weekday-time.Sunday))
return TruncateToDay(t)
} | timex/time_utils.go | 0.82308 | 0.540681 | time_utils.go | starcoder |
package c
// MOD is the answer modulus (1e9+7, the usual competitive-programming prime).
const MOD = 1000000007

// modAdd returns (a + b) mod MOD for operands already reduced into
// (-MOD, MOD); the sum then lies in (-2*MOD, 2*MOD), so a single
// correction in either direction suffices.
func modAdd(a, b int) int {
	s := a + b
	switch {
	case s >= MOD:
		s -= MOD
	case s < 0:
		s += MOD
	}
	return s
}
// numsGame returns, for each prefix nums[0..i], the minimum total number
// of unit increments/decrements to turn the prefix into a sequence that
// increases by exactly 1 per step, modulo MOD.
//
// Reduction: with x[j] = nums[j] - j the prefix is such a sequence iff all
// x[j] are equal, and the cheapest common target is a median of the x[j];
// the cost is sum|x[j] - median| = (sum of upper half) - (sum of lower
// half). An order-statistic AVL tree (Node) maintains the multiset of
// x values so each half-sum is an O(log n) prefix-sum query.
func numsGame(nums []int) []int {
	n := len(nums)
	res := make([]int, n)
	var root *Node
	var sum int // running (unreduced) sum of all inserted x values.
	for i := 0; i < n; i++ {
		x := nums[i] - i
		sum = sum + x
		root = Insert(root, x)
		if i == 0 {
			continue // a single element already satisfies the condition.
		}
		h := (i + 1) / 2
		y := root.GetSum(h) // sum of the h smallest values (lower half).
		z := sum - y        // sum of the remaining values (upper half).
		if (i+1)&1 == 1 {
			// odd count: drop the median itself from the upper half so
			// both halves contain h values.
			z = sum - root.GetSum(h+1)
		}
		// z - y fits in int64 for typical constraints; reduce at the end.
		res[i] = (z - y) % MOD
		if res[i] < 0 {
			res[i] += MOD
		}
	}
	return res
}
/**
 * Node is one node of an order-statistic AVL tree: alongside the usual
 * key/height/children it caches the size and key-sum of its whole subtree,
 * so rank and prefix-sum queries (GetSum) run in O(log n).
 */
type Node struct {
	key    int // stored value; duplicates allowed (they go right, see Insert).
	height int // height of the subtree rooted here (a leaf has height 1).
	sum    int // sum of all keys in this subtree.
	size   int // number of nodes in this subtree.
	left, right *Node
}
// max returns the larger of a and b.
func max(a, b int) int {
	if a < b {
		return b
	}
	return a
}
// NewNode allocates a leaf node for key with its aggregates initialised
// (height 1, size 1, subtree sum equal to the key).
func NewNode(key int) *Node {
	return &Node{key: key, height: 1, size: 1, sum: key}
}
// Size reports the number of nodes in the subtree; nil-safe (empty = 0).
func (node *Node) Size() int {
	if node != nil {
		return node.size
	}
	return 0
}
// Sum reports the sum of all keys in the subtree; nil-safe (empty = 0).
func (node *Node) Sum() int {
	if node != nil {
		return node.sum
	}
	return 0
}
// Height reports the height of the subtree; nil-safe (empty = 0).
func (node *Node) Height() int {
	if node != nil {
		return node.height
	}
	return 0
}
// update recomputes x's cached aggregates (height, subtree size, subtree
// key sum) from its children. The children's aggregates must already be
// correct — rotation helpers therefore update lower nodes first.
func update(x *Node) {
	x.height = max(x.left.Height(), x.right.Height()) + 1
	x.size = x.left.Size() + x.right.Size() + 1
	x.sum = x.key + x.left.Sum() + x.right.Sum()
}
// rightRotate rotates y's left child x up into y's place and returns x
// as the new subtree root:
//
//	    y            x
//	   / \          / \
//	  x   c  ==>   a   y
//	 / \              / \
//	a  t2           t2   c
//
// update(y) runs before update(x) because y becomes x's child.
func rightRotate(y *Node) *Node {
	x := y.left
	t2 := x.right
	x.right = y
	y.left = t2
	update(y)
	update(x)
	return x
}
// leftRotate rotates x's right child y up into x's place and returns y
// as the new subtree root (mirror of rightRotate). update(x) runs before
// update(y) because x becomes y's child.
func leftRotate(x *Node) *Node {
	y := x.right
	t2 := y.left
	y.left = x
	x.right = t2
	update(x)
	update(y)
	return y
}
// GetBalance returns left height minus right height (positive = left
// heavy); a nil subtree balances to 0.
func (node *Node) GetBalance() int {
	if node != nil {
		return node.left.Height() - node.right.Height()
	}
	return 0
}
// Insert adds key into the AVL tree rooted at node and returns the new
// subtree root. Duplicate keys are allowed: only a strictly greater parent
// sends the key left, so duplicates accumulate in right subtrees; the <=
// and >= comparisons in the rebalancing cases account for that. After the
// recursive insert the node's aggregates are refreshed and at most one
// single or double rotation restores the AVL balance invariant.
func Insert(node *Node, key int) *Node {
	if node == nil {
		return NewNode(key)
	}
	if node.key > key {
		node.left = Insert(node.left, key)
	} else {
		node.right = Insert(node.right, key)
	}
	update(node)
	balance := node.GetBalance()
	// Left-Left: the new key landed in the left child's left side.
	if balance > 1 && key <= node.left.key {
		return rightRotate(node)
	}
	// Right-Right.
	if balance < -1 && key >= node.right.key {
		return leftRotate(node)
	}
	// Left-Right: rotate the left child first, then this node.
	if balance > 1 && key >= node.left.key {
		node.left = leftRotate(node.left)
		return rightRotate(node)
	}
	// Right-Left.
	if balance < -1 && key <= node.right.key {
		node.right = rightRotate(node.right)
		return leftRotate(node)
	}
	return node
}
// GetSum returns the sum of the cnt smallest keys in the subtree rooted
// at node (an order-statistic prefix sum). Callers must guarantee
// 0 <= cnt <= node.Size(); with cnt in range the walk never visits a nil
// interior node.
func (node *Node) GetSum(cnt int) int {
	if cnt == 0 {
		return 0
	}
	if node.Size() == cnt {
		// The request covers this whole subtree: use the cached sum.
		return node.Sum()
	}
	// node.size > cnt
	if node.left.Size() >= cnt {
		// All cnt smallest keys live in the left subtree.
		return node.left.GetSum(cnt)
	}
	// node.left.size < cnt: take the whole left subtree plus this node's
	// key, and the remainder from the right subtree.
	var res = node.left.Sum()
	res += node.key
	res += node.right.GetSum(cnt - node.left.Size() - 1)
	return res
}
package graphhopper
import (
"io/ioutil"
"net/http"
"net/url"
"strings"
"golang.org/x/net/context"
"encoding/json"
)
// Linger please
// (generated-code idiom: the blank assignment keeps the context import
// referenced even when no code in this file uses it directly.)
var (
	_ context.Context
)
type MatrixApiService service
/* MatrixApiService Matrix API
 The Matrix API is part of the GraphHopper Directions API and with this API you can calculate many-to-many distances, times or routes a lot more efficient than calling the Routing API multiple times. In the Routing API we support multiple points, so called &#39;via points&#39;, which results in one route being calculated. The Matrix API results in NxM routes or more precise NxM weights, distances or times being calculated but is a lot faster compared to NxM single requests. The most simple example is a tourist trying to decide which pizza is close to him instead of using beeline distance she can calculate a 1x4 matrix. Or a delivery service in the need of often big NxN matrices to solve vehicle routing problems. E.g. the GraphHopper Route Optimization API uses the Matrix API under the hood to achieve this.
 * @param ctx context.Context for authentication, logging, tracing, etc.
 @param key Get your key at graphhopper.com
 @param optional (nil or map[string]interface{}) with one or more of:
     @param "point" ([]string) Specify multiple points for which the weight-, route-, time- or distance-matrix should be calculated. In this case the starts are identical to the destinations. If there are N points, then NxN entries will be calculated. The order of the point parameter is important. Specify at least three points. Cannot be used together with from_point or to_point. Is a string with the format latitude,longitude.
     @param "fromPoint" ([]string) The starting points for the routes. E.g. if you want to calculate the three routes A-&gt;1, A-&gt;2, A-&gt;3 then you have one from_point parameter and three to_point parameters. Is a string with the format latitude,longitude.
     @param "toPoint" ([]string) The destination points for the routes. Is a string with the format latitude,longitude.
     @param "pointHint" ([]string) Optional parameter. Specifies a hint for each `point` parameter to prefer a certain street for the closest location lookup. E.g. if there is an address or house with two or more neighboring streets you can control for which street the closest location is looked up.
     @param "fromPointHint" ([]string) For the from_point parameter. See point_hint
     @param "toPointHint" ([]string) For the to_point parameter. See point_hint
     @param "outArray" ([]string) Specifies which arrays should be included in the response. Specify one or more of the following options &#39;weights&#39;, &#39;times&#39;, &#39;distances&#39;. To specify more than one array use e.g. out_array&#x3D;times&amp;out_array&#x3D;distances. The units of the entries of distances are meters, of times are seconds and of weights is arbitrary and it can differ for different vehicles or versions of this API.
     @param "vehicle" (string) The vehicle for which the route should be calculated. Other vehicles are foot, small_truck etc
 @return MatrixResponse*/
func (a *MatrixApiService) MatrixGet(ctx context.Context, key string, localVarOptionals map[string]interface{}) (MatrixResponse, *http.Response, error) {
	var (
		localVarHttpMethod = strings.ToUpper("Get")
		localVarPostBody interface{}
		localVarFileName string
		localVarFileBytes []byte
		successPayload MatrixResponse
	)
	// create path and map variables
	localVarPath := a.client.cfg.BasePath + "/matrix"
	localVarHeaderParams := make(map[string]string)
	localVarQueryParams := url.Values{}
	localVarFormParams := url.Values{}
	// NOTE(review): only the "vehicle" optional is type-checked up front;
	// the slice-typed optionals below are silently skipped when the type
	// assertion fails (standard swagger-codegen behavior).
	if err := typeCheckParameter(localVarOptionals["vehicle"], "string", "vehicle"); err != nil {
		return successPayload, nil, err
	}
	// Copy each recognized optional into the query string, translating
	// camelCase keys into the API's snake_case parameter names.
	if localVarTempParam, localVarOk := localVarOptionals["point"].([]string); localVarOk {
		localVarQueryParams.Add("point", parameterToString(localVarTempParam, "multi"))
	}
	if localVarTempParam, localVarOk := localVarOptionals["fromPoint"].([]string); localVarOk {
		localVarQueryParams.Add("from_point", parameterToString(localVarTempParam, "multi"))
	}
	if localVarTempParam, localVarOk := localVarOptionals["toPoint"].([]string); localVarOk {
		localVarQueryParams.Add("to_point", parameterToString(localVarTempParam, "multi"))
	}
	if localVarTempParam, localVarOk := localVarOptionals["pointHint"].([]string); localVarOk {
		localVarQueryParams.Add("point_hint", parameterToString(localVarTempParam, "multi"))
	}
	if localVarTempParam, localVarOk := localVarOptionals["fromPointHint"].([]string); localVarOk {
		localVarQueryParams.Add("from_point_hint", parameterToString(localVarTempParam, "multi"))
	}
	if localVarTempParam, localVarOk := localVarOptionals["toPointHint"].([]string); localVarOk {
		localVarQueryParams.Add("to_point_hint", parameterToString(localVarTempParam, "multi"))
	}
	if localVarTempParam, localVarOk := localVarOptionals["outArray"].([]string); localVarOk {
		localVarQueryParams.Add("out_array", parameterToString(localVarTempParam, "multi"))
	}
	if localVarTempParam, localVarOk := localVarOptionals["vehicle"].(string); localVarOk {
		localVarQueryParams.Add("vehicle", parameterToString(localVarTempParam, ""))
	}
	// The API key always travels in the query string.
	localVarQueryParams.Add("key", parameterToString(key, ""))
	// to determine the Content-Type header
	localVarHttpContentTypes := []string{}
	// set Content-Type header
	localVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)
	if localVarHttpContentType != "" {
		localVarHeaderParams["Content-Type"] = localVarHttpContentType
	}
	// to determine the Accept header
	localVarHttpHeaderAccepts := []string{"application/json"}
	// set Accept header
	localVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)
	if localVarHttpHeaderAccept != "" {
		localVarHeaderParams["Accept"] = localVarHttpHeaderAccept
	}
	r, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)
	if err != nil {
		return successPayload, nil, err
	}
	localVarHttpResponse, err := a.client.callAPI(r)
	if err != nil || localVarHttpResponse == nil {
		return successPayload, localVarHttpResponse, err
	}
	defer localVarHttpResponse.Body.Close()
	// Non-2xx responses surface the raw body in the returned error.
	if localVarHttpResponse.StatusCode >= 300 {
		bodyBytes, _ := ioutil.ReadAll(localVarHttpResponse.Body)
		return successPayload, localVarHttpResponse, reportError("Status: %v, Body: %s", localVarHttpResponse.Status, bodyBytes)
	}
	if err = json.NewDecoder(localVarHttpResponse.Body).Decode(&successPayload); err != nil {
		return successPayload, localVarHttpResponse, err
	}
	return successPayload, localVarHttpResponse, err
}
/* MatrixApiService Matrix API Post
 The GET request has an URL length limitation, which hurts for many locations per request. In those cases use a HTTP POST request with JSON data as input. The only parameter in the URL will be the key which stays in the URL. Both request scenarios are identically except that all singular parameter names are named as their plural for a POST request.
 * @param ctx context.Context for authentication, logging, tracing, etc.
 @param key Get your key at graphhopper.com
 @param optional (nil or map[string]interface{}) with one or more of:
     @param "body" (MatrixRequest)
 @return MatrixResponse*/
func (a *MatrixApiService) MatrixPost(ctx context.Context, key string, localVarOptionals map[string]interface{}) (MatrixResponse, *http.Response, error) {
	var (
		localVarHttpMethod = strings.ToUpper("Post")
		localVarPostBody interface{}
		localVarFileName string
		localVarFileBytes []byte
		successPayload MatrixResponse
	)
	// create path and map variables
	localVarPath := a.client.cfg.BasePath + "/matrix"
	localVarHeaderParams := make(map[string]string)
	localVarQueryParams := url.Values{}
	localVarFormParams := url.Values{}
	// Only the API key travels in the URL; everything else goes in the body.
	localVarQueryParams.Add("key", parameterToString(key, ""))
	// to determine the Content-Type header
	localVarHttpContentTypes := []string{}
	// set Content-Type header
	localVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)
	if localVarHttpContentType != "" {
		localVarHeaderParams["Content-Type"] = localVarHttpContentType
	}
	// to determine the Accept header
	localVarHttpHeaderAccepts := []string{"application/json"}
	// set Accept header
	localVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)
	if localVarHttpHeaderAccept != "" {
		localVarHeaderParams["Accept"] = localVarHttpHeaderAccept
	}
	// body params
	if localVarTempParam, localVarOk := localVarOptionals["body"].(MatrixRequest); localVarOk {
		localVarPostBody = &localVarTempParam
	}
	r, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)
	if err != nil {
		return successPayload, nil, err
	}
	localVarHttpResponse, err := a.client.callAPI(r)
	if err != nil || localVarHttpResponse == nil {
		return successPayload, localVarHttpResponse, err
	}
	defer localVarHttpResponse.Body.Close()
	// Non-2xx responses surface the raw body in the returned error.
	if localVarHttpResponse.StatusCode >= 300 {
		bodyBytes, _ := ioutil.ReadAll(localVarHttpResponse.Body)
		return successPayload, localVarHttpResponse, reportError("Status: %v, Body: %s", localVarHttpResponse.Status, bodyBytes)
	}
	if err = json.NewDecoder(localVarHttpResponse.Body).Decode(&successPayload); err != nil {
		return successPayload, localVarHttpResponse, err
	}
	return successPayload, localVarHttpResponse, err
} | go/api_matrix.go | 0.668772 | 0.512571 | api_matrix.go | starcoder |
package asciilines
import (
"os"
"fmt"
"io/ioutil"
"strings"
"errors"
"strconv"
)
// Since we are restricted to Ascii, 2-D array of bytes is sufficient
type AsciiLines [][]byte
func LoadTVG(filename string) (*AsciiLines, error) {
// Declare a single static object to work with
var state AsciiLines
// Does file exists?
_, err := os.Stat(filename)
if err != nil {
return nil, err
}
// Read entire file
source, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
// Seperate by lines
lines := strings.Split(string(source), "\n")
// Split arguments by spaces on line 1 and check quantity
arguments := strings.Split(lines[0], " ")
if len(arguments) != 2 {
return nil, errors.New("Wrong number of arguments")
}
// Read dimensions of array
xSize, err := strconv.Atoi(arguments[0])
if err != nil {
return nil, err
}
ySize, err := strconv.Atoi(arguments[1])
if err != nil {
return nil, err
}
// Check dimensions for sanity
if xSize < 1 || ySize < 1 {
return nil, errors.New("Invalid dimension integer value")
}
// Initialize 2-D array object
state = make(AsciiLines, xSize)
for x := 0; x < xSize; x++ {
state[x] = make([]byte, ySize)
for y := 0; y < ySize; y++ {
state[x][y] = "."[0]
}
}
// Iterate over each line
for line := 1; line < len(lines); line++ {
// Only read line if it has stuff in it
if lines[line] != "" {
// Split parameters by spaces and check quantity
parameters := strings.Split(lines[line], " ")
if len(parameters) != 5 {
return nil, errors.New("Invalid line length")
}
// Read character to write and check for errors
var character byte
if len(parameters[0]) == 1 {
character = parameters[0][0]
} else {
return nil, errors.New("Character must be a single ascii character")
}
// Read starting position
xStart, err := strconv.Atoi(parameters[1])
if err != nil {
return nil, err
}
yStart, err := strconv.Atoi(parameters[2])
if err != nil {
return nil, err
}
// Read length
length, err := strconv.Atoi(parameters[4])
if err != nil {
return nil, err
}
// Read direction as boolean value
var horizontal bool
if parameters[3] == "h" {
horizontal = true
} else if parameters[3] == "v" {
horizontal = false
} else {
return nil, errors.New("Line must be either horizontal (h) or vertical (v)")
}
// Iterate over length
for i := 0; i < length; i++ {
// Generate position to write
var x int
var y int
if horizontal {
x = xStart
y = yStart + i
} else {
x = xStart + i
y = yStart
}
// If is in bounds, overwrite position with new character
if x >= 0 && x < len(state) && y >= 0 && y < len(state[0]) {
state[x][y] = character
}
}
}
}
// Has not early returned, so there must be no errors
return &state, nil
}
// Print writes the grid to standard output, one row per line.
func (a *AsciiLines) Print() {
	for _, row := range *a {
		fmt.Println(string(row))
	}
} | asciilines/asciilines.go | 0.779196 | 0.400398 | asciilines.go | starcoder |
package wasmer
import (
"fmt"
"math"
)
// ValueType represents the `Value` type.
type ValueType int
const (
	// TypeI32 represents the WebAssembly `i32` type.
	TypeI32 ValueType = iota
	// TypeI64 represents the WebAssembly `i64` type.
	TypeI64
	// TypeF32 represents the WebAssembly `f32` type.
	TypeF32
	// TypeF64 represents the WebAssembly `f64` type.
	TypeF64
	// TypeVoid represents nothing.
	// WebAssembly doesn't have “void” type, but it is introduced
	// here to represent the returned value of a WebAssembly exported
	// function that returns nothing.
	TypeVoid
	// NOTE(review): declaration order fixes the iota values
	// (I32=0 … Void=4); do not reorder.
)
// Value represents a WebAssembly value of a particular type.
// The payload is stored as raw bits in a uint64, which is wide enough
// for any of the four numeric WebAssembly types.
type Value struct {
	// The WebAssembly value (as bits).
	value uint64
	// The WebAssembly value type.
	ty ValueType
}
// I32 constructs a WebAssembly value of type `i32`.
func I32(value int32) Value {
	return Value{ty: TypeI32, value: uint64(value)}
}

// I64 constructs a WebAssembly value of type `i64`.
func I64(value int64) Value {
	return Value{ty: TypeI64, value: uint64(value)}
}

// F32 constructs a WebAssembly value of type `f32`.
func F32(value float32) Value {
	return Value{ty: TypeF32, value: uint64(math.Float32bits(value))}
}

// F64 constructs a WebAssembly value of type `f64`.
func F64(value float64) Value {
	return Value{ty: TypeF64, value: math.Float64bits(value)}
}

// void constructs an empty WebAssembly value (zero bits, void tag).
func void() Value {
	return Value{ty: TypeVoid}
}

// GetType gets the type of the WebAssembly value.
func (v Value) GetType() ValueType {
	return v.ty
}

// ToI32 reinterprets the stored bits as an `int32`; the type tag is ignored.
func (v Value) ToI32() int32 {
	return int32(v.value)
}

// ToI64 reinterprets the stored bits as an `int64`; the type tag is ignored.
func (v Value) ToI64() int64 {
	return int64(v.value)
}

// ToF32 reinterprets the stored bits as a `float32`; the type tag is ignored.
func (v Value) ToF32() float32 {
	return math.Float32frombits(uint32(v.value))
}

// ToF64 reinterprets the stored bits as a `float64`; the type tag is ignored.
func (v Value) ToF64() float64 {
	return math.Float64frombits(v.value)
}

// ToVoid always yields nil, matching the absence of a payload.
func (v Value) ToVoid() interface{} {
	return nil
}
// String formats the WebAssembly value as a Go string.
// Integers render with %d, floats with %f (six decimal places); an
// unrecognized type tag yields the empty string.
func (value Value) String() string {
	switch value.ty {
	case TypeI32:
		return fmt.Sprintf("%d", value.ToI32())
	case TypeI64:
		return fmt.Sprintf("%d", value.ToI64())
	case TypeF32:
		return fmt.Sprintf("%f", value.ToF32())
	case TypeF64:
		return fmt.Sprintf("%f", value.ToF64())
	case TypeVoid:
		return "void"
	default:
		return ""
	}
} | vendor/github.com/wasmerio/go-ext-wasm/wasmer/value.go | 0.821116 | 0.479382 | value.go | starcoder |
package sqe
import (
"bytes"
"context"
"fmt"
"io"
"strings"
)
// MaxRecursionDeepness is the limit we impose on the number of direct ORs expression.
// It's possible to have more than that, just not in a single successive sequence or `1 or 2 or 3 ...`.
// This guards against pathological inputs whose deeply nested ORs or
// parentheses would make parsing slow (and eventually exhaust the stack).
const MaxRecursionDeepness = 2501
// Parse compiles the SQE query in input into an Expression tree.
func Parse(ctx context.Context, input string) (expr Expression, err error) {
	p, err := NewParser(bytes.NewBufferString(input))
	if err != nil {
		return nil, fmt.Errorf("new parser: %w", err)
	}
	return p.Parse(ctx)
}
// Parser turns the lexer's token stream into an Expression tree via
// recursive descent.
type Parser struct {
	ctx context.Context
	l *lexer
	// lookForRightParenthesis counts parenthesis groups currently open;
	// a ')' token is only legal while it is non-zero.
	lookForRightParenthesis uint
}
// NewParser wraps reader in a lexer and returns a Parser ready to Parse.
func NewParser(reader io.Reader) (*Parser, error) {
	lex, err := newLexer(reader)
	if err != nil {
		return nil, err
	}
	return &Parser{
		ctx: context.Background(),
		l:   lex,
	}, nil
}
// Parse parses the whole input and returns the optimized expression
// tree. Deeply nested parse errors are thrown as panics inside the
// recursive-descent machinery and converted back into errors here.
func (p *Parser) Parse(ctx context.Context) (out Expression, err error) {
	// recover must run directly inside this deferred closure; it maps
	// the recovered value onto the named return err by its type.
	defer func() {
		recoveredErr := recover()
		if recoveredErr == nil {
			return
		}
		switch v := recoveredErr.(type) {
		case *ParseError:
			err = v
		case error:
			err = fmt.Errorf("unexpected error occurred while parsing SQE expression: %w", v)
		case string, fmt.Stringer:
			err = fmt.Errorf("unexpected error occurred while parsing SQE expression: %s", v)
		default:
			err = fmt.Errorf("unexpected error occurred while parsing SQE expression: %v", v)
		}
	}()
	rootExpr, err := p.parseExpression(0)
	if err != nil {
		return nil, err
	}
	return optimizeExpression(ctx, rootExpr), nil
}
// parseExpression parses a full binary expression (implicit/explicit
// AND, OR) starting at the given recursion depth. The token stream is
// consumed in order, so statement order here is significant.
func (p *Parser) parseExpression(depth int) (Expression, error) {
	if depth >= MaxRecursionDeepness {
		// This is a small hack, the panic is trapped at the public API `Parse` method. We do it with a panic
		// to avoid the really deep wrapping of error that would happen if we returned right away. A test ensures
		// that this behavior works as expected.
		panic(parserError("expression is too long, too much ORs or parenthesis expressions", p.l.peekPos()))
	}
	left, err := p.parseUnaryExpression(depth)
	if err != nil {
		return nil, err
	}
	for {
		p.l.skipSpaces()
		next, err := p.l.Peek(0)
		if err != nil {
			return nil, err
		}
		// If we reached end of file, we have finished our job
		if next.EOF() {
			return left, nil
		}
		// If we reached right parenthesis, check if we were expecting one
		if p.l.isRightParenthesis(next) {
			if p.lookForRightParenthesis == 0 {
				return nil, parserError("unexpected right parenthesis, expected right hand side expression or end of input", next.Pos)
			}
			// We were expecting one, we finished our job for this part, decrement will be done at parsing site
			return left, nil
		}
		isImplicitAnd := true
		if p.l.isBinaryOperator(next) {
			isImplicitAnd = false
			p.l.mustLexNext()
			p.l.skipSpaces()
		}
		// This implements precedence order between `&&` and `||`. A `&&` is parsed with the smallest
		// next unit so it takes precedence while `||` parses with the longest possibility.
		parser := p.parseUnaryExpression
		depthIncrease := 0
		if p.l.isOrOperator(next) {
			parser = p.parseExpression
			depthIncrease = 1
		}
		right, err := parser(depth + depthIncrease)
		switch {
		case isImplicitAnd || p.l.isAndOperator(next):
			if err != nil {
				if isImplicitAnd {
					return nil, fmt.Errorf("missing expression after implicit 'and' clause: %w", err)
				}
				return nil, fmt.Errorf("missing expression after 'and' clause: %w", err)
			}
			if v, ok := left.(*AndExpression); ok {
				v.Children = append(v.Children, right)
			} else {
				left = &AndExpression{Children: []Expression{left, right}}
			}
		case p.l.isOrOperator(next):
			if err != nil {
				return nil, fmt.Errorf("missing expression after 'or' clause: %w", err)
			}
			// It's impossible to coalesce `||` expressions since they are recursive
			left = &OrExpression{Children: []Expression{left, right}}
		default:
			if err != nil {
				return nil, fmt.Errorf("unable to parse right hand side expression: %w", err)
			}
			return nil, parserError(fmt.Sprintf("token type %s is not valid binary right hand side expression", p.l.getTokenType(next)), next.Pos)
		}
	}
}
// parseUnaryExpression parses a single operand: a bare search term, a
// parenthesized group, or a '-'-negated expression.
func (p *Parser) parseUnaryExpression(depth int) (Expression, error) {
	p.l.skipSpaces()
	token, err := p.l.Peek(0)
	if err != nil {
		return nil, err
	}
	if token.EOF() {
		return nil, parserError("expected a search term, minus sign or left parenthesis, got end of input", token.Pos)
	}
	// Dispatch on the kind of token without consuming it; each branch
	// lexes its own tokens.
	switch {
	case p.l.isName(token):
		return p.parseSearchTerm()
	case p.l.isLeftParenthesis(token):
		return p.parseParenthesisExpression(depth)
	case p.l.isNotOperator(token):
		return p.parseNotExpression(depth)
	default:
		return nil, parserError(fmt.Sprintf("expected a search term, minus sign or left parenthesis, got %s", p.l.getTokenType(token)), token.Pos)
	}
}
// parseParenthesisExpression parses `( <expression> )`. The opening '('
// is the pending token when called; lookForRightParenthesis is bumped so
// the inner expression knows a ')' is legal, and decremented once the
// closing parenthesis is consumed here.
func (p *Parser) parseParenthesisExpression(depth int) (Expression, error) {
	// Consume left parenthesis
	openingParenthesis := p.l.mustLexNext()
	p.lookForRightParenthesis++
	child, err := p.parseExpression(depth + 1)
	if err != nil {
		return nil, fmt.Errorf("invalid expression after opening parenthesis: %w", err)
	}
	p.l.skipSpaces()
	token, err := p.l.Next()
	if err != nil {
		return nil, err
	}
	if token.EOF() {
		return nil, parserError("expecting closing parenthesis, got end of input", openingParenthesis.Pos)
	}
	if !p.l.isRightParenthesis(token) {
		return nil, parserError(fmt.Sprintf("expecting closing parenthesis after expression, got %s", p.l.getTokenType(token)), token.Pos)
	}
	p.lookForRightParenthesis--
	return &ParenthesisExpression{child}, nil
}
// parseNotExpression consumes a leading minus sign and negates the
// unary expression that follows it.
func (p *Parser) parseNotExpression(depth int) (Expression, error) {
	p.l.mustLexNext() // eat the '-' token
	operand, err := p.parseUnaryExpression(depth)
	if err != nil {
		return nil, fmt.Errorf("invalid expression after minus sign: %w", err)
	}
	return &NotExpression{operand}, nil
}
// parseSearchTerm parses `field : value` where value is a bare string,
// a quoted string, or a bracketed strings list. The field-name token is
// the pending token when called.
func (p *Parser) parseSearchTerm() (Expression, error) {
	fieldNameToken := p.l.mustLexNext()
	p.l.skipSpaces()
	colonToken, err := p.l.Next()
	if err != nil {
		return nil, err
	}
	if colonToken.EOF() {
		return nil, parserError("expecting colon after search field, got end of input", fieldNameToken.Pos)
	}
	if !p.l.isColon(colonToken) {
		return nil, parserError(fmt.Sprintf("expecting colon after search field, got %s", p.l.getTokenType(colonToken)), colonToken.Pos)
	}
	p.l.skipSpaces()
	token, err := p.l.Peek(0)
	if err != nil {
		return nil, err
	}
	if token.EOF() {
		return nil, parserError("expecting search value after field, got end of input", token.Pos)
	}
	// Dispatch on the three legal value shapes.
	var searchValue SearchTermValue
	switch {
	case p.l.isName(token):
		searchValue = &StringLiteral{
			Value: p.l.mustLexNext().String(),
		}
	case p.l.isQuoting(token):
		literal, err := p.parseQuotedString()
		if err != nil {
			return nil, err
		}
		searchValue = literal
	case p.l.isLeftSquareBracket(token):
		list, err := p.parseStringsList()
		if err != nil {
			return nil, err
		}
		searchValue = list
	default:
		return nil, parserError(fmt.Sprintf("expecting search value after colon, either a string, quoted string or strings list got %s", p.l.getTokenType(token)), token.Pos)
	}
	return &SearchTerm{
		Field: fieldNameToken.String(),
		Value: searchValue,
	}, nil
}
// parseStringsList parses a bracketed, comma-separated list of string
// values, e.g. `[a, "b c", d]`. The opening '[' is the pending token
// when called.
// FIX: the "expecting comma" diagnostic used the %T verb, which printed
// the Go type of getTokenType's result instead of the token type itself;
// every sibling message in this parser uses %s, so this one now does too.
func (p *Parser) parseStringsList() (*StringsList, error) {
	// Consume left square bracket
	p.l.mustLexNext()
	list := &StringsList{}
	// Alternate between expecting a value ("name") and a separator ("comma").
	lookFor := "name"
	for {
		p.l.skipSpaces()
		token, err := p.l.Peek(0)
		if err != nil {
			return nil, err
		}
		if token.EOF() {
			return nil, parserError("expecting string value in list, got end of input, right square bracket ']' missing", token.Pos)
		}
		// If we reached the square bracket, we are done, consume characters and return accumulated list
		if p.l.isRightSquareBracket(token) {
			p.l.mustLexNext()
			return list, nil
		}
		if lookFor == "comma" {
			if !p.l.isComma(token) {
				return nil, parserError(fmt.Sprintf("expecting comma after string value item, got %s", p.l.getTokenType(token)), token.Pos)
			}
			// It's a comma, skip and change to look for name
			p.l.mustLexNext()
			lookFor = "name"
			continue
		}
		var searchValue *StringLiteral
		switch {
		case p.l.isName(token):
			searchValue = &StringLiteral{
				Value: p.l.mustLexNext().String(),
			}
		case p.l.isQuoting(token):
			literal, err := p.parseQuotedString()
			if err != nil {
				return nil, err
			}
			searchValue = literal
		default:
			return nil, parserError(fmt.Sprintf("expecting string value in list, either a string or quoted string got %s", p.l.getTokenType(token)), token.Pos)
		}
		list.Values = append(list.Values, searchValue)
		lookFor = "comma"
	}
}
// parseQuotedString consumes a quoted literal. The opening quote token
// is pending when called; a token with the same quoting character ends
// the literal. Empty quoted strings are rejected.
func (p *Parser) parseQuotedString() (*StringLiteral, error) {
	startQuoting := p.l.mustLexNext()
	builder := &strings.Builder{}
	for {
		token, err := p.l.Next()
		if err != nil {
			return nil, err
		}
		if token.EOF() {
			return nil, parserError(fmt.Sprintf("expecting closing quoting char %q, got end of input", startQuoting.Value), startQuoting.Pos)
		}
		if p.l.isQuoting(token) {
			value := builder.String()
			if value == "" {
				return nil, rangeParserError("an empty string is not valid", startQuoting.Pos, token.Pos)
			}
			return &StringLiteral{
				Value: value,
				QuotingChar: startQuoting.Value,
			}, nil
		}
		// Any other token is part of the literal's raw text.
		builder.WriteString(token.Value)
	}
} | sqe/parser.go | 0.779616 | 0.536677 | parser.go | starcoder |
package main
import (
"fmt"
)
// Matrix is a rows x cols grid of Fractions, stored row-major as a
// slice of row slices. rows/cols cache the dimensions.
type Matrix struct {
	matrix [][]Fraction
	rows int
	cols int
}
// newMatrix allocates a rows x cols matrix whose entries are
// zero-valued Fractions.
func newMatrix(rows, cols int) Matrix {
	cells := make([][]Fraction, rows)
	for i := range cells {
		cells[i] = make([]Fraction, cols)
	}
	return Matrix{matrix: cells, rows: rows, cols: cols}
}
// rref reduces the matrix to reduced row-echelon form in place (the row
// slices are shared, so the receiver's data is mutated) and returns it.
// For each row: locate the pivot, scale the row so the pivot becomes 1,
// then eliminate the pivot column from all other rows.
// NOTE(review): rows are never swapped, so the result is a true RREF
// only when pivots already appear in non-decreasing column order.
func (m Matrix) rref() Matrix {
	for r, row := range m.matrix {
		// Find the pivot point for the current row
		pivot_point := 0
		for pivot_point < m.cols && row[pivot_point].numerator == 0 {
			pivot_point += 1
		}
		// Make sure a pivot was actually found
		if pivot_point < m.cols {
			// Find the inverse (k*x = 1) [Basically, make pivot == 1.]
			inv := row[pivot_point].inv()
			// Multiply entire row times the inverse that was found
			for c := pivot_point; c < m.cols; c += 1 {
				m.matrix[r][c] = m.matrix[r][c].mul(inv)
			}
			// Work back upwards to clear numbers above the pivot
			for above_r := r - 1; above_r >= 0; above_r -= 1 {
				sub_val := m.matrix[above_r][pivot_point].mulInt(-1)
				for col := pivot_point; col < m.cols; col += 1 {
					m.matrix[above_r][col] = m.matrix[above_r][col].add(
						m.matrix[r][col].mul(sub_val))
				}
			}
			// Clear below pivot
			for below_r := r + 1; below_r < m.rows; below_r += 1 {
				sub_val := m.matrix[below_r][pivot_point].mulInt(-1)
				for i := pivot_point; i < m.cols; i += 1 {
					m.matrix[below_r][i] = m.matrix[below_r][i].add(
						m.matrix[r][i].mul(sub_val))
				}
			}
		}
	}
	return m
}
// rowMul multiplies every entry of the given row by value, in place.
func (m *Matrix) rowMul(row int, value Fraction) {
	for i := range m.matrix[row] {
		m.matrix[row][i].imul(value)
	}
}

// mulRow multiplies every entry of the given row by value and returns
// the mutated row. Although the receiver is a value, the row slices are
// shared with the caller's Matrix, so the mutation is visible there too.
// Delegates to rowMul instead of duplicating the same loop.
func (m Matrix) mulRow(row int, value Fraction) []Fraction {
	m.rowMul(row, value)
	return m.matrix[row]
}
// rowAdd adds each entry of row src into the corresponding entry of
// row dest, in place.
func (m *Matrix) rowAdd(src, dest int) {
	srcRow := m.matrix[src]
	destRow := m.matrix[dest]
	for i := range srcRow {
		destRow[i].iadd(srcRow[i])
	}
}
// String renders the matrix for display: whole numbers right-aligned in
// 3-character columns, proper fractions via Fraction.String, and a zero
// denominator shown as a plain 0. Rows are newline-separated.
// FIX: a zero-row matrix previously panicked on s[:len(s)-1]; the empty
// case is now returned as-is before the trailing-newline trim.
func (m Matrix) String() string {
	// Initialize the string to build and return
	s := ""
	for _, row := range m.matrix {
		for _, value := range row {
			// Make sure the fractions are simplified.
			// NOTE(review): value is a range copy, so simplify() only
			// affects this printout, never the stored matrix — confirm
			// that is intended.
			value.simplify()
			if value.denominator == 1 {
				// This is the same as a plain ol' integer
				s += fmt.Sprintf("%3d ", value.numerator)
			} else if value.denominator == 0 {
				// Just output a 0
				s += fmt.Sprintf("%3d ", 0)
			} else {
				// Let the Fraction stringify itself
				s += value.String() + " "
			}
		}
		s += "\n"
	}
	// Guard the empty (zero-row) matrix: nothing was emitted, so there
	// is no trailing newline to trim.
	if s == "" {
		return s
	}
	// Chop off the last extra newline.
	return s[:len(s)-1]
} | matrix.go | 0.763924 | 0.527742 | matrix.go | starcoder |
package common
import (
"fmt"
"strconv"
"strings"
)
// rang is an inclusive integer interval [from, to].
type rang struct {
	from int
	to   int
}

// IsIntersect reports whether the two intervals overlap; intervals that
// merely share an endpoint count as intersecting.
func (r *rang) IsIntersect(o *rang) bool {
	lo, hi := r, o
	if lo.from > hi.from {
		lo, hi = hi, lo
	}
	return lo.to >= hi.from
}
// Quantum is a set of integers stored compactly as a list of inclusive
// ranges.
type Quantum struct {
	ranges []*rang
}

// NewQuantum returns an empty set.
func NewQuantum() *Quantum {
	return &Quantum{ranges: []*rang{}}
}
// isIncluded reports whether v lies inside the inclusive interval [min, max].
func isIncluded(min int, max int, v int) bool {
	return !(v < min || v > max)
}
// IsIncluded reports whether v is a member of the set.
func (q *Quantum) IsIncluded(v int) bool {
	return q.findRange(v) >= 0
}

// findRange returns the index of the range containing v, or -1 when no
// range contains it.
func (q *Quantum) findRange(v int) int {
	for i, r := range q.ranges {
		if isIncluded(r.from, r.to, v) {
			return i
		}
	}
	return -1
}
// deleteRange removes the range at index, preserving order. The slot
// vacated at the tail is nilled before truncation so the *rang can be
// garbage-collected.
func (q *Quantum) deleteRange(index int) {
	copy(q.ranges[index:], q.ranges[index+1:])
	q.ranges[len(q.ranges)-1] = nil
	q.ranges = q.ranges[:len(q.ranges)-1]
}
// insertRange inserts r at index, shifting later ranges right. The
// inner append materializes a fresh slice (r plus the tail) before the
// outer append can overwrite the shared backing array, so the aliasing
// is safe.
func (q *Quantum) insertRange(index int, r *rang) {
	q.ranges = append(q.ranges[:index], append([]*rang{r}, q.ranges[index:]...)...)
}
// Get returns the index-th member of the set, counting through the
// ranges in storage order; out-of-range indexes yield an error.
func (q *Quantum) Get(index int) (int, error) {
	remaining := index
	for _, r := range q.ranges {
		span := r.to - r.from + 1
		if remaining >= 0 && remaining < span {
			return r.from + remaining, nil
		}
		remaining -= span
	}
	return 0, fmt.Errorf("index %d out of range %d-%d", index, 0, q.Len())
}
// Add inserts v into the set, extending an adjacent range or merging
// two ranges that v bridges.
// NOTE(review): the neighbor-merge step and Remove's split assume
// q.ranges is sorted ascending, but a v falling strictly between two
// non-adjacent ranges is appended at the end, which can leave the list
// unsorted — worth confirming/fixing upstream.
func (q *Quantum) Add(v int) {
	if len(q.ranges) == 0 {
		r := &rang{v, v}
		q.ranges = append(q.ranges, r)
		return
	}
	// cmpIndex0/cmpIndex1 remember a neighbor pair that may need merging
	// after v extends one of them.
	cmpIndex0 := -1
	cmpIndex1 := -1
	index := 0
	for index < len(q.ranges) {
		r := q.ranges[index]
		if isIncluded(r.from, r.to, v) {
			// Already a member.
			return
		}
		if v+1 == r.from {
			// v extends this range downward; it may now touch the
			// previous range.
			r.from = v
			if index > 0 {
				cmpIndex0 = index - 1
				cmpIndex1 = index
			}
			break
		}
		if v-1 == r.to {
			// v extends this range upward; it may now touch the next one.
			r.to = v
			if index < len(q.ranges)-1 {
				cmpIndex0 = index
				cmpIndex1 = index + 1
			}
			break
		}
		index++
	}
	if cmpIndex0 != -1 {
		// Merge the two neighbors if they now overlap or touch.
		r0 := q.ranges[cmpIndex0]
		r1 := q.ranges[cmpIndex1]
		if r0.to+1 >= r1.from {
			r0.to = r1.to
			q.deleteRange(cmpIndex1)
		}
		return
	}
	// v neither belonged to nor extended any range: start a new one.
	if index == len(q.ranges) {
		q.ranges = append(q.ranges, &rang{v, v})
	}
}
// Remove deletes v from the set: it shrinks the range containing v, or
// splits that range in two when v lies strictly inside it.
//
// FIX: the interior-split case previously truncated r.to *before*
// building the upper half (`&rang{v + 1, r.to}`), so the new range was
// the inverted, empty interval {v+1, v-1} and every value above v was
// silently lost. It was also inserted before the lower half, breaking
// ascending order. The original upper bound is now captured first and
// the new range inserted immediately after the current one.
func (q *Quantum) Remove(v int) {
	if len(q.ranges) == 0 {
		return
	}
	index := 0
	for index < len(q.ranges) {
		r := q.ranges[index]
		if isIncluded(r.from, r.to, v) {
			if v == r.from {
				// Shrink from below; drop the range if it became empty.
				r.from = v + 1
				if r.from > r.to {
					q.deleteRange(index)
				}
				return
			}
			if v == r.to {
				// Shrink from above; drop the range if it became empty.
				r.to = v - 1
				if r.from > r.to {
					q.deleteRange(index)
				}
				return
			}
			// v is strictly inside: split into [from, v-1] and [v+1, to].
			upper := &rang{from: v + 1, to: r.to}
			r.to = v - 1
			q.insertRange(index+1, upper)
			return
		}
		index++
	}
}
// AddRange inserts every integer between from and to (inclusive,
// order-insensitive) into the set.
func (q *Quantum) AddRange(from int, to int) {
	lo, hi := from, to
	if lo > hi {
		lo, hi = hi, lo
	}
	for v := lo; v <= hi; v++ {
		q.Add(v)
	}
}

// AddQuantum inserts every member of other into q.
func (q *Quantum) AddQuantum(other *Quantum) {
	for _, v := range other.ToSlice() {
		q.Add(v)
	}
}

// RemoveRange deletes every integer between from and to (inclusive,
// order-insensitive) from the set.
func (q *Quantum) RemoveRange(from int, to int) {
	lo, hi := from, to
	if lo > hi {
		lo, hi = hi, lo
	}
	for v := lo; v <= hi; v++ {
		q.Remove(v)
	}
}

// RemoveQuantum deletes every member of other from q.
func (q *Quantum) RemoveQuantum(other *Quantum) {
	for _, v := range other.ToSlice() {
		q.Remove(v)
	}
}
// String renders the set as "from-to" pairs joined by semicolons, in
// storage order.
func (q *Quantum) String() string {
	parts := make([]string, 0, len(q.ranges))
	for _, r := range q.ranges {
		parts = append(parts, fmt.Sprintf("%d-%d", r.from, r.to))
	}
	return strings.Join(parts, ";")
}
// ParseQuantum parses the textual form produced by String: semicolon-
// separated items, each either a single integer or a "from-to" range.
// Whitespace around items and bounds is tolerated.
// NOTE(review): Error is a project helper — it appears to report (and
// possibly log) whether err is non-nil; confirm its semantics before
// relying on the side effect.
func ParseQuantum(txt string) (*Quantum, error) {
	q := NewQuantum()
	items := strings.Split(strings.TrimSpace(txt), ";")
	for _, item := range items {
		item = strings.TrimSpace(item)
		limits := strings.Split(item, "-")
		iLimits := make([]int, len(limits))
		for i := 0; i < len(limits); i++ {
			var err error
			iLimits[i], err = strconv.Atoi(strings.TrimSpace(limits[i]))
			if Error(err) {
				return nil, err
			}
		}
		// One bound means a single value; two (or more) use the first pair.
		if len(iLimits) == 1 {
			q.Add(iLimits[0])
		} else {
			q.AddRange(iLimits[0], iLimits[1])
		}
	}
	return q, nil
}
// RemoveAll empties the set while keeping the allocated backing array
// for reuse.
func (q *Quantum) RemoveAll() {
	q.ranges = q.ranges[:0]
}

// Len returns the number of integers contained in the set.
func (q *Quantum) Len() int {
	total := 0
	for _, r := range q.ranges {
		total += r.to - r.from + 1
	}
	return total
}
// ToSlice expands the set into a slice of all member integers, in
// range-storage order. The total size is computed up front so the
// result is allocated exactly once instead of growing via append.
func (q *Quantum) ToSlice() []int {
	total := 0
	for _, r := range q.ranges {
		total += r.to - r.from + 1
	}
	out := make([]int, 0, total)
	for _, r := range q.ranges {
		for v := r.from; v <= r.to; v++ {
			out = append(out, v)
		}
	}
	return out
} | quantum.go | 0.548915 | 0.47725 | quantum.go | starcoder |
package elem
// DistanceThreshhold is the maximum distance (inclusive) at which a
// tile counts as "close" to a resource. (The name keeps its existing
// misspelling; renaming would break callers.)
const DistanceThreshhold = 3
var (
	// closed collects tiles within the threshold of at least one resource.
	closed []Coordinate
	// other collects tiles too far from every resource.
	other []Coordinate
	// closedElementMap maps each closed tile to its per-element weights.
	closedElementMap = make(map[Coordinate][5]int)
)
// findClosedLand classifies tile n: if it lies within DistanceThreshhold
// of any resource (gold, wood, water, fire, earth) it is appended to
// `closed` together with a per-element weight vector in closedElementMap;
// otherwise it is appended to `other`.
// Rewritten to fold five copy-pasted distance loops into one helper.
func findClosedLand(n Coordinate) {
	// minDistTo returns the smallest distance from n to any point in pts
	// (same CalculateDistance-then-MinVal sequence as before, including
	// whatever MinVal does for an empty slice).
	minDistTo := func(pts []Coordinate) float64 {
		distances := make([]float64, len(pts))
		for i, p := range pts {
			distances[i] = CalculateDistance(n, p)
		}
		return MinVal(distances)
	}
	minGold := minDistTo(gold)
	minWood := minDistTo(wood)
	minLake := minDistTo(water)
	minFire := minDistTo(fire)
	minEarth := minDistTo(earth)
	// weight grows as the resource gets closer.
	// NOTE(review): `100*1 + 1.0/val` reads oddly — possibly
	// 100*(1 + 1.0/val) was intended, and val == 0 would divide by zero.
	// Behavior is preserved as-is; confirm the intended formula.
	weight := func(val float64) int {
		return int(100*1 + 1.0/val)
	}
	elemPut := [5]int{0}
	if minGold <= DistanceThreshhold {
		elemPut[GOLD] = weight(minGold)
	}
	if minWood <= DistanceThreshhold {
		elemPut[WOOD] = weight(minWood)
	}
	if minLake <= DistanceThreshhold {
		elemPut[WATER] = weight(minLake)
	}
	if minFire <= DistanceThreshhold {
		elemPut[FIRE] = weight(minFire)
	}
	if minEarth <= DistanceThreshhold {
		elemPut[EARTH] = weight(minEarth)
	}
	if minGold <= DistanceThreshhold || minWood <= DistanceThreshhold || minLake <= DistanceThreshhold || minFire <= DistanceThreshhold || minEarth <= DistanceThreshhold {
		closed = append(closed, n)
		closedElementMap[n] = elemPut
	} else {
		other = append(other, n)
	}
}
func FindBarren() {
resources := MergeSlice(gold, wood, water, fire, earth, reserved)
for i := cordRange.minx; i <= cordRange.maxx; i++ {
for j := cordRange.miny; j <= cordRange.maxy; j++ {
if !IsExist(Coordinate{i, j}, resources) {
findClosedLand(Coordinate{i, j})
}
}
}
} | elem/closed.go | 0.542136 | 0.430327 | closed.go | starcoder |
package hole
import (
"math/rand"
"strconv"
"strings"
)
// A bounding box (bbox) is defined by its top-left vertex coordinates
// (x, y) and its width and height (w, h).
type bbox struct{ x, y, w, h int }

// strconvbox renders a bbox as the space-separated string "x y w h".
func strconvbox(box bbox) string {
	return strings.Join([]string{
		strconv.Itoa(box.x),
		strconv.Itoa(box.y),
		strconv.Itoa(box.w),
		strconv.Itoa(box.h),
	}, " ")
}

// unbox returns the top-left (tlx, tly) and bottom-right (brx, bry)
// corner coordinates of b.
func unbox(b bbox) (tlx, tly, brx, bry int) {
	return b.x, b.y, b.x + b.w, b.y + b.h
}

// calculateIntersection returns the area of the overlap of b1 and b2,
// or 0 when they do not overlap (boxes that merely touch on an edge
// have zero-area overlap).
func calculateIntersection(b1, b2 bbox) int {
	tlx1, tly1, brx1, bry1 := unbox(b1)
	tlx2, tly2, brx2, bry2 := unbox(b2)
	// Intersection corners: max of the top-lefts, min of the
	// bottom-rights. Negative extent means the boxes are disjoint.
	// (The previous extra checks iw > b1.w+b2.w / ih > b1.h+b2.h could
	// never be true, since iw <= min(b1.w, b2.w); they are dropped.)
	iw := min(brx1, brx2) - max(tlx1, tlx2)
	ih := min(bry1, bry2) - max(tly1, tly2)
	if iw < 0 || ih < 0 {
		return 0
	}
	return iw * ih
}
// boxGen returns a random non-degenerate box: top-left corner in
// [0,100]x[0,100], width and height in [1,50]. The rand calls are made
// in the same order (x, y, w, h) so seeded sequences are unchanged.
func boxGen() bbox {
	var b bbox
	b.x = rand.Intn(101)
	b.y = rand.Intn(101)
	b.w = 1 + rand.Intn(50)
	b.h = 1 + rand.Intn(50)
	return b
}
// intersection builds the test suite for the bbox-intersection hole:
// hand-picked default cases, 100 random cases (90 with a non-zero
// intersection, 10 without), and a randomized sweep of edge/corner
// configurations around a fixed 3x3 box. Returns the shuffled test
// inputs and expected outputs via outputTests.
func intersection() ([]string, string) {
	var tests []test
	//// default cases
	// hand-picked boxes exercising overlap, disjointness, shared
	// sides, and containment (note b1 and b2 DO overlap)
	b1 := bbox{x: 0, y: 0, h: 1, w: 1}
	b2 := bbox{x: 0, y: 0, h: 2, w: 2}
	b3 := bbox{x: 3, y: 3, h: 1, w: 2}
	b4 := bbox{x: 3, y: 1, h: 3, w: 2}
	b5 := bbox{x: 3, y: 1, h: 3, w: 1}
	b6 := bbox{x: 0, y: 0, h: 10, w: 10}
	b7 := bbox{x: 2, y: 2, h: 2, w: 2}
	// b1 and b2 overlap by 1 pixel
	tests = append(tests, test{strconvbox(b1) + " " + strconvbox(b2), "1"})
	// b1 and b3 are far away and don't overlap
	tests = append(tests, test{strconvbox(b1) + " " + strconvbox(b3), "0"})
	// b3 and b4 overlap on one horizontal side
	tests = append(tests, test{strconvbox(b3) + " " + strconvbox(b4), "2"})
	// b4 and b5 overlap on one vertical side
	tests = append(tests, test{strconvbox(b4) + " " + strconvbox(b5), "3"})
	// b4 is inside b6
	tests = append(tests, test{strconvbox(b4) + " " + strconvbox(b6), "6"})
	// b2 and b7 are side by side but don't overlap
	tests = append(tests, test{strconvbox(b2) + " " + strconvbox(b7), "0"})
	//// generate 100 random cases
	for zeros, nonZeros := 0, 0; zeros+nonZeros < 100; {
		b1 := boxGen()
		b2 := boxGen()
		intersection := calculateIntersection(b1, b2)
		// compute 90 non-zero cases and 10 zero ones
		if intersection > 0 && nonZeros < 90 {
			nonZeros++
		} else if intersection == 0 && zeros < 10 {
			zeros++
		} else {
			// quota for this category is already full; redraw
			continue
		}
		tests = append(tests, test{
			strconvbox(b1) + " " + strconvbox(b2),
			strconv.Itoa(intersection),
		})
	}
	// 13x13 default side cases
	// (diagram: a candidate box sliding across the fixed box)
	// - | |
	// --| |
	// --|- |
	// --|---|
	// --|---|-
	// |- |
	// |---|
	// |---|-
	// | - |
	// | --|
	// | --|-
	// | |-
	// | | -
	bigbox := bbox{x: 2, y: 2, w: 3, h: 3}
	strbigbox := strconvbox(bigbox)
	xs := []int{0, 2, 3, 5, 6}
	ys := []int{0, 2, 3, 5, 6}
	for _, x := range xs {
		for _, y := range ys {
			for w := 1; w < 7; w++ {
				for h := 1; h < 7; h++ {
					// skip sizes that do not give a distinct
					// edge/corner configuration for this x / y
					if (x == 0 && (w == 4 || w > 6)) ||
						(x == 2 && (w == 2 || w > 4)) ||
						(x == 3 && w > 3) ||
						(x == 5 && w > 1) || (x == 6 && w > 1) {
						continue
					}
					if (y == 0 && (h == 4 || h > 6)) ||
						(y == 2 && (h == 2 || h > 4)) ||
						(y == 3 && h > 3) ||
						(y == 5 && h > 1) || (y == 6 && h > 1) {
						continue
					}
					if rand.Float32() > 0.5 { // randomly add test ?
						b := bbox{x: x, y: y, w: w, h: h}
						var in string
						if rand.Float32() > 0.5 { // randomly flip input
							in = strconvbox(b) + " " + strbigbox
						} else {
							in = strbigbox + " " + strconvbox(b)
						}
						tests = append(tests, test{
							in,
							strconv.Itoa(calculateIntersection(b, bigbox)),
						})
					}
				}
			}
		}
	}
	return outputTests(shuffle(tests))
}
package ch2
import (
"flag"
"fmt"
"strconv"
"log"
"io"
"os"
)
// CToF converts a Celsius temperature to Fahrenheit.
func CToF(c Celsius) Fahrenheit { return Fahrenheit(c*9/5 + 32) }

// FToC converts a Fahrenheit temperature to Celsius.
func FToC(f Fahrenheit) Celsius { return Celsius((f - 32) * 5 / 9) }

// CToK converts a Celsius temperature to Kelvin.
func CToK(c Celsius) Kelvin {
	return Kelvin(c - AbsoluteZeroC)
}

// KToC converts a Kelvin temperature to Celsius.
func KToC(k Kelvin) Celsius {
	return Celsius(k) + AbsoluteZeroC
}

// FToK converts a Fahrenheit temperature to Kelvin.
func FToK(f Fahrenheit) Kelvin {
	return CToK(FToC(f))
}

// KToF converts a Kelvin temperature to Fahrenheit.
func KToF(k Kelvin) Fahrenheit {
	return CToF(KToC(k))
}

// FtToM converts a length in feet to metres.
func FtToM(f Foot) Metre {
	return Metre(0.3048 * f)
}

// MToFt converts a length in metres to feet.
func MToFt(m Metre) Foot {
	return Foot(3.28083 * m)
}

// PToKg converts a weight in pounds to kilograms.
func PToKg(p Pound) Kilogram {
	return Kilogram(0.453592 * p)
}

// KgToP converts a weight in kilograms to pounds.
func KgToP(kg Kilogram) Pound {
	return Pound(2.2046226 * kg)
}
// unitConverter prints conversions of the single value v for each
// selected category: t (temperature), l (length), w (weight). Within a
// category, v is interpreted as each of that category's units in turn.
func unitConverter(t, l, w bool, v float64) {
	if t {
		c := Celsius(v)
		f := Fahrenheit(v)
		k := Kelvin(v)
		fmt.Printf("%s = %s, %s = %s, %s = %s, %s = %s, %s = %s, %s = %s\n",
			c, CToF(c), c, CToK(c), f, FToC(f), f, FToK(f), k, KToC(k), k, KToF(k))
	}
	if l {
		ft := Foot(v)
		m := Metre(v)
		fmt.Printf("%s = %s, %s = %s\n",
			ft, FtToM(ft), m, MToFt(m))
	}
	if w {
		p := Pound(v)
		kg := Kilogram(v)
		fmt.Printf("%s = %s, %s = %s\n",
			p, PToKg(p), kg, KgToP(kg))
	}
}
// UnitConverterMain ...
func UnitConverterMain() {
t := flag.Bool("t", false, "temperature convert")
l := flag.Bool("l", false, "length convert")
w := flag.Bool("w", false, "weight convert")
flag.Parse()
input := flag.Args()
if len(input) != 0 {
for _, s := range input {
v, err := strconv.ParseFloat(s, 64)
if err != nil {
log.Println(s, ":", err)
} else {
unitConverter(*t, *l, *w, v)
}
fmt.Println()
}
} else {
var v float64
for {
fmt.Print(">> ")
_, err := fmt.Scanf("%f", &v)
if err == io.EOF {
os.Exit(0)
} else if err != nil {
panic(err)
}
unitConverter(*t, *l, *w, v)
fmt.Println()
}
}
} | ch2/conv.go | 0.567098 | 0.459622 | conv.go | starcoder |
package f64
import (
"math"
"github.com/colinrgodsey/cartesius/f64/filters"
)
// Grid2D creates a 2D grid-based interpolator using the provided filter.
// The Z axis of a sample is the value that will be interpolated.
// makeGrid2d requires the sample X/Y coordinates to lie on a regular
// grid; otherwise an error is returned.
func Grid2D(samples []Vec3, filter filters.GridFilter) (Function2D, error) {
	stride, offs, max, vals, err := makeGrid2d(samples)
	if err != nil {
		return nil, err
	}
	return func(pos Vec2) (v float64, err error) {
		// Map the query point into grid-index space, rejecting points
		// outside the sampled bounding area.
		var rPos [2]float64
		for i, p := range pos {
			if p < offs[i] || p > max[i] {
				err = ErrBadCoord
				return
			}
			rPos[i] = (p - offs[i]) / stride[i]
		}
		v = interp2d(vals, Vec2{rPos[0], rPos[1]}, filter)
		if math.IsNaN(v) {
			// NaN means the weight sum was zero (no grid point fell
			// inside the kernel support) — report as a bad coordinate.
			v = 0
			err = ErrBadCoord
		}
		return
	}, nil
}
// interp1d computes the filter-weighted average of the 1D grid values
// around x. x must already be offset and scaled into index space.
func interp1d(values []float64, x float64, filter filters.GridFilter) float64 {
	lo, hi := filterRange(x, filter)
	var sum, total float64
	for j := lo; j <= hi; j++ {
		if j < 0 || j >= len(values) {
			continue // outside the grid; contributes nothing
		}
		w := filter.Kernel(float64(j) - x)
		sum += values[j] * w
		total += w
	}
	return sum / total
}

// interp2d computes the filter-weighted average over rows of the 2D
// grid, interpolating each contributing row with interp1d. pos must
// already be offset and scaled into index space.
func interp2d(values [][]float64, pos [2]float64, filter filters.GridFilter) float64 {
	lo, hi := filterRange(pos[1], filter)
	var sum, total float64
	for j := lo; j <= hi; j++ {
		if j < 0 || j >= len(values) {
			continue // row outside the grid
		}
		w := filter.Kernel(float64(j) - pos[1])
		sum += interp1d(values[j], pos[0], filter) * w
		total += w
	}
	return sum / total
}
// makeGrid2d validates that the sample X/Y coordinates form a regular
// grid and arranges the Z values into a row-major (y, x) matrix,
// returning the per-axis stride, origin offset, and (stride-padded)
// maximum.
func makeGrid2d(samples []Vec3) (stride, offs, max [2]float64, values [][]float64, err error) {
	// Bounding box of the sample coordinates.
	for si, s := range samples {
		for i, p := range s.Vec2() {
			if p > max[i] || si == 0 {
				max[i] = p
			}
			if p < offs[i] || si == 0 {
				offs[i] = p
			}
		}
	}
	// Count grid points per axis: the number of distinct x values is
	// the number of samples on the min-y row, and vice versa.
	// (The previous version counted num[0] from the min-x COLUMN —
	// i.e. the y count — which was only correct for square grids and
	// produced wrong strides / dimensions otherwise.)
	var num [2]int
	for _, s := range samples {
		v := s.Vec2()
		if v[1]-offs[1] == 0 {
			num[0]++
		}
		if v[0]-offs[0] == 0 {
			num[1]++
		}
	}
	if num[0]*num[1] != len(samples) {
		err = ErrBadGrid
		return
	}
	for i := range stride {
		stride[i] = (max[i] - offs[i]) / float64(num[i]-1)
		max[i] += stride[i]
	}
	// values[yIndex][xIndex] holds the Z value at that grid point.
	values = make([][]float64, num[1])
	for i := range values {
		values[i] = make([]float64, num[0])
	}
	for _, s := range samples {
		var idx [2]int
		for i, p := range s.Vec2() {
			idx[i] = int(math.Round((p - offs[i]) / stride[i]))
		}
		values[idx[1]][idx[0]] = s[2]
	}
	return
}
func filterRange(v float64, filter filters.GridFilter) (low, high int) {
units := math.Ceil(filter.Size)
low = int(math.Floor(v - units))
high = int(math.Ceil(v + units))
return
} | f64/grid.go | 0.780537 | 0.554772 | grid.go | starcoder |
package svg

import (
	"github.com/goki/gi/gi"
	"github.com/goki/ki/ki"
	"github.com/goki/ki/kit"
	"github.com/goki/mat32"
)

// Ellipse is a SVG ellipse
type Ellipse struct {
	NodeBase
	Pos   mat32.Vec2 `xml:"{cx,cy}" desc:"position of the center of the ellipse"`
	Radii mat32.Vec2 `xml:"{rx,ry}" desc:"radii of the ellipse in the horizontal, vertical axes"`
}

// KiT_Ellipse registers the Ellipse type with the ki type registry.
var KiT_Ellipse = kit.Types.AddType(&Ellipse{}, ki.Props{"EnumType:Flag": gi.KiT_NodeFlags})

// AddNewEllipse adds a new ellipse to given parent node, with given
// name, center position (x, y), and radii (rx, ry).
func AddNewEllipse(parent ki.Ki, name string, x, y, rx, ry float32) *Ellipse {
	g := parent.AddNewChild(KiT_Ellipse, name).(*Ellipse)
	g.Pos.Set(x, y)
	g.Radii.Set(rx, ry)
	return g
}

// SVGName returns the SVG element name for this node type.
func (g *Ellipse) SVGName() string { return "ellipse" }

// CopyFieldsFrom copies the ellipse-specific fields (and the embedded
// NodeBase fields) from another Ellipse.
func (g *Ellipse) CopyFieldsFrom(frm interface{}) {
	fr := frm.(*Ellipse)
	g.NodeBase.CopyFieldsFrom(&fr.NodeBase)
	g.Pos = fr.Pos
	g.Radii = fr.Radii
}
// SetPos positions the ellipse so that the upper-left corner of its
// bounding box is at pos, keeping the radii unchanged. Pos stores the
// center, and SVGLocalBBox computes Min as Pos - Radii, so the center
// is the given corner PLUS the radii.
// NOTE(review): the previous pos.Sub(g.Radii) placed the center at
// pos - radii, moving the shape up-left of the requested position —
// inconsistent with SVGLocalBBox. Confirm against sibling shape types.
func (g *Ellipse) SetPos(pos mat32.Vec2) {
	g.Pos = pos.Add(g.Radii)
}
// SetSize sets the overall bounding-box size of the ellipse; the radii
// are half the size in each axis.
func (g *Ellipse) SetSize(sz mat32.Vec2) {
	g.Radii = sz.MulScalar(0.5)
}

// SVGLocalBBox returns the local bounding box of the ellipse,
// expanded by half the local line width on every side to include
// the stroke.
func (g *Ellipse) SVGLocalBBox() mat32.Box2 {
	bb := mat32.Box2{}
	hlw := 0.5 * g.LocalLineWidth()
	bb.Min = g.Pos.Sub(g.Radii.AddScalar(hlw))
	bb.Max = g.Pos.Add(g.Radii.AddScalar(hlw))
	return bb
}

// Render2D renders the ellipse (fill and stroke), updates its bounding
// box, renders any children, and pops the transform it pushed.
func (g *Ellipse) Render2D() {
	vis, rs := g.PushXForm()
	if !vis {
		// not visible: PushXForm did not push, nothing to render
		return
	}
	pc := &g.Pnt
	pc.DrawEllipse(rs, g.Pos.X, g.Pos.Y, g.Radii.X, g.Radii.Y)
	pc.FillStrokeClear(rs)
	g.ComputeBBoxSVG()
	g.Render2DChildren()
	rs.PopXFormLock()
}

// ApplyXForm applies the given 2D transform to the geometry of this node
// each node must define this for itself
func (g *Ellipse) ApplyXForm(xf mat32.Mat2) {
	rot := xf.ExtractRot()
	if rot != 0 || !g.Pnt.XForm.IsIdentity() {
		// rotation (or an existing transform) cannot be folded into
		// Pos/Radii, so accumulate into the node's transform instead
		g.Pnt.XForm = g.Pnt.XForm.Mul(xf)
		g.SetProp("transform", g.Pnt.XForm.String())
		g.GradientApplyXForm(xf)
	} else {
		// pure translate/scale: bake directly into center and radii
		g.Pos = xf.MulVec2AsPt(g.Pos)
		g.Radii = xf.MulVec2AsVec(g.Radii)
		g.GradientApplyXForm(xf)
	}
}

// ApplyDeltaXForm applies the given 2D delta transforms to the geometry of this node
// relative to given point. Trans translation and point are in top-level coordinates,
// so must be transformed into local coords first.
// Point is upper left corner of selection box that anchors the translation and scaling,
// and for rotation it is the center point around which to rotate
func (g *Ellipse) ApplyDeltaXForm(trans mat32.Vec2, scale mat32.Vec2, rot float32, pt mat32.Vec2) {
	if rot != 0 {
		xf, lpt := g.DeltaXForm(trans, scale, rot, pt, false) // exclude self
		mat := g.Pnt.XForm.MulCtr(xf, lpt)
		g.Pnt.XForm = mat
		g.SetProp("transform", g.Pnt.XForm.String())
	} else {
		xf, lpt := g.DeltaXForm(trans, scale, rot, pt, true) // include self
		g.Pos = xf.MulVec2AsPtCtr(g.Pos, lpt)
		g.Radii = xf.MulVec2AsVec(g.Radii)
		g.GradientApplyXFormPt(xf, lpt)
	}
}

// WriteGeom writes the geometry of the node to a slice of floating point numbers
// the length and ordering of which is specific to each node type.
// Slice must be passed and will be resized if not the correct length.
// Layout: [0..3] = Pos.X, Pos.Y, Radii.X, Radii.Y; [4..9] = transform.
func (g *Ellipse) WriteGeom(dat *[]float32) {
	SetFloat32SliceLen(dat, 4+6)
	(*dat)[0] = g.Pos.X
	(*dat)[1] = g.Pos.Y
	(*dat)[2] = g.Radii.X
	(*dat)[3] = g.Radii.Y
	g.WriteXForm(*dat, 4)
	g.GradientWritePts(dat)
}
// ReadGeom reads the geometry of the node from a slice of floating point numbers
// the length and ordering of which is specific to each node type.
func (g *Ellipse) ReadGeom(dat []float32) {
g.Pos.X = dat[0]
g.Pos.Y = dat[1]
g.Radii.X = dat[2]
g.Radii.Y = dat[3]
g.ReadXForm(dat, 4)
g.GradientReadPts(dat)
} | svg/ellipse.go | 0.81571 | 0.429968 | ellipse.go | starcoder |
package glm

import (
	"fmt"
	"math"
)

// VarianceType is used to specify a GLM variance function.
type VarianceType uint8

// BinomialVar, IdentityVar, ConstantVar, SquaredVar, and CubedVar
// identify the built-in variance functions for a GLM.
const (
	BinomialVar VarianceType = iota
	IdentityVar
	ConstantVar
	SquaredVar
	CubedVar
)

// NewVariance returns a new variance function object corresponding to
// the given type. Supported types are BinomialVar, IdentityVar,
// ConstantVar, SquaredVar, and CubedVar; any other value panics.
func NewVariance(vartype VarianceType) *Variance {
	switch vartype {
	case BinomialVar:
		return &binomVariance
	case IdentityVar:
		return &identVariance
	case ConstantVar:
		return &constVariance
	case SquaredVar:
		return &squaredVariance
	case CubedVar:
		return &cubedVariance
	default:
		msg := fmt.Sprintf("Unknown variance function: %d\n", vartype)
		panic(msg)
	}
}

// Variance represents a GLM variance function: Var maps mean values to
// variances element-wise, and Deriv is its derivative with respect to
// the mean.
type Variance struct {
	Name  string
	Var   VecFunc
	Deriv VecFunc
}

// binomVariance: variance p*(1-p).
var binomVariance = Variance{
	Name:  "Binomial",
	Var:   binomVar,
	Deriv: binomVarDeriv,
}

// identVariance: variance equal to the mean.
var identVariance = Variance{
	Name:  "Identity",
	Var:   identVar,
	Deriv: identVarDeriv,
}

// constVariance: constant unit variance.
var constVariance = Variance{
	Name:  "Constant",
	Var:   constVar,
	Deriv: constVarDeriv,
}

// squaredVariance: variance m^2.
var squaredVariance = Variance{
	Name:  "Squared",
	Var:   squaredVar,
	Deriv: squaredVarDeriv,
}

// cubedVariance: variance m^3.
var cubedVariance = Variance{
	Name:  "Cubed",
	Var:   cubedVar,
	Deriv: cubedVarDeriv,
}
// binomVar writes the binomial variance p*(1-p) of each mean entry
// of mn into v.
func binomVar(mn []float64, v []float64) {
	for i := range mn {
		p := mn[i]
		v[i] = p * (1 - p)
	}
}

// binomVarDeriv writes the derivative 1-2p of the binomial variance
// at each mean entry of mn into dv.
func binomVarDeriv(mn []float64, dv []float64) {
	for i := range mn {
		dv[i] = 1 - 2*mn[i]
	}
}
// identVar writes the identity variance (equal to the mean) into v.
func identVar(mn []float64, v []float64) {
	copy(v, mn)
}

// identVarDeriv writes the derivative of the identity variance
// (all ones) into v.
func identVarDeriv(mn []float64, v []float64) {
	one(v)
}

// constVar writes the constant unit variance (all ones) into v.
func constVar(mn []float64, v []float64) {
	one(v)
}

// constVarDeriv writes the derivative of the constant variance
// (all zeros) into v.
func constVarDeriv(mn []float64, v []float64) {
	zero(v)
}
// squaredVar writes m^2 for each mean entry of mn into v.
func squaredVar(mn []float64, v []float64) {
	for i := range mn {
		v[i] = mn[i] * mn[i]
	}
}

// squaredVarDeriv writes the derivative 2m into v.
func squaredVarDeriv(mn []float64, v []float64) {
	for i := range mn {
		v[i] = 2 * mn[i]
	}
}

// cubedVar writes m^3 for each mean entry of mn into v.
func cubedVar(mn []float64, v []float64) {
	for i := range mn {
		m := mn[i]
		v[i] = m * m * m
	}
}

// cubedVarDeriv writes the derivative 3m^2 into v.
func cubedVarDeriv(mn []float64, v []float64) {
	for i := range mn {
		m := mn[i]
		v[i] = 3 * m * m
	}
}
// NewNegBinomVariance returns a variance function for the negative
// binomial family, using the given parameter alpha to determine the
// mean/variance relationship. The variance for mean m is m +
// alpha*m^2.
func NewNegBinomVariance(alpha float64) *Variance {
vaf := func(mn []float64, v []float64) {
for i, m := range mn {
v[i] = m + alpha*m*m
}
}
vad := func(mn []float64, v []float64) {
for i, m := range mn {
v[i] = 1 + 2*alpha*m
}
}
return &Variance{
Var: vaf,
Deriv: vad,
}
}
// NewTweedieVariance returns a variance function for the Tweedie
// family, using the given parameter pw to determine the
// mean/variance relationship. The variance for mean m is m^pw.
func NewTweedieVariance(pw float64) *Variance {
vaf := func(mn []float64, v []float64) {
for i, m := range mn {
v[i] = math.Pow(m, pw)
}
}
vad := func(mn []float64, v []float64) {
for i, m := range mn {
v[i] = pw * math.Pow(m, pw-1)
}
}
return &Variance{
Var: vaf,
Deriv: vad,
}
} | glm/varfuncs.go | 0.820218 | 0.542742 | varfuncs.go | starcoder |
package stats

import (
	"sync"
	"time"
	"k8s.io/klog"
)

var (
	// GlobalStats is a map that stores the duration of each stats.
	GlobalStats map[Type]time.Duration
	// CountStats is a map that stores the count of each stats.
	CountStats map[Type]int
	// mutex guards both GlobalStats and CountStats; it is created by Init.
	mutex *sync.RWMutex
)

// Type represents different statistics that are being collected.
type Type string

const (
	// Total represents the total duration of a specific operation.
	Total Type = "Total"
	// System represents the duration it takes to list all aad-pod-identity CRDs.
	System Type = "System"
	// CacheSync represents the duration it takes to sync CRD client's cache.
	CacheSync Type = "CacheSync"
	// CurrentState represents the duration it takes to generate a list of desired AzureAssignedIdentities.
	CurrentState Type = "Gather current state"
	// PodList represents the duration it takes to list pods.
	PodList Type = "Pod listing"
	// BindingList represents the duration it takes to list AzureIdentityBindings.
	BindingList Type = "Binding listing"
	// IDList represents the duration it takes to list AzureIdentities.
	IDList Type = "ID listing"
	// ExceptionList represents the duration it takes to list AzurePodIdentityExceptions.
	ExceptionList Type = "Pod Identity Exception listing"
	// AssignedIDList represents the duration it takes to list AzureAssignedIdentities.
	AssignedIDList Type = "Assigned ID listing"
	// CloudGet represents the duration it takes to complete a GET request to ARM in a given sync cycle.
	CloudGet Type = "Cloud provider get"
	// CloudUpdate represents the duration it takes to complete a PATCH request to ARM in a given sync cycle.
	CloudUpdate Type = "Cloud provider update"
	// TotalUpdateCalls represents the number of PATCH requests to ARM in a given sync cycle.
	TotalUpdateCalls Type = "Number of cloud provider PATCH"
	// TotalGetCalls represents the number of GET requests to ARM in a given sync cycle.
	TotalGetCalls Type = "Number of cloud provider GET"
	// TotalAssignedIDsCreated represents the number of AzureAssignedIdentities created in a given sync cycle.
	TotalAssignedIDsCreated Type = "Number of assigned ids created in this sync cycle"
	// TotalAssignedIDsUpdated represents the number of AzureAssignedIdentities updated in a given sync cycle.
	TotalAssignedIDsUpdated Type = "Number of assigned ids updated in this sync cycle"
	// TotalAssignedIDsDeleted represents the number of AzureAssignedIdentities deleted in a given sync cycle.
	TotalAssignedIDsDeleted Type = "Number of assigned ids deleted in this sync cycle"
	// FindAssignedIDDel represents the duration it takes to generate a list of AzureAssignedIdentities to be deleted.
	FindAssignedIDDel Type = "Find assigned ids to delete"
	// FindAssignedIDCreate represents the duration it takes to generate a list of AzureAssignedIdentities to be created.
	FindAssignedIDCreate Type = "Find assigned ids to create"
	// AssignedIDDel represents the duration it takes to delete an AzureAssignedIdentity.
	AssignedIDDel Type = "Assigned ID deletion"
	// AssignedIDAdd represents the duration it takes to create an AzureAssignedIdentity.
	AssignedIDAdd Type = "Assigned ID addition"
	// TotalCreateOrUpdate represents the duration it takes to create or update a given list of AzureAssignedIdentities.
	TotalCreateOrUpdate Type = "Total time to assign or update IDs"
)

// Init initializes the maps used to store the stats.
// It should be called before any other function in this package.
func Init() {
	GlobalStats = make(map[Type]time.Duration)
	CountStats = make(map[Type]int)
	mutex = &sync.RWMutex{}
}
// Put stores val as the value of the given stat key.
// It is a silent no-op when Init has not been called.
func Put(key Type, val time.Duration) {
	if GlobalStats != nil {
		mutex.Lock()
		defer mutex.Unlock()
		GlobalStats[key] = val
	}
}

// Get returns the stat value of a given key, or zero when Init has not
// been called (or the key has never been set).
func Get(key Type) time.Duration {
	if GlobalStats != nil {
		mutex.RLock()
		defer mutex.RUnlock()
		return GlobalStats[key]
	}
	return 0
}

// Update adds val to the current value of a specific stat.
// It is a silent no-op when Init has not been called.
func Update(key Type, val time.Duration) {
	if GlobalStats != nil {
		mutex.Lock()
		defer mutex.Unlock()
		GlobalStats[key] = GlobalStats[key] + val
	}
}
// Print prints the value of a specific stat.
// It is a no-op when Init has not been called, matching Put/Get/Update
// (previously it would panic on the nil mutex/map).
func Print(key Type) {
	if GlobalStats == nil {
		return
	}
	mutex.RLock()
	defer mutex.RUnlock()
	klog.Infof("%s: %s", key, GlobalStats[key])
}

// PrintCount prints the count of a specific stat.
// No-op when Init has not been called.
func PrintCount(key Type) {
	if CountStats == nil {
		return
	}
	mutex.RLock()
	defer mutex.RUnlock()
	klog.Infof("%s: %d", key, CountStats[key])
}

// UpdateCount adds val to the count of a specific stat.
// No-op when Init has not been called.
func UpdateCount(key Type, val int) {
	if CountStats == nil {
		return
	}
	mutex.Lock()
	defer mutex.Unlock()
	CountStats[key] = CountStats[key] + val
}
// PrintSync prints all relevant statistics collected in a sync cycle.
// Each stat is printed under its own lock acquisition, so the output is
// not a single atomic snapshot.
func PrintSync() {
	klog.Infof("** stats collected **")
	if GlobalStats != nil {
		Print(PodList)
		Print(IDList)
		Print(BindingList)
		Print(AssignedIDList)
		Print(System)
		Print(CacheSync)
		Print(CloudGet)
		Print(CloudUpdate)
		Print(AssignedIDAdd)
		Print(AssignedIDDel)
		PrintCount(TotalUpdateCalls)
		PrintCount(TotalGetCalls)
		PrintCount(TotalAssignedIDsCreated)
		PrintCount(TotalAssignedIDsUpdated)
		PrintCount(TotalAssignedIDsDeleted)
		Print(FindAssignedIDCreate)
		Print(FindAssignedIDDel)
		Print(TotalCreateOrUpdate)
		Print(Total)
	}
	klog.Infof("*********************")
}
// GetAll returns the global statistics it is currently collecting
func GetAll() map[Type]time.Duration {
mutex.RLock()
defer mutex.RUnlock()
return GlobalStats
} | pkg/stats/stats.go | 0.578448 | 0.436142 | stats.go | starcoder |
package linear

import (
	"math"
)

// ArrayRealVector is a RealVector implementation backed by a dense
// []float64 slice.
type ArrayRealVector struct {
	data []float64
}

/**
 * Construct a vector of zeroes.
 *
 * @param size Size of the vector.
 */
func NewSizedArrayRealVector(size int) (*ArrayRealVector, error) {
	return &ArrayRealVector{data: make([]float64, size)}, nil
}

/**
 * Construct a vector from another vector, using a deep copy.
 *
 * @param v Vector to copy (must be non-nil).
 */
func NewArrayRealVectorCopy(v RealVector) (*ArrayRealVector, error) {
	if v == nil {
		return nil, invalidArgumentSimpleErrorf()
	}
	data := make([]float64, v.Dimension())
	for i := 0; i < len(data); i++ {
		data[i] = v.At(i)
	}
	return &ArrayRealVector{data: data}, nil
}

/**
 * Construct a vector from an array, copying the input array.
 *
 * @param d Array (must be non-nil).
 */
func NewArrayRealVectorFromSlice(d []float64) (*ArrayRealVector, error) {
	if d == nil {
		return nil, invalidArgumentSimpleErrorf()
	}
	ans := new(ArrayRealVector)
	ans.data = append([]float64{}, d...)
	return ans, nil
}

/**
 * Create a new ArrayRealVector using the input array as the underlying
 * data array.
 * If an array is built specially in order to be embedded in a
 * ArrayRealVector and not used directly, the {@code copyArray} may be
 * set to {@code false}. This will prevent the copying and improve
 * performance as no new array will be built and no data will be copied.
 *
 * @param d Data for the new vector (must be non-nil).
 * @param copyArray if {@code true}, the input array will be copied,
 * otherwise it will be referenced (the caller must not mutate it).
 * @see #ArrayRealVector(double[])
 */
func NewArrayRealVector(d []float64, copyArray bool) (*ArrayRealVector, error) {
	if copyArray {
		return NewArrayRealVectorFromSlice(d)
	}
	if d == nil {
		return nil, invalidArgumentSimpleErrorf()
	}
	ans := new(ArrayRealVector)
	ans.data = d
	return ans, nil
}

/**
 * Construct a vector with preset values.
 *
 * @param size Size of the vector
 * @param preset All entries will be set with this value.
 */
func NewSizedArrayRealVectorWithPreset(size int, preset float64) (*ArrayRealVector, error) {
	ans := &ArrayRealVector{data: make([]float64, size)}
	for i := 0; i < size; i++ {
		ans.data[i] = preset
	}
	return ans, nil
}

/**
 * Construct a vector by appending one vector to another vector.
 * @param v1 First vector (will be put in front of the new vector).
 * @param v2 Second vector (will be put at back of the new vector).
 */
func NewArrayRealVectorFromTwoArrayRealVector(v1, v2 *ArrayRealVector) (*ArrayRealVector, error) {
	if v1 == nil || v2 == nil {
		return nil, invalidArgumentSimpleErrorf()
	}
	data := make([]float64, len(v1.data)+len(v2.data))
	copy(data[:len(v1.data)], v1.data)
	copy(data[len(v1.data):len(v1.data)+len(v2.data)], v2.data)
	return &ArrayRealVector{data: data}, nil
}
/**
 * Construct a vector by appending one vector to another vector.
 * @param v1 First vector (will be put in front of the new vector).
 * @param v2 Second vector (will be put at back of the new vector).
 */
func NewArrayRealVectorFromTwoRealVector(v1, v2 RealVector) (*ArrayRealVector, error) {
	if v1 == nil || v2 == nil {
		return nil, invalidArgumentSimpleErrorf()
	}
	l1 := v1.Dimension()
	l2 := v2.Dimension()
	data := make([]float64, l1+l2)
	for i := 0; i < l1; i++ {
		data[i] = v1.At(i)
	}
	// Copy v2 after v1. (The previous loop ran i from l1 to l2 and read
	// v2.At(i), which dropped entries and indexed v2 out of its valid
	// range whenever l1 > 0.)
	for i := 0; i < l2; i++ {
		data[l1+i] = v2.At(i)
	}
	return &ArrayRealVector{data: data}, nil
}
// CopyFrom overwrites arv's entries with those of vec.
// It panics when the dimensions differ.
func (arv *ArrayRealVector) CopyFrom(vec RealVector) {
	if err := checkVectorDimensions(arv, vec); err != nil {
		panic(err)
	}
	for i := range arv.data {
		arv.data[i] = vec.At(i)
	}
}

// Copy returns a deep copy of this vector.
func (arv *ArrayRealVector) Copy() RealVector {
	dup, err := NewArrayRealVectorCopy(arv)
	if err != nil {
		panic(err)
	}
	return dup
}

// Map replaces each entry x with f(x), in place.
func (arv *ArrayRealVector) Map(f func(float64) float64) {
	for i, x := range arv.data {
		arv.data[i] = f(x)
	}
}

// MapAdd adds d to each entry in place.
func (arv *ArrayRealVector) MapAdd(d float64) {
	for i := range arv.data {
		arv.data[i] += d
	}
}

// MapSubtract subtracts d from each entry in place.
func (arv *ArrayRealVector) MapSubtract(d float64) {
	for i := range arv.data {
		arv.data[i] -= d
	}
}

// MapMultiply multiplies each entry by d in place.
func (arv *ArrayRealVector) MapMultiply(d float64) {
	for i := range arv.data {
		arv.data[i] *= d
	}
}

// MapDivide divides each entry by d in place.
func (arv *ArrayRealVector) MapDivide(d float64) {
	for i := range arv.data {
		arv.data[i] /= d
	}
}
// ebeBinary applies op element-wise to arv and vec and returns the
// result as a new vector, using a fast path when vec is also an
// ArrayRealVector. It panics when the dimensions differ. This helper
// replaces four near-identical method bodies.
func (arv *ArrayRealVector) ebeBinary(vec RealVector, op func(a, b float64) float64) RealVector {
	if v, ok := vec.(*ArrayRealVector); ok {
		// Fast path: direct slice access, no At() indirection.
		if err := checkDimensions(arv, len(v.data)); err != nil {
			panic(err)
		}
		ret, _ := NewSizedArrayRealVector(len(v.data))
		for i, b := range v.data {
			ret.data[i] = op(arv.data[i], b)
		}
		return ret
	}
	if err := checkVectorDimensions(arv, vec); err != nil {
		panic(err)
	}
	ret, _ := NewSizedArrayRealVector(vec.Dimension())
	for i, a := range arv.data {
		ret.data[i] = op(a, vec.At(i))
	}
	return ret
}

// Add returns the element-wise sum of arv and vec as a new vector.
// Panics when the dimensions differ.
func (arv *ArrayRealVector) Add(vec RealVector) RealVector {
	return arv.ebeBinary(vec, func(a, b float64) float64 { return a + b })
}

// Subtract returns the element-wise difference arv - vec as a new
// vector. Panics when the dimensions differ.
func (arv *ArrayRealVector) Subtract(vec RealVector) RealVector {
	return arv.ebeBinary(vec, func(a, b float64) float64 { return a - b })
}

// EBEMultiply returns the element-by-element product of arv and vec as
// a new vector. Panics when the dimensions differ.
func (arv *ArrayRealVector) EBEMultiply(vec RealVector) RealVector {
	return arv.ebeBinary(vec, func(a, b float64) float64 { return a * b })
}

// EBEDivide returns the element-by-element quotient arv / vec as a new
// vector. Panics when the dimensions differ.
func (arv *ArrayRealVector) EBEDivide(vec RealVector) RealVector {
	return arv.ebeBinary(vec, func(a, b float64) float64 { return a / b })
}
// DataRef returns the vector's underlying data slice (not a copy);
// mutating it mutates the vector.
func (arv *ArrayRealVector) DataRef() []float64 {
	return arv.data
}

// Unitize scales the vector in place to unit Euclidean norm.
// It panics when the norm is zero.
func (arv *ArrayRealVector) Unitize() {
	n := VecNorm(arv)
	if n == 0 {
		panic(mathArithmeticErrorf(zero_norm))
	}
	arv.MapDivide(n)
}

// Iterator returns an iterator over the vector's entries.
func (arv *ArrayRealVector) Iterator() EntryIterator {
	return newEntryIterator(arv)
}

// At returns the entry at index. It panics on an out-of-range index.
func (arv *ArrayRealVector) At(index int) float64 {
	if err := checkIndex(arv, index); err != nil {
		panic(err)
	}
	return arv.data[index]
}

// Dimension returns the number of entries in the vector.
func (arv *ArrayRealVector) Dimension() int {
	return len(arv.data)
}
// AppendVector returns a new vector consisting of arv's entries
// followed by vec's entries, using a fast path when vec is also an
// ArrayRealVector.
func (arv *ArrayRealVector) AppendVector(vec RealVector) RealVector {
	if v, ok := vec.(*ArrayRealVector); ok {
		r, err := NewArrayRealVectorFromTwoArrayRealVector(arv, v)
		if err != nil {
			panic(err)
		}
		return r
	}
	r, err := NewArrayRealVectorFromTwoRealVector(arv, vec)
	if err != nil {
		panic(err)
	}
	return r
}

// Append returns a new vector consisting of arv's entries followed by
// the single value in.
func (arv *ArrayRealVector) Append(in float64) RealVector {
	out := make([]float64, 0, len(arv.data)+1)
	out = append(out, arv.data...)
	out = append(out, in)
	r, err := NewArrayRealVector(out, false)
	if err != nil {
		panic(err)
	}
	return r
}
// SubVector returns a copy of n entries starting at index.
// It panics when n is negative or the range is out of bounds.
func (arv *ArrayRealVector) SubVector(index, n int) RealVector {
	if n < 0 {
		panic(notPositiveErrorf(number_of_elements_should_be_positive, float64(n)))
	}
	out, err := NewSizedArrayRealVector(n)
	if err != nil {
		panic(err)
	}
	if err := checkIndex(arv, index); err != nil {
		panic(err)
	}
	if err := checkIndex(arv, index+n-1); err != nil {
		panic(err)
	}
	copy(out.data, arv.data[index:index+n])
	return out
}

// SetEntry sets the entry at index to value. Panics on a bad index.
func (arv *ArrayRealVector) SetEntry(index int, value float64) {
	if err := checkIndex(arv, index); err != nil {
		panic(err)
	}
	arv.data[index] = value
}

// AddToEntry adds increment to the entry at index. Panics on a bad index.
func (arv *ArrayRealVector) AddToEntry(index int, increment float64) {
	if err := checkIndex(arv, index); err != nil {
		panic(err)
	}
	arv.data[index] += increment
}
// SetSubVector copies all entries of vec into arv starting at index.
// It panics when the target range is out of bounds.
func (arv *ArrayRealVector) SetSubVector(index int, vec RealVector) {
	if err := checkIndex(arv, index); err != nil {
		panic(err)
	}
	if err := checkIndex(arv, index+vec.Dimension()-1); err != nil {
		panic(err)
	}
	if v, ok := vec.(*ArrayRealVector); ok {
		arv.SetSubVectorWithSlice(index, v.data)
		// Done. (The previous version fell through to the generic loop
		// below and copied the data a second time.)
		return
	}
	for i := index; i < index+vec.Dimension(); i++ {
		arv.data[i] = vec.At(i - index)
	}
}
// SetSubVectorWithSlice copies v into arv starting at index.
// It panics when the target range is out of bounds.
func (arv *ArrayRealVector) SetSubVectorWithSlice(index int, v []float64) {
	if err := checkIndex(arv, index); err != nil {
		panic(err)
	}
	if err := checkIndex(arv, index+len(v)-1); err != nil {
		panic(err)
	}
	copy(arv.data[index:], v)
}

// Set assigns v to every entry of the vector.
func (arv *ArrayRealVector) Set(v float64) {
	for i := range arv.data {
		arv.data[i] = v
	}
}
// ToArray returns a fresh copy of the vector's data.
func (arv *ArrayRealVector) ToArray() []float64 {
	return append([]float64{}, arv.data...)
}

// IsNaN reports whether any entry of the vector is NaN.
func (arv *ArrayRealVector) IsNaN() bool {
	for _, x := range arv.data {
		if math.IsNaN(x) {
			return true
		}
	}
	return false
}

// IsInf reports whether the vector contains an infinite entry.
// A vector containing NaN is not considered infinite.
func (arv *ArrayRealVector) IsInf() bool {
	if arv.IsNaN() {
		return false
	}
	for _, x := range arv.data {
		// math.IsInf(x, 0) matches infinity of either sign, replacing
		// the previous pair of IsInf(x, 1) || IsInf(x, -1) calls.
		if math.IsInf(x, 0) {
			return true
		}
	}
	return false
}
// Equals reports whether other is a RealVector with the same dimension
// and identical entries. When the right-hand side contains NaN, the
// vectors are equal exactly when this vector also contains NaN.
func (arv *ArrayRealVector) Equals(other interface{}) bool {
	if arv == other {
		return true
	}
	rhs, ok := other.(RealVector)
	if !ok {
		return false
	}
	if rhs.Dimension() != len(arv.data) {
		return false
	}
	if rhs.IsNaN() {
		return arv.IsNaN()
	}
	for i, x := range arv.data {
		if x != rhs.At(i) {
			return false
		}
	}
	return true
}

// Combine sets arv = a*arv + b*y element-wise, in place.
// It panics when the dimensions differ.
func (arv *ArrayRealVector) Combine(a, b float64, y RealVector) {
	if err := checkVectorDimensions(arv, y); err != nil {
		panic(err)
	}
	for i, x := range arv.data {
		arv.data[i] = a*x + b*y.At(i)
	}
}

// WalkInDefaultOrder visits every entry in index order with a
// read-only visitor and returns the visitor's end value.
func (arv *ArrayRealVector) WalkInDefaultOrder(visitor RealVectorPreservingVisitor) float64 {
	n := len(arv.data)
	visitor.Start(n, 0, n-1)
	for i, x := range arv.data {
		visitor.Visit(i, x)
	}
	return visitor.End()
}

// WalkInDefaultOrderBounded visits entries start..end (inclusive) in
// index order with a read-only visitor.
func (arv *ArrayRealVector) WalkInDefaultOrderBounded(visitor RealVectorPreservingVisitor, start, end int) float64 {
	if err := checkIndices(arv, start, end); err != nil {
		panic(err)
	}
	visitor.Start(len(arv.data), start, end)
	for i := start; i <= end; i++ {
		visitor.Visit(i, arv.data[i])
	}
	return visitor.End()
}

// WalkInUpdateDefaultOrder visits every entry in index order with a
// modifying visitor, storing each returned value back into the vector.
func (arv *ArrayRealVector) WalkInUpdateDefaultOrder(visitor RealVectorChangingVisitor) float64 {
	n := len(arv.data)
	visitor.Start(n, 0, n-1)
	for i, x := range arv.data {
		arv.data[i] = visitor.Visit(i, x)
	}
	return visitor.End()
}

// WalkInUpdateDefaultOrderBounded visits entries start..end (inclusive)
// with a modifying visitor, storing each returned value back.
func (arv *ArrayRealVector) WalkInUpdateDefaultOrderBounded(visitor RealVectorChangingVisitor, start, end int) float64 {
	if err := checkIndices(arv, start, end); err != nil {
		panic(err)
	}
	visitor.Start(len(arv.data), start, end)
	for i := start; i <= end; i++ {
		arv.data[i] = visitor.Visit(i, arv.data[i])
	}
	return visitor.End()
}

// For a dense array-backed vector the "optimized" traversal order is
// simply the default index order, so the variants below delegate.

// WalkInOptimizedOrder visits every entry with a read-only visitor.
func (arv *ArrayRealVector) WalkInOptimizedOrder(visitor RealVectorPreservingVisitor) float64 {
	return arv.WalkInDefaultOrder(visitor)
}

// WalkInOptimizedOrderBounded visits entries start..end with a
// read-only visitor.
func (arv *ArrayRealVector) WalkInOptimizedOrderBounded(visitor RealVectorPreservingVisitor, start, end int) float64 {
	return arv.WalkInDefaultOrderBounded(visitor, start, end)
}

// WalkInUpdateOptimizedOrder visits every entry with a modifying visitor.
func (arv *ArrayRealVector) WalkInUpdateOptimizedOrder(visitor RealVectorChangingVisitor) float64 {
	return arv.WalkInUpdateDefaultOrder(visitor)
}

// WalkInUpdateOptimizedOrderBounded visits entries start..end with a
// modifying visitor.
func (arv *ArrayRealVector) WalkInUpdateOptimizedOrderBounded(visitor RealVectorChangingVisitor, start, end int) float64 {
	return arv.WalkInUpdateDefaultOrderBounded(visitor, start, end)
}
// checkIndex returns an out-of-range error unless 0 <= idx < v.Dimension().
func checkIndex(v RealVector, idx int) error {
	dim := v.Dimension()
	if idx >= 0 && idx < dim {
		return nil
	}
	return outOfRangeErrorf(index, float64(idx), 0, float64(dim)-1)
}
// checkIndices validates an inclusive index range [start, end] against v's
// dimension: both endpoints must lie in [0, dim) and end must not precede
// start. Errors are reported in that order of precedence.
func checkIndices(v RealVector, start, end int) error {
	dim := v.Dimension()
	if start < 0 || start >= dim {
		return outOfRangeErrorf(index, float64(start), 0, float64(dim-1))
	}
	if end < 0 || end >= dim {
		return outOfRangeErrorf(index, float64(end), 0, float64(dim-1))
	}
	if end < start {
		return numberIsTooSmallErrorf(initial_row_after_final_row, float64(end), float64(start), false)
	}
	return nil
}
func checkVectorDimensions(v1, v2 RealVector) error {
return checkDimensions(v1, v2.Dimension())
}
func checkDimensions(v RealVector, n int) error {
if v.Dimension() != n {
return dimensionsMismatchSimpleErrorf(v.Dimension(), n)
}
return nil
} | array_real_vector.go | 0.903705 | 0.72431 | array_real_vector.go | starcoder |
package aws
import (
"testing"
"github.com/gruntwork-io/terratest/modules/ssh"
)
// FetchContentsOfFileFromInstance looks up the public IP address of the EC2
// Instance with the given ID, connects to it via SSH using the given username
// and Key Pair, fetches the file at filePath (with sudo if useSudo is true),
// and returns its contents. Fails the test on any error.
func FetchContentsOfFileFromInstance(t *testing.T, awsRegion string, sshUserName string, keyPair *Ec2Keypair, instanceID string, useSudo bool, filePath string) string {
	contents, err := FetchContentsOfFileFromInstanceE(t, awsRegion, sshUserName, keyPair, instanceID, useSudo, filePath)
	if err != nil {
		t.Fatal(err)
	}
	return contents
}
// FetchContentsOfFileFromInstanceE looks up the public IP address of the EC2
// Instance with the given ID, connects to it via SSH using the given username
// and Key Pair, fetches the file at filePath (with sudo if useSudo is true),
// and returns its contents, or an error.
func FetchContentsOfFileFromInstanceE(t *testing.T, awsRegion string, sshUserName string, keyPair *Ec2Keypair, instanceID string, useSudo bool, filePath string) (string, error) {
	ip, err := GetPublicIpOfEc2InstanceE(t, instanceID, awsRegion)
	if err != nil {
		return "", err
	}
	target := ssh.Host{
		Hostname:    ip,
		SshUserName: sshUserName,
		SshKeyPair:  keyPair.KeyPair,
	}
	return ssh.FetchContentsOfFileE(t, target, useSudo, filePath)
}
// FetchContentsOfFilesFromInstance looks up the public IP address of the EC2
// Instance with the given ID, connects to it via SSH using the given username
// and Key Pair, fetches each file in filePaths (with sudo if useSudo is
// true), and returns a map from file path to contents. Fails the test on any
// error.
func FetchContentsOfFilesFromInstance(t *testing.T, awsRegion string, sshUserName string, keyPair *Ec2Keypair, instanceID string, useSudo bool, filePaths ...string) map[string]string {
	contents, err := FetchContentsOfFilesFromInstanceE(t, awsRegion, sshUserName, keyPair, instanceID, useSudo, filePaths...)
	if err != nil {
		t.Fatal(err)
	}
	return contents
}
// FetchContentsOfFilesFromInstanceE looks up the public IP address of the EC2
// Instance with the given ID, connects to it via SSH using the given username
// and Key Pair, fetches each file in filePaths (with sudo if useSudo is
// true), and returns a map from file path to contents, or an error.
func FetchContentsOfFilesFromInstanceE(t *testing.T, awsRegion string, sshUserName string, keyPair *Ec2Keypair, instanceID string, useSudo bool, filePaths ...string) (map[string]string, error) {
	ip, err := GetPublicIpOfEc2InstanceE(t, instanceID, awsRegion)
	if err != nil {
		return nil, err
	}
	target := ssh.Host{
		Hostname:    ip,
		SshUserName: sshUserName,
		SshKeyPair:  keyPair.KeyPair,
	}
	return ssh.FetchContentsOfFilesE(t, target, useSudo, filePaths...)
}
// FetchContentsOfFileFromAsg looks up the EC2 Instances in the given ASG and
// their public IPs, connects to each Instance via SSH using the given
// username and Key Pair, fetches the file at filePath (with sudo if useSudo
// is true), and returns a map from Instance ID to file contents. Fails the
// test on any error.
func FetchContentsOfFileFromAsg(t *testing.T, awsRegion string, sshUserName string, keyPair *Ec2Keypair, asgName string, useSudo bool, filePath string) map[string]string {
	contents, err := FetchContentsOfFileFromAsgE(t, awsRegion, sshUserName, keyPair, asgName, useSudo, filePath)
	if err != nil {
		t.Fatal(err)
	}
	return contents
}
// FetchContentsOfFileFromAsgE looks up the EC2 Instances in the given ASG and
// their public IPs, connects to each Instance via SSH using the given
// username and Key Pair, fetches the file at filePath (with sudo if useSudo
// is true), and returns a map from Instance ID to file contents, or an error.
func FetchContentsOfFileFromAsgE(t *testing.T, awsRegion string, sshUserName string, keyPair *Ec2Keypair, asgName string, useSudo bool, filePath string) (map[string]string, error) {
	instanceIDs, err := GetInstanceIdsForAsgE(t, asgName, awsRegion)
	if err != nil {
		return nil, err
	}
	instanceIdToContents := map[string]string{}
	for _, instanceID := range instanceIDs {
		contents, err := FetchContentsOfFileFromInstanceE(t, awsRegion, sshUserName, keyPair, instanceID, useSudo, filePath)
		if err != nil {
			return nil, err
		}
		instanceIdToContents[instanceID] = contents
	}
	// Success: return a literal nil rather than the loop's (always-nil) err,
	// which was misleading in the original.
	return instanceIdToContents, nil
}
// FetchContentsOfFilesFromAsg looks up the EC2 Instances in the given ASG and
// their public IPs, connects to each Instance via SSH using the given
// username and Key Pair, fetches each file in filePaths (with sudo if useSudo
// is true), and returns a map from Instance ID to a map of file path to
// contents. Fails the test on any error.
func FetchContentsOfFilesFromAsg(t *testing.T, awsRegion string, sshUserName string, keyPair *Ec2Keypair, asgName string, useSudo bool, filePaths ...string) map[string]map[string]string {
	contents, err := FetchContentsOfFilesFromAsgE(t, awsRegion, sshUserName, keyPair, asgName, useSudo, filePaths...)
	if err != nil {
		t.Fatal(err)
	}
	return contents
}
// FetchContentsOfFilesFromAsgE looks up the EC2 Instances in the given ASG, looks up the public IPs of those EC2
// Instances, connects to each Instance via SSH using the given username and Key Pair, fetches the contents of the files
// at the given paths (using sudo if useSudo is true), and returns a map from Instance ID to a map of file path to the
// contents of that file as a string.
func FetchContentsOfFilesFromAsgE(t *testing.T, awsRegion string, sshUserName string, keyPair *Ec2Keypair, asgName string, useSudo bool, filePaths ...string) (map[string]map[string]string, error) {
instanceIDs, err := GetInstanceIdsForAsgE(t, asgName, awsRegion)
if err != nil {
return nil, err
}
instanceIdToFilePathToContents := map[string]map[string]string{}
for _, instanceID := range instanceIDs {
contents, err := FetchContentsOfFilesFromInstanceE(t, awsRegion, sshUserName, keyPair, instanceID, useSudo, filePaths...)
if err != nil {
return nil, err
}
instanceIdToFilePathToContents[instanceID] = contents
}
return instanceIdToFilePathToContents, err
} | modules/aws/ec2-files.go | 0.543106 | 0.413951 | ec2-files.go | starcoder |
package num
import (
"errors"
"fmt"
"math"
"strings"
)
const (
	kiB float64 = 1024
	miB float64 = kiB * 1024
	giB float64 = miB * 1024
)

/*
Bytes renders n (a byte count) as a compact human-friendly string, choosing
the largest unit that applies and showing at most one digit after the decimal
point. Although the output is labelled GB/MB/KB for readability, units are
computed with 1024-based (binary) multiples. Fractions very close to a whole
number are rounded away.

	println(num.Bytes(70000000000)) // 65.2GB
	println(num.Bytes(2000000))     // 2MB
	println(num.Bytes(20000))       // 19.5KB
	println(num.Bytes(10))          // 10B
*/
func Bytes(n int) string {
	const threshold = 0.95
	f := float64(n)
	var unit string
	switch {
	case f/giB > threshold:
		unit, f = "GB", f/giB
	case f/miB > threshold:
		unit, f = "MB", f/miB
	case f/kiB > threshold:
		unit, f = "KB", f/kiB
	default:
		// Too small for any unit: plain byte count.
		return fmt.Sprintf("%dB", n)
	}
	whole := float64(int(f))
	frac := f - whole
	// Near-integral values are rounded and shown without a decimal point.
	if frac < 0.1 || frac > 0.9 {
		return fmt.Sprintf("%.0f%s", math.Round(f), unit)
	}
	return fmt.Sprintf("%.1f%s", f, unit)
}
/*
Roman converts n to a Roman numeral string. It returns an error when n is
zero (there is no Roman numeral for zero) or negative.

Note that M (1000) is the largest single numeral, so large inputs such as
7412 yield long strings like "MMMMMMMCDXII".

	s, _ := Roman(1)    // "I"
	s, _ = Roman(4)     // "IV"
	s, _ = Roman(467)   // "CDLXVII"
	s, _ = Roman(1991)  // "MCMXCI"
*/
func Roman(n int) (string, error) {
	if n == 0 {
		return "", errors.New("Input cannot be 0.")
	}
	if n < 0 {
		return "", fmt.Errorf("Input cannot be a negative number. Got %d.", n)
	}
	// Numeral values and letters, in descending order, including the
	// subtractive forms (CM, CD, XC, XL, IX, IV).
	values := []int{1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1}
	letters := []string{"M", "CM", "D", "CD", "C", "XC", "L", "XL", "X", "IX", "V", "IV", "I"}
	var b strings.Builder
	for i, v := range values {
		// Emit each numeral as many times as it fits.
		for n >= v {
			b.WriteString(letters[i])
			n -= v
		}
	}
	return b.String(), nil
}
// WordFloat renders f in English with the given number of digits after the
// decimal point, e.g. WordFloat(1.5, 1) == "one point five". The sign is
// spoken once for the whole number; previously a negative input produced
// "negative" on every fractional digit as well, which is fixed here by
// taking the absolute value of the fractional part.
func WordFloat(f float64, precision int) string {
	// Get words for the whole-number part, then remove it from f.
	s := Word(int(f))
	f -= float64(int(f))
	if f < 0 {
		// The sign was already spoken by Word above.
		f = -f
	}
	if precision > 0 {
		s += " point"
	}
	for i := 0; i < precision; i++ {
		// Shift the next fractional digit into the ones place...
		n := int(f * 10)
		// ...then strip it from the remaining fraction.
		f = f*10 - float64(n)
		s += " " + Word(n)
	}
	return s
}

/*
Word takes n and returns an English language rendition of it.

	Word(7232) // "seven thousand two hundred and thirty-two"
	Word(-5)   // "negative five"
*/
func Word(n int) string {
	// Zero screws with our logic so we handle it here.
	if n == 0 {
		return "zero"
	}
	/*
		We have to record n's sign here because we
		modify n below. We also force it to be a
		positive number so we can use the same logic
		for negative or positive.
	*/
	var s string
	var negative bool
	if n < 0 {
		negative = true
		n = -n
	}
	type unit struct {
		number int
		word   string
	}
	units := []unit{
		{1000000000, "billion"},
		{1000000, "million"},
		{1000, "thousand"},
		{100, "hundred"},
		{90, "ninety"},
		{80, "eighty"},
		{70, "seventy"},
		{60, "sixty"},
		{50, "fifty"},
		{40, "forty"}, // fixed spelling: was "fourty"
		{30, "thirty"},
		{20, "twenty"},
		{19, "nineteen"},
		{18, "eighteen"},
		{17, "seventeen"},
		{16, "sixteen"},
		{15, "fifteen"},
		{14, "fourteen"},
		{13, "thirteen"},
		{12, "twelve"},
		{11, "eleven"},
		{10, "ten"},
		{9, "nine"},
		{8, "eight"},
		{7, "seven"},
		{6, "six"},
		{5, "five"},
		{4, "four"},
		{3, "three"},
		{2, "two"},
		{1, "one"},
	}
	for _, u := range units {
		instances := n / u.number
		n %= u.number
		if instances == 0 {
			continue
		}
		/*
			If we've already got preceding words and there's
			no trailing hyphen we should add "and" before
			numbers less than 100 - e.g. two hundred and five,
			six thousand and eighty-four, etc. Regardless, we
			always add a trailing space.
		*/
		if len(s) > 0 && !strings.HasSuffix(s, "-") {
			if u.number < 100 {
				s += " and"
			}
			s += " "
		}
		if instances == 1 {
			/*
				Single instances of "hundred" and greater units
				("thousand", etc) need to be prefixed with "one"
				- e.g. one hundred, one thousand, etc.
			*/
			if u.number >= 100 {
				s += "one "
			}
			// Add the actual word.
			s += u.word
			/*
				If there's still more of n left and the number
				we're currently dealing with is less than 100
				we need a hyphen - e.g. sixty-nine.
			*/
			if u.number < 100 && n > 0 {
				s += "-"
			}
			continue
		}
		/*
			If there are multiple instances of the unit number -
			e.g. in 2,400,000 there are two instances of the unit
			"million" - we recurse to get the word for the number
			of instances.
		*/
		s += Word(instances) + " " + u.word
	}
	/*
		We prefix "negative" right before returning
		otherwise it messes with the conditionals
		that decide when to add "and" between words.
	*/
	if negative {
		s = "negative " + s
	}
	return s
}
/*
Alpha converts n to a base-52 string whose numerals are the upper- then
lower-case alphabet characters ("A" is zero). Returns an error if n is
negative.

	s, _ := Alpha(0)  // "A"
	s, _ = Alpha(25)  // "Z"
	s, _ = Alpha(52)  // "AA"
*/
func Alpha(n int) (string, error) {
	return Encode(n, "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz")
}
/*
Encode converts n to a string that uses the characters in encoding as its
numerals. Returns an error if: n is negative, encoding is an empty string,
encoding contains less than two characters, or encoding contains duplicate
characters.

Multi-byte characters such as kanji, emojis, and so on are treated as a
single character, so the base of the result is the number of characters (not
bytes) in encoding. The first character of encoding acts as the zero value,
which is why the encoding must contain at least two characters.

	s, _ := Encode(-1, "0123456789")       // Error; n is negative.
	s, _ = Encode(2, "")                   // Error; encoding is empty.
	s, _ = Encode(5, "A")                  // Error; fewer than 2 characters.
	s, _ = Encode(-1, "01123")             // Error; duplicate characters.
	s, _ = Encode(10, "0123456789")        // "10"
	s, _ = Encode(2, "世界")                // "界世"
	s, _ = Encode(67427, "!@#$%^&*()")     // "&*%#*"
*/
func Encode(n int, encoding string) (string, error) {
	if n < 0 {
		return "", fmt.Errorf("Input number cannot be negative. Got %d", n)
	}
	if encoding == "" {
		return "", errors.New("Encoding cannot be an empty string.")
	}
	enc := strings.Split(encoding, "")
	if err := uniqueSet(enc); err != nil {
		return "", err
	}
	length := len(enc)
	// The length check now runs before the n == 0 shortcut: previously
	// Encode(0, "A") returned "A" with no error, contradicting the
	// documented requirement of at least two characters.
	if length == 1 {
		return "", errors.New("Encoding must have at least two characters.")
	}
	if n == 0 {
		return enc[0], nil
	}
	// Standard base conversion: peel off the least significant digit each
	// pass and prepend its numeral.
	var result string
	quotient := n
	for quotient != 0 {
		remainder := quotient % length
		quotient /= length
		result = enc[remainder] + result
	}
	return result, nil
}

// uniqueSet returns an error naming the first string that appears more than
// once in ss; it returns nil when all elements are distinct.
func uniqueSet(ss []string) error {
	seen := make(map[string]bool, len(ss))
	for _, s := range ss {
		if seen[s] {
			return fmt.Errorf("%q appears multiple times.", s)
		}
		seen[s] = true
	}
	return nil
}
package assert
import (
"testing"
)
// Assertor ties the assertion helpers in this package to a *testing.T.
type Assertor struct {
	t *testing.T
}

// New returns an Assertor bound to t. Panics when t is nil, since every
// assertion needs a test to fail against.
func New(t *testing.T) Assertor {
	if t == nil {
		panic("`t` could not be nil")
	}
	return Assertor{t: t}
}

// AssertThatInt captures an int (widened to int64) for assertion.
func (a Assertor) AssertThatInt(got int) Integer {
	return Integer{got: int64(got), t: a.t}
}

// AssertThatString captures a string for assertion.
func (a Assertor) AssertThatString(got string) String {
	return String{got: got, t: a.t}
}

// AssertThatBool captures a bool for assertion.
func (a Assertor) AssertThatBool(got bool) Bool {
	return Bool{got: got, t: a.t}
}

// function is the niladic function shape accepted by AssertThatFunction.
type function func()

// AssertThatFunction captures a function for assertion (e.g. Panics).
func (a Assertor) AssertThatFunction(f function) Function {
	return Function{f: f, t: a.t}
}

// AssertThatArray captures a slice for assertion.
func (a Assertor) AssertThatArray(arr []interface{}) Array {
	return Array{arr: arr, t: a.t}
}

// AssertThatError captures an error for assertion.
func (a Assertor) AssertThatError(err error) Error {
	return Error{err: err, t: a.t}
}
// Integer asserts on an integer value captured from a test.
type Integer struct {
	got int64
	t   *testing.T
}

// IsEqualInt fails the test when the captured value differs from expected.
func (c Integer) IsEqualInt(expected int) {
	if int64(expected) != c.got {
		c.t.Fatalf("expected: %d, got %d", expected, c.got)
	}
}
// String asserts on a string value captured from a test.
type String struct {
	got string
	t   *testing.T
}

// IsEqual fails the test when the captured string differs from expected.
func (c String) IsEqual(expected string) {
	if expected != c.got {
		c.t.Fatalf("expected: %s, got %s", expected, c.got)
	}
}
// Bool asserts on a boolean value captured from a test.
type Bool struct {
	got bool
	t   *testing.T
}

// IsEqual fails the test when the captured bool differs from expected.
func (c Bool) IsEqual(expected bool) {
	if expected != c.got {
		c.t.Fatalf("expected: %t, got %t", expected, c.got)
	}
}
// Function asserts on a function captured from a test.
type Function struct {
	f function
	t *testing.T
}

// Panics fails the test unless the captured function panics when invoked.
// args is accepted for API compatibility and is currently unused.
func (c Function) Panics(args ...interface{}) {
	if !didPanic(c.f) {
		c.t.Fatalf("expected: panics")
	}
}

// didPanic reports whether f panics when called; the panic is recovered so
// it does not escape to the caller.
func didPanic(f function) (panicked bool) {
	defer func() {
		panicked = recover() != nil
	}()
	f()
	return
}
// Array asserts on a slice captured from a test.
type Array struct {
	arr []interface{}
	t   *testing.T
}

// IsEmpty fails the test unless the captured slice has length zero (a nil
// slice counts as empty).
func (a Array) IsEmpty() {
	if len(a.arr) > 0 {
		a.t.Fatalf("expected an empty slice, got %v", a.arr)
	}
}
// Error asserts on an error value captured from a test.
type Error struct {
	err error
	t   *testing.T
}

// IsNil fails the test when the captured error is non-nil.
func (e Error) IsNil() {
	if e.err != nil {
		// Report the wrapped error itself; the original printed the whole
		// Error struct (including the *testing.T) instead of the error.
		e.t.Fatalf("expected nil error, got %v", e.err)
	}
}

// IsNotNil fails the test when the captured error is nil.
func (e Error) IsNotNil() {
	if e.err == nil {
		e.t.Fatalf("expected not nil error, got nil")
	}
}
package testcomponent
import (
"context"
"encoding/json"
componenttest "github.com/ONSdigital/dp-component-test"
mongoDriver "github.com/ONSdigital/dp-mongodb/v3/mongodb"
"github.com/cucumber/godog"
"github.com/stretchr/testify/assert"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo"
)
// dataModel is the test fixture document stored in Mongo. Id maps to the
// Mongo "_id" field; Name and Age rely on the driver's default field-name
// mapping. Fields must stay exported so bson/json (un)marshalling sees them.
type dataModel struct {
	Id int `bson:"_id,omitempty" json:"id,omitempty"`
	Name string
	Age string
}

// MongoV2Component carries the state shared between godog steps: the raw and
// wrapped Mongo clients plus the result of the most recent find/insert/
// update/delete operation, which later assertion steps inspect.
type MongoV2Component struct {
	database string
	collection string
	rawClient mongo.Client
	testClient *mongoDriver.MongoConnection
	find *mongoDriver.Find // in-progress query built up by the "find" steps
	insertResult *mongoDriver.CollectionInsertManyResult
	updateResult *mongoDriver.CollectionUpdateResult
	deleteResult *mongoDriver.CollectionDeleteResult
	mustErrorResult error // error captured from a Must() call, asserted later
	ErrorFeature componenttest.ErrorFeature
}
// RegisterSteps wires every Gherkin step pattern to its handler on m. The
// regex patterns here are the public contract with the feature files, so a
// renamed handler must keep its pattern intact.
func (m *MongoV2Component) RegisterSteps(ctx *godog.ScenarioContext) {
	// Fixture setup and query-building steps.
	ctx.Step(`^I have inserted these Records$`, m.insertedTheseRecords)
	ctx.Step(`^I should receive these records$`, m.shouldReceiveTheseRecords)
	ctx.Step(`^I will count (\d+) records$`, m.countRecords)
	ctx.Step(`^I start a find operation`, m.findRecords)
	ctx.Step(`^I set the limit to (\d+)`, m.setLimit)
	ctx.Step(`^I skip (\d+) records$`, m.setSkip)
	ctx.Step(`^I find records with Id > (\d+)$`, m.findWithId)
	ctx.Step(`^I find this one record$`, m.findOneRecord)
	ctx.Step(`^I sort by ID desc`, m.sortByIdDesc)
	ctx.Step(`^I select the field "([^"]*)"$`, m.selectField)
	// Write operations.
	ctx.Step(`^I upsertById this record with id (\d+)$`, m.upsertRecordById)
	ctx.Step(`^I upsert this record with id (\d+)$`, m.upsertRecord)
	ctx.Step(`^I updateById this record with id (\d+)$`, m.updateRecordById)
	ctx.Step(`^I update this record with id (\d+)$`, m.updateRecord)
	ctx.Step(`^I deleteById a record with id (\d+)$`, m.deleteRecordById)
	ctx.Step(`^I delete a record with id (\d+)$`, m.deleteRecord)
	ctx.Step(`^I delete a record with name like (\w+)$`, m.deleteRecordByName)
	ctx.Step(`^I insert these records$`, m.insertRecords)
	// Result assertions.
	ctx.Step(`^there are (\d+) matched, (\d+) modified, (\d+) upserted records, with upsert Id of (\d+)$`, m.modifiedCountWithid)
	ctx.Step(`^there are (\d+) matched, (\d+) modified, (\d+) upserted records$`, m.modifiedCount)
	ctx.Step(`^there are (\d+) deleted records$`, m.deletedRecords)
	ctx.Step(`^this is the inserted records result$`, m.insertedRecords)
	// Error-path and Must() variants.
	ctx.Step(`^Itr All should fail with a wrapped error if an incorrect result param is provided$`, m.testErrorItrAll)
	ctx.Step(`^Find Itr All should fail with a wrapped error if an incorrect result param is provided$`, m.testFindErrorItrAll)
	ctx.Step(`^Find One should fail with an ErrNoDocumentFound error$`, m.testFindOneError)
	ctx.Step(`^I should receive a ErrNoDocumentFound error$`, m.testRecieveErrNoDocumentFoundError)
	ctx.Step(`^Must did not return an error$`, m.testMustDidNotReturnError)
	ctx.Step(`^I Must update this record with id (\d+)$`, m.mustUpdateRecord)
	ctx.Step(`^I Must updateById this record with id (\d+)$`, m.mustUpdateId)
	ctx.Step(`^I Must deleteById a record with id (\d+)$`, m.mustDeleteRecordById)
	ctx.Step(`^I Must delete a record with id (\d+)$`, m.mustDeleteRecord)
	ctx.Step(`^I Must delete records with name like (\w+)$`, m.mustDeleteRecordsByName)
}
// newMongoV2Component builds a component bound to the given database and
// collection, wrapping rawClient in a mongoDriver connection. Result fields
// and the error collector start at their zero values and are populated by
// the steps. Named fields replace the original fragile 10-value positional
// struct literal.
func newMongoV2Component(database string, collection string, rawClient mongo.Client) *MongoV2Component {
	return &MongoV2Component{
		database:   database,
		collection: collection,
		rawClient:  rawClient,
		testClient: mongoDriver.NewMongoConnection(&rawClient, database, collection),
	}
}
// reset clears all per-scenario state (pending query, operation results and
// the error collector) so one scenario's assertions cannot leak into the
// next. The connection fields are deliberately left intact.
func (m *MongoV2Component) reset() {
	m.find = nil
	m.insertResult = nil
	m.updateResult = nil
	m.deleteResult = nil
	m.mustErrorResult = nil
	m.ErrorFeature = componenttest.ErrorFeature{}
}
// insertedTheseRecords inserts each record from the step's JSON array using
// the raw Mongo client (bypassing the driver under test), then reads the
// collection back and asserts the round trip matches.
func (m *MongoV2Component) insertedTheseRecords(recordsJson *godog.DocString) error {
	foundRecords := make([]dataModel, 0)
	records := make([]dataModel, 0)
	collection := m.rawClient.Database(m.database).Collection(m.collection)
	err := json.Unmarshal([]byte(recordsJson.Content), &records)
	if err != nil {
		return err
	}
	// Seed the collection one document at a time via the raw client.
	for _, record := range records {
		_, err := collection.InsertOne(context.Background(), record)
		if err != nil {
			return err
		}
	}
	// Read everything back to confirm the fixture landed as expected.
	cursor, err := collection.Find(context.Background(), bson.D{})
	if err != nil {
		return err
	}
	err = cursor.All(context.Background(), &foundRecords)
	if err != nil {
		return err
	}
	assert.ElementsMatch(&m.ErrorFeature, records, foundRecords)
	return m.ErrorFeature.StepError()
}
// findRecords starts an unfiltered find; later steps refine it (limit, skip,
// sort, select) before it is executed.
func (m *MongoV2Component) findRecords() error {
	m.find = m.testClient.C(m.collection).Find(bson.D{})
	return nil
}

// shouldReceiveTheseRecords executes the pending find and asserts its
// results match the step's JSON array (order-insensitive).
func (m *MongoV2Component) shouldReceiveTheseRecords(recordsJson *godog.DocString) error {
	actualRecords := make([]dataModel, 0)
	err := m.find.Iter().All(context.Background(), &actualRecords)
	if err != nil {
		return err
	}
	expectedRecords := make([]dataModel, 0)
	err = json.Unmarshal([]byte(recordsJson.Content), &expectedRecords)
	if err != nil {
		return err
	}
	assert.ElementsMatch(&m.ErrorFeature, expectedRecords, actualRecords)
	return m.ErrorFeature.StepError()
}

// countRecords asserts the pending find matches exactly `expected` records.
func (m *MongoV2Component) countRecords(expected int) error {
	actual, err := m.find.Count(context.Background())
	if err != nil {
		return err
	}
	assert.EqualValues(&m.ErrorFeature, int(expected), int(actual))
	return m.ErrorFeature.StepError()
}
// setLimit caps the number of documents the pending find may return.
func (m *MongoV2Component) setLimit(limit int) error {
	m.find.Limit(limit)
	return nil
}

// setSkip makes the pending find skip the first `skip` documents.
func (m *MongoV2Component) setSkip(skip int) error {
	m.find.Skip(skip)
	return nil
}

// findWithId replaces the pending find with a query for _id > id.
func (m *MongoV2Component) findWithId(id int) error {
	m.find = m.testClient.C(m.collection).Find(bson.M{"_id": bson.M{"$gt": id}})
	return nil
}

// sortByIdDesc orders the pending find by _id descending.
func (m *MongoV2Component) sortByIdDesc() error {
	m.find.Sort(bson.D{{Key: "_id", Value: -1}})
	return nil
}

// selectField projects the pending find down to the named field.
func (m *MongoV2Component) selectField(field string) error {
	m.find.Select(bson.M{field: 1})
	return nil
}

// findOneRecord executes the pending find expecting a single document and
// asserts it equals the step's JSON record.
func (m *MongoV2Component) findOneRecord(recordAsString *godog.DocString) error {
	actualRecord := new(dataModel)
	expectedRecord := new(dataModel)
	err := json.Unmarshal([]byte(recordAsString.Content), expectedRecord)
	if err != nil {
		return err
	}
	err = m.find.One(context.Background(), &actualRecord)
	if err != nil {
		return err
	}
	assert.Equal(&m.ErrorFeature, expectedRecord, actualRecord)
	return m.ErrorFeature.StepError()
}
// upsertRecordById upserts name/age for the document with the given _id via
// the driver's UpsertById, recording the result for later assertion steps.
func (m *MongoV2Component) upsertRecordById(id int, recordAsString *godog.DocString) error {
	record := new(dataModel)
	err := json.Unmarshal([]byte(recordAsString.Content), &record)
	if err != nil {
		return err
	}
	upsert := bson.D{{Key: "$set", Value: bson.D{{Key: "name", Value: record.Name}, {Key: "age", Value: record.Age}}}}
	m.updateResult, err = m.testClient.C(m.collection).UpsertById(context.Background(), id, upsert)
	return err
}

// upsertRecord does the same upsert but through the selector-based Upsert,
// exercising the explicit _id query path.
func (m *MongoV2Component) upsertRecord(id int, recordAsString *godog.DocString) error {
	record := new(dataModel)
	err := json.Unmarshal([]byte(recordAsString.Content), &record)
	if err != nil {
		return err
	}
	idQuery := bson.D{{Key: "_id", Value: id}}
	upsert := bson.D{{Key: "$set", Value: bson.D{{Key: "name", Value: record.Name}, {Key: "age", Value: record.Age}}}}
	m.updateResult, err = m.testClient.C(m.collection).Upsert(context.Background(), idQuery, upsert)
	return err
}

// updateRecordById updates (no insert) name/age for the document with the
// given _id via UpdateById, recording the result.
func (m *MongoV2Component) updateRecordById(id int, recordAsString *godog.DocString) error {
	record := new(dataModel)
	err := json.Unmarshal([]byte(recordAsString.Content), &record)
	if err != nil {
		return err
	}
	update := bson.D{{Key: "$set", Value: bson.D{{Key: "name", Value: record.Name}, {Key: "age", Value: record.Age}}}}
	m.updateResult, err = m.testClient.C(m.collection).UpdateById(context.Background(), id, update)
	return err
}

// updateRecord does the same update through the selector-based Update.
func (m *MongoV2Component) updateRecord(id int, recordAsString *godog.DocString) error {
	record := new(dataModel)
	err := json.Unmarshal([]byte(recordAsString.Content), &record)
	if err != nil {
		return err
	}
	idQuery := bson.D{{Key: "_id", Value: id}}
	update := bson.D{{Key: "$set", Value: bson.D{{Key: "name", Value: record.Name}, {Key: "age", Value: record.Age}}}}
	m.updateResult, err = m.testClient.C(m.collection).Update(context.Background(), idQuery, update)
	return err
}
// deleteRecordById deletes the document with the given _id via DeleteById,
// recording the result for later assertion steps.
func (m *MongoV2Component) deleteRecordById(id int) error {
	var err error
	m.deleteResult, err = m.testClient.C(m.collection).DeleteById(context.Background(), id)
	return err
}

// deleteRecord deletes a single document matching _id via the selector-based
// Delete.
func (m *MongoV2Component) deleteRecord(id int) error {
	var err error
	idQuery := bson.D{{Key: "_id", Value: id}}
	m.deleteResult, err = m.testClient.C(m.collection).Delete(context.Background(), idQuery)
	return err
}

// deleteRecordByName deletes all documents whose name contains the given
// substring (regex match) via DeleteMany.
func (m *MongoV2Component) deleteRecordByName(name string) error {
	var err error
	selector := bson.D{{Key: "name", Value: primitive.Regex{Pattern: ".*" + name + ".*"}}}
	m.deleteResult, err = m.testClient.C(m.collection).DeleteMany(context.Background(), selector)
	return err
}

// modifiedCountWithid asserts the last update result's matched/modified/
// upserted counts and the upserted document's id.
func (m *MongoV2Component) modifiedCountWithid(matched, modified, upserted, upsertId int) error {
	assert.Equal(&m.ErrorFeature, matched, m.updateResult.MatchedCount)
	assert.Equal(&m.ErrorFeature, modified, m.updateResult.ModifiedCount)
	assert.Equal(&m.ErrorFeature, upserted, m.updateResult.UpsertedCount)
	assert.EqualValues(&m.ErrorFeature, upsertId, m.updateResult.UpsertedID)
	return m.ErrorFeature.StepError()
}

// modifiedCount asserts the last update result's counts and that no upsert
// id was produced.
func (m *MongoV2Component) modifiedCount(matched, modified, upserted int) error {
	assert.Equal(&m.ErrorFeature, matched, m.updateResult.MatchedCount)
	assert.Equal(&m.ErrorFeature, modified, m.updateResult.ModifiedCount)
	assert.Equal(&m.ErrorFeature, upserted, m.updateResult.UpsertedCount)
	assert.Empty(&m.ErrorFeature, m.updateResult.UpsertedID)
	return m.ErrorFeature.StepError()
}

// deletedRecords asserts how many documents the last delete removed.
func (m *MongoV2Component) deletedRecords(deleted int) error {
	assert.Equal(&m.ErrorFeature, deleted, m.deleteResult.DeletedCount)
	return m.ErrorFeature.StepError()
}
// insertRecords unmarshals the step's JSON array and inserts every record
// via the driver's InsertMany, recording the result for later assertion
// steps. The original hard-coded records[0] and records[1], which panicked
// for fewer than two records and silently dropped any extras.
func (m *MongoV2Component) insertRecords(recordsJson *godog.DocString) error {
	records := make([]dataModel, 0)
	err := json.Unmarshal([]byte(recordsJson.Content), &records)
	if err != nil {
		return err
	}
	// InsertMany needs []interface{}; copy every record across.
	testRecords := make([]interface{}, 0, len(records))
	for _, record := range records {
		testRecords = append(testRecords, record)
	}
	m.insertResult, err = m.testClient.C(m.collection).InsertMany(context.Background(), testRecords)
	return err
}
// insertedRecords asserts that the ids returned by the last InsertMany match
// the step's JSON array of ids.
// NOTE(review): this assumes the driver reports inserted ids as int32 —
// the type assertion below panics otherwise; confirm against the driver.
func (m *MongoV2Component) insertedRecords(recordsJson *godog.DocString) error {
	expected := make([]int32, 0)
	actual := make([]int32, 0)
	err := json.Unmarshal([]byte(recordsJson.Content), &expected)
	if err != nil {
		return err
	}
	for _, element := range m.insertResult.InsertedIds {
		actual = append(actual, element.(int32))
	}
	assert.ElementsMatch(&m.ErrorFeature, expected, actual)
	return m.ErrorFeature.StepError()
}
// testErrorItrAll drives Iter().All with an int target (not a slice) and
// asserts the driver wraps the failure as a server error.
func (m *MongoV2Component) testErrorItrAll() error {
	badResult := 1
	err := m.find.Iter().All(context.Background(), &badResult)
	assert.True(&m.ErrorFeature, mongoDriver.IsServerErr(err))
	return m.ErrorFeature.StepError()
}

// testFindErrorItrAll is the same misuse check through the IterAll shortcut.
func (m *MongoV2Component) testFindErrorItrAll() error {
	badResult := 1
	err := m.find.IterAll(context.Background(), &badResult)
	assert.True(&m.ErrorFeature, mongoDriver.IsServerErr(err))
	return m.ErrorFeature.StepError()
}

// testFindOneError asserts that One on a query matching nothing yields the
// driver's ErrNoDocumentFound.
func (m *MongoV2Component) testFindOneError() error {
	var result dataModel
	err := m.find.One(context.Background(), &result)
	assert.True(&m.ErrorFeature, mongoDriver.IsErrNoDocumentFound(err))
	return m.ErrorFeature.StepError()
}

// mustUpdateId runs UpdateById through Must(), capturing the error (rather
// than returning it) so a later step can assert on it.
func (m *MongoV2Component) mustUpdateId(id int, recordAsString *godog.DocString) error {
	record := new(dataModel)
	err := json.Unmarshal([]byte(recordAsString.Content), &record)
	if err != nil {
		return err
	}
	update := bson.M{"$set": record}
	m.updateResult, m.mustErrorResult = m.testClient.C(m.collection).Must().UpdateById(context.Background(), id, update)
	return nil
}

// mustUpdateRecord is the selector-based Must() update counterpart.
func (m *MongoV2Component) mustUpdateRecord(id int, recordAsString *godog.DocString) error {
	record := new(dataModel)
	err := json.Unmarshal([]byte(recordAsString.Content), &record)
	if err != nil {
		return err
	}
	idQuery := bson.M{"_id": id}
	update := bson.M{"$set": record}
	m.updateResult, m.mustErrorResult = m.testClient.C(m.collection).Must().Update(context.Background(), idQuery, update)
	return nil
}

// testRecieveErrNoDocumentFoundError asserts the captured Must() error is
// ErrNoDocumentFound. ("Recieve" misspelling is kept: RegisterSteps binds
// this name.)
func (m *MongoV2Component) testRecieveErrNoDocumentFoundError() error {
	assert.True(&m.ErrorFeature, mongoDriver.IsErrNoDocumentFound(m.mustErrorResult))
	return m.ErrorFeature.StepError()
}

// testMustDidNotReturnError asserts the last Must() call succeeded.
func (m *MongoV2Component) testMustDidNotReturnError() error {
	assert.NoError(&m.ErrorFeature, m.mustErrorResult)
	return m.ErrorFeature.StepError()
}
func (m *MongoV2Component) mustDeleteRecordById(id int) error {
m.deleteResult, m.mustErrorResult = m.testClient.C(m.collection).Must().DeleteById(context.Background(), id)
return nil
}
func (m *MongoV2Component) mustDeleteRecord(id int) error {
idQuery := bson.D{{Key: "_id", Value: id}}
m.deleteResult, m.mustErrorResult = m.testClient.C(m.collection).Must().Delete(context.Background(), idQuery)
return nil
}
func (m *MongoV2Component) mustDeleteRecordsByName(name string) error {
selector := bson.D{{Key: "name", Value: primitive.Regex{Pattern: ".*" + name + ".*"}}}
m.deleteResult, m.mustErrorResult = m.testClient.C(m.collection).Must().DeleteMany(context.Background(), selector)
return nil
} | testcomponent/steps.go | 0.541651 | 0.404155 | steps.go | starcoder |
package main
// whitePawnAttack reports whether a white pawn on (sourceRow, sourceCol)
// attacks the square (targetRow, targetCol): one row up the board (row index
// decreasing) and one column to either side.
func whitePawnAttack(sourceRow int, sourceCol int, targetRow int, targetCol int) bool {
	return targetRow == sourceRow-1 &&
		(targetCol == sourceCol-1 || targetCol == sourceCol+1)
}
// blackPawnAttack reports whether a black pawn on (sourceRow, sourceCol)
// attacks the square (targetRow, targetCol): one row down the board (row
// index increasing) and one column to either side.
func blackPawnAttack(sourceRow int, sourceCol int, targetRow int, targetCol int) bool {
	return targetRow == sourceRow+1 &&
		(targetCol == sourceCol-1 || targetCol == sourceCol+1)
}
// bishopAttack reports whether a bishop on (sourceRow, sourceCol) can attack
// (targetRow, targetCol) on the package-level ChessBoard. Each of the four
// loops walks one diagonal outward from the source square: an empty square
// ("--") lets the scan continue, and the first non-empty square either is the
// target itself (a capture) or blocks the rest of the ray.
// NOTE(review): each loop bound compares only i against targetRow, so targets
// that are not actually on the diagonal are rejected by the inner
// j == targetCol check rather than by the bound — confirm this is intended.
func bishopAttack(sourceRow int, sourceCol int, targetRow int, targetCol int) bool {
	for i, j := sourceRow-1, sourceCol-1; i >= targetRow; i, j = i-1, j-1 { //top left diagonal
		if i >= 0 && j >= 0 && ChessBoard[i][j] == "--" {
			if i == targetRow && j == targetCol {
				return true
			}
		} else { //encountered a piece on the diagonal (or ran off the board)
			if i == targetRow && j == targetCol { //the piece could be our actual target
				return true
			}
			break
		}
	}
	for i, j := sourceRow-1, sourceCol+1; i >= targetRow; i, j = i-1, j+1 { //top right diagonal
		if i >= 0 && j <= 7 && ChessBoard[i][j] == "--" {
			if i == targetRow && j == targetCol {
				return true
			}
		} else { //encountered a piece on the diagonal (or ran off the board)
			if i == targetRow && j == targetCol { //the piece could be our actual target
				return true
			}
			break
		}
	}
	for i, j := sourceRow+1, sourceCol-1; i <= targetRow; i, j = i+1, j-1 { //bottom left diagonal
		if i <= 7 && j >= 0 && ChessBoard[i][j] == "--" {
			if i == targetRow && j == targetCol {
				return true
			}
		} else { //encountered a piece on the diagonal (or ran off the board)
			if i == targetRow && j == targetCol { //the piece could be our actual target
				return true
			}
			break
		}
	}
	for i, j := sourceRow+1, sourceCol+1; i <= targetRow; i, j = i+1, j+1 { //bottom right diagonal
		if i <= 7 && j <= 7 && ChessBoard[i][j] == "--" {
			if i == targetRow && j == targetCol {
				return true
			}
		} else { //encountered a piece on the diagonal (or ran off the board)
			if i == targetRow && j == targetCol { //the piece could be our actual target
				return true
			}
			break
		}
	}
	return false
}
// knightAttack reports whether a knight on (sourceRow, sourceCol) attacks
// (targetRow, targetCol). A knight move is any L-shape: two squares along one
// axis and one square along the other, so the absolute row/column deltas must
// be {1,2} in some order. Knights jump, so no blocking check is needed.
//
// Fixed: the original's last two clauses compared the same expression against
// two different values (e.g. targetRow-sourceRow == 1 && targetRow-sourceRow == 2),
// which can never be true, so the four (±1,±2) knight moves were never detected.
func knightAttack(sourceRow int, sourceCol int, targetRow int, targetCol int) bool {
	dr := targetRow - sourceRow
	dc := targetCol - sourceCol
	if dr < 0 {
		dr = -dr
	}
	if dc < 0 {
		dc = -dc
	}
	return (dr == 2 && dc == 1) || (dr == 1 && dc == 2)
}
//rook + bishop movments
func queenAttack(sourceRow int, sourceCol int, targetRow int, targetCol int) bool {
//bishop movements
for i, j := sourceRow-1, sourceCol-1; i >= targetRow; i, j = i-1, j-1 { //top left diagonal
if i >= 0 && j >= 0 && ChessBoard[i][j] == "--" {
if i == targetRow && j == targetCol {
return true
}
} else { //encountered a piece on the diagonal
if i == targetRow && j == targetCol { //the piece could be our actual target
return true
}
break
}
}
for i, j := sourceRow-1, sourceCol+1; i >= targetRow; i, j = i-1, j+1 { //top right diagonal
if i >= 0 && j <= 7 && ChessBoard[i][j] == "--" {
if i == targetRow && j == targetCol {
return true
}
} else { //encountered a piece on the diagonal
if i == targetRow && j == targetCol { //the piece could be our actual target
return true
}
break
}
}
for i, j := sourceRow+1, sourceCol-1; i <= targetRow; i, j = i+1, j-1 { //bottom left diagonal
if i <= 7 && j >= 0 && ChessBoard[i][j] == "--" {
if i == targetRow && j == targetCol {
return true
}
} else { //encountered a piece on the diagonal
if i == targetRow && j == targetCol { //the piece could be our actual target
return true
}
break
}
}
for i, j := sourceRow+1, sourceCol+1; i <= targetRow; i, j = i+1, j+1 { //bottom right diagonal
if i <= 7 && j <= 7 && ChessBoard[i][j] == "--" {
if i == targetRow && j == targetCol {
return true
}
} else { //encountered a piece on the diagonal
if i == targetRow && j == targetCol { //the piece could be our actual target
return true
}
break
}
}
//rook movements
for i := sourceRow + 1; i <= targetRow; i++ { //up
if i <= 7 && ChessBoard[i][sourceCol] == "--" {
if i == targetRow && sourceCol == targetCol {
return true
}
} else {
if i == targetRow && sourceCol == targetCol {
return true
}
break
}
}
for i := sourceRow - 1; i >= targetRow; i-- { //down
if i >= 0 && ChessBoard[i][sourceCol] == "--" {
if i == targetRow && sourceCol == targetCol {
return true
}
} else {
if i == targetRow && sourceCol == targetCol {
return true
}
break
}
}
for i := sourceCol + 1; i <= targetCol; i++ { //right
if i <= 7 && ChessBoard[sourceRow][i] == "--" {
if i == targetCol && sourceRow == targetRow {
return true
}
} else {
if i == targetCol && sourceRow == targetRow {
return true
}
break
}
}
for i := sourceCol - 1; i >= targetCol; i-- { //left
if i >= 0 && ChessBoard[sourceRow][i] == "--" {
if i == targetCol && sourceRow == targetRow {
return true
}
} else {
if i == targetCol && sourceRow == targetRow {
return true
}
break
}
}
return false
}
// rookAttack reports whether a rook on (sourceRow, sourceCol) can attack
// (targetRow, targetCol) on the package-level ChessBoard. Each loop walks one
// rank/file direction from the source: empty squares ("--") let the scan
// continue, and the first occupied square either is the target (a capture) or
// blocks the ray. The inner checks also require sourceCol == targetCol (or
// sourceRow == targetRow), so targets not on the same file/rank are rejected.
func rookAttack(sourceRow int, sourceCol int, targetRow int, targetCol int) bool {
	for i := sourceRow + 1; i <= targetRow; i++ { //up (increasing row index)
		if i <= 7 && ChessBoard[i][sourceCol] == "--" {
			if i == targetRow && sourceCol == targetCol {
				return true
			}
		} else { //piece encountered or off the board
			if i == targetRow && sourceCol == targetCol {
				return true
			}
			break
		}
	}
	for i := sourceRow - 1; i >= targetRow; i-- { //down (decreasing row index)
		if i >= 0 && ChessBoard[i][sourceCol] == "--" {
			if i == targetRow && sourceCol == targetCol {
				return true
			}
		} else { //piece encountered or off the board
			if i == targetRow && sourceCol == targetCol {
				return true
			}
			break
		}
	}
	for i := sourceCol + 1; i <= targetCol; i++ { //right
		if i <= 7 && ChessBoard[sourceRow][i] == "--" {
			if i == targetCol && sourceRow == targetRow {
				return true
			}
		} else { //piece encountered or off the board
			if i == targetCol && sourceRow == targetRow {
				return true
			}
			break
		}
	}
	for i := sourceCol - 1; i >= targetCol; i-- { //left
		if i >= 0 && ChessBoard[sourceRow][i] == "--" {
			if i == targetCol && sourceRow == targetRow {
				return true
			}
		} else { //piece encountered or off the board
			if i == targetCol && sourceRow == targetRow {
				return true
			}
			break
		}
	}
	return false
}
// kingAttack reports whether a king on (sourceRow, sourceCol) attacks
// (targetRow, targetCol): the target must be one of the eight adjacent
// squares, i.e. both absolute deltas are at most 1 and not both are zero.
func kingAttack(sourceRow int, sourceCol int, targetRow int, targetCol int) bool {
	dr := sourceRow - targetRow
	dc := sourceCol - targetCol
	if dr < 0 {
		dr = -dr
	}
	if dc < 0 {
		dc = -dc
	}
	return dr <= 1 && dc <= 1 && (dr != 0 || dc != 0)
}
package ast
// Unify returns a set of variables that will be unified when the equality expression defined by
// terms a and b is evaluated. The unifier assumes that variables in the VarSet safe are already
// unified.
func Unify(safe VarSet, a *Term, b *Term) VarSet {
	u := &unifier{safe: safe, unified: VarSet{}, unknown: map[Var]VarSet{}}
	u.unify(a, b)
	return u.unified
}
// unifier tracks the state of a single Unify computation.
type unifier struct {
	safe    VarSet         // variables the caller declared safe up front
	unified VarSet         // variables proven safe so far (the eventual result)
	unknown map[Var]VarSet // variable -> set of variables it still depends on
}
// isSafe reports whether x is grounded, either because the caller declared it
// safe or because unification has since marked it unified.
func (u *unifier) isSafe(x Var) bool {
	if u.safe.Contains(x) {
		return true
	}
	return u.unified.Contains(x)
}
// unify dispatches on the dynamic types of a and b and records which
// variables become grounded (safe) by evaluating the equality a = b.
func (u *unifier) unify(a *Term, b *Term) {
	switch a := a.Value.(type) {
	case Var:
		switch b := b.Value.(type) {
		case Var:
			// var = var: either side being safe grounds the other;
			// otherwise each side depends on the other becoming safe.
			if u.isSafe(b) {
				u.markSafe(a)
			} else if u.isSafe(a) {
				u.markSafe(b)
			} else {
				u.markUnknown(a, b)
				u.markUnknown(b, a)
			}
		case Array, Object:
			u.unifyAll(a, b)
		case Ref:
			// A ref grounds the var only when the ref's head var is safe.
			if u.isSafe(b[0].Value.(Var)) {
				u.markSafe(a)
			}
		default:
			// Any other value (scalar) grounds the var unconditionally.
			u.markSafe(a)
		}
	case Ref:
		// A safe ref grounds a var, or every variable inside a composite.
		if u.isSafe(a[0].Value.(Var)) {
			switch b := b.Value.(type) {
			case Var:
				u.markSafe(b)
			case Array, Object:
				u.markAllSafe(b)
			}
		}
	case *ArrayComprehension:
		switch b := b.Value.(type) {
		case Var:
			u.markSafe(b)
		case Array:
			u.markAllSafe(b)
		}
	case *ObjectComprehension:
		switch b := b.Value.(type) {
		case Var:
			u.markSafe(b)
		case Object:
			u.markAllSafe(b)
		}
	case *SetComprehension:
		switch b := b.Value.(type) {
		case Var:
			u.markSafe(b)
		}
	case Array:
		switch b := b.Value.(type) {
		case Var:
			u.unifyAll(b, a)
		case Ref, *ArrayComprehension, *ObjectComprehension, *SetComprehension:
			u.markAllSafe(a)
		case Array:
			// Element-wise unification applies only when lengths match.
			if len(a) == len(b) {
				for i := range a {
					u.unify(a[i], b[i])
				}
			}
		}
	case Object:
		switch b := b.Value.(type) {
		case Var:
			u.unifyAll(b, a)
		case Ref:
			u.markAllSafe(a)
		case Object:
			// Unify values under keys present in both objects (same size only).
			if a.Len() == b.Len() {
				a.Iter(func(k, v *Term) error {
					if v2 := b.Get(k); v2 != nil {
						u.unify(v, v2)
					}
					return nil
				})
			}
		}
	default:
		// a is a scalar: it grounds b when b is a var.
		switch b := b.Value.(type) {
		case Var:
			u.markSafe(b)
		}
	}
}
// markAllSafe marks every variable occurring in x as safe.
func (u *unifier) markAllSafe(x Value) {
	visitor := u.varVisitor()
	visitor.Walk(x)
	for candidate := range visitor.Vars() {
		u.markSafe(candidate)
	}
}
// markSafe records x as unified (safe) and transitively promotes: first the
// variables x was waiting on, then any variables whose only remaining
// dependency was x.
// NOTE(review): markSafe recurses while ranging over u.unknown; Go permits
// deleting map entries during iteration, but entries removed by the recursion
// may or may not still be visited by this range — the transitive closure is
// reached either way because each promotion re-scans u.unknown.
func (u *unifier) markSafe(x Var) {
	u.unified.Add(x)
	// Add dependencies of 'x' to safe set
	vs := u.unknown[x]
	delete(u.unknown, x)
	for v := range vs {
		u.markSafe(v)
	}
	// Add dependants of 'x' to safe set if they have no more
	// dependencies.
	for v, deps := range u.unknown {
		if deps.Contains(x) {
			delete(deps, x)
			if len(deps) == 0 {
				u.markSafe(v)
			}
		}
	}
}
// markUnknown records that a cannot become safe until b is safe.
func (u *unifier) markUnknown(a, b Var) {
	deps, ok := u.unknown[a]
	if !ok {
		deps = NewVarSet()
		u.unknown[a] = deps
	}
	deps.Add(b)
}
// unifyAll unifies variable a against the composite value b: a safe a grounds
// everything in b; otherwise a becomes safe once every unsafe variable in b
// does.
func (u *unifier) unifyAll(a Var, b Value) {
	if u.isSafe(a) {
		u.markAllSafe(b)
		return
	}
	vis := u.varVisitor()
	vis.Walk(b)
	unsafe := vis.Vars().Diff(u.safe).Diff(u.unified)
	if len(unsafe) == 0 {
		u.markSafe(a)
		return
	}
	for v := range unsafe {
		u.markUnknown(a, v)
	}
}
func (u *unifier) varVisitor() *VarVisitor {
return NewVarVisitor().WithParams(VarVisitorParams{
SkipRefHead: true,
SkipObjectKeys: true,
SkipClosures: true,
})
} | ast/unify.go | 0.648466 | 0.451871 | unify.go | starcoder |
package de
import "github.com/ContextLogic/cldr"
// calendar holds the CLDR calendar data for the German ("de") locale:
// date/time/datetime format patterns plus the localized names for months,
// weekdays, and day periods at each CLDR width (abbreviated/narrow/short/wide).
var calendar = cldr.Calendar{
	Formats: cldr.CalendarFormats{
		Date:     cldr.CalendarDateFormat{Full: "EEEE, d. MMMM y", Long: "d. MMMM y", Medium: "dd.MM.y", Short: "dd.MM.yy"},
		Time:     cldr.CalendarDateFormat{Full: "HH:mm:ss zzzz", Long: "HH:mm:ss z", Medium: "HH:mm:ss", Short: "HH:mm"},
		DateTime: cldr.CalendarDateFormat{Full: "{1} 'um' {0}", Long: "{1} 'um' {0}", Medium: "{1}, {0}", Short: "{1}, {0}"},
	},
	FormatNames: cldr.CalendarFormatNames{
		Months: cldr.CalendarMonthFormatNames{
			Abbreviated: cldr.CalendarMonthFormatNameValue{Jan: "Jan", Feb: "Feb", Mar: "Mär", Apr: "Apr", May: "Mai", Jun: "Jun", Jul: "Jul", Aug: "Aug", Sep: "Sep", Oct: "Okt", Nov: "Nov", Dec: "Dez"},
			Narrow:      cldr.CalendarMonthFormatNameValue{Jan: "J", Feb: "F", Mar: "M", Apr: "A", May: "M", Jun: "J", Jul: "J", Aug: "A", Sep: "S", Oct: "O", Nov: "N", Dec: "D"},
			Short:       cldr.CalendarMonthFormatNameValue{},
			Wide:        cldr.CalendarMonthFormatNameValue{Jan: "Januar", Feb: "Februar", Mar: "März", Apr: "April", May: "Mai", Jun: "Juni", Jul: "Juli", Aug: "August", Sep: "September", Oct: "Oktober", Nov: "November", Dec: "Dezember"},
		},
		Days: cldr.CalendarDayFormatNames{
			Abbreviated: cldr.CalendarDayFormatNameValue{Sun: "So", Mon: "Mo", Tue: "Di", Wed: "Mi", Thu: "Do", Fri: "Fr", Sat: "Sa"},
			Narrow:      cldr.CalendarDayFormatNameValue{Sun: "S", Mon: "M", Tue: "D", Wed: "M", Thu: "D", Fri: "F", Sat: "S"},
			Short:       cldr.CalendarDayFormatNameValue{Sun: "So.", Mon: "Mo.", Tue: "Di.", Wed: "Mi.", Thu: "Do.", Fri: "Fr.", Sat: "Sa."},
			Wide:        cldr.CalendarDayFormatNameValue{Sun: "Sonntag", Mon: "Montag", Tue: "Dienstag", Wed: "Mittwoch", Thu: "Donnerstag", Fri: "Freitag", Sat: "Samstag"},
		},
		Periods: cldr.CalendarPeriodFormatNames{
			Abbreviated: cldr.CalendarPeriodFormatNameValue{},
			Narrow:      cldr.CalendarPeriodFormatNameValue{AM: "vm.", PM: "nm."},
			Short:       cldr.CalendarPeriodFormatNameValue{},
			Wide:        cldr.CalendarPeriodFormatNameValue{},
		},
	},
}
package assimp
//#cgo windows LDFLAGS: -lassimp
//#cgo linux freebsd darwin openbsd pkg-config: assimp
//#include <assimp/types.h>
import "C"
// Plane wraps the C assimp plane type (aiPlane).
type Plane C.struct_aiPlane

// Ray wraps the C assimp ray type (aiRay).
type Ray C.struct_aiRay

// Color3 wraps the C assimp RGB color type (aiColor3D).
type Color3 C.struct_aiColor3D

// Return mirrors the C enum aiReturn, the status code returned by assimp calls.
type Return C.enum_aiReturn

const (
	Return_Success     Return = C.aiReturn_SUCCESS
	Return_Failure     Return = C.aiReturn_FAILURE
	Return_OutOfMemory Return = C.aiReturn_OUTOFMEMORY
)

// Origin mirrors the C enum aiOrigin (seek origin for custom I/O).
type Origin C.enum_aiOrigin

const (
	Origin_Set Origin = C.aiOrigin_SET
	Origin_Cur Origin = C.aiOrigin_CUR
	Origin_End Origin = C.aiOrigin_END
)

// DefaultLogStream mirrors the C enum aiDefaultLogStream (predefined log sinks).
type DefaultLogStream C.enum_aiDefaultLogStream

const (
	DefaultLogStream_File     DefaultLogStream = C.aiDefaultLogStream_FILE
	DefaultLogStream_StdOut   DefaultLogStream = C.aiDefaultLogStream_STDOUT
	DefaultLogStream_StdErr   DefaultLogStream = C.aiDefaultLogStream_STDERR
	DefaultLogStream_Debugger DefaultLogStream = C.aiDefaultLogStream_DEBUGGER
)
// MemoryInfo wraps the C assimp type aiMemoryInfo; the accessors below expose
// its per-category byte counters as Go uints.
type MemoryInfo C.struct_aiMemoryInfo

// Textures returns the counter for textures.
func (mi *MemoryInfo) Textures() uint {
	return uint(mi.textures)
}

// Materials returns the counter for materials.
func (mi *MemoryInfo) Materials() uint {
	return uint(mi.materials)
}

// Meshes returns the counter for meshes.
func (mi *MemoryInfo) Meshes() uint {
	return uint(mi.meshes)
}

// Nodes returns the counter for nodes.
func (mi *MemoryInfo) Nodes() uint {
	return uint(mi.nodes)
}

// Animations returns the counter for animations.
func (mi *MemoryInfo) Animations() uint {
	return uint(mi.animations)
}

// Cameras returns the counter for cameras.
func (mi *MemoryInfo) Cameras() uint {
	return uint(mi.cameras)
}

// Lights returns the counter for lights.
func (mi *MemoryInfo) Lights() uint {
	return uint(mi.lights)
}

// Total returns the overall counter.
func (mi *MemoryInfo) Total() uint {
	return uint(mi.total)
}
// The math types below wrap the corresponding C assimp structs; the accessor
// methods elsewhere in this file convert their components to Go float32.
type Vector2 C.struct_aiVector2D
type Vector3 C.struct_aiVector3D
type Color4 C.struct_aiColor4D
type Quaternion C.struct_aiQuaternion
type Matrix3x3 C.struct_aiMatrix3x3
type Matrix4x4 C.struct_aiMatrix4x4
// X returns the x component of the 2D vector.
func (v *Vector2) X() float32 {
	return float32(v.x)
}

// Y returns the y component of the 2D vector.
func (v *Vector2) Y() float32 {
	return float32(v.y)
}

// Values returns the 2D vector as an [x, y] array.
func (v *Vector2) Values() [2]float32 {
	return [2]float32{float32(v.x), float32(v.y)}
}

// X returns the x component of the 3D vector.
func (v *Vector3) X() float32 {
	return float32(v.x)
}

// Y returns the y component of the 3D vector.
func (v *Vector3) Y() float32 {
	return float32(v.y)
}

// Z returns the z component of the 3D vector.
func (v *Vector3) Z() float32 {
	return float32(v.z)
}

// Values returns the 3D vector as an [x, y, z] array.
func (v *Vector3) Values() [3]float32 {
	return [3]float32{float32(v.x), float32(v.y), float32(v.z)}
}
// W returns the scalar (w) component of the quaternion.
// Fixed: this previously returned the x component — a copy/paste slip; compare
// Values below, which correctly reads q.w for the first element.
func (v *Quaternion) W() float32 {
	return float32(v.w)
}
// X returns the x component of the quaternion.
func (v *Quaternion) X() float32 {
	return float32(v.x)
}

// Y returns the y component of the quaternion.
func (v *Quaternion) Y() float32 {
	return float32(v.y)
}

// Z returns the z component of the quaternion.
func (v *Quaternion) Z() float32 {
	return float32(v.z)
}

// Values returns the quaternion components in order w,x,y,z.
func (q *Quaternion) Values() [4]float32 {
	return [4]float32{float32(q.w), float32(q.x), float32(q.y), float32(q.z)}
}
// R returns the red component of the RGB color.
func (c *Color3) R() float32 {
	return float32(c.r)
}

// G returns the green component of the RGB color.
func (c *Color3) G() float32 {
	return float32(c.g)
}

// B returns the blue component of the RGB color.
func (c *Color3) B() float32 {
	return float32(c.b)
}

// Values returns the color as an [r, g, b] array.
func (c *Color3) Values() [3]float32 {
	return [3]float32{float32(c.r), float32(c.g), float32(c.b)}
}

// R returns the red component of the RGBA color.
func (c *Color4) R() float32 {
	return float32(c.r)
}

// G returns the green component of the RGBA color.
func (c *Color4) G() float32 {
	return float32(c.g)
}

// B returns the blue component of the RGBA color.
func (c *Color4) B() float32 {
	return float32(c.b)
}

// A returns the alpha component of the RGBA color.
func (c *Color4) A() float32 {
	return float32(c.a)
}

// Values returns the color as an [r, g, b, a] array.
func (c *Color4) Values() [4]float32 {
	return [4]float32{float32(c.r), float32(c.g), float32(c.b), float32(c.a)}
}
func (m *Matrix3x3) Values() [3][3]float32 {
return [3][3]float32{
[3]float32{float32(m.a1), float32(m.a2), float32(m.a3)},
[3]float32{float32(m.b1), float32(m.b2), float32(m.b3)},
[3]float32{float32(m.c1), float32(m.c2), float32(m.c3)},
}
}
func (m *Matrix4x4) Values() [4][4]float32 {
return [4][4]float32{
[4]float32{float32(m.a1), float32(m.a2), float32(m.a3), float32(m.a4)},
[4]float32{float32(m.b1), float32(m.b2), float32(m.b3), float32(m.b4)},
[4]float32{float32(m.c1), float32(m.c2), float32(m.c3), float32(m.c4)},
[4]float32{float32(m.d1), float32(m.d2), float32(m.d3), float32(m.d4)},
}
} | Types.go | 0.687525 | 0.41941 | Types.go | starcoder |
package query
import (
"context"
"encoding/binary"
"io"
"github.com/gogo/protobuf/proto"
internal "github.com/influxdata/influxdb/query/internal"
)
// FloatPoint represents a point with a float64 value.
// DO NOT ADD ADDITIONAL FIELDS TO THIS STRUCT.
// See TestPoint_Fields in influxql/point_test.go for more details.
type FloatPoint struct {
	Name string
	Tags Tags
	Time int64
	Value float64
	// Aux holds auxiliary values carried alongside the primary value.
	Aux []interface{}
	// Total number of points that were combined into this point from an aggregate.
	// If this is zero, the point is not the result of an aggregate function.
	Aggregated uint32
	// Nil is true when the point carries no value; Value is then meaningless.
	Nil bool
}
// The lower-case accessors below presumably satisfy an unexported point
// interface shared by all point types in this package — confirm against the
// interface definition elsewhere in the file.
func (v *FloatPoint) name() string { return v.Name }
func (v *FloatPoint) tags() Tags { return v.Tags }
func (v *FloatPoint) time() int64 { return v.Time }
func (v *FloatPoint) nil() bool { return v.Nil }
// value returns the point's value boxed as interface{}, or nil for nil points.
func (v *FloatPoint) value() interface{} {
	if v.Nil {
		return nil
	}
	return v.Value
}
func (v *FloatPoint) aux() []interface{} { return v.Aux }
// Clone returns a copy of v, duplicating the Aux slice so the clone does not
// share backing storage with the original. A nil receiver yields nil.
func (v *FloatPoint) Clone() *FloatPoint {
	if v == nil {
		return nil
	}
	clone := *v
	if v.Aux != nil {
		aux := make([]interface{}, len(v.Aux))
		copy(aux, v.Aux)
		clone.Aux = aux
	}
	return &clone
}
// CopyTo makes a deep copy of v into other, reusing other's Aux slice when it
// already has the right length.
func (v *FloatPoint) CopyTo(other *FloatPoint) {
	other.Name = v.Name
	other.Tags = v.Tags
	other.Time = v.Time
	other.Value = v.Value
	other.Nil = v.Nil
	if v.Aux == nil {
		return
	}
	if len(other.Aux) != len(v.Aux) {
		other.Aux = make([]interface{}, len(v.Aux))
	}
	copy(other.Aux, v.Aux)
}
// encodeFloatPoint converts p into its protobuf wire representation.
func encodeFloatPoint(p *FloatPoint) *internal.Point {
	return &internal.Point{
		Name: proto.String(p.Name),
		Tags: proto.String(p.Tags.ID()),
		Time: proto.Int64(p.Time),
		Nil: proto.Bool(p.Nil),
		Aux: encodeAux(p.Aux),
		Aggregated: proto.Uint32(p.Aggregated),
		FloatValue: proto.Float64(p.Value),
	}
}
// decodeFloatPoint converts a protobuf point back into a FloatPoint.
func decodeFloatPoint(pb *internal.Point) *FloatPoint {
	return &FloatPoint{
		Name: pb.GetName(),
		Tags: newTagsID(pb.GetTags()),
		Time: pb.GetTime(),
		Nil: pb.GetNil(),
		Aux: decodeAux(pb.Aux),
		Aggregated: pb.GetAggregated(),
		Value: pb.GetFloatValue(),
	}
}
// floatPoints represents a slice of points sortable by time, then value.
type floatPoints []FloatPoint
func (a floatPoints) Len() int { return len(a) }
func (a floatPoints) Less(i, j int) bool {
	if a[i].Time != a[j].Time {
		return a[i].Time < a[j].Time
	}
	return a[i].Value < a[j].Value
}
func (a floatPoints) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// floatPointsByValue represents a slice of points sortable by value.
type floatPointsByValue []FloatPoint
func (a floatPointsByValue) Len() int { return len(a) }
func (a floatPointsByValue) Less(i, j int) bool { return a[i].Value < a[j].Value }
func (a floatPointsByValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// floatPointsByTime represents a slice of points sortable by time.
type floatPointsByTime []FloatPoint
func (a floatPointsByTime) Len() int { return len(a) }
func (a floatPointsByTime) Less(i, j int) bool { return a[i].Time < a[j].Time }
func (a floatPointsByTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// floatPointsByFunc represents a slice of points sortable by a comparison
// function; Push/Pop additionally give it the shape of container/heap's
// Interface.
type floatPointsByFunc struct {
	points []FloatPoint
	cmp func(a, b *FloatPoint) bool
}
func (a *floatPointsByFunc) Len() int { return len(a.points) }
func (a *floatPointsByFunc) Less(i, j int) bool { return a.cmp(&a.points[i], &a.points[j]) }
func (a *floatPointsByFunc) Swap(i, j int) { a.points[i], a.points[j] = a.points[j], a.points[i] }
func (a *floatPointsByFunc) Push(x interface{}) {
	a.points = append(a.points, x.(FloatPoint))
}
func (a *floatPointsByFunc) Pop() interface{} {
	p := a.points[len(a.points)-1]
	a.points = a.points[:len(a.points)-1]
	return p
}
// floatPointsSortBy wraps points with cmp for use with sort or heap.
func floatPointsSortBy(points []FloatPoint, cmp func(a, b *FloatPoint) bool) *floatPointsByFunc {
	return &floatPointsByFunc{
		points: points,
		cmp: cmp,
	}
}
// FloatPointEncoder encodes FloatPoint points to a writer.
type FloatPointEncoder struct {
	w io.Writer
}
// NewFloatPointEncoder returns a new instance of FloatPointEncoder that writes to w.
func NewFloatPointEncoder(w io.Writer) *FloatPointEncoder {
	return &FloatPointEncoder{w: w}
}
// EncodeFloatPoint marshals and writes p to the underlying writer.
// Each point is framed as a big-endian uint32 length followed by the
// protobuf-encoded payload.
func (enc *FloatPointEncoder) EncodeFloatPoint(p *FloatPoint) error {
	// Marshal to bytes.
	buf, err := proto.Marshal(encodeFloatPoint(p))
	if err != nil {
		return err
	}
	// Write the length.
	if err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil {
		return err
	}
	// Write the encoded point.
	if _, err := enc.w.Write(buf); err != nil {
		return err
	}
	return nil
}
// FloatPointDecoder decodes FloatPoint points from a reader.
type FloatPointDecoder struct {
	r io.Reader
	stats IteratorStats // most recent stats message seen in the stream
	ctx context.Context
}
// NewFloatPointDecoder returns a new instance of FloatPointDecoder that reads from r.
func NewFloatPointDecoder(ctx context.Context, r io.Reader) *FloatPointDecoder {
	return &FloatPointDecoder{r: r, ctx: ctx}
}
// Stats returns iterator stats embedded within the stream.
func (dec *FloatPointDecoder) Stats() IteratorStats { return dec.stats }
// DecodeFloatPoint reads from the underlying reader and unmarshals into p.
// The stream interleaves stats and trace messages with points; those are
// consumed here and the loop continues until an actual point arrives.
func (dec *FloatPointDecoder) DecodeFloatPoint(p *FloatPoint) error {
	for {
		// Read length.
		var sz uint32
		if err := binary.Read(dec.r, binary.BigEndian, &sz); err != nil {
			return err
		}
		// Read point data.
		buf := make([]byte, sz)
		if _, err := io.ReadFull(dec.r, buf); err != nil {
			return err
		}
		// Unmarshal into point.
		var pb internal.Point
		if err := proto.Unmarshal(buf, &pb); err != nil {
			return err
		}
		// If the point contains stats then read stats and retry.
		if pb.Stats != nil {
			dec.stats = decodeIteratorStats(pb.Stats)
			continue
		}
		if len(pb.Trace) > 0 {
			var err error
			err = decodeIteratorTrace(dec.ctx, pb.Trace)
			if err != nil {
				return err
			}
			continue
		}
		// Decode into point object.
		*p = *decodeFloatPoint(&pb)
		return nil
	}
}
// IntegerPoint represents a point with an int64 value.
// DO NOT ADD ADDITIONAL FIELDS TO THIS STRUCT.
// See TestPoint_Fields in influxql/point_test.go for more details.
type IntegerPoint struct {
	Name string
	Tags Tags
	Time int64
	Value int64
	// Aux holds auxiliary values carried alongside the primary value.
	Aux []interface{}
	// Total number of points that were combined into this point from an aggregate.
	// If this is zero, the point is not the result of an aggregate function.
	Aggregated uint32
	// Nil is true when the point carries no value; Value is then meaningless.
	Nil bool
}
func (v *IntegerPoint) name() string { return v.Name }
func (v *IntegerPoint) tags() Tags { return v.Tags }
func (v *IntegerPoint) time() int64 { return v.Time }
func (v *IntegerPoint) nil() bool { return v.Nil }
// value returns the point's value boxed as interface{}, or nil for nil points.
func (v *IntegerPoint) value() interface{} {
	if v.Nil {
		return nil
	}
	return v.Value
}
func (v *IntegerPoint) aux() []interface{} { return v.Aux }
// Clone returns a copy of v with its own copy of the Aux slice.
func (v *IntegerPoint) Clone() *IntegerPoint {
	if v == nil {
		return nil
	}
	other := *v
	if v.Aux != nil {
		other.Aux = make([]interface{}, len(v.Aux))
		copy(other.Aux, v.Aux)
	}
	return &other
}
// CopyTo makes a deep copy into the point, reusing other's Aux slice when it
// already has the right length.
func (v *IntegerPoint) CopyTo(other *IntegerPoint) {
	other.Name, other.Tags = v.Name, v.Tags
	other.Time = v.Time
	other.Value, other.Nil = v.Value, v.Nil
	if v.Aux != nil {
		if len(other.Aux) != len(v.Aux) {
			other.Aux = make([]interface{}, len(v.Aux))
		}
		copy(other.Aux, v.Aux)
	}
}
// encodeIntegerPoint converts p into its protobuf wire representation.
func encodeIntegerPoint(p *IntegerPoint) *internal.Point {
	return &internal.Point{
		Name: proto.String(p.Name),
		Tags: proto.String(p.Tags.ID()),
		Time: proto.Int64(p.Time),
		Nil: proto.Bool(p.Nil),
		Aux: encodeAux(p.Aux),
		Aggregated: proto.Uint32(p.Aggregated),
		IntegerValue: proto.Int64(p.Value),
	}
}
// decodeIntegerPoint converts a protobuf point back into an IntegerPoint.
func decodeIntegerPoint(pb *internal.Point) *IntegerPoint {
	return &IntegerPoint{
		Name: pb.GetName(),
		Tags: newTagsID(pb.GetTags()),
		Time: pb.GetTime(),
		Nil: pb.GetNil(),
		Aux: decodeAux(pb.Aux),
		Aggregated: pb.GetAggregated(),
		Value: pb.GetIntegerValue(),
	}
}
// integerPoints represents a slice of points sortable by time, then value.
type integerPoints []IntegerPoint
func (a integerPoints) Len() int { return len(a) }
func (a integerPoints) Less(i, j int) bool {
	if a[i].Time != a[j].Time {
		return a[i].Time < a[j].Time
	}
	return a[i].Value < a[j].Value
}
func (a integerPoints) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// integerPointsByValue represents a slice of points sortable by value.
type integerPointsByValue []IntegerPoint
func (a integerPointsByValue) Len() int { return len(a) }
func (a integerPointsByValue) Less(i, j int) bool { return a[i].Value < a[j].Value }
func (a integerPointsByValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// integerPointsByTime represents a slice of points sortable by time.
type integerPointsByTime []IntegerPoint
func (a integerPointsByTime) Len() int { return len(a) }
func (a integerPointsByTime) Less(i, j int) bool { return a[i].Time < a[j].Time }
func (a integerPointsByTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// integerPointsByFunc represents a slice of points sortable by a comparison
// function; Push/Pop additionally give it the shape of container/heap's
// Interface.
type integerPointsByFunc struct {
	points []IntegerPoint
	cmp func(a, b *IntegerPoint) bool
}
func (a *integerPointsByFunc) Len() int { return len(a.points) }
func (a *integerPointsByFunc) Less(i, j int) bool { return a.cmp(&a.points[i], &a.points[j]) }
func (a *integerPointsByFunc) Swap(i, j int) { a.points[i], a.points[j] = a.points[j], a.points[i] }
func (a *integerPointsByFunc) Push(x interface{}) {
	a.points = append(a.points, x.(IntegerPoint))
}
func (a *integerPointsByFunc) Pop() interface{} {
	p := a.points[len(a.points)-1]
	a.points = a.points[:len(a.points)-1]
	return p
}
// integerPointsSortBy wraps points with cmp for use with sort or heap.
func integerPointsSortBy(points []IntegerPoint, cmp func(a, b *IntegerPoint) bool) *integerPointsByFunc {
	return &integerPointsByFunc{
		points: points,
		cmp: cmp,
	}
}
// IntegerPointEncoder encodes IntegerPoint points to a writer.
type IntegerPointEncoder struct {
	w io.Writer
}
// NewIntegerPointEncoder returns a new instance of IntegerPointEncoder that writes to w.
func NewIntegerPointEncoder(w io.Writer) *IntegerPointEncoder {
	return &IntegerPointEncoder{w: w}
}
// EncodeIntegerPoint marshals and writes p to the underlying writer.
// Each point is framed as a big-endian uint32 length followed by the
// protobuf-encoded payload.
func (enc *IntegerPointEncoder) EncodeIntegerPoint(p *IntegerPoint) error {
	// Marshal to bytes.
	buf, err := proto.Marshal(encodeIntegerPoint(p))
	if err != nil {
		return err
	}
	// Write the length.
	if err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil {
		return err
	}
	// Write the encoded point.
	if _, err := enc.w.Write(buf); err != nil {
		return err
	}
	return nil
}
// IntegerPointDecoder decodes IntegerPoint points from a reader.
type IntegerPointDecoder struct {
	r io.Reader
	stats IteratorStats // most recent stats message seen in the stream
	ctx context.Context
}
// NewIntegerPointDecoder returns a new instance of IntegerPointDecoder that reads from r.
func NewIntegerPointDecoder(ctx context.Context, r io.Reader) *IntegerPointDecoder {
	return &IntegerPointDecoder{r: r, ctx: ctx}
}
// Stats returns iterator stats embedded within the stream.
func (dec *IntegerPointDecoder) Stats() IteratorStats { return dec.stats }
// DecodeIntegerPoint reads from the underlying reader and unmarshals into p.
// Interleaved stats and trace messages are consumed and the loop continues
// until an actual point arrives.
func (dec *IntegerPointDecoder) DecodeIntegerPoint(p *IntegerPoint) error {
	for {
		// Read length.
		var sz uint32
		if err := binary.Read(dec.r, binary.BigEndian, &sz); err != nil {
			return err
		}
		// Read point data.
		buf := make([]byte, sz)
		if _, err := io.ReadFull(dec.r, buf); err != nil {
			return err
		}
		// Unmarshal into point.
		var pb internal.Point
		if err := proto.Unmarshal(buf, &pb); err != nil {
			return err
		}
		// If the point contains stats then read stats and retry.
		if pb.Stats != nil {
			dec.stats = decodeIteratorStats(pb.Stats)
			continue
		}
		if len(pb.Trace) > 0 {
			var err error
			err = decodeIteratorTrace(dec.ctx, pb.Trace)
			if err != nil {
				return err
			}
			continue
		}
		// Decode into point object.
		*p = *decodeIntegerPoint(&pb)
		return nil
	}
}
// UnsignedPoint represents a point with a uint64 value.
// DO NOT ADD ADDITIONAL FIELDS TO THIS STRUCT.
// See TestPoint_Fields in influxql/point_test.go for more details.
type UnsignedPoint struct {
	Name string
	Tags Tags
	Time int64
	Value uint64
	// Aux holds auxiliary values carried alongside the primary value.
	Aux []interface{}
	// Total number of points that were combined into this point from an aggregate.
	// If this is zero, the point is not the result of an aggregate function.
	Aggregated uint32
	// Nil is true when the point carries no value; Value is then meaningless.
	Nil bool
}
func (v *UnsignedPoint) name() string { return v.Name }
func (v *UnsignedPoint) tags() Tags { return v.Tags }
func (v *UnsignedPoint) time() int64 { return v.Time }
func (v *UnsignedPoint) nil() bool { return v.Nil }
// value returns the point's value boxed as interface{}, or nil for nil points.
func (v *UnsignedPoint) value() interface{} {
	if v.Nil {
		return nil
	}
	return v.Value
}
func (v *UnsignedPoint) aux() []interface{} { return v.Aux }
// Clone returns a copy of v with its own copy of the Aux slice.
func (v *UnsignedPoint) Clone() *UnsignedPoint {
	if v == nil {
		return nil
	}
	other := *v
	if v.Aux != nil {
		other.Aux = make([]interface{}, len(v.Aux))
		copy(other.Aux, v.Aux)
	}
	return &other
}
// CopyTo makes a deep copy into the point, reusing other's Aux slice when it
// already has the right length.
func (v *UnsignedPoint) CopyTo(other *UnsignedPoint) {
	other.Name, other.Tags = v.Name, v.Tags
	other.Time = v.Time
	other.Value, other.Nil = v.Value, v.Nil
	if v.Aux != nil {
		if len(other.Aux) != len(v.Aux) {
			other.Aux = make([]interface{}, len(v.Aux))
		}
		copy(other.Aux, v.Aux)
	}
}
// encodeUnsignedPoint converts p into its protobuf wire representation.
// Fixed: unlike encodeFloatPoint/encodeIntegerPoint, the value was never
// encoded, so a round-tripped point always decoded with Value == 0
// (decodeUnsignedPoint reads GetUnsignedValue()).
func encodeUnsignedPoint(p *UnsignedPoint) *internal.Point {
	return &internal.Point{
		Name: proto.String(p.Name),
		Tags: proto.String(p.Tags.ID()),
		Time: proto.Int64(p.Time),
		Nil: proto.Bool(p.Nil),
		Aux: encodeAux(p.Aux),
		Aggregated: proto.Uint32(p.Aggregated),
		UnsignedValue: proto.Uint64(p.Value),
	}
}
// decodeUnsignedPoint converts a protobuf point back into an UnsignedPoint.
func decodeUnsignedPoint(pb *internal.Point) *UnsignedPoint {
	return &UnsignedPoint{
		Name: pb.GetName(),
		Tags: newTagsID(pb.GetTags()),
		Time: pb.GetTime(),
		Nil: pb.GetNil(),
		Aux: decodeAux(pb.Aux),
		Aggregated: pb.GetAggregated(),
		Value: pb.GetUnsignedValue(),
	}
}
// unsignedPoints represents a slice of points sortable by time, then value.
type unsignedPoints []UnsignedPoint
func (a unsignedPoints) Len() int { return len(a) }
func (a unsignedPoints) Less(i, j int) bool {
	if a[i].Time != a[j].Time {
		return a[i].Time < a[j].Time
	}
	return a[i].Value < a[j].Value
}
func (a unsignedPoints) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// unsignedPointsByValue represents a slice of points sortable by value.
type unsignedPointsByValue []UnsignedPoint
func (a unsignedPointsByValue) Len() int { return len(a) }
func (a unsignedPointsByValue) Less(i, j int) bool { return a[i].Value < a[j].Value }
func (a unsignedPointsByValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// unsignedPointsByTime represents a slice of points sortable by time.
type unsignedPointsByTime []UnsignedPoint
func (a unsignedPointsByTime) Len() int { return len(a) }
func (a unsignedPointsByTime) Less(i, j int) bool { return a[i].Time < a[j].Time }
func (a unsignedPointsByTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// unsignedPointsByFunc represents a slice of points sortable by a comparison
// function; Push/Pop additionally give it the shape of container/heap's
// Interface.
type unsignedPointsByFunc struct {
	points []UnsignedPoint
	cmp func(a, b *UnsignedPoint) bool
}
func (a *unsignedPointsByFunc) Len() int { return len(a.points) }
func (a *unsignedPointsByFunc) Less(i, j int) bool { return a.cmp(&a.points[i], &a.points[j]) }
func (a *unsignedPointsByFunc) Swap(i, j int) { a.points[i], a.points[j] = a.points[j], a.points[i] }
func (a *unsignedPointsByFunc) Push(x interface{}) {
	a.points = append(a.points, x.(UnsignedPoint))
}
func (a *unsignedPointsByFunc) Pop() interface{} {
	p := a.points[len(a.points)-1]
	a.points = a.points[:len(a.points)-1]
	return p
}
// unsignedPointsSortBy wraps points with cmp for use with sort or heap.
func unsignedPointsSortBy(points []UnsignedPoint, cmp func(a, b *UnsignedPoint) bool) *unsignedPointsByFunc {
	return &unsignedPointsByFunc{
		points: points,
		cmp: cmp,
	}
}
// UnsignedPointEncoder encodes UnsignedPoint points to a writer.
type UnsignedPointEncoder struct {
	w io.Writer
}
// NewUnsignedPointEncoder returns a new instance of UnsignedPointEncoder that writes to w.
func NewUnsignedPointEncoder(w io.Writer) *UnsignedPointEncoder {
	return &UnsignedPointEncoder{w: w}
}
// EncodeUnsignedPoint marshals and writes p to the underlying writer.
// Each point is framed as a big-endian uint32 length followed by the
// protobuf-encoded payload.
func (enc *UnsignedPointEncoder) EncodeUnsignedPoint(p *UnsignedPoint) error {
	// Marshal to bytes.
	buf, err := proto.Marshal(encodeUnsignedPoint(p))
	if err != nil {
		return err
	}
	// Write the length.
	if err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil {
		return err
	}
	// Write the encoded point.
	if _, err := enc.w.Write(buf); err != nil {
		return err
	}
	return nil
}
// UnsignedPointDecoder decodes UnsignedPoint points from a reader.
type UnsignedPointDecoder struct {
	r io.Reader
	stats IteratorStats // most recent stats message seen in the stream
	ctx context.Context
}
// NewUnsignedPointDecoder returns a new instance of UnsignedPointDecoder that reads from r.
func NewUnsignedPointDecoder(ctx context.Context, r io.Reader) *UnsignedPointDecoder {
	return &UnsignedPointDecoder{r: r, ctx: ctx}
}
// Stats returns iterator stats embedded within the stream.
func (dec *UnsignedPointDecoder) Stats() IteratorStats { return dec.stats }
// DecodeUnsignedPoint reads from the underlying reader and unmarshals into p.
// Interleaved stats and trace messages are consumed and the loop continues
// until an actual point arrives.
func (dec *UnsignedPointDecoder) DecodeUnsignedPoint(p *UnsignedPoint) error {
	for {
		// Read length.
		var sz uint32
		if err := binary.Read(dec.r, binary.BigEndian, &sz); err != nil {
			return err
		}
		// Read point data.
		buf := make([]byte, sz)
		if _, err := io.ReadFull(dec.r, buf); err != nil {
			return err
		}
		// Unmarshal into point.
		var pb internal.Point
		if err := proto.Unmarshal(buf, &pb); err != nil {
			return err
		}
		// If the point contains stats then read stats and retry.
		if pb.Stats != nil {
			dec.stats = decodeIteratorStats(pb.Stats)
			continue
		}
		if len(pb.Trace) > 0 {
			var err error
			err = decodeIteratorTrace(dec.ctx, pb.Trace)
			if err != nil {
				return err
			}
			continue
		}
		// Decode into point object.
		*p = *decodeUnsignedPoint(&pb)
		return nil
	}
}
// StringPoint represents a point with a string value.
// DO NOT ADD ADDITIONAL FIELDS TO THIS STRUCT.
// See TestPoint_Fields in influxql/point_test.go for more details.
type StringPoint struct {
	Name string // point name, as returned by name()
	Tags Tags   // tag set for the point, as returned by tags()
	Time int64  // timestamp of the point (units not shown here; presumably Unix nanoseconds — confirm)
	Value string // the string value; ignored by value() when Nil is true
	Aux []interface{} // auxiliary values carried alongside the point, as returned by aux()
	// Total number of points that were combined into this point from an aggregate.
	// If this is zero, the point is not the result of an aggregate function.
	Aggregated uint32
	Nil bool // true when the point has no value; value() then returns nil
}
func (v *StringPoint) name() string { return v.Name }
func (v *StringPoint) tags() Tags { return v.Tags }
func (v *StringPoint) time() int64 { return v.Time }
func (v *StringPoint) nil() bool { return v.Nil }
func (v *StringPoint) value() interface{} {
if v.Nil {
return nil
}
return v.Value
}
func (v *StringPoint) aux() []interface{} { return v.Aux }
// Clone returns a copy of v, or nil when v is nil. The Aux slice is
// duplicated so the clone does not share backing storage with v.
func (v *StringPoint) Clone() *StringPoint {
	if v == nil {
		return nil
	}
	clone := *v
	if v.Aux != nil {
		aux := make([]interface{}, len(v.Aux))
		copy(aux, v.Aux)
		clone.Aux = aux
	}
	return &clone
}
// CopyTo makes a deep copy into the point. The destination's Aux buffer is
// reused when it already has the right length; when v.Aux is nil the
// destination's Aux is left untouched.
func (v *StringPoint) CopyTo(other *StringPoint) {
	other.Name = v.Name
	other.Tags = v.Tags
	other.Time = v.Time
	other.Value = v.Value
	other.Nil = v.Nil
	if v.Aux == nil {
		return
	}
	if len(other.Aux) != len(v.Aux) {
		other.Aux = make([]interface{}, len(v.Aux))
	}
	copy(other.Aux, v.Aux)
}
func encodeStringPoint(p *StringPoint) *internal.Point {
return &internal.Point{
Name: proto.String(p.Name),
Tags: proto.String(p.Tags.ID()),
Time: proto.Int64(p.Time),
Nil: proto.Bool(p.Nil),
Aux: encodeAux(p.Aux),
Aggregated: proto.Uint32(p.Aggregated),
StringValue: proto.String(p.Value),
}
}
func decodeStringPoint(pb *internal.Point) *StringPoint {
return &StringPoint{
Name: pb.GetName(),
Tags: newTagsID(pb.GetTags()),
Time: pb.GetTime(),
Nil: pb.GetNil(),
Aux: decodeAux(pb.Aux),
Aggregated: pb.GetAggregated(),
Value: pb.GetStringValue(),
}
}
// stringPoints represents a slice of points sortable by time, with ties
// broken by value.
type stringPoints []StringPoint
func (a stringPoints) Len() int { return len(a) }
// Less orders points by ascending time; points with equal timestamps are
// ordered by ascending string value.
func (a stringPoints) Less(i, j int) bool {
	if a[i].Time != a[j].Time {
		return a[i].Time < a[j].Time
	}
	return a[i].Value < a[j].Value
}
func (a stringPoints) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// stringPointsByValue represents a slice of points sortable by value.
type stringPointsByValue []StringPoint
func (a stringPointsByValue) Len() int { return len(a) }
func (a stringPointsByValue) Less(i, j int) bool { return a[i].Value < a[j].Value }
func (a stringPointsByValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// stringPointsByTime represents a slice of points sortable by value.
type stringPointsByTime []StringPoint
func (a stringPointsByTime) Len() int { return len(a) }
func (a stringPointsByTime) Less(i, j int) bool { return a[i].Time < a[j].Time }
func (a stringPointsByTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// stringPointByFunc represents a slice of points sortable by a function.
type stringPointsByFunc struct {
points []StringPoint
cmp func(a, b *StringPoint) bool
}
func (a *stringPointsByFunc) Len() int { return len(a.points) }
func (a *stringPointsByFunc) Less(i, j int) bool { return a.cmp(&a.points[i], &a.points[j]) }
func (a *stringPointsByFunc) Swap(i, j int) { a.points[i], a.points[j] = a.points[j], a.points[i] }
func (a *stringPointsByFunc) Push(x interface{}) {
a.points = append(a.points, x.(StringPoint))
}
func (a *stringPointsByFunc) Pop() interface{} {
p := a.points[len(a.points)-1]
a.points = a.points[:len(a.points)-1]
return p
}
func stringPointsSortBy(points []StringPoint, cmp func(a, b *StringPoint) bool) *stringPointsByFunc {
return &stringPointsByFunc{
points: points,
cmp: cmp,
}
}
// StringPointEncoder encodes StringPoint points to a writer.
type StringPointEncoder struct {
w io.Writer
}
// NewStringPointEncoder returns a new instance of StringPointEncoder that writes to w.
func NewStringPointEncoder(w io.Writer) *StringPointEncoder {
return &StringPointEncoder{w: w}
}
// EncodeStringPoint marshals and writes p to the underlying writer.
// Each point is framed as a big-endian uint32 length followed by the
// protobuf-encoded point.
func (enc *StringPointEncoder) EncodeStringPoint(p *StringPoint) error {
	data, err := proto.Marshal(encodeStringPoint(p))
	if err != nil {
		return err
	}
	// Length prefix first, then the payload.
	if err := binary.Write(enc.w, binary.BigEndian, uint32(len(data))); err != nil {
		return err
	}
	_, err = enc.w.Write(data)
	return err
}
// StringPointDecoder decodes StringPoint points from a reader.
type StringPointDecoder struct {
r io.Reader
stats IteratorStats
ctx context.Context
}
// NewStringPointDecoder returns a new instance of StringPointDecoder that reads from r.
func NewStringPointDecoder(ctx context.Context, r io.Reader) *StringPointDecoder {
return &StringPointDecoder{r: r, ctx: ctx}
}
// Stats returns iterator stats embedded within the stream.
func (dec *StringPointDecoder) Stats() IteratorStats { return dec.stats }
// DecodeStringPoint reads from the underlying reader and unmarshals into p.
//
// The stream interleaves regular point frames with embedded iterator-stats
// and trace frames: stats frames update dec.stats and trace frames are
// forwarded to decodeIteratorTrace, after which reading continues until a
// real point (or an error) is encountered.
func (dec *StringPointDecoder) DecodeStringPoint(p *StringPoint) error {
	for {
		// Read the big-endian uint32 length prefix.
		var sz uint32
		if err := binary.Read(dec.r, binary.BigEndian, &sz); err != nil {
			return err
		}
		// Read the frame payload.
		buf := make([]byte, sz)
		if _, err := io.ReadFull(dec.r, buf); err != nil {
			return err
		}
		// Unmarshal into the intermediate protobuf representation.
		var pb internal.Point
		if err := proto.Unmarshal(buf, &pb); err != nil {
			return err
		}
		// Stats frame: record the stats and keep reading.
		if pb.Stats != nil {
			dec.stats = decodeIteratorStats(pb.Stats)
			continue
		}
		// Trace frame: forward it and keep reading. (Scoped "if err :="
		// replaces the previous two-step "var err error" assignment.)
		if len(pb.Trace) > 0 {
			if err := decodeIteratorTrace(dec.ctx, pb.Trace); err != nil {
				return err
			}
			continue
		}
		// Regular point frame: decode into the caller's point.
		*p = *decodeStringPoint(&pb)
		return nil
	}
}
// BooleanPoint represents a point with a bool value.
// DO NOT ADD ADDITIONAL FIELDS TO THIS STRUCT.
// See TestPoint_Fields in influxql/point_test.go for more details.
type BooleanPoint struct {
Name string
Tags Tags
Time int64
Value bool
Aux []interface{}
// Total number of points that were combined into this point from an aggregate.
// If this is zero, the point is not the result of an aggregate function.
Aggregated uint32
Nil bool
}
func (v *BooleanPoint) name() string { return v.Name }
func (v *BooleanPoint) tags() Tags { return v.Tags }
func (v *BooleanPoint) time() int64 { return v.Time }
func (v *BooleanPoint) nil() bool { return v.Nil }
func (v *BooleanPoint) value() interface{} {
if v.Nil {
return nil
}
return v.Value
}
func (v *BooleanPoint) aux() []interface{} { return v.Aux }
// Clone returns a copy of v, or nil when v is nil. The Aux slice is
// duplicated so the clone does not share backing storage with v.
func (v *BooleanPoint) Clone() *BooleanPoint {
	if v == nil {
		return nil
	}
	clone := *v
	if v.Aux != nil {
		aux := make([]interface{}, len(v.Aux))
		copy(aux, v.Aux)
		clone.Aux = aux
	}
	return &clone
}
// CopyTo makes a deep copy into the point.
func (v *BooleanPoint) CopyTo(other *BooleanPoint) {
other.Name, other.Tags = v.Name, v.Tags
other.Time = v.Time
other.Value, other.Nil = v.Value, v.Nil
if v.Aux != nil {
if len(other.Aux) != len(v.Aux) {
other.Aux = make([]interface{}, len(v.Aux))
}
copy(other.Aux, v.Aux)
}
}
func encodeBooleanPoint(p *BooleanPoint) *internal.Point {
return &internal.Point{
Name: proto.String(p.Name),
Tags: proto.String(p.Tags.ID()),
Time: proto.Int64(p.Time),
Nil: proto.Bool(p.Nil),
Aux: encodeAux(p.Aux),
Aggregated: proto.Uint32(p.Aggregated),
BooleanValue: proto.Bool(p.Value),
}
}
func decodeBooleanPoint(pb *internal.Point) *BooleanPoint {
return &BooleanPoint{
Name: pb.GetName(),
Tags: newTagsID(pb.GetTags()),
Time: pb.GetTime(),
Nil: pb.GetNil(),
Aux: decodeAux(pb.Aux),
Aggregated: pb.GetAggregated(),
Value: pb.GetBooleanValue(),
}
}
// booleanPoints represents a slice of points sortable by time, with ties
// broken by value (false sorts before true).
type booleanPoints []BooleanPoint
func (a booleanPoints) Len() int { return len(a) }
// Less orders points by ascending time; equal timestamps are ordered with
// false values before true values. The tie-break must compare both values:
// the previous "!a[i].Value" alone reported both Less(i, j) and Less(j, i)
// as true when both values were false, violating the strict-weak-ordering
// contract required by sort.Interface.
func (a booleanPoints) Less(i, j int) bool {
	if a[i].Time != a[j].Time {
		return a[i].Time < a[j].Time
	}
	return !a[i].Value && a[j].Value
}
func (a booleanPoints) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// booleanPointsByValue represents a slice of points sortable by value
// (false sorts before true).
type booleanPointsByValue []BooleanPoint
func (a booleanPointsByValue) Len() int { return len(a) }
// Less must compare both values: the previous "!a[i].Value" alone reported
// both Less(i, j) and Less(j, i) as true when both values were false, which
// violates the strict-weak-ordering contract required by sort.Interface.
func (a booleanPointsByValue) Less(i, j int) bool { return !a[i].Value && a[j].Value }
func (a booleanPointsByValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// booleanPointsByTime represents a slice of points sortable by value.
type booleanPointsByTime []BooleanPoint
func (a booleanPointsByTime) Len() int { return len(a) }
func (a booleanPointsByTime) Less(i, j int) bool { return a[i].Time < a[j].Time }
func (a booleanPointsByTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// booleanPointByFunc represents a slice of points sortable by a function.
type booleanPointsByFunc struct {
points []BooleanPoint
cmp func(a, b *BooleanPoint) bool
}
func (a *booleanPointsByFunc) Len() int { return len(a.points) }
func (a *booleanPointsByFunc) Less(i, j int) bool { return a.cmp(&a.points[i], &a.points[j]) }
func (a *booleanPointsByFunc) Swap(i, j int) { a.points[i], a.points[j] = a.points[j], a.points[i] }
func (a *booleanPointsByFunc) Push(x interface{}) {
a.points = append(a.points, x.(BooleanPoint))
}
func (a *booleanPointsByFunc) Pop() interface{} {
p := a.points[len(a.points)-1]
a.points = a.points[:len(a.points)-1]
return p
}
func booleanPointsSortBy(points []BooleanPoint, cmp func(a, b *BooleanPoint) bool) *booleanPointsByFunc {
return &booleanPointsByFunc{
points: points,
cmp: cmp,
}
}
// BooleanPointEncoder encodes BooleanPoint points to a writer.
type BooleanPointEncoder struct {
w io.Writer
}
// NewBooleanPointEncoder returns a new instance of BooleanPointEncoder that writes to w.
func NewBooleanPointEncoder(w io.Writer) *BooleanPointEncoder {
return &BooleanPointEncoder{w: w}
}
// EncodeBooleanPoint marshals and writes p to the underlying writer.
func (enc *BooleanPointEncoder) EncodeBooleanPoint(p *BooleanPoint) error {
// Marshal to bytes.
buf, err := proto.Marshal(encodeBooleanPoint(p))
if err != nil {
return err
}
// Write the length.
if err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil {
return err
}
// Write the encoded point.
if _, err := enc.w.Write(buf); err != nil {
return err
}
return nil
}
// BooleanPointDecoder decodes BooleanPoint points from a reader.
type BooleanPointDecoder struct {
r io.Reader
stats IteratorStats
ctx context.Context
}
// NewBooleanPointDecoder returns a new instance of BooleanPointDecoder that reads from r.
func NewBooleanPointDecoder(ctx context.Context, r io.Reader) *BooleanPointDecoder {
return &BooleanPointDecoder{r: r, ctx: ctx}
}
// Stats returns iterator stats embedded within the stream.
func (dec *BooleanPointDecoder) Stats() IteratorStats { return dec.stats }
// DecodeBooleanPoint reads from the underlying reader and unmarshals into p.
func (dec *BooleanPointDecoder) DecodeBooleanPoint(p *BooleanPoint) error {
for {
// Read length.
var sz uint32
if err := binary.Read(dec.r, binary.BigEndian, &sz); err != nil {
return err
}
// Read point data.
buf := make([]byte, sz)
if _, err := io.ReadFull(dec.r, buf); err != nil {
return err
}
// Unmarshal into point.
var pb internal.Point
if err := proto.Unmarshal(buf, &pb); err != nil {
return err
}
// If the point contains stats then read stats and retry.
if pb.Stats != nil {
dec.stats = decodeIteratorStats(pb.Stats)
continue
}
if len(pb.Trace) > 0 {
var err error
err = decodeIteratorTrace(dec.ctx, pb.Trace)
if err != nil {
return err
}
continue
}
// Decode into point object.
*p = *decodeBooleanPoint(&pb)
return nil
}
} | vendor/github.com/influxdata/influxdb/query/point.gen.go | 0.819352 | 0.48182 | point.gen.go | starcoder |
package picker
import (
"math"
"math/rand"
"github.com/chronos-tachyon/roxy/lib/syncrand"
)
// Picker is a utility class for picking values from a list using weighted
// random selection. The weight is based on a user-defined score function,
// where low scores are exponentially more favorable than high scores.
type Picker struct {
	values []interface{} // candidate values, in the order supplied to Make
	scores []float64 // score of each value, as returned by the score function
	weights []float64 // 2^(-score) per value; forced to 0 while a value is disabled
	probs []float64 // normalized selection probability per value (weight / sum of weights)
	cprobs []float64 // running cumulative sum of probs; consumed by Pick
}
// Make creates a Picker with the given values and score function.
// Each value's weight is 2^(-score), so lower scores are exponentially
// more likely to be picked.
func Make(values []interface{}, scoreFn func(interface{}) float64) Picker {
	n := len(values)
	p := Picker{
		values:  make([]interface{}, n),
		scores:  make([]float64, n),
		weights: make([]float64, n),
		probs:   make([]float64, n),
		cprobs:  make([]float64, n),
	}
	copy(p.values, values)
	for i, v := range values {
		p.scores[i] = scoreFn(v)
		p.weights[i] = math.Exp2(-p.scores[i])
	}
	p.recomputeProbabilities()
	return p
}
// Len returns the number of values in this Picker.
func (p Picker) Len() uint {
return uint(len(p.values))
}
// Get returns the index'th value in this Picker, using 0-based indexing.
func (p Picker) Get(index uint) interface{} {
return p.values[index]
}
// Disable semi-permanently alters the probability of selecting the index'th
// value to 0.
func (p Picker) Disable(index uint) {
p.weights[index] = 0.0
p.recomputeProbabilities()
}
// Enable undoes the effect of a previous call to Disable.
func (p Picker) Enable(index uint) {
p.weights[index] = math.Exp2(-p.scores[index])
p.recomputeProbabilities()
}
// Worst deterministically picks the index of the value with the worst score
// (i.e. the smallest nonzero selection probability). Disabled values are
// ignored. The second return value is false when every value is disabled.
func (p Picker) Worst() (uint, bool) {
	minProb := math.Inf(1)
	var worst uint
	var found bool
	for i, n := uint(0), p.Len(); i < n; i++ {
		prob := p.probs[i]
		// Disabled values (probability 0) never qualify; among the rest,
		// keep the first index holding the strictly smallest probability.
		if prob == 0.0 || prob >= minProb {
			continue
		}
		minProb = prob
		worst = i
		found = true
	}
	return worst, found
}
// Pick randomly selects the index of a value. It draws k uniformly from
// [0, 1) and returns the first index whose cumulative probability exceeds
// k; if none does (e.g. floating-point round-off), the last index is
// returned. A nil rng falls back to the shared syncrand generator.
func (p Picker) Pick(rng *rand.Rand) uint {
	if rng == nil {
		rng = syncrand.Global()
	}
	k := rng.Float64()
	last := p.Len() - 1
	for index := uint(0); ; index++ {
		if k < p.cprobs[index] || index == last {
			return index
		}
	}
}
// Divvy divides and distributes the given quantity among the values
// according to their individual probabilities, returning one share per
// value.
func (p Picker) Divvy(amount float64) []float64 {
	shares := make([]float64, p.Len())
	for i, prob := range p.probs {
		shares[i] = amount * prob
	}
	return shares
}
func (p Picker) recomputeProbabilities() {
length := p.Len()
var sumOfWeights float64
for index := uint(0); index < length; index++ {
sumOfWeights += p.weights[index]
}
if sumOfWeights == 0.0 {
sumOfWeights = 1.0
}
norm := 1.0 / sumOfWeights
var cumulativeProbability float64
for index := uint(0); index < length; index++ {
weight := p.weights[index]
prob := weight * norm
cumulativeProbability += prob
p.probs[index] = prob
p.cprobs[index] = cumulativeProbability
}
} | internal/picker/picker.go | 0.814459 | 0.611817 | picker.go | starcoder |
package wire
import (
	"fmt"
	"io"

	"github.com/soteria-dag/soterd/chaincfg/chainhash"
)
const (
// maxParents is the maximum number of parents that a ParentSubHeader can contain
maxParents = 8
// ParentSize is the size in bytes of a parent
ParentSize = chainhash.HashSize + 32
ParentVersionSize = 4
ParentCountSize = 4
// MaxParentSubHeaderPayload is the maximum number of bytes a parent header can be.
// Version 4 bytes + Size 4 bytes + max size of Parents
MaxParentSubHeaderPayload = ParentVersionSize + ParentCountSize + (ParentSize * maxParents)
)
// ParentSubHeader represent a block parent sub-header
type ParentSubHeader struct {
// Version of the parents sub header.
Version int32
// Size of Parents array (number of Parent associations)
// (Used for serialization/deserialization of ParentSubHeader)
Size int32
// Array of meta data for previous block tips in the block DAG.
Parents []*Parent
}
// Parent represents a parent of a block
type Parent struct {
// Hash of parent block in the block dag.
Hash chainhash.Hash
// Metadata of this parent block. Currently a place-holder.
Data [32]byte
}
// ParentHashes returns the hashes of the block's parents, in the same order
// as the Parents slice.
func (h *ParentSubHeader) ParentHashes() []chainhash.Hash {
	hashes := make([]chainhash.Hash, len(h.Parents))
	for i, parent := range h.Parents {
		hashes[i] = parent.Hash
	}
	return hashes
}
// IsParent reports whether the given hash is one of this block's parents.
func (h *ParentSubHeader) IsParent(hash *chainhash.Hash) bool {
	for i := range h.Parents {
		if hash.IsEqual(&h.Parents[i].Hash) {
			return true
		}
	}
	return false
}
// Deserialize decodes a parent sub-header from r into the receiver using a format
// that is suitable for long-term storage (such as a database)
func (h *ParentSubHeader) Deserialize(r io.Reader) error {
// At time of writing the encoding for protocol version 0 is the same between wire and long-term storage.
return readParentSubHeader(r, 0, h)
}
// Serialize encodes a block header from r into the receiver using a format
// that is suitable for long-term storage (such as a database)
func (h *ParentSubHeader) Serialize(w io.Writer) error {
// At time of writing the encoding for protocol version 0 is the same between wire and long-term storage.
return writeParentSubHeader(w, 0, h)
}
// readParentSubHeader reads a block's parent sub-header from r.
//
// The encoded layout is: Version (int32), Size (int32), then Size
// (Hash, Data) parent entries, in that order.
func readParentSubHeader(r io.Reader, pver uint32, psh *ParentSubHeader) error {
	// Fields are read in the same order that they appear in the encoded data.
	if err := readElement(r, &psh.Version); err != nil {
		return err
	}
	if err := readElement(r, &psh.Size); err != nil {
		return err
	}
	// Size comes off the wire, so it is untrusted. Reject nonsensical
	// counts before allocating or reading anything: a block may carry at
	// most maxParents parents (see MaxParentSubHeaderPayload), and the
	// previous implementation would loop unboundedly on a huge Size.
	if psh.Size < 0 || psh.Size > maxParents {
		return fmt.Errorf("readParentSubHeader: invalid parent count %d (max %d)", psh.Size, maxParents)
	}
	// readElement and writeElement deal mostly with primitive types, so
	// complex fields (the Parents slice) are assembled here element by
	// element.
	parents := make([]*Parent, 0, psh.Size)
	for i := int32(0); i < psh.Size; i++ {
		p := &Parent{}
		if err := readElements(r, &p.Hash, &p.Data); err != nil {
			return err
		}
		parents = append(parents, p)
	}
	psh.Parents = parents
	return nil
}
// writeParentSubHeader writes a block's parent sub-header to w
func writeParentSubHeader(w io.Writer, pver uint32, psh *ParentSubHeader) error {
// Write version info
err := writeElement(w, psh.Version)
if err != nil {
return err
}
// Write size of Parents
currentSize := int32(len(psh.Parents))
var size int32
if psh.Size == currentSize {
size = psh.Size
} else {
// Something didn't update psh.Size when number of Parents changed. In this case the size
// of the Parents slice is used.
size = currentSize
}
err = writeElement(w, size)
if err != nil {
return err
}
for _, pi := range psh.Parents {
err = writeElements(w, &pi.Hash, &pi.Data)
if err != nil {
return err
}
}
return nil
} | wire/parentsubheader.go | 0.684053 | 0.410756 | parentsubheader.go | starcoder |
package common
import (
"bytes"
"database/sql/driver"
"encoding/hex"
"fmt"
"math/big"
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/harmony-one/harmony/internal/bech32"
"github.com/harmony-one/harmony/internal/utils"
"github.com/pkg/errors"
"golang.org/x/crypto/sha3"
)
// Lengths of addresses in bytes.
const (
// AddressLength is the expected length of the address
AddressLength = 20
)
var (
emptyAddress = Address{}
)
// Address represents the 20 byte address of an Harmony account.
type Address [AddressLength]byte
// BytesToAddress returns Address with value b.
// If b is larger than len(h), b will be cropped from the left.
func BytesToAddress(b []byte) Address {
var a Address
a.SetBytes(b)
return a
}
// BigToAddress returns Address with byte values of b.
// If b is larger than len(h), b will be cropped from the left.
func BigToAddress(b *big.Int) Address { return BytesToAddress(b.Bytes()) }
// HexToAddress returns Address with byte values of s.
// If s is larger than len(h), s will be cropped from the left.
func HexToAddress(s string) Address { return BytesToAddress(utils.FromHex(s)) }
// IsBech32Address verifies whether a string can represent a valid
// bech32-encoded Harmony address or not. A valid address decodes without
// error, carries the "one" or "tone" human-readable part, and is exactly
// AddressLength bytes long.
func IsBech32Address(s string) bool {
	hrp, data, err := bech32.DecodeAndConvert(s)
	if err != nil {
		return false
	}
	return (hrp == "one" || hrp == "tone") && len(data) == AddressLength
}
// IsEmpty gets whether the address contains all 0 bytes.
func (a Address) IsEmpty() bool {
	// Address is a fixed-size array and therefore comparable with ==;
	// this replaces the slice conversions required by bytes.Compare.
	return a == emptyAddress
}
// Bytes gets the string representation of the underlying address.
func (a Address) Bytes() []byte { return a[:] }
// Big converts an address to a big integer.
func (a Address) Big() *big.Int { return new(big.Int).SetBytes(a[:]) }
// Hash converts an address to a hash by left-padding it with zeros.
func (a Address) Hash() Hash { return BytesToHash(a[:]) }
// Bech32 returns a checksummed, "0x"-prefixed hex representation of the
// address.
//
// NOTE(review): despite the method name (and the original comment, which
// claimed bip0173 compliance), this does not produce bech32 output. It
// hex-encodes the address and applies the EIP-55-style mixed-case checksum
// (Keccak-256 of the lowercase hex, with each nibble deciding letter
// casing). For actual bech32 encoding see AddressToBech32. Confirm whether
// the name or the behavior is the intended one.
func (a Address) Bech32() string {
	unchecksummed := hex.EncodeToString(a[:])
	sha := sha3.NewLegacyKeccak256()
	sha.Write([]byte(unchecksummed))
	hash := sha.Sum(nil)
	result := []byte(unchecksummed)
	for i := 0; i < len(result); i++ {
		// Each hex digit's case is driven by the corresponding nibble of
		// the hash: high nibble for even positions, low nibble for odd.
		hashByte := hash[i/2]
		if i%2 == 0 {
			hashByte = hashByte >> 4
		} else {
			hashByte &= 0xf
		}
		// Uppercase the letter (a-f) when the hash nibble is >= 8.
		if result[i] > '9' && hashByte > 7 {
			result[i] -= 32
		}
	}
	return "0x" + string(result)
}
// String implements fmt.Stringer.
func (a Address) String() string {
return a.Bech32()
}
// Format implements fmt.Formatter, forcing the byte slice to be formatted as is,
// without going through the stringer interface used for logging.
func (a Address) Format(s fmt.State, c rune) {
fmt.Fprintf(s, "%"+string(c), a[:])
}
// SetBytes sets the address to the value of b.
// If b is longer than AddressLength, only the rightmost AddressLength
// bytes are used (b is cropped from the left) — it does NOT panic, despite
// what the previous comment said. If b is shorter, it is copied into the
// tail of a and the leading bytes of a are left as-is (zero for a freshly
// declared Address).
func (a *Address) SetBytes(b []byte) {
	if len(b) > len(a) {
		b = b[len(b)-AddressLength:]
	}
	copy(a[AddressLength-len(b):], b)
}
// MarshalText returns the hex representation of a.
func (a Address) MarshalText() ([]byte, error) {
return hexutil.Bytes(a[:]).MarshalText()
}
// UnmarshalText parses a hash in hex syntax.
func (a *Address) UnmarshalText(input []byte) error {
return hexutil.UnmarshalFixedText("Address", input, a[:])
}
// UnmarshalJSON parses a hash in hex syntax.
func (a *Address) UnmarshalJSON(input []byte) error {
return hexutil.UnmarshalFixedJSON(addressT, input, a[:])
}
// Scan implements Scanner for database/sql.
func (a *Address) Scan(src interface{}) error {
srcB, ok := src.([]byte)
if !ok {
return fmt.Errorf("can't scan %T into Address", src)
}
if len(srcB) != AddressLength {
return fmt.Errorf("can't scan []byte of len %d into Address, want %d", len(srcB), AddressLength)
}
copy(a[:], srcB)
return nil
}
// Value implements valuer for database/sql.
func (a Address) Value() (driver.Value, error) {
return a[:], nil
}
// UnprefixedAddress allows marshaling an Address without 0x prefix.
type UnprefixedAddress Address
// UnmarshalText decodes the address from hex. The 0x prefix is optional.
func (a *UnprefixedAddress) UnmarshalText(input []byte) error {
return hexutil.UnmarshalFixedUnprefixedText("UnprefixedAddress", input, a[:])
}
// MarshalText encodes the address as hex.
func (a UnprefixedAddress) MarshalText() ([]byte, error) {
return []byte(hex.EncodeToString(a[:])), nil
}
// TODO ek – the following functions use Ethereum addresses until we have a
// proper abstraction set in place.
// ParseBech32Addr decodes the given bech32 address and populates the given
// human-readable-part string and address with the decoded result.
func ParseBech32Addr(b32 string, hrp *string, addr *ethCommon.Address) error {
h, b, err := bech32.DecodeAndConvert(b32)
if err != nil {
return errors.Wrapf(err, "cannot decode %#v as bech32 address", b32)
}
if len(b) != ethCommon.AddressLength {
return errors.Errorf("decoded bech32 %#v has invalid length %d",
b32, len(b))
}
*hrp = h
addr.SetBytes(b)
return nil
}
// BuildBech32Addr encodes the given human-readable-part string and address
// into a bech32 address.
func BuildBech32Addr(hrp string, addr ethCommon.Address) (string, error) {
return bech32.ConvertAndEncode(hrp, addr.Bytes())
}
// MustBuildBech32Addr encodes the given human-readable-part string and
// address into a bech32 address. It panics on error.
func MustBuildBech32Addr(hrp string, addr ethCommon.Address) string {
b32, err := BuildBech32Addr(hrp, addr)
if err != nil {
panic(err)
}
return b32
}
// Bech32AddressHRP is the human-readable part of the Harmony address used by
// this process.
var Bech32AddressHRP = "one"
// Bech32ToAddress decodes the given bech32 address.
func Bech32ToAddress(b32 string) (addr ethCommon.Address, err error) {
var hrp string
err = ParseBech32Addr(b32, &hrp, &addr)
if err == nil && hrp != Bech32AddressHRP {
err = errors.Errorf("%#v is not a %#v address", b32, Bech32AddressHRP)
}
return
}
// MustBech32ToAddress decodes the given bech32 address. It panics on error.
func MustBech32ToAddress(b32 string) ethCommon.Address {
addr, err := Bech32ToAddress(b32)
if err != nil {
panic(err)
}
return addr
}
// AddressToBech32 encodes the given address into bech32 format.
func AddressToBech32(addr ethCommon.Address) (string, error) {
return BuildBech32Addr(Bech32AddressHRP, addr)
}
// MustAddressToBech32 encodes the given address into bech32 format.
// It panics on error.
func MustAddressToBech32(addr ethCommon.Address) string {
b32, err := BuildBech32Addr(Bech32AddressHRP, addr)
if err != nil {
panic(err)
}
return b32
}
// ParseAddr parses the given address, either as bech32 or as hex.
// The result can be 0x00..00 if the passing param is not a correct address.
func ParseAddr(s string) ethCommon.Address {
if addr, err := Bech32ToAddress(s); err == nil {
return addr
}
// The result can be 0x00...00 if the passing param is not a correct address.
return ethCommon.HexToAddress(s)
} | internal/common/address.go | 0.708414 | 0.423637 | address.go | starcoder |
package kol
import (
"golang.org/x/exp/maps"
)
// Set is an un-ordered collection of elements without duplicate elements.
type Set[E comparable] interface {
Collection[E]
}
type set[E comparable] struct {
m map[E]struct{}
}
func NewSet[E comparable](elements ...E) Set[E] {
m := make(map[E]struct{}, 0)
for _, e := range elements {
m[e] = struct{}{}
}
return &set[E]{m: m}
}
func newSet[E comparable](m map[E]struct{}) Set[E] {
return &set[E]{m: m}
}
func (s *set[E]) clone() Set[E] {
return newSet(maps.Clone(s.m))
}
var _ Collection[int] = (*set[int])(nil)
func (s *set[E]) Add(elements ...E) {
for _, e := range elements {
s.m[e] = struct{}{}
}
}
func (s *set[E]) Clear() {
maps.Clear(s.m)
}
func (s *set[E]) IsEmpty() bool {
return s.Size() == 0
}
func (s *set[E]) Remove(targets ...E) {
for _, t := range targets {
delete(s.m, t)
}
}
func (s *set[E]) Retain(targets ...E) {
tl := NewList(targets...)
for e := range s.m {
if !tl.Contains(e) {
delete(s.m, e)
}
}
}
func (s *set[E]) Size() int {
return len(s.m)
}
var _ Iterable[int] = (*set[int])(nil)
// All reports whether every element of the set satisfies p.
//
// NOTE(review): an empty set yields false here, unlike the usual
// vacuous-truth convention (e.g. Kotlin's Iterable.all returns true for an
// empty collection). The empty-set branch is explicit, so this looks
// deliberate — confirm it is the intended contract before relying on it.
func (s *set[E]) All(p func(e E) bool) bool {
	if s.Size() == 0 {
		return false
	}
	for e := range s.m {
		if !p(e) {
			return false
		}
	}
	return true
}
func (s *set[E]) Any(p func(e E) bool) bool {
for e := range s.m {
if p(e) {
return true
}
}
return false
}
func (s *set[E]) Contains(e E) bool {
_, ok := s.m[e]
return ok
}
func (s *set[E]) Count(p func(e E) bool) int {
count := 0
for e := range s.m {
if p(e) {
count++
}
}
return count
}
func (s *set[E]) Distinct() Collection[E] {
return s
}
func (s *set[E]) Find(p func(e E) bool) (E, bool) {
for e := range s.m {
if p(e) {
return e, true
}
}
var zero E
return zero, false
}
// Filter returns a new collection holding only the elements of s for which
// p returns true.
func (s *set[E]) Filter(p func(e E) bool) Collection[E] {
	kept := make(map[E]struct{})
	for e := range s.m {
		if p(e) {
			kept[e] = struct{}{}
		}
	}
	return newSet(kept)
}
func (s *set[E]) ForEach(a func(e E)) {
for e := range s.m {
a(e)
}
}
// Intersect returns a new set containing only the elements present in both
// s and other.
func (s *set[E]) Intersect(other Iterable[E]) Set[E] {
	// Build the result from scratch. The previous implementation iterated
	// over s and removed from a copy of other only elements already absent
	// from that copy — a no-op — so it returned all of other's elements
	// instead of the intersection.
	common := make(map[E]struct{})
	for _, e := range other.ToSlice() {
		if _, ok := s.m[e]; ok {
			common[e] = struct{}{}
		}
	}
	return newSet(common)
}
func (s *set[E]) Map(t func(e E) E) Collection[E] {
mapped := make(map[E]struct{}, 0)
s.ForEach(func(e E) {
mapped[t(e)] = struct{}{}
})
return newSet(mapped)
}
func (s *set[E]) Minus(e ...E) Collection[E] {
cloned := s.clone()
cloned.Remove(e...)
return cloned
}
func (s *set[E]) None(p func(e E) bool) bool {
for e := range s.m {
if p(e) {
return false
}
}
return true
}
func (s *set[E]) Plus(e ...E) Collection[E] {
cloned := s.clone()
cloned.Add(e...)
return cloned
}
// Single returns the one element of the set that satisfies p. The boolean
// is true only when exactly one element matches; it is false when no
// element matches (the previous implementation wrongly reported success in
// that case) or when two or more elements match.
func (s *set[E]) Single(p func(e E) bool) (E, bool) {
	var (
		match E
		found bool
	)
	for e := range s.m {
		if !p(e) {
			continue
		}
		if found {
			// A second match means the result is not unique.
			var zero E
			return zero, false
		}
		match = e
		found = true
	}
	return match, found
}
// Subtract returns a new set containing the elements of s that are not
// present in other.
func (s *set[E]) Subtract(other Iterable[E]) Set[E] {
	res := s.clone()
	// Remove is a no-op for absent elements, so the per-element membership
	// pre-check (and the *set[E] type assertion it required) was redundant.
	res.Remove(other.ToSlice()...)
	return res
}
func (s *set[E]) ToList() List[E] {
return NewList(maps.Keys(s.m)...)
}
func (s *set[E]) ToSet() Set[E] {
return s.clone()
}
func (s *set[E]) ToSlice() []E {
return maps.Keys(s.m)
}
func (s *set[E]) Union(other Iterable[E]) Set[E] {
return s.clone().Plus(other.ToSlice()...)
}
func MapSet[E1 comparable, E2 comparable](collection Collection[E1], transform func(E1) E2) Set[E2] {
result := make([]E2, 0, collection.Size())
collection.ForEach(func(e1 E1) {
result = append(result, transform(e1))
})
return NewSet(result...)
} | set.go | 0.656658 | 0.4165 | set.go | starcoder |
// player keeps track of all player-specific variables, as well as some basic logic functions to support more complex table logic
package player
import (
"hearts/logic/card"
)
// NewPlayer returns a player instance with playerIndex equal to index.
// The hand starts nil, the tricks pile starts empty, the score starts at
// zero, and the per-phase "done" flags start false (donePlaying is simply
// left at its zero value, false, like the explicitly initialized flags).
func NewPlayer(index int) *Player {
	return &Player{
		hand: nil,
		tricks: make([]*card.Card, 0),
		score: 0,
		playerIndex: index,
		donePassing: false,
		doneTaking: false,
		doneScoring: false,
	}
}
type Player struct {
hand []*card.Card
passedFrom []*card.Card
passedTo []*card.Card
tricks []*card.Card
score int
playerIndex int
donePassing bool
doneTaking bool
donePlaying bool
doneScoring bool
}
// Returns the hand of p
func (p *Player) GetHand() []*card.Card {
return p.hand
}
// Returns the cards that have been passed to p
func (p *Player) GetPassedTo() []*card.Card {
return p.passedTo
}
// Returns the cards that p passed
func (p *Player) GetPassedFrom() []*card.Card {
return p.passedFrom
}
// Returns the number of tricks p has taken
// Assumes each trick is 4 cards
func (p *Player) GetNumTricks() int {
return len(p.tricks) / 4
}
// Returns the score of p
func (p *Player) GetScore() int {
return p.score
}
// Returns the playerIndex of p
func (p *Player) GetPlayerIndex() int {
return p.playerIndex
}
// Returns true if p has finished the pass phase of the current round
func (p *Player) GetDonePassing() bool {
return p.donePassing
}
// Returns true if p has finished the take phase of the current round
func (p *Player) GetDoneTaking() bool {
return p.doneTaking
}
// Returns true if p has finished the play phase of the current trick
func (p *Player) GetDonePlaying() bool {
return p.donePlaying
}
// Returns true if p has finished the score phase of the current round
func (p *Player) GetDoneScoring() bool {
return p.doneScoring
}
// Adds card to the hand of p
func (p *Player) AddToHand(card *card.Card) {
p.hand = append(p.hand, card)
}
// RemoveFromHand removes card from the hand of p, every time it appears.
func (p *Player) RemoveFromHand(card *card.Card) {
	// Filter in place rather than deleting inside a range loop: the
	// previous implementation re-sliced p.hand while ranging over the old
	// slice header, so elements shifting into a removed slot were skipped
	// and duplicate occurrences could survive.
	kept := p.hand[:0]
	for _, c := range p.hand {
		if c != card {
			kept = append(kept, c)
		}
	}
	p.hand = kept
}
// SetHand replaces the hand of p with cards in one chunk.
func (p *Player) SetHand(cards []*card.Card) {
	p.hand = cards
}

// SetPassedTo sets passedTo of p to cards.
func (p *Player) SetPassedTo(cards []*card.Card) {
	p.passedTo = cards
}

// SetPassedFrom sets passedFrom of p to cards.
func (p *Player) SetPassedFrom(cards []*card.Card) {
	p.passedFrom = cards
}

// SetDonePassing sets p.donePassing to isDone.
func (p *Player) SetDonePassing(isDone bool) {
	p.donePassing = isDone
}

// SetDoneTaking sets p.doneTaking to isDone.
func (p *Player) SetDoneTaking(isDone bool) {
	p.doneTaking = isDone
}

// SetDonePlaying sets p.donePlaying to isDone.
func (p *Player) SetDonePlaying(isDone bool) {
	p.donePlaying = isDone
}

// SetDoneScoring sets p.doneScoring to isDone.
func (p *Player) SetDoneScoring(isDone bool) {
	p.doneScoring = isDone
}
// TakeTrick adds cards to the tricks deck of p.
func (p *Player) TakeTrick(cards []*card.Card) {
	p.tricks = append(p.tricks, cards...)
}

// UpdateScore adds points to the total score of p.
func (p *Player) UpdateScore(points int) {
	p.score += points
}
// CalculateScore computes the total point value of the cards in the
// tricks deck of p: each heart is worth 1 point and the queen of
// spades is worth 13.
func (p *Player) CalculateScore() int {
	total := 0
	for _, taken := range p.tricks {
		switch {
		case taken.GetSuit() == card.Heart:
			total++
		case taken.GetSuit() == card.Spade && taken.GetFace() == card.Queen:
			total += 13
		}
	}
	return total
}
// ResetPassedTo sets the passedTo deck of p to a new empty list.
func (p *Player) ResetPassedTo() {
	p.passedTo = make([]*card.Card, 0)
}

// ResetPassedFrom sets the passedFrom deck of p to a new empty list.
func (p *Player) ResetPassedFrom() {
	p.passedFrom = make([]*card.Card, 0)
}

// ResetTricks sets the tricks deck of p to a new empty list.
func (p *Player) ResetTricks() {
	p.tricks = make([]*card.Card, 0)
}

// ResetScore resets the score of p to 0 for a new game.
func (p *Player) ResetScore() {
	p.score = 0
}
// HasSuit reports whether the hand of p contains at least one card of
// the given suit.
func (p *Player) HasSuit(suit card.Suit) bool {
	for _, held := range p.hand {
		if held.GetSuit() == suit {
			return true
		}
	}
	return false
}
// HasOnlyHearts reports whether p holds at least one heart and no
// cards of any other suit. Equivalent (by De Morgan) to the original
// !(club || diamond || spade || !heart) formulation.
func (p *Player) HasOnlyHearts() bool {
	return p.HasSuit(card.Heart) &&
		!p.HasSuit(card.Club) &&
		!p.HasSuit(card.Diamond) &&
		!p.HasSuit(card.Spade)
}
// HasAllPoints reports whether every card in the hand of p is worth
// points (i.e. the hand contains no 0-point cards: clubs, diamonds,
// and spades other than the queen).
func (p *Player) HasAllPoints() bool {
	for _, held := range p.hand {
		if !held.WorthPoints() {
			return false
		}
	}
	return true
}
// Returns true if p has the two of clubs in hand
func (p *Player) HasTwoOfClubs() bool {
for _, c := range p.hand {
if c.GetSuit() == card.Club && c.GetFace() == card.Two {
return true
}
}
return false
} | go/src/hearts/logic/player/player.go | 0.715126 | 0.432183 | player.go | starcoder |
package ovsdb
import (
"encoding/json"
"fmt"
"reflect"
)
// ConditionFunction is one of the boolean comparison functions that an
// OVSDB <condition> may apply to a column and a value (RFC 7047, 5.1).
type ConditionFunction string

const (
	// ConditionLessThan is the less than condition
	ConditionLessThan ConditionFunction = "<"
	// ConditionLessThanOrEqual is the less than or equal condition
	ConditionLessThanOrEqual ConditionFunction = "<="
	// ConditionEqual is the equal condition
	ConditionEqual ConditionFunction = "=="
	// ConditionNotEqual is the not equal condition
	ConditionNotEqual ConditionFunction = "!="
	// ConditionGreaterThan is the greater than condition
	ConditionGreaterThan ConditionFunction = ">"
	// ConditionGreaterThanOrEqual is the greater than or equal condition
	ConditionGreaterThanOrEqual ConditionFunction = ">="
	// ConditionIncludes is the includes condition
	ConditionIncludes ConditionFunction = "includes"
	// ConditionExcludes is the excludes condition
	ConditionExcludes ConditionFunction = "excludes"
)
// Condition is described in RFC 7047: 5.1. It is the 3-tuple of a
// column name, a comparison function, and the value compared against.
type Condition struct {
	Column   string            // name of the column being compared
	Function ConditionFunction // comparison function to apply
	Value    interface{}       // value the column is compared against
}

// String returns a human-readable description of the condition.
func (c Condition) String() string {
	return fmt.Sprintf("where column %s %s %v", c.Column, c.Function, c.Value)
}
// NewCondition builds a Condition from its three components.
func NewCondition(column string, function ConditionFunction, value interface{}) Condition {
	return Condition{Column: column, Function: function, Value: value}
}
// MarshalJSON encodes the condition as the 3-element JSON array
// [column, function, value] required by the OVSDB wire format.
func (c Condition) MarshalJSON() ([]byte, error) {
	return json.Marshal([]interface{}{c.Column, c.Function, c.Value})
}
// UnmarshalJSON converts a 3 element JSON array to a Condition.
//
// Malformed input with a non-string column or function now yields an
// error instead of panicking on a failed type assertion, and the
// receiver is only mutated once the whole tuple has been validated.
func (c *Condition) UnmarshalJSON(b []byte) error {
	var v []interface{}
	if err := json.Unmarshal(b, &v); err != nil {
		return err
	}
	if len(v) != 3 {
		return fmt.Errorf("expected a 3 element json array. there are %d elements", len(v))
	}
	column, ok := v[0].(string)
	if !ok {
		return fmt.Errorf("expected column to be a string, got %T", v[0])
	}
	fn, ok := v[1].(string)
	if !ok {
		return fmt.Errorf("expected function to be a string, got %T", v[1])
	}
	function := ConditionFunction(fn)
	switch function {
	case ConditionEqual,
		ConditionNotEqual,
		ConditionIncludes,
		ConditionExcludes,
		ConditionGreaterThan,
		ConditionGreaterThanOrEqual,
		ConditionLessThan,
		ConditionLessThanOrEqual:
	default:
		return fmt.Errorf("%s is not a valid function", function)
	}
	value, err := ovsSliceToGoNotation(v[2])
	if err != nil {
		return err
	}
	c.Column = column
	c.Function = function
	c.Value = value
	return nil
}
// Evaluate applies the condition function c to the two provided values.
// The behavior follows RFC 7047: == and != work on any kind (deep
// equality); "includes"/"excludes" work on slices (set containment),
// maps (pair containment) and atoms (equality); the relational
// functions (<, <=, >, >=) are only defined for integers and reals.
// Values of differing kinds cannot be compared.
//
// Fix: relational comparisons on bool/string/slice/map previously fell
// through empty case arms to a misleading "unreachable condition"
// error; they now return the proper "condition not supported" error.
func (c ConditionFunction) Evaluate(a interface{}, b interface{}) (bool, error) {
	x := reflect.ValueOf(a)
	y := reflect.ValueOf(b)
	if x.Kind() != y.Kind() {
		return false, fmt.Errorf("comparison between %s and %s not supported", x.Kind(), y.Kind())
	}
	switch c {
	case ConditionEqual:
		return reflect.DeepEqual(a, b), nil
	case ConditionNotEqual:
		return !reflect.DeepEqual(a, b), nil
	case ConditionIncludes:
		switch x.Kind() {
		case reflect.Slice:
			return sliceContains(x, y), nil
		case reflect.Map:
			return mapContains(x, y), nil
		case reflect.Int, reflect.Float64, reflect.Bool, reflect.String:
			return reflect.DeepEqual(a, b), nil
		default:
			return false, fmt.Errorf("condition not supported on %s", x.Kind())
		}
	case ConditionExcludes:
		switch x.Kind() {
		case reflect.Slice:
			return !sliceContains(x, y), nil
		case reflect.Map:
			return !mapContains(x, y), nil
		case reflect.Int, reflect.Float64, reflect.Bool, reflect.String:
			return !reflect.DeepEqual(a, b), nil
		default:
			return false, fmt.Errorf("condition not supported on %s", x.Kind())
		}
	case ConditionGreaterThan:
		switch x.Kind() {
		case reflect.Int:
			return x.Int() > y.Int(), nil
		case reflect.Float64:
			return x.Float() > y.Float(), nil
		default:
			return false, fmt.Errorf("condition not supported on %s", x.Kind())
		}
	case ConditionGreaterThanOrEqual:
		switch x.Kind() {
		case reflect.Int:
			return x.Int() >= y.Int(), nil
		case reflect.Float64:
			return x.Float() >= y.Float(), nil
		default:
			return false, fmt.Errorf("condition not supported on %s", x.Kind())
		}
	case ConditionLessThan:
		switch x.Kind() {
		case reflect.Int:
			return x.Int() < y.Int(), nil
		case reflect.Float64:
			return x.Float() < y.Float(), nil
		default:
			return false, fmt.Errorf("condition not supported on %s", x.Kind())
		}
	case ConditionLessThanOrEqual:
		switch x.Kind() {
		case reflect.Int:
			return x.Int() <= y.Int(), nil
		case reflect.Float64:
			return x.Float() <= y.Float(), nil
		default:
			return false, fmt.Errorf("condition not supported on %s", x.Kind())
		}
	default:
		return false, fmt.Errorf("unsupported condition function %s", c)
	}
}
func sliceContains(x, y reflect.Value) bool {
for i := 0; i < y.Len(); i++ {
found := false
vy := y.Index(i)
for j := 0; j < x.Len(); j++ {
vx := x.Index(j)
if vy.Kind() == reflect.Interface {
if vy.Elem() == vx.Elem() {
found = true
break
}
} else {
if vy.Interface() == vx.Interface() {
found = true
break
}
}
}
if !found {
return false
}
}
return true
}
func mapContains(x, y reflect.Value) bool {
iter := y.MapRange()
for iter.Next() {
k := iter.Key()
v := iter.Value()
vx := x.MapIndex(k)
if !vx.IsValid() {
return false
}
if v.Kind() != reflect.Interface {
if v.Interface() != vx.Interface() {
return false
}
} else {
if v.Elem() != vx.Elem() {
return false
}
}
}
return true
} | go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/condition.go | 0.631481 | 0.45302 | condition.go | starcoder |
package iso20022
// ISATransfer19 describes the type of product and the assets to be
// transferred between plan managers.
type ISATransfer19 struct {
	// Information identifying the primary individual investor, for example, name, address, social security number and date of birth.
	PrimaryIndividualInvestor *IndividualPerson8 `xml:"PmryIndvInvstr,omitempty"`
	// Information identifying the secondary individual investor, for example, name, address, social security number and date of birth.
	SecondaryIndividualInvestor *IndividualPerson8 `xml:"ScndryIndvInvstr,omitempty"`
	// Information identifying the other individual investors, for example, name, address, social security number and date of birth.
	OtherIndividualInvestor []*IndividualPerson8 `xml:"OthrIndvInvstr,omitempty"`
	// Information identifying the primary corporate investor, for example, name and address.
	PrimaryCorporateInvestor *Organisation4 `xml:"PmryCorpInvstr,omitempty"`
	// Information identifying the secondary corporate investor, for example, name and address.
	SecondaryCorporateInvestor *Organisation4 `xml:"ScndryCorpInvstr,omitempty"`
	// Information identifying the other corporate investors, for example, name and address.
	OtherCorporateInvestor []*Organisation4 `xml:"OthrCorpInvstr,omitempty"`
	// Identification of an account owned by the investor at the old plan manager (account servicer).
	TransferorAccount *Account15 `xml:"TrfrAcct"`
	// Account held in the name of a party that is not the name of the beneficial owner of the shares.
	NomineeAccount *Account16 `xml:"NmneeAcct,omitempty"`
	// Information related to the institution to which the financial instrument is to be transferred.
	Transferee *PartyIdentification2Choice `xml:"Trfee"`
	// Identification of an account owned by the investor to which a cash entry is made based on the transfer of asset(s).
	CashAccount *CashAccount29 `xml:"CshAcct,omitempty"`
	// Details of the transfer to be cancelled.
	ProductTransferAndReference *ISATransfer20 `xml:"PdctTrfAndRef"`
	// Additional information that cannot be captured in the structured elements and/or any other specific block.
	Extension []*Extension1 `xml:"Xtnsn,omitempty"`
}
func (i *ISATransfer19) AddPrimaryIndividualInvestor() *IndividualPerson8 {
i.PrimaryIndividualInvestor = new(IndividualPerson8)
return i.PrimaryIndividualInvestor
}
func (i *ISATransfer19) AddSecondaryIndividualInvestor() *IndividualPerson8 {
i.SecondaryIndividualInvestor = new(IndividualPerson8)
return i.SecondaryIndividualInvestor
}
func (i *ISATransfer19) AddOtherIndividualInvestor() *IndividualPerson8 {
newValue := new (IndividualPerson8)
i.OtherIndividualInvestor = append(i.OtherIndividualInvestor, newValue)
return newValue
}
func (i *ISATransfer19) AddPrimaryCorporateInvestor() *Organisation4 {
i.PrimaryCorporateInvestor = new(Organisation4)
return i.PrimaryCorporateInvestor
}
func (i *ISATransfer19) AddSecondaryCorporateInvestor() *Organisation4 {
i.SecondaryCorporateInvestor = new(Organisation4)
return i.SecondaryCorporateInvestor
}
func (i *ISATransfer19) AddOtherCorporateInvestor() *Organisation4 {
newValue := new (Organisation4)
i.OtherCorporateInvestor = append(i.OtherCorporateInvestor, newValue)
return newValue
}
func (i *ISATransfer19) AddTransferorAccount() *Account15 {
i.TransferorAccount = new(Account15)
return i.TransferorAccount
}
func (i *ISATransfer19) AddNomineeAccount() *Account16 {
i.NomineeAccount = new(Account16)
return i.NomineeAccount
}
func (i *ISATransfer19) AddTransferee() *PartyIdentification2Choice {
i.Transferee = new(PartyIdentification2Choice)
return i.Transferee
}
func (i *ISATransfer19) AddCashAccount() *CashAccount29 {
i.CashAccount = new(CashAccount29)
return i.CashAccount
}
func (i *ISATransfer19) AddProductTransferAndReference() *ISATransfer20 {
i.ProductTransferAndReference = new(ISATransfer20)
return i.ProductTransferAndReference
}
func (i *ISATransfer19) AddExtension() *Extension1 {
newValue := new (Extension1)
i.Extension = append(i.Extension, newValue)
return newValue
} | ISATransfer19.go | 0.718298 | 0.45302 | ISATransfer19.go | starcoder |
package zfs
// DatasetPathForest is a set of dataset path trees, one tree per
// distinct root component of the paths added to it.
type DatasetPathForest struct {
	roots []*datasetPathTree
}
// NewDatasetPathForest returns an empty forest ready to accept paths.
func NewDatasetPathForest() *DatasetPathForest {
	return &DatasetPathForest{roots: []*datasetPathTree{}}
}
// Add inserts p into the forest, attaching it to the root tree that
// shares p's first component, or creating a new root tree if none does.
// Panics if p has no components.
func (f *DatasetPathForest) Add(p *DatasetPath) {
	if len(p.comps) <= 0 {
		panic("dataset path too short. must have length > 0")
	}
	// Find its root: the first tree whose root component matches accepts
	// the path and absorbs it.
	var root *datasetPathTree
	for _, r := range f.roots {
		if r.Add(p.comps) {
			root = r
			break
		}
	}
	if root == nil {
		// No existing root matched; start a new tree for this path.
		root = newDatasetPathTree(p.comps)
		f.roots = append(f.roots, root)
	}
}
// DatasetPathVisit describes one node encountered during a walk.
type DatasetPathVisit struct {
	// Path is the full dataset path of the visited node.
	Path *DatasetPath
	// If true, the dataset referenced by Path was not in the list of datasets to traverse
	FilledIn bool
	// Parent is the visit of the parent dataset (nil only for the
	// synthetic root visit).
	Parent *DatasetPathVisit
}

// DatasetPathsVisitor is invoked for each visited node; returning false
// skips the node's child subtree.
type DatasetPathsVisitor func(v *DatasetPathVisit) (visitChildTree bool)
// WalkTopDown traverses the forest top down, i.e. given a set of
// datasets with the same path prefix, those with shorter prefix are
// traversed first. If there are gaps, i.e. the intermediary component
// a/b between a and a/b/c, those gaps are still visited but the
// FilledIn property of the visit is set to true.
func (f *DatasetPathForest) WalkTopDown(visitor DatasetPathsVisitor) {
	for _, r := range f.roots {
		// Each root walk starts from a synthetic, filled-in empty-path
		// parent visit.
		r.WalkTopDown(&DatasetPathVisit{
			Path:     &DatasetPath{nil},
			FilledIn: true,
			Parent:   nil,
		}, visitor)
	}
}
/* PRIVATE IMPLEMENTATION */

// datasetPathTree is one node of a path tree: a single path component,
// whether the node was filled in (never explicitly added), and its
// child components.
type datasetPathTree struct {
	Component string
	FilledIn  bool
	Children  []*datasetPathTree
}
// Add merges the component list p into this subtree. It returns false
// (without modifying the tree) when p's first component does not match
// this node, and true once p has been absorbed. A node that is added
// explicitly has its FilledIn flag cleared.
func (t *datasetPathTree) Add(p []string) bool {
	if len(p) == 0 {
		return true
	}
	if p[0] != t.Component {
		return false
	}
	rest := p[1:]
	if len(rest) == 0 {
		// The path ends exactly here: mark this node as explicitly added.
		t.FilledIn = false
		return true
	}
	for _, child := range t.Children {
		if child.Add(rest) {
			return true
		}
	}
	// No child accepted the remainder; grow a new branch for it.
	t.Children = append(t.Children, newDatasetPathTree(rest))
	return true
}
// WalkTopDown visits this node (its path is parent's path plus this
// node's component), then recurses into the children unless the
// visitor returns false.
func (t *datasetPathTree) WalkTopDown(parent *DatasetPathVisit, visitor DatasetPathsVisitor) {
	thisVisitPath := parent.Path.Copy()
	thisVisitPath.Extend(&DatasetPath{[]string{t.Component}})
	thisVisit := &DatasetPathVisit{
		thisVisitPath,
		t.FilledIn,
		parent,
	}
	visitChildTree := visitor(thisVisit)
	if visitChildTree {
		for _, c := range t.Children {
			c.WalkTopDown(thisVisit, visitor)
		}
	}
}
func newDatasetPathTree(initialComps []string) (t *datasetPathTree) {
t = &datasetPathTree{}
cur := t
for i, comp := range initialComps {
cur.Component = comp
cur.FilledIn = true
cur.Children = make([]*datasetPathTree, 0, 1)
if i == len(initialComps)-1 {
cur.FilledIn = false // last component is not filled in
break
}
child := &datasetPathTree{}
cur.Children = append(cur.Children, child)
cur = child
}
return t
} | zfs/datasetpath_visitor.go | 0.630002 | 0.513485 | datasetpath_visitor.go | starcoder |
package strdist
import (
"fmt"
"sort"
)
// CaseMod represents the different behaviours with regards to case
// handling when measuring distances
type CaseMod int

const (
	// NoCaseChange indicates that the case should not be changed
	NoCaseChange CaseMod = iota
	// ForceToLower indicates that the case should be forced to lower case
	// when calculating distances
	ForceToLower
)

// DistAlgo describes the algorithm which the Finder will use to calculate
// distance. There is a Prep func provided which will allow some common tasks
// to be performed before the distance is calculated - some algorithms can
// cache some intermediate results to save time when calculating the
// string-to-string distance.
type DistAlgo interface {
	// Prep is called once with the target string before a batch of
	// Dist calls against that string.
	Prep(s string, cm CaseMod)
	// Dist returns the distance between s1 and s2.
	Dist(s1, s2 string, cm CaseMod) float64
}

// DfltMinStrLen is a suggested minimum length of string to be matched. The
// problem with trying to find similar strings to very short targets is that
// they can match with a lot of not obviously similar alternatives. For
// instance a match for a single character string might be every other single
// character string in the population. For a number of use cases this is not
// particularly helpful.
const DfltMinStrLen = 4
// Finder records the parameters of the finding algorithm
type Finder struct {
	// MinStrLen records the minimum length of string to be matched
	MinStrLen int
	// T is the threshold for similarity for this finder
	T float64
	// CM, if set to ForceToLower, will convert all strings to lower case
	// before generating the distance
	CM CaseMod
	// Algo is the algorithm with which to calculate the distance between two
	// strings
	Algo DistAlgo
	// pop holds the default population of strings for the Find... methods to
	// search if no strings are provided.
	pop []string
}
// NewFinder validates its parameters and, if they are valid, creates a
// new Finder. The minLen and threshold limit must each be >= 0. A zero
// threshold will require an exact match.
func NewFinder(minLen int, limit float64, cm CaseMod, a DistAlgo) (*Finder, error) {
	if minLen < 0 {
		return nil,
			fmt.Errorf("bad minimum string length (%d) - it should be >= 0",
				minLen)
	}
	if limit < 0.0 {
		return nil,
			fmt.Errorf("bad threshold (%f) - it should be >= 0.0", limit)
	}
	return &Finder{MinStrLen: minLen, T: limit, CM: cm, Algo: a}, nil
}
// SetPop sets the default population of strings to be searched by the
// Find... methods when they are called with no explicit population.
func (f *Finder) SetPop(pop []string) {
	f.pop = pop
}
// FindLike returns StrDists for those strings in the population (pop) which
// are similar to the string (s). A string is similar if its distance from s
// (as computed by f.Algo) is less than or equal to the finder's threshold.
// If the list of strings to search is empty then the default population
// from the Finder will be used. This should be set in advance using the
// SetPop method. Strings shorter than f.MinStrLen - including s itself -
// never match. Results are sorted by distance.
func (f *Finder) FindLike(s string, pop ...string) []StrDist {
	if len(pop) == 0 {
		pop = f.pop
	}
	if len(pop) == 0 || len(s) < f.MinStrLen {
		return []StrDist{}
	}
	dists := make([]StrDist, 0, len(pop))
	// Let the algorithm cache any intermediate results for s before the
	// batch of Dist calls below.
	f.Algo.Prep(s, f.CM)
	for _, p := range pop {
		if len(p) < f.MinStrLen {
			continue
		}
		d := f.Algo.Dist(s, p, f.CM)
		if d > f.T {
			continue
		}
		dists = append(dists, StrDist{
			Str:  p,
			Dist: d,
		})
	}
	sort.Slice(dists, func(i, j int) bool { return SDSlice(dists).Cmp(i, j) })
	return dists
}
// FindStrLike returns those strings in the population (pop) which are
// similar to the string (s). Similarity is as for the FindLike func.
func (f *Finder) FindStrLike(s string, pop ...string) []string {
	return convertStrDist(f.FindLike(s, pop...))
}

// FindNStrLike returns the first n strings in the population (pop) which are
// similar to the string (s). Similarity is as for the FindLike func.
func (f *Finder) FindNStrLike(n int, s string, pop ...string) []string {
	return convertStrDistN(n, f.FindLike(s, pop...))
}
// convertStrDist extracts the strings from a slice of StrDists,
// preserving their order.
func convertStrDist(dists []StrDist) []string {
	strs := make([]string, len(dists))
	for i, d := range dists {
		strs[i] = d.Str
	}
	return strs
}
// convertStrDistN returns the first n strings from a slice of StrDists
func convertStrDistN(n int, dists []StrDist) []string {
if len(dists) < n {
n = len(dists)
}
rval := make([]string, 0, n)
for i, d := range dists {
if i >= n {
break
}
rval = append(rval, d.Str)
}
return rval
} | strdist/finder.go | 0.738009 | 0.52275 | finder.go | starcoder |
package cli
// helpText is the top-level usage text printed by "fns help".
// Fixes user-facing typos: "destory_project" -> "destroy_project",
// "detials" -> "details".
const helpText = `
fns - a command line utility for managing serverless functions.
https://www.github.com/gbdubs/fns
Commands (* indicates not yet implemented)
* create_project creates a new collection of functions in a new folder
* create_fn creates a new function within a project
* deploy_project deploys a project and all of its functions to the cloud
* deploy_fn deploys a single function to the cloud
* invoke invokes a given function with a set input
* test invokes all tests against the cloud, verifying expected results
* destroy_fn deletes a given function from the cloud + locally
* destroy_project deletes a project and all of its functions from the cloud
Use "fns help <command>" for details about the command.
`
// createProjectHelpText is the help text for "fns help create_project".
// Fixes the user-facing typo "deafult" -> "default".
// NOTE(review): the usage line advertises a --dir flag but the flag
// section documents --where; one of the two looks stale - confirm which
// flag the command actually parses before reconciling them.
const createProjectHelpText = `
create_project - creates a new folder to house a collection of functions.
fns create_project --project=my_project_name [--dir=~/go/src/.../myproject]
A project is a collection of resources (functions, storage, ACLs, domains) that can all communicate with one another with trust by default. On your local machine, all of the configuration for these resources live in a top-level directory with the project's name. On a cloud service provider, all of the project resources will be prefixed by the project name, and will be located in the project's configured region. Projects can be deployed, tested, or destroyed as a unit. Projects can be version controlled, but they do not need to be.
Required Flags
--project=my_project_name The name of the directory to house the project.
Optional Flags
--where=path/from/here The path that the project should be created at. If not
specified, defaults to the present working directory.
Use "fns help" to learn about other commands.
`
// createFnHelpText is the help text for "fns help create_fn".
// Fixes user-facing typos: "thier" -> "their", "architechure" ->
// "architecture" (twice), "real-trafic" -> "real-traffic".
const createFnHelpText = `
create_fn - creates a new function within a project.
fns create_fn --fn=my_fn_name [--project=my_project_name] [--fn_type=UNAUTH|AUTH|SYSTEM|RPC]
A function is a piece of code that can be run on any cloud service provider's serverless computing solution. This command creates a new folder within the project directory with the function's name, and creates the files to deploy, test and invoke the function. This package supports four types of functions, 3 "serving" flavors, and 1 "internal" flavor:
- UNAUTH: Unauthenticated HTTP Functions, implements standard HTTP interface, used for serving websites over HTTP.
- AUTH: Authenticated HTTP Functions, provides authorization data in interface, used for interacting with users who are already logged in.
- SYSTEM: HTTP functions, called by trusted in-house systems (ex: a database trigger or asynchronous queue delivery).
- RPC: Internal remote procedure call functions, used to provide microservice interfaces between lambdas. Unlike the other function flavors, this type is "collapsible".
Collapsible functions are a beautiful thing - they can either be compiled into all of their calling functions, or they can be invoked as microservices. The benefit of this architecture is that it allows you to not be bound to a specific service configuration. For small projects, you can bundle and deploy all of your bizlogic into a single serving function/binary, without hemming yourself into this architecture long term. As a future goal, I want to support real-traffic analysis to support and inform how different functions are built and deployed - ideally trying to maximize some form of user expressed constraints, build size, and end-user latency. After initial development, you should expect RPC to be the primary type of function you create.
Required Flags
--fn=my_new_fn_name The name of the new directory to house the function.
Optional Flags
--project=my_project_name The name of the project to push this function from. Defaults
to the project the command is invoked within
--fn_type=... The type of the function to create (defaults to UNAUTH), see
above for function types and their properties
Use "fns help" to learn about other commands.
`
// deployProject is the help text for "fns help deploy_project".
// NOTE(review): the sibling constants follow a *HelpText naming scheme
// (createProjectHelpText, deployFnHelpText); consider renaming this to
// deployProjectHelpText for consistency - confirm all references first.
const deployProject = `
deploy_project - push a project to one or more cloud-service-providers, actualizing local configuration.
fns deploy_project [--project=my_project_name] [--provider=AWS|GCP] [--dryrun]
Pushes the local project configuration to all clouds that it is currently configured for. If provider is specified, this command only pushes to that provider. On the first push to a cloud provider, you will need to specify the --provider flag.
Optional Flags
--project=my_project_name The name of the project to push, defaults to PWD project
--provider=AWS|GCP The cloud service provider to push this project to.
--dryrun If set, no actual pushes will be performed, but a summary
of what WOULD be performed will be printed to the console.
Use "fns help" to learn about other commands.
`
// deployFnHelpText is the help text for "fns help deploy_fn".
// Fixes the flag documentation typo "--fu" -> "--fn" (the usage line in
// this same constant shows the flag as --fn=my_fn_name).
const deployFnHelpText = `
deploy_fn - push a function to one or more cloud-service-providers, actualizing local configuration.
fns deploy_fn [--project=my_project_name] [--fn=my_fn_name] [--provider=AWS|GCP] [--dryrun]
Pushes the local function configuration to all cloud service providers that it is presently configured for. If the provider is specified, the command only pushes to that provider. On the first push to a cloud provider, you will need to specify the --provider flag.
Optional Flags
--project=my_project_name The name of the project to push, defaults to the PWD project.
--fn=my_function_name The name of the function to push, defaults to the PWD fn.
--provider=AWS|GCP The cloud service provider to push this project to.
--dryrun If set, no actual pushes will be performed, but a summary
of what WOULD have been performed will be printed to the
console.
Use "fns help" to learn about other commands.
`
package plaid
import (
"encoding/json"
)
// WalletTransactionAmount is the amount and currency of a transaction.
type WalletTransactionAmount struct {
	// The ISO-4217 currency code of the transaction. Currently, only `\"GBP\"` is supported.
	IsoCurrencyCode string `json:"iso_currency_code"`
	// The amount of the transaction. Must contain at most two digits of precision e.g. `1.23`.
	Value float32 `json:"value"`
	// AdditionalProperties collects any JSON keys not covered by the
	// fields above.
	AdditionalProperties map[string]interface{}
}

// _WalletTransactionAmount mirrors the exported type so UnmarshalJSON
// can decode without recursing into the custom unmarshaller.
type _WalletTransactionAmount WalletTransactionAmount
// NewWalletTransactionAmount instantiates a new WalletTransactionAmount
// object with the required properties set. The set of arguments will
// change when the set of required properties is changed.
func NewWalletTransactionAmount(isoCurrencyCode string, value float32) *WalletTransactionAmount {
	return &WalletTransactionAmount{
		IsoCurrencyCode: isoCurrencyCode,
		Value:           value,
	}
}

// NewWalletTransactionAmountWithDefaults instantiates a new
// WalletTransactionAmount object with only defaulted properties set;
// required properties are left at their zero values.
func NewWalletTransactionAmountWithDefaults() *WalletTransactionAmount {
	return &WalletTransactionAmount{}
}
// GetIsoCurrencyCode returns the IsoCurrencyCode field value.
// A nil receiver yields the zero value.
func (o *WalletTransactionAmount) GetIsoCurrencyCode() string {
	if o == nil {
		var ret string
		return ret
	}
	return o.IsoCurrencyCode
}

// GetIsoCurrencyCodeOk returns a tuple with the IsoCurrencyCode field value
// and a boolean to check if the value has been set.
func (o *WalletTransactionAmount) GetIsoCurrencyCodeOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.IsoCurrencyCode, true
}

// SetIsoCurrencyCode sets field value.
func (o *WalletTransactionAmount) SetIsoCurrencyCode(v string) {
	o.IsoCurrencyCode = v
}

// GetValue returns the Value field value.
// A nil receiver yields the zero value.
func (o *WalletTransactionAmount) GetValue() float32 {
	if o == nil {
		var ret float32
		return ret
	}
	return o.Value
}

// GetValueOk returns a tuple with the Value field value
// and a boolean to check if the value has been set.
func (o *WalletTransactionAmount) GetValueOk() (*float32, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Value, true
}

// SetValue sets field value.
func (o *WalletTransactionAmount) SetValue(v float32) {
	o.Value = v
}
// MarshalJSON serialises the required fields plus any additional
// properties into a single JSON object. Additional properties are
// applied after the named fields, matching the previous ordering.
// The generated "if true" wrappers around the unconditional field
// assignments were removed.
func (o WalletTransactionAmount) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{
		"iso_currency_code": o.IsoCurrencyCode,
		"value":             o.Value,
	}
	for key, value := range o.AdditionalProperties {
		toSerialize[key] = value
	}
	return json.Marshal(toSerialize)
}
// UnmarshalJSON decodes the named fields into o and gathers any other
// keys into AdditionalProperties.
//
// Fix: the previous version reused err across both json.Unmarshal
// calls and only returned the final value, so a failure decoding the
// struct was silently overwritten when the follow-up decode into the
// properties map succeeded. The first error is now returned
// immediately.
func (o *WalletTransactionAmount) UnmarshalJSON(bytes []byte) error {
	varWalletTransactionAmount := _WalletTransactionAmount{}
	if err := json.Unmarshal(bytes, &varWalletTransactionAmount); err != nil {
		return err
	}
	*o = WalletTransactionAmount(varWalletTransactionAmount)
	additionalProperties := make(map[string]interface{})
	if err := json.Unmarshal(bytes, &additionalProperties); err != nil {
		return err
	}
	// The named fields were already decoded above; keep only the extras.
	delete(additionalProperties, "iso_currency_code")
	delete(additionalProperties, "value")
	o.AdditionalProperties = additionalProperties
	return nil
}
// NullableWalletTransactionAmount wraps a WalletTransactionAmount and
// tracks whether it has been explicitly set, so that JSON null can be
// distinguished from an absent value.
type NullableWalletTransactionAmount struct {
	value *WalletTransactionAmount
	isSet bool
}

// Get returns the wrapped value (nil when unset or explicitly null).
func (v NullableWalletTransactionAmount) Get() *WalletTransactionAmount {
	return v.value
}

// Set stores val and marks the wrapper as set.
func (v *NullableWalletTransactionAmount) Set(val *WalletTransactionAmount) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether a value (possibly nil) has been assigned.
func (v NullableWalletTransactionAmount) IsSet() bool {
	return v.isSet
}

// Unset clears the value and the set flag.
func (v *NullableWalletTransactionAmount) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullableWalletTransactionAmount returns a wrapper already marked as set.
func NewNullableWalletTransactionAmount(val *WalletTransactionAmount) *NullableWalletTransactionAmount {
	return &NullableWalletTransactionAmount{value: val, isSet: true}
}

// MarshalJSON encodes the wrapped value (null when the value is nil).
func (v NullableWalletTransactionAmount) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}

// UnmarshalJSON decodes into the wrapped value and marks it as set.
func (v *NullableWalletTransactionAmount) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
package ovsdb
import (
"encoding/json"
"fmt"
"reflect"
)
// OvsMap is the JSON map structure used for OVSDB.
// RFC 7047 uses the following notation for map as JSON doesn't support
// non-string keys for maps. A 2-element JSON array that represents a
// database map value. The first element of the array must be the string
// "map", and the second element must be an array of zero or more
// <pair>s giving the values in the map. All of the <pair>s must have
// the same key and value types.
type OvsMap struct {
	// GoMap is the native Go representation of the OVSDB map.
	GoMap map[interface{}]interface{}
}
// MarshalJSON encodes the map as the OVSDB wire form
// ["map", [[key, value], ...]]. Pair order follows Go's map iteration
// order, as before.
func (o OvsMap) MarshalJSON() ([]byte, error) {
	if len(o.GoMap) == 0 {
		return []byte(`["map",[]]`), nil
	}
	pairs := make([]interface{}, 0, len(o.GoMap))
	for key, val := range o.GoMap {
		pairs = append(pairs, []interface{}{key, val})
	}
	return json.Marshal([]interface{}{"map", pairs})
}
// UnmarshalJSON decodes an OVSDB style ["map", [[k, v], ...]] value.
//
// Fixes:
//   - The previous version shadowed the named return with
//     "if err := json.Unmarshal(...)", so a JSON syntax error was
//     silently dropped and nil returned.
//   - Unchecked type assertions on the outer pair array and each pair
//     panicked on malformed input; they now produce an
//     UnmarshalTypeError instead.
func (o *OvsMap) UnmarshalJSON(b []byte) error {
	var oMap []interface{}
	o.GoMap = make(map[interface{}]interface{})
	if err := json.Unmarshal(b, &oMap); err != nil {
		return err
	}
	if len(oMap) < 2 {
		// An empty/degenerate value such as ["map"] carries no pairs.
		return nil
	}
	innerSlice, ok := oMap[1].([]interface{})
	if !ok {
		return &json.UnmarshalTypeError{Value: reflect.ValueOf(oMap).String(), Type: reflect.TypeOf(*o)}
	}
	for _, val := range innerSlice {
		pair, ok := val.([]interface{})
		if !ok || len(pair) != 2 {
			return &json.UnmarshalTypeError{Value: reflect.ValueOf(val).String(), Type: reflect.TypeOf(*o)}
		}
		switch v := pair[1].(type) {
		case []interface{}:
			// A nested array value must itself be a 2-element OVSDB
			// notation value, and nested maps are not accepted here.
			if len(v) != 2 || v[0] == "map" {
				return &json.UnmarshalTypeError{Value: reflect.ValueOf(oMap).String(), Type: reflect.TypeOf(*o)}
			}
			goSlice, err := ovsSliceToGoNotation(v)
			if err != nil {
				return err
			}
			o.GoMap[pair[0]] = goSlice
		default:
			o.GoMap[pair[0]] = pair[1]
		}
	}
	return nil
}
// NewOvsMap will return an OVSDB style map from a provided Golang Map
func NewOvsMap(goMap interface{}) (OvsMap, error) {
v := reflect.ValueOf(goMap)
if v.Kind() != reflect.Map {
return OvsMap{}, fmt.Errorf("ovsmap supports only go map types")
}
genMap := make(map[interface{}]interface{})
keys := v.MapKeys()
for _, key := range keys {
genMap[key.Interface()] = v.MapIndex(key).Interface()
}
return OvsMap{genMap}, nil
} | go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/map.go | 0.595257 | 0.430447 | map.go | starcoder |
package collector
import (
"fmt"
"github.com/Tinkerforge/go-api-bindings/air_quality_bricklet"
"github.com/Tinkerforge/go-api-bindings/barometer_bricklet"
"github.com/Tinkerforge/go-api-bindings/barometer_v2_bricklet"
"github.com/Tinkerforge/go-api-bindings/humidity_bricklet"
"github.com/Tinkerforge/go-api-bindings/humidity_v2_bricklet"
"github.com/prometheus/client_golang/prometheus"
)
// RegisterAirQualityBricklet connects an Air Quality Bricklet and registers a
// periodic "all values" callback that forwards IAQ index, accuracy,
// temperature, air pressure and humidity to the collector's Values channel.
// Raw temperature, pressure and humidity arrive scaled by 100 and are
// divided back down here.
func (b *BrickdCollector) RegisterAirQualityBricklet(uid string) ([]Register, error) {
	d, err := air_quality_bricklet.New(uid, &b.Connection)
	if err != nil {
		return nil, fmt.Errorf("failed to connect Air Quality Bricklet (uid=%s): %s", uid, err)
	}
	cbID := d.RegisterAllValuesCallback(func(iaqIndex int32, iaqIndexAccuracy uint8, temperature int32, humidity int32, airPressure int32) {
		b.Values <- Value{
			Index:    0,
			DeviceID: air_quality_bricklet.DeviceIdentifier,
			UID:      uid,
			Help:     "IAQ Index Value",
			Name:     "iaq_index",
			Type:     prometheus.GaugeValue,
			Value:    float64(iaqIndex),
		}
		b.Values <- Value{
			Index:    1,
			DeviceID: air_quality_bricklet.DeviceIdentifier,
			UID:      uid,
			Help:     "IAQ Index Accuracy",
			Name:     "iaq_index_accuracy",
			Type:     prometheus.GaugeValue,
			Value:    float64(iaqIndexAccuracy),
		}
		b.Values <- Value{
			Index:    2,
			DeviceID: air_quality_bricklet.DeviceIdentifier,
			UID:      uid,
			Help:     "Temperature of the air in °C",
			Name:     "temperature",
			Type:     prometheus.GaugeValue,
			Value:    float64(temperature) / 100,
		}
		b.Values <- Value{
			Index:    3,
			DeviceID: air_quality_bricklet.DeviceIdentifier,
			UID:      uid,
			Help:     "Air Pressure in hPa",
			Name:     "pressure",
			Type:     prometheus.GaugeValue,
			Value:    float64(airPressure) / 100,
		}
		// NOTE(review): this block labels humidity "%rH" while the humidity
		// bricklet collectors below use "%rF" — confirm the intended unit label.
		b.Values <- Value{
			Index:    4,
			DeviceID: air_quality_bricklet.DeviceIdentifier,
			UID:      uid,
			Help:     "Humidity of the air in %rH",
			Name:     "humidity",
			Type:     prometheus.GaugeValue,
			Value:    float64(humidity) / 100,
		}
	})
	if err := d.SetAllValuesCallbackConfiguration(b.CallbackPeriod, false); err != nil {
		return nil, fmt.Errorf("failed to set callback config for Air Quality Bricklet (uid=%s): %s", uid, err)
	}
	return []Register{
		{
			Deregister: d.DeregisterAllValuesCallback,
			ID:         cbID,
		},
	}, nil
}
// RegisterHumidityBricklet connects a Humidity Bricklet and registers a
// periodic humidity callback. The raw reading is scaled by 10 and is
// divided back down before being sent to the collector's Values channel.
func (b *BrickdCollector) RegisterHumidityBricklet(uid string) ([]Register, error) {
	d, err := humidity_bricklet.New(uid, &b.Connection)
	if err != nil {
		return nil, fmt.Errorf("failed to connect Humidity Bricklet (uid=%s): %s", uid, err)
	}
	callbackID := d.RegisterHumidityCallback(func(humidity uint16) {
		b.Values <- Value{
			Index:    0,
			DeviceID: humidity_bricklet.DeviceIdentifier,
			UID:      uid,
			Help:     "Humidity of the air in %rF",
			Name:     "humidity",
			Type:     prometheus.GaugeValue,
			Value:    float64(humidity) / 10.0,
		}
	})
	// Surface configuration failures instead of silently discarding them
	// (consistent with RegisterAirQualityBricklet).
	if err := d.SetHumidityCallbackPeriod(b.CallbackPeriod); err != nil {
		return nil, fmt.Errorf("failed to set callback period for Humidity Bricklet (uid=%s): %s", uid, err)
	}
	return []Register{
		{
			Deregister: d.DeregisterHumidityCallback,
			ID:         callbackID,
		},
	}, nil
}
// RegisterHumidityV2Bricklet connects a Humidity Bricklet 2.0 and registers
// periodic callbacks for humidity and temperature. Both raw readings are
// scaled by 100 and are divided back down before being forwarded.
func (b *BrickdCollector) RegisterHumidityV2Bricklet(uid string) ([]Register, error) {
	d, err := humidity_v2_bricklet.New(uid, &b.Connection)
	if err != nil {
		return nil, fmt.Errorf("failed to connect Humidity Bricklet V2.0 (uid=%s): %s", uid, err)
	}
	humID := d.RegisterHumidityCallback(func(humidity uint16) {
		b.Values <- Value{
			Index:    0,
			DeviceID: humidity_v2_bricklet.DeviceIdentifier,
			UID:      uid,
			Help:     "Humidity of the air in %rF",
			Name:     "humidity",
			Type:     prometheus.GaugeValue,
			Value:    float64(humidity) / 100.0,
		}
	})
	// Surface configuration failures instead of silently discarding them
	// (consistent with RegisterAirQualityBricklet).
	if err := d.SetHumidityCallbackConfiguration(b.CallbackPeriod, true, 'x', 0, 0); err != nil {
		return nil, fmt.Errorf("failed to set humidity callback config for Humidity Bricklet V2.0 (uid=%s): %s", uid, err)
	}
	tempID := d.RegisterTemperatureCallback(func(temperature int16) {
		b.Values <- Value{
			Index:    1,
			DeviceID: humidity_v2_bricklet.DeviceIdentifier,
			UID:      uid,
			Help:     "Temperature of the air in °C",
			Name:     "temperature",
			Type:     prometheus.GaugeValue,
			Value:    float64(temperature) / 100.0,
		}
	})
	if err := d.SetTemperatureCallbackConfiguration(b.CallbackPeriod, true, 'x', 0, 0); err != nil {
		return nil, fmt.Errorf("failed to set temperature callback config for Humidity Bricklet V2.0 (uid=%s): %s", uid, err)
	}
	return []Register{
		{
			Deregister: d.DeregisterHumidityCallback,
			ID:         humID,
		},
		{
			Deregister: d.DeregisterTemperatureCallback,
			ID:         tempID,
		},
	}, nil
}
// RegisterBarometerBricklet connects a Barometer Bricklet and registers
// periodic callbacks for air pressure and altitude.
//
// Per the Tinkerforge API the raw air pressure arrives in mbar/1000
// (mbar == hPa) and the raw altitude in cm, so the values must be DIVIDED
// by 1000 and 100 respectively to match the Help texts ("hPa", "m");
// the previous code multiplied instead, inflating readings by 10^6/10^4.
func (b *BrickdCollector) RegisterBarometerBricklet(uid string) ([]Register, error) {
	d, err := barometer_bricklet.New(uid, &b.Connection)
	if err != nil {
		return nil, fmt.Errorf("failed to connect Barometer Bricklet (uid=%s): %s", uid, err)
	}
	apID := d.RegisterAirPressureCallback(func(airPressure int32) {
		b.Values <- Value{
			Index:    0,
			DeviceID: barometer_bricklet.DeviceIdentifier,
			UID:      uid,
			Help:     "Air Pressure in hPa",
			Name:     "air_pressure",
			Type:     prometheus.GaugeValue,
			Value:    float64(airPressure) / 1000.0,
		}
	})
	if err := d.SetAirPressureCallbackPeriod(b.CallbackPeriod); err != nil {
		return nil, fmt.Errorf("failed to set air pressure callback period for Barometer Bricklet (uid=%s): %s", uid, err)
	}
	altID := d.RegisterAltitudeCallback(func(altitude int32) {
		b.Values <- Value{
			Index:    1,
			DeviceID: barometer_bricklet.DeviceIdentifier,
			UID:      uid,
			Help:     "Altitude in m",
			Name:     "altitude",
			Type:     prometheus.GaugeValue,
			Value:    float64(altitude) / 100.0,
		}
	})
	if err := d.SetAltitudeCallbackPeriod(b.CallbackPeriod); err != nil {
		return nil, fmt.Errorf("failed to set altitude callback period for Barometer Bricklet (uid=%s): %s", uid, err)
	}
	return []Register{
		{
			Deregister: d.DeregisterAirPressureCallback,
			ID:         apID,
		},
		{
			Deregister: d.DeregisterAltitudeCallback,
			ID:         altID,
		},
	}, nil
}
func (b *BrickdCollector) RegisterBarometerV2Bricklet(uid string) ([]Register, error) {
d, err := barometer_v2_bricklet.New(uid, &b.Connection)
if err != nil {
return nil, fmt.Errorf("failed to connect Barometer Bricklet V2.0 (uid=%s): %s", uid, err)
}
apID := d.RegisterAirPressureCallback(func(airPressure int32) {
b.Values <- Value{
Index: 0,
DeviceID: barometer_v2_bricklet.DeviceIdentifier,
UID: uid,
Help: "Air Pressure in hPa",
Name: "air_pressure",
Type: prometheus.GaugeValue,
Value: float64(airPressure) * 1000.0,
}
})
d.SetAirPressureCallbackConfiguration(b.CallbackPeriod, true, 'x', 0, 0)
altID := d.RegisterAltitudeCallback(func(altitude int32) {
b.Values <- Value{
Index: 1,
DeviceID: barometer_v2_bricklet.DeviceIdentifier,
UID: uid,
Help: "Altitude in m",
Name: "altitude",
Type: prometheus.GaugeValue,
Value: float64(altitude) * 1000.0,
}
})
d.SetAltitudeCallbackConfiguration(b.CallbackPeriod, true, 'x', 0, 0)
tempID := d.RegisterTemperatureCallback(func(temperature int32) {
b.Values <- Value{
Index: 2,
DeviceID: barometer_v2_bricklet.DeviceIdentifier,
UID: uid,
Help: "Temperature in °C",
Name: "temperature",
Type: prometheus.GaugeValue,
Value: float64(temperature) * 100.0,
}
})
d.SetTemperatureCallbackConfiguration(b.CallbackPeriod, true, 'x', 0, 0)
return []Register{
{
Deregister: d.DeregisterAirPressureCallback,
ID: apID,
},
{
Deregister: d.DeregisterAltitudeCallback,
ID: altID,
},
{
Deregister: d.DeregisterTemperatureCallback,
ID: tempID,
},
}, nil
} | collector/bricklets.go | 0.599016 | 0.460895 | bricklets.go | starcoder |
package types
import (
"github.com/rancher/norman/pkg/types/convert"
)
var (
	// Condition types, one per supported modifier. The second field is the
	// argument count: 1 for a single value, 0 for none, -1 for a list.
	CondEQ      = QueryConditionType{ModifierEQ, 1}
	CondNE      = QueryConditionType{ModifierNE, 1}
	CondNull    = QueryConditionType{ModifierNull, 0}
	CondNotNull = QueryConditionType{ModifierNotNull, 0}
	CondIn      = QueryConditionType{ModifierIn, -1}
	CondNotIn   = QueryConditionType{ModifierNotIn, -1}
	CondOr      = QueryConditionType{ModifierType("or"), 1}
	CondAnd     = QueryConditionType{ModifierType("and"), 1}

	// mods maps every known modifier name back to its condition type.
	mods = map[ModifierType]QueryConditionType{
		CondEQ.Name:      CondEQ,
		CondNE.Name:      CondNE,
		CondNull.Name:    CondNull,
		CondNotNull.Name: CondNotNull,
		CondIn.Name:      CondIn,
		CondNotIn.Name:   CondNotIn,
		CondOr.Name:      CondOr,
		CondAnd.Name:     CondAnd,
	}
)

// QueryConditionType pairs a modifier name with the number of arguments
// it takes (1 = single value, 0 = none, -1 = variable-length list).
type QueryConditionType struct {
	Name ModifierType
	Args int
}

// QueryCondition is one parsed filter clause. Leaf conditions compare
// Field against Value/Values; "and"/"or" conditions combine left and right.
type QueryCondition struct {
	Field         string
	Value         string
	Values        map[string]bool
	conditionType QueryConditionType
	left, right   *QueryCondition
}
// Valid reports whether data satisfies the condition. A field missing from
// data falls back to its schema-declared default before comparison.
func (q *QueryCondition) Valid(schema *Schema, data map[string]interface{}) bool {
	// fieldStr lazily resolves the compared field as a string; only the
	// leaf (non-boolean) conditions need it.
	fieldStr := func() string {
		return convert.ToString(valueOrDefault(schema, data, q))
	}
	switch q.conditionType {
	case CondAnd:
		if q.left == nil || q.right == nil {
			return false
		}
		return q.left.Valid(schema, data) && q.right.Valid(schema, data)
	case CondOr:
		if q.left == nil || q.right == nil {
			return false
		}
		return q.left.Valid(schema, data) || q.right.Valid(schema, data)
	case CondEQ:
		return q.Value == fieldStr()
	case CondNE:
		return q.Value != fieldStr()
	case CondIn:
		return q.Values[fieldStr()]
	case CondNotIn:
		return !q.Values[fieldStr()]
	case CondNotNull:
		return fieldStr() != ""
	case CondNull:
		return fieldStr() == ""
	default:
		return false
	}
}
// valueOrDefault returns the condition's field from data, falling back to
// the schema-declared default when the field is absent or nil.
func valueOrDefault(schema *Schema, data map[string]interface{}, q *QueryCondition) interface{} {
	if v := data[q.Field]; v != nil {
		return v
	}
	return schema.ResourceFields[q.Field].Default
}
// ToCondition converts the parsed query condition back into its public
// Condition form; the value shape depends on the modifier's argument count.
func (q *QueryCondition) ToCondition() Condition {
	cond := Condition{Modifier: q.conditionType.Name}
	switch q.conditionType.Args {
	case 1:
		cond.Value = q.Value
	case -1:
		vals := make([]string, 0, len(q.Values))
		for v := range q.Values {
			vals = append(vals, v)
		}
		cond.Value = vals
	}
	return cond
}
func ValidMod(mod ModifierType) bool {
_, ok := mods[mod]
return ok
}
func EQ(key, value string) *QueryCondition {
return NewConditionFromString(key, ModifierEQ, value)
}
func NewConditionFromString(field string, mod ModifierType, values ...string) *QueryCondition {
q := &QueryCondition{
Field: field,
conditionType: mods[mod],
Values: map[string]bool{},
}
for i, value := range values {
if i == 0 {
q.Value = value
}
q.Values[value] = true
}
return q
} | vendor/github.com/rancher/norman/pkg/types/condition.go | 0.622574 | 0.456713 | condition.go | starcoder |
package goma
import (
"math/rand"
"errors"
"time"
)
// Matrix is a dense Rows×Cols matrix of float64 values, stored as a slice
// of row slices.
type Matrix struct {
	Rows, Cols int
	Data       [][]float64
}

// Copy returns a deep copy of the matrix; mutating the copy does not
// affect the original.
func (m *Matrix) Copy() *Matrix {
	c := NewMatrix(m.Rows, m.Cols)
	for i := range m.Data {
		copy(c.Data[i], m.Data[i])
	}
	return c
}

// NewMatrix allocates a rows×cols matrix initialized to zero.
func NewMatrix(rows, cols int) *Matrix {
	data := make([][]float64, rows)
	for x := 0; x < rows; x++ {
		data[x] = make([]float64, cols)
	}
	return &Matrix{rows, cols, data}
}

// Map replaces each element with fn(value, row, col), in place.
func (m *Matrix) Map(fn func(float64, int, int) float64) {
	for x := 0; x < m.Rows; x++ {
		for y := 0; y < m.Cols; y++ {
			m.Data[x][y] = fn(m.Data[x][y], x, y)
		}
	}
}

// MapMatrix fills orig element-wise with fn(m1, m2, row, col).
func (orig *Matrix) MapMatrix(
	m1, m2 *Matrix,
	fn func(*Matrix, *Matrix, int, int) float64,
) {
	for x := 0; x < orig.Rows; x++ {
		for y := 0; y < orig.Cols; y++ {
			orig.Data[x][y] = fn(m1, m2, x, y)
		}
	}
}

// Randomize fills the matrix with pseudo-random values in [0, 1).
func (m *Matrix) Randomize() {
	// Use a locally seeded generator instead of re-seeding the shared
	// global source on every call: rand.Seed is deprecated since Go 1.20,
	// and repeated re-seeding can repeat sequences when called in quick
	// succession.
	rng := rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
	m.Map(func(_ float64, _, _ int) float64 {
		return rng.Float64()
	})
}

// Scale multiplies every element by n, in place.
func (m *Matrix) Scale(n float64) {
	m.Map(func(field float64, _, _ int) float64 {
		return field * n
	})
}

// AddScalar adds n to every element, in place.
func (m *Matrix) AddScalar(n float64) {
	m.Map(func(field float64, _, _ int) float64 {
		return field + n
	})
}

// AddMatrix adds m2 to m1 element-wise, in place. It returns an error if
// the matrices do not have the same shape.
func (m1 *Matrix) AddMatrix(m2 *Matrix) error {
	if m1.Rows != m2.Rows || m1.Cols != m2.Cols {
		return errors.New("Matrices are not the same shape")
	}
	m1.MapMatrix(m1, m2, func(a, b *Matrix, x, y int) float64 {
		return a.Data[x][y] + b.Data[x][y]
	})
	return nil
}

// SubtractMatrix subtracts m2 from m1 element-wise, in place. It returns
// an error if the matrices do not have the same shape.
func (m1 *Matrix) SubtractMatrix(m2 *Matrix) error {
	if m1.Rows != m2.Rows || m1.Cols != m2.Cols {
		return errors.New("Matrices are not the same shape")
	}
	m1.MapMatrix(m1, m2, func(a, b *Matrix, x, y int) float64 {
		return a.Data[x][y] - b.Data[x][y]
	})
	return nil
}

// Multiply performs matrix multiplication, returning a new m1.Rows×m2.Cols
// matrix. Neither input is modified.
func Multiply(m1 *Matrix, m2 *Matrix) (*Matrix, error) {
	if m1.Cols != m2.Rows {
		return nil, errors.New("Cols of m1 != Rows of m2")
	}
	// "product" (previously named "new", shadowing the builtin) gets one
	// dot product of an m1 row and an m2 column per cell; transposing m2
	// first lets both operands be accessed as rows.
	product := NewMatrix(m1.Rows, m2.Cols)
	m2t := Rotate(m2)
	product.MapMatrix(m1, m2t, func(a, b *Matrix, x, y int) float64 {
		return multiplyVectors(a.Data[x], b.Data[y])
	})
	return product, nil
}

// Rotate returns the transpose of m as a new matrix.
func Rotate(m *Matrix) *Matrix {
	t := NewMatrix(m.Cols, m.Rows)
	t.MapMatrix(t, m, func(_, src *Matrix, x, y int) float64 {
		return src.Data[y][x]
	})
	return t
}

// multiplyVectors returns the dot product of two equal-length vectors.
func multiplyVectors(v1, v2 []float64) float64 {
	var sum float64
	for i := range v1 {
		sum += v1[i] * v2[i]
	}
	return sum
}
package geojson
import (
"github.com/tidwall/tile38/pkg/geojson/geohash"
"github.com/tidwall/tile38/pkg/geojson/poly"
)
// MultiLineString is a geojson object with the type "MultiLineString"
// MultiLineString is a geojson object with the type "MultiLineString"
type MultiLineString struct {
	Coordinates [][]Position
	// BBox is always non-nil after fillMultiLineString: either the
	// caller-supplied bbox or one calculated from the coordinates.
	BBox *BBox
	// bboxDefined records whether the bbox came from the input document
	// (true) or was calculated (false).
	bboxDefined bool
}

// fillMultiLineString builds a MultiLineString from parsed coordinates.
// Any incoming err is passed through; additionally, every line string must
// have at least two positions or errLineStringInvalidCoordinates is
// returned (the struct is still populated in that case).
func fillMultiLineString(coordinates [][]Position, bbox *BBox, err error) (MultiLineString, error) {
	if err == nil {
		for _, coordinates := range coordinates {
			if len(coordinates) < 2 {
				err = errLineStringInvalidCoordinates
				break
			}
		}
	}
	bboxDefined := bbox != nil
	if !bboxDefined {
		// No bbox in the document: derive one from the coordinates.
		cbbox := level3CalculatedBBox(coordinates, nil, false)
		bbox = &cbbox
	}
	return MultiLineString{
		Coordinates: coordinates,
		BBox:        bbox,
		bboxDefined: bboxDefined,
	}, err
}
// getLineString wraps the index-th coordinate slice as a LineString
// (no bbox is carried over).
func (g MultiLineString) getLineString(index int) LineString {
	return LineString{Coordinates: g.Coordinates[index]}
}

// CalculatedBBox is exterior bbox containing the object.
func (g MultiLineString) CalculatedBBox() BBox {
	return level3CalculatedBBox(g.Coordinates, g.BBox, false)
}

// CalculatedPoint is a point representation of the object.
func (g MultiLineString) CalculatedPoint() Position {
	return g.CalculatedBBox().center()
}

// Geohash converts the object to a geohash value.
func (g MultiLineString) Geohash(precision int) (string, error) {
	p := g.CalculatedPoint()
	return geohash.Encode(p.Y, p.X, precision)
}

// PositionCount return the number of coordinates.
func (g MultiLineString) PositionCount() int {
	return level3PositionCount(g.Coordinates, g.BBox)
}

// Weight returns the in-memory size of the object.
func (g MultiLineString) Weight() int {
	return level3Weight(g.Coordinates, g.BBox)
}

// MarshalJSON allows the object to be encoded in json.Marshal calls.
func (g MultiLineString) MarshalJSON() ([]byte, error) {
	return g.appendJSON(nil), nil
}

// appendJSON appends the object's GeoJSON representation to json.
func (g MultiLineString) appendJSON(json []byte) []byte {
	return appendLevel3JSON(json, "MultiLineString", g.Coordinates, g.BBox, g.bboxDefined)
}

// JSON is the json representation of the object. This might not be exactly the same as the original.
func (g MultiLineString) JSON() string {
	return string(g.appendJSON(nil))
}

// String returns a string representation of the object. This might be JSON or something else.
func (g MultiLineString) String() string {
	return g.JSON()
}

// bboxPtr exposes the raw bbox pointer for shared helper functions.
func (g MultiLineString) bboxPtr() *BBox {
	return g.BBox
}

// hasPositions reports whether the object carries any coordinates (or an
// explicitly defined bbox, which counts as positional information).
func (g MultiLineString) hasPositions() bool {
	if g.bboxDefined {
		return true
	}
	for _, c := range g.Coordinates {
		if len(c) > 0 {
			return true
		}
	}
	return false
}
// WithinBBox detects if the object is fully contained inside a bbox.
func (g MultiLineString) WithinBBox(bbox BBox) bool {
	// With an explicit bbox, containment is decided on the bbox alone.
	if g.bboxDefined {
		return rectBBox(g.CalculatedBBox()).InsideRect(rectBBox(bbox))
	}
	if len(g.Coordinates) == 0 {
		return false
	}
	// Otherwise every position of every line string must lie in the rect.
	for _, ls := range g.Coordinates {
		if len(ls) == 0 {
			return false
		}
		for _, p := range ls {
			if !poly.Point(p).InsideRect(rectBBox(bbox)) {
				return false
			}
		}
	}
	return true
}

// IntersectsBBox detects if the object intersects a bbox.
func (g MultiLineString) IntersectsBBox(bbox BBox) bool {
	if g.bboxDefined {
		return rectBBox(g.CalculatedBBox()).IntersectsRect(rectBBox(bbox))
	}
	// Any single line string touching the rect is enough.
	for _, ls := range g.Coordinates {
		if polyPositions(ls).IntersectsRect(rectBBox(bbox)) {
			return true
		}
	}
	return false
}

// Within detects if the object is fully contained inside another object.
func (g MultiLineString) Within(o Object) bool {
	return withinObjectShared(g, o,
		func(v Polygon) bool {
			if len(g.Coordinates) == 0 {
				return false
			}
			// Every line string must be inside the polygon's exterior
			// ring and outside its holes.
			for _, ls := range g.Coordinates {
				if !polyPositions(ls).Inside(polyExteriorHoles(v.Coordinates)) {
					return false
				}
			}
			return true
		},
	)
}

// WithinCircle detects if the object is fully contained inside a circle.
func (g MultiLineString) WithinCircle(center Position, meters float64) bool {
	if len(g.Coordinates) == 0 {
		return false
	}
	for _, ls := range g.Coordinates {
		if len(ls) == 0 {
			return false
		}
		for _, position := range ls {
			// NOTE(review): ">=" excludes positions exactly `meters` away,
			// so a point on the circle's boundary makes this return false —
			// confirm whether the boundary should count as within (a strict
			// ">" would include it).
			if center.DistanceTo(position) >= meters {
				return false
			}
		}
	}
	return true
}
// Intersects detects if the object intersects another object.
func (g MultiLineString) Intersects(o Object) bool {
	return intersectsObjectShared(g, o,
		func(v Polygon) bool {
			if len(g.Coordinates) == 0 {
				return false
			}
			// Any single line string touching the polygon is enough.
			for _, ls := range g.Coordinates {
				if polyPositions(ls).Intersects(polyExteriorHoles(v.Coordinates)) {
					return true
				}
			}
			return false
		},
	)
}

// IntersectsCircle detects if the object intersects a circle.
func (g MultiLineString) IntersectsCircle(center Position, meters float64) bool {
	// The object intersects the circle when any segment of any of its
	// line strings does.
	for _, ls := range g.Coordinates {
		for i := 0; i < len(ls)-1; i++ {
			if SegmentIntersectsCircle(ls[i], ls[i+1], center, meters) {
				return true
			}
		}
	}
	return false
}

// Nearby detects if the object is nearby a position.
func (g MultiLineString) Nearby(center Position, meters float64) bool {
	return nearbyObjectShared(g, center.X, center.Y, meters)
}

// IsBBoxDefined returns true if the object has a defined bbox.
func (g MultiLineString) IsBBoxDefined() bool {
	return g.bboxDefined
}

// IsGeometry returns true if the object is a geojson geometry object,
// false if it is something else.
func (g MultiLineString) IsGeometry() bool {
	return true
}
func (g MultiLineString) Clipped(bbox BBox) Object {
var new_coordinates [][]Position
for ix := range g.Coordinates {
clippedMultiLineString, _ := g.getLineString(ix).Clipped(bbox).(MultiLineString)
for _, ls := range clippedMultiLineString.Coordinates {
new_coordinates = append(new_coordinates, ls)
}
}
res, _ := fillMultiLineString(new_coordinates, nil, nil)
return res
} | pkg/geojson/multilinestring.go | 0.727685 | 0.415432 | multilinestring.go | starcoder |
package openapi
import (
"encoding/json"
"fmt"
"net/url"
"strings"
)
// CreateNotificationParams holds the optional form parameters for the
// 'CreateNotification' method. All fields are pointers: a nil field is
// omitted from the request entirely.
type CreateNotificationParams struct {
	// The actions to display for the notification. For APNS, translates to the `aps.category` value. For GCM, translates to the `data.twi_action` value. For SMS, this parameter is not supported and is omitted from deliveries to those channels.
	Action *string `json:"Action,omitempty"`
	// Deprecated.
	Alexa *map[string]interface{} `json:"Alexa,omitempty"`
	// The APNS-specific payload that overrides corresponding attributes in the generic payload for APNS Bindings. This property maps to the APNS `Payload` item, therefore the `aps` key must be used to change standard attributes. Adds custom key-value pairs to the root of the dictionary. See the [APNS documentation](https://developer.apple.com/library/content/documentation/NetworkingInternet/Conceptual/RemoteNotificationsPG/CommunicatingwithAPNs.html) for more details. We reserve keys that start with `twi_` for future use. Custom keys that start with `twi_` are not allowed.
	Apn *map[string]interface{} `json:"Apn,omitempty"`
	// The notification text. For FCM and GCM, translates to `data.twi_body`. For APNS, translates to `aps.alert.body`. For SMS, translates to `body`. SMS requires either this `body` value, or `media_urls` attribute defined in the `sms` parameter of the notification.
	Body *string `json:"Body,omitempty"`
	// The custom key-value pairs of the notification's payload. For FCM and GCM, this value translates to `data` in the FCM and GCM payloads. FCM and GCM [reserve certain keys](https://firebase.google.com/docs/cloud-messaging/http-server-ref) that cannot be used in those channels. For APNS, attributes of `data` are inserted into the APNS payload as custom properties outside of the `aps` dictionary. In all channels, we reserve keys that start with `twi_` for future use. Custom keys that start with `twi_` are not allowed and are rejected as 400 Bad request with no delivery attempted. For SMS, this parameter is not supported and is omitted from deliveries to those channels.
	Data *map[string]interface{} `json:"Data,omitempty"`
	// URL to send webhooks.
	DeliveryCallbackUrl *string `json:"DeliveryCallbackUrl,omitempty"`
	// Deprecated.
	FacebookMessenger *map[string]interface{} `json:"FacebookMessenger,omitempty"`
	// The FCM-specific payload that overrides corresponding attributes in the generic payload for FCM Bindings. This property maps to the root JSON dictionary. See the [FCM documentation](https://firebase.google.com/docs/cloud-messaging/http-server-ref#downstream) for more details. Target parameters `to`, `registration_ids`, `condition`, and `notification_key` are not allowed in this parameter. We reserve keys that start with `twi_` for future use. Custom keys that start with `twi_` are not allowed. FCM also [reserves certain keys](https://firebase.google.com/docs/cloud-messaging/http-server-ref), which cannot be used in that channel.
	Fcm *map[string]interface{} `json:"Fcm,omitempty"`
	// The GCM-specific payload that overrides corresponding attributes in the generic payload for GCM Bindings. This property maps to the root JSON dictionary. See the [GCM documentation](https://firebase.google.com/docs/cloud-messaging/http-server-ref) for more details. Target parameters `to`, `registration_ids`, and `notification_key` are not allowed. We reserve keys that start with `twi_` for future use. Custom keys that start with `twi_` are not allowed. GCM also [reserves certain keys](https://firebase.google.com/docs/cloud-messaging/http-server-ref).
	Gcm *map[string]interface{} `json:"Gcm,omitempty"`
	// The `identity` value that uniquely identifies the new resource's [User](https://www.twilio.com/docs/chat/rest/user-resource) within the [Service](https://www.twilio.com/docs/notify/api/service-resource). Delivery will be attempted only to Bindings with an Identity in this list. No more than 20 items are allowed in this list.
	Identity *[]string `json:"Identity,omitempty"`
	// The priority of the notification. Can be: `low` or `high` and the default is `high`. A value of `low` optimizes the client app's battery consumption; however, notifications may be delivered with unspecified delay. For FCM and GCM, `low` priority is the same as `Normal` priority. For APNS `low` priority is the same as `5`. A value of `high` sends the notification immediately, and can wake up a sleeping device. For FCM and GCM, `high` is the same as `High` priority. For APNS, `high` is a priority `10`. SMS does not support this property.
	Priority *string `json:"Priority,omitempty"`
	// The Segment resource is deprecated. Use the `tag` parameter, instead.
	Segment *[]string `json:"Segment,omitempty"`
	// The SMS-specific payload that overrides corresponding attributes in the generic payload for SMS Bindings. Each attribute in this value maps to the corresponding `form` parameter of the Twilio [Message](https://www.twilio.com/docs/sms/send-messages) resource. These parameters of the Message resource are supported in snake case format: `body`, `media_urls`, `status_callback`, and `max_price`. The `status_callback` parameter overrides the corresponding parameter in the messaging service, if configured. The `media_urls` property expects a JSON array.
	Sms *map[string]interface{} `json:"Sms,omitempty"`
	// The name of the sound to be played for the notification. For FCM and GCM, this Translates to `data.twi_sound`. For APNS, this translates to `aps.sound`. SMS does not support this property.
	Sound *string `json:"Sound,omitempty"`
	// A tag that selects the Bindings to notify. Repeat this parameter to specify more than one tag, up to a total of 5 tags. The implicit tag `all` is available to notify all Bindings in a Service instance. Similarly, the implicit tags `apn`, `fcm`, `gcm`, `sms` and `facebook-messenger` are available to notify all Bindings in a specific channel.
	Tag *[]string `json:"Tag,omitempty"`
	// The notification title. For FCM and GCM, this translates to the `data.twi_title` value. For APNS, this translates to the `aps.alert.title` value. SMS does not support this property. This field is not visible on iOS phones and tablets but appears on Apple Watch and Android devices.
	Title *string `json:"Title,omitempty"`
	// The destination address specified as a JSON string. Multiple `to_binding` parameters can be included but the total size of the request entity should not exceed 1MB. This is typically sufficient for 10,000 phone numbers.
	ToBinding *[]string `json:"ToBinding,omitempty"`
	// How long, in seconds, the notification is valid. Can be an integer between 0 and 2,419,200, which is 4 weeks, the default and the maximum supported time to live (TTL). Delivery should be attempted if the device is offline until the TTL elapses. Zero means that the notification delivery is attempted immediately, only once, and is not stored for future delivery. SMS does not support this property.
	Ttl *int `json:"Ttl,omitempty"`
}
// The Set* methods below each assign one optional parameter and return the
// params receiver so that calls can be chained fluently.

func (params *CreateNotificationParams) SetAction(Action string) *CreateNotificationParams {
	params.Action = &Action
	return params
}
func (params *CreateNotificationParams) SetAlexa(Alexa map[string]interface{}) *CreateNotificationParams {
	params.Alexa = &Alexa
	return params
}
func (params *CreateNotificationParams) SetApn(Apn map[string]interface{}) *CreateNotificationParams {
	params.Apn = &Apn
	return params
}
func (params *CreateNotificationParams) SetBody(Body string) *CreateNotificationParams {
	params.Body = &Body
	return params
}
func (params *CreateNotificationParams) SetData(Data map[string]interface{}) *CreateNotificationParams {
	params.Data = &Data
	return params
}
func (params *CreateNotificationParams) SetDeliveryCallbackUrl(DeliveryCallbackUrl string) *CreateNotificationParams {
	params.DeliveryCallbackUrl = &DeliveryCallbackUrl
	return params
}
func (params *CreateNotificationParams) SetFacebookMessenger(FacebookMessenger map[string]interface{}) *CreateNotificationParams {
	params.FacebookMessenger = &FacebookMessenger
	return params
}
func (params *CreateNotificationParams) SetFcm(Fcm map[string]interface{}) *CreateNotificationParams {
	params.Fcm = &Fcm
	return params
}
func (params *CreateNotificationParams) SetGcm(Gcm map[string]interface{}) *CreateNotificationParams {
	params.Gcm = &Gcm
	return params
}
func (params *CreateNotificationParams) SetIdentity(Identity []string) *CreateNotificationParams {
	params.Identity = &Identity
	return params
}
func (params *CreateNotificationParams) SetPriority(Priority string) *CreateNotificationParams {
	params.Priority = &Priority
	return params
}
func (params *CreateNotificationParams) SetSegment(Segment []string) *CreateNotificationParams {
	params.Segment = &Segment
	return params
}
func (params *CreateNotificationParams) SetSms(Sms map[string]interface{}) *CreateNotificationParams {
	params.Sms = &Sms
	return params
}
func (params *CreateNotificationParams) SetSound(Sound string) *CreateNotificationParams {
	params.Sound = &Sound
	return params
}
func (params *CreateNotificationParams) SetTag(Tag []string) *CreateNotificationParams {
	params.Tag = &Tag
	return params
}
func (params *CreateNotificationParams) SetTitle(Title string) *CreateNotificationParams {
	params.Title = &Title
	return params
}
func (params *CreateNotificationParams) SetToBinding(ToBinding []string) *CreateNotificationParams {
	params.ToBinding = &ToBinding
	return params
}
func (params *CreateNotificationParams) SetTtl(Ttl int) *CreateNotificationParams {
	params.Ttl = &Ttl
	return params
}
// CreateNotification sends a notification through the Notify service
// identified by ServiceSid. Every non-nil field of params is added to the
// POST form: map-valued fields are JSON-encoded into a single form value,
// slice-valued fields are repeated as multiple form values.
func (c *ApiService) CreateNotification(ServiceSid string, params *CreateNotificationParams) (*NotifyV1Notification, error) {
	path := "/v1/Services/{ServiceSid}/Notifications"
	path = strings.Replace(path, "{"+"ServiceSid"+"}", ServiceSid, -1)
	data := url.Values{}
	headers := make(map[string]interface{})
	if params != nil && params.Action != nil {
		data.Set("Action", *params.Action)
	}
	if params != nil && params.Alexa != nil {
		v, err := json.Marshal(params.Alexa)
		if err != nil {
			return nil, err
		}
		data.Set("Alexa", string(v))
	}
	if params != nil && params.Apn != nil {
		v, err := json.Marshal(params.Apn)
		if err != nil {
			return nil, err
		}
		data.Set("Apn", string(v))
	}
	if params != nil && params.Body != nil {
		data.Set("Body", *params.Body)
	}
	if params != nil && params.Data != nil {
		v, err := json.Marshal(params.Data)
		if err != nil {
			return nil, err
		}
		data.Set("Data", string(v))
	}
	if params != nil && params.DeliveryCallbackUrl != nil {
		data.Set("DeliveryCallbackUrl", *params.DeliveryCallbackUrl)
	}
	if params != nil && params.FacebookMessenger != nil {
		v, err := json.Marshal(params.FacebookMessenger)
		if err != nil {
			return nil, err
		}
		data.Set("FacebookMessenger", string(v))
	}
	if params != nil && params.Fcm != nil {
		v, err := json.Marshal(params.Fcm)
		if err != nil {
			return nil, err
		}
		data.Set("Fcm", string(v))
	}
	if params != nil && params.Gcm != nil {
		v, err := json.Marshal(params.Gcm)
		if err != nil {
			return nil, err
		}
		data.Set("Gcm", string(v))
	}
	// Slice parameters are repeated form values (data.Add, not data.Set).
	if params != nil && params.Identity != nil {
		for _, item := range *params.Identity {
			data.Add("Identity", item)
		}
	}
	if params != nil && params.Priority != nil {
		data.Set("Priority", *params.Priority)
	}
	if params != nil && params.Segment != nil {
		for _, item := range *params.Segment {
			data.Add("Segment", item)
		}
	}
	if params != nil && params.Sms != nil {
		v, err := json.Marshal(params.Sms)
		if err != nil {
			return nil, err
		}
		data.Set("Sms", string(v))
	}
	if params != nil && params.Sound != nil {
		data.Set("Sound", *params.Sound)
	}
	if params != nil && params.Tag != nil {
		for _, item := range *params.Tag {
			data.Add("Tag", item)
		}
	}
	if params != nil && params.Title != nil {
		data.Set("Title", *params.Title)
	}
	if params != nil && params.ToBinding != nil {
		for _, item := range *params.ToBinding {
			data.Add("ToBinding", item)
		}
	}
	if params != nil && params.Ttl != nil {
		data.Set("Ttl", fmt.Sprint(*params.Ttl))
	}
	resp, err := c.requestHandler.Post(c.baseURL+path, data, headers)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	ps := &NotifyV1Notification{}
	if err := json.NewDecoder(resp.Body).Decode(ps); err != nil {
		return nil, err
	}
	return ps, err
}
package geom
import "math"
// A Bounds represents a multi-dimensional bounding box.
type Bounds struct {
	layout Layout
	min    Coord // per-dimension minima; +Inf while the bounds are empty
	max    Coord // per-dimension maxima; -Inf while the bounds are empty
}

// NewBounds creates a new Bounds.
func NewBounds(layout Layout) *Bounds {
	stride := layout.Stride()
	min, max := make(Coord, stride), make(Coord, stride)
	for i := 0; i < stride; i++ {
		// Start inverted (min=+Inf, max=-Inf) so the first extended point
		// becomes both the minimum and the maximum; IsEmpty relies on this.
		min[i], max[i] = math.Inf(1), math.Inf(-1)
	}
	return &Bounds{
		layout: layout,
		min:    min,
		max:    max,
	}
}
// Clone returns a deep copy of b.
func (b *Bounds) Clone() *Bounds {
	return deriveCloneBounds(b)
}

// Extend extends b to include geometry g.
func (b *Bounds) Extend(g T) *Bounds {
	b.extendLayout(g.Layout())
	// Special case: when b has been promoted to XYZM but g is XYM, g's
	// flat coords have stride 3 with M at index 2, which must land in b's
	// index 3 (M) rather than index 2 (Z).
	if b.layout == XYZM && g.Layout() == XYM {
		return b.extendXYZMFlatCoordsWithXYM(g.FlatCoords(), 0, len(g.FlatCoords()))
	}
	return b.extendFlatCoords(g.FlatCoords(), 0, len(g.FlatCoords()), g.Stride())
}

// IsEmpty returns true if b is empty.
func (b *Bounds) IsEmpty() bool {
	if b.layout == NoLayout {
		return true
	}
	// Bounds start with min=+Inf, max=-Inf; any dimension still inverted
	// means no point has been added in that dimension.
	for i, stride := 0, b.layout.Stride(); i < stride; i++ {
		if b.max[i] < b.min[i] {
			return true
		}
	}
	return false
}

// Layout returns b's layout.
func (b *Bounds) Layout() Layout {
	return b.layout
}

// Max returns the maximum value in dimension dim.
func (b *Bounds) Max(dim int) float64 {
	return b.max[dim]
}

// Min returns the minimum value in dimension dim.
func (b *Bounds) Min(dim int) float64 {
	return b.min[dim]
}

// Overlaps returns true if b overlaps b2 in layout.
func (b *Bounds) Overlaps(layout Layout, b2 *Bounds) bool {
	for i, stride := 0, layout.Stride(); i < stride; i++ {
		if b.min[i] > b2.max[i] || b.max[i] < b2.min[i] {
			return false
		}
	}
	return true
}
// Polygon returns b as a two-dimensional Polygon.
func (b *Bounds) Polygon() *Polygon {
	if b.IsEmpty() {
		return NewPolygonFlat(XY, nil, nil)
	}
	x1, y1 := b.min[0], b.min[1]
	x2, y2 := b.max[0], b.max[1]
	// A single closed ring over the four corners (first point repeated
	// at the end to close the ring).
	flatCoords := []float64{
		x1, y1,
		x1, y2,
		x2, y2,
		x2, y1,
		x1, y1,
	}
	return NewPolygonFlat(XY, flatCoords, []int{len(flatCoords)})
}

// Set sets the minimum and maximum values. args must be an even number of
// values: the first half are the minimum values for each dimension and the
// second half are the maximum values for each dimension. If necessary, the
// layout of b will be extended to cover all the supplied dimensions implied by
// args.
func (b *Bounds) Set(args ...float64) *Bounds {
	if len(args)&1 != 0 {
		panic("geom: even number of arguments required")
	}
	stride := len(args) / 2
	b.extendStride(stride)
	for i := 0; i < stride; i++ {
		b.min[i], b.max[i] = args[i], args[i+stride]
	}
	return b
}
// SetCoords sets the minimum and maximum values of the Bounds. The supplied
// coordinates do not need to be ordered: each dimension's minimum and
// maximum are determined independently.
func (b *Bounds) SetCoords(min, max Coord) *Bounds {
	// Coord is a []float64, so it can be made directly; hoist the stride
	// instead of recomputing it on every iteration.
	stride := b.layout.Stride()
	b.min = make(Coord, stride)
	b.max = make(Coord, stride)
	for i := 0; i < stride; i++ {
		b.min[i] = math.Min(min[i], max[i])
		b.max[i] = math.Max(min[i], max[i])
	}
	return b
}
// OverlapsPoint determines if the bounding box overlaps the point (point is
// within or on the border of the bounds).
func (b *Bounds) OverlapsPoint(layout Layout, point Coord) bool {
	stride := layout.Stride()
	for dim := 0; dim < stride; dim++ {
		if point[dim] < b.min[dim] || point[dim] > b.max[dim] {
			return false
		}
	}
	return true
}
// extendFlatCoords grows b to cover the packed coordinates in
// flatCoords[offset:end], read stride values at a time.
func (b *Bounds) extendFlatCoords(flatCoords []float64, offset, end, stride int) *Bounds {
	b.extendStride(stride)
	for base := offset; base < end; base += stride {
		for dim := 0; dim < stride; dim++ {
			v := flatCoords[base+dim]
			b.min[dim] = math.Min(b.min[dim], v)
			b.max[dim] = math.Max(b.max[dim], v)
		}
	}
	return b
}
// extendLayout upgrades b's layout (and its min/max slices) so b can also
// represent coordinates with the given layout.
func (b *Bounds) extendLayout(layout Layout) {
	switch {
	case b.layout == XYZ && layout == XYM:
		// XYZ merged with XYM becomes XYZM: add an empty M dimension.
		b.min = append(b.min, math.Inf(1))
		b.max = append(b.max, math.Inf(-1))
		b.layout = XYZM
	case b.layout == XYM && (layout == XYZ || layout == XYZM):
		// XYM upgrades to XYZM: insert an empty Z before the existing M.
		// The b.min[2]/b.max[2] arguments are evaluated before append
		// writes into the backing array, so the M values are preserved.
		b.min = append(b.min[:2], math.Inf(1), b.min[2])
		b.max = append(b.max[:2], math.Inf(-1), b.max[2])
		b.layout = XYZM
	case b.layout < layout:
		b.extendStride(layout.Stride())
		b.layout = layout
	}
}
// extendStride appends empty dimensions (+Inf min, -Inf max) until b has at
// least stride dimensions.
func (b *Bounds) extendStride(stride int) {
	for s := b.layout.Stride(); s < stride; s++ {
		b.min = append(b.min, math.Inf(1))
		b.max = append(b.max, math.Inf(-1))
	}
}
// extendXYZMFlatCoordsWithXYM extends an XYZM Bounds with XYM flat
// coordinates (stride 3): X and Y update dimensions 0 and 1, M updates
// dimension 3, and the Z dimension (index 2) is left untouched.
func (b *Bounds) extendXYZMFlatCoordsWithXYM(flatCoords []float64, offset, end int) *Bounds {
	for i := offset; i < end; i += 3 {
		b.min[0] = math.Min(b.min[0], flatCoords[i+0])
		b.max[0] = math.Max(b.max[0], flatCoords[i+0])
		b.min[1] = math.Min(b.min[1], flatCoords[i+1])
		b.max[1] = math.Max(b.max[1], flatCoords[i+1])
		b.min[3] = math.Min(b.min[3], flatCoords[i+2])
		b.max[3] = math.Max(b.max[3], flatCoords[i+2])
	}
	return b
} | vendor/github.com/twpayne/go-geom/bounds.go | 0.876145 | 0.437223 | bounds.go | starcoder |
package alphafoxtrot
// Airport is the assembled view of a single airport: its own attributes plus
// the region, country, runways, frequencies and navaids associated with it.
type Airport struct {
	ICAOCode string
	Type string
	Name string
	LatitudeDeg float64
	LongitudeDeg float64
	ElevationFt int64
	Continent string
	Municipality string
	ScheduledService bool
	GPSCode string
	IATACode string
	LocalCode string
	HomeLink string
	WikipediaLink string
	Keywords string
	Region Region
	Country Country
	Runways []Runway
	Frequencies []Frequency
	Navaids []Navaid
}
// Frequency is a radio frequency published for an airport.
type Frequency struct {
	Type string
	Description string
	FrequencyMHZ float64
}
// Runway describes one runway; the two ends ("low" and "high") are stored
// side by side in the same struct.
type Runway struct {
	LengthFt int64
	WidthFt int64
	Surface string
	Lighted bool
	Closed bool
	LowEndIdent string
	LowEndLatitudeDeg float64
	LowEndLongitudeDeg float64
	LowEndElevationFt int64
	LowEndHeadingDegT float64
	LowEndDisplacedThresholdFt int64
	HighEndIdent string
	HighEndLatitudeDeg float64
	HighEndLongitudeDeg float64
	HighEndElevationFt int64
	HighEndHeadingDegT float64
	HighEndDisplacedThresholdFt int64
}
// Region is an administrative region (state/province level).
type Region struct {
	ISOCode string
	LocalCode string
	Name string
	WikipediaLink string
	Keywords string
}
// Country is a country record keyed by its ISO code.
type Country struct {
	ISOCode string
	Name string
	Continent string
	WikipediaLink string
	Keywords string
}
// Navaid is a navigational aid (VOR, NDB, DME, ...) optionally associated
// with an airport.
type Navaid struct {
	Ident string
	Name string
	Type string
	FrequencyKHZ uint64
	LatitudeDeg float64
	LongitudeDeg float64
	ElevationFt int64
	ISOCountry string
	DMEFrequencyKHZ uint64
	DMEChannel string
	DMELatitudeDeg float64
	DMELongitudeDeg float64
	DMEElevationFt int64
	SlavedVariationDeg float64
	MagneticVariationDeg float64
	UsageType string
	Power string
	AssociatedAirport string
}
// NewAirport assembles an Airport from its raw *Data records. It returns nil
// when airport is nil; region and country may be nil (left as zero values).
// NOTE(review): the per-item constructors below do not nil-check, so a nil
// element inside frequencies/runways/navaids would panic — callers appear to
// guarantee non-nil elements; confirm before relying on it.
func NewAirport(airport *AirportData, region *RegionData, country *CountryData, frequencies []*FrequencyData, runways []*RunwayData, navaids []*NavaidData) *Airport {
	if airport == nil {
		return nil
	}
	aeroport := &Airport{
		ICAOCode: airport.ICAOCode,
		Type: airport.Type,
		Name: airport.Name,
		LatitudeDeg: airport.LatitudeDeg,
		LongitudeDeg: airport.LongitudeDeg,
		ElevationFt: airport.ElevationFt,
		Continent: airport.Continent,
		Municipality: airport.Municipality,
		ScheduledService: airport.ScheduledService,
		GPSCode: airport.GPSCode,
		IATACode: airport.IATACode,
		LocalCode: airport.LocalCode,
		HomeLink: airport.HomeLink,
		WikipediaLink: airport.WikipediaLink,
		Keywords: airport.Keywords,
		// Pre-size the slices; they are filled in below.
		Frequencies: make([]Frequency, 0, len(frequencies)),
		Runways: make([]Runway, 0, len(runways)),
		Navaids: make([]Navaid, 0, len(navaids)),
	}
	if region != nil {
		aeroport.Region = *NewRegion(region)
	}
	if country != nil {
		aeroport.Country = *NewCountry(country)
	}
	for _, frequency := range frequencies {
		aeroport.Frequencies = append(aeroport.Frequencies, *NewFrequency(frequency))
	}
	for _, runway := range runways {
		aeroport.Runways = append(aeroport.Runways, *NewRunway(runway))
	}
	for _, navaid := range navaids {
		aeroport.Navaids = append(aeroport.Navaids, *NewNavaid(navaid))
	}
	return aeroport
}
// NewRegion copies a RegionData record into a Region.
func NewRegion(region *RegionData) *Region {
	return &Region{
		ISOCode: region.ISOCode,
		LocalCode: region.LocalCode,
		Name: region.Name,
		WikipediaLink: region.WikipediaLink,
		Keywords: region.Keywords,
	}
}
// NewCountry copies a CountryData record into a Country.
func NewCountry(country *CountryData) *Country {
	return &Country{
		ISOCode: country.ISOCode,
		Name: country.Name,
		Continent: country.Continent,
		WikipediaLink: country.WikipediaLink,
		Keywords: country.Keywords,
	}
}
// NewFrequency copies a FrequencyData record into a Frequency.
func NewFrequency(frequency *FrequencyData) *Frequency {
	return &Frequency{
		Type: frequency.Type,
		Description: frequency.Description,
		FrequencyMHZ: frequency.FrequencyMHZ,
	}
}
// NewRunway copies a RunwayData record into a Runway.
func NewRunway(runway *RunwayData) *Runway {
	return &Runway{
		LengthFt: runway.LengthFt,
		WidthFt: runway.WidthFt,
		Surface: runway.Surface,
		Lighted: runway.Lighted,
		Closed: runway.Closed,
		LowEndIdent: runway.LowEndIdent,
		LowEndLatitudeDeg: runway.LowEndLatitudeDeg,
		LowEndLongitudeDeg: runway.LowEndLongitudeDeg,
		LowEndElevationFt: runway.LowEndElevationFt,
		LowEndHeadingDegT: runway.LowEndHeadingDegT,
		LowEndDisplacedThresholdFt: runway.LowEndDisplacedThresholdFt,
		HighEndIdent: runway.HighEndIdent,
		HighEndLatitudeDeg: runway.HighEndLatitudeDeg,
		HighEndLongitudeDeg: runway.HighEndLongitudeDeg,
		HighEndElevationFt: runway.HighEndElevationFt,
		HighEndHeadingDegT: runway.HighEndHeadingDegT,
		HighEndDisplacedThresholdFt: runway.HighEndDisplacedThresholdFt,
	}
}
// NewNavaid copies a NavaidData record into a Navaid.
func NewNavaid(navaid *NavaidData) *Navaid {
	return &Navaid{
		Ident: navaid.Ident,
		Name: navaid.Name,
		Type: navaid.Type,
		FrequencyKHZ: navaid.FrequencyKHZ,
		LatitudeDeg: navaid.LatitudeDeg,
		LongitudeDeg: navaid.LongitudeDeg,
		ElevationFt: navaid.ElevationFt,
		ISOCountry: navaid.ISOCountry,
		DMEFrequencyKHZ: navaid.DMEFrequencyKHZ,
		DMEChannel: navaid.DMEChannel,
		DMELatitudeDeg: navaid.DMELatitudeDeg,
		DMELongitudeDeg: navaid.DMELongitudeDeg,
		DMEElevationFt: navaid.DMEElevationFt,
		SlavedVariationDeg: navaid.SlavedVariationDeg,
		MagneticVariationDeg: navaid.MagneticVariationDeg,
		UsageType: navaid.UsageType,
		Power: navaid.Power,
		AssociatedAirport: navaid.AssociatedAirport,
	}
} | airport.go | 0.549399 | 0.466116 | airport.go | starcoder |
package main
import (
"fmt"
"math"
)
// Circle is defined by its radius.
type Circle struct {
	radius float64
}
// Rectangle is defined by its width and height.
type Rectangle struct {
	width float64
	height float64
}
// Triangle is defined by its three side lengths.
type Triangle struct {
	a float64
	b float64
	c float64
}
// Cylinder is defined by its base radius and height.
type Cylinder struct {
	radius float64
	height float64
}
// areaCircle returns the circle's area (pi * r^2).
func (c Circle) areaCircle() float64 {
	return math.Pi * math.Pow(c.radius, 2)
}
// circCircle returns the circle's circumference (2 * pi * r).
func (c Circle) circCircle() float64 {
	return 2 * math.Pi * c.radius
}
// areaRectangle returns the rectangle's area (width * height).
func (r Rectangle) areaRectangle() float64 {
	return r.width * r.height
}
// perimRectangle returns the rectangle's perimeter.
func (r Rectangle) perimRectangle() float64 {
	return (r.width + r.height) * 2
}
// areaTriangle returns the triangle's area from its three sides using
// Heron's formula: sqrt(s*(s-a)*(s-b)*(s-c)) where s is the semi-perimeter.
// BUG FIX: the original multiplied (s - t.a) three times instead of using
// (s - t.b) and (s - t.c), giving a wrong area for any non-equilateral
// triangle (e.g. the 3-4-5 triangle should have area 6).
func (t Triangle) areaTriangle() float64 {
	// Heron's Formula to get area from 3 sides
	s := (t.a + t.b + t.c) / 2
	return math.Sqrt(s * (s - t.a) * (s - t.b) * (s - t.c))
}
// perimTriangle returns the triangle's perimeter (sum of the three sides).
func (t Triangle) perimTriangle() float64 {
	return t.a + t.b + t.c
}
// volCylinder returns the cylinder's volume (pi * r^2 * h).
func (c Cylinder) volCylinder() float64 {
	return math.Pi * math.Pow(c.radius, 2) * c.height
}
// surfaceCylinder returns the cylinder's total surface area:
// lateral surface (2*pi*r*h) plus the two circular caps (2*pi*r^2).
func (c Cylinder) surfaceCylinder() float64 {
	return (2 * math.Pi * c.radius * c.height) + (2 * math.Pi * math.Pow(c.radius, 2))
}
// main demonstrates the shape methods: it builds one of each shape, computes
// their properties, and prints them first with Println then with formatted
// Printf output.
func main() {
	// Declare and assign
	circle1 := Circle{5}
	rectangle1 := Rectangle{5, 3}
	triangle1 := Triangle{4, 5, 6}
	cylinder1 := Cylinder{5, 3}
	// Get shape properties
	areaCircle1 := circle1.areaCircle()
	circCircle1 := circle1.circCircle()
	areaRectangle1 := rectangle1.areaRectangle()
	perimRectangle1 := rectangle1.perimRectangle()
	areaTriangle1 := triangle1.areaTriangle()
	perimTriangle1 := triangle1.perimTriangle()
	volumeCylinder1 := cylinder1.volCylinder()
	surfaceCylinder1 := cylinder1.surfaceCylinder()
	fmt.Println(circle1.radius, areaCircle1, circCircle1)
	fmt.Println(rectangle1.width, rectangle1.height, areaRectangle1, perimRectangle1)
	fmt.Println(triangle1.a, triangle1.b, triangle1.c, areaTriangle1, perimTriangle1)
	fmt.Println(cylinder1.radius, cylinder1.height, volumeCylinder1, surfaceCylinder1)
	fmt.Printf("Circle1 (radius %.2f) area is %10.3f, circumference is %10.3f\n",
		circle1.radius, areaCircle1, circCircle1)
	fmt.Printf("Rectangle1 (width %.2f, height %.2f) area is %10.3f, perimeter is %10.3f\n",
		rectangle1.width, rectangle1.height, areaRectangle1, perimRectangle1)
	fmt.Printf("Triangle1 (a %.2f, b %.2f, c %.2f) area is %10.3f, perimeter is %10.3f\n",
		triangle1.a, triangle1.b, triangle1.c, areaTriangle1, perimTriangle1)
	fmt.Printf("Cylinder1 (radius %.2f, height %.2f) vol is %10.3f, surface area is %10.3f\n",
		cylinder1.radius, cylinder1.height, volumeCylinder1, surfaceCylinder1)
} | software/development/languages/go-cheat-sheet/src/function-method-interface-package-example/method/method.go | 0.786582 | 0.451206 | method.go | starcoder |
package gstream
// Stream is a generic, slice-backed stream; the embedded sCtx carries the
// values, accumulated errors, and the sequential/parallel loop mode.
type Stream[T any] struct {
	*sCtx[T]
}
// NewStream wraps list in a sequential Stream.
func NewStream[T any](list []T) *Stream[T] {
	return newStreamWithCtx(&sCtx[T]{values: list, loop: ST_SEQUENTIAL})
}
// newStreamWithCtx builds a Stream around an existing context.
func newStreamWithCtx[T any](ctx *sCtx[T]) *Stream[T] {
	return &Stream[T]{sCtx: ctx}
}
// Pass is the identity function; useful as a no-op mapper.
func Pass[T any](value T) T {
	return value
}
// Map applies f to every element of src, sequentially, and returns a new
// stream of the results (sharing src's context metadata).
func Map[T, K any](src *Stream[T], f func(T) K) *Stream[K] {
	mapped := make([]K, 0)
	src.forEachSequential(func(v T) bool {
		mapped = append(mapped, f(v))
		return false
	})
	return newStreamWithCtx(newSCtxFrom(src.sCtx, mapped))
}
// Parallel switches the stream's loop mode to parallel and returns s.
func (s *Stream[T]) Parallel() *Stream[T] {
	s.loop = ST_PARALLEL
	return s
}
// Sequential switches the stream's loop mode to sequential and returns s.
func (s *Stream[T]) Sequential() *Stream[T] {
	s.loop = ST_SEQUENTIAL
	return s
}
// Skip returns a new stream without the first n elements.
// NOTE(review): slicing panics when n > len(values); callers must bound n.
func (s *Stream[T]) Skip(n int) *Stream[T] {
	return &Stream[T]{sCtx: newSCtxFrom(s.sCtx, s.values[n:])}
}
// Limit returns a new stream with at most the first max elements.
// NOTE(review): slicing panics when max > len(values); callers must bound max.
func (s *Stream[T]) Limit(max int) *Stream[T] {
	return &Stream[T]{sCtx: newSCtxFrom(s.sCtx, s.values[:max])}
}
// Filter returns a new stream containing only the elements for which f
// returns true, preserving order.
func (s *Stream[T]) Filter(f func(T) bool) *Stream[T] {
	kept := make([]T, 0)
	s.forEachSequential(func(v T) bool {
		if f(v) {
			kept = append(kept, v)
		}
		return false
	})
	return &Stream[T]{sCtx: newSCtxFrom(s.sCtx, kept)}
}
// ErrorFilter filters the stream with an error-returning predicate: elements
// for which f returns nil are kept; for the rest, the non-nil error is
// recorded on the stream's error list (retrievable via ToSlice) and the
// element is dropped.
// BUG FIX: the original had the branches swapped — it kept the elements
// whose predicate FAILED and appended nil errors to s.errors.
func (s *Stream[T]) ErrorFilter(f func(T) error) *Stream[T] {
	nl := make([]T, 0)
	s.forEachSequential(func(val T) (brk bool) {
		err := f(val)
		if err == nil {
			nl = append(nl, val)
		} else {
			s.errors = append(s.errors, err)
		}
		return
	})
	return &Stream[T]{sCtx: newSCtxFrom(s.sCtx, nl)}
}
// Reverse returns a new stream with the elements in reverse order; the
// original stream's backing slice is left untouched.
func (s *Stream[T]) Reverse() *Stream[T] {
	n := len(s.values)
	rev := make([]T, n)
	for i, v := range s.values {
		rev[n-1-i] = v
	}
	return &Stream[T]{newSCtxFrom(s.sCtx, rev)}
}
// MapToInt maps every element to an int and returns an IntStream.
func (s *Stream[T]) MapToInt(f func(T) int) *IntStream {
	nl := make([]int, 0)
	s.forEachSequential(func(val T) (brk bool) {
		nl = append(nl, f(val))
		return
	})
	return newIntStreamWithCtx(newSCtxFrom(s.sCtx, nl))
}
// MapToFloat maps every element to a float64 and returns a FloatStream.
func (s *Stream[T]) MapToFloat(f func(T) float64) *FloatStream {
	nl := make([]float64, 0)
	s.forEachSequential(func(val T) (brk bool) {
		nl = append(nl, f(val))
		return
	})
	return newFloatStreamWithCtx(newSCtxFrom(s.sCtx, nl))
}
// MapToStr maps every element to a string and returns a StringStream.
func (s *Stream[T]) MapToStr(f func(T) string) *StringStream {
	nl := make([]string, 0)
	s.forEachSequential(func(val T) (brk bool) {
		nl = append(nl, f(val))
		return
	})
	return newStringStreamWithCtx(newSCtxFrom(s.sCtx, nl))
}
// Map maps every element through f into a stream of any (the method form
// cannot introduce a new type parameter; see the free function Map for a
// typed result).
func (s *Stream[T]) Map(f func(T) any) *Stream[any] {
	nl := make([]any, 0)
	s.forEachSequential(func(val T) (brk bool) {
		nl = append(nl, f(val))
		return
	})
	return newStreamWithCtx(newSCtxFrom(s.sCtx, nl))
}
// ForEach applies f to every element using the stream's configured loop mode
// (sequential or parallel).
func (s *Stream[T]) ForEach(f func(T)) {
	s.forEachDefault(func(val T) (brk bool) {
		f(val)
		return
	})
}
// Count returns the number of elements in the stream.
func (s *Stream[T]) Count() int {
	return len(s.values)
}
// ToSlice terminates the stream, returning its values and any accumulated
// errors.
func (s *Stream[T]) ToSlice() ([]T, []error) {
	return s.values, s.errors
} | stream.go | 0.575111 | 0.419826 | stream.go | starcoder |
package xorfilter
import (
"math"
)
// Xor32 holds an xorfilter with approximately 32 bits per element and about one in a billion false positives.
type Xor32 struct {
	XorFilterCommon
	Fingerprints []uint32
}
// Populate32 creates an xor filter with approx 32 bits per element.
func Populate32(keys []uint64) (*Xor32, error) {
	var bld Builder
	return bld.Populate32(keys)
}
// Contains tell you whether the key is likely part of the set.
// It derives three block-local positions (h0, h1, h2) from rotations of the
// mixed hash and checks the XOR of the three fingerprints against the key's
// fingerprint.
func (filter *Xor32) Contains(key uint64) bool {
	hash := mixsplit(key, filter.Seed)
	f := uint32(fingerprint(hash))
	r0 := uint32(hash)
	r1 := uint32(rotl64(hash, 21))
	r2 := uint32(rotl64(hash, 42))
	h0 := reduce(r0, filter.BlockLength)
	h1 := reduce(r1, filter.BlockLength) + filter.BlockLength
	h2 := reduce(r2, filter.BlockLength) + 2*filter.BlockLength
	return f == (filter.Fingerprints[h0] ^ filter.Fingerprints[h1] ^ filter.Fingerprints[h2])
}
// allocate sizes the fingerprint table: 32 + ceil(1.23*size) slots, rounded
// down to a multiple of 3 so it splits into three equal blocks.
func (filter *Xor32) allocate(size int) {
	capacity := 32 + uint32(math.Ceil(1.23*float64(size)))
	capacity = capacity / 3 * 3 // round it down to a multiple of 3
	// slice capacity defaults to length
	filter.Fingerprints = make([]uint32, capacity)
	filter.BlockLength = capacity / 3
}
// Populate32 creates an xor filter with approx 32 bits per element.
// populateCommon performs the peeling phase and returns the peeled stack;
// this loop then replays the stack in reverse, assigning each slot so the
// three-way XOR in Contains reproduces the key's fingerprint.
func (bld *Builder) Populate32(keys []uint64) (*Xor32, error) {
	size := len(keys)
	filter := new(Xor32)
	filter.allocate(size)
	stack, err := bld.populateCommon(keys, &filter.XorFilterCommon)
	if err != nil {
		return nil, err
	}
	stacksize := size
	for stacksize > 0 {
		stacksize--
		ki := stack[stacksize]
		val := uint32(fingerprint(ki.hash))
		// XOR in the two other slots of this key, chosen by which of the
		// three blocks ki.index falls into.
		if ki.index < filter.BlockLength {
			val ^= filter.Fingerprints[filter.geth1(ki.hash)+filter.BlockLength] ^ filter.Fingerprints[filter.geth2(ki.hash)+2*filter.BlockLength]
		} else if ki.index < 2*filter.BlockLength {
			val ^= filter.Fingerprints[filter.geth0(ki.hash)] ^ filter.Fingerprints[filter.geth2(ki.hash)+2*filter.BlockLength]
		} else {
			val ^= filter.Fingerprints[filter.geth0(ki.hash)] ^ filter.Fingerprints[filter.geth1(ki.hash)+filter.BlockLength]
		}
		filter.Fingerprints[ki.index] = val
	}
	return filter, nil
} | xor32.go | 0.660939 | 0.503479 | xor32.go | starcoder |
package rstrie
import (
"bytes"
"github.com/PatrickCronin/routesum/pkg/routesum/bitslice"
)
// RSTrie is a radix-like trie of radix 2 whose stored "words" are the binary representations of networks and IPs. An
// optimization rstrie makes over a generic radix tree is that since routes covered by other routes don't need to be
// stored, each node in the trie will have either 0 or 2 children; never 1.
type RSTrie struct {
	root *node
}
// node is one trie node: bits holds the edge label leading into the node;
// children is nil for a leaf, otherwise exactly two children indexed by the
// next bit (0 or 1).
type node struct {
	children *[2]*node
	bits bitslice.BitSlice
}
// NewRSTrie returns an initialized RSTrie for use
func NewRSTrie() *RSTrie {
	return &RSTrie{
		root: nil,
	}
}
// InsertRoute inserts a new BitSlice into the trie. Each insert results in a space-optimized trie structure
// representing its contents. If a route being inserted is already covered by an existing route, it's simply ignored. If
// a route being inserted covers one or more routes already in the trie, those nodes are removed and replaced by the new
// route.
func (t *RSTrie) InsertRoute(routeBits bitslice.BitSlice) {
	// If the trie has no root node, simply create one to store the new route
	if t.root == nil {
		t.root = &node{
			bits: routeBits,
			children: nil,
		}
		return
	}
	// Otherwise, perform a non-recursive search of the trie's nodes for the best place to insert the route, and do so.
	// visited records the path from the root so completed subtries can be
	// collapsed after the insert.
	visited := []*node{}
	curNode := t.root
	remainingRouteBits := routeBits
	for {
		remainingRouteBitsLen := len(remainingRouteBits)
		curNodeBitsLen := len(curNode.bits)
		// Does the requested route cover the current node? If so, update the current node.
		if remainingRouteBitsLen <= curNodeBitsLen && bytes.HasPrefix(curNode.bits, remainingRouteBits) {
			curNode.bits = remainingRouteBits
			curNode.children = nil
			return
		}
		if curNodeBitsLen <= remainingRouteBitsLen && bytes.HasPrefix(remainingRouteBits, curNode.bits) {
			// Does the current node cover the requested route? If so, we're done.
			if curNode.isLeaf() {
				return
			}
			// Otherwise, we traverse to the correct child.
			remainingRouteBits = remainingRouteBits[curNodeBitsLen:]
			visited = append(visited, curNode)
			curNode = curNode.children[remainingRouteBits[0]]
			continue
		}
		// Otherwise the requested route diverges from the current node. We'll need to split the current node.
		// As an optimization, if the split would result in a new node whose children represent a complete subtrie, we
		// just update the current node, instead of allocating new nodes and optimizing them away immediately after.
		if curNode.isLeaf() &&
			curNodeBitsLen == remainingRouteBitsLen &&
			commonPrefixLen(curNode.bits, remainingRouteBits) == len(curNode.bits)-1 {
			// Same length, differing only in the last bit: the pair is the
			// complete subtrie under the shared prefix.
			curNode.bits = curNode.bits[:len(curNode.bits)-1]
			curNode.children = nil
		} else {
			newNode := splitNodeForRoute(curNode, remainingRouteBits)
			visitedLen := len(visited)
			if visitedLen == 0 {
				t.root = newNode
			} else {
				visited[visitedLen-1].children[newNode.bits[0]] = newNode
			}
		}
		simplifyVisitedSubtries(visited)
		return
	}
}
// childrenAreCompleteSubtrie reports whether n's two children are both
// single-bit leaves, i.e. together they cover the complete subtrie below n.
func (n *node) childrenAreCompleteSubtrie() bool {
	if n.isLeaf() {
		return false
	}
	left, right := n.children[0], n.children[1]
	if !left.isLeaf() || !right.isLeaf() {
		return false
	}
	return len(left.bits) == 1 && len(right.bits) == 1
}
// isLeaf reports whether n has no children.
func (n *node) isLeaf() bool {
	return n.children == nil
}
// splitNodeForRoute splits oldNode at its divergence point with routeBits:
// it returns a new interior node labeled with the common prefix whose two
// children are oldNode (with its label truncated) and a fresh leaf for the
// route's remaining bits. The two suffixes necessarily start with different
// bits, so the child assignments cannot collide.
func splitNodeForRoute(oldNode *node, routeBits bitslice.BitSlice) *node {
	commonBitsLen := commonPrefixLen(oldNode.bits, routeBits)
	commonBits := oldNode.bits[:commonBitsLen]
	routeNode := &node{
		bits: routeBits[commonBitsLen:],
		children: nil,
	}
	oldNode.bits = oldNode.bits[commonBitsLen:]
	newNode := &node{
		bits: commonBits,
		children: &[2]*node{},
	}
	newNode.children[routeNode.bits[0]] = routeNode
	newNode.children[oldNode.bits[0]] = oldNode
	return newNode
}
// A completed subtrie is a node in the trie whose children when taken together represent the complete subtrie below the
// node. For example, if a node represented the route "00", and it had a child for "0" and a child for "1", the node
// would be representing the "000" and "001" routes. But that's the same as having a single node for "00".
// simplifyCompletedSubtries takes a stack of visited nodes and simplifies completed subtries as far down the stack as
// possible. If at any point in the stack we find a node representing an incomplete subtrie, we stop.
func simplifyVisitedSubtries(visited []*node) {
	for i := len(visited) - 1; i >= 0; i-- {
		if visited[i].isLeaf() {
			return
		}
		if !visited[i].childrenAreCompleteSubtrie() {
			return
		}
		// Both children are single-bit leaves: fold them into this node.
		visited[i].children = nil
	}
}
// commonPrefixLen returns the length of the longest common prefix of a and b.
func commonPrefixLen(a, b bitslice.BitSlice) int {
	limit := min(len(a), len(b))
	n := 0
	for n < limit && a[n] == b[n] {
		n++
	}
	return n
}
// min returns the smaller of a and b.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
// traversalStep pairs a node with the bits accumulated on the path to it.
type traversalStep struct {
	n *node
	precedingRouteBits bitslice.BitSlice
}
// Contents returns the BitSlices contained in the RSTrie.
// Children are pushed onto the front of the worklist (0-child first), so the
// traversal is depth-first and the routes come out in ascending bit order.
func (t *RSTrie) Contents() []bitslice.BitSlice {
	// If the trie is empty
	if t.root == nil {
		return []bitslice.BitSlice{}
	}
	// Otherwise
	queue := []traversalStep{
		{
			n: t.root,
			precedingRouteBits: bitslice.BitSlice{},
		},
	}
	contents := []bitslice.BitSlice{}
	for len(queue) > 0 {
		step := queue[0]
		queue = queue[1:]
		// Fresh slice per step: appending to precedingRouteBits directly
		// could alias siblings' backing arrays.
		stepRouteBits := bitslice.BitSlice{}
		stepRouteBits = append(stepRouteBits, step.precedingRouteBits...)
		stepRouteBits = append(stepRouteBits, step.n.bits...)
		if step.n.isLeaf() {
			contents = append(contents, stepRouteBits)
		} else {
			queue = append([]traversalStep{
				{
					n: step.n.children[0],
					precedingRouteBits: stepRouteBits,
				},
				{
					n: step.n.children[1],
					precedingRouteBits: stepRouteBits,
				},
			}, queue...)
		}
	}
	return contents
} | pkg/routesum/rstrie/rstrie.go | 0.697506 | 0.496399 | rstrie.go | starcoder |
package wkt
import (
"fmt"
"reflect"
"strings"
"github.com/go-spatial/geom"
)
// isNil reports whether a is nil, including the "typed nil pointer inside a
// non-nil interface" case. reflect.ValueOf(a).IsNil panics for non-nilable
// kinds; the deferred recover swallows that panic, in which case the
// function returns the zero value false (a concrete non-nilable value is
// not nil).
func isNil(a interface{}) bool {
	defer func() { recover() }()
	return a == nil || reflect.ValueOf(a).IsNil()
}
// isMultiLineStringerEmpty reports whether ml is nil or contains no
// non-empty line strings.
func isMultiLineStringerEmpty(ml geom.MultiLineStringer) bool {
	if isNil(ml) {
		return true
	}
	// Empty only if every line (possibly zero of them) has zero length.
	for _, ln := range ml.LineStrings() {
		if len(ln) > 0 {
			return false
		}
	}
	return true
}
// isPolygonerEmpty reports whether p is nil or contains no non-empty
// linear rings.
func isPolygonerEmpty(p geom.Polygoner) bool {
	if isNil(p) {
		return true
	}
	// Empty only if every ring (possibly zero of them) has zero length.
	for _, ring := range p.LinearRings() {
		if len(ring) > 0 {
			return false
		}
	}
	return true
}
// isMultiPolygonerEmpty reports whether mp is nil or contains no polygon
// with a non-empty ring.
func isMultiPolygonerEmpty(mp geom.MultiPolygoner) bool {
	if isNil(mp) {
		return true
	}
	for _, ply := range mp.Polygons() {
		for _, ring := range ply {
			if len(ring) > 0 {
				return false
			}
		}
	}
	return true
}
// isCollectionerEmpty reports whether col is nil or every geometry it holds
// is itself empty (recursing into nested collections). A geometry of an
// unknown type falls through the switch and is treated as empty.
func isCollectionerEmpty(col geom.Collectioner) bool {
	if isNil(col) || len(col.Geometries()) == 0 {
		return true
	}
	geos := col.Geometries()
	for i := range geos {
		switch g := geos[i].(type) {
		case geom.Pointer:
			// A non-nil point always counts as non-empty.
			if !isNil(g) {
				return false
			}
		case geom.MultiPointer:
			if !(isNil(g) || len(g.Points()) == 0) {
				return false
			}
		case geom.LineStringer:
			if !(isNil(g) || len(g.Verticies()) == 0) {
				return false
			}
		case geom.MultiLineStringer:
			if !isMultiLineStringerEmpty(g) {
				return false
			}
		case geom.Polygoner:
			if !isPolygonerEmpty(g) {
				return false
			}
		case geom.MultiPolygoner:
			if !isMultiPolygonerEmpty(g) {
				return false
			}
		case geom.Collectioner:
			if !isCollectionerEmpty(g) {
				return false
			}
		}
	}
	return true
}
/*
The purpose of this file is to house the WKT functions. These functions are
used to take a geom.Geometry and convert it to a WKT string. It will, also,
contain functions to parse a WKT string into a geom.Geometry.
*/
// _encode renders the coordinate portion of a geometry (everything after the
// WKT type keyword) without the keyword itself. It panics on geometry types
// it does not know; Encode is responsible for never passing one.
func _encode(geo geom.Geometry) string {
	switch g := geo.(type) {
	case geom.Pointer:
		xy := g.XY()
		return fmt.Sprintf("%v %v", xy[0], xy[1])
	case geom.MultiPointer:
		var points []string
		for _, p := range g.Points() {
			points = append(points, _encode(geom.Point(p)))
		}
		return "(" + strings.Join(points, ",") + ")"
	case geom.LineStringer:
		var points []string
		for _, p := range g.Verticies() {
			points = append(points, _encode(geom.Point(p)))
		}
		return "(" + strings.Join(points, ",") + ")"
	case geom.MultiLineStringer:
		var lines []string
		for _, l := range g.LineStrings() {
			// Skip empty lines rather than emitting "()".
			if len(l) == 0 {
				continue
			}
			lines = append(lines, _encode(geom.LineString(l)))
		}
		return "(" + strings.Join(lines, ",") + ")"
	case geom.Polygoner:
		var rings []string
		for _, l := range g.LinearRings() {
			if len(l) == 0 {
				continue
			}
			rings = append(rings, _encode(geom.LineString(l)))
		}
		return "(" + strings.Join(rings, ",") + ")"
	case geom.MultiPolygoner:
		var polygons []string
		for _, p := range g.Polygons() {
			if len(p) == 0 {
				continue
			}
			polygons = append(polygons, _encode(geom.Polygon(p)))
		}
		return "(" + strings.Join(polygons, ",") + ")"
	}
	panic(fmt.Sprintf("Don't know the geometry type! %+v", geo))
}
// Encode returns a WKT representation of the Geometry if possible.
// Empty or nil geometries are rendered as "<TYPE> EMPTY". The error is
// non-nil only when the geometry type is unknown.
func Encode(geo geom.Geometry) (string, error) {
	switch g := geo.(type) {
	default:
		return "", geom.ErrUnknownGeometry{geo}
	case geom.Pointer:
		// POINT( 10 10)
		if isNil(g) {
			return "POINT EMPTY", nil
		}
		return "POINT (" + _encode(geo) + ")", nil
	case geom.MultiPointer:
		if isNil(g) || len(g.Points()) == 0 {
			return "MULTIPOINT EMPTY", nil
		}
		return "MULTIPOINT " + _encode(geo), nil
	case geom.LineStringer:
		if isNil(g) || len(g.Verticies()) == 0 {
			return "LINESTRING EMPTY", nil
		}
		return "LINESTRING " + _encode(geo), nil
	case geom.MultiLineStringer:
		if isMultiLineStringerEmpty(g) {
			return "MULTILINESTRING EMPTY", nil
		}
		return "MULTILINESTRING " + _encode(geo), nil
	case geom.Polygoner:
		if isPolygonerEmpty(g) {
			return "POLYGON EMPTY", nil
		}
		return "POLYGON " + _encode(geo), nil
	case geom.MultiPolygoner:
		if isMultiPolygonerEmpty(g) {
			return "MULTIPOLYGON EMPTY", nil
		}
		return "MULTIPOLYGON " + _encode(geo), nil
	case geom.Collectioner:
		if isCollectionerEmpty(g) {
			return "GEOMETRYCOLLECTION EMPTY", nil
		}
		var geometries []string
		for _, sg := range g.Geometries() {
			// Sub-geometries are rendered with their own type keyword.
			s, err := Encode(sg)
			if err != nil {
				return "", err
			}
			geometries = append(geometries, s)
		}
		return "GEOMETRYCOLLECTION (" + strings.Join(geometries, ",") + ")", nil
	}
}
// Decode is an unimplemented stub: it currently always returns (nil, nil).
func Decode(text string) (geo geom.Geometry, err error) {
	return nil, nil
} | encoding/wkt/wkt.go | 0.579638 | 0.40987 | wkt.go | starcoder |
package packetcache
import (
"math/bits"
"sync"
)
// The maximum size of packets stored in the cache. Chosen to be
// a multiple of 8.
const BufSize = 1504
// The maximum number of packets that constitute a keyframe.
const maxFrame = 1024
// entry represents a cached packet.
type entry struct {
	seqno uint16
	lengthAndMarker uint16 // 1 bit of marker, 15 bits of length
	timestamp uint32
	buf [BufSize]byte
}
// length returns the payload length stored in the low 15 bits.
func (e *entry) length() uint16 {
	return e.lengthAndMarker & 0x7FFF
}
// marker returns the marker flag stored in the top bit.
func (e *entry) marker() bool {
	return (e.lengthAndMarker & 0x8000) != 0
}
// bitmap keeps track of recent loss history
type bitmap struct {
	valid bool
	first uint16 // seqno of the bitmap's lowest bit
	bitmap uint32
}
// frame is used for storing the last keyframe
type frame struct {
	timestamp uint32
	complete bool
	entries []entry
}
// Cache is a fixed-capacity circular cache of recent packets, plus loss
// statistics, a loss bitmap and the last buffered keyframe. All fields are
// protected by mu.
type Cache struct {
	mu sync.Mutex
	//stats
	last uint16
	cycle uint16
	lastValid bool
	expected uint32
	lost uint32
	totalLost uint32
	// bitmap
	bitmap bitmap
	// buffered keyframe
	keyframe frame
	// the actual cache
	tail uint16
	entries []entry
}
// New creates a cache with the given capacity.
// It returns nil when capacity does not fit in a uint16.
func New(capacity int) *Cache {
	if capacity > int(^uint16(0)) {
		return nil
	}
	return &Cache{
		entries: make([]entry, capacity),
	}
}
// compare performs comparison modulo 2^16: it returns 0 when the values are
// equal, 1 when s1 is ahead of s2 (within half the sequence space), and -1
// otherwise.
func compare(s1, s2 uint16) int {
	switch {
	case s1 == s2:
		return 0
	case (s2-s1)&0x8000 != 0:
		return 1
	default:
		return -1
	}
}
// seqnoInvalid returns true if seqno is unreasonably far (more than 0x100)
// in the past relative to reference.
func seqnoInvalid(seqno, reference uint16) bool {
	if compare(reference, seqno) < 0 {
		// seqno is ahead of reference, so it is not in the past at all.
		return false
	}
	return reference-seqno > 0x100
}
// set sets a bit in the bitmap, shifting if necessary
func (bitmap *bitmap) set(seqno uint16) {
	// Restart the bitmap when it is invalid or seqno is too far in the past.
	if !bitmap.valid || seqnoInvalid(seqno, bitmap.first) {
		bitmap.first = seqno
		bitmap.bitmap = 1
		bitmap.valid = true
		return
	}
	// Packets older than the start of the window are ignored.
	if compare(bitmap.first, seqno) > 0 {
		return
	}
	// Shift the 32-bit window forward so seqno fits in it.
	if seqno-bitmap.first >= 32 {
		shift := seqno - bitmap.first - 31
		bitmap.bitmap >>= shift
		bitmap.first += shift
	}
	// Drop the leading run of already-received packets.
	if (bitmap.bitmap & 1) == 1 {
		ones := bits.TrailingZeros32(^bitmap.bitmap)
		bitmap.bitmap >>= ones
		bitmap.first += uint16(ones)
	}
	bitmap.bitmap |= (1 << uint16(seqno-bitmap.first))
	return
}
// BitmapGet shifts up to 17 bits out of the bitmap. It returns a boolean
// indicating if any were 0, the index of the first 0 bit, and a bitmap
// indicating any 0 bits after the first one.
func (cache *Cache) BitmapGet(next uint16) (bool, uint16, uint16) {
	cache.mu.Lock()
	defer cache.mu.Unlock()
	return cache.bitmap.get(next)
}
// get implements BitmapGet; the 17-bit limit matches the 1 + 16 bits that a
// NACK-style report can carry (first seqno plus a 16-bit following bitmap).
func (bitmap *bitmap) get(next uint16) (bool, uint16, uint16) {
	first := bitmap.first
	if compare(first, next) >= 0 {
		return false, first, 0
	}
	count := next - first
	if count > 17 {
		count = 17
	}
	// bm has a 1 for every *missing* packet among the shifted-out bits.
	bm := (^bitmap.bitmap) & ^((^uint32(0)) << count)
	bitmap.bitmap >>= count
	bitmap.first += count
	if bm == 0 {
		return false, first, 0
	}
	// Align the first missing packet with bit 0.
	if (bm & 1) == 0 {
		count := bits.TrailingZeros32(bm)
		bm >>= count
		first += uint16(count)
	}
	return true, first, uint16(bm >> 1)
}
// insert inserts a packet into a frame, keeping entries sorted by seqno.
// It returns false for duplicates or when the frame is full.
func (frame *frame) insert(seqno uint16, timestamp uint32, marker bool, data []byte) bool {
	n := len(frame.entries)
	i := 0
	if n == 0 || seqno > frame.entries[n-1].seqno {
		// fast path
		i = n
	} else {
		for i < n {
			if frame.entries[i].seqno >= seqno {
				break
			}
			i++
		}
		if i < n && frame.entries[i].seqno == seqno {
			// duplicate
			return false
		}
	}
	if n >= maxFrame {
		// overflow
		return false
	}
	lam := uint16(len(data))
	if marker {
		lam |= 0x8000
	}
	e := entry{
		seqno: seqno,
		lengthAndMarker: lam,
		timestamp: timestamp,
	}
	copy(e.buf[:], data)
	if i >= n {
		frame.entries = append(frame.entries, e)
		return true
	}
	// Shift the tail right by one and insert at position i.
	frame.entries = append(frame.entries, entry{})
	copy(frame.entries[i+1:], frame.entries[i:])
	frame.entries[i] = e
	return true
}
// store checks whether a packet is part of the current keyframe and, if
// so, inserts it. It also maintains frame.complete: a frame is complete when
// its seqnos are contiguous and one of its packets carries the marker bit.
func (frame *frame) store(seqno uint16, timestamp uint32, first bool, marker bool, data []byte) bool {
	if first {
		// A keyframe-initial packet with a new timestamp resets the frame.
		if frame.timestamp != timestamp {
			frame.timestamp = timestamp
			frame.complete = false
			frame.entries = frame.entries[:0]
		}
	} else if len(frame.entries) > 0 {
		if frame.timestamp != timestamp {
			// A later packet (by seqno) with a different timestamp means
			// a new frame has started: discard the stale keyframe.
			delta := seqno - frame.entries[0].seqno
			if (delta&0x8000) == 0 && delta > 0x4000 {
				frame.complete = false
				frame.entries = frame.entries[:0]
			}
			return false
		}
	} else {
		return false
	}
	done := frame.insert(seqno, timestamp, marker, data)
	if done && !frame.complete {
		// Re-check completeness: contiguous seqnos plus a marker packet.
		marker := false
		fst := frame.entries[0].seqno
		for i := 1; i < len(frame.entries); i++ {
			if frame.entries[i].seqno != fst+uint16(i) {
				return done
			}
			if frame.entries[i].marker() {
				marker = true
			}
		}
		if marker {
			frame.complete = true
		}
	}
	return done
}
// Store stores a packet in the cache. It returns the first seqno in the
// bitmap, and the index at which the packet was stored.
func (cache *Cache) Store(seqno uint16, timestamp uint32, keyframe bool, marker bool, buf []byte) (uint16, uint16) {
	cache.mu.Lock()
	defer cache.mu.Unlock()
	// Update loss statistics.
	if !cache.lastValid || seqnoInvalid(seqno, cache.last) {
		cache.last = seqno
		cache.lastValid = true
		cache.expected++
	} else {
		if compare(cache.last, seqno) <= 0 {
			cache.expected += uint32(seqno - cache.last)
			cache.lost += uint32(seqno - cache.last - 1)
			if seqno < cache.last {
				// Sequence number wrapped around.
				cache.cycle++
			}
			cache.last = seqno
		} else {
			// A late packet fills a previously-counted gap.
			if cache.lost > 0 {
				cache.lost--
			}
		}
	}
	cache.bitmap.set(seqno)
	done := cache.keyframe.store(seqno, timestamp, keyframe, marker, buf)
	if done && !cache.keyframe.complete {
		completeKeyframe(cache)
	}
	// Store into the circular buffer, overwriting the oldest slot.
	i := cache.tail
	cache.entries[i].seqno = seqno
	copy(cache.entries[i].buf[:], buf)
	lam := uint16(len(buf))
	if marker {
		lam |= 0x8000
	}
	cache.entries[i].lengthAndMarker = lam
	cache.entries[i].timestamp = timestamp
	cache.tail = (i + 1) % uint16(len(cache.entries))
	return cache.bitmap.first, i
}
// completeKeyFrame attempts to complete the current keyframe by pulling any
// missing packets out of the circular cache: first the gaps between the
// frame's first and last packets, then packets following the last one until
// a marker is found.
func completeKeyframe(cache *Cache) {
	l := len(cache.keyframe.entries)
	if l == 0 {
		return
	}
	first := cache.keyframe.entries[0].seqno
	last := cache.keyframe.entries[l-1].seqno
	count := (last - first) // may wrap around
	if count > 0x4000 {
		// this shouldn't happen
		return
	}
	var buf []byte
	if count > 1 {
		if buf == nil {
			buf = make([]byte, BufSize)
		}
		// Fill interior gaps from the circular cache.
		for i := uint16(1); i < count; i++ {
			n, ts, marker := get(first+i, cache.entries, buf)
			if n > 0 {
				cache.keyframe.store(
					first+i, ts, false, marker, buf,
				)
			}
		}
	}
	if !cache.keyframe.complete {
		// Try to find packets after the last one.
		for {
			l := len(cache.keyframe.entries)
			if cache.keyframe.entries[l-1].marker() {
				break
			}
			if buf == nil {
				buf = make([]byte, BufSize)
			}
			seqno := cache.keyframe.entries[l-1].seqno + 1
			n, ts, marker := get(seqno, cache.entries, buf)
			if n <= 0 {
				break
			}
			done := cache.keyframe.store(
				seqno, ts, false, marker, buf,
			)
			if !done || marker {
				break
			}
		}
	}
}
// Expect records that we expect n packets. It is used for loss statistics.
// Non-positive n is a no-op.
func (cache *Cache) Expect(n int) {
	if n <= 0 {
		return
	}
	cache.mu.Lock()
	cache.expected += uint32(n)
	cache.mu.Unlock()
}
// get looks up seqno in a slice of entries.  It returns the packet's
// length, timestamp and marker bit; if result is non-empty, the packet
// contents are additionally copied into it.
func get(seqno uint16, entries []entry, result []byte) (uint16, uint32, bool) {
	for i := range entries {
		e := &entries[i]
		if e.lengthAndMarker == 0 || e.seqno != seqno {
			continue
		}
		n := e.length()
		if len(result) > 0 {
			n = uint16(copy(result[:e.length()], e.buf[:]))
		}
		return n, e.timestamp, e.marker()
	}
	// Not found.
	return 0, 0, false
}
// Get retrieves a packet from the cache, returns the number of bytes
// copied. If result is of length 0, returns the size of the packet.
func (cache *Cache) Get(seqno uint16, result []byte) uint16 {
	cache.mu.Lock()
	defer cache.mu.Unlock()
	// Prefer the keyframe buffer, then fall back to the main cache.
	if n, _, _ := get(seqno, cache.keyframe.entries, result); n > 0 {
		return n
	}
	if n, _, _ := get(seqno, cache.entries, result); n > 0 {
		return n
	}
	return 0
}
// Last returns the seqno and timestamp of the most recently stored
// packet.  It returns false if no packet has been stored yet, or if
// the last packet is no longer available in the cache.
func (cache *Cache) Last() (bool, uint16, uint32) {
	cache.mu.Lock()
	defer cache.mu.Unlock()
	if !cache.lastValid {
		return false, 0, 0
	}
	// Rename from "len": don't shadow the builtin len.
	n, ts, _ := get(cache.last, cache.entries, nil)
	if n == 0 {
		return false, 0, 0
	}
	return true, cache.last, ts
}
// GetAt retrieves a packet from the cache assuming it is at the given index.
// It returns the number of bytes copied, or 0 if the entry at index does
// not hold seqno.
func (cache *Cache) GetAt(seqno uint16, index uint16, result []byte) uint16 {
	cache.mu.Lock()
	defer cache.mu.Unlock()
	if int(index) >= len(cache.entries) ||
		cache.entries[index].seqno != seqno {
		return 0
	}
	e := &cache.entries[index]
	return uint16(copy(result[:e.length()], e.buf[:]))
}
// Keyframe returns the last buffered keyframe. It returns the frame's
// timestamp, a boolean indicating if the frame is complete, and the
// seqnos of the buffered packets.
func (cache *Cache) Keyframe() (uint32, bool, []uint16) {
	cache.mu.Lock()
	defer cache.mu.Unlock()
	kf := &cache.keyframe
	if len(kf.entries) == 0 {
		return 0, false, nil
	}
	seqnos := make([]uint16, 0, len(kf.entries))
	for i := range kf.entries {
		seqnos = append(seqnos, kf.entries[i].seqno)
	}
	return kf.timestamp, kf.complete, seqnos
}
// KeyframeSeqno returns the first seqno and the timestamp of the
// buffered keyframe, or false if no keyframe is buffered.
func (cache *Cache) KeyframeSeqno() (bool, uint16, uint32) {
	cache.mu.Lock()
	defer cache.mu.Unlock()
	entries := cache.keyframe.entries
	if len(entries) == 0 {
		return false, 0, 0
	}
	return true, entries[0].seqno, cache.keyframe.timestamp
}
// resize changes the capacity of the circular buffer of entries,
// preserving as many recent packets as possible.  Both callers
// (Resize and ResizeCond) hold the cache lock when calling this.
func (cache *Cache) resize(capacity int) {
	if len(cache.entries) == capacity {
		// Nothing to do.
		return
	}
	entries := make([]entry, capacity)
	if capacity > len(cache.entries) {
		// Growing: keep everything.  Entries before tail keep their
		// indices; the wrapped-around part (from tail onwards) is
		// shifted to the end of the larger slice.
		copy(entries, cache.entries[:cache.tail])
		copy(entries[int(cache.tail)+capacity-len(cache.entries):],
			cache.entries[cache.tail:])
	} else if capacity > int(cache.tail) {
		// Shrinking, but the entries before tail still fit: keep
		// their indices, and retain as many of the older entries
		// (after tail) as there is room for.
		copy(entries, cache.entries[:cache.tail])
		copy(entries[cache.tail:],
			cache.entries[int(cache.tail)+
				len(cache.entries)-capacity:])
	} else {
		// too bad, invalidate all indices
		copy(entries,
			cache.entries[int(cache.tail)-capacity:cache.tail])
		cache.tail = 0
	}
	cache.entries = entries
}
// Resize resizes the cache to the given capacity. This might invalidate
// indices of recently stored packets.  It takes the cache lock and
// delegates the actual work to resize.
func (cache *Cache) Resize(capacity int) {
	cache.mu.Lock()
	defer cache.mu.Unlock()
	cache.resize(capacity)
}
// ResizeCond is like Resize, but avoids invalidating recent indices.
// It returns true if the cache was actually resized.
func (cache *Cache) ResizeCond(capacity int) bool {
	cache.mu.Lock()
	defer cache.mu.Unlock()
	current := len(cache.entries)
	if current >= capacity*3/4 && current < capacity*2 {
		// Already close enough to the requested capacity.
		return false
	}
	if capacity < current && int(cache.tail) > capacity {
		// Shrinking now would invalidate too many indices.
		return false
	}
	cache.resize(capacity)
	return true
}
// GetStats returns statistics about received packets. If reset is true,
// the statistics are reset.
func (cache *Cache) GetStats(reset bool) (uint32, uint32, uint32, uint32) {
	cache.mu.Lock()
	defer cache.mu.Unlock()
	// Extended seqno: cycle count in the high 16 bits.
	eseqno := uint32(cache.cycle)<<16 | uint32(cache.last)
	expected, lost := cache.expected, cache.lost
	totalLost := cache.totalLost + lost
	if reset {
		cache.totalLost = totalLost
		cache.expected = 0
		cache.lost = 0
	}
	return expected, lost, totalLost, eseqno
}
// ToBitmap takes a non-empty sorted list of seqnos, and computes a bitmap
// covering a prefix of the list. It returns the part of the list that
// couldn't be covered.
func ToBitmap(seqnos []uint16) (first uint16, bitmap uint16, remain []uint16) {
	first = seqnos[0]
	remain = seqnos[1:]
	for len(remain) > 0 {
		// Bit position relative to the seqno just after first;
		// the subtraction is modulo 2^16.
		offset := remain[0] - first - 1
		if offset >= 16 {
			break
		}
		bitmap |= 1 << offset
		remain = remain[1:]
	}
	return first, bitmap, remain
}
package operator
import (
"github.com/matrixorigin/matrixone/pkg/container/nulls"
"github.com/matrixorigin/matrixone/pkg/container/types"
"github.com/matrixorigin/matrixone/pkg/container/vector"
"github.com/matrixorigin/matrixone/pkg/encoding"
"github.com/matrixorigin/matrixone/pkg/vectorize/mul"
"github.com/matrixorigin/matrixone/pkg/vm/process"
"golang.org/x/exp/constraints"
)
func Mult[T constraints.Integer | constraints.Float](vectors []*vector.Vector, proc *process.Process) (*vector.Vector, error) {
lv, rv := vectors[0], vectors[1]
lvs, rvs := vector.MustTCols[T](lv), vector.MustTCols[T](rv)
rtl := lv.Typ.Oid.FixedLength()
if lv.IsScalarNull() || rv.IsScalarNull() {
return proc.AllocScalarNullVector(lv.Typ), nil
}
switch {
case lv.IsScalar() && rv.IsScalar():
vec := proc.AllocScalarVector(lv.Typ)
rs := make([]T, 1)
nulls.Or(lv.Nsp, rv.Nsp, vec.Nsp)
vector.SetCol(vec, mul.NumericMul(lvs, rvs, rs))
return vec, nil
case lv.IsScalar() && !rv.IsScalar():
vec, err := proc.AllocVector(lv.Typ, int64(rtl)*int64(len(rvs)))
if err != nil {
return nil, err
}
rs := encoding.DecodeFixedSlice[T](vec.Data, rtl)
nulls.Set(vec.Nsp, rv.Nsp)
vector.SetCol(vec, mul.NumericMulScalar(lvs[0], rvs, rs))
return vec, nil
case !lv.IsScalar() && rv.IsScalar():
vec, err := proc.AllocVector(lv.Typ, int64(rtl)*int64(len(lvs)))
if err != nil {
return nil, err
}
rs := encoding.DecodeFixedSlice[T](vec.Data, rtl)
nulls.Set(vec.Nsp, lv.Nsp)
vector.SetCol(vec, mul.NumericMulScalar(rvs[0], lvs, rs))
return vec, nil
default:
vec, err := proc.AllocVector(lv.Typ, int64(rtl)*int64(len(lvs)))
if err != nil {
return nil, err
}
rs := encoding.DecodeFixedSlice[T](vec.Data, rtl)
nulls.Or(lv.Nsp, rv.Nsp, vec.Nsp)
vector.SetCol(vec, mul.NumericMul(lvs, rvs, rs))
return vec, nil
}
}
//LeftType: types.T_decimal64,
//RightType: types.T_decimal64,
//ReturnType: types.T_decimal64,
func MultDecimal64(vectors []*vector.Vector, proc *process.Process) (*vector.Vector, error) {
lv, rv := vectors[0], vectors[1]
lvs, rvs := vector.MustTCols[types.Decimal64](lv), vector.MustTCols[types.Decimal64](rv)
resultScale := lv.Typ.Scale + rv.Typ.Scale
resultTyp := types.Type{Oid: types.T_decimal128, Size: 16, Width: 38, Scale: resultScale}
if lv.IsScalarNull() || rv.IsScalarNull() {
return proc.AllocScalarNullVector(resultTyp), nil
}
switch {
case lv.IsScalar() && rv.IsScalar():
vec := proc.AllocScalarVector(resultTyp)
rs := make([]types.Decimal128, 1)
nulls.Or(lv.Nsp, rv.Nsp, vec.Nsp)
vector.SetCol(vec, mul.Decimal64Mul(lvs, rvs, rs))
return vec, nil
case lv.IsScalar() && !rv.IsScalar():
vec, err := proc.AllocVector(resultTyp, int64(resultTyp.Size)*int64(len(rvs)))
if err != nil {
return nil, err
}
rs := encoding.DecodeDecimal128Slice(vec.Data)
rs = rs[:len(rvs)]
nulls.Set(vec.Nsp, rv.Nsp)
vector.SetCol(vec, mul.Decimal64MulScalar(lvs[0], rvs, rs))
vec.Typ = resultTyp
return vec, nil
case !lv.IsScalar() && rv.IsScalar():
vec, err := proc.AllocVector(lv.Typ, int64(resultTyp.Size)*int64(len(lvs)))
if err != nil {
return nil, err
}
rs := encoding.DecodeDecimal128Slice(vec.Data)
rs = rs[:len(lvs)]
nulls.Set(vec.Nsp, lv.Nsp)
vector.SetCol(vec, mul.Decimal64MulScalar(rvs[0], lvs, rs))
vec.Typ = resultTyp
return vec, nil
default:
vec, err := proc.AllocVector(lv.Typ, int64(resultTyp.Size)*int64(len(lvs)))
if err != nil {
return nil, err
}
rs := encoding.DecodeDecimal128Slice(vec.Data)
rs = rs[:len(rvs)]
nulls.Or(lv.Nsp, rv.Nsp, vec.Nsp)
vector.SetCol(vec, mul.Decimal64Mul(lvs, rvs, rs))
vec.Typ = resultTyp
return vec, nil
}
}
//LeftType: types.T_decimal128,
//RightType: types.T_decimal128,
//ReturnType: types.T_decimal128,
func MultDecimal128(vectors []*vector.Vector, proc *process.Process) (*vector.Vector, error) {
lv, rv := vectors[0], vectors[1]
lvs, rvs := vector.MustTCols[types.Decimal128](lv), vector.MustTCols[types.Decimal128](rv)
resultScale := lv.Typ.Scale + rv.Typ.Scale
resultTyp := types.Type{Oid: types.T_decimal128, Size: 16, Width: 38, Scale: resultScale}
if lv.IsScalarNull() || rv.IsScalarNull() {
return proc.AllocScalarNullVector(resultTyp), nil
}
switch {
case lv.IsScalar() && rv.IsScalar():
vec := proc.AllocScalarVector(resultTyp)
rs := make([]types.Decimal128, 1)
nulls.Or(lv.Nsp, rv.Nsp, vec.Nsp)
vector.SetCol(vec, mul.Decimal128Mul(lvs, rvs, rs))
vec.Typ = resultTyp
return vec, nil
case lv.IsScalar() && !rv.IsScalar():
vec, err := proc.AllocVector(resultTyp, int64(resultTyp.Size)*int64(len(rvs)))
if err != nil {
return nil, err
}
rs := encoding.DecodeDecimal128Slice(vec.Data)
rs = rs[:len(rvs)]
nulls.Set(vec.Nsp, rv.Nsp)
vector.SetCol(vec, mul.Decimal128MulScalar(lvs[0], rvs, rs))
vec.Typ = resultTyp
return vec, nil
case !lv.IsScalar() && rv.IsScalar():
vec, err := proc.AllocVector(lv.Typ, int64(resultTyp.Size)*int64(len(lvs)))
if err != nil {
return nil, err
}
rs := encoding.DecodeDecimal128Slice(vec.Data)
rs = rs[:len(lvs)]
nulls.Set(vec.Nsp, lv.Nsp)
vector.SetCol(vec, mul.Decimal128MulScalar(rvs[0], lvs, rs))
vec.Typ = resultTyp
return vec, nil
default:
vec, err := proc.AllocVector(lv.Typ, int64(resultTyp.Size)*int64(len(lvs)))
if err != nil {
return nil, err
}
rs := encoding.DecodeDecimal128Slice(vec.Data)
rs = rs[:len(rvs)]
nulls.Or(lv.Nsp, rv.Nsp, vec.Nsp)
vector.SetCol(vec, mul.Decimal128Mul(lvs, rvs, rs))
vec.Typ = resultTyp
return vec, nil
}
} | pkg/sql/plan2/function/operator/mult.go | 0.563498 | 0.44342 | mult.go | starcoder |
package infinitescroll
// Checkbox represents the state of the select all checkbox
type Checkbox int
const (
// Checked is when the select all box is selected.
Checked Checkbox = iota
// Unchecked is when the select all box is deselected.
Unchecked
// Indeterminate is when the select all box is greyed out.
Indeterminate
)
type table struct {
currentState Checkbox
sites map[string]bool
totalCount, clickCount uint64
}
// Table represents an infinite scroll list in a webpage.
type Table interface {
AddSites(sites []string)
ToggleSelectAll()
CurrentState() Checkbox
IsClicked(site string) bool
Click(site string)
}
// NewTable creates a new instance of an Table
func NewTable(totalCount uint64) Table {
return &table{sites: make(map[string]bool),
currentState: Unchecked, totalCount: totalCount}
}
func (ist *table) AddSites(sites []string) {
click := ist.CurrentState() == Checked
for _, site := range sites {
ist.sites[site] = click
}
if click {
ist.clickCount += uint64(len(sites))
}
}
func (ist *table) ToggleSelectAll() {
if ist.currentState == Checked {
ist.currentState = Unchecked
toggleAll(ist.sites, false)
ist.clickCount = 0
} else {
ist.currentState = Checked
toggleAll(ist.sites, true)
ist.clickCount = uint64(len(ist.sites))
}
}
func toggleAll(sites map[string]bool, click bool) {
for site := range sites {
sites[site] = click
}
}
func (ist *table) CurrentState() Checkbox { return ist.currentState }
func (ist *table) IsClicked(site string) bool {
if ist.currentState == Checked {
return true
} else if ist.currentState == Unchecked {
return false
}
return ist.sites[site]
}
func (ist *table) Click(site string) {
if state := ist.CurrentState(); state == Unchecked || state == Checked {
ist.currentState = Indeterminate
}
clicked := ist.sites[site]
ist.sites[site] = !clicked
if clicked {
ist.clickCount--
} else {
ist.clickCount++
}
if ist.clickCount == 0 {
ist.currentState = Unchecked
}
if ist.clickCount == ist.totalCount {
ist.currentState = Checked
}
} | infinitescroll/problem.go | 0.639173 | 0.470676 | problem.go | starcoder |
package stats
import (
"math"
"math/rand"
)
// NormalDist is a normal (Gaussian) distribution with mean Mu and
// standard deviation Sigma.
type NormalDist struct {
Mu, Sigma float64
}
// StdNormal is the standard normal distribution (Mu = 0, Sigma = 1)
var StdNormal = NormalDist{0, 1}
// 1/sqrt(2 * pi)
const invSqrt2Pi = 0.39894228040143267793994605993438186847585863116493465766592583
func (n NormalDist) PDF(x float64) float64 {
z := x - n.Mu
return math.Exp(-z*z/(2*n.Sigma*n.Sigma)) * invSqrt2Pi / n.Sigma
}
func (n NormalDist) pdfEach(xs []float64) []float64 {
res := make([]float64, len(xs))
if n.Mu == 0 && n.Sigma == 1 {
// Standard normal fast path
for i, x := range xs {
res[i] = math.Exp(-x*x/2) * invSqrt2Pi
}
} else {
a := -1 / (2 * n.Sigma * n.Sigma)
b := invSqrt2Pi / n.Sigma
for i, x := range xs {
z := x - n.Mu
res[i] = math.Exp(z*z*a) * b
}
}
return res
}
func (n NormalDist) CDF(x float64) float64 {
return math.Erfc(-(x-n.Mu)/(n.Sigma*math.Sqrt2)) / 2
}
func (n NormalDist) cdfEach(xs []float64) []float64 {
res := make([]float64, len(xs))
a := 1 / (n.Sigma * math.Sqrt2)
for i, x := range xs {
res[i] = math.Erfc(-(x-n.Mu)*a) / 2
}
return res
}
func (n NormalDist) InvCDF(p float64) (x float64) {
// This is based on <NAME>'s inverse normal CDF
// algorithm: http://home.online.no/~pjacklam/notes/invnorm/
const (
a1 = -3.969683028665376e+01
a2 = 2.209460984245205e+02
a3 = -2.759285104469687e+02
a4 = 1.383577518672690e+02
a5 = -3.066479806614716e+01
a6 = 2.506628277459239e+00
b1 = -5.447609879822406e+01
b2 = 1.615858368580409e+02
b3 = -1.556989798598866e+02
b4 = 6.680131188771972e+01
b5 = -1.328068155288572e+01
c1 = -7.784894002430293e-03
c2 = -3.223964580411365e-01
c3 = -2.400758277161838e+00
c4 = -2.549732539343734e+00
c5 = 4.374664141464968e+00
c6 = 2.938163982698783e+00
d1 = 7.784695709041462e-03
d2 = 3.224671290700398e-01
d3 = 2.445134137142996e+00
d4 = 3.754408661907416e+00
plow = 0.02425
phigh = 1 - plow
)
if p < 0 || p > 1 {
return nan
} else if p == 0 {
return -inf
} else if p == 1 {
return inf
}
if p < plow {
// Rational approximation for lower region.
q := math.Sqrt(-2 * math.Log(p))
x = (((((c1*q+c2)*q+c3)*q+c4)*q+c5)*q + c6) /
((((d1*q+d2)*q+d3)*q+d4)*q + 1)
} else if phigh < p {
// Rational approximation for upper region.
q := math.Sqrt(-2 * math.Log(1-p))
x = -(((((c1*q+c2)*q+c3)*q+c4)*q+c5)*q + c6) /
((((d1*q+d2)*q+d3)*q+d4)*q + 1)
} else {
// Rational approximation for central region.
q := p - 0.5
r := q * q
x = (((((a1*r+a2)*r+a3)*r+a4)*r+a5)*r + a6) * q /
(((((b1*r+b2)*r+b3)*r+b4)*r+b5)*r + 1)
}
// Refine approximation.
e := 0.5*math.Erfc(-x/math.Sqrt2) - p
u := e * math.Sqrt(2*math.Pi) * math.Exp(x*x/2)
x = x - u/(1+x*u/2)
// Adjust from standard normal.
return x*n.Sigma + n.Mu
}
func (n NormalDist) Rand(r *rand.Rand) float64 {
var x float64
if r == nil {
x = rand.NormFloat64()
} else {
x = r.NormFloat64()
}
return x*n.Sigma + n.Mu
}
func (n NormalDist) Bounds() (float64, float64) {
const stddevs = 3
return n.Mu - stddevs*n.Sigma, n.Mu + stddevs*n.Sigma
}
func (n NormalDist) Mean() float64 {
return n.Mu
}
func (n NormalDist) Variance() float64 {
return n.Sigma * n.Sigma
} | stats/normaldist.go | 0.761538 | 0.600803 | normaldist.go | starcoder |
package converter
import (
"encoding/json"
"fmt"
"strconv"
"strings"
)
type argInt []int
func (a argInt) Get(i int, args ...int) (r int) {
if i >= 0 && i < len(a) {
r = a[i]
} else if len(args) > 0 {
r = args[0]
}
return
}
// ToStr Convert any type to string.
func ToStr(value interface{}, args ...int) (s string) {
switch v := value.(type) {
case bool:
s = strconv.FormatBool(v)
case float32:
s = strconv.FormatFloat(float64(v), 'f', argInt(args).Get(0, -1), argInt(args).Get(1, 32))
case float64:
s = strconv.FormatFloat(v, 'f', argInt(args).Get(0, -1), argInt(args).Get(1, 64))
case int:
s = strconv.FormatInt(int64(v), argInt(args).Get(0, 10))
case int8:
s = strconv.FormatInt(int64(v), argInt(args).Get(0, 10))
case int16:
s = strconv.FormatInt(int64(v), argInt(args).Get(0, 10))
case int32:
s = strconv.FormatInt(int64(v), argInt(args).Get(0, 10))
case int64:
s = strconv.FormatInt(v, argInt(args).Get(0, 10))
case uint:
s = strconv.FormatUint(uint64(v), argInt(args).Get(0, 10))
case uint8:
s = strconv.FormatUint(uint64(v), argInt(args).Get(0, 10))
case uint16:
s = strconv.FormatUint(uint64(v), argInt(args).Get(0, 10))
case uint32:
s = strconv.FormatUint(uint64(v), argInt(args).Get(0, 10))
case uint64:
s = strconv.FormatUint(v, argInt(args).Get(0, 10))
case string:
s = v
case []byte:
s = string(v)
case []string:
s = strings.Join(v, ",")
default:
b, err := json.MarshalIndent(v, "", " ")
if err != nil {
fmt.Println(err)
}
s = string(b)
}
return s
}
func ToStrs(values []interface{}, args ...int) []string {
arr := make([]string, len(values))
for i, value := range values {
arr[i] = ToStr(value, args...)
}
return arr
}
// Int2HexStr converts decimal number to hex format string.
func Int2HexStr(num int) (hex string) {
if num == 0 {
return "0"
}
for num > 0 {
r := num % 16
var c string
if r >= 0 && r <= 9 {
c = string(rune(r + '0'))
} else {
c = string(rune(r + 'a' - 10))
}
hex = c + hex
num = num / 16
}
return hex
} | generators/app/templates/pkg/converter/toString.go | 0.538012 | 0.440229 | toString.go | starcoder |
package common
import (
"time"
)
// LerpPosition handles vector lerp interpolations
type LerpPosition struct {
start time.Time
startPosition *Vector
duration time.Duration
endPosition *Vector
endFunc func()
isEndFuncSet bool
isDestroyed bool
isEnabled bool
}
// Lerp returns a position
func (lc *LerpPosition) Lerp() (x float64, y float64) {
if !lc.isEnabled {
return lc.endPosition.X, lc.endPosition.Y
}
if lc.start.Add(lc.duration).Before(time.Now()) {
lc.isEnabled = false
return lc.endPosition.X, lc.endPosition.Y
}
elapsed := time.Since(lc.start).Nanoseconds()
destNano := lc.start.Add(lc.duration).Sub(lc.start).Nanoseconds()
t := float64(float64(elapsed) / float64(destNano))
x = (1-t)*lc.startPosition.X + t*lc.endPosition.X
y = (1-t)*lc.startPosition.Y + t*lc.endPosition.Y
return
}
// IsEnabled returns if enabled
func (lc *LerpPosition) IsEnabled() bool {
return lc.isEnabled
}
// SetIsEnabled sets if enabled or not
func (lc *LerpPosition) SetIsEnabled(isEnabled bool) {
lc.isEnabled = isEnabled
}
// IsDestroyed returns if enabled
func (lc *LerpPosition) IsDestroyed() bool {
return lc.isDestroyed
}
// SetIsDestroyed sets if destroyed on next frame or not
func (lc *LerpPosition) SetIsDestroyed(isDestroyed bool) {
lc.isDestroyed = isDestroyed
}
// SetEndFunc sets a function to call on end of lerp
func (lc *LerpPosition) SetEndFunc(endFunc func()) {
lc.endFunc = endFunc
lc.isEndFuncSet = true
}
// EndFunc returns the end function
func (lc *LerpPosition) EndFunc() func() {
return lc.endFunc
}
// IsEndFuncSet returns true if EndFunc exists
func (lc *LerpPosition) IsEndFuncSet() bool {
return lc.isEndFuncSet
}
// Init sets up a new lerp
func (lc *LerpPosition) Init(start time.Time, startPosition *Vector, endPosition *Vector, duration time.Duration, isEnabled bool, endFunc func(), isDestroyedAtEnd bool) {
lc.start = time.Now()
lc.startPosition = startPosition
lc.endPosition = endPosition
lc.duration = duration
lc.isEnabled = true
lc.endFunc = endFunc
lc.isDestroyed = isDestroyedAtEnd
} | common/lerp_position.go | 0.664649 | 0.427277 | lerp_position.go | starcoder |
package roast
import (
"fmt"
"reflect"
"strings"
"github.com/kamasamikon/miego/xmap"
"github.com/kamasamikon/miego/xtime"
"github.com/jinzhu/gorm"
)
type tabler interface {
TableName() string
}
type UpdateInfo struct {
Table string
SetArgs xmap.Map
WhereArgs xmap.Map
Err error
}
func SafeRemNew(tableName string, set xmap.Map, where xmap.Map) *UpdateInfo {
return &UpdateInfo{
Table: tableName,
SetArgs: set,
WhereArgs: where,
}
}
func (u *UpdateInfo) Set(dic xmap.Map) *UpdateInfo {
u.SetArgs = dic
return u
}
func (u *UpdateInfo) Where(dic xmap.Map) *UpdateInfo {
u.WhereArgs = dic
return u
}
func (u *UpdateInfo) Exec(db *gorm.DB) *UpdateInfo {
var setLines []string
for k, data := range u.SetArgs {
switch data.(type) {
case int:
v := reflect.ValueOf(data).Int()
setLines = append(setLines, fmt.Sprintf("`%s` = %d", k, v))
case int8:
v := reflect.ValueOf(data).Int()
setLines = append(setLines, fmt.Sprintf("`%s` = %d", k, v))
case int16:
v := reflect.ValueOf(data).Int()
setLines = append(setLines, fmt.Sprintf("`%s` = %d", k, v))
case int32:
v := reflect.ValueOf(data).Int()
setLines = append(setLines, fmt.Sprintf("`%s` = %d", k, v))
case int64:
v := reflect.ValueOf(data).Int()
setLines = append(setLines, fmt.Sprintf("`%s` = %d", k, v))
case uint:
v := reflect.ValueOf(data).Uint()
setLines = append(setLines, fmt.Sprintf("`%s` = %d", k, v))
case uint8:
v := reflect.ValueOf(data).Uint()
setLines = append(setLines, fmt.Sprintf("`%s` = %d", k, v))
case uint16:
v := reflect.ValueOf(data).Uint()
setLines = append(setLines, fmt.Sprintf("`%s` = %d", k, v))
case uint32:
v := reflect.ValueOf(data).Uint()
setLines = append(setLines, fmt.Sprintf("`%s` = %d", k, v))
case uint64:
v := reflect.ValueOf(data).Uint()
setLines = append(setLines, fmt.Sprintf("`%s` = %d", k, v))
case bool:
v := reflect.ValueOf(data).Bool()
setLines = append(setLines, fmt.Sprintf("`%s` = %b", k, v))
case float32:
v := reflect.ValueOf(data).Float()
setLines = append(setLines, fmt.Sprintf("`%s` = %f", k, v))
case float64:
v := reflect.ValueOf(data).Float()
setLines = append(setLines, fmt.Sprintf("`%s` = %f", k, v))
case string:
v := reflect.ValueOf(data).String()
setLines = append(setLines, fmt.Sprintf("`%s` = \"%s\"", k, v))
}
}
var whereLines []string
for k, data := range u.WhereArgs {
switch data.(type) {
case int:
v := reflect.ValueOf(data).Int()
whereLines = append(whereLines, fmt.Sprintf("`%s` = %d", k, v))
case int8:
v := reflect.ValueOf(data).Int()
whereLines = append(whereLines, fmt.Sprintf("`%s` = %d", k, v))
case int16:
v := reflect.ValueOf(data).Int()
whereLines = append(whereLines, fmt.Sprintf("`%s` = %d", k, v))
case int32:
v := reflect.ValueOf(data).Int()
whereLines = append(whereLines, fmt.Sprintf("`%s` = %d", k, v))
case int64:
v := reflect.ValueOf(data).Int()
whereLines = append(whereLines, fmt.Sprintf("`%s` = %d", k, v))
case uint:
v := reflect.ValueOf(data).Uint()
whereLines = append(whereLines, fmt.Sprintf("`%s` = %d", k, v))
case uint8:
v := reflect.ValueOf(data).Uint()
whereLines = append(whereLines, fmt.Sprintf("`%s` = %d", k, v))
case uint16:
v := reflect.ValueOf(data).Uint()
whereLines = append(whereLines, fmt.Sprintf("`%s` = %d", k, v))
case uint32:
v := reflect.ValueOf(data).Uint()
whereLines = append(whereLines, fmt.Sprintf("`%s` = %d", k, v))
case uint64:
v := reflect.ValueOf(data).Uint()
whereLines = append(whereLines, fmt.Sprintf("`%s` = %d", k, v))
case bool:
v := reflect.ValueOf(data).Bool()
whereLines = append(whereLines, fmt.Sprintf("`%s` = %b", k, v))
case float32:
v := reflect.ValueOf(data).Float()
whereLines = append(whereLines, fmt.Sprintf("`%s` = %f", k, v))
case float64:
v := reflect.ValueOf(data).Float()
whereLines = append(whereLines, fmt.Sprintf("`%s` = %f", k, v))
case string:
v := reflect.ValueOf(data).String()
whereLines = append(whereLines, fmt.Sprintf("`%s` = \"%s\"", k, v))
}
}
s := fmt.Sprintf("UPDATE `%s` SET %s WHERE %s", u.Table, strings.Join(setLines, ", "), strings.Join(whereLines, " AND "))
if err := db.Exec(s).Error; err != nil {
u.Err = err
}
return u
}
func (u *UpdateInfo) Error() error {
return u.Err
}
func SafeRem(db *gorm.DB, tableName string, RemBy string, RemWhy int, where xmap.Map) error {
if where == nil {
return nil
}
set := xmap.Make(
"RemAt", xtime.TimeNowToNum(),
"RemBy", RemBy,
"RemWhy", RemWhy,
)
where.SafeMerge(xmap.Make("RemAt", 0))
return SafeRemNew(tableName, set, where).Exec(db).Error()
}
func SafeAdd(db *gorm.DB, Object interface{}, RemBy string, where xmap.Map) error {
tx := db.Begin()
var tableName string
if tabler, ok := Object.(tabler); ok {
tableName = tabler.TableName()
}
if err := SafeRem(db, tableName, RemBy, RemWhy_Update, where); err != nil {
tx.Rollback()
return err
}
if err := tx.Save(Object).Error; err != nil {
tx.Rollback()
return err
}
if err := tx.Commit().Error; err != nil {
tx.Rollback()
return err
}
return nil
} | roast/saveoperation.go | 0.505127 | 0.409162 | saveoperation.go | starcoder |
package hijri
import (
"time"
dec "github.com/shopspring/decimal"
)
func dateToJD(date time.Time) float64 {
// Convert to UTC
date = date.UTC()
// Prepare variables for calculating
Y := int64(date.Year())
M := int64(date.Month())
D := int64(date.Day())
H := int64(date.Hour())
m := int64(date.Minute())
s := int64(date.Second())
// If year is before 4713 B.C, stop
if Y < -4712 {
return 0
}
// If date is in blank days, stop
endOfJulian := time.Date(1582, 10, 4, 23, 59, 59, 0, time.UTC)
startOfGregorian := time.Date(1582, 10, 15, 0, 0, 0, 0, time.UTC)
if date.After(endOfJulian) && date.Before(startOfGregorian) {
return 0
}
// If month <= 2, change year and month
if M <= 2 {
M += 12
Y--
}
// Check whether date is gregorian or julian
constant := dec.Zero
if date.After(endOfJulian) {
temp := dec.New(Y, -2).Floor()
constant = dec.New(2, 0).
Add(temp.Div(dec.New(4, 0)).Floor()).
Sub(temp)
}
// Calculate julian day
yearToDays := dec.New(Y, 0).
Mul(dec.NewFromFloat(365.25)).
Floor()
monthToDays := dec.New(M+1, 0).
Mul(dec.NewFromFloat(30.6001)).
Floor()
timeToSeconds := H*3600 + m*60 + s
timeToDays := dec.New(timeToSeconds, 0).
Div(dec.New(86400, 0))
julianDay, _ := dec.NewFromFloat(1720994.5).
Add(yearToDays).
Add(monthToDays).
Add(constant).
Add(dec.New(D, 0)).
Add(timeToDays).
Float64()
return julianDay
}
func jdToDate(jd float64) time.Time {
// Prepare variables for calculating
jd1 := dec.NewFromFloat(jd).Add(dec.NewFromFloat(0.5))
z := jd1.Floor()
f := jd1.Sub(z)
a := z
if z.GreaterThanOrEqual(dec.New(2299161, 0)) {
aa := z.Sub(dec.NewFromFloat(1867216.25)).
Div(dec.NewFromFloat(36524.25)).
Floor()
aaBy4 := aa.Div(dec.New(4, 0)).Floor()
a = z.Add(dec.New(1, 0)).Add(aa).Sub(aaBy4)
}
b := a.Add(dec.New(1524, 0))
c := b.Sub(dec.NewFromFloat(122.1)).
Div(dec.NewFromFloat(365.25)).
Floor()
d := c.Mul(dec.NewFromFloat(365.25)).Floor()
e := b.Sub(d).Div(dec.NewFromFloat(30.6001)).Floor()
// Calculate day with its time
dayTime := b.Sub(d).
Sub(e.Mul(dec.NewFromFloat(30.6001)).Floor()).
Add(f)
day := dayTime.Floor()
// Calculate time
seconds := dayTime.Sub(day).Mul(dec.New(24*60*60, 0))
hour := seconds.Div(dec.New(3600, 0)).Floor()
min := seconds.Sub(hour.Mul(dec.New(3600, 0))).
Div(dec.New(60, 0)).
Floor()
sec := seconds.Sub(hour.Mul(dec.New(3600, 0))).
Sub(min.Mul(dec.New(60, 0))).
Floor()
// Calculate month
var month dec.Decimal
if e.LessThan(dec.New(14, 0)) {
month = e.Sub(dec.New(1, 0))
} else {
month = e.Sub(dec.New(13, 0))
}
// Calculate year
var year dec.Decimal
if month.GreaterThan(dec.New(2, 0)) {
year = c.Sub(dec.New(4716, 0))
} else {
year = c.Sub(dec.New(4715, 0))
}
// Create date
intYear := int(year.IntPart())
intMonth := int(month.IntPart())
intDay := int(day.IntPart())
intHour := int(hour.IntPart())
intMin := int(min.IntPart())
intSec := int(sec.IntPart())
return time.Date(intYear, time.Month(intMonth), intDay,
intHour, intMin, intSec, 0, time.UTC)
} | julian-days.go | 0.591487 | 0.403391 | julian-days.go | starcoder |
package main
import (
"bufio"
"fmt"
"math"
"os"
"sort"
)
type Asteroid struct {
x int
y int
angles map[float64]*Asteroid
}
const RADIAN_TO_DEGREE = 180 / math.Pi
func main() {
detectionTestN(1, 3, 4, 8)
detectionTestN(2, 5, 8, 33)
detectionTestN(3, 1, 2, 35)
detectionTestN(4, 6, 3, 41)
detectionTestN(5, 11, 13, 210)
part1()
part2()
}
func detectionTestN(n int, expected_x int, expected_y int, other_asteroids int) {
asteroids := loadAsteroids(fmt.Sprintf("2019/10/test_%d.txt", n))
winner := pickBestAsteroid(asteroids)
fmt.Printf("Test %d: %d, %d => %d asteroids Expected: %d, %d => %d asteroids\n", n, winner.x, winner.y, winner.numVisibleAsteroids(), expected_x, expected_y, other_asteroids)
}
func part1() {
asteroids := loadAsteroids("2019/10/input.txt")
winner := pickBestAsteroid(asteroids) //22,19 -> 282 asteroids
fmt.Printf("Part 1: %d, %d => %d asteroids Expected: 22, 19 => 282\n", winner.x, winner.y, winner.numVisibleAsteroids())
}
func part2() {
asteroids := loadAsteroids("2019/10/input.txt")
monitoringStation := pickBestAsteroid(asteroids)
vaporized := make([]*Asteroid, 0)
for len(asteroids) > 1 {
vaporizeThese := monitoringStation.getAsteroidsToVaporize(asteroids)
asteroids = removeAsteroids(vaporizeThese, asteroids)
monitoringStation.clearNearestVisible()
vaporized = append(vaporized, vaporizeThese...)
}
fmt.Printf("Part 2: The 200th asteroid to be vaporized is at %d,%d, the answer is: %d",
vaporized[199].x, vaporized[199].y, (vaporized[199].x*100)+vaporized[199].y)
}
func (station *Asteroid) getAsteroidsToVaporize(asteroids []*Asteroid) []*Asteroid {
if station.numVisibleAsteroids() == 0 {
station.pickNearestAsteroidsFrom(asteroids)
if station.numVisibleAsteroids() == 0 {
return nil
}
}
vaporizeThese := make([]*Asteroid, 0)
for _, asteroid := range station.angles {
vaporizeThese = append(vaporizeThese, asteroid)
}
sort.Slice(vaporizeThese, func(i int, j int) bool {
return station.angleTo(vaporizeThese[i]) < station.angleTo(vaporizeThese[j])
})
return vaporizeThese
}
func (this *Asteroid) pickNearestAsteroidsFrom(asteroids []*Asteroid) {
this.clearNearestVisible()
for _, other := range asteroids {
if this.x == other.x && this.y == other.y {
continue
}
this.angleTo(other)
}
}
func pickBestAsteroid(asteroids []*Asteroid) *Asteroid {
maxVisibleAsteroids := 0
var winner *Asteroid
for _, asteroid := range asteroids {
asteroid.pickNearestAsteroidsFrom(asteroids)
visibleAsteroids := asteroid.numVisibleAsteroids()
if visibleAsteroids > maxVisibleAsteroids {
winner = asteroid
maxVisibleAsteroids = visibleAsteroids
}
}
return winner
}
func removeAsteroids(removeThese []*Asteroid, fromThese []*Asteroid) []*Asteroid {
for _, removeMe := range removeThese {
i := findAsteroid(removeMe, fromThese)
if i != -1 {
fromThese = removeAsteroidAt(i, fromThese)
} else {
fmt.Printf("Could not find asteroid %d,%d\n", removeMe.x, removeMe.y)
}
}
return fromThese
}
func findAsteroid(asteroid *Asteroid, asteroids []*Asteroid) int {
for i, current := range asteroids {
if asteroid == current {
return i
}
}
return -1
}
func removeAsteroidAt(s int, slice []*Asteroid) []*Asteroid {
result := make([]*Asteroid, len(slice)-1)
result = append(slice[:s], slice[s+1:]...)
return result
}
func loadAsteroids(filePath string) []*Asteroid {
file, err := os.Open(filePath)
if err != nil {
panic(err.Error())
}
defer file.Close()
scanner := bufio.NewScanner(file)
y := 0
asteroids := []*Asteroid{}
for scanner.Scan() {
line := scanner.Text()
for x, character := range line {
if character == '#' {
asteroids = append(asteroids, &Asteroid{x, y, make(map[float64]*Asteroid)})
}
}
y++
}
return asteroids
}
func (this *Asteroid) angleTo(other *Asteroid) float64 {
// We calculate atan2(x,y) instead of (y,x), and we subtract that number from 180 to place
// 0 degrees on top (12 o'clock)
angle := 180 - (RADIAN_TO_DEGREE * math.Atan2(float64(other.x-this.x), float64(other.y-this.y)))
if this.angles[angle] == nil {
this.angles[angle] = other
} else {
previous := this.angles[angle]
if this.manhattanDistance(other) < this.manhattanDistance(previous) {
this.angles[angle] = other
}
}
return angle
}
func (this *Asteroid) printVisibleAsteroids() {
i := 1
for slope, asteroid := range this.angles {
fmt.Printf("%d slope=%f to %d,%d\n", 1+i, slope, asteroid.x, asteroid.y)
i++
}
}
func (this *Asteroid) numVisibleAsteroids() int {
if this.angles == nil {
return 0
}
return len(this.angles)
}
func (this *Asteroid) manhattanDistance(other *Asteroid) int {
return int(math.Abs(float64(other.x-this.x)) + math.Abs(float64(other.y-this.y)))
}
func (this *Asteroid) clearNearestVisible() {
for k := range this.angles {
delete(this.angles, k)
}
}
func testAngles() {
o := &Asteroid{0, 0, make(map[float64]*Asteroid)}
a := &Asteroid{0, -1, nil}
fmt.Printf("ABOVE angle to A %d,%d => %f == 0?\n", a.x, a.y, o.angleTo(a))
a.x = 1
a.y = -1
fmt.Printf("UPPER RIGHT angle to A %d,%d => %f == 45?\n", a.x, a.y, o.angleTo(a))
a.x = 1
a.y = 0
fmt.Printf("RIGHT angle to A %d,%d => %f == 90\n", a.x, a.y, o.angleTo(a))
a.x = 1
a.y = 1
fmt.Printf("LOWER RIGHT angle to A %d,%d => %f == 135?\n", a.x, a.y, o.angleTo(a))
a.x = 0
a.y = 1
fmt.Printf("BELOW angle to A %d,%d => %f == 180?\n", a.x, a.y, o.angleTo(a))
a.x = -1
a.y = 1
fmt.Printf("LOWER LEFT angle to A %d,%d => %f == 225\n", a.x, a.y, o.angleTo(a))
a.x = -1
a.y = 0
fmt.Printf("LEFT angle to A %d,%d => %f == 270\n", a.x, a.y, o.angleTo(a))
a.x = -1
a.y = -1
fmt.Printf("angle to A %d,%d => %f == 315\n", a.x, a.y, o.angleTo(a))
} | 2019/10/monitoringStation.go | 0.61878 | 0.448668 | monitoringStation.go | starcoder |
package table
import (
"fmt"
"io"
"sort"
"strings"
)
type (
	// Table represents a table of data to be rendered.
	Table struct {
		// Columns describes each column's header and layout settings.
		Columns []Column
		// Data holds the rows; each row is indexed in parallel with Columns.
		Data []Row
		// Sort lists column indexes to sort Data by, in priority order.
		Sort []int
		// ColumnSpacing is written after every rendered cell.
		ColumnSpacing string
	}
	// Row is a single row of data in a table.
	Row = []string
	// Column represents metadata about a column in a table.
	Column struct {
		// Header is the text rendered in the table's header row.
		Header string
		// Width is the rendered width in bytes (the minimum when Flexible).
		Width int
		// If true, skip this column entirely when rendering.
		Hide bool
		// If true, set the width to the widest value in this column.
		Flexible bool
		// If true, pad cells on the right (left-align) instead of the left.
		LeftAlign bool
	}
)
// defaultColumnSpacing separates adjacent rendered cells.
const defaultColumnSpacing = " "
// NewTable creates a new table with the given columns and rows, using the
// default column spacing and no sort order.
func NewTable(cols []Column, data []Row) Table {
	t := Table{}
	t.Columns = cols
	t.Data = data
	t.Sort = []int{}
	t.ColumnSpacing = defaultColumnSpacing
	return t
}
// NewColumn creates a new flexible column whose initial width is the
// header's length.
func NewColumn(header string) Column {
	col := Column{Header: header, Flexible: true}
	col.Width = len(header)
	return col
}
// WithLeftAlign returns a copy of this column with left alignment enabled.
// The receiver is a value, so the original column is not modified.
func (c Column) WithLeftAlign() Column {
	c.LeftAlign = true
	return c
}
// Render writes the header row followed by every data row to w, sorting
// the data first (per t.Sort) and sizing columns once up front.
func (t *Table) Render(w io.Writer) {
	widths := t.columnWidths()
	t.renderRow(w, t.headerRow(), widths)
	t.sort()
	for _, dataRow := range t.Data {
		t.renderRow(w, dataRow, widths)
	}
}
// columnWidths returns the rendered width of every column: the configured
// Width, widened to the longest cell value for Flexible columns.
func (t *Table) columnWidths() []int {
	widths := make([]int, len(t.Columns))
	for c := range t.Columns {
		w := t.Columns[c].Width
		if t.Columns[c].Flexible {
			for _, row := range t.Data {
				if l := len(row[c]); l > w {
					w = l
				}
			}
		}
		widths[c] = w
	}
	return widths
}
// sort orders Data by the column indexes listed in t.Sort, comparing cell
// values lexicographically in priority order. Rows equal on every sort
// column keep an arbitrary relative order (sort.Slice is not stable).
func (t *Table) sort() {
	if len(t.Sort) == 0 {
		return
	}
	sort.Slice(t.Data, func(i, j int) bool {
		a, b := t.Data[i], t.Data[j]
		for _, col := range t.Sort {
			switch {
			case a[col] < b[col]:
				return true
			case a[col] > b[col]:
				return false
			}
		}
		return false
	})
}
// renderRow writes one row to w: each visible (non-Hide) cell is truncated
// to its column width, padded with spaces up to that width, aligned per the
// column's LeftAlign flag, and followed by t.ColumnSpacing. The row ends
// with a newline.
//
// NOTE(review): widths, truncation and padding are all byte-based (len and
// value[:n]), so multi-byte UTF-8 cell values may be cut mid-rune and will
// misalign — confirm inputs are ASCII.
func (t *Table) renderRow(w io.Writer, row Row, columnWidths []int) {
	for c, col := range t.Columns {
		if col.Hide {
			continue
		}
		value := row[c]
		if len(value) > columnWidths[c] {
			value = value[:columnWidths[c]]
		}
		padding := strings.Repeat(" ", columnWidths[c]-len(value))
		if col.LeftAlign {
			fmt.Fprintf(w, "%s%s%s", value, padding, t.ColumnSpacing)
		} else {
			fmt.Fprintf(w, "%s%s%s", padding, value, t.ColumnSpacing)
		}
	}
	fmt.Fprint(w, "\n")
}
func (t *Table) headerRow() Row {
row := make(Row, len(t.Columns))
for c, col := range t.Columns {
row[c] = col.Header
}
return row
} | cli/table/table.go | 0.71721 | 0.435001 | table.go | starcoder |
package base
import (
"fmt"
)
// Fitness is a measure of quality of a solution, with one entry per
// objective. All comparisons (Greater, Less, Dominates, ...) operate on the
// weighted values, so Fitness A greater than B means A is better than B,
// and A less than B means A is worse than B.
type Fitness struct {
	weights []float64 // per-objective weights applied to raw values
	wvalues []float64 // weighted values: values[i] * weights[i]
	values []float64 // raw objective values as passed to SetValues
	valid bool // true once values have been set; cleared by Invalidate
}
// NewFitness returns an invalid fitness carrying the given weights but no
// values yet; it returns nil when weights is empty.
func NewFitness(weights []float64) *Fitness {
	if len(weights) == 0 {
		return nil
	}
	return &Fitness{weights: weights, valid: false}
}
// NewFitnessWithValues returns a valid fitness holding the given weights
// and values; it returns nil when the slice lengths differ or are zero.
// The values are copied, so later changes to the caller's slice do not
// affect the fitness. (The previous version stored the caller's slice
// directly, aliasing it, and also pre-allocated a wvalues slice that
// SetValues filled redundantly.)
func NewFitnessWithValues(weights []float64, values []float64) *Fitness {
	if len(values) != len(weights) || len(values) == 0 {
		return nil
	}
	fitness := &Fitness{weights: weights}
	// SetValues lazily allocates fresh values/wvalues slices, copies the
	// input, and marks the fitness valid.
	fitness.SetValues(values)
	return fitness
}
// GetWValues returns a copy of the weighted values, or nil when no values
// have been set yet.
func (fitness *Fitness) GetWValues() []float64 {
	// Guard on wvalues — the slice actually copied below — rather than on
	// weights, which the previous version checked by mistake.
	if fitness.wvalues == nil {
		return nil
	}
	wvalues := make([]float64, len(fitness.wvalues))
	copy(wvalues, fitness.wvalues)
	return wvalues
}
// GetWeights returns a copy of the fitness's weights.
func (fitness *Fitness) GetWeights() []float64 {
	out := make([]float64, len(fitness.weights))
	copy(out, fitness.weights)
	return out
}
// GetValues returns the raw objective values, reconstructed by dividing
// each weighted value by its weight. It returns nil when no values have
// been set yet (the previous version indexed the nil wvalues slice and
// panicked in that case).
func (fitness *Fitness) GetValues() []float64 {
	if fitness.wvalues == nil {
		return nil
	}
	values := make([]float64, len(fitness.weights))
	for i := range values {
		values[i] = fitness.wvalues[i] / fitness.weights[i]
	}
	return values
}
// SetValues stores the raw objective values, recomputes the weighted
// values, and marks the fitness valid. Internal storage is allocated
// lazily on the first call.
func (fitness *Fitness) SetValues(values []float64) {
	if fitness.wvalues == nil {
		n := len(fitness.weights)
		fitness.wvalues = make([]float64, n)
		fitness.values = make([]float64, n)
	}
	for i := range values {
		fitness.values[i] = values[i]
		fitness.wvalues[i] = values[i] * fitness.weights[i]
	}
	fitness.valid = true
}
// Dominates reports whether this fitness Pareto-dominates other: no worse
// in every considered objective and strictly better in at least one. When
// obj is nil all objectives are considered; otherwise only the listed
// objective indexes are compared.
func (fitness *Fitness) Dominates(other *Fitness, obj []int) bool {
	indexes := obj
	if indexes == nil {
		indexes = make([]int, len(fitness.wvalues))
		for i := range indexes {
			indexes[i] = i
		}
	}
	strictlyBetter := false
	for _, i := range indexes {
		switch {
		case fitness.wvalues[i] > other.wvalues[i]:
			strictlyBetter = true
		case fitness.wvalues[i] < other.wvalues[i]:
			return false
		}
	}
	return strictlyBetter
}
// Clone returns a deep copy of the fitness: the weights — and the values,
// when present — are copied into fresh slices.
func (fitness *Fitness) Clone() *Fitness {
	weights := make([]float64, len(fitness.weights))
	copy(weights, fitness.weights)
	if fitness.values == nil {
		return NewFitness(weights)
	}
	values := make([]float64, len(fitness.values))
	copy(values, fitness.values)
	return NewFitnessWithValues(weights, values)
}
// Valid reports whether objective values have been set and not since
// invalidated.
func (fitness *Fitness) Valid() bool {
	// len of a nil slice is 0, so the former explicit nil check on
	// wvalues was redundant.
	return fitness.valid && len(fitness.wvalues) != 0
}

// Invalidate marks the fitness as invalid until SetValues is called again.
func (fitness *Fitness) Invalidate() {
	fitness.valid = false
}
// Greater reports whether this fitness is greater than other, defined as
// the negation of LessEqual (at least one weighted value strictly exceeds
// other's).
func (fitness *Fitness) Greater(other *Fitness) bool {
	return !fitness.LessEqual(other)
}

// GreaterEqual reports whether this fitness is not strictly less than
// other (the negation of Less).
func (fitness *Fitness) GreaterEqual(other *Fitness) bool {
	return !fitness.Less(other)
}

// LessEqual reports whether every weighted value is less than or equal to
// other's corresponding weighted value.
func (fitness *Fitness) LessEqual(other *Fitness) bool {
	for i, wv := range fitness.wvalues {
		if wv > other.wvalues[i] {
			return false
		}
	}
	return true
}

// Less reports whether every weighted value is strictly less than other's
// corresponding weighted value.
func (fitness *Fitness) Less(other *Fitness) bool {
	for i, wv := range fitness.wvalues {
		if wv >= other.wvalues[i] {
			return false
		}
	}
	return true
}

// Equal reports whether every weighted value matches other's exactly.
func (fitness *Fitness) Equal(other *Fitness) bool {
	for i, wv := range fitness.wvalues {
		if wv != other.wvalues[i] {
			return false
		}
	}
	return true
}

// NotEqual reports whether any weighted value differs from other's.
func (fitness *Fitness) NotEqual(other *Fitness) bool {
	return !fitness.Equal(other)
}
// String renders the fitness's internal state for debugging.
func (fitness *Fitness) String() string {
	return fmt.Sprintf(
		"Fitness{weights:%v, values:%v, wvalues:%v, valid:%v}",
		fitness.weights, fitness.values, fitness.wvalues, fitness.valid,
	)
}
// Len returns the number of objectives. Note this reads the lazily
// allocated values slice, so it is 0 until SetValues has been called.
func (fitness *Fitness) Len() int {
	return len(fitness.values)
}
package commands
import (
"crypto/elliptic"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"foil/cryptospecials"
"hash"
"math/big"
"github.com/spf13/cobra"
)
// init registers the oprf command's flags: one boolean selector per OPRF
// operation (mask/salt/unmask) and the hex-encoded inputs each operation
// consumes. The curve-selection flags are currently disabled (P-256 is
// hard-coded in doOprf).
func init() {
	oprfCmd.PersistentFlags().BoolVarP(&mask, "mask", "", false, "mask a string using the ECC-OPRF")
	oprfCmd.PersistentFlags().BoolVarP(&salt, "salt", "", false, "salt a masked value using the ECC-OPRF")
	oprfCmd.PersistentFlags().BoolVarP(&unmask, "unmask", "", false, "unmask a salted value using the ECC-OPRF")
	//oprfCmd.PersistentFlags().BoolVarP(&curveP256, "p256", "", false, "use P-256 as the elliptic curve for ECC-OPRF")
	//oprfCmd.PersistentFlags().BoolVarP(&curveP384, "p384", "", false, "use P-384 as the elliptic curve for ECC-OPRF")
	//oprfCmd.PersistentFlags().BoolVarP(&curveP521, "p521", "", false, "use P-521 as the elliptic curve for ECC-OPRF")
	//oprfCmd.PersistentFlags().BoolVarP(&curve25519, "c25519", "", false, "use Curve25519 as the elliptic curve for ECC-OPRF")
	oprfCmd.PersistentFlags().StringVarP(&xString, "x", "", "", "use [hex] as x-coordinate for ECC-OPRF operation (mask, salt, unmask)")
	oprfCmd.PersistentFlags().StringVarP(&yString, "y", "", "", "use [hex] as y-coordinate for ECC-OPRF operation (mask, salt, unmask)")
	oprfCmd.PersistentFlags().StringVarP(&saltString, "s", "", "", "use [hex] as the secret value \"s\" for ECC-OPRF salting operation")
	oprfCmd.PersistentFlags().StringVarP(&rInvString, "rinv", "", "", "use [hex] as the secret value \"r_inv\" for ECC-OPRF unmaksing operation")
}
var (
	// Operation selectors; exactly one must be set (validated in oprfPreCheck).
	mask bool
	salt bool
	unmask bool
	// Hex-encoded inputs for the selected operation.
	xString string
	yString string
	saltString string
	rInvString string
	// oprfCmd wires the OPRF operations into the CLI.
	// NOTE(review): "it's" and "ECC-OPERF" in the Long text look like typos
	// in user-facing help output — confirm before changing.
	oprfCmd = &cobra.Command{
		Use: "oprf",
		Short: "Perform an EC-OPRF action",
		Long: "Foil can perform [mask], [salt], and [unmask] operations for it's internal" +
			" ECC-OPERF based on: https://eprint.iacr.org/2017/111.",
		PersistentPreRunE: oprfPreCheck,
		RunE: doOprf,
	}
)
// oprfPreCheck validates the oprf command's flag combination before the
// command runs: exactly one operation must be selected, and each operation
// must have the inputs it requires.
func oprfPreCheck(cmd *cobra.Command, args []string) error {
	selected := 0
	for _, op := range []bool{mask, salt, unmask} {
		if op {
			selected++
		}
	}
	if selected == 0 {
		return errors.New("Error: specify an OPRF operation")
	}
	if selected > 1 {
		return errors.New("Error: specify only one OPRF operation")
	}
	// Masking starts from the raw input string.
	if mask && stdInString == "" {
		return errors.New("Error: specify OPRF input")
	}
	// Salting and unmasking both operate on an existing curve point.
	if salt || unmask {
		if xString == "" || yString == "" {
			return errors.New("Error: specify an elliptic curve point -x [hex] -y [hex]")
		}
		if salt && saltString == "" {
			// A missing salt is not fatal; doOprf generates a random one.
			fmt.Println("Warning: No salt value given; generating a random salt")
		}
		if unmask && rInvString == "" {
			return errors.New("Error: specify an r_inv [hex] for unmasking")
		}
	}
	return nil
}
// doOprf executes the selected OPRF operation (mask, salt, or unmask) using
// the ECC-OPRF primitives from the cryptospecials package. It currently
// only supports P-256; minor changes would be needed for P-384 and P-521.
func doOprf(cmd *cobra.Command, args []string) error {
	var (
		rInv, s, sOut *big.Int
		xBytes, yBytes, swap []byte
		pt cryptospecials.ECPoint
		elem cryptospecials.OPRF
		ec elliptic.Curve
		h hash.Hash
		err error
	)
	// Fill (x,y) with zero values
	pt.X = new(big.Int)
	pt.Y = new(big.Int)
	// Fill the salt values with zero values
	sOut = new(big.Int)
	rInv = new(big.Int)
	// Parameters that need to be abstracted away if supporting more curves
	ec = elliptic.P256()
	h = sha256.New()
	// For salt/unmask: decode the (x,y) point from hex and verify it lies
	// on the curve before doing anything with it.
	if !mask {
		xBytes, err = hex.DecodeString(xString)
		if err != nil {
			return err
		}
		yBytes, err = hex.DecodeString(yString)
		if err != nil {
			return err
		}
		pt.X.SetBytes(xBytes)
		pt.Y.SetBytes(yBytes)
		if !ec.IsOnCurve(pt.X, pt.Y) {
			return errors.New("Error: provided points not on elliptic curve")
		}
	}
	// Perform OPRF Masking: hash the input to a curve point and blind it,
	// returning the blinded point plus the secret r_inv needed to unmask.
	if mask {
		pt, rInv, err = elem.Mask(stdInString, h, ec, Verbose)
		if err != nil {
			return err
		}
		fmt.Printf("Masked x-coordinate (hex): %x\n", pt.X)
		fmt.Printf("Masked y-coordinate (hex): %x\n", pt.Y)
		fmt.Printf("SECRET - r inverse (hex):  %x\n", rInv)
	}
	// Perform OPRF Salting: scalar-multiply the masked point by the secret
	// salt s (generated randomly by Salt when none is supplied).
	if salt {
		if saltString != "" {
			swap, err = hex.DecodeString(saltString)
			if err != nil {
				return err
			}
			s = new(big.Int).SetBytes(swap)
		}
		pt, sOut, err = elem.Salt(pt, s, ec, Verbose)
		if err != nil {
			return fmt.Errorf("OPRF Salting failed: %v", err)
		}
		// Check to determine if s == sOut
		// NOTE(review): when no salt was given, s is still nil here, so the
		// "s given" line prints a nil value — confirm this is intended.
		if saltString == "" {
			fmt.Printf("SECRET - new s generated (hex): %x\n", sOut)
			fmt.Printf("SECRET - s given (hex)        : %x\n", s)
		}
		fmt.Printf("Salted x-coordinate (hex): %x\n", pt.X)
		fmt.Printf("Salted y-coordinate (hex): %x\n", pt.Y)
	}
	// Perform OPRF unmasking: remove the blinding factor via r_inv.
	if unmask {
		// This does not check to ensure that rInv < N and warn the user if true
		swap, err = hex.DecodeString(rInvString)
		if err != nil {
			return fmt.Errorf("OPRF Unmaksing failed: %v", err)
		}
		rInv = new(big.Int).SetBytes(swap)
		pt, err = elem.Unmask(pt, rInv, ec, Verbose)
		if err != nil {
			return err
		}
		fmt.Printf("Unmasked x-coordinate (hex): %x\n", pt.X)
		fmt.Printf("Unmasked y-coordinate (hex): %x\n", pt.Y)
	}
	return nil
}
package main
// These all were copy-pasted from xgraphics because newDrawable() didn't support user-specified geometry
import (
"fmt"
"image"
"github.com/BurntSushi/xgb/xproto"
"github.com/BurntSushi/xgbutil"
"github.com/BurntSushi/xgbutil/xgraphics"
"github.com/BurntSushi/xgbutil/xrect"
"github.com/BurntSushi/xgbutil/xwindow"
)
// newDrawable looks up the geometry of the drawable did and converts its
// full contents into an xgraphics.Image.
//nolint:unused,deadcode
func newDrawable(X *xgbutil.XUtil, did xproto.Drawable) (*xgraphics.Image, error) {
	geom, err := xwindow.RawGeometry(X, xproto.Drawable(did))
	if err != nil {
		return nil, err
	}
	return newDrawableFromGeometry(X, did, geom)
}
// newDrawableFromGeometry captures the pixels of drawable did within the
// rectangle pgeom via a GetImage request and converts them into a freshly
// allocated xgraphics.Image.
func newDrawableFromGeometry(X *xgbutil.XUtil, did xproto.Drawable, pgeom xrect.Rect) (*xgraphics.Image, error) {
	reply, err := xproto.GetImage(
		X.Conn(),
		xproto.ImageFormatZPixmap,
		did,
		int16(pgeom.X()),
		int16(pgeom.Y()),
		uint16(pgeom.Width()),
		uint16(pgeom.Height()),
		(1<<32)-1, // plane mask: request every plane
	).Reply()
	if err != nil {
		return nil, err
	}
	img := xgraphics.New(X, image.Rect(0, 0, pgeom.Width(), pgeom.Height()))
	// Decode the raw reply into the image, tolerating a few pixel formats.
	if err := readDrawableData(X, img, did, reply, pgeom.Width(), pgeom.Height()); err != nil {
		return nil, err
	}
	return img, nil
}
// readDrawableData decodes the raw GetImage reply for drawable did into
// ximg. Depth-1 bitmaps are read as black/transparent alpha masks; depth
// 24/32 images are read as BGR(A) pixel data. Any other depth or
// bits-per-pixel combination returns an error.
func readDrawableData(X *xgbutil.XUtil, ximg *xgraphics.Image, did xproto.Drawable,
	imgData *xproto.GetImageReply, width, height int) error {
	format := xgraphics.GetFormat(X, imgData.Depth)
	if format == nil {
		return fmt.Errorf("Could not find valid format for pixmap %d "+
			"with depth %d", did, imgData.Depth)
	}
	switch format.Depth {
	case 1: // We read bitmaps in as alpha masks.
		if format.BitsPerPixel != 1 {
			return fmt.Errorf("The image returned for pixmap id %d with "+
				"depth %d has an unsupported value for bits-per-pixel: %d",
				did, format.Depth, format.BitsPerPixel)
		}
		// Calculate the padded width of our image data.
		pad := int(X.Setup().BitmapFormatScanlinePad)
		paddedWidth := width
		if width%pad != 0 {
			paddedWidth = width + pad - (width % pad)
		}
		// Process one scanline at a time. Each 'y' represents a
		// single scanline.
		for y := 0; y < height; y++ {
			// Each scanline has length 'width' padded to
			// BitmapFormatScanlinePad, which is found in the X setup info.
			// 'i' is the index to the starting byte of the yth scanline.
			i := y * paddedWidth / 8
			for x := 0; x < width; x++ {
				// Extract bit x of the scanline (least significant bit
				// first within each byte).
				b := imgData.Data[i+x/8] >> uint(x%8)
				if b&1 > 0 { // opaque
					ximg.Set(x, y, xgraphics.BGRA{
						B: 0x0,
						G: 0x0,
						R: 0x0,
						A: 0xff,
					})
				} else { // transparent
					ximg.Set(x, y, xgraphics.BGRA{
						B: 0xff,
						G: 0xff,
						R: 0xff,
						A: 0x0,
					})
				}
			}
		}
	case 24, 32:
		switch format.BitsPerPixel {
		case 24:
			// 3 bytes per pixel, B-G-R order; alpha forced to opaque.
			bytesPer := int(format.BitsPerPixel) / 8
			var i int
			ximg.For(func(x, y int) xgraphics.BGRA {
				i = y*width*bytesPer + x*bytesPer
				return xgraphics.BGRA{
					B: imgData.Data[i],
					G: imgData.Data[i+1],
					R: imgData.Data[i+2],
					A: 0xff,
				}
			})
		case 32:
			// 4 bytes per pixel, B-G-R-A order.
			bytesPer := int(format.BitsPerPixel) / 8
			var i int
			ximg.For(func(x, y int) xgraphics.BGRA {
				i = y*width*bytesPer + x*bytesPer
				return xgraphics.BGRA{
					B: imgData.Data[i],
					G: imgData.Data[i+1],
					R: imgData.Data[i+2],
					A: imgData.Data[i+3],
				}
			})
		default:
			return fmt.Errorf("The image returned for pixmap id %d has "+
				"an unsupported value for bits-per-pixel: %d",
				did, format.BitsPerPixel)
		}
	default:
		return fmt.Errorf("The image returned for pixmap id %d has an "+
			"unsupported value for depth: %d", did, format.Depth)
	}
	return nil
}
package tuple
import "math"
// Tuple holds X, Y, Z co-ordinates plus a W component distinguishing
// points (W == 1) from vectors (W == 0).
type Tuple struct {
	X float64
	Y float64
	Z float64
	W float64
}

// Color holds the red, green and blue values for a pixel.
type Color struct {
	Red float64
	Green float64
	Blue float64
}

// New returns a Tuple with the given x, y, z and w components.
func New(x, y, z, w float64) Tuple {
	return Tuple{X: x, Y: y, Z: z, W: w}
}

// Point returns a Tuple representing a point (W component set to 1).
func Point(x, y, z float64) Tuple {
	return Tuple{X: x, Y: y, Z: z, W: 1.0}
}

// Vector returns a Tuple representing a vector (W component set to 0).
func Vector(x, y, z float64) Tuple {
	return Tuple{X: x, Y: y, Z: z, W: 0.0}
}
// IsPoint returns true if the tuple represents a point (W == 1).
func IsPoint(a Tuple) bool {
	return a.W == 1.0
}

// IsVector returns true if the tuple represents a vector (W == 0).
func IsVector(a Tuple) bool {
	return a.W == 0.0
}
// IsEqual compares two tuples component-wise within epsilon and returns
// true when all four components match approximately.
func IsEqual(a, b Tuple) bool {
	switch {
	case !approximatelyEqual(a.X, b.X):
		return false
	case !approximatelyEqual(a.Y, b.Y):
		return false
	case !approximatelyEqual(a.Z, b.Z):
		return false
	case !approximatelyEqual(a.W, b.W):
		return false
	}
	return true
}
// Plus adds two tuples component-wise and returns the result.
func Plus(a, b Tuple) Tuple {
	x := a.X + b.X
	y := a.Y + b.Y
	z := a.Z + b.Z
	w := a.W + b.W
	return New(x, y, z, w)
}

// Minus subtracts b from a component-wise and returns the result.
func Minus(a, b Tuple) Tuple {
	x := a.X - b.X
	y := a.Y - b.Y
	z := a.Z - b.Z
	w := a.W - b.W
	return New(x, y, z, w)
}

// Scale multiplies every component of a by scalar and returns the result.
func Scale(a Tuple, scalar float64) Tuple {
	return New(a.X*scalar, a.Y*scalar, a.Z*scalar, a.W*scalar)
}

// Negate returns the tuple with every component negated.
func Negate(a Tuple) Tuple {
	return Scale(a, -1.0)
}

// Divide returns the tuple with every component divided by scalar
// (implemented as multiplication by the reciprocal).
func Divide(a Tuple, scalar float64) Tuple {
	inverse := 1.0 / scalar
	return Scale(a, inverse)
}
// Magnitude returns the length (magnitude) of a vector
func Magnitude(a Tuple) float64 {
return math.Sqrt(a.X*a.X + a.Y*a.Y + a.Z*a.Z + a.W*a.W)
}
// Normalize returns the vector normalized to unit size
func Normalize(v Tuple) Tuple {
var magnitude float64
magnitude = Magnitude(v)
return New(v.X/magnitude, v.Y/magnitude, v.Z/magnitude, v.W/magnitude)
}
// Dot returns the product of two vectors
func Dot(v, w Tuple) float64 {
return v.X*w.X + v.Y*w.Y + v.Z*w.Z + v.W*w.W
}
// Cross returns the result of a cross product between two vectors
func Cross(v, w Tuple) Tuple {
return Vector(v.Y*w.Z-v.Z*w.Y,
v.Z*w.X-v.X*w.Z,
v.X*w.Y-v.Y*w.X)
} | tuple/tuple.go | 0.943099 | 0.794425 | tuple.go | starcoder |
Coding Exercise #1
Create a function called cube() that takes a parameter of type float64 and returns the cube of that parameter (the parameter to the power of 3).
Are you stuck? Do you want to see the solution for this exercise? Click https://play.golang.org/p/4UMwXWkYDyy.
Coding Exercise #2
Create a Go program with a function called f1() that takes a parameter of type uint and returns 2 values:
a) the factorial of n
b) the sum of all integer numbers greater than zero (>0) and less than or equal to n (<=n)
Test the program by calling the function.
Are you stuck? Do you want to see the solution for this exercise? Click https://play.golang.org/p/9UhNsDOxoEN.
Coding Exercise #3
Write a function called myFunc() that takes exactly one argument which is an int number written between double quotes (this is in fact a string). If the argument is integer 'n', the function should return the result of n + nn + nn
Example: myFunc('5') returns 5 + 55 + 555 which is 615 and myFunc('9') returns 9 + 99 + 999 which is 1107
Are you stuck? Do you want to see the solution for this exercise? Click https://play.golang.org/p/EA7rzjXv_vl.
Coding Exercise #4
Create a function with the identifier sum that takes in a variadic parameter of type int and returns the sum of all values of type int passed in.
Are you stuck? Do you want to see the solution for this exercise? Click https://play.golang.org/p/3HsGVDvfNBB.
Coding Exercise #5
Change the function from the previous exercise and use a `naked return`.
Are you stuck? Do you want to see the solution for this exercise? Click https://play.golang.org/p/oF6_6ViX2pF.
Coding Exercise #6
Create a function called searchItem() that takes 2 parameters: a) a string slice and b) a string.
The function should search for the string (the second parameter) in the slice (the first parameter) and returns true if it finds the string in the slice and false otherwise. Do function does an case-sensitive search.
Call the function and see how it works.
Example:
animals := []string{"lion", "tiger", "bear"}
result := searchItem(animals, "bear")
fmt.Println(result) // => true
result = searchItem(animals, "pig")
fmt.Println(result) // => false
Are you stuck? Do you want to see the solution for this exercise? Click https://play.golang.org/p/3u9bzvzb_Cc.
Coding Exercise #7
Change the function from the previous exercise to do a case-insensitive search.
Example:
animals := []string{"Lion", "tiger", "bear"}
result := searchItem(animals, "beaR")
fmt.Println(result) // => true
result = searchItem(animals, "lion")
fmt.Println(result) // => true
Are you stuck? Do you want to see the solution for this exercise? Click https://play.golang.org/p/icJ_ovYXxCc.
Coding Exercise #8
Consider the following Go program that prints out:
The Go gopher is the iconic mascot of the Go project.
Hello, Go playground!
package main
import "fmt"
func print(msg string) {
fmt.Println(msg)
}
func main() {
print("The Go gopher is the iconic mascot of the Go project.")
fmt.Println("Hello, Go playground!")
}
Modify only the line in the main() body function where the print() function is invoked so that the program will print out Hello, Go playground! and then The Go gopher is the iconic mascot of the Go project.
Are you stuck? Do you want to see the solution for this exercise? Click https://play.golang.org/p/kGmrBDov_3B.
Coding Exercise #9
Create a function that takes in an int value and prints out that value.
Assign the function to a variable, print out the type of the variable and then call that function through the variable name.
Are you stuck? Do you want to see the solution for this exercise? Click https://play.golang.org/p/2bZEmgHQu3u. | more_code/coding_tasks/functions/main.go | 0.853134 | 0.843057 | main.go | starcoder |
package stardust
import (
"errors"
"math"
"strings"
"github.com/miku/stardust/set"
)
// Version of the application
const Version = "0.1.1"
// CompleteString returns all strings from pool that have a given prefix
func CompleteString(pool []string, prefix string) []string {
var candidates []string
for _, value := range pool {
if strings.HasPrefix(value, prefix) {
candidates = append(candidates, value)
}
}
return candidates
}
// JaccardSets measure Jaccard distance of two sets
func JaccardSets(a, b set.Strings) float64 {
return float64(a.Intersection(b).Size()) / float64(a.Union(b).Size())
}
// Unigrams returns a set of 1-grams
func Unigrams(s string) set.Strings {
return Ngrams(s, 1)
}
// Bigrams returns a set of 2-grams
func Bigrams(s string) set.Strings {
return Ngrams(s, 2)
}
// Trigrams returns a set of 3-grams
func Trigrams(s string) set.Strings {
return Ngrams(s, 3)
}
// Ngrams return a set of n-grams for a given string
func Ngrams(s string, n int) set.Strings {
result := set.NewStrings()
if n > 0 {
lastIndex := len(s) - n + 1
for i := 0; i < lastIndex; i++ {
result.Add(s[i : i+n])
}
}
return result
}
// NgramDistanceSize computes the ngram/Jaccard measure for a given n
func NgramDistanceSize(s, t string, n int) (float64, error) {
sset := Ngrams(s, n)
tset := Ngrams(t, n)
if tset.Size() == 0 && sset.Size() == 0 {
return 0, nil
}
return JaccardSets(sset, tset), nil
}
// NgramDistance computes the trigram/Jaccard measure
func NgramDistance(s, t string) (float64, error) {
return NgramDistanceSize(s, t, 3)
}
// HammingDistance computes the Hamming distance for two strings of equals length
func HammingDistance(a, b string) (int, error) {
if len(a) != len(b) {
return 0, errors.New("strings must be of equal length")
}
distance := 0
for i := 0; i < len(a); i++ {
if a[i] != b[i] {
distance++
}
}
return distance, nil
}
func maxInt(numbers ...int) int {
result := math.MinInt64
for _, k := range numbers {
if k > result {
result = k
}
}
return result
}
func minInt(numbers ...int) int {
result := math.MaxInt64
for _, k := range numbers {
if k < result {
result = k
}
}
return result
}
// LevenshteinDistance computes the Levenshtein distance for two strings
func LevenshteinDistance(s, t string) (int, error) {
if len(s) < len(t) {
return LevenshteinDistance(t, s)
}
if len(t) == 0 {
return len(s), nil
}
previous := make([]int, len(t)+1)
for i, c := range s {
current := []int{i + 1}
for j, d := range t {
insertions := previous[j+1] + 1
deletions := current[j] + 1
cost := 0
if c != d {
cost = 1
}
subtitutions := previous[j] + cost
current = append(current, minInt(insertions, deletions, subtitutions))
}
previous = current
}
return previous[len(previous)-1], nil
}
// JaroDistance computes the Jaro distance for two strings
// From: https://github.com/xrash/smetrics
func JaroDistance(a, b string) (float64, error) {
la := float64(len(a))
lb := float64(len(b))
matchRange := int(math.Floor(math.Max(la, lb)/2.0)) - 1
matchRange = int(math.Max(0, float64(matchRange-1)))
var matches, halfs float64
transposed := make([]bool, len(b))
for i := 0; i < len(a); i++ {
start := int(math.Max(0, float64(i-matchRange)))
end := int(math.Min(lb-1, float64(i+matchRange)))
for j := start; j <= end; j++ {
if transposed[j] {
continue
}
if a[i] == b[j] {
if i != j {
halfs++
}
matches++
transposed[j] = true
break
}
}
}
if matches == 0 {
return 0, nil
}
transposes := math.Floor(float64(halfs / 2))
return ((matches / la) + (matches / lb) + (matches-transposes)/matches) / 3.0, nil
}
// JaroWinklerDistance computes the Jaro-Winkler distance for two strings
// From: https://github.com/xrash/smetrics
func JaroWinklerDistance(a, b string, boostThreshold float64, prefixSize int) (float64, error) {
j, _ := JaroDistance(a, b)
if j <= boostThreshold {
return j, nil
}
prefixSize = int(math.Min(float64(len(a)), math.Min(float64(prefixSize), float64(len(b)))))
var prefixMatch float64
for i := 0; i < prefixSize; i++ {
if a[i] == b[i] {
prefixMatch++
}
}
return j + 0.1*prefixMatch*(1.0-j), nil
}
func SorensenDiceDistance(a, b string) (float64, error) {
if len(a)+len(b) == 0 {
return 0, nil
}
ba := Bigrams(a)
bb := Bigrams(b)
distance := float64(2*ba.Intersection(bb).Size()) / float64(ba.Size()+bb.Size())
return distance, nil
} | common.go | 0.776453 | 0.456894 | common.go | starcoder |
package iso20022
// Parameters applied to the settlement of a security.
type FundSettlementParameters4 struct {
// Date and time at which the securities are to be delivered or received.
SettlementDate *ISODate `xml:"SttlmDt,omitempty"`
// Place where the settlement of transaction will take place. In the context of the investment funds, the place of settlement is the transfer agent, a Central Securities Depository (CSD) or an International Central Securities Depository (ICSD).
SettlementPlace *PartyIdentification2Choice `xml:"SttlmPlc"`
// Place where the securities are safe-kept, physically or notionally. This place can be, for example, a local custodian, a Central Securities Depository or an International Central Securities Depository.
SafekeepingPlace *PartyIdentification2Choice `xml:"SfkpgPlc,omitempty"`
// Identification of a specific system or set of rules and/or processes to be applied at the settlement place.
SecuritiesSettlementSystemIdentification *Max35Text `xml:"SctiesSttlmSysId,omitempty"`
// Chain of parties involved in the settlement of a transaction resulting in the movement of a security from one account to another.
ReceivingSideDetails *ReceivingPartiesAndAccount3 `xml:"RcvgSdDtls"`
// Chain of parties involved in the settlement of a transaction resulting in the movement of a security from one account to another.
DeliveringSideDetails *DeliveringPartiesAndAccount3 `xml:"DlvrgSdDtls,omitempty"`
}
func (f *FundSettlementParameters4) SetSettlementDate(value string) {
f.SettlementDate = (*ISODate)(&value)
}
func (f *FundSettlementParameters4) AddSettlementPlace() *PartyIdentification2Choice {
f.SettlementPlace = new(PartyIdentification2Choice)
return f.SettlementPlace
}
func (f *FundSettlementParameters4) AddSafekeepingPlace() *PartyIdentification2Choice {
f.SafekeepingPlace = new(PartyIdentification2Choice)
return f.SafekeepingPlace
}
func (f *FundSettlementParameters4) SetSecuritiesSettlementSystemIdentification(value string) {
f.SecuritiesSettlementSystemIdentification = (*Max35Text)(&value)
}
func (f *FundSettlementParameters4) AddReceivingSideDetails() *ReceivingPartiesAndAccount3 {
f.ReceivingSideDetails = new(ReceivingPartiesAndAccount3)
return f.ReceivingSideDetails
}
func (f *FundSettlementParameters4) AddDeliveringSideDetails() *DeliveringPartiesAndAccount3 {
f.DeliveringSideDetails = new(DeliveringPartiesAndAccount3)
return f.DeliveringSideDetails
} | FundSettlementParameters4.go | 0.786664 | 0.534916 | FundSettlementParameters4.go | starcoder |
package paillier
import (
"crypto/rand"
"errors"
"io"
"math/big"
)
var one = big.NewInt(1)
// ErrMessageTooLong is returned when attempting to encrypt a message which is
// too large for the size of the public key.
var ErrMessageTooLong = errors.New("paillier: message too long for Paillier public key size")
// GenerateKey generates an Paillier keypair of the given bit size using the
// random source random (for example, crypto/rand.Reader).
func GenerateKey(random io.Reader, bits int) (*PrivateKey, error) {
p, err := rand.Prime(random, bits)
if err != nil {
return nil, err
}
q, err := rand.Prime(random, bits)
if err != nil {
return nil, err
}
// n = p * q
n := new(big.Int).Mul(p, q)
// l = phi(n) = (p-1) * q(-1)
l := new(big.Int).Mul(
new(big.Int).Sub(p, one),
new(big.Int).Sub(q, one),
)
return &PrivateKey{
PublicKey: PublicKey{
N: n,
NSquared: new(big.Int).Mul(n, n),
G: new(big.Int).Add(n, one), // g = n + 1
},
L: l,
U: new(big.Int).ModInverse(l, n),
}, nil
}
// PrivateKey represents a Paillier key.
type PrivateKey struct {
PublicKey
L *big.Int // phi(n), (p-1)*(q-1)
U *big.Int // l^-1 mod n
}
// PublicKey represents the public part of a Paillier key.
type PublicKey struct {
N *big.Int // modulus
G *big.Int // n+1, since p and q are same length
NSquared *big.Int
}
// Encrypt encrypts a plain text represented as a byte array. The passed plain
// text MUST NOT be larger than the modulus of the passed public key.
func Encrypt(pubKey *PublicKey, plainText []byte) ([]byte, error) {
r, err := rand.Prime(rand.Reader, pubKey.N.BitLen())
if err != nil {
return nil, err
}
m := new(big.Int).SetBytes(plainText)
if pubKey.N.Cmp(m) < 1 { // N < m
return nil, ErrMessageTooLong
}
// c = g^m * r^n mod n^2
n := pubKey.N
c := new(big.Int).Mod(
new(big.Int).Mul(
new(big.Int).Exp(pubKey.G, m, pubKey.NSquared),
new(big.Int).Exp(r, n, pubKey.NSquared),
),
pubKey.NSquared,
)
return c.Bytes(), nil
}
// Decrypt decrypts the passed cipher text.
func Decrypt(privKey *PrivateKey, cipherText []byte) ([]byte, error) {
c := new(big.Int).SetBytes(cipherText)
if privKey.NSquared.Cmp(c) < 1 { // c < n^2
return nil, ErrMessageTooLong
}
// c^l mod n^2
a := new(big.Int).Exp(c, privKey.L, privKey.NSquared)
// L(a)
// (a - 1) / n
l := new(big.Int).Div(
new(big.Int).Sub(a, one),
privKey.N,
)
// m = L(c^l mod n^2) * u mod n
m := new(big.Int).Mod(
new(big.Int).Mul(l, privKey.U),
privKey.N,
)
return m.Bytes(), nil
}
// AddCipher homomorphically adds together two cipher texts.
// To do this we multiply the two cipher texts, upon decryption, the resulting
// plain text will be the sum of the corresponding plain texts.
func AddCipher(pubKey *PublicKey, cipher1, cipher2 []byte) []byte {
x := new(big.Int).SetBytes(cipher1)
y := new(big.Int).SetBytes(cipher2)
// x * y mod n^2
return new(big.Int).Mod(
new(big.Int).Mul(x, y),
pubKey.NSquared,
).Bytes()
}
// Add homomorphically adds a passed constant to the encrypted integer
// (our cipher text). We do this by multiplying the constant with our
// ciphertext. Upon decryption, the resulting plain text will be the sum of
// the plaintext integer and the constant.
func Add(pubKey *PublicKey, cipher, constant []byte) []byte {
c := new(big.Int).SetBytes(cipher)
x := new(big.Int).SetBytes(constant)
// c * g ^ x mod n^2
return new(big.Int).Mod(
new(big.Int).Mul(c, new(big.Int).Exp(pubKey.G, x, pubKey.NSquared)),
pubKey.NSquared,
).Bytes()
}
// Mul homomorphically multiplies an encrypted integer (cipher text) by a
// constant. We do this by raising our cipher text to the power of the passed
// constant. Upon decryption, the resulting plain text will be the product of
// the plaintext integer and the constant.
func Mul(pubKey *PublicKey, cipher []byte, constant []byte) []byte {
c := new(big.Int).SetBytes(cipher)
x := new(big.Int).SetBytes(constant)
// c ^ x mod n^2
return new(big.Int).Exp(c, x, pubKey.NSquared).Bytes()
} | paillier.go | 0.690872 | 0.446133 | paillier.go | starcoder |
package vec3
import (
"fmt"
"github.com/scritch007/gm/math32"
)
// Vec3 is a three-component float32 vector stored as [x, y, z].
type Vec3 [3]float32
// New returns a new Vec3 with the given x, y and z components.
func New(x, y, z float32) *Vec3 {
return &Vec3{x, y, z}
}
// Clone initializes a new Vec3 initialized with values from an existing one.
func (lhs *Vec3) Clone() *Vec3 {
return &Vec3{lhs[0], lhs[1], lhs[2]}
}
// Cross calculates the vector cross product. Saves the result into the
// calling vector. Returns itself for function chaining.
func (lhs *Vec3) Cross(rhs *Vec3) *Vec3 {
// Copy the components first: all three outputs read the original values.
a, b, c := lhs[0], lhs[1], lhs[2]
lhs[0] = b*rhs[2] - c*rhs[1]
lhs[1] = c*rhs[0] - a*rhs[2]
lhs[2] = a*rhs[1] - b*rhs[0]
return lhs
}
// Div divides the calling vector by the provided scalar. The result is
// saved back into the calling vector. Returns itself for function chaining.
func (lhs *Vec3) Div(rhs float32) *Vec3 {
lhs[0] /= rhs
lhs[1] /= rhs
lhs[2] /= rhs
return lhs
}
// Len returns the vector length.
func (lhs *Vec3) Len() float32 {
return math32.Sqrt(lhs[0]*lhs[0] + lhs[1]*lhs[1] + lhs[2]*lhs[2])
}
// Mul multiplies the vector with a scalar. Returns itself.
func (lhs *Vec3) Mul(rhs float32) *Vec3 {
lhs[0] *= rhs
lhs[1] *= rhs
lhs[2] *= rhs
return lhs
}
// Normalize normalizes the vector in place. Returns itself for function
// chaining. There is no zero-length guard: normalizing a zero vector
// divides by zero and yields non-finite components.
func (lhs *Vec3) Normalize() *Vec3 {
lhs.Div(lhs.Len())
return lhs
}
// Sub subtracts the provided vector from the calling one. The result is
// saved into the calling vector. Returns itself for function chaining.
func (lhs *Vec3) Sub(rhs *Vec3) *Vec3 {
lhs[0] -= rhs[0]
lhs[1] -= rhs[1]
lhs[2] -= rhs[2]
return lhs
}
// Add adds the provided vector to the calling one. The result is
// saved into the calling vector. Returns itself for function chaining.
func (lhs *Vec3) Add(rhs *Vec3) *Vec3 {
lhs[0] += rhs[0]
lhs[1] += rhs[1]
lhs[2] += rhs[2]
return lhs
}
// MulInner returns the dot (inner) product of the two vectors.
func (lhs *Vec3) MulInner(rhs *Vec3) float32 {
var p float32
p = 0.0
for i := 0; i < 3; i++ {
p += lhs[i] * rhs[i]
}
return p
}
// String returns the components as a space-separated string
// (implements fmt.Stringer).
func (lhs *Vec3) String() string {
return fmt.Sprintf("%f %f %f", lhs[0], lhs[1], lhs[2])
} | vec3/vec3.go | 0.887558 | 0.447279 | vec3.go | starcoder
package jwt
import (
"bytes"
"crypto"
"crypto/rsa"
"crypto/sha256"
"crypto/x509"
"encoding/base64"
"encoding/pem"
"errors"
"fmt"
"time"
)
/*
When validating a JWT, the following steps are performed. The order
of the steps is not significant in cases where there are no
dependencies between the inputs and outputs of the steps. If any of
the listed steps fail, then the JWT MUST be rejected -- that is,
treated by the application as an invalid input.
1. Verify that the JWT contains at least one period ('.')
character.
2. Let the Encoded JOSE Header be the portion of the JWT before the
first period ('.') character.
3. Base64url decode the Encoded JOSE Header following the
restriction that no line breaks, whitespace, or other additional
characters have been used.
4. Verify that the resulting octet sequence is a UTF-8-encoded
representation of a completely valid JSON object conforming to
RFC 7159 [RFC7159]; let the JOSE Header be this JSON object.
5. Verify that the resulting JOSE Header includes only parameters
and values whose syntax and semantics are both understood and
supported or that are specified as being ignored when not
understood.
6. Determine whether the JWT is a JWS or a JWE using any of the
methods described in Section 9 of [JWE].
7. Depending upon whether the JWT is a JWS or JWE, there are two
cases:
* If the JWT is a JWS, follow the steps specified in [JWS] for
validating a JWS. Let the Message be the result of base64url
decoding the JWS Payload.
* Else, if the JWT is a JWE, follow the steps specified in
[JWE] for validating a JWE. Let the Message be the resulting
plaintext.
8. If the JOSE Header contains a "cty" (content type) value of
"JWT", then the Message is a JWT that was the subject of nested
signing or encryption operations. In this case, return to Step
1, using the Message as the JWT.
9. Otherwise, base64url decode the Message following the
restriction that no line breaks, whitespace, or other additional
characters have been used.
10. Verify that the resulting octet sequence is a UTF-8-encoded
representation of a completely valid JSON object conforming to
RFC 7159 [RFC7159]; let the JWT Claims Set be this JSON object.
Finally, note that it is an application decision which algorithms may
be used in a given context. Even if a JWT can be successfully
validated, unless the algorithms used in the JWT are acceptable to
the application, it SHOULD reject the JWT.
*/
// getValidateFunc maps an algorithm type to its validation routine.
// It returns nil for unsupported algorithms.
func getValidateFunc(a AlgorithmType) ValidateFunc {
	switch a {
	case HS256:
		return validateHMAC256
	case RS256:
		return validateRSA256
	case None:
		// Unsigned tokens validate trivially.
		return func(*Token) (bool, error) { return true, nil }
	default:
		return nil
	}
}
// validateHMAC256 re-encodes the token using its key and checks that the
// result is byte-for-byte identical to the raw bytes the token was parsed
// from.
func validateHMAC256(t *Token) (bool, error) {
	encoded, err := t.Encode()
	if err != nil {
		return false, err
	}
	if bytes.Equal(encoded, t.raw) {
		return true, nil
	}
	return false, errors.New("failed to validated token - bytes are not equal")
}
// validateRSA256 checks the token's RSA PKCS #1 v1.5 / SHA-256 signature.
// t.key must hold a PEM-encoded PKCS #1 RSA private key; the signature is
// verified against the corresponding public key.
func validateRSA256(t *Token) (bool, error) {
	block, _ := pem.Decode(t.key)
	if block == nil {
		// pem.Decode returns a nil block when no PEM data is found;
		// dereferencing it below would panic.
		return false, errors.New("failed to decode PEM block from key")
	}
	key, err := x509.ParsePKCS1PrivateKey(block.Bytes)
	if err != nil {
		return false, err
	}
	// The signed message is "<base64 header>.<base64 payload>". Encoding
	// failures were previously discarded; surface them instead.
	headerB64, err := t.Header.ToBase64()
	if err != nil {
		return false, err
	}
	payloadB64, err := t.Payload.ToBase64()
	if err != nil {
		return false, err
	}
	hashed := sha256.Sum256([]byte(fmt.Sprintf("%s.%s", headerB64, payloadB64)))
	decodedSignature, err := base64.RawURLEncoding.DecodeString(string(t.Signature.Raw))
	if err != nil {
		return false, err
	}
	if err := rsa.VerifyPKCS1v15(&key.PublicKey, crypto.SHA256, hashed[:], decodedSignature); err != nil {
		return false, err
	}
	return true, nil
}
func (t *Token) Validate() (bool, error) {
if t.ValidateFunc == nil {
return false, errors.New("unable to verify data without a validating function defined")
}
valid, err := t.ValidateFunc(t)
if err != nil {
return false, err
}
//TODO: Validate more claims
exp, ok := t.Claims[string(ExpirationTime)]; if ok {
claim := exp.(string)
expiration, err := time.Parse(time.RFC3339, claim); if err != nil {
return false, err
}
if expiration.Before(time.Now()) {
return false, errors.New("token has expired")
}
}
return valid, nil
} | validate.go | 0.642881 | 0.542984 | validate.go | starcoder |
package staticarray
import (
"github.com/influxdata/flux/array"
"github.com/influxdata/flux/memory"
"github.com/influxdata/flux/semantic"
)
// strings is an array.String backed by a plain Go slice. alloc is the
// allocator that accounted for the backing memory; it is nil for arrays
// wrapped directly around a caller-owned slice (see String).
type strings struct {
data []string
alloc *memory.Allocator
}
// String wraps an existing slice in an array.String without copying.
// The result has no allocator, so Free releases no accounted memory.
func String(data []string) array.String {
return &strings{data: data}
}
func (a *strings) Type() semantic.Type {
return semantic.String
}
// IsNull always reports false: this implementation has no null support.
func (a *strings) IsNull(i int) bool {
return false
}
// IsValid reports whether i is a valid index into the array.
func (a *strings) IsValid(i int) bool {
return i >= 0 && i < len(a.data)
}
func (a *strings) Len() int {
return len(a.data)
}
// NullN is always 0 (no null support).
func (a *strings) NullN() int {
return 0
}
func (a *strings) Value(i int) string {
return a.data[i]
}
func (a *strings) Copy() array.Base {
panic("implement me")
}
// Free returns the accounted memory to the allocator (if any) and drops
// the backing slice.
func (a *strings) Free() {
if a.alloc != nil {
a.alloc.Free(cap(a.data) * stringSize)
}
a.data = nil
}
func (a *strings) Slice(start, stop int) array.BaseRef {
return a.StringSlice(start, stop)
}
// StringSlice returns a view over [start, stop); the backing array is
// shared with the receiver, not copied.
func (a *strings) StringSlice(start, stop int) array.StringRef {
return &strings{data: a.data[start:stop]}
}
// StringValues exposes the backing slice directly (no copy).
func (a *strings) StringValues() []string {
return a.data
}
// StringBuilder returns an array.StringBuilder that accounts its backing
// memory against the given allocator.
func StringBuilder(a *memory.Allocator) array.StringBuilder {
return &stringBuilder{alloc: a}
}
// stringBuilder incrementally accumulates values for a strings array.
type stringBuilder struct {
data []string
alloc *memory.Allocator
}
func (b *stringBuilder) Type() semantic.Type {
return semantic.String
}
// Len reports the number of values appended so far.
func (b *stringBuilder) Len() int {
return len(b.data)
}
// Cap reports the capacity of the backing slice.
func (b *stringBuilder) Cap() int {
return cap(b.data)
}
// Reserve ensures capacity for at least n more elements beyond the current
// length, reallocating (and re-accounting the memory with the allocator)
// when needed.
func (b *stringBuilder) Reserve(n int) {
	// The original computed newCap twice, shadowing this variable inside
	// the if-statement; compute it once.
	newCap := len(b.data) + n
	if newCap <= cap(b.data) {
		return
	}
	if err := b.alloc.Allocate(newCap * stringSize); err != nil {
		panic(err)
	}
	data := make([]string, len(b.data), newCap)
	copy(data, b.data)
	// Return the old backing array's accounting before swapping it out.
	b.alloc.Free(cap(b.data) * stringSize)
	b.data = data
}
func (b *stringBuilder) BuildArray() array.Base {
return b.BuildStringArray()
}
func (b *stringBuilder) Free() {
panic("implement me")
}
// Append adds one value, growing the backing slice via Reserve (doubling,
// like built-in append) when it is full.
func (b *stringBuilder) Append(v string) {
if len(b.data) == cap(b.data) {
// Grow the slice in the same way as built-in append.
n := len(b.data)
if n == 0 {
n = 2
}
b.Reserve(n)
}
b.data = append(b.data, v)
}
func (b *stringBuilder) AppendNull() {
// The staticarray does not support nulls so it will do the current behavior of just appending
// the zero value.
b.Append("")
}
// AppendValues bulk-appends v. The valid bitmaps are accepted for interface
// compatibility but ignored (no null support in this implementation).
func (b *stringBuilder) AppendValues(v []string, valid ...[]bool) {
if newCap := len(b.data) + len(v); newCap > cap(b.data) {
b.Reserve(newCap - cap(b.data))
}
b.data = append(b.data, v...)
}
// BuildStringArray finalizes the builder into a strings array. The backing
// slice is handed off to the array, not copied.
func (b *stringBuilder) BuildStringArray() array.String {
return &strings{
data: b.data,
alloc: b.alloc,
}
} | internal/staticarray/string.go | 0.624752 | 0.460774 | string.go | starcoder
package rbtree
// This file contains all RB tree search methods implementations
// Search searches value specified within search tree
func (tree *rbTree) Search(value Comparable) (Comparable, bool) {
n, ok := tree.SearchNode(value)
if !ok {
return nil, ok
}
return n.key, ok
}
func (tree *rbTree) Floor(value Comparable) (Comparable, bool) {
if tree.root.isNil() {
return nil, false
}
n, ok := tree.root.floor(value)
if !ok {
return nil, ok
}
return n.key, ok
}
func (tree *rbTree) Ceiling(value Comparable) (Comparable, bool) {
if tree.root.isNil() {
return nil, false
}
n, ok := tree.root.ceiling(value)
if !ok {
return nil, ok
}
return n.key, ok
}
func (tree *rbTree) SearchAll(value Comparable) []Comparable {
var result []Comparable
n, ok := tree.SearchNode(value)
if ok {
result = append(result, n.key)
s := n.Successor()
for s.isNotNil() && s.key.Equal(value) {
result = append(result, s.key)
s = s.Successor()
}
}
return result
}
// SearchNode searches *Node which key is equals value specified
func (tree *rbTree) SearchNode(value Comparable) (*Node, bool) {
if tree.root.isNil() {
return nil, false
}
n, ok := tree.root.search(value)
if !ok {
return nil, ok
}
return n, ok
}
// search walks the subtree rooted at n for a node whose key equals value.
// It returns (nil, false) when value is nil or no matching node exists.
func (n *Node) search(value Comparable) (*Node, bool) {
	if value == nil {
		return nil, false
	}
	current := n
	// Standard BST descent: go left when the target is smaller, right
	// otherwise, stopping on equality or a nil leaf.
	for current.isNotNil() && !value.Equal(current.key) {
		if value.Less(current.key) {
			current = current.left
		} else {
			current = current.right
		}
	}
	if current.isNil() {
		return nil, false
	}
	return current, true
}
func (n *Node) floor(value Comparable) (*Node, bool) {
if value == nil {
return nil, false
}
var min *Node
var x *Node
x = n
for x.isNotNil() && !value.Equal(x.key) {
if value.Less(x.key) {
if min.isNil() && x.left.isNil() {
min = x
}
x = x.left
} else {
min = x
x = x.right
}
}
if x.isNotNil() {
return x, true
}
return min, true
}
func (n *Node) ceiling(value Comparable) (*Node, bool) {
if value == nil {
return nil, false
}
var max *Node
var x *Node
x = n
for x.isNotNil() && !value.Equal(x.key) {
if value.Less(x.key) {
max = x
x = x.left
} else {
if max.isNil() && x.right.isNil() {
max = x
}
x = x.right
}
}
if x.isNotNil() {
return x, true
}
return max, true
}
// Minimum gets tree's min element
func (tree *rbTree) Minimum() *Node {
if tree.root.isNil() {
return nil
}
return tree.root.minimum()
}
func (n *Node) minimum() *Node {
x := n
for x.isNotNil() && x.left.isNotNil() {
x = x.left
}
return x
}
// Maximum gets tree's max element
func (tree *rbTree) Maximum() *Node {
if tree.root.isNil() {
return nil
}
return tree.root.maximum()
}
func (n *Node) maximum() *Node {
x := n
for x.isNotNil() && x.right.isNotNil() {
x = x.right
}
return x
}
// Successor gets Node's successor
// (the node with the smallest key greater than n's, or nil if none exists).
func (n *Node) Successor() *Node {
if n.isNil() {
return nil
}
x := n
// With a right subtree, the successor is that subtree's minimum.
if x.right.isNotNil() {
return x.right.minimum()
}
// Otherwise walk up until we leave a left subtree.
y := x.parent
for y.isNotNil() && x == y.right {
x = y
y = y.parent
}
if y.isNil() {
return nil
}
return y
}
// Predecessor gets Node's predecessor
// (the node with the largest key smaller than n's, or nil if none exists).
func (n *Node) Predecessor() *Node {
if n.isNil() {
return nil
}
x := n
// With a left subtree, the predecessor is that subtree's maximum.
if x.left.isNotNil() {
return x.left.maximum()
}
// Otherwise walk up until we leave a right subtree.
y := x.parent
for y.isNotNil() && x == y.left {
x = y
y = y.parent
}
if y.isNil() {
return nil
}
return y
}
// OrderStatisticSelect gets i element from subtree
// IMPORTANT: numeration starts from 1 not from 0
func (tree *rbTree) OrderStatisticSelect(i int64) (*Node, bool) {
if tree.root.isNil() {
return nil, false
}
x := tree.root
// r is the 1-based rank of x within the current subtree:
// size of its left subtree plus one.
r := x.left.size + 1
for i != r {
if i < r {
x = x.left
} else {
// Skip past the left subtree and x itself, then descend right.
i = i - r
x = x.right
}
// NOTE(review): this guard uses `x.left == nil` while the rest of the
// file uses isNil()/isNotNil(); if the tree uses a sentinel node the
// two checks may differ -- confirm they are equivalent here.
if x.left == nil {
return nil, false
}
r = x.left.size + 1
}
return x, true
} | rbtree/search.go | 0.784526 | 0.410166 | search.go | starcoder
package metric
import (
"fmt"
"github.com/codahale/hdrhistogram"
"strconv"
"sync/atomic"
"time"
)
type Histogram struct {
name string
pName string
histo *hdrhistogram.WindowedHistogram
overflowCount int64
}
type histogramExport struct {
Min float64
P50 float64
P95 float64
P99 float64
Max float64
Avg float64
Samples int64
}
func newHistogram(name string, pName string, max int64, n int) *Histogram {
return &Histogram{
name: name,
pName: prometheusName(pName),
histo: hdrhistogram.NewWindowed(n, 0, max, 1),
}
}
func (h *Histogram) Name() string {
return h.name
}
// RecordSince records the elapsed time since t, in nanoseconds, into the
// current histogram window. A value outside the histogram's configured
// range bumps the overflow counter instead of being recorded.
func (h *Histogram) RecordSince(t time.Time) {
	// Duration.Nanoseconds already returns an int64; the original code
	// applied a redundant int64 conversion.
	if err := h.histo.Current.RecordValue(time.Since(t).Nanoseconds()); err != nil {
		atomic.AddInt64(&h.overflowCount, 1)
	}
}

// Record records a raw measurement (in nanoseconds) into the current
// histogram window. Out-of-range values bump the overflow counter.
func (h *Histogram) Record(measurement int64) {
	if err := h.histo.Current.RecordValue(measurement); err != nil {
		atomic.AddInt64(&h.overflowCount, 1)
	}
}
func (h *Histogram) CurrentSamples() int64 {
histo := h.histo.Current
return histo.TotalCount()
}
func (h *Histogram) Value() interface{} {
return nil
}
func (h *Histogram) Export() interface{} {
histo := h.histo.Merge()
return &histogramExport{
toMillis(histo.Min()),
toMillis(histo.ValueAtQuantile(50)),
toMillis(histo.ValueAtQuantile(95)),
toMillis(histo.ValueAtQuantile(99)),
toMillis(histo.Max()),
floatToMillis(histo.Mean()),
histo.TotalCount(),
}
}
func (h *Histogram) Rotate() {
h.histo.Rotate()
}
// Note: in real life we have labels
// this is here because there is a different implementation for races
func (h *Histogram) exportPrometheus(labelString string) string {
histo := h.histo.Merge()
prometheusName := h.pName
typeRow := prometheusType(prometheusName, "histogram")
valueMinRow := fmt.Sprintf("%s{%s,aggregation=\"min\"} %s\n", prometheusName, labelString, strconv.FormatFloat(toMillis(histo.Min()), 'f', -1, 64))
valueMeanRow := fmt.Sprintf("%s{%s,aggregation=\"median\"} %s\n", prometheusName, labelString, strconv.FormatFloat(toMillis(histo.ValueAtQuantile(50)), 'f', -1, 64))
value95Row := fmt.Sprintf("%s{%s,aggregation=\"95p\"} %s\n", prometheusName, labelString, strconv.FormatFloat(toMillis(histo.ValueAtQuantile(95)), 'f', -1, 64))
value99Row := fmt.Sprintf("%s{%s,aggregation=\"99p\"} %s\n", prometheusName, labelString, strconv.FormatFloat(toMillis(histo.ValueAtQuantile(99)), 'f', -1, 64))
valueMaxRow := fmt.Sprintf("%s{%s,aggregation=\"max\"} %s\n", prometheusName, labelString, strconv.FormatFloat(toMillis(histo.Max()), 'f', -1, 64))
valueAvgRow := fmt.Sprintf("%s{%s,aggregation=\"avg\"} %s\n", prometheusName, labelString, strconv.FormatFloat(floatToMillis(histo.Mean()), 'f', -1, 64))
valueCountRow := fmt.Sprintf("%s{%s,aggregation=\"count\"} %s\n", prometheusName, labelString, strconv.FormatInt(histo.TotalCount(), 10))
return typeRow + valueMinRow + valueMeanRow + value95Row + value99Row + valueMaxRow + valueAvgRow + valueCountRow
}
// toMillis converts a nanosecond count to fractional milliseconds.
func toMillis(nanoseconds int64) float64 {
	return floatToMillis(float64(nanoseconds))
}

// floatToMillis converts nanoseconds (as a float) to fractional milliseconds.
func floatToMillis(nanoseconds float64) float64 {
	const nanosPerMilli = 1e6
	return nanoseconds / nanosPerMilli
}
package ns
/**
* Configuration for variable resource.
*/
type Nsvariable struct {
/**
* Variable name. This follows the same syntax rules as other expression entity names:
It must begin with an alpha character (A-Z or a-z) or an underscore (_).
The rest of the characters must be alpha, numeric (0-9) or underscores.
It cannot be re or xp (reserved for regular and XPath expressions).
It cannot be an expression reserved word (e.g. SYS or HTTP).
It cannot be used for an existing expression object (HTTP callout, patset, dataset, stringmap, or named expression).
*/
Name string `json:"name,omitempty"`
/**
* Specification of the variable type; one of the following:
ulong - singleton variable with an unsigned 64-bit value.
text(value-max-size) - singleton variable with a text string value.
map(text(key-max-size),ulong,max-entries) - map of text string keys to unsigned 64-bit values.
map(text(key-max-size),text(value-max-size),max-entries) - map of text string keys to text string values.
where
value-max-size is a positive integer that is the maximum number of bytes in a text string value.
key-max-size is a positive integer that is the maximum number of bytes in a text string key.
max-entries is a positive integer that is the maximum number of entries in a map variable.
For a global singleton text variable, value-max-size <= 64000.
For a global map with ulong values, key-max-size <= 64000.
For a global map with text values, key-max-size + value-max-size <= 64000.
max-entries is a positive integer that is the maximum number of entries in a map variable. This has a theoretical maximum of 2^64-1, but in actual use will be much smaller, considering the memory available for use by the map.
Example:
map(text(10),text(20),100) specifies a map of text string keys (max size 10 bytes) to text string values (max size 20 bytes), with 100 max entries.
*/
Type string `json:"type,omitempty"`
/**
* Scope of the variable:
global - (default) one set of values visible across all Packet Engines on a standalone Citrix ADC, an HA pair, or all nodes of a cluster
transaction - one value for each request-response transaction (singleton variables only; no expiration)
*/
Scope string `json:"scope,omitempty"`
/**
* Action to perform if an assignment to a map exceeds its configured max-entries:
lru - (default) reuse the least recently used entry in the map.
undef - force the assignment to return an undefined (Undef) result to the policy executing the assignment.
*/
Iffull string `json:"iffull,omitempty"`
/**
* Action to perform if an value is assigned to a text variable that exceeds its configured max-size,
or if a key is used that exceeds its configured max-size:
truncate - (default) truncate the text string to the first max-size bytes and proceed.
undef - force the assignment or expression evaluation to return an undefined (Undef) result to the policy executing the assignment or expression.
*/
Ifvaluetoobig string `json:"ifvaluetoobig,omitempty"`
/**
* Action to perform if on a variable reference in an expression if the variable is single-valued and uninitialized
or if the variable is a map and there is no value for the specified key:
init - (default) initialize the single-value variable, or create a map entry for the key and the initial value,
using the -init value or its default.
undef - force the expression evaluation to return an undefined (Undef) result to the policy executing the expression.
*/
Ifnovalue string `json:"ifnovalue,omitempty"`
/**
* Initialization value for this variable, to which a singleton variable or map entry will be set if it is referenced before an assignment action has assigned it a value. If the singleton variable or map entry already has been assigned a value, setting this parameter will have no effect on that variable value. Default: 0 for ulong, NULL for text
*/
Init string `json:"init,omitempty"`
/**
* Value expiration in seconds. If the value is not referenced within the expiration period it will be deleted. 0 (the default) means no expiration.
*/
Expires int `json:"expires,omitempty"`
/**
* Comments associated with this variable.
*/
Comment string `json:"comment,omitempty"`
//------- Read only Parameter ---------;
Referencecount string `json:"referencecount,omitempty"`
} | resource/config/ns/nsvariable.go | 0.77081 | 0.557062 | nsvariable.go | starcoder |
package opt
// This file implements conversion of scalar expressions to tree.TypedExpr
import (
"fmt"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
)
var typedExprConvMap [numOperators]func(c *typedExprConvCtx, e *Expr) tree.TypedExpr
func init() {
// This code is not inline to avoid an initialization loop error (some of the
// functions depend on scalarToTypedExpr which depends on typedExprConvMap).
typedExprConvMap = [numOperators]func(c *typedExprConvCtx, e *Expr) tree.TypedExpr{
constOp: constOpToTypedExpr,
variableOp: variableOpToTypedExpr,
andOp: boolOpToTypedExpr,
orOp: boolOpToTypedExpr,
notOp: boolOpToTypedExpr,
unaryPlusOp: unaryOpToTypedExpr,
unaryMinusOp: unaryOpToTypedExpr,
unaryComplementOp: unaryOpToTypedExpr,
eqOp: comparisonOpToTypedExpr,
ltOp: comparisonOpToTypedExpr,
gtOp: comparisonOpToTypedExpr,
leOp: comparisonOpToTypedExpr,
geOp: comparisonOpToTypedExpr,
neOp: comparisonOpToTypedExpr,
inOp: comparisonOpToTypedExpr,
notInOp: comparisonOpToTypedExpr,
likeOp: comparisonOpToTypedExpr,
notLikeOp: comparisonOpToTypedExpr,
iLikeOp: comparisonOpToTypedExpr,
notILikeOp: comparisonOpToTypedExpr,
similarToOp: comparisonOpToTypedExpr,
notSimilarToOp: comparisonOpToTypedExpr,
regMatchOp: comparisonOpToTypedExpr,
notRegMatchOp: comparisonOpToTypedExpr,
regIMatchOp: comparisonOpToTypedExpr,
notRegIMatchOp: comparisonOpToTypedExpr,
isOp: comparisonOpToTypedExpr,
isNotOp: comparisonOpToTypedExpr,
containsOp: comparisonOpToTypedExpr,
containedByOp: comparisonOpToTypedExpr,
jsonExistsOp: comparisonOpToTypedExpr,
jsonAllExistsOp: comparisonOpToTypedExpr,
jsonSomeExistsOp: comparisonOpToTypedExpr,
anyOp: comparisonOpToTypedExpr,
someOp: comparisonOpToTypedExpr,
allOp: comparisonOpToTypedExpr,
bitandOp: binaryOpToTypedExpr,
bitorOp: binaryOpToTypedExpr,
bitxorOp: binaryOpToTypedExpr,
plusOp: binaryOpToTypedExpr,
minusOp: binaryOpToTypedExpr,
multOp: binaryOpToTypedExpr,
divOp: binaryOpToTypedExpr,
floorDivOp: binaryOpToTypedExpr,
modOp: binaryOpToTypedExpr,
powOp: binaryOpToTypedExpr,
concatOp: binaryOpToTypedExpr,
lShiftOp: binaryOpToTypedExpr,
rShiftOp: binaryOpToTypedExpr,
jsonFetchValOp: binaryOpToTypedExpr,
jsonFetchTextOp: binaryOpToTypedExpr,
jsonFetchValPathOp: binaryOpToTypedExpr,
jsonFetchTextPathOp: binaryOpToTypedExpr,
tupleOp: tupleOpToTypedExpr,
unsupportedScalarOp: unsupportedScalarOpToTypedExpr,
}
}
type typedExprConvCtx struct {
ivh *tree.IndexedVarHelper
// varToIndexedVar is a map used when converting a variableOp into an
// IndexedVar. It is optional: if it is nil, a 1-to-1 mapping is assumed.
varToIndexedVar columnMap
}
// constOpToTypedExpr converts a constOp; its private payload is already a
// tree.Datum, so it is returned directly.
func constOpToTypedExpr(c *typedExprConvCtx, e *Expr) tree.TypedExpr {
return e.private.(tree.Datum)
}
// variableOpToTypedExpr converts a variableOp into an IndexedVar. When
// c.varToIndexedVar is empty a 1-to-1 column-to-IndexedVar mapping is
// assumed; otherwise the column index must be present in the map.
func variableOpToTypedExpr(c *typedExprConvCtx, e *Expr) tree.TypedExpr {
col := e.private.(*columnProps)
var idx int
if c.varToIndexedVar.Empty() {
idx = col.index
} else {
var ok bool
idx, ok = c.varToIndexedVar.Get(col.index)
if !ok {
panic(fmt.Sprintf("missing variable-IndexedVar mapping for %d", col.index))
}
}
return c.ivh.IndexedVar(idx)
}
func boolOpToTypedExpr(c *typedExprConvCtx, e *Expr) tree.TypedExpr {
switch e.op {
case andOp, orOp:
n := scalarToTypedExpr(c, e.children[0])
for _, child := range e.children[1:] {
m := scalarToTypedExpr(c, child)
if e.op == andOp {
n = tree.NewTypedAndExpr(n, m)
} else {
n = tree.NewTypedOrExpr(n, m)
}
}
return n
case notOp:
return tree.NewTypedNotExpr(scalarToTypedExpr(c, e.children[0]))
default:
panic(fmt.Sprintf("invalid op %s", e.op))
}
}
func tupleOpToTypedExpr(c *typedExprConvCtx, e *Expr) tree.TypedExpr {
if isTupleOfConstants(e) {
datums := make(tree.Datums, len(e.children))
for i, child := range e.children {
datums[i] = constOpToTypedExpr(c, child).(tree.Datum)
}
return tree.NewDTuple(datums...)
}
children := make([]tree.TypedExpr, len(e.children))
for i, child := range e.children {
children[i] = scalarToTypedExpr(c, child)
}
return tree.NewTypedTuple(children)
}
func unaryOpToTypedExpr(c *typedExprConvCtx, e *Expr) tree.TypedExpr {
return tree.NewTypedUnaryExpr(
unaryOpReverseMap[e.op],
scalarToTypedExpr(c, e.children[0]),
e.scalarProps.typ,
)
}
func comparisonOpToTypedExpr(c *typedExprConvCtx, e *Expr) tree.TypedExpr {
return tree.NewTypedComparisonExprWithSubOp(
comparisonOpReverseMap[e.op],
comparisonOpReverseMap[e.subOperator],
scalarToTypedExpr(c, e.children[0]),
scalarToTypedExpr(c, e.children[1]),
)
}
func binaryOpToTypedExpr(c *typedExprConvCtx, e *Expr) tree.TypedExpr {
return tree.NewTypedBinaryExpr(
binaryOpReverseMap[e.op],
scalarToTypedExpr(c, e.children[0]),
scalarToTypedExpr(c, e.children[1]),
e.scalarProps.typ,
)
}
func unsupportedScalarOpToTypedExpr(c *typedExprConvCtx, e *Expr) tree.TypedExpr {
return e.private.(tree.TypedExpr)
}
func scalarToTypedExpr(c *typedExprConvCtx, e *Expr) tree.TypedExpr {
if fn := typedExprConvMap[e.op]; fn != nil {
return fn(c, e)
}
panic(fmt.Sprintf("unsupported op %s", e.op))
} | pkg/sql/opt/typed_expr.go | 0.78287 | 0.622086 | typed_expr.go | starcoder |
package pc
import (
"github.com/cadmean-ru/amphion/common/a"
"github.com/cadmean-ru/amphion/engine"
"github.com/cadmean-ru/amphion/rendering"
"github.com/go-gl/gl/v4.1-core/gl"
)
type TriangleRenderer struct {
*glPrimitiveRenderer
}
// OnStart compiles and links the GL shader program used to draw triangles.
func (r *TriangleRenderer) OnStart() {
r.program = NewGlProgram(ShapeVertexShaderStr, TriangleFragShaderStr, "triangle")
r.program.CompileAndLink()
}
func (r *TriangleRenderer) OnRender(ctx *rendering.PrimitiveRenderingContext) {
r.glPrimitiveRenderer.OnRender(ctx)
gp := ctx.Primitive.(*rendering.GeometryPrimitive)
state := ctx.State.(*glPrimitiveState)
state.gen()
if ctx.Redraw {
gl.BindVertexArray(state.vao)
wSize := engine.GetScreenSize3()
ntlPos := gp.Transform.Position.Ndc(wSize)
nbrPos := gp.Transform.Position.Add(gp.Transform.Size).Ndc(wSize)
midX := ntlPos.X + ((nbrPos.X - ntlPos.X) / 2)
color := gp.Appearance.FillColor
r1 := float32(color.R)
g1 := float32(color.G)
b1 := float32(color.B)
a1 := float32(color.A)
strokeColor := gp.Appearance.StrokeColor
r2 := float32(strokeColor.R)
g2 := float32(strokeColor.G)
b2 := float32(strokeColor.B)
a2 := float32(strokeColor.A)
var stroke = a.NewIntVector3(int(gp.Appearance.StrokeWeight), int(gp.Appearance.StrokeWeight), int(gp.Appearance.StrokeWeight))
var nStroke = stroke.Ndc(wSize).Add(a.OneVector())
vertices := []float32 {
ntlPos.X, nbrPos.Y, 0, ntlPos.X, ntlPos.Y, 0, nbrPos.X, nbrPos.Y, 0, r1, g1, b1, a1, nStroke.X, r2, g2, b2, a2, 0,
midX, ntlPos.Y, 0, ntlPos.X, ntlPos.Y, 0, nbrPos.X, nbrPos.Y, 0, r1, g1, b1, a1, nStroke.X, r2, g2, b2, a2, 0,
nbrPos.X, nbrPos.Y, 0, ntlPos.X, ntlPos.Y, 0, nbrPos.X, nbrPos.Y, 0, r1, g1, b1, a1, nStroke.X, r2, g2, b2, a2, 0,
}
indices := []uint32 {
0, 1, 2,
}
const stride int32 = 76
gl.BindBuffer(gl.ARRAY_BUFFER, state.vbo)
gl.BufferData(gl.ARRAY_BUFFER, len(vertices)*4, gl.Ptr(vertices), gl.STATIC_DRAW)
gl.BindBuffer(gl.ELEMENT_ARRAY_BUFFER, state.ebo)
gl.BufferData(gl.ELEMENT_ARRAY_BUFFER, len(indices)*4, gl.Ptr(indices), gl.STATIC_DRAW)
gl.VertexAttribPointer(0, 3, gl.FLOAT, false, stride, nil)
gl.EnableVertexAttribArray(0)
gl.VertexAttribPointer(1, 3, gl.FLOAT, false, stride, gl.PtrOffset(12))
gl.EnableVertexAttribArray(1)
gl.VertexAttribPointer(2, 3, gl.FLOAT, false, stride, gl.PtrOffset(24))
gl.EnableVertexAttribArray(2)
gl.VertexAttribPointer(3, 4, gl.FLOAT, false, stride, gl.PtrOffset(36))
gl.EnableVertexAttribArray(3)
gl.VertexAttribPointer(4, 1, gl.FLOAT, false, stride, gl.PtrOffset(52))
gl.EnableVertexAttribArray(4)
gl.VertexAttribPointer(5, 4, gl.FLOAT, false, stride, gl.PtrOffset(56))
gl.EnableVertexAttribArray(5)
gl.VertexAttribPointer(6, 1, gl.FLOAT, false, stride, gl.PtrOffset(72))
gl.EnableVertexAttribArray(6)
gl.BindBuffer(gl.ARRAY_BUFFER, 0)
gl.BindVertexArray(0)
}
gl.BindVertexArray(state.vao)
gl.DrawElements(gl.TRIANGLES, 3, gl.UNSIGNED_INT, nil)
} | frontend/pc/triangle.go | 0.564459 | 0.470372 | triangle.go | starcoder |
package geometry
import (
"math"
"github.com/gonum/matrix/mat64"
)
// TransMat is the Transformation Matrix representation.
type TransMat struct {
mat64.Dense
}
// NewTransMat creates a new Transformation Matrix which an 4x4 Idendity.
func NewTransMat() *TransMat {
data := []float64{
1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1,
}
mat := mat64.NewDense(4, 4, data)
tmat := &TransMat{}
tmat.Clone(mat)
return tmat
}
// applyRight right-multiplies tmat by the 4x4 matrix described by data
// (row-major), i.e. tmat = tmat * M(data). All four mutators below shared
// this identical clone-and-multiply body; it is factored out here.
func (tmat *TransMat) applyRight(data []float64) {
	mat := mat64.NewDense(4, 4, data)
	prev := &TransMat{}
	prev.Clone(tmat)
	tmat.Mul(prev, mat)
}

// XRotation applies a X-Axis rotation.
func (tmat *TransMat) XRotation(angle float64) {
	cosine := math.Cos(angle)
	sine := math.Sin(angle)
	tmat.applyRight([]float64{
		1, 0, 0, 0,
		0, cosine, -sine, 0,
		0, sine, cosine, 0,
		0, 0, 0, 1,
	})
}

// YRotation applies a Y-Axis rotation.
func (tmat *TransMat) YRotation(angle float64) {
	cosine := math.Cos(angle)
	sine := math.Sin(angle)
	tmat.applyRight([]float64{
		cosine, 0, sine, 0,
		0, 1, 0, 0,
		-sine, 0, cosine, 0,
		0, 0, 0, 1,
	})
}

// ZRotation applies a Z-Axis rotation.
func (tmat *TransMat) ZRotation(angle float64) {
	cosine := math.Cos(angle)
	sine := math.Sin(angle)
	tmat.applyRight([]float64{
		cosine, -sine, 0, 0,
		sine, cosine, 0, 0,
		0, 0, 1, 0,
		0, 0, 0, 1,
	})
}

// Translation applies a Translation.
func (tmat *TransMat) Translation(dX, dY, dZ float64) {
	tmat.applyRight([]float64{
		1, 0, 0, dX,
		0, 1, 0, dY,
		0, 0, 1, dZ,
		0, 0, 0, 1,
	})
}
// Transform applies the accumulated Transformation to a Vector.
// The 3-vector is lifted to homogeneous coordinates (w = 1), multiplied by
// the 4x4 matrix, and the first three components of the result are
// returned.
func (tmat *TransMat) Transform(vec *mat64.Vector) *mat64.Vector {
data := []float64{
vec.At(0, 0),
vec.At(1, 0),
vec.At(2, 0),
1,
}
g := mat64.NewVector(4, data)
v := &mat64.Vector{}
v.MulVec(tmat, g)
// Drop the homogeneous w component.
transVec := v.SliceVec(0, 3)
return transVec
} | matrix3.go | 0.814533 | 0.638765 | matrix3.go | starcoder
package toscalib
// RequirementRelationshipType defines the Relationship type of a Requirement Definition
type RequirementRelationshipType struct {
Type string `yaml:"type" json:"type"`
Interfaces map[string]InterfaceDefinition `yaml:"interfaces,omitempty" json:"interfaces"`
}
// UnmarshalYAML is used to match both Simple Notation Example and Full Notation Example
func (r *RequirementRelationshipType) UnmarshalYAML(unmarshal func(interface{}) error) error {
	// Short notation: the relationship is a bare type-name string.
	var shortForm string
	if err := unmarshal(&shortForm); err == nil {
		r.Type = shortForm
		return nil
	}
	// Full notation: a mapping with an explicit type and optional
	// interface definitions.
	var longForm struct {
		Type       string                         `yaml:"type" json:"type"`
		Interfaces map[string]InterfaceDefinition `yaml:"interfaces,omitempty" json:"interfaces"`
	}
	if err := unmarshal(&longForm); err != nil {
		return err
	}
	r.Type = longForm.Type
	r.Interfaces = longForm.Interfaces
	return nil
}
// RequirementDefinition as described in Appendix 6.2 of the TOSCA
// Simple Profile specification. It accepts both the short notation (a
// capability type name) and the full map notation via its custom
// UnmarshalYAML.
type RequirementDefinition struct {
	Capability string `yaml:"capability" json:"capability"` // The required reserved keyname used that can be used to provide the name of a valid Capability Type that can fulfil the requirement
	Node string `yaml:"node,omitempty" json:"node,omitempty"` // The optional reserved keyname used to provide the name of a valid Node Type that contains the capability definition that can be used to fulfil the requirement
	Relationship RequirementRelationshipType `yaml:"relationship" json:"relationship,omitempty"` // The optional Relationship Type (and interfaces) for this requirement
	Occurrences ToscaRange `yaml:"occurrences,omitempty" json:"occurrences,omitempty"` // The optional minimum and maximum occurrences for the requirement. Note: the keyword UNBOUNDED is also supported to represent any positive integer
}
// UnmarshalYAML decodes a requirement definition from either the short
// notation (the whole node is a plain string holding the capability
// name) or the full map notation ("capability", "node", "relationship",
// "occurrences").
func (r *RequirementDefinition) UnmarshalYAML(unmarshal func(interface{}) error) error {
	// Short notation first: the node is just the capability name.
	var capability string
	if err := unmarshal(&capability); err == nil {
		r.Capability = capability
		return nil
	}
	// Full notation: decode through a local alias type. The alias keeps
	// the struct's yaml tags but not this UnmarshalYAML method, so the
	// call below cannot recurse, and the field list can never drift out
	// of sync with the real struct.
	type plain RequirementDefinition
	var full plain
	if err := unmarshal(&full); err != nil {
		return err
	}
	*r = RequirementDefinition(full)
	return nil
}
// RequirementRelationship is the list of recognized keynames for a
// TOSCA requirement assignment’s relationship keyname which is used
// when Property assignments need to be provided to inputs of declared
// interfaces or their operations. It accepts both the short (plain
// string) and the extended (map) YAML notations via its custom
// UnmarshalYAML.
type RequirementRelationship struct {
	Type string `yaml:"type" json:"type"` // The optional reserved keyname used to provide the name of the Relationship Type for the requirement assignment’s relationship keyname.
	Interfaces map[string]InterfaceDefinition `yaml:"interfaces,omitempty" json:"interfaces,omitempty"` // The optional reserved keyname used to reference declared (named) interface definitions of the corresponding Relationship Type in order to provide Property assignments for these interfaces or operations of these interfaces.
	Properties map[string]PropertyAssignment `yaml:"properties" json:"properties"` // The optional list of property assignments for the relationship.
}
// UnmarshalYAML decodes a requirement relationship from either the
// short notation (the whole node is a plain string holding the type
// name) or the full map notation ("type", "interfaces", "properties").
func (r *RequirementRelationship) UnmarshalYAML(unmarshal func(interface{}) error) error {
	// Short notation first: the node is just the type name.
	var rtype string
	if err := unmarshal(&rtype); err == nil {
		r.Type = rtype
		return nil
	}
	// Full notation: decode through a local alias type. The alias keeps
	// the struct's yaml tags but not this UnmarshalYAML method, so the
	// call below cannot recurse, and the field list can never drift out
	// of sync with the real struct.
	type plain RequirementRelationship
	var full plain
	if err := unmarshal(&full); err != nil {
		return err
	}
	*r = RequirementRelationship(full)
	return nil
}
// RequirementAssignment as described in Appendix 7.2 of the TOSCA
// Simple Profile specification. It accepts both the short notation (a
// node name) and the full map notation via its custom UnmarshalYAML.
type RequirementAssignment struct {
	Capability string `yaml:"capability,omitempty" json:"capability,omitempty"` /* The optional reserved keyname used to provide the name of either a:
	- Capability definition within a target node template that can fulfill the requirement.
	- Capability Type that the provider will use to select a type-compatible target node template to fulfill the requirement at runtime. */
	Node string `yaml:"node,omitempty" json:"node,omitempty"` /* The optional reserved keyname used to identify the target node of a relationship. Specifically, it is used to provide either a:
	- Node Template name that can fulfil the target node requirement.
	- Node Type name that the provider will use to select a type-compatible node template to fulfil the requirement at runtime. */
	Nodefilter NodeFilter `yaml:"node_filter,omitempty" json:"node_filter,omitempty"` // The optional filter definition that TOSCA orchestrators or providers would use to select a type-compatible target node that can fulfill the associated abstract requirement at runtime.
	/* The following is the list of recognized keynames for a TOSCA requirement assignment’s relationship keyname which is used when Property assignments need to be provided to inputs of declared interfaces or their operations: */
	Relationship RequirementRelationship `yaml:"relationship,omitempty" json:"relationship,omitempty"`
}
// UnmarshalYAML decodes a requirement assignment from either the short
// notation (the whole node is a plain string, taken as the target node
// name) or the full map notation ("capability", "node", "node_filter",
// "relationship").
func (r *RequirementAssignment) UnmarshalYAML(unmarshal func(interface{}) error) error {
	// Short notation first: the node is just the target node name.
	// Note that it populates Node, not Capability.
	var node string
	if err := unmarshal(&node); err == nil {
		r.Node = node
		return nil
	}
	// Full notation: decode through a local alias type. The alias keeps
	// the struct's yaml tags but not this UnmarshalYAML method, so the
	// call below cannot recurse, and the field list can never drift out
	// of sync with the real struct.
	type plain RequirementAssignment
	var full plain
	if err := unmarshal(&full); err != nil {
		return err
	}
	*r = RequirementAssignment(full)
	return nil
}
func (r *RequirementAssignment) extendFrom(rd RequirementDefinition) {
if r.Capability == "" {
r.Capability = rd.Capability
}
if r.Node == "" {
r.Node = rd.Node
}
if r.Relationship.Type == "" {
r.Relationship.Type = rd.Relationship.Type
}
for k, v := range rd.Relationship.Interfaces {
if len(r.Relationship.Interfaces) == 0 {
r.Relationship.Interfaces = make(map[string]InterfaceDefinition)
}
if intf, ok := r.Relationship.Interfaces[k]; ok {
intf.merge(v)
r.Relationship.Interfaces[k] = intf
} else {
r.Relationship.Interfaces[k] = v
}
}
} | requirements.go | 0.837653 | 0.485417 | requirements.go | starcoder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.