code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package lexer
// Lexer performs lexical analysis over a source string, producing one
// Token at a time via NextToken.
type Lexer struct {
	// input is the full source text being tokenised.
	input string
	// positionCurrent is the index in input of the character currently
	// held in ch.
	positionCurrent int
	// positionNext is the index of the next character to be read
	// (one past positionCurrent).
	positionNext int
	// ch is the character under examination; 0 signals end of input
	// (see the EOF case in NextToken).
	ch byte
}
// NewLexer constructs a Lexer over input, primed on the first character
// so that NextToken can be called immediately.
func NewLexer(input string) *Lexer {
	lex := &Lexer{input: input}
	// Consume the first character to initialise ch and the positions.
	lex.ReadChar()
	return lex
}
// NextToken skips leading whitespace and returns the next Token from the
// input, advancing the lexer past it. Identifier and number cases return
// early without the trailing ReadChar — presumably because
// ReadIdentifier/ReadNumber already leave the lexer on the character
// after the literal (confirm in their implementations).
func (l *Lexer) NextToken() Token {
	// Declare a token
	var tok Token
	// Eat all whitespace until the next significant character
	l.EatWhitespaces()
	// Dispatch on the character currently held by the lexer
	switch l.ch {
	case '=':
		// Check if the next character is a '=' (two-char token "==")
		if l.PeekChar() == '=' {
			// Move lexer to the next character
			l.ReadChar()
			// Set the token value to '=='
			tok = Token{Type: EQ, Literal: "=="}
		} else {
			// Set the token value to '='
			tok = NewToken(ASSIGN, l.ch)
		}
	case '!':
		// Check if the next character is a '=' (two-char token "!=")
		if l.PeekChar() == '=' {
			// Move lexer to the next character
			l.ReadChar()
			// Set the token value to '!='
			tok = Token{Type: NOT_EQ, Literal: "!="}
		} else {
			// Set the token value to '!'
			tok = NewToken(BANG, l.ch)
		}
	// Single-character operator and delimiter tokens
	case '+':
		tok = NewToken(PLUS, l.ch)
	case '-':
		tok = NewToken(MINUS, l.ch)
	case '/':
		tok = NewToken(SLASH, l.ch)
	case '*':
		tok = NewToken(ASTERISK, l.ch)
	case '<':
		tok = NewToken(LT, l.ch)
	case '>':
		tok = NewToken(GT, l.ch)
	case ':':
		tok = NewToken(COLON, l.ch)
	case ';':
		tok = NewToken(SEMICOLON, l.ch)
	case '(':
		tok = NewToken(LPAREN, l.ch)
	case ')':
		tok = NewToken(RPAREN, l.ch)
	case ',':
		tok = NewToken(COMMA, l.ch)
	case '{':
		tok = NewToken(LBRACE, l.ch)
	case '}':
		tok = NewToken(RBRACE, l.ch)
	case '[':
		tok = NewToken(LBRACK, l.ch)
	case ']':
		tok = NewToken(RBRACK, l.ch)
	case '"':
		// String literal; ReadString consumes the quoted contents
		tok.Type = STRING
		tok.Literal = l.ReadString()
	case 0:
		// End of File (ch == 0 is the lexer's EOF sentinel)
		tok.Literal = ""
		tok.Type = EOF
	default:
		// Check if character starts a letter/digit run
		if isLetter(l.ch) {
			// Identifier detected - read the full identifier
			tok.Literal = l.ReadIdentifier()
			// Map the literal to a keyword type or generic identifier
			// (LookUpIndentifier is the existing, if misspelled, API name)
			tok.Type = LookUpIndentifier(tok.Literal)
			// Return early: the lexer is already past the identifier
			return tok
		} else if isDigit(l.ch) {
			// Number detected - read the full number
			tok.Literal = l.ReadNumber()
			// Set the token type
			tok.Type = INT
			// Return early: the lexer is already past the number
			return tok
		} else {
			// Unrecognised character: emit an illegal token
			tok = NewToken(ILLEGAL, l.ch)
		}
	}
	// Advance past the character(s) consumed by this token
	l.ReadChar()
	// Return the lexed token
	return tok
}
// isLetter reports whether ch may appear in an identifier: an ASCII
// letter or an underscore.
func isLetter(ch byte) bool {
	switch {
	case ch >= 'a' && ch <= 'z':
		return true
	case ch >= 'A' && ch <= 'Z':
		return true
	default:
		return ch == '_'
	}
}
// isDigit reports whether ch is an ASCII decimal digit.
func isDigit(ch byte) bool {
	return ch >= '0' && ch <= '9'
}
package geomfn
import (
"github.com/cockroachdb/cockroach/pkg/geo"
"github.com/twpayne/go-geom"
)
// Reverse returns a modified geometry by reversing the order of its vertexes.
func Reverse(geometry geo.Geometry) (geo.Geometry, error) {
	t, err := geometry.AsGeomT()
	if err != nil {
		return geo.Geometry{}, err
	}
	reversed, err := reverse(t)
	if err != nil {
		return geo.Geometry{}, err
	}
	return geo.MakeGeometryFromGeomT(reversed)
}
// reverse returns g with the vertex order of each line/ring reversed,
// preserving layout and SRID. Points and multipoints are returned
// unchanged; geometry collections recurse via reverseCollection.
func reverse(g geom.T) (geom.T, error) {
	if geomCollection, ok := g.(*geom.GeometryCollection); ok {
		return reverseCollection(geomCollection)
	}
	switch t := g.(type) {
	case *geom.Point, *geom.MultiPoint: // cases where reverse does not change the order
		return g, nil
	case *geom.LineString:
		g = geom.NewLineStringFlat(t.Layout(), reverseCoords(g.FlatCoords(), g.Stride())).SetSRID(g.SRID())
	case *geom.Polygon:
		g = geom.NewPolygonFlat(t.Layout(), reverseCoords(g.FlatCoords(), g.Stride()), t.Ends()).SetSRID(g.SRID())
	case *geom.MultiLineString:
		g = geom.NewMultiLineStringFlat(t.Layout(), reverseMulti(g, t.Ends()), t.Ends()).SetSRID(g.SRID())
	case *geom.MultiPolygon:
		// Flatten the nested ring boundaries so each ring can be
		// reversed independently by reverseMulti.
		var ends []int
		for _, e := range t.Endss() {
			ends = append(ends, e...)
		}
		g = geom.NewMultiPolygonFlat(t.Layout(), reverseMulti(g, ends), t.Endss()).SetSRID(g.SRID())
	default:
		return nil, geom.ErrUnsupportedType{Value: g}
	}
	return g, nil
}
// reverseCoords reverses coords in place, treating each stride-sized
// group as one vertex, and returns the same slice.
func reverseCoords(coords []float64, stride int) []float64 {
	n := len(coords)
	for lo := 0; lo < n/2; lo += stride {
		hi := n - stride - lo
		for k := 0; k < stride; k++ {
			coords[lo+k], coords[hi+k] = coords[hi+k], coords[lo+k]
		}
	}
	return coords
}
// reverseMulti handles reversing coordinates of MULTI* geometries with
// nested sub-structures: each coordinate run delimited by ends is
// reversed in place, and the flat coordinate slice is returned.
func reverseMulti(g geom.T, ends []int) []float64 {
	flat := g.FlatCoords()
	start := 0
	for _, end := range ends {
		// reverseCoords works in place on the sub-slice.
		reverseCoords(flat[start:end], g.Stride())
		start = end
	}
	return flat
}
// reverseCollection iterates through a GeometryCollection and calls reverse() on each geometry.
func reverseCollection(geomCollection *geom.GeometryCollection) (*geom.GeometryCollection, error) {
res := geom.NewGeometryCollection()
for _, subG := range geomCollection.Geoms() {
subGeom, err := reverse(subG)
if err != nil {
return nil, err
}
if err := res.Push(subGeom); err != nil {
return nil, err
}
}
return res, nil
} | pkg/geo/geomfn/reverse.go | 0.835986 | 0.401365 | reverse.go | starcoder |
package i18n
// This file can be used as an independent library.
//
// iso3166_1_alpha2 lazily caches the ISO 3166-1 alpha-2 code -> English
// country name table built by Countries.
// NOTE(review): the lazy initialisation in Countries is unsynchronised;
// concurrent first calls race on this variable — confirm single-goroutine
// use or guard with sync.Once.
var iso3166_1_alpha2 map[string]string
// EnglishCountryName maps an ISO 3166-1 alpha-2 code to its English
// country name; unknown codes are returned unchanged.
func EnglishCountryName(code string) string {
	if name, ok := Countries()[code]; ok {
		return name
	}
	return code
}
// Countries returns a map of ISO 3166-1 alpha-2 country codes
// to the corresponding English country name.
//
// The table is built lazily on first call and cached in the package-level
// iso3166_1_alpha2 variable; callers receive the shared map, so they must
// not mutate it.
// NOTE(review): the lazy initialisation below is unsynchronised and races
// under concurrent first calls — confirm single-goroutine use or guard
// with sync.Once.
// NOTE(review): the bulk of the table (DZ onward) is commented out below,
// so only codes AD through DO currently resolve — confirm this is
// intentional.
func Countries() map[string]string {
	if iso3166_1_alpha2 == nil {
		iso3166_1_alpha2 = map[string]string{
			"AD": "Andorra",
			"AE": "United Arab Emirates",
			"AF": "Afghanistan",
			"AG": "Antigua and Barbuda",
			"AI": "Anguilla",
			"AL": "Albania",
			"AM": "Armenia",
			"AO": "Angola",
			"AQ": "Antarctica",
			"AR": "Argentina",
			"AS": "American Samoa",
			"AT": "Austria",
			"AU": "Australia",
			"AW": "Aruba",
			"AX": "Åland Islands",
			"AZ": "Azerbaijan",
			"BA": "Bosnia and Herzegovina",
			"BB": "Barbados",
			"BD": "Bangladesh",
			"BE": "Belgium",
			"BF": "Burkina Faso",
			"BG": "Bulgaria",
			"BH": "Bahrain",
			"BI": "Burundi",
			"BJ": "Benin",
			"BL": "Saint Barthélemy",
			"BM": "Bermuda",
			"BN": "Brunei Darussalam",
			"BO": "Bolivia",
			"BQ": "Bonaire, Sint Eustatius and Saba",
			"BR": "Brazil",
			"BS": "Bahamas",
			"BT": "Bhutan",
			"BV": "Bouvet Island",
			"BW": "Botswana",
			"BY": "Belarus",
			"BZ": "Belize",
			"CA": "Canada",
			"CC": "Cocos (Keeling) Islands",
			"CD": "Congo, the Democratic Republic of the",
			"CF": "Central African Republic",
			"CG": "Congo",
			"CH": "Switzerland",
			"CI": "Côte d'Ivoire",
			"CK": "Cook Islands",
			"CL": "Chile",
			"CM": "Cameroon",
			"CN": "China",
			"CO": "Colombia",
			"CR": "Costa Rica",
			"CU": "Cuba",
			"CV": "Cape Verde",
			"CW": "Curaçao",
			"CX": "Christmas Island",
			"CY": "Cyprus",
			"CZ": "Czech Republic",
			"DE": "Germany",
			"DJ": "Djibouti",
			"DK": "Denmark",
			"DM": "Dominica",
			"DO": "Dominican Republic",
			/*
				"DZ": "Algeria 1974 .dz ISO 3166-2:DZ Code taken from name in Kabyle: Dzayer
				"EC": "Ecuador 1974 .ec ISO 3166-2:EC
				"EE": "Estonia 1992 .ee ISO 3166-2:EE Code taken from name in Estonian: Eesti
				"EG": "Egypt 1974 .eg ISO 3166-2:EG
				"EH": "Western Sahara 1974 .eh ISO 3166-2:EH Previous ISO country name: Spanish Sahara (code taken from name in Spanish: Sahara español)
				"ER": "Eritrea 1993 .er ISO 3166-2:ER
				"ES": "Spain 1974 .es ISO 3166-2:ES Code taken from name in Spanish: España
				"ET": "Ethiopia 1974 .et ISO 3166-2:ET
				"FI": "Finland 1974 .fi ISO 3166-2:FI
				"FJ": "Fiji 1974 .fj ISO 3166-2:FJ
				"FK": "Falkland Islands (Malvinas) 1974 .fk ISO 3166-2:FK
				"FM": "Micronesia, Federated States of 1986 .fm ISO 3166-2:FM Previous ISO country name: Micronesia
				"FO": "Faroe Islands 1974 .fo ISO 3166-2:FO
				"FR": "France 1974 .fr ISO 3166-2:FR Includes Clipperton Island
				"GA": "Gabon 1974 .ga ISO 3166-2:GA
				"GB": "United Kingdom 1974 .gb
				"GD": "Grenada 1974 .gd ISO 3166-2:GD
				"GE": "Georgia 1992 .ge ISO 3166-2:GE GE previously represented Gilbert and Ellice Islands
				"GF": "French Guiana 1974 .gf ISO 3166-2:GF Code taken from name in French: Guyane française
				"GG": "Guernsey 2006 .gg ISO 3166-2:GG
				"GH": "Ghana 1974 .gh ISO 3166-2:GH
				"GI": "Gibraltar 1974 .gi ISO 3166-2:GI
				"GL": "Greenland 1974 .gl ISO 3166-2:GL
				"GM": "Gambia 1974 .gm ISO 3166-2:GM
				"GN": "Guinea 1974 .gn ISO 3166-2:GN
				"GP": "Guadeloupe 1974 .gp ISO 3166-2:GP
				"GQ": "Equatorial Guinea 1974 .gq ISO 3166-2:GQ Code taken from name in French: Guinée équatoriale
				"GR": "Greece 1974 .gr ISO 3166-2:GR
				"GS": "South Georgia and the South Sandwich Islands 1993 .gs ISO 3166-2:GS
				"GT": "Guatemala 1974 .gt ISO 3166-2:GT
				"GU": "Guam 1974 .gu ISO 3166-2:GU
				"GW": "Guinea-Bissau 1974 .gw ISO 3166-2:GW
				"GY": "Guyana 1974 .gy ISO 3166-2:GY
				"HK": "Hong Kong 1974 .hk ISO 3166-2:HK
				"HM": "Heard Island and McDonald Islands 1974 .hm ISO 3166-2:HM
				"HN": "Honduras 1974 .hn ISO 3166-2:HN
				"HR": "Croatia 1992 .hr ISO 3166-2:HR Code taken from name in Croatian: Hrvatska
				"HT": "Haiti 1974 .ht ISO 3166-2:HT
				"HU": "Hungary 1974 .hu ISO 3166-2:HU
				"ID": "Indonesia 1974 .id ISO 3166-2:ID
				"IE": "Ireland 1974 .ie ISO 3166-2:IE
				"IL": "Israel 1974 .il ISO 3166-2:IL
				"IM": "Isle of Man 2006 .im ISO 3166-2:IM
				"IN": "India 1974 .in ISO 3166-2:IN
				"IO": "British Indian Ocean Territory 1974 .io ISO 3166-2:IO
				"IQ": "Iraq 1974 .iq ISO 3166-2:IQ
				"IR": "Iran, Islamic Republic of 1974 .ir ISO 3166-2:IR ISO country name follows UN designation (common name: Iran)
				"IS": "Iceland 1974 .is ISO 3166-2:IS Code taken from name in Icelandic: Ísland
				"IT": "Italy 1974 .it ISO 3166-2:IT
				"JE": "Jersey 2006 .je ISO 3166-2:JE
				"JM": "Jamaica 1974 .jm ISO 3166-2:JM
				"JO": "Jordan 1974 .jo ISO 3166-2:JO
				"JP": "Japan 1974 .jp ISO 3166-2:JP
				"KE": "Kenya 1974 .ke ISO 3166-2:KE
				"KG": "Kyrgyzstan 1992 .kg ISO 3166-2:KG
				"KH": "Cambodia 1974 .kh ISO 3166-2:KH Code taken from former name: Khmer Republic
				"KI": "Kiribati 1979 .ki ISO 3166-2:KI
				"KM": "Comoros 1974 .km ISO 3166-2:KM Code taken from name in Comorian: Komori
				"KN": "Saint Kitts and Nevis 1974 .kn ISO 3166-2:KN Previous ISO country name: Saint Kitts-Nevis-Anguilla
				"KP": "Korea, Democratic People's Republic of 1974 .kp ISO 3166-2:KP ISO country name follows UN designation (common name: North Korea)
				"KR": "Korea, Republic of 1974 .kr ISO 3166-2:KR ISO country name follows UN designation (common name: South Korea)
				"KW": "Kuwait 1974 .kw ISO 3166-2:KW
				"KY": "Cayman Islands 1974 .ky ISO 3166-2:KY
				"KZ": "Kazakhstan 1992 .kz ISO 3166-2:KZ Previous ISO country name: Kazakstan
				"LA": "Lao People's Democratic Republic 1974 .la ISO 3166-2:LA ISO country name follows UN designation (common name: Laos)
				"LB": "Lebanon 1974 .lb ISO 3166-2:LB
				"LC": "Saint Lucia 1974 .lc ISO 3166-2:LC
				"LI": "Liechtenstein 1974 .li ISO 3166-2:LI
				"LK": "Sri Lanka 1974 .lk ISO 3166-2:LK
				"LR": "Liberia 1974 .lr ISO 3166-2:LR
				"LS": "Lesotho 1974 .ls ISO 3166-2:LS
				"LT": "Lithuania 1992 .lt ISO 3166-2:LT
				"LU": "Luxembourg 1974 .lu ISO 3166-2:LU
				"LV": "Latvia 1992 .lv ISO 3166-2:LV
				"LY": "Libyan Arab Jamahiriya 1974 .ly ISO 3166-2:LY ISO country name follows UN designation (common name: Libya)
				"MA": "Morocco 1974 .ma ISO 3166-2:MA Code taken from name in French: Maroc
				"MC": "Monaco 1974 .mc ISO 3166-2:MC
				"MD": "Moldova, Republic of 1992 .md ISO 3166-2:MD ISO country name follows UN designation (common name and previous ISO country name: Moldova)
				"ME": "Montenegro 2006 .me ISO 3166-2:ME
				"MF": "Saint Martin (French part) 2007 .mf ISO 3166-2:MF The Dutch part of Saint Martin island is assigned code SX
				"MG": "Madagascar 1974 .mg ISO 3166-2:MG
				"MH": "Marshall Islands 1986 .mh ISO 3166-2:MH
				"MK": "Macedonia, the former Yugoslav Republic of 1993 .mk ISO 3166-2:MK ISO country name follows UN designation (due to Macedonia naming dispute; official name used by country itself: Republic of Macedonia)
				"ML": "Mali 1974 .ml ISO 3166-2:ML
				"MM": "Myanmar 1989 .mm ISO 3166-2:MM Name changed from Burma (BU)
				"MN": "Mongolia 1974 .mn ISO 3166-2:MN
				"MO": "Macao 1974 .mo ISO 3166-2:MO Previous ISO country name: Macau
				"MP": "Northern Mariana Islands 1986 .mp ISO 3166-2:MP
				"MQ": "Martinique 1974 .mq ISO 3166-2:MQ
				"MR": "Mauritania 1974 .mr ISO 3166-2:MR
				"MS": "Montserrat 1974 .ms ISO 3166-2:MS
				"MT": "Malta 1974 .mt ISO 3166-2:MT
				"MU": "Mauritius 1974 .mu ISO 3166-2:MU
				"MV": "Maldives 1974 .mv ISO 3166-2:MV
				"MW": "Malawi 1974 .mw ISO 3166-2:MW
				"MX": "Mexico 1974 .mx ISO 3166-2:MX
				"MY": "Malaysia 1974 .my ISO 3166-2:MY
				"MZ": "Mozambique 1974 .mz ISO 3166-2:MZ
				"NA": "Namibia 1974 .na ISO 3166-2:NA
				"NC": "New Caledonia 1974 .nc ISO 3166-2:NC
				"NE": "Niger 1974 .ne ISO 3166-2:NE
				"NF": "Norfolk Island 1974 .nf ISO 3166-2:NF
				"NG": "Nigeria 1974 .ng ISO 3166-2:NG
				"NI": "Nicaragua 1974 .ni ISO 3166-2:NI
				"NL": "Netherlands 1974 .nl ISO 3166-2:NL
				"NO": "Norway 1974 .no ISO 3166-2:NO
				"NP": "Nepal 1974 .np ISO 3166-2:NP
				"NR": "Nauru 1974 .nr ISO 3166-2:NR
				"NU": "Niue 1974 .nu ISO 3166-2:NU
				"NZ": "New Zealand 1974 .nz ISO 3166-2:NZ
				"OM": "Oman 1974 .om ISO 3166-2:OM
				"PA": "Panama 1974 .pa ISO 3166-2:PA
				"PE": "Peru 1974 .pe ISO 3166-2:PE
				"PF": "French Polynesia 1974 .pf ISO 3166-2:PF Code taken from name in French: Polynésie française
				"PG": "Papua New Guinea 1974 .pg ISO 3166-2:PG
				"PH": "Philippines 1974 .ph ISO 3166-2:PH
				"PK": "Pakistan 1974 .pk ISO 3166-2:PK
				"PL": "Poland 1974 .pl ISO 3166-2:PL
				"PM": "S<NAME> 1974 .pm ISO 3166-2:PM
				"PN": "Pitcairn 1974 .pn ISO 3166-2:PN
				"PR": "Puerto Rico 1974 .pr ISO 3166-2:PR
				"PS": "Palestinian Territory, Occupied 1999 .ps ISO 3166-2:PS Consists of the West Bank and the Gaza Strip
				"PT": "Portugal 1974 .pt ISO 3166-2:PT
				"PW": "Palau 1986 .pw ISO 3166-2:PW
				"PY": "Paraguay 1974 .py ISO 3166-2:PY
				"QA": "Qatar 1974 .qa ISO 3166-2:QA
				"RE": "Réunion 1974 .re ISO 3166-2:RE
				"RO": "Romania 1974 .ro ISO 3166-2:RO
				"RS": "Serbia 2006 .rs ISO 3166-2:RS Code taken from official name: Republic of Serbia (see Serbian country codes)
				"RU": "Russian Federation 1992 .ru ISO 3166-2:RU ISO country name follows UN designation (common name: Russia)
				"RW": "Rwanda 1974 .rw ISO 3166-2:RW
				"SA": "Saudi Arabia 1974 .sa ISO 3166-2:SA
				"SB": "Solomon Islands 1974 .sb ISO 3166-2:SB Code taken from former name: British Solomon Islands
				"SC": "Seychelles 1974 .sc ISO 3166-2:SC
				"SD": "Sudan 1974 .sd ISO 3166-2:SD
				"SE": "Sweden 1974 .se ISO 3166-2:SE
				"SG": "Singapore 1974 .sg ISO 3166-2:SG
				"SH": "Saint Helena, Ascension and Tristan da Cunha 1974 .sh ISO 3166-2:SH Previous ISO country name: Saint Helena
				"SI": "Slovenia 1992 .si ISO 3166-2:SI
				"SJ": "Svalbard and <NAME> 1974 .sj ISO 3166-2:SJ Consists of two arctic territories of Norway: Svalbard and <NAME>en
				"SK": "Slovakia 1993 .sk ISO 3166-2:SK SK previously represented Sikkim
				"SL": "Sierra Leone 1974 .sl ISO 3166-2:SL
				"SM": "San Marino 1974 .sm ISO 3166-2:SM
				"SN": "Senegal 1974 .sn ISO 3166-2:SN
				"SO": "Somalia 1974 .so ISO 3166-2:SO
				"SR": "Suriname 1974 .sr ISO 3166-2:SR
				"SS": "South Sudan 2011 .ss ISO 3166-2:SS
				"ST": "Sao Tome and Principe 1974 .st ISO 3166-2:ST
				"SV": "El Salvador 1974 .sv ISO 3166-2:SV
				"SX": "<NAME> (Dutch part) 2010 .sx ISO 3166-2:SX The French part of Saint Martin island is assigned code MF
				"SY": "Syrian Arab Republic 1974 .sy ISO 3166-2:SY ISO country name follows UN designation (common name: Syria)
				"SZ": "Swaziland 1974 .sz ISO 3166-2:SZ
				"TC": "Turks and Caicos Islands 1974 .tc ISO 3166-2:TC
				"TD": "Chad 1974 .td ISO 3166-2:TD Code taken from name in French: Tchad
				"TF": "French Southern Territories 1979 .tf ISO 3166-2:TF Covers the French Southern and Antarctic Lands except Adélie Land
				"TG": "Togo 1974 .tg ISO 3166-2:TG
				"TH": "Thailand 1974 .th ISO 3166-2:TH
				"TJ": "Tajikistan 1992 .tj ISO 3166-2:TJ
				"TK": "Tokelau 1974 .tk ISO 3166-2:TK
				"TL": "Timor-Leste 2002 .tl ISO 3166-2:TL Name changed from East Timor (TP)
				"TM": "Turkmenistan 1992 .tm ISO 3166-2:TM
				"TN": "Tunisia 1974 .tn ISO 3166-2:TN
				"TO": "Tonga 1974 .to ISO 3166-2:TO
				"TR": "Turkey 1974 .tr ISO 3166-2:TR
				"TT": "Trinidad and Tobago 1974 .tt ISO 3166-2:TT
				"TV": "Tuvalu 1979 .tv ISO 3166-2:TV
				"TW": "Taiwan, Province of China 1974 .tw ISO 3166-2:TW Covers the current jurisdiction of the Republic of China except Kinmen and Lienchiang
				"TZ": "Tanzania, United Republic of 1974 .tz ISO 3166-2:TZ ISO country name follows UN designation (common name: Tanzania)
				"UA": "Ukraine 1974 .ua ISO 3166-2:UA Previous ISO country name: Ukrainian SSR
				"UG": "Uganda 1974 .ug ISO 3166-2:UG
				"UM": "United States Minor Outlying Islands 1986 .um ISO 3166-2:UM Consists of nine minor insular areas of the United States: Baker Island, Howland Island, Jarvis Island, Johnston Atoll, Kingman Reef, Midway Islands, Navassa Island, Palmyra Atoll, and Wake Island
				"US": "United States 1974 .us ISO 3166-2:US
				"UY": "Uruguay 1974 .uy ISO 3166-2:UY
				"UZ": "Uzbekistan 1992 .uz ISO 3166-2:UZ
				"VA": "Holy See (Vatican City State) 1974 .va ISO 3166-2:VA Covers Vatican City, territory of the Holy See
				"VC": "Saint Vincent and the Grenadines 1974 .vc ISO 3166-2:VC
				"VE": "Venezuela, Bolivarian Republic of 1974 .ve ISO 3166-2:VE ISO country name follows UN designation (common name and previous ISO country name: Venezuela)
				"VG": "Virgin Islands, British 1974 .vg ISO 3166-2:VG
				"VI": "Virgin Islands, U.S. 1974 .vi ISO 3166-2:VI
				"VN": "Viet Nam 1974 .vn ISO 3166-2:VN ISO country name follows UN spelling (common spelling: Vietnam)
				"VU": "Vanuatu 1980 .vu ISO 3166-2:VU Name changed from New Hebrides (NH)
				"WF": "Wallis and Futuna 1974 .wf ISO 3166-2:WF
				"WS": "Samoa 1974 .ws ISO 3166-2:WS Code taken from former name: Western Samoa
				"YE": "Yemen 1974 .ye ISO 3166-2:YE Previous ISO country name: Yemen, Republic of
				"YT": "Mayotte 1993 .yt ISO 3166-2:YT
				"ZA": "South Africa 1974 .za ISO 3166-2:ZA Code taken from name in Dutch: Zuid-Afrika
				"ZM": "Zambia 1974 .zm ISO 3166-2:ZM
				"ZW": "Zimbabwe 1980 .zw ISO 3166-2:ZW Name changed from Southern Rhodesia (RH)
			*/
		}
	}
	return iso3166_1_alpha2
}
package quickfix
import "honnef.co/go/tools/analysis/lint"
// Docs holds the documentation for every quickfix (QF) check, keyed by
// check ID and rendered through lint.Markdownify.
var Docs = lint.Markdownify(map[string]*lint.RawDocumentation{
	"QF1001": {
		Title:    "Apply De Morgan's law",
		Since:    "2021.1",
		Severity: lint.SeverityHint,
	},
	"QF1002": {
		Title: "Convert untagged switch to tagged switch",
		Text: `
An untagged switch that compares a single variable against a series of
values can be replaced with a tagged switch.`,
		Before: `
switch {
case x == 1 || x == 2, x == 3:
...
case x == 4:
...
default:
...
}`,
		After: `
switch x {
case 1, 2, 3:
...
case 4:
...
default:
...
}`,
		Since:    "2021.1",
		Severity: lint.SeverityHint,
	},
	"QF1003": {
		Title: "Convert if/else-if chain to tagged switch",
		Text: `
A series of if/else-if checks comparing the same variable against
values can be replaced with a tagged switch.`,
		Before: `
if x == 1 || x == 2 {
...
} else if x == 3 {
...
} else {
...
}`,
		After: `
switch x {
case 1, 2:
...
case 3:
...
default:
...
}`,
		Since:    "2021.1",
		Severity: lint.SeverityInfo,
	},
	"QF1004": {
		Title:    `Use \'strings.ReplaceAll\' instead of \'strings.Replace\' with \'n == -1\'`,
		Since:    "2021.1",
		Severity: lint.SeverityHint,
	},
	"QF1005": {
		Title:    `Expand call to \'math.Pow\'`,
		Text:     `Some uses of \'math.Pow\' can be simplified to basic multiplication.`,
		Before:   `math.Pow(x, 2)`,
		After:    `x * x`,
		Since:    "2021.1",
		Severity: lint.SeverityHint,
	},
	"QF1006": {
		Title: `Lift \'if\'+\'break\' into loop condition`,
		Before: `
for {
if done {
break
}
...
}`,
		After: `
for !done {
...
}`,
		Since:    "2021.1",
		Severity: lint.SeverityHint,
	},
	"QF1007": {
		Title: "Merge conditional assignment into variable declaration",
		Before: `
x := false
if someCondition {
x = true
}`,
		After:    `x := someCondition`,
		Since:    "2021.1",
		Severity: lint.SeverityHint,
	},
	"QF1008": {
		Title:    "Omit embedded fields from selector expression",
		Since:    "2021.1",
		Severity: lint.SeverityHint,
	},
	"QF1009": {
		Title:    `Use \'time.Time.Equal\' instead of \'==\' operator`,
		Since:    "2021.1",
		Severity: lint.SeverityInfo,
	},
	"QF1010": {
		Title:    "Convert slice of bytes to string when printing it",
		Since:    "2021.1",
		Severity: lint.SeverityHint,
	},
	"QF1011": {
		Title:    "Omit redundant type from variable declaration",
		Since:    "2021.1",
		Severity: lint.SeverityHint,
	},
	"QF1012": {
		Title:    `Use \'fmt.Fprintf(x, ...)\' instead of \'x.Write(fmt.Sprintf(...))\'`,
		Since:    "Unreleased",
		Severity: lint.SeverityHint,
	},
})
package bn256
import (
"math/big"
)
// twistPoint implements the elliptic curve y²=x³+3/ξ over GF(p²). Points are
// kept in Jacobian form and t=z² when valid. The group G₂ is the set of
// n-torsion points of this curve over GF(p²) (where n = Order)
type twistPoint struct {
	// x, y, z are Jacobian projective coordinates; t caches z² while the
	// point is in valid form.
	x, y, z, t *gfP2
}
// twistB is the curve constant b' = 3/ξ in the twist equation y² = x³ + b'
// (subtracted from y²−x³ in IsOnCurve).
var twistB = &gfP2{
	bigFromBase10("6500054969564660373279643874235990574282535810762300357187714502686418407178"),
	bigFromBase10("45500384786952622612957507119651934019977750675336102500314001518804928850249"),
}
// twistGen is the generator of group G₂, given in affine-equivalent
// Jacobian form (z = 1, t = 1).
var twistGen = &twistPoint{
	&gfP2{
		bigFromBase10("21167961636542580255011770066570541300993051739349375019639421053990175267184"),
		bigFromBase10("64746500191241794695844075326670126197795977525365406531717464316923369116492"),
	},
	&gfP2{
		bigFromBase10("20666913350058776956210519119118544732556678129809273996262322366050359951122"),
		bigFromBase10("17778617556404439934652658462602675281523610326338642107814333856843981424549"),
	},
	&gfP2{
		bigFromBase10("0"),
		bigFromBase10("1"),
	},
	&gfP2{
		bigFromBase10("0"),
		bigFromBase10("1"),
	},
}
// newTwistPoint allocates a twistPoint whose four coordinate elements are
// drawn from the given pool.
func newTwistPoint(pool *bnPool) *twistPoint {
	p := new(twistPoint)
	p.x = newGFp2(pool)
	p.y = newGFp2(pool)
	p.z = newGFp2(pool)
	p.t = newGFp2(pool)
	return p
}
// String renders the point's Jacobian coordinates as "(x, y, z)".
func (c *twistPoint) String() string {
	s := "(" + c.x.String()
	s += ", " + c.y.String()
	s += ", " + c.z.String()
	return s + ")"
}
// Put returns all four coordinate elements to the pool.
func (c *twistPoint) Put(pool *bnPool) {
	for _, e := range []*gfP2{c.x, c.y, c.z, c.t} {
		e.Put(pool)
	}
}
// Set copies a's coordinates into c.
func (c *twistPoint) Set(a *twistPoint) {
	dst := []*gfP2{c.x, c.y, c.z, c.t}
	src := []*gfP2{a.x, a.y, a.z, a.t}
	for i, d := range dst {
		d.Set(src[i])
	}
}
// IsOnCurve returns true iff c is on the curve where c must be in affine form.
// It checks that y² − x³ − b' reduces to zero.
func (c *twistPoint) IsOnCurve() bool {
	pool := new(bnPool)
	// lhs = y²
	lhs := newGFp2(pool).Square(c.y, pool)
	// cube = x³
	cube := newGFp2(pool).Square(c.x, pool)
	cube.Mul(cube, c.x, pool)
	// lhs ← y² − x³ − b'; must be the zero element for on-curve points.
	lhs.Sub(lhs, cube)
	lhs.Sub(lhs, twistB)
	lhs.Minimal()
	return lhs.x.Sign() == 0 && lhs.y.Sign() == 0
}
// SetInfinity marks c as the point at infinity (z = 0 in Jacobian form).
func (c *twistPoint) SetInfinity() {
	c.z.SetZero()
}
// IsInfinity reports whether c is the point at infinity (z == 0).
func (c *twistPoint) IsInfinity() bool {
	return c.z.IsZero()
}
// Add sets c = a + b using the Jacobian add-2007-bl formula. Infinity
// operands short-circuit to a copy of the other operand, and equal
// points are dispatched to Double (the addition formula degenerates
// when a == b).
func (c *twistPoint) Add(a, b *twistPoint, pool *bnPool) {
	// For additional comments, see the same function in curve.go.
	if a.IsInfinity() {
		c.Set(b)
		return
	}
	if b.IsInfinity() {
		c.Set(a)
		return
	}
	// See http://hyperelliptic.org/EFD/g1p/auto-code/shortw/jacobian-0/addition/add-2007-bl.op3
	z1z1 := newGFp2(pool).Square(a.z, pool)
	z2z2 := newGFp2(pool).Square(b.z, pool)
	u1 := newGFp2(pool).Mul(a.x, z2z2, pool)
	u2 := newGFp2(pool).Mul(b.x, z1z1, pool)
	t := newGFp2(pool).Mul(b.z, z2z2, pool)
	s1 := newGFp2(pool).Mul(a.y, t, pool)
	t.Mul(a.z, z1z1, pool)
	s2 := newGFp2(pool).Mul(b.y, t, pool)
	// h = u2 - u1 is the x-difference in a common projective scale;
	// h == 0 and s2 == s1 together mean a and b are the same point.
	h := newGFp2(pool).Sub(u2, u1)
	xEqual := h.IsZero()
	t.Add(h, h)
	i := newGFp2(pool).Square(t, pool)
	j := newGFp2(pool).Mul(h, i, pool)
	t.Sub(s2, s1)
	yEqual := t.IsZero()
	if xEqual && yEqual {
		c.Double(a, pool)
		return
	}
	r := newGFp2(pool).Add(t, t)
	v := newGFp2(pool).Mul(u1, i, pool)
	t4 := newGFp2(pool).Square(r, pool)
	t.Add(v, v)
	t6 := newGFp2(pool).Sub(t4, j)
	c.x.Sub(t6, t)
	t.Sub(v, c.x)       // t7
	t4.Mul(s1, j, pool) // t8
	t6.Add(t4, t4)      // t9
	t4.Mul(r, t, pool)  // t10
	c.y.Sub(t4, t6)
	t.Add(a.z, b.z)    // t11
	t4.Square(t, pool) // t12
	t.Sub(t4, z1z1)    // t13
	t4.Sub(t, z2z2)    // t14
	c.z.Mul(t4, h, pool)
	// Release all temporaries back to the pool.
	z1z1.Put(pool)
	z2z2.Put(pool)
	u1.Put(pool)
	u2.Put(pool)
	t.Put(pool)
	s1.Put(pool)
	s2.Put(pool)
	h.Put(pool)
	i.Put(pool)
	j.Put(pool)
	r.Put(pool)
	v.Put(pool)
	t4.Put(pool)
	t6.Put(pool)
}
// Double sets c = 2·a using the Jacobian dbl-2009-l doubling formula.
func (c *twistPoint) Double(a *twistPoint, pool *bnPool) {
	// See http://hyperelliptic.org/EFD/g1p/auto-code/shortw/jacobian-0/doubling/dbl-2009-l.op3
	A := newGFp2(pool).Square(a.x, pool)
	B := newGFp2(pool).Square(a.y, pool)
	C := newGFp2(pool).Square(B, pool)
	t := newGFp2(pool).Add(a.x, B)
	t2 := newGFp2(pool).Square(t, pool)
	t.Sub(t2, A)
	t2.Sub(t, C)
	d := newGFp2(pool).Add(t2, t2)
	t.Add(A, A)
	e := newGFp2(pool).Add(t, A)
	f := newGFp2(pool).Square(e, pool)
	t.Add(d, d)
	c.x.Sub(f, t)
	t.Add(C, C)
	t2.Add(t, t)
	t.Add(t2, t2)
	c.y.Sub(d, c.x)
	t2.Mul(e, c.y, pool)
	c.y.Sub(t2, t)
	t.Mul(a.y, a.z, pool)
	c.z.Add(t, t)
	// Release all temporaries back to the pool.
	A.Put(pool)
	B.Put(pool)
	C.Put(pool)
	t.Put(pool)
	t2.Put(pool)
	d.Put(pool)
	e.Put(pool)
	f.Put(pool)
}
// Mul sets c = scalar·a using a left-to-right double-and-add ladder over
// the scalar's bits and returns c. Note: it branches on each scalar bit,
// so execution is not constant-time.
func (c *twistPoint) Mul(a *twistPoint, scalar *big.Int, pool *bnPool) *twistPoint {
	sum := newTwistPoint(pool)
	sum.SetInfinity()
	t := newTwistPoint(pool)
	for i := scalar.BitLen(); i >= 0; i-- {
		t.Double(sum, pool)
		if scalar.Bit(i) != 0 {
			sum.Add(t, a, pool)
		} else {
			sum.Set(t)
		}
	}
	c.Set(sum)
	sum.Put(pool)
	t.Put(pool)
	return c
}
// MakeAffine converts c to affine form and returns c. If c is ∞, then it sets
// c to 0 : 1 : 0.
func (c *twistPoint) MakeAffine(pool *bnPool) *twistPoint {
	// Already affine (z == 1): nothing to do.
	if c.z.IsOne() {
		return c
	}
	if c.IsInfinity() {
		c.x.SetZero()
		c.y.SetOne()
		c.z.SetZero()
		c.t.SetZero()
		return c
	}
	// x ← x·z⁻², y ← y·z⁻³, then normalise z and t to one.
	zInv := newGFp2(pool).Invert(c.z, pool)
	t := newGFp2(pool).Mul(c.y, zInv, pool)
	zInv2 := newGFp2(pool).Square(zInv, pool)
	c.y.Mul(t, zInv2, pool)
	t.Mul(c.x, zInv2, pool)
	c.x.Set(t)
	c.z.SetOne()
	c.t.SetOne()
	zInv.Put(pool)
	t.Put(pool)
	zInv2.Put(pool)
	return c
}
func (c *twistPoint) Negative(a *twistPoint, pool *bnPool) {
c.x.Set(a.x)
c.y.SetZero()
c.y.Sub(c.y, a.y)
c.z.Set(a.z)
c.t.SetZero()
} | vendor/golang.org/x/crypto/bn256/twist.go | 0.764804 | 0.512205 | twist.go | starcoder |
package twilight
import (
"time"
)
// lenToDuration converts a span expressed in fractional hours into a
// time.Duration.
func lenToDuration(len float64) time.Duration {
	nanos := len * float64(time.Hour)
	return time.Duration(nanos)
}
// SunriseStatus reports whether a rise/set computation produced normal
// crossing times or the sun never crossed the chosen altitude that day.
type SunriseStatus int

const (
	// SunriseStatusOK: the sun crossed the altitude; rise/set are valid.
	SunriseStatusOK = SunriseStatus(0)
	// SunriseStatusAboveHorizon: the sun stayed above the altitude all
	// day (midnight sun) — rise/set values are presumably degenerate;
	// confirm against sunRiseSet.
	SunriseStatusAboveHorizon = SunriseStatus(1)
	// SunriseStatusBelowHorizon: the sun stayed below the altitude all
	// day (polar night).
	SunriseStatusBelowHorizon = SunriseStatus(-1)
)
// DayLength returns the duration of daylight on date d at the given
// position (altitude -35', upper limb — the conventional sunrise definition).
func DayLength(d time.Time, latitude, longitude float64) time.Duration {
	hours := dayLen(d.Year(), int(d.Month()), d.Day(), longitude, latitude, -35.0/60.0, true)
	return lenToDuration(hours)
}
// CivilTwilightLength returns how long the sun's centre is above -6°
// (civil twilight limit) on date d at the given position.
func CivilTwilightLength(d time.Time, latitude, longitude float64) time.Duration {
	hours := dayLen(d.Year(), int(d.Month()), d.Day(), longitude, latitude, -6.0, false)
	return lenToDuration(hours)
}
// NauticalTwilightLength returns how long the sun's centre is above -12°
// (nautical twilight limit) on date d at the given position.
func NauticalTwilightLength(d time.Time, latitude, longitude float64) time.Duration {
	hours := dayLen(d.Year(), int(d.Month()), d.Day(), longitude, latitude, -12.0, false)
	return lenToDuration(hours)
}
// AstronomicalTwilightLength returns how long the sun's centre is above
// -18° (astronomical twilight limit) on date d at the given position.
func AstronomicalTwilightLength(d time.Time, latitude, longitude float64) time.Duration {
	hours := dayLen(d.Year(), int(d.Month()), d.Day(), longitude, latitude, -18.0, false)
	return lenToDuration(hours)
}
// riseSetToTime anchors fractional-hour rise/set offsets to local
// midnight of now's calendar date, returning them as absolute times in
// now's location.
func riseSetToTime(now time.Time, rise, set float64) (time.Time, time.Time) {
	y, m, d := now.Date()
	midnight := time.Date(y, m, d, 0, 0, 0, 0, now.Location())
	return midnight.Add(lenToDuration(rise)), midnight.Add(lenToDuration(set))
}
// SunRiseSet computes the sunrise and sunset times (upper limb at the
// standard -35' refraction altitude) for the given date and position.
func SunRiseSet(date time.Time, latitude, longitude float64) (time.Time, time.Time, SunriseStatus) {
	riseHours, setHours, status := sunRiseSet(date.Year(), int(date.Month()), date.Day(), longitude, latitude, -35.0/60.0, true)
	riseAt, setAt := riseSetToTime(date, riseHours, setHours)
	return riseAt, setAt, status
}
// CivilTwilight returns the start and end of civil twilight (sun centre
// at -6°) for the given date and position.
//
// Fix: pass upperLimb=false. Twilight altitudes are defined for the
// centre of the solar disk; CivilTwilightLength in this file already
// passes false for the same -6.0 altitude, and the previous true here
// was inconsistent with it.
func CivilTwilight(date time.Time, latitude, longitude float64) (time.Time, time.Time, SunriseStatus) {
	r, s, status := sunRiseSet(date.Year(), int(date.Month()), date.Day(), longitude, latitude, -6.0, false)
	rise, set := riseSetToTime(date, r, s)
	return rise, set, status
}
// NauticalTwilight returns the start and end of nautical twilight (sun
// centre at -12°) for the given date and position.
//
// Fix: pass upperLimb=false. Twilight altitudes are defined for the
// centre of the solar disk; NauticalTwilightLength in this file already
// passes false for the same -12.0 altitude, and the previous true here
// was inconsistent with it.
func NauticalTwilight(date time.Time, latitude, longitude float64) (time.Time, time.Time, SunriseStatus) {
	r, s, status := sunRiseSet(date.Year(), int(date.Month()), date.Day(), longitude, latitude, -12.0, false)
	rise, set := riseSetToTime(date, r, s)
	return rise, set, status
}
func AstronomicalTwilight(date time.Time, latitude, longitude float64) (time.Time, time.Time, SunriseStatus) {
r, s, status := sunRiseSet(date.Year(), int(date.Month()), date.Day(), longitude, latitude, -18.0, true)
rise, set := riseSetToTime(date, r, s)
return rise, set, status
} | twilight.go | 0.828523 | 0.565239 | twilight.go | starcoder |
package tin
import "math"
const (
	// MAX_AVERAGING_SAMPLES caps how many neighbour samples are kept when
	// averaging around a no-data pixel.
	MAX_AVERAGING_SAMPLES = 64
)

// average returns the mean of the first avgCount samples in toAverage.
// Unused tail entries are expected to be zero, so summing the whole
// fixed-size array is safe. It returns NaN when avgCount is zero.
func average(toAverage [MAX_AVERAGING_SAMPLES]float64, avgCount int) float64 {
	if avgCount == 0 {
		return math.NaN()
	}
	var total float64
	for _, v := range toAverage {
		total += v
	}
	return total / float64(avgCount)
}
// MinInt returns the smaller of x and y.
func MinInt(x, y int) int {
	if y < x {
		return y
	}
	return x
}
// AverageNanArr returns the mean of the non-NaN entries of toAverage, or
// NaN when the slice is empty or every entry is NaN.
//
// Fix: the original guarded with `v != math.NaN()`, which is ALWAYS true
// because NaN compares unequal to everything, including itself — so NaN
// entries were folded into the sum and poisoned the result. NaN must be
// detected with math.IsNaN.
func AverageNanArr(toAverage []float64) float64 {
	sum := float64(0)
	avgCount := 0
	for _, v := range toAverage {
		if !math.IsNaN(v) {
			sum += v
			avgCount++
		}
	}
	if avgCount == 0 {
		return math.NaN()
	}
	return sum / float64(avgCount)
}
// SafeGetPixel returns the raster value at row r, column c, or NaN when
// the coordinates fall outside the w×h bounds.
func SafeGetPixel(src *RasterDouble, w, h, r, c int64) float64 {
	if r < 0 || r >= h || c < 0 || c >= w {
		return math.NaN()
	}
	return src.Value(int(r), int(c))
}
// SubSampleRaster3x3 samples the 3×3 neighbourhood centred on (r, c) and
// returns a weighted average: the centre pixel is weighted 3×, the mean of
// the four edge-adjacent ("cross") pixels 2×, and the mean of the four
// diagonal pixels 1×. Out-of-bounds pixels (via SafeGetPixel) and pixels
// equal to noDataValue are mapped to NaN so AverageNanArr can skip them.
func SubSampleRaster3x3(src *RasterDouble, noDataValue float64, w, h, r, c int64) float64 {
	var centerPixel float64
	var crossPixels [4]float64
	var diagPixels [4]float64
	// Gather the 3×3 window; SafeGetPixel yields NaN outside the raster.
	diagPixels[0] = SafeGetPixel(src, w, h, r-1, c-1)  // top-left
	crossPixels[0] = SafeGetPixel(src, w, h, r-1, c)   // top
	diagPixels[1] = SafeGetPixel(src, w, h, r-1, c+1)  // top-right
	crossPixels[1] = SafeGetPixel(src, w, h, r, c-1)   // center-left
	centerPixel = SafeGetPixel(src, w, h, r, c)        // center-center
	crossPixels[2] = SafeGetPixel(src, w, h, r, c+1)   // center-right
	diagPixels[2] = SafeGetPixel(src, w, h, r+1, c-1)  // bottom-left
	crossPixels[3] = SafeGetPixel(src, w, h, r+1, c)   // bottom-center
	diagPixels[3] = SafeGetPixel(src, w, h, r+1, c+1)  // bottom-right
	// Convert no-data values to NaN so they are excluded from averages.
	if centerPixel == noDataValue {
		centerPixel = math.NaN()
	}
	for i := 0; i < 4; i++ {
		if diagPixels[i] == noDataValue {
			diagPixels[i] = math.NaN()
		}
		if crossPixels[i] == noDataValue {
			crossPixels[i] = math.NaN()
		}
	}
	crossAvg := AverageNanArr(crossPixels[:])
	diagAvg := AverageNanArr(diagPixels[:])
	// Weight centre:cross:diag = 3:2:1 by repetition in a 6-entry array.
	weighted := [6]float64{
		centerPixel, centerPixel, centerPixel, crossAvg, crossAvg, diagAvg,
	}
	weightedAvg := AverageNanArr(weighted[:])
	return weightedAvg
}
func SampleNearestValidAvg(src *RasterDouble, _row, _column int, minAveragingSamples int) float64 {
minAveragingSamples = MinInt(minAveragingSamples, MAX_AVERAGING_SAMPLES)
row := _row
column := _column
w := src.Cols()
h := src.Rows()
maxRadius := int64(math.Sqrt(float64(w*w + h*h)))
noDataValue := src.NoData.(float64)
z := float64(0)
if row < h && column < w {
z = src.Value(row, column)
}
if !isNoData(z, noDataValue) {
return z
}
var toAverage [MAX_AVERAGING_SAMPLES]float64
for i := range toAverage {
toAverage[i] = 0.0
}
avgCount := 0
putpixel := func(x, y int64) {
destR := int64(row) + y
destC := int64(column) + x
z := SubSampleRaster3x3(src, noDataValue, int64(w), int64(h), destR, destC)
if !isNoData(z, noDataValue) {
toAverage[avgCount] = z
avgCount++
}
}
for radius := int64(2); radius <= maxRadius && avgCount < minAveragingSamples; radius++ {
x := radius - 1
y := int64(0)
dx := int64(1)
dy := int64(1)
err := int64(dx) - (radius / 2)
for {
if x >= y {
break
}
putpixel(x, y)
putpixel(y, x)
putpixel(-y, x)
putpixel(-x, y)
putpixel(-x, -y)
putpixel(-y, -x)
putpixel(y, -x)
putpixel(x, -y)
if err <= 0 {
y++
err += dy
dy += 2
} else {
x--
dx += 2
err += dx - (radius / 2)
}
}
}
if avgCount == 1 {
return toAverage[0]
}
return average(toAverage, avgCount)
} | raster_tools.go | 0.656438 | 0.436922 | raster_tools.go | starcoder |
package circle
import (
//. "github.com/y0ssar1an/q"
"image"
"image/color"
"math"
)
// Simple builds a Circle centred at (x, y) with the given radius.
func Simple(x, y, radius int) *Circle {
	c := Circle{
		Point:  image.Pt(x, y),
		Radius: radius,
	}
	return &c
}
// Circle is a filled disc that implements the image.Image interface
// (see ColorModel, Bounds and At below).
type Circle struct {
	// Point is the centre of the circle in image coordinates.
	Point image.Point
	// Radius is the circle's radius in pixels.
	Radius int
}
// ColorModel returns the RGBA colour model, satisfying image.Image.
func (c *Circle) ColorModel() color.Model {
	return color.RGBAModel
}
func (c *Circle) Bounds() image.Rectangle {
return image.Rect(
c.Point.X-c.Radius, // x0
c.Point.Y-c.Radius, // y0
c.Point.X+c.Radius, // x1
c.Point.Y+c.Radius, // y1
)
}
func (c *Circle) At(x, y int) color.Color {
xx, yy, rr := float64(x-c.Point.X)+0.5, float64(y-c.Point.Y)+0.5, float64(c.Radius)
if xx*xx+yy*yy < rr*rr {
return color.Black
}
return color.White
}
// Sector is an image.Image that paints the angular region of a circle
// between two angles (see Sector.At). NOTE(review): marked "coming soon"
// by the original author.
type Sector struct {
	Circle
	// Θ1 is the sector's start angle in radians.
	Θ1 float64
	// Θ2 is the sector's end angle in radians; pixels with Θ1 <= θ < Θ2
	// are painted.
	Θ2 float64
	// Color is used for pixels inside the sector.
	Color color.Color
}
// At returns s.Color for pixels inside the disc whose angle (measured
// counter-clockwise from the positive x axis, with y flipped to point up)
// lies in [Θ1, Θ2); every other pixel yields an opaque alpha mask value.
func (s *Sector) At(x, y int) color.Color {
	// Translate so the circle center is the origin; negate y so that
	// angles grow counter-clockwise as in conventional math coordinates.
	dx := float64(x - s.Point.X)
	dy := -float64(y - s.Point.Y)
	r := float64(s.Radius)
	if dx*dx+dy*dy >= r*r {
		return color.Alpha{255}
	}
	theta := math.Atan2(dy, dx)
	if theta < 0 {
		theta += 2 * math.Pi
	}
	if s.Θ1 <= theta && theta < s.Θ2 {
		return s.Color
	}
	return color.Alpha{255}
}
// SectorCircle is an image.Image that divides a circle into as many equal
// angular segments as there are Sectors, painting each segment with the
// corresponding sector's Color (see SectorCircle.At).
type SectorCircle struct {
	Circle
	// Sectors supplies one Color per equal angular segment.
	Sectors []Sector
}
// At maps the pixel's angle to one of s.Sectors (the full turn is divided
// evenly among them) and returns that sector's color; pixels outside the
// disc yield an opaque alpha mask value.
func (s *SectorCircle) At(x, y int) color.Color {
	dx := float64(x - s.Point.X)
	dy := -float64(y - s.Point.Y)
	r := float64(s.Radius)
	if dx*dx+dy*dy >= r*r {
		return color.Alpha{255}
	}
	theta := math.Atan2(dy, dx)
	if theta < 0 {
		theta += 2 * math.Pi
	}
	// Fraction of a full turn in [0, 1) selects the sector index.
	frac := theta / (2 * math.Pi)
	return s.Sectors[int(frac*float64(len(s.Sectors)))].Color
}
// colorCircle is an image.Image that divides a circle into len(Colors)
// equal angular segments and paints only the first SegmentsFilled of them,
// each filled up to the fraction given by Fill (see colorCircle.At).
type colorCircle struct {
	Circle
	// Colors holds one color per segment.
	Colors []color.Color
	// Fill is the fraction (0..1] of each segment that gets painted.
	Fill float64
	// SegmentsFilled is the number of leading segments that are painted.
	SegmentsFilled int
}
// At paints the pixel with its segment's color when the segment is among
// the first SegmentsFilled segments and the pixel falls within the filled
// fraction of its segment; otherwise it yields an opaque alpha mask value.
func (c *colorCircle) At(x, y int) color.Color {
	dx := float64(x - c.Point.X)
	dy := -float64(y - c.Point.Y)
	r := float64(c.Radius)
	if dx*dx+dy*dy >= r*r {
		return color.Alpha{255}
	}
	theta := math.Atan2(dy, dx)
	if theta < 0 {
		theta += 2 * math.Pi
	}
	// Continuous segment coordinate: integer part = segment index,
	// fractional part = position within that segment.
	pos := theta / (2 * math.Pi) * float64(len(c.Colors))
	idx := int(pos)
	if idx >= c.SegmentsFilled {
		return color.Alpha{255}
	}
	// 1 = fully filled segment, .5 = half filled, 0 = no fill.
	if pos-math.Floor(pos) > c.Fill {
		return color.Alpha{255}
	}
	return c.Colors[idx]
}
// newColorCircle builds a colorCircle of the given radius, centered at the
// origin, whose first `segments` segments are painted with the supplied
// colors, each segment filled to the given fraction.
func newColorCircle(radius int, fill float64, segments int, colors ...color.Color) *colorCircle {
	// Clamp nonsensical fill values to a fully filled segment. The previous
	// check (fill == 0 || fill > 1) let negative values through, which made
	// every segment invisible.
	if fill <= 0 || fill > 1 {
		fill = 1
	}
	cc := colorCircle{
		Circle: Circle{
			Radius: radius,
			Point:  image.Pt(0, 0),
		},
		Fill: fill,
	}
	// Copy the variadic slice so the circle owns its color list.
	cc.Colors = append(cc.Colors, colors...)
	cc.SegmentsFilled = segments
	return &cc
}
// ColorCircle returns an image showing every supplied color as a filled
// segment of a circle with the given radius and per-segment fill fraction.
func ColorCircle(radius int, fill float64, colors ...color.Color) image.Image {
	segments := len(colors)
	return newColorCircle(radius, fill, segments, colors...)
}
// ColorCircles returns one image per color: the i-th image shows the first
// i+1 colors as filled segments of the circle.
func ColorCircles(radius int, fill float64, colors ...color.Color) []image.Image {
	// Preallocate and drop the non-idiomatic `for i, _ := range` form.
	output := make([]image.Image, 0, len(colors))
	for i := range colors {
		output = append(output, newColorCircle(radius, fill, i+1, colors...))
	}
	return output
}
// Red is a color.Color that is fully saturated, opaque red.
type Red struct{}

// RGBA implements color.Color. The contract requires alpha-premultiplied
// values in [0, 0xffff]; the previous version returned 8-bit values (0xff),
// which rendered as nearly black and almost fully transparent.
func (Red) RGBA() (r, g, b, a uint32) {
	return 0xffff, 0, 0, 0xffff
}
package network
import (
"math/rand"
)
// Matrix is a two-dimensional slice of float64 values, indexed as
// matrix[row][column]. (It is a defined type over [][]float64, not an alias.)
type Matrix [][]float64
// RandomMatrix returns a rows×columns matrix filled with uniformly
// distributed random values in the half-open interval [-1, 1).
// (The previous comment referred to *lower* and *upper* parameters that no
// longer exist.)
func RandomMatrix(rows, columns int) (matrix Matrix) {
	matrix = make(Matrix, rows)
	for i := range matrix {
		matrix[i] = make([]float64, columns)
		for j := range matrix[i] {
			// rand.Float64 is in [0, 1); scale and shift to [-1, 1).
			matrix[i][j] = rand.Float64()*2.0 - 1.0
		}
	}
	return
}
// CreateMatrix returns a zero-initialised matrix with the given number of
// rows and columns.
func CreateMatrix(rows, columns int) (matrix Matrix) {
	matrix = make(Matrix, rows)
	for i := range matrix {
		matrix[i] = make([]float64, columns)
	}
	return
}
// Rows returns the number of rows in the matrix.
func Rows(matrix Matrix) int {
	return len(matrix)
}
// Columns returns the number of columns in the matrix (the length of the
// first row), or 0 when the matrix has no rows.
func Columns(matrix Matrix) int {
	if len(matrix) == 0 {
		return 0
	}
	return len(matrix[0])
}
// ApplyFunctionWithIndex replaces every element matrix[i][j] with
// fn(i, j, matrix[i][j]), mutating the matrix in place, and returns it.
func ApplyFunctionWithIndex(matrix Matrix, fn func(i, j int, x float64) float64) Matrix {
	rows, cols := Rows(matrix), Columns(matrix)
	for i := 0; i < rows; i++ {
		for j := 0; j < cols; j++ {
			matrix[i][j] = fn(i, j, matrix[i][j])
		}
	}
	return matrix
}
// ApplyFunction replaces every element x of the matrix with fn(x),
// mutating the matrix in place, and returns it.
func ApplyFunction(matrix Matrix, fn func(x float64) float64) Matrix {
	wrapped := func(_, _ int, x float64) float64 { return fn(x) }
	return ApplyFunctionWithIndex(matrix, wrapped)
}
// ApplyRate scales every element of the matrix by the learning rate,
// mutating the matrix in place, and returns it.
func ApplyRate(matrix Matrix, rate float64) Matrix {
	scale := func(x float64) float64 { return x * rate }
	return ApplyFunction(matrix, scale)
}
// DotProduct returns the matrix product matrix × matrix2 as a new matrix.
// It panics when the inner dimensions do not match.
func DotProduct(matrix, matrix2 Matrix) Matrix {
	inner := Columns(matrix)
	if inner != Rows(matrix2) {
		panic("Cannot make dot product between these two matrix.")
	}
	result := CreateMatrix(Rows(matrix), Columns(matrix2))
	return ApplyFunctionWithIndex(result, func(i, j int, _ float64) float64 {
		sum := 0.0
		for k := 0; k < inner; k++ {
			sum += matrix[i][k] * matrix2[k][j]
		}
		return sum
	})
}
// Sum returns a new matrix holding the element-wise sum of matrix and
// matrix2. It panics (via ErrorNotSameSize) when the dimensions differ.
//
// BUG FIX: the previous version allocated resultMatrix but then applied the
// update to `matrix` itself, mutating the first input in place (unlike
// Difference). The result is now written into the fresh matrix.
func Sum(matrix, matrix2 Matrix) (resultMatrix Matrix) {
	ErrorNotSameSize(matrix, matrix2)
	resultMatrix = CreateMatrix(Rows(matrix), Columns(matrix))
	return ApplyFunctionWithIndex(resultMatrix, func(i, j int, x float64) float64 {
		return matrix[i][j] + matrix2[i][j]
	})
}
// Difference returns a new matrix holding the element-wise difference
// matrix - matrix2. It panics (via ErrorNotSameSize) when the dimensions
// differ.
func Difference(matrix, matrix2 Matrix) (resultMatrix Matrix) {
	ErrorNotSameSize(matrix, matrix2)
	resultMatrix = CreateMatrix(Rows(matrix), Columns(matrix))
	for i := range resultMatrix {
		for j := range resultMatrix[i] {
			resultMatrix[i][j] = matrix[i][j] - matrix2[i][j]
		}
	}
	return resultMatrix
}
// Multiplication returns a new matrix holding the element-wise (Hadamard)
// product of matrix and matrix2. It panics (via ErrorNotSameSize) when the
// dimensions differ.
//
// BUG FIX: the previous version allocated resultMatrix but then applied the
// update to `matrix` itself, mutating the first input in place (unlike
// Difference). The result is now written into the fresh matrix.
func Multiplication(matrix, matrix2 Matrix) (resultMatrix Matrix) {
	ErrorNotSameSize(matrix, matrix2)
	resultMatrix = CreateMatrix(Rows(matrix), Columns(matrix))
	return ApplyFunctionWithIndex(resultMatrix, func(i, j int, x float64) float64 {
		return matrix[i][j] * matrix2[i][j]
	})
}
// Transpose returns a new matrix that is the transpose of the input.
func Transpose(matrix Matrix) (resultMatrix Matrix) {
	rows, cols := Rows(matrix), Columns(matrix)
	resultMatrix = CreateMatrix(cols, rows)
	for j := 0; j < cols; j++ {
		for i := 0; i < rows; i++ {
			resultMatrix[j][i] = matrix[i][j]
		}
	}
	return
}
// ErrorNotSameSize panics if the matrices do not have the same dimension
func ErrorNotSameSize(matrix, matrix2 Matrix) {
if Rows(matrix) != Rows(matrix2) && Columns(matrix) != Columns(matrix2) {
panic("These two matrices must have the same dimension.")
}
} | back/network/matrix.go | 0.887211 | 0.8321 | matrix.go | starcoder |
package main
import (
"github.com/anaseto/gruid"
)
// distance returns the Manhattan (taxicab) distance between two points.
func distance(from, to gruid.Point) int {
	dx := abs(to.X - from.X)
	dy := abs(to.Y - from.Y)
	return dx + dy
}
// distanceChebyshev returns the Chebyshev (chessboard) distance between two
// points: the maximum of the absolute coordinate differences.
func distanceChebyshev(from, to gruid.Point) int {
	dx := abs(to.X - from.X)
	dy := abs(to.Y - from.Y)
	if dy > dx {
		return dy
	}
	return dx
}
// ZP is the zero value for gruid.Point; dirString maps it to the empty
// string, so it also serves as the "no direction" marker.
var ZP gruid.Point = gruid.Point{}
// dirString returns the compass abbreviation ("N", "SE", ...) for one of
// the eight unit directions. The zero point and any non-unit direction map
// to the empty string.
func dirString(dir gruid.Point) string {
	if dir.X < -1 || dir.X > 1 || dir.Y < -1 || dir.Y > 1 {
		return ""
	}
	// Indexed by (X+1, Y+1); Y grows downwards (south).
	names := [3][3]string{
		{"NW", "W", "SW"}, // X == -1
		{"N", "", "S"},    // X == 0
		{"NE", "E", "SE"}, // X == 1
	}
	return names[dir.X+1][dir.Y+1]
}
// keyToDir translates a movement action into its unit direction vector.
// Unrecognized actions map to the zero point.
func keyToDir(k action) gruid.Point {
	switch k {
	case ActionW, ActionRunW:
		return gruid.Point{-1, 0}
	case ActionE, ActionRunE:
		return gruid.Point{1, 0}
	case ActionS, ActionRunS:
		return gruid.Point{0, 1}
	case ActionN, ActionRunN:
		return gruid.Point{0, -1}
	}
	return gruid.Point{}
}
// sign returns -1, 0, or 1 according to the sign of n.
func sign(n int) int {
	if n > 0 {
		return 1
	}
	if n < 0 {
		return -1
	}
	return 0
}
// dirnorm returns a normalized direction between two points: diagonal when
// |dx| == |dy|, otherwise only the dominant cardinal component is kept
// (this corresponds to pruned intermediate nodes in diagonal jump).
func dirnorm(p, q gruid.Point) gruid.Point {
	delta := q.Sub(p)
	ax, ay := abs(delta.X), abs(delta.Y)
	d := gruid.Point{X: sign(delta.X), Y: sign(delta.Y)}
	if ax > ay {
		d.Y = 0
	} else if ax < ay {
		d.X = 0
	}
	return d
}
// idxtopos converts a linear map index back into grid coordinates.
func idxtopos(i int) gruid.Point {
	return gruid.Point{X: i % DungeonWidth, Y: i / DungeonWidth}
}
// idx converts grid coordinates into a linear (row-major) map index.
func idx(p gruid.Point) int {
	return p.X + p.Y*DungeonWidth
}
// valid reports whether p lies within the dungeon bounds.
func valid(p gruid.Point) bool {
	inX := p.X >= 0 && p.X < DungeonWidth
	inY := p.Y >= 0 && p.Y < DungeonHeight
	return inX && inY
}
// inViewCone reports whether to is visible from from when facing dir:
// adjacent cells (and the cell itself) always count; farther cells count
// when their normalized direction equals dir or one of its two 45°
// neighbors.
func inViewCone(dir, from, to gruid.Point) bool {
	// to == from gives distance 0, so this also covers the same-cell case.
	if distance(from, to) <= 1 {
		return true
	}
	d := dirnorm(from, to)
	switch dir {
	case d, leftDir(d), rightDir(d):
		return true
	}
	return false
}
// leftDir returns the unit direction one 45° step to the left of dir (from
// the point of view of something facing dir).
func leftDir(dir gruid.Point) gruid.Point {
	if dir.X == 0 || dir.Y == 0 {
		// Cardinal: stepping left lands on the adjacent diagonal.
		return left(dir, dir)
	}
	// Diagonal: halving the rotated components lands on the adjacent
	// cardinal.
	return gruid.Point{X: (dir.Y + dir.X) / 2, Y: (dir.Y - dir.X) / 2}
}
// rightDir returns the unit direction one 45° step to the right of dir
// (from the point of view of something facing dir).
func rightDir(dir gruid.Point) gruid.Point {
	if dir.X == 0 || dir.Y == 0 {
		// Cardinal: stepping right lands on the adjacent diagonal.
		return right(dir, dir)
	}
	// Diagonal: halving the rotated components lands on the adjacent
	// cardinal.
	return gruid.Point{X: (dir.X - dir.Y) / 2, Y: (dir.Y + dir.X) / 2}
}
// right returns the cell reached from p by stepping perpendicular to dir,
// to the right of something facing dir (a quarter-turn of dir added to p).
func right(p gruid.Point, dir gruid.Point) gruid.Point {
	q := gruid.Point{X: p.X - dir.Y, Y: p.Y + dir.X}
	return q
}
func left(p gruid.Point, dir gruid.Point) gruid.Point {
return gruid.Point{p.X + dir.Y, p.Y - dir.X}
} | pos.go | 0.723798 | 0.476519 | pos.go | starcoder |
package httpref
var Headers = References{
{
Name: "Headers",
IsTitle: true,
Summary: "Guidance about headers",
Description: `HTTP headers let the client and the server pass additional information with an HTTP request or response. An HTTP header consists of its case-insensitive name followed by a colon (:), then by its value. Whitespace before the value is ignored.
Custom proprietary headers have historically been used with an X- prefix, but this convention was deprecated in June 2012 because of the inconveniences it caused when nonstandard fields became standard in RFC 6648; others are listed in an IANA registry, whose original content was defined in RFC 4229. IANA also maintains a registry of proposed new HTTP headers.
Headers can be grouped according to their contexts:
General headers apply to both requests and responses, but with no relation to the data transmitted in the body.
Request headers contain more information about the resource to be fetched, or about the client requesting the resource.
Response headers hold additional information about the response, like its location or about the server providing it.
Entity headers contain information about the body of the resource, like its content length or MIME type.
Headers can also be grouped according to how proxies handle them:
Connection
Keep-Alive
Proxy-Authenticate
Proxy-Authorization
TE
Trailer
Transfer-Encoding
Upgrade.
End-to-end headers
These headers must be transmitted to the final recipient of the message: the server for a request, or the client for a response. Intermediate proxies must retransmit these headers unmodified and caches must store them.
Hop-by-hop headers
These headers are meaningful only for a single transport-level connection, and must not be retransmitted by proxies or cached. Note that only hop-by-hop headers may be set using the Connection general header.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers`,
},
{
Name: "Authentication",
IsTitle: true,
Summary: "https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Authentication",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Authentication`,
},
{
Name: "WWW-Authenticate",
Summary: "Defines the authentication method that should be used to access a resource.",
Description: `The HTTP WWW-Authenticate response header defines the authentication method that should be used to gain access to a resource.
The WWW-Authenticate header is sent along with a 401 Unauthorized response.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/WWW-Authenticate`,
},
{
Name: "Authorization",
Summary: "Contains the credentials to authenticate a user-agent with a server.",
Description: `The HTTP Authorization request header contains the credentials to authenticate a user agent with a server, usually after the server has responded with a 401 Unauthorized status and the WWW-Authenticate header.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Authorization`,
},
{
Name: "Proxy-Authenticate",
Summary: "Defines the authentication method that should be used to access a resource behind a proxy server.",
Description: `The HTTP Proxy-Authenticate response header defines the authentication method that should be used to gain access to a resource behind a proxy server. It authenticates the request to the proxy server, allowing it to transmit the request further.
The Proxy-Authenticate header is sent along with a 407 Proxy Authentication Required.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Proxy-Authenticate`,
},
{
Name: "Caching",
IsTitle: true,
Summary: "https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Caching",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Caching`,
},
{
Name: "Age",
Summary: "The time, in seconds, that the object has been in a proxy cache.",
Description: `The Age header contains the time in seconds the object has been in a proxy cache.
The Age header is usually close to zero. If it is Age: 0, it was probably just fetched from the origin server; otherwise It is usually calculated as a difference between the proxy's current date and the Date general header included in the HTTP response.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Age`,
},
{
Name: "Cache-Control",
Summary: "Directives for caching mechanisms in both requests and responses.",
Description: `The Cache-Control HTTP header holds directives (instructions) for caching in both requests and responses. A given directive in a request does not mean the same directive should be in the response.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control`,
},
{
Name: "Clear-Site-Data",
Summary: "Clears browsing data (e.g. cookies, storage, cache) associated with the requesting website.",
Description: `The Clear-Site-Data header clears browsing data (cookies, storage, cache) associated with the requesting website. It allows web developers to have more control over the data stored locally by a browser for their origins.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Clear-Site-Data`,
},
{
Name: "Expires",
Summary: "The date/time after which the response is considered stale.",
Description: `The Expires header contains the date/time after which the response is considered stale.
Invalid dates, like the value 0, represent a date in the past and mean that the resource is already expired.
If there is a Cache-Control header with the max-age or s-maxage directive in the response, the Expires header is ignored.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Expires`,
},
{
Name: "Pragma",
Summary: "Implementation-specific header that may have various effects anywhere along the request-response chain. Used for backwards compatibility with HTTP/1.0 caches where the Cache-Control header is not yet present.",
Description: `The Pragma HTTP/1.0 general header is an implementation-specific header that may have various effects along the request-response chain. It is used for backwards compatibility with HTTP/1.0 caches where the Cache-Control HTTP/1.1 header is not yet present.
Note: Pragma is not specified for HTTP responses and is therefore not a reliable replacement for the general HTTP/1.1 Cache-Control header, although it does behave the same as Cache-Control: no-cache, if the Cache-Control header field is omitted in a request. Use Pragma only for backwards compatibility with HTTP/1.0 clients.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Pragma`,
},
{
Name: "Warning",
Summary: "General warning information about possible problems.",
Description: `
Note: The Warning header is soon to be deprecated; see Warning (https://github.com/httpwg/http-core/issues/139) and Warning: header & stale-while-revalidate (https://github.com/whatwg/fetch/issues/913) for more details.
The Warning general HTTP header contains information about possible problems with the status of the message. More than one Warning header may appear in a response.
Warning header fields can in general be applied to any message, however some warn-codes are specific to caches and can only be applied to response messages.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Warning`,
},
{
Name: "Client hints",
IsTitle: true,
Summary: "https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Client_hints",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Client_hints`,
},
{
Name: "Accept-CH",
Summary: "Servers can advertise support for Client Hints using the Accept-CH header field or an equivalent HTML <meta> element with http-equiv attribute ([HTML5]).",
Description: `Secure context
This feature is available only in secure contexts (HTTPS), in some or all supporting browsers.
This is an experimental technology
Check the Browser compatibility table carefully before using this in production.
The Accept-CH header is set by the server to specify which Client Hints headers client should include in subsequent requests.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept-CH`,
},
{
Name: "Accept-CH-Lifetime",
Summary: "Servers can ask the client to remember the set of Client Hints that the server supports for a specified period of time, to enable delivery of Client Hints on subsequent requests to the server’s origin ([RFC6454]).",
Description: `The Accept-CH-Lifetime header is set by the server to specify the persistence of Accept-CH header value that specifies for which Client Hints headers client should include in subsequent requests.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept-CH-Lifetime`,
},
{
Name: "Early-Data",
Summary: "Indicates that the request has been conveyed in early data.",
Description: `The Early-Data header is set by an intermediate to indicate that the request has been conveyed in TLS early data, and additionally indicates that an intermediary understands the 425 (Too Early) status code. The Early-Data header is not set by the originator of the request (i.e., a browser).
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Early-Data`,
},
{
Name: "Content-DPR",
Summary: "A number that indicates the ratio between physical pixels over CSS pixels of the selected image response.",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Client_hints`,
},
{
Name: "DPR",
Summary: "A number that indicates the client’s current Device Pixel Ratio (DPR), which is the ratio of physical pixels over CSS pixels (Section 5.2 of [CSSVAL]) of the layout viewport (Section 9.1.1 of [CSS2]) on the device.",
Description: `The DPR header is a Client Hints headers which represents the client device pixel ratio (DPR), which is the the number of physical device pixels corresponding to every CSS pixel.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/DPR`,
},
{
Name: "Device-Memory",
Summary: "Technically a part of Device Memory API, this header represents an approximate amount of RAM client has.",
Description: `The Device-Memory header is a Device Memory API header that works like Client Hints header which represents the approximate amount of RAM client device has.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Device-Memory`,
},
{
Name: "Save-Data",
Summary: "A boolean that indicates the user agent's preference for reduced data usage.",
Description: `The Save-Data header field is a boolean which, in requests, indicates the client's preference for reduced data usage. This could be for reasons such as high transfer costs, slow connection speeds, etc.
A value of On indicates explicit user opt-in into a reduced data usage mode on the client, and when communicated to origins allows them to deliver alternative content to reduce the data downloaded such as smaller image and video resources, different markup and styling, disabled polling and automatic updates, and so on.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Save-Data`,
},
{
Name: "Viewport-Width",
Summary: "A number that indicates the layout viewport width in CSS pixels. The provided pixel value is a number rounded to the smallest following integer (i.e. ceiling value).",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Client_hints`,
},
{
Name: "Viewport-Width",
Summary: "A number that indicates the layout viewport width in CSS pixels. The provided pixel value is a number rounded to the smallest following integer (i.e. ceiling value).",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Client_hints`,
},
{
Name: "Width",
Summary: "The Width request header field is a number that indicates the desired resource width in physical pixels (i.e. intrinsic size of an image).",
Description: `
The Width request header field is a number that indicates the desired resource width in physical pixels (i.e. intrinsic size of an image). The provided pixel value is a number rounded to the smallest following integer (i.e. ceiling value).
If the desired resource width is not known at the time of the request or the resource does not have a display width, the Width header field can be omitted. If Width occurs in a message more than once, the last value overrides all previous occurrences
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Client_hints`,
},
{
Name: "Conditionals",
Summary: "https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Conditionals",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Conditionals`,
},
{
Name: "Last-Modified",
Summary: "The last modification date of the resource, used to compare several versions of the same resource. It is less accurate than ETag, but easier to calculate in some environments. Conditional requests using If-Modified-Since and If-Unmodified-Since use this value to change the behavior of the request.",
Description: `The Last-Modified response HTTP header contains the date and time at which the origin server believes the resource was last modified. It is used as a validator to determine if a resource received or stored is the same. Less accurate than an ETag header, it is a fallback mechanism. Conditional requests containing If-Modified-Since or If-Unmodified-Since headers make use of this field.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Last-Modified`,
},
{
Name: "ETag",
Summary: "A unique string identifying the version of the resource. Conditional requests using If-Match and If-None-Match use this value to change the behavior of the request.",
Description: `The ETag HTTP response header is an identifier for a specific version of a resource. It lets caches be more efficient and save bandwidth, as a web server does not need to resend a full response if the content has not changed. Additionally, etags help prevent simultaneous updates of a resource from overwriting each other ("mid-air collisions").
If the resource at a given URL changes, a new Etag value must be generated. A comparison of them can determine whether two representations of a resource are the same. Etags are therefore similar to fingerprints, and might also be used for tracking purposes by some servers. They might also be set to persist indefinitely by a tracking server.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/ETag`,
},
{
Name: "If-Match",
Summary: "Makes the request conditional, and applies the method only if the stored resource matches one of the given ETags.",
Description: `The If-Match HTTP request header makes the request conditional. For GET and HEAD methods, the server will send back the requested resource only if it matches one of the listed ETags. For PUT and other non-safe methods, it will only upload the resource in this case.
The comparison with the stored ETag uses the strong comparison algorithm, meaning two files are considered identical byte to byte only. If a listed ETag has the W/ prefix indicating a weak entity tag, it will never match under this comparison algorithm.
There are two common use cases:
For GET and HEAD methods, used in combination with a Range header, it can guarantee that the new ranges requested comes from the same resource than the previous one. If it doesn't match, then a 416 (Range Not Satisfiable) response is returned.
For other methods, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/If-Match`,
},
{
Name: "If-None-Match",
Summary: "Makes the request conditional, and applies the method only if the stored resource doesn't match any of the given ETags. This is used to update caches (for safe requests), or to prevent to upload a new resource when one already exists.",
Description: `The If-None-Match HTTP request header makes the request conditional. For GET and HEAD methods, the server will send back the requested resource, with a 200 status, only if it doesn't have an ETag matching the given ones. For other methods, the request will be processed only if the eventually existing resource's ETag doesn't match any of the values listed.
When the condition fails for GET and HEAD methods, then the server must return HTTP status code 304 (Not Modified). For methods that apply server-side changes, the status code 412 (Precondition Failed) is used. Note that the server generating a 304 response MUST generate any of the following header fields that would have been sent in a 200 (OK) response to the same request: Cache-Control, Content-Location, Date, ETag, Expires, and Vary.
The comparison with the stored ETag uses the weak comparison algorithm, meaning two files are considered identical if the content is equivalent — they don't have to be identical byte for byte. For example, two pages that differ by the date of generation in the footer would still be considered as identical.
When used in combination with If-Modified-Since, If-None-Match has precedence (if the server supports it).
There are two common use cases:
For GET and HEAD methods, to update a cached entity that has an associated ETag.
For other methods, and in particular for PUT, If-None-Match used with the * value can be used to save a file not known to exist, guaranteeing that another upload didn't happen before, losing the data of the previous put; this problem is a variation of the lost update problem.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/If-None-Match`,
},
{
Name: "If-Modified-Since",
Summary: "Makes the request conditional, and expects the entity to be transmitted only if it has been modified after the given date. This is used to transmit data only when the cache is out of date.",
Description: `The If-Modified-Since request HTTP header makes the request conditional: the server will send back the requested resource, with a 200 status, only if it has been last modified after the given date. If the request has not been modified since, the response will be a 304 without any body; the Last-Modified response header of a previous request will contain the date of last modification. Unlike If-Unmodified-Since, If-Modified-Since can only be used with a GET or HEAD.
When used in combination with If-None-Match, it is ignored, unless the server doesn't support If-None-Match.
The most common use case is to update a cached entity that has no associated ETag.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/If-Modified-Since`,
},
{
Name: "If-Unmodified-Since",
Summary: "Makes the request conditional, and expects the entity to be transmitted only if it has not been modified after the given date. This ensures the coherence of a new fragment of a specific range with previous ones, or to implement an optimistic concurrency control system when modifying existing documents.",
Description: `The If-Unmodified-Since request HTTP header makes the request conditional: the server will send back the requested resource, or accept it in the case of a POST or another non-safe method, only if it has not been last modified after the given date. If the resource has been modified after the given date, the response will be a 412 (Precondition Failed) error.
There are two common use cases:
In conjunction with non-safe methods, like POST, it can be used to implement an optimistic concurrency control, like done by some wikis: editions are rejected if the stored document has been modified since the original has been retrieved.
In conjunction with a range request with a If-Range header, it can be used to ensure that the new fragment requested comes from an unmodified document.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/If-Unmodified-Since`,
},
{
Name: "Vary",
Summary: "Determines how to match request headers to decide whether a cached response can be used rather than requesting a fresh one from the origin server.",
Description: `The Vary HTTP response header determines how to match future request headers to decide whether a cached response can be used rather than requesting a fresh one from the origin server. It is used by the server to indicate which headers it used when selecting a representation of a resource in a content negotiation algorithm.
The Vary header should be set on a 304 Not Modified response exactly like it would have been set on an equivalent 200 OK response.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Vary`,
},
{
Name: "Connection management",
IsTitle: true,
Summary: "https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Connection_management",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Connection_management`,
},
{
Name: "Connection",
Summary: "Controls whether the network connection stays open after the current transaction finishes.",
Description: `The Connection general header controls whether or not the network connection stays open after the current transaction finishes. If the value sent is keep-alive, the connection is persistent and not closed, allowing for subsequent requests to the same server to be done.
Connection-specific header fields such as Connection must not be used with HTTP/2.
Except for the standard hop-by-hop headers (Keep-Alive, Transfer-Encoding, TE, Connection, Trailer, Upgrade, Proxy-Authorization and Proxy-Authenticate), any hop-by-hop headers used by the message must be listed in the Connection header, so that the first proxy knows it has to consume them and not forward them further. Standard hop-by-hop headers can be listed too (it is often the case of Keep-Alive, but this is not mandatory).
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Connection`,
},
{
Name: "Keep-Alive",
Summary: "Controls how long a persistent connection should stay open.",
Description: `The Keep-Alive general header allows the sender to hint about how the connection may be used to set a timeout and a maximum amount of requests.
The Connection header needs to be set to "keep-alive" for this header to have any meaning. Also, Connection and Keep-Alive are ignored in HTTP/2; connection management is handled by other mechanisms there.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Keep-Alive`,
},
{
Name: "Content negotiation",
IsTitle: true,
Summary: "https://developer.mozilla.org/en-US/docs/Web/HTTP/Content_negotiation",
Description: `In HTTP, content negotiation is the mechanism that is used for serving different representations of a resource at the same URI, so that the user agent can specify which is best suited for the user (for example, which language of a document, which image format, or which content encoding).
https://developer.mozilla.org/en-US/docs/Web/HTTP/Content_negotiation`,
},
{
Name: "Accept",
Summary: "Informs the server about the types of data that can be sent back.",
Description: `The Accept request HTTP header advertises which content types, expressed as MIME types, the client is able to understand. Using content negotiation, the server then selects one of the proposals, uses it and informs the client of its choice with the Content-Type response header. Browsers set adequate values for this header depending on the context where the request is done: when fetching a CSS stylesheet a different value is set for the request than when fetching an image, video or a script.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept`,
},
{
Name: "Accept-Charset",
Summary: "Which character encodings the client understands.",
Description: `The Accept-Charset request HTTP header advertises which character encodings the client understands. Using content negotiation, the server selects one of the encodings, uses it, and informs the client of its choice within the Content-Type response header, usually in a charset= parameter. Browsers usually don't send this header, as the default value for each resource is usually correct and transmitting it would allow fingerprinting.
If the server cannot serve any character encoding from this request header, it can theoretically send back a 406 Not Acceptable error code. But for a better user experience, this is rarely done and the Accept-Charset header is ignored.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept-Charset`,
},
{
Name: "Accept-Encoding",
Summary: "The encoding algorithm, usually a compression algorithm, that can be used on the resource sent back.",
Description: `The Accept-Encoding request HTTP header advertises which content encoding, usually a compression algorithm, the client is able to understand. Using content negotiation, the server selects one of the proposals, uses it and informs the client of its choice with the Content-Encoding response header.
Even if both the client and the server support the same compression algorithms, the server may choose not to compress the body of a response, if the identity value is also acceptable. Two common cases lead to this:
The data to be sent is already compressed and a second compression won't lead to smaller data to be transmitted. This may be the case with some image formats;
The server is overloaded and cannot afford the computational overhead induced by the compression requirement. Typically, Microsoft recommends not to compress if a server uses more than 80% of its computational power.
As long as the identity value, meaning no encoding, is not explicitly forbidden, by an identity;q=0 or a *;q=0 without another explicitly set value for identity, the server must never send back a 406 Not Acceptable error.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept-Encoding`,
},
{
Name: "Accept-Language",
Summary: "Informs the server about the human language the server is expected to send back. This is a hint and is not necessarily under the full control of the user: the server should always pay attention not to override an explicit user choice (like selecting a language from a dropdown).",
Description: `The Accept-Language request HTTP header advertises which languages the client is able to understand, and which locale variant is preferred. (By languages, we mean natural languages, such as English, and not programming languages.) Using content negotiation, the server then selects one of the proposals, uses it and informs the client of its choice with the Content-Language response header. Browsers set adequate values for this header according to their user interface language and even if a user can change it, this happens rarely (and is frowned upon as it leads to fingerprinting).
This header is a hint to be used when the server has no way of determining the language via another way, like a specific URL, that is controlled by an explicit user decision. It is recommended that the server never overrides an explicit decision. The content of the Accept-Language is often out of the control of the user (like when traveling and using an Internet Cafe in a different country); the user may also want to visit a page in another language than the locale of their user interface.
If the server cannot serve any matching language, it can theoretically send back a 406 (Not Acceptable) error code. But, for a better user experience, this is rarely done and a more common way is to ignore the Accept-Language header in this case.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept-Language`,
},
{
Name: "Controls",
IsTitle: true,
Summary: "https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Controls",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Controls`,
},
{
Name: "Expect",
Summary: "Indicates expectations that need to be fulfilled by the server to properly handle the request.",
Description: `The Expect HTTP request header indicates expectations that need to be fulfilled by the server in order to properly handle the request.
The only expectation defined in the specification is Expect: 100-continue, to which the server shall respond with:
100 if the information contained in the header is sufficient to cause an immediate success,
417 (Expectation Failed) if it cannot meet the expectation; or any other 4xx status otherwise.
For example, the server may reject a request if its Content-Length is too large.
No common browsers send the Expect header, but some other clients such as cURL do so by default.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Expect`,
},
{
Name: "Max-Forwards",
Summary: "https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Controls",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Controls`,
},
{
Name: "Cookies",
IsTitle: true,
Summary: "https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Cookies",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Cookies`,
},
{
Name: "Cookie",
Summary: "Contains stored HTTP cookies previously sent by the server with the Set-Cookie header.",
Description: `The Cookie HTTP request header contains stored HTTP cookies previously sent by the server with the Set-Cookie header.
The Cookie header is optional and may be omitted if, for example, the browser's privacy settings block cookies.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cookie`,
},
{
Name: "Set-Cookie",
Summary: "Send cookies from the server to the user-agent.",
Description: `The Set-Cookie HTTP response header is used to send cookies from the server to the user agent, so the user agent can send them back to the server later.
For more information, see the guide on HTTP cookies.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie`,
},
{
Name: "CORS",
IsTitle: true,
Summary: "https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#CORS",
Description: `Cross-Origin Resource Sharing (CORS) is a mechanism that uses additional HTTP headers to tell browsers to give a web application running at one origin, access to selected resources from a different origin. A web application executes a cross-origin HTTP request when it requests a resource that has a different origin (domain, protocol, or port) from its own.
An example of a cross-origin request: the front-end JavaScript code served from https://domain-a.com uses XMLHttpRequest to make a request for https://domain-b.com/data.json.
For security reasons, browsers restrict cross-origin HTTP requests initiated from scripts. For example, XMLHttpRequest and the Fetch API follow the same-origin policy. This means that a web application using those APIs can only request resources from the same origin the application was loaded from, unless the response from other origins includes the right CORS headers.
https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS`,
},
{
Name: "Access-Control-Allow-Origin",
Summary: "Indicates whether the response can be shared.",
Description: `The Access-Control-Allow-Origin response header indicates whether the response can be shared with requesting code from the given origin.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Origin`,
},
{
Name: "Access-Control-Allow-Credentials",
Summary: "Indicates whether the response to the request can be exposed when the credentials flag is true.",
Description: `The Access-Control-Allow-Credentials response header tells browsers whether to expose the response to frontend JavaScript code when the request's credentials mode (Request.credentials) is include.
When a request's credentials mode (Request.credentials) is include, browsers will only expose the response to frontend JavaScript code if the Access-Control-Allow-Credentials value is true.
Credentials are cookies, authorization headers or TLS client certificates.
When used as part of a response to a preflight request, this indicates whether or not the actual request can be made using credentials. Note that simple GET requests are not preflighted, and so if a request is made for a resource with credentials, if this header is not returned with the resource, the response is ignored by the browser and not returned to web content.
The Access-Control-Allow-Credentials header works in conjunction with the XMLHttpRequest.withCredentials property or with the credentials option in the Request() constructor of the Fetch API. For a CORS request with credentials, in order for browsers to expose the response to frontend JavaScript code, both the server (using the Access-Control-Allow-Credentials header) and the client (by setting the credentials mode for the XHR, Fetch, or Ajax request) must indicate that they’re opting in to including credentials.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Credentials`,
},
{
Name: "Access-Control-Allow-Headers",
Summary: "Used in response to a preflight request to indicate which HTTP headers can be used when making the actual request.",
Description: `The Access-Control-Allow-Headers response header is used in response to a preflight request which includes the Access-Control-Request-Headers to indicate which HTTP headers can be used during the actual request.
This header is required if the request has an Access-Control-Request-Headers header.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Headers`,
},
{
Name: "Access-Control-Allow-Methods",
Summary: "Specifies the methods allowed when accessing the resource in response to a preflight request.",
Description: `The Access-Control-Allow-Methods response header specifies the method or methods allowed when accessing the resource in response to a preflight request.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Methods`,
},
{
Name: "Access-Control-Expose-Headers",
Summary: "Indicates which headers can be exposed as part of the response by listing their names.",
Description: `The Access-Control-Expose-Headers response header indicates which headers can be exposed as part of the response by listing their names.
By default, only the 6 CORS-safelisted response headers are exposed:
Cache-Control
Content-Language
Content-Type
Expires
Last-Modified
Pragma
If you want clients to be able to access other headers, you have to list them using the Access-Control-Expose-Headers header.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Expose-Headers`,
},
{
Name: "Access-Control-Max-Age",
Summary: "Indicates how long the results of a preflight request can be cached.",
Description: `The Access-Control-Max-Age response header indicates how long the results of a preflight request (that is the information contained in the Access-Control-Allow-Methods and Access-Control-Allow-Headers headers) can be cached.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Max-Age`,
},
{
Name: "Access-Control-Request-Headers",
Summary: "Used when issuing a preflight request to let the server know which HTTP headers will be used when the actual request is made.",
Description: `The Access-Control-Request-Headers request header is used by browsers when issuing a preflight request, to let the server know which HTTP headers the client might send when the actual request is made.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Request-Headers`,
},
{
Name: "Origin",
Summary: "Indicates where a fetch originates from.",
Description: `The Origin request header indicates where a fetch originates from. It doesn't include any path information, but only the server name. It is sent with CORS requests, as well as with POST requests. It is similar to the Referer header, but, unlike this header, it doesn't disclose the whole path.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin`,
},
{
Name: "Timing-Allow-Origin",
Summary: "Specifies origins that are allowed to see values of attributes retrieved via features of the Resource Timing API, which would otherwise be reported as zero due to cross-origin restrictions.",
Description: `The Timing-Allow-Origin response header specifies origins that are allowed to see values of attributes retrieved via features of the Resource Timing API, which would otherwise be reported as zero due to cross-origin restrictions.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Timing-Allow-Origin`,
},
{
Name: "Do Not Track",
IsTitle: true,
Summary: "https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Do_Not_Track",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Do_Not_Track`,
},
{
Name: "DNT",
Summary: "Expresses the user's tracking preference.",
Description: `The DNT (Do Not Track) request header indicates the user's tracking preference. It lets users indicate whether they would prefer privacy rather than personalized content.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/DNT`,
},
{
Name: "Tk",
Summary: "Indicates the tracking status of the corresponding response.",
Description: `The Tk response header indicates the tracking status that applied to the corresponding request.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Tk`,
},
{
Name: "Downloads",
IsTitle: true,
Summary: "https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Downloads",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Downloads`,
},
{
Name: "Content-Disposition",
Summary: "Indicates if the resource transmitted should be displayed inline (default behavior without the header), or if it should be handled like a download and the browser should present a “Save As” dialog.",
Description: `In a regular HTTP response, the Content-Disposition response header is a header indicating if the content is expected to be displayed inline in the browser, that is, as a Web page or as part of a Web page, or as an attachment, that is downloaded and saved locally.
In a multipart/form-data body, the HTTP Content-Disposition general header is a header that can be used on the subpart of a multipart body to give information about the field it applies to. The subpart is delimited by the boundary defined in the Content-Type header. Used on the body itself, Content-Disposition has no effect.
The Content-Disposition header is defined in the larger context of MIME messages for e-mail, but only a subset of the possible parameters apply to HTTP forms and POST requests. Only the value form-data, as well as the optional directive name and filename, can be used in the HTTP context.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition`,
},
{
Name: "Message body information",
IsTitle: true,
Summary: "https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Message_body_information",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Message_body_information`,
},
{
Name: "Content-Length",
Summary: "The size of the resource, in decimal number of bytes.",
Description: `The Content-Length entity header indicates the size of the entity-body, in bytes, sent to the recipient.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Length`,
},
{
Name: "Content-Type",
Summary: "Indicates the media type of the resource.",
Description: `The Content-Type entity header is used to indicate the media type of the resource.
In responses, a Content-Type header tells the client what the content type of the returned content actually is. Browsers will do MIME sniffing in some cases and will not necessarily follow the value of this header; to prevent this behavior, the header X-Content-Type-Options can be set to nosniff.
In requests, (such as POST or PUT), the client tells the server what type of data is actually sent.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type`,
},
{
Name: "Content-Encoding",
Summary: "Used to specify the compression algorithm.",
Description: `The Content-Encoding entity header is used to compress the media-type. When present, its value indicates which encodings were applied to the entity-body. It lets the client know how to decode in order to obtain the media-type referenced by the Content-Type header.
The recommendation is to compress data as much as possible and therefore to use this field, but some types of resources, such as jpeg images, are already compressed. Sometimes, using additional compression doesn't reduce payload size and can even make the payload longer.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding`,
},
{
Name: "Content-Language",
Summary: "Describes the human language(s) intended for the audience, so that it allows a user to differentiate according to the users' own preferred language.",
Description: `The Content-Language entity header is used to describe the language(s) intended for the audience, so that it allows a user to differentiate according to the users' own preferred language.
For example, if "Content-Language: de-DE" is set, it says that the document is intended for German language speakers (however, it doesn't indicate the document is written in German. For example, it might be written in English as part of a language course for German speakers. If you want to indicate which language the document is written in, use the lang attribute instead).
If no Content-Language is specified, the default is that the content is intended for all language audiences. Multiple language tags are also possible, as well as applying the Content-Language header to various media types and not only to textual documents.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Language`,
},
{
Name: "Content-Location",
Summary: "Indicates an alternate location for the returned data.",
Description: `The Content-Location header indicates an alternate location for the returned data. The principal use is to indicate the URL of a resource transmitted as the result of content negotiation.
Location and Content-Location are different. Location indicates the URL of a redirect, while Content-Location indicates the direct URL to use to access the resource, without further content negotiation in the future. Location is a header associated with the response, while Content-Location is associated with the data returned. This distinction may seem abstract without examples.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Location`,
},
{
Name: "Proxies",
IsTitle: true,
Summary: "https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Proxies",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Proxies`,
},
{
Name: "Forwarded",
Summary: "Contains information from the client-facing side of proxy servers that is altered or lost when a proxy is involved in the path of the request.",
Description: `The Forwarded header contains information from the client-facing side of proxy servers that is altered or lost when a proxy is involved in the path of the request.
The alternative and de-facto standard versions of this header are the X-Forwarded-For, X-Forwarded-Host and X-Forwarded-Proto headers.
This header is used for debugging, statistics, and generating location-dependent content and by design it exposes privacy sensitive information, such as the IP address of the client. Therefore the user's privacy must be kept in mind when deploying this header.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Forwarded`,
},
{
Name: "X-Forwarded-For",
Summary: "Identifies the originating IP addresses of a client connecting to a web server through an HTTP proxy or a load balancer.",
Description: `The X-Forwarded-For (XFF) header is a de-facto standard header for identifying the originating IP address of a client connecting to a web server through an HTTP proxy or a load balancer. When traffic is intercepted between clients and servers, server access logs contain the IP address of the proxy or load balancer only. To see the original IP address of the client, the X-Forwarded-For request header is used.
This header is used for debugging, statistics, and generating location-dependent content and by design it exposes privacy sensitive information, such as the IP address of the client. Therefore the user's privacy must be kept in mind when deploying this header.
A standardized version of this header is the HTTP Forwarded header.
X-Forwarded-For is also an email-header indicating that an email-message was forwarded from another account.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For`,
},
{
Name: "X-Forwarded-Host",
Summary: "Identifies the original host requested that a client used to connect to your proxy or load balancer.",
Description: `The X-Forwarded-Host (XFH) header is a de-facto standard header for identifying the original host requested by the client in the Host HTTP request header.
Host names and ports of reverse proxies (load balancers, CDNs) may differ from the origin server handling the request, in that case the X-Forwarded-Host header is useful to determine which Host was originally used.
This header is used for debugging, statistics, and generating location-dependent content and by design it exposes privacy sensitive information, such as the IP address of the client. Therefore the user's privacy must be kept in mind when deploying this header.
A standardized version of this header is the HTTP Forwarded header.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-Host`,
},
{
Name: "X-Forwarded-Proto",
Summary: "Identifies the protocol (HTTP or HTTPS) that a client used to connect to your proxy or load balancer.",
Description: `The X-Forwarded-Proto (XFP) header is a de-facto standard header for identifying the protocol (HTTP or HTTPS) that a client used to connect to your proxy or load balancer. Your server access logs contain the protocol used between the server and the load balancer, but not the protocol used between the client and the load balancer. To determine the protocol used between the client and the load balancer, the X-Forwarded-Proto request header can be used.
A standardized version of this header is the HTTP Forwarded header.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-Proto`,
},
{
Name: "Via",
Summary: "Added by proxies, both forward and reverse proxies, and can appear in the request headers and the response headers.",
Description: `The Via general header is added by proxies, both forward and reverse proxies, and can appear in the request headers and the response headers. It is used for tracking message forwards, avoiding request loops, and identifying the protocol capabilities of senders along the request/response chain.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Via`,
},
{
Name: "Redirects",
IsTitle: true,
Summary: "https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Redirects",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Redirects`,
},
{
Name: "Location",
Summary: "Indicates the URL to redirect a page to.",
Description: `The Location response header indicates the URL to redirect a page to. It only provides a meaning when served with a 3xx (redirection) or 201 (created) status response.
In cases of redirection, the HTTP method used to make the new request to fetch the page pointed to by Location depends on the original method and the kind of redirection:
If 303 (See Other) responses always lead to the use of a GET method, 307 (Temporary Redirect) and 308 (Permanent Redirect) don't change the method used in the original request;
301 (Moved Permanently) and 302 (Found) don't change the method most of the time, though older user-agents may (so you basically don't know).
All responses with one of these status codes send a Location header.
In cases of resource creation, it indicates the URL to the newly created resource.
Location and Content-Location are different: Location indicates the target of a redirection (or the URL of a newly created resource), while Content-Location indicates the direct URL to use to access the resource when content negotiation happened, without the need of further content negotiation. Location is a header associated with the response, while Content-Location is associated with the entity returned.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Location`,
},
{
Name: "Request context",
IsTitle: true,
Summary: "https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Request_context",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Request_context`,
},
{
Name: "From",
Summary: "Contains an Internet email address for a human user who controls the requesting user agent.",
Description: `The From request header contains an Internet email address for a human user who controls the requesting user agent.
If you are running a robotic user agent (e.g. a crawler), the From header should be sent, so you can be contacted if problems occur on servers, such as if the robot is sending excessive, unwanted, or invalid requests.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/From`,
},
{
Name: "Host",
Summary: "Specifies the domain name of the server (for virtual hosting), and (optionally) the TCP port number on which the server is listening.",
Description: `The Host request header specifies the domain name of the server (for virtual hosting), and (optionally) the TCP port number on which the server is listening.
If no port is given, the default port for the service requested (e.g., "80" for an HTTP URL) is implied.
A Host header field must be sent in all HTTP/1.1 request messages. A 400 (Bad Request) status code will be sent to any HTTP/1.1 request message that lacks a Host header field or contains more than one.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Host`,
},
{
Name: "Referer",
Summary: "The address of the previous web page from which a link to the currently requested page was followed.",
Description: `The Referer request header contains the address of the previous web page from which a link to the currently requested page was followed. The Referer header allows servers to identify where people are visiting them from and may use that data for analytics, logging, or optimized caching, for example.
Important: Although this header has many innocent uses it can have undesirable consequences for user security and privacy. See Referer header: privacy and security concerns for more information and mitigations.
Note that referer is actually a misspelling of the word "referrer". See HTTP referer on Wikipedia for more details.
A Referer header is not sent by browsers if:
The referring resource is a local "file" or "data" URI.
An unsecured HTTP request is used and the referring page was received with a secure protocol (HTTPS).
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Referer`,
},
{
Name: "Referrer",
Summary: "See the Referer header",
Description: `This header was spelt incorrectly in the original implementation. See Referer for further details.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Referer`,
},
{
Name: "Referrer-Policy",
Summary: "Governs which referrer information sent in the Referer header should be included with requests made.",
Description: `The Referrer-Policy HTTP header controls how much referrer information (sent via the Referer header) should be included with requests.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Referrer-Policy`,
},
{
Name: "User-Agent",
Summary: "Contains a characteristic string that allows the network protocol peers to identify the application type, operating system, software vendor or software version of the requesting software user agent. See also the Firefox user agent string reference.",
Description: `The User-Agent request header is a characteristic string that lets servers and network peers identify the application, operating system, vendor, and/or version of the requesting user agent.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/User-Agent`,
},
{
Name: "Response context",
IsTitle: true,
Summary: "https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Response_context",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Response_context`,
},
{
Name: "Allow",
Summary: "Lists the set of HTTP request methods supported by a resource.",
Description: `The Allow header lists the set of methods supported by a resource.
This header must be sent if the server responds with a 405 Method Not Allowed status code to indicate which request methods can be used. An empty Allow header indicates that the resource allows no request methods, which might occur temporarily for a given resource, for example.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Allow`,
},
{
Name: "Server",
Summary: "Contains information about the software used by the origin server to handle the request.",
Description: `The Server header contains information about the software used by the origin server to handle the request.
Overly long and detailed Server values should be avoided as they potentially reveal internal implementation details that might make it (slightly) easier for attackers to find and exploit known security holes.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Server`,
},
{
Name: "Range requests",
IsTitle: true,
Summary: "https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Range_requests",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Range_requests`,
},
{
Name: "Accept-Ranges",
Summary: "Indicates if the server supports range requests, and if so in which unit the range can be expressed.",
Description: `The Accept-Ranges response HTTP header is a marker used by the server to advertise its support of partial requests. The value of this field indicates the unit that can be used to define a range.
In presence of an Accept-Ranges header, the browser may try to resume an interrupted download, rather than to start it from the start again.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept-Ranges`,
},
{
Name: "Range",
Summary: "Indicates the part of a document that the server should return.",
Description: `The Range HTTP request header indicates the part of a document that the server should return. Several parts can be requested with one Range header at once, and the server may send back these ranges in a multipart document. If the server sends back ranges, it uses the 206 Partial Content for the response. If the ranges are invalid, the server returns the 416 Range Not Satisfiable error. The server can also ignore the Range header and return the whole document with a 200 status code.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Range`,
},
{
Name: "If-Range",
Summary: "Creates a conditional range request that is only fulfilled if the given etag or date matches the remote resource. Used to prevent downloading two ranges from incompatible version of the resource.",
Description: `The If-Range HTTP request header makes a range request conditional: if the condition is fulfilled, the range request will be issued and the server sends back a 206 Partial Content answer with the appropriate body. If the condition is not fulfilled, the full resource is sent back, with a 200 OK status.
This header can be used either with a Last-Modified validator, or with an ETag, but not with both.
The most common use case is to resume a download, to guarantee that the stored resource has not been modified since the last fragment has been received.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/If-Range`,
},
{
Name: "Content-Range",
Summary: "Indicates where in a full body message a partial message belongs.",
Description: `The Content-Range response HTTP header indicates where in a full body message a partial message belongs.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Range`,
},
{
Name: "Security",
IsTitle: true,
Summary: "https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Security",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Security`,
},
{
Name: "Cross-Origin-Embedder-Policy (COEP)",
Summary: "Allows a server to declare an embedder policy for a given document.",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Security`,
},
{
Name: "Cross-Origin-Opener-Policy (COOP)",
Summary: "Prevents other domains from opening/controlling a window.",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Security`,
},
{
Name: "Cross-Origin-Resource-Policy (CORP)",
Summary: "Prevents other domains from reading the response of the resources to which this header is applied.",
Description: `The HTTP Cross-Origin-Resource-Policy response header conveys a desire that the browser blocks no-cors cross-origin/cross-site requests to the given resource.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cross-Origin-Resource-Policy`,
},
{
Name: "Content-Security-Policy (CSP)",
Summary: "Controls resources the user agent is allowed to load for a given page.",
Description: `The HTTP Content-Security-Policy response header allows web site administrators to control resources the user agent is allowed to load for a given page. With a few exceptions, policies mostly involve specifying server origins and script endpoints. This helps guard against cross-site scripting attacks (XSS).
For more information, see the introductory article on Content Security Policy (CSP).
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy`,
},
{
Name: "Content-Security-Policy-Report-Only",
Summary: "Allows web developers to experiment with policies by monitoring, but not enforcing, their effects. These violation reports consist of JSON documents sent via an HTTP POST request to the specified URI.",
Description: `The HTTP Content-Security-Policy-Report-Only response header allows web developers to experiment with policies by monitoring (but not enforcing) their effects. These violation reports consist of JSON documents sent via an HTTP POST request to the specified URI.
For more information, see also this article on Content Security Policy (CSP).
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy-Report-Only`,
},
{
Name: "Expect-CT",
Summary: "Allows sites to opt in to reporting and/or enforcement of Certificate Transparency requirements, which prevents the use of misissued certificates for that site from going unnoticed. When a site enables the Expect-CT header, they are requesting that Chrome check that any certificate for that site appears in public CT logs.",
Description: `The Expect-CT header allows sites to opt in to reporting and/or enforcement of Certificate Transparency requirements, which prevents the use of misissued certificates for that site from going unnoticed.
CT requirements can be satisfied by servers via any one of the following mechanisms:
X.509v3 certificate extension to allow embedding of signed certificate timestamps issued by individual logs
A TLS extension of type signed_certificate_timestamp sent during the handshake
Supporting OCSP stapling (that is, the status_request TLS extension) and providing a SignedCertificateTimestampList
When a site enables the Expect-CT header, they are requesting that the browser check that any certificate for that site appears in public CT logs.
Browsers ignore the Expect-CT header when sent over HTTP, the header only has effect on HTTPS connections.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Expect-CT`,
},
{
Name: "Feature-Policy",
Summary: "Provides a mechanism to allow and deny the use of browser features in its own frame, and in iframes that it embeds.",
Description: `The HTTP Feature-Policy header provides a mechanism to allow and deny the use of browser features in its own frame, and in content within any <iframe> elements in the document.
For more information, see the main Feature Policy article.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Feature-Policy`,
},
{
Name: "Public-Key-Pins (HPKP)",
Summary: "Associates a specific cryptographic public key with a certain web server to decrease the risk of MITM attacks with forged certificates.",
Description: `Note: Public Key Pinning mechanism was deprecated in favor of Certificate Transparency and Expect-CT header.
The HTTP Public-Key-Pins response header associates a specific cryptographic public key with a certain web server to decrease the risk of MITM attacks with forged certificates. If one or several keys are pinned and none of them are used by the server, the browser will not accept the response as legitimate, and will not display it.
For more information, see the HTTP Public Key Pinning article.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Public-Key-Pins`,
},
{
Name: "Public-Key-Pins-Report-Only",
Summary: "Sends reports to the report-uri specified in the header and does still allow clients to connect to the server even if the pinning is violated.",
Description: `The HTTP Public-Key-Pins-Report-Only response header sends reports of pinning violation to the report-uri specified in the header but, unlike Public-Key-Pins still allows browsers to connect to the server if the pinning is violated.
For more information, see the Public-Key-Pins header reference page and the HTTP Public Key Pinning article.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Public-Key-Pins-Report-Only`,
},
{
Name: "Strict-Transport-Security (HSTS)",
Summary: "Force communication using HTTPS instead of HTTP.",
Description: `The HTTP Strict-Transport-Security response header (often abbreviated as HSTS) lets a web site tell browsers that it should only be accessed using HTTPS, instead of using HTTP.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security`,
},
{
Name: "Upgrade-Insecure-Requests",
Summary: "Sends a signal to the server expressing the client’s preference for an encrypted and authenticated response, and that it can successfully handle the upgrade-insecure-requests directive.",
Description: `The HTTP Upgrade-Insecure-Requests request header sends a signal to the server expressing the client’s preference for an encrypted and authenticated response, and that it can successfully handle the upgrade-insecure-requests CSP directive.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Upgrade-Insecure-Requests`,
},
{
Name: "X-Content-Type-Options",
Summary: "Disables MIME sniffing and forces browser to use the type given in Content-Type.",
Description: `The X-Content-Type-Options response HTTP header is a marker used by the server to indicate that the MIME types advertised in the Content-Type headers should not be changed and be followed. This allows to opt-out of MIME type sniffing, or, in other words, it is a way to say that the webmasters knew what they were doing.
This header was introduced by Microsoft in IE 8 as a way for webmasters to block content sniffing that was happening and could transform non-executable MIME types into executable MIME types. Since then, other browsers have introduced it, even if their MIME sniffing algorithms were less aggressive.
Starting with Firefox 72, the opting out of MIME sniffing is also applied to top-level documents if a Content-type is provided. This can cause HTML web pages to be downloaded instead of being rendered when they are served with a MIME type other than text/html. Make sure to set both headers correctly.
Site security testers usually expect this header to be set.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Content-Type-Options`,
},
{
Name: "X-Content-Type-Options",
Summary: "Disables MIME sniffing and forces browser to use the type given in Content-Type.",
Description: `The X-Content-Type-Options response HTTP header is a marker used by the server to indicate that the MIME types advertised in the Content-Type headers should not be changed and be followed. This allows to opt-out of MIME type sniffing, or, in other words, it is a way to say that the webmasters knew what they were doing.
This header was introduced by Microsoft in IE 8 as a way for webmasters to block content sniffing that was happening and could transform non-executable MIME types into executable MIME types. Since then, other browsers have introduced it, even if their MIME sniffing algorithms were less aggressive.
Starting with Firefox 72, the opting out of MIME sniffing is also applied to top-level documents if a Content-type is provided. This can cause HTML web pages to be downloaded instead of being rendered when they are served with a MIME type other than text/html. Make sure to set both headers correctly.
Site security testers usually expect this header to be set.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Content-Type-Options`,
},
{
Name: "X-Download-Options",
Summary: "The X-Download-Options HTTP header indicates that the browser (Internet Explorer) should not display the option to \"Open\" a file that has been downloaded from an application, to prevent phishing attacks as the file otherwise would gain access to execute in the context of the application. (Note: related MS Edge bug).",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Security`,
},
{
Name: "X-Frame-Options (XFO)",
Summary: "Indicates whether a browser should be allowed to render a page in a <frame>, <iframe>, <embed> or <object>.",
Description: `The X-Frame-Options HTTP response header can be used to indicate whether or not a browser should be allowed to render a page in a <frame>, <iframe>, <embed> or <object>. Sites can use this to avoid clickjacking attacks, by ensuring that their content is not embedded into other sites.
The added security is only provided if the user accessing the document is using a browser supporting X-Frame-Options.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Frame-Options`,
},
{
Name: "X-Permitted-Cross-Domain-Policies",
Summary: "Specifies if a cross-domain policy file (crossdomain.xml) is allowed. The file may define a policy to grant clients, such as Adobe's Flash Player, Adobe Acrobat, Microsoft Silverlight, or Apache Flex, permission to handle data across domains that would otherwise be restricted due to the Same-Origin Policy. See the Cross-domain Policy File Specification for more information.",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Security`,
},
{
Name: "X-Powered-By",
Summary: "May be set by hosting environments or other frameworks and contains information about them while not providing any usefulness to the application or its visitors. Unset this header to avoid exposing potential vulnerabilities.",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Security`,
},
{
Name: "X-XSS-Protection",
Summary: "Enables cross-site scripting filtering.",
Description: `The HTTP X-XSS-Protection response header is a feature of Internet Explorer, Chrome and Safari that stops pages from loading when they detect reflected cross-site scripting (XSS) attacks. Although these protections are largely unnecessary in modern browsers when sites implement a strong Content-Security-Policy that disables the use of inline JavaScript ('unsafe-inline'), they can still provide protections for users of older web browsers that don't yet support CSP.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-XSS-Protection`,
},
{
Name: "Server-sent events",
IsTitle: true,
Summary: "https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Server-sent_events",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Server-sent_events`,
},
{
Name: "Last-Event-ID",
Summary: "https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Server-sent_events",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Server-sent_events`,
},
{
Name: "NEL",
Summary: "Defines a mechanism that enables developers to declare a network error reporting policy.",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Server-sent_events`,
},
{
Name: "Ping-From",
Summary: "https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Server-sent_events",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Server-sent_events`,
},
{
Name: "Ping-To",
Summary: "https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Server-sent_events",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Server-sent_events`,
},
{
Name: "Report-To",
Summary: "Used to specify a server endpoint for the browser to send warning and error reports to.",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Server-sent_events`,
},
{
Name: "Transfer coding",
IsTitle: true,
Summary: "https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Transfer_coding",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Transfer_coding`,
},
{
Name: "Transfer-Encoding",
Summary: "Specifies the form of encoding used to safely transfer the entity to the user.",
Description: `The Transfer-Encoding header specifies the form of encoding used to safely transfer the payload body to the user.
HTTP/2 doesn't support HTTP 1.1's chunked transfer encoding mechanism, as it provides its own, more efficient, mechanisms for data streaming.
Transfer-Encoding is a hop-by-hop header, that is applied to a message between two nodes, not to a resource itself. Each segment of a multi-node connection can use different Transfer-Encoding values. If you want to compress data over the whole connection, use the end-to-end Content-Encoding header instead.
When present on a response to a HEAD request that has no body, it indicates the value that would have applied to the corresponding GET message.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Transfer-Encoding`,
},
{
Name: "TE",
Summary: "Specifies the transfer encodings the user agent is willing to accept.",
Description: `The TE request header specifies the transfer encodings the user agent is willing to accept. (you could informally call it Accept-Transfer-Encoding, which would be more intuitive).
In HTTP/2 - the TE header field is only accepted if the trailers value is set.
See also the Transfer-Encoding response header for more details on transfer encodings. Note that chunked is always acceptable for HTTP/1.1 recipients and you don't have to specify "chunked" using the TE header. However, it is useful for setting if the client is accepting trailer fields in a chunked transfer coding using the "trailers" value.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/TE`,
},
{
Name: "Trailer",
Summary: "Allows the sender to include additional fields at the end of chunked message.",
Description: `The Trailer response header allows the sender to include additional fields at the end of chunked messages in order to supply metadata that might be dynamically generated while the message body is sent, such as a message integrity check, digital signature, or post-processing status.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Trailer`,
},
{
Name: "WebSockets",
IsTitle: true,
Summary: "https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#WebSockets",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#WebSockets`,
},
{
Name: "Sec-WebSocket-Accept",
Summary: "https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Sec-WebSocket-Accept",
Description: `The Sec-WebSocket-Accept header is used in the websocket opening handshake. It would appear in the response headers. That is, this is header is sent from server to client to inform that server is willing to initiate a websocket connection.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Sec-WebSocket-Accept`,
},
{
Name: "Sec-WebSocket-Key",
Summary: "https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#WebSockets",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#WebSockets`,
},
{
Name: "Sec-WebSocket-Extensions",
Summary: "https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#WebSockets",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#WebSockets`,
},
{
Name: "Sec-WebSocket-Protocol",
Summary: "https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#WebSockets",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#WebSockets`,
},
{
Name: "Sec-WebSocket-Version",
Summary: "https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#WebSockets",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#WebSockets`,
},
{
Name: "Other",
IsTitle: true,
Summary: "https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Other",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Other`,
},
{
Name: "Accept-Push-Policy",
Summary: "A client can express the desired push policy for a request by sending an Accept-Push-Policy header field in the request.",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Other`,
},
{
Name: "Accept-Signature",
Summary: "A client can send the Accept-Signature header field to indicate intention to take advantage of any available signatures and to indicate what kinds of signatures it supports.",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Other`,
},
{
Name: "Alt-Svc",
Summary: "Used to list alternate ways to reach this service.",
Description: `The Alt-Svc header is used to list alternate ways to reach this website.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Alt-Svc`,
},
{
Name: "Date",
Summary: "Contains the date and time at which the message was originated.",
Description: `The Date general HTTP header contains the date and time at which the message was originated.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Date`,
},
{
Name: "Large-Allocation",
Summary: "Tells the browser that the page being loaded is going to want to perform a large allocation.",
Description: `The non-standard Large-Allocation response header tells the browser that the page being loaded is going to want to perform a large allocation. It is currently only implemented in Firefox, but is harmless to send to every browser.
WebAssembly or asm.js applications can use large contiguous blocks of allocated memory. For complex games, for example, these allocations can be quite large, sometimes as large as 1GB. The Large-Allocation tells the browser that the web content in the to-be-loaded page is going to want to perform a large contiguous memory allocation and the browser can react to this header by starting a dedicated process for the to-be-loaded document, for example.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Large-Allocation`,
},
{
Name: "Link",
Summary: "The Link entity-header field provides a means for serialising one or more links in HTTP headers. It is semantically equivalent to the HTML <link> element.",
Description: `The HTTP Link entity-header field provides a means for serialising one or more links in HTTP headers. It is semantically equivalent to the HTML <link> element.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Link`,
},
{
Name: "Push-Policy",
Summary: "A Push-Policy defines the server behaviour regarding push when processing a request.",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Other`,
},
{
Name: "Retry-After",
Summary: "Indicates how long the user agent should wait before making a follow-up request.",
Description: `The Retry-After response HTTP header indicates how long the user agent should wait before making a follow-up request. There are three main cases this header is used:
When sent with a 503 (Service Unavailable) response, this indicates how long the service is expected to be unavailable.
When sent with a 429 (Too Many Requests) response, this indicates how long to wait before making a new request.
When sent with a redirect response, such as 301 (Moved Permanently), this indicates the minimum time that the user agent is asked to wait before issuing the redirected request.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After`,
},
{
Name: "Signature",
Summary: "The Signature header field conveys a list of signatures for an exchange, each one accompanied by information about how to determine the authority of and refresh that signature.",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Other`,
},
{
Name: "Signed-Headers",
Summary: "The Signed-Headers header field identifies an ordered list of response header fields to include in a signature.",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Other`,
},
{
Name: "Server-Timing",
Summary: "Communicates one or more metrics and descriptions for the given request-response cycle.",
Description: `The Server-Timing header communicates one or more metrics and descriptions for a given request-response cycle. It is used to surface any backend server timing metrics (e.g. database read/write, CPU time, file system access, etc.) in the developer tools in the user's browser or in the PerformanceServerTiming interface.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Server-Timing`,
},
{
Name: "Service-Worker-Allowed",
Summary: "Used to remove the path restriction by including this header in the response of the Service Worker script.",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Other`,
},
{
Name: "SourceMap",
Summary: "Links generated code to a source map.",
Description: `The SourceMap HTTP response header links generated code to a source map, enabling the browser to reconstruct the original source and present the reconstructed original in the debugger.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/SourceMap`,
},
{
Name: "Upgrade",
Summary: "The relevant RFC document for the Upgrade header field is RFC 7230, section 6.7. The standard establishes rules for upgrading or changing to a different protocol on the current client, server, transport protocol connection.",
Description: `The relevant RFC document for the Upgrade header field is RFC 7230, section 6.7. The standard establishes rules for upgrading or changing to a different protocol on the current client, server, transport protocol connection. For example, this header standard allows a client to change from HTTP 1.1 to HTTP 2.0, assuming the server decides to acknowledge and implement the Upgrade header field. Neither party is required to accept the terms specified in the Upgrade header field. It can be used in both client and server headers. If the Upgrade header field is specified, then the sender MUST also send the Connection header field with the upgrade option specified. For details on the Connection header field please see section 6.1 of the aforementioned RFC.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Other`,
},
{
Name: "X-DNS-Prefetch-Control",
Summary: "Controls DNS prefetching, a feature by which browsers proactively perform domain name resolution on both links that the user may choose to follow as well as URLs for items referenced by the document, including images, CSS, JavaScript, and so forth.",
Description: `The X-DNS-Prefetch-Control HTTP response header controls DNS prefetching, a feature by which browsers proactively perform domain name resolution on both links that the user may choose to follow as well as URLs for items referenced by the document, including images, CSS, JavaScript, and so forth.
This prefetching is performed in the background, so that the DNS is likely to have been resolved by the time the referenced items are needed. This reduces latency when the user clicks a link.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-DNS-Prefetch-Control`,
},
{
Name: "X-Robots-Tag",
Summary: "The X-Robots-Tag HTTP header is used to indicate how a web page is to be indexed within public search engine results. The header is effectively equivalent to <meta name=\"robots\" content=\"...\">.",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Other`,
},
{
Name: "X-UA-Compatible",
Summary: "Used by Internet Explorer to signal which document mode to use.",
Description: `https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Other`,
},
} | headers.go | 0.84367 | 0.434521 | headers.go | starcoder |
package topo
import (
"gonum.org/v1/gonum/graph"
"gonum.org/v1/gonum/graph/internal/set"
)
// VertexOrdering returns the vertex ordering and the k-cores of
// the undirected graph g.
//
// The ordering is produced by repeatedly removing a vertex of minimum
// remaining degree (a Matula–Beck degeneracy ordering) and then reversing
// the removal sequence. cores[i] holds the vertices that were removed while
// the running degree bound k was equal to i; the cores slices are views
// into the returned order and together partition it.
func VertexOrdering(g graph.Undirected) (order []graph.Node, cores [][]graph.Node) {
	nodes := g.Nodes()

	// The algorithm used here is essentially as described at
	// http://en.wikipedia.org/w/index.php?title=Degeneracy_%28graph_theory%29&oldid=640308710

	// Initialize an output list L.
	var l []graph.Node

	// Compute a number d_v for each vertex v in G,
	// the number of neighbors of v that are not already in L.
	// Initially, these numbers are just the degrees of the vertices.
	dv := make(map[int64]int, len(nodes))
	var (
		maxDegree  int
		neighbours = make(map[int64][]graph.Node)
	)
	for _, n := range nodes {
		adj := g.From(n)
		// Cache the adjacency list so it is fetched only once per vertex.
		neighbours[n.ID()] = adj
		dv[n.ID()] = len(adj)
		if len(adj) > maxDegree {
			maxDegree = len(adj)
		}
	}

	// Initialize an array D such that D[i] contains a list of the
	// vertices v that are not already in L for which d_v = i.
	d := make([][]graph.Node, maxDegree+1)
	for _, n := range nodes {
		deg := dv[n.ID()]
		d[deg] = append(d[deg], n)
	}

	// Initialize k to 0.
	k := 0
	// s[i] counts the vertices appended to L while k == i; it is used at
	// the end to slice the k-cores out of the final ordering.
	s := []int{0}

	// Repeat n times:
	for range nodes {
		// Scan the array cells D[0], D[1], ... until
		// finding an i for which D[i] is nonempty.
		var (
			i  int
			di []graph.Node
		)
		for i, di = range d {
			if len(di) != 0 {
				break
			}
		}

		// Set k to max(k,i).
		if i > k {
			k = i
			// Grow s so that a counter exists for every level up to k.
			s = append(s, make([]int, k-len(s)+1)...)
		}

		// Select a vertex v from D[i]. Add v to the
		// beginning of L and remove it from D[i].
		var v graph.Node
		v, d[i] = di[len(di)-1], di[:len(di)-1]
		l = append(l, v)
		s[k]++
		// Deleting v from dv marks it as "already in L" for the
		// neighbour updates below.
		delete(dv, v.ID())

		// For each neighbor w of v not already in L,
		// subtract one from d_w and move w to the
		// cell of D corresponding to the new value of d_w.
		for _, w := range neighbours[v.ID()] {
			dw, ok := dv[w.ID()]
			if !ok {
				// w is already in L.
				continue
			}
			for i, n := range d[dw] {
				if n.ID() == w.ID() {
					// Remove w from its current bucket by swapping in
					// the last element, then append it to the next
					// lower bucket.
					d[dw][i], d[dw] = d[dw][len(d[dw])-1], d[dw][:len(d[dw])-1]
					dw--
					d[dw] = append(d[dw], w)
					break
				}
			}
			dv[w.ID()] = dw
		}
	}

	// L was built by appending, so reverse it in place so that the most
	// recently removed vertices come first.
	for i, j := 0, len(l)-1; i < j; i, j = i+1, j-1 {
		l[i], l[j] = l[j], l[i]
	}

	// Carve the k-cores out of the reversed ordering using the per-level
	// counts in s, walking backwards from the end of l.
	cores = make([][]graph.Node, len(s))
	offset := len(l)
	for i, n := range s {
		cores[i] = l[offset-n : offset]
		offset -= n
	}
	return l, cores
}
// BronKerbosch returns the set of maximal cliques of the undirected graph g.
//
// The candidate set starts with every vertex of g and the excluded set
// starts empty; the outer loop then visits vertices in degeneracy order,
// expanding each with the pivoting recursion before moving it from the
// candidate set to the excluded set.
func BronKerbosch(g graph.Undirected) [][]graph.Node {
	// The algorithm used here is essentially BronKerbosch3 as described at
	// http://en.wikipedia.org/w/index.php?title=Bron%E2%80%93Kerbosch_algorithm&oldid=656805858
	all := g.Nodes()
	candidates := make(set.Nodes, len(all))
	for _, n := range all {
		candidates.Add(n)
	}
	excluded := make(set.Nodes)

	var cliques bronKerbosch
	ordering, _ := VertexOrdering(g)
	for _, v := range ordering {
		adj := g.From(v)
		nv := make(set.Nodes, len(adj))
		for _, w := range adj {
			nv.Add(w)
		}
		cliques.maximalCliquePivot(g, []graph.Node{v}, make(set.Nodes).Intersect(candidates, nv), make(set.Nodes).Intersect(excluded, nv))
		candidates.Remove(v)
		excluded.Add(v)
	}
	return cliques
}
// bronKerbosch accumulates the maximal cliques found during the
// Bron–Kerbosch recursion.
type bronKerbosch [][]graph.Node
// maximalCliquePivot recursively extends the clique r using the candidate
// set p and the excluded set x, appending each completed maximal clique to
// the receiver. It is the pivoting form of the Bron–Kerbosch recursion:
// candidates adjacent to the chosen pivot are skipped at this level.
func (bk *bronKerbosch) maximalCliquePivot(g graph.Undirected, r []graph.Node, p, x set.Nodes) {
	// When both the candidate and excluded sets are empty, r cannot be
	// extended and is therefore a maximal clique.
	if len(p) == 0 && len(x) == 0 {
		*bk = append(*bk, r)
		return
	}

	// nu is the neighbourhood of the pivot vertex as a set.
	neighbours := bk.choosePivotFrom(g, p, x)
	nu := make(set.Nodes, len(neighbours))
	for _, n := range neighbours {
		nu.Add(n)
	}
	for _, v := range p {
		// Skip candidates adjacent to the pivot.
		if nu.Has(v) {
			continue
		}
		neighbours := g.From(v)
		nv := make(set.Nodes, len(neighbours))
		for _, n := range neighbours {
			nv.Add(n)
		}

		// Check whether v is already a member of r.
		var found bool
		for _, n := range r {
			if n.ID() == v.ID() {
				found = true
				break
			}
		}
		// NOTE(review): when v is already in r, sr is left nil and the
		// recursion proceeds with an empty clique — presumably this is a
		// defensive guard; confirm whether v can ever already be in r.
		var sr []graph.Node
		if !found {
			// The three-index slice expression caps capacity at len(r)
			// so the append copies rather than sharing r's backing
			// array with sibling recursions.
			sr = append(r[:len(r):len(r)], v)
		}

		// Recurse with candidates and exclusions restricted to v's
		// neighbourhood, then move v from p to x.
		bk.maximalCliquePivot(g, sr, make(set.Nodes).Intersect(p, nv), make(set.Nodes).Intersect(x, nv))
		p.Remove(v)
		x.Add(v)
	}
}
// choosePivotFrom selects a pivot vertex for the Bron–Kerbosch recursion
// and returns that vertex's neighbour list. With the cheap strategy the
// pivot is an arbitrary element of p (or of x when p is empty). With the
// Tomita–Tanaka–Takahashi strategy the pivot is the vertex of p ∪ x that
// maximises |p ⋂ neighbours(u)|. Both strategies panic if p and x are
// both empty, which would indicate a caller bug.
func (*bronKerbosch) choosePivotFrom(g graph.Undirected, p, x set.Nodes) (neighbors []graph.Node) {
	// TODO(kortschak): Investigate the impact of pivot choice that maximises
	// |p ⋂ neighbours(u)| as a function of input size. Until then, leave as
	// compile time option.
	if !tomitaTanakaTakahashi {
		// Cheap pivot: take an arbitrary vertex, preferring p over x.
		for _, n := range p {
			return g.From(n)
		}
		for _, n := range x {
			return g.From(n)
		}
		panic("bronKerbosch: empty set")
	}

	var (
		max   = -1
		pivot graph.Node
	)
	// maxNeighbors scans s for the vertex with the largest number of
	// neighbours inside p, updating max, pivot and neighbors. The count
	// starts at the full degree and is decremented for each neighbour
	// absent from p, bailing out early once it can no longer beat max.
	maxNeighbors := func(s set.Nodes) {
	outer:
		for _, u := range s {
			nb := g.From(u)
			c := len(nb)
			if c <= max {
				continue
			}
			for _, n := range nb {
				// BUG FIX: the membership test must use the
				// neighbour's ID; the previous code tested the slice
				// index (p[int64(n)] with n an int index), which does
				// not implement |p ⋂ neighbours(u)|.
				if _, ok := p[n.ID()]; ok {
					continue
				}
				c--
				if c <= max {
					continue outer
				}
			}
			max = c
			pivot = u
			neighbors = nb
		}
	}
	maxNeighbors(p)
	maxNeighbors(x)
	if pivot == nil {
		panic("bronKerbosch: empty set")
	}
	return neighbors
} | graph/topo/bron_kerbosch.go | 0.604399 | 0.477432 | bron_kerbosch.go | starcoder
package processor
import (
"encoding/json"
"errors"
"fmt"
"strings"
"time"
"github.com/Jeffail/benthos/v3/lib/bloblang/x/field"
"github.com/Jeffail/benthos/v3/lib/log"
"github.com/Jeffail/benthos/v3/lib/message"
"github.com/Jeffail/benthos/v3/lib/metrics"
"github.com/Jeffail/benthos/v3/lib/types"
"github.com/Jeffail/benthos/v3/lib/x/docs"
"github.com/Jeffail/gabs/v2"
"github.com/opentracing/opentracing-go"
)
//------------------------------------------------------------------------------
// init registers the deprecated `json` processor and its rendered user
// documentation in the global processor constructor table, keyed by TypeJSON.
func init() {
	Constructors[TypeJSON] = TypeSpec{
		constructor: NewJSON,
		Summary: `
DEPRECATED: This processor is now deprecated, and the new
[bloblang processor](/docs/components/processors/bloblang) should be used
instead.`,
		Description: `
This processor is useful for applying high performance mutations on JSON data.
For more advanced mapping use cases take a look at the
` + "[`jmespath` processor](/docs/components/processors/jmespath)" + `, and also
the ` + "[`awk` processor](/docs/components/processors/awk)" + `.`,
		Footnotes: `
## Operators
### ` + "`append`" + `
Appends a value to an array at a target dot path. If the path does not exist all
objects in the path are created (unless there is a collision).
If a non-array value already exists in the target path it will be replaced by an
array containing the original value as well as the new value.
If the value is an array the elements of the array are expanded into the new
array. E.g. if the target is an array ` + "`[0,1]`" + ` and the value is also an
array ` + "`[2,3]`" + `, the result will be ` + "`[0,1,2,3]`" + ` as opposed to
` + "`[0,1,[2,3]]`" + `.
### ` + "`clean`" + `
Walks the JSON structure and deletes any fields where the value is:
- An empty array
- An empty object
- An empty string
- null
### ` + "`copy`" + `
Copies the value of a target dot path (if it exists) to a location. The
destination path is specified in the ` + "`value`" + ` field. If the destination
path does not exist all objects in the path are created (unless there is a
collision).
### ` + "`delete`" + `
Removes a key identified by the dot path. If the path does not exist this is a
no-op.
### ` + "`explode`" + `
Explodes an array or object within a JSON document.
Exploding arrays results in a root level array containing elements matching the
original document, where the target field of each element is an element of the
exploded array.
Exploding objects results in a root level object where the keys match the target
object, and the values match the original document but with the target field
replaced by the exploded value.
It is then possible to expand the result to create individual messages per
element with the ` + "[`unarchive` processor](/docs/components/processors/unarchive) `json_array` or" + `
` + "`json_object` format." + `.
For example, given the following config:
` + "```yaml" + `
json:
  operator: explode
  path: value
` + "```" + `
And two input documents:
` + "```json" + `
{"id":1,"value":["foo","bar","baz"]}
{"id":1,"value":{"foo":2,"bar":[3,4],"baz":{"bev":5}}}
` + "```" + `
The respective results would be:
` + "```json" + `
[{"id":1,"value":"foo"},{"id":1,"value":"bar"},{"id":1,"value":"baz"}]
{"foo":{"id":1,"value":2},"bar":{"id":1,"value":[3,4]},"baz":{"id":1,"value":{"bev":5}}}
` + "```" + `
### ` + "`flatten`" + `
Flatten an array or object into an object of key/value pairs for each field,
where the key is the full path of the structured field in
[dot notation](/docs/configuration/field_paths).
E.g. given the input document:
` + "```json" + `
{"foo":[{"bar":"1"},{"bar":"2"}]}
` + "```" + `
Performing ` + "`flatten`" + ` on the root would create:
` + "```json" + `
{"foo.0.bar":"1","foo.1.bar":"2"}
` + "```" + `
### ` + "`flatten_array`" + `
Targets an array within the document and expands the contents of any elements
that are arrays into the target array.
E.g. given the input document:
` + "```json" + `
{"foo":[["first"],["second","third"]]}
` + "```" + `
Performing ` + "`flatten_array`" + ` on the field 'foo' would create:
` + "```json" + `
{"foo":["first","second","third"]}
` + "```" + `
### ` + "`fold_number_array`" + `
Targets an array within the document and attempts to fold the elements into a
single number. All elements must be a number.
### ` + "`fold_string_array`" + `
Targets an array within the document and attempts to fold the elements into a
single string. All elements must be a string.
If a string ` + "`value`" + ` is specified then concatenated strings will be
delimited with its contents.
### ` + "`move`" + `
Moves the value of a target dot path (if it exists) to a new location. The
destination path is specified in the ` + "`value`" + ` field. If the destination
path does not exist all objects in the path are created (unless there is a
collision).
### ` + "`select`" + `
Reads the value found at a dot path and replaces the original contents entirely
by the new value.
### ` + "`set`" + `
Sets the value of a field at a dot path. If the path does not exist all objects
in the path are created (unless there is a collision).
The value can be any type, including objects and arrays. When using YAML
configuration files a YAML object will be converted into a JSON object, i.e.
with the config:
` + "```yaml" + `
json:
  operator: set
  parts: [0]
  path: some.path
  value:
    foo:
      bar: 5
` + "```" + `
The value will be converted into '{"foo":{"bar":5}}'. If the YAML object
contains keys that aren't strings those fields will be ignored.
### ` + "`split`" + `
Splits a string field by a value and replaces the original string with an array
containing the results of the split. This operator requires both the path value
and the contents of the ` + "`value`" + ` field to be strings.`,
		FieldSpecs: docs.FieldSpecs{
			docs.FieldCommon("operator", "The [operator](#operators) to apply to messages.").HasOptions(
				"append", "clean", "copy", "delete", "explode", "flatten", "flatten_array", "fold_number_array",
				"fold_string_array", "move", "select", "set", "split",
			),
			docs.FieldCommon("path", "A [dot path](/docs/configuration/field_paths) specifying the target within the document to the apply the chosen operator to.", "foo.bar", ".", "some_array.0.id"),
			docs.FieldCommon(
				"value",
				"A value to use with the chosen operator (sometimes not applicable). This is a generic field that can be any type.",
				"foo", "${!metadata:kafka_key}", false, 10,
				map[string]interface{}{"topic": "${!metadata:kafka_topic}", "key": "${!metadata:kafka_key}"},
			),
			partsFieldSpec,
		},
	}
}
//------------------------------------------------------------------------------
// rawJSONValue stores a JSON document verbatim, deferring any decoding.
type rawJSONValue []byte

// UnmarshalJSON retains the raw bytes of the document without parsing them,
// reusing the existing backing array where capacity allows.
func (r *rawJSONValue) UnmarshalJSON(data []byte) error {
	*r = append((*r)[:0], data...)
	return nil
}

// MarshalJSON emits the stored bytes unchanged; a nil value encodes as the
// JSON null literal.
func (r rawJSONValue) MarshalJSON() ([]byte, error) {
	if r != nil {
		return r, nil
	}
	return []byte("null"), nil
}
// UnmarshalYAML decodes an arbitrary YAML value and stores its JSON encoding.
// YAML maps decode with interface{} keys, so nested maps are first rewritten
// to map[string]interface{} (entries with non-string keys are silently
// dropped, as documented for the `set` operator) before marshalling to JSON.
func (r *rawJSONValue) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var yamlObj interface{}
	if err := unmarshal(&yamlObj); err != nil {
		return err
	}

	// Mutually recursive converters: maps are rebuilt with string keys,
	// arrays are converted element-wise in place.
	var convertMap func(m map[interface{}]interface{}) map[string]interface{}
	var convertArray func(a []interface{})
	convertMap = func(m map[interface{}]interface{}) map[string]interface{} {
		newMap := map[string]interface{}{}
		for k, v := range m {
			keyStr, ok := k.(string)
			if !ok {
				// Non-string keys cannot be represented in a JSON object.
				continue
			}
			newVal := v
			switch t := v.(type) {
			case []interface{}:
				convertArray(t)
			case map[interface{}]interface{}:
				newVal = convertMap(t)
			}
			newMap[keyStr] = newVal
		}
		return newMap
	}
	convertArray = func(a []interface{}) {
		for i, v := range a {
			newVal := v
			switch t := v.(type) {
			case []interface{}:
				convertArray(t)
			case map[interface{}]interface{}:
				newVal = convertMap(t)
			}
			a[i] = newVal
		}
	}
	switch t := yamlObj.(type) {
	case []interface{}:
		convertArray(t)
	case map[interface{}]interface{}:
		yamlObj = convertMap(t)
	}

	rawJSON, err := json.Marshal(yamlObj)
	if err != nil {
		return err
	}
	// Reuse the existing backing array where capacity allows.
	*r = append((*r)[0:0], rawJSON...)
	return nil
}
// MarshalYAML re-decodes the stored JSON so the YAML marshaller can render it
// as a native structure. A nil value marshals as YAML null.
func (r rawJSONValue) MarshalYAML() (interface{}, error) {
	if r == nil {
		return nil, nil
	}
	var val interface{}
	if err := json.Unmarshal(r, &val); err != nil {
		return nil, err
	}
	return val, nil
}
//------------------------------------------------------------------------------
// JSONConfig contains configuration fields for the JSON processor.
type JSONConfig struct {
	// Parts lists the message part indexes to process; empty means all parts.
	Parts []int `json:"parts" yaml:"parts"`
	// Operator names the mutation to apply (set, move, copy, clean, ...).
	Operator string `json:"operator" yaml:"operator"`
	// Path is the dot path the operator targets; "" or "." means the root.
	Path string `json:"path" yaml:"path"`
	// Value is an operator-specific argument and may be any JSON type.
	Value rawJSONValue `json:"value" yaml:"value"`
}
// NewJSONConfig returns a JSONConfig with default values: the `clean`
// operator applied to the document root of all parts, with an empty-string
// JSON value.
func NewJSONConfig() JSONConfig {
	return JSONConfig{
		Parts:    []int{},
		Operator: "clean",
		Path:     "",
		Value:    rawJSONValue(`""`),
	}
}
//------------------------------------------------------------------------------
// jsonOperator applies a single mutation to a parsed JSON document and
// returns the (possibly replaced) document root.
type jsonOperator func(body interface{}, value json.RawMessage) (interface{}, error)

// newSetOperator returns an operator that parses value and writes it at path,
// creating intermediate objects as needed. An empty path replaces the whole
// document with the parsed value.
func newSetOperator(path []string) jsonOperator {
	return func(body interface{}, value json.RawMessage) (interface{}, error) {
		if len(path) == 0 {
			// Root replacement: the parsed value becomes the new document.
			var data interface{}
			if value != nil {
				if err := json.Unmarshal([]byte(value), &data); err != nil {
					return nil, fmt.Errorf("failed to parse value: %v", err)
				}
			}
			return data, nil
		}

		gPart := gabs.Wrap(body)

		var data interface{}
		if value != nil {
			if err := json.Unmarshal([]byte(value), &data); err != nil {
				return nil, fmt.Errorf("failed to parse value: %v", err)
			}
		}

		gPart.Set(data, path...)
		return gPart.Data(), nil
	}
}
// newMoveOperator returns an operator that removes the value at srcPath and
// re-inserts it at destPath. An empty srcPath moves the entire document; an
// empty destPath replaces the document root with the moved value. Both paths
// empty is rejected as a meaningless no-op.
func newMoveOperator(srcPath, destPath []string) (jsonOperator, error) {
	if len(srcPath) == 0 && len(destPath) == 0 {
		return nil, errors.New("an empty source and destination path is not valid for the move operator")
	}
	return func(body interface{}, value json.RawMessage) (interface{}, error) {
		var gPart *gabs.Container
		var gSrc interface{}
		if len(srcPath) > 0 {
			gPart = gabs.Wrap(body)
			gSrc = gPart.S(srcPath...).Data()
			gPart.Delete(srcPath...)
		} else {
			// Moving the root: start the destination from an empty document.
			gPart = gabs.New()
			gSrc = body
		}
		if gSrc == nil {
			return nil, fmt.Errorf("item not found at path '%v'", strings.Join(srcPath, "."))
		}
		if len(destPath) == 0 {
			return gSrc, nil
		}
		if _, err := gPart.Set(gSrc, destPath...); err != nil {
			return nil, fmt.Errorf("failed to set destination path '%v': %v", strings.Join(destPath, "."), err)
		}
		return gPart.Data(), nil
	}, nil
}
// newCopyOperator returns an operator that duplicates the value at srcPath to
// destPath, leaving the source in place. Unlike move, both paths must be
// non-empty.
func newCopyOperator(srcPath, destPath []string) (jsonOperator, error) {
	if len(srcPath) == 0 {
		return nil, errors.New("an empty source path is not valid for the copy operator")
	}
	if len(destPath) == 0 {
		return nil, errors.New("an empty destination path is not valid for the copy operator")
	}
	return func(body interface{}, value json.RawMessage) (interface{}, error) {
		gPart := gabs.Wrap(body)
		gSrc := gPart.S(srcPath...).Data()
		if gSrc == nil {
			return nil, fmt.Errorf("item not found at path '%v'", strings.Join(srcPath, "."))
		}
		// NOTE(review): the value is inserted by reference, not deep-copied;
		// source and destination share nested structures — confirm intended.
		if _, err := gPart.Set(gSrc, destPath...); err != nil {
			return nil, fmt.Errorf("failed to set destination path '%v': %v", strings.Join(destPath, "."), err)
		}
		return gPart.Data(), nil
	}, nil
}
// newExplodeOperator returns an operator that explodes the array or object at
// path: for an array the result is a root-level array of document clones, one
// per element, each with the target field replaced by that element; for an
// object the result is a root-level object keyed like the target, with each
// value being a document clone carrying the corresponding element.
func newExplodeOperator(path []string) (jsonOperator, error) {
	if len(path) == 0 {
		return nil, errors.New("explode operator requires a target path")
	}
	return func(body interface{}, value json.RawMessage) (interface{}, error) {
		target := gabs.Wrap(body).Search(path...)
		switch t := target.Data().(type) {
		case []interface{}:
			result := make([]interface{}, len(t))
			for i, ele := range t {
				// Deep-copy the document so each exploded element is isolated.
				exploded, err := message.CopyJSON(body)
				if err != nil {
					return nil, fmt.Errorf("failed to clone root object to explode: %v", err)
				}
				gExploded := gabs.Wrap(exploded)
				gExploded.Set(ele, path...)
				result[i] = gExploded.Data()
			}
			return result, nil
		case map[string]interface{}:
			result := make(map[string]interface{})
			for key, ele := range t {
				exploded, err := message.CopyJSON(body)
				if err != nil {
					return nil, fmt.Errorf("failed to clone root object to explode: %v", err)
				}
				gExploded := gabs.Wrap(exploded)
				gExploded.Set(ele, path...)
				result[key] = gExploded.Data()
			}
			return result, nil
		}
		return nil, fmt.Errorf("target value was not an array or a map, found: %T", target.Data())
	}, nil
}
// foldStringArray concatenates the string elements of children, separated by
// the optional string delimiter carried in value. Any non-string element is
// an error. A missing or unparseable delimiter value is deliberately treated
// as an empty delimiter.
func foldStringArray(children []*gabs.Container, value json.RawMessage) (string, error) {
	var delim string
	if value != nil {
		// Best effort: error intentionally ignored, leaving delim empty.
		json.Unmarshal(value, &delim)
	}
	var b strings.Builder
	for i, child := range children {
		switch t := child.Data().(type) {
		case string:
			if i > 0 && len(delim) > 0 {
				b.WriteString(delim)
			}
			b.WriteString(t)
		default:
			return "", fmt.Errorf("mismatched types found in array, expected string, found: %T", t)
		}
	}
	return b.String(), nil
}
// foldArrayArray flattens one level of nesting: elements that are themselves
// arrays are expanded into the result; all other elements are appended as-is.
// The error result is always nil and exists to mirror the other fold helpers.
func foldArrayArray(children []*gabs.Container) ([]interface{}, error) {
	var b []interface{}
	for _, child := range children {
		switch t := child.Data().(type) {
		case []interface{}:
			b = append(b, t...)
		default:
			b = append(b, t)
		}
	}
	return b, nil
}
// foldNumberArray sums the numeric elements of children as a float64. Any
// non-numeric element is an error. json.Number elements that fail to parse as
// a float fall back to their integer interpretation.
func foldNumberArray(children []*gabs.Container) (float64, error) {
	var b float64
	for _, child := range children {
		switch t := child.Data().(type) {
		case int:
			b = b + float64(t)
		case int64:
			b = b + float64(t)
		case float64:
			b = b + float64(t)
		case json.Number:
			f, err := t.Float64()
			if err != nil {
				// Fall back to the integer value; its parse error is ignored
				// and contributes zero when both parses fail.
				i, _ := t.Int64()
				f = float64(i)
			}
			b = b + f
		default:
			return 0, fmt.Errorf("mismatched types found in array, expected number, found: %T", t)
		}
	}
	return b, nil
}
// newFlattenOperator returns an operator that flattens the structure at path
// into an object of dot-notation key/value pairs (gabs Flatten semantics).
// An empty path flattens the whole document.
func newFlattenOperator(path []string) jsonOperator {
	return func(body interface{}, value json.RawMessage) (interface{}, error) {
		gPart := gabs.Wrap(body)
		target := gPart
		if len(path) > 0 {
			target = gPart.Search(path...)
		}

		v, err := target.Flatten()
		if err != nil {
			return nil, err
		}

		gPart.Set(v, path...)
		return gPart.Data(), nil
	}
}
// newFlattenArrayOperator returns an operator that expands array elements of
// the array at path into the array itself (one level of flattening). The
// target must be an array; an empty target array leaves the document as-is.
func newFlattenArrayOperator(path []string) jsonOperator {
	return func(body interface{}, value json.RawMessage) (interface{}, error) {
		gPart := gabs.Wrap(body)
		target := gPart
		if len(path) > 0 {
			target = gPart.Search(path...)
		}
		if _, isArray := target.Data().([]interface{}); !isArray {
			return nil, fmt.Errorf("non-array value found at path: %T", target.Data())
		}

		children := target.Children()
		if len(children) == 0 {
			return body, nil
		}

		v, err := foldArrayArray(children)
		if err != nil {
			return nil, err
		}

		gPart.Set(v, path...)
		return gPart.Data(), nil
	}
}
// newFoldNumberArrayOperator returns an operator that replaces the numeric
// array at path with the sum of its elements. The target must be an array;
// an empty array folds to zero.
func newFoldNumberArrayOperator(path []string) jsonOperator {
	return func(body interface{}, value json.RawMessage) (interface{}, error) {
		gPart := gabs.Wrap(body)
		target := gPart
		if len(path) > 0 {
			target = gPart.Search(path...)
		}
		if _, isArray := target.Data().([]interface{}); !isArray {
			return nil, fmt.Errorf("non-array value found at path: %T", target.Data())
		}

		var v float64
		var err error
		children := target.Children()
		if len(children) > 0 {
			v, err = foldNumberArray(children)
		}
		if err != nil {
			return nil, err
		}

		gPart.Set(v, path...)
		return gPart.Data(), nil
	}
}
// newFoldStringArrayOperator returns an operator that replaces the string
// array at path with the concatenation of its elements, delimited by the
// optional string in value. The target must be an array; an empty array folds
// to the empty string.
func newFoldStringArrayOperator(path []string) jsonOperator {
	return func(body interface{}, value json.RawMessage) (interface{}, error) {
		gPart := gabs.Wrap(body)
		target := gPart
		if len(path) > 0 {
			target = gPart.Search(path...)
		}
		if _, isArray := target.Data().([]interface{}); !isArray {
			return nil, fmt.Errorf("non-array value found at path: %T", target.Data())
		}

		var v string
		var err error
		children := target.Children()
		if len(children) > 0 {
			v, err = foldStringArray(children, value)
		}
		if err != nil {
			return nil, err
		}

		gPart.Set(v, path...)
		return gPart.Data(), nil
	}
}
// newSelectOperator returns an operator that replaces the whole document with
// the value at path. String and json.Number targets are returned as raw bytes
// so they are written verbatim rather than re-encoded as JSON.
func newSelectOperator(path []string) jsonOperator {
	return func(body interface{}, value json.RawMessage) (interface{}, error) {
		gPart := gabs.Wrap(body)
		target := gPart
		if len(path) > 0 {
			target = gPart.Search(path...)
		}

		switch t := target.Data().(type) {
		case string:
			return rawJSONValue(t), nil
		case json.Number:
			return rawJSONValue(t.String()), nil
		}

		return target.Data(), nil
	}
}
// newDeleteOperator returns an operator that removes the key at path. An
// empty path deletes the whole document (the result becomes JSON null).
func newDeleteOperator(path []string) jsonOperator {
	return func(body interface{}, value json.RawMessage) (interface{}, error) {
		if len(path) == 0 {
			return nil, nil
		}

		gPart := gabs.Wrap(body)
		if err := gPart.Delete(path...); err != nil {
			return nil, err
		}
		return gPart.Data(), nil
	}
}
// newCleanOperator returns an operator that recursively removes empty values
// (empty arrays, empty objects, empty strings and nulls) from the structure
// at path. If cleaning empties the target entirely, the target key is deleted
// — or, at the root, replaced by an empty container of the original kind.
func newCleanOperator(path []string) jsonOperator {
	return func(body interface{}, value json.RawMessage) (interface{}, error) {
		gRoot := gabs.Wrap(body)

		// cleanValueFn returns the cleaned value, or nil if it should be
		// dropped; the array/object helpers recurse through it.
		var cleanValueFn func(g interface{}) interface{}
		var cleanArrayFn func(g []interface{}) []interface{}
		var cleanObjectFn func(g map[string]interface{}) map[string]interface{}
		cleanValueFn = func(g interface{}) interface{} {
			if g == nil {
				return nil
			}
			switch t := g.(type) {
			case map[string]interface{}:
				if nv := cleanObjectFn(t); len(nv) > 0 {
					return nv
				}
				return nil
			case []interface{}:
				if na := cleanArrayFn(t); len(na) > 0 {
					return na
				}
				return nil
			case string:
				if len(t) > 0 {
					return t
				}
				return nil
			}
			return g
		}
		cleanArrayFn = func(g []interface{}) []interface{} {
			newArray := []interface{}{}
			for _, v := range g {
				if nv := cleanValueFn(v); nv != nil {
					newArray = append(newArray, nv)
				}
			}
			return newArray
		}
		cleanObjectFn = func(g map[string]interface{}) map[string]interface{} {
			newObject := map[string]interface{}{}
			for k, v := range g {
				if nv := cleanValueFn(v); nv != nil {
					newObject[k] = nv
				}
			}
			return newObject
		}
		if val := cleanValueFn(gRoot.S(path...).Data()); val == nil {
			if len(path) == 0 {
				// Cleaning emptied the root: keep an empty container of the
				// same kind rather than null where possible.
				switch gRoot.Data().(type) {
				case []interface{}:
					return []interface{}{}, nil
				case map[string]interface{}:
					return map[string]interface{}{}, nil
				}
				return nil, nil
			}
			gRoot.Delete(path...)
		} else {
			gRoot.Set(val, path...)
		}

		return gRoot.Data(), nil
	}
}
// newAppendOperator returns an operator that appends the parsed value to the
// array at path. An array value has its elements expanded into the target;
// a non-array existing value is first wrapped into a single-element array.
func newAppendOperator(path []string) jsonOperator {
	return func(body interface{}, value json.RawMessage) (interface{}, error) {
		gPart := gabs.Wrap(body)
		var array []interface{}

		var valueParsed interface{}
		if value != nil {
			if err := json.Unmarshal(value, &valueParsed); err != nil {
				return nil, err
			}
		}
		switch t := valueParsed.(type) {
		case []interface{}:
			array = t
		default:
			array = append(array, t)
		}

		if gTarget := gPart.S(path...); gTarget != nil {
			switch t := gTarget.Data().(type) {
			case []interface{}:
				t = append(t, array...)
				array = t
			case nil:
				// Existing null becomes the first element of the new array.
				array = append([]interface{}{t}, array...)
			default:
				array = append([]interface{}{t}, array...)
			}
		}
		gPart.Set(array, path...)

		return gPart.Data(), nil
	}
}
// newSplitOperator returns an operator that splits the string at path by the
// non-empty string delimiter in value, replacing the string with the array of
// split segments. Both the target and the delimiter must be strings.
func newSplitOperator(path []string) jsonOperator {
	return func(body interface{}, value json.RawMessage) (interface{}, error) {
		gPart := gabs.Wrap(body)

		var valueParsed string
		if value != nil {
			if err := json.Unmarshal(value, &valueParsed); err != nil {
				return nil, err
			}
		}
		if len(valueParsed) == 0 {
			return nil, errors.New("value field must be a non-empty string")
		}

		targetStr, ok := gPart.S(path...).Data().(string)
		if !ok {
			return nil, errors.New("path value must be a string")
		}

		var values []interface{}
		for _, v := range strings.Split(targetStr, valueParsed) {
			values = append(values, v)
		}

		gPart.Set(values, path...)
		return gPart.Data(), nil
	}
}
// getOperator maps an operator name to its jsonOperator implementation. For
// the move and copy operators the value is interpreted up front as the
// destination dot path; all other operators receive the value at call time.
func getOperator(opStr string, path []string, value json.RawMessage) (jsonOperator, error) {
	var destPath []string
	if opStr == "move" || opStr == "copy" {
		var destDotPath string
		if err := json.Unmarshal(value, &destDotPath); err != nil {
			return nil, fmt.Errorf("failed to parse destination path from value: %v", err)
		}
		if len(destDotPath) > 0 {
			destPath = gabs.DotPathToSlice(destDotPath)
		}
	}
	switch opStr {
	case "set":
		return newSetOperator(path), nil
	case "flatten":
		return newFlattenOperator(path), nil
	case "flatten_array":
		return newFlattenArrayOperator(path), nil
	case "fold_number_array":
		return newFoldNumberArrayOperator(path), nil
	case "fold_string_array":
		return newFoldStringArrayOperator(path), nil
	case "select":
		return newSelectOperator(path), nil
	case "split":
		return newSplitOperator(path), nil
	case "copy":
		return newCopyOperator(path, destPath)
	case "move":
		return newMoveOperator(path, destPath)
	case "delete":
		return newDeleteOperator(path), nil
	case "append":
		return newAppendOperator(path), nil
	case "clean":
		return newCleanOperator(path), nil
	case "explode":
		return newExplodeOperator(path)
	}
	return nil, fmt.Errorf("operator not recognised: %v", opStr)
}
//------------------------------------------------------------------------------
// JSON is a processor that performs an operation on a JSON payload.
type JSON struct {
	// parts are the message part indexes to process; empty means all.
	parts []int
	// value is the interpolated `value` field, resolved per message part.
	value    field.Expression
	operator jsonOperator

	conf  Config
	log   log.Modular
	stats metrics.Type

	// Metric counters for processed, failed and emitted messages.
	mCount     metrics.StatCounter
	mErrJSONP  metrics.StatCounter
	mErrJSONS  metrics.StatCounter
	mErr       metrics.StatCounter
	mSent      metrics.StatCounter
	mBatchSent metrics.StatCounter
}
// NewJSON returns a JSON processor constructed from conf. It fails if the
// value interpolation expression, the dot path or the operator configuration
// is invalid.
func NewJSON(
	conf Config, mgr types.Manager, log log.Modular, stats metrics.Type,
) (Type, error) {
	value, err := field.New(string(conf.JSON.Value))
	if err != nil {
		return nil, fmt.Errorf("failed to parse value expression: %v", err)
	}

	j := &JSON{
		parts: conf.JSON.Parts,
		conf:  conf,
		log:   log,
		stats: stats,

		value: value,

		mCount:     stats.GetCounter("count"),
		mErrJSONP:  stats.GetCounter("error.json_parse"),
		mErrJSONS:  stats.GetCounter("error.json_set"),
		mErr:       stats.GetCounter("error"),
		mSent:      stats.GetCounter("sent"),
		mBatchSent: stats.GetCounter("batch.sent"),
	}

	splitPath := gabs.DotPathToSlice(conf.JSON.Path)
	if len(conf.JSON.Path) == 0 || conf.JSON.Path == "." {
		splitPath = []string{}
	}

	// Validate the operator eagerly by resolving the value against an empty
	// message; the per-message value is re-resolved in ProcessMessage.
	if j.operator, err = getOperator(conf.JSON.Operator, splitPath, json.RawMessage(j.value.Bytes(0, message.New(nil)))); err != nil {
		return nil, err
	}
	return j, nil
}
//------------------------------------------------------------------------------
// ProcessMessage applies the processor to a message, either creating >0
// resulting messages or a response to be sent back to the message source.
// Each targeted part is parsed as JSON, mutated by the configured operator,
// and re-serialised; parse or operator failures flag the part as errored
// rather than failing the whole batch.
func (p *JSON) ProcessMessage(msg types.Message) ([]types.Message, types.Response) {
	p.mCount.Incr(1)
	newMsg := msg.Copy()

	proc := func(index int, span opentracing.Span, part types.Part) error {
		// Resolve the interpolated value against this specific part.
		valueBytes := p.value.BytesEscapedLegacy(index, newMsg)

		jsonPart, err := part.JSON()
		if err == nil {
			// Work on a deep copy so the original part is untouched on error.
			jsonPart, err = message.CopyJSON(jsonPart)
		}
		if err != nil {
			p.mErrJSONP.Incr(1)
			p.mErr.Incr(1)
			p.log.Debugf("Failed to parse part into json: %v\n", err)
			return err
		}

		var data interface{}
		if data, err = p.operator(jsonPart, json.RawMessage(valueBytes)); err != nil {
			p.mErr.Incr(1)
			p.log.Debugf("Failed to apply operator: %v\n", err)
			return err
		}

		switch t := data.(type) {
		case rawJSONValue:
			// Raw results (select on strings/numbers) are written verbatim.
			newMsg.Get(index).Set([]byte(t))
		case []byte:
			newMsg.Get(index).Set(t)
		default:
			if err = newMsg.Get(index).SetJSON(data); err != nil {
				p.mErrJSONS.Incr(1)
				p.mErr.Incr(1)
				p.log.Debugf("Failed to convert json into part: %v\n", err)
				return err
			}
		}
		return nil
	}

	IteratePartsWithSpan(TypeJSON, p.parts, newMsg, proc)

	msgs := [1]types.Message{newMsg}

	p.mBatchSent.Incr(1)
	p.mSent.Incr(int64(newMsg.Len()))
	return msgs[:], nil
}
// CloseAsync shuts down the processor and stops processing requests.
// The JSON processor holds no background resources, so this is a no-op.
func (p *JSON) CloseAsync() {
}
// WaitForClose blocks until the processor has closed down. There is nothing
// to wait for, so it returns immediately with no error.
func (p *JSON) WaitForClose(timeout time.Duration) error {
	return nil
}
//------------------------------------------------------------------------------
package bindata
// fmtInt formats v into the tail of buf.
// It returns the index where the output begins. The caller must guarantee
// buf is large enough to hold every decimal digit of v.
func fmtInt(buf []byte, v uint64) int {
	w := len(buf)
	if v == 0 {
		w--
		buf[w] = '0'
		return w
	}
	// Emit digits least-significant first, filling buf from the right.
	for ; v > 0; v /= 10 {
		w--
		buf[w] = '0' + byte(v%10)
	}
	return w
}
// fmtFrac formats the fraction of v/10**prec (e.g., ".12345") into the
// tail of buf, omitting trailing zeros. it omits the decimal
// point too when the fraction is 0. It returns the index where the
// output bytes begin and the value v/10**prec.
func fmtFrac(buf []byte, v uint64, prec int) (nw int, nv uint64) {
	w := len(buf)
	printing := false
	for i := 0; i < prec; i++ {
		digit := v % 10
		v /= 10
		if !printing && digit == 0 {
			// Trailing zero: skip until the first significant digit.
			continue
		}
		printing = true
		w--
		buf[w] = '0' + byte(digit)
	}
	if printing {
		w--
		buf[w] = '.'
	}
	return w, v
}
const (
	// bit is the base unit; byte_ is trailing-underscored to avoid the
	// builtin byte type.
	bit   bits = 1
	byte_      = 8 * bit

	// https://en.wikipedia.org/wiki/Orders_of_magnitude_(data)
	// SI decimal multiples: powers of 1000, not 1024.
	kilobyte = 1000 * byte_
	megabyte = 1000 * kilobyte
	gigabyte = 1000 * megabyte
	terabyte = 1000 * gigabyte
	petabyte = 1000 * terabyte
	exabyte  = 1000 * petabyte
)
// Bits represents a quantity of bits, bytes, kilobytes or megabytes. Bits are
// parsed and formatted using the IEEE / SI standards, which use multiples of
// 1000 to represent kilobytes and megabytes (instead of multiples of 1024). For
// more information see https://en.wikipedia.org/wiki/Megabyte#Definitions.
// The stored value is a signed count of bits.
type bits int64
// Bytes returns the size as a floating point number of bytes.
// Whole bytes and the remaining bits are combined to preserve precision.
func (b bits) Bytes() float64 {
	bytes := b / byte_
	bits := b % byte_
	return float64(bytes) + float64(bits)/8
}
// Kilobytes returns the size as a floating point number of kilobytes
// (SI: 1 kB = 8000 bits).
func (b bits) Kilobytes() float64 {
	bytes := b / kilobyte
	bits := b % kilobyte
	return float64(bytes) + float64(bits)/(8*1000)
}
// Megabytes returns the size as a floating point number of megabytes
// (SI: 1 MB = 8e6 bits).
func (b bits) Megabytes() float64 {
	bytes := b / megabyte
	bits := b % megabyte
	return float64(bytes) + float64(bits)/(8*1000*1000)
}
// Gigabytes returns the size as a floating point number of gigabytes
// (SI: 1 GB = 8e9 bits).
func (b bits) Gigabytes() float64 {
	bytes := b / gigabyte
	bits := b % gigabyte
	return float64(bytes) + float64(bits)/(8*1000*1000*1000)
}
// String returns a string representation of b using the largest unit that has a
// positive number before the decimal. At most three decimal places of precision
// are printed.
func (b bits) String() string {
	if b == 0 {
		return "0"
	}
	// Largest value is "-123.150EB"
	var buf [10]byte
	w := len(buf) - 1
	u := uint64(b)
	neg := b < 0
	if neg {
		// Two's-complement negation of the uint64 yields the magnitude.
		u = -u
	}

	if u < uint64(byte_) {
		// Sub-byte quantities print as a whole number of bits.
		w -= 2
		copy(buf[w:], "bit")
		w = fmtInt(buf[:w], u)
	} else {
		// Each branch rescales u so that, after fmtFrac divides by 10^3, the
		// integer part is in the chosen unit with up to three fraction digits.
		switch {
		case u < uint64(kilobyte):
			w -= 0
			buf[w] = 'B'
			u = (u * 1e3 / 8)
		case u < uint64(megabyte):
			w -= 1
			copy(buf[w:], "kB")
			u /= 8
		case u < uint64(gigabyte):
			w -= 1
			copy(buf[w:], "MB")
			u /= 8 * 1e3
		case u < uint64(terabyte):
			w -= 1
			copy(buf[w:], "GB")
			u /= 8 * 1e6
		case u < uint64(petabyte):
			w -= 1
			copy(buf[w:], "TB")
			u /= 8 * 1e9
		case u < uint64(exabyte):
			w -= 1
			copy(buf[w:], "PB")
			u /= 8 * 1e12
		case u >= uint64(exabyte):
			w -= 1
			copy(buf[w:], "EB")
			u /= 8 * 1e15
		}
		w, u = fmtFrac(buf[:w], u, 3)
		w = fmtInt(buf[:w], u)
	}

	if neg {
		w--
		buf[w] = '-'
	}
	return string(buf[w:])
}
package pixelate
import (
"image"
"image/color"
"image/draw"
"runtime"
"hawx.me/code/img/utils"
)
// Triangle selects which diagonal orientation Pxl uses when splitting each
// cell into two right-angled triangles.
type Triangle int

const (
	// BOTH decides per cell, based on closeness of the average colours in
	// each quadrant.
	BOTH Triangle = iota
	// LEFT creates only left triangles.
	LEFT
	// RIGHT creates only right triangles.
	RIGHT
)
// pxlWorker renders a single cell of the pxl effect. It averages the colours
// of the four triangular quadrants (top, right, bottom, left) of the bounds
// rectangle of img, then paints dest with two triangles split along one
// diagonal. With triangle BOTH the diagonal is chosen by whichever quadrant
// pairing has the closest colours; LEFT and RIGHT force the orientation.
// When aliased is false, pixels lying exactly on the diagonal are blended
// between both triangle colours. One value is sent on c when the cell is done.
func pxlWorker(img image.Image, bounds image.Rectangle, dest draw.Image,
	size utils.Dimension, triangle Triangle, aliased bool, c chan int) {

	// Diagonal slope for a (possibly non-square) cell.
	ratio := float64(size.H) / float64(size.W)

	// Quadrant membership tests in cell-centred coordinates.
	inTop := func(x, y float64) bool {
		return (y > ratio*x) && (y > ratio*-x)
	}
	inRight := func(x, y float64) bool {
		return (y < ratio*x) && (y > ratio*-x)
	}
	inBottom := func(x, y float64) bool {
		return (y < ratio*x) && (y < ratio*-x)
	}
	inLeft := func(x, y float64) bool {
		return (y > ratio*x) && (y < ratio*-x)
	}

	// Collect the source colours falling in each quadrant.
	to := []color.Color{}
	ri := []color.Color{}
	bo := []color.Color{}
	le := []color.Color{}

	for y := 0; y < bounds.Dy(); y++ {
		for x := 0; x < bounds.Dx(); x++ {
			realY := bounds.Min.Y + y
			realX := bounds.Min.X + x

			yOrigin := float64(y - size.H/2)
			xOrigin := float64(x - size.W/2)

			if inTop(xOrigin, yOrigin) {
				to = append(to, img.At(realX, realY))
			} else if inRight(xOrigin, yOrigin) {
				ri = append(ri, img.At(realX, realY))
			} else if inBottom(xOrigin, yOrigin) {
				bo = append(bo, img.At(realX, realY))
			} else if inLeft(xOrigin, yOrigin) {
				le = append(le, img.At(realX, realY))
			}
		}
	}

	ato := utils.Average(to...)
	ari := utils.Average(ri...)
	abo := utils.Average(bo...)
	ale := utils.Average(le...)

	// Right orientation: the diagonal runs top-left to bottom-right, pairing
	// (top, right) and (bottom, left). Chosen when forced by RIGHT, or when
	// BOTH and the top/right colours are closer than top/left.
	if (triangle != LEFT) && (triangle == RIGHT ||
		utils.Closeness(ato, ari) > utils.Closeness(ato, ale)) {

		topRight := utils.Average(ato, ari)
		bottomLeft := utils.Average(abo, ale)
		middle := utils.Average(topRight, bottomLeft)

		for y := 0; y < bounds.Dy(); y++ {
			for x := 0; x < bounds.Dx(); x++ {
				realY := bounds.Min.Y + y
				realX := bounds.Min.X + x

				yOrigin := float64(y - size.H/2)
				xOrigin := float64(x - size.W/2)

				if yOrigin > ratio*xOrigin {
					dest.Set(realX, realY, topRight)
				} else if yOrigin == ratio*xOrigin && !aliased {
					// Blend pixels exactly on the diagonal.
					dest.Set(realX, realY, middle)
				} else {
					dest.Set(realX, realY, bottomLeft)
				}
			}
		}

	} else {
		topLeft := utils.Average(ato, ale)
		bottomRight := utils.Average(abo, ari)
		middle := utils.Average(topLeft, bottomRight)

		for y := 0; y < bounds.Dy(); y++ {
			for x := 0; x < bounds.Dx(); x++ {
				realY := bounds.Min.Y + y
				realX := bounds.Min.X + x

				yOrigin := float64(y - size.H/2)
				xOrigin := float64(x - size.W/2)

				// Do this one opposite to above so that the diagonals line up when
				// aliased.
				if yOrigin < ratio*-xOrigin {
					dest.Set(realX, realY, bottomRight)
				} else if yOrigin == ratio*-xOrigin && !aliased {
					dest.Set(realX, realY, middle)
				} else {
					dest.Set(realX, realY, topLeft)
				}
			}
		}
	}

	c <- 1
}
// doPxl applies the pxl effect to img, processing each chopped cell rectangle
// in its own goroutine and waiting for all of them to finish before the
// destination image is returned. CROPPED trims the output to whole cells;
// FITTED keeps the original bounds.
func doPxl(img image.Image, size utils.Dimension, triangle Triangle, style Style, aliased bool) image.Image {
	nCPU := runtime.NumCPU()
	runtime.GOMAXPROCS(nCPU)

	var o draw.Image
	b := img.Bounds()
	c := make(chan int, nCPU)
	workers := 0 // number of goroutines spawned

	switch style {
	case CROPPED:
		cols := b.Dx() / size.W
		rows := b.Dy() / size.H
		o = image.NewRGBA(image.Rect(0, 0, size.W*cols, size.H*rows))

		for _, r := range utils.ChopRectangleToSizes(b, size.H, size.W, utils.IGNORE) {
			go pxlWorker(img, r, o, size, triangle, aliased, c)
			workers++
		}

	case FITTED:
		o = image.NewRGBA(img.Bounds())

		for _, r := range utils.ChopRectangleToSizes(img.Bounds(), size.H, size.W, utils.SEPARATE) {
			go pxlWorker(img, r, o, size, triangle, aliased, c)
			workers++
		}
	}

	// BUG FIX: wait for every worker. The original recorded only the last
	// loop index (i = j) and waited for j < i completions, leaving one
	// goroutine unawaited — the function could return before the final cell
	// was drawn, racing on the destination image.
	for j := 0; j < workers; j++ {
		<-c
	}

	return o
}
// Pxl pixelates an Image into right-angled triangles with the dimensions
// given. The triangle direction can be determined by passing the required value
// as triangle; either BOTH, LEFT or RIGHT.
// Diagonal edges are smoothed (anti-aliased); see AliasedPxl for the faster
// unsmoothed variant.
func Pxl(img image.Image, size utils.Dimension, triangle Triangle, style Style) image.Image {
	return doPxl(img, size, triangle, style, false)
}
// AliasedPxl does the same as Pxl, but does not smooth diagonal edges of the
// triangles. It is faster, but will produce bad results if size is non-square.
func AliasedPxl(img image.Image, size utils.Dimension, triangle Triangle, style Style) image.Image {
	return doPxl(img, size, triangle, style, true)
}
package is
import (
"fmt"
"github.com/corbym/gocrest"
"reflect"
"strings"
)
//ValueContaining finds if x is contained in y.
// Acts like "ContainsAll", all elements given must be present (or must match) in actual in the same order as the expected values.
// If "expected" is an array or slice, we assume that actual is the same type.
// assertThat([]T, has.ValueContaining(a,b,c)) is also valid if variadic a,b,c are all type T (or matchers of T).
// For maps, the expected must also be a map or a variadic of expected values (or value matchers) and matches when
// both maps contain all key,values in expected or all variadic values are equal (or matchers match) respectively.
// For string, behaves like strings.Contains.
// Will panic if types cannot be converted correctly.
//Returns the Matcher that returns true if found.
func ValueContaining(expected ...interface{}) *gocrest.Matcher {
	match := new(gocrest.Matcher)
	// Collapse the variadic form: a single slice/map argument is the whole
	// expectation rather than a one-element variadic list.
	correctVariadicExpected := correctExpectedValue(expected...)
	match.Describe = fmt.Sprintf("something that contains %v", descriptionFor(expected...))
	match.Matches = func(actual interface{}) bool {
		// Fast path: substring check when both sides are strings.
		// NOTE(review): only expected[0] is consulted here; additional
		// variadic values are ignored for strings — confirm intended.
		expectedAsStr, expectedOk := expected[0].(string)
		actualAsStr, actualOk := actual.(string)
		if expectedOk && actualOk {
			return strings.Contains(actualAsStr, expectedAsStr)
		}
		actualValue := reflect.ValueOf(actual)
		expectedValue := reflect.ValueOf(correctVariadicExpected)
		switch actualValue.Kind() {
		case reflect.Array, reflect.Slice:
			return listContains(expectedValue, actualValue)
		case reflect.Map:
			// A list of expectations against a map compares against the
			// map's values; a map expectation compares key/value pairs.
			if expectedValue.Kind() == reflect.Array || expectedValue.Kind() == reflect.Slice {
				return mapContainsList(expectedValue, actualValue)
			}
			return mapContains(expectedValue, actualValue)
		default:
			panic("cannot determine type of variadic actual, " + actualValue.String())
		}
	}
	return match
}
// mapContainsList reports whether every element of the expected slice is
// present among the values of the actual map. An expected element that is a
// *gocrest.Matcher matches when Matches returns true; any other element is
// compared with plain interface equality (==). Matched expected items are
// collected in a set and the result is true only when all of them were found.
// NOTE(review): duplicate expected items collapse to one set entry, and an
// unhashable expected item (e.g. a slice) would panic on map insertion —
// confirm callers only pass comparable values.
func mapContainsList(expected reflect.Value, mapValue reflect.Value) bool {
	contains := make(map[interface{}]bool)
	for i := 0; i < expected.Len(); i++ {
		for _, key := range mapValue.MapKeys() {
			itemValue := expected.Index(i).Interface()
			typeMatcher, ok := itemValue.(*gocrest.Matcher)
			actualValue := mapValue.MapIndex(key).Interface()
			if ok {
				// Matcher element: delegate the comparison to the matcher.
				if typeMatcher.Matches(actualValue) {
					contains[itemValue] = true
				}
			} else {
				// Plain element: compare by interface equality.
				if actualValue == itemValue {
					contains[itemValue] = true
				}
			}
		}
	}
	return len(contains) == expected.Len()
}
func mapContains(expected reflect.Value, actual reflect.Value) bool {
expectedKeys := expected.MapKeys()
contains := make(map[interface{}]bool)
for i := 0; i < len(expectedKeys); i++ {
val := actual.MapIndex(expectedKeys[i])
if val.IsValid() {
if val.Interface() == expected.MapIndex(expectedKeys[i]).Interface() {
contains[val] = true
}
}
}
return len(contains) == len(expected.MapKeys())
}
// listContains reports whether every element of the expected slice occurs
// somewhere in the actual slice. Expected elements may be *gocrest.Matcher
// values (matched via Matches) or plain values (compared with ==).
// NOTE(review): despite the "same order" wording in ValueContaining's doc,
// no ordering is enforced here — any position in actual can satisfy an
// expected element. Matches are also deduplicated by the actual value used
// as a map key, so repeated expected values collapse; confirm this is the
// intended semantics before changing.
func listContains(expectedValue reflect.Value, actualValue reflect.Value) bool {
	contains := make(map[interface{}]bool)
	for i := 0; i < expectedValue.Len(); i++ {
		for y := 0; y < actualValue.Len(); y++ {
			exp := expectedValue.Index(i).Interface()
			act := actualValue.Index(y).Interface()
			typeMatcher, ok := exp.(*gocrest.Matcher)
			if ok {
				// Matcher element: delegate the comparison to the matcher.
				if typeMatcher.Matches(act) {
					contains[act] = true
				}
			} else {
				// Plain element: compare by interface equality.
				if exp == act {
					contains[act] = true
				}
			}
		}
	}
	return len(contains) == expectedValue.Len()
}
func correctExpectedValue(expected ...interface{}) interface{} {
kind := reflect.ValueOf(expected[0]).Kind()
if kind == reflect.Slice || kind == reflect.Map {
return expected[0]
}
return expected
}
// descriptionFor builds the human-readable description used in the matcher's
// Describe text. A single slice/map argument is described as-is; otherwise
// the variadic values are joined with " and ", using a matcher's Describe
// text where available and wrapping plain values in angle brackets.
// NOTE(review): non-matcher elements are type-asserted to string — a
// non-string value (e.g. an int) would panic here; confirm callers'
// argument types, or consider %v formatting.
func descriptionFor(expected ...interface{}) interface{} {
	kind := reflect.ValueOf(expected[0]).Kind()
	if kind == reflect.Slice || kind == reflect.Map {
		return expected[0]
	}
	var description = ""
	for x := 0; x < len(expected); x++ {
		var matcher, ok = expected[x].(*gocrest.Matcher)
		if ok {
			description += matcher.Describe
		} else {
			description += fmt.Sprintf("%s", "<"+expected[x].(string)+">")
		}
		// Separator between elements, omitted after the last one.
		if x < len(expected)-1 {
			description += " and "
		}
	}
	return description
} | is/contains.go | 0.67662 | 0.684429 | contains.go | starcoder
package dicom
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"io/ioutil"
)
// BulkDataReference describes the location of a contiguous sequence of bytes in a file.
type BulkDataReference struct {
	Reference ByteRegion
}

// ByteRegion is a contiguous sequence of bytes in a file described by an Offset and a Length.
type ByteRegion struct {
	Offset int64
	Length int64
}

// BulkDataBuffer represents a contiguous sequence of bytes buffered into memory from a file.
type BulkDataBuffer interface {
	DataElementValue
	// Data returns a reference to the underlying data in the BulkDataBuffer. Implementations shall
	// guarantee O(1) complexity.
	Data() [][]byte
	// Length returns the total length of this bulk data.
	// Will *not* add a padding byte.
	// May return UndefinedLength for encapsulated data. Always returns >= 0 with
	// 0 indicating an empty buffer.
	Length() int64
	// write serialises the buffer to w using the conventions of the given
	// transfer syntax.
	write(w io.Writer, syntax transferSyntax) error
}
// NewBulkDataBuffer returns a DataElementValue representing a raw sequence of bytes.
// Each element of b is kept as a separate fragment; the slices are not copied.
func NewBulkDataBuffer(b ...[]byte) BulkDataBuffer {
	return bytesValue(b)
}
// NewEncapsulatedFormatBuffer returns a DataElementValue representing the encapsulated image
// format. The offset table is assumed to be the basic offset table fragment and fragments is
// assumed to be the remaining image fragments (excluding the basic offset table fragment). To
// specify no offsetTable, offsetTable can be set to an empty slice.
func NewEncapsulatedFormatBuffer(offsetTable []byte, fragments ...[]byte) BulkDataBuffer {
	// Pre-size for the offset table plus all fragments, and append the
	// fragments in one step instead of element-by-element.
	buff := make([][]byte, 0, len(fragments)+1)
	buff = append(buff, offsetTable)
	buff = append(buff, fragments...)
	return encapsulatedFormatBuffer(buff)
}
// bytesValue is the in-memory BulkDataBuffer implementation for native
// (non-encapsulated) data: an ordered list of byte fragments.
type bytesValue [][]byte

// write streams the concatenated fragments to w. When the total length is
// odd, a trailing null byte is appended to the last fragment so the written
// value has even length.
// NOTE(review): append(b[idx], 0x00) may write into b[idx]'s spare capacity —
// confirm fragments are not aliased elsewhere.
func (b bytesValue) write(w io.Writer, syntax transferSyntax) error {
	totalLength := b.Length()
	idx := 0
	return writeByteFragments(w, func() (io.Reader, error) {
		if idx >= len(b) {
			return nil, io.EOF
		}
		r := bytes.NewReader(b[idx])
		if idx == len(b)-1 && totalLength%2 != 0 {
			// To achieve even length, append trailing null byte to the last fragment.
			r = bytes.NewReader(append(b[idx], 0x00))
		}
		idx++
		return r, nil
	})
}

// Data returns the underlying fragments without copying (O(1)).
func (b bytesValue) Data() [][]byte {
	return b
}

// Length returns the sum of the fragment lengths, excluding any padding byte.
func (b bytesValue) Length() int64 {
	totalLength := 0
	for _, fragment := range b {
		totalLength += len(fragment)
	}
	return int64(totalLength)
}
// encapsulatedFormatBuffer is the in-memory BulkDataBuffer implementation for
// encapsulated (fragmented) data; element 0 is the basic offset table.
type encapsulatedFormatBuffer [][]byte

// write serialises the fragments in the DICOM encapsulated format, padding
// each odd-length fragment with a trailing null byte.
// NOTE(review): append(b[idx], 0x00) may write into b[idx]'s spare capacity —
// confirm fragments are not aliased elsewhere.
func (b encapsulatedFormatBuffer) write(w io.Writer, syntax transferSyntax) error {
	idx := 0
	return writeEncapsulatedFormat(w, syntax.byteOrder(), func() (io.Reader, error) {
		if idx >= len(b) {
			return nil, io.EOF
		}
		r := bytes.NewReader(b[idx])
		if len(b[idx])%2 != 0 {
			r = bytes.NewReader(append(b[idx], 0x00))
		}
		idx++
		return r, nil
	})
}

// Data returns the underlying fragments (offset table first) without copying.
func (b encapsulatedFormatBuffer) Data() [][]byte {
	return b
}

// Length reports UndefinedLength: encapsulated data is written with an
// undefined-length value field.
func (b encapsulatedFormatBuffer) Length() int64 {
	return UndefinedLength
}
// BulkDataReader represents a streamable contiguous sequence of bytes within a file.
type BulkDataReader struct {
	io.Reader
	// Offset is the number of bytes in the file preceding the bulk data described
	// by the BulkDataReader.
	Offset int64
}

// Close discards all remaining bytes in the reader, leaving the underlying
// stream positioned just after this bulk data.
func (r *BulkDataReader) Close() error {
	_, err := io.Copy(ioutil.Discard, r)
	return err
}
// BulkDataIterator represents a sequence of BulkDataReaders.
type BulkDataIterator interface {
	// Next returns the next BulkDataReader in the iterator and discards all bytes from all previous
	// BulkDataReaders returned from Next. If there are no remaining BulkDataReaders in the iterator,
	// the error io.EOF is returned.
	Next() (*BulkDataReader, error)
	// Close discards all remaining BulkDataReaders in the iterator. Any previously returned
	// BulkDataReaders from calls to Next are also emptied.
	Close() error
	// ToBuffer converts the BulkDataIterator into its equivalent BulkDataBuffer. Behaviour after
	// Next has been called is undefined. This will close the iterator.
	ToBuffer() (BulkDataBuffer, error)
	// Length returns the total length of this BulkData.
	// Will *not* add a padding byte.
	// May return -1 if length is unknown or UndefinedLength for encapsulated
	// data. Required to be >= 0 when Constructing a DataSet with a
	// BulkDataIterator. See specific implementations for how to specify
	// explicit length.
	Length() int64
	// write serialises the remaining fragments to w using the given transfer syntax.
	write(w io.Writer, syntax transferSyntax) error
}
// NewEncapsulatedFormatIterator returns an iterator over byte fragments. r must read the ValueField
// of a DataElement in the encapsulated format as described in the DICOM standard part5 linked
// below. offset is the number of bytes preceding the ValueField in the DICOM file.
// http://dicom.nema.org/medical/dicom/current/output/html/part05.html#sect_A.4
func NewEncapsulatedFormatIterator(r io.Reader, offset int64) BulkDataIterator {
	dr := &dcmReader{cr: &countReader{r: r, bytesRead: offset}}
	return &encapsulatedFormatIterator{dr, nil, false}
}

// NewBulkDataIterator returns a BulkDataIterator with a single BulkDataReader
// described by r and offset. Offset can safely be set to 0 with the
// understanding that the BulkDataReaders won't have the proper offset set.
func NewBulkDataIterator(r io.Reader, offset int64) BulkDataIterator {
	cr := &countReader{r: r, bytesRead: offset}
	// A length of -1 marks the total data length as unknown.
	return &oneShotIterator{cr: cr, empty: false, length: -1}
}

// NewBulkDataIteratorWithLength returns a BulkDataIterator with an explicit
// length. A length is required when Constructing a DataSet and native bulk
// data must write out an explicit length before the bulk data.
func NewBulkDataIteratorWithLength(r io.Reader, offset, length int64) BulkDataIterator {
	cr := &countReader{r: r, bytesRead: offset}
	return &oneShotIterator{cr: cr, empty: false, length: length}
}
// oneShotIterator is a BulkDataIterator that contains exactly one BulkDataReader.
type oneShotIterator struct {
	cr    *countReader
	empty bool
	// The length of the underlying data. Might not be available after parsing
	// but needs to be present to be able to Construct a DataSet without buffering
	// the whole reader into memory.
	length int64
}

// Next returns the single reader on the first call and io.EOF afterwards.
func (it *oneShotIterator) Next() (*BulkDataReader, error) {
	if it.empty {
		return nil, io.EOF
	}
	it.empty = true
	return &BulkDataReader{it.cr, it.cr.bytesRead}, nil
}

// Close drains the underlying reader and marks the iterator exhausted.
func (it *oneShotIterator) Close() error {
	if _, err := io.Copy(ioutil.Discard, it.cr); err != nil {
		return fmt.Errorf("closing bulk data: %v", err)
	}
	it.empty = true
	return nil
}

// ToBuffer reads all remaining bytes into memory as a single-fragment buffer.
func (it *oneShotIterator) ToBuffer() (BulkDataBuffer, error) {
	b, err := ioutil.ReadAll(it.cr)
	if err != nil {
		return nil, fmt.Errorf("collecting fragments into memory: %v", err)
	}
	return NewBulkDataBuffer(b), nil
}

// Length returns the length supplied at construction (-1 if unknown).
func (it *oneShotIterator) Length() int64 {
	return it.length
}

// write streams the single fragment to w.
func (it *oneShotIterator) write(w io.Writer, syntax transferSyntax) error {
	return writeByteFragments(w, func() (io.Reader, error) {
		return it.Next()
	})
}
// encapsulatedFormatIterator represents image pixel data (7FE0,0010) in encapsulated format as
// described in http://dicom.nema.org/medical/dicom/current/output/html/part05.html#sect_A.4.
type encapsulatedFormatIterator struct {
	dr            *dcmReader
	currentReader *BulkDataReader
	empty         bool
}

// Next returns the next fragment of the pixel data. The first return from Next will be the
// Basic Offset Table if present or an empty BulkDataReader otherwise. When Next is called,
// any previously returned BulkDataReaders from previous calls to Next will be emptied. When there
// are no remaining fragments in the iterator, the error io.EOF is returned.
func (it *encapsulatedFormatIterator) Next() (*BulkDataReader, error) {
	if it.empty {
		return nil, io.EOF
	}
	// Drain the previous fragment so the stream is positioned at the next
	// item header.
	if it.currentReader != nil {
		if err := it.currentReader.Close(); err != nil {
			return nil, err
		}
	}
	tag, err := processItemTag(it.dr, binary.LittleEndian)
	if err != nil {
		return nil, fmt.Errorf("reading tag in encapsulated format fragment: %v", err)
	}
	if tag == SequenceDelimitationItemTag {
		// End of the fragment sequence: terminate always returns io.EOF.
		return nil, it.terminate()
	}
	length, err := it.dr.UInt32(binary.LittleEndian)
	if err != nil {
		return nil, err
	}
	// Fragments must carry an explicit length; undefined length is invalid here.
	if length >= UndefinedLength {
		return nil, fmt.Errorf("expected fragment to be of explicit length")
	}
	currentReaderBytes := limitCountReader(it.dr.cr, int64(length))
	it.currentReader = &BulkDataReader{currentReaderBytes, currentReaderBytes.bytesRead}
	return it.currentReader, nil
}

// Close discards all fragments in the iterator.
func (it *encapsulatedFormatIterator) Close() error {
	for r, err := it.Next(); err != io.EOF; r, err = it.Next() {
		if err != nil {
			return fmt.Errorf("reading next reader: %v", err)
		}
		if err := r.Close(); err != nil {
			return fmt.Errorf("discarding reader on Close: %v", err)
		}
	}
	return nil
}

// ToBuffer collects all remaining fragments (offset table first) into memory.
func (it *encapsulatedFormatIterator) ToBuffer() (BulkDataBuffer, error) {
	fragments, err := CollectFragments(it)
	if err != nil {
		return nil, fmt.Errorf("collecting fragments of encapsulated format: %v", err)
	}
	return encapsulatedFormatBuffer(fragments), nil
}

// Length reports UndefinedLength for encapsulated pixel data.
func (it *encapsulatedFormatIterator) Length() int64 {
	return UndefinedLength
}

// write re-serialises the remaining fragments in the encapsulated format.
func (it *encapsulatedFormatIterator) write(w io.Writer, syntax transferSyntax) error {
	return writeEncapsulatedFormat(w, syntax.byteOrder(), func() (io.Reader, error) {
		return it.Next()
	})
}

// terminate consumes the 32-bit length that follows the sequence delimitation
// tag and marks the iterator exhausted, returning io.EOF on success.
func (it *encapsulatedFormatIterator) terminate() error {
	_, err := it.dr.UInt32(binary.LittleEndian)
	if err != nil {
		return fmt.Errorf("reading 32 bit length of sequence delimitation item: %v", err)
	}
	it.empty = true
	return io.EOF
}
// writeByteFragments writes the concatenated byte fragments in the fragmentProvider to w
func writeByteFragments(w io.Writer, fragmentProvider func() (io.Reader, error)) error {
for fragment, err := fragmentProvider(); err != io.EOF; fragment, err = fragmentProvider() {
if err != nil {
return fmt.Errorf("retrieving next fragment: %v", err)
}
if _, err := io.Copy(w, fragment); err != nil {
return fmt.Errorf("writing fragment: %v", err)
}
}
return nil
}
// writeEncapsulatedFormat writes the byte fragments in the BulkDataIterator in the encapsulated
// format. The first fragment provided by fragmentProvider is assumed to be the basic offset table.
// Each fragment is emitted as an item (tag, 32-bit length, payload); the
// sequence is terminated with a zero-length sequence delimitation item.
func writeEncapsulatedFormat(w io.Writer, order binary.ByteOrder, fragmentProvider func() (io.Reader, error)) error {
	dw := &dcmWriter{w}
	for fragment, err := fragmentProvider(); err != io.EOF; fragment, err = fragmentProvider() {
		if err != nil {
			return err
		}
		if err := dw.Tag(order, ItemTag); err != nil {
			return fmt.Errorf("writing fragment tag: %v", err)
		}
		// TODO provide way of stream writing the fragments without buffering
		buff, err := ioutil.ReadAll(fragment)
		if err != nil {
			return fmt.Errorf("buffering fragment: %v", err)
		}
		if err := dw.UInt32(order, uint32(len(buff))); err != nil {
			return fmt.Errorf("writing fragment length: %v", err)
		}
		if err := dw.Bytes(buff); err != nil {
			return fmt.Errorf("writing fragment: %v", err)
		}
	}
	// Zero-length sequence delimitation item marks the end of the fragments.
	if err := dw.Tag(order, SequenceDelimitationItemTag); err != nil {
		return fmt.Errorf("writing fragment delimitation tag: %v", err)
	}
	if err := dw.UInt32(order, 0); err != nil {
		return fmt.Errorf("writing delimiter length: %v", err)
	}
	return nil
} | dicom/bulkdata.go | 0.857932 | 0.458834 | bulkdata.go | starcoder
package ast
import (
"fmt"
"github.com/gogo/protobuf/proto"
"github.com/katydid/katydid/relapse/token"
"github.com/katydid/katydid/relapse/types"
"strconv"
"strings"
"unicode/utf8"
)
//NewKeyword is a parser utility function that returns a Keyword given a space and a token.
func NewKeyword(space interface{}, v interface{}) *Keyword {
	t := v.(*token.Token)
	k := &Keyword{
		Value: string(t.Lit),
	}
	if space != nil {
		k.Before = space.(*Space)
	}
	return k
}

//NewSpace is a parser utility function that returns a Space given a token.
func NewSpace(s interface{}) *Space {
	t := s.(*token.Token)
	return &Space{Space: []string{string(t.Lit)}}
}

//AppendSpace is a parser utility function that returns a Space by appending the given string to the given Space's Space field, which is a list of strings.
func AppendSpace(ss interface{}, s string) *Space {
	space := ss.(*Space)
	space.Space = append(space.Space, s)
	return space
}

//SetTerminalSpace is a parser utility function that takes a Terminal and a Space and places the Space inside the returned Terminal.
func SetTerminalSpace(term interface{}, s interface{}) *Terminal {
	terminal := term.(*Terminal)
	terminal.Before = s.(*Space)
	return terminal
}

//SetRightArrow is a parser utility function that takes an Expression and a RightArrow and places the RightArrow inside the returned Expr.
func SetRightArrow(expr interface{}, rightArrow interface{}) *Expr {
	e := expr.(*Expr)
	e.RightArrow = rightArrow.(*Keyword)
	return e
}

//SetExprComma is a parser utility function that takes an expression and a comma Keyword and places the comma inside the returned Expr.
func SetExprComma(e interface{}, c interface{}) *Expr {
	expr := e.(*Expr)
	expr.Comma = c.(*Keyword)
	return expr
}
//Strip is a parser utility function that removes all occurrences of the given sub string
//from slit and also removes one pair of surrounding parentheses, returning the result
//as a byte slice.
//
//The previous version indexed slit[0] unconditionally, panicking when the
//replacement left an empty string (e.g. Strip("int", "int")), and sliced
//slit[1:len-1] without checking the closing parenthesis, panicking on a
//lone "(". Parentheses are now only stripped from a balanced "(...)" pair.
func Strip(slit string, sub string) []byte {
	slit = strings.Replace(slit, sub, "", -1)
	// Strip surrounding parentheses only when both are actually present.
	if len(slit) >= 2 && slit[0] == '(' && slit[len(slit)-1] == ')' {
		return []byte(slit[1 : len(slit)-1])
	}
	return []byte(slit)
}
//NewVariableTerminal is a parser utility function that returns a Terminal given a type.
func NewVariableTerminal(typ types.Type) (*Terminal, error) {
	return &Terminal{Variable: &Variable{Type: typ}}, nil
}

//NewBoolTerminal is a parser utility function that returns a Terminal of type bool given a bool.
//The Literal field records the canonical source text ("true"/"false").
func NewBoolTerminal(v interface{}) *Terminal {
	b := v.(bool)
	if b {
		return &Terminal{BoolValue: proto.Bool(b), Literal: "true"}
	}
	return &Terminal{BoolValue: proto.Bool(b), Literal: "false"}
}

//NewStringTerminal is a parser utility function that returns a Terminal of type string given a string literal.
//The input string is also unquoted.
func NewStringTerminal(slit string) (*Terminal, error) {
	return &Terminal{StringValue: proto.String(ToString(slit)), Literal: slit}, nil
}
//ToString unquotes a quoted string literal, falling back to the input
//unchanged when it is not a valid quoted string.
func ToString(s1 string) string {
	if unquoted, err := strconv.Unquote(s1); err == nil {
		return unquoted
	}
	return s1
}
//NewIntTerminal is a parser utility function that parses the int value out of the input string
//(removing the "int" marker and any surrounding parentheses via Strip) and returns a Terminal of type int.
func NewIntTerminal(slit string) (*Terminal, error) {
	return &Terminal{IntValue: ToInt64(Strip(slit, "int")), Literal: slit}, nil
}
//ToInt64 is a parser utility function that parses tok as a base-10 int64 and
//returns a pointer to the result, panicking when tok is not a valid integer.
func ToInt64(tok []byte) *int64 {
	parsed, err := strconv.ParseInt(string(tok), 10, 64)
	if err == nil {
		return &parsed
	}
	panic(err)
}
//NewUintTerminal is a parser utility function that parses the uint value out of the input string
//(removing the "uint" marker and any surrounding parentheses via Strip) and returns a Terminal of type uint.
func NewUintTerminal(slit string) (*Terminal, error) {
	return &Terminal{UintValue: ToUint64(Strip(slit, "uint")), Literal: slit}, nil
}
//ToUint64 is a parser utility function that parses tok as a base-10 uint64
//and returns a pointer to the result, panicking when tok is not a valid
//unsigned integer.
func ToUint64(tok []byte) *uint64 {
	parsed, err := strconv.ParseUint(string(tok), 10, 64)
	if err == nil {
		return &parsed
	}
	panic(err)
}
//NewDoubleTerminal is a parser utility function that parses the double value out of the input string
//(removing the "double" marker and any surrounding parentheses via Strip) and returns a Terminal of type double.
func NewDoubleTerminal(slit string) (*Terminal, error) {
	return &Terminal{DoubleValue: ToFloat64(Strip(slit, "double")), Literal: slit}, nil
}
//ToFloat64 is a parser utility function that parses tok as a float64 and
//returns a pointer to the result, panicking when tok is not a valid float.
func ToFloat64(tok []byte) *float64 {
	parsed, err := strconv.ParseFloat(string(tok), 64)
	if err == nil {
		return &parsed
	}
	panic(err)
}
//NewBytesTerminal is a parser utility function that parses the []byte value out of the input string and returns a Terminal of type []byte.
//Returns the error from parseBytes unchanged when an element cannot be parsed.
func NewBytesTerminal(stringLit string) (*Terminal, error) {
	data, err := parseBytes(stringLit)
	if err != nil {
		return nil, err
	}
	return &Terminal{BytesValue: data, Literal: stringLit}, nil
}
// parseBytes parses the textual form of a byte-slice literal (the elements
// between the outermost "{" and "}", e.g. "[]byte{0x01, 'a', 10}") by
// splitting on commas and parsing each element with parseByte. Empty
// elements — such as those produced by a trailing comma — are skipped.
func parseBytes(s string) ([]byte, error) {
	byteElems := strings.Split(s[strings.Index(s, "{")+1:strings.LastIndex(s, "}")], ",")
	data := make([]byte, 0, len(byteElems))
	for _, b := range byteElems {
		// Shadows the parameter s with the trimmed element text.
		s := strings.TrimSpace(b)
		if len(s) == 0 {
			continue
		}
		d, err := parseByte(s)
		if err != nil {
			return nil, err
		}
		data = append(data, d)
	}
	return data, nil
}
// hexToByte converts a single hexadecimal digit character to its numeric
// value (0-15). Characters outside [0-9a-fA-F] fall through to the default
// branch and yield c - '0', which is unspecified garbage for such input.
func hexToByte(c byte) byte {
	switch {
	case c >= 'a' && c <= 'f':
		return c - 'a' + 10
	case c >= 'A' && c <= 'F':
		return c - 'A' + 10
	default:
		return c - '0'
	}
}
// hexesToByte combines two hexadecimal digit characters — a as the high
// nibble and b as the low nibble — into a single byte.
func hexesToByte(a byte, b byte) byte {
	high, low := hexToByte(a), hexToByte(b)
	return high*16 + low
}
// parseByte parses one byte literal in the textual forms produced by Go
// source: a rune literal ('a'), a hex literal (0x2A or 0xA), an octal
// literal (0377, 077, 07), or a plain decimal integer in [0, 255].
//
// Fix: the octal overflow check used `o >= 255`, wrongly rejecting 0377
// (= 255) even though the rune path accepts r == 255 and the decimal path
// accepts i == 255. Only values strictly greater than 255 overflow a byte.
func parseByte(s string) (byte, error) {
	if s[0] == '\'' {
		// Rune literal: decode the first rune after the opening quote.
		r, _ := utf8.DecodeRune([]byte(s)[1:])
		if r <= 255 {
			return byte(r), nil
		}
		return 0, fmt.Errorf("rune too large %v", r)
	} else if s[0] == '0' {
		if len(s) == 1 {
			return 0, nil
		}
		if s[1] == 'x' || s[1] == 'X' {
			// Hex literal with one or two digits.
			if len(s) == 4 {
				return hexesToByte(s[2], s[3]), nil
			} else if len(s) == 3 {
				return hexToByte(s[2]), nil
			}
			return 0, fmt.Errorf("not a hex digit %v", s)
		} else {
			// Octal literal with up to three digits after the leading 0.
			switch len(s) {
			case 4:
				o := (int(s[1]-'0') * 64) + (int(s[2]-'0') * 8) + int(s[3]-'0')
				// 0377 == 255 is the largest valid octal byte.
				if o > 255 {
					return 0, fmt.Errorf("octal too large %d", o)
				}
				return byte(o), nil
			case 3:
				return byte((s[1]-'0')*8 + (s[2] - '0')), nil
			case 2:
				return byte(s[1] - '0'), nil
			}
			return 0, nil
		}
	}
	// Plain decimal integer.
	i, err := strconv.Atoi(s)
	if err != nil {
		return 0, err
	}
	if i >= 0 && i <= 255 {
		return byte(i), nil
	}
	return 0, fmt.Errorf("int too large %d", i)
}
//NewSDTName is a parser utility function that returns a NameExpr given a white space and a terminal value expression.
//Exactly one of the terminal's typed values (double, int, uint, bool, string
//or bytes) is copied into the Name; any other terminal kind is a programming
//error and panics.
func NewSDTName(space *Space, term *Terminal) *NameExpr {
	name := &NameExpr{
		Name: &Name{
			Before: space,
		},
	}
	if term.DoubleValue != nil {
		name.Name.DoubleValue = term.DoubleValue
	} else if term.IntValue != nil {
		name.Name.IntValue = term.IntValue
	} else if term.UintValue != nil {
		name.Name.UintValue = term.UintValue
	} else if term.BoolValue != nil {
		name.Name.BoolValue = term.BoolValue
	} else if term.StringValue != nil {
		name.Name.StringValue = term.StringValue
	} else if term.BytesValue != nil {
		name.Name.BytesValue = term.BytesValue
	} else {
		panic(fmt.Sprintf("unreachable name type %#v", term))
	}
	return name
} | relapse/ast/util.go | 0.704364 | 0.477676 | util.go | starcoder
package gojacego
// optimizer rewrites an operation tree into an equivalent but cheaper tree,
// using the executor to evaluate constant subtrees.
type optimizer struct {
	executor interpreter
}

// optimize applies constant folding and algebraic simplifications to op by
// delegating to the package-level optimize function.
func (this *optimizer) optimize(op operation, functionRegistry *functionRegistry, constantRegistry *constantRegistry) operation {
	return optimize(this.executor, op, functionRegistry, constantRegistry)
}
// optimize returns an equivalent, cheaper operation tree:
//   - a subtree that depends on no variables and is idempotent is folded into
//     a single constant by executing it once with the interpreter;
//   - multiplication or logical AND with a constant 0 collapses to 0;
//   - logical OR with a constant 1 collapses to 1;
//   - all other recognised node kinds have their operands optimized
//     recursively (in place), and unknown kinds are returned unchanged.
func optimize(executor interpreter, op operation, functionRegistry *functionRegistry, constantRegistry *constantRegistry) operation {
	// Constant folding: evaluate variable-free, idempotent subtrees once and
	// replace them with the resulting constant (unless already a constant).
	if _, b := op.(*constantOperation); !op.OperationMetadata().DependsOnVariables && op.OperationMetadata().IsIdempotent && !b {
		result, _ := executor.execute(op, nil, functionRegistry, constantRegistry)
		return newConstantOperation(floatingPoint, result)
	} else {
		if cop, ok := op.(*addOperation); ok {
			cop.OperationOne = optimize(executor, cop.OperationOne, functionRegistry, constantRegistry)
			cop.OperationTwo = optimize(executor, cop.OperationTwo, functionRegistry, constantRegistry)
		} else if cop, ok := op.(*subtractionOperation); ok {
			cop.OperationOne = optimize(executor, cop.OperationOne, functionRegistry, constantRegistry)
			cop.OperationTwo = optimize(executor, cop.OperationTwo, functionRegistry, constantRegistry)
		} else if cop, ok := op.(*multiplicationOperation); ok {
			// x * 0 == 0 regardless of the other operand.
			cop.OperationOne = optimize(executor, cop.OperationOne, functionRegistry, constantRegistry)
			cop1, ok1 := cop.OperationOne.(*constantOperation)
			if ok1 {
				if cop1.Metadata.DataType == floatingPoint && cop1.Value == 0.0 {
					return newConstantOperation(floatingPoint, 0.0)
				} else {
					if toFloat64Panic(cop1.Value) == 0.0 {
						return newConstantOperation(floatingPoint, 0.0)
					}
				}
			}
			cop.OperationTwo = optimize(executor, cop.OperationTwo, functionRegistry, constantRegistry)
			cop2, ok2 := cop.OperationTwo.(*constantOperation)
			if ok2 {
				if cop2.Metadata.DataType == floatingPoint && cop2.Value == 0.0 {
					return newConstantOperation(floatingPoint, 0.0)
				} else {
					if toFloat64Panic(cop2.Value) == 0.0 {
						return newConstantOperation(floatingPoint, 0.0)
					}
				}
			}
		} else if cop, ok := op.(*divisorOperation); ok {
			cop.Dividend = optimize(executor, cop.Dividend, functionRegistry, constantRegistry)
			cop.Divisor = optimize(executor, cop.Divisor, functionRegistry, constantRegistry)
		} else if cop, ok := op.(*exponentiationOperation); ok {
			cop.Base = optimize(executor, cop.Base, functionRegistry, constantRegistry)
			cop.Exponent = optimize(executor, cop.Exponent, functionRegistry, constantRegistry)
		} else if cop, ok := op.(*greaterThanOperation); ok {
			cop.OperationOne = optimize(executor, cop.OperationOne, functionRegistry, constantRegistry)
			cop.OperationTwo = optimize(executor, cop.OperationTwo, functionRegistry, constantRegistry)
		} else if cop, ok := op.(*greaterOrEqualThanOperation); ok {
			cop.OperationOne = optimize(executor, cop.OperationOne, functionRegistry, constantRegistry)
			cop.OperationTwo = optimize(executor, cop.OperationTwo, functionRegistry, constantRegistry)
		} else if cop, ok := op.(*andOperation); ok {
			// x && 0 == 0 regardless of the other operand.
			cop.OperationOne = optimize(executor, cop.OperationOne, functionRegistry, constantRegistry)
			cop1, ok1 := cop.OperationOne.(*constantOperation)
			if ok1 {
				if cop1.Metadata.DataType == floatingPoint && cop1.Value == 0.0 {
					return newConstantOperation(floatingPoint, 0.0)
				} else {
					if toFloat64Panic(cop1.Value) == 0.0 {
						return newConstantOperation(floatingPoint, 0.0)
					}
				}
			}
			cop.OperationTwo = optimize(executor, cop.OperationTwo, functionRegistry, constantRegistry)
			cop2, ok2 := cop.OperationTwo.(*constantOperation)
			if ok2 {
				if cop2.Metadata.DataType == floatingPoint && cop2.Value == 0.0 {
					return newConstantOperation(floatingPoint, 0.0)
				} else {
					if toFloat64Panic(cop2.Value) == 0.0 {
						return newConstantOperation(floatingPoint, 0.0)
					}
				}
			}
		} else if cop, ok := op.(*orOperation); ok {
			// x || 1 == 1 regardless of the other operand.
			cop.OperationOne = optimize(executor, cop.OperationOne, functionRegistry, constantRegistry)
			cop1, ok1 := cop.OperationOne.(*constantOperation)
			if ok1 {
				if cop1.Metadata.DataType == floatingPoint && cop1.Value == 1.0 {
					return newConstantOperation(floatingPoint, 1.0)
				} else {
					if toFloat64Panic(cop1.Value) == 1.0 {
						return newConstantOperation(floatingPoint, 1.0)
					}
				}
			}
			cop.OperationTwo = optimize(executor, cop.OperationTwo, functionRegistry, constantRegistry)
			cop2, ok2 := cop.OperationTwo.(*constantOperation)
			if ok2 {
				if cop2.Metadata.DataType == floatingPoint && cop2.Value == 1.0 {
					return newConstantOperation(floatingPoint, 1.0)
				} else {
					if toFloat64Panic(cop2.Value) == 1.0 {
						return newConstantOperation(floatingPoint, 1.0)
					}
				}
			}
		} else if cop, ok := op.(*lessThanOperation); ok {
			cop.OperationOne = optimize(executor, cop.OperationOne, functionRegistry, constantRegistry)
			cop.OperationTwo = optimize(executor, cop.OperationTwo, functionRegistry, constantRegistry)
		} else if cop, ok := op.(*lessOrEqualThanOperation); ok {
			cop.OperationOne = optimize(executor, cop.OperationOne, functionRegistry, constantRegistry)
			cop.OperationTwo = optimize(executor, cop.OperationTwo, functionRegistry, constantRegistry)
		} else if cop, ok := op.(*functionOperation); ok {
			// Optimize each function argument independently.
			optimizedArguments := make([]operation, len(cop.Arguments))
			for idx, arg := range cop.Arguments {
				ret := optimize(executor, arg, functionRegistry, constantRegistry)
				optimizedArguments[idx] = ret
			}
			cop.Arguments = optimizedArguments
		}
		return op
	}
} | optimizer.go | 0.665193 | 0.417331 | optimizer.go | starcoder
package tableimage
import (
"image"
"image/draw"
"golang.org/x/image/font"
"golang.org/x/image/math/fixed"
)
// setRgba allocates the table's backing RGBA image at the configured
// width/height and fills it entirely with the configured background color.
func (ti *TableImage) setRgba() {
	img := image.NewRGBA(image.Rect(0, 0, ti.width, ti.height))
	//set image background
	draw.Draw(img, img.Bounds(), &image.Uniform{getColorByHex(ti.backgroundColor)}, image.ZP, draw.Src)
	ti.img = img
}
// addString draws label onto the table image with its baseline origin at
// pixel (x, y), using the table's font face and the given hex color.
// x and y are converted to 26.6 fixed point (whole pixels × 64).
func (ti *TableImage) addString(x, y int, label string, color string) {
	point := fixed.Point26_6{X: fixed.Int26_6(x * 64), Y: fixed.Int26_6(y * 64)}
	d := &font.Drawer{
		Dst:  ti.img,
		Src:  image.NewUniform(getColorByHex(color)),
		Face: ti.firacode, //basicfont.Face7x13, inconsolata.Bold8x16
		Dot:  point,
	}
	d.DrawString(label)
}
//Thx to https://github.com/StephaneBunel/bresenham
// addLine rasterises a straight line from (x1,y1) to (x2,y2) in the given
// hex color using Bresenham's integer algorithm. Degenerate cases (point,
// horizontal, vertical, 45° diagonal) are drawn with simple loops; the two
// general cases track an error accumulator e against the slope.
func (ti *TableImage) addLine(x1, y1, x2, y2 int, color string) {
	var dx, dy, e, slope int
	col := getColorByHex(color)
	// Because drawing p1 -> p2 is equivalent to draw p2 -> p1,
	// I sort points in x-axis order to handle only half of possible cases.
	if x1 > x2 {
		x1, y1, x2, y2 = x2, y2, x1, y1
	}
	dx, dy = x2-x1, y2-y1
	// Because point is x-axis ordered, dx cannot be negative
	if dy < 0 {
		dy = -dy
	}
	switch {
	// Is line a point ?
	case x1 == x2 && y1 == y2:
		ti.img.Set(x1, y1, col)
	// Is line an horizontal ?
	case y1 == y2:
		for ; dx != 0; dx-- {
			ti.img.Set(x1, y1, col)
			x1++
		}
		ti.img.Set(x1, y1, col)
	// Is line a vertical ?
	case x1 == x2:
		if y1 > y2 {
			y1, y2 = y2, y1
		}
		for ; dy != 0; dy-- {
			ti.img.Set(x1, y1, col)
			y1++
		}
		ti.img.Set(x1, y1, col)
	// Is line a diagonal ?
	case dx == dy:
		if y1 < y2 {
			for ; dx != 0; dx-- {
				ti.img.Set(x1, y1, col)
				x1++
				y1++
			}
		} else {
			for ; dx != 0; dx-- {
				ti.img.Set(x1, y1, col)
				x1++
				y1--
			}
		}
		ti.img.Set(x1, y1, col)
	// wider than high ?
	case dx > dy:
		if y1 < y2 {
			// BresenhamDxXRYD(img, x1, y1, x2, y2, col)
			dy, e, slope = 2*dy, dx, 2*dx
			for ; dx != 0; dx-- {
				ti.img.Set(x1, y1, col)
				x1++
				e -= dy
				if e < 0 {
					y1++
					e += slope
				}
			}
		} else {
			// Same as above but stepping y downward.
			dy, e, slope = 2*dy, dx, 2*dx
			for ; dx != 0; dx-- {
				ti.img.Set(x1, y1, col)
				x1++
				e -= dy
				if e < 0 {
					y1--
					e += slope
				}
			}
		}
		ti.img.Set(x2, y2, col)
	// higher than wide.
	default:
		if y1 < y2 {
			dx, e, slope = 2*dx, dy, 2*dy
			for ; dy != 0; dy-- {
				ti.img.Set(x1, y1, col)
				y1++
				e -= dx
				if e < 0 {
					x1++
					e += slope
				}
			}
		} else {
			// Same as above but stepping y downward.
			dx, e, slope = 2*dx, dy, 2*dy
			for ; dy != 0; dy-- {
				ti.img.Set(x1, y1, col)
				y1--
				e -= dx
				if e < 0 {
					x1++
					e += slope
				}
			}
		}
		ti.img.Set(x2, y2, col)
	}
} | table-image/basic.go | 0.542136 | 0.447279 | basic.go | starcoder
package conformance
import "reflect"
// MutateFields modifies the value of each field of structs recursively.
// NOTE(review): pass a pointer (e.g. MutateFields(&s)) — struct fields
// reached through a non-pointer value are not settable via reflection, so
// mutations would silently be skipped. Confirm all callers pass pointers.
func MutateFields(x interface{}) {
	mut(reflect.TypeOf(x), reflect.ValueOf(x))
}
func mut(t reflect.Type, v reflect.Value) {
switch t.Kind() {
case reflect.String:
v.SetString("blip blop")
case reflect.Uint, reflect.Uint64:
v.SetUint(v.Uint() + 1)
case reflect.Uint8:
v.SetUint(uint64(v.Uint() + 1))
case reflect.Uint16:
v.SetUint(uint64(v.Uint() + 1))
case reflect.Uint32:
v.SetUint(uint64(v.Uint() + 1))
case reflect.Int, reflect.Int64:
v.SetInt(v.Int() + 1)
case reflect.Int8:
v.SetInt(int64(v.Int() + 1))
case reflect.Int16:
v.SetInt(int64(v.Int() + 1))
case reflect.Int32:
v.SetInt(int64(v.Int() + 1))
case reflect.Float32, reflect.Float64:
if x := v.Float(); x != 0.0 && x != -0.0 {
v.SetFloat(-1 * x)
return
}
v.SetFloat(1.0)
case reflect.Bool:
v.SetBool(!v.Bool())
case reflect.Ptr:
if v.IsNil() {
return
}
mut(t.Elem(), v.Elem())
case reflect.Array, reflect.Slice:
n := v.Len()
elemT := t.Elem()
for i := 0; i < n; i++ {
mut(elemT, v.Index(i))
}
if n == 0 {
n = 3
for i := 0; i < n; i++ {
nv := reflect.New(elemT)
mut(elemT, nv.Elem())
v.Set(reflect.Append(reflect.Indirect(v), reflect.Indirect(nv)))
}
}
case reflect.Struct:
for i, n := 0, t.NumField(); i < n; i++ {
elementT := t.Field(i)
elementV := v.Field(i)
if elementV.CanSet() || elementT.Anonymous {
mut(elementT.Type, elementV)
}
}
case reflect.Map:
m := reflect.MakeMap(reflect.MapOf(t.Key(), t.Elem()))
for _, mapKey := range v.MapKeys() {
mapIndex := reflect.New(t.Key()).Elem()
mapIndex.Set(mapKey)
mut(t.Key(), mapIndex)
mapValue := reflect.New(t.Elem()).Elem()
mapValue.Set(v.MapIndex(mapKey))
mut(t.Elem(), mapValue)
m.SetMapIndex(mapIndex, mapValue)
}
v.Set(m)
}
return
}
// VisitWithPredicate deep-visits the given struct
// and returns whether the predicate holds at least once.
// Leaf values (strings, ints, uints, floats, bools) are passed to f;
// pointers, slices, arrays, structs and maps are traversed recursively.
func VisitWithPredicate(x interface{}, f func(w interface{}) bool) bool {
	return vwp(f, false, reflect.TypeOf(x), reflect.ValueOf(x))
}
// vwp recursively walks the value v (of type t), applying the predicate f to
// every leaf (string/uint/int/float/bool) it reaches, short-circuiting to
// true as soon as the predicate holds. acc carries whether the predicate has
// already held.
// NOTE(review): acc is false at every call site in this file (top-level and
// recursive), so the "acc ||" guards are effectively constant — confirm
// before simplifying.
// NOTE(review): the struct case only descends into settable or anonymous
// fields; fields reached without a pointer are not settable, so exported
// fields may be skipped — confirm callers pass pointers.
func vwp(f func(w interface{}) bool, acc bool, t reflect.Type, v reflect.Value) bool {
	switch t.Kind() {
	case reflect.String:
		return acc || f(v.String())
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return acc || f(v.Uint())
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return acc || f(v.Int())
	case reflect.Float32, reflect.Float64:
		return acc || f(v.Float())
	case reflect.Bool:
		return acc || f(v.Bool())
	case reflect.Ptr:
		if v.IsNil() {
			return acc
		}
		return acc || vwp(f, acc, t.Elem(), v.Elem())
	case reflect.Array, reflect.Slice:
		for i, n, elemT := 0, v.Len(), t.Elem(); i < n; i++ {
			if acc || vwp(f, acc, elemT, v.Index(i)) {
				return true
			}
		}
	case reflect.Struct:
		for i, n := 0, t.NumField(); i < n; i++ {
			elementT := t.Field(i)
			elementV := v.Field(i)
			if elementV.CanSet() || elementT.Anonymous {
				if acc || vwp(f, acc, elementT.Type, elementV) {
					return true
				}
			}
		}
	case reflect.Map:
		// Keys and values are re-entered through VisitWithPredicate since
		// map entries are interfaces extracted from the iterator.
		iter := v.MapRange()
		for iter.Next() {
			if acc || VisitWithPredicate(iter.Key().Interface(), f) || VisitWithPredicate(iter.Value().Interface(), f) {
				return true
			}
		}
	}
	return acc
} | conformance/internal/conformance/mutate_fields.go | 0.619011 | 0.488649 | mutate_fields.go | starcoder
package treepalette
import (
"fmt"
)
// Color expresses a color as an n-dimensional point in the RGBA space for use
// in the kd-tree search algorithm. Both RGBA and RGB (no alpha) spaces are
// supported, since the latter reduces computation when transparency is not
// important.
type Color interface {
	// Dimensions returns the total number of dimensions (3 for RGB, 4 for RGBA).
	Dimensions() int
	// Dimension returns the value of the i-th dimension, i.e. R, G, B and/or A.
	Dimension(i int) uint32
}

// PaletteColor is a Color inside an indexed color palette.
type PaletteColor interface {
	Color
	// Index returns the palette index of the color.
	Index() int
}

// ColorRGBA is an example Color implementation. Channel values are stored in
// the 16-bit range [0, 0xffff].
type ColorRGBA struct {
	R, G, B, A   uint32 // R, G, B, and A are considered as dimensions 0, 1, 2 and 3 respectively.
	AlphaChannel bool   // If false, alpha values are ignored.
}
// RGBA implements the standard color.Color interface. A color without an
// active alpha channel is reported as fully opaque.
func (c ColorRGBA) RGBA() (uint32, uint32, uint32, uint32) {
	alpha := uint32(0xffff)
	if c.AlphaChannel {
		alpha = c.A
	}
	return c.R, c.G, c.B, alpha
}
// Dimensions reports how many dimensions the color spans: 4 when the alpha
// channel is active, otherwise 3.
func (c ColorRGBA) Dimensions() int {
	if !c.AlphaChannel {
		return 3
	}
	return 4
}
// Dimension returns the value of the i-th dimension: 0=R, 1=G, 2=B, 3=A.
// Dimension 3 is only valid when the alpha channel is active; any other
// out-of-range index panics.
func (c ColorRGBA) Dimension(i int) uint32 {
	switch i {
	case 0:
		return c.R
	case 1:
		return c.G
	case 2:
		return c.B
	case 3:
		if c.AlphaChannel {
			return c.A
		}
		fallthrough
	default:
		// Valid indices are 0..Dimensions()-1; the original message printed
		// Dimensions() itself, overstating the upper bound by one.
		panic(fmt.Errorf("invalid dimension %d: expected [0-%d]", i, c.Dimensions()-1))
	}
}
// String renders the color for debugging. R, G and B are shown scaled to the
// conventional 0-255 range; alpha, when present, is shown in [0-1].
func (c ColorRGBA) String() string {
	const scale = float64(0xffff)
	r := float64(c.R) / scale * 255
	g := float64(c.G) / scale * 255
	b := float64(c.B) / scale * 255
	if !c.AlphaChannel {
		return fmt.Sprintf("{R:%f, G:%f, B:%f}", r, g, b)
	}
	return fmt.Sprintf("{R:%f, G:%f, B:%f, A:%f}", r, g, b, float64(c.A)/scale)
}
// NewTransparentColor creates a color with an active alpha channel.
// R, G and B values are in the range [0-255]; A is in the range [0-1].
func NewTransparentColor(R, G, B int, A float64) ColorRGBA {
	const scale = float64(0xffff)
	return ColorRGBA{
		R:            uint32(float64(R) / 255 * scale),
		G:            uint32(float64(G) / 255 * scale),
		B:            uint32(float64(B) / 255 * scale),
		A:            uint32(A * scale),
		AlphaChannel: true,
	}
}
// NewOpaqueColor creates an opaque color with no alpha channel.
// R, G and B values are in the range [0-255].
func NewOpaqueColor(R, G, B int) ColorRGBA {
	const scale = float64(0xffff)
	return ColorRGBA{
		R:            uint32(float64(R) / 255 * scale),
		G:            uint32(float64(G) / 255 * scale),
		B:            uint32(float64(B) / 255 * scale),
		AlphaChannel: false,
	}
}
// IndexedColorRGBA is an example PaletteColor implementation: a ColorRGBA
// tagged with its unique position in a palette.
type IndexedColorRGBA struct {
	ColorRGBA
	Id int // Id is the color's unique index within the palette.
	Name string // A human readable name, used by String; need not be unique.
}
// Index returns the color's unique palette index.
func (ic IndexedColorRGBA) Index() int {
	return ic.Id
}
// String renders the palette color as "Name(Id)" for debugging.
func (ic IndexedColorRGBA) String() string {
	return fmt.Sprintf("%s(%d)", ic.Name, ic.Id)
}
// NewTransparentPaletteColor creates a transparent palette color.
// R,G,B values are in range [0-255], A in range [0-1].
// id is the unique id for the color across the palette.
// name is just any human readable identifier, no need to be unique.
func NewTransparentPaletteColor(R, G, B int, A float64, id int, name string) IndexedColorRGBA {
	return IndexedColorRGBA{
		Id: id,
		Name: name,
		ColorRGBA: NewTransparentColor(R, G, B, A),
	}
}
// NewOpaquePaletteColor creates an opaque palette color (no alpha channel).
// R,G,B values are in range [0-255].
// id is the unique id for the color across the palette.
// name is just any human readable identifier, no need to be unique.
func NewOpaquePaletteColor(R, G, B int, id int, name string) IndexedColorRGBA {
	return IndexedColorRGBA{
		Id: id,
		Name: name,
		ColorRGBA: NewOpaqueColor(R, G, B),
	}
} | colors.go | 0.849504 | 0.609437 | colors.go | starcoder |
package trie
import (
"bufio"
"fmt"
"io"
"os"
"strings"
)
// The TrieNode interface provides information about the set of words
// after a particular prefix (which is implicit).
type TrieNode interface {
	IsPrefix() bool // Is there any continuation which will result in a word?
	IsWord() bool // Is the current position a word?
	Follow(c byte) TrieNode // Add c to the current prefix.
}
// The Builder interface is used to construct tries. Typically, the trie
// will be populated with words using the ReadWords function.
type Builder interface {
	AddWord(s string) error // AddWord adds the given string to the trie.
	Root() TrieNode // Root returns the constructed trie once building is done.
}
// ReadWords reads words from in, one per line, adding each non-empty trimmed
// line to the trie builder b. Words that fail to be added are reported on
// stderr (with their cause) but do not abort the read. It returns the
// constructed trie, or an error if reading the input itself fails.
func ReadWords(b Builder, in io.Reader) (TrieNode, error) {
	br := bufio.NewReader(in)
	for {
		line, err := br.ReadString('\n')
		line = strings.TrimSpace(line)
		if len(line) > 0 {
			if addErr := b.AddWord(line); addErr != nil {
				// Include the underlying error instead of discarding it.
				fmt.Fprintf(os.Stderr, "Failed to add word %s: %v\n", line, addErr)
			}
		}
		// ReadString may return data together with io.EOF on the last line,
		// so the word above is processed before checking for EOF.
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, err
		}
	}
	return b.Root(), nil
}
/* simple is a naive trie node implementation. */
type simple struct {
	isWord bool // Is the current node at the end of a word?
	children []*simple // Subtries for each valid continuation, indexed by character class - 1.
}
// A CharMap is used to describe the bytes the trie accepts. 0 means the
// byte is not accepted; otherwise bytes with the same number are identified
// within the trie.
// For example, if A=1, B=2, ..., Z=26 and a=1, b=2, ..., z=26, then the trie
// can store and identify words without case sensitivity, and only accept words
// which contain letters.
type CharMap [256]int
// simpleNode pairs a position in a simple trie with the CharMap used to
// classify input bytes; it implements both Builder and TrieNode.
type simpleNode struct {
	s *simple // current position in the trie
	cm *CharMap // byte classification shared by the whole trie
}
// newSimple allocates a trie node with room for n child branches.
func newSimple(n int) *simple {
	kids := make([]*simple, n)
	return &simple{children: kids}
}
// NewAlphaCharMap creates a default CharMap, suitable for English. It accepts
// A-Z, a-z, '-' and apostrophe, identifying upper with lower case letters.
func NewAlphaCharMap() *CharMap {
	cm := new(CharMap)
	for c := 0; c < 26; c++ {
		// Upper and lower case share the same class number.
		cm['A'+c] = c + 1
		cm['a'+c] = c + 1
	}
	cm['-'] = 27
	cm['\''] = 28
	return cm
}
// max returns the largest class number in the map, i.e. the number of
// distinct accepted character classes (and the child fan-out of each node).
func (cm *CharMap) max() int {
	best := 0
	for _, class := range cm {
		if class > best {
			best = class
		}
	}
	return best
}
// NewSimpleBuilder returns a Builder producing a simple trie that accepts
// exactly the characters described by cm.
func NewSimpleBuilder(cm *CharMap) Builder {
	root := newSimple(cm.max())
	return &simpleNode{s: root, cm: cm}
}
// AddWord adds a word to a simple trie. It returns an error, leaving the
// trie unmodified, if the word contains any byte the CharMap rejects
// (which includes all multi-byte UTF-8 sequences).
func (b *simpleNode) AddWord(s string) error {
	// First check that we can add every byte in the string. Iterating bytes
	// rather than runes matters: `for _, c := range s` yields runes, and a
	// rune >= 256 would index the [256]int CharMap out of range and panic.
	// Byte iteration also matches Follow, which descends byte by byte.
	for i := 0; i < len(s); i++ {
		if b.cm[s[i]] == 0 {
			return fmt.Errorf("Bad word %s", s)
		}
	}
	fanout := b.cm.max() // loop-invariant; avoid rescanning the map per new node
	st := b.s
	for i := 0; i < len(s); i++ {
		c := b.cm[s[i]] - 1
		if st.children[c] == nil {
			st.children[c] = newSimple(fanout)
		}
		st = st.children[c]
	}
	st.isWord = true
	return nil
}
// Root provides the root of a simpleNode trie, to be used once
// building is complete.
func (b *simpleNode) Root() TrieNode {
	return b
}
// IsPrefix reports whether there are any words that
// have the prefix at the current position. A nil node (reached by a
// failed Follow) is never a prefix.
func (s *simpleNode) IsPrefix() bool {
	return s != nil
}
// IsWord reports whether the current location marks the end of a word.
// A nil node (reached by a failed Follow) is never a word.
func (s *simpleNode) IsWord() bool {
	if s == nil {
		return false
	}
	return s.s.isWord
}
// Follow descends the trie, by appending the given byte
// to the prefix. It returns nil when the byte is not accepted by the
// CharMap or no word continues through it.
func (s *simpleNode) Follow(c byte) TrieNode {
	if s == nil || s.cm[c] == 0 || s.s.children[s.cm[c]-1] == nil {
		return nil
	}
	return &simpleNode{
		s: s.s.children[s.cm[c]-1],
		cm: s.cm,
	}
} | trie.go | 0.680029 | 0.448366 | trie.go | starcoder |
package graph
import (
i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e "time"
i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55 "github.com/microsoft/kiota/abstractions/go/serialization"
)
// AccessReviewScheduleDefinition provides operations to manage the identityGovernance singleton.
// Fields are unexported and accessed through the generated Get*/Set* methods.
type AccessReviewScheduleDefinition struct {
    Entity
    // Additional users or group members to be notified of the access review progress.
    additionalNotificationRecipients []AccessReviewNotificationRecipientItemable
    // User who created this review. Read-only.
    createdBy UserIdentityable
    // Timestamp when the access review series was created. Read-only.
    createdDateTime *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time
    // Description giving admins more context for the review.
    descriptionForAdmins *string
    // Description shown to reviewers in the notification email (up to 256 characters).
    descriptionForReviewers *string
    // Name of the access review series. Required on create.
    displayName *string
    // Fallback reviewers notified when no users are found from the reviewers list.
    fallbackReviewers []AccessReviewReviewerScopeable
    // Determines which Microsoft 365 groups are reviewed when scoping to guest users' access.
    instanceEnumerationScope AccessReviewScopeable
    // One accessReviewInstance per recurrence (and per unique resource under review).
    instances []AccessReviewInstanceable
    // Timestamp when the access review series was last modified. Read-only.
    lastModifiedDateTime *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time
    // Defines who the reviewers are. Required on create.
    reviewers []AccessReviewReviewerScopeable
    // Defines the entities whose access is reviewed. Required on create.
    scope AccessReviewScopeable
    // Settings for the access review series. Required on create.
    settings AccessReviewScheduleSettingsable
    // Status of the access review (e.g. NotStarted, InProgress, Completed). Read-only.
    status *string
}
// NewAccessReviewScheduleDefinition instantiates a new accessReviewScheduleDefinition and sets the default values.
func NewAccessReviewScheduleDefinition()(*AccessReviewScheduleDefinition) {
    return &AccessReviewScheduleDefinition{
        Entity: *NewEntity(),
    }
}
// CreateAccessReviewScheduleDefinitionFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value.
func CreateAccessReviewScheduleDefinitionFromDiscriminatorValue(parseNode i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode)(i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable, error) {
    // Only one concrete type exists for this model, so parseNode is not inspected.
    result := NewAccessReviewScheduleDefinition()
    return result, nil
}
// GetAdditionalNotificationRecipients gets the additionalNotificationRecipients property value: additional users or group members to be notified of the access review progress.
func (m *AccessReviewScheduleDefinition) GetAdditionalNotificationRecipients()([]AccessReviewNotificationRecipientItemable) {
    // Guard clause instead of else-after-return (indent-error-flow).
    if m == nil {
        return nil
    }
    return m.additionalNotificationRecipients
}
// GetCreatedBy gets the createdBy property value: the user who created this review. Read-only.
func (m *AccessReviewScheduleDefinition) GetCreatedBy()(UserIdentityable) {
    if m == nil {
        return nil
    }
    return m.createdBy
}
// GetCreatedDateTime gets the createdDateTime property value: timestamp when the access review series was created. Read-only.
func (m *AccessReviewScheduleDefinition) GetCreatedDateTime()(*i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time) {
    if m == nil {
        return nil
    }
    return m.createdDateTime
}
// GetDescriptionForAdmins gets the descriptionForAdmins property value: description giving admins more context for the review.
func (m *AccessReviewScheduleDefinition) GetDescriptionForAdmins()(*string) {
    if m == nil {
        return nil
    }
    return m.descriptionForAdmins
}
// GetDescriptionForReviewers gets the descriptionForReviewers property value: description shown to reviewers in the notification email.
func (m *AccessReviewScheduleDefinition) GetDescriptionForReviewers()(*string) {
    if m == nil {
        return nil
    }
    return m.descriptionForReviewers
}
// GetDisplayName gets the displayName property value: name of the access review series. Required on create.
func (m *AccessReviewScheduleDefinition) GetDisplayName()(*string) {
    if m == nil {
        return nil
    }
    return m.displayName
}
// GetFallbackReviewers gets the fallbackReviewers property value: fallback reviewers notified when no users are found from the reviewers list. Replaces backupReviewers.
func (m *AccessReviewScheduleDefinition) GetFallbackReviewers()([]AccessReviewReviewerScopeable) {
    if m == nil {
        return nil
    }
    return m.fallbackReviewers
}
// GetFieldDeserializers returns the deserialization information for the
// current model, keyed by JSON property name. Each entry parses one property
// from the supplied ParseNode and stores it via the matching setter.
func (m *AccessReviewScheduleDefinition) GetFieldDeserializers()(map[string]func(interface{}, i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode)(error)) {
    // Start from the base Entity deserializers and add this type's fields.
    res := m.Entity.GetFieldDeserializers()
    res["additionalNotificationRecipients"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetCollectionOfObjectValues(CreateAccessReviewNotificationRecipientItemFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            // Collections are parsed as []Parsable and must be downcast element-wise.
            res := make([]AccessReviewNotificationRecipientItemable, len(val))
            for i, v := range val {
                res[i] = v.(AccessReviewNotificationRecipientItemable)
            }
            m.SetAdditionalNotificationRecipients(res)
        }
        return nil
    }
    res["createdBy"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetObjectValue(CreateUserIdentityFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            m.SetCreatedBy(val.(UserIdentityable))
        }
        return nil
    }
    res["createdDateTime"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetTimeValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetCreatedDateTime(val)
        }
        return nil
    }
    res["descriptionForAdmins"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetStringValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetDescriptionForAdmins(val)
        }
        return nil
    }
    res["descriptionForReviewers"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetStringValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetDescriptionForReviewers(val)
        }
        return nil
    }
    res["displayName"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetStringValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetDisplayName(val)
        }
        return nil
    }
    res["fallbackReviewers"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetCollectionOfObjectValues(CreateAccessReviewReviewerScopeFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            res := make([]AccessReviewReviewerScopeable, len(val))
            for i, v := range val {
                res[i] = v.(AccessReviewReviewerScopeable)
            }
            m.SetFallbackReviewers(res)
        }
        return nil
    }
    res["instanceEnumerationScope"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetObjectValue(CreateAccessReviewScopeFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            m.SetInstanceEnumerationScope(val.(AccessReviewScopeable))
        }
        return nil
    }
    res["instances"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetCollectionOfObjectValues(CreateAccessReviewInstanceFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            res := make([]AccessReviewInstanceable, len(val))
            for i, v := range val {
                res[i] = v.(AccessReviewInstanceable)
            }
            m.SetInstances(res)
        }
        return nil
    }
    res["lastModifiedDateTime"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetTimeValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetLastModifiedDateTime(val)
        }
        return nil
    }
    res["reviewers"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetCollectionOfObjectValues(CreateAccessReviewReviewerScopeFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            res := make([]AccessReviewReviewerScopeable, len(val))
            for i, v := range val {
                res[i] = v.(AccessReviewReviewerScopeable)
            }
            m.SetReviewers(res)
        }
        return nil
    }
    res["scope"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetObjectValue(CreateAccessReviewScopeFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            m.SetScope(val.(AccessReviewScopeable))
        }
        return nil
    }
    res["settings"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetObjectValue(CreateAccessReviewScheduleSettingsFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            m.SetSettings(val.(AccessReviewScheduleSettingsable))
        }
        return nil
    }
    res["status"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetStringValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetStatus(val)
        }
        return nil
    }
    return res
}
// GetInstanceEnumerationScope gets the instanceEnumerationScope property value: determines which Microsoft 365 groups are reviewed when scoping a review to guest users' access.
func (m *AccessReviewScheduleDefinition) GetInstanceEnumerationScope()(AccessReviewScopeable) {
    if m == nil {
        return nil
    }
    return m.instanceEnumerationScope
}
// GetInstances gets the instances property value: one accessReviewInstance per recurrence (and per unique resource under review) of the series.
func (m *AccessReviewScheduleDefinition) GetInstances()([]AccessReviewInstanceable) {
    if m == nil {
        return nil
    }
    return m.instances
}
// GetLastModifiedDateTime gets the lastModifiedDateTime property value: timestamp when the access review series was last modified. Read-only.
func (m *AccessReviewScheduleDefinition) GetLastModifiedDateTime()(*i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time) {
    if m == nil {
        return nil
    }
    return m.lastModifiedDateTime
}
// GetReviewers gets the reviewers property value: defines who the reviewers are. Required on create; only updatable when individual users are assigned.
func (m *AccessReviewScheduleDefinition) GetReviewers()([]AccessReviewReviewerScopeable) {
    if m == nil {
        return nil
    }
    return m.reviewers
}
// GetScope gets the scope property value: defines the entities whose access is reviewed. Required on create.
func (m *AccessReviewScheduleDefinition) GetScope()(AccessReviewScopeable) {
    if m == nil {
        return nil
    }
    return m.scope
}
// GetSettings gets the settings property value: the settings for the access review series. Required on create.
func (m *AccessReviewScheduleDefinition) GetSettings()(AccessReviewScheduleSettingsable) {
    if m == nil {
        return nil
    }
    return m.settings
}
// GetStatus gets the status property value: the status of the access review (e.g. NotStarted, InProgress, Completed). Read-only.
func (m *AccessReviewScheduleDefinition) GetStatus()(*string) {
    if m == nil {
        return nil
    }
    return m.status
}
// IsNil reports whether the model pointer is nil.
func (m *AccessReviewScheduleDefinition) IsNil()(bool) {
    return m == nil
}
// Serialize serializes information the current object into the given writer,
// starting with the base Entity properties. Collection properties are only
// written when non-nil; scalar properties are always delegated to the writer.
func (m *AccessReviewScheduleDefinition) Serialize(writer i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.SerializationWriter)(error) {
    err := m.Entity.Serialize(writer)
    if err != nil {
        return err
    }
    if m.GetAdditionalNotificationRecipients() != nil {
        // Collections must be upcast to []Parsable before writing.
        cast := make([]i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable, len(m.GetAdditionalNotificationRecipients()))
        for i, v := range m.GetAdditionalNotificationRecipients() {
            cast[i] = v.(i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable)
        }
        err = writer.WriteCollectionOfObjectValues("additionalNotificationRecipients", cast)
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteObjectValue("createdBy", m.GetCreatedBy())
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteTimeValue("createdDateTime", m.GetCreatedDateTime())
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteStringValue("descriptionForAdmins", m.GetDescriptionForAdmins())
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteStringValue("descriptionForReviewers", m.GetDescriptionForReviewers())
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteStringValue("displayName", m.GetDisplayName())
        if err != nil {
            return err
        }
    }
    if m.GetFallbackReviewers() != nil {
        cast := make([]i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable, len(m.GetFallbackReviewers()))
        for i, v := range m.GetFallbackReviewers() {
            cast[i] = v.(i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable)
        }
        err = writer.WriteCollectionOfObjectValues("fallbackReviewers", cast)
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteObjectValue("instanceEnumerationScope", m.GetInstanceEnumerationScope())
        if err != nil {
            return err
        }
    }
    if m.GetInstances() != nil {
        cast := make([]i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable, len(m.GetInstances()))
        for i, v := range m.GetInstances() {
            cast[i] = v.(i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable)
        }
        err = writer.WriteCollectionOfObjectValues("instances", cast)
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteTimeValue("lastModifiedDateTime", m.GetLastModifiedDateTime())
        if err != nil {
            return err
        }
    }
    if m.GetReviewers() != nil {
        cast := make([]i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable, len(m.GetReviewers()))
        for i, v := range m.GetReviewers() {
            cast[i] = v.(i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable)
        }
        err = writer.WriteCollectionOfObjectValues("reviewers", cast)
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteObjectValue("scope", m.GetScope())
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteObjectValue("settings", m.GetSettings())
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteStringValue("status", m.GetStatus())
        if err != nil {
            return err
        }
    }
    return nil
}
// SetAdditionalNotificationRecipients sets the additionalNotificationRecipients property value: additional users or group members to be notified of the access review progress.
func (m *AccessReviewScheduleDefinition) SetAdditionalNotificationRecipients(value []AccessReviewNotificationRecipientItemable)() {
    if m == nil {
        return
    }
    m.additionalNotificationRecipients = value
}
// SetCreatedBy sets the createdBy property value: the user who created this review.
func (m *AccessReviewScheduleDefinition) SetCreatedBy(value UserIdentityable)() {
    if m == nil {
        return
    }
    m.createdBy = value
}
// SetCreatedDateTime sets the createdDateTime property value: timestamp when the access review series was created.
func (m *AccessReviewScheduleDefinition) SetCreatedDateTime(value *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time)() {
    if m == nil {
        return
    }
    m.createdDateTime = value
}
// SetDescriptionForAdmins sets the descriptionForAdmins property value: description giving admins more context for the review.
func (m *AccessReviewScheduleDefinition) SetDescriptionForAdmins(value *string)() {
    if m == nil {
        return
    }
    m.descriptionForAdmins = value
}
// SetDescriptionForReviewers sets the descriptionForReviewers property value: description shown to reviewers in the notification email.
func (m *AccessReviewScheduleDefinition) SetDescriptionForReviewers(value *string)() {
    if m == nil {
        return
    }
    m.descriptionForReviewers = value
}
// SetDisplayName sets the displayName property value: name of the access review series. Required on create.
func (m *AccessReviewScheduleDefinition) SetDisplayName(value *string)() {
    if m == nil {
        return
    }
    m.displayName = value
}
// SetFallbackReviewers sets the fallbackReviewers property value: fallback reviewers notified when no users are found from the reviewers list.
func (m *AccessReviewScheduleDefinition) SetFallbackReviewers(value []AccessReviewReviewerScopeable)() {
    if m == nil {
        return
    }
    m.fallbackReviewers = value
}
// SetInstanceEnumerationScope sets the instanceEnumerationScope property value: determines which Microsoft 365 groups are reviewed when scoping to guest users' access.
func (m *AccessReviewScheduleDefinition) SetInstanceEnumerationScope(value AccessReviewScopeable)() {
    if m == nil {
        return
    }
    m.instanceEnumerationScope = value
}
// SetInstances sets the instances property value: one accessReviewInstance per recurrence (and per unique resource under review) of the series.
func (m *AccessReviewScheduleDefinition) SetInstances(value []AccessReviewInstanceable)() {
    if m == nil {
        return
    }
    m.instances = value
}
// SetLastModifiedDateTime sets the lastModifiedDateTime property value: timestamp when the access review series was last modified.
func (m *AccessReviewScheduleDefinition) SetLastModifiedDateTime(value *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time)() {
    if m == nil {
        return
    }
    m.lastModifiedDateTime = value
}
// SetReviewers sets the reviewers property value: defines who the reviewers are. Required on create.
func (m *AccessReviewScheduleDefinition) SetReviewers(value []AccessReviewReviewerScopeable)() {
    if m == nil {
        return
    }
    m.reviewers = value
}
// SetScope sets the scope property value: defines the entities whose access is reviewed. Required on create.
func (m *AccessReviewScheduleDefinition) SetScope(value AccessReviewScopeable)() {
    if m == nil {
        return
    }
    m.scope = value
}
// SetSettings sets the settings property value: the settings for the access review series. Required on create.
func (m *AccessReviewScheduleDefinition) SetSettings(value AccessReviewScheduleSettingsable)() {
    if m == nil {
        return
    }
    m.settings = value
}
// SetStatus sets the status property value: the status of the access review
// (e.g. NotStarted, InProgress, Completed). Read-only on the service side.
func (m *AccessReviewScheduleDefinition) SetStatus(value *string)() {
    if m != nil {
        m.status = value
    }
} | models/microsoft/graph/access_review_schedule_definition.go | 0.671471 | 0.419588 | access_review_schedule_definition.go | starcoder |
package options
import (
"time"
)
// CreateIndexesOptions represents all possible options for the CreateOne() and CreateMany() functions.
// The zero value (all nil fields) applies server defaults.
type CreateIndexesOptions struct {
	MaxTime *time.Duration // The maximum amount of time to allow the query to run.
}
// CreateIndexes creates a new CreateIndexesOptions instance with no options set.
func CreateIndexes() *CreateIndexesOptions {
	return new(CreateIndexesOptions)
}
// SetMaxTime specifies the maximum amount of time to allow the query to run.
// It returns the receiver to allow chaining.
func (c *CreateIndexesOptions) SetMaxTime(d time.Duration) *CreateIndexesOptions {
	dur := d
	c.MaxTime = &dur
	return c
}
// MergeCreateIndexesOptions combines the given *CreateIndexesOptions into a
// single *CreateIndexesOptions in a last-one-wins fashion; nil entries and
// unset fields are skipped.
func MergeCreateIndexesOptions(opts ...*CreateIndexesOptions) *CreateIndexesOptions {
	merged := CreateIndexes()
	for _, opt := range opts {
		if opt == nil || opt.MaxTime == nil {
			continue
		}
		merged.MaxTime = opt.MaxTime
	}
	return merged
}
// DropIndexesOptions represents all possible options for the DropIndexes() function.
type DropIndexesOptions struct {
MaxTime *time.Duration
}
// DropIndexes creates a new DropIndexesOptions instance.
func DropIndexes() *DropIndexesOptions {
return &DropIndexesOptions{}
}
// SetMaxTime specifies the maximum amount of time to allow the query to run.
func (d *DropIndexesOptions) SetMaxTime(duration time.Duration) *DropIndexesOptions {
d.MaxTime = &duration
return d
}
// MergeDropIndexesOptions combines the given *DropIndexesOptions into a single *DropIndexesOptions in a last one
// wins fashion.
func MergeDropIndexesOptions(opts ...*DropIndexesOptions) *DropIndexesOptions {
c := DropIndexes()
for _, opt := range opts {
if opt == nil {
continue
}
if opt.MaxTime != nil {
c.MaxTime = opt.MaxTime
}
}
return c
}
// ListIndexesOptions represents all possible options for the ListIndexes() function.
type ListIndexesOptions struct {
BatchSize *int32
MaxTime *time.Duration
}
// ListIndexes creates a new ListIndexesOptions instance.
func ListIndexes() *ListIndexesOptions {
return &ListIndexesOptions{}
}
// SetBatchSize specifies the number of documents to return in every batch.
func (l *ListIndexesOptions) SetBatchSize(i int32) *ListIndexesOptions {
l.BatchSize = &i
return l
}
// SetMaxTime specifies the maximum amount of time to allow the query to run.
func (l *ListIndexesOptions) SetMaxTime(d time.Duration) *ListIndexesOptions {
l.MaxTime = &d
return l
}
// MergeListIndexesOptions combines the given *ListIndexesOptions into a single *ListIndexesOptions in a last one
// wins fashion.
func MergeListIndexesOptions(opts ...*ListIndexesOptions) *ListIndexesOptions {
c := ListIndexes()
for _, opt := range opts {
if opt == nil {
continue
}
if opt.MaxTime != nil {
c.MaxTime = opt.MaxTime
}
}
return c
}
// IndexOptions represents all possible options to configure a new index.
// Each field is nil/unset by default; use the Set* builder methods below.
type IndexOptions struct {
    // Background, if true, asks the server to build the index in the background.
    Background *bool
    // ExpireAfterSeconds is the number of seconds for a document to remain in the collection (TTL index).
    ExpireAfterSeconds *int32
    // Name is the index name; generated as "[field]_[direction]" when unset.
    Name *string
    // Sparse, if true, makes the index reference only documents containing the indexed field.
    Sparse *bool
    // StorageEngine specifies the storage engine to use (server >= 3.0).
    StorageEngine interface{}
    // Unique forces the index keys to be unique.
    Unique *bool
    // Version is the index version number, either 0 or 1.
    Version *int32
    // DefaultLanguage is the default language for text indexes (defaults to english).
    DefaultLanguage *string
    // LanguageOverride is the document field that overrides the language.
    LanguageOverride *string
    // TextVersion is the text index version number.
    TextVersion *int32
    // Weights maps indexed fields to their corresponding weight values.
    Weights interface{}
    // SphereVersion is the 2dsphere index version number.
    SphereVersion *int32
    // Bits is the precision of the stored geo hash in the 2d index, from 1 to 32.
    Bits *int32
    // Max is the maximum boundary for latitude and longitude in the 2d index.
    Max *float64
    // Min is the minimum boundary for latitude and longitude in the 2d index.
    Min *float64
    // BucketSize is the number of units used to group location values in a geo haystack index.
    BucketSize *int32
    // PartialFilterExpression is a filter so that only matching documents are indexed.
    PartialFilterExpression interface{}
    // Collation specifies a collation for the index (server >= 3.4).
    Collation *Collation
}
// Index creates a new *IndexOptions with all options unset.
func Index() *IndexOptions {
    return &IndexOptions{}
}
// SetBackground sets the background option. If true, the server will create the index in the background and not block
// other tasks
func (i *IndexOptions) SetBackground(background bool) *IndexOptions {
    i.Background = &background
    return i
}
// SetExpireAfterSeconds specifies the number of seconds for a document to remain in a collection.
func (i *IndexOptions) SetExpireAfterSeconds(seconds int32) *IndexOptions {
    i.ExpireAfterSeconds = &seconds
    return i
}
// SetName specifies a name for the index.
// If not set, a name will be generated in the format "[field]_[direction]".
// If multiple indexes are created for the same key pattern with different collations, a name must be provided to avoid
// ambiguity.
func (i *IndexOptions) SetName(name string) *IndexOptions {
    i.Name = &name
    return i
}
// SetSparse sets the sparse option.
// If true, the index will only reference documents with the specified field in the index.
func (i *IndexOptions) SetSparse(sparse bool) *IndexOptions {
    i.Sparse = &sparse
    return i
}
// SetStorageEngine specifies the storage engine to use.
// Valid for server versions >= 3.0
func (i *IndexOptions) SetStorageEngine(engine interface{}) *IndexOptions {
    i.StorageEngine = engine
    return i
}
// SetUnique forces the index to be unique.
func (i *IndexOptions) SetUnique(unique bool) *IndexOptions {
    i.Unique = &unique
    return i
}
// SetVersion specifies the index version number, either 0 or 1.
func (i *IndexOptions) SetVersion(version int32) *IndexOptions {
    i.Version = &version
    return i
}
// SetDefaultLanguage specifies the default language for text indexes.
// If not set, this will default to english.
func (i *IndexOptions) SetDefaultLanguage(language string) *IndexOptions {
    i.DefaultLanguage = &language
    return i
}
// SetLanguageOverride specifies the field in the document to override the language.
func (i *IndexOptions) SetLanguageOverride(override string) *IndexOptions {
    i.LanguageOverride = &override
    return i
}
// SetTextVersion specifies the text index version number.
// MongoDB version 2.4 can only support version 1.
// MongoDB versions 2.6 and higher can support versions 1 or 2.
func (i *IndexOptions) SetTextVersion(version int32) *IndexOptions {
    i.TextVersion = &version
    return i
}
// SetWeights specifies fields in the index and their corresponding weight values.
func (i *IndexOptions) SetWeights(weights interface{}) *IndexOptions {
    i.Weights = weights
    return i
}
// SetSphereVersion specifies the 2dsphere index version number.
// MongoDB version 2.4 can only support version 1.
// MongoDB versions 2.6 and higher can support versions 1 or 2.
func (i *IndexOptions) SetSphereVersion(version int32) *IndexOptions {
    i.SphereVersion = &version
    return i
}
// SetBits specifies the precision of the stored geo hash in the 2d index, from 1 to 32.
func (i *IndexOptions) SetBits(bits int32) *IndexOptions {
    i.Bits = &bits
    return i
}
// SetMax specifies the maximum boundary for latitude and longitude in the 2d index.
func (i *IndexOptions) SetMax(max float64) *IndexOptions {
    i.Max = &max
    return i
}
// SetMin specifies the minimum boundary for latitude and longitude in the 2d index.
func (i *IndexOptions) SetMin(min float64) *IndexOptions {
    i.Min = &min
    return i
}
// SetBucketSize specifies number of units within which to group the location values in a geo haystack index.
func (i *IndexOptions) SetBucketSize(bucketSize int32) *IndexOptions {
    i.BucketSize = &bucketSize
    return i
}
// SetPartialFilterExpression specifies a filter for use in a partial index. Only documents that match the filter
// expression are included in the index.
func (i *IndexOptions) SetPartialFilterExpression(expression interface{}) *IndexOptions {
    i.PartialFilterExpression = expression
    return i
}
// SetCollation specifies a Collation to use for the operation.
// Valid for server versions >= 3.4
func (i *IndexOptions) SetCollation(collation *Collation) *IndexOptions {
    i.Collation = collation
    return i
}
// MergeIndexOptions combines the given *IndexOptions into a single *IndexOptions in a last one wins fashion.
func MergeIndexOptions(opts ...*IndexOptions) *IndexOptions {
i := Index()
for _, opt := range opts {
if opt.Background != nil {
i.Background = opt.Background
}
if opt.ExpireAfterSeconds != nil {
i.ExpireAfterSeconds = opt.ExpireAfterSeconds
}
if opt.Name != nil {
i.Name = opt.Name
}
if opt.Sparse != nil {
i.Sparse = opt.Sparse
}
if opt.StorageEngine != nil {
i.StorageEngine = opt.StorageEngine
}
if opt.Unique != nil {
i.Unique = opt.Unique
}
if opt.Version != nil {
i.Version = opt.Version
}
if opt.DefaultLanguage != nil {
i.DefaultLanguage = opt.DefaultLanguage
}
if opt.LanguageOverride != nil {
i.LanguageOverride = opt.LanguageOverride
}
if opt.TextVersion != nil {
i.TextVersion = opt.TextVersion
}
if opt.Weights != nil {
i.Weights = opt.Weights
}
if opt.SphereVersion != nil {
i.SphereVersion = opt.SphereVersion
}
if opt.Bits != nil {
i.Bits = opt.Bits
}
if opt.Max != nil {
i.Max = opt.Max
}
if opt.Min != nil {
i.Min = opt.Min
}
if opt.BucketSize != nil {
i.BucketSize = opt.BucketSize
}
if opt.PartialFilterExpression != nil {
i.PartialFilterExpression = opt.PartialFilterExpression
}
if opt.Collation != nil {
i.Collation = opt.Collation
}
}
return i
} | vendor/go.mongodb.org/mongo-driver/mongo/options/indexoptions.go | 0.845528 | 0.607605 | indexoptions.go | starcoder |
package limiter
import (
"github.com/momokatte/go-backoff"
)
/*
FailRateLimiter combines a FailLimiter and a RateLimiter to act as a single FailLimiter.
*/
type FailRateLimiter struct {
    // failLimiter applies failure-driven backoff (set via SetBackOffFunc).
    failLimiter FailLimiter
    // rateLimiter caps the overall attempt rate (set via SetMaxRate).
    rateLimiter RateLimiter
}
// NewFailRateLimiter instantiates a new FailRateLimiter with the provided
// maximum rate and backoff function.
func NewFailRateLimiter(maxRate Rate, backOff func(uint) uint) *FailRateLimiter {
    limiter := &FailRateLimiter{}
    limiter.SetBackOffFunc(backOff)
    limiter.SetMaxRate(maxRate)
    return limiter
}

// NewHalfJitterFailRateLimiter instantiates a new FailRateLimiter with the
// provided maximum rate and a half-jitter backoff function capped at
// maxBackOff.
func NewHalfJitterFailRateLimiter(maxRate Rate, maxBackOff uint) *FailRateLimiter {
    return NewFailRateLimiter(maxRate, backoff.HalfJitter(1, maxBackOff))
}
/*
CheckWait should be called at the beginning of the caller's action.
It blocks if the limiter needs to restrict execution, otherwise it returns immediately. Restriction is typically based on the last received status, but may also be controlled by other factors.
*/
func (l *FailRateLimiter) CheckWait() {
l.failLimiter.CheckWait()
l.rateLimiter.CheckWait()
return
}
/*
Report should be called at the end of the caller's action, providing the limiter with the success/fail status of the action.
Failure statuses should be expected to incur rate throttling on subsequent calls to CheckWait.
*/
func (l *FailRateLimiter) Report(success bool) {
l.failLimiter.Report(success)
}
/*
SetBackOffFunc sets a new backoff function for this limiter.
*/
func (l *FailRateLimiter) SetBackOffFunc(f func(uint) uint) {
l.failLimiter = NewFailBackOffLimiter(f)
}
/*
SetRateLimit sets the maximum rate for this limiter.
*/
func (l *FailRateLimiter) SetMaxRate(rate Rate) {
l.rateLimiter = NewBurstRateLimiter(rate)
}
/*
Invoke enforces this limiter's limits before the invocation of the provided function and uses the function's return value to adjust the backoff rate for subsequent invocations.
*/
func (l *FailRateLimiter) Invoke(f func() error) (err error) {
l.CheckWait()
err = f()
l.Report(err == nil)
return
} | fail-rate.go | 0.580709 | 0.435541 | fail-rate.go | starcoder |
package bloom
import (
"github.com/pmylund/go-bitset"
"hash"
"hash/crc64"
"hash/fnv"
"math"
)
type filter64 struct {
m uint64
k uint64
h hash.Hash64
oh hash.Hash64
}
func (f *filter64) bits(data []byte) []uint64 {
f.h.Reset()
_, _ = f.h.Write(data)
a := f.h.Sum64()
f.oh.Reset()
_, _ = f.oh.Write(data)
b := f.oh.Sum64()
is := make([]uint64, f.k)
for i := uint64(0); i < f.k; i++ {
is[i] = (a + b*i) % f.m
}
return is
}
func newFilter64(m, k uint64) *filter64 {
return &filter64{
m: m,
k: k,
h: fnv.New64(),
oh: crc64.New(crc64.MakeTable(crc64.ECMA)),
}
}
// estimates64 returns the bit-array size m and hash count k for a filter
// expected to hold n items with false-positive probability p, using the
// standard formulas m = -n*ln(p)/ln(2)^2 and k = ceil(ln(2)*m/n).
func estimates64(n uint64, p float64) (uint64, uint64) {
    items := float64(n)
    log2 := math.Log(2)
    bits := -1 * items * math.Log(p) / math.Pow(log2, 2)
    hashes := math.Ceil(log2 * bits / items)
    return uint64(bits), uint64(hashes)
}
// Filter64 is a standard 64-bit bloom filter backed by FNV/CRC-64 double
// hashing over a single bitset.
type Filter64 struct {
    *filter64
    b *bitset.Bitset64
}

// Test reports whether data may have been added to the filter. A true
// result can be a false positive (with probability near the ratio chosen at
// creation); a false result is never wrong.
func (f *Filter64) Test(data []byte) bool {
    for _, pos := range f.bits(data) {
        if !f.b.Test(pos) {
            return false
        }
    }
    return true
}

// Add records data in the filter.
func (f *Filter64) Add(data []byte) {
    for _, pos := range f.bits(data) {
        f.b.Set(pos)
    }
}

// Reset clears the filter.
func (f *Filter64) Reset() {
    f.b.Reset()
}

// New64 creates a bloom filter sized for an expected n items with an
// acceptable false positive rate of p, e.g. 0.01 for 1%.
func New64(n int64, p float64) *Filter64 {
    m, k := estimates64(uint64(n), p)
    return &Filter64{
        newFilter64(m, k),
        bitset.New64(m),
    }
}
// CountingFilter64 is a counting bloom filter built on the same hashing as
// Filter64. Counts are represented by a stack of bitsets: a position's count
// is the number of layers with that bit set, which is what makes removal
// possible.
type CountingFilter64 struct {
	*filter64
	b []*bitset.Bitset64
}
// Test checks whether data was previously added to the filter. Returns true if
// yes, with a false positive chance near the ratio specified upon creation
// of the filter. The result cannot be falsely negative (unless an item that
// was never added has been removed).
func (f *CountingFilter64) Test(data []byte) bool {
	// Membership only needs the bottom layer: Add always sets bits there first.
	b := f.b[0]
	for _, v := range f.bits(data) {
		if !b.Test(v) {
			return false
		}
	}
	return true
}
// Add adds data to the filter, incrementing the count at each of its bit
// positions.
func (f *CountingFilter64) Add(data []byte) {
	for _, v := range f.bits(data) {
		done := false
		// Set the bit in the lowest layer that doesn't have it yet.
		for _, ov := range f.b {
			if !ov.Test(v) {
				done = true
				ov.Set(v)
				break
			}
		}
		// Every existing layer already had the bit: grow a new layer.
		if !done {
			nb := bitset.New64(f.b[0].Len())
			f.b = append(f.b, nb)
			nb.Set(v)
		}
	}
}
// Remove removes data from the filter. This exact data must have been previously added
// to the filter, or future results will be inconsistent.
func (f *CountingFilter64) Remove(data []byte) {
	last := len(f.b) - 1
	for _, v := range f.bits(data) {
		// Clear the bit in the highest layer that has it set.
		for oi := last; oi >= 0; oi-- {
			ov := f.b[oi]
			if ov.Test(v) {
				ov.Clear(v)
				break
			}
		}
	}
}
// Reset resets the filter, keeping only an empty bottom layer.
func (f *CountingFilter64) Reset() {
	f.b = f.b[:1]
	f.b[0].Reset()
}
// NewCounting64 creates a counting bloom filter with an expected n number of items, and an
// acceptable false positive rate of p. Counting bloom filters support
// the removal of items from the filter.
func NewCounting64(n int64, p float64) *CountingFilter64 {
	m, k := estimates64(uint64(n), p)
	f := &CountingFilter64{
		newFilter64(m, k),
		[]*bitset.Bitset64{bitset.New64(m)},
	}
	return f
}
// LayeredFilter64 is a layered bloom filter built on the same hashing as
// Filter64. Each repeated Add of the same data promotes it one layer, so the
// filter can answer "was this added roughly N times?" style queries.
type LayeredFilter64 struct {
	*filter64
	b []*bitset.Bitset64
}
// Test checks whether data was previously added to the filter. Returns the number of
// the last layer where the data was added, e.g. 1 for the first layer, and a
// boolean indicating whether the data was added to the filter at all. The check
// has a false positive chance near the ratio specified upon creation of the
// filter. The result cannot be falsely negative.
func (f *LayeredFilter64) Test(data []byte) (int, bool) {
	is := f.bits(data)
	// Scan from the top layer down; the first layer where all bits are set
	// is the item's current level.
	for i := len(f.b) - 1; i >= 0; i-- {
		v := f.b[i]
		last := len(is) - 1
		for oi, ov := range is {
			if !v.Test(ov) {
				break
			}
			if oi == last {
				// Every test was positive at this layer
				return i + 1, true
			}
		}
	}
	return 0, false
}
// Add adds data to the filter. Returns the number of the layer where the data
// was added, e.g. 1 for the first layer.
func (f *LayeredFilter64) Add(data []byte) int {
	is := f.bits(data)
	var (
		i int
		v *bitset.Bitset64
	)
	for i, v = range f.b {
		here := false
		// As soon as one bit is missing at this layer, the item belongs
		// here: set that bit and all remaining ones.
		for _, ov := range is {
			if here {
				v.Set(ov)
			} else if !v.Test(ov) {
				here = true
				v.Set(ov)
			}
		}
		if here {
			return i + 1
		}
	}
	// Present in every existing layer: grow a new top layer for it.
	// i is len(f.b)-1 here, so the new layer's 1-based number is i+2.
	nb := bitset.New64(f.b[0].Len())
	f.b = append(f.b, nb)
	for _, v := range is {
		nb.Set(v)
	}
	return i + 2
}
// Reset resets the filter, keeping only an empty bottom layer.
func (f *LayeredFilter64) Reset() {
	f.b = f.b[:1]
	f.b[0].Reset()
}
// NewLayered64 Create a layered bloom filter with an expected n number of items, and an
// acceptable false positive rate of p. Layered bloom filters can be used
// to keep track of a certain, arbitrary count of items, e.g. to check if some
// given data was added to the filter 10 times or less.
func NewLayered64(n int64, p float64) *LayeredFilter64 {
m, k := estimates64(uint64(n), p)
f := &LayeredFilter64{
newFilter64(m, k),
[]*bitset.Bitset64{bitset.New64(m)},
}
return f
} | bloom64.go | 0.843638 | 0.424293 | bloom64.go | starcoder |
package ratelimit
import (
"math"
"sync"
"time"
)
// Note: This file is inspired by:
// https://github.com/prashantv/go-bench/blob/master/ratelimit
// Limiter is used to rate limit some process, possibly across goroutines.
// The process is expected to call Take() before every iteration, which
// may block to throttle the process.
type Limiter interface {
    // Take should block to make sure that the RPS is met before returning true.
    // If the passed in channel is closed, Take should unblock and return false.
    Take(cancel <-chan struct{}) bool
}

// timePeriod implements Limiter by spacing calls time.Second/rps apart on
// average, carrying bounded negative slack so slow requests earn credit.
type timePeriod struct {
    sync.Mutex
    timer      *time.Timer
    last       time.Time
    sleepFor   time.Duration
    perRequest time.Duration
    maxSlack   time.Duration
}

// New returns a Limiter that will limit to the given RPS.
func New(rps int) Limiter {
    return &timePeriod{
        perRequest: time.Second / time.Duration(rps),
        maxSlack:   -10 * time.Second / time.Duration(rps),
        timer:      time.NewTimer(time.Duration(math.MaxInt64)),
    }
}

// Take blocks to ensure that the time spent between multiple Take calls is
// on average time.Second/rps. It returns false if cancel is closed while
// waiting.
func (t *timePeriod) Take(cancel <-chan struct{}) bool {
    t.Lock()
    defer t.Unlock()

    // The very first request is always allowed immediately.
    now := time.Now()
    if t.last.IsZero() {
        t.last = now
        return true
    }

    // Accumulate how long we should sleep: the per-request budget minus the
    // time already spent since the previous Take. Requests slower than the
    // budget drive this negative, acting as credit against future sleeps.
    t.sleepFor += t.perRequest - now.Sub(t.last)
    t.last = now

    // Cap the accumulated credit so a long stall doesn't allow a much
    // higher RPS immediately afterwards.
    if t.sleepFor < t.maxSlack {
        t.sleepFor = t.maxSlack
    }

    if t.sleepFor <= 0 {
        return true
    }
    t.timer.Reset(t.sleepFor)
    select {
    case <-t.timer.C:
    case <-cancel:
        return false
    }
    t.last = now.Add(t.sleepFor)
    t.sleepFor = 0
    return true
}
type dummy struct{}
// NewInfinite returns a RateLimiter that is not limited.
func NewInfinite() Limiter {
return dummy{}
}
func (dummy) Take(_ <-chan struct{}) bool { return true } | ratelimit/time_period.go | 0.725843 | 0.446434 | time_period.go | starcoder |
package steven
import (
"image"
"math"
"github.com/thinkofdeath/steven/render"
"github.com/thinkofdeath/steven/type/direction"
)
// processedModel is a block model whose faces have been pre-baked into
// vertex/index data by precomputeModel so chunk meshing only needs to copy,
// tint and light them.
type processedModel struct {
	faces            []processedFace
	ambientOcclusion bool
	weight           int
}
// processedFace is one pre-baked quad of a model: the direction it is culled
// against, the direction it faces, its four vertices plus their texture
// info, and the index order for the two triangles.
type processedFace struct {
	cullFace        direction.Type
	facing          direction.Type
	vertices        []chunkVertex
	verticesTexture []render.TextureInfo
	indices         []int32
	shade           bool
	tintIndex       int
}
// faceRotation is the cycle of directions used by rotateDirection when
// rotating a face around the Y axis in 90-degree steps.
var faceRotation = []direction.Type{
	direction.North,
	direction.East,
	direction.South,
	direction.West,
}
// faceRotationX is the cycle used when rotating around the X axis.
var faceRotationX = []direction.Type{
	direction.North,
	direction.Down,
	direction.South,
	direction.Up,
}
// rotateDirection advances val by offset steps through the rotation cycle
// rots, leaving any direction listed in invalid untouched (and any value not
// found in rots at position 0 of the cycle).
func rotateDirection(val direction.Type, offset int, rots []direction.Type, invalid ...direction.Type) direction.Type {
    for _, d := range invalid {
        if d == val {
            return val
        }
    }
    pos := 0
    for idx, d := range rots {
        if d == val {
            pos = idx
            break
        }
    }
    return rots[(pos+offset)%len(rots)]
}
// precomputeModel bakes a block model into a processedModel: for every
// element face it resolves the texture, applies element and model rotations
// to the vertex positions, computes the UV offsets (including face rotation
// and uvLock), and stores the resulting quad so rendering never re-derives
// any of this per block.
func precomputeModel(bm *model) *processedModel {
	p := &processedModel{}
	p.ambientOcclusion = bm.ambientOcclusion
	p.weight = bm.weight
	for _, el := range bm.elements {
		for i, face := range el.faces {
			faceID := direction.Type(i)
			if face == nil {
				continue
			}
			pFace := processedFace{}
			cullFace := face.cullFace
			// Model rotations (multiples of 90 degrees) also rotate which
			// direction the face points at / is culled against. The axis of
			// rotation itself is excluded from the cycle.
			if bm.x > 0 {
				o := int(bm.x) / 90
				cullFace = rotateDirection(cullFace, o, faceRotationX, direction.East, direction.West, direction.Invalid)
				faceID = rotateDirection(faceID, o, faceRotationX, direction.East, direction.West, direction.Invalid)
			}
			if bm.y > 0 {
				o := int(bm.y) / 90
				cullFace = rotateDirection(cullFace, o, faceRotation, direction.Up, direction.Down, direction.Invalid)
				faceID = rotateDirection(faceID, o, faceRotation, direction.Up, direction.Down, direction.Invalid)
			}
			pFace.cullFace = cullFace
			pFace.facing = direction.Type(faceID)
			pFace.tintIndex = face.tintIndex
			pFace.shade = el.shade
			vert := faceVertices[i]
			tex := bm.lookupTexture(face.texture)
			rect := tex.Rect()
			// UV corners scaled into the texture's pixel rectangle.
			ux1 := int16(face.uv[0] * float64(rect.Width))
			ux2 := int16(face.uv[2] * float64(rect.Width))
			uy1 := int16(face.uv[1] * float64(rect.Height))
			uy2 := int16(face.uv[3] * float64(rect.Height))
			tw, th := int16(rect.Width), int16(rect.Height)
			// Remap the UV rectangle for an explicit face rotation
			// (90/180/270 degrees).
			if face.rotation > 0 {
				x := ux1
				y := uy1
				w := ux2 - ux1
				h := uy2 - uy1
				switch face.rotation {
				case 90:
					uy2 = x + w
					ux1 = tw*16 - (y + h)
					ux2 = tw*16 - y
					uy1 = x
				case 180:
					uy1 = th*16 - (y + h)
					uy2 = th*16 - y
					ux1 = x + w
					ux2 = x
				case 270:
					uy2 = x
					uy1 = x + w
					ux2 = y + h
					ux1 = y
				}
			}
			// Bounds used only for the rescale option of element rotations.
			var minX, minY, minZ = float32(math.Inf(1)), float32(math.Inf(1)), float32(math.Inf(1))
			var maxX, maxY, maxZ = float32(math.Inf(-1)), float32(math.Inf(-1)), float32(math.Inf(-1))
			for v := range vert.verts {
				pFace.verticesTexture = append(pFace.verticesTexture, tex)
				vert.verts[v].TX = uint16(rect.X)
				vert.verts[v].TY = uint16(rect.Y)
				vert.verts[v].TW = uint16(rect.Width)
				vert.verts[v].TH = uint16(rect.Height)
				vert.verts[v].TAtlas = int16(tex.Atlas())
				// The template vertices use 0/1 coordinates; snap each axis
				// to the element's from/to bounds (in 1/16th block units).
				if vert.verts[v].X == 0 {
					vert.verts[v].X = float32(el.from[0] / 16.0)
				} else {
					vert.verts[v].X = float32(el.to[0] / 16.0)
				}
				if vert.verts[v].Y == 0 {
					vert.verts[v].Y = float32(el.from[1] / 16.0)
				} else {
					vert.verts[v].Y = float32(el.to[1] / 16.0)
				}
				if vert.verts[v].Z == 0 {
					vert.verts[v].Z = float32(el.from[2] / 16.0)
				} else {
					vert.verts[v].Z = float32(el.to[2] / 16.0)
				}
				// Per-element rotation around an arbitrary origin and axis.
				if el.rotation != nil {
					r := el.rotation
					switch r.axis {
					case "y":
						rotY := -r.angle * (math.Pi / 180)
						c := math.Cos(rotY)
						s := math.Sin(rotY)
						x := float64(vert.verts[v].X) - (r.origin[0] / 16.0)
						z := float64(vert.verts[v].Z) - (r.origin[2] / 16.0)
						vert.verts[v].X = float32(r.origin[0]/16.0 + (x*c - z*s))
						vert.verts[v].Z = float32(r.origin[2]/16.0 + (z*c + x*s))
					case "x":
						rotX := r.angle * (math.Pi / 180)
						c := math.Cos(-rotX)
						s := math.Sin(-rotX)
						z := float64(vert.verts[v].Z) - (r.origin[2] / 16.0)
						y := float64(vert.verts[v].Y) - (r.origin[1] / 16.0)
						vert.verts[v].Z = float32(r.origin[2]/16.0 + (z*c - y*s))
						vert.verts[v].Y = float32(r.origin[1]/16.0 + (y*c + z*s))
					case "z":
						rotZ := -r.angle * (math.Pi / 180)
						c := math.Cos(-rotZ)
						s := math.Sin(-rotZ)
						x := float64(vert.verts[v].X) - (r.origin[0] / 16.0)
						y := float64(vert.verts[v].Y) - (r.origin[1] / 16.0)
						vert.verts[v].X = float32(r.origin[0]/16.0 + (x*c - y*s))
						vert.verts[v].Y = float32(r.origin[1]/16.0 + (y*c + x*s))
					}
				}
				// Whole-model rotations around the block center (0.5, 0.5, 0.5).
				if bm.x > 0 {
					rotX := bm.x * (math.Pi / 180)
					c := float32(math.Cos(rotX))
					s := float32(math.Sin(rotX))
					z := vert.verts[v].Z - 0.5
					y := vert.verts[v].Y - 0.5
					vert.verts[v].Z = 0.5 + (z*c - y*s)
					vert.verts[v].Y = 0.5 + (y*c + z*s)
				}
				if bm.y > 0 {
					rotY := bm.y * (math.Pi / 180)
					c := float32(math.Cos(rotY))
					s := float32(math.Sin(rotY))
					x := vert.verts[v].X - 0.5
					z := vert.verts[v].Z - 0.5
					vert.verts[v].X = 0.5 + (x*c - z*s)
					vert.verts[v].Z = 0.5 + (z*c + x*s)
				}
				// Assign the computed UV corners to the template's 0/1 markers.
				if vert.verts[v].TOffsetX == 0 {
					vert.verts[v].TOffsetX = int16(ux1)
				} else {
					vert.verts[v].TOffsetX = int16(ux2)
				}
				if vert.verts[v].TOffsetY == 0 {
					vert.verts[v].TOffsetY = int16(uy1)
				} else {
					vert.verts[v].TOffsetY = int16(uy2)
				}
				// Rotate UVs around the texture center for face rotation.
				if face.rotation > 0 {
					rotY := -float64(face.rotation) * (math.Pi / 180)
					c := int16(math.Cos(rotY))
					s := int16(math.Sin(rotY))
					x := vert.verts[v].TOffsetX - 8*tw
					y := vert.verts[v].TOffsetY - 8*th
					vert.verts[v].TOffsetX = 8*tw + int16(x*c-y*s)
					vert.verts[v].TOffsetY = 8*th + int16(y*c+x*s)
				}
				// uvLock counter-rotates the texture so it stays fixed in
				// world space while the model rotates.
				if bm.uvLock && bm.y > 0 &&
					(pFace.facing == direction.Up || pFace.facing == direction.Down) {
					rotY := float64(-bm.y) * (math.Pi / 180)
					c := int16(math.Cos(rotY))
					s := int16(math.Sin(rotY))
					x := vert.verts[v].TOffsetX - 8*16
					y := vert.verts[v].TOffsetY - 8*16
					vert.verts[v].TOffsetX = 8*16 + int16(x*c+y*s)
					vert.verts[v].TOffsetY = 8*16 + int16(y*c-x*s)
				}
				if bm.uvLock && bm.x > 0 &&
					(pFace.facing != direction.Up && pFace.facing != direction.Down) {
					rotY := float64(bm.x) * (math.Pi / 180)
					c := int16(math.Cos(rotY))
					s := int16(math.Sin(rotY))
					x := vert.verts[v].TOffsetX - 8*16
					y := vert.verts[v].TOffsetY - 8*16
					vert.verts[v].TOffsetX = 8*16 + int16(x*c+y*s)
					vert.verts[v].TOffsetY = 8*16 + int16(y*c-x*s)
				}
				// Track the rotated bounds for the rescale pass below.
				if el.rotation != nil && el.rotation.rescale {
					if vert.verts[v].X < minX {
						minX = vert.verts[v].X
					} else if vert.verts[v].X > maxX {
						maxX = vert.verts[v].X
					}
					if vert.verts[v].Y < minY {
						minY = vert.verts[v].Y
					} else if vert.verts[v].Y > maxY {
						maxY = vert.verts[v].Y
					}
					if vert.verts[v].Z < minZ {
						minZ = vert.verts[v].Z
					} else if vert.verts[v].Z > maxZ {
						maxZ = vert.verts[v].Z
					}
				}
			}
			// rescale stretches the rotated face back to the unit cube.
			if el.rotation != nil && el.rotation.rescale {
				diffX := maxX - minX
				diffY := maxY - minY
				diffZ := maxZ - minZ
				for v := range vert.verts {
					vert.verts[v].X = (vert.verts[v].X - minX) / diffX
					vert.verts[v].Y = (vert.verts[v].Y - minY) / diffY
					vert.verts[v].Z = (vert.verts[v].Z - minZ) / diffZ
				}
			}
			pFace.vertices = vert.verts[:]
			pFace.indices = vert.indices[:]
			p.faces = append(p.faces, pFace)
		}
	}
	return p
}
// Render appends this model's visible faces for the block at (x, y, z) to
// buf, applying face culling, tinting, directional shading and lighting.
// It adds the number of indices emitted to *indices and returns the
// (possibly grown) buffer.
func (p processedModel) Render(x, y, z int, bs *blocksSnapshot, buf []chunkVertex, indices *int) []chunkVertex {
	this := bs.block(x, y, z)
	for _, f := range p.faces {
		// Skip faces hidden behind a solid neighbour or the same block type.
		if f.cullFace != direction.Invalid {
			ox, oy, oz := f.cullFace.Offset()
			if b := bs.block(x+ox, y+oy, z+oz); b.ShouldCullAgainst() || b == this {
				continue
			}
		}
		var cr, cg, cb byte
		cr = 255
		cg = 255
		cb = 255
		// Tint index 0 applies the block's biome image tint or static tint.
		switch f.tintIndex {
		case 0:
			if this.TintImage() != nil {
				cr, cg, cb = calculateBiome(bs, x, z, this.TintImage())
			} else {
				cr, cg, cb = this.TintColor()
			}
		}
		// East/west faces are darkened slightly for directional shading.
		if f.facing == direction.West || f.facing == direction.East {
			cr = byte(float64(cr) * 0.8)
			cg = byte(float64(cg) * 0.8)
			cb = byte(float64(cb) * 0.8)
		}
		*indices += len(f.indices)
		for _, vert := range f.vertices {
			vert.R = cr
			vert.G = cg
			vert.B = cb
			// Translate the pre-baked (0..1) vertex into world space.
			vert.X += float32(x)
			vert.Y += float32(y)
			vert.Z += float32(z)
			vert.BlockLight, vert.SkyLight = calculateLight(
				bs,
				x, y, z,
				float64(vert.X),
				float64(vert.Y),
				float64(vert.Z),
				f.facing, p.ambientOcclusion, this.ForceShade(),
			)
			buf = append(buf, vert)
		}
	}
	return buf
}
// Takes an average of the biome colors of the surrounding area
// calculateBiome averages the biome tint colors over the 5x5 column
// neighborhood centered on (x, z), looking each biome's color up in img by
// its packed ColorIndex (low byte = x, high bits = y).
func calculateBiome(bs *blocksSnapshot, x, z int, img *image.NRGBA) (byte, byte, byte) {
    var r, g, b, count int
    for dx := -2; dx <= 2; dx++ {
        for dz := -2; dz <= 2; dz++ {
            biome := bs.biome(x+dx, z+dz)
            cx := biome.ColorIndex & 0xFF
            cy := biome.ColorIndex >> 8
            col := img.NRGBAAt(cx, cy)
            r += int(col.R)
            g += int(col.G)
            b += int(col.B)
            count++
        }
    }
    return byte(r / count), byte(g / count), byte(b / count)
}
// calculateLight returns the (block, sky) light values, pre-scaled by 4000,
// for a vertex at (x, y, z) of a face pointing in the given direction. With
// smooth off it samples the single neighbouring cell; with smooth on it
// averages the 2x2x2 cells around the vertex for ambient-occlusion style
// lighting.
func calculateLight(bs *blocksSnapshot, origX, origY, origZ int,
	x, y, z float64, face direction.Type, smooth, force bool) (uint16, uint16) {
	ox, oy, oz := face.Offset()
	// Non-solid blocks sample their own cell rather than the neighbour's.
	if !bs.block(origX, origY, origZ).ShouldCullAgainst() {
		ox, oy, oz = 0, 0, 0
	}
	sblockLight := bs.blockLight(origX+ox, origY+oy, origZ+oz)
	sskyLight := bs.skyLight(origX+ox, origY+oy, origZ+oz)
	if !smooth {
		return uint16(sblockLight) * 4000, uint16(sskyLight) * 4000
	}
	blockLight := 0
	skyLight := 0
	count := 0
	// Fallback levels (8 below the base sample, clamped at 0) substituted
	// for cells that would otherwise contribute no light.
	dbl := int8(sblockLight) - 8
	if dbl < 0 {
		dbl = 0
	}
	sblockLight = byte(dbl)
	dsl := int8(sskyLight) - 8
	if dsl < 0 {
		dsl = 0
	}
	sskyLight = byte(dsl)
	dx, dy, dz := face.Offset()
	// Sample the 2x2x2 neighbourhood biased 0.6 towards the face normal.
	for ox := -1; ox <= 0; ox++ {
		for oy := -1; oy <= 0; oy++ {
			for oz := -1; oz <= 0; oz++ {
				lx := round(x + float64(ox)*0.6 + float64(dx)*0.6)
				ly := round(y + float64(oy)*0.6 + float64(dy)*0.6)
				lz := round(z + float64(oz)*0.6 + float64(dz)*0.6)
				bl := int(bs.blockLight(lx, ly, lz))
				sl := int(bs.skyLight(lx, ly, lz))
				if force && !bs.block(lx, ly, lz).Is(Blocks.Air) {
					bl = int(sblockLight)
					sl = int(sskyLight)
				}
				if sl == 0 && bl == 0 {
					bl = int(sblockLight)
					sl = int(sskyLight)
				}
				blockLight += bl
				skyLight += sl
				count++
			}
		}
	}
	return uint16((float64(blockLight) / float64(count)) * 4000), uint16((float64(skyLight) / float64(count)) * 4000)
}
// round rounds f to the nearest integer, with halves rounding away from
// zero (the same semantics as math.Round).
func round(f float64) int {
    return int(math.Round(f))
}
type faceDetails struct {
indices [6]int32
verts [4]chunkVertex
}
// Precomputed face vertices
var faceVertices = [6]faceDetails{
{ // Up
indices: [6]int32{0, 1, 2, 3, 2, 1},
verts: [4]chunkVertex{
{X: 0, Y: 1, Z: 0, TOffsetX: 0, TOffsetY: 0},
{X: 1, Y: 1, Z: 0, TOffsetX: 1, TOffsetY: 0},
{X: 0, Y: 1, Z: 1, TOffsetX: 0, TOffsetY: 1},
{X: 1, Y: 1, Z: 1, TOffsetX: 1, TOffsetY: 1},
},
},
{ // Down
indices: [6]int32{0, 1, 2, 3, 2, 1},
verts: [4]chunkVertex{
{X: 0, Y: 0, Z: 0, TOffsetX: 0, TOffsetY: 1},
{X: 0, Y: 0, Z: 1, TOffsetX: 0, TOffsetY: 0},
{X: 1, Y: 0, Z: 0, TOffsetX: 1, TOffsetY: 1},
{X: 1, Y: 0, Z: 1, TOffsetX: 1, TOffsetY: 0},
},
},
{ // North
indices: [6]int32{0, 1, 2, 3, 2, 1},
verts: [4]chunkVertex{
{X: 0, Y: 0, Z: 0, TOffsetX: 1, TOffsetY: 1},
{X: 1, Y: 0, Z: 0, TOffsetX: 0, TOffsetY: 1},
{X: 0, Y: 1, Z: 0, TOffsetX: 1, TOffsetY: 0},
{X: 1, Y: 1, Z: 0, TOffsetX: 0, TOffsetY: 0},
},
},
{ // South
indices: [6]int32{0, 1, 2, 3, 2, 1},
verts: [4]chunkVertex{
{X: 0, Y: 0, Z: 1, TOffsetX: 0, TOffsetY: 1},
{X: 0, Y: 1, Z: 1, TOffsetX: 0, TOffsetY: 0},
{X: 1, Y: 0, Z: 1, TOffsetX: 1, TOffsetY: 1},
{X: 1, Y: 1, Z: 1, TOffsetX: 1, TOffsetY: 0},
},
},
{ // West
indices: [6]int32{0, 1, 2, 3, 2, 1},
verts: [4]chunkVertex{
{X: 0, Y: 0, Z: 0, TOffsetX: 0, TOffsetY: 1},
{X: 0, Y: 1, Z: 0, TOffsetX: 0, TOffsetY: 0},
{X: 0, Y: 0, Z: 1, TOffsetX: 1, TOffsetY: 1},
{X: 0, Y: 1, Z: 1, TOffsetX: 1, TOffsetY: 0},
},
},
{ // East
indices: [6]int32{0, 1, 2, 3, 2, 1},
verts: [4]chunkVertex{
{X: 1, Y: 0, Z: 0, TOffsetX: 1, TOffsetY: 1},
{X: 1, Y: 0, Z: 1, TOffsetX: 0, TOffsetY: 1},
{X: 1, Y: 1, Z: 0, TOffsetX: 1, TOffsetY: 0},
{X: 1, Y: 1, Z: 1, TOffsetX: 0, TOffsetY: 0},
},
},
} | modelrender.go | 0.523664 | 0.450722 | modelrender.go | starcoder |
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// CloudPcOnPremisesConnection
// CloudPcOnPremisesConnection models the Microsoft Graph
// cloudPcOnPremisesConnection resource (presumably Kiota-generated, given
// the serialization imports — confirm against the generator config).
type CloudPcOnPremisesConnection struct {
    Entity
    // The fully qualified domain name (FQDN) of the Active Directory domain you want to join. Optional.
    adDomainName *string
    // The password associated with adDomainUsername.
    adDomainPassword *string
    // The username of an Active Directory account (user or service account) that has permissions to create computer objects in Active Directory. Required format: <EMAIL> (placeholder appears redacted in this source — confirm the exact example upstream). Optional.
    adDomainUsername *string
    // The display name for the Azure network connection.
    displayName *string
    // The status of the most recent health check done on the Azure network connection. For example, if status is 'passed', the Azure network connection has passed all checks run by the service. Possible values are: pending, running, passed, failed, unknownFutureValue. Read-only.
    healthCheckStatus *CloudPcOnPremisesConnectionStatus
    // The details of the connection's health checks and the corresponding results. Returned only on $select. Read-only.
    healthCheckStatusDetails CloudPcOnPremisesConnectionStatusDetailsable
    // When true, the Azure network connection is in use; a connection that is in use cannot be deleted. Returned only on $select. Read-only.
    inUse *bool
    // Specifies which services manage the Azure network connection. Possible values are: windows365, devBox and unknownFutureValue. Read-only.
    managedBy *CloudPcManagementService
    // The organizational unit (OU) in which the computer account is created. If left null, the OU configured as the default (a well-known computer object container) in the Active Directory domain is used. Optional.
    organizationalUnit *string
    // The ID of the target resource group. Required format: '/subscriptions/{subscription-id}/resourceGroups/{resourceGroupName}'.
    resourceGroupId *string
    // The ID of the target subnet. Required format: '/subscriptions/{subscription-id}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkId}/subnets/{subnetName}'.
    subnetId *string
    // The ID of the target Azure subscription associated with the tenant.
    subscriptionId *string
    // The name of the target Azure subscription. Read-only.
    subscriptionName *string
    // Specifies how the provisioned Cloud PC is joined to Azure Active Directory. Default value is hybridAzureADJoin. Possible values are: azureADJoin, hybridAzureADJoin, unknownFutureValue. (Named type_escaped because 'type' is a Go keyword.)
    type_escaped *CloudPcOnPremisesConnectionType
    // The ID of the target virtual network. Required format: '/subscriptions/{subscription-id}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'.
    virtualNetworkId *string
}
// NewCloudPcOnPremisesConnection instantiates a new cloudPcOnPremisesConnection and sets the default values.
func NewCloudPcOnPremisesConnection()(*CloudPcOnPremisesConnection) {
    // The embedded Entity supplies the base (id/odata) defaults.
    return &CloudPcOnPremisesConnection{
        Entity: *NewEntity(),
    }
}
// CreateCloudPcOnPremisesConnectionFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value
func CreateCloudPcOnPremisesConnectionFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
    // This model has no derived types, so the parse node is not inspected.
    return NewCloudPcOnPremisesConnection(), nil
}
// GetAdDomainName gets the adDomainName property value. The fully qualified domain name (FQDN) of the Active Directory domain you want to join. Optional.
func (m *CloudPcOnPremisesConnection) GetAdDomainName()(*string) {
    if m == nil {
        return nil
    }
    return m.adDomainName
}
// GetAdDomainPassword gets the adDomainPassword property value. The password associated with adDomainUsername.
func (m *CloudPcOnPremisesConnection) GetAdDomainPassword()(*string) {
    if m == nil {
        return nil
    }
    return m.adDomainPassword
}
// GetAdDomainUsername gets the adDomainUsername property value. The username of an Active Directory account (user or service account) that has permissions to create computer objects in Active Directory. Optional.
func (m *CloudPcOnPremisesConnection) GetAdDomainUsername()(*string) {
    if m == nil {
        return nil
    }
    return m.adDomainUsername
}
// GetDisplayName gets the displayName property value. The display name for the Azure network connection.
func (m *CloudPcOnPremisesConnection) GetDisplayName()(*string) {
    if m == nil {
        return nil
    }
    return m.displayName
}
// GetFieldDeserializers the deserialization information for the current model
func (m *CloudPcOnPremisesConnection) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
res := m.Entity.GetFieldDeserializers()
res["adDomainName"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetAdDomainName(val)
}
return nil
}
res["adDomainPassword"] = func (n i<PASSWORD>1.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetAdDomainPassword(val)
}
return nil
}
res["adDomainUsername"] = func (n i878<PASSWORD>330e89d26896388a3f487eef27b0<PASSWORD>6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetAdDomainUsername(val)
}
return nil
}
res["displayName"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetDisplayName(val)
}
return nil
}
res["healthCheckStatus"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetEnumValue(ParseCloudPcOnPremisesConnectionStatus)
if err != nil {
return err
}
if val != nil {
m.SetHealthCheckStatus(val.(*CloudPcOnPremisesConnectionStatus))
}
return nil
}
res["healthCheckStatusDetails"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetObjectValue(CreateCloudPcOnPremisesConnectionStatusDetailsFromDiscriminatorValue)
if err != nil {
return err
}
if val != nil {
m.SetHealthCheckStatusDetails(val.(CloudPcOnPremisesConnectionStatusDetailsable))
}
return nil
}
res["inUse"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetBoolValue()
if err != nil {
return err
}
if val != nil {
m.SetInUse(val)
}
return nil
}
res["managedBy"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetEnumValue(ParseCloudPcManagementService)
if err != nil {
return err
}
if val != nil {
m.SetManagedBy(val.(*CloudPcManagementService))
}
return nil
}
res["organizationalUnit"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetOrganizationalUnit(val)
}
return nil
}
res["resourceGroupId"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetResourceGroupId(val)
}
return nil
}
res["subnetId"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetSubnetId(val)
}
return nil
}
res["subscriptionId"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetSubscriptionId(val)
}
return nil
}
res["subscriptionName"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetSubscriptionName(val)
}
return nil
}
res["type"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetEnumValue(ParseCloudPcOnPremisesConnectionType)
if err != nil {
return err
}
if val != nil {
m.SetType(val.(*CloudPcOnPremisesConnectionType))
}
return nil
}
res["virtualNetworkId"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetVirtualNetworkId(val)
}
return nil
}
return res
}
// GetHealthCheckStatus gets the healthCheckStatus property value. The status of the most recent health check done on the Azure network connection. Read-only.
func (m *CloudPcOnPremisesConnection) GetHealthCheckStatus()(*CloudPcOnPremisesConnectionStatus) {
    if m == nil {
        return nil
    }
    return m.healthCheckStatus
}
// GetHealthCheckStatusDetails gets the healthCheckStatusDetails property value. The details of the connection's health checks and the corresponding results. Returned only on $select. Read-only.
func (m *CloudPcOnPremisesConnection) GetHealthCheckStatusDetails()(CloudPcOnPremisesConnectionStatusDetailsable) {
    if m == nil {
        return nil
    }
    return m.healthCheckStatusDetails
}
// GetInUse gets the inUse property value. When true, the Azure network connection is in use and cannot be deleted. Returned only on $select. Read-only.
func (m *CloudPcOnPremisesConnection) GetInUse()(*bool) {
    if m == nil {
        return nil
    }
    return m.inUse
}
// GetManagedBy gets the managedBy property value. Specifies which services manage the Azure network connection. Read-only.
func (m *CloudPcOnPremisesConnection) GetManagedBy()(*CloudPcManagementService) {
    if m == nil {
        return nil
    }
    return m.managedBy
}
// GetOrganizationalUnit gets the organizationalUnit property value. The organizational unit (OU) in which the computer account is created. Optional.
func (m *CloudPcOnPremisesConnection) GetOrganizationalUnit()(*string) {
    if m == nil {
        return nil
    }
    return m.organizationalUnit
}
// GetResourceGroupId gets the resourceGroupId property value. The ID of the target resource group.
func (m *CloudPcOnPremisesConnection) GetResourceGroupId()(*string) {
    if m == nil {
        return nil
    }
    return m.resourceGroupId
}
// GetSubnetId gets the subnetId property value. The ID of the target subnet.
func (m *CloudPcOnPremisesConnection) GetSubnetId()(*string) {
    if m == nil {
        return nil
    }
    return m.subnetId
}
// GetSubscriptionId gets the subscriptionId property value. The ID of the target Azure subscription that’s associated with your tenant.
func (m *CloudPcOnPremisesConnection) GetSubscriptionId()(*string) {
    if m == nil {
        return nil
    }
    return m.subscriptionId
}
// GetSubscriptionName gets the subscriptionName property value. The name of the target Azure subscription. Read-only.
func (m *CloudPcOnPremisesConnection) GetSubscriptionName()(*string) {
    if m == nil {
        return nil
    }
    return m.subscriptionName
}
// GetType gets the type property value. Specifies how the provisioned Cloud PC will be joined to Azure Active Directory. Default value is hybridAzureADJoin.
func (m *CloudPcOnPremisesConnection) GetType()(*CloudPcOnPremisesConnectionType) {
    if m == nil {
        return nil
    }
    // The field is named type_escaped because "type" is a Go keyword.
    return m.type_escaped
}
// GetVirtualNetworkId gets the virtualNetworkId property value. The ID of the target virtual network.
func (m *CloudPcOnPremisesConnection) GetVirtualNetworkId()(*string) {
    if m == nil {
        return nil
    }
    return m.virtualNetworkId
}
// Serialize serializes information the current object.
// Properties are written under their wire names in a fixed order; the string
// writers accept nil pointers for unset values, while enum properties are
// skipped when nil to avoid dereferencing a nil pointer.
func (m *CloudPcOnPremisesConnection) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
    // The inherited Entity properties are written first.
    err := m.Entity.Serialize(writer)
    if err != nil {
        return err
    }
    {
        err = writer.WriteStringValue("adDomainName", m.GetAdDomainName())
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteStringValue("adDomainPassword", m.GetAdDomainPassword())
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteStringValue("adDomainUsername", m.GetAdDomainUsername())
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteStringValue("displayName", m.GetDisplayName())
        if err != nil {
            return err
        }
    }
    // Enum values are serialized as their string representation.
    if m.GetHealthCheckStatus() != nil {
        cast := (*m.GetHealthCheckStatus()).String()
        err = writer.WriteStringValue("healthCheckStatus", &cast)
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteObjectValue("healthCheckStatusDetails", m.GetHealthCheckStatusDetails())
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteBoolValue("inUse", m.GetInUse())
        if err != nil {
            return err
        }
    }
    if m.GetManagedBy() != nil {
        cast := (*m.GetManagedBy()).String()
        err = writer.WriteStringValue("managedBy", &cast)
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteStringValue("organizationalUnit", m.GetOrganizationalUnit())
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteStringValue("resourceGroupId", m.GetResourceGroupId())
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteStringValue("subnetId", m.GetSubnetId())
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteStringValue("subscriptionId", m.GetSubscriptionId())
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteStringValue("subscriptionName", m.GetSubscriptionName())
        if err != nil {
            return err
        }
    }
    if m.GetType() != nil {
        cast := (*m.GetType()).String()
        err = writer.WriteStringValue("type", &cast)
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteStringValue("virtualNetworkId", m.GetVirtualNetworkId())
        if err != nil {
            return err
        }
    }
    return nil
}
// SetAdDomainName sets the adDomainName property value. The fully qualified domain name (FQDN) of the Active Directory domain you want to join. Optional.
func (m *CloudPcOnPremisesConnection) SetAdDomainName(value *string)() {
if m != nil {
m.adDomainName = value
}
}
// SetAdDomainPassword sets the adDomainPassword property value. The password associated with adDomainUsername.
func (m *CloudPcOnPremisesConnection) SetAdDomainPassword(value *string)() {
if m != nil {
m.adDomainPassword = value
}
}
// SetAdDomainUsername sets the adDomainUsername property value. The username of an Active Directory account (user or service account) that has permissions to create computer objects in Active Directory. Required format: <EMAIL>. Optional.
func (m *CloudPcOnPremisesConnection) SetAdDomainUsername(value *string)() {
if m != nil {
m.adDomainUsername = value
}
}
// SetDisplayName sets the displayName property value. The display name for the Azure network connection.
func (m *CloudPcOnPremisesConnection) SetDisplayName(value *string)() {
if m != nil {
m.displayName = value
}
}
// SetHealthCheckStatus sets the healthCheckStatus property value. The status of the most recent health check done on the Azure network connection. For example, if status is 'passed', the Azure network connection has passed all checks run by the service. Possible values are: pending, running, passed, failed, unknownFutureValue. Read-only.
func (m *CloudPcOnPremisesConnection) SetHealthCheckStatus(value *CloudPcOnPremisesConnectionStatus)() {
if m != nil {
m.healthCheckStatus = value
}
}
// SetHealthCheckStatusDetails sets the healthCheckStatusDetails property value. The details of the connection's health checks and the corresponding results. Returned only on $select.For an example that shows how to get the inUse property, see Example 2: Get the selected properties of an Azure network connection, including healthCheckStatusDetails. Read-only.
func (m *CloudPcOnPremisesConnection) SetHealthCheckStatusDetails(value CloudPcOnPremisesConnectionStatusDetailsable)() {
if m != nil {
m.healthCheckStatusDetails = value
}
}
// SetInUse sets the inUse property value. When true, the Azure network connection is in use. When false, the connection is not in use. You cannot delete a connection that’s in use. Returned only on $select. For an example that shows how to get the inUse property, see Example 2: Get the selected properties of an Azure network connection, including healthCheckStatusDetails. Read-only.
func (m *CloudPcOnPremisesConnection) SetInUse(value *bool)() {
if m != nil {
m.inUse = value
}
}
// SetManagedBy sets the managedBy property value. Specifies which services manage the Azure network connection. Possible values are: windows365, devBox and unknownFutureValue. Read-only.
func (m *CloudPcOnPremisesConnection) SetManagedBy(value *CloudPcManagementService)() {
if m != nil {
m.managedBy = value
}
}
// SetOrganizationalUnit sets the organizationalUnit property value. The organizational unit (OU) in which the computer account is created. If left null, the OU that’s configured as the default (a well-known computer object container) in your Active Directory domain (OU) is used. Optional.
func (m *CloudPcOnPremisesConnection) SetOrganizationalUnit(value *string)() {
if m != nil {
m.organizationalUnit = value
}
}
// SetResourceGroupId sets the resourceGroupId property value. The ID of the target resource group. Required format: '/subscriptions/{subscription-id}/resourceGroups/{resourceGroupName}'.
func (m *CloudPcOnPremisesConnection) SetResourceGroupId(value *string)() {
if m != nil {
m.resourceGroupId = value
}
}
// SetSubnetId sets the subnetId property value. The ID of the target subnet. Required format: '/subscriptions/{subscription-id}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkId}/subnets/{subnetName}'.
func (m *CloudPcOnPremisesConnection) SetSubnetId(value *string)() {
if m != nil {
m.subnetId = value
}
}
// SetSubscriptionId sets the subscriptionId property value. The ID of the target Azure subscription that’s associated with your tenant.
func (m *CloudPcOnPremisesConnection) SetSubscriptionId(value *string)() {
if m != nil {
m.subscriptionId = value
}
}
// SetSubscriptionName sets the subscriptionName property value. The name of the target Azure subscription. Read-only.
func (m *CloudPcOnPremisesConnection) SetSubscriptionName(value *string)() {
if m != nil {
m.subscriptionName = value
}
}
// SetType sets the type property value. Specifies how the provisioned Cloud PC will be joined to Azure Active Directory. Default value is hybridAzureADJoin. Possible values are: azureADJoin, hybridAzureADJoin, unknownFutureValue.
func (m *CloudPcOnPremisesConnection) SetType(value *CloudPcOnPremisesConnectionType)() {
if m != nil {
m.type_escaped = value
}
}
// SetVirtualNetworkId sets the virtualNetworkId property value. The ID of the target virtual network. Required format: '/subscriptions/{subscription-id}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'.
func (m *CloudPcOnPremisesConnection) SetVirtualNetworkId(value *string)() {
if m != nil {
m.virtualNetworkId = value
}
} | models/cloud_pc_on_premises_connection.go | 0.717903 | 0.456107 | cloud_pc_on_premises_connection.go | starcoder |
package feature
import (
"fmt"
)
// Table is a columnar store of feature instances kept in one row-major slice.
type Table struct {
    features []Feature // column definitions, in column order
    featureMap map[TypeKey]int // feature type key -> column index
    items []*Instance // flattened cells, indexed row*numColumns+col
    numColumns int // current number of columns
    numRows int // current number of rows
}
// CreateTable builds an empty Table whose columns are the given features,
// pre-indexing each feature's type key to its column position.
func CreateTable(features []Feature) *Table {
    featureMap := make(map[TypeKey]int, len(features))
    for index, feature := range features {
        featureMap[feature.TypeKey()] = index
    }
    return &Table{
        features:   features,
        featureMap: featureMap,
        items:      []*Instance{},
        numColumns: len(features),
    }
}
// AddStringRow parses one raw value per column and appends the result as a
// new row. Panics when the record count does not match the column count.
func (t *Table) AddStringRow(records []string) {
    if len(records) != t.numColumns {
        panic(fmt.Sprintf("Number of records added does not equal the number of columns. Expected: %d; Got: %d", t.numColumns, len(records)))
    }
    parsed := make([]*Instance, len(t.features))
    for col, feature := range t.features {
        parsed[col] = feature.Create(records[col])
    }
    t.items = append(t.items, parsed...)
    t.numRows++
}
// AddColumn appends feature as a new rightmost column, with column supplying
// one instance per existing row. Panics if len(column) != NumRows().
func (t *Table) AddColumn(feature Feature, column []*Instance) {
    if len(column) != t.NumRows() {
        // Bug fix: len(column) is an int, so the verb must be %d (the
        // original %s rendered as "%!s(int=N)" and is flagged by go vet).
        panic(fmt.Sprintf("Number of rows in added column does not equal the number of rows. Expected: %d; Got: %d", t.NumRows(), len(column)))
    }
    t.features = append(t.features, feature)
    t.featureMap[feature.TypeKey()] = len(t.features) - 1
    // Rebuild the row-major backing slice with one extra cell per row: row r
    // moves from offset r*numColumns to r*(numColumns+1), and the new cell
    // lands at the end of each widened row.
    items := make([]*Instance, len(t.items)+len(column))
    for rowIndex := 0; rowIndex < t.NumRows(); rowIndex++ {
        copy(items[rowIndex*t.numColumns+rowIndex:rowIndex*t.numColumns+t.numColumns+rowIndex], t.items[rowIndex*t.numColumns:rowIndex*t.numColumns+t.numColumns])
        items[rowIndex*t.numColumns+t.numColumns+rowIndex] = column[rowIndex]
    }
    t.items = items
    t.numColumns += 1
}
// At returns the instance stored at (rowIndex, columnIndex).
// Cells live in a single row-major slice, hence the index arithmetic.
func (t *Table) At(rowIndex, columnIndex int) *Instance {
    return t.items[rowIndex*t.numColumns+columnIndex]
}
// LabelFromColumnIndex returns a pointer to the Feature describing the given
// column. Panics (index out of range) for an invalid columnIndex.
func (t *Table) LabelFromColumnIndex(columnIndex int) *Feature {
    return &t.features[columnIndex]
}
// ColumnIndexFromLabel returns the column index registered for typeKey.
// NOTE(review): an unknown key silently yields 0 (the map zero value) —
// confirm callers never pass keys absent from the table.
func (t *Table) ColumnIndexFromLabel(typeKey TypeKey) int {
    return t.featureMap[typeKey]
}
// NumColumns returns the number of columns currently in the table.
func (t *Table) NumColumns() int {
    return t.numColumns
}
func (t *Table) NumRows() int {
return t.numRows
} | feature/feature_table.go | 0.597021 | 0.44746 | feature_table.go | starcoder |
package util
import (
"bufio"
"fmt"
"log"
"os"
"os/user"
"strconv"
"unsafe"
"golang.org/x/sys/unix"
)
// Ptr converts a Go byte slice to a pointer to its first element.
// An empty (or nil) slice has no first element, so nil is returned.
func Ptr(slice []byte) unsafe.Pointer {
    if len(slice) > 0 {
        return unsafe.Pointer(&slice[0])
    }
    return nil
}
// ByteSlice takes a pointer to some data and views it as a slice of bytes.
// Note, indexing into this slice is unsafe: the 1<<30 bound is arbitrary and
// callers must stay within the real allocation behind ptr.
func ByteSlice(ptr unsafe.Pointer) []byte {
    // Slice must fit in the smallest address space go supports.
    return (*[1 << 30]byte)(ptr)[:]
}
// PointerSlice takes a pointer to an array of pointers and views it as a slice
// of pointers. Note, indexing into this slice is unsafe: callers must stay
// within the real allocation behind ptr.
func PointerSlice(ptr unsafe.Pointer) []unsafe.Pointer {
    // Slice must fit in the smallest address space go supports.
    return (*[1 << 28]unsafe.Pointer)(ptr)[:]
}
// Index returns the first index i such that inVal == inArray[i].
// ok is true if a match is found, false otherwise.
func Index(inVal int64, inArray []int64) (index int, ok bool) {
    for i, v := range inArray {
        if v == inVal {
            return i, true
        }
    }
    return 0, false
}
// Lookup finds inVal in inArray and returns the corresponding element in
// outArray: if inVal == inArray[i], outVal == outArray[i].
// ok is true if a match is found, false otherwise.
func Lookup(inVal int64, inArray, outArray []int64) (outVal int64, ok bool) {
    i, found := Index(inVal, inArray)
    if !found {
        return 0, false
    }
    return outArray[i], true
}
// MinInt returns the lesser of a and b.
func MinInt(a, b int) int {
    if b < a {
        return b
    }
    return a
}
// MaxInt returns the greater of a and b.
func MaxInt(a, b int) int {
    if b > a {
        return b
    }
    return a
}
// MinInt64 returns the lesser of a and b.
func MinInt64(a, b int64) int64 {
    if b < a {
        return b
    }
    return a
}
// ReadLine returns a line of input from standard input. An empty string is
// returned if the user didn't insert anything or on error.
func ReadLine() (string, error) {
    s := bufio.NewScanner(os.Stdin)
    s.Scan()
    return s.Text(), s.Err()
}
// AtoiOrPanic converts a string to an int or it panics. Should only be used
// in situations where the input MUST be a decimal number.
func AtoiOrPanic(input string) int {
    value, err := strconv.Atoi(input)
    if err != nil {
        panic(err)
    }
    return value
}
// UserFromUID returns the User corresponding to the given user id.
// The int64 uid is rendered in base 10, as user.LookupId expects a string.
func UserFromUID(uid int64) (*user.User, error) {
    return user.LookupId(strconv.FormatInt(uid, 10))
}
// EffectiveUser returns the user entry corresponding to the effective user
// id of the current process.
func EffectiveUser() (*user.User, error) {
    return UserFromUID(int64(os.Geteuid()))
}
// IsUserRoot checks if the effective user is root (euid 0).
func IsUserRoot() bool {
    return os.Geteuid() == 0
}
// Chown changes the owner of a File to a User.
// Panics (via AtoiOrPanic) if the user record carries non-numeric IDs.
func Chown(file *os.File, user *user.User) error {
    return file.Chown(AtoiOrPanic(user.Uid), AtoiOrPanic(user.Gid))
}
// IsKernelVersionAtLeast returns true if the Linux kernel version is at least
// major.minor. If something goes wrong it assumes false.
func IsKernelVersionAtLeast(major, minor int) bool {
var uname unix.Utsname
if err := unix.Uname(&uname); err != nil {
log.Printf("Uname failed [%v], assuming old kernel", err)
return false
}
release := string(uname.Release[:])
log.Printf("Kernel version is %s", release)
var actualMajor, actualMinor int
if n, _ := fmt.Sscanf(release, "%d.%d", &actualMajor, &actualMinor); n != 2 {
log.Printf("Unrecognized uname format %q, assuming old kernel", release)
return false
}
return actualMajor > major ||
(actualMajor == major && actualMinor >= minor)
} | util/util.go | 0.581065 | 0.404155 | util.go | starcoder |
// Package les implements the Light Matrix Subprotocol.
package les
import (
"math/rand"
)
// wrsItem interface should be implemented by any entries that are to be selected from
// a weightedRandomSelect set. Note that recalculating monotonously decreasing item
// weights on-demand (without constantly calling update) is allowed
type wrsItem interface {
Weight() int64
}
// weightedRandomSelect is capable of weighted random selection from a set of items
type weightedRandomSelect struct {
    root *wrsNode // root of the weight tree; pushed down a level when full
    idx map[wrsItem]int // each stored item's leaf index within the tree
}
// newWeightedRandomSelect returns an empty weightedRandomSelect ready for use.
func newWeightedRandomSelect() *weightedRandomSelect {
    return &weightedRandomSelect{
        root: &wrsNode{maxItems: wrsBranches},
        idx:  make(map[wrsItem]int),
    }
}
// update updates an item's weight, adds it if it was non-existent or removes it if
// the new weight is zero. Note that explicitly updating decreasing weights is not necessary.
func (w *weightedRandomSelect) update(item wrsItem) {
    w.setWeight(item, item.Weight())
}
// remove removes an item from the set by driving its weight to zero.
func (w *weightedRandomSelect) remove(item wrsItem) {
    w.setWeight(item, 0)
}
// setWeight sets an item's weight to a specific value (removes it if zero)
func (w *weightedRandomSelect) setWeight(item wrsItem, weight int64) {
    idx, ok := w.idx[item]
    if ok {
        // Known item: update its leaf weight in place.
        w.root.setWeight(idx, weight)
        if weight == 0 {
            // Zero weight means removal; drop the index entry too.
            delete(w.idx, item)
        }
    } else {
        if weight != 0 {
            if w.root.itemCnt == w.root.maxItems {
                // add a new level: push the current root down so capacity
                // grows by a factor of wrsBranches.
                newRoot := &wrsNode{sumWeight: w.root.sumWeight, itemCnt: w.root.itemCnt, level: w.root.level + 1, maxItems: w.root.maxItems * wrsBranches}
                newRoot.items[0] = w.root
                newRoot.weights[0] = w.root.sumWeight
                w.root = newRoot
            }
            w.idx[item] = w.root.insert(item, weight)
        }
    }
}
// choose randomly selects an item from the set, with a chance proportional to its
// current weight. If the weight of the chosen element has been decreased since the
// last stored value, returns it with a newWeight/oldWeight chance, otherwise just
// updates its weight and selects another one
func (w *weightedRandomSelect) choose() wrsItem {
    for {
        if w.root.sumWeight == 0 {
            return nil
        }
        // Pick a point uniformly in the total weight and walk the tree to it.
        val := rand.Int63n(w.root.sumWeight)
        choice, lastWeight := w.root.choose(val)
        weight := choice.Weight()
        if weight != lastWeight {
            // Stored weight is stale; sync the tree before deciding.
            w.setWeight(choice, weight)
        }
        // Rejection sampling: accept with probability newWeight/lastWeight so
        // the selection matches the up-to-date weights.
        if weight >= lastWeight || rand.Int63n(lastWeight) < weight {
            return choice
        }
    }
}
const wrsBranches = 8 // max number of branches in the wrsNode tree
// wrsNode is a node of a tree structure that can store wrsItems or further wrsNodes.
type wrsNode struct {
    items [wrsBranches]interface{} // wrsItem leaves (level 0) or *wrsNode children
    weights [wrsBranches]int64 // weight stored under each slot
    sumWeight int64 // total weight of this subtree
    level, itemCnt, maxItems int // depth below this node, stored item count, capacity
}
// insert recursively inserts a new item to the tree and returns the item index
func (n *wrsNode) insert(item wrsItem, weight int64) int {
    // Find the first branch with room: at leaf level the first nil slot, at
    // inner levels the first nil or non-full subtree.
    branch := 0
    for n.items[branch] != nil && (n.level == 0 || n.items[branch].(*wrsNode).itemCnt == n.items[branch].(*wrsNode).maxItems) {
        branch++
        if branch == wrsBranches {
            // Callers only insert into non-full nodes; reaching here means a
            // broken invariant. A descriptive panic replaces panic(nil),
            // which is undiagnosable (recover() would return nil).
            panic("wrsNode.insert: no free branch in a non-full node")
        }
    }
    n.itemCnt++
    n.sumWeight += weight
    n.weights[branch] += weight
    if n.level == 0 {
        n.items[branch] = item
        return branch
    }
    // Descend, lazily creating the child subtree on first use.
    var subNode *wrsNode
    if n.items[branch] == nil {
        subNode = &wrsNode{maxItems: n.maxItems / wrsBranches, level: n.level - 1}
        n.items[branch] = subNode
    } else {
        subNode = n.items[branch].(*wrsNode)
    }
    subIdx := subNode.insert(item, weight)
    return subNode.maxItems*branch + subIdx
}
// setWeight updates the weight of a certain item (which should exist) and returns
// the change of the last weight value stored in the tree
func (n *wrsNode) setWeight(idx int, weight int64) int64 {
    if n.level == 0 {
        // Leaf level: apply the delta directly; free the slot on removal.
        oldWeight := n.weights[idx]
        n.weights[idx] = weight
        diff := weight - oldWeight
        n.sumWeight += diff
        if weight == 0 {
            n.items[idx] = nil
            n.itemCnt--
        }
        return diff
    }
    // Inner level: locate the child that owns idx, recurse, and fold the
    // returned delta into this node's totals on the way back up.
    branchItems := n.maxItems / wrsBranches
    branch := idx / branchItems
    diff := n.items[branch].(*wrsNode).setWeight(idx-branch*branchItems, weight)
    n.weights[branch] += diff
    n.sumWeight += diff
    if weight == 0 {
        n.itemCnt--
    }
    return diff
}
// choose recursively selects an item from the tree and returns it along with its weight
func (n *wrsNode) choose(val int64) (wrsItem, int64) {
for i, w := range n.weights {
if val < w {
if n.level == 0 {
return n.items[i].(wrsItem), n.weights[i]
}
return n.items[i].(*wrsNode).choose(val)
}
val -= w
}
panic(nil)
} | les/randselect.go | 0.7413 | 0.410047 | randselect.go | starcoder |
package assertions
import (
"fmt"
"regexp"
"strings"
"testing"
h "github.com/buildpacks/pack/testhelpers"
)
// OutputAssertionManager bundles a testing handle, an assertion helper, and a
// captured command output so assertions about that output read fluently.
type OutputAssertionManager struct {
    testObject *testing.T // test to fail on mismatches
    assert h.AssertionManager // underlying assertion helpers
    output string // captured CLI output under inspection
}
// NewOutputAssertionManager builds an OutputAssertionManager around the given
// test and the captured command output to be inspected.
func NewOutputAssertionManager(t *testing.T, output string) OutputAssertionManager {
    return OutputAssertionManager{
        testObject: t,
        assert: h.NewAssertionManager(t),
        output: output,
    }
}
// ReportsSuccessfulImageBuild asserts the output reports that image name was built.
func (o OutputAssertionManager) ReportsSuccessfulImageBuild(name string) {
    o.testObject.Helper()
    o.assert.ContainsF(o.output, "Successfully built image '%s'", name)
}
// ReportSuccessfulQuietBuild asserts quiet-mode output: just an image@sha256 digest reference.
func (o OutputAssertionManager) ReportSuccessfulQuietBuild(name string) {
    o.testObject.Helper()
    o.testObject.Log("quiet mode")
    o.assert.Matches(strings.TrimSpace(o.output), regexp.MustCompile(name+`@sha256:[\w]{12,64}`))
}
// ReportsSuccessfulRebase asserts the output reports that image name was rebased.
func (o OutputAssertionManager) ReportsSuccessfulRebase(name string) {
    o.testObject.Helper()
    o.assert.ContainsF(o.output, "Successfully rebased image '%s'", name)
}
// ReportsUsingBuildCacheVolume asserts a build cache volume was used.
func (o OutputAssertionManager) ReportsUsingBuildCacheVolume() {
    o.testObject.Helper()
    o.testObject.Log("uses a build cache volume")
    o.assert.Contains(o.output, "Using build cache volume")
}
// ReportsSelectingRunImageMirror asserts the given run-image mirror was selected.
func (o OutputAssertionManager) ReportsSelectingRunImageMirror(mirror string) {
    o.testObject.Helper()
    o.testObject.Log("selects expected run image mirror")
    o.assert.ContainsF(o.output, "Selected run image mirror '%s'", mirror)
}
// ReportsSelectingRunImageMirrorFromLocalConfig asserts the mirror was taken from local config.
func (o OutputAssertionManager) ReportsSelectingRunImageMirrorFromLocalConfig(mirror string) {
    o.testObject.Helper()
    o.testObject.Log("local run-image mirror is selected")
    o.assert.ContainsF(o.output, "Selected run image mirror '%s' from local config", mirror)
}
// ReportsSkippingRestore asserts the restore phase was skipped due to cache clearing.
func (o OutputAssertionManager) ReportsSkippingRestore() {
    o.testObject.Helper()
    o.testObject.Log("skips restore")
    o.assert.Contains(o.output, "Skipping 'restore' due to clearing cache")
}
// ReportsRunImageStackNotMatchingBuilder asserts the stack-mismatch error appears.
func (o OutputAssertionManager) ReportsRunImageStackNotMatchingBuilder(runImageStack, builderStack string) {
    o.testObject.Helper()
    o.assert.Contains(
        o.output,
        fmt.Sprintf("run-image stack id '%s' does not match builder stack '%s'", runImageStack, builderStack),
    )
}
// WithoutColors asserts the output contains no ANSI color escape sequences.
func (o OutputAssertionManager) WithoutColors() {
    o.testObject.Helper()
    o.testObject.Log("has no color")
    o.assert.NoMatches(o.output, regexp.MustCompile(`\x1b\[[0-9;]*m`))
}
// ReportsAddingBuildpack asserts that buildpack name at version was added to the builder.
func (o OutputAssertionManager) ReportsAddingBuildpack(name, version string) {
    o.testObject.Helper()
    o.assert.ContainsF(o.output, "Adding buildpack '%s' version '%s' to builder", name, version)
}
// ReportsPullingImage asserts that image was pulled.
func (o OutputAssertionManager) ReportsPullingImage(image string) {
    o.testObject.Helper()
    o.assert.ContainsF(o.output, "Pulling image '%s'", image)
}
// ReportsImageNotExistingOnDaemon asserts the daemon reported image as missing.
func (o OutputAssertionManager) ReportsImageNotExistingOnDaemon(image string) {
    o.testObject.Helper()
    o.assert.ContainsF(o.output, "image '%s' does not exist on the daemon", image)
}
// ReportsPackageCreation asserts that package name was created.
func (o OutputAssertionManager) ReportsPackageCreation(name string) {
    o.testObject.Helper()
    o.assert.ContainsF(o.output, "Successfully created package '%s'", name)
}
// ReportsPackagePublished asserts that package name was published.
func (o OutputAssertionManager) ReportsPackagePublished(name string) {
    o.testObject.Helper()
    o.assert.ContainsF(o.output, "Successfully published package '%s'", name)
}
// ReportsCommandUnknown asserts the unknown-command error for command appears.
func (o OutputAssertionManager) ReportsCommandUnknown(command string) {
    o.testObject.Helper()
    o.assert.ContainsF(o.output, `unknown command "%s" for "pack"`, command)
}
// IncludesUsagePrompt asserts the generic usage hint is present.
func (o OutputAssertionManager) IncludesUsagePrompt() {
    o.testObject.Helper()
    o.assert.Contains(o.output, "Run 'pack --help' for usage.")
}
// ReportsSettingDefaultBuilder asserts that builder name became the default.
func (o OutputAssertionManager) ReportsSettingDefaultBuilder(name string) {
    o.testObject.Helper()
    o.assert.ContainsF(o.output, "Builder '%s' is now the default builder", name)
}
// IncludesSuggestedBuildersHeading asserts the suggested-builders heading is present.
func (o OutputAssertionManager) IncludesSuggestedBuildersHeading() {
    o.testObject.Helper()
    o.assert.Contains(o.output, "Suggested builders:")
}
// IncludesMessageToSetDefaultBuilder asserts the set-default-builder prompt is present.
func (o OutputAssertionManager) IncludesMessageToSetDefaultBuilder() {
    o.testObject.Helper()
    o.assert.Contains(o.output, "Please select a default builder with:")
}
// IncludesSuggestedStacksHeading asserts the community-stacks heading is present.
func (o OutputAssertionManager) IncludesSuggestedStacksHeading() {
    o.testObject.Helper()
    o.assert.Contains(o.output, "Stacks maintained by the community:")
}
// IncludesTrustedBuildersHeading asserts the trusted-builders heading is present.
func (o OutputAssertionManager) IncludesTrustedBuildersHeading() {
    o.testObject.Helper()
    o.assert.Contains(o.output, "Trusted Builders:")
}
const googleBuilder = "gcr.io/buildpacks/builder:v1"
func (o OutputAssertionManager) IncludesGoogleBuilder() {
o.testObject.Helper()
o.assert.Contains(o.output, googleBuilder)
}
func (o OutputAssertionManager) IncludesPrefixedGoogleBuilder() {
o.testObject.Helper()
o.assert.Matches(o.output, regexp.MustCompile(fmt.Sprintf(`Google:\s+'%s'`, googleBuilder)))
}
var herokuBuilders = []string{
"heroku/buildpacks:18",
"heroku/buildpacks:20",
}
func (o OutputAssertionManager) IncludesHerokuBuilders() {
o.testObject.Helper()
o.assert.ContainsAll(o.output, herokuBuilders...)
}
func (o OutputAssertionManager) IncludesPrefixedHerokuBuilders() {
o.testObject.Helper()
for _, builder := range herokuBuilders {
o.assert.Matches(o.output, regexp.MustCompile(fmt.Sprintf(`Heroku:\s+'%s'`, builder)))
}
}
var paketoBuilders = []string{
"paketobuildpacks/builder:base",
"paketobuildpacks/builder:full",
"paketobuildpacks/builder:tiny",
}
func (o OutputAssertionManager) IncludesPaketoBuilders() {
o.testObject.Helper()
o.assert.ContainsAll(o.output, paketoBuilders...)
}
func (o OutputAssertionManager) IncludesPrefixedPaketoBuilders() {
o.testObject.Helper()
for _, builder := range paketoBuilders {
o.assert.Matches(o.output, regexp.MustCompile(fmt.Sprintf(`Paketo Buildpacks:\s+'%s'`, builder)))
}
}
func (o OutputAssertionManager) IncludesDeprecationWarning() {
o.testObject.Helper()
o.assert.Matches(o.output, regexp.MustCompile(fmt.Sprintf(`Warning: Command 'pack [\w-]+' has been deprecated, please use 'pack [\w-\s]+' instead`)))
}
func (o OutputAssertionManager) ReportsSuccesfulRunImageMirrorsAdd(image, mirror string) {
o.testObject.Helper()
o.assert.ContainsF(o.output, "Run Image '%s' configured with mirror '%s'\n", image, mirror)
}
func (o OutputAssertionManager) ReportsReadingConfig() {
o.testObject.Helper()
o.assert.Contains(o.output, "reading config")
}
func (o OutputAssertionManager) ReportsInvalidBuilderToml() {
o.testObject.Helper()
o.assert.Contains(o.output, "invalid builder toml")
} | acceptance/assertions/output.go | 0.688992 | 0.529081 | output.go | starcoder |
package advent
import (
. "github.com/davidparks11/advent2021/internal/advent/day16"
)
type packetDecoder struct {
dailyProblem
}
func NewPacketDecoder() Problem {
return &packetDecoder{
dailyProblem{
day: 16,
},
}
}
func (p *packetDecoder) Solve() interface{} {
input := p.GetInputLines()
var results []int
results = append(results, p.sumVersions(input))
results = append(results, p.evaluatePackets(input))
return results
}
/*
As you leave the cave and reach open waters, you receive a transmission from the Elves back on the ship.
The transmission was sent using the Buoyancy Interchange Transmission System (BITS), a method of packing numeric expressions into a binary sequence. Your submarine's computer has saved the transmission in hexadecimal (your puzzle input).
The first step of decoding the message is to convert the hexadecimal representation into binary. Each character of hexadecimal corresponds to four bits of binary data:
0 = 0000
1 = 0001
2 = 0010
3 = 0011
4 = 0100
5 = 0101
6 = 0110
7 = 0111
8 = 1000
9 = 1001
A = 1010
B = 1011
C = 1100
D = 1101
E = 1110
F = 1111
The BITS transmission contains a single packet at its outermost layer which itself contains many other packets. The hexadecimal representation of this packet might encode a few extra 0 bits at the end; these are not part of the transmission and should be ignored.
Every packet begins with a standard header: the first three bits encode the packet version, and the next three bits encode the packet type ID. These two values are numbers; all numbers encoded in any packet are represented as binary with the most significant bit first. For example, a version encoded as the binary sequence 100 represents the number 4.
Packets with type ID 4 represent a literal value. Literal value packets encode a single binary number. To do this, the binary number is padded with leading zeroes until its length is a multiple of four bits, and then it is broken into groups of four bits. Each group is prefixed by a 1 bit except the last group, which is prefixed by a 0 bit. These groups of five bits immediately follow the packet header. For example, the hexadecimal string D2FE28 becomes:
110100101111111000101000
VVVTTTAAAAABBBBBCCCCC
Below each bit is a label indicating its purpose:
The three bits labeled V (110) are the packet version, 6.
The three bits labeled T (100) are the packet type ID, 4, which means the packet is a literal value.
The five bits labeled A (10111) start with a 1 (not the last group, keep reading) and contain the first four bits of the number, 0111.
The five bits labeled B (11110) start with a 1 (not the last group, keep reading) and contain four more bits of the number, 1110.
The five bits labeled C (00101) start with a 0 (last group, end of packet) and contain the last four bits of the number, 0101.
The three unlabeled 0 bits at the end are extra due to the hexadecimal representation and should be ignored.
So, this packet represents a literal value with binary representation 011111100101, which is 2021 in decimal.
Every other type of packet (any packet with a type ID other than 4) represent an operator that performs some calculation on one or more sub-packets contained within. Right now, the specific operations aren't important; focus on parsing the hierarchy of sub-packets.
An operator packet contains one or more packets. To indicate which subsequent binary data represents its sub-packets, an operator packet can use one of two modes indicated by the bit immediately after the packet header; this is called the length type ID:
If the length type ID is 0, then the next 15 bits are a number that represents the total length in bits of the sub-packets contained by this packet.
If the length type ID is 1, then the next 11 bits are a number that represents the number of sub-packets immediately contained by this packet.
Finally, after the length type ID bit and the 15-bit or 11-bit field, the sub-packets appear.
For example, here is an operator packet (hexadecimal string 38006F45291200) with length type ID 0 that contains two sub-packets:
00111000000000000110111101000101001010010001001000000000
VVVTTTILLLLLLLLLLLLLLLAAAAAAAAAAABBBBBBBBBBBBBBBB
The three bits labeled V (001) are the packet version, 1.
The three bits labeled T (110) are the packet type ID, 6, which means the packet is an operator.
The bit labeled I (0) is the length type ID, which indicates that the length is a 15-bit number representing the number of bits in the sub-packets.
The 15 bits labeled L (000000000011011) contain the length of the sub-packets in bits, 27.
The 11 bits labeled A contain the first sub-packet, a literal value representing the number 10.
The 16 bits labeled B contain the second sub-packet, a literal value representing the number 20.
After reading 11 and 16 bits of sub-packet data, the total length indicated in L (27) is reached, and so parsing of this packet stops.
As another example, here is an operator packet (hexadecimal string EE00D40C823060) with length type ID 1 that contains three sub-packets:
11101110000000001101010000001100100000100011000001100000
VVVTTTILLLLLLLLLLLAAAAAAAAAAABBBBBBBBBBBCCCCCCCCCCC
The three bits labeled V (111) are the packet version, 7.
The three bits labeled T (011) are the packet type ID, 3, which means the packet is an operator.
The bit labeled I (1) is the length type ID, which indicates that the length is a 11-bit number representing the number of sub-packets.
The 11 bits labeled L (00000000011) contain the number of sub-packets, 3.
The 11 bits labeled A contain the first sub-packet, a literal value representing the number 1.
The 11 bits labeled B contain the second sub-packet, a literal value representing the number 2.
The 11 bits labeled C contain the third sub-packet, a literal value representing the number 3.
After reading 3 complete sub-packets, the number of sub-packets indicated in L (3) is reached, and so parsing of this packet stops.
For now, parse the hierarchy of the packets throughout the transmission and add up all of the version numbers.
Here are a few more examples of hexadecimal-encoded transmissions:
8A004A801A8002F478 represents an operator packet (version 4) which contains an operator packet (version 1) which contains an operator packet (version 5) which contains a literal value (version 6); this packet has a version sum of 16.
620080001611562C8802118E34 represents an operator packet (version 3) which contains two sub-packets; each sub-packet is an operator packet that contains two literal values. This packet has a version sum of 12.
C0015000016115A2E0802F182340 has the same structure as the previous example, but the outermost packet uses a different length type ID. This packet has a version sum of 23.
A0016C880162017C3686B18A3D4780 is an operator packet that contains an operator packet that contains an operator packet that contains five literal values; it has a version sum of 31.
Decode the structure of your hexadecimal-encoded BITS transmission; what do you get if you add up the version numbers in all packets?
Your puzzle answer was 904.
*/
func (p *packetDecoder) sumVersions(input []string) int {
bits := ParseInput(input, true)
return bits.Decode()
}
/*
Now that you have the structure of your transmission decoded, you can calculate the value of the expression it represents.
Literal values (type ID 4) represent a single number as described above. The remaining type IDs are more interesting:
Packets with type ID 0 are sum packets - their value is the sum of the values of their sub-packets. If they only have a single sub-packet, their value is the value of the sub-packet.
Packets with type ID 1 are product packets - their value is the result of multiplying together the values of their sub-packets. If they only have a single sub-packet, their value is the value of the sub-packet.
Packets with type ID 2 are minimum packets - their value is the minimum of the values of their sub-packets.
Packets with type ID 3 are maximum packets - their value is the maximum of the values of their sub-packets.
Packets with type ID 5 are greater than packets - their value is 1 if the value of the first sub-packet is greater than the value of the second sub-packet; otherwise, their value is 0. These packets always have exactly two sub-packets.
Packets with type ID 6 are less than packets - their value is 1 if the value of the first sub-packet is less than the value of the second sub-packet; otherwise, their value is 0. These packets always have exactly two sub-packets.
Packets with type ID 7 are equal to packets - their value is 1 if the value of the first sub-packet is equal to the value of the second sub-packet; otherwise, their value is 0. These packets always have exactly two sub-packets.
Using these rules, you can now work out the value of the outermost packet in your BITS transmission.
For example:
C200B40A82 finds the sum of 1 and 2, resulting in the value 3.
04005AC33890 finds the product of 6 and 9, resulting in the value 54.
880086C3E88112 finds the minimum of 7, 8, and 9, resulting in the value 7.
CE00C43D881120 finds the maximum of 7, 8, and 9, resulting in the value 9.
D8005AC2A8F0 produces 1, because 5 is less than 15.
F600BC2D8F produces 0, because 5 is not greater than 15.
9C005AC2F8F0 produces 0, because 5 is not equal to 15.
9C0141080250320F1802104A08 produces 1, because 1 + 3 = 2 * 2.
What do you get if you evaluate the expression represented by your hexadecimal-encoded BITS transmission?
*/
func (p *packetDecoder) evaluatePackets(input []string) int {
bits := ParseInput(input, false)
return bits.Decode()
} | internal/advent/day16.go | 0.806396 | 0.625981 | day16.go | starcoder |
package bed
import (
"bytes"
"log"
"sort"
"strconv"
"strings"
"github.com/edotau/goFish/simpleio"
"github.com/vertgenlab/gonomics/numbers"
)
// Bed interface is a data structure that implements the following methods:
// Chrom() []byte returns the chromosome name
// ChrStart() int returns starting the position
// ChrEnd() int returns ending the position
type Bed interface {
Chrom() string
ChrStart() int
ChrEnd() int
//ToString() string
}
// Simple is the most basic Bed implementation and only records the current chromosome, start and end coordinates.
type Simple struct {
Chr string
Start int
End int
}
// Overlap will compare two bed regions and check if there is any overlap.
func Overlap(alpha Bed, beta Bed) bool {
if (numbers.Max(alpha.ChrStart(), beta.ChrStart()) < numbers.Min(alpha.ChrEnd(), beta.ChrEnd())) && alpha.Chrom() == beta.Chrom() {
return true
} else {
return false
}
}
// overlap is the helper function used as an attempt to reduce method calls to the interface amd attempts to reduce duplication of code for different overlap settings.
func Overlapping(alphaChr string, betaChr string, alphaStart int, alphaEnd int, betaStart int, betaEnd int) bool {
if (numbers.Max(alphaStart, betaStart) < numbers.Min(alphaEnd, betaEnd)) && alphaChr == betaChr {
return true
} else {
return false
}
}
type GenomeInfo struct {
Chr string
Start int
End int
Info bytes.Buffer
Delem byte
}
// GenomeInfo bed struct implements the bed interface with the Chrom() method which returns the chromosome name.
func (b *GenomeInfo) Chrom() string {
return b.Chr
}
// GenomeInfo bed struct implements the bed interface with the ChrStart() method which returns the starting position of the region.
func (b *GenomeInfo) ChrStart() int {
return b.Start
}
// GenomeInfo bed struct implements the bed interface with the ChrEnd() method which returns the starting position of the region.
func (b *GenomeInfo) ChrEnd() int {
return b.End
}
func GenomeInfoToString(b GenomeInfo) string {
buf := strings.Builder{}
buf.WriteString(b.Chr)
buf.WriteByte('\t')
buf.WriteString(simpleio.IntToString(b.ChrStart()))
buf.WriteByte('\t')
buf.WriteString(simpleio.IntToString(b.ChrEnd()))
if b.Info.String() != "" {
buf.WriteByte('\t')
buf.Write(b.Info.Bytes())
}
return buf.String()
}
func ToGenomeInfo(reader *simpleio.SimpleReader) (*GenomeInfo, bool) {
var err bool
reader.Buffer, err = simpleio.ReadLine(reader)
if !err {
columns := strings.SplitN(reader.Buffer.String(), "\t", 4)
ans := GenomeInfo{}
ans.Chr = columns[0]
ans.Start = simpleio.StringToInt(columns[1])
ans.End = simpleio.StringToInt(columns[2])
if len(columns) > 3 {
ans.Info.WriteString(columns[3])
}
return &ans, false
} else {
return nil, true
}
}
func ReadHeader(reader *simpleio.SimpleReader) *strings.Builder {
header := &strings.Builder{}
for i, done := ParseComments(reader); !done; i, done = ParseComments(reader) {
header.Write(i.Bytes())
}
return header
}
func ParseComments(reader *simpleio.SimpleReader) (*bytes.Buffer, bool) {
if b, err := reader.Peek(1); err == nil && b[0] == byte('#') {
return simpleio.ReadLine(reader)
} else {
return nil, true
}
}
// HeadOverlapByLen checks for overlap while modifying the starting coordinates.
func HeadOverlapByLen(alpha Bed, beta Bed, length int) bool {
var alphaStart, betaStart int = alpha.ChrStart() - length, beta.ChrStart() - length
if alphaStart < 0 {
alphaStart = 0
}
if betaStart < 0 {
betaStart = 0
}
return Overlapping(alpha.Chrom(), beta.Chrom(), alphaStart, alpha.ChrEnd(), betaStart, beta.ChrEnd())
}
// TailOverlapByLen checks for overlap while modifying the ending coordinates.
func TailOverlapByLen(alpha Bed, beta Bed, alphaSize int, betaSize int, length int) bool {
var alphaEnd, betaEnd int = alpha.ChrStart() - length, beta.ChrStart() - length
if alphaEnd > alphaSize {
alphaEnd = alphaSize
}
if betaEnd > betaSize {
betaEnd = betaSize
}
return Overlapping(alpha.Chrom(), beta.Chrom(), alpha.ChrStart(), alphaEnd, beta.ChrStart(), betaEnd)
}
// ToSimpleBed will take a simpleReader and return a simpleBed struct
func ToSimpleBed(reader *simpleio.SimpleReader) (*Simple, bool) {
curr, done := simpleio.ReadLine(reader)
if !done {
columns := bytes.Split(curr.Bytes(), []byte{'\t'})
answer := Simple{
Chr: string(columns[0]),
Start: simpleio.StringToInt(string(columns[1])),
End: simpleio.StringToInt(string(columns[2])),
}
return &answer, false
} else {
return nil, true
}
}
// Simple bed struct implements the bed interface with the Chrom() method which returns the chromosome name.
func (bed *Simple) Chrom() string {
return bed.Chr
}
// Simple bed struct implements the bed interface with the ChrStart() method which returns the starting position of the region.
func (bed *Simple) ChrStart() int {
return bed.Start
}
// Simple bed struct implements the bed interface with the ChrEnd() method which returns the starting position of the region.
func (bed *Simple) ChrEnd() int {
return bed.End
}
// Five is a slightly more detailed than the basic Bed implementation and records the current chromosome, start and end coordinates
// a name, and a bed score
type Five struct {
Chr string
Start int
End int
Name string
Score int
}
// Five bed struct implements the bed interface with the Chrom() method which returns the chromosome name.
func (bed *Five) Chrom() string {
return bed.Chr
}
// Five bed struct implements the bed interface with the ChrStart() method which returns the starting position of the region.
func (bed *Five) ChrStart() int {
return bed.Start
}
// Five bed struct implements the bed interface with the ChrEnd() method which returns the ending position of the region.
func (bed *Five) ChrEnd() int {
return bed.End
}
// Six is a slightly more detailed than the basic Bed implementation and records the current chromosome, start and end coordinates
// Similar to five, but includes the strand information.
type Six struct {
Chr string
Start int
End int
Name string
Score int
Strand bool
}
// Six bed struct implements the bed interface with the Chrom() method which returns the chromosome name.
func (bed *Six) Chrom() string {
return bed.Chr
}
// Six bed struct implements the bed interface with the ChrStart() method which returns the starting position of the region.
func (bed *Six) ChrStart() int {
return bed.Start
}
// Six bed struct implements the bed interface with the ChrEnd() method which returns the ending position of the region.
func (bed *Six) ChrEnd() int {
return bed.End
}
// BedPlus is the most detailed of the bed formats and will store all information found in bed formatted files.
// TODO: Consider bed 12 plus format (very useful for working with gtf and gff files)
type BedPlus struct {
Chr string
Start int
End int
Name string
Score int
Info string
}
// BedPlus struct implements the bed interface with the Chrom() method which returns the chromosome name.
func (bed *BedPlus) Chrom() string {
return bed.Chr
}
// BedPlus struct implements the bed interface with the ChrStart() method which returns the starting position of the region.
func (bed *BedPlus) ChrStart() int {
return bed.Start
}
// BedPlus struct implements the bed interface with the ChrEnd() method which returns the ending position of the region.
func (bed *BedPlus) ChrEnd() int {
return bed.End
}
// ToBedPlus will take a simpleReader as an input and returns a BedPlus struct.
func ToBedPlus(reader *simpleio.SimpleReader) (*BedPlus, bool) {
curr, done := simpleio.ReadLine(reader)
if !done {
columns := bytes.SplitN(curr.Bytes(), []byte{'\t'}, 6)
answer := BedPlus{
Chr: string(columns[0]),
Start: simpleio.StringToInt(string(columns[1])),
End: simpleio.StringToInt(string(columns[2])),
Name: string(columns[3]),
Score: simpleio.StringToInt(string(columns[4])),
Info: string(columns[5]),
}
return &answer, false
} else {
return nil, true
}
}
func (b *BedPlus) String() string {
str := strings.Builder{}
str.WriteString(b.Chrom())
str.WriteByte('\t')
str.WriteString(strconv.Itoa(b.ChrStart()))
str.WriteByte('\t')
str.WriteString(strconv.Itoa(b.ChrEnd()))
str.WriteByte('\t')
str.WriteString(b.Name)
str.WriteByte('\t')
str.WriteString(strconv.Itoa(b.Score))
str.WriteByte('\t')
str.WriteString(b.Info)
return str.String()
}
// Pvalue struct is a simple bed interface with an added PValue field
type Pvalue struct {
Chr string
Start int
End int
Name string
PValue float64
}
// Pvalue struct implements the bed interface with the Chrom() method which returns the chromosome name.
func (bed *Pvalue) Chrom() string {
return bed.Chr
}
// Pvalue struct implements the bed interface with the ChrStart() method which returns the starting position of the region.
func (bed *Pvalue) ChrStart() int {
return bed.Start
}
// Pvalue struct implements the bed interface with the ChrEnd() method which returns the starting position of the region.
func (bed *Pvalue) ChrEnd() int {
return bed.End
}
// comaarePValue is a helper function to compare pvalues between two Pvalue beds used in sorting slices.
func comparePValue(a *Pvalue, b *Pvalue) int {
if a.PValue < b.PValue {
return -1
}
if a.PValue > b.PValue {
return 1
}
return 0
}
// SortByPValue performs a Pvalue sort to find the smallest and most significant.
func SortByPValue(peak []*Pvalue) {
sort.Slice(peak, func(i, j int) bool { return comparePValue(peak[i], peak[j]) == -1 })
}
// ToBedPValue will convert bytes read from simpleReader and return a bed Pvalue.
func ToBedPValue(reader *simpleio.SimpleReader) (*Pvalue, bool) {
curr, done := simpleio.ReadLine(reader)
if !done {
columns := bytes.SplitN(curr.Bytes(), []byte{'\t'}, 6)
answer := Pvalue{
Chr: string(columns[0]),
Start: simpleio.StringToInt(string(columns[1])),
End: simpleio.StringToInt(string(columns[2])),
Name: string(columns[3]),
PValue: float64(simpleio.StringToFloat(string(columns[6]))),
}
return &answer, false
} else {
return nil, true
}
}
//To string will cast a bed interface and return a simple string with 3 fields.
func ToString(b Bed) string {
var str strings.Builder
str.WriteString(b.Chrom())
str.WriteByte('\t')
str.WriteString(strconv.Itoa(b.ChrStart()))
str.WriteByte('\t')
str.WriteString(strconv.Itoa(b.ChrEnd()))
return str.String()
}
func ReadBed(filename string) []Simple {
reader := simpleio.NewReader(filename)
var ans []Simple
for i, done := SimpleLine(reader); !done; i, done = SimpleLine(reader) {
ans = append(ans, *i)
}
return ans
}
func SimpleLine(reader *simpleio.SimpleReader) (*Simple, bool) {
buffer, done := simpleio.ReadLine(reader)
if !done {
fields := strings.Split(buffer.String(), "\t")
return &Simple{
Chr: fields[0],
Start: simpleio.StringToInt(fields[1]),
End: simpleio.StringToInt(fields[2]),
}, false
} else {
return nil, true
}
}
func PeakBedReading(reader *simpleio.SimpleReader) (*BedPlus, bool) {
var err bool
reader.Buffer, err = simpleio.ReadLine(reader)
if !err {
columns := strings.SplitN(reader.Buffer.String(), "\t", 6)
ans := BedPlus{}
ans.Chr = columns[0]
ans.Start = simpleio.StringToInt(columns[1])
ans.End = simpleio.StringToInt(columns[2])
ans.Name = columns[3]
ans.Score = simpleio.StringToInt(columns[4])
if ans.Score > 1000 {
log.Fatalf("Error: bed scores should not be greater than 1000...\n")
}
ans.Info = columns[5]
return &ans, false
} else {
return nil, true
}
}
//func WriteBed(b Bed) | bed/bed.go | 0.746231 | 0.413181 | bed.go | starcoder |
package jsonschema
import (
"reflect"
)
// JSON Schema primitive type names, used as the value of the "type" keyword.
const (
	tTypeObject  = "object"
	tTypeString  = "string"
	tTypeInteger = "integer"
	tTypeNumber  = "number"
	tTypeBoolean = "boolean"
	tTypeArray   = "array"
)
// Reflect builds a *Schema describing the given value. The returned schema's
// root type is reflected inline, while any nested named structs encountered
// along the way are collected into the schema's Definitions map.
func Reflect(v interface{}) *Schema {
	defs := Definitions{}
	// Keep the original (possibly pointer) type, but reflect over the
	// dereferenced value.
	t := reflect.TypeOf(v)
	val := reflect.Indirect(reflect.ValueOf(v))
	root := reflectType(defs, t, val, true)
	root.Version = Version
	return &Schema{Type: root, Definitions: defs}
}
// reflectType converts one Go type/value pair into a JSON Schema *Type,
// recording any named struct schemas in definitions. When root is true the
// resulting struct schema is returned inline; otherwise nested structs are
// stored under definitions and replaced by a reference.
func reflectType(definitions Definitions, t reflect.Type, v reflect.Value, root bool) *Type {
	if v.Kind() == reflect.Ptr {
		v = v.Elem() // deref ptr
		if !v.IsValid() {
			v = reflect.Zero(t.Elem()) // create zero value
		}
	}
	if v.Kind() == reflect.Interface {
		v = reflect.Indirect(v.Elem())
	}
	// Well-known concrete types get dedicated handlers. Note this switches on
	// the original type t, not the possibly-dereferenced value v.
	switch t {
	case typeTime:
		return reflectTime(definitions, v)
	case typeIP:
		return reflectIP(definitions, v)
	case typeURI:
		return reflectURI(definitions, v)
	}
	// Custom schema interfaces take precedence over kind-based reflection.
	// The case order fixes their relative precedence (PB enum first —
	// presumably protobuf-generated enums; confirm against the interface
	// definitions, which are outside this chunk).
	switch true {
	case t.Implements(typePBEnum):
		return reflectPBEnum(definitions, v)
	case t.Implements(typeOneOf):
		return reflectOneOf(definitions, v)
	case t.Implements(typeAnyOf):
		return reflectAnyOf(definitions, v)
	case t.Implements(typeAllOf):
		return reflectAllOf(definitions, v)
	case t.Implements(typeEnum):
		return reflectEnum(definitions, v)
	}
	// Fall back on the reflected kind of the (dereferenced) value.
	switch v.Kind() {
	case reflect.Struct:
		currentType := reflectStruct(definitions, v)
		if root {
			return currentType
		}
		// Nested structs are registered by type name and referenced.
		definitions[v.Type().Name()] = currentType
		return newReference(v.Type().Name())
	case reflect.Slice:
		return reflectSlice(definitions, v)
	case reflect.Map:
		return reflectMap(definitions, v)
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return reflectInteger(definitions, v)
	case reflect.Float32, reflect.Float64:
		return reflectNumber(definitions, v)
	case reflect.Bool:
		return reflectBool(definitions, v)
	case reflect.String:
		return reflectString(definitions, v)
	}
	return reflectInterface(definitions, t, v)
}
// reflectStruct builds an "object" schema from the exported fields of a
// struct value. Unexported and tag-ignored fields are skipped, embedded
// fields are flattened into the current object, and struct tags supply the
// property name plus extra info/validation keywords.
func reflectStruct(definitions Definitions, v reflect.Value) *Type {
	var currentType = newType(tTypeObject)
	for i := 0; i < v.NumField(); i++ {
		structField := v.Type().Field(i)
		structValue := v.Field(i)
		// unexported field
		if isUnexported(structField) {
			continue
		}
		// embedded field
		if isAnonymous(structField) {
			typ := reflectType(definitions, structField.Type, structValue, false)
			// A struct consisting of a single embedded non-object field
			// collapses to the embedded type's schema.
			if typ.Type != tTypeObject && v.NumField() == 1 {
				return typ
			}
			// Merge the embedded schema's definitions and properties into
			// the current object.
			// NOTE(review): for embedded structs reflectType returns a
			// reference (see its Struct case), whose Definitions/Properties
			// maps may be empty — verify embedded fields actually merge.
			for def, info := range typ.Definitions {
				definitions[def] = info
			}
			for def, info := range typ.Properties {
				currentType.Properties[def] = info
			}
			continue
		}
		tags := parseTags(structField.Tag)
		if isIgnored(tags) {
			continue
		}
		fieldType := reflectType(definitions, structField.Type, structValue, false)
		if fieldType == nil {
			continue
		}
		// Apply tag-driven metadata and validation keywords to the field.
		applyInfo(fieldType, tags)
		applyValidation(fieldType, tags)
		currentType.Properties[tags.name] = fieldType
	}
	return currentType
}
// isUnexported reports whether the struct field is inaccessible from outside
// its defining package; reflect sets PkgPath only for unexported fields.
func isUnexported(field reflect.StructField) bool {
	return len(field.PkgPath) > 0
}
// isAnonymous reports whether the struct field is an embedded (anonymous)
// field, i.e. declared with a type but no explicit field name.
func isAnonymous(field reflect.StructField) bool {
	return field.Anonymous
}
// Package fibHeap implements the Fibonacci Heap priority queue.
// This implementation is a bit different from the traditional Fibonacci Heap by having an index map to achieve better encapsulation.
package fibHeap
import (
"bytes"
"container/list"
"errors"
"fmt"
"math"
)
// Value is the interface that all values push into or pop from the FibHeap by value interfaces must implement.
type Value interface {
	// Tag returns the unique tag of the value.
	// The tag is used in the index map, so it must be unique across all
	// values stored in one heap and usable as a map key.
	Tag() interface{}
	// Key returns the key as known as the priority of the value.
	// The valid range of the key is (-inf, +inf]; -inf itself is reserved
	// for internal usage.
	Key() float64
}
// FibHeap represents a Fibonacci Heap.
// Please note that all methods of FibHeap are not concurrent safe.
type FibHeap struct {
	roots       *list.List             // root nodes of all trees in the heap
	index       map[interface{}]*node  // tag -> node lookup for direct access by tag
	treeDegrees map[uint]*list.Element // degree -> root element; presumably used when consolidating trees — confirm in extractMin
	min         *node                  // current minimum node; nil when the heap is empty
	num         uint                   // total number of values stored in the heap
}
// node is the internal tree node of the Fibonacci heap.
type node struct {
	self     *list.Element // the list element that holds this node in its containing list
	parent   *node         // parent node; nil for root nodes
	children *list.List    // child nodes of this node
	marked   bool          // mark flag — presumably the standard cascading-cut bookkeeping; confirm in decreaseKey
	degree   uint          // number of children
	position uint          // positional bookkeeping; its exact use is not visible in this chunk — confirm
	tag      interface{}   // unique tag, key of the heap's index map
	key      float64       // priority; smaller keys are closer to the minimum
	value    Value         // optional user value (nil for entries added via Insert)
}
// NewFibHeap creates an initialized, empty Fibonacci Heap.
func NewFibHeap() *FibHeap {
	// num and min rely on their zero values (0 and nil).
	return &FibHeap{
		roots:       list.New(),
		index:       make(map[interface{}]*node),
		treeDegrees: make(map[uint]*list.Element),
	}
}
// Num reports how many values are currently stored in the heap.
func (heap *FibHeap) Num() uint {
	return heap.num
}
// Insert pushes the input tag and key into the heap.
// Inserting a duplicate tag returns an error.
// The valid range of the key is (-inf, +inf]; a -inf key returns an error.
// Insert checks for a nil interface but not for an interface holding a nil
// pointer; the latter may still panic downstream.
func (heap *FibHeap) Insert(tag interface{}, key float64) error {
	if tag == nil {
		// Error strings follow Go conventions: lower case, no trailing space.
		return errors.New("input tag is nil")
	}
	// Delegate to the shared insert helper with no attached Value.
	return heap.insert(tag, key, nil)
}
// InsertValue pushes the input value into the heap.
// The input value must implement the Value interface.
// Inserting a duplicate tag returns an error.
// The valid range of the value's key is (-inf, +inf]; a -inf key returns an
// error. InsertValue checks for a nil interface but not for an interface
// holding a nil pointer; the latter panics inside value.Tag()/value.Key().
func (heap *FibHeap) InsertValue(value Value) error {
	if value == nil {
		// Error strings follow Go conventions: lower case, no trailing space.
		return errors.New("input value is nil")
	}
	return heap.insert(value.Tag(), value.Key(), value)
}
// Minimum returns the tag and key of the smallest entry (by key) without
// removing it from the heap. An empty heap yields nil and -inf.
func (heap *FibHeap) Minimum() (interface{}, float64) {
	if heap.num > 0 {
		return heap.min.tag, heap.min.key
	}
	return nil, math.Inf(-1)
}
// MinimumValue returns the smallest entry's value (by key) without removing
// it from the heap. An empty heap yields nil.
func (heap *FibHeap) MinimumValue() Value {
	if heap.num > 0 {
		return heap.min.value
	}
	return nil
}
// ExtractMin removes the current minimum entry from the heap and returns its
// tag and key. An empty heap yields nil and -inf and extracts nothing.
func (heap *FibHeap) ExtractMin() (interface{}, float64) {
	if heap.num > 0 {
		smallest := heap.extractMin()
		return smallest.tag, smallest.key
	}
	return nil, math.Inf(-1)
}
// ExtractMinValue removes the current minimum entry from the heap and returns
// its value. An empty heap yields nil and extracts nothing.
func (heap *FibHeap) ExtractMinValue() Value {
	if heap.num > 0 {
		return heap.extractMin().value
	}
	return nil
}
// Union merges the input heap into the receiver.
// No tag in the input heap may duplicate a tag already in the receiver;
// otherwise an error is returned before anything is merged.
func (heap *FibHeap) Union(anotherHeap *FibHeap) error {
	// Verify all tags up front so a duplicate cannot leave the receiver
	// partially merged.
	for tag := range anotherHeap.index {
		if _, exists := heap.index[tag]; exists {
			return errors.New("duplicate tag is found in the target heap")
		}
	}
	// Insert through the internal helper rather than InsertValue: entries
	// created via Insert carry a nil value, which InsertValue rejects, and
	// the previous code also discarded InsertValue's error — silently
	// dropping those entries from the merge.
	for _, n := range anotherHeap.index {
		if err := heap.insert(n.tag, n.key, n.value); err != nil {
			return err
		}
	}
	return nil
}
// DecreaseKey updates the entry identified by tag to the input key.
// An error is returned if the new key is larger than the current key, if the
// key is -inf (reserved for internal usage), or if the tag is not in the
// heap. DecreaseKey checks for a nil interface but not for an interface
// holding a nil pointer, which may still cause an invalid address panic.
func (heap *FibHeap) DecreaseKey(tag interface{}, key float64) error {
	if tag == nil {
		// Error strings follow Go conventions: lower case, no trailing space.
		return errors.New("input tag is nil")
	}
	if math.IsInf(key, -1) {
		return errors.New("negative infinity key is reserved for internal usage")
	}
	node, exists := heap.index[tag]
	if !exists {
		return errors.New("value is not found")
	}
	return heap.decreaseKey(node, nil, key)
}
// DecreaseKeyValue updates the entry matching the input value's tag with the
// input value and its key.
// An error is returned if the new key is larger than the current key, if the
// key is -inf (reserved for internal usage), or if the tag is not in the
// heap. DecreaseKeyValue checks for a nil interface but not for an interface
// holding a nil pointer; the latter panics inside value.Tag()/value.Key().
func (heap *FibHeap) DecreaseKeyValue(value Value) error {
	if value == nil {
		// Error strings follow Go conventions: lower case, no trailing space.
		return errors.New("input value is nil")
	}
	if math.IsInf(value.Key(), -1) {
		return errors.New("negative infinity key is reserved for internal usage")
	}
	node, exists := heap.index[value.Tag()]
	if !exists {
		return errors.New("value is not found")
	}
	return heap.decreaseKey(node, value, value.Key())
}
// IncreaseKey updates the tag in the heap by the input key.
// If the input key has a smaller key or -inf key, an error will be returned.
// If the input tag is not existed in the heap, an error will be returned.
// IncreaseKey will check the nil interface but not the interface with nil value.
// Try to input of an interface with nil value will cause invalid address panic.
func (heap *FibHeap) IncreaseKey(tag interface{}, key float64) error {
if tag == nil {
return errors.New("Input tag is nil ")
}
if math.IsInf(key, -1) {
return errors.New("Negative infinity key is reserved for internal usage ")
}
if node, exists := heap.index[tag]; exists {
return heap.increaseKey(node, nil, key)
}
return errors.New("Value is not found ")
}
// IncreaseKeyValue updates the value in the heap by the input value.
// If the input value has a smaller key or -inf key, an error will be returned.
// If the tag of the input value is not existed in the heap, an error will be returned.
// IncreaseKeyValue will check the nil interface but not the interface with nil value.
// Try to input of an interface with nil value will cause invalid address panic.
func (heap *FibHeap) IncreaseKeyValue(value Value) error {
if value == nil {
return errors.New("Input value is nil ")
}
if math.IsInf(value.Key(), -1) {
return errors.New("Negative infinity key is reserved for internal usage ")
}
if node, exists := heap.index[value.Tag()]; exists {
return heap.increaseKey(node, value, value.Key())
}
return errors.New("Value is not found ")
}
// Delete removes the entry registered under the given tag from the heap.
// A nil tag or an unknown tag yields an error; otherwise the entry is
// extracted via ExtractValue and nil is returned.
// Delete will check the nil interface but not the interface with nil value.
// Try to input of an interface with nil value will cause invalid address panic.
func (heap *FibHeap) Delete(tag interface{}) error {
	if tag == nil {
		return errors.New("Input tag is nil ")
	}
	_, found := heap.index[tag]
	if !found {
		return errors.New("Tag is not found ")
	}
	heap.ExtractValue(tag)
	return nil
}

// DeleteValue removes the entry whose tag matches the given value's tag.
// A nil value or an unknown tag yields an error; otherwise the entry is
// extracted via ExtractValue and nil is returned.
// DeleteValue will check the nil interface but not the interface with nil value.
// Try to input of an interface with nil value will cause invalid address panic.
func (heap *FibHeap) DeleteValue(value Value) error {
	if value == nil {
		return errors.New("Input value is nil ")
	}
	_, found := heap.index[value.Tag()]
	if !found {
		return errors.New("Value is not found ")
	}
	heap.ExtractValue(value.Tag())
	return nil
}
// GetTag searches and returns the key in the heap by the input tag.
// If the input tag does not exist in the heap, -inf will be returned.
// GetTag will not extract the value so the value will still exist in the heap.
func (heap *FibHeap) GetTag(tag interface{}) (key float64) {
	if node, exists := heap.index[tag]; exists {
		return node.key
	}
	// -inf doubles as the "not found" sentinel for key lookups.
	return math.Inf(-1)
}

// GetValue searches and returns the value in the heap by the input tag.
// If the input tag does not exist in the heap, nil will be returned.
// GetValue will not extract the value so the value will still exist in the heap.
func (heap *FibHeap) GetValue(tag interface{}) (value Value) {
	if node, exists := heap.index[tag]; exists {
		value = node.value
	}
	// The named result is nil when the tag was not found.
	return
}

// ExtractTag searches and extracts the tag/key in the heap by the input tag.
// If the input tag does not exist in the heap, -inf will be returned.
// ExtractTag will extract the value so the value will no longer exist in the heap.
func (heap *FibHeap) ExtractTag(tag interface{}) (key float64) {
	if node, exists := heap.index[tag]; exists {
		key = node.key
		heap.deleteNode(node)
		return
	}
	return math.Inf(-1)
}

// ExtractValue searches and extracts the value in the heap by the input tag.
// If the input tag does not exist in the heap, nil will be returned.
// ExtractValue will extract the value so the value will no longer exist in the heap.
func (heap *FibHeap) ExtractValue(tag interface{}) (value Value) {
	if node, exists := heap.index[tag]; exists {
		value = node.value
		heap.deleteNode(node)
		return
	}
	return nil
}
// String provides some basic debug information of the heap.
// It returns the total number, roots size, index size and current minimum value of the heap.
// It also returns the topology of the trees by dfs search.
func (heap *FibHeap) String() string {
	var buffer bytes.Buffer
	if heap.num != 0 {
		buffer.WriteString(fmt.Sprintf("Total number: %d, Root Size: %d, Index size: %d,\n", heap.num, heap.roots.Len(), len(heap.index)))
		// Fixed typo in the debug output: "minimun" -> "minimum".
		buffer.WriteString(fmt.Sprintf("Current minimum: key(%f), tag(%v), value(%v),\n", heap.min.key, heap.min.tag, heap.min.value))
		// Constant text needs no Sprintf; WriteString avoids the extra formatting pass.
		buffer.WriteString("Heap detail:\n")
		probeTree(&buffer, heap.roots)
		buffer.WriteString("\n")
	} else {
		buffer.WriteString("Heap is empty.\n")
	}
	return buffer.String()
}
// probeTree appends a depth-first textual dump of the given tree list to
// buffer: each node's key is printed, recursing into non-empty children,
// with every list wrapped in "< ... > " brackets.
func probeTree(buffer *bytes.Buffer, tree *list.List) {
	// Constant delimiters go straight to the buffer; only the key needs formatting.
	buffer.WriteString("< ")
	for e := tree.Front(); e != nil; e = e.Next() {
		n := e.Value.(*node)
		fmt.Fprintf(buffer, "%f ", n.key)
		if n.children.Len() != 0 {
			probeTree(buffer, n.children)
		}
	}
	buffer.WriteString("> ")
}
// consolidate merges root trees of equal degree until every root has a
// distinct degree (the classic Fibonacci-heap consolidation step), then
// recomputes the minimum pointer from the resulting root list.
func (heap *FibHeap) consolidate() {
	// Clear the stale degree-table entries for the current roots.
	for tree := heap.roots.Front(); tree != nil; tree = tree.Next() {
		heap.treeDegrees[tree.Value.(*node).position] = nil
	}
	for tree := heap.roots.Front(); tree != nil; {
		// First root seen with this degree: register it and move on.
		if heap.treeDegrees[tree.Value.(*node).degree] == nil {
			heap.treeDegrees[tree.Value.(*node).degree] = tree
			tree.Value.(*node).position = tree.Value.(*node).degree
			tree = tree.Next()
			continue
		}
		// This root is already the registered tree for its degree.
		if heap.treeDegrees[tree.Value.(*node).degree] == tree {
			tree = tree.Next()
			continue
		}
		// Keep linking this tree with the registered tree of equal degree;
		// each link bumps the degree, which may collide again.
		for heap.treeDegrees[tree.Value.(*node).degree] != nil {
			anotherTree := heap.treeDegrees[tree.Value.(*node).degree]
			heap.treeDegrees[tree.Value.(*node).degree] = nil
			if tree.Value.(*node).key <= anotherTree.Value.(*node).key {
				// The root with the smaller key becomes the parent.
				heap.roots.Remove(anotherTree)
				heap.link(tree.Value.(*node), anotherTree.Value.(*node))
			} else {
				heap.roots.Remove(tree)
				heap.link(anotherTree.Value.(*node), tree.Value.(*node))
				// Continue with the surviving root.
				tree = anotherTree
			}
		}
		heap.treeDegrees[tree.Value.(*node).degree] = tree
		tree.Value.(*node).position = tree.Value.(*node).degree
	}
	heap.resetMin()
}

// insert creates a new node for (tag, key, value), appends it to the root
// list, registers it in the tag index and updates the minimum pointer.
// It rejects a -inf key (reserved internally) and duplicate tags.
func (heap *FibHeap) insert(tag interface{}, key float64, value Value) error {
	if math.IsInf(key, -1) {
		return errors.New("Negative infinity key is reserved for internal usage ")
	}
	if _, exists := heap.index[tag]; exists {
		return errors.New("Duplicate tag is not allowed ")
	}
	node := new(node)
	node.children = list.New()
	node.tag = tag
	node.key = key
	node.value = value
	// New nodes always start as roots; self remembers their list element.
	node.self = heap.roots.PushBack(node)
	heap.index[node.tag] = node
	heap.num++
	if heap.min == nil || heap.min.key > node.key {
		heap.min = node
	}
	return nil
}

// extractMin detaches the current minimum node: its children are promoted
// to roots, the node is removed from the root list, the degree table and
// the tag index, and the root list is consolidated (unless the heap is now
// empty). The detached node is returned to the caller.
func (heap *FibHeap) extractMin() *node {
	min := heap.min
	children := heap.min.children
	if children != nil {
		// Promote every child of the minimum to the root list.
		for e := children.Front(); e != nil; e = e.Next() {
			e.Value.(*node).parent = nil
			e.Value.(*node).self = heap.roots.PushBack(e.Value.(*node))
		}
	}
	heap.roots.Remove(heap.min.self)
	heap.treeDegrees[min.position] = nil
	delete(heap.index, heap.min.tag)
	heap.num--
	if heap.num == 0 {
		heap.min = nil
	} else {
		// consolidate also re-establishes heap.min.
		heap.consolidate()
	}
	return min
}
// deleteNode removes n from the heap by forcing its key to -inf (making it
// the global minimum) and then extracting the minimum.
func (heap *FibHeap) deleteNode(n *node) {
	// The error is deliberately ignored: -inf is smaller than any stored
	// key because the public entry points reject -inf keys.
	heap.decreaseKey(n, n.value, math.Inf(-1))
	heap.ExtractMinValue()
}

// link makes child a child of parent; used by consolidate when two roots
// of equal degree are merged.
func (heap *FibHeap) link(parent, child *node) {
	child.marked = false
	child.parent = parent
	child.self = parent.children.PushBack(child)
	parent.degree++
}

// resetMin rescans the root list and points heap.min at the root with the
// smallest key. The root list must be non-empty when this is called.
func (heap *FibHeap) resetMin() {
	heap.min = heap.roots.Front().Value.(*node)
	for tree := heap.min.self.Next(); tree != nil; tree = tree.Next() {
		if tree.Value.(*node).key < heap.min.key {
			heap.min = tree.Value.(*node)
		}
	}
}

// decreaseKey lowers n's key, stores value on the node and restores the
// heap invariant by cutting n from its parent (with cascading cuts) when
// the new key violates the heap order.
// NOTE(review): value is stored unconditionally, so tag-mode callers that
// pass nil clear the node's stored value.
func (heap *FibHeap) decreaseKey(n *node, value Value, key float64) error {
	if key >= n.key {
		return errors.New("New key is not smaller than current key ")
	}
	n.key = key
	n.value = value
	if n.parent != nil {
		parent := n.parent
		if n.key < n.parent.key {
			// Heap order violated: move n to the root list and
			// propagate cuts up through marked ancestors.
			heap.cut(n)
			heap.cascadingCut(parent)
		}
	}
	// n is (now) a root; refresh the minimum pointer if needed.
	if n.parent == nil && n.key < heap.min.key {
		heap.min = n
	}
	return nil
}

// increaseKey raises n's key, stores value on the node, cuts any children
// whose keys became smaller than n's, and recomputes the minimum if n was
// the minimum.
func (heap *FibHeap) increaseKey(n *node, value Value, key float64) error {
	if key <= n.key {
		return errors.New("New key is not larger than current key ")
	}
	n.key = key
	n.value = value
	child := n.children.Front()
	for child != nil {
		// Advance before cutting: cut removes childNode from n.children,
		// which would invalidate the current list element.
		childNode := child.Value.(*node)
		child = child.Next()
		if childNode.key < n.key {
			heap.cut(childNode)
			heap.cascadingCut(n)
		}
	}
	if heap.min == n {
		heap.resetMin()
	}
	return nil
}

// cut detaches n from its parent and moves it to the root list, clearing
// its mark.
func (heap *FibHeap) cut(n *node) {
	n.parent.children.Remove(n.self)
	n.parent.degree--
	n.parent = nil
	n.marked = false
	n.self = heap.roots.PushBack(n)
}

// cascadingCut implements the Fibonacci-heap marking rule: the first time
// a non-root node loses a child it is marked; losing a second child cuts
// it too, recursing towards the root.
func (heap *FibHeap) cascadingCut(n *node) {
	if n.parent != nil {
		if !n.marked {
			n.marked = true
		} else {
			parent := n.parent
			heap.cut(n)
			heap.cascadingCut(parent)
		}
	}
}
func (heap *FibHeap) GetIter() []interface{} {
var valueSlice []interface{}
for _, node := range(heap.index){
valueSlice = append(valueSlice, node.tag)
}
return valueSlice
} | BirthdayServer/src/BirthdayBot/fibonacci/FibonacciHeap.go | 0.87456 | 0.462655 | FibonacciHeap.go | starcoder |
package num
// Int a wrapper to a signed big int
type Int struct {
	// The unsigned version of the integer
	U *Uint
	// The sign of the integer true = positive, false = negative
	s bool
}

// IntFromUint returns a new Int holding a clone of u with the given sign
// (true = positive, false = negative); the caller keeps ownership of u.
func IntFromUint(u *Uint, s bool) *Int {
	// Renamed the local from "copy", which shadowed the builtin copy function.
	result := &Int{
		U: u.Clone(),
		s: s,
	}
	return result
}
// IsNegative reports whether the stored value is strictly below zero
// (negative sign and non-zero magnitude).
func (i *Int) IsNegative() bool {
	return !(i.s || i.U.IsZero())
}

// IsPositive reports whether the stored value is strictly above zero
// (positive sign and non-zero magnitude).
func (i *Int) IsPositive() bool {
	if !i.s {
		return false
	}
	return !i.U.IsZero()
}

// IsZero reports whether the stored value equals zero, regardless of sign.
func (i *Int) IsZero() bool {
	return i.U.IsZero()
}

// FlipSign negates the number by toggling its sign flag.
func (i *Int) FlipSign() {
	i.s = !i.s
}

// Clone returns an independent copy; the magnitude is cloned so nothing
// is shared with the receiver.
func (i Int) Clone() *Int {
	c := Int{
		U: i.U.Clone(),
		s: i.s,
	}
	return &c
}
// GT returns if i > o
func (i Int) GT(o *Int) bool {
	if i.IsNegative() {
		// A negative i can only exceed another negative with larger magnitude... 
		// i.e. with a *smaller* absolute value.
		if o.IsPositive() || o.IsZero() {
			return false
		}
		return i.U.LT(o.U)
	}
	if i.IsPositive() {
		if o.IsZero() || o.IsNegative() {
			return true
		}
		return i.U.GT(o.U)
	}
	// i is zero: it is greater only than a negative o.
	return o.IsNegative()
}

// LT returns if i < o
func (i Int) LT(o *Int) bool {
	if i.IsNegative() {
		if o.IsPositive() || o.IsZero() {
			return true
		}
		// Both negative: smaller means larger absolute value.
		return i.U.GT(o.U)
	}
	if i.IsPositive() {
		if o.IsZero() || o.IsNegative() {
			return false
		}
		return i.U.LT(o.U)
	}
	// i is zero: it is less only than a positive o.
	return o.IsPositive()
}

// Int64 returns the value as a signed int64.
// NOTE(review): magnitudes that do not fit in an int64 will be truncated by
// the Uint64/int64 conversions — confirm whether callers guarantee the range.
func (i Int) Int64() int64 {
	val := int64(i.U.Uint64())
	if i.IsNegative() {
		return -val
	}
	return val
}

// String returns a string version of the number, with a leading "-" for
// negative values.
func (i Int) String() string {
	val := i.U.String()
	if i.IsNegative() {
		return "-" + val
	}
	return val
}

// Add will add the passed in value to the base value
// i = i + a
// The receiver is mutated and also returned for chaining; a is not modified.
func (i *Int) Add(a *Int) *Int {
	// Handle cases where we have a zero
	if a.IsZero() {
		return i
	}
	if i.IsZero() {
		i.U.Set(a.U)
		i.s = a.s
		return i
	}
	// Handle the easy cases were both are the same sign
	if i.IsPositive() && a.IsPositive() {
		i.U.Add(i.U, a.U)
		return i
	}
	if i.IsNegative() && a.IsNegative() {
		i.U.Add(i.U, a.U)
		return i
	}
	// Now the cases where the signs are different:
	// subtract the smaller magnitude from the larger and keep that sign.
	if i.IsNegative() {
		if i.U.GTE(a.U) {
			// abs(i) >= a
			i.U.Sub(i.U, a.U)
		} else {
			// abs(i) < a
			i.U.Sub(a.U, i.U)
			i.s = true
		}
		return i
	}
	if i.U.GTE(a.U) {
		// i >= abs(a)
		i.U.Sub(i.U, a.U)
	} else {
		// i < abs(a)
		i.U.Sub(a.U, i.U)
		i.s = false
	}
	return i
}

// Sub will subtract the passed in value from the base value
// i = i - a
// NOTE(review): a's sign is flipped temporarily and restored afterwards, so
// Sub is not safe if a aliases i or if a is used concurrently — confirm.
func (i *Int) Sub(a *Int) *Int {
	a.FlipSign()
	i.Add(a)
	a.FlipSign()
	return i
}

// AddSum adds all of the parameters to i
// i = i + a + b + c
func (i *Int) AddSum(vals ...*Int) *Int {
	for _, x := range vals {
		i.Add(x)
	}
	return i
}

// SubSum subtracts all of the parameters from i
// i = i - a - b - c
func (i *Int) SubSum(vals ...*Int) *Int {
	for _, x := range vals {
		i.Sub(x)
	}
	return i
}

// NewInt creates a new Int with the value of the
// int64 passed as a parameter.
func NewInt(val int64) *Int {
	if val < 0 {
		// Negate before converting; for math.MinInt64 the wrap-around of
		// -val still converts to the correct magnitude 2^63 in uint64.
		return &Int{U: NewUint(uint64(-val)),
			s: false}
	}
	return &Int{U: NewUint(uint64(val)),
		s: true}
}
// NewIntFromUint creates a new Int with the value of the
// uint passed as a parameter.
func NewIntFromUint(val *Uint) *Int {
return &Int{U: val,
s: true}
} | types/num/int.go | 0.758063 | 0.446012 | int.go | starcoder |
package graph
import (
"fmt"
"github.com/cycloidio/inframap/errcode"
)
// Graph defines the standard format of a Graph.
// The exported Edges/Nodes slices are mirrored by several private lookup
// maps that must be kept in sync by the mutation methods below.
type Graph struct {
	Edges []*Edge
	Nodes []*Node

	// nodesCans maps canonical -> Node for canonical lookups.
	nodesCans map[string]*Node

	// nodesIDs maps id -> Node for ID lookups.
	nodesIDs map[string]*Node

	// nodesWithEdge maps a node id -> the edges touching it (as source or target).
	nodesWithEdge map[string][]*Edge

	// edgesSourceTarget maps (source+target) -> Edge;
	// used to validate that the direction already exists.
	edgesSourceTarget map[string]*Edge

	// edgesIDs maps edge id -> Edge.
	edgesIDs map[string]*Edge
}

// New returns a new initialized Graph with all internal lookup maps allocated
// (the Edges/Nodes slices start nil and grow on demand).
func New() *Graph {
	return &Graph{
		nodesCans:         make(map[string]*Node),
		nodesIDs:          make(map[string]*Node),
		edgesSourceTarget: make(map[string]*Edge),
		edgesIDs:          make(map[string]*Edge),
		nodesWithEdge:     make(map[string][]*Edge),
	}
}
// AddEdge adds an Edge to the Graph after validating that it has an ID,
// a known Source and Target node, and that neither the (source,target)
// direction nor the edge ID already exists.
// NOTE(review): the direction key is the plain concatenation Source+Target,
// which can collide for IDs like "a"+"bc" vs "ab"+"c" — presumably IDs make
// this unambiguous in practice; confirm.
func (g *Graph) AddEdge(e *Edge) error {
	if e.ID == "" {
		return errcode.ErrGraphRequiredEdgeID
	}
	if e.Target == "" {
		return errcode.ErrGraphRequiredEdgeTarget
	}
	if e.Source == "" {
		return errcode.ErrGraphRequiredEdgeSource
	}
	if _, ok := g.nodesIDs[e.Target]; !ok {
		return errcode.ErrGraphNotFoundEdgeTarget
	}
	if _, ok := g.nodesIDs[e.Source]; !ok {
		return errcode.ErrGraphNotFoundEdgeSource
	}
	check := e.Source + e.Target
	if _, ok := g.edgesSourceTarget[check]; ok {
		return errcode.ErrGraphAlreadyExistsEdge
	}
	if _, ok := g.edgesIDs[e.ID]; ok {
		return errcode.ErrGraphAlreadyExistsEdgeID
	}
	// Register the edge in every lookup structure plus both endpoints.
	g.edgesSourceTarget[check] = e
	g.edgesIDs[e.ID] = e
	g.nodesWithEdge[e.Source] = append(g.nodesWithEdge[e.Source], e)
	g.nodesWithEdge[e.Target] = append(g.nodesWithEdge[e.Target], e)
	g.Edges = append(g.Edges, e)
	return nil
}

// AddNode adds a Node to the Graph after validating that it has both a
// Canonical and an ID and that neither is already registered.
func (g *Graph) AddNode(n *Node) error {
	if n.Canonical == "" {
		return errcode.ErrGraphRequiredNodeCanonical
	}
	if n.ID == "" {
		return errcode.ErrGraphRequiredNodeID
	}
	if _, ok := g.nodesCans[n.Canonical]; ok {
		return fmt.Errorf("with canonical %q: %w", n.Canonical, errcode.ErrGraphAlreadyExistsNode)
	}
	if _, ok := g.nodesIDs[n.ID]; ok {
		return errcode.ErrGraphAlreadyExistsNodeID
	}
	g.nodesCans[n.Canonical] = n
	g.nodesIDs[n.ID] = n
	g.Nodes = append(g.Nodes, n)
	return nil
}
// GetNodeByID returns the Node registered under the given ID, or
// ErrGraphNotFoundNode when no such node exists.
func (g *Graph) GetNodeByID(nID string) (*Node, error) {
	if n, ok := g.nodesIDs[nID]; ok {
		return n, nil
	}
	return nil, errcode.ErrGraphNotFoundNode
}

// GetNodeByCanonical returns the Node registered under the given canonical,
// or ErrGraphNotFoundNode when no such node exists.
func (g *Graph) GetNodeByCanonical(nCan string) (*Node, error) {
	if n, ok := g.nodesCans[nCan]; ok {
		return n, nil
	}
	return nil, errcode.ErrGraphNotFoundNode
}
// Clean removes all the Nodes that do not have any edge.
func (g *Graph) Clean() {
	nodesToRemove := make([]int, 0)
	for i, n := range g.Nodes {
		if _, ok := g.nodesWithEdge[n.ID]; !ok {
			nodesToRemove = append(nodesToRemove, i)
		}
	}
	// For each iteration we have to decrease the next 'idx'
	// by 'i' as we removed 'i' elements (indices were collected against
	// the original slice, which shrinks as we delete).
	for i, idx := range nodesToRemove {
		idx -= i
		g.removeNodeByIDX(idx)
	}
}

// GetEdgesForNode returns all the edges that have relation to this nID
// (as either source or target); nil when the node has no edges.
func (g *Graph) GetEdgesForNode(nID string) []*Edge {
	return g.nodesWithEdge[nID]
}

// Replace will replace the srcID Node for the repID Node by removing the srcID
// and connecting all the edges from srcID to repID.
// srcID Node and repID Node have to be connected directly.
func (g *Graph) Replace(srcID, repID string) error {
	srcEdges := g.GetEdgesForNode(srcID)
	srcNode, err := g.GetNodeByID(srcID)
	if err != nil {
		return err
	}
	repNode, err := g.GetNodeByID(repID)
	if err != nil {
		return err
	}
	// mutualEdge is the edge that connects this 2 Nodes
	var mutualEdge *Edge
	for _, e := range srcEdges {
		// Depending on the direction of the connection
		// we increase or decrease the Node.Weight
		if e.Source == srcID && e.Target == repID {
			repNode.Weight--
			mutualEdge = e
			break
		} else if e.Source == repID && e.Target == srcID {
			repNode.Weight++
			mutualEdge = e
			break
		}
	}
	if mutualEdge == nil {
		return fmt.Errorf("no mutual edge between srcID %q and repID %s: %w", srcID, repID, errcode.ErrGraphRequiredEdgeBetweenNodes)
	}
	for _, e := range srcEdges {
		if e.ID == mutualEdge.ID {
			continue
		}
		// Replace all the connections from the srcID to the repID
		err := e.Replace(srcID, repID)
		if err != nil {
			return err
		}
		// NOTE(review): append on mutualEdge.Canonicals may reuse its backing
		// array across iterations of this loop — confirm AddCanonicals copies.
		e.AddCanonicals(append(mutualEdge.Canonicals, srcNode.Canonical)...)
		ee, okstt := g.edgesSourceTarget[e.Source+e.Target]
		// If the Edge does not exists we register it
		// If it does then we remove it as we do not want repeated edges
		if !okstt {
			g.nodesWithEdge[repID] = append(g.nodesWithEdge[repID], e)
			g.edgesSourceTarget[e.Source+e.Target] = e
		} else {
			// Before removing repeated edges we add the
			// canonicals from the edge we want to delete
			ee.AddCanonicals(e.Canonicals...)
			g.removeEdgeByID(e.ID)
		}
	}
	if err = g.RemoveNodeByID(srcID); err != nil {
		return err
	}
	return nil
}

// InvertEdge inverts the Source and Target of the eID.
// NOTE(review): the loop keeps scanning after a match; presumably edge IDs
// are unique so this is only a minor inefficiency — confirm.
func (g *Graph) InvertEdge(eID string) {
	for _, e := range g.Edges {
		if e.ID == eID {
			// Re-key the direction map since the key is Source+Target.
			delete(g.edgesSourceTarget, e.Source+e.Target)
			src := e.Source
			e.Source = e.Target
			e.Target = src
			g.edgesSourceTarget[e.Source+e.Target] = e
		}
	}
}

// RemoveNodeByID removes the Node with the ID and the Edges
// associated with it. ErrGraphNotFoundNode is returned for unknown IDs.
func (g *Graph) RemoveNodeByID(ID string) error {
	for i, n := range g.Nodes {
		if n.ID == ID {
			idx := i
			g.removeNodeByIDX(idx)
			return nil
		}
	}
	return errcode.ErrGraphNotFoundNode
}

// removeNodeByIDX removes the idx element (via the copy) and then
// removes the last element as it's not needed. It also removes
// the Edges that where connected to this Node.
func (g *Graph) removeNodeByIDX(idx int) {
	n := g.Nodes[idx]
	delete(g.nodesCans, n.Canonical)
	delete(g.nodesIDs, n.ID)
	delete(g.nodesWithEdge, n.ID)
	lenNodes := len(g.Nodes)
	copy(g.Nodes[idx:], g.Nodes[idx+1:])
	g.Nodes = g.Nodes[:lenNodes-1]
	// Remove the Edges from the Graph
RESTART:
	for _, e := range g.Edges {
		if e.Target == n.ID || e.Source == n.ID {
			g.removeEdgeByID(e.ID)
			// We restart the loop because this operation potentially
			// changes the g.Edges order/items
			goto RESTART
		}
	}
}

// removeEdgeByID removes the Edge with the ID from every lookup structure:
// the direction/ID maps, the Edges slice, and both endpoints' edge lists.
// While pruning an endpoint list, any co-directed sibling edge absorbs the
// removed edge's canonicals.
func (g *Graph) removeEdgeByID(ID string) {
	for i, e := range g.Edges {
		if e.ID == ID {
			delete(g.edgesSourceTarget, e.Source+e.Target)
			delete(g.edgesIDs, e.ID)

			// Splice the edge out of the flat Edges slice.
			lenEdges := len(g.Edges)
			copy(g.Edges[i:], g.Edges[i+1:])
			g.Edges = g.Edges[:lenEdges-1]

			// Remove the edge from the list of edges
			// that each node has
			sedges := g.nodesWithEdge[e.Source]
			for ii, ee := range sedges {
				if ee.ID == e.ID {
					lenEdges = len(sedges)
					copy(sedges[ii:], sedges[ii+1:])
					sedges = sedges[:lenEdges-1]
				} else if e.Target == ee.Target && e.Source == ee.Source {
					ee.AddCanonicals(e.Canonicals...)
				}
			}
			g.nodesWithEdge[e.Source] = sedges

			tedges := g.nodesWithEdge[e.Target]
			for ii, ee := range tedges {
				if ee.ID == e.ID {
					lenEdges = len(tedges)
					copy(tedges[ii:], tedges[ii+1:])
					tedges = tedges[:lenEdges-1]
				} else if e.Target == ee.Target && e.Source == ee.Source {
					ee.AddCanonicals(e.Canonicals...)
				}
			}
			g.nodesWithEdge[e.Target] = tedges
		}
	}
}
package less
import "sort"
// lessThan reports a < b under the supplied ordering.
func lessThan(a, b string, less func(a, b string) bool) bool {
	return less(a, b)
}

// lessThanOrEqualTo reports a <= b under the supplied ordering.
func lessThanOrEqualTo(a, b string, less func(a, b string) bool) bool {
	if less(a, b) {
		return true
	}
	return !less(b, a)
}

// greaterThan reports a > b under the supplied ordering.
func greaterThan(a, b string, less func(a, b string) bool) bool {
	return less(b, a)
}

// greaterThanOrEqualTo reports a >= b under the supplied ordering.
func greaterThanOrEqualTo(a, b string, less func(a, b string) bool) bool {
	if less(b, a) {
		return true
	}
	return !less(a, b)
}

// equalTo reports a == b under the supplied ordering: neither value
// compares less than the other.
func equalTo(a, b string, less func(a, b string) bool) bool {
	return !(less(a, b) || less(b, a))
}
// Less represents a less function that uses string as the parameter type.
type Less func(a, b string) bool

// LessThan returns true for "a < b"
func (less Less) LessThan(a, b string) bool {
	return lessThan(a, b, less)
}

// LessThanOrEqualTo returns true for "a <= b"
func (less Less) LessThanOrEqualTo(a, b string) bool {
	return lessThanOrEqualTo(a, b, less)
}

// GreaterThan returns true for "a > b"
func (less Less) GreaterThan(a, b string) bool {
	return greaterThan(a, b, less)
}

// GreaterThanOrEqualTo returns true for "a >= b"
func (less Less) GreaterThanOrEqualTo(a, b string) bool {
	return greaterThanOrEqualTo(a, b, less)
}

// EqualTo returns true for "a == b" (neither side compares less).
func (less Less) EqualTo(a, b string) bool {
	return equalTo(a, b, less)
}

// stringSlice adapts a []string plus a less function to sort.Interface.
type stringSlice struct {
	less func(a, b string) bool
	arr  []string
}

// Len implements sort.Interface.
func (a stringSlice) Len() int {
	return len(a.arr)
}

// Less implements sort.Interface using the wrapped comparison function.
func (a stringSlice) Less(i, j int) bool {
	return a.less(a.arr[i], a.arr[j])
}

// Swap implements sort.Interface.
func (a stringSlice) Swap(i, j int) {
	a.arr[i], a.arr[j] = a.arr[j], a.arr[i]
}

// Sort sorts data in place. The sort is not guaranteed to be stable.
func (less Less) Sort(arr []string) {
	sort.Sort(stringSlice{less, arr})
}

// Stable sorts data in place while keeping the original order of equal elements.
func (less Less) Stable(arr []string) {
	sort.Stable(stringSlice{less, arr})
}

// bytesLessThan reports a < b under the supplied ordering.
func bytesLessThan(a, b []byte, less func(a, b []byte) bool) bool {
	return less(a, b)
}

// bytesLessThanOrEqualTo reports a <= b under the supplied ordering.
func bytesLessThanOrEqualTo(a, b []byte, less func(a, b []byte) bool) bool {
	return less(a, b) || !less(b, a)
}

// bytesGreaterThan reports a > b under the supplied ordering.
func bytesGreaterThan(a, b []byte, less func(a, b []byte) bool) bool {
	return less(b, a)
}

// bytesGreaterThanOrEqualTo reports a >= b under the supplied ordering.
func bytesGreaterThanOrEqualTo(a, b []byte, less func(a, b []byte) bool) bool {
	return less(b, a) || !less(a, b)
}

// bytesEqualTo reports a == b under the supplied ordering.
func bytesEqualTo(a, b []byte, less func(a, b []byte) bool) bool {
	return !less(a, b) && !less(b, a)
}

// BytesLess represents a less function that uses []byte as the parameter type.
type BytesLess func(a, b []byte) bool

// LessThan returns true for "a < b"
func (less BytesLess) LessThan(a, b []byte) bool {
	return bytesLessThan(a, b, less)
}

// LessThanOrEqualTo returns true for "a <= b"
func (less BytesLess) LessThanOrEqualTo(a, b []byte) bool {
	return bytesLessThanOrEqualTo(a, b, less)
}

// GreaterThan returns true for "a > b"
func (less BytesLess) GreaterThan(a, b []byte) bool {
	return bytesGreaterThan(a, b, less)
}

// GreaterThanOrEqualTo returns true for "a >= b"
func (less BytesLess) GreaterThanOrEqualTo(a, b []byte) bool {
	return bytesGreaterThanOrEqualTo(a, b, less)
}

// EqualTo returns true for "a == b"
func (less BytesLess) EqualTo(a, b []byte) bool {
	return bytesEqualTo(a, b, less)
}

// bytesSlice adapts a [][]byte plus a less function to sort.Interface.
type bytesSlice struct {
	less func(a, b []byte) bool
	arr  [][]byte
}

// Len implements sort.Interface.
func (a bytesSlice) Len() int {
	return len(a.arr)
}

// Less implements sort.Interface using the wrapped comparison function.
func (a bytesSlice) Less(i, j int) bool {
	return a.less(a.arr[i], a.arr[j])
}

// Swap implements sort.Interface.
func (a bytesSlice) Swap(i, j int) {
	a.arr[i], a.arr[j] = a.arr[j], a.arr[i]
}
// Sort sorts data. The sort is not guaranteed to be stable.
func (less BytesLess) Sort(arr [][]byte) {
sort.Sort(bytesSlice{less, arr})
}
// Stable sorts data while keeping the original order of equal elements.
func (less BytesLess) Stable(arr [][]byte) {
sort.Stable(bytesSlice{less, arr})
} | vendor/github.com/tidwall/less/less.go | 0.872143 | 0.688629 | less.go | starcoder |
package marchingsquares
import (
"math/rand"
"time"
"github.com/hajimehoshi/ebiten"
)
// init seeds the global math/rand source so each run produces a different map.
// NOTE(review): rand.Seed is deprecated since Go 1.20 — consider rand.New or
// math/rand/v2 if the module's Go version allows.
func init() {
	rand.Seed(time.Now().UTC().UnixNano())
}

// MapGenerator contains map generator state
type MapGenerator struct {
	// Grid dimensions in cells.
	width, height int
	// Side length of one grid square when meshing.
	squareSize float32
	// seed/useRandomSeed are currently unused by the visible code.
	seed          string
	useRandomSeed bool
	// Percentage (0-100) of cells initially filled as walls.
	randomFillPercent int
	// atlas is the generated grid; non-empty means GenerateMap already ran.
	atlas [][]int
	dx, dy int
	// mg produces the renderable mesh from the atlas.
	mg *MeshGenerator
}

// NewMapGenerator instantiates a map generator with the given fill
// percentage, grid dimensions and square size; the map itself is built
// lazily by GenerateMap.
func NewMapGenerator(randomFillPercent, width, height int, squareSize float32) *MapGenerator {
	return &MapGenerator{randomFillPercent: randomFillPercent, width: width, height: height, squareSize: squareSize}
}
// GenerateMap generates the map by filling it randomly, smoothing it and
// generating the mesh. It is idempotent: if an atlas already exists the
// call is a no-op. The error result is currently always nil.
func (mg *MapGenerator) GenerateMap() error {
	if len(mg.atlas) > 0 {
		return nil
	}
	mg.atlas = RandomFillMap(mg.width, mg.height, mg.randomFillPercent)
	// Four smoothing passes settle the cellular automaton into cave shapes.
	for i := 0; i < 4; i++ {
		mg.atlas = SmoothMap(mg.atlas, mg.width, mg.height)
	}
	mg.atlas = InvertMap(mg.atlas, mg.width, mg.height)
	mg.mg = &MeshGenerator{}
	mg.mg.GenerateMesh(mg.atlas, mg.squareSize)
	return nil
}

// GetTriangles returns the vertices and the indices after generating the mesh.
// GenerateMap must have been called first (mg.mg is nil otherwise).
func (mg *MapGenerator) GetTriangles() ([]ebiten.Vertex, []uint16) {
	return mg.mg.GetTriangles()
}
// RandomFillMap builds a w×h grid in which each cell is independently set
// to 1 with probability percent/100 (0 otherwise), drawing from the global
// math/rand source.
func RandomFillMap(w, h, percent int) (atlas [][]int) {
	atlas = make([][]int, w)
	for x := range atlas {
		column := make([]int, h)
		for y := range column {
			if rand.Intn(100) < percent {
				column[y] = 1
			}
		}
		atlas[x] = column
	}
	return
}
// SmoothMap runs one cellular-automaton smoothing pass over the grid:
// a cell with more than 4 wall neighbours becomes a wall, one with fewer
// than 4 becomes empty, and one with exactly 4 keeps its state.
// The grid is updated in place, so cells later in the scan see already
// smoothed neighbours from the same pass (the original behaviour).
// The (mutated) atlas is returned for convenience.
func SmoothMap(atlas [][]int, width, height int) [][]int {
	for x := 0; x < width; x++ {
		for y := 0; y < height; y++ {
			// Scoped to the loop body; the previous version kept this
			// counter alive across the whole function for no reason.
			neighborWallTiles := CountWalls(atlas, width, height, x, y)
			if neighborWallTiles > 4 {
				atlas[x][y] = 1
			} else if neighborWallTiles < 4 {
				atlas[x][y] = 0
			}
		}
	}
	return atlas
}
// CountWalls returns how many of the eight neighbours of (gridX, gridY)
// are walls. Positions outside the grid boundary count as walls, which
// biases the map edges towards solid terrain.
func CountWalls(atlas [][]int, width, height int, gridX, gridY int) int {
	walls := 0
	for dx := -1; dx <= 1; dx++ {
		for dy := -1; dy <= 1; dy++ {
			if dx == 0 && dy == 0 {
				// Skip the cell itself.
				continue
			}
			nx, ny := gridX+dx, gridY+dy
			if nx < 0 || nx >= width || ny < 0 || ny >= height {
				// Out-of-bounds neighbours are treated as walls.
				walls++
				continue
			}
			walls += atlas[nx][ny]
		}
	}
	return walls
}
// InvertMap flips every cell of the grid in place (0 becomes 1 and any
// other value becomes 0) and returns the same slice for convenience.
func InvertMap(atlas [][]int, width, height int) [][]int {
	for x := 0; x < width; x++ {
		row := atlas[x]
		for y := 0; y < height; y++ {
			switch row[y] {
			case 0:
				row[y] = 1
			default:
				row[y] = 0
			}
		}
	}
	return atlas
}
package any
import (
"reflect"
"emperror.dev/emperror"
"emperror.dev/errors"
)
// Merge returns the merge of its first and second arguments.
func Merge(fst, snd Value, options ...MergeOption) (Value, error) {
return MergeWithContext(NewMergeContext(options...), fst, snd)
}
// MustMerge returns the merge of its first and second arguments.
// It panics if there were any errors during merging.
func MustMerge(fst, snd Value, options ...MergeOption) Value {
val, err := Merge(fst, snd, options...)
emperror.Panic(err)
return val
}
// MergeWithContext returns the merge of fst and snd using the specified MergeContext.
func MergeWithContext(ctx MergeContext, fst, snd Value) (Value, error) {
if ctx.shouldCheckEquality() && reflect.DeepEqual(fst, snd) {
return fst, nil
}
ctx.depth++
fstT, sndT := reflect.TypeOf(fst), reflect.TypeOf(snd)
for _, tp := range []typePair{{fstT, sndT}, {fstT, nil}, {nil, sndT}, {nil, nil}} {
if ms := ctx.strategies[tp]; ms != nil {
return ms.Merge(ctx, fst, snd)
}
}
return nil, errors.Errorf("cannot merge values of type %T and %T", fst, snd)
}
// MergeContext stores merge state and configuration
type MergeContext struct {
equalityCheck equalityCheckOption
depth int
strategies map[typePair]MergeStrategy
}
// NewMergeContext returns a new MergeContext with the specified MergeOptions applied to it.
func NewMergeContext(options ...MergeOption) MergeContext {
ctx := MergeContext{}
MergeOptions(options).apply(&ctx)
return ctx
}
func (ctx MergeContext) shouldCheckEquality() bool {
switch ctx.equalityCheck {
case WithInitialEqualityCheck:
return ctx.depth == 0
case WithSubtreeEqualityChecks:
return true
}
return false
}
type equalityCheckOption int
const (
// WithoutEqualityChecks option makes the merge skip all pre-merge equality checks.
WithoutEqualityChecks equalityCheckOption = iota
// WithInitialEqualityCheck option makes the merge check for equality of the input values before merging them to potentially skip costly merges.
WithInitialEqualityCheck
// WithSubtreeEqualityChecks option makes the merge check for equality of every value pair before merging them to potentially skip costly merges.
WithSubtreeEqualityChecks
)
func (o equalityCheckOption) apply(ctx *MergeContext) {
ctx.equalityCheck = o
}
type typePair = [2]reflect.Type
// MergeOption represents a merge configuration option.
type MergeOption interface {
apply(*MergeContext)
}
// MergeOptions represent a list of merge configuration options.
type MergeOptions []MergeOption
func (opts MergeOptions) apply(ctx *MergeContext) {
for _, opt := range opts {
opt.apply(ctx)
}
}
// MergeStrategy represents a merge strategy.
type MergeStrategy interface {
// Merge merges two values using the provided MergeContext.
Merge(ctx MergeContext, fst, snd Value) (Value, error)
}
// MergeStrategyFunc adapts a function to a MergeStrategy.
type MergeStrategyFunc func(MergeContext, Value, Value) (Value, error)
// Merge merges two values by delegating to the merge strategy function.
func (fn MergeStrategyFunc) Merge(ctx MergeContext, fst, snd Value) (Value, error) {
return fn(ctx, fst, snd)
}
// MergeStrategyOption represents a merge strategy configuration option.
type MergeStrategyOption struct {
fstType reflect.Type
sndType reflect.Type
strategy MergeStrategy
}
func (o MergeStrategyOption) apply(ctx *MergeContext) {
if ctx.strategies == nil {
ctx.strategies = make(map[typePair]MergeStrategy)
}
ctx.strategies[typePair{o.fstType, o.sndType}] = o.strategy
}
// WithStrategy returns a MergeStrategyOption with the provided parameters.
func WithStrategy(fstType, sndType reflect.Type, strategy MergeStrategy) MergeStrategyOption {
return MergeStrategyOption{
fstType: fstType,
sndType: sndType,
strategy: strategy,
}
}
type useFirstMergeStrategy bool
const (
// UseFirst is a merge strategy that always returns the first value without modifying it.
UseFirst useFirstMergeStrategy = true
// UseSecond is a merge strategy that always returns the second value without modifying it.
UseSecond useFirstMergeStrategy = false
)
func (ms useFirstMergeStrategy) Merge(_ MergeContext, fst, snd Value) (Value, error) {
if ms {
return fst, nil
}
return snd, nil
} | pkg/any/merge.go | 0.717903 | 0.454775 | merge.go | starcoder |
package object
import "fmt"
// DamageType classifies damage.
type DamageType byte

// String returns the textual representation of the value.
func (damageType DamageType) String() string {
	idx := int(damageType)
	if idx < len(damageTypeNames) {
		return damageTypeNames[idx]
	}
	return fmt.Sprintf("Unknown 0x%02X", idx)
}

// mask returns the single-bit mask that represents this damage type.
func (damageType DamageType) mask() DamageTypeMask {
	return DamageTypeMask(1) << damageType
}

// DamageType constants.
const (
	DamageTypeExplosion    DamageType = 0
	DamageTypeEnergy       DamageType = 1
	DamageTypeMagnetic     DamageType = 2
	DamageTypeRadiation    DamageType = 3
	DamageTypeGas          DamageType = 4
	DamageTypeTranquilizer DamageType = 5
	DamageTypeNeedle       DamageType = 6
	DamageTypeBio          DamageType = 7
)

// damageTypeNames holds the display names, indexed by DamageType value.
var damageTypeNames = []string{
	"Explosion", "Energy", "Magnetic", "Radiation",
	"Gas", "Tranquilizer", "Needle", "Bio",
}

// DamageTypes returns all known constants.
func DamageTypes() []DamageType {
	all := make([]DamageType, 0, len(damageTypeNames))
	for dt := DamageTypeExplosion; dt <= DamageTypeBio; dt++ {
		all = append(all, dt)
	}
	return all
}

// DamageTypeMask combines a set of damage types.
type DamageTypeMask byte

// Has returns whether the mask contains the specified value.
func (mask DamageTypeMask) Has(dmg DamageType) bool {
	return mask&dmg.mask() != 0
}

// With returns a new mask that specifies the combination of this and the given damage type.
func (mask DamageTypeMask) With(dmg DamageType) DamageTypeMask {
	return mask | dmg.mask()
}

// Without returns a new mask that specifies the remainder of this, not including the given type.
func (mask DamageTypeMask) Without(dmg DamageType) DamageTypeMask {
	return mask &^ dmg.mask()
}
// SpecialDamageType is a combination of "primary" (double) and "super" (quadruple) damage potential.
// The identifiers are "freeform" enumerations without public constants.
// The primary value lives in the low nibble, the super value in the high nibble.
type SpecialDamageType byte

const (
	specialDamageTypePrimaryShift = 0    // bit offset of the primary nibble
	specialDamageTypePrimaryMask  = 0x0F // low nibble: primary (double) damage
	specialDamageTypeSuperShift   = 4    // bit offset of the super nibble
	specialDamageTypeSuperMask    = 0xF0 // high nibble: super (quadruple) damage
	// SpecialDamageTypeLimit identifies the maximum value of special damage type.
	SpecialDamageTypeLimit = 0x0F
)

// PrimaryValue returns the double damage type (low nibble).
func (dmg SpecialDamageType) PrimaryValue() int {
	return int((dmg & specialDamageTypePrimaryMask) >> specialDamageTypePrimaryShift)
}

// WithPrimaryValue returns a new type instance with the given primary value set.
// Out-of-range values (negative or above SpecialDamageTypeLimit) are ignored
// and the receiver's value is returned unchanged.
func (dmg SpecialDamageType) WithPrimaryValue(value int) SpecialDamageType {
	result := dmg
	if (value >= 0) && (value <= SpecialDamageTypeLimit) {
		result = SpecialDamageType((byte(result) & ^byte(specialDamageTypePrimaryMask)) | byte(value<<specialDamageTypePrimaryShift))
	}
	return result
}
// SuperValue returns the quadruple damage type (high nibble).
func (dmg SpecialDamageType) SuperValue() int {
	return int((dmg & specialDamageTypeSuperMask) >> specialDamageTypeSuperShift)
}

// WithSuperValue returns a new type instance with the given super value set.
// Out-of-range values (negative or above SpecialDamageTypeLimit) are ignored
// and the receiver's value is returned unchanged.
func (dmg SpecialDamageType) WithSuperValue(value int) SpecialDamageType {
	result := dmg
	if (value >= 0) && (value <= SpecialDamageTypeLimit) {
		result = SpecialDamageType((byte(result) & ^byte(specialDamageTypeSuperMask)) | byte(value<<specialDamageTypeSuperShift))
	}
	return result
} | ss1/content/object/DamageType.go | 0.829008 | 0.474266 | DamageType.go | starcoder |
package prayer
import (
"math"
"time"
"github.com/hablullah/go-juliandays"
)
// Times is the result of calculation.
type Times struct {
Fajr time.Time
Sunrise time.Time
Zuhr time.Time
Asr time.Time
Maghrib time.Time
Isha time.Time
}
// TimeCorrections is correction for each prayer time.
type TimeCorrections struct {
Fajr time.Duration
Sunrise time.Duration
Zuhr time.Duration
Asr time.Duration
Maghrib time.Duration
Isha time.Duration
}
// Config is configuration that used to calculate the prayer times.
type Config struct {
// Latitude is the latitude of the location. Positive for north area and negative for south area.
Latitude float64
// Longitude is the longitude of the location. Positive for east area and negative for west area.
Longitude float64
// Elevation is the elevation of the location above sea level. It's used to improve calculation for
// sunrise and sunset by factoring the value of atmospheric refraction. However, apparently most of
// the prayer time calculator doesn't use it so it's fine to omit it.
Elevation float64
// CalculationMethod is the method that used for calculating Fajr and Isha time. It works by specifying
// Fajr angle, Isha angle or Maghrib duration following one of the well-known conventions. By default
// it will use MWL method.
CalculationMethod CalculationMethod
// FajrAngle is the altitude of Sun below horizon which mark the start of Fajr time. If it's specified,
// the Fajr angle that provided by CalculationMethod will be ignored.
FajrAngle float64
// IshaAngle is the altitude of Sun below horizon which mark the start of Isha time. If it's specified,
// the Isha angle that provided by CalculationMethod will be ignored.
IshaAngle float64
// MaghribDuration is the duration between Maghrib and Isha. If it's specified, the Maghrib duration
// that provided by CalculationMethod will be ignored. Isha angle will be ignored as well since the
// Isha time will be calculated from Maghrib time.
MaghribDuration time.Duration
// AsrConvention is the convention that used for calculating Asr time. There are two conventions,
// Shafii and Hanafi. By default it will use Shafii.
AsrConvention AsrConvention
// PreciseToSeconds specify whether output time will omit the seconds or not.
PreciseToSeconds bool
// TimeCorrections is used to corrects calculated time for each specified prayer.
TimeCorrections TimeCorrections
// HighLatitudeMethods is methods that used for calculating Fajr and Isha time in higher latitude area
// (more than 45 degree from equator) where the Sun might never set or rise for an entire season. By
// default it will use angle-based method.
HighLatitudeMethod HighLatitudeMethod
}
// Calculate calculates the prayer times for the specified date with the
// specified configuration. The configuration is first normalised for
// high-latitude handling, then the raw times are computed, and finally the
// per-prayer corrections from cfg.TimeCorrections are applied.
func Calculate(cfg Config, date time.Time) (Times, error) {
	adjusted := adjustHighLatitudeConfig(cfg)
	raw, err := calculate(adjusted, date)
	if err != nil {
		return Times{}, err
	}
	return applyTimeCorrections(adjusted, raw), nil
}
// calculate computes the raw prayer times (before per-prayer corrections) for
// the day of the given date. It first derives all times from a noon base,
// then repeats the calculation once per prayer using that prayer's own
// initial time as the base to refine the solar position, and finally applies
// the high-latitude adjustment. A zero time means the event does not occur
// on that day (see minutesToTime).
func calculate(cfg Config, date time.Time) (Times, error) {
	// For initial calculation, get prayer times using noon as base time
	date = time.Date(date.Year(), date.Month(), date.Day(), 12, 0, 0, 0, date.Location())
	times, err := calculateByBase(cfg, date)
	if err != nil {
		return Times{}, err
	}
	// To increase accuracy, redo calculation for each prayer using initial time as base.
	// The increased accuracy is not really much though, only around 1-15 seconds.
	if !times.Fajr.IsZero() {
		accTimes, _ := calculateByBase(cfg, times.Fajr)
		times.Fajr = accTimes.Fajr
	}
	if !times.Sunrise.IsZero() {
		accTimes, _ := calculateByBase(cfg, times.Sunrise)
		times.Sunrise = accTimes.Sunrise
	}
	if !times.Zuhr.IsZero() {
		accTimes, _ := calculateByBase(cfg, times.Zuhr)
		times.Zuhr = accTimes.Zuhr
	}
	if !times.Asr.IsZero() {
		accTimes, _ := calculateByBase(cfg, times.Asr)
		times.Asr = accTimes.Asr
	}
	if !times.Maghrib.IsZero() {
		accTimes, _ := calculateByBase(cfg, times.Maghrib)
		times.Maghrib = accTimes.Maghrib
	}
	if !times.Isha.IsZero() {
		accTimes, _ := calculateByBase(cfg, times.Isha)
		times.Isha = accTimes.Isha
	}
	// Adjust prayer time in higher latitude
	times = adjustHighLatitudeTimes(cfg, times)
	return times, nil
}
// calculateByBase computes all six times for the day of baseTime, evaluating
// the Sun's position at baseTime itself. The solar-position and
// equation-of-time polynomials match the widely used NOAA solar calculation
// equations; sin/cos/tan/asin/acos/acot/degree are presumably the package's
// degree-based trigonometry helpers defined elsewhere — TODO confirm.
// All intermediate "time" values are minutes after local midnight; an event
// that never occurs (the hour angle is NaN during polar day/night) comes back
// as a zero time.Time via minutesToTime.
func calculateByBase(cfg Config, baseTime time.Time) (Times, error) {
	// Calculate Julian Days
	jd, err := juliandays.FromTime(baseTime)
	if err != nil {
		return Times{}, err
	}
	// Convert Julian Days to Julian Century (36525 days per century,
	// measured from epoch J2000.0 = JD 2451545).
	jc := (jd - 2451545) / 36525
	// Get timezone offset from date, in hours.
	_, utcOffset := baseTime.Zone()
	timezone := float64(utcOffset) / 3600
	// Calculate position of the sun (angles in degrees).
	earthOrbitEccent := 0.016708634 - jc*(0.000042037+0.0000001267*jc)
	sunMeanLongitude := math.Mod(280.46646+jc*(36000.76983+jc*0.0003032), 360)
	sunMeanAnomaly := 357.52911 + jc*(35999.05029-0.0001537*jc)
	sunEqOfCenter := sin(sunMeanAnomaly)*(1.914602-jc*(0.004817+0.000014*jc)) +
		sin(2*sunMeanAnomaly)*(0.019993-0.000101*jc) +
		sin(3*sunMeanAnomaly)*0.000289
	sunTrueLongitude := sunMeanLongitude + sunEqOfCenter
	sunAppLongitude := sunTrueLongitude - 0.00569 - 0.00478*sin(125.04-1934.136*jc)
	meanObliqEcliptic := 23 + (26+(21.448-jc*(46.815+jc*(0.00059-jc*0.001813)))/60)/60
	obliqCorrection := meanObliqEcliptic + 0.00256*cos(125.04-1934.136*jc)
	sunDeclination := asin(sin(obliqCorrection) * sin(sunAppLongitude))
	// Calculate equation of time; the result is used in minutes below.
	tmp := tan(obliqCorrection/2) * tan(obliqCorrection/2)
	eqOfTime := 4 * degree(tmp*sin(2*sunMeanLongitude)-
		2*earthOrbitEccent*sin(sunMeanAnomaly)+
		4*earthOrbitEccent*tmp*sin(sunMeanAnomaly)*cos(2*sunMeanLongitude)-
		0.5*math.Pow(tmp, 2)*sin(4*sunMeanLongitude)-
		1.25*math.Pow(earthOrbitEccent, 2)*sin(2*sunMeanAnomaly))
	// Calculate solar noon, in minutes after local midnight.
	solarNoon := 720 - 4*cfg.Longitude - eqOfTime + float64(timezone)*60
	// Calculate sunrise and sunset (Maghrib). -0.833333 is the conventional
	// sunrise/sunset altitude; the sqrt term lowers it further with elevation.
	sunriseSunAltitude := -0.833333 - 0.0347*math.Sqrt(cfg.Elevation)
	haSunrise := getHourAngle(cfg.Latitude, sunriseSunAltitude, sunDeclination)
	sunriseTime := solarNoon - haSunrise*4 // 4 minutes per degree of hour angle
	maghribTime := solarNoon + haSunrise*4
	// Calculate Fajr and Isha time.
	fajrAngle, ishaAngle, maghribDuration := getNightPrayerConfig(cfg)
	fajrSunAltitude := -fajrAngle
	haFajr := getHourAngle(cfg.Latitude, fajrSunAltitude, sunDeclination)
	fajrTime := solarNoon - haFajr*4
	var ishaTime float64
	if maghribDuration != 0 {
		// Isha is a fixed offset after Maghrib (e.g. Umm al-Qura convention).
		ishaTime = maghribTime + maghribDuration.Minutes()
	} else {
		ishaSunAltitude := -ishaAngle
		haIsha := getHourAngle(cfg.Latitude, ishaSunAltitude, sunDeclination)
		ishaTime = solarNoon + haIsha*4
	}
	// Calculate Asr time from the shadow-length coefficient (1 Shafii, 2 Hanafi).
	asrSunAltitude := acot(getAsrCoefficient(cfg) + tan(math.Abs(sunDeclination-cfg.Latitude)))
	haAsr := getHourAngle(cfg.Latitude, asrSunAltitude, sunDeclination)
	asrTime := solarNoon + haAsr*4
	// Return all times
	return Times{
		Fajr:    minutesToTime(cfg, baseTime, fajrTime),
		Sunrise: minutesToTime(cfg, baseTime, sunriseTime),
		Zuhr:    minutesToTime(cfg, baseTime, solarNoon),
		Asr:     minutesToTime(cfg, baseTime, asrTime),
		Maghrib: minutesToTime(cfg, baseTime, maghribTime),
		Isha:    minutesToTime(cfg, baseTime, ishaTime),
	}, nil
}
// getNightPrayerConfig resolves the Fajr angle, Isha angle and fixed
// Maghrib-to-Isha duration for the configured calculation method. Explicit
// values in cfg (FajrAngle, IshaAngle, MaghribDuration) override the method
// defaults — but only when non-zero: a zero value always means "use the
// method default", so a literal 0 cannot be forced. A non-zero
// maghribDuration means Isha is a fixed offset after Maghrib and the Isha
// angle is unused.
func getNightPrayerConfig(cfg Config) (fajrAngle, ishaAngle float64, maghribDuration time.Duration) {
	switch cfg.CalculationMethod {
	case MWL, Algerian, Diyanet:
		fajrAngle, ishaAngle = 18, 17
	case ISNA:
		fajrAngle, ishaAngle = 15, 15
	case UmmAlQura:
		fajrAngle, maghribDuration = 18.5, 90*time.Minute
	case Gulf:
		fajrAngle, maghribDuration = 19.5, 90*time.Minute
	case Karachi, France18, Tunisia:
		fajrAngle, ishaAngle = 18, 18
	case Egypt:
		fajrAngle, ishaAngle = 19.5, 17.5
	case EgyptBis, Kemenag, MUIS, JAKIM:
		fajrAngle, ishaAngle = 20, 18
	case UOIF:
		fajrAngle, ishaAngle = 12, 12
	case France15:
		fajrAngle, ishaAngle = 15, 15
	case Tehran:
		fajrAngle, ishaAngle = 17.7, 14
	case Jafari:
		fajrAngle, ishaAngle = 16, 14
	}
	// User-specified overrides take precedence over the method defaults.
	if cfg.FajrAngle != 0 {
		fajrAngle = cfg.FajrAngle
	}
	if cfg.IshaAngle != 0 {
		ishaAngle = cfg.IshaAngle
	}
	if cfg.MaghribDuration != 0 {
		maghribDuration = cfg.MaghribDuration
	}
	return
}
// getAsrCoefficient returns the shadow-length coefficient used in the Asr
// calculation: 2 for the Hanafi convention, 1 for everything else (Shafii,
// the default zero value).
func getAsrCoefficient(cfg Config) float64 {
	switch cfg.AsrConvention {
	case Hanafi:
		return 2
	default:
		return 1
	}
}
// getHourAngle returns the hour angle (in degrees; one degree corresponds to
// four minutes of time in the callers) at which the Sun reaches sunAltitude
// for the given observer latitude and solar declination. sin/cos/acos are
// the package's trig helpers. When the Sun never reaches sunAltitude on that
// day (polar day/night), the acos argument leaves [-1, 1] and the result is
// NaN; callers detect that via math.IsNaN (see minutesToTime).
func getHourAngle(latitude, sunAltitude, sunDeclination float64) float64 {
	// Removed the redundant float64(sunAltitude) conversion — the parameter
	// is already a float64 (flagged by the unconvert linter).
	return acos(
		(sin(sunAltitude) - sin(latitude)*sin(sunDeclination)) /
			(cos(latitude) * cos(sunDeclination)),
	)
}
// minutesToTime converts a count of minutes after local midnight into a
// concrete time.Time on the same calendar day (and location) as date. A NaN
// input — produced when the event never happens on that day — yields the
// zero time. Depending on cfg.PreciseToSeconds the value is rounded to the
// nearest second or to the nearest minute.
func minutesToTime(cfg Config, date time.Time, minutes float64) time.Time {
	if math.IsNaN(minutes) {
		return time.Time{}
	}
	year, month, day := date.Date()
	loc := date.Location()
	if !cfg.PreciseToSeconds {
		return time.Date(year, month, day, 0, int(math.Round(minutes)), 0, 0, loc)
	}
	secs := int(math.Round(minutes * 60))
	return time.Date(year, month, day, 0, 0, secs, 0, loc)
}
// adjustHighLatitudeConfig clamps the latitude to the [-45, 45] range when
// the ForcedNormalRegion method is selected; for every other method the
// configuration passes through untouched.
func adjustHighLatitudeConfig(cfg Config) Config {
	if cfg.HighLatitudeMethod != ForcedNormalRegion {
		return cfg
	}
	switch {
	case cfg.Latitude > 45:
		cfg.Latitude = 45
	case cfg.Latitude < -45:
		cfg.Latitude = -45
	}
	return cfg
}
// adjustHighLatitudeTimes applies the configured high-latitude convention to
// the calculated times. NormalRegion recalculates everything as if the
// location were at latitude 45 when the fasting duration is abnormal;
// AngleBased / OneSeventhNight / MiddleNight derive Fajr and Isha from a
// portion of the night instead of the (possibly undefined) solar angles.
// Unknown methods return the times unchanged.
func adjustHighLatitudeTimes(cfg Config, times Times) Times {
	switch cfg.HighLatitudeMethod {
	case NormalRegion:
		// This adjustment is only used above latitude 45 (north or south).
		if math.Abs(cfg.Latitude) <= 45 {
			return times
		}
		// Only adjust when the fasting time is outside normal bounds.
		// The normal fasting duration is between 10h 17m and 17h 36m.
		fastingDuration := times.Maghrib.Sub(times.Fajr).Minutes()
		if fastingDuration >= 617 && fastingDuration <= 1056 {
			return times
		}
		// Recalculate as if the location were at latitude 45, same hemisphere.
		if cfg.Latitude > 0 {
			cfg.Latitude = 45
		} else {
			cfg.Latitude = -45
		}
		adjustedTimes, _ := calculate(cfg, times.Zuhr)
		return adjustedTimes
	case AngleBased, OneSeventhNight, MiddleNight:
		// These conventions require sunrise and sunset to exist.
		if times.Sunrise.IsZero() || times.Maghrib.IsZero() {
			return times
		}
		// This adjustment is only used between latitude 48.6 and 66.6 (N/S).
		absLatitude := math.Abs(cfg.Latitude)
		if absLatitude < 48.6 || absLatitude > 66.6 {
			return times
		}
		// Get Fajr and Isha angles (and the optional fixed Maghrib-Isha gap).
		fajrAngle, ishaAngle, maghribDuration := getNightPrayerConfig(cfg)
		// Night is everything outside the sunrise..sunset span.
		dayDuration := times.Maghrib.Sub(times.Sunrise).Minutes()
		nightDuration := (24 * 60) - dayDuration
		// Fraction of the night allotted before sunrise (Fajr) and after
		// sunset (Isha).
		var fajrPortion, ishaPortion float64
		switch cfg.HighLatitudeMethod {
		case MiddleNight:
			fajrPortion = 0.5
			ishaPortion = 0.5
		case OneSeventhNight:
			// BUG FIX: this was previously the constant expression 1 / 7,
			// which Go evaluates with untyped-integer division to 0 — it
			// collapsed Fajr onto Sunrise and Isha onto Maghrib for this
			// method. 1.0 / 7 forces constant floating-point division.
			fajrPortion = 1.0 / 7
			ishaPortion = 1.0 / 7
		default: // AngleBased
			fajrPortion = fajrAngle / 60
			ishaPortion = ishaAngle / 60
		}
		// New Fajr: a fajrPortion share of the night before sunrise, but
		// never earlier than the directly calculated Fajr (when one exists).
		fajrDuration := math.Round(fajrPortion * nightDuration)
		newFajr := times.Sunrise.Add(time.Duration(-fajrDuration) * time.Minute)
		if times.Fajr.IsZero() || newFajr.After(times.Fajr) {
			times.Fajr = newFajr
		}
		// New Isha: symmetric share after Maghrib — unless Isha is defined
		// as a fixed offset from Maghrib (maghribDuration != 0), which the
		// base calculation already handled.
		if maghribDuration == 0 {
			ishaDuration := math.Round(ishaPortion * nightDuration)
			newIsha := times.Maghrib.Add(time.Duration(ishaDuration) * time.Minute)
			if times.Isha.IsZero() || newIsha.Before(times.Isha) {
				times.Isha = newIsha
			}
		}
		return times
	default:
		return times
	}
}
// applyTimeCorrections shifts each calculated prayer time by its configured
// correction. Zero times (events that do not occur on that day) are left
// untouched so they keep signalling "no event".
func applyTimeCorrections(cfg Config, times Times) Times {
	if !times.Fajr.IsZero() {
		times.Fajr = times.Fajr.Add(cfg.TimeCorrections.Fajr)
	}
	if !times.Sunrise.IsZero() {
		times.Sunrise = times.Sunrise.Add(cfg.TimeCorrections.Sunrise)
	}
	if !times.Zuhr.IsZero() {
		times.Zuhr = times.Zuhr.Add(cfg.TimeCorrections.Zuhr)
	}
	if !times.Asr.IsZero() {
		times.Asr = times.Asr.Add(cfg.TimeCorrections.Asr)
	}
	if !times.Maghrib.IsZero() {
		times.Maghrib = times.Maghrib.Add(cfg.TimeCorrections.Maghrib)
	}
	if !times.Isha.IsZero() {
		times.Isha = times.Isha.Add(cfg.TimeCorrections.Isha)
	}
	return times
} | calculator.go | 0.774583 | 0.51501 | calculator.go | starcoder |
package parser
import (
"github.com/pkg/errors"
"net"
)
func filterPrefix(prefix net.IPNet) error {
// filter submasks with zero size
if size, _ := prefix.Mask.Size(); size == 0 {
return errors.New("prefix size of 0")
}
if !prefix.IP.Mask(prefix.Mask).Equal(prefix.IP) {
return errors.New("prefix contains bits not in mask")
}
if prefix.IP.To4() != nil {
// https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml
if prefix.IP[0] == 0 || // "This network"
prefix.IP.IsPrivate() ||
(prefix.IP[0] == 100 && prefix.IP[1]&0xC0 == 64) || // Shared Address Space
prefix.IP.IsLoopback() ||
prefix.IP.IsLinkLocalUnicast() ||
(prefix.IP[0] == 192 && prefix.IP[1] == 0 && prefix.IP[2] == 0 && // IETF Protocol Assignments
prefix.IP[3] != 9 && // Port Control Protocol Anycast
prefix.IP[3] != 10) || // Traversal Using Relays around NAT Anycast
(prefix.IP[0] == 192 && prefix.IP[1] == 0 && prefix.IP[2] == 2) || // Documentation (TEST-NET-1)
(prefix.IP[0] == 198 && prefix.IP[1]&0xFE == 18) || // Benchmarking
(prefix.IP[0] == 198 && prefix.IP[1] == 51 && prefix.IP[2] == 100) || // Documentation (TEST-NET-2)
(prefix.IP[0] == 203 && prefix.IP[1] == 0 && prefix.IP[2] == 113) || // Documentation (TEST-NET-3)
(prefix.IP[0]&0xF0 == 240) || // Reserved
(prefix.IP.Equal(net.IPv4bcast)) {
return errors.New("prefix is reserved")
}
} else {
// https://www.iana.org/assignments/iana-ipv6-special-registry/iana-ipv6-special-registry.xhtml
if prefix.IP.IsLoopback() ||
prefix.IP.IsUnspecified() ||
(prefix.IP[:12].Equal(net.IP{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff})) || // IPv4-mapped Address
(prefix.IP[:6].Equal(net.IP{0x00, 0x64, 0xff, 0x9b, 0x00, 0x01})) || // IPv4-IPv6 Translat.
(prefix.IP[:8].Equal(net.IP{0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00})) || // Discard-Only Address Block
(prefix.IP[0] == 0x20 && prefix.IP[1] == 0x01 && prefix.IP[2]&0xfE == 0x00 && // IETF Protocol Assignments
!(prefix.IP[2] == 0x00 && prefix.IP[3] == 0x00) && // TEREDO
!(prefix.IP.Equal(net.IP{0x20, 0x01, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01})) && // Port Control Protocol Anycast
!(prefix.IP.Equal(net.IP{0x20, 0x01, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02})) && // Traversal Using Relays around NAT Anycast
!(prefix.IP[2] == 0x00 && prefix.IP[3] == 0x03) && // AMT
!(prefix.IP[2] == 0x00 && prefix.IP[3] == 0x04 && prefix.IP[4] == 0x01 && prefix.IP[5] == 0x12) && // AS112-v6
!(prefix.IP[2] == 0x00 && prefix.IP[3]&0xf0 == 0x20)) || // ORCHIDv2
(prefix.IP[:4].Equal(net.IP{0x20, 0x01, 0x0d, 0xb8})) || // Documentation
prefix.IP.IsPrivate() || // Unique-Local
prefix.IP.IsLinkLocalUnicast() {
return errors.New("prefix is reserved")
}
}
return nil
}
// filterASN returns an error when the given ASN is not publicly routable:
// reserved, documentation, private-use, or transition numbers according to
// the IANA AS Numbers Special-Purpose registry (RFCs cited inline).
func filterASN(asn int) error {
	// https://www.iana.org/assignments/iana-as-numbers-special-registry/iana-as-numbers-special-registry.xhtml
	if asn == 0 || // Reserved by [RFC7607]
		asn == 112 || // Used by the AS112 project to sink misdirected DNS queries
		asn == 23456 || // AS_TRANS
		(asn >= 64496 && asn <= 64511) || // For documentation and sample code
		(asn >= 64512 && asn <= 65534) || // For private use
		asn == 65535 || // Reserved by [RFC7300]
		(asn >= 65536 && asn <= 65551) || // For documentation and sample code
		(asn >= 4200000000 && asn <= 4294967294) || // For private use
		asn == 4294967295 { // Reserved by [RFC7300]
		return errors.New("ASN is not public")
	}
	return nil
} | parser/filter.go | 0.554953 | 0.497864 | filter.go | starcoder |
package internal
import (
"fmt"
"strconv"
)
// ToString renders a scalar value as its string form: strings and byte
// slices pass through, numbers and bools are formatted with strconv,
// fmt.Stringer values use their String method, and nil maps to "". Any other
// type yields an error.
func ToString(value interface{}) (string, error) {
	switch val := value.(type) {
	case nil:
		return "", nil
	case string:
		return val, nil
	case []byte:
		return string(val), nil
	case bool:
		return strconv.FormatBool(val), nil
	case int:
		return strconv.FormatInt(int64(val), 10), nil
	case int8:
		return strconv.FormatInt(int64(val), 10), nil
	case int16:
		return strconv.FormatInt(int64(val), 10), nil
	case int32:
		return strconv.FormatInt(int64(val), 10), nil
	case int64:
		return strconv.FormatInt(val, 10), nil
	case uint:
		return strconv.FormatUint(uint64(val), 10), nil
	case uint8:
		return strconv.FormatUint(uint64(val), 10), nil
	case uint16:
		return strconv.FormatUint(uint64(val), 10), nil
	case uint32:
		return strconv.FormatUint(uint64(val), 10), nil
	case uint64:
		return strconv.FormatUint(val, 10), nil
	case float32:
		return strconv.FormatFloat(float64(val), 'f', -1, 32), nil
	case float64:
		return strconv.FormatFloat(val, 'f', -1, 64), nil
	case fmt.Stringer:
		return val.String(), nil
	}
	return "", fmt.Errorf("type \"%T\" unsupported", value)
}
// ToFloat64 coerces a scalar value to float64. Strings, byte slices and
// fmt.Stringer values are parsed with strconv.ParseFloat; numeric types are
// converted directly (very large int64/uint64 values may lose precision);
// nil yields 0. Unsupported types (including bool) produce an error.
func ToFloat64(value interface{}) (float64, error) {
	switch v := value.(type) {
	case string:
		return strconv.ParseFloat(v, 64)
	case []byte:
		return strconv.ParseFloat(string(v), 64)
	case fmt.Stringer:
		return strconv.ParseFloat(v.String(), 64)
	case int:
		return float64(v), nil
	case int8:
		return float64(v), nil
	case int16:
		return float64(v), nil
	case int32:
		return float64(v), nil
	case int64:
		return float64(v), nil
	case uint:
		return float64(v), nil
	case uint8:
		return float64(v), nil
	case uint16:
		return float64(v), nil
	case uint32:
		return float64(v), nil
	case uint64:
		return float64(v), nil
	case float32:
		return float64(v), nil
	case float64:
		return v, nil
	case nil:
		return 0, nil
	}
	return 0, fmt.Errorf("type \"%T\" unsupported", value)
}
// ToInt64 coerces a scalar value to int64. Text-like inputs (string, []byte,
// fmt.Stringer) are parsed in base 10; numeric inputs are converted directly,
// truncating floats toward zero; nil maps to 0. Any other type (including
// bool) yields an error.
func ToInt64(value interface{}) (int64, error) {
	switch val := value.(type) {
	case nil:
		return 0, nil
	case int64:
		return val, nil
	case int:
		return int64(val), nil
	case int8:
		return int64(val), nil
	case int16:
		return int64(val), nil
	case int32:
		return int64(val), nil
	case uint:
		return int64(val), nil
	case uint8:
		return int64(val), nil
	case uint16:
		return int64(val), nil
	case uint32:
		return int64(val), nil
	case uint64:
		return int64(val), nil
	case float32:
		return int64(val), nil
	case float64:
		return int64(val), nil
	case string:
		return strconv.ParseInt(val, 10, 64)
	case []byte:
		return strconv.ParseInt(string(val), 10, 64)
	case fmt.Stringer:
		return strconv.ParseInt(val.String(), 10, 64)
	}
	return 0, fmt.Errorf("type \"%T\" unsupported", value)
}
// ToUint64 coerces a scalar value to uint64. Strings, byte slices and
// fmt.Stringer values are parsed with strconv.ParseUint; numeric types are
// converted directly — note that negative signed values wrap around per Go's
// conversion rules rather than producing an error, and floats are truncated.
// nil yields 0; unsupported types produce an error.
func ToUint64(value interface{}) (uint64, error) {
	switch v := value.(type) {
	case string:
		return strconv.ParseUint(v, 10, 64)
	case []byte:
		return strconv.ParseUint(string(v), 10, 64)
	case fmt.Stringer:
		return strconv.ParseUint(v.String(), 10, 64)
	case int:
		return uint64(v), nil
	case int8:
		return uint64(v), nil
	case int16:
		return uint64(v), nil
	case int32:
		return uint64(v), nil
	case int64:
		return uint64(v), nil
	case uint:
		return uint64(v), nil
	case uint8:
		return uint64(v), nil
	case uint16:
		return uint64(v), nil
	case uint32:
		return uint64(v), nil
	case uint64:
		return v, nil
	case float32:
		return uint64(v), nil
	case float64:
		return uint64(v), nil
	case nil:
		return 0, nil
	}
	return 0, fmt.Errorf("type \"%T\" unsupported", value)
}
// ToBool coerces a scalar value to bool. Strings, byte slices and
// fmt.Stringer values go through strconv.ParseBool (accepting only
// 1/t/T/TRUE/true/True and 0/f/F/FALSE/false/False); numeric types map to
// v > 0, so zero AND negative numbers are false; nil is false. Unsupported
// types produce an error.
func ToBool(value interface{}) (bool, error) {
	switch v := value.(type) {
	case string:
		return strconv.ParseBool(v)
	case []byte:
		return strconv.ParseBool(string(v))
	case fmt.Stringer:
		return strconv.ParseBool(v.String())
	case int:
		return v > 0, nil
	case int8:
		return v > 0, nil
	case int16:
		return v > 0, nil
	case int32:
		return v > 0, nil
	case int64:
		return v > 0, nil
	case uint:
		return v > 0, nil
	case uint8:
		return v > 0, nil
	case uint16:
		return v > 0, nil
	case uint32:
		return v > 0, nil
	case uint64:
		return v > 0, nil
	case float32:
		return v > 0, nil
	case float64:
		return v > 0, nil
	case nil:
		return false, nil
	}
	return false, fmt.Errorf("type \"%T\" unsupported", value)
} | internal/type_conversions.go | 0.52074 | 0.513059 | type_conversions.go | starcoder |
package policy
import (
"errors"
"github.com/joshdk/callcheck/graph"
)
// Decl represents a function declaration on a matched call path. Its Calls
// slice holds the outgoing calls that continue the path.
type Decl struct {
	Position string // source position of the declaration
	Name     string // function name
	Calls    []Call // outgoing calls; linear paths have at most one entry
}

// Call represents one call site inside a declaration: the callee's name and
// declaration, plus the call's ordinal position (Index) within the caller.
type Call struct {
	Position string // source position of the call site
	Name     string // callee function name
	Decl     Decl   // the callee's declaration subtree
	Index    int    // ordinal of this call within the caller's body
}

// Goal is a single search target: find call paths from the function named
// startDecl to a call of the function named endCall.
type Goal struct {
	startDecl string
	endCall   string
}
// MatchingPaths returns every call path in graph that satisfies the policy's
// rule tree, expressed as Decl trees rooted at the rule's top function. It
// returns nil when the policy has no rule or the graph is nil. A rule with
// no calls matches iff the named function exists in the graph. Otherwise
// the rule is split into per-edge Goals, each goal is resolved to concrete
// paths with walker, and the pieces are stitched together by genMatches.
// NOTE(review): the parameter named graph shadows the imported graph package
// inside the body; only the signature can reference graph.FuncDecl.
func MatchingPaths(graph map[string]graph.FuncDecl, policy Policy) []Decl {
	if policy.Rule == nil {
		return nil
	}
	if graph == nil {
		return nil
	}
	// Our rule does not have had any calls, and must be a decl all on its own.
	if len(policy.Rule.Calls) == 0 {
		// Check that our rule actually exist in the graph.
		if decl, found := graph[policy.Rule.Name]; found {
			return []Decl{{Name: decl.Name, Position: decl.Position}}
		}
		return nil
	}
	nodeToGoalMapping := splitRule(policy.Rule)
	goalToDeclsMapping := make(map[*Goal][]Decl)
	for _, goals := range nodeToGoalMapping {
		for _, goal := range goals {
			decls := walker(goal.startDecl, goal.endCall, graph)
			goalToDeclsMapping[goal] = decls
		}
	}
	return genMatches(policy.Rule, nodeToGoalMapping, goalToDeclsMapping)
}
// walker traverses the given call graph from the function named start and
// returns all distinct paths to the function named end. A value of nil is
// returned if no paths are found. All returned paths are guaranteed to be
// linear (do not branch). The visited set is shared across the whole search;
// see the note on paths for the pruning this implies.
func walker(start string, end string, graph map[string]graph.FuncDecl) []Decl {
	visited := make(map[string]struct{})
	return paths(start, end, visited, graph)
}
// paths is an internal function behind walker. It performs a depth-first
// search from current toward end, building one linear Decl chain per
// discovered route.
// NOTE(review): entries are never removed from visited after the recursive
// call returns, so once a node has been explored via one route it is pruned
// for every later route — paths that pass through such a node via a
// different prefix are not reported. Confirm whether this pruning is
// intentional (it bounds the search cost); the walker doc comment promises
// *all* distinct paths.
func paths(current string, end string, visited map[string]struct{}, graph map[string]graph.FuncDecl) []Decl {
	if graph == nil {
		return nil
	}
	me := Decl{
		Position: graph[current].Position,
		Name:     current,
	}
	// Reaching the target terminates the path, even if current == start.
	if current == end {
		return []Decl{me}
	}
	if _, found := visited[current]; found {
		return nil
	}
	startDecl := graph[current]
	visited[current] = struct{}{}
	var results []Decl
	for index, call := range startDecl.Calls {
		paths := paths(call.Name, end, visited, graph)
		// Each sub-path becomes its own linear chain rooted at current.
		for _, path := range paths {
			results = append(results, Decl{
				Name:     current,
				Position: startDecl.Position,
				Calls: []Call{
					{
						call.Position,
						path.Name,
						path,
						index,
					},
				},
			})
		}
	}
	return results
}
// walkRule recursively records, for every node in the rule tree, one Goal
// per outgoing call (from the node's function to the callee's function).
// Leaf nodes (no calls) receive no entry in goals.
func walkRule(node *Node, goals map[*Node][]*Goal) {
	if node == nil {
		return
	}
	for _, callee := range node.Calls {
		goals[node] = append(goals[node], &Goal{startDecl: node.Name, endCall: callee.Name})
		walkRule(callee, goals)
	}
}
// splitRule splits the given node into a list of goals for every sub-node.
// It is a thin wrapper that allocates the result map and delegates the
// traversal to walkRule.
func splitRule(node *Node) map[*Node][]*Goal {
	result := map[*Node][]*Goal{}
	walkRule(node, result)
	return result
}
// combineDecls merges two Decl trees rooted at the same function into one.
// It enforces the rule constraints: declarations named mustMatch must be the
// same on both sides, while declarations named mustSplit must differ.
// Constraint violations return an error; structural impossibilities (blank
// names, branching second tree, disjoint roots) indicate caller bugs and
// panic.
func combineDecls(first Decl, second Decl, mustMatch string, mustSplit string) (Decl, error) {
	// Sanity check declarations.
	switch {
	case first.Name == "" || second.Name == "":
		panic("name is blank")
	// Nodes are supposed to match, but didn't
	case first.Name == mustMatch && second.Name != first.Name:
		return Decl{}, errors.New("declarations required to match but did not")
	// Nodes are not supposed to match, but did
	case first.Name == mustSplit && second.Name == first.Name:
		return Decl{}, errors.New("declarations required to not match but did")
	case len(second.Calls) > 1:
		panic("more than 1 call")
	case first.Name != second.Name:
		panic("disjoint declarations")
	}
	// Nodes are the same, merge their call lists recursively.
	merged, err := combineCalls(first.Calls, second.Calls, mustMatch, mustSplit)
	if err != nil {
		return Decl{}, err
	}
	return Decl{
		Name:     first.Name,
		Position: first.Position,
		Calls:    merged,
	}, nil
}
// combineCalls merges the call lists of two overlapping declarations (the
// second list has at most one entry). Identical callees are merged
// recursively via combineDecls; different callees are kept side by side, but
// only when the first call precedes the second in source order (Index),
// otherwise an error is returned. Structural impossibilities panic.
func combineCalls(first []Call, second []Call, mustMatch string, mustSplit string) ([]Call, error) {
	// Sanity check calls.
	switch {
	case len(first) == 0 && len(second) == 0:
		return nil, nil
	case len(second) >= 2:
		panic("more than 1 call")
	case len(first) == 0 && len(second) >= 1:
		panic("first had no calls but second had calls")
	case len(first) >= 1 && len(second) == 0:
		panic("first had calls but second had no calls")
	}
	firstCall, secondCall := first[0], second[0]
	// These two calls are the same, merge them.
	if firstCall.Name == secondCall.Name {
		merged, err := combineDecls(firstCall.Decl, secondCall.Decl, mustMatch, mustSplit)
		if err != nil {
			return nil, err
		}
		return []Call{{
			Name:     firstCall.Name,
			Position: firstCall.Position,
			Index:    firstCall.Index,
			Decl:     merged,
		}}, nil
	}
	// These two calls are not the same, check if they are ordered.
	if firstCall.Index >= secondCall.Index {
		return nil, errors.New("calls are not sequential")
	}
	// Calls are ordered.
	return []Call{
		firstCall,
		secondCall,
	}, nil
}
// genMatches assembles, for the rule node current, all concrete Decl trees
// that satisfy the node and all of its sub-rules. Sibling calls are folded
// left to right: each call's paths are extended with the recursively matched
// sub-trees (wrapDeclSets) and then combined with the accumulated set,
// requiring the shared root (current.Name) to match and consecutive callees
// (split, the previous sibling's name) to differ. Returns nil as soon as any
// goal has no matching paths.
func genMatches(current *Node, nodeToGoalMapping map[*Node][]*Goal, goalToDeclsMapping map[*Goal][]Decl) []Decl {
	var all []Decl
	split := ""
	goals := nodeToGoalMapping[current]
	if len(goals) == 0 {
		return nil
	}
	for index, call := range current.Calls {
		wraps := genMatches(call, nodeToGoalMapping, goalToDeclsMapping)
		wrappers := goalToDeclsMapping[goals[index]]
		if len(wrappers) == 0 {
			return nil
		}
		res := wrapDeclSets(wrappers, wraps)
		all = combineDeclSets(all, res, current.Name, split)
		split = call.Name
	}
	return all
}
// combineDeclSets returns the pairwise combination of two declaration sets,
// keeping only pairs that combineDecls accepts under the match/split
// constraints. An empty second set yields nil; an empty first set passes
// the second set through unchanged.
func combineDeclSets(firstSet []Decl, secondSet []Decl, mustMatch string, mustSplit string) []Decl {
	switch {
	case len(secondSet) == 0:
		return nil
	case len(firstSet) == 0:
		return secondSet
	}
	merged := make([]Decl, 0, len(firstSet)*len(secondSet))
	for _, lhs := range firstSet {
		for _, rhs := range secondSet {
			if decl, err := combineDecls(lhs, rhs, mustMatch, mustSplit); err == nil {
				merged = append(merged, decl)
			}
		}
	}
	return merged
}
// wrapDecl appends the decl tree second onto the end of the linear decl
// first: it follows the wrapper's single-call spine to its deepest call and
// splices wrapped in there. The wrapper's tail declaration must carry the
// same name as wrapped's root, otherwise it panics.
func wrapDecl(wrapper Decl, wrapped Decl) Decl {
	// Base case: we reached the wrapper's tail; it is replaced by wrapped.
	if len(wrapper.Calls) == 0 {
		if wrapper.Name != wrapped.Name {
			panic("wrapper name mismatch")
		}
		return wrapped
	}
	lastCall := wrapper.Calls[len(wrapper.Calls)-1]
	return Decl{
		Name:     wrapper.Name,
		Position: wrapper.Position,
		Calls: []Call{
			{
				Name:     lastCall.Name,
				Position: lastCall.Position,
				Index:    lastCall.Index,
				Decl:     wrapDecl(lastCall.Decl, wrapped),
			},
		},
	}
}
// wrapDeclSets returns every combination of a wrapper path extended by a
// wrapped sub-tree (see wrapDecl). An empty wrapper set yields nil; an empty
// wraps set passes the wrappers through unchanged.
func wrapDeclSets(wrappers []Decl, wraps []Decl) []Decl {
	if len(wrappers) == 0 {
		return nil
	}
	if len(wraps) == 0 {
		return wrappers
	}
	results := make([]Decl, 0, len(wrappers)*len(wraps))
	for _, wrapper := range wrappers {
		for _, wrapped := range wraps {
			results = append(results, wrapDecl(wrapper, wrapped))
		}
	}
	return results
} | policy/match.go | 0.664867 | 0.40486 | match.go | starcoder |
package ent
import (
"fmt"
"strings"
"entgo.io/ent/dialect/sql"
"github.com/karashiiro/gacha/ent/series"
)
// Series is the model entity for the Series schema.
type Series struct {
config `json:"-"`
// ID of the ent.
ID uint32 `json:"id,omitempty"`
// Name holds the value of the "name" field.
Name string `json:"name,omitempty"`
// Edges holds the relations/edges for other nodes in the graph.
// The values are being populated by the SeriesQuery when eager-loading is set.
Edges SeriesEdges `json:"edges"`
}
// SeriesEdges holds the relations/edges for other nodes in the graph.
type SeriesEdges struct {
// Drops holds the value of the drops edge.
Drops []*Drop `json:"drops,omitempty"`
// loadedTypes holds the information for reporting if a
// type was loaded (or requested) in eager-loading or not.
loadedTypes [1]bool
}
// DropsOrErr returns the Drops value or an error if the edge
// was not loaded in eager-loading. (Generated entgo.io/ent accessor:
// loadedTypes[0] tracks whether the "drops" edge was requested.)
func (e SeriesEdges) DropsOrErr() ([]*Drop, error) {
	if e.loadedTypes[0] {
		return e.Drops, nil
	}
	return nil, &NotLoadedError{edge: "drops"}
}
// scanValues returns the types for scanning values from sql.Rows.
func (*Series) scanValues(columns []string) ([]interface{}, error) {
values := make([]interface{}, len(columns))
for i := range columns {
switch columns[i] {
case series.FieldID:
values[i] = &sql.NullInt64{}
case series.FieldName:
values[i] = &sql.NullString{}
default:
return nil, fmt.Errorf("unexpected column %q for type Series", columns[i])
}
}
return values, nil
}
// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the Series fields. Presumably generated by entc — note the two case
// bodies differ in shape: the ID assertion is unconditional (a wrong type is
// always an error), while name is nullable and only copied when Valid.
func (s *Series) assignValues(columns []string, values []interface{}) error {
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case series.FieldID:
			value, ok := values[i].(*sql.NullInt64)
			if !ok {
				return fmt.Errorf("unexpected type %T for field id", value)
			}
			s.ID = uint32(value.Int64)
		case series.FieldName:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field name", values[i])
			} else if value.Valid {
				s.Name = value.String
			}
		}
	}
	return nil
}
// QueryDrops queries the "drops" edge of the Series entity.
func (s *Series) QueryDrops() *DropQuery {
return (&SeriesClient{config: s.config}).QueryDrops(s)
}
// Update returns a builder for updating this Series.
// Note that you need to call Series.Unwrap() before calling this method if this Series
// was returned from a transaction, and the transaction was committed or rolled back.
func (s *Series) Update() *SeriesUpdateOne {
return (&SeriesClient{config: s.config}).UpdateOne(s)
}
// Unwrap unwraps the Series entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
func (s *Series) Unwrap() *Series {
tx, ok := s.config.driver.(*txDriver)
if !ok {
panic("ent: Series is not a transactional entity")
}
s.config.driver = tx.drv
return s
}
// String implements the fmt.Stringer.
func (s *Series) String() string {
var builder strings.Builder
builder.WriteString("Series(")
builder.WriteString(fmt.Sprintf("id=%v", s.ID))
builder.WriteString(", name=")
builder.WriteString(s.Name)
builder.WriteByte(')')
return builder.String()
}
// SeriesSlice is a parsable slice of Series.
type SeriesSlice []*Series
func (s SeriesSlice) config(cfg config) {
for _i := range s {
s[_i].config = cfg
}
} | ent/series.go | 0.694406 | 0.403596 | series.go | starcoder |
package memory
import (
"errors"
"fmt"
"github.com/ottotech/riskmanagement/pkg/adding"
"github.com/ottotech/riskmanagement/pkg/listing"
"image/color"
"os"
"path/filepath"
"time"
)
// matrix setup: geometry defaults applied to every new risk matrix image.
const (
	imWidth      = 600               // image width in pixels
	imHeight     = imWidth           // image is square
	matrixNrRows = 3                 // risk matrix rows
	matrixNrCols = 3                 // risk matrix columns
	matrixSize   = matrixNrRows * matrixNrCols
	multiple     = imWidth / matrixNrCols // cell edge length in pixels
	borderWidth  = 3
	wordWith     = 6  // label glyph width in pixels
	wordHeight   = 13 // label glyph height in pixels
)

// mediaPath will store the media path temporary in memory.
// NOTE(review): package-level mutable state shared by all Storage values and
// not guarded by any lock — confirm single-goroutine use.
var mediaPath = ""

// colors used when rendering the matrix cells, borders and labels.
var (
	red    = color.RGBA{R: 0xff, A: 0xff}                          // rgb(255, 0, 0) high risk
	yellow = color.RGBA{R: 0xff, G: 0xff, A: 0xff}                 // rgb(255, 255, 0) medium risk
	green  = color.RGBA{R: 0x90, G: 0xee, B: 0x90, A: 0xff}        // rgb(144, 238, 144) low risk
	white  = color.RGBA{R: 0xff, G: 0xff, B: 0xff, A: 0xff}        // rgb(255, 255, 255) border color
	black  = color.RGBA{A: 0xff}                                   // rgb(0, 0, 0) label color
)

// Storage keeps all risk-management data in memory.
type Storage struct {
	riskMatrixSlice []RiskMatrix // stored matrices; IDs are 1-based insertion order
	risks           []Risk       // stored risks across all matrices
}
// SaveMediaPath saves the given path that is going to be used to store media
// files, i.e. the risk matrix pictures. The error is always nil here
// (presumably kept in the signature for interface compatibility).
func (m *Storage) SaveMediaPath(path string) error {
	mediaPath = path
	return nil
}
// GetMediaPath get the media path stored.
func (m *Storage) GetMediaPath() (string, error) {
if mediaPath == "" {
return "", errors.New("no media path defined in memory")
}
return mediaPath, nil
}
// Add saves the given risk matrix in repository
func (m *Storage) AddRiskMatrix(rm adding.RiskMatrix) error {
newRM := RiskMatrix{
ID: len(m.riskMatrixSlice) + 1,
Path: rm.Path,
Project: rm.Project,
DateCreated: time.Now(),
MatImgWidth: imWidth,
MatImgHeight: imHeight,
MatNrRows: matrixNrRows,
MatNrCols: matrixNrCols,
MatSize: matrixSize,
BorderWidth: borderWidth,
Multiple: multiple,
WordHeight: wordHeight,
WordWidth: wordWith,
HighRiskColor: red,
MediumRiskColor: yellow,
LowRiskColor: green,
RiskLabelColor: black,
BorderColor: white,
}
m.riskMatrixSlice = append(m.riskMatrixSlice, newRM)
return nil
}
// UpdateRiskMatrixSize updates the image dimensions of the risk matrix with the
// given ID. Width and height are kept equal (the matrix image is square) and the
// cell size (Multiple) is recomputed from the new width and the column count.
// It returns an error if no risk matrix with that ID exists.
func (m *Storage) UpdateRiskMatrixSize(riskMatrixID, newImageWidth int) error {
	for i := range m.riskMatrixSlice {
		if m.riskMatrixSlice[i].ID != riskMatrixID {
			continue
		}
		rm := &m.riskMatrixSlice[i]
		rm.MatImgWidth = newImageWidth
		rm.MatImgHeight = newImageWidth
		rm.Multiple = newImageWidth / rm.MatNrCols
		return nil
	}
	// fmt.Errorf replaces errors.New(fmt.Sprintf(...)) (staticcheck S1028).
	return fmt.Errorf("risk matrix not found with id: %v", riskMatrixID)
}
// AddRisk saves the given risk in the repository. The stored risk gets an ID
// derived from its parent matrix ID and the creation timestamp. It returns an
// error if the referenced risk matrix does not exist.
func (m *Storage) AddRisk(r adding.Risk) error {
	found := false
	for i := range m.riskMatrixSlice {
		if m.riskMatrixSlice[i].ID == r.RiskMatrixID {
			found = true
			break // no need to scan the rest once the parent matrix is found
		}
	}
	if !found {
		return errors.New("risk matrix not found")
	}
	created := time.Now()
	// NOTE(review): IDs have one-second resolution, so two risks added to the
	// same matrix within the same second collide — confirm this is acceptable.
	id := fmt.Sprintf("%d_%d", r.RiskMatrixID, created.Unix())
	m.risks = append(m.risks, Risk{
		ID:             id,
		RiskMatrixID:   r.RiskMatrixID,
		Name:           r.Name,
		Probability:    r.Probability,
		Impact:         r.Impact,
		Classification: r.Classification,
		Strategy:       r.Strategy,
	})
	return nil
}
// GetRiskMatrix returns the risk matrix with the specified ID, or an error if
// no matrix with that ID exists.
func (m *Storage) GetRiskMatrix(id int) (listing.RiskMatrix, error) {
	for i := range m.riskMatrixSlice {
		if m.riskMatrixSlice[i].ID != id {
			continue
		}
		rm := m.riskMatrixSlice[i]
		return listing.RiskMatrix{
			ID:      rm.ID,
			Path:    rm.Path,
			Project: rm.Project,
			// Fix: DateCreated was previously not copied here, unlike in
			// GetRiskMatrixByPath and GetAllRiskMatrix.
			DateCreated:     rm.DateCreated,
			MatImgWidth:     rm.MatImgWidth,
			MatImgHeight:    rm.MatImgHeight,
			MatNrRows:       rm.MatNrRows,
			MatNrCols:       rm.MatNrCols,
			MatSize:         rm.MatSize,
			BorderWidth:     rm.BorderWidth,
			Multiple:        rm.Multiple,
			WordHeight:      rm.WordHeight,
			WordWidth:       rm.WordWidth,
			HighRiskColor:   rm.HighRiskColor,
			MediumRiskColor: rm.MediumRiskColor,
			LowRiskColor:    rm.LowRiskColor,
			RiskLabelColor:  rm.RiskLabelColor,
			BorderColor:     rm.BorderColor,
		}, nil
	}
	return listing.RiskMatrix{}, errors.New("risk matrix not found")
}
// GetRiskMatrixByPath returns the risk matrix with the specified image path, or
// an error if no matrix with that path exists.
func (m *Storage) GetRiskMatrixByPath(p string) (listing.RiskMatrix, error) {
	for i := range m.riskMatrixSlice {
		if m.riskMatrixSlice[i].Path != p {
			continue
		}
		rm := m.riskMatrixSlice[i]
		return listing.RiskMatrix{
			ID:              rm.ID,
			Path:            rm.Path,
			Project:         rm.Project,
			DateCreated:     rm.DateCreated,
			MatImgWidth:     rm.MatImgWidth,
			MatImgHeight:    rm.MatImgHeight,
			MatNrRows:       rm.MatNrRows,
			MatNrCols:       rm.MatNrCols,
			MatSize:         rm.MatSize,
			BorderWidth:     rm.BorderWidth,
			Multiple:        rm.Multiple,
			WordHeight:      rm.WordHeight,
			WordWidth:       rm.WordWidth,
			HighRiskColor:   rm.HighRiskColor,
			MediumRiskColor: rm.MediumRiskColor,
			LowRiskColor:    rm.LowRiskColor,
			RiskLabelColor:  rm.RiskLabelColor,
			BorderColor:     rm.BorderColor,
		}, nil
	}
	// fmt.Errorf replaces errors.New(fmt.Sprintf(...)) (staticcheck S1028).
	return listing.RiskMatrix{}, fmt.Errorf("risk matrix not found by the given path: %v", p)
}
// GetAllRiskMatrix returns all the risk matrix stored in the database as
// listing DTOs. The result is nil when the repository is empty.
func (m *Storage) GetAllRiskMatrix() []listing.RiskMatrix {
	var list []listing.RiskMatrix
	for i := range m.riskMatrixSlice {
		riskMatrix := listing.RiskMatrix{
			ID:              m.riskMatrixSlice[i].ID,
			Path:            m.riskMatrixSlice[i].Path,
			Project:         m.riskMatrixSlice[i].Project,
			DateCreated:     m.riskMatrixSlice[i].DateCreated,
			MatImgWidth:     m.riskMatrixSlice[i].MatImgWidth,
			MatImgHeight:    m.riskMatrixSlice[i].MatImgHeight,
			MatNrRows:       m.riskMatrixSlice[i].MatNrRows,
			MatNrCols:       m.riskMatrixSlice[i].MatNrCols,
			MatSize:         m.riskMatrixSlice[i].MatSize,
			BorderWidth:     m.riskMatrixSlice[i].BorderWidth,
			Multiple:        m.riskMatrixSlice[i].Multiple,
			WordHeight:      m.riskMatrixSlice[i].WordHeight,
			WordWidth:       m.riskMatrixSlice[i].WordWidth,
			HighRiskColor:   m.riskMatrixSlice[i].HighRiskColor,
			MediumRiskColor: m.riskMatrixSlice[i].MediumRiskColor,
			LowRiskColor:    m.riskMatrixSlice[i].LowRiskColor,
			RiskLabelColor:  m.riskMatrixSlice[i].RiskLabelColor,
			BorderColor:     m.riskMatrixSlice[i].BorderColor,
		}
		list = append(list, riskMatrix)
	}
	return list
}

// GetAllRisks returns all the risks for a given risk matrix. The result is nil
// when the matrix has no risks (or does not exist).
func (m *Storage) GetAllRisks(riskMatrixID int) []listing.Risk {
	var list []listing.Risk
	for i := range m.risks {
		if m.risks[i].RiskMatrixID == riskMatrixID {
			r := listing.Risk{
				ID:             m.risks[i].ID,
				RiskMatrixID:   m.risks[i].RiskMatrixID,
				Name:           m.risks[i].Name,
				Probability:    m.risks[i].Probability,
				Impact:         m.risks[i].Impact,
				Classification: m.risks[i].Classification,
				Strategy:       m.risks[i].Strategy,
			}
			list = append(list, r)
		}
	}
	return list
}
// GetRisk returns the risk with the given ID, or an error if it does not exist.
func (m *Storage) GetRisk(riskID string) (listing.Risk, error) {
	for i := range m.risks {
		if m.risks[i].ID != riskID {
			continue
		}
		return listing.Risk{
			ID:             m.risks[i].ID,
			RiskMatrixID:   m.risks[i].RiskMatrixID,
			Name:           m.risks[i].Name,
			Probability:    m.risks[i].Probability,
			Impact:         m.risks[i].Impact,
			Classification: m.risks[i].Classification,
			Strategy:       m.risks[i].Strategy,
		}, nil
	}
	// fmt.Errorf replaces errors.New(fmt.Sprintf(...)) (staticcheck S1028);
	// trailing period dropped per Go error-string convention.
	return listing.Risk{}, fmt.Errorf("risk not found by the given ID: %v", riskID)
}
// DeleteRisk deletes the risk with the specified ID. The swap-delete means the
// relative order of the remaining risks is not preserved. It returns an error
// if the risk does not exist.
func (m *Storage) DeleteRisk(riskID string) error {
	for i := range m.risks {
		if m.risks[i].ID == riskID {
			last := len(m.risks) - 1
			m.risks[i] = m.risks[last]
			m.risks = m.risks[:last]
			return nil
		}
	}
	// fmt.Errorf replaces errors.New(fmt.Sprintf(...)) (staticcheck S1028).
	return fmt.Errorf("risk not found by the given ID: %v", riskID)
}
// DeleteMatrix deletes a risk matrix with the specified ID
func (m *Storage) DeleteRiskMatrix(riskMatrixID int) error {
for i := range m.riskMatrixSlice {
if m.riskMatrixSlice[i].ID == riskMatrixID {
// we remove the RiskMatrix image
wd, err := os.Getwd()
if err != nil {
return err
}
path := filepath.Join(wd, "media", m.riskMatrixSlice[i].Path)
_ = os.Remove(path)
// we remove the data of the matrix stored in memory
m.riskMatrixSlice[i] = m.riskMatrixSlice[len(m.riskMatrixSlice)-1]
m.riskMatrixSlice = m.riskMatrixSlice[:len(m.riskMatrixSlice)-1]
return nil
}
}
return errors.New(fmt.Sprintf("risk matrix not found by the give ID: %v.", riskMatrixID))
} | pkg/storage/memory/repository.go | 0.638046 | 0.453988 | repository.go | starcoder |
package stats
// StatType is the type of different statistics
type StatType int

// Different statistics for different benchmark tools.
const (
	None StatType = iota
	OPS
	P99
	P95
	TPS
	QPS
)

// statTypeNames maps each statistic to its display name. None (and any unknown
// value) has no entry and therefore renders as the empty string.
var statTypeNames = map[StatType]string{
	OPS: "ops",
	P99: "p99(ms)",
	P95: "p95(ms)",
	TPS: "tps",
	QPS: "qps",
}

// String returns the display name of the statistic, or "" for unknown values.
func (s StatType) String() string {
	return statTypeNames[s]
}

// Record saves the values of the different statistics collected in one sample.
type Record struct {
	OPS float64
	TPS float64
	QPS float64
	P99 float64
	P95 float64
}

// Value returns the value of the requested statistic; unknown statistic types
// yield 0.0.
func (r *Record) Value(tp StatType) float64 {
	switch tp {
	case OPS:
		return r.OPS
	case TPS:
		return r.TPS
	case QPS:
		return r.QPS
	case P99:
		return r.P99
	case P95:
		return r.P95
	}
	return 0.0
}
// DBStat holds all statistics in one benchmark
type DBStat struct {
	// Name is the unique name used in plotting later.
	Name string
	// DB is the database name
	DB string
	// Workload is the benchmark workload name
	Workload string
	// Summary holds the final output summary record.
	// The key of the map is the operation in the benchmark.
	// E.g, in go-ycsb, the operation may be INSERT, READ
	Summary map[string]*Record
	// Progress holds the in-progress records collected while benchmarking
	Progress map[string][]*Record
}

// Operations returns all the operations in the test. The order of the result
// is not deterministic (Go map iteration order is randomized).
func (s *DBStat) Operations() []string {
	names := make([]string, 0, len(s.Summary))
	// gofmt/vet idiom: the blank identifier in `for name, _ := range` is redundant.
	for name := range s.Summary {
		names = append(names, name)
	}
	return names
}
// NewDBStat creates a DBStat.
// We assume we put all logs in one unique directory in each benchmark.
// E.g, we can use Git commit as the parent directory for benchmarking special version,
// use datetime for benchmarking different databases.
// If pathName is empty, we will use db as the name of DBStat.
//
// NOTE(review): the pathName parameter — and the fallback described above — is
// currently unused: Name is always set from name. Confirm whether the fallback
// should be implemented or the doc comment corrected.
func NewDBStat(name string, db string, workload string, pathName string) *DBStat {
	s := DBStat{
		Name:     name,
		Summary:  make(map[string]*Record, 1),
		Progress: make(map[string][]*Record, 1),
		DB:       db,
		Workload: workload,
	}
	return &s
}
// DBStats is the array of DBStat.
type DBStats []*DBStat
func (a DBStats) Len() int { return len(a) }
func (a DBStats) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a DBStats) Less(i, j int) bool {
if a[i].DB < a[j].DB {
return true
} else if a[i].Name < a[j].Name {
return true
}
return false
} | pkg/stats/stats.go | 0.695958 | 0.473231 | stats.go | starcoder |
package collision
import (
"github.com/galaco/kero/framework/physics/collision/bullet"
"github.com/galaco/studiomodel"
"github.com/go-gl/mathgl/mgl32"
)
// CollisionBodyType discriminates the concrete collision-body implementations.
type CollisionBodyType int8

// Supported collision body types.
const (
	RigidBodyTypeConvexHull             = CollisionBodyType(0)
	RigidBodyTypeOrientedBoundingBox    = CollisionBodyType(1)
	RigidBodyTypeAxisAlignedBoundingBox = CollisionBodyType(2)
)

// RigidBody is the common interface of physics bodies backed by the Bullet engine.
type RigidBody interface {
	CollisionBodyType() CollisionBodyType       // which concrete body kind this is
	BulletHandle() bullet.BulletRigidBodyHandle // underlying Bullet rigid-body handle
	GetTransform() mgl32.Mat4                   // current model matrix
	SetTransform(transform mgl32.Mat4)          // overwrite the model matrix
	GetTranslation() mgl32.Vec3                 // translation component
	GetOrientation() mgl32.Quat                 // rotation component as a quaternion
	ApplyImpulse(impulse mgl32.Vec3, localPoint mgl32.Vec3)
}
// ConvexHull is a rigid body backed by a Bullet convex-hull collision shape.
type ConvexHull struct {
	handle bullet.BulletRigidBodyHandle // opaque handle into the Bullet world
}

// CollisionBodyType identifies this body as a convex hull.
func (body *ConvexHull) CollisionBodyType() CollisionBodyType {
	return RigidBodyTypeConvexHull
}

// BulletHandle returns the underlying Bullet rigid-body handle.
func (body *ConvexHull) BulletHandle() bullet.BulletRigidBodyHandle {
	return body.handle
}

// GetTransform returns the body's current model matrix from Bullet.
func (body *ConvexHull) GetTransform() mgl32.Mat4 {
	return bullet.BulletGetOpenGLMatrix(body.handle)
}

// SetTransform overwrites the body's model matrix in Bullet.
func (body *ConvexHull) SetTransform(transform mgl32.Mat4) {
	bullet.BulletSetOpenGLMatrix(body.handle, transform)
}

// GetTranslation returns the body's translation component.
func (body *ConvexHull) GetTranslation() mgl32.Vec3 {
	return bullet.BulletGetTranslation(body.handle)
}

// GetOrientation returns the body's rotation as a quaternion.
func (body *ConvexHull) GetOrientation() mgl32.Quat {
	return bullet.BulletGetOrientation(body.handle)
}

// ApplyImpulse implements the core.RigidBody interface
func (body *ConvexHull) ApplyImpulse(impulse mgl32.Vec3, localPoint mgl32.Vec3) {
	bullet.BulletApplyImpulse(body.handle, impulse, localPoint)
}

// NewConvexHull creates an empty convex-hull rigid body with mass 1.
func NewConvexHull() *ConvexHull {
	cbody := new(ConvexHull)
	h := bullet.BulletNewConvexHullShape()
	cbody.handle = bullet.NewRigidBody(1, h)
	return cbody
}

// NewConvexHullFromExistingShape wraps an already-created Bullet collision
// shape in a rigid body with the given mass.
func NewConvexHullFromExistingShape(mass float32, shape bullet.BulletCollisionShapeHandle) *ConvexHull {
	cbody := new(ConvexHull)
	cbody.handle = bullet.NewRigidBody(mass, shape)
	return cbody
}

// NewSphericalHull creates a spherical rigid body of the given radius with mass 1.
func NewSphericalHull(radius float64) *ConvexHull {
	cbody := new(ConvexHull)
	h := bullet.BulletNewSphericalHullShape(radius)
	cbody.handle = bullet.NewRigidBody(1, h)
	return cbody
}
// OrientedBoundingBox is a placeholder OBB collision body; it currently has no
// fields or physics backing.
type OrientedBoundingBox struct {
}

// CollisionBodyType identifies this body as an oriented bounding box.
func (body *OrientedBoundingBox) CollisionBodyType() CollisionBodyType {
	return RigidBodyTypeOrientedBoundingBox
}

// AxisAlignedBoundingBox is a simple AABB described by its min/max corners.
type AxisAlignedBoundingBox struct {
	Mins, Maxs mgl32.Vec3
}

// CollisionBodyType identifies this body as an axis-aligned bounding box.
func (body *AxisAlignedBoundingBox) CollisionBodyType() CollisionBodyType {
	return RigidBodyTypeAxisAlignedBoundingBox
}
func NewAxisAlignedBoundingBox(m *studiomodel.StudioModel) *AxisAlignedBoundingBox {
return &AxisAlignedBoundingBox{
Mins: m.Mdl.Header.ViewBBMin,
Maxs: m.Mdl.Header.ViewBBMax,
}
} | framework/physics/collision/rigidBody.go | 0.790085 | 0.507507 | rigidBody.go | starcoder |
package pythonimports
import "fmt"
// Kind represents the classification for a node
type Kind int

const (
	// None represents the zero value for Kind
	None Kind = iota
	// Function is the classification for nodes representing functions
	Function
	// Type is the classification for nodes representing types
	Type
	// Module is the classification for nodes representing modules
	Module
	// Descriptor is the classification for nodes representing descriptors
	Descriptor
	// Object is the classification for nodes that do not fall into any other category
	Object
	// Root is the classification for the virtual root node (graph.Root)
	Root
)

// kindNames maps each printable Kind to its canonical string form. None and
// Root deliberately have no entry: they render as "Kind(N)" and do not parse.
var kindNames = map[Kind]string{
	Function:   "function",
	Type:       "type",
	Module:     "module",
	Descriptor: "descriptor",
	Object:     "object",
}

// String converts a Kind to a string; values with no canonical name render as
// "Kind(N)".
func (c Kind) String() string {
	if name, ok := kindNames[c]; ok {
		return name
	}
	return fmt.Sprintf("Kind(%d)", c)
}

// ParseKind converts a string to a Kind; unrecognized strings yield None.
func ParseKind(s string) Kind {
	for kind, name := range kindNames {
		if name == s {
			return kind
		}
	}
	return None
}
// NodeInfo represents information associated with an entry in the Python import graph.
type NodeInfo struct {
	ID int64 `json:"id"`
	// CanonicalName is the fully qualified dotted name; it may be empty for
	// anonymous nodes (see Node.String).
	CanonicalName DottedPath `json:"canonical_name"`
	Classification Kind `json:"classification"`
	Origin Origin
}

// A Node represents information associated with an entry in the Python import graph
type Node struct {
	NodeInfo
	// Type is the node for this node's type; it may be nil or refer to the
	// node itself (callers guard against both).
	Type *Node
	// Members maps attribute names to member nodes; individual values may be nil.
	Members map[string]*Node
	// Bases is nil if Classification != Type, and it corresponds to __bases__ in python.
	Bases []*Node
}
// NewNode creates a node with the given dotted name and kind. The member map is
// initialized empty (non-nil) so members can be added immediately.
func NewNode(name string, kind Kind) *Node {
	node := &Node{Members: map[string]*Node{}}
	node.CanonicalName = NewDottedPath(name)
	node.Classification = kind
	return node
}
// HasMember checks whether the given node is one of n's direct members.
// A nil node is never considered a member.
func (n *Node) HasMember(m *Node) bool {
	if m == nil {
		return false
	}
	for _, member := range n.Members {
		if member == m {
			return true
		}
	}
	return false
}
// HasUnresolvedBase checks whether a node has any unresolved (nil) parent.
// Only a node of `Type` kind can possibly have this function return true.
//
// NOTE(review): unlike attr/attrs, this recursion through Type has no depth
// bound; a multi-node cycle of Type links would loop — confirm the graph
// forbids such cycles.
func (n *Node) HasUnresolvedBase() bool {
	for _, base := range n.Bases {
		if base == nil {
			return true
		}
	}
	// Self-referential type links are skipped to avoid trivial infinite recursion.
	if n.Type != nil && n.Type != n {
		return n.Type.HasUnresolvedBase()
	}
	return false
}

// maxAttrDepth bounds attribute-lookup recursion through types and bases,
// guarding against cycles in the graph.
const maxAttrDepth = 10

// attr resolves attr on n: the node's own members first, then its type
// (recursively), then each base, mirroring Python's attribute lookup order.
// depth counts recursion steps; the lookup gives up at maxAttrDepth.
func (n *Node) attr(attr string, depth int) (*Node, bool) {
	if depth == maxAttrDepth {
		return nil, false
	}
	depth++
	if node, exists := n.Members[attr]; exists {
		return node, true
	}
	if n.Type != nil && n.Type != n {
		if node, found := n.Type.attr(attr, depth); found {
			return node, true
		}
	}
	for _, base := range n.Bases {
		// Unresolved (nil) bases are skipped.
		if base == nil {
			continue
		}
		if base != n {
			if node, found := base.attr(attr, depth); found {
				return node, true
			}
		}
	}
	return nil, false
}

// Attr evaluates an attribute on this node by first looking in the members map for this
// node, and then looking within the type of this node, just like python does.
func (n *Node) Attr(attr string) (*Node, bool) {
	return n.attr(attr, 0)
}
// AttrOf finds the attribute with the given name whose node has the given kind.
// It reports false when the attribute is missing or has a different kind.
//
// Fix: the condition previously read `found || node.Classification == kind`,
// which dereferenced a nil node when the attribute was not found, and ignored
// the requested kind when it was found. The nil check also guards against
// members that are present but nil.
func (n *Node) AttrOf(attr string, kind Kind) (*Node, bool) {
	node, found := n.Attr(attr)
	if found && node != nil && node.Classification == kind {
		return node, true
	}
	return nil, false
}
// String returns a short string representation of the node; it is safe to call
// on a nil receiver.
func (n *Node) String() string {
	if n == nil {
		return "{Node=nil}"
	}
	if n.CanonicalName.Empty() {
		// Anonymous node: fall back to the type's name, then the numeric ID.
		if n.Type == nil || n.Type.CanonicalName.Empty() {
			return fmt.Sprintf("{Node %d}", n.ID)
		}
		return fmt.Sprintf("{instance of %s}", n.Type.CanonicalName.String())
	}
	return n.CanonicalName.String()
}

// attrs accumulates attribute names from the node's members, its type and its
// bases, up to maxAttrDepth steps. A name may appear more than once if it is
// defined in several places.
func (n *Node) attrs(steps int) []string {
	if steps >= maxAttrDepth {
		return nil
	}
	steps++
	var attributes []string
	for attr := range n.Members {
		attributes = append(attributes, attr)
	}
	if n.Type != nil && n.Type != n {
		attributes = append(attributes, n.Type.attrs(steps)...)
	}
	for _, base := range n.Bases {
		if base != nil && base != n {
			attributes = append(attributes, base.attrs(steps)...)
		}
	}
	return attributes
}

// Attrs returns all the attributes of a node.
func (n *Node) Attrs() []string {
	return n.attrs(0)
}

// attrsByKind groups attribute names by the Kind of their node, walking
// members, type and bases like attrs. Attributes whose member node is nil are
// grouped under None.
func (n *Node) attrsByKind(steps int) map[Kind][]string {
	if steps >= maxAttrDepth {
		return nil
	}
	steps++
	byKind := make(map[Kind][]string)
	for attr, node := range n.Members {
		if node != nil {
			byKind[node.Classification] = append(byKind[node.Classification], attr)
		} else {
			byKind[None] = append(byKind[None], attr)
		}
	}
	if n.Type != nil && n.Type != n {
		for kind, attrs := range n.Type.attrsByKind(steps) {
			byKind[kind] = append(byKind[kind], attrs...)
		}
	}
	for _, base := range n.Bases {
		if base != nil && base != n {
			for kind, attrs := range base.attrsByKind(steps) {
				byKind[kind] = append(byKind[kind], attrs...)
			}
		}
	}
	return byKind
}

// AttrsByKind returns the node's attributes by kind.
func (n *Node) AttrsByKind() map[Kind][]string {
	return n.attrsByKind(0)
}
// ShallowCopy makes a shallow copy of this node. This includes a copy of the
// members and bases, but none of the referenced nodes are copied.
func (n *Node) ShallowCopy() *Node {
clone := *n
clone.Bases = make([]*Node, len(n.Bases))
for i, base := range n.Bases {
clone.Bases[i] = base
}
clone.Members = make(map[string]*Node)
for attr, child := range n.Members {
clone.Members[attr] = child
}
return &clone
} | kite-go/lang/python/pythonimports/node.go | 0.723993 | 0.516047 | node.go | starcoder |
// Package murmur3 provides functions implementing the Murmur3 hashing algorithm.
// The ClojureJVM version imported the Guava Murmur3 implementation
// and made some changes.
// For ClojureCLR and here, I copied the API stubs, then implemented the API
// based on the algorithm description at
// http://en.wikipedia.org/wiki/MurmurHash.
// See also: https://code.google.com/p/smhasher/wiki/MurmurHash3. </p
// The implementations of HashUnordered and HashOrdered taken from ClojureJVM.
package murmur3
// Constants from the reference MurmurHash3 x86 32-bit algorithm.
const seed uint32 = 0
const c1 uint32 = 0xcc9e2d51
const c2 uint32 = 0x1b873593
const r1 uint32 = 15
const r2 uint32 = 13
const m uint32 = 5
const n uint32 = 0xe6546b64

// The public interface

// HashInt32 computes a hash value for an int32
func HashInt32(input int32) uint32 {
	return HashUInt32(uint32(input))
}

// HashInt64 computes a hash value for an int64
func HashInt64(input int64) uint32 {
	return HashUInt64(uint64(input))
}

// HashUInt32 computes a hash value for a uint32.
// Zero hashes to zero by design.
func HashUInt32(input uint32) uint32 {
	if input == 0 {
		return 0
	}
	key := MixKey(input)
	hash := MixHash(seed, key)
	return Finalize(hash, 4)
}

// HashUInt64 computes a hash value for a uint64 by mixing in the low 32 bits
// followed by the high 32 bits. Zero hashes to zero by design.
func HashUInt64(input uint64) uint32 {
	if input == 0 {
		return 0
	}
	low := uint32(input)
	high := uint32(input >> 32)
	key := MixKey(low)
	hash := MixHash(seed, key)
	key = MixKey(high)
	hash = MixHash(hash, key)
	return Finalize(hash, 8)
}

// HashString computes the MurmurHash3 x86 32-bit hash (seed 0) of the bytes of
// input. It now matches the reference implementation's test vectors, e.g.
// HashString("hello") == 0x248bfa47.
//
// Fixes relative to the previous version:
//   - bytes were shifted before widening (input[i]<<8 on a byte is always 0),
//     so only the lowest byte of each 4-byte block contributed to the hash;
//   - per the reference algorithm, the 1-3 byte tail is only XOR-folded into
//     the hash after key mixing (not run through the full MixHash step), and
//     there is no tail block when the length is a multiple of four.
func HashString(input string) uint32 {
	hash := seed
	length := len(input)

	// Body: consume the input four bytes at a time, little-endian.
	for i := 3; i < length; i += 4 {
		key := uint32(input[i-3]) |
			uint32(input[i-2])<<8 |
			uint32(input[i-1])<<16 |
			uint32(input[i])<<24
		key = MixKey(key)
		hash = MixHash(hash, key)
	}

	// Tail: fold in the remaining 1-3 bytes, if any.
	if rem := length % 4; rem != 0 {
		var key uint32
		switch rem {
		case 3:
			key = uint32(input[length-3]) |
				uint32(input[length-2])<<8 |
				uint32(input[length-1])<<16
		case 2:
			key = uint32(input[length-2]) |
				uint32(input[length-1])<<8
		case 1:
			key = uint32(input[length-1])
		}
		hash ^= MixKey(key)
	}

	return Finalize(hash, int32(length))
}

// MixKey scrambles the bits of a 32-bit block before it is mixed into the hash.
func MixKey(key uint32) uint32 {
	key *= c1
	key = rotateLeft(key, r1)
	key *= c2
	return key
}

// MixHash mixes a pre-scrambled 32-bit block into the running hash value.
func MixHash(hash uint32, key uint32) uint32 {
	hash ^= key
	hash = rotateLeft(hash, r2)
	hash = hash*m + n
	return hash
}

// Finalize forces all bits of a hash block to avalanche; length is the number
// of input bytes that were hashed.
func Finalize(hash uint32, length int32) uint32 {
	hash ^= uint32(length)
	hash ^= hash >> 16
	hash *= 0x85ebca6b
	hash ^= hash >> 13
	hash *= 0xc2b2ae35
	hash ^= hash >> 16
	return hash
}

// FinalizeCollHash forces all bits of a hash block to avalanche, and adds in a
// length count (used when hashing collections).
func FinalizeCollHash(hash uint32, count int32) uint32 {
	h1 := seed
	k1 := MixKey(hash)
	h1 = MixHash(h1, k1)
	return Finalize(h1, count)
}

// rotateLeft rotates x left by n bits.
func rotateLeft(x uint32, n uint32) uint32 {
	return (x << n) | (x >> (32 - n))
}
package lexer
import "lookageek.com/ode/token"
// Lexer holds the input string, the current tokenizing position
// and next position, and the current char
type Lexer struct {
	input        string
	position     int  // index of the character currently held in ch
	readPosition int  // index of the next character to read
	ch           byte // current character; 0 (NUL) marks end of input
}

// New returns a Lexer primed on the first character of input.
func New(input string) *Lexer {
	l := &Lexer{input: input}
	l.readChar()
	return l
}

// readChar reads the character as a byte at readPosition,
// stores it into ch, and moves both readPosition & position.
// Past the end of input, ch is set to 0 (NUL).
func (l *Lexer) readChar() {
	if l.readPosition >= len(l.input) {
		l.ch = 0
	} else {
		l.ch = l.input[l.readPosition]
	}
	l.position = l.readPosition
	l.readPosition += 1
}
// NextToken parses the current position in the lexer, creates the token
// object based on the character, and leaves the lexer positioned on the
// character following the token.
func (l *Lexer) NextToken() token.Token {
	var tok token.Token

	l.skipWhitespace()

	switch l.ch {
	case '=':
		// Two-character token "==" vs single-character "=".
		if l.peekChar() == '=' {
			ch := l.ch
			l.readChar()
			literal := string(ch) + string(l.ch)
			tok = token.Token{Type: token.EQ, Literal: literal}
		} else {
			tok = newToken(token.ASSIGN, l.ch)
		}
	case ';':
		tok = newToken(token.SEMICOLON, l.ch)
	case '(':
		tok = newToken(token.LPAREN, l.ch)
	case ')':
		tok = newToken(token.RPAREN, l.ch)
	case ',':
		tok = newToken(token.COMMA, l.ch)
	case '+':
		tok = newToken(token.PLUS, l.ch)
	case '{':
		tok = newToken(token.LBRACE, l.ch)
	case '}':
		tok = newToken(token.RBRACE, l.ch)
	case '-':
		tok = newToken(token.MINUS, l.ch)
	case '/':
		tok = newToken(token.DIVIDE, l.ch)
	case '*':
		tok = newToken(token.MULTIPLY, l.ch)
	case '<':
		tok = newToken(token.LESSTHAN, l.ch)
	case '>':
		tok = newToken(token.GREATERTHAN, l.ch)
	case '!':
		// Two-character token "!=" vs single-character "!".
		if l.peekChar() == '=' {
			ch := l.ch
			l.readChar()
			literal := string(ch) + string(l.ch)
			tok = token.Token{Type: token.NOTEQ, Literal: literal}
		} else {
			tok = newToken(token.NEGATION, l.ch)
		}
	case '"':
		tok.Type = token.STRING
		tok.Literal = l.readString()
	case 0:
		// NUL sentinel from readChar marks end of input.
		tok.Literal = ""
		tok.Type = token.EOF
	case '[':
		tok = newToken(token.LBRACKET, l.ch)
	case ']':
		tok = newToken(token.RBRACKET, l.ch)
	case ':':
		tok = newToken(token.COLON, l.ch)
	default:
		// Multi-character tokens (identifiers/keywords and integers) return
		// early because readIdentifier/readNumber already advance the lexer
		// past the token's last character.
		if isLetter(l.ch) {
			tok.Literal = l.readIdentifier()
			tok.Type = token.LookupIdent(tok.Literal)
			return tok
		} else if isDigit(l.ch) {
			tok.Type = token.INT
			tok.Literal = l.readNumber()
			return tok
		} else {
			tok = newToken(token.ILLEGAL, l.ch)
		}
	}

	l.readChar()
	return tok
}
// skipWhitespace advances the lexer past spaces, tabs and line breaks, since
// ode is a whitespace-agnostic language.
func (l *Lexer) skipWhitespace() {
	for {
		switch l.ch {
		case ' ', '\t', '\n', '\r':
			l.readChar()
		default:
			return
		}
	}
}
// readIdentifier reads a word (either an identifier or a keyword) until
// it hits a non-letter character, and returns that word as a string.
// The lexer ends up positioned on the first character after the word.
func (l *Lexer) readIdentifier() string {
	position := l.position
	for isLetter(l.ch) {
		l.readChar()
	}
	return l.input[position:l.position]
}

// readNumber reads the number digit by digit until it ends, moving the lexer
// cursor, and returns the digits as a string. The lexer ends up positioned on
// the first character after the number.
func (l *Lexer) readNumber() string {
	// todo - does not support decimal numbers
	position := l.position
	for isDigit(l.ch) {
		l.readChar()
	}
	return l.input[position:l.position]
}
// peekChar returns the character at readPosition without advancing the lexer,
// or 0 (NUL) when the end of input has been reached.
func (l *Lexer) peekChar() byte {
	// Idiom: early return instead of else-after-return.
	if l.readPosition >= len(l.input) {
		return 0
	}
	return l.input[l.readPosition]
}
// readString consumes the characters after the opening '"' up to (but
// excluding) the closing quote and returns them. There is no escape-sequence
// support; an unterminated string is ended by the NUL end-of-input sentinel.
func (l *Lexer) readString() string {
	position := l.position + 1
	for {
		l.readChar()
		if l.ch == '"' || l.ch == 0 {
			break
		}
	}
	return l.input[position:l.position]
}

// newToken builds a single-character token of the given type.
func newToken(tokenType token.TokenType, ch byte) token.Token {
	return token.Token{Type: tokenType, Literal: string(ch)}
}
// isLetter reports whether ch may appear in an identifier: ASCII letters and
// the underscore.
func isLetter(ch byte) bool {
	// todo does not support having numbers in identifiers
	switch {
	case ch >= 'a' && ch <= 'z':
		return true
	case ch >= 'A' && ch <= 'Z':
		return true
	case ch == '_':
		return true
	}
	return false
}

// isDigit reports whether ch is an ASCII decimal digit.
func isDigit(ch byte) bool {
	return ch >= '0' && ch <= '9'
}
package spider
// DataProvider allows alternate implementations of a backing service that provides configuration data. Ex. Zookeeper, Consul, etcd.
type DataProvider interface {
	Get(key string) (*Node, error)                        // Get an existing key's currently set value.
	Set(key string, value []byte) error                   // Change an existing key's value with an optional version. Set -1 if no versioning is required.
	Create(key string, data []byte) error                 // Create a key with data at that node.
	Delete(key string) error                              // Delete a key from the configuration.
	DeleteR(key string) error                             // Delete a key and its configuration path recursively.
	Watch(key string) (<-chan interface{}, error)         // Watch returns the broadcast channel used for watching changes on a given configuration node.
	WatchChildren(key string) (<-chan interface{}, error) // WatchChildren monitors for changes to the children of a given node.
	List(key string) ([]string, error)                    // List the child nodes stored under this key.
	Exists(key string) (bool, error)                      // Exists checks if a node has data or if the node exists.
}

// Data represents the payload stored at a configuration node.
type Data interface{}

// NewConfigTree initializes the configuration tree and returns the root node:
// the "/" path with an empty (non-nil) child list.
func NewConfigTree() *Node {
	return &Node{
		Path:     "/",
		Children: []*Node{},
	}
}
// // Create adds or set the given data to the path.
// // This will create all sub-nodes as necessary.
// // `path` is relative to the current node.
// // Returns the newly created node.
// func (n *Node) Create(path string, data Data) *Node {
// newNode := &Node{
// Path: gopath.Join(n.Path, path),
// Parent: n,
// }
// switch data.(type) {
// case int, int16, int32, int64, uint16, uint32, uint64,
// float32, float64, []byte, string:
// newNode.Data = data
// case []string, [][]byte, []interface{}, []Data,
// map[string]string, map[string]interface{}, map[string]Data, map[string][]byte,
// map[string]int, map[string]int16, map[string]int32, map[string]int64,
// map[string]uint16, map[string]uint32, map[string]uint64:
// }
// return newNode
// }
package funcs
import (
"bytes"
)
// float64Ge implements the ">=" comparison for float64 operands, one of many
// mechanically repeated comparison types in this file.
//
// NOTE(review): the ".gen.go" suffix suggests this file is generated; prefer
// regenerating over hand-editing, and consider adding the standard
// "// Code generated ... DO NOT EDIT." header so tooling recognizes it.
type float64Ge struct {
	V1 Float64
	V2 Float64
}

// Eval evaluates both operands and compares them with >=.
func (this *float64Ge) Eval() bool {
	return this.V1.Eval() >= this.V2.Eval()
}

// Register the comparison under the "ge" function name.
func init() {
	Register("ge", new(float64Ge))
}
type float32Ge struct {
V1 Float32
V2 Float32
}
func (this *float32Ge) Eval() bool {
return this.V1.Eval() >= this.V2.Eval()
}
func init() {
Register("ge", new(float32Ge))
}
type int64Ge struct {
V1 Int64
V2 Int64
}
func (this *int64Ge) Eval() bool {
return this.V1.Eval() >= this.V2.Eval()
}
func init() {
Register("ge", new(int64Ge))
}
type uint64Ge struct {
V1 Uint64
V2 Uint64
}
func (this *uint64Ge) Eval() bool {
return this.V1.Eval() >= this.V2.Eval()
}
func init() {
Register("ge", new(uint64Ge))
}
type int32Ge struct {
V1 Int32
V2 Int32
}
func (this *int32Ge) Eval() bool {
return this.V1.Eval() >= this.V2.Eval()
}
func init() {
Register("ge", new(int32Ge))
}
type uint32Ge struct {
V1 Uint32
V2 Uint32
}
func (this *uint32Ge) Eval() bool {
return this.V1.Eval() >= this.V2.Eval()
}
func init() {
Register("ge", new(uint32Ge))
}
type bytesGe struct {
V1 Bytes
V2 Bytes
}
func (this *bytesGe) Eval() bool {
return bytes.Compare(this.V1.Eval(), this.V2.Eval()) >= 0
}
func init() {
Register("ge", new(bytesGe))
}
type float64Gt struct {
V1 Float64
V2 Float64
}
func (this *float64Gt) Eval() bool {
return this.V1.Eval() > this.V2.Eval()
}
func init() {
Register("gt", new(float64Gt))
}
type float32Gt struct {
V1 Float32
V2 Float32
}
func (this *float32Gt) Eval() bool {
return this.V1.Eval() > this.V2.Eval()
}
func init() {
Register("gt", new(float32Gt))
}
type int64Gt struct {
V1 Int64
V2 Int64
}
func (this *int64Gt) Eval() bool {
return this.V1.Eval() > this.V2.Eval()
}
func init() {
Register("gt", new(int64Gt))
}
type uint64Gt struct {
V1 Uint64
V2 Uint64
}
func (this *uint64Gt) Eval() bool {
return this.V1.Eval() > this.V2.Eval()
}
func init() {
Register("gt", new(uint64Gt))
}
type int32Gt struct {
V1 Int32
V2 Int32
}
func (this *int32Gt) Eval() bool {
return this.V1.Eval() > this.V2.Eval()
}
func init() {
Register("gt", new(int32Gt))
}
type uint32Gt struct {
V1 Uint32
V2 Uint32
}
func (this *uint32Gt) Eval() bool {
return this.V1.Eval() > this.V2.Eval()
}
func init() {
Register("gt", new(uint32Gt))
}
type bytesGt struct {
V1 Bytes
V2 Bytes
}
func (this *bytesGt) Eval() bool {
return bytes.Compare(this.V1.Eval(), this.V2.Eval()) > 0
}
func init() {
Register("gt", new(bytesGt))
}
type float64Le struct {
V1 Float64
V2 Float64
}
func (this *float64Le) Eval() bool {
return this.V1.Eval() <= this.V2.Eval()
}
func init() {
Register("le", new(float64Le))
}
type float32Le struct {
V1 Float32
V2 Float32
}
func (this *float32Le) Eval() bool {
return this.V1.Eval() <= this.V2.Eval()
}
func init() {
Register("le", new(float32Le))
}
type int64Le struct {
V1 Int64
V2 Int64
}
func (this *int64Le) Eval() bool {
return this.V1.Eval() <= this.V2.Eval()
}
func init() {
Register("le", new(int64Le))
}
type uint64Le struct {
V1 Uint64
V2 Uint64
}
func (this *uint64Le) Eval() bool {
return this.V1.Eval() <= this.V2.Eval()
}
func init() {
Register("le", new(uint64Le))
}
type int32Le struct {
V1 Int32
V2 Int32
}
func (this *int32Le) Eval() bool {
return this.V1.Eval() <= this.V2.Eval()
}
func init() {
Register("le", new(int32Le))
}
type uint32Le struct {
V1 Uint32
V2 Uint32
}
func (this *uint32Le) Eval() bool {
return this.V1.Eval() <= this.V2.Eval()
}
func init() {
Register("le", new(uint32Le))
}
type bytesLe struct {
V1 Bytes
V2 Bytes
}
func (this *bytesLe) Eval() bool {
return bytes.Compare(this.V1.Eval(), this.V2.Eval()) <= 0
}
func init() {
Register("le", new(bytesLe))
}
type float64Lt struct {
V1 Float64
V2 Float64
}
func (this *float64Lt) Eval() bool {
return this.V1.Eval() < this.V2.Eval()
}
func init() {
Register("lt", new(float64Lt))
}
type float32Lt struct {
V1 Float32
V2 Float32
}
func (this *float32Lt) Eval() bool {
return this.V1.Eval() < this.V2.Eval()
}
func init() {
Register("lt", new(float32Lt))
}
type int64Lt struct {
V1 Int64
V2 Int64
}
func (this *int64Lt) Eval() bool {
return this.V1.Eval() < this.V2.Eval()
}
func init() {
Register("lt", new(int64Lt))
}
type uint64Lt struct {
V1 Uint64
V2 Uint64
}
func (this *uint64Lt) Eval() bool {
return this.V1.Eval() < this.V2.Eval()
}
func init() {
Register("lt", new(uint64Lt))
}
type int32Lt struct {
V1 Int32
V2 Int32
}
func (this *int32Lt) Eval() bool {
return this.V1.Eval() < this.V2.Eval()
}
func init() {
Register("lt", new(int32Lt))
}
type uint32Lt struct {
V1 Uint32
V2 Uint32
}
func (this *uint32Lt) Eval() bool {
return this.V1.Eval() < this.V2.Eval()
}
func init() {
Register("lt", new(uint32Lt))
}
type bytesLt struct {
V1 Bytes
V2 Bytes
}
func (this *bytesLt) Eval() bool {
return bytes.Compare(this.V1.Eval(), this.V2.Eval()) < 0
}
func init() {
Register("lt", new(bytesLt))
}
type float64Eq struct {
V1 Float64
V2 Float64
}
func (this *float64Eq) Eval() bool {
return this.V1.Eval() == this.V2.Eval()
}
func init() {
Register("eq", new(float64Eq))
}
type float32Eq struct {
V1 Float32
V2 Float32
}
func (this *float32Eq) Eval() bool {
return this.V1.Eval() == this.V2.Eval()
}
func init() {
Register("eq", new(float32Eq))
}
type int64Eq struct {
V1 Int64
V2 Int64
}
func (this *int64Eq) Eval() bool {
return this.V1.Eval() == this.V2.Eval()
}
func init() {
Register("eq", new(int64Eq))
}
type uint64Eq struct {
V1 Uint64
V2 Uint64
}
func (this *uint64Eq) Eval() bool {
return this.V1.Eval() == this.V2.Eval()
}
func init() {
Register("eq", new(uint64Eq))
}
type int32Eq struct {
V1 Int32
V2 Int32
}
func (this *int32Eq) Eval() bool {
return this.V1.Eval() == this.V2.Eval()
}
func init() {
Register("eq", new(int32Eq))
}
type uint32Eq struct {
V1 Uint32
V2 Uint32
}
func (this *uint32Eq) Eval() bool {
return this.V1.Eval() == this.V2.Eval()
}
func init() {
Register("eq", new(uint32Eq))
}
type boolEq struct {
V1 Bool
V2 Bool
}
func (this *boolEq) Eval() bool {
return this.V1.Eval() == this.V2.Eval()
}
func init() {
Register("eq", new(boolEq))
}
type stringEq struct {
V1 String
V2 String
}
func (this *stringEq) Eval() bool {
return this.V1.Eval() == this.V2.Eval()
}
func init() {
Register("eq", new(stringEq))
}
type bytesEq struct {
V1 Bytes
V2 Bytes
}
func (this *bytesEq) Eval() bool {
return bytes.Equal(this.V1.Eval(), this.V2.Eval())
}
func init() {
Register("eq", new(bytesEq))
} | funcs/compare.gen.go | 0.742608 | 0.541894 | compare.gen.go | starcoder |
package placement
import (
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/pd/server/core"
)
// RegionFit is the result of fitting a region's peers to rule list.
// All peers are divided into corresponding rules according to the matching
// rules, and the remaining Peers are placed in the OrphanPeers list.
type RegionFit struct {
RuleFits []*RuleFit
OrphanPeers []*metapb.Peer
}
// IsSatisfied returns if the rules are properly satisfied.
// It means all Rules are fulfilled and there is no orphan peers.
func (f *RegionFit) IsSatisfied() bool {
if len(f.RuleFits) == 0 {
return false
}
for _, r := range f.RuleFits {
if !r.IsSatisfied() {
return false
}
}
return len(f.OrphanPeers) == 0
}
// GetRuleFit returns the RuleFit that contains the peer.
func (f *RegionFit) GetRuleFit(peerID uint64) *RuleFit {
for _, rf := range f.RuleFits {
for _, p := range rf.Peers {
if p.GetId() == peerID {
return rf
}
}
}
return nil
}
// CompareRegionFit determines the superiority of 2 fits.
// It returns 1 when the first fit result is better.
func CompareRegionFit(a, b *RegionFit) int {
for i := range a.RuleFits {
if cmp := compareRuleFit(a.RuleFits[i], b.RuleFits[i]); cmp != 0 {
return cmp
}
}
switch {
case len(a.OrphanPeers) < len(b.OrphanPeers):
return 1
case len(a.OrphanPeers) > len(b.OrphanPeers):
return -1
default:
return 0
}
}
// RuleFit is the result of fitting status of a Rule.
type RuleFit struct {
Rule *Rule
// Peers of the Region that are divided to this Rule.
Peers []*metapb.Peer
// PeersWithDifferentRole is subset of `Peers`. It contains all Peers that have
// different Role from configuration (the Role can be migrated to target role
// by scheduling).
PeersWithDifferentRole []*metapb.Peer
// IsolationLevel indicates at which level of labeling these Peers are
// isolated. A larger value indicates a higher isolation level.
IsolationLevel int
}
// IsSatisfied returns if the rule is properly satisfied.
func (f *RuleFit) IsSatisfied() bool {
return len(f.Peers) == f.Rule.Count && len(f.PeersWithDifferentRole) == 0
}
func compareRuleFit(a, b *RuleFit) int {
switch {
case len(a.Peers) < len(b.Peers):
return -1
case len(a.Peers) > len(b.Peers):
return 1
case len(a.PeersWithDifferentRole) > len(b.PeersWithDifferentRole):
return -1
case len(a.PeersWithDifferentRole) < len(b.PeersWithDifferentRole):
return 1
case a.IsolationLevel < b.IsolationLevel:
return -1
case a.IsolationLevel > b.IsolationLevel:
return 1
default:
return 0
}
}
// FitRegion tries to fit peers of a region to the rules.
func FitRegion(stores core.StoreSetInformer, region *core.RegionInfo, rules []*Rule) *RegionFit {
return nil
} | server/schedule/placement/fit.go | 0.763219 | 0.425784 | fit.go | starcoder |
package champ
import (
math "github.com/chewxy/math32"
"github.com/r4stl1n/micro-hal/code/pkg/champ/cbase"
"github.com/r4stl1n/micro-hal/code/pkg/champ/cstructs"
"time"
)
type LegController struct {
quadBase *cbase.QuadBase
trajectoryPlanners [4]*TrajectoryPlanner
phaseGenerator *PhaseGenerator
leftFrontTrajectoryPlanner *TrajectoryPlanner
rightFrontTrajectoryPlanner *TrajectoryPlanner
leftBackTrajectoryPlanner *TrajectoryPlanner
rightBackTrajectoryPlanner *TrajectoryPlanner
}
func (legController *LegController) Init(quadBase *cbase.QuadBase, currentTime time.Time) *LegController {
*legController = LegController{
quadBase: quadBase,
trajectoryPlanners: [4]*TrajectoryPlanner{},
phaseGenerator: new(PhaseGenerator).Init(quadBase, currentTime),
leftFrontTrajectoryPlanner: new(TrajectoryPlanner).Init(quadBase.LeftFront),
rightFrontTrajectoryPlanner: new(TrajectoryPlanner).Init(quadBase.RightFront),
leftBackTrajectoryPlanner: new(TrajectoryPlanner).Init(quadBase.LeftBack),
rightBackTrajectoryPlanner: new(TrajectoryPlanner).Init(quadBase.RightBack),
}
legController.trajectoryPlanners[0] = legController.leftFrontTrajectoryPlanner
legController.trajectoryPlanners[1] = legController.rightFrontTrajectoryPlanner
legController.trajectoryPlanners[2] = legController.leftBackTrajectoryPlanner
legController.trajectoryPlanners[3] = legController.rightBackTrajectoryPlanner
return legController
}
func (legController *LegController) capVelocities(velocity float32, minVelocity float32, maxVelocity float32) float32 {
if velocity < minVelocity {
return minVelocity
}
if velocity > maxVelocity {
return maxVelocity
}
return velocity
}
func (legController *LegController) TransformLeg(quadLeg *cbase.QuadLeg, stepLength float32, rotation float32,
stepX float32, stepY float32, theta float32) (float32, float32) {
transformedStance := quadLeg.ZeroStance()
transformedStance = transformedStance.Translate(stepX, stepY, 0.0)
transformedStance = transformedStance.RotateZ(theta)
zeroStance := quadLeg.ZeroStance()
deltaX := transformedStance.X() - zeroStance.X()
deltaY := transformedStance.Y() - zeroStance.Y()
stepLength = math.Sqrt(math.Pow(deltaX, 2)+math.Pow(deltaY, 2)) * 2.0
rotation = math.Atan2(deltaY, deltaX)
return stepLength, rotation
}
func (legController *LegController) RaibertHeuristic(stanceDuration float32, targetVelocity float32) float32 {
return (stanceDuration / 2.0) * targetVelocity
}
func (legController *LegController) VelocityCommand(footPositions [4]cstructs.Transformation, velocities cstructs.Velocities,
currentTime time.Time) ([4]cstructs.Transformation, cstructs.Velocities) {
velocities.Linear.SetX(
legController.capVelocities(
velocities.Linear.X(),
-legController.quadBase.GaitConfig().MaxLinearVelocity.X(),
legController.quadBase.GaitConfig().MaxLinearVelocity.X()))
velocities.Linear.SetY(
legController.capVelocities(
velocities.Linear.Y(),
-legController.quadBase.GaitConfig().MaxLinearVelocity.Y(),
legController.quadBase.GaitConfig().MaxLinearVelocity.Y()))
velocities.Angular.SetZ(
legController.capVelocities(
velocities.Angular.Z(),
-legController.quadBase.GaitConfig().MaxAngularVelocity,
legController.quadBase.GaitConfig().MaxAngularVelocity))
tangentialVelocity := velocities.Angular.Z() * legController.quadBase.LeftFront.CenterToNominal()
velocity := math.Sqrt(math.Pow(velocities.Linear.X(), 2) + math.Pow(velocities.Linear.Y()+tangentialVelocity, 2))
stepX := legController.RaibertHeuristic(legController.quadBase.GaitConfig().StanceDuration, velocities.Linear.X())
stepY := legController.RaibertHeuristic(legController.quadBase.GaitConfig().StanceDuration, velocities.Linear.Y())
stepTheta := legController.RaibertHeuristic(legController.quadBase.GaitConfig().StanceDuration, tangentialVelocity)
theta := math.Sin((stepTheta/2)/legController.quadBase.LeftFront.CenterToNominal()) * 2
stepLengths := [4]float32{0.0, 0.0, 0.0, 0.0}
trajectoryRotations := [4]float32{0.0, 0.0, 0.0, 0.0}
sumOfSteps := float32(0.0)
for i := 0; i < 4; i++ {
stepLengths[i], trajectoryRotations[i] = legController.TransformLeg(legController.quadBase.Legs[i], stepLengths[i], trajectoryRotations[i], stepX, stepY, theta)
sumOfSteps = sumOfSteps + stepLengths[i]
}
legController.phaseGenerator.Run(velocity, sumOfSteps/4.0, currentTime)
for i := 0; i < 4; i++ {
footPositions[i] = legController.trajectoryPlanners[i].Generate(footPositions[i], stepLengths[i], trajectoryRotations[i],
legController.phaseGenerator.swingPhaseSignal[i], legController.phaseGenerator.stancePhaseSignal[i])
}
return footPositions, velocities
} | code/pkg/champ/leg-controller.go | 0.729038 | 0.447581 | leg-controller.go | starcoder |
package main
import (
"container/list"
"fmt"
"github.com/arbovm/levenshtein"
)
// DistanceFunc a distance function which calculates the distance between two
// strings. This distance function must satisfy a set of axioms in order to
// ensure it's well-behaved. See https://en.wikipedia.org/wiki/Metric_space.
type DistanceFunc func(a, b string) int
// Results list of results
type Results []Result
// Result holds a single result
type Result struct {
Distance int
Value string
}
// Node represents a single node in the BK-Tree
type Node struct {
Value string
Children map[int]*Node
}
// BKTree a data structure specialized to index data in a metric space
type BKTree struct {
Root *Node
distFunc DistanceFunc
}
// Add adds a value into the tree
func (t *BKTree) Add(node string) {
if t.Root == nil {
t.Root = &Node{Value: node, Children: make(map[int]*Node)}
return
}
current, children := t.Root.Value, t.Root.Children
for {
dist := t.distFunc(node, current)
target := children[dist]
if target == nil {
children[dist] = &Node{Value: node, Children: make(map[int]*Node)}
break
}
current, children = target.Value, target.Children
}
}
// Search the tree and return all words closest to a given query word.
func (t *BKTree) Search(node string, radius int) Results {
if t.Root == nil {
return nil
}
var results Results
candidates := list.New()
candidates.PushBack(t.Root)
for e := candidates.Front(); e != nil; e = e.Next() {
v := e.Value.(*Node)
candidate, children := v.Value, v.Children
dist := t.distFunc(node, candidate)
if dist <= radius {
results = append(results, Result{Distance: dist, Value: candidate})
}
low, high := dist-radius, dist+radius
for d, c := range children {
if low <= d && d <= high {
candidates.PushBack(c)
}
}
}
return results
}
// NewBKTree creates a new BK-Tree instance
func NewBKTree(distFunc DistanceFunc) *BKTree {
return &BKTree{distFunc: distFunc}
}
func main() {
tree := NewBKTree(levenshtein.Distance)
words := []string{"some", "soft", "same", "mole", "soda", "salmon"}
for _, w := range words {
tree.Add(w)
}
for _, result := range tree.Search("bole", 2) {
fmt.Println("Value:", result.Value, "Distance:", result.Distance)
}
} | bktree.go | 0.753829 | 0.470433 | bktree.go | starcoder |
package lshensemble
import (
"math"
)
// Computes the expected number of false positives caused by using the
// upper bound set size of the set size interval given by indexes l and u.
func computeNFP(l, u int, sizes, counts []int) float64 {
if l > u {
panic("l must be less or equal to u")
}
var sum float64
for i := l; i <= u; i++ {
sum += float64(sizes[u]-sizes[i]) / float64(sizes[u]) * float64(counts[i])
}
return sum
}
// Computes the matrix of expected number of false positives for all possible
// sub-intervals of the complete sorted domain of size sizes.
func computeNFPs(sizes, counts []int) [][]float64 {
nfps := make([][]float64, len(sizes))
for l := 0; l < len(sizes); l++ {
nfps[l] = make([]float64, len(sizes))
for u := l; u < len(sizes); u++ {
nfps[l][u] = computeNFP(l, u, sizes, counts)
}
}
return nfps
}
// The solution of the sub-problem: total NFPs and the upper bound index of
// the 2nd right-most partition.
type subSolution struct {
totalNFPs float64
u1 int
}
// Computes the optimal partitions given the complete domain of sizes and
// computed number of expected false positives for all sub-intervals.
func computeBestPartitions(numPart int, sizes []int, nfps [][]float64) ([]Partition, float64) {
if numPart < 2 {
panic("numPart cannot be less than 2")
}
if numPart > len(sizes) {
panic("numPart cannot be greater than number of sizes")
}
if numPart == 2 {
// If the number of partitions is 2, then simply find the upper bound of
// the first partition, so that the partitioning produces the smallest
// total expected number of false positives.
minTotalNFPs := math.MaxFloat64
var u int
for u1 := 0; u1 < len(sizes)-1; u1++ {
totalNFPs := nfps[0][u1] + nfps[u1+1][len(sizes)-1]
if totalNFPs < minTotalNFPs {
minTotalNFPs = totalNFPs
u = u1
}
}
return []Partition{
Partition{sizes[0], sizes[u]},
Partition{sizes[u+1], sizes[len(sizes)-1]},
}, minTotalNFPs
}
// Initialize the matrix for storing the sub-problems' solutions.
// The first axis is the upper bound index of the sub-problem, in which
// an optimal partitioning of p number of partitions is to be computed.
// The second axis is the index of p, see p2i below, starting from 2.
sols := make([][]subSolution, numPart-2)
for i := range sols {
sols[i] = make([]subSolution, len(sizes))
}
// p is the number of partitions in a sub-problem.
// p2i translates the number of partitions into the index in the matrix.
var p2i = func(p int) int { return p - 2 }
for p := 2; p < numPart; p++ {
// The possible upper bound indexes of sub problems start from
// p - 1 which is the smallest index to have p partitions.
for u := p - 1; u < len(sizes); u++ {
minTotalNFPs := math.MaxFloat64
var u1Best int
if p == 2 {
for u1 := 0; u1 < u; u1++ {
totalNFPs := nfps[0][u1] + nfps[u1+1][u]
if totalNFPs < minTotalNFPs {
minTotalNFPs = totalNFPs
u1Best = u1
}
}
} else {
for u1 := (p - 1) - 1; u1 < u; u1++ {
totalNFPs := sols[p2i(p-1)][u1].totalNFPs + nfps[u1+1][u]
if totalNFPs < minTotalNFPs {
minTotalNFPs = totalNFPs
u1Best = u1
}
}
}
sols[p2i(p)][u] = subSolution{minTotalNFPs, u1Best}
}
}
// Initialize partitions.
partitions := make([]Partition, 0)
minTotalNFPs := math.MaxFloat64
// Find where to place the right-most partition -- find the upper bound
// index of the 2nd right-most partition.
var u int
p := numPart
for u1 := (p - 1) - 1; u1 < len(sizes)-1; u1++ {
totalNFPs := sols[p2i(p-1)][u1].totalNFPs + nfps[u1+1][len(sizes)-1]
if totalNFPs < minTotalNFPs {
u = u1
minTotalNFPs = totalNFPs
}
}
partitions = append(partitions, Partition{sizes[u+1], sizes[len(sizes)-1]})
p--
// Back-track to find the best partitions using the computed results of
// sub-probelms.
for p > 1 {
// For each sub-problem given p and upper bound index u,
// find the upper bound index (u1) of the 2nd right most partition.
u1 := sols[p2i(p)][u].u1
partitions = append(partitions, Partition{sizes[u1+1], sizes[u]})
// Move on to a smaller sub-problem.
u = u1
p--
}
// The last partition is the first one.
partitions = append(partitions, Partition{sizes[0], sizes[u]})
// Reverse the order so the first comes first.
for i, j := 0, len(partitions)-1; i < j; i, j = i+1, j-1 {
partitions[i], partitions[j] = partitions[j], partitions[i]
}
return partitions, minTotalNFPs
}
// optimalPartitions takes a set size distribution and number of partitions
// as input and returns the optimal partition boundaries (inclusive) for
// minimizing number of false positives.
func optimalPartitions(sizes, counts []int, numPart int) []Partition {
if numPart < 2 {
return []Partition{Partition{sizes[0], sizes[len(sizes)-1]}}
}
if numPart >= len(sizes) {
// If the number of partitions is greater or equal to the complete
// domain of set sizes, return the perfect partitions.
partitions := make([]Partition, len(sizes))
for i := range sizes {
partitions[i] = Partition{sizes[i], sizes[i]}
}
return partitions
}
nfps := computeNFPs(sizes, counts)
partitions, _ := computeBestPartitions(numPart, sizes, nfps)
return partitions
} | optimal_partition.go | 0.755727 | 0.630927 | optimal_partition.go | starcoder |
package ontograph
import (
"errors"
"fmt"
"strconv"
"time"
)
// GenericLiteral represents a generic literal term (i.e. containing a value and a corresponding datatype).
// Generic literals can be parsed into specific literals using the corresponding methods.
type GenericLiteral struct {
value Term
datatype OntologyDatatype
}
// NewGenericLiteral creates a new generic literal from the given term.
func NewGenericLiteral(t Term) *GenericLiteral {
return &GenericLiteral{
value: t,
datatype: OntologyDatatype{
URI: t.Datatype(),
},
}
}
// Term returns the term representation of the literal.
func (l *GenericLiteral) Term() Term {
return l.value
}
// Type returns the ontological datatype of the literal.
func (l *GenericLiteral) Type() OntologyDatatype {
return l.datatype
}
// Value returns a string representation of the value of the literal.
func (l *GenericLiteral) Value() string {
return l.value.Value()
}
// String returns a string representation of the whole literal in NTriple format.
// This method is equivalent to `l.Term().String()`.
func (l *GenericLiteral) String() string {
return l.value.String()
}
// ErrLiteralTypeMismatch is raised when a generic literal is attempted to be converted into a specific literal of a certain datatype, but the datatype does not match.
var ErrLiteralTypeMismatch error = errors.New("The literal is not of the expected type")
// **************
// * xsd:string *
// **************
type XSDStringLiteral string
func (l XSDStringLiteral) Generic() GenericLiteral {
t := NewLiteralTerm(string(l), "", XSDString)
return *NewGenericLiteral(t)
}
// ToXSDString parses the literal into a xsd:string literal. If the literal is not of type xsd:string, an `ErrLiteralTypeMismatch` is returned.
func (l *GenericLiteral) ToXSDString() (XSDStringLiteral, error) {
// Check for type mismatch
if l.Type().URI != XSDString {
return "", ErrLiteralTypeMismatch
}
// Parse literal
return XSDStringLiteral(l.Value()), nil
}
// ***************
// * xsd:integer *
// ***************
type XSDIntegerLiteral int
func (l XSDIntegerLiteral) Generic() GenericLiteral {
t := NewLiteralTerm(strconv.Itoa(int(l)), "", XSDInteger)
return *NewGenericLiteral(t)
}
// ***************
// * xsd:decimal *
// ***************
type XSDDecimalLiteral float64
func (l XSDDecimalLiteral) Generic() GenericLiteral {
t := NewLiteralTerm(fmt.Sprintf("%f", float64(l)), "", XSDDecimal)
return *NewGenericLiteral(t)
}
// ToXSDDecimalLiteral parses the literal into a xsd:decimal literal. If the literal is not a number, an `ErrLiteralTypeMismatch` is returned.
func (l *GenericLiteral) ToXSDDecimal() (XSDDecimalLiteral, error) {
// Check for type mismatch
if l.Type().URI != XSDDecimal {
return 0, ErrLiteralTypeMismatch
}
// Parse literal
val, err := strconv.ParseFloat(l.Value(), 64)
if err != nil {
return 0, err
}
return XSDDecimalLiteral(val), nil
}
// ***************
// * xsd:boolean *
// ***************
type XSDBooleanLiteral bool
func (l XSDBooleanLiteral) Generic() GenericLiteral {
t := NewLiteralTerm(strconv.FormatBool(bool(l)), "", XSDBoolean)
return *NewGenericLiteral(t)
}
// ToXSDBoolean parses the literal into a xsd:boolean literal. If the literal is not of type xsd:boolean, an `ErrLiteralTypeMismatch` is returned.
func (l *GenericLiteral) ToXSDBoolean() (XSDBooleanLiteral, error) {
// Check for type mismatch
if l.Type().URI != XSDBoolean {
return false, ErrLiteralTypeMismatch
}
// Parse literal
val, err := strconv.ParseBool(l.Value())
if err != nil {
return false, err
}
return XSDBooleanLiteral(val), nil
}
// ***************
// * xsd:anyURI *
// ***************
type XSDAnyURILiteral string
func (l XSDAnyURILiteral) Generic() GenericLiteral {
t := NewLiteralTerm(string(l), "", XSDAnyURI)
return *NewGenericLiteral(t)
}
// ToXSDAnyURI parses the literal into a xsd:anyURI literal. If the literal is not of type xsd:anyURI, an `ErrLiteralTypeMismatch` is returned.
func (l *GenericLiteral) ToXSDAnyURI() (XSDAnyURILiteral, error) {
// Check for type mismatch
if l.Type().URI != XSDAnyURI {
return "", ErrLiteralTypeMismatch
}
// Parse literal
return XSDAnyURILiteral(l.Value()), nil
}
// ***************
// * xsd:dateTime *
// ***************
type XSDDateTimeLiteral time.Time
func (l XSDDateTimeLiteral) Generic() GenericLiteral {
t := NewLiteralTerm(time.Time(l).Format(time.RFC3339), "", XSDDateTime)
return *NewGenericLiteral(t)
}
// ToXSDDateTime parses the literal into a xsd:dateTime literal. If the literal is not of type xsd:dateTime, an `ErrLiteralTypeMismatch` is returned. The value must be formatted according to the RFC3339 standard.
func (l *GenericLiteral) ToXSDDateTime() (XSDDateTimeLiteral, error) {
var t time.Time
// Check for type mismatch
if l.Type().URI != XSDDateTime {
return XSDDateTimeLiteral(t), ErrLiteralTypeMismatch
}
// Parse literal
t, err := time.Parse(time.RFC3339, l.Value())
if err != nil {
return XSDDateTimeLiteral(t), err
}
return XSDDateTimeLiteral(t), nil
} | ontology_literal.go | 0.819244 | 0.609757 | ontology_literal.go | starcoder |
package ring
import (
"encoding/binary"
"errors"
"math/bits"
)
// Poly is the structure that contains the coefficients of a polynomial.
type Poly struct {
Coeffs [][]uint64 // Coefficients in CRT representation
}
// NewPoly creates a new polynomial with N coefficients set to zero and nbModuli moduli.
func NewPoly(N, nbModuli int) (pol *Poly) {
pol = new(Poly)
pol.Coeffs = make([][]uint64, nbModuli)
for i := 0; i < nbModuli; i++ {
pol.Coeffs[i] = make([]uint64, N)
}
return
}
// Degree returns the number of coefficients of the polynomial, which equals the degree of the Ring cyclotomic polynomial.
func (pol *Poly) Degree() int {
return len(pol.Coeffs[0])
}
// LenModuli returns the current number of moduli.
func (pol *Poly) LenModuli() int {
return len(pol.Coeffs)
}
// Level returns the current number of moduli minus 1.
func (pol *Poly) Level() int {
return len(pol.Coeffs) - 1
}
// Zero sets all coefficients of the target polynomial to 0.
func (pol *Poly) Zero() {
for i := range pol.Coeffs {
p0tmp := pol.Coeffs[i]
for j := range p0tmp {
p0tmp[j] = 0
}
}
}
// CopyNew creates a new polynomial p1 which is a copy of the target polynomial.
func (pol *Poly) CopyNew() (p1 *Poly) {
p1 = new(Poly)
p1.Coeffs = make([][]uint64, len(pol.Coeffs))
for i := range pol.Coeffs {
p1.Coeffs[i] = make([]uint64, len(pol.Coeffs[i]))
p0tmp, p1tmp := pol.Coeffs[i], p1.Coeffs[i]
for j := range pol.Coeffs[i] {
p1tmp[j] = p0tmp[j]
}
}
return p1
}
// Copy copies the coefficients of p0 on p1 within the given Ring. It requires p1 to be at least as big p0.
func (r *Ring) Copy(p0, p1 *Poly) {
if p0 != p1 {
for i := range r.Modulus {
p0tmp, p1tmp := p0.Coeffs[i], p1.Coeffs[i]
for j := 0; j < r.N; j++ {
p1tmp[j] = p0tmp[j]
}
}
}
}
// CopyLvl copies the coefficients of p0 on p1 within the given Ring for the moduli from 0 to level.
// Requires p1 to be as big as the target Ring.
func (r *Ring) CopyLvl(level int, p0, p1 *Poly) {
if p0 != p1 {
for i := 0; i < level+1; i++ {
p0tmp, p1tmp := p0.Coeffs[i], p1.Coeffs[i]
for j := 0; j < r.N; j++ {
p1tmp[j] = p0tmp[j]
}
}
}
}
// Copy copies the coefficients of p1 on the target polynomial.
func (pol *Poly) Copy(p1 *Poly) {
if pol != p1 {
for i := range p1.Coeffs {
p0tmp, p1tmp := pol.Coeffs[i], p1.Coeffs[i]
for j := range p1.Coeffs[i] {
p0tmp[j] = p1tmp[j]
}
}
}
}
// SetCoefficients sets the coefficients of the polynomial directly from a CRT format (double slice).
func (pol *Poly) SetCoefficients(coeffs [][]uint64) {
for i := range coeffs {
for j := range coeffs[i] {
pol.Coeffs[i][j] = coeffs[i][j]
}
}
}
// GetCoefficients returns a new double slice that contains the coefficients of the polynomial.
func (pol *Poly) GetCoefficients() (coeffs [][]uint64) {
coeffs = make([][]uint64, len(pol.Coeffs))
for i := range pol.Coeffs {
coeffs[i] = make([]uint64, len(pol.Coeffs[i]))
for j := range pol.Coeffs[i] {
coeffs[i][j] = pol.Coeffs[i][j]
}
}
return
}
// WriteCoeffsTo converts a matrix of coefficients to a byte array.
func WriteCoeffsTo(pointer, N, numberModuli int, coeffs [][]uint64, data []byte) (int, error) {
tmp := N << 3
for i := 0; i < numberModuli; i++ {
for j := 0; j < N; j++ {
binary.BigEndian.PutUint64(data[pointer+(j<<3):pointer+((j+1)<<3)], coeffs[i][j])
}
pointer += tmp
}
return pointer, nil
}
// WriteTo writes the given poly to the data array.
// It returns the number of written bytes, and the corresponding error, if it occurred.
func (pol *Poly) WriteTo(data []byte) (int, error) {
N := pol.Degree()
numberModuli := pol.LenModuli()
if len(data) < pol.GetDataLen(true) {
// The data is not big enough to write all the information
return 0, errors.New("data array is too small to write ring.Poly")
}
data[0] = uint8(bits.Len64(uint64(N)) - 1)
data[1] = uint8(numberModuli)
cnt, err := WriteCoeffsTo(2, N, numberModuli, pol.Coeffs, data)
return cnt, err
}
// WriteTo32 writes the given poly to the data array.
// It returns the number of written bytes, and the corresponding error, if it occurred.
func (pol *Poly) WriteTo32(data []byte) (int, error) {
N := pol.Degree()
numberModuli := pol.LenModuli()
if len(data) < pol.GetDataLen32(true) {
//The data is not big enough to write all the information
return 0, errors.New("data array is too small to write ring.Poly")
}
data[0] = uint8(bits.Len64(uint64(N)) - 1)
data[1] = uint8(numberModuli)
cnt, err := WriteCoeffsTo32(2, N, numberModuli, pol.Coeffs, data)
return cnt, err
}
// WriteCoeffsTo32 converts a matrix of coefficients to a byte array.
func WriteCoeffsTo32(pointer, N, numberModuli int, coeffs [][]uint64, data []byte) (int, error) {
tmp := N << 2
for i := 0; i < numberModuli; i++ {
for j := 0; j < N; j++ {
binary.BigEndian.PutUint32(data[pointer+(j<<2):pointer+((j+1)<<2)], uint32(coeffs[i][j]))
}
pointer += tmp
}
return pointer, nil
}
// GetDataLen32 returns the number of bytes the polynomial will take when written to data.
// It can take into account meta data if necessary.
func (pol *Poly) GetDataLen32(WithMetadata bool) (cnt int) {
cnt = (pol.LenModuli() * pol.Degree()) << 2
if WithMetadata {
cnt += 2
}
return
}
// WriteCoeffs writes the coefficients to the given data array.
// It fails if the data array is not big enough to contain the ring.Poly
func (pol *Poly) WriteCoeffs(data []byte) (int, error) {
return WriteCoeffsTo(0, pol.Degree(), pol.LenModuli(), pol.Coeffs, data)
}
// GetDataLen returns the number of bytes the polynomial will take when written to data.
// It can take into account meta data if necessary.
func (pol *Poly) GetDataLen(WithMetadata bool) (cnt int) {
cnt = (pol.LenModuli() * pol.Degree()) << 3
if WithMetadata {
cnt += 2
}
return
}
// DecodeCoeffs converts a byte array to a matrix of coefficients.
func DecodeCoeffs(pointer, N, numberModuli int, coeffs [][]uint64, data []byte) (int, error) {
tmp := N << 3
for i := 0; i < numberModuli; i++ {
for j := 0; j < N; j++ {
coeffs[i][j] = binary.BigEndian.Uint64(data[pointer+(j<<3) : pointer+((j+1)<<3)])
}
pointer += tmp
}
return pointer, nil
}
// DecodeCoeffsNew converts a byte array to a matrix of coefficients.
func DecodeCoeffsNew(pointer, N, numberModuli int, coeffs [][]uint64, data []byte) (int, error) {
tmp := N << 3
for i := 0; i < numberModuli; i++ {
coeffs[i] = make([]uint64, N)
for j := 0; j < N; j++ {
coeffs[i][j] = binary.BigEndian.Uint64(data[pointer+(j<<3) : pointer+((j+1)<<3)])
}
pointer += tmp
}
return pointer, nil
}
// MarshalBinary encodes the target polynomial on a slice of bytes.
func (pol *Poly) MarshalBinary() (data []byte, err error) {
data = make([]byte, pol.GetDataLen(true))
_, err = pol.WriteTo(data)
return
}
// UnmarshalBinary decodes a slice of byte on the target polynomial.
func (pol *Poly) UnmarshalBinary(data []byte) (err error) {
N := uint64(1 << data[0])
numberModulies := uint64(data[1])
pointer := uint64(2)
if ((uint64(len(data)) - pointer) >> 3) != N*numberModulies {
return errors.New("invalid polynomial encoding")
}
if _, err = pol.DecodePolyNew(data); err != nil {
return err
}
return nil
}
// DecodePolyNew decodes a slice of bytes in the target polynomial returns the number of bytes
// decoded.
func (pol *Poly) DecodePolyNew(data []byte) (pointer int, err error) {
N := int(1 << data[0])
numberModulies := int(data[1])
pointer = 2
if pol.Coeffs == nil {
pol.Coeffs = make([][]uint64, numberModulies)
}
if pointer, err = DecodeCoeffsNew(pointer, N, numberModulies, pol.Coeffs, data); err != nil {
return pointer, err
}
return pointer, nil
}
// DecodePolyNew32 decodes a slice of bytes in the target polynomial returns the number of bytes
// decoded.
func (pol *Poly) DecodePolyNew32(data []byte) (pointer int, err error) {
N := int(1 << data[0])
numberModulies := int(data[1])
pointer = 2
if pol.Coeffs == nil {
pol.Coeffs = make([][]uint64, numberModulies)
}
if pointer, err = DecodeCoeffsNew32(pointer, N, numberModulies, pol.Coeffs, data); err != nil {
return pointer, err
}
return pointer, nil
}
// DecodeCoeffsNew32 converts a byte array to a matrix of coefficients.
func DecodeCoeffsNew32(pointer, N, numberModuli int, coeffs [][]uint64, data []byte) (int, error) {
tmp := N << 2
for i := 0; i < numberModuli; i++ {
coeffs[i] = make([]uint64, N)
for j := 0; j < N; j++ {
coeffs[i][j] = uint64(binary.BigEndian.Uint32(data[pointer+(j<<2) : pointer+((j+1)<<2)]))
}
pointer += tmp
}
return pointer, nil
} | ring/ring_poly.go | 0.781997 | 0.539711 | ring_poly.go | starcoder |
package main
import (
"crypto/sha256"
"encoding/binary"
"fmt"
)
// pc is a lookup table mapping every byte value to its population count
// (number of set bits). It is filled in by init.
var pc [256]byte
// SHA256 is the length in bytes of a SHA-256 digest; the bitComp
// functions below operate on fixed [SHA256]byte arrays.
const (
	SHA256 = 32
)
// init precomputes the popcount table so that pc[i] holds the number of
// set bits in i, using the recurrence popcount(i) = popcount(i/2) + (i&1).
func init() {
	for i := 0; i < len(pc); i++ {
		pc[i] = pc[i>>1] + byte(i&1)
	}
}
// bitCount returns the number of set bits in x using the branch-free
// SWAR (SIMD-within-a-register) popcount: fold bit pairs, then nibbles,
// then bytes, and finally sum the byte counts into the low byte.
func bitCount(x uint64) int {
	const (
		m1 = 0x5555555555555555 // every other bit
		m2 = 0x3333333333333333 // 2-bit groups
		m4 = 0x0f0f0f0f0f0f0f0f // 4-bit groups
	)
	x -= (x >> 1) & m1
	x = (x & m2) + ((x >> 2) & m2)
	x = (x + (x >> 4)) & m4
	x += x >> 8
	x += x >> 16
	x += x >> 32
	// The total fits in 7 bits (max 64), so mask the low byte.
	return int(x & 0x7f)
}
// bitComp1 returns the Hamming distance between c1 and c2. It walks the
// 32-byte digests in 8-byte steps, pulls each word out with
// binary.LittleEndian.Uint64, XORs the pair, and reduces the result with
// the SWAR popcount inside the loop body.
func bitComp1(c1, c2 [SHA256]byte) int {
	total := 0
	for off := 0; off < len(c1); off += 8 {
		v := binary.LittleEndian.Uint64(c1[off:off+8]) ^ binary.LittleEndian.Uint64(c2[off:off+8])
		v -= (v >> 1) & 0x5555555555555555
		v = (v & 0x3333333333333333) + ((v >> 2) & 0x3333333333333333)
		v = (v + (v >> 4)) & 0x0f0f0f0f0f0f0f0f
		v += v >> 8
		v += v >> 16
		v += v >> 32
		total += int(v & 0x7f)
	}
	return total
}
// bitComp2 returns the Hamming distance between c1 and c2 with the four
// 64-bit word comparisons fully unrolled (no loop), each word extracted
// via binary.LittleEndian.Uint64 and reduced with the SWAR popcount.
func bitComp2(c1, c2 [SHA256]byte) int {
	const (
		m1 = 0x5555555555555555 // every other bit
		m2 = 0x3333333333333333 // 2-bit groups
		m4 = 0x0f0f0f0f0f0f0f0f // 4-bit groups
	)
	var total uint64

	// Word 0: bytes [0,8).
	w := binary.LittleEndian.Uint64(c1[0:8]) ^ binary.LittleEndian.Uint64(c2[0:8])
	w -= (w >> 1) & m1
	w = (w & m2) + ((w >> 2) & m2)
	w = (w + (w >> 4)) & m4
	w += w >> 8
	w += w >> 16
	w += w >> 32
	total += w & 0x7f

	// Word 1: bytes [8,16).
	w = binary.LittleEndian.Uint64(c1[8:16]) ^ binary.LittleEndian.Uint64(c2[8:16])
	w -= (w >> 1) & m1
	w = (w & m2) + ((w >> 2) & m2)
	w = (w + (w >> 4)) & m4
	w += w >> 8
	w += w >> 16
	w += w >> 32
	total += w & 0x7f

	// Word 2: bytes [16,24).
	w = binary.LittleEndian.Uint64(c1[16:24]) ^ binary.LittleEndian.Uint64(c2[16:24])
	w -= (w >> 1) & m1
	w = (w & m2) + ((w >> 2) & m2)
	w = (w + (w >> 4)) & m4
	w += w >> 8
	w += w >> 16
	w += w >> 32
	total += w & 0x7f

	// Word 3: bytes [24,32).
	w = binary.LittleEndian.Uint64(c1[24:32]) ^ binary.LittleEndian.Uint64(c2[24:32])
	w -= (w >> 1) & m1
	w = (w & m2) + ((w >> 2) & m2)
	w = (w + (w >> 4)) & m4
	w += w >> 8
	w += w >> 16
	w += w >> 32
	total += w & 0x7f

	return int(total)
}
// bitComp3 counts the differing bits between two SHA-256 digests using the
// pre-computed per-byte popcount table pc — one table lookup per byte, with
// the whole count expressed as a single return statement (to test whether
// calling only one line for all the counting is any faster).
//
// Fixes over the previous version:
//   - the shift counts were multiples of 32; a uint64 holds its bytes at
//     shifts 0,8,...,56, and in Go shifting a uint64 by >= 64 always yields
//     0, so every term beyond the second was reading byte 0 of a zero value;
//   - it summed pc[x-byte] ^ pc[y-byte], but the XOR of two popcounts is not
//     the popcount of the XOR — the words must be XORed first, then counted.
func bitComp3(c1, c2 [SHA256]byte) int {
	x := binary.LittleEndian.Uint64(c1[0:8]) ^ binary.LittleEndian.Uint64(c2[0:8])
	x2 := binary.LittleEndian.Uint64(c1[8:16]) ^ binary.LittleEndian.Uint64(c2[8:16])
	x3 := binary.LittleEndian.Uint64(c1[16:24]) ^ binary.LittleEndian.Uint64(c2[16:24])
	x4 := binary.LittleEndian.Uint64(c1[24:32]) ^ binary.LittleEndian.Uint64(c2[24:32])
	// Each term converts to int before summing so the total (up to 256)
	// cannot overflow a narrow table element type.
	return int(pc[byte(x>>(0*8))]) + int(pc[byte(x>>(1*8))]) +
		int(pc[byte(x>>(2*8))]) + int(pc[byte(x>>(3*8))]) +
		int(pc[byte(x>>(4*8))]) + int(pc[byte(x>>(5*8))]) +
		int(pc[byte(x>>(6*8))]) + int(pc[byte(x>>(7*8))]) +
		int(pc[byte(x2>>(0*8))]) + int(pc[byte(x2>>(1*8))]) +
		int(pc[byte(x2>>(2*8))]) + int(pc[byte(x2>>(3*8))]) +
		int(pc[byte(x2>>(4*8))]) + int(pc[byte(x2>>(5*8))]) +
		int(pc[byte(x2>>(6*8))]) + int(pc[byte(x2>>(7*8))]) +
		int(pc[byte(x3>>(0*8))]) + int(pc[byte(x3>>(1*8))]) +
		int(pc[byte(x3>>(2*8))]) + int(pc[byte(x3>>(3*8))]) +
		int(pc[byte(x3>>(4*8))]) + int(pc[byte(x3>>(5*8))]) +
		int(pc[byte(x3>>(6*8))]) + int(pc[byte(x3>>(7*8))]) +
		int(pc[byte(x4>>(0*8))]) + int(pc[byte(x4>>(1*8))]) +
		int(pc[byte(x4>>(2*8))]) + int(pc[byte(x4>>(3*8))]) +
		int(pc[byte(x4>>(4*8))]) + int(pc[byte(x4>>(5*8))]) +
		int(pc[byte(x4>>(6*8))]) + int(pc[byte(x4>>(7*8))])
}
// A test to see if it is any faster to set all the variables before counting
// all the set bits: bitComp4 loads all eight uint64 words up front, then runs
// the same SWAR popcount over the XOR of each pair, accumulating per-word
// counts into n.
func bitComp4(c1, c2 [SHA256]byte) int {
	var n uint64
	x := binary.LittleEndian.Uint64(c1[0:8])
	y := binary.LittleEndian.Uint64(c2[0:8])
	x2 := binary.LittleEndian.Uint64(c1[8:16])
	y2 := binary.LittleEndian.Uint64(c2[8:16])
	x3 := binary.LittleEndian.Uint64(c1[16:24])
	y3 := binary.LittleEndian.Uint64(c2[16:24])
	x4 := binary.LittleEndian.Uint64(c1[24:32])
	y4 := binary.LittleEndian.Uint64(c2[24:32])
	// Word 0.
	x = x ^ y
	x = x - ((x >> 1) & 0x5555555555555555)
	x = (x & 0x3333333333333333) + ((x >> 2) & 0x3333333333333333)
	x = (x + (x >> 4)) & 0x0f0f0f0f0f0f0f0f
	x = x + (x >> 8)
	x = x + (x >> 16)
	x = x + (x >> 32)
	n += x & 0x7f
	// Word 1 (x is reused as the scratch accumulator).
	x = x2 ^ y2
	x = x - ((x >> 1) & 0x5555555555555555)
	x = (x & 0x3333333333333333) + ((x >> 2) & 0x3333333333333333)
	x = (x + (x >> 4)) & 0x0f0f0f0f0f0f0f0f
	x = x + (x >> 8)
	x = x + (x >> 16)
	x = x + (x >> 32)
	n += x & 0x7f
	// Word 2.
	x = x3 ^ y3
	x = x - ((x >> 1) & 0x5555555555555555)
	x = (x & 0x3333333333333333) + ((x >> 2) & 0x3333333333333333)
	x = (x + (x >> 4)) & 0x0f0f0f0f0f0f0f0f
	x = x + (x >> 8)
	x = x + (x >> 16)
	x = x + (x >> 32)
	n += x & 0x7f
	// Word 3.
	x = x4 ^ y4
	x = x - ((x >> 1) & 0x5555555555555555)
	x = (x & 0x3333333333333333) + ((x >> 2) & 0x3333333333333333)
	x = (x + (x >> 4)) & 0x0f0f0f0f0f0f0f0f
	x = x + (x >> 8)
	x = x + (x >> 16)
	x = x + (x >> 32)
	n += x & 0x7f
	return int(n)
}
// data carries a 32-byte digest as four uint64 words, used by bitComp5 so the
// decoding can happen in the caller rather than inside the hot function.
type data struct {
	A uint64
	B uint64
	C uint64
	D uint64
}
// This code uses the encode.Read function in the calling function to put all
// the uint64 values into a struct which is passed into the function. It is
// far too slow when the decoding is done inside the function, hence the
// pre-decoded data arguments.
func bitComp5(d1, d2 data) int {
	var n uint64
	x, x2, x3, x4 := d1.A, d1.B, d1.C, d1.D
	y, y2, y3, y4 := d2.A, d2.B, d2.C, d2.D
	// Word 0: SWAR popcount of the XOR.
	x = x ^ y
	x = x - ((x >> 1) & 0x5555555555555555)
	x = (x & 0x3333333333333333) + ((x >> 2) & 0x3333333333333333)
	x = (x + (x >> 4)) & 0x0f0f0f0f0f0f0f0f
	x = x + (x >> 8)
	x = x + (x >> 16)
	x = x + (x >> 32)
	n += x & 0x7f
	// Word 1.
	x = x2 ^ y2
	x = x - ((x >> 1) & 0x5555555555555555)
	x = (x & 0x3333333333333333) + ((x >> 2) & 0x3333333333333333)
	x = (x + (x >> 4)) & 0x0f0f0f0f0f0f0f0f
	x = x + (x >> 8)
	x = x + (x >> 16)
	x = x + (x >> 32)
	n += x & 0x7f
	// Word 2.
	x = x3 ^ y3
	x = x - ((x >> 1) & 0x5555555555555555)
	x = (x & 0x3333333333333333) + ((x >> 2) & 0x3333333333333333)
	x = (x + (x >> 4)) & 0x0f0f0f0f0f0f0f0f
	x = x + (x >> 8)
	x = x + (x >> 16)
	x = x + (x >> 32)
	n += x & 0x7f
	// Word 3.
	x = x4 ^ y4
	x = x - ((x >> 1) & 0x5555555555555555)
	x = (x & 0x3333333333333333) + ((x >> 2) & 0x3333333333333333)
	x = (x + (x >> 4)) & 0x0f0f0f0f0f0f0f0f
	x = x + (x >> 8)
	x = x + (x >> 16)
	x = x + (x >> 32)
	n += x & 0x7f
	return int(n)
}
/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * Functions found on Stack Overflow.
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
// An example of a looped version of the code taken from Stack Overflow:
// per-byte XOR, then Kernighan's trick (b &= b-1 clears the lowest set bit,
// so the inner loop iterates once per differing bit).
func bitsDifference(h1, h2 *[SHA256]byte) int {
	n := 0
	for i := range h1 {
		for b := h1[i] ^ h2[i]; b != 0; b &= b - 1 {
			n++
		}
	}
	return n
}
// bitCount1 returns the number of bits set in x, using Kernighan's trick:
// each x &= x-1 clears the lowest set bit, so the loop body runs exactly
// once per set bit.
func bitCount1(x uint8) int {
	n := 0
	for ; x != 0; x &= x - 1 {
		n++
	}
	return n
}
// differentbits is an example of a looped version of the code taken from
// Stack Overflow: XOR each byte pair and sum the per-byte bit counts.
func differentbits(c1, c2 [SHA256]uint8) int {
	total := 0
	for i := range c1 {
		total += bitCount1(c1[i] ^ c2[i])
	}
	return total
}
// main compares the SHA-256 digests of two short strings with two of the
// candidate implementations and prints the number of differing bits for each.
func main() {
	var n int
	c1 := sha256.Sum256([]byte("This"))
	c2 := sha256.Sum256([]byte("That"))
	n = bitComp2(c1, c2)
	// NOTE(review): "differance" in the output strings is a typo, left
	// untouched here because runtime strings must not change in a doc pass.
	fmt.Printf("there are %d differance between the sha's\n", n)
	n = bitComp4(c1, c2)
	fmt.Printf("there are %d differance between the sha's\n", n)
}
package nmea
import "time"
// MagneticCourseOverGround retrieves the magnetic course over ground from the sentence.
//
// NOTE(review): the method name begins with a lowercase 'm'
// ("GetmagneticCourseOverGround"), which makes it unexported and is
// inconsistent with every other Get* method in this file. Renaming it would
// break existing implementers, so it is only flagged here.
type MagneticCourseOverGround interface {
	GetmagneticCourseOverGround() (float64, error)
}

// MagneticHeading retrieves the magnetic heading from the sentence
type MagneticHeading interface {
	GetMagneticHeading() (float64, error)
}

// MagneticVariation retrieves the magnetic variation from the sentence
type MagneticVariation interface {
	GetMagneticVariation() (float64, error)
}

// RateOfTurn retrieves the rate of turn from the sentence
type RateOfTurn interface {
	GetRateOfTurn() (float64, error)
}

// TrueCourseOverGround retrieves the true course over ground from the sentence
type TrueCourseOverGround interface {
	GetTrueCourseOverGround() (float64, error)
}

// TrueHeading retrieves the true heading from the sentence
type TrueHeading interface {
	GetTrueHeading() (float64, error)
}

// FixQuality retrieves the fix quality from the sentence
type FixQuality interface {
	GetFixQuality() (string, error)
}

// FixType retrieves the fix type from the sentence
type FixType interface {
	GetFixType() (string, error)
}

// NumberOfSatellites retrieves the number of satellites from the sentence
type NumberOfSatellites interface {
	GetNumberOfSatellites() (int64, error)
}

// Position2D retrieves the 2D position from the sentence
type Position2D interface {
	GetPosition2D() (float64, float64, error)
}

// Position3D retrieves the 3D position from the sentence
type Position3D interface {
	GetPosition3D() (float64, float64, float64, error)
}

// SpeedOverGround retrieves the speed over ground from the sentence
type SpeedOverGround interface {
	GetSpeedOverGround() (float64, error)
}

// SpeedThroughWater retrieves the speed through water from the sentence
type SpeedThroughWater interface {
	GetSpeedThroughWater() (float64, error)
}

// DepthBelowSurface retrieves the depth below surface from the sentence
type DepthBelowSurface interface {
	GetDepthBelowSurface() (float64, error)
}

// DepthBelowKeel retrieves the depth below keel from the sentence
type DepthBelowKeel interface {
	GetDepthBelowKeel() (float64, error)
}

// DepthBelowTransducer retrieves the depth below the transducer from the sentence
type DepthBelowTransducer interface {
	GetDepthBelowTransducer() (float64, error)
}

// WaterTemperature retrieves the water temperature from the sentence
type WaterTemperature interface {
	GetWaterTemperature() (float64, error)
}

// TrueWindDirection retrieves the true wind direction from the sentence
type TrueWindDirection interface {
	GetTrueWindDirection() (float64, error)
}

// MagneticWindDirection retrieves the magnetic wind direction from the sentence
type MagneticWindDirection interface {
	GetMagneticWindDirection() (float64, error)
}

// RelativeWindDirection retrieves the relative wind direction from the sentence
type RelativeWindDirection interface {
	GetRelativeWindDirection() (float64, error)
}

// WindSpeed retrieves the wind speed from the sentence
type WindSpeed interface {
	GetWindSpeed() (float64, error)
}

// OutsideTemperature retrieves the outside air temperature from the sentence
type OutsideTemperature interface {
	GetOutsideTemperature() (float64, error)
}

// DewPointTemperature retrieves the dew point temperature from the sentence
type DewPointTemperature interface {
	GetDewPointTemperature() (float64, error)
}

// Humidity retrieves the relative humidity from the sentence
type Humidity interface {
	GetHumidity() (float64, error)
}

// Heave retrieves the heave from the sentence
type Heave interface {
	GetHeave() (float64, error)
}

// DateTime retrieves the date and time in RFC3339Nano format
type DateTime interface {
	GetDateTime() (string, error)
}

// CallSign retrieves the call sign of the vessel from the sentence
type CallSign interface {
	GetCallSign() (string, error)
}

// ENINumber retrieves the ENI number of the vessel from the sentence
type ENINumber interface {
	// https://en.wikipedia.org/wiki/ENI_number
	GetENINumber() (string, error)
}

// IMONumber retrieves the IMO number of the vessel from the sentence
type IMONumber interface {
	GetIMONumber() (string, error)
}

// MMSI retrieves the MMSI of the vessel from the sentence
type MMSI interface {
	GetMMSI() (string, error)
}

// NavigationStatus retrieves the navigation status from the sentence
type NavigationStatus interface {
	GetNavigationStatus() (string, error)
}

// VesselLength retrieves the length of the vessel from the sentence
type VesselLength interface {
	GetVesselLength() (float64, error)
}

// VesselBeam retrieves the beam of the vessel from the sentence
type VesselBeam interface {
	GetVesselBeam() (float64, error)
}

// VesselName retrieves the name of the vessel from the sentence
type VesselName interface {
	GetVesselName() (string, error)
}

// VesselType retrieves the type of the vessel from the sentence
type VesselType interface {
	GetVesselType() (string, error)
}

// Destination retrieves the destination of the vessel from the sentence
type Destination interface {
	GetDestination() (string, error)
}

// ETA retrieves the ETA of the vessel from the sentence
type ETA interface {
	GetETA() (time.Time, error)
}
package kalman_filter
import (
"github.com/pkg/errors"
"gonum.org/v1/gonum/mat"
)
// KalmanFilterLinear implements a linear Kalman filter.
//
//	A - state transition matrix
//	B - control-input matrix
//	C - measurement (observation) matrix
//	P - state covariance
//	Q - process-noise covariance
//	R - measurement-noise covariance
//	X - state estimate (holds the initial state before the first Step)
type KalmanFilterLinear struct {
	A *mat.Dense
	B *mat.Dense
	C *mat.Dense
	P *mat.Dense
	Q *mat.Dense
	R *mat.Dense
	X *mat.Dense
}
// Step performs one full filter iteration: a prediction stage driven by the
// control input u, followed by an update stage with the measurement y. It
// returns the current state estimate together with any error from updating.
func (filter *KalmanFilterLinear) Step(u *mat.Dense, y *mat.Dense) (mat.Matrix, error) {
	filter.Predict(u)
	if err := filter.Update(y); err != nil {
		return filter.X, errors.Wrap(err, "Can't execute Step() due the error on updating stage")
	}
	return filter.X, nil
}
// Predict runs the prediction stage with control input u:
//
//	x = A ⋅ x + B ⋅ u
//	P = A ⋅ P ⋅ Aᵀ + Q
//
// Both the state X and the covariance P are updated in place.
func (filter *KalmanFilterLinear) Predict(u *mat.Dense) {
	// Evaluate x:
	// x = A ⋅ x + B ⋅ u
	ar, _ := filter.A.Dims()
	_, xc := filter.X.Dims()
	AX := mat.NewDense(ar, xc, nil)
	AX.Mul(filter.A, filter.X)
	br, _ := filter.B.Dims()
	_, uc := u.Dims()
	BU := mat.NewDense(br, uc, nil)
	BU.Mul(filter.B, u)
	filter.X.Add(AX, BU)
	// Evaluate state covariance as:
	// P = A ⋅ P ⋅ Transponse(A) + Q
	_, pc := filter.P.Dims()
	AP := mat.NewDense(ar, pc, nil)
	AP.Mul(filter.A, filter.P)
	AP.Mul(AP, filter.A.T())
	filter.P.Add(AP, filter.Q)
}
// Update Updating stage
func (filter *KalmanFilterLinear) Update(y *mat.Dense) error {
// Temporary result of
// P ⋅ Transponse(C)
Prows, _ := filter.P.Dims()
Crows, Ccols := filter.C.Dims()
tmpPC := mat.NewDense(Prows, Crows, nil) // using Cr since matrix C would be transponsed
tmpPC.Mul(filter.P, filter.C.T())
// K = tmpPC ⋅ [((C ⋅ tmpPC) + R)^-1]
// p.s. "^-1" - stands for inverse matrix
tmpPCRows, tmpPCCols := tmpPC.Dims()
tmpInversed := mat.NewDense(Crows, tmpPCCols, nil)
tmpInversed.Mul(filter.C, tmpPC)
tmpInversed.Add(tmpInversed, filter.R)
err := tmpInversed.Inverse(tmpInversed)
if err != nil {
return errors.Wrap(err, "Can't execute Update() due the error while gonum's Inverse() execution")
}
_, tmpInversedCols := tmpInversed.Dims()
K := mat.NewDense(tmpPCRows, tmpInversedCols, nil)
K.Mul(tmpPC, tmpInversed)
// Update state as:
// x{k} = x{k-1} + K ⋅ (y - C ⋅ x{k-1})
Krows, _ := K.Dims()
_, Xcols := filter.X.Dims()
CX := mat.NewDense(Crows, Xcols, nil)
CX.Mul(filter.C, filter.X)
yRows, yCols := y.Dims()
yCx := mat.NewDense(yRows, yCols, nil)
yCx.Sub(y, CX)
KyCx := mat.NewDense(Krows, yCols, nil)
KyCx.Mul(K, yCx)
filter.X.Add(filter.X, KyCx)
// Update state covariance as:
// P{k} = (Diag(4, 1) - K ⋅ C) ⋅ P{k-1}
diagonalFullDense := mat.NewDense(4, 4, nil)
diagonalGonum := mat.NewDiagDense(4, []float64{1, 1, 1, 1})
KC := mat.NewDense(Krows, Ccols, nil)
KC.Mul(K, filter.C)
diagonalFullDense.Sub(diagonalGonum, KC)
filter.P.Mul(diagonalFullDense, filter.P)
return nil
}
package gollection
// LinkedStackOf builds a LinkedStack and pushes the given elements onto it in
// argument order, so the last argument ends up on top.
func LinkedStackOf[T any](elements ...T) LinkedStack[T] {
	stack := LinkedStack[T]{&linkedStack[T]{size: 0, first: nil}}
	for _, element := range elements {
		stack.Push(element)
	}
	return stack
}
// LinkedStackFrom builds a LinkedStack by pushing every element yielded by
// the collection's iterator, so the collection's last element ends up on top.
func LinkedStackFrom[T any](collection Collection[T]) LinkedStack[T] {
	var stack = LinkedStackOf[T]()
	ForEach(stack.Push, collection.Iter())
	return stack
}
// LinkedStack is a LIFO stack backed by a singly linked list. The exported
// value is a thin handle around a shared *linkedStack, so copies of a
// LinkedStack all mutate the same underlying stack.
type LinkedStack[T any] struct {
	inner *linkedStack[T]
}

// linkedStack holds the element count and the top-of-stack node.
type linkedStack[T any] struct {
	size  int
	first *oneWayNode[T]
}

// oneWayNode is a node of the singly linked list.
type oneWayNode[T any] struct {
	value T
	next  *oneWayNode[T]
}
// Size returns the number of elements currently on the stack.
func (a LinkedStack[T]) Size() int {
	return a.inner.size
}

// IsEmpty reports whether the stack holds no elements.
func (a LinkedStack[T]) IsEmpty() bool {
	return a.inner.size == 0
}
// Push places element on top of the stack.
//
// The previous version special-cased an empty stack, but both branches built
// the same node (when first is nil, {element, first} equals {element, nil}),
// so the redundant branch has been removed.
func (a LinkedStack[T]) Push(element T) {
	a.inner.first = &oneWayNode[T]{element, a.inner.first}
	a.inner.size++
}
// Pop removes and returns the top element; it panics with OutOfBounds when
// the stack is empty.
func (a LinkedStack[T]) Pop() T {
	if v, ok := a.TryPop().Get(); ok {
		return v
	}
	panic(OutOfBounds)
}

// Peek returns the top element without removing it; it panics with
// OutOfBounds when the stack is empty.
func (a LinkedStack[T]) Peek() T {
	if v, ok := a.TryPeek().Get(); ok {
		return v
	}
	panic(OutOfBounds)
}

// TryPop removes and returns the top element as Some(value), or None when
// the stack is empty.
func (a LinkedStack[T]) TryPop() Option[T] {
	if a.inner.first == nil {
		return None[T]()
	}
	a.inner.size--
	var item = a.inner.first.value
	a.inner.first = a.inner.first.next
	return Some(item)
}

// TryPeek returns the top element as Some(value) without removing it, or
// None when the stack is empty.
func (a LinkedStack[T]) TryPeek() Option[T] {
	if a.inner.first == nil {
		return None[T]()
	}
	return Some(a.inner.first.value)
}

// Iter returns an iterator that walks the stack from top to bottom.
func (a LinkedStack[T]) Iter() Iterator[T] {
	return &linkedStackIterator[T]{a.inner.first}
}
// ToSlice returns the stack's elements from top to bottom as a fresh slice.
//
// Bug fix: the slice was previously created with make([]T, a.Size()) and
// then appended to, producing a slice of length 2×Size whose first half was
// zero values. It is now allocated with zero length and Size capacity.
func (a LinkedStack[T]) ToSlice() []T {
	arr := make([]T, 0, a.Size())
	ForEach(func(t T) {
		arr = append(arr, t)
	}, a.Iter())
	return arr
}
// Clone returns an independent copy of the stack with the elements in the
// same order.
//
// Bug fix: the previous implementation (LinkedStackFrom[T](a)) pushed the
// elements in top-to-bottom iteration order, which placed the old top at the
// bottom and thus reversed the clone. The elements are now collected first
// and pushed bottom-up so the order is preserved.
func (a LinkedStack[T]) Clone() LinkedStack[T] {
	elements := make([]T, 0, a.Size())
	for node := a.inner.first; node != nil; node = node.next {
		elements = append(elements, node.value)
	}
	clone := LinkedStackOf[T]()
	for i := len(elements) - 1; i >= 0; i-- {
		clone.Push(elements[i])
	}
	return clone
}
// Clear drops all elements by resetting the shared inner stack; every other
// LinkedStack handle sharing the same inner value is emptied as well.
func (a LinkedStack[T]) Clear() {
	a.inner.size = 0
	a.inner.first = nil
}
// linkedStackIterator walks the node chain from the stack top downwards.
type linkedStackIterator[T any] struct {
	current *oneWayNode[T]
}

// Next yields Some(next value) and advances, or None once exhausted.
func (a *linkedStackIterator[T]) Next() Option[T] {
	if a.current != nil {
		var item = a.current.value
		a.current = a.current.next
		return Some(item)
	}
	return None[T]()
}
}
package number_of_ships_in_a_rectangle
/*
1274. Number of Ships in a Rectangle
https://leetcode-cn.com/problems/number-of-ships-in-a-rectangle
(This is an interactive problem.)

On a 2D sea represented by a Cartesian plane there are some ships. Each ship
is located at an integer point, and each integer point holds at most one ship.

A function Sea.hasShips(topRight, bottomLeft) takes the top-right and
bottom-left corners of a rectangle and returns true if and only if the
rectangle (boundary included) contains at least one ship, otherwise false.

Given the rectangle's top-right corner topRight and bottom-left corner
bottomLeft, return the number of ships inside the rectangle. The rectangle is
guaranteed to contain at most 10 ships.

Submissions that call hasShips more than 400 times are judged Wrong Answer,
and any attempt to circumvent the judge leads to disqualification.

Example:
Input:
ships = [[1,1],[2,2],[3,3],[5,5]], topRight = [4,4], bottomLeft = [0,0]
Output: 3
Explanation: there are 3 ships in the range [0,0] to [4,4].

Constraints:
The ships array is only used to initialise the judge internally; you cannot
see it and can only solve the problem through calls to hasShips.
0 <= bottomLeft[0] <= topRight[0] <= 1000
0 <= bottomLeft[1] <= topRight[1] <= 1000
*/
/**
 * // This is Sea's API interface.
 * // You should not implement it, or speculate about its implementation
 * type Sea struct {
 *     func hasShips(topRight, bottomLeft []int) bool {}
 * }
 */

// Sea is the judge-provided query interface: hasShips reports whether the
// rectangle bounded by topRight and bottomLeft (boundary included) contains
// at least one ship.
type Sea interface {
	hasShips(topRight, bottomLeft []int) bool
}
/*
Divide and conquer:
split the query rectangle into four quadrants to shrink the problem size.
*/
func countShips(sea Sea, topRight, bottomLeft []int) int {
	x1, y1 := topRight[0], topRight[1]
	x2, y2 := bottomLeft[0], bottomLeft[1]
	// Empty rectangle, or the judge reports no ship inside: nothing to count.
	if x1 < x2 || y1 < y2 || !sea.hasShips(topRight, bottomLeft) {
		return 0
	}
	// Single cell known to contain a ship.
	if x1 == x2 && y1 == y2 {
		return 1
	}
	midX := (x1 + x2) / 2
	midY := (y1 + y2) / 2
	/*
		Be careful how the four sub-regions are cut: do not use
		(x1, y1)-(midX, midY) as a sub-region. For example, for the region
		(2,2)-(1,1) the midpoint is (1,1), and the recursion would never
		shrink. Use (x1, y1)-(midX+1, midY+1) instead so that every
		sub-region is strictly smaller.
	*/
	return countShips(sea, []int{midX, midY}, []int{x2, y2}) +
		countShips(sea, []int{midX, y1}, []int{x2, midY + 1}) +
		countShips(sea, []int{x1, midY}, []int{midX + 1, y2}) +
		countShips(sea, []int{x1, y1}, []int{midX + 1, midY + 1})
}
/*
Binary split: halve the rectangle along one axis at a time.
*/
func countShips0(sea Sea, topRight, bottomLeft []int) int {
	return help(sea, topRight, bottomLeft)
}
// help recursively counts ships by splitting the rectangle in two: along x
// while the rectangle is wider than one column, then along y.
func help(sea Sea, topRight, bottomLeft []int) int {
	x1, y1 := topRight[0], topRight[1]
	x2, y2 := bottomLeft[0], bottomLeft[1]
	if x1 < x2 || y1 < y2 || !sea.hasShips(topRight, bottomLeft) {
		return 0
	}
	if x1 == x2 && y1 == y2 {
		return 1
	}
	// Single column: split along y.
	if x1 == x2 {
		midY := (y1 + y2) / 2
		return help(sea, []int{x1, midY}, []int{x1, y2}) + help(sea, []int{x1, y1}, []int{x1, midY + 1})
	}
	// Otherwise split along x.
	midX := (x1 + x2) / 2
	return help(sea, []int{midX, y1}, []int{x2, y2}) + help(sea, []int{x1, y1}, []int{midX + 1, y2})
}
/*
Binary split, optimised:
when the region is split into only two sub-regions A and B and the API call
for A returns false, B is certain to contain a ship (the parent region did),
so the API call for B can be skipped — saving one hasShips call.
*/
func countShips1(sea Sea, topRight, bottomLeft []int) int {
	return help1(sea, topRight, bottomLeft, false)
}
// help1 is like help, but claim == true means the caller has already deduced
// that this rectangle must contain a ship, so the hasShips call is skipped.
func help1(sea Sea, topRight, bottomLeft []int, claim bool) int {
	x1, y1 := topRight[0], topRight[1]
	x2, y2 := bottomLeft[0], bottomLeft[1]
	if x1 < x2 || y1 < y2 {
		return 0
	}
	if !claim && !sea.hasShips(topRight, bottomLeft) {
		return 0
	}
	if x1 == x2 && y1 == y2 {
		return 1
	}
	if x1 == x2 {
		midY := (y1 + y2) / 2
		a := help1(sea, []int{x1, midY}, []int{x1, y2}, false)
		// If the first half counted nothing, the other half must hold ships.
		return a + help1(sea, []int{x1, y1}, []int{x1, midY + 1}, a == 0)
	}
	midX := (x1 + x2) / 2
	a := help1(sea, []int{midX, y1}, []int{x2, y2}, false)
	return a + help1(sea, []int{x1, y1}, []int{midX + 1, y2}, a == 0)
}
}
package game
import (
"github.com/mokiat/lacking-js/internal"
"github.com/mokiat/lacking/game/graphics"
)
// newAmbientLightShaderSet bundles the ambient-light vertex and fragment GLSL
// sources into a graphics.ShaderSet, deferring actual source assembly to the
// internal shader source builders.
func newAmbientLightShaderSet() graphics.ShaderSet {
	vsBuilder := internal.NewShaderSourceBuilder(ambientLightVertexShader)
	fsBuilder := internal.NewShaderSourceBuilder(ambientLightFragmentShader)
	return graphics.ShaderSet{
		VertexShader:   vsBuilder.Build,
		FragmentShader: fsBuilder.Build,
	}
}
// ambientLightVertexShader is the GLSL source for a full-screen-quad vertex
// stage; it forwards clip-space coordinates and derives 0..1 texture coords.
// The string is GLSL consumed at runtime and must not be edited casually.
const ambientLightVertexShader = `
layout(location = 0) in vec3 coordIn;

smooth out vec2 texCoordInOut;

void main()
{
	texCoordInOut = (coordIn.xy + 1.0) / 2.0;
	gl_Position = vec4(coordIn.xy, 0.0, 1.0);
}
`
// ambientLightFragmentShader is the GLSL source for the deferred ambient
// lighting pass: it reconstructs world position from the depth buffer, reads
// albedo/metalness and normal/roughness from the G-buffer, and combines
// reflected and refracted environment-map contributions via a Fresnel term.
// The string is GLSL consumed at runtime and must not be edited casually.
const ambientLightFragmentShader = `
layout(location = 0) out vec4 fbColor0Out;

uniform sampler2D fbColor0TextureIn;
uniform sampler2D fbColor1TextureIn;
uniform sampler2D fbDepthTextureIn;
uniform samplerCube reflectionTextureIn;
uniform samplerCube refractionTextureIn;
uniform mat4 projectionMatrixIn;
uniform mat4 viewMatrixIn;
uniform mat4 cameraMatrixIn;

smooth in vec2 texCoordInOut;

const float pi = 3.141592;

struct ambientFresnelInput {
	vec3 reflectanceF0;
	vec3 normal;
	vec3 viewDirection;
	float roughness;
};

vec3 calculateAmbientFresnel(ambientFresnelInput i) {
	float normViewDot = clamp(dot(i.normal, i.viewDirection), 0.0, 1.0);
	return i.reflectanceF0 + (max(vec3(1.0 - i.roughness), i.reflectanceF0) - i.reflectanceF0) * pow(1.0 - normViewDot, 5.0);
}

struct geometryInput {
	float roughness;
};

float calculateGeometry(geometryInput i) {
	// TODO: Use better model
	return 1.0 / 4.0;
}

struct ambientSetup {
	float roughness;
	vec3 reflectedColor;
	vec3 refractedColor;
	vec3 viewDirection;
	vec3 normal;
};

vec3 calculateAmbientHDR(ambientSetup s) {
	vec3 fresnel = calculateAmbientFresnel(ambientFresnelInput(
		s.reflectedColor,
		s.normal,
		s.viewDirection,
		s.roughness
	));

	vec3 lightDirection = reflect(s.viewDirection, s.normal);
	vec3 reflectedLightIntensity = pow(mix(
		pow(texture(refractionTextureIn, lightDirection) / pi, vec4(0.25)),
		pow(texture(reflectionTextureIn, lightDirection), vec4(0.25)),
		pow(1.0 - s.roughness, 4.0)
	), vec4(4)).xyz;
	float geometry = calculateGeometry(geometryInput(
		s.roughness
	));
	vec3 reflectedHDR = fresnel * s.reflectedColor * reflectedLightIntensity * geometry;

	vec3 refractedLightIntensity = texture(refractionTextureIn, -s.normal).xyz;
	vec3 refractedHDR = (vec3(1.0) - fresnel) * s.refractedColor * refractedLightIntensity / pi;

	return (reflectedHDR + refractedHDR);
}

void main()
{
	vec3 ndcPosition = vec3(
		(texCoordInOut.x - 0.5) * 2.0,
		(texCoordInOut.y - 0.5) * 2.0,
		texture(fbDepthTextureIn, texCoordInOut).x * 2.0 - 1.0
	);
	vec3 clipPosition = vec3(
		ndcPosition.x / projectionMatrixIn[0][0],
		ndcPosition.y / projectionMatrixIn[1][1],
		-1.0
	);
	vec3 viewPosition = clipPosition * projectionMatrixIn[3][2] / (projectionMatrixIn[2][2] + ndcPosition.z);
	vec3 worldPosition = (cameraMatrixIn * vec4(viewPosition, 1.0)).xyz;
	vec3 cameraPosition = cameraMatrixIn[3].xyz;

	vec4 albedoMetalness = texture(fbColor0TextureIn, texCoordInOut);
	vec4 normalRoughness = texture(fbColor1TextureIn, texCoordInOut);
	vec3 baseColor = albedoMetalness.xyz;
	vec3 normal = normalize(normalRoughness.xyz);
	float metalness = albedoMetalness.w;
	float roughness = normalRoughness.w;

	vec3 refractedColor = baseColor * (1.0 - metalness);
	vec3 reflectedColor = mix(vec3(0.02), baseColor, metalness);

	vec3 hdr = calculateAmbientHDR(ambientSetup(
		roughness,
		reflectedColor,
		refractedColor,
		normalize(cameraPosition - worldPosition),
		normal
	));

	fbColor0Out = vec4(hdr, 1.0);
}
`
package validator
import "fmt"
// DigitsBetweenInt64 reports whether value lies within the closed interval
// spanned by left and right; the borders may be given in either order.
func DigitsBetweenInt64(value, left, right int64) bool {
	lo, hi := left, right
	if lo > hi {
		lo, hi = hi, lo
	}
	return lo <= value && value <= hi
}
// compareInt64 reports whether the comparison "first <operator> second"
// holds. It panics on an unsupported operator string.
func compareInt64(first int64, second int64, operator string) bool {
	lt := first < second
	gt := first > second
	switch operator {
	case "<":
		return lt
	case ">":
		return gt
	case "<=":
		return !gt
	case ">=":
		return !lt
	case "==":
		return !lt && !gt
	}
	panic(fmt.Sprintf("validator: compareInt64 unsupport operator %s", operator))
}
// DistinctInt is the validation function for validating that all values in v
// are unique (no duplicates). An empty slice is vacuously distinct.
//
// Bug fix: the previous implementations returned inArrayXxx(v, v), which is
// true for any non-empty slice — every element trivially matches itself — so
// duplicates were never detected. Each function now tracks seen values in a
// set and fails on the first repeat.
func DistinctInt(v []int) bool {
	seen := make(map[int]struct{}, len(v))
	for _, n := range v {
		if _, dup := seen[n]; dup {
			return false
		}
		seen[n] = struct{}{}
	}
	return true
}

// DistinctInt8 is the validation function for validating that all values in v
// are unique (no duplicates). See DistinctInt for the fix note.
func DistinctInt8(v []int8) bool {
	seen := make(map[int8]struct{}, len(v))
	for _, n := range v {
		if _, dup := seen[n]; dup {
			return false
		}
		seen[n] = struct{}{}
	}
	return true
}

// DistinctInt16 is the validation function for validating that all values in
// v are unique (no duplicates). See DistinctInt for the fix note.
func DistinctInt16(v []int16) bool {
	seen := make(map[int16]struct{}, len(v))
	for _, n := range v {
		if _, dup := seen[n]; dup {
			return false
		}
		seen[n] = struct{}{}
	}
	return true
}

// DistinctInt32 is the validation function for validating that all values in
// v are unique (no duplicates). See DistinctInt for the fix note.
func DistinctInt32(v []int32) bool {
	seen := make(map[int32]struct{}, len(v))
	for _, n := range v {
		if _, dup := seen[n]; dup {
			return false
		}
		seen[n] = struct{}{}
	}
	return true
}

// DistinctInt64 is the validation function for validating that all values in
// v are unique (no duplicates). See DistinctInt for the fix note.
func DistinctInt64(v []int64) bool {
	seen := make(map[int64]struct{}, len(v))
	for _, n := range v {
		if _, dup := seen[n]; dup {
			return false
		}
		seen[n] = struct{}{}
	}
	return true
}
// inArrayInt reports whether the two slices share at least one common
// element. Complexity is O(len(needle) × len(haystack)).
func inArrayInt(needle []int, haystack []int) bool {
	for _, n := range needle {
		for _, s := range haystack {
			if n == s {
				return true
			}
		}
	}
	return false
}

// inArrayInt8 reports whether the two slices share at least one common
// element.
func inArrayInt8(needle []int8, haystack []int8) bool {
	for _, n := range needle {
		for _, s := range haystack {
			if n == s {
				return true
			}
		}
	}
	return false
}

// inArrayInt16 reports whether the two slices share at least one common
// element.
func inArrayInt16(needle []int16, haystack []int16) bool {
	for _, n := range needle {
		for _, s := range haystack {
			if n == s {
				return true
			}
		}
	}
	return false
}

// inArrayInt32 reports whether the two slices share at least one common
// element.
func inArrayInt32(needle []int32, haystack []int32) bool {
	for _, n := range needle {
		for _, s := range haystack {
			if n == s {
				return true
			}
		}
	}
	return false
}

// inArrayInt64 reports whether the two slices share at least one common
// element.
func inArrayInt64(needle []int64, haystack []int64) bool {
	for _, n := range needle {
		for _, s := range haystack {
			if n == s {
				return true
			}
		}
	}
	return false
}
package types
import (
"fmt"
"time"
)
// Domain constants for period/term calculations.
const (
	TERM_BEG_FINISHED int16 = 32 // sentinel start day: past any real day of a month (max 31)
	TERM_END_FINISHED int16 = 0  // sentinel end day: before any real day of a month
	WEEKMON_SUNDAY int16 = 7 // Sunday in the Monday-first weekday numbering used here
	TIME_MULTIPLY_SIXTY int16 = 60 // minutes/seconds conversion factor
	WEEKDAYS_COUNT int16 = 7 // days per week
)
// max returns the larger of two int16 values.
func max(x, y int16) int16 {
	if x >= y {
		return x
	}
	return y
}

// min returns the smaller of two int16 values.
func min(x, y int16) int16 {
	if x <= y {
		return x
	}
	return y
}

// max32 returns the larger of two int32 values.
func max32(x, y int32) int32 {
	if x >= y {
		return x
	}
	return y
}

// min32 returns the smaller of two int32 values.
func min32(x, y int32) int32 {
	if x <= y {
		return x
	}
	return y
}
// zipInt32 pairs one element from each of two int32 slices.
type zipInt32 struct {
	a, b int32
}

// zip pairs a and b element-wise; the slices must have equal length,
// otherwise an error is returned.
func zip(a, b []int32) ([]zipInt32, error) {
	if len(a) != len(b) {
		return nil, fmt.Errorf("zip: arguments must be of same length")
	}
	pairs := make([]zipInt32, len(a))
	for i := range a {
		pairs[i] = zipInt32{a[i], b[i]}
	}
	return pairs, nil
}
// NewDate builds a UTC midnight time.Time for the given year/month/day
// (all int16 to match the rest of this package).
func NewDate(year int16, month int16, day int16) time.Time {
	return time.Date(int(year), time.Month(int(month)), int(day), 0, 0, 0, 0, time.UTC)
}
// EmptyMonthSchedule returns a 31-entry schedule of all-zero seconds,
// one slot per possible day of a month.
func EmptyMonthSchedule() []int32 {
	return make([]int32, 31)
}
// TotalWeeksHours sums the first 7 entries (one week) of the template;
// shorter templates are summed in full.
func TotalWeeksHours(template []int32) int32 {
	var total int32
	for i := 0; i < len(template) && i < 7; i++ {
		total += template[i]
	}
	return total
}

// TotalMonthHours sums the first 31 entries (one month) of the template;
// shorter templates are summed in full.
func TotalMonthHours(template []int32) int32 {
	var total int32
	for i := 0; i < len(template) && i < 31; i++ {
		total += template[i]
	}
	return total
}
// DaysInMonth returns the number of days in the month of the given period.
func DaysInMonth(period IPeriod) int16 {
	firstOfMonth := NewDate(period.Year(), period.Month(), 1)
	lastOfMonth := firstOfMonth.AddDate(0, 1, -1)
	return int16(lastOfMonth.Day())
}

// DateOfMonth returns the date of the given day within the period's month,
// clamping dayOrdinal into the valid range 1..DaysInMonth.
func DateOfMonth(period IPeriod, dayOrdinal int16) time.Time {
	clampedDay := min(max(1, dayOrdinal), DaysInMonth(period))
	return NewDate(period.Year(), period.Month(), clampedDay)
}
// DayOfWeekMonToSun converts Go's Sunday-first time.Weekday (Sunday = 0,
// Monday = 1, ..., Saturday = 6) to the Monday-first numbering used in this
// package (Monday = 1 ... Sunday = 7). Anything outside the enum maps to 0.
func DayOfWeekMonToSun(periodDateCwd time.Weekday) int16 {
	if periodDateCwd == time.Sunday {
		return WEEKMON_SUNDAY
	}
	if periodDateCwd >= time.Monday && periodDateCwd <= time.Saturday {
		return int16(periodDateCwd)
	}
	return 0
}

// DayOfWeekFromOrdinal returns the Monday-first weekday (1..7) of the
// dayOrdinal-th day (1..31) of a month whose first day falls on weekday
// periodBeginCwd (1..7).
func DayOfWeekFromOrdinal(dayOrdinal int16, periodBeginCwd int16) int16 {
	offset := (dayOrdinal - 1) + (periodBeginCwd - 1)
	return offset%7 + 1
}

// WeekDayOfMonth returns the Monday-first weekday of the given day of the
// period's month.
func WeekDayOfMonth(period IPeriod, dayOrdinal int16) int16 {
	return DayOfWeekMonToSun(DateOfMonth(period, dayOrdinal).Weekday())
}
// DateFromInPeriod returns the 1-based day of the period's month on which a
// term starts: day 1 when dateFrom is nil or precedes the month, otherwise
// dateFrom's day of month. TERM_BEG_FINISHED is only the pre-resolution
// default.
func DateFromInPeriod(period IPeriod, dateFrom *time.Time) int16 {
	var dayTermFrom = TERM_BEG_FINISHED
	periodDateBeg := NewDate(period.Year(), period.Month(), 1)
	if dateFrom != nil {
		dayTermFrom = int16(dateFrom.Day())
	}
	if dateFrom == nil || dateFrom.Before(periodDateBeg) {
		dayTermFrom = 1
	}
	return dayTermFrom
}

// DateStopInPeriod returns the 1-based day of the period's month on which a
// term ends: the month's last day when dateEnds is nil or falls after the
// month, otherwise dateEnds's day of month. TERM_END_FINISHED is only the
// pre-resolution default.
func DateStopInPeriod(period IPeriod, dateEnds *time.Time) int16 {
	var dayTermEnd = TERM_END_FINISHED
	daysPeriod := DaysInMonth(period)
	periodDateEnd := NewDate(period.Year(), period.Month(), daysPeriod)
	if dateEnds != nil {
		dayTermEnd = int16(dateEnds.Day())
	}
	if dateEnds == nil || dateEnds.After(periodDateEnd) {
		dayTermEnd = daysPeriod
	}
	return dayTermEnd
}
// TimesheetWeekSchedule builds a 7-entry (Mon..Sun) schedule distributing
// secondsWeekly over workdaysWeekly working days: each working day gets the
// integer share and the last working day additionally absorbs the rounding
// remainder. The period parameter is currently unused.
//
// NOTE(review): workdaysWeekly == 0 divides by zero here — callers must
// guarantee at least one working day.
func TimesheetWeekSchedule(period IPeriod, secondsWeekly int32, workdaysWeekly int16) []int32 {
	secondsDaily := secondsWeekly / int32(min(workdaysWeekly, WEEKDAYS_COUNT))
	secRemainder := secondsWeekly - (secondsDaily * int32(workdaysWeekly))
	weekSchedule := []int32{
		WeekDaySeconds(1, workdaysWeekly, secondsDaily, secRemainder),
		WeekDaySeconds(2, workdaysWeekly, secondsDaily, secRemainder),
		WeekDaySeconds(3, workdaysWeekly, secondsDaily, secRemainder),
		WeekDaySeconds(4, workdaysWeekly, secondsDaily, secRemainder),
		WeekDaySeconds(5, workdaysWeekly, secondsDaily, secRemainder),
		WeekDaySeconds(6, workdaysWeekly, secondsDaily, secRemainder),
		WeekDaySeconds(7, workdaysWeekly, secondsDaily, secRemainder),
	}
	return weekSchedule
}
// WeekDaySeconds returns the scheduled seconds for weekday dayOrdinal given
// daysOfWork working days per week: full working days get secondsDaily, the
// last working day additionally absorbs secRemainder, all later days get 0.
func WeekDaySeconds(dayOrdinal int16, daysOfWork int16, secondsDaily int32, secRemainder int32) int32 {
	switch {
	case dayOrdinal < daysOfWork:
		return secondsDaily
	case dayOrdinal == daysOfWork:
		return secondsDaily + secRemainder
	default:
		return 0
	}
}
// TimesheetFullSchedule expands a Mon..Sun week schedule into a per-day
// schedule for the whole month, aligning day 1 with the weekday on which the
// month starts.
func TimesheetFullSchedule(period IPeriod, weekSchedule []int32) []int32 {
	periodDaysCount := DaysInMonth(period)
	periodBeginCwd := WeekDayOfMonth(period, 1)
	var monthSchedule []int32
	for x := int16(1); x <= periodDaysCount; x++ {
		monthSchedule = append(monthSchedule, SecondsFromWeekSchedule(weekSchedule, x, periodBeginCwd))
	}
	return monthSchedule
}
// TimesheetWorkSchedule masks a month schedule to the term
// dayTermFrom..dayTermStop (1-based, inclusive); days outside the term
// become 0.
func TimesheetWorkSchedule(monthSchedule []int32, dayTermFrom int16, dayTermStop int16) []int32 {
	var timeSheet []int32
	for idx, x := range monthSchedule {
		timeSheet = append(timeSheet, HoursFromCalendar(dayTermFrom, dayTermStop, int16(idx), x))
	}
	return timeSheet
}

// TimesheetWorkContract sums the contract and position schedules day by day
// inside the term dayTermFrom..dayTermStop; days outside the term become 0.
//
// NOTE(review): the error from zip is discarded, so slices of different
// lengths silently produce an empty result.
func TimesheetWorkContract(monthContract []int32, monthPosition []int32, dayTermFrom int16, dayTermStop int16) []int32 {
	idxFrom := int(dayTermFrom - 1)
	idxStop := int(dayTermStop - 1)
	var zipedMonth, _ = zip(monthContract, monthPosition)
	var result []int32
	for idx, z := range zipedMonth {
		var res int32 = 0
		if idx >= idxFrom && idx <= idxStop {
			res = z.a + z.b
		}
		result = append(result, res)
	}
	return result
}
// SecondsFromPeriodWeekSchedule looks up the scheduled seconds for
// dayOrdinal of the period's month in a Mon..Sun week schedule.
func SecondsFromPeriodWeekSchedule(period IPeriod, weekSchedule []int32, dayOrdinal int16) int32 {
	periodBeginCwd := WeekDayOfMonth(period, 1)
	return SecondsFromWeekSchedule(weekSchedule, dayOrdinal, periodBeginCwd)
}

// SecondsFromWeekSchedule returns the week-schedule entry for the weekday of
// the dayOrdinal-th day of a month starting on weekday periodBeginCwd, or 0
// when the resulting index falls outside the schedule.
func SecondsFromWeekSchedule(weekSchedule []int32, dayOrdinal int16, periodBeginCwd int16) int32 {
	dayOfWeek := DayOfWeekFromOrdinal(dayOrdinal, periodBeginCwd)
	indexWeek := int(dayOfWeek - 1)
	if indexWeek < 0 || indexWeek >= len(weekSchedule) {
		return 0
	}
	return weekSchedule[indexWeek]
}
// SecondsFromScheduleSeq returns the seconds recorded for dayOrdinal in a
// time table covering the term days dayFromOrd..dayEndsOrd, or 0 when the
// day falls outside the term or outside the table.
func SecondsFromScheduleSeq(timeTable []int32, dayOrdinal int16, dayFromOrd int16, dayEndsOrd int16) int32 {
	if dayOrdinal < dayFromOrd || dayOrdinal > dayEndsOrd {
		return 0
	}
	idx := int(dayOrdinal - dayFromOrd)
	if idx >= 0 && idx < len(timeTable) {
		return timeTable[idx]
	}
	return 0
}
// ScheduleBaseSubtract subtracts the subtract schedule from template day by
// day, floored at 0, inside the range dayFrom..dayStop (1-based, inclusive);
// days outside the range become 0.
//
// NOTE(review): the error from zip is discarded, so slices of different
// lengths silently produce an empty result.
func ScheduleBaseSubtract(template []int32, subtract []int32, dayFrom int16, dayStop int16) []int32 {
	idxFrom := int(dayFrom - 1)
	idxStop := int(dayStop - 1)
	var zipedMonth, _ = zip(template, subtract)
	var result []int32
	for idx, z := range zipedMonth {
		var res int32 = 0
		if idx >= idxFrom && idx <= idxStop {
			res = max32(0, z.a - z.b)
		}
		result = append(result, res)
	}
	return result
}
func PlusHoursFromCalendar(dayTermFrom int16, dayTermStop int16, dayIndex int16, partSeconds int32, workSeconds int32) int32 {
dayOrdinal := dayIndex + 1
var plusSeconds = workSeconds
if dayTermFrom > dayOrdinal {
plusSeconds = 0
}
if dayTermStop < dayOrdinal {
plusSeconds = 0
}
return plusSeconds + partSeconds
}
// HoursFromCalendar returns workSeconds when the day's 1-based ordinal
// (dayIndex+1) lies inside the term [dayTermFrom, dayTermStop], and zero
// otherwise.
func HoursFromCalendar(dayTermFrom int16, dayTermStop int16, dayIndex int16, workSeconds int32) int32 {
	dayOrdinal := dayIndex + 1
	if dayOrdinal < dayTermFrom || dayOrdinal > dayTermStop {
		return 0
	}
	return workSeconds
}
package avail
import (
"fmt"
"regexp"
"strings"
"time"
)
// fieldType is an enum which represents different parts of a total cron expression.
// For example in the expression "0 10 15 * * *", 0 would be of type "minute".
// The expression fields appear in this order: minute, hour, day of month,
// month, day of week, year.
type fieldType string

const (
	minute  fieldType = "minute"
	hour    fieldType = "hour"
	day     fieldType = "day"
	month   fieldType = "month"
	weekday fieldType = "weekday"
	year    fieldType = "year"
)

// cronExpressionRegex accepts six space-separated terms, each of which may be
// a comma list (1,2,3), a range (1-5), a single number or "*".
var cronExpressionRegex = regexp.MustCompile(`^((((\d+,)+\d+|(\d+(-)\d+)|\d+|\*) ?){6})$`)
// ParsedExpression represents a breakdown of a given cron time expression,
// one parsed Field per position of the 6-term expression.
type ParsedExpression struct {
	Minutes  Field
	Hours    Field
	Days     Field
	Months   Field
	Weekdays Field
	Years    Field
}

// Timeframe represents both the raw cron expression and the datastructures used to represent that
// expression for easy checking
type Timeframe struct {
	Expression       string // * * * * * * 6 fields - min, hours, day of month, month, day of week, year
	ParsedExpression ParsedExpression
}
// New will parse the given cron expression and allow user to check if the time given is within.
// The expression consists of 6 whitespace-separated fields:
// minute, hour, day of month, month, day of week, year.
func New(expression string) (Timeframe, error) {
	if !cronExpressionRegex.MatchString(expression) {
		return Timeframe{}, fmt.Errorf("could not parse cron expression: %s; mis-formatted expression", expression)
	}

	terms := strings.Split(expression, " ")

	// we need this extra check to make sure there are the proper amount of fields because I am bad at regex
	if len(terms) != 6 {
		return Timeframe{}, fmt.Errorf("could not parse cron expression: %s; must have 6 terms", expression)
	}

	// Locals are suffixed with "Field" so they do not shadow the fieldType
	// constants (minute, hour, day, month, weekday, year) passed to newField;
	// the original code shadowed those constants mid-function.
	minutesField, err := newField(minute, terms[0], 0, 59)
	if err != nil {
		return Timeframe{}, err
	}

	hoursField, err := newField(hour, terms[1], 0, 23)
	if err != nil {
		return Timeframe{}, err
	}

	daysField, err := newField(day, terms[2], 1, 31)
	if err != nil {
		return Timeframe{}, err
	}

	monthsField, err := newField(month, terms[3], 1, 12)
	if err != nil {
		return Timeframe{}, err
	}

	weekdaysField, err := newField(weekday, terms[4], 0, 6)
	if err != nil {
		return Timeframe{}, err
	}

	yearsField, err := newField(year, terms[5], 1970, 2100)
	if err != nil {
		return Timeframe{}, err
	}

	return Timeframe{
		Expression: expression,
		ParsedExpression: ParsedExpression{
			Minutes:  minutesField,
			Hours:    hoursField,
			Days:     daysField,
			Months:   monthsField,
			Weekdays: weekdaysField,
			Years:    yearsField,
		},
	}, nil
}
// Able will evaluate if the time given is within the cron expression. All six
// fields (minute, hour, day of month, month, weekday, year) must match for
// the time to be accepted.
//
// The original implementation iterated over a slice of fieldType values and
// dispatched through a switch; that indirection added nothing, so the checks
// are now performed directly. The parameter was also renamed from `time`,
// which shadowed the time package.
func (a *Timeframe) Able(t time.Time) bool {
	pe := &a.ParsedExpression
	if _, ok := pe.Minutes.Values[t.Minute()]; !ok {
		return false
	}
	if _, ok := pe.Hours.Values[t.Hour()]; !ok {
		return false
	}
	if _, ok := pe.Days.Values[t.Day()]; !ok {
		return false
	}
	if _, ok := pe.Months.Values[int(t.Month())]; !ok {
		return false
	}
	if _, ok := pe.Weekdays.Values[int(t.Weekday())]; !ok {
		return false
	}
	if _, ok := pe.Years.Values[t.Year()]; !ok {
		return false
	}
	return true
}
// generateSequentialSet builds a set containing every integer in the
// inclusive range [start, end]; an inverted range yields an empty set.
func generateSequentialSet(start, end int) map[int]struct{} {
	out := map[int]struct{}{}
	for v := start; v <= end; v++ {
		out[v] = struct{}{}
	}
	return out
}
package elm
import (
"fmt"
"protoc-gen-elm/pkg/stringextras"
"text/template"
"google.golang.org/protobuf/types/descriptorpb"
)
// WellKnownType - information to handle Google well known types
type WellKnownType struct {
	Type    Type         // Elm type the well-known type maps onto
	Encoder VariableName // name of the Elm JSON encoder function
	Decoder VariableName // name of the Elm JSON decoder function
}
var (
	// WellKnownTypeMap - map of Google well known type PB identifier to encoder/decoder info.
	// Note the 64-bit integer wrappers use numericStringEncoder — presumably
	// because 64-bit values are represented as strings in JSON; verify against
	// the proto3 JSON mapping.
	WellKnownTypeMap = map[string]WellKnownType{
		".google.protobuf.Timestamp": {
			Type:    "Timestamp",
			Decoder: "timestampDecoder",
			Encoder: "timestampEncoder",
		},
		".google.protobuf.Int32Value": {
			Type:    intType,
			Decoder: "intValueDecoder",
			Encoder: "intValueEncoder",
		},
		".google.protobuf.Int64Value": {
			Type:    intType,
			Decoder: "intValueDecoder",
			Encoder: "numericStringEncoder",
		},
		".google.protobuf.UInt32Value": {
			Type:    intType,
			Decoder: "intValueDecoder",
			Encoder: "intValueEncoder",
		},
		".google.protobuf.UInt64Value": {
			Type:    intType,
			Decoder: "intValueDecoder",
			Encoder: "numericStringEncoder",
		},
		".google.protobuf.DoubleValue": {
			Type:    floatType,
			Decoder: "floatValueDecoder",
			Encoder: "floatValueEncoder",
		},
		".google.protobuf.FloatValue": {
			Type:    floatType,
			Decoder: "floatValueDecoder",
			Encoder: "floatValueEncoder",
		},
		".google.protobuf.StringValue": {
			Type:    stringType,
			Decoder: "stringValueDecoder",
			Encoder: "stringValueEncoder",
		},
		".google.protobuf.BytesValue": {
			Type:    bytesType,
			Decoder: "bytesValueDecoder",
			Encoder: "bytesValueEncoder",
		},
		".google.protobuf.BoolValue": {
			Type:    boolType,
			Decoder: "boolValueDecoder",
			Encoder: "boolValueEncoder",
		},
	}

	// reservedKeywords is the set of Elm keywords that cannot be used as
	// identifiers; see appendUnderscoreToReservedKeywords.
	reservedKeywords = map[string]bool{
		"module":   true,
		"exposing": true,
		"import":   true,
		"type":     true,
		"let":      true,
		"in":       true,
		"if":       true,
		"then":     true,
		"else":     true,
		"where":    true,
		"case":     true,
		"of":       true,
		"port":     true,
		"as":       true,
	}
)
// TypeAlias - defines an Elm type alias (sometimes called a record)
// https://guide.elm-lang.org/types/type_aliases.html
type TypeAlias struct {
	Name    Type
	Decoder VariableName
	Encoder VariableName
	Fields  []TypeAliasField
}

// FieldDecoder is the Elm decoder expression emitted for one field of a type
// alias (see the "type-alias" template below).
type FieldDecoder string

// FieldEncoder is the Elm encoder expression emitted for one field of a type
// alias (see the "type-alias" template below).
type FieldEncoder string

// TypeAliasField - type alias field definition
type TypeAliasField struct {
	Name    VariableName
	Type    Type
	Number  ProtobufFieldNumber // source protobuf field number, rendered as a comment
	Decoder FieldDecoder
	Encoder FieldEncoder
}
// appendUnderscoreToReservedKeywords makes an identifier safe for Elm by
// suffixing it with "_" when it collides with a reserved keyword.
func appendUnderscoreToReservedKeywords(in string) string {
	if !reservedKeywords[in] {
		return in
	}
	return fmt.Sprintf("%s_", in)
}
// FieldName - simple camelcase variable name with first letter lower,
// suffixed with "_" if it would collide with an Elm keyword.
func FieldName(in string) VariableName {
	return VariableName(appendUnderscoreToReservedKeywords(stringextras.LowerCamelCase(in)))
}

// FieldJSONName - JSON identifier for field decoder/encoding, taken from the
// protobuf descriptor's canonical JSON name.
func FieldJSONName(pb *descriptorpb.FieldDescriptorProto) VariantJSONName {
	return VariantJSONName(pb.GetJsonName())
}
// RequiredFieldEncoder emits the Elm encoder expression for a required field.
func RequiredFieldEncoder(pb *descriptorpb.FieldDescriptorProto) FieldEncoder {
	return FieldEncoder(fmt.Sprintf(
		"requiredFieldEncoder \"%s\" %s %s v.%s",
		FieldJSONName(pb),
		BasicFieldEncoder(pb),
		BasicFieldDefaultValue(pb),
		FieldName(pb.GetName()),
	))
}

// RequiredFieldDecoder emits the Elm decoder expression for a required field.
func RequiredFieldDecoder(pb *descriptorpb.FieldDescriptorProto) FieldDecoder {
	return FieldDecoder(fmt.Sprintf(
		"required \"%s\" %s %s",
		FieldJSONName(pb),
		BasicFieldDecoder(pb),
		BasicFieldDefaultValue(pb),
	))
}

// OneOfEncoder emits the Elm encoder expression for a oneof field.
func OneOfEncoder(pb *descriptorpb.OneofDescriptorProto) FieldEncoder {
	return FieldEncoder(fmt.Sprintf("%s v.%s",
		EncoderName(Type(stringextras.CamelCase(pb.GetName()))),
		FieldName(pb.GetName()),
	))
}

// OneOfDecoder emits the Elm decoder expression for a oneof field.
func OneOfDecoder(pb *descriptorpb.OneofDescriptorProto) FieldDecoder {
	return FieldDecoder(fmt.Sprintf(
		"field %s",
		DecoderName(Type(stringextras.CamelCase(pb.GetName()))),
	))
}
// MapType emits the Elm Dict type for a protobuf map field. messagePb is the
// synthesized map-entry message, whose fields 0 and 1 are the key and value.
func MapType(messagePb *descriptorpb.DescriptorProto) Type {
	keyField := messagePb.GetField()[0]
	valueField := messagePb.GetField()[1]
	return Type(fmt.Sprintf(
		"Dict.Dict %s %s",
		BasicFieldType(keyField),
		BasicFieldType(valueField),
	))
}

// MapEncoder emits the Elm encoder expression for a protobuf map field,
// encoding each entry with the value field's basic encoder.
func MapEncoder(
	fieldPb *descriptorpb.FieldDescriptorProto,
	messagePb *descriptorpb.DescriptorProto,
) FieldEncoder {
	valueField := messagePb.GetField()[1]
	return FieldEncoder(fmt.Sprintf(
		"mapEntriesFieldEncoder \"%s\" %s v.%s",
		FieldJSONName(fieldPb),
		BasicFieldEncoder(valueField),
		FieldName(fieldPb.GetName()),
	))
}

// MapDecoder emits the Elm decoder expression for a protobuf map field.
func MapDecoder(
	fieldPb *descriptorpb.FieldDescriptorProto,
	messagePb *descriptorpb.DescriptorProto,
) FieldDecoder {
	valueField := messagePb.GetField()[1]
	return FieldDecoder(fmt.Sprintf(
		"mapEntries \"%s\" %s",
		FieldJSONName(fieldPb),
		BasicFieldDecoder(valueField),
	))
}
// MaybeType wraps an Elm type in Maybe, for optional fields.
func MaybeType(t Type) Type {
	return Type(fmt.Sprintf("Maybe %s", t))
}

// MaybeEncoder emits the Elm encoder expression for an optional field.
func MaybeEncoder(pb *descriptorpb.FieldDescriptorProto) FieldEncoder {
	return FieldEncoder(fmt.Sprintf(
		"optionalEncoder \"%s\" %s v.%s",
		FieldJSONName(pb),
		BasicFieldEncoder(pb),
		FieldName(pb.GetName()),
	))
}

// MaybeDecoder emits the Elm decoder expression for an optional field.
func MaybeDecoder(pb *descriptorpb.FieldDescriptorProto) FieldDecoder {
	return FieldDecoder(fmt.Sprintf(
		"optional \"%s\" %s",
		FieldJSONName(pb),
		BasicFieldDecoder(pb),
	))
}

// ListType wraps an Elm type in List, for repeated fields.
func ListType(t Type) Type {
	return Type(fmt.Sprintf("List %s", t))
}

// ListEncoder emits the Elm encoder expression for a repeated field.
func ListEncoder(pb *descriptorpb.FieldDescriptorProto) FieldEncoder {
	return FieldEncoder(fmt.Sprintf(
		"repeatedFieldEncoder \"%s\" %s v.%s",
		FieldJSONName(pb),
		BasicFieldEncoder(pb),
		FieldName(pb.GetName()),
	))
}

// ListDecoder emits the Elm decoder expression for a repeated field.
func ListDecoder(pb *descriptorpb.FieldDescriptorProto) FieldDecoder {
	return FieldDecoder(fmt.Sprintf(
		"repeated \"%s\" %s",
		FieldJSONName(pb),
		BasicFieldDecoder(pb),
	))
}
// OneOfType builds the Elm union type name for a oneof: upper camelcase,
// suffixed with "_" if it would collide with an Elm keyword.
func OneOfType(in string) Type {
	return Type(appendUnderscoreToReservedKeywords(stringextras.UpperCamelCase(in)))
}
// TypeAliasTemplate - defines the "type-alias" template that renders a
// self-contained Elm type alias together with its JSON decoder and encoder.
func TypeAliasTemplate(t *template.Template) (*template.Template, error) {
	return t.Parse(`
{{- define "type-alias" -}}
type alias {{ .Name }} =
    { {{ range $i, $v := .Fields }}
        {{- if $i }}, {{ end }}{{ .Name }} : {{ .Type }}{{ if .Number }} -- {{ .Number }}{{ end }}
    {{ end }}}


{{ .Decoder }} : JD.Decoder {{ .Name }}
{{ .Decoder }} =
    JD.lazy <| \_ -> decode {{ .Name }}{{ range .Fields }}
        |> {{ .Decoder }}{{ end }}


{{ .Encoder }} : {{ .Name }} -> JE.Value
{{ .Encoder }} v =
    JE.object <| List.filterMap identity <|
        [{{ range $i, $v := .Fields }}
        {{- if $i }},{{ end }} ({{ .Encoder }})
        {{ end }}]
{{- end -}}
`)
}
package util
import (
"time"
)
// Days is the duration of a day (24 hours).
const Days = time.Hour * 24
// GetLowerBound returns the earlier of two times. A nil time is considered infinitely late so won't be
// used if the other time is not nil.
func GetLowerBound(t1 *time.Time, t2 *time.Time) time.Time {
	return getBound(t1, t2, false)
}
// GetUpperBound returns the later of two times. A nil time is considered infinitely early so won't be
// used if the other time is not nil.
func GetUpperBound(t1 *time.Time, t2 *time.Time) time.Time {
	return getBound(t1, t2, true)
}
func getBound(t1 *time.Time, t2 *time.Time, lowerOrUpper bool) time.Time {
if t1 != nil && t2 != nil {
if !lowerOrUpper {
if t1.Before(*t2) {
return *t1
}
return *t2
}
if t1.After(*t2) {
return *t1
}
return *t2
} else if t1 != nil {
return *t1
} else {
return *t2
}
}
// SetWeekday changes the given datetime to occur on the given week day.
func SetWeekday(dt time.Time, wd time.Weekday) time.Time {
di := int(wd - dt.Weekday())
return dt.AddDate(0, 0, di)
}
// SetToStartOfDay changes the given datetime to the start of the day (0:00).
func SetToStartOfDay(dt time.Time) time.Time {
y, m, d := dt.Date()
return time.Date(y, m, d, 0, 0, 0, 0, time.Local)
}
// SetToEndOfDay changes the given datetime to the end of the day (23:59).
func SetToEndOfDay(dt time.Time) time.Time {
y, m, d := dt.Date()
return time.Date(y, m, d, 23, 59, 59, 999999999, time.Local)
}
// SetToStartOfWeek changes the given datetime to the start of the week
// (Monday, 0:00 local time).
func SetToStartOfWeek(dt time.Time) time.Time {
	// Shift weekday numbering so Monday=0 ... Sunday=6.
	daysPastMonday := (int(dt.Weekday()) + 6) % 7
	return SetToStartOfDay(dt.AddDate(0, 0, -daysPastMonday))
}
// SetToEndOfWeek changes the given datetime to the end of the week
// (Sunday, 23:59:59.999999999 local time).
func SetToEndOfWeek(dt time.Time) time.Time {
	// Shift weekday numbering so Monday=0 ... Sunday=6.
	daysPastMonday := (int(dt.Weekday()) + 6) % 7
	return SetToEndOfDay(dt.AddDate(0, 0, 6-daysPastMonday))
}
// SetTime changes the given datetime to the time of the day.
func SetTime(dt time.Time, tm time.Time) time.Time {
y, m, d := dt.Date()
return time.Date(y, m, d, tm.Hour(), tm.Minute(), tm.Second(), tm.Nanosecond(), time.Local)
}
// EpochWeek returns the number of weeks passed since January 1, 1970 UTC.
// Note this does not mean it's aligned for weekdays, i.e. the Monday after
// Sunday does not necessary have a higher EpochWeek number.
func EpochWeek(dt time.Time) int {
return int(dt.UTC().Unix() / 604800)
}
// ParseISODate parses a string of format 2006-01-02.
func ParseISODate(s string) (time.Time, error) {
return time.ParseInLocation("2006-01-02", s, time.Local)
} | util/time_util.go | 0.809615 | 0.460289 | time_util.go | starcoder |
package valuerange
import (
"github.com/iotaledger/hive.go/marshalutil"
"github.com/iotaledger/hive.go/stringify"
"golang.org/x/xerrors"
)
// EndPoint contains information about where ValueRanges start and end. It combines a threshold value with a BoundType.
type EndPoint struct {
	value     Value     // threshold value of the end point
	boundType BoundType // whether the threshold itself is included (open/closed)
}
// NewEndPoint create a new EndPoint from the given details.
func NewEndPoint(value Value, boundType BoundType) *EndPoint {
	return &EndPoint{value: value, boundType: boundType}
}
// EndPointFromBytes unmarshals an EndPoint from a sequence of bytes.
func EndPointFromBytes(endPointBytes []byte) (endPoint *EndPoint, consumedBytes int, err error) {
	marshalUtil := marshalutil.New(endPointBytes)
	endPoint, err = EndPointFromMarshalUtil(marshalUtil)
	if err != nil {
		err = xerrors.Errorf("failed to parse EndPoint from MarshalUtil: %w", err)
		return endPoint, 0, err
	}
	return endPoint, marshalUtil.ReadOffset(), nil
}
// EndPointFromMarshalUtil unmarshals an EndPoint using a MarshalUtil (for easier unmarshaling).
func EndPointFromMarshalUtil(marshalUtil *marshalutil.MarshalUtil) (endPoint *EndPoint, err error) {
	result := &EndPoint{}
	if result.value, err = ValueFromMarshalUtil(marshalUtil); err != nil {
		return result, xerrors.Errorf("failed to parse Value from MarshalUtil: %w", err)
	}
	if result.boundType, err = BoundTypeFromMarshalUtil(marshalUtil); err != nil {
		return result, xerrors.Errorf("failed to parse BoundType from MarshalUtil: %w", err)
	}
	return result, nil
}
// Value returns the Value of the EndPoint.
func (e *EndPoint) Value() Value {
	return e.value
}

// BoundType returns the BoundType of the EndPoint.
func (e *EndPoint) BoundType() BoundType {
	return e.boundType
}

// Bytes returns a marshaled version of the EndPoint: the value followed by
// the bound type.
func (e *EndPoint) Bytes() []byte {
	return marshalutil.New().
		Write(e.value).
		Write(e.boundType).
		Bytes()
}

// String returns a human readable version of the EndPoint.
func (e *EndPoint) String() string {
	return stringify.Struct("EndPoint",
		stringify.StructField("value", e.value),
		stringify.StructField("boundType", e.boundType),
	)
}
package graphs
import "sort"
// ccGraph is the concrete implementation of ConnectedComponents, built by
// InitConnectedComponents via depth-first search.
type ccGraph struct {
	graph   Graph   // Input graph
	ccIndex []int   // ccIndex[v] returns the component ID of a vertex v
	ccSize  []int   // ccSize[i] shows the size of i-th component
	marked  []bool  // internal data structure used for DFS
	count   int     // number of connected components
	comp    [][]int // comp[i] is a slice of indices that belong to i-th component
}
// ConnectedComponents interface provides the API for getting
// the number of connected components in a graph.
// Component IDs are dense integers in the range [0, Count()).
type ConnectedComponents interface {
	// Graph returns the source graph
	Graph() Graph
	// Index returns the connected component index for the vertex v
	Index(v int) int
	// Component returns a sorted slice of vertices in the given connected component
	Component(compID int) []int
	// CompSize returns size of the given connected component
	CompSize(compID int) int
	// Count returns total number of connected components
	Count() int
	// AllIndices returns all connected components: AllIndices()[v] returns
	// connected component index for the vertex v
	AllIndices() []int
	// AllCompSizes returns connected component size: AllCompSizes()[i] returns
	// number of vertices in i-th connected component
	AllCompSizes() []int
	// AllComponents returns all connected components: AllComponents()[i] returns
	// a slice of vertices that belong to the i-th connected component
	AllComponents() [][]int
}
// Graph returns the source graph.
func (cc *ccGraph) Graph() Graph {
	return cc.graph
}

// Index returns the connected component index for the vertex v.
func (cc *ccGraph) Index(v int) int {
	return cc.ccIndex[v]
}

// Component returns the (pre-sorted) slice of vertices of the component.
func (cc *ccGraph) Component(compID int) []int {
	return cc.comp[compID]
}

// CompSize returns the number of vertices in the component.
func (cc *ccGraph) CompSize(compID int) int {
	return cc.ccSize[compID]
}

// Count returns the total number of connected components.
func (cc *ccGraph) Count() int {
	return cc.count
}

// AllIndices returns the vertex-to-component index slice.
func (cc *ccGraph) AllIndices() []int {
	return cc.ccIndex
}

// AllCompSizes returns the per-component size slice.
func (cc *ccGraph) AllCompSizes() []int {
	return cc.ccSize
}

// AllComponents returns all components as slices of vertices.
func (cc *ccGraph) AllComponents() [][]int {
	return cc.comp
}
// InitConnectedComponents searches the given graph for connected components
// and returns arrays c []int and s []int, where the value c[v] is an id
// of a component, to which the given vertex 'v' belongs, and s[i] is the size
// of component with id 'i'.
func InitConnectedComponents(g Graph) ConnectedComponents {
	cc := new(ccGraph)

	// Directed graphs are first converted to their undirected counterpart so
	// that "connected" means weakly connected.
	switch g.(type) {
	case *digraph:
		cc.graph = InitGraphFromGraph(g)
	default:
		cc.graph = g
	}

	cc.ccIndex = make([]int, g.VNum(), g.VNum())
	cc.ccSize = make([]int, g.VNum(), g.VNum())
	// comp always keeps one extra (empty) slice at index cc.count for the
	// component currently being collected by runDFS; the surplus tail entry
	// is trimmed after the loop.
	cc.comp = make([][]int, 1)
	cc.marked = make([]bool, g.VNum(), g.VNum())
	cc.count = 0

	for v := 0; v < g.VNum(); v++ {
		if !cc.marked[v] {
			// runDFS fills ccIndex/ccSize/comp for component id cc.count.
			// NOTE: recursion depth equals the component's DFS depth; very
			// large graphs could exhaust the stack — confirm expected sizes.
			runDFS(cc, v)
			cc.count++
			cc.comp = append(cc.comp, []int{})
		}
	}
	cc.comp = cc.comp[:cc.count]

	// Components are exposed in sorted vertex order.
	for i := range cc.comp {
		sort.Ints(cc.comp[i])
	}

	return cc
}
// runDFS marks every vertex reachable from v as belonging to the component
// with id cc.count, updating ccIndex, ccSize and comp along the way. It
// relies on InitConnectedComponents having grown cc.comp so that
// cc.comp[cc.count] exists.
func runDFS(cc *ccGraph, v int) {
	cc.marked[v] = true
	cc.ccIndex[v] = cc.count
	cc.ccSize[cc.count]++
	cc.comp[cc.count] = append(cc.comp[cc.count], v)
	for _, w := range cc.graph.Edges(v) {
		if !cc.marked[w] {
			runDFS(cc, w)
		}
	}
}
package assert
import (
"fmt"
"reflect"
"runtime"
"testing"
"unicode/utf8"
)
// callStackAdjust is an extra number of stack frames to skip when reporting
// the file:line of a failing assertion (see callerInfo).
var callStackAdjust = 0

// CallStackAdjust sets how many additional stack frames to skip, for callers
// that wrap these assertions in their own helper functions.
func CallStackAdjust(l int) {
	callStackAdjust = l
}

// CallStackReset clears any adjustment set via CallStackAdjust.
func CallStackReset() {
	callStackAdjust = 0
}
// Equal asserts that expected and actual are equal, first via reflection and,
// failing that, via comparison of their string representations.
func Equal(t *testing.T, expected, actual interface{}, message ...string) {
	if compareEquality(expected, actual) {
		return
	}
	t.Errorf("%v\nExpected \n\t[%#v]\nto be\n\t[%#v]\n%v ", message, actual, expected, callerInfo(2+callStackAdjust))
}
// NotEqual asserts the complement of Equal: it fails when the two values
// compare equal.
func NotEqual(t *testing.T, expected, actual interface{}, message ...string) {
	if !compareEquality(expected, actual) {
		return
	}
	t.Errorf("%v\nExpected \n\t[%#v]\n NOT to be\n\t[%#v]\n%v ", message, actual, expected, callerInfo(2+callStackAdjust))
}
// compareEquality checks that the supplied expected and actual objects are
// equal. Equality is established in order: nil checks, reflect.DeepEqual,
// comparison after converting actual to expected's type, and finally a
// comparison of the two values' Go-syntax string representations.
//
// This code is adapted from the ObjectsAreEqual method from:
// https://github.com/stretchr/testify/blob/master/assert/assertions.go
// Copyright (c) 2012 - 2013 <NAME> and <NAME>
//
// BUGFIX: the previous version compared reflect.Value structs with ==, which
// compares the Value headers rather than the underlying data (flagged by go
// vet's reflectvaluecompare check); values are now unwrapped via Interface()
// before comparison.
func compareEquality(expected, actual interface{}) bool {
	if expected == nil || actual == nil {
		return expected == actual
	}

	if reflect.DeepEqual(expected, actual) {
		return true
	}

	expectedValue := reflect.ValueOf(expected)
	actualValue := reflect.ValueOf(actual)

	// Attempt comparison after type conversion.
	if actualValue.Type().ConvertibleTo(expectedValue.Type()) &&
		reflect.DeepEqual(expected, actualValue.Convert(expectedValue.Type()).Interface()) {
		return true
	}

	// Last ditch effort.
	return fmt.Sprintf("%#v", expected) == fmt.Sprintf("%#v", actual)
}
// Nil checks that the actual value is nil.
//
// BUGFIX: the previous implementation called reflect.ValueOf(actual).IsNil()
// unconditionally, which panics for an untyped nil (zero reflect.Value) and
// for kinds that cannot be nil (int, string, struct, ...). Now an untyped
// nil passes and non-nilable values simply fail the assertion.
func Nil(t *testing.T, actual interface{}, message ...string) {
	isNil := actual == nil
	if !isNil {
		v := reflect.ValueOf(actual)
		switch v.Kind() {
		case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
			isNil = v.IsNil()
		}
	}
	if !isNil {
		t.Errorf("%v\n Expected \n\t[%#v]\nto be\n\tnil\n%v ", message, actual, callerInfo(2+callStackAdjust))
	}
}
// NotNil checks that the actual value is not nil.
//
// BUGFIX: the previous implementation called reflect.ValueOf(actual).IsNil()
// unconditionally, which panics for an untyped nil and for kinds that cannot
// be nil (int, string, struct, ...). Now an untyped nil fails the assertion
// and non-nilable values pass it.
func NotNil(t *testing.T, actual interface{}, message ...string) {
	isNil := actual == nil
	if !isNil {
		v := reflect.ValueOf(actual)
		switch v.Kind() {
		case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
			isNil = v.IsNil()
		}
	}
	if isNil {
		t.Errorf("%v Expected not to be nil\n%v ", message, callerInfo(2+callStackAdjust))
	}
}
// True checks that the actual value is true.
func True(t *testing.T, actual bool, message ...string) {
	if !actual {
		t.Errorf("%v\n Expected \n\t[%#v]\nto be\n\tTrue\n%v ", message, actual, callerInfo(2+callStackAdjust))
	}
}
// False checks that the actual value is false.
func False(t *testing.T, actual bool, message ...string) {
	if actual {
		t.Errorf("%v\n Expected \n\t[%#v]\nto be\n\tFalse\n%v ", message, actual, callerInfo(2+callStackAdjust))
	}
}
// Error checks that the actual error is not nil (compiler will check it supports the error interface)
func Error(t *testing.T, actual error, message ...string) {
	if actual == nil {
		t.Errorf("%v\n Expected \n\t[%#v]\nto be an error\n%v ", message, actual, callerInfo(2 +callStackAdjust))
	}
}
// NotError checks that the actual error is nil (compiler will check it supports the error interface)
func NotError(t *testing.T, actual error, message ...string) {
	if actual != nil {
		t.Errorf("%v\n Expected \n\t[%#v]\nto not be an error\n%v ", message, actual, callerInfo(2 +callStackAdjust))
	}
}
// Len checks that the length of the supplied Slice | Map | String equals
// expected. Strings are measured in runes, not bytes (see compareLength).
func Len(t *testing.T, expected int, actual interface{}, message ...string) {
	if !compareLength(actual, expected) {
		t.Errorf("%v\n Expected length \n\t[%#v]\nto be\n\t[%#v]\n%v ", message, actual, expected, callerInfo(2 +callStackAdjust))
	}
}
// compareLength reports whether the length of a equals b. Strings are
// measured in runes (utf8), not bytes; slices, maps, arrays and channels use
// their built-in length. Array and Chan support is new — the original only
// handled Slice, Map and String.
// Panics if a's kind has no notion of length.
func compareLength(a interface{}, b int) bool {
	v := reflect.ValueOf(a)
	switch v.Kind() {
	case reflect.Slice, reflect.Map, reflect.Array, reflect.Chan:
		return v.Len() == b
	case reflect.String:
		return utf8.RuneCountInString(v.String()) == b
	}
	panic("parameter 'a' does not have a Len")
}
// callerInfo returns the "file:line" location of the caller `skip` frames up
// the stack.
func callerInfo(skip int) string {
	_, fileName, lineNumber, _ := runtime.Caller(skip)
	return fmt.Sprintf("%v:%v", fileName, lineNumber)
}
package metrics
import (
"net/http"
"sort"
)
// Namespaced wraps a child metrics exporter and exposes a Type API that
// adds namespacing labels and name prefixes to new
type Namespaced struct {
labels map[string]string
mappings []*Mapping
child Type
}
// NewNamespaced wraps a metrics exporter and adds prefixes and custom labels.
func NewNamespaced(child Type) *Namespaced {
return &Namespaced{
child: child,
}
}
// Noop returns a namespaced metrics aggregator with a noop child.
func Noop() *Namespaced {
return &Namespaced{
child: DudType{},
}
}
// WithStats returns a namespaced metrics exporter with a different stats
// implementation.
func (n *Namespaced) WithStats(s Type) *Namespaced {
newNs := *n
newNs.child = s
return &newNs
}
// WithLabels returns a namespaced metrics exporter with a new set of labels,
// which are added to any prior labels.
func (n *Namespaced) WithLabels(labels ...string) *Namespaced {
newLabels := map[string]string{}
for k, v := range n.labels {
newLabels[k] = v
}
for i := 0; i < len(labels)-1; i += 2 {
newLabels[labels[i]] = labels[i+1]
}
newNs := *n
newNs.labels = newLabels
return &newNs
}
// WithMapping returns a namespaced metrics exporter with a new mapping.
// Mappings are applied _before_ the prefix and static labels are applied.
// Mappings already added are executed after this new mapping.
func (n *Namespaced) WithMapping(m *Mapping) *Namespaced {
newNs := *n
newMappings := make([]*Mapping, 0, len(n.mappings)+1)
newMappings = append(newMappings, m)
newMappings = append(newMappings, n.mappings...)
newNs.mappings = newMappings
return &newNs
}
//------------------------------------------------------------------------------
// Child returns the underlying metrics type.
func (n *Namespaced) Child() Type {
return n.child
}
// HandlerFunc returns the http handler of the child.
func (n *Namespaced) HandlerFunc() http.HandlerFunc {
return n.child.HandlerFunc()
}
//------------------------------------------------------------------------------
func (n *Namespaced) getPathAndLabels(path string) (newPath string, labelKeys, labelValues []string) {
newPath = path
if n.labels != nil && len(n.labels) > 0 {
labelKeys = make([]string, 0, len(n.labels))
for k := range n.labels {
labelKeys = append(labelKeys, k)
}
sort.Strings(labelKeys)
labelValues = make([]string, 0, len(n.labels))
for _, k := range labelKeys {
labelValues = append(labelValues, n.labels[k])
}
}
for _, mapping := range n.mappings {
if newPath, labelKeys, labelValues = mapping.mapPath(newPath, labelKeys, labelValues); newPath == "" {
return
}
}
return
}
type counterVecWithStatic struct {
staticValues []string
child StatCounterVec
}
func (c *counterVecWithStatic) With(values ...string) StatCounter {
newValues := make([]string, 0, len(c.staticValues)+len(values))
newValues = append(newValues, c.staticValues...)
newValues = append(newValues, values...)
return c.child.With(newValues...)
}
type timerVecWithStatic struct {
staticValues []string
child StatTimerVec
}
func (c *timerVecWithStatic) With(values ...string) StatTimer {
newValues := make([]string, 0, len(c.staticValues)+len(values))
newValues = append(newValues, c.staticValues...)
newValues = append(newValues, values...)
return c.child.With(newValues...)
}
type gaugeVecWithStatic struct {
staticValues []string
child StatGaugeVec
}
func (c *gaugeVecWithStatic) With(values ...string) StatGauge {
newValues := make([]string, 0, len(c.staticValues)+len(values))
newValues = append(newValues, c.staticValues...)
newValues = append(newValues, values...)
return c.child.With(newValues...)
}
//------------------------------------------------------------------------------
// GetCounter returns an editable counter stat for a given path.
func (n *Namespaced) GetCounter(path string) StatCounter {
path, labelKeys, labelValues := n.getPathAndLabels(path)
if path == "" {
return DudStat{}
}
if len(labelKeys) > 0 {
return n.child.GetCounterVec(path, labelKeys...).With(labelValues...)
}
return n.child.GetCounter(path)
}
// GetCounterVec returns an editable counter stat for a given path with labels,
// these labels must be consistent with any other metrics registered on the same
// path.
func (n *Namespaced) GetCounterVec(path string, labelNames ...string) StatCounterVec {
path, staticKeys, staticValues := n.getPathAndLabels(path)
if path == "" {
return FakeCounterVec(func(...string) StatCounter {
return DudStat{}
})
}
if len(staticKeys) > 0 {
newNames := make([]string, 0, len(staticKeys)+len(labelNames))
newNames = append(newNames, staticKeys...)
newNames = append(newNames, labelNames...)
return &counterVecWithStatic{
staticValues: staticValues,
child: n.child.GetCounterVec(path, newNames...),
}
}
return n.child.GetCounterVec(path, labelNames...)
}
// GetTimer returns an editable timer stat for a given path.
func (n *Namespaced) GetTimer(path string) StatTimer {
path, labelKeys, labelValues := n.getPathAndLabels(path)
if path == "" {
return DudStat{}
}
if len(labelKeys) > 0 {
return n.child.GetTimerVec(path, labelKeys...).With(labelValues...)
}
return n.child.GetTimer(path)
}
// GetTimerVec returns an editable timer stat for a given path with labels,
// these labels must be consistent with any other metrics registered on the same
// path.
func (n *Namespaced) GetTimerVec(path string, labelNames ...string) StatTimerVec {
path, staticKeys, staticValues := n.getPathAndLabels(path)
if path == "" {
return FakeTimerVec(func(...string) StatTimer {
return DudStat{}
})
}
if len(staticKeys) > 0 {
newNames := make([]string, 0, len(staticKeys)+len(labelNames))
newNames = append(newNames, staticKeys...)
newNames = append(newNames, labelNames...)
return &timerVecWithStatic{
staticValues: staticValues,
child: n.child.GetTimerVec(path, newNames...),
}
}
return n.child.GetTimerVec(path, labelNames...)
}
// GetGauge returns an editable gauge stat for a given path.
func (n *Namespaced) GetGauge(path string) StatGauge {
path, labelKeys, labelValues := n.getPathAndLabels(path)
if path == "" {
return DudStat{}
}
if len(labelKeys) > 0 {
return n.child.GetGaugeVec(path, labelKeys...).With(labelValues...)
}
return n.child.GetGauge(path)
}
// GetGaugeVec returns an editable gauge stat for a given path with labels,
// these labels must be consistent with any other metrics registered on the same
// path.
func (n *Namespaced) GetGaugeVec(path string, labelNames ...string) StatGaugeVec {
path, staticKeys, staticValues := n.getPathAndLabels(path)
if path == "" {
return FakeGaugeVec(func(...string) StatGauge {
return DudStat{}
})
}
if len(staticKeys) > 0 {
newNames := make([]string, 0, len(staticKeys)+len(labelNames))
newNames = append(newNames, staticKeys...)
newNames = append(newNames, labelNames...)
return &gaugeVecWithStatic{
staticValues: staticValues,
child: n.child.GetGaugeVec(path, newNames...),
}
}
return n.child.GetGaugeVec(path, labelNames...)
}
// Close stops aggregating stats and cleans up resources.
func (n *Namespaced) Close() error {
return n.child.Close()
} | internal/component/metrics/namespaced.go | 0.814754 | 0.500916 | namespaced.go | starcoder |
package parser
import (
"fmt"
"errors"
"strings"
"github.com/xrash/gonf/tokens"
)
// state is one step of the pushdown parser: it inspects the parser's
// lookahead token and pushes follow-up states onto the parser's stack.
type state func(p *Parser) error
// err builds a parse error reporting the token that was found and the
// quoted token(s) that were expected at its position.
func err(got tokens.Token, expected ...string) error {
	quoted := make([]string, len(expected))
	for i, e := range expected {
		quoted[i] = "'" + e + "'"
	}
	msg := fmt.Sprintf("Expected %s at line %d:%d. Got %s.", strings.Join(quoted, " OR "), got.Line(), got.Column(), got)
	return errors.New(msg)
}
// pairState expects either the start of a key/value pair (a bare or quoted
// string key), the end of an enclosing table, or EOF. The two identical
// STRING/QUOTE case bodies were merged.
func pairState(p *Parser) error {
	token := p.lookup()

	switch token.Type() {
	case tokens.T_STRING, tokens.T_QUOTE:
		// A key begins here: parse key then value, then look for more pairs.
		p.stack.push(buildPairNode)
		p.stack.push(pairState)
		p.stack.push(valueState)
		p.stack.push(keyState)
	case tokens.T_TABLE_END:
	case tokens.T_EOF:
	default:
		// BUGFIX: the expected-token list previously advertised "{"
		// (T_TABLE_START), which this state does not accept; it accepts
		// "}" (T_TABLE_END).
		return err(token, "STRING", "}", "EOF", "\"")
	}

	return nil
}
// keyState expects a bare or quoted string to use as a pair's key. The two
// identical STRING/QUOTE case bodies were merged.
func keyState(p *Parser) error {
	token := p.lookup()

	switch token.Type() {
	case tokens.T_STRING, tokens.T_QUOTE:
		p.stack.push(buildKeyNode)
		p.stack.push(stringState)
	default:
		return err(token, "STRING", "\"")
	}

	return nil
}
// valueState expects a value: a bare or quoted string, an array, or a table.
// The two identical STRING/QUOTE case bodies were merged.
func valueState(p *Parser) error {
	token := p.lookup()

	switch token.Type() {
	case tokens.T_STRING, tokens.T_QUOTE:
		p.stack.push(buildValueStringNode)
		p.stack.push(stringState)
	case tokens.T_ARRAY_START:
		p.stack.push(buildValueArrayNode)
		p.stack.push(arrayState)
	case tokens.T_TABLE_START:
		p.stack.push(buildValueTableNode)
		p.stack.push(tableState)
	default:
		return err(token, "STRING", "[", "{", "\"")
	}

	return nil
}
// arrayState expects the start of an array literal and schedules parsing of
// the opening bracket, the values, and the closing bracket.
func arrayState(p *Parser) error {
	token := p.lookup()

	if token.Type() != tokens.T_ARRAY_START {
		return err(token, "[")
	}

	p.stack.push(buildArrayNode)
	p.stack.push(arrayEndState)
	p.stack.push(valuesState)
	p.stack.push(arrayStartState)

	return nil
}
// valuesState expects either another array element (string, quoted string,
// array or table) or the closing "]". The four identical case bodies were
// merged.
func valuesState(p *Parser) error {
	token := p.lookup()

	switch token.Type() {
	case tokens.T_STRING, tokens.T_QUOTE, tokens.T_ARRAY_START, tokens.T_TABLE_START:
		// One more value: parse it, then look for further values.
		p.stack.push(buildValuesNode)
		p.stack.push(valuesState)
		p.stack.push(valueState)
	case tokens.T_ARRAY_END:
	default:
		return err(token, "STRING", "[", "{", "]", "\"")
	}

	return nil
}
// tableState expects the start of a table literal and schedules parsing of
// the opening brace, the contained pairs, and the closing brace.
func tableState(p *Parser) error {
	token := p.lookup()

	if token.Type() != tokens.T_TABLE_START {
		return err(token, "{")
	}

	p.stack.push(buildTableNode)
	p.stack.push(tableEndState)
	p.stack.push(pairState)
	p.stack.push(tableStartState)

	return nil
}
// stringState dispatches to the unquoted or quoted string production based
// on the lookahead token.
func stringState(p *Parser) error {
	token := p.lookup()
	if t := token.Type(); t == tokens.T_STRING {
		p.stack.push(unquotedStringState)
	} else if t == tokens.T_QUOTE {
		p.stack.push(quotedStringState)
	} else {
		return err(token, "STRING", "\"")
	}
	return nil
}
// unquotedStringState parses a bare string, which is just a single literal.
func unquotedStringState(p *Parser) error {
	token := p.lookup()
	if token.Type() != tokens.T_STRING {
		return err(token, "STRING")
	}
	p.stack.push(literalState)
	return nil
}
// quotedStringState parses a quoted string: opening quote, literal,
// closing quote.
func quotedStringState(p *Parser) error {
	token := p.lookup()
	switch token.Type() {
	case tokens.T_QUOTE:
		// Reverse order: opening quote, literal, closing quote.
		p.stack.push(quoteState)
		p.stack.push(literalState)
		p.stack.push(quoteState)
	default:
		// The only token that can start this production is a quote, so
		// report `"` as expected (previously misreported "STRING",
		// inconsistent with quoteState's message for the same check).
		return err(token, "\"")
	}
	return nil
}
// literalState consumes a string token and pushes the corresponding
// StringNode onto the node stack.
func literalState(p *Parser) error {
	token := p.lookup()
	if token.Type() != tokens.T_STRING {
		return err(token, "STRING")
	}
	p.next()
	p.nodeStack.push(NewStringNode(token.Value()))
	return nil
}
// quoteState consumes a single quote token.
func quoteState(p *Parser) error {
	token := p.lookup()
	if token.Type() != tokens.T_QUOTE {
		return err(token, "\"")
	}
	p.next()
	return nil
}
// arrayStartState consumes the '[' that opens an array.
func arrayStartState(p *Parser) error {
	token := p.lookup()
	if token.Type() != tokens.T_ARRAY_START {
		return err(token, "[")
	}
	p.next()
	return nil
}
// arrayEndState consumes the ']' that closes an array.
func arrayEndState(p *Parser) error {
	token := p.lookup()
	if token.Type() != tokens.T_ARRAY_END {
		return err(token, "]")
	}
	p.next()
	return nil
}
// tableStartState consumes the '{' that opens a table.
func tableStartState(p *Parser) error {
	token := p.lookup()
	if token.Type() != tokens.T_TABLE_START {
		return err(token, "{")
	}
	p.next()
	return nil
}
// tableEndState consumes the '}' that closes a table.
func tableEndState(p *Parser) error {
	token := p.lookup()
	if token.Type() != tokens.T_TABLE_END {
		return err(token, "}")
	}
	p.next()
	return nil
}
// buildPairNode folds the most recently parsed key and value into a
// PairNode, linking in a previously built PairNode (if one is on top of
// the node stack) as the next element of the pair list.
func buildPairNode(p *Parser) error {
	var pn *PairNode
	// Peek the top of the node stack: a PairNode there belongs to a pair
	// parsed earlier and becomes the tail; anything else is put back.
	if node := p.nodeStack.pop(); node.Kind() == PAIR_NODE {
		pn = node.(*PairNode)
	} else {
		p.nodeStack.push(node)
	}
	// The value was parsed after the key, so it sits above it on the stack.
	vn := p.nodeStack.pop().(*ValueNode)
	kn := p.nodeStack.pop().(*KeyNode)
	p.nodeStack.push(NewPairNode(kn, vn, pn))
	return nil
}
// buildValueStringNode wraps the StringNode on top of the node stack in a
// ValueNode holding a string value.
func buildValueStringNode(p *Parser) error {
	str := p.nodeStack.pop().(*StringNode)
	value := NewValueNode(str, nil, nil)
	p.nodeStack.push(value)
	return nil
}
// buildValueArrayNode wraps the ValuesNode on top of the node stack in an
// ArrayNode, then in a ValueNode holding an array value.
func buildValueArrayNode(p *Parser) error {
	values := p.nodeStack.pop().(*ValuesNode)
	value := NewValueNode(nil, nil, NewArrayNode(values))
	p.nodeStack.push(value)
	return nil
}
// buildKeyNode wraps the StringNode on top of the node stack in a KeyNode.
func buildKeyNode(p *Parser) error {
	str := p.nodeStack.pop().(*StringNode)
	p.nodeStack.push(NewKeyNode(str))
	return nil
}
// buildValuesNode prepends the most recently parsed ValueNode to the
// ValuesNode list (if one is already on top of the node stack).
func buildValuesNode(p *Parser) error {
	var values *ValuesNode
	// Peek the top of the node stack: a ValuesNode there is the tail of
	// the list built so far; anything else is put back untouched.
	if node := p.nodeStack.pop(); node.Kind() == VALUES_NODE {
		values = node.(*ValuesNode)
	} else {
		p.nodeStack.push(node)
	}
	vn := p.nodeStack.pop().(*ValueNode)
	p.nodeStack.push(NewValuesNode(vn, values))
	return nil
}
// buildArrayNode is a no-op. It appears to exist for symmetry with the
// other productions' builders (the ValuesNode is consumed later by
// buildValueArrayNode) — NOTE(review): confirm it is intentionally empty.
func buildArrayNode(p *Parser) error {
	return nil
}
// buildValueTableNode wraps the TableNode on top of the node stack in a
// ValueNode holding a table value.
func buildValueTableNode(p *Parser) error {
	table := p.nodeStack.pop().(*TableNode)
	value := NewValueNode(nil, table, nil)
	p.nodeStack.push(value)
	return nil
}
func buildTableNode(p *Parser) error {
pn := p.nodeStack.pop().(*PairNode)
p.nodeStack.push(NewTableNode(pn))
return nil
} | parser/states.go | 0.650467 | 0.483526 | states.go | starcoder |
package semantic
// rInt is an addressable copy of the int primitive type, used wherever a
// *PrimitiveType is required (e.g. a function's Return field).
var rInt = PrimitiveTypeInt
// rByte is an addressable copy of the byte primitive type, used wherever a
// *PrimitiveType is required (e.g. a function's Return field).
var rByte = PrimitiveTypeByte
// stdlib is the collection of standard library functions in Alan.
// Each entry records only the signature: parameter types (with IsRef for
// by-reference array parameters) and an optional return type. A nil Return
// denotes a procedure.
var stdlib = []struct {
	ID
	FunctionType
}{
	// writeInteger(int)
	{
		ID: "writeInteger",
		FunctionType: FunctionType{
			Parameters: []ParameterType{
				{
					DType: PrimitiveTypeInt,
				},
			},
		},
	},
	// writeByte(byte)
	{
		ID: "writeByte",
		FunctionType: FunctionType{
			Parameters: []ParameterType{
				{
					DType: PrimitiveTypeByte,
				},
			},
		},
	},
	// writeChar(byte)
	{
		ID: "writeChar",
		FunctionType: FunctionType{
			Parameters: []ParameterType{
				{
					DType: PrimitiveTypeByte,
				},
			},
		},
	},
	// writeString(ref byte[])
	{
		ID: "writeString",
		FunctionType: FunctionType{
			Parameters: []ParameterType{
				{
					DType: ArrayType{
						PrimitiveType: PrimitiveTypeByte,
					},
					IsRef: true,
				},
			},
		},
	},
	// readInteger() : int
	{
		ID: "readInteger",
		FunctionType: FunctionType{
			Parameters: []ParameterType{},
			Return:     &rInt,
		},
	},
	// readByte() : byte
	{
		ID: "readByte",
		FunctionType: FunctionType{
			Parameters: []ParameterType{},
			Return:     &rByte,
		},
	},
	// readChar() : byte
	{
		ID: "readChar",
		FunctionType: FunctionType{
			Parameters: []ParameterType{},
			Return:     &rByte,
		},
	},
	// readString(int, ref byte[])
	{
		ID: "readString",
		FunctionType: FunctionType{
			Parameters: []ParameterType{
				{
					DType: PrimitiveTypeInt,
				},
				{
					DType: ArrayType{
						PrimitiveType: PrimitiveTypeByte,
					},
					IsRef: true,
				},
			},
		},
	},
	// extend(byte) : int
	{
		ID: "extend",
		FunctionType: FunctionType{
			Parameters: []ParameterType{
				{
					DType: PrimitiveTypeByte,
				},
			},
			Return: &rInt,
		},
	},
	// shrink(int) : byte
	{
		ID: "shrink",
		FunctionType: FunctionType{
			Parameters: []ParameterType{
				{
					DType: PrimitiveTypeInt,
				},
			},
			Return: &rByte,
		},
	},
	// strlen(ref byte[]) : int
	{
		ID: "strlen",
		FunctionType: FunctionType{
			Parameters: []ParameterType{
				{
					DType: ArrayType{
						PrimitiveType: PrimitiveTypeByte,
					},
					IsRef: true,
				},
			},
			Return: &rInt,
		},
	},
	// strcmp(ref byte[], ref byte[]) : int
	{
		ID: "strcmp",
		FunctionType: FunctionType{
			Parameters: []ParameterType{
				{
					DType: ArrayType{
						PrimitiveType: PrimitiveTypeByte,
					},
					IsRef: true,
				},
				{
					DType: ArrayType{
						PrimitiveType: PrimitiveTypeByte,
					},
					IsRef: true,
				},
			},
			Return: &rInt,
		},
	},
	// strcpy(ref byte[], ref byte[])
	{
		ID: "strcpy",
		FunctionType: FunctionType{
			Parameters: []ParameterType{
				{
					DType: ArrayType{
						PrimitiveType: PrimitiveTypeByte,
					},
					IsRef: true,
				},
				{
					DType: ArrayType{
						PrimitiveType: PrimitiveTypeByte,
					},
					IsRef: true,
				},
			},
		},
	},
	// strcat(ref byte[], ref byte[])
	{
		ID: "strcat",
		FunctionType: FunctionType{
			Parameters: []ParameterType{
				{
					DType: ArrayType{
						PrimitiveType: PrimitiveTypeByte,
					},
					IsRef: true,
				},
				{
					DType: ArrayType{
						PrimitiveType: PrimitiveTypeByte,
					},
					IsRef: true,
				},
			},
		},
	},
}
package continuous
import (
"github.com/jtejido/ggsl/specfunc"
"github.com/jtejido/stats"
"github.com/jtejido/stats/err"
smath "github.com/jtejido/stats/math"
"math"
"math/rand"
)
// QWeibull is the q-Weibull distribution with rate λ, shape κ and
// entropic index q.
// https://en.wikipedia.org/wiki/Q-Weibull_distribution
type QWeibull struct {
	rate, shape, q float64 // λ, κ, q
	src rand.Source
}
// NewQWeibull returns a q-Weibull distribution with rate λ, shape κ and
// entropic index q, backed by the default random source.
func NewQWeibull(rate, shape, q float64) (*QWeibull, error) {
	return NewQWeibullWithSource(rate, shape, q, nil)
}
// NewQWeibullWithSource returns a q-Weibull distribution using the given
// random source (nil means the package-global source). It rejects
// rate <= 0, shape <= 0 and q >= 2 with err.Invalid().
func NewQWeibullWithSource(rate, shape, q float64, src rand.Source) (*QWeibull, error) {
	if rate <= 0 || shape <= 0 || q >= 2 {
		return nil, err.Invalid()
	}
	r := new(QWeibull)
	r.rate = rate
	r.shape = shape
	r.q = q
	r.src = src
	return r, nil
}
// String implements fmt.Stringer, describing the parameter limits and support.
func (q *QWeibull) String() string {
	return "QWeibull: Parameters - " + q.Parameters().String() + ", Support(x) - " + q.Support().String()
}
// Parameters returns the valid parameter domains:
// λ ∈ (0,∞)
// κ ∈ (0,∞)
// q ∈ (-∞,2) — matching the q < 2 constraint enforced by the constructor
// (a previous comment said 3, which disagreed with the code).
func (q *QWeibull) Parameters() stats.Limits {
	return stats.Limits{
		"λ": stats.Interval{0, math.Inf(1), true, true},
		"κ": stats.Interval{0, math.Inf(1), true, true},
		"q": stats.Interval{math.Inf(-1), 2, true, true},
	}
}
// Support returns the support of the distribution:
// x ∈ [0,∞) for q >= 1
// x ∈ [0, λ/(1-q)^(1/κ)) for q < 1
func (q *QWeibull) Support() stats.Interval {
	if q.q >= 1 {
		return stats.Interval{0, math.Inf(1), false, true}
	}
	return stats.Interval{0, q.rate / math.Pow(1-q.q, 1/q.shape), false, true}
}
// Probability returns the probability density function evaluated at x.
// The density is zero for x outside the support's lower bound.
func (q *QWeibull) Probability(x float64) float64 {
	if x >= 0 {
		z := x / q.rate
		norm := (2 - q.q) * (q.shape / q.rate)
		return norm * math.Pow(z, q.shape-1) * smath.Expq(-math.Pow(z, q.shape), q.q)
	}
	return 0
}
// Distribution returns the cumulative distribution function evaluated at x,
// expressed through the primed parameters q' = 1/(2-q) and λ' = λ/(2-q)^(1/κ).
func (q *QWeibull) Distribution(x float64) float64 {
	if x >= 0 {
		qPrime := 1 / (2 - q.q)
		ratePrime := q.rate / math.Pow(2-q.q, 1/q.shape)
		return 1 - smath.Expq(-math.Pow(x/ratePrime, q.shape), qPrime)
	}
	return 0
}
// Inverse returns the quantile function evaluated at probability p.
// p <= 0 maps to the lower support bound; p >= 1 maps to the upper bound
// (infinite when q >= 1).
func (q *QWeibull) Inverse(p float64) float64 {
	switch {
	case p <= 0:
		return 0
	case p >= 1:
		if q.q >= 1 {
			return math.Inf(1)
		}
		return q.rate / math.Pow(1-q.q, 1/q.shape)
	}
	exponent := (1 - q.q) / (2 - q.q)
	u := (1 - math.Pow(1-p, exponent)) / (1 - q.q)
	return math.Pow(u, 1/q.shape) * q.rate
}
// Mean returns the expected value, which takes different closed forms
// depending on where q falls relative to 1 and to κ-dependent thresholds.
// NOTE(review): the third and fourth branch conditions overlap for some
// (q, κ); order of evaluation decides which applies. Verify the thresholds
// against the reference formulas.
func (q *QWeibull) Mean() float64 {
	if q.q < 1 {
		return q.rate * (2 + (1 / (1 - q.q)) + (1 / q.shape)) * math.Pow(1-q.q, -1/q.shape) * specfunc.Beta(1+(1/q.shape), 2+(1/(1-q.q)))
	} else if q.q == 1 {
		// q -> 1 recovers the ordinary Weibull mean λ·Γ(1 + 1/κ).
		return q.rate * specfunc.Gamma(1+(1/q.shape))
	} else if 1 < q.q && q.q < 1+((1+2*q.shape)/(1+q.shape)) {
		return q.rate * (2 - q.q) * math.Pow(q.q-1, -(1+q.shape)/q.shape) * specfunc.Beta(1+(1/q.shape), -(1+(1/(1-q.q))+(1/q.shape)))
	} else if 1+(q.shape/(q.shape+1)) <= q.q && q.q < 2 {
		// Heavy tail: the first moment diverges.
		return math.Inf(1)
	} else {
		return math.NaN()
	}
}
func (q *QWeibull) Rand() float64 {
var rnd func() float64
if q.src != nil {
rnd = rand.New(q.src).Float64
} else {
rnd = rand.Float64
}
return q.Inverse(rnd())
} | dist/continuous/q_weibull.go | 0.748536 | 0.486941 | q_weibull.go | starcoder |
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// ExpressionInputObject is a test object for evaluating expressions: a type
// definition plus concrete property values. Generated model type.
type ExpressionInputObject struct {
    // Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
    additionalData map[string]interface{}
    // Definition of the test object.
    definition ObjectDefinitionable
    // Property values of the test object.
    properties []StringKeyObjectValuePairable
}
// NewExpressionInputObject instantiates a new expressionInputObject and sets the default values.
func NewExpressionInputObject()(*ExpressionInputObject) {
    m := &ExpressionInputObject{
        additionalData: make(map[string]interface{}),
    }
    return m
}
// CreateExpressionInputObjectFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value.
// This type has no subtypes, so the parse node is ignored and a plain instance is returned.
func CreateExpressionInputObjectFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
    return NewExpressionInputObject(), nil
}
// GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
// Safe to call on a nil receiver; returns nil in that case.
func (m *ExpressionInputObject) GetAdditionalData()(map[string]interface{}) {
    if m == nil {
        return nil
    }
    // Guard-clause form replaces the redundant else branch.
    return m.additionalData
}
// GetDefinition gets the definition property value. Definition of the test object.
// Safe to call on a nil receiver; returns nil in that case.
func (m *ExpressionInputObject) GetDefinition()(ObjectDefinitionable) {
    if m == nil {
        return nil
    }
    return m.definition
}
// GetFieldDeserializers returns the deserialization information for the
// current model: one callback per serialized field name.
func (m *ExpressionInputObject) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
    res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))
    // "definition" deserializes into a single ObjectDefinitionable.
    res["definition"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetObjectValue(CreateObjectDefinitionFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            m.SetDefinition(val.(ObjectDefinitionable))
        }
        return nil
    }
    // "properties" deserializes into a collection, down-cast element-wise
    // from the generic Parsable slice.
    res["properties"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetCollectionOfObjectValues(CreateStringKeyObjectValuePairFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            res := make([]StringKeyObjectValuePairable, len(val))
            for i, v := range val {
                res[i] = v.(StringKeyObjectValuePairable)
            }
            m.SetProperties(res)
        }
        return nil
    }
    return res
}
// GetProperties gets the properties property value. Property values of the test object.
// Safe to call on a nil receiver; returns nil in that case.
func (m *ExpressionInputObject) GetProperties()([]StringKeyObjectValuePairable) {
    if m == nil {
        return nil
    }
    return m.properties
}
// Serialize writes the current object's fields (definition, properties and
// any additional data) to the given serialization writer.
func (m *ExpressionInputObject) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
    {
        err := writer.WriteObjectValue("definition", m.GetDefinition())
        if err != nil {
            return err
        }
    }
    if m.GetProperties() != nil {
        // Up-cast the typed slice to the generic Parsable slice the writer expects.
        cast := make([]i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, len(m.GetProperties()))
        for i, v := range m.GetProperties() {
            cast[i] = v.(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable)
        }
        err := writer.WriteCollectionOfObjectValues("properties", cast)
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteAdditionalData(m.GetAdditionalData())
        if err != nil {
            return err
        }
    }
    return nil
}
// SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
// A nil receiver is a no-op.
func (m *ExpressionInputObject) SetAdditionalData(value map[string]interface{})() {
    if m == nil {
        return
    }
    m.additionalData = value
}
// SetDefinition sets the definition property value. Definition of the test object.
// A nil receiver is a no-op.
func (m *ExpressionInputObject) SetDefinition(value ObjectDefinitionable)() {
    if m == nil {
        return
    }
    m.definition = value
}
// SetProperties sets the properties property value. Property values of the test object.
func (m *ExpressionInputObject) SetProperties(value []StringKeyObjectValuePairable)() {
if m != nil {
m.properties = value
}
} | models/expression_input_object.go | 0.680348 | 0.420064 | expression_input_object.go | starcoder |
package fastly
import (
"fmt"
"sort"
)
// Snippet is the Fastly Snippet object as returned by the API.
type Snippet struct {
	// Priority determines the ordering for multiple snippets. Lower numbers execute first.
	Priority int `mapstructure:"priority"`
	// Dynamic sets the snippet version to regular (0) or dynamic (1).
	Dynamic int `mapstructure:"dynamic"`
	// SnippetName is the name for the snippet.
	SnippetName string `mapstructure:"name"`
	// Content is the VCL code that specifies exactly what the snippet does.
	Content string `mapstructure:"content"`
	// SnippetID is the snippet ID
	SnippetID string `mapstructure:"id"`
	// Type is the location in generated VCL where the snippet should be placed.
	Type string `mapstructure:"type"`
	// ServiceID is the ID of the Service to add the snippet to.
	ServiceID string `mapstructure:"service_id"`
	// Version is the editable version of the service.
	Version int `mapstructure:"version"`
	// DeletedAt, CreatedAt and UpdatedAt are API-supplied timestamps.
	DeletedAt string `mapstructure:"deleted_at"`
	CreatedAt string `mapstructure:"created_at"`
	UpdatedAt string `mapstructure:"updated_at"`
}
// CreateSnippetInput is the input for CreateSnippet. ServiceID and Version
// carry no `form` tag because they are sent in the URL path, not the body.
type CreateSnippetInput struct {
	// Priority determines the ordering for multiple snippets. Lower numbers execute first.
	Priority int `form:"priority"`
	// Version is the editable version of the service
	Version int
	// Dynamic sets the snippet version to regular (0) or dynamic (1).
	Dynamic int `form:"dynamic"`
	// SnippetName is the name for the snippet.
	SnippetName string `form:"name"`
	// Content is the VCL code that specifies exactly what the snippet does.
	Content string `form:"content"`
	// ServiceID is the ID of the Service to add the snippet to.
	ServiceID string
	// Type is the location in generated VCL where the snippet should be placed.
	Type string `form:"type"`
}
// CreateSnippet creates a new snippet or dynamic snippet on an unlocked
// version. It validates the input before issuing the request and returns
// the decoded Snippet on success.
func (c *Client) CreateSnippet(i *CreateSnippetInput) (*Snippet, error) {
	if i.ServiceID == "" {
		return nil, ErrMissingServiceID
	}
	if i.Version == 0 {
		return nil, ErrMissingVersion
	}
	if i.SnippetName == "" {
		return nil, ErrMissingSnippetName
	}
	// Regular (non-dynamic) snippets must carry their VCL content up front;
	// dynamic snippets receive content later via UpdateDynamicSnippet.
	if i.Dynamic == 0 && i.Content == "" {
		return nil, ErrMissingSnippetContent
	}
	if i.Type == "" {
		return nil, ErrMissingSnippetType
	}
	path := fmt.Sprintf("/service/%s/version/%d/snippet", i.ServiceID, i.Version)
	resp, err := c.PostForm(path, i, nil)
	if err != nil {
		return nil, err
	}
	var snippet *Snippet
	if err := decodeJSON(&snippet, resp.Body); err != nil {
		return nil, err
	}
	// Success: return an explicit nil (the previous `return snippet, err`
	// returned the stale outer err, which is always nil at this point).
	return snippet, nil
}
// DynamicSnippet is the object returned when updating or retrieving a
// Dynamic Snippet. Dynamic snippets are versionless, so there is no
// Version field here.
type DynamicSnippet struct {
	// ServiceID is the ID of the Service the snippet belongs to.
	ServiceID string `mapstructure:"service_id"`
	// SnippetID is the ID of the Snippet to modify
	SnippetID string `mapstructure:"snippet_id"`
	// Content is the VCL code that specifies exactly what the snippet does.
	Content string `mapstructure:"content"`
	// CreatedAt and UpdatedAt are API-supplied timestamps.
	CreatedAt string `mapstructure:"created_at"`
	UpdatedAt string `mapstructure:"updated_at"`
}
// UpdateDynamicSnippetInput is the input for UpdateDynamicSnippet.
// ServiceID and SnippetID are sent in the URL path; only Content goes in
// the form body.
type UpdateDynamicSnippetInput struct {
	// ServiceID is the ID of the Service the snippet belongs to.
	ServiceID string
	// SnippetID is the ID of the Snippet to modify
	SnippetID string
	// Content is the VCL code that specifies exactly what the snippet does.
	Content string `form:"content"`
}
// UpdateDynamicSnippet replaces the content of a Dynamic Snippet and
// returns the updated object.
//
// NOTE(review): this returns ErrMissingService while most siblings return
// ErrMissingServiceID for the same condition — kept as-is since callers may
// match on the sentinel; confirm which is intended.
func (c *Client) UpdateDynamicSnippet(i *UpdateDynamicSnippetInput) (*DynamicSnippet, error) {
	if i.ServiceID == "" {
		return nil, ErrMissingService
	}
	if i.SnippetID == "" {
		return nil, ErrMissingSnippetID
	}
	path := fmt.Sprintf("/service/%s/snippet/%s", i.ServiceID, i.SnippetID)
	resp, err := c.PutForm(path, i, nil)
	if err != nil {
		return nil, err
	}
	var updateSnippet *DynamicSnippet
	if err := decodeJSON(&updateSnippet, resp.Body); err != nil {
		return nil, err
	}
	// Success: explicit nil instead of the stale (always-nil) outer err.
	return updateSnippet, nil
}
// DeleteSnippetInput is the input for DeleteSnippet. All fields are
// required and are sent in the URL path.
type DeleteSnippetInput struct {
	// ServiceID is the ID of the Service the snippet belongs to.
	ServiceID string
	// SnippetName is the Name of the Snippet to Delete
	SnippetName string
	// Version is the editable version of the service
	Version int
}
// DeleteSnippet deletes the named snippet from the given service version.
// It returns an error when validation fails, the request fails, or the API
// reports a non-ok status.
func (c *Client) DeleteSnippet(i *DeleteSnippetInput) error {
	if i.ServiceID == "" {
		return ErrMissingService
	}
	if i.Version == 0 {
		return ErrMissingVersion
	}
	if i.SnippetName == "" {
		return ErrMissingSnippetName
	}
	path := fmt.Sprintf("/service/%s/version/%d/snippet/%s", i.ServiceID, i.Version, i.SnippetName)
	resp, err := c.Delete(path, nil)
	if err != nil {
		return err
	}
	var r *statusResp
	if err := decodeJSON(&r, resp.Body); err != nil {
		return err
	}
	if !r.Ok() {
		// Lowercase, contextual error string (was the bare "Not Ok",
		// which violates Go error-string conventions).
		return fmt.Errorf("deleting snippet %q: non-ok status", i.SnippetName)
	}
	return nil
}
// ListSnippetsInput is used as input to the ListSnippets function.
type ListSnippetsInput struct {
	// ServiceID is the ID of the service (required).
	ServiceID string
	// Version is the specific configuration version (required).
	Version int
}
// snippetsByName is a sortable list of Snippets, ordered by SnippetName.
type snippetsByName []*Snippet
// Len, Swap, and Less implement sort.Interface.
func (s snippetsByName) Len() int      { return len(s) }
func (s snippetsByName) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s snippetsByName) Less(i, j int) bool {
	return s[i].SnippetName < s[j].SnippetName
}
// ListSnippets returns the list of Snippets for the configuration version,
// sorted stably by name. Content is not displayed for Dynamic Snippets due
// to them being versionless; use GetDynamicSnippet to show current content.
func (c *Client) ListSnippets(i *ListSnippetsInput) ([]*Snippet, error) {
	if i.ServiceID == "" {
		return nil, ErrMissingServiceID
	}
	if i.Version == 0 {
		return nil, ErrMissingVersion
	}
	path := fmt.Sprintf("/service/%s/version/%d/snippet", i.ServiceID, i.Version)
	resp, err := c.Get(path, nil)
	if err != nil {
		return nil, err
	}
	var snippets []*Snippet
	if err := decodeJSON(&snippets, resp.Body); err != nil {
		return nil, err
	}
	// Stable sort keeps the API's relative order for equal names.
	sort.Stable(snippetsByName(snippets))
	return snippets, nil
}
}
// GetSnippetInput is used as input to the GetSnippet function.
type GetSnippetInput struct {
	// ServiceID is the ID of the service. Version is the specific configuration
	// version. Both fields are required.
	ServiceID string
	Version int
	// SnippetName is the name of the Snippet to fetch.
	SnippetName string
}
// GetSnippet gets the Snippet configuration with the given parameters.
// Dynamic Snippets will not show content due to them being versionless;
// use GetDynamicSnippet to see content.
func (c *Client) GetSnippet(i *GetSnippetInput) (*Snippet, error) {
	if i.ServiceID == "" {
		return nil, ErrMissingServiceID
	}
	if i.Version == 0 {
		return nil, ErrMissingVersion
	}
	if i.SnippetName == "" {
		return nil, ErrMissingSnippetName
	}
	path := fmt.Sprintf("/service/%s/version/%d/snippet/%s", i.ServiceID, i.Version, i.SnippetName)
	resp, err := c.Get(path, nil)
	if err != nil {
		return nil, err
	}
	var snippet *Snippet
	if err := decodeJSON(&snippet, resp.Body); err != nil {
		return nil, err
	}
	return snippet, nil
}
// GetDynamicSnippetInput is used as input to the GetDynamicSnippet function.
type GetDynamicSnippetInput struct {
	// ServiceID is the ID of the service.
	ServiceID string
	// SnippetID is the ID of the Snippet to fetch.
	SnippetID string
}
// GetDynamicSnippet gets the Snippet configuration with the given
// parameters, including the current content associated with a Dynamic
// Snippet (dynamic snippets are addressed by ID, not by version).
func (c *Client) GetDynamicSnippet(i *GetDynamicSnippetInput) (*DynamicSnippet, error) {
	if i.ServiceID == "" {
		return nil, ErrMissingServiceID
	}
	if i.SnippetID == "" {
		return nil, ErrMissingSnippetID
	}
	path := fmt.Sprintf("/service/%s/snippet/%s", i.ServiceID, i.SnippetID)
	resp, err := c.Get(path, nil)
	if err != nil {
		return nil, err
	}
	var snippet *DynamicSnippet
	if err := decodeJSON(&snippet, resp.Body); err != nil {
		return nil, err
	}
	return snippet, nil
}
package eventstore
import (
"context"
"fmt"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
"github.com/get-eventually/go-eventually"
"github.com/get-eventually/go-eventually/eventstore/stream"
"github.com/get-eventually/go-eventually/internal"
)
// Test fixtures: two stream IDs sharing a name but differing in type, and
// the event sequences appendEvents produces. Appends alternate between the
// two streams, so global sequence numbers interleave (odd for the first
// instance, even for the second) while per-stream versions run 1..3.
var (
	firstInstance = stream.ID{
		Type: "first-type",
		Name: "my-instance",
	}
	secondInstance = stream.ID{
		Type: "second-type",
		Name: "my-instance",
	}
	// expectedStreamAll is the globally ordered, interleaved sequence.
	expectedStreamAll = []Event{
		{
			Stream:         firstInstance,
			Version:        1,
			SequenceNumber: 1,
			Event:          eventually.Event{Payload: internal.IntPayload(1)},
		},
		{
			Stream:         secondInstance,
			Version:        1,
			SequenceNumber: 2,
			Event:          eventually.Event{Payload: internal.IntPayload(1)},
		},
		{
			Stream:         firstInstance,
			Version:        2,
			SequenceNumber: 3,
			Event:          eventually.Event{Payload: internal.IntPayload(2)},
		},
		{
			Stream:         secondInstance,
			Version:        2,
			SequenceNumber: 4,
			Event:          eventually.Event{Payload: internal.IntPayload(2)},
		},
		{
			Stream:         firstInstance,
			Version:        3,
			SequenceNumber: 5,
			Event:          eventually.Event{Payload: internal.IntPayload(3)},
		},
		{
			Stream:         secondInstance,
			Version:        3,
			SequenceNumber: 6,
			Event:          eventually.Event{Payload: internal.IntPayload(3)},
		},
	}
	// expectedStreamFirstInstance is the first stream's slice of the above.
	expectedStreamFirstInstance = []Event{
		{
			Stream:         firstInstance,
			Version:        1,
			SequenceNumber: 1,
			Event:          eventually.Event{Payload: internal.IntPayload(1)},
		},
		{
			Stream:         firstInstance,
			Version:        2,
			SequenceNumber: 3,
			Event:          eventually.Event{Payload: internal.IntPayload(2)},
		},
		{
			Stream:         firstInstance,
			Version:        3,
			SequenceNumber: 5,
			Event:          eventually.Event{Payload: internal.IntPayload(3)},
		},
	}
	// expectedStreamSecondInstance is the second stream's slice of the above.
	expectedStreamSecondInstance = []Event{
		{
			Stream:         secondInstance,
			Version:        1,
			SequenceNumber: 2,
			Event:          eventually.Event{Payload: internal.IntPayload(1)},
		},
		{
			Stream:         secondInstance,
			Version:        2,
			SequenceNumber: 4,
			Event:          eventually.Event{Payload: internal.IntPayload(2)},
		},
		{
			Stream:         secondInstance,
			Version:        3,
			SequenceNumber: 6,
			Event:          eventually.Event{Payload: internal.IntPayload(3)},
		},
	}
)
// StoreSuite is a full testing suite for an eventstore.Store instance.
// It embeds testify's suite.Suite so its Test* methods run under `go test`
// when wired through suite.Run.
type StoreSuite struct {
	suite.Suite
	// storeFactory builds a fresh Store for each test.
	storeFactory func() Store
	eventStore Store // NOTE: this instance is initialized in SetupTest.
}
// NewStoreSuite creates a new Event Store testing suite using the provided
// eventstore.Store factory.
func NewStoreSuite(factory func() Store) *StoreSuite {
	return &StoreSuite{storeFactory: factory}
}
// SetupTest creates a new, fresh Event Store instance for each test in the
// suite, so tests never observe each other's events.
func (ss *StoreSuite) SetupTest() {
	ss.eventStore = ss.storeFactory()
}
// TestStream tests all the eventstore.Streamer functions using the provided
// Event Store instance: it verifies the store starts empty, appends a known
// interleaved set of events, checks every selector variant (All, ByTypes,
// ByType, ByID) against the expected fixtures, then checks that
// out-of-bound Selects yield no events.
func (ss *StoreSuite) TestStream() {
	t := ss.T()
	ctx := context.Background()
	// Make sure the Event Store is completely empty.
	streamAll, err := StreamToSlice(ctx, func(ctx context.Context, es EventStream) error {
		return ss.eventStore.Stream(ctx, es, stream.All{}, SelectFromBeginning)
	})
	assert.Empty(t, streamAll)
	if !assert.NoError(t, err) {
		return
	}
	// Append some events for the two test event stream types.
	if err = ss.appendEvents(ctx); !assert.NoError(t, err) {
		return
	}
	// Make sure the Event Store has recorded the events as expected,
	// streaming through each selector variant.
	streamAll, err = StreamToSlice(ctx, func(ctx context.Context, es EventStream) error {
		return ss.eventStore.Stream(ctx, es, stream.All{}, SelectFromBeginning)
	})
	assert.NoError(t, err)
	streamAllByTypes, err := StreamToSlice(ctx, func(ctx context.Context, es EventStream) error {
		return ss.eventStore.Stream(ctx, es, stream.ByTypes{firstInstance.Type, secondInstance.Type}, SelectFromBeginning)
	})
	assert.NoError(t, err)
	streamFirstType, err := StreamToSlice(ctx, func(ctx context.Context, es EventStream) error {
		return ss.eventStore.Stream(ctx, es, stream.ByType(firstInstance.Type), SelectFromBeginning)
	})
	assert.NoError(t, err)
	streamSecondType, err := StreamToSlice(ctx, func(ctx context.Context, es EventStream) error {
		return ss.eventStore.Stream(ctx, es, stream.ByType(secondInstance.Type), SelectFromBeginning)
	})
	assert.NoError(t, err)
	streamFirstInstance, err := StreamToSlice(ctx, func(ctx context.Context, es EventStream) error {
		return ss.eventStore.Stream(ctx, es, stream.ByID(firstInstance), SelectFromBeginning)
	})
	assert.NoError(t, err)
	streamSecondInstance, err := StreamToSlice(ctx, func(ctx context.Context, es EventStream) error {
		return ss.eventStore.Stream(ctx, es, stream.ByID(secondInstance), SelectFromBeginning)
	})
	assert.NoError(t, err)
	// Metadata is stripped before comparison: the fixtures carry none.
	assert.Equal(t, expectedStreamAll, ss.skipMetadata(streamAll))
	assert.Equal(t, expectedStreamAll, ss.skipMetadata(streamAllByTypes))
	assert.Equal(t, expectedStreamFirstInstance, ss.skipMetadata(streamFirstType))
	assert.Equal(t, expectedStreamFirstInstance, ss.skipMetadata(streamFirstInstance))
	assert.Equal(t, expectedStreamSecondInstance, ss.skipMetadata(streamSecondType))
	assert.Equal(t, expectedStreamSecondInstance, ss.skipMetadata(streamSecondInstance))
	// Streaming with an out-of-bound Select will yield empty elements
	// (6 global events, 3 per stream, so From 7 / From 4 are past the end).
	streamAll, err = StreamToSlice(ctx, func(ctx context.Context, es EventStream) error {
		return ss.eventStore.Stream(ctx, es, stream.All{}, Select{From: 7})
	})
	assert.NoError(t, err)
	streamAllByTypes, err = StreamToSlice(ctx, func(ctx context.Context, es EventStream) error {
		return ss.eventStore.Stream(ctx, es, stream.ByTypes{firstInstance.Type, secondInstance.Type}, Select{From: 7})
	})
	assert.NoError(t, err)
	streamFirstType, err = StreamToSlice(ctx, func(ctx context.Context, es EventStream) error {
		return ss.eventStore.Stream(ctx, es, stream.ByType(firstInstance.Type), Select{From: 7})
	})
	assert.NoError(t, err)
	streamSecondType, err = StreamToSlice(ctx, func(ctx context.Context, es EventStream) error {
		return ss.eventStore.Stream(ctx, es, stream.ByType(secondInstance.Type), Select{From: 7})
	})
	assert.NoError(t, err)
	streamFirstInstance, err = StreamToSlice(ctx, func(ctx context.Context, es EventStream) error {
		return ss.eventStore.Stream(ctx, es, stream.ByID(firstInstance), Select{From: 4})
	})
	assert.NoError(t, err)
	streamSecondInstance, err = StreamToSlice(ctx, func(ctx context.Context, es EventStream) error {
		return ss.eventStore.Stream(ctx, es, stream.ByID(secondInstance), Select{From: 4})
	})
	assert.NoError(t, err)
	assert.Empty(t, streamAll)
	assert.Empty(t, streamAllByTypes)
	assert.Empty(t, streamFirstType)
	assert.Empty(t, streamSecondType)
	assert.Empty(t, streamFirstInstance)
	assert.Empty(t, streamSecondInstance)
}
// appendEvents appends three events to each test stream, alternating
// between the two instances so their global sequence numbers interleave.
// The optimistic VersionCheck(i-1) asserts each append targets the
// stream's current version.
func (ss *StoreSuite) appendEvents(ctx context.Context) error {
	for i := 1; i < 4; i++ {
		if _, err := ss.eventStore.Append(
			ctx,
			firstInstance,
			VersionCheck(int64(i-1)),
			eventually.Event{Payload: internal.IntPayload(i)},
		); err != nil {
			return fmt.Errorf("appendEvents: failed on first instance, event %d: %w", i, err)
		}
		if _, err := ss.eventStore.Append(
			ctx,
			secondInstance,
			VersionCheck(int64(i-1)),
			eventually.Event{Payload: internal.IntPayload(i)},
		); err != nil {
			return fmt.Errorf("appendEvents: failed on second instance, event %d: %w", i, err)
		}
	}
	return nil
}
func (*StoreSuite) skipMetadata(events []Event) []Event {
mapped := make([]Event, 0, len(events))
for _, event := range events {
newEvent := event
newEvent.Metadata = nil
mapped = append(mapped, newEvent)
}
return mapped
} | eventstore/store_suite.go | 0.689096 | 0.451689 | store_suite.go | starcoder |
package numgo
import (
"fmt"
"math"
"runtime"
"sync"
"github.com/Kunde21/numgo/internal"
)
var nan float64
func init() {
nan = math.NaN()
}
// Add performs element-wise addition.
// Arrays must be the same size or able to broadcast.
// This will modify the source array a and return it for chaining.
func (a *Array64) Add(b *Array64) *Array64 {
	// valRith validates b and checks broadcast compatibility.
	if a.valRith(b, "Add") {
		return a
	}
	// Matching trailing axes: plain element-wise addition over the data.
	if b.shape[len(b.shape)-1] == a.shape[len(a.shape)-1] {
		asm.Add(a.data, b.data)
		return a
	}
	// Broadcast path: each element of b is added as a constant to the
	// corresponding sub-array of a (stride st wide).
	st := a.strides[len(a.strides)-1] * a.shape[len(a.shape)-1]
	for i := 0; i < len(b.data); i++ {
		asm.AddC(b.data[i], a.data[i*st:(i+1)*st])
	}
	return a
}
// AddC adds the scalar b to every element of the array in place and
// returns the array for chaining. No-op when the array is in error.
func (a *Array64) AddC(b float64) *Array64 {
	if !a.HasErr() {
		asm.AddC(b, a.data)
	}
	return a
}
// Subtr performs element-wise subtraction.
// Arrays must be the same size or able to broadcast.
// This will modify the source array a and return it for chaining.
func (a *Array64) Subtr(b *Array64) *Array64 {
	// valRith validates b and checks broadcast compatibility.
	if a.valRith(b, "Subtr") {
		return a
	}
	// Matching trailing axes: plain element-wise subtraction.
	if b.shape[len(b.shape)-1] == a.shape[len(a.shape)-1] {
		asm.Subtr(a.data, b.data)
		return a
	}
	// Broadcast path: each element of b is subtracted as a constant from
	// the corresponding sub-array of a.
	st := a.strides[len(a.strides)-1] * a.shape[len(a.shape)-1]
	for i := 0; i < len(b.data); i++ {
		asm.SubtrC(b.data[i], a.data[i*st:(i+1)*st])
	}
	return a
}
// SubtrC subtracts the scalar b from every element of the array in place
// and returns the array for chaining. No-op when the array is in error.
func (a *Array64) SubtrC(b float64) *Array64 {
	if !a.HasErr() {
		asm.SubtrC(b, a.data)
	}
	return a
}
// Mult performs element-wise multiplication.
// Arrays must be the same size or able to broadcast.
// This will modify the source array a and return it for chaining.
func (a *Array64) Mult(b *Array64) *Array64 {
	// valRith validates b and checks broadcast compatibility.
	if a.valRith(b, "Mult") {
		return a
	}
	// Matching trailing axes: plain element-wise multiplication.
	if b.shape[len(b.shape)-1] == a.shape[len(a.shape)-1] {
		asm.Mult(a.data, b.data)
		return a
	}
	// Broadcast path: each element of b scales the corresponding
	// sub-array of a.
	st := a.strides[len(a.strides)-1] * a.shape[len(a.shape)-1]
	for i := 0; i < len(b.data); i++ {
		asm.MultC(b.data[i], a.data[i*st:(i+1)*st])
	}
	return a
}
// MultC multiplies every element of the array by the scalar b in place
// and returns the array for chaining. No-op when the array is in error.
func (a *Array64) MultC(b float64) *Array64 {
	if !a.HasErr() {
		asm.MultC(b, a.data)
	}
	return a
}
// Div performs element-wise division.
// Arrays must be the same size or able to broadcast.
// Division by zero conforms to IEEE 754:
// 0/0 = NaN, +x/0 = +Inf, -x/0 = -Inf
// This will modify the source array a and return it for chaining.
func (a *Array64) Div(b *Array64) *Array64 {
	// valRith validates b and checks broadcast compatibility.
	if a.valRith(b, "Div") {
		return a
	}
	// Matching trailing axes: plain element-wise division.
	if b.shape[len(b.shape)-1] == a.shape[len(a.shape)-1] {
		asm.Div(a.data, b.data)
		return a
	}
	// Broadcast path: each element of b divides the corresponding
	// sub-array of a.
	st := a.strides[len(a.strides)-1] * a.shape[len(a.shape)-1]
	for i := 0; i < len(b.data); i++ {
		asm.DivC(b.data[i], a.data[i*st:(i+1)*st])
	}
	return a
}
// DivC divides every element of the array by the scalar b in place and
// returns the array for chaining. Division by zero conforms to IEEE 754:
// 0/0 = NaN, +x/0 = +Inf, -x/0 = -Inf.
func (a *Array64) DivC(b float64) *Array64 {
	// A one-case switch was used here; a plain if matches the sibling
	// AddC/SubtrC/MultC methods and reads more directly.
	if a.HasErr() {
		return a
	}
	asm.DivC(b, a.data)
	return a
}
// Pow raises elements of a to the corresponding power in b.
// Arrays must be the same size or able to broadcast.
// This will modify the source array a and return it for chaining.
func (a *Array64) Pow(b *Array64) *Array64 {
	// valRith validates b and checks broadcast compatibility.
	if a.valRith(b, "Pow") {
		return a
	}
	// Matching trailing axes: walk both buffers, cycling b's exponents
	// when a holds more elements than b.
	if b.shape[len(b.shape)-1] == a.shape[len(a.shape)-1] {
		lna, lnb := len(a.data), len(b.data)
		for i, j := 0, 0; i < lna; i, j = i+1, j+1 {
			if j >= lnb {
				j = 0
			}
			a.data[i] = math.Pow(a.data[i], b.data[j])
		}
		return a
	}
	// Broadcast path: each element of b is the exponent for the
	// corresponding sub-array of a.
	st := a.strides[len(a.strides)-1] * a.shape[len(a.shape)-1]
	for i := 0; i < len(b.data); i++ {
		for j := i * st; j < (i+1)*st; j++ {
			a.data[j] = math.Pow(a.data[j], b.data[i])
		}
	}
	return a
}
// PowC raises every element of the array to the constant power b in place
// and returns the array for chaining.
// Special cases follow math.Pow: e.g. a negative base with a non-integer
// exponent yields NaN. (The previous comment claimed all negative powers
// produce NaN, which does not match math.Pow's documented behavior.)
func (a *Array64) PowC(b float64) *Array64 {
	if a.HasErr() {
		return a
	}
	for i := 0; i < len(a.data); i++ {
		a.data[i] = math.Pow(a.data[i], b)
	}
	return a
}
// FMA12 is the fused multiply-add a[i] = x*a[i] + b[i].
// Arrays must be the same size or able to broadcast; this modifies a and
// returns it for chaining.
func (a *Array64) FMA12(x float64, b *Array64) *Array64 {
	if a.valRith(b, "FMA") {
		return a
	}
	// Broadcast path: b is applied repeatedly across a's leading axis,
	// with one goroutine per repeated segment.
	if b.strides[0] != a.strides[0] {
		cmp, mul := new(sync.WaitGroup), len(a.data)/len(b.data)
		cmp.Add(mul)
		for k := 0; k < mul; k++ {
			go func(m int) {
				asm.Fma12(x, a.data[m:m+len(b.data)], b.data)
				cmp.Done()
			}(k * len(b.data))
		}
		cmp.Wait()
		return a
	}
	asm.Fma12(x, a.data, b.data)
	return a
}
// FMA21 is the fused multiply-add: after the call the receiver holds
// a[i] = a[i]*b[i] + x.
// When the outermost strides differ, b is applied tile-by-tile across a
// (assumes len(a.data) is a multiple of len(b.data) — TODO confirm this
// is guaranteed by valRith), with one goroutine per tile.
func (a *Array64) FMA21(x float64, b *Array64) *Array64 {
	if a.valRith(b, "FMA") {
		return a
	}
	if b.strides[0] != a.strides[0] {
		cmp, mul := new(sync.WaitGroup), len(a.data)/len(b.data)
		cmp.Add(mul)
		for k := 0; k < mul; k++ {
			go func(m int) {
				// Fuse a*b+x over one len(b.data)-sized tile of a.
				asm.Fma21(x, a.data[m:m+len(b.data)], b.data)
				cmp.Done()
			}(k * len(b.data))
		}
		cmp.Wait()
		return a
	}
	asm.Fma21(x, a.data, b.data)
	return a
}
// valRith validates b before an element-wise arithmetic method
// (named mthd, e.g. "Div", "Pow", "FMA") runs against the receiver.
// It returns true when the operation must abort: the receiver already
// carries an error, b is nil, b carries an error, or the shapes neither
// match on their trailing axes nor satisfy the supported broadcast case
// (same rank, b's last axis equal to 1, all leading axes equal).
// On abort a.err is set and, when debug is enabled, a message and stack
// trace are captured.
func (a *Array64) valRith(b *Array64, mthd string) bool {
	var flag bool
	switch {
	case a.HasErr():
		return true
	case b == nil:
		a.err = NilError
		if debug {
			a.debug = "Array received by " + mthd + "() is a Nil pointer."
			a.stack = string(stackBuf[:runtime.Stack(stackBuf, false)])
		}
		return true
	case b.HasErr():
		a.err = b.err
		if debug {
			a.debug = "Array received by " + mthd + "() is in error."
			a.stack = string(stackBuf[:runtime.Stack(stackBuf, false)])
		}
		return true
	case len(a.shape) < len(b.shape):
		// b has more axes than a; it can never broadcast onto a.
		goto shape
	}
	// Compare the trailing axes of a against all of b's axes; flag
	// records whether any pair differs.
	for i, j := len(b.shape)-1, len(a.shape)-1; i >= 0; i, j = i-1, j-1 {
		if a.shape[j] != b.shape[i] {
			flag = true
			break
		}
	}
	if !flag {
		// Trailing axes agree exactly: operation is valid.
		return false
	}
	// Remaining legal case: identical rank with b's last axis == 1,
	// broadcasting b along the final axis.
	if len(b.shape) != len(a.shape) || b.shape[len(b.shape)-1] != 1 {
		goto shape
	}
	for i := 0; i < len(a.shape)-1; i++ {
		if a.shape[i] != b.shape[i] {
			goto shape
		}
	}
	return false
shape:
	a.err = ShapeError
	if debug {
		a.debug = fmt.Sprintf("Array received by %s() can not be broadcast. Shape: %v Val shape: %v",
			mthd, a.shape, b.shape)
		a.stack = string(stackBuf[:runtime.Stack(stackBuf, false)])
	}
	return true
}
package ckks
import (
"github.com/ldsec/lattigo/ring"
"math"
"math/big"
)
// Encoder is an interface implementing the algorithms that encode
// slices of complex values onto Plaintexts and decode them back.
type Encoder interface {
	Encode(plaintext *Plaintext, values []complex128, slots uint64)
	EncodeNew(values []complex128, slots uint64) (plaintext *Plaintext)
	Decode(plaintext *Plaintext, slots uint64) (res []complex128)
}
// encoder is a struct storing the necessary parameters to encode a
// slice of complex numbers on a Plaintext.
type encoder struct {
	params      *Parameters
	ckksContext *Context
	// Scratch buffers reused (and re-zeroed) across Encode/Decode calls.
	values       []complex128
	valuesfloat  []float64
	bigintCoeffs []*big.Int
	qHalf        *big.Int
	polypool     *ring.Poly
	// m = 2^(LogN+1), the order of the root-of-unity group used below.
	m uint64
	// roots holds the m-th complex roots of unity, with roots[m] == roots[0].
	roots []complex128
	// rotGroup[i] = GaloisGen^i mod m; indexes roots during the special FFT.
	rotGroup []uint64
}
// NewEncoder creates a new Encoder that is used to encode a slice of
// complex values of size at most N/2 (the number of slots) on a
// Plaintext. It panics if params has not been properly generated.
func NewEncoder(params *Parameters) Encoder {
	if !params.isValid {
		panic("cannot newEncoder: parameters are invalid (check if the generation was done properly)")
	}
	m := uint64(2 << params.LogN)
	// rotGroup[i] = GaloisGen^i mod m, used to index the roots of unity
	// inside the special (inverse) FFT.
	rotGroup := make([]uint64, m>>1)
	fivePows := uint64(1)
	for i := uint64(0); i < m>>2; i++ {
		rotGroup[i] = fivePows
		fivePows *= GaloisGen
		fivePows &= (m - 1)
	}
	// Precompute the m-th complex roots of unity; the extra wrap-around
	// entry makes roots[m] == roots[0] so indexing never needs a modulo.
	// math.Pi replaces the original hand-written 3.141592653589793 literal.
	roots := make([]complex128, m+1)
	for i := uint64(0); i < m; i++ {
		angle := 2 * math.Pi * float64(i) / float64(m)
		roots[i] = complex(math.Cos(angle), math.Sin(angle))
	}
	roots[m] = roots[0]
	ckksContext := newContext(params)
	return &encoder{
		params:       params.Copy(),
		ckksContext:  ckksContext,
		values:       make([]complex128, m>>2),
		valuesfloat:  make([]float64, m>>1),
		bigintCoeffs: make([]*big.Int, m>>1),
		qHalf:        ring.NewUint(0),
		polypool:     ckksContext.contextQ.NewPoly(),
		m:            m,
		rotGroup:     rotGroup,
		roots:        roots,
	}
}
// EncodeNew allocates a fresh Plaintext at the maximum level and the
// parameters' default scale, encodes values into it, and returns it.
func (encoder *encoder) EncodeNew(values []complex128, slots uint64) (plaintext *Plaintext) {
	plaintext = NewPlaintext(encoder.params, encoder.params.MaxLevel(), encoder.params.Scale)
	encoder.Encode(plaintext, values, slots)
	return
}
// Encode takes a slice of complex128 values of size at most N/2 (the
// number of slots) and encodes it in the receiver Plaintext.
// It panics when slots is not a power of two in [1, N/2] or when the
// number of values does not match slots.
func (encoder *encoder) Encode(plaintext *Plaintext, values []complex128, slots uint64) {
	if uint64(len(values)) > encoder.ckksContext.maxSlots || uint64(len(values)) > slots {
		panic("cannot Encode: too many values for the given number of slots")
	}
	// BUG FIX: the original condition (slots == 0 && slots&(slots-1) == 0)
	// only ever rejected slots == 0, letting non-power-of-two slot counts
	// through. A power of two has exactly one bit set, so reject zero and
	// any value with more than one bit set.
	if slots == 0 || slots&(slots-1) != 0 {
		panic("cannot Encode: slots must be a power of two between 1 and N/2")
	}
	if uint64(len(values)) != slots {
		panic("cannot Encode: number of values must be equal to slots")
	}
	for i := uint64(0); i < slots; i++ {
		encoder.values[i] = values[i]
	}
	// Special inverse FFT: canonical embedding -> polynomial coefficients.
	encoder.invfft(encoder.values, slots)
	gap := encoder.ckksContext.maxSlots / slots
	// Real parts fill the low half of the coefficient vector, imaginary
	// parts the high half, spread out by gap when slots < maxSlots.
	for i, jdx, idx := uint64(0), encoder.ckksContext.maxSlots, uint64(0); i < slots; i, jdx, idx = i+1, jdx+gap, idx+gap {
		encoder.valuesfloat[idx] = real(encoder.values[i])
		encoder.valuesfloat[jdx] = imag(encoder.values[i])
	}
	scaleUpVecExact(encoder.valuesfloat, plaintext.scale, encoder.ckksContext.contextQ.Modulus[:plaintext.Level()+1], plaintext.value.Coeffs)
	encoder.ckksContext.contextQ.NTTLvl(plaintext.Level(), plaintext.value, plaintext.value)
	// Zero the scratch buffers for the next call.
	for i := uint64(0); i < encoder.ckksContext.maxSlots; i++ {
		encoder.values[i] = 0
	}
	for i := uint64(0); i < encoder.ckksContext.n; i++ {
		encoder.valuesfloat[i] = 0
	}
}
// Decode decodes the Plaintext into a slice of at most N/2 complex128
// values (slots many): the coefficients are lifted to big integers,
// centered around the current modulus, scaled down, and sent through
// the special FFT back to the canonical embedding.
func (encoder *encoder) Decode(plaintext *Plaintext, slots uint64) (res []complex128) {
	encoder.ckksContext.contextQ.InvNTTLvl(plaintext.Level(), plaintext.value, encoder.polypool)
	encoder.ckksContext.contextQ.PolyToBigint(encoder.polypool, encoder.bigintCoeffs)
	Q := encoder.ckksContext.bigintChain[plaintext.Level()]
	maxSlots := encoder.ckksContext.maxSlots
	// qHalf = Q/2: residues >= Q/2 are interpreted as negative values.
	encoder.qHalf.Set(Q)
	encoder.qHalf.Rsh(encoder.qHalf, 1)
	gap := encoder.ckksContext.maxSlots / slots
	var sign int
	for i, idx := uint64(0), uint64(0); i < slots; i, idx = i+1, idx+gap {
		// Centers the real-part coefficient around the current modulus.
		encoder.bigintCoeffs[idx].Mod(encoder.bigintCoeffs[idx], Q)
		sign = encoder.bigintCoeffs[idx].Cmp(encoder.qHalf)
		if sign == 1 || sign == 0 {
			encoder.bigintCoeffs[idx].Sub(encoder.bigintCoeffs[idx], Q)
		}
		// Centers the imaginary-part coefficient (stored maxSlots higher)
		// around the current modulus.
		encoder.bigintCoeffs[idx+maxSlots].Mod(encoder.bigintCoeffs[idx+maxSlots], Q)
		sign = encoder.bigintCoeffs[idx+maxSlots].Cmp(encoder.qHalf)
		if sign == 1 || sign == 0 {
			encoder.bigintCoeffs[idx+maxSlots].Sub(encoder.bigintCoeffs[idx+maxSlots], Q)
		}
		encoder.values[i] = complex(scaleDown(encoder.bigintCoeffs[idx], plaintext.scale), scaleDown(encoder.bigintCoeffs[idx+maxSlots], plaintext.scale))
	}
	encoder.fft(encoder.values, slots)
	res = make([]complex128, slots)
	for i := range res {
		res[i] = encoder.values[i]
	}
	// Zero the scratch buffer so no decoded data lingers between calls.
	for i := uint64(0); i < encoder.ckksContext.maxSlots; i++ {
		encoder.values[i] = 0
	}
	return
}
// invfftlazy computes the special inverse FFT used by CKKS encoding in
// place over the first N entries of values, WITHOUT the final 1/N
// normalisation (hence "lazy" — invfft applies it). Root indexing uses
// the precomputed rotGroup (powers of GaloisGen) and roots tables.
func (encoder *encoder) invfftlazy(values []complex128, N uint64) {
	var lenh, lenq, gap, idx uint64
	var u, v complex128
	for len := N; len >= 1; len >>= 1 {
		for i := uint64(0); i < N; i += len {
			lenh = len >> 1
			lenq = len << 2
			gap = encoder.m / lenq
			for j := uint64(0); j < lenh; j++ {
				// Inverse transform uses the "negated" root index
				// lenq - (rotGroup[j] mod lenq), scaled by gap.
				idx = (lenq - (encoder.rotGroup[j] % lenq)) * gap
				u = values[i+j] + values[i+j+lenh]
				v = values[i+j] - values[i+j+lenh]
				v *= encoder.roots[idx]
				values[i+j] = u
				values[i+j+lenh] = v
			}
		}
	}
	// The butterflies leave results in bit-reversed order; restore it.
	sliceBitReverseInPlaceComplex128(values, N)
}
// invfft runs the lazy inverse special FFT over the first N entries of
// values and then applies the 1/N normalisation it omits.
func (encoder *encoder) invfft(values []complex128, N uint64) {
	encoder.invfftlazy(values, N)
	norm := complex(float64(N), 0)
	for i := range values[:N] {
		values[i] /= norm
	}
}
// fft computes the special forward FFT used by CKKS decoding, in place
// over the first N entries of values. Inputs are bit-reversed first;
// root indexing uses rotGroup (powers of GaloisGen) and roots.
func (encoder *encoder) fft(values []complex128, N uint64) {
	var lenh, lenq, gap, idx uint64
	var u, v complex128
	sliceBitReverseInPlaceComplex128(values, N)
	for len := uint64(2); len <= N; len <<= 1 {
		for i := uint64(0); i < N; i += len {
			lenh = len >> 1
			lenq = len << 2
			gap = encoder.m / lenq
			for j := uint64(0); j < lenh; j++ {
				// Forward transform uses rotGroup[j] mod lenq directly.
				idx = (encoder.rotGroup[j] % lenq) * gap
				u = values[i+j]
				v = values[i+j+lenh]
				v *= encoder.roots[idx]
				values[i+j] = u + v
				values[i+j+lenh] = u - v
			}
		}
	}
}
package pipelines
import (
"encoding/json"
"time"
)
// PipelineStage models a single stage of a HubSpot pipeline as exposed
// by the CRM Pipelines API.
type PipelineStage struct {
	// The date the pipeline stage was created. The stages on default pipelines will have createdAt = 0.
	CreatedAt time.Time `json:"createdAt"`
	// The date the pipeline was archived. `archivedAt` will only be present if the pipeline is archived.
	ArchivedAt *time.Time `json:"archivedAt,omitempty"`
	// The date the pipeline stage was last updated.
	UpdatedAt time.Time `json:"updatedAt"`
	// Whether the pipeline is archived.
	Archived bool `json:"archived"`
	// A label used to organize pipeline stages in HubSpot's UI. Each pipeline stage's label must be unique within that pipeline.
	Label string `json:"label"`
	// The order for displaying this pipeline stage. If two pipeline stages have a matching `displayOrder`, they will be sorted alphabetically by label.
	DisplayOrder int32 `json:"displayOrder"`
	// A JSON object containing properties that are not present on all object pipelines. For `deals` pipelines, the `probability` field is required (`{ \"probability\": 0.5 }`), and represents the likelihood a deal will close. Possible values are between 0.0 and 1.0 in increments of 0.1. For `tickets` pipelines, the `ticketState` field is optional (`{ \"ticketState\": \"OPEN\" }`), and represents whether the ticket remains open or has been closed by a member of your Support team. Possible values are `OPEN` or `CLOSED`.
	Metadata map[string]string `json:"metadata"`
	// A unique identifier generated by HubSpot that can be used to retrieve and update the pipeline stage.
	Id string `json:"id"`
}
// NewPipelineStage instantiates a PipelineStage with every API-required
// property populated from the arguments. The argument list will change
// if the set of required properties changes.
func NewPipelineStage(createdAt time.Time, updatedAt time.Time, archived bool, label string, displayOrder int32, metadata map[string]string, id string) *PipelineStage {
	return &PipelineStage{
		CreatedAt:    createdAt,
		UpdatedAt:    updatedAt,
		Archived:     archived,
		Label:        label,
		DisplayOrder: displayOrder,
		Metadata:     metadata,
		Id:           id,
	}
}

// NewPipelineStageWithDefaults instantiates a PipelineStage holding
// only zero values; properties required by the API are left unset for
// the caller to populate.
func NewPipelineStageWithDefaults() *PipelineStage {
	return &PipelineStage{}
}
// GetCreatedAt returns the CreatedAt field value.
func (o *PipelineStage) GetCreatedAt() time.Time {
	if o == nil {
		var ret time.Time
		return ret
	}
	return o.CreatedAt
}

// GetCreatedAtOk returns a pointer to the CreatedAt field value and a
// boolean reporting whether the receiver is usable.
func (o *PipelineStage) GetCreatedAtOk() (*time.Time, bool) {
	if o == nil {
		return nil, false
	}
	return &o.CreatedAt, true
}

// SetCreatedAt sets the CreatedAt field value.
func (o *PipelineStage) SetCreatedAt(v time.Time) {
	o.CreatedAt = v
}

// GetArchivedAt returns the ArchivedAt field value if set, zero value otherwise.
func (o *PipelineStage) GetArchivedAt() time.Time {
	if o == nil || o.ArchivedAt == nil {
		var ret time.Time
		return ret
	}
	return *o.ArchivedAt
}

// GetArchivedAtOk returns a pointer to the ArchivedAt field value if
// set, nil otherwise, and a boolean reporting whether it was set.
func (o *PipelineStage) GetArchivedAtOk() (*time.Time, bool) {
	if o == nil || o.ArchivedAt == nil {
		return nil, false
	}
	return o.ArchivedAt, true
}

// HasArchivedAt reports whether the ArchivedAt field has been set.
func (o *PipelineStage) HasArchivedAt() bool {
	if o != nil && o.ArchivedAt != nil {
		return true
	}
	return false
}

// SetArchivedAt stores a copy of the given time.Time in the ArchivedAt field.
func (o *PipelineStage) SetArchivedAt(v time.Time) {
	o.ArchivedAt = &v
}

// GetUpdatedAt returns the UpdatedAt field value.
func (o *PipelineStage) GetUpdatedAt() time.Time {
	if o == nil {
		var ret time.Time
		return ret
	}
	return o.UpdatedAt
}

// GetUpdatedAtOk returns a pointer to the UpdatedAt field value and a
// boolean reporting whether the receiver is usable.
func (o *PipelineStage) GetUpdatedAtOk() (*time.Time, bool) {
	if o == nil {
		return nil, false
	}
	return &o.UpdatedAt, true
}

// SetUpdatedAt sets the UpdatedAt field value.
func (o *PipelineStage) SetUpdatedAt(v time.Time) {
	o.UpdatedAt = v
}

// GetArchived returns the Archived field value.
func (o *PipelineStage) GetArchived() bool {
	if o == nil {
		var ret bool
		return ret
	}
	return o.Archived
}

// GetArchivedOk returns a pointer to the Archived field value and a
// boolean reporting whether the receiver is usable.
func (o *PipelineStage) GetArchivedOk() (*bool, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Archived, true
}

// SetArchived sets the Archived field value.
func (o *PipelineStage) SetArchived(v bool) {
	o.Archived = v
}

// GetLabel returns the Label field value.
func (o *PipelineStage) GetLabel() string {
	if o == nil {
		var ret string
		return ret
	}
	return o.Label
}

// GetLabelOk returns a pointer to the Label field value and a boolean
// reporting whether the receiver is usable.
func (o *PipelineStage) GetLabelOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Label, true
}

// SetLabel sets the Label field value.
func (o *PipelineStage) SetLabel(v string) {
	o.Label = v
}

// GetDisplayOrder returns the DisplayOrder field value.
func (o *PipelineStage) GetDisplayOrder() int32 {
	if o == nil {
		var ret int32
		return ret
	}
	return o.DisplayOrder
}

// GetDisplayOrderOk returns a pointer to the DisplayOrder field value
// and a boolean reporting whether the receiver is usable.
func (o *PipelineStage) GetDisplayOrderOk() (*int32, bool) {
	if o == nil {
		return nil, false
	}
	return &o.DisplayOrder, true
}

// SetDisplayOrder sets the DisplayOrder field value.
func (o *PipelineStage) SetDisplayOrder(v int32) {
	o.DisplayOrder = v
}

// GetMetadata returns the Metadata field value.
func (o *PipelineStage) GetMetadata() map[string]string {
	if o == nil {
		var ret map[string]string
		return ret
	}
	return o.Metadata
}

// GetMetadataOk returns a pointer to the Metadata field value and a
// boolean reporting whether the receiver is usable.
func (o *PipelineStage) GetMetadataOk() (*map[string]string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Metadata, true
}

// SetMetadata sets the Metadata field value.
func (o *PipelineStage) SetMetadata(v map[string]string) {
	o.Metadata = v
}

// GetId returns the Id field value.
func (o *PipelineStage) GetId() string {
	if o == nil {
		var ret string
		return ret
	}
	return o.Id
}

// GetIdOk returns a pointer to the Id field value and a boolean
// reporting whether the receiver is usable.
func (o *PipelineStage) GetIdOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Id, true
}

// SetId sets the Id field value.
func (o *PipelineStage) SetId(v string) {
	o.Id = v
}
// MarshalJSON serializes the stage: required fields are always emitted,
// and archivedAt is included only when it has been set.
// The generated `if true { ... }` blocks around every required field
// were dead conditionals; they are collapsed into a single map literal.
func (o PipelineStage) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{
		"createdAt":    o.CreatedAt,
		"updatedAt":    o.UpdatedAt,
		"archived":     o.Archived,
		"label":        o.Label,
		"displayOrder": o.DisplayOrder,
		"metadata":     o.Metadata,
		"id":           o.Id,
	}
	// Optional field: omitted entirely when unset.
	if o.ArchivedAt != nil {
		toSerialize["archivedAt"] = o.ArchivedAt
	}
	return json.Marshal(toSerialize)
}
// NullablePipelineStage wraps a PipelineStage pointer together with an
// explicit "set" flag, so JSON null can be distinguished from absent.
type NullablePipelineStage struct {
	value *PipelineStage
	isSet bool
}

// Get returns the wrapped value (may be nil).
func (v NullablePipelineStage) Get() *PipelineStage {
	return v.value
}

// Set stores val and marks the wrapper as set.
func (v *NullablePipelineStage) Set(val *PipelineStage) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether a value (possibly nil) has been stored.
func (v NullablePipelineStage) IsSet() bool {
	return v.isSet
}

// Unset clears the value and the set flag.
func (v *NullablePipelineStage) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullablePipelineStage returns a wrapper already marked as set.
func NewNullablePipelineStage(val *PipelineStage) *NullablePipelineStage {
	return &NullablePipelineStage{value: val, isSet: true}
}

// MarshalJSON encodes the wrapped value (nil encodes as JSON null).
func (v NullablePipelineStage) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}

// UnmarshalJSON decodes into the wrapped value and marks it as set.
func (v *NullablePipelineStage) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
package client
import (
"encoding/json"
)
// MonthlyScheduleSettings models a monthly schedule: a local fire time,
// a day-in-month rule, and per-artifact options — snapshot options are
// required (see the constructor), backup and replica options are
// optional. How DayOfWeek and DayOfMonth interact with DayNumberInMonth
// is not visible here — confirm against the API specification.
type MonthlyScheduleSettings struct {
	TimeLocal        Time                             `json:"timeLocal"`
	DayNumberInMonth DayNumbersInMonth                `json:"dayNumberInMonth"`
	DayOfWeek        *DaysOfWeek                      `json:"dayOfWeek,omitempty"`
	DayOfMonth       *int32                           `json:"dayOfMonth,omitempty"`
	SnapshotOptions  MonthlySnapshotScheduleSettings  `json:"snapshotOptions"`
	BackupOptions    *MonthlyBackupScheduleSettings   `json:"backupOptions,omitempty"`
	ReplicaOptions   *MonthlyReplicaScheduleSettings  `json:"replicaOptions,omitempty"`
}
// NewMonthlyScheduleSettings instantiates a MonthlyScheduleSettings
// with every API-required property populated from the arguments. The
// argument list will change if the set of required properties changes.
func NewMonthlyScheduleSettings(timeLocal Time, dayNumberInMonth DayNumbersInMonth, snapshotOptions MonthlySnapshotScheduleSettings) *MonthlyScheduleSettings {
	return &MonthlyScheduleSettings{
		TimeLocal:        timeLocal,
		DayNumberInMonth: dayNumberInMonth,
		SnapshotOptions:  snapshotOptions,
	}
}

// NewMonthlyScheduleSettingsWithDefaults instantiates a
// MonthlyScheduleSettings holding only zero values; properties required
// by the API are left unset for the caller to populate.
func NewMonthlyScheduleSettingsWithDefaults() *MonthlyScheduleSettings {
	return &MonthlyScheduleSettings{}
}
// GetTimeLocal returns the TimeLocal field value.
func (o *MonthlyScheduleSettings) GetTimeLocal() Time {
	if o == nil {
		var ret Time
		return ret
	}
	return o.TimeLocal
}

// GetTimeLocalOk returns a pointer to the TimeLocal field value and a
// boolean reporting whether the receiver is usable.
func (o *MonthlyScheduleSettings) GetTimeLocalOk() (*Time, bool) {
	if o == nil {
		return nil, false
	}
	return &o.TimeLocal, true
}

// SetTimeLocal sets the TimeLocal field value.
func (o *MonthlyScheduleSettings) SetTimeLocal(v Time) {
	o.TimeLocal = v
}

// GetDayNumberInMonth returns the DayNumberInMonth field value.
func (o *MonthlyScheduleSettings) GetDayNumberInMonth() DayNumbersInMonth {
	if o == nil {
		var ret DayNumbersInMonth
		return ret
	}
	return o.DayNumberInMonth
}

// GetDayNumberInMonthOk returns a pointer to the DayNumberInMonth field
// value and a boolean reporting whether the receiver is usable.
func (o *MonthlyScheduleSettings) GetDayNumberInMonthOk() (*DayNumbersInMonth, bool) {
	if o == nil {
		return nil, false
	}
	return &o.DayNumberInMonth, true
}

// SetDayNumberInMonth sets the DayNumberInMonth field value.
func (o *MonthlyScheduleSettings) SetDayNumberInMonth(v DayNumbersInMonth) {
	o.DayNumberInMonth = v
}

// GetDayOfWeek returns the DayOfWeek field value if set, zero value otherwise.
func (o *MonthlyScheduleSettings) GetDayOfWeek() DaysOfWeek {
	if o == nil || o.DayOfWeek == nil {
		var ret DaysOfWeek
		return ret
	}
	return *o.DayOfWeek
}

// GetDayOfWeekOk returns a pointer to the DayOfWeek field value if set,
// nil otherwise, and a boolean reporting whether it was set.
func (o *MonthlyScheduleSettings) GetDayOfWeekOk() (*DaysOfWeek, bool) {
	if o == nil || o.DayOfWeek == nil {
		return nil, false
	}
	return o.DayOfWeek, true
}

// HasDayOfWeek reports whether the DayOfWeek field has been set.
func (o *MonthlyScheduleSettings) HasDayOfWeek() bool {
	if o != nil && o.DayOfWeek != nil {
		return true
	}
	return false
}

// SetDayOfWeek stores a copy of the given DaysOfWeek in the DayOfWeek field.
func (o *MonthlyScheduleSettings) SetDayOfWeek(v DaysOfWeek) {
	o.DayOfWeek = &v
}

// GetDayOfMonth returns the DayOfMonth field value if set, zero value otherwise.
func (o *MonthlyScheduleSettings) GetDayOfMonth() int32 {
	if o == nil || o.DayOfMonth == nil {
		var ret int32
		return ret
	}
	return *o.DayOfMonth
}

// GetDayOfMonthOk returns a pointer to the DayOfMonth field value if
// set, nil otherwise, and a boolean reporting whether it was set.
func (o *MonthlyScheduleSettings) GetDayOfMonthOk() (*int32, bool) {
	if o == nil || o.DayOfMonth == nil {
		return nil, false
	}
	return o.DayOfMonth, true
}

// HasDayOfMonth reports whether the DayOfMonth field has been set.
func (o *MonthlyScheduleSettings) HasDayOfMonth() bool {
	if o != nil && o.DayOfMonth != nil {
		return true
	}
	return false
}

// SetDayOfMonth stores a copy of the given int32 in the DayOfMonth field.
func (o *MonthlyScheduleSettings) SetDayOfMonth(v int32) {
	o.DayOfMonth = &v
}

// GetSnapshotOptions returns the SnapshotOptions field value.
func (o *MonthlyScheduleSettings) GetSnapshotOptions() MonthlySnapshotScheduleSettings {
	if o == nil {
		var ret MonthlySnapshotScheduleSettings
		return ret
	}
	return o.SnapshotOptions
}

// GetSnapshotOptionsOk returns a pointer to the SnapshotOptions field
// value and a boolean reporting whether the receiver is usable.
func (o *MonthlyScheduleSettings) GetSnapshotOptionsOk() (*MonthlySnapshotScheduleSettings, bool) {
	if o == nil {
		return nil, false
	}
	return &o.SnapshotOptions, true
}

// SetSnapshotOptions sets the SnapshotOptions field value.
func (o *MonthlyScheduleSettings) SetSnapshotOptions(v MonthlySnapshotScheduleSettings) {
	o.SnapshotOptions = v
}

// GetBackupOptions returns the BackupOptions field value if set, zero value otherwise.
func (o *MonthlyScheduleSettings) GetBackupOptions() MonthlyBackupScheduleSettings {
	if o == nil || o.BackupOptions == nil {
		var ret MonthlyBackupScheduleSettings
		return ret
	}
	return *o.BackupOptions
}

// GetBackupOptionsOk returns a pointer to the BackupOptions field value
// if set, nil otherwise, and a boolean reporting whether it was set.
func (o *MonthlyScheduleSettings) GetBackupOptionsOk() (*MonthlyBackupScheduleSettings, bool) {
	if o == nil || o.BackupOptions == nil {
		return nil, false
	}
	return o.BackupOptions, true
}

// HasBackupOptions reports whether the BackupOptions field has been set.
func (o *MonthlyScheduleSettings) HasBackupOptions() bool {
	if o != nil && o.BackupOptions != nil {
		return true
	}
	return false
}

// SetBackupOptions stores a copy of the given MonthlyBackupScheduleSettings
// in the BackupOptions field.
func (o *MonthlyScheduleSettings) SetBackupOptions(v MonthlyBackupScheduleSettings) {
	o.BackupOptions = &v
}

// GetReplicaOptions returns the ReplicaOptions field value if set, zero value otherwise.
func (o *MonthlyScheduleSettings) GetReplicaOptions() MonthlyReplicaScheduleSettings {
	if o == nil || o.ReplicaOptions == nil {
		var ret MonthlyReplicaScheduleSettings
		return ret
	}
	return *o.ReplicaOptions
}

// GetReplicaOptionsOk returns a pointer to the ReplicaOptions field
// value if set, nil otherwise, and a boolean reporting whether it was set.
func (o *MonthlyScheduleSettings) GetReplicaOptionsOk() (*MonthlyReplicaScheduleSettings, bool) {
	if o == nil || o.ReplicaOptions == nil {
		return nil, false
	}
	return o.ReplicaOptions, true
}

// HasReplicaOptions reports whether the ReplicaOptions field has been set.
func (o *MonthlyScheduleSettings) HasReplicaOptions() bool {
	if o != nil && o.ReplicaOptions != nil {
		return true
	}
	return false
}

// SetReplicaOptions stores a copy of the given MonthlyReplicaScheduleSettings
// in the ReplicaOptions field.
func (o *MonthlyScheduleSettings) SetReplicaOptions(v MonthlyReplicaScheduleSettings) {
	o.ReplicaOptions = &v
}
// MarshalJSON serializes the settings: required fields are always
// emitted, optional pointer fields only when set.
// The generated `if true { ... }` blocks around every required field
// were dead conditionals; they are collapsed into a single map literal.
func (o MonthlyScheduleSettings) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{
		"timeLocal":        o.TimeLocal,
		"dayNumberInMonth": o.DayNumberInMonth,
		"snapshotOptions":  o.SnapshotOptions,
	}
	// Optional fields: omitted entirely when unset.
	if o.DayOfWeek != nil {
		toSerialize["dayOfWeek"] = o.DayOfWeek
	}
	if o.DayOfMonth != nil {
		toSerialize["dayOfMonth"] = o.DayOfMonth
	}
	if o.BackupOptions != nil {
		toSerialize["backupOptions"] = o.BackupOptions
	}
	if o.ReplicaOptions != nil {
		toSerialize["replicaOptions"] = o.ReplicaOptions
	}
	return json.Marshal(toSerialize)
}
// NullableMonthlyScheduleSettings wraps a MonthlyScheduleSettings
// pointer with an explicit "set" flag, so JSON null can be
// distinguished from absent.
type NullableMonthlyScheduleSettings struct {
	value *MonthlyScheduleSettings
	isSet bool
}

// Get returns the wrapped value (may be nil).
func (v NullableMonthlyScheduleSettings) Get() *MonthlyScheduleSettings {
	return v.value
}

// Set stores val and marks the wrapper as set.
func (v *NullableMonthlyScheduleSettings) Set(val *MonthlyScheduleSettings) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether a value (possibly nil) has been stored.
func (v NullableMonthlyScheduleSettings) IsSet() bool {
	return v.isSet
}

// Unset clears the value and the set flag.
func (v *NullableMonthlyScheduleSettings) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullableMonthlyScheduleSettings returns a wrapper already marked as set.
func NewNullableMonthlyScheduleSettings(val *MonthlyScheduleSettings) *NullableMonthlyScheduleSettings {
	return &NullableMonthlyScheduleSettings{value: val, isSet: true}
}

// MarshalJSON encodes the wrapped value (nil encodes as JSON null).
func (v NullableMonthlyScheduleSettings) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}

// UnmarshalJSON decodes into the wrapped value and marks it as set.
func (v *NullableMonthlyScheduleSettings) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
package alt
// #include "Module.h"
import "C"
import "math"
// Dimension sentinel values. DefaultDimension is 0; MinDimension and
// MaxDimension span the full int32 range of valid dimensions.
const (
	DefaultDimension int32 = 0
	MaxDimension     int32 = math.MaxInt32
	MinDimension     int32 = math.MinInt32
)
// WorldObject is the base for entities that have a position and a
// dimension in the game world (players, vehicles, checkpoints, and
// colshapes, per the dispatch in the methods below).
type WorldObject struct {
	BaseObject
}
// World describes objects that occupy a position and a dimension in the
// game world.
//
// NOTE(review): the original declared Position() *Vector3 and
// parameterless SetPosition()/SetDimension(), which WorldObject's
// methods do not satisfy; the signatures below match the concrete
// WorldObject implementation so it actually implements this interface.
type World interface {
	Position() Vector3
	SetPosition(pos Vector3)
	Dimension() int32
	SetDimension(dimension int32)
}
// Position returns the entity's world position by dispatching to the C
// SDK getter matching the underlying object type. Unknown object types
// yield the zero Vector3. A switch replaces the original if/else chain.
func (w WorldObject) Position() Vector3 {
	var pos Vector3
	switch w.Type {
	case PlayerObject:
		cPos := C.player_get_position(w.Ptr)
		pos = Vector3{X: float32(cPos.x), Y: float32(cPos.y), Z: float32(cPos.z)}
	case CheckpointObject:
		cPos := C.checkpoint_get_position(w.Ptr)
		pos = Vector3{X: float32(cPos.x), Y: float32(cPos.y), Z: float32(cPos.z)}
	case ColshapeObject:
		cPos := C.col_shape_get_position(w.Ptr)
		pos = Vector3{X: float32(cPos.x), Y: float32(cPos.y), Z: float32(cPos.z)}
	case VehicleObject:
		cPos := C.vehicle_get_position(w.Ptr)
		pos = Vector3{X: float32(cPos.x), Y: float32(cPos.y), Z: float32(cPos.z)}
	}
	return pos
}
// SetPosition moves the entity to pos by dispatching to the C SDK
// setter matching the underlying object type; unknown types are a
// no-op. A switch replaces the original if/else chain.
func (w WorldObject) SetPosition(pos Vector3) {
	switch w.Type {
	case PlayerObject:
		C.player_set_position(w.Ptr, C.float(pos.X), C.float(pos.Y), C.float(pos.Z))
	case CheckpointObject:
		C.checkpoint_set_position(w.Ptr, C.float(pos.X), C.float(pos.Y), C.float(pos.Z))
	case ColshapeObject:
		C.col_shape_set_position(w.Ptr, C.float(pos.X), C.float(pos.Y), C.float(pos.Z))
	case VehicleObject:
		C.vehicle_set_position(w.Ptr, C.float(pos.X), C.float(pos.Y), C.float(pos.Z))
	}
}
// Dimension returns the entity's dimension by dispatching to the C SDK
// getter matching the underlying object type; unknown types yield 0.
// A switch replaces the original if/else chain.
func (w WorldObject) Dimension() int32 {
	var dimension int32
	switch w.Type {
	case PlayerObject:
		dimension = int32(C.player_get_dimension(w.Ptr))
	case CheckpointObject:
		dimension = int32(C.checkpoint_get_dimension(w.Ptr))
	case ColshapeObject:
		dimension = int32(C.col_shape_get_dimension(w.Ptr))
	case VehicleObject:
		dimension = int32(C.vehicle_get_dimension(w.Ptr))
	}
	return dimension
}
func (w WorldObject) SetDimension(dimension int32) {
if w.Type == PlayerObject {
C.player_set_dimension(w.Ptr, C.long(dimension))
} else if w.Type == CheckpointObject {
C.checkpoint_set_dimension(w.Ptr, C.long(dimension))
} else if w.Type == ColshapeObject {
C.col_shape_set_dimension(w.Ptr, C.long(dimension))
} else if w.Type == VehicleObject {
C.vehicle_set_dimension(w.Ptr, C.long(dimension))
}
} | alt/world_object.go | 0.517571 | 0.560373 | world_object.go | starcoder |
Package airac provides calculations on Aeronautical Information Regulation And
Control (AIRAC) cycles, i.e. cycle identifiers and effective calendar dates.
Regular, planned Aeronautical Information Publications (AIP) as defined by the
International Civil Aviation Organization (ICAO) are published and become
effective at fixed dates. This package implements the AIRAC cycle definition as
published in the ICAO Aeronautical Information Services Manual (DOC 8126;
AN/872; 6th Edition; 2003). Test cases validate documented dates from 1998 until
2020, including the rare case of a 14th cycle in the year 2020.
Licensed under the Apache License, Version 2.0.
*/
package airac
// nolint:godox
/* BUG(jwkohnen): The two digit year identifier of the FromString method will
interpret the year as between 1964 and 2063. Other methods than FromString do
not show this range restriction. This time window is more or less arbitrary and
may change. */
// nolint:godox
/* BUG(jwkohnen): This package assumes that AIRAC cycles are effective from
the effective date at 00:00:00 UTC until 27 days later at 23:59:59.999999999
UTC. That is not correct:
ICAO DOC 8126, 6th Edition (2003), paragraph 2.6.4:
"In addition to the use of a predetermined schedule of effective AIRAC dates,
Coordinated Universal Time (UTC) must also be used to indicate the time when
the AIRAC information will become effective. Since Annex 15, paragraph 3.2.3
specifies that the Gregorian calendar and UTC must be used as the temporal
reference system for international civil aviation, in addition to AIRAC
dates, 00:01 UTC must be used to indicate the time when the AIRAC-based
information will become effective."
However I won't "fix" this, because that may just confuse users. */
// nolint:godox
/* BUG(jwkohnen): Calculations that include calendar dates before the internal
epoch (1901-01-10; 63 years before the AIRAC system was introduced by the ICAO)
and after year 2192 may silently produce wrong data. */
// nolint:godox
/* BUG(jwkohnen): This package only provides calculations on effective dates,
not publication or reception dates etc. Although effective dates are clearly
defined and are consistent at least between 1998 until 2020, the derivative
dates changed historically.[citation needed] */ | doc.go | 0.711431 | 0.724974 | doc.go | starcoder |
package qif
import "github.com/pkg/errors"
// A BankingTransaction contains the information associated with non-investment
// transactions (i.e. Cash, Bank and CCard account types).
type BankingTransaction interface {
Transaction
// Num contains the check or reference number for the transaction. Wikipedia
// suggests this may also contain "Deposit", "Transfer", "Print", "ATM", or
// "EFT".
Num() string
// Payee describes the recipient of the transaction.
Payee() string
// Address contains no more than five address lines for the payee. Wikipedia
// suggests the first entry is usually the same as the Payee field.
Address() []string
// AddressMessage contains an additional message associated with the payee
// address. This is only non-empty if the transaction address contained a
// special sixth line.
AddressMessage() string
// Category of the transaction.
Category() string
// Splits contains zero or more fragments of the transaction (AFAIK).
Splits() []Split
}
// bankingTransaction is the concrete implementation of
// BankingTransaction, embedding the shared transaction fields and
// adding the banking-specific ones parsed from QIF detail lines.
type bankingTransaction struct {
	transaction
	num            string
	payee          string
	address        []string
	addressMessage string
	category       string
	splits         []Split
}
// Num returns the check or reference number.
func (t *bankingTransaction) Num() string {
	return t.num
}

// Payee returns the recipient of the transaction.
func (t *bankingTransaction) Payee() string {
	return t.payee
}

// Address returns the payee address lines (at most five).
func (t *bankingTransaction) Address() []string {
	return t.address
}

// AddressMessage returns the optional sixth-line address message.
func (t *bankingTransaction) AddressMessage() string {
	return t.addressMessage
}

// Category returns the transaction category.
func (t *bankingTransaction) Category() string {
	return t.category
}

// Splits returns the parsed split fragments of the transaction.
func (t *bankingTransaction) Splits() []Split {
	return t.splits
}
// parseBankingTransactionField parses one QIF detail line into the
// transaction. It first delegates to the embedded transaction's parser
// and only handles here the field codes that parser reports as
// unsupported. It returns an UnsupportedFieldError for codes neither
// parser recognises.
func (t *bankingTransaction) parseBankingTransactionField(line string,
	config Config) error {
	if line == "" {
		return errors.New("line is empty")
	}
	err := t.parseTransactionField(line, config)
	if err == nil {
		// Must have been a field from our embedded struct
		return nil
	}
	if _, ok := err.(UnsupportedFieldError); !ok {
		// An actual error happened
		return err
	}
	// Otherwise, try and parse it here
	switch line[0] {
	case 'N':
		t.num = line[1:]
		return nil
	case 'P':
		t.payee = line[1:]
		return nil
	case 'A':
		// At most five address lines are kept; a sixth 'A' line is
		// treated as the special address message.
		if len(t.address) >= 5 {
			t.addressMessage = line[1:]
		} else {
			t.address = append(t.address, line[1:])
		}
		return nil
	case 'L':
		t.category = line[1:]
		return nil
	// These split fields must be in order, based on statement "The
	// non-split items can be in any sequence" from the spec.
	case 'S': // Category — always opens a new split.
		split := Split{}
		cat := line[1:]
		split.Category = &cat
		t.splits = append(t.splits, split)
		return nil
	case 'E': // Memo
		// This could be the first element of a new split, but only if there
		// isn't an existing split, or the existing split already has an 'E' or
		// a '$' field.
		if len(t.splits) == 0 || t.splits[len(t.splits)-1].Memo != nil ||
			t.splits[len(t.splits)-1].Amount != nil {
			t.splits = append(t.splits, Split{})
		}
		memo := line[1:]
		t.splits[len(t.splits)-1].Memo = &memo
		return nil
	case '$': // Amount
		amt, err := parseAmount(line[1:])
		if err != nil {
			return errors.Wrap(err, "failed to parse split amount")
		}
		// This could be the first element of a new split, but only if there
		// isn't an existing split, or the existing split already has '$' field.
		if len(t.splits) == 0 || t.splits[len(t.splits)-1].Amount != nil {
			t.splits = append(t.splits, Split{})
		}
		t.splits[len(t.splits)-1].Amount = &amt
		return nil
	default:
		return UnsupportedFieldError(
			errors.Errorf("cannot process line '%s'", line))
	}
}
// A Split is used to tag part of a transaction with a separate category and
// description. Fields are pointers so that "absent" is distinguishable from
// an empty/zero value.
type Split struct {
	// Category of this transaction split ('S' field); nil when absent.
	Category *string
	// Memo is a string description of the transaction split ('E' field); nil
	// when absent.
	Memo *string
	// Amount stores the transaction split value in minor currency units. For
	// instance, a $12.99 transaction will be 1299. Nil when the '$' field was
	// absent.
	Amount *int
} | banking_transaction.go | 0.621081 | 0.47317 | banking_transaction.go | starcoder |
package exec
// sortedDistinctInt64Op runs a distinct on the column in sortedDistinctCol,
// writing true to the resultant bool column for every value that differs from
// the previous one. Adjacent-value comparison means the input must be sorted
// on that column for a true distinct.
type sortedDistinctInt64Op struct {
	input Operator
	// sortedDistinctCol is the index of the column to distinct upon.
	sortedDistinctCol int
	// outputColIdx is the index of the boolean output column in the input batch.
	outputColIdx int
	// Set to true at runtime when we've seen the first row. Distinct always
	// outputs the first row that it sees.
	foundFirstRow bool
	// lastVal is the last value seen by the operator, so that the distincting
	// still works across batch boundaries.
	lastVal int64 // template
}

// Compile-time assertion that the operator implements Operator.
var _ Operator = &sortedDistinctInt64Op{}

// Init implements Operator; this operator needs no initialization.
func (p *sortedDistinctInt64Op) Init() {}
// Next implements Operator. It reads the next batch and ORs "differs from the
// previous row" into the boolean output column, honoring any selection vector.
func (p *sortedDistinctInt64Op) Next() ColBatch {
	batch := p.input.Next()
	if batch.Length() == 0 {
		return batch
	}
	outputCol := batch.ColVec(p.outputColIdx).Bool()
	col := batch.ColVec(p.sortedDistinctCol).Int64()
	// We always output the first row.
	lastVal := p.lastVal
	sel := batch.Selection()
	if !p.foundFirstRow {
		if sel != nil {
			lastVal = col[sel[0]]
			outputCol[sel[0]] = true
		} else {
			lastVal = col[0]
			outputCol[0] = true
		}
	}
	// Skip the first row in the loops below when it was just emitted above.
	startIdx := uint16(0)
	if !p.foundFirstRow {
		startIdx = 1
	}
	n := batch.Length()
	if sel != nil {
		// Bounds check elimination.
		sel = sel[startIdx:n]
		for _, i := range sel {
			v := col[i]
			// Note that not inlining this unique var actually makes a non-trivial
			// performance difference.
			unique := v != lastVal
			outputCol[i] = outputCol[i] || unique
			lastVal = v
		}
	} else {
		// Bounds check elimination.
		col = col[startIdx:n]
		outputCol = outputCol[startIdx:n]
		for i := range col {
			v := col[i]
			// Note that not inlining this unique var actually makes a non-trivial
			// performance difference.
			unique := v != lastVal
			outputCol[i] = outputCol[i] || unique
			lastVal = v
		}
	}
	// Remember the trailing value so distincting continues across batches.
	p.lastVal = lastVal
	p.foundFirstRow = true
	return batch
}
// This finalizer op transforms the vector in outputColIdx to the selection
// vector, by adding an index to the selection for each true value in the column
// at outputColIdx.
type sortedDistinctFinalizerOp struct {
	input Operator
	// outputColIdx is the index of the boolean output column from previous
	// distinct ops in the input batch.
	outputColIdx int
}

// Compile-time assertion that the operator implements Operator.
var _ Operator = &sortedDistinctFinalizerOp{}
// Next implements Operator. It compacts each batch's selection vector down to
// the rows whose boolean output column is true, looping until a non-empty
// batch can be returned (or the input is exhausted).
func (p *sortedDistinctFinalizerOp) Next() ColBatch {
	// Loop until we have non-zero amount of output to return, or our input's been
	// exhausted.
	for {
		batch := p.input.Next()
		if batch.Length() == 0 {
			return batch
		}
		outputCol := batch.ColVec(p.outputColIdx).Bool()
		// Convert outputCol to a selection vector by outputting the index of each
		// tuple whose outputCol value is true.
		// Note that, if the input already had a selection vector, the output
		// selection vector will be a subset of the input selection vector.
		idx := uint16(0)
		n := batch.Length()
		if sel := batch.Selection(); sel != nil {
			for s := uint16(0); s < n; s++ {
				i := sel[s]
				if outputCol[i] {
					// In-place compaction is safe because idx <= s.
					sel[idx] = i
					idx++
				}
			}
		} else {
			batch.SetSelection(true)
			sel := batch.Selection()
			for i := uint16(0); i < n; i++ {
				if outputCol[i] {
					sel[idx] = i
					idx++
				}
			}
		}
		if idx == 0 {
			// Every row was filtered out; pull the next input batch.
			continue
		}
		batch.SetLength(idx)
		return batch
	}
}
func (p *sortedDistinctFinalizerOp) Init() {} | pkg/sql/exec/distinct.go | 0.76366 | 0.571468 | distinct.go | starcoder |
package f5api
// LtmMonitorFirepass describes the configuration of an LTM health monitor of
// type "firepass", as sent to or received from the F5 iControl REST API.
type LtmMonitorFirepass struct {
	// The application service to which the object belongs.
	AppService string `json:"appService,omitempty"`
	// Specifies the username, if the monitored target requires authentication.
	Username string `json:"username,omitempty"`
	// Specifies the IP address and service port of the resource that is the destination of this monitor. Possible values are: *:* (Specifies to perform a health check on the IP address and port supplied by a pool member), *:port (Specifies to perform a health check on the server with the IP address supplied by the pool member and the port you specify.), and IP : port (Specifies to mark a pool member up or down based on the response of the server at the IP address and port you specify.)
	Destination string `json:"destination,omitempty"`
	// User defined description.
	Description string `json:"description,omitempty"`
	// Specifies how often in seconds that the system issues the monitor check when the node is up. The default value is the same as the (down) interval.
	UpInterval int64 `json:"upInterval,omitempty"`
	// Specifies the maximum percentage of licensed connections currently in use under which the monitor marks the FirePass system up. For example, a value of 95 percent means that the monitor marks the FirePass system up until 95 percent of licensed connections are in use. When the number of in-use licensed connections exceeds 95 percent, the monitor marks the FirePass system down. The default value is 95.
	ConcurrencyLimit int64 `json:"concurrencyLimit,omitempty"`
	// Specifies the number that the monitor uses to mark the FirePass system up or down. The system compares value of this option against a one-minute average of the FirePass system load. When the FirePass system-load average falls within the specified value, the monitor marks the FirePass system up. When the average exceeds the setting, the monitor marks the system down. The default value is 12.0.
	MaxLoadAverage float32 `json:"maxLoadAverage,omitempty"`
	// Specifies the password, if the monitored target requires authentication.
	Password string `json:"password,omitempty"`
	// Settings will be initialized from this monitor. The default is \"firepass\".
	DefaultsFrom string `json:"defaultsFrom,omitempty"`
	// Kind of entity
	Kind string `json:"kind,omitempty"`
	// Name of entity
	Name string `json:"name,omitempty"`
	// Specifies the frequency at which the system issues the monitor check. The default value is 5 seconds.
	Interval int64 `json:"interval,omitempty"`
	// Displays the administrative partition within which the monitor resides.
	Partition string `json:"partition,omitempty"`
	// Specifies the amount of time in seconds after the first successful response before a node will be marked up. A value of 0 will cause a node to be marked up immediately after a valid response is received from the node. The default setting is 0.
	TimeUntilUp int64 `json:"timeUntilUp,omitempty"`
	// Specifies the number of seconds the target has in which to respond to the monitor request. The default is 16 seconds. If the target responds within the set time period, it is considered up. If the target does not respond within the set time period, it is considered down. Also, if the target responds with a RESET packet, the system flags the target as down immediately, without waiting for the timeout interval to expire. Note that the Timeout setting should be 3 times the Interval setting, plus 1 second.
	Timeout int64 `json:"timeout,omitempty"`
	// Specifies the list of ciphers for this monitor. The default list is HIGH:!ADH.
	Cipherlist string `json:"cipherlist,omitempty"`
} | ltm_monitor_firepass.go | 0.86806 | 0.504455 | ltm_monitor_firepass.go | starcoder |
package interpreter
import (
"errors"
)
// Zero-value literals shared by the Value constructor helpers below so each
// constructor only spells out the field it actually populates.
var (
	zeroi = IntegerLiteral{0}
	zerof = FloatLiteral{0.0}
	emptys = StringLiteral{""}
	falseb = BooleanLiteral{false}
)
/**
* Shorthand functions to easily create instances of supported types.
*/
func NewString(str string) Value {
emptyl := List{[]Value{}}
emptym := Mapping{map[string]Value{}}
return Value{StringT, StringLiteral{str}, zeroi, zerof, Name{}, falseb, Function{}, emptyl, emptym, false}
}
func NewInteger(n int64) Value {
emptyl := List{[]Value{}}
emptym := Mapping{map[string]Value{}}
return Value{IntegerT, emptys, IntegerLiteral{n}, zerof, Name{}, falseb, Function{}, emptyl, emptym, false}
}
func NewFloat(n float64) Value {
emptyl := List{[]Value{}}
emptym := Mapping{map[string]Value{}}
return Value{FloatT, emptys, zeroi, FloatLiteral{n}, Name{}, falseb, Function{}, emptyl, emptym, false}
}
func NewName(identifier string) Value {
emptyl := List{[]Value{}}
emptym := Mapping{map[string]Value{}}
return Value{NameT, emptys, zeroi, zerof, Name{identifier}, falseb, Function{}, emptyl, emptym, false}
}
func NewBoolean(value bool) Value {
emptyl := List{[]Value{}}
emptym := Mapping{map[string]Value{}}
return Value{BooleanT, emptys, zeroi, zerof, Name{}, BooleanLiteral{value}, Function{}, emptyl, emptym, false}
}
func NewSExpression(formName string, values ...interface{}) SExpression {
emptyArray := make([]interface{}, 0)
sexp := SExpression{Name{formName}, SExpressionT, emptyArray}
for _, value := range values {
sexp.Values = append(sexp.Values, value)
}
return sexp
}
func NewCallableFunction(name string, argNames []string, fn Builtin) Value {
names := make([]Name, len(argNames))
for i, arg := range argNames {
names[i] = Name{arg}
}
emptyl := List{[]Value{}}
emptym := Mapping{map[string]Value{}}
return Value{FunctionT, emptys, zeroi, zerof, Name{}, falseb, Function{Name{name}, names, SExpression{}, true, Environment{}, fn}, emptyl, emptym, false}
}
func NewFunction(name string, argNames []string, body interface{}) Value {
names := make([]Name, len(argNames))
for i, arg := range argNames {
names[i] = Name{arg}
}
emptyl := List{[]Value{}}
emptym := Mapping{map[string]Value{}}
return Value{FunctionT, emptys, zeroi, zerof, Name{}, falseb, Function{Name{name}, names, body, false, Environment{}, nil}, emptyl, emptym, false}
}
func NewList() Value {
emptyl := List{[]Value{}}
emptym := Mapping{map[string]Value{}}
return Value{ListT, emptys, zeroi, zerof, Name{}, falseb, Function{}, emptyl, emptym, false}
}
func NewMap() Value {
emptyl := List{[]Value{}}
emptym := Mapping{map[string]Value{}}
return Value{MapT, emptys, zeroi, zerof, Name{}, falseb, Function{}, emptyl, emptym, false}
}
/**
 * Unwrap a value to make the contents (stuff Go can compute with) available
 * to a function. Lists and maps are unwrapped recursively; anything without a
 * Go-native representation (e.g. functions) yields nil.
 */
func Unwrap(value Value) interface{} {
	switch value.Type {
	case StringT:
		return value.String.Contained
	case IntegerT:
		return value.Integer.Contained
	case FloatT:
		return value.Float.Contained
	case NameT:
		return value.Name.Contained
	case BooleanT:
		return value.Boolean.Contained
	case ListT:
		out := make([]interface{}, 0, len(value.List.Data))
		for _, element := range value.List.Data {
			out = append(out, Unwrap(element))
		}
		return out
	case MapT:
		out := make(map[string]interface{}, len(value.Map.Data))
		for key, element := range value.Map.Data {
			out[key] = Unwrap(element)
		}
		return out
	}
	return nil
}
/**
* Wrap a value back into one of Unicorn's Value instances.
*/
func Wrap(thing interface{}) (Value, error) {
switch thing.(type) {
case int64:
return NewInteger(thing.(int64)), nil
case float64:
return NewFloat(thing.(float64)), nil
case string:
return NewString(thing.(string)), nil
case bool:
value := thing.(bool)
if value {
return NewName("true"), nil
}
return NewName("false"), nil
case []interface{}:
list := NewList()
thingList := thing.([]interface{})
for _, v := range thingList {
wrapped, err := Wrap(v)
if err != nil {
return list, err
}
list.List.Data = append(list.List.Data, wrapped)
}
return list, nil
case map[string]interface{}:
mapping := NewMap()
thingMap := thing.(map[string]interface{})
for k, v := range thingMap {
wrapped, err := Wrap(v)
if err != nil {
return mapping, err
}
mapping.Map.Data[k] = wrapped
}
return mapping, nil
}
return Value{}, errors.New("Cannot wrap values of the type of the argument provided.")
} | src/interpreter/helpers.go | 0.671901 | 0.436502 | helpers.go | starcoder |
package trie
import (
"encoding/hex"
"sort"
"sync"
"github.com/qlcchain/go-qlc/common/types"
"github.com/qlcchain/go-qlc/common/util"
)
// Tags identifying the kind of a trie node, used both in memory (nodeType)
// and as the leading type byte of serialized/hashed node content.
const (
	UnknownNode = byte(iota)
	FullNode
	ShortNode
	ValueNode
	HashNode
)
// TrieNode is a single node of the trie. Only the field group matching
// nodeType is meaningful.
type TrieNode struct {
	// hash memoizes the node hash; computed lazily by Hash().
	hash *types.Hash
	nodeType byte
	// fullNode
	children map[byte]*TrieNode
	// lock guards concurrent access to the node (see Clone / LeafCallback).
	lock sync.RWMutex
	// shortNode
	key []byte
	child *TrieNode
	// hashNode and valueNode
	value []byte
}
// NewFullNode builds a full node over the given children map; a nil map is
// replaced with a fresh empty one.
func NewFullNode(children map[byte]*TrieNode) *TrieNode {
	if children == nil {
		children = make(map[byte]*TrieNode)
	}
	return &TrieNode{
		nodeType: FullNode,
		children: children,
	}
}

// NewShortNode builds a short node carrying a key fragment and one child.
func NewShortNode(key []byte, child *TrieNode) *TrieNode {
	return &TrieNode{
		nodeType: ShortNode,
		key:      key,
		child:    child,
	}
}

// NewHashNode builds a leaf node holding the bytes of a hash.
func NewHashNode(hash *types.Hash) *TrieNode {
	return &TrieNode{
		nodeType: HashNode,
		value:    hash[:],
	}
}

// NewValueNode builds a leaf node holding raw value bytes.
func NewValueNode(value []byte) *TrieNode {
	return &TrieNode{
		nodeType: ValueNode,
		value:    value,
	}
}
// Value returns the raw value bytes (meaningful for hash and value nodes).
func (t *TrieNode) Value() []byte {
	return t.value
}

// LeafCallback invokes completeFunc when this full node links to an
// as-yet-unresolved (UnknownNode) child.
func (t *TrieNode) LeafCallback(completeFunc func()) {
	t.lock.Lock()
	defer t.lock.Unlock()
	if t.nodeType == FullNode {
		if t.child != nil {
			if t.child.nodeType == UnknownNode {
				completeFunc()
			}
			return
		}
		for _, child := range t.children {
			if child.nodeType == UnknownNode {
				completeFunc()
			}
			// NOTE(review): this return sits inside the loop, so only one
			// map entry — in random iteration order — is ever inspected.
			// The return may have been meant to follow the loop; confirm the
			// intended semantics before changing.
			return
		}
	}
}
// Clone returns a copy of the node. Child links are shared (the children map
// is shallow-copied), while key and value bytes are duplicated. The memoized
// hash is copied by value only when copyHash is true.
func (t *TrieNode) Clone(copyHash bool) *TrieNode {
	t.lock.RLock()
	defer t.lock.RUnlock()
	newNode := &TrieNode{
		nodeType: t.nodeType,
		child: t.child,
	}
	if t.children != nil {
		newNode.children = make(map[byte]*TrieNode)
		for key, child := range t.children {
			newNode.children[key] = child
		}
	}
	newNode.key = make([]byte, len(t.key))
	copy(newNode.key, t.key)
	newNode.value = make([]byte, len(t.value))
	copy(newNode.value, t.value)
	if copyHash && t.hash != nil {
		// Dereference so the clone's hash doesn't alias the original's.
		newHash := *(t.hash)
		newNode.hash = &newHash
	}
	return newNode
}
// IsLeafNode reports whether the node is a terminal (hash or value) node.
func (t *TrieNode) IsLeafNode() bool {
	return t.nodeType == HashNode || t.nodeType == ValueNode
}

// Hash lazily computes and memoizes the node hash over a serialization of its
// content. Children of a full node are hashed in key order (via sortChildren)
// so the result is deterministic despite map iteration order.
func (t *TrieNode) Hash() *types.Hash {
	if t.hash == nil {
		var source []byte
		switch t.NodeType() {
		case FullNode:
			source = []byte{FullNode}
			if t.child != nil {
				source = append(source, t.child.Hash()[:]...)
			}
			sc := sortChildren(t.children)
			for _, c := range sc {
				source = append(source, c.Key)
				source = append(source, c.Value.Hash()[:]...)
			}
		case ShortNode:
			source = []byte{ShortNode}
			source = append(source, t.key[:]...)
			source = append(source, t.child.Hash()[:]...)
		case HashNode:
			source = []byte{HashNode}
			// NOTE(review): the assignment below discards the tag byte set
			// above, so hash/value nodes hash only their raw value (and both
			// kinds hash identically for equal values). Changing this would
			// alter all stored hashes, so it is deliberately left untouched —
			// confirm whether append was intended.
			source = t.value
		case ValueNode:
			source = []byte{ValueNode}
			source = t.value
		}
		hash := types.HashData(source)
		t.hash = &hash
	}
	return t.hash
}

// SetChild sets the single child link (used by short and full nodes).
func (t *TrieNode) SetChild(child *TrieNode) {
	t.child = child
}

// NodeType returns the node's kind tag (FullNode, ShortNode, ...).
func (t *TrieNode) NodeType() byte {
	return t.nodeType
}

// Children returns the raw children map of a full node.
func (t *TrieNode) Children() map[byte]*TrieNode {
	return t.children
}
// SortedChildren returns the node's children ordered deterministically by
// their key byte.
func (t *TrieNode) SortedChildren() []*TrieNode {
	var ordered []*TrieNode
	for _, kv := range sortChildren(t.children) {
		ordered = append(ordered, kv.Value)
	}
	return ordered
}
// asMsgNode builds the serializable types.TrieNode representation of this
// node. It is shared by String and Serialize, which previously duplicated
// this construction and could drift apart.
func (t *TrieNode) asMsgNode() *types.TrieNode {
	tn := &types.TrieNode{
		Type: t.NodeType(),
		Hash: t.Hash(),
	}
	switch t.NodeType() {
	case FullNode:
		tn.Children = t.serializeChildren(t.children)
		if t.child != nil {
			tn.Child = t.child.Hash()[:]
		}
	case ShortNode:
		tn.Key = t.key
		tn.Child = t.child.Hash()[:]
	case HashNode:
		fallthrough
	case ValueNode:
		tn.Value = t.value
	}
	return tn
}

// String renders the node's serializable form as indented text for debugging.
func (t *TrieNode) String() string {
	return util.ToIndentString(t.asMsgNode())
}

// serializeChildren converts the children map to its wire form: hex-encoded
// key byte -> child hash bytes. Returns nil for a nil map.
func (t *TrieNode) serializeChildren(children map[byte]*TrieNode) map[string][]byte {
	if children == nil {
		return nil
	}
	var parsedChildren = make(map[string][]byte, len(children))
	for key, child := range children {
		ks := hex.EncodeToString([]byte{key})
		parsedChildren[ks] = child.Hash()[:]
	}
	return parsedChildren
}

// Serialize encodes the node with msgp for persistence.
func (t *TrieNode) Serialize() ([]byte, error) {
	return t.asMsgNode().MarshalMsg(nil)
}
// parseChildren converts the serialized children map (hex-encoded key byte ->
// child hash bytes) back into in-memory child stubs that carry only a hash,
// to be resolved later. Returns the first decoding error encountered.
func (t *TrieNode) parseChildren(children map[string][]byte) (map[byte]*TrieNode, error) {
	// Pre-size the result: we know exactly how many entries it will hold.
	result := make(map[byte]*TrieNode, len(children))
	for key, child := range children {
		childHash, err := types.BytesToHash(child)
		if err != nil {
			return nil, err
		}
		tmp, err := hex.DecodeString(key)
		if err != nil {
			return nil, err
		}
		result[tmp[0]] = &TrieNode{
			hash: &childHash,
		}
	}
	return result, nil
}
// Deserialize restores the node from its msgp-encoded types.TrieNode form
// (the inverse of Serialize). Child links are restored as hash-only stubs.
func (t *TrieNode) Deserialize(buf []byte) error {
	tn := &types.TrieNode{}
	if _, err := tn.UnmarshalMsg(buf); err != nil {
		return err
	}
	t.nodeType = tn.Type
	t.hash = tn.Hash
	switch tn.Type {
	case FullNode:
		var err error
		t.children, err = t.parseChildren(tn.Children)
		if err != nil {
			return err
		}
		// A full node's extra child link is optional on the wire.
		if len(tn.Child) > 0 {
			childHash, err := types.BytesToHash(tn.Child)
			if err != nil {
				return err
			}
			t.child = &TrieNode{
				hash: &childHash,
			}
		}
	case ShortNode:
		t.key = tn.Key
		childHash, err := types.BytesToHash(tn.Child)
		if err != nil {
			return err
		}
		t.child = &TrieNode{
			hash: &childHash,
		}
	case HashNode:
		fallthrough
	case ValueNode:
		t.value = tn.Value
	}
	return nil
}
// children pairs a child's key byte with the child node, so a full node's
// children map can be flattened into an ordered slice.
type children struct {
	Key byte
	Value *TrieNode
}

// sortChildren flattens the children map into a slice sorted by key byte,
// giving hashing and iteration a deterministic order.
func sortChildren(c map[byte]*TrieNode) []*children {
	var s []*children
	for key, child := range c {
		s = append(s, &children{
			Key: key,
			Value: child,
		})
	}
	sort.Slice(s, func(i, j int) bool {
		return s[i].Key < s[j].Key
	})
	return s
} | trie/node.go | 0.555194 | 0.425307 | node.go | starcoder |
package core
import (
"gonum.org/v1/gonum/floats"
"gonum.org/v1/gonum/stat"
"math"
)
/* Table */

// DataTable is an array of (userId, itemId, rating) triples stored as three
// parallel slices.
type DataTable struct {
	Ratings []float64
	Users   []int
	Items   []int
}

// NewDataTable creates a new raw Data set over the given parallel slices
// (the slices are referenced, not copied).
func NewDataTable(users, items []int, ratings []float64) *DataTable {
	return &DataTable{
		Users:   users,
		Items:   items,
		Ratings: ratings,
	}
}
// Len returns the number of ratings in the DataTable. Safe on a nil receiver
// (returns 0).
func (dataTable *DataTable) Len() int {
	if dataTable == nil {
		return 0
	}
	return len(dataTable.Ratings)
}

// Get the i-th rating in the DataTable, as (userId, itemId, rating).
func (dataTable *DataTable) Get(i int) (int, int, float64) {
	return dataTable.Users[i], dataTable.Items[i], dataTable.Ratings[i]
}

// ForEach iterates ratings in the DataTable, invoking f once per triple.
func (dataTable *DataTable) ForEach(f func(userId, itemId int, rating float64)) {
	for i := 0; i < dataTable.Len(); i++ {
		f(dataTable.Users[i], dataTable.Items[i], dataTable.Ratings[i])
	}
}
// Mean returns the mean of ratings in the DataTable.
func (dataTable *DataTable) Mean() float64 {
	return stat.Mean(dataTable.Ratings, nil)
}

// StdDev returns the standard deviation of ratings in the DataTable.
// Note: this is the population form (divides by n), not the sample form
// used by gonum's stat.StdDev.
func (dataTable *DataTable) StdDev() float64 {
	mean := dataTable.Mean()
	sum := 0.0
	dataTable.ForEach(func(userId, itemId int, rating float64) {
		sum += (rating - mean) * (rating - mean)
	})
	return math.Sqrt(sum / float64(dataTable.Len()))
}

// Min returns the minimal rating in the DataTable. Panics on an empty table
// (floats.Min requires a non-empty slice).
func (dataTable *DataTable) Min() float64 {
	return floats.Min(dataTable.Ratings)
}

// Max returns the maximal rating in the DataTable. Panics on an empty table.
func (dataTable *DataTable) Max() float64 {
	return floats.Max(dataTable.Ratings)
}

// SubSet returns a virtual view over the given row indices of the DataTable.
func (dataTable *DataTable) SubSet(indices []int) Table {
	return NewVirtualTable(dataTable, indices)
}
// VirtualTable is a virtual subset of DataTable, which saves indices pointed
// to a DataTable instead of copying the rows.
type VirtualTable struct {
	Data  *DataTable
	Index []int
}

// NewVirtualTable creates a new virtual table over the given indices of the
// underlying data set.
func NewVirtualTable(dataSet *DataTable, index []int) *VirtualTable {
	return &VirtualTable{
		Data:  dataSet,
		Index: index,
	}
}
// Len returns the number of ratings in the VirtualTable. Safe on a nil
// receiver (returns 0).
func (vTable *VirtualTable) Len() int {
	if vTable == nil {
		return 0
	}
	return len(vTable.Index)
}

// Get the i-th rating in the VirtualTable, translating the virtual index to
// a position in the underlying DataTable.
func (vTable *VirtualTable) Get(i int) (int, int, float64) {
	indexInData := vTable.Index[i]
	return vTable.Data.Get(indexInData)
}

// ForEach iterates ratings in the VirtualTable, invoking f once per triple.
func (vTable *VirtualTable) ForEach(f func(userId, itemId int, rating float64)) {
	for i := 0; i < vTable.Len(); i++ {
		userId, itemId, rating := vTable.Get(i)
		f(userId, itemId, rating)
	}
}
// Mean returns the mean of ratings in the VirtualTable.
func (vTable *VirtualTable) Mean() float64 {
	total := 0.0
	vTable.ForEach(func(_, _ int, rating float64) {
		total += rating
	})
	return total / float64(vTable.Len())
}

// StdDev returns the standard deviation of ratings in the VirtualTable
// (population form, dividing by n).
func (vTable *VirtualTable) StdDev() float64 {
	avg := vTable.Mean()
	sumSq := 0.0
	vTable.ForEach(func(_, _ int, rating float64) {
		diff := rating - avg
		sumSq += diff * diff
	})
	return math.Sqrt(sumSq / float64(vTable.Len()))
}

// Min returns the minimal rating in the VirtualTable.
func (vTable *VirtualTable) Min() float64 {
	_, _, lowest := vTable.Get(0)
	vTable.ForEach(func(_, _ int, rating float64) {
		if rating < lowest {
			lowest = rating
		}
	})
	return lowest
}

// Max returns the maximal rating in the VirtualTable.
func (vTable *VirtualTable) Max() float64 {
	_, _, highest := vTable.Get(0)
	vTable.ForEach(func(_, _ int, rating float64) {
		if rating > highest {
			highest = rating
		}
	})
	return highest
}
// SubSet returns a subset of ratings in the VirtualTable. The given indices
// are positions within this virtual table and are mapped back to positions
// in the underlying DataTable before building the new view.
func (vTable *VirtualTable) SubSet(indices []int) Table {
	rawIndices := make([]int, len(indices))
	for i, index := range indices {
		rawIndices[i] = vTable.Index[index]
	}
	return NewVirtualTable(vTable.Data, rawIndices)
} | core/table.go | 0.721253 | 0.411288 | table.go | starcoder |
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// UserExperienceAnalyticsBatteryHealthDevicePerformance models per-device
// battery health telemetry.
// NOTE(review): this model follows kiota-generated serialization scaffolding;
// prefer regenerating over hand edits — confirm against the generator config.
type UserExperienceAnalyticsBatteryHealthDevicePerformance struct {
	Entity
	// Estimated battery age. Unit in days. Valid values -2147483648 to 2147483647
	batteryAgeInDays *int32
	// A weighted average of a device’s maximum capacity score and runtime estimate score. Values range from 0-100. Valid values -2147483648 to 2147483647
	deviceBatteryHealthScore *int32
	// The unique identifier of the device, Intune DeviceID.
	deviceId *string
	// Device friendly name.
	deviceName *string
	// The estimated runtime of the device when the battery is fully charged. Unit in minutes. Valid values -2147483648 to 2147483647
	estimatedRuntimeInMinutes *int32
	// The overall battery health status of the device. Possible values are: unknown, insufficientData, needsAttention, meetingGoals.
	healthStatus *UserExperienceAnalyticsHealthState
	// The manufacturer name of the device.
	manufacturer *string
	// Ratio of current capacity and design capacity of the battery with the lowest capacity. Unit in percentage and values range from 0-100. Valid values -2147483648 to 2147483647
	maxCapacityPercentage *int32
	// The model name of the device.
	model *string
}

// NewUserExperienceAnalyticsBatteryHealthDevicePerformance instantiates a new userExperienceAnalyticsBatteryHealthDevicePerformance and sets the default values.
func NewUserExperienceAnalyticsBatteryHealthDevicePerformance()(*UserExperienceAnalyticsBatteryHealthDevicePerformance) {
	m := &UserExperienceAnalyticsBatteryHealthDevicePerformance{
		Entity: *NewEntity(),
	}
	return m
}

// CreateUserExperienceAnalyticsBatteryHealthDevicePerformanceFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value
func CreateUserExperienceAnalyticsBatteryHealthDevicePerformanceFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
	return NewUserExperienceAnalyticsBatteryHealthDevicePerformance(), nil
}
// GetBatteryAgeInDays gets the batteryAgeInDays property value. Estimated battery age. Unit in days. Valid values -2147483648 to 2147483647
// A nil receiver yields nil (getters drop the redundant else after return).
func (m *UserExperienceAnalyticsBatteryHealthDevicePerformance) GetBatteryAgeInDays()(*int32) {
	if m == nil {
		return nil
	}
	return m.batteryAgeInDays
}

// GetDeviceBatteryHealthScore gets the deviceBatteryHealthScore property value. A weighted average of a device’s maximum capacity score and runtime estimate score. Values range from 0-100. Valid values -2147483648 to 2147483647
func (m *UserExperienceAnalyticsBatteryHealthDevicePerformance) GetDeviceBatteryHealthScore()(*int32) {
	if m == nil {
		return nil
	}
	return m.deviceBatteryHealthScore
}

// GetDeviceId gets the deviceId property value. The unique identifier of the device, Intune DeviceID.
func (m *UserExperienceAnalyticsBatteryHealthDevicePerformance) GetDeviceId()(*string) {
	if m == nil {
		return nil
	}
	return m.deviceId
}

// GetDeviceName gets the deviceName property value. Device friendly name.
func (m *UserExperienceAnalyticsBatteryHealthDevicePerformance) GetDeviceName()(*string) {
	if m == nil {
		return nil
	}
	return m.deviceName
}

// GetEstimatedRuntimeInMinutes gets the estimatedRuntimeInMinutes property value. The estimated runtime of the device when the battery is fully charged. Unit in minutes. Valid values -2147483648 to 2147483647
func (m *UserExperienceAnalyticsBatteryHealthDevicePerformance) GetEstimatedRuntimeInMinutes()(*int32) {
	if m == nil {
		return nil
	}
	return m.estimatedRuntimeInMinutes
}
// GetFieldDeserializers the deserialization information for the current model:
// a map from JSON property name to a setter closure, extending the embedded
// Entity's deserializers.
func (m *UserExperienceAnalyticsBatteryHealthDevicePerformance) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
	res := m.Entity.GetFieldDeserializers()
	res["batteryAgeInDays"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetInt32Value()
		if err != nil {
			return err
		}
		if val != nil {
			m.SetBatteryAgeInDays(val)
		}
		return nil
	}
	res["deviceBatteryHealthScore"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetInt32Value()
		if err != nil {
			return err
		}
		if val != nil {
			m.SetDeviceBatteryHealthScore(val)
		}
		return nil
	}
	res["deviceId"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetStringValue()
		if err != nil {
			return err
		}
		if val != nil {
			m.SetDeviceId(val)
		}
		return nil
	}
	res["deviceName"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetStringValue()
		if err != nil {
			return err
		}
		if val != nil {
			m.SetDeviceName(val)
		}
		return nil
	}
	res["estimatedRuntimeInMinutes"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetInt32Value()
		if err != nil {
			return err
		}
		if val != nil {
			m.SetEstimatedRuntimeInMinutes(val)
		}
		return nil
	}
	res["healthStatus"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetEnumValue(ParseUserExperienceAnalyticsHealthState)
		if err != nil {
			return err
		}
		if val != nil {
			m.SetHealthStatus(val.(*UserExperienceAnalyticsHealthState))
		}
		return nil
	}
	res["manufacturer"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetStringValue()
		if err != nil {
			return err
		}
		if val != nil {
			m.SetManufacturer(val)
		}
		return nil
	}
	res["maxCapacityPercentage"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetInt32Value()
		if err != nil {
			return err
		}
		if val != nil {
			m.SetMaxCapacityPercentage(val)
		}
		return nil
	}
	res["model"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetStringValue()
		if err != nil {
			return err
		}
		if val != nil {
			m.SetModel(val)
		}
		return nil
	}
	return res
}
// GetHealthStatus gets the healthStatus property value. The overall battery health status of the device. Possible values are: unknown, insufficientData, needsAttention, meetingGoals.
// A nil receiver yields nil (getters drop the redundant else after return).
func (m *UserExperienceAnalyticsBatteryHealthDevicePerformance) GetHealthStatus()(*UserExperienceAnalyticsHealthState) {
	if m == nil {
		return nil
	}
	return m.healthStatus
}

// GetManufacturer gets the manufacturer property value. The manufacturer name of the device.
func (m *UserExperienceAnalyticsBatteryHealthDevicePerformance) GetManufacturer()(*string) {
	if m == nil {
		return nil
	}
	return m.manufacturer
}

// GetMaxCapacityPercentage gets the maxCapacityPercentage property value. Ratio of current capacity and design capacity of the battery with the lowest capacity. Unit in percentage and values range from 0-100. Valid values -2147483648 to 2147483647
func (m *UserExperienceAnalyticsBatteryHealthDevicePerformance) GetMaxCapacityPercentage()(*int32) {
	if m == nil {
		return nil
	}
	return m.maxCapacityPercentage
}

// GetModel gets the model property value. The model name of the device.
func (m *UserExperienceAnalyticsBatteryHealthDevicePerformance) GetModel()(*string) {
	if m == nil {
		return nil
	}
	return m.model
}
// Serialize serializes information the current object: first the embedded
// Entity, then each property in turn, stopping at the first writer error.
func (m *UserExperienceAnalyticsBatteryHealthDevicePerformance) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
	err := m.Entity.Serialize(writer)
	if err != nil {
		return err
	}
	{
		err = writer.WriteInt32Value("batteryAgeInDays", m.GetBatteryAgeInDays())
		if err != nil {
			return err
		}
	}
	{
		err = writer.WriteInt32Value("deviceBatteryHealthScore", m.GetDeviceBatteryHealthScore())
		if err != nil {
			return err
		}
	}
	{
		err = writer.WriteStringValue("deviceId", m.GetDeviceId())
		if err != nil {
			return err
		}
	}
	{
		err = writer.WriteStringValue("deviceName", m.GetDeviceName())
		if err != nil {
			return err
		}
	}
	{
		err = writer.WriteInt32Value("estimatedRuntimeInMinutes", m.GetEstimatedRuntimeInMinutes())
		if err != nil {
			return err
		}
	}
	// The enum is written as its string representation, and only when set.
	if m.GetHealthStatus() != nil {
		cast := (*m.GetHealthStatus()).String()
		err = writer.WriteStringValue("healthStatus", &cast)
		if err != nil {
			return err
		}
	}
	{
		err = writer.WriteStringValue("manufacturer", m.GetManufacturer())
		if err != nil {
			return err
		}
	}
	{
		err = writer.WriteInt32Value("maxCapacityPercentage", m.GetMaxCapacityPercentage())
		if err != nil {
			return err
		}
	}
	{
		err = writer.WriteStringValue("model", m.GetModel())
		if err != nil {
			return err
		}
	}
	return nil
}
// SetBatteryAgeInDays sets the batteryAgeInDays property value. Estimated battery age. Unit in days. Valid values -2147483648 to 2147483647
func (m *UserExperienceAnalyticsBatteryHealthDevicePerformance) SetBatteryAgeInDays(value *int32)() {
if m != nil {
m.batteryAgeInDays = value
}
}
// SetDeviceBatteryHealthScore sets the deviceBatteryHealthScore property value. A weighted average of a device’s maximum capacity score and runtime estimate score. Values range from 0-100. Valid values -2147483648 to 2147483647
// Calling this on a nil receiver is a no-op.
func (m *UserExperienceAnalyticsBatteryHealthDevicePerformance) SetDeviceBatteryHealthScore(value *int32)() {
    if m == nil {
        return
    }
    m.deviceBatteryHealthScore = value
}
// SetDeviceId sets the deviceId property value. The unique identifier of the device, Intune DeviceID.
// Calling this on a nil receiver is a no-op.
func (m *UserExperienceAnalyticsBatteryHealthDevicePerformance) SetDeviceId(value *string)() {
    if m == nil {
        return
    }
    m.deviceId = value
}
// SetDeviceName sets the deviceName property value. Device friendly name.
// Calling this on a nil receiver is a no-op.
func (m *UserExperienceAnalyticsBatteryHealthDevicePerformance) SetDeviceName(value *string)() {
    if m == nil {
        return
    }
    m.deviceName = value
}
// SetEstimatedRuntimeInMinutes sets the estimatedRuntimeInMinutes property value. The estimated runtime of the device when the battery is fully charged. Unit in minutes. Valid values -2147483648 to 2147483647
// Calling this on a nil receiver is a no-op.
func (m *UserExperienceAnalyticsBatteryHealthDevicePerformance) SetEstimatedRuntimeInMinutes(value *int32)() {
    if m == nil {
        return
    }
    m.estimatedRuntimeInMinutes = value
}
// SetHealthStatus sets the healthStatus property value. The overall battery health status of the device. Possible values are: unknown, insufficientData, needsAttention, meetingGoals.
// Calling this on a nil receiver is a no-op.
func (m *UserExperienceAnalyticsBatteryHealthDevicePerformance) SetHealthStatus(value *UserExperienceAnalyticsHealthState)() {
    if m == nil {
        return
    }
    m.healthStatus = value
}
// SetManufacturer sets the manufacturer property value. The manufacturer name of the device.
// Calling this on a nil receiver is a no-op.
func (m *UserExperienceAnalyticsBatteryHealthDevicePerformance) SetManufacturer(value *string)() {
    if m == nil {
        return
    }
    m.manufacturer = value
}
// SetMaxCapacityPercentage sets the maxCapacityPercentage property value. Ratio of current capacity and design capacity of the battery with the lowest capacity. Unit in percentage and values range from 0-100. Valid values -2147483648 to 2147483647
// Calling this on a nil receiver is a no-op.
func (m *UserExperienceAnalyticsBatteryHealthDevicePerformance) SetMaxCapacityPercentage(value *int32)() {
    if m == nil {
        return
    }
    m.maxCapacityPercentage = value
}
// SetModel sets the model property value. The model name of the device.
func (m *UserExperienceAnalyticsBatteryHealthDevicePerformance) SetModel(value *string)() {
if m != nil {
m.model = value
}
} | models/user_experience_analytics_battery_health_device_performance.go | 0.718002 | 0.444203 | user_experience_analytics_battery_health_device_performance.go | starcoder |
package integer
import "sort"
// Int32Slice adapts []int32 to sort.Interface so it can be ordered via
// sort.Sort (the standard sort package has no built-in int32 helper).
type Int32Slice []int32

// Len returns the number of elements.
func (x Int32Slice) Len() int { return len(x) }

// Less reports whether element i sorts before element j (ascending).
func (x Int32Slice) Less(i, j int) bool { return x[i] < x[j] }

// Swap exchanges elements i and j.
func (x Int32Slice) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
// Int64Slice adapts []int64 to sort.Interface so it can be ordered via
// sort.Sort (the standard sort package has no built-in int64 helper).
type Int64Slice []int64

// Len returns the number of elements.
func (x Int64Slice) Len() int { return len(x) }

// Less reports whether element i sorts before element j (ascending).
func (x Int64Slice) Less(i, j int) bool { return x[i] < x[j] }

// Swap exchanges elements i and j.
func (x Int64Slice) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
// sliceIntEqual reports whether v1 and v2 hold the same ints.
// With strict=true the slices must match element-by-element in order;
// with strict=false they are compared as multisets (order ignored).
// A nil slice is only equal to another nil slice (nil != empty).
//
// Fix: the previous implementation sorted the callers' slices in place
// for the non-strict comparison, silently reordering their data. The
// order-insensitive path now sorts private copies instead.
func sliceIntEqual(v1, v2 []int, strict bool) bool {
    // nil-ness must match: nil vs non-nil (even empty) is unequal.
    if (v1 == nil) != (v2 == nil) {
        return false
    }
    // Different lengths can never be equal (also handles both-nil: 0 == 0).
    if len(v1) != len(v2) {
        return false
    }
    if !strict {
        // Sort copies so the callers' slices keep their original order.
        c1 := append([]int(nil), v1...)
        c2 := append([]int(nil), v2...)
        sort.Ints(c1)
        sort.Ints(c2)
        v1, v2 = c1, c2
    }
    // Element-by-element comparison.
    for i, x := range v1 {
        if v2[i] != x {
            return false
        }
    }
    return true
}
// SliceIntEqualStrict reports whether v1 and v2 are equal
// element-by-element in order. A nil slice is only equal to nil.
func SliceIntEqualStrict(v1, v2 []int) bool {
    return sliceIntEqual(v1, v2, true)
}
// SliceIntEqual reports whether v1 and v2 hold the same ints ignoring
// order (multiset equality). A nil slice is only equal to nil.
func SliceIntEqual(v1, v2 []int) bool {
    return sliceIntEqual(v1, v2, false)
}
// sliceInt32Equal reports whether v1 and v2 hold the same int32 values.
// With strict=true the slices must match element-by-element in order;
// with strict=false they are compared as multisets (order ignored).
// A nil slice is only equal to another nil slice (nil != empty).
//
// Fix: the previous implementation sorted the callers' slices in place
// for the non-strict comparison, silently reordering their data. The
// order-insensitive path now sorts private copies instead.
func sliceInt32Equal(v1, v2 []int32, strict bool) bool {
    // nil-ness must match: nil vs non-nil (even empty) is unequal.
    if (v1 == nil) != (v2 == nil) {
        return false
    }
    // Different lengths can never be equal (also handles both-nil: 0 == 0).
    if len(v1) != len(v2) {
        return false
    }
    if !strict {
        // Sort copies so the callers' slices keep their original order.
        c1 := append([]int32(nil), v1...)
        c2 := append([]int32(nil), v2...)
        sort.Slice(c1, func(i, j int) bool { return c1[i] < c1[j] })
        sort.Slice(c2, func(i, j int) bool { return c2[i] < c2[j] })
        v1, v2 = c1, c2
    }
    // Element-by-element comparison.
    for i, x := range v1 {
        if v2[i] != x {
            return false
        }
    }
    return true
}
// SliceInt32EqualStrict reports whether v1 and v2 are equal
// element-by-element in order. A nil slice is only equal to nil.
func SliceInt32EqualStrict(v1, v2 []int32) bool {
    return sliceInt32Equal(v1, v2, true)
}
// SliceInt32Equal reports whether v1 and v2 hold the same int32 values
// ignoring order (multiset equality). A nil slice is only equal to nil.
func SliceInt32Equal(v1, v2 []int32) bool {
    return sliceInt32Equal(v1, v2, false)
}
// sliceInt64Equal reports whether v1 and v2 hold the same int64 values.
// With strict=true the slices must match element-by-element in order;
// with strict=false they are compared as multisets (order ignored).
// A nil slice is only equal to another nil slice (nil != empty).
//
// Fix: the previous implementation sorted the callers' slices in place
// for the non-strict comparison, silently reordering their data. The
// order-insensitive path now sorts private copies instead.
func sliceInt64Equal(v1, v2 []int64, strict bool) bool {
    // nil-ness must match: nil vs non-nil (even empty) is unequal.
    if (v1 == nil) != (v2 == nil) {
        return false
    }
    // Different lengths can never be equal (also handles both-nil: 0 == 0).
    if len(v1) != len(v2) {
        return false
    }
    if !strict {
        // Sort copies so the callers' slices keep their original order.
        c1 := append([]int64(nil), v1...)
        c2 := append([]int64(nil), v2...)
        sort.Slice(c1, func(i, j int) bool { return c1[i] < c1[j] })
        sort.Slice(c2, func(i, j int) bool { return c2[i] < c2[j] })
        v1, v2 = c1, c2
    }
    // Element-by-element comparison.
    for i, x := range v1 {
        if v2[i] != x {
            return false
        }
    }
    return true
}
// SliceInt64EqualStrict reports whether v1 and v2 are equal
// element-by-element in order. A nil slice is only equal to nil.
func SliceInt64EqualStrict(v1, v2 []int64) bool {
    return sliceInt64Equal(v1, v2, true)
}
// SliceInt64Equal reports whether v1 and v2 hold the same int64 values
// ignoring order (multiset equality). A nil slice is only equal to nil.
func SliceInt64Equal(v1, v2 []int64) bool {
    return sliceInt64Equal(v1, v2, false)
}
package packet
import (
"bytes"
"encoding/binary"
"fmt"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
)
// TFTPOpCode defines the TFTP packet operation.
// TFTPOpCode defines the TFTP packet operation.
// The iota sequence below discards value 0, so the opcodes count from 1
// (read=1, write=2, data=3, ack=4, error=5), matching RFC 1350.
type TFTPOpCode uint16

const (
    // Opcode 0 is not used by the protocol; skip it.
    _ TFTPOpCode = iota
    // TFTPOpRead performs a read operation.
    TFTPOpRead
    // TFTPOpWrite performs a write operation.
    TFTPOpWrite
    // TFTPOpData performs a data operation.
    TFTPOpData
    // TFTPOpAck acknowledges a statement.
    TFTPOpAck
    // TFTPOpError performs an error operation.
    TFTPOpError
)
// TFTPErrorCode defines the TFTP error codes.
// These messages are meant to be user customized.
type TFTPErrorCode uint16

const (
    // Error code 0 is skipped so user-defined codes start at 1.
    _ TFTPErrorCode = iota
)
// LayerTypeTFTP defines a GoPacket application layer.
// Decoding is delegated to decodeTFTP below.
// NOTE(review): assumes layer-type ID 1000 is in gopacket's
// user-defined range and unique in this program — confirm against any
// other registered custom layers.
var LayerTypeTFTP = gopacket.RegisterLayerType(1000, gopacket.LayerTypeMetadata{
    Name: "TFTP", Decoder: gopacket.DecodeFunc(decodeTFTP),
})
// TFTP defines a TFTP packet layer.
// Only the fields relevant to the packet's TFTPOpCode are populated;
// see DecodeFromBytes for which opcode fills which fields.
type TFTP struct {
    layers.BaseLayer
    TFTPOpCode // operation code of this packet (read/write/data/ack/error)
    // Filename and Mode are set for read and write requests.
    Filename, Mode string
    // Block is the block number carried by data and ack packets.
    Block uint16
    // Data is the payload of a data packet.
    Data []byte
    // Last is true when a data packet carries fewer than 512 payload
    // bytes, i.e. it is the final block of the transfer.
    Last bool
    TFTPErrorCode // error code of an error packet
    // ErrorMessage is the text carried by an error packet.
    ErrorMessage string
}
// LayerType returns LayerTypeTFTP.
func (t *TFTP) LayerType() gopacket.LayerType {
    return LayerTypeTFTP
}
// DecodeFromBytes decodes packet data and populates the TFTP structure.
// Malformed input — truncated packets, an unknown opcode, or a
// read/write request missing its filename/mode NUL separator — is now
// reported as an error instead of panicking or indexing out of range,
// so hostile packets cannot crash the process.
func (t *TFTP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
    // Every TFTP packet starts with a 2-byte big-endian opcode.
    if len(data) < 2 {
        return fmt.Errorf("TFTP packet shorter than 2 bytes")
    }
    // 2-byte opcode + 2-byte block number + at most 512 data bytes.
    if len(data) > 516 {
        return fmt.Errorf("TFTP packet longer than 516 (total) bytes")
    }
    t.BaseLayer = layers.BaseLayer{Contents: data}
    t.TFTPOpCode = TFTPOpCode(binary.BigEndian.Uint16(data[:2]))
    switch t.TFTPOpCode {
    case TFTPOpRead, TFTPOpWrite:
        // Request layout: opcode | filename | 0 | mode | 0.
        filemode := bytes.Split(data[2:], []byte{0x0})
        if len(filemode) < 2 {
            return fmt.Errorf("TFTP request missing filename/mode separator")
        }
        t.Filename = string(filemode[0])
        t.Mode = string(filemode[1])
    case TFTPOpData:
        if len(data) < 4 {
            return fmt.Errorf("TFTP data packet shorter than 4 bytes")
        }
        // A payload shorter than 512 bytes marks the final block.
        if len(data[4:]) < 512 {
            t.Last = true
        }
        t.Block = binary.BigEndian.Uint16(data[2:4])
        t.Data = data[4:]
    case TFTPOpAck:
        if len(data) < 4 {
            return fmt.Errorf("TFTP ack packet shorter than 4 bytes")
        }
        t.Block = binary.BigEndian.Uint16(data[2:4])
    case TFTPOpError:
        if len(data) < 4 {
            return fmt.Errorf("TFTP error packet shorter than 4 bytes")
        }
        t.TFTPErrorCode = TFTPErrorCode(binary.BigEndian.Uint16(data[2:4]))
        t.ErrorMessage = string(data[4:])
    default:
        return fmt.Errorf("unknown TFTPOpCode (%d)", t.TFTPOpCode)
    }
    return nil
}
// SerializeTo writes the serialized form of this layer into the
// SerializationBuffer, implementing gopacket.SerializableLayer.
// See the docs for gopacket.SerializableLayer for more info.
func (t *TFTP) SerializeTo(
    b gopacket.SerializeBuffer,
    opts gopacket.SerializeOptions,
) error {
    // Reserve exactly the space this opcode needs; getLayerLength
    // panics on an unrecognized opcode.
    bytes, err := b.PrependBytes(t.getLayerLength())
    if err != nil {
        return err
    }
    // All multi-byte fields are written big-endian (network order).
    binary.BigEndian.PutUint16(bytes, uint16(t.TFTPOpCode))
    switch t.TFTPOpCode {
    case TFTPOpRead, TFTPOpWrite:
        // Layout: opcode(2) | filename | 0 | mode | 0.
        // modeLoc is the index just past the filename's NUL separator.
        // NOTE(review): the separator and trailing NULs are never
        // written explicitly — this assumes PrependBytes hands back
        // zeroed memory; confirm against gopacket's SerializeBuffer docs.
        modeLoc := 3 + len(t.Filename)
        copy(bytes[2:modeLoc-1], t.Filename)
        copy(bytes[modeLoc:modeLoc+len(t.Mode)], t.Mode)
    case TFTPOpData:
        // Layout: opcode(2) | block(2) | payload.
        binary.BigEndian.PutUint16(bytes[2:], uint16(t.Block))
        copy(bytes[4:4+len(t.Data)], t.Data)
    case TFTPOpAck:
        // Layout: opcode(2) | block(2).
        binary.BigEndian.PutUint16(bytes[2:], uint16(t.Block))
    case TFTPOpError:
        // Layout: opcode(2) | error code(2) | message.
        binary.BigEndian.PutUint16(bytes[2:], uint16(t.TFTPErrorCode))
        copy(bytes[4:4+len(t.ErrorMessage)], t.ErrorMessage)
    default:
        // Not Needed getLayerLength() errors for us
    }
    return nil
}
// CanDecode returns what LayerType TFTP can decode.
func (t *TFTP) CanDecode() gopacket.LayerClass {
    return LayerTypeTFTP
}
// NextLayerType returns gopacket.LayerTypePayload.
func (t *TFTP) NextLayerType() gopacket.LayerType {
    return gopacket.LayerTypePayload
}
// Payload returns the remaining Payload (should be nil)
// All packet bytes are consumed into the struct fields by
// DecodeFromBytes, so nothing is left over.
func (t *TFTP) Payload() []byte {
    return nil
}
// getLayerLength returns the serialized size in bytes of the packet for
// the current opcode: the 2-byte opcode plus the opcode-specific body.
// It panics on an unrecognized opcode, which indicates a programming
// error in whoever constructed the TFTP value, not bad network input.
// (The redundant len([]byte(s)) conversions were dropped: len on a
// string already yields its byte length, without allocating a copy.)
func (t *TFTP) getLayerLength() int {
    switch t.TFTPOpCode {
    case TFTPOpRead, TFTPOpWrite:
        // opcode(2) + filename + NUL + mode + NUL
        return 4 + len(t.Filename) + len(t.Mode)
    case TFTPOpData:
        // opcode(2) + block(2) + payload
        return 4 + len(t.Data)
    case TFTPOpAck:
        // opcode(2) + block(2)
        return 4
    case TFTPOpError:
        // opcode(2) + error code(2) + message
        return 4 + len(t.ErrorMessage)
    default:
        panic(fmt.Sprintf("Unknown TFTPOpCode (%d)", t.TFTPOpCode))
    }
}
func decodeTFTP(data []byte, p gopacket.PacketBuilder) error {
t := new(TFTP)
if err := t.DecodeFromBytes(data, p); err != nil {
return err
}
p.AddLayer(t)
p.SetApplicationLayer(t)
return nil
} | tftp.go | 0.677581 | 0.417331 | tftp.go | starcoder |
package toml
import (
"fmt"
"reflect"
"strings"
"time"
)
var typeOfStringSlice = reflect.TypeOf([]string(nil))
var typeOfIntSlice = reflect.TypeOf([]int(nil))
func isPointer(v interface{}) bool {
vkind := reflect.ValueOf(v).Type().Kind()
return vkind == reflect.Ptr
}
// Same as PrimitiveDecode but adds a strict verification.
func PrimitiveDecodeStrict(primValue Primitive,
v interface{},
ignore_fields map[string]interface{}) (err error) {
// Only accept pointer types.
if !isPointer(v) {
return fmt.Errorf("Must use pointer type for strict decoding: [%s]", v)
}
err = PrimitiveDecode(primValue, v)
if err != nil {
return
}
thestruct := reflect.ValueOf(v).Elem().Interface()
return CheckType(primValue, thestruct, ignore_fields)
}
// The same as Decode, except that parsed data that cannot be mapped will
// throw an error.
func DecodeStrict(data string,
v interface{},
ignore_fields map[string]interface{}) (m MetaData, err error) {
// Only accept pointer types.
if !isPointer(v) {
err = fmt.Errorf("Must use pointer type for strict decoding: [%s]", v)
return
}
m, err = Decode(data, v)
if err != nil {
return
}
thestruct := reflect.ValueOf(v).Elem().Interface()
err = CheckType(m.mapping, thestruct, ignore_fields)
return
}
func Contains(list []string, elem string) bool {
for _, t := range list {
if t == elem {
return true
}
}
return false
}
func CheckType(data interface{},
thestruct interface{},
ignore_fields map[string]interface{}) (err error) {
var dType reflect.Type
var structAsType reflect.Type
var structAsTypeOk bool
var structAsValue reflect.Value
var structAsValueType reflect.Type
dType = reflect.TypeOf(data)
structAsType, structAsTypeOk = thestruct.(reflect.Type)
structAsValue = reflect.ValueOf(thestruct)
structAsValueType = structAsValue.Type()
// Special case. Go's `time.Time` is a struct, which we don't want
// to confuse with a user struct.
timeType := rvalue(time.Time{}).Type()
if dType == timeType && thestruct == timeType {
return nil
}
if structAsTypeOk {
return checkTypeStructAsType(data,
structAsType,
ignore_fields)
} else {
return checkTypeStructAsType(data,
structAsValueType,
ignore_fields)
}
}
func checkTypeStructAsType(data interface{},
structAsType reflect.Type,
ignore_fields map[string]interface{}) (err error) {
dType := reflect.ValueOf(data).Type()
dKind := dType.Kind()
// Handle all the int types
dIsInt := (dKind >= reflect.Int && dKind <= reflect.Uint64)
sIsInt := (structAsType.Kind() >= reflect.Int && structAsType.Kind() <= reflect.Uint64)
if dIsInt && sIsInt {
return nil
}
structKind := structAsType.Kind()
switch structKind {
case reflect.Map:
dataMap, ok := data.(map[string]interface{})
if !ok {
return fmt.Errorf("Expected data to be a map: [%s]", data)
}
// Check the elem, which is the type inside the structAsType
// container
structMapElem := structAsType.Elem()
for _, v := range dataMap {
// Check each of the items in our dataMap against the
// underlying type of the slice type we are mapping onto
elemType := structMapElem.(reflect.Type)
if err = CheckType(v, elemType, ignore_fields); err != nil {
return err
}
}
return nil
case reflect.Slice:
dataSlice := data.([]interface{})
// Get the underlying type of the slice in the struct
structSliceElem := structAsType.Elem()
for _, v := range dataSlice {
// Check each of the items in our dataslice against the
// underlying type of the slice type we are mapping onto
elemType := structSliceElem.(reflect.Type)
if err = CheckType(v, elemType, ignore_fields); err != nil {
return err
}
}
return nil
case reflect.String:
_, ok := data.(string)
if ok {
return nil
}
return fmt.Errorf("Incoming type didn't match gotype string")
case reflect.Bool:
_, ok := data.(bool)
if ok {
return nil
}
return fmt.Errorf("Incoming type didn't match gotype bool")
case reflect.Interface:
if structAsType.NumMethod() == 0 {
return nil
} else {
return fmt.Errorf("We don't write data to non-empty interfaces around here")
}
case reflect.Float32, reflect.Float64:
var ok bool
_, ok = data.(float32)
if ok {
return nil
}
_, ok = data.(float64)
if ok {
return nil
}
return fmt.Errorf("Incoming type didn't match gotype float32/float64")
case reflect.Array:
return fmt.Errorf("*** This shouldn't happen")
case reflect.Struct:
dataMap := data.(map[string]interface{})
// need to iterate over each key in the data to make
// sure it exists in structAsType
mapKeys := make([]string, 0)
for k, _ := range dataMap {
mapKeys = append(mapKeys, strings.ToLower(k))
}
structKeys := make([]string, 0)
var fieldName string
for i := 0; i < structAsType.NumField(); i++ {
f := structAsType.Field(i)
fieldName = f.Tag.Get("toml")
if len(fieldName) == 0 {
fieldName = f.Name
}
structKeys = append(structKeys, strings.ToLower(fieldName))
}
for _, k := range mapKeys {
if !Contains(structKeys, k) {
if _, ok := insensitiveGet(ignore_fields, k); !ok {
return e("Configuration contains key [%s] "+
"which doesn't exist in struct", k)
}
}
}
// Check each struct field against incoming data if
// available
for i := 0; i < structAsType.NumField(); i++ {
f := structAsType.Field(i)
fieldName := f.Name
mapdata, ok := insensitiveGet(dataMap, fieldName)
if ok {
err = CheckType(mapdata, f.Type, ignore_fields)
if err != nil {
return err
}
}
}
return nil
default:
return fmt.Errorf("Unrecognized struct kind: [%s]", structKind)
}
return nil
} | vendor/github.com/bbangert/toml/decode_strict.go | 0.667906 | 0.436022 | decode_strict.go | starcoder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.