// NOTE(review): dataset-export header rows removed; they were not part of the source file.
package roleutil
import (
"sort"
"github.com/bwmarrin/discordgo"
)
// SortRoles sorts a given array of discordgo.Role
// object references by position in ascending order.
// If reversed, the order is descending.
func SortRoles(r []*discordgo.Role, reversed bool) {
	// Single comparator that flips direction based on the reversed flag.
	less := func(i, j int) bool {
		if reversed {
			return r[i].Position > r[j].Position
		}
		return r[i].Position < r[j].Position
	}
	sort.Slice(r, less)
}
// GetSortedMemberRoles tries to fetch the roles of a given
// member on a given guild and returns the role array in
// sorted ascending order by position.
// If any error occurs, the error is returned as well.
// If reversed, the order is descending.
// GetSortedMemberRoles tries to fetch the roles of a given
// member on a given guild and returns the role array in
// sorted ascending order by position.
// If any error occurs, the error is returned as well.
// If reversed, the order is descending.
// If includeEveryone, the guild's @everyone role (whose ID equals the
// guild ID) is appended when present.
func GetSortedMemberRoles(s *discordgo.Session, guildID, memberID string, reversed bool, includeEveryone bool) ([]*discordgo.Role, error) {
	member, err := s.GuildMember(guildID, memberID)
	if err != nil {
		return nil, err
	}
	roles, err := s.GuildRoles(guildID)
	if err != nil {
		return nil, err
	}
	// Index guild roles by ID for O(1) resolution of the member's role IDs.
	rolesMap := make(map[string]*discordgo.Role, len(roles))
	for _, r := range roles {
		rolesMap[r.ID] = r
	}
	membRoles := make([]*discordgo.Role, 0, len(member.Roles)+1)
	for _, rID := range member.Roles {
		if r, ok := rolesMap[rID]; ok {
			membRoles = append(membRoles, r)
		}
	}
	if includeEveryone {
		// Guard against a missing @everyone role: the original code appended
		// rolesMap[guildID] unconditionally, which could insert a nil entry
		// and panic inside SortRoles on the Position dereference.
		if everyone, ok := rolesMap[guildID]; ok {
			membRoles = append(membRoles, everyone)
		}
	}
	SortRoles(membRoles, reversed)
	return membRoles, nil
}
// GetSortedGuildRoles tries to fetch the roles of a given
// guild and returns the role array in sorted ascending
// order by position.
// If any error occurs, the error is returned as well.
// If reversed, the order is descending.
// GetSortedGuildRoles tries to fetch the roles of a given
// guild and returns the role array in sorted ascending
// order by position (descending when reversed).
// If any error occurs, the error is returned as well.
func GetSortedGuildRoles(s *discordgo.Session, guildID string, reversed bool) ([]*discordgo.Role, error) {
	guildRoles, err := s.GuildRoles(guildID)
	if err != nil {
		return nil, err
	}
	SortRoles(guildRoles, reversed)
	return guildRoles, nil
}
// PositionDiff : m1 position - m2 position
// PositionDiff returns the difference number between
// the top most role of member m1 and member m2 on
// the specified guild g by subtracting
// m1MaxPos - m2MaxPos.
func PositionDiff(m1 *discordgo.Member, m2 *discordgo.Member, g *discordgo.Guild) int {
m1MaxPos, m2MaxPos := -1, -1
rolePositions := make(map[string]int)
for _, rG := range g.Roles {
rolePositions[rG.ID] = rG.Position
}
for _, r := range m1.Roles {
p := rolePositions[r]
if p > m1MaxPos || m1MaxPos == -1 {
m1MaxPos = p
}
}
for _, r := range m2.Roles {
p := rolePositions[r]
if p > m2MaxPos || m2MaxPos == -1 {
m2MaxPos = p
}
}
return m1MaxPos - m2MaxPos
} | pkg/roleutil/roleutil.go | 0.725065 | 0.409929 | roleutil.go | starcoder |
package one4
// Float is an arbitrary-precision floating-point value.
// num can represent the mantissa as well as the sign bit. Note that there need
// only be as many digits as `prec` (the precision) however. Due to prec, to
// get a full range of representation of digits, we need pow, so that
// `base`^`pow` gives the appropriate number.
type Float struct {
	num  Int  // mantissa, including the sign
	prec uint // precision: number of significant digits
	pow  int  // exponent applied as base^pow
}
// Return Float a+b. The `prec` of result will be max(`a.prec`, `b.prec`).
// TODO: not implemented yet; currently returns the zero Float.
func SumFloat(a Float, b Float) Float {
	return Float{}
}
// Return Float a*b. The `prec` of result will be max(`a.prec`, `b.prec`).
// This means that the result may not fit in the result.
// TODO: not implemented yet; currently returns the zero Float.
func MultFloat(a Float, b Float) Float {
	return Float{}
}
// Return Float a*b. The `prec` of the result will be the `prec` arg. If
// the argument `prec` is 0, the precision will be large enough to fit the
// representation of Float a*b without any rounding error.
// TODO: not implemented yet; currently returns the zero Float.
func MultFloatPrec(a Float, b Float, prec uint) Float {
	return Float{}
}
// TODO: consolidate MultFloat & MultFloatPrec into 1 function?

// Return Sqrt(a). The `prec` of the result will be the `prec` arg.
// TODO: not implemented yet; currently returns the zero Float.
func SqRootFloat(a Float, prec uint) Float {
	return Float{}
}
// Return Float `n/d`. The `prec` of the result will be
// max(`n.prec`, `d.prec`).
// TODO: not implemented yet; currently returns the zero Float.
func DivFloat(n Float, d Float) Float {
	return Float{}
}
// Return Float `n/d`. The `prec` of the result will be the `prec` arg. If the
// `prec` arg is 0, the `prec` of the result will be large enough to fit the
// representation of Float `n/d` without any rounding error, that is, unless
// there is some infinitely repeating sequence after some point, which will
// then be truncated and rounded at an as of yet undetermined point.
// TODO: not implemented yet; currently returns the zero Float.
func DivFloatPrec(n Float, d Float, prec uint) Float {
	return Float{}
}
// TODO: consolidate DivFloat and DivFloatPrec into 1 function?

// Return as Float the quotient of n/d.
// TODO: not implemented yet; currently returns the zero Float.
func QuoFloat(n Float, d Float) Float {
	return Float{}
}
// Return as Float the remainder of n/d
func RemFloat(n Float, d Float) Float{
return Float{}
} | Float.go | 0.792344 | 0.685288 | Float.go | starcoder |
package masapi
/***************
Business
Display and Logic
Layer for Interest
Rate Module
****************/
// customisedFinancialPeriod is the financial period this controller
// operates on; starts empty until CreateFinancialPeriod succeeds.
var customisedFinancialPeriod = InitiatizeFinancialPeriod("","")

// customisedFinancialPeriodSize guards against the situation where the
// financial period yields zero results.
var customisedFinancialPeriodSize = 0
/*
CreateFinancialPeriod validates the supplied from/to date strings and,
when both are valid and correctly ordered, rebuilds the package-level
financial period. The returned string is a canned reply text
(GetRepliesText) describing the outcome.
*/
func CreateFinancialPeriod(fromDateStr string, toDateStr string) string {
	// Convert and validate both boundary dates.
	fromDateFormatted, fromDateValid := ConvertStrToDate(fromDateStr)
	toDateFormatted, toDateValid := ConvertStrToDate(toDateStr)
	switch {
	case !fromDateValid && !toDateValid:
		return GetRepliesText(1)
	case !fromDateValid:
		return GetRepliesText(2)
	case !toDateValid:
		return GetRepliesText(3)
	}
	// Reject periods whose end date is not after the start date.
	if !TestFromAndToDateValidity(fromDateStr, toDateStr) {
		return GetRepliesText(4)
	}
	// All checks passed: create the financial period.
	customisedFinancialPeriod = InitiatizeFinancialPeriod(fromDateFormatted, toDateFormatted)
	customisedFinancialPeriodSize = len(customisedFinancialPeriod.interestRateArr)
	return GetRepliesText(5)
}
// VisualizeIRComparisonByMonth renders the month-by-month interest-rate
// comparison for the currently configured financial period.
func VisualizeIRComparisonByMonth() string {
	return customisedFinancialPeriod.VisualiseData()
}
/*
VisualizeMonthsThatFCsWin lists the months within the financial period
in which financial companies beat banks on interest rates. When no such
month exists, the table header is followed by a canned "none" reply.
*/
func VisualizeMonthsThatFCsWin() string {
	listOfFCWinningMonth := customisedFinancialPeriod.MonthsWithFCHigherThanBanksIR()
	str := "Date | Banks Interest Rate (Normalised) | FCs Interest Rate (Normalised) | Overall Rate (Normalised)|"
	if len(listOfFCWinningMonth) == 0 {
		return str + "\n\n" + GetRepliesText(6)
	}
	// Append one display row per winning month.
	for _, eachIRObj := range listOfFCWinningMonth {
		str += "\n" + eachIRObj.GetDisplay()
	}
	return str
}
// ShowOverallBanksVersusFCsAvg compares the average interest rates of
// banks versus financial companies over the financial period.
func ShowOverallBanksVersusFCsAvg() string {
	bankAvg, fcAvg := customisedFinancialPeriod.RetrieveAvgOfBankAndFCRatesForPeriod()
	// The sentinel pair -9999.99/-9999.99 signals invalid or empty data.
	if bankAvg == -9999.99 && fcAvg == -9999.99 {
		return GetRepliesText(7)
	}
	return "The overall interest rate average (Financial Period) for Banks Versus Financial Companies is as follow,\nBanks: " +
		FloatToStr(bankAvg) + " percent\nFinancial Companies: " + FloatToStr(fcAvg) + " percent\n"
}
/*
Function to display trend for stated financial period.
*/
func ShowTrend() string{
str := customisedFinancialPeriod.RetrieveIRTrendForPeriod()
if str == "UpTrend"{
return GetRepliesText(9)
}else if str == "DownTrend"{
return GetRepliesText(10)
}else if str == "Steady"{
return GetRepliesText(11)
}
return str
} | internal/masapi/finance_controller.go | 0.598312 | 0.447823 | finance_controller.go | starcoder |
package main
var schemas = `
{
"API": {
"createAsset": {
"description": "Create an asset. One argument, a JSON encoded event. The 'assetID' property is required with zero or more writable properties. Establishes an initial asset state.",
"properties": {
"args": {
"description": "args are JSON encoded strings",
"items": {
"description": "The set of writable properties that define an asset's state. For asset creation, the only mandatory property is the 'assetID'. Updates should include at least one other writable property. This exemplifies the IoT contract pattern 'partial state as event'.",
"properties": {
"assetID": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
},
"carrier": {
"description": "transport entity currently in possession of asset",
"type": "string"
},
"extension": {
"description": "Application-managed state. Opaque to contract.",
"properties": {},
"type": "object"
},
"location": {
"description": "A geographical coordinate",
"properties": {
"latitude": {
"type": "number"
},
"longitude": {
"type": "number"
}
},
"type": "object"
},
"temperature": {
"description": "Temperature of the asset in CELSIUS.",
"type": "number"
},
"timestamp": {
"description": "Device timestamp.",
"type": "string"
}
},
"required": [
"assetID"
],
"type": "object"
},
"maxItems": 1,
"minItems": 1,
"type": "array"
},
"function": {
"description": "createAsset function",
"enum": [
"createAsset"
],
"type": "string"
},
"method": "invoke"
},
"type": "object"
},
"deleteAllAssets": {
"description": "Delete the state of all assets. No arguments are accepted. For each managed asset, the state and history are erased, and the asset is removed if necessary from recent states.",
"properties": {
"args": {
"description": "accepts no arguments",
"items": {},
"maxItems": 0,
"minItems": 0,
"type": "array"
},
"function": {
"description": "deleteAllAssets function",
"enum": [
"deleteAllAssets"
],
"type": "string"
},
"method": "invoke"
},
"type": "object"
},
"deleteAsset": {
"description": "Delete an asset, its history, and any recent state activity. Argument is a JSON encoded string containing only an 'assetID'.",
"properties": {
"args": {
"description": "args are JSON encoded strings",
"items": {
"description": "An object containing only an 'assetID' for use as an argument to read or delete.",
"properties": {
"assetID": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
}
},
"type": "object"
},
"maxItems": 1,
"minItems": 1,
"type": "array"
},
"function": {
"description": "deleteAsset function",
"enum": [
"deleteAsset"
],
"type": "string"
},
"method": "invoke"
},
"type": "object"
},
"deletePropertiesFromAsset": {
"description": "Delete one or more properties from an asset's state. Argument is a JSON encoded string containing an 'assetID' and an array of qualified property names. For example, in an event object containing common and custom properties objects, the argument might look like {'assetID':'A1',['common.location', 'custom.carrier', 'custom.temperature']} and the result of that invoke would be the removal of the location, carrier and temperature properties. The missing temperature would clear a 'OVERTEMP' alert when the rules engine runs.",
"properties": {
"args": {
"description": "args are JSON encoded strings",
"items": {
"description": "Requested 'assetID' with a list of qualified property names.",
"properties": {
"assetID": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
},
"qualPropsToDelete": {
"items": {
"description": "The qualified name of a property. E.g. 'event.common.carrier', 'event.custom.temperature', etc.",
"type": "string"
},
"type": "array"
}
},
"required": [
"assetID",
"qualPropsToDelete"
],
"type": "object"
},
"maxItems": 1,
"minItems": 1,
"type": "array"
},
"function": {
"description": "deletePropertiesFromAsset function",
"enum": [
"deletePropertiesFromAsset"
],
"type": "string"
},
"method": "invoke"
},
"type": "object"
},
"init": {
"description": "Initializes the contract when started, either by deployment or by peer restart.",
"properties": {
"args": {
"description": "args are JSON encoded strings",
"items": {
"description": "event sent to init on deployment",
"properties": {
"nickname": {
"default": "TRADELANE",
"description": "The nickname of the current contract",
"type": "string"
},
"version": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
}
},
"required": [
"version"
],
"type": "object"
},
"maxItems": 1,
"minItems": 1,
"type": "array"
},
"function": {
"description": "init function",
"enum": [
"init"
],
"type": "string"
},
"method": "deploy"
},
"type": "object"
},
"readAllAssets": {
"description": "Returns the state of all assets as an array of JSON encoded strings. Accepts no arguments. For each managed asset, the state is read from the ledger and added to the returned array. Array is sorted by 'assetID'.",
"properties": {
"args": {
"description": "accepts no arguments",
"items": {},
"maxItems": 0,
"minItems": 0,
"type": "array"
},
"function": {
"description": "readAllAssets function",
"enum": [
"readAllAssets"
],
"type": "string"
},
"method": "query",
"result": {
"description": "an array of states, often for different assets",
"items": {
"description": "A set of properties that constitute a complete asset state. Includes event properties and any other calculated properties such as compliance related alerts.",
"properties": {
"alerts": {
"description": "Active means that the alert is in force in this state. Raised means that the alert became active as the result of the event that generated this state. Cleared means that the alert became inactive as the result of the event that generated this state.",
"properties": {
"active": {
"items": {
"description": "Alerts are triggered or cleared by rules that are run against incoming events. This contract considers any active alert to created a state of non-compliance.",
"enum": [
"OVERTTEMP"
],
"type": "string"
},
"minItems": 0,
"type": "array"
},
"cleared": {
"items": {
"description": "Alerts are triggered or cleared by rules that are run against incoming events. This contract considers any active alert to created a state of non-compliance.",
"enum": [
"OVERTTEMP"
],
"type": "string"
},
"minItems": 0,
"type": "array"
},
"raised": {
"items": {
"description": "Alerts are triggered or cleared by rules that are run against incoming events. This contract considers any active alert to created a state of non-compliance.",
"enum": [
"OVERTTEMP"
],
"type": "string"
},
"minItems": 0,
"type": "array"
}
},
"type": "object"
},
"assetID": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
},
"carrier": {
"description": "transport entity currently in possession of asset",
"type": "string"
},
"compliant": {
"description": "A contract-specific indication that this asset is compliant.",
"type": "boolean"
},
"extension": {
"description": "Application-managed state. Opaque to contract.",
"properties": {},
"type": "object"
},
"lastEvent": {
"description": "function and string parameter that created this state object",
"properties": {
"args": {
"items": {
"description": "parameters to the function, usually args[0] is populated with a JSON encoded event object",
"type": "string"
},
"type": "array"
},
"function": {
"description": "function that created this state object",
"type": "string"
},
"redirectedFromFunction": {
"description": "function that originally received the event",
"type": "string"
}
},
"type": "object"
},
"location": {
"description": "A geographical coordinate",
"properties": {
"latitude": {
"type": "number"
},
"longitude": {
"type": "number"
}
},
"type": "object"
},
"temperature": {
"description": "Temperature of the asset in CELSIUS.",
"type": "number"
},
"timestamp": {
"description": "Device timestamp.",
"type": "string"
},
"txntimestamp": {
"description": "Transaction timestamp matching that in the blockchain.",
"type": "string"
},
"txnuuid": {
"description": "Transaction UUID matching that in the blockchain.",
"type": "string"
}
},
"type": "object"
},
"minItems": 0,
"type": "array"
}
},
"type": "object"
},
"readAsset": {
"description": "Returns the state an asset. Argument is a JSON encoded string. The arg is an 'assetID' property.",
"properties": {
"args": {
"description": "args are JSON encoded strings",
"items": {
"description": "An object containing only an 'assetID' for use as an argument to read or delete.",
"properties": {
"assetID": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
}
},
"type": "object"
},
"maxItems": 1,
"minItems": 1,
"type": "array"
},
"function": {
"description": "readAsset function",
"enum": [
"readAsset"
],
"type": "string"
},
"method": "query",
"result": {
"description": "A set of properties that constitute a complete asset state. Includes event properties and any other calculated properties such as compliance related alerts.",
"properties": {
"alerts": {
"description": "Active means that the alert is in force in this state. Raised means that the alert became active as the result of the event that generated this state. Cleared means that the alert became inactive as the result of the event that generated this state.",
"properties": {
"active": {
"items": {
"description": "Alerts are triggered or cleared by rules that are run against incoming events. This contract considers any active alert to created a state of non-compliance.",
"enum": [
"OVERTTEMP"
],
"type": "string"
},
"minItems": 0,
"type": "array"
},
"cleared": {
"items": {
"description": "Alerts are triggered or cleared by rules that are run against incoming events. This contract considers any active alert to created a state of non-compliance.",
"enum": [
"OVERTTEMP"
],
"type": "string"
},
"minItems": 0,
"type": "array"
},
"raised": {
"items": {
"description": "Alerts are triggered or cleared by rules that are run against incoming events. This contract considers any active alert to created a state of non-compliance.",
"enum": [
"OVERTTEMP"
],
"type": "string"
},
"minItems": 0,
"type": "array"
}
},
"type": "object"
},
"assetID": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
},
"carrier": {
"description": "transport entity currently in possession of asset",
"type": "string"
},
"compliant": {
"description": "A contract-specific indication that this asset is compliant.",
"type": "boolean"
},
"extension": {
"description": "Application-managed state. Opaque to contract.",
"properties": {},
"type": "object"
},
"lastEvent": {
"description": "function and string parameter that created this state object",
"properties": {
"args": {
"items": {
"description": "parameters to the function, usually args[0] is populated with a JSON encoded event object",
"type": "string"
},
"type": "array"
},
"function": {
"description": "function that created this state object",
"type": "string"
},
"redirectedFromFunction": {
"description": "function that originally received the event",
"type": "string"
}
},
"type": "object"
},
"location": {
"description": "A geographical coordinate",
"properties": {
"latitude": {
"type": "number"
},
"longitude": {
"type": "number"
}
},
"type": "object"
},
"temperature": {
"description": "Temperature of the asset in CELSIUS.",
"type": "number"
},
"timestamp": {
"description": "Device timestamp.",
"type": "string"
},
"txntimestamp": {
"description": "Transaction timestamp matching that in the blockchain.",
"type": "string"
},
"txnuuid": {
"description": "Transaction UUID matching that in the blockchain.",
"type": "string"
}
},
"type": "object"
}
},
"type": "object"
},
"readAssetHistory": {
"description": "Requests a specified number of history states for an assets. Returns an array of states sorted with the most recent first. The 'assetID' property is required and the count property is optional. A missing count, a count of zero, or too large a count returns all existing history states.",
"properties": {
"args": {
"description": "args are JSON encoded strings",
"items": {
"description": "Requested 'assetID' with item 'count'.",
"properties": {
"assetID": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
},
"count": {
"type": "integer"
}
},
"required": [
"assetID"
],
"type": "object"
},
"maxItems": 1,
"minItems": 1,
"type": "array"
},
"function": {
"description": "readAssetHistory function",
"enum": [
"readAssetHistory"
],
"type": "string"
},
"method": "query",
"result": {
"description": "an array of states for one asset sorted by timestamp with the most recent entry first",
"items": {
"description": "A set of properties that constitute a complete asset state. Includes event properties and any other calculated properties such as compliance related alerts.",
"properties": {
"alerts": {
"description": "Active means that the alert is in force in this state. Raised means that the alert became active as the result of the event that generated this state. Cleared means that the alert became inactive as the result of the event that generated this state.",
"properties": {
"active": {
"items": {
"description": "Alerts are triggered or cleared by rules that are run against incoming events. This contract considers any active alert to created a state of non-compliance.",
"enum": [
"OVERTTEMP"
],
"type": "string"
},
"minItems": 0,
"type": "array"
},
"cleared": {
"items": {
"description": "Alerts are triggered or cleared by rules that are run against incoming events. This contract considers any active alert to created a state of non-compliance.",
"enum": [
"OVERTTEMP"
],
"type": "string"
},
"minItems": 0,
"type": "array"
},
"raised": {
"items": {
"description": "Alerts are triggered or cleared by rules that are run against incoming events. This contract considers any active alert to created a state of non-compliance.",
"enum": [
"OVERTTEMP"
],
"type": "string"
},
"minItems": 0,
"type": "array"
}
},
"type": "object"
},
"assetID": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
},
"carrier": {
"description": "transport entity currently in possession of asset",
"type": "string"
},
"compliant": {
"description": "A contract-specific indication that this asset is compliant.",
"type": "boolean"
},
"extension": {
"description": "Application-managed state. Opaque to contract.",
"properties": {},
"type": "object"
},
"lastEvent": {
"description": "function and string parameter that created this state object",
"properties": {
"args": {
"items": {
"description": "parameters to the function, usually args[0] is populated with a JSON encoded event object",
"type": "string"
},
"type": "array"
},
"function": {
"description": "function that created this state object",
"type": "string"
},
"redirectedFromFunction": {
"description": "function that originally received the event",
"type": "string"
}
},
"type": "object"
},
"location": {
"description": "A geographical coordinate",
"properties": {
"latitude": {
"type": "number"
},
"longitude": {
"type": "number"
}
},
"type": "object"
},
"temperature": {
"description": "Temperature of the asset in CELSIUS.",
"type": "number"
},
"timestamp": {
"description": "Device timestamp.",
"type": "string"
},
"txntimestamp": {
"description": "Transaction timestamp matching that in the blockchain.",
"type": "string"
},
"txnuuid": {
"description": "Transaction UUID matching that in the blockchain.",
"type": "string"
}
},
"type": "object"
},
"minItems": 0,
"type": "array"
}
},
"type": "object"
},
"readRecentStates": {
"description": "Returns the state of recently updated assets as an array of objects sorted with the most recently updated asset first. Each asset appears exactly once up to a maxmum of 20 in this version of the contract.",
"properties": {
"args": {
"description": "accepts no arguments",
"items": {},
"maxItems": 0,
"minItems": 0,
"type": "array"
},
"function": {
"description": "readRecentStates function",
"enum": [
"readRecentStates"
],
"type": "string"
},
"method": "query",
"result": {
"description": "an array of states for one asset sorted by timestamp with the most recent entry first",
"items": {
"description": "A set of properties that constitute a complete asset state. Includes event properties and any other calculated properties such as compliance related alerts.",
"properties": {
"alerts": {
"description": "Active means that the alert is in force in this state. Raised means that the alert became active as the result of the event that generated this state. Cleared means that the alert became inactive as the result of the event that generated this state.",
"properties": {
"active": {
"items": {
"description": "Alerts are triggered or cleared by rules that are run against incoming events. This contract considers any active alert to created a state of non-compliance.",
"enum": [
"OVERTTEMP"
],
"type": "string"
},
"minItems": 0,
"type": "array"
},
"cleared": {
"items": {
"description": "Alerts are triggered or cleared by rules that are run against incoming events. This contract considers any active alert to created a state of non-compliance.",
"enum": [
"OVERTTEMP"
],
"type": "string"
},
"minItems": 0,
"type": "array"
},
"raised": {
"items": {
"description": "Alerts are triggered or cleared by rules that are run against incoming events. This contract considers any active alert to created a state of non-compliance.",
"enum": [
"OVERTTEMP"
],
"type": "string"
},
"minItems": 0,
"type": "array"
}
},
"type": "object"
},
"assetID": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
},
"carrier": {
"description": "transport entity currently in possession of asset",
"type": "string"
},
"compliant": {
"description": "A contract-specific indication that this asset is compliant.",
"type": "boolean"
},
"extension": {
"description": "Application-managed state. Opaque to contract.",
"properties": {},
"type": "object"
},
"lastEvent": {
"description": "function and string parameter that created this state object",
"properties": {
"args": {
"items": {
"description": "parameters to the function, usually args[0] is populated with a JSON encoded event object",
"type": "string"
},
"type": "array"
},
"function": {
"description": "function that created this state object",
"type": "string"
},
"redirectedFromFunction": {
"description": "function that originally received the event",
"type": "string"
}
},
"type": "object"
},
"location": {
"description": "A geographical coordinate",
"properties": {
"latitude": {
"type": "number"
},
"longitude": {
"type": "number"
}
},
"type": "object"
},
"temperature": {
"description": "Temperature of the asset in CELSIUS.",
"type": "number"
},
"timestamp": {
"description": "Device timestamp.",
"type": "string"
},
"txntimestamp": {
"description": "Transaction timestamp matching that in the blockchain.",
"type": "string"
},
"txnuuid": {
"description": "Transaction UUID matching that in the blockchain.",
"type": "string"
}
},
"type": "object"
},
"minItems": 0,
"type": "array"
}
},
"type": "object"
},
"setCreateOnUpdate": {
"description": "Allow updateAsset to redirect to createAsset when 'assetID' does not exist.",
"properties": {
"args": {
"description": "True for redirect allowed, false for error on asset does not exist.",
"items": {
"setCreateOnUpdate": {
"type": "boolean"
}
},
"maxItems": 1,
"minItems": 1,
"type": "array"
},
"function": {
"description": "setCreateOnUpdate function",
"enum": [
"setCreateOnUpdate"
],
"type": "string"
},
"method": "invoke"
},
"type": "object"
},
"setLoggingLevel": {
"description": "Sets the logging level in the contract.",
"properties": {
"args": {
"description": "logging levels indicate what you see",
"items": {
"logLevel": {
"enum": [
"CRITICAL",
"ERROR",
"WARNING",
"NOTICE",
"INFO",
"DEBUG"
],
"type": "string"
}
},
"maxItems": 1,
"minItems": 1,
"type": "array"
},
"function": {
"description": "setLoggingLevel function",
"enum": [
"setLoggingLevel"
],
"type": "string"
},
"method": "invoke"
},
"type": "object"
},
"updateAsset": {
"description": "Update the state of an asset. The one argument is a JSON encoded event. The 'assetID' property is required along with one or more writable properties. Establishes the next asset state. ",
"properties": {
"args": {
"description": "args are JSON encoded strings",
"items": {
"description": "The set of writable properties that define an asset's state. For asset creation, the only mandatory property is the 'assetID'. Updates should include at least one other writable property. This exemplifies the IoT contract pattern 'partial state as event'.",
"properties": {
"assetID": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
},
"carrier": {
"description": "transport entity currently in possession of asset",
"type": "string"
},
"extension": {
"description": "Application-managed state. Opaque to contract.",
"properties": {},
"type": "object"
},
"location": {
"description": "A geographical coordinate",
"properties": {
"latitude": {
"type": "number"
},
"longitude": {
"type": "number"
}
},
"type": "object"
},
"temperature": {
"description": "Temperature of the asset in CELSIUS.",
"type": "number"
},
"timestamp": {
"description": "Device timestamp.",
"type": "string"
}
},
"required": [
"assetID"
],
"type": "object"
},
"maxItems": 1,
"minItems": 1,
"type": "array"
},
"function": {
"description": "updateAsset function",
"enum": [
"updateAsset"
],
"type": "string"
},
"method": "invoke"
},
"type": "object"
}
},
"objectModelSchemas": {
"assetIDKey": {
"description": "An object containing only an 'assetID' for use as an argument to read or delete.",
"properties": {
"assetID": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
}
},
"type": "object"
},
"assetIDandCount": {
"description": "Requested 'assetID' with item 'count'.",
"properties": {
"assetID": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
},
"count": {
"type": "integer"
}
},
"required": [
"assetID"
],
"type": "object"
},
"event": {
"description": "The set of writable properties that define an asset's state. For asset creation, the only mandatory property is the 'assetID'. Updates should include at least one other writable property. This exemplifies the IoT contract pattern 'partial state as event'.",
"properties": {
"assetID": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
},
"carrier": {
"description": "transport entity currently in possession of asset",
"type": "string"
},
"extension": {
"description": "Application-managed state. Opaque to contract.",
"properties": {},
"type": "object"
},
"location": {
"description": "A geographical coordinate",
"properties": {
"latitude": {
"type": "number"
},
"longitude": {
"type": "number"
}
},
"type": "object"
},
"temperature": {
"description": "Temperature of the asset in CELSIUS.",
"type": "number"
},
"timestamp": {
"description": "Device timestamp.",
"type": "string"
}
},
"required": [
"assetID"
],
"type": "object"
},
"initEvent": {
"description": "event sent to init on deployment",
"properties": {
"nickname": {
"default": "TRADELANE",
"description": "The nickname of the current contract",
"type": "string"
},
"version": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
}
},
"required": [
"version"
],
"type": "object"
},
"state": {
"description": "A set of properties that constitute a complete asset state. Includes event properties and any other calculated properties such as compliance related alerts.",
"properties": {
"alerts": {
"description": "Active means that the alert is in force in this state. Raised means that the alert became active as the result of the event that generated this state. Cleared means that the alert became inactive as the result of the event that generated this state.",
"properties": {
"active": {
"items": {
"description": "Alerts are triggered or cleared by rules that are run against incoming events. This contract considers any active alert to created a state of non-compliance.",
"enum": [
"OVERTTEMP"
],
"type": "string"
},
"minItems": 0,
"type": "array"
},
"cleared": {
"items": {
"description": "Alerts are triggered or cleared by rules that are run against incoming events. This contract considers any active alert to created a state of non-compliance.",
"enum": [
"OVERTTEMP"
],
"type": "string"
},
"minItems": 0,
"type": "array"
},
"raised": {
"items": {
"description": "Alerts are triggered or cleared by rules that are run against incoming events. This contract considers any active alert to created a state of non-compliance.",
"enum": [
"OVERTTEMP"
],
"type": "string"
},
"minItems": 0,
"type": "array"
}
},
"type": "object"
},
"assetID": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
},
"carrier": {
"description": "transport entity currently in possession of asset",
"type": "string"
},
"compliant": {
"description": "A contract-specific indication that this asset is compliant.",
"type": "boolean"
},
"extension": {
"description": "Application-managed state. Opaque to contract.",
"properties": {},
"type": "object"
},
"lastEvent": {
"description": "function and string parameter that created this state object",
"properties": {
"args": {
"items": {
"description": "parameters to the function, usually args[0] is populated with a JSON encoded event object",
"type": "string"
},
"type": "array"
},
"function": {
"description": "function that created this state object",
"type": "string"
},
"redirectedFromFunction": {
"description": "function that originally received the event",
"type": "string"
}
},
"type": "object"
},
"location": {
"description": "A geographical coordinate",
"properties": {
"latitude": {
"type": "number"
},
"longitude": {
"type": "number"
}
},
"type": "object"
},
"temperature": {
"description": "Temperature of the asset in CELSIUS.",
"type": "number"
},
"timestamp": {
"description": "Device timestamp.",
"type": "string"
},
"txntimestamp": {
"description": "Transaction timestamp matching that in the blockchain.",
"type": "string"
},
"txnuuid": {
"description": "Transaction UUID matching that in the blockchain.",
"type": "string"
}
},
"type": "object"
}
}
}`
package interpreter
import (
"github.com/influxdata/flux/values"
)
// Scope is a stack of variable bindings with lexical nesting:
// lookups fall through to the parent scope, writes stay local.
type Scope interface {
	// Lookup a name in the current scope, falling back to the
	// parent chain when the name is not bound locally.
	Lookup(name string) (values.Value, bool)
	// Set binds a variable in the current (innermost) scope only.
	Set(name string, v values.Value)
	// Nest creates a new scope by nesting the current scope.
	// If the passed in object is not nil, its values will be added to the new nested scope.
	Nest(values.Object) Scope
	// Pop returns the parent of the current scope (nil for the root).
	Pop() Scope
	// Size reports the number of visible names in scope.
	// NOTE(review): a name shadowed in a parent appears to be counted
	// once per scope that binds it — confirm this is intended.
	Size() int
	// Range applies f to all variable bindings in scope: local bindings
	// first, then each ancestor's (shadowed names are visited once per
	// scope that binds them).
	Range(f func(k string, v values.Value))
	// LocalRange applies f to the bindings of the current scope only.
	LocalRange(f func(k string, v values.Value))
	// SetReturn sets the return value of the scope.
	SetReturn(values.Value)
	// Return retrieves the return value of the scope.
	Return() values.Value
	// Copy creates a copy of the scope chain. The binding containers
	// are duplicated; the bound values themselves are shared.
	Copy() Scope
}
// scope is the default Scope implementation: a linked list of value
// objects, innermost scope first.
type scope struct {
	parent      Scope         // enclosing scope; nil for the root
	values      values.Object // bindings local to this scope
	returnValue values.Value  // value recorded by SetReturn; nil until set
}
func NewScope() Scope {
return &scope{
values: values.NewObject(),
}
}
func NewNestedScope(s Scope, obj values.Object) Scope {
if obj == nil {
obj = values.NewObject()
}
return &scope{
parent: s,
values: obj,
}
}
func (s *scope) Lookup(name string) (values.Value, bool) {
v, ok := s.values.Get(name)
if !ok && s.parent != nil {
return s.parent.Lookup(name)
}
return v, ok
}
func (s *scope) Set(name string, v values.Value) {
s.values.Set(name, v)
}
func (s *scope) Nest(obj values.Object) Scope {
return NewNestedScope(s, obj)
}
func (s *scope) Pop() Scope {
return s.parent
}
func (s *scope) Size() int {
if s.parent == nil {
return s.values.Len()
}
return s.values.Len() + s.parent.Size()
}
func (s *scope) Range(f func(k string, v values.Value)) {
s.values.Range(f)
if s.parent != nil {
s.parent.Range(f)
}
}
func (s *scope) LocalRange(f func(k string, v values.Value)) {
s.values.Range(f)
}
func (s *scope) SetReturn(v values.Value) {
s.returnValue = v
}
func (s *scope) Return() values.Value {
return s.returnValue
}
func (s *scope) Copy() Scope {
obj := values.NewObjectWithBacking(s.values.Len())
s.values.Range(func(k string, v values.Value) {
obj.Set(k, v)
})
var parent Scope
if s.parent != nil {
parent = s.parent.Copy()
}
return &scope{
values: obj,
parent: parent,
}
} | interpreter/scope.go | 0.669313 | 0.504455 | scope.go | starcoder |
package main
import (
"math"
"github.com/ewancook/reactor/thermo"
)
// dFCH4dW is the rate of change of the CH4 molar flow with catalyst
// weight W: CH4 is consumed by reactions 1 and 3.
// NOTE(review): the stoichiometric coefficients across these functions
// look like steam reforming of methane with an ethane side reaction —
// confirm against the reaction1..reaction4 rate definitions.
func dFCH4dW(T, denominator float64, partials map[string]float64) float64 {
	return -reaction1(T, denominator, partials) - reaction3(T, denominator, partials)
}

// dFH2OdW is the rate of change of the H2O molar flow: water is
// consumed by reactions 1 and 2, twice by reaction 3 and twice by
// reaction 4.
func dFH2OdW(T, denominator float64, partials map[string]float64) float64 {
	return -reaction1(T, denominator, partials) -
		reaction2(T, denominator, partials) -
		2*reaction3(T, denominator, partials) - 2*reaction4(T, partials)
}

// dFH2dW is the rate of change of the H2 molar flow: hydrogen is
// produced by all four reactions (3, 1, 4 and 5 mol respectively).
func dFH2dW(T, denominator float64, partials map[string]float64) float64 {
	return 3*reaction1(T, denominator, partials) +
		reaction2(T, denominator, partials) +
		4*reaction3(T, denominator, partials) + 5*reaction4(T, partials)
}

// dFCOdW is the rate of change of the CO molar flow: produced by
// reactions 1 and 4, consumed by reaction 2.
func dFCOdW(T, denominator float64, partials map[string]float64) float64 {
	return reaction1(T, denominator, partials) -
		reaction2(T, denominator, partials) + 2*reaction4(T, partials)
}

// dFCO2dW is the rate of change of the CO2 molar flow: produced by
// reactions 2 and 3.
func dFCO2dW(T, denominator float64, partials map[string]float64) float64 {
	return reaction2(T, denominator, partials) +
		reaction3(T, denominator, partials)
}

// dFC2H6dW is the rate of change of the C2H6 molar flow: ethane is
// consumed by reaction 4 only.
func dFC2H6dW(T float64, partials map[string]float64) float64 {
	return -reaction4(T, partials)
}
// dTdW is the reactor energy balance dT/dW: heat exchanged with the
// coolant minus the heat absorbed/released by the reactions, divided
// by the total heat capacity of the flowing mixture (sum of Fi*Cpi).
func dTdW(U, D, ρb, Tα, T, reaction_denominator float64, flows, partials map[string]float64) float64 {
	// Total heat capacity of the stream: sum over components of Cp(T) * flow.
	var denominator float64
	for compound, flow := range flows {
		denominator += thermo.SpecificHeat(compound, T) * flow
	}
	// Heat generated by the four reactions (rate * enthalpy each).
	heats := reaction1(T, reaction_denominator, partials)*reaction1Enthalpy(T) +
		reaction2(T, reaction_denominator, partials)*reaction2Enthalpy(T) +
		reaction3(T, reaction_denominator, partials)*reaction3Enthalpy(T) + reaction4(T, partials)*reaction4Enthalpy(T)
	// The factor 1000 presumably converts the enthalpy term from kJ to J
	// to match the specific-heat units — TODO confirm against thermo package.
	return (U*(4/D)/ρb*(Tα-T) - heats*1000) / denominator
}
// dPdW is the packed-bed pressure-drop ODE dP/dW (Ergun-type form):
// proportional to the pressure-drop parameter alpha, the temperature
// ratio T/T0 and the total-flow ratio F/F0, inversely proportional to
// the reduced pressure P/P0.
func dPdW(alpha, P, P0, T, T0, F, F0 float64) float64 {
	reducedPressure := P / P0
	return -alpha / 2 * P0 / reducedPressure * (T / T0) * (F / F0)
}

// α computes the pressure-drop parameter from beta, the bed cross
// section, catalyst density, void fraction ϕ and inlet pressure P0
// (the factor 1000 presumably converts P0's units — TODO confirm).
func α(beta, area, ρc, ϕ, P0 float64) float64 {
	solidFraction := 1 - ϕ
	return 2 * beta / (area * ρc * solidFraction * P0 * 1000)
}

// β computes the Ergun-equation friction parameter from void fraction ϕ,
// mass flux G, particle diameter Dp, gas viscosity μ and gas density ρg.
func β(ϕ, G, Dp, μ, ρg float64) float64 {
	turbulentTerm := 1.75 * G
	laminarTerm := 150 * (1 - ϕ) * μ / Dp
	flowFactor := G * (1 - ϕ) / (ρg * Dp * math.Pow(ϕ, 3))
	return flowFactor * (turbulentTerm + laminarTerm)
}

// dTαdW is the coolant-side energy balance dTα/dW: heat transferred
// from the reactor divided by the coolant heat capacity flow mc*aveCP.
func dTαdW(U, D, ρb, T, Tα, mc, aveCP float64) float64 {
	transferred := U * 4 / D / ρb * (T - Tα)
	return transferred / (mc * aveCP)
}
package utils
import (
"errors"
"regexp"
"strings"
)
// variableNamingRegexPattern matches (and captures) a variable
// identifier: letters, digits, '-' and '_'.
const variableNamingRegexPattern = `([a-zA-Z0-9\-\_]+)`

// descriptionNamingRegexPattern matches (and captures) a human-readable
// description, which must start with a letter and may contain spaces.
const descriptionNamingRegexPattern = `([a-zA-Z]+[a-zA-Z0-9\-\_ ]*)`

// variableDeclarationRegexPattern matches a whole declaration of the
// form "<name | description>" (anchored, optional spaces around '|').
const variableDeclarationRegexPattern = `^< *` + variableNamingRegexPattern + ` *\| *` + descriptionNamingRegexPattern + ` *>$`

// variableReferenceRegexPattern matches a "$<name>" variable reference
// anywhere inside an expression.
const variableReferenceRegexPattern = `\$<` + variableNamingRegexPattern + `>`
// IsValidVariableDeclaration reports whether expression is a
// well-formed variable declaration of the form "<name | description>".
func IsValidVariableDeclaration(expression string) bool {
	// The pattern is a package-level constant that always compiles,
	// so the error can safely be discarded.
	ok, _ := regexp.MatchString(variableDeclarationRegexPattern, expression)
	return ok
}

// DoesExpressionContainsVariableReference reports whether expression
// contains at least one "$<name>" variable reference.
func DoesExpressionContainsVariableReference(expression string) bool {
	ok, _ := regexp.MatchString(variableReferenceRegexPattern, expression)
	return ok
}
// ExtractVariableNameFromVariableDeclaration receives a string and returns the string representing
// the variable name in the declaration expression. It returns an error if the expression is not valid
func ExtractVariableNameFromVariableDeclaration(expression string) (string, error) {
if !IsValidVariableDeclaration(expression) {
return "", errors.New("Invalid variable declaration:" + expression)
}
rx := regexp.MustCompile(variableDeclarationRegexPattern)
return rx.FindStringSubmatch(expression)[1], nil
}
// EvaluateVariablesInExpression receives an expression and a replacementMap and returns the expression with all
// its variables replaced. It returns an error if the expression does not contain a variable reference or if a variable
// reference is not in the replacement map
func EvaluateVariablesInExpression(expression string, replacementMap map[string]interface{}) (string, error) {
if !DoesExpressionContainsVariableReference(expression) {
return expression, nil
}
rx := regexp.MustCompile(variableReferenceRegexPattern)
matches := rx.FindAllStringSubmatch(expression, -1)
for _, match := range matches {
if _, ok := replacementMap[match[1]]; !ok {
return "", errors.New("Variable: " + match[0] + " could not be evaluated")
}
expression = strings.ReplaceAll(expression, match[0], replacementMap[match[1]].(string))
}
return expression, nil
} | internal/utils/parsers.go | 0.806014 | 0.430447 | parsers.go | starcoder |
package schedule
import (
"fmt"
"github.com/marcsantiago/gocron"
"strings"
"time"
)
// Definition holds the data defining a schedule definition.
type Definition struct {
	// Interval value (every 1 minute would be expressed with an interval of 1). Must be set explicitly or implicitly (a weekday value implicitly sets the interval to 1)
	Interval uint64
	// Must be set explicitly or implicitly ("weeks" is implicitly set when "Weekday" is set). Valid time units are: "weeks", "hours", "days", "minutes", "seconds"
	Unit IntervalUnit
	// Optional day of the week. If set, unit and interval are ignored and implicitly considered to be "every 1 week"
	Weekday string
	// Optional "at time" value (i.e. "10:30")
	AtTime string
}

// DayOfWeek is the type definition for a string value of days of the week (based on time.Day.String()).
// NOTE(review): this type is not referenced elsewhere in this file — confirm it is used by callers.
type DayOfWeek string

// IntervalUnit is the type definition for a string value representing an interval unit.
type IntervalUnit string

// IntervalUnit values accepted in Definition.Unit.
const (
	Weeks   = IntervalUnit("weeks")
	Hours   = IntervalUnit("hours")
	Days    = IntervalUnit("days")
	Minutes = IntervalUnit("minutes")
	Seconds = IntervalUnit("seconds")
)

// weekdayToNumeral maps time.Weekday.String() names back to their
// time.Weekday values. Presumably used by callers to translate
// Definition.Weekday — it is not referenced in this file; verify.
var weekdayToNumeral = map[string]time.Weekday{
	time.Monday.String():    time.Monday,
	time.Tuesday.String():   time.Tuesday,
	time.Wednesday.String(): time.Wednesday,
	time.Thursday.String():  time.Thursday,
	time.Friday.String():    time.Friday,
	time.Saturday.String():  time.Saturday,
	time.Sunday.String():    time.Sunday,
}
// String renders the schedule definition in a human-friendly form,
// e.g. "Every Monday at 10:30", "Every hour" or "Every 2 weeks".
func (d Definition) String() string {
	var out strings.Builder
	out.WriteString("Every ")
	switch {
	case d.Weekday != "":
		out.WriteString(d.Weekday)
	case d.Interval == 1:
		// Singular form: strip the trailing "s" from the unit name.
		out.WriteString(strings.TrimSuffix(string(d.Unit), "s"))
	default:
		fmt.Fprintf(&out, "%d %s", d.Interval, d.Unit)
	}
	if d.AtTime != "" {
		out.WriteString(" at " + d.AtTime)
	}
	return out.String()
}
// ScheduleDefinitionBuilder holds a schedule Definition to build.
type ScheduleDefinitionBuilder struct {
	definition Definition
}

// New creates a new ScheduleDefinitionBuilder to set up a schedule Definition.
// The definition starts with an interval of 1.
func New() (sdb *ScheduleDefinitionBuilder) {
	sdb = new(ScheduleDefinitionBuilder)
	sdb.definition = Definition{Interval: 1}
	return sdb
}

// WithInterval sets the schedule interval and unit (every week would be interval 1 and unit Weeks).
// It returns the builder for chaining.
func (sdb *ScheduleDefinitionBuilder) WithInterval(interval uint64, unit IntervalUnit) *ScheduleDefinitionBuilder {
	sdb.definition.Interval = interval
	sdb.definition.Unit = unit
	return sdb
}

// WithUnit sets the schedule interval unit. Can't be set along with weekday (via Every).
// It returns the builder for chaining.
func (sdb *ScheduleDefinitionBuilder) WithUnit(unit IntervalUnit) *ScheduleDefinitionBuilder {
	sdb.definition.Unit = unit
	return sdb
}

// Every sets the day of the week to run on. Use time.<Day>.String() values. Can't be set along with WithUnit.
// It returns the builder for chaining.
func (sdb *ScheduleDefinitionBuilder) Every(weekday string) *ScheduleDefinitionBuilder {
	sdb.definition.Weekday = weekday
	return sdb
}

// AtTime sets the time of the day to run. Only makes sense for schedules with an interval larger than 1 day.
// It returns the builder for chaining.
func (sdb *ScheduleDefinitionBuilder) AtTime(atTime string) *ScheduleDefinitionBuilder {
	sdb.definition.AtTime = atTime
	return sdb
}

// Build returns the schedule Definition accumulated by the builder.
func (sdb *ScheduleDefinitionBuilder) Build() Definition {
	return sdb.definition
}
// scheduleOption mutates a gocron.Job to apply one aspect of a
// schedule Definition (weekday, interval unit, or at-time).
// (The previous comment describing "an option for a Slackscot" was a
// copy-paste leftover.)
type scheduleOption func(j *gocron.Job)

// optionWeekday returns an option pinning a recurring job to a day of
// the week. The gocron setters mutate the job in place, so their
// returned pointer does not need to be kept.
func optionWeekday(weekday string) scheduleOption {
	return func(j *gocron.Job) {
		switch weekday {
		case time.Monday.String():
			j.Monday()
		case time.Tuesday.String():
			j.Tuesday()
		case time.Wednesday.String():
			j.Wednesday()
		case time.Thursday.String():
			j.Thursday()
		case time.Friday.String():
			j.Friday()
		case time.Saturday.String():
			j.Saturday()
		case time.Sunday.String():
			j.Sunday()
		}
	}
}

// optionUnit returns an option setting the interval unit of a
// recurring job. Unknown units leave the job untouched.
func optionUnit(unit IntervalUnit) scheduleOption {
	return func(j *gocron.Job) {
		switch unit {
		case Weeks:
			j.Weeks()
		case Hours:
			j.Hours()
		case Days:
			j.Days()
		case Minutes:
			j.Minutes()
		case Seconds:
			j.Seconds()
		}
	}
}

// optionAtTime returns an option setting the time of day a recurring
// job runs at.
func optionAtTime(atTime string) scheduleOption {
	return func(j *gocron.Job) {
		j.At(atTime)
	}
}
// NewJob sets up the gocron.Job with the schedule and leaves the task undefined for the caller to set up
func NewJob(s *gocron.Scheduler, def Definition) (j *gocron.Job, err error) {
j = s.Every(def.Interval, false)
scheduleOptions := make([]scheduleOption, 0)
if def.Weekday != "" {
scheduleOptions = append(scheduleOptions, optionWeekday(def.Weekday))
} else if def.Unit != "" {
scheduleOptions = append(scheduleOptions, optionUnit(def.Unit))
}
if def.AtTime != "" {
if def.Unit == Minutes || def.Unit == Hours || def.Unit == Seconds {
return nil, fmt.Errorf("Can't run job on schedule [%s] with AtTime in conjunction with a sub-day IntervalUnit", def)
}
scheduleOptions = append(scheduleOptions, optionAtTime(def.AtTime))
}
for _, option := range scheduleOptions {
option(j)
}
if j.Err() != nil {
return nil, j.Err()
}
return j, nil
} | schedule/schedule.go | 0.788176 | 0.477676 | schedule.go | starcoder |
package document
import (
"bytes"
"encoding/json"
"errors"
"io"
"reflect"
)
// ErrValueNotFound must be returned by Array implementations, when calling the GetByIndex method and
// the index wasn't found in the array.
var ErrValueNotFound = errors.New("value not found")

// An Array contains a set of values.
type Array interface {
	// Iterate goes through all the values of the array and calls the given function by passing each one of them.
	// If the given function returns an error, the iteration stops and that error is returned.
	Iterate(fn func(i int, value Value) error) error
	// GetByIndex returns a value by index of the array.
	// Implementations must return ErrValueNotFound when the index is out of range.
	GetByIndex(i int) (Value, error)
}
// ArrayLength returns the length of an array. A ValueBuffer is sized
// directly; any other implementation is counted by iterating it, so
// the second return value carries any iteration error.
func ArrayLength(a Array) (int, error) {
	if vb, ok := a.(ValueBuffer); ok {
		return len(vb), nil
	}
	// Renamed from `len`, which shadowed the builtin of the same name.
	var count int
	err := a.Iterate(func(_ int, _ Value) error {
		count++
		return nil
	})
	return count, err
}
// ValueBuffer is an array that holds values in memory.
// It implements the Array interface.
type ValueBuffer []Value

// NewValueBuffer creates a buffer holding the given values.
func NewValueBuffer(values ...Value) ValueBuffer {
	return ValueBuffer(values)
}
// Iterate calls fn for each value held by the buffer in order,
// stopping at the first error returned by fn. It implements the
// Array interface.
func (vb ValueBuffer) Iterate(fn func(i int, value Value) error) error {
	for idx, val := range vb {
		if err := fn(idx, val); err != nil {
			return err
		}
	}
	return nil
}
// GetByIndex returns the value stored at the given index. It returns
// ErrValueNotFound when the index is out of range — including negative
// indexes, which previously caused a panic.
func (vb ValueBuffer) GetByIndex(i int) (Value, error) {
	if i < 0 || i >= len(vb) {
		return Value{}, ErrValueNotFound
	}
	return vb[i], nil
}
// Append a value to the buffer and return a new buffer.
// Standard append semantics apply: the result may share backing
// storage with the receiver, so callers should use the returned buffer.
func (vb ValueBuffer) Append(v Value) ValueBuffer {
	return append(vb, v)
}

// ScanArray copies all the values of a to the buffer.
// Values are appended as-is (shallow copy); use Copy for a deep copy.
func (vb *ValueBuffer) ScanArray(a Array) error {
	return a.Iterate(func(i int, v Value) error {
		*vb = append(*vb, v)
		return nil
	})
}
// Copy deep copies all the values from the given array.
// If a value is a document or an array, it will be stored as a FieldBuffer or ValueBuffer respectively.
func (vb *ValueBuffer) Copy(a Array) error {
	// Remember where the new values start so only they are deep-copied.
	start := len(*vb)
	err := vb.ScanArray(a)
	if err != nil {
		return err
	}
	// Replace each appended document/array value in place with its deep
	// copy. The previous implementation appended the copies at the end
	// of the buffer instead, leaving the shallow originals in place and
	// duplicating every nested value.
	for i := start; i < len(*vb); i++ {
		v := (*vb)[i]
		switch v.Type {
		case DocumentValue:
			var buf FieldBuffer
			err = buf.Copy(v.V.(Document))
			if err != nil {
				return err
			}
			(*vb)[i] = NewDocumentValue(&buf)
		case ArrayValue:
			var buf ValueBuffer
			err = buf.Copy(v.V.(Array))
			if err != nil {
				return err
			}
			(*vb)[i] = NewArrayValue(&buf)
		}
	}
	return nil
}

// Replace the value at the given index by v. It returns
// ErrFieldNotFound when the index is out of range — including negative
// indexes, which previously caused a panic.
func (vb *ValueBuffer) Replace(index int, v Value) error {
	if index < 0 || index >= len(*vb) {
		return ErrFieldNotFound
	}
	(*vb)[index] = v
	return nil
}

// UnmarshalJSON implements the json.Unmarshaler interface.
func (vb *ValueBuffer) UnmarshalJSON(data []byte) error {
	dec := json.NewDecoder(bytes.NewReader(data))
	t, err := dec.Token()
	// io.EOF signals empty input, as before.
	if err == io.EOF {
		return err
	}
	// Any other tokenization error is now reported directly instead of
	// being passed to the parser alongside an invalid token.
	if err != nil {
		return err
	}
	return parseJSONArray(dec, t, vb)
}
// sliceArray adapts a reflect.Value holding a Go slice to the Array interface.
type sliceArray struct {
	ref reflect.Value
}

// Compile-time check that sliceArray implements Array.
var _ Array = (*sliceArray)(nil)

// Iterate calls fn for every convertible element of the underlying slice.
// Elements whose Go type cannot be converted (errUnsupportedType) are
// silently skipped, so fn may observe gaps in the index sequence.
func (s sliceArray) Iterate(fn func(i int, v Value) error) error {
	l := s.ref.Len()
	for i := 0; i < l; i++ {
		f := s.ref.Index(i)
		v, err := reflectValueToValue(f)
		if err == errUnsupportedType {
			continue
		}
		if err != nil {
			return err
		}
		err = fn(i, v)
		if err != nil {
			return err
		}
	}
	return nil
}
func (s sliceArray) GetByIndex(i int) (Value, error) {
if i >= s.ref.Len() {
return Value{}, ErrFieldNotFound
}
v := s.ref.Index(i)
if !v.IsValid() {
return Value{}, ErrFieldNotFound
}
return reflectValueToValue(v)
} | document/array.go | 0.694924 | 0.422624 | array.go | starcoder |
package object
import (
"fmt"
"hash/fnv"
)
// A structure that represents a Null object (the absence of a value).
type Null struct{}

// A method of Null that returns the Null value type
func (n *Null) Type() ObjectType { return NULL_OBJ }

// A method of Null that returns the string value of the Null
func (n *Null) Inspect() string { return "null" }

// A structure that represents an Integer object
type Integer struct {
	// Represents the value of the Integer
	Value int64
}

// A method of Integer that returns the Integer value type
func (i *Integer) Type() ObjectType { return INTEGER_OBJ }

// A method of Integer that returns the string value of the Integer
func (i *Integer) Inspect() string { return fmt.Sprintf("%d", i.Value) }

// A method of Integer that return the HashKey of the object.
// The integer's own value doubles as the hash, so equal integers
// always produce equal keys.
func (i *Integer) HashKey() HashKey {
	// Create and return the HashKey object from the integer value
	return HashKey{Type: i.Type(), Value: uint64(i.Value)}
}
// Boolean wraps a native bool value as an object.
type Boolean struct {
	// Value is the wrapped bool
	Value bool
}

// Type returns the Boolean value type tag.
func (b *Boolean) Type() ObjectType { return BOOLEAN_OBJ }

// Inspect renders the Boolean as "true" or "false".
func (b *Boolean) Inspect() string { return fmt.Sprintf("%t", b.Value) }

// HashKey returns the HashKey of the object: 1 for true, 0 for false.
func (b *Boolean) HashKey() HashKey {
	value := uint64(0)
	if b.Value {
		value = 1
	}
	return HashKey{Type: b.Type(), Value: value}
}
// A structure that represents a String object
type String struct {
// Represents the value of the String
Value string
}
// A method of String that returns the String value type
func (s *String) Type() ObjectType { return STRING_OBJ }
// A method of String that returns the string value of the String
func (s *String) Inspect() string { return s.Value }
// A method of String that returns the HashKey of the object
func (s *String) HashKey() HashKey {
// Create new 64bit FNV hasher
h := fnv.New64a()
// Write the string value to the hasher
h.Write([]byte(s.Value))
// Return the HashKey object
return HashKey{Type: s.Type(), Value: h.Sum64()}
} | object/datatypes.go | 0.81648 | 0.436442 | datatypes.go | starcoder |
package trieutil
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
protodb "github.com/prysmaticlabs/prysm/proto/beacon/db"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/hashutil"
"github.com/prysmaticlabs/prysm/shared/mathutil"
)
// SparseMerkleTrie implements a sparse, general purpose Merkle trie to be used
// across ETH2.0 Phase 0 functionality.
type SparseMerkleTrie struct {
	depth         uint       // number of hashing layers above the leaves
	branches      [][][]byte // branches[i] is layer i, leaves at index 0
	originalItems [][]byte   // list of provided items before hashing them into leaves.
}
// NewTrie returns a trie of the given depth holding a single all-zero
// 32-byte leaf.
func NewTrie(depth uint64) (*SparseMerkleTrie, error) {
	var zero [32]byte
	return GenerateTrieFromItems([][]byte{zero[:]}, depth)
}

// CreateTrieFromProto rebuilds a SparseMerkleTrie from its protobuf
// representation.
func CreateTrieFromProto(trieObj *protodb.SparseMerkleTrie) *SparseMerkleTrie {
	layers := make([][][]byte, len(trieObj.Layers))
	for i, l := range trieObj.Layers {
		layers[i] = l.Layer
	}
	return &SparseMerkleTrie{
		depth:         uint(trieObj.Depth),
		originalItems: trieObj.OriginalItems,
		branches:      layers,
	}
}
// GenerateTrieFromItems constructs a Merkle trie from a sequence of byte slices.
// Each item is truncated/padded to 32 bytes to form the leaf layer; odd-length
// layers are padded with the precomputed zero hash of that depth before pairing.
func GenerateTrieFromItems(items [][]byte, depth uint64) (*SparseMerkleTrie, error) {
	if len(items) == 0 {
		return nil, errors.New("no items provided to generate Merkle trie")
	}
	leaves := items
	layers := make([][][]byte, depth+1)
	// Normalize every leaf to exactly 32 bytes.
	transformedLeaves := make([][]byte, len(leaves))
	for i := range leaves {
		arr := bytesutil.ToBytes32(leaves[i])
		transformedLeaves[i] = arr[:]
	}
	layers[0] = transformedLeaves
	for i := uint64(0); i < depth; i++ {
		// Pad odd layers so every node has a sibling to hash with.
		if len(layers[i])%2 == 1 {
			layers[i] = append(layers[i], ZeroHashes[i][:])
		}
		updatedValues := make([][]byte, 0)
		// Parent = hash(left || right) for each adjacent pair.
		for j := 0; j < len(layers[i]); j += 2 {
			concat := hashutil.Hash(append(layers[i][j], layers[i][j+1]...))
			updatedValues = append(updatedValues, concat[:])
		}
		layers[i+1] = updatedValues
	}
	return &SparseMerkleTrie{
		branches:      layers,
		originalItems: items,
		depth:         uint(depth),
	}, nil
}

// Items returns the original items passed in when creating the Merkle trie.
// The internal slice is returned directly; callers must not mutate it.
func (m *SparseMerkleTrie) Items() [][]byte {
	return m.originalItems
}

// Root returns the top-most, Merkle root of the trie.
// The item count is mixed in as a little-endian uint64 before the
// final hash (length mix-in).
func (m *SparseMerkleTrie) Root() [32]byte {
	enc := [32]byte{}
	binary.LittleEndian.PutUint64(enc[:], uint64(len(m.originalItems)))
	return hashutil.Hash(append(m.branches[len(m.branches)-1][0], enc[:]...))
}
// Insert an item into the trie at the given leaf index, then rehash
// the path from that leaf up to the root. The leaf layer is grown with
// zero hashes as needed so the index always exists.
func (m *SparseMerkleTrie) Insert(item []byte, index int) {
	// Grow the leaf layer until the target index is addressable.
	for index >= len(m.branches[0]) {
		m.branches[0] = append(m.branches[0], ZeroHashes[0][:])
	}
	someItem := bytesutil.ToBytes32(item)
	m.branches[0][index] = someItem[:]
	// Keep originalItems in sync with the leaf layer.
	if index >= len(m.originalItems) {
		m.originalItems = append(m.originalItems, someItem[:])
	} else {
		m.originalItems[index] = someItem[:]
	}
	currentIndex := index
	root := bytesutil.ToBytes32(item)
	// Recompute each ancestor by hashing the updated node with its
	// sibling (a zero hash when the sibling does not exist yet).
	for i := 0; i < int(m.depth); i++ {
		isLeft := currentIndex%2 == 0
		neighborIdx := currentIndex ^ 1
		var neighbor []byte
		if neighborIdx >= len(m.branches[i]) {
			neighbor = ZeroHashes[i][:]
		} else {
			neighbor = m.branches[i][neighborIdx]
		}
		if isLeft {
			parentHash := hashutil.Hash(append(root[:], neighbor...))
			root = parentHash
		} else {
			parentHash := hashutil.Hash(append(neighbor, root[:]...))
			root = parentHash
		}
		parentIdx := currentIndex / 2
		// Store the recomputed parent, extending the layer if needed.
		if len(m.branches[i+1]) == 0 || parentIdx >= len(m.branches[i+1]) {
			newItem := root
			m.branches[i+1] = append(m.branches[i+1], newItem[:])
		} else {
			newItem := root
			m.branches[i+1][parentIdx] = newItem[:]
		}
		currentIndex = parentIdx
	}
}

// MerkleProof computes a proof from a trie's branches using a Merkle index.
// The proof has depth+1 elements: one sibling per layer plus a final
// little-endian encoding of the item count (length mix-in), matching Root.
func (m *SparseMerkleTrie) MerkleProof(index int) ([][]byte, error) {
	merkleIndex := uint(index)
	leaves := m.branches[0]
	if index >= len(leaves) {
		return nil, fmt.Errorf("merkle index out of range in trie, max range: %d, received: %d", len(leaves), index)
	}
	proof := make([][]byte, m.depth+1)
	for i := uint(0); i < m.depth; i++ {
		// Sibling index at layer i; fall back to the zero hash when the
		// sibling has never been populated.
		subIndex := (merkleIndex / (1 << i)) ^ 1
		if subIndex < uint(len(m.branches[i])) {
			item := bytesutil.ToBytes32(m.branches[i][subIndex])
			proof[i] = item[:]
		} else {
			proof[i] = ZeroHashes[i][:]
		}
	}
	enc := [32]byte{}
	binary.LittleEndian.PutUint64(enc[:], uint64(len(m.originalItems)))
	proof[len(proof)-1] = enc[:]
	return proof, nil
}

// HashTreeRoot of the Merkle trie as defined in the deposit contract.
// Spec Definition:
//   sha256(concat(node, self.to_little_endian_64(self.deposit_count), slice(zero_bytes32, start=0, len=24)))
// A trie holding a single all-zero leaf counts as empty (deposit count 0).
func (m *SparseMerkleTrie) HashTreeRoot() [32]byte {
	var zeroBytes [32]byte
	depositCount := uint64(len(m.originalItems))
	if len(m.originalItems) == 1 && bytes.Equal(m.originalItems[0], zeroBytes[:]) {
		// Accounting for empty tries
		depositCount = 0
	}
	newNode := append(m.branches[len(m.branches)-1][0], bytesutil.Bytes8(depositCount)...)
	newNode = append(newNode, zeroBytes[:24]...)
	return hashutil.Hash(newNode)
}
// ToProto serializes the trie into its corresponding protobuf object.
func (m *SparseMerkleTrie) ToProto() *protodb.SparseMerkleTrie {
	layers := make([]*protodb.TrieLayer, len(m.branches))
	for i, branch := range m.branches {
		layers[i] = &protodb.TrieLayer{Layer: branch}
	}
	return &protodb.SparseMerkleTrie{
		Depth:         uint64(m.depth),
		Layers:        layers,
		OriginalItems: m.originalItems,
	}
}
// VerifyMerkleBranch verifies a Merkle branch against a root of a trie.
// The proof must have depth+1 elements (one sibling per layer plus the
// length mix-in produced by MerkleProof). The leaf is hashed upward,
// choosing left/right order from the bits of merkleIndex, and the
// resulting node is compared with root.
func VerifyMerkleBranch(root, item []byte, merkleIndex int, proof [][]byte, depth uint64) bool {
	if len(proof) != int(depth)+1 {
		return false
	}
	node := bytesutil.ToBytes32(item)
	for i := 0; i <= int(depth); i++ {
		// Bit i of the index decides whether the node is a right child.
		if (uint64(merkleIndex) / mathutil.PowerOf2(uint64(i)) % 2) != 0 {
			node = hashutil.Hash(append(proof[i], node[:]...))
		} else {
			node = hashutil.Hash(append(node[:], proof[i]...))
		}
	}
	return bytes.Equal(root, node[:])
}
// Copy performs a deep copy of the trie.
func (m *SparseMerkleTrie) Copy() *SparseMerkleTrie {
dstBranches := make([][][]byte, len(m.branches))
for i1, srcB1 := range m.branches {
dstBranches[i1] = bytesutil.Copy2dBytes(srcB1)
}
return &SparseMerkleTrie{
depth: m.depth,
branches: dstBranches,
originalItems: bytesutil.Copy2dBytes(m.originalItems),
}
} | .docker/Prysm/prysm-spike/shared/trieutil/sparse_merkle.go | 0.695235 | 0.413418 | sparse_merkle.go | starcoder |
package nock
import (
"strconv"
"strings"
)
// A Noun is an atom or a cell. An atom is any natural number. A cell is any
// ordered pair of nouns. Exactly one of the two fields is expected to
// be non-nil.
type Noun struct {
	atom *int     // set when the noun is an atom
	cell *[2]Noun // set when the noun is a cell: [head, tail]
}

// IsAtom returns true if n is an atom.
func (n Noun) IsAtom() bool { return n.atom != nil }

// IsCell returns true if n is a cell.
func (n Noun) IsCell() bool { return n.cell != nil }

// Num returns the integer value of n, which must be an atom
// (panics via nil dereference otherwise).
func (n Noun) Num() int { return *n.atom }

// Head returns the head of n, which must be a cell
// (panics via nil dereference otherwise).
func (n Noun) Head() Noun { return n.cell[0] }

// Tail returns the tail of n, which must be a cell
// (panics via nil dereference otherwise).
func (n Noun) Tail() Noun { return n.cell[1] }
// String implements the fmt.Stringer interface, rendering atoms as
// decimal numbers and cells as "[head tail]".
func (n Noun) String() string {
	if n.IsCell() {
		return "[" + n.Head().String() + " " + n.Tail().String() + "]"
	}
	return strconv.Itoa(n.Num())
}
// Atom returns an atom with value i.
func Atom(i int) Noun { return Noun{atom: &i} }

// Cell returns a cell that pairs head with tail.
func Cell(head, tail Noun) Noun { return Noun{cell: &[2]Noun{head, tail}} }
// Loobean returns the atom 0 if b is true, and the atom 1 if b is false
// (Nock's loobean convention: 0 means yes, 1 means no).
func Loobean(b bool) Noun {
	// A plain branch avoids the map literal the previous version
	// allocated on every call.
	if b {
		return Atom(0)
	}
	return Atom(1)
}
// wut implements Nock's ? operator: 0 (yes) if n is a cell, 1 (no) if an atom.
func wut(n Noun) Noun { return Loobean(n.IsCell()) }

// lus implements Nock's + operator: increments an atom (panics on a cell).
func lus(n Noun) Noun { return Atom(1 + n.Num()) }

// tis implements Nock's = operator: equality of head and tail, tested by
// comparing their printed forms (the rendering is unambiguous, but this
// allocates strings proportional to the noun size).
func tis(n Noun) Noun { return Loobean(n.Head().String() == n.Tail().String()) }
// fas implements Nock's / operator: tree addressing. Axis 1 is the whole
// noun, 2 its head, 3 its tail; larger axes resolve bit by bit (axis 2k
// is the head of axis k, axis 2k+1 its tail).
// NOTE(review): i < 1 is not rejected and recurses without bound.
func fas(i int, n Noun) Noun {
	switch i {
	case 1:
		return n
	case 2:
		return n.Head()
	case 3:
		return n.Tail()
	default:
		// Resolve the parent axis first, then take head (even) or tail (odd).
		return fas(2+i%2, fas(i/2, n))
	}
}
// hax implements Nock's # operator: n is a cell [new-value target];
// the result is target with the subtree at axis i replaced by new-value.
func hax(i int, n Noun) Noun {
	// #[1 a b] a
	if i == 1 {
		return n.Head()
	}
	a := i / 2
	b := n.Head()
	c := n.Tail()
	if i%2 == 0 {
		// #[(a + a) b c] #[a [b /[(a + a + 1) c]] c]
		return hax(a, Cell(Cell(b, fas(a+a+1, c)), c))
	} else {
		// #[(a + a + 1) b c] #[a [/[(a + a) c] b] c]
		return hax(a, Cell(Cell(fas(a+a, c), b), c))
	}
}
// tar5 implements Nock 5's * operator: it evaluates the formula `form`
// against the subject `sub`. Panics on an unknown instruction atom.
func tar5(sub, form Noun) Noun {
	// Distribution rule.
	// *[a [b c] d] [*[a b c] *[a d]]
	if form.Head().IsCell() {
		return Cell(tar5(sub, form.Head()), tar5(sub, form.Tail()))
	}
	inst, arg := form.Head(), form.Tail()
	switch inst.Num() {
	case 0:
		// Read memory slot.
		// *[a 0 b] /[b a]
		return fas(arg.Num(), sub)
	case 1:
		// Quote.
		// *[a 1 b] b
		return arg
	case 2:
		// Change subject.
		// *[a 2 b c] *[*[a b] *[a c]]
		return tar5(tar5(sub, arg.Head()), tar5(sub, arg.Tail()))
	case 3:
		// Cell test.
		// *[a 3 b] ?*[a b]
		return wut(tar5(sub, arg))
	case 4:
		// Increment.
		// *[a 4 b] +*[a b]
		return lus(tar5(sub, arg))
	case 5:
		// Equality test.
		// *[a 5 b] =*[a b]
		return tis(Cell(tar5(sub, arg.Head()), tar5(sub, arg.Tail())))
	case 6:
		// If/else.
		// *[a 6 b c d] *[a 2 [0 1] 2 [1 c d] [1 0] 2 [1 2 3] [1 0] 4 4 b]
		// Evaluated directly: condition 0 picks c (axis 6 of arg),
		// anything else picks d (axis 7).
		if tar5(sub, arg.Head()).Num() == 0 {
			return tar5(sub, fas(6, arg))
		}
		return tar5(sub, fas(7, arg))
	case 7:
		// Compose.
		// *[a 7 b c] *[a 2 b 1 c]
		return tar5(tar5(sub, arg.Head()), arg.Tail())
	case 8:
		// Add value to head of subject.
		// *[a 8 b c] *[a 7 [[7 [0 1] b] 0 1] c]
		return tar5(Cell(tar5(sub, arg.Head()), sub), arg.Tail())
	case 9:
		// Create a core and run one of its arms.
		// *[a 9 b c] *[a 7 c 2 [0 1] 0 b]
		d := tar5(sub, arg.Tail())
		return tar5(d, fas(arg.Head().Num(), d))
	case 10:
		// Hints.
		// *[a 10 [b c] d] *[a 8 c 7 [0 3] d]
		// *[a 10 b c] *[a c]
		// A cell hint's clause is still evaluated for effect, then discarded.
		if b := arg.Head(); b.IsCell() {
			_ = tar5(sub, b.Tail())
		}
		return tar5(sub, arg.Tail())
	default:
		panic("Invalid instruction " + strconv.Itoa(inst.Num()))
	}
}

// Nock5 evaluates the nock function on n using Nock 5.
// n must be a cell [subject formula].
func Nock5(n Noun) Noun {
	return tar5(n.Head(), n.Tail())
}
// tar4 implements Nock 4's * operator: it evaluates the formula `form`
// against the subject `sub`. Nock 4 differs from Nock 5 in opcode 10
// (replace memory slot) and moves hints to opcode 11. Panics on an
// unknown instruction atom.
func tar4(sub, form Noun) Noun {
	// Distribution rule.
	// *[a [b c] d] [*[a b c] *[a d]]
	if form.Head().IsCell() {
		return Cell(tar4(sub, form.Head()), tar4(sub, form.Tail()))
	}
	inst, arg := form.Head(), form.Tail()
	switch inst.Num() {
	case 0:
		// Read memory slot.
		// *[a 0 b] /[b a]
		return fas(arg.Num(), sub)
	case 1:
		// Quote.
		// *[a 1 b] b
		return arg
	case 2:
		// Change subject.
		// *[a 2 b c] *[*[a b] *[a c]]
		return tar4(tar4(sub, arg.Head()), tar4(sub, arg.Tail()))
	case 3:
		// Cell test.
		// *[a 3 b] ?*[a b]
		return wut(tar4(sub, arg))
	case 4:
		// Increment.
		// *[a 4 b] +*[a b]
		return lus(tar4(sub, arg))
	case 5:
		// Equality test.
		// *[a 5 b c] =[*[a b] *[a c]]
		return tis(Cell(tar4(sub, arg.Head()), tar4(sub, arg.Tail())))
	case 6:
		// If/else.
		// *[a 6 b c d] *[a *[[c d] 0 *[[2 3] 0 *[a 4 4 b]]]]
		// Evaluated directly: condition 0 picks c (axis 6 of arg),
		// anything else picks d (axis 7).
		if tar4(sub, arg.Head()).Num() == 0 {
			return tar4(sub, fas(6, arg))
		}
		return tar4(sub, fas(7, arg))
	case 7:
		// Compose.
		// *[a 7 b c] *[*[a b] c]
		return tar4(tar4(sub, arg.Head()), arg.Tail())
	case 8:
		// Add value to head of subject.
		// *[a 8 b c] *[[*[a b] a] c]
		return tar4(Cell(tar4(sub, arg.Head()), sub), arg.Tail())
	case 9:
		// Create a core and run one of its arms.
		// *[a 9 b c] *[*[a c] 2 [0 1] 0 b]
		d := tar4(sub, arg.Tail())
		return tar4(d, fas(arg.Head().Num(), d))
	case 10:
		// Replace memory slot.
		// *[a 10 [b c] d] #[b *[a c] *[a d]]
		b := arg.Head().Head()
		c := arg.Head().Tail()
		d := arg.Tail()
		return hax(b.Num(), Cell(tar4(sub, c), tar4(sub, d)))
	case 11:
		// Hints.
		// *[a 11 [b c] d] *[[*[a c] *[a d]] 0 3]
		// *[a 11 b c] *[a c]
		// A cell hint's clause is still evaluated for effect, then discarded.
		if b := arg.Head(); b.IsCell() {
			_ = tar4(sub, b.Tail())
		}
		return tar4(sub, arg.Tail())
	default:
		panic("Invalid instruction " + strconv.Itoa(inst.Num()))
	}
}

// Nock4 evaluates the nock function on n using Nock 4.
// n must be a cell [subject formula].
func Nock4(n Noun) Noun {
	return tar4(n.Head(), n.Tail())
}

// Nock evaluates the nock function on n using the latest Nock version.
func Nock(n Noun) Noun {
	// With Kelvin versioning the smallest number is the latest.
	return Nock4(n)
}
// Parse parses a Nock program from its textual "[a b c]" form.
// Malformed input is not reported: tokens that fail to parse as
// integers silently become the atom 0.
func Parse(s string) Noun {
	spaced := strings.NewReplacer("[", " [ ", "]", " ] ").Replace(s)
	n, _ := parseNoun(strings.Fields(spaced))
	return n
}

// parseNoun parses one noun from the token stream, returning it along
// with the unconsumed tokens.
func parseNoun(s []string) (Noun, []string) {
	if s[0] != "[" {
		return parseAtom(s)
	}
	return parseCell(s)
}
// parseCell parses a bracketed cell from the token stream. Elements are
// folded right-to-left into nested pairs, so "[a b c]" parses the same
// as "[a [b c]]".
func parseCell(s []string) (Noun, []string) {
	s = s[1:]
	var elems []Noun
	for s[0] != "]" {
		var e Noun
		e, s = parseNoun(s)
		elems = append(elems, e)
	}
	// Fold the trailing pair repeatedly: [a b c] -> [a [b c]].
	for len(elems) > 1 {
		elems = append(elems[:len(elems)-2], Cell(elems[len(elems)-2], elems[len(elems)-1]))
	}
	return elems[0], s[1:]
}
func parseAtom(s []string) (Noun, []string) {
i, _ := strconv.Atoi(s[0])
return Atom(i), s[1:]
} | nock.go | 0.710929 | 0.620794 | nock.go | starcoder |
package display
import (
"fmt"
mgl "github.com/go-gl/mathgl/mgl32"
"github.com/inkyblackness/shocked-model"
"github.com/inkyblackness/shocked-client/graphics"
"github.com/inkyblackness/shocked-client/opengl"
)
// mapTileSlopeVertexShaderSource transforms 2D vertex positions through
// the model/view/projection matrices and forwards the vertex Z
// component to the fragment shader as a hue value.
var mapTileSlopeVertexShaderSource = `
  #version 150
  precision mediump float;

  in vec3 vertexPosition;

  uniform mat4 modelMatrix;
  uniform mat4 viewMatrix;
  uniform mat4 projectionMatrix;

  out float hue;

  void main(void) {
    gl_Position = projectionMatrix * viewMatrix * modelMatrix * vec4(vertexPosition.xy, 0.0, 1.0);
    hue = vertexPosition.z;
  }
`

// mapTileSlopeFragmentShaderSource converts the interpolated hue to an
// RGB color (fixed saturation 1.0, value 0.8) at half opacity.
var mapTileSlopeFragmentShaderSource = `
  #version 150
  precision mediump float;

  in float hue;
  out vec4 fragColor;

  vec3 hsv2rgb(vec3 c) {
    vec4 K = vec4(1.0, 2.0 / 3.0, 1.0 / 3.0, 3.0);
    vec3 p = abs(fract(c.xxx + K.xyz) * 6.0 - K.www);
    return c.z * mix(K.xxx, clamp(p - K.xxx, 0.0, 1.0), c.y);
  }

  void main(void) {
    fragColor = vec4(hsv2rgb(vec3(hue, 1.0, 0.8)), 0.5);
  }
`
// TileSlopeMapRenderable is a renderable for the tile slopes.
type TileSlopeMapRenderable struct {
	context *graphics.RenderContext // rendering context providing the GL handle

	program                 uint32 // linked slope shader program
	vao                     *opengl.VertexArrayObject
	vertexPositionBuffer    uint32 // GL buffer holding the slope marker vertices
	vertexPositionAttrib    int32
	modelMatrixUniform      opengl.Matrix4Uniform
	viewMatrixUniform       opengl.Matrix4Uniform
	projectionMatrixUniform opengl.Matrix4Uniform

	// tiles is a square grid of tile properties, indexed presumably as
	// [x][y] over tilesPerMapSide — confirm against the setter/renderer.
	tiles [][]*model.TileProperties
}
// NewTileSlopeMapRenderable returns a new instance of a renderable for tile slopes.
// It links the slope shader program, resolves its attribute/uniform
// locations, and uploads a static vertex buffer containing one floor
// and one ceiling triangle per tile corner (eight triangles total).
// Panics if the shader program fails to link.
func NewTileSlopeMapRenderable(context *graphics.RenderContext) *TileSlopeMapRenderable {
	gl := context.OpenGl()
	program, programErr := opengl.LinkNewStandardProgram(gl, mapTileSlopeVertexShaderSource, mapTileSlopeFragmentShaderSource)

	if programErr != nil {
		panic(fmt.Errorf("TileSlopeMapRenderable shader failed: %v", programErr))
	}
	renderable := &TileSlopeMapRenderable{
		context:                 context,
		program:                 program,
		vao:                     opengl.NewVertexArrayObject(gl, program),
		vertexPositionBuffer:    gl.GenBuffers(1)[0],
		vertexPositionAttrib:    gl.GetAttribLocation(program, "vertexPosition"),
		modelMatrixUniform:      opengl.Matrix4Uniform(gl.GetUniformLocation(program, "modelMatrix")),
		viewMatrixUniform:       opengl.Matrix4Uniform(gl.GetUniformLocation(program, "viewMatrix")),
		projectionMatrixUniform: opengl.Matrix4Uniform(gl.GetUniformLocation(program, "projectionMatrix")),
		tiles:                   make([][]*model.TileProperties, int(tilesPerMapSide))}
	for i := 0; i < len(renderable.tiles); i++ {
		renderable.tiles[i] = make([]*model.TileProperties, int(tilesPerMapSide))
	}

	renderable.vao.OnShader(func() {
		// Marker geometry: small triangles near each tile corner, in a
		// unit tile coordinate system centered on the tile.  The third
		// vertex component is the hue (floorHue = greenish, ceilingHue
		// = reddish via the HSV fragment shader).
		dotHalf := float32(0.05)
		dotBase := float32(0.5) - (dotHalf * 2.0)
		floorHue := float32(0.3)
		ceilingHue := float32(0.0)
		top := dotBase + dotHalf
		topEnd := dotBase - dotHalf
		left := -dotBase - dotHalf
		leftEnd := -dotBase + dotHalf
		right := top
		rightEnd := topEnd
		bottom := left
		bottomEnd := leftEnd
		// Per corner: one floor triangle followed by one ceiling
		// triangle (3 vertices each); Render indexes into this layout
		// with floorStarts/ceilingStarts.
		vertices := []float32{
			left, top, floorHue, leftEnd, top, floorHue, left, topEnd, floorHue,
			leftEnd, top, ceilingHue, leftEnd, topEnd, ceilingHue, left, topEnd, ceilingHue,

			right, top, floorHue, right, topEnd, floorHue, rightEnd, top, floorHue,
			rightEnd, top, ceilingHue, right, topEnd, ceilingHue, rightEnd, topEnd, ceilingHue,

			right, bottom, floorHue, rightEnd, bottom, floorHue, right, bottomEnd, floorHue,
			right, bottomEnd, ceilingHue, rightEnd, bottom, ceilingHue, rightEnd, bottomEnd, ceilingHue,

			left, bottomEnd, floorHue, leftEnd, bottom, floorHue, left, bottom, floorHue,
			left, bottomEnd, ceilingHue, leftEnd, bottomEnd, ceilingHue, leftEnd, bottom, ceilingHue}

		gl.BindBuffer(opengl.ARRAY_BUFFER, renderable.vertexPositionBuffer)
		gl.BufferData(opengl.ARRAY_BUFFER, len(vertices)*4, vertices, opengl.STATIC_DRAW)
		gl.BindBuffer(opengl.ARRAY_BUFFER, 0)
	})
	renderable.vao.WithSetter(func(gl opengl.OpenGl) {
		gl.EnableVertexAttribArray(uint32(renderable.vertexPositionAttrib))
		gl.BindBuffer(opengl.ARRAY_BUFFER, renderable.vertexPositionBuffer)
		// Three floats per vertex (x, y, hue), tightly packed.
		gl.VertexAttribOffset(uint32(renderable.vertexPositionAttrib), 3, opengl.FLOAT, false, 0, 0)
		gl.BindBuffer(opengl.ARRAY_BUFFER, 0)
	})

	return renderable
}
// Dispose releases any internal resources: the shader program, the
// vertex buffer, and the vertex array object.
func (renderable *TileSlopeMapRenderable) Dispose() {
	openGl := renderable.context.OpenGl()

	openGl.DeleteProgram(renderable.program)
	openGl.DeleteBuffers([]uint32{renderable.vertexPositionBuffer})
	renderable.vao.Dispose()
}
// SetTile sets the properties for the specified tile coordinate.
// Passing nil clears the tile.
// NOTE(review): x and y are not range-checked; out-of-range
// coordinates panic — confirm callers keep 0 <= x,y < tilesPerMapSide.
func (renderable *TileSlopeMapRenderable) SetTile(x, y int, properties *model.TileProperties) {
	renderable.tiles[y][x] = properties
}
// Clear resets all tiles to nil so that nothing is rendered.
func (renderable *TileSlopeMapRenderable) Clear() {
	for _, row := range renderable.tiles {
		for x := range row {
			row[x] = nil
		}
	}
}
// slopeTicksByType maps each sloped tile type to the corner indices
// (0..3, matching the vertex buffer layout) at which tick markers are
// drawn.
var slopeTicksByType = map[model.TileType][]int{
	model.SlopeSouthToNorth:          {0, 1},
	model.SlopeWestToEast:            {1, 2},
	model.SlopeNorthToSouth:          {2, 3},
	model.SlopeEastToWest:            {3, 0},
	model.ValleySouthEastToNorthWest: {3, 0, 1},
	model.ValleySouthWestToNorthEast: {0, 1, 2},
	model.ValleyNorthWestToSouthEast: {1, 2, 3},
	model.ValleyNorthEastToSouthWest: {2, 3, 0},
	model.RidgeSouthEastToNorthWest:  {0},
	model.RidgeSouthWestToNorthEast:  {1},
	model.RidgeNorthWestToSouthEast:  {2},
	model.RidgeNorthEastToSouthWest:  {3}}
// invertedTileTypes maps a sloped tile type to its opposite: slopes
// reverse direction, valleys become ridges and vice versa.  Used to
// derive ceiling ticks when the ceiling is not mirrored.
var invertedTileTypes = map[model.TileType]model.TileType{
	model.SlopeSouthToNorth:          model.SlopeNorthToSouth,
	model.SlopeWestToEast:            model.SlopeEastToWest,
	model.SlopeNorthToSouth:          model.SlopeSouthToNorth,
	model.SlopeEastToWest:            model.SlopeWestToEast,
	model.ValleySouthEastToNorthWest: model.RidgeNorthWestToSouthEast,
	model.ValleySouthWestToNorthEast: model.RidgeNorthEastToSouthWest,
	model.ValleyNorthWestToSouthEast: model.RidgeSouthEastToNorthWest,
	model.ValleyNorthEastToSouthWest: model.RidgeSouthWestToNorthEast,
	model.RidgeSouthEastToNorthWest:  model.ValleyNorthWestToSouthEast,
	model.RidgeSouthWestToNorthEast:  model.ValleyNorthEastToSouthWest,
	model.RidgeNorthWestToSouthEast:  model.ValleySouthEastToNorthWest,
	model.RidgeNorthEastToSouthWest:  model.ValleySouthWestToNorthEast}
// Render draws slope tick markers for every tile with a non-zero
// slope height: floor-hued triangles at the corners given by
// slopeTicksByType, and ceiling-hued triangles either mirrored or
// inverted depending on the tile's slope control.
func (renderable *TileSlopeMapRenderable) Render() {
	gl := renderable.context.OpenGl()
	// Vertex offsets into the static buffer created in
	// NewTileSlopeMapRenderable: per corner one floor triangle and one
	// ceiling triangle, three vertices each.
	floorStarts := []int32{0, 6, 12, 18}
	ceilingStarts := []int32{3, 9, 15, 21}
	renderable.vao.OnShader(func() {
		renderable.viewMatrixUniform.Set(gl, renderable.context.ViewMatrix())
		renderable.projectionMatrixUniform.Set(gl, renderable.context.ProjectionMatrix())
		for y, row := range renderable.tiles {
			for x, tile := range row {
				// NOTE(review): SlopeHeight, SlopeControl and Type are
				// dereferenced without nil checks — confirm SetTile only
				// ever stores fully populated properties.
				if tile != nil && (*tile.SlopeHeight > 0) {
					// Place the unit-sized marker geometry at the tile center.
					modelMatrix := mgl.Ident4().
						Mul4(mgl.Translate3D((float32(x)+0.5)*fineCoordinatesPerTileSide, (float32(y)+0.5)*fineCoordinatesPerTileSide, 0.0)).
						Mul4(mgl.Scale3D(fineCoordinatesPerTileSide, fineCoordinatesPerTileSide, 1.0))

					floorTicks := []int{}
					ceilingTicks := []int{}
					if *tile.SlopeControl != model.SlopeFloorFlat {
						floorTicks = slopeTicksByType[*tile.Type]
					}
					if *tile.SlopeControl == model.SlopeCeilingMirrored {
						// Mirrored ceiling slopes the same way as the floor.
						ceilingTicks = slopeTicksByType[*tile.Type]
					} else if *tile.SlopeControl != model.SlopeCeilingFlat {
						// Otherwise the ceiling slopes the opposite way.
						if invertedType, inversible := invertedTileTypes[*tile.Type]; inversible {
							ceilingTicks = slopeTicksByType[invertedType]
						}
					}

					renderable.modelMatrixUniform.Set(gl, &modelMatrix)
					for _, index := range floorTicks {
						gl.DrawArrays(opengl.TRIANGLES, floorStarts[index], 3)
					}
					for _, index := range ceilingTicks {
						gl.DrawArrays(opengl.TRIANGLES, ceilingStarts[index], 3)
					}
				}
			}
		}
	})
}
package tensor
import (
"github.com/pkg/errors"
"gorgonia.org/tensor/internal/storage"
)
// Compile-time assertions that StdEng satisfies the element-wise
// min/max engine interfaces.
var (
	_ MinBetweener = StdEng{}
	_ MaxBetweener = StdEng{}
)
// MinBetween computes the element-wise minimum of a and b.  Depending
// on the func opts the result is written into a (unsafe), into a
// caller-supplied reuse tensor, or into a newly allocated tensor.
func (e StdEng) MinBetween(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
	// Only defined for ordered dtypes; shapes must be compatible.
	if err = binaryCheck(a, b, ordTypes); err != nil {
		return nil, errors.Wrapf(err, "MinBetween failed")
	}

	var reuse DenseTensor
	var safe bool
	if reuse, safe, _, _, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil {
		return nil, errors.Wrap(err, "Unable to handle funcOpts")
	}

	typ := a.Dtype().Type
	var dataA, dataB, dataReuse *storage.Header
	var ait, bit, iit Iterator
	var useIter, swap bool
	// prepDataVV extracts raw headers and decides whether an
	// iterator-driven (strided) path is required.
	if dataA, dataB, dataReuse, ait, bit, iit, useIter, swap, err = prepDataVV(a, b, reuse); err != nil {
		return nil, errors.Wrapf(err, "StdEng.MinBetween")
	}
	// check to see if anything needs to be created
	if reuse == nil {
		// swap indicates that b's metadata should shape the result.
		if swap {
			reuse = NewDense(b.Dtype(), b.Shape().Clone(), WithEngine(e))
		} else {
			reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e))
		}
		dataReuse = reuse.hdr()
		if useIter {
			iit = IteratorFromDense(reuse)
		}
	}

	if useIter {
		switch {
		// NOTE(review): reuse is always non-nil at this point (it is
		// allocated above when nil), so this case looks unreachable and
		// an unsafe call would fall into the panic below — verify the
		// unsafe path.
		case !safe && reuse == nil:
			err = e.E.MinBetweenIter(typ, dataA, dataB, ait, bit)
			retVal = a
		case safe && reuse != nil:
			// Copy a into the destination first, then fold b in.
			storage.CopyIter(typ, dataReuse, dataA, iit, ait)
			ait.Reset()
			iit.Reset()
			err = e.E.MinBetweenIter(typ, dataReuse, dataB, iit, bit)
			retVal = reuse
		default: // safe && bool
			panic("Unreachable")
		}
		return
	}

	// standard
	switch {
	case !safe && reuse == nil:
		err = e.E.MinBetween(typ, dataA, dataB)
		retVal = a
	case safe && reuse != nil:
		storage.Copy(typ, dataReuse, dataA)
		err = e.E.MinBetween(typ, dataReuse, dataB)
		retVal = reuse
	default:
		panic("Unreachable")
	}
	return
}
// MaxBetween computes the element-wise maximum of a and b.  Depending
// on the func opts the result is written into a (unsafe), into a
// caller-supplied reuse tensor, or into a newly allocated tensor.
func (e StdEng) MaxBetween(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
	// Only defined for ordered dtypes; shapes must be compatible.
	if err = binaryCheck(a, b, ordTypes); err != nil {
		return nil, errors.Wrapf(err, "MaxBetween failed")
	}

	var reuse DenseTensor
	var safe bool
	if reuse, safe, _, _, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil {
		return nil, errors.Wrap(err, "Unable to handle funcOpts")
	}

	typ := a.Dtype().Type
	var dataA, dataB, dataReuse *storage.Header
	var ait, bit, iit Iterator
	var useIter, swap bool
	// prepDataVV extracts raw headers and decides whether an
	// iterator-driven (strided) path is required.
	if dataA, dataB, dataReuse, ait, bit, iit, useIter, swap, err = prepDataVV(a, b, reuse); err != nil {
		return nil, errors.Wrapf(err, "StdEng.MaxBetween")
	}
	// check to see if anything needs to be created
	if reuse == nil {
		// swap indicates that b's metadata should shape the result.
		if swap {
			reuse = NewDense(b.Dtype(), b.Shape().Clone(), WithEngine(e))
		} else {
			reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e))
		}
		dataReuse = reuse.hdr()
		if useIter {
			iit = IteratorFromDense(reuse)
		}
	}

	if useIter {
		switch {
		// NOTE(review): reuse is always non-nil at this point (it is
		// allocated above when nil), so this case looks unreachable and
		// an unsafe call would fall into the panic below — verify the
		// unsafe path.
		case !safe && reuse == nil:
			err = e.E.MaxBetweenIter(typ, dataA, dataB, ait, bit)
			retVal = a
		case safe && reuse != nil:
			// Copy a into the destination first, then fold b in.
			storage.CopyIter(typ, dataReuse, dataA, iit, ait)
			ait.Reset()
			iit.Reset()
			err = e.E.MaxBetweenIter(typ, dataReuse, dataB, iit, bit)
			retVal = reuse
		default: // safe && bool
			panic("Unreachable")
		}
		return
	}

	// standard
	switch {
	case !safe && reuse == nil:
		err = e.E.MaxBetween(typ, dataA, dataB)
		retVal = a
	case safe && reuse != nil:
		storage.Copy(typ, dataReuse, dataA)
		err = e.E.MaxBetween(typ, dataReuse, dataB)
		retVal = reuse
	default:
		panic("Unreachable")
	}
	return
}
// MinBetweenScalar computes the element-wise minimum of tensor t and
// scalar s.  leftTensor reports whether t is the left operand.
// Depending on the func opts the result is written into t (unsafe),
// into a reuse tensor, or into a newly allocated tensor.
func (e StdEng) MinBetweenScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncOpt) (retVal Tensor, err error) {
	// t must have an ordered dtype and s must match it.
	if err = unaryCheck(t, ordTypes); err != nil {
		return nil, errors.Wrapf(err, "MinBetween failed")
	}
	if err = scalarDtypeCheck(t, s); err != nil {
		return nil, errors.Wrap(err, "MinBetween failed")
	}

	var reuse DenseTensor
	var safe bool
	if reuse, safe, _, _, _, err = handleFuncOpts(t.Shape(), t.Dtype(), t.DataOrder(), true, opts...); err != nil {
		return nil, errors.Wrap(err, "Unable to handle funcOpts")
	}
	a := t
	typ := t.Dtype().Type
	var ait, bit, iit Iterator
	var dataA, dataB, dataReuse, scalarHeader *storage.Header
	var useIter, newAlloc bool

	// The scalar is wrapped in a (possibly newly allocated) header on
	// the side of the operation it occupies; scalarHeader is kept so
	// it can be returned to the pool afterwards.
	if leftTensor {
		if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil {
			return nil, errors.Wrapf(err, opFail, "StdEng.MinBetween")
		}
		scalarHeader = dataB
	} else {
		if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil {
			return nil, errors.Wrapf(err, opFail, "StdEng.MinBetween")
		}
		scalarHeader = dataA
	}

	// check to see if anything needs to be created
	if reuse == nil {
		reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e))
		dataReuse = reuse.hdr()
		if useIter {
			iit = IteratorFromDense(reuse)
		}
	}

	if useIter {
		switch {
		// NOTE(review): reuse is always non-nil here (allocated above
		// when nil), so this case looks unreachable — verify the unsafe
		// path.
		case !safe && reuse == nil:
			err = e.E.MinBetweenIter(typ, dataA, dataB, ait, bit)
			retVal = a
		case safe && reuse != nil && !leftTensor:
			storage.CopyIter(typ, dataReuse, dataB, iit, bit)
			bit.Reset()
			iit.Reset()
			// NOTE(review): dataReuse is iterated with bit here rather
			// than iit — confirm this is intended.
			err = e.E.MinBetweenIter(typ, dataA, dataReuse, ait, bit)
			retVal = reuse
		case safe && reuse != nil && leftTensor:
			storage.CopyIter(typ, dataReuse, dataA, iit, ait)
			ait.Reset()
			iit.Reset()
			err = e.E.MinBetweenIter(typ, dataReuse, dataB, iit, bit)
			retVal = reuse
		default: // safe && bool
			panic("Unreachable")
		}
		// Release the temporary scalar header.
		if newAlloc {
			freeScalar(scalarHeader.Raw)
		}
		returnHeader(scalarHeader)
		return
	}

	// handle special case where A and B have both len 1
	if len(dataA.Raw) == int(typ.Size()) && len(dataB.Raw) == int(typ.Size()) {
		switch {
		case safe && reuse != nil && leftTensor:
			storage.Copy(typ, dataReuse, dataA)
			err = e.E.MinBetween(typ, dataReuse, dataB)
			retVal = reuse
			return
		case safe && reuse != nil && !leftTensor:
			storage.Copy(typ, dataReuse, dataB)
			err = e.E.MinBetween(typ, dataReuse, dataA)
			retVal = reuse
			return
		}
	}

	// standard
	switch {
	case !safe && reuse == nil:
		err = e.E.MinBetween(typ, dataA, dataB)
		retVal = a
	case safe && reuse != nil && leftTensor:
		storage.Copy(typ, dataReuse, dataA)
		err = e.E.MinBetween(typ, dataReuse, dataB)
		retVal = reuse
	case safe && reuse != nil && !leftTensor:
		storage.Copy(typ, dataReuse, dataB)
		err = e.E.MinBetween(typ, dataA, dataReuse)
		retVal = reuse
	default:
		panic("Unreachable")
	}
	// Release the temporary scalar header.
	if newAlloc {
		freeScalar(scalarHeader.Raw)
	}
	returnHeader(scalarHeader)
	return
}
// MaxBetweenScalar computes the element-wise maximum of tensor t and
// scalar s.  leftTensor reports whether t is the left operand.
// Depending on the func opts the result is written into t (unsafe),
// into a reuse tensor, or into a newly allocated tensor.
func (e StdEng) MaxBetweenScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncOpt) (retVal Tensor, err error) {
	// t must have an ordered dtype and s must match it.
	if err = unaryCheck(t, ordTypes); err != nil {
		return nil, errors.Wrapf(err, "MaxBetween failed")
	}
	if err = scalarDtypeCheck(t, s); err != nil {
		return nil, errors.Wrap(err, "MaxBetween failed")
	}

	var reuse DenseTensor
	var safe bool
	if reuse, safe, _, _, _, err = handleFuncOpts(t.Shape(), t.Dtype(), t.DataOrder(), true, opts...); err != nil {
		return nil, errors.Wrap(err, "Unable to handle funcOpts")
	}
	a := t
	typ := t.Dtype().Type
	var ait, bit, iit Iterator
	var dataA, dataB, dataReuse, scalarHeader *storage.Header
	var useIter, newAlloc bool

	// The scalar is wrapped in a (possibly newly allocated) header on
	// the side of the operation it occupies; scalarHeader is kept so
	// it can be returned to the pool afterwards.
	if leftTensor {
		if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil {
			return nil, errors.Wrapf(err, opFail, "StdEng.MaxBetween")
		}
		scalarHeader = dataB
	} else {
		if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil {
			return nil, errors.Wrapf(err, opFail, "StdEng.MaxBetween")
		}
		scalarHeader = dataA
	}

	// check to see if anything needs to be created
	if reuse == nil {
		reuse = NewDense(a.Dtype(), a.Shape().Clone(), WithEngine(e))
		dataReuse = reuse.hdr()
		if useIter {
			iit = IteratorFromDense(reuse)
		}
	}

	if useIter {
		switch {
		// NOTE(review): reuse is always non-nil here (allocated above
		// when nil), so this case looks unreachable — verify the unsafe
		// path.
		case !safe && reuse == nil:
			err = e.E.MaxBetweenIter(typ, dataA, dataB, ait, bit)
			retVal = a
		case safe && reuse != nil && !leftTensor:
			storage.CopyIter(typ, dataReuse, dataB, iit, bit)
			bit.Reset()
			iit.Reset()
			// NOTE(review): dataReuse is iterated with bit here rather
			// than iit — confirm this is intended.
			err = e.E.MaxBetweenIter(typ, dataA, dataReuse, ait, bit)
			retVal = reuse
		case safe && reuse != nil && leftTensor:
			storage.CopyIter(typ, dataReuse, dataA, iit, ait)
			ait.Reset()
			iit.Reset()
			err = e.E.MaxBetweenIter(typ, dataReuse, dataB, iit, bit)
			retVal = reuse
		default: // safe && bool
			panic("Unreachable")
		}
		// Release the temporary scalar header.
		if newAlloc {
			freeScalar(scalarHeader.Raw)
		}
		returnHeader(scalarHeader)
		return
	}

	// handle special case where A and B have both len 1
	if len(dataA.Raw) == int(typ.Size()) && len(dataB.Raw) == int(typ.Size()) {
		switch {
		case safe && reuse != nil && leftTensor:
			storage.Copy(typ, dataReuse, dataA)
			err = e.E.MaxBetween(typ, dataReuse, dataB)
			retVal = reuse
			return
		case safe && reuse != nil && !leftTensor:
			storage.Copy(typ, dataReuse, dataB)
			err = e.E.MaxBetween(typ, dataReuse, dataA)
			retVal = reuse
			return
		}
	}

	// standard
	switch {
	case !safe && reuse == nil:
		err = e.E.MaxBetween(typ, dataA, dataB)
		retVal = a
	case safe && reuse != nil && leftTensor:
		storage.Copy(typ, dataReuse, dataA)
		err = e.E.MaxBetween(typ, dataReuse, dataB)
		retVal = reuse
	case safe && reuse != nil && !leftTensor:
		storage.Copy(typ, dataReuse, dataB)
		err = e.E.MaxBetween(typ, dataA, dataReuse)
		retVal = reuse
	default:
		panic("Unreachable")
	}
	// Release the temporary scalar header.
	if newAlloc {
		freeScalar(scalarHeader.Raw)
	}
	returnHeader(scalarHeader)
	return
}
package basic
// PMapIONumber is template to generate itself for different combination of data type.
// The returned string is Go test source; the generator substitutes the
// <FINPUT_TYPE>, <FOUTPUT_TYPE>, <INPUT_TYPE> and <OUTPUT_TYPE> placeholders.
func PMapIONumber() string {
	return `
func TestPmap<FINPUT_TYPE><FOUTPUT_TYPE>(t *testing.T) {
	// Test : add 1 to the list
	expectedList := []<OUTPUT_TYPE>{2, 3, 4}
	newList := PMap<FINPUT_TYPE><FOUTPUT_TYPE>(plusOne<FINPUT_TYPE><FOUTPUT_TYPE>, []<INPUT_TYPE>{1, 2, 3})
	if newList[0] != expectedList[0] || newList[1] != expectedList[1] || newList[2] != expectedList[2] {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE> failed. expected=%v, actual=%v", expectedList, newList)
	}

	if len(PMap<FINPUT_TYPE><FOUTPUT_TYPE>(nil, nil)) > 0 {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE> failed")
	}

	if len(PMap<FINPUT_TYPE><FOUTPUT_TYPE>(nil, []<INPUT_TYPE>{})) > 0 {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE> failed")
	}

	reflect.TypeOf("Nandeshwar") // Leaving it here to make use of import reflect

	expectedList = []<OUTPUT_TYPE>{2, 3, 4}
	newList = PMap<FINPUT_TYPE><FOUTPUT_TYPE>(plusOne<FINPUT_TYPE><FOUTPUT_TYPE>, []<INPUT_TYPE>{1, 2, 3}, Optional{FixedPool: 2, RandomOrder: true})
	count := 0
	for _, v := range expectedList {
		for _, x := range newList {
			if v == x {
				count++
			}
		}
	}
	if count != len(expectedList) {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE> failed.expected len=%v, actual len=%v", len(expectedList), count)
	}
}

func plusOne<FINPUT_TYPE><FOUTPUT_TYPE>(num <INPUT_TYPE>) <OUTPUT_TYPE> {
	return <OUTPUT_TYPE>(num + 1)
}
`
}
// PMapIOStrNumber is template to generate itself for different combination of data type.
// Covers string input to numeric output; placeholders are substituted
// by the generator.
func PMapIOStrNumber() string {
	return `
func TestPmap<FINPUT_TYPE><FOUTPUT_TYPE>(t *testing.T) {
	// Test : someLogic
	expectedList := []<OUTPUT_TYPE>{10}
	newList := PMap<FINPUT_TYPE><FOUTPUT_TYPE>(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>, []<INPUT_TYPE>{"ten"})
	if newList[0] != expectedList[0] {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE> failed. expected=%v, actual=%v", expectedList, newList)
	}

	if len(PMap<FINPUT_TYPE><FOUTPUT_TYPE>(nil, nil)) > 0 {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE> failed")
	}

	if len(PMap<FINPUT_TYPE><FOUTPUT_TYPE>(nil, []<INPUT_TYPE>{})) > 0 {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE> failed")
	}

	reflect.TypeOf("Nandeshwar") // Leaving it here to make use of import reflect

	expectedList = []<OUTPUT_TYPE>{10}
	newList = PMap<FINPUT_TYPE><FOUTPUT_TYPE>(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>, []<INPUT_TYPE>{"ten", "one"}, Optional{FixedPool: 1, RandomOrder: true})
	if newList[0] != expectedList[0] || newList[1] != 0 {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE> failed. expected=%v, actual=%v", expectedList, newList)
	}
}

func someLogic<FINPUT_TYPE><FOUTPUT_TYPE>(num <INPUT_TYPE>) <OUTPUT_TYPE> {
	if num == "ten" {
		return <OUTPUT_TYPE>(10)
	}
	return 0
}
`
}
// PMapIONumberStr is template to generate itself for different combination of data type.
// Covers numeric input to string output; placeholders are substituted
// by the generator.
func PMapIONumberStr() string {
	return `
func TestPmap<FINPUT_TYPE><FOUTPUT_TYPE>(t *testing.T) {
	// Test : someLogic
	expectedList := []<OUTPUT_TYPE>{"10"}
	newList := PMap<FINPUT_TYPE><FOUTPUT_TYPE>(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>, []<INPUT_TYPE>{10})
	if newList[0] != expectedList[0] {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE> failed. expected=%v, actual=%v", expectedList, newList)
	}

	if len(PMap<FINPUT_TYPE><FOUTPUT_TYPE>(nil, nil)) > 0 {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE> failed")
	}

	if len(PMap<FINPUT_TYPE><FOUTPUT_TYPE>(nil, []<INPUT_TYPE>{})) > 0 {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE> failed")
	}

	reflect.TypeOf("Nandeshwar") // Leaving it here to make use of import reflect

	expectedList = []<OUTPUT_TYPE>{"10"}
	newList = PMap<FINPUT_TYPE><FOUTPUT_TYPE>(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>, []<INPUT_TYPE>{10, 20}, Optional{FixedPool: 1, RandomOrder: true})
	if newList[0] != expectedList[0] || newList[1] != "0" {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE> failed. expected=%v, actual=%v", expectedList, newList)
	}
}

func someLogic<FINPUT_TYPE><FOUTPUT_TYPE>(num <INPUT_TYPE>) <OUTPUT_TYPE> {
	if num == 10 {
		return <OUTPUT_TYPE>("10")
	}
	return "0"
}
`
}
// PMapIONumberBool is template to generate itself for different combination of data type.
// Covers numeric input to bool output; placeholders are substituted by
// the generator.
func PMapIONumberBool() string {
	return `
func TestPmap<FINPUT_TYPE><FOUTPUT_TYPE>(t *testing.T) {
	// Test : someLogic
	expectedList := []<OUTPUT_TYPE>{true, false}
	newList := PMap<FINPUT_TYPE><FOUTPUT_TYPE>(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>, []<INPUT_TYPE>{10, 0})
	if newList[0] != expectedList[0] || newList[1] != expectedList[1] {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE> failed. expected=%v, actual=%v", expectedList, newList)
	}

	if len(PMap<FINPUT_TYPE><FOUTPUT_TYPE>(nil, nil)) > 0 {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE> failed")
	}

	if len(PMap<FINPUT_TYPE><FOUTPUT_TYPE>(nil, []<INPUT_TYPE>{})) > 0 {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE> failed")
	}

	reflect.TypeOf("Nandeshwar") // Leaving it here to make use of import reflect

	expectedList = []<OUTPUT_TYPE>{true, false}
	newList = PMap<FINPUT_TYPE><FOUTPUT_TYPE>(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>, []<INPUT_TYPE>{10, 0}, Optional{FixedPool: 1, RandomOrder: true})
	if newList[0] != expectedList[0] || newList[1] != false {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE> failed. expected=%v, actual=%v", expectedList, newList)
	}
}

func someLogic<FINPUT_TYPE><FOUTPUT_TYPE>(num <INPUT_TYPE>) <OUTPUT_TYPE> {
	if num > 0 {
		return true
	}
	return false
}
`
}
// PMapIOStrBool is template to generate itself for different combination of data type.
// Covers string input to bool output; placeholders are substituted by
// the generator.
func PMapIOStrBool() string {
	return `
func TestPmap<FINPUT_TYPE><FOUTPUT_TYPE>(t *testing.T) {
	// Test : someLogic
	expectedList := []<OUTPUT_TYPE>{true, false}
	newList := PMap<FINPUT_TYPE><FOUTPUT_TYPE>(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>, []<INPUT_TYPE>{"10", "0"})
	if newList[0] != expectedList[0] || newList[1] != expectedList[1] {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE> failed. expected=%v, actual=%v", expectedList, newList)
	}

	if len(PMap<FINPUT_TYPE><FOUTPUT_TYPE>(nil, nil)) > 0 {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE> failed")
	}

	if len(PMap<FINPUT_TYPE><FOUTPUT_TYPE>(nil, []<INPUT_TYPE>{})) > 0 {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE> failed")
	}

	reflect.TypeOf("Nandeshwar") // Leaving it here to make use of import reflect

	expectedList = []<OUTPUT_TYPE>{true, false}
	newList = PMap<FINPUT_TYPE><FOUTPUT_TYPE>(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>, []<INPUT_TYPE>{"10", "0"}, Optional{FixedPool: 1, RandomOrder: true})
	if newList[0] != expectedList[0] || newList[1] != false {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE> failed. expected=%v, actual=%v", expectedList, newList)
	}
}

func someLogic<FINPUT_TYPE><FOUTPUT_TYPE>(num <INPUT_TYPE>) <OUTPUT_TYPE> {
	if num != "0" {
		return true
	}
	return false
}
`
}
// PMapIOBoolNumber is template to generate itself for different combination of data type.
// Covers bool input to numeric output; placeholders are substituted by
// the generator.
func PMapIOBoolNumber() string {
	return `
func TestPmap<FINPUT_TYPE><FOUTPUT_TYPE>(t *testing.T) {
	// Test : someLogic
	expectedList := []<OUTPUT_TYPE>{10, 0}
	newList := PMap<FINPUT_TYPE><FOUTPUT_TYPE>(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>, []<INPUT_TYPE>{true, false})
	if newList[0] != expectedList[0] || newList[1] != expectedList[1] {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE> failed. expected=%v, actual=%v", expectedList, newList)
	}

	if len(PMap<FINPUT_TYPE><FOUTPUT_TYPE>(nil, nil)) > 0 {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE> failed")
	}

	if len(PMap<FINPUT_TYPE><FOUTPUT_TYPE>(nil, []<INPUT_TYPE>{})) > 0 {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE> failed")
	}

	reflect.TypeOf("Nandeshwar") // Leaving it here to make use of import reflect

	expectedList = []<OUTPUT_TYPE>{10, 0}
	newList = PMap<FINPUT_TYPE><FOUTPUT_TYPE>(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>, []<INPUT_TYPE>{true, false}, Optional{FixedPool: 1, RandomOrder: true})
	if newList[0] != expectedList[0] || newList[1] != expectedList[1] {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE> failed. expected=%v, actual=%v", expectedList, newList)
	}
}

func someLogic<FINPUT_TYPE><FOUTPUT_TYPE>(num <INPUT_TYPE>) <OUTPUT_TYPE> {
	if num == true {
		return 10
	}
	return 0
}
`
}
// PMapIOBoolStr is template to generate itself for different combination of data type.
// Covers bool input to string output; placeholders are substituted by
// the generator.
func PMapIOBoolStr() string {
	return `
func TestPmap<FINPUT_TYPE><FOUTPUT_TYPE>(t *testing.T) {
	// Test : someLogic
	expectedList := []<OUTPUT_TYPE>{"10", "0"}
	newList := PMap<FINPUT_TYPE><FOUTPUT_TYPE>(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>, []<INPUT_TYPE>{true, false})
	if newList[0] != expectedList[0] || newList[1] != expectedList[1] {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE> failed. expected=%v, actual=%v", expectedList, newList)
	}

	if len(PMap<FINPUT_TYPE><FOUTPUT_TYPE>(nil, nil)) > 0 {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE> failed")
	}

	if len(PMap<FINPUT_TYPE><FOUTPUT_TYPE>(nil, []<INPUT_TYPE>{})) > 0 {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE> failed")
	}

	reflect.TypeOf("Nandeshwar") // Leaving it here to make use of import reflect

	expectedList = []<OUTPUT_TYPE>{"10", "0"}
	newList = PMap<FINPUT_TYPE><FOUTPUT_TYPE>(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>, []<INPUT_TYPE>{true, false}, Optional{FixedPool: 1, RandomOrder: true})
	if newList[0] != expectedList[0] || newList[1] != expectedList[1] {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE> failed. expected=%v, actual=%v", expectedList, newList)
	}
}

func someLogic<FINPUT_TYPE><FOUTPUT_TYPE>(num <INPUT_TYPE>) <OUTPUT_TYPE> {
	if num == true {
		return "10"
	}
	return "0"
}
`
}
// PMapIONumberErr is template to generate itself for different combination of data type.
// Error-returning variant for numeric input/output; placeholders are
// substituted by the generator.
func PMapIONumberErr() string {
	return `
func TestPmap<FINPUT_TYPE><FOUTPUT_TYPE>Err(t *testing.T) {
	// Test : add 1 to the list
	expectedList := []<OUTPUT_TYPE>{2, 3}
	newList, _ := PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(plusOne<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{1, 2})
	if newList[0] != expectedList[0] || newList[1] != expectedList[1] {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE> failed. expected=%v, actual=%v", expectedList, newList)
	}

	r, _ := PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(nil, nil)
	if len(r) > 0 {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
	}

	r, _ = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(nil, []<INPUT_TYPE>{})
	if len(r) > 0 {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
	}

	_, err := PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(plusOne<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{1, 2, 3})
	if err == nil {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
	}

	reflect.TypeOf("Nandeshwar") // Leaving it here to make use of import reflect

	_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(plusOne<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{3, 3, 2}, Optional{FixedPool: 1})
	if err == nil {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
	}

	_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(plusOne<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{3, 3, 2}, Optional{FixedPool: 2})
	if err == nil {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
	}

	_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(plusOne<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{3, 1, 3}, Optional{FixedPool: 1})
	if err == nil {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
	}

	_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(plusOne<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{1, 2, 3}, Optional{FixedPool: 1})
	if err == nil {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
	}

	expectedList = []<OUTPUT_TYPE>{2, 3}
	newList, _ = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(plusOne<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{1, 2}, Optional{RandomOrder: true})
	counter := 0
	for i := 0; i < len(expectedList); i++ {
		for j := 0; j < len(newList); j++ {
			if expectedList[i] == newList[j] {
				counter++
				break
			}
		}
	}
	if counter != len(expectedList) {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE> failed")
	}

	_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(plusOne<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{1, 2, 3}, Optional{FixedPool: 1, RandomOrder: true})
	if err == nil {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
	}

	_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(plusOne<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{1, 2, 3}, Optional{FixedPool: 2, RandomOrder: true})
	if err == nil {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
	}

	_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(plusOne<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{3, 3, 1}, Optional{FixedPool: 1, RandomOrder: true})
	if err == nil {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
	}

	_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(plusOne<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{3, 2, 3}, Optional{FixedPool: 2, RandomOrder: true})
	if err == nil {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
	}

	_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(plusOne<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{1, 2, 3}, Optional{FixedPool: 2, RandomOrder: true})
	if err == nil {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
	}
}
`
}
// PMapIOStrNumberErr is template to generate itself for different combination of data type.
// Error-returning variant for string input to numeric output;
// placeholders are substituted by the generator.
func PMapIOStrNumberErr() string {
	return `
func TestPmap<FINPUT_TYPE><FOUTPUT_TYPE>Err(t *testing.T) {
	// Test : someLogic
	expectedList := []<OUTPUT_TYPE>{10}
	newList, _ := PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{"ten"})
	if newList[0] != expectedList[0] {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed. expected=%v, actual=%v", expectedList, newList)
	}

	r, _ := PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(nil, nil)
	if len(r) > 0 {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
	}

	r, _ = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(nil, []<INPUT_TYPE>{})
	if len(r) > 0 {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE> failed")
	}

	_, err := PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{"ten", "0"})
	if err == nil {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
	}

	reflect.TypeOf("Nandeshwar") // Leaving it here to make use of import reflect

	_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{"0", "0", "ten"}, Optional{FixedPool: 1})
	if err == nil {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
	}

	_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{"0", "0", "ten"}, Optional{FixedPool: 2})
	if err == nil {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
	}

	_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{"0", "ten", "0"}, Optional{FixedPool: 1})
	if err == nil {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
	}

	_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{"ten", "ten", "0"}, Optional{FixedPool: 1})
	if err == nil {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
	}

	expectedList = []<OUTPUT_TYPE>{10, 10}
	newList, _ = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{"ten", "ten"}, Optional{RandomOrder: true})
	counter := 0
	for i := 0; i < len(expectedList); i++ {
		for j := 0; j < len(newList); j++ {
			if expectedList[i] == newList[j] {
				counter++
				break
			}
		}
	}
	if counter != len(expectedList) {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE> failed")
	}

	_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{"ten", "ten", "0"}, Optional{FixedPool: 1, RandomOrder: true})
	if err == nil {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
	}

	_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{"ten", "ten", "0"}, Optional{FixedPool: 2, RandomOrder: true})
	if err == nil {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
	}

	_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{"0", "0", "ten"}, Optional{FixedPool: 1, RandomOrder: true})
	if err == nil {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
	}

	_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{"0", "ten", "0"}, Optional{FixedPool: 2, RandomOrder: true})
	if err == nil {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
	}

	_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{"ten", "ten", "0"}, Optional{FixedPool: 2, RandomOrder: true})
	if err == nil {
		t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
	}
}
`
}
// PMapIONumberStrErr is template to generate itself for different combination of data type.
func PMapIONumberStrErr() string {
return `
func TestPmap<FINPUT_TYPE><FOUTPUT_TYPE>Err(t *testing.T) {
// Test : someLogic
expectedList := []<OUTPUT_TYPE>{"10"}
newList, _ := PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{10})
if newList[0] != expectedList[0] {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed. expected=%v, actual=%v", expectedList, newList)
}
r, _ := PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(nil, nil)
if len(r) > 0 {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
r, _ = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(nil, []<INPUT_TYPE>{})
if len(r) > 0 {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err := PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{10, 0})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
reflect.TypeOf("Nandeshwar") // Leaving it here to make use of import reflect
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{10, 10, 0}, Optional{FixedPool: 1})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{0, 0, 10}, Optional{FixedPool: 2})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{0, 10, 0}, Optional{FixedPool: 1})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{10, 10, 0}, Optional{FixedPool: 1})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
expectedList = []<OUTPUT_TYPE>{"10", "10"}
newList, _ = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{10, 10}, Optional{RandomOrder: true})
counter := 0
for i := 0; i < len(expectedList); i++ {
for j := 0; j < len(newList); j++ {
if expectedList[i] == newList[j] {
counter++
break
}
}
}
if counter != len(expectedList) {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE> failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{10, 10, 0}, Optional{FixedPool: 1, RandomOrder: true})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{10, 10, 0}, Optional{FixedPool: 2, RandomOrder: true})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{0, 0, 10}, Optional{FixedPool: 1, RandomOrder: true})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{0, 10, 0}, Optional{FixedPool: 2, RandomOrder: true})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{10, 10, 0}, Optional{FixedPool: 2, RandomOrder: true})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
}
`
}
// PMapIONumberBoolErr is template to generate itself for different combination of data type.
func PMapIONumberBoolErr() string {
return `
func TestPmap<FINPUT_TYPE><FOUTPUT_TYPE>Err(t *testing.T) {
// Test : someLogic
expectedList := []<OUTPUT_TYPE>{true, false}
newList, _ := PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{10, 0})
if newList[0] != expectedList[0] || newList[1] != expectedList[1] {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed. expected=%v, actual=%v", expectedList, newList)
}
r, _ := PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(nil, nil)
if len(r) > 0 {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
r, _ = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(nil, []<INPUT_TYPE>{})
if len(r) > 0 {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err := PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{10, 3, 3})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
reflect.TypeOf("Nandeshwar") // Leaving it here to make use of import reflect
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{10, 10, 3}, Optional{FixedPool: 1})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{3, 3, 10}, Optional{FixedPool: 2})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{3, 10, 3}, Optional{FixedPool: 1})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{10, 10, 3}, Optional{FixedPool: 1})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
expectedList = []<OUTPUT_TYPE>{true, true}
newList, _ = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{10, 10}, Optional{RandomOrder: true})
counter := 0
for i := 0; i < len(expectedList); i++ {
for j := 0; j < len(newList); j++ {
if expectedList[i] == newList[j] {
counter++
break
}
}
}
if counter != len(expectedList) {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE> failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{10, 10, 3}, Optional{FixedPool: 1, RandomOrder: true})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{10, 10, 3}, Optional{FixedPool: 2, RandomOrder: true})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{3, 3, 10}, Optional{FixedPool: 1, RandomOrder: true})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{3, 10, 3}, Optional{FixedPool: 2, RandomOrder: true})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{10, 10, 3}, Optional{FixedPool: 2, RandomOrder: true})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
}
`
}
// PMapIOStrBoolErr is template to generate itself for different combination of data type.
func PMapIOStrBoolErr() string {
return `
func TestPmap<FINPUT_TYPE><FOUTPUT_TYPE>Err(t *testing.T) {
// Test : someLogic
expectedList := []<OUTPUT_TYPE>{true, false}
newList, _ := PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{"10", "0"})
if newList[0] != expectedList[0] || newList[1] != expectedList[1] {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed. expected=%v, actual=%v", expectedList, newList)
}
r, _ := PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(nil, nil)
if len(r) > 0 {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
r, _ = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(nil, []<INPUT_TYPE>{})
if len(r) > 0 {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err := PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{"10", "0", "3"})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
reflect.TypeOf("Nandeshwar") // Leaving it here to make use of import reflect
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{"10", "10", "3"}, Optional{FixedPool: 1})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{"3", "3", "10"}, Optional{FixedPool: 2})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{"3", "10", "3"}, Optional{FixedPool: 1})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{"10", "10", "3"}, Optional{FixedPool: 1})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
expectedList = []<OUTPUT_TYPE>{true, true}
newList, _ = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{"10", "10"}, Optional{RandomOrder: true})
counter := 0
for i := 0; i < len(expectedList); i++ {
for j := 0; j < len(newList); j++ {
if expectedList[i] == newList[j] {
counter++
break
}
}
}
if counter != len(expectedList) {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE> failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{"10", "10", "3"}, Optional{FixedPool: 1, RandomOrder: true})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{"10", "10", "3"}, Optional{FixedPool: 2, RandomOrder: true})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{"3", "3", "10"}, Optional{FixedPool: 1, RandomOrder: true})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{"3", "10", "3"}, Optional{FixedPool: 2, RandomOrder: true})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{"10", "10", "3"}, Optional{FixedPool: 2, RandomOrder: true})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
}
`
}
// PMapIOBoolNumberErr is template to generate itself for different combination of data type.
func PMapIOBoolNumberErr() string {
return `
func TestPmap<FINPUT_TYPE><FOUTPUT_TYPE>Err(t *testing.T) {
// Test : someLogic
expectedList := []<OUTPUT_TYPE>{10, 10}
newList, _ := PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{true, true})
if newList[0] != expectedList[0] || newList[1] != expectedList[1] {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed. expected=%v, actual=%v", expectedList, newList)
}
r, _ := PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(nil, nil)
if len(r) > 0 {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
r, _ = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(nil, []<INPUT_TYPE>{})
if len(r) > 0 {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err := PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{true, false})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
reflect.TypeOf("Nandeshwar") // Leaving it here to make use of import reflect
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{true, true, false}, Optional{FixedPool: 1})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{false, false, true}, Optional{FixedPool: 2})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{false, true, false}, Optional{FixedPool: 1})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{true, true, false}, Optional{FixedPool: 1})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
expectedList = []<OUTPUT_TYPE>{10, 10}
newList, _ = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{true, true}, Optional{RandomOrder: true})
counter := 0
for i := 0; i < len(expectedList); i++ {
for j := 0; j < len(newList); j++ {
if expectedList[i] == newList[j] {
counter++
break
}
}
}
if counter != len(expectedList) {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE> failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{true, true, false}, Optional{FixedPool: 1, RandomOrder: true})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{true, true, false}, Optional{FixedPool: 2, RandomOrder: true})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{false, false, true}, Optional{FixedPool: 1, RandomOrder: true})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{false, true, false}, Optional{FixedPool: 2, RandomOrder: true})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{true, true, false}, Optional{FixedPool: 2, RandomOrder: true})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
}
`
}
// PMapIOBoolStrErr is template to generate itself for different combination of data type.
func PMapIOBoolStrErr() string {
return `
func TestPmap<FINPUT_TYPE><FOUTPUT_TYPE>Err(t *testing.T) {
// Test : someLogic
expectedList := []<OUTPUT_TYPE>{"10", "10"}
newList, _ := PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{true, true})
if newList[0] != expectedList[0] || newList[1] != expectedList[1] {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed. expected=%v, actual=%v", expectedList, newList)
}
r, _ := PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(nil, nil)
if len(r) > 0 {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
r, _ = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(nil, []<INPUT_TYPE>{})
if len(r) > 0 {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err := PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{true, false})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
reflect.TypeOf("Nandeshwar") // Leaving it here to make use of import reflect
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{true, true, false}, Optional{FixedPool: 1})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{false, false, true}, Optional{FixedPool: 2})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{false, true, false}, Optional{FixedPool: 1})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{true, true, false}, Optional{FixedPool: 1})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
expectedList = []<OUTPUT_TYPE>{"10", "10"}
newList, _ = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{true, true}, Optional{RandomOrder: true})
counter := 0
for i := 0; i < len(expectedList); i++ {
for j := 0; j < len(newList); j++ {
if expectedList[i] == newList[j] {
counter++
break
}
}
}
if counter != len(expectedList) {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE> failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{true, true, false}, Optional{FixedPool: 1, RandomOrder: true})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{true, true, false}, Optional{FixedPool: 2, RandomOrder: true})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{false, false, true}, Optional{FixedPool: 1, RandomOrder: true})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{false, true, false}, Optional{FixedPool: 2, RandomOrder: true})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{true, true, false}, Optional{FixedPool: 2, RandomOrder: true})
if err == nil {
t.Errorf("PMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
}
`
} | internal/template/basic/pmapiotest.go | 0.602997 | 0.495606 | pmapiotest.go | starcoder |
package iso20022
// Tax related to an investment fund order.
type InformativeTax1 struct {
// Amount included in the dividend that corresponds to gains directly or indirectly derived from interest payment in the scope of the European Directive on taxation of savings income in the form of interest payments.
TaxableIncomePerDividend *ActiveCurrencyAndAmount `xml:"TaxblIncmPerDvdd,omitempty"`
// Specifies whether capital gain is in the scope of the European directive on taxation of savings income in the form of interest payments (Council Directive 2003/48/EC 3 June), or an income realised upon sale, a refund or redemption of shares and units, etc.
EUCapitalGain *EUCapitalGain3Choice `xml:"EUCptlGn,omitempty"`
// Specifies whether dividend is in the scope of the European directive on taxation of savings income in the form of interest payments (Council Directive 2003/48/EC 3 June), or an income realised upon sale, a refund or redemption of shares and units, etc.
EUDividendStatus *EUDividendStatusType2Choice `xml:"EUDvddSts,omitempty"`
// Percentage of the underlying assets of the funds that represents a debt and is in the scope of the European directive on taxation of savings income in the form of interest payments (Council Directive 2003/48/EC 3 June).
PercentageOfDebtClaim *PercentageRate `xml:"PctgOfDebtClm,omitempty"`
// Information related to a specific tax that is provided for information purposes.
IndividualTax []*Tax32 `xml:"IndvTax,omitempty"`
}
func (i *InformativeTax1) SetTaxableIncomePerDividend(value, currency string) {
i.TaxableIncomePerDividend = NewActiveCurrencyAndAmount(value, currency)
}
func (i *InformativeTax1) AddEUCapitalGain() *EUCapitalGain3Choice {
i.EUCapitalGain = new(EUCapitalGain3Choice)
return i.EUCapitalGain
}
func (i *InformativeTax1) AddEUDividendStatus() *EUDividendStatusType2Choice {
i.EUDividendStatus = new(EUDividendStatusType2Choice)
return i.EUDividendStatus
}
func (i *InformativeTax1) SetPercentageOfDebtClaim(value string) {
i.PercentageOfDebtClaim = (*PercentageRate)(&value)
}
func (i *InformativeTax1) AddIndividualTax() *Tax32 {
newValue := new(Tax32)
i.IndividualTax = append(i.IndividualTax, newValue)
return newValue
} | InformativeTax1.go | 0.791821 | 0.64607 | InformativeTax1.go | starcoder |
package otkafka
import (
"time"
"github.com/go-kit/kit/metrics"
"github.com/segmentio/kafka-go"
)
type readerCollector struct {
factory ReaderFactory
stats *ReaderStats
interval time.Duration
}
// ThreeStats is a gauge group struct.
type ThreeStats struct {
Min metrics.Gauge
Max metrics.Gauge
Avg metrics.Gauge
}
// ReaderStats is a collection of metrics for kafka reader info.
type ReaderStats struct {
Dials metrics.Counter
Fetches metrics.Counter
Messages metrics.Counter
Bytes metrics.Counter
Rebalances metrics.Counter
Timeouts metrics.Counter
Errors metrics.Counter
Offset metrics.Gauge
Lag metrics.Gauge
MinBytes metrics.Gauge
MaxBytes metrics.Gauge
MaxWait metrics.Gauge
QueueLength metrics.Gauge
QueueCapacity metrics.Gauge
DialTime ThreeStats
ReadTime ThreeStats
WaitTime ThreeStats
FetchSize ThreeStats
FetchBytes ThreeStats
}
// newCollector creates a new kafka reader wrapper containing the name of the reader.
func newReaderCollector(factory ReaderFactory, stats *ReaderStats, interval time.Duration) *readerCollector {
return &readerCollector{
factory: factory,
stats: stats,
interval: interval,
}
}
// collectConnectionStats collects kafka reader info for Prometheus to scrape.
func (d *readerCollector) collectConnectionStats() {
for k, v := range d.factory.List() {
reader := v.Conn.(*kafka.Reader)
stats := reader.Stats()
withValues := []string{"reader", k, "client_id", stats.ClientID, "topic", stats.Topic, "partition", stats.Partition}
d.stats.Dials.With(withValues...).Add(float64(stats.Dials))
d.stats.Fetches.With(withValues...).Add(float64(stats.Fetches))
d.stats.Messages.With(withValues...).Add(float64(stats.Messages))
d.stats.Bytes.With(withValues...).Add(float64(stats.Bytes))
d.stats.Rebalances.With(withValues...).Add(float64(stats.Rebalances))
d.stats.Timeouts.With(withValues...).Add(float64(stats.Timeouts))
d.stats.Errors.With(withValues...).Add(float64(stats.Errors))
d.stats.Offset.With(withValues...).Set(float64(stats.Offset))
d.stats.Lag.With(withValues...).Set(float64(stats.Lag))
d.stats.MinBytes.With(withValues...).Set(float64(stats.MinBytes))
d.stats.MaxBytes.With(withValues...).Set(float64(stats.MaxBytes))
d.stats.MaxWait.With(withValues...).Set(stats.MaxWait.Seconds())
d.stats.QueueLength.With(withValues...).Set(float64(stats.QueueLength))
d.stats.QueueCapacity.With(withValues...).Set(float64(stats.QueueCapacity))
d.stats.DialTime.Min.With(withValues...).Set(stats.DialTime.Min.Seconds())
d.stats.DialTime.Max.With(withValues...).Set(stats.DialTime.Max.Seconds())
d.stats.DialTime.Avg.With(withValues...).Set(stats.DialTime.Avg.Seconds())
d.stats.ReadTime.Min.With(withValues...).Set(stats.ReadTime.Min.Seconds())
d.stats.ReadTime.Max.With(withValues...).Set(stats.ReadTime.Max.Seconds())
d.stats.ReadTime.Avg.With(withValues...).Set(stats.ReadTime.Avg.Seconds())
d.stats.WaitTime.Min.With(withValues...).Set(stats.WaitTime.Min.Seconds())
d.stats.WaitTime.Max.With(withValues...).Set(stats.WaitTime.Max.Seconds())
d.stats.WaitTime.Avg.With(withValues...).Set(stats.WaitTime.Avg.Seconds())
d.stats.FetchSize.Min.With(withValues...).Set(float64(stats.FetchSize.Min))
d.stats.FetchSize.Max.With(withValues...).Set(float64(stats.FetchSize.Max))
d.stats.FetchSize.Avg.With(withValues...).Set(float64(stats.FetchSize.Avg))
d.stats.FetchBytes.Min.With(withValues...).Set(float64(stats.FetchBytes.Min))
d.stats.FetchBytes.Max.With(withValues...).Set(float64(stats.FetchBytes.Max))
d.stats.FetchBytes.Avg.With(withValues...).Set(float64(stats.FetchBytes.Avg))
}
} | otkafka/reader_metrics.go | 0.568775 | 0.539226 | reader_metrics.go | starcoder |
package nbs
import (
"fmt"
"github.com/dolthub/dolt/go/store/metrics"
)
type Stats struct {
OpenLatency metrics.Histogram
CommitLatency metrics.Histogram
IndexReadLatency metrics.Histogram
IndexBytesPerRead metrics.Histogram
GetLatency metrics.Histogram
ChunksPerGet metrics.Histogram
FileReadLatency metrics.Histogram
FileBytesPerRead metrics.Histogram
S3ReadLatency metrics.Histogram
S3BytesPerRead metrics.Histogram
MemReadLatency metrics.Histogram
MemBytesPerRead metrics.Histogram
DynamoReadLatency metrics.Histogram
DynamoBytesPerRead metrics.Histogram
HasLatency metrics.Histogram
AddressesPerHas metrics.Histogram
PutLatency metrics.Histogram
PersistLatency metrics.Histogram
BytesPerPersist metrics.Histogram
ChunksPerPersist metrics.Histogram
CompressedChunkBytesPerPersist metrics.Histogram
UncompressedChunkBytesPerPersist metrics.Histogram
ConjoinLatency metrics.Histogram
BytesPerConjoin metrics.Histogram
ChunksPerConjoin metrics.Histogram
TablesPerConjoin metrics.Histogram
ReadManifestLatency metrics.Histogram
WriteManifestLatency metrics.Histogram
}
func NewStats() *Stats {
return &Stats{
OpenLatency: metrics.NewTimeHistogram(),
CommitLatency: metrics.NewTimeHistogram(),
IndexReadLatency: metrics.NewTimeHistogram(),
IndexBytesPerRead: metrics.NewByteHistogram(),
GetLatency: metrics.NewTimeHistogram(),
FileReadLatency: metrics.NewTimeHistogram(),
FileBytesPerRead: metrics.NewByteHistogram(),
S3ReadLatency: metrics.NewTimeHistogram(),
S3BytesPerRead: metrics.NewByteHistogram(),
MemReadLatency: metrics.NewTimeHistogram(),
MemBytesPerRead: metrics.NewByteHistogram(),
DynamoReadLatency: metrics.NewTimeHistogram(),
DynamoBytesPerRead: metrics.NewByteHistogram(),
HasLatency: metrics.NewTimeHistogram(),
PutLatency: metrics.NewTimeHistogram(),
PersistLatency: metrics.NewTimeHistogram(),
BytesPerPersist: metrics.NewByteHistogram(),
CompressedChunkBytesPerPersist: metrics.NewByteHistogram(),
UncompressedChunkBytesPerPersist: metrics.NewByteHistogram(),
ConjoinLatency: metrics.NewTimeHistogram(),
BytesPerConjoin: metrics.NewByteHistogram(),
ReadManifestLatency: metrics.NewTimeHistogram(),
WriteManifestLatency: metrics.NewTimeHistogram(),
}
}
func (s Stats) String() string {
return fmt.Sprintf(`---NBS Stats---
OpenLatecy: %s
CommitLatency: %s
IndexReadLatency: %s
IndexBytesPerRead: %s
GetLatency: %s
ChunksPerGet: %s
FileReadLatency: %s
FileBytesPerRead: %s
S3ReadLatency: %s
S3BytesPerRead: %s
MemReadLatency: %s
MemBytesPerRead: %s
DynamoReadLatency: %s
DynamoBytesPerRead: %s
HasLatency: %s
AddressesHasGet: %s
PutLatency: %s
PersistLatency: %s
BytesPerPersist: %s
ChunksPerPersist: %s
CompressedChunkBytesPerPersist: %s
UncompressedChunkBytesPerPersist: %s
ConjoinLatency: %s
BytesPerConjoin: %s
ChunksPerConjoin: %s
TablesPerConjoin: %s
ReadManifestLatency: %s
WriteManifestLatency: %s
`,
s.OpenLatency,
s.CommitLatency,
s.IndexReadLatency,
s.IndexBytesPerRead,
s.GetLatency,
s.ChunksPerGet,
s.FileReadLatency,
s.FileBytesPerRead,
s.S3ReadLatency,
s.S3BytesPerRead,
s.MemReadLatency,
s.MemBytesPerRead,
s.DynamoReadLatency,
s.DynamoBytesPerRead,
s.HasLatency,
s.AddressesPerHas,
s.PutLatency,
s.PersistLatency,
s.BytesPerPersist,
s.ChunksPerPersist,
s.CompressedChunkBytesPerPersist,
s.UncompressedChunkBytesPerPersist,
s.ConjoinLatency,
s.BytesPerConjoin,
s.ChunksPerConjoin,
s.TablesPerConjoin,
s.ReadManifestLatency,
s.WriteManifestLatency)
} | go/store/nbs/stats.go | 0.504639 | 0.627609 | stats.go | starcoder |
package synthetics
import (
"encoding/json"
"time"
)
// V202101beta1Health struct for V202101beta1Health
type V202101beta1Health struct {
Health *string `json:"health,omitempty"`
Time *time.Time `json:"time,omitempty"`
}
// NewV202101beta1Health instantiates a new V202101beta1Health object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewV202101beta1Health() *V202101beta1Health {
this := V202101beta1Health{}
return &this
}
// NewV202101beta1HealthWithDefaults instantiates a new V202101beta1Health object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewV202101beta1HealthWithDefaults() *V202101beta1Health {
this := V202101beta1Health{}
return &this
}
// GetHealth returns the Health field value if set, zero value otherwise.
func (o *V202101beta1Health) GetHealth() string {
if o == nil || o.Health == nil {
var ret string
return ret
}
return *o.Health
}
// GetHealthOk returns a tuple with the Health field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *V202101beta1Health) GetHealthOk() (*string, bool) {
if o == nil || o.Health == nil {
return nil, false
}
return o.Health, true
}
// HasHealth returns a boolean if a field has been set.
func (o *V202101beta1Health) HasHealth() bool {
if o != nil && o.Health != nil {
return true
}
return false
}
// SetHealth gets a reference to the given string and assigns it to the Health field.
func (o *V202101beta1Health) SetHealth(v string) {
o.Health = &v
}
// GetTime returns the Time field value if set, zero value otherwise.
func (o *V202101beta1Health) GetTime() time.Time {
if o == nil || o.Time == nil {
var ret time.Time
return ret
}
return *o.Time
}
// GetTimeOk returns a tuple with the Time field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *V202101beta1Health) GetTimeOk() (*time.Time, bool) {
if o == nil || o.Time == nil {
return nil, false
}
return o.Time, true
}
// HasTime returns a boolean if a field has been set.
func (o *V202101beta1Health) HasTime() bool {
if o != nil && o.Time != nil {
return true
}
return false
}
// SetTime gets a reference to the given time.Time and assigns it to the Time field.
func (o *V202101beta1Health) SetTime(v time.Time) {
o.Time = &v
}
func (o V202101beta1Health) MarshalJSON() ([]byte, error) {
toSerialize := map[string]interface{}{}
if o.Health != nil {
toSerialize["health"] = o.Health
}
if o.Time != nil {
toSerialize["time"] = o.Time
}
return json.Marshal(toSerialize)
}
type NullableV202101beta1Health struct {
value *V202101beta1Health
isSet bool
}
func (v NullableV202101beta1Health) Get() *V202101beta1Health {
return v.value
}
func (v *NullableV202101beta1Health) Set(val *V202101beta1Health) {
v.value = val
v.isSet = true
}
func (v NullableV202101beta1Health) IsSet() bool {
return v.isSet
}
func (v *NullableV202101beta1Health) Unset() {
v.value = nil
v.isSet = false
}
func NewNullableV202101beta1Health(val *V202101beta1Health) *NullableV202101beta1Health {
return &NullableV202101beta1Health{value: val, isSet: true}
}
func (v NullableV202101beta1Health) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullableV202101beta1Health) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | kentikapi/synthetics/model_v202101beta1_health.go | 0.73431 | 0.400486 | model_v202101beta1_health.go | starcoder |
package main
import (
"strconv"
"github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
)
type ipvsCollector struct {
metrics map[string]*ipvsMetric
}
type ipvsMetric struct {
Desc *prometheus.Desc
ValType prometheus.ValueType
}
//NewIpvsCollector a new ipvsCollector instance
func NewIpvsCollector(namespace string) *ipvsCollector {
labels := []string{"vip", "vport", "rip", "rport", "protocol"}
return &ipvsCollector{
metrics: map[string]*ipvsMetric{
"ipvs_active_connections": {
Desc: prometheus.NewDesc(prometheus.BuildFQName(namespace, "", "active_connections"), "ipvs active connection counter", labels, nil),
ValType: prometheus.GaugeValue,
},
"ipvs_inactive_connections": {
Desc: prometheus.NewDesc(prometheus.BuildFQName(namespace, "", "inactive_connections"), "ipvs inactive connection counter", labels, nil),
ValType: prometheus.GaugeValue,
},
"ipvs_rate_cps": {
Desc: prometheus.NewDesc(prometheus.BuildFQName(namespace, "", "rate_cps"), "ipvs new connection counter per second", labels, nil),
ValType: prometheus.GaugeValue,
},
"ipvs_bytes_in": {
Desc: prometheus.NewDesc(prometheus.BuildFQName(namespace, "", "bytes_in"), "ipvs ingress bytes", labels, nil),
ValType: prometheus.CounterValue,
},
"ipvs_bytes_out": {
Desc: prometheus.NewDesc(prometheus.BuildFQName(namespace, "", "bytes_out"), "ipvs egress bytes", labels, nil),
ValType: prometheus.CounterValue,
},
"ipvs_packets_in": {
Desc: prometheus.NewDesc(prometheus.BuildFQName(namespace, "", "packets_in"), "ipvs ingress packets", labels, nil),
ValType: prometheus.CounterValue,
},
"ipvs_packets_out": {
Desc: prometheus.NewDesc(prometheus.BuildFQName(namespace, "", "packets_out"), "ipvs egress packets", labels, nil),
ValType: prometheus.CounterValue,
},
"ipvs_rate_inbps": {
Desc: prometheus.NewDesc(prometheus.BuildFQName(namespace, "", "rate_inbps"), "ipvs ingress rate bits per second", labels, nil),
ValType: prometheus.CounterValue,
},
"ipvs_rate_outbps": {
Desc: prometheus.NewDesc(prometheus.BuildFQName(namespace, "", "rate_outbps"), "ipvs egress rate bits per second", labels, nil),
ValType: prometheus.CounterValue,
},
"ipvs_rate_inpps": {
Desc: prometheus.NewDesc(prometheus.BuildFQName(namespace, "", "rate_inpps"), "ipvs ingress rate packets per second", labels, nil),
ValType: prometheus.CounterValue,
},
"ipvs_rate_outpps": {
Desc: prometheus.NewDesc(prometheus.BuildFQName(namespace, "", "rate_outpps"), "ipvs egress rate packets per second", labels, nil),
ValType: prometheus.CounterValue,
},
},
}
}
// Describe sends the description of every configured ipvs metric on ch,
// implementing the prometheus.Collector interface.
func (c *ipvsCollector) Describe(ch chan<- *prometheus.Desc) {
	for name := range c.metrics {
		ch <- c.metrics[name].Desc
	}
}
//Collect output ipvs metric values
func (c *ipvsCollector) Collect(ch chan<- prometheus.Metric) {
ipvs, err := NewIpvsWrapper()
if err != nil {
logrus.Errorf("fetch ipvs handler err:%s", err)
return
}
defer ipvs.Close()
svcs, err := ipvs.GetServices()
if err != nil {
logrus.Errorf("fetch ipvs services err:%s", err)
return
}
for _, svc := range svcs {
labels := []string{svc.Address.String(), strconv.Itoa(int(svc.Port)), "", "", ipvs.Protocol(svc.Protocol)}
stats := svc.Stats
metric := c.metrics["ipvs_active_connections"]
ch <- prometheus.MustNewConstMetric(metric.Desc, metric.ValType, float64(stats.Connections), labels...)
metric = c.metrics["ipvs_rate_cps"]
ch <- prometheus.MustNewConstMetric(metric.Desc, metric.ValType, float64(stats.CPS), labels...)
metric = c.metrics["ipvs_bytes_in"]
ch <- prometheus.MustNewConstMetric(metric.Desc, metric.ValType, float64(stats.BytesIn), labels...)
metric = c.metrics["ipvs_bytes_out"]
ch <- prometheus.MustNewConstMetric(metric.Desc, metric.ValType, float64(stats.BytesOut), labels...)
metric = c.metrics["ipvs_packets_in"]
ch <- prometheus.MustNewConstMetric(metric.Desc, metric.ValType, float64(stats.PacketsIn), labels...)
metric = c.metrics["ipvs_packets_out"]
ch <- prometheus.MustNewConstMetric(metric.Desc, metric.ValType, float64(stats.PacketsOut), labels...)
metric = c.metrics["ipvs_rate_inbps"]
ch <- prometheus.MustNewConstMetric(metric.Desc, metric.ValType, float64(stats.BPSIn), labels...)
metric = c.metrics["ipvs_rate_outbps"]
ch <- prometheus.MustNewConstMetric(metric.Desc, metric.ValType, float64(stats.BPSOut), labels...)
metric = c.metrics["ipvs_rate_inpps"]
ch <- prometheus.MustNewConstMetric(metric.Desc, metric.ValType, float64(stats.PPSIn), labels...)
metric = c.metrics["ipvs_rate_outpps"]
ch <- prometheus.MustNewConstMetric(metric.Desc, metric.ValType, float64(stats.PPSOut), labels...)
dests, err := ipvs.GetDestinations(svc)
if err != nil {
logrus.Errorf("fetch destinations err:%s", err)
} else {
for _, dest := range dests {
labels[2] = dest.Address.String()
labels[3] = strconv.Itoa(int(dest.Port))
stats := dest.Stats
metric = c.metrics["ipvs_active_connections"]
ch <- prometheus.MustNewConstMetric(metric.Desc, metric.ValType, float64(stats.Connections), labels...)
metric = c.metrics["ipvs_inactive_connections"]
ch <- prometheus.MustNewConstMetric(metric.Desc, metric.ValType, float64(dest.InactiveConnections), labels...)
metric = c.metrics["ipvs_rate_cps"]
ch <- prometheus.MustNewConstMetric(metric.Desc, metric.ValType, float64(stats.CPS), labels...)
metric = c.metrics["ipvs_bytes_in"]
ch <- prometheus.MustNewConstMetric(metric.Desc, metric.ValType, float64(stats.BytesIn), labels...)
metric = c.metrics["ipvs_bytes_out"]
ch <- prometheus.MustNewConstMetric(metric.Desc, metric.ValType, float64(stats.BytesOut), labels...)
metric = c.metrics["ipvs_packets_in"]
ch <- prometheus.MustNewConstMetric(metric.Desc, metric.ValType, float64(stats.PacketsIn), labels...)
metric = c.metrics["ipvs_packets_out"]
ch <- prometheus.MustNewConstMetric(metric.Desc, metric.ValType, float64(stats.PacketsOut), labels...)
metric = c.metrics["ipvs_rate_inbps"]
ch <- prometheus.MustNewConstMetric(metric.Desc, metric.ValType, float64(stats.BPSIn), labels...)
metric = c.metrics["ipvs_rate_outbps"]
ch <- prometheus.MustNewConstMetric(metric.Desc, metric.ValType, float64(stats.BPSOut), labels...)
metric = c.metrics["ipvs_rate_inpps"]
ch <- prometheus.MustNewConstMetric(metric.Desc, metric.ValType, float64(stats.PPSIn), labels...)
metric = c.metrics["ipvs_rate_outpps"]
ch <- prometheus.MustNewConstMetric(metric.Desc, metric.ValType, float64(stats.PPSOut), labels...)
}
}
}
} | exporter.go | 0.683208 | 0.42483 | exporter.go | starcoder |
package polygo
/*
This file contains polynomial solvers and related algorithms.
*/
import (
"errors"
)
// CountRootsWithin returns the number of roots of the current instance on the closed interval [a, b].
// If there are an infinite amount of roots, -1 is returned.
func (rp *RealPolynomial) CountRootsWithin(a, b float64) int {
	switch {
	case rp == nil:
		panic("received nil *RealPolynomial")
	case a > b:
		panic("invalid interval")
	}
	// The zero polynomial vanishes everywhere, so every point of [a, b] is a root.
	if rp.Degree() == 0 && rp.coeffs[0] == 0.0 {
		return -1
	}
	return rp.countRootsWithinWSC(a, b, rp.sturmChain())
}
// FindRootWithin returns ANY root of the current instance existing on the closed interval [a, b].
// If there are no roots on the provided interval, an error is set.
func (rp *RealPolynomial) FindRootWithin(a, b float64) (float64, error) {
	switch {
	case rp == nil:
		panic("received nil *RealPolynomial")
	case a > b:
		panic("invalid interval")
	}
	// findRootWithinWSC works on the half-open interval (a, b], so test a explicitly.
	if rp.At(a) == 0.0 {
		return a, nil
	}
	chain := rp.sturmChain()
	switch n := rp.countRootsWithinWSC(a, b, chain); {
	case n == 0:
		return 0.0, errors.New("the polynomial has no solutions in the provided interval")
	case n < 0:
		// Infinitely many roots: any point works.
		return 0.0, nil
	}
	return rp.findRootWithinWSC(a, b, chain)
}
// FindRootsWithin returns ALL roots of the current instance existing on the closed interval [a, b].
// Unlike FindRootWithin, no error is set if there are no solutions on the provided interval. Instead, an empty slice is returned.
// If there are an infinite number of solutions on [a, b], an error is set.
func (rp *RealPolynomial) FindRootsWithin(a, b float64) ([]float64, error) {
	if rp == nil {
		panic("received nil *RealPolynomial")
	}
	// P(x) = 0 is the only polynomial with infinitely many roots.
	// https://math.stackexchange.com/questions/1137190/is-there-a-polynomial-that-has-infinitely-many-roots
	if rp.IsZero() {
		return nil, errors.New("infinitely many solutions")
	}
	roots := rp.findRootsWithinAcc(a, b, nil, rp.sturmChain())
	// findRootsWithinAcc scans the half-open interval (a, b], so a itself needs a manual check.
	if rp.At(a) == 0.0 {
		roots = append(roots, a)
	}
	return roots, nil
}
// findRootsWithinAcc is an accumulative implementation of a hybrid Bisection
// Method through recursion: intervals holding more than one root are halved
// until each half isolates at most one root, which Newton's method then finds.
// Wrapped by FindRootsWithin.
func (rp *RealPolynomial) findRootsWithinAcc(a, b float64, roots []float64, chain []*RealPolynomial) []float64 {
	switch n := rp.countRootsWithinWSC(a, b, chain); {
	case n > 1:
		mid := (a + b) / 2.0
		left := rp.findRootsWithinAcc(a, mid, roots, chain)
		right := rp.findRootsWithinAcc(mid, b, roots, chain)
		return append(left, right...)
	case n == 1:
		root, _ := rp.findRootWithinWSC(a, b, chain)
		roots = append(roots, root)
	}
	return roots
}
// FindIntersectionWithin returns ANY intersection point of the current instance and rp2 existing on the closed interval [a, b].
// If there are no intersections on the provided interval, an error is set.
func (rp *RealPolynomial) FindIntersectionWithin(a, b float64, rp2 *RealPolynomial) (Point, error) {
	// Guard against nil inputs explicitly, for consistency with
	// FindIntersectionsWithin (previously a nil argument would surface as an
	// unhelpful nil dereference inside Sub/FindRootWithin).
	if rp == nil || rp2 == nil {
		panic("received nil *RealPolynomial")
	}
	// Work on a copy so subtracting rp2 does not mutate the receiver.
	tmp := *rp
	root, err := (&tmp).Sub(rp2).FindRootWithin(a, b)
	if err != nil {
		return Point{}, err
	}
	// The intersection lies on rp at the root of the difference polynomial.
	point := Point{root, rp.At(root)}
	return point, nil
}
// FindIntersectionsWithin returns ALL intersection points of the current instance and rp2 existing on the closed interval [a, b].
// Unlike FindIntersectionWithin, no error is set if there are no intersections on the provided interval. Instead, an empty slice is returned.
// If there are an infinite number of solutions, an error is set.
func (rp *RealPolynomial) FindIntersectionsWithin(a, b float64, rp2 *RealPolynomial) ([]Point, error) {
	if rp == nil || rp2 == nil {
		panic("received nil *RealPolynomial")
	}
	// Intersections of rp and rp2 are the roots of their difference.
	diff := *rp
	roots, err := diff.Sub(rp2).FindRootsWithin(a, b)
	if err != nil {
		return nil, err
	}
	points := make([]Point, 0, len(roots))
	for _, x := range roots {
		points = append(points, Point{x, rp.At(x)})
	}
	return points, nil
}
/*
Since a non-changing Sturm Chain is used repetitevly through multiple functions, the following private functions
with suffix "WSC" are such that the overhead caused by recomputing the Sturm chain every use is avoided by making the chain an input.
*/
// findRootWithinWSC finds a root of rp on (a, b] using Newton's method seeded
// at the interval midpoint, falling back to bisection when Newton escapes the
// interval. "WSC" variants take a precomputed Sturm chain to avoid
// recomputing it on every call.
func (rp *RealPolynomial) findRootWithinWSC(a, b float64, chain []*RealPolynomial) (float64, error) {
	nRootsWithin := rp.countRootsWithinWSC(a, b, chain)
	abMid := (a + b) / 2.0
	if nRootsWithin == 0 {
		return 0.0, errors.New("the polynomial has no solutions in the provided interval")
	}
	if nRootsWithin == -1 { // Infinite amount of roots: any point is a root.
		return 0.0, nil
	}
	// Implement Newton's Method: iterate x -> x - P(x)/P'(x) a fixed number
	// of times starting from the midpoint.
	deriv := rp.Derivative()
	guess := abMid
	var derivAtGuess float64
	for i := 0; i < globalNewtonIterations; i++ {
		derivAtGuess = deriv.At(guess)
		// In the case that the derivative evaluates to zero, return the current guess.
		if derivAtGuess == 0.0 {
			return guess, nil
		}
		guess -= rp.At(guess) / derivAtGuess
	}
	// Operate on a half-open interval: a itself does not count as a solution.
	if guess == a {
		return 0.0, errors.New("the polynomial has no solutions in the provided interval")
	}
	// Recall that Newton's method does not operate on an interval.
	// In the case that we've found a solution outside of the given interval, bisect the interval and try on each half.
	// NOTE(review): if both halves also fail, control falls through and the
	// out-of-interval guess is returned with a nil error — confirm intended.
	if !(a < guess && guess <= b) {
		retryLeft, err1 := rp.findRootWithinWSC(a, abMid, chain)
		retryRight, err2 := rp.findRootWithinWSC(abMid, b, chain)
		if err1 == nil {
			return retryLeft, nil
		} else if err2 == nil {
			return retryRight, nil
		}
	}
	return guess, nil
}
// countRootsWithinWSC applies Sturm's theorem: the number of distinct real
// roots on (a, b] equals the drop in sign variations of the chain evaluated
// at a versus at b.
func (rp *RealPolynomial) countRootsWithinWSC(a, b float64, chain []*RealPolynomial) int {
	atA := make([]float64, len(chain))
	atB := make([]float64, len(chain))
	for i, p := range chain {
		atA[i] = p.At(a)
		atB[i] = p.At(b)
	}
	return countSignVariations(atA) - countSignVariations(atB)
}
// sturmChain builds the Sturm chain of rp: the sequence starting with rp and
// its derivative, where each following entry is the negated remainder of the
// Euclidean division of the previous two. The chain ends early once a
// constant polynomial is reached.
func (rp *RealPolynomial) sturmChain() []*RealPolynomial {
	// Implement Sturm's Theorem
	var sturmChain []*RealPolynomial
	var rem *RealPolynomial
	var tmp RealPolynomial
	sturmChain = append(sturmChain, rp)
	deriv := rp.Derivative()
	sturmChain = append(sturmChain, deriv)
	for i := 1; i < rp.Degree(); i++ {
		// A constant entry terminates the chain.
		if sturmChain[i].Degree() == 0 {
			break
		}
		// Copy before dividing so EuclideanDiv cannot mutate a chain entry.
		tmp = *sturmChain[i-1]
		_, rem = tmp.EuclideanDiv(sturmChain[i])
		sturmChain = append(sturmChain, rem.MulS(-1))
	}
	return sturmChain
}
// Counts sign variations in s: https://en.wikipedia.org/wiki/Budan%27s_theorem#Sign_variation
// Zeroes are ignored; a variation is a sign change between consecutive
// non-zero entries. This runs in a single pass without allocating the
// filtered copy the previous implementation built.
func countSignVariations(s []float64) int {
	var count int
	// prev holds the last non-zero value seen; 0 means "none yet".
	prev := 0.0
	for _, v := range s {
		if v == 0.0 {
			continue
		}
		if prev != 0.0 && prev*v < 0 {
			count++
		}
		prev = v
	}
	return count
}
/*
End "WSC"-suffixed (and related) functions.
*/ | solve.go | 0.836655 | 0.538983 | solve.go | starcoder |
package util
import (
"io"
"math"
"github.com/attestantio/go-execution-client/types"
)
// Encodings are based upon the rules at https://eth.wiki/en/fundamentals/rlp
// singleBytes caches a one-byte slice for every possible byte value, so that
// encoders can emit single bytes without allocating per write.
var singleBytes = func() [][]byte {
	table := make([][]byte, 256)
	for i := range table {
		table[i] = []byte{byte(i)}
	}
	return table
}()
// RLPAddress appends the RLP encoding of an address to the buffer.
// An address is always 20 bytes, so its RLP prefix is the fixed byte
// 0x94 (0x80 + 20) followed by the raw address bytes.
func RLPAddress(buf io.Writer, input types.Address) {
	_, _ = buf.Write(singleBytes[0x94])
	_, _ = buf.Write(input[:])
}
// RLPBytes appends the RLP encoding of the input to the buffer.
func RLPBytes(buf io.Writer, input []byte) {
	switch {
	case len(input) == 0:
		// The empty byte string encodes as the single byte 0x80.
		_, _ = buf.Write(singleBytes[0x80])
	case len(input) == 1 && input[0] < 0x80:
		// A single byte below 0x80 is its own encoding.
		_, _ = buf.Write(input)
	default:
		rlpLength(buf, len(input), 0x80)
		_, _ = buf.Write(input)
	}
}
// rlpLength writes the RLP length header for a payload of the given length.
// offset selects the header class (0x80 for byte strings, 0xc0 for lists):
// lengths below 56 are encoded directly into the header byte, longer lengths
// are written as a big-endian integer preceded by a byte giving its width.
// NOTE(review): int(math.Log2(float64(length))) relies on float64 precision;
// for lengths >= 2^53 that round up to a power of two this overestimates
// numBytes and emits a leading zero byte. Unreachable for realistic buffer
// sizes, but math/bits.Len would be exact — confirm whether worth changing.
func rlpLength(buf io.Writer, length int, offset int) {
	if length < 56 {
		_, _ = buf.Write(singleBytes[length+offset])
		return
	}
	numBytes := int(math.Log2(float64(length)))/8 + 1
	_, _ = buf.Write(singleBytes[numBytes+offset+55])
	for i := 0; i < numBytes; i++ {
		// Emit the most significant remaining byte, then strip it off.
		msb := length >> ((numBytes - i - 1) * 8)
		_, _ = buf.Write(singleBytes[msb])
		length -= msb << ((numBytes - i - 1) * 8)
	}
}
// RLPList appends the RLP encoding of a list of items to the buffer.
// items must already be the concatenation of the RLP encodings of the list
// elements; this only prepends the list header (offset 0xc0).
func RLPList(buf io.Writer, items []byte) {
	rlpLength(buf, len(items), 0xc0)
	_, _ = buf.Write(items)
}

// RLPNil appends the RLP encoding of nil (the empty string, 0x80) to the buffer.
func RLPNil(buf io.Writer) {
	_, _ = buf.Write(singleBytes[0x80])
}
// RLPUint64 appends the RLP encoding of the input to the buffer.
func RLPUint64(buf io.Writer, input uint64) {
if input == 0 {
_, _ = buf.Write(singleBytes[0x80])
return
}
if input <= 0x7f {
_, _ = buf.Write(singleBytes[input])
return
}
numBytes := int(math.Log2(float64(input)))/8 + 1
res := make([]byte, numBytes)
for i := numBytes - 1; i >= 0; i-- {
res[i] = byte(input % 256)
input >>= 8
}
RLPBytes(buf, res)
} | util/rlphelpers.go | 0.571049 | 0.640833 | rlphelpers.go | starcoder |
package factor
import (
"github.com/jesand/stats"
"github.com/jesand/stats/dist"
"github.com/jesand/stats/variable"
)
// A connecting node in a factor graph. A factor is a node with edges to
// random variable nodes, and which has a corresponding function to score the
// values of those random variables.
type Factor interface {
	// Adjacent returns the random variables this factor is connected to.
	Adjacent() []variable.RandomVariable

	// Score returns the factor's current score, based on the values of the
	// adjacent variables.
	Score() float64
}
// NewDistFactor creates a factor which scores based on a probability
// distribution. The adjacent variables are split into "variables" and
// "parameters" using the distribution's NumVars() and NumParams() values.
func NewDistFactor(vars []variable.RandomVariable, distr dist.Dist) *DistFactor {
	return &DistFactor{Vars: vars, Dist: distr}
}

// DistFactor is a factor which scores its adjacent variables using a
// probability distribution.
type DistFactor struct {
	Vars []variable.RandomVariable
	Dist dist.Dist
}

// Adjacent returns the adjacent random variables.
func (factor DistFactor) Adjacent() []variable.RandomVariable {
	return factor.Vars
}

// Score returns the log probability of the variables given the parameters.
// It panics if the number of adjacent variables does not match the
// distribution's expected variable plus parameter count.
func (factor DistFactor) Score() float64 {
	nVars := factor.Dist.NumVars()
	nParams := factor.Dist.NumParams()
	if len(factor.Vars) != nVars+nParams {
		panic(stats.ErrfFactorVarNum(nVars, nParams, len(factor.Vars)))
	}
	// The first nVars adjacent nodes are the distribution's variables, the
	// remainder its parameters.
	vars := make([]float64, nVars)
	params := make([]float64, nParams)
	for i := 0; i < nVars; i++ {
		vars[i] = factor.Vars[i].Val()
	}
	for i := 0; i < nParams; i++ {
		params[i] = factor.Vars[nVars+i].Val()
	}
	return factor.Dist.Score(vars, params)
}
// Create a new factor which always returns the same score
func NewConstFactor(vars []variable.RandomVariable, value float64) *ConstFactor {
return &ConstFactor{
Vars: vars,
Value: value,
}
}
// A Factor which always returns the same value
type ConstFactor struct {
Vars []variable.RandomVariable
Value float64
}
// The adjacent random variables
func (factor ConstFactor) Adjacent() []variable.RandomVariable {
return factor.Vars
}
// The log probability of the variables given the parameters
func (factor ConstFactor) Score() float64 {
return factor.Value
} | factor/factor.go | 0.7478 | 0.53783 | factor.go | starcoder |
package core
import "unsafe"
// TextureTarget specifies a texture target type (1D, 2D, 2DArray, Cubemap, etc)
type TextureTarget int

// TextureTargetXXX are the different texture target types
const (
	TextureTarget1D TextureTarget = iota
	TextureTarget1DArray
	TextureTarget2D
	TextureTarget2DArray
	TextureTargetCubemapXPositive
	TextureTargetCubemapXNegative
	TextureTargetCubemapYPositive
	TextureTargetCubemapYNegative
	TextureTargetCubemapZPositive
	TextureTargetCubemapZNegative
)

// TextureFormat holds the texture component layout
type TextureFormat int

// These are the several supported texture component layouts
const (
	TextureFormatR TextureFormat = iota
	TextureFormatRG
	TextureFormatRGB
	TextureFormatRGBA
	TextureFormatDEPTH
)

// TextureSizedFormat specifies the format and size of a texture's components
type TextureSizedFormat int

// These are the several supported texture component sizes
const (
	TextureSizedFormatR8 TextureSizedFormat = iota
	TextureSizedFormatR16F
	TextureSizedFormatR32F
	TextureSizedFormatRG8
	TextureSizedFormatRG16F
	TextureSizedFormatRG32F
	TextureSizedFormatRGB8
	TextureSizedFormatRGB16F
	TextureSizedFormatRGB32F
	TextureSizedFormatRGBA8
	TextureSizedFormatRGBA16F
	TextureSizedFormatRGBA32F
	TextureSizedFormatDEPTH32F
)

// TextureComponentType specifies the texture component storage type
type TextureComponentType int

// These are the supported texture component storage types
const (
	TextureComponentTypeUNSIGNEDBYTE TextureComponentType = iota
	TextureComponentTypeFLOAT
)

// TextureWrapMode specifies the type of wrap around a sampler of this texture will use
type TextureWrapMode int

// These are the supported texture wrap modes
const (
	TextureWrapModeClampEdge TextureWrapMode = iota
	TextureWrapModeClampBorder
	TextureWrapModeRepeat
)

// TextureFilter specifies the type of interpolation a sampler of this texture will use
type TextureFilter int

// These are the supported texture filtering modes
const (
	TextureFilterNearest TextureFilter = iota
	TextureFilterLinear
	TextureFilterMipmapLinear
)

// TextureDescriptor contains the full description of a texture and its sampling parameters
// It is used as input to texture creation functions and at runtime inside rendersystems
// to setup samplers and memory allocation
type TextureDescriptor struct {
	Width uint32
	Height uint32
	Mipmaps bool
	Target TextureTarget
	Format TextureFormat
	SizedFormat TextureSizedFormat
	ComponentType TextureComponentType
	Filter TextureFilter
	WrapMode TextureWrapMode
}

// Texture is an interface which wraps both a texture and settings for samplers sampling it
type Texture interface {
	// Descriptor returns the full texture description used to create it.
	Descriptor() TextureDescriptor
	// Handle returns the underlying backend-specific texture handle.
	Handle() unsafe.Pointer
	// Lt is used for sorting
	Lt(Texture) bool
	// Gt is the sorting counterpart of Lt.
	Gt(Texture) bool
	// SetFilter changes the interpolation mode samplers of this texture use.
	SetFilter(TextureFilter)
	// SetWrapMode changes the wrap-around mode samplers of this texture use.
	SetWrapMode(TextureWrapMode)
} | core/texture.go | 0.526586 | 0.483222 | texture.go | starcoder |
package metricsext
import (
"time"
"github.com/cep21/gometrics/metrics"
)
// DurationObserver wraps an observer to allow reporting durations, rather than flat float64 objects
type DurationObserver struct {
	// observer receives each duration converted to seconds.
	observer metrics.Observer
}

// Observe reports to the wrapped observer the duration as a Second time value
func (t *DurationObserver) Observe(d time.Duration) {
	t.observer.Observe(d.Seconds())
}
// Duration is similar to Float, but attaches metadata of the unit "seconds" to time series
func Duration(a metrics.BaseRegistry, metricName string, dimensions map[string]string) *DurationObserver {
	id := metrics.TimeSeriesIdentifier{
		MetricName: metricName,
		Dimensions: dimensions,
	}
	withSecondsUnit := func(_ metrics.TimeSeriesIdentifier, tsmd metrics.TimeSeriesMetadata) metrics.TimeSeriesMetadata {
		return tsmd.WithValue(metrics.MetaDataUnit, "Seconds")
	}
	return &DurationObserver{
		observer: a.Observer(a.TimeSeries(id, withSecondsUnit)),
	}
}
// Counter returns an observer set with the metric type counter
func Counter(a metrics.BaseRegistry, metricName string, dimensions map[string]string) metrics.Observer {
	id := metrics.TimeSeriesIdentifier{
		MetricName: metricName,
		Dimensions: dimensions,
	}
	asCounter := func(_ metrics.TimeSeriesIdentifier, tsmd metrics.TimeSeriesMetadata) metrics.TimeSeriesMetadata {
		return tsmd.WithValue(metrics.MetaDataTimeSeriesType, metrics.TSTypeCounter)
	}
	return a.Observer(a.TimeSeries(id, asCounter))
}
// Gauge returns an observer set with the metric type Gauge
func Gauge(a metrics.BaseRegistry, metricName string, dimensions map[string]string) metrics.Observer {
	id := metrics.TimeSeriesIdentifier{
		MetricName: metricName,
		Dimensions: dimensions,
	}
	asGauge := func(_ metrics.TimeSeriesIdentifier, tsmd metrics.TimeSeriesMetadata) metrics.TimeSeriesMetadata {
		return tsmd.WithValue(metrics.MetaDataTimeSeriesType, metrics.TSTypeGauge)
	}
	return a.Observer(a.TimeSeries(id, asGauge))
}
// Float simply returns an observer for a time series with no special metadata
func Float(a metrics.BaseRegistry, metricName string, dimensions map[string]string) metrics.Observer {
	id := metrics.TimeSeriesIdentifier{
		MetricName: metricName,
		Dimensions: dimensions,
	}
	return a.Observer(a.TimeSeries(id, nil))
}
// WithDimensions wraps a registry with a registry that adds default dimensions to created time series.
// Wrapping an already-wrapped registry flattens to a single wrapper rather
// than nesting. Note the merge order: dimensions from the existing (inner)
// wrapper take precedence over the newly supplied ones on key conflicts.
func WithDimensions(a metrics.BaseRegistry, dimensions map[string]string) metrics.BaseRegistry {
	if asW, ok := a.(*wrappedRegistry); ok {
		return &wrappedRegistry{
			BaseRegistry: asW.BaseRegistry,
			dimensions: mergeMapsFast(dimensions, asW.dimensions),
			metadata: asW.metadata,
		}
	}
	return &wrappedRegistry{
		BaseRegistry: a,
		dimensions: dimensions,
	}
}
// WithMetadata wraps a registry with a metadata constructor for all time series.
// A nil constructor is a no-op and returns the registry unchanged. Wrapping an
// already-wrapped registry flattens to a single wrapper; the constructors are
// composed so the newly supplied one runs first and the existing wrapper's
// constructor is applied on top of its result.
func WithMetadata(a metrics.BaseRegistry, metadata metrics.MetadataConstructor) metrics.BaseRegistry {
	if metadata == nil {
		return a
	}
	if asW, ok := a.(*wrappedRegistry); ok {
		return &wrappedRegistry{
			BaseRegistry: asW.BaseRegistry,
			dimensions: asW.dimensions,
			metadata: func(tsi metrics.TimeSeriesIdentifier, mtd metrics.TimeSeriesMetadata) metrics.TimeSeriesMetadata {
				// The pre-existing constructor may be nil even on a wrapper
				// (wrapper created by WithDimensions only).
				if asW.metadata == nil {
					return metadata(tsi, mtd)
				}
				return asW.metadata(tsi, metadata(tsi, mtd))
			},
		}
	}
	return &wrappedRegistry{
		BaseRegistry: a,
		metadata: metadata,
	}
}
// wrappedRegistry decorates a BaseRegistry with default dimensions and/or a
// metadata constructor that are applied to every created time series.
type wrappedRegistry struct {
	metrics.BaseRegistry
	// dimensions are merged into each TimeSeriesIdentifier (explicit
	// identifier dimensions win on conflict).
	dimensions map[string]string
	// metadata, if non-nil, is composed around each caller-supplied
	// metadata constructor.
	metadata metrics.MetadataConstructor
}
// TimeSeries returns the unique time series for an identifier.
// The wrapper's default dimensions are merged into tsi (tsi's own dimensions
// win on conflict), and the wrapper's metadata constructor is composed around
// the supplied one: the supplied constructor runs first, then the wrapper's.
func (u *wrappedRegistry) TimeSeries(tsi metrics.TimeSeriesIdentifier, metadata metrics.MetadataConstructor) *metrics.TimeSeries {
	tsi.Dimensions = mergeMapsFast(u.dimensions, tsi.Dimensions)
	return u.BaseRegistry.TimeSeries(tsi, func(tsi metrics.TimeSeriesIdentifier, mtd metrics.TimeSeriesMetadata) metrics.TimeSeriesMetadata {
		// Either constructor may be nil; apply whichever exist, supplied first.
		if metadata == nil && u.metadata == nil {
			return mtd
		}
		if u.metadata == nil {
			return metadata(tsi, mtd)
		}
		if metadata == nil {
			return u.metadata(tsi, mtd)
		}
		return u.metadata(tsi, metadata(tsi, mtd))
	})
}
// SingleValue helps create a TimeWindowAggregation of a single value at the current timestamp
func SingleValue(value float64) metrics.TimeWindowAggregation {
	// Aggregate exactly one observation, then stamp it with a window that
	// starts now (zero-length window).
	va := LocklessValueAggregator{}
	va.Observe(value)
	return metrics.TimeWindowAggregation{
		Va: va.Aggregate(),
		Tw: metrics.TimeWindow{
			Start: time.Now(),
		},
	}
}
// onDemandFlush adapts an OnDemandFlushable into an AggregationSource by
// pulling its current metrics from the given time series source on flush.
type onDemandFlush struct {
	o metrics.OnDemandFlushable
	r metrics.TimeSeriesSource
}

// FlushMetrics returns the flushable's current metrics, satisfying
// metrics.AggregationSource.
func (o onDemandFlush) FlushMetrics() []metrics.TimeSeriesAggregation {
	return o.o.CurrentMetrics(o.r)
}

// CustomAggregation creates an aggregation source from an object that can collect metrics on demand
func CustomAggregation(r metrics.TimeSeriesSource, o metrics.OnDemandFlushable) metrics.AggregationSource {
	return onDemandFlush{
		o: o,
		r: r,
	}
}
// mergeMapsCopy returns a new map holding the union of m1 and m2;
// on key conflicts the value from m2 wins.
func mergeMapsCopy(m1 map[string]string, m2 map[string]string) map[string]string {
	merged := make(map[string]string, len(m1)+len(m2))
	for _, src := range []map[string]string{m1, m2} {
		for k, v := range src {
			merged[k] = v
		}
	}
	return merged
}

// mergeMapsFast merges m1 and m2 (m2 wins on conflicts), avoiding a copy
// when either side is empty by returning the other map directly.
// Callers must treat the result as read-only since it may alias an input.
func mergeMapsFast(m1 map[string]string, m2 map[string]string) map[string]string {
	switch {
	case len(m1) == 0:
		return m2
	case len(m2) == 0:
		return m1
	default:
		return mergeMapsCopy(m1, m2)
	}
}
package view
import (
"fmt"
"github.com/protolambda/ztyp/codec"
. "github.com/protolambda/ztyp/tree"
)
// BasicVectorTypeDef is the type descriptor for a fixed-length vector of
// basic (packed) elements.
type BasicVectorTypeDef struct {
	ElemType BasicTypeDef
	VectorLength uint64
	ComplexTypeBase
}

// BasicVectorType creates a vector type descriptor for the given element type
// and length. Basic vectors are fixed-size: min, max and exact byte size all
// equal length * element byte size.
func BasicVectorType(elemType BasicTypeDef, length uint64) *BasicVectorTypeDef {
	size := length * elemType.TypeByteLength()
	return &BasicVectorTypeDef{
		ElemType: elemType,
		VectorLength: length,
		ComplexTypeBase: ComplexTypeBase{
			MinSize: size,
			MaxSize: size,
			Size: size,
			IsFixedSize: true,
		},
	}
}
// FromElements builds a BasicVectorView from the given element views.
// It errors if more than VectorLength elements are supplied.
// NOTE(review): supplying fewer than VectorLength elements is accepted and
// the remainder is zero-filled — confirm this is intended.
func (td *BasicVectorTypeDef) FromElements(v ...BasicView) (*BasicVectorView, error) {
	length := uint64(len(v))
	if length > td.VectorLength {
		return nil, fmt.Errorf("expected no more than %d elements, got %d", td.VectorLength, length)
	}
	// Pack the basic views into 32-byte bottom chunks, then build the
	// backing merkle tree over them.
	bottomNodes, err := td.ElemType.PackViews(v)
	if err != nil {
		return nil, err
	}
	depth := CoverDepth(td.BottomNodeLength())
	rootNode, _ := SubtreeFillToContents(bottomNodes, depth)
	listView, _ := td.ViewFromBacking(rootNode, nil)
	return listView.(*BasicVectorView), nil
}
// ElementType returns the element type of the vector.
func (td *BasicVectorTypeDef) ElementType() TypeDef {
	return td.ElemType
}

// Length returns the fixed number of elements in the vector.
func (td *BasicVectorTypeDef) Length() uint64 {
	return td.VectorLength
}

// DefaultNode returns the root of an all-zero backing subtree for this type.
func (td *BasicVectorTypeDef) DefaultNode() Node {
	depth := CoverDepth(td.BottomNodeLength())
	return SubtreeFillToDepth(&ZeroHashes[0], depth)
}

// ViewFromBacking wraps an existing backing node as a *BasicVectorView.
// The hook (may be nil) is invoked when the view's backing changes.
func (td *BasicVectorTypeDef) ViewFromBacking(node Node, hook BackingHook) (View, error) {
	depth := CoverDepth(td.BottomNodeLength())
	return &BasicVectorView{
		SubtreeView: SubtreeView{
			BackedView: BackedView{
				ViewBase: ViewBase{
					TypeDef: td,
				},
				Hook: hook,
				BackingNode: node,
			},
			depth: depth,
		},
		BasicVectorTypeDef: td,
	}, nil
}

// ElementsPerBottomNode returns how many elements pack into one 32-byte chunk.
func (td *BasicVectorTypeDef) ElementsPerBottomNode() uint64 {
	return 32 / td.ElemType.TypeByteLength()
}

// BottomNodeLength returns the number of 32-byte chunks needed to hold all
// elements (rounded up).
func (td *BasicVectorTypeDef) BottomNodeLength() uint64 {
	perNode := td.ElementsPerBottomNode()
	return (td.VectorLength + perNode - 1) / perNode
}

// TranslateIndex maps an element index to its chunk index and the element's
// position within that chunk.
// NOTE(review): the bit-mask assumes ElementsPerBottomNode is a power of two;
// that holds for basic element byte sizes of 1/2/4/8/16/32.
func (td *BasicVectorTypeDef) TranslateIndex(index uint64) (nodeIndex uint64, intraNodeIndex uint8) {
	perNode := td.ElementsPerBottomNode()
	return index / perNode, uint8(index & (perNode - 1))
}

// Default returns the default (all-zero) view for this type.
func (td *BasicVectorTypeDef) Default(hook BackingHook) View {
	v, _ := td.ViewFromBacking(td.DefaultNode(), hook)
	return v
}

// New returns a fresh default (all-zero) vector view.
func (td *BasicVectorTypeDef) New() *BasicVectorView {
	return td.Default(nil).(*BasicVectorView)
}
// Deserialize reads the fixed-size serialized form of the vector from dr and
// returns it as a *BasicVectorView. The reader's scope must match the type's
// exact byte size.
func (td *BasicVectorTypeDef) Deserialize(dr *codec.DecodingReader) (View, error) {
	scope := dr.Scope()
	if td.Size != scope {
		return nil, fmt.Errorf("expected size %d does not match scope %d", td.Size, scope)
	}
	contents := make([]byte, scope, scope)
	if _, err := dr.Read(contents); err != nil {
		return nil, err
	}
	// Chunk the raw bytes into 32-byte bottom nodes and merkleize.
	bottomNodes, err := BytesIntoNodes(contents)
	if err != nil {
		return nil, err
	}
	depth := CoverDepth(td.BottomNodeLength())
	rootNode, _ := SubtreeFillToContents(bottomNodes, depth)
	listView, _ := td.ViewFromBacking(rootNode, nil)
	return listView.(*BasicVectorView), nil
}

// String returns a human-readable description, e.g. "Vector[uint64, 4]".
func (td *BasicVectorTypeDef) String() string {
	return fmt.Sprintf("Vector[%s, %d]", td.ElemType.String(), td.VectorLength)
}
// BasicVectorView is a view into a backing merkle tree, interpreted as a
// fixed-length vector of packed basic elements.
type BasicVectorView struct {
	SubtreeView
	*BasicVectorTypeDef
}
// AsBasicVector casts a (View, error) pair to a *BasicVectorView,
// propagating the incoming error and rejecting other view types.
func AsBasicVector(v View, err error) (*BasicVectorView, error) {
	if err != nil {
		return nil, err
	}
	if bv, ok := v.(*BasicVectorView); ok {
		return bv, nil
	}
	return nil, fmt.Errorf("view is not a basic vector: %v", v)
}
// subviewNode resolves element index i to its 32-byte bottom chunk: it
// returns the chunk as a *Root plus the chunk index and the element's
// position within the chunk.
func (tv *BasicVectorView) subviewNode(i uint64) (r *Root, bottomIndex uint64, subIndex uint8, err error) {
	bottomIndex, subIndex = tv.TranslateIndex(i)
	v, err := tv.SubtreeView.GetNode(bottomIndex)
	if err != nil {
		return nil, 0, 0, err
	}
	// Bottom nodes of a basic vector must be leaf roots (raw 32-byte chunks).
	r, ok := v.(*Root)
	if !ok {
		return nil, 0, 0, fmt.Errorf("basic vector bottom node is not a root, at index %d", i)
	}
	return r, bottomIndex, subIndex, nil
}
// Get returns the element at index i, unpacked from its backing chunk.
// It errors if i is out of range or the backing is malformed.
func (tv *BasicVectorView) Get(i uint64) (BasicView, error) {
	if i >= tv.VectorLength {
		return nil, fmt.Errorf("basic vector has length %d, cannot get index %d", tv.VectorLength, i)
	}
	r, _, subIndex, err := tv.subviewNode(i)
	if err != nil {
		return nil, err
	}
	return tv.ElemType.BasicViewFromBacking(r, subIndex)
}

// Set writes element v at index i by rewriting the containing 32-byte chunk.
// It errors if i is out of range or the backing is malformed.
func (tv *BasicVectorView) Set(i uint64, v BasicView) error {
	if i >= tv.VectorLength {
		return fmt.Errorf("cannot set item at element index %d, basic vector only has %d elements", i, tv.VectorLength)
	}
	r, bottomIndex, subIndex, err := tv.subviewNode(i)
	if err != nil {
		return err
	}
	// Splice the new element into the existing chunk at its sub-position.
	return tv.SubtreeView.SetNode(bottomIndex, v.BackingFromBase(r, subIndex))
}
// Copy returns a detached copy of the view: the backing tree is shared
// (immutable nodes), but the change hook is dropped so mutations on the copy
// do not propagate to the original's owner.
func (tv *BasicVectorView) Copy() (View, error) {
	tvCopy := *tv
	tvCopy.Hook = nil
	return &tvCopy, nil
}

// Iter returns an element iterator that fetches each element via Get.
func (tv *BasicVectorView) Iter() ElemIter {
	i := uint64(0)
	return ElemIterFn(func() (elem View, ok bool, err error) {
		if i < tv.VectorLength {
			elem, err = tv.Get(i)
			ok = true
			i += 1
			return
		} else {
			return nil, false, nil
		}
	})
}

// ReadonlyIter returns a faster iterator that walks the backing tree
// directly, snapshotting the current backing node.
func (tv *BasicVectorView) ReadonlyIter() ElemIter {
	return basicElemReadonlyIter(tv.BackingNode, tv.VectorLength, tv.depth, tv.ElemType)
}

// ValueByteLength returns the fixed serialized size of the vector.
func (tv *BasicVectorView) ValueByteLength() (uint64, error) {
	return tv.Size, nil
}

// Serialize writes the vector's packed bytes to w.
func (tv *BasicVectorView) Serialize(w *codec.EncodingWriter) error {
	contents := make([]byte, tv.Size, tv.Size)
	if err := SubtreeIntoBytes(tv.BackingNode, tv.depth, tv.BottomNodeLength(), contents); err != nil {
		return err
	}
	return w.Write(contents)
} | view/basic_vector.go | 0.54698 | 0.497986 | basic_vector.go | starcoder |
package rules
import "github.com/butuzov/mirror/internal/checker"
// NewBytesChecker returns a checker for the "bytes" package: it registers the
// mirrored bytes/strings function pairs and the bytes.Buffer method rules so
// avoidable string<->[]byte conversions can be reported.
func NewBytesChecker() *checker.Checker {
	return checker.New("bytes").
		WithFunctions(BytesFunctions).
		WithStructMethods("bytes.Buffer", BytesBufferMethods)
}
var (
BytesFunctions = map[string]checker.Violation{
"NewBuffer": {
Type: checker.Function,
Message: "avoid allocations with bytes.NewBufferString",
Args: []int{0},
StringTargeted: false,
Alternative: checker.Alternative{
Package: "bytes",
Function: "NewBufferString",
},
Generate: &checker.Generate{
Pattern: `NewBuffer($0)`,
Returns: 1,
},
},
"NewBufferString": {
Type: checker.Function,
Message: "avoid allocations with bytes.NewBuffer",
Args: []int{0},
StringTargeted: true,
Alternative: checker.Alternative{
Package: "bytes",
Function: "NewBuffer",
},
Generate: &checker.Generate{
Pattern: `NewBufferString($0)`,
Returns: 1,
},
},
"Compare": {
Type: checker.Function,
Message: "avoid allocations with strings.Compare",
Args: []int{0, 1},
StringTargeted: false,
Alternative: checker.Alternative{
Package: "strings",
Function: "Compare",
},
Generate: &checker.Generate{
Pattern: `Compare($0, $1)`,
Returns: 1,
},
},
"Contains": {
Type: checker.Function,
Message: "avoid allocations with strings.Contains",
Args: []int{0, 1},
StringTargeted: false,
Alternative: checker.Alternative{
Package: "strings",
Function: "Contains",
},
Generate: &checker.Generate{
Pattern: `Contains($0, $1)`,
Returns: 1,
},
},
"ContainsAny": {
Type: checker.Function,
Message: "avoid allocations with strings.ContainsAny",
Args: []int{0},
StringTargeted: false,
Alternative: checker.Alternative{
Package: "strings",
Function: "ContainsAny",
},
Generate: &checker.Generate{
Pattern: `ContainsAny($0, "f")`,
Returns: 1,
},
},
"ContainsRune": {
Type: checker.Function,
Message: "avoid allocations with strings.ContainsRune",
Args: []int{0},
StringTargeted: false,
Alternative: checker.Alternative{
Package: "strings",
Function: "ContainsRune",
},
Generate: &checker.Generate{
Pattern: `ContainsRune($0, rune('ф'))`,
Returns: 1,
},
},
"Count": {
Type: checker.Function,
Message: "avoid allocations with strings.Count",
Args: []int{0, 1},
StringTargeted: false,
Alternative: checker.Alternative{
Package: "strings",
Function: "Count",
},
Generate: &checker.Generate{
Pattern: `Count($0, $1)`,
Returns: 1,
},
},
"EqualFold": {
Type: checker.Function,
Message: "avoid allocations with strings.EqualFold",
Args: []int{0, 1},
StringTargeted: false,
Alternative: checker.Alternative{
Package: "strings",
Function: "EqualFold",
},
Generate: &checker.Generate{
Pattern: `EqualFold($0, $1)`,
Returns: 1,
},
},
"HasPrefix": {
Type: checker.Function,
Message: "avoid allocations with strings.HasPrefix",
Args: []int{0, 1},
StringTargeted: false,
Alternative: checker.Alternative{
Package: "strings",
Function: "HasPrefix",
},
Generate: &checker.Generate{
Pattern: `HasPrefix($0, $1)`,
Returns: 1,
},
},
"HasSuffix": {
Type: checker.Function,
Message: "avoid allocations with strings.HasSuffix",
Args: []int{0, 1},
StringTargeted: false,
Alternative: checker.Alternative{
Package: "strings",
Function: "HasSuffix",
},
Generate: &checker.Generate{
Pattern: `HasSuffix($0, $1)`,
Returns: 1,
},
},
"Index": {
Type: checker.Function,
Message: "avoid allocations with strings.Index",
Args: []int{0, 1},
StringTargeted: false,
Alternative: checker.Alternative{
Package: "strings",
Function: "Index",
},
Generate: &checker.Generate{
Pattern: `Index($0, $1)`,
Returns: 1,
},
},
"IndexAny": {
Type: checker.Function,
Message: "avoid allocations with strings.IndexAny",
Args: []int{0},
StringTargeted: false,
Alternative: checker.Alternative{
Package: "strings",
Function: "IndexAny",
},
Generate: &checker.Generate{
Pattern: `IndexAny($0, "f")`,
Returns: 1,
},
},
"IndexByte": {
Type: checker.Function,
Message: "avoid allocations with strings.IndexByte",
Args: []int{0},
StringTargeted: false,
Alternative: checker.Alternative{
Package: "strings",
Function: "IndexByte",
},
Generate: &checker.Generate{
Pattern: `IndexByte($0, 'f')`,
Returns: 1,
},
},
"IndexFunc": {
Type: checker.Function,
Message: "avoid allocations with strings.IndexFunc",
Args: []int{0},
StringTargeted: false,
Alternative: checker.Alternative{
Package: "strings",
Function: "IndexFunc",
},
Generate: &checker.Generate{
Pattern: `IndexFunc($0, func(rune) bool {return true })`,
Returns: 1,
},
},
"IndexRune": {
Type: checker.Function,
Message: "avoid allocations with strings.IndexRune",
Args: []int{0},
StringTargeted: false,
Alternative: checker.Alternative{
Package: "strings",
Function: "IndexRune",
},
Generate: &checker.Generate{
Pattern: `IndexRune($0, rune('ф'))`,
Returns: 1,
},
},
"LastIndex": {
Type: checker.Function,
Message: "avoid allocations with strings.LastIndex",
Args: []int{0, 1},
StringTargeted: false,
Alternative: checker.Alternative{
Package: "strings",
Function: "LastIndex",
},
Generate: &checker.Generate{
Pattern: `LastIndex($0, $1)`,
Returns: 1,
},
},
"LastIndexAny": {
Type: checker.Function,
Message: "avoid allocations with strings.LastIndexAny",
Args: []int{0},
StringTargeted: false,
Alternative: checker.Alternative{
Package: "strings",
Function: "LastIndexAny",
},
Generate: &checker.Generate{
Pattern: `LastIndexAny($0, "ф")`,
Returns: 1,
},
},
"LastIndexByte": {
Type: checker.Function,
Message: "avoid allocations with strings.LastIndexByte",
Args: []int{0},
StringTargeted: false,
Alternative: checker.Alternative{
Package: "strings",
Function: "LastIndexByte",
},
Generate: &checker.Generate{
Pattern: `LastIndexByte($0, 'f')`,
Returns: 1,
},
},
"LastIndexFunc": {
Type: checker.Function,
Message: "avoid allocations with strings.LastIndexAny",
Args: []int{0},
StringTargeted: false,
Alternative: checker.Alternative{
Package: "strings",
Function: "LastIndexAny",
},
Generate: &checker.Generate{
Pattern: `LastIndexFunc($0, func(rune) bool {return true })`,
Returns: 1,
},
},
}
BytesBufferMethods = map[string]checker.Violation{
"Write": {
Type: checker.Method,
Message: "avoid allocations with (*bytees.Buffer).WriteString",
Args: []int{0},
StringTargeted: false,
Alternative: checker.Alternative{
Method: "WriteString",
},
Generate: &checker.Generate{
PreCondition: `bb := bytes.Buffer{}`,
Pattern: `Write($0)`,
Returns: 2,
},
},
"WriteString": {
Type: checker.Method,
Message: "avoid allocations with (*bytees.Buffer).Write",
Args: []int{0},
StringTargeted: true,
Alternative: checker.Alternative{
Method: "Write",
},
Generate: &checker.Generate{
PreCondition: `bb := bytes.Buffer{}`,
Pattern: `WriteString($0)`,
Returns: 2,
},
},
}
) | internal/rules/bytes.go | 0.646237 | 0.411702 | bytes.go | starcoder |
package bayes
import (
"github.com/WinPooh32/math"
"github.com/WinPooh32/ml"
)
// Aliases of the shared ml types so the package API reads naturally.
type (
	DType  = ml.DType  // scalar sample type
	Class  = ml.Class  // class identifier
	Column = ml.Column // all sample values of one feature
	Row    = ml.Row    // one value per feature
)
// NaiveBayes is a Gaussian naive Bayes classifier: each class keeps a prior
// probability plus per-feature mean and variance estimated by Fit.
type NaiveBayes struct {
	classes  []string // class labels; the slice index is the class id
	dim      int      // number of features per object
	prob     []DType  // prior probability per class
	mean     []Row    // per-class, per-feature mean
	variance []Row    // per-class, per-feature variance
}
// New returns an untrained NaiveBayes classifier for the given class labels,
// per-class prior probabilities, and feature dimension. Means and variances
// start at zero; call Fit before predicting.
func New(classes []string, prob []DType, featuresDim int) *NaiveBayes {
	return &NaiveBayes{
		classes:  classes,
		dim:      featuresDim,
		prob:     prob,
		mean:     makeRows(len(classes), featuresDim),
		variance: makeRows(len(classes), featuresDim),
	}
}
// NewFromDataset builds and fits a NaiveBayes classifier from the dataset:
// class priors come from the label distribution and parameters from Fit.
// NOTE(review): assumes the dataset has at least one label — confirm callers
// never pass an empty dataset.
func NewFromDataset(data ml.Dataset) *NaiveBayes {
	labels := data.Lables()

	priors := make([]DType, 0, len(labels))
	for _, label := range labels {
		priors = append(priors, data.Distribution(label))
	}

	// The feature dimension is taken from the first label's class data.
	firstClass, _, _ := data.Class(labels[0])
	nb := New(labels, priors, len(firstClass))

	perClass := make([][]Column, 0, len(labels))
	for _, label := range labels {
		class, _, _ := data.Class(label)
		perClass = append(perClass, class)
	}
	nb.Fit(perClass)

	return nb
}
// makeRows allocates n rows with dim zero-valued entries each.
func makeRows(n int, dim int) []Row {
	rows := make([]Row, n)
	for i := range rows {
		rows[i] = make(Row, dim)
	}
	return rows
}
// SetProb replaces the per-class prior probabilities.
// PredictTo expects the slice to hold one entry per class.
func (nb *NaiveBayes) SetProb(prob []DType) {
	nb.prob = prob
}
// Fit estimates the per-class, per-feature mean and variance from the
// dataset, which is indexed as [class][feature] -> column of sample values.
// It panics when the dataset is empty or the feature count of the first class
// does not match the classifier's dimension.
func (nb *NaiveBayes) Fit(dataset [][]Column) {
	// nb.dim is already an int; the previous int() conversion was redundant.
	if len(dataset) == 0 || len(dataset[0]) != nb.dim {
		panic("mismatched dimensions!")
	}
	for class, object := range dataset {
		for feature, column := range object {
			mean := nb.calcMean(column)
			nb.mean[class][feature] = mean
			nb.variance[class][feature] = nb.calcVariance(column, mean)
		}
	}
}
// PredictTo writes one score per class into probs for the given object.
// Each score is a negative log2 of the (unnormalized) posterior: the smaller
// the value, the more likely the class. It panics on any dimension mismatch.
func (nb *NaiveBayes) PredictTo(probs []DType, object []DType) {
	clCount := len(nb.classes)
	clProbs := nb.prob
	clMeans := nb.mean
	clVaris := nb.variance

	if len(object) != nb.dim ||
		len(probs) != clCount ||
		len(clProbs) != clCount ||
		len(clMeans) != clCount ||
		len(clVaris) != clCount {
		panic("mismatched dimensions!")
	}

	// The previous implementation also accumulated an unused sum and repeated
	// the probs length check with panic("") at the end; both were dead code.
	for class := range nb.classes {
		probs[class] = nb.calcPosterior(clProbs[class], object, clMeans[class], clVaris[class])
	}
}
// calcPosterior returns the negative log2 of the (unnormalized) posterior for
// one class: -log2(prior) - sum over features of log2(gauss(value)).
// Features with zero variance, and densities that evaluate to exactly zero,
// are skipped so the score stays finite.
func (nb *NaiveBayes) calcPosterior(
	proba DType,
	object []DType,
	means,
	variances Row,
) (p DType) {
	// Satisfy bounds checker: one length check up front lets the compiler
	// elide per-iteration bounds checks in the loop below.
	size := nb.dim
	if len(object) < size ||
		len(means) < size ||
		len(variances) < size {
		panic("bad size")
	}

	p = -math.Log2(proba)

	for feat := 0; feat < size; feat++ {
		value := object[feat]
		mean := means[feat]
		vari := variances[feat]

		// Zero variance would make the Gaussian density degenerate; skip.
		if vari == 0 {
			continue
		}

		g := nb.calcGauss(value, mean, vari)
		// log2(0) is -Inf; skip underflowed densities to keep p finite.
		if g == 0 {
			continue
		}
		p -= math.Log2(g)
	}

	return p
}
// calcGauss evaluates the Gaussian probability density with the given mean
// and variance at val.
func (nb *NaiveBayes) calcGauss(val DType, mean DType, vari DType) DType {
	diff := val - mean
	exponent := -(diff * diff) / (2.0 * vari)
	density := math.Exp(exponent)
	return density / math.Sqrt(2*math.Pi*vari)
}
// calcMean returns the arithmetic mean of the column's values.
// Each term is scaled by 1/N before summation.
func (nb *NaiveBayes) calcMean(col Column) DType {
	inv := 1.0 / DType(len(col))
	var mean DType
	for _, v := range col {
		mean += v * inv
	}
	return mean
}
func (nb *NaiveBayes) calcVariance(col Column, mean DType) DType {
var c DType = 1.0 / DType(len(col))
var vari DType = 0.0
for _, val := range col {
s := val - mean
vari += s * s * c
}
return vari
} | bayes/bayes.go | 0.568176 | 0.400427 | bayes.go | starcoder |
package main
import (
"github.com/DarkFighterLuke/covidgraphs"
"log"
"strconv"
"time"
)
// setCaptionAndamentoNazionale returns the HTML caption for the national
// trend plot: latest totals with day-over-day deltas, plus any official note
// attached to the most recent data point.
func setCaptionAndamentoNazionale() string {
	lastIndex := len(nationData) - 1
	// Day-over-day deltas between the last two national records.
	_, nuoviTotale := covidgraphs.CalculateDelta(nationData[lastIndex-1].Totale_positivi, nationData[lastIndex].Totale_positivi)
	_, nuoviGuariti := covidgraphs.CalculateDelta(nationData[lastIndex-1].Dimessi_guariti, nationData[lastIndex].Dimessi_guariti)
	_, nuoviMorti := covidgraphs.CalculateDelta(nationData[lastIndex-1].Deceduti, nationData[lastIndex].Deceduti)
	_, nuoviPositivi := covidgraphs.CalculateDelta(nationData[lastIndex-1].Nuovi_positivi, nationData[lastIndex].Nuovi_positivi)

	// On parse failure, data stays the zero time and the caption shows
	// "0001-01-01"; the error is only logged.
	data, err := time.Parse("2006-01-02T15:04:05", nationData[lastIndex].Data)
	if err != nil {
		log.Println("error parsing data in setCaptionAndamentoNazionale()")
	}
	msg := "<b>Andamento nazionale " + data.Format("2006-01-02") + "</b>\n\n" +
		"\n<b>Attualmente positivi: </b>" + strconv.Itoa(nationData[lastIndex].Totale_positivi) + " (<i>" + nuoviTotale + "</i>)" +
		"\n<b>Guariti: </b>" + strconv.Itoa(nationData[lastIndex].Dimessi_guariti) + " (<i>" + nuoviGuariti + "</i>)" +
		"\n<b>Morti: </b>" + strconv.Itoa(nationData[lastIndex].Deceduti) + " (<i>" + nuoviMorti + "</i>)" +
		"\n\n<b>Nuovi positivi: </b>" + strconv.Itoa(nationData[lastIndex].Nuovi_positivi) + " (<i>" + nuoviPositivi + "</i>)"

	// Append the official note for the latest record, if one is referenced.
	if nationData[len(nationData)-1].Note_it != "" {
		i, err := covidgraphs.FindFirstOccurrenceNote(&datiNote, "codice", nationData[len(nationData)-1].Note_it)
		if err != nil {
			log.Println("errore nella ricerca della nota col codice indicato")
		} else {
			var campoProvincia string
			if datiNote[i].Provincia != "" {
				campoProvincia = ", " + datiNote[i].Provincia
			}

			var notesField string
			if datiNote[i].Note != "" {
				notesField = ", " + datiNote[i].Note
			}

			msg += "\n\n<b>Note:</b>\n[<i>" + datiNote[i].Tipologia_avviso + "] " + datiNote[i].Regione + campoProvincia + ": " + datiNote[i].Avviso + notesField + "</i>"
		}
	}

	return msg
}
// Returns the caption for the regions top 10
func setCaptionTopRegions() string {
top := covidgraphs.GetTopTenRegionsTotaleContagi(®ionsData)
var msg = "<b>Top " + strconv.Itoa(nTopRegions) + " regioni per contagi</b>\n\n"
for i := 0; i < nTopRegions; i++ {
msg += "<b>" + strconv.Itoa(i+1) + ". </b>" + (*top)[i].Denominazione_regione + " (<code>" + strconv.Itoa((*top)[i].Totale_casi) + "</code>)\n"
}
return msg
}
// setCaptionTopProvinces builds the caption listing the top nTopRegions
// provinces ranked by total confirmed cases.
func setCaptionTopProvinces() string {
	ranked := covidgraphs.GetTopTenProvincesTotaleContagi(&provincesData)
	msg := "<b>Top " + strconv.Itoa(nTopRegions) + " province per contagi</b>\n\n"
	for pos := 0; pos < nTopRegions; pos++ {
		entry := (*ranked)[pos]
		msg += "<b>" + strconv.Itoa(pos+1) + ". </b>" + entry.Denominazione_provincia + " (<code>" + strconv.Itoa(entry.Totale_casi) + "</code>)\n"
	}
	return msg
}
// setCaptionRegion returns the HTML caption for a regional trend plot:
// current values with day-over-day deltas, plus any official note attached
// to the record.
// NOTE(review): regionId-21 assumes exactly 21 region records per day in
// regionsData — confirm against the data loader.
func setCaptionRegion(regionId int) string {
	_, nuoviTotale := covidgraphs.CalculateDelta(regionsData[regionId-21].Totale_casi, regionsData[regionId].Totale_casi)
	_, nuoviGuariti := covidgraphs.CalculateDelta(regionsData[regionId-21].Dimessi_guariti, regionsData[regionId].Dimessi_guariti)
	_, nuoviMorti := covidgraphs.CalculateDelta(regionsData[regionId-21].Deceduti, regionsData[regionId].Deceduti)
	_, nuoviPositivi := covidgraphs.CalculateDelta(regionsData[regionId-21].Nuovi_positivi, regionsData[regionId].Nuovi_positivi)
	_, nuoviRicoveratiConSintomi := covidgraphs.CalculateDelta(regionsData[regionId-21].Ricoverati_con_sintomi, regionsData[regionId].Ricoverati_con_sintomi)
	_, nuoviTerapiaIntensiva := covidgraphs.CalculateDelta(regionsData[regionId-21].Terapia_intensiva, regionsData[regionId].Terapia_intensiva)
	_, nuoviOspedalizzati := covidgraphs.CalculateDelta(regionsData[regionId-21].Totale_ospedalizzati, regionsData[regionId].Totale_ospedalizzati)
	_, nuoviIsolamentoDomiciliare := covidgraphs.CalculateDelta(regionsData[regionId-21].Isolamento_domiciliare, regionsData[regionId].Isolamento_domiciliare)
	_, nuoviTamponi := covidgraphs.CalculateDelta(regionsData[regionId-21].Tamponi, regionsData[regionId].Tamponi)

	data, err := time.Parse("2006-01-02T15:04:05", regionsData[regionId].Data)
	if err != nil {
		log.Println("error parsing data in setCaptionRegion()")
	}
	msg := "<b>Andamento regione " + regionsData[regionId].Denominazione_regione + " " + data.Format("2006-01-02") + "</b>\n\n" +
		"\n<b>Totale positivi: </b>" + strconv.Itoa(regionsData[regionId].Totale_casi) + " (<i>" + nuoviTotale + "</i>)" +
		"\n<b>Guariti: </b>" + strconv.Itoa(regionsData[regionId].Dimessi_guariti) + " (<i>" + nuoviGuariti + "</i>)" +
		"\n<b>Morti: </b>" + strconv.Itoa(regionsData[regionId].Deceduti) + " (<i>" + nuoviMorti + "</i>)" +
		"\n<b>Nuovi positivi: </b>" + strconv.Itoa(regionsData[regionId].Nuovi_positivi) + " (<i>" + nuoviPositivi + "</i>)" +
		"\n\n<b>Ricoverati con sintomi: </b>" + strconv.Itoa(regionsData[regionId].Ricoverati_con_sintomi) + " (<i>" + nuoviRicoveratiConSintomi + "</i>)" +
		"\n<b>Terapia intensiva: </b>" + strconv.Itoa(regionsData[regionId].Terapia_intensiva) + " (<i>" + nuoviTerapiaIntensiva + "</i>)" +
		"\n<b>Totale ospedalizzati: </b>" + strconv.Itoa(regionsData[regionId].Totale_ospedalizzati) + " (<i>" + nuoviOspedalizzati + "</i>)" +
		"\n<b>Isolamento domiciliare: </b>" + strconv.Itoa(regionsData[regionId].Isolamento_domiciliare) + " (<i>" + nuoviIsolamentoDomiciliare + "</i>)" +
		"\n<b>Tamponi effettuati: </b>" + strconv.Itoa(regionsData[regionId].Tamponi) + " (<i>" + nuoviTamponi + "</i>)"

	if regionsData[regionId].Note_it != "" {
		i, err := covidgraphs.FindFirstOccurrenceNote(&datiNote, "codice", regionsData[regionId].Note_it)
		if err != nil {
			log.Println("errore nella ricerca della nota col codice indicato")
		} else {
			// Bug fix: previously the note was built even when the lookup
			// failed, indexing datiNote with an invalid index (the national
			// variant already guarded this branch with else).
			var campoProvincia string
			if datiNote[i].Provincia != "" {
				campoProvincia = ", " + datiNote[i].Provincia
			}

			var notesField string
			if datiNote[i].Note != "" {
				notesField = ", " + datiNote[i].Note
			}

			msg += "\n\n<b>Note:</b>\n[<i>" + datiNote[i].Tipologia_avviso + "] " + datiNote[i].Regione + campoProvincia + ": " + datiNote[i].Avviso + notesField + "</i>"
		}
	}

	return msg
}
// setCaptionProvince returns the HTML caption for a provincial trend plot:
// current totals with day-over-day deltas, plus any official note attached
// to the record.
func setCaptionProvince(provinceId int) string {
	provinceIndexes := covidgraphs.GetProvinceIndexesByName(&provincesData, provincesData[provinceId].Denominazione_provincia)
	todayIndex := (*provinceIndexes)[len(*provinceIndexes)-1]
	yesterdayIndex := (*provinceIndexes)[len(*provinceIndexes)-2]
	_, nuoviTotale := covidgraphs.CalculateDelta(provincesData[yesterdayIndex].Totale_casi, provincesData[todayIndex].Totale_casi)
	_, nuoviPositivi := covidgraphs.CalculateDelta(provincesData[yesterdayIndex].NuoviCasi, provincesData[todayIndex].NuoviCasi)

	data, err := time.Parse("2006-01-02T15:04:05", provincesData[provinceId].Data)
	if err != nil {
		log.Println("error parsing data in setCaptionProvince()")
	}
	msg := "<b>Andamento provincia di " + provincesData[provinceId].Denominazione_provincia + " " + data.Format("2006-01-02") + "</b>\n\n" +
		"\n<b>Totale positivi: </b>" + strconv.Itoa(provincesData[provinceId].Totale_casi) + " (<i>" + nuoviTotale + "</i>)" +
		"\n\n<b>Nuovi positivi: </b>" + strconv.Itoa(provincesData[provinceId].NuoviCasi) + " (<i>" + nuoviPositivi + "</i>)"

	if provincesData[provinceId].Note_it != "" {
		i, err := covidgraphs.FindFirstOccurrenceNote(&datiNote, "codice", provincesData[provinceId].Note_it)
		if err != nil {
			log.Println("errore nella ricerca della nota col codice indicato")
		} else {
			// Bug fix: previously the note was built even when the lookup
			// failed, indexing datiNote with an invalid index.
			var campoProvincia string
			if datiNote[i].Provincia != "" {
				campoProvincia = ", " + datiNote[i].Provincia
			}

			var notesField string
			if datiNote[i].Note != "" {
				notesField = ", " + datiNote[i].Note
			}

			// Bug fix: close the <i> tag like every other caption does, so
			// the HTML stays well-formed.
			msg += "\n\n<b>Note:</b>\n[<i>" + datiNote[i].Tipologia_avviso + "] " + datiNote[i].Regione + campoProvincia + ": " + datiNote[i].Avviso + notesField + "</i>"
		}
	}

	return msg
}
// setCaptionConfrontoRegione returns the caption for a regional fields
// comparison plot, listing only the fields requested in fieldsNames.
// Fields are emitted in a fixed display order, once per occurrence of their
// key in fieldsNames.
func setCaptionConfrontoRegione(regionId int, fieldsNames []string) string {
	_, nuoviTotale := covidgraphs.CalculateDelta(regionsData[regionId-21].Totale_casi, regionsData[regionId].Totale_casi)
	_, nuoviGuariti := covidgraphs.CalculateDelta(regionsData[regionId-21].Dimessi_guariti, regionsData[regionId].Dimessi_guariti)
	_, nuoviMorti := covidgraphs.CalculateDelta(regionsData[regionId-21].Deceduti, regionsData[regionId].Deceduti)
	_, nuoviTotalePositivi := covidgraphs.CalculateDelta(regionsData[regionId-21].Totale_positivi, regionsData[regionId].Totale_positivi)
	_, nuoviPositivi := covidgraphs.CalculateDelta(regionsData[regionId-21].Nuovi_positivi, regionsData[regionId].Nuovi_positivi)
	_, nuoviRicoveratiConSintomi := covidgraphs.CalculateDelta(regionsData[regionId-21].Ricoverati_con_sintomi, regionsData[regionId].Ricoverati_con_sintomi)
	_, nuoviTerapiaIntensiva := covidgraphs.CalculateDelta(regionsData[regionId-21].Terapia_intensiva, regionsData[regionId].Terapia_intensiva)
	_, nuoviOspedalizzati := covidgraphs.CalculateDelta(regionsData[regionId-21].Totale_ospedalizzati, regionsData[regionId].Totale_ospedalizzati)
	_, nuoviIsolamentoDomiciliare := covidgraphs.CalculateDelta(regionsData[regionId-21].Isolamento_domiciliare, regionsData[regionId].Isolamento_domiciliare)
	_, nuoviTamponi := covidgraphs.CalculateDelta(regionsData[regionId-21].Tamponi, regionsData[regionId].Tamponi)

	data, err := time.Parse("2006-01-02T15:04:05", regionsData[regionId].Data)
	if err != nil {
		log.Println("error parsing data in region caption")
	}
	msg := "<b>Andamento regione " + regionsData[regionId].Denominazione_regione + " " + data.Format("2006-01-02") + "</b>\n"

	// Candidate lines in their fixed display order.
	candidates := []struct {
		key  string
		line string
	}{
		{"totale_casi", "\n<b>Totale positivi: </b>" + strconv.Itoa(regionsData[regionId].Totale_casi) + " (<i>" + nuoviTotale + "</i>)"},
		{"dimessi_guariti", "\n<b>Guariti: </b>" + strconv.Itoa(regionsData[regionId].Dimessi_guariti) + " (<i>" + nuoviGuariti + "</i>)"},
		{"deceduti", "\n<b>Morti: </b>" + strconv.Itoa(regionsData[regionId].Deceduti) + " (<i>" + nuoviMorti + "</i>)"},
		{"attualmente_positivi", "\n<b>Attualmente positivi: </b>" + strconv.Itoa(regionsData[regionId].Totale_positivi) + " (<i>" + nuoviTotalePositivi + "</i>)"},
		{"nuovi_positivi", "\n<b>Nuovi positivi: </b>" + strconv.Itoa(regionsData[regionId].Nuovi_positivi) + " (<i>" + nuoviPositivi + "</i>)"},
		{"ricoverati_con_sintomi", "\n<b>Ricoverati con sintomi: </b>" + strconv.Itoa(regionsData[regionId].Ricoverati_con_sintomi) + " (<i>" + nuoviRicoveratiConSintomi + "</i>)"},
		{"terapia_intensiva", "\n<b>Terapia intensiva: </b>" + strconv.Itoa(regionsData[regionId].Terapia_intensiva) + " (<i>" + nuoviTerapiaIntensiva + "</i>)"},
		{"totale_ospedalizzati", "\n<b>Totale ospedalizzati: </b>" + strconv.Itoa(regionsData[regionId].Totale_ospedalizzati) + " (<i>" + nuoviOspedalizzati + "</i>)"},
		{"isolamento_domiciliare", "\n<b>Isolamento domiciliare: </b>" + strconv.Itoa(regionsData[regionId].Isolamento_domiciliare) + " (<i>" + nuoviIsolamentoDomiciliare + "</i>)"},
		{"tamponi", "\n<b>Tamponi effettuati: </b>" + strconv.Itoa(regionsData[regionId].Tamponi) + " (<i>" + nuoviTamponi + "</i>)"},
	}
	for _, c := range candidates {
		for _, requested := range fieldsNames {
			if requested == c.key {
				msg += c.line
			}
		}
	}

	return msg
}
// setCaptionConfrontoNazione returns the caption for a national fields
// comparison plot, listing only the fields requested in fieldsNames.
// Fields are emitted in a fixed display order, once per occurrence of their
// key in fieldsNames.
func setCaptionConfrontoNazione(nationId int, fieldsNames []string) string {
	_, nuoviTotale := covidgraphs.CalculateDelta(nationData[nationId-1].Totale_casi, nationData[nationId].Totale_casi)
	_, nuoviGuariti := covidgraphs.CalculateDelta(nationData[nationId-1].Dimessi_guariti, nationData[nationId].Dimessi_guariti)
	_, nuoviMorti := covidgraphs.CalculateDelta(nationData[nationId-1].Deceduti, nationData[nationId].Deceduti)
	_, nuoviTotalePositivi := covidgraphs.CalculateDelta(nationData[nationId-1].Totale_positivi, nationData[nationId].Totale_positivi)
	_, nuoviPositivi := covidgraphs.CalculateDelta(nationData[nationId-1].Nuovi_positivi, nationData[nationId].Nuovi_positivi)
	_, nuoviRicoveratiConSintomi := covidgraphs.CalculateDelta(nationData[nationId-1].Ricoverati_con_sintomi, nationData[nationId].Ricoverati_con_sintomi)
	_, nuoviTerapiaIntensiva := covidgraphs.CalculateDelta(nationData[nationId-1].Terapia_intensiva, nationData[nationId].Terapia_intensiva)
	_, nuoviOspedalizzati := covidgraphs.CalculateDelta(nationData[nationId-1].Totale_ospedalizzati, nationData[nationId].Totale_ospedalizzati)
	_, nuoviIsolamentoDomiciliare := covidgraphs.CalculateDelta(nationData[nationId-1].Isolamento_domiciliare, nationData[nationId].Isolamento_domiciliare)
	_, nuoviTamponi := covidgraphs.CalculateDelta(nationData[nationId-1].Tamponi, nationData[nationId].Tamponi)

	data, err := time.Parse("2006-01-02T15:04:05", nationData[nationId].Data)
	if err != nil {
		log.Println("error parsing data in nation caption")
	}
	msg := "<b>Andamento nazione " + data.Format("2006-01-02") + "</b>\n"

	// Candidate lines in their fixed display order.
	candidates := []struct {
		key  string
		line string
	}{
		{"totale_casi", "\n<b>Totale positivi: </b>" + strconv.Itoa(nationData[nationId].Totale_casi) + " (<i>" + nuoviTotale + "</i>)"},
		{"dimessi_guariti", "\n<b>Guariti: </b>" + strconv.Itoa(nationData[nationId].Dimessi_guariti) + " (<i>" + nuoviGuariti + "</i>)"},
		{"deceduti", "\n<b>Morti: </b>" + strconv.Itoa(nationData[nationId].Deceduti) + " (<i>" + nuoviMorti + "</i>)"},
		{"attualmente_positivi", "\n<b>Attualmente positivi: </b>" + strconv.Itoa(nationData[nationId].Totale_positivi) + " (<i>" + nuoviTotalePositivi + "</i>)"},
		{"nuovi_positivi", "\n<b>Nuovi positivi: </b>" + strconv.Itoa(nationData[nationId].Nuovi_positivi) + " (<i>" + nuoviPositivi + "</i>)"},
		{"ricoverati_con_sintomi", "\n<b>Ricoverati con sintomi: </b>" + strconv.Itoa(nationData[nationId].Ricoverati_con_sintomi) + " (<i>" + nuoviRicoveratiConSintomi + "</i>)"},
		{"terapia_intensiva", "\n<b>Terapia intensiva: </b>" + strconv.Itoa(nationData[nationId].Terapia_intensiva) + " (<i>" + nuoviTerapiaIntensiva + "</i>)"},
		{"totale_ospedalizzati", "\n<b>Totale ospedalizzati: </b>" + strconv.Itoa(nationData[nationId].Totale_ospedalizzati) + " (<i>" + nuoviOspedalizzati + "</i>)"},
		{"isolamento_domiciliare", "\n<b>Isolamento domiciliare: </b>" + strconv.Itoa(nationData[nationId].Isolamento_domiciliare) + " (<i>" + nuoviIsolamentoDomiciliare + "</i>)"},
		{"tamponi", "\n<b>Tamponi effettuati: </b>" + strconv.Itoa(nationData[nationId].Tamponi) + " (<i>" + nuoviTamponi + "</i>)"},
	}
	for _, c := range candidates {
		for _, requested := range fieldsNames {
			if requested == c.key {
				msg += c.line
			}
		}
	}

	return msg
}
// Returns a caption with the selected province fields data
func setCaptionConfrontoProvincia(provinceId int, fieldsNames []string) string {
provinceIndexes := covidgraphs.GetProvinceIndexesByName(&provincesData, provincesData[provinceId].Denominazione_provincia)
todayIndex := (*provinceIndexes)[len(*provinceIndexes)-1]
yesterdayIndex := (*provinceIndexes)[len(*provinceIndexes)-2]
_, nuoviTotale := covidgraphs.CalculateDelta(provincesData[yesterdayIndex].Totale_casi, provincesData[todayIndex].Totale_casi)
_, nuoviPositivi := covidgraphs.CalculateDelta(provincesData[yesterdayIndex].NuoviCasi, provincesData[todayIndex].NuoviCasi)
data, err := time.Parse("2006-01-02T15:04:05", provincesData[provinceId].Data)
if err != nil {
log.Println("error parsing data for province caption")
}
msg := "<b>Andamento provincia di " + provincesData[provinceId].Denominazione_provincia + " " + data.Format("2006-01-02") + "</b>\n"
for _, v := range fieldsNames {
if v == "totale_casi" {
msg += "\n<b>Totale positivi: </b>" + strconv.Itoa(provincesData[provinceId].Totale_casi) + " (<i>" + nuoviTotale + "</i>)"
}
}
for _, v := range fieldsNames {
if v == "nuovi_positivi" {
msg += "\n<b>Nuovi positivi: </b>" + strconv.Itoa(provincesData[provinceId].NuoviCasi) + " (<i>" + nuoviPositivi + "</i>)"
}
}
return msg
} | captions.go | 0.527073 | 0.503601 | captions.go | starcoder |
package mandelbrot
import (
"image"
"image/color"
"sync"
)
// pointLocation is a location on the complex plane.
type pointLocation complex128

// If |current|^2 > max then we know that the point is _not_ in
// the Mandelbrot set.
const max float64 = 4.0

// maxIterations bounds the escape-time loop for each point.
const maxIterations = 5000

// newPointLocation returns the complex point r + i*i.
func newPointLocation(r float64, i float64) pointLocation {
	return pointLocation(complex(r, i))
}

// abs2 returns the squared magnitude |c|^2 (cheaper than taking the
// square root of the absolute value).
func (c pointLocation) abs2() float64 {
	// real/imag accept any type with a complex underlying type, so the
	// previous explicit complex128 conversions were redundant.
	r := real(c)
	i := imag(c)
	return r*r + i*i
}
// Point tracks the escape-time iteration state of one location on the
// complex plane.
type Point struct {
	location  pointLocation // the constant C in z = z^2 + C
	iteration int64         // number of iterations performed so far
	current   pointLocation // current value of z
	inSet     bool          // membership result; valid once processed is true
	processed bool          // whether DetermineMembership has run
}
// Iterate advances the point through one step of z = z^2 + C.
func (p *Point) Iterate() {
	p.iteration++
	p.current = p.current*p.current + p.location
}
// IsMandelbrot reports the last membership result. It is only meaningful
// after DetermineMembership has run (false before that).
func (p *Point) IsMandelbrot() bool {
	return p.inSet
}
// DetermineMembership runs the escape-time test for the point and caches the
// result: the point is considered inside the set when its orbit has not
// escaped (|z|^2 < max) after maxIterations steps. Subsequent calls return
// the cached answer.
func (p *Point) DetermineMembership() bool {
	if p.processed {
		return p.inSet
	}
	for i := 0; i < maxIterations && p.current.abs2() < max; i++ {
		p.Iterate()
	}
	// Collapses the previous if/else: membership is simply whether the orbit
	// is still bounded.
	p.inSet = p.current.abs2() < max
	p.processed = true
	return p.inSet
}
// NewPoint returns a point at (r, i) with no iterations performed yet.
func NewPoint(r float64, i float64) Point {
	// iteration, inSet and processed start at their zero values.
	return Point{
		location: newPointLocation(r, i),
		current:  newPointLocation(0, 0),
	}
}
// Grid is a rectangular sampling of the complex plane centered on center,
// with width x height points spaced pixelWidth apart.
type Grid struct {
	center        complex128
	width, height int64
	pixelWidth    float64
	// [0][0] is bottom left
	// [width-1][0] is bottom right
	// [0][height-1] is top left
	// [width-1][height-1] is top right
	points [][]*Point
}
// NewGrid builds a width x height grid of points centered on center, with
// adjacent points spaced pixelWidth apart. points[0][0] is the bottom-left
// corner.
func NewGrid(center complex128, width, height int64, pixelWidth float64) Grid {
	// Complex coordinate of points[0][0].
	origin := center - complex(
		0.5*pixelWidth*float64(width),
		0.5*pixelWidth*float64(height),
	)

	points := make([][]*Point, width)
	for col := int64(0); col < width; col++ {
		column := make([]*Point, height)
		for row := int64(0); row < height; row++ {
			p := NewPoint(
				real(origin)+float64(col)*pixelWidth,
				imag(origin)+float64(row)*pixelWidth,
			)
			column[row] = &p
		}
		points[col] = column
	}

	return Grid{
		center:     center,
		width:      width,
		height:     height,
		pixelWidth: pixelWidth,
		points:     points,
	}
}
// Points exposes the underlying point grid, indexed [column][row].
// The returned slices alias the grid's internal state.
func (g *Grid) Points() [][]*Point {
	return g.points
}
// IterateAll runs DetermineMembership for every point in the grid
// concurrently and blocks until all points are processed.
func (g *Grid) IterateAll() {
	var wg sync.WaitGroup
	// One goroutine per column instead of one per pixel: the result is
	// identical but the scheduler overhead is far lower for large grids.
	for _, col := range g.points {
		wg.Add(1)
		go func(col []*Point) {
			defer wg.Done()
			for _, p := range col {
				p.DetermineMembership()
			}
		}(col)
	}
	wg.Wait()
}
// GenerateImageWithPalette renders the grid to a CMYK image, one pixel per
// point, colored by the given palette.
// NOTE(review): image y grows downward while grid row 0 is the bottom edge,
// so the output may be vertically mirrored — confirm the intended
// orientation.
func (g *Grid) GenerateImageWithPalette(p ColorPalette) image.Image {
	r := image.Rect(0, 0, len(g.points), len(g.points[0]))
	im := image.NewCMYK(r)
	// i is the column index, j the row index (see the Grid.points layout).
	for i, row := range g.points {
		for j, point := range row {
			im.Set(i, j, p.Color(*point))
		}
	}
	return im
}
func (g *Grid) GenerateImage() image.Image {
green := color.RGBA{
R: 0,
G: 0xff,
B: 0,
A: 0xff,
}
p := NewLinearPalette(g, color.Black, green, color.Black)
return g.GenerateImageWithPalette(p)
} | mandelbrot.go | 0.777553 | 0.468791 | mandelbrot.go | starcoder |
package telemetry
import (
"github.com/prometheus/client_golang/prometheus"
)
// Counter tracks how many times something is happening.
type Counter interface {
	// Initialize creates the counter with the given tags and initializes it to 0.
	// This method is intended to be used when the counter value is important to
	// send even before any incrementing/addition is done on it.
	Initialize(tagsValue ...string)
	// Inc increments the counter with the given tags value.
	Inc(tagsValue ...string)
	// Add adds the given value to the counter with the given tags value.
	Add(value float64, tagsValue ...string)
	// Delete deletes the value for the counter with the given tags value.
	Delete(tagsValue ...string)
	// IncWithTags increments the counter with the given tags.
	// Even if less convenient, this signature could be used in hot path
	// instead of Inc(...string) to avoid escaping the parameters on the heap.
	IncWithTags(tags map[string]string)
	// AddWithTags adds the given value to the counter with the given tags.
	// Even if less convenient, this signature could be used in hot path
	// instead of Add(float64, ...string) to avoid escaping the parameters on the heap.
	AddWithTags(value float64, tags map[string]string)
	// DeleteWithTags deletes the value for the counter with the given tags.
	// Even if less convenient, this signature could be used in hot path
	// instead of Delete(...string) to avoid escaping the parameters on the heap.
	DeleteWithTags(tags map[string]string)
	// WithValues returns SimpleCounter for this metric with the given tag values.
	WithValues(tagsValue ...string) SimpleCounter
	// WithTags returns SimpleCounter for this metric with the given tag values.
	WithTags(tags map[string]string) SimpleCounter
}
// NewCounter creates a Counter with default options for telemetry purpose.
// Current implementation used: Prometheus Counter.
func NewCounter(subsystem, name string, tags []string, help string) Counter {
	return NewCounterWithOpts(subsystem, name, tags, help, DefaultOptions)
}
// NewCounterWithOpts creates a Counter with the given options for telemetry purpose.
// See NewCounter()
func NewCounterWithOpts(subsystem, name string, tags []string, help string, opts Options) Counter {
name = opts.NameWithSeparator(subsystem, name)
c := &promCounter{
pc: prometheus.NewCounterVec(
prometheus.CounterOpts{
Subsystem: subsystem,
Name: name,
Help: help,
},
tags,
),
}
telemetryRegistry.MustRegister(c.pc)
return c
} | pkg/telemetry/counter.go | 0.687105 | 0.409427 | counter.go | starcoder |
package netconf
import (
"time"
)
// NetworkEpoch globally defines a verification epoch (signing plus validation
// epoch) on the network.
type NetworkEpoch struct {
QuorumM uint64 // the quorum
NumberOfMintsN uint64 // total number of mints
SignStart time.Time // start of signing epoch
SignEnd time.Time // end of signing epoch
ValidateEnd time.Time // end of validation epoch
MintsAdded []IdentityKey `json:",omitempty"` // mints added in this epoch
MintsRemoved []IdentityKey `json:",omitempty"` // mints removed in this epoch
MintsReplaced []KeyReplacement `json:",omitempty"` // mints replaced in this epoch
DBCTypesAdded []DBCType `json:",omitempty"` // DBC types added in this epoch
DBCTypesRemoved []DBCType `json:",omitempty"` // DBC types removed in this epoch
}
// Validate checks the epoch's quorum parameters and the ordering of its
// signing/validation time window. It returns the first violated
// constraint's error, or nil when the epoch is well-formed.
//
// Constraints: 0 < m <= n, m > n/2 (majority quorum), and
// SignStart < SignEnd < ValidateEnd.
func (e *NetworkEpoch) Validate() error {
	switch {
	case e.QuorumM == 0:
		return ErrZeroM
	case e.NumberOfMintsN == 0:
		return ErrZeroN
	case e.QuorumM > e.NumberOfMintsN:
		return ErrMGreaterN
	case e.QuorumM <= e.NumberOfMintsN/2:
		// quorum must be a strict majority
		return ErrQuorumTooSmall
	case !e.SignStart.Before(e.SignEnd):
		return ErrSignEpochStartNotBeforeSignEnd
	case !e.SignEnd.Before(e.ValidateEnd):
		return ErrSignEpochEndNotBeforeValidateEnd
	}
	return nil
}
// MintsDisjunct makes sure the MintsAdded, MintsRemoved, and MintsReplaced
// sets are pairwise disjunct (a replacement's new key must not collide with
// any other set, and its old key must not also be added or removed).
// It returns ErrMintsOverlap on the first collision found.
func (e *NetworkEpoch) MintsDisjunct() error {
	added := make(map[string]bool, len(e.MintsAdded))
	removed := make(map[string]bool, len(e.MintsRemoved))
	replaced := make(map[string]bool, len(e.MintsReplaced))
	for _, ik := range e.MintsAdded {
		added[ik.MarshalID()] = true
	}
	for _, ik := range e.MintsRemoved {
		removed[ik.MarshalID()] = true
	}
	for _, r := range e.MintsReplaced {
		replaced[r.OldKey.MarshalID()] = true
	}
	for _, r := range e.MintsReplaced {
		newID := r.NewKey.MarshalID()
		oldID := r.OldKey.MarshalID()
		// a replacement's new key must be entirely fresh
		if added[newID] || removed[newID] || replaced[newID] {
			return ErrMintsOverlap
		}
		// the key being replaced cannot also be added or removed
		if added[oldID] || removed[oldID] {
			return ErrMintsOverlap
		}
	}
	for _, ik := range e.MintsRemoved {
		if added[ik.MarshalID()] {
			return ErrMintsOverlap
		}
	}
	return nil
}
// DBCTypesDisjunct makes sure the DBCTypesAdded and DBCTypesRemoved sets from the epoch are disjunct.
func (e *NetworkEpoch) DBCTypesDisjunct() error {
dbcTypes := make(map[DBCType]bool)
for _, add := range e.DBCTypesAdded {
dbcTypes[add] = true
}
for _, remove := range e.DBCTypesRemoved {
if dbcTypes[remove] {
return ErrDBCTypesOverlap
}
}
return nil
} | netconf/network_epoch.go | 0.734881 | 0.441673 | network_epoch.go | starcoder |
package multiregexp
import (
"regexp"
)
// Regexps is a set of regular expression.
type Regexps []*regexp.Regexp
// Match reports whether the byte slice b contains any match in the set of the regular expression res.
// When matchType is AND, the result of each match will be logically joined with AND.
// Otherwise the result of each match will be joined with OR.
func (rex Regexps) Match(b []byte, matchType ...string) bool {
if len(matchType) != 0 && matchType[0] == "AND" {
for _, re := range rex {
if !re.Match(b) {
return false
}
}
return true
}
for _, re := range rex {
if re.Match(b) {
return true
}
}
return false
}
// MatchWhich reports the index of matched regular expression in the set.
// It returns an empty slice when no regular expression is matched.
func (rex Regexps) MatchWhich(b []byte) []int {
var match []int
for i, re := range rex {
if re.Match(b) {
match = append(match, i)
}
}
return match
}
// MatchString reports whether the string s contains any match in the set of the regular expression res.
// When matchType is AND, the result of each match will be logically joined with AND.
// Otherwise the result of each match will be joined with OR.
func (rex Regexps) MatchString(s string, matchType ...string) bool {
if len(matchType) != 0 && matchType[0] == "AND" {
for _, re := range rex {
if !re.MatchString(s) {
return false
}
}
return true
}
for _, re := range rex {
if re.MatchString(s) {
return true
}
}
return false
}
// MatchStringWhich reports the index of matched regular expression in the set.
// It returns an empty slice when no regular expression is matched.
func (rex Regexps) MatchStringWhich(s string) []int {
var match []int
for i, re := range rex {
if re.MatchString(s) {
match = append(match, i)
}
}
return match
}
// Append adds regular expression into the set of the regular expression res.
func Append(rex Regexps, regs ...*regexp.Regexp) Regexps {
for _, re := range regs {
rex = append(rex, re)
}
return rex
}
// Append adds regular expression in Regexps into another Regexps.
func (rex Regexps) Append(rexs ...Regexps) Regexps {
for _, regexps := range rexs {
for _, re := range regexps {
rex = append(rex, re)
}
}
return rex
} | multiregexp.go | 0.721253 | 0.484197 | multiregexp.go | starcoder |
package yologo
import (
"fmt"
"github.com/chewxy/hm"
"github.com/pkg/errors"
"gorgonia.org/gorgonia"
"gorgonia.org/tensor"
)
// yoloDiffOp wraps yoloOp to implement the backward (gradient) pass.
type yoloDiffOp struct {
	yoloOp
}
/* Methods to match gorgonia.Op interface */
// Arity: the diff op takes two inputs (forward input and incoming gradient).
func (op *yoloDiffOp) Arity() int { return 2 }
// Type: (4D tensor, 3D tensor) -> 4D tensor of the same element type.
func (op *yoloDiffOp) Type() hm.Type {
	a := hm.TypeVariable('a')
	t := gorgonia.TensorType{Dims: 4, Of: a}
	o := gorgonia.TensorType{Dims: 3, Of: a}
	return hm.NewFnType(t, o, t)
}
// ReturnsPtr reports that Do returns a pointer-backed value.
func (op *yoloDiffOp) ReturnsPtr() bool { return true }
// CallsExtern reports that the op needs no external (e.g. CUDA) calls.
func (op *yoloDiffOp) CallsExtern() bool { return false }
// OverwritesInput: -1 means no input is overwritten in place.
func (op *yoloDiffOp) OverwritesInput() int { return -1 }
// InferShape returns a clone of the first input's shape: the gradient
// has the same shape as the forward input.
func (op *yoloDiffOp) InferShape(inputs ...gorgonia.DimSizer) (tensor.Shape, error) {
	s := inputs[0].(tensor.Shape).Clone()
	return s, nil
}
// Do computes the gradient of the YOLO op w.r.t. its input.
// inputs[0] is the forward input, inputs[1] the incoming gradient
// (shaped like the forward output). All training parameters must have
// been populated by the forward pass. Only Float32 is implemented.
// The gradient is reshaped/transposed back into the forward input's
// (1, masks*(5+classes), grid, grid) layout before being returned.
func (op *yoloDiffOp) Do(inputs ...gorgonia.Value) (gorgonia.Value, error) {
	if op.training == nil {
		return nil, fmt.Errorf("Training parameters for yoloOp were not set")
	}
	if op.training.inputs == nil {
		return nil, fmt.Errorf("Training parameter 'inputs' for yoloOp were not set")
	}
	if op.training.scales == nil {
		return nil, fmt.Errorf("Training parameter 'scales' for yoloOp were not set")
	}
	if op.training.targets == nil {
		return nil, fmt.Errorf("Training parameter 'targets' for yoloOp were not set")
	}
	if op.training.bboxes == nil {
		return nil, fmt.Errorf("Training parameter 'bboxes' for yoloOp were not set")
	}
	in := inputs[0]
	output := inputs[1]
	// Allocate the gradient tensor with the incoming gradient's shape,
	// on the same engine as the input.
	inGrad := tensor.New(tensor.Of(in.Dtype()), tensor.WithShape(output.Shape().Clone()...), tensor.WithEngine(in.(tensor.Tensor).Engine()))
	switch in.Dtype() {
	case tensor.Float32:
		inGradData := inGrad.Data().([]float32)
		outGradData := output.Data().([]float32)
		op.f32(inGradData, outGradData, op.training.scales, op.training.inputs, op.training.targets, op.training.bboxes)
		break // redundant in Go, kept for clarity
	case tensor.Float64:
		return nil, fmt.Errorf("yoloDiffOp for Float64 is not implemented yet")
	default:
		return nil, fmt.Errorf("yoloDiffOp supports only Float32/Float64 types")
	}
	// Undo the forward pass's layout change: back to channel-major 4D.
	err := inGrad.Reshape(1, op.gridSize*op.gridSize, (op.numClasses+5)*len(op.masks))
	if err != nil {
		return nil, errors.Wrap(err, "Can't reshape in yoloDiffOp (1)")
	}
	err = inGrad.T(0, 2, 1)
	if err != nil {
		return nil, errors.Wrap(err, "Can't safely transponse in yoloDiffOp (1)")
	}
	err = inGrad.Transpose()
	if err != nil {
		return nil, errors.Wrap(err, "Can't transponse in yoloDiffOp (1)")
	}
	err = inGrad.Reshape(1, len(op.masks)*(5+op.numClasses), op.gridSize, op.gridSize)
	if err != nil {
		return nil, errors.Wrap(err, "Can't reshape in yoloDiffOp (2)")
	}
	return inGrad, nil
}
// DoDiff is not implemented: differentiation happens symbolically via
// SymDiff, never through the forward-mode DoDiff path.
func (op *yoloOp) DoDiff(ctx gorgonia.ExecutionContext, inputs gorgonia.Nodes, output *gorgonia.Node) (err error) {
	return fmt.Errorf("DoDiff for yoloOp is not implemented")
}
// DiffWRT reports that the op is differentiable w.r.t. its single input.
func (op *yoloOp) DiffWRT(inputs int) []bool { return []bool{true} }
// SymDiff returns the symbolic gradient node for the YOLO op: a
// yoloDiffOp (carrying a copy of this op's configuration) applied to
// the op's input and the incoming gradient node.
func (op *yoloOp) SymDiff(inputs gorgonia.Nodes, output, grad *gorgonia.Node) (retVal gorgonia.Nodes, err error) {
	if err = checkArity(op, len(inputs)); err != nil {
		return
	}
	in := inputs[0]
	// Copy the forward op's settings into the backward op (replaces the
	// former two-step var declaration + assignment with one literal).
	diff := &yoloDiffOp{yoloOp: *op}
	var ret *gorgonia.Node
	if ret, err = gorgonia.ApplyOp(diff, in, grad); err != nil {
		return nil, err
	}
	return gorgonia.Nodes{ret}, nil
}
/* Unexported methods */
// f32 computes the float32 gradient in place. The layout interleaves,
// per anchor box: 4 coordinate values, 1 objectness value, numClasses
// class scores. Coordinates use an MSE-style gradient scaled by
// scales^2; objectness/class entries use a BCE-style gradient driven by
// the predicted bboxes and binary targets. Entries whose incoming
// gradient is zero are left zero.
func (op *yoloDiffOp) f32(inGradData, outGradData, scales, inputs, targets, bboxes []float32) {
	for i := range inGradData {
		inGradData[i] = 0
	}
	// step over one box record (4 coords + 1 objectness + classes) at a time
	for i := 0; i < len(outGradData); i = i + 5 + op.numClasses {
		for j := 0; j < 4; j++ {
			inGradData[i+j] = outGradData[i+j] * (scales[i+j] * scales[i+j] * (inputs[i+j] - targets[i+j]))
		}
		for j := 4; j < 5+op.numClasses; j++ {
			if outGradData[i+j] != 0 {
				if targets[i+j] == 0 {
					inGradData[i+j] = outGradData[i+j] * (bboxes[i+j])
				} else {
					inGradData[i+j] = outGradData[i+j] * (1 - bboxes[i+j])
				}
			}
		}
	}
}
package position
import (
"fmt"
"strings"
"unicode/utf8"
)
// Position represents the position of a character in a file.
type Position struct {
	Offset int // byte offset from the start of the file
	Line   int // line number (1-based for valid positions)
	Column int // column number (0-based)
}

// Increment is an incrementer of positions.
type Increment = Position

// IsValid reports whether the position is valid: line > 0 and
// column/offset >= 0.
func (p Position) IsValid() bool {
	if p.Line <= 0 {
		return false
	}
	return p.Column >= 0 && p.Offset >= 0
}

// Next returns the position immediately after the given character.
// Invalid runes leave the position unchanged.
func (p Position) Next(r rune) Position {
	w := utf8.RuneLen(r)
	if w <= 0 {
		return p
	}
	p.Offset += w
	if r != '\n' {
		p.Column++
		return p
	}
	p.Line++
	p.Column = 0
	return p
}

// Prev returns the position before the given character. Newlines and
// invalid runes are not undone (the previous line's width is unknown).
func (p Position) Prev(r rune) Position {
	if w := utf8.RuneLen(r); r != '\n' && w > 0 {
		p.Offset -= w
		p.Column--
	}
	return p
}

// NextString returns the cursor position after the given string.
func (p Position) NextString(s string) Position {
	next := p
	for _, r := range s {
		next = next.Next(r)
	}
	return next
}

// String returns the form "L{line},C{column}".
func (p Position) String() string {
	return fmt.Sprintf("L%d,C%d", p.Line, p.Column)
}

// Increment returns p with line, column and offset shifted by inc.
func (p Position) Increment(inc Increment) Position {
	return Position{
		Line:   p.Line + inc.Line,
		Column: p.Column + inc.Column,
		Offset: p.Offset + inc.Offset,
	}
}

// IncrementPosition returns p shifted by the given line, column and
// offset increments.
func (p Position) IncrementPosition(incLine, incColumn, incOffset int) Position {
	return p.Increment(New(incLine, incColumn, incOffset))
}

// New returns a position with the given line, column and offset.
func New(line, column, offset int) Position {
	return Position{Line: line, Column: column, Offset: offset}
}

// Cmp compares two positions by line, then column. It returns -1 if p1
// is before p2, 1 if p1 is after p2, and 0 otherwise.
func (p1 Position) Cmp(p2 Position) int {
	if p1.Line != p2.Line {
		if p1.Line < p2.Line {
			return -1
		}
		return 1
	}
	if p1.Column != p2.Column {
		if p1.Column < p2.Column {
			return -1
		}
		return 1
	}
	return 0
}
// Eq reports whether p1 and p2 denote the same position.
func (p1 Position) Eq(p2 Position) bool {
	return p1.Cmp(p2) == 0
}
// Ne reports whether p1 and p2 denote different positions.
func (p1 Position) Ne(p2 Position) bool {
	return p1.Cmp(p2) != 0
}
// Ge reports whether p1 is at or after p2.
func (p1 Position) Ge(p2 Position) bool {
	return p1.Cmp(p2) >= 0
}
// Le reports whether p1 is at or before p2.
func (p1 Position) Le(p2 Position) bool {
	return p1.Cmp(p2) <= 0
}
// Gt reports whether p1 is strictly after p2.
func (p1 Position) Gt(p2 Position) bool {
	return p1.Cmp(p2) > 0
}
// Lt reports whether p1 is strictly before p2.
func (p1 Position) Lt(p2 Position) bool {
	return p1.Cmp(p2) < 0
}
// Between reports whether p lies in the inclusive range [p1, p2].
func (p Position) Between(p1, p2 Position) bool {
	return p.Ge(p1) && p.Le(p2)
}
// Diff returns the component-wise difference p1 - p2 as an Increment.
func (p1 Position) Diff(p2 Position) Increment {
	return New(
		p1.Line-p2.Line,
		p1.Column-p2.Column,
		p1.Offset-p2.Offset,
	)
}
// Blank returns the whitespace string spanning from p1 to p2: spaces
// when both are on the same line, otherwise the needed newlines
// followed by p2.Column spaces. It returns "" when p2 is not after p1.
func (p1 Position) Blank(p2 Position) string {
	inc := p2.Diff(p1)
	if inc.Line == 0 && inc.Column > 0 {
		return strings.Repeat(" ", inc.Column)
	} else if inc.Line > 0 {
		s := strings.Repeat("\n", inc.Line)
		if p2.Column > 0 {
			s += strings.Repeat(" ", p2.Column)
		}
		return s
	}
	return ""
}
package rawp
import (
"fmt"
"image"
"image/color"
"image/draw"
"reflect"
imageExt "github.com/chai2010/image"
colorExt "github.com/chai2010/image/color"
)
// pixDecoder decodes raw interleaved pixel data into a concrete image
// type chosen from its channel count and per-channel data type.
type pixDecoder struct {
	Channels int // 1/2/3/4
	DataType reflect.Kind // Uint8/Uint16/Int32/Int64/Float32/Float64
	Width int // need for Decode
	Height int // need for Decode
}
// Decode converts raw pixel bytes into a drawable image. The concrete
// decoder is dispatched on (Channels, DataType); an error is returned
// for unsupported combinations or (from the per-format decoders) for a
// data slice whose length does not match Width*Height*pixelSize.
func (p *pixDecoder) Decode(data []byte, buf imageExt.Buffer) (m draw.Image, err error) {
	// Gray/Gray16/Gray32i/Gray32f/Gray64i/Gray64f (1 channel)
	// NOTE(review): Int32/Int64 grayscale is routed to the float
	// decoders below — confirm the integer formats are meant to share
	// them.
	if p.Channels == 1 && p.DataType == reflect.Uint8 {
		return p.decodeGray(data, buf)
	}
	if p.Channels == 1 && p.DataType == reflect.Uint16 {
		return p.decodeGray16(data, buf)
	}
	if p.Channels == 1 && p.DataType == reflect.Int32 {
		return p.decodeGray32f(data, buf)
	}
	if p.Channels == 1 && p.DataType == reflect.Float32 {
		return p.decodeGray32f(data, buf)
	}
	if p.Channels == 1 && p.DataType == reflect.Int64 {
		return p.decodeGray64f(data, buf)
	}
	if p.Channels == 1 && p.DataType == reflect.Float64 {
		return p.decodeGray64f(data, buf)
	}
	// GrayA/GrayA32/GrayA64i/GrayA64f/GrayA128i/GrayA128f
	// BUG FIX: these branches previously tested Channels == 1, which
	// duplicated the grayscale conditions above and made all six
	// gray+alpha branches unreachable dead code. Gray+alpha is 2
	// channels.
	if p.Channels == 2 && p.DataType == reflect.Uint8 {
		return p.decodeGrayA(data, buf)
	}
	if p.Channels == 2 && p.DataType == reflect.Uint16 {
		return p.decodeGrayA32(data, buf)
	}
	if p.Channels == 2 && p.DataType == reflect.Int32 {
		return p.decodeGrayA64i(data, buf)
	}
	if p.Channels == 2 && p.DataType == reflect.Float32 {
		return p.decodeGrayA64f(data, buf)
	}
	if p.Channels == 2 && p.DataType == reflect.Int64 {
		return p.decodeGrayA128i(data, buf)
	}
	if p.Channels == 2 && p.DataType == reflect.Float64 {
		return p.decodeGrayA128f(data, buf)
	}
	// RGB/RGB48/RGB96i/RGB96f/RGB192i/RGB192f (3 channels)
	if p.Channels == 3 && p.DataType == reflect.Uint8 {
		return p.decodeRGB(data, buf)
	}
	if p.Channels == 3 && p.DataType == reflect.Uint16 {
		return p.decodeRGB48(data, buf)
	}
	if p.Channels == 3 && p.DataType == reflect.Int32 {
		return p.decodeRGB96i(data, buf)
	}
	if p.Channels == 3 && p.DataType == reflect.Float32 {
		return p.decodeRGB96f(data, buf)
	}
	if p.Channels == 3 && p.DataType == reflect.Int64 {
		return p.decodeRGB192i(data, buf)
	}
	if p.Channels == 3 && p.DataType == reflect.Float64 {
		return p.decodeRGB192f(data, buf)
	}
	// RGBA/RGBA64/RGBA128f (4 channels)
	if p.Channels == 4 && p.DataType == reflect.Uint8 {
		return p.decodeRGBA(data, buf)
	}
	if p.Channels == 4 && p.DataType == reflect.Uint16 {
		return p.decodeRGBA64(data, buf)
	}
	if p.Channels == 4 && p.DataType == reflect.Int32 {
		return p.decodeRGBA128i(data, buf)
	}
	if p.Channels == 4 && p.DataType == reflect.Float32 {
		return p.decodeRGBA128f(data, buf)
	}
	if p.Channels == 4 && p.DataType == reflect.Int64 {
		return p.decodeRGBA256i(data, buf)
	}
	if p.Channels == 4 && p.DataType == reflect.Float64 {
		return p.decodeRGBA256f(data, buf)
	}
	// Unknown
	err = fmt.Errorf(
		"image/rawp: Decode, unknown image format, channels = %v, dataType = %v",
		p.Channels, p.DataType,
	)
	return
}
// getPixelSize returns the size in bytes of one pixel: the channel
// count times the byte width of the channel data type. It panics for an
// unsupported DataType (a programming error in this package).
func (p *pixDecoder) getPixelSize() int {
	var channelBytes int
	switch p.DataType {
	case reflect.Uint8:
		channelBytes = 1
	case reflect.Uint16:
		channelBytes = 2
	case reflect.Int32, reflect.Float32:
		channelBytes = 4
	case reflect.Int64, reflect.Float64:
		channelBytes = 8
	default:
		panic("image/rawp: getPixelSize, unreachable")
	}
	return p.Channels * channelBytes
}
// getImageDataSize returns the expected byte length of a full frame.
func (p *pixDecoder) getImageDataSize() int {
	return p.getPixelSize() * p.Width * p.Height
}
// decodeGray copies 8-bit grayscale rows into a new Gray image.
func (p *pixDecoder) decodeGray(data []byte, buf imageExt.Buffer) (m draw.Image, err error) {
	if size := p.getImageDataSize(); len(data) != size {
		err = fmt.Errorf("image/rawp: decodeGray, bad data size, expect = %d, got = %d", size, len(data))
		return
	}
	gray := newGray(image.Rect(0, 0, p.Width, p.Height), buf)
	var off = 0
	for y := 0; y < p.Height; y++ {
		copy(gray.Pix[y*gray.Stride:][:p.Width], data[off:])
		off += p.Width
	}
	m = gray
	return
}
// decodeGray16 converts 16-bit grayscale samples pixel by pixel.
// NOTE(review): `builtin.Slice` (and `builtin.Float32` below) is not in
// the visible import list of this file — confirm where it comes from.
// The reinterpret-cast implies the data is in native byte order.
func (p *pixDecoder) decodeGray16(data []byte, buf imageExt.Buffer) (m draw.Image, err error) {
	if size := p.getImageDataSize(); len(data) != size {
		err = fmt.Errorf("image/rawp: decodeGray16, bad data size, expect = %d, got = %d", size, len(data))
		return
	}
	gray16 := newGray16(image.Rect(0, 0, p.Width, p.Height), buf)
	var off = 0
	for y := 0; y < p.Height; y++ {
		u16Pix := builtin.Slice(data[off:], reflect.TypeOf([]uint16(nil))).([]uint16)
		for x := 0; x < p.Width; x++ {
			gray16.SetGray16(x, y, color.Gray16{u16Pix[x]})
		}
		off += p.Width * 2
	}
	m = gray16
	return
}
// decodeGray32f copies 32-bit float grayscale rows (4 bytes/pixel).
func (p *pixDecoder) decodeGray32f(data []byte, buf imageExt.Buffer) (m draw.Image, err error) {
	if size := p.getImageDataSize(); len(data) != size {
		err = fmt.Errorf("image/rawp: decodeGray32f, bad data size, expect = %d, got = %d", size, len(data))
		return
	}
	gray32f := newGray32f(image.Rect(0, 0, p.Width, p.Height), buf)
	var off = 0
	for y := 0; y < p.Height; y++ {
		copy(gray32f.Pix[y*gray32f.Stride:][:p.Width*4], data[off:])
		off += p.Width * 4
	}
	m = gray32f
	return
}
// decodeRGB converts 8-bit interleaved RGB triples pixel by pixel.
func (p *pixDecoder) decodeRGB(data []byte, buf imageExt.Buffer) (m draw.Image, err error) {
	if size := p.getImageDataSize(); len(data) != size {
		err = fmt.Errorf("image/rawp: decodeRGB, bad data size, expect = %d, got = %d", size, len(data))
		return
	}
	rgb := newRGB(image.Rect(0, 0, p.Width, p.Height), buf)
	var off = 0
	for y := 0; y < p.Height; y++ {
		for x := 0; x < p.Width; x++ {
			rgb.SetRGB(x, y, colorExt.RGB{
				R: data[off+0],
				G: data[off+1],
				B: data[off+2],
			})
			off += 3
		}
	}
	m = rgb
	return
}
// decodeRGB48 converts 16-bit interleaved RGB triples pixel by pixel.
func (p *pixDecoder) decodeRGB48(data []byte, buf imageExt.Buffer) (m draw.Image, err error) {
	if size := p.getImageDataSize(); len(data) != size {
		err = fmt.Errorf("image/rawp: decodeRGB48, bad data size, expect = %d, got = %d", size, len(data))
		return
	}
	rgb48 := newRGB48(image.Rect(0, 0, p.Width, p.Height), buf)
	var off = 0
	for y := 0; y < p.Height; y++ {
		u16Pix := builtin.Slice(data[off:], reflect.TypeOf([]uint16(nil))).([]uint16)
		for x := 0; x < p.Width; x++ {
			rgb48.SetRGB48(x, y, colorExt.RGB48{
				R: u16Pix[x*3+0],
				G: u16Pix[x*3+1],
				B: u16Pix[x*3+2],
			})
		}
		off += p.Width * 6
	}
	m = rgb48
	return
}
// decodeRGB96f converts 32-bit float interleaved RGB triples.
func (p *pixDecoder) decodeRGB96f(data []byte, buf imageExt.Buffer) (m draw.Image, err error) {
	if size := p.getImageDataSize(); len(data) != size {
		err = fmt.Errorf("image/rawp: decodeRGB96f, bad data size, expect = %d, got = %d", size, len(data))
		return
	}
	rgb96f := newRGB96f(image.Rect(0, 0, p.Width, p.Height), buf)
	var off = 0
	for y := 0; y < p.Height; y++ {
		for x := 0; x < p.Width; x++ {
			rgb96f.SetRGB96f(x, y, colorExt.RGB96f{
				R: builtin.Float32(data[off+0:]),
				G: builtin.Float32(data[off+4:]),
				B: builtin.Float32(data[off+8:]),
			})
			off += 12
		}
	}
	m = rgb96f
	return
}
// decodeRGBA copies 8-bit RGBA rows directly (4 bytes/pixel).
func (p *pixDecoder) decodeRGBA(data []byte, buf imageExt.Buffer) (m draw.Image, err error) {
	if size := p.getImageDataSize(); len(data) != size {
		err = fmt.Errorf("image/rawp: decodeRGBA, bad data size, expect = %d, got = %d", size, len(data))
		return
	}
	rgba := newRGBA(image.Rect(0, 0, p.Width, p.Height), buf)
	var off = 0
	for y := 0; y < p.Height; y++ {
		copy(rgba.Pix[y*rgba.Stride:][:p.Width*4], data[off:])
		off += p.Width * 4
	}
	m = rgba
	return
}
// decodeRGBA64 converts 16-bit RGBA quadruples pixel by pixel.
func (p *pixDecoder) decodeRGBA64(data []byte, buf imageExt.Buffer) (m draw.Image, err error) {
	if size := p.getImageDataSize(); len(data) != size {
		err = fmt.Errorf("image/rawp: decodeRGBA64, bad data size, expect = %d, got = %d", size, len(data))
		return
	}
	rgba64 := newRGBA64(image.Rect(0, 0, p.Width, p.Height), buf)
	var off = 0
	for y := 0; y < p.Height; y++ {
		u16Pix := builtin.Slice(data[off:], reflect.TypeOf([]uint16(nil))).([]uint16)
		for x := 0; x < p.Width; x++ {
			rgba64.SetRGBA64(x, y, color.RGBA64{
				R: u16Pix[x*4+0],
				G: u16Pix[x*4+1],
				B: u16Pix[x*4+2],
				A: u16Pix[x*4+3],
			})
		}
		off += p.Width * 8
	}
	m = rgba64
	return
}
// decodeRGBA128f copies 32-bit float RGBA rows directly (16 bytes/pixel).
func (p *pixDecoder) decodeRGBA128f(data []byte, buf imageExt.Buffer) (m draw.Image, err error) {
	if size := p.getImageDataSize(); len(data) != size {
		err = fmt.Errorf("image/rawp: decodeRGBA128f, bad data size, expect = %d, got = %d", size, len(data))
		return
	}
	rgba128f := newRGBA128f(image.Rect(0, 0, p.Width, p.Height), buf)
	var off = 0
	for y := 0; y < p.Height; y++ {
		copy(rgba128f.Pix[y*rgba128f.Stride:][:p.Width*16], data[off:])
		off += p.Width * 16
	}
	m = rgba128f
	return
}
package array
// arrTypes enumerates the element types supported by the helpers in this
// package: Go's comparable basic types (note ~byte covers ~uint8 and
// ~rune covers ~int32).
type arrTypes interface {
	~bool | ~uint | ~uint16 | ~uint32 | ~uint64 | ~int8 | ~int | ~int16 | ~int64 | ~float32 | ~float64 | ~complex64 | ~complex128 | ~uintptr | ~string | ~rune | ~byte
}

// In searches arr for the first element equal to key. On success it
// returns (true, index, value); otherwise it returns (false, last index
// visited, key).
func In[T arrTypes](arr []T, key T) (exists bool, i int, val T) {
	return in(arr, key)
}

// in is the shared linear-search implementation behind In.
func in[T arrTypes](arr []T, key T) (exists bool, i int, val T) {
	for i, val = range arr {
		if val == key {
			return true, i, val
		}
	}
	return false, i, key
}

// Replace replaces the first element equal to key with replacer, in
// place, and returns the slice. If key is absent the slice is returned
// unchanged.
// BUG FIX: previously the `exists` result of in was ignored, so a
// missing key overwrote the LAST element and an empty slice panicked.
func Replace[T arrTypes](arr []T, key T, replacer T) []T {
	if ok, i, _ := in(arr, key); ok {
		arr[i] = replacer
	}
	return arr
}

// Map applies f to every element of arr in place and returns arr.
func Map[T arrTypes](arr []T, f func(T) T) []T {
	for i, v := range arr {
		arr[i] = f(v)
	}
	return arr
}

// Reduce folds a from left to right with f and returns the final value.
// The zero value of T is returned for an empty slice.
// BUG FIX: previously an empty slice panicked and the input slice was
// mutated with intermediate results; an accumulator is used instead.
func Reduce[T arrTypes](a []T, f func(T, T) T) T {
	var acc T
	if len(a) == 0 {
		return acc
	}
	acc = a[0]
	for _, v := range a[1:] {
		acc = f(acc, v)
	}
	return acc
}

// Filter returns a new slice containing only the elements that satisfy
// the predicate f (nil when nothing matches).
func Filter[T arrTypes](a []T, f func(T) bool) []T {
	var out []T
	for _, v := range a {
		if f(v) {
			out = append(out, v)
		}
	}
	return out
}

// RemoveElem removes the first element equal to searchKey and returns
// the slice. If searchKey is absent the slice is returned unchanged.
// BUG FIX: previously a missing key silently dropped the LAST element
// and an empty slice panicked.
func RemoveElem[T arrTypes](a []T, searchKey T) []T {
	ok, i, _ := in(a, searchKey)
	if !ok {
		return a
	}
	return append(a[:i], a[i+1:]...)
}

// RemoveAt removes the element at index i and returns the slice.
// Out-of-range indices leave the slice unchanged (previously i > len(a)
// or a negative i panicked).
func RemoveAt[T arrTypes](a []T, i int) []T {
	if i < 0 || i >= len(a) {
		return a
	}
	return append(a[:i], a[i+1:]...)
}

// InsertAt inserts v at index i and returns the slice. Indices past the
// end append; negative indices prepend (previously they panicked).
func InsertAt[T arrTypes](a []T, i int, v T) []T {
	if i < 0 {
		i = 0
	}
	if i > len(a) {
		return append(a, v)
	}
	return append(a[:i], append([]T{v}, a[i:]...)...)
}

// AppendAt inserts the elements of v at index i and returns the result.
// Indices past the end append; negative indices prepend.
// BUG FIX: previously `append(v, a[i:]...)` could write into v's spare
// capacity and mutate the caller's slice; a fresh backing array is
// built instead.
func AppendAt[T arrTypes](a []T, i int, v []T) []T {
	if i < 0 {
		i = 0
	}
	if i > len(a) {
		return append(a, v...)
	}
	out := make([]T, 0, len(a)+len(v))
	out = append(out, a[:i]...)
	out = append(out, v...)
	return append(out, a[i:]...)
}
package rs485
import (
"github.com/volkszaehler/mbmd/encoding"
)
// RTUTransform functions convert RTU bytes to meaningful data types.
// Each converter interprets the register bytes read from the device and
// widens the result to float64.
type RTUTransform func([]byte) float64
// RTUIeee754ToFloat64 converts 32 bit IEEE 754 float readings
func RTUIeee754ToFloat64(b []byte) float64 {
	return float64(encoding.Float32(b))
}
// RTUIeee754ToFloat64Swapped converts 32 bit IEEE 754 float readings
// with the least-significant word first (swapped word order).
func RTUIeee754ToFloat64Swapped(b []byte) float64 {
	return float64(encoding.Float32LswFirst(b))
}
// RTUFloat64ToFloat64 converts 64 bit float readings
func RTUFloat64ToFloat64(b []byte) float64 {
	return encoding.Float64(b)
}
// RTUUint16ToFloat64 converts 16 bit unsigned integer readings
func RTUUint16ToFloat64(b []byte) float64 {
	return float64(encoding.Uint16(b))
}
// RTUUint32ToFloat64 converts 32 bit unsigned integer readings
func RTUUint32ToFloat64(b []byte) float64 {
	return float64(encoding.Uint32(b))
}
// RTUUint32ToFloat64Swapped converts 32 bit unsigned integer readings with swapped word order
func RTUUint32ToFloat64Swapped(b []byte) float64 {
	return float64(encoding.Uint32LswFirst(b))
}
// RTUUint64ToFloat64 converts 64 bit unsigned integer readings
func RTUUint64ToFloat64(b []byte) float64 {
	return float64(encoding.Uint64(b))
}
// RTUInt16ToFloat64 converts 16 bit signed integer readings
func RTUInt16ToFloat64(b []byte) float64 {
	return float64(encoding.Int16(b))
}
// RTUInt32ToFloat64 converts 32 bit signed integer readings
func RTUInt32ToFloat64(b []byte) float64 {
	return float64(encoding.Int32(b))
}
// RTUInt32ToFloat64Swapped converts 32 bit signed integer readings with swapped word order
func RTUInt32ToFloat64Swapped(b []byte) float64 {
	return float64(encoding.Int32LswFirst(b))
}
// RTUInt64ToFloat64 converts 64 bit signed integer readings
func RTUInt64ToFloat64(b []byte) float64 {
	return float64(encoding.Int64(b))
}
// MakeScaledTransform creates an RTUTransform with applied scaler:
// the returned transform DIVIDES the converted reading by scaler.
func MakeScaledTransform(transform RTUTransform, scaler float64) RTUTransform {
	return RTUTransform(func(b []byte) float64 {
		unscaled := transform(b)
		f := unscaled / scaler
		return f
	})
}
package pl
// BuiltInPkg is the import path under which the assembly builtin
// package (BuiltInSrc) is registered.
const BuiltInPkg = "/std/asm/builtin"
// BuiltInSrc is the assembly source that must be presented to the
// compiler as the file /std/asm/builtin/builtin.s.
const BuiltInSrc = `
// a char is sent in via r1
func PrintChar {
// use r2 and r3
addi sp sp -8
sw r2 sp
sw r3 sp 4
ori r2 r0 0x2000 // the address of serial port
.wait
lbu r3 r2 1
bne r3 r0 .wait // wait for invalid
mov r3 r1
ori r3 r3 0x100
sw r3 r2
// restore r2 and r3
lw r2 sp
lw r3 sp 4
addi sp sp 8
mov pc ret
}
// Print a 32-bit unsigned integer
// when array is implemented, this will be rewritten in glang
func PrintUint32 {
// saving used registers
sw ret sp -4
addi sp sp -28
sw r1 sp
sw r2 sp 4
sw r3 sp 8
bne r1 r0 .nonzero
.zero
addi r1 r0 0x30 // '0'
jal PrintChar
j .end
.nonzero
addi r2 sp 12
ori r4 r0 10
.divloop
modu r3 r1 r4
sb r3 r2 0
divu r1 r1 r4
beq r1 r0 .print
addi r2 r2 1
j .divloop
.print
addi r3 sp 12 // base
.printloop
lbu r1 r2 0 // load
addi r1 r1 0x30
jal PrintChar
beq r3 r2 .end
addi r2 r2 -1
j .printloop
.end
addi r1 r0 0xa
jal PrintChar // print a end line
lw r2 sp 4
lw r3 sp 8
addi sp sp 28
lw pc sp -4
}
// Print a 32-bit signed integer
// when array is implemented, this will be rewritten in glang
func PrintInt32 {
// saving used registers
sw ret sp -4
addi sp sp -16
sw r1 sp
sw r2 sp 4
sw r3 sp 8
slt r2 r1 r0 // r2 = r1 < 0
beq r2 r0 .skipsign
addi r1 r0 0x2d // '-'
jal PrintChar
lw r1 sp
sub r1 r0 r1 // revert
.skipsign
jal PrintUint32
lw r2 sp 4
lw r3 sp 8
addi sp sp 16
lw pc sp -4
}
// Panic halts the system immediately with panic exception
func Panic {
panic
mov pc ret
}
// IOCall performs an IO call
func IOCall {
iocall
mov pc ret
}
// Assert panics if the condition is not met
func Assert {
bne r1 r0 .ret
panic
.ret
mov pc ret
}
// MemCopy copies a range of memory
// r1 - destination address
// r2 - source address
// r3 - number of bytes
func MemCopy {
beq r3 r0 .ret
beq r1 r2 .ret
sltu r4 r1 r2
beq r4 r0 .rev
.loop
lbu r4 r2
sb r4 r1
addi r1 r1 1
addi r2 r2 1
addi r3 r3 -1
bne r3 r0 .loop
j .ret
.rev
add r1 r1 r3
add r2 r2 r3
.revloop
addi r1 r1 -1
addi r2 r2 -1
lbu r4 r2
sb r4 r1
addi r3 r3 -1
bne r3 r0 .revloop
.ret
mov pc ret
}
// MemSet sets a range of memory to the same byte
// r1 - destination address
// r2 - the byte
// r3 - number of bytes
func MemSet {
beq r3 r0 .ret
.loop
sb r2 r1
addi r1 r1 1
addi r3 r3 -1
bne r3 r0 .loop
.ret
mov pc ret
}
// MemClear sets a range of memory to zero
// r1 - destination address
// r2 - number of bytes
func MemClear {
beq r2 r0 .ret
.loop
sb r0 r1
addi r1 r1 1
addi r2 r2 -1
bne r2 r0 .loop
.ret
mov pc ret
}
` | pl/builtin_s.go | 0.602646 | 0.431584 | builtin_s.go | starcoder |
package storage
import "github.com/prometheus/prometheus/pkg/labels"
// Boilerplate on purpose. Generics some day...
// genericQuerierAdapter bridges a sample Querier or a ChunkQuerier into
// the internal genericQuerier abstraction.
type genericQuerierAdapter struct {
	baseQuerier
	// One-of. If both are set, Querier will be used.
	q Querier
	cq ChunkQuerier
}
// genericSeriesSetAdapter presents a SeriesSet as a generic Labeled set.
type genericSeriesSetAdapter struct {
	SeriesSet
}
func (a *genericSeriesSetAdapter) At() Labeled {
	return a.SeriesSet.At().(Labeled)
}
// genericChunkSeriesSetAdapter presents a ChunkSeriesSet as a generic
// Labeled set.
type genericChunkSeriesSetAdapter struct {
	ChunkSeriesSet
}
func (a *genericChunkSeriesSetAdapter) At() Labeled {
	return a.ChunkSeriesSet.At().(Labeled)
}
// Select delegates to the sample querier when present, otherwise to the
// chunk querier, wrapping the result in the matching generic adapter.
func (q *genericQuerierAdapter) Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) (genericSeriesSet, Warnings, error) {
	if q.q != nil {
		s, w, err := q.q.Select(sortSeries, hints, matchers...)
		return &genericSeriesSetAdapter{s}, w, err
	}
	s, w, err := q.cq.Select(sortSeries, hints, matchers...)
	return &genericChunkSeriesSetAdapter{s}, w, err
}
// newGenericQuerierFrom wraps a sample Querier.
func newGenericQuerierFrom(q Querier) genericQuerier {
	return &genericQuerierAdapter{baseQuerier: q, q: q}
}
// newGenericQuerierFromChunk wraps a ChunkQuerier.
func newGenericQuerierFromChunk(cq ChunkQuerier) genericQuerier {
	return &genericQuerierAdapter{baseQuerier: cq, cq: cq}
}
// querierAdapter exposes a genericQuerier as a sample Querier again.
type querierAdapter struct {
	genericQuerier
}
type seriesSetAdapter struct {
	genericSeriesSet
}
// At downcasts the generic element back to Series.
func (a *seriesSetAdapter) At() Series {
	return a.genericSeriesSet.At().(Series)
}
func (q *querierAdapter) Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) (SeriesSet, Warnings, error) {
	s, w, err := q.genericQuerier.Select(sortSeries, hints, matchers...)
	return &seriesSetAdapter{s}, w, err
}
// chunkQuerierAdapter exposes a genericQuerier as a ChunkQuerier again.
type chunkQuerierAdapter struct {
	genericQuerier
}
type chunkSeriesSetAdapter struct {
	genericSeriesSet
}
// At downcasts the generic element back to ChunkSeries.
func (a *chunkSeriesSetAdapter) At() ChunkSeries {
	return a.genericSeriesSet.At().(ChunkSeries)
}
func (q *chunkQuerierAdapter) Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) (ChunkSeriesSet, Warnings, error) {
	s, w, err := q.genericQuerier.Select(sortSeries, hints, matchers...)
	return &chunkSeriesSetAdapter{s}, w, err
}
// seriesMergerAdapter lifts a VerticalSeriesMerger to the generic
// Labeled interface, reusing buf across calls to limit allocations.
type seriesMergerAdapter struct {
	VerticalSeriesMerger
	buf []Series
}
func (a *seriesMergerAdapter) Merge(s ...Labeled) Labeled {
	a.buf = a.buf[:0]
	for _, ser := range s {
		a.buf = append(a.buf, ser.(Series))
	}
	return a.VerticalSeriesMerger.Merge(a.buf...)
}
// chunkSeriesMergerAdapter is the chunk-series counterpart of
// seriesMergerAdapter.
type chunkSeriesMergerAdapter struct {
	VerticalChunkSeriesMerger
	buf []ChunkSeries
}
func (a *chunkSeriesMergerAdapter) Merge(s ...Labeled) Labeled {
	a.buf = a.buf[:0]
	for _, ser := range s {
		a.buf = append(a.buf, ser.(ChunkSeries))
	}
	return a.VerticalChunkSeriesMerger.Merge(a.buf...)
}
package intelhex
import (
"fmt"
"io"
"strconv"
)
// isContiguous reports whether the two blocks are adjacent in address
// space, i.e. one ends exactly where the other begins.
// NOTE(review): the uint16 addition can wrap at 0x10000 — confirm blocks
// at the top of the 16-bit address space are handled upstream.
func isContiguous(b1, b2 ByteBlock) bool {
	return (b1.Address+uint16(len(b1.Data)) == b2.Address) ||
		(b2.Address+uint16(len(b2.Data)) == b1.Address)
}
// joinByteBlocks concatenates two contiguous blocks into one, keeping
// the lower start address.
// assumes blocks are contiguous - order of supplied blocks not important
func joinByteBlocks(b1, b2 ByteBlock) ByteBlock {
	if b1.Address > b2.Address {
		b1, b2 = b2, b1
	}
	r := make([]byte, len(b1.Data)+len(b2.Data))
	copy(r, b1.Data)
	copy(r[len(b1.Data):], b2.Data)
	return ByteBlock{Address: b1.Address, Data: r}
}
// verifyCheckSum reports whether the final byte of data is the valid
// Intel HEX checksum of all preceding bytes. Callers must guarantee
// len(data) >= 1.
func verifyCheckSum(data []byte) bool {
	chk := checksum{}
	chk.addBytes(data[:len(data)-1])
	return chk.value() == data[len(data)-1]
}
// processLineData parses one Intel HEX record line (":llaaaattdd...cc")
// into a ByteBlock. It returns io.EOF for an end-of-file record (type
// 0x01) and an error for malformed lines, bad checksums, or record
// types other than data (0x00) and EOF (0x01) — extended-address
// records are not supported.
// NOTE(review): a length field larger than the decoded payload would
// index past the slice and panic; consider validating dataLen against
// len(data) before slicing.
func processLineData(line string) (ByteBlock, error) {
	// shortest valid record: ':' + 2(len) + 4(addr) + 2(type) + 2(chksum)
	if len(line) < 11 {
		return result, fmt.Errorf("input line too short: %s", line)
	}
	if line[0] != ':' {
		return result, fmt.Errorf("input line does not start with colon: %s", line)
	}
	data, err := hexStrToBytes(line[1:])
	if err != nil {
		return result, err
	}
	if !verifyCheckSum(data) {
		return result, fmt.Errorf("input line bad checksum: %s", line)
	}
	dataLen := data[0]
	addr := uint16(data[1])<<8 + uint16(data[2])
	recType := data[3]
	if recType == 0 {
		result.Address = addr
		result.Data = data[4 : 4+dataLen]
	} else if recType == 1 {
		// end-of-file record: signal with the io.EOF sentinel
		return result, io.EOF
	} else {
		return result, fmt.Errorf("Unsupported record type: %02x", recType)
	}
	return result, nil
}
// writeDataLine emits one Intel HEX data record (type 00) to w covering
// at most maxlen bytes of data starting at offset, addressed at
// address. It returns the offset and address for the next record so
// callers can stream a block as consecutive lines.
func writeDataLine(w io.Writer, data []byte, address uint16, offset, maxlen int) (nextOffset int, nextAddr uint16, err error) {
	chk := checksum{}
	// clamp the record length to the remaining data
	length := maxlen
	if length+offset > len(data) {
		length = len(data) - offset
	}
	// the checksum covers length, address, record type (0) and payload
	chk.addByte(byte(length))
	chk.addWord(address)
	_, err = fmt.Fprintf(w, ":%02X%04X00", length, address)
	if err != nil {
		return
	}
	for n := 0; n < length; n++ {
		b := data[offset+n]
		chk.addByte(b)
		_, err = fmt.Fprintf(w, "%02X", b)
		if err != nil {
			return
		}
	}
	_, err = fmt.Fprintf(w, "%02X\n", chk.value())
	if err != nil {
		return
	}
	nextOffset = offset + length
	nextAddr = address + uint16(length)
	return
}
func writeEOFLine(w io.Writer) error {
_, err := fmt.Fprintln(w, ":00000001FF")
return err
}
// hexStrToBytes decodes a string of hexadecimal digit pairs into bytes.
// It returns an error when the input has an odd number of characters or
// contains a non-hex digit. An empty input yields an empty slice.
func hexStrToBytes(hex string) ([]byte, error) {
	// reject odd lengths up front instead of after consuming the pairs
	if len(hex)%2 != 0 {
		return nil, fmt.Errorf("uneven number of characters supplied")
	}
	result := make([]byte, 0, len(hex)/2) // pre-size: one byte per pair
	for i := 0; i < len(hex); i += 2 {
		val, err := strconv.ParseUint(hex[i:i+2], 16, 8)
		if err != nil {
			return nil, err
		}
		result = append(result, byte(val))
	}
	return result, nil
}
package main
import (
"github.com/thoas/go-funk"
)
// Map x to map of y to 'isBlack' bool
type tileMap map[int]map[int]bool
func (t *tileMap) getBlackTiles() int {
blackTiles := 0
for _, x := range *t {
for _, isBlack := range x {
if isBlack {
blackTiles++
}
}
}
return blackTiles
}
// flipTiles advances the tile floor through 100 days of the hex-grid
// cellular-automaton rules (AoC 2020 day 24 part 2), mutating t in place.
func (t *tileMap) flipTiles() {
	for day := 1; day <= 100; day++ {
		nextState := tileMap{}
		// Find all possible hexagon tile positions: every known tile plus each
		// of its six neighbours. Positions may appear more than once in opts,
		// which is harmless because the per-position update below is
		// deterministic and idempotent.
		opts := []pos{}
		for x, yBlackMap := range *t {
			for y := range yBlackMap {
				opts = append(opts, pos{x, y})
				for _, dp := range directions {
					nextX := x + dp.x
					nextY := y + dp.y
					opts = append(opts, pos{x: nextX, y: nextY})
				}
			}
		}
		// > Every day, the tiles are all flipped according to the following rules:
		for _, pos := range opts {
			// Count black neighbours; reading through a missing inner map
			// yields false, so the nil check only avoids creating entries.
			adj := 0
			for _, dp := range directions {
				if (*t)[pos.x+dp.x] != nil {
					if (*t)[pos.x+dp.x][pos.y+dp.y] {
						adj++
					}
				}
			}
			if nextState[pos.x] == nil {
				nextState[pos.x] = map[int]bool{}
			}
			isBlack := (*t)[pos.x][pos.y]
			// > Any black tile with zero or more than 2 black tiles immediately adjacent to it is flipped to white.
			if isBlack && (adj == 0 || adj > 2) {
				nextState[pos.x][pos.y] = false
				// > Any white tile with exactly 2 black tiles immediately adjacent to it is flipped to black.
			} else if !isBlack && adj == 2 {
				nextState[pos.x][pos.y] = true
			} else {
				nextState[pos.x][pos.y] = isBlack
			}
		}
		// > The rules are applied simultaneously to every tile; put another way, it is first determined which tiles need
		// > to be flipped, then they are all flipped at the same time.
		*t = nextState
	}
}
// loadTiles parses the puzzle input and flips each referenced tile once,
// returning the resulting floor state (tiles referenced an odd number of
// times end up black).
func loadTiles(lines []string) tileMap {
	floor := tileMap{}
	for _, instruction := range lines {
		p := getTilePos(instruction)
		column, ok := floor[p.x]
		if !ok {
			column = map[int]bool{}
			floor[p.x] = column
		}
		column[p.y] = !column[p.y]
	}
	return floor
}
// pos is an axial coordinate on the hexagonal grid.
type pos struct {
	x int
	y int
}
// directions maps each hex direction token to its axial-coordinate offset.
// "e" and "w" are single-character tokens; the four diagonals are two
// characters long — getTilePos relies on that distinction when tokenizing.
var directions = map[string]pos{
	"se": {x: 0, y: 1},
	"sw": {x: -1, y: 1},
	"ne": {x: 1, y: -1},
	"nw": {x: 0, y: -1},
	"e":  {x: 1, y: 0},
	"w":  {x: -1, y: 0},
}
// getTilePos walks a line of direction tokens (e, w, se, sw, ne, nw) from
// the origin and returns the resulting axial coordinate.
func getTilePos(line string) pos {
	var x, y int
	rest := []byte(line)
	for len(rest) > 0 {
		// Two-character token by default; one character when the leading
		// character alone is a known direction key ("e" or "w").
		take := 2
		if funk.Contains(directions, string(rest[0])) {
			take = 1
		}
		step := directions[string(rest[0:take])]
		x += step.x
		y += step.y
		rest = rest[take:]
	}
	return pos{x, y}
} | calendar/day24/tile.go | 0.625667 | 0.433862 | tile.go | starcoder |
package slog
// region --- INFO Level Sugars ---
// Note logs out a message in INFO level and with Operation NOTE. Returns an instance of operation NOTE.
func (i *slogInstance) Note(str interface{}, v ...interface{}) Instance {
	return i.clone().incStackOffset().Operation(NOTE).Info(str, v...)
}

// Await logs out a message in INFO level and with Operation AWAIT. Returns an instance of operation AWAIT.
func (i *slogInstance) Await(str interface{}, v ...interface{}) Instance {
	return i.clone().incStackOffset().Operation(AWAIT).Info(str, v...)
}

// Done logs out a message in INFO level and with Operation DONE. Returns an instance of operation DONE.
func (i *slogInstance) Done(str interface{}, v ...interface{}) Instance {
	return i.clone().incStackOffset().Operation(DONE).Info(str, v...)
}

// Success logs out a message in INFO level and with Operation DONE. Returns an instance of operation DONE.
// It is an alias for Done.
func (i *slogInstance) Success(str interface{}, v ...interface{}) Instance {
	return i.clone().incStackOffset().Operation(DONE).Info(str, v...)
}

// IO logs out a message in INFO level and with Operation IO. Returns an instance of operation IO.
func (i *slogInstance) IO(str interface{}, v ...interface{}) Instance {
	return i.clone().incStackOffset().Operation(IO).Info(str, v...)
}
// endregion
// region --- WARN Level Sugars ---
// WarnNote logs out a message in WARN level and with Operation NOTE. Returns an instance of operation NOTE.
func (i *slogInstance) WarnNote(str interface{}, v ...interface{}) Instance {
	return i.clone().incStackOffset().Operation(NOTE).Warn(str, v...)
}

// WarnAwait logs out a message in WARN level and with Operation AWAIT. Returns an instance of operation AWAIT.
func (i *slogInstance) WarnAwait(str interface{}, v ...interface{}) Instance {
	return i.clone().incStackOffset().Operation(AWAIT).Warn(str, v...)
}

// WarnDone logs out a message in WARN level and with Operation DONE. Returns an instance of operation DONE.
func (i *slogInstance) WarnDone(str interface{}, v ...interface{}) Instance {
	return i.clone().incStackOffset().Operation(DONE).Warn(str, v...)
}

// WarnSuccess logs out a message in WARN level and with Operation DONE. Returns an instance of operation DONE.
// It is an alias for WarnDone.
func (i *slogInstance) WarnSuccess(str interface{}, v ...interface{}) Instance {
	return i.clone().incStackOffset().Operation(DONE).Warn(str, v...)
}

// WarnIO logs out a message in WARN level and with Operation IO. Returns an instance of operation IO.
func (i *slogInstance) WarnIO(str interface{}, v ...interface{}) Instance {
	return i.clone().incStackOffset().Operation(IO).Warn(str, v...)
}
// endregion
// region --- ERROR Level Sugars ---
// ErrorNote logs out a message in ERROR level and with Operation NOTE. Returns an instance of operation NOTE.
func (i *slogInstance) ErrorNote(str interface{}, v ...interface{}) Instance {
	return i.clone().incStackOffset().Operation(NOTE).Error(str, v...)
}

// ErrorAwait logs out a message in ERROR level and with Operation AWAIT. Returns an instance of operation AWAIT.
func (i *slogInstance) ErrorAwait(str interface{}, v ...interface{}) Instance {
	return i.clone().incStackOffset().Operation(AWAIT).Error(str, v...)
}

// ErrorDone logs out a message in ERROR level and with Operation DONE. Returns an instance of operation DONE.
func (i *slogInstance) ErrorDone(str interface{}, v ...interface{}) Instance {
	return i.clone().incStackOffset().Operation(DONE).Error(str, v...)
}

// ErrorSuccess logs out a message in ERROR level and with Operation DONE. Returns an instance of operation DONE.
// It is an alias for ErrorDone.
func (i *slogInstance) ErrorSuccess(str interface{}, v ...interface{}) Instance {
	return i.clone().incStackOffset().Operation(DONE).Error(str, v...)
}

// ErrorIO logs out a message in ERROR level and with Operation IO. Returns an instance of operation IO.
func (i *slogInstance) ErrorIO(str interface{}, v ...interface{}) Instance {
	return i.clone().incStackOffset().Operation(IO).Error(str, v...)
}
// endregion
// region --- DEBUG Level Sugars ---
// DebugNote logs out a message in DEBUG level and with Operation NOTE. Returns an instance of operation NOTE.
func (i *slogInstance) DebugNote(str interface{}, v ...interface{}) Instance {
	return i.clone().incStackOffset().Operation(NOTE).Debug(str, v...)
}

// DebugAwait logs out a message in DEBUG level and with Operation AWAIT. Returns an instance of operation AWAIT.
func (i *slogInstance) DebugAwait(str interface{}, v ...interface{}) Instance {
	return i.clone().incStackOffset().Operation(AWAIT).Debug(str, v...)
}

// DebugDone logs out a message in DEBUG level and with Operation DONE. Returns an instance of operation DONE.
func (i *slogInstance) DebugDone(str interface{}, v ...interface{}) Instance {
	return i.clone().incStackOffset().Operation(DONE).Debug(str, v...)
}

// DebugSuccess logs out a message in DEBUG level and with Operation DONE. Returns an instance of operation DONE.
// It is an alias for DebugDone.
func (i *slogInstance) DebugSuccess(str interface{}, v ...interface{}) Instance {
	return i.clone().incStackOffset().Operation(DONE).Debug(str, v...)
}

// DebugIO logs out a message in DEBUG level and with Operation IO. Returns an instance of operation IO.
func (i *slogInstance) DebugIO(str interface{}, v ...interface{}) Instance {
	return i.clone().incStackOffset().Operation(IO).Debug(str, v...)
}
// endregion | instancesugars.go | 0.625438 | 0.492066 | instancesugars.go | starcoder |
package core
import (
"math/rand"
)
// ShapePieces is the number of cells that make up one tetromino.
const ShapePieces = 4

// Piece identifies a tetromino kind; it doubles as an index into figures.
type Piece = int

// Tetromino kinds; the zero value means "no piece".
// NOTE(review): these names do not match the row order of the figures table
// below (which is I, Z, S, T, L, J, O) — confirm which mapping is
// authoritative before indexing figures with these constants.
const (
	IShape Piece = iota + 1
	JShape
	LShape
	OShape
	SShape
	TShape
	ZShape
	ShapesCount
)

// movement directions
const (
	Left int = iota - 1
	Center
	Right // also used for down
)
// Point is a cell coordinate on the board (X = column, Y = row).
type Point struct {
	X, Y int
}

// Shape is a type containing four points, which represents the four points
// making a contiguous 'piece'.
type Shape struct {
	points [ShapePieces]Point
	color  Color // index into the figures table; zero means empty
}
// Package-level game state: the falling piece, the preview piece, the held
// piece, and a scratch buffer used to revert illegal moves.
var ActiveShape Shape
var NextShape Shape
var HoldedShape Shape
var Buffer Shape
// figures encodes each tetromino as four indices into a 2x4 spawn grid:
// entry n is the cell at column n%2, row n/2 (see resetShape).
// NOTE(review): the row order here (I, Z, S, T, L, J, O) does not match the
// Piece constant order (I, J, L, O, S, T, Z) — verify before indexing this
// table with the named constants.
var figures = [ShapesCount][ShapePieces]int{
	{0, 0, 0, 0}, // empty
	{1, 3, 5, 7}, // I
	{2, 4, 5, 7}, // Z
	{3, 5, 4, 6}, // S
	{3, 5, 4, 7}, // T
	{2, 3, 5, 7}, // L
	{3, 5, 7, 6}, // J
	{2, 3, 4, 5}} // O

// colors names the seven piece kinds, in the same order as figures rows 1-7.
var colors = [7]string{"I", "Z", "S", "T", "L", "J", "O"}
// holdShape swaps the active piece with the hold slot. On first use the
// active piece is stored and the preview piece becomes active; afterwards
// the active and held pieces are exchanged. The outgoing piece is reset to
// its spawn cells; the incoming piece is moved into the board.
func holdShape() {
	if HoldedShape.color == Empty {
		ActiveShape.resetShape()
		HoldedShape.copyFrom(&ActiveShape)
		ActiveShape.copyFrom(&NextShape)
		ActiveShape.moveShape(4, Center)
		NextShape = generateNewShape()
	} else {
		var temp Shape
		ActiveShape.resetShape()
		temp.copyFrom(&ActiveShape)
		ActiveShape.copyFrom(&HoldedShape)
		HoldedShape.copyFrom(&temp)
		ActiveShape.moveShape(4, Center)
	}
}
// rotate turns the shape 90 degrees around its second cell, with a simple
// wall kick: the rotated shape is tried in place, then one column right,
// then one column left; if none fits, the rotation is discarded.
func (shape *Shape) rotate() {
	// color 7 is the O piece (square), which is rotation-invariant.
	if shape.color == 7 {
		return
	}
	centerOfRot := shape.points[1] // center of rotation
	var buff Shape
	buff.copyFrom(shape)
	for i := 0; i < ShapePieces; i++ {
		// 90-degree rotation about the center: (x, y) -> (cx - (y - cy), cy + (x - cx)).
		x := buff.points[i].Y - centerOfRot.Y
		y := buff.points[i].X - centerOfRot.X
		buff.points[i].X = centerOfRot.X - x
		buff.points[i].Y = centerOfRot.Y + y
	}
	if !buff.isInside(gameBoard) {
		// Kick one column right; if that fails, two columns back (net one left).
		buff.moveShape(1, 0)
		if buff.isInside(gameBoard) {
			shape.copyFrom(&buff)
			return
		}
		buff.moveShape(-2, 0)
		if buff.isInside(gameBoard) {
			shape.copyFrom(&buff)
			return
		}
	} else {
		shape.copyFrom(&buff)
	}
}
// moveLeftRight shifts the shape one column in the given direction (Left or
// Right), saving the previous cell positions into buffer so the caller can
// revert the move after an out-of-board check.
func (shape *Shape) moveLeftRight(direction int, buffer *Shape) {
	buffer.copyFrom(shape) // save the positions before inBoardCheck
	shape.moveShape(direction, Center)
}
// copyFrom makes shape a deep copy of other (cell positions and color).
func (shape *Shape) copyFrom(other *Shape) {
	copy(shape.points[:], other.points[:])
	shape.color = other.color
}
// isInside reports whether every cell of the shape lies within the board's
// columns/rows and on an empty board cell.
// NOTE(review): negative Y is not rejected, and a negative Y would panic on
// the board lookup below — confirm callers never test cells above row 0, or
// that spawn positions are always clamped first.
func (shape *Shape) isInside(board Board) bool {
	for i := 0; i < ShapePieces; i++ {
		if shape.points[i].X < 0 || shape.points[i].X >= BoardCols || shape.points[i].Y >= BoardRows {
			return false
		} else if board[shape.points[i].Y][shape.points[i].X] != Empty {
			return false
		}
	}
	return true
}
// moveShape translates every cell of the shape by the given offsets.
// NOTE(review): despite the parameter names, r is added to X (column) and
// c to Y (row) — the names appear to be swapped relative to row/column.
func (shape *Shape) moveShape(r, c int) {
	for i := 0; i < ShapePieces; i++ {
		shape.points[i].X += r
		shape.points[i].Y += c
	}
}
// applyGravity moves the shape one row down (Right doubles as "down").
func (shape *Shape) applyGravity() {
	shape.moveShape(Center, Right)
}
// resetShape restores the shape's cells to the spawn layout encoded in the
// figures table for its color (cell n sits at column n%2, row n/2).
func (shape *Shape) resetShape() {
	for i := 0; i < ShapePieces; i++ {
		shape.points[i].X = figures[shape.color][i] % 2
		shape.points[i].Y = figures[shape.color][i] / 2
	}
}
// generateNewShape builds a uniformly random tetromino positioned at its
// spawn cells from the figures table.
func generateNewShape() Shape {
	kind := rand.Int()%7 + 1
	var shape Shape
	shape.color = kind
	for i := 0; i < ShapePieces; i++ {
		cell := figures[kind][i]
		shape.points[i].X = cell % 2
		shape.points[i].Y = cell / 2
	}
	return shape
} | core/shape.go | 0.649134 | 0.471041 | shape.go | starcoder |
package nodes
import (
"github.com/wdevore/RangerGo/api"
"github.com/wdevore/RangerGo/engine/geometry"
"github.com/wdevore/RangerGo/engine/maths"
)
// Transform holds the transform properties and methods.
type Transform struct {
position api.IPoint
rotation float64
scale api.IPoint
aft api.IAffineTransform
inverse api.IAffineTransform
}
func (t *Transform) initializeTransform() {
t.position = geometry.NewPoint()
t.scale = geometry.NewPointUsing(1.0, 1.0)
t.aft = maths.NewTransform()
t.inverse = maths.NewTransform()
}
// AffineTransform returns this node's transform matrix
func (t *Transform) AffineTransform() api.IAffineTransform {
return t.aft
}
// InverseTransform returns an inverted cached version of "transform"
func (t *Transform) InverseTransform() api.IAffineTransform {
return t.inverse
}
// SetPosition set the translation components of the matrix
func (t *Transform) SetPosition(x, y float64) {
t.position.SetByComp(x, y)
}
// Position returns the position independent of the matrix
func (t *Transform) Position() api.IPoint {
return t.position
}
// SetRotation set the rotation given as radians
func (t *Transform) SetRotation(radians float64) {
t.rotation = radians
}
// Rotation is the current rotation in radians
func (t *Transform) Rotation() float64 {
return t.rotation
}
// SetScale sets the scale uniformly for x and y
func (t *Transform) SetScale(scale float64) {
t.scale.SetByComp(scale, scale)
}
// Scale returns the X scale component for uniform scales.
func (t *Transform) Scale() float64 {
return t.scale.X()
}
// CalcFilteredTransform performs a filter transform calculation.
func (t *Transform) CalcFilteredTransform(excludeTranslation bool,
excludeRotation bool,
excludeScale bool,
aft api.IAffineTransform) {
aft.ToIdentity()
if !excludeTranslation {
aft.MakeTranslate(t.position.X(), t.position.Y())
}
if !excludeRotation && t.rotation != 0.0 {
aft.Rotate(t.rotation)
}
if !excludeScale && (t.scale.X() != 0.0 || t.scale.Y() != 0.0) {
aft.Scale(t.scale.X(), t.scale.Y())
}
} | engine/nodes/transform.go | 0.890788 | 0.512876 | transform.go | starcoder |
package containers
import (
"time"
"github.com/influxdata/telegraf"
)
// Accumulator is an implementation of telegraf.Accumulator. It passes all
// calls through to its inner accumulator, but adds a container_id tag to any
// metric on the way through.
type Accumulator struct {
	Accumulator *telegraf.Accumulator // inner accumulator every call is forwarded to
	CId         string                // container ID injected as the container_id tag
}
// AddFields adds a metric to the accumulator with the given measurement,
// tagging it with this accumulator's container_id via ctags.
func (a *Accumulator) AddFields(measurement string,
	fields map[string]interface{},
	tags map[string]string,
	t ...time.Time) {
	(*a.Accumulator).AddFields(measurement, fields, a.ctags(tags), t...)
}

// AddGauge is the same as AddFields, but will add the metric as a "Gauge" type
func (a *Accumulator) AddGauge(measurement string,
	fields map[string]interface{},
	tags map[string]string,
	t ...time.Time) {
	(*a.Accumulator).AddGauge(measurement, fields, a.ctags(tags), t...)
}

// AddCounter is the same as AddFields, but will add the metric as a "Counter" type
func (a *Accumulator) AddCounter(measurement string,
	fields map[string]interface{},
	tags map[string]string,
	t ...time.Time) {
	(*a.Accumulator).AddCounter(measurement, fields, a.ctags(tags), t...)
}

// AddSummary is the same as AddFields, but will add the metric as a "Summary" type
func (a *Accumulator) AddSummary(measurement string,
	fields map[string]interface{},
	tags map[string]string,
	t ...time.Time) {
	(*a.Accumulator).AddSummary(measurement, fields, a.ctags(tags), t...)
}

// AddHistogram is the same as AddFields, but will add the metric as a "Histogram" type
func (a *Accumulator) AddHistogram(measurement string,
	fields map[string]interface{},
	tags map[string]string,
	t ...time.Time) {
	(*a.Accumulator).AddHistogram(measurement, fields, a.ctags(tags), t...)
}
// AddMetric forwards a copy of m with the container_id tag forced to a.CId.
// NOTE(review): unlike ctags (where a caller-supplied container_id wins),
// AddTag here overwrites any existing container_id — confirm which
// precedence is intended.
func (a *Accumulator) AddMetric(m telegraf.Metric) {
	metric := m.Copy()
	metric.AddTag("container_id", a.CId)
	(*a.Accumulator).AddMetric(metric)
}

// SetPrecision passes the precision settings through to the inner accumulator.
func (a *Accumulator) SetPrecision(precision, interval time.Duration) {
	(*a.Accumulator).SetPrecision(precision, interval)
}

// AddError passes the error through to the inner accumulator.
func (a *Accumulator) AddError(err error) {
	(*a.Accumulator).AddError(err)
}

// WithTracking passes through to the inner accumulator's tracking support.
func (a *Accumulator) WithTracking(maxTracking int) telegraf.TrackingAccumulator {
	return (*a.Accumulator).WithTracking(maxTracking)
}
// ctags returns a copy of tags with the container_id entry added.
// NOTE(review): caller-supplied tags are copied after the container_id
// entry, so a caller-provided "container_id" overrides a.CId here, while
// AddMetric forces a.CId — confirm the intended precedence.
func (a *Accumulator) ctags(tags map[string]string) map[string]string {
	result := map[string]string{"container_id": a.CId}
	for k, v := range tags {
		result[k] = v
	}
	return result
} | plugins/inputs/dcos_statsd/containers/accumulator.go | 0.727395 | 0.506286 | accumulator.go | starcoder |
package query
import (
"fmt"
"github.com/gallactic/gallactic/core/consensus/tendermint"
"github.com/gallactic/gallactic/crypto"
"github.com/gallactic/gallactic/txs"
"github.com/tendermint/tendermint/consensus"
consensusTypes "github.com/tendermint/tendermint/consensus/types"
tmEd25519 "github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
// NodeView provides read-only query access to a running Tendermint node.
type NodeView struct {
	tmNode *tendermint.Node
}

// NewNodeView wraps the given Tendermint node in a NodeView.
func NewNodeView(tmNode *tendermint.Node) *NodeView {
	return &NodeView{
		tmNode: tmNode,
	}
}
// NodeInfo returns the node's p2p identity and network information.
func (nv *NodeView) NodeInfo() p2p.NodeInfo {
	return nv.tmNode.NodeInfo()
}

// Peers returns the set of peers currently connected to the node.
func (nv *NodeView) Peers() p2p.IPeerSet {
	return nv.tmNode.Switch().Peers()
}

// BlockStore exposes the node's block store for RPC-style reads.
func (nv *NodeView) BlockStore() state.BlockStoreRPC {
	return nv.tmNode.BlockStore()
}
// MempoolTransactions returns up to maxTxs decoded transaction envelopes
// currently in the node's mempool, failing on the first envelope that does
// not decode. Pass -1 to get all available transactions.
func (nv *NodeView) MempoolTransactions(maxTxs int) ([]*txs.Envelope, error) {
	var transactions []*txs.Envelope
	for _, txBytes := range nv.tmNode.MempoolReactor().Mempool.ReapMaxTxs(maxTxs) {
		txEnv := new(txs.Envelope)
		if err := txEnv.Decode(txBytes); err != nil {
			return nil, err
		}
		transactions = append(transactions, txEnv)
	}
	return transactions, nil
}
// RoundState returns the node's current consensus round state.
func (nv *NodeView) RoundState() *consensusTypes.RoundState {
	return nv.tmNode.ConsensusState().GetRoundState()
}

// RoundStateJSON returns the current consensus round state serialized as JSON.
func (nv *NodeView) RoundStateJSON() ([]byte, error) {
	return nv.tmNode.ConsensusState().GetRoundStateJSON()
}
// PeerRoundStates returns the consensus round state of every connected peer,
// failing if any peer's stored state is not a *consensus.PeerState.
func (nv *NodeView) PeerRoundStates() ([]*consensusTypes.PeerRoundState, error) {
	peers := nv.tmNode.Switch().Peers().List()
	peerRoundStates := make([]*consensusTypes.PeerRoundState, len(peers))
	for i, peer := range peers {
		peerState, ok := peer.Get(types.PeerStateKey).(*consensus.PeerState)
		if !ok {
			return nil, fmt.Errorf("could not get PeerState for peer: %s", peer)
		}
		peerRoundStates[i] = peerState.GetRoundState()
	}
	return peerRoundStates, nil
}
// PrivValidatorPublicKey returns this node's validator public key.
// NOTE(review): the unchecked type assertion panics if the validator key is
// not ed25519 — confirm other key types can never occur here.
func (nv *NodeView) PrivValidatorPublicKey() (crypto.PublicKey, error) {
	pub := nv.tmNode.PrivValidator().GetPubKey().(tmEd25519.PubKeyEd25519)
	return crypto.PublicKeyFromRawBytes(pub[:])
}
// func (nv *NodeView) DefaultNodeInfo() p2p.DefaultNodeInfo {
// return nv.tmNode.NodeInfo()
// } | core/consensus/tendermint/query/node_view.go | 0.54577 | 0.457561 | node_view.go | starcoder |
package types
// Contains any policy cache related functionality
// This functionality is used in handlers to implement the policy management component
import (
"sync"
fTypes "github.com/openfaas/faas-provider/types"
log "github.com/sirupsen/logrus"
)
// The Policy structure expresses constraints and features that can be
// enforced upon Deployment.
// Subset of faas-provider/types/FunctionDeployment.
// Pointer fields distinguish "not specified in the policy" (nil) from an
// explicit value to apply.
type Policy struct {
	Name                   string                    `yaml:"name"`
	EnvVars                *map[string]string        `yaml:"environment"`
	Constraints            *[]string                 `yaml:"constraints"`
	Secrets                *[]string                 `yaml:"secrets"`
	Labels                 *map[string]string        `yaml:"labels"`
	Annotations            *map[string]string        `yaml:"annotations"`
	Limits                 *fTypes.FunctionResources `yaml:"limits"`
	Requests               *fTypes.FunctionResources `yaml:"requests"`
	ReadOnlyRootFilesystem *bool                     `yaml:"readOnlyRootFilesystem"`
	Namespace              *string                   `yaml:"namespace,omitempty"`
}
// PolicyFunction records one policy-specific deployment of a function: the
// internal (provider-facing) service name and the policy it was deployed under.
type PolicyFunction struct {
	InternalName string
	Policy       string
}
// PolicyController defines access to the policy cache; implement this
// interface to realise the policy store used by the handlers.
type PolicyController interface {
	// GetPolicyFunction resolves a requested functionName and policyName to the
	// underlying service name (the name known to the service provider —
	// Kubernetes, etc.). It returns the position in the policy map, the
	// internal service name and an error.
	GetPolicyFunction(functionName string, policyName string) (int, string, error)
	// AddPolicyFunction registers a deployment. The lookUpName is the external
	// function name as accessed by the user over the URL; the PolicyFunction
	// captures the internal service name and the policy it was deployed under.
	AddPolicyFunction(lookUpName string, function PolicyFunction) string
	AddPolicy(policy Policy) string
	AddPolicies(policies []Policy)
	GetPolicy(policyName string) (Policy, bool)
	ReloadFromCache(functions []*fTypes.FunctionDeployment)
	// BuildDeployment defines how a policy-conforming deployment spec is built
	// from a FunctionDeployment and a Policy. The deployment argument is the
	// spec the root version of the service was deployed under; function names
	// the internal service and the policy the new deployment must adhere to.
	// Implementations may let the Policy take precedence over the original
	// FunctionDeployment, and must at least set function.InternalName as the
	// service name of the new deployment.
	BuildDeployment(function *PolicyFunction, deployment *fTypes.FunctionDeployment) (*fTypes.FunctionDeployment, *PolicyFunction)
	DeleteFunction(function *fTypes.FunctionDeployment)
}
// PolicyStore is an in-memory, mutex-guarded implementation of PolicyController.
type PolicyStore struct {
	lookUp   map[string][]PolicyFunction // external function name -> its policy deployments
	policies map[string]Policy           // policy name -> policy definition
	lock     sync.RWMutex                // guards lookUp and policies
}
// NewPolicyStore creates an empty, ready-to-use PolicyStore.
func NewPolicyStore() *PolicyStore {
	return &PolicyStore{
		lookUp:   make(map[string][]PolicyFunction),
		policies: make(map[string]Policy),
	}
}
// GetPolicyFunction resolves an external function name plus policy name to
// the internal service name. It returns the entry's index in the lookup
// slice and the internal name; a FunctionError when the function is unknown,
// or a PolicyError when no deployment exists under that policy.
func (p *PolicyStore) GetPolicyFunction(lookUpName string, policyName string) (int, string, error) {
	p.lock.RLock()
	defer p.lock.RUnlock()
	log.Infof("[policy] get policy function policy %s", lookUpName)
	functions, ok := p.lookUp[lookUpName]
	if !ok {
		return -1, "", &FunctionError{}
	}
	for i, function := range functions {
		if policyName == function.Policy {
			return i, function.InternalName, nil
		}
	}
	return -1, "", &PolicyError{}
}
// AddPolicyFunction registers a policy deployment under its external lookup
// name and returns the internal service name.
func (p *PolicyStore) AddPolicyFunction(lookUpName string, function PolicyFunction) string {
	p.lock.Lock()
	defer p.lock.Unlock()
	log.Infof("[policy] add function to policy cache: lookup for %s with %s", lookUpName, function.InternalName)
	// Lazily initialise in case the store was created without NewPolicyStore.
	if p.lookUp == nil {
		p.lookUp = make(map[string][]PolicyFunction)
	}
	p.lookUp[lookUpName] = append(p.lookUp[lookUpName], function)
	return function.InternalName
}
// AddPolicy registers (or replaces) a policy definition and returns its name.
func (p *PolicyStore) AddPolicy(policy Policy) string {
	p.lock.Lock()
	defer p.lock.Unlock()
	log.Infof("add policy %s", policy.Name)
	// Lazily initialise in case the store was created without NewPolicyStore.
	if p.policies == nil {
		p.policies = make(map[string]Policy)
	}
	p.policies[policy.Name] = policy
	return policy.Name
}
// AddPolicies registers every policy in the given slice with the store.
func (p *PolicyStore) AddPolicies(policies []Policy) {
	for i := range policies {
		p.AddPolicy(policies[i])
	}
}
// GetPolicy returns the named policy and whether it exists.
func (p *PolicyStore) GetPolicy(policyName string) (Policy, bool) {
	p.lock.RLock()
	defer p.lock.RUnlock()
	policy, ok := p.policies[policyName]
	return policy, ok
}
// BuildDeployment derives a policy-conforming deployment spec from the root
// deployment and the requested policy. Policy maps/slices are merged into the
// spec (policy entries win on key collisions); limits, requests, namespace
// and the read-only flag overwrite when set. The derived service name
// "<service>-<policy>" is written into both the returned deployment and
// function.InternalName.
func (p *PolicyStore) BuildDeployment(function *PolicyFunction,
	deployment *fTypes.FunctionDeployment) (*fTypes.FunctionDeployment, *PolicyFunction) {
	name := deployment.Service + "-" + function.Policy
	// Ensure the annotation map exists AND is writable: the previous
	// new(map[string]string) produced a pointer to a nil map, and every write
	// through it panicked.
	if deployment.Annotations == nil || *deployment.Annotations == nil {
		deployment.Annotations = &map[string]string{}
	}
	policy, ok := p.GetPolicy(function.Policy)
	if !ok {
		// Unknown policy: log it and continue with the zero Policy, which
		// leaves the deployment unchanged apart from the renaming below.
		log.Warnf("[policy] policy %s not found while building deployment for %s", function.Policy, deployment.Service)
	}
	log.Debug("[policy] Merge annotations")
	if policy.Annotations != nil {
		MergeMap(*deployment.Annotations, *policy.Annotations)
	}
	log.Debug("[policy] Merge Environment")
	if policy.EnvVars != nil {
		if deployment.EnvVars == nil {
			deployment.EnvVars = *(policy.EnvVars)
		} else {
			MergeMap(deployment.EnvVars, *policy.EnvVars)
		}
	}
	log.Debug("[policy] Merge labels")
	// Always materialise a fresh, writable label map: the faas_function label
	// is set unconditionally below, and the previous code panicked on a nil
	// pointer when neither the deployment nor the policy carried labels.
	// Copying (deployment first, then policy so policy entries win) also
	// avoids aliasing and later mutating the policy's own label map.
	labels := map[string]string{}
	if deployment.Labels != nil {
		MergeMap(labels, *deployment.Labels)
	}
	if policy.Labels != nil {
		MergeMap(labels, *policy.Labels)
	}
	deployment.Labels = &labels
	log.Debug("[policy] append constraints")
	if policy.Constraints != nil {
		deployment.Constraints = append(deployment.Constraints, *policy.Constraints...)
	}
	log.Debug("[policy] append secrets")
	if policy.Secrets != nil {
		deployment.Secrets = append(deployment.Secrets, *policy.Secrets...)
	}
	log.Debug("[policy] overwrite limits")
	if policy.Limits != nil {
		deployment.Limits = policy.Limits
	}
	log.Debug("[policy] overwrite requests")
	if policy.Requests != nil {
		deployment.Requests = policy.Requests
	}
	log.Debug("[policy] overwrite ready only root filesystem definition")
	if policy.ReadOnlyRootFilesystem != nil {
		deployment.ReadOnlyRootFilesystem = *policy.ReadOnlyRootFilesystem
	}
	log.Debug("[policy] overwrite namespace")
	if policy.Namespace != nil {
		deployment.Namespace = *policy.Namespace
	}
	// Keep these last to override any illegal statements
	(*deployment.Annotations)["policy"] = function.Policy
	(*deployment.Annotations)["parent_function"] = deployment.Service
	(*deployment.Labels)["faas_function"] = name
	function.InternalName = name
	deployment.Service = name
	log.Debug(deployment)
	return deployment, function
}
// ReloadFromCache rebuilds the lookup table from already-deployed functions,
// re-registering every function whose annotations reference a known policy.
func (p *PolicyStore) ReloadFromCache(functions []*fTypes.FunctionDeployment) {
	log.Info("[policy] reload policy cache ...")
	for _, f := range functions {
		// Skip this function and keep going — the previous code issued a bare
		// `return` here, silently aborting the reload for every remaining
		// function. Also guard the Annotations pointer itself, which was
		// previously dereferenced unconditionally.
		if f.Annotations == nil || *f.Annotations == nil {
			log.Infof("[policy] no annotations found for %s", f.Service)
			continue
		}
		fPolicy, ok := (*f.Annotations)["policy"]
		if !ok {
			continue
		}
		if _, known := p.policies[fPolicy]; !known {
			continue
		}
		parentName, ok := (*f.Annotations)["parent_function"]
		if ok {
			p.AddPolicyFunction(parentName, PolicyFunction{f.Service, fPolicy})
		}
	}
	log.Info("[policy] policy cache reloaded successfully")
}
// DeleteFunction removes the policy-cache entry for a previously registered
// policy deployment, identified by its parent_function and policy annotations.
func (p *PolicyStore) DeleteFunction(f *fTypes.FunctionDeployment) {
	log.Infof("[policy] Attempting to delete %s from policy cache", f.Service)
	// Guard the pointer as well as the map: the previous code dereferenced
	// f.Annotations unconditionally and panicked on a nil pointer.
	if f.Annotations == nil || *f.Annotations == nil {
		log.Infof("[policy] no annotations found for %s", f.Service)
		return
	}
	parentName, ok := (*f.Annotations)["parent_function"]
	if !ok {
		log.Warnf("[policy] no parent_function found for %s", f.Service)
		return
	}
	policy, ok := (*f.Annotations)["policy"]
	if !ok {
		log.Warnf("[policy] no policy found for %s", f.Service)
		return
	}
	log.Infof("[policy] Attempting to delete %s from policy cache %s", parentName, policy)
	i, _, err := p.GetPolicyFunction(parentName, policy)
	if err != nil {
		log.Warnf("[policy] no policy function found for %s with %s", parentName, policy)
		return
	}
	// err is nil here, so the entry exists; the old `if err == nil` re-check
	// and the trailing "Not able to delete" log were unreachable and have
	// been dropped.
	// NOTE(review): the index i is computed under an RLock and used under a
	// later Lock; a concurrent mutation in between could invalidate it —
	// confirm callers serialise deletes.
	p.lock.Lock()
	defer p.lock.Unlock()
	log.Infof("[policy] delete function from policy cache: lookup for %s with %s", parentName, f.Service)
	p.lookUp[parentName] = append(p.lookUp[parentName][:i], p.lookUp[parentName][i+1:]...)
} | types/policy.go | 0.614972 | 0.400573 | policy.go | starcoder |
package sqlparser
import (
"fmt"
"regexp"
"strings"
"github.com/author/sqlparser/query"
)
// Parse takes a string representing a SQL query and parses it into a query.Query struct. It may fail.
func Parse(sqls string) (query.Query, error) {
	qs, err := ParseMany([]string{sqls})
	// Guard against an empty result so we never index an empty slice when
	// parsing failed before producing any query.
	if len(qs) == 0 {
		return query.Query{}, err
	}
	return qs[0], err
}
// ParseMany takes a string slice representing many SQL queries and parses
// them into a query.Query struct slice. It may fail. If it fails, it stops
// at the first failure and returns the queries parsed so far.
func ParseMany(sqls []string) ([]query.Query, error) {
	// Pre-size the result: one parsed query per input statement. make with a
	// zero length keeps the original non-nil empty-slice semantics.
	qs := make([]query.Query, 0, len(sqls))
	for _, sql := range sqls {
		q, err := parse(sql)
		if err != nil {
			return qs, err
		}
		qs = append(qs, q)
	}
	return qs, nil
}
// parse builds a fresh single-use parser over the whitespace-trimmed input
// and runs it.
func parse(sql string) (query.Query, error) {
	return (&parser{0, strings.TrimSpace(sql), stepType, query.Query{}, nil, ""}).parse()
}
// step enumerates the states of the hand-written SQL parser state machine.
type step int

// Parser states, each named after the token it expects next.
const (
	stepType step = iota
	stepSelectField
	stepSelectFrom
	stepSelectComma
	stepSelectFromTable
	stepInsertTable
	stepInsertFieldsOpeningParens
	stepInsertFields
	stepInsertFieldsCommaOrClosingParens
	stepInsertValuesOpeningParens
	stepInsertValuesRWord
	stepInsertValues
	stepInsertValuesCommaOrClosingParens
	stepInsertValuesCommaBeforeOpeningParens
	stepUpdateTable
	stepUpdateSet
	stepUpdateField
	stepUpdateEquals
	stepUpdateValue
	stepUpdateComma
	stepDeleteFromTable
	stepWhere
	stepWhereField
	stepWhereOperator
	stepWhereValue
	stepWhereAnd
)
// parser is a single-use SQL parser driven by the step state machine.
type parser struct {
	i               int         // current byte offset into sql
	sql             string      // the input being parsed
	step            step        // current state-machine state
	query           query.Query // the query being built up
	err             error       // first error encountered, if any
	nextUpdateField string      // field awaiting its value in an UPDATE SET list
}
// parse runs the state machine, then validates the result, logs any error,
// and returns the (possibly partial) query together with that error.
func (p *parser) parse() (query.Query, error) {
	q, err := p.doParse()
	p.err = err
	if p.err == nil {
		p.err = p.validate()
	}
	p.logError()
	return q, p.err
}
// doParse drives the state machine: it repeatedly inspects p.step, consumes
// the token that state expects, records it on p.query and transitions to the
// next state, until the input is exhausted or a state reports an error.
func (p *parser) doParse() (query.Query, error) {
	for {
		if p.i >= len(p.sql) {
			return p.query, p.err
		}
		switch p.step {
		// --- statement type dispatch ---
		case stepType:
			switch strings.ToUpper(p.peek()) {
			case "SELECT":
				p.query.Type = query.Select
				p.pop()
				p.step = stepSelectField
			case "INSERT INTO":
				p.query.Type = query.Insert
				p.pop()
				p.step = stepInsertTable
			case "UPDATE":
				p.query.Type = query.Update
				p.query.Updates = map[string]string{}
				p.pop()
				p.step = stepUpdateTable
			case "DELETE FROM":
				p.query.Type = query.Delete
				p.pop()
				p.step = stepDeleteFromTable
			default:
				return p.query, fmt.Errorf("invalid query type")
			}
		// --- SELECT field list (with optional AS aliases) ---
		case stepSelectField:
			identifier := p.peek()
			if !isIdentifierOrAsterisk(identifier) {
				return p.query, fmt.Errorf("at SELECT: expected field to SELECT")
			}
			p.query.Fields = append(p.query.Fields, identifier)
			p.pop()
			maybeFrom := p.peek()
			if strings.ToUpper(maybeFrom) == "AS" {
				p.pop()
				alias := p.peek()
				if !isIdentifier(alias) {
					return p.query, fmt.Errorf("at SELECT: expected field alias for \"" + identifier + " as\" to SELECT")
				}
				if p.query.Aliases == nil {
					p.query.Aliases = make(map[string]string)
				}
				p.query.Aliases[identifier] = alias
				p.pop()
				maybeFrom = p.peek()
			}
			if strings.ToUpper(maybeFrom) == "FROM" {
				p.step = stepSelectFrom
				continue
			}
			p.step = stepSelectComma
		case stepSelectComma:
			commaRWord := p.peek()
			if commaRWord != "," {
				return p.query, fmt.Errorf("at SELECT: expected comma or FROM")
			}
			p.pop()
			p.step = stepSelectField
		case stepSelectFrom:
			fromRWord := p.peek()
			if strings.ToUpper(fromRWord) != "FROM" {
				return p.query, fmt.Errorf("at SELECT: expected FROM")
			}
			p.pop()
			p.step = stepSelectFromTable
		case stepSelectFromTable:
			tableName := p.peek()
			if len(tableName) == 0 {
				return p.query, fmt.Errorf("at SELECT: expected quoted table name")
			}
			p.query.TableName = tableName
			p.pop()
			p.step = stepWhere
		// --- table name states for INSERT / DELETE / UPDATE ---
		case stepInsertTable:
			tableName := p.peek()
			if len(tableName) == 0 {
				return p.query, fmt.Errorf("at INSERT INTO: expected quoted table name")
			}
			p.query.TableName = tableName
			p.pop()
			p.step = stepInsertFieldsOpeningParens
		case stepDeleteFromTable:
			tableName := p.peek()
			if len(tableName) == 0 {
				return p.query, fmt.Errorf("at DELETE FROM: expected quoted table name")
			}
			p.query.TableName = tableName
			p.pop()
			p.step = stepWhere
		case stepUpdateTable:
			tableName := p.peek()
			if len(tableName) == 0 {
				return p.query, fmt.Errorf("at UPDATE: expected quoted table name")
			}
			p.query.TableName = tableName
			p.pop()
			p.step = stepUpdateSet
		// --- UPDATE SET field = 'value' list ---
		case stepUpdateSet:
			setRWord := p.peek()
			if setRWord != "SET" {
				return p.query, fmt.Errorf("at UPDATE: expected 'SET'")
			}
			p.pop()
			p.step = stepUpdateField
		case stepUpdateField:
			identifier := p.peek()
			if !isIdentifier(identifier) {
				return p.query, fmt.Errorf("at UPDATE: expected at least one field to update")
			}
			p.nextUpdateField = identifier
			p.pop()
			p.step = stepUpdateEquals
		case stepUpdateEquals:
			equalsRWord := p.peek()
			if equalsRWord != "=" {
				return p.query, fmt.Errorf("at UPDATE: expected '='")
			}
			p.pop()
			p.step = stepUpdateValue
		case stepUpdateValue:
			quotedValue, ln := p.peekQuotedStringWithLength()
			if ln == 0 {
				return p.query, fmt.Errorf("at UPDATE: expected quoted value")
			}
			p.query.Updates[p.nextUpdateField] = quotedValue
			p.nextUpdateField = ""
			p.pop()
			maybeWhere := p.peek()
			if strings.ToUpper(maybeWhere) == "WHERE" {
				p.step = stepWhere
				continue
			}
			p.step = stepUpdateComma
		case stepUpdateComma:
			commaRWord := p.peek()
			if commaRWord != "," {
				return p.query, fmt.Errorf("at UPDATE: expected ','")
			}
			p.pop()
			p.step = stepUpdateField
		// --- WHERE clause (shared by SELECT / UPDATE / DELETE) ---
		case stepWhere:
			whereRWord := p.peek()
			if strings.ToUpper(whereRWord) != "WHERE" {
				return p.query, fmt.Errorf("expected WHERE")
			}
			p.pop()
			p.step = stepWhereField
		case stepWhereField:
			identifier := p.peek()
			if !isIdentifier(identifier) {
				return p.query, fmt.Errorf("at WHERE: expected field")
			}
			p.query.Conditions = append(p.query.Conditions, query.Condition{Operand1: identifier, Operand1IsField: true})
			p.pop()
			p.step = stepWhereOperator
		case stepWhereOperator:
			operator := p.peek()
			// The condition being filled in is always the most recent one.
			currentCondition := p.query.Conditions[len(p.query.Conditions)-1]
			switch operator {
			case "=":
				currentCondition.Operator = query.Eq
			case ">":
				currentCondition.Operator = query.Gt
			case ">=":
				currentCondition.Operator = query.Gte
			case "<":
				currentCondition.Operator = query.Lt
			case "<=":
				currentCondition.Operator = query.Lte
			case "!=":
				currentCondition.Operator = query.Ne
			default:
				return p.query, fmt.Errorf("at WHERE: unknown operator")
			}
			p.query.Conditions[len(p.query.Conditions)-1] = currentCondition
			p.pop()
			p.step = stepWhereValue
		case stepWhereValue:
			currentCondition := p.query.Conditions[len(p.query.Conditions)-1]
			identifier := p.peek()
			// The right operand is either a bare field name or a quoted literal.
			if isIdentifier(identifier) {
				currentCondition.Operand2 = identifier
				currentCondition.Operand2IsField = true
			} else {
				quotedValue, ln := p.peekQuotedStringWithLength()
				if ln == 0 {
					return p.query, fmt.Errorf("at WHERE: expected quoted value")
				}
				currentCondition.Operand2 = quotedValue
				currentCondition.Operand2IsField = false
			}
			p.query.Conditions[len(p.query.Conditions)-1] = currentCondition
			p.pop()
			p.step = stepWhereAnd
		case stepWhereAnd:
			andRWord := p.peek()
			if strings.ToUpper(andRWord) != "AND" {
				return p.query, fmt.Errorf("expected AND")
			}
			p.pop()
			p.step = stepWhereField
		// --- INSERT field list and VALUES tuples ---
		case stepInsertFieldsOpeningParens:
			openingParens := p.peek()
			if len(openingParens) != 1 || openingParens != "(" {
				return p.query, fmt.Errorf("at INSERT INTO: expected opening parens")
			}
			p.pop()
			p.step = stepInsertFields
		case stepInsertFields:
			identifier := p.peek()
			if !isIdentifier(identifier) {
				return p.query, fmt.Errorf("at INSERT INTO: expected at least one field to insert")
			}
			p.query.Fields = append(p.query.Fields, identifier)
			p.pop()
			p.step = stepInsertFieldsCommaOrClosingParens
		case stepInsertFieldsCommaOrClosingParens:
			commaOrClosingParens := p.peek()
			if commaOrClosingParens != "," && commaOrClosingParens != ")" {
				return p.query, fmt.Errorf("at INSERT INTO: expected comma or closing parens")
			}
			p.pop()
			if commaOrClosingParens == "," {
				p.step = stepInsertFields
				continue
			}
			p.step = stepInsertValuesRWord
		case stepInsertValuesRWord:
			valuesRWord := p.peek()
			if strings.ToUpper(valuesRWord) != "VALUES" {
				return p.query, fmt.Errorf("at INSERT INTO: expected 'VALUES'")
			}
			p.pop()
			p.step = stepInsertValuesOpeningParens
		case stepInsertValuesOpeningParens:
			openingParens := p.peek()
			if openingParens != "(" {
				return p.query, fmt.Errorf("at INSERT INTO: expected opening parens")
			}
			// Start a new row of values for this tuple.
			p.query.Inserts = append(p.query.Inserts, []string{})
			p.pop()
			p.step = stepInsertValues
		case stepInsertValues:
			quotedValue, ln := p.peekQuotedStringWithLength()
			if ln == 0 {
				return p.query, fmt.Errorf("at INSERT INTO: expected quoted value")
			}
			p.query.Inserts[len(p.query.Inserts)-1] = append(p.query.Inserts[len(p.query.Inserts)-1], quotedValue)
			p.pop()
			p.step = stepInsertValuesCommaOrClosingParens
		case stepInsertValuesCommaOrClosingParens:
			commaOrClosingParens := p.peek()
			if commaOrClosingParens != "," && commaOrClosingParens != ")" {
				return p.query, fmt.Errorf("at INSERT INTO: expected comma or closing parens")
			}
			p.pop()
			if commaOrClosingParens == "," {
				p.step = stepInsertValues
				continue
			}
			// Tuple complete: its arity must cover the declared field list.
			currentInsertRow := p.query.Inserts[len(p.query.Inserts)-1]
			if len(currentInsertRow) < len(p.query.Fields) {
				return p.query, fmt.Errorf("at INSERT INTO: value count doesn't match field count")
			}
			p.step = stepInsertValuesCommaBeforeOpeningParens
		case stepInsertValuesCommaBeforeOpeningParens:
			commaRWord := p.peek()
			if strings.ToUpper(commaRWord) != "," {
				return p.query, fmt.Errorf("at INSERT INTO: expected comma")
			}
			p.pop()
			p.step = stepInsertValuesOpeningParens
		}
	}
}
// peek returns the next token at the cursor without consuming it.
func (p *parser) peek() string {
	token, _ := p.peekWithLength()
	return token
}
// pop consumes and returns the next token, advancing the cursor past the
// token and any trailing whitespace.
func (p *parser) pop() string {
	// Renamed the length variable: the original called it "len", which
	// shadowed the builtin len within this function.
	peeked, length := p.peekWithLength()
	p.i += length
	p.popWhitespace()
	return peeked
}
// popWhitespace advances the cursor past any run of space characters.
func (p *parser) popWhitespace() {
	for p.i < len(p.sql) && p.sql[p.i] == ' ' {
		p.i++
	}
}
// reservedWords lists every token the parser treats as having special
// meaning. Order matters: peekWithLength returns the first match in slice
// order, so multi-character operators (">=", "<=", "!=") must appear before
// their single-character prefixes (">", "<", "=").
var reservedWords = []string{
	"(", ")", ">=", "<=", "!=", ",", "=", ">", "<", "SELECT", "INSERT INTO", "VALUES", "UPDATE", "DELETE FROM",
	"WHERE", "FROM", "SET", "AS",
}
// peekWithLength returns the next token at the cursor and its length in
// bytes, without consuming it. Reserved words are tried first (and returned
// uppercased); a leading single quote starts a quoted string; anything else
// is treated as an identifier. Returns ("", 0) at end of input.
func (p *parser) peekWithLength() (string, int) {
	if p.i >= len(p.sql) {
		return "", 0
	}
	for _, rWord := range reservedWords {
		// Compare the next len(rWord) bytes, uppercased, against the
		// reserved word; min guards against reading past the input.
		token := strings.ToUpper(p.sql[p.i:min(len(p.sql), p.i+len(rWord))])
		if token == rWord {
			return token, len(token)
		}
	}
	if p.sql[p.i] == '\'' { // Quoted string
		return p.peekQuotedStringWithLength()
	}
	return p.peekIdentifierWithLength()
}
// peekQuotedStringWithLength returns the single-quoted string starting at
// the cursor (without the surrounding quotes) and the number of input bytes
// it occupies including both quotes. Backslash-escaped quotes inside the
// string do not terminate it. Returns ("", 0) if no complete quoted string
// starts at the cursor.
func (p *parser) peekQuotedStringWithLength() (string, int) {
	// Bounds fix: the original checked `len(p.sql) < p.i`, which let
	// p.i == len(p.sql) through and panicked on p.sql[p.i] below.
	if p.i >= len(p.sql) || p.sql[p.i] != '\'' {
		return "", 0
	}
	for i := p.i + 1; i < len(p.sql); i++ {
		if p.sql[i] == '\'' && p.sql[i-1] != '\\' {
			return p.sql[p.i+1 : i], len(p.sql[p.i+1:i]) + 2 // +2 for the two quotes
		}
	}
	// Unterminated string literal.
	return "", 0
}
// peekIdentifierWithLength returns the identifier starting at the cursor and
// its length in bytes. An identifier is a maximal run of ASCII letters,
// digits, underscores, or the '*' wildcard; the run may be empty.
func (p *parser) peekIdentifierWithLength() (string, int) {
	// Test bytes directly instead of the original's
	// regexp.MatchString(`[a-zA-Z0-9_*]`, ...) per character, which
	// recompiled the pattern (and ignored its error) on every byte.
	for i := p.i; i < len(p.sql); i++ {
		c := p.sql[i]
		isIdentChar := c == '_' || c == '*' ||
			(c >= 'a' && c <= 'z') ||
			(c >= 'A' && c <= 'Z') ||
			(c >= '0' && c <= '9')
		if !isIdentChar {
			return p.sql[p.i:i], i - p.i
		}
	}
	return p.sql[p.i:], len(p.sql) - p.i
}
// validate performs semantic checks on the fully parsed query and returns
// the first violation found, or nil when the query is well formed.
func (p *parser) validate() error {
	// Parsing ended while still expecting a field name inside WHERE.
	if len(p.query.Conditions) == 0 && p.step == stepWhereField {
		return fmt.Errorf("at WHERE: empty WHERE clause")
	}
	if p.query.Type == query.UnknownType {
		return fmt.Errorf("query type cannot be empty")
	}
	if p.query.TableName == "" {
		return fmt.Errorf("table name cannot be empty")
	}
	// UPDATE and DELETE without a WHERE clause would touch every row,
	// so the clause is mandatory for them.
	if len(p.query.Conditions) == 0 && (p.query.Type == query.Update || p.query.Type == query.Delete) {
		return fmt.Errorf("at WHERE: WHERE clause is mandatory for UPDATE & DELETE")
	}
	for _, c := range p.query.Conditions {
		if c.Operator == query.UnknownOperator {
			return fmt.Errorf("at WHERE: condition without operator")
		}
		// An empty operand is only an error when it was meant to be a
		// field name; empty quoted literals are legal.
		if c.Operand1 == "" && c.Operand1IsField {
			return fmt.Errorf("at WHERE: condition with empty left side operand")
		}
		if c.Operand2 == "" && c.Operand2IsField {
			return fmt.Errorf("at WHERE: condition with empty right side operand")
		}
	}
	if p.query.Type == query.Insert && len(p.query.Inserts) == 0 {
		return fmt.Errorf("at INSERT INTO: need at least one row to insert")
	}
	// Every VALUES row must supply exactly one value per declared field.
	if p.query.Type == query.Insert {
		for _, i := range p.query.Inserts {
			if len(i) != len(p.query.Fields) {
				return fmt.Errorf("at INSERT INTO: value count doesn't match field count")
			}
		}
	}
	return nil
}
// logError prints the SQL being parsed, a caret on the next line pointing at
// the byte position where parsing stopped, and the stored error. It is a
// no-op when no error has been recorded.
func (p *parser) logError() {
	if p.err == nil {
		return
	}
	fmt.Println(p.sql)
	fmt.Println(strings.Repeat(" ", p.i) + "^")
	fmt.Println(p.err)
}
func isIdentifier(s string) bool {
for _, rw := range reservedWords {
if strings.ToUpper(s) == rw {
return false
}
}
matched, _ := regexp.MatchString("[a-zA-Z_][a-zA-Z_0-9]*", s)
return matched
}
// isIdentifierOrAsterisk reports whether s is a legal identifier or the
// "*" wildcard.
func isIdentifierOrAsterisk(s string) bool {
	if s == "*" {
		return true
	}
	return isIdentifier(s)
}
// min returns the smaller of a and b.
func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}
package apitest
import (
"context"
"sync"
"testing"
"time"
"go.opentelemetry.io/otel/api/trace"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/internal/matchers"
"go.opentelemetry.io/otel/label"
)
// Harness drives a conformance test suite against a trace API
// implementation. It holds the *testing.T under which all subtests run.
type Harness struct {
	// t is the test context used to register and report subtests.
	t *testing.T
}
// NewHarness returns a Harness that reports results through t.
func NewHarness(t *testing.T) *Harness {
	return &Harness{
		t: t,
	}
}
// TestTracer runs the trace.Tracer conformance suite against tracers
// produced by subjectFactory. The factory is invoked once per subtest so
// every case starts from a fresh tracer. After the #Start cases it also
// runs the span conformance suite via testSpan.
func (h *Harness) TestTracer(subjectFactory func() trace.Tracer) {
	h.t.Run("#Start", func(t *testing.T) {
		t.Run("propagates the original context", func(t *testing.T) {
			t.Parallel()
			e := matchers.NewExpecter(t)
			subject := subjectFactory()
			ctxKey := testCtxKey{}
			ctxValue := "ctx value"
			ctx := context.WithValue(context.Background(), ctxKey, ctxValue)
			ctx, _ = subject.Start(ctx, "test")
			e.Expect(ctx.Value(ctxKey)).ToEqual(ctxValue)
		})
		t.Run("returns a span containing the expected properties", func(t *testing.T) {
			t.Parallel()
			e := matchers.NewExpecter(t)
			subject := subjectFactory()
			_, span := subject.Start(context.Background(), "test")
			e.Expect(span).NotToBeNil()
			e.Expect(span.Tracer()).ToEqual(subject)
			e.Expect(span.SpanContext().IsValid()).ToBeTrue()
		})
		t.Run("stores the span on the provided context", func(t *testing.T) {
			t.Parallel()
			e := matchers.NewExpecter(t)
			subject := subjectFactory()
			ctx, span := subject.Start(context.Background(), "test")
			e.Expect(span).NotToBeNil()
			e.Expect(span.SpanContext()).NotToEqual(trace.EmptySpanContext())
			e.Expect(trace.SpanFromContext(ctx)).ToEqual(span)
		})
		t.Run("starts spans with unique trace and span IDs", func(t *testing.T) {
			t.Parallel()
			e := matchers.NewExpecter(t)
			subject := subjectFactory()
			_, span1 := subject.Start(context.Background(), "span1")
			_, span2 := subject.Start(context.Background(), "span2")
			sc1 := span1.SpanContext()
			sc2 := span2.SpanContext()
			e.Expect(sc1.TraceID).NotToEqual(sc2.TraceID)
			e.Expect(sc1.SpanID).NotToEqual(sc2.SpanID)
		})
		t.Run("records the span if specified", func(t *testing.T) {
			t.Parallel()
			e := matchers.NewExpecter(t)
			subject := subjectFactory()
			_, span := subject.Start(context.Background(), "span", trace.WithRecord())
			e.Expect(span.IsRecording()).ToBeTrue()
		})
		// Parent/child relationships: a child started under a parent
		// context must share the parent's trace ID but get its own span
		// ID, unless WithNewRoot severs the relationship.
		t.Run("propagates a parent's trace ID through the context", func(t *testing.T) {
			t.Parallel()
			e := matchers.NewExpecter(t)
			subject := subjectFactory()
			ctx, parent := subject.Start(context.Background(), "parent")
			_, child := subject.Start(ctx, "child")
			psc := parent.SpanContext()
			csc := child.SpanContext()
			e.Expect(csc.TraceID).ToEqual(psc.TraceID)
			e.Expect(csc.SpanID).NotToEqual(psc.SpanID)
		})
		t.Run("ignores parent's trace ID when new root is requested", func(t *testing.T) {
			t.Parallel()
			e := matchers.NewExpecter(t)
			subject := subjectFactory()
			ctx, parent := subject.Start(context.Background(), "parent")
			_, child := subject.Start(ctx, "child", trace.WithNewRoot())
			psc := parent.SpanContext()
			csc := child.SpanContext()
			e.Expect(csc.TraceID).NotToEqual(psc.TraceID)
			e.Expect(csc.SpanID).NotToEqual(psc.SpanID)
		})
		// Same pair of checks, but with the parent supplied as a remote
		// span context rather than a local span on the context.
		t.Run("propagates remote parent's trace ID through the context", func(t *testing.T) {
			t.Parallel()
			e := matchers.NewExpecter(t)
			subject := subjectFactory()
			_, remoteParent := subject.Start(context.Background(), "remote parent")
			parentCtx := trace.ContextWithRemoteSpanContext(context.Background(), remoteParent.SpanContext())
			_, child := subject.Start(parentCtx, "child")
			psc := remoteParent.SpanContext()
			csc := child.SpanContext()
			e.Expect(csc.TraceID).ToEqual(psc.TraceID)
			e.Expect(csc.SpanID).NotToEqual(psc.SpanID)
		})
		t.Run("ignores remote parent's trace ID when new root is requested", func(t *testing.T) {
			t.Parallel()
			e := matchers.NewExpecter(t)
			subject := subjectFactory()
			_, remoteParent := subject.Start(context.Background(), "remote parent")
			parentCtx := trace.ContextWithRemoteSpanContext(context.Background(), remoteParent.SpanContext())
			_, child := subject.Start(parentCtx, "child", trace.WithNewRoot())
			psc := remoteParent.SpanContext()
			csc := child.SpanContext()
			e.Expect(csc.TraceID).NotToEqual(psc.TraceID)
			e.Expect(csc.SpanID).NotToEqual(psc.SpanID)
		})
	})
	h.testSpan(subjectFactory)
}
// testSpan runs the trace.Span conformance suite. For every way of creating
// a span (currently only Tracer#Start) it checks that each mutating span
// method is safe to call from two goroutines concurrently, and that End is
// idempotent. Run the suite with -race for the thread-safety checks to be
// meaningful.
func (h *Harness) testSpan(tracerFactory func() trace.Tracer) {
	// methods maps a subtest name to an invocation of one span method.
	var methods = map[string]func(span trace.Span){
		"#End": func(span trace.Span) {
			span.End()
		},
		"#AddEvent": func(span trace.Span) {
			span.AddEvent(context.Background(), "test event")
		},
		"#AddEventWithTimestamp": func(span trace.Span) {
			span.AddEventWithTimestamp(context.Background(), time.Now(), "test event")
		},
		"#SetStatus": func(span trace.Span) {
			span.SetStatus(codes.Error, "internal")
		},
		"#SetName": func(span trace.Span) {
			span.SetName("new name")
		},
		"#SetAttributes": func(span trace.Span) {
			span.SetAttributes(label.String("key1", "value"), label.Int("key2", 123))
		},
	}
	// mechanisms maps a subtest name to a way of obtaining a fresh span.
	var mechanisms = map[string]func() trace.Span{
		"Span created via Tracer#Start": func() trace.Span {
			tracer := tracerFactory()
			_, subject := tracer.Start(context.Background(), "test")
			return subject
		},
	}
	for mechanismName, mechanism := range mechanisms {
		h.t.Run(mechanismName, func(t *testing.T) {
			for methodName, method := range methods {
				t.Run(methodName, func(t *testing.T) {
					t.Run("is thread-safe", func(t *testing.T) {
						t.Parallel()
						span := mechanism()
						wg := &sync.WaitGroup{}
						wg.Add(2)
						go func() {
							defer wg.Done()
							method(span)
						}()
						go func() {
							defer wg.Done()
							method(span)
						}()
						wg.Wait()
					})
				})
			}
			t.Run("#End", func(t *testing.T) {
				t.Run("can be called multiple times", func(t *testing.T) {
					t.Parallel()
					span := mechanism()
					span.End()
					span.End()
				})
			})
		})
	}
}
type testCtxKey struct{} | api/apitest/harness.go | 0.622 | 0.534795 | harness.go | starcoder |
package models
import (
i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e "time"
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// RiskDetection represents a risk detected for an Azure AD user or sign-in.
// It embeds Entity for the common id property; all other fields are
// populated through GetFieldDeserializers and written via Serialize.
type RiskDetection struct {
    Entity
    // Indicates the activity type the detected risk is linked to. The possible values are signin, user, unknownFutureValue.
    activity *ActivityType
    // Date and time that the risky activity occurred. The DateTimeOffset type represents date and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is 2014-01-01T00:00:00Z
    activityDateTime *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time
    // Additional information associated with the risk detection in JSON format.
    additionalInfo *string
    // Correlation ID of the sign-in associated with the risk detection. This property is null if the risk detection is not associated with a sign-in.
    correlationId *string
    // Date and time that the risk was detected. The DateTimeOffset type represents date and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is 2014-01-01T00:00:00Z
    detectedDateTime *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time
    // Timing of the detected risk (real-time/offline). The possible values are notDefined, realtime, nearRealtime, offline, unknownFutureValue.
    detectionTimingType *RiskDetectionTimingType
    // Provides the IP address of the client from where the risk occurred.
    ipAddress *string
    // Date and time that the risk detection was last updated.
    lastUpdatedDateTime *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time
    // Location of the sign-in.
    location SignInLocationable
    // Request ID of the sign-in associated with the risk detection. This property is null if the risk detection is not associated with a sign-in.
    requestId *string
    // Details of the detected risk. The possible values are none, adminGeneratedTemporaryPassword, userPerformedSecuredPasswordChange, userPerformedSecuredPasswordReset, adminConfirmedSigninSafe, aiConfirmedSigninSafe, userPassedMFADrivenByRiskBasedPolicy, adminDismissedAllRiskForUser, adminConfirmedSigninCompromised, hidden, adminConfirmedUserCompromised, unknownFutureValue. Note: Details for this property are only available for Azure AD Premium P2 customers. P1 customers will be returned hidden.
    riskDetail *RiskDetail
    // The type of risk event detected. The possible values are unlikelyTravel, anonymizedIPAddress, maliciousIPAddress, unfamiliarFeatures, malwareInfectedIPAddress, suspiciousIPAddress, leakedCredentials, investigationsThreatIntelligence, generic,adminConfirmedUserCompromised, mcasImpossibleTravel, mcasSuspiciousInboxManipulationRules, investigationsThreatIntelligenceSigninLinked, maliciousIPAddressValidCredentialsBlockedIP, and unknownFutureValue. For more information about each value, see riskEventType values.
    riskEventType *string
    // Level of the detected risk. The possible values are low, medium, high, hidden, none, unknownFutureValue. Note: Details for this property are only available for Azure AD Premium P2 customers. P1 customers will be returned hidden.
    riskLevel *RiskLevel
    // The state of a detected risky user or sign-in. The possible values are none, confirmedSafe, remediated, dismissed, atRisk, confirmedCompromised, and unknownFutureValue.
    riskState *RiskState
    // Source of the risk detection. For example, activeDirectory.
    source *string
    // Indicates the type of token issuer for the detected sign-in risk. The possible values are AzureAD, ADFederationServices, and unknownFutureValue.
    tokenIssuerType *TokenIssuerType
    // Name of the user.
    userDisplayName *string
    // Unique ID of the user. The DateTimeOffset type represents date and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is 2014-01-01T00:00:00Z
    userId *string
    // The user principal name (UPN) of the user.
    userPrincipalName *string
}
// NewRiskDetection instantiates a new riskDetection and sets the default values.
func NewRiskDetection()(*RiskDetection) {
    return &RiskDetection{
        Entity: *NewEntity(),
    }
}
// CreateRiskDetectionFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value.
// The parseNode argument is accepted for factory-signature compatibility but
// is not inspected here: the concrete type is always RiskDetection. The
// error result is always nil.
func CreateRiskDetectionFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
    return NewRiskDetection(), nil
}
// GetActivity gets the activity property value. Indicates the activity type the detected risk is linked to. The possible values are signin, user, unknownFutureValue.
func (m *RiskDetection) GetActivity()(*ActivityType) {
    if m == nil {
        return nil
    }
    return m.activity
}
// GetActivityDateTime gets the activityDateTime property value. Date and time that the risky activity occurred. The DateTimeOffset type represents date and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is 2014-01-01T00:00:00Z
func (m *RiskDetection) GetActivityDateTime()(*i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time) {
    if m == nil {
        return nil
    }
    return m.activityDateTime
}
// GetAdditionalInfo gets the additionalInfo property value. Additional information associated with the risk detection in JSON format.
func (m *RiskDetection) GetAdditionalInfo()(*string) {
    if m == nil {
        return nil
    }
    return m.additionalInfo
}
// GetCorrelationId gets the correlationId property value. Correlation ID of the sign-in associated with the risk detection. This property is null if the risk detection is not associated with a sign-in.
func (m *RiskDetection) GetCorrelationId()(*string) {
    if m == nil {
        return nil
    }
    return m.correlationId
}
// GetDetectedDateTime gets the detectedDateTime property value. Date and time that the risk was detected. The DateTimeOffset type represents date and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is 2014-01-01T00:00:00Z
func (m *RiskDetection) GetDetectedDateTime()(*i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time) {
    if m == nil {
        return nil
    }
    return m.detectedDateTime
}
// GetDetectionTimingType gets the detectionTimingType property value. Timing of the detected risk (real-time/offline). The possible values are notDefined, realtime, nearRealtime, offline, unknownFutureValue.
func (m *RiskDetection) GetDetectionTimingType()(*RiskDetectionTimingType) {
    if m == nil {
        return nil
    }
    return m.detectionTimingType
}
// GetFieldDeserializers the deserialization information for the current model
func (m *RiskDetection) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
res := m.Entity.GetFieldDeserializers()
res["activity"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetEnumValue(ParseActivityType)
if err != nil {
return err
}
if val != nil {
m.SetActivity(val.(*ActivityType))
}
return nil
}
res["activityDateTime"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetTimeValue()
if err != nil {
return err
}
if val != nil {
m.SetActivityDateTime(val)
}
return nil
}
res["additionalInfo"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetAdditionalInfo(val)
}
return nil
}
res["correlationId"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetCorrelationId(val)
}
return nil
}
res["detectedDateTime"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetTimeValue()
if err != nil {
return err
}
if val != nil {
m.SetDetectedDateTime(val)
}
return nil
}
res["detectionTimingType"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetEnumValue(ParseRiskDetectionTimingType)
if err != nil {
return err
}
if val != nil {
m.SetDetectionTimingType(val.(*RiskDetectionTimingType))
}
return nil
}
res["ipAddress"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetIpAddress(val)
}
return nil
}
res["lastUpdatedDateTime"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetTimeValue()
if err != nil {
return err
}
if val != nil {
m.SetLastUpdatedDateTime(val)
}
return nil
}
res["location"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetObjectValue(CreateSignInLocationFromDiscriminatorValue)
if err != nil {
return err
}
if val != nil {
m.SetLocation(val.(SignInLocationable))
}
return nil
}
res["requestId"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetRequestId(val)
}
return nil
}
res["riskDetail"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetEnumValue(ParseRiskDetail)
if err != nil {
return err
}
if val != nil {
m.SetRiskDetail(val.(*RiskDetail))
}
return nil
}
res["riskEventType"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetRiskEventType(val)
}
return nil
}
res["riskLevel"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetEnumValue(ParseRiskLevel)
if err != nil {
return err
}
if val != nil {
m.SetRiskLevel(val.(*RiskLevel))
}
return nil
}
res["riskState"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetEnumValue(ParseRiskState)
if err != nil {
return err
}
if val != nil {
m.SetRiskState(val.(*RiskState))
}
return nil
}
res["source"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetSource(val)
}
return nil
}
res["tokenIssuerType"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetEnumValue(ParseTokenIssuerType)
if err != nil {
return err
}
if val != nil {
m.SetTokenIssuerType(val.(*TokenIssuerType))
}
return nil
}
res["userDisplayName"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetUserDisplayName(val)
}
return nil
}
res["userId"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetUserId(val)
}
return nil
}
res["userPrincipalName"] = func (n i<PASSWORD>52208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetUserPrincipalName(val)
}
return nil
}
return res
}
// GetIpAddress gets the ipAddress property value. Provides the IP address of the client from where the risk occurred.
func (m *RiskDetection) GetIpAddress()(*string) {
    if m == nil {
        return nil
    }
    return m.ipAddress
}
// GetLastUpdatedDateTime gets the lastUpdatedDateTime property value. Date and time that the risk detection was last updated.
func (m *RiskDetection) GetLastUpdatedDateTime()(*i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time) {
    if m == nil {
        return nil
    }
    return m.lastUpdatedDateTime
}
// GetLocation gets the location property value. Location of the sign-in.
func (m *RiskDetection) GetLocation()(SignInLocationable) {
    if m == nil {
        return nil
    }
    return m.location
}
// GetRequestId gets the requestId property value. Request ID of the sign-in associated with the risk detection. This property is null if the risk detection is not associated with a sign-in.
func (m *RiskDetection) GetRequestId()(*string) {
    if m == nil {
        return nil
    }
    return m.requestId
}
// GetRiskDetail gets the riskDetail property value. Details of the detected risk. The possible values are none, adminGeneratedTemporaryPassword, userPerformedSecuredPasswordChange, userPerformedSecuredPasswordReset, adminConfirmedSigninSafe, aiConfirmedSigninSafe, userPassedMFADrivenByRiskBasedPolicy, adminDismissedAllRiskForUser, adminConfirmedSigninCompromised, hidden, adminConfirmedUserCompromised, unknownFutureValue. Note: Details for this property are only available for Azure AD Premium P2 customers. P1 customers will be returned hidden.
func (m *RiskDetection) GetRiskDetail()(*RiskDetail) {
    if m == nil {
        return nil
    }
    return m.riskDetail
}
// GetRiskEventType gets the riskEventType property value. The type of risk event detected. The possible values are unlikelyTravel, anonymizedIPAddress, maliciousIPAddress, unfamiliarFeatures, malwareInfectedIPAddress, suspiciousIPAddress, leakedCredentials, investigationsThreatIntelligence, generic,adminConfirmedUserCompromised, mcasImpossibleTravel, mcasSuspiciousInboxManipulationRules, investigationsThreatIntelligenceSigninLinked, maliciousIPAddressValidCredentialsBlockedIP, and unknownFutureValue. For more information about each value, see riskEventType values.
func (m *RiskDetection) GetRiskEventType()(*string) {
    if m == nil {
        return nil
    }
    return m.riskEventType
}
// GetRiskLevel gets the riskLevel property value. Level of the detected risk. The possible values are low, medium, high, hidden, none, unknownFutureValue. Note: Details for this property are only available for Azure AD Premium P2 customers. P1 customers will be returned hidden.
func (m *RiskDetection) GetRiskLevel()(*RiskLevel) {
    if m == nil {
        return nil
    }
    return m.riskLevel
}
// GetRiskState gets the riskState property value. The state of a detected risky user or sign-in. The possible values are none, confirmedSafe, remediated, dismissed, atRisk, confirmedCompromised, and unknownFutureValue.
func (m *RiskDetection) GetRiskState()(*RiskState) {
    if m == nil {
        return nil
    }
    return m.riskState
}
// GetSource gets the source property value. Source of the risk detection. For example, activeDirectory.
func (m *RiskDetection) GetSource()(*string) {
    if m == nil {
        return nil
    }
    return m.source
}
// GetTokenIssuerType gets the tokenIssuerType property value. Indicates the type of token issuer for the detected sign-in risk. The possible values are AzureAD, ADFederationServices, and unknownFutureValue.
func (m *RiskDetection) GetTokenIssuerType()(*TokenIssuerType) {
    if m == nil {
        return nil
    }
    return m.tokenIssuerType
}
// GetUserDisplayName gets the userDisplayName property value. Name of the user.
func (m *RiskDetection) GetUserDisplayName()(*string) {
    if m == nil {
        return nil
    }
    return m.userDisplayName
}
// GetUserId gets the userId property value. Unique ID of the user. The DateTimeOffset type represents date and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is 2014-01-01T00:00:00Z
func (m *RiskDetection) GetUserId()(*string) {
    if m == nil {
        return nil
    }
    return m.userId
}
// GetUserPrincipalName gets the userPrincipalName property value. The user principal name (UPN) of the user.
func (m *RiskDetection) GetUserPrincipalName()(*string) {
    if m == nil {
        return nil
    }
    return m.userPrincipalName
}
// Serialize writes every property of the current object to writer, after
// delegating to the embedded Entity's Serialize. Enum-typed properties are
// written as their string names (hence the explicit .String() casts); the
// first write error aborts serialization and is returned.
func (m *RiskDetection) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
    err := m.Entity.Serialize(writer)
    if err != nil {
        return err
    }
    // Enum fields are guarded by a nil check because String() would
    // dereference the pointer; plain fields are passed through directly.
    if m.GetActivity() != nil {
        cast := (*m.GetActivity()).String()
        err = writer.WriteStringValue("activity", &cast)
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteTimeValue("activityDateTime", m.GetActivityDateTime())
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteStringValue("additionalInfo", m.GetAdditionalInfo())
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteStringValue("correlationId", m.GetCorrelationId())
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteTimeValue("detectedDateTime", m.GetDetectedDateTime())
        if err != nil {
            return err
        }
    }
    if m.GetDetectionTimingType() != nil {
        cast := (*m.GetDetectionTimingType()).String()
        err = writer.WriteStringValue("detectionTimingType", &cast)
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteStringValue("ipAddress", m.GetIpAddress())
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteTimeValue("lastUpdatedDateTime", m.GetLastUpdatedDateTime())
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteObjectValue("location", m.GetLocation())
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteStringValue("requestId", m.GetRequestId())
        if err != nil {
            return err
        }
    }
    if m.GetRiskDetail() != nil {
        cast := (*m.GetRiskDetail()).String()
        err = writer.WriteStringValue("riskDetail", &cast)
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteStringValue("riskEventType", m.GetRiskEventType())
        if err != nil {
            return err
        }
    }
    if m.GetRiskLevel() != nil {
        cast := (*m.GetRiskLevel()).String()
        err = writer.WriteStringValue("riskLevel", &cast)
        if err != nil {
            return err
        }
    }
    if m.GetRiskState() != nil {
        cast := (*m.GetRiskState()).String()
        err = writer.WriteStringValue("riskState", &cast)
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteStringValue("source", m.GetSource())
        if err != nil {
            return err
        }
    }
    if m.GetTokenIssuerType() != nil {
        cast := (*m.GetTokenIssuerType()).String()
        err = writer.WriteStringValue("tokenIssuerType", &cast)
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteStringValue("userDisplayName", m.GetUserDisplayName())
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteStringValue("userId", m.GetUserId())
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteStringValue("userPrincipalName", m.GetUserPrincipalName())
        if err != nil {
            return err
        }
    }
    return nil
}
// SetActivity sets the activity property value. Indicates the activity type the detected risk is linked to. The possible values are signin, user, unknownFutureValue.
func (m *RiskDetection) SetActivity(value *ActivityType)() {
    if m == nil {
        return
    }
    m.activity = value
}
// SetActivityDateTime sets the activityDateTime property value. Date and time that the risky activity occurred. The DateTimeOffset type represents date and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is 2014-01-01T00:00:00Z
func (m *RiskDetection) SetActivityDateTime(value *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time)() {
    if m == nil {
        return
    }
    m.activityDateTime = value
}
// SetAdditionalInfo sets the additionalInfo property value. Additional information associated with the risk detection in JSON format.
func (m *RiskDetection) SetAdditionalInfo(value *string)() {
    if m == nil {
        return
    }
    m.additionalInfo = value
}
// SetCorrelationId sets the correlationId property value. Correlation ID of the sign-in associated with the risk detection. This property is null if the risk detection is not associated with a sign-in.
func (m *RiskDetection) SetCorrelationId(value *string)() {
    if m == nil {
        return
    }
    m.correlationId = value
}
// SetDetectedDateTime sets the detectedDateTime property value. Date and time that the risk was detected. The DateTimeOffset type represents date and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is 2014-01-01T00:00:00Z
func (m *RiskDetection) SetDetectedDateTime(value *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time)() {
    if m == nil {
        return
    }
    m.detectedDateTime = value
}
// SetDetectionTimingType sets the detectionTimingType property value. Timing of the detected risk (real-time/offline). The possible values are notDefined, realtime, nearRealtime, offline, unknownFutureValue.
func (m *RiskDetection) SetDetectionTimingType(value *RiskDetectionTimingType)() {
    if m == nil {
        return
    }
    m.detectionTimingType = value
}
// SetIpAddress sets the ipAddress property value. Provides the IP address of the client from where the risk occurred.
func (m *RiskDetection) SetIpAddress(value *string)() {
    if m == nil {
        return
    }
    m.ipAddress = value
}
// SetLastUpdatedDateTime sets the lastUpdatedDateTime property value. Date and time that the risk detection was last updated.
func (m *RiskDetection) SetLastUpdatedDateTime(value *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time)() {
    if m == nil {
        return
    }
    m.lastUpdatedDateTime = value
}
// SetLocation sets the location property value. Location of the sign-in.
func (m *RiskDetection) SetLocation(value SignInLocationable)() {
    if m == nil {
        return
    }
    m.location = value
}
// SetRequestId sets the requestId property value. Request ID of the sign-in associated with the risk detection. This property is null if the risk detection is not associated with a sign-in.
func (m *RiskDetection) SetRequestId(value *string)() {
    if m == nil {
        return
    }
    m.requestId = value
}
// SetRiskDetail sets the riskDetail property value. Details of the detected risk. The possible values are none, adminGeneratedTemporaryPassword, userPerformedSecuredPasswordChange, userPerformedSecuredPasswordReset, adminConfirmedSigninSafe, aiConfirmedSigninSafe, userPassedMFADrivenByRiskBasedPolicy, adminDismissedAllRiskForUser, adminConfirmedSigninCompromised, hidden, adminConfirmedUserCompromised, unknownFutureValue. Note: Details for this property are only available for Azure AD Premium P2 customers. P1 customers will be returned hidden.
func (m *RiskDetection) SetRiskDetail(value *RiskDetail)() {
    if m == nil {
        return
    }
    m.riskDetail = value
}
// SetRiskEventType sets the riskEventType property value. The type of risk event detected. The possible values are unlikelyTravel, anonymizedIPAddress, maliciousIPAddress, unfamiliarFeatures, malwareInfectedIPAddress, suspiciousIPAddress, leakedCredentials, investigationsThreatIntelligence, generic,adminConfirmedUserCompromised, mcasImpossibleTravel, mcasSuspiciousInboxManipulationRules, investigationsThreatIntelligenceSigninLinked, maliciousIPAddressValidCredentialsBlockedIP, and unknownFutureValue. For more information about each value, see riskEventType values.
func (m *RiskDetection) SetRiskEventType(value *string)() {
    if m == nil {
        return
    }
    m.riskEventType = value
}
// SetRiskLevel sets the riskLevel property value. Level of the detected risk. The possible values are low, medium, high, hidden, none, unknownFutureValue. Note: Details for this property are only available for Azure AD Premium P2 customers. P1 customers will be returned hidden.
func (m *RiskDetection) SetRiskLevel(value *RiskLevel)() {
    if m == nil {
        return
    }
    m.riskLevel = value
}
// SetRiskState sets the riskState property value. The state of a detected risky user or sign-in. The possible values are none, confirmedSafe, remediated, dismissed, atRisk, confirmedCompromised, and unknownFutureValue.
func (m *RiskDetection) SetRiskState(value *RiskState)() {
    if m == nil {
        return
    }
    m.riskState = value
}
// SetSource sets the source property value. Source of the risk detection. For example, activeDirectory.
func (m *RiskDetection) SetSource(value *string)() {
if m != nil {
m.source = value
}
}
// SetTokenIssuerType sets the tokenIssuerType property value. Indicates the type of token issuer for the detected sign-in risk. The possible values are AzureAD, ADFederationServices, and unknownFutureValue.
func (m *RiskDetection) SetTokenIssuerType(value *TokenIssuerType)() {
if m != nil {
m.tokenIssuerType = value
}
}
// SetUserDisplayName sets the userDisplayName property value. Name of the user.
func (m *RiskDetection) SetUserDisplayName(value *string)() {
if m != nil {
m.userDisplayName = value
}
}
// SetUserId sets the userId property value. Unique ID of the user. The DateTimeOffset type represents date and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is 2014-01-01T00:00:00Z
func (m *RiskDetection) SetUserId(value *string)() {
if m != nil {
m.userId = value
}
}
// SetUserPrincipalName sets the userPrincipalName property value. The user principal name (UPN) of the user.
func (m *RiskDetection) SetUserPrincipalName(value *string)() {
if m != nil {
m.userPrincipalName = value
}
} | models/risk_detection.go | 0.734976 | 0.48438 | risk_detection.go | starcoder |
package slang
import (
"fmt"
"reflect"
"strings"
)
// LangType base type to interface with the language types of slang.
type LangType interface{}
// Algebraic is an interface for algebraic operations. Types that implement this interface can
// overload the behavior of an "algebraic" operator (+, -, *, /). Only Algebraic types can be added,
// subtracted, multiplied, and divided by other Algrebraic types.
type Algebraic interface {
Plus(obj Algebraic) (Algebraic, error)
Minus(obj Algebraic) (Algebraic, error)
Multiply(obj Algebraic) (Algebraic, error)
Divide(obj Algebraic) (Algebraic, error)
}
// Comparable is an interface for comparision operations. Types that implement this interface can
// overload the behavior of a comparision operator (>, <, >=, <=). Only Comparable types can be
// compared against other Comparable types.
type Comparable interface {
GreaterThan(obj Comparable) (bool, error)
LessThan(obj Comparable) (bool, error)
GreaterThanOrEqualTo(obj Comparable) (bool, error)
LessThanOrEqualTo(obj Comparable) (bool, error)
}
// Symbol slang symbol type
type Symbol string
// Number is a slang number type.
type Number float64
func (n Number) String() string {
return fmt.Sprintf("%v", float64(n))
}
// Plus returns the sum of two numbers or the concatenation of the number and a string.
func (n Number) Plus(obj Algebraic) (Algebraic, error) {
switch t := obj.(type) {
case Number:
return n + t, nil
case Str:
return Str(fmt.Sprint(n)) + t, nil
default:
return nil, fmt.Errorf("Cannot add number and %T", t)
}
}
// Minus returns the difference of two numbers.
func (n Number) Minus(obj Algebraic) (Algebraic, error) {
switch t := obj.(type) {
case Number:
return n - t, nil
default:
return nil, fmt.Errorf("Cannot subtract number and %T", t)
}
}
// Multiply returns the product of two numbers.
func (n Number) Multiply(obj Algebraic) (Algebraic, error) {
switch t := obj.(type) {
case Number:
return n * t, nil
default:
return nil, fmt.Errorf("Cannot multiply number and %T", t)
}
}
// Divide returns the quotient of two numbers.
func (n Number) Divide(obj Algebraic) (Algebraic, error) {
switch t := obj.(type) {
case Number:
return n / t, nil
default:
return nil, fmt.Errorf("Cannot divide number and %T", t)
}
}
// GreaterThan returns true if number is greater than the given number.
func (n Number) GreaterThan(obj Comparable) (bool, error) {
switch t := obj.(type) {
case Number:
return n > t, nil
default:
return false, fmt.Errorf("Cannot compare number and %T", t)
}
}
// LessThan returns true if number is less than the given number.
func (n Number) LessThan(obj Comparable) (bool, error) {
switch t := obj.(type) {
case Number:
return n < t, nil
default:
return false, fmt.Errorf("Cannot compare number and %T", t)
}
}
// GreaterThanOrEqualTo returns true if number is greater than or equal to the given number.
func (n Number) GreaterThanOrEqualTo(obj Comparable) (bool, error) {
switch t := obj.(type) {
case Number:
return n >= t, nil
default:
return false, fmt.Errorf("Cannot compare number and %T", t)
}
}
// LessThanOrEqualTo returns true if number is less than or equal to the given number.
func (n Number) LessThanOrEqualTo(obj Comparable) (bool, error) {
switch t := obj.(type) {
case Number:
return n <= t, nil
default:
return false, fmt.Errorf("Cannot compare number and %T", t)
}
}
// Str is a slang string type.
type Str string
func (s Str) String() string {
return string("\"" + s + "\"")
}
// Plus returns a new, concatenated string.
// Concatenating a non-string uses the default format verb from the fmt package.
func (s Str) Plus(obj Algebraic) (Algebraic, error) {
switch t := obj.(type) {
case Str:
return s + t, nil
default:
return s + Str(fmt.Sprint(t)), nil
}
}
// Minus returns an invalid operation error when an attempt to subtract a string occurs.
func (s Str) Minus(obj Algebraic) (Algebraic, error) {
return nil, fmt.Errorf("Subtraction operator is not defined on string")
}
// Multiply returns a new string with repeat count copies.
// If the repeat count is a negative number the absolute value is used.
func (s Str) Multiply(obj Algebraic) (Algebraic, error) {
if obj == nil {
return nil, fmt.Errorf("Repeat count expected")
}
switch t := obj.(type) {
case Number:
if t < 0 {
t *= -1
}
repeated := strings.Repeat(string(s), int(t))
return Str(repeated), nil
default:
return nil, fmt.Errorf("Repeat expects a number")
}
}
// Divide returns an invalid operation error when an attempt to divide a string occurs.
func (s Str) Divide(obj Algebraic) (Algebraic, error) {
return nil, fmt.Errorf("Division operator is not defined on string")
}
// Modulo returns an invalid operation error when an attempt to mod a string occurs.
func (s Str) Modulo(obj Algebraic) (Algebraic, error) {
return nil, fmt.Errorf("Modulo operator is not defined on string")
}
// Subroutine a slang function that is implemented in the host language, Go!
type Subroutine struct {
Func func(...LangType) (LangType, error)
}
// Apply applies arguments to the subroutine and returns the evaluation.
func (subr Subroutine) Apply(args ...LangType) (LangType, error) {
return subr.Func(args...)
}
func (subr Subroutine) String() string {
return "<procedure>"
}
// Lambda a slang function type. Use MakeLambda to construct a Lambda.
type Lambda struct {
params Vector
body List
env Env
}
func (lambda Lambda) String() string {
return "<procedure>"
}
// MakeLambda makes a new Lambda function with N-arity. When applied, arguments are bound to its
// environment frame (A.K.A. closure) and the body is evaluated. The evaluation of the final, or
// only, expression in the body is used as the return value.
// Usage: `(lambda [params...] body...)`
func MakeLambda(env Env, params Vector, body List) (Lambda, error) {
if body.Len() == 0 {
return Lambda{}, fmt.Errorf("Lambda body expected")
}
return Lambda{
params: params,
body: body,
env: env,
}, nil
}
// NumberP returns true if object is a number.
// Usage: `(procedure? x)`
func NumberP(x LangType) bool {
_, isNumber := x.(Number)
return isNumber
}
// ProcedureP returns true if object is a slang lambda or Go subroutine.
// Usage: `(procedure? x)`
func ProcedureP(x LangType) bool {
switch x.(type) {
case Lambda, Subroutine:
return true
default:
return false
}
}
// StringP returns true if object is a string.
// Usage: `(string? x)`
func StringP(x LangType) bool {
_, isString := x.(Str)
return isString
}
// SymbolP returns true if object is a Symbol.
// Usage: `(symbol? x)`
func SymbolP(x LangType) bool {
_, isSymbol := x.(Symbol)
return isSymbol
}
// Eq is a conditional operator that returns true if lhs is equal to rhs. If lhs and rhs are
// sequences, their items are compared one-to-one for equality.
// Usage: `(= x y)`
func Eq(lhs, rhs LangType) bool {
if reflect.TypeOf(lhs) != reflect.TypeOf(rhs) {
return false
}
switch t1 := lhs.(type) {
case List:
t2 := rhs.(List)
if t1.Len() != t2.Len() {
return false
}
t1Node := t1
t2Node := t2
for t1Node.Len() > 0 {
if !Eq(t1Node.First(), t2Node.First()) {
return false
}
t1Node = t1Node.Rest().(List)
t2Node = t2Node.Rest().(List)
}
return true
case Vector:
t2 := rhs.(Vector)
if t1.Len() != t2.Len() {
return false
}
for i, item := range t1 {
if !Eq(item, t2[i]) {
return false
}
}
return true
default:
return lhs == rhs
}
}
// Gt is a conditional operator that returns true if lhs is greater than rhs.
// Usage: `(> x y)`
func Gt(lhs Comparable, rhs Comparable) (LangType, error) {
return lhs.GreaterThan(rhs)
}
// Lt is a conditional operator that returns true if lhs is less than rhs.
// Usage: `(< x y)`
func Lt(lhs, rhs Comparable) (LangType, error) {
return lhs.LessThan(rhs)
}
// Gte is a conditional operator that returns true if lhs is greater than or equal to rhs.
// Usage: `(>= x y)`
func Gte(lhs, rhs Comparable) (LangType, error) {
return lhs.GreaterThanOrEqualTo(rhs)
}
// Lte is a conditional operator that returns true if lhs is less than or equal to than rhs.
// Usage: `(<= x y)`
func Lte(lhs, rhs Comparable) (LangType, error) {
return lhs.LessThanOrEqualTo(rhs)
}
// Add is the addition operator.
// Usage: `(+ x y)`
func Add(x, y Algebraic) (LangType, error) {
return x.Plus(y)
}
// Sub is the subtraction operator.
// Usage: `(- x y)`
func Sub(x, y Algebraic) (LangType, error) {
return x.Minus(y)
}
// Mul is the multiplication operator.
// Usage: `(* x y)`
func Mul(x, y Algebraic) (LangType, error) {
return x.Multiply(y)
}
// Div is the division operator.
// Usage: `(/ x y)`
func Div(x, y Algebraic) (LangType, error) {
return x.Divide(y)
}
// Mod is the modulus operator. Modulo of two numbers returns the remainder of the quotient. This
// operator cannot be overloaded.
// Usage: `(% x y)`
func Mod(x, y Number) (LangType, error) {
return Number(int(x) % int(y)), nil
} | primitives.go | 0.831964 | 0.487673 | primitives.go | starcoder |
package weatherhist
import (
"time"
"strconv"
"github.com/PuerkitoBio/goquery"
"github.com/pkg/errors"
)
const DailyPath = "daily_%s1.php"
type Daily struct {
Days []Day `json:"days"`
}
type StringWithQuality struct {
Value *string
IsBadQuality bool
}
type FloatWithQuality struct {
Value *float32
IsBadQuality bool
}
type Day struct {
Date time.Time `json:"date"`
Precipitation Precipitation `json:"precipitation"`
Temperature Temperature `json:"temperature"`
Aerovane Aerovane `json:"aerovane"`
HoursOfSunlight FloatWithQuality `json:"hoursOfSunlight"`
Snow Snow `json:"snow"`
AirPressure AirPressure `json:"airPressure"`
Humidity Humidity `json:"humidity"`
}
type Precipitation struct {
Total FloatWithQuality `json:"total"`
MaxPrecipitation MaxPrecipitation `json:"maxPrecipitation"`
}
type MaxPrecipitation struct {
Hourly FloatWithQuality `json:"hourly"`
EveryTenMinutes FloatWithQuality `json:"everyTenMinutes"`
}
type Temperature struct {
Average FloatWithQuality `json:"average"`
Highest FloatWithQuality `json:"highest"`
Lowest FloatWithQuality `json:"lowest"`
}
type Aerovane struct {
AverageWindSpeed FloatWithQuality `json:"averageWindSpeed"`
MaxWindSpeed MaxWindSpeed `json:"maxWindSpeed"`
MaxInstantaneousSpeed MaxInstantaneousSpeed `json:"maxInstantaneousSpeed"`
MostFrequentWindDirection StringWithQuality `json:"mostFrequentWindDirection"`
}
type MaxWindSpeed struct {
Speed FloatWithQuality `json:"speed"`
Direction StringWithQuality `json:"direction"`
}
type MaxInstantaneousSpeed struct {
Speed FloatWithQuality `json:"speed"`
Direction StringWithQuality `json:"direction"`
}
type Snow struct {
SnowFall SnowFall `json:"snowFall"`
DeepestSnow DeepestSnow `json:"deepestSnow"`
}
type SnowFall struct {
Total FloatWithQuality `json:"total"`
}
type DeepestSnow struct {
Value FloatWithQuality `json:"value"`
}
type AirPressure struct {
FieldPressure FieldPressure `json:"fieldPressure"`
SeaSurfacePressure SeaSurfacePressure `json:"seaSurfacePressure"`
}
type FieldPressure struct {
Average FloatWithQuality `json:"average"`
}
type SeaSurfacePressure struct {
Average FloatWithQuality `json:"average"`
}
type Humidity struct {
Average FloatWithQuality `json:"average"`
Lowest FloatWithQuality `json:"lowest"`
}
func init() {
loc, err := time.LoadLocation("Asia/Tokyo")
if err != nil {
panic(err)
}
time.Local = loc
}
func (c *Client) GetDailyData(s Station, targetDate time.Time) (Daily, error) {
daily, err := c.getDailyDataFromPage(s, targetDate)
if err != nil {
return daily, err
}
return daily, nil
}
func (c *Client) getDailyDataFromPage(st Station, targetDate time.Time) (Daily, error) {
url := c.getFullURL(DailyPath, st, targetDate)
daily := Daily{}
doc, err := goquery.NewDocument(url)
if err != nil {
return daily, err
}
switch st.Type {
case StationTypeS:
daily, err = getDailyDataFromPageTypeS(st, targetDate, doc)
case StationTypeA:
daily, err = getDailyDataFromPageTypeA(st, targetDate, doc)
default:
return daily, errors.Wrapf(err, "unkown station type: %s", st.Type)
}
return daily, nil
}
func getDailyDataFromPageTypeS(st Station, targetDate time.Time, doc *goquery.Document) (Daily, error) {
daily := Daily{}
doc.Find("#tablefix1 > tbody > tr").Each(func(i int, s *goquery.Selection) {
if i == 0 || i == 1 || i == 2 {
return
}
day := Day{}
s.Find("td").Each(func(i int, s *goquery.Selection) {
switch i {
case 0:
date, _ := strconv.Atoi(s.Text())
t := time.Date(targetDate.Year(), targetDate.Month(), date, 0, 0, 0, 0, time.Local)
day.Date = t
case 1:
day.AirPressure.FieldPressure.Average = getFloatValueWithQuality(s.Text())
case 2:
day.AirPressure.SeaSurfacePressure.Average = getFloatValueWithQuality(s.Text())
case 3:
day.Precipitation.Total = getFloatValueWithQuality(s.Text())
case 4:
day.Precipitation.MaxPrecipitation.Hourly = getFloatValueWithQuality(s.Text())
case 5:
day.Precipitation.MaxPrecipitation.EveryTenMinutes = getFloatValueWithQuality(s.Text())
case 6:
day.Temperature.Average = getFloatValueWithQuality(s.Text())
case 7:
day.Temperature.Highest = getFloatValueWithQuality(s.Text())
case 8:
day.Temperature.Lowest = getFloatValueWithQuality(s.Text())
case 9:
day.Humidity.Average = getFloatValueWithQuality(s.Text())
case 10:
day.Humidity.Lowest = getFloatValueWithQuality(s.Text())
case 11:
day.Aerovane.AverageWindSpeed = getFloatValueWithQuality(s.Text())
case 12:
day.Aerovane.MaxWindSpeed.Speed = getFloatValueWithQuality(s.Text())
case 13:
day.Aerovane.MaxWindSpeed.Direction = getStringValueWithQuality(s.Text())
case 14:
day.Aerovane.MaxInstantaneousSpeed.Speed = getFloatValueWithQuality(s.Text())
case 15:
day.Aerovane.MaxInstantaneousSpeed.Direction = getStringValueWithQuality(s.Text())
case 16:
day.HoursOfSunlight = getFloatValueWithQuality(s.Text())
case 17:
day.Snow.SnowFall.Total = getFloatValueWithQuality(s.Text())
case 18:
day.Snow.DeepestSnow.Value = getFloatValueWithQuality(s.Text())
daily.Days = append(daily.Days, day)
default:
return
}
})
})
return daily, nil
}
func getDailyDataFromPageTypeA(st Station, targetDate time.Time, doc *goquery.Document) (Daily, error) {
daily := Daily{}
doc.Find("#tablefix1 > tbody > tr").Each(func(i int, s *goquery.Selection) {
if i == 0 || i == 1 || i == 2 {
return
}
day := Day{}
s.Find("td").Each(func(i int, s *goquery.Selection) {
switch i {
case 0:
date, _ := strconv.Atoi(s.Text())
t := time.Date(targetDate.Year(), targetDate.Month(), date, 0, 0, 0, 0, time.Local)
day.Date = t
case 1:
day.Precipitation.Total = getFloatValueWithQuality(s.Text())
case 2:
day.Precipitation.MaxPrecipitation.Hourly = getFloatValueWithQuality(s.Text())
case 3:
day.Precipitation.MaxPrecipitation.EveryTenMinutes = getFloatValueWithQuality(s.Text())
case 4:
day.Temperature.Average = getFloatValueWithQuality(s.Text())
case 5:
day.Temperature.Highest = getFloatValueWithQuality(s.Text())
case 6:
day.Temperature.Lowest = getFloatValueWithQuality(s.Text())
case 7:
day.Aerovane.AverageWindSpeed = getFloatValueWithQuality(s.Text())
case 8:
day.Aerovane.MaxWindSpeed.Speed = getFloatValueWithQuality(s.Text())
case 9:
day.Aerovane.MaxWindSpeed.Direction = getStringValueWithQuality(s.Text())
case 10:
day.Aerovane.MaxInstantaneousSpeed.Speed = getFloatValueWithQuality(s.Text())
case 11:
day.Aerovane.MaxInstantaneousSpeed.Direction = getStringValueWithQuality(s.Text())
case 12:
day.Aerovane.MostFrequentWindDirection = getStringValueWithQuality(s.Text())
case 13:
day.HoursOfSunlight = getFloatValueWithQuality(s.Text())
case 14:
day.Snow.SnowFall.Total = getFloatValueWithQuality(s.Text())
case 15:
day.Snow.DeepestSnow.Value = getFloatValueWithQuality(s.Text())
daily.Days = append(daily.Days, day)
default:
return
}
})
})
return daily, nil
} | daily.go | 0.601945 | 0.427636 | daily.go | starcoder |
package geography
import (
"math"
)
var emptyBound = Bound{Min: Point{1, 1}, Max: Point{-1, -1}}
type Bound struct {
Min Point
Max Point
}
func (b Bound) ToGeom() Geom {
return b
}
func (b Bound) Clip(bound Bound) Geom {
return Bound{
Min: Point{
math.Max(b.Min[0], bound.Min[0]),
math.Max(b.Min[1], bound.Min[1]),
},
Max: Point{
math.Min(b.Max[0], bound.Max[0]),
math.Min(b.Max[1], bound.Max[1]),
},
}
}
func (b Bound) Cap() int {
return b.AsPolygon().Cap()
}
func (b Bound) Geometry() []uint32 {
return b.AsPolygon().Geometry()
}
func (b Bound) Project(transform Transform) Geom {
return Bound{
Min: transform(b.Min),
Max: transform(b.Max),
}
}
func (Bound) Type() string {
return "Polygon"
}
func (b Bound) IsEmpty() bool {
return b.Min[0] > b.Max[0] || b.Min[1] > b.Max[1]
}
func (b Bound) Equal(g Geom) bool {
switch bound := g.(type) {
case Bound:
return b.Min.Equal(bound.Min) && bound.Max.Equal(bound.Max)
}
return false
}
func (b Bound) Bound() Bound {
return b
}
func (b Bound) AsPolygon() *Polygon {
return &Polygon{{
b.Min,
{b.Max[0], b.Min[1]},
b.Max,
{b.Min[0], b.Max[1]},
b.Min,
}}
}
func (Bound) DataType(driverName string) string {
return "Polygon"
}
func (b Bound) Top() float64 {
return b.Max[1]
}
func (b Bound) Bottom() float64 {
return b.Min[1]
}
func (b Bound) Right() float64 {
return b.Max[0]
}
func (b Bound) Left() float64 {
return b.Min[0]
}
func (b Bound) LeftTop() Point {
return Point{b.Left(), b.Top()}
}
func (b Bound) RightBottom() Point {
return Point{b.Right(), b.Bottom()}
}
func (b Bound) Intersects(bound Bound) bool {
return !((b.Max[0] < bound.Min[0]) || (b.Min[0] > bound.Max[0]) || (b.Max[1] < bound.Min[1]) || (b.Min[1] > bound.Max[1]))
}
func (b Bound) Contains(point Point) bool {
if point[1] < b.Min[1] || b.Max[1] < point[1] {
return false
}
if point[0] < b.Min[0] || b.Max[0] < point[0] {
return false
}
return true
}
func (b Bound) Extend(point Point) Bound {
if b.Contains(point) {
return b
}
return Bound{
Min: Point{
math.Min(b.Min[0], point[0]),
math.Min(b.Min[1], point[1]),
},
Max: Point{
math.Max(b.Max[0], point[0]),
math.Max(b.Max[1], point[1]),
},
}
}
func (b Bound) Union(other Bound) Bound {
if other.IsEmpty() {
return b
}
nextB := b.Extend(other.Min)
nextB = b.Extend(other.Max)
nextB = b.Extend(other.LeftTop())
nextB = b.Extend(other.RightBottom())
return nextB
}
func (b Bound) Center() Point {
return Point{
(b.Min[0] + b.Max[0]) / 2.0,
(b.Min[1] + b.Max[1]) / 2.0,
}
}
func (b Bound) Pad(d float64) Bound {
b.Min[0] -= d
b.Min[1] -= d
b.Max[0] += d
b.Max[1] += d
return b
} | geom_bound.go | 0.751192 | 0.673651 | geom_bound.go | starcoder |
package extend
import (
"fmt"
"github.com/matrixorigin/matrixone/pkg/container/types"
"github.com/matrixorigin/matrixone/pkg/sql/colexec/extend/overload"
)
var FunctionRegistry = map[string]int{}
var UnaryReturnTypes = map[int]func(Extend) types.T{
overload.UnaryMinus: func(e Extend) types.T {
return e.ReturnType()
},
overload.Not: func(e Extend) types.T {
return overload.GetUnaryOpReturnType(overload.Not, e.ReturnType())
},
}
var BinaryReturnTypes = map[int]func(Extend, Extend) types.T{
overload.Or: func(_ Extend, _ Extend) types.T {
return types.T_sel
},
overload.And: func(_ Extend, _ Extend) types.T {
return types.T_sel
},
overload.EQ: func(_ Extend, _ Extend) types.T {
return types.T_sel
},
overload.NE: func(_ Extend, _ Extend) types.T {
return types.T_sel
},
overload.LT: func(_ Extend, _ Extend) types.T {
return types.T_sel
},
overload.LE: func(_ Extend, _ Extend) types.T {
return types.T_sel
},
overload.GT: func(_ Extend, _ Extend) types.T {
return types.T_sel
},
overload.GE: func(_ Extend, _ Extend) types.T {
return types.T_sel
},
overload.Like: func(_ Extend, _ Extend) types.T {
return types.T_sel
},
overload.NotLike: func(_ Extend, _ Extend) types.T {
return types.T_sel
},
overload.Typecast: func(_ Extend, r Extend) types.T {
return r.ReturnType()
},
overload.Plus: func(l Extend, r Extend) types.T {
return overload.GetBinOpReturnType(overload.Plus, l.ReturnType(), r.ReturnType())
},
overload.Minus: func(l Extend, r Extend) types.T {
return overload.GetBinOpReturnType(overload.Minus, l.ReturnType(), r.ReturnType())
},
overload.Mult: func(l Extend, r Extend) types.T {
return overload.GetBinOpReturnType(overload.Mult, l.ReturnType(), r.ReturnType())
},
overload.Div: func(l Extend, r Extend) types.T {
return overload.GetBinOpReturnType(overload.Div, l.ReturnType(), r.ReturnType())
},
overload.IntegerDiv: func(l Extend, r Extend) types.T {
return overload.GetBinOpReturnType(overload.IntegerDiv, l.ReturnType(), r.ReturnType())
},
overload.Mod: func(l Extend, r Extend) types.T {
return overload.GetBinOpReturnType(overload.Mod, l.ReturnType(), r.ReturnType())
},
}
var MultiReturnTypes = map[int]func([]Extend) types.T{}
var UnaryStrings = map[int]func(Extend) string{
overload.UnaryMinus: func(e Extend) string {
return "-" + e.String()
},
overload.Not: func(e Extend) string {
return fmt.Sprintf("not(%s)", e)
},
}
var BinaryStrings = map[int]func(Extend, Extend) string{
overload.Like: func(l Extend, r Extend) string {
return fmt.Sprintf("like(%s, %s)", l.String(), r.String())
},
overload.NotLike: func(l Extend, r Extend) string {
return fmt.Sprintf("notLike(%s, %s)", l.String(), r.String())
},
overload.EQ: func(l Extend, r Extend) string {
return fmt.Sprintf("%s = %s", l.String(), r.String())
},
overload.LT: func(l Extend, r Extend) string {
return fmt.Sprintf("%s < %s", l.String(), r.String())
},
overload.GT: func(l Extend, r Extend) string {
return fmt.Sprintf("%s > %s", l.String(), r.String())
},
overload.LE: func(l Extend, r Extend) string {
return fmt.Sprintf("%s <= %s", l.String(), r.String())
},
overload.GE: func(l Extend, r Extend) string {
return fmt.Sprintf("%s >= %s", l.String(), r.String())
},
overload.NE: func(l Extend, r Extend) string {
return fmt.Sprintf("%s <> %s", l.String(), r.String())
},
overload.Or: func(l Extend, r Extend) string {
return fmt.Sprintf("%s or %s", l.String(), r.String())
},
overload.And: func(l Extend, r Extend) string {
return fmt.Sprintf("%s and %s", l.String(), r.String())
},
overload.Div: func(l Extend, r Extend) string {
return fmt.Sprintf("%s / %s", l.String(), r.String())
},
overload.IntegerDiv: func(l Extend, r Extend) string {
return fmt.Sprintf("%s div %s", l.String(), r.String())
},
overload.Mod: func(l Extend, r Extend) string {
return fmt.Sprintf("%s %% %s", l.String(), r.String())
},
overload.Plus: func(l Extend, r Extend) string {
return fmt.Sprintf("%s + %s", l.String(), r.String())
},
overload.Mult: func(l Extend, r Extend) string {
return fmt.Sprintf("%s * %s", l.String(), r.String())
},
overload.Minus: func(l Extend, r Extend) string {
return fmt.Sprintf("%s - %s", l.String(), r.String())
},
overload.Typecast: func(l Extend, r Extend) string {
return fmt.Sprintf("cast(%s as %s)", l.String(), r.ReturnType())
},
}
var MultiStrings = map[int]func([]Extend) string{}
func AndExtends(e Extend, es []Extend) []Extend {
switch v := e.(type) {
case *UnaryExtend:
return nil
case *ParenExtend:
return AndExtends(v.E, es)
case *Attribute:
return es
case *ValueExtend:
return es
case *BinaryExtend:
switch v.Op {
case overload.EQ:
return append(es, v)
case overload.NE:
return append(es, v)
case overload.LT:
return append(es, v)
case overload.LE:
return append(es, v)
case overload.GT:
return append(es, v)
case overload.GE:
return append(es, v)
case overload.And:
switch {
case isOrExtend(v.Left) && isOrExtend(v.Right):
es = append(es, v.Left)
es = append(es, v.Right)
return es
case !isOrExtend(v.Left) && isOrExtend(v.Right):
es = AndExtends(v.Left, es)
return append(es, v.Right)
case isOrExtend(v.Left) && !isOrExtend(v.Right):
es = AndExtends(v.Right, es)
return append(es, v.Left)
}
left, right := AndExtends(v.Left, es), AndExtends(v.Right, es)
if left == nil || right == nil {
return nil
}
return append(left, right...)
}
}
return nil
}
func isOrExtend(e Extend) bool {
v, ok := e.(*BinaryExtend)
if !ok {
return false
}
return v.Op == overload.Or
} | pkg/sql/colexec/extend/extend.go | 0.516352 | 0.454654 | extend.go | starcoder |
package benchmark
import (
"reflect"
"testing"
)
func isBoolToInt8FuncCalibrated(supplier func() bool) bool {
return isCalibrated(reflect.Bool, reflect.Int8, reflect.ValueOf(supplier).Pointer())
}
func isIntToInt8FuncCalibrated(supplier func() int) bool {
return isCalibrated(reflect.Int, reflect.Int8, reflect.ValueOf(supplier).Pointer())
}
func isInt8ToInt8FuncCalibrated(supplier func() int8) bool {
return isCalibrated(reflect.Int8, reflect.Int8, reflect.ValueOf(supplier).Pointer())
}
func isInt16ToInt8FuncCalibrated(supplier func() int16) bool {
return isCalibrated(reflect.Int16, reflect.Int8, reflect.ValueOf(supplier).Pointer())
}
func isInt32ToInt8FuncCalibrated(supplier func() int32) bool {
return isCalibrated(reflect.Int32, reflect.Int8, reflect.ValueOf(supplier).Pointer())
}
func isInt64ToInt8FuncCalibrated(supplier func() int64) bool {
return isCalibrated(reflect.Int64, reflect.Int8, reflect.ValueOf(supplier).Pointer())
}
func isUintToInt8FuncCalibrated(supplier func() uint) bool {
return isCalibrated(reflect.Uint, reflect.Int8, reflect.ValueOf(supplier).Pointer())
}
func isUint8ToInt8FuncCalibrated(supplier func() uint8) bool {
return isCalibrated(reflect.Uint8, reflect.Int8, reflect.ValueOf(supplier).Pointer())
}
func isUint16ToInt8FuncCalibrated(supplier func() uint16) bool {
return isCalibrated(reflect.Uint16, reflect.Int8, reflect.ValueOf(supplier).Pointer())
}
func isUint32ToInt8FuncCalibrated(supplier func() uint32) bool {
return isCalibrated(reflect.Uint32, reflect.Int8, reflect.ValueOf(supplier).Pointer())
}
func isUint64ToInt8FuncCalibrated(supplier func() uint64) bool {
return isCalibrated(reflect.Uint64, reflect.Int8, reflect.ValueOf(supplier).Pointer())
}
func setBoolToInt8FuncCalibrated(supplier func() bool) {
setCalibrated(reflect.Bool, reflect.Int8, reflect.ValueOf(supplier).Pointer())
}
func setIntToInt8FuncCalibrated(supplier func() int) {
setCalibrated(reflect.Int, reflect.Int8, reflect.ValueOf(supplier).Pointer())
}
func setInt8ToInt8FuncCalibrated(supplier func() int8) {
setCalibrated(reflect.Int8, reflect.Int8, reflect.ValueOf(supplier).Pointer())
}
func setInt16ToInt8FuncCalibrated(supplier func() int16) {
setCalibrated(reflect.Int16, reflect.Int8, reflect.ValueOf(supplier).Pointer())
}
func setInt32ToInt8FuncCalibrated(supplier func() int32) {
setCalibrated(reflect.Int32, reflect.Int8, reflect.ValueOf(supplier).Pointer())
}
func setInt64ToInt8FuncCalibrated(supplier func() int64) {
setCalibrated(reflect.Int64, reflect.Int8, reflect.ValueOf(supplier).Pointer())
}
func setUintToInt8FuncCalibrated(supplier func() uint) {
setCalibrated(reflect.Uint, reflect.Int8, reflect.ValueOf(supplier).Pointer())
}
func setUint8ToInt8FuncCalibrated(supplier func() uint8) {
setCalibrated(reflect.Uint8, reflect.Int8, reflect.ValueOf(supplier).Pointer())
}
func setUint16ToInt8FuncCalibrated(supplier func() uint16) {
setCalibrated(reflect.Uint16, reflect.Int8, reflect.ValueOf(supplier).Pointer())
}
func setUint32ToInt8FuncCalibrated(supplier func() uint32) {
setCalibrated(reflect.Uint32, reflect.Int8, reflect.ValueOf(supplier).Pointer())
}
func setUint64ToInt8FuncCalibrated(supplier func() uint64) {
setCalibrated(reflect.Uint64, reflect.Int8, reflect.ValueOf(supplier).Pointer())
}
// BoolToInt8Func benchmarks a function with the signature:
// func(bool) int8
// ID: B-3-1
func BoolToInt8Func(b *testing.B, supplier func() bool, toInt8Func func(bool) int8) {
if !isBoolSupplierCalibrated(supplier) {
panic("supplier function not calibrated")
}
if !isBoolToInt8FuncCalibrated(supplier) {
panic("BoolToInt8Func not calibrated with this supplier")
}
for i, count := 0, b.N; i < count; i++ {
toInt8Func(supplier())
}
}
// IntToInt8Func benchmarks a function with the signature:
// func(int) int8
// ID: B-3-2
func IntToInt8Func(b *testing.B, supplier func() int, toInt8Func func(int) int8) {
if !isIntSupplierCalibrated(supplier) {
panic("supplier function not calibrated")
}
if !isIntToInt8FuncCalibrated(supplier) {
panic("IntToInt8Func not calibrated with this supplier")
}
for i, count := 0, b.N; i < count; i++ {
toInt8Func(supplier())
}
}
// Int8ToInt8Func benchmarks a function with the signature:
// func(int8) int8
// ID: B-3-3
func Int8ToInt8Func(b *testing.B, supplier func() int8, toInt8Func func(int8) int8) {
if !isInt8SupplierCalibrated(supplier) {
panic("supplier function not calibrated")
}
if !isInt8ToInt8FuncCalibrated(supplier) {
panic("Int8ToInt8Func not calibrated with this supplier")
}
for i, count := 0, b.N; i < count; i++ {
toInt8Func(supplier())
}
}
// Int16ToInt8Func benchmarks a function with the signature:
// func(int16) int8
// ID: B-3-4
func Int16ToInt8Func(b *testing.B, supplier func() int16, toInt8Func func(int16) int8) {
if !isInt16SupplierCalibrated(supplier) {
panic("supplier function not calibrated")
}
if !isInt16ToInt8FuncCalibrated(supplier) {
panic("Int16ToInt8Func not calibrated with this supplier")
}
for i, count := 0, b.N; i < count; i++ {
toInt8Func(supplier())
}
}
// Int32ToInt8Func benchmarks a function with the signature:
// func(int32) int8
// ID: B-3-5
func Int32ToInt8Func(b *testing.B, supplier func() int32, toInt8Func func(int32) int8) {
if !isInt32SupplierCalibrated(supplier) {
panic("supplier function not calibrated")
}
if !isInt32ToInt8FuncCalibrated(supplier) {
panic("Int32ToInt8Func not calibrated with this supplier")
}
for i, count := 0, b.N; i < count; i++ {
toInt8Func(supplier())
}
}
// Int64ToInt8Func benchmarks a function with the signature:
// func(int64) int8
// ID: B-3-6
func Int64ToInt8Func(b *testing.B, supplier func() int64, toInt8Func func(int64) int8) {
if !isInt64SupplierCalibrated(supplier) {
panic("supplier function not calibrated")
}
if !isInt64ToInt8FuncCalibrated(supplier) {
panic("Int64ToInt8Func not calibrated with this supplier")
}
for i, count := 0, b.N; i < count; i++ {
toInt8Func(supplier())
}
}
// UintToInt8Func benchmarks a function with the signature:
// func(uint) int8
// ID: B-3-7
func UintToInt8Func(b *testing.B, supplier func() uint, toInt8Func func(uint) int8) {
if !isUintSupplierCalibrated(supplier) {
panic("supplier function not calibrated")
}
if !isUintToInt8FuncCalibrated(supplier) {
panic("UintToInt8Func not calibrated with this supplier")
}
for i, count := 0, b.N; i < count; i++ {
toInt8Func(supplier())
}
}
// Uint8ToInt8Func benchmarks a function with the signature:
// func(uint8) int8
// ID: B-3-8
func Uint8ToInt8Func(b *testing.B, supplier func() uint8, toInt8Func func(uint8) int8) {
if !isUint8SupplierCalibrated(supplier) {
panic("supplier function not calibrated")
}
if !isUint8ToInt8FuncCalibrated(supplier) {
panic("Uint8ToInt8Func not calibrated with this supplier")
}
for i, count := 0, b.N; i < count; i++ {
toInt8Func(supplier())
}
}
// Uint16ToInt8Func benchmarks a function with the signature:
// func(uint16) int8
// ID: B-3-9
func Uint16ToInt8Func(b *testing.B, supplier func() uint16, toInt8Func func(uint16) int8) {
	// Both the supplier and the function under test must have been
	// calibrated beforehand; refuse to benchmark otherwise.
	switch {
	case !isUint16SupplierCalibrated(supplier):
		panic("supplier function not calibrated")
	case !isUint16ToInt8FuncCalibrated(supplier):
		panic("Uint16ToInt8Func not calibrated with this supplier")
	}
	for n := b.N; n > 0; n-- {
		toInt8Func(supplier())
	}
}
// Uint32ToInt8Func benchmarks a function with the signature:
// func(uint32) int8
// ID: B-3-10
func Uint32ToInt8Func(b *testing.B, supplier func() uint32, toInt8Func func(uint32) int8) {
	// Both the supplier and the function under test must have been
	// calibrated beforehand; refuse to benchmark otherwise.
	switch {
	case !isUint32SupplierCalibrated(supplier):
		panic("supplier function not calibrated")
	case !isUint32ToInt8FuncCalibrated(supplier):
		panic("Uint32ToInt8Func not calibrated with this supplier")
	}
	for n := b.N; n > 0; n-- {
		toInt8Func(supplier())
	}
}
// Uint64ToInt8Func benchmarks a function with the signature:
// func(uint64) int8
// ID: B-3-11
func Uint64ToInt8Func(b *testing.B, supplier func() uint64, toInt8Func func(uint64) int8) {
	if !isUint64SupplierCalibrated(supplier) {
		panic("supplier function not calibrated")
	}
	if !isUint64ToInt8FuncCalibrated(supplier) {
		panic("Uint64ToInt8Func not calibrated with this supplier")
	}
	for i, count := 0, b.N; i < count; i++ {
		toInt8Func(supplier())
	}
}
package tpch
import (
"database/sql"
"fmt"
"math"
"strconv"
)
// precision classifies how a result column is compared against the TPC-H
// validation data; see checkOutput for the exact rule applied to each kind.
type precision int

const (
	str precision = iota // compared as an exact string
	sum                  // SUM aggregate: must be within 100.0 of the expected value
	avg                  // AVG aggregate: compared after rounding, within 0.01
	cnt                  // COUNT aggregate: exact match required
	num                  // numeric singleton: exact match required
	rat                  // ratio: compared after rounding, within 0.01
)
// queryColPrecisions maps each TPC-H query name to the precision rule used
// for each of its output columns when validating results (TPC-H spec 2.1.3.5).
var queryColPrecisions = map[string][]precision{
	// Comment 4: In cases where validation output data is from the aggregate SUM(l_quantity) (e.g. queries 1 and 18),
	// the precision for this validation output data must exactly match the query validation data.
	"q1": {str, str, str, sum, sum, sum, avg, avg, avg, cnt},
	"q2": {num, str, str, str, str, str, str, str},
	"q3": {str, sum, str, str},
	"q4": {str, cnt},
	"q5": {str, sum},
	"q6": {sum},
	"q7": {str, str, str, sum},
	"q8": {str, rat},
	"q9": {str, str, sum},
	"q10": {str, str, sum, num, str, str, str, str},
	"q11": {str, sum},
	// Comment 2: In cases where validation output data resembles a row count operation by summing up 0 and 1 using a
	// SUM aggregate (e.g. query 12), the precision for this validation output data must adhere to bullet a) above.
	"q12": {str, cnt, cnt},
	"q13": {cnt, cnt},
	"q14": {rat},
	// Comment 3: In cases were validation output data is selected from views without any further computation (e.g. total
	// revenue in Query 15), the precision for this validation output data must adhere to bullet c) above.
	"q15": {str, str, str, str, sum},
	"q16": {str, str, num, cnt},
	"q17": {avg},
	// Comment 4: In cases where validation output data is from the aggregate SUM(l_quantity) (e.g. queries 1 and 18),
	// the precision for this validation output data must exactly match the query validation data.
	"q18": {str, str, str, str, num, str},
	"q19": {sum},
	"q20": {str, str},
	"q21": {str, cnt},
	"q22": {num, cnt, sum},
}
// checkQueryResult reads all rows returned for queryName and validates them
// against the expected answer set (ans) using the per-column precision rules
// in queryColPrecisions. NULL columns are rendered as `\N` to match the
// answer-file format.
func (w Workloader) checkQueryResult(queryName string, rows *sql.Rows) error {
	defer rows.Close()

	var got [][]string
	cols, err := rows.Columns()
	if err != nil {
		// BUG FIX: this previously returned nil, silently swallowing the error.
		return fmt.Errorf("get columns for %s failed %v", queryName, err)
	}
	for rows.Next() {
		rawResult := make([][]byte, len(cols))
		row := make([]string, len(cols))
		dest := make([]interface{}, len(cols))
		for i := range rawResult {
			dest[i] = &rawResult[i]
		}
		if err := rows.Scan(dest...); err != nil {
			return fmt.Errorf("scan %s failed %v", queryName, err)
		}
		for i, raw := range rawResult {
			if raw == nil {
				row[i] = "\\N"
			} else {
				row[i] = string(raw)
			}
		}
		got = append(got, row)
	}
	// Surface iteration errors (e.g. a connection dropped mid-result);
	// rows.Next returning false does not distinguish EOF from failure.
	if err := rows.Err(); err != nil {
		return fmt.Errorf("iterate %s failed %v", queryName, err)
	}
	return checkOutput(queryColPrecisions[queryName], ans[queryName], got)
}
// checkOutput validates each got row/column against expect using the
// per-column precision rules. It returns nil when everything matches.
//
// Note the pattern used below: `ret` is pre-assigned to a mismatch error for
// the current cell, so every failing check can simply bare-`return` it; the
// final `return nil` overrides it on success.
func checkOutput(colPrecisions []precision, expect [][]string, got [][]string) (ret error) {
	if len(expect) != len(got) {
		return fmt.Errorf("expect %d rows, got %d rows", len(expect), len(got))
	}
	for i, row := range got {
		for j, column := range row {
			expectStr := expect[i][j]
			// Pre-build the error so the checks below can bare-return on failure.
			ret = fmt.Errorf("expect %s at row %d column %d, got %s", expectStr, i, j, column)
			// 2.1.3.5
			switch colPrecisions[j] {
			case cnt:
				// For singleton column values and results from COUNT aggregates, the values must exactly match the query
				// validation output data.
				fallthrough
			case num:
				fallthrough
			case str:
				if expectStr != column {
					return
				}
				continue
			}
			// All remaining kinds (sum/avg/rat) are compared numerically;
			// an unparseable value fails with the pre-built mismatch error.
			expectFloat, err := strconv.ParseFloat(expectStr, 64)
			if err != nil {
				return
			}
			gotFloat, err := strconv.ParseFloat(column, 64)
			if err != nil {
				return
			}
			switch colPrecisions[j] {
			case sum:
				// For results from SUM aggregates, the resulting values must be within $100 of the query validation output
				// data
				if math.Abs(expectFloat-gotFloat) > 100.0 {
					return
				}
			case avg:
				// For results from AVG aggregates, the resulting values r must be within 1% of the query validation output
				// data when rounded to the nearest 1/100th. That is, 0.99*v<=round(r,2)<=1.01*v.
				fallthrough
			case rat:
				// For ratios, results r must be within 1% of the query validation output data v when rounded to the nearest
				// 1/100th. That is, 0.99*v<=round(r,2)<=1.01*v
				// NOTE(review): the *1000 factor rounds to the nearest 1/1000th,
				// not 1/100th as the comment above states — confirm which is intended.
				if math.Abs(math.Round(gotFloat*1000)/1000-math.Round(expectFloat*1000)/1000) > 0.01 {
					return
				}
			default:
				panic("unreachable")
			}
		}
	}
	return nil
}
package bitcoin
import (
"encoding/binary"
"encoding/hex"
"bitbucket.org/simon_ordish/cryptolib"
)
// input is a single transaction input: a reference to a previous output
// (hash + index) plus the script that unlocks it.
type input struct {
	hash            [32]byte // The previous utxo being spent.
	index           uint32   // The previous utxo index being spent.
	unlockingScript []byte   // A script-language script which satisfies the conditions placed in the outpoint’s pubkey script. Should only contain data pushes; see https://bitcoin.org/en/developer-reference#signature_script_modification_warning.
	sequence        uint32   // Sequence number. Default for Bitcoin Core and almost all other programs is 0xffffffff. See https://bitcoin.org/en/glossary/sequence-number
}
// toHex serializes the input in wire order: reversed previous-output hash,
// little-endian index, varint script length, the script itself, and the
// little-endian sequence number.
func (i *input) toHex() []byte {
	out := make([]byte, 0, 32+4+9+len(i.unlockingScript)+4)
	out = append(out, cryptolib.ReverseBytes(i.hash[:])...)
	out = append(out, cryptolib.GetLittleEndianBytes(i.index, 4)...)
	out = append(out, cryptolib.VarInt(uint64(len(i.unlockingScript)))...)
	out = append(out, i.unlockingScript...)
	out = append(out, cryptolib.GetLittleEndianBytes(i.sequence, 4)...)
	return out
}
// inputFromBytes parses a serialized transaction input from the start of b
// and returns the input together with the number of bytes consumed.
// NOTE(review): no bounds checking is performed, so truncated data panics —
// confirm callers always supply a complete serialization.
func inputFromBytes(b []byte) (*input, int) {
	pos := 0

	var previousOutput [32]byte
	copy(previousOutput[:], cryptolib.ReverseBytes(b[pos:pos+32]))
	pos += 32

	index := binary.LittleEndian.Uint32(b[pos : pos+4])
	pos += 4

	scriptLen, size := cryptolib.DecodeVarInt(b[pos:])
	pos += size

	// Was `len := int(scriptLen)`, which shadowed the builtin len.
	scriptLength := int(scriptLen)
	script := b[pos : pos+scriptLength]
	pos += scriptLength

	sequence := binary.LittleEndian.Uint32(b[pos : pos+4])
	pos += 4

	return &input{
		hash:            previousOutput,
		index:           index,
		unlockingScript: script,
		sequence:        sequence,
	}, pos
}
// output is a single transaction output: a satoshi amount and the script
// that must be satisfied to spend it.
type output struct {
	value         uint64 // Number of satoshis to spend. May be zero; the sum of all outputs may not exceed the sum of satoshis previously spent to the outpoints provided in the input section. (Exception: coinbase transactions spend the block subsidy and collected transaction fees.)
	lockingScript []byte // Defines the conditions which must be satisfied to spend this output.
}
// toHex serializes the output in wire order: 8-byte little-endian value,
// varint script length, then the locking script.
func (o *output) toHex() []byte {
	out := make([]byte, 8, 8+9+len(o.lockingScript))
	binary.LittleEndian.PutUint64(out, o.value)
	out = append(out, cryptolib.VarInt(uint64(len(o.lockingScript)))...)
	out = append(out, o.lockingScript...)
	return out
}
// outputFromBytes parses a serialized transaction output from the start of b
// and returns the output together with the number of bytes consumed.
// NOTE(review): no bounds checking is performed, so truncated data panics —
// confirm callers always supply a complete serialization.
func outputFromBytes(b []byte) (*output, int) {
	pos := 0

	value := binary.LittleEndian.Uint64(b[pos : pos+8])
	pos += 8

	scriptLen, size := cryptolib.DecodeVarInt(b[pos:])
	pos += size

	// Was `len := int(scriptLen)`, which shadowed the builtin len.
	scriptLength := int(scriptLen)
	script := b[pos : pos+scriptLength]
	pos += scriptLength

	return &output{
		value:         value,
		lockingScript: script,
	}, pos
}
// transaction is a parsed Bitcoin transaction together with its computed
// double-SHA256 hash (hex, byte-reversed for display).
type transaction struct {
	Hash     string
	Version  int32    // Transaction version number (note, this is signed); currently version 1 or 2. Programs creating transactions using newer consensus rules may use higher version numbers. Version 2 means that BIP 68 applies.
	Inputs   []input  // Transaction inputs.
	Outputs  []output // Transaction outputs.
	LockTime uint32   // A time (Unix epoch time) or block number. See https://bitcoin.org/en/transactions-guide#locktime_parsing_rules
}
// TransactionFromHex takes a hex string and constructs a Transaction object.
// It returns (nil, 0) when h is not valid hexadecimal.
func TransactionFromHex(h string) (*transaction, int) {
	s, err := hex.DecodeString(h)
	if err != nil {
		// Previously the decode error was ignored and a partial byte slice
		// was parsed, which could panic inside TransactionFromBytes.
		return nil, 0
	}
	return TransactionFromBytes(s)
}
// TransactionFromBytes takes a slice of bytes and constructs a Transaction object.
// It returns the parsed transaction plus the number of bytes consumed.
// NOTE(review): no bounds checking; truncated input panics — confirm callers
// validate their data.
func TransactionFromBytes(b []byte) (*transaction, int) {
	pos := 0
	// extract the version
	version := binary.LittleEndian.Uint32(b[0:4])
	pos += 4
	// Get the number of inputs
	numberOfInputs, size := cryptolib.DecodeVarInt(b[pos:])
	pos += size
	var inputs []input
	for i := uint64(0); i < numberOfInputs; i++ {
		input, size := inputFromBytes(b[pos:])
		pos += size
		inputs = append(inputs, *input)
	}
	// Get the number of outputs
	numberOfOutputs, size := cryptolib.DecodeVarInt(b[pos:])
	pos += size
	var outputs []output
	for i := uint64(0); i < numberOfOutputs; i++ {
		output, size := outputFromBytes(b[pos:])
		pos += size
		outputs = append(outputs, *output)
	}
	locktime := binary.LittleEndian.Uint32(b[pos : pos+4])
	pos += 4
	// The txid is the double-SHA256 of the consumed bytes, byte-reversed for
	// the conventional display order.
	hash := cryptolib.Sha256d(b[0:pos])
	return &transaction{
		Hash:     hex.EncodeToString(cryptolib.ReverseBytes(hash)),
		Version:  int32(version),
		Inputs:   inputs,
		Outputs:  outputs,
		LockTime: locktime,
	}, pos
}
// InputCount returns the number of inputs in the transaction.
func (t *transaction) InputCount() int {
	return len(t.Inputs)
}
// OutputCount returns the number of outputs in the transaction.
func (t *transaction) OutputCount() int {
	return len(t.Outputs)
}
func (t *transaction) ToHex() []byte {
var b []byte
b = append(b, cryptolib.GetLittleEndianBytes(uint32(t.Version), 4)...)
b = append(b, cryptolib.VarInt(uint64(t.InputCount()))...)
for _, input := range t.Inputs {
b = append(b, input.toHex()...)
}
b = append(b, cryptolib.VarInt(uint64(t.OutputCount()))...)
for _, output := range t.Outputs {
b = append(b, output.toHex()...)
}
b = append(b, cryptolib.GetLittleEndianBytes(t.LockTime, 4)...)
return b
} | scratch.go | 0.696165 | 0.460774 | scratch.go | starcoder |
package pure
import (
"context"
"encoding/json"
"fmt"
jmespath "github.com/jmespath/go-jmespath"
"github.com/benthosdev/benthos/v4/internal/bundle"
"github.com/benthosdev/benthos/v4/internal/component/processor"
"github.com/benthosdev/benthos/v4/internal/docs"
"github.com/benthosdev/benthos/v4/internal/log"
"github.com/benthosdev/benthos/v4/internal/message"
)
// init registers the jmespath processor with the global processor bundle,
// including its documentation and config spec. Registration failure is a
// programming error and panics at startup.
func init() {
	err := bundle.AllProcessors.Add(func(conf processor.Config, mgr bundle.NewManagement) (processor.V1, error) {
		p, err := newJMESPath(conf.JMESPath, mgr)
		if err != nil {
			return nil, err
		}
		return processor.NewV2ToV1Processor("jmespath", p, mgr.Metrics()), nil
	}, docs.ComponentSpec{
		Name: "jmespath",
		Categories: []string{
			"Mapping",
		},
		Summary: `
Executes a [JMESPath query](http://jmespath.org/) on JSON documents and replaces
the message with the resulting document.`,
		Description: `
:::note Try out Bloblang
For better performance and improved capabilities try out native Benthos mapping with the [bloblang processor](/docs/components/processors/bloblang).
:::
`,
		Examples: []docs.AnnotatedExample{
			{
				Title: "Mapping",
				Summary: `
When receiving JSON documents of the form:
` + "```json" + `
{
  "locations": [
    {"name": "Seattle", "state": "WA"},
    {"name": "New York", "state": "NY"},
    {"name": "Bellevue", "state": "WA"},
    {"name": "Olympia", "state": "WA"}
  ]
}
` + "```" + `
We could collapse the location names from the state of Washington into a field ` + "`Cities`" + `:
` + "```json" + `
{"Cities": "Bellevue, Olympia, Seattle"}
` + "```" + `
With the following config:`,
				Config: `
pipeline:
  processors:
    - jmespath:
        query: "locations[?state == 'WA'].name | sort(@) | {Cities: join(', ', @)}"
`,
			},
		},
		Config: docs.FieldComponent().WithChildren(
			docs.FieldString("query", "The JMESPath query to apply to messages.").HasDefault(""),
		),
	})
	if err != nil {
		panic(err)
	}
}
// jmespathProc holds a pre-compiled JMESPath query and a logger; it
// implements the processor.V2 interface (see Process/Close below).
type jmespathProc struct {
	query *jmespath.JMESPath
	log   log.Modular
}
// newJMESPath compiles the configured query and returns a processor that
// applies it, or an error when the query does not compile.
func newJMESPath(conf processor.JMESPathConfig, mgr bundle.NewManagement) (processor.V2, error) {
	compiled, err := jmespath.Compile(conf.Query)
	if err != nil {
		return nil, fmt.Errorf("failed to compile JMESPath query: %v", err)
	}
	return &jmespathProc{
		query: compiled,
		log:   mgr.Logger(),
	}, nil
}
// safeSearch evaluates j against part, converting any panic raised by the
// JMESPath library into a regular error.
func safeSearch(part interface{}, j *jmespath.JMESPath) (res interface{}, err error) {
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		err = fmt.Errorf("jmespath panic: %v", r)
	}()
	res, err = j.Search(part)
	return
}
// JMESPath doesn't like json.Number so we walk the tree and replace them.
func clearNumbers(v interface{}) (interface{}, bool) {
switch t := v.(type) {
case map[string]interface{}:
for k, v := range t {
if nv, ok := clearNumbers(v); ok {
t[k] = nv
}
}
case []interface{}:
for i, v := range t {
if nv, ok := clearNumbers(v); ok {
t[i] = nv
}
}
case json.Number:
f, err := t.Float64()
if err != nil {
if i, err := t.Int64(); err == nil {
return i, true
}
}
return f, true
}
return nil, false
}
// Process applies the compiled JMESPath query to the JSON payload of msg and
// returns a single message whose content is the query result. An error is
// returned when the payload is not valid JSON or the search fails.
func (p *jmespathProc) Process(ctx context.Context, msg *message.Part) ([]*message.Part, error) {
	// Operate on a copy so the original part is untouched on failure.
	newMsg := msg.Copy()
	jsonPart, err := newMsg.JSON()
	if err != nil {
		p.log.Debugf("Failed to parse part into json: %v\n", err)
		return nil, err
	}
	// Replace json.Number values, which JMESPath cannot handle (see clearNumbers).
	if v, replace := clearNumbers(jsonPart); replace {
		jsonPart = v
	}
	var result interface{}
	if result, err = safeSearch(jsonPart, p.query); err != nil {
		p.log.Debugf("Failed to search json: %v\n", err)
		return nil, err
	}
	newMsg.SetJSON(result)
	return []*message.Part{newMsg}, nil
}
// Close is a no-op; the processor holds no resources needing release.
func (p *jmespathProc) Close(context.Context) error {
	return nil
}
package nbtconv
import (
"github.com/df-mc/dragonfly/server/block/cube"
"github.com/df-mc/dragonfly/server/item"
"github.com/df-mc/dragonfly/server/world"
"github.com/go-gl/mathgl/mgl64"
)
// MapSlice reads an interface slice from a map at the key passed.
// A missing key or a value of another type yields nil.
func MapSlice(m map[string]any, key string) []any {
	if v, ok := m[key].([]any); ok {
		return v
	}
	return nil
}
// MapString reads a string from a map at the key passed.
// A missing key or a value of another type yields "".
func MapString(m map[string]any, key string) string {
	if v, ok := m[key].(string); ok {
		return v
	}
	return ""
}
// MapInt16 reads an int16 from a map at the key passed.
// A missing key or a value of another type yields 0.
func MapInt16(m map[string]any, key string) int16 {
	if v, ok := m[key].(int16); ok {
		return v
	}
	return 0
}
// MapInt32 reads an int32 from a map at the key passed.
// A missing key or a value of another type yields 0.
func MapInt32(m map[string]any, key string) int32 {
	if v, ok := m[key].(int32); ok {
		return v
	}
	return 0
}
// MapInt64 reads an int64 from a map at the key passed.
// A missing key or a value of another type yields 0.
func MapInt64(m map[string]any, key string) int64 {
	if v, ok := m[key].(int64); ok {
		return v
	}
	return 0
}
// MapByte reads a byte from a map at the key passed.
// A missing key or a value of another type yields 0.
func MapByte(m map[string]any, key string) byte {
	if v, ok := m[key].(byte); ok {
		return v
	}
	return 0
}
// MapFloat32 reads a float32 from a map at the key passed.
// A missing key or a value of another type yields 0.
func MapFloat32(m map[string]any, key string) float32 {
	if v, ok := m[key].(float32); ok {
		return v
	}
	return 0
}
// MapVec3 converts x, y and z values in an NBT map to an mgl64.Vec3.
// Values that are not a 3-element []any (of float32) or []float32 produce
// the zero vector.
func MapVec3(x map[string]any, k string) mgl64.Vec3 {
	switch vals := x[k].(type) {
	case []any:
		if len(vals) != 3 {
			return mgl64.Vec3{}
		}
		var out mgl64.Vec3
		for idx, e := range vals {
			f, _ := e.(float32)
			out[idx] = float64(f)
		}
		return out
	case []float32:
		if len(vals) != 3 {
			return mgl64.Vec3{}
		}
		return mgl64.Vec3{float64(vals[0]), float64(vals[1]), float64(vals[2])}
	}
	return mgl64.Vec3{}
}
// Vec3ToFloat32Slice converts an mgl64.Vec3 to a []float32 with 3 elements.
func Vec3ToFloat32Slice(x mgl64.Vec3) []float32 {
	out := make([]float32, 3)
	for i := 0; i < 3; i++ {
		out[i] = float32(x[i])
	}
	return out
}
// MapPos converts x, y and z values in an NBT map to a cube.Pos.
// Values that are not a 3-element []any (of int32) or []int32 produce the
// zero position.
func MapPos(x map[string]any, k string) cube.Pos {
	switch vals := x[k].(type) {
	case []any:
		if len(vals) != 3 {
			return cube.Pos{}
		}
		var out cube.Pos
		for idx, e := range vals {
			n, _ := e.(int32)
			out[idx] = int(n)
		}
		return out
	case []int32:
		if len(vals) != 3 {
			return cube.Pos{}
		}
		return cube.Pos{int(vals[0]), int(vals[1]), int(vals[2])}
	}
	return cube.Pos{}
}
// PosToInt32Slice converts a cube.Pos to a []int32 with 3 elements.
func PosToInt32Slice(x cube.Pos) []int32 {
	out := make([]int32, 3)
	for i := 0; i < 3; i++ {
		out[i] = int32(x[i])
	}
	return out
}
// MapBlock converts a block's name and properties in a map obtained by decoding NBT to a world.Block.
// A missing key or non-map value yields nil.
func MapBlock(x map[string]any, k string) world.Block {
	m, ok := x[k].(map[string]any)
	if !ok {
		return nil
	}
	return ReadBlock(m)
}
// MapItem converts an item's name, count, damage (and properties when it is a block) in a map obtained by decoding NBT
// to a world.Item. A missing key or non-map value yields the empty stack.
func MapItem(x map[string]any, k string) item.Stack {
	if m, ok := x[k].(map[string]any); ok {
		// Build the base stack first; the read* helpers then mutate s in
		// place with damage, enchantments, display info and custom data.
		s := readItemStack(m)
		readDamage(m, &s, true)
		readEnchantments(m, &s)
		readDisplay(m, &s)
		readDragonflyData(m, &s)
		return s
	}
	return item.Stack{}
}
package metrics
import (
"sync"
"time"
"github.com/palantir/go-metrics"
)
// getOrRegisterMicroSecondsTimer returns an existing Timer or constructs and registers a new microSecondsTimer.
// Be sure to unregister the meter from the registry once it is of no use to allow for garbage collection.
// Based on metrics.GetOrRegisterTimer.
func getOrRegisterMicroSecondsTimer(name string, r metrics.Registry) metrics.Timer {
	if r == nil {
		r = metrics.DefaultRegistry
	}
	return r.GetOrRegister(name, newMicroSecondsTimer).(metrics.Timer)
}
// newMicroSecondsTimer creates a new microSecondsTimer. It is based on metrics.NewTimer.
// When metrics are globally disabled it returns the nil timer instead.
func newMicroSecondsTimer() metrics.Timer {
	if metrics.UseNilMetrics {
		return metrics.NilTimer{}
	}
	t := &microSecondsTimer{}
	t.histogram = metrics.NewHistogram(metrics.NewExpDecaySample(1028, 0.015))
	t.meter = metrics.NewMeter()
	return t
}
// microSecondsTimer is a timer that records its metrics in microseconds (as opposed to the regular metrics.Timer,
// which records its units in nanoseconds). It is based on metrics.StandardTimer.
// The mutex guards compound operations on histogram+meter (see Update,
// UpdateSince and Snapshot); because of it the struct must not be copied.
type microSecondsTimer struct {
	histogram metrics.Histogram
	meter     metrics.Meter
	mutex     sync.Mutex
}
// Count returns the number of events recorded.
func (t *microSecondsTimer) Count() int64 {
	return t.histogram.Count()
}

// Max returns the maximum value in the sample, in microseconds.
func (t *microSecondsTimer) Max() int64 {
	return t.histogram.Max()
}

// Mean returns the mean of the values in the sample, in microseconds.
func (t *microSecondsTimer) Mean() float64 {
	return t.histogram.Mean()
}

// Min returns the minimum value in the sample, in microseconds.
func (t *microSecondsTimer) Min() int64 {
	return t.histogram.Min()
}

// Percentile returns an arbitrary percentile of the values in the sample.
func (t *microSecondsTimer) Percentile(p float64) float64 {
	return t.histogram.Percentile(p)
}

// Percentiles returns a slice of arbitrary percentiles of the values in the
// sample.
func (t *microSecondsTimer) Percentiles(ps []float64) []float64 {
	return t.histogram.Percentiles(ps)
}

// Rate1 returns the one-minute moving average rate of events per second.
func (t *microSecondsTimer) Rate1() float64 {
	return t.meter.Rate1()
}

// Rate5 returns the five-minute moving average rate of events per second.
func (t *microSecondsTimer) Rate5() float64 {
	return t.meter.Rate5()
}

// Rate15 returns the fifteen-minute moving average rate of events per second.
func (t *microSecondsTimer) Rate15() float64 {
	return t.meter.Rate15()
}

// RateMean returns the meter's mean rate of events per second.
func (t *microSecondsTimer) RateMean() float64 {
	return t.meter.RateMean()
}

// Snapshot returns a read-only copy of the timer. The mutex is held so the
// histogram and meter snapshots form a consistent pair.
func (t *microSecondsTimer) Snapshot() metrics.Timer {
	t.mutex.Lock()
	defer t.mutex.Unlock()
	return &timerSnapshot{
		histogram: t.histogram.Snapshot().(*metrics.HistogramSnapshot),
		meter:     t.meter.Snapshot().(*metrics.MeterSnapshot),
	}
}

// StdDev returns the standard deviation of the values in the sample.
func (t *microSecondsTimer) StdDev() float64 {
	return t.histogram.StdDev()
}

// Stop stops the meter.
func (t *microSecondsTimer) Stop() {
	t.meter.Stop()
}

// Sum returns the sum in the sample, in microseconds.
func (t *microSecondsTimer) Sum() int64 {
	return t.histogram.Sum()
}

// Time records the duration of the execution of the given function.
func (t *microSecondsTimer) Time(f func()) {
	ts := time.Now()
	f()
	t.Update(time.Since(ts))
}

// Update records the duration of an event, stored as microseconds.
func (t *microSecondsTimer) Update(d time.Duration) {
	t.mutex.Lock()
	defer t.mutex.Unlock()
	t.histogram.Update(int64(d / time.Microsecond))
	t.meter.Mark(1)
}

// UpdateSince records the duration of an event that started at ts and ends
// now, stored as microseconds.
func (t *microSecondsTimer) UpdateSince(ts time.Time) {
	t.mutex.Lock()
	defer t.mutex.Unlock()
	t.histogram.Update(int64(time.Since(ts) / time.Microsecond))
	t.meter.Mark(1)
}

// Variance returns the variance of the values in the sample.
func (t *microSecondsTimer) Variance() float64 {
	return t.histogram.Variance()
}
// timerSnapshot is a read-only copy of another Timer. Based on metrics.TimerSnapshot.
// Mutating methods (Time, Update, UpdateSince) panic by design.
type timerSnapshot struct {
	histogram *metrics.HistogramSnapshot
	meter     *metrics.MeterSnapshot
}
// Count returns the number of events recorded at the time the snapshot was
// taken.
func (t *timerSnapshot) Count() int64 { return t.histogram.Count() }

// Max returns the maximum value at the time the snapshot was taken.
func (t *timerSnapshot) Max() int64 { return t.histogram.Max() }

// Mean returns the mean value at the time the snapshot was taken.
func (t *timerSnapshot) Mean() float64 { return t.histogram.Mean() }

// Min returns the minimum value at the time the snapshot was taken.
func (t *timerSnapshot) Min() int64 { return t.histogram.Min() }

// Percentile returns an arbitrary percentile of sampled values at the time the
// snapshot was taken.
func (t *timerSnapshot) Percentile(p float64) float64 {
	return t.histogram.Percentile(p)
}

// Percentiles returns a slice of arbitrary percentiles of sampled values at
// the time the snapshot was taken.
func (t *timerSnapshot) Percentiles(ps []float64) []float64 {
	return t.histogram.Percentiles(ps)
}

// Rate1 returns the one-minute moving average rate of events per second at the
// time the snapshot was taken.
func (t *timerSnapshot) Rate1() float64 { return t.meter.Rate1() }

// Rate5 returns the five-minute moving average rate of events per second at
// the time the snapshot was taken.
func (t *timerSnapshot) Rate5() float64 { return t.meter.Rate5() }

// Rate15 returns the fifteen-minute moving average rate of events per second
// at the time the snapshot was taken.
func (t *timerSnapshot) Rate15() float64 { return t.meter.Rate15() }

// RateMean returns the meter's mean rate of events per second at the time the
// snapshot was taken.
func (t *timerSnapshot) RateMean() float64 { return t.meter.RateMean() }

// Snapshot returns the snapshot itself; it is already immutable.
func (t *timerSnapshot) Snapshot() metrics.Timer { return t }

// StdDev returns the standard deviation of the values at the time the snapshot
// was taken.
func (t *timerSnapshot) StdDev() float64 { return t.histogram.StdDev() }

// Stop is a no-op.
func (t *timerSnapshot) Stop() {}

// Sum returns the sum at the time the snapshot was taken.
func (t *timerSnapshot) Sum() int64 { return t.histogram.Sum() }

// Time panics: a snapshot is read-only.
func (*timerSnapshot) Time(func()) {
	panic("Time called on a timerSnapshot")
}

// Update panics: a snapshot is read-only.
func (*timerSnapshot) Update(time.Duration) {
	panic("Update called on a timerSnapshot")
}

// UpdateSince panics: a snapshot is read-only.
func (*timerSnapshot) UpdateSince(time.Time) {
	panic("UpdateSince called on a timerSnapshot")
}

// Variance returns the variance of the values at the time the snapshot was
// taken.
func (t *timerSnapshot) Variance() float64 { return t.histogram.Variance() }
package spriteutils
import (
"github.com/hajimehoshi/ebiten"
"image"
"math"
)
// SimpleSprite is a sprite interface with methods to draw and update
type SimpleSprite interface {
// Update the sprite
Update()
// Draw the sprite to screen
Draw(screen *ebiten.Image) error
}
// Sprite represents an image with position, rotation, and velocity
type Sprite struct {
// Image is the ebiten image to draw for this sprite
Image *ebiten.Image
// X is the sprites x-axis position in 2D space
X int
// Y is the sprites y-axis position in 2D space
Y int
// XVelocity is the sprite's velocity in the x-axis
XVelocity float64
// YVelocity is the sprite's velocity in the y-axis
YVelocity float64
// Rotation is the sprite's rotation in radians
Rotation float64
}
// Update the sprite by applying its velocity to position
func (sprite *Sprite) Update() {
sprite.Y += int(sprite.YVelocity)
sprite.X += int(sprite.XVelocity)
}
// Draw the sprite to screen after applying rotation and translation transformations
func (sprite *Sprite) Draw(screen *ebiten.Image) error {
op := &ebiten.DrawImageOptions{}
// make sure rotation occurs around mid-point
width, height := sprite.Image.Size()
op.GeoM.Translate(-float64(width)/2.0, -float64(height)/2.0)
op.GeoM.Rotate(sprite.Rotation)
op.GeoM.Translate(float64(width)/2.0, float64(height)/2.0)
op.GeoM.Translate(float64(sprite.X), float64(sprite.Y))
return screen.DrawImage(sprite.Image, op)
}
// IsColliding determines whether there is a collision between this sprite and another.
// A collision is determined to have occurred if the non-transparent sprite images are touching
func (sprite *Sprite) IsColliding(otherSprite *Sprite) bool {
// The "hitbox" is considered to be the real bounds of the image
spriteHitbox := image.Rectangle{
Min: image.Point{X: sprite.X, Y: sprite.Y},
Max: image.Point{X: sprite.X + sprite.Image.Bounds().Dx(), Y: sprite.Y + sprite.Image.Bounds().Dy()},
}
otherSpriteHitbox := image.Rectangle{
Min: image.Point{X: otherSprite.X, Y: otherSprite.Y},
Max: image.Point{X: otherSprite.X + otherSprite.Image.Bounds().Dx(), Y: otherSprite.Y + otherSprite.Image.Bounds().Dy()},
}
// If the hitboxes don't overlap then there can be no collision
if !spriteHitbox.Overlaps(otherSpriteHitbox) {
return false
}
// Get the rectangle representing the overlap of the two sprites hitboxes
intersection := spriteHitbox.Intersect(otherSpriteHitbox.Bounds())
// Go through each pixel in the intersection rectangle. If the corresponding pixel in both images is non-transparent
// then we consider this to be a collision.
for i := intersection.Min.X; i < intersection.Max.X; i++ {
for y := intersection.Min.Y; y < intersection.Max.Y; y++ {
var _, _, _, spritePixelAlpha = sprite.Image.At(rotatePoint(i-sprite.X, y-sprite.Y, sprite.Rotation, sprite.Image.Bounds().Dx()/2, sprite.Image.Bounds().Dy()/2)).RGBA()
var _, _, _, otherSpritePixelAlpha = otherSprite.Image.At(rotatePoint(i-otherSprite.X, y-otherSprite.Y, otherSprite.Rotation, otherSprite.Image.Bounds().Dx()/2, otherSprite.Image.Bounds().Dy()/2)).RGBA()
if spritePixelAlpha != 0 && otherSpritePixelAlpha != 0 {
return true
}
}
}
return false
}
// ApplyImpulse applies a 2d vector force represented by (xVelocity, yVelocity) to the sprite
func (sprite *Sprite) ApplyImpulse(xVelocity, yVelocity float64) {
sprite.XVelocity += xVelocity
sprite.YVelocity += yVelocity
}
// rotatePoint takes a point (x,y) and rotates it by theta (radians) around the origin point (originX, originY)
// to get the resulting point post-rotation
func rotatePoint(x int, y int, theta float64, originX int, originY int) (int, int) {
sinTheta := math.Sin(theta)
cosTheta := math.Cos(theta)
tx := x - originX
ty := y - originY
rx := int(float64(tx)*cosTheta-float64(ty)*sinTheta) + originX
ry := int(float64(tx)*sinTheta+float64(ty)*cosTheta) + originY
return rx, ry
} | sprite.go | 0.823009 | 0.601974 | sprite.go | starcoder |
//go:build gofuzz
// +build gofuzz
package uint256
import (
"fmt"
"math/big"
"reflect"
"runtime"
"strings"
)
const (
opUdivrem = iota
opMul
opLsh
opAdd
opSub
opMulmod
)
type opDualArgFunc func(*Int, *Int, *Int) *Int
type bigDualArgFunc func(*big.Int, *big.Int, *big.Int) *big.Int
type opThreeArgFunc func(*Int, *Int, *Int, *Int) *Int
type bigThreeArgFunc func(*big.Int, *big.Int, *big.Int, *big.Int) *big.Int
func crash(op interface{}, msg string, args ...Int) {
fn := runtime.FuncForPC(reflect.ValueOf(op).Pointer())
fnName := fn.Name()
fnFile, fnLine := fn.FileLine(fn.Entry())
var strArgs []string
for i, arg := range args {
strArgs = append(strArgs, fmt.Sprintf("%d: %x", i, &arg))
}
panic(fmt.Sprintf("%s\nfor %s (%s:%d)\n%v",
msg, fnName, fnFile, fnLine, strings.Join(strArgs, "\n")))
}
func checkDualArgOp(op opDualArgFunc, bigOp bigDualArgFunc, x, y Int) {
origX := x
origY := y
var result Int
ret := op(&result, &x, &y)
if ret != &result {
crash(op, "returned not the pointer receiver", x, y)
}
if x != origX {
crash(op, "first argument modified", x, y)
}
if y != origY {
crash(op, "second argument modified", x, y)
}
expected, _ := FromBig(bigOp(new(big.Int), x.ToBig(), y.ToBig()))
if result != *expected {
crash(op, "unexpected result", x, y)
}
// Test again when the receiver is not zero.
var garbage Int
garbage.Xor(&x, &y)
ret = op(&garbage, &x, &y)
if ret != &garbage {
crash(op, "returned not the pointer receiver", x, y)
}
if garbage != *expected {
crash(op, "unexpected result", x, y)
}
if x != origX {
crash(op, "first argument modified", x, y)
}
if y != origY {
crash(op, "second argument modified", x, y)
}
// Test again with the receiver aliasing arguments.
ret = op(&x, &x, &y)
if ret != &x {
crash(op, "returned not the pointer receiver", x, y)
}
if x != *expected {
crash(op, "unexpected result", x, y)
}
ret = op(&y, &origX, &y)
if ret != &y {
crash(op, "returned not the pointer receiver", x, y)
}
if y != *expected {
crash(op, "unexpected result", x, y)
}
}
func checkThreeArgOp(op opThreeArgFunc, bigOp bigThreeArgFunc, x, y, z Int) {
origX := x
origY := y
origZ := z
var result Int
ret := op(&result, &x, &y, &z)
if ret != &result {
crash(op, "returned not the pointer receiver", x, y, z)
}
switch {
case x != origX:
crash(op, "first argument modified", x, y, z)
case y != origY:
crash(op, "second argument modified", x, y, z)
case z != origZ:
crash(op, "third argument modified", x, y, z)
}
expected, _ := FromBig(bigOp(new(big.Int), x.ToBig(), y.ToBig(), z.ToBig()))
if have, want := result, *expected; have != want {
crash(op, fmt.Sprintf("unexpected result: have %v want %v", have, want), x, y, z)
}
// Test again when the receiver is not zero.
var garbage Int
garbage.Xor(&x, &y)
ret = op(&garbage, &x, &y, &z)
if ret != &garbage {
crash(op, "returned not the pointer receiver", x, y, z)
}
if have, want := garbage, *expected; have != want {
crash(op, fmt.Sprintf("unexpected result: have %v want %v", have, want), x, y, z)
}
switch {
case x != origX:
crash(op, "first argument modified", x, y, z)
case y != origY:
crash(op, "second argument modified", x, y, z)
case z != origZ:
crash(op, "third argument modified", x, y, z)
}
// Test again with the receiver aliasing arguments.
ret = op(&x, &x, &y, &z)
if ret != &x {
crash(op, "returned not the pointer receiver", x, y, z)
}
if have, want := x, *expected; have != want {
crash(op, fmt.Sprintf("unexpected result: have %v want %v", have, want), x, y, z)
}
ret = op(&y, &origX, &y, &z)
if ret != &y {
crash(op, "returned not the pointer receiver", x, y, z)
}
if y != *expected {
crash(op, "unexpected result", x, y, z)
}
ret = op(&z, &origX, &origY, &z)
if ret != &z {
crash(op, "returned not the pointer receiver", x, y, z)
}
if z != *expected {
crash(op, fmt.Sprintf("unexpected result: have %v want %v", z.ToBig(), expected), x, y, z)
}
}
func Fuzz(data []byte) int {
switch len(data) {
case 64:
return fuzzBinaryOp(data)
case 96:
return fuzzTernaryOp(data)
}
return -1
}
func fuzzBinaryOp(data []byte) int {
var x, y Int
x.SetBytes(data[0:32])
y.SetBytes(data[32:])
if !y.IsZero() { // uDivrem
checkDualArgOp((*Int).Div, (*big.Int).Div, x, y)
checkDualArgOp((*Int).Mod, (*big.Int).Mod, x, y)
}
{ // opMul
checkDualArgOp((*Int).Mul, (*big.Int).Mul, x, y)
}
{ // opLsh
lsh := func(z, x, y *Int) *Int {
return z.Lsh(x, uint(y[0]))
}
bigLsh := func(z, x, y *big.Int) *big.Int {
n := uint(y.Uint64())
if n > 256 {
n = 256
}
return z.Lsh(x, n)
}
checkDualArgOp(lsh, bigLsh, x, y)
}
{ // opAdd
checkDualArgOp((*Int).Add, (*big.Int).Add, x, y)
}
{ // opSub
checkDualArgOp((*Int).Sub, (*big.Int).Sub, x, y)
}
return 1
}
func bigMulMod(b1, b2, b3, b4 *big.Int) *big.Int {
return b1.Mod(big.NewInt(0).Mul(b2, b3), b4)
}
// intMulMod computes (x*y) mod m into the receiver dst and returns dst.
func intMulMod(dst, x, y, m *Int) *Int {
	return dst.MulMod(x, y, m)
}
func bigAddMod(b1, b2, b3, b4 *big.Int) *big.Int {
return b1.Mod(big.NewInt(0).Add(b2, b3), b4)
}
// intAddMod computes (x+y) mod m into the receiver dst and returns dst.
func intAddMod(dst, x, y, m *Int) *Int {
	return dst.AddMod(x, y, m)
}
func fuzzTernaryOp(data []byte) int {
var x, y, z Int
x.SetBytes(data[:32])
y.SetBytes(data[32:64])
z.SetBytes(data[64:])
if z.IsZero() {
return 0
}
{ // mulMod
checkThreeArgOp(intMulMod, bigMulMod, x, y, z)
}
{ // addMod
checkThreeArgOp(intAddMod, bigAddMod, x, y, z)
}
return 1
} | vendor/github.com/holiman/uint256/fuzz.go | 0.520009 | 0.434341 | fuzz.go | starcoder |
package rtfdoc
import (
"fmt"
"strings"
)
// SetMarginLeft function sets Table left margin.
// NOTE(review): values appear to be RTF twips, like the border widths — confirm.
func (t *Table) SetMarginLeft(value int) *Table {
	t.marginLeft = value
	return t
}
// SetMarginRight function sets Table right margin
func (t *Table) SetMarginRight(value int) *Table {
	t.marginRight = value
	return t
}
// SetMarginTop function sets Table top margin
func (t *Table) SetMarginTop(value int) *Table {
	t.marginTop = value
	return t
}
// SetMarginBottom function sets Table bottom margin
func (t *Table) SetMarginBottom(value int) *Table {
	t.marginBottom = value
	//tp.margins += fmt.Sprintf(" \\trpaddb%d", value)
	return t
}
// SetPaddingLeft function sets Table left padding
func (t *Table) SetPaddingLeft(value int) *Table {
	t.paddingLeft = value
	return t
}
// SetPaddingRight function sets Table right padding
func (t *Table) SetPaddingRight(value int) *Table {
	t.paddingRight = value
	return t
}
// SetPaddingTop function sets Table top padding
func (t *Table) SetPaddingTop(value int) *Table {
	t.paddingTop = value
	return t
}
// SetPaddingBottom function sets Table bottom padding
func (t *Table) SetPaddingBottom(value int) *Table {
	t.paddingBottom = value
	//tp.paddings += fmt.Sprintf(" \\trpaddb%d", value)
	return t
}
// SetPadding function sets all four Table paddings to the same value.
func (t *Table) SetPadding(value int) *Table {
	return t.SetPaddingBottom(value).SetPaddingLeft(value).SetPaddingRight(value).SetPaddingTop(value)
}
// SetAlign sets Table aligning (c/center, l/left, r/right).
// Any other value is ignored and the current alignment is kept.
func (t *Table) SetAlign(align string) *Table {
	switch align {
	case AlignCenter, AlignLeft, AlignRight:
		t.align = align
	}
	return t
}
// AddTable creates a new Table, appends it to the document content and
// returns a pointer to it. The table inherits the document's color
// table and font color, starts center-aligned with 100-unit margins,
// and gets a full single-thickness black border of width 15.
func (doc *Document) AddTable() *Table {
	t := Table{
		align: AlignCenter,
		docWidth: doc.maxWidth,
	}
	t.SetMarginLeft(100).SetMarginRight(100).SetMarginTop(100).SetMarginBottom(100)
	t.colorTable = doc.colorTable
	t.fontColor = doc.fontColor
	t.SetBorderLeft(true).
		SetBorderRight(true).
		SetBorderTop(true).
		SetBorderBottom(true).
		SetBorderStyle(BorderSingleThickness).
		SetBorderColor(ColorBlack).
		SetBorderWidth(15)
	// Recompute the usable row width now that the margins are set.
	t.updateMaxWidth()
	doc.content = append(doc.content, &t)
	return &t
}
// updateMaxWidth recomputes the usable width available to rows: the
// document width minus the table's horizontal margins.
func (t *Table) updateMaxWidth() *Table {
	t.maxWidth = t.docWidth - (t.marginLeft + t.marginRight)
	return t
}
// compose renders the table as RTF: one \trowd...\row group per row,
// carrying the table's alignment and cell paddings.
func (t Table) compose() string {
	var res strings.Builder
	var align = ""
	if t.align != "" {
		align = fmt.Sprintf("\\trq%s", t.align)
	}
	for _, tr := range t.data {
		res.WriteString(fmt.Sprintf("\n{\\trowd %s", align))
		res.WriteString(fmt.Sprintf("\n\\trpaddl%d \\trpaddr%d \\trpaddt%d \\trpaddb%d\n", t.paddingLeft, t.paddingRight, t.paddingTop, t.paddingBottom))
		//res += t.getMargins()
		res.WriteString(tr.encode())
		res.WriteString("\\row}")
	}
	return res.String()
}
// AddTableRow creates a new row, appends it to the table and returns a
// pointer to it. The row inherits the table's colors and border
// configuration, and its width is the table's current usable width.
func (t *Table) AddTableRow() *TableRow {
	tr := TableRow{
		generalSettings: generalSettings{
			fontColor: t.fontColor,
			colorTable: t.colorTable,
		},
		tableWidth: t.maxWidth,
	}
	tr.SetBorderLeft(t.borderLeft).
		SetBorderRight(t.borderRight).
		SetBorderTop(t.borderTop).
		SetBorderBottom(t.borderBottom).
		SetBorderStyle(t.borderStyle).
		SetBorderColor(t.borderColor).
		SetBorderWidth(t.borderWidth)
	t.updateMaxWidth()
	t.data = append(t.data, &tr)
	return &tr
}
// updateMaxWidth syncs the row's usable width with the table width it
// was created with.
func (tr *TableRow) updateMaxWidth() *TableRow {
	tr.maxWidth = tr.tableWidth
	return tr
}
// SetBorderLeft function sets Table left border presence
func (t *Table) SetBorderLeft(isBorder bool) *Table {
	t.borderLeft = isBorder
	return t
}
// SetBorderRight function sets Table right border presence
func (t *Table) SetBorderRight(isBorder bool) *Table {
	t.borderRight = isBorder
	return t
}
// SetBorderTop function sets Table top border presence
func (t *Table) SetBorderTop(isBorder bool) *Table {
	t.borderTop = isBorder
	return t
}
// SetBorderBottom function sets Table bottom border presence
func (t *Table) SetBorderBottom(isBorder bool) *Table {
	t.borderBottom = isBorder
	return t
}
// SetBorder toggles all four Table borders at once.
// The previous implementation also wrote t.borderBottom directly before
// delegating, which was redundant with the SetBorderBottom call below.
func (t *Table) SetBorder(isBorder bool) *Table {
	return t.SetBorderBottom(isBorder).SetBorderTop(isBorder).SetBorderLeft(isBorder).SetBorderRight(isBorder)
}
// SetBorderStyle function sets the Table border style. The style is
// validated against the known RTF border styles; an unknown value is
// ignored. A valid style is also propagated to all rows already added
// (rows propagate it to their cells in turn).
func (t *Table) SetBorderStyle(bStyle string) *Table {
	for _, i := range []string{
		BorderDashSmall,
		BorderDashed,
		BorderDotDash,
		BorderDotDotDash,
		BorderDotted,
		BorderDouble,
		BorderDoubleThickness,
		BorderWavyDouble,
		BorderEmboss,
		BorderEngrave,
		BorderHairline,
		BorderInset,
		BorderOutset,
		BorderShadowed,
		BorderSingleThickness,
		BorderStripped,
		BorderThickThinLarge,
		BorderThickThinMedium,
		BorderThickThinSmall,
		BorderThinThickLarge,
		BorderThinThickMedium,
		BorderThinThickSmall,
		BorderThinThickThinLarge,
		BorderThinThickThinMedium,
		BorderTriple,
		BorderWavy,
	} {
		if bStyle == i {
			t.borderStyle = i
			for tr := range t.data {
				t.data[tr].SetBorderStyle(i)
			}
			break
		}
	}
	return t
}
// SetBorderColor function sets color of the Table's border and its rows and cells
func (t *Table) SetBorderColor(color string) *Table {
	t.borderColor = color
	for tr := range t.data {
		t.data[tr].SetBorderColor(color)
	}
	return t
}
// SetBorderWidth function sets width of the Table's border and its rows and cells
func (t *Table) SetBorderWidth(value int) *Table {
	t.borderWidth = value
	for tr := range t.data {
		t.data[tr].SetBorderWidth(value)
	}
	return t
}
// SetWidth sets width of Table (used by GetTableCellWidthByRatio to
// split the table into proportional cells).
func (t *Table) SetWidth(width int) *Table {
	t.width = width
	return t
}
// SetBorderLeft function sets left border presence
func (tr *TableRow) SetBorderLeft(isBorder bool) *TableRow {
	tr.borderLeft = isBorder
	return tr
}
// SetBorderRight function sets right border presence
func (tr *TableRow) SetBorderRight(isBorder bool) *TableRow {
	tr.borderRight = isBorder
	return tr
}
// SetBorderTop function sets top border presence
func (tr *TableRow) SetBorderTop(isBorder bool) *TableRow {
	tr.borderTop = isBorder
	return tr
}
// SetBorderBottom function sets bottom border presence
func (tr *TableRow) SetBorderBottom(isBorder bool) *TableRow {
	tr.borderBottom = isBorder
	return tr
}
// SetBorder toggles all four row borders at once.
func (tr *TableRow) SetBorder(isBorder bool) *TableRow {
	return tr.SetBorderBottom(isBorder).SetBorderTop(isBorder).SetBorderLeft(isBorder).SetBorderRight(isBorder)
}
// SetBorderStyle sets the row border style. The style is validated
// against the known RTF border styles; an unknown value is ignored.
// A valid style is also propagated to all cells already in the row.
func (tr *TableRow) SetBorderStyle(bStyle string) *TableRow {
	for _, i := range []string{
		BorderDashSmall,
		BorderDashed,
		BorderDotDash,
		BorderDotDotDash,
		BorderDotted,
		BorderDouble,
		BorderDoubleThickness,
		BorderWavyDouble,
		BorderEmboss,
		BorderEngrave,
		BorderHairline,
		BorderInset,
		BorderOutset,
		BorderShadowed,
		BorderSingleThickness,
		BorderStripped,
		BorderThickThinLarge,
		BorderThickThinMedium,
		BorderThickThinSmall,
		BorderThinThickLarge,
		BorderThinThickMedium,
		BorderThinThickSmall,
		BorderThinThickThinLarge,
		BorderThinThickThinMedium,
		BorderTriple,
		BorderWavy,
	} {
		if bStyle == i {
			tr.borderStyle = i
			for c := range tr.cells {
				tr.cells[c].SetBorderStyle(i)
			}
			break
		}
	}
	return tr
}
// SetBorderColor sets border color of the row (and recursively on its cells)
func (tr *TableRow) SetBorderColor(color string) *TableRow {
	tr.borderColor = color
	for c := range tr.cells {
		tr.cells[c].SetBorderColor(color)
	}
	return tr
}
// SetBorderWidth sets border width (and recursively on its cells)
func (tr *TableRow) SetBorderWidth(value int) *TableRow {
	tr.borderWidth = value
	for c := range tr.cells {
		tr.cells[c].SetBorderWidth(value)
	}
	return tr
}
// encode renders the row as RTF: border control words first, then a
// cell-properties/\cellx pair per cell, then the cell contents.
func (tr *TableRow) encode() string {
	var res strings.Builder
	// Border settings
	bTempl := "\n\\trbrdr%s\\brdrw%d\\brdr%s"
	// RTF color-table indices are 1-based, hence c+1.
	for c := range *tr.colorTable {
		if ((*tr.colorTable)[c]).name == tr.borderColor {
			bTempl += fmt.Sprintf("\\brdrcf%d", c+1)
		}
	}
	if tr.borderLeft {
		res.WriteString(fmt.Sprintf(bTempl, "l", tr.borderWidth, tr.borderStyle))
	}
	if tr.borderRight {
		res.WriteString(fmt.Sprintf(bTempl, "r", tr.borderWidth, tr.borderStyle))
	}
	if tr.borderTop {
		res.WriteString(fmt.Sprintf(bTempl, "t", tr.borderWidth, tr.borderStyle))
	}
	if tr.borderBottom {
		res.WriteString(fmt.Sprintf(bTempl, "b", tr.borderWidth, tr.borderStyle))
	}
	if len(tr.cells) != 0 {
		// \cellx takes the cumulative right edge of each cell, so keep
		// a running total of the cell widths.
		cellLengthPosition := 0
		for _, tc := range tr.cells {
			cellLengthPosition += tc.getCellWidth()
			res.WriteString(tc.cellComposeProperties())
			res.WriteString(fmt.Sprintf("\\cellx%d", cellLengthPosition))
		}
		res.WriteString("\n")
		for _, tc := range tr.cells {
			res.WriteString(tc.cellComposeData())
		}
	}
	return res.String()
}
// AddDataCell creates a new cell of the given width, appends it to the
// row and returns a pointer to it. The cell inherits the row's colors
// and border configuration.
func (tr *TableRow) AddDataCell(width int) *TableCell {
	dc := TableCell{
		cellWidth: width,
		maxWidth: width,
	}
	dc.fontColor = tr.fontColor
	dc.colorTable = tr.colorTable
	dc.SetBorderLeft(tr.borderLeft).
		SetBorderRight(tr.borderRight).
		SetBorderTop(tr.borderTop).
		SetBorderBottom(tr.borderBottom).
		SetBorderStyle(tr.borderStyle).
		SetBorderColor(tr.borderColor).
		SetBorderWidth(tr.borderWidth)
	dc.updateMaxWidth()
	tr.cells = append(tr.cells, &dc)
	return &dc
}
// updateMaxWidth recomputes the content width available inside the
// cell after subtracting its horizontal margins.
func (dc *TableCell) updateMaxWidth() *TableCell {
	dc.maxWidth = dc.cellWidth - dc.marginLeft - dc.marginRight
	return dc
}
// SetWidth sets width of the cell
func (dc *TableCell) SetWidth(cellWidth int) *TableCell {
	dc.cellWidth = cellWidth
	return dc
}
// AddParagraph creates a left-aligned paragraph inside the cell,
// appends it to the cell content and returns a pointer to it.
func (dc *TableCell) AddParagraph() *Paragraph {
	p := Paragraph{
		isTable: true,
		align: "l",
		// NOTE(review): "\fl360" is not a standard RTF control word;
		// first-line indent is usually "\fi" — confirm intent.
		indent: "\\fl360",
		generalSettings: generalSettings{
			colorTable: dc.colorTable,
			fontColor: dc.fontColor,
		},
		allowedWidth: dc.maxWidth,
	}
	p.updateMaxWidth()
	dc.content = append(dc.content, &p)
	return &p
}
// cellComposeProperties renders the cell's RTF property run: borders,
// paddings, vertical-merge state, vertical alignment and background.
func (dc TableCell) cellComposeProperties() string {
	var res strings.Builder
	// Cell properties go here (borders and the rest).
	bTempl := "\n\\clbrdr%s\\brdrw%d\\brdr%s"
	// RTF color-table indices are 1-based, hence c+1.
	for c := range *dc.colorTable {
		if ((*dc.colorTable)[c]).name == dc.borderColor {
			bTempl += fmt.Sprintf("\\brdrcf%d", c+1)
		}
	}
	if dc.borderLeft {
		res.WriteString(fmt.Sprintf(bTempl, "l", dc.borderWidth, dc.borderStyle))
	}
	if dc.borderRight {
		res.WriteString(fmt.Sprintf(bTempl, "r", dc.borderWidth, dc.borderStyle))
	}
	if dc.borderTop {
		res.WriteString(fmt.Sprintf(bTempl, "t", dc.borderWidth, dc.borderStyle))
	}
	if dc.borderBottom {
		res.WriteString(fmt.Sprintf(bTempl, "b", dc.borderWidth, dc.borderStyle))
	}
	// Paddings (emitted via the \clpad* control words).
	res.WriteString(fmt.Sprintf("\n\\clpadl%d\\clpadr%d\\clpadt%d\\clpadb%d",
		dc.paddingLeft, dc.paddingRight, dc.paddingTop, dc.paddingBottom,
	))
	// Vertical merge state ("gf" -> \clvmgf first cell, "rg" -> \clvmrg continuation).
	if dc.verticalMerged != "" {
		res.WriteString(fmt.Sprintf("\\clvm%s", dc.verticalMerged))
	}
	// Vertical alignment inside the cell.
	res.WriteString(fmt.Sprintf("\\clvertal%s", dc.vTextAlign))
	// Background color (again a 1-based color-table index).
	if dc.backgroundColor != "" {
		for c := range *dc.colorTable {
			if ((*dc.colorTable)[c]).name == dc.backgroundColor {
				res.WriteString(fmt.Sprintf("\\clcbpat%d", c+1))
			}
		}
	}
	return res.String()
}
// cellComposeData renders the cell contents followed by \cell. An empty
// cell gets one default paragraph so the RTF stays well-formed.
// Note: dc is a value receiver, so AddParagraph mutates only this local
// copy — the paragraph is still rendered below, but not persisted.
func (dc TableCell) cellComposeData() string {
	var res strings.Builder
	if len(dc.content) == 0 {
		dc.AddParagraph()
	}
	for _, p := range dc.content {
		res.WriteString(fmt.Sprintf("%s \n", p.compose()))
	}
	res.WriteString("\\cell")
	return res.String()
}
// getCellWidth returns the cell's configured width.
func (dc TableCell) getCellWidth() int {
	return dc.cellWidth
}
// Border-presence setters for a single data cell.
// SetBorderLeft function sets left border to be visible
func (dc *TableCell) SetBorderLeft(value bool) *TableCell {
	dc.borderLeft = value
	return dc
}
// SetBorderRight function sets right border to be visible
func (dc *TableCell) SetBorderRight(value bool) *TableCell {
	dc.borderRight = value
	return dc
}
// SetBorderTop function sets top border to be visible
func (dc *TableCell) SetBorderTop(value bool) *TableCell {
	dc.borderTop = value
	return dc
}
// SetBorderBottom function sets bottom border to be visible
func (dc *TableCell) SetBorderBottom(value bool) *TableCell {
	dc.borderBottom = value
	return dc
}
// SetBorder toggles all four cell borders at once.
func (dc *TableCell) SetBorder(isBorder bool) *TableCell {
	return dc.SetBorderBottom(isBorder).SetBorderTop(isBorder).SetBorderLeft(isBorder).SetBorderRight(isBorder)
}
// SetBorderWidth function sets cell's border width px
func (dc *TableCell) SetBorderWidth(value int) *TableCell {
	dc.borderWidth = value
	return dc
}
func (dc *TableCell) SetBorderStyle(bStyle string) *TableCell {
bStyle = BorderSingleThickness
for _, i := range []string{
BorderDashSmall,
BorderDashed,
BorderDotDash,
BorderDotDotDash,
BorderDotted,
BorderDouble,
BorderDoubleThickness,
BorderWavyDouble,
BorderEmboss,
BorderEngrave,
BorderHairline,
BorderInset,
BorderOutset,
BorderShadowed,
BorderSingleThickness,
BorderStripped,
BorderThickThinLarge,
BorderThickThinMedium,
BorderThickThinSmall,
BorderThinThickLarge,
BorderThinThickMedium,
BorderThinThickSmall,
BorderThinThickThinLarge,
BorderThinThickThinMedium,
BorderTriple,
BorderWavy,
} {
if bStyle == i {
dc.borderStyle = i
break
}
}
return dc
}
// GetTableCellWidthByRatio splits the table's configured width into
// per-cell widths proportional to the given ratios (each result is
// truncated toward zero by the int conversion).
func (t *Table) GetTableCellWidthByRatio(ratio ...float64) []int {
	var total float64
	for _, r := range ratio {
		total += r
	}
	widths := make([]int, len(ratio))
	for i, r := range ratio {
		widths[i] = int(r * (float64(t.width) / total))
	}
	return widths
}
// SetVerticalMergedFirst sets this cell to be first in vertical merging
// (rendered as \clvmgf by cellComposeProperties).
func (dc *TableCell) SetVerticalMergedFirst() *TableCell {
	dc.verticalMerged = "gf"
	return dc
}
// SetVerticalMergedNext sets this cell to be a continuation cell in
// vertical merging (rendered as \clvmrg).
func (dc *TableCell) SetVerticalMergedNext() *TableCell {
	dc.verticalMerged = "rg"
	return dc
}
// func (dc TableCell) getVerticalMergedProperty() string {
// 	return dc.verticalMerged
// }
// SetMarginLeft function sets this cell's left margin
func (dc *TableCell) SetMarginLeft(value int) *TableCell {
	dc.marginLeft = value
	return dc
}
// SetMarginRight function sets this cell's right margin
func (dc *TableCell) SetMarginRight(value int) *TableCell {
	dc.marginRight = value
	return dc
}
// SetMarginTop function sets this cell's top margin
func (dc *TableCell) SetMarginTop(value int) *TableCell {
	dc.marginTop = value
	return dc
}
// SetMarginBottom function sets this cell's bottom margin
func (dc *TableCell) SetMarginBottom(value int) *TableCell {
	dc.marginBottom = value
	return dc
}
// SetPaddingLeft function sets this cell's left padding
func (dc *TableCell) SetPaddingLeft(value int) *TableCell {
	dc.paddingLeft = value
	return dc
}
// SetPaddingRight function sets this cell's right padding
func (dc *TableCell) SetPaddingRight(value int) *TableCell {
	dc.paddingRight = value
	return dc
}
// SetPaddingTop function sets this cell's top padding
func (dc *TableCell) SetPaddingTop(value int) *TableCell {
	dc.paddingTop = value
	return dc
}
// SetPaddingBottom function sets this cell's bottom padding
func (dc *TableCell) SetPaddingBottom(value int) *TableCell {
	dc.paddingBottom = value
	return dc
}
// SetPadding sets all four cell paddings to the same value.
func (dc *TableCell) SetPadding(value int) *TableCell {
	return dc.SetPaddingBottom(value).SetPaddingLeft(value).SetPaddingRight(value).SetPaddingTop(value)
}
// SetVAlign sets the vertical text alignment of the cell. Unknown
// values fall back to top alignment ("t").
func (dc *TableCell) SetVAlign(valign string) *TableCell {
	switch valign {
	case VAlignBottom, VAlignMiddle, VAlignTop:
		dc.vTextAlign = valign
	default:
		dc.vTextAlign = "t"
	}
	return dc
}
// SetBorderColor function sets cell's border color
func (dc *TableCell) SetBorderColor(color string) *TableCell {
dc.borderColor = color
return dc
}
// SetBackgroundColor function sets cell's background color
func (dc *TableCell) SetBackgroundColor(color string) *TableCell {
dc.backgroundColor = color
return dc
} | table.go | 0.721841 | 0.405154 | table.go | starcoder |
package main
import (
"github.com/gen2brain/raylib-go/raylib"
)
const (
	// maxColumns is the number of random columns generated in the scene.
	maxColumns = 20
)
func main() {
rl.InitWindow(800, 450, "raylib [core] example - 3d camera first person")
camera := rl.Camera3D{}
camera.Position = rl.NewVector3(4.0, 2.0, 4.0)
camera.Target = rl.NewVector3(0.0, 1.8, 0.0)
camera.Up = rl.NewVector3(0.0, 1.0, 0.0)
camera.Fovy = 60.0
camera.Type = rl.CameraPerspective
// Generates some random columns
heights := make([]float32, maxColumns)
positions := make([]rl.Vector3, maxColumns)
colors := make([]rl.Color, maxColumns)
for i := 0; i < maxColumns; i++ {
heights[i] = float32(rl.GetRandomValue(1, 12))
positions[i] = rl.NewVector3(float32(rl.GetRandomValue(-15, 15)), heights[i]/2, float32(rl.GetRandomValue(-15, 15)))
colors[i] = rl.NewColor(uint8(rl.GetRandomValue(20, 255)), uint8(rl.GetRandomValue(10, 55)), 30, 255)
}
rl.SetCameraMode(camera, rl.CameraFirstPerson) // Set a first person camera mode
rl.SetTargetFPS(60)
for !rl.WindowShouldClose() {
rl.UpdateCamera(&camera) // Update camera
rl.BeginDrawing()
rl.ClearBackground(rl.RayWhite)
rl.BeginMode3D(camera)
rl.DrawPlane(rl.NewVector3(0.0, 0.0, 0.0), rl.NewVector2(32.0, 32.0), rl.LightGray) // Draw ground
rl.DrawCube(rl.NewVector3(-16.0, 2.5, 0.0), 1.0, 5.0, 32.0, rl.Blue) // Draw a blue wall
rl.DrawCube(rl.NewVector3(16.0, 2.5, 0.0), 1.0, 5.0, 32.0, rl.Lime) // Draw a green wall
rl.DrawCube(rl.NewVector3(0.0, 2.5, 16.0), 32.0, 5.0, 1.0, rl.Gold) // Draw a yellow wall
// Draw some cubes around
for i := 0; i < maxColumns; i++ {
rl.DrawCube(positions[i], 2.0, heights[i], 2.0, colors[i])
rl.DrawCubeWires(positions[i], 2.0, heights[i], 2.0, rl.Maroon)
}
rl.EndMode3D()
rl.DrawRectangle(10, 10, 220, 70, rl.Fade(rl.SkyBlue, 0.5))
rl.DrawRectangleLines(10, 10, 220, 70, rl.Blue)
rl.DrawText("First person camera default controls:", 20, 20, 10, rl.Black)
rl.DrawText("- Move with keys: W, A, S, D", 40, 40, 10, rl.DarkGray)
rl.DrawText("- Mouse move to look around", 40, 60, 10, rl.DarkGray)
rl.EndDrawing()
}
rl.CloseWindow()
} | examples/core/3d_camera_first_person/main.go | 0.593138 | 0.417212 | main.go | starcoder |
package columns
import (
"fmt"
)
// CategoryIDColumn is the wrapper of `tf.feature_column.categorical_column_with_identity`
type CategoryIDColumn struct {
	Key string // input feature name
	BucketSize int // number of identity buckets (num_buckets)
	Delimiter string // field delimiter in the raw input
	Dtype string // data type of the column
}
// SequenceCategoryIDColumn is the wrapper of `tf.feature_column.sequence_categorical_column_with_identity`
// NOTE: only used in tf >= 2.0 versions.
type SequenceCategoryIDColumn struct {
	Key string // input feature name
	BucketSize int // number of identity buckets (num_buckets)
	Delimiter string // field delimiter in the raw input
	Dtype string // data type of the column
}
// GenerateCode implements the FeatureColumn interface. It emits the
// Python snippet constructing the TF feature column for this field.
func (cc *CategoryIDColumn) GenerateCode(cs *ColumnSpec) ([]string, error) {
	return []string{fmt.Sprintf("tf.feature_column.categorical_column_with_identity(key=\"%s\", num_buckets=%d)",
		cc.Key, cc.BucketSize)}, nil
}
// GetKey implements the FeatureColumn interface.
func (cc *CategoryIDColumn) GetKey() string {
	return cc.Key
}
// GetDelimiter implements the FeatureColumn interface.
func (cc *CategoryIDColumn) GetDelimiter() string {
	return cc.Delimiter
}
// GetDtype implements the FeatureColumn interface.
func (cc *CategoryIDColumn) GetDtype() string {
	return cc.Dtype
}
// GetInputShape implements the FeatureColumn interface; the shape is
// "[BucketSize]".
func (cc *CategoryIDColumn) GetInputShape() string {
	return fmt.Sprintf("[%d]", cc.BucketSize)
}
// GetColumnType implements the FeatureColumn interface.
func (cc *CategoryIDColumn) GetColumnType() int {
	return ColumnTypeCategoryID
}
// GenerateCode implements the FeatureColumn interface. It emits the
// Python snippet constructing the sequence TF feature column.
func (cc *SequenceCategoryIDColumn) GenerateCode(cs *ColumnSpec) ([]string, error) {
	return []string{fmt.Sprintf("tf.feature_column.sequence_categorical_column_with_identity(key=\"%s\", num_buckets=%d)",
		cc.Key, cc.BucketSize)}, nil
}
// GetDelimiter implements the FeatureColumn interface.
func (cc *SequenceCategoryIDColumn) GetDelimiter() string {
	return cc.Delimiter
}
// GetDtype implements the FeatureColumn interface.
func (cc *SequenceCategoryIDColumn) GetDtype() string {
	return cc.Dtype
}
// GetKey implements the FeatureColumn interface.
func (cc *SequenceCategoryIDColumn) GetKey() string {
	return cc.Key
}
// GetInputShape implements the FeatureColumn interface; the shape is
// "[BucketSize]".
func (cc *SequenceCategoryIDColumn) GetInputShape() string {
	return fmt.Sprintf("[%d]", cc.BucketSize)
}
// GetColumnType implements the FeatureColumn interface.
func (cc *SequenceCategoryIDColumn) GetColumnType() int {
	return ColumnTypeSeqCategoryID
}
// func parseCategoryColumnKey(el *exprlist) (*columnSpec, error) {
// if (*el)[1].typ == 0 {
// // explist, maybe DENSE/SPARSE expressions
// subExprList := (*el)[1].sexp
// isSparse := subExprList[0].val == sparse
// return resolveColumnSpec(&subExprList, isSparse)
// }
// return nil, nil
// }
package dsp
import (
"math"
)
// Spectrum is an audio spectrum in a buffer
type Spectrum struct {
	Bins []Bin // bins for processing
	SampleSize int // number of samples per slice
	binCount int // number of bins we look at
	fftSize int // number of fft bins
	OldValues [][]float64 // old values used for smoothing (per channel, per bin)
	SampleRate float64 // audio sample rate
	winVar float64 // window variable
	smoothFactor float64 // smoothing factor
	smoothScale float64 // smoothing pow
}
// Bin is a helper struct for spectrum
type Bin struct {
	powVal float64 // exponent applied to the bin magnitude before smoothing
	eqVal float64 // equalizer value
	floorFFT int // floor fft index
	ceilFFT int // ceiling fft index
	// widthFFT int // fft floor-ceiling index delta
}
// Frequencies are the dividing frequencies (upper bounds in Hz of the
// named frequency ranges below).
var Frequencies = []float64{
	// sub sub bass
	20.0, // 0
	// sub bass
	60.0, // 1
	// bass
	250.0, // 2
	// midrange
	4000.0, // 3
	// treble
	8000.0, // 4
	// brilliance
	22050.0, // 5
	// everything else
}
// BinCount returns the number of bins each stream has
func (sp *Spectrum) BinCount() int {
	return sp.binCount
}
// ProcessBin computes the smoothed, equalized magnitude of bin idx for
// channel ch from the FFT output src: it takes the peak magnitude over
// the bin's FFT range, applies the bin's power curve, exponentially
// smooths it against the previous value, and scales by the EQ value.
func (sp *Spectrum) ProcessBin(ch, idx int, src []complex128) float64 {
	mag := 0.0
	bin := sp.Bins[idx]
	fftFloor, fftCeil := bin.floorFFT, bin.ceilFFT
	if fftCeil > sp.fftSize {
		fftCeil = sp.fftSize
	}
	src = src[fftFloor:fftCeil]
	// Peak magnitude across the bin's FFT indices.
	for _, cmplx := range src {
		power := math.Hypot(real(cmplx), imag(cmplx))
		if mag < power {
			mag = power
		}
	}
	// time smoothing: blend the shaped magnitude with the previous value.
	mag = math.Pow(mag, bin.powVal) * (1.0 - sp.smoothScale)
	value := (sp.OldValues[ch][idx] * sp.smoothScale) + mag
	sp.OldValues[ch][idx] = value
	return value * bin.eqVal
}
// Recalculate rebuilds our frequency bins for the requested bin count
// and returns the count actually used (clamped below the FFT size).
// Returns early without work when the count is unchanged.
func (sp *Spectrum) Recalculate(binCount int) int {
	if sp.fftSize == 0 {
		sp.fftSize = sp.SampleSize/2 + 1
	}
	switch {
	case binCount >= sp.fftSize:
		binCount = sp.fftSize - 1
	case binCount == sp.binCount:
		return binCount
	}
	sp.binCount = binCount
	// clean the binCount
	for idx := range sp.Bins[:binCount] {
		sp.Bins[idx] = Bin{
			powVal: 0.65,
			eqVal: 1.0,
		}
	}
	sp.distribute(binCount)
	var bassCut = sp.freqToIdx(Frequencies[2], math.Floor)
	var fBassCut = float64(bassCut)
	// set widths
	for idx, b := range sp.Bins[:binCount] {
		if b.ceilFFT >= sp.fftSize {
			sp.Bins[idx].ceilFFT = sp.fftSize - 1
		}
		// sp.Bins[idx].widthFFT = b.ceilFFT - b.floorFFT
		// Soften the power curve of bass bins proportionally to how
		// deep below the bass cutoff they sit.
		if b.ceilFFT <= bassCut {
			sp.Bins[idx].powVal *= math.Max(0.5, float64(b.ceilFFT)/fBassCut)
		}
	}
	return binCount
}
// distribute assigns FFT index ranges to the bins on a logarithmic
// frequency scale between Frequencies[1] and min(Nyquist, Frequencies[4]).
// Note it writes bins+1 entries: the extra entry supplies the ceiling
// of the last real bin.
func (sp *Spectrum) distribute(bins int) {
	var lo = Frequencies[1]
	var hi = math.Min(sp.SampleRate/2, Frequencies[4])
	var loLog = math.Log10(lo)
	var hiLog = math.Log10(hi)
	var cF = (hiLog - loLog) / float64(bins)
	var cCoef = 100.0 / float64(bins+1)
	for idx := range sp.Bins[:bins+1] {
		frequency := ((float64(idx) * cF) + loLog)
		frequency = math.Pow(10.0, frequency)
		fftIdx := sp.freqToIdx(frequency, math.Floor)
		sp.Bins[idx].floorFFT = fftIdx
		sp.Bins[idx].eqVal = math.Log2(float64(fftIdx)+14) * cCoef
		// sp.Bins[idx].eqVal = 1.0
		if idx > 0 {
			// Guarantee strictly increasing floors and chain each
			// bin's ceiling to the next bin's floor.
			if sp.Bins[idx-1].floorFFT >= sp.Bins[idx].floorFFT {
				sp.Bins[idx].floorFFT = sp.Bins[idx-1].floorFFT + 1
			}
			sp.Bins[idx-1].ceilFFT = sp.Bins[idx].floorFFT
		}
	}
}
// mathFunc is a rounding function (e.g. math.Floor or math.Round).
type mathFunc func(float64) float64
// freqToIdx converts a frequency in Hz to an FFT bin index using the
// given rounding function, clamped to the last FFT bin.
func (sp *Spectrum) freqToIdx(freq float64, round mathFunc) int {
	var b = int(round(freq / (sp.SampleRate / float64(sp.SampleSize))))
	if b < sp.fftSize {
		return b
	}
	return sp.fftSize - 1
}
// SetWinVar sets the window variable used for distribution spread.
// Non-positive values fall back to 1.0.
func (sp *Spectrum) SetWinVar(g float64) {
	if g > 0.0 {
		sp.winVar = g
		return
	}
	sp.winVar = 1.0
}
// SetSmoothing sets the smoothing parameters
func (sp *Spectrum) SetSmoothing(factor float64) {
if factor <= 0.0 {
factor = math.SmallestNonzeroFloat64
}
sp.smoothFactor = factor
var sf = math.Pow(10.0, (1.0-factor)*(-25.0))
sp.smoothScale = math.Pow(sf, float64(sp.SampleSize)/sp.SampleRate)
} | dsp/spectrum.go | 0.72662 | 0.431285 | spectrum.go | starcoder |
package colorthief
import (
"github.com/wattb/imt"
)
const sigbits = 5
const rshift = 8 - sigbits
const max_iteration = 1000        // non-idiomatic names kept: referenced elsewhere in the package
const fract_by_populations = 0.75
// getColorIndex packs three reduced (sigbits-wide) channel values into
// a single histogram index: r in the top 5 bits, g in the middle 5,
// b in the low 5 (range 0..32767).
func getColorIndex(r, g, b int) int {
	// BUG fix: in Go, << and * share precedence and associate left to
	// right, so the previous `r << 2 * sigbits` parsed as
	// `(r << 2) * sigbits`, producing colliding indices. Parenthesize
	// to shift by 2*sigbits.
	return (r << (2 * sigbits)) + (g << sigbits) + b
}
// getHistogram builds a histogram over the quantized (5 bits per
// channel) color space, mapping each reduced color index to the number
// of pixels that fall into that bucket.
func getHistogram(pixels []imt.Color) map[int]int {
	histo := make(map[int]int)
	for _, p := range pixels {
		r, g, b, _ := p.RGBA()
		rval := int(r >> rshift)
		gval := int(g >> rshift)
		bval := int(b >> rshift)
		v := getColorIndex(rval, gval, bval)
		// A missing map entry reads as 0, so a plain increment counts
		// the first occurrence as 1. (The previous code initialized
		// new buckets to 0, undercounting every bucket by one.)
		histo[v]++
	}
	return histo
}
// min returns the smallest of nums, or 0 when called with no arguments.
// BUG fix: the accumulator previously started at 0, so the minimum of
// any all-positive input was always reported as 0.
func min(nums ...int) int {
	if len(nums) == 0 {
		return 0
	}
	m := nums[0]
	for _, num := range nums[1:] {
		if num < m {
			m = num
		}
	}
	return m
}
// max returns the largest of nums, or 0 when called with no arguments.
// BUG fix: the accumulator previously started at 0, so all-negative
// input was always reported as 0.
func max(nums ...int) int {
	if len(nums) == 0 {
		return 0
	}
	m := nums[0]
	for _, num := range nums[1:] {
		if num > m {
			m = num
		}
	}
	return m
}
// vbox is an axis-aligned box in the reduced (5 bits per channel)
// color space, spanning [r1,r2]x[g1,g2]x[b1,b2] inclusive, together
// with the histogram of the image it was built from.
type vbox struct {
	r1 int
	r2 int
	g1 int
	g2 int
	b1 int
	b2 int
	histo map[int]int
}
// volume returns the number of quantized color cells the box spans
// (the inclusive extent along each axis, multiplied together).
func (v *vbox) volume() int {
	return (v.r2 - v.r1 + 1) * (v.g2 - v.g1 + 1) * (v.b2 - v.b1 + 1)
}
// avg returns the population-weighted average color of the box, scaled
// back up to 8-bit channel values. An empty box falls back to the
// geometric center of the box.
func (v *vbox) avg() (r_avg, g_avg, b_avg int) {
	var ntot, r_sum, g_sum, b_sum float32
	// mult converts a reduced channel value back to the 8-bit scale.
	mult := float32(1 << (8 - sigbits))
	for i := v.r1; i < v.r2+1; i++ {
		for j := v.g1; j < v.g2+1; j++ {
			for k := v.b1; k < v.b2+1; k++ {
				index := getColorIndex(i, j, k)
				hval := float32(v.histo[index])
				ntot += hval
				// +0.5 samples the center of each quantized cell.
				r_sum += hval * (float32(i) + 0.5) * mult
				g_sum += hval * (float32(j) + 0.5) * mult
				b_sum += hval * (float32(k) + 0.5) * mult
			}
		}
	}
	if ntot > 0 {
		r_avg = int(r_sum / ntot)
		g_avg = int(g_sum / ntot)
		b_avg = int(b_sum / ntot)
	} else {
		// Empty box: use the midpoint of the box, rescaled to 8 bits.
		r_avg = int(int(mult) * (v.r1 + v.r2 + 1) / 2)
		g_avg = int(int(mult) * (v.g1 + v.g2 + 1) / 2)
		b_avg = int(int(mult) * (v.b1 + v.b2 + 1) / 2)
	}
	return r_avg, g_avg, b_avg
}
// contains reports whether pixel falls inside this box in the reduced
// color space (all three channel values within the box bounds).
func (v *vbox) contains(pixel imt.Color) bool {
	r, g, b, _ := pixel.RGBAint()
	// BUG fix: the green upper bound previously compared r instead of g
	// (`r <= v.g2`), so membership along the green axis was wrong.
	return r >= v.r1 && r <= v.r2 &&
		g >= v.g1 && g <= v.g2 &&
		b >= v.b1 && b <= v.b2
}
// count returns the total number of pixels that fall inside this box,
// summed from the quantized histogram.
func (v *vbox) count() int {
	total := 0
	for r := v.r1; r <= v.r2; r++ {
		for g := v.g1; g <= v.g2; g++ {
			for b := v.b1; b <= v.b2; b++ {
				total += v.histo[getColorIndex(r, g, b)]
			}
		}
	}
	return total
}
// vboxFromPixels builds the bounding vbox of the given pixels in the
// reduced color space, attaching the supplied histogram.
// NOTE(review): correctness depends on the package-level min/max
// helpers returning the true minimum/maximum — verify those.
func vboxFromPixels(pixels []imt.Color, histo map[int]int) vbox {
	rmin, gmin, bmin := 1000000, 1000000, 1000000
	rmax, gmax, bmax := 0, 0, 0
	for _, p := range pixels {
		r, g, b, _ := p.RGBA()
		rval := int(r >> rshift)
		gval := int(g >> rshift)
		bval := int(b >> rshift)
		rmin = min(rval, rmin)
		rmax = max(rval, rmax)
		gmin = min(gval, gmin)
		gmax = max(gval, gmax)
		bmin = min(bval, bmin)
		bmax = max(bval, bmax)
	}
	return vbox{
		r1: rmin,
		r2: rmax,
		g1: gmin,
		g2: gmax,
		b1: bmin,
		b2: bmax,
		histo: histo,
	}
}
func medianCutApply(v vbox) (v1, v2 vbox) {
if v.count() == 0 {
return v1, v2
}
rw := v.r2 - v.r1 + 1
gw := v.g2 - v.g1 + 1
bw := v.b2 - v.b1 + 1
maxw := max(rw, gw, bw)
if v.count() == 1 {
return v, v2
}
var cutColor string
var total, sum int
partialsum := make(map[int]int)
lookaheadsum := make(map[int]int)
if maxw == rw {
cutColor = "r"
for i := v.r1; i < v.r2+1; i++ {
sum = 0
for j := v.g1; j < v.g2+1; j++ {
for k := v.b1; k < v.b2+1; k++ {
index := getColorIndex(i, j, k)
sum += v.histo[index]
}
}
total += sum
partialsum[i] = total
}
} else if maxw == gw {
cutColor = "g"
for i := v.g1; i < v.g2+1; i++ {
sum = 0
for j := v.r1; j < v.r2+1; j++ {
for k := v.b1; k < v.b2+1; k++ {
index := getColorIndex(j, i, k)
sum += v.histo[index]
}
}
total += sum
partialsum[i] = total
}
} else {
cutColor = "b"
for i := v.b1; i < v.b2+1; i++ {
sum = 0
for j := v.r1; j < v.r2+1; j++ {
for k := v.g1; k < v.g2+1; k++ {
index := getColorIndex(j, k, i)
sum += v.histo[index]
}
}
total += sum
partialsum[i] = total
}
}
for i, d := range partialsum {
lookaheadsum[i] = total - d
}
var dim1, dim2 int
switch cutColor {
case "r":
dim1, dim2 = v.r1, v.r2
case "g":
dim1, dim2 = v.g1, v.g2
case "b":
dim1, dim2 = v.b1, v.b2
default:
}
for i := dim1; i < dim2+1; i++ {
if partialsum[i] > (total / 2) {
vbox1 := v
vbox2 := v
left := i - dim1
right := dim2 - i
if left <= right {
d2 := min(dim2-1, i+(right/2))
} else {
d2 := max(dim1, i-1-(left/2))
}
// Avoid 0-count boxes
}
}
return v1, v2
} | mccq.go | 0.571408 | 0.410756 | mccq.go | starcoder |
package framework
import (
"fmt"
"io/ioutil"
"os"
"strings"
"time"
"github.com/loft-sh/devspace/e2e/new/kube"
"github.com/onsi/gomega"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/util/wait"
)
// ExpectEqual expects the specified two are the same, otherwise an exception raises
func ExpectEqual(actual interface{}, extra interface{}, explain ...interface{}) {
	gomega.ExpectWithOffset(1, actual).To(gomega.Equal(extra), explain...)
}
// ExpectNotEqual expects the specified two are not the same, otherwise an exception raises
func ExpectNotEqual(actual interface{}, extra interface{}, explain ...interface{}) {
	gomega.ExpectWithOffset(1, actual).NotTo(gomega.Equal(extra), explain...)
}
// ExpectError expects an error happens, otherwise an exception raises
func ExpectError(err error, explain ...interface{}) {
	gomega.ExpectWithOffset(1, err).To(gomega.HaveOccurred(), explain...)
}
// ExpectErrorMatch expects an error happens and has a message matching the given string, otherwise an exception raises
func ExpectErrorMatch(err error, msg string, explain ...interface{}) {
	gomega.ExpectWithOffset(1, err).To(gomega.HaveOccurred(), explain...)
	// NOTE(review): explain is passed both to ExpectWithOffset and To
	// here, unlike the other helpers — confirm this is intended.
	gomega.ExpectWithOffset(1, err, explain...).To(gomega.MatchError(msg), explain...)
}
// ExpectNoError checks if "err" is set, and if so, fails assertion while logging the error.
func ExpectNoError(err error, explain ...interface{}) {
	ExpectNoErrorWithOffset(1, err, explain...)
}
// ExpectNoErrorWithOffset checks if "err" is set, and if so, fails assertion while logging the error at "offset" levels above its caller
// (for example, for call chain f -> g -> ExpectNoErrorWithOffset(1, ...) error would be logged for "f").
func ExpectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
	gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...)
}
// ExpectConsistOf expects actual contains precisely the extra elements. The ordering of the elements does not matter.
func ExpectConsistOf(actual interface{}, extra interface{}, explain ...interface{}) {
	gomega.ExpectWithOffset(1, actual).To(gomega.ConsistOf(extra), explain...)
}
// ExpectHaveKey expects the actual map has the key in the keyset
func ExpectHaveKey(actual interface{}, key interface{}, explain ...interface{}) {
	gomega.ExpectWithOffset(1, actual).To(gomega.HaveKey(key), explain...)
}
// ExpectEmpty expects actual is empty
func ExpectEmpty(actual interface{}, explain ...interface{}) {
	gomega.ExpectWithOffset(1, actual).To(gomega.BeEmpty(), explain...)
}
// ExpectRemoteFileContents polls (every second, for up to two minutes) until
// the file at filePath inside the pod selected by imageSelector in namespace
// has exactly the given contents, and fails the assertion on timeout.
func ExpectRemoteFileContents(imageSelector string, namespace string, filePath string, contents string) {
	kubeClient, err := kube.NewKubeHelper()
	ExpectNoErrorWithOffset(1, err)

	err = wait.PollImmediate(time.Second, time.Minute*2, func() (done bool, err error) {
		out, err := kubeClient.ExecByImageSelector(imageSelector, namespace, []string{"cat", filePath})
		if err != nil {
			// exec failures (pod not ready, file missing) are retried
			return false, nil
		}

		return out == contents, nil
	})
	ExpectNoErrorWithOffset(1, err)
}
// ExpectRemoteFileNotFound asserts that filePath does not exist inside the
// pod selected by imageSelector in namespace. It polls every second for up
// to two minutes, but aborts immediately if the probe sees the file.
func ExpectRemoteFileNotFound(imageSelector string, namespace string, filePath string) {
	kubeClient, err := kube.NewKubeHelper()
	ExpectNoErrorWithOffset(1, err)

	fileExists := "file exists"
	fileNotFound := "file not found"

	err = wait.PollImmediate(time.Second, time.Minute*2, func() (done bool, err error) {
		test := []string{"sh", "-c", fmt.Sprintf("test -e %s && echo %s || echo %s", filePath, fileExists, fileNotFound)}
		out, err := kubeClient.ExecByImageSelector(imageSelector, namespace, test)
		if err != nil {
			return false, err
		}

		out = strings.Trim(out, "\n")
		if out == fileExists {
			// NOTE(review): returning an error stops the poll on the first
			// sighting of the file instead of waiting for it to disappear —
			// confirm this fail-fast behavior is intended.
			return false, errors.New("file should not exist")
		}

		return out == fileNotFound, nil
	})
	ExpectNoErrorWithOffset(1, err)
}
// ExpectRemoteContainerFileContents polls (every second, for up to two
// minutes) until the file at filePath inside the named container of the pod
// selected by labelSelector in namespace has exactly the given contents.
func ExpectRemoteContainerFileContents(labelSelector, container string, namespace string, filePath string, contents string) {
	kubeClient, err := kube.NewKubeHelper()
	ExpectNoErrorWithOffset(1, err)

	err = wait.PollImmediate(time.Second, time.Minute*2, func() (done bool, err error) {
		out, err := kubeClient.ExecByContainer(labelSelector, container, namespace, []string{"cat", filePath})
		if err != nil {
			// exec failures (pod not ready, file missing) are retried
			return false, nil
		}

		return out == contents, nil
	})
	ExpectNoErrorWithOffset(1, err)
}
// ExpectLocalFileContents polls (every second, for up to two minutes) until
// the local file at filePath has exactly the given contents. A missing file
// is retried; any other read error aborts the poll.
func ExpectLocalFileContents(filePath string, contents string) {
	err := wait.PollImmediate(time.Second, time.Minute*2, func() (done bool, err error) {
		out, err := ioutil.ReadFile(filePath)
		if err != nil {
			if !os.IsNotExist(err) {
				return false, err
			}

			return false, nil
		}

		return string(out) == contents, nil
	})
	ExpectNoErrorWithOffset(1, err)
}
func ExpectLocalFileNotFound(filePath string) {
_, err := os.Stat(filePath)
gomega.ExpectWithOffset(1, os.IsNotExist(err)).Should(gomega.BeTrue())
} | e2e/new/framework/helper.go | 0.688678 | 0.509337 | helper.go | starcoder |
package memory
import (
"io"
"sort"
"github.com/jccatrinck/cartesian/services/points"
"github.com/jccatrinck/cartesian/services/points/model"
)
// memoryPoints is an in-memory implementation of points.Storage: it keeps
// all loaded points plus two index slices holding point indices sorted by
// the X and Y coordinate respectively.
type memoryPoints struct {
	points []model.Point
	xAxis  []int
	yAxis  []int
}
// LoadPoints implements points.Storage interface.
// It reads all points from reader via the points walker and builds the two
// sorted index slices (xAxis, yAxis) that GetPointsByDistance range-scans.
func (m *memoryPoints) LoadPoints(reader io.ReadSeeker) (err error) {
	pw, err := points.NewWalker(reader)
	if err != nil {
		return
	}

	err = pw.Run(func(chunk []model.Point) (err error) {
		m.points = append(m.points, chunk...)
		return
	})
	if err != nil {
		return
	}

	// Build both axis indexes with one shared helper (the original
	// duplicated this block for X and Y).
	m.xAxis = m.sortedIndex(func(p model.Point) int { return p.X })
	m.yAxis = m.sortedIndex(func(p model.Point) int { return p.Y })

	return
}

// sortedIndex returns the indices of m.points sorted ascending by the
// coordinate selected by key.
func (m *memoryPoints) sortedIndex(key func(model.Point) int) []int {
	idx := make([]int, 0, len(m.points))
	for i := range m.points {
		idx = append(idx, i)
	}
	sort.Slice(idx, func(i, j int) bool {
		return key(m.points[idx[i]]) < key(m.points[idx[j]])
	})
	return idx
}
// GetPointsByDistance implements points.Storage interface.
// It returns, sorted by ascending distance, every stored point whose
// taxicab (Manhattan) distance to pointA is at most `distance`.
//
// Strategy: binary-search the X-sorted index for the point nearest pointA.X
// and collect every index within `distance` on the X axis; then walk the
// Y-sorted index the same way and keep the indices present in both sets,
// filtering by the exact taxicab distance.
func (m *memoryPoints) GetPointsByDistance(pointA model.Point, distance int) (relativePoints []model.RelativePoint, err error) {
	sortedList := model.RelativePointSortedList{}

	// Get nearest point relative to X as start point
	xAxisIndex, ok := divideAndConquer(m.xAxis, pointA.X, 0, func(i int) int {
		return m.points[i].X
	})

	// Taxicab circle: candidate indices whose X lies within `distance`
	xAxisDiameter := map[int]struct{}{}

	if ok {
		// Taxicab circle radius - down
		// (fixed: the loop used `i > 0`, which skipped index 0)
		for i := xAxisIndex - 1; i >= 0; i-- {
			pointIndex := m.xAxis[i]

			distanceX := pointA.X - m.points[pointIndex].X
			if distanceX > distance {
				break
			}

			xAxisDiameter[pointIndex] = struct{}{}
		}

		// Taxicab circle radius - up
		for i := xAxisIndex; i < len(m.xAxis); i++ {
			pointIndex := m.xAxis[i]

			distanceX := m.points[pointIndex].X - pointA.X
			if distanceX > distance {
				break
			}

			xAxisDiameter[pointIndex] = struct{}{}
		}
	}

	// Get nearest point relative to Y as start point
	yAxisIndex, ok := divideAndConquer(m.yAxis, pointA.Y, 0, func(i int) int {
		return m.points[i].Y
	})

	if ok {
		// Taxicab circle radius - left (walking down the Y-sorted index).
		// Y decreases here, so the gap is pointA.Y - pointB.Y; the original
		// had the operands swapped, producing a negative value that never
		// triggered the early break. Also fixed `i > 0` skipping index 0.
		for i := yAxisIndex - 1; i >= 0; i-- {
			pointIndex := m.yAxis[i]
			pointB := m.points[pointIndex]

			distanceY := pointA.Y - pointB.Y
			if distanceY > distance {
				break
			}

			if _, exists := xAxisDiameter[pointIndex]; exists {
				pointsDistance := pointA.Distance(pointB)

				if pointsDistance <= distance {
					// Add point already sorted
					sortedList.Add(model.RelativePoint{
						Point:    pointB,
						Distance: pointsDistance,
					})
				}
			}
		}

		// Taxicab circle radius - right (walking up the Y-sorted index).
		// Y increases here, so the gap is pointB.Y - pointA.Y (operands were
		// also swapped in the original).
		for i := yAxisIndex; i < len(m.yAxis); i++ {
			pointIndex := m.yAxis[i]
			pointB := m.points[pointIndex]

			distanceY := pointB.Y - pointA.Y
			if distanceY > distance {
				break
			}

			if _, exists := xAxisDiameter[pointIndex]; exists {
				pointsDistance := pointA.Distance(pointB)

				if pointsDistance <= distance {
					// Add point already sorted
					sortedList.Add(model.RelativePoint{
						Point:    pointB,
						Distance: pointsDistance,
					})
				}
			}
		}
	}

	relativePoints = sortedList.Get()

	return
}
// divideAndConquer performs a binary search over items (indices into an
// external collection, sorted ascending by getter) and returns the position
// nearest to item, plus a flag that is false only for an empty slice.
// acc is an offset added to the returned position; callers pass 0.
func divideAndConquer(items []int, item, acc int, getter func(int) int) (int, bool) {
	switch len(items) {
	case 0:
		return -1, false
	case 1:
		return 0, true
	}

	mid := len(items) / 2
	midValue := getter(items[mid])
	if midValue == item {
		return mid, true
	}

	// Compare the gap on each side of the split point and descend into the
	// half whose boundary value lies closer to item (proximity search, not
	// an exact-match search).
	if item-getter(items[mid-1]) < midValue-item {
		pos, found := divideAndConquer(items[:mid], item, acc, getter)
		return pos + acc, found
	}
	pos, found := divideAndConquer(items[mid:], item, acc, getter)
	return pos + acc + mid, found
}
package pack3d
import "github.com/fogleman/fauxgl"
// Tree is a bounding-volume hierarchy stored as a flattened complete binary
// tree: the children of node i live at indices 2i+1 and 2i+2.
type Tree []fauxgl.Box

// NewTreeForMesh builds a BVH of the given depth over the triangles of mesh.
// The mesh is copied and centered first, so the caller's mesh is not mutated.
func NewTreeForMesh(mesh *fauxgl.Mesh, depth int) Tree {
	mesh = mesh.Copy()
	mesh.Center()
	boxes := make([]fauxgl.Box, len(mesh.Triangles))
	for i, t := range mesh.Triangles {
		boxes[i] = t.BoundingBox()
	}
	root := NewNode(boxes, depth)
	// a complete binary tree of height `depth` has 2^(depth+1)-1 slots
	tree := make(Tree, 1<<uint(depth+1)-1)
	root.Flatten(tree, 0)
	return tree
}
// Transform returns a new tree with every box transformed by m; the
// receiver is left untouched.
func (a Tree) Transform(m fauxgl.Matrix) Tree {
	out := make(Tree, len(a))
	for i := range a {
		out[i] = a[i].Transform(m)
	}
	return out
}
// Intersects reports whether the two trees overlap when tree a is translated
// by t1 and tree b by t2.
func (a Tree) Intersects(b Tree, t1, t2 fauxgl.Vector) bool {
	return a.intersects(b, t1, t2, 0, 0)
}

// intersects recursively tests node i of a against node j of b. Children of
// node i live at 2i+1 and 2i+2 in the flattened tree; a child index past the
// end of the slice means the node is a leaf.
func (a Tree) intersects(b Tree, t1, t2 fauxgl.Vector, i, j int) bool {
	if !boxesIntersect(a[i], b[j], t1, t2) {
		return false
	}
	i1 := i*2 + 1
	i2 := i*2 + 2
	j1 := j*2 + 1
	j2 := j*2 + 2
	if i1 >= len(a) && j1 >= len(b) {
		// both are leaves and their boxes overlap
		return true
	} else if i1 >= len(a) {
		// a is a leaf: descend into b only
		return a.intersects(b, t1, t2, i, j1) || a.intersects(b, t1, t2, i, j2)
	} else if j1 >= len(b) {
		// b is a leaf: descend into a only
		return a.intersects(b, t1, t2, i1, j) || a.intersects(b, t1, t2, i2, j)
	} else {
		// descend into both trees
		return a.intersects(b, t1, t2, i1, j1) ||
			a.intersects(b, t1, t2, i1, j2) ||
			a.intersects(b, t1, t2, i2, j1) ||
			a.intersects(b, t1, t2, i2, j2)
	}
}
// boxesIntersect reports whether b1 translated by t1 overlaps b2 translated
// by t2. Empty boxes (unsplit child slots of the flattened tree) never
// intersect anything.
func boxesIntersect(b1, b2 fauxgl.Box, t1, t2 fauxgl.Vector) bool {
	if b1 == fauxgl.EmptyBox || b2 == fauxgl.EmptyBox {
		return false
	}
	// standard AABB separating-axis test with the translations applied inline
	return !(b1.Min.X+t1.X > b2.Max.X+t2.X ||
		b1.Max.X+t1.X < b2.Min.X+t2.X ||
		b1.Min.Y+t1.Y > b2.Max.Y+t2.Y ||
		b1.Max.Y+t1.Y < b2.Min.Y+t2.Y ||
		b1.Min.Z+t1.Z > b2.Max.Z+t2.Z ||
		b1.Max.Z+t1.Z < b2.Min.Z+t2.Z)
}
// Node is one node of the BVH under construction; Left and Right are nil
// for leaves.
type Node struct {
	Box   fauxgl.Box
	Left  *Node
	Right *Node
}

// NewNode builds a BVH node covering boxes and recursively splits it until
// depth reaches zero.
func NewNode(boxes []fauxgl.Box, depth int) *Node {
	// NOTE(review): the 2.5 padding presumably leaves clearance between
	// packed parts — confirm the unit/tolerance against the caller.
	box := fauxgl.BoxForBoxes(boxes).Offset(2.5)
	node := &Node{box, nil, nil}
	node.Split(boxes, depth)
	return node
}
// Flatten writes this subtree into tree using heap-style indexing, where
// the children of node i live at 2i+1 and 2i+2.
func (a *Node) Flatten(tree Tree, i int) {
	tree[i] = a.Box
	if left := a.Left; left != nil {
		left.Flatten(tree, i*2+1)
	}
	if right := a.Right; right != nil {
		right.Flatten(tree, i*2+2)
	}
}
// Split searches for the best axis-aligned splitting plane for boxes and,
// if one improves on the unsplit volume, partitions the boxes and builds
// the two child nodes. N sample positions are tried per axis, for both
// assignments of straddling boxes (side = false/true).
func (node *Node) Split(boxes []fauxgl.Box, depth int) {
	if depth == 0 {
		return
	}
	box := node.Box
	best := box.Volume()
	bestAxis := AxisNone
	bestPoint := 0.0
	bestSide := false
	// consider records a candidate partition when it beats the current best
	// (replaces three copy-pasted score/update blocks in the original).
	consider := func(axis Axis, point float64, side bool) {
		if s := partitionScore(boxes, axis, point, side); s < best {
			best = s
			bestAxis = axis
			bestPoint = point
			bestSide = side
		}
	}
	const N = 16
	for s := 0; s < 2; s++ {
		side := s == 1
		for i := 1; i < N; i++ {
			p := float64(i) / N
			consider(AxisX, box.Min.X+(box.Max.X-box.Min.X)*p, side)
			consider(AxisY, box.Min.Y+(box.Max.Y-box.Min.Y)*p, side)
			consider(AxisZ, box.Min.Z+(box.Max.Z-box.Min.Z)*p, side)
		}
	}
	if bestAxis == AxisNone {
		// no candidate beat the unsplit volume: stay a leaf
		return
	}
	l, r := partition(boxes, bestAxis, bestPoint, bestSide)
	node.Left = NewNode(l, depth-1)
	node.Right = NewNode(r, depth-1)
}
// partitionBox reports on which side(s) of the splitting plane (defined by
// axis and point) the box lies; a box straddling the plane is on both.
func partitionBox(box fauxgl.Box, axis Axis, point float64) (left, right bool) {
	switch axis {
	case AxisX:
		left = box.Min.X <= point
		right = box.Max.X >= point
	case AxisY:
		left = box.Min.Y <= point
		right = box.Max.Y >= point
	case AxisZ:
		left = box.Min.Z <= point
		right = box.Max.Z >= point
	}
	return
}

// partitionScore rates a candidate split: straddling boxes are assigned to
// the side chosen by `side`, and the score is the total volume of the two
// resulting bounding boxes minus their shared overlap (lower is better).
func partitionScore(boxes []fauxgl.Box, axis Axis, point float64, side bool) float64 {
	// major covers every box assigned to the chosen side
	var major fauxgl.Box
	for _, box := range boxes {
		l, r := partitionBox(box, axis, point)
		if (l && r) || (l && side) || (r && !side) {
			major = major.Extend(box)
		}
	}
	// minor covers the remaining boxes not already enclosed by major
	var minor fauxgl.Box
	for _, box := range boxes {
		if !major.ContainsBox(box) {
			minor = minor.Extend(box)
		}
	}
	return major.Volume() + minor.Volume() - major.Intersection(minor).Volume()
}
func partition(boxes []fauxgl.Box, axis Axis, point float64, side bool) (left, right []fauxgl.Box) {
var major fauxgl.Box
for _, box := range boxes {
l, r := partitionBox(box, axis, point)
if (l && r) || (l && side) || (r && !side) {
major = major.Extend(box)
}
}
for _, box := range boxes {
if major.ContainsBox(box) {
left = append(left, box)
} else {
right = append(right, box)
}
}
if !side {
left, right = right, left
}
return
} | pack3d/bvh.go | 0.599954 | 0.466967 | bvh.go | starcoder |
package mapping
import (
"bytes"
"errors"
"fmt"
"math"
enc "github.com/KoddiDev/sketches-go/ddsketch/encoding"
"github.com/KoddiDev/sketches-go/ddsketch/pb/sketchpb"
)
// A, B and C are the coefficients of the cubic polynomial ((A*s+B)*s+C)*s
// used by approximateLog to interpolate log2 between consecutive powers of
// two (s is the significand minus one).
const (
	A = 6.0 / 35.0
	B = -3.0 / 5.0
	C = 10.0 / 7.0
)
// A fast IndexMapping that approximates the memory-optimal LogarithmicMapping by extracting the floor value
// of the logarithm to the base 2 from the binary representations of floating-point values and cubically
// interpolating the logarithm in-between.
// More detailed documentation of this method can be found in:
// <a href="https://github.com/DataDog/sketches-java/">sketches-java</a>
type CubicallyInterpolatedMapping struct {
	relativeAccuracy      float64 // guaranteed relative accuracy of the mapping
	multiplier            float64 // scales approximateLog(v) to a bucket index
	normalizedIndexOffset float64 // shift applied after scaling
}
// NewCubicallyInterpolatedMapping returns a mapping with the given relative
// accuracy, which must lie strictly between 0 and 1.
func NewCubicallyInterpolatedMapping(relativeAccuracy float64) (*CubicallyInterpolatedMapping, error) {
	if relativeAccuracy <= 0 || relativeAccuracy >= 1 {
		// error strings are lowercase without trailing punctuation (ST1005)
		return nil, errors.New("the relative accuracy must be between 0 and 1")
	}
	return &CubicallyInterpolatedMapping{
		relativeAccuracy: relativeAccuracy,
		multiplier:       7.0 / (10 * math.Log1p(2*relativeAccuracy/(1-relativeAccuracy))),
	}, nil
}
// NewCubicallyInterpolatedMappingWithGamma builds a mapping directly from
// the bucket growth factor gamma (must be > 1) and an index offset, as used
// when decoding a serialized sketch.
func NewCubicallyInterpolatedMappingWithGamma(gamma, indexOffset float64) (*CubicallyInterpolatedMapping, error) {
	if gamma <= 1 {
		// error strings are lowercase without trailing punctuation (ST1005)
		return nil, errors.New("gamma must be greater than 1")
	}
	m := CubicallyInterpolatedMapping{
		relativeAccuracy: 1 - 2/(1+math.Exp(7.0/10*math.Log2(gamma))),
		multiplier:       1 / math.Log2(gamma),
	}
	m.normalizedIndexOffset = indexOffset - m.approximateLog(1)*m.multiplier
	return &m, nil
}
// Equals reports whether other is also a CubicallyInterpolatedMapping with
// the same multiplier and index offset, up to a small numeric tolerance.
func (m *CubicallyInterpolatedMapping) Equals(other IndexMapping) bool {
	o, ok := other.(*CubicallyInterpolatedMapping)
	if !ok {
		return false
	}
	tol := 1e-12
	return (withinTolerance(m.multiplier, o.multiplier, tol) && withinTolerance(m.normalizedIndexOffset, o.normalizedIndexOffset, tol))
}
// Index returns the bucket index for value: the approximate log2 scaled by
// the multiplier and shifted by the normalized offset, rounded down.
func (m *CubicallyInterpolatedMapping) Index(value float64) int {
	idx := m.approximateLog(value)*m.multiplier + m.normalizedIndexOffset
	if idx < 0 {
		// int() truncates toward zero; subtract one so negative indices
		// round down (an exact negative integer idx maps to idx-1, matching
		// the reference DDSketch implementations).
		return int(idx) - 1
	}
	return int(idx)
}
// Value returns a representative value for the bucket at index: the
// bucket's lower bound inflated by the relative accuracy.
func (m *CubicallyInterpolatedMapping) Value(index int) float64 {
	return m.LowerBound(index) * (1 + m.relativeAccuracy)
}

// LowerBound returns the smallest value that maps to the bucket at index.
func (m *CubicallyInterpolatedMapping) LowerBound(index int) float64 {
	return m.approximateInverseLog((float64(index) - m.normalizedIndexOffset) / m.multiplier)
}
// approximateLog returns a cubic approximation of log2(x): the exponent is
// read directly from the float's bit pattern and the fractional part is
// interpolated from the significand with the polynomial A, B, C.
func (m *CubicallyInterpolatedMapping) approximateLog(x float64) float64 {
	bits := math.Float64bits(x)
	e := getExponent(bits)
	s := getSignificandPlusOne(bits) - 1
	return ((A*s+B)*s+C)*s + e
}
// approximateInverseLog is the exact inverse of approximateLog: it solves
// the interpolation cubic for the significand via Cardano's formula and
// reassembles the float from exponent and significand.
func (m *CubicallyInterpolatedMapping) approximateInverseLog(x float64) float64 {
	exponent := math.Floor(x)
	// Derived from Cardano's formula
	d0 := B*B - 3*A*C
	d1 := 2*B*B*B - 9*A*B*C - 27*A*A*(x-exponent)
	p := math.Cbrt((d1 - math.Sqrt(d1*d1-4*d0*d0*d0)) / 2)
	significandPlusOne := -(B+p+d0/p)/(3*A) + 1
	return buildFloat64(int(exponent), significandPlusOne)
}
// MinIndexableValue returns the smallest value the mapping can index
// without underflowing the int32 index range or subnormal floats.
func (m *CubicallyInterpolatedMapping) MinIndexableValue() float64 {
	return math.Max(
		math.Exp2((math.MinInt32-m.normalizedIndexOffset)/m.multiplier-m.approximateLog(1)+1), // so that index >= MinInt32
		minNormalFloat64*(1+m.relativeAccuracy)/(1-m.relativeAccuracy),
	)
}

// MaxIndexableValue returns the largest value the mapping can index
// without overflowing the int32 index range or math.Exp.
func (m *CubicallyInterpolatedMapping) MaxIndexableValue() float64 {
	return math.Min(
		math.Exp2((math.MaxInt32-m.normalizedIndexOffset)/m.multiplier-m.approximateLog(float64(1))-1), // so that index <= MaxInt32
		math.Exp(expOverflow)/(1+m.relativeAccuracy), // so that math.Exp does not overflow
	)
}
// RelativeAccuracy returns the guaranteed relative accuracy of the mapping.
func (m *CubicallyInterpolatedMapping) RelativeAccuracy() float64 {
	return m.relativeAccuracy
}

// gamma returns the bucket growth factor implied by the multiplier.
func (m *CubicallyInterpolatedMapping) gamma() float64 {
	return math.Exp2(1 / m.multiplier)
}
// ToProto serializes the mapping, recovering the canonical (gamma,
// indexOffset) parameters from the internal normalized representation.
func (m *CubicallyInterpolatedMapping) ToProto() *sketchpb.IndexMapping {
	return &sketchpb.IndexMapping{
		Gamma:         m.gamma(),
		IndexOffset:   m.normalizedIndexOffset + m.approximateLog(1)*m.multiplier,
		Interpolation: sketchpb.IndexMapping_CUBIC,
	}
}
// Encode appends the mapping to b in the binary sketch format: a flag byte
// followed by gamma and the normalized index offset, little-endian.
func (m *CubicallyInterpolatedMapping) Encode(b *[]byte) {
	enc.EncodeFlag(b, enc.FlagIndexMappingBaseCubic)
	enc.EncodeFloat64LE(b, m.gamma())
	enc.EncodeFloat64LE(b, m.normalizedIndexOffset)
}
// string returns a debug representation of the mapping's internal state.
func (m *CubicallyInterpolatedMapping) string() string {
	var buffer bytes.Buffer
	fmt.Fprintf(&buffer, "relativeAccuracy: %v, multiplier: %v, normalizedIndexOffset: %v\n", m.relativeAccuracy, m.multiplier, m.normalizedIndexOffset)
	return buffer.String()
}
var _ IndexMapping = (*CubicallyInterpolatedMapping)(nil) | ddsketch/mapping/cubically_interpolated_mapping.go | 0.873849 | 0.441191 | cubically_interpolated_mapping.go | starcoder |
// Package co provides holiday definitions for Colombia.
package co
import (
"time"
"github.com/Tamh/cal/v2"
"github.com/Tamh/cal/v2/aa"
)
var (
	// AñoNuevo represents New Year's Day on 1-Jan
	AñoNuevo = aa.NewYear.Clone(&cal.Holiday{Name: "Año Nuevo", Type: cal.ObservancePublic})

	// Reyes represents Epiphany on 6-Jan moved to next Monday
	Reyes = &cal.Holiday{
		Name:    "Día de Reyes",
		Type:    cal.ObservancePublic,
		Month:   aa.Epiphany.Month,
		Day:     aa.Epiphany.Day,
		Weekday: time.Monday,
		Offset:  1,
		Func:    cal.CalcWeekdayFrom,
	}

	// DomingoDeRamos represents Palm Sunday on the Sunday before Easter
	DomingoDeRamos = &cal.Holiday{
		Name:   "Domingo de Ramos",
		Offset: -7,
		Func:   cal.CalcEasterOffset,
		Type:   cal.ObservanceOther,
	}

	// JuevesSanto represents Maundy Thursday on the Thursday before Easter
	JuevesSanto = aa.MaundyThursday.Clone(&cal.Holiday{Name: "Jueves Santo", Type: cal.ObservancePublic})

	// ViernesSanto represents Good Friday on the Friday before Easter
	ViernesSanto = aa.GoodFriday.Clone(&cal.Holiday{Name: "Viernes Santo", Type: cal.ObservancePublic})

	// Pascua represents Easter
	Pascua = &cal.Holiday{
		Name:   "Pascua",
		Offset: 0,
		Func:   cal.CalcEasterOffset,
		Type:   cal.ObservanceOther,
	}

	// DíaAscension represents Day of Ascention on the Monday 43 days after Easter
	DíaAscension = &cal.Holiday{
		Name:   "Día de la Ascensión",
		Offset: 43,
		Func:   cal.CalcEasterOffset,
		Type:   cal.ObservancePublic,
	}

	// CorpusChristi represents Corpus Christi on the Monday 64 days after Easter
	CorpusChristi = &cal.Holiday{
		Name:   "Corpus Christi",
		Offset: 64,
		Func:   cal.CalcEasterOffset,
		Type:   cal.ObservancePublic,
	}

	// SagradoCorazon represents Sacred Heart on the Monday 71 days after Easter
	SagradoCorazon = &cal.Holiday{
		Name:   "<NAME>",
		Offset: 71,
		Func:   cal.CalcEasterOffset,
		Type:   cal.ObservancePublic,
	}

	// DíaMujer represents Women's Day on 8-Mar
	DíaMujer = &cal.Holiday{
		Name:  "<NAME>",
		Type:  cal.ObservanceOther,
		Month: time.March,
		Day:   8,
		Func:  cal.CalcDayOfMonth,
	}
	// DíaSanJose represents Saint Joseph's Day on 19-Mar moved to next Monday
	DíaSanJose = &cal.Holiday{
		Name:    "<NAME>",
		Type:    cal.ObservancePublic,
		Month:   time.March,
		Day:     19,
		Weekday: time.Monday,
		Offset:  1,
		Func:    cal.CalcWeekdayFrom,
	}

	// DíaIdioma represents Day of Language on 23-Apr
	DíaIdioma = &cal.Holiday{
		Name:  "<NAME>",
		Type:  cal.ObservanceOther,
		Month: time.April,
		Day:   23,
		Func:  cal.CalcDayOfMonth,
	}

	// DíaNino represents Children's Day on the last Saturday of April
	DíaNino = &cal.Holiday{
		Name:    "<NAME>",
		Type:    cal.ObservanceOther,
		Month:   time.April,
		Weekday: time.Saturday,
		Offset:  -1,
		Func:    cal.CalcWeekdayOffset,
	}

	// Trabajo represents Labour Day on 1-May
	Trabajo = aa.WorkersDay.Clone(&cal.Holiday{Name: "<NAME>abajo", Type: cal.ObservancePublic})

	// DíaMadre represents Mother's Day on the second Sunday of May
	DíaMadre = &cal.Holiday{
		Name:    "<NAME>",
		Type:    cal.ObservanceOther,
		Month:   time.May,
		Weekday: time.Sunday,
		Offset:  2,
		Func:    cal.CalcWeekdayOffset,
	}

	// DíaPadre represents Father's Day on the third Sunday of June
	// (2018 is excluded; see DíaPadre2018)
	DíaPadre = &cal.Holiday{
		Name:    "<NAME>",
		Type:    cal.ObservanceOther,
		Month:   time.June,
		Weekday: time.Sunday,
		Offset:  3,
		Func:    cal.CalcWeekdayOffset,
		Except:  []int{2018},
	}

	// DíaPadre2018 represents Father's Day on the fourth Sunday of June 2018
	DíaPadre2018 = &cal.Holiday{
		Name:      "<NAME>",
		Type:      cal.ObservanceOther,
		Month:     time.June,
		Weekday:   time.Sunday,
		Offset:    4,
		Func:      cal.CalcWeekdayOffset,
		StartYear: 2018,
		EndYear:   2018,
	}

	// SanPedroSanPablo represents the Feast of Saint Peter and Saint Paul on 29-Jun moved to next Monday
	SanPedroSanPablo = &cal.Holiday{
		Name:    "<NAME> <NAME>",
		Type:    cal.ObservancePublic,
		Month:   time.June,
		Day:     29,
		Weekday: time.Monday,
		Offset:  1,
		Func:    cal.CalcWeekdayFrom,
	}
	// Independencia represents Independence Day on 20-Jul
	Independencia = &cal.Holiday{
		Name:  "Día de la Independencia",
		Type:  cal.ObservancePublic,
		Month: time.July,
		Day:   20,
		Func:  cal.CalcDayOfMonth,
	}

	// BatallaBoyaca represents Battle of Boyaca Day on 7-Aug
	BatallaBoyaca = &cal.Holiday{
		Name:  "<NAME>",
		Type:  cal.ObservancePublic,
		Month: time.August,
		Day:   7,
		Func:  cal.CalcDayOfMonth,
	}

	// Asunción represents Assumption of Mary on 15-Aug moved to next Monday
	Asunción = &cal.Holiday{
		Name:    "<NAME>",
		Type:    cal.ObservancePublic,
		Month:   aa.AssumptionOfMary.Month,
		Day:     aa.AssumptionOfMary.Day,
		Weekday: time.Monday,
		Offset:  1,
		Func:    cal.CalcWeekdayFrom,
	}

	// DíaAmorAmistad represents Love and Friendship Day on the third Saturday of September
	DíaAmorAmistad = &cal.Holiday{
		Name:    "Día del Amor y la Amistad",
		Type:    cal.ObservanceOther,
		Month:   time.September,
		Weekday: time.Saturday,
		Offset:  3,
		Func:    cal.CalcWeekdayOffset,
	}

	// DíaRaza represents Columbus Day on 12-Oct moved to next Monday
	// NOTE(review): this is the only moved holiday without a Type field (it
	// defaults to the zero Observance) — confirm ObservancePublic was not intended.
	DíaRaza = &cal.Holiday{
		Name:    "<NAME>",
		Month:   time.October,
		Day:     12,
		Weekday: time.Monday,
		Offset:  1,
		Func:    cal.CalcWeekdayFrom,
	}

	// TodosLosSantos represents All Saints' Day on 1-Nov moved to next Monday
	TodosLosSantos = &cal.Holiday{
		Name:    "<NAME>",
		Type:    cal.ObservancePublic,
		Month:   aa.AllSaintsDay.Month,
		Day:     aa.AllSaintsDay.Day,
		Weekday: time.Monday,
		Offset:  1,
		Func:    cal.CalcWeekdayFrom,
	}

	// IndependenciaCartagena represents Cartagena's Independence Day on 11-Nov moved to next Monday
	IndependenciaCartagena = &cal.Holiday{
		Name:    "<NAME>",
		Type:    cal.ObservancePublic,
		Month:   time.November,
		Day:     11,
		Weekday: time.Monday,
		Offset:  1,
		Func:    cal.CalcWeekdayFrom,
	}
	// DíaMujerColombiana represents Colombian Women's Day on 14-Nov
	DíaMujerColombiana = &cal.Holiday{
		Name:  "<NAME>",
		Type:  cal.ObservanceOther,
		Month: time.November,
		Day:   14,
		Func:  cal.CalcDayOfMonth,
	}

	// VísperaInmaculadaConcepción represents Eve of Immaculate Conception on 7-Dec
	VísperaInmaculadaConcepción = &cal.Holiday{
		Name:  "Víspera de la Inmaculada Concepción",
		Type:  cal.ObservanceOther,
		Month: time.December,
		Day:   7,
		Func:  cal.CalcDayOfMonth,
	}

	// InmaculadaConcepción represents Immaculate Conception on 8-Dec
	InmaculadaConcepción = aa.ImmaculateConception.Clone(&cal.Holiday{Name: "Inmaculada Concepción", Type: cal.ObservancePublic})

	// Nochebuena represents Christmas Eve on 24-Dec
	Nochebuena = &cal.Holiday{
		Name:  "Nochebuena",
		Type:  cal.ObservanceOther,
		Month: time.December,
		Day:   24,
		Func:  cal.CalcDayOfMonth,
	}

	// Navidad represents Christmas Day on 25-Dec
	Navidad = aa.ChristmasDay.Clone(&cal.Holiday{Name: "Navidad", Type: cal.ObservancePublic})

	// AñoViejo represents New Year's Eve on 31-Dec
	AñoViejo = &cal.Holiday{
		Name:  "<NAME>",
		Type:  cal.ObservanceOther,
		Month: time.December,
		Day:   31,
		Func:  cal.CalcDayOfMonth,
	}

	// Holidays provides a list of the standard national holidays
	Holidays = []*cal.Holiday{
		AñoNuevo,
		Reyes,
		DomingoDeRamos,
		JuevesSanto,
		ViernesSanto,
		Pascua,
		DíaAscension,
		CorpusChristi,
		SagradoCorazon,
		DíaMujer,
		DíaSanJose,
		DíaIdioma,
		DíaNino,
		Trabajo,
		DíaMadre,
		DíaPadre,
		DíaPadre2018,
		SanPedroSanPablo,
		Independencia,
		BatallaBoyaca,
		Asunción,
		DíaAmorAmistad,
		DíaRaza,
		TodosLosSantos,
		IndependenciaCartagena,
		DíaMujerColombiana,
		VísperaInmaculadaConcepción,
		InmaculadaConcepción,
		Nochebuena,
		Navidad,
		AñoViejo,
	}
) | v2/co/co_holidays.go | 0.522446 | 0.644141 | co_holidays.go | starcoder |
package lo
// Keys creates an array of the map keys. Ordering follows Go's map
// iteration order and is therefore unspecified.
func Keys[K comparable, V any](in map[K]V) []K {
	keys := make([]K, 0, len(in))
	for key := range in {
		keys = append(keys, key)
	}
	return keys
}
// Values creates an array of the map values. Ordering follows Go's map
// iteration order and is therefore unspecified.
func Values[K comparable, V any](in map[K]V) []V {
	values := make([]V, 0, len(in))
	for _, value := range in {
		values = append(values, value)
	}
	return values
}
// PickBy returns a new map holding only the entries for which predicate
// reports true.
func PickBy[K comparable, V any](in map[K]V, predicate func(K, V) bool) map[K]V {
	out := map[K]V{}
	for key, value := range in {
		if predicate(key, value) {
			out[key] = value
		}
	}
	return out
}
// PickByKeys returns same map type filtered by given keys: only entries
// whose key appears in keys are kept.
func PickByKeys[K comparable, V any](in map[K]V, keys []K) map[K]V {
	r := map[K]V{}
	for k, v := range in {
		if Contains(keys, k) {
			r[k] = v
		}
	}
	return r
}

// PickByValues returns same map type filtered by given values: only entries
// whose value appears in values are kept.
func PickByValues[K comparable, V comparable](in map[K]V, values []V) map[K]V {
	r := map[K]V{}
	for k, v := range in {
		if Contains(values, v) {
			r[k] = v
		}
	}
	return r
}
// OmitBy returns a new map without the entries for which predicate reports
// true (the complement of PickBy).
func OmitBy[K comparable, V any](in map[K]V, predicate func(K, V) bool) map[K]V {
	out := map[K]V{}
	for key, value := range in {
		if predicate(key, value) {
			continue
		}
		out[key] = value
	}
	return out
}
// OmitByKeys returns same map type filtered by given keys: entries whose
// key appears in keys are dropped.
func OmitByKeys[K comparable, V any](in map[K]V, keys []K) map[K]V {
	r := map[K]V{}
	for k, v := range in {
		if !Contains(keys, k) {
			r[k] = v
		}
	}
	return r
}

// OmitByValues returns same map type filtered by given values: entries
// whose value appears in values are dropped.
func OmitByValues[K comparable, V comparable](in map[K]V, values []V) map[K]V {
	r := map[K]V{}
	for k, v := range in {
		if !Contains(values, v) {
			r[k] = v
		}
	}
	return r
}
// Entries transforms a map into array of key/value pairs. Ordering follows
// Go's map iteration order and is therefore unspecified.
func Entries[K comparable, V any](in map[K]V) []Entry[K, V] {
	entries := make([]Entry[K, V], 0, len(in))

	for k, v := range in {
		entries = append(entries, Entry[K, V]{
			Key:   k,
			Value: v,
		})
	}

	return entries
}

// FromEntries transforms an array of key/value pairs into a map. Duplicate
// keys are resolved by later entries overwriting earlier ones.
func FromEntries[K comparable, V any](entries []Entry[K, V]) map[K]V {
	out := map[K]V{}

	for _, v := range entries {
		out[v.Key] = v.Value
	}

	return out
}
// Invert creates a map composed of the inverted keys and values. If map
// contains duplicate values, subsequent values overwrite property assignments
// of previous values.
func Invert[K comparable, V comparable](in map[K]V) map[V]K {
	inverted := make(map[V]K, len(in))
	for key, value := range in {
		inverted[value] = key
	}
	return inverted
}
// Assign merges multiple maps from left to right: on key collisions,
// entries from later maps overwrite earlier ones.
func Assign[K comparable, V any](maps ...map[K]V) map[K]V {
	merged := map[K]V{}
	for _, source := range maps {
		for key, value := range source {
			merged[key] = value
		}
	}
	return merged
}
// MapKeys manipulates a map keys and transforms it to a map of another type.
// The iteratee receives (value, key) and returns the new key; colliding new
// keys overwrite each other in unspecified order.
func MapKeys[K comparable, V any, R comparable](in map[K]V, iteratee func(V, K) R) map[R]V {
	mapped := make(map[R]V, len(in))
	for key, value := range in {
		mapped[iteratee(value, key)] = value
	}
	return mapped
}
// MapValues manipulates a map values and transforms it to a map of another
// type, keeping the original keys.
func MapValues[K comparable, V any, R any](in map[K]V, iteratee func(V, K) R) map[K]R {
	mapped := make(map[K]R, len(in))
	for key, value := range in {
		mapped[key] = iteratee(value, key)
	}
	return mapped
}
package bayesfactor
import (
"math"
. "pkg/distributions"
)
// CreateLikelihood builds a Likelihood from its serializable definition.
// Supported names and their Params layout:
//   noncentral_d:  [d, n]
//   noncentral_d2: [d, n1, n2]
//   normal:        [mean, sd]
//   binomial:      [successes, trials]
//   noncentral_t:  [t, df]
//   student_t:     [mean, sd, df]
// An unrecognized name yields the zero Likelihood (nil Function) — callers
// must ensure the name is valid.
func CreateLikelihood(likelihood LikelihoodDefinition) Likelihood {
	var data Likelihood
	switch likelihood.Name {
	case "noncentral_d":
		d := likelihood.Params[0]
		n := likelihood.Params[1]
		fun := NoncentralDLikelihood(d, n)
		data.Function = fun
		data.Name = "noncentral_d"
	case "noncentral_d2":
		d := likelihood.Params[0]
		n1 := likelihood.Params[1]
		n2 := likelihood.Params[2]
		fun := NoncentralD2Likelihood(d, n1, n2)
		data.Function = fun
		data.Name = "noncentral_d2"
	case "normal":
		mean := likelihood.Params[0]
		sd := likelihood.Params[1]
		fun := NormalLikelihood(mean, sd)
		data.Function = fun
		data.Name = "normal"
	case "binomial":
		successes := likelihood.Params[0]
		trials := likelihood.Params[1]
		data.Function = BinomialLikelihood(successes, trials)
		data.Name = "binomial"
	case "noncentral_t":
		t := likelihood.Params[0]
		df := likelihood.Params[1]
		data.Function = NoncentralTLikelihood(t, df)
		data.Name = "noncentral_t"
	case "student_t":
		mean := likelihood.Params[0]
		sd := likelihood.Params[1]
		df := likelihood.Params[2]
		fun := StudentTLikelihood(mean, sd, df)
		data.Function = fun
		data.Name = "student_t"
	}
	return data
}
// CreatePrior builds a Prior from its serializable definition.
// Supported names and their Params layout:
//   cauchy:    [location, scale, min, max]
//   normal:    [mean, sd, min, max]
//   beta:      [alpha, beta]          (support fixed to [0, 1])
//   uniform:   [alpha, beta]
//   student_t: [mean, sd, df, min, max]
//   point:     [point]
// An unrecognized name yields the zero Prior (nil Function).
func CreatePrior(priorDefinition PriorDefinition) Prior {
	var prior Prior
	switch priorDefinition.Name {
	case "cauchy":
		location := priorDefinition.Params[0]
		scale := priorDefinition.Params[1]
		min := priorDefinition.Params[2]
		max := priorDefinition.Params[3]
		prior = CauchyPrior(location, scale, min, max)
	case "normal":
		mean := priorDefinition.Params[0]
		sd := priorDefinition.Params[1]
		min := priorDefinition.Params[2]
		max := priorDefinition.Params[3]
		prior = NormalPrior(mean, sd, min, max)
	case "beta":
		alpha := priorDefinition.Params[0]
		beta := priorDefinition.Params[1]
		prior = BetaPrior(alpha, beta, 0, 1)
	case "uniform":
		alpha := priorDefinition.Params[0]
		beta := priorDefinition.Params[1]
		prior = UniformPrior(alpha, beta)
	case "student_t":
		mean := priorDefinition.Params[0]
		sd := priorDefinition.Params[1]
		df := priorDefinition.Params[2]
		min := priorDefinition.Params[3]
		max := priorDefinition.Params[4]
		prior = StudentTPrior(mean, sd, df, min, max)
	case "point":
		point := priorDefinition.Params[0]
		prior = PointPrior(point)
	}
	return prior
}
// Bayesfactor computes the Bayes factor comparing the alternative and null
// models: the ratio of their marginal likelihoods (areas under
// likelihood x prior). The returned error is currently always nil.
func Bayesfactor(likelihood LikelihoodDefinition, altprior PriorDefinition, nullprior PriorDefinition) (float64, error) {
	altModel := Pp(likelihood, altprior)
	nullModel := Pp(likelihood, nullprior)
	bf := altModel.Auc / nullModel.Auc
	return bf, nil
}
// Types

// LikelihoodDefinition is the serializable description of a likelihood;
// see CreateLikelihood for the meaning of Params per Name.
type LikelihoodDefinition struct {
	Name   string
	Params []float64
}

// PriorDefinition is the serializable description of a prior;
// see CreatePrior for the meaning of Params per Name.
type PriorDefinition struct {
	Name   string
	Params []float64
}

// Output types

// Predictive bundles a likelihood, a prior, their pointwise product, and
// the area under that product (the marginal likelihood).
type Predictive struct {
	Function   func(x float64) float64
	Auc        float64
	Likelihood func(x float64) float64
	Prior      func(x float64) float64
}

// Prior is a prior density together with its name.
type Prior struct {
	Function func(x float64) float64
	Name     string
	point    float64 // this is only used for the point prior because floating point :(
}

// Likelihood is a likelihood function together with its name.
type Likelihood struct {
	Function func(x float64) float64
	Name     string
}
// Helper functions

// inrange is an indicator function: it returns 1 when x lies in the closed
// interval [min, max] and 0 otherwise.
func inrange(x float64, min float64, max float64) float64 {
	if x < min || x > max {
		return 0
	}
	return 1
}
// mult returns the pointwise product of likelihood and prior, i.e. the
// unnormalized posterior density.
func mult(likelihood func(x float64) float64, prior func(x float64) float64) func(x float64) float64 {
	return func(x float64) float64 {
		l, p := likelihood(x), prior(x)
		return l * p
	}
}
// Pp builds the posterior-predictive bundle for a likelihood/prior pair:
// it multiplies the two densities and integrates the product to obtain the
// marginal likelihood (Auc).
func Pp(likelihoodDef LikelihoodDefinition, priorDef PriorDefinition) Predictive {
	likelihood := CreateLikelihood(likelihoodDef)
	prior := CreatePrior(priorDef)

	var prod func(x float64) float64
	likelihoodFunction := likelihood.Function
	prod = mult(likelihoodFunction, prior.Function)

	var pred Predictive
	pred.Likelihood = likelihood.Function
	pred.Prior = prior.Function
	pred.Function = prod

	// handle point priors: the marginal is the likelihood at the point mass
	if prior.Name == "point" {
		pred.Auc = likelihoodFunction(prior.point)
		return pred
	}

	// handle binomial likelihoods: the parameter is a probability in [0, 1]
	if likelihood.Name == "binomial" {
		pred.Auc = Integrate(prod, 0, 1)
		return pred
	}

	// handle general case: integrate over the whole real line
	pred.Auc = Integrate(prod, math.Inf(-1), math.Inf(1))
	return pred
}
// normal likelihood: density of Normal(mean, sd) evaluated at the parameter x
func NormalLikelihood(mean float64, sd float64) func(x float64) float64 {
	return func(x float64) float64 {
		return Dnorm(x, mean, sd)
	}
}

// student-t likelihood: scaled/shifted t density with df degrees of freedom
func StudentTLikelihood(mean float64, sd float64, df float64) func(x float64) float64 {
	return func(x float64) float64 {
		return Scaled_shifted_t(x, mean, sd, df)
	}
}

// noncentral t likelihood: density of the observed t statistic given
// noncentrality parameter x
func NoncentralTLikelihood(t float64, df float64) func(x float64) float64 {
	return func(x float64) float64 {
		return Dt(t, df, x)
	}
}

// noncentral d likelihood: one-sample effect size d with n observations,
// rescaled to the corresponding t statistic
func NoncentralDLikelihood(d float64, n float64) func(x float64) float64 {
	df := n - 1
	return func(x float64) float64 {
		return Dt(d*math.Sqrt(df+1), df, math.Sqrt(df+1)*x)
	}
}

// noncentral d2 likelihood: two-sample effect size d with group sizes n1, n2
func NoncentralD2Likelihood(d float64, n1 float64, n2 float64) func(x float64) float64 {
	return func(x float64) float64 {
		return Dt(d/math.Sqrt((1/n1)+(1/n2)), n1+n2-2, x*math.Sqrt((n1*n2)/(n1+n2)))
	}
}

// binomial likelihood: probability of `successes` in `trials` given success
// probability x
func BinomialLikelihood(successes float64, trials float64) func(x float64) float64 {
	return func(x float64) float64 {
		return Dbinom(successes, trials, x)
	}
}
// NormalPrior constructs a Normal(mean, sd) prior truncated to [min, max]
// and renormalized so it still integrates to 1.
func NormalPrior(mean float64, sd float64, min float64, max float64) Prior {
	// If min and max are +/-Inf then no truncation is needed (k = 1);
	// otherwise, integrate and normalize.
	if min == math.Inf(-1) && max == math.Inf(1) {
		var prior Prior
		prior.point = 0
		prior.Function = func(x float64) float64 {
			return Dnorm(x, mean, sd)
		}
		prior.Name = "normal"
		return prior
	} else if (min == 0.0 && max == math.Inf(1)) || (min == math.Inf(-1) && max == 0.0) {
		// NOTE(review): k = 2 is only the correct half-truncation
		// normalizer when the density is symmetric about 0, i.e. mean == 0
		// — confirm nonzero means are never used with half-infinite bounds.
		k := 2.0
		var prior Prior
		prior.Function = func(x float64) float64 {
			return (Dnorm(x, mean, sd) * inrange(x, min, max)) * k
		}
		prior.Name = "normal"
		return prior
	} else {
		normal := func(x float64) float64 {
			return Dnorm(x, mean, sd) * inrange(x, min, max)
		}
		auc := Integrate(normal, math.Inf(-1), math.Inf(1))
		k := 1 / auc
		var prior Prior
		prior.Function = func(x float64) float64 {
			return (Dnorm(x, mean, sd) * inrange(x, min, max)) * k
		}
		prior.Name = "normal"
		return prior
	}
}
// StudentTPrior constructs a scaled/shifted Student-t prior with df degrees
// of freedom, truncated to [min, max] and renormalized.
func StudentTPrior(mean float64, sd float64, df float64, min float64, max float64) Prior {
	// If min and max are +/-Inf then no truncation is needed (k = 1);
	// otherwise, integrate and normalize.
	if min == math.Inf(-1) && max == math.Inf(1) {
		var prior Prior
		prior.point = 0
		prior.Function = func(x float64) float64 {
			return Scaled_shifted_t(x, mean, sd, df)
		}
		prior.Name = "student_t"
		return prior
	} else if (min == 0.0 && max == math.Inf(1)) || (min == math.Inf(-1) && max == 0.0) {
		// NOTE(review): k = 2 is only the correct half-truncation
		// normalizer when the density is symmetric about 0, i.e. mean == 0
		// — confirm nonzero means are never used with half-infinite bounds.
		k := 2.0
		var prior Prior
		prior.Function = func(x float64) float64 {
			return (Scaled_shifted_t(x, mean, sd, df) * inrange(x, min, max)) * k
		}
		prior.Name = "student_t"
		return prior
	} else {
		normal := func(x float64) float64 {
			return Scaled_shifted_t(x, mean, sd, df) * inrange(x, min, max)
		}
		auc := Integrate(normal, math.Inf(-1), math.Inf(1))
		k := 1 / auc
		var prior Prior
		prior.Function = func(x float64) float64 {
			return (Scaled_shifted_t(x, mean, sd, df) * inrange(x, min, max)) * k
		}
		prior.Name = "student_t"
		return prior
	}
}
// cauchy prior
func CauchyPrior(location float64, scale float64, min float64, max float64) Prior {
// If max and max are +/-Inf then set K to 1
// otherwise, integrate and normalize
if min == math.Inf(-1) && max == math.Inf(1) {
var prior Prior
prior.point = 0
prior.Function = func(x float64) float64 {
return Dcauchy(x, location, scale)
}
prior.Name = "cauchy"
return prior
} else if (min == 0.0 && max == math.Inf(1)) || (min == math.Inf(-1) && max == 0.0) {
k := 2.0
var prior Prior
prior.Function = func(x float64) float64 {
return (Dcauchy(x, location, scale) * inrange(x, min, max)) * k
}
prior.Name = "cauchy"
return prior
} else {
cauchy := func(x float64) float64 {
return Dcauchy(x, location, scale) * inrange(x, min, max)
}
auc := Integrate(cauchy, math.Inf(-1), math.Inf(1))
k := 1 / auc
var prior Prior
prior.Function = func(x float64) float64 {
return (Dcauchy(x, location, scale) * inrange(x, min, max)) * k
}
prior.Name = "cauchy"
return prior
}
}
// BetaPrior returns a beta-distribution prior with shape parameters
// alpha and beta, restricted to [min, max] via inrange.
func BetaPrior(alpha float64, beta float64, min float64, max float64) Prior {
	p := Prior{Name: "beta", point: 0}
	p.Function = func(x float64) float64 {
		return Dbeta(x, alpha, beta) * inrange(x, min, max)
	}
	return p
}
// PointPrior returns a degenerate prior with all mass at point: the
// density is 1 exactly at point and 0 everywhere else.
func PointPrior(point float64) Prior {
	p := Prior{Name: "point", point: point}
	p.Function = func(x float64) float64 {
		if x != point {
			return 0
		}
		return 1.0
	}
	return p
}
// uniform prior
func UniformPrior(alpha float64, beta float64) Prior {
var prior Prior
prior.point = 0
prior.Function = func(x float64) float64 {
return Dunif(x, alpha, beta)
}
prior.Name = "uniform"
return prior
} | pkg/bayesfactor/bayesfactor.go | 0.784402 | 0.683169 | bayesfactor.go | starcoder |
package band
import "time"
// newUS902Band builds the Band definition for the LoRaWAN US902-928
// regional parameters: 64 125kHz uplink channels (DR0-3), 8 500kHz
// uplink channels (DR4) and 8 500kHz downlink channels (DR10-13).
// RX1 answers on downlink channel (txChannel % 8).
func newUS902Band() (Band, error) {
	band := Band{
		DefaultTXPower:   20,
		ImplementsCFlist: false,
		RX2Frequency:     923300000,
		RX2DataRate:      8,
		MaxFCntGap:       16384,
		ADRACKLimit:      64,
		ADRACKDelay:      32,
		ReceiveDelay1:    time.Second,
		ReceiveDelay2:    time.Second * 2,
		JoinAcceptDelay1: time.Second * 5,
		JoinAcceptDelay2: time.Second * 6,
		ACKTimeoutMin:    time.Second,
		ACKTimeoutMax:    time.Second * 3,
		// DR0-3: 125 kHz uplink; DR4: 500 kHz uplink; DR8-13: 500 kHz downlink.
		DataRates: []DataRate{
			{Modulation: LoRaModulation, SpreadFactor: 10, Bandwidth: 125},
			{Modulation: LoRaModulation, SpreadFactor: 9, Bandwidth: 125},
			{Modulation: LoRaModulation, SpreadFactor: 8, Bandwidth: 125},
			{Modulation: LoRaModulation, SpreadFactor: 7, Bandwidth: 125},
			{Modulation: LoRaModulation, SpreadFactor: 8, Bandwidth: 500},
			{}, // RFU
			{}, // RFU
			{}, // RFU
			{Modulation: LoRaModulation, SpreadFactor: 12, Bandwidth: 500},
			{Modulation: LoRaModulation, SpreadFactor: 11, Bandwidth: 500},
			{Modulation: LoRaModulation, SpreadFactor: 10, Bandwidth: 500},
			{Modulation: LoRaModulation, SpreadFactor: 9, Bandwidth: 500},
			{Modulation: LoRaModulation, SpreadFactor: 8, Bandwidth: 500},
			{Modulation: LoRaModulation, SpreadFactor: 7, Bandwidth: 500},
			{}, // RFU
			{}, // RFU
		},
		// M = maximum MACPayload size, N = maximum application payload
		// size (no repeater), indexed by data rate.
		MaxPayloadSize: []MaxPayloadSize{
			{M: 19, N: 11},
			{M: 61, N: 53},
			{M: 137, N: 129},
			{M: 250, N: 242},
			{M: 250, N: 242},
			{}, // Not defined
			{}, // Not defined
			{}, // Not defined
			{M: 41, N: 33},
			{M: 117, N: 109},
			{M: 230, N: 222},
			{M: 230, N: 222},
			{M: 230, N: 222},
			{M: 230, N: 222},
			{}, // Not defined
			{}, // Not defined
		},
		// RX1 downlink data rate, indexed by [uplink DR][RX1DROffset].
		RX1DataRate: [][]int{
			{10, 9, 8, 8},
			{11, 10, 9, 8},
			{12, 11, 10, 9},
			{13, 12, 11, 10},
			{13, 13, 12, 11},
			{}, // Not defined
			{}, // Not defined
			{}, // Not defined
			{8, 8, 8, 8},
			{9, 8, 8, 8},
			{10, 9, 8, 8},
			{11, 10, 9, 8},
			{12, 11, 10, 9},
			{13, 12, 11, 10},
		},
		// TXPower[i] in dBm, indexed by the TXPower field of LinkADRReq.
		TXPower: []int{
			30,
			28,
			26,
			24,
			22,
			20,
			18,
			16,
			14,
			12,
			10,
			0,
			0,
			0,
			0,
			0,
		},
		UplinkChannels:   make([]Channel, 72),
		DownlinkChannels: make([]Channel, 8),
		getRX1ChannelFunc: func(txChannel int) int {
			// RX1 uses the downlink channel congruent to the uplink channel mod 8.
			return txChannel % 8
		},
		getRX1FrequencyFunc: func(b *Band, txFrequency int) (int, error) {
			uplinkChan, err := b.GetChannel(txFrequency, nil)
			if err != nil {
				return 0, err
			}
			rx1Chan := b.GetRX1Channel(uplinkChan)
			return b.DownlinkChannels[rx1Chan].Frequency, nil
		},
	}
	// initialize uplink channel 0 - 63: 902.3 MHz + i * 200 kHz, 125 kHz BW
	for i := 0; i < 64; i++ {
		band.UplinkChannels[i] = Channel{
			Frequency: 902300000 + (i * 200000),
			DataRates: []int{0, 1, 2, 3},
		}
	}
	// initialize uplink channel 64 - 71: 903.0 MHz + i * 1.6 MHz, 500 kHz BW
	for i := 0; i < 8; i++ {
		band.UplinkChannels[i+64] = Channel{
			Frequency: 903000000 + (i * 1600000),
			DataRates: []int{4},
		}
	}
	// initialize downlink channel 0 - 7: 923.3 MHz + i * 600 kHz, 500 kHz BW
	for i := 0; i < 8; i++ {
		band.DownlinkChannels[i] = Channel{
			Frequency: 923300000 + (i * 600000),
			DataRates: []int{10, 11, 12, 13},
		}
	}
	return band, nil
}
package svgshapes
import (
"encoding/xml"
"strconv"
)
// Path is an SVG <path> element. PathCommands accumulates the "d"
// attribute as a space-separated command string built by the fluent
// methods below.
type Path struct {
	XMLName xml.Name `xml:"path"`
	Shapebase
	PathCommands string `xml:"d,attr"`
}
// addCommand appends an SVG path command letter plus its numeric
// parameters (shortest round-trip formatting) to PathCommands,
// space-separating it from any commands already present.
func (p *Path) addCommand(command string, parameters ...float64) {
	cmd := command
	for _, v := range parameters {
		cmd += " " + strconv.FormatFloat(v, 'f', -1, 64)
	}
	if p.PathCommands == "" {
		p.PathCommands = cmd
	} else {
		p.PathCommands += " " + cmd
	}
}
// MoveTo appends an absolute move-to command ("M x y") and returns p for chaining.
func (p *Path) MoveTo(x, y float64) *Path {
	p.addCommand("M", x, y)
	return p
}
// MoveRelative appends a relative move-to command ("m dx dy").
func (p *Path) MoveRelative(dx, dy float64) *Path {
	p.addCommand("m", dx, dy)
	return p
}
// ClosePath appends a close-path command ("Z").
func (p *Path) ClosePath() *Path {
	p.addCommand("Z")
	return p
}
// LineTo appends an absolute line-to command ("L x y").
func (p *Path) LineTo(x, y float64) *Path {
	p.addCommand("L", x, y)
	return p
}
// LineRelative appends a relative line-to command ("l dx dy").
func (p *Path) LineRelative(dx, dy float64) *Path {
	p.addCommand("l", dx, dy)
	return p
}
// HLineTo appends an absolute horizontal line command ("H x").
func (p *Path) HLineTo(x float64) *Path {
	p.addCommand("H", x)
	return p
}
// HLineRelative appends a relative horizontal line command ("h dx").
func (p *Path) HLineRelative(dx float64) *Path {
	p.addCommand("h", dx)
	return p
}
// VLineTo appends an absolute vertical line command ("V y").
func (p *Path) VLineTo(y float64) *Path {
	p.addCommand("V", y)
	return p
}
// VLineRelative appends a relative vertical line command ("v dy").
func (p *Path) VLineRelative(dy float64) *Path {
	p.addCommand("v", dy)
	return p
}
// CubicBezierTo appends an absolute cubic Bezier command ("C c1x c1y c2x c2y x y").
func (p *Path) CubicBezierTo(controlX1, controlY1, controlX2, controlY2, x, y float64) *Path {
	p.addCommand("C", controlX1, controlY1, controlX2, controlY2, x, y)
	return p
}
// CubicBezierRelative appends a relative cubic Bezier command ("c ...").
func (p *Path) CubicBezierRelative(controlDX1, controlDY1, controlDX2, controlDY2, dx, dy float64) *Path {
	p.addCommand("c", controlDX1, controlDY1, controlDX2, controlDY2, dx, dy)
	return p
}
// SmoothCubicBezierTo appends an absolute smooth cubic Bezier command ("S c2x c2y x y").
func (p *Path) SmoothCubicBezierTo(controlX2, controlY2, x, y float64) *Path {
	p.addCommand("S", controlX2, controlY2, x, y)
	return p
}
// SmoothCubicBezierRelative appends a relative smooth cubic Bezier command ("s ...").
func (p *Path) SmoothCubicBezierRelative(controlDX2, controlDY2, dx, dy float64) *Path {
	p.addCommand("s", controlDX2, controlDY2, dx, dy)
	return p
}
// QuadraticBezierTo appends an absolute quadratic Bezier command ("Q cx cy x y").
func (p *Path) QuadraticBezierTo(controlX, controlY, x, y float64) *Path {
	p.addCommand("Q", controlX, controlY, x, y)
	return p
}
// QuadraticBezierRelative appends a relative quadratic Bezier command ("q ...").
func (p *Path) QuadraticBezierRelative(controlDX, controlDY, dx, dy float64) *Path {
	p.addCommand("q", controlDX, controlDY, dx, dy)
	return p
}
// SmoothQuadraticBezierTo appends an absolute smooth quadratic Bezier command ("T x y").
func (p *Path) SmoothQuadraticBezierTo(x, y float64) *Path {
	p.addCommand("T", x, y)
	return p
}
// SmoothQuadraticBezierRelative appends a relative smooth quadratic Bezier command ("t dx dy").
func (p *Path) SmoothQuadraticBezierRelative(dx, dy float64) *Path {
	p.addCommand("t", dx, dy)
	return p
}
// addEllipticalArcCommand appends an elliptical-arc command with the
// boolean large-arc and sweep flags encoded as 0/1 parameters, as
// required by the SVG path grammar.
func (p *Path) addEllipticalArcCommand(command string, radiusX, radiusY, xAxisRotation float64, largeArc, sweep bool, x, y float64) {
	flag := func(b bool) float64 {
		if b {
			return 1.0
		}
		return 0.0
	}
	p.addCommand(command, radiusX, radiusY, xAxisRotation, flag(largeArc), flag(sweep), x, y)
}
// EllipticalArcTo appends an absolute elliptical arc command ("A").
func (p *Path) EllipticalArcTo(radiusX, radiusY, xAxisRotation float64, largeArc, sweep bool, x, y float64) *Path {
	p.addEllipticalArcCommand("A", radiusX, radiusY, xAxisRotation, largeArc, sweep, x, y)
	return p
}
// EllipticalArcRelative appends a relative elliptical arc command ("a").
func (p *Path) EllipticalArcRelative(radiusX, radiusY, xAxisRotation float64, largeArc, sweep bool, dx, dy float64) *Path {
	p.addEllipticalArcCommand("a", radiusX, radiusY, xAxisRotation, largeArc, sweep, dx, dy)
	return p
}
package symdiff
import (
"errors"
"reflect"
"unsafe"
)
var (
	// ErrDifferentArgumentsTypes is returned when src and dst do not share a type.
	ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type")
	// ErrNilArguments is returned when either argument is nil.
	ErrNilArguments = errors.New("src and dst must not be nil")
)
// During deepSymmetricDifference, we need to keep track of
// checks that are in progress. The comparison algorithm
// assumes that all checks in progress are true when it
// re-encounters them. Visited comparisons are stored in a
// map indexed by visit.
type visit struct {
	a1  unsafe.Pointer
	a2  unsafe.Pointer
	typ reflect.Type
}
// Diff finds the symmetric difference between two interfaces: wherever
// dst deeply equals src, the corresponding part of dst is zeroed. dst
// must be a pointer so that it can be modified in place.
func Diff(dst, src interface{}) error {
	if dst == nil || src == nil {
		return ErrNilArguments
	}
	vDst, vSrc, err := resolveValues(dst, src)
	if err != nil {
		return err
	}
	if vDst.Type() != vSrc.Type() {
		return ErrDifferentArgumentsTypes
	}
	seen := map[visit]bool{}
	return deepSymmetricDifference(vDst, vSrc, seen, 0)
}
// deepSymmetricDifference merges the deep symmetric difference into dst:
// wherever dst and src hold deeply equal values, that part of dst is
// zeroed. The visited map tracks comparisons already in progress, which
// allows short circuiting on recursive types (in-progress pairs are
// assumed equal, as in reflect.DeepEqual).
func deepSymmetricDifference(dst, src reflect.Value, visited map[visit]bool, depth int) error {
	// hard reports the kinds that can participate in a reference cycle;
	// only those need an entry in visited.
	hard := func(k reflect.Kind) bool {
		switch k {
		case reflect.Map, reflect.Slice, reflect.Ptr, reflect.Interface:
			return true
		}
		return false
	}
	if dst.CanAddr() && src.CanAddr() && hard(dst.Kind()) {
		addr1 := unsafe.Pointer(dst.UnsafeAddr())
		addr2 := unsafe.Pointer(src.UnsafeAddr())
		if uintptr(addr1) > uintptr(addr2) {
			// Canonicalize order to reduce number of entries in visited.
			// Assumes non-moving garbage collector.
			addr1, addr2 = addr2, addr1
		}
		v := visit{addr1, addr2, dst.Type()}
		// Short circuit if this pair has already been seen.
		if visited[v] {
			return nil
		}
		visited[v] = true
	}
	switch dst.Kind() {
	case reflect.Array:
		for i := 0; i < dst.Len(); i++ {
			if err := deepSymmetricDifference(dst.Index(i), src.Index(i), visited, depth+1); err != nil {
				return err
			}
		}
	case reflect.Slice:
		if dst.IsNil() || src.IsNil() {
			break
		}
		if dst.Len() != src.Len() {
			break
		}
		// Zero dst only when every element is deeply equal.
		// BUG FIX: the original compared reflect.Value headers with
		// reflect.DeepEqual (pointer identity, never value equality) and
		// called Elem() on a slice — which panics — when both slices
		// shared a backing array.
		deepEqual := true
		for i := 0; i < dst.Len(); i++ {
			de := dst.Index(i)
			se := src.Index(i)
			if !de.CanInterface() || !se.CanInterface() ||
				!reflect.DeepEqual(de.Interface(), se.Interface()) {
				deepEqual = false
				break
			}
		}
		if deepEqual && dst.CanSet() {
			dst.Set(reflect.Zero(dst.Type()))
		}
	case reflect.Interface:
		if dst.IsNil() || src.IsNil() {
			break
		}
		return deepSymmetricDifference(dst.Elem(), src.Elem(), visited, depth+1)
	case reflect.Ptr:
		// Aliased pointers: zero the shared pointee if possible.
		if dst.Pointer() == src.Pointer() && dst.Elem().CanSet() {
			dst.Elem().Set(reflect.Zero(dst.Elem().Type()))
		}
		// Recursing on invalid (nil-pointer) elems is harmless: every
		// branch below guards with CanSet/IsNil.
		return deepSymmetricDifference(dst.Elem(), src.Elem(), visited, depth+1)
	case reflect.Struct:
		if hasExportedField(dst) {
			for i := 0; i < dst.NumField(); i++ {
				if err := deepSymmetricDifference(dst.Field(i), src.Field(i), visited, depth+1); err != nil {
					return err
				}
			}
		} else {
			// No exported fields: compare the whole struct value directly.
			if dst.CanSet() && dst.Interface() == src.Interface() {
				dst.Set(reflect.Zero(dst.Type()))
			}
		}
	case reflect.Map:
		// NOTE(review): values returned by MapIndex are not addressable,
		// so the recursion below cannot zero map entries in place; keys
		// present in only one of the maps are also ignored.
		for _, k := range dst.MapKeys() {
			val1 := dst.MapIndex(k)
			val2 := src.MapIndex(k)
			if val1.IsValid() && val2.IsValid() {
				if err := deepSymmetricDifference(val1, val2, visited, depth+1); err != nil {
					return err
				}
			}
		}
	default:
		if dst.CanSet() && dst.Interface() == src.Interface() {
			dst.Set(reflect.Zero(dst.Type()))
		}
	}
	return nil
}
// hasExportedField reports whether the struct value dst has at least one
// exported field, looking through anonymous embedded struct fields.
func hasExportedField(dst reflect.Value) bool {
	t := dst.Type()
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		if f.Anonymous && dst.Field(i).Kind() == reflect.Struct {
			if hasExportedField(dst.Field(i)) {
				return true
			}
			continue
		}
		// An empty PkgPath marks an exported field.
		if f.PkgPath == "" {
			return true
		}
	}
	return false
}
func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) {
if dst == nil || src == nil {
err = ErrNilArguments
return
}
vDst = reflect.ValueOf(dst).Elem()
vSrc = reflect.ValueOf(src)
// We check if vSrc is a pointer to dereference it.
if vSrc.Kind() == reflect.Ptr {
vSrc = vSrc.Elem()
}
return
} | diff.go | 0.581065 | 0.427217 | diff.go | starcoder |
package GoSmartSearch
import (
"fmt"
"sort"
"strings"
)
// stringResult pairs a candidate string with its similarity score
// (0..1) against the search term, used for ranking.
type stringResult struct {
	value    string
	accuracy float32
}
// SearchInMaps returns a map slice formed by the input elements ordered
// (based on the value stored under key) from most to least similar to
// the input term. Elements whose similarity is below tolerance (0-1)
// are dropped.
func SearchInMaps(elements []map[string]string, term, key string, tolerance float32) ([]map[string]string, error) {
	if err := validateTolerance(tolerance); err != nil {
		return nil, err
	}
	keyValues := make([]string, 0, len(elements))
	for _, item := range elements {
		keyValues = append(keyValues, item[key])
	}
	sortedKeyValues, err := SearchInStrings(keyValues, term, tolerance)
	if err != nil {
		return nil, err
	}
	// Match each ranked key value back to a distinct element. Marking
	// elements as used fixes the original behavior, where several
	// elements sharing the same key value all resolved to the first one.
	used := make([]bool, len(elements))
	result := make([]map[string]string, 0, len(sortedKeyValues))
	for _, value := range sortedKeyValues {
		for i, item := range elements {
			if !used[i] && item[key] == value {
				used[i] = true
				result = append(result, item)
				break
			}
		}
	}
	return result, nil
}
// SearchInStrings returns a slice formed by the input elements ordered
// from most to least similar to the input term. Elements scoring below
// tolerance (0-1) are dropped.
func SearchInStrings(elements []string, term string, tolerance float32) ([]string, error) {
	if err := validateTolerance(tolerance); err != nil {
		return nil, err
	}
	scored := make([]stringResult, 0, len(elements))
	for _, candidate := range elements {
		accuracy := calculateAccuracy(term, candidate)
		if accuracy >= tolerance {
			scored = append(scored, stringResult{value: candidate, accuracy: accuracy})
		}
	}
	// A stable sort keeps the input order among equally-scored elements,
	// making the result deterministic (sort.Slice gives no such guarantee).
	sort.SliceStable(scored, func(a, b int) bool {
		return scored[a].accuracy > scored[b].accuracy
	})
	result := make([]string, len(scored))
	for i := range scored {
		result[i] = scored[i].value
	}
	return result, nil
}
// calculateAccuracy scores how similar current is to original, in
// roughly 0..1 (1 for an exact or case-insensitive match).
//
// NOTE(review): the comparison indexes bytes, not runes, so multi-byte
// UTF-8 input is compared byte-wise — confirm inputs are ASCII.
// NOTE(review): the inner loop can credit the same character pairing
// from both directions, and the final /4 scaling is heuristic; scores
// are relative rankings, not calibrated probabilities.
func calculateAccuracy(original, current string) float32 {
	var hits, hitsExact float32
	var limit int
	if original == current {
		return 1
	}
	original, current = strings.ToLower(original), strings.ToLower(current)
	if original == current {
		return 1
	}
	// Compare only up to the shorter string's length.
	if len(original) > len(current) {
		limit = len(current)
	} else {
		limit = len(original)
	}
	for i := 0; i < limit; i++ {
		if original[i] == current[i] {
			hitsExact++
		} else {
			// Partial credit for a matching character at another position.
			for e := 0; e < limit; e++ {
				if (original[i] == current[e]) || (original[e] == current[i]) {
					hits += 0.25
				}
			}
		}
	}
	if int(hitsExact) == len(original) {
		return 1
	}
	hitsExact += hits
	return hitsExact / float32(len(original)) / 4
}
// findItemInMapSlice returns the first map in elements whose entry for
// key equals value, or nil when no element matches.
func findItemInMapSlice(elements []map[string]string, key, value string) map[string]string {
	for i := range elements {
		if elements[i][key] == value {
			return elements[i]
		}
	}
	return nil
}
// validateTolerance returns nil when tolerance lies in [0, 1] and a
// descriptive error otherwise.
func validateTolerance(tolerance float32) error {
	if tolerance >= 0 && tolerance <= 1 {
		return nil
	}
	return fmt.Errorf("validation error: tolerance (%f) must be in range 0-1", tolerance)
}
package layers
import (
"math"
"gitlab.com/akita/dnn/tensor"
"gitlab.com/akita/mgpusim/driver"
)
// Vector represents a 1D array stored in the GPU memory.
type Vector struct {
	size int           // number of float32 elements in GPU memory
	ptr  driver.GPUPtr // device address of the backing buffer
	GPUDriver *driver.Driver
	GPUCtx    *driver.Context
}
// Init initializes the data and the size of the vector: it allocates
// len(data)*4 bytes of GPU memory and copies data in as float32.
// NOTE(review): size is recorded independently of len(data); later
// reads/writes use size, so callers should pass size == len(data) —
// confirm no caller relies on them differing.
func (v *Vector) Init(data []float64, size int) {
	v.size = size
	v.ptr = v.GPUDriver.AllocateMemory(v.GPUCtx, uint64(len(data)*4))
	tempData := make([]float32, len(data))
	for i, value := range data {
		tempData[i] = float32(value)
	}
	v.GPUDriver.MemCopyH2D(v.GPUCtx, v.ptr, tempData)
}
// AsMatrix returns the vector as a matrix with the given row and col
// size. The Tensor shares the vector's GPU buffer; no data is copied.
func (v Vector) AsMatrix(row, col int) *Tensor {
	m := &Tensor{
		size:   []int{row, col},
		ptr:    v.ptr,
		driver: v.GPUDriver,
		ctx:    v.GPUCtx,
	}
	return m
}
// Raw copies the vector out of GPU memory and returns the elements
// widened to float64.
func (v Vector) Raw() []float64 {
	staging := make([]float32, v.size)
	v.GPUDriver.MemCopyD2H(v.GPUCtx, staging, v.ptr)
	out := make([]float64, v.size)
	for i, f := range staging {
		out[i] = float64(f)
	}
	return out
}
// Set assigns the data of the vector, narrowing to float32 before the
// host-to-device copy.
// NOTE(review): if len(val) > v.size this panics indexing temp, and if
// len(val) < v.size the tail is written as zeroes — callers appear to
// always pass exactly v.size elements; confirm.
func (v Vector) Set(val []float64) {
	temp := make([]float32, v.size)
	for i, value := range val {
		temp[i] = float32(value)
	}
	v.GPUDriver.MemCopyH2D(v.GPUCtx, v.ptr, temp)
}
// Clone creates a new vector with the same data, allocating a fresh GPU
// buffer and copying the contents through a host staging slice.
func (v Vector) Clone() tensor.Vector {
	c := &Vector{
		size:      v.size,
		GPUDriver: v.GPUDriver,
		GPUCtx:    v.GPUCtx,
	}
	c.ptr = v.GPUDriver.AllocateMemory(v.GPUCtx, uint64(v.size*4))
	staging := make([]float32, v.size)
	v.GPUDriver.MemCopyD2H(v.GPUCtx, staging, v.ptr)
	v.GPUDriver.MemCopyH2D(v.GPUCtx, c.ptr, staging)
	return c
}
// Scale multiplies each element of the vector by alpha in place
// (read back from GPU, scale on host, write back).
func (v Vector) Scale(alpha float64) {
	data := v.Raw()
	for i, x := range data {
		data[i] = x * alpha
	}
	v.Set(data)
}
// Add performs an element-wise add of b into the receiver.
func (v Vector) Add(b tensor.Vector) {
	lhs := v.Raw()
	rhs := b.Raw()
	for i := range lhs {
		lhs[i] += rhs[i]
	}
	v.Set(lhs)
}
// AddScalar adds alpha to every element of the vector.
func (v Vector) AddScalar(alpha float64) {
	data := v.Raw()
	for i := range data {
		data[i] += alpha
	}
	v.Set(data)
}
// ScaleAdd performs an alpha*A + beta*B operation, where A is the
// current vector; the result is stored back into A.
func (v Vector) ScaleAdd(alpha, beta float64, b tensor.Vector) {
	aRaw := v.Raw()
	bRaw := b.Raw()
	for i := range aRaw {
		aRaw[i] = alpha*aRaw[i] + beta*bRaw[i]
	}
	v.Set(aRaw)
}
// MulElemWise performs an element-wise multiply of b into the receiver.
func (v Vector) MulElemWise(b tensor.Vector) {
	aRaw := v.Raw()
	bRaw := b.Raw()
	for i := range aRaw {
		aRaw[i] *= bRaw[i]
	}
	v.Set(aRaw)
}
// DivElemWise performs an element-wise division of the receiver by b.
// No zero check is done; division by zero yields +/-Inf or NaN.
func (v Vector) DivElemWise(b tensor.Vector) {
	aRaw := v.Raw()
	bRaw := b.Raw()
	for i := range aRaw {
		aRaw[i] /= bRaw[i]
	}
	v.Set(aRaw)
}
// PowerScalar raises each element of the vector to the power alpha.
func (v Vector) PowerScalar(alpha float64) {
	aRaw := v.Raw()
	for i := range aRaw {
		aRaw[i] = math.Pow(aRaw[i], alpha)
	}
	v.Set(aRaw)
}
package formula
import (
"fmt"
"math"
)
// BinOpType is the binary operation operator type
//go:generate stringer -type=BinOpType
type BinOpType byte
// Operator type constants. The order is load-bearing for the generated
// stringer; append new operators at the end.
const (
	BinOpTypeUnknown BinOpType = iota
	BinOpTypePlus
	BinOpTypeMinus
	BinOpTypeMult
	BinOpTypeDiv
	BinOpTypeExp
	BinOpTypeLT
	BinOpTypeGT
	BinOpTypeEQ
	BinOpTypeLEQ
	BinOpTypeGEQ
	BinOpTypeNE
	BinOpTypeConcat // '&' in Excel
)
// BinaryExpr is a binary expression: lhs op rhs.
type BinaryExpr struct {
	lhs, rhs Expression
	op       BinOpType
}
// NewBinaryExpr constructs a new binary expression with a given operator.
func NewBinaryExpr(lhs Expression, op BinOpType, rhs Expression) Expression {
	return BinaryExpr{lhs, rhs, op}
}
// Eval evaluates the binary expression using the context given.
// Array/array and list/list operands are handled element-wise first;
// otherwise both operands must be numbers of the same result type.
// NOTE(review): mixed-type operands (and e.g. string EQ/NE) fall through
// to the generic "unsupported binary op" error — confirm that is the
// intended behavior rather than an oversight.
func (b BinaryExpr) Eval(ctx Context, ev Evaluator) Result {
	lhs := b.lhs.Eval(ctx, ev)
	rhs := b.rhs.Eval(ctx, ev)
	// peel off array/list ops first
	if lhs.Type == rhs.Type {
		if lhs.Type == ResultTypeArray {
			if !sameDim(lhs.ValueArray, rhs.ValueArray) {
				return MakeErrorResult("lhs/rhs should have same dimensions")
			}
			return arrayOp(b.op, lhs.ValueArray, rhs.ValueArray)
		} else if lhs.Type == ResultTypeList {
			if len(lhs.ValueList) != len(rhs.ValueList) {
				return MakeErrorResult("lhs/rhs should have same dimensions")
			}
			return listOp(b.op, lhs.ValueList, rhs.ValueList)
		}
	}
	// TODO: check for and add support for binary operators on boolean values
	switch b.op {
	case BinOpTypePlus:
		if lhs.Type == rhs.Type {
			if lhs.Type == ResultTypeNumber {
				return MakeNumberResult(lhs.ValueNumber + rhs.ValueNumber)
			}
		}
	case BinOpTypeMinus:
		if lhs.Type == rhs.Type {
			if lhs.Type == ResultTypeNumber {
				return MakeNumberResult(lhs.ValueNumber - rhs.ValueNumber)
			}
		}
	case BinOpTypeMult:
		if lhs.Type == rhs.Type {
			if lhs.Type == ResultTypeNumber {
				return MakeNumberResult(lhs.ValueNumber * rhs.ValueNumber)
			}
		}
	case BinOpTypeDiv:
		if lhs.Type == rhs.Type {
			if lhs.Type == ResultTypeNumber {
				// Division by zero maps to the #DIV/0! error type.
				if rhs.ValueNumber == 0 {
					return MakeErrorResultType(ErrorTypeDivideByZero, "divide by zero")
				}
				return MakeNumberResult(lhs.ValueNumber / rhs.ValueNumber)
			}
		}
	case BinOpTypeExp:
		if lhs.Type == rhs.Type {
			if lhs.Type == ResultTypeNumber {
				return MakeNumberResult(math.Pow(lhs.ValueNumber, rhs.ValueNumber))
			}
		}
	case BinOpTypeLT:
		if lhs.Type == rhs.Type {
			if lhs.Type == ResultTypeNumber {
				return MakeBoolResult(lhs.ValueNumber < rhs.ValueNumber)
			}
		}
	case BinOpTypeGT:
		if lhs.Type == rhs.Type {
			if lhs.Type == ResultTypeNumber {
				return MakeBoolResult(lhs.ValueNumber > rhs.ValueNumber)
			}
		}
	case BinOpTypeEQ:
		if lhs.Type == rhs.Type {
			if lhs.Type == ResultTypeNumber {
				// TODO: see what Excel does regarding floating point comparison
				return MakeBoolResult(lhs.ValueNumber == rhs.ValueNumber)
			}
		}
	case BinOpTypeNE:
		if lhs.Type == rhs.Type {
			if lhs.Type == ResultTypeNumber {
				return MakeBoolResult(lhs.ValueNumber != rhs.ValueNumber)
			}
		}
	case BinOpTypeLEQ:
		if lhs.Type == rhs.Type {
			if lhs.Type == ResultTypeNumber {
				return MakeBoolResult(lhs.ValueNumber <= rhs.ValueNumber)
			}
		}
	case BinOpTypeGEQ:
		if lhs.Type == rhs.Type {
			if lhs.Type == ResultTypeNumber {
				return MakeBoolResult(lhs.ValueNumber >= rhs.ValueNumber)
			}
		}
	case BinOpTypeConcat:
		// Concatenation stringifies both operands regardless of type.
		return MakeStringResult(lhs.Value() + rhs.Value())
	}
	return MakeErrorResult("unsupported binary op")
}
// Reference returns ReferenceInvalid: a binary expression never denotes
// a cell or range reference itself.
func (b BinaryExpr) Reference(ctx Context, ev Evaluator) Reference {
	return ReferenceInvalid
}
// sameDim reports whether the two 2D result arrays have identical
// dimensions, row count and per-row lengths alike.
func sameDim(lhs, rhs [][]Result) bool {
	if len(lhs) != len(rhs) {
		return false
	}
	for i, row := range lhs {
		if len(row) != len(rhs[i]) {
			return false
		}
	}
	return true
}
// arrayOp applies the binary operator row by row to two equally-sized
// 2D result arrays (the caller has already checked dimensions),
// short-circuiting on the first row error.
func arrayOp(op BinOpType, lhs, rhs [][]Result) Result {
	rows := make([][]Result, 0, len(lhs))
	for i := range lhs {
		row := listOp(op, lhs[i], rhs[i])
		if row.Type == ResultTypeError {
			return row
		}
		rows = append(rows, row.ValueList)
	}
	return MakeArrayResult(rows)
}
func listOp(op BinOpType, lhs, rhs []Result) Result {
res := []Result{}
// we can assume the arrays are the same size here
for i := range lhs {
l := lhs[i].AsNumber()
r := rhs[i].AsNumber()
if l.Type != ResultTypeNumber || r.Type != ResultTypeNumber {
return MakeErrorResult("non-nunmeric value in binary operation")
}
switch op {
case BinOpTypePlus:
res = append(res, MakeNumberResult(l.ValueNumber+r.ValueNumber))
case BinOpTypeMinus:
res = append(res, MakeNumberResult(l.ValueNumber-r.ValueNumber))
case BinOpTypeMult:
res = append(res, MakeNumberResult(l.ValueNumber*r.ValueNumber))
case BinOpTypeDiv:
if r.ValueNumber == 0 {
return MakeErrorResultType(ErrorTypeDivideByZero, "")
}
res = append(res, MakeNumberResult(l.ValueNumber/r.ValueNumber))
case BinOpTypeExp:
res = append(res, MakeNumberResult(math.Pow(l.ValueNumber, r.ValueNumber)))
case BinOpTypeLT:
res = append(res, MakeBoolResult(l.ValueNumber < r.ValueNumber))
case BinOpTypeGT:
res = append(res, MakeBoolResult(l.ValueNumber > r.ValueNumber))
case BinOpTypeEQ:
res = append(res, MakeBoolResult(l.ValueNumber == r.ValueNumber))
case BinOpTypeLEQ:
res = append(res, MakeBoolResult(l.ValueNumber <= r.ValueNumber))
case BinOpTypeGEQ:
res = append(res, MakeBoolResult(l.ValueNumber >= r.ValueNumber))
case BinOpTypeNE:
res = append(res, MakeBoolResult(l.ValueNumber != r.ValueNumber))
// TODO: support concat here
// case BinOpTypeConcat:
default:
return MakeErrorResult(fmt.Sprintf("unsupported list binary op %s", op))
}
}
return MakeListResult(res)
} | spreadsheet/formula/binaryexpr.go | 0.505127 | 0.417746 | binaryexpr.go | starcoder |
package gos7
import (
"bytes"
"encoding/binary"
"fmt"
"math"
"strconv"
"time"
)
const (
	// bias is the count of 100ns ticks between 0001-01-01 00:00:00 and
	// 1970-01-01 00:00:00; used by the LDT conversions below.
	bias int64 = 621355968000000000 // "decimicros" between 0001-01-01 00:00:00 and 1970-01-01 00:00:00
)
//Helper the helper to get/set value from/to byte array with difference types
type Helper struct{}
//SetValueAt set a value at a position of a byte array,
//which based on builtin function: https://golang.org/pkg/encoding/binary/#Read
//
// The value is encoded big-endian. NOTE(review): encoding errors are
// only printed, not returned, so a bad value type silently leaves the
// buffer untouched.
func (s7 *Helper) SetValueAt(buffer []byte, pos int, data interface{}) {
	buf := new(bytes.Buffer)
	err := binary.Write(buf, binary.BigEndian, data)
	if err != nil {
		fmt.Println("binary.Write failed:", err)
	}
	copy(buffer[pos:], buf.Bytes())
}
//GetValueAt set a value at a position of a byte array,
// which based on builtin function: https://golang.org/pkg/encoding/binary/#Write
//
// Decodes big-endian bytes starting at pos into the pointer value.
// NOTE(review): decoding errors are printed, not returned.
func (s7 *Helper) GetValueAt(buffer []byte, pos int, value interface{}) {
	buf := bytes.NewReader(buffer[pos:])
	if err := binary.Read(buf, binary.BigEndian, value); err != nil {
		fmt.Println("binary.Read failed:", err)
	}
}
//GetRealAt 32 bit floating point number (S7 Real) (Range of float32)
// Reads a big-endian uint32 at pos and reinterprets its bits as float32.
func (s7 *Helper) GetRealAt(buffer []byte, pos int) float32 {
	var value uint32
	s7.GetValueAt(buffer, pos, &value)
	float := math.Float32frombits(value)
	return float
}
//SetRealAt 32 bit floating point number (S7 Real) (Range of float32)
// Writes the IEEE-754 bit pattern of value big-endian at pos.
func (s7 *Helper) SetRealAt(buffer []byte, pos int, value float32) {
	s7.SetValueAt(buffer, pos, math.Float32bits(value))
}
//GetLRealAt 64 bit floating point number (S7 LReal) (Range of float64)
// Reads a big-endian uint64 at pos and reinterprets its bits as float64.
func (s7 *Helper) GetLRealAt(buffer []byte, pos int) float64 {
	var value uint64
	s7.GetValueAt(buffer, pos, &value)
	float := math.Float64frombits(value)
	return float
}
//SetLRealAt 64 bit floating point number (S7 LReal) (Range of float64)
// Writes the IEEE-754 bit pattern of Value big-endian at Pos.
func (s7 *Helper) SetLRealAt(Buffer []byte, Pos int, Value float64) {
	s7.SetValueAt(Buffer, Pos, math.Float64bits(Value))
}
// GetDateTimeAt reads an S7 DATE_AND_TIME (8 BCD-encoded bytes) from
// Buffer at Pos and returns it as a time.Time in UTC. Layout: year,
// month, day, hour, minute, second, then two bytes encoding the
// milliseconds (the day-of-week nibble in the last byte is ignored).
func (s7 *Helper) GetDateTimeAt(Buffer []byte, Pos int) time.Time {
	var Year, Month, Day, Hour, Min, Sec, MSec int

	Year = decodeBcd(Buffer[Pos])
	// Two-digit year: 00-89 => 2000-2089, 90-99 => 1990-1999.
	if Year < 90 {
		Year = Year + 2000
	} else {
		Year += 1900
	}
	Month = decodeBcd(Buffer[Pos+1])
	Day = decodeBcd(Buffer[Pos+2])
	Hour = decodeBcd(Buffer[Pos+3])
	Min = decodeBcd(Buffer[Pos+4])
	Sec = decodeBcd(Buffer[Pos+5])
	MSec = (decodeBcd(Buffer[Pos+6]) * 10) + (decodeBcd(Buffer[Pos+7]) / 10)
	// time.Date takes nanoseconds; the original passed MSec directly,
	// which turned e.g. 500ms into 500ns and effectively dropped the
	// sub-second part.
	return time.Date(Year, time.Month(Month), Day, Hour, Min, Sec, MSec*1000000, time.UTC)
}
// decodeBcd converts a packed binary-coded-decimal byte to its integer
// value: high nibble = tens digit, low nibble = units digit.
// https://en.wikipedia.org/wiki/Binary-coded_decimal
func decodeBcd(b byte) int {
	tens := int(b >> 4)
	units := int(b & 0x0F)
	return tens*10 + units
}
// encodeBcd packs an integer in 0..99 into a binary-coded-decimal byte
// (tens digit in the high nibble, units digit in the low nibble).
func encodeBcd(value int) byte {
	return byte(value/10)<<4 | byte(value%10)
}
// SetDateTimeAt writes value as an S7 DATE_AND_TIME (8 BCD bytes) into
// buffer at pos. The last two bytes carry the milliseconds split into
// msh (first two digits) and msl (last digit), plus the day-of-week.
func (s7 *Helper) SetDateTimeAt(buffer []byte, pos int, value time.Time) {
	y := value.Year()
	m := int(value.Month())
	d := value.Day()
	h := value.Hour()
	mi := value.Minute()
	s := value.Second()
	dow := int(value.Weekday()) + 1

	// Milliseconds within the current second (0-999). The original used
	// value.UnixNano()/1e6 — milliseconds since the Unix epoch — which
	// overflows two BCD digit pairs and produced garbage.
	ms := value.Nanosecond() / 1000000
	// msh = first two digits of milliseconds, msl = last digit.
	msh := ms / 10
	msl := ms % 10

	// Reduce to the two-digit year expected by GetDateTimeAt. The
	// original only subtracted for years > 1999, leaving 19xx years as
	// four digits and corrupting the BCD encoding.
	if y >= 2000 {
		y -= 2000
	} else {
		y -= 1900
	}

	buffer[pos] = encodeBcd(y)
	buffer[pos+1] = encodeBcd(m)
	buffer[pos+2] = encodeBcd(d)
	buffer[pos+3] = encodeBcd(h)
	buffer[pos+4] = encodeBcd(mi)
	buffer[pos+5] = encodeBcd(s)
	buffer[pos+6] = encodeBcd(msh)
	buffer[pos+7] = encodeBcd(msl*10 + dow)
}
//GetDateAt DATE (S7 DATE)
// Reads a signed 16-bit day count relative to 1900-01-01 and returns
// the corresponding calendar date.
func (s7 *Helper) GetDateAt(buffer []byte, pos int) time.Time {
	epoch := time.Date(1900, time.January, 1, 0, 0, 0, 0, time.UTC)
	var days int16
	s7.GetValueAt(buffer, pos, &days)
	return epoch.AddDate(0, 0, int(days))
}
// SetDateAt writes value as an S7 DATE: a 16-bit count of days since
// 1900-01-01, the inverse of GetDateAt.
// NOTE(review): int16 limits the representable range to ~1989; the S7
// DATE type is unsigned — widening would change GetDateAt too.
func (s7 *Helper) SetDateAt(buffer []byte, pos int, value time.Time) {
	epoch := time.Date(1900, time.January, 1, 0, 0, 0, 0, time.UTC)
	// The original used value.YearDay()-epoch.YearDay(), which counts
	// days within the calendar year only and ignores the year itself.
	days := int16(value.Sub(epoch).Hours() / 24)
	s7.SetValueAt(buffer, pos, days)
}
// GetTODAt reads an S7 TIME_OF_DAY — a 32-bit count of milliseconds
// since midnight — and returns it as a clock time on the date 0001-01-01.
func (s7 *Helper) GetTODAt(buffer []byte, pos int) time.Time {
	var msec int32
	s7.GetValueAt(buffer, pos, &msec)
	// Keep the sub-second milliseconds; the original truncated to whole
	// seconds by passing 0 nanoseconds.
	return time.Date(1, 1, 1, 0, 0, int(msec/1000), int(msec%1000)*1000000, time.UTC)
}
// SetTODAt writes value's time of day as an S7 TIME_OF_DAY: a 32-bit
// count of milliseconds since midnight, the inverse of GetTODAt.
func (s7 *Helper) SetTODAt(buffer []byte, pos int, value time.Time) {
	// The original wrote only value.Nanosecond()/1e6 — the sub-second
	// milliseconds — dropping hours, minutes and seconds entirely.
	ms := ((value.Hour()*60+value.Minute())*60+value.Second())*1000 + value.Nanosecond()/1000000
	s7.SetValueAt(buffer, pos, int32(ms))
}
//GetLTODAt LTOD (S7 1500 LONG TIME_OF_DAY)
//
// Reads a 64-bit nanosecond count since midnight.
// NOTE(review): time.Date is called with month 0 and day 0, which Go
// normalizes to November 30 of year -1 — confirm callers only use the
// clock portion of the result (GetTODAt uses year 1 instead).
// NOTE(review): int(nano) truncates on 32-bit platforms, where int is
// 32 bits; a full day (~8.6e13 ns) does not fit.
func (s7 *Helper) GetLTODAt(Buffer []byte, Pos int) time.Time {
	//S71500 Tick = 1 ns
	var nano int64
	s7.GetValueAt(Buffer, Pos, &nano)
	return time.Date(0, 0, 0, 0, 0, 0, int(nano), time.UTC)
}
// SetLTODAt writes value's time of day as an S7-1500 LTIME_OF_DAY:
// 64-bit nanoseconds since midnight (matching what GetLTODAt reads).
func (s7 *Helper) SetLTODAt(buffer []byte, pos int, value time.Time) {
	// The original wrote only value.Nanosecond() — the sub-second part —
	// losing hours, minutes and seconds.
	ns := (int64(value.Hour())*3600+int64(value.Minute())*60+int64(value.Second()))*1000000000 +
		int64(value.Nanosecond())
	s7.SetValueAt(buffer, pos, ns)
}
//GetLDTAt LDT (S7 1500 Long Date and Time)
//
// NOTE(review): bias is documented as 100ns ("decimicro") ticks between
// year 1 and 1970, but here it is added to a value treated as
// nanoseconds, and time.Date is given month/day 0 (normalized by Go to
// Nov 30 of year -1) — the units do not line up. Verify against the
// S7-1500 LDT definition before relying on this pair.
func (s7 *Helper) GetLDTAt(buffer []byte, pos int) time.Time {
	var nano int64
	s7.GetValueAt(buffer, pos, &nano)
	return time.Date(0, 0, 0, 0, 0, 0, int(nano+bias), time.UTC)
}
//SetLDTAt LDT (S7 1500 Long Date and Time)
//
// NOTE(review): value.Nanosecond() is only the sub-second component, so
// this cannot be the inverse of GetLDTAt; value.UnixNano() was probably
// intended. Confirm the on-wire LDT encoding before changing.
func (s7 *Helper) SetLDTAt(buffer []byte, pos int, value time.Time) {
	s7.SetValueAt(buffer, pos, int64(value.Nanosecond())-bias)
}
// GetDTLAt reads an S7-1200/1500 DTL structure from buffer at pos:
// YEAR (u16 big-endian), MONTH, DAY, WEEKDAY, HOUR, MINUTE, SECOND
// (one byte each), then NANOSECOND (u32 big-endian) at offset 8.
func (s7 *Helper) GetDTLAt(buffer []byte, pos int) time.Time {
	Year := int(buffer[pos])*256 + int(buffer[pos+1])
	Month := int(buffer[pos+2])
	Day := int(buffer[pos+3])
	// buffer[pos+4] holds the weekday; it is not needed to build the time.
	Hour := int(buffer[pos+5])
	Min := int(buffer[pos+6])
	Sec := int(buffer[pos+7])
	// Nanoseconds live at offset 8; the original read from offset 0 and
	// interpreted the year/month/day bytes as the nanosecond field.
	var nsec uint32
	s7.GetValueAt(buffer, pos+8, &nsec)
	return time.Date(Year, time.Month(Month), Day, Hour, Min, Sec, int(nsec), time.UTC)
}
// SetDTLAt writes value into buffer at pos as an S7-1200/1500 DTL
// structure (see GetDTLAt for the layout) and returns the buffer.
func (s7 *Helper) SetDTLAt(buffer []byte, pos int, value time.Time) []byte {
	// Year is a big-endian uint16; the original wrote ASCII digit bytes
	// from strconv.Itoa, which GetDTLAt cannot read back.
	year := value.Year()
	buffer[pos] = byte(year >> 8)
	buffer[pos+1] = byte(year)
	buffer[pos+2] = byte(value.Month())
	buffer[pos+3] = byte(value.Day())
	buffer[pos+4] = byte(int(value.Weekday()) + 1)
	buffer[pos+5] = byte(value.Hour())
	buffer[pos+6] = byte(value.Minute())
	buffer[pos+7] = byte(value.Second())
	// The original assigned buffer[pos+7] twice, clobbering the seconds
	// with a truncated nanosecond value; nanoseconds belong at offset 8.
	s7.SetValueAt(buffer, pos+8, uint32(value.Nanosecond()))
	return buffer
}
//SetStringAt Set String (S7 String)
//
// Writes maxLen and the actual length as the two header bytes, then
// inserts the string bytes at pos+2 and returns the (grown) buffer.
// NOTE(review): the value is inserted, not overwritten, so the buffer
// grows by len(value); and a value longer than maxLen is not truncated —
// verify both against the intended S7 string layout.
func (s7 *Helper) SetStringAt(buffer []byte, pos int, maxLen int, value string) []byte {
	buffer[pos] = byte(maxLen)
	buffer[pos+1] = byte(len(value))
	buffer = append(buffer[:pos+2], append([]byte(value), buffer[pos+2:]...)...)
	return buffer
}
//GetCharsAt Get Array of char (S7 ARRAY OF CHARS)
// Returns Size raw bytes starting at pos as a string.
func (s7 *Helper) GetCharsAt(buffer []byte, pos int, Size int) string {
	return string(buffer[pos : pos+Size])
}
// SetCharsAt overwrites len(value) bytes of buffer starting at pos with
// the contents of value (S7 ARRAY OF CHARS), mirroring GetCharsAt.
func (s7 *Helper) SetCharsAt(buffer []byte, pos int, value string) {
	// The original appended into a local copy of the slice header and
	// discarded the result, so the caller's buffer was never reliably
	// updated; copy in place instead.
	copy(buffer[pos:], value)
}
//GetCounter Get S7 Counter
// Decodes a BCD counter: the low byte holds the hundreds digit and the
// high byte the tens/units digit pair.
func (s7 *Helper) GetCounter(value uint16) int {
	return int(decodeBcd(byte(value))*100 + decodeBcd(byte(value>>8)))
}
//GetCounterAt Get S7 Counter at a index
func (s7 *Helper) GetCounterAt(buffer []uint16, index int) int {
	return s7.GetCounter(buffer[index])
}
// ToCounter converts an integer counter value (0-999) to its S7 BCD
// representation — low byte = hundreds digit, high byte = tens/units —
// the inverse of GetCounter.
func (s7 *Helper) ToCounter(value int) uint16 {
	// The original computed encodeBcd(value%100<<8): it shifted before
	// encoding and then added two bytes, which overflows and cannot
	// reproduce the layout GetCounter reads.
	return uint16(encodeBcd(value/100)) | uint16(encodeBcd(value%100))<<8
}
//SetCounterAt sets a counter value (BCD-encoded via ToCounter) at a
// position and returns the buffer.
func (s7 *Helper) SetCounterAt(buffer []uint16, pos int, value int) []uint16 {
	buffer[pos] = s7.ToCounter(value)
	return buffer
}
// SetBoolAt sets a boolean (bit) within a byte at bit position
// without changing the other bits
// it returns the resulted byte
func (s7 *Helper) SetBoolAt(b byte, bitPos uint, data bool) byte {
if data {
return b | (1 << bitPos)
}
return b &^ (1 << bitPos)
}
// GetBoolAt gets a boolean (bit) from a byte at position
func (s7 *Helper) GetBoolAt(b byte, pos uint) bool {
return b&(1<<pos) != 0
} | helper.go | 0.594316 | 0.472014 | helper.go | starcoder |
package longtitude
import (
"fmt"
"math"
)
// Longtitude stores a numeric coordinate referencing a celestial bodies X axis.
type Longtitude float32
// Absolute returns the numeric value held by the Longtitude pointer to an absolute number.
func (longtitude *Longtitude) Absolute() float32 {
return float32(math.Abs(float64(*longtitude)))
}
// Correct returns a boolean that identifies whether the Longtitude value does not exceed the accepted Longtitude bounds.
func (longtitude *Longtitude) Correct() bool {
return (longtitude.Value() >= Minimum) && (longtitude.Value() <= Maximum)
}
// Float64 returns a the Longtitude value as a 64 bit floating number.
func (longtitude *Longtitude) Float64() float64 {
return float64(*longtitude)
}
// From returns a Longtitude expressing the distance between two Longtitude
// pointers, computed as l - longtitude (the receiver is the subtrahend).
func (longtitude *Longtitude) From(l *Longtitude) *Longtitude {
	return NewLongtitude(l.Value() - longtitude.Value())
}
// Max returns a new Longtitude pointer containing the larger ABSOLUTE value of
// the two Longtitudes; the sign of the original values is discarded by math.Abs.
func (longtitude *Longtitude) Max(l *Longtitude) *Longtitude {
	return NewLongtitude(float32(math.Max(math.Abs(float64(*longtitude)), math.Abs(float64(*l)))))
}
// Measurement returns the measurement unit used by the Longtitude pointer
// (the package-level Measurement constant).
func (longtitude *Longtitude) Measurement() string {
	return Measurement
}
// Min returns a new Longtitude pointer containing the smaller ABSOLUTE value of
// the two Longtitudes; as with Max, signs are discarded before comparison.
func (longtitude *Longtitude) Min(l *Longtitude) *Longtitude {
	return NewLongtitude(float32(math.Min(math.Abs(float64(*longtitude)), math.Abs(float64(*l)))))
}
// String implements fmt.Stringer, rendering the coordinate with default
// (%v) float32 formatting.
func (longtitude *Longtitude) String() string {
	return fmt.Sprint(float32(*longtitude))
}
// To returns a Longtitude expressing the distance between two Longtitude
// pointers, computed as longtitude - l (the argument is the subtrahend).
func (longtitude *Longtitude) To(l *Longtitude) *Longtitude {
	diff := longtitude.Value() - l.Value()
	return NewLongtitude(diff)
}
// Value returns the numeric value held by the Longtitude pointer.
func (longtitude *Longtitude) Value() float32 {
	return float32(*longtitude)
} | longtitude/longtitude.go | 0.929616 | 0.805364 | longtitude.go | starcoder |
package types
import (
"io"
"regexp"
"strings"
"github.com/lyraproj/issue/issue"
"github.com/lyraproj/pcore/px"
)
// typedName is the concrete implementation of px.TypedName: a qualified
// name such as "Foo::Bar" scoped by a namespace and a name-authority URI.
type typedName struct {
	namespace px.Namespace
	authority px.URI
	// name is stored without any leading "::" (stripped in newTypedName2).
	name string
	// canonical caches the lower-cased "authority/namespace/name" map key,
	// built lazily by MapKey.
	canonical string
	// parts caches the "::"-split, validated segments of name, built
	// lazily by Parts.
	parts []string
}
// TypedNameMetaType is the px object type describing TypedName values;
// it is registered with the type system in init below.
var TypedNameMetaType px.Type
func init() {
	// Register the TypedName meta type together with its two creation
	// functions: positional (namespace, name[, authority]) and from-hash.
	TypedNameMetaType = newObjectType(`TypedName`, `{
		attributes => {
			'namespace' => String,
			'name' => String,
			'authority' => { type => Optional[URI], value => undef },
			'parts' => { type => Array[String], kind => derived },
			'is_qualified' => { type => Boolean, kind => derived },
			'child' => { type => Optional[TypedName], kind => derived },
			'parent' => { type => Optional[TypedName], kind => derived }
		},
		functions => {
			'is_parent' => Callable[[TypedName],Boolean],
			'relative_to' => Callable[[TypedName],Optional[TypedName]]
		}
	}`, func(ctx px.Context, args []px.Value) px.Value {
		ns := px.Namespace(args[0].String())
		n := args[1].String()
		if len(args) > 2 {
			return newTypedName2(ns, n, px.URI(args[2].(*UriValue).String()))
		}
		return NewTypedName(ns, n)
	}, func(ctx px.Context, args []px.Value) px.Value {
		h := args[0].(*Hash)
		ns := px.Namespace(h.Get5(`namespace`, px.EmptyString).String())
		n := h.Get5(`name`, px.EmptyString).String()
		if x, ok := h.Get4(`authority`); ok {
			return newTypedName2(ns, n, px.URI(x.(*UriValue).String()))
		}
		return NewTypedName(ns, n)
	})
}
// ToString renders t using the generic px object formatter.
func (t *typedName) ToString(bld io.Writer, format px.FormatContext, g px.RDetect) {
	ObjectToString(t, format, bld, g)
}
// PType returns the px runtime type of a TypedName value.
func (t *typedName) PType() px.Type {
	return TypedNameMetaType
}
// Call dispatches the object functions declared in the meta type
// ('is_parent' and 'relative_to'); ok is false for any other method name.
func (t *typedName) Call(c px.Context, method px.ObjFunc, args []px.Value, block px.Lambda) (result px.Value, ok bool) {
	switch method.Name() {
	case `is_parent`:
		return booleanValue(t.IsParent(args[0].(px.TypedName))), true
	case `relative_to`:
		if r, ok := t.RelativeTo(args[0].(px.TypedName)); ok {
			return r, true
		}
		// not an ancestor: the declared return type is
		// Optional[TypedName], so answer undef rather than failing
		return undef, true
	}
	return nil, false
}
// Get resolves the attributes declared in the meta type. Derived
// attributes (parts, is_qualified, child, parent) are computed on
// demand; ok is false for unknown keys.
func (t *typedName) Get(key string) (value px.Value, ok bool) {
	switch key {
	case `namespace`:
		return stringValue(string(t.namespace)), true
	case `authority`:
		// the runtime name authority is the declared default value and is
		// therefore presented as undef
		if t.authority == px.RuntimeNameAuthority {
			return px.Undef, true
		}
		return WrapURI2(string(t.authority)), true
	case `name`:
		return stringValue(t.Name()), true
	case `parts`:
		return t.PartsList(), true
	case `is_qualified`:
		return booleanValue(t.IsQualified()), true
	case `parent`:
		p := t.Parent()
		if p == nil {
			return undef, true
		}
		return p, true
	case `child`:
		p := t.Child()
		if p == nil {
			return undef, true
		}
		return p, true
	}
	return nil, false
}
// InitHash returns the attribute hash from which an equal TypedName can
// be reconstructed; the authority entry is omitted when it is the
// default runtime name authority.
func (t *typedName) InitHash() px.OrderedMap {
	es := make([]*HashEntry, 0, 3)
	es = append(es, WrapHashEntry2(`namespace`, stringValue(string(t.Namespace()))))
	es = append(es, WrapHashEntry2(`name`, stringValue(t.Name())))
	if t.authority != px.RuntimeNameAuthority {
		es = append(es, WrapHashEntry2(`authority`, WrapURI2(string(t.authority))))
	}
	return WrapHash(es)
}
// NewTypedName creates a TypedName in the given namespace using the
// default runtime name authority.
func NewTypedName(namespace px.Namespace, name string) px.TypedName {
	return newTypedName2(namespace, name, px.RuntimeNameAuthority)
}
// allowedCharacters matches one valid name segment: a letter followed by
// letters, digits or underscores (enforced lazily in Parts).
var allowedCharacters = regexp.MustCompile(`\A[A-Za-z][0-9A-Z_a-z]*\z`)
// newTypedName2 creates a TypedName with an explicit name authority; a
// leading `::` on name is stripped. Segment validation is deferred to
// the first call of Parts.
func newTypedName2(namespace px.Namespace, name string, nameAuthority px.URI) px.TypedName {
	tn := typedName{}
	tn.namespace = namespace
	tn.authority = nameAuthority
	tn.name = strings.TrimPrefix(name, `::`)
	return &tn
}
// typedNameFromMapKey parses an "authority/namespace/name" key (as
// produced by MapKey) back into a TypedName; it panics with an
// InvalidTypedNameMapKey issue when the key lacks two '/' separators.
func typedNameFromMapKey(mapKey string) px.TypedName {
	if i := strings.LastIndexByte(mapKey, '/'); i > 0 {
		pfx := mapKey[:i]
		name := mapKey[i+1:]
		if i = strings.LastIndexByte(pfx, '/'); i > 0 {
			return newTypedName2(px.Namespace(pfx[i+1:]), name, px.URI(pfx[:i]))
		}
	}
	panic(px.Error(px.InvalidTypedNameMapKey, issue.H{`mapKey`: mapKey}))
}
// Child returns the name below the first segment ("A::B::C" -> "B::C"),
// or nil when the name is not qualified.
func (t *typedName) Child() px.TypedName {
	if !t.IsQualified() {
		return nil
	}
	return t.child(1)
}
// child returns the name with the first stripCount segments removed, or
// nil when fewer segments are present. Cached canonical/parts values are
// carried over (sliced accordingly) to avoid recomputation.
func (t *typedName) child(stripCount int) px.TypedName {
	name := t.name
	sx := 0
	for i := 0; i < stripCount; i++ {
		sx = strings.Index(name, `::`)
		if sx < 0 {
			return nil
		}
		name = name[sx+2:]
	}
	tn := &typedName{
		namespace: t.namespace,
		authority: t.authority,
		name:      name}
	if t.canonical != `` {
		// canonical is "<authority>/<namespace>/<name>" lower-cased; keep
		// the prefix and drop the removed leading segments of the name part
		pfxLen := len(t.authority) + len(t.namespace) + 2
		diff := len(t.name) - len(name)
		tn.canonical = t.canonical[:pfxLen] + t.canonical[pfxLen+diff:]
	}
	if t.parts != nil {
		tn.parts = t.parts[stripCount:]
	}
	return tn
}
// Parent returns the name above the last segment ("A::B::C" -> "A::B"),
// or nil when the name is not qualified.
func (t *typedName) Parent() px.TypedName {
	lx := strings.LastIndex(t.name, `::`)
	if lx < 0 {
		return nil
	}
	tn := &typedName{
		namespace: t.namespace,
		authority: t.authority,
		name:      t.name[:lx]}
	if t.canonical != `` {
		// truncate the cached canonical key right after the kept prefix
		pfxLen := len(t.authority) + len(t.namespace) + 2
		tn.canonical = t.canonical[:pfxLen+lx]
	}
	if t.parts != nil {
		tn.parts = t.parts[:len(t.parts)-1]
	}
	return tn
}
// Equals reports whether other is a TypedName with the same canonical
// (case-insensitive authority/namespace/name) map key.
func (t *typedName) Equals(other interface{}, g px.Guard) bool {
	tn, ok := other.(px.TypedName)
	return ok && t.MapKey() == tn.MapKey()
}
// Name returns the name without namespace or authority, e.g. "Foo::Bar".
func (t *typedName) Name() string {
	return t.name
}
// IsParent reports whether t is a strict ancestor of o, i.e. t's parts
// form a proper prefix of o's parts.
func (t *typedName) IsParent(o px.TypedName) bool {
	tps := t.Parts()
	ops := o.Parts()
	if len(tps) >= len(ops) {
		return false
	}
	for i, p := range tps {
		if p != ops[i] {
			return false
		}
	}
	return true
}
// RelativeTo returns the remainder of t below parent (e.g. "A::B::C"
// relative to "A" is "B::C") and true, or nil and false when parent is
// not an ancestor of t.
func (t *typedName) RelativeTo(parent px.TypedName) (px.TypedName, bool) {
	if !parent.IsParent(t) {
		return nil, false
	}
	return t.child(len(parent.Parts())), true
}
// IsQualified reports whether the name consists of more than one "::"
// separated segment.
func (t *typedName) IsQualified() bool {
	if t.parts != nil {
		return len(t.parts) > 1
	}
	return strings.Contains(t.name, `::`)
}
// MapKey returns (and lazily caches) the canonical lower-cased
// "authority/namespace/name" key used to index typed names.
func (t *typedName) MapKey() string {
	if t.canonical == `` {
		t.canonical = strings.ToLower(string(t.authority) + `/` + string(t.namespace) + `/` + t.name)
	}
	return t.canonical
}
// Parts returns (and lazily caches) the lower-cased `::`-separated
// segments of the name. Each segment is validated against
// allowedCharacters; an InvalidCharactersInName issue is raised (as a
// panic) for malformed segments.
func (t *typedName) Parts() []string {
	if t.parts == nil {
		parts := strings.Split(strings.ToLower(t.name), `::`)
		for _, part := range parts {
			if !allowedCharacters.MatchString(part) {
				panic(px.Error(px.InvalidCharactersInName, issue.H{`name`: t.name}))
			}
		}
		t.parts = parts
	}
	return t.parts
}
// PartsList returns Parts wrapped as a px List of string values.
func (t *typedName) PartsList() px.List {
	parts := t.Parts()
	es := make([]px.Value, len(parts))
	for i, p := range parts {
		es[i] = stringValue(p)
	}
	return WrapValues(es)
}
// String renders the typed name via the px string conversion.
func (t *typedName) String() string {
	return px.ToString(t)
}
// Namespace returns the namespace the name lives in.
func (t *typedName) Namespace() px.Namespace {
	return t.namespace
}
// Authority returns the name authority URI.
func (t *typedName) Authority() px.URI {
	return t.authority
} | types/typedname.go | 0.516595 | 0.424472 | typedname.go | starcoder |
package sort
import "math/rand"
// Bubble sorts slice in place in ascending order using bubble sort:
// adjacent out-of-order pairs are swapped until a full pass performs no
// swap.
func Bubble(slice []int) {
	for {
		clean := true
		for i := 1; i < len(slice); i++ {
			if slice[i-1] > slice[i] {
				slice[i-1], slice[i] = slice[i], slice[i-1]
				clean = false
			}
		}
		if clean {
			return
		}
	}
}
// Selection sorts slice in place in ascending order using selection
// sort: each round moves the smallest remaining element to the front.
func Selection(slice []int) {
	for i := range slice {
		min := i
		for j := i + 1; j < len(slice); j++ {
			if slice[j] < slice[min] {
				min = j
			}
		}
		if min != i {
			slice[i], slice[min] = slice[min], slice[i]
		}
	}
}
// Insertion sorts slice in place in ascending order using insertion
// sort: each element is shifted left into its position within the
// already-sorted prefix.
//
// The previous body was actually an exchange sort (compare-and-swap of
// slice[i] against every earlier element), performing Θ(n²) comparisons
// AND swaps on every input; true insertion sort does at most one write
// per shifted element and runs in O(n) on already-sorted input. The
// observable result (ascending order, in place) is unchanged.
func Insertion(slice []int) {
	for i := 1; i < len(slice); i++ {
		key := slice[i]
		j := i - 1
		for j >= 0 && slice[j] > key {
			slice[j+1] = slice[j]
			j--
		}
		slice[j+1] = key
	}
}
// mer merges two individually sorted slices into one freshly allocated
// sorted slice; ties prefer the left input (stable merge).
func mer(left, right []int) []int {
	merged := make([]int, 0, len(left)+len(right))
	i, j := 0, 0
	for i < len(left) && j < len(right) {
		if left[i] <= right[j] {
			merged = append(merged, left[i])
			i++
		} else {
			merged = append(merged, right[j])
			j++
		}
	}
	merged = append(merged, left[i:]...)
	merged = append(merged, right[j:]...)
	return merged
}

// Merge returns an ascending-sorted version of slice using top-down
// merge sort; the input slice is not reordered.
func Merge(slice []int) []int {
	if len(slice) < 2 {
		return slice
	}
	mid := len(slice) / 2
	return mer(Merge(slice[:mid]), Merge(slice[mid:]))
}
// Quick returns an ascending-sorted version of slice using quicksort
// with a three-way partition around a uniformly random pivot; the
// random pivot only affects running time, never the result.
func Quick(slice []int) []int {
	if len(slice) < 2 {
		return slice
	}
	pivot := slice[rand.Intn(len(slice))]
	var less, equal, greater []int
	for _, v := range slice {
		switch {
		case v < pivot:
			less = append(less, v)
		case v > pivot:
			greater = append(greater, v)
		default:
			equal = append(equal, v)
		}
	}
	result := append(Quick(less), equal...)
	return append(result, Quick(greater)...)
}
// sift restores the max-heap property for the subtree rooted at i,
// considering only the first sliceLen elements; it mutates slice in
// place and returns it for convenience.
func sift(slice []int, i int, sliceLen int) []int {
	for {
		left := i*2 + 1
		if left >= sliceLen {
			return slice
		}
		largest := left
		if right := left + 1; right < sliceLen && slice[right] > slice[left] {
			largest = right
		}
		if slice[i] >= slice[largest] {
			return slice
		}
		slice[i], slice[largest] = slice[largest], slice[i]
		i = largest
	}
}

// Heap sorts slice in place in ascending order using heap sort: build a
// max-heap bottom-up, then repeatedly swap the root with the end of the
// shrinking heap and sift it back down.
func Heap(slice []int) {
	for i := len(slice)/2 - 1; i >= 0; i-- {
		sift(slice, i, len(slice))
	}
	for end := len(slice) - 1; end >= 1; end-- {
		slice[0], slice[end] = slice[end], slice[0]
		sift(slice, 0, end)
	}
}
// Counting sorts slice in place in ascending order using counting sort.
// The input must contain only non-negative integers (counts are indexed
// 0..max); memory use is proportional to the maximum value.
//
// Bug fix: the previous body read slice[0] before any length check (the
// dead "k = 1" assignment never prevented it), so an empty slice caused
// an index-out-of-range panic. Empty input is now a no-op.
func Counting(slice []int) {
	if len(slice) == 0 {
		return
	}
	maxVal := slice[0]
	for _, v := range slice {
		if v > maxVal {
			maxVal = v
		}
	}
	counts := make([]int, maxVal+1)
	for _, v := range slice {
		counts[v]++
	}
	// write each value back as many times as it was counted
	j := 0
	for v, c := range counts {
		for ; c > 0; c-- {
			slice[j] = v
			j++
		}
	}
}
// Shell sorts slice in place in ascending order using Shell sort with
// the gap sequence n/2, n/4, ..., 1 (gapped insertion at each gap).
func Shell(slice []int) {
	for gap := len(slice) / 2; gap > 0; gap /= 2 {
		for i := gap; i < len(slice); i++ {
			for j := i; j >= gap; j -= gap {
				if slice[j-gap] <= slice[j] {
					break
				}
				slice[j], slice[j-gap] = slice[j-gap], slice[j]
			}
		}
	}
}
// Cocktail sorts slice in place in ascending order with a bidirectional
// bubble pass: in each of len/2 rounds a pointer advances from each end
// simultaneously, swapping out-of-order neighbours at both fronts.
// NOTE(review): the fixed len/2 round count is preserved from the
// original; its sufficiency for every input is not obvious — verify.
func Cocktail(slice []int) {
	for round := 0; round < len(slice)/2; round++ {
		lo, hi := 0, len(slice)-1
		for lo <= hi {
			if slice[lo] > slice[lo+1] {
				slice[lo], slice[lo+1] = slice[lo+1], slice[lo]
			}
			lo++
			if slice[hi-1] > slice[hi] {
				slice[hi-1], slice[hi] = slice[hi], slice[hi-1]
			}
			hi--
		}
	}
}
// Comb sorts slice in place in ascending order using comb sort: the
// comparison gap starts at len(slice) and shrinks by factor ~1.24
// (gap*100/124) each round; once the gap reaches 1 the pass repeats,
// bubble-sort style, until a pass performs no swap.
//
// Bug fixes versus the previous body:
//   - slices of length 0 or 1 caused an index-out-of-range panic
//     (slice[i+gap] was read before the bounds test);
//   - the loop terminated after a SINGLE gap-1 pass, but comb sort must
//     keep running gap-1 passes until no swaps occur, otherwise some
//     inputs are left unsorted.
func Comb(slice []int) {
	if len(slice) < 2 {
		return
	}
	gap := len(slice)
	swapped := true
	for gap > 1 || swapped {
		if gap > 1 {
			gap = gap * 100 / 124
		}
		swapped = false
		for i := 0; i+gap < len(slice); i++ {
			if slice[i] > slice[i+gap] {
				slice[i], slice[i+gap] = slice[i+gap], slice[i]
				swapped = true
			}
		}
	}
}
// Gnome sort algorithm: walk the slice, swapping backwards whenever the
// current pair is out of order and stepping back after each swap (but
// never before index 1). Sorts slice in place in ascending order.
func Gnome(slice []int) {
	i := 1
	for i < len(slice) {
		if slice[i] >= slice[i-1] {
			// pair in order: advance
			i++
		} else {
			slice[i], slice[i-1] = slice[i-1], slice[i]
			// step back to re-check the previous pair
			if i > 1 {
				i--
			}
		}
	}
} | sort.go | 0.605216 | 0.434881 | sort.go | starcoder |
package tree
import (
"context"
"fmt"
"strings"
"github.com/pbanos/botanic/feature"
"github.com/pbanos/botanic/set"
)
// Tree represents a regression tree. It is composed of a NodeStore
// where all its nodes are stored, the id for the root node of the tree
// and the classFeature it is able to predict.
type Tree struct {
	NodeStore
	RootID string
	ClassFeature feature.Feature
}
// New takes the ID for the root Node, a NodeStore and a class feature and
// returns a tree composed of the nodes in the NodeStore connected to the
// node with the given root ID, used to predict the given feature.
func New(rootID string, nodeStore NodeStore, classFeature feature.Feature) *Tree {
	return &Tree{nodeStore, rootID, classFeature}
}
// Predict takes a sample and returns a prediction according to the tree and an
// error if the prediction could not be made.
//
// The walk starts at the root; at each node that still has a
// SubtreeFeature, the subtree whose FeatureCriterion the sample
// satisfies is selected. A match on an UndefinedCriterion is remembered
// but the scan continues, so a concrete criterion match takes
// precedence over the undefined fallback.
func (t *Tree) Predict(ctx context.Context, s feature.Sample) (*Prediction, error) {
	if t == nil {
		return nil, fmt.Errorf("nil tree cannot predict samples")
	}
	n, err := t.Get(ctx, t.RootID)
	if err != nil {
		return nil, fmt.Errorf("predicting sample: retrieving node %v: %v", t.RootID, err)
	}
	if n == nil {
		return nil, fmt.Errorf("predicting sample: root node %v not found", t.RootID)
	}
	for {
		// a node without a SubtreeFeature is a leaf
		if n.SubtreeFeature == nil {
			break
		}
		var selectedNode *Node
		for _, nID := range n.SubtreeIDs {
			subnode, err := t.Get(ctx, nID)
			if err != nil {
				return nil, fmt.Errorf("predicting sample: retrieving node %v: %v", nID, err)
			}
			if subnode == nil {
				return nil, fmt.Errorf("predicting sample: node %v not found", nID)
			}
			if subnode.FeatureCriterion != nil {
				ok, err := subnode.FeatureCriterion.SatisfiedBy(s)
				if err != nil {
					return nil, err
				}
				if ok {
					selectedNode = subnode
					// keep scanning when the match was only the
					// undefined-value fallback criterion
					if _, ok = subnode.FeatureCriterion.(feature.UndefinedCriterion); !ok {
						break
					}
				}
			}
		}
		if selectedNode == nil {
			return nil, fmt.Errorf("sample does not satisfy any subtree criteria on feature %s", n.SubtreeFeature.Name())
		}
		n = selectedNode
	}
	if n.Prediction != nil {
		return n.Prediction, nil
	}
	return nil, ErrCannotPredictFromSample
}
/*
Test takes a context.Context, a Set and a class Feature and returns three values:
* the prediction success rate of the tree over the given Set for the classFeature
* the number of failing predictions for the set because of ErrCannotPredictFromSample errors
* an error if a prediction could not be set for reasons other than the tree not
being able to do so. If this is not nil, the other values will be 0.0 and 0
respectively

NOTE(review): samples failing with ErrCannotPredictFromSample still count
in the denominator, and an empty set (count == 0) makes the division
below yield NaN — confirm callers handle both.
*/
func (t *Tree) Test(ctx context.Context, s set.Set) (float64, int, error) {
	if t == nil {
		return 0.0, 0, nil
	}
	var result float64
	var errCount int
	samples, err := s.Samples(ctx)
	if err != nil {
		return 0.0, 0, err
	}
	count, err := s.Count(ctx)
	if err != nil {
		return 0.0, 0, err
	}
	for _, sample := range samples {
		p, err := t.Predict(ctx, sample)
		if err != nil {
			if err != ErrCannotPredictFromSample {
				return 0.0, 0, err
			}
			// the tree simply could not predict this sample
			errCount++
		} else {
			pV, _ := p.PredictedValue()
			v, err := sample.ValueFor(t.ClassFeature)
			if err != nil {
				return 0.0, 0, err
			}
			if pV == v {
				result += 1.0
			}
		}
	}
	result = result / float64(count)
	return result, errCount, nil
}
// Traverse takes a context, bottomup boolean and an
// error-returning function that takes a context and a node
// as parameters, and goes through the tree running the
// function with the context and every traversed node.
// Traverse will call the function with a parent node before
// calling it for its children if bottomup is false, and
// call it after its children if bottomup is true.
// If the given context times out or is cancelled, the context
// error is returned. If a node cannot be retrieved from the
// tree's node store, the obtained error is returned. If the
// call to the function returns an error, the traversing is
// aborted and the error is returned. Otherwise, when the
// traversing is over, nil is returned.
func (t *Tree) Traverse(ctx context.Context, bottomup bool, f func(context.Context, *Node) error) error {
	n, err := t.NodeStore.Get(ctx, t.RootID)
	if err != nil {
		return err
	}
	return t.traverse(ctx, n, bottomup, f)
}
// traverse implements Traverse recursively for the subtree rooted at n,
// checking for context cancellation before visiting each node.
func (t *Tree) traverse(ctx context.Context, n *Node, bottomup bool, f func(context.Context, *Node) error) error {
	err := ctx.Err()
	if err != nil {
		return err
	}
	// pre-order visit
	if !bottomup {
		err = f(ctx, n)
	}
	if err != nil {
		return err
	}
	for _, snID := range n.SubtreeIDs {
		sn, err := t.NodeStore.Get(ctx, snID)
		if err != nil {
			return err
		}
		err = t.traverse(ctx, sn, bottomup, f)
		if err != nil {
			return err
		}
	}
	// post-order visit
	if bottomup {
		err = f(ctx, n)
	}
	if err != nil {
		return err
	}
	return nil
}
// String renders the whole tree rooted at RootID as an ASCII diagram.
func (t *Tree) String() string {
	return t.subtreeString(t.RootID)
}
// subtreeString renders the subtree rooted at nodeID: the node id,
// then its criterion and/or prediction in braces, then each child
// indented under a "|__" connector. A node-store failure is rendered
// inline as an "ERROR: ..." line rather than propagated.
// NOTE(review): uses context.TODO() because String cannot take a
// context, and builds the result with Sprintf in a loop (quadratic for
// very large trees) — acceptable for debug output, confirm otherwise.
func (t *Tree) subtreeString(nodeID string) string {
	n, err := t.NodeStore.Get(context.TODO(), nodeID)
	if err != nil {
		return fmt.Sprintf("ERROR: %s\n", err.Error())
	}
	result := fmt.Sprintf("[%s]\n", nodeID)
	if n.FeatureCriterion != nil {
		result = fmt.Sprintf("%s{ %v }\n", result, n.FeatureCriterion)
	}
	if n.Prediction != nil {
		result = fmt.Sprintf("%s{ %v }\n", result, n.Prediction)
	}
	if len(n.SubtreeIDs) > 0 {
		result = fmt.Sprintf("%s|\n", result)
	} else {
		result = fmt.Sprintf("%s \n", result)
	}
	for i, subtreeID := range n.SubtreeIDs {
		for j, line := range strings.Split(t.subtreeString(subtreeID), "\n") {
			if len(line) > 0 {
				if j == 0 {
					result = fmt.Sprintf("%s|__%s\n", result, line)
				} else {
					// the last child needs no continuation bar
					if i == len(n.SubtreeIDs)-1 {
						result = fmt.Sprintf("%s %s\n", result, line)
					} else {
						result = fmt.Sprintf("%s| %s\n", result, line)
					}
				}
			}
		}
	}
	return result
}
} | tree/tree.go | 0.664758 | 0.462534 | tree.go | starcoder |
package openapi
import (
"encoding/json"
)
// KinesisIntegration struct for KinesisIntegration
//
// NOTE(review): this file follows the openapi-generator layout and is
// presumably generated — avoid hand edits beyond comments. Both
// credential fields are optional; mutual exclusivity is not enforced
// here.
type KinesisIntegration struct {
	AwsAccessKey *AwsAccessKey `json:"aws_access_key,omitempty"`
	AwsRole *AwsRole `json:"aws_role,omitempty"`
}
// NewKinesisIntegration instantiates a new KinesisIntegration object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewKinesisIntegration() *KinesisIntegration {
	this := KinesisIntegration{}
	return &this
}
// NewKinesisIntegrationWithDefaults instantiates a new KinesisIntegration object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewKinesisIntegrationWithDefaults() *KinesisIntegration {
	this := KinesisIntegration{}
	return &this
}
// GetAwsAccessKey returns the AwsAccessKey field value if set, zero value otherwise.
// Safe to call on a nil receiver.
func (o *KinesisIntegration) GetAwsAccessKey() AwsAccessKey {
	if o == nil || o.AwsAccessKey == nil {
		var ret AwsAccessKey
		return ret
	}
	return *o.AwsAccessKey
}
// GetAwsAccessKeyOk returns a tuple with the AwsAccessKey field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *KinesisIntegration) GetAwsAccessKeyOk() (*AwsAccessKey, bool) {
	if o == nil || o.AwsAccessKey == nil {
		return nil, false
	}
	return o.AwsAccessKey, true
}
// HasAwsAccessKey returns a boolean if a field has been set.
func (o *KinesisIntegration) HasAwsAccessKey() bool {
	if o != nil && o.AwsAccessKey != nil {
		return true
	}
	return false
}
// SetAwsAccessKey gets a reference to the given AwsAccessKey and assigns it to the AwsAccessKey field.
func (o *KinesisIntegration) SetAwsAccessKey(v AwsAccessKey) {
	o.AwsAccessKey = &v
}
// GetAwsRole returns the AwsRole field value if set, zero value otherwise.
// Safe to call on a nil receiver.
func (o *KinesisIntegration) GetAwsRole() AwsRole {
	if o == nil || o.AwsRole == nil {
		var ret AwsRole
		return ret
	}
	return *o.AwsRole
}
// GetAwsRoleOk returns a tuple with the AwsRole field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *KinesisIntegration) GetAwsRoleOk() (*AwsRole, bool) {
	if o == nil || o.AwsRole == nil {
		return nil, false
	}
	return o.AwsRole, true
}
// HasAwsRole returns a boolean if a field has been set.
func (o *KinesisIntegration) HasAwsRole() bool {
	if o != nil && o.AwsRole != nil {
		return true
	}
	return false
}
// SetAwsRole gets a reference to the given AwsRole and assigns it to the AwsRole field.
func (o *KinesisIntegration) SetAwsRole(v AwsRole) {
	o.AwsRole = &v
}
// MarshalJSON serializes only the fields that are non-nil, matching the
// omitempty semantics of the struct tags.
func (o KinesisIntegration) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	if o.AwsAccessKey != nil {
		toSerialize["aws_access_key"] = o.AwsAccessKey
	}
	if o.AwsRole != nil {
		toSerialize["aws_role"] = o.AwsRole
	}
	return json.Marshal(toSerialize)
}
// NullableKinesisIntegration distinguishes an absent value from an
// explicitly assigned one (including JSON null) via the isSet flag.
type NullableKinesisIntegration struct {
	value *KinesisIntegration
	isSet bool
}
// Get returns the wrapped value; it may be nil.
func (v NullableKinesisIntegration) Get() *KinesisIntegration {
	return v.value
}
// Set stores val and marks the value as set.
func (v *NullableKinesisIntegration) Set(val *KinesisIntegration) {
	v.value = val
	v.isSet = true
}
// IsSet reports whether Set or UnmarshalJSON has assigned a value.
func (v NullableKinesisIntegration) IsSet() bool {
	return v.isSet
}
// Unset clears both the value and the set flag.
func (v *NullableKinesisIntegration) Unset() {
	v.value = nil
	v.isSet = false
}
// NewNullableKinesisIntegration wraps val as an already-set nullable.
func NewNullableKinesisIntegration(val *KinesisIntegration) *NullableKinesisIntegration {
	return &NullableKinesisIntegration{value: val, isSet: true}
}
// MarshalJSON encodes the wrapped value (JSON null when value is nil).
func (v NullableKinesisIntegration) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
// UnmarshalJSON decodes into the wrapped value and marks it set, even
// for an explicit JSON null.
func (v *NullableKinesisIntegration) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
} | openapi/model_kinesis_integration.go | 0.747708 | 0.415907 | model_kinesis_integration.go | starcoder |
package svg
import (
"fmt"
"github.com/go-gl/mathgl/mgl32"
"reflect"
)
// PathData is the common interface of all SVG path-data commands in
// this package. It embeds fmt.Stringer for debug rendering; the
// unexported empty_PathData marker restricts implementations to types
// declared in this package (a sealed sum type).
type PathData interface {
	fmt.Stringer
	empty_PathData()
}
// https://www.w3.org/TR/SVG2/paths.html#PathDataMovetoCommands
type (
	// AbsoluteMoveTo holds the points of an absolute moveto command.
	AbsoluteMoveTo struct {
		Arg []mgl32.Vec2
	}
	// RelativeMoveTo holds the offsets of a relative moveto command.
	RelativeMoveTo struct {
		Arg []mgl32.Vec2
	}
)
// empty_PathData marks AbsoluteMoveTo as a PathData variant.
// NOTE(review): every marker method in this file panics with
// "implement me"; they appear to exist only to seal the PathData
// interface — confirm they are never actually invoked.
func (AbsoluteMoveTo) empty_PathData() {
	panic("implement me")
}
// String renders the command type name followed by its arguments.
func (s AbsoluteMoveTo) String() string {
	return fmt.Sprintf("%s : %v", reflect.TypeOf(s).Name(), s.Arg)
}
func (RelativeMoveTo) empty_PathData() {
	panic("implement me")
}
func (s RelativeMoveTo) String() string {
	return fmt.Sprintf("%s : %v", reflect.TypeOf(s).Name(), s.Arg)
}
// https://www.w3.org/TR/SVG2/paths.html#PathDataLinetoCommands
type (
	// AbsoluteLineTo holds the points of an absolute lineto command.
	AbsoluteLineTo struct {
		Arg []mgl32.Vec2
	}
	// RelativeLineTo holds the offsets of a relative lineto command.
	RelativeLineTo struct {
		Arg []mgl32.Vec2
	}
	// AbsoluteVerticalLineTo holds absolute vertical lineto coordinates.
	AbsoluteVerticalLineTo struct {
		Arg []float32
	}
	// RelativeVerticalLineTo holds relative vertical lineto offsets.
	RelativeVerticalLineTo struct {
		Arg []float32
	}
	// AbsoluteHorizontalLineTo holds absolute horizontal lineto coordinates.
	AbsoluteHorizontalLineTo struct {
		Arg []float32
	}
	// RelativeHorizontalLineTo holds relative horizontal lineto offsets.
	RelativeHorizontalLineTo struct {
		Arg []float32
	}
)
func (AbsoluteLineTo) empty_PathData() {
	panic("implement me")
}
// String renders the command type name followed by its arguments
// (pattern shared by every command type in this file).
func (s AbsoluteLineTo) String() string {
	return fmt.Sprintf("%s : %v", reflect.TypeOf(s).Name(), s.Arg)
}
func (RelativeLineTo) empty_PathData() {
	panic("implement me")
}
func (s RelativeLineTo) String() string {
	return fmt.Sprintf("%s : %v", reflect.TypeOf(s).Name(), s.Arg)
}
func (AbsoluteVerticalLineTo) empty_PathData() {
	panic("implement me")
}
func (s AbsoluteVerticalLineTo) String() string {
	return fmt.Sprintf("%s : %v", reflect.TypeOf(s).Name(), s.Arg)
}
func (RelativeVerticalLineTo) empty_PathData() {
	panic("implement me")
}
func (s RelativeVerticalLineTo) String() string {
	return fmt.Sprintf("%s : %v", reflect.TypeOf(s).Name(), s.Arg)
}
func (AbsoluteHorizontalLineTo) empty_PathData() {
	panic("implement me")
}
func (s AbsoluteHorizontalLineTo) String() string {
	return fmt.Sprintf("%s : %v", reflect.TypeOf(s).Name(), s.Arg)
}
func (RelativeHorizontalLineTo) empty_PathData() {
	panic("implement me")
}
func (s RelativeHorizontalLineTo) String() string {
	return fmt.Sprintf("%s : %v", reflect.TypeOf(s).Name(), s.Arg)
}
// https://www.w3.org/TR/SVG2/paths.html#PathDataClosePathCommand
type (
	// CloseTo represents the close-path command; it carries no arguments.
	CloseTo struct{}
)

// empty_PathData marks CloseTo as a PathData variant (sealed interface).
func (CloseTo) empty_PathData() {
	panic("implement me")
}

// String returns the command's type name ("CloseTo").
// Simplified from fmt.Sprintf("%s", x): wrapping an existing string in
// Sprintf is redundant (staticcheck S1025) and the output is identical.
func (s CloseTo) String() string {
	return reflect.TypeOf(s).Name()
}
// https://www.w3.org/TR/SVG2/paths.html#PathDataQuadraticBezierCommands
type (
	// AbsoluteQuadTo holds [control, end] point pairs of absolute
	// quadratic Bezier commands.
	AbsoluteQuadTo struct {
		Arg [][2]mgl32.Vec2
	}
	// RelativeQuadTo holds [control, end] offset pairs of relative
	// quadratic Bezier commands.
	RelativeQuadTo struct {
		Arg [][2]mgl32.Vec2
	}
	// AbsoluteSmoothQuadTo holds the end points of absolute smooth
	// quadratic commands (control point is implied).
	AbsoluteSmoothQuadTo struct {
		Arg []mgl32.Vec2
	}
	// RelativeSmoothQuadTo holds the end offsets of relative smooth
	// quadratic commands.
	RelativeSmoothQuadTo struct {
		Arg []mgl32.Vec2
	}
)
func (AbsoluteQuadTo) empty_PathData() {
	panic("implement me")
}
// String renders the command type name followed by its arguments.
func (s AbsoluteQuadTo) String() string {
	return fmt.Sprintf("%s : %v", reflect.TypeOf(s).Name(), s.Arg)
}
func (RelativeQuadTo) empty_PathData() {
	panic("implement me")
}
func (s RelativeQuadTo) String() string {
	return fmt.Sprintf("%s : %v", reflect.TypeOf(s).Name(), s.Arg)
}
func (AbsoluteSmoothQuadTo) empty_PathData() {
	panic("implement me")
}
func (s AbsoluteSmoothQuadTo) String() string {
	return fmt.Sprintf("%s : %v", reflect.TypeOf(s).Name(), s.Arg)
}
func (RelativeSmoothQuadTo) empty_PathData() {
	panic("implement me")
}
func (s RelativeSmoothQuadTo) String() string {
	return fmt.Sprintf("%s : %v", reflect.TypeOf(s).Name(), s.Arg)
}
// https://www.w3.org/TR/SVG2/paths.html#PathDataCubicBezierCommands
type (
	// AbsoluteCubeTo holds [control1, control2, end] triples of absolute
	// cubic Bezier commands.
	AbsoluteCubeTo struct {
		Arg [][3]mgl32.Vec2
	}
	// RelativeCubeTo holds [control1, control2, end] offset triples of
	// relative cubic Bezier commands.
	RelativeCubeTo struct {
		Arg [][3]mgl32.Vec2
	}
	// AbsoluteSmoothCubeTo holds [control2, end] pairs of absolute smooth
	// cubic commands (first control point is implied).
	AbsoluteSmoothCubeTo struct {
		Arg [][2]mgl32.Vec2
	}
	// RelativeSmoothCubeTo holds [control2, end] offset pairs of relative
	// smooth cubic commands.
	RelativeSmoothCubeTo struct {
		Arg [][2]mgl32.Vec2
	}
)
func (AbsoluteCubeTo) empty_PathData() {
	panic("implement me")
}
// String renders the command type name followed by its arguments.
func (s AbsoluteCubeTo) String() string {
	return fmt.Sprintf("%s : %v", reflect.TypeOf(s).Name(), s.Arg)
}
func (RelativeCubeTo) empty_PathData() {
	panic("implement me")
}
func (s RelativeCubeTo) String() string {
	return fmt.Sprintf("%s : %v", reflect.TypeOf(s).Name(), s.Arg)
}
func (AbsoluteSmoothCubeTo) empty_PathData() {
	panic("implement me")
}
func (s AbsoluteSmoothCubeTo) String() string {
	return fmt.Sprintf("%s : %v", reflect.TypeOf(s).Name(), s.Arg)
}
func (RelativeSmoothCubeTo) empty_PathData() {
	panic("implement me")
}
func (s RelativeSmoothCubeTo) String() string {
	return fmt.Sprintf("%s : %v", reflect.TypeOf(s).Name(), s.Arg)
}
// TODO
// https://www.w3.org/TR/SVG2/paths.html#PathDataEllipticalArcCommands
type (
	// AbsoluteArc holds the segments of an absolute elliptical arc command.
	AbsoluteArc struct {
		Arg []ArcArguments
	}
	// RelativeArc holds the segments of a relative elliptical arc command.
	RelativeArc struct {
		Arg []ArcArguments
	}
	// ArcArguments holds one arc segment's parameters as defined by the
	// SVG elliptical arc grammar (see the link above).
	ArcArguments struct {
		// Radius holds the x and y radii of the ellipse.
		Radius mgl32.Vec2
		// Rotation is the x-axis rotation of the ellipse.
		Rotation float32
		// LargeArc is the large-arc-flag of the command.
		LargeArc bool
		// Sweep is the sweep-flag of the command.
		Sweep bool
		// To is the segment's end point.
		To mgl32.Vec2
	}
)
// String renders the arc parameters for debugging, naming the LargeArc
// and Sweep flags only when they are set (each followed by ", ").
// Output is unchanged; the body is reformatted to be gofmt-clean (the
// previous version lacked the space before each condition's brace).
func (s ArcArguments) String() string {
	flags := ""
	if s.LargeArc {
		flags += "LargeArc, "
	}
	if s.Sweep {
		flags += "Sweep, "
	}
	return fmt.Sprintf("{Radius : %v, Rotation : %f, %sTo : %v}", s.Radius, s.Rotation, flags, s.To)
}
func (AbsoluteArc) empty_PathData() {
	panic("implement me")
}
// String renders the command type name followed by its arc segments.
func (s AbsoluteArc) String() string {
	return fmt.Sprintf("%s : %v", reflect.TypeOf(s).Name(), s.Arg)
}
func (RelativeArc) empty_PathData() {
	panic("implement me")
}
func (s RelativeArc) String() string {
	return fmt.Sprintf("%s : %v", reflect.TypeOf(s).Name(), s.Arg)
}
// https://www.w3.org/TR/SVG2/paths.html#PathDataBearingCommands
type (
	// AbsoluteBearing holds the angles of an absolute bearing command.
	AbsoluteBearing struct {
		Arg []float32
	}
	// RelativeBearing holds the angle offsets of a relative bearing command.
	RelativeBearing struct {
		Arg []float32
	}
)
func (AbsoluteBearing) empty_PathData() {
	panic("implement me")
}
// String renders the command type name followed by its angles.
func (s AbsoluteBearing) String() string {
	return fmt.Sprintf("%s : %v", reflect.TypeOf(s).Name(), s.Arg)
}
func (RelativeBearing) empty_PathData() {
	panic("implement me")
}
func (s RelativeBearing) String() string {
	return fmt.Sprintf("%s : %v", reflect.TypeOf(s).Name(), s.Arg)
} | tools/svg/Path.go | 0.523908 | 0.417034 | Path.go | starcoder |
package document
import (
"fmt"
"sort"
"strings"
)
// Map of valid expression operators; ValidateExpressions uses this to
// whitelist the comparison operator of each query expression.
var validOperators = map[string]bool{
	"==": true,
	">": true,
	"<": true,
	">=": true,
	"<=": true,
	"startsWith": true,
}
// ValidateKey - validates a document key, used for operations on a single
// document e.g. Get, Set, Delete. The key must be non-nil with a
// non-blank Id and a valid, non-nil Collection.
// (Restructured to drop the `else` after a terminating return — Go
// early-return idiom; behavior and messages are unchanged.)
func ValidateKey(key *Key) error {
	if key == nil {
		return fmt.Errorf("provide non-nil key")
	}
	if key.Id == "" {
		return fmt.Errorf("provide non-blank key.Id")
	}
	if key.Collection == nil {
		return fmt.Errorf("provide non-nil key.Collection")
	}
	if err := ValidateCollection(key.Collection); err != nil {
		return fmt.Errorf("invalid collection for document key %s, %v", key.Id, err)
	}
	return nil
}

// ValidateCollection - validates a collection key, used for operations on a
// single document/collection e.g. Get, Set, Delete. The collection must be
// non-nil with a non-blank Name, a valid Parent key (when present) and a
// nesting depth within MaxSubCollectionDepth.
func ValidateCollection(collection *Collection) error {
	if collection == nil {
		return fmt.Errorf("provide non-nil collection")
	}
	if collection.Name == "" {
		return fmt.Errorf("provide non-blank collection.Name")
	}
	if collection.Parent != nil {
		if err := ValidateKey(collection.Parent); err != nil {
			return fmt.Errorf("invalid parent for collection %s, %v", collection.Name, err)
		}
	}
	return validateSubCollectionDepth(collection)
}
// ValidateQueryKey - Validates a key used for query operations.
// Unique from ValidateKey in that it permits blank key.Id values for
// wildcard query scenarios, e.g. querying values in a sub-collection for
// all documents in the parent collection.
// (Restructured to drop the `else` after a terminating return — Go
// early-return idiom; behavior and messages are unchanged.)
func ValidateQueryKey(key *Key) error {
	if key == nil {
		return fmt.Errorf("provide non-nil key")
	}
	if key.Collection == nil {
		return fmt.Errorf("provide non-nil key.Collection")
	}
	if err := ValidateQueryCollection(key.Collection); err != nil {
		return fmt.Errorf("invalid collection for document key %s, %v", key.Id, err)
	}
	return nil
}

// ValidateQueryCollection - Validates a collection used for query operations.
// Unique from ValidateCollection in that it calls ValidateQueryKey for the
// collection's Parent key.
func ValidateQueryCollection(collection *Collection) error {
	if collection == nil {
		return fmt.Errorf("provide non-nil collection")
	}
	if collection.Name == "" {
		return fmt.Errorf("provide non-blank collection.Name")
	}
	if collection.Parent != nil {
		if err := ValidateQueryKey(collection.Parent); err != nil {
			return fmt.Errorf("invalid parent for collection %s, %v", collection.Name, err)
		}
	}
	return validateSubCollectionDepth(collection)
}
// GetEndRangeValue - Get end range value to implement "startsWith" expression
// operator using a where clause. For example with
// sdk.Expression("pk", "startsWith", "Customer#") this translates to:
// WHERE pk >= {startRangeValue} AND pk < {endRangeValue}
// WHERE pk >= "Customer#" AND pk < "Customer$"
// (the previous doc example showed "Customer!", but '#'+1 is '$').
//
// Bug fix: an empty input previously panicked on the out-of-range slice
// expression value[:len(value)-1]; it now returns "".
// NOTE(review): a final byte of 0xFF wraps to 0x00 — confirm such keys
// cannot occur in practice.
func GetEndRangeValue(value string) string {
	if value == "" {
		return ""
	}
	prefix := value[:len(value)-1]
	last := value[len(value)-1]
	return prefix + string(last+1)
}
// ValidateExpressions - Validate the provided query expressions.
// Each expression must have a non-blank operand, an operator from
// validOperators and a non-blank value. Provider compatibility is also
// enforced: Firestore forbids inequality operators on more than one
// property, and DynamoDB restricts range operator combinations (see
// hasRangeError).
func ValidateExpressions(expressions []QueryExpression) error {
	if expressions == nil {
		return fmt.Errorf("provide non-nil query expressions")
	}
	// operand -> operator for every non-equality expression seen
	inequalityProperties := make(map[string]string)
	for _, exp := range expressions {
		if exp.Operand == "" {
			return fmt.Errorf("provide non-blank query expression operand: %v", exp)
		}
		if _, found := validOperators[exp.Operator]; !found {
			return fmt.Errorf("provide valid query expression operator [==, <, >, <=, >=, startsWith]: %v", exp.Operator)
		}
		// NOTE(review): this blank check compares Value against "" —
		// verify QueryExpression.Value's declared type makes that
		// meaningful for non-string values.
		if exp.Value == "" {
			return fmt.Errorf("provide non-blank query expression value: %v", exp)
		}
		if exp.Operator != "==" {
			inequalityProperties[exp.Operand] = exp.Operator
		}
	}
	// Firestore inequality compatibility check
	if len(inequalityProperties) > 1 {
		msg := ""
		for prop, exp := range inequalityProperties {
			if msg != "" {
				msg += ", "
			}
			msg += prop + " " + exp
		}
		// Firestore does not support inequality expressions on multiple properties.
		// Firestore requires composite key to be created at deployment time.
		return fmt.Errorf("inequality expressions on multiple properties are not supported: [ %v ]", msg)
	}
	// DynamoDB range expression compatibility check
	if err := hasRangeError(expressions); err != nil {
		return err
	}
	return nil
}
// ExpsSort adapts []QueryExpression to sort.Interface, ordering by
// Operand, then by Operator in REVERSE lexical order (so range pairs
// such as ">=" / "<=" on one operand become adjacent for hasRangeError),
// then by the string form of Value.
type ExpsSort []QueryExpression

// Len implements sort.Interface.
func (exps ExpsSort) Len() int {
	return len(exps)
}

// Less implements sort.Interface; see the type comment for the ordering.
func (exps ExpsSort) Less(i, j int) bool {
	if c := strings.Compare(exps[i].Operand, exps[j].Operand); c != 0 {
		return c < 0
	}
	// operator comparison is intentionally reversed to support range
	// expression detection
	if c := strings.Compare(exps[j].Operator, exps[i].Operator); c != 0 {
		return c < 0
	}
	iVal := fmt.Sprintf("%v", exps[i].Value)
	jVal := fmt.Sprintf("%v", exps[j].Value)
	return iVal < jVal
}

// Swap implements sort.Interface.
func (exps ExpsSort) Swap(i, j int) {
	exps[i], exps[j] = exps[j], exps[i]
}
// validateSubCollectionDepth - returns an error if the provided
// collection is nested (via Parent keys) deeper than the maximum
// supported sub-collection depth, MaxSubCollectionDepth.
func validateSubCollectionDepth(collection *Collection) error {
	depth := 0
	for c := collection; c.Parent != nil; c = c.Parent.Collection {
		depth++
	}
	if depth <= MaxSubCollectionDepth {
		return nil
	}
	return fmt.Errorf(
		"sub-collections only supported to a depth of %d, found depth of %d for collection %s",
		MaxSubCollectionDepth,
		depth,
		collection.Name,
	)
}
// DynamoDB only supports query range operands: >= AND <=
// For example: WHERE price >= 20.00 AND price <= 50.0
// hasRangeError scans for a range over a single operand that uses an
// exclusive bound (> or <), which cannot be mapped to DynamoDB's BETWEEN.
func hasRangeError(exps []QueryExpression) error {
	// Sort a copy so expressions with the same operand become adjacent;
	// the caller's slice order is preserved.
	sortedExps := make([]QueryExpression, len(exps))
	copy(sortedExps, exps)
	sort.Sort(ExpsSort(sortedExps))
	for index, exp := range sortedExps {
		if index < (len(sortedExps) - 1) {
			nextExp := sortedExps[index+1]
			// ExpsSort places ">"-style operators before "<"-style ones for
			// the same operand, so any invalid pair appears adjacently.
			if exp.Operand == nextExp.Operand &&
				((exp.Operator == ">" && nextExp.Operator == "<") ||
					(exp.Operator == ">" && nextExp.Operator == "<=") ||
					(exp.Operator == ">=" && nextExp.Operator == "<")) {
				// Range expression combination not supported with DynamoDB, must use >= and <= which maps to DynamoDB BETWEEN
				return fmt.Errorf("range expression combination not supported (use operators >= and <=) : %v", exp)
			}
		}
	}
	return nil
} | pkg/plugins/document/document.go | 0.64646 | 0.401805 | document.go | starcoder |
package op3
import (
"sort"
"github.com/mmcloughlin/ec3/efd/op3/ast"
"github.com/mmcloughlin/ec3/internal/errutil"
)
// VariableSet builds a set from a list of variables.
func VariableSet(vs []ast.Variable) map[ast.Variable]bool {
	set := make(map[ast.Variable]bool, len(vs))
	for _, v := range vs {
		set[v] = true
	}
	return set
}

// Variables returns all variables used in the program, in first-use order
// with duplicates removed.
func Variables(p *ast.Program) []ast.Variable {
	ordered := []ast.Variable{}
	seen := make(map[ast.Variable]bool)
	for _, a := range p.Assignments {
		for _, v := range ast.Variables(a.Operands()) {
			if seen[v] {
				continue
			}
			seen[v] = true
			ordered = append(ordered, v)
		}
	}
	return ordered
}
// InputSet returns the set of input variables for the given program.
// An input is any variable that is read before the program writes to it.
func InputSet(p *ast.Program) map[ast.Variable]bool {
	inputs := make(map[ast.Variable]bool)
	defined := make(map[ast.Variable]bool)
	for _, a := range p.Assignments {
		for _, read := range ast.Variables(a.RHS.Inputs()) {
			if defined[read] {
				continue
			}
			inputs[read] = true
		}
		defined[a.LHS] = true
	}
	return inputs
}

// Inputs returns input variables for the given program (unordered).
func Inputs(p *ast.Program) []ast.Variable {
	set := InputSet(p)
	vs := make([]ast.Variable, 0, len(set))
	for v := range set {
		vs = append(vs, v)
	}
	return vs
}
// SortedVariables returns a copy of the variables vs sorted in string order.
func SortedVariables(vs []ast.Variable) []ast.Variable {
	sorted := make([]ast.Variable, len(vs))
	copy(sorted, vs)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i] < sorted[j] })
	return sorted
}

// IsSSA reports whether every variable is written at most once.
func IsSSA(p *ast.Program) bool {
	written := make(map[ast.Variable]bool)
	for _, a := range p.Assignments {
		if written[a.LHS] {
			return false
		}
		written[a.LHS] = true
	}
	return true
}

// ReadOnly reports whether v is a read-only variable in the program p,
// that is, v never appears on the left-hand side of an assignment.
func ReadOnly(p *ast.Program, v ast.Variable) bool {
	for _, a := range p.Assignments {
		if a.LHS == v {
			return false
		}
	}
	return true
}
// RenameVariables applies the given variable replacements to the program p.
// A new program is returned; p itself is not modified. Variables without an
// entry in replacements pass through unchanged.
func RenameVariables(p *ast.Program, replacements map[ast.Variable]ast.Variable) *ast.Program {
	r := &ast.Program{}
	for _, a := range p.Assignments {
		var expr ast.Expression
		// Rebuild the RHS with renamed operands, one case per expression type.
		switch e := a.RHS.(type) {
		case ast.Pow:
			// The exponent N is not an operand, so only X is renamed.
			expr = ast.Pow{
				X: renamevariable(e.X, replacements),
				N: e.N,
			}
		case ast.Inv:
			expr = ast.Inv{X: renameoperand(e.X, replacements)}
		case ast.Mul:
			expr = ast.Mul{
				X: renameoperand(e.X, replacements),
				Y: renameoperand(e.Y, replacements),
			}
		case ast.Neg:
			expr = ast.Neg{X: renameoperand(e.X, replacements)}
		case ast.Add:
			expr = ast.Add{
				X: renameoperand(e.X, replacements),
				Y: renameoperand(e.Y, replacements),
			}
		case ast.Sub:
			expr = ast.Sub{
				X: renameoperand(e.X, replacements),
				Y: renameoperand(e.Y, replacements),
			}
		case ast.Cond:
			expr = ast.Cond{
				X: renamevariable(e.X, replacements),
				C: renamevariable(e.C, replacements),
			}
		case ast.Variable:
			expr = renamevariable(e, replacements)
		case ast.Constant:
			// Constants contain no variables to rename.
			expr = e
		default:
			panic(errutil.UnexpectedType(e))
		}
		r.Assignments = append(r.Assignments, ast.Assignment{
			LHS: renamevariable(a.LHS, replacements),
			RHS: expr,
		})
	}
	return r
}

// renameoperand maps op through replacements when it is a variable;
// non-variable operands are returned unchanged.
func renameoperand(op ast.Operand, replacements map[ast.Variable]ast.Variable) ast.Operand {
	v, ok := op.(ast.Variable)
	if !ok {
		return op
	}
	return renamevariable(v, replacements)
}

// renamevariable returns the replacement for v, or v itself when none is
// registered.
func renamevariable(v ast.Variable, replacements map[ast.Variable]ast.Variable) ast.Variable {
	if r, ok := replacements[v]; ok {
		return r
	}
	return v
}
// LiveSet maintains a set of live variables.
type LiveSet map[ast.Variable]bool

// NewLiveSet constructs an empty set of live variables.
func NewLiveSet() LiveSet {
	return LiveSet{}
}

// MarkLive records all variables in vs as live.
func (l LiveSet) MarkLive(vs ...ast.Variable) {
	for _, v := range vs {
		l[v] = true
	}
}

// Update the live set based on the assignment. In liveness analysis, program
// assignments should be processed in reverse: the written variable dies and
// every variable the RHS reads becomes live.
func (l LiveSet) Update(a ast.Assignment) {
	delete(l, a.LHS)
	l.MarkLive(ast.Variables(a.RHS.Inputs())...)
}
// Pare down the given program to only the operations required to produce the
// given outputs. This is liveness analysis for a single basic block.
func Pare(p *ast.Program, outputs []ast.Variable) (*ast.Program, error) {
	// The requested outputs are live at the end of the block; sweep the
	// assignments backwards, marking which ones write a live variable.
	live := NewLiveSet()
	live.MarkLive(outputs...)
	n := len(p.Assignments)
	keep := make([]bool, n)
	count := 0
	for i := n - 1; i >= 0; i-- {
		a := p.Assignments[i]
		if live[a.LHS] {
			keep[i] = true
			count++
		}
		live.Update(a)
	}
	// A forward pass preserves the original order without an explicit reversal.
	required := make([]ast.Assignment, 0, count)
	for i, a := range p.Assignments {
		if keep[i] {
			required = append(required, a)
		}
	}
	return &ast.Program{
		Assignments: required,
	}, nil
}
// InterferenceGraph records which variables interfere with each other.
type InterferenceGraph struct {
	edges map[edge]bool
}

// NewInterferenceGraph builds an empty interference graph.
func NewInterferenceGraph() *InterferenceGraph {
	return &InterferenceGraph{
		edges: make(map[edge]bool),
	}
}

// edge is an undirected edge in the interference graph. Endpoints are stored
// in sorted order (see newedge) so (x, y) and (y, x) map to the same key.
type edge struct{ X, Y ast.Variable }

// newedge builds the canonical edge between x and y.
func newedge(x, y ast.Variable) edge {
	if x > y {
		return edge{y, x}
	}
	return edge{x, y}
}

// AddInterference records that x and y interfere. A variable never interferes
// with itself.
func (g *InterferenceGraph) AddInterference(x, y ast.Variable) {
	if x == y {
		return
	}
	g.edges[newedge(x, y)] = true
}

// Interfere reports whether x and y interfere.
func (g *InterferenceGraph) Interfere(x, y ast.Variable) bool {
	// Direct indexing returns false for absent edges, so the previous
	// two-value lookup plus discard was redundant.
	return g.edges[newedge(x, y)]
}
// BuildInterferenceGraph builds the interference graph for variables in p,
// given that the provided outputs are required. Two variables interfere when
// one is written while the other is live, determined by a backwards liveness
// scan over the block.
func BuildInterferenceGraph(p *ast.Program, outputs []ast.Variable) *InterferenceGraph {
	g := NewInterferenceGraph()
	// Initially, the required outputs are live.
	live := NewLiveSet()
	live.MarkLive(outputs...)
	// Process the program in reverse order.
	n := len(p.Assignments)
	for i := n - 1; i >= 0; i-- {
		a := p.Assignments[i]
		// The output interferes with all currently live variables.
		for l := range live {
			g.AddInterference(a.LHS, l)
		}
		// Update liveness.
		live.Update(a)
	}
	return g
} | efd/op3/analysis.go | 0.700588 | 0.496765 | analysis.go | starcoder |
package personnummer
import (
"errors"
)
// County represents the counties within Sweden. This could be told from the
// serial number before 1990. See
// https://en.wikipedia.org/wiki/Personal_identity_number_(Sweden)#Format
// The naming and values of the counties is an ISO 3166-2 standard. See
// https://en.wikipedia.org/wiki/Counties_of_Sweden#Map
type County int

// One constant per historical county letter code. CountyQ and CountyQQ cover
// persons born abroad / outside Sweden, and CountyUnknown is the catch-all
// (see String below for the mapping to names).
const (
	CountyA County = iota
	CountyAB
	CountyB
	CountyC
	CountyD
	CountyE
	CountyF
	CountyG
	CountyH
	CountyI
	CountyK
	CountyL
	CountyM
	CountyN
	CountyO
	CountyP
	CountyR
	CountyS
	CountyT
	CountyU
	CountyW
	CountyX
	CountyY
	CountyZ
	CountyAC
	CountyBD
	CountyQ
	CountyQQ
	CountyUnknown
)
// String returns the name of the region where the person was born, if born
// before 1990 when this system was removed. The names are the historical
// (pre-1990) Swedish county names; values outside the declared constants
// yield the empty string.
func (c County) String() string {
	switch c {
	case CountyA, CountyAB, CountyB:
		return "Stockholms Län"
	case CountyC:
		return "Uppsala län"
	case CountyD:
		return "Södermanlands län"
	case CountyE:
		return "Östergötlands län"
	case CountyF:
		return "Jönköpings län"
	case CountyG:
		return "Kronobergs län"
	case CountyH:
		return "Kalmar län"
	case CountyI:
		return "Gotlands län"
	case CountyK:
		return "Blekinge län"
	case CountyL:
		return "Kristianstads län"
	case CountyM:
		return "Malmöhus län"
	case CountyN:
		return "Hallands län"
	case CountyO:
		return "Göteborgs och Bohus län"
	case CountyP:
		return "Älvsborgs län"
	case CountyR:
		return "Skaraborgs län"
	case CountyS:
		return "Värmlands län"
	case CountyQ:
		return "Födda utomlands"
	case CountyT:
		return "Örebro län"
	case CountyU:
		return "Västmanlands län"
	case CountyW:
		return "Kopparbergs län"
	case CountyX:
		return "Gävleborgs län"
	case CountyY:
		return "Västernorrlands län"
	case CountyZ:
		return "Jämtlands län"
	case CountyAC:
		return "Västerbottens län"
	case CountyBD:
		return "Norrbottens län"
	case CountyQQ:
		return "Outside Sweden or non Swedish citizen"
	case CountyUnknown:
		return "Unknown"
	}
	return ""
}
// CountyFromSerial will calculate the appropriate county based on a serial
// number (the three-digit birth number, 0-999). The published ranges have
// INCLUSIVE upper bounds (000-139 Stockholm, 140-159 Uppsala, ..., 930-999
// outside Sweden), so each case below uses <=; the previous strict < mapped
// every boundary serial (139, 159, ..., 999) into the wrong bucket.
// The source for these values may be found here:
// https://sv.wikipedia.org/wiki/Personnummer_i_Sverige#F%C3%B6delsenumret
func CountyFromSerial(serial int) (County, error) {
	if serial < 0 || serial > 999 {
		return County(-1), errors.New("invalid serial")
	}
	switch s := serial; {
	case s <= 139:
		return CountyA, nil
	case s <= 159:
		return CountyC, nil
	case s <= 189:
		return CountyD, nil
	case s <= 239:
		return CountyE, nil
	case s <= 269:
		return CountyF, nil
	case s <= 289:
		return CountyG, nil
	case s <= 319:
		return CountyH, nil
	case s <= 329:
		return CountyI, nil
	case s <= 349:
		return CountyK, nil
	case s <= 389:
		return CountyL, nil
	case s <= 459:
		return CountyM, nil
	case s <= 479:
		return CountyN, nil
	case s <= 549:
		return CountyO, nil
	case s <= 589:
		return CountyP, nil
	case s <= 619:
		return CountyR, nil
	case s <= 649:
		return CountyS, nil
	case s <= 659:
		return CountyQ, nil
	case s <= 689:
		return CountyT, nil
	case s <= 709:
		return CountyU, nil
	case s <= 739:
		return CountyW, nil
	case s <= 779:
		return CountyX, nil
	case s <= 819:
		return CountyY, nil
	case s <= 849:
		return CountyZ, nil
	case s <= 889:
		return CountyAC, nil
	case s <= 929:
		return CountyBD, nil
	}
	// 930-999: outside Sweden or non Swedish citizen.
	return CountyQQ, nil
} | counties.go | 0.742888 | 0.53868 | counties.go | starcoder |
package cmd
import (
"sort"
"strconv"
"strings"
"testing"
"github.com/gomodule/redigo/redis"
"github.com/stretchr/testify/assert"
)
// ExampleZSet drives sorted-set (zset) commands against a redis connection
// while mirroring the expected state locally so replies can be asserted.
// memberScores tracks, per key, the member -> score pairs expected to be
// present after the operations performed so far.
type ExampleZSet struct {
	memberScores map[string]map[string]float64
	conn         redis.Conn
}

// NewExampleZSet creates a zset helper bound to the given connection.
func NewExampleZSet(conn redis.Conn) *ExampleZSet {
	return &ExampleZSet{
		conn:         conn,
		memberScores: make(map[string]map[string]float64),
	}
}
// ZAddEqual issues "zadd key score member [score member ...]" and asserts the
// reply equals the number of members newly added according to the local
// model. Malformed input (odd argument count, unparsable score) is expected
// to fail server-side and leaves the local model untouched.
func (ez *ExampleZSet) ZAddEqual(t *testing.T, key string, values ...string) {
	msmap, ok := ez.memberScores[key]
	if !ok {
		ez.memberScores[key] = make(map[string]float64)
		msmap = ez.memberScores[key]
	}
	oldLen := len(msmap)
	req := make([]interface{}, 0, len(values))
	req = append(req, key)
	for i := range values {
		req = append(req, values[i])
	}
	if len(values)%2 != 0 {
		// Odd number of arguments: zadd must error and add nothing.
		reply, err := redis.Int(ez.conn.Do("zadd", req...))
		assert.Equal(t, oldLen, len(msmap))
		assert.Nil(t, reply)
		assert.NotNil(t, err)
		return
	}
	uniq_members := make(map[string]bool)
	for i := range values {
		if i%2 == 0 {
			// values[i] is the score, values[i+1] the member.
			// NOTE(review): when a member repeats within one call, the FIRST
			// occurrence wins in the local model — confirm this matches the
			// server's handling of duplicate members in a single zadd.
			if _, ok := uniq_members[values[i+1]]; ok {
				continue
			}
			fscore, err := strconv.ParseFloat(values[i], 64)
			if err != nil {
				// Unparsable score: expect a server-side error as well.
				reply, err := redis.Int(ez.conn.Do("zadd", req...))
				assert.Equal(t, oldLen, len(msmap))
				assert.Nil(t, reply)
				assert.NotNil(t, err)
				return
			}
			msmap[values[i+1]] = fscore
			uniq_members[values[i+1]] = true
		}
	}
	reply, err := redis.Int(ez.conn.Do("zadd", req...))
	t.Logf("reply :%v, %v, %v", reply, err, req)
	assert.Equal(t, len(msmap)-oldLen, reply)
	assert.Nil(t, err)
}
// ZAddEqualErr asserts that zadd fails with the given error message.
func (ez *ExampleZSet) ZAddEqualErr(t *testing.T, errValue string, args ...interface{}) {
	_, err := ez.conn.Do("zadd", args...)
	assert.EqualError(t, err, errValue)
}

// ZRemEqual issues "zrem key member..." and asserts the reply equals the
// number of members the local model says were actually removed.
func (ez *ExampleZSet) ZRemEqual(t *testing.T, key string, members ...string) {
	args := make([]interface{}, 0, len(members)+1)
	args = append(args, key)
	expected := 0
	msmap := ez.memberScores[key]
	for _, member := range members {
		args = append(args, member)
		// Lookups and deletes on a nil map are safe no-ops, so a missing key
		// simply counts zero removals.
		if _, present := msmap[member]; present {
			delete(msmap, member)
			expected++
		}
	}
	reply, err := redis.Int(ez.conn.Do("zrem", args...))
	assert.Equal(t, expected, reply)
	assert.Nil(t, err)
}

// ZRemEqualErr asserts that zrem fails with the given error message.
func (ez *ExampleZSet) ZRemEqualErr(t *testing.T, errValue string, args ...interface{}) {
	_, err := ez.conn.Do("zrem", args...)
	assert.EqualError(t, err, errValue)
}
// ZAnyOrderRangeEqual asserts the output of zrange/zrevrange over the index
// window [start, stop] against the locally tracked member scores. Negative
// indexes count from the end, mirroring redis semantics. When withScore is
// set, the expected output interleaves members with formatted scores.
func (ez *ExampleZSet) ZAnyOrderRangeEqual(t *testing.T, key string, start int, stop int, positiveOrder bool, withScore bool) {
	cmd := "zrange"
	if !positiveOrder {
		cmd = "zrevrange"
	}
	msmap, ok := ez.memberScores[key]
	if !ok {
		// Missing key: redis replies with an empty array. Compare against an
		// empty []string — the previous comparison against the nil map could
		// never equal the []string reply, so this branch always failed.
		reply, err := redis.Strings(ez.conn.Do(cmd, key, start, stop))
		assert.Equal(t, []string{}, reply)
		assert.Nil(t, err)
		return
	}
	if start >= len(msmap) {
		reply, err := redis.Strings(ez.conn.Do(cmd, key, start, stop))
		assert.Equal(t, []string{}, reply)
		assert.Nil(t, err)
		return
	}
	tmp := getAllOutput(msmap, positiveOrder, withScore)
	var reply []string
	var err error
	if withScore {
		reply, err = redis.Strings(ez.conn.Do(cmd, key, start, stop, "WITHSCORES"))
	} else {
		reply, err = redis.Strings(ez.conn.Do(cmd, key, start, stop))
	}
	// Normalize the window to concrete indexes the way redis does.
	if start < 0 {
		if start += len(msmap); start < 0 {
			start = 0
		}
	}
	if stop < 0 {
		if stop += len(msmap); stop < 0 {
			stop = 0
		}
	} else if stop >= len(msmap) {
		stop = len(msmap) - 1
	}
	if withScore {
		// With scores each logical entry occupies two slots.
		assert.Equal(t, tmp[2*start:2*stop+2], reply)
	} else {
		assert.Equal(t, tmp[start:stop+1], reply)
	}
	assert.Nil(t, err)
}
// ZRangeEqual asserts zrange output over [start, stop] against the local model.
func (ez *ExampleZSet) ZRangeEqual(t *testing.T, key string, start int, stop int, withScore bool) {
	ez.ZAnyOrderRangeEqual(t, key, start, stop, true, withScore)
}

// ZRangeEqualErr asserts that zrange fails with the given error message.
func (ez *ExampleZSet) ZRangeEqualErr(t *testing.T, errValue string, args ...interface{}) {
	_, err := ez.conn.Do("zrange", args...)
	assert.EqualError(t, err, errValue)
}

// ZRevRangeEqual asserts zrevrange output over [start, stop] against the local model.
func (ez *ExampleZSet) ZRevRangeEqual(t *testing.T, key string, start int, stop int, withScore bool) {
	ez.ZAnyOrderRangeEqual(t, key, start, stop, false, withScore)
}

// ZRevRangeEqualErr asserts that zrevrange fails with the given error message.
func (ez *ExampleZSet) ZRevRangeEqualErr(t *testing.T, errValue string, args ...interface{}) {
	_, err := ez.conn.Do("zrevrange", args...)
	assert.EqualError(t, err, errValue)
}
// ZRangeByScoreEqual asserts zrangebyscore output against the expected string.
func (ez *ExampleZSet) ZRangeByScoreEqual(t *testing.T, key string, start string, stop string, withScores bool, limit string, expected string) {
	ez.ZAnyOrderRangeByScoreEqual(t, key, start, stop, withScores, true, limit, expected)
}

// ZRevRangeByScoreEqual asserts zrevrangebyscore output against the expected string.
func (ez *ExampleZSet) ZRevRangeByScoreEqual(t *testing.T, key string, start string, stop string, withScores bool, limit string, expected string) {
	ez.ZAnyOrderRangeByScoreEqual(t, key, start, stop, withScores, false, limit, expected)
}

// ZAnyOrderRangeByScoreEqual issues zrangebyscore or zrevrangebyscore with the
// given bounds, optional WITHSCORES flag and optional space-separated LIMIT
// arguments, then asserts the reply equals the space-separated expected list.
func (ez *ExampleZSet) ZAnyOrderRangeByScoreEqual(t *testing.T, key string, start string, stop string, withScores bool, positiveOrder bool, limit string, expected string) {
	cmd := "zrevrangebyscore"
	if positiveOrder {
		cmd = "zrangebyscore"
	}
	args := []interface{}{key, start, stop}
	if withScores {
		args = append(args, "WITHSCORES")
	}
	if limit != "" {
		for _, limitArg := range strings.Split(limit, " ") {
			args = append(args, limitArg)
		}
	}
	reply, err := redis.Strings(ez.conn.Do(cmd, args...))
	want := []string{}
	if expected != "" {
		want = strings.Split(expected, " ")
	}
	assert.Equal(t, want, reply)
	assert.Nil(t, err)
}

// ZRangeByScoreEqualErr asserts that zrangebyscore fails with the given error message.
func (ez *ExampleZSet) ZRangeByScoreEqualErr(t *testing.T, errValue string, args ...interface{}) {
	_, err := ez.conn.Do("zrangebyscore", args...)
	assert.EqualError(t, err, errValue)
}

// ZRevRangeByScoreEqualErr asserts that zrevrangebyscore fails with the given error message.
func (ez *ExampleZSet) ZRevRangeByScoreEqualErr(t *testing.T, errValue string, args ...interface{}) {
	_, err := ez.conn.Do("zrevrangebyscore", args...)
	assert.EqualError(t, err, errValue)
}
// ZCardEqual asserts that zcard reports the same cardinality as the local model.
func (ez *ExampleZSet) ZCardEqual(t *testing.T, key string) {
	expected := len(ez.memberScores[key])
	reply, err := redis.Int(ez.conn.Do("zcard", key))
	assert.Nil(t, err)
	assert.Equal(t, expected, reply)
}

// ZCardEqualErr asserts that zcard fails with the given error message.
func (ez *ExampleZSet) ZCardEqualErr(t *testing.T, errValue string, args ...interface{}) {
	_, err := ez.conn.Do("zcard", args...)
	assert.EqualError(t, err, errValue)
}
// ZScoreEqual asserts that zscore for key/member matches the locally tracked
// score (formatted with minimal precision), or that redis returns nil when
// the member is absent.
func (ez *ExampleZSet) ZScoreEqual(t *testing.T, key string, member string) {
	reply, err := redis.String(ez.conn.Do("zscore", key, member))
	// Indexing a nil map yields the zero value with ok == false, so the two
	// previously duplicated "key missing" / "member missing" branches
	// collapse into a single lookup.
	score, ok := ez.memberScores[key][member]
	if !ok {
		assert.Equal(t, "", reply)
		assert.EqualError(t, err, "redigo: nil returned")
		return
	}
	assert.Equal(t, strconv.FormatFloat(score, 'f', -1, 64), reply)
	assert.Nil(t, err)
}

// ZScoreEqualErr asserts that zscore fails with the given error message.
func (ez *ExampleZSet) ZScoreEqualErr(t *testing.T, errValue string, args ...interface{}) {
	_, err := ez.conn.Do("zscore", args...)
	assert.EqualError(t, err, errValue)
}
// getAllOutput flattens the member->score map into the string sequence redis
// would return: members grouped by score, scores ordered ascending (or
// descending when positiveOrder is false), and members sharing a score
// ordered lexicographically in the same direction. When withScore is set,
// each member is followed by its score formatted with minimal precision.
func getAllOutput(msmap map[string]float64, positiveOrder bool, withScore bool) []string {
	// append on a nil slice value allocates on demand, so no explicit
	// per-score initialization is needed.
	scoreMembers := make(map[float64][]string, len(msmap))
	for member, score := range msmap {
		scoreMembers[score] = append(scoreMembers[score], member)
	}
	scores := make([]float64, 0, len(scoreMembers))
	for score := range scoreMembers {
		scores = append(scores, score)
	}
	if positiveOrder {
		sort.Float64s(scores)
	} else {
		sort.Sort(sort.Reverse(sort.Float64Slice(scores)))
	}
	fullOutput := make([]string, 0, 2*len(msmap))
	for _, score := range scores {
		members := scoreMembers[score]
		if positiveOrder {
			sort.Strings(members)
		} else {
			sort.Sort(sort.Reverse(sort.StringSlice(members)))
		}
		for _, member := range members {
			fullOutput = append(fullOutput, member)
			if withScore {
				fullOutput = append(fullOutput, strconv.FormatFloat(score, 'f', -1, 64))
			}
		}
	}
	return fullOutput
} | tools/autotest/cmd/zset.go | 0.590897 | 0.567457 | zset.go | starcoder |
package genomisc
import (
"compress/bzip2"
"compress/gzip"
"compress/zlib"
"io"
"github.com/krolaw/zipstream"
"github.com/xi2/xz"
)
type DataType byte
const (
DataTypeInvalid DataType = iota
DataTypeNoCompression
DataTypeGzip
DataTypeZip
DataTypeXZ
DataTypeZ
DataTypeBZip2
)
var byteCodeSigs = map[DataType][]byte{
DataTypeGzip: {0x1f, 0x8b, 0x08},
DataTypeZip: {0x50, 0x4b, 0x03, 0x04},
DataTypeXZ: {0xfd, 0x37, 0x7a, 0x58, 0x5a, 0x00},
DataTypeZ: {0x1f, 0x9d},
DataTypeBZip2: {0x42, 0x5a, 0x68},
}
// DetectDataType attempts to detect the data type of a stream by checking
// against a set of known data types. Byte code signatures from
// https://stackoverflow.com/a/19127748/199475
func DetectDataType(r io.Reader) (DataType, error) {
buff := make([]byte, 6)
if _, err := r.Read(buff); err != nil {
return DataTypeInvalid, err
}
// Match known signatures
Outer:
for dt, sig := range byteCodeSigs {
for position := range sig {
if buff[position] != sig[position] {
continue Outer
}
}
return dt, nil
}
return DataTypeNoCompression, nil
}
// MaybeDecompressReadCloserFromFile detects whether a file-like object (must
// implement io.Reader, io.Seeker, and io.Closer) is compressed with GZip, Zip,
// BZip2, XZ, or Z and decompresses it. If not, it returns the file as-is. It
// uses the seek method to ensure that the reader is reset to the starting byte
// so that it does not discard bytes.
func MaybeDecompressReadCloserFromFile(f io.ReadSeekCloser) (io.ReadCloser, error) {
	dt, err := DetectDataType(f)
	if err != nil {
		return nil, err
	}
	// Reset the reader; detection consumed the leading bytes. A failed seek
	// would silently hand a truncated stream to the decompressor, so the
	// error (previously discarded) must be checked.
	if _, err := f.Seek(0, io.SeekStart); err != nil {
		return nil, err
	}
	switch dt {
	case DataTypeGzip:
		return gzip.NewReader(f)
	case DataTypeZip:
		return &readCloserFaker{zipstream.NewReader(f)}, nil
	case DataTypeBZip2:
		return &readCloserFaker{bzip2.NewReader(f)}, nil
	case DataTypeXZ:
		reader, err := xz.NewReader(f, 0)
		if err != nil {
			return nil, err
		}
		return &readCloserFaker{reader}, nil
	case DataTypeZ:
		return zlib.NewReader(f)
	}
	// No data type detected. For now, we assume this is uncompressed.
	return f, nil
}
// readCloserFaker "upgrades" readers that don't need to be closed into
// io.ReadClosers by embedding the reader and adding a no-op Close.
type readCloserFaker struct {
	io.Reader
}

// Close is a no-op; the wrapped reader owns no resources of its own.
func (c *readCloserFaker) Close() error {
	return nil
} | detecreaddatatype.go | 0.624752 | 0.425546 | detecreaddatatype.go | starcoder |
package parse
import (
"fmt"
)
// TypeDef represents a user-defined named type.
type TypeDef struct {
	NamePos // name assigned by the user, pos and doc
	Type Type // the underlying type of the type definition.
}

// Type is an interface representing symbolic occurrences of types in VDL files.
type Type interface {
	// String returns a human-readable description of the type.
	String() string
	// Kind returns a short human-readable string describing the kind of type.
	Kind() string
	// Pos returns the position of the first character in the type.
	Pos() Pos
}

// TypeNamed captures named references to other types. Both built-in primitives
// and user-defined named types use this representation.
// The P field on this and each struct below holds the source position
// returned by the Pos method.
type TypeNamed struct {
	Name string
	P    Pos
}

// TypeEnum represents enum types.
type TypeEnum struct {
	Labels []NamePos
	P      Pos
}

// TypeArray represents array types.
type TypeArray struct {
	Len  int
	Elem Type
	P    Pos
}

// TypeList represents list types.
type TypeList struct {
	Elem Type
	P    Pos
}

// TypeSet represents set types.
type TypeSet struct {
	Key Type
	P   Pos
}

// TypeMap represents map types.
type TypeMap struct {
	Key  Type
	Elem Type
	P    Pos
}

// TypeStruct represents struct types.
type TypeStruct struct {
	Fields []*Field
	P      Pos
}

// TypeUnion represents union types.
type TypeUnion struct {
	Fields []*Field
	P      Pos
}

// TypeOptional represents optional types.
type TypeOptional struct {
	Base Type
	P    Pos
}
// Pos implementations return the position of the first character of each type.
func (t *TypeNamed) Pos() Pos    { return t.P }
func (t *TypeEnum) Pos() Pos     { return t.P }
func (t *TypeArray) Pos() Pos    { return t.P }
func (t *TypeList) Pos() Pos     { return t.P }
func (t *TypeSet) Pos() Pos      { return t.P }
func (t *TypeMap) Pos() Pos      { return t.P }
func (t *TypeStruct) Pos() Pos   { return t.P }
func (t *TypeUnion) Pos() Pos    { return t.P }
func (t *TypeOptional) Pos() Pos { return t.P }

// Kind implementations return a short human-readable kind name per type.
func (t *TypeNamed) Kind() string    { return "named" }
func (t *TypeEnum) Kind() string     { return "enum" }
func (t *TypeArray) Kind() string    { return "array" }
func (t *TypeList) Kind() string     { return "list" }
func (t *TypeSet) Kind() string      { return "set" }
func (t *TypeMap) Kind() string      { return "map" }
func (t *TypeStruct) Kind() string   { return "struct" }
func (t *TypeUnion) Kind() string    { return "union" }
func (t *TypeOptional) Kind() string { return "optional" }
func (t *TypeNamed) String() string { return t.Name }
func (t *TypeEnum) String() string {
result := "enum{"
for index, label := range t.Labels {
if index > 0 {
result += ";"
}
result += label.Name
}
return result + "}"
}
func (t *TypeArray) String() string { return fmt.Sprintf("[%v]%v", t.Len, t.Elem) }
func (t *TypeList) String() string { return fmt.Sprintf("[]%v", t.Elem) }
func (t *TypeSet) String() string { return fmt.Sprintf("set[%v]", t.Key) }
func (t *TypeMap) String() string { return fmt.Sprintf("map[%v]%v", t.Key, t.Elem) }
func (t *TypeStruct) String() string {
result := "struct{"
for index, field := range t.Fields {
if index > 0 {
result += ";"
}
result += field.Name + " " + field.Type.String()
}
return result + "}"
}
func (t *TypeUnion) String() string {
result := "union{"
for index, field := range t.Fields {
if index > 0 {
result += ";"
}
result += field.Name + " " + field.Type.String()
}
return result + "}"
}
func (t *TypeOptional) String() string { return fmt.Sprintf("?%v", t.Base) }
// String renders the definition as "(pos name type)" for debugging.
func (t *TypeDef) String() string {
	return fmt.Sprintf("(%v %v %v)", t.Pos, t.Name, t.Type)
} | x/ref/lib/vdl/parse/type.go | 0.671901 | 0.451447 | type.go | starcoder |
// Package avl implements an AVL tree.
package avl
// Item represents a value in the tree.
type Item interface {
	// Less compares whether the current item is less than the given Item.
	Less(than Item) bool
}

// Int implements the Item interface for int.
type Int int

// Less reports whether a sorts before b, which must also be an Int.
func (a Int) Less(b Item) bool {
	other := b.(Int)
	return a < other
}

// String implements the Item interface for string.
type String string

// Less reports whether a sorts before b, which must also be a String.
func (a String) Less(b Item) bool {
	other := b.(String)
	return a < other
}
// New returns a new AVL tree.
func New() *Tree {
	return &Tree{}
}

// Tree represents an AVL tree.
type Tree struct {
	length int // number of items currently stored
	root   *Node
}

// Length returns the number of items currently in the AVL tree.
func (t *Tree) Length() int {
	return t.length
}

// Root returns the root node of the AVL tree.
func (t *Tree) Root() *Node {
	return t.root
}

// Max returns the max node of the AVL tree.
func (t *Tree) Max() *Node {
	return t.root.Max()
}

// Min returns the min node of the AVL tree.
func (t *Tree) Min() *Node {
	return t.root.Min()
}

// Search searches the Item of the AVL tree.
// It returns nil when no equal item is present.
func (t *Tree) Search(item Item) Item {
	return t.search(item).Item()
}

// SearchNode searches the node of the AVL tree with the item.
func (t *Tree) SearchNode(item Item) *Node {
	return t.search(item)
}

// search walks from the root using Less comparisons; two items are treated
// as equal when neither is Less than the other.
func (t *Tree) search(item Item) *Node {
	n := t.root
	for n != nil {
		if item.Less(n.item) {
			n = n.left
		} else if n.item.Less(item) {
			n = n.right
		} else {
			return n
		}
	}
	return nil
}

// Insert inserts the item into the AVL tree. An existing equal item is
// overwritten in place without changing the length.
func (t *Tree) Insert(item Item) {
	var ok bool
	t.root, ok = t.root.insert(item)
	if ok {
		t.length++
	}
}

// Clear removes all items from the AVL tree.
func (t *Tree) Clear() {
	t.root = nil
	t.length = 0
}

// Delete deletes the node of the AVL tree with the item, if present.
func (t *Tree) Delete(item Item) {
	var ok bool
	t.root, ok = t.root.delete(item)
	if ok {
		t.length--
	}
}
// Node represents a node in the AVL tree.
type Node struct {
	height int // height of the subtree rooted here; a leaf has height 0
	left   *Node
	right  *Node
	parent *Node
	item   Item
}

// Height returns the height of this node's sub-tree.
// A nil node has height -1, so a leaf computes to 0.
func (n *Node) Height() int {
	if n == nil {
		return -1
	}
	return n.height
}

// Left returns the left child node (nil-safe).
func (n *Node) Left() *Node {
	if n == nil {
		return nil
	}
	return n.left
}

// Right returns the right child node (nil-safe).
func (n *Node) Right() *Node {
	if n == nil {
		return nil
	}
	return n.right
}

// Parent returns the parent node (nil-safe).
func (n *Node) Parent() *Node {
	if n == nil {
		return nil
	}
	return n.parent
}

// Item returns the item of this node, or nil for a nil node.
func (n *Node) Item() Item {
	if n == nil {
		return nil
	}
	return n.item
}
// Max returns the max node of this node's subtree.
func (n *Node) Max() *Node {
	if n == nil {
		return nil
	}
	// Walk to the rightmost descendant. The previous form used a for loop
	// whose body unconditionally returned — a disguised if plus a redundant
	// recursive call.
	for n.right != nil {
		n = n.right
	}
	return n
}

// Min returns the min node of this node's subtree.
func (n *Node) Min() *Node {
	if n == nil {
		return nil
	}
	for n.left != nil {
		n = n.left
	}
	return n
}

// Last returns the last node less than this node (the in-order predecessor),
// or nil when none exists.
func (n *Node) Last() *Node {
	if n == nil {
		return nil
	}
	if n.left != nil {
		return n.left.Max()
	}
	// No left subtree: climb until we arrive at a parent from its right side.
	left := n
	p := left.parent
	for p != nil && left == p.left {
		left = p
		p = left.parent
	}
	return p
}

// Next returns the next node more than this node (the in-order successor),
// or nil when none exists.
func (n *Node) Next() *Node {
	if n == nil {
		return nil
	}
	if n.right != nil {
		return n.right.Min()
	}
	// No right subtree: climb until we arrive at a parent from its left side.
	right := n
	p := right.parent
	for p != nil && right == p.right {
		right = p
		p = right.parent
	}
	return p
}
// insert adds item into the subtree rooted at n, returning the (possibly new)
// subtree root and whether a node was actually inserted (false when an equal
// item was overwritten in place).
func (n *Node) insert(item Item) (root *Node, ok bool) {
	if n == nil {
		return &Node{item: item}, true
	}
	if item.Less(n.item) {
		n.left, ok = n.left.insert(item)
		// A freshly created leaf has height 0; hook up its parent pointer.
		if n.left.Height() == 0 {
			n.left.parent = n
		}
	} else if n.item.Less(item) {
		n.right, ok = n.right.insert(item)
		if n.right.Height() == 0 {
			n.right.parent = n
		}
	} else {
		// Neither is Less: equal keys, replace the stored item in place.
		n.item = item
	}
	return n.rebalance(), ok
}

// delete removes item from the subtree rooted at n, returning the new subtree
// root and whether a node was removed.
func (n *Node) delete(item Item) (root *Node, ok bool) {
	if n == nil {
		return nil, false
	}
	if item.Less(n.item) {
		n.left, ok = n.left.delete(item)
		return n.rebalance(), ok
	} else if n.item.Less(item) {
		n.right, ok = n.right.delete(item)
		return n.rebalance(), ok
	} else {
		// Found the node to remove.
		if n.left == nil && n.right == nil {
			return nil, true
		}
		p := n.parent
		if n.right == nil {
			// Only a left child: splice it into n's place, fixing both the
			// child's parent pointer and the parent's child pointer.
			n.left.parent = p
			if p != nil {
				if n == p.left {
					p.left = n.left
				} else {
					p.right = n.left
				}
			}
			return n.left, true
		}
		if n.left == nil {
			// Mirror case: only a right child.
			n.right.parent = p
			if p != nil {
				if n == p.left {
					p.left = n.right
				} else {
					p.right = n.right
				}
			}
			return n.right, true
		}
		// Two children: replace n's item with its in-order successor (the
		// minimum of the right subtree) and unlink that node instead.
		var min *Node
		min, n.right = n.right.deleteMin()
		n.item = min.item
		return n.rebalance(), true
	}
}

// deleteMin unlinks and returns the minimum node of the subtree rooted at n,
// together with the rebalanced remainder of the subtree.
func (n *Node) deleteMin() (min *Node, parent *Node) {
	if n.left != nil {
		min, n.left = n.left.deleteMin()
		return min, n.rebalance()
	}
	// n is the minimum; its right child (if any) takes its place.
	if n.right != nil {
		n.right.parent = n.parent
	}
	return n, n.right
}
// rebalance recomputes n's height and applies the single or double rotation
// dictated by the AVL balance factor, returning the new subtree root.
func (n *Node) rebalance() *Node {
	n.updateHeight()
	balanceFactor := n.balanceFactor()
	if balanceFactor > 1 {
		// Right-heavy; a right-left shape needs a double rotation.
		if n.right.balanceFactor() < 0 {
			n.right = n.right.rotateRight()
		}
		return n.rotateLeft()
	} else if balanceFactor < -1 {
		// Left-heavy; a left-right shape needs a double rotation.
		if n.left.balanceFactor() > 0 {
			n.left = n.left.rotateLeft()
		}
		return n.rotateRight()
	}
	return n
}

// updateHeight sets n's height from its children's heights.
func (n *Node) updateHeight() {
	n.height = max(n.left.Height(), n.right.Height()) + 1
}

// balanceFactor is right height minus left height: positive means
// right-heavy, negative means left-heavy, and a nil node is balanced.
func (n *Node) balanceFactor() int {
	if n == nil {
		return 0
	}
	return n.right.Height() - n.left.Height()
}

// rotateLeft rotates n down to the left, promoting n.right. Parent pointers
// and heights are maintained for every node that moves.
func (n *Node) rotateLeft() *Node {
	newParent := n.right
	n.right = newParent.left
	if newParent.left != nil {
		newParent.left.parent = n
	}
	p := n.parent
	if p != nil {
		if n == p.left {
			p.left = newParent
		} else {
			p.right = newParent
		}
	}
	newParent.parent = p
	n.parent = newParent
	newParent.left = n
	n.updateHeight()
	newParent.updateHeight()
	return newParent
}

// rotateRight is the mirror of rotateLeft, promoting n.left.
func (n *Node) rotateRight() *Node {
	newParent := n.left
	n.left = newParent.right
	if newParent.right != nil {
		newParent.right.parent = n
	}
	p := n.parent
	if p != nil {
		if n == p.left {
			p.left = newParent
		} else {
			p.right = newParent
		}
	}
	newParent.parent = p
	n.parent = newParent
	newParent.right = n
	n.updateHeight()
	newParent.updateHeight()
	return newParent
}

// max returns the larger of two ints.
func max(a, b int) int {
	if a > b {
		return a
	}
	return b
} | avl.go | 0.893988 | 0.527073 | avl.go | starcoder |
package dataset
import (
"fmt"
"io/ioutil"
"path"
"regexp"
"strings"
"github.com/araddon/dateparse"
"github.com/pkg/errors"
"github.com/uncharted-distil/distil-compute/model"
"github.com/uncharted-distil/distil-compute/primitive/compute"
"github.com/uncharted-distil/gdal"
log "github.com/unchartedsoftware/plog"
"github.com/uncharted-distil/distil/api/env"
"github.com/uncharted-distil/distil/api/serialization"
"github.com/uncharted-distil/distil/api/util"
"github.com/uncharted-distil/distil/api/util/imagery"
)
const (
	// errorLogLimit caps how many per-item errors are logged while processing
	// a dataset (usage is outside this excerpt — confirm against callers).
	errorLogLimit = 50
)

var (
	// satTypeMap normalizes raw file extensions to a canonical image type name.
	satTypeMap = map[string]string{
		"tif":  "tiff",
		"tiff": "tiff",
	}
	// satTypeContentMap lists, per canonical type, the extensions it covers.
	satTypeContentMap = map[string][]string{
		"tiff": {"tif", "tiff"},
	}
	// bandRegex matches band suffixes such as "_B05." or "_B8A." in filenames.
	bandRegex = regexp.MustCompile(`_B[0-9][0-9a-zA-Z][.]`)
	// timestampRegex matches compact timestamps: 8 date digits, "T", then
	// 4-6 time digits (e.g. "20200101T1030" or "20200101T103059").
	timestampRegex = regexp.MustCompile(`\d{8}T\d{4,6}`)
	// eurosat drops cloud layer, has the 8A layer and offsets everything else.
	eurosatBandMapping = map[int]string{
		10: "",
		13: "8A",
	}
)

// RemoteSensingDatasetProperties lists the data properties of a remote sensing dataset.
type RemoteSensingDatasetProperties struct {
	MultiClass     bool
	MultiTimestamp bool
}

// Satellite captures the data in a satellite (remote sensing) dataset.
type Satellite struct {
	Dataset           string `json:"dataset"`
	ImageType         string `json:"imageType"`
	RawFilePath       string `json:"rawFilePath"`
	ExtractedFilePath string `json:"extractedFilePath"`
	// definitiveTypes caches variable metadata (unexported; populated outside
	// this excerpt).
	definitiveTypes []*model.Variable
}

// BoundingBox is a box delineated by four corners.
type BoundingBox struct {
	UpperLeft  *Point
	UpperRight *Point
	LowerLeft  *Point
	LowerRight *Point
}

// Point represents a coordinate in 2d space.
type Point struct {
	X float64
	Y float64
}
// ToString writes out the bounding box to a string.
func (b *BoundingBox) String() string {
coords := []string{
b.pointToString(b.LowerLeft, ","),
b.pointToString(b.UpperLeft, ","),
b.pointToString(b.UpperRight, ","),
b.pointToString(b.LowerRight, ","),
}
return strings.Join(coords, ",")
}
// ToGeometryString writes out the bounding box to a geometry string (POSTGIS).
// The lower-left corner is repeated at the end to close the polygon ring.
func (b *BoundingBox) ToGeometryString() string {
	coords := []string{
		b.pointToString(b.LowerLeft, " "),
		b.pointToString(b.UpperLeft, " "),
		b.pointToString(b.UpperRight, " "),
		b.pointToString(b.LowerRight, " "),
		b.pointToString(b.LowerLeft, " "),
	}

	return fmt.Sprintf("POLYGON((%s))", strings.Join(coords, ","))
}
// pointToString renders a point as "X<separator>Y". A nil point renders as
// the bare separator so the joined output keeps its field positions.
func (b *BoundingBox) pointToString(point *Point, separator string) string {
	if point != nil {
		return fmt.Sprintf("%f%s%f", point.X, separator, point.Y)
	}
	return separator
}
// NewSatelliteDataset creates a new satelitte dataset from geotiff files,
// expanding the zip archive at rawFilePath before wrapping it.
func NewSatelliteDataset(dataset string, imageType string, rawFilePath string) (*Satellite, error) {
	expandedInfo, err := ExpandZipDataset(rawFilePath, dataset)
	if err != nil {
		return nil, err
	}

	return &Satellite{
		Dataset:           dataset,
		ImageType:         imageType,
		RawFilePath:       expandedInfo.RawFilePath,
		ExtractedFilePath: expandedInfo.ExtractedFilePath,
	}, nil
}
// NewSatelliteDatasetFromExpanded creates a new satelitte dataset from geotiff
// files where the archive has already been expanded; no expansion is attempted.
func NewSatelliteDatasetFromExpanded(dataset string, imageType string, rawFilePath string, extractedFilePath string) (*Satellite, error) {
	return &Satellite{
		Dataset:           dataset,
		ImageType:         imageType,
		RawFilePath:       rawFilePath,
		ExtractedFilePath: extractedFilePath,
	}, nil
}
// CreateDataset processes the raw satellite dataset and creates a raw D3M dataset.
// Each label folder under ExtractedFilePath is walked; every supported image is
// copied (splitting multi-band files), and one CSV row is emitted per band image.
// The function also builds the D3M metadata describing the resulting table.
func (s *Satellite) CreateDataset(rootDataPath string, datasetName string, config *env.Config) (*serialization.RawDataset, error) {
	if datasetName == "" {
		datasetName = s.Dataset
	}

	errorLogCount := 0
	outputDatasetPath := rootDataPath
	dataFilePath := path.Join(outputDatasetPath, compute.D3MDataFolder, compute.D3MLearningData)

	imageFolders, err := getLabelFolders(s.ExtractedFilePath)
	if err != nil {
		return nil, err
	}

	props := s.readProperties(imageFolders)

	// the label column is only emitted for multi-class datasets
	labelHeader := "label"
	expectedHeaders := []string{model.D3MIndexFieldName, "image_file", "group_id", "band", "timestamp", "coordinates", "geo_coordinates"}
	if props.MultiClass {
		expectedHeaders = append(expectedHeaders, labelHeader)
	}
	headerNames := append([]string{}, expectedHeaders...)

	csvData := make([][]string, 0)
	csvData = append(csvData, headerNames)
	mediaFolder := util.GetUniqueFolder(path.Join(outputDatasetPath, "media"))

	// need to keep track of d3m Index values since they are shared for a whole group
	d3mIDs := make(map[string]int)
	d3mIDRunning := 1

	// the folder name represents the label to apply for all containing images
	errorCount := 0
	// downgraded to a plain string type if any timestamp fails to parse
	timestampType := model.DateTimeType
	indicesToKeep := getIndicesToKeep(expectedHeaders, headerNames)
	for _, imageFolder := range imageFolders {
		log.Infof("processing satellite image folder '%s'", imageFolder)
		label := path.Base(imageFolder)

		imageFiles, err := ioutil.ReadDir(imageFolder)
		if err != nil {
			return nil, err
		}

		// copy images while building the csv data
		log.Infof("building csv data")
		for _, imageFile := range imageFiles {
			imageFilename := imageFile.Name()
			imageFilenameFull := path.Join(imageFolder, imageFilename)

			ok := verifySatelliteImage(imageFilenameFull, s.ImageType)
			if !ok {
				logWarning(errorCount, "'%s' is not a valid or supported satellite image", imageFilenameFull)
				errorCount++
				continue
			}

			filesToProcess, err := copyAndSplitMultiBandImage(imageFilenameFull, s.ImageType, mediaFolder)
			if err != nil {
				errorLogCount++
				if errorLogCount < 5 {
					log.Warn(err)
				}
				continue
			}

			// one CSV row per (split) band image
			for _, targetImageFilename := range filesToProcess {
				coordinates, err := extractCoordinates(targetImageFilename)
				if err != nil {
					logWarning(errorCount, "unable to extract coordinates from '%s': %v", targetImageFilename, err)
					errorCount++
					continue
				}

				band, err := extractBand(targetImageFilename)
				if err != nil {
					logWarning(errorCount, "unable to extract band from '%s': %v", targetImageFilename, err)
					errorCount++
					continue
				}

				timestamp, err := extractTimestamp(targetImageFilename)
				if err != nil {
					logWarning(errorCount, "unable to extract timestamp from '%s': %v", targetImageFilename, err)
					errorCount++
					timestampType = model.StringType
				}

				// all bands of a tile share one group id and therefore one d3m index
				groupID := extractGroupID(targetImageFilename, props)
				d3mID := d3mIDs[groupID]
				if d3mID == 0 {
					d3mID = d3mIDRunning
					d3mIDRunning = d3mIDRunning + 1
					d3mIDs[groupID] = d3mID
				}

				// remove values that are not needed based on the headerNames (expects values, expectedHeaders and headerNames to be IN ORDER)
				csvLine := removeMissingValues(indicesToKeep, []string{fmt.Sprintf("%d", d3mID), path.Base(targetImageFilename), groupID, band, timestamp, coordinates.String(), coordinates.ToGeometryString(), label})
				csvData = append(csvData, csvLine)
			}
		}
	}
	log.Infof("parsed all input data creating %d rows of data and %d errors", len(csvData)-1, errorCount)

	// create the dataset schema doc
	datasetID := model.NormalizeDatasetID(datasetName)
	meta := model.NewMetadata(datasetName, datasetName, "", datasetID)
	dr := model.NewDataResource(compute.DefaultResourceID, model.ResTypeTable, map[string][]string{compute.D3MResourceFormat: {"csv"}})
	dr.ResPath = dataFilePath

	// declare one variable per emitted column, in CSV column order
	varCounter := 0
	dr.Variables = append(dr.Variables,
		model.NewVariable(varCounter, model.D3MIndexFieldName, model.D3MIndexFieldName, model.D3MIndexFieldName,
			model.D3MIndexFieldName, model.IntegerType, model.IntegerType, "D3M index",
			[]string{model.RoleMultiIndex}, []string{model.VarDistilRoleIndex}, nil, dr.Variables, false),
	)
	varCounter++
	dr.Variables = append(dr.Variables,
		model.NewVariable(varCounter, "image_file", "image_file", "image_file", "image_file", model.MultiBandImageType,
			model.StringType, "Reference to image file", []string{"attribute"},
			[]string{model.VarDistilRoleData}, map[string]interface{}{"resID": "0", "resObject": "item"}, dr.Variables, false))
	varCounter++
	dr.Variables = append(dr.Variables,
		model.NewVariable(varCounter, "group_id", "group_id", "group_id", "group_id", model.StringType,
			model.StringType, "ID linking all bands of a particular image set together", []string{"attribute", "suggestedGroupingKey"},
			[]string{model.VarDistilRoleGrouping}, nil, dr.Variables, false))
	varCounter++
	dr.Variables = append(dr.Variables,
		model.NewVariable(varCounter, "band", "band", "band", "band", model.StringType,
			model.StringType, "Image band", []string{"attribute"},
			[]string{model.VarDistilRoleData}, nil, dr.Variables, false))
	varCounter++
	dr.Variables = append(dr.Variables,
		model.NewVariable(varCounter, "timestamp", "timestamp", "timestamp", "timestamp", timestampType,
			model.StringType, "Image timestamp", []string{"attribute"},
			[]string{model.VarDistilRoleData, model.VarDistilRoleGroupingSupplemental}, nil, dr.Variables, false))
	varCounter++
	dr.Variables = append(dr.Variables,
		model.NewVariable(varCounter, "coordinates", "coordinates", "coordinates", "coordinates", model.RealVectorType,
			model.RealVectorType, "Coordinates of the image defined by a bounding box", []string{"attribute"},
			[]string{model.VarDistilRoleData}, nil, dr.Variables, false))
	varCounter++
	dr.Variables = append(dr.Variables,
		model.NewVariable(varCounter, "__geo_coordinates", "coordinates", "geo_coordinates", "__geo_coordinates", model.GeoBoundsType,
			model.GeoBoundsType, "postgis structure for the bounding box coordinates of the tile", []string{},
			[]string{model.VarDistilRoleData}, nil, dr.Variables, false))
	varCounter++
	// the label variable exists only when the label column was emitted above
	if len(expectedHeaders) == len(headerNames) {
		dr.Variables = append(dr.Variables,
			model.NewVariable(varCounter, "label", "label", "label", "label", model.CategoricalType,
				model.StringType, "Label of the image", []string{"suggestedTarget"},
				[]string{model.VarDistilRoleData}, nil, dr.Variables, false))
	}

	// create the data resource for the referenced images
	imageTypeLookup := satTypeMap[s.ImageType]
	refDR := model.NewDataResource("0", model.ResTypeImage, map[string][]string{fmt.Sprintf("image/%s", imageTypeLookup): satTypeContentMap[imageTypeLookup]})
	refDR.ResPath = mediaFolder
	refDR.IsCollection = true

	meta.DataResources = []*model.DataResource{refDR, dr}
	s.definitiveTypes = dr.Variables

	return &serialization.RawDataset{
		ID:              datasetID,
		Name:            datasetName,
		Data:            csvData,
		Metadata:        meta,
		DefinitiveTypes: true,
	}, nil
}
// readProperties inspects the label folders to derive dataset-level
// properties: more than one folder means a multi-class dataset, and two
// distinct image timestamps mean a multi-timestamp dataset.
func (s *Satellite) readProperties(imageFolders []string) *RemoteSensingDatasetProperties {
	props := &RemoteSensingDatasetProperties{
		MultiClass: len(imageFolders) > 1,
	}

	// cycle through image names until two distinct timestamps are found
	firstTimestamp := ""
	for _, imageFolder := range imageFolders {
		imageFiles, err := ioutil.ReadDir(imageFolder)
		if err != nil {
			// best effort: an unreadable folder ends the scan
			break
		}
		for _, imageFile := range imageFiles {
			// parse errors yield an empty timestamp, which still participates
			// in the comparison below (matching prior behavior)
			timestamp, _ := extractTimestamp(imageFile.Name())
			if firstTimestamp == "" {
				firstTimestamp = timestamp
			} else if firstTimestamp != timestamp {
				// a second distinct timestamp is enough; the result cannot
				// change, so stop scanning the remaining folders
				props.MultiTimestamp = true
				return props
			}
		}
	}

	return props
}
// GetDefinitiveTypes returns the variables of the learning-data resource as
// recorded by the most recent CreateDataset call (nil before any call).
func (s *Satellite) GetDefinitiveTypes() []*model.Variable {
	return s.definitiveTypes
}
// CleanupTempFiles removes the extracted archive contents unless they live
// under the public path (in which case they are kept).
func (s *Satellite) CleanupTempFiles() {
	if !util.IsInDirectory(env.GetPublicPath(), s.ExtractedFilePath) {
		util.Delete(s.ExtractedFilePath)
	}
}
// removeMissingValues projects values down to only the entries whose
// positions appear in indices (as produced by getIndicesToKeep), preserving
// their order. The indices must be valid positions within values.
func removeMissingValues(indices []int, values []string) []string {
	// preallocate: the result has exactly one entry per kept index
	result := make([]string, 0, len(indices))
	for _, idx := range indices {
		result = append(result, values[idx])
	}

	return result
}
// getIndicesToKeep returns the positions (within expectedHeaders) of every
// expected header that is actually present in headers, in expectedHeaders
// order.
func getIndicesToKeep(expectedHeaders []string, headers []string) []int {
	// build a membership set of the headers that exist
	present := make(map[string]struct{}, len(headers))
	for _, header := range headers {
		present[header] = struct{}{}
	}

	// keep the index of every expected header found in the set
	kept := []int{}
	for i, header := range expectedHeaders {
		if _, ok := present[header]; ok {
			kept = append(kept, i)
		}
	}

	return kept
}
// verifySatelliteImage reports whether filename refers to a supported
// satellite image type, falling back to defaultType when the file has no
// extension.
func verifySatelliteImage(filename string, defaultType string) bool {
	imageType := defaultType
	if ext := path.Ext(filename); len(ext) > 0 {
		// drop the leading "." from the extension
		imageType = ext[1:]
	}

	return satTypeMap[imageType] != ""
}
// extractBand pulls the lower-cased band identifier (e.g. "02" or "8a") out
// of a filename containing a "_Bxx." suffix, returning an error when no band
// marker is present.
func extractBand(filename string) (string, error) {
	match := bandRegex.FindString(filename)
	if match == "" {
		return "", errors.New("unable to extract band from filename")
	}

	// strip the leading "_B" and the trailing "." before lower-casing
	return strings.ToLower(match[2 : len(match)-1]), nil
}
// extractTimestamp pulls the acquisition timestamp (e.g. "20190101T1015")
// out of a filename and normalizes it to a "YYYY-MM-DD hh:mm:ss" string.
func extractTimestamp(filename string) (string, error) {
	timestampRaw := timestampRegex.Find([]byte(filename))
	if len(timestampRaw) == 0 {
		// bug fix: this previously reported "unable to extract band from filename"
		return "", errors.New("unable to extract timestamp from filename")
	}

	parsed, err := dateparse.ParseAny(strings.Replace(string(timestampRaw), "T", "", -1))
	if err != nil {
		return "", errors.Wrapf(err, "unable to parse timestamp")
	}

	// NOTE(review): "03" formats the hour on a 12-hour clock with no AM/PM
	// marker, so afternoon acquisitions collapse onto morning times; "15"
	// (24-hour) is probably intended — confirm before changing stored data.
	return parsed.Format("2006-01-02 03:04:05"), nil
}
// extractGroupID derives the identifier shared by all band files of one tile:
// the base filename with its band marker removed, its extension stripped and
// — for single-timestamp datasets — its timestamp removed as well.
func extractGroupID(filename string, props *RemoteSensingDatasetProperties) string {
	bandRaw := bandRegex.Find([]byte(filename))
	adjustedFilename := path.Base(filename)
	if len(bandRaw) > 0 {
		// collapse "_Bxx." back to "." so sibling bands collide on the same id
		adjustedFilename = strings.Replace(adjustedFilename, string(bandRaw), ".", 1)
	}

	// remove the timestamp from the group id if there is only one timestamp.
	if !props.MultiTimestamp {
		timestampRaw := timestampRegex.Find([]byte(adjustedFilename))
		if len(timestampRaw) > 0 {
			timestampString := fmt.Sprintf("_%s", string(timestampRaw))
			adjustedFilename = strings.Replace(adjustedFilename, timestampString, "", 1)
		}
	}

	adjustedFilename = strings.TrimSuffix(adjustedFilename, path.Ext(adjustedFilename))

	return adjustedFilename
}
// extractCoordinates opens a geotiff, computes its bounding box from the
// raster size and geotransform, and reprojects the corners into EPSG:4326
// (WGS 84 lon/lat).
func extractCoordinates(filename string) (*BoundingBox, error) {
	ds, err := gdal.OpenEx(filename, gdal.OFReadOnly, []string{"GTIFF"}, nil, nil)
	if err != nil {
		return nil, errors.Wrap(err, "unable to open geotiff file")
	}
	defer ds.Close()

	width := float64(ds.RasterXSize())
	height := float64(ds.RasterYSize())

	// corners in the source projection, derived from the affine geotransform
	gt := ds.GeoTransform()
	minX := gt[0]
	minY := gt[3] + width*gt[4] + height*gt[5]
	maxX := gt[0] + width*gt[1] + height*gt[2]
	maxY := gt[3]

	source := gdal.CreateSpatialReference("")
	err = source.FromWKT(ds.Projection())
	if err != nil {
		return nil, errors.Wrap(err, "unable to create source spatial reference from projection")
	}

	target := gdal.CreateSpatialReference("")
	err = target.FromEPSG(4326)
	if err != nil {
		return nil, errors.Wrap(err, "unable to create source spatial reference EPSG code")
	}

	// transform both corner points in place; the final slice is the unused z axis
	pointsX := []float64{minX, maxX}
	pointsY := []float64{minY, maxY}
	transform := gdal.CreateCoordinateTransform(source, target)
	success := transform.Transform(len(pointsX), pointsX, pointsY, []float64{0, 0})
	if !success {
		return nil, errors.New("unable to transform points")
	}
	defer transform.Destroy()

	// NOTE(review): source and target spatial references are never destroyed
	// here — confirm whether gdal requires explicit cleanup for them.
	return &BoundingBox{
		LowerLeft: &Point{
			X: pointsX[0],
			Y: pointsY[0],
		},
		UpperLeft: &Point{
			X: pointsX[0],
			Y: pointsY[1],
		},
		UpperRight: &Point{
			X: pointsX[1],
			Y: pointsY[1],
		},
		LowerRight: &Point{
			X: pointsX[1],
			Y: pointsY[0],
		},
	}, nil
}
// logWarning writes a formatted warning while fewer than errorLogLimit
// warnings have been emitted; at exactly the limit it logs a final notice,
// and beyond it stays silent.
func logWarning(currentCount int, warning string, params ...interface{}) {
	switch {
	case currentCount < errorLogLimit:
		log.Warnf(warning, params...)
	case currentCount == errorLogLimit:
		log.Warnf("reached error log limit (%d) so no further parsing errors will be logged", errorLogLimit)
	}
}
// copyAndSplitMultiBandImage copies a geotiff into outputFolder, splitting
// multi-band files into one file per band, and returns the resulting file
// paths. Single-band files are copied verbatim (normalizing the extension).
func copyAndSplitMultiBandImage(imageFilename string, imageType string, outputFolder string) ([]string, error) {
	files := make([]string, 0)

	// open file
	dataset, err := gdal.Open(imageFilename, gdal.ReadOnly)
	if err != nil {
		return nil, errors.Wrapf(err, "unable to load geotiff")
	}
	defer dataset.Close()

	// check number of bands
	bandCount := dataset.RasterCount()
	if bandCount == 1 {
		// only one band means a simple copy of the file
		targetImageFilename := path.Base(imageFilename)
		extension := path.Ext(targetImageFilename)
		if extension != fmt.Sprintf(".%s", imageType) {
			// normalize the extension to the configured image type
			targetImageFilename = fmt.Sprintf("%s.%s", strings.TrimSuffix(targetImageFilename, extension), imageType)
		}
		targetImageFilename = util.GetUniqueName(path.Join(outputFolder, targetImageFilename))

		err := util.CopyFile(imageFilename, targetImageFilename)
		if err != nil {
			return nil, errors.Wrap(err, "unable to copy image file")
		}
		files = append(files, targetImageFilename)
	} else {
		// multiband so need to split it into separate files
		files, err = imagery.SplitMultiBandImage(dataset, outputFolder, eurosatBandMapping)
		if err != nil {
			return nil, err
		}
	}

	return files, nil
}
// CreateSatelliteGrouping dumps the satellite grouping structure into a map.
// It assumes that the dataset has the same structure as during upload.
func CreateSatelliteGrouping() map[string]interface{} {
	// assume dataset structure matches what would be created during ingest
	return map[string]interface{}{
		"bandCol":  "band",
		"idCol":    "group_id",
		"imageCol": "image_file",
		"type":     model.MultiBandImageType,
		"hidden":   []string{"image_file", "band", "group_id"},
	}
}
// CreateGeoBoundsGrouping dumps the geobounds grouping structure into a map.
// It assumes that the dataset has the same structure as during upload.
func CreateGeoBoundsGrouping() map[string]interface{} {
	return map[string]interface{}{
		"type":           model.GeoBoundsType,
		"coordinatesCol": "coordinates",
		"polygonCol":     "__geo_coordinates",
		"hidden":         []string{"coordinates", "__geo_coordinates"},
	}
}
// GetSatelliteIndexFields lists the fields to index for satellite datasets.
func GetSatelliteIndexFields() []string {
	return []string{"band", "__geo_coordinates"}
} | api/dataset/satellite.go | 0.655777 | 0.463141 | satellite.go | starcoder
package jsonlogic
import (
"fmt"
"math"
)
// AddOpLessThan adds "<" operation to the JSONLogic instance. Param restriction:
//   - At least two params.
//   - Must be evaluated to json primitives.
//   - If comparing numerics, then params must be able to convert to numeric. (See ToNumeric)
func AddOpLessThan(jl *JSONLogic) {
	jl.AddOperation(string(LT), opCompare(LT))
}

// AddOpLessEqual adds "<=" operation to the JSONLogic instance. Param restriction: the same as "<".
func AddOpLessEqual(jl *JSONLogic) {
	jl.AddOperation(string(LE), opCompare(LE))
}

// AddOpGreaterThan adds ">" operation to the JSONLogic instance. Param restriction: the same as "<".
func AddOpGreaterThan(jl *JSONLogic) {
	jl.AddOperation(string(GT), opCompare(GT))
}

// AddOpGreaterEqual adds ">=" operation to the JSONLogic instance. Param restriction: the same as "<".
func AddOpGreaterEqual(jl *JSONLogic) {
	jl.AddOperation(string(GE), opCompare(GE))
}
// opCompare builds the Operation implementing a comparison for the given
// symbol. With two params it evaluates params[0] symbol params[1]; with three
// it performs the jsonlogic "between" check (params[0] symbol params[1] AND
// params[1] symbol params[2]). Params beyond the third are ignored.
//
// ref:
// - https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Less_than
func opCompare(symbol CompSymbol) Operation {
	return func(apply Applier, params []interface{}, data interface{}) (res interface{}, err error) {
		if len(params) < 2 {
			return nil, fmt.Errorf("%s: expect at least two params", symbol)
		}
		params, err = ApplyParams(apply, params, data)
		if err != nil {
			return
		}

		r0, err := CompareValues(symbol, params[0], params[1])
		if err != nil {
			return nil, fmt.Errorf("%s: %s", symbol, err.Error())
		}

		// the optional third param turns the comparison into a "between" check
		var r1 = true
		if len(params) > 2 {
			r1, err = CompareValues(symbol, params[1], params[2])
			if err != nil {
				return nil, fmt.Errorf("%s: %s", symbol, err.Error())
			}
		}

		return r0 && r1, nil
	}
}
// AddOpMin adds "min" operation to the JSONLogic instance. Param restriction:
//   - Must be evaluated to json primitives that can convert to numeric.
func AddOpMin(jl *JSONLogic) {
	jl.AddOperation("min", opMin)
}

// opMin evaluates every param against data and returns the smallest numeric
// value seen, or nil when no params are supplied.
func opMin(apply Applier, params []interface{}, data interface{}) (res interface{}, err error) {
	for _, param := range params {
		evaluated, err := apply(param, data)
		if err != nil {
			return nil, err
		}
		value, err := ToNumeric(evaluated)
		if err != nil {
			return nil, err
		}
		if res == nil || value < res.(float64) {
			res = value
		}
	}
	return
}
// AddOpMax adds "max" operation to the JSONLogic instance. Param restriction: the same as "and".
func AddOpMax(jl *JSONLogic) {
	jl.AddOperation("max", opMax)
}

// opMax evaluates every param against data and returns the largest numeric
// value seen, or nil when no params are supplied.
func opMax(apply Applier, params []interface{}, data interface{}) (res interface{}, err error) {
	for _, param := range params {
		evaluated, err := apply(param, data)
		if err != nil {
			return nil, err
		}
		value, err := ToNumeric(evaluated)
		if err != nil {
			return nil, err
		}
		if res == nil || value > res.(float64) {
			res = value
		}
	}
	return
}
// AddOpAdd adds "+" operation to the JSONLogic instance. Param restriction:
//   - Must be evaluated to json primitives that can convert to numeric.
func AddOpAdd(jl *JSONLogic) {
	jl.AddOperation("+", opAdd)
}

// opAdd sums all params (0 when there are none), rejecting an infinite total.
func opAdd(apply Applier, params []interface{}, data interface{}) (res interface{}, err error) {
	total := float64(0)
	for _, param := range params {
		evaluated, err := apply(param, data)
		if err != nil {
			return nil, err
		}
		value, err := ToNumeric(evaluated)
		if err != nil {
			return nil, err
		}
		total += value
	}

	if math.IsInf(total, 0) {
		return nil, fmt.Errorf("+: got -Inf/+Inf result")
	}
	return total, nil
}
// AddOpMul adds "*" operation to the JSONLogic instance. Param restriction:
//   - At least one param.
//   - Must be evaluated to json primitives that can convert to numeric.
func AddOpMul(jl *JSONLogic) {
	jl.AddOperation("*", opMul)
}

// opMul multiplies all params together, rejecting an infinite product.
func opMul(apply Applier, params []interface{}, data interface{}) (res interface{}, err error) {
	if len(params) < 1 {
		return nil, fmt.Errorf("*: expect at least one param")
	}

	product := float64(1)
	for _, param := range params {
		evaluated, err := apply(param, data)
		if err != nil {
			return nil, err
		}
		value, err := ToNumeric(evaluated)
		if err != nil {
			return nil, err
		}
		product *= value
	}

	if math.IsInf(product, 0) {
		return nil, fmt.Errorf("*: got -Inf/+Inf result")
	}
	return product, nil
}
// AddOpMinus adds "-" operation to the JSONLogic instance. Param restriction:
//   - At least one param.
//   - Must be evaluated to json primitives that can convert to numeric.
func AddOpMinus(jl *JSONLogic) {
	jl.AddOperation("-", opMinus)
}

// opMinus implements unary negation for a single param and subtraction of the
// first two params otherwise; any params beyond the second are ignored.
func opMinus(apply Applier, params []interface{}, data interface{}) (res interface{}, err error) {
	switch len(params) {
	case 0:
		return nil, fmt.Errorf("-: expect at least one param")
	case 1:
		// unary minus: negate the single evaluated param
		r, err := apply(params[0], data)
		if err != nil {
			return nil, err
		}
		n, err := ToNumeric(r)
		if err != nil {
			return nil, err
		}
		return -n, nil
	default:
		params, err := ApplyParams(apply, params, data)
		if err != nil {
			return nil, err
		}
		left, err := ToNumeric(params[0])
		if err != nil {
			return nil, err
		}
		right, err := ToNumeric(params[1])
		if err != nil {
			return nil, err
		}
		r := left - right
		if math.IsInf(r, 0) {
			return nil, fmt.Errorf("-: got -Inf/+Inf result")
		}
		return r, nil
	}
}
// AddOpDiv adds "/" operation to the JSONLogic instance. Param restriction:
//   - At least two params.
//   - Must be evaluated to json primitives that can convert to numeric.
func AddOpDiv(jl *JSONLogic) {
	jl.AddOperation("/", opDiv)
}

// opDiv divides the first param by the second; any params beyond the second
// are ignored. A nonzero/0 division produces +-Inf and is rejected.
// NOTE(review): 0/0 yields NaN, which passes the IsInf check and is returned
// as-is — confirm whether that is intended.
func opDiv(apply Applier, params []interface{}, data interface{}) (res interface{}, err error) {
	if len(params) < 2 {
		return nil, fmt.Errorf("/: expect at least two params")
	}
	params, err = ApplyParams(apply, params, data)
	if err != nil {
		return
	}

	left, err := ToNumeric(params[0])
	if err != nil {
		return nil, err
	}
	right, err := ToNumeric(params[1])
	if err != nil {
		return nil, err
	}
	r := left / right
	if math.IsInf(r, 0) {
		return nil, fmt.Errorf("/: got -Inf/+Inf result")
	}
	return r, nil
}
// AddOpMod adds "%" operation to the JSONLogic instance. Param restriction:
//   - At least two params.
//   - Must be evaluated to json primitives that can convert to numeric.
func AddOpMod(jl *JSONLogic) {
	jl.AddOperation("%", opMod)
}

// opMod computes the floating-point remainder of the first two params
// (math.Mod semantics); params beyond the second are ignored. A zero divisor
// yields NaN, which is rejected.
func opMod(apply Applier, params []interface{}, data interface{}) (res interface{}, err error) {
	if len(params) < 2 {
		return nil, fmt.Errorf("%%: expect at least two params")
	}
	params, err = ApplyParams(apply, params, data)
	if err != nil {
		return
	}

	left, err := ToNumeric(params[0])
	if err != nil {
		return nil, err
	}
	right, err := ToNumeric(params[1])
	if err != nil {
		return nil, err
	}
	r := math.Mod(left, right)
	if math.IsNaN(r) {
		return nil, fmt.Errorf("%%: got NaN result")
	}
	return r, nil
} | numeric.go | 0.7696 | 0.439326 | numeric.go | starcoder
package float128ppc
import (
"math"
"math/big"
)
const (
	// precision specifies the number of bits in the mantissa (including the
	// implicit lead bit).
	precision = 106
)

var (
	// NegNaN is the canonical double-double representation of -NaN.
	NegNaN = Float{high: -math.NaN(), low: 0}
	// NaN is the canonical double-double representation of +NaN.
	NaN = Float{high: math.NaN(), low: 0}
)
// Float is a floating-point number in double-double format.
type Float struct {
	// where a long double value is regarded as the exact sum of two double-precision values, giving at least a 106-bit precision
	high float64
	low  float64
}
// NewFromBits returns the floating-point number corresponding to the
// double-double representation, where a holds the IEEE 754 bits of the high
// component and b those of the low component.
func NewFromBits(a, b uint64) Float {
	high := math.Float64frombits(a)
	low := math.Float64frombits(b)
	return Float{high: high, low: low}
}
// NewFromFloat32 returns the nearest double-double precision floating-point
// number for x and the accuracy of the conversion.
func NewFromFloat32(x float32) (f Float, exact big.Accuracy) {
	f, acc := NewFromFloat64(float64(x))
	if acc == big.Exact {
		// widening float32->float64 is exact, so re-check that narrowing back
		// to float32 round-trips to report accuracy relative to float32
		_, acc = f.Float32()
	}
	return f, acc
}
// NewFromFloat64 returns the nearest double-double precision floating-point
// number for x and the accuracy of the conversion.
func NewFromFloat64(x float64) (f Float, exact big.Accuracy) {
	// handle NaN explicitly so the sign bit is preserved
	if math.IsNaN(x) {
		if math.Signbit(x) {
			return NegNaN, big.Exact
		}
		return NaN, big.Exact
	}

	result := Float{high: x, low: 0}
	asBig, _ := result.Big()
	return result, asBig.Acc()
}
// Bits returns the double-double binary representation of f: the IEEE 754
// bits of the high component followed by those of the low component.
func (f Float) Bits() (a, b uint64) {
	return math.Float64bits(f.high), math.Float64bits(f.low)
}
// Float32 returns the float32 representation of f and the accuracy of the
// conversion. NaN is handled separately since big.Float cannot represent it.
func (f Float) Float32() (float32, big.Accuracy) {
	x, nan := f.Big()
	if nan {
		if x.Signbit() {
			return float32(-math.NaN()), big.Exact
		}
		return float32(math.NaN()), big.Exact
	}
	return x.Float32()
}
// Float64 returns the float64 representation of f and the accuracy of the
// conversion. NaN is handled separately since big.Float cannot represent it.
func (f Float) Float64() (float64, big.Accuracy) {
	x, nan := f.Big()
	if nan {
		if x.Signbit() {
			return -math.NaN(), big.Exact
		}
		return math.NaN(), big.Exact
	}
	return x.Float64()
}
// Big returns the multi-precision floating-point number representation of f and
// a boolean indicating whether f is Not-a-Number (in which case the returned
// value is the 106-bit zero, since big.Float has no NaN).
func (f Float) Big() (x *big.Float, nan bool) {
	x = big.NewFloat(0)
	x.SetPrec(precision)
	x.SetMode(big.ToNearestEven)

	if f.IsNaN() {
		return x, true
	}

	// the double-double value is the exact sum of its two components
	x.Add(big.NewFloat(f.high), big.NewFloat(f.low))
	return x, false
}
// IsNaN returns true if the Float is NaN, i.e. if either component is NaN.
func (f Float) IsNaN() bool {
	// NaN + NaN should be NaN in consideration
	return math.IsNaN(f.high) || math.IsNaN(f.low)
} | float128ppc/float128ppc.go | 0.89093 | 0.536738 | float128ppc.go | starcoder
package gibbs
import (
"gonum.org/v1/gonum/floats"
"gonum.org/v1/gonum/stat/distuv"
"math/rand"
)
// Gibbs runs timeSteps rounds of an alternating estimation scheme for a
// two-component Gaussian mixture with fixed variance, returning the per-round
// mixing-proportion, component-zero-mean and component-one-mean estimates
// (each slice has timeSteps+1 entries; index 0 holds the random initialization).
// NOTE(review): assignments come from GroupSampler's deterministic argmax
// rather than a random draw from the conditional, so this is closer to
// hard-assignment EM than a true Gibbs sweep — confirm intended.
// NOTE(review): math/rand is never seeded here, and an empty dataset panics
// on rand.Intn(0) — callers must guard both.
func Gibbs(dataset []float64, timeSteps int) ([]float64, []float64, []float64) {
	// Initialize parameters
	sigmaZero := 1.5 // Let's start with fixed variance parameters and focus on finding the means
	sigmaOne := sigmaZero
	muZero := dataset[rand.Intn(len(dataset))]
	muOne := dataset[rand.Intn(len(dataset))]
	piHat := rand.Float64()
	round := 1

	piHatEstimates := make([]float64, timeSteps+1)
	muZeroEstimates := make([]float64, timeSteps+1)
	muOneEstimates := make([]float64, timeSteps+1)
	piHatEstimates[0] = piHat
	muZeroEstimates[0] = muZero
	muOneEstimates[0] = muOne

	for ; round <= timeSteps; {
		// Simulate with fixed normals
		distOne := distuv.Normal{
			Mu:    muOneEstimates[round-1],
			Sigma: sigmaOne,
		}
		distZero := distuv.Normal{
			Mu:    muZeroEstimates[round-1],
			Sigma: sigmaZero,
		}
		tmpPiSample := GroupSampler(dataset, distZero, distOne)
		// piHat is the fraction of points assigned to component zero
		piHatEstimates[round] = floats.Sum(tmpPiSample) / float64(len(tmpPiSample))
		muZeroEstimates[round], muOneEstimates[round] = MeanSampler(tmpPiSample, dataset)
		round++
	}

	return piHatEstimates, muZeroEstimates, muOneEstimates
}
// MeanSampler splits dataset by the per-point assignments (1.0 selects
// component zero, anything else component one — matching GroupSampler's
// encoding) and returns the sample mean of each group.
// NOTE(review): if either group is empty the corresponding mean is 0/0 = NaN
// — confirm callers tolerate that.
func MeanSampler(assignments []float64, dataset []float64) (float64, float64) {
	var tmpTotalMeanZero []float64
	var tmpTotalMeanOne []float64
	for i := range dataset {
		if assignments[i] == 1.0 {
			tmpTotalMeanZero = append(tmpTotalMeanZero, dataset[i])
		} else {
			tmpTotalMeanOne = append(tmpTotalMeanOne, dataset[i])
		}
	}
	meanZero := floats.Sum(tmpTotalMeanZero) / float64(len(tmpTotalMeanZero))
	meanOne := floats.Sum(tmpTotalMeanOne) / float64(len(tmpTotalMeanOne))
	return meanZero, meanOne
}
// GroupSampler assigns each data point to whichever component gives it the
// higher density: 1 when the zero component is more likely, 0 otherwise
// (ties go to component one).
func GroupSampler(dataset []float64, gaussianZero distuv.Normal, gaussianOne distuv.Normal) []float64 {
	tmpAssigns := make([]float64, len(dataset))
	for i, elem := range dataset {
		if gaussianZero.Prob(elem) > gaussianOne.Prob(elem) {
			tmpAssigns[i] = 1
		} else {
			tmpAssigns[i] = 0
		}
	}
	return tmpAssigns
} | gibbs/gibbs.go | 0.534612 | 0.497131 | gibbs.go | starcoder
* ************************************** Usage: **************************************
* A general use port scanner, yo. The scanner outputs the number of open ports, total number of ports scanned, and CSV files,
* one for the open ports and one for the closed ports
*
* The input for the PortScanner function are as follows:
* `portsToScan` is an array with two elements, the first is the starting port and the second is the ending port
* For example, this should be input as `[2]int{1, 1024}` if you want to scan ports 1 ... 1024 (inclusive of the end points)
* `numPorts` is an integer value that indicates how many ports a worker should be assigned.
* For example, if you want each worker to have 100 ports, simply pass in `100`
* A full scanner call would then be: `PortScanner([2]int{1, 1024}, 100)` to scan ports 1 ... 1024 with each worker having 100 ports.
* For a quick sample, from the `lab/2/bhg-scanner/main` directory run `go run main.go`.
*/
package scanner
import (
"encoding/csv"
"fmt"
"log"
"net"
"os"
"path"
"sort"
"strings"
"time"
)
// dur bounds each TCP dial attempt made by worker; net.DialTimeout gives up
// after this long (1 second seems fine for now).
var dur = 1 * time.Second
// worker pulls port numbers from ports and attempts a TCP connection to each,
// reporting on results: an open port is sent as-is, a failed dial (closed,
// filtered or timed out) is sent negated so the receiver can tell them apart.
// NOTE(review): the target host is hard-coded to scanme.nmap.org — confirm
// this is intended beyond the lab exercise.
func worker(ports, results chan int) {
	for p := range ports {
		address := fmt.Sprintf("scanme.nmap.org:%d", p)
		conn, err := net.DialTimeout("tcp", address, dur)
		if err != nil {
			results <- -p
			continue
		}
		conn.Close()
		results <- p
	}
}
// PortScanner scans the inclusive port range portsToScan[0]..portsToScan[1],
// prints and CSV-dumps the open and closed ports, and returns the number of
// open ports together with the total number of ports scanned.
// NOTE(review): despite the file-header docs, numPorts is used as the number
// of worker goroutines (and the ports channel buffer), not as "ports per
// worker" — confirm which behavior is intended.
// NOTE(review): a scanned port 0 would be reported as -0 == 0 and counted as
// closed; harmless for ranges starting at 1.
func PortScanner(portsToScan [2]int, numPorts int) (int, int) {
	var openports []int   // notice the capitalization here. access limited!
	var closedports []int // var for tracking the closed ports
	//numPorts := portsToScan[1] - portsToScan[0] + 1
	ports := make(chan int, numPorts)
	results := make(chan int)

	// spawn one worker per channel-buffer slot
	for i := 0; i < cap(ports); i++ {
		go worker(ports, results)
	}

	// feed the full range asynchronously so the result loop below can drain
	go func() {
		for i := portsToScan[0]; i <= portsToScan[1]; i++ {
			ports <- i
		}
	}()

	// collect exactly one result per scanned port; negative means closed
	for i := portsToScan[0]; i <= portsToScan[1]; i++ {
		port := <-results
		if port > 0 {
			openports = append(openports, port)
		} else {
			negPort := port * (-1)
			closedports = append(closedports, negPort)
		}
	}

	close(ports)
	close(results)
	sort.Ints(openports)
	sort.Ints(closedports)
	for _, port := range openports {
		fmt.Printf("%d open\n", port)
	}

	/*
	 * Uncomment the two lines below if a full list of the ports scanned is desirable.
	   portList := append(openports, closedports...)
	   writeToCSV(portList)
	*/
	totalPorts := len(openports) + len(closedports)
	numOpenPorts := len(openports)
	writeToCSV("openPortList", openports)
	writeToCSV("closedPortList", closedports)
	return numOpenPorts, totalPorts
}
/*
 * checkErr is a small helper to reduce repetitive error-handling logic
 * (thanks to Andey for the inspiration and
 * https://golangcode.com/write-data-to-a-csv-file/ for a simple extension):
 * it fatally logs err prefixed with message when err is non-nil, and does
 * nothing otherwise.
 */
func checkErr(message string, err error) {
	if err == nil {
		return
	}
	log.Fatal(message, err)
}
/*
 * A function for writing to a .csv file, breaking this piece out to help keep the PortScanner function readable
 * I used this source as a reference for the function: https://golangcode.com/write-data-to-a-csv-file/
 * and this source for fixing the typing issues (the ints need to be strings for the Writer function).
 *
 * NOTE(review): all ports are written as a single CSV record (one row), and
 * os.Create fails (fatally, via checkErr) unless the "results" directory
 * already exists relative to the working directory — confirm both are intended.
 */
func writeToCSV(fileName string, portList []int) {
	// Create a new CSV file with the supplied fileName and the appropriate file extension
	filePath := path.Join("results", fileName+".csv")
	file, err := os.Create(filePath)
	// Check to make sure the file was created
	checkErr("Cannot create file, yo.", err)
	// Don't close the file until we are finished writing to it
	defer file.Close()
	// Convert the slice of ints to strings
	stringPorts := strings.Fields(strings.Trim(fmt.Sprint(portList), "[]"))
	// Set up a writer for the CSV file
	writer := csv.NewWriter(file)
	// Write the stringified ports to the file
	writer.Write(stringPorts)
	// Don't flush the writers contents until we are done
	// (defers run LIFO, so Flush executes before the deferred file.Close)
	defer writer.Flush()
} | materials/lab/2/bhg-scanner/scanner/scanner.go | 0.679498 | 0.447883 | scanner.go | starcoder
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.