code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package azure
import (
"fmt"
"strings"
"github.com/infracost/infracost/internal/schema"
"github.com/shopspring/decimal"
"github.com/tidwall/gjson"
)
// GetAzureRMSynapseWorkspacRegistryItem returns the registry item that maps
// the azurerm_synapse_workspace Terraform resource to its cost function.
// NOTE(review): "Workspac" looks like a typo for "Workspace"; renaming would
// break external callers, so the name is left as-is.
func GetAzureRMSynapseWorkspacRegistryItem() *schema.RegistryItem {
	return &schema.RegistryItem{
		Name:  "azurerm_synapse_workspace",
		RFunc: NewAzureRMSynapseWorkspace,
		// resource_group_name is resolved so the region can be looked up
		// from the referenced resource group.
		ReferenceAttributes: []string{
			"resource_group_name",
		},
		Notes: []string{"the total costs consist of several resources that should be viewed as a whole"},
	}
}
// NewAzureRMSynapseWorkspace builds the cost resource for an
// azurerm_synapse_workspace. It aggregates several pay-as-you-go meters:
// serverless SQL pool size, Basic/Standard data flows, and data-pipeline
// meters for both the Azure-hosted and self-hosted integration runtimes.
// Quantities stay nil (unknown) unless the matching usage keys are present.
func NewAzureRMSynapseWorkspace(d *schema.ResourceData, u *schema.UsageData) *schema.Resource {
	region := lookupRegion(d, []string{"resource_group_name"})
	costComponents := make([]*schema.CostComponent, 0)
	// Managed VNET switches the Azure-hosted IR SKU used below.
	managedVirtualNetwork := false
	if d.Get("managed_virtual_network_enabled").Type != gjson.Null {
		managedVirtualNetwork = d.Get("managed_virtual_network_enabled").Bool()
	}
	var serverlessSQLPoolSize *decimal.Decimal
	if u != nil && u.Get("serverless_sql_pool_size_tb").Type != gjson.Null {
		serverlessSQLPoolSize = decimalPtr(decimal.NewFromInt(u.Get("serverless_sql_pool_size_tb").Int()))
	}
	costComponents = append(costComponents, synapseServerlessSQLPoolCostComponent(region, "Serverless SQL pool size", "10", serverlessSQLPoolSize))
	// One data-flow component per tier; the quantity is only known when all
	// three usage values (instances, vcores, hours) are provided together.
	dataflowTiers := [2]string{"Basic", "Standard"}
	for _, tier := range dataflowTiers {
		var dataflowInstances, dataflowVCores, dataflowHours *decimal.Decimal
		var instancesUsageKey = fmt.Sprintf("dataflow_%s_instances", strings.ToLower(tier))
		var vcoresUsageKey = fmt.Sprintf("dataflow_%s_vcores", strings.ToLower(tier))
		var hoursUsageKey = fmt.Sprintf("monthly_dataflow_%s_hours", strings.ToLower(tier))
		if u != nil && u.Get(instancesUsageKey).Type != gjson.Null && u.Get(vcoresUsageKey).Type != gjson.Null && u.Get(hoursUsageKey).Type != gjson.Null {
			dataflowInstances = decimalPtr(decimal.NewFromInt(u.Get(instancesUsageKey).Int()))
			dataflowVCores = decimalPtr(decimal.NewFromInt(u.Get(vcoresUsageKey).Int()))
			dataflowHours = decimalPtr(decimal.NewFromInt(u.Get(hoursUsageKey).Int()))
		}
		costComponents = append(costComponents, synapseDataFlowCostComponent(region, fmt.Sprintf("Data flow (%s)", strings.ToLower(tier)), tier, dataflowInstances, dataflowVCores, dataflowHours))
	}
	datapipelineTiers := [2]string{"Azure Hosted IR", "Self Hosted IR"}
	datapipelineUsageKeys := [2]string{"azure_hosted", "self_hosted"}
	if managedVirtualNetwork {
		datapipelineTiers = [2]string{"Azure Hosted Managed VNET IR", "Self Hosted IR"}
	}
	for i, tier := range datapipelineTiers {
		var activityRuns, dataIntegrationUnits, dataIntegrationHours, dataMovementHours, integrationRuntimeHours, externalIntegrationRuntimeHours *decimal.Decimal
		// e.g. "azure hosted" / "self hosted"; used in the component names.
		var usageName = strings.Replace(datapipelineUsageKeys[i], "_", " ", 1)
		var activityRunsUsageKey = fmt.Sprintf("monthly_datapipeline_%s_activity_runs", datapipelineUsageKeys[i])
		if u != nil && u.Get(activityRunsUsageKey).Type != gjson.Null {
			activityRuns = decimalPtr(decimal.NewFromInt(u.Get(activityRunsUsageKey).Int()))
		}
		costComponents = append(costComponents, synapseDataPipelineActivityRunCostComponent(region, fmt.Sprintf("Data pipeline %s activity runs", usageName), tier, "Orchestration Activity Run", activityRuns))
		if datapipelineUsageKeys[i] == "azure_hosted" {
			// Azure-hosted IR data movement is quantified in DIU-hours
			// (data integration units x hours).
			var dataIntegrationUnitUsageKey = fmt.Sprintf("monthly_datapipeline_%s_data_integration_units", datapipelineUsageKeys[i])
			var dataIntegrationHoursUsageKey = fmt.Sprintf("monthly_datapipeline_%s_data_integration_hours", datapipelineUsageKeys[i])
			if u != nil && u.Get(dataIntegrationUnitUsageKey).Type != gjson.Null && u.Get(dataIntegrationHoursUsageKey).Type != gjson.Null {
				dataIntegrationUnits = decimalPtr(decimal.NewFromInt(u.Get(dataIntegrationUnitUsageKey).Int()))
				dataIntegrationHours = decimalPtr(decimal.NewFromInt(u.Get(dataIntegrationHoursUsageKey).Int()))
			}
			costComponents = append(costComponents, synapseDataPipelineDataMovementCostComponent(region, fmt.Sprintf("Data pipeline %s data integration units", usageName), tier, "Data Movement", "DIU-hours", dataIntegrationUnits, dataIntegrationHours))
		} else {
			// Self-hosted IR data movement is quantified in plain hours
			// (the unit factor is fixed at 1).
			var dataMovementHoursUsageKey = fmt.Sprintf("monthly_datapipeline_%s_data_movement_hours", datapipelineUsageKeys[i])
			if u != nil && u.Get(dataMovementHoursUsageKey).Type != gjson.Null {
				dataMovementHours = decimalPtr(decimal.NewFromInt(u.Get(dataMovementHoursUsageKey).Int()))
			}
			costComponents = append(costComponents, synapseDataPipelineDataMovementCostComponent(region, fmt.Sprintf("Data pipeline %s data movement", usageName), tier, "Data Movement", "hours", decimalPtr(decimal.NewFromInt(1)), dataMovementHours))
		}
		var integrationRuntimeUsageKey = fmt.Sprintf("monthly_datapipeline_%s_integration_runtime_hours", datapipelineUsageKeys[i])
		if u != nil && u.Get(integrationRuntimeUsageKey).Type != gjson.Null {
			integrationRuntimeHours = decimalPtr(decimal.NewFromInt(u.Get(integrationRuntimeUsageKey).Int()))
		}
		costComponents = append(costComponents, synapseDataPipelineActivityIntegrationRuntimeCostComponent(region, fmt.Sprintf("Data pipeline %s integration runtime", usageName), tier, "Pipeline Activity", integrationRuntimeHours))
		var externalIntegrationRuntimeUsageKey = fmt.Sprintf("monthly_datapipeline_%s_external_integration_runtime_hours", datapipelineUsageKeys[i])
		if u != nil && u.Get(externalIntegrationRuntimeUsageKey).Type != gjson.Null {
			externalIntegrationRuntimeHours = decimalPtr(decimal.NewFromInt(u.Get(externalIntegrationRuntimeUsageKey).Int()))
		}
		costComponents = append(costComponents, synapseDataPipelineActivityIntegrationRuntimeCostComponent(region, fmt.Sprintf("Data pipeline %s external integration runtime", usageName), tier, "External Pipeline Activity", externalIntegrationRuntimeHours))
	}
	return &schema.Resource{
		Name:           d.Address,
		CostComponents: costComponents,
	}
}
// synapseServerlessSQLPoolCostComponent builds the per-TB cost component for
// the serverless SQL pool. start is the tier's start-usage amount passed to
// the price filter (the only in-file caller passes "10"); quantity is the
// monthly TB processed, or nil when unknown.
func synapseServerlessSQLPoolCostComponent(region, name, start string, quantity *decimal.Decimal) *schema.CostComponent {
	return &schema.CostComponent{
		Name:            name,
		Unit:            "TB",
		UnitMultiplier:  decimal.NewFromInt(1),
		MonthlyQuantity: quantity,
		ProductFilter: &schema.ProductFilter{
			VendorName:    strPtr("azure"),
			Region:        strPtr(region),
			Service:       strPtr("Azure Synapse Analytics"),
			ProductFamily: strPtr("Analytics"),
			AttributeFilters: []*schema.AttributeFilter{
				{Key: "productName", Value: strPtr("Azure Synapse Analytics Serverless SQL Pool")},
			},
		},
		PriceFilter: &schema.PriceFilter{
			PurchaseOption:   strPtr("Consumption"),
			StartUsageAmount: strPtr(start),
		},
	}
}
// synapseDataPipelineActivityRunCostComponent builds the cost component for
// pipeline orchestration activity runs (billed per 1k runs). The meter name
// is composed as "<sku> <meter>"; runs may be nil when usage is unknown.
func synapseDataPipelineActivityRunCostComponent(region, name, sku, meter string, runs *decimal.Decimal) *schema.CostComponent {
	return &schema.CostComponent{
		Name:            name,
		Unit:            "1k activity runs",
		UnitMultiplier:  decimal.NewFromInt(1),
		MonthlyQuantity: runs,
		ProductFilter: &schema.ProductFilter{
			VendorName:    strPtr("azure"),
			Region:        strPtr(region),
			Service:       strPtr("Azure Synapse Analytics"),
			ProductFamily: strPtr("Analytics"),
			AttributeFilters: []*schema.AttributeFilter{
				{Key: "skuName", Value: strPtr(sku)},
				{Key: "meterName", Value: strPtr(fmt.Sprintf("%s %s", sku, meter))},
			},
		},
		PriceFilter: &schema.PriceFilter{
			PurchaseOption: strPtr("Consumption"),
		},
	}
}
// synapseDataPipelineDataMovementCostComponent builds the cost component for
// pipeline data movement. The quantity is diu * hours (DIU-hours for the
// Azure-hosted IR; callers pass diu=1 for plain hour-based movement); it
// stays nil when either input is nil.
func synapseDataPipelineDataMovementCostComponent(region, name, sku, meter, unit string, diu, hours *decimal.Decimal) *schema.CostComponent {
	// NOTE(review): despite its name, hourlyQuantity is assigned to
	// MonthlyQuantity below — the value is a monthly total.
	var hourlyQuantity *decimal.Decimal
	if diu != nil && hours != nil {
		hourlyQuantity = decimalPtr(diu.Mul(*hours))
	}
	return &schema.CostComponent{
		Name:            name,
		Unit:            unit,
		UnitMultiplier:  decimal.NewFromInt(1),
		MonthlyQuantity: hourlyQuantity,
		ProductFilter: &schema.ProductFilter{
			VendorName:    strPtr("azure"),
			Region:        strPtr(region),
			Service:       strPtr("Azure Synapse Analytics"),
			ProductFamily: strPtr("Analytics"),
			AttributeFilters: []*schema.AttributeFilter{
				{Key: "skuName", Value: strPtr(sku)},
				{Key: "meterName", Value: strPtr(fmt.Sprintf("%s %s", sku, meter))},
			},
		},
		PriceFilter: &schema.PriceFilter{
			PurchaseOption: strPtr("Consumption"),
		},
	}
}
// synapseDataPipelineActivityIntegrationRuntimeCostComponent builds the
// hourly cost component for pipeline/external pipeline activity on an
// integration runtime. hours may be nil when usage is unknown.
func synapseDataPipelineActivityIntegrationRuntimeCostComponent(region, name, sku, meter string, hours *decimal.Decimal) *schema.CostComponent {
	return &schema.CostComponent{
		Name:            name,
		Unit:            "hours",
		UnitMultiplier:  decimal.NewFromInt(1),
		MonthlyQuantity: hours,
		ProductFilter: &schema.ProductFilter{
			VendorName:    strPtr("azure"),
			Region:        strPtr(region),
			Service:       strPtr("Azure Synapse Analytics"),
			ProductFamily: strPtr("Analytics"),
			AttributeFilters: []*schema.AttributeFilter{
				{Key: "skuName", Value: strPtr(sku)},
				{Key: "meterName", Value: strPtr(fmt.Sprintf("%s %s", sku, meter))},
			},
		},
		PriceFilter: &schema.PriceFilter{
			PurchaseOption: strPtr("Consumption"),
		},
	}
}
func synapseDataFlowCostComponent(region, name, tier string, instances, vCores, hours *decimal.Decimal) *schema.CostComponent {
var hourlyQuantity *decimal.Decimal
if instances != nil && vCores != nil && hours != nil {
hourlyQuantity = decimalPtr(vCores.Mul(*instances).Mul(*hours))
}
return &schema.CostComponent{
Name: name,
Unit: "vCore-hours",
UnitMultiplier: decimal.NewFromInt(1),
MonthlyQuantity: hourlyQuantity,
ProductFilter: &schema.ProductFilter{
VendorName: strPtr("azure"),
Region: strPtr(region),
Service: strPtr("Azure Synapse Analytics"),
ProductFamily: strPtr("Analytics"),
AttributeFilters: []*schema.AttributeFilter{
{Key: "productName", Value: strPtr(fmt.Sprintf("Azure Synapse Analytics Data Flow - %s", tier))},
{Key: "skuName", Value: strPtr("vCore")},
},
},
PriceFilter: &schema.PriceFilter{
PurchaseOption: strPtr("Consumption"),
},
}
} | internal/providers/terraform/azure/synapse_workspace.go | 0.548915 | 0.400544 | synapse_workspace.go | starcoder |
package chart
// Interface Assertions.
var (
_ Series = (*PercentChangeSeries)(nil)
_ FirstValuesProvider = (*PercentChangeSeries)(nil)
_ LastValuesProvider = (*PercentChangeSeries)(nil)
_ ValueFormatterProvider = (*PercentChangeSeries)(nil)
)
// PercentChangeSeriesSource is a series that can be used as the inner
// series of a PercentChangeSeries: it must expose its first/last values,
// per-index values and value formatters.
type PercentChangeSeriesSource interface {
	Series
	FirstValuesProvider
	LastValuesProvider
	ValuesProvider
	ValueFormatterProvider
}
// PercentChangeSeries applies a percentage difference function to a given
// continuous series: each y value is reported as the percent change from
// the inner series' first y value.
type PercentChangeSeries struct {
	Name        string                    // display name of the series
	Style       Style                     // line style used when rendering
	YAxis       YAxisType                 // which y-axis the series draws on
	InnerSeries PercentChangeSeriesSource // source data being transformed
}
// GetName returns the name of the time series.
func (pcs PercentChangeSeries) GetName() string {
	return pcs.Name
}
// GetStyle returns the line style.
func (pcs PercentChangeSeries) GetStyle() Style {
	return pcs.Style
}
// Len implements part of Series; the length is that of the inner series.
func (pcs PercentChangeSeries) Len() int {
	return pcs.InnerSeries.Len()
}
// GetFirstValues implements FirstValuesProvider by delegating to the inner
// series (the first point is by definition a 0% change, but the raw inner
// values are returned here).
func (pcs PercentChangeSeries) GetFirstValues() (x, y float64) {
	return pcs.InnerSeries.GetFirstValues()
}
// GetValues returns the x value at index and the percent difference of the
// y value at index from the inner series' first y value.
func (pcs PercentChangeSeries) GetValues(index int) (x, y float64) {
	_, firstY := pcs.InnerSeries.GetFirstValues()
	vx, vy := pcs.InnerSeries.GetValues(index)
	return vx, PercentDifference(firstY, vy)
}
// GetValueFormatters returns the inner series' x formatter paired with the
// percent formatter for y, since y values are percent changes.
func (pcs PercentChangeSeries) GetValueFormatters() (x, y ValueFormatter) {
	innerX, _ := pcs.InnerSeries.GetValueFormatters()
	return innerX, PercentValueFormatter
}
// GetYAxis returns which YAxis the series draws on.
func (pcs PercentChangeSeries) GetYAxis() YAxisType {
	return pcs.YAxis
}
// GetLastValues returns the final x value and the percent difference of the
// final y from the inner series' first y value.
func (pcs PercentChangeSeries) GetLastValues() (x, y float64) {
	_, firstY := pcs.InnerSeries.GetFirstValues()
	lastX, lastY := pcs.InnerSeries.GetLastValues()
	return lastX, PercentDifference(firstY, lastY)
}
// Render renders the series as a line series, merging the series' own style
// with the supplied defaults.
func (pcs PercentChangeSeries) Render(r Renderer, canvasBox Box, xrange, yrange Range, defaults Style) {
	style := pcs.Style.InheritFrom(defaults)
	Draw.LineSeries(r, canvasBox, xrange, yrange, style, pcs)
}
// Validate validates the series.
func (pcs PercentChangeSeries) Validate() error {
return pcs.InnerSeries.Validate()
} | vendor/github.com/wcharczuk/go-chart/v2/percent_change_series.go | 0.883663 | 0.512998 | percent_change_series.go | starcoder |
package modelzooserver
// sampleInitCode is the Python package __init__ used by the sample model
// directory.
const sampleInitCode = `from .my_test_model import DNNClassifier`

// sampleDockerfile builds an image bundling the sample model directory.
// NOTE(review): it ADDs "my_test_models" (plural) while sampleInitCode
// imports ".my_test_model" (singular) — confirm the intended directory name.
const sampleDockerfile = `FROM ubuntu:bionic
ADD my_test_models /models/my_test_models/
ENV PYTHONPATH=/models:/usr/local/sqlflow/python
`

// sampleModelCode is a sample Keras DNNClassifier definition used to
// exercise the model zoo server.
const sampleModelCode = `import tensorflow as tf
class DNNClassifier(tf.keras.Model):
    def __init__(self, feature_columns=None, hidden_units=[100,100], n_classes=3):
        """DNNClassifier
        :param feature_columns: feature columns.
        :type feature_columns: list[tf.feature_column].
        :param hidden_units: number of hidden units.
        :type hidden_units: list[int].
        :param n_classes: List of hidden units per layer.
        :type n_classes: int.
        """
        global _loss
        super(DNNClassifier, self).__init__()
        self.feature_layer = None
        self.n_classes = n_classes
        if feature_columns is not None:
            # combines all the data as a dense tensor
            self.feature_layer = tf.keras.layers.DenseFeatures(feature_columns)
        self.hidden_layers = []
        for hidden_unit in hidden_units:
            self.hidden_layers.append(tf.keras.layers.Dense(hidden_unit, activation='relu'))
        if self.n_classes == 2:
            # special setup for binary classification
            pred_act = 'sigmoid'
            _loss = 'binary_crossentropy'
            n_out = 1
        else:
            pred_act = 'softmax'
            _loss = 'categorical_crossentropy'
            n_out = self.n_classes
        self.prediction_layer = tf.keras.layers.Dense(n_out, activation=pred_act)
    def call(self, inputs, training=True):
        if self.feature_layer is not None:
            x = self.feature_layer(inputs)
        else:
            x = tf.keras.layers.Flatten()(inputs)
        for hidden_layer in self.hidden_layers:
            x = hidden_layer(x)
        return self.prediction_layer(x)
def optimizer(learning_rate=0.001):
    """Default optimizer name. Used in model.compile."""
    return tf.keras.optimizers.Adagrad(lr=learning_rate)
def loss(labels, output):
    """Default loss function. Used in model.compile."""
    global _loss
    if _loss == "binary_crossentropy":
        return tf.reduce_mean(tf.keras.losses.binary_crossentropy(labels, output))
    elif _loss == "categorical_crossentropy":
        return tf.reduce_mean(tf.keras.losses.sparse_categorical_crossentropy(labels, output))
def prepare_prediction_column(prediction):
    """Return the class label of highest probability."""
    return prediction.argmax(axis=-1)
def eval_metrics_fn():
    return {
        "accuracy": lambda labels, predictions: tf.equal(
            tf.argmax(predictions, 1, output_type=tf.int32),
            tf.cast(tf.reshape(labels, [-1]), tf.int32),
        )
    }
`
package main
import (
"fmt"
"math"
)
// main prints a sample income-tax (IR) calculation: 1.3% monthly return,
// first installment of a 36-installment term on a principal of 100000.
func main() {
	fmt.Println("IR")
	fmt.Println(IR(1.3, 1, 36, 100000, 0))
}
// IPMT returns the interest portion of the annuity payment for a given
// period: rate is the per-period interest rate, period the 1-based period
// index, periods the total number of periods, and present the present value.
//
// NOTE(review): tipo and future are unconditionally overwritten with 0
// below, so the tipo==1 branches are unreachable and callers cannot
// influence either value — confirm whether that is intended.
func IPMT(rate float64, period int32, periods int32, present float64, future float64, tipo float64) float64 {
	tipo = 0.0
	future = 0.0
	payment := PMT(rate, periods, present, future, tipo)
	var interest float64
	if period == 1 {
		if tipo == 1 {
			interest = 0
		} else {
			// First period: interest accrues on the full principal.
			interest = -present
		}
	} else {
		if tipo == 1 {
			interest = FV(rate, period-2, payment, present, 1) - payment
		} else {
			// Outstanding balance after period-1 payments.
			interest = FV(rate, period-1, payment, present, 0)
		}
	}
	return interest * rate
}
// PMT returns the constant periodic payment of an annuity (negated, as a
// cash outflow). Algorithm inspired by Apache OpenOffice.
//
// The original implementation forces the annuity type and future value to
// zero regardless of the caller's arguments; that behaviour is preserved.
func PMT(rate float64, periods int32, present float64, future float64, tipo float64) float64 {
	tipo = 0.0
	future = 0.0
	var payment float64
	if rate == 0 {
		payment = (present + future) / float64(periods)
	} else {
		term := math.Pow(1+rate, float64(periods))
		if tipo == 1 {
			payment = (future*rate/(term-1) + present*rate/(1-1/term)) / (1 + rate)
		} else {
			payment = future*rate/(term-1) + present*rate/(1-1/term)
		}
	}
	return -payment
}
// FV returns the future value of an annuity (negated, as a cash outflow):
// rate is the per-period rate, periods the number of periods, payment the
// periodic payment, value the present value, and tipo selects
// payments-at-start (1) versus payments-at-end (any other value).
func FV(rate float64, periods int32, payment float64, value float64, tipo int32) float64 {
	if rate == 0 {
		return -(value + payment*float64(periods))
	}
	term := math.Pow(1+rate, float64(periods))
	if tipo == 1 {
		return -(value*term + payment*(1+rate)*(term-1.0)/rate)
	}
	return -(value*term + payment*(term-1)/rate)
}
//IR ...
func IR(retorno float64, fatura int32, prazo int32, valor float64, juros float64) float64 {
var aliqIR float64
var ipmt float64
ipmt = -IPMT(retorno/100, fatura, prazo, valor, 0, 0)
ipmt = math.Round(ipmt*100) / 100
if fatura >= 1 && fatura < 6 {
aliqIR = 0.225
} else if fatura >= 6 && fatura < 12 {
aliqIR = 0.2
} else if fatura >= 12 && fatura < 24 {
aliqIR = 0.175
} else {
aliqIR = 0.15
}
return (ipmt + juros) * aliqIR
} | golang-exemples/finance_calc/main.go | 0.503906 | 0.455925 | main.go | starcoder |
package iotmaker_geo_osm
import (
"fmt"
"math"
)
// TODO: turn the distance units into named constants
// DistanceStt stores a distance simultaneously in meters and kilometers,
// together with the unit it was last expressed in.
type DistanceStt struct {
	Meters       float64 // distance in meters
	Kilometers   float64 // the same distance in kilometers
	unit         string  // distance unit last applied ("m", "Km" or "km")
	preserveUnit string  // original unit as first recorded
}
// DistanceListStt is a simple list of distances.
type DistanceListStt struct {
	List []DistanceStt
}
// GetMeters returns the distance in meters.
func (d *DistanceStt) GetMeters() float64 {
	return d.Meters
}
// GetKilometers returns the distance in kilometers.
func (d *DistanceStt) GetKilometers() float64 {
	return d.Kilometers
}
// GetUnit returns the unit the distance was last set in.
func (d *DistanceStt) GetUnit() string {
	return d.unit
}
// GetOriginalUnit returns the unit originally recorded for the distance.
func (d *DistanceStt) GetOriginalUnit() string {
	return d.preserveUnit
}
// AddMeters adds m meters to the stored distance, keeping the meter and
// kilometer views in sync. It also records meters as the unit, matching
// the behaviour of AddKilometers and SetMeters (the original omitted the
// unit bookkeeping here, leaving it inconsistent with its sibling).
func (d *DistanceStt) AddMeters(m float64) {
	d.Meters += m
	d.Kilometers += m / 1000
	d.unit = "m"
	d.preserveUnit = "m"
}
// SetMeters replaces the stored distance with m meters and records meters
// as both the current and original unit.
func (d *DistanceStt) SetMeters(m float64) {
	d.Meters = m
	d.Kilometers = m / 1000
	d.unit = "m"
	d.preserveUnit = "m"
}
// SetMetersIfGreaterThan keeps the larger of the stored distance and m
// (in meters), refreshes the kilometer view, and marks meters as the unit.
func (d *DistanceStt) SetMetersIfGreaterThan(m float64) {
	larger := math.Max(d.Meters, m)
	d.Meters = larger
	d.Kilometers = larger / 1000
	d.unit = "m"
	d.preserveUnit = "m"
}
// SetKilometersIfGreaterThan keeps the larger of the stored distance and km
// (in kilometers), refreshes the meter view, and marks "km" as the unit.
func (d *DistanceStt) SetKilometersIfGreaterThan(km float64) {
	larger := math.Max(d.Kilometers, km)
	d.Kilometers = larger
	d.Meters = larger * 1000
	d.unit = "km"
	d.preserveUnit = "km"
}
// SetMetersIfLessThan keeps the smaller of the stored distance and m
// (in meters), refreshes the kilometer view, and marks meters as the unit.
func (d *DistanceStt) SetMetersIfLessThan(m float64) {
	smaller := math.Min(d.Meters, m)
	d.Meters = smaller
	d.Kilometers = smaller / 1000
	d.unit = "m"
	d.preserveUnit = "m"
}
// SetKilometersIfLessThan keeps the smaller of the stored distance and km
// (in kilometers), refreshes the meter view, and marks "km" as the unit.
func (d *DistanceStt) SetKilometersIfLessThan(km float64) {
	smaller := math.Min(d.Kilometers, km)
	d.Kilometers = smaller
	d.Meters = smaller * 1000
	d.unit = "km"
	d.preserveUnit = "km"
}
// AddKilometers adds km kilometers to the stored distance, keeping the
// meter view in sync, and records "Km" as the unit.
// NOTE(review): the unit string here is "Km" while the SetKilometersIf*
// methods use "km" — confirm which casing is intended.
func (d *DistanceStt) AddKilometers(km float64) {
	d.Meters += km * 1000
	d.Kilometers += km
	d.unit = "Km"
	d.preserveUnit = "Km"
}
// SetKilometers replaces the stored distance with km kilometers and records
// "Km" as both the current and original unit.
func (d *DistanceStt) SetKilometers(km float64) {
	d.Meters = km * 1000
	d.Kilometers = km
	d.unit = "Km"
	d.preserveUnit = "Km"
}
// ToMetersString renders the distance in meters, e.g. "12.34m".
func (d *DistanceStt) ToMetersString() string {
	return fmt.Sprintf("%1.2fm", d.Meters)
}
func (d *DistanceStt) ToKilometersString() string {
return fmt.Sprintf("%1.2fKm", d.Kilometers)
} | typeDistance.go | 0.620966 | 0.587588 | typeDistance.go | starcoder |
package advent
import (
"strconv"
"strings"
)
var _ Problem = &dive{}
// dive solves Advent of Code 2021 day 2 ("Dive!").
type dive struct {
	dailyProblem
}
// NewDive constructs the day-2 problem.
func NewDive() Problem {
	d := &dive{}
	d.day = 2
	return d
}
// Solve returns both parts of the puzzle: the naive position product and
// the aim-corrected position product.
func (d *dive) Solve() interface{} {
	lines := d.GetInputLines()
	return []int{
		d.getPositionProduct(lines),
		d.getCorrectPositionProduct(lines),
	}
}
/*
--- Day 2: Dive! ---
The submarine takes commands like "forward 1", "down 2", or "up 3":
forward X increases the horizontal position by X units;
down X increases the depth by X units;
up X decreases the depth by X units
(down/up affect depth, not altitude, since this is a submarine).
Starting from horizontal position 0 and depth 0, follow the planned course
and return the product of the final horizontal position and final depth.
Example course: forward 5, down 5, forward 8, up 3, down 8, forward 2
ends at horizontal 15, depth 10, product 150.
*/
func (d *dive) getPositionProduct(input []string) int {
	horizontal, depth := 0, 0
	for _, line := range input {
		direction, magnitude := d.parseInputLine(line)
		if direction == "forward" {
			horizontal += magnitude
		} else if direction == "down" {
			depth += magnitude
		} else if direction == "up" {
			depth -= magnitude
		}
	}
	return horizontal * depth
}
/*
Part two reinterprets the commands with a third tracked value, aim
(starting at 0):
down X increases aim by X; up X decreases aim by X;
forward X increases horizontal position by X and depth by aim*X.
With the example course this ends at horizontal 15, depth 60, product 900.
Return the product of the final horizontal position and final depth under
this interpretation.
*/
func (d *dive) getCorrectPositionProduct(input []string) int {
	horizontal, depth, aim := 0, 0, 0
	for _, line := range input {
		direction, magnitude := d.parseInputLine(line)
		if direction == "forward" {
			horizontal += magnitude
			depth += aim * magnitude
		} else if direction == "down" {
			aim += magnitude
		} else if direction == "up" {
			aim -= magnitude
		}
	}
	return horizontal * depth
}
func (d *dive) parseInputLine(vector string) (string, int) {
vectorItems := strings.Split(vector, " ")
magnitude, _ := strconv.Atoi(vectorItems[1]) //trusting all input is "<direction> <some number>"
return vectorItems[0], magnitude
} | internal/advent/day2.go | 0.814311 | 0.67046 | day2.go | starcoder |
package k2tree
import "fmt"
// quartileIndex wraps a bitarray and caches the number of set bits below
// each quartile boundary so rank queries need only scan a fraction of the
// array. counts[i] is treated as cumulative from index 0 (see zeroCount).
type quartileIndex struct {
	bits    bitarray
	offsets [3]int // quartile boundary indices: len/4, len/2, 3*len/4
	counts  [3]int // set bits in [0, offsets[i]) for each boundary
}
var _ bitarray = (*quartileIndex)(nil)
// newQuartileIndex wraps bits in an index that caches the number of set
// bits below each quartile boundary.
//
// counts[i] must be cumulative — the number of set bits in [0, offsets[i])
// — because zeroCount computes ranks as counts[i] minus/plus a partial scan,
// and Set increments every counts[i] with at < offsets[i].
func newQuartileIndex(bits bitarray) *quartileIndex {
	q := &quartileIndex{
		bits: bits,
		offsets: [3]int{
			bits.Len() / 4,
			bits.Len() / 2,
			(bits.Len()/2 + bits.Len()/4),
		},
	}
	q.counts[0] = bits.Count(0, q.offsets[0])
	// Accumulate onto the previous boundary's total. The original code
	// added the (still zero) counts[1] and counts[2] to themselves, which
	// left counts[1] and counts[2] missing the set bits of the earlier
	// quartiles for any non-empty input.
	q.counts[1] = q.counts[0] + bits.Count(q.offsets[0], q.offsets[1])
	q.counts[2] = q.counts[1] + bits.Count(q.offsets[1], q.offsets[2])
	return q
}
// Len returns the number of bits in the bitarray.
func (q *quartileIndex) Len() int {
	return q.bits.Len()
}
// Set writes val to position at, updating the cached quartile counts when
// the stored bit actually changes.
func (q *quartileIndex) Set(at int, val bool) {
	if q.bits.Get(at) == val {
		// No change, so the cached counts stay valid.
		return
	}
	q.bits.Set(at, val)
	delta := -1
	if val {
		delta = 1
	}
	// Every quartile boundary above at covers this bit; adjust each.
	for i, offset := range q.offsets {
		if at < offset {
			q.counts[i] += delta
		}
	}
}
// Get returns the value stored at `at`.
func (q *quartileIndex) Get(at int) bool {
	return q.bits.Get(at)
}
// Count returns the number of set bits in the half-open interval [from, to).
func (q *quartileIndex) Count(from int, to int) int {
	total := q.zeroCount(to)
	if from != 0 {
		total -= q.zeroCount(from)
	}
	return total
}
// zeroCount computes the number of set bits in [0, to). It locates the
// quartile containing `to` and scans from whichever boundary is closer —
// either subtracting a partial scan from the cached cumulative count above,
// or adding a partial scan to the cached count below.
func (q *quartileIndex) zeroCount(to int) int {
	prevoff := 0
	prevcount := 0
	for i, off := range q.offsets {
		if to < off {
			if off-to < to-prevoff {
				// Closer to the upper boundary: count down from it.
				return q.counts[i] - q.bits.Count(to, off)
			} else {
				// Closer to the lower boundary: count up from it.
				return q.bits.Count(prevoff, to) + prevcount
			}
		}
		prevoff = off
		prevcount = q.counts[i]
	}
	// `to` lies in the final quartile.
	if q.bits.Len()-to < to-prevoff {
		return q.bits.Total() - q.bits.Count(to, q.bits.Len())
	} else {
		return q.bits.Count(prevoff, to) + prevcount
	}
}
// Total returns the total number of set bits.
func (q *quartileIndex) Total() int {
	return q.bits.Total()
}
// Insert extends the bitarray by `n` bits. The bits are zeroed
// and start at index `at`. Example:
// Initial string: 11101
// Insert(3, 2)
// Resulting string: 11000101
//
// After the insert, the quartile boundaries are recomputed from the new
// length and each cached count is repaired via adjust.
func (q *quartileIndex) Insert(n int, at int) error {
	if n%4 != 0 {
		panic("can only extend by nibbles (multiples of 4)")
	}
	err := q.bits.Insert(n, at)
	if err != nil {
		return err
	}
	newlen := q.bits.Len()
	for i := 0; i < 3; i++ {
		q.adjust(i, n, at, (newlen * (i + 1) / 4))
	}
	return nil
}
// adjust repairs counts[i] after n zero bits were inserted at `at`, moving
// the quartile boundary from its old position to newi. The inserted bits
// are zero, so counts change only because the boundary moved relative to
// the pre-existing data.
func (q *quartileIndex) adjust(i, n, at, newi int) {
	oldi := q.offsets[i]
	assert(newi >= oldi, "Inserting shrunk the array?")
	q.offsets[i] = newi
	if (n + at) < oldi {
		// Entire span below me, adjust for loss.
		q.counts[i] -= q.bits.Count(newi, oldi+n)
	} else if at >= oldi {
		// Entire span above me, adjust for gain.
		q.counts[i] += q.bits.Count(oldi, newi)
	} else {
		// Span intersects me.
		// Stupid answer: recount everything up to the new boundary.
		q.counts[i] = q.bits.Count(0, newi)
	}
}
func (q *quartileIndex) debug() string {
return fmt.Sprintf("Quartile:\n internal: %s, %#v", q.bits.debug(), q)
} | quartileindex.go | 0.679604 | 0.551211 | quartileindex.go | starcoder |
// Package lut provides a look up table, which compresses indexed data
package lut
import (
"sort"
"dawn.googlesource.com/tint/tools/src/list"
)
// LUT is a look up table.
// The table holds a number of items that are stored in a linear list.
type LUT interface {
	// Add adds a sequence of items to the table.
	// items can be a single element, a slice of element, or a List of element.
	// Returns a pointer to the offset of the first item in the table's list.
	// The sequence of items stored at [offset, offset+N), where N is the
	// number of items added will remain equal, even after calling Compact().
	Add(items interface{}) *int
	// Compact reorders the table items so that the table storage is compacted
	// by shuffling data around and de-duplicating sequences of common data.
	// Each originally added sequence is preserved in the resulting table, with
	// the same contiguous ordering, but with a potentially different offset.
	// Heuristics are used to shorten the table length, by exploiting common
	// subsequences, and removing duplicate sequences.
	// Note that shortest common superstring is NP-hard, so heuristics are used.
	// Compact updates pointers returned by Add().
	Compact()
}
// New returns a new look up table backed by the given storage list.
func New(storage list.List) LUT {
	return &lut{storage: storage}
}
// A sequence represents a span of entries in the table.
type sequence struct {
	offset *int // Pointer to the start index of the sequence; shared with Add's caller
	count  int  // Length of the sequence
}
// lut implements LUT.
type lut struct {
	storage   list.List  // The List that backs this LUT
	sequences []sequence // The entries in the LUT
}
// Add appends items (a single element, a slice, or a list.List) to the
// backing storage and returns a pointer to the offset of the first appended
// item. The same pointer is kept in t.sequences so Compact can rewrite it.
func (t *lut) Add(items interface{}) *int {
	start := t.storage.Count()
	t.storage.Append(items)
	appended := t.storage.Count() - start
	t.sequences = append(t.sequences, sequence{&start, appended})
	return &start
}
// Compact implements LUT. It greedily re-packs all added sequences into the
// backing storage, overlapping common subsequences to shorten the table,
// and rewrites each sequence's offset pointer in place.
// Note: the value receiver is fine here because only the slice/interface
// fields (and the *int offsets they point at) are mutated, never the lut
// struct itself.
func (t lut) Compact() {
	// Generate int32 identifiers for each unique item in the table.
	// We use these to compare items instead of comparing the real data as this
	// function is comparison-heavy, and integer compares are cheap.
	srcIDs := t.itemIDs()
	dstIDs := make([]int32, len(srcIDs))
	// Make a copy the data held in the table, use the copy as the source, and
	// t.storage as the destination.
	srcData := list.Copy(t.storage)
	dstData := t.storage
	// Sort all the sequences by length, with the largest first.
	// This helps 'seed' the compacted form with the largest items first.
	// This can improve the compaction as small sequences can pack into larger,
	// placed items.
	sort.Slice(t.sequences, func(i, j int) bool {
		return t.sequences[i].count > t.sequences[j].count
	})
	// unplaced is the list of sequences that have not yet been placed.
	// All sequences are initially unplaced.
	unplaced := make([]sequence, len(t.sequences))
	copy(unplaced, t.sequences)
	// placed is the list of sequences that have been placed.
	// Nothing is initially placed.
	placed := make([]sequence, 0, len(t.sequences))
	// remove moves the sequence at index i from unplaced to placed.
	remove := func(i int) {
		placed = append(placed, unplaced[i])
		if i > 0 {
			if i < len(unplaced)-1 {
				copy(unplaced[i:], unplaced[i+1:])
			}
			unplaced = unplaced[:len(unplaced)-1]
		} else {
			unplaced = unplaced[1:]
		}
	}
	// cp copies data from [srcOffset:srcOffset+count] to [dstOffset:dstOffset+count],
	// mirroring the copy in the dstIDs shadow array.
	cp := func(dstOffset, srcOffset, count int) {
		dstData.CopyFrom(srcData, dstOffset, srcOffset, count)
		copy(
			dstIDs[dstOffset:dstOffset+count],
			srcIDs[srcOffset:srcOffset+count],
		)
	}
	// match describes a sequence that can be placed.
	type match struct {
		dst int      // destination offset
		src sequence // source sequence
		len int      // number of items that matched
		idx int      // sequence index
	}
	// number of items that have been placed.
	newSize := 0
	// While there's sequences to place...
	for len(unplaced) > 0 {
		// Place the next largest, unplaced sequence at the end of the new list
		cp(newSize, *unplaced[0].offset, unplaced[0].count)
		*unplaced[0].offset = newSize
		newSize += unplaced[0].count
		remove(0)
		for {
			// Look for the sequence with the longest match against the
			// currently placed data. Any mismatches with currently placed data
			// will nullify the match. The head or tail of this sequence may
			// extend the currently placed data.
			best := match{}
			// For each unplaced sequence...
			for i := 0; i < len(unplaced); i++ {
				seq := unplaced[i]
				if best.len >= seq.count {
					// The best match is already at least as long as this
					// sequence and sequences are sorted by size, so best cannot
					// be beaten. Stop searching.
					break
				}
				// Perform a full sweep from left to right, scoring the match...
				for shift := -seq.count + 1; shift < newSize; shift++ {
					dstS := max(shift, 0)
					dstE := min(shift+seq.count, newSize)
					count := dstE - dstS
					srcS := *seq.offset - min(shift, 0)
					srcE := srcS + count
					if best.len < count {
						if equal(srcIDs[srcS:srcE], dstIDs[dstS:dstE]) {
							best = match{shift, seq, count, i}
						}
					}
				}
			}
			if best.src.offset == nil {
				// Nothing matched. Not even one element.
				// Resort to placing the next largest sequence at the end.
				break
			}
			if best.dst < 0 {
				// Best match wants to place the sequence to the left of the
				// current output. We have to shuffle everything...
				n := -best.dst
				dstData.Copy(n, 0, newSize)
				copy(dstIDs[n:n+newSize], dstIDs)
				newSize += n
				best.dst = 0
				// Already-placed sequences all moved right by n.
				for _, p := range placed {
					*p.offset += n
				}
			}
			// Place the best matching sequence.
			cp(best.dst, *best.src.offset, best.src.count)
			newSize = max(newSize, best.dst+best.src.count)
			*best.src.offset = best.dst
			remove(best.idx)
		}
	}
	// Shrink the output buffer to the new size.
	dstData.Resize(newSize)
	// All done.
}
// itemIDs assigns a compact int32 identifier to every unique item held in
// storage and returns the identifiers in storage order, so later
// comparisons can use cheap integer equality instead of comparing data.
func (t lut) itemIDs() []int32 {
	n := t.storage.Count()
	ids := make([]int32, n)
	seen := map[interface{}]int32{}
	for i := 0; i < n; i++ {
		item := t.storage.Get(i)
		id, ok := seen[item]
		if !ok {
			id = int32(len(seen))
			seen[item] = id
		}
		ids[i] = id
	}
	return ids
}
// max returns the larger of the two integers.
func max(a, b int) int {
	if a > b {
		return a
	}
	return b
}

// min returns the smaller of the two integers.
func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}
// equal reports whether a and b hold the same elements in the same order.
// Slices of different lengths are never equal; checking this up front also
// prevents an index-out-of-range panic when b is shorter than a (the
// original indexed b by a's indices unguarded). Callers in this file always
// pass equal-length slices, so their behavior is unchanged.
func equal(a, b []int32) bool {
	if len(a) != len(b) {
		return false
	}
	for i, v := range a {
		if b[i] != v {
			return false
		}
	}
	return true
} | third_party/tint/tools/src/lut/lut.go | 0.792344 | 0.511534 | lut.go | starcoder |
package game
import (
"github.com/oakmound/oak/collision"
"github.com/oakmound/oak/entities"
"github.com/oakmound/oak/event"
"github.com/oakmound/oak/physics"
"github.com/oakmound/oak/render"
)
// Entity is the base mobile game object: an interactive, collidable body
// with mass, a facing direction and a capped movement speed.
type Entity struct {
	entities.Interactive
	physics.Mass
	// Dir is the vector the entity is facing; forward/back movement and
	// teleports are taken along (or against) it.
	Dir physics.Vector
	// speedMax caps the magnitude of the per-frame movement delta
	// (see enforceSpeedMax).
	speedMax float64
	// collided counts collisions registered this frame; applyMovement
	// reverses the frame's motion when it is non-zero.
	collided int
	// moveVert/moveHoriz record which axes were driven this frame so
	// diagonal movement can be scaled down (see scaleDiagonal).
	moveVert, moveHoriz bool
}
// Init registers the entity with the event system and stores the assigned
// caller id on the entity.
func (e *Entity) Init() event.CID {
	e.CID = event.NextID(e)
	return e.CID
}

// NewEntity creates an interactive entity at (x, y) with size w x h, the
// given renderable, friction and mass, and wires up its reactive collision
// space to the game's collision labels.
func NewEntity(x, y, w, h float64, r render.Renderable, id event.CID,
	friction, mass float64) *Entity {
	e := new(Entity)
	e.SetMass(mass)
	e.Interactive = entities.NewInteractive(x, y, w, h, r, id.Parse(e), friction)
	// Todo: Distinguish these two, when we start tracking hits on walls
	e.RSpace.Add(collision.Label(Blocked), bounceEntity)
	e.RSpace.Add(collision.Label(LowDamage), infectBounce(0.0001))
	e.RSpace.Add(collision.Label(HighDamage), infectBounce(0.0002))
	e.RSpace.Add(collision.Label(PressureFan), nudgeEntity)
	e.RSpace.Add(Stun, stopEntity)
	return e
}

// CenterPos returns the position of the entity's center (its position is
// its top-left corner, so the center is offset by half the dimensions).
func (e *Entity) CenterPos() physics.Vector {
	return e.Vector.Copy().Add(physics.NewVector(e.W/2, e.H/2))
}

// E returns the entity itself, satisfying the HasE interface.
func (e *Entity) E() *Entity {
	return e
}

// Cleanup unbinds all event bindings, removes the entity's collision space,
// undraws its renderable and destroys its event registration.
func (e *Entity) Cleanup() {
	e.UnbindAll()
	collision.Remove(e.RSpace.Space)
	e.R.UnDraw()
	event.DestroyEntity(int(e.CID))
}

// HasE is implemented by anything that embeds or wraps an *Entity; the
// collision handlers below use it to recover the entity from an event id.
type HasE interface {
	E() *Entity
}
// applyMovement advances the entity by its accumulated Delta for this frame,
// runs collision callbacks, backs the movement out again if any collision was
// registered, and finally applies environmental friction.
func (e *Entity) applyMovement() {
	// Clamp, move, then let collision handlers react to the new position.
	e.enforceSpeedMax()
	e.ShiftPos(e.Delta.X(), e.Delta.Y())
	// CallOnHits runs the RSpace handlers; block until they have finished,
	// as they may modify Delta and e.collided.
	<-e.RSpace.CallOnHits()
	e.enforceSpeedMax()
	if e.collided > 0 {
		// Something solid was hit: undo this frame's movement by applying
		// the (possibly handler-adjusted) delta in reverse.
		e.collided = 0
		e.Delta.Scale(-1)
		e.ShiftPos(e.Delta.X(), e.Delta.Y())
	}
	// envFriction is a package-level constant/variable defined elsewhere.
	e.ApplyFriction(envFriction)
}
// infectBounce returns a collision handler that infects the body node the
// traveler currently occupies at the given rate, then bounces the entity.
// NOTE(review): thisBody and traveler are package globals defined elsewhere;
// presumably they track the active level graph and the player — confirm.
func infectBounce(rate float64) func(s1, s2 *collision.Space) {
	return func(s1, s2 *collision.Space) {
		i := thisBody.VecIndex(traveler.Vector)
		o := thisBody.graph[i]
		//oak.SetPalette(grayScale)
		o.Infect(rate)
		bounceEntity(s1, s2)
	}
}

// bounceEntity marks the colliding entity as collided and pushes it away:
// either via the other entity's physics.Pushes implementation, or by half of
// the overlap vector when the obstacle is not pushable.
func bounceEntity(s1, s2 *collision.Space) {
	ent := event.GetEntity(int(s1.CID))
	if hase, ok := ent.(HasE); ok {
		e := hase.E()
		e.collided++
		if psh, ok := event.GetEntity(int(s2.CID)).(physics.Pushes); ok {
			physics.Push(psh, e)
		} else {
			e.Delta.Add(s1.OverlapVector(s2).Scale(.5))
		}
	}
}

// nudgeEntity is like bounceEntity, but gentler (20% of the overlap vector)
// and without registering a collision, so movement is not undone.
func nudgeEntity(s1, s2 *collision.Space) {
	ent := event.GetEntity(int(s1.CID))
	if hase, ok := ent.(HasE); ok {
		e := hase.E()
		//e.collided++
		if psh, ok := event.GetEntity(int(s2.CID)).(physics.Pushes); ok {
			physics.Push(psh, e)
		} else {
			e.Delta.Add(s1.OverlapVector(s2).Scale(.2))
		}
	}
}

// stopEntity zeroes the entity's movement delta, freezing it in place for
// the current frame (used for the Stun label).
func stopEntity(s1, _ *collision.Space) {
	ent := event.GetEntity(int(s1.CID))
	if hase, ok := ent.(HasE); ok {
		e := hase.E()
		e.Delta.SetPos(0, 0)
	}
}
// moveForward accelerates along the facing direction by the vertical speed.
func (e *Entity) moveForward() {
	e.Delta.Add(e.Dir.Copy().Scale(e.Speed.Y()))
	e.moveVert = true
}

// moveBack accelerates against the facing direction.
func (e *Entity) moveBack() {
	e.Delta.Add(e.Dir.Copy().Scale(-e.Speed.Y()))
	e.moveVert = true
}

// moveRight strafes perpendicular (90° clockwise) to the facing direction.
func (e *Entity) moveRight() {
	e.Delta.Add(e.Dir.Copy().Rotate(90).Scale(e.Speed.X()))
	e.moveHoriz = true
}

// moveLeft strafes the opposite way to moveRight.
func (e *Entity) moveLeft() {
	e.Delta.Add(e.Dir.Copy().Rotate(90).Scale(-e.Speed.X()))
	e.moveHoriz = true
}

// teleportForward instantly displaces the entity along its facing direction.
func (e *Entity) teleportForward(distance float64) {
	e.Vector.Add(e.Dir.Copy().Scale(distance))
}

// teleportBack instantly displaces the entity against its facing direction.
func (e *Entity) teleportBack(distance float64) {
	e.Vector.Add(e.Dir.Copy().Scale(-distance))
}

// teleportRight instantly displaces the entity perpendicular to its facing.
func (e *Entity) teleportRight(distance float64) {
	e.Vector.Add(e.Dir.Copy().Rotate(90).Scale(distance))
}

// teleportLeft is the mirror of teleportRight.
func (e *Entity) teleportLeft(distance float64) {
	e.Vector.Add(e.Dir.Copy().Rotate(90).Scale(-distance))
}

// scaleDiagonal damps the delta (by 0.8) when both axes were driven this
// frame, so diagonal movement is not faster than axis-aligned movement,
// then resets the per-frame axis flags.
func (e *Entity) scaleDiagonal() {
	if e.moveHoriz && e.moveVert {
		e.Delta.Scale(.8)
	}
	e.moveHoriz = false
	e.moveVert = false
}

// enforceSpeedMax rescales the delta so its magnitude never exceeds speedMax.
func (e *Entity) enforceSpeedMax() {
	if e.Delta.Magnitude() > e.speedMax {
		e.Delta.Scale(e.speedMax / e.Delta.Magnitude())
	}
}
var (
	// tEntities tracks event ids of tracked entities so they can be torn
	// down together by CleanupEntities.
	tEntities = []event.CID{}
)

// CleanupEntities unbinds and destroys every tracked entity, then resets the
// tracking list.
func CleanupEntities() {
	for _, c := range tEntities {
		c.UnbindAll()
		event.DestroyEntity(int(c))
	}
	tEntities = []event.CID{}
} | game/entity.go | 0.544075 | 0.474753 | entity.go | starcoder |
package binpacker
import "errors"
// New creates a Packer managing an empty bin of the given dimensions; the
// root node initially represents the whole bin as free space.
func New(width, height int) *Packer {
	p := &Packer{binWidth: width, binHeight: height}
	p.root = node{Rect: Rect{Width: width, Height: height}}
	return p
}
// Packer allocates axis-aligned rectangles inside a fixed-size bin using a
// binary split tree rooted at root.
type Packer struct {
	root node
	// binWidth/binHeight are the current overall bin dimensions.
	binWidth, binHeight int
}

// node is one cell of the split tree. A leaf denotes free space; an interior
// node (left or right non-nil) denotes occupied space whose children cover
// the remaining free area (see insert).
type node struct {
	Rect
	left, right *node
}

// Rect is an axis-aligned rectangle positioned at (X, Y).
type Rect struct{ X, Y, Width, Height int }
// Enlarge grows the bin to newWidth x newHeight. It marks the previous bin
// area as completely occupied and inserts the new free area as two leaves:
// one spanning the full width below the old bin, and one to the right of it
// at the old bin's height. Returns an error if either dimension shrinks.
func (p *Packer) Enlarge(newWidth, newHeight int) error {
	if newWidth < p.binWidth || newHeight < p.binHeight {
		return errors.New("enlarge: new size is smaller")
	}
	// The new root covers the whole enlarged bin and is an interior node
	// (occupied); its children are the two new strips of free space.
	p.root = node{
		Rect: Rect{X: 0, Y: 0, Width: newWidth, Height: newHeight},
		// Free strip below the old bin, spanning the new full width.
		left: &node{Rect: Rect{
			X: 0,
			Y: p.binHeight,
			Width: newWidth,
			Height: newHeight - p.binHeight,
		}},
		// Free strip to the right of the old bin, at the old height.
		right: &node{Rect: Rect{
			X: p.binWidth,
			Y: 0,
			Width: newWidth - p.binWidth,
			Height: p.binHeight,
		}},
	}
	p.binWidth = newWidth
	p.binHeight = newHeight
	return nil
}
func (p *Packer) Insert(width, height int) (Rect, error) {
n, err := insert(&p.root, width, height)
if err != nil {
return Rect{}, err
}
return n.Rect, nil
}
var ErrNoMoreSpace = errors.New("insert: no more space in bin")
// insert recursively finds a free leaf in the subtree rooted at n that can
// hold a width x height rectangle, splits the leaf's remaining space into two
// child leaves, shrinks n to the occupied size, and returns it. Returns
// ErrNoMoreSpace when nothing in the subtree fits.
func insert(n *node, width, height int) (*node, error) {
	if n.left != nil || n.right != nil {
		// Interior node: its own area is occupied, so try the children.
		if n.left != nil {
			newNode, _ := insert(n.left, width, height)
			if newNode != nil {
				return newNode, nil
			}
		}
		if n.right != nil {
			newNode, _ := insert(n.right, width, height)
			if newNode != nil {
				return newNode, nil
			}
		}
		return nil, ErrNoMoreSpace
	}
	// this node is a leaf, can we insert the new rectangle here?
	if width > n.Width || height > n.Height {
		return nil, ErrNoMoreSpace
	}
	// the new cell will fit, split the remaining space along the shorter axis,
	// that is probably more optimal.
	restW, restH := n.Width-width, n.Height-height
	if restW < restH {
		// split the remaining space horizontally
		n.left = &node{Rect: Rect{
			X: n.X + width,
			Y: n.Y,
			Width: restW,
			Height: height,
		}}
		n.right = &node{Rect: Rect{
			X: n.X,
			Y: n.Y + height,
			Width: n.Width,
			Height: restH,
		}}
	} else {
		// split the remaining space vertically
		n.left = &node{Rect: Rect{
			X: n.X,
			Y: n.Y + height,
			Width: width,
			Height: restH,
		}}
		n.right = &node{Rect: Rect{
			X: n.X + width,
			Y: n.Y,
			Width: restW,
			Height: n.Height,
		}}
	}
	// Note that as a result of the above, it can happen that node->left or
	// node->right is now a degenerate (zero area) rectangle. No need to do
	// anything about it, like remove the nodes as "unnecessary" since they
	// need to exist as children of this node (this node can't be a leaf
	// anymore).
	// This node is now a non-leaf, so shrink its area - it now denotes
	// *occupied* space instead of free space. Its children spawn the resulting
	// area of free space.
	n.Width, n.Height = width, height
	return n, nil
}
// Occupancy reports the fraction of the bin's total area that is currently
// occupied, in the range [0, 1].
func (p *Packer) Occupancy() float64 {
	return float64(usedArea(&p.root)) / float64(p.binWidth*p.binHeight)
}

// usedArea sums the area of all occupied (interior) nodes in the subtree
// rooted at n. Interior nodes denote occupied space (see insert).
func usedArea(n *node) int {
	if n.left != nil || n.right != nil {
		used := n.Width * n.Height
		if n.left != nil {
			used += usedArea(n.left)
		}
		if n.right != nil {
			used += usedArea(n.right)
		}
		return used
	}
	// A leaf is free space and contributes nothing to the total.
	return 0
} | binpacker.go | 0.786295 | 0.456894 | binpacker.go | starcoder |
package aoc2020
/*
https://adventofcode.com/2020/day/12
--- Day 12: Rain Risk ---
Your ferry made decent progress toward the island, but the storm came in faster than anyone expected. The ferry needs to take evasive actions!
Unfortunately, the ship's navigation computer seems to be malfunctioning; rather than giving a route directly to safety, it produced extremely circuitous instructions. When the captain uses the PA system to ask if anyone can help, you quickly volunteer.
The navigation instructions (your puzzle input) consists of a sequence of single-character actions paired with integer input values. After staring at them for a few minutes, you work out what they probably mean:
Action N means to move north by the given value.
Action S means to move south by the given value.
Action E means to move east by the given value.
Action W means to move west by the given value.
Action L means to turn left the given number of degrees.
Action R means to turn right the given number of degrees.
Action F means to move forward by the given value in the direction the ship is currently facing.
The ship starts by facing east. Only the L and R actions change the direction the ship is facing. (That is, if the ship is facing east and the next instruction is N10, the ship would move north 10 units, but would still move east if the following action were F.)
For example:
F10
N3
F7
R90
F11
These instructions would be handled as follows:
F10 would move the ship 10 units east (because the ship starts by facing east) to east 10, north 0.
N3 would move the ship 3 units north to east 10, north 3.
F7 would move the ship another 7 units east (because the ship is still facing east) to east 17, north 3.
R90 would cause the ship to turn right by 90 degrees and face south; it remains at east 17, north 3.
F11 would move the ship 11 units south to east 17, south 8.
At the end of these instructions, the ship's Manhattan distance (sum of the absolute values of its east/west position and its north/south position) from its starting position is 17 + 8 = 25.
Figure out where the navigation instructions lead. What is the Manhattan distance between that location and the ship's starting position?
*/
import (
"fmt"
"strconv"
goutils "github.com/simonski/goutils"
)
// AOC_2020_12 is the entrypoint
func AOC_2020_12(cli *goutils.CLI) {
// AOC_2020_12_part1_attempt1(cli)
AOC_2020_12_part2_attempt1(cli)
}
func AOC_2020_12_part1_attempt1(cli *goutils.CLI) {
filename := cli.GetFileExistsOrDie("-input")
p := NewPathFromFile(filename)
// p.Debug()
s := NewShip()
for index, m := range p.movements {
s.Execute(m)
fmt.Printf("[%v] %v%v -> Ship[x=%v, y=%v, a=%v]\n", index, m.Command, m.Value, s.X, s.Y, s.Angle)
}
}
func AOC_2020_12_part2_attempt1(cli *goutils.CLI) {
filename := cli.GetFileExistsOrDie("-input")
p := NewPathFromFile(filename)
s := NewShip2(10, 1)
fmt.Printf("START\n Ship2(%v,%v %v) WP (%v,%v)\n", s.X, s.Y, s.Angle, s.Waypoint.X, s.Waypoint.Y)
for index, m := range p.movements {
s.Execute(m)
wp := s.Waypoint
fmt.Printf("[%v] %v%v -> Ship(%v,%v %v), -> Wp(%v,%v)\n", index, m.Command, m.Value, s.X, s.Y, s.Angle, wp.X, wp.Y)
}
}
// Path is an ordered list of navigation instructions.
type Path struct {
	movements []*Movement
}

// Debug prints every movement in order, one per line.
func (p *Path) Debug() {
	for index, m := range p.movements {
		fmt.Printf("[%v] %v%v\n", index, m.Command, m.Value)
	}
}

// Movement is one instruction: a single-letter command (N/S/E/W/L/R/F) and
// its integer magnitude.
type Movement struct {
	Command string
	Value int
}

// Ship is the part-1 ship: a position plus a compass heading in degrees
// (0 north, 90 east, 180 south, 270 west).
type Ship struct {
	X int
	Y int
	Angle int
}

// Ship2 is the part-2 ship: a position plus a waypoint whose offset from the
// ship steers all forward movement.
type Ship2 struct {
	X int
	Y int
	Angle int
	Waypoint *Waypoint
}

// Waypoint is an absolute position the part-2 ship steers toward.
type Waypoint struct {
	X int
	Y int
}
// NewShip returns a part-1 ship at the origin, facing east (90 degrees).
func NewShip() *Ship {
	return &Ship{Angle: 90}
}
// Execute applies a single navigation instruction with part-1 semantics:
//
//	N/S/E/W  translate the ship without changing its heading
//	L/R      rotate the heading by the given number of degrees
//	F        move in the direction of the current heading
//
// The heading is normalized to [0, 360): 0 north, 90 east, 180 south,
// 270 west. Unlike the original single-wrap adjustment, the modulo below
// also handles rotations of 360 degrees or more in one instruction.
func (s *Ship) Execute(m *Movement) {
	switch m.Command {
	case "N":
		s.Y += m.Value
	case "S":
		s.Y -= m.Value
	case "E":
		s.X += m.Value
	case "W":
		s.X -= m.Value
	case "R":
		s.Angle = ((s.Angle+m.Value)%360 + 360) % 360
	case "L":
		s.Angle = ((s.Angle-m.Value)%360 + 360) % 360
	case "F":
		switch s.Angle {
		case 0:
			s.Y += m.Value
		case 90:
			s.X += m.Value
		case 180:
			s.Y -= m.Value
		default:
			// 270 (west); any non-axis heading also lands here, which
			// matches the original's catch-all west branch.
			s.X -= m.Value
		}
	}
}
// NewShip2 returns a part-2 ship at the origin, facing east, with its
// waypoint placed at the given coordinates.
func NewShip2(waypoint_x int, waypoint_y int) *Ship2 {
	return &Ship2{
		Angle:    90,
		Waypoint: &Waypoint{X: waypoint_x, Y: waypoint_y},
	}
}
// Execute applies a single instruction with part-2 semantics: N/S/E/W move
// the waypoint, R/L rotate the waypoint around the ship, and F moves the
// ship toward the waypoint m.Value times, carrying the waypoint along so
// its offset from the ship is preserved.
func (s *Ship2) Execute(m *Movement) {
	if m.Command == "N" {
		s.Waypoint.Y += m.Value
	}
	if m.Command == "S" {
		s.Waypoint.Y -= m.Value
	}
	if m.Command == "E" {
		s.Waypoint.X += m.Value
	}
	if m.Command == "W" {
		s.Waypoint.X -= m.Value
	}
	if m.Command == "R" {
		wp := &goutils.Point2D{s.Waypoint.X, s.Waypoint.Y}
		origin := &goutils.Point2D{s.X, s.Y}
		wp.RotateAroundOrigin(m.Value, origin)
		s.Waypoint.X = wp.X
		s.Waypoint.Y = wp.Y
	} else if m.Command == "L" {
		wp := &goutils.Point2D{s.Waypoint.X, s.Waypoint.Y}
		origin := &goutils.Point2D{s.X, s.Y}
		wp.RotateAroundOrigin(-m.Value, origin)
		// BUG FIX: the original assigned wp.X to s.Waypoint.Y and then
		// immediately overwrote it with wp.Y, so every left turn lost the
		// waypoint's rotated X coordinate entirely.
		s.Waypoint.X = wp.X
		s.Waypoint.Y = wp.Y
	}
	if m.Command == "F" {
		x_diff := s.Waypoint.X - s.X
		y_diff := s.Waypoint.Y - s.Y
		x := x_diff * m.Value
		y := y_diff * m.Value
		fmt.Printf("Movement is %v%v, x/y diff is %v,%v, total changes will be adding %v,%v\n", m.Command, m.Value, x_diff, y_diff, x, y)
		s.X += x
		s.Y += y
		fmt.Printf("New s.x/s.y %v,%v\n", s.X, s.Y)
		// Keep the waypoint at the same offset from the moved ship.
		s.Waypoint.X = s.X + x_diff
		s.Waypoint.Y = s.Y + y_diff
	}
}
// NewPathFromFile parses a navigation input file (one instruction per line,
// e.g. "F10") into a Path of Movements.
func NewPathFromFile(filename string) *Path {
	lines := goutils.Load_file_to_strings(filename)
	data := make([]*Movement, 0)
	for _, line := range lines {
		// First character is the command letter, the rest is the magnitude.
		command := line[0:1]
		value := line[1:]
		// NOTE(review): the Atoi error is ignored, so a malformed line
		// silently becomes a movement of value 0.
		ivalue, _ := strconv.Atoi(value)
		movement := &Movement{Command: command, Value: ivalue}
		data = append(data, movement)
	}
	path := Path{movements: data}
	return &path
} | app/aoc2020/aoc2020_12.go | 0.746878 | 0.6395 | aoc2020_12.go | starcoder |
package lib
/*
A single zone can be anything from a room to a patch of land or space.
It is represented as a square area in which a player can find him/her self
All events/characters/actions take place in discrete zones.
Every zone defines a maximum of 8 exit points, which link to adjascent zones.
By linking one zone to another through these 'portals', one can create an
arbitrarily large world with different area types and places to visit.
The 8 exit points are all marked by compass directions and a single zone id to
which the portal links.
NW----N----NE NW----N----NE
| | | |
| ZONE 1 | | ZONE 2 |
W E W E
| | | |
| | | |
SW----S----SE SW----S----SE
NW----N----NE NW----N----NE
| | | |
| ZONE 3 | | ZONE 4 |
W E W E
| | | |
| | | |
SW----S----SE SW----S----SE
One zone's Northern exit links to another zone's Southern exit, etc.
The diagonal exits would technically link to 3 adjascent zones, but to simplify
the navigation in a text-based environment, we opt to strictly use them for
the diagonal route. So from the example above:
[Zone 1: South-East] <-> [Zone 4: North-West]
[Zone 2: South-West] <-> [Zone 3: North-East]
[Zone 3: East ] <-> [Zone 4: West ]
[Zone 2: South ] <-> [Zone 4: North ]
etc.
When a game world has been built, the World.Sanitize() method will traverse all
zones and ensure that all links match up properly. Specifically to make sure we
do not have multiple zones which occupy the same space.
For example, we have Zones 1, 2 and 3 defined. Then add zone 4 which links
[2:S] to [4:N]. Then define zone 5 which links [3:E] to [5:W]. This would put
both zone 4 and 5 in the same location and would constitute a spatial paradox.
Physics 101 teaches us that such things are preposterous and should they be
attempted, will most likely end in the entire universe collapsing into a super-
massive cupcake. One can understand this would be a bad deal for all involved.
It should be mentioned though, that the warnings generated by this in
World.Sanitize() are not fatal. So if one chooses to ignore these warnings, the
game will behave just the way you have defined it to behave. Just be aware that
this will likely confuse the living daylights out of your players.
*/
// Zone is a single square area of the game world. Exits holds up to 8
// Portals, one per compass direction, linking to adjacent zones (see the
// package comment above for the world-layout rules).
type Zone struct {
	Id int64
	Name string
	// Description, Lighting, Smell and Sound are the sensory text presented
	// to a player inside the zone.
	Description string
	Lighting string
	Smell string
	Sound string
	Exits []*Portal
}
// NewZone allocates a Zone with capacity reserved for its eight
// compass-direction exits.
func NewZone() *Zone {
	return &Zone{Exits: make([]*Portal, 0, 8)}
} | lib/game/components/zone.go | 0.572006 | 0.618089 | zone.go | starcoder |
package common
import (
"github.com/coschain/contentos-go/common"
"github.com/coschain/contentos-go/common/constants"
. "github.com/coschain/contentos-go/dandelion"
"github.com/coschain/contentos-go/prototype"
"github.com/stretchr/testify/assert"
"math/big"
"strings"
"testing"
)
// TrxTester groups transaction-level integration tests that run against a
// dandelion test chain.
type TrxTester struct{}

// Test registers and runs every transaction sub-test on the given dandelion
// instance.
func (tester *TrxTester) Test(t *testing.T, d *Dandelion) {
	t.Run("too_big", d.Test(tester.tooBig))
	t.Run("require_multi_signers", d.Test(tester.requireMultiSigners))
	t.Run("double_spent", d.Test(tester.doubleSpent))
	t.Run("dup_trx_inside_block", d.Test(tester.dupTrxInsideBlock))
	t.Run("sig_replay", d.Test(tester.sigReplay))
}
// tooBig verifies that transactions of normal size are accepted while those
// exceeding constants.MaxTransactionSize are rejected.
func (tester *TrxTester) tooBig(t *testing.T, d *Dandelion) {
	a := assert.New(t)
	// trxs with normal sizes should be accepted.
	a.NotNil(tester.transferWithMemo(d, ""))
	a.NotNil(tester.transferWithMemo(d, "your money"))
	// trxs larger than constants.MaxTransactionSize must be ignored; the memo
	// alone already exceeds the limit here.
	a.Nil(tester.transferWithMemo(d, strings.Repeat("A", constants.MaxTransactionSize)))
	a.Nil(tester.transferWithMemo(d, strings.Repeat("B", constants.MaxTransactionSize + 100)))
}
// doubleSpent verifies that a successfully applied transfer cannot be
// re-submitted: repeated submissions of the same signed transaction must fail
// for longer than the transaction expiration window, and the sender's balance
// must be debited exactly once.
func (tester *TrxTester) doubleSpent(t *testing.T, d *Dandelion) {
	a := assert.New(t)
	act1 := "actor1"
	act2 := "actor2"
	op := Transfer(act1, act2, 1, "double spent")
	prevBalance := d.Account(act1).GetBalance()
	trx, _, err := d.SendTrxEx2( d.GetAccountKey(act1), op )
	a.NoError(err)
	d.ProduceBlocks(1)
	a.Equal( prevBalance.Value - 1 , d.Account(act1).GetBalance().Value )
	// Re-submit the identical signed trx once per block, past the expiration
	// window, and confirm it is always rejected and never debits again.
	for index := 0; index < constants.TrxMaxExpirationTime + 10 ; index++ {
		_, err = d.SendRawTrx(trx)
		d.ProduceBlocks(1)
		a.Error(err)
		a.Equal( prevBalance.Value - 1 , d.Account(act1).GetBalance().Value )
	}
}
// transferWithMemo sends 1 coin from the initminer to actor0 with the given
// memo and returns the receipt (nil when the transaction was rejected).
func (tester *TrxTester) transferWithMemo(d *Dandelion, memo string) *prototype.TransactionReceiptWithInfo {
	return d.Account(constants.COSInitMiner).TrxReceipt(Transfer(constants.COSInitMiner, "actor0", 1, memo))
}
// requireMultiSigners verifies that a transaction whose operations would need
// signatures from different accounts is rejected.
func (tester *TrxTester) requireMultiSigners(t *testing.T, d *Dandelion) {
	a := assert.New(t)
	// normal case
	a.NotNil(d.Account(constants.COSInitMiner).TrxReceipt(
		Transfer(constants.COSInitMiner, "actor0", 2, ""),
	))
	// all operations in a trx must require the same signer.
	a.Nil(d.Account(constants.COSInitMiner).TrxReceipt(
		Transfer(constants.COSInitMiner, "actor0", 2, ""),
		Transfer("actor0", constants.COSInitMiner, 1, ""),
	))
}
// dupTrxInsideBlock verifies that a block containing the same transaction
// more than once is rejected by PushBlock.
func (tester *TrxTester) dupTrxInsideBlock(t *testing.T, d *Dandelion) {
	a := assert.New(t)
	op := Transfer(constants.COSInitMiner, "actor0", 1, "")
	acc := d.Account(constants.COSInitMiner)
	key := d.GetAccountKey(constants.COSInitMiner)
	// first, initminer transfer to actor0, we get a receipt which records net/cpu usage for a transfer.
	r := acc.TrxReceipt(op)
	a.NotNil(r)
	// second, try to apply a block containing duplicate transfer transactions.
	trx, _ := d.NewSignedTransaction(key, op)
	trxWrapper := &prototype.TransactionWrapper{
		SigTrx: trx,
		Receipt: &prototype.TransactionReceipt{
			Status: r.Status,
			NetUsage: r.NetUsage,
			CpuUsage: r.CpuUsage,
		},
	}
	// The same wrapper appears three times in the block.
	block, err := d.PushBlock(trxWrapper, trxWrapper, trxWrapper)
	a.NotNil(block)
	// PushBlock() must fail because the block contains duplicate transactions.
	a.Error(err)
}
// sigReplay verifies that a signature-malleability replay cannot double-spend:
// it submits a transfer twice, once with the original signature and once with
// a forged-but-valid variant, and checks the balance is debited only once.
func (tester *TrxTester) sigReplay(t *testing.T, d *Dandelion) {
	const amount = 1
	a := assert.New(t)
	op := Transfer(constants.COSInitMiner, "actor0", amount, "")
	acc := d.Account(constants.COSInitMiner)
	key := d.GetAccountKey(constants.COSInitMiner)
	balance := acc.GetBalance().GetValue()
	trx, _ := d.NewSignedTransaction(key, op)
	trx2 := new(prototype.SignedTransaction)
	*trx2 = *trx
	// Build the complementary ECDSA signature: replace s with n - s (the hex
	// constant below is the secp256k1 group order n) and flip the recovery
	// id. The result is a different byte string that still verifies for the
	// same message and key — the classic malleability trick.
	sig := common.CopyBytes(trx.Signature.Sig)
	s := big.NewInt(0).SetBytes(sig[32:64])
	n, _ := big.NewInt(0).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16)
	copy(sig[32:64], s.Sub(n, s).Bytes())
	sig[64] ^= 1
	trx2.Signature = &prototype.SignatureType{Sig:sig}
	_, _ = d.SendRawTrx(trx)
	_, _ = d.SendRawTrx(trx2)
	a.NoError(d.ProduceBlocks(1))
	// Only one of the two submissions may have taken effect.
	a.EqualValues(balance - amount, acc.GetBalance().GetValue())
}
} | tests/common/trx.go | 0.609873 | 0.45423 | trx.go | starcoder |
package slices
// Extract_columns_from_2d_slices returns a new rows x len(columns_to_extract)
// table holding, for each row of original_slice, the cells of the requested
// columns in the requested order. A column index that is out of range for a
// given row leaves the corresponding cell as the empty string (matching the
// original behavior). Runs in O(rows * len(columns_to_extract)) by indexing
// columns directly, instead of the original triple loop's
// O(rows * cols * len(columns_to_extract)) scan.
func Extract_columns_from_2d_slices(original_slice [][]string, columns_to_extract []int) [][]string {
	extracted_slice := Make_dynamic_2d_string_slice(len(columns_to_extract), len(original_slice))
	for row_index, row := range original_slice {
		for out_index, column_number := range columns_to_extract {
			if column_number >= 0 && column_number < len(row) {
				extracted_slice[row_index][out_index] = row[column_number]
			}
		}
	}
	return extracted_slice
}

// Make_dynamic_2d_string_slice allocates a rows x cols matrix of strings,
// each cell initialized to the empty string.
func Make_dynamic_2d_string_slice(cols int, rows int) [][]string {
	mat := make([][]string, rows)
	for i := range mat {
		mat[i] = make([]string, cols)
	}
	return mat
}

// Make_dynamic_2d_interface_slice allocates a rows x cols matrix of
// interface{} values, each cell initialized to nil.
func Make_dynamic_2d_interface_slice(cols int, rows int) [][]interface{} {
	mat := make([][]interface{}, rows)
	for i := range mat {
		mat[i] = make([]interface{}, cols)
	}
	return mat
}
// Add_single_value_column_to_2d_slice returns a deep copy of original_slice
// with value appended as an extra trailing cell on every row. The input is
// never mutated.
//
// BUG FIX: the original appended to original_slice's rows instead of the
// freshly copied rows, which both discarded the deep copy and — whenever an
// input row had spare capacity — could write the new value into the caller's
// backing array.
func Add_single_value_column_to_2d_slice(original_slice [][]string, value string) [][]string {
	converted_slice := Copy_2d_slices(original_slice)
	for row_index := range converted_slice {
		converted_slice[row_index] = append(converted_slice[row_index], value)
	}
	return converted_slice
}

// Copy_2d_slices returns a deep copy of original_slice: the outer slice and
// every row are freshly allocated.
func Copy_2d_slices(original_slice [][]string) [][]string {
	copy_slice := make([][]string, len(original_slice))
	for i := range original_slice {
		copy_slice[i] = make([]string, len(original_slice[i]))
		copy(copy_slice[i], original_slice[i])
	}
	return copy_slice
}
// Convert_2d_interface_to_string converts a 2D interface{} matrix to a 2D
// string matrix, cell by cell. Every cell must actually hold a string; a
// non-string cell makes the type assertion panic.
func Convert_2d_interface_to_string(interface_to_convert [][]interface{}) [][]string {
	var converted_string [][]string
	// NOTE(review): assumes the matrix is rectangular and non-empty —
	// indexing interface_to_convert[0] panics on an empty outer slice.
	converted_string = Make_dynamic_2d_string_slice(len(interface_to_convert[0]), len(interface_to_convert))
	for row_index, interface_to_convert_row := range interface_to_convert {
		for column_index, interface_to_convert_cell := range interface_to_convert_row {
			converted_string[row_index][column_index] = interface_to_convert_cell.(string)
		}
	}
	return converted_string
}

// Convert_2d_string_to_interface converts a 2D string matrix to a 2D
// interface{} matrix, cell by cell.
func Convert_2d_string_to_interface(string_to_convert [][]string) [][]interface{} {
	// NOTE(review): rectangular/non-empty assumption as in the function
	// above — string_to_convert[0] panics on an empty outer slice.
	converted_interface := Make_dynamic_2d_interface_slice(len(string_to_convert[0]), len(string_to_convert))
	for row_index, string_to_convert_row := range string_to_convert {
		for column_index, string_to_convert_cell := range string_to_convert_row {
			converted_interface[row_index][column_index] = string_to_convert_cell
		}
	}
	return converted_interface
} | code/services/in_memory/slices/slice_management.go | 0.53777 | 0.597402 | slice_management.go | starcoder |
package raylib
//#include "raylib.h"
import "C"
import "unsafe"
// These constants are not real data limits: they bound the fixed-size array
// types used below so that C pointers can be reinterpreted as pointers to
// (very large) Go arrays and indexed/sliced safely.
const (
	MaxMeshVertices = 1 << 28
	MaxMeshIndices = 1 << 28
	MaxMeshTexCoords = 1 << 28
	MaxMeshAnimatedVertices = 1 << 28
	MaxMeshBones = 1 << 28
)

// Mesh is vertex data that is stored in CPU and GPU memory. Its layout
// mirrors raylib's C Mesh struct exactly, which is what makes the raw
// pointer casts in newMeshFromPointer/cptr valid; all array pointers point
// into C-owned memory.
type Mesh struct {
	// Number of vertices stored in arrays
	VertexCount int32
	// Number of triangles stored (indexed or not)
	TriangleCount int32
	// Vertex position (XYZ - 3 components per vertex) (shader-location = 0)
	Vertices *[MaxMeshVertices]Vector3
	// Vertex texture coordinates (UV - 2 components per vertex) (shader-location = 1)
	Texcoords *[MaxMeshTexCoords]Vector2
	// Vertex second texture coordinates (useful for lightmaps) (shader-location = 5)
	Texcoords2 *[MaxMeshTexCoords]Vector2
	// Vertex normals (XYZ - 3 components per vertex) (shader-location = 2)
	Normals *[MaxMeshVertices]Vector3
	// Vertex tangents (XYZ - 3 components per vertex) (shader-location = 4)
	Tangents *[MaxMeshVertices]Vector3
	// Vertex colors (RGBA - 4 components per vertex) (shader-location = 3)
	Colors *[MaxMeshVertices]Color
	// Vertex indices (in case vertex data comes indexed)
	Indices *[MaxMeshIndices]uint16
	// Animated vertex positions (after bone transforms are applied)
	AnimVertices *[MaxMeshAnimatedVertices]Vector3
	// Animated normals (after bone transforms are applied)
	AnimNormals *[MaxMeshAnimatedVertices]Vector3
	// Per-vertex bone ids used for skinning
	BoneIds *[MaxMeshBones]int32
	// Per-vertex bone weights used for skinning
	BoneWeights *[MaxMeshBones]float32
	// OpenGL Vertex Array Object id
	VaoID uint32
	// OpenGL Vertex Buffer Objects id (7 types of vertex data)
	VboID unsafe.Pointer
}

// newMeshFromPointer reinterprets a C Mesh pointer as a Go *Mesh; valid
// only because the struct layouts match.
func newMeshFromPointer(ptr unsafe.Pointer) *Mesh {
	return (*Mesh)(ptr)
}

// cptr returns the Mesh reinterpreted as a C Mesh pointer for cgo calls.
func (s *Mesh) cptr() *C.Mesh {
	return (*C.Mesh)(unsafe.Pointer(s))
}
// BoneInfo describes one skeleton bone: a fixed-size name buffer and the
// index of its parent bone. Mirrors raylib's C BoneInfo layout.
type BoneInfo struct {
	Name [32]byte
	Parent int32
}

// Array bounds for the C-pointer-to-Go-array casts below; not real limits.
const (
	MaxModelMeshes = 1 << 28
	MaxModelMaterials = 1 << 28
	MaxModelBones = 1 << 28
	MaxModelBinds = 1 << 28
)

// Model mirrors raylib's C Model struct: meshes, materials, and skeleton
// data, plus an overall transform. All array pointers reference C memory.
type Model struct {
	Transform Matrix
	MeshCount int32
	Meshes *[MaxModelMeshes]Mesh
	MaterialCount int32
	Materials *[MaxModelMaterials]Material
	// MeshMaterial maps each mesh index to a material index.
	MeshMaterial *int32
	BoneCount int32
	Bones *[MaxModelBones]BoneInfo
	// BindPos holds the bones' bind (rest) pose transforms.
	BindPos *[MaxModelBinds]Transform
}

// ModelAnimation mirrors raylib's C ModelAnimation struct: a skeleton plus
// one pose (a transform per bone) for each animation frame.
type ModelAnimation struct {
	BoneCount int32
	Bones *[]BoneInfo
	FrameCount int32
	FramePoses *[](*[]Transform)
}

// newModelFromPointer reinterprets a C Model pointer as a Go *Model.
func newModelFromPointer(ptr unsafe.Pointer) *Model {
	return (*Model)(ptr)
}

// cptr returns the Model reinterpreted as a C Model pointer for cgo calls.
func (s *Model) cptr() *C.Model {
	return (*C.Model)(unsafe.Pointer(s))
}

// newModelAnimationFromPointer reinterprets a C ModelAnimation pointer as a
// Go *ModelAnimation.
func newModelAnimationFromPointer(ptr unsafe.Pointer) *ModelAnimation {
	return (*ModelAnimation)(ptr)
}

// cptr returns the ModelAnimation reinterpreted as a C pointer for cgo calls.
func (s *ModelAnimation) cptr() *C.ModelAnimation {
	return (*C.ModelAnimation)(unsafe.Pointer(s))
} | raylib/models.go | 0.731442 | 0.489015 | models.go | starcoder |
package gtrie
// SearchType selects the matching strategy used by Search, SearchValues and
// SearchAll.
type SearchType int

const (
	// SearchExactly finds the key exactly matching the input `key`.
	// (Now explicitly typed SearchType for consistency with the other
	// constants; it was previously an untyped 0.)
	SearchExactly SearchType = 0
	// SearchByPrefix finds all keys that start with the input `key`;
	// the input is a prefix of every key found.
	SearchByPrefix SearchType = 1
	// SearchLongestMatchingPrefix finds the longest stored key that is a
	// prefix of the input `key` (longest-prefix-match).
	SearchLongestMatchingPrefix SearchType = 2
	// SearchMatcingPrefix finds all stored keys that are prefixes of the
	// input `key`.
	SearchMatcingPrefix SearchType = 3
	// SearchApproximate finds keys by fuzzy (approximate) matching.
	SearchApproximate SearchType = 4
	// SearchAllRelativeKey combines SearchByPrefix, SearchMatcingPrefix and
	// SearchApproximate.
	SearchAllRelativeKey SearchType = 5
)
// Search finds all matching keys according to stype (SearchType). It returns
// nil when nothing matches or when stype is unknown. For the single-result
// strategies (SearchExactly, SearchLongestMatchingPrefix) the slice holds at
// most one key.
func (t *Trie) Search(key string, stype SearchType) []string {
	switch stype {
	case SearchExactly:
		if _, ok := t.Find(key); ok {
			return []string{key}
		}
	case SearchByPrefix:
		return t.FindByPrefix(key)
	case SearchLongestMatchingPrefix:
		if k, _, ok := t.FindLongestMatchingPrefix(key); ok {
			return []string{k}
		}
	case SearchMatcingPrefix:
		if keys, ok := t.FindMatchingPrefix(key); ok {
			return keys
		}
	case SearchApproximate:
		return t.FindByFuzzy(key)
	case SearchAllRelativeKey:
		return t.FindRelative(key)
	}
	return nil
}
// SearchValues finds all matching keys according to stype (SearchType) and
// returns the stored values of those keys. It returns nil when nothing
// matches or when stype is unknown.
func (t *Trie) SearchValues(key string, stype SearchType) []interface{} {
	switch stype {
	case SearchExactly:
		if v, ok := t.Find(key); ok {
			return []interface{}{v}
		}
	case SearchByPrefix:
		return t.FindByPrefixValue(key)
	case SearchLongestMatchingPrefix:
		if _, v, ok := t.FindLongestMatchingPrefix(key); ok {
			return []interface{}{v}
		}
	case SearchMatcingPrefix:
		return t.FindMatchingPrefixValue(key)
	case SearchApproximate:
		return t.FindByFuzzyValue(key)
	case SearchAllRelativeKey:
		return t.FindRelativeValues(key)
	}
	return nil
}
// SearchAll finds all matching keys according to stype (SearchType) and
// returns them as a key -> value map. It returns nil when nothing matches or
// when stype is unknown.
func (t *Trie) SearchAll(key string, stype SearchType) map[string]interface{} {
	switch stype {
	case SearchExactly:
		if v, ok := t.Find(key); ok {
			return map[string]interface{}{key: v}
		}
	case SearchByPrefix:
		return t.FindByPrefixAll(key)
	case SearchLongestMatchingPrefix:
		if k, v, ok := t.FindLongestMatchingPrefix(key); ok {
			return map[string]interface{}{k: v}
		}
	case SearchMatcingPrefix:
		return t.FindMatchingPrefixAll(key)
	case SearchApproximate:
		return t.FindByFuzzyAll(key)
	case SearchAllRelativeKey:
		return t.FindRelativeAll(key)
	}
	return nil
}
// FindRelative finds all keys relative to the input `key`: the deduplicated
// union of FindByPrefix, FindMatchingPrefix and FindByFuzzy matches.
func (t *Trie) FindRelative(key string) []string {
	t.mu.RLock()
	defer t.mu.RUnlock()
	// Candidate keys are collected in a map to deduplicate across the three
	// strategies; only the map's key set is returned.
	m := map[string]interface{}{}
	node := findNode(t.root, []rune(key))
	if node != nil {
		m = collectAll(node)
	}
	nodes, ok := t.findPrefixMatchNodes(key)
	if ok {
		for _, n := range nodes {
			m[n.path] = n.value
		}
	}
	// Values are irrelevant here, so fuzzy matches are recorded without the
	// per-key Find lookup the original performed (it looked up the input
	// `key` on every iteration and discarded the result anyway).
	for _, k := range t.FindByFuzzy(key) {
		m[k] = nil
	}
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	return keys
}
// FindRelativeValues finds the values of all keys relative to the input
// `key`: the deduplicated union of FindByPrefix, FindMatchingPrefix and
// FindByFuzzy matches.
func (t *Trie) FindRelativeValues(key string) []interface{} {
	t.mu.RLock()
	defer t.mu.RUnlock()
	m := map[string]interface{}{}
	node := findNode(t.root, []rune(key))
	if node != nil {
		m = collectAll(node)
	}
	nodes, ok := t.findPrefixMatchNodes(key)
	if ok {
		for _, n := range nodes {
			m[n.path] = n.value
		}
	}
	// BUG FIX: the original called t.Find(key) — the input key — for every
	// fuzzy match, storing the wrong value for each matched key.
	for _, k := range t.FindByFuzzy(key) {
		m[k], _ = t.Find(k)
	}
	// BUG FIX: the original appended the map KEYS to the result slice; a
	// function named ...Values must return the stored values.
	values := make([]interface{}, 0, len(m))
	for _, v := range m {
		values = append(values, v)
	}
	return values
}
// FindRelativeAll finds all keys relative to the input `key` as a
// key -> value map: the union of FindByPrefix, FindMatchingPrefix and
// FindByFuzzyAll results, deduplicated by key.
// NOTE(review): this (like its siblings above) calls other Trie methods
// while holding t.mu.RLock; if those methods also take the read lock, a
// queued writer can deadlock the recursive RLock — confirm their locking.
func (t *Trie) FindRelativeAll(key string) map[string]interface{} {
	t.mu.RLock()
	defer t.mu.RUnlock()
	m := make(map[string]interface{})
	node := findNode(t.root, []rune(key))
	if node != nil {
		m = collectAll(node)
	}
	nodes, ok := t.findPrefixMatchNodes(key)
	if ok {
		for _, n := range nodes {
			m[n.path] = n.value
		}
	}
	// Merge the fuzzy matches (key and value pairs) into the result.
	fm := t.FindByFuzzyAll(key)
	for k, v := range fm {
		m[k] = v
	}
	return m
} | trie.search.go | 0.738858 | 0.522263 | trie.search.go | starcoder |
package rfc3464
import (
"net/textproto"
"strings"
)
/*
RecipientRecord represents one per-recipient group of DSN fields (RFC 3464,
section 2.3).

A DSN contains information about attempts to deliver a message to one or
more recipients. The delivery information for any particular recipient is
contained in a group of contiguous per-recipient fields; each group is
preceded by a blank line:

	per-recipient-fields =
	      [ original-recipient-field CRLF ]
	      final-recipient-field CRLF
	      action-field CRLF
	      status-field CRLF
	      [ remote-mta-field CRLF ]
	      [ diagnostic-code-field CRLF ]
	      [ last-attempt-date-field CRLF ]
	      [ final-log-id-field CRLF ]
	      [ will-retry-until-field CRLF ]
	      *( extension-field CRLF )
*/
type RecipientRecord struct {
	/*
		OriginalRecipient (RFC 3464 §2.3.1, optional) is the recipient address
		as originally specified by the sender, e.g. address-type "rfc822" plus
		an RFC 822 address, or address-type "unknown". It is included only if
		the sender-specified address was present in the message envelope (such
		as via the RFC 3461 SMTP extensions) and can be used to automatically
		correlate DSN reports with message transactions.
	*/
	OriginalRecipient TypeValueField
	/*
		FinalRecipient (RFC 3464 §2.3.2, REQUIRED) is the recipient this group
		of per-recipient fields applies to, exactly as it appeared in the
		transport envelope when the Reporting MTA accepted the message
		(alphabetic case preserved). It may differ from the sender-supplied
		address because of forwarding and gatewaying; in the absence of
		OriginalRecipient it may be the only information available with which
		to correlate the DSN with a particular submission.
	*/
	FinalRecipient TypeValueField
	/*
		Action (RFC 3464 §2.3.3, REQUIRED) is the action performed by the
		Reporting MTA, spelled in any combination of upper and lower case:

			"failed"    delivery abandoned; no further notifications expected
			"delayed"   delivery not yet possible; the MTA keeps trying
			"delivered" successfully delivered (terminal state; does not mean
			            the message was read)
			"relayed"   relayed/gatewayed into an environment that does not
			            issue DSNs on successful delivery
			"expanded"  delivered and forwarded to multiple additional
			            recipients (non-terminal, unlike "delivered")

		Note the action is not redundant with Status: e.g. both a "delayed"
		and a later "failed" DSN may carry the same "4.x.x" status code.
	*/
	Action RecipientAction
	/*
		Status (RFC 3464 §2.3.4, REQUIRED) is the transport-independent
		status-code DIGIT "." 1*3DIGIT "." 1*3DIGIT (no whitespace; a
		parenthesised comment may follow). The first sub-field indicates the
		outcome (2 = success, 4 = persistent temporary failure, 5 = permanent
		failure); the initial set of codes is defined in RFC 3463.
	*/
	Status string
	/*
		RemoteMTA (RFC 3464 §2.3.5, optional) is a printable ASCII name of the
		"remote" MTA that reported delivery status to the "reporting" MTA
		(the "while talking to" information). It MUST NOT be included if no
		remote MTA was involved in the attempted delivery.
	*/
	RemoteMTA TypeValueField
	/*
		DiagnosticCode (RFC 3464 §2.3.6, optional) carries the actual,
		transport-specific diagnostic issued for a "failed" or "delayed"
		recipient, typed by its diagnostic-type sub-field. When RemoteMTA is
		present the diagnostic was issued by the remote MTA, otherwise by the
		Reporting MTA. It may be more precise than Status and is useful in
		trouble tickets or when tunneling foreign non-delivery reports.
	*/
	DiagnosticCode TypeValueField
	/*
		LastAttemptDate (RFC 3464 §2.3.7, optional) is the date and time of
		the last attempt to relay, gateway, or deliver the message by the
		Reporting MTA, in RFC 822 date-time format with a numeric timezone.
		It MUST NOT be included if that time is not available (e.g. when the
		DSN was issued by a gateway).
	*/
	LastAttemptDate string
	/*
		FinalLogID (RFC 3464 §2.3.8, optional) is the final MTA's log
		identifier for the message, usable as an index into the final MTA's
		log entry for this delivery attempt.
	*/
	FinalLogID string
	/*
		WillRetryUntil (RFC 3464 §2.3.9, optional, "delayed" DSNs only) is the
		RFC 822 date-time (numeric timezone) after which the Reporting MTA
		expects to abandon all attempts to deliver to this recipient. It MUST
		NOT appear in other DSN types.
	*/
	WillRetryUntil string
	/*
		Extensions (RFC 3464 §2.4) collects all remaining per-recipient
		fields: experimental "X-" fields, foreign-environment fields (e.g.
		X400-Physical-Forwarding-Address), transport-specific fields (e.g.
		SMTP-Remote-Recipient-Address) and MTA-specific fields (e.g.
		Foomail-Queue-ID). Non-"X-" names must be IANA registered.
	*/
	Extensions Extensions
}
func (record *RecipientRecord) fillFromHeader(hdr textproto.MIMEHeader) {
record.Extensions = make(Extensions)
var (
keyOriginalRecipient = textproto.CanonicalMIMEHeaderKey("Original-Recipient")
keyFinalRecipient = textproto.CanonicalMIMEHeaderKey("Final-Recipient")
keyAction = textproto.CanonicalMIMEHeaderKey("Action")
keyStatus = textproto.CanonicalMIMEHeaderKey("Status")
keyRemoteMTA = textproto.CanonicalMIMEHeaderKey("Remote-MTA")
keyDiagnosticCode = textproto.CanonicalMIMEHeaderKey("Diagnostic-Code")
keyLastAttemptDate = textproto.CanonicalMIMEHeaderKey("Last-Attempt-Date")
keyFinalLogID = textproto.CanonicalMIMEHeaderKey("Final-Log-ID")
keyWillRetryUntil = textproto.CanonicalMIMEHeaderKey("Will-Retry-Until")
)
for k, v := range hdr {
val := strings.Join(v, "\n")
switch k {
case keyOriginalRecipient:
record.OriginalRecipient = ParseTypeValueField(val)
case keyFinalRecipient:
record.FinalRecipient = ParseTypeValueField(val)
case keyAction:
record.Action = RecipientAction(val)
case keyStatus:
record.Status = val
case keyRemoteMTA:
record.RemoteMTA = ParseTypeValueField(val)
case keyDiagnosticCode:
record.DiagnosticCode = ParseTypeValueField(val)
case keyLastAttemptDate:
record.LastAttemptDate = val
case keyFinalLogID:
record.FinalLogID = val
case keyWillRetryUntil:
record.WillRetryUntil = val
default:
record.Extensions.Set(k, val)
}
}
} | rfc3464/RecipientRecord.go | 0.534612 | 0.514461 | RecipientRecord.go | starcoder |
// package retention implements models for liquid retention curves
// References:
// [1] <NAME>, <NAME> and <NAME> (2009) The concept of reference curves for constitutive
// modelling in soil mechanics, Computers and Geotechnics, 36(1-2), 149-165,
// http://dx.doi.org/10.1016/j.compgeo.2008.01.009
// [2] <NAME> and Williams DJ (2010) A novel approach for modelling soil-water
// characteristic curves with hysteresis, Computers and Geotechnics, 37(3), 374-380,
// http://dx.doi.org/10.1016/j.compgeo.2009.12.004
// [3] <NAME> and <NAME> (2011) Automatic Calibration of soil-water characteristic
// curves using genetic algorithms. Computers and Geotechnics, 38(3), 330-340,
// http://dx.doi.org/10.1016/j.compgeo.2010.12.004
package retention
import (
"github.com/cpmech/gosl/chk"
"github.com/cpmech/gosl/fun/dbf"
"github.com/cpmech/gosl/la"
"github.com/cpmech/gosl/ode"
)
// Model defines the interface of a liquid retention model (LRM).
//
// Derivs computes all derivatives at once (see [1] page 618):
//	L  = ∂Cc/∂pc
//	Lx = ∂²Cc/∂pc²
//	J  = ∂Cc/∂sl
//	Jx = ∂²Cc/(∂pc ∂sl)
//	Jy = ∂²Cc/∂sl²
//  References:
//   [1] <NAME> (2015) A consistent u-p formulation for porous media with hysteresis.
//       Int Journal for Numerical Methods in Engineering, 101(8) 606-634
//       http://dx.doi.org/10.1002/nme.4808
type Model interface {
	Init(prms dbf.Params) error // initialises retention model
	GetPrms(example bool) dbf.Params // gets (an example) of parameters
	SlMin() float64 // returns sl_min (minimum liquid saturation)
	SlMax() float64 // returns sl_max (maximum liquid saturation)
	Cc(pc, sl float64, wet bool) (float64, error) // computes Cc = f = ∂sl/∂pc
	L(pc, sl float64, wet bool) (float64, error) // computes L = ∂Cc/∂pc
	J(pc, sl float64, wet bool) (float64, error) // computes J = ∂Cc/∂sl
	Derivs(pc, sl float64, wet bool) (L, Lx, J, Jx, Jy float64, err error) // computes all derivatives
}
// Nonrate is a subset of LRM implemented by models that can compute the
// saturation directly from the capillary pressure, without integrating a
// rate equation.
type Nonrate interface {
	Sl(pc float64) float64 // compute sl directly from pc
}
// Update computes the new liquid saturation for a given increment Δpc of
// capillary pressure, starting from the state (pc0, sl0). The retention
// equation dsl/dpc = Cc is integrated with the implicit Radau5 ODE solver
// over a normalised pseudo-time x ∈ [0,1].
func Update(mdl Model, pc0, sl0, Δpc float64) (slNew float64, err error) {

	// wetting flag: decreasing capillary pressure means the wetting branch
	wet := Δpc < 0

	// callback functions
	//   x = [0.0, 1.0]
	//   pc = pc0 + x * Δpc
	//   y[0] = sl
	//   f(x,y) = dy/dx = dsl/dpc * dpc/dx = Cc * Δpc
	//   J(x,y) = df/dy = DCcDsl * Δpc
	fcn := func(f []float64, dx, x float64, y []float64) (e error) {
		f[0], e = mdl.Cc(pc0+x*Δpc, y[0], wet)
		f[0] *= Δpc
		// bug fix: propagate the Cc error; the original returned nil
		// unconditionally, silently swallowing model failures
		return e
	}
	jac := func(dfdy *la.Triplet, dx, x float64, y []float64) (e error) {
		if dfdy.Max() == 0 {
			dfdy.Init(1, 1, 1)
		}
		J, e := mdl.J(pc0+x*Δpc, y[0], wet)
		if e != nil {
			// do not populate the Jacobian with an undefined value
			return
		}
		dfdy.Start()
		dfdy.Put(0, 0, J)
		return
	}

	// ode solver
	var odesol ode.Solver
	odesol.Init("Radau5", 1, fcn, jac, nil, nil)
	odesol.SetTol(1e-10, 1e-7)
	odesol.Distr = false // this is important to avoid problems with MPI runs

	// solve from x=0 to x=1 and extract the final saturation
	y := []float64{sl0}
	err = odesol.Solve(y, 0, 1, 1, false)
	slNew = y[0]
	return
}
// New returns a new liquid retention model from the registered allocators,
// or an error if name is unknown.
func New(name string) (model Model, err error) {
	if allocator, found := allocators[name]; found {
		return allocator(), nil
	}
	return nil, chk.Err("model %q is not available in 'retention' database", name)
}
// allocators maps a model name to its factory function; models register
// themselves here so that New can instantiate them by name.
var allocators = map[string]func() Model{}
package ccCalc
import (
"github.com/bejohi/gococomp/model"
"math"
"image"
)
// CountConnectedComponents sums, over every uniform (non-zero) pixel of the
// image, the number of uniform pixels within the given radius around it.
// NOTE(review): assumes the image bounds start at (0,0) — confirm for
// sub-images.
func CountConnectedComponents(uniformImg *image.Gray, radius int) int {
	maxY := uniformImg.Rect.Max.Y
	maxX := uniformImg.Rect.Max.X
	total := 0
	for row := 0; row < maxY; row++ {
		for col := 0; col < maxX; col++ {
			// only count contributions where the center pixel is relevant
			if uniformImg.GrayAt(col, row).Y == 0 {
				continue
			}
			center := model.LbpPixel{X: col, Y: row}
			total += GetAllUniformPixelInRadius(uniformImg, maxY, maxX, center, radius)
		}
	}
	return total
}
func GetAllUniformPixelInRadius(uniformImg *image.Gray, imgHeight int, imgWidth int, centerPixel model.LbpPixel, radius int) int{
// We use this rectangle to roughly calculate the radius around our pixel.
// Inside the for loop we then calculate the euclidean distance, to know exactly if the pixel is in range.
roughRect := GetRectangleAroundPixelByRadius(centerPixel,radius,imgWidth,imgHeight)
count := 0
for y := roughRect.Top; y <= roughRect.Bottom; y++{
for x := roughRect.Left; x <= roughRect.Right;x++{
pixel := model.LbpPixel{x,y}
if pixel.Equals(centerPixel) || CalcPixelDistance(&pixel,¢erPixel) > radius{
continue
}
if uniformImg.GrayAt(x,y).Y >= 255{
count++
}
}
}
return count
}
// GetRectangleAroundPixelByRadius creates a rectangle of the given radius
// around a pixel, with every side clamped into a matrix of size
// matrixWidth x matrixHeight.
func GetRectangleAroundPixelByRadius(pixel model.LbpPixel, radius int, matrixWidth int, matrixHeight int) model.SidesRect {
	// clamp restricts v to the closed interval [0, upper]
	clamp := func(v, upper int) int {
		if v < 0 {
			return 0
		}
		if v > upper {
			return upper
		}
		return v
	}
	// Bug fix: the original clamped `top` by testing `right` against the
	// matrix height, so an out-of-range top edge was never corrected.
	left := clamp(pixel.X-radius, matrixWidth-1)
	right := clamp(pixel.X+radius, matrixWidth-1)
	top := clamp(pixel.Y-radius, matrixHeight-1)
	bottom := clamp(pixel.Y+radius, matrixHeight-1)
	return model.SidesRect{left, top, right, bottom}
}
// CalcPixelDistance calculates the euclidean distance between to pixels in the same matrix.
// Therefore the Pythagorean theorem is used.
func CalcPixelDistance(pix1 *model.LbpPixel, pix2 *model.LbpPixel) int{
leg1 := math.Abs(float64(pix1.X - pix2.X))
leg2 := math.Abs(float64(pix1.Y - pix2.Y))
hypotenuse := math.Sqrt(leg1 * leg1 + leg2 * leg2)
return int(hypotenuse)
} | ccCalc/ccCalculator.go | 0.823577 | 0.521349 | ccCalculator.go | starcoder |
// See also:
// - https://gobyexample.com/slices
// - https://golang.org/doc/
// - https://tour.golang.org/moretypes/7
// - https://blog.golang.org/slices-intro
// Slices are typically used far more often than arrays in Go.
// Slices are a key data type in Go, giving a more powerful interface
// to sequence than arrays.
package main
import "fmt"
// main demonstrates the basic slice operations: make, set/get, len, append,
// copy, the slice operator, literals and multi-dimensional slices.
func main() {

	// Unlike arrays, slices are typed only by the elements they contain (not
	// by the number of elements). To create an empty slice with non-zero length,
	// use the built-in make.
	// Here, we make a slice of strings of length 3 (initially zero-valued)
	s := make([]string, 3)
	fmt.Println("emp:", s)

	// We can set and get elements from slices in the same way we can from arrays.
	s[0] = "a"
	s[1] = "b"
	s[2] = "c"
	fmt.Println("set:", s)
	fmt.Println("get:", s[2])

	// len returns the length of the slice, as it does with arrays.
	fmt.Println("len:", len(s))

	// In addition to these basic operations, slices support several additional
	// operations that make them more versatile than arrays.
	// One of these additional operations is the built-in append, which returns a slice
	// containing one or more new values.
	s = append(s, "d")
	s = append(s, "e", "f")
	fmt.Println("apd:", s)

	// Slices can also be copied using the copy built-in.
	// Below, we create an empty slice c of the same length as s, and then
	// copy the values from s into c.
	c := make([]string, len(s))
	copy(c, s)
	// fix: label now ends with ":" like every other label in this program
	fmt.Println("cpy:", c)

	// Slices also support the "slice" operator. The "slice" operator takes
	// the syntax 'slice[low:high]'. In the example below, this gets a slice of
	// the elements s[2], s[3], and s[4].
	l := s[2:5]
	fmt.Println("sl1:", l)

	// This slices up to (but excluding) s[5]
	l = s[:5]
	fmt.Println("sl2:", l)

	// This slices up from (and including) s[2]
	l = s[2:]
	fmt.Println("sl3:", l)

	// We can declare and initialize a variable for slice in a single line as well.
	t := []string{"g", "h", "i"}
	fmt.Println("dcl:", t)

	// Append the letter 'j' to our declared string slice
	t = append(t, "j")
	fmt.Println("dcl_apd:", t)

	// Slices can be composed into multi-dimensional data structures.
	// Unlike multi-dimensional arrays, the length of the inner slices can vary.
	twoD := make([][]int, 3)
	for i := 0; i < 3; i++ {
		innerLen := i + 1
		twoD[i] = make([]int, innerLen)
		for j := 0; j < innerLen; j++ {
			twoD[i][j] = i + j
		}
	}
	fmt.Println("2d: ", twoD)

	// Although slices are different types than arrays, they are rendered
	// similarly by fmt.Println.
}
package types
import (
"io"
"github.com/lyraproj/pcore/px"
)
// IterableType represents the Iterable[T] type: values that can be iterated
// to produce elements of the contained type.
type IterableType struct {
	typ px.Type // element type produced by iteration
}
// IterableMetaType is the object type (Pcore::IterableType) describing
// IterableType itself; initialised in init below.
var IterableMetaType px.ObjectType

// init registers the Pcore::IterableType meta type together with its
// constructor, which delegates to newIterableType2.
func init() {
	IterableMetaType = newObjectType(`Pcore::IterableType`,
		`Pcore::AnyType {
	attributes => {
		type => {
			type => Optional[Type],
			value => Any
		},
	}
}`, func(ctx px.Context, args []px.Value) px.Value {
			return newIterableType2(args...)
		})
}
// DefaultIterableType returns the shared default instance (Iterable[Any]).
func DefaultIterableType() *IterableType {
	return iterableTypeDefault
}
// NewIterableType creates an Iterable over elementType; a nil or Any element
// type yields the shared default instance.
func NewIterableType(elementType px.Type) *IterableType {
	if elementType != nil && elementType != anyTypeDefault {
		return &IterableType{typ: elementType}
	}
	return DefaultIterableType()
}
// newIterableType2 builds an IterableType from constructor arguments:
// zero arguments gives the default; one argument must be a Type.
func newIterableType2(args ...px.Value) *IterableType {
	if len(args) == 0 {
		return DefaultIterableType()
	}
	if len(args) > 1 {
		panic(illegalArgumentCount(`Iterable[]`, `0 - 1`, len(args)))
	}
	if containedType, ok := args[0].(px.Type); ok {
		return NewIterableType(containedType)
	}
	panic(illegalArgumentType(`Iterable[]`, 0, `Type`, args[0]))
}
// Accept visits this type and then its element type with the given visitor.
func (t *IterableType) Accept(v px.Visitor, g px.Guard) {
	v(t)
	t.typ.Accept(v, g)
}
// Default returns the default Iterable type (Iterable[Any]).
func (t *IterableType) Default() px.Type {
	return iterableTypeDefault
}
// Equals reports whether o is an *IterableType with an equal element type.
func (t *IterableType) Equals(o interface{}, g px.Guard) bool {
	ot, ok := o.(*IterableType)
	return ok && t.typ.Equals(ot.typ, g)
}
// Generic returns an Iterable over the generic form of the element type.
func (t *IterableType) Generic() px.Type {
	return NewIterableType(px.GenericType(t.typ))
}
// Get returns the attribute named key; only `type` is supported.
func (t *IterableType) Get(key string) (value px.Value, ok bool) {
	if key == `type` {
		return t.typ, true
	}
	return nil, false
}
// IsAssignable reports whether iterating a value of type o yields elements
// assignable to this Iterable's element type.
func (t *IterableType) IsAssignable(o px.Type, g px.Guard) bool {
	var et px.Type
	switch o := o.(type) {
	case *ArrayType:
		et = o.ElementType()
	case *BinaryType:
		// iterating a Binary yields bytes
		et = NewIntegerType(0, 255)
	case *HashType:
		et = o.EntryType()
	case *stringType, *vcStringType, *scStringType:
		// iterating a String yields single-character strings
		et = OneCharStringType
	case *TupleType:
		// each tuple member must be assignable individually
		return allAssignableTo(o.types, t.typ, g)
	default:
		return false
	}
	return GuardedIsAssignable(t.typ, et, g)
}
// IsInstance reports whether o is an indexed collection whose element type
// is assignable to this Iterable's element type.
func (t *IterableType) IsInstance(o px.Value, g px.Guard) bool {
	if iv, ok := o.(px.Indexed); ok {
		return GuardedIsAssignable(t.typ, iv.ElementType(), g)
	}
	return false
}
// MetaType returns the object type describing IterableType itself.
func (t *IterableType) MetaType() px.ObjectType {
	return IterableMetaType
}
// Name returns the simple name of this type.
func (t *IterableType) Name() string {
	return `Iterable`
}
// Parameters returns the type parameters; empty for the default
// Iterable[Any], otherwise the single element type.
func (t *IterableType) Parameters() []px.Value {
	if t.typ == DefaultAnyType() {
		return px.EmptyValues
	}
	return []px.Value{t.typ}
}
// Resolve resolves the (possibly deferred) element type in the given context
// and returns the receiver.
func (t *IterableType) Resolve(c px.Context) px.Type {
	t.typ = resolve(c, t.typ)
	return t
}
// CanSerializeAsString reports whether this type (via its element type) can
// be serialized as a string.
func (t *IterableType) CanSerializeAsString() bool {
	return canSerializeAsString(t.typ)
}
// SerializationString returns the string representation used when
// serializing this type.
func (t *IterableType) SerializationString() string {
	return t.String()
}
// String returns the human readable representation of this type.
func (t *IterableType) String() string {
	return px.ToString2(t, None)
}
// ElementType returns the type of the elements produced by iteration.
func (t *IterableType) ElementType() px.Type {
	return t.typ
}
// ToString writes the string representation of this type to b using the
// format context s.
func (t *IterableType) ToString(b io.Writer, s px.FormatContext, g px.RDetect) {
	TypeToString(t, b, s, g)
}
// PType returns the type of this type (its meta type).
func (t *IterableType) PType() px.Type {
	return &TypeType{t}
}
// iterableTypeDefault is the canonical Iterable[Any] instance returned by
// DefaultIterableType.
var iterableTypeDefault = &IterableType{typ: DefaultAnyType()}
package balance
import (
"strconv"
"github.com/iotaledger/hive.go/marshalutil"
)
// Balance represents a balance in the IOTA ledger. It consists out of a
// numeric value and a color.
type Balance struct {
	value int64 // number of tokens
	color Color // color identifying the token type
}
// New creates a new Balance with the given details.
func New(color Color, balance int64) *Balance {
	return &Balance{
		color: color,
		value: balance,
	}
}
// FromBytes unmarshals a Balance from a sequence of bytes: the int64 value
// followed by the color bytes. On success, consumedBytes is the number of
// bytes read from the input.
func FromBytes(bytes []byte) (result *Balance, consumedBytes int, err error) {
	result = &Balance{}
	marshalUtil := marshalutil.New(bytes)
	// the numeric value comes first
	result.value, err = marshalUtil.ReadInt64()
	if err != nil {
		return
	}
	// then the color, parsed via ColorFromBytes
	coinColor, colorErr := marshalUtil.Parse(func(data []byte) (interface{}, int, error) {
		return ColorFromBytes(data)
	})
	if colorErr != nil {
		return nil, marshalUtil.ReadOffset(), colorErr
	}
	result.color = coinColor.(Color)
	consumedBytes = marshalUtil.ReadOffset()
	return
}
// Parse is a wrapper for simplified unmarshaling in a byte stream using the
// marshalutil package.
func Parse(marshalUtil *marshalutil.MarshalUtil) (*Balance, error) {
	parsed, err := marshalUtil.Parse(func(data []byte) (interface{}, int, error) {
		return FromBytes(data)
	})
	if err != nil {
		return nil, err
	}
	return parsed.(*Balance), nil
}
// Value returns the numeric value of the balance.
func (balance *Balance) Value() int64 {
	return balance.value
}
// Color returns the Color of the balance.
func (balance *Balance) Color() Color {
	return balance.color
}
// Bytes marshals the Balance into a sequence of bytes: the int64 value
// followed by the color bytes (the inverse of FromBytes).
func (balance *Balance) Bytes() []byte {
	marshalUtil := marshalutil.New(Length)
	marshalUtil.WriteInt64(balance.value)
	marshalUtil.WriteBytes(balance.color.Bytes())
	return marshalUtil.Bytes()
}
// String creates a human readable string of the Balance in the form
// "<value> <color>".
func (balance *Balance) String() string {
	return strconv.FormatInt(balance.value, 10) + " " + balance.color.String()
}
// Length encodes the length of a marshaled Balance: 8 bytes for the int64
// value plus the length of the color.
const Length = 8 + ColorLength
package additionalPractice
/*
Problem : https://www.interviewbit.com/problems/array-sum/
Solution :
1) Two Pointer Approach
- Keep pointers at the end of each array.
- keep adding the values pointed at each array and decrease the pointer value.
- reverse the resultant array.
Follow up :
1) Try to do the above problem without using any additional space.
- Use the longer input array as the resultant array.
- Save the sum in resultant array using two pointer approach.
- if carry is present then append the resultant array with carry.
- right shift all the elemnets by 1.
- set carry as the first element of the array.
- return the array.
*/
// addArrays adds two non-negative numbers represented as digit arrays
// (most significant digit first) and returns the sum as a new digit array.
// T(n) : O(n), S(n) : O(n)
func addArrays(A []int, B []int) []int {
	arr := make([]int, 0)
	iA, iB, carry := len(A)-1, len(B)-1, 0
	// walk both arrays from the least significant digit; folding the final
	// carry into the loop condition removes the duplicated append afterwards
	for iA >= 0 || iB >= 0 || carry != 0 {
		sum := carry
		if iA >= 0 {
			sum += A[iA]
			iA--
		}
		if iB >= 0 {
			sum += B[iB]
			iB--
		}
		arr = append(arr, sum%10)
		carry = sum / 10
	}
	// digits were produced least-significant first; reverse in place
	// (also drops the redundant int(n/2) conversion of the original)
	for i, j := 0, len(arr)-1; i < j; i, j = i+1, j-1 {
		arr[i], arr[j] = arr[j], arr[i]
	}
	return arr
}
// addArraysFollowUp adds two digit arrays, reusing the longer input array as
// the result buffer (and therefore mutating it). Extra space is O(1) except
// when a final carry forces a new, one-digit-longer slice.
// T(n) : O(n), S(n) : O(1)
func addArraysFollowUp(A []int, B []int) []int {
	res := A
	if len(B) > len(A) {
		res = B
	}
	writeIdx := len(res) - 1
	posA, posB, carry := len(A)-1, len(B)-1, 0
	for posA >= 0 || posB >= 0 {
		sum := carry
		if posA >= 0 {
			sum += A[posA]
			posA--
		}
		if posB >= 0 {
			sum += B[posB]
			posB--
		}
		res[writeIdx] = sum % 10
		writeIdx--
		carry = sum / 10
	}
	if carry != 0 {
		return append([]int{carry}, res...)
	}
	return res
}
// addArraysFollowUp1 adds two digit arrays, reusing the longer input array
// as the result buffer (and therefore mutating it). When a final carry
// remains, the slice is grown by one and the digits shifted right in place.
// T(n) : O(n), S(n) : O(1)
func addArraysFollowUp1(A []int, B []int) []int {
	res := A
	if len(B) > len(A) {
		res = B
	}
	iC := len(res) - 1
	iA, iB, carry := len(A)-1, len(B)-1, 0
	for iA >= 0 || iB >= 0 {
		sum := carry
		if iA >= 0 {
			sum += A[iA]
			iA--
		}
		if iB >= 0 {
			sum += B[iB]
			iB--
		}
		res[iC] = sum % 10
		iC--
		carry = sum / 10
	}
	if carry != 0 {
		// Grow by one placeholder slot, shift every digit right, then put
		// the carry in front. (The original appended `carry` itself, only to
		// overwrite it during the shift — appending 0 makes the intent
		// clear; builtin copy is specified to handle overlapping slices.)
		res = append(res, 0)
		copy(res[1:], res[:len(res)-1])
		res[0] = carry
	}
	return res
}
// SolveArraySum adds two digit arrays using the O(n)-extra-space two-pointer
// approach (addArrays).
func SolveArraySum(A []int, B []int) []int {
	return addArrays(A, B)
}
// SolveArraySumFollowUp adds two digit arrays in-place, reusing the longer
// input as the result buffer (addArraysFollowUp); mutates that input.
func SolveArraySumFollowUp(A []int, B []int) []int {
	return addArraysFollowUp(A, B)
}
// SolveArraySumFollowUp1 adds two digit arrays in-place, handling a final
// carry with a right shift (addArraysFollowUp1); mutates the longer input.
func SolveArraySumFollowUp1(A []int, B []int) []int {
	return addArraysFollowUp1(A, B)
}
package cartogram
import (
"encoding/json"
"io/ioutil"
"time"
)
// Cartogram defines a set of accounts and their metadata
type Cartogram struct {
	Version    int        `json:"version"`  // schema version; must match specVersion on load
	Created    time.Time  `json:"created"`  // creation timestamp
	AccountSet AccountSet `json:"accounts"` // the accounts described by this cartogram
}
// dummyCartogram just parses the Version field of a serialized Cartogram;
// schemaVersionCheck uses it to detect a schema version mismatch before a
// full parse is attempted.
type dummyCartogram struct {
	Version int `json:"version"`
}
// NewCartogram creates a new cartogram from an account set, stamping it with
// the current spec version and creation time.
func NewCartogram(as AccountSet) Cartogram {
	return Cartogram{
		Version:    specVersion,
		Created:    time.Now(),
		AccountSet: as,
	}
}
// Lookup finds an account in a Cartogram based on its ID, delegating to the
// underlying AccountSet.
func (c Cartogram) Lookup(accountID string) (bool, Account) {
	return c.AccountSet.Lookup(accountID)
}
// Search finds accounts based on their tags, delegating to the underlying
// AccountSet.
func (c Cartogram) Search(tfs TagFilterSet) AccountSet {
	return c.AccountSet.Search(tfs)
}
// AllProfiles returns all unique profiles found across every account.
func (c Cartogram) AllProfiles() []string {
	profiles := []string{}
	for _, account := range c.AccountSet {
		profiles = append(profiles, account.AllProfiles()...)
	}
	return uniqCollect(profiles)
}
// loadFromFile reads the cartogram JSON document at filePath and parses it
// into c, returning any read or parse error.
func (c *Cartogram) loadFromFile(filePath string) error {
	logger.InfoMsgf("loading cartogram from %s", filePath)
	// NOTE(review): ioutil.ReadFile is deprecated since Go 1.16; consider
	// os.ReadFile — confirm the module's minimum Go version first.
	data, err := ioutil.ReadFile(filePath)
	if err != nil {
		return err
	}
	return c.loadFromString(data)
}
// loadFromString validates the schema version embedded in data and, when it
// matches, unmarshals the full document into c.
func (c *Cartogram) loadFromString(data []byte) error {
	err := schemaVersionCheck(data)
	if err != nil {
		return err
	}
	return json.Unmarshal(data, c)
}
// schemaVersionCheck parses only the "version" field of a cartogram document
// and returns a SpecVersionError when it does not match the supported
// specVersion. This lets callers fail fast before a full unmarshal.
func schemaVersionCheck(data []byte) error {
	var c dummyCartogram
	if err := json.Unmarshal(data, &c); err != nil {
		return err
	}
	if c.Version != specVersion {
		return SpecVersionError{ActualVersion: c.Version, ExpectedVersion: specVersion}
	}
	return nil
}
// writeToFile serializes c as indented JSON and writes it to filePath with
// owner-only (0600) permissions.
func (c Cartogram) writeToFile(filePath string) error {
	logger.InfoMsgf("writing cartogram to %s", filePath)
	data, err := c.writeToString()
	if err != nil {
		return err
	}
	return ioutil.WriteFile(filePath, data, 0600)
}
func (c Cartogram) writeToString() ([]byte, error) {
buffer, err := json.MarshalIndent(c, "", " ")
if err != nil {
return []byte{}, err
}
return buffer, nil
} | cartogram/cartogram.go | 0.681409 | 0.404155 | cartogram.go | starcoder |
package carbon
import (
"strconv"
"strings"
"time"
)
// Parse parses a standard time string into a Carbon instance.
// Digit-only strings are interpreted as the short date (8 digits) or short
// date-time (14 digits) layouts; otherwise the layout is inferred from the
// string's length and the positions of "-", "." and "T". Any parse failure
// is reported as an invalid-value error for the original input.
func (c Carbon) Parse(value string, timezone ...string) Carbon {
	layout := DateTimeFormat
	if _, err := strconv.ParseInt(value, 10, 64); err == nil {
		// Pure digits: yyyymmdd or yyyymmddhhmmss.
		switch {
		case len(value) == 8:
			layout = ShortDateFormat
		case len(value) == 14:
			layout = ShortDateTimeFormat
		}
	} else {
		// Infer the layout from length plus separator positions:
		// "-" twice for a plain date, "." at 14 for short date-times with
		// fractional seconds, "T" at 10 for RFC3339 variants.
		switch {
		case len(value) == 10 && strings.Count(value, "-") == 2:
			layout = DateFormat
		case len(value) == 18 && strings.Index(value, ".") == 14:
			layout = ShortDateTimeMilliFormat
		case len(value) == 21 && strings.Index(value, ".") == 14:
			layout = ShortDateTimeMicroFormat
		case len(value) == 24 && strings.Index(value, ".") == 14:
			layout = ShortDateTimeNanoFormat
		case len(value) == 25 && strings.Index(value, "T") == 10:
			layout = RFC3339Format
		case len(value) == 29 && strings.Index(value, "T") == 10 && strings.Index(value, ".") == 19:
			layout = RFC3339MilliFormat
		case len(value) == 32 && strings.Index(value, "T") == 10 && strings.Index(value, ".") == 19:
			layout = RFC3339MicroFormat
		case len(value) == 35 && strings.Index(value, "T") == 10 && strings.Index(value, ".") == 19:
			layout = RFC3339NanoFormat
		}
	}
	carbon := c.ParseByLayout(value, layout, timezone...)
	if carbon.Error != nil {
		// Report the original value rather than the guessed layout.
		carbon.Error = invalidValueError(value)
	}
	return carbon
}
// Parse parses a standard time string into a Carbon instance using a fresh
// default Carbon value as the receiver.
func Parse(value string, timezone ...string) Carbon {
	return NewCarbon().Parse(value, timezone...)
}
// ParseByFormat parses a string into a Carbon instance according to the
// given format characters (converted to a Go layout via format2layout).
// Parse failures are reported as invalid-format errors.
func (c Carbon) ParseByFormat(value, format string, timezone ...string) Carbon {
	carbon := c.ParseByLayout(value, format2layout(format), timezone...)
	if carbon.Error != nil {
		carbon.Error = invalidFormatError(value, format)
	}
	return carbon
}
// ParseByFormat parses a string into a Carbon instance by format using a
// fresh default Carbon value as the receiver.
func ParseByFormat(value, format string, timezone ...string) Carbon {
	return NewCarbon().ParseByFormat(value, format, timezone...)
}
// ParseByLayout parses a string into a Carbon instance using a Go time
// layout. When one or more timezones are supplied, only the last one is
// used. Empty and zero-like values return the receiver unchanged, without
// setting an error.
func (c Carbon) ParseByLayout(value, layout string, timezone ...string) Carbon {
	if len(timezone) > 0 {
		// Only the last supplied timezone wins.
		c.loc, c.Error = getLocationByTimezone(timezone[len(timezone)-1])
	}
	if c.Error != nil {
		return c
	}
	// Treat common "zero" encodings as an absent value rather than an error.
	if value == "" || value == "0" || value == "0000-00-00 00:00:00" || value == "0000-00-00" || value == "00:00:00" {
		return c
	}
	tt, err := time.ParseInLocation(layout, value, c.loc)
	if err != nil {
		c.Error = invalidLayoutError(value, layout)
		return c
	}
	c.time = tt
	return c
}
// ParseByLayout parses a layout time string into a Carbon instance using a
// fresh default Carbon value as the receiver.
func ParseByLayout(value, layout string, timezone ...string) Carbon {
	return NewCarbon().ParseByLayout(value, layout, timezone...)
}
package threefish
import (
"crypto/cipher"
)
const (
	// Size of a 1024-bit block in bytes
	blockSize1024 = 128
	// Number of 64-bit words per 1024-bit block
	numWords1024 = blockSize1024 / 8
	// Number of rounds when using a 1024-bit cipher (Threefish-1024 uses 80)
	numRounds1024 = 80
)
// cipher1024 holds the precomputed state for one Threefish-1024 key/tweak pair.
type cipher1024 struct {
	t  [(tweakSize / 8) + 1]uint64                     // extended tweak words (third word derived in calculateTweak)
	ks [(numRounds1024 / 4) + 1][numWords1024]uint64   // key schedule: one subkey per 4 rounds, plus the final subkey
}
// New1024 creates a new Threefish cipher with a block size of 1024 bits.
// The key argument must be 64 bytes and the tweak argument must be 16 bytes.
// NOTE(review): the doc says 64 bytes but the code checks against
// blockSize1024 (128 bytes); the key of Threefish-1024 equals the block
// size, so the check appears right and the sentence above is the stale part.
func New1024(key, tweak []byte) (cipher.Block, error) {
	// Length check the provided key
	if len(key) != blockSize1024 {
		return nil, KeySizeError(blockSize1024)
	}
	c := new(cipher1024)
	// Load and extend the tweak value
	if err := calculateTweak(&c.t, tweak); err != nil {
		return nil, err
	}
	// Load and extend the key: the extra word is the C240 constant XORed
	// with every key word (the Threefish key-parity word).
	k := new([numWords1024 + 1]uint64)
	k[numWords1024] = c240
	for i := 0; i < numWords1024; i++ {
		k[i] = loadWord(key[i*8 : (i+1)*8])
		k[numWords1024] ^= k[i]
	}
	// Calculate the key schedule: subkey s mixes rotated key words with two
	// tweak words and the subkey counter, per the Threefish specification.
	for s := 0; s <= numRounds1024/4; s++ {
		for i := 0; i < numWords1024; i++ {
			c.ks[s][i] = k[(s+i)%(numWords1024+1)]
			switch i {
			case numWords1024 - 3:
				c.ks[s][i] += c.t[s%3]
			case numWords1024 - 2:
				c.ks[s][i] += c.t[(s+1)%3]
			case numWords1024 - 1:
				c.ks[s][i] += uint64(s)
			}
		}
	}
	return c, nil
}
// BlockSize returns the block size of a 1024-bit cipher in bytes (128).
func (c *cipher1024) BlockSize() int { return blockSize1024 }
// Encrypt loads one 1024-bit plaintext block from src, encrypts it with the
// precomputed key schedule, and stores the ciphertext in dst. Both slices
// must be at least 128 bytes (the fixed indexing below panics otherwise).
// The 80 rounds are fully unrolled eight at a time: each iteration applies
// a subkey injection, four mix/permute rounds, a second subkey injection,
// and four more mix/permute rounds with the alternate rotation constants.
func (c *cipher1024) Encrypt(dst, src []byte) {
	// Load the input
	in := new([numWords1024]uint64)
	in[0] = loadWord(src[0:8])
	in[1] = loadWord(src[8:16])
	in[2] = loadWord(src[16:24])
	in[3] = loadWord(src[24:32])
	in[4] = loadWord(src[32:40])
	in[5] = loadWord(src[40:48])
	in[6] = loadWord(src[48:56])
	in[7] = loadWord(src[56:64])
	in[8] = loadWord(src[64:72])
	in[9] = loadWord(src[72:80])
	in[10] = loadWord(src[80:88])
	in[11] = loadWord(src[88:96])
	in[12] = loadWord(src[96:104])
	in[13] = loadWord(src[104:112])
	in[14] = loadWord(src[112:120])
	in[15] = loadWord(src[120:128])
	// Perform encryption rounds
	for d := 0; d < numRounds1024; d += 8 {
		// Add round key
		in[0] += c.ks[d/4][0]
		in[1] += c.ks[d/4][1]
		in[2] += c.ks[d/4][2]
		in[3] += c.ks[d/4][3]
		in[4] += c.ks[d/4][4]
		in[5] += c.ks[d/4][5]
		in[6] += c.ks[d/4][6]
		in[7] += c.ks[d/4][7]
		in[8] += c.ks[d/4][8]
		in[9] += c.ks[d/4][9]
		in[10] += c.ks[d/4][10]
		in[11] += c.ks[d/4][11]
		in[12] += c.ks[d/4][12]
		in[13] += c.ks[d/4][13]
		in[14] += c.ks[d/4][14]
		in[15] += c.ks[d/4][15]
		// Four rounds of mix and permute (the multi-assignment lines apply
		// the fixed Threefish-1024 word permutation)
		in[0] += in[1]
		in[1] = ((in[1] << 24) | (in[1] >> (64 - 24))) ^ in[0]
		in[2] += in[3]
		in[3] = ((in[3] << 13) | (in[3] >> (64 - 13))) ^ in[2]
		in[4] += in[5]
		in[5] = ((in[5] << 8) | (in[5] >> (64 - 8))) ^ in[4]
		in[6] += in[7]
		in[7] = ((in[7] << 47) | (in[7] >> (64 - 47))) ^ in[6]
		in[8] += in[9]
		in[9] = ((in[9] << 8) | (in[9] >> (64 - 8))) ^ in[8]
		in[10] += in[11]
		in[11] = ((in[11] << 17) | (in[11] >> (64 - 17))) ^ in[10]
		in[12] += in[13]
		in[13] = ((in[13] << 22) | (in[13] >> (64 - 22))) ^ in[12]
		in[14] += in[15]
		in[15] = ((in[15] << 37) | (in[15] >> (64 - 37))) ^ in[14]
		in[1], in[3], in[4], in[5], in[6], in[7], in[8], in[9], in[10], in[11], in[12], in[13], in[14], in[15] =
			in[9], in[13], in[6], in[11], in[4], in[15], in[10], in[7], in[12], in[3], in[14], in[5], in[8], in[1]
		in[0] += in[1]
		in[1] = ((in[1] << 38) | (in[1] >> (64 - 38))) ^ in[0]
		in[2] += in[3]
		in[3] = ((in[3] << 19) | (in[3] >> (64 - 19))) ^ in[2]
		in[4] += in[5]
		in[5] = ((in[5] << 10) | (in[5] >> (64 - 10))) ^ in[4]
		in[6] += in[7]
		in[7] = ((in[7] << 55) | (in[7] >> (64 - 55))) ^ in[6]
		in[8] += in[9]
		in[9] = ((in[9] << 49) | (in[9] >> (64 - 49))) ^ in[8]
		in[10] += in[11]
		in[11] = ((in[11] << 18) | (in[11] >> (64 - 18))) ^ in[10]
		in[12] += in[13]
		in[13] = ((in[13] << 23) | (in[13] >> (64 - 23))) ^ in[12]
		in[14] += in[15]
		in[15] = ((in[15] << 52) | (in[15] >> (64 - 52))) ^ in[14]
		in[1], in[3], in[4], in[5], in[6], in[7], in[8], in[9], in[10], in[11], in[12], in[13], in[14], in[15] =
			in[9], in[13], in[6], in[11], in[4], in[15], in[10], in[7], in[12], in[3], in[14], in[5], in[8], in[1]
		in[0] += in[1]
		in[1] = ((in[1] << 33) | (in[1] >> (64 - 33))) ^ in[0]
		in[2] += in[3]
		in[3] = ((in[3] << 4) | (in[3] >> (64 - 4))) ^ in[2]
		in[4] += in[5]
		in[5] = ((in[5] << 51) | (in[5] >> (64 - 51))) ^ in[4]
		in[6] += in[7]
		in[7] = ((in[7] << 13) | (in[7] >> (64 - 13))) ^ in[6]
		in[8] += in[9]
		in[9] = ((in[9] << 34) | (in[9] >> (64 - 34))) ^ in[8]
		in[10] += in[11]
		in[11] = ((in[11] << 41) | (in[11] >> (64 - 41))) ^ in[10]
		in[12] += in[13]
		in[13] = ((in[13] << 59) | (in[13] >> (64 - 59))) ^ in[12]
		in[14] += in[15]
		in[15] = ((in[15] << 17) | (in[15] >> (64 - 17))) ^ in[14]
		in[1], in[3], in[4], in[5], in[6], in[7], in[8], in[9], in[10], in[11], in[12], in[13], in[14], in[15] =
			in[9], in[13], in[6], in[11], in[4], in[15], in[10], in[7], in[12], in[3], in[14], in[5], in[8], in[1]
		in[0] += in[1]
		in[1] = ((in[1] << 5) | (in[1] >> (64 - 5))) ^ in[0]
		in[2] += in[3]
		in[3] = ((in[3] << 20) | (in[3] >> (64 - 20))) ^ in[2]
		in[4] += in[5]
		in[5] = ((in[5] << 48) | (in[5] >> (64 - 48))) ^ in[4]
		in[6] += in[7]
		in[7] = ((in[7] << 41) | (in[7] >> (64 - 41))) ^ in[6]
		in[8] += in[9]
		in[9] = ((in[9] << 47) | (in[9] >> (64 - 47))) ^ in[8]
		in[10] += in[11]
		in[11] = ((in[11] << 28) | (in[11] >> (64 - 28))) ^ in[10]
		in[12] += in[13]
		in[13] = ((in[13] << 16) | (in[13] >> (64 - 16))) ^ in[12]
		in[14] += in[15]
		in[15] = ((in[15] << 25) | (in[15] >> (64 - 25))) ^ in[14]
		in[1], in[3], in[4], in[5], in[6], in[7], in[8], in[9], in[10], in[11], in[12], in[13], in[14], in[15] =
			in[9], in[13], in[6], in[11], in[4], in[15], in[10], in[7], in[12], in[3], in[14], in[5], in[8], in[1]
		// Add round key
		in[0] += c.ks[(d/4)+1][0]
		in[1] += c.ks[(d/4)+1][1]
		in[2] += c.ks[(d/4)+1][2]
		in[3] += c.ks[(d/4)+1][3]
		in[4] += c.ks[(d/4)+1][4]
		in[5] += c.ks[(d/4)+1][5]
		in[6] += c.ks[(d/4)+1][6]
		in[7] += c.ks[(d/4)+1][7]
		in[8] += c.ks[(d/4)+1][8]
		in[9] += c.ks[(d/4)+1][9]
		in[10] += c.ks[(d/4)+1][10]
		in[11] += c.ks[(d/4)+1][11]
		in[12] += c.ks[(d/4)+1][12]
		in[13] += c.ks[(d/4)+1][13]
		in[14] += c.ks[(d/4)+1][14]
		in[15] += c.ks[(d/4)+1][15]
		// Four rounds of mix and permute
		in[0] += in[1]
		in[1] = ((in[1] << 41) | (in[1] >> (64 - 41))) ^ in[0]
		in[2] += in[3]
		in[3] = ((in[3] << 9) | (in[3] >> (64 - 9))) ^ in[2]
		in[4] += in[5]
		in[5] = ((in[5] << 37) | (in[5] >> (64 - 37))) ^ in[4]
		in[6] += in[7]
		in[7] = ((in[7] << 31) | (in[7] >> (64 - 31))) ^ in[6]
		in[8] += in[9]
		in[9] = ((in[9] << 12) | (in[9] >> (64 - 12))) ^ in[8]
		in[10] += in[11]
		in[11] = ((in[11] << 47) | (in[11] >> (64 - 47))) ^ in[10]
		in[12] += in[13]
		in[13] = ((in[13] << 44) | (in[13] >> (64 - 44))) ^ in[12]
		in[14] += in[15]
		in[15] = ((in[15] << 30) | (in[15] >> (64 - 30))) ^ in[14]
		in[1], in[3], in[4], in[5], in[6], in[7], in[8], in[9], in[10], in[11], in[12], in[13], in[14], in[15] =
			in[9], in[13], in[6], in[11], in[4], in[15], in[10], in[7], in[12], in[3], in[14], in[5], in[8], in[1]
		in[0] += in[1]
		in[1] = ((in[1] << 16) | (in[1] >> (64 - 16))) ^ in[0]
		in[2] += in[3]
		in[3] = ((in[3] << 34) | (in[3] >> (64 - 34))) ^ in[2]
		in[4] += in[5]
		in[5] = ((in[5] << 56) | (in[5] >> (64 - 56))) ^ in[4]
		in[6] += in[7]
		in[7] = ((in[7] << 51) | (in[7] >> (64 - 51))) ^ in[6]
		in[8] += in[9]
		in[9] = ((in[9] << 4) | (in[9] >> (64 - 4))) ^ in[8]
		in[10] += in[11]
		in[11] = ((in[11] << 53) | (in[11] >> (64 - 53))) ^ in[10]
		in[12] += in[13]
		in[13] = ((in[13] << 42) | (in[13] >> (64 - 42))) ^ in[12]
		in[14] += in[15]
		in[15] = ((in[15] << 41) | (in[15] >> (64 - 41))) ^ in[14]
		in[1], in[3], in[4], in[5], in[6], in[7], in[8], in[9], in[10], in[11], in[12], in[13], in[14], in[15] =
			in[9], in[13], in[6], in[11], in[4], in[15], in[10], in[7], in[12], in[3], in[14], in[5], in[8], in[1]
		in[0] += in[1]
		in[1] = ((in[1] << 31) | (in[1] >> (64 - 31))) ^ in[0]
		in[2] += in[3]
		in[3] = ((in[3] << 44) | (in[3] >> (64 - 44))) ^ in[2]
		in[4] += in[5]
		in[5] = ((in[5] << 47) | (in[5] >> (64 - 47))) ^ in[4]
		in[6] += in[7]
		in[7] = ((in[7] << 46) | (in[7] >> (64 - 46))) ^ in[6]
		in[8] += in[9]
		in[9] = ((in[9] << 19) | (in[9] >> (64 - 19))) ^ in[8]
		in[10] += in[11]
		in[11] = ((in[11] << 42) | (in[11] >> (64 - 42))) ^ in[10]
		in[12] += in[13]
		in[13] = ((in[13] << 44) | (in[13] >> (64 - 44))) ^ in[12]
		in[14] += in[15]
		in[15] = ((in[15] << 25) | (in[15] >> (64 - 25))) ^ in[14]
		in[1], in[3], in[4], in[5], in[6], in[7], in[8], in[9], in[10], in[11], in[12], in[13], in[14], in[15] =
			in[9], in[13], in[6], in[11], in[4], in[15], in[10], in[7], in[12], in[3], in[14], in[5], in[8], in[1]
		in[0] += in[1]
		in[1] = ((in[1] << 9) | (in[1] >> (64 - 9))) ^ in[0]
		in[2] += in[3]
		in[3] = ((in[3] << 48) | (in[3] >> (64 - 48))) ^ in[2]
		in[4] += in[5]
		in[5] = ((in[5] << 35) | (in[5] >> (64 - 35))) ^ in[4]
		in[6] += in[7]
		in[7] = ((in[7] << 52) | (in[7] >> (64 - 52))) ^ in[6]
		in[8] += in[9]
		in[9] = ((in[9] << 23) | (in[9] >> (64 - 23))) ^ in[8]
		in[10] += in[11]
		in[11] = ((in[11] << 31) | (in[11] >> (64 - 31))) ^ in[10]
		in[12] += in[13]
		in[13] = ((in[13] << 37) | (in[13] >> (64 - 37))) ^ in[12]
		in[14] += in[15]
		in[15] = ((in[15] << 20) | (in[15] >> (64 - 20))) ^ in[14]
		in[1], in[3], in[4], in[5], in[6], in[7], in[8], in[9], in[10], in[11], in[12], in[13], in[14], in[15] =
			in[9], in[13], in[6], in[11], in[4], in[15], in[10], in[7], in[12], in[3], in[14], in[5], in[8], in[1]
	}
	// Add the final round key
	in[0] += c.ks[numRounds1024/4][0]
	in[1] += c.ks[numRounds1024/4][1]
	in[2] += c.ks[numRounds1024/4][2]
	in[3] += c.ks[numRounds1024/4][3]
	in[4] += c.ks[numRounds1024/4][4]
	in[5] += c.ks[numRounds1024/4][5]
	in[6] += c.ks[numRounds1024/4][6]
	in[7] += c.ks[numRounds1024/4][7]
	in[8] += c.ks[numRounds1024/4][8]
	in[9] += c.ks[numRounds1024/4][9]
	in[10] += c.ks[numRounds1024/4][10]
	in[11] += c.ks[numRounds1024/4][11]
	in[12] += c.ks[numRounds1024/4][12]
	in[13] += c.ks[numRounds1024/4][13]
	in[14] += c.ks[numRounds1024/4][14]
	in[15] += c.ks[numRounds1024/4][15]
	// Store the ciphertext in destination
	storeWord(dst[0:8], in[0])
	storeWord(dst[8:16], in[1])
	storeWord(dst[16:24], in[2])
	storeWord(dst[24:32], in[3])
	storeWord(dst[32:40], in[4])
	storeWord(dst[40:48], in[5])
	storeWord(dst[48:56], in[6])
	storeWord(dst[56:64], in[7])
	storeWord(dst[64:72], in[8])
	storeWord(dst[72:80], in[9])
	storeWord(dst[80:88], in[10])
	storeWord(dst[88:96], in[11])
	storeWord(dst[96:104], in[12])
	storeWord(dst[104:112], in[13])
	storeWord(dst[112:120], in[14])
	storeWord(dst[120:128], in[15])
}
// Decrypt loads one 1024-bit ciphertext block from src, decrypts it with the
// precomputed key schedule, and stores the plaintext in dst. Both slices
// must be at least 128 bytes. It is the exact inverse of Encrypt: subkeys
// are subtracted in reverse order and each unrolled iteration undoes eight
// rounds by applying the inverse permutation followed by unmix (rotate-back
// and subtract) with the rotation constants in reverse round order.
func (c *cipher1024) Decrypt(dst, src []byte) {
	// Load the ciphertext
	ct := new([numWords1024]uint64)
	ct[0] = loadWord(src[0:8])
	ct[1] = loadWord(src[8:16])
	ct[2] = loadWord(src[16:24])
	ct[3] = loadWord(src[24:32])
	ct[4] = loadWord(src[32:40])
	ct[5] = loadWord(src[40:48])
	ct[6] = loadWord(src[48:56])
	ct[7] = loadWord(src[56:64])
	ct[8] = loadWord(src[64:72])
	ct[9] = loadWord(src[72:80])
	ct[10] = loadWord(src[80:88])
	ct[11] = loadWord(src[88:96])
	ct[12] = loadWord(src[96:104])
	ct[13] = loadWord(src[104:112])
	ct[14] = loadWord(src[112:120])
	ct[15] = loadWord(src[120:128])
	// Subtract the final round key
	ct[0] -= c.ks[numRounds1024/4][0]
	ct[1] -= c.ks[numRounds1024/4][1]
	ct[2] -= c.ks[numRounds1024/4][2]
	ct[3] -= c.ks[numRounds1024/4][3]
	ct[4] -= c.ks[numRounds1024/4][4]
	ct[5] -= c.ks[numRounds1024/4][5]
	ct[6] -= c.ks[numRounds1024/4][6]
	ct[7] -= c.ks[numRounds1024/4][7]
	ct[8] -= c.ks[numRounds1024/4][8]
	ct[9] -= c.ks[numRounds1024/4][9]
	ct[10] -= c.ks[numRounds1024/4][10]
	ct[11] -= c.ks[numRounds1024/4][11]
	ct[12] -= c.ks[numRounds1024/4][12]
	ct[13] -= c.ks[numRounds1024/4][13]
	ct[14] -= c.ks[numRounds1024/4][14]
	ct[15] -= c.ks[numRounds1024/4][15]
	// Perform decryption rounds
	for d := numRounds1024 - 1; d >= 0; d -= 8 {
		// Four rounds of permute and unmix (inverse word permutation, then
		// rotate back and subtract)
		ct[1], ct[3], ct[4], ct[5], ct[6], ct[7], ct[8], ct[9], ct[10], ct[11], ct[12], ct[13], ct[14], ct[15] =
			ct[15], ct[11], ct[6], ct[13], ct[4], ct[9], ct[14], ct[1], ct[8], ct[5], ct[10], ct[3], ct[12], ct[7]
		ct[15] = ((ct[15] ^ ct[14]) << (64 - 20)) | ((ct[15] ^ ct[14]) >> 20)
		ct[14] -= ct[15]
		ct[13] = ((ct[13] ^ ct[12]) << (64 - 37)) | ((ct[13] ^ ct[12]) >> 37)
		ct[12] -= ct[13]
		ct[11] = ((ct[11] ^ ct[10]) << (64 - 31)) | ((ct[11] ^ ct[10]) >> 31)
		ct[10] -= ct[11]
		ct[9] = ((ct[9] ^ ct[8]) << (64 - 23)) | ((ct[9] ^ ct[8]) >> 23)
		ct[8] -= ct[9]
		ct[7] = ((ct[7] ^ ct[6]) << (64 - 52)) | ((ct[7] ^ ct[6]) >> 52)
		ct[6] -= ct[7]
		ct[5] = ((ct[5] ^ ct[4]) << (64 - 35)) | ((ct[5] ^ ct[4]) >> 35)
		ct[4] -= ct[5]
		ct[3] = ((ct[3] ^ ct[2]) << (64 - 48)) | ((ct[3] ^ ct[2]) >> 48)
		ct[2] -= ct[3]
		ct[1] = ((ct[1] ^ ct[0]) << (64 - 9)) | ((ct[1] ^ ct[0]) >> 9)
		ct[0] -= ct[1]
		ct[1], ct[3], ct[4], ct[5], ct[6], ct[7], ct[8], ct[9], ct[10], ct[11], ct[12], ct[13], ct[14], ct[15] =
			ct[15], ct[11], ct[6], ct[13], ct[4], ct[9], ct[14], ct[1], ct[8], ct[5], ct[10], ct[3], ct[12], ct[7]
		ct[15] = ((ct[15] ^ ct[14]) << (64 - 25)) | ((ct[15] ^ ct[14]) >> 25)
		ct[14] -= ct[15]
		ct[13] = ((ct[13] ^ ct[12]) << (64 - 44)) | ((ct[13] ^ ct[12]) >> 44)
		ct[12] -= ct[13]
		ct[11] = ((ct[11] ^ ct[10]) << (64 - 42)) | ((ct[11] ^ ct[10]) >> 42)
		ct[10] -= ct[11]
		ct[9] = ((ct[9] ^ ct[8]) << (64 - 19)) | ((ct[9] ^ ct[8]) >> 19)
		ct[8] -= ct[9]
		ct[7] = ((ct[7] ^ ct[6]) << (64 - 46)) | ((ct[7] ^ ct[6]) >> 46)
		ct[6] -= ct[7]
		ct[5] = ((ct[5] ^ ct[4]) << (64 - 47)) | ((ct[5] ^ ct[4]) >> 47)
		ct[4] -= ct[5]
		ct[3] = ((ct[3] ^ ct[2]) << (64 - 44)) | ((ct[3] ^ ct[2]) >> 44)
		ct[2] -= ct[3]
		ct[1] = ((ct[1] ^ ct[0]) << (64 - 31)) | ((ct[1] ^ ct[0]) >> 31)
		ct[0] -= ct[1]
		ct[1], ct[3], ct[4], ct[5], ct[6], ct[7], ct[8], ct[9], ct[10], ct[11], ct[12], ct[13], ct[14], ct[15] =
			ct[15], ct[11], ct[6], ct[13], ct[4], ct[9], ct[14], ct[1], ct[8], ct[5], ct[10], ct[3], ct[12], ct[7]
		ct[15] = ((ct[15] ^ ct[14]) << (64 - 41)) | ((ct[15] ^ ct[14]) >> 41)
		ct[14] -= ct[15]
		ct[13] = ((ct[13] ^ ct[12]) << (64 - 42)) | ((ct[13] ^ ct[12]) >> 42)
		ct[12] -= ct[13]
		ct[11] = ((ct[11] ^ ct[10]) << (64 - 53)) | ((ct[11] ^ ct[10]) >> 53)
		ct[10] -= ct[11]
		ct[9] = ((ct[9] ^ ct[8]) << (64 - 4)) | ((ct[9] ^ ct[8]) >> 4)
		ct[8] -= ct[9]
		ct[7] = ((ct[7] ^ ct[6]) << (64 - 51)) | ((ct[7] ^ ct[6]) >> 51)
		ct[6] -= ct[7]
		ct[5] = ((ct[5] ^ ct[4]) << (64 - 56)) | ((ct[5] ^ ct[4]) >> 56)
		ct[4] -= ct[5]
		ct[3] = ((ct[3] ^ ct[2]) << (64 - 34)) | ((ct[3] ^ ct[2]) >> 34)
		ct[2] -= ct[3]
		ct[1] = ((ct[1] ^ ct[0]) << (64 - 16)) | ((ct[1] ^ ct[0]) >> 16)
		ct[0] -= ct[1]
		ct[1], ct[3], ct[4], ct[5], ct[6], ct[7], ct[8], ct[9], ct[10], ct[11], ct[12], ct[13], ct[14], ct[15] =
			ct[15], ct[11], ct[6], ct[13], ct[4], ct[9], ct[14], ct[1], ct[8], ct[5], ct[10], ct[3], ct[12], ct[7]
		ct[15] = ((ct[15] ^ ct[14]) << (64 - 30)) | ((ct[15] ^ ct[14]) >> 30)
		ct[14] -= ct[15]
		ct[13] = ((ct[13] ^ ct[12]) << (64 - 44)) | ((ct[13] ^ ct[12]) >> 44)
		ct[12] -= ct[13]
		ct[11] = ((ct[11] ^ ct[10]) << (64 - 47)) | ((ct[11] ^ ct[10]) >> 47)
		ct[10] -= ct[11]
		ct[9] = ((ct[9] ^ ct[8]) << (64 - 12)) | ((ct[9] ^ ct[8]) >> 12)
		ct[8] -= ct[9]
		ct[7] = ((ct[7] ^ ct[6]) << (64 - 31)) | ((ct[7] ^ ct[6]) >> 31)
		ct[6] -= ct[7]
		ct[5] = ((ct[5] ^ ct[4]) << (64 - 37)) | ((ct[5] ^ ct[4]) >> 37)
		ct[4] -= ct[5]
		ct[3] = ((ct[3] ^ ct[2]) << (64 - 9)) | ((ct[3] ^ ct[2]) >> 9)
		ct[2] -= ct[3]
		ct[1] = ((ct[1] ^ ct[0]) << (64 - 41)) | ((ct[1] ^ ct[0]) >> 41)
		ct[0] -= ct[1]
		// Subtract round key
		ct[0] -= c.ks[d/4][0]
		ct[1] -= c.ks[d/4][1]
		ct[2] -= c.ks[d/4][2]
		ct[3] -= c.ks[d/4][3]
		ct[4] -= c.ks[d/4][4]
		ct[5] -= c.ks[d/4][5]
		ct[6] -= c.ks[d/4][6]
		ct[7] -= c.ks[d/4][7]
		ct[8] -= c.ks[d/4][8]
		ct[9] -= c.ks[d/4][9]
		ct[10] -= c.ks[d/4][10]
		ct[11] -= c.ks[d/4][11]
		ct[12] -= c.ks[d/4][12]
		ct[13] -= c.ks[d/4][13]
		ct[14] -= c.ks[d/4][14]
		ct[15] -= c.ks[d/4][15]
		// Four rounds of permute and unmix
		ct[1], ct[3], ct[4], ct[5], ct[6], ct[7], ct[8], ct[9], ct[10], ct[11], ct[12], ct[13], ct[14], ct[15] =
			ct[15], ct[11], ct[6], ct[13], ct[4], ct[9], ct[14], ct[1], ct[8], ct[5], ct[10], ct[3], ct[12], ct[7]
		ct[15] = ((ct[15] ^ ct[14]) << (64 - 25)) | ((ct[15] ^ ct[14]) >> 25)
		ct[14] -= ct[15]
		ct[13] = ((ct[13] ^ ct[12]) << (64 - 16)) | ((ct[13] ^ ct[12]) >> 16)
		ct[12] -= ct[13]
		ct[11] = ((ct[11] ^ ct[10]) << (64 - 28)) | ((ct[11] ^ ct[10]) >> 28)
		ct[10] -= ct[11]
		ct[9] = ((ct[9] ^ ct[8]) << (64 - 47)) | ((ct[9] ^ ct[8]) >> 47)
		ct[8] -= ct[9]
		ct[7] = ((ct[7] ^ ct[6]) << (64 - 41)) | ((ct[7] ^ ct[6]) >> 41)
		ct[6] -= ct[7]
		ct[5] = ((ct[5] ^ ct[4]) << (64 - 48)) | ((ct[5] ^ ct[4]) >> 48)
		ct[4] -= ct[5]
		ct[3] = ((ct[3] ^ ct[2]) << (64 - 20)) | ((ct[3] ^ ct[2]) >> 20)
		ct[2] -= ct[3]
		ct[1] = ((ct[1] ^ ct[0]) << (64 - 5)) | ((ct[1] ^ ct[0]) >> 5)
		ct[0] -= ct[1]
		ct[1], ct[3], ct[4], ct[5], ct[6], ct[7], ct[8], ct[9], ct[10], ct[11], ct[12], ct[13], ct[14], ct[15] =
			ct[15], ct[11], ct[6], ct[13], ct[4], ct[9], ct[14], ct[1], ct[8], ct[5], ct[10], ct[3], ct[12], ct[7]
		ct[15] = ((ct[15] ^ ct[14]) << (64 - 17)) | ((ct[15] ^ ct[14]) >> 17)
		ct[14] -= ct[15]
		ct[13] = ((ct[13] ^ ct[12]) << (64 - 59)) | ((ct[13] ^ ct[12]) >> 59)
		ct[12] -= ct[13]
		ct[11] = ((ct[11] ^ ct[10]) << (64 - 41)) | ((ct[11] ^ ct[10]) >> 41)
		ct[10] -= ct[11]
		ct[9] = ((ct[9] ^ ct[8]) << (64 - 34)) | ((ct[9] ^ ct[8]) >> 34)
		ct[8] -= ct[9]
		ct[7] = ((ct[7] ^ ct[6]) << (64 - 13)) | ((ct[7] ^ ct[6]) >> 13)
		ct[6] -= ct[7]
		ct[5] = ((ct[5] ^ ct[4]) << (64 - 51)) | ((ct[5] ^ ct[4]) >> 51)
		ct[4] -= ct[5]
		ct[3] = ((ct[3] ^ ct[2]) << (64 - 4)) | ((ct[3] ^ ct[2]) >> 4)
		ct[2] -= ct[3]
		ct[1] = ((ct[1] ^ ct[0]) << (64 - 33)) | ((ct[1] ^ ct[0]) >> 33)
		ct[0] -= ct[1]
		ct[1], ct[3], ct[4], ct[5], ct[6], ct[7], ct[8], ct[9], ct[10], ct[11], ct[12], ct[13], ct[14], ct[15] =
			ct[15], ct[11], ct[6], ct[13], ct[4], ct[9], ct[14], ct[1], ct[8], ct[5], ct[10], ct[3], ct[12], ct[7]
		ct[15] = ((ct[15] ^ ct[14]) << (64 - 52)) | ((ct[15] ^ ct[14]) >> 52)
		ct[14] -= ct[15]
		ct[13] = ((ct[13] ^ ct[12]) << (64 - 23)) | ((ct[13] ^ ct[12]) >> 23)
		ct[12] -= ct[13]
		ct[11] = ((ct[11] ^ ct[10]) << (64 - 18)) | ((ct[11] ^ ct[10]) >> 18)
		ct[10] -= ct[11]
		ct[9] = ((ct[9] ^ ct[8]) << (64 - 49)) | ((ct[9] ^ ct[8]) >> 49)
		ct[8] -= ct[9]
		ct[7] = ((ct[7] ^ ct[6]) << (64 - 55)) | ((ct[7] ^ ct[6]) >> 55)
		ct[6] -= ct[7]
		ct[5] = ((ct[5] ^ ct[4]) << (64 - 10)) | ((ct[5] ^ ct[4]) >> 10)
		ct[4] -= ct[5]
		ct[3] = ((ct[3] ^ ct[2]) << (64 - 19)) | ((ct[3] ^ ct[2]) >> 19)
		ct[2] -= ct[3]
		ct[1] = ((ct[1] ^ ct[0]) << (64 - 38)) | ((ct[1] ^ ct[0]) >> 38)
		ct[0] -= ct[1]
		ct[1], ct[3], ct[4], ct[5], ct[6], ct[7], ct[8], ct[9], ct[10], ct[11], ct[12], ct[13], ct[14], ct[15] =
			ct[15], ct[11], ct[6], ct[13], ct[4], ct[9], ct[14], ct[1], ct[8], ct[5], ct[10], ct[3], ct[12], ct[7]
		ct[15] = ((ct[15] ^ ct[14]) << (64 - 37)) | ((ct[15] ^ ct[14]) >> 37)
		ct[14] -= ct[15]
		ct[13] = ((ct[13] ^ ct[12]) << (64 - 22)) | ((ct[13] ^ ct[12]) >> 22)
		ct[12] -= ct[13]
		ct[11] = ((ct[11] ^ ct[10]) << (64 - 17)) | ((ct[11] ^ ct[10]) >> 17)
		ct[10] -= ct[11]
		ct[9] = ((ct[9] ^ ct[8]) << (64 - 8)) | ((ct[9] ^ ct[8]) >> 8)
		ct[8] -= ct[9]
		ct[7] = ((ct[7] ^ ct[6]) << (64 - 47)) | ((ct[7] ^ ct[6]) >> 47)
		ct[6] -= ct[7]
		ct[5] = ((ct[5] ^ ct[4]) << (64 - 8)) | ((ct[5] ^ ct[4]) >> 8)
		ct[4] -= ct[5]
		ct[3] = ((ct[3] ^ ct[2]) << (64 - 13)) | ((ct[3] ^ ct[2]) >> 13)
		ct[2] -= ct[3]
		ct[1] = ((ct[1] ^ ct[0]) << (64 - 24)) | ((ct[1] ^ ct[0]) >> 24)
		ct[0] -= ct[1]
		// Subtract round key
		ct[0] -= c.ks[(d/4)-1][0]
		ct[1] -= c.ks[(d/4)-1][1]
		ct[2] -= c.ks[(d/4)-1][2]
		ct[3] -= c.ks[(d/4)-1][3]
		ct[4] -= c.ks[(d/4)-1][4]
		ct[5] -= c.ks[(d/4)-1][5]
		ct[6] -= c.ks[(d/4)-1][6]
		ct[7] -= c.ks[(d/4)-1][7]
		ct[8] -= c.ks[(d/4)-1][8]
		ct[9] -= c.ks[(d/4)-1][9]
		ct[10] -= c.ks[(d/4)-1][10]
		ct[11] -= c.ks[(d/4)-1][11]
		ct[12] -= c.ks[(d/4)-1][12]
		ct[13] -= c.ks[(d/4)-1][13]
		ct[14] -= c.ks[(d/4)-1][14]
		ct[15] -= c.ks[(d/4)-1][15]
	}
	// Store decrypted value in destination
	storeWord(dst[0:8], ct[0])
	storeWord(dst[8:16], ct[1])
	storeWord(dst[16:24], ct[2])
	storeWord(dst[24:32], ct[3])
	storeWord(dst[32:40], ct[4])
	storeWord(dst[40:48], ct[5])
	storeWord(dst[48:56], ct[6])
	storeWord(dst[56:64], ct[7])
	storeWord(dst[64:72], ct[8])
	storeWord(dst[72:80], ct[9])
	storeWord(dst[80:88], ct[10])
	storeWord(dst[88:96], ct[11])
	storeWord(dst[96:104], ct[12])
	storeWord(dst[104:112], ct[13])
	storeWord(dst[112:120], ct[14])
	storeWord(dst[120:128], ct[15])
}
package types
import (
"fmt"
"strings"
"github.com/frankkopp/FrankyGo/internal/assert"
)
// Move is a 32bit unsigned int type for encoding chess moves as a primitive
// data type: the low 16 bits encode the move itself (to/from squares,
// promotion piece, move type) and the high 16 bits carry a sort value.
type Move uint32

const (
	// MoveNone is the empty, non-valid move (all bits zero).
	MoveNone Move = 0
)
// CreateMove returns an encoded Move instance without a sort value.
// A promType below Knight is clamped to Knight so the 2-bit promotion field
// (Knight..Queen mapped to 0..3) stays in range.
func CreateMove(from Square, to Square, t MoveType, promType PieceType) Move {
	if promType < Knight {
		promType = Knight
	}
	if assert.DEBUG {
		assert.Assert(from.IsValid(), "Invalid From square")
		assert.Assert(to.IsValid(), "Invalid To square")
		assert.Assert(t.IsValid(), "Invalid MoveType")
		assert.Assert(promType.IsValid(), "Invalid promotion PieceType")
	}
	// promType will be reduced to 2 bits (4 values) Knight, Bishop, Rook, Queen
	// therefore we subtract the Knight value from the promType to get
	// value between 0 and 3 (0b00 - 0b11)
	return Move(to) |
		Move(from)<<fromShift |
		Move(promType-Knight)<<promTypeShift |
		Move(t)<<typeShift
}
// CreateMoveValue returns an encoded Move instance including a sort value
// stored in the high 16 bits (shifted by ValueNA so it fits an unsigned
// field). A promType below Knight is clamped to Knight.
func CreateMoveValue(from Square, to Square, t MoveType, promType PieceType, value Value) Move {
	if promType < Knight {
		promType = Knight
	}
	if assert.DEBUG {
		assert.Assert(from.IsValid(), "Invalid From square")
		assert.Assert(to.IsValid(), "Invalid To square")
		assert.Assert(t.IsValid(), "Invalid MoveType")
		assert.Assert(promType.IsValid(), "Invalid promotion PieceType")
	}
	// promType will be reduced to 2 bits (4 values) Knight, Bishop, Rook, Queen
	// therefore we subtract the Knight value from the promType to get
	// value between 0 and 3 (0b00 - 0b11)
	return Move(value-ValueNA)<<valueShift |
		Move(to) |
		Move(from)<<fromShift |
		Move(promType-Knight)<<promTypeShift |
		Move(t)<<typeShift
}
// MoveType returns the type of the move as defined in MoveType:
// Normal, Promotion, EnPassant or Castling.
func (m Move) MoveType() MoveType {
	return MoveType((m & moveTypeMask) >> typeShift)
}
// PromotionType returns the PieceType considered for promotion when
// move type is also MoveType.Promotion (the 2-bit field is shifted back
// into the Knight..Queen range by adding Knight).
// Must be ignored when move type is not MoveType.Promotion.
func (m Move) PromotionType() PieceType {
	return PieceType((m&promTypeMask)>>promTypeShift) + Knight
}
// To returns the to-Square of the move (lowest 6 bits).
func (m Move) To() Square {
	return Square(m & toMask)
}
// From returns the from-Square of the move (bits 6-11).
func (m Move) From() Square {
	return Square((m & fromMask) >> fromShift)
}
// MoveOf returns the move without any sort value (the low 16 bits only).
func (m Move) MoveOf() Move {
	return m & moveMask
}
// ValueOf returns the sort value for the move used in the move generator,
// shifting the stored unsigned field back into the ValueNA-based range.
func (m Move) ValueOf() Value {
	return Value((m&valueMask)>>valueShift) + ValueNA
}
// SetValue encodes the given sort value into the high 16 bits of the move
// and returns the updated move. Setting a value on MoveNone is a no-op.
func (m *Move) SetValue(v Value) Move {
	if assert.DEBUG {
		assert.Assert(v == ValueNA || v.IsValid(), "Invalid value value: %d", v)
	}
	// can't store a value on MoveNone
	if *m == MoveNone {
		return *m
	}
	// when saving a value to a move we shift value to a positive integer
	// (0-VALUE_NONE) and encode it into the move. For retrieving we then shift
	// the value back to a range from VALUE_NONE to VALUE_INF
	*m = *m&moveMask | Move(v-ValueNA)<<valueShift
	return *m
}
// IsValid checks if the move has valid squares, promotion type, move type
// and sort value. MoveNone is not a valid move in this sense.
func (m Move) IsValid() bool {
	return m != MoveNone &&
		m.From().IsValid() &&
		m.To().IsValid() &&
		m.PromotionType().IsValid() &&
		m.MoveType().IsValid() &&
		(m.ValueOf() == ValueNA || m.ValueOf().IsValid())
}
// String returns a human-readable debug representation of the move,
// including its UCI notation, move type, promotion piece, sort value and
// raw encoding. (gofmt fix: added the missing space after the comma in the
// Sprintf argument list.)
func (m Move) String() string {
	if m == MoveNone {
		return "Move: { MoveNone }"
	}
	return fmt.Sprintf("Move: { %-5s type:%1s prom:%1s value:%-6d (%d) }",
		m.StringUci(), m.MoveType().String(), m.PromotionType().Char(), m.ValueOf(), m)
}
// StringUci returns the UCI-compatible string representation of the move:
// from-square, to-square, and the promotion piece character when the move
// is a promotion. MoveNone renders as "NoMove".
func (m Move) StringUci() string {
	if m == MoveNone {
		return "NoMove"
	}
	uci := m.From().String() + m.To().String()
	if m.MoveType() == Promotion {
		uci += m.PromotionType().Char()
	}
	return uci
}
// StringBits returns a string with the bit-level details of a Move.
// E.g. Move { From[001100](e2) To[011100](e4) Prom[11](N) tType[00](n) value[0000000000000000](0) (796)}
func (m Move) StringBits() string {
	return fmt.Sprintf(
		"Move { From[%-0.6b](%s) To[%-0.6b](%s) Prom[%-0.2b](%s) tType[%-0.2b](%s) value[%-0.16b](%d) (%d)}",
		m.From(), m.From().String(),
		m.To(), m.To().String(),
		m.PromotionType(), (m.PromotionType()).Char(),
		m.MoveType(), m.MoveType().String(),
		m.ValueOf(), m.ValueOf(),
		m)
}
/* @formatter:off
BITMAP 32-bit
|-value ------------------------|-Move -------------------------|
3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 | 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 | 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
--------------------------------|--------------------------------
| 1 1 1 1 1 1 to
| 1 1 1 1 1 1 from
| 1 1 promotion piece type (pt-2 > 0-3)
| 1 1 move type
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 | move sort value
*/// @formatter:on
const (
	fromShift     uint = 6                    // from-square occupies bits 6-11
	promTypeShift uint = 12                   // promotion piece occupies bits 12-13
	typeShift     uint = 14                   // move type occupies bits 14-15
	valueShift    uint = 16                   // sort value occupies bits 16-31
	squareMask    Move = 0x3F                 // 6 bits for a square index
	toMask             = squareMask           // to-square sits in the lowest 6 bits
	fromMask           = squareMask << fromShift
	promTypeMask  Move = 3 << promTypeShift
	moveTypeMask  Move = 3 << typeShift
	moveMask      Move = 0xFFFF               // first 16-bit
	valueMask     Move = 0xFFFF << valueShift // second 16-bit
)
package collections
// Splitter represents an ordered collection that can be split into smaller
// units.
type Splitter interface {
	Bounded
	// Implementation of comparable should at the very least be able to compare
	// the current element to its predecessor.
	Comparable
	// Split splits the collection from i up to (but not including) j.
	Split(i, j int)
}
// IntSplitter represents a splitter over a list of ints.
// Note: By adding IntSlice anonymously, we gain access to the underlying
// Bounded and Comparable implementations of IntSlice, so IntSplitter
// satisfies Splitter automatically.
type IntSplitter struct {
	IntSlice
	Result []IntSlice // groups of equal elements, filled by Split
}
// NewIntSplitter initializes a new IntSplitter containing an IntSlice and an
// empty result set.
func NewIntSplitter(xs IntSlice) *IntSplitter {
	return &IntSplitter{xs, []IntSlice{}}
}
// Split appends the sub-slice [i, j) to the result set; the sub-slice shares
// the backing array of the original IntSlice.
func (s *IntSplitter) Split(i, j int) {
	s.Result = append(s.Result, s.IntSlice[i:j])
}
// FloatSplitter represents a splitter over a list of floats.
// NOTE(review): unlike IntSplitter, the slice is a named field rather than
// embedded, so FloatSplitter does not inherit FloatSlice's Bounded/Comparable
// methods here — confirm it satisfies the Splitter interface elsewhere.
type FloatSplitter struct {
	Floats FloatSlice
	Result []FloatSlice // groups of equal elements, filled by Split
}
// NewFloatSplitter initializes a new FloatSplitter containing a FloatSlice and
// an empty result set.
func NewFloatSplitter(xs FloatSlice) *FloatSplitter {
	return &FloatSplitter{xs, []FloatSlice{}}
}
// Split appends the sub-slice [i, j) to the result set; the sub-slice shares
// the backing array of the original FloatSlice.
func (s *FloatSplitter) Split(i, j int) {
	s.Result = append(s.Result, s.Floats[i:j])
}
// StringSplitter represents a splitter over a list of strings.
// NOTE(review): like FloatSplitter (and unlike IntSplitter), the slice is a
// named field rather than embedded — confirm it satisfies Splitter elsewhere.
type StringSplitter struct {
	Strings StringSlice
	Result []StringSlice // groups of equal elements, filled by Split
}
// NewStringSplitter initializes a new StringSplitter containing a StringSlice
// and an empty result set.
func NewStringSplitter(xs StringSlice) *StringSplitter {
	return &StringSplitter{xs, []StringSlice{}}
}
// Split appends the sub-slice [i, j) to the result set; the sub-slice shares
// the backing array of the original StringSlice.
func (s *StringSplitter) Split(i, j int) {
	s.Result = append(s.Result, s.Strings[i:j])
}
// TimeSplitter represents a splitter over a list of times.
// NOTE(review): like FloatSplitter (and unlike IntSplitter), the slice is a
// named field rather than embedded — confirm it satisfies Splitter elsewhere.
type TimeSplitter struct {
	Times TimeSlice
	Result []TimeSlice // groups of equal elements, filled by Split
}
// NewTimeSplitter initializes a new TimeSplitter containing a TimeSlice and
// an empty result set.
func NewTimeSplitter(xs TimeSlice) *TimeSplitter {
	return &TimeSplitter{xs, []TimeSlice{}}
}
// Split splits the time slice into slices of times with equal value.
func (s *TimeSplitter) Split(i, j int) {
s.Result = append(s.Result, s.Times[i:j])
}
// Split iterates over the entire collection in order, splitting it into groups
// where each element in the group is equal according to Compare.
func Split(s Splitter) {
start := 0
max := s.Len()
// early termination/panic conditions
switch {
case max < 0:
panic("Split: negative splitter length")
case max == 0:
return
}
for i := 0; i < max; i++ {
o := s.Compare(start, i)
checkOrd("Split", o)
if o != Equal {
s.Split(start, i)
start = i
}
}
if start < max {
s.Split(start, max)
}
} | split.go | 0.851645 | 0.527986 | split.go | starcoder |
package types
import (
"bytes"
"errors"
"fmt"
"io"
"github.com/c0mm4nd/wasman/leb128decode"
)
// ErrInvalidTypeByte means the type byte mismatches the one expected by the
// wasm binary format.
var ErrInvalidTypeByte = errors.New("invalid byte")
// ValueType classifies the individual values that WebAssembly code can compute
// with and the values that a variable accepts.
// https://www.w3.org/TR/wasm-core-1/#value-types%E2%91%A0
type ValueType byte

const (
	// ValueTypeI32 classifies 32 bit integers.
	ValueTypeI32 ValueType = 0x7f
	// ValueTypeI64 classifies 64 bit integers.
	// Integers are not inherently signed or unsigned; the interpretation is
	// determined by individual operations.
	ValueTypeI64 ValueType = 0x7e
	// ValueTypeF32 classifies 32 bit floating-point data, known as single.
	ValueTypeF32 ValueType = 0x7d
	// ValueTypeF64 classifies 64 bit floating-point data, known as double.
	ValueTypeF64 ValueType = 0x7c
)

// String renders the ValueType in WebAssembly text-format notation.
func (v ValueType) String() string {
	switch v {
	case ValueTypeF64:
		return "f64"
	case ValueTypeF32:
		return "f32"
	case ValueTypeI64:
		return "i64"
	case ValueTypeI32:
		return "i32"
	}
	return "unknown value type"
}

// ReadValueTypes reads exactly num value-type bytes from r and validates each
// one, returning the decoded slice.
// NOTE: num comes from the binary being decoded, so a hostile module can
// request a large allocation here — callers should bound it upstream.
func ReadValueTypes(r io.Reader, num uint32) ([]ValueType, error) {
	raw := make([]byte, num)
	if _, err := io.ReadFull(r, raw); err != nil {
		return nil, err
	}
	out := make([]ValueType, num)
	for i, b := range raw {
		vt := ValueType(b)
		switch vt {
		case ValueTypeI32, ValueTypeF32, ValueTypeI64, ValueTypeF64:
			out[i] = vt
		default:
			return nil, fmt.Errorf("invalid value type: %d", vt)
		}
	}
	return out, nil
}
// ReadNameValue reads a length-prefixed UTF-8 name from r: a uleb128-encoded
// size followed by that many raw bytes.
func ReadNameValue(r *bytes.Reader) (string, error) {
	size, _, err := leb128decode.DecodeUint32(r)
	if err != nil {
		return "", fmt.Errorf("read size of name: %w", err)
	}
	name := make([]byte, size)
	if _, err = io.ReadFull(r, name); err != nil {
		return "", fmt.Errorf("read bytes of name: %w", err)
	}
	return string(name), nil
}
// HasSameSignature will verify whether the two types.ValueType are same
func HasSameSignature(a []ValueType, b []ValueType) bool {
if len(a) != len(b) {
return false
}
for i := range a {
if a[i] != b[i] {
return false
}
}
return true
} | types/value.go | 0.678753 | 0.427576 | value.go | starcoder |
package gi3d
import (
"sync"
"github.com/goki/ki/ki"
"github.com/goki/ki/kit"
"github.com/goki/mat32"
)
// Camera defines the properties of the camera
type Camera struct {
	Pose   Pose         `desc:"overall orientation and direction of the camera, relative to pointing at negative Z axis with up (positive Y) direction"`
	CamMu  sync.RWMutex `desc:"mutex protecting camera data"`
	Target mat32.Vec3   `desc:"target location for the camera -- where it is pointing at -- defaults to the origin, but moves with panning movements, and is reset by a call to LookAt method"`
	UpDir  mat32.Vec3   `desc:"up direction for camera -- which way is up -- defaults to positive Y axis, and is reset by call to LookAt method"`
	Ortho  bool         `desc:"default is a Perspective camera -- set this to make it Orthographic instead, in which case the view includes the volume specified by the Near - Far distance (i.e., you probably want to decrease Far)."`
	FOV    float32      `desc:"field of view in degrees "`
	Aspect float32      `desc:"aspect ratio (width/height)"`
	Near   float32      `desc:"near plane z coordinate"`
	Far    float32      `desc:"far plane z coordinate"`
	// The matrices and frustum below are derived caches, recomputed by UpdateMatrix.
	ViewMatrix    mat32.Mat4     `view:"-" desc:"view matrix (inverse of the Pose.Matrix)"`
	PrjnMatrix    mat32.Mat4     `view:"-" desc:"projection matrix, defining the camera perspective / ortho transform"`
	InvPrjnMatrix mat32.Mat4     `view:"-" desc:"inverse of the projection matrix"`
	Frustum       *mat32.Frustum `view:"-" desc:"frustum of projection -- viewable space defined by 6 planes of a pyrammidal shape"`
}

// KiT_Camera registers the Camera type with the kit type registry, together
// with its CameraProps.
var KiT_Camera = kit.Types.AddType(&Camera{}, CameraProps)
// Defaults sets standard default camera parameters: 30 degree FOV, 1.5
// aspect ratio, near/far planes at 0.01 and 1000, and the default pose.
func (cm *Camera) Defaults() {
	cm.FOV = 30
	cm.Aspect = 1.5
	cm.Near = .01
	cm.Far = 1000
	cm.DefaultPose()
}

// DefaultPose resets the camera pose to default location and orientation, looking
// at the origin from 0,0,10, with up Y axis
func (cm *Camera) DefaultPose() {
	cm.Pose.Defaults()
	cm.Pose.Pos.Set(0, 0, 10)
	cm.LookAtOrigin()
}

// GenGoSet returns code to set values at given path (var.member etc),
// concatenating the generated setters for Pose, Target and UpDir.
func (cm *Camera) GenGoSet(path string) string {
	return cm.Pose.GenGoSet(path+".Pose") + "; " + cm.Target.GenGoSet(path+".Target") + "; " + cm.UpDir.GenGoSet(path+".UpDir")
}
// UpdateMatrix updates the view and prjn matricies, plus the cached view
// frustum, from the current Pose and camera parameters. CamMu is held for
// the duration.
func (cm *Camera) UpdateMatrix() {
	cm.CamMu.Lock()
	defer cm.CamMu.Unlock()
	cm.Pose.UpdateMatrix()
	// The view matrix is the inverse of the camera's world transform.
	cm.ViewMatrix.SetInverse(&cm.Pose.Matrix)
	if cm.Ortho {
		// Size the ortho volume from FOV and the far plane distance so it
		// roughly matches what the perspective view would show at Far.
		height := 2 * cm.Far * mat32.Tan(mat32.DegToRad(cm.FOV*0.5))
		width := cm.Aspect * height
		cm.PrjnMatrix.SetOrthographic(width, height, cm.Near, cm.Far)
	} else {
		cm.PrjnMatrix.SetPerspective(cm.FOV, cm.Aspect, cm.Near, cm.Far)
	}
	cm.InvPrjnMatrix.SetInverse(&cm.PrjnMatrix)
	var proj mat32.Mat4
	proj.MulMatrices(&cm.PrjnMatrix, &cm.ViewMatrix)
	cm.Frustum = mat32.NewFrustumFromMatrix(&proj)
}

// LookAt points the camera at given target location, using given up direction,
// and sets the Target, UpDir fields for future camera movements.
// A zero upDir defaults to the positive Y axis.
func (cm *Camera) LookAt(target, upDir mat32.Vec3) {
	cm.CamMu.Lock()
	cm.Target = target
	if upDir.IsNil() {
		upDir = mat32.Vec3Y
	}
	cm.UpDir = upDir
	cm.Pose.LookAt(target, upDir)
	// Unlock before UpdateMatrix, which takes CamMu itself.
	cm.CamMu.Unlock()
	cm.UpdateMatrix()
}
// LookAtOrigin points the camera at origin with Y axis pointing Up (i.e., standard)
func (cm *Camera) LookAtOrigin() {
	cm.LookAt(mat32.Vec3Zero, mat32.Vec3Y)
}

// LookAtTarget points the camera at current target using current up direction
func (cm *Camera) LookAtTarget() {
	cm.LookAt(cm.Target, cm.UpDir)
}

// ViewVector is the vector between the camera position and target
// (Pos - Target), read under the camera mutex.
func (cm *Camera) ViewVector() mat32.Vec3 {
	cm.CamMu.RLock()
	defer cm.CamMu.RUnlock()
	return cm.Pose.Pos.Sub(cm.Target)
}
// ViewMainAxis returns the dimension along which the view vector is largest,
// along with the sign of the view vector on that axis (+1 or -1). This is
// useful for determining how manipulations should function, for example.
// Ties between axes resolve to Z.
func (cm *Camera) ViewMainAxis() (dim mat32.Dims, sign float32) {
	vv := cm.ViewVector()
	av := vv.Abs()
	if av.X > av.Y && av.X > av.Z {
		return mat32.X, mat32.Sign(vv.X)
	}
	if av.Y > av.X && av.Y > av.Z {
		return mat32.Y, mat32.Sign(vv.Y)
	}
	return mat32.Z, mat32.Sign(vv.Z)
}
// Orbit moves the camera along the given 2D axes in degrees
// (delX = left/right, delY = up/down),
// relative to current position and orientation,
// keeping the same distance from the Target, and rotating the camera and
// the Up direction vector to keep looking at the target.
func (cm *Camera) Orbit(delX, delY float32) {
	ctdir := cm.ViewVector()
	if ctdir.IsNil() {
		// Degenerate case: camera sits on the target; use +Z as the direction.
		ctdir.Set(0, 0, 1)
	}
	dir := ctdir.Normal()
	cm.CamMu.Lock()
	up := cm.UpDir
	right := cm.UpDir.Cross(dir).Normal()
	// up := dir.Cross(right).Normal() // ensure ortho -- not needed
	// delX rotates around the up vector
	dxq := mat32.NewQuatAxisAngle(up, mat32.DegToRad(delX))
	dx := ctdir.MulQuat(dxq).Sub(ctdir)
	// delY rotates around the right vector
	dyq := mat32.NewQuatAxisAngle(right, mat32.DegToRad(delY))
	dy := ctdir.MulQuat(dyq).Sub(ctdir)
	cm.Pose.Pos = cm.Pose.Pos.Add(dx).Add(dy)
	cm.UpDir.SetMulQuat(dyq) // this is only one that affects up
	cm.CamMu.Unlock()
	cm.LookAtTarget()
}

// Pan moves the camera along the given 2D axes (left/right, up/down),
// relative to current position and orientation (i.e., in the plane of the
// current window view)
// and it moves the target by the same increment, changing the target position.
func (cm *Camera) Pan(delX, delY float32) {
	cm.CamMu.Lock()
	// Rotate the screen-space deltas into world space via the pose quaternion.
	dx := mat32.Vec3{-delX, 0, 0}.MulQuat(cm.Pose.Quat)
	dy := mat32.Vec3{0, -delY, 0}.MulQuat(cm.Pose.Quat)
	td := dx.Add(dy)
	cm.Pose.Pos.SetAdd(td)
	cm.Target.SetAdd(td)
	cm.CamMu.Unlock()
}

// PanAxis moves the camera and target along world X,Y axes
func (cm *Camera) PanAxis(delX, delY float32) {
	cm.CamMu.Lock()
	td := mat32.Vec3{-delX, -delY, 0}
	cm.Pose.Pos.SetAdd(td)
	cm.Target.SetAdd(td)
	cm.CamMu.Unlock()
}

// PanTarget moves the target along world X,Y,Z axes and does LookAt
// at the new target location. It ensures that the target is not
// identical to the camera position.
// NOTE(review): the first Target.SetAdd below runs without holding CamMu,
// unlike the other mutators in this file — confirm this is intentional.
func (cm *Camera) PanTarget(delX, delY, delZ float32) {
	td := mat32.Vec3{-delX, -delY, delZ}
	cm.Target.SetAdd(td)
	dist := cm.ViewVector().Length()
	cm.CamMu.Lock()
	if dist == 0 {
		// The move landed the target exactly on the camera position: nudge
		// it once more by the same delta so LookAt has a usable direction.
		cm.Target.SetAdd(td)
	}
	cm.CamMu.Unlock()
	cm.LookAtTarget()
}

// Zoom moves along axis given pct closer or further from the target
// it always moves the target back also if it distance is < 1
func (cm *Camera) Zoom(zoomPct float32) {
	ctaxis := cm.ViewVector()
	cm.CamMu.Lock()
	if ctaxis.IsNil() {
		ctaxis.Set(0, 0, 1)
	}
	dist := ctaxis.Length()
	// Positive pct moves away from the target along the view vector;
	// negative pct moves toward it.
	del := ctaxis.MulScalar(zoomPct)
	cm.Pose.Pos.SetAdd(del)
	if zoomPct < 0 && dist < 1 {
		// Already closer than 1 unit: drag the target along so zooming in
		// can continue without crossing it.
		cm.Target.SetAdd(del)
	}
	cm.CamMu.Unlock()
}
// CameraProps define the ToolBar and MenuBar for StructView.
// Each entry exposes one of the Camera manipulation methods above as a
// toolbar action, with "Args" naming the arguments to prompt for.
var CameraProps = ki.Props{
	"ToolBar": ki.PropSlice{
		{"Defaults", ki.Props{
			"label": "Defaults",
			"icon":  "reset",
		}},
		{"LookAt", ki.Props{
			"icon": "rotate-3d",
			"Args": ki.PropSlice{
				{"Target", ki.BlankProp{}},
				{"UpDir", ki.BlankProp{}},
			},
		}},
		{"Orbit", ki.Props{
			"icon": "rotate-3d",
			"Args": ki.PropSlice{
				{"DeltaX", ki.BlankProp{}},
				{"DeltaY", ki.BlankProp{}},
			},
		}},
		{"Pan", ki.Props{
			"icon": "pan",
			"Args": ki.PropSlice{
				{"DeltaX", ki.BlankProp{}},
				{"DeltaY", ki.BlankProp{}},
			},
		}},
		{"PanAxis", ki.Props{
			"icon": "pan",
			"Args": ki.PropSlice{
				{"DeltaX", ki.BlankProp{}},
				{"DeltaY", ki.BlankProp{}},
			},
		}},
		{"PanTarget", ki.Props{
			"icon": "pan",
			"Args": ki.PropSlice{
				{"DeltaX", ki.BlankProp{}},
				{"DeltaY", ki.BlankProp{}},
				{"DeltaZ", ki.BlankProp{}},
			},
		}},
		{"Zoom", ki.Props{
			"icon": "zoom-in",
			"Args": ki.PropSlice{
				{"ZoomPct", ki.BlankProp{}},
			},
		}},
	},
}
package btrand
import (
"math/rand"
)
// Core defaults of the generator:
//
//	*MuCore    -> averages
//	*SigmaCore -> standard deviations
//	*ProbCore  -> probabilities
const (
	timePaceRateCore          float64 = 0.5
	noiseSigmaCore            float64 = 0.0000005
	trendChangeProbCore       float64 = 0.20
	trendMuCore               float64 = 0.0000005
	burstActivationProbCore   float64 = 0.10
	burstDeactivationProbCore float64 = 0.90
	burstSigmaCore            float64 = 0.00005
	spreadMinCore             float64 = 0.00005
	spreadMaxCore             float64 = 0.00025
)

// randomGenerator simulates a price series with a time pace and three
// components: gaussian noise, a sign-flipping trend, and volatility bursts.
type randomGenerator struct {
	timePaceRate      float64
	noiseSigma        float64
	trendChange       float64
	trendMu           float64
	burstActivation   float64
	burstDeactivation float64
	burstSigma        float64
	burstActivated    bool
	spreadMin         float64
	spreadMax         float64
	rand              *rand.Rand
}

// Option customizes a randomGenerator at construction time.
type Option func(g *randomGenerator)

// TimePaceRate overrides the default time pace rate.
func TimePaceRate(p float64) Option {
	return func(g *randomGenerator) { g.timePaceRate = p }
}

// NoiseSigma overrides the default noise standard deviation.
func NoiseSigma(p float64) Option {
	return func(g *randomGenerator) { g.noiseSigma = p }
}

// newCoreRandomGenerator builds a generator seeded with seed and populated
// with the core defaults, then randomizes the initial trend direction.
func newCoreRandomGenerator(seed int64) *randomGenerator {
	gen := &randomGenerator{
		timePaceRate:      timePaceRateCore,
		noiseSigma:        noiseSigmaCore,
		trendChange:       trendChangeProbCore,
		trendMu:           trendMuCore,
		burstActivation:   burstActivationProbCore,
		burstDeactivation: burstDeactivationProbCore,
		burstSigma:        burstSigmaCore,
		spreadMin:         spreadMinCore,
		spreadMax:         spreadMaxCore,
		rand:              rand.New(rand.NewSource(seed)),
	}
	// Int63n(2)*2-1 yields ±1 with equal probability.
	sign := float64(gen.rand.Int63n(2)*2 - 1)
	gen.trendMu = gen.trendMu * sign
	return gen
}

// newRandomGenerator builds a core generator and applies the given options.
func newRandomGenerator(seed int64, opts ...Option) *randomGenerator {
	gen := newCoreRandomGenerator(seed)
	for _, o := range opts {
		o(gen)
	}
	return gen
}

// next advances the simulation one tick, returning the time increment, the
// price delta, and the bid/ask spread.
func (g *randomGenerator) next() (float64, float64, float64) {
	// Random fraction of the pace rate.
	dt := g.rand.Float64() * g.timePaceRate
	// Base movement: gaussian noise around the current trend drift.
	delta := g.rand.NormFloat64()*g.noiseSigma + g.trendMu
	// Occasionally flip the trend direction.
	if g.rand.Float64() < g.trendChange {
		g.trendMu = -g.trendMu
	}
	// Possibly enter a volatility burst; the && short-circuit means no RNG
	// draw is consumed while a burst is already active.
	if !g.burstActivated && g.rand.Float64() < g.burstActivation {
		g.burstActivated = true
	}
	if g.burstActivated {
		delta += g.rand.NormFloat64() * g.burstSigma
	}
	// Possibly leave the burst again.
	if g.burstActivated && g.rand.Float64() < g.burstDeactivation {
		g.burstActivated = false
	}
	// Uniform spread in [spreadMin, spreadMax).
	spread := g.rand.Float64()*(g.spreadMax-g.spreadMin) + g.spreadMin
	return dt, delta, spread
}
package approximations
import (
"errors"
"math"
"strings"
"github.com/j4rv/gostuff/log"
)
// CellType indicates the structure of the function that will be approximated
type CellType int8

const (
	// Cubes f(x) = ? + ?x + ?x^2 + ?x^3
	Cubes CellType = iota
	// Sines2 f(x) = ? + ?x + sin(?x+?)*? + sin(?x+?)*?
	Sines2
	// Sines3 f(x) = ? + ?x + sin(?x+?)*? + sin(?x+?)*? + sin(?x+?)*?
	Sines3
)
// calcInitialTemp derives a default annealing temperature from the spread of
// the points' Y values: (max - min) / 1000. Returns 0 for an empty input.
//
// Fix: min and max previously started at 0, which inflated (or deflated) the
// computed range whenever all Y values shared the same sign; they are now
// anchored at the first point's Y value.
func calcInitialTemp(points *[]Point) float64 {
	if len(*points) == 0 {
		return 0
	}
	min := (*points)[0].Y
	max := min
	for _, p := range *points {
		if p.Y < min {
			min = p.Y
		}
		if p.Y > max {
			max = p.Y
		}
	}
	return (max - min) / 1000
}
// TypeFromString will return the corresponding CellType from its
// case-insensitive string identifier, for example: "sines2" -> Sines2.
// Unknown identifiers return Sines3 together with a non-nil error.
func TypeFromString(s string) (CellType, error) {
	switch strings.ToLower(s) {
	case "cubes":
		return Cubes, nil
	case "sines2":
		return Sines2, nil
	case "sines3":
		return Sines3, nil
	default:
		// Error string follows Go convention: no trailing punctuation.
		return Sines3, errors.New(s + " is not a valid cell type, falling back to Sines3")
	}
}
// typeToCell returns a fresh zero-value Cell matching the given CellType.
// Unknown types are logged and treated as Sines3.
func typeToCell(ct CellType) Cell {
	switch ct {
	case Sines3:
		return &sines3{}
	case Sines2:
		return &sines2{}
	case Cubes:
		return &cubes{}
	}
	log.Error(ct, "is not a valid function type, using sines3")
	return &sines3{}
}
// CalcBestCell will try to find the function with the celltype structure
// that best approximates the points given. It runs cfg.generations rounds of
// mutate-and-select over a population of cfg.population cells, and returns
// the best cell found along with its fitness (lower is better).
func CalcBestCell(cfg Config, ct CellType, points *[]Point) (candidate Cell, fitting float64) {
	// Derive a starting temperature from the data when none was configured.
	if cfg.initialTemp == 0 {
		cfg.initialTemp = calcInitialTemp(points)
	}
	cell := typeToCell(ct)
	cells := make([]Cell, cfg.population)
	for i := range cells {
		cells[i] = cell.New(cfg)
	}
	candidate, fitting = findBestCandidate(points, &cells)
	for i := 0; i <= cfg.generations; i++ {
		log.Debug("Iteration nº:", i)
		// Temperature decays exponentially with the iteration number.
		temp := getTemp(cfg, i)
		newGeneration(cfg, temp, candidate, &cells)
		candidate, fitting = findBestCandidate(points, &cells)
	}
	return candidate, fitting
}
// newGeneration refills gens with mutations of best: slot 0 keeps the best
// candidate unchanged (elitism), the next (100 - mutationPercentage)% of the
// population are mutated at the current temperature, and the remaining
// mutationPercentage% are mutated at the (hotter) initial temperature to
// preserve exploration.
// TODO this is a good candidate for goroutines optimization
func newGeneration(cfg Config, temperature float64, best Cell, gens *[]Cell) {
	log.Trace("Adding last best candidate:", best)
	(*gens)[0] = best
	var bigMutations = (cfg.population * cfg.mutationPercentage) / 100
	var smallMutations = cfg.population - bigMutations
	log.Trace("Mutating citizens:", smallMutations-1)
	for i := 1; i < smallMutations; i++ {
		best.Mutation(temperature, &(*gens)[i])
	}
	log.Trace("Mutating with high temperature:", bigMutations)
	for i := smallMutations; i < cfg.population; i++ {
		best.Mutation(cfg.initialTemp, &(*gens)[i])
	}
}
// findBestCandidate returns the cell with the lowest fitness over points,
// together with that fitness value.
// NOTE(review): the bestFit == -1 check suggests fitness may return -1 as a
// sentinel for an invalid candidate — confirm against the fitness
// implementation.
func findBestCandidate(points *[]Point, cells *[]Cell) (Cell, float64) {
	bestCell := (*cells)[0]
	bestFit := fitness(bestCell, points)
	for i := 1; i < len(*cells); i++ {
		c := (*cells)[i]
		fit := fitness(c, points)
		if bestFit == -1 || fit < bestFit {
			bestFit = fit
			bestCell = c
		}
	}
	log.Debug("Best candidate: ", bestCell)
	log.Debug("Fitness: ", bestFit)
	return bestCell, bestFit
}
func getTemp(cfg Config, iteration int) float64 {
return cfg.initialTemp * math.Exp(-float64(iteration))
} | approximations/main.go | 0.624179 | 0.486271 | main.go | starcoder |
// Package push tests only test the oras transport (and a invalid transport) against a local registry
package push
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing"
"github.com/pkg/errors"
"github.com/sylabs/singularity/e2e/internal/e2e"
"github.com/sylabs/singularity/e2e/internal/testhelper"
)
// ctx bundles the shared e2e test environment for the push tests.
type ctx struct {
	env e2e.TestEnv
}
// testInvalidTransport verifies that pushing to an unsupported transport URI
// fails with the expected error message and exit code.
func (c ctx) testInvalidTransport(t *testing.T) {
	e2e.EnsureImage(t, c.env)
	tests := []struct {
		name       string
		uri        string
		expectOp   e2e.SingularityCmdResultOp
		expectExit int
	}{
		{
			name:       "push invalid transport",
			uri:        "nothing://bar/foo/foobar:latest",
			expectOp:   e2e.ExpectError(e2e.ContainMatch, "Unsupported transport type: nothing"),
			expectExit: 255,
		},
	}
	for _, tt := range tests {
		args := []string{c.env.ImagePath, tt.uri}
		c.env.RunSingularity(
			t,
			e2e.AsSubtest(tt.name),
			e2e.WithProfile(e2e.UserProfile),
			e2e.WithCommand("push"),
			e2e.WithArgs(args...),
			e2e.ExpectExit(tt.expectExit, tt.expectOp),
		)
	}
}
// testPushCmd exercises `singularity push` against the local test registry:
// invalid sources (missing file, non-SIF file, directory) must fail with exit
// 255, and a standard SIF push must succeed.
func (c ctx) testPushCmd(t *testing.T) {
	e2e.EnsureImage(t, c.env)
	e2e.PrepRegistry(t, c.env)
	// setup file and dir to use as invalid sources
	orasInvalidDir, err := ioutil.TempDir(c.env.TestDir, "oras_push_dir-")
	if err != nil {
		err = errors.Wrap(err, "creating oras temporary directory")
		t.Fatalf("unable to create src dir for push tests: %+v", err)
	}
	orasInvalidFile, err := e2e.WriteTempFile(orasInvalidDir, "oras_invalid_image-", "Invalid Image Contents")
	if err != nil {
		err = errors.Wrap(err, "creating oras temporary file")
		t.Fatalf("unable to create src file for push tests: %+v", err)
	}
	tests := []struct {
		desc             string // case description
		dstURI           string // destination URI for image
		imagePath        string // src image path
		expectedExitCode int    // expected exit code for the test
	}{
		{
			desc:             "non existent image",
			imagePath:        filepath.Join(orasInvalidDir, "not_an_existing_file.sif"),
			dstURI:           fmt.Sprintf("oras://%s/non_existent:test", c.env.TestRegistry),
			expectedExitCode: 255,
		},
		{
			desc:             "non SIF file",
			imagePath:        orasInvalidFile,
			dstURI:           fmt.Sprintf("oras://%s/non_sif:test", c.env.TestRegistry),
			expectedExitCode: 255,
		},
		{
			desc:             "directory",
			imagePath:        orasInvalidDir,
			dstURI:           fmt.Sprintf("oras://%s/directory:test", c.env.TestRegistry),
			expectedExitCode: 255,
		},
		{
			desc:             "standard SIF push",
			imagePath:        c.env.ImagePath,
			dstURI:           fmt.Sprintf("oras://%s/standard_sif:test", c.env.TestRegistry),
			expectedExitCode: 0,
		},
	}
	for _, tt := range tests {
		// NOTE(review): tmpdir is created but never referenced by the push
		// below — likely leftover from a copied pull test; consider removing.
		tmpdir, err := ioutil.TempDir(c.env.TestDir, "pull_test.")
		if err != nil {
			t.Fatalf("Failed to create temporary directory for pull test: %+v", err)
		}
		// NOTE(review): defer inside a loop fires only when the whole test
		// function returns, so these directories accumulate until then.
		defer os.RemoveAll(tmpdir)
		// We create the list of arguments using a string instead of a slice of
		// strings because using slices of strings most of the type ends up adding
		// an empty elements to the list when passing it to the command, which
		// will create a failure.
		args := tt.dstURI
		if tt.imagePath != "" {
			args = tt.imagePath + " " + args
		}
		c.env.RunSingularity(
			t,
			e2e.AsSubtest(tt.desc),
			e2e.WithProfile(e2e.UserProfile),
			e2e.WithCommand("push"),
			e2e.WithArgs(strings.Split(args, " ")...),
			e2e.ExpectExit(tt.expectedExitCode),
		)
	}
}
// E2ETests is the main func to trigger the test suite
func E2ETests(env e2e.TestEnv) testhelper.Tests {
c := ctx{
env: env,
}
return testhelper.Tests{
"invalid transport": c.testInvalidTransport,
"oras": c.testPushCmd,
}
} | e2e/push/push.go | 0.591369 | 0.47171 | push.go | starcoder |
package dagger
import (
"github.com/autom8ter/dagger/primitive"
)
// Edge is an edge in the directed graph. It represents a relationship between two nodes.
// It holds only a typed id; the edge data itself lives in the global graph
// and is fetched on demand via load().
type Edge struct {
	primitive.TypedID
}
// NewEdge creates a new edge node in the global, in-memory graph, connecting
// from -> to with the given relationship. When mutual is true the connection
// is made in both directions.
func NewEdge(relationship string, from, to *Node, mutual bool) (*Edge, error) {
	return from.Connect(to, relationship, mutual)
}

// edgeFrom wraps a primitive edge, registering it with the global graph first
// if the graph does not already know it (or it has no ID yet).
//
// Fix: the original duplicated the `return &Edge{edge}, nil` in both branches;
// the registration guard and the single return express the same behavior.
func edgeFrom(edge *primitive.Edge) (*Edge, error) {
	if !globalGraph.HasEdge(edge) || !edge.HasID() {
		if err := globalGraph.AddEdge(edge); err != nil {
			return nil, err
		}
	}
	return &Edge{edge}, nil
}
// load fetches the edge's full data from the global graph. If the edge is no
// longer present, it returns an empty placeholder rather than nil, so callers
// can use the result without nil checks.
func (e *Edge) load() *primitive.Edge {
	edge, ok := globalGraph.GetEdge(e)
	if !ok {
		return &primitive.Edge{
			Node: primitive.Node{},
			From: primitive.Node{},
			To:   primitive.Node{},
		}
	}
	return edge
}
// From returns the tail node of the edge — the node that points at To().
func (e *Edge) From() *Node {
	return nodeFrom(e.load().From)
}

// To returns the head node of the edge — the node pointed at by From().
func (e *Edge) To() *Node {
	return nodeFrom(e.load().To)
}
// Patch patches the edge attributes with the given data
// NOTE(review): the error returned by globalGraph.AddEdge is discarded here,
// so a failed write is silently ignored — consider surfacing it.
func (e *Edge) Patch(data map[string]interface{}) {
	edge := e.load()
	edge.SetAll(data)
	globalGraph.AddEdge(edge)
}
// Range iterates over the edge's attributes until fn returns false.
func (e *Edge) Range(fn func(key string, value interface{}) bool) {
	e.load().Range(fn)
}

// Node returns the edge's own metadata node.
func (e *Edge) Node() *Node {
	return nodeFrom(e.load().Node)
}
// GetString returns the string stored under key in the edge's attributes,
// if any.
func (e *Edge) GetString(key string) string {
	return e.load().GetString(key)
}

// GetInt returns the int stored under key in the edge's attributes, if any.
func (e *Edge) GetInt(key string) int {
	return e.load().GetInt(key)
}

// GetBool returns the bool stored under key in the edge's attributes, if any.
func (e *Edge) GetBool(key string) bool {
	return e.load().GetBool(key)
}

// Get returns the raw value (any type) stored under key in the edge's
// attributes, if any.
func (e *Edge) Get(key string) interface{} {
	return e.load().Get(key)
}

// Del removes the attribute stored under key from the edge.
func (e *Edge) Del(key string) {
	e.load().Del(key)
}
// JSON returns the edge as JSON bytes
func (e *Edge) JSON() ([]byte, error) {
return e.load().JSON()
}
// FromJSON encodes the edge with the given JSON bytes
func (e *Edge) FromJSON(bits []byte) error {
edge := e.load()
return edge.FromJSON(bits)
} | edge.go | 0.866655 | 0.528108 | edge.go | starcoder |
package gorgonia
import (
"fmt"
"hash"
"github.com/chewxy/hm"
"github.com/pkg/errors"
"gorgonia.org/tensor"
)
/* MIN BETWEEN */

// minBetween is the element-wise minimum op over two same-shaped inputs.
type minBetween struct{}

// Arity returns the number of inputs the Op expects. -1 indicates that it's n-ary and will be determined at runtime
func (op minBetween) Arity() int { return 2 }

// Type informs the type of the Op (not the node). This will be used by the type system to infer the final type of the node:
// both inputs and the output share a single type variable (a → a → a).
func (op minBetween) Type() hm.Type {
	return hm.NewFnType(hm.TypeVariable('a'), hm.TypeVariable('a'), hm.TypeVariable('a'))
}

// InferShape returns the output shape as a function of the inputs.
// Both inputs must have identical shapes; the output shares that shape.
func (op minBetween) InferShape(shps ...DimSizer) (tensor.Shape, error) {
	if err := checkArity(op, len(shps)); err != nil {
		return nil, err
	}
	a := shps[0].(tensor.Shape)
	b := shps[1].(tensor.Shape)
	if !a.Eq(b) {
		return nil, errors.Errorf("Expected both inputs to have the same shape. Got %v and %v instead", a, b)
	}
	// Clone so callers cannot mutate the input shape through the result.
	return a.Clone(), nil
}

// Do executes the op by delegating to tensor.MinBetween.
func (op minBetween) Do(vs ...Value) (Value, error) {
	if err := checkArity(op, len(vs)); err != nil {
		return nil, err
	}
	a := vs[0]
	b := vs[1]
	return tensor.MinBetween(a, b)
}

// ReturnsPtr returns false
func (op minBetween) ReturnsPtr() bool { return false }

// CallsExtern returns false
func (op minBetween) CallsExtern() bool { return false }

// OverwritesInput returns -1: no input is overwritten.
func (op minBetween) OverwritesInput() int { return -1 }
/* Other methods */

// WriteHash writes a stable identifier for the op into the hasher.
// Fix: use fmt.Fprint rather than fmt.Fprintf with a non-constant format
// string (flagged by `go vet`'s printf check).
func (op minBetween) WriteHash(h hash.Hash) { fmt.Fprint(h, op.String()) }

// Hashcode returns a hash of the op derived from WriteHash.
func (op minBetween) Hashcode() uint32 { return simpleHash(op) }

// String returns the display name of the op.
func (op minBetween) String() string { return "MinBetween" }
// UsePreallocDo executes the op, writing the result into prealloc.
func (op minBetween) UsePreallocDo(prealloc Value, vs ...Value) (Value, error) {
	if err := checkArity(op, len(vs)); err != nil {
		return nil, err
	}
	a := vs[0]
	b := vs[1]
	return tensor.MinBetween(a, b, tensor.WithReuse(prealloc.(tensor.Tensor)))
}

// DiffWRT reports that the op is differentiable with respect to both inputs.
func (op minBetween) DiffWRT(inputs int) []bool { return []bool{true, true} }

// SymDiff delegates symbolic differentiation to the shared min/max helper.
func (op minBetween) SymDiff(inputs Nodes, output, grad *Node) (Nodes, error) {
	return minmaxSymDiff(inputs[0], inputs[1], output, grad)
}

// DoDiff delegates automatic differentiation to the shared min/max helper.
func (op minBetween) DoDiff(ctx ExecutionContext, inputs Nodes, output *Node) error {
	return minmaxAutoDiff(ctx, inputs[0], inputs[1], output)
}
/* MAX BETWEEN */

// maxBetween is the element-wise maximum op over two same-shaped inputs.
type maxBetween struct{}

// Arity returns the number of inputs the Op expects. -1 indicates that it's n-ary and will be determined at runtime
func (op maxBetween) Arity() int { return 2 }

// Type informs the type of the Op (not the node). This will be used by the type system to infer the final type of the node:
// both inputs and the output share a single type variable (a → a → a).
func (op maxBetween) Type() hm.Type {
	return hm.NewFnType(hm.TypeVariable('a'), hm.TypeVariable('a'), hm.TypeVariable('a'))
}

// InferShape returns the output shape as a function of the inputs.
// Both inputs must have identical shapes; the output shares that shape.
func (op maxBetween) InferShape(shps ...DimSizer) (tensor.Shape, error) {
	if err := checkArity(op, len(shps)); err != nil {
		return nil, err
	}
	a := shps[0].(tensor.Shape)
	b := shps[1].(tensor.Shape)
	if !a.Eq(b) {
		return nil, errors.Errorf("Expected both inputs to have the same shape. Got %v and %v instead", a, b)
	}
	// Clone so callers cannot mutate the input shape through the result.
	return a.Clone(), nil
}

// Do executes the op by delegating to tensor.MaxBetween.
func (op maxBetween) Do(vs ...Value) (Value, error) {
	if err := checkArity(op, len(vs)); err != nil {
		return nil, err
	}
	a := vs[0]
	b := vs[1]
	return tensor.MaxBetween(a, b)
}

// ReturnsPtr returns false
func (op maxBetween) ReturnsPtr() bool { return false }

// CallsExtern returns false
func (op maxBetween) CallsExtern() bool { return false }

// OverwritesInput returns -1: no input is overwritten.
func (op maxBetween) OverwritesInput() int { return -1 }
/* Other methods */

// WriteHash writes a stable identifier for the op into the hasher.
// Fix: use fmt.Fprint rather than fmt.Fprintf with a non-constant format
// string (flagged by `go vet`'s printf check).
func (op maxBetween) WriteHash(h hash.Hash) { fmt.Fprint(h, op.String()) }

// Hashcode returns a hash of the op derived from WriteHash.
func (op maxBetween) Hashcode() uint32 { return simpleHash(op) }

// String returns the display name of the op.
func (op maxBetween) String() string { return "MaxBetween" }
// UsePreallocDo executes the op, writing the result into prealloc.
func (op maxBetween) UsePreallocDo(prealloc Value, vs ...Value) (Value, error) {
	if err := checkArity(op, len(vs)); err != nil {
		return nil, err
	}
	a := vs[0]
	b := vs[1]
	return tensor.MaxBetween(a, b, tensor.WithReuse(prealloc.(tensor.Tensor)))
}

// DiffWRT reports that the op is differentiable with respect to both inputs.
func (op maxBetween) DiffWRT(inputs int) []bool { return []bool{true, true} }

// SymDiff delegates symbolic differentiation to the shared min/max helper.
func (op maxBetween) SymDiff(inputs Nodes, output, grad *Node) (Nodes, error) {
	return minmaxSymDiff(inputs[0], inputs[1], output, grad)
}

// DoDiff delegates automatic differentiation to the shared min/max helper.
func (op maxBetween) DoDiff(ctx ExecutionContext, inputs Nodes, output *Node) error {
	return minmaxAutoDiff(ctx, inputs[0], inputs[1], output)
}
// minmaxSymDiff computes the symbolic gradients for the element-wise min/max
// ops. A mask of positions where a was the selected element (a == out) routes
// the incoming gradient: gradA = grad * mask, and gradB = grad - gradA, so
// the full gradient is split between the two inputs.
func minmaxSymDiff(a, b *Node, out *Node, grad *Node) (Nodes, error) {
	mask, err := Eq(a, out, true)
	if err != nil {
		return nil, err
	}
	WithGroupName(gradClust)(mask)
	gradA, err := HadamardProd(grad, mask)
	if err != nil {
		return nil, err
	}
	WithGroupName(gradClust)(gradA)
	gradB, err := Sub(grad, gradA)
	if err != nil {
		return nil, err
	}
	WithGroupName(gradClust)(gradB)
	return Nodes{gradA, gradB}, nil
}
// minmaxAutoDiff performs the runtime (value-level) gradient accumulation for
// the element-wise min/max ops, mirroring minmaxSymDiff: a selection mask
// routes the output gradient to a, and b receives the remainder.
func minmaxAutoDiff(ctx ExecutionContext, a, b *Node, output *Node) (err error) {
	// dummy for now so let's keep everything as simple as possible
	adv, bdv := getDV(a, b)
	outdv := output.boundTo.(*dualValue)
	// NOTE(review): the variable is named eqOp but it is constructed with
	// ltOpType (less-than), with retSame set so it produces a same-typed
	// mask — confirm the comparison kind is intended.
	eqOp := newElemBinOp(ltOpType, a, b)
	eqOp.retSame = true
	eq := &ExternalOp{
		Op:               eqOp,
		ExecutionContext: ctx,
	}
	ctx.Device = a.Device()
	mask, err := eq.Do(adv.Value, outdv.Value)
	if err != nil {
		return errors.Wrap(err, "Unable to get mask")
	}
	dev := a.Device()
	var gradA, gradB, gradOut Value
	var extra bool
	// Fetch the needed gradient values on the target device; any extra copy
	// made for the transfer is returned to the pool when we are done.
	if gradOut, extra, err = output.GradOnDevice(dev, ctx.External); err != nil {
		return errors.Wrapf(err, gradOnDeviceFail, output, dev)
	}
	if extra {
		defer ctx.PutValue(dev, gradOut)
	}
	if gradA, extra, err = a.GradOnDevice(dev, ctx.External); err != nil {
		return errors.Wrapf(err, gradOnDeviceFail, a, dev)
	}
	if extra {
		defer ctx.PutValue(dev, gradA)
	}
	// a's gradient: accumulate gradOut * mask into gradA.
	mul := NewHadamardProdOp(a, output, ctx)
	mul.Incr = gradA
	var d Value
	if d, err = mul.Do(gradOut, mask); err != nil {
		return errors.Wrapf(err, "IncrDo gradA failed")
	}
	adv.SetDeriv(d)
	// b's gradient: the remainder gradOut - dA, accumulated into gradB.
	sub := NewSubOp(b, a, ctx)
	sub.Incr = gradB
	if d, err = sub.Do(gradOut, adv.d); err != nil {
		return errors.Wrapf(err, "IncrDo gradB failed")
	}
	bdv.SetDeriv(d)
	return nil
}
package atomic
import "sync/atomic"
// This file contains some simplified atomic primitives that the Go standard
// library does not offer, such as an atomic Boolean.

// toBool converts a uint32 to a bool by testing its lowest bit.
func toBool(n uint32) bool {
	return n&1 == 1
}

// toInt converts a bool to its canonical uint32 representation
// (true -> 1, false -> 0).
func toInt(b bool) uint32 {
	if b {
		return 1
	}
	return 0
}

// Bool is an atomic Boolean.
// It is stored as a uint32 internally so the sync/atomic uint32 operations
// can be used. All methods keep the stored value canonical (0 or 1).
type Bool struct{ v uint32 }

// NewBool creates a Bool holding the given initial value.
func NewBool(initial bool) Bool {
	return Bool{v: toInt(initial)}
}

// Load atomically loads the Boolean.
func (b *Bool) Load() bool {
	return toBool(atomic.LoadUint32(&b.v))
}

// CompareAndSwap atomically swaps in new iff the current value equals old,
// reporting whether the swap happened.
func (b *Bool) CompareAndSwap(old, new bool) bool {
	return atomic.CompareAndSwapUint32(&b.v, toInt(old), toInt(new))
}

// Store atomically stores the passed value.
func (b *Bool) Store(new bool) {
	atomic.StoreUint32(&b.v, toInt(new))
}

// Swap sets the given value and returns the previous value.
func (b *Bool) Swap(new bool) bool {
	return toBool(atomic.SwapUint32(&b.v, toInt(new)))
}

// Toggle atomically negates the Boolean and returns the previous value.
//
// Fix: the previous implementation incremented the counter, letting the
// stored value grow past 1; after a Toggle, Load reported the correct parity
// but CompareAndSwap (which compares against the canonical 0/1 encodings)
// could no longer match. XOR-ing the low bit keeps the value canonical.
func (b *Bool) Toggle() bool {
	for {
		old := atomic.LoadUint32(&b.v)
		if atomic.CompareAndSwapUint32(&b.v, old, old^1) {
			return toBool(old)
		}
	}
}
// Uint32 is an atomic uint32 counter.
type Uint32 struct {
	v uint32
}

// NewUint32 returns a Uint32 initialized to v.
func NewUint32(v uint32) Uint32 {
	return Uint32{v: v}
}

// Load atomically reads the current value.
func (u *Uint32) Load() uint32 {
	return atomic.LoadUint32(&u.v)
}

// Store atomically sets the value.
func (u *Uint32) Store(v uint32) {
	atomic.StoreUint32(&u.v, v)
}

// CompareAndSwap atomically replaces old with new, reporting success.
func (u *Uint32) CompareAndSwap(old, new uint32) bool {
	return atomic.CompareAndSwapUint32(&u.v, old, new)
}

// Add atomically adds delta and returns the new value.
func (u *Uint32) Add(delta uint32) uint32 {
	return atomic.AddUint32(&u.v, delta)
}

// Inc atomically increments by one and returns the new value.
func (u *Uint32) Inc() uint32 {
	return u.Add(1)
}
// Int32 is an atomic int32 counter.
type Int32 struct {
	v int32
}

// NewInt32 returns an Int32 initialized to v.
func NewInt32(v int32) Int32 {
	return Int32{v: v}
}

// Load atomically reads the current value.
func (i *Int32) Load() int32 {
	return atomic.LoadInt32(&i.v)
}

// Store atomically sets the value.
func (i *Int32) Store(v int32) {
	atomic.StoreInt32(&i.v, v)
}

// CompareAndSwap atomically replaces old with new, reporting success.
func (i *Int32) CompareAndSwap(old, new int32) bool {
	return atomic.CompareAndSwapInt32(&i.v, old, new)
}

// Add atomically adds delta and returns the new value.
func (i *Int32) Add(delta int32) int32 {
	return atomic.AddInt32(&i.v, delta)
}

// Sub atomically subtracts delta and returns the new value.
func (i *Int32) Sub(delta int32) int32 {
	return i.Add(-delta)
}

// Inc atomically increments by one and returns the new value.
func (i *Int32) Inc() int32 {
	return i.Add(1)
}

// Dec atomically decrements by one and returns the new value.
func (i *Int32) Dec() int32 {
	return i.Add(-1)
}
package talgo
// Function is an operation applied to the i'th element of a collection.
type Function func(i int)

// Predicate reports whether the i'th element of a collection satisfies some
// condition.
type Predicate func(i int) bool

// Selector picks the better of the elements at positions i and j, returning
// the chosen position.
type Selector func(i, j int) int

// Index maps a logical position onto an underlying collection index, allowing
// a collection to be traversed in a different order.
type Index func(i int) int

// ReverseIndex returns an Index that visits a collection of the given length
// back to front.
func ReverseIndex(len int) Index {
	return func(i int) int {
		return len - i - 1
	}
}

// ForEach applies f to every index in [0, len), in ascending order.
func ForEach(len int, f Function) {
	for i := 0; i < len; i++ {
		f(i)
	}
}

// ReverseForEach applies f to every index in [0, len), in descending order.
func ReverseForEach(len int, f Function) {
	for i := len; i > 0; i-- {
		f(i - 1)
	}
}

// IndexedForEach applies f to index(i) for every logical position i in [0, len).
func IndexedForEach(len int, index Index, f Function) {
	for i := 0; i < len; i++ {
		f(index(i))
	}
}

// Select runs a tournament over all positions in [0, len) using selector s
// and returns the winning position. With len <= 1 it returns 0.
func Select(len int, s Selector) int {
	best := 0
	for i := 1; i < len; i++ {
		best = s(best, i)
	}
	return best
}

// FindFirst returns the lowest position in [0, len) satisfying p, or -1 when
// none does.
func FindFirst(len int, p Predicate) int {
	for i := 0; i < len; i++ {
		if p(i) {
			return i
		}
	}
	return -1
}

// FindLast returns the highest position in [0, len) satisfying p, or -1 when
// none does.
func FindLast(len int, p Predicate) int {
	for i := len; i > 0; i-- {
		if p(i - 1) {
			return i - 1
		}
	}
	return -1
}

// FindAll returns every position in [0, len) satisfying p, in ascending
// order, or nil when nothing matches.
func FindAll(len int, p Predicate) []int {
	var hits []int
	for i := 0; i < len; i++ {
		if p(i) {
			hits = append(hits, i)
		}
	}
	return hits
}

// CountItems returns how many positions in [0, len) satisfy p.
func CountItems(len int, p Predicate) int {
	n := 0
	for i := 0; i < len; i++ {
		if p(i) {
			n++
		}
	}
	return n
}

// Any reports whether at least one position in [0, len) satisfies p.
func Any(len int, p Predicate) bool {
	return FindFirst(len, p) != -1
}

// None reports whether no position in [0, len) satisfies p.
func None(len int, p Predicate) bool {
	return !Any(len, p)
}

// All reports whether every position in [0, len) satisfies p. Like the
// original loop form, it stops at the first failing position.
func All(len int, p Predicate) bool {
	return FindFirst(len, func(i int) bool { return !p(i) }) == -1
}
package types
import (
"fmt"
"github.com/attic-labs/noms/go/d"
"github.com/attic-labs/noms/go/hash"
)
// valueDecoder reads Noms values from a nomsReader, resolving chunk
// references through vr and interning decoded types via tc.
type valueDecoder struct {
	nomsReader
	vr ValueReader
	tc *TypeCache
}

// |tc| must be locked as long as the valueDecoder is being used
func newValueDecoder(nr nomsReader, vr ValueReader, tc *TypeCache) *valueDecoder {
	return &valueDecoder{nr, vr, tc}
}

// readKind reads a single byte and interprets it as a NomsKind tag.
func (r *valueDecoder) readKind() NomsKind {
	return NomsKind(r.readUint8())
}

// readRef reads a hash and ref height and builds a Ref of the given type.
func (r *valueDecoder) readRef(t *Type) Ref {
	h := r.readHash()
	height := r.readUint64()
	return constructRef(t, h, height)
}

// readType decodes a serialized type. Compound kinds recurse into their
// element type(s); struct and union kinds have dedicated encodings; cycle
// kinds carry a back-reference index; anything else must be a primitive.
func (r *valueDecoder) readType() *Type {
	k := r.readKind()
	switch k {
	case ListKind:
		return r.tc.getCompoundType(ListKind, r.readType())
	case MapKind:
		return r.tc.getCompoundType(MapKind, r.readType(), r.readType())
	case RefKind:
		return r.tc.getCompoundType(RefKind, r.readType())
	case SetKind:
		return r.tc.getCompoundType(SetKind, r.readType())
	case StructKind:
		return r.readStructType()
	case UnionKind:
		return r.readUnionType()
	case CycleKind:
		return r.tc.getCycleType(r.readUint32())
	}
	d.Chk.True(IsPrimitiveKind(k))
	return MakePrimitiveType(k)
}
// readBlobLeafSequence reads a raw byte run as a blob leaf sequence.
func (r *valueDecoder) readBlobLeafSequence() indexedSequence {
	b := r.readBytes()
	return newBlobLeafSequence(r.vr, b)
}

// readValueSequence reads a uint32 count followed by that many encoded values.
func (r *valueDecoder) readValueSequence() ValueSlice {
	count := r.readUint32()
	data := ValueSlice{}
	for i := uint32(0); i < count; i++ {
		v := r.readValue()
		data = append(data, v)
	}
	return data
}

// readListLeafSequence reads the element values of a list leaf chunk of type t.
func (r *valueDecoder) readListLeafSequence(t *Type) indexedSequence {
	data := r.readValueSequence()
	return listLeafSequence{data, t, r.vr}
}

// readSetLeafSequence reads the element values of a set leaf chunk of type t.
func (r *valueDecoder) readSetLeafSequence(t *Type) orderedSequence {
	data := r.readValueSequence()
	return setLeafSequence{data, t, r.vr}
}

// readMapLeafSequence reads count key/value pairs of a map leaf chunk of type t.
func (r *valueDecoder) readMapLeafSequence(t *Type) orderedSequence {
	count := r.readUint32()
	data := []mapEntry{}
	for i := uint32(0); i < count; i++ {
		k := r.readValue()
		v := r.readValue()
		data = append(data, mapEntry{k, v})
	}
	return mapLeafSequence{data, t, r.vr}
}

// readMetaSequence reads count meta tuples (child ref, ordering key, leaf
// count) describing an internal node of a chunked collection.
func (r *valueDecoder) readMetaSequence() metaSequenceData {
	count := r.readUint32()
	data := metaSequenceData{}
	for i := uint32(0); i < count; i++ {
		ref := r.readValue().(Ref)
		v := r.readValue()
		var key orderedKey
		// NOTE: `r` here shadows the decoder receiver for the duration of
		// this if statement only.
		if r, ok := v.(Ref); ok {
			// See https://github.com/attic-labs/noms/issues/1688#issuecomment-227528987
			key = orderedKeyFromHash(r.TargetHash())
		} else {
			key = newOrderedKey(v)
		}
		numLeaves := r.readUint64()
		data = append(data, newMetaTuple(ref, key, numLeaves, nil))
	}
	return data
}

// readIndexedMetaSequence wraps the decoded meta tuples for indexed
// collections (lists, blobs).
func (r *valueDecoder) readIndexedMetaSequence(t *Type) indexedMetaSequence {
	return newIndexedMetaSequence(r.readMetaSequence(), t, r.vr)
}

// readOrderedMetaSequence wraps the decoded meta tuples for ordered
// collections (maps, sets).
func (r *valueDecoder) readOrderedMetaSequence(t *Type) orderedMetaSequence {
	return newOrderedMetaSequence(r.readMetaSequence(), t, r.vr)
}
// readValue decodes a single value: its type first, then a kind-specific
// payload. Collection kinds carry a leading bool that distinguishes a meta
// (internal tree) chunk from a leaf chunk. Cycle, union and value kinds can
// never label a concrete value and trigger an assertion failure.
func (r *valueDecoder) readValue() Value {
	t := r.readType()
	switch t.Kind() {
	case BlobKind:
		isMeta := r.readBool()
		if isMeta {
			return newBlob(r.readIndexedMetaSequence(t))
		}
		return newBlob(r.readBlobLeafSequence())
	case BoolKind:
		return Bool(r.readBool())
	case NumberKind:
		return r.readNumber()
	case StringKind:
		return String(r.readString())
	case ListKind:
		isMeta := r.readBool()
		if isMeta {
			return newList(r.readIndexedMetaSequence(t))
		}
		return newList(r.readListLeafSequence(t))
	case MapKind:
		isMeta := r.readBool()
		if isMeta {
			return newMap(r.readOrderedMetaSequence(t))
		}
		return newMap(r.readMapLeafSequence(t))
	case RefKind:
		return r.readRef(t)
	case SetKind:
		isMeta := r.readBool()
		if isMeta {
			return newSet(r.readOrderedMetaSequence(t))
		}
		return newSet(r.readSetLeafSequence(t))
	case StructKind:
		return r.readStruct(t)
	case TypeKind:
		return r.readType()
	case CycleKind, UnionKind, ValueKind:
		d.Chk.Fail(fmt.Sprintf("A value instance can never have type %s", KindToString[t.Kind()]))
	}
	panic("not reachable")
}
// readStruct reads the field values of a struct whose type (and therefore
// field count and order) is already known from t.
func (r *valueDecoder) readStruct(t *Type) Value {
	// We've read `[StructKind, name, fields, unions` at this point
	desc := t.Desc.(StructDesc)
	count := desc.Len()
	values := make([]Value, count)
	for i := 0; i < count; i++ {
		values[i] = r.readValue()
	}
	return Struct{values, t, &hash.Hash{}}
}

// readCachedStructType walks the type-cache trie keyed by the struct name
// identifier followed by (field name, field type id) pairs. It returns the
// cached *Type, or nil on a cache miss.
func (r *valueDecoder) readCachedStructType() *Type {
	trie := r.tc.trieRoots[StructKind].Traverse(r.readIdent(r.tc))
	count := r.readUint32()
	for i := uint32(0); i < count; i++ {
		trie = trie.Traverse(r.readIdent(r.tc))
		trie = trie.Traverse(r.readType().id)
	}
	return trie.t
}

// readStructType decodes a struct type, first attempting an allocation-free
// lookup through the type cache and, on a miss, seeking back to re-read the
// name and fields to build (and cache) the type.
func (r *valueDecoder) readStructType() *Type {
	// Try to decode cached type without allocating
	pos := r.pos()
	t := r.readCachedStructType()
	if t != nil {
		return t
	}
	// Cache miss. Go back to read and create type
	r.seek(pos)
	name := r.readString()
	count := r.readUint32()
	fieldNames := make([]string, count)
	fieldTypes := make([]*Type, count)
	for i := uint32(0); i < count; i++ {
		fieldNames[i] = r.readString()
		fieldTypes[i] = r.readType()
	}
	return r.tc.makeStructType(name, fieldNames, fieldTypes)
}
l := r.readUint32()
ts := make(typeSlice, l)
for i := uint32(0); i < l; i++ {
ts[i] = r.readType()
}
return r.tc.getCompoundType(UnionKind, ts...)
} | go/types/value_decoder.go | 0.631935 | 0.413122 | value_decoder.go | starcoder |
package merkle
import (
"bytes"
cmn "github.com/torusresearch/tendermint/libs/common"
)
//----------------------------------------
// ProofOp gets converted to an instance of ProofOperator:

// ProofOperator is a layer for calculating intermediate Merkle roots
// when a series of Merkle trees are chained together.
// Run() takes leaf values from a tree and returns the Merkle
// root for the corresponding tree. It takes and returns a list of bytes
// to allow multiple leaves to be part of a single proof, for instance in a range proof.
// GetKey() returns the key path component this operator consumes, or an
// empty slice when it consumes none.
// ProofOp() encodes the ProofOperator in a generic way so it can later be
// decoded with OpDecoder.
type ProofOperator interface {
	Run([][]byte) ([][]byte, error)
	GetKey() []byte
	ProofOp() ProofOp
}
//----------------------------------------
// Operations on a list of ProofOperators

// ProofOperators is a slice of ProofOperator(s).
// Each operator will be applied to the input value sequentially
// and the last Merkle root will be verified with already known data
type ProofOperators []ProofOperator

// VerifyValue verifies a single-leaf proof by wrapping value and delegating
// to Verify.
func (poz ProofOperators) VerifyValue(root []byte, keypath string, value []byte) (err error) {
	return poz.Verify(root, keypath, [][]byte{value})
}

// Verify runs every operator in order over args, consuming key-path parts
// from the end as it goes, then checks that the final computed root equals
// root and that the entire key path was consumed.
func (poz ProofOperators) Verify(root []byte, keypath string, args [][]byte) (err error) {
	keys, err := KeyPathToKeys(keypath)
	if err != nil {
		return
	}
	for i, op := range poz {
		key := op.GetKey()
		if len(key) != 0 {
			if len(keys) == 0 {
				return cmn.NewError("Key path has insufficient # of parts: expected no more keys but got %+v", string(key))
			}
			// Keys are consumed from the end of the path toward the front.
			lastKey := keys[len(keys)-1]
			if !bytes.Equal(lastKey, key) {
				return cmn.NewError("Key mismatch on operation #%d: expected %+v but got %+v", i, string(lastKey), string(key))
			}
			keys = keys[:len(keys)-1]
		}
		args, err = op.Run(args)
		if err != nil {
			return
		}
	}
	if !bytes.Equal(root, args[0]) {
		return cmn.NewError("Calculated root hash is invalid: expected %+v but got %+v", root, args[0])
	}
	if len(keys) != 0 {
		return cmn.NewError("Keypath not consumed all")
	}
	return nil
}
//----------------------------------------
// ProofRuntime - main entrypoint

// OpDecoder turns a serialized ProofOp back into a runnable ProofOperator.
type OpDecoder func(ProofOp) (ProofOperator, error)

// ProofRuntime maps proof-op type names to their registered decoders.
type ProofRuntime struct {
	decoders map[string]OpDecoder
}

// NewProofRuntime returns an empty runtime with no decoders registered.
func NewProofRuntime() *ProofRuntime {
	return &ProofRuntime{
		decoders: make(map[string]OpDecoder),
	}
}

// RegisterOpDecoder registers dec for proof ops of the given type.
// It panics if a decoder for typ was already registered.
func (prt *ProofRuntime) RegisterOpDecoder(typ string, dec OpDecoder) {
	_, ok := prt.decoders[typ]
	if ok {
		panic("already registered for type " + typ)
	}
	prt.decoders[typ] = dec
}
// Decode resolves the registered decoder for pop's type and applies it.
func (prt *ProofRuntime) Decode(pop ProofOp) (ProofOperator, error) {
	decoder := prt.decoders[pop.Type]
	if decoder == nil {
		return nil, cmn.NewError("unrecognized proof type %v", pop.Type)
	}
	return decoder(pop)
}

// DecodeProof decodes every op in the proof, preserving their order.
func (prt *ProofRuntime) DecodeProof(proof *Proof) (ProofOperators, error) {
	poz := make(ProofOperators, 0, len(proof.Ops))
	for _, pop := range proof.Ops {
		operator, err := prt.Decode(pop)
		if err != nil {
			return nil, cmn.ErrorWrap(err, "decoding a proof operator")
		}
		poz = append(poz, operator)
	}
	return poz, nil
}
// VerifyValue verifies an existence proof for a single value.
func (prt *ProofRuntime) VerifyValue(proof *Proof, root []byte, keypath string, value []byte) (err error) {
	return prt.Verify(proof, root, keypath, [][]byte{value})
}

// VerifyAbsence verifies that the given key path holds no value.
// TODO In the long run we'll need a method of classifcation of ops,
// whether existence or absence or perhaps a third?
func (prt *ProofRuntime) VerifyAbsence(proof *Proof, root []byte, keypath string) (err error) {
	return prt.Verify(proof, root, keypath, nil)
}

// Verify decodes the proof's operators and checks them against root and
// keypath with the given leaf args (nil for absence proofs).
func (prt *ProofRuntime) Verify(proof *Proof, root []byte, keypath string, args [][]byte) (err error) {
	poz, err := prt.DecodeProof(proof)
	if err != nil {
		return cmn.ErrorWrap(err, "decoding proof")
	}
	return poz.Verify(root, keypath, args)
}
// DefaultProofRuntime only knows about Simple value
// proofs.
// To use e.g. IAVL proofs, register op-decoders as
// defined in the IAVL package.
func DefaultProofRuntime() (prt *ProofRuntime) {
prt = NewProofRuntime()
prt.RegisterOpDecoder(ProofOpSimpleValue, SimpleValueOpDecoder)
return
} | crypto/merkle/proof.go | 0.657428 | 0.541591 | proof.go | starcoder |
package bls
import (
mat "github.com/nlpodyssey/spago/pkg/mat32"
"github.com/nlpodyssey/spago/pkg/ml/ag"
"github.com/nlpodyssey/spago/pkg/ml/nn"
"log"
)
// BroadLearningAlgorithm performs the ridge regression approximation to optimize the output params (Wo).
// The parameters for feature mapping (Wz) can also be optimized through the alternating direction method of
// multipliers (ADMM) method (Goldstein et al. 2014).
// The parameters of the enhanced nodes remain the initial ones and are not optimized.
type BroadLearningAlgorithm struct {
	// Model is the BLS model whose weights are updated in place.
	Model *Model
	// Input holds the training inputs, one matrix per sample.
	Input []mat.Matrix
	// DesiredOutput holds the expected outputs aligned with Input.
	DesiredOutput []mat.Matrix
	// Penalty is the ridge-regression regularization coefficient (λ).
	Penalty mat.Float
	OptimizeFeaturesWeight bool // skip optimization if you don't want to
	// Verbose enables progress logging.
	Verbose bool
}

// Do runs the broad learning algorithm: it optionally optimizes the feature
// weights via ADMM, then solves the output weights in closed form with
// ridge regression.
func (l *BroadLearningAlgorithm) Do() {
	if l.OptimizeFeaturesWeight {
		l.log("Optimizing features weights...")
		l.optimizeFeaturesWeight()
	}
	l.log("Collecting features and enhanced nodes...")
	zh := mat.ConcatH(l.zhs()...)
	y := mat.ConcatH(l.DesiredOutput...)
	l.log("Performing ridge regression. It will take a while...")
	w := ridgeRegression(zh, y, l.Penalty)
	l.updateOutputWeights(w)
	l.log("All right, the model is served.")
}
// optimizeFeaturesWeight refines the feature-mapping weights (Wz) with ADMM,
// using the feature values computed over all inputs.
func (l *BroadLearningAlgorithm) optimizeFeaturesWeight() {
	featuresMap := make([][]mat.Matrix, l.Model.NumOfFeatures)
	for _, x := range l.Input {
		g := ag.NewGraph()
		x := g.NewVariable(x, false)
		m := nn.ReifyForTraining(l.Model, g).(*Model)
		for j := 0; j < m.NumOfFeatures; j++ {
			featuresMap[j] = append(featuresMap[j], nn.Affine(m.Graph(), m.Bz[j], m.Wz[j], x).Value())
		}
	}
	x := mat.ConcatH(l.Input...)
	for i := 0; i < l.Model.NumOfFeatures; i++ {
		z := mat.ConcatH(featuresMap[i]...)
		wz := admn(z, x, 1e-3, 100) // weight optimization
		l.Model.Wz[i].Value().SetData(wz.T().Data())
	}
}

// zhs computes, for every input, the concatenation of feature nodes and
// enhanced nodes — the design-matrix rows for the output regression.
func (l *BroadLearningAlgorithm) zhs() []mat.Matrix {
	zhs := make([]mat.Matrix, len(l.Input))
	for i, x := range l.Input {
		g := ag.NewGraph()
		x := g.NewVariable(x, false)
		m := nn.ReifyForTraining(l.Model, g).(*Model)
		zhs[i] = singleZH(m, x)
	}
	return zhs
}

// updateOutputWeights copies the solved output weights into the model.
func (l *BroadLearningAlgorithm) updateOutputWeights(w mat.Matrix) {
	l.Model.W.Value().SetData(w.T().Data())
}

// log prints message only when Verbose is enabled.
func (l *BroadLearningAlgorithm) log(message string) {
	if l.Verbose {
		log.Println(message)
	}
}

// singleZH returns the concatenated feature (z) and enhanced (h) node values
// for a single input node x.
func singleZH(m *Model, x ag.Node) mat.Matrix {
	g := m.Graph()
	z := m.useFeaturesDropout(m.featuresMapping(x))
	h := m.useEnhancedNodesDropout(g.Invoke(m.EnhancedNodesActivation, nn.Affine(g, m.Bh, m.Wh, z)))
	return g.Concat(z, h).Value()
}
// ridgeRegression obtains the solution of output weight solving W = Inv(T(A)A+λI)T(A)Y
func ridgeRegression(x mat.Matrix, y mat.Matrix, c mat.Float) mat.Matrix {
	i2 := mat.I(x.Columns()).ProdScalar(c) // λI
	x2 := x.T().Mul(x).Add(i2)             // T(A)A + λI
	invX2 := x2.Inverse()
	return invX2.Mul(x.T()).Mul(y)
}

// admn is a naive implementation of the alternating direction method of multipliers method (Goldstein et al. 2014).
// It alternates a quadratic solve (Ck), a soft-thresholding/proximal step
// (Ok) and a dual update (Uk) for a fixed number of iterations, returning
// the final weights.
func admn(z mat.Matrix, x mat.Matrix, lam mat.Float, iterations int) mat.Matrix {
	ZZ := z.T().Mul(z)
	var Wk mat.Matrix = mat.NewEmptyDense(z.Columns(), x.Columns())
	var Ok mat.Matrix = mat.NewEmptyDense(z.Columns(), x.Columns())
	var Uk mat.Matrix = mat.NewEmptyDense(z.Columns(), x.Columns())
	L1 := ZZ.AddInPlace(mat.I(z.Columns()))
	L1 = L1.Inverse()
	L2 := L1.Mul(z.T()).Mul(x)
	for i := 0; i < iterations; i++ {
		temp := Ok.Sub(Uk)
		Ck := L2.Add(L1.Mul(temp))
		Ok = shrinkage(Ck.Add(Uk), lam) // proximal (soft-threshold) step
		Uk = Uk.Add(Ck.Sub(Ok))         // dual update
		Wk = Ok
	}
	return Wk
}
func shrinkage(X mat.Matrix, k mat.Float) mat.Matrix {
Zeros := mat.NewEmptyDense(X.Rows(), X.Columns())
X1 := X.SubScalar(k)
X2 := X.ProdScalar(-1.0).SubScalar(k)
X2 = X2.Maximum(Zeros)
X2 = Zeros.Sub(X2)
return X1.Maximum(X2)
} | pkg/ml/nn/bls/bla.go | 0.727007 | 0.539408 | bla.go | starcoder |
package testutil
import (
"encoding/json"
"fmt"
"reflect"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/stretchr/testify/assert"
)
// Metric defines a single point measurement: a name plus its tags, field
// values and timestamp.
type Metric struct {
	Measurement string
	Tags        map[string]string
	Fields      map[string]interface{}
	Time        time.Time
}

// String renders the metric as "<measurement> <fields>".
func (p *Metric) String() string {
	return p.Measurement + " " + fmt.Sprint(p.Fields)
}
// Accumulator defines a mocked out accumulator
type Accumulator struct {
	sync.Mutex
	*sync.Cond
	// Metrics holds everything recorded so far (unless Discard is set).
	Metrics  []*Metric
	nMetrics uint64
	// Discard drops incoming metrics while still counting them.
	Discard bool
	// Errors collects everything passed to AddError.
	Errors []error
	debug  bool
}

// NMetrics returns the number of metrics added so far (including discarded
// ones), read atomically.
func (a *Accumulator) NMetrics() uint64 {
	return atomic.LoadUint64(&a.nMetrics)
}

// ClearMetrics resets the metric counter and empties the recorded metrics.
func (a *Accumulator) ClearMetrics() {
	atomic.StoreUint64(&a.nMetrics, 0)
	a.Lock()
	defer a.Unlock()
	a.Metrics = make([]*Metric, 0)
}
// AddFields adds a measurement point with a specified timestamp.
// When no timestamp is given, time.Now() is used. Empty field sets are
// dropped. When Discard is set nothing is stored, but the metric counter
// still increments and any waiter is woken.
func (a *Accumulator) AddFields(
	measurement string,
	fields map[string]interface{},
	tags map[string]string,
	timestamp ...time.Time,
) {
	atomic.AddUint64(&a.nMetrics, 1)
	a.Lock()
	defer a.Unlock()
	// Wake anyone blocked in Wait().
	if a.Cond != nil {
		a.Cond.Broadcast()
	}
	if a.Discard {
		return
	}
	if tags == nil {
		tags = map[string]string{}
	}
	if len(fields) == 0 {
		return
	}
	var t time.Time
	if len(timestamp) > 0 {
		t = timestamp[0]
	} else {
		t = time.Now()
	}
	if a.debug {
		pretty, _ := json.MarshalIndent(fields, "", " ")
		prettyTags, _ := json.MarshalIndent(tags, "", " ")
		msg := fmt.Sprintf("Adding Measurement [%s]\nFields:%s\nTags:%s\n",
			measurement, string(pretty), string(prettyTags))
		fmt.Print(msg)
	}
	p := &Metric{
		Measurement: measurement,
		Fields:      fields,
		Tags:        tags,
		Time:        t,
	}
	a.Metrics = append(a.Metrics, p)
}
// AddCounter records fields as a counter metric; in this mock it simply
// delegates to AddFields.
func (a *Accumulator) AddCounter(
	measurement string,
	fields map[string]interface{},
	tags map[string]string,
	timestamp ...time.Time,
) {
	a.AddFields(measurement, fields, tags, timestamp...)
}

// AddGauge records fields as a gauge metric; in this mock it simply
// delegates to AddFields.
func (a *Accumulator) AddGauge(
	measurement string,
	fields map[string]interface{},
	tags map[string]string,
	timestamp ...time.Time,
) {
	a.AddFields(measurement, fields, tags, timestamp...)
}

// AddMetrics records each telegraf metric via AddFields.
func (a *Accumulator) AddMetrics(metrics []telegraf.Metric) {
	for _, m := range metrics {
		a.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
	}
}
// AddError appends the given error to Accumulator.Errors. A nil error is
// ignored.
func (a *Accumulator) AddError(err error) {
	if err == nil {
		return
	}
	a.Lock()
	a.Errors = append(a.Errors, err)
	a.Unlock()
}

// SetPrecision is a no-op stub for implementing the Accumulator interface.
// (Redundant trailing `return` removed — staticcheck S1023.)
func (a *Accumulator) SetPrecision(precision, interval time.Duration) {}

// DisablePrecision is a no-op stub for implementing the Accumulator interface.
func (a *Accumulator) DisablePrecision() {}

// Debug returns the debug flag.
func (a *Accumulator) Debug() bool {
	// stub for implementing Accumulator interface.
	return a.debug
}

// SetDebug sets the debug flag.
func (a *Accumulator) SetDebug(debug bool) {
	// stub for implementing Accumulator interface.
	a.debug = debug
}
// Get gets the first recorded metric with the given measurement name,
// reporting whether one was found.
func (a *Accumulator) Get(measurement string) (*Metric, bool) {
	for _, p := range a.Metrics {
		if p.Measurement == measurement {
			return p, true
		}
	}

	return nil, false
}

// NFields returns the total number of fields in the accumulator, across all
// measurements.
// (The `for _, _ = range` counting loop was replaced with len() per field
// map — same result, idiomatic and O(1) per metric.)
func (a *Accumulator) NFields() int {
	a.Lock()
	defer a.Unlock()
	counter := 0
	for _, pt := range a.Metrics {
		counter += len(pt.Fields)
	}

	return counter
}
// Wait waits for a metric to be added to the accumulator.
// Accumulator must already be locked.
func (a *Accumulator) Wait() {
	// Lazily create the condition variable bound to the accumulator's mutex.
	if a.Cond == nil {
		a.Cond = sync.NewCond(&a.Mutex)
	}
	a.Cond.Wait()
}

// AssertContainsTaggedFields fails t unless a metric exists with the given
// name, exactly matching tags, and fields equal to the expected ones.
func (a *Accumulator) AssertContainsTaggedFields(
	t *testing.T,
	measurement string,
	fields map[string]interface{},
	tags map[string]string,
) {
	a.Lock()
	defer a.Unlock()
	for _, p := range a.Metrics {
		if !reflect.DeepEqual(tags, p.Tags) {
			continue
		}
		if p.Measurement == measurement {
			assert.Equal(t, fields, p.Fields)
			return
		}
	}
	msg := fmt.Sprintf("unknown measurement %s with tags %v", measurement, tags)
	assert.Fail(t, msg)
}

// AssertContainsFields fails t unless a metric with the given name has
// fields equal to the expected ones (tags are ignored).
func (a *Accumulator) AssertContainsFields(
	t *testing.T,
	measurement string,
	fields map[string]interface{},
) {
	a.Lock()
	defer a.Unlock()
	for _, p := range a.Metrics {
		if p.Measurement == measurement {
			assert.Equal(t, fields, p.Fields)
			return
		}
	}
	msg := fmt.Sprintf("unknown measurement %s", measurement)
	assert.Fail(t, msg)
}

// AssertDoesNotContainMeasurement fails t if any metric with the given
// measurement name was recorded.
func (a *Accumulator) AssertDoesNotContainMeasurement(t *testing.T, measurement string) {
	a.Lock()
	defer a.Unlock()
	for _, p := range a.Metrics {
		if p.Measurement == measurement {
			msg := fmt.Sprintf("found unexpected measurement %s", measurement)
			assert.Fail(t, msg)
		}
	}
}
// hasFieldOfType reports whether the named measurement has a field named
// field whose value satisfies isType. It is the shared implementation of
// the HasXxxField helpers below, which previously duplicated this scan
// five times.
func (a *Accumulator) hasFieldOfType(measurement string, field string, isType func(interface{}) bool) bool {
	a.Lock()
	defer a.Unlock()
	for _, p := range a.Metrics {
		if p.Measurement == measurement {
			for fieldname, value := range p.Fields {
				if fieldname == field {
					// Matches the original behavior: the first field with
					// this name decides the result.
					return isType(value)
				}
			}
		}
	}
	return false
}

// HasIntField returns true if the measurement has an int64 field with the
// given name.
func (a *Accumulator) HasIntField(measurement string, field string) bool {
	return a.hasFieldOfType(measurement, field, func(v interface{}) bool {
		_, ok := v.(int64)
		return ok
	})
}

// HasInt32Field returns true if the measurement has an int32 field with the
// given name.
func (a *Accumulator) HasInt32Field(measurement string, field string) bool {
	return a.hasFieldOfType(measurement, field, func(v interface{}) bool {
		_, ok := v.(int32)
		return ok
	})
}

// HasStringField returns true if the measurement has a string field with
// the given name.
func (a *Accumulator) HasStringField(measurement string, field string) bool {
	return a.hasFieldOfType(measurement, field, func(v interface{}) bool {
		_, ok := v.(string)
		return ok
	})
}

// HasUIntField returns true if the measurement has a uint64 field with the
// given name.
func (a *Accumulator) HasUIntField(measurement string, field string) bool {
	return a.hasFieldOfType(measurement, field, func(v interface{}) bool {
		_, ok := v.(uint64)
		return ok
	})
}

// HasFloatField returns true if the measurement has a float64 field with
// the given name.
func (a *Accumulator) HasFloatField(measurement string, field string) bool {
	return a.hasFieldOfType(measurement, field, func(v interface{}) bool {
		_, ok := v.(float64)
		return ok
	})
}
// HasMeasurement returns true if the accumulator has a measurement with the
// given name
func (a *Accumulator) HasMeasurement(measurement string) bool {
a.Lock()
defer a.Unlock()
for _, p := range a.Metrics {
if p.Measurement == measurement {
return true
}
}
return false
} | testutil/accumulator.go | 0.723212 | 0.454593 | accumulator.go | starcoder |
package openapi
import (
"encoding/json"
"net/url"
"strings"
)
// CreateVerificationParams holds the optional parameters for the method
// 'CreateVerification'. All fields are pointers so that unset values are
// omitted from the request.
type CreateVerificationParams struct {
	// The amount of the associated PSD2 compliant transaction. Requires the PSD2 Service flag enabled.
	Amount *string `json:"Amount,omitempty"`
	// Your [App Hash](https://developers.google.com/identity/sms-retriever/verify#computing_your_apps_hash_string) to be appended at the end of your verification SMS body. Applies only to SMS. Example SMS body: `<#> Your AppName verification code is: 1234 He42w354ol9`.
	AppHash *string `json:"AppHash,omitempty"`
	// The verification method to use. Can be: [`email`](https://www.twilio.com/docs/verify/email), `sms`, `whatsapp` or `call`.
	Channel *string `json:"Channel,omitempty"`
	// [`email`](https://www.twilio.com/docs/verify/email) channel configuration in json format. Must include 'from' and 'from_name'.
	ChannelConfiguration *map[string]interface{} `json:"ChannelConfiguration,omitempty"`
	// A pre-generated code to use for verification. The code can be between 4 and 10 characters, inclusive.
	CustomCode *string `json:"CustomCode,omitempty"`
	// A custom user defined friendly name that overwrites the existing one in the verification message
	CustomFriendlyName *string `json:"CustomFriendlyName,omitempty"`
	// The text of a custom message to use for the verification.
	CustomMessage *string `json:"CustomMessage,omitempty"`
	// The locale to use for the verification SMS, WhatsApp or call. Can be: `af`, `ar`, `ca`, `cs`, `da`, `de`, `el`, `en`, `en-GB`, `es`, `fi`, `fr`, `he`, `hi`, `hr`, `hu`, `id`, `it`, `ja`, `ko`, `ms`, `nb`, `nl`, `pl`, `pt`, `pr-BR`, `ro`, `ru`, `sv`, `th`, `tl`, `tr`, `vi`, `zh`, `zh-CN`, or `zh-HK.`
	Locale *string `json:"Locale,omitempty"`
	// The payee of the associated PSD2 compliant transaction. Requires the PSD2 Service flag enabled.
	Payee *string `json:"Payee,omitempty"`
	// The custom key-value pairs of Programmable Rate Limits. Keys correspond to `unique_name` fields defined when [creating your Rate Limit](https://www.twilio.com/docs/verify/api/service-rate-limits). Associated value pairs represent values in the request that you are rate limiting on. You may include multiple Rate Limit values in each request.
	RateLimits *map[string]interface{} `json:"RateLimits,omitempty"`
	// The digits to send after a phone call is answered, for example, to dial an extension. For more information, see the Programmable Voice documentation of [sendDigits](https://www.twilio.com/docs/voice/twiml/number#attributes-sendDigits).
	SendDigits *string `json:"SendDigits,omitempty"`
	// A stringified JSON object in which the keys are the template's special variables and the values are the variables substitutions.
	TemplateCustomSubstitutions *string `json:"TemplateCustomSubstitutions,omitempty"`
	// The message [template](https://www.twilio.com/docs/verify/api/templates). If provided, will override the default template for the Service. SMS channel only.
	TemplateSid *string `json:"TemplateSid,omitempty"`
	// The phone number or [email](https://www.twilio.com/docs/verify/email) to verify. Phone numbers must be in [E.164 format](https://www.twilio.com/docs/glossary/what-e164).
	To *string `json:"To,omitempty"`
}
// SetAmount sets Amount and returns params for call chaining.
func (params *CreateVerificationParams) SetAmount(Amount string) *CreateVerificationParams {
	params.Amount = &Amount
	return params
}

// SetAppHash sets AppHash and returns params for call chaining.
func (params *CreateVerificationParams) SetAppHash(AppHash string) *CreateVerificationParams {
	params.AppHash = &AppHash
	return params
}

// SetChannel sets Channel and returns params for call chaining.
func (params *CreateVerificationParams) SetChannel(Channel string) *CreateVerificationParams {
	params.Channel = &Channel
	return params
}

// SetChannelConfiguration sets ChannelConfiguration and returns params for call chaining.
func (params *CreateVerificationParams) SetChannelConfiguration(ChannelConfiguration map[string]interface{}) *CreateVerificationParams {
	params.ChannelConfiguration = &ChannelConfiguration
	return params
}

// SetCustomCode sets CustomCode and returns params for call chaining.
func (params *CreateVerificationParams) SetCustomCode(CustomCode string) *CreateVerificationParams {
	params.CustomCode = &CustomCode
	return params
}

// SetCustomFriendlyName sets CustomFriendlyName and returns params for call chaining.
func (params *CreateVerificationParams) SetCustomFriendlyName(CustomFriendlyName string) *CreateVerificationParams {
	params.CustomFriendlyName = &CustomFriendlyName
	return params
}

// SetCustomMessage sets CustomMessage and returns params for call chaining.
func (params *CreateVerificationParams) SetCustomMessage(CustomMessage string) *CreateVerificationParams {
	params.CustomMessage = &CustomMessage
	return params
}

// SetLocale sets Locale and returns params for call chaining.
func (params *CreateVerificationParams) SetLocale(Locale string) *CreateVerificationParams {
	params.Locale = &Locale
	return params
}

// SetPayee sets Payee and returns params for call chaining.
func (params *CreateVerificationParams) SetPayee(Payee string) *CreateVerificationParams {
	params.Payee = &Payee
	return params
}

// SetRateLimits sets RateLimits and returns params for call chaining.
func (params *CreateVerificationParams) SetRateLimits(RateLimits map[string]interface{}) *CreateVerificationParams {
	params.RateLimits = &RateLimits
	return params
}

// SetSendDigits sets SendDigits and returns params for call chaining.
func (params *CreateVerificationParams) SetSendDigits(SendDigits string) *CreateVerificationParams {
	params.SendDigits = &SendDigits
	return params
}

// SetTemplateCustomSubstitutions sets TemplateCustomSubstitutions and returns params for call chaining.
func (params *CreateVerificationParams) SetTemplateCustomSubstitutions(TemplateCustomSubstitutions string) *CreateVerificationParams {
	params.TemplateCustomSubstitutions = &TemplateCustomSubstitutions
	return params
}

// SetTemplateSid sets TemplateSid and returns params for call chaining.
func (params *CreateVerificationParams) SetTemplateSid(TemplateSid string) *CreateVerificationParams {
	params.TemplateSid = &TemplateSid
	return params
}

// SetTo sets To and returns params for call chaining.
func (params *CreateVerificationParams) SetTo(To string) *CreateVerificationParams {
	params.To = &To
	return params
}
// CreateVerification creates a new Verification under the given Service by
// POSTing the configured parameters and decoding the created resource.
// (The trailing `return ps, err` returned a stale, necessarily-nil error
// variable; changed to an explicit `return ps, nil` for clarity.)
func (c *ApiService) CreateVerification(ServiceSid string, params *CreateVerificationParams) (*VerifyV2Verification, error) {
	path := "/v2/Services/{ServiceSid}/Verifications"
	path = strings.Replace(path, "{"+"ServiceSid"+"}", ServiceSid, -1)

	data := url.Values{}
	headers := make(map[string]interface{})

	if params != nil && params.Amount != nil {
		data.Set("Amount", *params.Amount)
	}
	if params != nil && params.AppHash != nil {
		data.Set("AppHash", *params.AppHash)
	}
	if params != nil && params.Channel != nil {
		data.Set("Channel", *params.Channel)
	}
	if params != nil && params.ChannelConfiguration != nil {
		v, err := json.Marshal(params.ChannelConfiguration)
		if err != nil {
			return nil, err
		}
		data.Set("ChannelConfiguration", string(v))
	}
	if params != nil && params.CustomCode != nil {
		data.Set("CustomCode", *params.CustomCode)
	}
	if params != nil && params.CustomFriendlyName != nil {
		data.Set("CustomFriendlyName", *params.CustomFriendlyName)
	}
	if params != nil && params.CustomMessage != nil {
		data.Set("CustomMessage", *params.CustomMessage)
	}
	if params != nil && params.Locale != nil {
		data.Set("Locale", *params.Locale)
	}
	if params != nil && params.Payee != nil {
		data.Set("Payee", *params.Payee)
	}
	if params != nil && params.RateLimits != nil {
		v, err := json.Marshal(params.RateLimits)
		if err != nil {
			return nil, err
		}
		data.Set("RateLimits", string(v))
	}
	if params != nil && params.SendDigits != nil {
		data.Set("SendDigits", *params.SendDigits)
	}
	if params != nil && params.TemplateCustomSubstitutions != nil {
		data.Set("TemplateCustomSubstitutions", *params.TemplateCustomSubstitutions)
	}
	if params != nil && params.TemplateSid != nil {
		data.Set("TemplateSid", *params.TemplateSid)
	}
	if params != nil && params.To != nil {
		data.Set("To", *params.To)
	}

	resp, err := c.requestHandler.Post(c.baseURL+path, data, headers)
	if err != nil {
		return nil, err
	}

	defer resp.Body.Close()

	ps := &VerifyV2Verification{}
	if err := json.NewDecoder(resp.Body).Decode(ps); err != nil {
		return nil, err
	}

	return ps, nil
}
// FetchVerification fetches a specific Verification of a Service by SID.
// (The trailing `return ps, err` returned a stale, necessarily-nil error
// variable; changed to an explicit `return ps, nil` for clarity.)
func (c *ApiService) FetchVerification(ServiceSid string, Sid string) (*VerifyV2Verification, error) {
	path := "/v2/Services/{ServiceSid}/Verifications/{Sid}"
	path = strings.Replace(path, "{"+"ServiceSid"+"}", ServiceSid, -1)
	path = strings.Replace(path, "{"+"Sid"+"}", Sid, -1)

	data := url.Values{}
	headers := make(map[string]interface{})

	resp, err := c.requestHandler.Get(c.baseURL+path, data, headers)
	if err != nil {
		return nil, err
	}

	defer resp.Body.Close()

	ps := &VerifyV2Verification{}
	if err := json.NewDecoder(resp.Body).Decode(ps); err != nil {
		return nil, err
	}

	return ps, nil
}
// UpdateVerificationParams holds the optional parameters for the method
// 'UpdateVerification'.
type UpdateVerificationParams struct {
	// The new status of the resource. Can be: `canceled` or `approved`.
	Status *string `json:"Status,omitempty"`
}

// SetStatus sets Status and returns params for call chaining.
func (params *UpdateVerificationParams) SetStatus(Status string) *UpdateVerificationParams {
	params.Status = &Status
	return params
}
// Update a Verification status
func (c *ApiService) UpdateVerification(ServiceSid string, Sid string, params *UpdateVerificationParams) (*VerifyV2Verification, error) {
path := "/v2/Services/{ServiceSid}/Verifications/{Sid}"
path = strings.Replace(path, "{"+"ServiceSid"+"}", ServiceSid, -1)
path = strings.Replace(path, "{"+"Sid"+"}", Sid, -1)
data := url.Values{}
headers := make(map[string]interface{})
if params != nil && params.Status != nil {
data.Set("Status", *params.Status)
}
resp, err := c.requestHandler.Post(c.baseURL+path, data, headers)
if err != nil {
return nil, err
}
defer resp.Body.Close()
ps := &VerifyV2Verification{}
if err := json.NewDecoder(resp.Body).Decode(ps); err != nil {
return nil, err
}
return ps, err
} | rest/verify/v2/services_verifications.go | 0.799677 | 0.417687 | services_verifications.go | starcoder |
package redis
import (
"fmt"
"net"
"reflect"
"sort"
"strconv"
"strings"
"time"
"github.com/golang/glog"
)
const (
	// ClusterInfoUnset status of the cluster info: no data set
	ClusterInfoUnset = "Unset"
	// ClusterInfoPartial status of the cluster info: data is not complete (some nodes didn't respond)
	ClusterInfoPartial = "Partial"
	// ClusterInfoInconsistent status of the cluster info: nodeinfos is not consistent between nodes
	ClusterInfoInconsistent = "Inconsistent"
	// ClusterInfoConsistent status of the cluster info: nodeinfos is complete and consistent between nodes
	ClusterInfoConsistent = "Consistent"
)

// NodeInfos representation of a node info, i.e. data returned by the CLUSTER NODES redis command
// Node is the information of the targeted node
// Friends are the view of the other nodes from the targeted node
type NodeInfos struct {
	Node    *Node
	Friends Nodes
}

// ClusterInfos represents the node infos for all nodes of the cluster,
// keyed by node address. Status is one of the ClusterInfo* constants above.
type ClusterInfos struct {
	Infos  map[string]*NodeInfos
	Status string
}
// NewNodeInfos returns a NodeInfos with a default-initialized node and an
// empty friends list.
func NewNodeInfos() *NodeInfos {
	return &NodeInfos{
		Node:    NewDefaultNode(),
		Friends: Nodes{},
	}
}

// NewClusterInfos returns an empty ClusterInfos in the Unset state.
func NewClusterInfos() *ClusterInfos {
	return &ClusterInfos{
		Infos:  make(map[string]*NodeInfos),
		Status: ClusterInfoUnset,
	}
}
// DecodeNodeStartTime decodes the output of the redis INFO command and
// derives the instance start time from its "uptime_in_seconds" field,
// returning now minus uptime. On a missing or unparsable field it returns
// time.Now() together with an error.
func DecodeNodeStartTime(input *string) (time.Time, error) {
	lines := strings.Split(*input, "\n")
	for _, line := range lines {
		// NOTE(review): assumes every "uptime_in_seconds" line contains a
		// ':' separator (true for INFO output); values[1] would panic
		// otherwise — confirm against the callers' input.
		values := strings.Split(line, ":")
		if values[0] == "uptime_in_seconds" {
			uptimeInSeconds, err := strconv.Atoi(strings.TrimSpace(values[1]))
			if err != nil {
				// NOTE(review): the error is both logged and returned here,
				// so callers likely log it twice.
				glog.Errorf("Error while decoding redis instance uptime in seconds. String : %s Error: %v", values[1], err)
				return time.Now(), err
			}
			return time.Now().Add(-time.Duration(uptimeInSeconds) * time.Second), nil
		}
	}
	glog.Errorf("Error while decoding redis instance uptime in seconds. No data found")
	return time.Now(), fmt.Errorf("Error while decoding redis instance uptime in seconds. No data found")
}
// DecodeNodeInfos decodes the output of the CLUSTER NODES redis command into a
// NodeInfos. addr is the address of the node we are connected to; it is used
// as a fallback when the line describing that node carries an empty IP.
func DecodeNodeInfos(input *string, addr string) *NodeInfos {
	infos := NewNodeInfos()
	lines := strings.Split(*input, "\n")
	for _, line := range lines {
		// Fields per line (space separated):
		// <id> <ip:port[@cport]> <flags> <primary-ref> <ping-sent> <pong-recv>
		// <config-epoch> <link-state> [<slot> ...]
		values := strings.Split(line, " ")
		if len(values) < 8 {
			// last line is always empty
			glog.V(7).Infof("Not enough values in line split, ignoring line: '%s'", line)
			continue
		} else {
			node := NewDefaultNode()
			node.ID = values[0]
			//remove trailing port for cluster internal protocol
			ipPort := strings.Split(values[1], "@")
			if ip, port, err := net.SplitHostPort(ipPort[0]); err == nil {
				node.IP = ip
				node.Port = port
				if ip == "" {
					// ip of the node we are connecting to is sometime empty
					node.IP, _, _ = net.SplitHostPort(addr)
				}
			} else {
				glog.Errorf("Error while decoding node info for node '%s', cannot split ip:port ('%s'): %v", node.ID, values[1], err)
			}
			// values[2] holds the flags field (role, failure state, and the
			// "myself" marker, see below).
			err := node.SetRole(values[2])
			if err != nil {
				glog.Errorf("Couldn't set role %q: %v", values[2], err)
			}
			node.SetFailureStatus(values[2])
			node.SetPrimaryReferent(values[3])
			// Ping/pong timestamps and config epoch: parse failures are
			// silently ignored, leaving the defaults from NewDefaultNode.
			if i, err := strconv.ParseInt(values[4], 10, 64); err == nil {
				node.PingSent = i
			}
			if i, err := strconv.ParseInt(values[5], 10, 64); err == nil {
				node.PongRecv = i
			}
			if i, err := strconv.ParseInt(values[6], 10, 64); err == nil {
				node.ConfigEpoch = i
			}
			err = node.SetLinkStatus(values[7])
			if err != nil {
				glog.Errorf("Can't set link status %q: %v", values[7], err)
			}
			// Remaining fields describe the slot ranges served by the node,
			// including any in-flight importing/migrating markers.
			for _, slot := range values[8:] {
				if s, importing, migrating, err := DecodeSlotRange(slot); err == nil {
					node.Slots = append(node.Slots, s...)
					if importing != nil {
						node.ImportingSlots[importing.SlotID] = importing.FromNodeID
					}
					if migrating != nil {
						node.MigratingSlots[migrating.SlotID] = migrating.ToNodeID
					}
				}
			}
			// The "myself" prefix in the flags marks the node we are
			// connected to; everything else is a "friend".
			if strings.HasPrefix(values[2], "myself") {
				infos.Node = node
				glog.V(7).Infof("Getting node info for node: '%s'", node)
			} else {
				infos.Friends = append(infos.Friends, node)
				glog.V(7).Infof("Adding node to slice: '%s'", node)
			}
		}
	}
	return infos
}
// ComputeStatus checks the consistency of the per-node cluster views and
// updates c.Status accordingly. The status ClusterInfoPartial is set while
// building the ClusterInfos; if the status is anything other than
// ClusterInfoUnset this function does nothing and returns false. It returns
// true only when all views agree and the status transitions to
// ClusterInfoConsistent.
func (c *ClusterInfos) ComputeStatus() bool {
	if c.Status != ClusterInfoUnset {
		return false
	}
	consistencyStatus := false
	consolidatedView := c.GetNodes().SortByFunc(LessByID)
	consolidatedSignature := getConfigSignature(consolidatedView)
	glog.V(7).Infof("Consolidated view:\n%s", consolidatedSignature)
	for addr, nodeinfos := range c.Infos {
		// Build the per-node view on a fresh slice: appending to (and then
		// sorting) nodeinfos.Friends directly would reorder the stored
		// Friends slice and could write into its shared backing array.
		nodesView := make(Nodes, 0, len(nodeinfos.Friends)+1)
		nodesView = append(nodesView, nodeinfos.Friends...)
		nodesView = append(nodesView, nodeinfos.Node)
		nodesView = nodesView.SortByFunc(LessByID)
		nodeSignature := getConfigSignature(nodesView)
		glog.V(7).Infof("Node view from %s (ID: %s):\n%s", addr, nodeinfos.Node.ID, nodeSignature)
		if !reflect.DeepEqual(consolidatedSignature, nodeSignature) {
			glog.V(4).Info("Temporary inconsistency between nodes is possible. If the following inconsistency message persists for more than 20 mins, any cluster operation (scale, rolling update) should be avoided before the message is gone")
			glog.V(4).Infof("Inconsistency from %s: \n%s\nVS\n%s", addr, consolidatedSignature, nodeSignature)
			c.Status = ClusterInfoInconsistent
		}
	}
	if c.Status == ClusterInfoUnset {
		c.Status = ClusterInfoConsistent
		consistencyStatus = true
	}
	return consistencyStatus
}
// GetNodes returns a Nodes slice view of the cluster, formed from how each
// node sees itself. You should check Status before using it, to wait for a
// consistent view.
func (c *ClusterInfos) GetNodes() Nodes {
	// Pre-size: exactly one entry per node info.
	nodes := make(Nodes, 0, len(c.Infos))
	for _, nodeinfos := range c.Infos {
		nodes = append(nodes, nodeinfos.Node)
	}
	return nodes
}
// ConfigSignature represents the slot assignment of each primary node,
// keyed by node ID (see getConfigSignature).
type ConfigSignature map[string]SlotSlice

// String returns a deterministic (ID-sorted) representation of a
// ConfigSignature.
func (c ConfigSignature) String() string {
	ids := make([]string, 0, len(c))
	for id := range c {
		ids = append(ids, id)
	}
	sort.Strings(ids)
	// Build with strings.Builder to avoid quadratic += concatenation.
	var b strings.Builder
	b.WriteString("map[")
	for _, id := range ids {
		fmt.Fprintf(&b, "%s:%s\n", id, c[id])
	}
	b.WriteString("]")
	return b.String()
}
// getConfigSignature builds a ConfigSignature identifying a cluster view, used
// to check consistency between nodes. Only primary nodes contribute: the
// signature maps each primary's ID to the slots it owns.
func getConfigSignature(nodes Nodes) ConfigSignature {
	signature := ConfigSignature{}
	for _, node := range nodes {
		if node.Role == redisPrimaryRole {
			signature[node.ID] = node.Slots
		}
	}
	return signature
}
// OwnerWithStatus represents a node owner and the way it sees the slot
// ("owned", "migrating", "importing", or the zero value when the slot is
// reported unassigned).
type OwnerWithStatus struct {
	Addr   string
	Status string
}

// OwneshipView maps an owner (and its view of a slot) to the list of node
// addresses reporting that view.
// NOTE(review): the name looks like a typo for "OwnershipView", but it is
// exported, so renaming it would break callers.
type OwneshipView map[OwnerWithStatus][]string

// ClusterInconsistencies maps each slot to the set of conflicting ownership
// views reported for it.
type ClusterInconsistencies map[Slot]OwneshipView
// String returns a human-readable, one-line-per-slot dump of the
// inconsistencies. The map iteration makes the line order nondeterministic.
func (ci ClusterInconsistencies) String() string {
	// Build with strings.Builder to avoid quadratic += concatenation.
	var b strings.Builder
	for slot, ownership := range ci {
		fmt.Fprintf(&b, "%d: %s\n", slot, ownership)
	}
	return b.String()
}
// GetInconsistencies returns a view of the inconsistent configuration per slot
func (c *ClusterInfos) GetInconsistencies() *ClusterInconsistencies {
ci := ClusterInconsistencies{}
for addr, nodeinfo := range c.Infos {
allSlots := SlotSlice{}
for _, node := range append(nodeinfo.Friends, nodeinfo.Node) {
// owned slots
allSlots = append(allSlots, node.Slots...)
for _, slot := range node.Slots {
if _, ok := ci[slot]; !ok {
ci[slot] = OwneshipView{}
}
viewers := ci[slot][OwnerWithStatus{Addr: node.IPPort(), Status: "owned"}]
ci[slot][OwnerWithStatus{Addr: node.IPPort(), Status: "owned"}] = append(viewers, addr)
}
// migrating slots
for slot := range node.MigratingSlots {
if _, ok := ci[slot]; !ok {
ci[slot] = OwneshipView{}
}
viewers := ci[slot][OwnerWithStatus{Addr: node.IPPort(), Status: "migrating"}]
ci[slot][OwnerWithStatus{Addr: node.IPPort(), Status: "migrating"}] = append(viewers, addr)
}
// importing slots
for slot := range node.ImportingSlots {
if _, ok := ci[slot]; !ok {
ci[slot] = OwneshipView{}
}
viewers := ci[slot][OwnerWithStatus{Addr: node.IPPort(), Status: "importing"}]
ci[slot][OwnerWithStatus{Addr: node.IPPort(), Status: "importing"}] = append(viewers, addr)
}
}
// slots that are not owned according to this node
missing := RemoveSlots(BuildSlotSlice(0, 16383), allSlots)
for _, slot := range missing {
if _, ok := ci[slot]; !ok {
ci[slot] = OwneshipView{}
}
viewers := ci[slot][OwnerWithStatus{Addr: "", Status: ""}]
ci[slot][OwnerWithStatus{Addr: "", Status: ""}] = append(viewers, addr)
}
}
// now cleaning all consistent data
for slot, ownership := range ci {
if len(ownership) <= 1 {
delete(ci, slot)
}
}
return &ci
} | pkg/redis/clusterinfo.go | 0.657648 | 0.435962 | clusterinfo.go | starcoder |
package stats
import (
"context"
"time"
"github.com/deixis/spine/contextutil"
"github.com/deixis/spine/log"
)
// Stats is an interface for app statistics
type Stats interface {
	// Start starts the Stats instance.
	Start()
	// Stop stops the Stats instance.
	Stop()

	// Count is a simple counter
	Count(key string, n interface{}, meta ...map[string]string)
	// Inc increments the given counter by 1
	Inc(key string, meta ...map[string]string)
	// Dec decrements the given counter by 1
	Dec(key string, meta ...map[string]string)
	// Gauge measures the amount, level, or contents of something
	// The given value replaces the current one
	// e.g. in-flight requests, uptime, ...
	Gauge(key string, n interface{}, meta ...map[string]string)
	// Timing measures how long it takes to accomplish something
	// e.g. algorithm, request, ...
	Timing(key string, t time.Duration, meta ...map[string]string)
	// Histogram measures the distribution of values over the time
	Histogram(key string, n interface{}, tags ...map[string]string)
	// With returns a child Stats, and add meta to that Stats
	With(meta map[string]string) Stats
	// Log attaches a logger to a Stats instance
	Log(l log.Logger) Stats
}
// Count calls `Count` on the `Stats` stored in ctx (NopStats when absent).
func Count(ctx context.Context, key string, n interface{}, meta ...map[string]string) {
	FromContext(ctx).Count(key, n, meta...)
}

// Inc calls `Inc` on the `Stats` stored in ctx (NopStats when absent).
func Inc(ctx context.Context, key string, meta ...map[string]string) {
	FromContext(ctx).Inc(key, meta...)
}

// Dec calls `Dec` on the `Stats` stored in ctx (NopStats when absent).
func Dec(ctx context.Context, key string, meta ...map[string]string) {
	FromContext(ctx).Dec(key, meta...)
}

// Gauge calls `Gauge` on the `Stats` stored in ctx (NopStats when absent).
func Gauge(ctx context.Context, key string, n interface{}, meta ...map[string]string) {
	FromContext(ctx).Gauge(key, n, meta...)
}

// Timing calls `Timing` on the `Stats` stored in ctx (NopStats when absent).
func Timing(ctx context.Context, key string, t time.Duration, meta ...map[string]string) {
	FromContext(ctx).Timing(key, t, meta...)
}

// Histogram calls `Histogram` on the `Stats` stored in ctx (NopStats when absent).
func Histogram(ctx context.Context, key string, n interface{}, tags ...map[string]string) {
	FromContext(ctx).Histogram(key, n, tags...)
}
// contextKey is an unexported key type for storing the active Stats in a
// context, preventing collisions with keys defined by other packages.
type contextKey struct{}

// activeContextKey is the context key under which the Stats instance is stored.
var activeContextKey = contextKey{}
// FromContext returns the `Stats` instance associated with `ctx`, or
// `NopStats` if no `Stats` instance could be found.
func FromContext(ctx contextutil.ValueContext) Stats {
	if s, ok := ctx.Value(activeContextKey).(Stats); ok {
		return s
	}
	return NopStats()
}
// WithContext returns a copy of the parent context in which the given `Stats`
// instance is stored; retrieve it later with FromContext.
func WithContext(ctx context.Context, s Stats) context.Context {
	return context.WithValue(ctx, activeContextKey, s)
}
package fast
// FindCorners finds FAST corner coordinates on a grayscaled image.
// pixels maps a linear pixel index (i*width + j) to its grayscale intensity;
// width and height are the image dimensions; threshold is the minimum
// intensity difference for a circle pixel to count as brighter/darker.
// The result is a flat slice of (x, y) coordinate pairs.
func FindCorners(pixels map[int]int, width, height, threshold int) []int {
	var circleOffsets = getCircleOffsets(width)
	var circlePixels [16]int
	var corners []int

	// When looping through the image pixels, skips the first three lines from
	// the image boundaries to constrain the surrounding circle inside the image
	// area.
	for i := 3; i < height-3; i++ {
		for j := 3; j < width-3; j++ {
			var w = i*width + j
			var p = pixels[w]

			// Loops the circle offsets to read the pixel value for the sixteen
			// surrounding pixels.
			for k := 0; k < 16; k++ {
				circlePixels[k] = pixels[w+circleOffsets[k]]
			}

			if isCorner(p, circlePixels, threshold) {
				// The pixel p is classified as a corner, as optimization increment j
				// by the circle radius 3 to skip the neighbor pixels inside the
				// surrounding circle. This can be removed without compromising the
				// result.
				corners = append(corners, j, i)
				j += 3
			}
		}
	}

	return corners
}
/**
 * Checks if the candidate pixel p is a FAST corner: p is a corner when at
 * least 9 contiguous pixels of the 16-pixel surrounding circle are all
 * brighter, or all darker, than p by more than threshold.
 */
func isCorner(p int, circlePixels [16]int, threshold int) bool {
	// Cheap rejection test before the full contiguity scan.
	if isTriviallyExcluded(circlePixels, p, threshold) {
		return false
	}

	// Try every starting position x on the circle; (x+y)&15 wraps the index
	// around the 16-pixel ring.
	for x := 0; x < 16; x++ {
		var darker = true
		var brighter = true

		for y := 0; y < 9; y++ {
			var circlePixel = circlePixels[(x+y)&15]

			if !isBrighter(p, circlePixel, threshold) {
				brighter = false
				if !darker {
					break
				}
			}

			if !isDarker(p, circlePixel, threshold) {
				darker = false
				if !brighter {
					break
				}
			}
		}

		// All 9 consecutive pixels passed one of the two tests.
		if brighter || darker {
			return true
		}
	}

	return false
}
/**
 * Fast rejection test for the corner candidate p. To be a corner the
 * candidate must be brighter or darker than 9-12 surrounding circle pixels;
 * when fewer than three of the top, right, bottom and left circle pixels are
 * brighter, and fewer than three are darker, the candidate can be excluded
 * without running the full contiguity scan.
 */
func isTriviallyExcluded(circlePixels [16]int, p int, threshold int) bool {
	// The four compass points of the surrounding circle.
	compass := [4]int{
		circlePixels[0],  // top
		circlePixels[4],  // right
		circlePixels[8],  // bottom
		circlePixels[12], // left
	}

	brighter := 0
	for _, c := range compass {
		if isBrighter(c, p, threshold) {
			brighter++
		}
	}
	if brighter >= 3 {
		return false
	}

	darker := 0
	for _, c := range compass {
		if isDarker(c, p, threshold) {
			darker++
		}
	}
	return darker < 3
}
/**
 * Reports whether the first pixel value is brighter than the second by more
 * than threshold. Note the argument names reflect only one call site; the
 * function simply tests circlePixel - p > threshold, and callers pass the
 * two pixels in whichever order matches the relation they need.
 */
func isBrighter(circlePixel int, p int, threshold int) bool {
	return circlePixel-p > threshold
}
/**
 * Reports whether the first pixel value is darker than the second by more
 * than threshold, i.e. p - circlePixel > threshold. As with isBrighter,
 * callers pick the argument order matching the relation they need.
 */
func isDarker(circlePixel int, p int, threshold int) bool {
	return p-circlePixel > threshold
}
/**
 * Returns the sixteen linear-index offsets of the Bresenham circle of radius
 * 3 surrounding a pixel, for an image of the given row width. Offsets start
 * at the top of the circle (0, -3) and proceed clockwise.
 */
func getCircleOffsets(width int) [16]int {
	// (dx, dy) positions on the radius-3 circle, clockwise from the top.
	deltas := [16][2]int{
		{0, -3}, {1, -3}, {2, -2}, {3, -1},
		{3, 0}, {3, 1}, {2, 2}, {1, 3},
		{0, 3}, {-1, 3}, {-2, 2}, {-3, 1},
		{-3, 0}, {-3, -1}, {-2, -2}, {-1, -3},
	}

	var circle [16]int
	for i, d := range deltas {
		circle[i] = d[1]*width + d[0]
	}
	return circle
}
package scan
import (
"fmt"
"reflect"
"strings"
"github.com/jmoiron/sqlx/reflectx"
)
// Allocator allocates values
type Allocator struct {
types []reflect.Type
create func(values []interface{}) reflect.Value
}
// Create assembles a reflect.Value of the target type from the given scanned
// column values (as previously produced by Allocate).
func (r *Allocator) Create(values []interface{}) reflect.Value {
	return r.create(values)
}
// Allocate returns a slice containing one freshly allocated, typed pointer
// per registered column type, in column order.
func (r *Allocator) Allocate() []interface{} {
	values := make([]interface{}, len(r.types))
	for index, typ := range r.types {
		values[index] = reflect.New(typ).Interface()
	}
	return values
}
// Set copies the named columns from next into value, recursing through
// pointers. For struct targets it copies field-by-field using the column
// names; for any other kind it assigns next wholesale.
func (r *Allocator) Set(value, next reflect.Value, columns []string) {
	switch {
	case value.Kind() == reflect.Ptr:
		// Dereference both sides and retry.
		r.Set(value.Elem(), next.Elem(), columns)
	case value.Kind() == reflect.Struct:
		for _, name := range columns {
			field := fieldByName(value.Type(), name)
			// copy the value from the source to target
			source := next.FieldByIndex(field.Index)
			target := valueByIndex(value, field.Index)
			// set the value
			target.Set(source)
		}
	default:
		value.Set(next)
	}
}
// NewAllocator returns an allocator for the given reflect.Type, dispatching
// on its kind: empty interfaces and primitives get a single-value allocator,
// pointers and structs get the corresponding wrapper; anything else is
// unsupported and returns an error.
func NewAllocator(target reflect.Type, columns []string) (*Allocator, error) {
	switch k := target.Kind(); {
	case k == reflect.Interface && target.NumMethod() == 0:
		fallthrough // interface{}
	case k == reflect.String || k >= reflect.Bool && k <= reflect.Float64:
		return NewAllocatorPrimitive(target), nil
	case k == reflect.Ptr:
		return NewAllocatorPtr(target, columns)
	case k == reflect.Struct:
		return NewAllocatorStruct(target, columns)
	default:
		return nil, fmt.Errorf("sql/scan: unsupported type ([]%s)", k)
	}
}
// NewAllocatorPrimitive returns an allocator for a single primitive (or empty
// interface) value: exactly one column, returned dereferenced.
func NewAllocatorPrimitive(typ reflect.Type) *Allocator {
	return &Allocator{
		types: []reflect.Type{typ},
		create: func(v []interface{}) reflect.Value {
			return reflect.Indirect(reflect.ValueOf(v[0]))
		},
	}
}
// NewAllocatorStruct returns the configuration for scanning an sql.Row into a
// struct: each column name is resolved to a struct field (an error is
// returned for a column without a matching field), and create copies each
// scanned value into the corresponding field of a fresh struct instance.
func NewAllocatorStruct(target reflect.Type, columns []string) (*Allocator, error) {
	var (
		types   = []reflect.Type{}
		indices = make([][]int, 0, target.NumField())
	)

	for _, name := range columns {
		// Strip a SQL-function suffix such as "(...)" and lowercase the
		// column name before matching it against the struct fields.
		name = strings.ToLower(strings.Split(name, "(")[0])
		field := fieldByName(target, name)
		// check if the field is nil
		if field == nil {
			return nil, fmt.Errorf("sql/scan: missing struct field for column: %s", name)
		}

		indices = append(indices, field.Index)
		types = append(types, field.Field.Type)
	}

	allocator := &Allocator{
		types: types,
		create: func(values []interface{}) reflect.Value {
			// Build a fresh struct and copy each scanned value into the field
			// recorded for its column (indices is captured by the closure).
			row := reflect.New(target).Elem()

			for index, value := range values {
				vector := indices[index]
				column := valueByIndex(row, vector)
				column.Set(reflect.Indirect(reflect.ValueOf(value)))
			}

			return row
		},
	}

	return allocator, nil
}
// NewAllocatorPtr returns an allocator for a pointer type by wrapping the
// element type's allocator: the created value is re-boxed behind a newly
// allocated pointer.
func NewAllocatorPtr(target reflect.Type, columns []string) (*Allocator, error) {
	target = target.Elem()

	allocator, err := NewAllocator(target, columns)
	if err != nil {
		return nil, err
	}

	create := allocator.create
	allocator.create = func(vs []interface{}) reflect.Value {
		value := create(vs)
		// Allocate a *T pointing at the created value. (The original built
		// reflect.PtrTo(value.Type()) and then took .Elem() of it, which is a
		// round-trip back to value.Type(); reflect.New does it directly.)
		ptr := reflect.New(value.Type())
		ptr.Elem().Set(value)
		return ptr
	}

	return allocator, nil
}
// valueByIndex walks target down the field-index vector (as produced by
// reflectx) and returns the addressed field. Unlike reflect's FieldByIndex it
// allocates intermediate nil struct pointers along the path so the final
// field is always settable.
func valueByIndex(target reflect.Value, vector []int) reflect.Value {
	if len(vector) == 1 {
		return target.Field(vector[0])
	}

	for depth, index := range vector {
		if depth > 0 && target.Kind() == reflect.Ptr {
			valType := target.Type().Elem()

			if valType.Kind() == reflect.Struct && target.IsNil() {
				// set the value: allocate the intermediate struct so the walk
				// can continue through it
				target.Set(reflect.New(valType))
			}

			target = target.Elem()
		}

		// field
		target = target.Field(index)
	}

	return target
}
// fieldByName resolves a (lowercased) column name to a struct field using the
// package-level reflectx mapper. When the name is not found directly it
// recurses into fields tagged as "inline", stripping an optional
// "<parent>_" prefix, and translates the child's index vector back into the
// parent's traversal space. Returns nil when no field matches.
func fieldByName(target reflect.Type, name string) *reflectx.FieldInfo {
	meta := mapper.TypeMap(target)

	if field, ok := meta.Names[name]; ok {
		return field
	}

	for _, parent := range meta.Tree.Children {
		if _, ok := parent.Options["inline"]; ok {
			if _, ok := parent.Options["prefix"]; ok {
				name = strings.TrimPrefix(name, parent.Name+"_")
			}

			if field := fieldByName(parent.Field.Type, name); field != nil {
				// translate the field index into the parent's traversal space
				index := append(meta.Tree.Index, parent.Index...)
				index = append(index, field.Index...)
				// traverse
				return meta.GetByTraversal(index)
			}
		}
	}

	return nil
}
package wire
import (
"bytes"
"fmt"
"io"
log "github.com/p9c/logi"
"github.com/p9c/chainhash"
)
// defaultTransactionAlloc is the default size used for the backing array for
// transactions. The transaction array will dynamically grow as needed, but
// this figure is intended to provide enough space for the number of
// transactions in the vast majority of blocks without needing to grow the
// backing array multiple times.
const defaultTransactionAlloc = 2048

// MaxBlocksPerMsg is the maximum number of blocks allowed per message.
const MaxBlocksPerMsg = 500

// MaxBlockPayload is the maximum bytes a block message can be in bytes.
// After Segregated Witness, the max block payload has been raised to 4MB.
const MaxBlockPayload = 4000000

// maxTxPerBlock is the maximum number of transactions that could possibly fit
// into a block, derived from the payload limit and the minimum tx size.
const maxTxPerBlock = (MaxBlockPayload / minTxPayload) + 1

// TxLoc holds locator data for the offset and length of where a transaction
// is located within a MsgBlock data buffer.
type TxLoc struct {
	// TxStart is the byte offset of the transaction within the block buffer.
	TxStart int
	// TxLen is the serialized length of the transaction in bytes.
	TxLen int
}
// MsgBlock implements the Message interface and represents a bitcoin block
// message. It is used to deliver block and transaction information in
// response to a getdata message (MsgGetData) for a given block hash.
type MsgBlock struct {
	// Header is the block header.
	Header BlockHeader
	// Transactions holds the block's transactions in block order.
	Transactions []*MsgTx
}
// AddTransaction adds a transaction to the message. It never fails; the error
// return exists for interface symmetry and is always nil.
func (msg *MsgBlock) AddTransaction(tx *MsgTx) error {
	msg.Transactions = append(msg.Transactions, tx)
	return nil
}

// ClearTransactions removes all transactions from the message, resetting the
// slice to the default pre-allocated capacity.
func (msg *MsgBlock) ClearTransactions() {
	msg.Transactions = make([]*MsgTx, 0, defaultTransactionAlloc)
}
// BtcDecode decodes r using the bitcoin protocol encoding into the receiver.
// This is part of the Message interface implementation. See Deserialize for
// decoding blocks stored to disk, such as in a database, as opposed to
// decoding blocks from the wire.
func (msg *MsgBlock) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) error {
	err := readBlockHeader(r, pver, &msg.Header)
	if err != nil {
		log.L.Error(err)
		return err
	}
	txCount, err := ReadVarInt(r, pver)
	if err != nil {
		log.L.Error(err)
		return err
	}
	// Prevent more transactions than could possibly fit into a block. It
	// would be possible to cause memory exhaustion and panics without a sane
	// upper bound on this count.
	if txCount > maxTxPerBlock {
		str := fmt.Sprintf("too many transactions to fit into a block "+
			"[count %d, max %d]", txCount, maxTxPerBlock)
		return messageError("MsgBlock.BtcDecode", str)
	}
	msg.Transactions = make([]*MsgTx, 0, txCount)
	for i := uint64(0); i < txCount; i++ {
		tx := MsgTx{}
		err := tx.BtcDecode(r, pver, enc)
		if err != nil {
			log.L.Error(err)
			return err
		}
		msg.Transactions = append(msg.Transactions, &tx)
	}
	return nil
}
// Deserialize decodes a block from r into the receiver using a format that is
// suitable for long-term storage such as a database while respecting the
// Version field in the block. This function differs from BtcDecode in that
// BtcDecode decodes from the bitcoin wire protocol as it was sent across the
// network. The wire encoding can technically differ depending on the protocol
// version and doesn't even really need to match the format of a stored block
// at all. As of the time this comment was written, the encoded block is the
// same in both instances, but there is a distinct difference and separating
// the two allows the API to be flexible enough to deal with changes.
func (msg *MsgBlock) Deserialize(r io.Reader) error {
	// At the current time, there is no difference between the wire encoding
	// at protocol version 0 and the stable long-term storage format. As a
	// result, make use of BtcDecode. Passing an encoding type of
	// WitnessEncoding indicates that the transactions within the block are
	// expected to be serialized per the structure defined in BIP0141.
	return msg.BtcDecode(r, 0, WitnessEncoding)
}

// DeserializeNoWitness decodes a block from r into the receiver similar to
// Deserialize, however it expects the transactions within the block to carry
// no witness data (legacy base encoding).
func (msg *MsgBlock) DeserializeNoWitness(r io.Reader) error {
	return msg.BtcDecode(r, 0, BaseEncoding)
}
// DeserializeTxLoc decodes r in the same manner Deserialize does, but it
// takes a byte buffer instead of a generic reader and returns a slice
// containing the start and length of each transaction within the raw data
// that is being deserialized.
func (msg *MsgBlock) DeserializeTxLoc(r *bytes.Buffer) ([]TxLoc, error) {
	fullLen := r.Len()
	// At the current time, there is no difference between the wire encoding
	// at protocol version 0 and the stable long-term storage format. As a
	// result, make use of existing wire protocol functions.
	err := readBlockHeader(r, 0, &msg.Header)
	if err != nil {
		log.L.Error(err)
		return nil, err
	}
	txCount, err := ReadVarInt(r, 0)
	if err != nil {
		log.L.Error(err)
		return nil, err
	}
	// Prevent more transactions than could possibly fit into a block. It
	// would be possible to cause memory exhaustion and panics without a sane
	// upper bound on this count.
	if txCount > maxTxPerBlock {
		str := fmt.Sprintf("too many transactions to fit into a block "+
			"[count %d, max %d]", txCount, maxTxPerBlock)
		return nil, messageError("MsgBlock.DeserializeTxLoc", str)
	}
	// Deserialize each transaction while keeping track of its location within
	// the byte stream.
	msg.Transactions = make([]*MsgTx, 0, txCount)
	txLocs := make([]TxLoc, txCount)
	for i := uint64(0); i < txCount; i++ {
		// The start offset is how many bytes of the buffer have been consumed
		// so far; the length is the additional bytes consumed by this tx.
		txLocs[i].TxStart = fullLen - r.Len()
		tx := MsgTx{}
		err := tx.Deserialize(r)
		if err != nil {
			log.L.Error(err)
			return nil, err
		}
		msg.Transactions = append(msg.Transactions, &tx)
		txLocs[i].TxLen = (fullLen - r.Len()) - txLocs[i].TxStart
	}
	return txLocs, nil
}
// BtcEncode encodes the receiver to w using the bitcoin protocol encoding.
// This is part of the Message interface implementation. See Serialize for
// encoding blocks to be stored to disk, such as in a database, as opposed to
// encoding blocks for the wire.
func (msg *MsgBlock) BtcEncode(w io.Writer, pver uint32, enc MessageEncoding) error {
	err := writeBlockHeader(w, pver, &msg.Header)
	if err != nil {
		log.L.Error(err)
		return err
	}
	// Transaction count, then each transaction in order.
	err = WriteVarInt(w, pver, uint64(len(msg.Transactions)))
	if err != nil {
		log.L.Error(err)
		return err
	}
	for _, tx := range msg.Transactions {
		err = tx.BtcEncode(w, pver, enc)
		if err != nil {
			log.L.Error(err)
			return err
		}
	}
	return nil
}
// Serialize encodes the block to w using a format that is suitable for
// long-term storage such as a database while respecting the Version field in
// the block. This function differs from BtcEncode in that BtcEncode encodes
// the block to the bitcoin wire protocol in order to be sent across the
// network. The wire encoding can technically differ depending on the protocol
// version and doesn't even really need to match the format of a stored block
// at all. As of the time this comment was written, the encoded block is the
// same in both instances, but there is a distinct difference and separating
// the two allows the API to be flexible enough to deal with changes.
func (msg *MsgBlock) Serialize(w io.Writer) error {
	// At the current time, there is no difference between the wire encoding
	// at protocol version 0 and the stable long-term storage format. As a
	// result, make use of BtcEncode. Passing WitnessEncoding indicates that
	// each transaction should be serialized using the witness structure
	// defined in BIP0141.
	return msg.BtcEncode(w, 0, WitnessEncoding)
}

// SerializeNoWitness encodes a block to w using an identical format to
// Serialize, with all (if any) witness data stripped from all transactions.
// This method allows one to selectively encode transaction witness data to
// non-upgraded peers which are unaware of the new encoding.
func (msg *MsgBlock) SerializeNoWitness(w io.Writer) error {
	return msg.BtcEncode(w, 0, BaseEncoding)
}
// SerializeSize returns the number of bytes it would take to serialize the
// block, factoring in any witness data within the transactions.
func (msg *MsgBlock) SerializeSize() int {
	// Block header bytes + Serialized varint size for the number of
	// transactions, plus each transaction's serialized size.
	n := blockHeaderLen + VarIntSerializeSize(uint64(len(msg.Transactions)))
	for _, tx := range msg.Transactions {
		n += tx.SerializeSize()
	}
	return n
}

// SerializeSizeStripped returns the number of bytes it would take to
// serialize the block, excluding any witness data (if any).
func (msg *MsgBlock) SerializeSizeStripped() int {
	// Block header bytes + Serialized varint size for the number of
	// transactions, plus each transaction's stripped serialized size.
	n := blockHeaderLen + VarIntSerializeSize(uint64(len(msg.Transactions)))
	for _, tx := range msg.Transactions {
		n += tx.SerializeSizeStripped()
	}
	return n
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgBlock) Command() string {
	return CmdBlock
}

// MaxPayloadLength returns the maximum length the payload can be for the
// receiver. This is part of the Message interface implementation. The value
// is independent of the protocol version, hence pver is unused.
func (msg *MsgBlock) MaxPayloadLength(pver uint32) uint32 {
	// Block header at 80 bytes + transaction count + max transactions which
	// can vary up to the MaxBlockPayload (including the block header and
	// transaction count).
	return MaxBlockPayload
}

// BlockHash computes the block identifier hash for this block from its header.
func (msg *MsgBlock) BlockHash() chainhash.Hash {
	return msg.Header.BlockHash()
}

// BlockHashWithAlgos computes the block identifier hash for this block,
// taking the block height h into account for algorithm selection (delegated
// to the header).
func (msg *MsgBlock) BlockHashWithAlgos(h int32) chainhash.Hash {
	return msg.Header.BlockHashWithAlgos(h)
}
// TxHashes returns a slice of hashes of all of transactions in this block, in
// block order. It never fails; the error return exists for interface
// compatibility and is always nil.
func (msg *MsgBlock) TxHashes() ([]chainhash.Hash, error) {
	hashList := make([]chainhash.Hash, 0, len(msg.Transactions))
	for _, tx := range msg.Transactions {
		hashList = append(hashList, tx.TxHash())
	}
	return hashList, nil
}
// NewMsgBlock returns a new bitcoin block message that conforms to the
// Message interface, using the given header and an empty, pre-allocated
// transaction slice. See MsgBlock for details.
func NewMsgBlock(blockHeader *BlockHeader) *MsgBlock {
	return &MsgBlock{
		Header:       *blockHeader,
		Transactions: make([]*MsgTx, 0, defaultTransactionAlloc),
	}
}
package timeperiods
import (
"errors"
"sort"
"time"
"github.com/openware/pkg/common"
)
// FindTimeRangesContainingData will break the start and end into time periods
// using the provided period. It will then check whether any comparisonTimes
// are within those periods and concatenate them.
// eg if no comparisonTimes match, you will receive 1 TimeRange of Start End
// with dataInRange = false
// eg2 if 1 comparisonTime matches in the middle of start and end, you will
// receive three ranges
func FindTimeRangesContainingData(start, end time.Time, period time.Duration, comparisonTimes []time.Time) ([]TimeRange, error) {
	// Validate all inputs up front and report every problem at once.
	var errs common.Errors
	if start.IsZero() {
		errs = append(errs, errors.New("invalid start time"))
	}
	if end.IsZero() {
		errs = append(errs, errors.New("invalid end time"))
	}
	if err := validatePeriod(period); err != nil {
		errs = append(errs, err)
	}
	if len(errs) > 0 {
		return nil, errs
	}
	var t TimePeriodCalculator
	t.periodDuration = period
	// Truncate the bounds to period boundaries so comparisons line up.
	t.start = start.Truncate(period)
	t.end = end.Truncate(period)
	t.comparisonTimes = comparisonTimes
	t.setTimePeriodExists()
	t.Sort(false)
	t.calculateRanges()
	return t.TimeRanges, nil
}
func validatePeriod(period time.Duration) error {
if period != time.Hour &&
period != time.Second &&
period != time.Minute &&
period != time.Hour*24 {
return errors.New("invalid period")
}
return nil
}
// CalculateTimePeriodsInRange can break down start and end times into time
// periods, eg 1 hourly intervals. Both bounds are truncated to the period
// before splitting; start is inclusive and end is exclusive.
func CalculateTimePeriodsInRange(start, end time.Time, period time.Duration) ([]TimePeriod, error) {
	// Validate all inputs up front and report every problem at once.
	var errs common.Errors
	if start.IsZero() {
		errs = append(errs, errors.New("invalid start time"))
	}
	if end.IsZero() {
		errs = append(errs, errors.New("invalid end time"))
	}
	if err := validatePeriod(period); err != nil {
		errs = append(errs, err)
	}
	if len(errs) > 0 {
		return nil, errs
	}
	var t TimePeriodCalculator
	t.periodDuration = period
	t.start = start.Truncate(period)
	t.end = end.Truncate(period)
	t.calculatePeriods()
	return t.TimePeriods, nil
}
// calculateRanges builds t.TimeRanges by coalescing consecutive TimePeriods
// that share the same dataInRange flag into a single TimeRange.
func (t *TimePeriodCalculator) calculateRanges() {
	var tr TimeRange
	for i := range t.TimePeriods {
		// A change of the dataInRange flag between neighbouring periods
		// closes the current range. (The original spelled XOR as
		// (a && !b) || (!a && b); a != b is equivalent.)
		if i != 0 && t.TimePeriods[i].dataInRange != t.TimePeriods[i-1].dataInRange {
			tr.HasDataInRange = t.TimePeriods[i-1].dataInRange
			tr.EndOfRange = t.TimePeriods[i].Time
			t.TimeRanges = append(t.TimeRanges, tr)
			tr = TimeRange{}
		}
		if tr.StartOfRange.IsZero() {
			// start of new time range
			tr.StartOfRange = t.TimePeriods[i].Time
		}
	}
	// Close the trailing range, if any, at the overall end time.
	if !tr.StartOfRange.IsZero() {
		if tr.EndOfRange.IsZero() {
			tr.EndOfRange = t.end
		}
		tr.HasDataInRange = t.TimePeriods[len(t.TimePeriods)-1].dataInRange
		t.TimeRanges = append(t.TimeRanges, tr)
	}
}
// calculatePeriods fills t.TimePeriods with one TimePeriod per periodDuration
// step from t.start (inclusive) up to t.end (exclusive). It is a no-op when
// either bound is zero or start is after end.
func (t *TimePeriodCalculator) calculatePeriods() {
	if t.start.IsZero() || t.end.IsZero() {
		return
	}
	if t.start.After(t.end) {
		return
	}
	// !Equal && !After is exactly Before; iterate strictly up to t.end.
	for cursor := t.start; cursor.Before(t.end); cursor = cursor.Add(t.periodDuration) {
		t.TimePeriods = append(t.TimePeriods, TimePeriod{
			Time:        cursor,
			dataInRange: false,
		})
	}
}
// setTimePeriodExists compares loaded comparisonTimes against calculated
// TimePeriods to determine whether there is existing data within each time
// period. A period matches when any comparison time, truncated to the period
// duration, equals the period's start time.
func (t *TimePeriodCalculator) setTimePeriodExists() {
	t.calculatePeriods()
	for i := range t.TimePeriods {
		for j := range t.comparisonTimes {
			if t.comparisonTimes[j].Truncate(t.periodDuration).Equal(t.TimePeriods[i].Time) {
				t.TimePeriods[i].dataInRange = true
				// One match is enough for this period.
				break
			}
		}
	}
}
// Sort will sort the time period asc or desc
func (t *TimePeriodCalculator) Sort(desc bool) {
sort.Slice(t.TimePeriods, func(i, j int) bool {
if desc {
return t.TimePeriods[i].Time.After(t.TimePeriods[j].Time)
}
return t.TimePeriods[i].Time.Before(t.TimePeriods[j].Time)
})
} | common/timeperiods/timeperiods.go | 0.654122 | 0.598782 | timeperiods.go | starcoder |
package wkb
import (
"bytes"
"database/sql/driver"
"fmt"
"github.com/twpayne/go-geom"
"github.com/twpayne/go-geom/encoding/wkbcommon"
)
// ErrExpectedByteSlice is returned when a []byte is expected.
type ErrExpectedByteSlice struct {
	// Value is the value that was received instead of a []byte.
	Value interface{}
}

// Error implements the error interface, reporting the dynamic type of the
// unexpected value.
func (e ErrExpectedByteSlice) Error() string {
	return fmt.Sprintf("wkb: want []byte, got %T", e.Value)
}
// The wrapper types below pair a go-geom geometry with database
// scanning/valuing so they can be used directly as sql column targets.

// A Point is a WKB-encoded Point that implements the sql.Scanner and
// driver.Valuer interfaces.
type Point struct {
	*geom.Point
}

// A LineString is a WKB-encoded LineString that implements the sql.Scanner and
// driver.Valuer interfaces.
type LineString struct {
	*geom.LineString
}

// A Polygon is a WKB-encoded Polygon that implements the sql.Scanner and
// driver.Valuer interfaces.
type Polygon struct {
	*geom.Polygon
}

// A MultiPoint is a WKB-encoded MultiPoint that implements the sql.Scanner and
// driver.Valuer interfaces.
type MultiPoint struct {
	*geom.MultiPoint
}

// A MultiLineString is a WKB-encoded MultiLineString that implements the
// sql.Scanner and driver.Valuer interfaces.
type MultiLineString struct {
	*geom.MultiLineString
}

// A MultiPolygon is a WKB-encoded MultiPolygon that implements the sql.Scanner
// and driver.Valuer interfaces.
type MultiPolygon struct {
	*geom.MultiPolygon
}

// A GeometryCollection is a WKB-encoded GeometryCollection that implements the
// sql.Scanner and driver.Valuer interfaces.
type GeometryCollection struct {
	*geom.GeometryCollection
}
// Scan scans from a []byte. src must be a []byte holding a WKB-encoded
// Point; any other Go type yields ErrExpectedByteSlice and a WKB payload of
// a different geometry kind yields wkbcommon.ErrUnexpectedType.
func (p *Point) Scan(src interface{}) error {
	b, ok := src.([]byte)
	if !ok {
		return ErrExpectedByteSlice{Value: src}
	}
	got, err := Unmarshal(b)
	if err != nil {
		return err
	}
	p1, ok := got.(*geom.Point)
	if !ok {
		// Report the geometry that was actually decoded. After a failed type
		// assertion p1 is nil, so using it here would misreport the received
		// type as *geom.Point.
		return wkbcommon.ErrUnexpectedType{Got: got, Want: p}
	}
	p.Point = p1
	return nil
}

// Value returns the WKB encoding of p.
func (p *Point) Value() (driver.Value, error) {
	return value(p.Point)
}
// Scan scans from a []byte. src must be a []byte holding a WKB-encoded
// LineString; any other Go type yields ErrExpectedByteSlice and a WKB payload
// of a different geometry kind yields wkbcommon.ErrUnexpectedType.
func (ls *LineString) Scan(src interface{}) error {
	b, ok := src.([]byte)
	if !ok {
		return ErrExpectedByteSlice{Value: src}
	}
	got, err := Unmarshal(b)
	if err != nil {
		return err
	}
	ls1, ok := got.(*geom.LineString)
	if !ok {
		// ls1 is nil after the failed assertion; report the decoded geometry.
		return wkbcommon.ErrUnexpectedType{Got: got, Want: ls}
	}
	ls.LineString = ls1
	return nil
}

// Value returns the WKB encoding of ls.
func (ls *LineString) Value() (driver.Value, error) {
	return value(ls.LineString)
}
// Scan scans from a []byte. src must be a []byte holding a WKB-encoded
// Polygon; any other Go type yields ErrExpectedByteSlice and a WKB payload of
// a different geometry kind yields wkbcommon.ErrUnexpectedType.
func (p *Polygon) Scan(src interface{}) error {
	b, ok := src.([]byte)
	if !ok {
		return ErrExpectedByteSlice{Value: src}
	}
	got, err := Unmarshal(b)
	if err != nil {
		return err
	}
	p1, ok := got.(*geom.Polygon)
	if !ok {
		// p1 is nil after the failed assertion; report the decoded geometry.
		return wkbcommon.ErrUnexpectedType{Got: got, Want: p}
	}
	p.Polygon = p1
	return nil
}

// Value returns the WKB encoding of p.
func (p *Polygon) Value() (driver.Value, error) {
	return value(p.Polygon)
}
// Scan scans from a []byte. src must be a []byte holding a WKB-encoded
// MultiPoint; any other Go type yields ErrExpectedByteSlice and a WKB payload
// of a different geometry kind yields wkbcommon.ErrUnexpectedType.
func (mp *MultiPoint) Scan(src interface{}) error {
	b, ok := src.([]byte)
	if !ok {
		return ErrExpectedByteSlice{Value: src}
	}
	got, err := Unmarshal(b)
	if err != nil {
		return err
	}
	mp1, ok := got.(*geom.MultiPoint)
	if !ok {
		// mp1 is nil after the failed assertion; report the decoded geometry.
		return wkbcommon.ErrUnexpectedType{Got: got, Want: mp}
	}
	mp.MultiPoint = mp1
	return nil
}

// Value returns the WKB encoding of mp.
func (mp *MultiPoint) Value() (driver.Value, error) {
	return value(mp.MultiPoint)
}
// Scan scans from a []byte. src must be a []byte holding a WKB-encoded
// MultiLineString; any other Go type yields ErrExpectedByteSlice and a WKB
// payload of a different geometry kind yields wkbcommon.ErrUnexpectedType.
func (mls *MultiLineString) Scan(src interface{}) error {
	b, ok := src.([]byte)
	if !ok {
		return ErrExpectedByteSlice{Value: src}
	}
	got, err := Unmarshal(b)
	if err != nil {
		return err
	}
	mls1, ok := got.(*geom.MultiLineString)
	if !ok {
		// mls1 is nil after the failed assertion; report the decoded geometry.
		return wkbcommon.ErrUnexpectedType{Got: got, Want: mls}
	}
	mls.MultiLineString = mls1
	return nil
}

// Value returns the WKB encoding of mls.
func (mls *MultiLineString) Value() (driver.Value, error) {
	return value(mls.MultiLineString)
}
// Scan scans from a []byte. src must be a []byte holding a WKB-encoded
// MultiPolygon; any other Go type yields ErrExpectedByteSlice and a WKB
// payload of a different geometry kind yields wkbcommon.ErrUnexpectedType.
func (mp *MultiPolygon) Scan(src interface{}) error {
	b, ok := src.([]byte)
	if !ok {
		return ErrExpectedByteSlice{Value: src}
	}
	got, err := Unmarshal(b)
	if err != nil {
		return err
	}
	mp1, ok := got.(*geom.MultiPolygon)
	if !ok {
		// mp1 is nil after the failed assertion; report the decoded geometry.
		return wkbcommon.ErrUnexpectedType{Got: got, Want: mp}
	}
	mp.MultiPolygon = mp1
	return nil
}

// Value returns the WKB encoding of mp.
func (mp *MultiPolygon) Value() (driver.Value, error) {
	return value(mp.MultiPolygon)
}
// Scan scans from a []byte. src must be a []byte holding a WKB-encoded
// GeometryCollection; any other Go type yields ErrExpectedByteSlice and a WKB
// payload of a different geometry kind yields wkbcommon.ErrUnexpectedType.
func (gc *GeometryCollection) Scan(src interface{}) error {
	b, ok := src.([]byte)
	if !ok {
		return ErrExpectedByteSlice{Value: src}
	}
	got, err := Unmarshal(b)
	if err != nil {
		return err
	}
	gc1, ok := got.(*geom.GeometryCollection)
	if !ok {
		// gc1 is nil after the failed assertion; report the decoded geometry.
		return wkbcommon.ErrUnexpectedType{Got: got, Want: gc}
	}
	gc.GeometryCollection = gc1
	return nil
}

// Value returns the WKB encoding of gc.
func (gc *GeometryCollection) Value() (driver.Value, error) {
	return value(gc.GeometryCollection)
}
func value(g geom.T) (driver.Value, error) {
b := &bytes.Buffer{}
if err := Write(b, NDR, g); err != nil {
return nil, err
}
return b.Bytes(), nil
} | vendor/github.com/whosonfirst/go-whosonfirst-static/vendor/github.com/whosonfirst/go-whosonfirst-readwrite-sqlite/vendor/github.com/whosonfirst/go-whosonfirst-sqlite-features/vendor/github.com/twpayne/go-geom/encoding/wkb/sql.go | 0.75392 | 0.461866 | sql.go | starcoder |
package ImageData
import (
"image/color"
)
// LUT from https://github.com/guidocioni/eumetsat-python/blob/master/IR4AVHRR6.cpt
//
// TemperatureScaleLUT is a 256-entry color lookup table used to render
// temperature indices (see LutTempToIndex) as colors. All entries are fully
// opaque. The leading-zero literals in the near-black tail of the original
// table (07/05/02 — octal in Go, though the values coincide) have been
// normalized to plain decimal for clarity.
var TemperatureScaleLUT = []color.Color{
	// Indices 0-18: white padding at the cold end of the scale.
	color.RGBA{R: 255, G: 255, B: 255, A: 255},
	color.RGBA{R: 255, G: 255, B: 255, A: 255},
	color.RGBA{R: 255, G: 255, B: 255, A: 255},
	color.RGBA{R: 255, G: 255, B: 255, A: 255},
	color.RGBA{R: 255, G: 255, B: 255, A: 255},
	color.RGBA{R: 255, G: 255, B: 255, A: 255},
	color.RGBA{R: 255, G: 255, B: 255, A: 255},
	color.RGBA{R: 255, G: 255, B: 255, A: 255},
	color.RGBA{R: 255, G: 255, B: 255, A: 255},
	color.RGBA{R: 255, G: 255, B: 255, A: 255},
	color.RGBA{R: 255, G: 255, B: 255, A: 255},
	color.RGBA{R: 255, G: 255, B: 255, A: 255},
	color.RGBA{R: 255, G: 255, B: 255, A: 255},
	color.RGBA{R: 255, G: 255, B: 255, A: 255},
	color.RGBA{R: 255, G: 255, B: 255, A: 255},
	color.RGBA{R: 255, G: 255, B: 255, A: 255},
	color.RGBA{R: 255, G: 255, B: 255, A: 255},
	color.RGBA{R: 255, G: 255, B: 255, A: 255},
	color.RGBA{R: 255, G: 255, B: 255, A: 255},
	// Indices 19-29: purple-to-pink ramp.
	color.RGBA{R: 127, G: 0, B: 127, A: 255},
	color.RGBA{R: 140, G: 13, B: 135, A: 255},
	color.RGBA{R: 153, G: 25, B: 142, A: 255},
	color.RGBA{R: 165, G: 38, B: 150, A: 255},
	color.RGBA{R: 178, G: 51, B: 157, A: 255},
	color.RGBA{R: 191, G: 64, B: 165, A: 255},
	color.RGBA{R: 204, G: 76, B: 173, A: 255},
	color.RGBA{R: 217, G: 89, B: 180, A: 255},
	color.RGBA{R: 229, G: 102, B: 188, A: 255},
	color.RGBA{R: 242, G: 114, B: 195, A: 255},
	color.RGBA{R: 255, G: 127, B: 203, A: 255},
	// Indices 30-39: light-to-dark gray ramp ending in black.
	color.RGBA{R: 230, G: 230, B: 230, A: 255},
	color.RGBA{R: 205, G: 205, B: 205, A: 255},
	color.RGBA{R: 179, G: 179, B: 179, A: 255},
	color.RGBA{R: 154, G: 154, B: 154, A: 255},
	color.RGBA{R: 128, G: 128, B: 128, A: 255},
	color.RGBA{R: 103, G: 103, B: 103, A: 255},
	color.RGBA{R: 77, G: 77, B: 77, A: 255},
	color.RGBA{R: 52, G: 52, B: 52, A: 255},
	color.RGBA{R: 26, G: 26, B: 26, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	// Indices 40-49: black-to-red ramp.
	color.RGBA{R: 26, G: 0, B: 0, A: 255},
	color.RGBA{R: 51, G: 0, B: 0, A: 255},
	color.RGBA{R: 77, G: 0, B: 0, A: 255},
	color.RGBA{R: 102, G: 0, B: 0, A: 255},
	color.RGBA{R: 128, G: 0, B: 0, A: 255},
	color.RGBA{R: 153, G: 0, B: 0, A: 255},
	color.RGBA{R: 179, G: 0, B: 0, A: 255},
	color.RGBA{R: 204, G: 0, B: 0, A: 255},
	color.RGBA{R: 230, G: 0, B: 0, A: 255},
	color.RGBA{R: 255, G: 0, B: 0, A: 255},
	// Indices 50-59: red-to-yellow ramp.
	color.RGBA{R: 255, G: 26, B: 0, A: 255},
	color.RGBA{R: 255, G: 51, B: 0, A: 255},
	color.RGBA{R: 255, G: 77, B: 0, A: 255},
	color.RGBA{R: 255, G: 102, B: 0, A: 255},
	color.RGBA{R: 255, G: 128, B: 0, A: 255},
	color.RGBA{R: 255, G: 153, B: 0, A: 255},
	color.RGBA{R: 255, G: 179, B: 0, A: 255},
	color.RGBA{R: 255, G: 204, B: 0, A: 255},
	color.RGBA{R: 255, G: 230, B: 0, A: 255},
	color.RGBA{R: 255, G: 255, B: 0, A: 255},
	// Indices 60-69: yellow-to-green ramp.
	color.RGBA{R: 230, G: 255, B: 0, A: 255},
	color.RGBA{R: 204, G: 255, B: 0, A: 255},
	color.RGBA{R: 179, G: 255, B: 0, A: 255},
	color.RGBA{R: 153, G: 255, B: 0, A: 255},
	color.RGBA{R: 128, G: 255, B: 0, A: 255},
	color.RGBA{R: 102, G: 255, B: 0, A: 255},
	color.RGBA{R: 77, G: 255, B: 0, A: 255},
	color.RGBA{R: 51, G: 255, B: 0, A: 255},
	color.RGBA{R: 26, G: 255, B: 0, A: 255},
	color.RGBA{R: 0, G: 255, B: 0, A: 255},
	// Indices 70-81: green-to-dark-blue ramp.
	color.RGBA{R: 0, G: 234, B: 10, A: 255},
	color.RGBA{R: 0, G: 213, B: 19, A: 255},
	color.RGBA{R: 0, G: 191, B: 29, A: 255},
	color.RGBA{R: 0, G: 170, B: 38, A: 255},
	color.RGBA{R: 0, G: 149, B: 48, A: 255},
	color.RGBA{R: 0, G: 128, B: 58, A: 255},
	color.RGBA{R: 0, G: 106, B: 67, A: 255},
	color.RGBA{R: 0, G: 85, B: 77, A: 255},
	color.RGBA{R: 0, G: 64, B: 86, A: 255},
	color.RGBA{R: 0, G: 43, B: 96, A: 255},
	color.RGBA{R: 0, G: 21, B: 105, A: 255},
	color.RGBA{R: 0, G: 0, B: 115, A: 255},
	// Indices 82-102: dark-blue-to-cyan ramp (index 82 repeats 81's color,
	// as in the upstream .cpt table).
	color.RGBA{R: 0, G: 0, B: 115, A: 255},
	color.RGBA{R: 0, G: 13, B: 122, A: 255},
	color.RGBA{R: 0, G: 26, B: 129, A: 255},
	color.RGBA{R: 0, G: 38, B: 136, A: 255},
	color.RGBA{R: 0, G: 51, B: 143, A: 255},
	color.RGBA{R: 0, G: 64, B: 150, A: 255},
	color.RGBA{R: 0, G: 77, B: 157, A: 255},
	color.RGBA{R: 0, G: 89, B: 164, A: 255},
	color.RGBA{R: 0, G: 102, B: 171, A: 255},
	color.RGBA{R: 0, G: 115, B: 178, A: 255},
	color.RGBA{R: 0, G: 128, B: 185, A: 255},
	color.RGBA{R: 0, G: 140, B: 192, A: 255},
	color.RGBA{R: 0, G: 153, B: 199, A: 255},
	color.RGBA{R: 0, G: 166, B: 206, A: 255},
	color.RGBA{R: 0, G: 179, B: 213, A: 255},
	color.RGBA{R: 0, G: 191, B: 220, A: 255},
	color.RGBA{R: 0, G: 204, B: 227, A: 255},
	color.RGBA{R: 0, G: 217, B: 234, A: 255},
	color.RGBA{R: 0, G: 230, B: 241, A: 255},
	color.RGBA{R: 0, G: 242, B: 248, A: 255},
	color.RGBA{R: 0, G: 255, B: 255, A: 255},
	// Indices 103-109: white band.
	color.RGBA{R: 255, G: 255, B: 255, A: 255},
	color.RGBA{R: 255, G: 255, B: 255, A: 255},
	color.RGBA{R: 255, G: 255, B: 255, A: 255},
	color.RGBA{R: 255, G: 255, B: 255, A: 255},
	color.RGBA{R: 255, G: 255, B: 255, A: 255},
	color.RGBA{R: 255, G: 255, B: 255, A: 255},
	color.RGBA{R: 255, G: 255, B: 255, A: 255},
	// Indices 110-209: long grayscale descent from near-white to near-black.
	color.RGBA{R: 254, G: 254, B: 254, A: 255},
	color.RGBA{R: 252, G: 252, B: 252, A: 255},
	color.RGBA{R: 249, G: 249, B: 249, A: 255},
	color.RGBA{R: 247, G: 247, B: 247, A: 255},
	color.RGBA{R: 244, G: 244, B: 244, A: 255},
	color.RGBA{R: 242, G: 242, B: 242, A: 255},
	color.RGBA{R: 239, G: 239, B: 239, A: 255},
	color.RGBA{R: 237, G: 237, B: 237, A: 255},
	color.RGBA{R: 234, G: 234, B: 234, A: 255},
	color.RGBA{R: 232, G: 232, B: 232, A: 255},
	color.RGBA{R: 229, G: 229, B: 229, A: 255},
	color.RGBA{R: 226, G: 226, B: 226, A: 255},
	color.RGBA{R: 224, G: 224, B: 224, A: 255},
	color.RGBA{R: 221, G: 221, B: 221, A: 255},
	color.RGBA{R: 219, G: 219, B: 219, A: 255},
	color.RGBA{R: 216, G: 216, B: 216, A: 255},
	color.RGBA{R: 214, G: 214, B: 214, A: 255},
	color.RGBA{R: 211, G: 211, B: 211, A: 255},
	color.RGBA{R: 209, G: 209, B: 209, A: 255},
	color.RGBA{R: 206, G: 206, B: 206, A: 255},
	color.RGBA{R: 203, G: 203, B: 203, A: 255},
	color.RGBA{R: 201, G: 201, B: 201, A: 255},
	color.RGBA{R: 198, G: 198, B: 198, A: 255},
	color.RGBA{R: 196, G: 196, B: 196, A: 255},
	color.RGBA{R: 193, G: 193, B: 193, A: 255},
	color.RGBA{R: 191, G: 191, B: 191, A: 255},
	color.RGBA{R: 188, G: 188, B: 188, A: 255},
	color.RGBA{R: 186, G: 186, B: 186, A: 255},
	color.RGBA{R: 183, G: 183, B: 183, A: 255},
	color.RGBA{R: 181, G: 181, B: 181, A: 255},
	color.RGBA{R: 178, G: 178, B: 178, A: 255},
	color.RGBA{R: 175, G: 175, B: 175, A: 255},
	color.RGBA{R: 173, G: 173, B: 173, A: 255},
	color.RGBA{R: 170, G: 170, B: 170, A: 255},
	color.RGBA{R: 168, G: 168, B: 168, A: 255},
	color.RGBA{R: 165, G: 165, B: 165, A: 255},
	color.RGBA{R: 163, G: 163, B: 163, A: 255},
	color.RGBA{R: 160, G: 160, B: 160, A: 255},
	color.RGBA{R: 158, G: 158, B: 158, A: 255},
	color.RGBA{R: 155, G: 155, B: 155, A: 255},
	color.RGBA{R: 152, G: 152, B: 152, A: 255},
	color.RGBA{R: 150, G: 150, B: 150, A: 255},
	color.RGBA{R: 147, G: 147, B: 147, A: 255},
	color.RGBA{R: 145, G: 145, B: 145, A: 255},
	color.RGBA{R: 142, G: 142, B: 142, A: 255},
	color.RGBA{R: 140, G: 140, B: 140, A: 255},
	color.RGBA{R: 137, G: 137, B: 137, A: 255},
	color.RGBA{R: 135, G: 135, B: 135, A: 255},
	color.RGBA{R: 132, G: 132, B: 132, A: 255},
	color.RGBA{R: 130, G: 130, B: 130, A: 255},
	color.RGBA{R: 127, G: 127, B: 127, A: 255},
	color.RGBA{R: 124, G: 124, B: 124, A: 255},
	color.RGBA{R: 122, G: 122, B: 122, A: 255},
	color.RGBA{R: 119, G: 119, B: 119, A: 255},
	color.RGBA{R: 117, G: 117, B: 117, A: 255},
	color.RGBA{R: 114, G: 114, B: 114, A: 255},
	color.RGBA{R: 112, G: 112, B: 112, A: 255},
	color.RGBA{R: 109, G: 109, B: 109, A: 255},
	color.RGBA{R: 107, G: 107, B: 107, A: 255},
	color.RGBA{R: 104, G: 104, B: 104, A: 255},
	color.RGBA{R: 101, G: 101, B: 101, A: 255},
	color.RGBA{R: 99, G: 99, B: 99, A: 255},
	color.RGBA{R: 96, G: 96, B: 96, A: 255},
	color.RGBA{R: 94, G: 94, B: 94, A: 255},
	color.RGBA{R: 91, G: 91, B: 91, A: 255},
	color.RGBA{R: 89, G: 89, B: 89, A: 255},
	color.RGBA{R: 86, G: 86, B: 86, A: 255},
	color.RGBA{R: 84, G: 84, B: 84, A: 255},
	color.RGBA{R: 81, G: 81, B: 81, A: 255},
	color.RGBA{R: 79, G: 79, B: 79, A: 255},
	color.RGBA{R: 76, G: 76, B: 76, A: 255},
	color.RGBA{R: 73, G: 73, B: 73, A: 255},
	color.RGBA{R: 71, G: 71, B: 71, A: 255},
	color.RGBA{R: 68, G: 68, B: 68, A: 255},
	color.RGBA{R: 66, G: 66, B: 66, A: 255},
	color.RGBA{R: 63, G: 63, B: 63, A: 255},
	color.RGBA{R: 61, G: 61, B: 61, A: 255},
	color.RGBA{R: 58, G: 58, B: 58, A: 255},
	color.RGBA{R: 56, G: 56, B: 56, A: 255},
	color.RGBA{R: 53, G: 53, B: 53, A: 255},
	color.RGBA{R: 50, G: 50, B: 50, A: 255},
	color.RGBA{R: 48, G: 48, B: 48, A: 255},
	color.RGBA{R: 45, G: 45, B: 45, A: 255},
	color.RGBA{R: 43, G: 43, B: 43, A: 255},
	color.RGBA{R: 40, G: 40, B: 40, A: 255},
	color.RGBA{R: 38, G: 38, B: 38, A: 255},
	color.RGBA{R: 35, G: 35, B: 35, A: 255},
	color.RGBA{R: 33, G: 33, B: 33, A: 255},
	color.RGBA{R: 30, G: 30, B: 30, A: 255},
	color.RGBA{R: 28, G: 28, B: 28, A: 255},
	color.RGBA{R: 25, G: 25, B: 25, A: 255},
	color.RGBA{R: 22, G: 22, B: 22, A: 255},
	color.RGBA{R: 20, G: 20, B: 20, A: 255},
	color.RGBA{R: 17, G: 17, B: 17, A: 255},
	color.RGBA{R: 15, G: 15, B: 15, A: 255},
	color.RGBA{R: 12, G: 12, B: 12, A: 255},
	color.RGBA{R: 10, G: 10, B: 10, A: 255},
	color.RGBA{R: 7, G: 7, B: 7, A: 255},
	color.RGBA{R: 5, G: 5, B: 5, A: 255},
	color.RGBA{R: 2, G: 2, B: 2, A: 255},
	// Indices 210-255: black tail at the hot end of the scale.
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
	color.RGBA{R: 0, G: 0, B: 0, A: 255},
}
const (
	// colorLutLength is the number of entries in a color lookup table.
	colorLutLength = 256
	// DefaultMinimumTemperature is the default lower bound of the scale
	// (presumably Kelvin: 173..373 spans -100..+100 degrees C — confirm).
	DefaultMinimumTemperature = 173
	// DefaultMaximumTemperature is the default upper bound of the scale.
	DefaultMaximumTemperature = 373
)

// LutTempToIndex converts a temperature to an index into a 256-entry color
// LUT by linearly mapping [minTemperature, maxTemperature] onto [0, 255] and
// clamping out-of-range values.
func LutTempToIndex(minTemperature, maxTemperature, temp float32) int {
	scale := colorLutLength / (maxTemperature - minTemperature)
	idx := (temp - minTemperature) * scale
	switch {
	case idx < 0:
		return 0
	case idx > 255:
		return 255
	default:
		return int(idx)
	}
}
// ScaleLutToColor maps every temperature in baseLut to a color by converting
// it to an index into colorLut via LutTempToIndex. The result has one color
// per input temperature.
func ScaleLutToColor(minTemperature, maxTemperature float32, baseLut []float32, colorLut []color.Color) []color.Color {
	out := make([]color.Color, len(baseLut))
	for i, temp := range baseLut {
		out[i] = colorLut[LutTempToIndex(minTemperature, maxTemperature, temp)]
	}
	return out
}
func LutIndexToTemperature(minTemperature, maxTemperature float32, index int) float32 {
scale := colorLutLength / float32(maxTemperature-minTemperature)
temp := float32(index)
temp /= scale
temp += minTemperature
return temp
} | ImageProcessor/ImageData/temperatureScale.go | 0.872619 | 0.481576 | temperatureScale.go | starcoder |
package types
import (
"bytes"
"github.com/attic-labs/noms/go/hash"
)
// ValueCallback is the callback invoked for each child Value visited by
// WalkValues.
type ValueCallback func(v Value)

// RefCallback is the callback invoked for each Ref visited by WalkRefs.
type RefCallback func(ref Ref)

// Valuable is an interface from which a Value can be retrieved.
type Valuable interface {
	// Kind is the NomsKind describing the kind of value this is.
	Kind() NomsKind

	// Value returns the Value this Valuable represents.
	Value() Value
}

// Emptyable is an interface for Values which may or may not be empty.
type Emptyable interface {
	// Empty reports whether the value is empty.
	Empty() bool
}

// Value is the interface all Noms values implement.
type Value interface {
	Valuable

	// Equals determines if two different Noms values represents the same underlying value.
	Equals(other Value) bool

	// Less determines if this Noms value is less than another Noms value.
	// When comparing two Noms values and both are comparable and the same type (Bool, Number or
	// String) then the natural ordering is used. For other Noms values the Hash of the value is
	// used. When comparing Noms values of different type the following ordering is used:
	// Bool < Number < String < everything else.
	Less(other Value) bool

	// Hash is the hash of the value. All Noms values have a unique hash and if two values have the
	// same hash they must be equal.
	Hash() hash.Hash

	// WalkValues iterates over the immediate children of this value in the DAG, if any, not including
	// Type()
	WalkValues(ValueCallback)

	// WalkRefs iterates over the refs to the underlying chunks. If this value is a collection that has been
	// chunked then this will return the refs of the sub trees of the prolly-tree.
	WalkRefs(RefCallback)

	// typeOf is the internal implementation of types.TypeOf. It is not normalized
	// and unions might have a single element, duplicates and be in the wrong
	// order.
	typeOf() *Type

	// writeTo writes the encoded version of the value to a nomsWriter.
	writeTo(nomsWriter)
}
// ValueSlice is a slice of Values; it implements sort.Interface using
// Value.Less ordering.
type ValueSlice []Value

// Len returns the number of Values in the slice.
func (vs ValueSlice) Len() int { return len(vs) }

// Swap exchanges the Values at indices i and j.
func (vs ValueSlice) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] }

// Less reports whether the Value at i sorts before the Value at j.
func (vs ValueSlice) Less(i, j int) bool { return vs[i].Less(vs[j]) }

// Equals reports whether both slices have the same length and pairwise-equal
// elements (order-sensitive).
func (vs ValueSlice) Equals(other ValueSlice) bool {
	if vs.Len() != other.Len() {
		return false
	}

	for i, v := range vs {
		if !v.Equals(other[i]) {
			return false
		}
	}

	return true
}
// Contains reports whether vs holds a Value equal to v.
func (vs ValueSlice) Contains(v Value) bool {
	for _, candidate := range vs {
		// The original loop variable shadowed the parameter v, so every
		// element was compared against itself and Contains returned true for
		// any non-empty slice regardless of the argument.
		if candidate.Equals(v) {
			return true
		}
	}
	return false
}
// valueReadWriter is implemented by values that can report the
// ValueReadWriter they are associated with.
type valueReadWriter interface {
	valueReadWriter() ValueReadWriter
}

// valueImpl is the common backing for decoded values: the raw encoded bytes,
// offsets into them, and the originating ValueReadWriter.
type valueImpl struct {
	vrw ValueReadWriter
	buff []byte
	offsets []uint32
}
// valueReadWriter returns the ValueReadWriter this value is associated with.
func (v valueImpl) valueReadWriter() ValueReadWriter {
	return v.vrw
}

// writeTo copies the already-encoded bytes verbatim into enc.
func (v valueImpl) writeTo(enc nomsWriter) {
	enc.writeRaw(v.buff)
}

// valueBytes returns the raw encoded bytes backing this value.
func (v valueImpl) valueBytes() []byte {
	return v.buff
}

// IsZeroValue can be used to test if a Value is the same as T{}.
func (v valueImpl) IsZeroValue() bool {
	return v.buff == nil
}

// Hash returns the hash of the raw encoded bytes.
func (v valueImpl) Hash() hash.Hash {
	return hash.Of(v.buff)
}

// decoder returns a decoder positioned at the start of the encoded bytes.
func (v valueImpl) decoder() valueDecoder {
	return newValueDecoder(v.buff, v.vrw)
}

// decoderAtOffset returns a decoder positioned offset bytes into the encoding.
func (v valueImpl) decoderAtOffset(offset int) valueDecoder {
	return newValueDecoder(v.buff[offset:], v.vrw)
}

// asValueImpl returns the receiver; it satisfies the asValueImpl interface.
func (v valueImpl) asValueImpl() valueImpl {
	return v
}

// Equals compares raw encodings: two values are equal iff the other value is
// also backed by a valueImpl and the encoded bytes are identical.
func (v valueImpl) Equals(other Value) bool {
	if otherValueImpl, ok := other.(asValueImpl); ok {
		return bytes.Equal(v.buff, otherValueImpl.asValueImpl().buff)
	}
	return false
}

// Less orders values using the package-wide valueLess comparison.
func (v valueImpl) Less(other Value) bool {
	return valueLess(v, other)
}

// WalkRefs walks the refs embedded in the raw encoded bytes.
func (v valueImpl) WalkRefs(cb RefCallback) {
	walkRefs(v.valueBytes(), cb)
}

// asValueImpl is implemented by values backed by a valueImpl.
type asValueImpl interface {
	asValueImpl() valueImpl
}
func (v valueImpl) Kind() NomsKind {
return NomsKind(v.buff[0])
} | go/types/value.go | 0.771241 | 0.583708 | value.go | starcoder |
package geometry
import (
"math"
"github.com/tab58/v1/spatial/pkg/numeric"
)
// Point3DReader is a read-only interface for 3D points.
type Point3DReader interface {
	GetX() float64
	GetY() float64
	GetZ() float64

	Clone() *Point3D
	AsVector() *Vector3D
	DistanceTo(q Point3DReader) (float64, error)
	IsEqualTo(q Point3DReader, tol float64) (bool, error)
}

// Point3DWriter is a write-only interface for 3D points.
type Point3DWriter interface {
	SetX(float64)
	SetY(float64)
	SetZ(float64)
}

// Origin3D is the canonical origin in 3D space. It is exposed through the
// read-only interface and should be treated as immutable shared state.
var Origin3D Point3DReader = &Point3D{X: 0, Y: 0, Z: 0}

// Point3D represents a 3D point.
type Point3D struct {
	X float64
	Y float64
	Z float64
}
// Accessors implementing Point3DReader and Point3DWriter.

// GetX returns the x-coordinate of the point.
func (p *Point3D) GetX() float64 {
	return p.X
}

// GetY returns the y-coordinate of the point.
func (p *Point3D) GetY() float64 {
	return p.Y
}

// GetZ returns the z-coordinate of the point.
func (p *Point3D) GetZ() float64 {
	return p.Z
}

// SetX sets the x-coordinate of the point.
func (p *Point3D) SetX(x float64) {
	p.X = x
}

// SetY sets the y-coordinate of the point.
func (p *Point3D) SetY(y float64) {
	p.Y = y
}

// SetZ sets the z-coordinate of the point.
func (p *Point3D) SetZ(z float64) {
	p.Z = z
}
// Clone creates a new Point3D with the same coordinate information.
func (p *Point3D) Clone() *Point3D {
	copied := *p
	return &copied
}

// AsVector creates a displacement vector from the origin to this point.
func (p *Point3D) AsVector() *Vector3D {
	return &Vector3D{
		X: p.X,
		Y: p.Y,
		Z: p.Z,
	}
}
// DistanceTo calculates the distance from this point to q. Neither point is
// modified; the subtraction happens on freshly created displacement vectors.
func (p *Point3D) DistanceTo(q Point3DReader) (float64, error) {
	diff := q.AsVector()
	if err := diff.Sub(p.AsVector()); err != nil {
		return 0, err
	}
	return diff.Length()
}
// IsEqualTo returns true if 2 points can be considered equal to within a specific tolerance, false if not.
func (p *Point3D) IsEqualTo(q Point3DReader, tol float64) (bool, error) {
if numeric.IsInvalidTolerance(tol) {
return false, numeric.ErrInvalidTol
}
px, py, pz := p.GetX(), p.GetY(), p.GetZ()
qx, qy, qz := q.GetX(), q.GetY(), q.GetZ()
resX := math.Abs(qx - px)
resY := math.Abs(qy - py)
resZ := math.Abs(qz - pz)
isEqual := resX <= tol && resY <= tol && resZ <= tol
return isEqual, nil
} | pkg/geometry/point3d.go | 0.887101 | 0.678084 | point3d.go | starcoder |
package exporters
import (
"fmt"
"strings"
"github.com/cs3org/reva/pkg/mentix/exchangers"
"github.com/cs3org/reva/pkg/mentix/meshdata"
)
// Exporter is the interface that all exporters must implement.
type Exporter interface {
	exchangers.Exchanger

	// MeshData returns the mesh data.
	MeshData() *meshdata.MeshData

	// Update is called whenever the mesh data set has changed to reflect these changes.
	Update(meshdata.Map) error
}

// BaseExporter implements basic exporter functionality common to all exporters.
type BaseExporter struct {
	exchangers.BaseExchanger

	// meshData is the merged snapshot produced by the last successful Update.
	meshData *meshdata.MeshData
	// allowUnauthorizedSites controls whether sites without the authorized
	// property set to "true" survive storeMeshDataSet.
	allowUnauthorizedSites bool
}
// Update is called whenever the mesh data set has changed to reflect these changes.
// It clones, filters and merges meshDataSet into the stored snapshot that
// MeshData later returns.
func (exporter *BaseExporter) Update(meshDataSet meshdata.Map) error {
	// Update the stored mesh data set
	if err := exporter.storeMeshDataSet(meshDataSet); err != nil {
		return fmt.Errorf("unable to store the mesh data: %v", err)
	}

	return nil
}
// storeMeshDataSet clones the mesh data of every enabled connector, strips
// unauthorized sites when configured to do so, merges the clones into a
// single object and stores the result via SetMeshData.
func (exporter *BaseExporter) storeMeshDataSet(meshDataSet meshdata.Map) error {
	// Store the new mesh data set by cloning it and then merging the cloned data into one object
	meshDataSetCloned := make(meshdata.Map)
	for connectorID, meshData := range meshDataSet {
		// Skip data from connectors this exporter is not configured for.
		if !exporter.IsConnectorEnabled(connectorID) {
			continue
		}

		// Clone so the filtering below cannot mutate the caller's data.
		meshDataCloned := meshData.Clone()
		if meshDataCloned == nil {
			return fmt.Errorf("unable to clone the mesh data")
		}

		if !exporter.allowUnauthorizedSites {
			exporter.removeUnauthorizedSites(meshDataCloned)
		}

		meshDataSetCloned[connectorID] = meshDataCloned
	}
	exporter.SetMeshData(meshdata.MergeMeshDataMap(meshDataSetCloned))

	return nil
}
// MeshData returns the stored mesh data.
//
// The getter takes the same locker as SetMeshData: previously the field was
// read without synchronization while updates wrote it under the lock, which
// is a data race when exporters serve requests concurrently with updates.
func (exporter *BaseExporter) MeshData() *meshdata.MeshData {
	exporter.Locker().Lock()
	defer exporter.Locker().Unlock()

	return exporter.meshData
}

// SetMeshData sets new mesh data.
func (exporter *BaseExporter) SetMeshData(meshData *meshdata.MeshData) {
	exporter.Locker().Lock()
	defer exporter.Locker().Unlock()

	exporter.meshData = meshData
}
func (exporter *BaseExporter) removeUnauthorizedSites(meshData *meshdata.MeshData) {
cleanedSites := make([]*meshdata.Site, 0, len(meshData.Sites))
for _, site := range meshData.Sites {
// Only keep authorized sites
if value := meshdata.GetPropertyValue(site.Properties, meshdata.PropertyAuthorized, "false"); strings.EqualFold(value, "true") {
cleanedSites = append(cleanedSites, site)
}
}
meshData.Sites = cleanedSites
} | pkg/mentix/exchangers/exporters/exporter.go | 0.646795 | 0.451629 | exporter.go | starcoder |
package builder
import (
"fmt"
"github.com/ulule/loukoum/v3/stmt"
"github.com/ulule/loukoum/v3/types"
)
// Insert is a builder used for "INSERT" query. It has value semantics: each
// builder method returns a modified copy, leaving the receiver untouched.
type Insert struct {
	query stmt.Insert
}

// NewInsert creates a new Insert.
func NewInsert() Insert {
	return Insert{
		query: stmt.NewInsert(),
	}
}
// Into sets the INTO clause of the query.
// It panics if the INTO clause has already been defined.
func (b Insert) Into(into interface{}) Insert {
	if !b.query.Into.IsEmpty() {
		panic("loukoum: insert builder has into clause already defined")
	}

	b.query.Into = ToInto(into)

	return b
}

// Columns sets the query columns.
// Calling it with no columns is a no-op; it panics if columns have already
// been defined.
func (b Insert) Columns(columns ...interface{}) Insert {
	if len(columns) == 0 {
		return b
	}

	if len(b.query.Columns) != 0 {
		panic("loukoum: insert builder has columns clause already defined")
	}

	b.query.Columns = ToColumns(columns)

	return b
}

// Values sets the query values.
// It panics if the VALUES clause has already been defined.
func (b Insert) Values(values ...interface{}) Insert {
	if !b.query.Values.IsEmpty() {
		panic("loukoum: insert builder has values clause already defined")
	}

	b.query.Values = stmt.NewValues(stmt.NewArrayListExpression(values...))

	return b
}

// Returning builds the RETURNING clause.
// It panics if the RETURNING clause has already been defined.
func (b Insert) Returning(values ...interface{}) Insert {
	if !b.query.Returning.IsEmpty() {
		panic("loukoum: insert builder has returning clause already defined")
	}

	b.query.Returning = stmt.NewReturning(ToSelectExpressions(values))

	return b
}

// Comment adds comment to the query.
func (b Insert) Comment(comment string) Insert {
	b.query.Comment = stmt.NewComment(comment)

	return b
}
// OnConflict builds the ON CONFLICT clause. Arguments are consumed in order:
// strings and stmt.Columns become conflict targets, and the first action
// (ConflictNoAction or ConflictUpdateAction) terminates the clause.
// It panics when the clause is already defined, when no arguments are given,
// when an update action has no preceding target, when an argument has an
// unsupported type, or when no action is supplied at all.
func (b Insert) OnConflict(args ...interface{}) Insert {
	if !b.query.OnConflict.IsEmpty() {
		panic("loukoum: insert builder has on conflict clause already defined")
	}

	if len(args) == 0 {
		panic("loukoum: on conflict clause requires arguments")
	}

	for i := range args {
		switch value := args[i].(type) {
		case string, stmt.Column:
			b.query.OnConflict.Target.Columns = append(b.query.OnConflict.Target.Columns, ToColumn(value))
		case stmt.ConflictNoAction:
			b.query.OnConflict.Action = value
			// The action closes the clause; remaining arguments are ignored.
			return b
		case stmt.ConflictUpdateAction:
			// DO UPDATE requires at least one conflict target column.
			if b.query.OnConflict.Target.IsEmpty() {
				panic("loukoum: on conflict update clause requires at least one target")
			}
			b.query.OnConflict.Action = value
			return b
		default:
			panic(fmt.Sprintf("loukoum: cannot use %T as on conflict clause", args[i]))
		}
	}

	panic("loukoum: on conflict clause requires an action")
}
// Set is a wrapper that defines the columns and values clauses from
// column/value pairs. It panics if either clause is already defined.
func (b Insert) Set(args ...interface{}) Insert {
	if len(b.query.Columns) != 0 {
		panic("loukoum: insert builder has columns clause already defined")
	}
	if !b.query.Values.IsEmpty() {
		panic("loukoum: insert builder has values clause already defined")
	}

	columns, expressions := ToSet(args).Pairs.Values()
	b.query.Columns = columns
	b.query.Values = stmt.NewValues(stmt.NewArrayListExpression(expressions))
	return b
}
// String returns the underlying query as a raw statement.
// This function should be used for debugging since it doesn't escape anything and is completely
// vulnerable to SQL injection.
// You should use either NamedQuery() or Query()...
func (b Insert) String() string {
	ctx := &types.RawContext{}
	b.query.Write(ctx)
	return ctx.Query()
}

// NamedQuery returns the underlying query as a named statement along with
// its named argument values.
func (b Insert) NamedQuery() (string, map[string]interface{}) {
	ctx := &types.NamedContext{}
	b.query.Write(ctx)
	return ctx.Query(), ctx.Values()
}

// Query returns the underlying query as a regular statement along with its
// positional argument values.
func (b Insert) Query() (string, []interface{}) {
	ctx := &types.StdContext{}
	b.query.Write(ctx)
	return ctx.Query(), ctx.Values()
}
// Statement returns underlying statement.
func (b Insert) Statement() stmt.Statement {
return b.query
}
// Ensure that Insert is a Builder
var _ Builder = Insert{} | builder/insert.go | 0.74826 | 0.47859 | insert.go | starcoder |
package gamemap
import (
"math/rand"
"github.com/torlenor/asciiventure/assets"
"github.com/torlenor/asciiventure/utils"
)
// NewRandomMap returns a random game map with the specified number of rooms
// and sizes. It fills the whole map with walls, carves up to maxRooms
// non-intersecting rooms connected by L-shaped tunnels, places the spawn
// point at the center of the first room and a map-change tile ("+") at the
// center of the last one.
//
// NOTE(review): room sizes are rand.Intn(roomMaxSize+1)+roomMinSize+1, i.e.
// in [roomMinSize+1, roomMaxSize+roomMinSize+1], which can exceed
// roomMaxSize — confirm whether that is intended.
// NOTE(review): rand.Intn panics for non-positive arguments, so a room as
// wide/tall as the map would panic; likewise rooms[len(rooms)-1] panics when
// no room was ever placed (e.g. maxRooms == 0).
func NewRandomMap(maxRooms int, roomMinSize, roomMaxSize, mapWidth, mapHeight int, glyphTexture *assets.GlyphTexture) GameMap {
	var gameMap GameMap
	gameMap.Tiles = make(map[int]map[int]Tile)
	// Start with a map made entirely of blocking, opaque wall tiles.
	for y := int(0); y < mapHeight; y++ {
		if _, ok := gameMap.Tiles[y]; !ok {
			gameMap.Tiles[int(y)] = make(map[int]Tile)
		}
		for x := int(0); x < mapWidth; x++ {
			foregroundColor := foregroundColorWallVisible
			gameMap.Tiles[int(y)][int(x)] = Tile{Char: "#", Opaque: true, Blocking: true, ForegroundColor: foregroundColor}
		}
	}

	var rooms []rect

Loop:
	for i := 0; i < maxRooms; i++ {
		w := rand.Intn(int(roomMaxSize)+1) + int(roomMinSize) + 1
		h := rand.Intn(int(roomMaxSize)+1) + int(roomMinSize) + 1
		x := int(rand.Intn(int(mapWidth) - w))
		y := int(rand.Intn(int(mapHeight) - h))

		newRoom := newRect(int(x), int(y), w, h)
		// Discard candidates that overlap an already-placed room.
		for _, otherRoom := range rooms {
			if newRoom.intersect(otherRoom) {
				continue Loop
			}
		}

		createRoom(&gameMap, newRoom)
		newX, newY := newRoom.center()
		if i == 0 {
			// The first room hosts the player spawn point.
			gameMap.SpawnPoint = utils.Vec2{X: int32(newX), Y: int32(newY)}
		} else {
			// Connect to the previous room with an L-shaped tunnel, picking
			// horizontal-then-vertical or vertical-then-horizontal at random.
			prevX, prevY := rooms[len(rooms)-1].center()
			if rand.Intn(2) == 0 {
				createHTunnel(&gameMap, int(prevX), int(newX), int(prevY))
				createVTunnel(&gameMap, int(prevY), int(newY), int(newX))
			} else {
				createVTunnel(&gameMap, int(prevY), int(newY), int(prevX))
				createHTunnel(&gameMap, int(prevX), int(newX), int(newY))
			}
		}
		rooms = append(rooms, newRoom)
	}

	// The last placed room receives the map-change tile.
	mapChangeX, mapChangeY := rooms[len(rooms)-1].center()
	gameMap.MapChangePoint = utils.Vec2{X: int32(mapChangeX), Y: int32(mapChangeY)}
	gameMap.Tiles[int(mapChangeY)][int(mapChangeX)] = Tile{Char: "+",
		Opaque:          false,
		Blocking:        false,
		ForegroundColor: utils.ColorRGBA{R: 255, G: 255, B: 0, A: 255},
	}

	gameMap.T = glyphTexture
	gameMap.notSeenGlyph, _ = gameMap.T.Get("#")
	gameMap.notSeenGlyph.Color = utils.ColorRGB{
		R: 20,
		G: 20,
		B: 20,
	}

	return gameMap
}
// createRoom carves the interior of room into walkable floor tiles,
// leaving the outermost rectangle of coordinates as walls.
func createRoom(gameMap *GameMap, room rect) {
	for y := room.y1 + 1; y < room.y2; y++ {
		row, ok := gameMap.Tiles[y]
		if !ok {
			row = make(map[int]Tile)
			gameMap.Tiles[y] = row
		}
		for x := room.x1 + 1; x < room.x2; x++ {
			row[x] = Tile{Char: "·", Opaque: false, Blocking: false, ForegroundColor: foregroundColorEmptyDot}
		}
	}
}
// createHTunnel carves a horizontal corridor on row y spanning x1..x2
// inclusive, in either argument order.
func createHTunnel(gameMap *GameMap, x1, x2, y int) {
	lo, hi := utils.MinInt(x1, x2), utils.MaxInt(x1, x2)
	for x := lo; x <= hi; x++ {
		gameMap.Tiles[y][x] = Tile{Char: "·", Opaque: false, Blocking: false, ForegroundColor: foregroundColorEmptyDot}
	}
}
func createVTunnel(gameMap *GameMap, y1, y2, x int) {
for y := utils.MinInt(y1, y2); y < utils.MaxInt(y1, y2)+1; y++ {
foregroundColor := foregroundColorEmptyDot
gameMap.Tiles[y][x] = Tile{Char: "·", Opaque: false, Blocking: false, ForegroundColor: foregroundColor}
}
} | gamemap/randommap.go | 0.617859 | 0.402451 | randommap.go | starcoder |
package bitonic
import (
"sync"
)
// SortOrder represents sort order.
// Its underlying type is bool so that it can be compared directly against
// the result of an element comparison (see compareAndSwap).
type SortOrder bool

const (
	// Ascending represents ascending order.
	Ascending SortOrder = true
	// Descending represents descending order.
	Descending SortOrder = false
)
// SortInts sorts `x` by `ord` order (concurrent).
// `len(x)` must be a power of 2.
// Sub-slices of at least Threshold elements are handled in parallel
// goroutines; smaller ones are sorted sequentially.
func SortInts(x []int, ord SortOrder) {
	bitonicSort(x, ord, len(x))
}
// SortInts1 sorts `x` by `ord` order (non-concurrent).
// `len(x)` must be a power of 2.
// It is the purely sequential counterpart of SortInts.
func SortInts1(x []int, ord SortOrder) {
	bitonicSort1(x, ord, len(x))
}
// Threshold is used to decide whether to run concurrently.
// Halves shorter than this many elements are processed sequentially, which
// avoids spawning goroutines for work too small to benefit from them.
const Threshold = 1 << 14
// bitonicSort recursively sorts the two halves of x in opposite directions
// (forming a bitonic sequence) and then merges them into ord order.
// ln must be a power of 2. Halves of at least Threshold elements are
// sorted in parallel goroutines.
//
// Fixes: uses the named Ascending/Descending constants instead of raw
// booleans, and defers wg.Done so the WaitGroup is released even if a
// recursive call panics.
func bitonicSort(x []int, ord SortOrder, ln int) {
	if ln <= 1 {
		return
	}
	mid := ln >> 1
	if mid >= Threshold {
		var wg sync.WaitGroup
		wg.Add(2)
		go func() {
			defer wg.Done()
			bitonicSort(x[:mid], Ascending, mid)
		}()
		go func() {
			defer wg.Done()
			bitonicSort(x[mid:], Descending, mid)
		}()
		wg.Wait()
	} else {
		bitonicSort(x[:mid], Ascending, mid)
		bitonicSort(x[mid:], Descending, mid)
	}
	bitonicMerge(x, ord, ln)
}
// bitonicMerge merges the bitonic sequence held in x into ord order.
// ln must be a power of 2. Halves of at least Threshold elements are
// merged in parallel goroutines.
func bitonicMerge(x []int, ord SortOrder, ln int) {
	if ln <= 1 {
		return
	}
	compareAndSwap(x, ord, ln)
	half := ln >> 1
	lower, upper := x[:half], x[half:]
	if half < Threshold {
		bitonicMerge(lower, ord, half)
		bitonicMerge(upper, ord, half)
		return
	}
	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		bitonicMerge(lower, ord, half)
		wg.Done()
	}()
	go func() {
		bitonicMerge(upper, ord, half)
		wg.Done()
	}()
	wg.Wait()
}
// bitonicSort1 is the sequential counterpart of bitonicSort: it sorts the
// two halves of x in opposite directions and merges them into ord order.
// ln must be a power of 2.
//
// Fix: uses the named Ascending/Descending constants instead of raw
// booleans, matching the package's exported API.
func bitonicSort1(x []int, ord SortOrder, ln int) {
	if ln <= 1 {
		return
	}
	mid := ln >> 1
	bitonicSort1(x[:mid], Ascending, mid)
	bitonicSort1(x[mid:], Descending, mid)
	bitonicMerge1(x, ord, ln)
}
// bitonicMerge1 sequentially merges the bitonic sequence held in x into
// ord order. ln must be a power of 2.
func bitonicMerge1(x []int, ord SortOrder, ln int) {
	if ln <= 1 {
		return
	}
	compareAndSwap(x, ord, ln)
	half := ln >> 1
	bitonicMerge1(x[:half], ord, half)
	bitonicMerge1(x[half:], ord, half)
}
func compareAndSwap(x []int, ord SortOrder, ln int) {
mid := ln >> 1
for i := 0; i < mid; i++ {
peer := mid ^ i
if (x[i] > x[peer]) == ord {
x[i] = x[i] ^ x[peer]
x[peer] = x[i] ^ x[peer]
x[i] = x[i] ^ x[peer]
}
}
} | sort.go | 0.666497 | 0.502747 | sort.go | starcoder |
package lib
import (
"image"
lib_image "github.com/mchapman87501/go_mars_2020_img_utils/lib/image"
)
// I *think* this is a more traditional exposure matcher than that of
// Compositor.matchColors. The latter needs to exactly match subimages
// of the same scene. This just tries to make the exposure (CIE Lab "L")
// histograms of two images look the same.

// numBuckets is the number of histogram bins used to discretize the
// CIE Lab "L" (lightness) channel.
const numBuckets = int(10001)
// ImageExposure summarizes the lightness distribution of an image as a
// histogram (and cumulative density function) over the CIE Lab "L" channel.
type ImageExposure struct {
	BinMinVal float64   // The L value corresponding to the first bin.
	BinScale  float64   // Multiplier maps Lab L range to bucket index range
	Histogram []int     // Histogram of L values.
	CDF       []float64 // Cumulative density function
}
// NewImageExposure builds the L-channel histogram and CDF for image.
//
// The input may have an invalid Lab L range (e.g. a composite built from
// tiles with widely varying exposures), so the actual [min, max] L range is
// measured first and mapped onto bucket indices 0..numBuckets-1.
func NewImageExposure(image *lib_image.CIELab) *ImageExposure {
	result := &ImageExposure{
		BinMinVal: 0.0,
		BinScale:  1.0,
		Histogram: make([]int, numBuckets, numBuckets+1),
		CDF:       make([]float64, numBuckets, numBuckets+1),
	}

	min := image.Bounds().Min
	max := image.Bounds().Max

	// Measure the actual L range of the image.
	first := true
	labLMin := 0.0
	labLMax := 100.0
	for x := min.X; x < max.X; x++ {
		for y := min.Y; y < max.Y; y++ {
			lab := image.CIELabAt(x, y)
			if first || lab.L < labLMin {
				labLMin = lab.L
			}
			if first || lab.L > labLMax {
				labLMax = lab.L
			}
			first = false
		}
	}

	// Fix: guard against a zero dynamic range (uniform image). The
	// original unconditional division produced +Inf here, making the
	// bucket index below NaN / out of range.
	scale := 1.0
	if labLMax > labLMin {
		scale = float64(numBuckets-1) / (labLMax - labLMin)
	}
	result.BinMinVal = labLMin
	result.BinScale = scale

	// Fill the histogram, clamping the index so float rounding can never
	// step outside the slice.
	for x := min.X; x < max.X; x++ {
		for y := min.Y; y < max.Y; y++ {
			lab := image.CIELabAt(x, y)
			index := int((lab.L - labLMin) * scale)
			if index < 0 {
				index = 0
			} else if index >= numBuckets {
				index = numBuckets - 1
			}
			result.Histogram[index]++
		}
	}

	// Accumulate the per-bin PDF into the CDF.
	numPixels := image.Bounds().Dx() * image.Bounds().Dy()
	if numPixels > 0 {
		for i := 0; i < numBuckets; i++ {
			pdf := float64(result.Histogram[i]) / float64(numPixels)
			prevCDF := 0.0
			if i > 0 {
				prevCDF = result.CDF[i-1]
			}
			result.CDF[i] = prevCDF + pdf
		}
	}
	return result
}
// cdf returns the Lab L value such that the given fraction of the image's
// pixels are no brighter than that value (the inverse CDF).
func (e ImageExposure) cdf(fract float64) float64 {
	for bin, cumulative := range e.CDF {
		if cumulative >= fract {
			return e.labL(bin)
		}
	}
	// No bin reached fract; only possible on a fract domain error.
	if fract <= 0.0 {
		return 0
	}
	// Assume fract >= 1: report the brightest bin.
	return e.labL(numBuckets - 1)
}
// labL converts a histogram bin index back to a Lab L value
// (inverse of the index mapping established in NewImageExposure).
func (e ImageExposure) labL(binIndex int) float64 {
	return float64(binIndex)/e.BinScale + e.BinMinVal
}
// Get a copy of a ref image whose exposure is matched to that of a target image.
func MatchExposure(ref image.Image, target image.Image) image.Image {
refLab := lib_image.CIELabFromImage(ref)
refExposure := NewImageExposure(refLab)
targetExposure := NewImageExposure(lib_image.CIELabFromImage(target))
// map CIE Lab L range, 0...100, onto 0...lhBuckets-1
scale := float64(numBuckets-1) / 100.0
// Build a mapping from refExposure to targetExposure.
labLMap := make(map[int]float64)
iTarg := 0
for iRef := 0; iRef < numBuckets; iRef++ {
refCDF := refExposure.CDF[iRef]
for (iTarg < numBuckets-1) && (targetExposure.CDF[iTarg] < refCDF) {
iTarg += 1
}
labLMap[iRef] = float64(iTarg) / scale
}
result := lib_image.NewCIELab(ref.Bounds())
min := result.Bounds().Min
max := result.Bounds().Max
for x := min.X; x < max.X; x++ {
for y := min.Y; y < max.Y; y++ {
src := refLab.CIELabAt(x, y)
key := int(src.L * scale)
src.L = labLMap[key]
result.SetCIELab(x, y, src)
}
}
return result
} | lib/image_exposure.go | 0.672439 | 0.568416 | image_exposure.go | starcoder |
package mouse
import (
"github.com/go-vgo/robotgo"
"github.com/haroflow/go-macros/automation"
)
// Commands returns the list of mouse-automation commands exposed by this
// module, each pairing a script-facing name/signature/description with the
// Go function that implements it.
func Commands() []automation.Command {
	moduleName := "mouse"
	return []automation.Command{
		{
			ModuleName:  moduleName,
			MethodName:  "move",
			Parameters:  "x: int, y: int",
			Description: "Moves the mouse cursor to a point on the screen in one step.",
			Action:      Move,
		},
		{
			ModuleName:  moduleName,
			MethodName:  "moveSmooth",
			Parameters:  "x: int, y: int",
			Description: "Moves the mouse cursor to a point on the screen smoothly.",
			Action:      MoveSmooth,
		},
		{
			ModuleName:  moduleName,
			MethodName:  "moveRelative",
			Parameters:  "x: int, y: int",
			Description: "Moves the mouse cursor to a point relative to the current mouse position in one step.",
			Action:      MoveRelative,
		},
		{
			ModuleName:  moduleName,
			MethodName:  "click",
			Parameters:  "",
			Description: "Triggers a left click.",
			Action:      Click,
		},
		{
			ModuleName:  moduleName,
			MethodName:  "rightClick",
			Parameters:  "",
			Description: "Triggers a right click.",
			Action:      RightClick,
		},
		{
			ModuleName:  moduleName,
			MethodName:  "doubleClick",
			Parameters:  "",
			Description: "Triggers a left double-click.",
			Action:      DoubleClick,
		},
		{
			ModuleName:  moduleName,
			MethodName:  "drag",
			Parameters:  "x: int, y: int",
			Description: "Press the left mouse button on the current position and drag to another position on screen.",
			Action:      Drag,
		},
		{
			ModuleName:  moduleName,
			MethodName:  "dragRelative",
			Parameters:  "x: int, y: int",
			Description: "Press the left mouse button on the current position and drag to another position on screen relative to the current position.",
			Action:      DragRelative,
		},
		{
			ModuleName:  moduleName,
			MethodName:  "getX",
			Parameters:  "",
			Description: "Returns the current mouse X position.",
			Action:      GetX,
		},
		{
			ModuleName:  moduleName,
			MethodName:  "getY",
			Parameters:  "",
			Description: "Returns the current mouse Y position.",
			Action:      GetY,
		},
	}
}
// Move moves the mouse cursor to the absolute screen position (x, y) in one step.
func Move(x, y int) {
	robotgo.MoveMouse(x, y)
}

// MoveRelative moves the mouse cursor by (x, y) relative to its current position.
func MoveRelative(x, y int) {
	robotgo.MoveRelative(x, y)
}

// MoveSmooth moves the mouse cursor to absolute position (x, y) smoothly.
func MoveSmooth(x, y int) {
	robotgo.MoveMouseSmooth(x, y)
}

// Click triggers a single left click at the current position.
func Click() {
	robotgo.Click("left", false)
}

// DoubleClick triggers a left double-click at the current position.
func DoubleClick() {
	robotgo.Click("left", true)
}

// RightClick triggers a single right click at the current position.
func RightClick() {
	robotgo.Click("right", false)
}

// GetX returns the current mouse X position.
func GetX() int {
	x, _ := robotgo.GetMousePos()
	return x
}

// GetY returns the current mouse Y position.
func GetY() int {
	_, y := robotgo.GetMousePos()
	return y
}

// Position returns the current mouse (x, y) position.
func Position() (x, y int) {
	return robotgo.GetMousePos()
}

// Drag presses the left mouse button and drags smoothly to absolute position (x, y).
func Drag(x, y int) {
	robotgo.DragSmooth(x, y)
}

// DragRelative presses the left mouse button and drags smoothly by (x, y)
// relative to the current position.
func DragRelative(x, y int) {
	dx := GetX() + x
	dy := GetY() + y
	robotgo.DragSmooth(dx, dy)
} | automation/mouse/mouse.go | 0.610453 | 0.499329 | mouse.go | starcoder |
package hyper
// Cube is a hypercube, represented by a slice of its per-dimension
// bucket coordinates.
type Cube []int

// Cubes is a set of hypercubes.
type Cubes []Cube
// Params are the parameters of space discretization.
type Params struct {
	// Value limits per dimension. For example 0, 255 for pixel values.
	// Assumed identical for all dimensions.
	Min, Max float64
	// Uncertainty interval expressed as a fraction of bucketWidth
	// (for example 0.25 for eps = 1/4 of bucketWidth).
	// Must be strictly less than 0.5; CubeSet/CentralCube panic otherwise.
	EpsPercent float64
	// Number of buckets per dimension.
	NumBuckets int
}
// CubeSet returns a set of hypercubes, which represent
// fuzzy discretization of one n-dimensional vector,
// as described in
// https://vitali-fedulov.github.io/algorithm-for-hashing-high-dimensional-float-vectors.html
// One hupercube is defined by bucket numbers in each dimension.
// min and max are minimum and maximum possible values of
// the vector components. The assumption is that min and max
// are the same for all dimensions.
func CubeSet(vector []float64, params Params) (set Cubes) {
	if params.EpsPercent >= 0.5 {
		panic(`Error: EpsPercent must be less than 0.5.`)
	}

	var (
		bC         int   // Central bucket number.
		bL, bR     int   // Left and right bucket number.
		setL, setR Cubes // Set clones (for Left and Right).
		branching  bool  // Branching flag.
	)

	// Rescaling vector to avoid potential mistakes with
	// divisions and offsets later on.
	rescaled := rescale(vector, params)
	// After the rescale value range of the vector are
	// [0, numBuckets], and not [min, max].
	// min = 0.0 from now on.
	max := float64(params.NumBuckets)

	// For each dimension decide whether the value falls safely inside one
	// bucket (no branching) or lies within eps of a bucket boundary
	// (branching: every cube accumulated so far is duplicated, one copy
	// per candidate bucket).
	for _, val := range rescaled {
		branching = false
		bL = int(val - params.EpsPercent)
		bR = int(val + params.EpsPercent)

		// Get extreme values out of the way.
		if val-params.EpsPercent <= 0.0 { // This means that val >= 0.
			bC = bR
			goto branchingCheck // No branching.
		}
		// Get extreme values out of the way.
		if val+params.EpsPercent >= max { // This means that val =< max.
			// Above max = numBuckets.
			bC = bL
			goto branchingCheck // No branching.
		}
		if bL == bR {
			bC = bL
			goto branchingCheck // No branching.
		} else { // Meaning bL != bR and not any condition above.
			branching = true
		}

	branchingCheck:
		if branching {
			// Clone the current set so the left- and right-bucket variants
			// extend independent copies.
			setL = clone(set)
			setR = clone(set)
			if len(setL) == 0 {
				setL = append(setL, []int{bL})
			} else {
				for i := range setL {
					setL[i] = append(setL[i], bL)
				}
			}
			if len(setR) == 0 {
				setR = append(setR, []int{bR})
			} else {
				for i := range setR {
					setR[i] = append(setR[i], bR)
				}
			}
			set = append(setL, setR...)
		} else { // No branching.
			if len(set) == 0 {
				set = append(set, []int{bC})
			} else {
				for i := range set {
					set[i] = append(set[i], bC)
				}
			}
		}
	}

	// Real use case verification that branching works correctly
	// and no buckets are lost for a very large number of vectors.
	// TODO: Remove once tested.
	for i := 0; i < len(set); i++ {
		if len(set[i]) != len(vector) {
			panic(`Number of hypercube coordinates must equal
			to len(vector).`)
		}
	}

	return set
}
// CentralCube returns the hypercube containing the vector end.
// Arguments are the same as for the CubeSet function.
func CentralCube(vector []float64, params Params) (central Cube) {
	if params.EpsPercent >= 0.5 {
		panic(`Error: EpsPercent must be less than 0.5.`)
	}

	// Rescale so the component values live in [0, numBuckets].
	rescaled := rescale(vector, params)
	upper := float64(params.NumBuckets)

	for _, v := range rescaled {
		bucket := int(v)
		// Values within eps of either extreme are nudged inward so the
		// resulting bucket index stays in range.
		if v-params.EpsPercent <= 0.0 {
			bucket = int(v + params.EpsPercent)
		}
		if v+params.EpsPercent >= upper {
			bucket = int(v - params.EpsPercent)
		}
		central = append(central, bucket)
	}
	return central
}
// rescale is a helper that offsets and rescales every component of vector
// onto the [0, numBuckets] range.
func rescale(vector []float64, params Params) []float64 {
	span := params.Max - params.Min
	out := make([]float64, len(vector))
	for i, v := range vector {
		// Offset to zero, then scale to [0, numBuckets].
		out[i] = (v - params.Min) * float64(params.NumBuckets) / span
	}
	return out
}
// clone makes an unlinked copy of a 2D slice.
func clone(src Cubes) (dst Cubes) {
dst = make(Cubes, len(src))
for i := range src {
dst[i] = append(Cube{}, src[i]...)
}
return dst
} | cubes.go | 0.713032 | 0.592608 | cubes.go | starcoder |
package schedule
// There are a total of n courses you have to take, labeled from 0 to n-1.
// Some courses may have prerequisites, for example to take course 0 you have to first take course 1, which is expressed as a pair: [0,1]
// Given the total number of courses and a list of prerequisite pairs, is it possible for you to finish all courses?
// Example 1:
// Input: 2, [[1,0]]
// Output: true
// Explanation: There are a total of 2 courses to take.
// To take course 1 you should have finished course 0. So it is possible.
// Example 2:
// Input: 2, [[1,0],[0,1]]
// Output: false
// Explanation: There are a total of 2 courses to take.
// To take course 1 you should have finished course 0, and to take course 0 you should
// also have finished course 1. So it is impossible.
// Note:
// The input prerequisites is a graph represented by a list of edges, not adjacency matrices. Read more about how a graph is represented.
// You may assume that there are no duplicate edges in the input prerequisites.
/*
Design:
This looks like a cycle in this directed graph would make every class
in the cycle and every class which has a prerequisite class in a cycle impossible to take.
make a list of each class
Find each cycle and it's start point and add each of them to
a map of impossible nodes.
remove each impossible node from the class list
for every class in the class list, look at it's prereqs and if any are in the impossible map, eliminate it too.
compare class list length to number of required classes
Is it helpful to do a union find to group classes that are in the same graph? I don't think so.
If I iterate through the list of edges and...
with an edge's left, iterate through the rest of the edges looking for it's prereqs. * no, translate to a list of classes *
sort the list, by l then r. cause it is free and might help.
**
Where to start?
start at each root.
a root is a class with no prerequisite.
DFS from each root through the classes required by it.
record if a root has been explored, so when searching from another root, not to repeat
mark a node if it is impossible to take.
****
how to find each root?
make a map of classes to the list of immediate prerequisites. and then of those, find those which have none.
how to find which classes are required by it?
make a map of classes the ones which immediately require it.
Do I add the list as children of the node? This may be better to traverse. since you don't need to refer to the map. I'll start with out that.
how do I get a list of classes?
iterate over each node and add each left an right to a set.
how to count the possible nodes?
iterate over each node and increment count if it is not marked as impossible.
****
design of the node struct
{
Id int
Possible bool // instead, it is possible by being in the order at the end
Explored bool // maybe instead of this, remove it from the graph to the order queue
Children/Required by
}
****
TIL about adjacency list representation of a graph which is good for exploring sparse graphs.
TIL about topological sort
Design using a topological sort
build the a graph then remove them to a topological sort
when processNext is empty, compare the number of nodes to the number of courses needed
*** I misunderstood: there are n courses, the prerequisites represent the ones of those which have prerequisites, not all the courses.
so if the topological sort length doesn't match the length course list,
then some courses cannot be taken and return false
*/
func canFinish(numCourses int, prerequisites [][]int) bool {
order := make([]*course, 0)
processNext := make([]*course, 0)
courseList := makeUnconnectedCourses(prerequisites)
connectCourses(courseList, prerequisites)
// prime process Next with all courses which have none other required.
for _, c := range courseList {
if c.RequiredCount == 0 {
processNext = append(processNext, c)
}
}
for len(processNext) > 0 {
n := processNext[0]
processNext = processNext[1:]
for _, x := range n.RequiredBy {
x.RequiredCount--
if x.RequiredCount == 0 {
processNext = append(processNext, x)
}
}
order = append(order, n)
}
return len(order) == len(courseList)
}
// makeUnconnectedCourses creates one course node per distinct course id
// mentioned anywhere in prerequisites, with no edges wired yet.
func makeUnconnectedCourses(prerequisites [][]int) map[int]*course {
	courseList := make(map[int]*course, 0)
	for _, pair := range prerequisites {
		for _, id := range pair[:2] {
			if _, seen := courseList[id]; !seen {
				courseList[id] = &course{ID: id}
			}
		}
	}
	return courseList
}
// connectCourses wires the edges into the course nodes: for each pair
// [a, b], node a records b in its RequiredBy list and b's inbound
// RequiredCount is incremented.
func connectCourses(courseList map[int]*course, prerequisites [][]int) {
	for _, pair := range prerequisites {
		from := courseList[pair[0]]
		to := courseList[pair[1]]
		from.RequiredBy = append(from.RequiredBy, to)
		to.RequiredCount++
	}
}
// course is a node in the prerequisite graph used by canFinish.
type course struct {
	ID int
	// outbound edges: nodes whose RequiredCount is decremented when this
	// node is processed
	RequiredBy []*course
	// inbound count
	RequiredCount int
} | graphs/course/schedule/schedule.go | 0.797202 | 0.814938 | schedule.go | starcoder |
package common
import (
"log"
"engo.io/engo"
"engo.io/gl"
)
// BasicSpritesheet stores a set of equally-sized tiles cut from a single
// texture file, used by tilemaps and animations.
type BasicSpritesheet struct {
	texture               *gl.Texture     // The original texture
	width, height         float32         // The dimensions of the total texture
	cellWidth, cellHeight int             // The dimensions of the cells
	cache                 map[int]Texture // Cache of already-built cell textures, keyed by cell index
}
// NewBasicSpritesheetFromTexture builds a spritesheet over an
// already-loaded texture, with cells of cellWidth x cellHeight pixels.
func NewBasicSpritesheetFromTexture(tr *TextureResource, cellWidth, cellHeight int) *BasicSpritesheet {
	sheet := BasicSpritesheet{
		texture:    tr.Texture,
		width:      tr.Width,
		height:     tr.Height,
		cellWidth:  cellWidth,
		cellHeight: cellHeight,
		cache:      make(map[int]Texture),
	}
	return &sheet
}
// NewBasicSpritesheetFromFile is a simple handler for creating a new spritesheet from a file
// textureName is the name of a texture already preloaded with engo.Files.Add
// Returns nil (after logging a warning) when the resource is missing or is
// not a TextureResource.
func NewBasicSpritesheetFromFile(textureName string, cellWidth, cellHeight int) *BasicSpritesheet {
	res, err := engo.Files.Resource(textureName)
	if err != nil {
		log.Println("[WARNING] [NewBasicSpritesheetFromFile]: Received error:", err)
		return nil
	}
	img, ok := res.(TextureResource)
	if !ok {
		log.Println("[WARNING] [NewBasicSpritesheetFromFile]: Resource not of type `TextureResource`:", textureName)
		return nil
	}
	return NewBasicSpritesheetFromTexture(&img, cellWidth, cellHeight)
}
// Cell gets the region at the index i, updates and pulls from cache if need be
// Cells are numbered left-to-right, top-to-bottom; the viewport is the
// cell's bounding box in normalized [0, 1] texture coordinates.
func (s *BasicSpritesheet) Cell(index int) Texture {
	if r, ok := s.cache[index]; ok {
		return r
	}
	cellsPerRow := int(s.Width())
	// Pixel origin of the cell within the sheet texture.
	var x float32 = float32((index % cellsPerRow) * s.cellWidth)
	var y float32 = float32((index / cellsPerRow) * s.cellHeight)
	s.cache[index] = Texture{id: s.texture, width: float32(s.cellWidth), height: float32(s.cellHeight), viewport: engo.AABB{
		engo.Point{x / s.width, y / s.height},
		engo.Point{(x + float32(s.cellWidth)) / s.width, (y + float32(s.cellHeight)) / s.height},
	}}
	return s.cache[index]
}
// Drawable returns the cell at index as a Drawable.
func (s *BasicSpritesheet) Drawable(index int) Drawable {
	return s.Cell(index)
}
// Drawables returns every cell of the sheet, in index order, as Drawables.
func (s *BasicSpritesheet) Drawables() []Drawable {
	out := make([]Drawable, s.CellCount())
	for idx := range out {
		out[idx] = s.Drawable(idx)
	}
	return out
}
// CellCount returns the total number of cells in the sheet
// (cells per row times cells per column).
func (s *BasicSpritesheet) CellCount() int {
	return int(s.Width()) * int(s.Height())
}
// Cells returns every cell texture of the sheet, in index order.
func (s *BasicSpritesheet) Cells() []Texture {
	out := make([]Texture, s.CellCount())
	for idx := range out {
		out[idx] = s.Cell(idx)
	}
	return out
}
// Width is the amount of tiles on the x-axis of the spritesheet.
func (s BasicSpritesheet) Width() float32 {
	return s.width / float32(s.cellWidth)
}
// Height is the amount of tiles on the y-axis of the spritesheet.
func (s BasicSpritesheet) Height() float32 {
	return s.height / float32(s.cellHeight)
}
/*
type Sprite struct {
Position *Point
Scale *Point
Anchor *Point
Rotation float32
Color color.Color
Alpha float32
Region *Region
}
func NewSprite(region *Region, x, y float32) *Sprite {
return &Sprite{
Position: &Point{x, y},
Scale: &Point{1, 1},
Anchor: &Point{0, 0},
Rotation: 0,
Color: color.White,
Alpha: 1,
Region: region,
}
}
*/ | common/basic_spritesheet.go | 0.70416 | 0.574813 | basic_spritesheet.go | starcoder |
package inflect
import (
"fmt"
"regexp"
"strings"
)
// Pluralize returns the plural form of str according to the inflector
// registered for the current Language; str is returned unchanged when no
// inflector is registered.
func Pluralize(str string) string {
	inflector, ok := Languages[Language]
	if !ok {
		return str
	}
	return inflector.Pluralize(str)
}
// Singularize returns the singular form of str according to the inflector
// registered for the current Language; str is returned unchanged when no
// inflector is registered.
func Singularize(str string) string {
	inflector, ok := Languages[Language]
	if !ok {
		return str
	}
	return inflector.Singularize(str)
}
// FromNumber returns the singular form of str when n == 1 and the plural
// form for every other count.
func FromNumber(str string, n int) string {
	if n == 1 {
		return Singularize(str)
	}
	return Pluralize(str)
}
// splitRe matches the word delimiters recognized by split: hyphens,
// underscores, and a lower-to-upper camel-case boundary (the two letters
// are captured so they can be kept, separated by a space).
// Fix: compiled once at package scope instead of on every call.
var splitRe = regexp.MustCompile(`-|_|([a-z])([A-Z])`)

// split breaks a string into words so that it can be converted to a
// different casing. Splits on underscores, hyphens, spaces and camel casing.
// FIXME: This isn't a perfect solution, e.g. "WEiRD CaSINg".
// Note: consecutive delimiters produce empty elements in the result;
// callers must tolerate them.
func split(str string) []string {
	trimmed := strings.Trim(str, `-|_| `)
	return strings.Split(splitRe.ReplaceAllString(trimmed, `$1 $2`), ` `)
}

// UpperCamelCase converts a string to it's upper camel case version.
func UpperCamelCase(str string) string {
	pieces := split(str)
	for index, s := range pieces {
		// Fix: empty pieces arise from consecutive delimiters (e.g.
		// "foo__bar"); indexing s[0] on them panicked. They contribute
		// nothing to the joined result, so skip them.
		if s == "" {
			continue
		}
		pieces[index] = fmt.Sprintf(`%v%v`, strings.ToUpper(string(s[0])), strings.ToLower(s[1:]))
	}
	return strings.Join(pieces, ``)
}
// LowerCamelCase converts a string to it's lower camel case version.
func LowerCamelCase(str string) string {
	pieces := split(str)
	// split always returns at least one element; ToLower("") is harmless.
	pieces[0] = strings.ToLower(pieces[0])
	for i := 1; i < len(pieces); i++ {
		// Fix: consecutive delimiters yield empty pieces, and indexing
		// pieces[i][0] on them panicked; they add nothing to the result.
		if pieces[i] == "" {
			continue
		}
		pieces[i] = fmt.Sprintf(`%v%v`, strings.ToUpper(string(pieces[i][0])), strings.ToLower(pieces[i][1:]))
	}
	return strings.Join(pieces, ``)
}
// Underscore converts a string to it's underscored version.
func Underscore(str string) string {
	words := split(str)
	for i := range words {
		words[i] = strings.ToLower(words[i])
	}
	return strings.Join(words, `_`)
}
// Hyphenate converts a string to it's hyphenated version.
func Hyphenate(str string) string {
	words := split(str)
	for i := range words {
		words[i] = strings.ToLower(words[i])
	}
	return strings.Join(words, `-`)
}
// Constantize converts a string to it's constantized version.
func Constantize(str string) string {
	words := split(str)
	for i := range words {
		words[i] = strings.ToUpper(words[i])
	}
	return strings.Join(words, `_`)
}
// Humanize converts a string to it's humanized version: the first word is
// capitalized, the rest are lowercased, and the words are joined by spaces.
func Humanize(str string) string {
	pieces := split(str)
	// Fix: split("") and all-delimiter inputs yield an empty first element,
	// and indexing pieces[0][0] on it panicked.
	if pieces[0] != "" {
		pieces[0] = fmt.Sprintf(`%v%v`, strings.ToUpper(string(pieces[0][0])), strings.ToLower(pieces[0][1:]))
	}
	for i := 1; i < len(pieces); i++ {
		pieces[i] = strings.ToLower(pieces[i])
	}
	return strings.Join(pieces, ` `)
}
// Titleize converts a string to it's titleized version.
func Titleize(str string) string {
pieces := split(str)
for i := 0; i < len(pieces); i++ {
pieces[i] = fmt.Sprintf(`%v%v`, strings.ToUpper(string(pieces[i][0])), strings.ToLower(pieces[i][1:]))
}
return strings.Join(pieces, ` `)
} | go/src/github.com/chuckpreslar/inflect/inflect.go | 0.634317 | 0.443179 | inflect.go | starcoder |
package fixtures
import (
"fmt"
)
// argument renders a scalar value as a go text/template argument literal.
// Only strings, ints, bools and nil are supported; any other type yields
// the empty string.
// https://golang.org/pkg/text/template/#hdr-Arguments
func argument(value interface{}) string {
	switch v := value.(type) {
	case string:
		return `"` + v + `"`
	case int:
		return fmt.Sprintf(`%d`, v)
	case bool:
		if v {
			return `true`
		}
		return `false`
	case nil:
		return `nil`
	default:
		return ``
	}
}
// Pipeline represents a pipeline of go template functions.
// https://golang.org/pkg/text/template/#hdr-Pipelines
type Pipeline string

// NewPipeline creates a new pipeline from a function. While a pipeline can
// start with an argument, all our data accessors are through functions so
// this is the common path.
func NewPipeline(fn Function) Pipeline {
	return Pipeline(fn)
}

// NewRegistryPipeline creates a pipeline initialized with a registry lookup
// function.
func NewRegistryPipeline(arg interface{}) Pipeline {
	return NewPipeline(Registry(arg))
}

// NewParameterPipeline creates a pipeline initialized with a parameter lookup
// function.
func NewParameterPipeline(arg interface{}) Pipeline {
	return NewPipeline(Parameter(arg))
}

// NewGeneratePasswordPipeline creates a pipeline initialized with a generate
// password function.
func NewGeneratePasswordPipeline(length, dictionary interface{}) Pipeline {
	return NewPipeline(GeneratePassword(length, dictionary))
}

// NewGeneratePrivateKeyPipeline creates a pipeline initialized with a generate
// private key function.
func NewGeneratePrivateKeyPipeline(typ, encoding, bits interface{}) Pipeline {
	return NewPipeline(GeneratePrivateKey(typ, encoding, bits))
}

// NewGenerateCertificatePipeline creates a pipeline initialized with a generate
// certificate function.
func NewGenerateCertificatePipeline(key, cn, lifetime, usage, sans, caKey, caCert interface{}) Pipeline {
	return NewPipeline(GenerateCertificate(key, cn, lifetime, usage, sans, caKey, caCert))
}
// With appends a function to a pipeline.
func (p Pipeline) With(fn Function) Pipeline {
	if len(p) == 0 {
		return Pipeline(fn)
	}
	return p + " | " + Pipeline(fn)
}

// WithDefault appends a defaulting function to a pipeline.
func (p Pipeline) WithDefault(arg interface{}) Pipeline {
	return p.With(Default(arg))
}

// Required appends a function to a pipeline that raises an error if the
// input is nil.
func (p Pipeline) Required() Pipeline {
	return p.With(Required())
}
// Function represents a named function that accepts an arbitrary number
// of arguments.
// https://golang.org/pkg/text/template/#hdr-Functions
type Function string
// NewFunction creates a new named function.
func NewFunction(fn string, args ...interface{}) Function {
expression := fn
for _, arg := range args {
switch t := arg.(type) {
case Function:
expression = fmt.Sprintf("%s (%s)", expression, string(t))
case Pipeline:
expression = fmt.Sprintf("%s (%s)", expression, string(t))
case string, int, bool, nil:
expression = fmt.Sprintf("%s %s", expression, argument(t))
}
}
return Function(expression)
}
// Registry returns a function that looks up a registry entry.
func Registry(arg interface{}) Function {
	return NewFunction("registry", arg)
}

// Parameter returns a function that looks up a parameter path.
func Parameter(arg interface{}) Function {
	return NewFunction("parameter", arg)
}

// GeneratePassword returns a function that generates a random password string.
func GeneratePassword(length, dictionary interface{}) Function {
	return NewFunction("generatePassword", length, dictionary)
}

// GeneratePrivateKey returns a function that generates a private key.
func GeneratePrivateKey(typ, encoding, bits interface{}) Function {
	return NewFunction("generatePrivateKey", typ, encoding, bits)
}

// GenerateCertificate returns a function that generates a certificate.
func GenerateCertificate(key, cn, lifetime, usage, sans, caKey, caCert interface{}) Function {
	return NewFunction("generateCertificate", key, cn, lifetime, usage, sans, caKey, caCert)
}

// Default generates a function that returns a default if the input is nil.
func Default(arg interface{}) Function {
	return NewFunction("default", arg)
}

// Required returns a function that raises an error if the input is nil.
func Required() Function {
	return NewFunction(`required`)
} | test/unit/fixtures/template.go | 0.867106 | 0.495789 | template.go | starcoder |
package c
import (
. "github.com/maxinehazel/chroma" // nolint
"github.com/maxinehazel/chroma/lexers/internal"
)
// Coq lexer.
// State machine: "root" handles ordinary code, "comment" handles (possibly
// nested) `(* ... *)` comments, "string" handles double-quoted literals,
// and "dotted" handles dotted module paths.
var Coq = internal.Register(MustNewLexer(
	&Config{
		Name:      "Coq",
		Aliases:   []string{"coq"},
		Filenames: []string{"*.v"},
		MimeTypes: []string{"text/x-coq"},
	},
	Rules{
		"root": {
			{`\s+`, Text, nil},
			{`false|true|\(\)|\[\]`, NameBuiltinPseudo, nil},
			{`\(\*`, Comment, Push("comment")},
			{Words(`\b`, `\b`, `Section`, `Module`, `End`, `Require`, `Import`, `Export`, `Variable`, `Variables`, `Parameter`, `Parameters`, `Axiom`, `Hypothesis`, `Hypotheses`, `Notation`, `Local`, `Tactic`, `Reserved`, `Scope`, `Open`, `Close`, `Bind`, `Delimit`, `Definition`, `Let`, `Ltac`, `Fixpoint`, `CoFixpoint`, `Morphism`, `Relation`, `Implicit`, `Arguments`, `Set`, `Unset`, `Contextual`, `Strict`, `Prenex`, `Implicits`, `Inductive`, `CoInductive`, `Record`, `Structure`, `Canonical`, `Coercion`, `Theorem`, `Lemma`, `Corollary`, `Proposition`, `Fact`, `Remark`, `Example`, `Proof`, `Goal`, `Save`, `Qed`, `Defined`, `Hint`, `Resolve`, `Rewrite`, `View`, `Search`, `Show`, `Print`, `Printing`, `All`, `Graph`, `Projections`, `inside`, `outside`, `Check`, `Global`, `Instance`, `Class`, `Existing`, `Universe`, `Polymorphic`, `Monomorphic`, `Context`), KeywordNamespace, nil},
			{Words(`\b`, `\b`, `forall`, `exists`, `exists2`, `fun`, `fix`, `cofix`, `struct`, `match`, `end`, `in`, `return`, `let`, `if`, `is`, `then`, `else`, `for`, `of`, `nosimpl`, `with`, `as`), Keyword, nil},
			{Words(`\b`, `\b`, `Type`, `Prop`), KeywordType, nil},
			{Words(`\b`, `\b`, `pose`, `set`, `move`, `case`, `elim`, `apply`, `clear`, `hnf`, `intro`, `intros`, `generalize`, `rename`, `pattern`, `after`, `destruct`, `induction`, `using`, `refine`, `inversion`, `injection`, `rewrite`, `congr`, `unlock`, `compute`, `ring`, `field`, `replace`, `fold`, `unfold`, `change`, `cutrewrite`, `simpl`, `have`, `suff`, `wlog`, `suffices`, `without`, `loss`, `nat_norm`, `assert`, `cut`, `trivial`, `revert`, `bool_congr`, `nat_congr`, `symmetry`, `transitivity`, `auto`, `split`, `left`, `right`, `autorewrite`, `tauto`, `setoid_rewrite`, `intuition`, `eauto`, `eapply`, `econstructor`, `etransitivity`, `constructor`, `erewrite`, `red`, `cbv`, `lazy`, `vm_compute`, `native_compute`, `subst`), Keyword, nil},
			{Words(`\b`, `\b`, `by`, `done`, `exact`, `reflexivity`, `tauto`, `romega`, `omega`, `assumption`, `solve`, `contradiction`, `discriminate`, `congruence`), KeywordPseudo, nil},
			{Words(`\b`, `\b`, `do`, `last`, `first`, `try`, `idtac`, `repeat`), KeywordReserved, nil},
			{`\b([A-Z][\w\']*)`, Name, nil},
			{"(\u03bb|\u03a0|\\|\\}|\\{\\||\\\\/|/\\\\|=>|~|\\}|\\|]|\\||\\{<|\\{|`|_|]|\\[\\||\\[>|\\[<|\\[|\\?\\?|\\?|>\\}|>]|>|=|<->|<-|<|;;|;|:>|:=|::|:|\\.\\.|\\.|->|-\\.|-|,|\\+|\\*|\\)|\\(|&&|&|#|!=)", Operator, nil},
			{`([=<>@^|&+\*/$%-]|[!?~])?[!$%&*+\./:<=>?@^|~-]`, Operator, nil},
			{`\b(unit|nat|bool|string|ascii|list)\b`, KeywordType, nil},
			{`[^\W\d][\w']*`, Name, nil},
			{`\d[\d_]*`, LiteralNumberInteger, nil},
			{`0[xX][\da-fA-F][\da-fA-F_]*`, LiteralNumberHex, nil},
			{`0[oO][0-7][0-7_]*`, LiteralNumberOct, nil},
			{`0[bB][01][01_]*`, LiteralNumberBin, nil},
			{`-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)`, LiteralNumberFloat, nil},
			{`'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'`, LiteralStringChar, nil},
			{`'.'`, LiteralStringChar, nil},
			{`'`, Keyword, nil},
			{`"`, LiteralStringDouble, Push("string")},
			{`[~?][a-z][\w\']*:`, Name, nil},
		},
		// Coq comments nest: each inner "(*" pushes this state again and
		// each "*)" pops one level.
		"comment": {
			{`[^(*)]+`, Comment, nil},
			{`\(\*`, Comment, Push()},
			{`\*\)`, Comment, Pop(1)},
			{`[(*)]`, Comment, nil},
		},
		"string": {
			{`[^"]+`, LiteralStringDouble, nil},
			{`""`, LiteralStringDouble, nil},
			{`"`, LiteralStringDouble, Pop(1)},
		},
		"dotted": {
			{`\s+`, Text, nil},
			{`\.`, Punctuation, nil},
			{`[A-Z][\w\']*(?=\s*\.)`, NameNamespace, nil},
			{`[A-Z][\w\']*`, NameClass, Pop(1)},
			{`[a-z][a-z0-9_\']*`, Name, Pop(1)},
			Default(Pop(1)),
		},
	},
)) | lexers/c/coq.go | 0.537527 | 0.646418 | coq.go | starcoder |
package state
import (
"fmt"
"sort"
"strings"
"testing"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/dapr/components-contrib/state"
"github.com/dapr/components-contrib/tests/conformance/utils"
)
// ValueType is a simple serializable struct used to verify that struct
// payloads round-trip through the state store as JSON.
type ValueType struct {
	Message string `json:"message"`
}
// scenario describes a single key/value case exercised by the conformance
// tests.
type scenario struct {
	key   string
	value interface{}
	// expectedReadResponse holds the exact bytes a Get is expected to return.
	expectedReadResponse []byte
	// toBeDeleted marks values that are deleted (and checked absent) later.
	toBeDeleted bool
	// bulkOnly restricts the scenario to BulkSet/BulkDelete tests.
	bulkOnly bool
	// transactionOnly restricts the scenario to transactional Multi tests.
	transactionOnly bool
	// transactionGroup batches transactionOnly scenarios into one Multi call.
	transactionGroup int
}
// TestConfig holds the common conformance-test configuration for a state
// store component.
type TestConfig struct {
	utils.CommonConfig
}
// NewTestConfig returns a TestConfig for the state conformance tests of the
// named component. The conf map is currently unused but kept for signature
// compatibility with the other conformance-test packages.
func NewTestConfig(component string, allOperations bool, operations []string, conf map[string]interface{}) TestConfig {
	return TestConfig{
		CommonConfig: utils.CommonConfig{
			ComponentType: "state",
			ComponentName: component,
			AllOperations: allOperations,
			Operations:    utils.NewStringSet(operations...),
		},
	}
}
// ConformanceTests runs conf tests for state store.
//
// It drives the store through set/get/delete, their bulk variants,
// transactions, ETag handling, and first-write concurrency, gated on the
// operations enabled in config. The transactional and ETag feature flags are
// asserted both positively and negatively.
func ConformanceTests(t *testing.T, props map[string]string, statestore state.Store, config TestConfig) {
	// Test vars
	//
	// Every scenario key is prefixed with a fresh random value so repeated
	// runs against a shared store do not collide.
	key := strings.ReplaceAll(uuid.New().String(), "-", "")
	t.Logf("Base key for test: %s", key)
	scenarios := []scenario{
		{
			key:                  fmt.Sprintf("%s-int", key),
			value:                123,
			expectedReadResponse: []byte("123"),
		},
		{
			key:                  fmt.Sprintf("%s-bool", key),
			value:                true,
			expectedReadResponse: []byte("true"),
		},
		{
			key:                  fmt.Sprintf("%s-bytes", key),
			value:                []byte{0x1},
			expectedReadResponse: []byte{0x1},
		},
		{
			key:                  fmt.Sprintf("%s-string-with-json", key),
			value:                "{\"a\":\"b\"}",
			expectedReadResponse: []byte("\"{\\\"a\\\":\\\"b\\\"}\""),
		},
		{
			key:                  fmt.Sprintf("%s-string", key),
			value:                "hello world",
			expectedReadResponse: []byte("\"hello world\""),
		},
		{
			key:                  fmt.Sprintf("%s-struct", key),
			value:                ValueType{Message: "test"},
			expectedReadResponse: []byte("{\"message\":\"test\"}"),
		},
		{
			key:                  fmt.Sprintf("%s-to-be-deleted", key),
			value:                "to be deleted",
			expectedReadResponse: []byte("\"to be deleted\""),
			toBeDeleted:          true,
		},
		{
			key:                  fmt.Sprintf("%s-bulk-int", key),
			value:                123,
			expectedReadResponse: []byte("123"),
			bulkOnly:             true,
		},
		{
			key:                  fmt.Sprintf("%s-bulk-bool", key),
			value:                true,
			expectedReadResponse: []byte("true"),
			bulkOnly:             true,
		},
		{
			key:                  fmt.Sprintf("%s-bulk-bytes", key),
			value:                []byte{0x1},
			expectedReadResponse: []byte{0x1},
			bulkOnly:             true,
		},
		{
			key:                  fmt.Sprintf("%s-bulk-string", key),
			value:                "hello world",
			expectedReadResponse: []byte("\"hello world\""),
			bulkOnly:             true,
		},
		{
			key:                  fmt.Sprintf("%s-bulk-struct", key),
			value:                ValueType{Message: "test"},
			expectedReadResponse: []byte("{\"message\":\"test\"}"),
			bulkOnly:             true,
		},
		{
			key:                  fmt.Sprintf("%s-bulk-to-be-deleted", key),
			value:                "to be deleted",
			expectedReadResponse: []byte("\"to be deleted\""),
			toBeDeleted:          true,
			bulkOnly:             true,
		},
		{
			key:                  fmt.Sprintf("%s-bulk-to-be-deleted-too", key),
			value:                "to be deleted too",
			expectedReadResponse: []byte("\"to be deleted too\""),
			toBeDeleted:          true,
			bulkOnly:             true,
		},
		{
			key:                  fmt.Sprintf("%s-trx-int", key),
			value:                123,
			expectedReadResponse: []byte("123"),
			transactionOnly:      true,
			transactionGroup:     1,
		},
		{
			key:                  fmt.Sprintf("%s-trx-bool", key),
			value:                true,
			expectedReadResponse: []byte("true"),
			transactionOnly:      true,
			transactionGroup:     1,
		},
		{
			key:                  fmt.Sprintf("%s-trx-bytes", key),
			value:                []byte{0x1},
			expectedReadResponse: []byte{0x1},
			transactionOnly:      true,
			transactionGroup:     1,
		},
		{
			key:                  fmt.Sprintf("%s-trx-string", key),
			value:                "hello world",
			expectedReadResponse: []byte("\"hello world\""),
			transactionOnly:      true,
			transactionGroup:     1,
		},
		{
			key:                  fmt.Sprintf("%s-trx-struct", key),
			value:                ValueType{Message: "test"},
			expectedReadResponse: []byte("{\"message\":\"test\"}"),
			transactionOnly:      true,
			transactionGroup:     2,
		},
		{
			key:                  fmt.Sprintf("%s-trx-to-be-deleted", key),
			value:                "to be deleted",
			expectedReadResponse: []byte("\"to be deleted\""),
			toBeDeleted:          true,
			transactionOnly:      true,
			transactionGroup:     1,
		},
		{
			key:                  fmt.Sprintf("%s-trx-to-be-deleted-too", key),
			value:                "to be deleted too",
			expectedReadResponse: []byte("\"to be deleted too\""),
			toBeDeleted:          true,
			transactionOnly:      true,
			transactionGroup:     3,
		},
	}
	t.Run("init", func(t *testing.T) {
		err := statestore.Init(state.Metadata{
			Properties: props,
		})
		assert.Nil(t, err)
	})
	t.Run("ping", func(t *testing.T) {
		err := statestore.Ping()
		assert.Nil(t, err)
	})
	if config.HasOperation("set") {
		t.Run("set", func(t *testing.T) {
			for _, scenario := range scenarios {
				if !scenario.bulkOnly && !scenario.transactionOnly {
					t.Logf("Setting value for %s", scenario.key)
					err := statestore.Set(&state.SetRequest{
						Key:   scenario.key,
						Value: scenario.value,
					})
					assert.Nil(t, err)
				}
			}
		})
	}
	if config.HasOperation("get") {
		t.Run("get", func(t *testing.T) {
			for _, scenario := range scenarios {
				if !scenario.bulkOnly && !scenario.transactionOnly {
					t.Logf("Checking value presence for %s", scenario.key)
					res, err := statestore.Get(&state.GetRequest{
						Key: scenario.key,
					})
					assert.Nil(t, err)
					assert.Equal(t, scenario.expectedReadResponse, res.Data)
				}
			}
		})
	}
	if config.HasOperation("delete") {
		t.Run("delete", func(t *testing.T) {
			for _, scenario := range scenarios {
				if !scenario.bulkOnly && scenario.toBeDeleted {
					t.Logf("Deleting %s", scenario.key)
					err := statestore.Delete(&state.DeleteRequest{
						Key: scenario.key,
					})
					assert.Nil(t, err)
					t.Logf("Checking value absence for %s", scenario.key)
					res, err := statestore.Get(&state.GetRequest{
						Key: scenario.key,
					})
					assert.Nil(t, err)
					assert.Nil(t, res.Data)
				}
			}
		})
	}
	if config.HasOperation("bulkset") {
		t.Run("bulkset", func(t *testing.T) {
			var bulk []state.SetRequest
			for _, scenario := range scenarios {
				if scenario.bulkOnly {
					t.Logf("Adding set request to bulk for %s", scenario.key)
					bulk = append(bulk, state.SetRequest{
						Key:   scenario.key,
						Value: scenario.value,
					})
				}
			}
			err := statestore.BulkSet(bulk)
			assert.Nil(t, err)
			for _, scenario := range scenarios {
				if scenario.bulkOnly {
					t.Logf("Checking value presence for %s", scenario.key)
					// Data should have been inserted at this point
					res, err := statestore.Get(&state.GetRequest{
						Key: scenario.key,
					})
					assert.Nil(t, err)
					assert.Equal(t, scenario.expectedReadResponse, res.Data)
				}
			}
		})
	}
	if config.HasOperation("bulkdelete") {
		t.Run("bulkdelete", func(t *testing.T) {
			var bulk []state.DeleteRequest
			for _, scenario := range scenarios {
				if scenario.bulkOnly && scenario.toBeDeleted {
					t.Logf("Adding delete request to bulk for %s", scenario.key)
					bulk = append(bulk, state.DeleteRequest{
						Key: scenario.key,
					})
				}
			}
			err := statestore.BulkDelete(bulk)
			assert.Nil(t, err)
			for _, req := range bulk {
				t.Logf("Checking value absence for %s", req.Key)
				res, err := statestore.Get(&state.GetRequest{
					Key: req.Key,
				})
				assert.Nil(t, err)
				assert.Nil(t, res.Data)
			}
		})
	}
	// nolint: nestif
	if config.HasOperation("transaction") {
		t.Run("transaction", func(t *testing.T) {
			// Check if transactional feature is listed
			features := statestore.Features()
			assert.True(t, state.FeatureTransactional.IsPresent(features))
			var transactionGroups []int
			transactions := map[int][]state.TransactionalStateOperation{}
			// Group the upserts by transactionGroup; each group becomes a
			// single Multi call.
			for _, scenario := range scenarios {
				if scenario.transactionOnly {
					if transactions[scenario.transactionGroup] == nil {
						transactionGroups = append(transactionGroups, scenario.transactionGroup)
					}
					transactions[scenario.transactionGroup] = append(
						transactions[scenario.transactionGroup], state.TransactionalStateOperation{
							Operation: state.Upsert,
							Request: state.SetRequest{
								Key:   scenario.key,
								Value: scenario.value,
							},
						})
					// Deletion happens in the following transaction.
					if scenario.toBeDeleted {
						if transactions[scenario.transactionGroup+1] == nil {
							transactionGroups = append(transactionGroups, scenario.transactionGroup+1)
						}
						transactions[scenario.transactionGroup+1] = append(
							transactions[scenario.transactionGroup+1], state.TransactionalStateOperation{
								Operation: state.Delete,
								Request: state.DeleteRequest{
									Key: scenario.key,
								},
							})
					}
				}
			}
			transactionStore := statestore.(state.TransactionalStore)
			sort.Ints(transactionGroups)
			for _, transactionGroup := range transactionGroups {
				t.Logf("Testing transaction #%d", transactionGroup)
				err := transactionStore.Multi(&state.TransactionalStateRequest{
					Operations: transactions[transactionGroup],
					// For CosmosDB
					Metadata: map[string]string{
						"partitionKey": "myPartition",
					},
				})
				assert.Nil(t, err)
				for _, scenario := range scenarios {
					if scenario.transactionOnly {
						if scenario.transactionGroup == transactionGroup {
							t.Logf("Checking value presence for %s", scenario.key)
							// Data should have been inserted at this point
							res, err := statestore.Get(&state.GetRequest{
								Key: scenario.key,
								// For CosmosDB
								Metadata: map[string]string{
									"partitionKey": "myPartition",
								},
							})
							assert.Nil(t, err)
							assert.Equal(t, scenario.expectedReadResponse, res.Data)
						}
						if scenario.toBeDeleted && (scenario.transactionGroup == transactionGroup-1) {
							t.Logf("Checking value absence for %s", scenario.key)
							// Data should have been deleted at this point
							res, err := statestore.Get(&state.GetRequest{
								Key: scenario.key,
								// For CosmosDB
								Metadata: map[string]string{
									"partitionKey": "myPartition",
								},
							})
							assert.Nil(t, err)
							assert.Nil(t, res.Data)
						}
					}
				}
			}
		})
	} else {
		// Check if transactional feature is NOT listed
		features := statestore.Features()
		assert.False(t, state.FeatureTransactional.IsPresent(features))
	}
	// Supporting etags requires support for get, set, and delete so they are not checked individually
	if config.HasOperation("etag") {
		t.Run("etag", func(t *testing.T) {
			testKey := "etagTest"
			firstValue := []byte("testValue1")
			secondValue := []byte("testValue2")
			fakeEtag := "not-an-etag"
			// Check if eTag feature is listed
			features := statestore.Features()
			assert.True(t, state.FeatureETag.IsPresent(features))
			// Delete any potential object, it's important to start from a clean slate.
			err := statestore.Delete(&state.DeleteRequest{
				Key: testKey,
			})
			assert.Nil(t, err)
			// Set an object.
			err = statestore.Set(&state.SetRequest{
				Key:   testKey,
				Value: firstValue,
			})
			assert.Nil(t, err)
			// Validate the set.
			res, err := statestore.Get(&state.GetRequest{
				Key: testKey,
			})
			assert.Nil(t, err)
			assert.Equal(t, firstValue, res.Data)
			etag := res.ETag
			// Try and update with wrong ETag, expect failure.
			err = statestore.Set(&state.SetRequest{
				Key:   testKey,
				Value: secondValue,
				ETag:  &fakeEtag,
			})
			assert.NotNil(t, err)
			// Try and update with correct ETag, expect success.
			err = statestore.Set(&state.SetRequest{
				Key:   testKey,
				Value: secondValue,
				ETag:  etag,
			})
			assert.Nil(t, err)
			// Validate the set.
			res, err = statestore.Get(&state.GetRequest{
				Key: testKey,
			})
			assert.Nil(t, err)
			assert.Equal(t, secondValue, res.Data)
			assert.NotEqual(t, etag, res.ETag)
			etag = res.ETag
			// Try and delete with wrong ETag, expect failure.
			err = statestore.Delete(&state.DeleteRequest{
				Key:  testKey,
				ETag: &fakeEtag,
			})
			assert.NotNil(t, err)
			// Try and delete with correct ETag, expect success.
			err = statestore.Delete(&state.DeleteRequest{
				Key:  testKey,
				ETag: etag,
			})
			assert.Nil(t, err)
		})
	} else {
		// Check if eTag feature is NOT listed
		features := statestore.Features()
		assert.False(t, state.FeatureETag.IsPresent(features))
	}
	if config.HasOperation("first-write") {
		t.Run("first-write without etag", func(t *testing.T) {
			testKey := "first-writeTest"
			firstValue := []byte("testValue1")
			secondValue := []byte("testValue2")
			emptyString := ""
			// Each pair below performs a first write followed by a second
			// write that must be rejected: once with no ETag, once with an
			// explicit empty ETag.
			requestSets := [][2]*state.SetRequest{
				{
					{
						Key:   testKey,
						Value: firstValue,
						Options: state.SetStateOption{
							Concurrency: state.FirstWrite,
							Consistency: state.Strong,
						},
					}, {
						Key:   testKey,
						Value: secondValue,
						Options: state.SetStateOption{
							Concurrency: state.FirstWrite,
							Consistency: state.Strong,
						},
					},
				},
				{{
					Key:   testKey,
					Value: firstValue,
					Options: state.SetStateOption{
						Concurrency: state.FirstWrite,
						Consistency: state.Strong,
					},
					ETag: &emptyString,
				}, {
					Key:   testKey,
					Value: secondValue,
					Options: state.SetStateOption{
						Concurrency: state.FirstWrite,
						Consistency: state.Strong,
					},
					ETag: &emptyString,
				}},
			}
			for _, requestSet := range requestSets {
				// Delete any potential object, it's important to start from a clean slate.
				err := statestore.Delete(&state.DeleteRequest{
					Key: testKey,
				})
				assert.Nil(t, err)
				err = statestore.Set(requestSet[0])
				assert.Nil(t, err)
				// Validate the set.
				res, err := statestore.Get(&state.GetRequest{
					Key: testKey,
				})
				assert.Nil(t, err)
				assert.Equal(t, firstValue, res.Data)
				// Second write expect fail
				err = statestore.Set(requestSet[1])
				assert.NotNil(t, err)
			}
		})
		t.Run("first-write with etag", func(t *testing.T) {
			testKey := "first-writeTest"
			firstValue := []byte("testValue1")
			secondValue := []byte("testValue2")
			request := &state.SetRequest{
				Key:   testKey,
				Value: firstValue,
			}
			// Delete any potential object, it's important to start from a clean slate.
			err := statestore.Delete(&state.DeleteRequest{
				Key: testKey,
			})
			assert.Nil(t, err)
			err = statestore.Set(request)
			assert.Nil(t, err)
			// Validate the set.
			res, err := statestore.Get(&state.GetRequest{
				Key: testKey,
			})
			assert.Nil(t, err)
			assert.Equal(t, firstValue, res.Data)
			etag := res.ETag
			request = &state.SetRequest{
				Key:   testKey,
				Value: secondValue,
				ETag:  etag,
				Options: state.SetStateOption{
					Concurrency: state.FirstWrite,
					Consistency: state.Strong,
				},
			}
			err = statestore.Set(request)
			assert.Nil(t, err)
			// Validate the set.
			res, err = statestore.Get(&state.GetRequest{
				Key: testKey,
			})
			assert.Nil(t, err)
			assert.NotEqual(t, etag, res.ETag)
			assert.Equal(t, secondValue, res.Data)
			// Reusing the stale ETag must now fail.
			request.ETag = etag
			// Second write expect fail
			err = statestore.Set(request)
			assert.NotNil(t, err)
		})
	}
}
package parse
// WalkTreerBFS walks the Treer level by level.
// See: https://en.wikipedia.org/wiki/Breadth-first_search
//
// fn receives the zero-based level and the current node. It may return
// ErrStopIteration to end the walk successfully, ErrSkipBranch to skip the
// current node's children, or any other error to abort the walk.
func WalkTreerBFS(tree Treer, fn func(int, Treer) error) error {
	var (
		// stack is used as a FIFO queue of nodes still to visit.
		stack   = Treers{}
		current Treer
		childs  Treers
		// starting from the root element
		// only one element left to jump
		// to the next level.
		currentLevelLeft = 1
		level            int
		nextLevelLen     int
		n                int
		err              error
	)
	current = tree
	for current != nil {
		// All nodes of the current level were visited; advance to the
		// next level, whose size was accumulated in nextLevelLen.
		if currentLevelLeft == 0 {
			level++
			currentLevelLeft = nextLevelLen
			nextLevelLen = 0
		}
		err = fn(level, current)
		if err != nil {
			switch err {
			case ErrStopIteration:
				return nil
			case ErrSkipBranch:
				goto nextLevel
			default:
				return err
			}
		}
		childs = current.GetChilds()
		if len(childs) > 0 {
			nextLevelLen += len(childs)
			stack = append(
				stack,
				childs...,
			)
		}
	nextLevel:
		if len(stack) == 0 {
			break
		}
		// Dequeue from the front; FIFO order yields breadth-first traversal.
		current = stack[0]
		stack = stack[1:]
		n++
		currentLevelLeft--
	}
	return nil
}
// WalkTreerNameChainBFS is a walker which reports nesting as chain of
// Treer node Name's on every iteration and uses WalkerTreerBFS.
//
// The chain slice passed to fn is a copy and is safe to retain.
func WalkTreerNameChainBFS(tree Treer, fn func([]string, int, Treer) error) error {
	// nodeInfo tracks, per parent, how many children are still pending and
	// the name chain up to (and including) the parent.
	type nodeInfo struct {
		left  int
		chain []string
	}
	var (
		childs     Treers
		childsLen  int
		chain      []string
		chainCopy  []string
		parent     Treer
		parentInfo *nodeInfo
		parents    = map[Treer]Treer{}
		info       = map[Treer]*nodeInfo{}
	)
	return WalkTreerBFS(
		tree,
		func(level int, tree Treer) error {
			parent = parents[tree]
			childs = tree.GetChilds()
			for _, v := range childs {
				parents[v] = tree
			}
			if parent != nil {
				parentInfo = info[parent]
				chain = append(
					parentInfo.chain,
					tree.Name(),
				)
				// Release the parent's bookkeeping once all of its
				// children have been visited.
				parentInfo.left--
				if parentInfo.left == 0 {
					delete(info, parent)
					delete(parents, parent)
				}
			} else {
				chain = []string{tree.Name()}
			}
			chainCopy = make([]string, len(chain))
			copy(chainCopy, chain)
			childsLen = len(childs)
			if childsLen > 0 {
				info[tree] = &nodeInfo{
					left:  childsLen,
					chain: chainCopy,
				}
			}
			return fn(chainCopy, level, tree)
		},
	)
}
// WalkTreerDFS walks the Treer childs from top to leafs.
// See: https://en.wikipedia.org/wiki/Depth-first_search
//
// Nodes are visited in pre-order. fn receives the zero-based depth and the
// node; it may return ErrStopIteration to end the walk successfully,
// ErrSkipBranch to skip the current node's children, or any other error to
// abort the walk.
func WalkTreerDFS(tree Treer, fn func(int, Treer) error) error {
	var (
		current Treer
		stack   Treers
		level   int
		ok      bool
		err     error
	)
	current = tree
	// backlog holds, per depth, the siblings that still await a visit.
	backlog := map[int]Treers{}
	for current != nil {
		err = fn(level, current)
		if err != nil {
			switch err {
			case ErrStopIteration:
				return nil
			case ErrSkipBranch:
				goto nextLevel
			default:
				return err
			}
		}
		// Descend into the first child; park the remaining siblings.
		stack = current.GetChilds()
		if len(stack) > 0 {
			level++
			backlog[level] = stack[1:]
			current = stack[0]
			continue
		}
	nextLevel:
		// Resume with a pending sibling at this depth, otherwise backtrack
		// one level and retry until the root level is exhausted.
		stack, ok = backlog[level]
		if ok && len(stack) > 0 {
			current = stack[0]
			backlog[level] = stack[1:]
			continue
		}
		level--
		if level < 0 {
			break
		}
		goto nextLevel
	}
	return nil
}
// WalkTreerNameChainDFS is a walker which reports nesting as chain of
// Treer node Name's on every iteration and uses WalkerTreerDFS.
//
// The chain slice passed to fn is a copy and is safe to retain.
func WalkTreerNameChainDFS(tree Treer, fn func([]string, int, Treer) error) error {
	var (
		chain         []string
		chainCopy     []string
		previousLevel int
	)
	return WalkTreerDFS(
		tree,
		func(level int, tree Treer) error {
			// When the walk stays level or moves back up, truncate the
			// chain to the current depth before appending this node.
			if level <= previousLevel {
				chain = chain[:level]
			}
			previousLevel = level
			chain = append(
				chain,
				tree.Name(),
			)
			chainCopy = make([]string, len(chain))
			copy(
				chainCopy,
				chain,
			)
			return fn(chainCopy, level, tree)
		},
	)
}
// FindFirstDFSPrefix returns the first node (in DFS order) whose name chain
// starts with the given prefix, and whether such a node was found. An empty
// prefix matches nothing.
func FindFirstDFSPrefix(tree Treer, prefix []string) (Treer, bool) {
	var (
		found Treer
		ok    bool
	)
	if len(prefix) == 0 {
		return found, ok
	}
	_ = WalkTreerNameChainDFS(
		tree,
		func(chain []string, level int, tree Treer) error {
			if len(chain) < len(prefix) {
				return nil
			}
			for i, name := range prefix {
				if chain[i] != name {
					return nil
				}
			}
			found = tree
			ok = true
			return ErrStopIteration
		},
	)
	return found, ok
}
func FindFirstDFSSuffix(tree Treer, suffix []string) (Treer, bool) {
var (
node Treer
ok bool
)
if len(suffix) == 0 {
return node, ok
}
_ = WalkTreerNameChainDFS(
tree,
func(chain []string, level int, tree Treer) error {
if len(suffix) > len(chain) {
return nil
}
n := len(suffix) - 1
cn := len(chain) - 1
for ; n >= 0; n-- {
if suffix[n] != chain[cn] {
return nil
}
cn--
}
if n < 0 { // walked whole suffix without return
node = tree
ok = true
return ErrStopIteration
}
return nil
},
)
return node, ok
} | treer_walk.go | 0.654784 | 0.430985 | treer_walk.go | starcoder |
package env
import (
"fmt"
"github.com/aunum/gold/pkg/v1/dense"
sphere "github.com/aunum/sphere/api/gen/go/v1alpha"
"gorgonia.org/tensor"
)
// Normalizer will normalize the data coming from an environment.
type Normalizer interface {
	// Init the normalizer, capturing whatever it needs from the environment.
	Init(env *Env) error

	// Norm normalizes the input data.
	Norm(input *tensor.Dense) (*tensor.Dense, error)
}
// MinMaxNormalizer is a min/max normalizer that makes all values between 0>x<1.
type MinMaxNormalizer struct {
	// min and max are the observation-space bounds captured by Init.
	min *tensor.Dense
	max *tensor.Dense
}
// NewMinMaxNormalizer returns a new min/max normalizer; Init must be called
// before Norm.
func NewMinMaxNormalizer() *MinMaxNormalizer {
	return new(MinMaxNormalizer)
}
// Init captures the observation-space min/max bounds used for normalization.
func (m *MinMaxNormalizer) Init(e *Env) error {
	var err error
	m.min, m.max, err = SpaceMinMax(e.GetObservationSpace())
	return err
}
// Norm normalizes the input against the bounds captured by Init.
func (m *MinMaxNormalizer) Norm(input *tensor.Dense) (*tensor.Dense, error) {
	return dense.MinMaxNorm(input, m.min, m.max)
}
// EqWidthBinNormalizer is an EqWidthBinner applied using tensors.
type EqWidthBinNormalizer struct {
	// intervals defines the number of bins per dimension.
	intervals *tensor.Dense
	// binner is constructed by Init from intervals and the space bounds.
	binner *dense.EqWidthBinner
}
// NewEqWidthBinNormalizer returns a new equal-width bin normalizer using the
// given bin intervals; Init must be called before Norm.
func NewEqWidthBinNormalizer(intervals *tensor.Dense) *EqWidthBinNormalizer {
	return &EqWidthBinNormalizer{intervals: intervals}
}
// Init builds the underlying binner from the configured intervals and the
// environment's observation-space bounds.
func (d *EqWidthBinNormalizer) Init(e *Env) error {
	low, high, err := SpaceMinMax(e.GetObservationSpace())
	if err != nil {
		return err
	}
	b, err := dense.NewEqWidthBinner(d.intervals, low, high)
	if err != nil {
		return err
	}
	d.binner = b
	return nil
}
// Norm normalizes the values placing them in their bins.
func (d *EqWidthBinNormalizer) Norm(input *tensor.Dense) (*tensor.Dense, error) {
	return d.binner.Bin(input)
}
// ReshapeNormalizer will reshape the state.
type ReshapeNormalizer struct {
	// shape is the target shape applied to every observation.
	shape tensor.Shape
}
// NewReshapeNormalizer returns a new reshape normalizer with the given
// target shape.
func NewReshapeNormalizer(shape tensor.Shape) *ReshapeNormalizer {
	return &ReshapeNormalizer{shape: shape}
}
// Init the normalizer.
// It records the target shape on the environment so downstream consumers
// know the post-normalization observation shape.
func (r *ReshapeNormalizer) Init(e *Env) error {
	e.reshape = r.shape
	return nil
}
// Norm normalizes the values by reshaping the input in place to the
// configured shape.
func (r *ReshapeNormalizer) Norm(input *tensor.Dense) (*tensor.Dense, error) {
	if err := input.Reshape(r.shape...); err != nil {
		return nil, err
	}
	return input, nil
}
// ExpandDimsNormalizer will expand the dims of the state.
type ExpandDimsNormalizer struct {
	// axis is the position at which a new dimension is inserted.
	axis int
	// NOTE(review): shape is never read by the visible methods and looks
	// like a copy-paste from ReshapeNormalizer — confirm before removing.
	shape tensor.Shape
}
// NewExpandDimsNormalizer returns a new expand dims normalizer that inserts
// a dimension at the given axis.
func NewExpandDimsNormalizer(axis int) *ExpandDimsNormalizer {
	return &ExpandDimsNormalizer{axis: axis}
}
// Init the normalizer.
// It records the expanded observation shape on the environment.
func (r *ExpandDimsNormalizer) Init(e *Env) error {
	e.reshape = dense.ExpandDimsShape(e.ObservationSpaceShape(), r.axis)
	return nil
}
// Norm normalizes the values by expanding the input's dims in place along
// the configured axis.
func (r *ExpandDimsNormalizer) Norm(input *tensor.Dense) (*tensor.Dense, error) {
	if err := dense.ExpandDims(input, r.axis); err != nil {
		return nil, err
	}
	return input, nil
}
// SpaceMinMax returns the min/max for a space as tensors.
// Sphere already normalizes infinite spaces to floats.
func SpaceMinMax(space *sphere.Space) (min, max *tensor.Dense, err error) {
switch s := space.GetInfo().(type) {
case *sphere.Space_Box:
shape := []int{}
for _, i := range s.Box.GetShape() {
shape = append(shape, int(i))
}
min = tensor.New(tensor.WithBacking(s.Box.GetLow()))
max = tensor.New(tensor.WithBacking(s.Box.GetHigh()))
case *sphere.Space_Discrete:
min = tensor.New(tensor.WithBacking([]float32{0}))
max = tensor.New(tensor.WithBacking([]float32{float32(s.Discrete.N)}))
case *sphere.Space_MultiDiscrete:
minB := []float32{}
maxB := []float32{}
for _, v := range s.MultiDiscrete.DiscreteSpaces {
minB = append(minB, 0)
maxB = append(maxB, float32(v))
}
min = tensor.New(tensor.WithBacking(minB))
max = tensor.New(tensor.WithBacking(maxB))
case *sphere.Space_MultiBinary:
err = fmt.Errorf("multi-binary space not supported")
case *sphere.Space_StructSpace:
err = fmt.Errorf("struct space not supported")
default:
err = fmt.Errorf("unknown action space type: %v", space)
}
return
} | pkg/v1/env/norm.go | 0.722233 | 0.431165 | norm.go | starcoder |
package openapi
import (
"encoding/json"
"time"
)
// WorkflowRunState describes the execution state of a workflow run: start
// and end times, overall status, and the status of individual steps.
type WorkflowRunState struct {
	// Time at which the workflow execution ended
	EndedAt NullableTime `json:"ended_at"`
	// Time at which workflow execution started
	StartedAt NullableTime `json:"started_at"`
	// Current status of the workflow, based on steps statuses and approvals
	Status string `json:"status"`
	// Status of individual run steps, indexed by name
	Steps *map[string]WorkflowRunStepState `json:"steps,omitempty"`
}
// NewWorkflowRunState instantiates a new WorkflowRunState with all required
// fields populated; the optional Steps field is left unset.
func NewWorkflowRunState(endedAt NullableTime, startedAt NullableTime, status string) *WorkflowRunState {
	return &WorkflowRunState{
		EndedAt:   endedAt,
		StartedAt: startedAt,
		Status:    status,
	}
}
// NewWorkflowRunStateWithDefaults instantiates a new WorkflowRunState with
// only defaulted properties set; required fields remain zero values.
func NewWorkflowRunStateWithDefaults() *WorkflowRunState {
	return &WorkflowRunState{}
}
// GetEndedAt returns the EndedAt field value.
// If the value is explicit nil, the zero value for time.Time is returned.
func (o *WorkflowRunState) GetEndedAt() time.Time {
	if o != nil {
		if v := o.EndedAt.Get(); v != nil {
			return *v
		}
	}
	return time.Time{}
}
// GetEndedAtOk returns a tuple with the EndedAt field value
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *WorkflowRunState) GetEndedAtOk() (*time.Time, bool) {
	if o == nil {
		return nil, false
	}
	return o.EndedAt.Get(), o.EndedAt.IsSet()
}
// SetEndedAt sets the required EndedAt field value.
func (o *WorkflowRunState) SetEndedAt(val time.Time) {
	o.EndedAt.Set(&val)
}
// GetStartedAt returns the StartedAt field value.
// If the value is explicit nil, the zero value for time.Time is returned.
func (o *WorkflowRunState) GetStartedAt() time.Time {
	if o != nil {
		if v := o.StartedAt.Get(); v != nil {
			return *v
		}
	}
	return time.Time{}
}
// GetStartedAtOk returns a tuple with the StartedAt field value
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *WorkflowRunState) GetStartedAtOk() (*time.Time, bool) {
	if o == nil {
		return nil, false
	}
	return o.StartedAt.Get(), o.StartedAt.IsSet()
}
// SetStartedAt sets the required StartedAt field value.
func (o *WorkflowRunState) SetStartedAt(val time.Time) {
	o.StartedAt.Set(&val)
}
// GetStatus returns the Status field value, or the empty string when the
// receiver is nil.
func (o *WorkflowRunState) GetStatus() string {
	if o == nil {
		return ""
	}
	return o.Status
}
// GetStatusOk returns a tuple with the Status field value
// and a boolean to check if the value has been set.
func (o *WorkflowRunState) GetStatusOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Status, true
}
// SetStatus sets the required Status field value.
func (o *WorkflowRunState) SetStatus(val string) {
	o.Status = val
}
// GetSteps returns the Steps field value if set, and a nil map otherwise.
func (o *WorkflowRunState) GetSteps() map[string]WorkflowRunStepState {
	if o == nil || o.Steps == nil {
		return nil
	}
	return *o.Steps
}
// GetStepsOk returns a tuple with the Steps field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *WorkflowRunState) GetStepsOk() (*map[string]WorkflowRunStepState, bool) {
	if o == nil || o.Steps == nil {
		return nil, false
	}
	return o.Steps, true
}
// HasSteps reports whether the optional Steps field has been set.
func (o *WorkflowRunState) HasSteps() bool {
	return o != nil && o.Steps != nil
}
// SetSteps stores a reference to the given map[string]WorkflowRunStepState
// in the optional Steps field.
func (o *WorkflowRunState) SetSteps(val map[string]WorkflowRunStepState) {
	o.Steps = &val
}
// MarshalJSON serializes WorkflowRunState. The required ended_at, started_at
// and status fields are always emitted; the optional steps field is emitted
// only when set.
func (o WorkflowRunState) MarshalJSON() ([]byte, error) {
	// The generator's constant `if true` guards around the required fields
	// were removed; they always executed.
	toSerialize := map[string]interface{}{
		"ended_at":   o.EndedAt.Get(),
		"started_at": o.StartedAt.Get(),
		"status":     o.Status,
	}
	if o.Steps != nil {
		toSerialize["steps"] = o.Steps
	}
	return json.Marshal(toSerialize)
}
// NullableWorkflowRunState wraps a WorkflowRunState pointer together with a
// flag that distinguishes "unset" from "explicitly null".
type NullableWorkflowRunState struct {
	value *WorkflowRunState
	isSet bool
}
// Get returns the wrapped value (possibly nil).
func (v NullableWorkflowRunState) Get() *WorkflowRunState {
	return v.value
}
// Set stores val and marks the wrapper as set.
func (v *NullableWorkflowRunState) Set(val *WorkflowRunState) {
	v.value = val
	v.isSet = true
}
// IsSet reports whether a value (including an explicit null) has been stored.
func (v NullableWorkflowRunState) IsSet() bool {
	return v.isSet
}
// Unset clears the value and marks the wrapper as unset.
func (v *NullableWorkflowRunState) Unset() {
	v.value = nil
	v.isSet = false
}
// NewNullableWorkflowRunState returns a wrapper holding val, marked as set.
func NewNullableWorkflowRunState(val *WorkflowRunState) *NullableWorkflowRunState {
	return &NullableWorkflowRunState{value: val, isSet: true}
}
// MarshalJSON serializes the wrapped value (a nil value encodes as JSON null).
func (v NullableWorkflowRunState) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
func (v *NullableWorkflowRunState) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | client/pkg/client/openapi/model_workflow_run_state.go | 0.734691 | 0.500671 | model_workflow_run_state.go | starcoder |
package graph
import (
"fmt"
"math/rand"
"strconv"
"sync"
)
// Input are the inputs for each experiment
type Input struct {
	// rounds is the number of simulation rounds, carCount the number of
	// cars routed per worker, and threads the number of worker goroutines.
	rounds, carCount, threads int
	// trafficSlowdown is the weight penalty applied per edge traversal.
	trafficSlowdown float64
	// root and destination are the start and end vertices for every car.
	root, destination string
}
// NewInput builds the Input parameters for an experiment.
func NewInput(rounds, carsCount, threads int,
	trafficSlowdown float64, root, destination string) *Input {
	return &Input{
		rounds:          rounds,
		carCount:        carsCount,
		threads:         threads,
		trafficSlowdown: trafficSlowdown,
		root:            root,
		destination:     destination,
	}
}
// Complete creates a complete graph on n vertices with equal (1) weights on
// the vertices and equal (0) weights on the edges. It panics if n is
// negative.
func Complete(n int, input *Input) *Graph {
	if n < 0 {
		// Was panic("") — an empty panic message gives no diagnostic.
		panic("graph: Complete requires a non-negative vertex count")
	}
	graph := NewGraph(input)
	for i := 0; i < n; i++ {
		vertexA := strconv.Itoa(i)
		graph.AddVertex(NewNode(vertexA, 1))
		graph.adj[vertexA] = make(map[string]Adj)
		graph.visitedEdge[vertexA] = make(map[string]bool)
		// Connect vertex i to every previously added vertex, recording the
		// edge in both directions. (The original recomputed vertexA inside
		// this loop on each iteration; that redundancy was removed.)
		for j := 0; j < i; j++ {
			vertexB := strconv.Itoa(j)
			graph.adj[vertexA][vertexB] = *NewAdj(0)
			graph.adj[vertexB][vertexA] = *NewAdj(0)
			graph.visitedEdge[vertexA][vertexB] = false
			graph.visitedEdge[vertexB][vertexA] = false
		}
	}
	return graph
}
// Complement return a new graph which is the complement of the input graph.
//
// NOTE(review): the implementation copies every edge with its endpoints
// swapped (the transpose/reverse of the adjacency map), not the
// set-theoretic complement. For graphs whose edges are stored symmetrically
// this amounts to a copy — confirm the intended semantics.
func (graph *Graph) Complement(input *Input) *Graph {
	graph.lock.RLock()
	defer graph.lock.RUnlock()
	graphC := NewGraph(input)
	for vertexA, edges := range graph.adj {
		for vertexB, weight := range edges {
			// Lazily create the inner map for the reversed edge.
			if graphC.adj[vertexB] == nil {
				graphC.adj[vertexB] = make(map[string]Adj)
			}
			graphC.adj[vertexB][vertexA] = weight
		}
	}
	return graphC
}
// NewRandom returns a graph with edgeCount edges placed between randomly
// chosen vertices in [0, maxVertices).
//
// NOTE(review): weights are drawn as rand*weightRange[1]+weightRange[0],
// i.e. [min, min+max) rather than [min, max); for the common {0, 1} range
// the two coincide — confirm intent for other ranges.
func NewRandom(maxVertices, edgeCount int,
	weightRange []float64, input *Input) *Graph {
	graph := NewGraph(input)
	lo, hi := weightRange[0], weightRange[1]
	for e := 0; e < edgeCount; e++ {
		from := strconv.Itoa(rand.Intn(maxVertices))
		to := strconv.Itoa(rand.Intn(maxVertices))
		graph.AddEdges(NewNode(from, 0), NewNode(to, 0),
			rand.Float64()*hi+lo)
	}
	return graph
}
// NewCity creates a city-like graph in which each intersection is connected
// to between 2 and 6 randomly chosen other intersections.
func NewCity(intersections int, input *Input) *Graph {
	graph := NewGraph(input)
	// First register every intersection as a vertex.
	for i := 0; i < intersections; i++ {
		graph.AddVertex(NewNode(strconv.Itoa(i), 0))
	}
	// Then wire each intersection to a random set of neighbors.
	for i := 0; i < intersections; i++ {
		roads := rand.Intn(4) + 2
		for j := 0; j < roads; j++ {
			neighbor := strconv.Itoa(rand.Intn(intersections))
			graph.AddEdges(
				NewNode(strconv.Itoa(i), 0),
				NewNode(neighbor, 0),
				1.0)
		}
	}
	return graph
}
// Drive simulates one round of cars driving and then updating the time it took to drive on each weighted edge
//
// It returns, per worker and per car, the route distance and the vertex
// path taken.
//
// NOTE(review): ShortestPath and UpdateEdge are invoked concurrently from
// multiple goroutines on the shared graph; this assumes Graph synchronizes
// internally — confirm.
func (graph *Graph) Drive() ([][]float64, [][][]string) {
	carsCount, threads := graph.input.carCount, graph.input.threads
	trafficSlowdown, root := graph.input.trafficSlowdown, graph.input.root
	destination := graph.input.destination
	var wg sync.WaitGroup
	// distances[i][j] / paths[i][j] hold the result of car j on worker i.
	distances := make([][]float64, threads)
	paths := make([][][]string, threads)
	// Cars driving around
	wg.Add(threads)
	for i := 0; i < threads; i++ {
		distances[i] = make([]float64, carsCount)
		paths[i] = make([][]string, carsCount)
		go func(i int) {
			for j := 0; j < carsCount; j++ {
				distances[i][j], paths[i][j] =
					graph.ShortestPath(root, destination)
			}
			wg.Done()
		}(i)
	}
	wg.Wait()
	// Updating weight of edges of graph
	wg.Add(threads)
	for i := 0; i < threads; i++ {
		go func(i int) {
			for j := 0; j < carsCount; j++ {
				pathLength := len(paths[i][j]) - 1
				for k := 0; k < pathLength; k++ {
					nodeA, nodeB := paths[i][j][k], paths[i][j][k+1]
					graph.UpdateEdge(nodeA, nodeB, trafficSlowdown)
				}
			}
			wg.Done()
		}(i)
	}
	wg.Wait()
	// Reset any unvisited edges to 1
	for vertexA, edges := range graph.visitedEdge {
		for vertexB, visited := range edges {
			if visited {
				graph.saveWeight(vertexA, vertexB)
			} else {
				graph.resetWeight(vertexA, vertexB)
			}
		}
	}
	return distances, paths
}
// resetWeight restores an untraveled edge to weight 1, discarding any
// previously recorded weight.
func (graph *Graph) resetWeight(vertexA, vertexB string) {
	graph.adj[vertexA][vertexB] = Adj{weight: 1, prevWeight: 1}
}
// saveWeight archives a traveled edge's current weight into prevWeight and
// resets the active weight to 1 for the next round.
func (graph *Graph) saveWeight(vertexA, vertexB string) {
	weight := graph.adj[vertexA][vertexB].weight
	graph.adj[vertexA][vertexB] = Adj{weight: 1, prevWeight: weight}
}
// Simulate runs the configured number of driving rounds and returns the
// graph for chaining.
func (graph *Graph) Simulate() *Graph {
	for round := 0; round < graph.input.rounds; round++ {
		graph.Drive()
	}
	return graph
}
// GenerateExperiment chooses which experiment to run
func GenerateExperiment(size, i int, experimentType string, input *Input) *Graph {
if experimentType == "city" {
g := NewCity(size, input)
g.id = i
g.cityType = experimentType
return g
} else if experimentType == "complete" {
g := Complete(size, input)
g.id = i
g.cityType = experimentType
return g
} else if experimentType == "random" {
g := NewRandom(size, rand.Intn(size*size), []float64{0, 1}, input)
g.id = i
g.cityType = experimentType
return g
} else {
valid := "Use city, complete, or random."
panic(fmt.Sprintf("%v is not a valid experiment. %v",
experimentType, valid))
}
} | graph/generate.go | 0.64512 | 0.455501 | generate.go | starcoder |
package buffer
import (
"bytes"
"io"
"io/ioutil"
"github.com/buildbarn/bb-storage/pkg/digest"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
)
// validatedByteSliceBuffer is a Buffer backed by a byte slice whose
// contents are already known to be valid, so no integrity checking is
// performed on access.
type validatedByteSliceBuffer struct {
	data []byte
}
// NewValidatedBufferFromByteSlice creates a Buffer that is backed by a
// slice of bytes. No checking of data integrity is performed, as it is
// assumed that the data stored in the slice is valid.
//
// The slice is stored without copying, so the caller must not mutate it
// afterwards.
func NewValidatedBufferFromByteSlice(data []byte) Buffer {
	return &validatedByteSliceBuffer{
		data: data,
	}
}
// NewCASBufferFromByteSlice creates a buffer for an object stored in
// the Content Addressable Storage, backed by a byte slice.
//
// The data is validated against the digest, first by size and then by
// checksum. On a mismatch, the corresponding notification is sent to
// source and a Buffer in the error state is returned.
func NewCASBufferFromByteSlice(digest digest.Digest, data []byte, source Source) Buffer {
	// Compare the blob's size.
	expectedSizeBytes := digest.GetSizeBytes()
	actualSizeBytes := int64(len(data))
	if expectedSizeBytes != actualSizeBytes {
		return NewBufferFromError(source.notifyCASSizeMismatch(expectedSizeBytes, actualSizeBytes))
	}

	// Compare the blob's checksum. bytes.Equal is the idiomatic form of
	// bytes.Compare(...) != 0 (staticcheck S1004). Hash writers never
	// return an error, so hasher.Write's result is safely ignored.
	expectedChecksum := digest.GetHashBytes()
	hasher := digest.NewHasher()
	hasher.Write(data)
	actualChecksum := hasher.Sum(nil)
	if !bytes.Equal(expectedChecksum, actualChecksum) {
		return NewBufferFromError(source.notifyCASHashMismatch(expectedChecksum, actualChecksum))
	}

	source.notifyDataValid()
	return NewValidatedBufferFromByteSlice(data)
}
// GetSizeBytes returns the size of the buffer contents in bytes.
func (b validatedByteSliceBuffer) GetSizeBytes() (int64, error) {
	return int64(len(b.data)), nil
}

// IntoWriter writes the full contents of the buffer to w.
func (b validatedByteSliceBuffer) IntoWriter(w io.Writer) error {
	_, err := w.Write(b.data)
	return err
}
// ReadAt copies bytes starting at offset off into p, implementing the
// io.ReaderAt contract: io.EOF is returned when fewer than len(p) bytes
// remain past the offset.
func (b validatedByteSliceBuffer) ReadAt(p []byte, off int64) (int, error) {
	switch {
	case off < 0:
		return 0, status.Errorf(codes.InvalidArgument, "Negative read offset: %d", off)
	case off > int64(len(b.data)):
		return 0, io.EOF
	}
	n := copy(p, b.data[off:])
	if n == len(p) {
		return n, nil
	}
	return n, io.EOF
}
// ToProto unmarshals the buffer contents into proto message m, provided
// the buffer does not exceed maximumSizeBytes.
func (b validatedByteSliceBuffer) ToProto(m proto.Message, maximumSizeBytes int) (proto.Message, error) {
	return toProtoViaByteSlice(b, m, maximumSizeBytes)
}

// ToByteSlice returns the backing slice directly (no copy is made),
// failing if it is larger than maximumSizeBytes.
func (b validatedByteSliceBuffer) ToByteSlice(maximumSizeBytes int) ([]byte, error) {
	if len(b.data) > maximumSizeBytes {
		return nil, status.Errorf(codes.InvalidArgument, "Buffer is %d bytes in size, while a maximum of %d bytes is permitted", len(b.data), maximumSizeBytes)
	}
	return b.data, nil
}
// ToChunkReader returns a ChunkReader over the buffer starting at off.
// The data needs no revalidation, so the unvalidated reader is used.
func (b validatedByteSliceBuffer) ToChunkReader(off int64, maximumChunkSizeBytes int) ChunkReader {
	return b.toUnvalidatedChunkReader(off, maximumChunkSizeBytes)
}

// ToReader returns a reader over the full buffer contents.
func (b validatedByteSliceBuffer) ToReader() io.ReadCloser {
	return b.toUnvalidatedReader(0)
}

// CloneCopy returns the buffer twice; the buffer is immutable, so both
// "copies" can safely share the same backing slice.
func (b validatedByteSliceBuffer) CloneCopy(maximumSizeBytes int) (Buffer, Buffer) {
	return b, b
}

// CloneStream behaves like CloneCopy: no duplication is required.
func (b validatedByteSliceBuffer) CloneStream() (Buffer, Buffer) {
	return b, b
}
// WithTask runs task synchronously, returning the buffer unchanged on
// success or an error buffer if the task fails. Because this buffer is
// trivially cloneable there is no benefit to background execution.
func (b validatedByteSliceBuffer) WithTask(task func() error) Buffer {
	err := task()
	if err == nil {
		return b
	}
	return NewBufferFromError(err)
}
// Discard is a no-op: there are no resources to release.
func (b validatedByteSliceBuffer) Discard() {}

// applyErrorHandler terminates the error handler immediately, as the
// buffer is already in a known good state.
func (b validatedByteSliceBuffer) applyErrorHandler(errorHandler ErrorHandler) (Buffer, bool) {
	// The buffer is in a known good state. Terminate the error
	// handler directly. There is no need to return a wrapped buffer.
	errorHandler.Done()
	return b, false
}
// toUnvalidatedChunkReader returns a ChunkReader over the data starting
// at off, without performing any further validation. Chunks alias the
// backing slice.
func (b validatedByteSliceBuffer) toUnvalidatedChunkReader(off int64, maximumChunkSizeBytes int) ChunkReader {
	if err := validateReaderOffset(int64(len(b.data)), off); err != nil {
		return newErrorChunkReader(err)
	}
	return &byteSliceChunkReader{
		maximumChunkSizeBytes: maximumChunkSizeBytes,
		data: b.data[off:],
	}
}
// toUnvalidatedReader returns a ReadCloser over the data starting at
// off, without performing any further validation.
//
// NOTE(review): ioutil.NopCloser is deprecated in favour of io.NopCloser
// (Go 1.16+); switching would leave the io/ioutil import unused, so the
// change belongs together with an import cleanup.
func (b validatedByteSliceBuffer) toUnvalidatedReader(off int64) io.ReadCloser {
	if err := validateReaderOffset(int64(len(b.data)), off); err != nil {
		return newErrorReader(err)
	}
	return ioutil.NopCloser(bytes.NewBuffer(b.data[off:]))
}
// byteSliceChunkReader yields successive chunks of at most
// maximumChunkSizeBytes bytes from data.
type byteSliceChunkReader struct {
	maximumChunkSizeBytes int
	data []byte
}
func (r *byteSliceChunkReader) Read() ([]byte, error) {
data := r.data
if len(data) == 0 {
// No more data to return.
return nil, io.EOF
}
if len(data) <= r.maximumChunkSizeBytes {
// Last chunk of data to be returned.
r.data = nil
return data, nil
}
// Full chunk of data still available.
r.data = r.data[r.maximumChunkSizeBytes:]
return data[:r.maximumChunkSizeBytes], nil
}
func (r *byteSliceChunkReader) Close() {} | pkg/blobstore/buffer/validated_byte_slice_buffer.go | 0.800107 | 0.476823 | validated_byte_slice_buffer.go | starcoder |
package vec
import (
"github.com/chewxy/math32"
"github.com/foxis/EasyRobot/pkg/core/math"
)
type Vector3D [3]float32
// Sum returns the sum of the three components.
func (v *Vector3D) Sum() float32 {
	return v[0] + v[1] + v[2]
}
// Vector returns the components as a slice aliasing v; mutations are
// visible through both.
func (v *Vector3D) Vector() Vector {
	return v[:]
}

// Slice returns components [start, end) as a slice aliasing v. A
// negative end means "through the last component".
func (v *Vector3D) Slice(start, end int) Vector {
	if end < 0 {
		end = len(v)
	}
	return v[start:end]
}

// XYZ returns the three components as separate values.
func (v *Vector3D) XYZ() (float32, float32, float32) {
	return v[0], v[1], v[2]
}
// SumSqr returns the squared Euclidean norm (v · v).
func (v *Vector3D) SumSqr() float32 {
	return v[0]*v[0] + v[1]*v[1] + v[2]*v[2]
}
// Magnitude returns the Euclidean length of the vector.
func (v *Vector3D) Magnitude() float32 {
	return math32.Sqrt(v.SumSqr())
}

// DistanceSqr returns the squared distance between v and v1. It works on
// a clone, so v itself is left unmodified.
func (v *Vector3D) DistanceSqr(v1 Vector3D) float32 {
	return v.Clone().Sub(v1).SumSqr()
}

// Distance returns the Euclidean distance between v and v1.
func (v *Vector3D) Distance(v1 Vector3D) float32 {
	return math32.Sqrt(v.DistanceSqr(v1))
}
// Clone returns a newly allocated copy of v.
func (v *Vector3D) Clone() *Vector3D {
	clone := Vector3D{}
	copy(clone[:], v[:])
	return &clone
}

// CopyFrom copies v1 into v starting at component index start and
// returns v for chaining.
func (v *Vector3D) CopyFrom(start int, v1 Vector) *Vector3D {
	copy(v[start:], v1)
	return v
}

// CopyTo copies v's components from index start onwards into v1 and
// returns v1.
func (v *Vector3D) CopyTo(start int, v1 Vector) Vector {
	copy(v1, v[start:])
	return v1
}

// Clamp limits each component to its [min[i], max[i]] range in place and
// returns v.
func (v *Vector3D) Clamp(min, max Vector3D) *Vector3D {
	for i := range v {
		v[i] = math.Clamp(v[i], min[i], max[i])
	}
	return v
}
// FillC sets every component to c in place and returns v for chaining.
func (v *Vector3D) FillC(c float32) *Vector3D {
	v[0], v[1], v[2] = c, c, c
	return v
}

// Neg negates each component in place and returns v.
func (v *Vector3D) Neg() *Vector3D {
	v[0], v[1], v[2] = -v[0], -v[1], -v[2]
	return v
}

// Add adds v1 component-wise in place and returns v.
func (v *Vector3D) Add(v1 Vector3D) *Vector3D {
	v[0] += v1[0]
	v[1] += v1[1]
	v[2] += v1[2]
	return v
}

// AddC adds the scalar c to each component in place and returns v.
func (v *Vector3D) AddC(c float32) *Vector3D {
	v[0] += c
	v[1] += c
	v[2] += c
	return v
}

// Sub subtracts v1 component-wise in place and returns v.
func (v *Vector3D) Sub(v1 Vector3D) *Vector3D {
	v[0] -= v1[0]
	v[1] -= v1[1]
	v[2] -= v1[2]
	return v
}

// SubC subtracts the scalar c from each component in place and returns v.
func (v *Vector3D) SubC(c float32) *Vector3D {
	v[0] -= c
	v[1] -= c
	v[2] -= c
	return v
}

// MulC scales each component by c in place and returns v.
func (v *Vector3D) MulC(c float32) *Vector3D {
	v[0] *= c
	v[1] *= c
	v[2] *= c
	return v
}

// MulCAdd adds c*v1 in place (v += v1*c) and returns v.
func (v *Vector3D) MulCAdd(c float32, v1 Vector3D) *Vector3D {
	v[0] += v1[0] * c
	v[1] += v1[1] * c
	v[2] += v1[2] * c
	return v
}

// MulCSub subtracts c*v1 in place (v -= v1*c) and returns v.
func (v *Vector3D) MulCSub(c float32, v1 Vector3D) *Vector3D {
	v[0] -= v1[0] * c
	v[1] -= v1[1] * c
	v[2] -= v1[2] * c
	return v
}

// DivC divides each component by c in place and returns v.
// No zero guard: c == 0 yields ±Inf/NaN components.
func (v *Vector3D) DivC(c float32) *Vector3D {
	v[0] /= c
	v[1] /= c
	v[2] /= c
	return v
}

// DivCAdd adds v1/c in place (v += v1/c) and returns v.
func (v *Vector3D) DivCAdd(c float32, v1 Vector3D) *Vector3D {
	v[0] += v1[0] / c
	v[1] += v1[1] / c
	v[2] += v1[2] / c
	return v
}

// DivCSub subtracts v1/c in place (v -= v1/c) and returns v.
func (v *Vector3D) DivCSub(c float32, v1 Vector3D) *Vector3D {
	v[0] -= v1[0] / c
	v[1] -= v1[1] / c
	v[2] -= v1[2] / c
	return v
}
// Normal normalizes v in place to unit length and returns v.
// No zero guard: a zero vector yields NaN components.
func (v *Vector3D) Normal() *Vector3D {
	d := v.Magnitude()
	return v.DivC(d)
}

// NormalFast normalizes v in place via math.FastISqrt — presumably a
// fast inverse-square-root approximation trading accuracy for speed;
// confirm its precision guarantees before relying on exact results.
func (v *Vector3D) NormalFast() *Vector3D {
	d := v.SumSqr()
	return v.MulC(math.FastISqrt(d))
}
// Multiply performs component-wise (Hadamard) multiplication in place
// and returns v.
func (v *Vector3D) Multiply(v1 Vector3D) *Vector3D {
	v[0] *= v1[0]
	v[1] *= v1[1]
	v[2] *= v1[2]
	return v
}

// Dot returns the dot product of v and v1.
func (v *Vector3D) Dot(v1 Vector3D) float32 {
	return v[0]*v1[0] + v[1]*v1[1] + v[2]*v1[2]
}
// Cross replaces v with the cross product v × v1 and returns v.
func (v *Vector3D) Cross(v1 Vector3D) *Vector3D {
	// Copy the original components by value; this replaces the previous
	// heap-allocated []float32 temporary with a stack array copy.
	t := *v
	v[0] = t[1]*v1[2] - t[2]*v1[1]
	v[1] = t[2]*v1[0] - t[0]*v1[2]
	v[2] = t[0]*v1[1] - t[1]*v1[0]
	return v
}
// Refract bends v across the surface with normal n for a transition
// between media of refractive indices ni and nt (Snell's law), updating
// v in place. The boolean result is false on total internal reflection,
// in which case v is left unmodified.
// NOTE(review): the ni/nt ratio is selected by the sign of n·v — confirm
// the intended orientation convention of the surface normal.
func (v *Vector3D) Refract(n Vector3D, ni, nt float32) (*Vector3D, bool) {
	var (
		sin_T Vector3D /* sin vect of the refracted vect */
		cos_V Vector3D /* cos vect of the incident vect */
		n_mult float32 /* ni over nt */
	)
	N_dot_V := n.Dot(*v)
	// Pick the index ratio depending on which side of the surface the
	// incident vector is on.
	if N_dot_V > 0.0 {
		n_mult = ni / nt
	} else {
		n_mult = nt / ni
	}
	cos_V[0] = n[0] * N_dot_V
	cos_V[1] = n[1] * N_dot_V
	cos_V[2] = n[2] * N_dot_V
	sin_T[0] = (cos_V[0] - v[0]) * (n_mult)
	sin_T[1] = (cos_V[1] - v[1]) * (n_mult)
	sin_T[2] = (cos_V[2] - v[2]) * (n_mult)
	len_sin_T := sin_T.Dot(sin_T)
	// |sin T|² >= 1 means no real refracted direction exists.
	if len_sin_T >= 1.0 {
		return v, false // internal reflection
	}
	N_dot_T := math32.Sqrt(1.0 - len_sin_T)
	if N_dot_V < 0.0 {
		N_dot_T = -N_dot_T
	}
	v[0] = sin_T[0] - n[0]*N_dot_T
	v[1] = sin_T[1] - n[1]*N_dot_T
	v[2] = sin_T[2] - n[2]*N_dot_T
	return v, true
}
func (v *Vector3D) Reflect(n Vector3D) *Vector3D {
N_dot_V := n.Dot(*v) * 2
return v.Neg().MulCAdd(N_dot_V, n)
}
func (v *Vector3D) Interpolate(v1 Vector3D, t float32) *Vector3D {
d := v1.Clone().Sub(*v)
return v.MulCAdd(t, *d)
} | pkg/core/math/vec/vec3d.go | 0.774583 | 0.683215 | vec3d.go | starcoder |
package geoindex
import (
"fmt"
"time"
)
// Minutes is a duration expressed as a whole number of minutes.
type Minutes int

// counter accumulates points and can report their centroid and count.
type counter interface {
	Add(point Point)
	Remove(point Point)
	Point() *CountPoint
}

// timestampedCounter pairs a per-minute counter with the time at which
// it absorbed its first point.
type timestampedCounter struct {
	counter accumulatingCounter
	timestamp time.Time
}
// Expiring counter.
//
// expiringCounter keeps one accumulatingCounter per minute in a queue,
// dropping entries older than `minutes`, while `count` maintains the
// running aggregate over the live entries.
type expiringCounter struct {
	counters *queue
	minutes Minutes
	count accumulatingCounter
	newCounter func(point Point) accumulatingCounter
}
// newExpiringCounter returns an expiring counter aggregating plain point
// counts over the given expiration window.
func newExpiringCounter(expiration Minutes) *expiringCounter {
	return &expiringCounter{
		newQueue(int(expiration) + 1),
		expiration,
		&singleValueAccumulatingCounter{0.0, 0.0, 0},
		newSingleValueAccumulatingCounter,
	}
}

// newExpiringMultiCounter returns an expiring counter that additionally
// tallies counts per point id.
func newExpiringMultiCounter(expiration Minutes) *expiringCounter {
	return &expiringCounter{
		newQueue(int(expiration) + 1),
		expiration,
		&multiValueAccumulatingCounter{
			&singleValueAccumulatingCounter{0.0, 0.0, 0},
			make(map[string]int),
		},
		newMultiValueCounter,
	}
}

// newExpiringAverageCounter returns an expiring counter that averages
// the Count values of added CountPoints.
func newExpiringAverageCounter(expiration Minutes) *expiringCounter {
	return &expiringCounter{
		newQueue(int(expiration) + 1),
		expiration,
		&averageAccumulatingCounter{
			&singleValueAccumulatingCounter{0.0, 0.0, 0},
			0.0,
		},
		newAverageAccumulatingCounter,
	}
}
// expire pops counters older than the expiration window off the front of
// the queue, subtracting each from the running aggregate. The queue is
// ordered oldest-first, so the scan stops at the first live entry.
func (c *expiringCounter) expire() {
	for !c.counters.IsEmpty() {
		oldest := c.counters.Peek().(*timestampedCounter)
		ageInMinutes := int(getNow().Sub(oldest.timestamp).Minutes())
		if ageInMinutes <= int(c.minutes) {
			return
		}
		c.counters.Pop()
		c.count.Minus(oldest.counter)
	}
}
func (c *expiringCounter) Add(point Point) {
c.expire()
c.count.Plus(c.newCounter(point))
lastCounter := c.counters.PeekBack()
if lastCounter != nil && lastCounter.(*timestampedCounter).timestamp.Minute() == getNow().Minute() {
lastCounter.(*timestampedCounter).counter.Add(point)
} else {
counter := ×tampedCounter{c.newCounter(point), getNow()}
c.counters.Push(counter)
}
}
// Remove is not supported for expiring counters.
func (c *expiringCounter) Remove(point Point) {
	panic("Unsupported operation. Too complicated.")
}

// Point returns the centroid and count of the non-expired points.
func (c *expiringCounter) Point() *CountPoint {
	c.expire()
	return c.count.Point()
}

// Count returns the live aggregate counter after expiring old entries.
func (c *expiringCounter) Count() accumulatingCounter {
	c.expire()
	return c.count
}

// String implements fmt.Stringer for debugging.
func (c *expiringCounter) String() string {
	return fmt.Sprintf("counters=%s minutes=%d", c.counters, c.minutes)
}
// Accumulating counter.
//
// accumulatingCounter extends counter with the ability to merge (Plus)
// and unmerge (Minus) another counter of the same concrete type.
type accumulatingCounter interface {
	Add(point Point)
	Remove(point Point)
	Point() *CountPoint
	Plus(c accumulatingCounter)
	Minus(c accumulatingCounter)
}

// Single value counter.

// newSingleValueAccumulatingCounter seeds a counter with a single point.
func newSingleValueAccumulatingCounter(point Point) accumulatingCounter {
	return &singleValueAccumulatingCounter{point.Lat(), point.Lon(), 1}
}

// singleValueAccumulatingCounter keeps running coordinate sums plus a
// count, from which the centroid is derived on demand.
type singleValueAccumulatingCounter struct {
	latSum float64
	lonSum float64
	count int
}
// Add folds the point's coordinates into the running sums.
func (c *singleValueAccumulatingCounter) Add(point Point) {
	c.latSum += point.Lat()
	c.lonSum += point.Lon()
	c.count++
}

// Remove is the exact inverse of Add.
func (c *singleValueAccumulatingCounter) Remove(point Point) {
	c.latSum -= point.Lat()
	c.lonSum -= point.Lon()
	c.count--
}

// Point returns the centroid and count of the accumulated points, or
// nil when the counter is empty.
func (c *singleValueAccumulatingCounter) Point() *CountPoint {
	if c.count > 0 {
		return &CountPoint{&GeoPoint{"", c.latSum / float64(c.count), c.lonSum / float64(c.count)}, c.count}
	}
	return nil
}
// Plus merges value into c1. The argument must be a
// *singleValueAccumulatingCounter; the type assertion panics otherwise.
func (c1 *singleValueAccumulatingCounter) Plus(value accumulatingCounter) {
	c2 := value.(*singleValueAccumulatingCounter)
	c1.latSum += c2.latSum
	c1.lonSum += c2.lonSum
	c1.count += c2.count
}

// Minus unmerges value from c1 (inverse of Plus); same type requirement.
func (c1 *singleValueAccumulatingCounter) Minus(value accumulatingCounter) {
	c2 := value.(*singleValueAccumulatingCounter)
	c1.latSum -= c2.latSum
	c1.lonSum -= c2.lonSum
	c1.count -= c2.count
}

// String implements fmt.Stringer for debugging.
func (c *singleValueAccumulatingCounter) String() string {
	return fmt.Sprintf("%f %f %d", c.latSum, c.lonSum, c.count)
}
// Multi value counter.

// newMultiValueCounter seeds a multi-value counter with a single point,
// recording one occurrence of its id.
func newMultiValueCounter(point Point) accumulatingCounter {
	values := make(map[string]int)
	values[point.Id()] = 1
	return &multiValueAccumulatingCounter{
		newSingleValueAccumulatingCounter(point).(*singleValueAccumulatingCounter),
		values,
	}
}

// multiValueAccumulatingCounter tracks the centroid of all points plus
// an occurrence count per point id.
type multiValueAccumulatingCounter struct {
	point *singleValueAccumulatingCounter
	values map[string]int
}
// Add folds point into the centroid and bumps its per-id tally.
func (counter *multiValueAccumulatingCounter) Add(point Point) {
	counter.point.Add(point)
	counter.values[point.Id()]++
}

// Remove undoes Add. An id's tally may drop to zero, but its key is
// retained in the map.
func (counter *multiValueAccumulatingCounter) Remove(point Point) {
	counter.point.Remove(point)
	counter.values[point.Id()]--
}
// Point returns the centroid with the per-id tallies as the Count
// payload, or nil when the counter is empty.
func (counter *multiValueAccumulatingCounter) Point() *CountPoint {
	center := counter.point.Point()
	if center == nil {
		return nil
	}
	return &CountPoint{&GeoPoint{"", center.Lat(), center.Lon()}, counter.values}
}

// Plus merges value into counter. The argument must be a
// *multiValueAccumulatingCounter; the type assertion panics otherwise.
func (counter *multiValueAccumulatingCounter) Plus(value accumulatingCounter) {
	c := value.(*multiValueAccumulatingCounter)
	counter.point.Plus(c.point)
	for key, value := range c.values {
		counter.values[key] += value
	}
}

// Minus unmerges value from counter (inverse of Plus).
func (counter *multiValueAccumulatingCounter) Minus(value accumulatingCounter) {
	c := value.(*multiValueAccumulatingCounter)
	counter.point.Minus(c.point)
	for key, value := range c.values {
		counter.values[key] -= value
	}
}
// Average accumulating counter. Expect adding and removing CountPoints.

// newAverageAccumulatingCounter seeds an average counter from a point,
// which must be a *CountPoint whose Count is a float64 — the type
// assertions panic otherwise.
func newAverageAccumulatingCounter(point Point) accumulatingCounter {
	return &averageAccumulatingCounter{
		newSingleValueAccumulatingCounter(point).(*singleValueAccumulatingCounter),
		point.(*CountPoint).Count.(float64),
	}
}

// averageAccumulatingCounter tracks the centroid plus the running sum of
// the points' Count values, from which the average is derived.
type averageAccumulatingCounter struct {
	point *singleValueAccumulatingCounter
	sum float64
}
// Add folds the point into the centroid and its float64 Count into the
// running sum. Panics unless point is a *CountPoint with a float64 Count.
func (counter *averageAccumulatingCounter) Add(point Point) {
	counter.point.Add(point)
	counter.sum += point.(*CountPoint).Count.(float64)
}

// Remove is the exact inverse of Add; same type requirements.
func (counter *averageAccumulatingCounter) Remove(point Point) {
	counter.point.Remove(point)
	counter.sum -= point.(*CountPoint).Count.(float64)
}

// Point returns the centroid with the average Count value as payload,
// or nil when the counter is empty.
func (counter *averageAccumulatingCounter) Point() *CountPoint {
	center := counter.point.Point()
	if center == nil {
		return nil
	}
	return &CountPoint{&GeoPoint{"", center.Lat(), center.Lon()}, counter.sum / float64(center.Count.(int))}
}

// Plus merges value into counter; the argument must be a
// *averageAccumulatingCounter.
func (counter *averageAccumulatingCounter) Plus(value accumulatingCounter) {
	c := value.(*averageAccumulatingCounter)
	counter.point.Plus(c.point)
	counter.sum += c.sum
}

// Minus unmerges value from counter (inverse of Plus).
func (counter *averageAccumulatingCounter) Minus(value accumulatingCounter) {
	c := value.(*averageAccumulatingCounter)
	counter.point.Minus(c.point)
	counter.sum -= c.sum
}
package orbcore
import (
"fmt"
"math"
"time"
)
/*
BoundingBox defines a four dimensional bounding box in space and time.
All bounds are treated as inclusive (see Contains).
*/
type BoundingBox struct {
	MinX float64
	MinY float64
	MinZ float64
	MinTime time.Time
	MaxX float64
	MaxY float64
	MaxZ float64
	MaxTime time.Time
}
// String renders the box as "(minX,minY,minZ,minTime)x(maxX,maxY,maxZ,maxTime)"
// for debugging.
func (bb *BoundingBox) String() string {
	return fmt.Sprintf(
		"(%v,%v,%v,%v)x(%v,%v,%v,%v)",
		bb.MinX, bb.MinY, bb.MinZ, bb.MinTime,
		bb.MaxX, bb.MaxY, bb.MaxZ, bb.MaxTime,
	)
}
/*
Contains returns true if the provided position is inside the box.
All bounds are inclusive, including both time endpoints.
*/
func (bb *BoundingBox) Contains(pos *Position) bool {
	if pos.X < bb.MinX || pos.X > bb.MaxX {
		return false
	}
	if pos.Y < bb.MinY || pos.Y > bb.MaxY {
		return false
	}
	if pos.Z < bb.MinZ || pos.Z > bb.MaxZ {
		return false
	}
	if pos.Epoch.Before(bb.MinTime) || pos.Epoch.After(bb.MaxTime) {
		return false
	}
	return true
}
/*
Overlaps returns true if the other bounding box overlaps this one.

The previous implementation tested whether any corner of either box lay
inside the other; that misses intersections where the boxes cross
without either containing a corner of the other (e.g. a plus-sign
configuration). Two axis-aligned boxes intersect exactly when their
intervals overlap on every axis, so that is what is checked here, with
inclusive bounds to match Contains.
*/
func (bb *BoundingBox) Overlaps(other *BoundingBox) bool {
	if bb.MaxX < other.MinX || other.MaxX < bb.MinX {
		return false
	}
	if bb.MaxY < other.MinY || other.MaxY < bb.MinY {
		return false
	}
	if bb.MaxZ < other.MinZ || other.MaxZ < bb.MinZ {
		return false
	}
	if bb.MaxTime.Before(other.MinTime) || other.MaxTime.Before(bb.MinTime) {
		return false
	}
	return true
}
/*
Center returns the point in the center of this bounding box.
The returned position's ID is always "center".
*/
func (bb *BoundingBox) Center() Position {
	return Position{
		ID: "center",
		Epoch: splitTime(bb.MinTime, bb.MaxTime),
		X: splitFloat64(bb.MinX, bb.MaxX),
		Y: splitFloat64(bb.MinY, bb.MaxY),
		Z: splitFloat64(bb.MinZ, bb.MaxZ),
	}
}
/*
Corners returns the 16 corners of our 4 dimensional bounding box.
Bits 0-3 of the corner index select min (0) or max (1) along the
X, Y, Z and time axes respectively.
*/
func (bb *BoundingBox) Corners() [16]Position {
	var result [16]Position
	for i := 0; i < 16; i++ {
		result[i].X = pickSide(i&0x1, bb.MinX, bb.MaxX)
		result[i].Y = pickSide(i&0x2, bb.MinY, bb.MaxY)
		result[i].Z = pickSide(i&0x4, bb.MinZ, bb.MaxZ)
		if i&0x8 == 0 {
			result[i].Epoch = bb.MinTime
		} else {
			result[i].Epoch = bb.MaxTime
		}
	}
	return result
}
// pickSide selects min when the masked side bit is zero, max otherwise.
func pickSide(side int, min, max float64) float64 {
	if side != 0 {
		return max
	}
	return min
}
// BoundingBoxSplitter iterates over the 16 sub-boxes obtained by
// bisecting Box at Center along each of its four axes.
type BoundingBoxSplitter struct {
	Box *BoundingBox
	Center Position
	// i is the index of the next sub-box to emit (0..15).
	i int
}
// NewBoundingBoxSplitter returns a splitter over box, precomputing the
// center and starting at sub-box 0.
func NewBoundingBoxSplitter(box *BoundingBox) BoundingBoxSplitter {
	return BoundingBoxSplitter{
		Box: box,
		Center: box.Center(),
		i: 0,
	}
}
// Next returns the sub-box at the current index and advances the
// iterator.
//
// The receiver must be a pointer: with the previous value receiver the
// increment of bs.i was applied to a copy and lost on return, so the
// iterator never advanced and HasNext never became false. Note that
// callers chaining directly off NewBoundingBoxSplitter(...)'s return
// value must assign it to a variable first (pointer methods need an
// addressable receiver).
func (bs *BoundingBoxSplitter) Next() *BoundingBox {
	result := bs.At(bs.i)
	bs.i++
	return result
}
// HasNext reports whether any of the 16 sub-boxes remain to be emitted.
// A pointer receiver is used for consistency with the mutating iterator
// methods, so the whole method set lives on *BoundingBoxSplitter.
func (bs *BoundingBoxSplitter) HasNext() bool {
	return bs.i < 16
}
// At returns the i'th of the 16 sub-boxes. Bits 0-3 of i select the
// lower or upper half-interval along X, Y, Z and time respectively.
// An index of 16 or more yields a zero-value BoundingBox.
func (bs BoundingBoxSplitter) At(i int) *BoundingBox {
	var result BoundingBox
	if i >= 16 {
		return &result
	}
	result.MinX, result.MaxX = pickBoxEdge(i&0x1, bs.Box.MinX, bs.Center.X, bs.Box.MaxX)
	result.MinY, result.MaxY = pickBoxEdge(i&0x2, bs.Box.MinY, bs.Center.Y, bs.Box.MaxY)
	result.MinZ, result.MaxZ = pickBoxEdge(i&0x4, bs.Box.MinZ, bs.Center.Z, bs.Box.MaxZ)
	if i&0x8 == 0 {
		result.MinTime = bs.Box.MinTime
		result.MaxTime = bs.Center.Epoch
	} else {
		result.MinTime = bs.Center.Epoch
		result.MaxTime = bs.Box.MaxTime
	}
	return &result
}
// pickBoxEdge returns the lower half-interval [min, mid] when the masked
// side bit is zero, and the upper half [mid, max] otherwise.
func pickBoxEdge(side int, min, mid, max float64) (float64, float64) {
	if side != 0 {
		return mid, max
	}
	return min, mid
}
/*
PositionsToBoundingBox creates a bounding box around a set of positions.
For an empty slice the result is an "inverted" box (min bounds above max
bounds) that contains nothing.
*/
func PositionsToBoundingBox(positions []*Position) *BoundingBox {
	minX, minY, minZ := math.MaxFloat64, math.MaxFloat64, math.MaxFloat64
	maxX, maxY, maxZ := -math.MaxFloat64, -math.MaxFloat64, -math.MaxFloat64
	// Sentinels: presumably the maximum and minimum representable
	// time.Time values (mirroring time package internals) — confirm
	// against the time package if these constants are ever touched.
	minTime := time.Unix(1<<63-62135596801, 999999999)
	maxTime := time.Unix(-62135596801, -999999999)
	for _, p := range positions {
		minX = math.Min(minX, p.X)
		minY = math.Min(minY, p.Y)
		minZ = math.Min(minZ, p.Z)
		maxX = math.Max(maxX, p.X)
		maxY = math.Max(maxY, p.Y)
		maxZ = math.Max(maxZ, p.Z)
		if p.Epoch.Before(minTime) {
			minTime = p.Epoch
		}
		if p.Epoch.After(maxTime) {
			maxTime = p.Epoch
		}
	}
	return &BoundingBox{
		MinX: minX, MaxX: maxX,
		MinY: minY, MaxY: maxY,
		MinZ: minZ, MaxZ: maxZ,
		MinTime: minTime, MaxTime: maxTime,
	}
}
/*
splitFloat64 finds the mid point between two floats.
*/
func splitFloat64(min, max float64) float64 {
	return min + (max-min)/2
}
func splitTime(min, max time.Time) time.Time {
return min.Add(time.Duration(int64(max.Sub(min)) / 2))
} | orbcore/boundingbox.go | 0.790813 | 0.433682 | boundingbox.go | starcoder |
package webrtc
// RefBool returns a pointer to a newly created bool.
func RefBool(value bool) *bool {
	v := value
	return &v
}

// RefUint returns a pointer to a newly created uint.
func RefUint(value uint) *uint {
	v := value
	return &v
}

// RefUint8 returns a pointer to a newly created uint8.
func RefUint8(value uint8) *uint8 {
	v := value
	return &v
}

// RefUint16 returns a pointer to a newly created uint16.
func RefUint16(value uint16) *uint16 {
	v := value
	return &v
}

// RefUint32 returns a pointer to a newly created uint32.
func RefUint32(value uint32) *uint32 {
	v := value
	return &v
}

// RefUint64 returns a pointer to a newly created uint64.
func RefUint64(value uint64) *uint64 {
	v := value
	return &v
}

// RefInt returns a pointer to a newly created int.
func RefInt(value int) *int {
	v := value
	return &v
}

// RefInt8 returns a pointer to a newly created int8.
func RefInt8(value int8) *int8 {
	v := value
	return &v
}

// RefInt16 returns a pointer to a newly created int16.
func RefInt16(value int16) *int16 {
	v := value
	return &v
}

// RefInt32 returns a pointer to a newly created int32.
func RefInt32(value int32) *int32 {
	v := value
	return &v
}

// RefInt64 returns a pointer to a newly created int64.
func RefInt64(value int64) *int64 {
	v := value
	return &v
}

// RefFloat32 returns a pointer to a newly created float32.
func RefFloat32(value float32) *float32 {
	v := value
	return &v
}

// RefFloat64 returns a pointer to a newly created float64.
func RefFloat64(value float64) *float64 {
	v := value
	return &v
}

// RefComplex64 returns a pointer to a newly created complex64.
func RefComplex64(value complex64) *complex64 {
	v := value
	return &v
}

// RefComplex128 returns a pointer to a newly created complex128.
func RefComplex128(value complex128) *complex128 {
	v := value
	return &v
}

// RefByte returns a pointer to a newly created byte.
func RefByte(value byte) *byte {
	v := value
	return &v
}

// RefRune returns a pointer to a newly created rune.
func RefRune(value rune) *rune {
	v := value
	return &v
}

// RefString returns a pointer to a newly created string.
func RefString(value string) *string {
	v := value
	return &v
}
package router
import (
"fmt"
"strconv"
"strings"
"time"
)
// Params is similar to url.Values, but with a few more utility functions
type Params map[string][]string

// Map gets the params as a flat map[string]string, discarding any multiple values.
// Assumes each present key has at least one value; an empty value slice
// would panic on v[0].
func (p Params) Map() map[string]string {
	flat := make(map[string]string)
	for k, v := range p {
		flat[k] = v[0]
	}
	return flat
}
// Clean returns the params as a map[string]string, discarding any multiple values, with any params not in the accepted list removed
// Like Map, it assumes each present key has at least one value.
func (p Params) Clean(accepted []string) map[string]string {
	flat := make(map[string]string)
	for k, v := range p {
		if containsString(accepted, k) {
			flat[k] = v[0]
		}
	}
	return flat
}
// Flatten deflates a set of params (of any type) to a comma separated
// list (only for simple params). The key's values are replaced in place
// by the single joined string, which is also returned.
func (p Params) Flatten(k string) string {
	// strings.Join produces the same comma-separated result as the old
	// Sprintf accumulation loop, in a single pass instead of O(n²).
	flat := strings.Join(p[k], ",")
	// replace this key with an array containing only flat
	p[k] = []string{flat}
	return flat
}
// GetDate returns the first value associated with a given key as a time, using the given time format.
// A missing key parses the empty string and therefore returns an error.
func (p Params) GetDate(key string, format string) (time.Time, error) {
	v := p.Get(key)
	return time.Parse(format, v)
}
// GetInt returns the first value associated with the given key as an integer. If there is no value or a parse error, it returns 0
// The value is truncated just after its LAST digit, so a trailing
// non-numeric suffix (e.g. "10px") is dropped; a value with no digits
// becomes the empty string and yields 0. Embedded separators such as
// "1,000" are NOT stripped and therefore fail to parse, returning 0.
func (p Params) GetInt(key string) int64 {
	var i int64
	v := p.Get(key)
	// Cut the string after the last digit (note: not at the first
	// non-numeric character — interior non-digits are kept).
	v = v[0 : strings.LastIndexAny(v, "0123456789")+1]
	i, err := strconv.ParseInt(v, 10, 64)
	if err != nil {
		return 0
	}
	return i
}
// GetInts returns all values associated with the given key as an array of integers.
// Unparsable values contribute 0 entries rather than being skipped.
func (p Params) GetInts(key string) []int64 {
	ints := []int64{}
	for _, v := range p.GetAll(key) {
		vi, err := strconv.ParseInt(v, 10, 64)
		if err != nil {
			vi = 0
		}
		ints = append(ints, vi)
	}
	return ints
}
// GetUniqueInts returns all unique positive int values associated with
// the given key as an array of integers. Blank values are skipped, and
// unparsable or non-positive values are dropped (the original doc said
// "non-zero", but the code has always kept positives only).
func (p Params) GetUniqueInts(key string) []int64 {
	ints := []int64{}
	for _, v := range p.GetAll(key) {
		// v is already a string; the old string(v) conversion was a no-op.
		if v == "" {
			continue // ignore blank ints
		}
		vi, err := strconv.ParseInt(v, 10, 64)
		if err != nil {
			vi = 0
		}
		// Do not insert 0, negative values, or duplicate entries
		if vi > 0 && !contains(ints, vi) {
			ints = append(ints, vi)
		}
	}
	return ints
}
// GetIntsString returns all values associated with the given key as a
// comma separated string; blank values are skipped.
func (p Params) GetIntsString(key string) string {
	values := p.GetAll(key)
	// Collect non-blank values and join once: same output as the old
	// string-concatenation loop (which was O(n²) and used a redundant
	// string(v) conversion plus a Yoda condition), in a single pass.
	parts := make([]string, 0, len(values))
	for _, v := range values {
		if v == "" {
			continue // ignore blank ints
		}
		parts = append(parts, v)
	}
	return strings.Join(parts, ",")
}
// GetFloat returns the first value associated with the given key as an integer. If there is no value or a parse error, it returns 0.0
// Percent signs are stripped before parsing, so "12.5%" yields 12.5.
func (p Params) GetFloat(key string) float64 {
	var value float64
	v := p.Get(key)
	// Remove percent signs from float values
	v = strings.Replace(v, "%", "", -1)
	value, err := strconv.ParseFloat(v, 64)
	if err != nil {
		return 0.0
	}
	return value
}
// GetFloats returns all values associated with the given key as an array of floats.
// Percent signs are stripped first; unparsable values contribute 0.0.
func (p Params) GetFloats(key string) []float64 {
	var values []float64
	for _, v := range p.GetAll(key) {
		// Remove percent signs from float values
		v = strings.Replace(v, "%", "", -1)
		value, err := strconv.ParseFloat(v, 64)
		if err != nil {
			value = 0.0
		}
		values = append(values, value)
	}
	return values
}
// GetAll returns all values associated with the given key - equivalent to params[key].
// The result is nil when the key is absent.
func (p Params) GetAll(key string) []string {
	return p[key]
}
// Get gets the first value associated with the given key.
// If there are no values returns the empty string.
func (p Params) Get(key string) string {
	if p == nil {
		return ""
	}
	v := p[key]
	// len of a nil slice is 0, so a single check replaces the old
	// redundant `v == nil || len(v) == 0`.
	if len(v) == 0 {
		return ""
	}
	return v[0]
}
// Blank returns true if the value corresponding to key is an empty string
// (including when the key is entirely absent).
func (p Params) Blank(key string) bool {
	v := p.Get(key)
	return v == ""
}

// Set sets the key to a string value replacing any existing values.
func (p Params) Set(key, value string) {
	p[key] = []string{value}
}
// SetInt sets the key to a single int value as string replacing any existing values.
func (p Params) SetInt(key string, value int64) {
	// strconv.FormatInt produces the same text as fmt.Sprintf("%d", ...)
	// for an int64 without the fmt boxing/reflection overhead.
	p[key] = []string{strconv.FormatInt(value, 10)}
}
// Add adds the value, if necessary appending to any existing values associated with key.
func (p Params) Add(key, value string) {
	p[key] = append(p[key], value)
}

// Remove deletes the values associated with key.
func (p Params) Remove(key string) {
	delete(p, key)
}
// contains reports whether item occurs in list.
func contains(list []int64, item int64) bool {
	for _, candidate := range list {
		if candidate == item {
			return true
		}
	}
	return false
}

// containsString reports whether p occurs in allowed.
func containsString(allowed []string, p string) bool {
	for _, candidate := range allowed {
		if candidate == p {
			return true
		}
	}
	return false
}
package kvsql
import (
"github.com/meshplus/gosdk/kvsql/types"
)
// Row represents a row of data, can be used to access values.
// It is a lightweight (chunk, row-index) pair, so copying it is cheap.
type Row struct {
	c *Chunk
	idx int
}
// Chunk returns the Chunk which the row belongs to.
func (r Row) Chunk() *Chunk {
	return r.c
}

// IsEmpty returns true if the Row is empty (equal to the zero value:
// nil chunk and index 0).
func (r Row) IsEmpty() bool {
	return r == Row{}
}

// Idx returns the row index of Chunk.
func (r Row) Idx() int {
	return r.idx
}

// Len returns the number of values in the row (one per column).
func (r Row) Len() int {
	return r.c.NumCols()
}
/**************** these should not be used in normal, supported for decode recode set ****************/
// The accessors below all delegate to the column at colIdx; no bounds or
// type checking is performed here.

// GetInt8 returns the int8 value with the colIdx.
func (r Row) GetInt8(colIdx int) int8 {
	return r.c.columns[colIdx].getInt8(r.idx)
}

// GetUint8 returns the uint8 value with the colIdx.
func (r Row) GetUint8(colIdx int) uint8 {
	return r.c.columns[colIdx].getUint8(r.idx)
}

// GetInt16 returns the int16 value with the colIdx.
func (r Row) GetInt16(colIdx int) int16 {
	return r.c.columns[colIdx].getInt16(r.idx)
}

// GetUint16 returns the uint16 value with the colIdx.
func (r Row) GetUint16(colIdx int) uint16 {
	return r.c.columns[colIdx].getUint16(r.idx)
}

// GetInt32 returns the int32 value with the colIdx.
func (r Row) GetInt32(colIdx int) int32 {
	return r.c.columns[colIdx].getInt32(r.idx)
}

// GetUint32 returns the uint8 value with the colIdx.
func (r Row) GetUint32(colIdx int) uint32 {
	return r.c.columns[colIdx].getUint32(r.idx)
}

// GetInt64 returns the int64 value with the colIdx.
func (r Row) GetInt64(colIdx int) int64 {
	return r.c.columns[colIdx].GetInt64(r.idx)
}

// GetUint64 returns the uint64 value with the colIdx.
func (r Row) GetUint64(colIdx int) uint64 {
	return r.c.columns[colIdx].GetUint64(r.idx)
}

// GetFloat32 returns the float32 value with the colIdx.
func (r Row) GetFloat32(colIdx int) float32 {
	return r.c.columns[colIdx].GetFloat32(r.idx)
}

// GetFloat64 returns the float64 value with the colIdx.
func (r Row) GetFloat64(colIdx int) float64 {
	return r.c.columns[colIdx].GetFloat64(r.idx)
}

// GetString returns the string value with the colIdx.
func (r Row) GetString(colIdx int) string {
	return r.c.columns[colIdx].GetString(r.idx)
}

// GetBytes returns the bytes value with the colIdx.
func (r Row) GetBytes(colIdx int) []byte {
	return r.c.columns[colIdx].GetBytes(r.idx)
}

// GetTime returns the Time value with the colIdx.
func (r Row) GetTime(colIdx int) types.Time {
	return r.c.columns[colIdx].GetTime(r.idx)
}

// GetDuration returns the Duration value with the colIdx, with fillFsp
// forwarded to the column (presumably the fractional-seconds precision
// to apply — confirm against the column implementation).
func (r Row) GetDuration(colIdx int, fillFsp int) types.Duration {
	return r.c.columns[colIdx].GetDuration(r.idx, fillFsp)
}

// getNameValue returns the column's (name, value) pair at this row.
func (r Row) getNameValue(colIdx int) (string, uint64) {
	return r.c.columns[colIdx].getNameValue(r.idx)
}

// GetEnum returns the Enum value with the colIdx.
func (r Row) GetEnum(colIdx int) types.Enum {
	return r.c.columns[colIdx].GetEnum(r.idx)
}

// IsNull returns if the datum in the chunk.Row is null.
func (r Row) IsNull(colIdx int) bool {
	return r.c.columns[colIdx].IsNull(r.idx)
}

// GetDecimal returns the MyDecimal value with the colIdx.
func (r Row) GetDecimal(colIdx int) *types.MyDecimal {
	return r.c.columns[colIdx].GetDecimal(r.idx)
}

// GetSet returns the Set value with the colIdx.
func (r Row) GetSet(colIdx int) types.Set {
	return r.c.columns[colIdx].GetSet(r.idx)
}
package bitfield
import (
"encoding/hex"
"errors"
)
// NumBytes returns the number of bytes needed to hold length bits,
// i.e. length/8 rounded up.
func NumBytes(length uint32) int {
	const bitsPerByte = 8
	n := uint64(length) / bitsPerByte
	if uint64(length)%bitsPerByte != 0 {
		n++
	}
	return int(n)
}
// Bitfield is described in BEP 3.
//
// Bit 0 is the most significant bit of bytes[0]. Unused trailing bits
// in the last byte are kept clear (New allocates zeroed storage and
// NewBytes explicitly clears them).
type Bitfield struct {
	bytes []byte
	length uint32
}
// New creates a new Bitfield of length bits, all initially clear.
func New(length uint32) *Bitfield {
	return &Bitfield{
		bytes: make([]byte, NumBytes(length)),
		length: length,
	}
}
// NewBytes returns a new Bitfield backed by b.
// Bytes in b are not copied. Unused bits in last byte are cleared
// (mutating b in place).
// Contrary to the old comment, it does not panic: an error is returned
// unless len(b) is exactly the number of bytes needed for "length" bits.
func NewBytes(b []byte, length uint32) (*Bitfield, error) {
	div, mod := divMod32(length)
	lastByteIncomplete := mod != 0
	requiredBytes := div
	if lastByteIncomplete {
		requiredBytes++
	}
	if uint32(len(b)) != requiredBytes {
		return nil, errors.New("invalid length")
	}
	if lastByteIncomplete {
		// Clear the unused low-order bits of the final byte.
		b[len(b)-1] &= ^(0xff >> mod)
	}
	return &Bitfield{
		bytes: b[:requiredBytes],
		length: length,
	}, nil
}
// Copy returns a deep copy of the bitfield; the backing bytes are not
// shared with the original.
func (b *Bitfield) Copy() *Bitfield {
	dup := make([]byte, len(b.bytes))
	copy(dup, b.bytes)
	return &Bitfield{bytes: dup, length: b.length}
}
// Bytes returns bytes in b. If you modify the returned slice the bits in b are modified too.
func (b *Bitfield) Bytes() []byte { return b.bytes }

// Len returns the number of bits as given to New.
func (b *Bitfield) Len() uint32 { return b.length }

// Hex returns bytes as string. If not all the bits in last byte are used, they encode as not set.
func (b *Bitfield) Hex() string {
	return hex.EncodeToString(b.bytes)
}
// Set bit i. 0 is the most significant bit. Panics if i >= b.Len().
func (b *Bitfield) Set(i uint32) {
	b.checkIndex(i)
	byteIdx, bitIdx := divMod32(i)
	b.bytes[byteIdx] |= 0x80 >> bitIdx
}

// SetTo sets bit i to value. Panics if i >= b.Len().
func (b *Bitfield) SetTo(i uint32, value bool) {
	b.checkIndex(i)
	if !value {
		b.Clear(i)
		return
	}
	b.Set(i)
}

// Clear bit i. 0 is the most significant bit. Panics if i >= b.Len().
func (b *Bitfield) Clear(i uint32) {
	b.checkIndex(i)
	byteIdx, bitIdx := divMod32(i)
	b.bytes[byteIdx] &^= 0x80 >> bitIdx
}
// FirstSet returns the index of the first bit that is set, scanning
// from start. The second result is false when no such bit exists.
func (b *Bitfield) FirstSet(start uint32) (uint32, bool) {
	for i := start; i < b.length; i++ {
		if !b.Test(i) {
			continue
		}
		return i, true
	}
	return 0, false
}

// FirstClear returns the index of the first bit that is not set,
// scanning from start. The second result is false when no such bit exists.
func (b *Bitfield) FirstClear(start uint32) (uint32, bool) {
	for i := start; i < b.length; i++ {
		if b.Test(i) {
			continue
		}
		return i, true
	}
	return 0, false
}
// ClearAll turns off every bit in the bitfield.
func (b *Bitfield) ClearAll() {
	for i := 0; i < len(b.bytes); i++ {
		b.bytes[i] = 0
	}
}

// Test reports whether bit i is set. Bit 0 is the most significant bit
// of byte 0. Panics if i >= b.Len().
func (b *Bitfield) Test(i uint32) bool {
	b.checkIndex(i)
	byteIdx, bitIdx := divMod32(i)
	mask := byte(1) << (7 - bitIdx)
	return b.bytes[byteIdx]&mask != 0
}
// countCache[v] is the number of set bits (population count) of the
// byte value v. Count sums these per-byte lookups.
var countCache = [256]byte{
	0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8,
}
// Count returns the number of set bits, using the per-byte popcount
// table countCache. Unused bits of the last byte are kept clear, so
// they never contribute.
func (b *Bitfield) Count() uint32 {
	var total uint32
	for i := 0; i < len(b.bytes); i++ {
		total += uint32(countCache[b.bytes[i]])
	}
	return total
}

// All reports whether every one of the Len() bits is set.
func (b *Bitfield) All() bool {
	return b.length == b.Count()
}
// checkIndex panics unless i is inside [0, b.Len()).
func (b *Bitfield) checkIndex(i uint32) {
	if i < b.Len() {
		return
	}
	panic("index out of bound")
}

// And replaces b with the bitwise AND of b and b2.
// Panics if the two bitfields have different lengths.
func (b *Bitfield) And(b2 *Bitfield) {
	if b.length != b2.length {
		panic("length mismatch")
	}
	for i := 0; i < len(b.bytes); i++ {
		b.bytes[i] &= b2.bytes[i]
	}
}

// Or replaces b with the bitwise OR of b and b2.
// Panics if the two bitfields have different lengths.
func (b *Bitfield) Or(b2 *Bitfield) {
	if b.length != b2.length {
		panic("length mismatch")
	}
	for i := 0; i < len(b.bytes); i++ {
		b.bytes[i] |= b2.bytes[i]
	}
}
// divMod32 splits a bit index into (byte index, bit offset within byte).
func divMod32(a uint32) (uint32, uint32) { return a / 8, a % 8 } | internal/bitfield/bitfield.go | 0.762247 | 0.453625 | bitfield.go | starcoder |
package gorocksdb
// #include "rocksdb/c.h"
import "C"
// A SliceTransform can be used as a prefix extractor. It mirrors
// RocksDB's rocksdb_slicetransform_t; Go implementations are registered
// via registerSliceTransform and invoked from C through the exported
// callbacks below.
type SliceTransform interface {
	// Transform a src in domain to a dst in the range.
	Transform(src []byte) []byte
	// Determine whether this is a valid src upon the function applies.
	InDomain(src []byte) bool
	// Determine whether dst=Transform(src) for some src.
	InRange(src []byte) bool
	// Return the name of this transformation.
	Name() string
}
// NewFixedPrefixTransform creates a new fixed prefix transform that
// extracts the first prefixLen bytes of a key (implemented in C).
func NewFixedPrefixTransform(prefixLen int) SliceTransform {
	return NewNativeSliceTransform(C.rocksdb_slicetransform_create_fixed_prefix(C.size_t(prefixLen)))
}

// NewNativeSliceTransform creates a SliceTransform object that wraps a
// C-side rocksdb_slicetransform_t.
func NewNativeSliceTransform(c *C.rocksdb_slicetransform_t) SliceTransform {
	return nativeSliceTransform{c}
}

// nativeSliceTransform wraps a transform implemented in C. Its Go
// methods are stubs that return zero values; the actual behavior is
// presumably provided by the underlying C object.
type nativeSliceTransform struct {
	c *C.rocksdb_slicetransform_t
}

func (st nativeSliceTransform) Transform(src []byte) []byte { return nil }
func (st nativeSliceTransform) InDomain(src []byte) bool    { return false }
func (st nativeSliceTransform) InRange(src []byte) bool     { return false }
func (st nativeSliceTransform) Name() string                { return "" }
// sliceTransforms keeps registered transforms reachable so the cgo
// callbacks can look them up by index (and the GC keeps them alive).
var sliceTransforms []SliceTransform

// registerSliceTransform stores st and returns the index under which the
// exported C callbacks can find it.
func registerSliceTransform(st SliceTransform) int {
	idx := len(sliceTransforms)
	sliceTransforms = append(sliceTransforms, st)
	return idx
}
// dragonboat_slicetransform_transform is called from C: it applies the
// Go transform registered at idx to the key and hands the result back
// as a C buffer via cByteSlice, storing its length in cDstLen.

//export dragonboat_slicetransform_transform
func dragonboat_slicetransform_transform(idx int, cKey *C.char, cKeyLen C.size_t, cDstLen *C.size_t) *C.char {
	key := charToByte(cKey, cKeyLen)
	dst := sliceTransforms[idx].Transform(key)
	*cDstLen = C.size_t(len(dst))
	return cByteSlice(dst)
}

// dragonboat_slicetransform_in_domain is called from C: it asks the Go
// transform registered at idx whether the key is in its domain.

//export dragonboat_slicetransform_in_domain
func dragonboat_slicetransform_in_domain(idx int, cKey *C.char, cKeyLen C.size_t) C.uchar {
	key := charToByte(cKey, cKeyLen)
	inDomain := sliceTransforms[idx].InDomain(key)
	return boolToChar(inDomain)
}

// dragonboat_slicetransform_in_range is called from C: it asks the Go
// transform registered at idx whether the key is in its range.

//export dragonboat_slicetransform_in_range
func dragonboat_slicetransform_in_range(idx int, cKey *C.char, cKeyLen C.size_t) C.uchar {
	key := charToByte(cKey, cKeyLen)
	inRange := sliceTransforms[idx].InRange(key)
	return boolToChar(inRange)
}

// dragonboat_slicetransform_name is called from C: it returns the name
// of the Go transform registered at idx as a C string.

//export dragonboat_slicetransform_name
func dragonboat_slicetransform_name(idx int) *C.char {
	return stringToChar(sliceTransforms[idx].Name())
} | internal/logdb/kv/rocksdb/gorocksdb/slice_transform.go | 0.770724 | 0.410343 | slice_transform.go | starcoder |
package obi
import (
"encoding/binary"
"fmt"
"reflect"
)
// Encode uses obi encoding scheme to encode the given input into bytes.
// Supported kinds: fixed-size signed/unsigned integers, strings, byte
// slices, slices of supported values, and structs of supported fields.
func Encode(v interface{}) ([]byte, error) {
	rv := reflect.ValueOf(v)
	switch rv.Kind() {
	case reflect.Uint8:
		return EncodeUnsigned8(uint8(rv.Uint())), nil
	case reflect.Uint16:
		return EncodeUnsigned16(uint16(rv.Uint())), nil
	case reflect.Uint32:
		return EncodeUnsigned32(uint32(rv.Uint())), nil
	case reflect.Uint64:
		return EncodeUnsigned64(rv.Uint()), nil
	case reflect.Int8:
		return EncodeSigned8(int8(rv.Int())), nil
	case reflect.Int16:
		return EncodeSigned16(int16(rv.Int())), nil
	case reflect.Int32:
		return EncodeSigned32(int32(rv.Int())), nil
	case reflect.Int64:
		return EncodeSigned64(rv.Int()), nil
	case reflect.String:
		return EncodeString(rv.String()), nil
	case reflect.Slice:
		// Byte slices use the compact length-prefixed raw encoding.
		if rv.Type().Elem().Kind() == reflect.Uint8 {
			return EncodeBytes(rv.Bytes()), nil
		}
		// Other slices: 4-byte length prefix, then each element in order.
		out := EncodeUnsigned32(uint32(rv.Len()))
		for i, n := 0, rv.Len(); i < n; i++ {
			enc, err := Encode(rv.Index(i).Interface())
			if err != nil {
				return nil, err
			}
			out = append(out, enc...)
		}
		return out, nil
	case reflect.Struct:
		// Structs: the concatenation of each field's encoding, in order.
		out := []byte{}
		for i, n := 0, rv.NumField(); i < n; i++ {
			enc, err := Encode(rv.Field(i).Interface())
			if err != nil {
				return nil, err
			}
			out = append(out, enc...)
		}
		return out, nil
	default:
		return nil, fmt.Errorf("obi: unsupported value type: %s", rv.Kind())
	}
}

// MustEncode uses obi encoding scheme to encode the given input into
// bytes. Panics on error.
func MustEncode(v interface{}) []byte {
	encoded, err := Encode(v)
	if err != nil {
		panic(err)
	}
	return encoded
}
// EncodeUnsigned8 takes an `uint8` variable and encodes it into a byte array.
func EncodeUnsigned8(v uint8) []byte {
	out := make([]byte, 1)
	out[0] = v
	return out
}

// EncodeUnsigned16 takes an `uint16` variable and encodes it into a
// byte array (big-endian).
func EncodeUnsigned16(v uint16) []byte {
	out := make([]byte, 2)
	binary.BigEndian.PutUint16(out, v)
	return out
}

// EncodeUnsigned32 takes an `uint32` variable and encodes it into a
// byte array (big-endian).
func EncodeUnsigned32(v uint32) []byte {
	out := make([]byte, 4)
	binary.BigEndian.PutUint32(out, v)
	return out
}

// EncodeUnsigned64 takes an `uint64` variable and encodes it into a
// byte array (big-endian).
func EncodeUnsigned64(v uint64) []byte {
	out := make([]byte, 8)
	binary.BigEndian.PutUint64(out, v)
	return out
}

// EncodeSigned8 takes an `int8` variable and encodes it into a byte
// array (two's complement, same bytes as the unsigned form).
func EncodeSigned8(v int8) []byte {
	return EncodeUnsigned8(uint8(v))
}

// EncodeSigned16 takes an `int16` variable and encodes it into a byte array.
func EncodeSigned16(v int16) []byte {
	return EncodeUnsigned16(uint16(v))
}

// EncodeSigned32 takes an `int32` variable and encodes it into a byte array.
func EncodeSigned32(v int32) []byte {
	return EncodeUnsigned32(uint32(v))
}

// EncodeSigned64 takes an `int64` variable and encodes it into a byte array.
func EncodeSigned64(v int64) []byte {
	return EncodeUnsigned64(uint64(v))
}

// EncodeBytes takes a `[]byte` variable and encodes it into a byte
// array: a 4-byte big-endian length prefix followed by the raw bytes.
func EncodeBytes(v []byte) []byte {
	out := EncodeUnsigned32(uint32(len(v)))
	out = append(out, v...)
	return out
}
// EncodeString takes a `string` variable and encodes it into a byte
// array: a 4-byte big-endian length prefix (byte length, not rune
// count) followed by the raw UTF-8 bytes.
func EncodeString(v string) []byte {
	return append(EncodeUnsigned32(uint32(len(v))), []byte(v)...)
} | chain/pkg/obi/encode.go | 0.746971 | 0.407569 | encode.go | starcoder |
package bridge
import (
"fmt"
)
// Type classes: the kind of type a Type value describes.
const (
	NullType = iota
	UnresolvedType
	NamedType
	AliasType
	ReferenceType
	TupleType
	RecordType
	FunctionType
	ListType
	MapType
)

// Type is a tagged union describing one type in the bridge's type
// system: TypeClass selects the class, TypeData carries the
// class-specific payload (see the AsXxx accessors below).
type Type struct {
	// One of the above type classes
	TypeClass int
	// Class-specific payload, e.g. *NamedTypeData when TypeClass is
	// NamedType; retrieved via the AsXxx accessors.
	TypeData interface{}
}
// TypeClassString returns the human-readable name of t's type class, or
// the empty string for an unknown class value.
func (t *Type) TypeClassString() string {
	names := [...]string{
		NullType:       "NullType",
		UnresolvedType: "UnresolvedType",
		NamedType:      "NamedType",
		AliasType:      "AliasType",
		ReferenceType:  "ReferenceType",
		TupleType:      "TupleType",
		RecordType:     "RecordType",
		FunctionType:   "FunctionType",
		ListType:       "ListType",
		MapType:        "MapType",
	}
	if t.TypeClass < 0 || t.TypeClass >= len(names) {
		return ""
	}
	return names[t.TypeClass]
}
// IsValueType tells whether a value of this type is passed by value
// (copied) rather than by reference. Aliases defer to the type they
// alias; reference, list, map and function types are reference types;
// everything else (including null/unresolved/tuple) is not a value type
// except named and record types.
func (t *Type) IsValueType() bool {
	switch t.TypeClass {
	case ReferenceType, ListType, MapType, FunctionType:
		return false
	case AliasType:
		return t.AsAliasType().TargetType.IsValueType()
	case NamedType, RecordType:
		return true
	}
	return false
}
// IsXxx predicates report whether t belongs to the given type class.
func (t *Type) IsNullType() bool { return t.TypeClass == NullType }
func (t *Type) IsUnresolvedType() bool { return t.TypeClass == UnresolvedType }
func (t *Type) IsNamedType() bool { return t.TypeClass == NamedType }
func (t *Type) IsAliasType() bool { return t.TypeClass == AliasType }
func (t *Type) IsReferenceType() bool { return t.TypeClass == ReferenceType }
func (t *Type) IsTupleType() bool { return t.TypeClass == TupleType }
func (t *Type) IsRecordType() bool { return t.TypeClass == RecordType }
func (t *Type) IsFunctionType() bool { return t.TypeClass == FunctionType }
func (t *Type) IsListType() bool { return t.TypeClass == ListType }
func (t *Type) IsMapType() bool { return t.TypeClass == MapType }

// AsXxx accessors return the class-specific payload. They panic (via
// the unchecked type assertion) if t is not of the corresponding class.
func (t *Type) AsNamedType() *NamedTypeData { return t.TypeData.(*NamedTypeData) }
func (t *Type) AsAliasType() *AliasTypeData { return t.TypeData.(*AliasTypeData) }
func (t *Type) AsReferenceType() *ReferenceTypeData { return t.TypeData.(*ReferenceTypeData) }
func (t *Type) AsTupleType() *TupleTypeData { return t.TypeData.(*TupleTypeData) }
func (t *Type) AsRecordType() *RecordTypeData { return t.TypeData.(*RecordTypeData) }
func (t *Type) AsFunctionType() *FunctionTypeData { return t.TypeData.(*FunctionTypeData) }
func (t *Type) AsListType() *ListTypeData { return t.TypeData.(*ListTypeData) }
func (t *Type) AsMapType() *MapTypeData { return t.TypeData.(*MapTypeData) }
// LeafType follows alias and reference chains down to the underlying
// named-type data. It returns nil when the chain does not end in a
// named or record type.
func (t *Type) LeafType() *NamedTypeData {
	switch data := t.TypeData.(type) {
	case *NamedTypeData:
		return data
	case *AliasTypeData:
		return data.TargetType.LeafType()
	case *ReferenceTypeData:
		return data.TargetType.LeafType()
	case *RecordTypeData:
		return &data.NamedTypeData
	}
	return nil
}
// String implements fmt.Stringer for debugging output.
func (t *Type) String() string {
	return fmt.Sprintf("{%d - %s}", t.TypeClass, t.TypeData)
}

// NewType builds a Type of the given class wrapping the given payload.
func NewType(typeCls int, data interface{}) *Type {
	return &Type{
		TypeClass: typeCls,
		TypeData:  data,
	}
}
// NamedTypeData identifies a type by name and the package it belongs to.
type NamedTypeData struct {
	Name    string
	Package string
}

// AliasTypeData records an alias/typedef and the type it stands for.
type AliasTypeData struct {
	// Type this is an alias/typedef for
	Name       string
	TargetType *Type
}

// ReferenceTypeData is the payload of a ReferenceType.
type ReferenceTypeData struct {
	// The target type this is a reference to
	TargetType *Type
}

// MapTypeData is the payload of a MapType.
type MapTypeData struct {
	// Key and value types of the map.
	KeyType   *Type
	ValueType *Type
}

// ListTypeData is the payload of a ListType.
type ListTypeData struct {
	// The target type this is an array of
	TargetType *Type
}

// TupleTypeData is the payload of a TupleType: the element types in order.
type TupleTypeData struct {
	SubTypes []*Type
}

// RecordTypeData describes a struct-like record: its name, its base
// types, and its fields.
type RecordTypeData struct {
	NamedTypeData
	Bases []*Type
	// Type of each member in the struct
	Fields []*Field
}

// NumFields returns the number of declared fields.
func (td *RecordTypeData) NumFields() int {
	return len(td.Fields)
}

// NumBases returns the number of base types.
func (td *RecordTypeData) NumBases() int {
	return len(td.Bases)
}

// Field is a named, typed member of a record.
type Field struct {
	Name string
	Type *Type
}
// FunctionTypeData is the payload of a FunctionType: parameter, result
// and exception types.
type FunctionTypeData struct {
	// Types of the input parameters
	InputTypes []*Type
	// Types of the output parameters
	OutputTypes []*Type
	// Types of possible exceptions that can be thrown (not supported in all languages)
	ExceptionTypes []*Type
}

// NumInputs returns the number of input parameter types.
func (td *FunctionTypeData) NumInputs() int {
	return len(td.InputTypes)
}

// NumOutputs returns the number of output parameter types.
func (td *FunctionTypeData) NumOutputs() int {
	return len(td.OutputTypes)
}

// NumExceptions returns the number of declared exception types.
func (td *FunctionTypeData) NumExceptions() int {
	return len(td.ExceptionTypes)
} | types.go | 0.623262 | 0.65379 | types.go | starcoder |
package spdx
// SnippetRangePointer is one endpoint of a snippet range: either a byte
// offset or a line number within the file identified by
// FileSPDXIdentifier.
type SnippetRangePointer struct {
	// 5.3: Snippet Byte Range: [start byte]:[end byte]
	// Cardinality: mandatory, one
	Offset int `json:"offset,omitempty"`
	// 5.4: Snippet Line Range: [start line]:[end line]
	// Cardinality: optional, one
	LineNumber int `json:"lineNumber,omitempty"`
	FileSPDXIdentifier ElementID `json:"reference"`
}

// SnippetRange is a start/end pair of pointers delimiting a snippet.
type SnippetRange struct {
	StartPointer SnippetRangePointer `json:"startPointer"`
	EndPointer   SnippetRangePointer `json:"endPointer"`
}
// Snippet2_1 is a Snippet section of an SPDX Document for version 2.1 of the spec.
// Field numbers (5.x) refer to the corresponding sections of the SPDX 2.1
// specification.
type Snippet2_1 struct {
	// 5.1: Snippet SPDX Identifier: "SPDXRef-[idstring]"
	// Cardinality: mandatory, one
	SnippetSPDXIdentifier ElementID `json:"SPDXID"`
	// 5.2: Snippet from File SPDX Identifier
	// Cardinality: mandatory, one
	SnippetFromFileSPDXIdentifier ElementID `json:"snippetFromFile"`
	// Ranges denotes the start/end byte offsets or line numbers that the snippet is relevant to
	Ranges []SnippetRange `json:"ranges"`
	// 5.5: Snippet Concluded License: SPDX License Expression, "NONE" or "NOASSERTION"
	// Cardinality: mandatory, one
	SnippetLicenseConcluded string `json:"licenseConcluded"`
	// 5.6: License Information in Snippet: SPDX License Expression, "NONE" or "NOASSERTION"
	// Cardinality: optional, one or many
	LicenseInfoInSnippet []string `json:"licenseInfoInSnippets,omitempty"`
	// 5.7: Snippet Comments on License
	// Cardinality: optional, one
	SnippetLicenseComments string `json:"licenseComments,omitempty"`
	// 5.8: Snippet Copyright Text: copyright notice(s) text, "NONE" or "NOASSERTION"
	// Cardinality: mandatory, one
	SnippetCopyrightText string `json:"copyrightText"`
	// 5.9: Snippet Comment
	// Cardinality: optional, one
	SnippetComment string `json:"comment,omitempty"`
	// 5.10: Snippet Name
	// Cardinality: optional, one
	SnippetName string `json:"name,omitempty"`
}

// Snippet2_2 is a Snippet section of an SPDX Document for version 2.2 of the spec.
// It matches Snippet2_1 plus the 2.2-only attribution texts.
type Snippet2_2 struct {
	// 5.1: Snippet SPDX Identifier: "SPDXRef-[idstring]"
	// Cardinality: mandatory, one
	SnippetSPDXIdentifier ElementID `json:"SPDXID"`
	// 5.2: Snippet from File SPDX Identifier
	// Cardinality: mandatory, one
	SnippetFromFileSPDXIdentifier ElementID `json:"snippetFromFile"`
	// Ranges denotes the start/end byte offsets or line numbers that the snippet is relevant to
	Ranges []SnippetRange `json:"ranges"`
	// 5.5: Snippet Concluded License: SPDX License Expression, "NONE" or "NOASSERTION"
	// Cardinality: mandatory, one
	SnippetLicenseConcluded string `json:"licenseConcluded"`
	// 5.6: License Information in Snippet: SPDX License Expression, "NONE" or "NOASSERTION"
	// Cardinality: optional, one or many
	LicenseInfoInSnippet []string `json:"licenseInfoInSnippets,omitempty"`
	// 5.7: Snippet Comments on License
	// Cardinality: optional, one
	SnippetLicenseComments string `json:"licenseComments,omitempty"`
	// 5.8: Snippet Copyright Text: copyright notice(s) text, "NONE" or "NOASSERTION"
	// Cardinality: mandatory, one
	SnippetCopyrightText string `json:"copyrightText"`
	// 5.9: Snippet Comment
	// Cardinality: optional, one
	SnippetComment string `json:"comment,omitempty"`
	// 5.10: Snippet Name
	// Cardinality: optional, one
	SnippetName string `json:"name,omitempty"`
	// 5.11: Snippet Attribution Text
	// Cardinality: optional, one or many
	// Note: tagged "-", so it is excluded from JSON (de)serialization.
	SnippetAttributionTexts []string `json:"-"`
} | spdx/snippet.go | 0.7478 | 0.434701 | snippet.go | starcoder |
package intrusive
import (
"math"
"unsafe"
)
// HashMap presents a hash map. It uses separate chaining with intrusive
// nodes, and the slot table grows and shrinks one slot at a time (see
// addSlot/removeSlot) to keep the load factor near maxLoadFactor.
type HashMap struct {
	maxLoadFactor float64
	keyHasher HashMapKeyHasher
	nodeMatcher HashMapNodeMatcher
	slots []hashMapSlot
	// minSlotCountShift is the log2 of the power-of-two lower bound on
	// len(slots); locateSlot uses it to fold hashes onto existing slots.
	minSlotCountShift int
	nodeCount int
}
// Init initializes the map and then returns the map. A non-positive
// maxLoadFactor selects the package default.
func (hm *HashMap) Init(maxLoadFactor float64, keyHasher HashMapKeyHasher, nodeMatcher HashMapNodeMatcher) *HashMap {
	if maxLoadFactor <= 0 {
		maxLoadFactor = defaultMaxHashMapLoadFactor
	}
	hm.maxLoadFactor = maxLoadFactor
	hm.keyHasher = keyHasher
	hm.nodeMatcher = nodeMatcher
	// Start with a single empty slot.
	hm.slots = make([]hashMapSlot, 1)
	hm.slots[0] = emptyHashMapSlot
	return hm
}
// InsertNode inserts the given node with the given key to the map.
func (hm *HashMap) InsertNode(node *HashMapNode, key interface{}) {
	hash := hm.keyHasher(key)
	node.keyHash = hash
	hm.getSlot(hash).AppendNode(node)
	hm.nodeCount++
	hm.maybeExpand()
}

// RemoveNode removes the given node from the map.
func (hm *HashMap) RemoveNode(node *HashMapNode) {
	slot := hm.getSlot(node.keyHash)
	slot.RemoveNode(node)
	hm.nodeCount--
	hm.maybeShrink()
}

// FindNode looks up a node stored under the given key, returning false
// when no node with an identical key exists.
func (hm *HashMap) FindNode(key interface{}) (*HashMapNode, bool) {
	hash := hm.keyHasher(key)
	slot := hm.getSlot(hash)
	return slot.FindNode(hash, hm.nodeMatcher, key)
}
// Foreach returns an iterator positioned at the first node in the map.
func (hm *HashMap) Foreach() *HashMapIterator {
	it := new(HashMapIterator)
	return it.Init(hm)
}

// IsEmpty indicates whether the map holds no nodes.
func (hm *HashMap) IsEmpty() bool {
	return hm.nodeCount == 0
}

// NumberOfNodes returns how many nodes are currently in the map.
func (hm *HashMap) NumberOfNodes() int {
	return hm.nodeCount
}
// getSlot returns the slot responsible for the given key hash.
func (hm *HashMap) getSlot(keyHash uint64) *hashMapSlot {
	return &hm.slots[hm.locateSlot(keyHash)]
}

// locateSlot maps a key hash onto an existing slot index. The hash is
// first reduced modulo the next power-of-two bound on the slot count;
// indexes beyond the current slot count fold back onto their "low"
// counterpart.
func (hm *HashMap) locateSlot(keyHash uint64) int {
	idx := int(keyHash & uint64(hm.maxSlotCountPlusOne()-1))
	if idx < len(hm.slots) {
		return idx
	}
	return hm.calculateLowSlotIndex(idx)
}

// calculateLowSlotIndex clears the minSlotCount bit, yielding the index
// of the slot a high slot is paired with for split/merge.
func (hm *HashMap) calculateLowSlotIndex(highSlotIndex int) int {
	return highSlotIndex &^ hm.minSlotCount()
}
// maybeExpand adds slots one at a time until the load factor drops to
// maxLoadFactor or below.
func (hm *HashMap) maybeExpand() {
	for hm.loadFactor() > hm.maxLoadFactor {
		hm.addSlot()
	}
}

// maybeShrink removes slots while the map is under-loaded, never going
// below one slot.
func (hm *HashMap) maybeShrink() {
	for len(hm.slots) >= 2 && hm.loadFactor() < hm.minLoadFactor() {
		hm.removeSlot()
	}
}

// addSlot appends one slot and splits the paired "low" slot's chain
// into it (incremental, linear-hashing-style growth).
func (hm *HashMap) addSlot() {
	highSlotIndex := len(hm.slots)
	hm.slots = append(hm.slots, emptyHashMapSlot)
	highSlot := &hm.slots[highSlotIndex]
	lowSlotIndex := hm.calculateLowSlotIndex(highSlotIndex)
	lowSlot := &hm.slots[lowSlotIndex]
	// Nodes whose hash has the minSlotCount bit set move to the new slot.
	lowSlot.Split(uint64(hm.minSlotCount()), highSlot)
	if len(hm.slots) == hm.maxSlotCountPlusOne() {
		// The slot count reached the next power of two: widen the
		// addressable hash range.
		hm.minSlotCountShift++
	}
}

// removeSlot drops the last slot and merges its chain back into the
// "low" slot it was originally split from.
func (hm *HashMap) removeSlot() {
	highSlotIndex := len(hm.slots) - 1
	highSlot := &hm.slots[highSlotIndex]
	hm.slots = hm.slots[:highSlotIndex]
	if len(hm.slots) < hm.minSlotCount() {
		// Dropped below the current power-of-two bound: narrow the range.
		hm.minSlotCountShift--
	}
	lowSlotIndex := hm.calculateLowSlotIndex(highSlotIndex)
	lowSlot := &hm.slots[lowSlotIndex]
	highSlot.Merge(lowSlot)
}
// minLoadFactor is the shrink threshold: half the expand threshold.
func (hm *HashMap) minLoadFactor() float64 { return hm.maxLoadFactor / 2 }

// loadFactor is the average chain length: nodes per slot.
func (hm *HashMap) loadFactor() float64 {
	return float64(hm.nodeCount) / float64(len(hm.slots))
}

// minSlotCount is the current power-of-two lower bound on the slot count.
func (hm *HashMap) minSlotCount() int { return 1 << hm.minSlotCountShift }

// maxSlotCountPlusOne is twice minSlotCount: the next power-of-two bound.
func (hm *HashMap) maxSlotCountPlusOne() int {
	return 1 << (hm.minSlotCountShift + 1)
}
// HashMapKeyHasher is the type of a function hashing the given key
// into a hash.
type HashMapKeyHasher func(key interface{}) uint64

// HashMapNodeMatcher is the type of a function indicating whether the
// given node is matched with the given key.
type HashMapNodeMatcher func(hmn *HashMapNode, key interface{}) bool

// HashMapNode represents a node in a hash map. It is embedded in the
// user's container struct (intrusive-container pattern).
type HashMapNode struct {
	// prev links the node into its slot's chain.
	prev *HashMapNode
	// keyHash caches the hash computed at insertion time.
	keyHash uint64
}

// GetContainer returns a pointer to the container which contains
// the HashMapNode field about the node at the given offset, by
// subtracting the field's offset from the node's own address.
func (hmn *HashMapNode) GetContainer(offset uintptr) unsafe.Pointer {
	return unsafe.Pointer(uintptr(unsafe.Pointer(hmn)) - offset)
}

// IsReset indicates whether the node is reset (with a zero value),
// i.e. not currently linked into any chain.
func (hmn *HashMapNode) IsReset() bool {
	return hmn.prev == nil
}
// HashMapIterator represents an iterator over all nodes in a hash map.
// The successor is pre-cached, so the current node may safely be
// removed during iteration (useful to destroy the whole map).
type HashMapIterator struct {
	hm *HashMap
	slotIndex int
	node, nextNode *HashMapNode
}

// Init initializes the iterator, positions it at the first node (if
// any) and then returns the iterator.
func (hmi *HashMapIterator) Init(hm *HashMap) *HashMapIterator {
	hmi.hm = hm
	hmi.scanSlots(0)
	return hmi
}

// IsAtEnd indicates whether the iteration has no more nodes.
func (hmi *HashMapIterator) IsAtEnd() bool {
	return hmi.node == nil
}

// Node returns the current node in the iteration.
// It's safe to erase the current node for the next node
// to advance to is pre-cached. That will be useful to
// destroy the entire map while iterating through the map.
func (hmi *HashMapIterator) Node() *HashMapNode {
	return hmi.node
}

// Advance advances the iterator to the next node: first along the
// current slot's chain, then across later slots.
func (hmi *HashMapIterator) Advance() {
	if node := hmi.nextNode; node != &hashMapNil {
		hmi.node = node
		hmi.nextNode = node.prev
		return
	}
	hmi.scanSlots(hmi.slotIndex + 1)
}

// scanSlots finds the first non-empty slot at or after startSlotIndex
// and caches its head node and that node's successor. When no slot
// remains, it marks the iteration finished (node == nil).
func (hmi *HashMapIterator) scanSlots(startSlotIndex int) {
	n := len(hmi.hm.slots)
	for i := startSlotIndex; i < n; i++ {
		slot := &hmi.hm.slots[i]
		if node := slot.lastNode; node != &hashMapNil {
			hmi.slotIndex = i
			hmi.node = node
			hmi.nextNode = node.prev
			return
		}
	}
	hmi.slotIndex = n
	hmi.node = nil
}
// Default expand threshold when Init is given a non-positive factor.
const defaultMaxHashMapLoadFactor = 1 - 1/math.E

// hashMapSlot is a singly linked chain of nodes terminated by the
// &hashMapNil sentinel (not by nil).
type hashMapSlot struct {
	lastNode *HashMapNode
}

// AppendNode pushes node onto the head (lastNode end) of the chain.
func (hms *hashMapSlot) AppendNode(node *HashMapNode) {
	node.prev = hms.lastNode
	hms.lastNode = node
}

// RemoveNode unlinks node from the chain via a pointer-to-pointer walk.
// The node must be present in this chain; otherwise the loop walks off
// the chain's end.
func (hms *hashMapSlot) RemoveNode(node *HashMapNode) {
	for node2 := &hms.lastNode; ; node2 = &(*node2).prev {
		if *node2 == node {
			*node2 = node.prev
			return
		}
	}
}

// FindNode scans the chain for a node whose cached hash equals keyHash
// and which the matcher accepts for key.
func (hms *hashMapSlot) FindNode(keyHash uint64, nodeMatcher HashMapNodeMatcher, key interface{}) (*HashMapNode, bool) {
	for node := hms.lastNode; node != &hashMapNil; node = node.prev {
		if node.keyHash == keyHash && nodeMatcher(node, key) {
			return node, true
		}
	}
	return nil, false
}
// Split moves every node whose hash has distinctKeyHashBit set out of
// hms and into high, unlinking each in place with a pointer-to-pointer
// walk.
func (hms *hashMapSlot) Split(distinctKeyHashBit uint64, high *hashMapSlot) {
	node := &hms.lastNode
	for *node != &hashMapNil {
		if (*node).keyHash&distinctKeyHashBit == 0 {
			node = &(*node).prev
			continue
		}
		// Unlink the node here and push it onto the high chain.
		node2 := *node
		*node = (*node).prev
		high.AppendNode(node2)
	}
}
// Merge splices all of hms's nodes onto the end of low's chain and
// empties hms. It walks low's chain to its sentinel terminator and
// replaces the terminator with hms's chain.
func (hms *hashMapSlot) Merge(low *hashMapSlot) {
	tail := &low.lastNode
	for *tail != &hashMapNil {
		tail = &(*tail).prev
	}
	*tail = hms.lastNode
	// Reset to the sentinel rather than nil: every other empty slot uses
	// &hashMapNil (see emptyHashMapSlot), and FindNode/scanSlots compare
	// against the sentinel — a stale read of a nil lastNode would
	// dereference nil. Previously this assigned nil.
	hms.lastNode = &hashMapNil
}
var (
	// hashMapNil is the chain terminator; slots point at it rather than
	// at nil, so chain walks compare against &hashMapNil.
	hashMapNil HashMapNode
	// emptyHashMapSlot is the canonical empty slot value.
	emptyHashMapSlot = hashMapSlot{&hashMapNil}
) | hashmap.go | 0.870721 | 0.509947 | hashmap.go | starcoder |
package dst
// Beta distribution reparametrized using mean and standard deviation.
// betaμσShapeParams converts a (mean, standard deviation)
// parametrization into the standard (α, β) shape parameters of the
// Beta distribution.
func betaμσShapeParams(μ, σ float64) (float64, float64) {
	t := μ*(1-μ)/(σ*σ) - 1
	return μ * t, (1 - μ) * t
}

// BetaμσPDF returns the PDF of the Beta distribution reparametrized using mean and standard deviation.
func BetaμσPDF(μ, σ float64) func(x float64) float64 {
	α, β := betaμσShapeParams(μ, σ)
	return BetaPDF(α, β)
}

// BetaμσLnPDF returns the natural logarithm of the PDF of the Beta distribution reparametrized using mean and standard deviation.
func BetaμσLnPDF(μ, σ float64) func(x float64) float64 {
	α, β := betaμσShapeParams(μ, σ)
	return BetaLnPDF(α, β)
}

// BetaμσNext returns random number drawn from the Beta distribution reparametrized using mean and standard deviation.
func BetaμσNext(μ, σ float64) float64 {
	α, β := betaμσShapeParams(μ, σ)
	return BetaNext(α, β)
}

// Betaμσ returns the random number generator with Beta distribution reparametrized using mean and standard deviation.
func Betaμσ(μ, σ float64) func() float64 {
	α, β := betaμσShapeParams(μ, σ)
	return func() float64 { return BetaNext(α, β) }
}
// BetaμσPDFAt returns the value of PDF of Beta distribution at x.
func BetaμσPDFAt(μ, σ, x float64) float64 {
	return BetaμσPDF(μ, σ)(x)
}

// BetaμσCDF returns the CDF of the Beta distribution reparametrized using mean and standard deviation.
func BetaμσCDF(μ, σ float64) func(x float64) float64 {
	t := μ*(1-μ)/(σ*σ) - 1
	α := μ * t
	β := (1 - μ) * t
	return BetaCDF(α, β)
}
// BetaμσCDFAt returns the value of CDF of the Beta distribution reparametrized using mean and standard deviation, at x.
func BetaμσCDFAt(μ, σ, x float64) float64 {
cdf := BetaCDF(μ, σ)
return cdf(x)
}
// BetaμσQtl returns the inverse of the CDF (quantile function) of the
// Beta distribution reparametrized using mean and standard deviation.
// The returned function takes p, the probability for which the quantile
// is evaluated.
func BetaμσQtl(μ, σ float64) func(p float64) float64 {
	shared := μ*(1-μ)/(σ*σ) - 1
	α := μ * shared
	β := (1 - μ) * shared
	return BetaQtl(α, β)
}
// BetaμσQtlFor returns the inverse of the CDF (quantile) of the Beta distribution reparametrized using mean and standard deviation, for a given probability.
// NOTE(review): the local is named "cdf" but BetaμσQtl returns the
// quantile (inverse-CDF) function; the behavior is correct, only the
// name is misleading.
func BetaμσQtlFor(μ, σ, p float64) float64 {
	cdf := BetaμσQtl(μ, σ)
	return cdf(p)
} | dst/beta-mu-sigma.go | 0.932083 | 0.656713 | beta-mu-sigma.go | starcoder |
package draw
// doellipse marshals one ellipse/arc drawing command into the display's
// message buffer. cmd is 'e' (outline) or 'E' (fill). The wire layout
// is: cmd, dst id, src id, center x, center y, x radius, y radius,
// thickness, source point x, source point y, alpha, phi — 45 bytes.
// Callers mark the command as an arc by setting the top bit of alpha.
func doellipse(cmd byte, dst *Image, c Point, xr, yr, thick int, src *Image, sp Point, alpha uint32, phi int, op Op) {
	setdrawop(dst.Display, op)
	a := dst.Display.bufimage(1 + 4 + 4 + 2*4 + 4 + 4 + 4 + 2*4 + 2*4)
	a[0] = cmd
	bplong(a[1:], dst.id)
	bplong(a[5:], src.id)
	bplong(a[9:], uint32(c.X))
	bplong(a[13:], uint32(c.Y))
	bplong(a[17:], uint32(xr))
	bplong(a[21:], uint32(yr))
	bplong(a[25:], uint32(thick))
	bplong(a[29:], uint32(sp.X))
	bplong(a[33:], uint32(sp.Y))
	bplong(a[37:], alpha)
	bplong(a[41:], uint32(phi))
}
// Ellipse draws in dst an ellipse centered on c with horizontal and vertical
// semiaxes a and b. The source is aligned so sp in src corresponds to c in dst.
// The ellipse is drawn with thickness 1+2*thick.
// All methods below take the display mutex for the duration of the call.
func (dst *Image) Ellipse(c Point, a, b, thick int, src *Image, sp Point) {
	dst.Display.mu.Lock()
	defer dst.Display.mu.Unlock()
	doellipse('e', dst, c, a, b, thick, src, sp, 0, 0, SoverD)
}

// EllipseOp is like Ellipse but specifies an explicit Porter-Duff operator.
func (dst *Image) EllipseOp(c Point, a, b, thick int, src *Image, sp Point, op Op) {
	dst.Display.mu.Lock()
	defer dst.Display.mu.Unlock()
	doellipse('e', dst, c, a, b, thick, src, sp, 0, 0, op)
}

// FillEllipse is like Ellipse but fills the ellipse rather than outlining it
// (command 'E' instead of 'e'; thickness is ignored and sent as 0).
func (dst *Image) FillEllipse(c Point, a, b int, src *Image, sp Point) {
	dst.Display.mu.Lock()
	defer dst.Display.mu.Unlock()
	doellipse('E', dst, c, a, b, 0, src, sp, 0, 0, SoverD)
}

// FillEllipseOp is like FillEllipse but specifies an explicit Porter-Duff operator.
func (dst *Image) FillEllipseOp(c Point, a, b int, src *Image, sp Point, op Op) {
	dst.Display.mu.Lock()
	defer dst.Display.mu.Unlock()
	doellipse('E', dst, c, a, b, 0, src, sp, 0, 0, op)
}

// Arc is like Ellipse but draws only that portion of the ellipse starting at angle alpha
// and extending through an angle of phi. The angles are measured in degrees
// counterclockwise from the positive x axis. The 1<<31 bit on alpha is
// the wire-protocol flag distinguishing an arc from a full ellipse.
func (dst *Image) Arc(c Point, a, b, thick int, src *Image, sp Point, alpha, phi int) {
	dst.Display.mu.Lock()
	defer dst.Display.mu.Unlock()
	doellipse('e', dst, c, a, b, thick, src, sp, uint32(alpha)|1<<31, phi, SoverD)
}

// ArcOp is like Arc but specifies an explicit Porter-Duff operator.
func (dst *Image) ArcOp(c Point, a, b, thick int, src *Image, sp Point, alpha, phi int, op Op) {
	dst.Display.mu.Lock()
	defer dst.Display.mu.Unlock()
	doellipse('e', dst, c, a, b, thick, src, sp, uint32(alpha)|1<<31, phi, op)
}

// FillArc is like Arc but fills the sector with the source color.
func (dst *Image) FillArc(c Point, a, b int, src *Image, sp Point, alpha, phi int) {
	dst.Display.mu.Lock()
	defer dst.Display.mu.Unlock()
	doellipse('E', dst, c, a, b, 0, src, sp, uint32(alpha)|1<<31, phi, SoverD)
}

// FillArcOp is like FillArc but specifies an explicit Porter-Duff operator.
func (dst *Image) FillArcOp(c Point, a, b int, src *Image, sp Point, alpha, phi int, op Op) {
	dst.Display.mu.Lock()
	defer dst.Display.mu.Unlock()
	doellipse('E', dst, c, a, b, 0, src, sp, uint32(alpha)|1<<31, phi, op)
} | draw/ellipse.go | 0.64232 | 0.495239 | ellipse.go | starcoder |
package builtins
import "github.com/xav/go-script/types"
// Each value below describes one of Go's predeclared built-in
// functions as a FuncType whose Builtin field carries the function's
// name. The spec links give the semantics of each built-in.
var (
	// AppendType is the definition of the append built-in function (https://golang.org/ref/spec#Appending_and_copying_slices)
	AppendType = &types.FuncType{Builtin: "append"}
	// CapType is the definition of the cap(s) built-in function (https://golang.org/ref/spec#Length_and_capacity)
	CapType = &types.FuncType{Builtin: "cap"}
	// CloseType is the definition of the close(c) built-in function (https://golang.org/ref/spec#Close)
	CloseType = &types.FuncType{Builtin: "close"}
	// ComplexType is the definition of the complex built-in function (https://golang.org/ref/spec#Complex_numbers)
	ComplexType = &types.FuncType{Builtin: "complex"}
	// CopyType is the definition of the copy built-in function (https://golang.org/ref/spec#Appending_and_copying_slices)
	CopyType = &types.FuncType{Builtin: "copy"}
	// DeleteType is the definition of the delete built-in function (https://golang.org/ref/spec#Deletion_of_map_elements)
	DeleteType = &types.FuncType{Builtin: "delete"}
	// ImagType is the definition of the imag built-in function (https://golang.org/ref/spec#Complex_numbers)
	ImagType = &types.FuncType{Builtin: "imag"}
	// LenType is the definition of the len(s) built-in function (https://golang.org/ref/spec#Length_and_capacity)
	LenType = &types.FuncType{Builtin: "len"}
	// MakeType is the definition of the make built-in function (https://golang.org/ref/spec#Making_slices_maps_and_channels)
	MakeType = &types.FuncType{Builtin: "make"}
	// NewType is the definition of the new built-in function (https://golang.org/ref/spec#Allocation)
	NewType = &types.FuncType{Builtin: "new"}
	// PanicType is the definition of the panic built-in function (https://golang.org/ref/spec#Handling_panics)
	PanicType = &types.FuncType{Builtin: "panic"}
	// PrintType is the definition of the print built-in function (https://golang.org/ref/spec#Bootstrapping)
	PrintType = &types.FuncType{Builtin: "print"}
	// PrintlnType is the definition of the println built-in function (https://golang.org/ref/spec#Bootstrapping)
	PrintlnType = &types.FuncType{Builtin: "println"}
	// RealType is the definition of the real built-in function (https://golang.org/ref/spec#Complex_numbers)
	RealType = &types.FuncType{Builtin: "real"}
	// RecoverType is the definition of the recover built-in function (https://golang.org/ref/spec#Handling_panics)
	RecoverType = &types.FuncType{Builtin: "recover"}
) | builtins/functions.go | 0.54698 | 0.506469 | functions.go | starcoder |
package plotter
import (
"image"
"math"
"github.com/hneemann/nplot"
"github.com/hneemann/nplot/vg"
"github.com/hneemann/nplot/vg/draw"
)
// Image is a plotter that draws a scaled, raster image.
type Image struct {
	img image.Image
	// cols and rows cache the image bounds' width and height in pixels.
	cols int
	rows int
	// Data-space extent of the drawn rectangle; dx and dy are the
	// data-space width/height covered by a single pixel column/row.
	xmin, xmax, dx float64
	ymin, ymax, dy float64
}
// NewImage creates a new image plotter. Image will draw img inside the
// rectangle defined by the (xmin, ymin) and (xmax, ymax) points given
// in the data space. The img will be scaled to fit inside the
// rectangle.
func NewImage(img image.Image, xmin, ymin, xmax, ymax float64) *Image {
	bounds := img.Bounds()
	nCols := bounds.Dx()
	nRows := bounds.Dy()
	return &Image{
		img:  img,
		cols: nCols,
		rows: nRows,
		xmin: xmin,
		xmax: xmax,
		dx:   math.Abs(xmax-xmin) / float64(nCols),
		ymin: ymin,
		ymax: ymax,
		dy:   math.Abs(ymax-ymin) / float64(nRows),
	}
}
// Plot implements the Plot method of the nplot.Plotter interface.
// It maps the image's data-space rectangle into canvas coordinates and
// draws the (possibly axis-warped, see transformFor) image there.
func (img *Image) Plot(c draw.Canvas, p *nplot.Plot) {
	trX, trY := p.Transforms(&c)
	xmin := trX(img.xmin)
	ymin := trY(img.ymin)
	xmax := trX(img.xmax)
	ymax := trY(img.ymax)
	rect := vg.Rectangle{
		Min: vg.Point{X: xmin, Y: ymin},
		Max: vg.Point{X: xmax, Y: ymax},
	}
	c.DrawImage(rect, img.transformFor(p))
}

// DataRange implements the DataRange method
// of the nplot.DataRanger interface: it returns the data-space
// rectangle the image was created with.
func (img *Image) DataRange() (xmin, xmax, ymin, ymax float64) {
	return img.xmin, img.xmax, img.ymin, img.ymax
}

// GlyphBoxes implements the GlyphBoxes method
// of the nplot.GlyphBoxer interface. The image needs no glyph padding,
// so it returns nil.
func (img *Image) GlyphBoxes(plt *nplot.Plot) []nplot.GlyphBox {
	return nil
}
// transformFor warps the image to align with non-linear axes. When both
// axes use a linear scale the original image is returned unchanged;
// otherwise each pixel is re-positioned by pushing its data-space
// coordinate through the plot's axis normalizers.
func (img *Image) transformFor(p *nplot.Plot) image.Image {
	_, xLinear := p.X.Scale.(nplot.LinearScale)
	_, yLinear := p.Y.Scale.(nplot.LinearScale)
	if xLinear && yLinear {
		return img.img
	}
	b := img.img.Bounds()
	o := image.NewNRGBA64(b)
	for c := 0; c < img.cols; c++ {
		// Find the equivalent image column after applying axis transforms.
		cTrans := int(p.X.Norm(img.x(c)) * float64(img.cols))
		// Find the equivalent column of the previous image column after applying
		// axis transforms.
		cPrevTrans := int(p.X.Norm(img.x(maxInt(c-1, 0))) * float64(img.cols))
		for r := 0; r < img.rows; r++ {
			// Find the equivalent image row after applying axis transforms.
			rTrans := int(p.Y.Norm(img.y(r)) * float64(img.rows))
			// Find the equivalent row of the previous image row after applying
			// axis transforms.
			rPrevTrans := int(p.Y.Norm(img.y(maxInt(r-1, 0))) * float64(img.rows))
			// Source pixel; rows are flipped because image y grows downward
			// while plot y grows upward.
			crColor := img.img.At(c, img.rows-r-1)
			// Set all the pixels in the new image between (cPrevTrans, rPrevTrans)
			// and (cTrans, rTrans) to the color at (c,r) in the original image.
			// TODO: Improve interpolation.
			for cPrime := cPrevTrans; cPrime <= cTrans; cPrime++ {
				for rPrime := rPrevTrans; rPrime <= rTrans; rPrime++ {
					o.Set(cPrime, img.rows-rPrime-1, crColor)
				}
			}
		}
	}
	return o
}
// maxInt returns the larger of the two integers a and b.
func maxInt(a, b int) int {
	if b > a {
		return b
	}
	return a
}
// x converts pixel column c to its x-coordinate in data space.
// It panics if c lies outside [0, cols).
func (img *Image) x(c int) float64 {
	if c < 0 || c >= img.cols {
		panic("plotter/image: illegal range")
	}
	return img.xmin + float64(c)*img.dx
}
func (img *Image) y(r int) float64 {
if r >= img.rows || r < 0 {
panic("plotter/image: illegal range")
}
return img.ymin + float64(r)*img.dy
} | plotter/image.go | 0.829112 | 0.520862 | image.go | starcoder |
package main
import (
"fmt"
"github.com/theatlasroom/advent-of-code/go/utils"
)
/**
--- Day 1: Sonar Sweep ---
You're minding your own business on a ship at sea when the overboard alarm goes off! You rush to see if you can help. Apparently, one of the Elves tripped and accidentally sent the sleigh keys flying into the ocean!
Before you know it, you're inside a submarine the Elves keep ready for situations like this. It's covered in Christmas lights (because of course it is), and it even has an experimental antenna that should be able to track the keys if you can boost its signal strength high enough; there's a little meter that indicates the antenna's signal strength by displaying 0-50 stars.
Your instincts tell you that in order to save Christmas, you'll need to get all fifty stars by December 25th.
Collect stars by solving puzzles. Two puzzles will be made available on each day in the Advent calendar; the second puzzle is unlocked when you complete the first. Each puzzle grants one star. Good luck!
As the submarine drops below the surface of the ocean, it automatically performs a sonar sweep of the nearby sea floor. On a small screen, the sonar sweep report (your puzzle input) appears: each line is a measurement of the sea floor depth as the sweep looks further and further away from the submarine.
For example, suppose you had the following report:
199
200
208
210
200
207
240
269
260
263
This report indicates that, scanning outward from the submarine, the sonar sweep found depths of 199, 200, 208, 210, and so on.
The first order of business is to figure out how quickly the depth increases, just so you know what you're dealing with - you never know if the keys will get carried into deeper water by an ocean current or a fish or something.
To do this, count the number of times a depth measurement increases from the previous measurement. (There is no measurement before the first measurement.) In the example above, the changes are as follows:
199 (N/A - no previous measurement)
200 (increased)
208 (increased)
210 (increased)
200 (decreased)
207 (increased)
240 (increased)
269 (increased)
260 (decreased)
263 (increased)
In this example, there are 7 measurements that are larger than the previous measurement.
How many measurements are larger than the previous measurement?
--- Part Two ---
Considering every single measurement isn't as useful as you expected: there's just too much noise in the data.
Instead, consider sums of a three-measurement sliding window. Again considering the above example:
199 A
200 A B
208 A B C
210 B C D
200 E C D
207 E F D
240 E F G
269 F G H
260 G H
263 H
Start by comparing the first and second three-measurement windows. The measurements in the first window are marked A (199, 200, 208); their sum is 199 + 200 + 208 = 607. The second window is marked B (200, 208, 210); its sum is 618. The sum of measurements in the second window is larger than the sum of the first, so this first comparison increased.
Your goal now is to count the number of times the sum of measurements in this sliding window increases from the previous sum. So, compare A with B, then compare B with C, then C with D, and so on. Stop when there aren't enough measurements left to create a new three-measurement sum.
In the above example, the sum of each three-measurement window is as follows:
A: 607 (N/A - no previous sum)
B: 618 (increased)
C: 618 (no change)
D: 617 (decreased)
E: 647 (increased)
F: 716 (increased)
G: 769 (increased)
H: 792 (increased)
In this example, there are 5 sums that are larger than the previous sum.
Consider sums of a three-measurement sliding window. How many sums are larger than the previous sum?
**/
// tuple is a fixed-size sliding window of three depth measurements,
// ordered oldest to newest.
type tuple [3]int

// Add shifts the window left by one slot, discarding the oldest
// measurement and appending i as the newest, and returns the result.
func (w tuple) Add(i int) tuple {
	return tuple{w[1], w[2], i}
}

// Sum returns the total of the three measurements in the window.
func (w tuple) Sum() int {
	total := 0
	for _, v := range w {
		total += v
	}
	return total
}

// Full reports whether every slot holds a measurement. It relies on the
// puzzle's depth readings being strictly positive: a zero slot is
// treated as "not yet filled".
func (w tuple) Full() bool {
	return w[0] > 0 && w[1] > 0 && w[2] > 0
}
// part2 counts how many three-measurement sliding-window sums are
// strictly larger than the previous window's sum, and prints the count.
//
// Window a leads window b by exactly one measurement, so once both
// windows are full, comparing their sums answers whether the newest
// window increased relative to the previous one.
func part2(data []int) {
	var a, b tuple
	increases := 0
	curr := 0
	for idx, next := range data {
		// b receives each value one iteration later than a, keeping it
		// one measurement behind. (Inside a range loop idx is always
		// < len(data), so the original explicit bound checks were
		// tautological and have been removed.)
		if idx != 0 {
			b = b.Add(curr)
		}
		a = a.Add(next)
		if a.Full() && b.Full() && a.Sum() > b.Sum() {
			increases++
		}
		curr = next
	}
	fmt.Printf("Part 2: Increased %d times\n", increases)
}
// part1 prints how many depth measurements are strictly larger than the
// measurement immediately before them. A zero previous value (including
// before the first reading) never counts as an increase.
func part1(data []int) {
	utils.Banner(utils.BannerConfig{Year: 2021, Day: 1})
	increases := 0
	prev := 0
	for _, depth := range data {
		if prev != 0 && depth > prev {
			increases++
		}
		prev = depth
	}
	fmt.Printf("Part 1: Increased %d times\n", increases)
}
func main() {
// Read all the numbers
input := utils.LoadDataAsString("1.txt")
data := utils.StrToIntArr(input)
part1(data)
part2(data)
} | go/2021/1.go | 0.657318 | 0.570062 | 1.go | starcoder |
package metric
// BatchObserver represents an Observer callback that can report
// observations for multiple instruments.
type BatchObserver struct {
	meter  Meter            // meter this observer was created through
	runner AsyncBatchRunner // callback that produces the batched observations
}

// Int64ValueObserver is a metric that captures a set of int64 values at a
// point in time.
type Int64ValueObserver struct {
	asyncInstrument
}

// Float64ValueObserver is a metric that captures a set of float64 values
// at a point in time.
type Float64ValueObserver struct {
	asyncInstrument
}

// Int64SumObserver is a metric that captures a precomputed sum of
// int64 values at a point in time.
type Int64SumObserver struct {
	asyncInstrument
}

// Float64SumObserver is a metric that captures a precomputed sum of
// float64 values at a point in time.
type Float64SumObserver struct {
	asyncInstrument
}

// Int64UpDownSumObserver is a metric that captures a precomputed sum
// (which may decrease as well as increase) of int64 values at a point in
// time.
type Int64UpDownSumObserver struct {
	asyncInstrument
}

// Float64UpDownSumObserver is a metric that captures a precomputed sum
// (which may decrease as well as increase) of float64 values at a point
// in time.
type Float64UpDownSumObserver struct {
	asyncInstrument
}
// Observation returns an Observation, a BatchObserverFunc
// argument, for an asynchronous integer instrument.
// This returns an implementation-level object for use by the SDK,
// users should not refer to this.
func (i Int64ValueObserver) Observation(v int64) Observation {
	return Observation{
		number:     NewInt64Number(v),
		instrument: i.instrument,
	}
}

// Observation returns an Observation, a BatchObserverFunc
// argument, for an asynchronous floating-point instrument.
// This returns an implementation-level object for use by the SDK,
// users should not refer to this.
func (f Float64ValueObserver) Observation(v float64) Observation {
	return Observation{
		number:     NewFloat64Number(v),
		instrument: f.instrument,
	}
}

// Observation returns an Observation, a BatchObserverFunc
// argument, for an asynchronous integer instrument.
// This returns an implementation-level object for use by the SDK,
// users should not refer to this.
func (i Int64SumObserver) Observation(v int64) Observation {
	return Observation{
		number:     NewInt64Number(v),
		instrument: i.instrument,
	}
}

// Observation returns an Observation, a BatchObserverFunc
// argument, for an asynchronous floating-point instrument.
// This returns an implementation-level object for use by the SDK,
// users should not refer to this.
func (f Float64SumObserver) Observation(v float64) Observation {
	return Observation{
		number:     NewFloat64Number(v),
		instrument: f.instrument,
	}
}

// Observation returns an Observation, a BatchObserverFunc
// argument, for an asynchronous integer instrument.
// This returns an implementation-level object for use by the SDK,
// users should not refer to this.
func (i Int64UpDownSumObserver) Observation(v int64) Observation {
	return Observation{
		number:     NewInt64Number(v),
		instrument: i.instrument,
	}
}
// Observation returns an Observation, a BatchObserverFunc
// argument, for an asynchronous integer instrument.
// This returns an implementation-level object for use by the SDK,
// users should not refer to this.
func (f Float64UpDownSumObserver) Observation(v float64) Observation {
return Observation{
number: NewFloat64Number(v),
instrument: f.instrument,
}
} | vendor/go.opentelemetry.io/otel/api/metric/observer.go | 0.903286 | 0.533701 | observer.go | starcoder |
package geom
import (
"fmt"
"github.com/peterstace/simplefeatures/rtree"
)
// Intersects returns true if and only if the two geometries intersect
// with each other, i.e. the point sets that the geometries represent
// have at least one common point.
func Intersects(g1, g2 Geometry) bool {
	// Normalize the argument order so that each type pair only has to be
	// handled in one orientation in the dispatch below.
	if rank(g1) > rank(g2) {
		g1, g2 = g2, g1
	}
	// A GeometryCollection intersects iff any of its members does.
	if g2.IsGeometryCollection() {
		gc := g2.AsGeometryCollection()
		n := gc.NumGeometries()
		for i := 0; i < n; i++ {
			g := gc.GeometryN(i)
			if Intersects(g1, g) {
				return true
			}
		}
		return false
	}
	// Pairwise dispatch on the concrete geometry types. Because of the
	// rank swap above, g1 never ranks higher than g2, so only the upper
	// triangle of the type/type matrix appears here.
	switch {
	case g1.IsPoint():
		switch {
		case g2.IsPoint():
			return hasIntersectionPointWithPoint(g1.AsPoint(), g2.AsPoint())
		case g2.IsLineString():
			return hasIntersectionPointWithLineString(g1.AsPoint(), g2.AsLineString())
		case g2.IsPolygon():
			return hasIntersectionPointWithPolygon(g1.AsPoint(), g2.AsPolygon())
		case g2.IsMultiPoint():
			return hasIntersectionPointWithMultiPoint(g1.AsPoint(), g2.AsMultiPoint())
		case g2.IsMultiLineString():
			return hasIntersectionPointWithMultiLineString(g1.AsPoint(), g2.AsMultiLineString())
		case g2.IsMultiPolygon():
			return hasIntersectionPointWithMultiPolygon(g1.AsPoint(), g2.AsMultiPolygon())
		}
	case g1.IsLineString():
		switch {
		case g2.IsLineString():
			has, _ := hasIntersectionLineStringWithLineString(
				g1.AsLineString(),
				g2.AsLineString(),
				false,
			)
			return has
		case g2.IsPolygon():
			// Single geometries are promoted to their Multi forms so the
			// Multi/Multi helpers can be reused.
			return hasIntersectionMultiLineStringWithMultiPolygon(
				g1.AsLineString().AsMultiLineString(),
				g2.AsPolygon().AsMultiPolygon(),
			)
		case g2.IsMultiPoint():
			return hasIntersectionMultiPointWithMultiLineString(
				g2.AsMultiPoint(),
				g1.AsLineString().AsMultiLineString(),
			)
		case g2.IsMultiLineString():
			has, _ := hasIntersectionMultiLineStringWithMultiLineString(
				g1.AsLineString().AsMultiLineString(),
				g2.AsMultiLineString(),
				false,
			)
			return has
		case g2.IsMultiPolygon():
			return hasIntersectionMultiLineStringWithMultiPolygon(
				g1.AsLineString().AsMultiLineString(),
				g2.AsMultiPolygon(),
			)
		}
	case g1.IsPolygon():
		switch {
		case g2.IsPolygon():
			return hasIntersectionPolygonWithPolygon(
				g1.AsPolygon(),
				g2.AsPolygon(),
			)
		case g2.IsMultiPoint():
			return hasIntersectionMultiPointWithPolygon(
				g2.AsMultiPoint(),
				g1.AsPolygon(),
			)
		case g2.IsMultiLineString():
			return hasIntersectionMultiLineStringWithMultiPolygon(
				g2.AsMultiLineString(),
				g1.AsPolygon().AsMultiPolygon(),
			)
		case g2.IsMultiPolygon():
			return hasIntersectionMultiPolygonWithMultiPolygon(
				g1.AsPolygon().AsMultiPolygon(),
				g2.AsMultiPolygon(),
			)
		}
	case g1.IsMultiPoint():
		switch {
		case g2.IsMultiPoint():
			return hasIntersectionMultiPointWithMultiPoint(
				g1.AsMultiPoint(),
				g2.AsMultiPoint(),
			)
		case g2.IsMultiLineString():
			return hasIntersectionMultiPointWithMultiLineString(
				g1.AsMultiPoint(),
				g2.AsMultiLineString(),
			)
		case g2.IsMultiPolygon():
			return hasIntersectionMultiPointWithMultiPolygon(
				g1.AsMultiPoint(),
				g2.AsMultiPolygon(),
			)
		}
	case g1.IsMultiLineString():
		switch {
		case g2.IsMultiLineString():
			has, _ := hasIntersectionMultiLineStringWithMultiLineString(
				g1.AsMultiLineString(),
				g2.AsMultiLineString(),
				false,
			)
			return has
		case g2.IsMultiPolygon():
			return hasIntersectionMultiLineStringWithMultiPolygon(
				g1.AsMultiLineString(),
				g2.AsMultiPolygon(),
			)
		}
	case g1.IsMultiPolygon():
		switch {
		case g2.IsMultiPolygon():
			return hasIntersectionMultiPolygonWithMultiPolygon(
				g1.AsMultiPolygon(),
				g2.AsMultiPolygon(),
			)
		}
	}
	// Every valid type pairing is handled above, so reaching here means a
	// geometry type was added without updating this dispatch.
	panic(fmt.Sprintf("implementation error: unhandled geometry types %T and %T", g1, g2))
}
// hasIntersectionMultiPointWithMultiLineString reports whether any point
// of mp lies on any line segment of mls. Empty points are skipped.
func hasIntersectionMultiPointWithMultiLineString(mp MultiPoint, mls MultiLineString) bool {
	for i := 0; i < mp.NumPoints(); i++ {
		pt := mp.PointN(i)
		ptXY, ok := pt.XY()
		if !ok {
			// Empty point: intersects nothing.
			continue
		}
		for j := 0; j < mls.NumLineStrings(); j++ {
			seq := mls.LineStringN(j).Coordinates()
			for k := 0; k < seq.Length(); k++ {
				ln, ok := getLine(seq, k)
				if ok && ln.intersectsXY(ptXY) {
					return true
				}
			}
		}
	}
	return false
}

// mlsWithMLSIntersectsExtension carries extra detail about the result of
// a MultiLineString/MultiLineString intersection test.
type mlsWithMLSIntersectsExtension struct {
	// set to true iff the intersection covers multiple points (e.g. multiple 0
	// dimension points, or at least one line segment).
	multiplePoints bool
	// If an intersection occurs, singlePoint is set to one of the intersection
	// points.
	singlePoint XY
}
// hasIntersectionLineStringWithLineString reports whether two
// LineStrings intersect by comparing their constituent line segments.
// The extension result is only meaningful when populateExtension is
// true; see hasIntersectionBetweenLines.
func hasIntersectionLineStringWithLineString(
	ls1, ls2 LineString, populateExtension bool,
) (
	bool, mlsWithMLSIntersectsExtension,
) {
	lines1 := ls1.asLines()
	lines2 := ls2.asLines()
	return hasIntersectionBetweenLines(lines1, lines2, populateExtension)
}

// hasIntersectionMultiLineStringWithMultiLineString reports whether two
// MultiLineStrings intersect by comparing their constituent line
// segments. The extension result is only meaningful when
// populateExtension is true; see hasIntersectionBetweenLines.
func hasIntersectionMultiLineStringWithMultiLineString(
	mls1, mls2 MultiLineString, populateExtension bool,
) (
	bool, mlsWithMLSIntersectsExtension,
) {
	lines1 := mls1.asLines()
	lines2 := mls2.asLines()
	return hasIntersectionBetweenLines(lines1, lines2, populateExtension)
}
// hasIntersectionBetweenLines reports whether any segment in lines1
// intersects any segment in lines2, using an R-tree over one slice to
// prune candidate pairs. When populateExtension is true, it additionally
// determines whether the intersection spans more than a single point and
// returns one point of the intersection.
func hasIntersectionBetweenLines(
	lines1, lines2 []line, populateExtension bool,
) (
	bool, mlsWithMLSIntersectsExtension,
) {
	// Put the larger out of the two inputs into the RTree.
	// NOTE(review): the swap below actually leaves the *smaller* slice in
	// lines1, which is what gets bulk-loaded into the tree — the comment
	// above and the code disagree; confirm which was intended.
	if len(lines1) > len(lines2) {
		lines1, lines2 = lines2, lines1
	}
	bulk := make([]rtree.BulkItem, len(lines1))
	for i, ln := range lines1 {
		bulk[i] = rtree.BulkItem{
			Box:      ln.envelope().box(),
			RecordID: i,
		}
	}
	tree := rtree.BulkLoad(bulk)
	// Keep track of an envelope of all of the points that are in the
	// intersection.
	var env Envelope
	var envPopulated bool
	for _, lnA := range lines2 {
		tree.RangeSearch(lnA.envelope().box(), func(i int) error {
			lnB := lines1[i]
			inter := lnA.intersectLine(lnB)
			if inter.empty {
				return nil
			}
			// Without the extension, the first hit is enough to answer.
			if !populateExtension {
				envPopulated = true
				env = NewEnvelope(inter.ptA)
				env = env.ExtendToIncludePoint(inter.ptB)
				return rtree.Stop
			}
			// A proper segment overlap immediately implies multiple
			// intersection points.
			if inter.ptA != inter.ptB {
				envPopulated = true
				env = NewEnvelope(inter.ptA)
				env = env.ExtendToIncludePoint(inter.ptB)
				return rtree.Stop
			}
			// Single point intersection case from here onwards:
			if !envPopulated {
				envPopulated = true
				env = NewEnvelope(inter.ptA)
				return nil
			}
			env = env.ExtendToIncludePoint(inter.ptA)
			// Two distinct single-point hits grow the envelope, which also
			// implies multiple intersection points.
			if env.Min() != env.Max() {
				return rtree.Stop
			}
			return nil
		})
	}
	var ext mlsWithMLSIntersectsExtension
	if populateExtension {
		ext = mlsWithMLSIntersectsExtension{
			multiplePoints: envPopulated && env.Min() != env.Max(),
			singlePoint:    env.Min(),
		}
	}
	return envPopulated, ext
}
// hasIntersectionMultiLineStringWithMultiPolygon reports whether mls
// intersects mp, i.e. whether any LineString in mls touches the boundary
// of, or lies inside, any polygon of mp.
func hasIntersectionMultiLineStringWithMultiPolygon(mls MultiLineString, mp MultiPolygon) bool {
	if has, _ := hasIntersectionMultiLineStringWithMultiLineString(mls, mp.Boundary(), false); has {
		return true
	}
	// Because there is no intersection of the MultiLineString with the
	// boundary of the MultiPolygon, then each LineString inside the
	// MultiLineString is either fully contained within the MultiPolygon, or
	// fully outside of it. So we just have to check any control point of each
	// LineString to see if it falls inside or outside of the MultiPolygon.
	for i := 0; i < mls.NumLineStrings(); i++ {
		ls := mls.LineStringN(i)
		if hasIntersectionPointWithMultiPolygon(ls.StartPoint(), mp) {
			return true
		}
	}
	return false
}

// hasIntersectionPointWithLineString reports whether pt lies on any line
// segment of ls. An empty point intersects nothing.
func hasIntersectionPointWithLineString(pt Point, ls LineString) bool {
	// Worst case speed is O(n), n is the number of lines.
	ptXY, ok := pt.XY()
	if !ok {
		return false
	}
	seq := ls.Coordinates()
	for i := 0; i < seq.Length(); i++ {
		ln, ok := getLine(seq, i)
		if ok && ln.intersectsXY(ptXY) {
			return true
		}
	}
	return false
}
// hasIntersectionMultiPointWithMultiPoint reports whether the two
// MultiPoints share at least one coordinate, using a hash set over the
// first input for an O(n+m) check. Empty points are ignored.
func hasIntersectionMultiPointWithMultiPoint(mp1, mp2 MultiPoint) bool {
	mp1N := mp1.NumPoints()
	set := make(map[XY]bool, mp1N)
	for i := 0; i < mp1N; i++ {
		if xy, ok := mp1.PointN(i).XY(); ok {
			set[xy] = true
		}
	}
	mp2N := mp2.NumPoints()
	for i := 0; i < mp2N; i++ {
		if xy, ok := mp2.PointN(i).XY(); ok && set[xy] {
			return true
		}
	}
	return false
}

// hasIntersectionPointWithMultiPoint reports whether point coincides
// with any point of mp.
func hasIntersectionPointWithMultiPoint(point Point, mp MultiPoint) bool {
	// Worst case speed is O(n) but that's optimal because mp is not sorted.
	for i := 0; i < mp.NumPoints(); i++ {
		pt := mp.PointN(i)
		if hasIntersectionPointWithPoint(point, pt) {
			return true
		}
	}
	return false
}

// hasIntersectionPointWithMultiLineString reports whether point lies on
// any LineString of mls.
func hasIntersectionPointWithMultiLineString(point Point, mls MultiLineString) bool {
	n := mls.NumLineStrings()
	for i := 0; i < n; i++ {
		if hasIntersectionPointWithLineString(point, mls.LineStringN(i)) {
			return true
		}
	}
	return false
}

// hasIntersectionPointWithMultiPolygon reports whether pt lies on or
// inside any polygon of mp.
func hasIntersectionPointWithMultiPolygon(pt Point, mp MultiPolygon) bool {
	n := mp.NumPolygons()
	for i := 0; i < n; i++ {
		if hasIntersectionPointWithPolygon(pt, mp.PolygonN(i)) {
			return true
		}
	}
	return false
}

// hasIntersectionPointWithPoint reports whether two points have the same
// coordinates. Empty points never intersect.
func hasIntersectionPointWithPoint(pt1, pt2 Point) bool {
	// Speed is O(1).
	xy1, ok1 := pt1.XY()
	xy2, ok2 := pt2.XY()
	return ok1 && ok2 && xy1 == xy2
}
// hasIntersectionPointWithPolygon reports whether pt lies on the
// boundary of, or inside, p: it must not be exterior to the outer ring
// and must not fall strictly inside any hole.
func hasIntersectionPointWithPolygon(pt Point, p Polygon) bool {
	// Speed is O(m), m is the number of holes in the polygon.
	xy, ok := pt.XY()
	if !ok {
		return false
	}
	if p.IsEmpty() {
		return false
	}
	if relatePointToRing(xy, p.ExteriorRing()) == exterior {
		return false
	}
	m := p.NumInteriorRings()
	for i := 0; i < m; i++ {
		ring := p.InteriorRingN(i)
		// Strictly inside a hole means outside the polygon; on a hole's
		// boundary still counts as intersecting.
		if relatePointToRing(xy, ring) == interior {
			return false
		}
	}
	return true
}

// hasIntersectionMultiPointWithPolygon reports whether any point of mp
// lies on or inside p.
func hasIntersectionMultiPointWithPolygon(mp MultiPoint, p Polygon) bool {
	// Speed is O(n*m), n is the number of points, m is the number of holes in the polygon.
	n := mp.NumPoints()
	for i := 0; i < n; i++ {
		pt := mp.PointN(i)
		if hasIntersectionPointWithPolygon(pt, p) {
			return true
		}
	}
	return false
}

// hasIntersectionPolygonWithPolygon reports whether two polygons
// intersect: either their boundaries cross, or one polygon is entirely
// contained within the other.
func hasIntersectionPolygonWithPolygon(p1, p2 Polygon) bool {
	// Check if the boundaries intersect. If so, then the polygons must
	// intersect.
	b1 := p1.Boundary()
	b2 := p2.Boundary()
	if has, _ := hasIntersectionMultiLineStringWithMultiLineString(b1, b2, false); has {
		return true
	}
	// Other check to see if an arbitrary point from each polygon is inside the
	// other polygon.
	return hasIntersectionPointWithPolygon(p1.ExteriorRing().StartPoint(), p2) ||
		hasIntersectionPointWithPolygon(p2.ExteriorRing().StartPoint(), p1)
}

// hasIntersectionMultiPolygonWithMultiPolygon reports whether any
// polygon of mp1 intersects any polygon of mp2 (pairwise check).
func hasIntersectionMultiPolygonWithMultiPolygon(mp1, mp2 MultiPolygon) bool {
	n := mp1.NumPolygons()
	for i := 0; i < n; i++ {
		p1 := mp1.PolygonN(i)
		m := mp2.NumPolygons()
		for j := 0; j < m; j++ {
			p2 := mp2.PolygonN(j)
			if hasIntersectionPolygonWithPolygon(p1, p2) {
				return true
			}
		}
	}
	return false
}
func hasIntersectionMultiPointWithMultiPolygon(pts MultiPoint, polys MultiPolygon) bool {
n := pts.NumPoints()
for i := 0; i < n; i++ {
pt := pts.PointN(i)
if hasIntersectionPointWithMultiPolygon(pt, polys) {
return true
}
}
return false
} | geom/alg_intersects.go | 0.618204 | 0.516169 | alg_intersects.go | starcoder |
package main
import (
"crypto/rand"
"crypto/sha256"
"math/big"
)
// RandomBytes returns B cryptographically secure random bytes.
// It panics if the system's secure random source fails: the original
// code ignored the error from rand.Read, which could silently yield a
// partially filled (predictable) buffer — unacceptable for nonce/mask
// material in the proofs below.
func RandomBytes() []byte {
	r := make([]byte, B)
	if _, err := rand.Read(r); err != nil {
		panic(err)
	}
	return r
}
// HashToCurve hashes t to a curve point T.
// If T is not valid, then Ty = nil.
//
// The candidate x-coordinate Tx is the SHA-256 digest of t interpreted
// as a big-endian integer. Ty is computed as the modular square root of
// the curve polynomial at Tx; big.Int.ModSqrt returns nil when no square
// root exists, i.e. when Tx does not correspond to a point on the curve.
func HashToCurve(t []byte) (Tx *big.Int, Ty *big.Int) {
	hash := sha256.Sum256(t)
	Tx = new(big.Int).SetBytes(hash[:])
	// Verify that (Ty)^2 = (Tx)^3 - 3*Tx + B mod P
	// holds for T = (Tx,Ty). Otherwise set Ty = nil.
	Ty = new(big.Int).ModSqrt(polynomial(params.B, params.P, Tx), params.P)
	return
}
// CreateChallenge hashes all information into B bytes, producing the
// Fiat-Shamir challenge for the proof. The digest binds the base point G
// (from params) together with all supplied points.
// NOTE: c has type [B]byte while sha256.Sum256 returns [32]byte, so this
// only compiles when B == 32.
// Reference: https://golang.org/pkg/crypto/sha256.
// There is probably a more elegant way to do this.
func CreateChallenge(Px, Py, Qx, Qy, Kx, Ky, Ax, Ay, Bx, By *big.Int) (c [B]byte) {
	bytes := append(params.Gx.Bytes(), params.Gy.Bytes()...)
	list := []big.Int{*Px, *Py, *Qx, *Qy, *Kx, *Ky, *Ax, *Ay, *Bx, *By}
	for _, element := range list {
		bytes = append(bytes, element.Bytes()...)
	}
	c = sha256.Sum256(bytes)
	return
}
// CreateProof creates a proof (c,z) proving
// that Q = [k]*P and K = [k]*G without revealing k.
//
// This is a Chaum-Pedersen-style discrete-log-equality proof made
// non-interactive via the Fiat-Shamir hash in CreateChallenge.
func CreateProof(Px, Py, Qx, Qy, Kx, Ky *big.Int, k []byte) (c [B]byte, z []byte) {
	// Generate random mask r, and
	// then compute A = [r]*P and B = [r]*G
	r := RandomBytes()
	Ax, Ay := curve.ScalarMult(Px, Py, r)
	Bx, By := curve.ScalarBaseMult(r)
	// Hash everything
	c = CreateChallenge(Px, Py, Qx, Qy, Kx, Ky, Ax, Ay, Bx, By)
	// Compute z = r - ck mod N
	temp := new(big.Int).SetBytes(c[:])
	temp.Mul(temp, new(big.Int).SetBytes(k[:]))
	temp.Sub(new(big.Int).SetBytes(r[:]), temp)
	temp.Mod(temp, params.N)
	z = temp.Bytes()
	return
}
// VerifyProof verifies the proof (c,z).
//
// It reconstructs the masked commitments A and B from public values
// only, then re-derives the challenge and compares it to c. Since
// z = r - ck (mod N), we have [z]*P + [c]*Q = [r]*P and
// [z]*G + [c]*K = [r]*G exactly when the prover knew a single k with
// Q = [k]*P and K = [k]*G.
func VerifyProof(Px, Py, Qx, Qy, Kx, Ky *big.Int, c [B]byte, z []byte) bool {
	var x1, x2, y1, y2 *big.Int
	// Compute [z]*P+[c]*Q = [r]*P = A
	x1, y1 = curve.ScalarMult(Px, Py, z)
	x2, y2 = curve.ScalarMult(Qx, Qy, c[:])
	Ax, Ay := curve.Add(x1, y1, x2, y2)
	// Compute [z]*G+[c]*K = [r]*G = B
	x1, y1 = curve.ScalarBaseMult(z)
	x2, y2 = curve.ScalarMult(Kx, Ky, c[:])
	Bx, By := curve.Add(x1, y1, x2, y2)
	hash := CreateChallenge(Px, Py, Qx, Qy, Kx, Ky, Ax, Ay, Bx, By)
	// Verify that c = H(G,P,Q,K,zP+cQ,zG+cK)
	return c == hash
}
// Computes x^3 - 3x + B mod P. Copied from:
// https://golang.org/src/crypto/elliptic/elliptic.go
func polynomial(B, P, x *big.Int) *big.Int {
x3 := new(big.Int).Mul(x, x)
x3.Mul(x3, x)
threeX := new(big.Int).Lsh(x, 1)
threeX.Add(threeX, x)
x3.Sub(x3, threeX)
x3.Add(x3, B)
x3.Mod(x3, P)
return x3
} | computation.go | 0.761716 | 0.515254 | computation.go | starcoder |
package tsm1
/*
This code is originally from: https://github.com/dgryski/go-tsz and has been modified to remove
the timestamp compression fuctionality.
It implements the float compression as presented in: http://www.vldb.org/pvldb/vol8/p1816-teller.pdf.
This implementation uses a sentinel value of NaN which means that float64 NaN cannot be stored using
this version.
*/
import (
"bytes"
"fmt"
"math"
"github.com/dgryski/go-bits"
"github.com/dgryski/go-bitstream"
)
const (
	// floatUncompressed is an uncompressed format using 8 bytes per value.
	// Not yet implemented.
	floatUncompressed = 0
	// floatCompressedGorilla is a compressed format using the gorilla paper encoding.
	// The format is stored in the high nibble of the first byte of an
	// encoded block (see FloatEncoder.Bytes and NewFloatDecoder).
	floatCompressedGorilla = 1
)
// FloatEncoder encodes multiple float64s into a byte slice using the
// Gorilla XOR scheme: each value is XORed with the previous one and only
// the significant (non-zero) bits of the delta are written.
type FloatEncoder struct {
	val      float64 // previous value pushed; the next value is XORed against it
	err      error   // sticky error (currently only set when NaN is pushed)
	leading  uint64  // leading zero count of the previous XOR delta
	trailing uint64  // trailing zero count of the previous XOR delta
	buf      bytes.Buffer
	bw       *bitstream.BitWriter
	first    bool // true until the first value has been written
	finished bool // set once the end-of-stream NaN marker has been written
}

// NewFloatEncoder returns a FloatEncoder ready to accept values via Push.
func NewFloatEncoder() *FloatEncoder {
	s := FloatEncoder{
		first: true,
		// ^uint64(0) is a sentinel meaning "no previous leading/trailing
		// counts to reuse yet".
		leading: ^uint64(0),
	}
	s.bw = bitstream.NewWriter(&s.buf)
	return &s
}

// Bytes returns the encoded block: a one-byte header whose high nibble
// is the compression type, followed by the bit-packed values, along with
// any error recorded during encoding.
func (s *FloatEncoder) Bytes() ([]byte, error) {
	return append([]byte{floatCompressedGorilla << 4}, s.buf.Bytes()...), s.err
}

// Finish terminates the stream by appending a NaN end-of-stream marker
// and flushing the bit writer. It is idempotent.
func (s *FloatEncoder) Finish() {
	if !s.finished {
		// write an end-of-stream record
		s.finished = true
		s.Push(math.NaN())
		s.bw.Flush(bitstream.Zero)
	}
}
// Push appends v to the encoded stream. NaN is rejected (it is reserved
// as the end-of-stream sentinel written by Finish); attempting to push
// one records an error and the value is dropped.
func (s *FloatEncoder) Push(v float64) {
	// Only allow NaN as a sentinel value
	if math.IsNaN(v) && !s.finished {
		s.err = fmt.Errorf("unsupported value: NaN")
		return
	}
	if s.first {
		// first point: stored verbatim as a full 64-bit value
		s.val = v
		s.first = false
		s.bw.WriteBits(math.Float64bits(v), 64)
		return
	}
	// XOR against the previous value; identical values compress to a
	// single 0 bit.
	vDelta := math.Float64bits(v) ^ math.Float64bits(s.val)
	if vDelta == 0 {
		s.bw.WriteBit(bitstream.Zero)
	} else {
		s.bw.WriteBit(bitstream.One)
		leading := bits.Clz(vDelta)
		trailing := bits.Ctz(vDelta)
		// Clamp number of leading zeros to avoid overflow when encoding
		// (only 5 bits are available for the count below).
		leading &= 0x1F
		// NOTE(review): after the mask above, leading is always <= 31, so
		// this branch appears unreachable — confirm before removing.
		if leading >= 32 {
			leading = 31
		}
		// TODO(dgryski): check if it's 'cheaper' to reset the leading/trailing bits instead
		if s.leading != ^uint64(0) && leading >= s.leading && trailing >= s.trailing {
			// The new delta fits inside the previous window: write a 0
			// control bit and reuse the stored leading/trailing counts.
			s.bw.WriteBit(bitstream.Zero)
			s.bw.WriteBits(vDelta>>s.trailing, 64-int(s.leading)-int(s.trailing))
		} else {
			// New window: write a 1 control bit, then 5 bits of leading
			// zero count and 6 bits of significant-bit count.
			s.leading, s.trailing = leading, trailing
			s.bw.WriteBit(bitstream.One)
			s.bw.WriteBits(leading, 5)
			// Note that if leading == trailing == 0, then sigbits == 64. But that
			// value doesn't actually fit into the 6 bits we have.
			// Luckily, we never need to encode 0 significant bits, since that would
			// put us in the other case (vdelta == 0). So instead we write out a 0 and
			// adjust it back to 64 on unpacking.
			sigbits := 64 - leading - trailing
			s.bw.WriteBits(sigbits, 6)
			s.bw.WriteBits(vDelta>>trailing, int(sigbits))
		}
	}
	s.val = v
}
// FloatDecoder decodes a byte slice into multiple float64 values,
// reversing the Gorilla XOR encoding produced by FloatEncoder.
type FloatDecoder struct {
	val      float64 // most recently decoded value
	leading  uint64  // leading zero count carried between values
	trailing uint64  // trailing zero count carried between values
	br       *bitstream.BitReader
	b        []byte
	first    bool // true until Next has yielded the first value
	finished bool // set when the NaN end-of-stream marker is reached
	err      error
}

// NewFloatDecoder prepares to decode a block produced by FloatEncoder.
// The first byte (the compression-type header) is skipped, and the
// initial value is read verbatim from the next 64 bits.
func NewFloatDecoder(b []byte) (FloatDecoder, error) {
	// first byte is the compression type but we currently just have gorilla
	// compression
	br := bitstream.NewReader(bytes.NewReader(b[1:]))
	v, err := br.ReadBits(64)
	if err != nil {
		return FloatDecoder{}, err
	}
	return FloatDecoder{
		val:   math.Float64frombits(v),
		first: true,
		br:    br,
		b:     b,
	}, nil
}
// Next advances the decoder to the next value, returning false when the
// stream is exhausted (the NaN end-of-stream marker is reached) or a
// read error occurs. The decoded value is available via Values.
func (it *FloatDecoder) Next() bool {
	if it.err != nil || it.finished {
		return false
	}
	if it.first {
		// The initial value was already decoded by NewFloatDecoder.
		it.first = false
		// mark as finished if there were no values.
		if math.IsNaN(it.val) {
			it.finished = true
			return false
		}
		return true
	}
	// read compressed value: the first control bit says whether the value
	// changed at all.
	bit, err := it.br.ReadBit()
	if err != nil {
		it.err = err
		return false
	}
	if bit == bitstream.Zero {
		// value unchanged from the previous one
		// it.val = it.val
	} else {
		// Second control bit: reuse the previous leading/trailing window,
		// or read a new one.
		bit, err := it.br.ReadBit()
		if err != nil {
			it.err = err
			return false
		}
		if bit == bitstream.Zero {
			// reuse leading/trailing zero bits
			// it.leading, it.trailing = it.leading, it.trailing
		} else {
			// New window: 5 bits of leading zero count followed by 6 bits
			// of significant-bit count.
			bits, err := it.br.ReadBits(5)
			if err != nil {
				it.err = err
				return false
			}
			it.leading = bits
			bits, err = it.br.ReadBits(6)
			if err != nil {
				it.err = err
				return false
			}
			mbits := bits
			// 0 significant bits here means we overflowed and we actually need 64; see comment in encoder
			if mbits == 0 {
				mbits = 64
			}
			it.trailing = 64 - it.leading - mbits
		}
		mbits := int(64 - it.leading - it.trailing)
		bits, err := it.br.ReadBits(mbits)
		if err != nil {
			it.err = err
			return false
		}
		// XOR the shifted delta back onto the previous value's bits.
		vbits := math.Float64bits(it.val)
		vbits ^= (bits << it.trailing)
		val := math.Float64frombits(vbits)
		// NaN is the end-of-stream sentinel, never a data value.
		if math.IsNaN(val) {
			it.finished = true
			return false
		}
		it.val = val
	}
	return true
}
func (it *FloatDecoder) Values() float64 {
return it.val
}
func (it *FloatDecoder) Error() error {
return it.err
} | vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/float.go | 0.66061 | 0.497131 | float.go | starcoder |
package gofpdf
import (
"bytes"
)
// Version of FPDF from which this package is derived.
const (
	cnFpdfVersion = "1.7"
)
// blendModeType records a graphics-state blend mode and its assigned
// PDF object number.
type blendModeType struct {
	strokeStr, fillStr, modeStr string
	objNum                      int
}

// gradientType records a linear or radial gradient definition and its
// assigned PDF object number.
type gradientType struct {
	tp                 int // 2: linear, 3: radial
	clr1Str, clr2Str   string
	x1, y1, x2, y2, r  float64
	objNum             int
}

// SizeType fields Wd and Ht specify the horizontal and vertical extents of a
// document element such as a page.
type SizeType struct {
	Wd, Ht float64
}

// PointType fields X and Y specify the horizontal and vertical coordinates of
// a point, typically used in drawing.
type PointType struct {
	X, Y float64
}

// ImageInfoType contains size, color and other information about an image.
type ImageInfoType struct {
	data  []byte
	smask []byte // soft mask data, if present — presumably the alpha channel; confirm against image parsing code
	i     int
	n     int
	w     float64
	h     float64
	cs    string
	pal   []byte
	bpc   int // bits per component — presumably; confirm against image parsing code
	f     string
	dp    string
	trns  []int
	scale float64 // document scaling factor
}
// PointConvert returns the value of pt, expressed in points (1/72 inch), as a
// value expressed in the unit of measure specified in New(). Since font
// management in Fpdf uses points, this method can help with line height
// calculations and other methods that require user units.
func (f *Fpdf) PointConvert(pt float64) float64 {
	// k is the scale factor: the number of points per user unit.
	return pt / f.k
}
// Extent returns the width and height of the image in the units of the Fpdf
// object. It delegates to Width and Height so the points-to-user-unit
// conversion lives in exactly one place.
func (info *ImageInfoType) Extent() (wd, ht float64) {
	return info.Width(), info.Height()
}

// Width returns the width of the image in the units of the Fpdf object.
func (info *ImageInfoType) Width() float64 {
	// scale is the document scaling factor (see ImageInfoType).
	return info.w / info.scale
}

// Height returns the height of the image in the units of the Fpdf object.
func (info *ImageInfoType) Height() float64 {
	return info.h / info.scale
}
// fontFileType records the sizes and object number of an embedded font
// file.
type fontFileType struct {
	length1, length2 int64
	n                int
}

// linkType describes a clickable rectangle on a page. Exactly one of
// link (internal) or linkStr (external) is meaningful.
type linkType struct {
	x, y, wd, ht float64
	link         int    // Auto-generated internal link ID or...
	linkStr      string // ...application-provided external link string
}

// intLinkType is the target of an internal link: a page and a vertical
// position on it.
type intLinkType struct {
	page int
	y    float64
}

// outlineType is used for a sidebar outline of bookmarks.
// parent/first/last/next/prev are indexes linking the outline entries
// into a tree.
type outlineType struct {
	text                                   string
	level, parent, first, last, next, prev int
	y                                      float64
	p                                      int
}

// InitType is used with NewCustom() to customize an Fpdf instance.
// OrientationStr, UnitStr, SizeStr and FontDirStr correspond to the arguments
// accepted by New(). If the Wd and Ht fields of Size are each greater than
// zero, Size will be used to set the default page size rather than SizeStr. Wd
// and Ht are specified in the units of measure indicated by UnitStr.
type InitType struct {
	OrientationStr string
	UnitStr        string
	SizeStr        string
	Size           SizeType
	FontDirStr     string
}
// Fpdf is the principal structure for creating a single PDF document.
// Geometric fields are generally held in user units; k gives the number
// of points per user unit for conversion.
type Fpdf struct {
	page             int                       // current page number
	n                int                       // current object number
	offsets          []int                     // array of object offsets
	buffer           fmtBuffer                 // buffer holding in-memory PDF
	pages            []*bytes.Buffer           // slice[page] of page content; 1-based
	state            int                       // current document state
	compress         bool                      // compression flag
	k                float64                   // scale factor (number of points in user unit)
	defOrientation   string                    // default orientation
	curOrientation   string                    // current orientation
	stdPageSizes     map[string]SizeType       // standard page sizes
	defPageSize      SizeType                  // default page size
	curPageSize      SizeType                  // current page size
	pageSizes        map[int]SizeType          // used for pages with non default sizes or orientations
	unitStr          string                    // unit of measure for all rendered objects except fonts
	wPt, hPt         float64                   // dimensions of current page in points
	w, h             float64                   // dimensions of current page in user unit
	lMargin          float64                   // left margin
	tMargin          float64                   // top margin
	rMargin          float64                   // right margin
	bMargin          float64                   // page break margin
	cMargin          float64                   // cell margin
	x, y             float64                   // current position in user unit
	lasth            float64                   // height of last printed cell
	lineWidth        float64                   // line width in user unit
	fontpath         string                    // path containing fonts
	coreFonts        map[string]bool           // array of core font names
	fonts            map[string]fontDefType    // array of used fonts
	fontFiles        map[string]fontFileType   // array of font files
	diffs            []string                  // array of encoding differences
	fontFamily       string                    // current font family
	fontStyle        string                    // current font style
	underline        bool                      // underlining flag
	currentFont      fontDefType               // current font info
	fontSizePt       float64                   // current font size in points
	fontSize         float64                   // current font size in user unit
	ws               float64                   // word spacing
	images           map[string]*ImageInfoType // array of used images
	pageLinks        [][]linkType              // pageLinks[page][link], both 1-based
	links            []intLinkType             // array of internal links
	outlines         []outlineType             // array of outlines
	outlineRoot      int                       // root of outlines
	autoPageBreak    bool                      // automatic page breaking
	acceptPageBreak  func() bool               // returns true to accept page break
	pageBreakTrigger float64                   // threshold used to trigger page breaks
	inHeader         bool                      // flag set when processing header
	headerFnc        func()                    // function provided by app and called to write header
	inFooter         bool                      // flag set when processing footer
	footerFnc        func()                    // function provided by app and called to write footer
	zoomMode         string                    // zoom display mode
	layoutMode       string                    // layout display mode
	title            string                    // title
	subject          string                    // subject
	author           string                    // author
	keywords         string                    // keywords
	creator          string                    // creator
	aliasNbPagesStr  string                    // alias for total number of pages
	pdfVersion       string                    // PDF version number
	fontDirStr       string                    // location of font definition files
	capStyle         int                       // line cap style: butt 0, round 1, square 2
	joinStyle        int                       // line segment join style: miter 0, round 1, bevel 2
	blendList        []blendModeType           // slice[idx] of alpha transparency modes, 1-based
	blendMap         map[string]int            // map into blendList
	gradientList     []gradientType            // slice[idx] of gradient records
	clipNest         int                       // Number of active clipping contexts
	transformNest    int                       // Number of active transformation contexts
	err              error                     // Set if error occurs during life cycle of instance
	protect          protectType               // document protection structure
	layer            layerRecType              // manages optional layers in document
	colorFlag        bool                      // indicates whether fill and text colors are different
	color            struct {                  // Composite values of colors
		draw, fill, text clrType
	}
}
// encType describes one position of an 8-bit code page: the Unicode code
// point and the glyph name assigned to that byte value.
type encType struct {
	uv   int    // Unicode code point
	name string // glyph name
}
// encListType holds one encoding entry for each of the 256 byte values of an
// 8-bit code page.
type encListType [256]encType
// fontBoxType specifies the font bounding box: the smallest rectangle, in
// font units, that encloses every glyph in the font.
type fontBoxType struct {
	Xmin, Ymin, Xmax, Ymax int
}
// fontDescType describes a PDF font descriptor (see PDF 32000-1:2008 §9.8).
// All metrics are expressed in font units.
type fontDescType struct {
	Ascent       int         // maximum height above the baseline
	Descent      int         // maximum depth below the baseline
	CapHeight    int         // height of a capital letter
	Flags        int         // font-characteristic bit flags
	FontBBox     fontBoxType // bounding box enclosing all glyphs
	ItalicAngle  int         // slant of the font, in degrees
	StemV        int         // dominant vertical stem width
	MissingWidth int         // width used for characters absent from the font
}
// fontDefType collects everything needed to reference and embed a font: its
// type, name, descriptor, per-character widths, encoding and source-file
// information. Fields I, N and DiffN are populated by the font loader.
type fontDefType struct {
	Tp           string       // "Core", "TrueType", ...
	Name         string       // "Courier-Bold", ...
	Desc         fontDescType // Font descriptor
	Up           int          // Underline position
	Ut           int          // Underline thickness
	Cw           [256]int     // Character width by ordinal
	Enc          string       // "cp1252", ...
	Diff         string       // Differences from reference encoding
	File         string       // "Redressed.z"
	Size1, Size2 int          // Type1 values
	OriginalSize int          // Size of uncompressed font file
	I            int          // 1-based position in font list, set by font loader, not this program
	N            int          // Set by font loader
	DiffN        int          // Position of diff in app array, set by font loader
}
// fontInfoType holds raw data and metrics read from a font file.
// NOTE(review): presumably the intermediate form produced while parsing a
// font, before conversion to a fontDefType definition — confirm with caller.
type fontInfoType struct {
	Data               []byte // raw font file contents
	File               string
	OriginalSize       int    // uncompressed size of the font file, in bytes
	FontName           string
	Bold               bool
	IsFixedPitch       bool // true for monospaced fonts
	UnderlineThickness int
	UnderlinePosition  int
	Widths             [256]int // character widths by ordinal
	Size1, Size2       uint32   // Type1 values (compare fontDefType.Size1/Size2)
	Desc               fontDescType
}
package iso20022
// Details of the securities trade.
type SecuritiesTradeDetails59 struct {
// Market in which a trade transaction has been executed.
PlaceOfTrade *PlaceOfTradeIdentification2 `xml:"PlcOfTrad,omitempty"`
// Infrastructure which may be a component of a clearing house and wich facilitates clearing and settlement for its members by standing between the buyer and the seller. It may net transactions and it substitutes itself as settlement counterparty for each position.
PlaceOfClearing *PlaceOfClearingIdentification1 `xml:"PlcOfClr,omitempty"`
// Specifies the date/time on which the trade was executed.
TradeDate *TradeDate6Choice `xml:"TradDt,omitempty"`
// Date and time at which the securities are to be delivered or received.
OpeningSettlementDate *DateAndDateTimeChoice `xml:"OpngSttlmDt"`
// Specifies the price of the traded financial instrument.
// This is the deal price of the individual trade transaction.
// If there is only one trade transaction for the execution of the trade, then the deal price could equal the executed trade price (unless, for example, the price includes commissions or rounding, or some other factor has been applied to the deal price or the executed trade price, or both).
DealPrice *Price3 `xml:"DealPric,omitempty"`
// Number of days on which the interest rate accrues (daily accrual note).
NumberOfDaysAccrued *Max3Number `xml:"NbOfDaysAcrd,omitempty"`
// Specifies that a trade is to be reported to a third party.
Reporting []*Reporting9Choice `xml:"Rptg,omitempty"`
// Indicates the conditions under which the order/trade is to be/was executed.
TradeTransactionCondition []*TradeTransactionCondition6Choice `xml:"TradTxCond,omitempty"`
// Specifies the role of the investor in the transaction.
InvestorCapacity *InvestorCapacity5Choice `xml:"InvstrCpcty,omitempty"`
// Specifies the role of the trading party in the transaction.
TradeOriginatorRole *TradeOriginator4Choice `xml:"TradOrgtrRole,omitempty"`
// Account servicer is instructed to buy the indicated currency after the receipt of cash proceeds or to sell the indicated currency in order to obtain the necessary currency to fund the transaction.
CurrencyToBuyOrSell *CurrencyToBuyOrSell1Choice `xml:"CcyToBuyOrSell,omitempty"`
// Status of affirmation of a trade.
AffirmationStatus *AffirmationStatus9Choice `xml:"AffirmSts,omitempty"`
// Provides the matching status of the instruction.
MatchingStatus *MatchingStatus28Choice `xml:"MtchgSts,omitempty"`
// Provides additional settlement processing information which can not be included within the structured fields of the message.
SettlementInstructionProcessingAdditionalDetails *RestrictedFINXMax350Text `xml:"SttlmInstrPrcgAddtlDtls,omitempty"`
// Provides additional details pertaining to foreign exchange instructions.
FXAdditionalDetails *RestrictedFINXMax350Text `xml:"FxAddtlDtls,omitempty"`
}
// AddPlaceOfTrade allocates a new PlaceOfTradeIdentification2, assigns it to
// PlaceOfTrade and returns it so the caller can populate it.
func (s *SecuritiesTradeDetails59) AddPlaceOfTrade() *PlaceOfTradeIdentification2 {
	s.PlaceOfTrade = new(PlaceOfTradeIdentification2)
	return s.PlaceOfTrade
}

// AddPlaceOfClearing allocates, assigns and returns the PlaceOfClearing component.
func (s *SecuritiesTradeDetails59) AddPlaceOfClearing() *PlaceOfClearingIdentification1 {
	s.PlaceOfClearing = new(PlaceOfClearingIdentification1)
	return s.PlaceOfClearing
}

// AddTradeDate allocates, assigns and returns the TradeDate component.
func (s *SecuritiesTradeDetails59) AddTradeDate() *TradeDate6Choice {
	s.TradeDate = new(TradeDate6Choice)
	return s.TradeDate
}

// AddOpeningSettlementDate allocates, assigns and returns the
// OpeningSettlementDate component.
func (s *SecuritiesTradeDetails59) AddOpeningSettlementDate() *DateAndDateTimeChoice {
	s.OpeningSettlementDate = new(DateAndDateTimeChoice)
	return s.OpeningSettlementDate
}

// AddDealPrice allocates, assigns and returns the DealPrice component.
func (s *SecuritiesTradeDetails59) AddDealPrice() *Price3 {
	s.DealPrice = new(Price3)
	return s.DealPrice
}

// SetNumberOfDaysAccrued stores value as the NumberOfDaysAccrued field.
func (s *SecuritiesTradeDetails59) SetNumberOfDaysAccrued(value string) {
	s.NumberOfDaysAccrued = (*Max3Number)(&value)
}

// AddReporting appends a new Reporting9Choice to Reporting and returns it.
func (s *SecuritiesTradeDetails59) AddReporting() *Reporting9Choice {
	newValue := new(Reporting9Choice)
	s.Reporting = append(s.Reporting, newValue)
	return newValue
}

// AddTradeTransactionCondition appends a new TradeTransactionCondition6Choice
// to TradeTransactionCondition and returns it.
func (s *SecuritiesTradeDetails59) AddTradeTransactionCondition() *TradeTransactionCondition6Choice {
	newValue := new(TradeTransactionCondition6Choice)
	s.TradeTransactionCondition = append(s.TradeTransactionCondition, newValue)
	return newValue
}

// AddInvestorCapacity allocates, assigns and returns the InvestorCapacity component.
func (s *SecuritiesTradeDetails59) AddInvestorCapacity() *InvestorCapacity5Choice {
	s.InvestorCapacity = new(InvestorCapacity5Choice)
	return s.InvestorCapacity
}

// AddTradeOriginatorRole allocates, assigns and returns the
// TradeOriginatorRole component.
func (s *SecuritiesTradeDetails59) AddTradeOriginatorRole() *TradeOriginator4Choice {
	s.TradeOriginatorRole = new(TradeOriginator4Choice)
	return s.TradeOriginatorRole
}

// AddCurrencyToBuyOrSell allocates, assigns and returns the
// CurrencyToBuyOrSell component.
func (s *SecuritiesTradeDetails59) AddCurrencyToBuyOrSell() *CurrencyToBuyOrSell1Choice {
	s.CurrencyToBuyOrSell = new(CurrencyToBuyOrSell1Choice)
	return s.CurrencyToBuyOrSell
}

// AddAffirmationStatus allocates, assigns and returns the AffirmationStatus component.
func (s *SecuritiesTradeDetails59) AddAffirmationStatus() *AffirmationStatus9Choice {
	s.AffirmationStatus = new(AffirmationStatus9Choice)
	return s.AffirmationStatus
}

// AddMatchingStatus allocates, assigns and returns the MatchingStatus component.
func (s *SecuritiesTradeDetails59) AddMatchingStatus() *MatchingStatus28Choice {
	s.MatchingStatus = new(MatchingStatus28Choice)
	return s.MatchingStatus
}

// SetSettlementInstructionProcessingAdditionalDetails stores value as the
// SettlementInstructionProcessingAdditionalDetails field.
func (s *SecuritiesTradeDetails59) SetSettlementInstructionProcessingAdditionalDetails(value string) {
	s.SettlementInstructionProcessingAdditionalDetails = (*RestrictedFINXMax350Text)(&value)
}

// SetFXAdditionalDetails stores value as the FXAdditionalDetails field.
func (s *SecuritiesTradeDetails59) SetFXAdditionalDetails(value string) {
	s.FXAdditionalDetails = (*RestrictedFINXMax350Text)(&value)
}
package avl
// Range is the basic node for the AVL tree leaves.
// The concept Range can be a real data range in most cases, such as
// [LOWER_BOUND, UPPER_BOUND] .
// A Range can also be a single value, such as [VALUE_A, VALUE_A] .
type Range interface {
// Compare returns an integer comparing two elements.
// The result will be -1 if current element is less than the right,
// and +1 if current element is greater that the right.
// Otherwise, 0 will be return.
Compare(right Range) int
// Contains returns true if the right is a subset of current element.
Contains(right Range) bool
// Union returns the union of current element and the right.
Union(right Range) Range
}
// Tree is a high-performance AVL (self-balancing binary search) tree.
// The zero value is an empty tree ready for use. Tree performs no internal
// locking and is therefore not safe for concurrent use without external
// synchronization.
type Tree struct {
	root *avlNode // nil for an empty tree
}
// Insert adds a new Range to the AVL tree. If an equal Range already exists,
// the stored Range is replaced by its union with val rather than adding a
// duplicate node (see avlNode.insert).
func (t *Tree) Insert(val Range) {
	t.root = t.root.insert(val)
}
// Search returns true if the AVL tree contains the <val>, i.e. if a stored
// Range that compares equal to val also Contains it.
func (t *Tree) Search(val Range) bool {
	return t.root.search(val)
}
type avlNode struct {
val Range
parent *avlNode
left *avlNode
right *avlNode
h int // the height
}
// height reports the node's cached height; a nil node has height -1.
func (n *avlNode) height() int {
	if n != nil {
		return n.h
	}
	return -1
}
// updateHeight recomputes this node's height from its children's cached
// heights (1 + the taller child) and returns the new value.
func (n *avlNode) updateHeight() int {
	lh, rh := n.left.height(), n.right.height()
	if rh > lh {
		lh = rh
	}
	n.h = lh + 1
	return n.h
}
// rotateLeft and other rotations are implemented according to the algorithm
// described on https://en.wikipedia.org/wiki/AVL_tree
//
// rotateLeft promotes n's right child z into n's position and returns z.
// z's parent pointer is NOT updated here; the caller (rebalance) re-attaches
// z to n's former parent.
func (n *avlNode) rotateLeft() (z *avlNode) {
	// z replaces n; z's old left subtree becomes n's new right subtree.
	z, n.right = n.right, n.right.left
	z.left = n
	n.parent = z
	if n.right != nil {
		n.right.parent = n
	}
	// n is now below z, so recompute n's height before z's.
	n.updateHeight()
	z.updateHeight()
	return
}
// rotateRight promotes n's left child z into n's position and returns z.
// As with rotateLeft, z's parent pointer is left for the caller to fix.
func (n *avlNode) rotateRight() (z *avlNode) {
	// z replaces n; z's old right subtree becomes n's new left subtree.
	z, n.left = n.left, n.left.right
	z.right = n
	n.parent = z
	if n.left != nil {
		n.left.parent = n
	}
	// n is now below z, so recompute n's height before z's.
	n.updateHeight()
	z.updateHeight()
	return
}
// rotateLeftRight performs the double rotation for the left-right heavy case.
func (n *avlNode) rotateLeftRight() *avlNode {
	n.left = n.left.rotateLeft()
	return n.rotateRight()
}

// rotateRightLeft performs the double rotation for the right-left heavy case.
func (n *avlNode) rotateRightLeft() *avlNode {
	n.right = n.right.rotateRight()
	return n.rotateLeft()
}
// rebalance walks from n's parent towards the root, restoring the AVL
// balance invariant with single or double rotations where needed, and
// returns the (possibly new) root of the whole tree.
//
// Fixes relative to the previous version: the factor < -1 branch was
// mislabeled "left heavy" (it handles the right-heavy case), and the local
// variable grandParant was misspelled.
func (n *avlNode) rebalance() (p *avlNode) {
	for p = n.parent; p != nil; n, p = p, p.parent {
		grandparent, oldHeight := p.parent, p.height()
		leftChild := grandparent != nil && grandparent.left == p
		switch factor := p.left.height() - p.right.height(); {
		case factor > 1: // left heavy
			if n.left.height() < n.right.height() {
				p = p.rotateLeftRight()
			} else {
				p = p.rotateRight()
			}
		case factor < -1: // right heavy
			if n.left.height() > n.right.height() {
				p = p.rotateRightLeft()
			} else {
				p = p.rotateLeft()
			}
		}
		// Re-attach the (possibly rotated) subtree to its grandparent.
		p.parent = grandparent
		if grandparent == nil {
			p.updateHeight()
			return
		}
		if leftChild {
			grandparent.left = p
		} else {
			grandparent.right = p
		}
		if p.updateHeight() == oldHeight {
			// The height for current node didn't change, so no ancestor
			// can have become unbalanced; stop early.
			break
		}
	}
	// Follow parent links up to the root before returning.
	for p.parent != nil {
		p = p.parent
	}
	return
}
// insert adds <val> to the subtree rooted at n and returns the new root of
// the whole tree. If an equal Range is already stored, no node is added;
// instead the stored Range is replaced by its union with <val>.
func (n *avlNode) insert(val Range) *avlNode {
	z := &avlNode{val: val}
	if n == nil {
		// Empty subtree: the new node is the root.
		return z
	}
	for x := n; ; {
		switch factor := x.val.Compare(val); {
		case factor < 0: // x < z
			if x.right == nil {
				x.right, z.parent = z, x
				return z.rebalance()
			}
			x = x.right
		case factor > 0: // x > z
			if x.left == nil {
				x.left, z.parent = z, x
				return z.rebalance()
			}
			x = x.left
		default: // x == z: merge the equal ranges in place
			if !x.val.Contains(val) {
				x.val = x.val.Union(val)
			}
			return n
		}
	}
}
// search reports whether the subtree rooted at n contains <val>.
func (n *avlNode) search(val Range) bool {
	for x := n; x != nil; {
		c := x.val.Compare(val)
		if c == 0 {
			return x.val.Contains(val)
		}
		if c < 0 {
			x = x.right
		} else {
			x = x.left
		}
	}
	return false
}
// DebugPreorder will traverse the tree in preorder. For debug-use only.
func DebugPreorder(t *Tree) (ret []interface{}) {
if t.root == nil {
return nil
}
queue := []*avlNode{t.root}
for idx := 0; idx < len(queue); idx++ {
node := queue[idx]
if node == nil {
continue
}
// enqueue
queue = append(queue, node.left, node.right)
// fill the value
ret = append(ret, node.val)
}
return
} | avl/avl.go | 0.850732 | 0.605478 | avl.go | starcoder |
package sqldatabase
import (
"database/sql"
"fmt"
"reflect"
"time"
)
// Kind sentinels used in ScanMappingItem to declare what type a mapped
// value holds.
//
// NOTE(review): time.Time and every sql.Null* type are structs, so KindTime
// and all of the KindSqlNull* variables below evaluate to the same value,
// reflect.Struct. They cannot distinguish those types from one another;
// AssignScanValue resolves struct kinds by the value's concrete type name
// in its default branch instead.
var (
	KindString         reflect.Kind = reflect.String
	KindInt            reflect.Kind = reflect.Int64
	KindBool           reflect.Kind = reflect.Bool
	KindFloat32        reflect.Kind = reflect.Float32
	KindFloat64        reflect.Kind = reflect.Float64
	KindTime           reflect.Kind = reflect.TypeOf(time.Time{}).Kind()
	KindSqlNullString  reflect.Kind = reflect.TypeOf(sql.NullString{}).Kind()
	KindSqlNullInt32   reflect.Kind = reflect.TypeOf(sql.NullInt32{}).Kind()
	KindSqlNullInt64   reflect.Kind = reflect.TypeOf(sql.NullInt64{}).Kind()
	KindSqlNullTime    reflect.Kind = reflect.TypeOf(sql.NullTime{}).Kind()
	KindSqlNullFloat64 reflect.Kind = reflect.TypeOf(sql.NullFloat64{}).Kind()
)
/*
ScanMappingItem decribes a value and what kind of value it is.
Example 1: A string value
mapping := gosqldatabase.ScanMappingItem{gosqldatabase.KindString, "value"}
Example 2: A sql.NullInt64 value
mapping := gosqldatabase.ScanMappingItem{gosqldatabase.KindSqlNullInt64, sql.NullInt64{25, true}}
*/
type ScanMappingItem struct {
Kind reflect.Kind
Value interface{}
}
/*
ScanMapping is a slice of a slice of ScanMappingItem structs. Think of this
as a set of rows containing a set of column definitions.
Example:
data := gosqldatabase.ScanMapping{
{
{gosqldatabase.KindString, "value1"},
{gosqldatabase.KindInt, 2},
{gosqldatabase.KindSqlNullString, sql.NullString{"value2", true}},
{gosqldatabase.KindSqlNullInt64, sql.NullInt64{nil, false}},
},
}
*/
type ScanMapping [][]ScanMappingItem
/*
Scan scans values into dest... from the provided mappings and row index.
This is most useful in a mock method.
Example:
data := gosqldatabase.ScanMapping{
{
{gosqldatabase.KindString, "value1"},
{gosqldatabase.KindInt, 2},
{gosqldatabase.KindSqlNullString, sql.NullString{"value2", true}},
{gosqldatabase.KindSqlNullInt64, sql.NullInt64{nil, false}},
},
}
rowIndex := 0
mock := &gosqldatabase.MockRow{
ScanFunc: func(dest ...interface{}) error {
gosqldatabase.Scan(data, rowIndex, dest...)
return nil
},
}
*/
func Scan(mappings ScanMapping, rowIndex int, dest ...interface{}) {
	for i := range dest {
		AssignScanValue(mappings, rowIndex, i, dest[i])
	}
}
/*
AssignScanValue reads the mapping at rowIndex/colIndex, determines the kind
of the stored value, and assigns it to the provided destination variable.

dest must be a pointer to the concrete type matching the mapping's Kind
(e.g. *string for KindString, *int for KindInt): the type assertions below
are unchecked and will panic on a mismatched destination. If the mapping's
stored Value does not match its declared Kind, a diagnostic is printed to
stdout and dest is left unchanged.

Because time.Time and the sql.Null* types all report reflect.Struct as their
Kind (see the Kind* variables above), they are distinguished in the default
branch by the concrete type's name.
*/
func AssignScanValue(mappings ScanMapping, rowIndex, colIndex int, dest interface{}) {
	var ok bool
	// wrongType reports a Kind/Value mismatch; the destination stays untouched.
	wrongType := func(rowIndex, colIndex int, expectedType string) {
		fmt.Printf("value at row %d, col %d is not %s\n", rowIndex, colIndex, expectedType)
	}
	switch mappings[rowIndex][colIndex].Kind {
	case reflect.String:
		var value string
		p := dest.(*string)
		if value, ok = mappings[rowIndex][colIndex].Value.(string); !ok {
			wrongType(rowIndex, colIndex, "string")
			return
		}
		*p = value
	// KindInt is reflect.Int64; reflect.Int16 is also accepted here even
	// though no Kind* variable maps to it. Values are stored as Go int.
	case reflect.Int16, reflect.Int64:
		var value int
		p := dest.(*int)
		if value, ok = mappings[rowIndex][colIndex].Value.(int); !ok {
			wrongType(rowIndex, colIndex, "int")
			return
		}
		*p = value
	case reflect.Float32:
		var value float32
		p := dest.(*float32)
		if value, ok = mappings[rowIndex][colIndex].Value.(float32); !ok {
			wrongType(rowIndex, colIndex, "float32")
			return
		}
		*p = value
	case reflect.Float64:
		var value float64
		p := dest.(*float64)
		if value, ok = mappings[rowIndex][colIndex].Value.(float64); !ok {
			wrongType(rowIndex, colIndex, "float64")
			return
		}
		*p = value
	case reflect.Bool:
		var value bool
		p := dest.(*bool)
		if value, ok = mappings[rowIndex][colIndex].Value.(bool); !ok {
			wrongType(rowIndex, colIndex, "bool")
			return
		}
		*p = value
	default:
		// Struct kinds (time.Time and the sql.Null* wrappers) all share
		// reflect.Struct, so dispatch on the concrete type name.
		switch reflect.TypeOf(mappings[rowIndex][colIndex].Value).String() {
		case "time.Time":
			var value time.Time
			p := dest.(*time.Time)
			if value, ok = mappings[rowIndex][colIndex].Value.(time.Time); !ok {
				wrongType(rowIndex, colIndex, "time.Time")
				return
			}
			*p = value
		case "sql.NullString":
			var value sql.NullString
			p := dest.(*sql.NullString)
			if value, ok = mappings[rowIndex][colIndex].Value.(sql.NullString); !ok {
				wrongType(rowIndex, colIndex, "sql.NullString")
				return
			}
			*p = value
		case "sql.NullInt32":
			var value sql.NullInt32
			p := dest.(*sql.NullInt32)
			if value, ok = mappings[rowIndex][colIndex].Value.(sql.NullInt32); !ok {
				wrongType(rowIndex, colIndex, "sql.NullInt32")
				return
			}
			*p = value
		case "sql.NullInt64":
			var value sql.NullInt64
			p := dest.(*sql.NullInt64)
			if value, ok = mappings[rowIndex][colIndex].Value.(sql.NullInt64); !ok {
				wrongType(rowIndex, colIndex, "sql.NullInt64")
				return
			}
			*p = value
		case "sql.NullTime":
			var value sql.NullTime
			p := dest.(*sql.NullTime)
			if value, ok = mappings[rowIndex][colIndex].Value.(sql.NullTime); !ok {
				wrongType(rowIndex, colIndex, "sql.NullTime")
				return
			}
			*p = value
		case "sql.NullFloat64":
			var value sql.NullFloat64
			p := dest.(*sql.NullFloat64)
			if value, ok = mappings[rowIndex][colIndex].Value.(sql.NullFloat64); !ok {
				wrongType(rowIndex, colIndex, "sql.NullFloat64")
				return
			}
			*p = value
		}
	}
}
package scte35
import (
"bytes"
"encoding/base64"
"encoding/binary"
"encoding/hex"
"fmt"
"strconv"
"strings"
"unicode/utf8"
"github.com/bamiaux/iobit"
)
const (
// SegmentationUPIDTypeNotUsed is the segmentation_upid_type for Not Used.
SegmentationUPIDTypeNotUsed = 0x00
// SegmentationUPIDTypeUserDefined is the segmentation_upid_type for User
// Defined.
SegmentationUPIDTypeUserDefined = 0x01
// SegmentationUPIDTypeISCI is the segmentation_upid_type for ISCI
SegmentationUPIDTypeISCI = 0x02
// SegmentationUPIDTypeAdID is the segmentation_upid_type for Ad-ID
SegmentationUPIDTypeAdID = 0x03
// SegmentationUPIDTypeUMID is the segmentation_upid_type for UMID
SegmentationUPIDTypeUMID = 0x04
// SegmentationUPIDTypeISANDeprecated is the segmentation_upid_type for
// ISAN Deprecated.
SegmentationUPIDTypeISANDeprecated = 0x05
// SegmentationUPIDTypeISAN is the segmentation_upid_type for ISAN.
SegmentationUPIDTypeISAN = 0x06
// SegmentationUPIDTypeTID is the segmentation_upid_type for TID.
SegmentationUPIDTypeTID = 0x07
// SegmentationUPIDTypeTI is the segmentation_upid_type for TI.
SegmentationUPIDTypeTI = 0x08
// SegmentationUPIDTypeADI is the segmentation_upid_type for ADI.
SegmentationUPIDTypeADI = 0x09
// SegmentationUPIDTypeEIDR is the segmentation_upid_type for EIDR.
SegmentationUPIDTypeEIDR = 0x0a
// SegmentationUPIDTypeATSC is the segmentation_upid_type for ATSC Content
// Identifier.
SegmentationUPIDTypeATSC = 0x0b
// SegmentationUPIDTypeMPU is the segmentation_upid_type for MPU().
SegmentationUPIDTypeMPU = 0x0c
// SegmentationUPIDTypeMID is the segmentation_upid_type for MID().
SegmentationUPIDTypeMID = 0x0d
// SegmentationUPIDTypeADS is the segmentation_upid_type for ADS Information.
SegmentationUPIDTypeADS = 0x0e
// SegmentationUPIDTypeURI is the segmentation_upid_type for URI.
SegmentationUPIDTypeURI = 0x0f
// SegmentationUPIDTypeUUID is the segmentation_upid_type for UUID.
SegmentationUPIDTypeUUID = 0x10
)
// NewSegmentationUPID constructs a new SegmentationUPID from the raw UPID
// payload, decoding it according to the segmentation_upid_type:
//   - EIDR: canonical "10.NNNN/XXXX-…" text form
//   - ISAN / ISAN (deprecated): base-64 of the raw bytes
//   - MPU(): a leading 32-bit format identifier, then base-64 of the rest
//   - TI: the remaining bits rendered as an unsigned decimal string
//   - anything else: the raw bytes interpreted as text
func NewSegmentationUPID(upidType uint32, buf []byte) SegmentationUPID {
	r := iobit.NewReader(buf)
	switch upidType {
	case SegmentationUPIDTypeEIDR:
		return SegmentationUPID{
			Type:   upidType,
			Format: "text",
			Value:  canonicalEIDR(r.LeftBytes()),
		}
	case SegmentationUPIDTypeISAN, SegmentationUPIDTypeISANDeprecated:
		return SegmentationUPID{
			Type:   upidType,
			Format: "base-64",
			Value:  base64.StdEncoding.EncodeToString(r.LeftBytes()),
		}
	case SegmentationUPIDTypeMPU:
		// The format identifier precedes the private payload.
		fi := r.Uint32(32)
		return SegmentationUPID{
			Type:             upidType,
			Format:           "base-64",
			FormatIdentifier: &fi,
			Value:            base64.StdEncoding.EncodeToString(r.LeftBytes()),
		}
	case SegmentationUPIDTypeTI:
		return SegmentationUPID{
			Type:   upidType,
			Format: "text",
			Value:  fmt.Sprintf("%d", r.Uint64(r.LeftBits())),
		}
	default:
		return SegmentationUPID{
			Type:   upidType,
			Format: "text",
			Value:  string(r.LeftBytes()),
		}
	}
}
// SegmentationUPID is used to express a UPID in an XML document.
type SegmentationUPID struct {
	Type             uint32  `xml:"segmentationUpidType,attr" json:"segmentationUpidType"` // segmentation_upid_type
	FormatIdentifier *uint32 `xml:"formatIdentifier,attr" json:"formatIdentifier"`         // set only for MPU() UPIDs
	Format           string  `xml:"format,attr" json:"format"`                             // "text", "hexbinary" or "base-64"
	Value            string  `xml:",chardata" json:"value"`                                // the encoded UPID value
}
// Name returns the name for the segmentation_upid_type.
func (upid *SegmentationUPID) Name() string {
switch upid.Type {
case SegmentationUPIDTypeNotUsed:
return "Not Used"
case SegmentationUPIDTypeUserDefined:
return "User Defined"
case SegmentationUPIDTypeISCI:
return "ISCI"
case SegmentationUPIDTypeAdID:
return "Ad-ID"
case SegmentationUPIDTypeUMID:
return "UMID"
case SegmentationUPIDTypeISANDeprecated:
return "ISAN (Deprecated)"
case SegmentationUPIDTypeISAN:
return "ISAN"
case SegmentationUPIDTypeTID:
return "TID"
case SegmentationUPIDTypeTI:
return "TI"
case SegmentationUPIDTypeADI:
return "ADI"
case SegmentationUPIDTypeEIDR:
return "EIDR: " + upid.eidrTypeName()
case SegmentationUPIDTypeATSC:
return "ATSC Content Identifier"
case SegmentationUPIDTypeMPU:
return "MPU()"
case SegmentationUPIDTypeMID:
return "MID()"
case SegmentationUPIDTypeADS:
return "ADS Information"
case SegmentationUPIDTypeURI:
return "URI"
case SegmentationUPIDTypeUUID:
return "UUID"
default:
return "Unknown"
}
}
// ASCIIValue returns the UPID value as an ASCII string. Bytes outside ASCII
// range are represented by a dot (.).
func (upid *SegmentationUPID) ASCIIValue() string {
	b := upid.valueBytes()
	var sb strings.Builder
	sb.Grow(len(b))
	for _, c := range b {
		// A single byte is valid UTF-8 exactly when it is below
		// utf8.RuneSelf (0x80), i.e. when it is ASCII. This mirrors the
		// previous per-byte utf8.Valid([]byte{c}) check without allocating
		// a slice per byte or a []string for strings.Join.
		if c < utf8.RuneSelf {
			sb.WriteByte(c)
		} else {
			sb.WriteByte('.')
		}
	}
	return sb.String()
}
// compressEIDR converts a canonical EIDR string ("10.NNNN/XXXX-XXXX-…") into
// its 12-byte compressed form: a big-endian 16-bit sub-prefix followed by
// the 10-byte hex-decoded suffix. On any parse failure the input is logged
// and returned as raw bytes. This is the inverse of canonicalEIDR.
func (upid *SegmentationUPID) compressEIDR(s string) []byte {
	// Split into the "10" prefix, the numeric sub-prefix, and the suffix.
	parts := strings.FieldsFunc(s, func(r rune) bool {
		return r == '.' || r == '/'
	})
	if len(parts) != 3 {
		Logger.Printf("non-canonical EIDR string: %s", s)
		return []byte(s)
	}
	i, err := strconv.Atoi(parts[1])
	if err != nil {
		Logger.Printf("non-canonical EIDR string: %s", s)
		return []byte(s)
	}
	b := make([]byte, 12)
	iow := iobit.NewWriter(b)
	iow.PutUint32(16, uint32(i))
	// The suffix is 5 dash-separated groups of hex; strip the dashes.
	h, err := hex.DecodeString(strings.ReplaceAll(parts[2], "-", ""))
	if err != nil {
		Logger.Printf("non-canonical EIDR string: %s", s)
		return []byte(s)
	}
	_, _ = iow.Write(h)
	_ = iow.Flush()
	return b
}
// eidrTypeName returns the EIDR type name for the value's DOI sub-prefix,
// or "" when the prefix is not recognised.
func (upid *SegmentationUPID) eidrTypeName() string {
	switch {
	case strings.HasPrefix(upid.Value, "10.5237"):
		return "Party ID"
	case strings.HasPrefix(upid.Value, "10.5238"):
		return "User ID"
	case strings.HasPrefix(upid.Value, "10.5239"):
		return "Service ID"
	case strings.HasPrefix(upid.Value, "10.5240"):
		return "Content ID"
	default:
		return ""
	}
}
// formatIdentifierString renders the 32-bit format identifier as its
// 4-character big-endian string form.
func (upid *SegmentationUPID) formatIdentifierString() string {
	var buf [4]byte
	binary.BigEndian.PutUint32(buf[:], *upid.FormatIdentifier)
	return string(buf[:])
}
// valueBytes returns the UPID value re-encoded as the raw byte form used on
// the wire, reversing the decoding performed by NewSegmentationUPID.
// Decoding failures (bad base-64/hex, non-numeric TI) are silently ignored
// and produce empty or zero bytes.
func (upid *SegmentationUPID) valueBytes() []byte {
	switch upid.Type {
	case SegmentationUPIDTypeMPU:
		// 32-bit format identifier followed by the base-64-decoded payload.
		b := make([]byte, 4)
		binary.BigEndian.PutUint32(b, *upid.FormatIdentifier)
		v, _ := base64.StdEncoding.DecodeString(upid.Value)
		b = append(b, v...)
		return b
	case SegmentationUPIDTypeEIDR:
		return upid.compressEIDR(upid.Value)
	case SegmentationUPIDTypeTI:
		// 64-bit big-endian integer; remains zero if Value is not numeric.
		b := make([]byte, 8)
		if i, err := strconv.Atoi(upid.Value); err == nil {
			binary.BigEndian.PutUint64(b, uint64(i))
		}
		return b
	default:
		// All other types are encoded according to the Format field.
		switch upid.Format {
		case "hexbinary":
			b, _ := hex.DecodeString(upid.Value)
			return b
		case "base-64":
			b, _ := base64.StdEncoding.DecodeString(upid.Value)
			return b
		default:
			return []byte(upid.Value)
		}
	}
}
// canonicalEIDR converts a 12-byte compressed EIDR (big-endian 16-bit
// sub-prefix + 10-byte suffix) into its canonical
// "10.NNNN/XXXX-XXXX-XXXX-XXXX-XXXX" text form. Input already containing a
// "/" is assumed canonical and returned unchanged; input of any other length
// is logged and yields "". This is the inverse of compressEIDR.
func canonicalEIDR(b []byte) string {
	// already canonical
	if bytes.Contains(b, []byte("/")) {
		return string(b)
	}
	// unexpected length; the compressed form is exactly 12 bytes
	if len(b) != 12 {
		Logger.Printf("unexpected eidr value received: %s", b)
		return ""
	}
	i := int(binary.BigEndian.Uint16(b[:2]))
	return fmt.Sprintf("10.%d/%X-%X-%X-%X-%X", i, b[2:4], b[4:6], b[6:8], b[8:10], b[10:12])
}
package plaid
import (
"encoding/json"
)
// CreditCardLiability An object representing a credit card account.
type CreditCardLiability struct {
// The ID of the account that this liability belongs to.
AccountId NullableString `json:"account_id"`
// The various interest rates that apply to the account.
Aprs []APR `json:"aprs"`
// true if a payment is currently overdue. Availability for this field is limited.
IsOverdue NullableBool `json:"is_overdue"`
// The amount of the last payment.
LastPaymentAmount float32 `json:"last_payment_amount"`
// The date of the last payment. Dates are returned in an [ISO 8601](https://wikipedia.org/wiki/ISO_8601) format (YYYY-MM-DD). Availability for this field is limited.
LastPaymentDate string `json:"last_payment_date"`
// The date of the last statement. Dates are returned in an [ISO 8601](https://wikipedia.org/wiki/ISO_8601) format (YYYY-MM-DD).
LastStatementIssueDate string `json:"last_statement_issue_date"`
// The total amount owed as of the last statement issued
LastStatementBalance float32 `json:"last_statement_balance"`
// The minimum payment due for the next billing cycle.
MinimumPaymentAmount float32 `json:"minimum_payment_amount"`
// The due date for the next payment. The due date is `null` if a payment is not expected. Dates are returned in an [ISO 8601](https://wikipedia.org/wiki/ISO_8601) format (YYYY-MM-DD).
NextPaymentDueDate NullableString `json:"next_payment_due_date"`
AdditionalProperties map[string]interface{}
}
type _CreditCardLiability CreditCardLiability
// NewCreditCardLiability instantiates a new CreditCardLiability object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewCreditCardLiability(accountId NullableString, aprs []APR, isOverdue NullableBool, lastPaymentAmount float32, lastPaymentDate string, lastStatementIssueDate string, lastStatementBalance float32, minimumPaymentAmount float32, nextPaymentDueDate NullableString) *CreditCardLiability {
this := CreditCardLiability{}
this.AccountId = accountId
this.Aprs = aprs
this.IsOverdue = isOverdue
this.LastPaymentAmount = lastPaymentAmount
this.LastPaymentDate = lastPaymentDate
this.LastStatementIssueDate = lastStatementIssueDate
this.LastStatementBalance = lastStatementBalance
this.MinimumPaymentAmount = minimumPaymentAmount
this.NextPaymentDueDate = nextPaymentDueDate
return &this
}
// NewCreditCardLiabilityWithDefaults instantiates a new CreditCardLiability object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewCreditCardLiabilityWithDefaults() *CreditCardLiability {
this := CreditCardLiability{}
return &this
}
// GetAccountId returns the AccountId field value
// If the value is explicit nil, the zero value for string will be returned
func (o *CreditCardLiability) GetAccountId() string {
if o == nil || o.AccountId.Get() == nil {
var ret string
return ret
}
return *o.AccountId.Get()
}
// GetAccountIdOk returns a tuple with the AccountId field value
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *CreditCardLiability) GetAccountIdOk() (*string, bool) {
if o == nil {
return nil, false
}
return o.AccountId.Get(), o.AccountId.IsSet()
}
// SetAccountId sets field value
func (o *CreditCardLiability) SetAccountId(v string) {
o.AccountId.Set(&v)
}
// GetAprs returns the Aprs field value
func (o *CreditCardLiability) GetAprs() []APR {
if o == nil {
var ret []APR
return ret
}
return o.Aprs
}
// GetAprsOk returns a tuple with the Aprs field value
// and a boolean to check if the value has been set.
func (o *CreditCardLiability) GetAprsOk() (*[]APR, bool) {
if o == nil {
return nil, false
}
return &o.Aprs, true
}
// SetAprs sets field value
func (o *CreditCardLiability) SetAprs(v []APR) {
o.Aprs = v
}
// GetIsOverdue returns the IsOverdue field value
// If the value is explicit nil, the zero value for bool will be returned
func (o *CreditCardLiability) GetIsOverdue() bool {
if o == nil || o.IsOverdue.Get() == nil {
var ret bool
return ret
}
return *o.IsOverdue.Get()
}
// GetIsOverdueOk returns a tuple with the IsOverdue field value
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *CreditCardLiability) GetIsOverdueOk() (*bool, bool) {
if o == nil {
return nil, false
}
return o.IsOverdue.Get(), o.IsOverdue.IsSet()
}
// SetIsOverdue sets field value
func (o *CreditCardLiability) SetIsOverdue(v bool) {
o.IsOverdue.Set(&v)
}
// GetLastPaymentAmount returns the LastPaymentAmount field value
func (o *CreditCardLiability) GetLastPaymentAmount() float32 {
if o == nil {
var ret float32
return ret
}
return o.LastPaymentAmount
}
// GetLastPaymentAmountOk returns a tuple with the LastPaymentAmount field value
// and a boolean to check if the value has been set.
func (o *CreditCardLiability) GetLastPaymentAmountOk() (*float32, bool) {
if o == nil {
return nil, false
}
return &o.LastPaymentAmount, true
}
// SetLastPaymentAmount sets field value
func (o *CreditCardLiability) SetLastPaymentAmount(v float32) {
o.LastPaymentAmount = v
}
// GetLastPaymentDate returns the LastPaymentDate field value
func (o *CreditCardLiability) GetLastPaymentDate() string {
if o == nil {
var ret string
return ret
}
return o.LastPaymentDate
}
// GetLastPaymentDateOk returns a tuple with the LastPaymentDate field value
// and a boolean to check if the value has been set.
func (o *CreditCardLiability) GetLastPaymentDateOk() (*string, bool) {
if o == nil {
return nil, false
}
return &o.LastPaymentDate, true
}
// SetLastPaymentDate sets field value
func (o *CreditCardLiability) SetLastPaymentDate(v string) {
o.LastPaymentDate = v
}
// GetLastStatementIssueDate returns the LastStatementIssueDate field value
func (o *CreditCardLiability) GetLastStatementIssueDate() string {
if o == nil {
var ret string
return ret
}
return o.LastStatementIssueDate
}
// GetLastStatementIssueDateOk returns a tuple with the LastStatementIssueDate field value
// and a boolean to check if the value has been set.
func (o *CreditCardLiability) GetLastStatementIssueDateOk() (*string, bool) {
if o == nil {
return nil, false
}
return &o.LastStatementIssueDate, true
}
// SetLastStatementIssueDate sets field value
func (o *CreditCardLiability) SetLastStatementIssueDate(v string) {
o.LastStatementIssueDate = v
}
// GetLastStatementBalance returns the LastStatementBalance field value
func (o *CreditCardLiability) GetLastStatementBalance() float32 {
if o == nil {
var ret float32
return ret
}
return o.LastStatementBalance
}
// GetLastStatementBalanceOk returns a tuple with the LastStatementBalance field value
// and a boolean to check if the value has been set.
func (o *CreditCardLiability) GetLastStatementBalanceOk() (*float32, bool) {
if o == nil {
return nil, false
}
return &o.LastStatementBalance, true
}
// SetLastStatementBalance sets field value
func (o *CreditCardLiability) SetLastStatementBalance(v float32) {
o.LastStatementBalance = v
}
// GetMinimumPaymentAmount returns the MinimumPaymentAmount field value
func (o *CreditCardLiability) GetMinimumPaymentAmount() float32 {
if o == nil {
var ret float32
return ret
}
return o.MinimumPaymentAmount
}
// GetMinimumPaymentAmountOk returns a tuple with the MinimumPaymentAmount field value
// and a boolean to check if the value has been set.
func (o *CreditCardLiability) GetMinimumPaymentAmountOk() (*float32, bool) {
if o == nil {
return nil, false
}
return &o.MinimumPaymentAmount, true
}
// SetMinimumPaymentAmount sets field value
func (o *CreditCardLiability) SetMinimumPaymentAmount(v float32) {
o.MinimumPaymentAmount = v
}
// GetNextPaymentDueDate returns the NextPaymentDueDate field value
// If the value is explicit nil, the zero value for string will be returned
func (o *CreditCardLiability) GetNextPaymentDueDate() string {
if o == nil || o.NextPaymentDueDate.Get() == nil {
var ret string
return ret
}
return *o.NextPaymentDueDate.Get()
}
// GetNextPaymentDueDateOk returns a tuple with the NextPaymentDueDate field value
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *CreditCardLiability) GetNextPaymentDueDateOk() (*string, bool) {
if o == nil {
return nil, false
}
return o.NextPaymentDueDate.Get(), o.NextPaymentDueDate.IsSet()
}
// SetNextPaymentDueDate sets field value
func (o *CreditCardLiability) SetNextPaymentDueDate(v string) {
o.NextPaymentDueDate.Set(&v)
}
// MarshalJSON serialises the liability as a JSON object, merging the typed
// fields with any additional (unrecognised) properties captured during
// unmarshalling. Additional properties with the same key override the typed
// fields, exactly as before.
//
// The generated `if true { ... }` guards around every field were dead
// conditionals and have been removed; the emitted JSON is unchanged.
func (o CreditCardLiability) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{
		"account_id":                o.AccountId.Get(),
		"aprs":                      o.Aprs,
		"is_overdue":                o.IsOverdue.Get(),
		"last_payment_amount":       o.LastPaymentAmount,
		"last_payment_date":         o.LastPaymentDate,
		"last_statement_issue_date": o.LastStatementIssueDate,
		"last_statement_balance":    o.LastStatementBalance,
		"minimum_payment_amount":    o.MinimumPaymentAmount,
		"next_payment_due_date":     o.NextPaymentDueDate.Get(),
	}
	for key, value := range o.AdditionalProperties {
		toSerialize[key] = value
	}
	return json.Marshal(toSerialize)
}
// UnmarshalJSON implements json.Unmarshaler. It decodes the declared fields
// into o and stores any remaining (unknown) JSON keys in AdditionalProperties.
func (o *CreditCardLiability) UnmarshalJSON(bytes []byte) (err error) {
	varCreditCardLiability := _CreditCardLiability{}
	if err = json.Unmarshal(bytes, &varCreditCardLiability); err != nil {
		// Return immediately. The generated original fell through here, so a
		// failure of this decode could be masked by a successful decode into
		// the generic map below, returning nil with *o left untouched.
		return err
	}
	*o = CreditCardLiability(varCreditCardLiability)
	additionalProperties := make(map[string]interface{})
	if err = json.Unmarshal(bytes, &additionalProperties); err != nil {
		return err
	}
	// Strip declared fields so AdditionalProperties holds only unknown keys.
	for _, known := range []string{
		"account_id",
		"aprs",
		"is_overdue",
		"last_payment_amount",
		"last_payment_date",
		"last_statement_issue_date",
		"last_statement_balance",
		"minimum_payment_amount",
		"next_payment_due_date",
	} {
		delete(additionalProperties, known)
	}
	o.AdditionalProperties = additionalProperties
	return nil
}
// NullableCreditCardLiability distinguishes between an unset value, an
// explicit JSON null, and a concrete CreditCardLiability.
type NullableCreditCardLiability struct {
	value *CreditCardLiability
	isSet bool
}
// Get returns the wrapped value; nil when unset or explicitly null.
func (v NullableCreditCardLiability) Get() *CreditCardLiability {
	return v.value
}
// Set stores val and marks the wrapper as set.
func (v *NullableCreditCardLiability) Set(val *CreditCardLiability) {
	v.value = val
	v.isSet = true
}
// IsSet reports whether a value (possibly nil) has been assigned.
func (v NullableCreditCardLiability) IsSet() bool {
	return v.isSet
}
// Unset clears the value and marks the wrapper as not set.
func (v *NullableCreditCardLiability) Unset() {
	v.value = nil
	v.isSet = false
}
// NewNullableCreditCardLiability returns a wrapper already set to val.
func NewNullableCreditCardLiability(val *CreditCardLiability) *NullableCreditCardLiability {
	return &NullableCreditCardLiability{value: val, isSet: true}
}
// MarshalJSON encodes the wrapped value; a nil value encodes as JSON null.
func (v NullableCreditCardLiability) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
// UnmarshalJSON decodes src into the wrapped value and marks the wrapper set.
func (v *NullableCreditCardLiability) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
package data
// RunParams describes a sequence of runs: up to five condition names, run in
// order. Each name refers to a ConditionParams object; unused slots hold the
// sentinel condition name "NullStep".
type RunParams struct {
	Nm      string `desc:"Name of the sequence"`
	Desc    string `desc:"Description"`
	Cond1Nm string `desc:"name of condition 1"`
	Cond2Nm string `desc:"name of condition 2"`
	Cond3Nm string `desc:"name of condition 3"`
	Cond4Nm string `desc:"name of condition 4"`
	Cond5Nm string `desc:"name of condition 5"`
}

// RunParamsMap maps a sequence name to its RunParams.
type RunParamsMap map[string]RunParams

// AllRunParams returns the full catalog of named run sequences.
//
// The original table spelled out all five condition slots (mostly "NullStep")
// and repeated the map key as Nm for every entry; the seq helper below fills
// those defaults so each row states only what differs.
func AllRunParams() RunParamsMap {
	// seq builds a RunParams named nm with description desc, filling the five
	// condition slots from conds in order; remaining slots default to
	// "NullStep". More than five conditions is a programming error.
	seq := func(nm, desc string, conds ...string) RunParams {
		if len(conds) > 5 {
			panic("AllRunParams: at most 5 conditions per sequence")
		}
		rp := RunParams{
			Nm:      nm,
			Desc:    desc,
			Cond1Nm: "NullStep",
			Cond2Nm: "NullStep",
			Cond3Nm: "NullStep",
			Cond4Nm: "NullStep",
			Cond5Nm: "NullStep",
		}
		slots := []*string{&rp.Cond1Nm, &rp.Cond2Nm, &rp.Cond3Nm, &rp.Cond4Nm, &rp.Cond5Nm}
		for i, c := range conds {
			*slots[i] = c
		}
		return rp
	}
	return RunParamsMap{
		"RunMaster":          seq("RunMaster", "", "PosAcq_B50"),
		"USDebug":            seq("USDebug", "", "USDebug"),
		"US0":                seq("US0", "", "US0"),
		"PosAcq_A50":         seq("PosAcq_A50", "", "PosAcq_A50"),
		"PosAcq_B50Ext":      seq("PosAcq_B50Ext", "", "PosAcq_B50", "PosExtinct"),
		"PosAcq_B50ExtAcq":   seq("PosAcq_B50ExtAcq", "Full cycle: acq, ext, acq", "PosAcq_B50", "PosExtinct", "PosAcq_B50Cont"),
		"PosAcq_B100Ext":     seq("PosAcq_B100Ext", "", "PosAcq_B100", "PosExtinct"),
		"PosAcq":             seq("PosAcq", "", "PosAcq_B50"),
		"PosExt":             seq("PosExt", "", "PosAcq_B50", "PosExtinct"),
		"PosAcq_B25":         seq("PosAcq_B25", "", "PosAcq_B25"),
		"NegAcq":             seq("NegAcq", "", "NegAcq"),
		"NegAcqMag":          seq("NegAcqMag", "", "NegAcqMag"),
		"PosAcqMag":          seq("PosAcqMag", "", "PosAcqMag"),
		"NegAcqExt":          seq("NegAcqExt", "", "NegAcq", "NegExtinct"),
		"PosCondInhib":       seq("PosCondInhib", "", "PosAcq_contextA", "PosCondInhib", "PosCondInhib_test"),
		"PosSecondOrderCond": seq("PosSecondOrderCond", "", "PosAcqPreSecondOrder", "PosSecondOrderCond"),
		"PosBlocking":        seq("PosBlocking", "", "PosBlocking_A_training", "PosBlocking", "PosBlocking_test"),
		"PosBlocking2":       seq("PosBlocking2", "", "PosBlocking_A_training", "PosBlocking", "PosBlocking2_test"),
		"NegCondInhib":       seq("NegCondInhib", "", "NegAcq", "NegCondInh", "NegCondInh_test"),
		"AbaRenewal":         seq("AbaRenewal", "", "PosAcq_contextA", "PosExtinct_contextB", "PosRenewal_contextA"),
		"NegBlocking":        seq("NegBlocking", "", "NegBlocking_E_training", "NegBlocking", "NegBlocking_test"),
		"PosSum_test":        seq("PosSum_test", "", "PosSumAcq", "PosSumCondInhib", "PosSum_test"),
		"NegSum_test":        seq("NegSum_test", "", "NegSumAcq", "NegSumCondInhib", "NegSum_test"),
		"UnblockingValue":    seq("UnblockingValue", "", "Unblocking_train", "UnblockingValue", "UnblockingValue_test"),
		"UnblockingIdentity": seq("UnblockingIdentity", "", "Unblocking_trainUS", "UnblockingIdentity", "UnblockingIdentity_test"),
		"Overexpect":         seq("Overexpect", "", "Overexpect_train", "OverexpectCompound", "Overexpect_test"),
		"PosMagChange":       seq("PosMagChange", "", "PosAcqMag", "PosAcqMagChange", "Overexpect_test"),
		"NegMagChange":       seq("NegMagChange", "", "NegAcqMag", "NegAcqMagChange"),
		"CondExp":            seq("CondExp", "", "CondExp"),
		"PainExp":            seq("PainExp", "", "PainExp"),
		"PosNeg":             seq("PosNeg", "", "PosOrNegAcq"),
		"PosAcqEarlyUSTest":  seq("PosAcqEarlyUSTest", "", "PosAcq_B50", "PosAcqEarlyUS_test"),
		"AutomatedTesting":   seq("AutomatedTesting", "This paramset is just for naming purposes"),
		"PosOrNegAcq":        seq("PosOrNegAcq", "", "PosOrNegAcq"),
		"PosCondInhib_test":  seq("PosCondInhib_test", "For debugging", "PosCondInhib_test"),
	}
}
package item
// Pre-defined armour tiers. BaseDurability is the tier's helmet durability
// (see ArmourTier.BaseDurability); only netherite carries an innate
// KnockBackResistance here.
var (
	// ArmourTierLeather is the ArmourTier of leather armour.
	ArmourTierLeather = ArmourTier{BaseDurability: 55, Name: "leather"}
	// ArmourTierGold is the ArmourTier of gold armour.
	ArmourTierGold = ArmourTier{BaseDurability: 77, Name: "golden"}
	// ArmourTierChain is the ArmourTier of chain armour.
	ArmourTierChain = ArmourTier{BaseDurability: 166, Name: "chainmail"}
	// ArmourTierIron is the ArmourTier of iron armour.
	ArmourTierIron = ArmourTier{BaseDurability: 165, Name: "iron"}
	// ArmourTierDiamond is the ArmourTier of diamond armour.
	ArmourTierDiamond = ArmourTier{BaseDurability: 363, Name: "diamond"}
	// ArmourTierNetherite is the ArmourTier of netherite armour.
	ArmourTierNetherite = ArmourTier{BaseDurability: 408, KnockBackResistance: 0.1, Name: "netherite"}
)
type (
	// Armour represents an item that may be worn as armour. Generally, these items provide armour points, which
	// reduce damage taken. Some pieces of armour also provide toughness, which negates damage proportional to
	// the total damage dealt.
	Armour interface {
		// DefencePoints returns the defence points that the armour provides when worn.
		DefencePoints() float64
		// KnockBackResistance returns a number from 0-1 that decides the amount of knock back force that is
		// resisted upon being attacked. 1 knock back resistance point client-side translates to 10% knock back
		// reduction.
		KnockBackResistance() float64
	}
	// ArmourTier represents the tier, or material, that a piece of armour is made of.
	ArmourTier struct {
		// BaseDurability is the base durability of armour with this tier. This is otherwise the durability of
		// the helmet with this tier.
		BaseDurability float64
		// KnockBackResistance is a number from 0-1 that decides the amount of knock back force that is resisted
		// upon being attacked. 1 knock back resistance point client-side translates to 10% knock back reduction.
		KnockBackResistance float64
		// Name is the name of the tier.
		Name string
	}
	// HelmetType is an Armour item that can be worn in the helmet slot.
	HelmetType interface {
		Armour
		// Helmet is a marker method identifying the item as wearable in the helmet slot.
		Helmet() bool
	}
	// ChestplateType is an Armour item that can be worn in the chestplate slot.
	ChestplateType interface {
		Armour
		// Chestplate is a marker method identifying the item as wearable in the chestplate slot.
		Chestplate() bool
	}
	// LeggingsType are an Armour item that can be worn in the leggings slot.
	LeggingsType interface {
		Armour
		// Leggings is a marker method identifying the item as wearable in the leggings slot.
		Leggings() bool
	}
	// BootsType are an Armour item that can be worn in the boots slot.
	BootsType interface {
		Armour
		// Boots is a marker method identifying the item as wearable in the boots slot.
		Boots() bool
	}
)
// ArmourTiers returns a list of all armour tiers.
func ArmourTiers() []ArmourTier {
return []ArmourTier{ArmourTierLeather, ArmourTierGold, ArmourTierChain, ArmourTierIron, ArmourTierDiamond, ArmourTierNetherite}
} | server/item/armour.go | 0.586404 | 0.407245 | armour.go | starcoder |
package langreg
import (
"errors"
"fmt"
)
// regionNames maps ISO 3166-1 alpha-2 region codes (uppercase) to their
// English short names. It replaces the original ~600-line nested switch,
// which was a pure data table written as code. All name strings are kept
// exactly as in the original (including the "Name !Variant" forms), except
// "SH" and "SJ", whose names were corrupted and have been restored to the
// ISO 3166-1 English short names.
var regionNames = map[string]string{
	"AD": "Andorra",
	"AE": "United Arab Emirates",
	"AF": "Afghanistan",
	"AG": "Antigua and Barbuda",
	"AI": "Anguilla",
	"AL": "Albania",
	"AM": "Armenia",
	"AO": "Angola",
	"AQ": "Antarctica",
	"AR": "Argentina",
	"AS": "American Samoa",
	"AT": "Austria",
	"AU": "Australia",
	"AW": "Aruba",
	"AX": "Aland Islands !Åland Islands",
	"AZ": "Azerbaijan",
	"BA": "Bosnia and Herzegovina",
	"BB": "Barbados",
	"BD": "Bangladesh",
	"BE": "Belgium",
	"BF": "Burkina Faso",
	"BG": "Bulgaria",
	"BH": "Bahrain",
	"BI": "Burundi",
	"BJ": "Benin",
	"BL": "Saint Barthélemy",
	"BM": "Bermuda",
	"BN": "Brunei Darussalam",
	"BO": "Bolivia, Plurinational State of",
	"BQ": "Bonaire, Sint Eustatius and Saba",
	"BR": "Brazil",
	"BS": "Bahamas",
	"BT": "Bhutan",
	"BV": "Bouvet Island",
	"BW": "Botswana",
	"BY": "Belarus",
	"BZ": "Belize",
	"CA": "Canada",
	"CC": "Cocos (Keeling) Islands",
	"CD": "Congo, the Democratic Republic of the",
	"CF": "Central African Republic",
	"CG": "Congo",
	"CH": "Switzerland",
	"CI": "Cote dIvoire !Côte dIvoire",
	"CK": "Cook Islands",
	"CL": "Chile",
	"CM": "Cameroon",
	"CN": "China",
	"CO": "Colombia",
	"CR": "Costa Rica",
	"CU": "Cuba",
	"CV": "Cabo Verde",
	"CW": "Curaçao",
	"CX": "Christmas Island",
	"CY": "Cyprus",
	"CZ": "Czech Republic",
	"DE": "Germany",
	"DJ": "Djibouti",
	"DK": "Denmark",
	"DM": "Dominica",
	"DO": "Dominican Republic",
	"DZ": "Algeria",
	"EC": "Ecuador",
	"EE": "Estonia",
	"EG": "Egypt",
	"EH": "Western Sahara",
	"ER": "Eritrea",
	"ES": "Spain",
	"ET": "Ethiopia",
	"FI": "Finland",
	"FJ": "Fiji",
	"FK": "Falkland Islands (Malvinas)",
	"FM": "Micronesia, Federated States of",
	"FO": "Faroe Islands",
	"FR": "France",
	"GA": "Gabon",
	"GB": "United Kingdom",
	"GD": "Grenada",
	"GE": "Georgia",
	"GF": "French Guiana",
	"GG": "Guernsey",
	"GH": "Ghana",
	"GI": "Gibraltar",
	"GL": "Greenland",
	"GM": "Gambia",
	"GN": "Guinea",
	"GP": "Guadeloupe",
	"GQ": "Equatorial Guinea",
	"GR": "Greece",
	"GS": "South Georgia and the South Sandwich Islands",
	"GT": "Guatemala",
	"GU": "Guam",
	"GW": "Guinea-Bissau",
	"GY": "Guyana",
	"HK": "Hong Kong",
	"HM": "Heard Island and McDonald Islands",
	"HN": "Honduras",
	"HR": "Croatia",
	"HT": "Haiti",
	"HU": "Hungary",
	"ID": "Indonesia",
	"IE": "Ireland",
	"IL": "Israel",
	"IM": "Isle of Man",
	"IN": "India",
	"IO": "British Indian Ocean Territory",
	"IQ": "Iraq",
	"IR": "Iran, Islamic Republic of",
	"IS": "Iceland",
	"IT": "Italy",
	"JE": "Jersey",
	"JM": "Jamaica",
	"JO": "Jordan",
	"JP": "Japan",
	"KE": "Kenya",
	"KG": "Kyrgyzstan",
	"KH": "Cambodia",
	"KI": "Kiribati",
	"KM": "Comoros",
	"KN": "Saint Kitts and Nevis",
	"KP": "Korea, Democratic Peoples Republic of",
	"KR": "Korea, Republic of",
	"KW": "Kuwait",
	"KY": "Cayman Islands",
	"KZ": "Kazakhstan",
	"LA": "Lao Peoples Democratic Republic",
	"LB": "Lebanon",
	"LC": "Saint Lucia",
	"LI": "Liechtenstein",
	"LK": "Sri Lanka",
	"LR": "Liberia",
	"LS": "Lesotho",
	"LT": "Lithuania",
	"LU": "Luxembourg",
	"LV": "Latvia",
	"LY": "Libya",
	"MA": "Morocco",
	"MC": "Monaco",
	"MD": "Moldova, Republic of",
	"ME": "Montenegro",
	"MF": "Saint Martin (French part)",
	"MG": "Madagascar",
	"MH": "Marshall Islands",
	"MK": "Macedonia, the former Yugoslav Republic of",
	"ML": "Mali",
	"MM": "Myanmar",
	"MN": "Mongolia",
	"MO": "Macao",
	"MP": "Northern Mariana Islands",
	"MQ": "Martinique",
	"MR": "Mauritania",
	"MS": "Montserrat",
	"MT": "Malta",
	"MU": "Mauritius",
	"MV": "Maldives",
	"MW": "Malawi",
	"MX": "Mexico",
	"MY": "Malaysia",
	"MZ": "Mozambique",
	"NA": "Namibia",
	"NC": "New Caledonia",
	"NE": "Niger",
	"NF": "Norfolk Island",
	"NG": "Nigeria",
	"NI": "Nicaragua",
	"NL": "Netherlands",
	"NO": "Norway",
	"NP": "Nepal",
	"NR": "Nauru",
	"NU": "Niue",
	"NZ": "New Zealand",
	"OM": "Oman",
	"PA": "Panama",
	"PE": "Peru",
	"PF": "French Polynesia",
	"PG": "Papua New Guinea",
	"PH": "Philippines",
	"PK": "Pakistan",
	"PL": "Poland",
	"PM": "Saint Pierre and Miquelon",
	"PN": "Pitcairn",
	"PR": "Puerto Rico",
	"PS": "Palestine, State of",
	"PT": "Portugal",
	"PW": "Palau",
	"PY": "Paraguay",
	"QA": "Qatar",
	"RE": "Reunion !Réunion",
	"RO": "Romania",
	"RS": "Serbia",
	"RU": "Russian Federation",
	"RW": "Rwanda",
	"SA": "Saudi Arabia",
	"SB": "Solomon Islands",
	"SC": "Seychelles",
	"SD": "Sudan",
	"SE": "Sweden",
	"SG": "Singapore",
	"SH": "Saint Helena, Ascension and Tristan da Cunha",
	"SI": "Slovenia",
	"SJ": "Svalbard and Jan Mayen",
	"SK": "Slovakia",
	"SL": "Sierra Leone",
	"SM": "San Marino",
	"SN": "Senegal",
	"SO": "Somalia",
	"SR": "Suriname",
	"SS": "South Sudan",
	"ST": "Sao Tome and Principe",
	"SV": "El Salvador",
	"SX": "Sint Maarten (Dutch part)",
	"SY": "Syrian Arab Republic",
	"SZ": "Swaziland",
	"TC": "Turks and Caicos Islands",
	"TD": "Chad",
	"TF": "French Southern Territories",
	"TG": "Togo",
	"TH": "Thailand",
	"TJ": "Tajikistan",
	"TK": "Tokelau",
	"TL": "Timor-Leste",
	"TM": "Turkmenistan",
	"TN": "Tunisia",
	"TO": "Tonga",
	"TR": "Turkey",
	"TT": "Trinidad and Tobago",
	"TV": "Tuvalu",
	"TW": "Taiwan, Province of China",
	"TZ": "Tanzania, United Republic of",
	"UA": "Ukraine",
	"UG": "Uganda",
	"UM": "United States Minor Outlying Islands",
	"US": "United States",
	"UY": "Uruguay",
	"UZ": "Uzbekistan",
	"VA": "Holy See (Vatican City State)",
	"VC": "Saint Vincent and the Grenadines",
	"VE": "Venezuela, Bolivarian Republic of",
	"VG": "Virgin Islands, British",
	"VI": "Virgin Islands, U.S.",
	"VN": "Viet Nam",
	"VU": "Vanuatu",
	"WF": "Wallis and Futuna",
	"WS": "Samoa",
	"YE": "Yemen",
	"YT": "Mayotte",
	"ZA": "South Africa",
	"ZM": "Zambia",
	"ZW": "Zimbabwe",
}

// RegionCodeInfo returns the English regional name that
// corresponds to the ISO 3166-1 alpha-2 region codes.
// Region codes should always be uppercase, and this is enforced:
// lowercase input never matches the (uppercase-keyed) table, so "US" is
// valid but "us" is not.
func RegionCodeInfo(s string) (region string, err error) {
	// codes have to be two characters long
	if len(s) != 2 {
		return "",
			errors.New("ISO 3166-1 alpha-2 region codes must be 2 characters long")
	}
	if name, ok := regionNames[s]; ok {
		return name, nil
	}
	return "",
		fmt.Errorf("%q is not a valid ISO 3166-1 alpha-2 region code", s)
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.