code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package unit
import (
"errors"
"math/big"
"strings"
)
var (
	// precision required for big.Float.
	// Use SetPrecision to change the value.
	// Higher precision gives more accurate conversions for very large or
	// very small numbers, at the cost of slower arithmetic.
	precision uint = 1024
)
// Unit store value in Wei unit.
// Wei should be always big.Int.
type Unit struct {
	// Value holds the amount expressed in wei, the smallest denomination.
	Value *big.Int
}
// String returns the wei value of the Unit as a base-10 decimal string.
func (u Unit) String() string {
	wei := u.Value
	return wei.String()
}
// convertTo transform Unit.Value (always wei) to another unit and return pointer to big.Float.
// The target unit is identified by its scaling constant (e.g. Ether, GWei);
// the result is wei / (Wei/unit), computed at the package-wide precision.
// NOTE(review): assumes the unit constants (Wei, Ether, ...) are float64
// scaling factors declared elsewhere in this package — confirm.
func (u Unit) convertTo(unit float64) *big.Float {
	val := newBigFloat().SetInt(u.Value)
	diff := newBigFloat().Quo(big.NewFloat(Wei), big.NewFloat(unit))
	return newBigFloat().Quo(val, diff)
}
// convertFrom transform value in passed unit.
// Return pointer to Unit.
// The value is scaled to wei (value * Wei/unit) and truncated to an integer
// via big.Float.Int; any fractional wei is discarded.
func convertFrom(value *big.Float, unit float64) *Unit {
	result := new(big.Int)
	newBigFloat().Mul(value, big.NewFloat(Wei/unit)).Int(result)
	return &Unit{Value: result}
}
// SetPrecision set the precision value for big.Float.
// Decrease value for faster conversion.
// Increase value if you need to work with very small or very large numbers.
// It is very easy to understand that you need to increase this value.
// For example: if you convert 1e1000 number and receive not zeros in the end of number then increase value.
// Not synchronized: it writes a plain package-level variable, so call it
// before performing conversions from multiple goroutines.
func SetPrecision(prec uint) {
	precision = prec
}
// ParseUnit create instance of Unit from passed value and unit name.
// Support units: wei, kwei, mwei, gwei, szabo, finney, ether.
// You may use it for convert units between themselves.
// The unit name is matched case-insensitively; an unrecognized name yields
// a non-nil error and a nil Unit.
func ParseUnit(value *big.Float, unit string) (*Unit, error) {
	switch strings.ToLower(unit) {
	case "wei":
		intVal := new(big.Int)
		value.Int(intVal)
		return NewWei(intVal), nil
	case "kwei":
		return NewKWei(value), nil
	case "mwei":
		return NewMWei(value), nil
	case "gwei":
		return NewGWei(value), nil
	case "szabo":
		return NewSzabo(value), nil
	case "finney":
		return NewFinney(value), nil
	case "ether":
		return NewEther(value), nil
	}
	return nil, errors.New("unknown unit name")
}
// newBigFloat create new instance of big.Float with default properties.
func newBigFloat() *big.Float {
return new(big.Float).SetPrec(precision)
} | unit.go | 0.81648 | 0.418222 | unit.go | starcoder |
package parser
import (
"bufio"
"io"
"regexp"
"strconv"
"strings"
)
// Result represents a test result.
type Result int

// Test result constants
const (
	// PASS indicates the test completed successfully.
	PASS Result = iota
	// FAIL indicates the test did not pass; it is also the default result
	// assigned when a test is first seen, before its status line arrives.
	FAIL
	// SKIP indicates the test was skipped.
	SKIP
)
// Report is a collection of package tests.
type Report struct {
	Packages []Package
}

// Package contains the test results of a single package.
type Package struct {
	Name string
	// Time is the package run time parsed from the "ok/FAIL ... N.NNs"
	// line with the decimal point stripped (see parseTime).
	Time int
	Tests []*Test
	// CoveragePct is the statement coverage percentage reported by
	// "coverage: NN.N% of statements"; empty when no coverage was reported.
	CoveragePct string
}

// Test contains the results of a single test.
type Test struct {
	Name string
	Time int
	Result Result
	// Output holds the tab-indented output lines captured for this test,
	// with the leading tab removed.
	Output []string
}
var (
	// regexStatus matches per-test status lines, e.g.
	// "--- PASS: TestFoo (0.05 seconds)" or "--- FAIL: TestBar (0.05s)".
	regexStatus = regexp.MustCompile(`^--- (PASS|FAIL|SKIP): (.+) \((\d+\.\d+)(?: seconds|s)\)$`)
	// regexCoverage matches standalone coverage lines:
	// "coverage: 75.0% of statements".
	regexCoverage = regexp.MustCompile(`^coverage:\s+(\d+\.\d+)%\s+of\s+statements$`)
	// regexResult matches the package result line, e.g.
	// "ok  \tpkg\t0.012s", optionally followed by an inline coverage clause.
	regexResult = regexp.MustCompile(`^(ok|FAIL)\s+(.+)\s(\d+\.\d+)s(?:\s+coverage:\s+(\d+\.\d+)%\s+of\s+statements)?$`)
)
// Parse parses go test output from reader r and returns a report with the
// results. An optional pkgName can be given, which is used in case a package
// result line is missing.
//
// The input is processed line by line as a small state machine: "=== RUN"
// opens a test (defaulting to FAIL), "--- PASS/FAIL/SKIP" records its status
// and time, a package result line flushes all accumulated tests into a
// Package, and tab-indented lines are captured as output of the current test.
func Parse(r io.Reader, pkgName string) (*Report, error) {
	reader := bufio.NewReader(r)
	report := &Report{make([]Package, 0)}
	// keep track of tests we find
	var tests []*Test
	// sum of tests' time, use this if current test has no result line (when it is compiled test)
	testsTime := 0
	// current test
	var cur string
	// coverage percentage report for current package
	var coveragePct string
	// parse lines
	for {
		l, _, err := reader.ReadLine()
		if err != nil && err == io.EOF {
			break
		} else if err != nil {
			return nil, err
		}
		line := string(l)
		if strings.HasPrefix(line, "=== RUN ") {
			// new test; marked FAIL until a status line says otherwise
			cur = strings.TrimSpace(line[8:])
			tests = append(tests, &Test{
				Name: cur,
				Result: FAIL,
				Output: make([]string, 0),
			})
		} else if matches := regexResult.FindStringSubmatch(line); len(matches) == 5 {
			if matches[4] != "" {
				coveragePct = matches[4]
			}
			// all tests in this package are finished
			report.Packages = append(report.Packages, Package{
				Name: matches[2],
				Time: parseTime(matches[3]),
				Tests: tests,
				CoveragePct: coveragePct,
			})
			tests = make([]*Test, 0)
			coveragePct = ""
			cur = ""
			testsTime = 0
		} else if matches := regexStatus.FindStringSubmatch(line); len(matches) == 4 {
			cur = matches[2]
			test := findTest(tests, cur)
			if test == nil {
				// status line without a preceding "=== RUN" — ignore it
				continue
			}
			// test status
			if matches[1] == "PASS" {
				test.Result = PASS
			} else if matches[1] == "SKIP" {
				test.Result = SKIP
			} else {
				test.Result = FAIL
			}
			test.Name = matches[2]
			// NOTE(review): the *10 appears to align two-decimal test times
			// with three-decimal package times ("0.05" -> 5 -> 50 matches
			// "0.050" -> 50) — confirm against the expected time unit.
			testTime := parseTime(matches[3]) * 10
			test.Time = testTime
			testsTime += testTime
		} else if matches := regexCoverage.FindStringSubmatch(line); len(matches) == 2 {
			coveragePct = matches[1]
		} else if strings.HasPrefix(line, "\t") {
			// test output; the leading tab is stripped
			test := findTest(tests, cur)
			if test == nil {
				continue
			}
			test.Output = append(test.Output, line[1:])
		}
	}
	if len(tests) > 0 {
		// no result line found; fall back to the caller-supplied package name
		// and the accumulated per-test time
		report.Packages = append(report.Packages, Package{
			Name: pkgName,
			Time: testsTime,
			Tests: tests,
			CoveragePct: coveragePct,
		})
	}
	return report, nil
}
// parseTime converts a duration string such as "1.05" to an integer by
// removing the decimal point ("1.05" -> 105). It returns 0 when the
// remaining text is not a valid integer.
func parseTime(duration string) int {
	stripped := strings.Replace(duration, ".", "", -1)
	if value, err := strconv.Atoi(stripped); err == nil {
		return value
	}
	return 0
}
// findTest returns the first test in tests whose Name equals name,
// or nil when no such test exists.
func findTest(tests []*Test, name string) *Test {
	for _, candidate := range tests {
		if candidate.Name == name {
			return candidate
		}
	}
	return nil
}
// Failures counts the number of failed tests in this report
func (r *Report) Failures() int {
count := 0
for _, p := range r.Packages {
for _, t := range p.Tests {
if t.Result == FAIL {
count++
}
}
}
return count
} | parser/parser.go | 0.582966 | 0.425546 | parser.go | starcoder |
package ag
import (
mat "github.com/nlpodyssey/spago/pkg/mat32"
"github.com/nlpodyssey/spago/pkg/mat32/rand"
"github.com/nlpodyssey/spago/pkg/ml/ag/fn"
)
/*
 * Top-level convenience functions
 */

// globalGraph is shared, mutable package-level state backing every
// top-level helper in this file. The fixed seed (42) makes random
// initialization deterministic across runs.
var globalGraph = NewGraph(Rand(rand.NewLockedRand(42)))

// GetGlobalGraph returns the global graph.
// Although technically you could reassign the returned graph, please do not do so; imagine that its reference is immutable.
// Otherwise you are likely to generate inconsistent computations.
// To clean the global graph, you can use ClearGlobalGraph() or ClearGlobalGraphForReuse().
func GetGlobalGraph() *Graph {
	return globalGraph
}
// ClearGlobalGraph clears the global graph. This is a destructive operation.
// See Graph.Clear() for more information.
func ClearGlobalGraph() {
	globalGraph.Clear()
}

// ClearGlobalGraphForReuse does the same thing as ClearGlobalGraph(), with the difference that the
// graph structure is maintained.
// See Graph.ClearForReuse() for more information.
func ClearGlobalGraphForReuse() {
	globalGraph.ClearForReuse()
}

// ZeroGrad sets the gradients of all nodes of the global graph to zero.
func ZeroGrad() {
	globalGraph.ZeroGrad()
}

// NewVariable creates and returns a new node.
func NewVariable(value mat.Matrix, requiresGrad bool) Node {
	return globalGraph.NewVariable(value, requiresGrad)
}

// NewScalar creates a variable node that doesn't require gradients.
func NewScalar(value mat.Float) Node {
	return globalGraph.NewScalar(value)
}

// NewOperator creates a new operator along with its forward pass.
func NewOperator(f fn.Function, operands ...Node) Node {
	return globalGraph.NewOperator(f, operands...)
}

// NewWrap creates a new wrapper Node for the given value, attaching it to
// the global graph.
func NewWrap(value GradValue) Node {
	return globalGraph.NewWrap(value)
}

// NewWrapNoGrad is similar to NewWrap, but it disables automatic
// differentiation on the new node.
func NewWrapNoGrad(value GradValue) Node {
	return globalGraph.NewWrapNoGrad(value)
}

// ReplaceValue replaces the current value of a variable Node with the given value,
// on the global graph. It panics if node is not a variable.
func ReplaceValue(node Node, value mat.Matrix) {
	globalGraph.ReplaceValue(node, value)
}

// IncTimeStep increments the value of the global graph's TimeStep by one.
func IncTimeStep() {
	globalGraph.IncTimeStep()
}

// TimeStep is an integer value associated with the global graph, which can be useful
// to perform truncated back propagation.
func TimeStep() int {
	return globalGraph.TimeStep()
}

// Nodes returns the nodes of the graph.
func Nodes() []Node {
	return globalGraph.Nodes()
}
// Forward computes the results of the entire global graph.
func Forward(opts ...ForwardOption) {
	globalGraph.Forward(opts...)
}
// Backward performs the back-propagation.
// See Graph.Backward() for more information.
func Backward(node Node, opts ...BackwardOption) {
globalGraph.Backward(node, opts...)
}
// BackwardAll performs full back-propagation from the last node of the graph.
// It requires the root nodes to have assigned gradients already.
func BackwardAll() {
globalGraph.BackwardAll()
}
// Invoke returns a new node as a result of the application of the input operator.
func Invoke(operator OpName, xs ...Node) Node {
return globalGraph.Invoke(operator, xs...)
}
// Identity returns a new operator node as a result of the fn.Identity function.
func Identity(x Node) Node {
return globalGraph.Identity(x)
}
// Dropout returns a new operator node as a result of the fn.Dropout function.
func Dropout(x Node, p mat.Float) Node {
return globalGraph.Dropout(x, p)
}
// AtVec returns a new operator node as a result of the fn.AtVec function.
func AtVec(x Node, i int) Node {
return globalGraph.AtVec(x, i)
}
// At returns a new operator node as a result of the fn.At function.
func At(x Node, i int, j int) Node {
return globalGraph.At(x, i, j)
}
// Add returns a new operator node as a result of the fn.Add function.
// The first node may be null. This help to keep the code as concise as possible e.g. during accumulation.
func Add(x1 Node, x2 Node) Node {
return globalGraph.Add(x1, x2)
}
// Sub returns a new operator node as a result of the fn.Sub function.
func Sub(x1 Node, x2 Node) Node {
return globalGraph.Sub(x1, x2)
}
// SubScalar returns a new operator node as a result of the fn.SubScalar function.
func SubScalar(x1 Node, x2 Node) Node {
return globalGraph.SubScalar(x1, x2)
}
// AddScalar returns a new operator node as a result of the fn.AddScalar function.
func AddScalar(x1 Node, x2 Node) Node {
return globalGraph.AddScalar(x1, x2)
}
// ReverseSub returns a new operator node as a result of the fn.ReverseSub function.
func ReverseSub(x1 Node, x2 Node) Node {
return globalGraph.ReverseSub(x1, x2)
}
// Prod returns a new operator node as a result of the fn.Prod function.
func Prod(x1 Node, x2 Node) Node {
return globalGraph.Prod(x1, x2)
}
// Div returns a new operator node as a result of the fn.Div function.
func Div(x1 Node, x2 Node) Node {
return globalGraph.Div(x1, x2)
}
// ProdScalar returns a new operator node as a result of the fn.ProdScalar function.
func ProdScalar(x1 Node, x2 Node) Node {
return globalGraph.ProdScalar(x1, x2)
}
// DivScalar returns a new operator node as a result of the fn.DivScalar function.
func DivScalar(x1 Node, x2 Node) Node {
return globalGraph.DivScalar(x1, x2)
}
// Mul returns a new operator node as a result of the fn.Mul function.
func Mul(x1 Node, x2 Node) Node {
return globalGraph.Mul(x1, x2)
}
// Dot returns a new operator node as a result of the fn.Dot function.
func Dot(x1 Node, x2 Node) Node {
return globalGraph.Dot(x1, x2)
}
// Max returns a new operator node as a result of the fn.Max function.
func Max(x1 Node, x2 Node) Node {
return globalGraph.Max(x1, x2)
}
// Min returns a new operator node as a result of the fn.Min function.
func Min(x1 Node, x2 Node) Node {
return globalGraph.Min(x1, x2)
}
// Reshape returns a new operator node as a result of the fn.Reshape function.
func Reshape(x Node, rows, columns int) Node {
return globalGraph.Reshape(x, rows, columns)
}
// MaxPooling returns a new operator node as a result of the fn.MaxPooling function.
func MaxPooling(x Node, rows, columns int) Node {
return globalGraph.MaxPooling(x, rows, columns)
}
// View returns a new operator node as a result of the fn.View function.
func View(x Node, row, column, xStride, yStride int) Node {
return globalGraph.View(x, row, column, xStride, yStride)
}
// RowView returns a new operator node as a result of the fn.RowView function.
func RowView(x Node, row int) Node {
return globalGraph.RowView(x, row)
}
// ColView returns a new operator node as a result of the fn.ColView function.
func ColView(x Node, column int) Node {
return globalGraph.ColView(x, column)
}
// RotateR performs the right circular shift.
// `i` is the number of places by which the elements are shifted.
func RotateR(x Node, i int) Node {
return globalGraph.RotateR(x, i)
}
// Vec returns a new operator node as a result of the fn.Vec function.
func Vec(x Node) Node {
return globalGraph.Vec(x)
}
// T returns a new operator node as a result of the fn.T function.
func T(x Node) Node {
return globalGraph.T(x)
}
// Square returns a new operator node as a result of the fn.Prod(x, x) function.
func Square(x Node) Node {
return globalGraph.Square(x)
}
// Pow returns a new operator node as a result of the fn.Pow function.
func Pow(x Node, power mat.Float) Node {
return globalGraph.Pow(x, power)
}
// Sqrt returns a new operator node as a result of the `Sqrt` function.
func Sqrt(x Node) Node {
return globalGraph.Sqrt(x)
}
// Tan returns a new operator node as a result of the `Tan` function.
func Tan(x Node) Node {
return globalGraph.Tan(x)
}
// Tanh returns a new operator node as a result of the `Tanh` function.
func Tanh(x Node) Node {
return globalGraph.Tanh(x)
}
// Sigmoid returns a new operator node as a result of the `Sigmoid` function.
func Sigmoid(x Node) Node {
return globalGraph.Sigmoid(x)
}
// HardSigmoid returns a new operator node as a result of the `HardSigmoid` function.
func HardSigmoid(x Node) Node {
return globalGraph.HardSigmoid(x)
}
// HardTanh returns a new operator node as a result of the `HardTanh` function.
func HardTanh(x Node) Node {
return globalGraph.HardTanh(x)
}
// Softsign returns a new operator node as a result of the `SoftSign` function.
func Softsign(x Node) Node {
return globalGraph.Softsign(x)
}
// ReLU returns a new operator node as a result of the `ReLU` function.
func ReLU(x Node) Node {
return globalGraph.ReLU(x)
}
// CELU returns a new operator node as a result of the fn.CELU function.
func CELU(x Node, alpha Node) Node {
return globalGraph.CELU(x, alpha)
}
// GELU returns a new operator node as a result of the fn.GELU function.
func GELU(x Node) Node {
return globalGraph.GELU(x)
}
// ELU returns a new operator node as a result of the fn.ELU function.
func ELU(x Node, alpha Node) Node {
return globalGraph.ELU(x, alpha)
}
// PositiveELU returns a new operator node as a result of ELU(x, 1.0) + 1.
func PositiveELU(x Node) Node {
return globalGraph.PositiveELU(x)
}
// SwishB returns a new operator node as a result of the fn.SwishB function.
func SwishB(x Node, beta Node) Node {
return globalGraph.SwishB(x, beta)
}
// Swish returns a new operator node as a result of the fn.Swish function.
func Swish(x Node) Node {
return globalGraph.Swish(x)
}
// SiLU returns a new operator node as a result of the fn.SiLU function.
func SiLU(x Node) Node {
return globalGraph.SiLU(x)
}
// Mish returns a new operator node as a result of the `Mish` function.
func Mish(x Node) Node {
return globalGraph.Mish(x)
}
// LeakyReLU returns a new operator node as a result of the fn.LeakyReLU function.
func LeakyReLU(x Node, alpha Node) Node {
return globalGraph.LeakyReLU(x, alpha)
}
// SELU returns a new operator node as a result of the fn.SELU function.
func SELU(x Node, alpha Node, scale Node) Node {
return globalGraph.SELU(x, alpha, scale)
}
// SoftPlus returns a new operator node as a result of the fn.SoftPlus function.
func SoftPlus(x Node, beta Node, threshold Node) Node {
return globalGraph.SoftPlus(x, beta, threshold)
}
// SoftShrink returns a new operator node as a result of the fn.SoftShrink function.
func SoftShrink(x Node, lambda Node) Node {
return globalGraph.SoftShrink(x, lambda)
}
// Threshold returns a new operator node as a result of the fn.Threshold function.
func Threshold(x Node, threshold Node, k Node) Node {
return globalGraph.Threshold(x, threshold, k)
}
// Softmax returns a new operator node as a result of the fn.Softmax function.
func Softmax(x Node) Node {
return globalGraph.Softmax(x)
}
// LogSoftmax returns a new operator node as a result of Log(Softmax(x)).
func LogSoftmax(x Node) Node {
return globalGraph.LogSoftmax(x)
}
// SparseMax returns a new operator node as a result of the fn.SparseMax function.
func SparseMax(x Node) Node {
return globalGraph.SparseMax(x)
}
// SparseMaxLoss returns a new operator node as a result of the fn.SparseMaxLoss function.
func SparseMaxLoss(x Node) Node {
return globalGraph.SparseMaxLoss(x)
}
// Sin returns a new operator node as a result of the `Sin` function.
func Sin(x Node) Node {
return globalGraph.Sin(x)
}
// Cos returns a new operator node as a result of the `Cos` function.
func Cos(x Node) Node {
return globalGraph.Cos(x)
}
// Exp returns a new operator node as a result of the `Exp` function.
func Exp(x Node) Node {
return globalGraph.Exp(x)
}
// Log returns a new operator node as a result of the `Log` function.
func Log(x Node) Node {
return globalGraph.Log(x)
}
// Abs returns a new operator node as a result of the `Abs` function.
func Abs(x Node) Node {
return globalGraph.Abs(x)
}
// Neg returns a new operator node as a result of the `Neg` function.
func Neg(x Node) Node {
return globalGraph.Neg(x)
}
// Reciprocal returns a new operator node as a result of the `Reciprocal` function.
func Reciprocal(x Node) Node {
return globalGraph.Reciprocal(x)
}
// ReduceSum returns a new operator node as a result of the fn.ReduceSum function.
func ReduceSum(x Node) Node {
return globalGraph.ReduceSum(x)
}
// ReduceMean returns a new operator node as a result of the fn.ReduceMean function.
func ReduceMean(x Node) Node {
return globalGraph.ReduceMean(x)
}
// Sum returns the value that describes the sum of the sample.
func Sum(xs ...Node) Node {
return globalGraph.Sum(xs...)
}
// Mean returns the value that describes the average of the sample.
func Mean(xs []Node) Node {
return globalGraph.Mean(xs)
}
// Concat returns a new operator node as a result of the fn.Concat function.
func Concat(xs ...Node) Node {
return globalGraph.Concat(xs...)
}
// Stack returns a new operator node as a result of the fn.Stack function.
func Stack(xs ...Node) Node {
return globalGraph.Stack(xs...)
} | pkg/ml/ag/global.go | 0.805403 | 0.416025 | global.go | starcoder |
package iso20022
// Breakdown of cash movements into a fund as a result of investment funds transactions, eg, subscriptions or switch-in.
// Field tags follow the ISO 20022 XML element names.
type FundCashInBreakdown3 struct {
	// Amount of cash flow in, expressed as an amount of money.
	Amount *ActiveOrHistoricCurrencyAndAmount `xml:"Amt,omitempty"`
	// Amount of the cash flow in, expressed as a number of units.
	UnitsNumber *FinancialInstrumentQuantity1 `xml:"UnitsNb,omitempty"`
	// Indicates whether the cash flow is an item that did not appear on the previously sent report, for example, because it was received close to cut-off time.
	NewAmountIndicator *YesNoIndicator `xml:"NewAmtInd,omitempty"`
	// Type of transaction that resulted in the cash-in movement, for example, subscription, switch-in.
	InvestmentFundTransactionInType *InvestmentFundTransactionInType1Choice `xml:"InvstmtFndTxInTp"`
	// Specifies how the original order was expressed that resulted in the cash-in movement, that is cash or units.
	OriginalOrderQuantityType *QuantityType1Choice `xml:"OrgnlOrdrQtyTp"`
	// Charge for the placement of an order.
	ChargeDetails []*Charge26 `xml:"ChrgDtls,omitempty"`
	// Information related to the commission applied to an order, for example, back-end or front-end commission.
	CommissionDetails []*Commission21 `xml:"ComssnDtls,omitempty"`
	// Settlement currency for the transaction.
	SettlementCurrency *ActiveCurrencyCode `xml:"SttlmCcy,omitempty"`
}
// SetAmount sets the cash-in amount from a decimal string and a currency code.
func (f *FundCashInBreakdown3) SetAmount(value, currency string) {
	f.Amount = NewActiveOrHistoricCurrencyAndAmount(value, currency)
}

// AddUnitsNumber initializes the UnitsNumber field and returns it so the
// caller can populate it.
func (f *FundCashInBreakdown3) AddUnitsNumber() *FinancialInstrumentQuantity1 {
	f.UnitsNumber = new(FinancialInstrumentQuantity1)
	return f.UnitsNumber
}

// SetNewAmountIndicator sets the new-amount indicator from its string form.
func (f *FundCashInBreakdown3) SetNewAmountIndicator(value string) {
	f.NewAmountIndicator = (*YesNoIndicator)(&value)
}

// AddInvestmentFundTransactionInType initializes the transaction-in type
// choice and returns it for further population.
func (f *FundCashInBreakdown3) AddInvestmentFundTransactionInType() *InvestmentFundTransactionInType1Choice {
	f.InvestmentFundTransactionInType = new(InvestmentFundTransactionInType1Choice)
	return f.InvestmentFundTransactionInType
}

// AddOriginalOrderQuantityType initializes the original-order quantity type
// choice and returns it for further population.
func (f *FundCashInBreakdown3) AddOriginalOrderQuantityType() *QuantityType1Choice {
	f.OriginalOrderQuantityType = new(QuantityType1Choice)
	return f.OriginalOrderQuantityType
}

// AddChargeDetails appends a new Charge26 to ChargeDetails and returns it.
func (f *FundCashInBreakdown3) AddChargeDetails() *Charge26 {
	newValue := new(Charge26)
	f.ChargeDetails = append(f.ChargeDetails, newValue)
	return newValue
}

// AddCommissionDetails appends a new Commission21 to CommissionDetails and
// returns it.
func (f *FundCashInBreakdown3) AddCommissionDetails() *Commission21 {
	newValue := new(Commission21)
	f.CommissionDetails = append(f.CommissionDetails, newValue)
	return newValue
}
func (f *FundCashInBreakdown3) SetSettlementCurrency(value string) {
f.SettlementCurrency = (*ActiveCurrencyCode)(&value)
} | FundCashInBreakdown3.go | 0.812123 | 0.453746 | FundCashInBreakdown3.go | starcoder |
package rgb8
// Conversion of different RGB colorspaces with their native illuminators (reference whites) to CIE XYZ scaled to [0, 1e9] and back.
// RGB values MUST BE LINEAR and in the nominal range [0, 2^8].
// XYZ values are usually in [0, 1e9] but may be slightly outside this interval.
// Ref.: [24]
// AdobeToApple converts from Adobe RGB to Apple RGB colorspace.
func AdobeToApple(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ(r, g, b)
r1, g1, b1 = XYZToApple(x, y, z)
return
}
// AdobeToBest converts from Adobe RGB to Best RGB colorspace.
func AdobeToBest(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ(r, g, b)
r1, g1, b1 = XYZToBest(x, y, z)
return
}
// AdobeToBeta converts from Adobe RGB to Beta RGB colorspace.
func AdobeToBeta(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ(r, g, b)
r1, g1, b1 = XYZToBeta(x, y, z)
return
}
// AdobeToBruce converts from Adobe RGB to Bruce RGB colorspace.
func AdobeToBruce(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ(r, g, b)
r1, g1, b1 = XYZToBruce(x, y, z)
return
}
// AdobeToCIE converts from Adobe RGB to CIE RGB colorspace.
func AdobeToCIE(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ(r, g, b)
r1, g1, b1 = XYZToCIE(x, y, z)
return
}
// AdobeToColorMatch converts from Adobe RGB to ColorMatch RGB colorspace.
func AdobeToColorMatch(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ(r, g, b)
r1, g1, b1 = XYZToColorMatch(x, y, z)
return
}
// AdobeToDon converts from Adobe RGB to Don RGB colorspace.
func AdobeToDon(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ(r, g, b)
r1, g1, b1 = XYZToDon(x, y, z)
return
}
// AdobeToECI converts from Adobe RGB to ECI RGB colorspace.
func AdobeToECI(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ(r, g, b)
r1, g1, b1 = XYZToECI(x, y, z)
return
}
// AdobeToEktaSpace converts from Adobe RGB to EktaSpace RGB colorspace.
func AdobeToEktaSpace(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ(r, g, b)
r1, g1, b1 = XYZToEktaSpace(x, y, z)
return
}
// AdobeToNTSC converts from Adobe RGB to NTSC RGB colorspace.
func AdobeToNTSC(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ(r, g, b)
r1, g1, b1 = XYZToNTSC(x, y, z)
return
}
// AdobeToPAL converts from Adobe RGB to PAL RGB colorspace.
func AdobeToPAL(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ(r, g, b)
r1, g1, b1 = XYZToPAL(x, y, z)
return
}
// AdobeToProPhoto converts from Adobe RGB to ProPhoto RGB colorspace.
func AdobeToProPhoto(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ(r, g, b)
r1, g1, b1 = XYZToProPhoto(x, y, z)
return
}
// AdobeToSMPTE_C converts from Adobe RGB to SMPTE_C RGB colorspace.
func AdobeToSMPTE_C(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ(r, g, b)
r1, g1, b1 = XYZToSMPTE_C(x, y, z)
return
}
// AdobeToSRGB converts from Adobe RGB to SRGB RGB colorspace.
func AdobeToSRGB(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ(r, g, b)
r1, g1, b1 = XYZToSRGB(x, y, z)
return
}
// AdobeToWGamut converts from Adobe RGB to WGamut RGB colorspace.
func AdobeToWGamut(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ(r, g, b)
r1, g1, b1 = XYZToWGamut(x, y, z)
return
}
// AdobeToAdobe_D50 converts from Adobe RGB to Adobe_D50 RGB colorspace.
func AdobeToAdobe_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ(r, g, b)
r1, g1, b1 = XYZToAdobe_D50(x, y, z)
return
}
// AdobeToApple_D50 converts from Adobe RGB to Apple_D50 RGB colorspace.
func AdobeToApple_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ(r, g, b)
r1, g1, b1 = XYZToApple_D50(x, y, z)
return
}
// AdobeToBruce_D50 converts from Adobe RGB to Bruce_D50 RGB colorspace.
func AdobeToBruce_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ(r, g, b)
r1, g1, b1 = XYZToBruce_D50(x, y, z)
return
}
// AdobeToCie_D50 converts from Adobe RGB to Cie_D50 RGB colorspace.
func AdobeToCie_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ(r, g, b)
r1, g1, b1 = XYZToCie_D50(x, y, z)
return
}
// AdobeToNTSC_D50 converts from Adobe RGB to NTSC_D50 RGB colorspace.
func AdobeToNTSC_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ(r, g, b)
r1, g1, b1 = XYZToNTSC_D50(x, y, z)
return
}
// AdobeToPAL_D50 converts from Adobe RGB to PAL_D50 RGB colorspace.
func AdobeToPAL_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ(r, g, b)
r1, g1, b1 = XYZToPAL_D50(x, y, z)
return
}
// AdobeToSMPTE_C_D50 converts from Adobe RGB to SMPTE_C_D50 RGB colorspace.
func AdobeToSMPTE_C_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ(r, g, b)
r1, g1, b1 = XYZToSMPTE_C_D50(x, y, z)
return
}
// AdobeToSRGB_D50 converts from Adobe RGB to SRGB_D50 RGB colorspace.
func AdobeToSRGB_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ(r, g, b)
r1, g1, b1 = XYZToSRGB_D50(x, y, z)
return
}
// AppleToAdobe converts from Apple RGB to Adobe RGB colorspace.
func AppleToAdobe(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ(r, g, b)
r1, g1, b1 = XYZToAdobe(x, y, z)
return
}
// AppleToBest converts from Apple RGB to Best RGB colorspace.
func AppleToBest(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ(r, g, b)
r1, g1, b1 = XYZToBest(x, y, z)
return
}
// AppleToBeta converts from Apple RGB to Beta RGB colorspace.
func AppleToBeta(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ(r, g, b)
r1, g1, b1 = XYZToBeta(x, y, z)
return
}
// AppleToBruce converts from Apple RGB to Bruce RGB colorspace.
func AppleToBruce(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ(r, g, b)
r1, g1, b1 = XYZToBruce(x, y, z)
return
}
// AppleToCIE converts from Apple RGB to CIE RGB colorspace.
func AppleToCIE(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ(r, g, b)
r1, g1, b1 = XYZToCIE(x, y, z)
return
}
// AppleToColorMatch converts from Apple RGB to ColorMatch RGB colorspace.
func AppleToColorMatch(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ(r, g, b)
r1, g1, b1 = XYZToColorMatch(x, y, z)
return
}
// AppleToDon converts from Apple RGB to Don RGB colorspace.
func AppleToDon(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ(r, g, b)
r1, g1, b1 = XYZToDon(x, y, z)
return
}
// AppleToECI converts from Apple RGB to ECI RGB colorspace.
func AppleToECI(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ(r, g, b)
r1, g1, b1 = XYZToECI(x, y, z)
return
}
// AppleToEktaSpace converts from Apple RGB to EktaSpace RGB colorspace.
func AppleToEktaSpace(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ(r, g, b)
r1, g1, b1 = XYZToEktaSpace(x, y, z)
return
}
// AppleToNTSC converts from Apple RGB to NTSC RGB colorspace.
func AppleToNTSC(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ(r, g, b)
r1, g1, b1 = XYZToNTSC(x, y, z)
return
}
// AppleToPAL converts from Apple RGB to PAL RGB colorspace.
func AppleToPAL(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ(r, g, b)
r1, g1, b1 = XYZToPAL(x, y, z)
return
}
// AppleToProPhoto converts from Apple RGB to ProPhoto RGB colorspace.
func AppleToProPhoto(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ(r, g, b)
r1, g1, b1 = XYZToProPhoto(x, y, z)
return
}
// AppleToSMPTE_C converts from Apple RGB to SMPTE_C RGB colorspace.
func AppleToSMPTE_C(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ(r, g, b)
r1, g1, b1 = XYZToSMPTE_C(x, y, z)
return
}
// AppleToSRGB converts from Apple RGB to SRGB RGB colorspace.
func AppleToSRGB(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ(r, g, b)
r1, g1, b1 = XYZToSRGB(x, y, z)
return
}
// AppleToWGamut converts from Apple RGB to WGamut RGB colorspace.
func AppleToWGamut(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ(r, g, b)
r1, g1, b1 = XYZToWGamut(x, y, z)
return
}
// AppleToAdobe_D50 converts from Apple RGB to Adobe_D50 RGB colorspace.
func AppleToAdobe_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ(r, g, b)
r1, g1, b1 = XYZToAdobe_D50(x, y, z)
return
}
// AppleToApple_D50 converts from Apple RGB to Apple_D50 RGB colorspace.
func AppleToApple_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ(r, g, b)
r1, g1, b1 = XYZToApple_D50(x, y, z)
return
}
// AppleToBruce_D50 converts from Apple RGB to Bruce_D50 RGB colorspace.
func AppleToBruce_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ(r, g, b)
r1, g1, b1 = XYZToBruce_D50(x, y, z)
return
}
// AppleToCie_D50 converts from Apple RGB to Cie_D50 RGB colorspace.
func AppleToCie_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ(r, g, b)
r1, g1, b1 = XYZToCie_D50(x, y, z)
return
}
// AppleToNTSC_D50 converts from Apple RGB to NTSC_D50 RGB colorspace.
func AppleToNTSC_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ(r, g, b)
r1, g1, b1 = XYZToNTSC_D50(x, y, z)
return
}
// AppleToPAL_D50 converts from Apple RGB to PAL_D50 RGB colorspace.
func AppleToPAL_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ(r, g, b)
r1, g1, b1 = XYZToPAL_D50(x, y, z)
return
}
// AppleToSMPTE_C_D50 converts from Apple RGB to SMPTE_C_D50 RGB colorspace.
func AppleToSMPTE_C_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ(r, g, b)
r1, g1, b1 = XYZToSMPTE_C_D50(x, y, z)
return
}
// AppleToSRGB_D50 converts from Apple RGB to SRGB_D50 RGB colorspace.
func AppleToSRGB_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ(r, g, b)
r1, g1, b1 = XYZToSRGB_D50(x, y, z)
return
}
// BestToAdobe converts from Best RGB to Adobe RGB colorspace.
func BestToAdobe(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BestToXYZ(r, g, b)
r1, g1, b1 = XYZToAdobe(x, y, z)
return
}
// BestToApple converts from Best RGB to Apple RGB colorspace.
func BestToApple(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BestToXYZ(r, g, b)
r1, g1, b1 = XYZToApple(x, y, z)
return
}
// BestToBeta converts from Best RGB to Beta RGB colorspace.
func BestToBeta(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BestToXYZ(r, g, b)
r1, g1, b1 = XYZToBeta(x, y, z)
return
}
// BestToBruce converts from Best RGB to Bruce RGB colorspace.
func BestToBruce(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BestToXYZ(r, g, b)
r1, g1, b1 = XYZToBruce(x, y, z)
return
}
// BestToCIE converts from Best RGB to CIE RGB colorspace.
func BestToCIE(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BestToXYZ(r, g, b)
r1, g1, b1 = XYZToCIE(x, y, z)
return
}
// BestToColorMatch converts from Best RGB to ColorMatch RGB colorspace.
func BestToColorMatch(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BestToXYZ(r, g, b)
r1, g1, b1 = XYZToColorMatch(x, y, z)
return
}
// BestToDon converts from Best RGB to Don RGB colorspace.
func BestToDon(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BestToXYZ(r, g, b)
r1, g1, b1 = XYZToDon(x, y, z)
return
}
// BestToECI converts from Best RGB to ECI RGB colorspace.
func BestToECI(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BestToXYZ(r, g, b)
r1, g1, b1 = XYZToECI(x, y, z)
return
}
// BestToEktaSpace converts from Best RGB to EktaSpace RGB colorspace.
func BestToEktaSpace(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BestToXYZ(r, g, b)
r1, g1, b1 = XYZToEktaSpace(x, y, z)
return
}
// BestToNTSC converts from Best RGB to NTSC RGB colorspace.
func BestToNTSC(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BestToXYZ(r, g, b)
r1, g1, b1 = XYZToNTSC(x, y, z)
return
}
// BestToPAL converts from Best RGB to PAL RGB colorspace.
func BestToPAL(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BestToXYZ(r, g, b)
r1, g1, b1 = XYZToPAL(x, y, z)
return
}
// BestToProPhoto converts from Best RGB to ProPhoto RGB colorspace.
func BestToProPhoto(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BestToXYZ(r, g, b)
r1, g1, b1 = XYZToProPhoto(x, y, z)
return
}
// BestToSMPTE_C converts from Best RGB to SMPTE_C RGB colorspace.
func BestToSMPTE_C(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BestToXYZ(r, g, b)
r1, g1, b1 = XYZToSMPTE_C(x, y, z)
return
}
// BestToSRGB converts from Best RGB to SRGB RGB colorspace.
func BestToSRGB(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BestToXYZ(r, g, b)
r1, g1, b1 = XYZToSRGB(x, y, z)
return
}
// BestToWGamut converts from Best RGB to WGamut RGB colorspace.
func BestToWGamut(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BestToXYZ(r, g, b)
r1, g1, b1 = XYZToWGamut(x, y, z)
return
}
// BestToAdobe_D50 converts from Best RGB to Adobe_D50 RGB colorspace.
func BestToAdobe_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BestToXYZ(r, g, b)
r1, g1, b1 = XYZToAdobe_D50(x, y, z)
return
}
// BestToApple_D50 converts from Best RGB to Apple_D50 RGB colorspace.
func BestToApple_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BestToXYZ(r, g, b)
r1, g1, b1 = XYZToApple_D50(x, y, z)
return
}
// BestToBruce_D50 converts from Best RGB to Bruce_D50 RGB colorspace.
func BestToBruce_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BestToXYZ(r, g, b)
r1, g1, b1 = XYZToBruce_D50(x, y, z)
return
}
// BestToCie_D50 converts from Best RGB to Cie_D50 RGB colorspace.
func BestToCie_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BestToXYZ(r, g, b)
r1, g1, b1 = XYZToCie_D50(x, y, z)
return
}
// BestToNTSC_D50 converts from Best RGB to NTSC_D50 RGB colorspace.
func BestToNTSC_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BestToXYZ(r, g, b)
r1, g1, b1 = XYZToNTSC_D50(x, y, z)
return
}
// BestToPAL_D50 converts from Best RGB to PAL_D50 RGB colorspace.
func BestToPAL_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BestToXYZ(r, g, b)
r1, g1, b1 = XYZToPAL_D50(x, y, z)
return
}
// BestToSMPTE_C_D50 converts from Best RGB to SMPTE_C_D50 RGB colorspace.
func BestToSMPTE_C_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BestToXYZ(r, g, b)
r1, g1, b1 = XYZToSMPTE_C_D50(x, y, z)
return
}
// BestToSRGB_D50 converts from Best RGB to SRGB_D50 RGB colorspace.
func BestToSRGB_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BestToXYZ(r, g, b)
r1, g1, b1 = XYZToSRGB_D50(x, y, z)
return
}
// BetaToAdobe converts from Beta RGB to Adobe RGB colorspace.
func BetaToAdobe(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BetaToXYZ(r, g, b)
r1, g1, b1 = XYZToAdobe(x, y, z)
return
}
// BetaToApple converts from Beta RGB to Apple RGB colorspace.
func BetaToApple(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BetaToXYZ(r, g, b)
r1, g1, b1 = XYZToApple(x, y, z)
return
}
// BetaToBest converts from Beta RGB to Best RGB colorspace.
func BetaToBest(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BetaToXYZ(r, g, b)
r1, g1, b1 = XYZToBest(x, y, z)
return
}
// BetaToBruce converts from Beta RGB to Bruce RGB colorspace.
func BetaToBruce(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BetaToXYZ(r, g, b)
r1, g1, b1 = XYZToBruce(x, y, z)
return
}
// BetaToCIE converts from Beta RGB to CIE RGB colorspace.
func BetaToCIE(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BetaToXYZ(r, g, b)
r1, g1, b1 = XYZToCIE(x, y, z)
return
}
// BetaToColorMatch converts from Beta RGB to ColorMatch RGB colorspace.
func BetaToColorMatch(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BetaToXYZ(r, g, b)
r1, g1, b1 = XYZToColorMatch(x, y, z)
return
}
// BetaToDon converts from Beta RGB to Don RGB colorspace.
func BetaToDon(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BetaToXYZ(r, g, b)
r1, g1, b1 = XYZToDon(x, y, z)
return
}
// BetaToECI converts from Beta RGB to ECI RGB colorspace.
func BetaToECI(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BetaToXYZ(r, g, b)
r1, g1, b1 = XYZToECI(x, y, z)
return
}
// BetaToEktaSpace converts from Beta RGB to EktaSpace RGB colorspace.
func BetaToEktaSpace(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BetaToXYZ(r, g, b)
r1, g1, b1 = XYZToEktaSpace(x, y, z)
return
}
// BetaToNTSC converts from Beta RGB to NTSC RGB colorspace.
func BetaToNTSC(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BetaToXYZ(r, g, b)
r1, g1, b1 = XYZToNTSC(x, y, z)
return
}
// BetaToPAL converts from Beta RGB to PAL RGB colorspace.
func BetaToPAL(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BetaToXYZ(r, g, b)
r1, g1, b1 = XYZToPAL(x, y, z)
return
}
// BetaToProPhoto converts from Beta RGB to ProPhoto RGB colorspace.
func BetaToProPhoto(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BetaToXYZ(r, g, b)
r1, g1, b1 = XYZToProPhoto(x, y, z)
return
}
// BetaToSMPTE_C converts from Beta RGB to SMPTE_C RGB colorspace.
func BetaToSMPTE_C(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BetaToXYZ(r, g, b)
r1, g1, b1 = XYZToSMPTE_C(x, y, z)
return
}
// BetaToSRGB converts from Beta RGB to SRGB RGB colorspace.
func BetaToSRGB(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BetaToXYZ(r, g, b)
r1, g1, b1 = XYZToSRGB(x, y, z)
return
}
// BetaToWGamut converts from Beta RGB to WGamut RGB colorspace.
func BetaToWGamut(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BetaToXYZ(r, g, b)
r1, g1, b1 = XYZToWGamut(x, y, z)
return
}
// BetaToAdobe_D50 converts from Beta RGB to Adobe_D50 RGB colorspace.
func BetaToAdobe_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BetaToXYZ(r, g, b)
r1, g1, b1 = XYZToAdobe_D50(x, y, z)
return
}
// BetaToApple_D50 converts from Beta RGB to Apple_D50 RGB colorspace.
func BetaToApple_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BetaToXYZ(r, g, b)
r1, g1, b1 = XYZToApple_D50(x, y, z)
return
}
// BetaToBruce_D50 converts from Beta RGB to Bruce_D50 RGB colorspace.
func BetaToBruce_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BetaToXYZ(r, g, b)
r1, g1, b1 = XYZToBruce_D50(x, y, z)
return
}
// BetaToCie_D50 converts from Beta RGB to Cie_D50 RGB colorspace.
func BetaToCie_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BetaToXYZ(r, g, b)
r1, g1, b1 = XYZToCie_D50(x, y, z)
return
}
// BetaToNTSC_D50 converts from Beta RGB to NTSC_D50 RGB colorspace.
func BetaToNTSC_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BetaToXYZ(r, g, b)
r1, g1, b1 = XYZToNTSC_D50(x, y, z)
return
}
// BetaToPAL_D50 converts from Beta RGB to PAL_D50 RGB colorspace.
func BetaToPAL_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BetaToXYZ(r, g, b)
r1, g1, b1 = XYZToPAL_D50(x, y, z)
return
}
// BetaToSMPTE_C_D50 converts from Beta RGB to SMPTE_C_D50 RGB colorspace.
func BetaToSMPTE_C_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BetaToXYZ(r, g, b)
r1, g1, b1 = XYZToSMPTE_C_D50(x, y, z)
return
}
// BetaToSRGB_D50 converts from Beta RGB to SRGB_D50 RGB colorspace.
func BetaToSRGB_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BetaToXYZ(r, g, b)
r1, g1, b1 = XYZToSRGB_D50(x, y, z)
return
}
// BruceToAdobe converts from Bruce RGB to Adobe RGB colorspace.
func BruceToAdobe(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ(r, g, b)
r1, g1, b1 = XYZToAdobe(x, y, z)
return
}
// BruceToApple converts from Bruce RGB to Apple RGB colorspace.
func BruceToApple(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ(r, g, b)
r1, g1, b1 = XYZToApple(x, y, z)
return
}
// BruceToBest converts from Bruce RGB to Best RGB colorspace.
func BruceToBest(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ(r, g, b)
r1, g1, b1 = XYZToBest(x, y, z)
return
}
// BruceToBeta converts from Bruce RGB to Beta RGB colorspace.
func BruceToBeta(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ(r, g, b)
r1, g1, b1 = XYZToBeta(x, y, z)
return
}
// BruceToCIE converts from Bruce RGB to CIE RGB colorspace.
func BruceToCIE(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ(r, g, b)
r1, g1, b1 = XYZToCIE(x, y, z)
return
}
// BruceToColorMatch converts from Bruce RGB to ColorMatch RGB colorspace.
func BruceToColorMatch(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ(r, g, b)
r1, g1, b1 = XYZToColorMatch(x, y, z)
return
}
// BruceToDon converts from Bruce RGB to Don RGB colorspace.
func BruceToDon(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ(r, g, b)
r1, g1, b1 = XYZToDon(x, y, z)
return
}
// BruceToECI converts from Bruce RGB to ECI RGB colorspace.
func BruceToECI(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ(r, g, b)
r1, g1, b1 = XYZToECI(x, y, z)
return
}
// BruceToEktaSpace converts from Bruce RGB to EktaSpace RGB colorspace.
func BruceToEktaSpace(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ(r, g, b)
r1, g1, b1 = XYZToEktaSpace(x, y, z)
return
}
// BruceToNTSC converts from Bruce RGB to NTSC RGB colorspace.
func BruceToNTSC(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ(r, g, b)
r1, g1, b1 = XYZToNTSC(x, y, z)
return
}
// BruceToPAL converts from Bruce RGB to PAL RGB colorspace.
func BruceToPAL(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ(r, g, b)
r1, g1, b1 = XYZToPAL(x, y, z)
return
}
// BruceToProPhoto converts from Bruce RGB to ProPhoto RGB colorspace.
func BruceToProPhoto(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ(r, g, b)
r1, g1, b1 = XYZToProPhoto(x, y, z)
return
}
// BruceToSMPTE_C converts from Bruce RGB to SMPTE_C RGB colorspace.
func BruceToSMPTE_C(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ(r, g, b)
r1, g1, b1 = XYZToSMPTE_C(x, y, z)
return
}
// BruceToSRGB converts from Bruce RGB to SRGB RGB colorspace.
func BruceToSRGB(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ(r, g, b)
r1, g1, b1 = XYZToSRGB(x, y, z)
return
}
// BruceToWGamut converts from Bruce RGB to WGamut RGB colorspace.
func BruceToWGamut(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ(r, g, b)
r1, g1, b1 = XYZToWGamut(x, y, z)
return
}
// BruceToAdobe_D50 converts from Bruce RGB to Adobe_D50 RGB colorspace.
func BruceToAdobe_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ(r, g, b)
r1, g1, b1 = XYZToAdobe_D50(x, y, z)
return
}
// BruceToApple_D50 converts from Bruce RGB to Apple_D50 RGB colorspace.
func BruceToApple_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ(r, g, b)
r1, g1, b1 = XYZToApple_D50(x, y, z)
return
}
// BruceToBruce_D50 converts from Bruce RGB to Bruce_D50 RGB colorspace.
func BruceToBruce_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ(r, g, b)
r1, g1, b1 = XYZToBruce_D50(x, y, z)
return
}
// BruceToCie_D50 converts from Bruce RGB to Cie_D50 RGB colorspace.
func BruceToCie_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ(r, g, b)
r1, g1, b1 = XYZToCie_D50(x, y, z)
return
}
// BruceToNTSC_D50 converts from Bruce RGB to NTSC_D50 RGB colorspace.
func BruceToNTSC_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ(r, g, b)
r1, g1, b1 = XYZToNTSC_D50(x, y, z)
return
}
// BruceToPAL_D50 converts from Bruce RGB to PAL_D50 RGB colorspace.
func BruceToPAL_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ(r, g, b)
r1, g1, b1 = XYZToPAL_D50(x, y, z)
return
}
// BruceToSMPTE_C_D50 converts from Bruce RGB to SMPTE_C_D50 RGB colorspace.
func BruceToSMPTE_C_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ(r, g, b)
r1, g1, b1 = XYZToSMPTE_C_D50(x, y, z)
return
}
// BruceToSRGB_D50 converts from Bruce RGB to SRGB_D50 RGB colorspace.
func BruceToSRGB_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ(r, g, b)
r1, g1, b1 = XYZToSRGB_D50(x, y, z)
return
}
// CIEToAdobe converts from CIE RGB to Adobe RGB colorspace.
func CIEToAdobe(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CIEToXYZ(r, g, b)
r1, g1, b1 = XYZToAdobe(x, y, z)
return
}
// CIEToApple converts from CIE RGB to Apple RGB colorspace.
func CIEToApple(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CIEToXYZ(r, g, b)
r1, g1, b1 = XYZToApple(x, y, z)
return
}
// CIEToBest converts from CIE RGB to Best RGB colorspace.
func CIEToBest(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CIEToXYZ(r, g, b)
r1, g1, b1 = XYZToBest(x, y, z)
return
}
// CIEToBeta converts from CIE RGB to Beta RGB colorspace.
func CIEToBeta(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CIEToXYZ(r, g, b)
r1, g1, b1 = XYZToBeta(x, y, z)
return
}
// CIEToBruce converts from CIE RGB to Bruce RGB colorspace.
func CIEToBruce(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CIEToXYZ(r, g, b)
r1, g1, b1 = XYZToBruce(x, y, z)
return
}
// CIEToColorMatch converts from CIE RGB to ColorMatch RGB colorspace.
func CIEToColorMatch(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CIEToXYZ(r, g, b)
r1, g1, b1 = XYZToColorMatch(x, y, z)
return
}
// CIEToDon converts from CIE RGB to Don RGB colorspace.
func CIEToDon(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CIEToXYZ(r, g, b)
r1, g1, b1 = XYZToDon(x, y, z)
return
}
// CIEToECI converts from CIE RGB to ECI RGB colorspace.
func CIEToECI(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CIEToXYZ(r, g, b)
r1, g1, b1 = XYZToECI(x, y, z)
return
}
// CIEToEktaSpace converts from CIE RGB to EktaSpace RGB colorspace.
func CIEToEktaSpace(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CIEToXYZ(r, g, b)
r1, g1, b1 = XYZToEktaSpace(x, y, z)
return
}
// CIEToNTSC converts from CIE RGB to NTSC RGB colorspace.
func CIEToNTSC(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CIEToXYZ(r, g, b)
r1, g1, b1 = XYZToNTSC(x, y, z)
return
}
// CIEToPAL converts from CIE RGB to PAL RGB colorspace.
func CIEToPAL(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CIEToXYZ(r, g, b)
r1, g1, b1 = XYZToPAL(x, y, z)
return
}
// CIEToProPhoto converts from CIE RGB to ProPhoto RGB colorspace.
func CIEToProPhoto(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CIEToXYZ(r, g, b)
r1, g1, b1 = XYZToProPhoto(x, y, z)
return
}
// CIEToSMPTE_C converts from CIE RGB to SMPTE_C RGB colorspace.
func CIEToSMPTE_C(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CIEToXYZ(r, g, b)
r1, g1, b1 = XYZToSMPTE_C(x, y, z)
return
}
// CIEToSRGB converts from CIE RGB to SRGB RGB colorspace.
func CIEToSRGB(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CIEToXYZ(r, g, b)
r1, g1, b1 = XYZToSRGB(x, y, z)
return
}
// CIEToWGamut converts from CIE RGB to WGamut RGB colorspace.
func CIEToWGamut(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CIEToXYZ(r, g, b)
r1, g1, b1 = XYZToWGamut(x, y, z)
return
}
// CIEToAdobe_D50 converts from CIE RGB to Adobe_D50 RGB colorspace.
func CIEToAdobe_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CIEToXYZ(r, g, b)
r1, g1, b1 = XYZToAdobe_D50(x, y, z)
return
}
// CIEToApple_D50 converts from CIE RGB to Apple_D50 RGB colorspace.
func CIEToApple_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CIEToXYZ(r, g, b)
r1, g1, b1 = XYZToApple_D50(x, y, z)
return
}
// CIEToBruce_D50 converts from CIE RGB to Bruce_D50 RGB colorspace.
func CIEToBruce_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CIEToXYZ(r, g, b)
r1, g1, b1 = XYZToBruce_D50(x, y, z)
return
}
// CIEToCie_D50 converts from CIE RGB to Cie_D50 RGB colorspace.
func CIEToCie_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CIEToXYZ(r, g, b)
r1, g1, b1 = XYZToCie_D50(x, y, z)
return
}
// CIEToNTSC_D50 converts from CIE RGB to NTSC_D50 RGB colorspace.
func CIEToNTSC_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CIEToXYZ(r, g, b)
r1, g1, b1 = XYZToNTSC_D50(x, y, z)
return
}
// CIEToPAL_D50 converts from CIE RGB to PAL_D50 RGB colorspace.
func CIEToPAL_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CIEToXYZ(r, g, b)
r1, g1, b1 = XYZToPAL_D50(x, y, z)
return
}
// CIEToSMPTE_C_D50 converts from CIE RGB to SMPTE_C_D50 RGB colorspace.
func CIEToSMPTE_C_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CIEToXYZ(r, g, b)
r1, g1, b1 = XYZToSMPTE_C_D50(x, y, z)
return
}
// CIEToSRGB_D50 converts from CIE RGB to SRGB_D50 RGB colorspace.
func CIEToSRGB_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CIEToXYZ(r, g, b)
r1, g1, b1 = XYZToSRGB_D50(x, y, z)
return
}
// ColorMatchToAdobe converts from ColorMatch RGB to Adobe RGB colorspace.
func ColorMatchToAdobe(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ColorMatchToXYZ(r, g, b)
r1, g1, b1 = XYZToAdobe(x, y, z)
return
}
// ColorMatchToApple converts from ColorMatch RGB to Apple RGB colorspace.
func ColorMatchToApple(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ColorMatchToXYZ(r, g, b)
r1, g1, b1 = XYZToApple(x, y, z)
return
}
// ColorMatchToBest converts from ColorMatch RGB to Best RGB colorspace.
func ColorMatchToBest(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ColorMatchToXYZ(r, g, b)
r1, g1, b1 = XYZToBest(x, y, z)
return
}
// ColorMatchToBeta converts from ColorMatch RGB to Beta RGB colorspace.
func ColorMatchToBeta(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ColorMatchToXYZ(r, g, b)
r1, g1, b1 = XYZToBeta(x, y, z)
return
}
// ColorMatchToBruce converts from ColorMatch RGB to Bruce RGB colorspace.
func ColorMatchToBruce(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ColorMatchToXYZ(r, g, b)
r1, g1, b1 = XYZToBruce(x, y, z)
return
}
// ColorMatchToCIE converts from ColorMatch RGB to CIE RGB colorspace.
func ColorMatchToCIE(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ColorMatchToXYZ(r, g, b)
r1, g1, b1 = XYZToCIE(x, y, z)
return
}
// ColorMatchToDon converts from ColorMatch RGB to Don RGB colorspace.
func ColorMatchToDon(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ColorMatchToXYZ(r, g, b)
r1, g1, b1 = XYZToDon(x, y, z)
return
}
// ColorMatchToECI converts from ColorMatch RGB to ECI RGB colorspace.
func ColorMatchToECI(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ColorMatchToXYZ(r, g, b)
r1, g1, b1 = XYZToECI(x, y, z)
return
}
// ColorMatchToEktaSpace converts from ColorMatch RGB to EktaSpace RGB colorspace.
func ColorMatchToEktaSpace(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ColorMatchToXYZ(r, g, b)
r1, g1, b1 = XYZToEktaSpace(x, y, z)
return
}
// ColorMatchToNTSC converts from ColorMatch RGB to NTSC RGB colorspace.
func ColorMatchToNTSC(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ColorMatchToXYZ(r, g, b)
r1, g1, b1 = XYZToNTSC(x, y, z)
return
}
// ColorMatchToPAL converts from ColorMatch RGB to PAL RGB colorspace.
func ColorMatchToPAL(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ColorMatchToXYZ(r, g, b)
r1, g1, b1 = XYZToPAL(x, y, z)
return
}
// ColorMatchToProPhoto converts from ColorMatch RGB to ProPhoto RGB colorspace.
func ColorMatchToProPhoto(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ColorMatchToXYZ(r, g, b)
r1, g1, b1 = XYZToProPhoto(x, y, z)
return
}
// ColorMatchToSMPTE_C converts from ColorMatch RGB to SMPTE_C RGB colorspace.
func ColorMatchToSMPTE_C(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ColorMatchToXYZ(r, g, b)
r1, g1, b1 = XYZToSMPTE_C(x, y, z)
return
}
// ColorMatchToSRGB converts from ColorMatch RGB to SRGB RGB colorspace.
func ColorMatchToSRGB(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ColorMatchToXYZ(r, g, b)
r1, g1, b1 = XYZToSRGB(x, y, z)
return
}
// ColorMatchToWGamut converts from ColorMatch RGB to WGamut RGB colorspace.
func ColorMatchToWGamut(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ColorMatchToXYZ(r, g, b)
r1, g1, b1 = XYZToWGamut(x, y, z)
return
}
// ColorMatchToAdobe_D50 converts from ColorMatch RGB to Adobe_D50 RGB colorspace.
func ColorMatchToAdobe_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ColorMatchToXYZ(r, g, b)
r1, g1, b1 = XYZToAdobe_D50(x, y, z)
return
}
// ColorMatchToApple_D50 converts from ColorMatch RGB to Apple_D50 RGB colorspace.
func ColorMatchToApple_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ColorMatchToXYZ(r, g, b)
r1, g1, b1 = XYZToApple_D50(x, y, z)
return
}
// ColorMatchToBruce_D50 converts from ColorMatch RGB to Bruce_D50 RGB colorspace.
func ColorMatchToBruce_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ColorMatchToXYZ(r, g, b)
r1, g1, b1 = XYZToBruce_D50(x, y, z)
return
}
// ColorMatchToCie_D50 converts from ColorMatch RGB to Cie_D50 RGB colorspace.
func ColorMatchToCie_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ColorMatchToXYZ(r, g, b)
r1, g1, b1 = XYZToCie_D50(x, y, z)
return
}
// ColorMatchToNTSC_D50 converts from ColorMatch RGB to NTSC_D50 RGB colorspace.
func ColorMatchToNTSC_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ColorMatchToXYZ(r, g, b)
r1, g1, b1 = XYZToNTSC_D50(x, y, z)
return
}
// ColorMatchToPAL_D50 converts from ColorMatch RGB to PAL_D50 RGB colorspace.
func ColorMatchToPAL_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ColorMatchToXYZ(r, g, b)
r1, g1, b1 = XYZToPAL_D50(x, y, z)
return
}
// ColorMatchToSMPTE_C_D50 converts from ColorMatch RGB to SMPTE_C_D50 RGB colorspace.
func ColorMatchToSMPTE_C_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ColorMatchToXYZ(r, g, b)
r1, g1, b1 = XYZToSMPTE_C_D50(x, y, z)
return
}
// ColorMatchToSRGB_D50 converts from ColorMatch RGB to SRGB_D50 RGB colorspace.
func ColorMatchToSRGB_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ColorMatchToXYZ(r, g, b)
r1, g1, b1 = XYZToSRGB_D50(x, y, z)
return
}
// DonToAdobe converts from Don RGB to Adobe RGB colorspace.
func DonToAdobe(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := DonToXYZ(r, g, b)
r1, g1, b1 = XYZToAdobe(x, y, z)
return
}
// DonToApple converts from Don RGB to Apple RGB colorspace.
func DonToApple(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := DonToXYZ(r, g, b)
r1, g1, b1 = XYZToApple(x, y, z)
return
}
// DonToBest converts from Don RGB to Best RGB colorspace.
func DonToBest(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := DonToXYZ(r, g, b)
r1, g1, b1 = XYZToBest(x, y, z)
return
}
// DonToBeta converts from Don RGB to Beta RGB colorspace.
func DonToBeta(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := DonToXYZ(r, g, b)
r1, g1, b1 = XYZToBeta(x, y, z)
return
}
// DonToBruce converts from Don RGB to Bruce RGB colorspace.
func DonToBruce(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := DonToXYZ(r, g, b)
r1, g1, b1 = XYZToBruce(x, y, z)
return
}
// DonToCIE converts from Don RGB to CIE RGB colorspace.
func DonToCIE(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := DonToXYZ(r, g, b)
r1, g1, b1 = XYZToCIE(x, y, z)
return
}
// DonToColorMatch converts from Don RGB to ColorMatch RGB colorspace.
func DonToColorMatch(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := DonToXYZ(r, g, b)
r1, g1, b1 = XYZToColorMatch(x, y, z)
return
}
// DonToECI converts from Don RGB to ECI RGB colorspace.
func DonToECI(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := DonToXYZ(r, g, b)
r1, g1, b1 = XYZToECI(x, y, z)
return
}
// DonToEktaSpace converts from Don RGB to EktaSpace RGB colorspace.
func DonToEktaSpace(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := DonToXYZ(r, g, b)
r1, g1, b1 = XYZToEktaSpace(x, y, z)
return
}
// DonToNTSC converts from Don RGB to NTSC RGB colorspace.
func DonToNTSC(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := DonToXYZ(r, g, b)
r1, g1, b1 = XYZToNTSC(x, y, z)
return
}
// DonToPAL converts from Don RGB to PAL RGB colorspace.
func DonToPAL(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := DonToXYZ(r, g, b)
r1, g1, b1 = XYZToPAL(x, y, z)
return
}
// DonToProPhoto converts from Don RGB to ProPhoto RGB colorspace.
func DonToProPhoto(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := DonToXYZ(r, g, b)
r1, g1, b1 = XYZToProPhoto(x, y, z)
return
}
// DonToSMPTE_C converts from Don RGB to SMPTE_C RGB colorspace.
func DonToSMPTE_C(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := DonToXYZ(r, g, b)
r1, g1, b1 = XYZToSMPTE_C(x, y, z)
return
}
// DonToSRGB converts from Don RGB to SRGB RGB colorspace.
func DonToSRGB(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := DonToXYZ(r, g, b)
r1, g1, b1 = XYZToSRGB(x, y, z)
return
}
// DonToWGamut converts from Don RGB to WGamut RGB colorspace.
func DonToWGamut(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := DonToXYZ(r, g, b)
r1, g1, b1 = XYZToWGamut(x, y, z)
return
}
// DonToAdobe_D50 converts from Don RGB to Adobe_D50 RGB colorspace.
func DonToAdobe_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := DonToXYZ(r, g, b)
r1, g1, b1 = XYZToAdobe_D50(x, y, z)
return
}
// DonToApple_D50 converts from Don RGB to Apple_D50 RGB colorspace.
func DonToApple_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := DonToXYZ(r, g, b)
r1, g1, b1 = XYZToApple_D50(x, y, z)
return
}
// DonToBruce_D50 converts from Don RGB to Bruce_D50 RGB colorspace.
func DonToBruce_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := DonToXYZ(r, g, b)
r1, g1, b1 = XYZToBruce_D50(x, y, z)
return
}
// DonToCie_D50 converts from Don RGB to Cie_D50 RGB colorspace.
func DonToCie_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := DonToXYZ(r, g, b)
r1, g1, b1 = XYZToCie_D50(x, y, z)
return
}
// DonToNTSC_D50 converts from Don RGB to NTSC_D50 RGB colorspace.
func DonToNTSC_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := DonToXYZ(r, g, b)
r1, g1, b1 = XYZToNTSC_D50(x, y, z)
return
}
// DonToPAL_D50 converts from Don RGB to PAL_D50 RGB colorspace.
func DonToPAL_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := DonToXYZ(r, g, b)
r1, g1, b1 = XYZToPAL_D50(x, y, z)
return
}
// DonToSMPTE_C_D50 converts from Don RGB to SMPTE_C_D50 RGB colorspace.
func DonToSMPTE_C_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := DonToXYZ(r, g, b)
r1, g1, b1 = XYZToSMPTE_C_D50(x, y, z)
return
}
// DonToSRGB_D50 converts from Don RGB to SRGB_D50 RGB colorspace.
func DonToSRGB_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := DonToXYZ(r, g, b)
r1, g1, b1 = XYZToSRGB_D50(x, y, z)
return
}
// ECIToAdobe converts from ECI RGB to Adobe RGB colorspace.
func ECIToAdobe(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ECIToXYZ(r, g, b)
r1, g1, b1 = XYZToAdobe(x, y, z)
return
}
// ECIToApple converts from ECI RGB to Apple RGB colorspace.
func ECIToApple(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ECIToXYZ(r, g, b)
r1, g1, b1 = XYZToApple(x, y, z)
return
}
// ECIToBest converts from ECI RGB to Best RGB colorspace.
func ECIToBest(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ECIToXYZ(r, g, b)
r1, g1, b1 = XYZToBest(x, y, z)
return
}
// ECIToBeta converts from ECI RGB to Beta RGB colorspace.
func ECIToBeta(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ECIToXYZ(r, g, b)
r1, g1, b1 = XYZToBeta(x, y, z)
return
}
// ECIToBruce converts from ECI RGB to Bruce RGB colorspace.
func ECIToBruce(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ECIToXYZ(r, g, b)
r1, g1, b1 = XYZToBruce(x, y, z)
return
}
// ECIToCIE converts from ECI RGB to CIE RGB colorspace.
func ECIToCIE(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ECIToXYZ(r, g, b)
r1, g1, b1 = XYZToCIE(x, y, z)
return
}
// ECIToColorMatch converts from ECI RGB to ColorMatch RGB colorspace.
func ECIToColorMatch(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ECIToXYZ(r, g, b)
r1, g1, b1 = XYZToColorMatch(x, y, z)
return
}
// ECIToDon converts from ECI RGB to Don RGB colorspace.
func ECIToDon(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ECIToXYZ(r, g, b)
r1, g1, b1 = XYZToDon(x, y, z)
return
}
// ECIToEktaSpace converts from ECI RGB to EktaSpace RGB colorspace.
func ECIToEktaSpace(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ECIToXYZ(r, g, b)
r1, g1, b1 = XYZToEktaSpace(x, y, z)
return
}
// ECIToNTSC converts from ECI RGB to NTSC RGB colorspace.
func ECIToNTSC(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ECIToXYZ(r, g, b)
r1, g1, b1 = XYZToNTSC(x, y, z)
return
}
// ECIToPAL converts from ECI RGB to PAL RGB colorspace.
func ECIToPAL(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ECIToXYZ(r, g, b)
r1, g1, b1 = XYZToPAL(x, y, z)
return
}
// ECIToProPhoto converts from ECI RGB to ProPhoto RGB colorspace.
func ECIToProPhoto(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ECIToXYZ(r, g, b)
r1, g1, b1 = XYZToProPhoto(x, y, z)
return
}
// ECIToSMPTE_C converts from ECI RGB to SMPTE_C RGB colorspace.
func ECIToSMPTE_C(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ECIToXYZ(r, g, b)
r1, g1, b1 = XYZToSMPTE_C(x, y, z)
return
}
// ECIToSRGB converts from ECI RGB to SRGB RGB colorspace.
func ECIToSRGB(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ECIToXYZ(r, g, b)
r1, g1, b1 = XYZToSRGB(x, y, z)
return
}
// ECIToWGamut converts from ECI RGB to WGamut RGB colorspace.
func ECIToWGamut(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ECIToXYZ(r, g, b)
r1, g1, b1 = XYZToWGamut(x, y, z)
return
}
// ECIToAdobe_D50 converts from ECI RGB to Adobe_D50 RGB colorspace.
func ECIToAdobe_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ECIToXYZ(r, g, b)
r1, g1, b1 = XYZToAdobe_D50(x, y, z)
return
}
// ECIToApple_D50 converts from ECI RGB to Apple_D50 RGB colorspace.
func ECIToApple_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ECIToXYZ(r, g, b)
r1, g1, b1 = XYZToApple_D50(x, y, z)
return
}
// ECIToBruce_D50 converts from ECI RGB to Bruce_D50 RGB colorspace.
func ECIToBruce_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ECIToXYZ(r, g, b)
r1, g1, b1 = XYZToBruce_D50(x, y, z)
return
}
// ECIToCie_D50 converts from ECI RGB to Cie_D50 RGB colorspace.
func ECIToCie_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ECIToXYZ(r, g, b)
r1, g1, b1 = XYZToCie_D50(x, y, z)
return
}
// ECIToNTSC_D50 converts from ECI RGB to NTSC_D50 RGB colorspace.
func ECIToNTSC_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ECIToXYZ(r, g, b)
r1, g1, b1 = XYZToNTSC_D50(x, y, z)
return
}
// ECIToPAL_D50 converts from ECI RGB to PAL_D50 RGB colorspace.
func ECIToPAL_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ECIToXYZ(r, g, b)
r1, g1, b1 = XYZToPAL_D50(x, y, z)
return
}
// ECIToSMPTE_C_D50 converts from ECI RGB to SMPTE_C_D50 RGB colorspace.
func ECIToSMPTE_C_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ECIToXYZ(r, g, b)
r1, g1, b1 = XYZToSMPTE_C_D50(x, y, z)
return
}
// ECIToSRGB_D50 converts from ECI RGB to SRGB_D50 RGB colorspace.
func ECIToSRGB_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ECIToXYZ(r, g, b)
r1, g1, b1 = XYZToSRGB_D50(x, y, z)
return
}
// EktaSpaceToAdobe converts from EktaSpace RGB to Adobe RGB colorspace.
func EktaSpaceToAdobe(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := EktaSpaceToXYZ(r, g, b)
r1, g1, b1 = XYZToAdobe(x, y, z)
return
}
// EktaSpaceToApple converts from EktaSpace RGB to Apple RGB colorspace.
func EktaSpaceToApple(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := EktaSpaceToXYZ(r, g, b)
r1, g1, b1 = XYZToApple(x, y, z)
return
}
// EktaSpaceToBest converts from EktaSpace RGB to Best RGB colorspace.
func EktaSpaceToBest(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := EktaSpaceToXYZ(r, g, b)
r1, g1, b1 = XYZToBest(x, y, z)
return
}
// EktaSpaceToBeta converts from EktaSpace RGB to Beta RGB colorspace.
func EktaSpaceToBeta(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := EktaSpaceToXYZ(r, g, b)
r1, g1, b1 = XYZToBeta(x, y, z)
return
}
// EktaSpaceToBruce converts from EktaSpace RGB to Bruce RGB colorspace.
func EktaSpaceToBruce(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := EktaSpaceToXYZ(r, g, b)
r1, g1, b1 = XYZToBruce(x, y, z)
return
}
// EktaSpaceToCIE converts from EktaSpace RGB to CIE RGB colorspace.
func EktaSpaceToCIE(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := EktaSpaceToXYZ(r, g, b)
r1, g1, b1 = XYZToCIE(x, y, z)
return
}
// EktaSpaceToColorMatch converts from EktaSpace RGB to ColorMatch RGB colorspace.
func EktaSpaceToColorMatch(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := EktaSpaceToXYZ(r, g, b)
r1, g1, b1 = XYZToColorMatch(x, y, z)
return
}
// EktaSpaceToDon converts from EktaSpace RGB to Don RGB colorspace.
func EktaSpaceToDon(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := EktaSpaceToXYZ(r, g, b)
r1, g1, b1 = XYZToDon(x, y, z)
return
}
// EktaSpaceToECI converts from EktaSpace RGB to ECI RGB colorspace.
func EktaSpaceToECI(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := EktaSpaceToXYZ(r, g, b)
r1, g1, b1 = XYZToECI(x, y, z)
return
}
// EktaSpaceToNTSC converts from EktaSpace RGB to NTSC RGB colorspace.
func EktaSpaceToNTSC(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := EktaSpaceToXYZ(r, g, b)
r1, g1, b1 = XYZToNTSC(x, y, z)
return
}
// EktaSpaceToPAL converts from EktaSpace RGB to PAL RGB colorspace.
func EktaSpaceToPAL(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := EktaSpaceToXYZ(r, g, b)
r1, g1, b1 = XYZToPAL(x, y, z)
return
}
// EktaSpaceToProPhoto converts from EktaSpace RGB to ProPhoto RGB colorspace.
func EktaSpaceToProPhoto(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := EktaSpaceToXYZ(r, g, b)
r1, g1, b1 = XYZToProPhoto(x, y, z)
return
}
// EktaSpaceToSMPTE_C converts from EktaSpace RGB to SMPTE_C RGB colorspace.
func EktaSpaceToSMPTE_C(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := EktaSpaceToXYZ(r, g, b)
r1, g1, b1 = XYZToSMPTE_C(x, y, z)
return
}
// EktaSpaceToSRGB converts from EktaSpace RGB to SRGB RGB colorspace.
func EktaSpaceToSRGB(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := EktaSpaceToXYZ(r, g, b)
r1, g1, b1 = XYZToSRGB(x, y, z)
return
}
// EktaSpaceToWGamut converts from EktaSpace RGB to WGamut RGB colorspace.
func EktaSpaceToWGamut(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := EktaSpaceToXYZ(r, g, b)
r1, g1, b1 = XYZToWGamut(x, y, z)
return
}
// EktaSpaceToAdobe_D50 converts from EktaSpace RGB to Adobe_D50 RGB colorspace.
func EktaSpaceToAdobe_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := EktaSpaceToXYZ(r, g, b)
r1, g1, b1 = XYZToAdobe_D50(x, y, z)
return
}
// EktaSpaceToApple_D50 converts from EktaSpace RGB to Apple_D50 RGB colorspace.
func EktaSpaceToApple_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := EktaSpaceToXYZ(r, g, b)
r1, g1, b1 = XYZToApple_D50(x, y, z)
return
}
// EktaSpaceToBruce_D50 converts from EktaSpace RGB to Bruce_D50 RGB colorspace.
func EktaSpaceToBruce_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := EktaSpaceToXYZ(r, g, b)
r1, g1, b1 = XYZToBruce_D50(x, y, z)
return
}
// EktaSpaceToCie_D50 converts from EktaSpace RGB to Cie_D50 RGB colorspace.
func EktaSpaceToCie_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := EktaSpaceToXYZ(r, g, b)
r1, g1, b1 = XYZToCie_D50(x, y, z)
return
}
// EktaSpaceToNTSC_D50 converts from EktaSpace RGB to NTSC_D50 RGB colorspace.
func EktaSpaceToNTSC_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := EktaSpaceToXYZ(r, g, b)
r1, g1, b1 = XYZToNTSC_D50(x, y, z)
return
}
// EktaSpaceToPAL_D50 converts from EktaSpace RGB to PAL_D50 RGB colorspace.
func EktaSpaceToPAL_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := EktaSpaceToXYZ(r, g, b)
r1, g1, b1 = XYZToPAL_D50(x, y, z)
return
}
// EktaSpaceToSMPTE_C_D50 converts from EktaSpace RGB to SMPTE_C_D50 RGB colorspace.
func EktaSpaceToSMPTE_C_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := EktaSpaceToXYZ(r, g, b)
r1, g1, b1 = XYZToSMPTE_C_D50(x, y, z)
return
}
// EktaSpaceToSRGB_D50 converts from EktaSpace RGB to SRGB_D50 RGB colorspace.
func EktaSpaceToSRGB_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := EktaSpaceToXYZ(r, g, b)
r1, g1, b1 = XYZToSRGB_D50(x, y, z)
return
}
// NTSCToAdobe converts from NTSC RGB to Adobe RGB colorspace.
func NTSCToAdobe(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ(r, g, b)
r1, g1, b1 = XYZToAdobe(x, y, z)
return
}
// NTSCToApple converts from NTSC RGB to Apple RGB colorspace.
func NTSCToApple(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ(r, g, b)
r1, g1, b1 = XYZToApple(x, y, z)
return
}
// NTSCToBest converts from NTSC RGB to Best RGB colorspace.
func NTSCToBest(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ(r, g, b)
r1, g1, b1 = XYZToBest(x, y, z)
return
}
// NTSCToBeta converts from NTSC RGB to Beta RGB colorspace.
func NTSCToBeta(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ(r, g, b)
r1, g1, b1 = XYZToBeta(x, y, z)
return
}
// NTSCToBruce converts from NTSC RGB to Bruce RGB colorspace.
func NTSCToBruce(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ(r, g, b)
r1, g1, b1 = XYZToBruce(x, y, z)
return
}
// NTSCToCIE converts from NTSC RGB to CIE RGB colorspace.
func NTSCToCIE(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ(r, g, b)
r1, g1, b1 = XYZToCIE(x, y, z)
return
}
// NTSCToColorMatch converts from NTSC RGB to ColorMatch RGB colorspace.
func NTSCToColorMatch(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ(r, g, b)
r1, g1, b1 = XYZToColorMatch(x, y, z)
return
}
// NTSCToDon converts from NTSC RGB to Don RGB colorspace.
func NTSCToDon(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ(r, g, b)
r1, g1, b1 = XYZToDon(x, y, z)
return
}
// NTSCToECI converts from NTSC RGB to ECI RGB colorspace.
func NTSCToECI(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ(r, g, b)
r1, g1, b1 = XYZToECI(x, y, z)
return
}
// NTSCToEktaSpace converts from NTSC RGB to EktaSpace RGB colorspace.
func NTSCToEktaSpace(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ(r, g, b)
r1, g1, b1 = XYZToEktaSpace(x, y, z)
return
}
// NTSCToPAL converts from NTSC RGB to PAL RGB colorspace.
func NTSCToPAL(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ(r, g, b)
r1, g1, b1 = XYZToPAL(x, y, z)
return
}
// NTSCToProPhoto converts from NTSC RGB to ProPhoto RGB colorspace.
func NTSCToProPhoto(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ(r, g, b)
r1, g1, b1 = XYZToProPhoto(x, y, z)
return
}
// NTSCToSMPTE_C converts from NTSC RGB to SMPTE_C RGB colorspace.
func NTSCToSMPTE_C(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ(r, g, b)
r1, g1, b1 = XYZToSMPTE_C(x, y, z)
return
}
// NTSCToSRGB converts from NTSC RGB to SRGB RGB colorspace.
func NTSCToSRGB(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ(r, g, b)
r1, g1, b1 = XYZToSRGB(x, y, z)
return
}
// NTSCToWGamut converts from NTSC RGB to WGamut RGB colorspace.
func NTSCToWGamut(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ(r, g, b)
r1, g1, b1 = XYZToWGamut(x, y, z)
return
}
// NTSCToAdobe_D50 converts from NTSC RGB to Adobe_D50 RGB colorspace.
func NTSCToAdobe_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ(r, g, b)
r1, g1, b1 = XYZToAdobe_D50(x, y, z)
return
}
// NTSCToApple_D50 converts from NTSC RGB to Apple_D50 RGB colorspace.
func NTSCToApple_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ(r, g, b)
r1, g1, b1 = XYZToApple_D50(x, y, z)
return
}
// NTSCToBruce_D50 converts from NTSC RGB to Bruce_D50 RGB colorspace.
func NTSCToBruce_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ(r, g, b)
r1, g1, b1 = XYZToBruce_D50(x, y, z)
return
}
// NTSCToCie_D50 converts from NTSC RGB to Cie_D50 RGB colorspace.
func NTSCToCie_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ(r, g, b)
r1, g1, b1 = XYZToCie_D50(x, y, z)
return
}
// NTSCToNTSC_D50 converts from NTSC RGB to NTSC_D50 RGB colorspace.
func NTSCToNTSC_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ(r, g, b)
r1, g1, b1 = XYZToNTSC_D50(x, y, z)
return
}
// NTSCToPAL_D50 converts from NTSC RGB to PAL_D50 RGB colorspace.
func NTSCToPAL_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ(r, g, b)
r1, g1, b1 = XYZToPAL_D50(x, y, z)
return
}
// NTSCToSMPTE_C_D50 converts from NTSC RGB to SMPTE_C_D50 RGB colorspace.
func NTSCToSMPTE_C_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ(r, g, b)
r1, g1, b1 = XYZToSMPTE_C_D50(x, y, z)
return
}
// NTSCToSRGB_D50 converts from NTSC RGB to SRGB_D50 RGB colorspace.
func NTSCToSRGB_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ(r, g, b)
r1, g1, b1 = XYZToSRGB_D50(x, y, z)
return
}
// PALToAdobe converts from PAL RGB to Adobe RGB colorspace.
func PALToAdobe(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ(r, g, b)
r1, g1, b1 = XYZToAdobe(x, y, z)
return
}
// PALToApple converts from PAL RGB to Apple RGB colorspace.
func PALToApple(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ(r, g, b)
r1, g1, b1 = XYZToApple(x, y, z)
return
}
// PALToBest converts from PAL RGB to Best RGB colorspace.
func PALToBest(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ(r, g, b)
r1, g1, b1 = XYZToBest(x, y, z)
return
}
// PALToBeta converts from PAL RGB to Beta RGB colorspace.
func PALToBeta(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ(r, g, b)
r1, g1, b1 = XYZToBeta(x, y, z)
return
}
// PALToBruce converts from PAL RGB to Bruce RGB colorspace.
func PALToBruce(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ(r, g, b)
r1, g1, b1 = XYZToBruce(x, y, z)
return
}
// PALToCIE converts from PAL RGB to CIE RGB colorspace.
func PALToCIE(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ(r, g, b)
r1, g1, b1 = XYZToCIE(x, y, z)
return
}
// PALToColorMatch converts from PAL RGB to ColorMatch RGB colorspace.
func PALToColorMatch(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ(r, g, b)
r1, g1, b1 = XYZToColorMatch(x, y, z)
return
}
// PALToDon converts from PAL RGB to Don RGB colorspace.
func PALToDon(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ(r, g, b)
r1, g1, b1 = XYZToDon(x, y, z)
return
}
// PALToECI converts from PAL RGB to ECI RGB colorspace.
func PALToECI(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ(r, g, b)
r1, g1, b1 = XYZToECI(x, y, z)
return
}
// PALToEktaSpace converts from PAL RGB to EktaSpace RGB colorspace.
func PALToEktaSpace(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ(r, g, b)
r1, g1, b1 = XYZToEktaSpace(x, y, z)
return
}
// PALToNTSC converts from PAL RGB to NTSC RGB colorspace.
func PALToNTSC(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ(r, g, b)
r1, g1, b1 = XYZToNTSC(x, y, z)
return
}
// PALToProPhoto converts from PAL RGB to ProPhoto RGB colorspace.
func PALToProPhoto(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ(r, g, b)
r1, g1, b1 = XYZToProPhoto(x, y, z)
return
}
// PALToSMPTE_C converts from PAL RGB to SMPTE_C RGB colorspace.
func PALToSMPTE_C(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ(r, g, b)
r1, g1, b1 = XYZToSMPTE_C(x, y, z)
return
}
// PALToSRGB converts from PAL RGB to SRGB RGB colorspace.
func PALToSRGB(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ(r, g, b)
r1, g1, b1 = XYZToSRGB(x, y, z)
return
}
// PALToWGamut converts from PAL RGB to WGamut RGB colorspace.
func PALToWGamut(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ(r, g, b)
r1, g1, b1 = XYZToWGamut(x, y, z)
return
}
// PALToAdobe_D50 converts from PAL RGB to Adobe_D50 RGB colorspace.
func PALToAdobe_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ(r, g, b)
r1, g1, b1 = XYZToAdobe_D50(x, y, z)
return
}
// PALToApple_D50 converts from PAL RGB to Apple_D50 RGB colorspace.
func PALToApple_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ(r, g, b)
r1, g1, b1 = XYZToApple_D50(x, y, z)
return
}
// PALToBruce_D50 converts from PAL RGB to Bruce_D50 RGB colorspace.
func PALToBruce_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ(r, g, b)
r1, g1, b1 = XYZToBruce_D50(x, y, z)
return
}
// PALToCie_D50 converts from PAL RGB to Cie_D50 RGB colorspace.
func PALToCie_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ(r, g, b)
r1, g1, b1 = XYZToCie_D50(x, y, z)
return
}
// PALToNTSC_D50 converts from PAL RGB to NTSC_D50 RGB colorspace.
func PALToNTSC_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ(r, g, b)
r1, g1, b1 = XYZToNTSC_D50(x, y, z)
return
}
// PALToPAL_D50 converts from PAL RGB to PAL_D50 RGB colorspace.
func PALToPAL_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ(r, g, b)
r1, g1, b1 = XYZToPAL_D50(x, y, z)
return
}
// PALToSMPTE_C_D50 converts from PAL RGB to SMPTE_C_D50 RGB colorspace.
func PALToSMPTE_C_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ(r, g, b)
r1, g1, b1 = XYZToSMPTE_C_D50(x, y, z)
return
}
// PALToSRGB_D50 converts from PAL RGB to SRGB_D50 RGB colorspace.
func PALToSRGB_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ(r, g, b)
r1, g1, b1 = XYZToSRGB_D50(x, y, z)
return
}
// ProPhotoToAdobe converts from ProPhoto RGB to Adobe RGB colorspace.
func ProPhotoToAdobe(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ProPhotoToXYZ(r, g, b)
r1, g1, b1 = XYZToAdobe(x, y, z)
return
}
// ProPhotoToApple converts from ProPhoto RGB to Apple RGB colorspace.
func ProPhotoToApple(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ProPhotoToXYZ(r, g, b)
r1, g1, b1 = XYZToApple(x, y, z)
return
}
// ProPhotoToBest converts from ProPhoto RGB to Best RGB colorspace.
func ProPhotoToBest(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ProPhotoToXYZ(r, g, b)
r1, g1, b1 = XYZToBest(x, y, z)
return
}
// ProPhotoToBeta converts from ProPhoto RGB to Beta RGB colorspace.
func ProPhotoToBeta(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ProPhotoToXYZ(r, g, b)
r1, g1, b1 = XYZToBeta(x, y, z)
return
}
// ProPhotoToBruce converts from ProPhoto RGB to Bruce RGB colorspace.
func ProPhotoToBruce(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ProPhotoToXYZ(r, g, b)
r1, g1, b1 = XYZToBruce(x, y, z)
return
}
// ProPhotoToCIE converts from ProPhoto RGB to CIE RGB colorspace.
func ProPhotoToCIE(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ProPhotoToXYZ(r, g, b)
r1, g1, b1 = XYZToCIE(x, y, z)
return
}
// ProPhotoToColorMatch converts from ProPhoto RGB to ColorMatch RGB colorspace.
func ProPhotoToColorMatch(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ProPhotoToXYZ(r, g, b)
r1, g1, b1 = XYZToColorMatch(x, y, z)
return
}
// ProPhotoToDon converts from ProPhoto RGB to Don RGB colorspace.
func ProPhotoToDon(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ProPhotoToXYZ(r, g, b)
r1, g1, b1 = XYZToDon(x, y, z)
return
}
// ProPhotoToECI converts from ProPhoto RGB to ECI RGB colorspace.
func ProPhotoToECI(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ProPhotoToXYZ(r, g, b)
r1, g1, b1 = XYZToECI(x, y, z)
return
}
// ProPhotoToEktaSpace converts from ProPhoto RGB to EktaSpace RGB colorspace.
func ProPhotoToEktaSpace(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ProPhotoToXYZ(r, g, b)
r1, g1, b1 = XYZToEktaSpace(x, y, z)
return
}
// ProPhotoToNTSC converts from ProPhoto RGB to NTSC RGB colorspace.
func ProPhotoToNTSC(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ProPhotoToXYZ(r, g, b)
r1, g1, b1 = XYZToNTSC(x, y, z)
return
}
// ProPhotoToPAL converts from ProPhoto RGB to PAL RGB colorspace.
func ProPhotoToPAL(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ProPhotoToXYZ(r, g, b)
r1, g1, b1 = XYZToPAL(x, y, z)
return
}
// ProPhotoToSMPTE_C converts from ProPhoto RGB to SMPTE_C RGB colorspace.
func ProPhotoToSMPTE_C(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ProPhotoToXYZ(r, g, b)
r1, g1, b1 = XYZToSMPTE_C(x, y, z)
return
}
// ProPhotoToSRGB converts from ProPhoto RGB to SRGB RGB colorspace.
func ProPhotoToSRGB(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ProPhotoToXYZ(r, g, b)
r1, g1, b1 = XYZToSRGB(x, y, z)
return
}
// ProPhotoToWGamut converts from ProPhoto RGB to WGamut RGB colorspace.
func ProPhotoToWGamut(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ProPhotoToXYZ(r, g, b)
r1, g1, b1 = XYZToWGamut(x, y, z)
return
}
// ProPhotoToAdobe_D50 converts from ProPhoto RGB to Adobe_D50 RGB colorspace.
func ProPhotoToAdobe_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ProPhotoToXYZ(r, g, b)
r1, g1, b1 = XYZToAdobe_D50(x, y, z)
return
}
// ProPhotoToApple_D50 converts from ProPhoto RGB to Apple_D50 RGB colorspace.
func ProPhotoToApple_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ProPhotoToXYZ(r, g, b)
r1, g1, b1 = XYZToApple_D50(x, y, z)
return
}
// ProPhotoToBruce_D50 converts from ProPhoto RGB to Bruce_D50 RGB colorspace.
func ProPhotoToBruce_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ProPhotoToXYZ(r, g, b)
r1, g1, b1 = XYZToBruce_D50(x, y, z)
return
}
// ProPhotoToCie_D50 converts from ProPhoto RGB to Cie_D50 RGB colorspace.
func ProPhotoToCie_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ProPhotoToXYZ(r, g, b)
r1, g1, b1 = XYZToCie_D50(x, y, z)
return
}
// ProPhotoToNTSC_D50 converts from ProPhoto RGB to NTSC_D50 RGB colorspace.
func ProPhotoToNTSC_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ProPhotoToXYZ(r, g, b)
r1, g1, b1 = XYZToNTSC_D50(x, y, z)
return
}
// ProPhotoToPAL_D50 converts from ProPhoto RGB to PAL_D50 RGB colorspace.
func ProPhotoToPAL_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ProPhotoToXYZ(r, g, b)
r1, g1, b1 = XYZToPAL_D50(x, y, z)
return
}
// ProPhotoToSMPTE_C_D50 converts from ProPhoto RGB to SMPTE_C_D50 RGB colorspace.
func ProPhotoToSMPTE_C_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ProPhotoToXYZ(r, g, b)
r1, g1, b1 = XYZToSMPTE_C_D50(x, y, z)
return
}
// ProPhotoToSRGB_D50 converts from ProPhoto RGB to SRGB_D50 RGB colorspace.
func ProPhotoToSRGB_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := ProPhotoToXYZ(r, g, b)
r1, g1, b1 = XYZToSRGB_D50(x, y, z)
return
}
// SMPTE_CToAdobe converts from SMPTE_C RGB to Adobe RGB colorspace.
func SMPTE_CToAdobe(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ(r, g, b)
r1, g1, b1 = XYZToAdobe(x, y, z)
return
}
// SMPTE_CToApple converts from SMPTE_C RGB to Apple RGB colorspace.
func SMPTE_CToApple(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ(r, g, b)
r1, g1, b1 = XYZToApple(x, y, z)
return
}
// SMPTE_CToBest converts from SMPTE_C RGB to Best RGB colorspace.
func SMPTE_CToBest(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ(r, g, b)
r1, g1, b1 = XYZToBest(x, y, z)
return
}
// SMPTE_CToBeta converts from SMPTE_C RGB to Beta RGB colorspace.
func SMPTE_CToBeta(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ(r, g, b)
r1, g1, b1 = XYZToBeta(x, y, z)
return
}
// SMPTE_CToBruce converts from SMPTE_C RGB to Bruce RGB colorspace.
func SMPTE_CToBruce(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ(r, g, b)
r1, g1, b1 = XYZToBruce(x, y, z)
return
}
// SMPTE_CToCIE converts from SMPTE_C RGB to CIE RGB colorspace.
func SMPTE_CToCIE(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ(r, g, b)
r1, g1, b1 = XYZToCIE(x, y, z)
return
}
// SMPTE_CToColorMatch converts from SMPTE_C RGB to ColorMatch RGB colorspace.
func SMPTE_CToColorMatch(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ(r, g, b)
r1, g1, b1 = XYZToColorMatch(x, y, z)
return
}
// SMPTE_CToDon converts from SMPTE_C RGB to Don RGB colorspace.
func SMPTE_CToDon(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ(r, g, b)
r1, g1, b1 = XYZToDon(x, y, z)
return
}
// SMPTE_CToECI converts from SMPTE_C RGB to ECI RGB colorspace.
func SMPTE_CToECI(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ(r, g, b)
r1, g1, b1 = XYZToECI(x, y, z)
return
}
// SMPTE_CToEktaSpace converts from SMPTE_C RGB to EktaSpace RGB colorspace.
func SMPTE_CToEktaSpace(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ(r, g, b)
r1, g1, b1 = XYZToEktaSpace(x, y, z)
return
}
// SMPTE_CToNTSC converts from SMPTE_C RGB to NTSC RGB colorspace.
func SMPTE_CToNTSC(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ(r, g, b)
r1, g1, b1 = XYZToNTSC(x, y, z)
return
}
// SMPTE_CToPAL converts from SMPTE_C RGB to PAL RGB colorspace.
func SMPTE_CToPAL(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ(r, g, b)
r1, g1, b1 = XYZToPAL(x, y, z)
return
}
// SMPTE_CToProPhoto converts from SMPTE_C RGB to ProPhoto RGB colorspace.
func SMPTE_CToProPhoto(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ(r, g, b)
r1, g1, b1 = XYZToProPhoto(x, y, z)
return
}
// SMPTE_CToSRGB converts from SMPTE_C RGB to SRGB RGB colorspace.
func SMPTE_CToSRGB(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ(r, g, b)
r1, g1, b1 = XYZToSRGB(x, y, z)
return
}
// SMPTE_CToWGamut converts from SMPTE_C RGB to WGamut RGB colorspace.
func SMPTE_CToWGamut(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ(r, g, b)
r1, g1, b1 = XYZToWGamut(x, y, z)
return
}
// SMPTE_CToAdobe_D50 converts from SMPTE_C RGB to Adobe_D50 RGB colorspace.
func SMPTE_CToAdobe_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ(r, g, b)
r1, g1, b1 = XYZToAdobe_D50(x, y, z)
return
}
// SMPTE_CToApple_D50 converts from SMPTE_C RGB to Apple_D50 RGB colorspace.
func SMPTE_CToApple_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ(r, g, b)
r1, g1, b1 = XYZToApple_D50(x, y, z)
return
}
// SMPTE_CToBruce_D50 converts from SMPTE_C RGB to Bruce_D50 RGB colorspace.
func SMPTE_CToBruce_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ(r, g, b)
r1, g1, b1 = XYZToBruce_D50(x, y, z)
return
}
// SMPTE_CToCie_D50 converts from SMPTE_C RGB to Cie_D50 RGB colorspace.
func SMPTE_CToCie_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ(r, g, b)
r1, g1, b1 = XYZToCie_D50(x, y, z)
return
}
// SMPTE_CToNTSC_D50 converts from SMPTE_C RGB to NTSC_D50 RGB colorspace.
func SMPTE_CToNTSC_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ(r, g, b)
r1, g1, b1 = XYZToNTSC_D50(x, y, z)
return
}
// SMPTE_CToPAL_D50 converts from SMPTE_C RGB to PAL_D50 RGB colorspace.
func SMPTE_CToPAL_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ(r, g, b)
r1, g1, b1 = XYZToPAL_D50(x, y, z)
return
}
// SMPTE_CToSMPTE_C_D50 converts from SMPTE_C RGB to SMPTE_C_D50 RGB colorspace.
func SMPTE_CToSMPTE_C_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ(r, g, b)
r1, g1, b1 = XYZToSMPTE_C_D50(x, y, z)
return
}
// SMPTE_CToSRGB_D50 converts from SMPTE_C RGB to SRGB_D50 RGB colorspace.
func SMPTE_CToSRGB_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ(r, g, b)
r1, g1, b1 = XYZToSRGB_D50(x, y, z)
return
}
// SRGBToAdobe converts from SRGB RGB to Adobe RGB colorspace.
func SRGBToAdobe(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ(r, g, b)
r1, g1, b1 = XYZToAdobe(x, y, z)
return
}
// SRGBToApple converts from SRGB RGB to Apple RGB colorspace.
func SRGBToApple(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ(r, g, b)
r1, g1, b1 = XYZToApple(x, y, z)
return
}
// SRGBToBest converts from SRGB RGB to Best RGB colorspace.
func SRGBToBest(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ(r, g, b)
r1, g1, b1 = XYZToBest(x, y, z)
return
}
// SRGBToBeta converts from SRGB RGB to Beta RGB colorspace.
func SRGBToBeta(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ(r, g, b)
r1, g1, b1 = XYZToBeta(x, y, z)
return
}
// SRGBToBruce converts from SRGB RGB to Bruce RGB colorspace.
func SRGBToBruce(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ(r, g, b)
r1, g1, b1 = XYZToBruce(x, y, z)
return
}
// SRGBToCIE converts from SRGB RGB to CIE RGB colorspace.
func SRGBToCIE(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ(r, g, b)
r1, g1, b1 = XYZToCIE(x, y, z)
return
}
// SRGBToColorMatch converts from SRGB RGB to ColorMatch RGB colorspace.
func SRGBToColorMatch(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ(r, g, b)
r1, g1, b1 = XYZToColorMatch(x, y, z)
return
}
// SRGBToDon converts from SRGB RGB to Don RGB colorspace.
func SRGBToDon(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ(r, g, b)
r1, g1, b1 = XYZToDon(x, y, z)
return
}
// SRGBToECI converts from SRGB RGB to ECI RGB colorspace.
func SRGBToECI(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ(r, g, b)
r1, g1, b1 = XYZToECI(x, y, z)
return
}
// SRGBToEktaSpace converts from SRGB RGB to EktaSpace RGB colorspace.
func SRGBToEktaSpace(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ(r, g, b)
r1, g1, b1 = XYZToEktaSpace(x, y, z)
return
}
// SRGBToNTSC converts from SRGB RGB to NTSC RGB colorspace.
func SRGBToNTSC(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ(r, g, b)
r1, g1, b1 = XYZToNTSC(x, y, z)
return
}
// SRGBToPAL converts from SRGB RGB to PAL RGB colorspace.
func SRGBToPAL(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ(r, g, b)
r1, g1, b1 = XYZToPAL(x, y, z)
return
}
// SRGBToProPhoto converts from SRGB RGB to ProPhoto RGB colorspace.
func SRGBToProPhoto(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ(r, g, b)
r1, g1, b1 = XYZToProPhoto(x, y, z)
return
}
// SRGBToSMPTE_C converts from SRGB RGB to SMPTE_C RGB colorspace.
func SRGBToSMPTE_C(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ(r, g, b)
r1, g1, b1 = XYZToSMPTE_C(x, y, z)
return
}
// SRGBToWGamut converts from SRGB RGB to WGamut RGB colorspace.
func SRGBToWGamut(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ(r, g, b)
r1, g1, b1 = XYZToWGamut(x, y, z)
return
}
// SRGBToAdobe_D50 converts from SRGB RGB to Adobe_D50 RGB colorspace.
func SRGBToAdobe_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ(r, g, b)
r1, g1, b1 = XYZToAdobe_D50(x, y, z)
return
}
// SRGBToApple_D50 converts from SRGB RGB to Apple_D50 RGB colorspace.
func SRGBToApple_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ(r, g, b)
r1, g1, b1 = XYZToApple_D50(x, y, z)
return
}
// SRGBToBruce_D50 converts from SRGB RGB to Bruce_D50 RGB colorspace.
func SRGBToBruce_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ(r, g, b)
r1, g1, b1 = XYZToBruce_D50(x, y, z)
return
}
// SRGBToCie_D50 converts from SRGB RGB to Cie_D50 RGB colorspace.
func SRGBToCie_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ(r, g, b)
r1, g1, b1 = XYZToCie_D50(x, y, z)
return
}
// SRGBToNTSC_D50 converts from SRGB RGB to NTSC_D50 RGB colorspace.
func SRGBToNTSC_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ(r, g, b)
r1, g1, b1 = XYZToNTSC_D50(x, y, z)
return
}
// SRGBToPAL_D50 converts from SRGB RGB to PAL_D50 RGB colorspace.
func SRGBToPAL_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ(r, g, b)
r1, g1, b1 = XYZToPAL_D50(x, y, z)
return
}
// SRGBToSMPTE_C_D50 converts from SRGB RGB to SMPTE_C_D50 RGB colorspace.
func SRGBToSMPTE_C_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ(r, g, b)
r1, g1, b1 = XYZToSMPTE_C_D50(x, y, z)
return
}
// SRGBToSRGB_D50 converts from SRGB RGB to SRGB_D50 RGB colorspace.
func SRGBToSRGB_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ(r, g, b)
r1, g1, b1 = XYZToSRGB_D50(x, y, z)
return
}
// WGamutToAdobe converts from WGamut RGB to Adobe RGB colorspace.
func WGamutToAdobe(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := WGamutToXYZ(r, g, b)
r1, g1, b1 = XYZToAdobe(x, y, z)
return
}
// WGamutToApple converts from WGamut RGB to Apple RGB colorspace.
func WGamutToApple(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := WGamutToXYZ(r, g, b)
r1, g1, b1 = XYZToApple(x, y, z)
return
}
// WGamutToBest converts from WGamut RGB to Best RGB colorspace.
func WGamutToBest(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := WGamutToXYZ(r, g, b)
r1, g1, b1 = XYZToBest(x, y, z)
return
}
// WGamutToBeta converts from WGamut RGB to Beta RGB colorspace.
func WGamutToBeta(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := WGamutToXYZ(r, g, b)
r1, g1, b1 = XYZToBeta(x, y, z)
return
}
// WGamutToBruce converts from WGamut RGB to Bruce RGB colorspace.
func WGamutToBruce(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := WGamutToXYZ(r, g, b)
r1, g1, b1 = XYZToBruce(x, y, z)
return
}
// WGamutToCIE converts from WGamut RGB to CIE RGB colorspace.
func WGamutToCIE(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := WGamutToXYZ(r, g, b)
r1, g1, b1 = XYZToCIE(x, y, z)
return
}
// WGamutToColorMatch converts from WGamut RGB to ColorMatch RGB colorspace.
func WGamutToColorMatch(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := WGamutToXYZ(r, g, b)
r1, g1, b1 = XYZToColorMatch(x, y, z)
return
}
// WGamutToDon converts from WGamut RGB to Don RGB colorspace.
func WGamutToDon(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := WGamutToXYZ(r, g, b)
r1, g1, b1 = XYZToDon(x, y, z)
return
}
// WGamutToECI converts from WGamut RGB to ECI RGB colorspace.
func WGamutToECI(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := WGamutToXYZ(r, g, b)
r1, g1, b1 = XYZToECI(x, y, z)
return
}
// WGamutToEktaSpace converts from WGamut RGB to EktaSpace RGB colorspace.
func WGamutToEktaSpace(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := WGamutToXYZ(r, g, b)
r1, g1, b1 = XYZToEktaSpace(x, y, z)
return
}
// WGamutToNTSC converts from WGamut RGB to NTSC RGB colorspace.
func WGamutToNTSC(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := WGamutToXYZ(r, g, b)
r1, g1, b1 = XYZToNTSC(x, y, z)
return
}
// WGamutToPAL converts from WGamut RGB to PAL RGB colorspace.
func WGamutToPAL(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := WGamutToXYZ(r, g, b)
r1, g1, b1 = XYZToPAL(x, y, z)
return
}
// WGamutToProPhoto converts from WGamut RGB to ProPhoto RGB colorspace.
func WGamutToProPhoto(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := WGamutToXYZ(r, g, b)
r1, g1, b1 = XYZToProPhoto(x, y, z)
return
}
// WGamutToSMPTE_C converts from WGamut RGB to SMPTE_C RGB colorspace.
func WGamutToSMPTE_C(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := WGamutToXYZ(r, g, b)
r1, g1, b1 = XYZToSMPTE_C(x, y, z)
return
}
// WGamutToSRGB converts from WGamut RGB to SRGB RGB colorspace.
func WGamutToSRGB(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := WGamutToXYZ(r, g, b)
r1, g1, b1 = XYZToSRGB(x, y, z)
return
}
// WGamutToAdobe_D50 converts from WGamut RGB to Adobe_D50 RGB colorspace.
func WGamutToAdobe_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := WGamutToXYZ(r, g, b)
r1, g1, b1 = XYZToAdobe_D50(x, y, z)
return
}
// WGamutToApple_D50 converts from WGamut RGB to Apple_D50 RGB colorspace.
func WGamutToApple_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := WGamutToXYZ(r, g, b)
r1, g1, b1 = XYZToApple_D50(x, y, z)
return
}
// WGamutToBruce_D50 converts from WGamut RGB to Bruce_D50 RGB colorspace.
func WGamutToBruce_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := WGamutToXYZ(r, g, b)
r1, g1, b1 = XYZToBruce_D50(x, y, z)
return
}
// WGamutToCie_D50 converts from WGamut RGB to Cie_D50 RGB colorspace.
func WGamutToCie_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := WGamutToXYZ(r, g, b)
r1, g1, b1 = XYZToCie_D50(x, y, z)
return
}
// WGamutToNTSC_D50 converts from WGamut RGB to NTSC_D50 RGB colorspace.
func WGamutToNTSC_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := WGamutToXYZ(r, g, b)
r1, g1, b1 = XYZToNTSC_D50(x, y, z)
return
}
// WGamutToPAL_D50 converts from WGamut RGB to PAL_D50 RGB colorspace.
func WGamutToPAL_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := WGamutToXYZ(r, g, b)
r1, g1, b1 = XYZToPAL_D50(x, y, z)
return
}
// WGamutToSMPTE_C_D50 converts from WGamut RGB to SMPTE_C_D50 RGB colorspace.
func WGamutToSMPTE_C_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := WGamutToXYZ(r, g, b)
r1, g1, b1 = XYZToSMPTE_C_D50(x, y, z)
return
}
// WGamutToSRGB_D50 converts from WGamut RGB to SRGB_D50 RGB colorspace.
func WGamutToSRGB_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := WGamutToXYZ(r, g, b)
r1, g1, b1 = XYZToSRGB_D50(x, y, z)
return
}
// Adobe_D50ToAdobe converts from Adobe_D50 RGB to Adobe RGB colorspace.
func Adobe_D50ToAdobe(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToAdobe(x, y, z)
return
}
// Adobe_D50ToApple converts from Adobe_D50 RGB to Apple RGB colorspace.
func Adobe_D50ToApple(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToApple(x, y, z)
return
}
// Adobe_D50ToBest converts from Adobe_D50 RGB to Best RGB colorspace.
func Adobe_D50ToBest(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToBest(x, y, z)
return
}
// Adobe_D50ToBeta converts from Adobe_D50 RGB to Beta RGB colorspace.
func Adobe_D50ToBeta(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToBeta(x, y, z)
return
}
// Adobe_D50ToBruce converts from Adobe_D50 RGB to Bruce RGB colorspace.
func Adobe_D50ToBruce(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToBruce(x, y, z)
return
}
// Adobe_D50ToCIE converts from Adobe_D50 RGB to CIE RGB colorspace.
func Adobe_D50ToCIE(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToCIE(x, y, z)
return
}
// Adobe_D50ToColorMatch converts from Adobe_D50 RGB to ColorMatch RGB colorspace.
func Adobe_D50ToColorMatch(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToColorMatch(x, y, z)
return
}
// Adobe_D50ToDon converts from Adobe_D50 RGB to Don RGB colorspace.
func Adobe_D50ToDon(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToDon(x, y, z)
return
}
// Adobe_D50ToECI converts from Adobe_D50 RGB to ECI RGB colorspace.
func Adobe_D50ToECI(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToECI(x, y, z)
return
}
// Adobe_D50ToEktaSpace converts from Adobe_D50 RGB to EktaSpace RGB colorspace.
func Adobe_D50ToEktaSpace(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToEktaSpace(x, y, z)
return
}
// Adobe_D50ToNTSC converts from Adobe_D50 RGB to NTSC RGB colorspace.
func Adobe_D50ToNTSC(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToNTSC(x, y, z)
return
}
// Adobe_D50ToPAL converts from Adobe_D50 RGB to PAL RGB colorspace.
func Adobe_D50ToPAL(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToPAL(x, y, z)
return
}
// Adobe_D50ToProPhoto converts from Adobe_D50 RGB to ProPhoto RGB colorspace.
func Adobe_D50ToProPhoto(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToProPhoto(x, y, z)
return
}
// Adobe_D50ToSMPTE_C converts from Adobe_D50 RGB to SMPTE_C RGB colorspace.
func Adobe_D50ToSMPTE_C(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToSMPTE_C(x, y, z)
return
}
// Adobe_D50ToSRGB converts from Adobe_D50 RGB to SRGB RGB colorspace.
func Adobe_D50ToSRGB(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToSRGB(x, y, z)
return
}
// Adobe_D50ToWGamut converts from Adobe_D50 RGB to WGamut RGB colorspace.
func Adobe_D50ToWGamut(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToWGamut(x, y, z)
return
}
// Adobe_D50ToApple_D50 converts from Adobe_D50 RGB to Apple_D50 RGB colorspace.
func Adobe_D50ToApple_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToApple_D50(x, y, z)
return
}
// Adobe_D50ToBruce_D50 converts from Adobe_D50 RGB to Bruce_D50 RGB colorspace.
func Adobe_D50ToBruce_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToBruce_D50(x, y, z)
return
}
// Adobe_D50ToCie_D50 converts from Adobe_D50 RGB to Cie_D50 RGB colorspace.
func Adobe_D50ToCie_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToCie_D50(x, y, z)
return
}
// Adobe_D50ToNTSC_D50 converts from Adobe_D50 RGB to NTSC_D50 RGB colorspace.
func Adobe_D50ToNTSC_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToNTSC_D50(x, y, z)
return
}
// Adobe_D50ToPAL_D50 converts from Adobe_D50 RGB to PAL_D50 RGB colorspace.
func Adobe_D50ToPAL_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToPAL_D50(x, y, z)
return
}
// Adobe_D50ToSMPTE_C_D50 converts from Adobe_D50 RGB to SMPTE_C_D50 RGB colorspace.
func Adobe_D50ToSMPTE_C_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToSMPTE_C_D50(x, y, z)
return
}
// Adobe_D50ToSRGB_D50 converts from Adobe_D50 RGB to SRGB_D50 RGB colorspace.
func Adobe_D50ToSRGB_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AdobeToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToSRGB_D50(x, y, z)
return
}
// Apple_D50ToAdobe converts from Apple_D50 RGB to Adobe RGB colorspace.
func Apple_D50ToAdobe(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToAdobe(x, y, z)
return
}
// Apple_D50ToApple converts from Apple_D50 RGB to Apple RGB colorspace.
func Apple_D50ToApple(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToApple(x, y, z)
return
}
// Apple_D50ToBest converts from Apple_D50 RGB to Best RGB colorspace.
func Apple_D50ToBest(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToBest(x, y, z)
return
}
// Apple_D50ToBeta converts from Apple_D50 RGB to Beta RGB colorspace.
func Apple_D50ToBeta(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToBeta(x, y, z)
return
}
// Apple_D50ToBruce converts from Apple_D50 RGB to Bruce RGB colorspace.
func Apple_D50ToBruce(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToBruce(x, y, z)
return
}
// Apple_D50ToCIE converts from Apple_D50 RGB to CIE RGB colorspace.
func Apple_D50ToCIE(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToCIE(x, y, z)
return
}
// Apple_D50ToColorMatch converts from Apple_D50 RGB to ColorMatch RGB colorspace.
func Apple_D50ToColorMatch(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToColorMatch(x, y, z)
return
}
// Apple_D50ToDon converts from Apple_D50 RGB to Don RGB colorspace.
func Apple_D50ToDon(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToDon(x, y, z)
return
}
// Apple_D50ToECI converts from Apple_D50 RGB to ECI RGB colorspace.
func Apple_D50ToECI(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToECI(x, y, z)
return
}
// Apple_D50ToEktaSpace converts from Apple_D50 RGB to EktaSpace RGB colorspace.
func Apple_D50ToEktaSpace(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToEktaSpace(x, y, z)
return
}
// Apple_D50ToNTSC converts from Apple_D50 RGB to NTSC RGB colorspace.
func Apple_D50ToNTSC(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToNTSC(x, y, z)
return
}
// Apple_D50ToPAL converts from Apple_D50 RGB to PAL RGB colorspace.
func Apple_D50ToPAL(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToPAL(x, y, z)
return
}
// Apple_D50ToProPhoto converts from Apple_D50 RGB to ProPhoto RGB colorspace.
func Apple_D50ToProPhoto(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToProPhoto(x, y, z)
return
}
// Apple_D50ToSMPTE_C converts from Apple_D50 RGB to SMPTE_C RGB colorspace.
func Apple_D50ToSMPTE_C(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToSMPTE_C(x, y, z)
return
}
// Apple_D50ToSRGB converts from Apple_D50 RGB to SRGB RGB colorspace.
func Apple_D50ToSRGB(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToSRGB(x, y, z)
return
}
// Apple_D50ToWGamut converts from Apple_D50 RGB to WGamut RGB colorspace.
func Apple_D50ToWGamut(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToWGamut(x, y, z)
return
}
// Apple_D50ToAdobe_D50 converts from Apple_D50 RGB to Adobe_D50 RGB colorspace.
func Apple_D50ToAdobe_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToAdobe_D50(x, y, z)
return
}
// Apple_D50ToBruce_D50 converts from Apple_D50 RGB to Bruce_D50 RGB colorspace.
func Apple_D50ToBruce_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToBruce_D50(x, y, z)
return
}
// Apple_D50ToCie_D50 converts from Apple_D50 RGB to Cie_D50 RGB colorspace.
func Apple_D50ToCie_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToCie_D50(x, y, z)
return
}
// Apple_D50ToNTSC_D50 converts from Apple_D50 RGB to NTSC_D50 RGB colorspace.
func Apple_D50ToNTSC_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToNTSC_D50(x, y, z)
return
}
// Apple_D50ToPAL_D50 converts from Apple_D50 RGB to PAL_D50 RGB colorspace.
func Apple_D50ToPAL_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToPAL_D50(x, y, z)
return
}
// Apple_D50ToSMPTE_C_D50 converts from Apple_D50 RGB to SMPTE_C_D50 RGB colorspace.
func Apple_D50ToSMPTE_C_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToSMPTE_C_D50(x, y, z)
return
}
// Apple_D50ToSRGB_D50 converts from Apple_D50 RGB to SRGB_D50 RGB colorspace.
func Apple_D50ToSRGB_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := AppleToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToSRGB_D50(x, y, z)
return
}
// Bruce_D50ToAdobe converts from Bruce_D50 RGB to Adobe RGB colorspace.
func Bruce_D50ToAdobe(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToAdobe(x, y, z)
return
}
// Bruce_D50ToApple converts from Bruce_D50 RGB to Apple RGB colorspace.
func Bruce_D50ToApple(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToApple(x, y, z)
return
}
// Bruce_D50ToBest converts from Bruce_D50 RGB to Best RGB colorspace.
func Bruce_D50ToBest(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToBest(x, y, z)
return
}
// Bruce_D50ToBeta converts from Bruce_D50 RGB to Beta RGB colorspace.
func Bruce_D50ToBeta(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToBeta(x, y, z)
return
}
// Bruce_D50ToBruce converts from Bruce_D50 RGB to Bruce RGB colorspace.
func Bruce_D50ToBruce(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToBruce(x, y, z)
return
}
// Bruce_D50ToCIE converts from Bruce_D50 RGB to CIE RGB colorspace.
func Bruce_D50ToCIE(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToCIE(x, y, z)
return
}
// Bruce_D50ToColorMatch converts from Bruce_D50 RGB to ColorMatch RGB colorspace.
func Bruce_D50ToColorMatch(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToColorMatch(x, y, z)
return
}
// Bruce_D50ToDon converts from Bruce_D50 RGB to Don RGB colorspace.
func Bruce_D50ToDon(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToDon(x, y, z)
return
}
// Bruce_D50ToECI converts from Bruce_D50 RGB to ECI RGB colorspace.
func Bruce_D50ToECI(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToECI(x, y, z)
return
}
// Bruce_D50ToEktaSpace converts from Bruce_D50 RGB to EktaSpace RGB colorspace.
func Bruce_D50ToEktaSpace(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToEktaSpace(x, y, z)
return
}
// Bruce_D50ToNTSC converts from Bruce_D50 RGB to NTSC RGB colorspace.
func Bruce_D50ToNTSC(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToNTSC(x, y, z)
return
}
// Bruce_D50ToPAL converts from Bruce_D50 RGB to PAL RGB colorspace.
func Bruce_D50ToPAL(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToPAL(x, y, z)
return
}
// Bruce_D50ToProPhoto converts from Bruce_D50 RGB to ProPhoto RGB colorspace.
func Bruce_D50ToProPhoto(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToProPhoto(x, y, z)
return
}
// Bruce_D50ToSMPTE_C converts from Bruce_D50 RGB to SMPTE_C RGB colorspace.
func Bruce_D50ToSMPTE_C(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToSMPTE_C(x, y, z)
return
}
// Bruce_D50ToSRGB converts from Bruce_D50 RGB to SRGB RGB colorspace.
func Bruce_D50ToSRGB(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToSRGB(x, y, z)
return
}
// Bruce_D50ToWGamut converts from Bruce_D50 RGB to WGamut RGB colorspace.
func Bruce_D50ToWGamut(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToWGamut(x, y, z)
return
}
// Bruce_D50ToAdobe_D50 converts from Bruce_D50 RGB to Adobe_D50 RGB colorspace.
func Bruce_D50ToAdobe_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToAdobe_D50(x, y, z)
return
}
// Bruce_D50ToApple_D50 converts from Bruce_D50 RGB to Apple_D50 RGB colorspace.
func Bruce_D50ToApple_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToApple_D50(x, y, z)
return
}
// Bruce_D50ToCie_D50 converts from Bruce_D50 RGB to Cie_D50 RGB colorspace.
func Bruce_D50ToCie_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToCie_D50(x, y, z)
return
}
// Bruce_D50ToNTSC_D50 converts from Bruce_D50 RGB to NTSC_D50 RGB colorspace.
func Bruce_D50ToNTSC_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToNTSC_D50(x, y, z)
return
}
// Bruce_D50ToPAL_D50 converts from Bruce_D50 RGB to PAL_D50 RGB colorspace.
func Bruce_D50ToPAL_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToPAL_D50(x, y, z)
return
}
// Bruce_D50ToSMPTE_C_D50 converts from Bruce_D50 RGB to SMPTE_C_D50 RGB colorspace.
func Bruce_D50ToSMPTE_C_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToSMPTE_C_D50(x, y, z)
return
}
// Bruce_D50ToSRGB_D50 converts from Bruce_D50 RGB to SRGB_D50 RGB colorspace.
func Bruce_D50ToSRGB_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := BruceToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToSRGB_D50(x, y, z)
return
}
// Cie_D50ToAdobe converts from Cie_D50 RGB to Adobe RGB colorspace.
func Cie_D50ToAdobe(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CieToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToAdobe(x, y, z)
return
}
// Cie_D50ToApple converts from Cie_D50 RGB to Apple RGB colorspace.
func Cie_D50ToApple(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CieToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToApple(x, y, z)
return
}
// Cie_D50ToBest converts from Cie_D50 RGB to Best RGB colorspace.
func Cie_D50ToBest(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CieToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToBest(x, y, z)
return
}
// Cie_D50ToBeta converts from Cie_D50 RGB to Beta RGB colorspace.
func Cie_D50ToBeta(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CieToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToBeta(x, y, z)
return
}
// Cie_D50ToBruce converts from Cie_D50 RGB to Bruce RGB colorspace.
func Cie_D50ToBruce(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CieToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToBruce(x, y, z)
return
}
// Cie_D50ToCIE converts from Cie_D50 RGB to CIE RGB colorspace.
func Cie_D50ToCIE(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CieToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToCIE(x, y, z)
return
}
// Cie_D50ToColorMatch converts from Cie_D50 RGB to ColorMatch RGB colorspace.
func Cie_D50ToColorMatch(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CieToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToColorMatch(x, y, z)
return
}
// Cie_D50ToDon converts from Cie_D50 RGB to Don RGB colorspace.
func Cie_D50ToDon(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CieToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToDon(x, y, z)
return
}
// Cie_D50ToECI converts from Cie_D50 RGB to ECI RGB colorspace.
func Cie_D50ToECI(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CieToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToECI(x, y, z)
return
}
// Cie_D50ToEktaSpace converts from Cie_D50 RGB to EktaSpace RGB colorspace.
func Cie_D50ToEktaSpace(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CieToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToEktaSpace(x, y, z)
return
}
// Cie_D50ToNTSC converts from Cie_D50 RGB to NTSC RGB colorspace.
func Cie_D50ToNTSC(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CieToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToNTSC(x, y, z)
return
}
// Cie_D50ToPAL converts from Cie_D50 RGB to PAL RGB colorspace.
func Cie_D50ToPAL(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CieToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToPAL(x, y, z)
return
}
// Cie_D50ToProPhoto converts from Cie_D50 RGB to ProPhoto RGB colorspace.
func Cie_D50ToProPhoto(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CieToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToProPhoto(x, y, z)
return
}
// Cie_D50ToSMPTE_C converts from Cie_D50 RGB to SMPTE_C RGB colorspace.
func Cie_D50ToSMPTE_C(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CieToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToSMPTE_C(x, y, z)
return
}
// Cie_D50ToSRGB converts from Cie_D50 RGB to SRGB RGB colorspace.
func Cie_D50ToSRGB(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CieToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToSRGB(x, y, z)
return
}
// Cie_D50ToWGamut converts from Cie_D50 RGB to WGamut RGB colorspace.
func Cie_D50ToWGamut(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CieToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToWGamut(x, y, z)
return
}
// Cie_D50ToAdobe_D50 converts from Cie_D50 RGB to Adobe_D50 RGB colorspace.
func Cie_D50ToAdobe_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CieToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToAdobe_D50(x, y, z)
return
}
// Cie_D50ToApple_D50 converts from Cie_D50 RGB to Apple_D50 RGB colorspace.
func Cie_D50ToApple_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CieToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToApple_D50(x, y, z)
return
}
// Cie_D50ToBruce_D50 converts from Cie_D50 RGB to Bruce_D50 RGB colorspace.
func Cie_D50ToBruce_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CieToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToBruce_D50(x, y, z)
return
}
// Cie_D50ToNTSC_D50 converts from Cie_D50 RGB to NTSC_D50 RGB colorspace.
func Cie_D50ToNTSC_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CieToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToNTSC_D50(x, y, z)
return
}
// Cie_D50ToPAL_D50 converts from Cie_D50 RGB to PAL_D50 RGB colorspace.
func Cie_D50ToPAL_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CieToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToPAL_D50(x, y, z)
return
}
// Cie_D50ToSMPTE_C_D50 converts from Cie_D50 RGB to SMPTE_C_D50 RGB colorspace.
func Cie_D50ToSMPTE_C_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CieToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToSMPTE_C_D50(x, y, z)
return
}
// Cie_D50ToSRGB_D50 converts from Cie_D50 RGB to SRGB_D50 RGB colorspace.
func Cie_D50ToSRGB_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := CieToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToSRGB_D50(x, y, z)
return
}
// NTSC_D50ToAdobe converts from NTSC_D50 RGB to Adobe RGB colorspace.
func NTSC_D50ToAdobe(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToAdobe(x, y, z)
return
}
// NTSC_D50ToApple converts from NTSC_D50 RGB to Apple RGB colorspace.
func NTSC_D50ToApple(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToApple(x, y, z)
return
}
// NTSC_D50ToBest converts from NTSC_D50 RGB to Best RGB colorspace.
func NTSC_D50ToBest(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToBest(x, y, z)
return
}
// NTSC_D50ToBeta converts from NTSC_D50 RGB to Beta RGB colorspace.
func NTSC_D50ToBeta(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToBeta(x, y, z)
return
}
// NTSC_D50ToBruce converts from NTSC_D50 RGB to Bruce RGB colorspace.
func NTSC_D50ToBruce(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToBruce(x, y, z)
return
}
// NTSC_D50ToCIE converts from NTSC_D50 RGB to CIE RGB colorspace.
func NTSC_D50ToCIE(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToCIE(x, y, z)
return
}
// NTSC_D50ToColorMatch converts from NTSC_D50 RGB to ColorMatch RGB colorspace.
func NTSC_D50ToColorMatch(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToColorMatch(x, y, z)
return
}
// NTSC_D50ToDon converts from NTSC_D50 RGB to Don RGB colorspace.
func NTSC_D50ToDon(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToDon(x, y, z)
return
}
// NTSC_D50ToECI converts from NTSC_D50 RGB to ECI RGB colorspace.
func NTSC_D50ToECI(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToECI(x, y, z)
return
}
// NTSC_D50ToEktaSpace converts from NTSC_D50 RGB to EktaSpace RGB colorspace.
func NTSC_D50ToEktaSpace(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToEktaSpace(x, y, z)
return
}
// NTSC_D50ToNTSC converts from NTSC_D50 RGB to NTSC RGB colorspace.
func NTSC_D50ToNTSC(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToNTSC(x, y, z)
return
}
// NTSC_D50ToPAL converts from NTSC_D50 RGB to PAL RGB colorspace.
func NTSC_D50ToPAL(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToPAL(x, y, z)
return
}
// NTSC_D50ToProPhoto converts from NTSC_D50 RGB to ProPhoto RGB colorspace.
func NTSC_D50ToProPhoto(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToProPhoto(x, y, z)
return
}
// NTSC_D50ToSMPTE_C converts from NTSC_D50 RGB to SMPTE_C RGB colorspace.
func NTSC_D50ToSMPTE_C(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToSMPTE_C(x, y, z)
return
}
// NTSC_D50ToSRGB converts from NTSC_D50 RGB to SRGB RGB colorspace.
func NTSC_D50ToSRGB(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToSRGB(x, y, z)
return
}
// NTSC_D50ToWGamut converts from NTSC_D50 RGB to WGamut RGB colorspace.
func NTSC_D50ToWGamut(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToWGamut(x, y, z)
return
}
// NTSC_D50ToAdobe_D50 converts from NTSC_D50 RGB to Adobe_D50 RGB colorspace.
func NTSC_D50ToAdobe_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToAdobe_D50(x, y, z)
return
}
// NTSC_D50ToApple_D50 converts from NTSC_D50 RGB to Apple_D50 RGB colorspace.
func NTSC_D50ToApple_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToApple_D50(x, y, z)
return
}
// NTSC_D50ToBruce_D50 converts from NTSC_D50 RGB to Bruce_D50 RGB colorspace.
func NTSC_D50ToBruce_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToBruce_D50(x, y, z)
return
}
// NTSC_D50ToCie_D50 converts from NTSC_D50 RGB to Cie_D50 RGB colorspace.
func NTSC_D50ToCie_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToCie_D50(x, y, z)
return
}
// NTSC_D50ToPAL_D50 converts from NTSC_D50 RGB to PAL_D50 RGB colorspace.
func NTSC_D50ToPAL_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToPAL_D50(x, y, z)
return
}
// NTSC_D50ToSMPTE_C_D50 converts from NTSC_D50 RGB to SMPTE_C_D50 RGB colorspace.
func NTSC_D50ToSMPTE_C_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToSMPTE_C_D50(x, y, z)
return
}
// NTSC_D50ToSRGB_D50 converts from NTSC_D50 RGB to SRGB_D50 RGB colorspace.
func NTSC_D50ToSRGB_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := NTSCToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToSRGB_D50(x, y, z)
return
}
// PAL_D50ToAdobe converts from PAL_D50 RGB to Adobe RGB colorspace.
func PAL_D50ToAdobe(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToAdobe(x, y, z)
return
}
// PAL_D50ToApple converts from PAL_D50 RGB to Apple RGB colorspace.
func PAL_D50ToApple(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToApple(x, y, z)
return
}
// PAL_D50ToBest converts from PAL_D50 RGB to Best RGB colorspace.
func PAL_D50ToBest(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToBest(x, y, z)
return
}
// PAL_D50ToBeta converts from PAL_D50 RGB to Beta RGB colorspace.
func PAL_D50ToBeta(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToBeta(x, y, z)
return
}
// PAL_D50ToBruce converts from PAL_D50 RGB to Bruce RGB colorspace.
func PAL_D50ToBruce(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToBruce(x, y, z)
return
}
// PAL_D50ToCIE converts from PAL_D50 RGB to CIE RGB colorspace.
func PAL_D50ToCIE(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToCIE(x, y, z)
return
}
// PAL_D50ToColorMatch converts from PAL_D50 RGB to ColorMatch RGB colorspace.
func PAL_D50ToColorMatch(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToColorMatch(x, y, z)
return
}
// PAL_D50ToDon converts from PAL_D50 RGB to Don RGB colorspace.
func PAL_D50ToDon(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToDon(x, y, z)
return
}
// PAL_D50ToECI converts from PAL_D50 RGB to ECI RGB colorspace.
func PAL_D50ToECI(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToECI(x, y, z)
return
}
// PAL_D50ToEktaSpace converts from PAL_D50 RGB to EktaSpace RGB colorspace.
func PAL_D50ToEktaSpace(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToEktaSpace(x, y, z)
return
}
// PAL_D50ToNTSC converts from PAL_D50 RGB to NTSC RGB colorspace.
func PAL_D50ToNTSC(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToNTSC(x, y, z)
return
}
// PAL_D50ToPAL converts from PAL_D50 RGB to PAL RGB colorspace.
func PAL_D50ToPAL(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToPAL(x, y, z)
return
}
// PAL_D50ToProPhoto converts from PAL_D50 RGB to ProPhoto RGB colorspace.
func PAL_D50ToProPhoto(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToProPhoto(x, y, z)
return
}
// PAL_D50ToSMPTE_C converts from PAL_D50 RGB to SMPTE_C RGB colorspace.
func PAL_D50ToSMPTE_C(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToSMPTE_C(x, y, z)
return
}
// PAL_D50ToSRGB converts from PAL_D50 RGB to SRGB RGB colorspace.
func PAL_D50ToSRGB(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToSRGB(x, y, z)
return
}
// PAL_D50ToWGamut converts from PAL_D50 RGB to WGamut RGB colorspace.
func PAL_D50ToWGamut(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToWGamut(x, y, z)
return
}
// PAL_D50ToAdobe_D50 converts from PAL_D50 RGB to Adobe_D50 RGB colorspace.
func PAL_D50ToAdobe_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToAdobe_D50(x, y, z)
return
}
// PAL_D50ToApple_D50 converts from PAL_D50 RGB to Apple_D50 RGB colorspace.
func PAL_D50ToApple_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToApple_D50(x, y, z)
return
}
// PAL_D50ToBruce_D50 converts from PAL_D50 RGB to Bruce_D50 RGB colorspace.
func PAL_D50ToBruce_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToBruce_D50(x, y, z)
return
}
// PAL_D50ToCie_D50 converts from PAL_D50 RGB to Cie_D50 RGB colorspace.
func PAL_D50ToCie_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToCie_D50(x, y, z)
return
}
// PAL_D50ToNTSC_D50 converts from PAL_D50 RGB to NTSC_D50 RGB colorspace.
func PAL_D50ToNTSC_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToNTSC_D50(x, y, z)
return
}
// PAL_D50ToSMPTE_C_D50 converts from PAL_D50 RGB to SMPTE_C_D50 RGB colorspace.
func PAL_D50ToSMPTE_C_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToSMPTE_C_D50(x, y, z)
return
}
// PAL_D50ToSRGB_D50 converts from PAL_D50 RGB to SRGB_D50 RGB colorspace.
func PAL_D50ToSRGB_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := PALToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToSRGB_D50(x, y, z)
return
}
// SMPTE_C_D50ToAdobe converts from SMPTE_C_D50 RGB to Adobe RGB colorspace.
func SMPTE_C_D50ToAdobe(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToAdobe(x, y, z)
return
}
// SMPTE_C_D50ToApple converts from SMPTE_C_D50 RGB to Apple RGB colorspace.
func SMPTE_C_D50ToApple(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToApple(x, y, z)
return
}
// SMPTE_C_D50ToBest converts from SMPTE_C_D50 RGB to Best RGB colorspace.
func SMPTE_C_D50ToBest(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToBest(x, y, z)
return
}
// SMPTE_C_D50ToBeta converts from SMPTE_C_D50 RGB to Beta RGB colorspace.
func SMPTE_C_D50ToBeta(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToBeta(x, y, z)
return
}
// SMPTE_C_D50ToBruce converts from SMPTE_C_D50 RGB to Bruce RGB colorspace.
func SMPTE_C_D50ToBruce(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToBruce(x, y, z)
return
}
// SMPTE_C_D50ToCIE converts from SMPTE_C_D50 RGB to CIE RGB colorspace.
func SMPTE_C_D50ToCIE(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToCIE(x, y, z)
return
}
// SMPTE_C_D50ToColorMatch converts from SMPTE_C_D50 RGB to ColorMatch RGB colorspace.
func SMPTE_C_D50ToColorMatch(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToColorMatch(x, y, z)
return
}
// SMPTE_C_D50ToDon converts from SMPTE_C_D50 RGB to Don RGB colorspace.
func SMPTE_C_D50ToDon(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToDon(x, y, z)
return
}
// SMPTE_C_D50ToECI converts from SMPTE_C_D50 RGB to ECI RGB colorspace.
func SMPTE_C_D50ToECI(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToECI(x, y, z)
return
}
// SMPTE_C_D50ToEktaSpace converts from SMPTE_C_D50 RGB to EktaSpace RGB colorspace.
func SMPTE_C_D50ToEktaSpace(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToEktaSpace(x, y, z)
return
}
// SMPTE_C_D50ToNTSC converts from SMPTE_C_D50 RGB to NTSC RGB colorspace.
func SMPTE_C_D50ToNTSC(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToNTSC(x, y, z)
return
}
// SMPTE_C_D50ToPAL converts from SMPTE_C_D50 RGB to PAL RGB colorspace.
func SMPTE_C_D50ToPAL(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToPAL(x, y, z)
return
}
// SMPTE_C_D50ToProPhoto converts from SMPTE_C_D50 RGB to ProPhoto RGB colorspace.
func SMPTE_C_D50ToProPhoto(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToProPhoto(x, y, z)
return
}
// SMPTE_C_D50ToSMPTE_C converts from SMPTE_C_D50 RGB to SMPTE_C RGB colorspace.
func SMPTE_C_D50ToSMPTE_C(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToSMPTE_C(x, y, z)
return
}
// SMPTE_C_D50ToSRGB converts from SMPTE_C_D50 RGB to SRGB RGB colorspace.
func SMPTE_C_D50ToSRGB(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToSRGB(x, y, z)
return
}
// SMPTE_C_D50ToWGamut converts from SMPTE_C_D50 RGB to WGamut RGB colorspace.
func SMPTE_C_D50ToWGamut(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToWGamut(x, y, z)
return
}
// SMPTE_C_D50ToAdobe_D50 converts from SMPTE_C_D50 RGB to Adobe_D50 RGB colorspace.
func SMPTE_C_D50ToAdobe_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToAdobe_D50(x, y, z)
return
}
// SMPTE_C_D50ToApple_D50 converts from SMPTE_C_D50 RGB to Apple_D50 RGB colorspace.
func SMPTE_C_D50ToApple_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToApple_D50(x, y, z)
return
}
// SMPTE_C_D50ToBruce_D50 converts from SMPTE_C_D50 RGB to Bruce_D50 RGB colorspace.
func SMPTE_C_D50ToBruce_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToBruce_D50(x, y, z)
return
}
// SMPTE_C_D50ToCie_D50 converts from SMPTE_C_D50 RGB to Cie_D50 RGB colorspace.
func SMPTE_C_D50ToCie_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToCie_D50(x, y, z)
return
}
// SMPTE_C_D50ToNTSC_D50 converts from SMPTE_C_D50 RGB to NTSC_D50 RGB colorspace.
func SMPTE_C_D50ToNTSC_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToNTSC_D50(x, y, z)
return
}
// SMPTE_C_D50ToPAL_D50 converts from SMPTE_C_D50 RGB to PAL_D50 RGB colorspace.
func SMPTE_C_D50ToPAL_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToPAL_D50(x, y, z)
return
}
// SMPTE_C_D50ToSRGB_D50 converts from SMPTE_C_D50 RGB to SRGB_D50 RGB colorspace.
func SMPTE_C_D50ToSRGB_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SMPTE_CToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToSRGB_D50(x, y, z)
return
}
// SRGB_D50ToAdobe converts from SRGB_D50 RGB to Adobe RGB colorspace.
func SRGB_D50ToAdobe(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToAdobe(x, y, z)
return
}
// SRGB_D50ToApple converts from SRGB_D50 RGB to Apple RGB colorspace.
func SRGB_D50ToApple(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToApple(x, y, z)
return
}
// SRGB_D50ToBest converts from SRGB_D50 RGB to Best RGB colorspace.
func SRGB_D50ToBest(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToBest(x, y, z)
return
}
// SRGB_D50ToBeta converts from SRGB_D50 RGB to Beta RGB colorspace.
func SRGB_D50ToBeta(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToBeta(x, y, z)
return
}
// SRGB_D50ToBruce converts from SRGB_D50 RGB to Bruce RGB colorspace.
func SRGB_D50ToBruce(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToBruce(x, y, z)
return
}
// SRGB_D50ToCIE converts from SRGB_D50 RGB to CIE RGB colorspace.
func SRGB_D50ToCIE(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToCIE(x, y, z)
return
}
// SRGB_D50ToColorMatch converts from SRGB_D50 RGB to ColorMatch RGB colorspace.
func SRGB_D50ToColorMatch(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToColorMatch(x, y, z)
return
}
// SRGB_D50ToDon converts from SRGB_D50 RGB to Don RGB colorspace.
func SRGB_D50ToDon(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToDon(x, y, z)
return
}
// SRGB_D50ToECI converts from SRGB_D50 RGB to ECI RGB colorspace.
func SRGB_D50ToECI(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToECI(x, y, z)
return
}
// SRGB_D50ToEktaSpace converts from SRGB_D50 RGB to EktaSpace RGB colorspace.
func SRGB_D50ToEktaSpace(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToEktaSpace(x, y, z)
return
}
// SRGB_D50ToNTSC converts from SRGB_D50 RGB to NTSC RGB colorspace.
func SRGB_D50ToNTSC(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToNTSC(x, y, z)
return
}
// SRGB_D50ToPAL converts from SRGB_D50 RGB to PAL RGB colorspace.
func SRGB_D50ToPAL(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToPAL(x, y, z)
return
}
// SRGB_D50ToProPhoto converts from SRGB_D50 RGB to ProPhoto RGB colorspace.
func SRGB_D50ToProPhoto(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToProPhoto(x, y, z)
return
}
// SRGB_D50ToSMPTE_C converts from SRGB_D50 RGB to SMPTE_C RGB colorspace.
func SRGB_D50ToSMPTE_C(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToSMPTE_C(x, y, z)
return
}
// SRGB_D50ToSRGB converts from SRGB_D50 RGB to SRGB RGB colorspace.
func SRGB_D50ToSRGB(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToSRGB(x, y, z)
return
}
// SRGB_D50ToWGamut converts from SRGB_D50 RGB to WGamut RGB colorspace.
func SRGB_D50ToWGamut(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToWGamut(x, y, z)
return
}
// SRGB_D50ToAdobe_D50 converts from SRGB_D50 RGB to Adobe_D50 RGB colorspace.
func SRGB_D50ToAdobe_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToAdobe_D50(x, y, z)
return
}
// SRGB_D50ToApple_D50 converts from SRGB_D50 RGB to Apple_D50 RGB colorspace.
func SRGB_D50ToApple_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToApple_D50(x, y, z)
return
}
// SRGB_D50ToBruce_D50 converts from SRGB_D50 RGB to Bruce_D50 RGB colorspace.
func SRGB_D50ToBruce_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToBruce_D50(x, y, z)
return
}
// SRGB_D50ToCie_D50 converts from SRGB_D50 RGB to Cie_D50 RGB colorspace.
func SRGB_D50ToCie_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToCie_D50(x, y, z)
return
}
// SRGB_D50ToNTSC_D50 converts from SRGB_D50 RGB to NTSC_D50 RGB colorspace.
func SRGB_D50ToNTSC_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToNTSC_D50(x, y, z)
return
}
// SRGB_D50ToPAL_D50 converts from SRGB_D50 RGB to PAL_D50 RGB colorspace.
func SRGB_D50ToPAL_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToPAL_D50(x, y, z)
return
}
// SRGB_D50ToSMPTE_C_D50 converts from SRGB_D50 RGB to SMPTE_C_D50 RGB colorspace.
func SRGB_D50ToSMPTE_C_D50(r, g, b uint8) (r1, g1, b1 uint8) {
x, y, z := SRGBToXYZ_D50(r, g, b)
r1, g1, b1 = XYZToSMPTE_C_D50(x, y, z)
return
} | i8/rgb8/wrappers.go | 0.877483 | 0.418756 | wrappers.go | starcoder |
// Package cobra provides a commander for building modern CLI applications
// in the style of git and the go tool; inspired by go, go-Commander, gh
// and subcommand.
package cobra
import (
"fmt"
"io"
"reflect"
"strconv"
"strings"
"text/template"
)
// initializers holds the functions registered via OnInitialize.
var initializers []func()

// EnablePrefixMatching allows commands to be matched by an unambiguous
// prefix of their name. Automatic prefix matching can be a dangerous
// thing to automatically enable in CLI tools, so it defaults to false.
var EnablePrefixMatching = false

// OnInitialize takes a series of func() arguments and appends them to
// the package-level initializers slice, preserving their order.
func OnInitialize(y ...func()) {
	initializers = append(initializers, y...)
}
// Gt takes two values and reports whether the first is greater than the
// second. Arrays, chans, maps and slices compare by their lengths; signed
// integers compare directly; strings are first parsed as base-10 ints
// (an unparseable string counts as 0). Any other kind also counts as 0.
func Gt(a interface{}, b interface{}) bool {
	return gtOperand(a) > gtOperand(b)
}

// gtOperand reduces a value to the int64 used by Gt's comparison,
// replacing the previously duplicated left/right conversion blocks.
func gtOperand(v interface{}) int64 {
	rv := reflect.ValueOf(v)
	switch rv.Kind() {
	case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
		return int64(rv.Len())
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return rv.Int()
	case reflect.String:
		// Parse errors are deliberately ignored; the zero value is used,
		// matching the original behavior.
		n, _ := strconv.ParseInt(rv.String(), 10, 64)
		return n
	}
	return 0
}
// Eq takes two values and reports whether they are equal. Supported types
// are signed integers and strings; array, chan, map and slice values
// panic, and every other kind compares as unequal.
func Eq(a interface{}, b interface{}) bool {
	left := reflect.ValueOf(a)
	right := reflect.ValueOf(b)
	kind := left.Kind()
	if kind == reflect.Array || kind == reflect.Chan || kind == reflect.Map || kind == reflect.Slice {
		panic("Eq called on unsupported type")
	}
	if kind == reflect.String {
		return left.String() == right.String()
	}
	switch kind {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return left.Int() == right.Int()
	}
	return false
}
// rpad right-pads s with spaces up to the given total width; strings
// already at least that wide are returned unchanged (fmt treats the
// width as a minimum).
func rpad(s string, padding int) string {
	// Named "format" rather than "template" so the local variable does
	// not shadow the text/template package imported by this file.
	format := fmt.Sprintf("%%-%ds", padding)
	return fmt.Sprintf(format, s)
}
// tmpl executes the given template text on data, writing the result to w.
func tmpl(w io.Writer, text string, data interface{}) error {
t := template.New("top")
t.Funcs(template.FuncMap{
"trim": strings.TrimSpace,
"rpad": rpad,
"gt": Gt,
"eq": Eq,
})
template.Must(t.Parse(text))
return t.Execute(w, data)
} | Godeps/_workspace/src/github.com/spf13/cobra/cobra.go | 0.662032 | 0.493958 | cobra.go | starcoder |
package main
import "fmt"
var helpDoc = `Usage: hvclient [options]
HVClient is a command-line interface to the GlobalSign Atlas Certificate
Management API (HVCA).
Access to HVCA requires an account. At the time of account setup, GlobalSign
will provide a mutual TLS certificate, an API key, and an API secret which can
be provided to HVClient via a configuration file.
General options:
-config=<file> File containing configuration options and HVCA account
credentials. Defaults to $HOME/.hvclient/hvclient.conf.
Certificate request options:
Key options:
One (and only one) of -publickey, -privatekey or -csr must be specified to
request a new certificate.
-publickey=<file> Public key to use for HVCA accounts which do not
require proof-of-possession of a private key.
-privatekey=<file> Private key to use for HVCA accounts which require
proof-of-possession by signing the public key.
-gencsr Generate a PKCS#10 certificate signing request (CSR)
for HVCA accounts which require proof-of-possession
with a signed PKCS#10 CSR. Useful when a user has an
HVCA account which requires this proof-of-possession,
but has no convenient way to generate their own CSR.
-csrout Output a PEM-encoded signed PKCS#10 CSR instead of
requesting a certificate from HVCA.
-csr=<file> PKCS#10 CSR to use for HVCA accounts which require
proof-of-possession with a signed PKCS#10 CSR.
-generate Use with -publickey, -privatekey or -csr to output
the JSON-encoded certificate request without actually
submitting it to HVCA. Useful for examining and
verifying the contents of a request before submitting
it.
Validity period options:
If all of these options are omitted, the request will default to a
not-before time of the current time, and a not-after time of the maximum
allowed by the account validation policy. Providing only -duration will
default to a not-before time of the current time, and a not-after time
of the current time plus the specified duration.
-notbefore=<time> The time before which the certificate is not valid. The
time format is 2016-01-02T15:04:05UTC. Defaults to the
current time.
-notafter=<time> The time after which the certificate is not valid. The
time format is 2016-01-02T15:04:05UTC. Defaults to the
maximum allowed by the account validation policy.
-duration=<value> An alternative to -notafter. The not-after time will be
calculated at the not-before time plus the specified
duration value, which should be in a flexible format
such as 10d, 30days, 24hrs, 8wk, 12w.
Certificate attribute value options:
At least one of these options should normally be selected.
-commonname=<string> Subject distinguished name (DN) common name
-organization=<string> Subject DN organization
-organizationalunit=<string> Comma-separated list of subject DN
organizational units
-streetaddress=<string> Subject DN street address
-locality=<string> Subject DN locality
-state=<string> Subject DN state or province
-country=<string> Subject DN country
-email=<string> Subject DN email address (deprecated, use
subject alternative names instead)
-businesscategory=<string> Subject DN business category
-joilocality=<string> Subject DN jurisdiction locality
-joistate=<string> Subject DN jurisdiction state or province
-joicountry=<string> Subject DN jurisdiction country
-extraattributes=<string> Comma-separated list of subject DN extra
attributes in format OID=value, for example
"172.16.31.10=surname,172.16.58.3=serial_number"
-dnsnames=<string> Comma-separated list of subject alternative
Names (SAN) domain names
-emails=<string> Comma-separated list of SAN email addresses
-ips=<string> Comma-separated list of SAN IP addresses
-uris=<string> Comma-separated list of SAN URIs
-ekus=<string> Comma-separated list of extended key usage
OIDs, e.g. "192.168.127.12.5.192.168.3.11"
-template=<file> Read values from the specified JSON-encoded
file. Options specified at the command line
override or append to the values in this
template, as appropriate.
-sampletemplate Output an example template which can be
modified and used with the -template option
Certificate and account information options:
-retrieve=<serial> Retrieve the previously-issued certificate with the
specified serial number
-revoke=<serial> Revoke the certificate with the specified serial number
-status=<serial> Show the issued/revoked status for the certificate with
the specified serial number
-updated=<serial> Show the last-updated time for the certificate with the
specified serial number
-certsissued List the certificates issued during a specified time
window. See the "List-producing API options" section
below.
-certsrevoked List the certificates revoked during a specified time
window. See the "List-producing API options" section
below.
-certsexpiring List the certificates that expired or that will expire
during a specified time window. See the "List-producing
API options" section below.
-countissued Show the total count of certificates issued by this
HVCA account
-countrevoked Show the total count of certificates revoked by this
HVCA account
-quota Show the remaining quota of certificate issuances for
this HVCA account
-trustchain Show the chain of trust for certificates issued by this
HVCA account. The output is one or more PEM-encoded
certificates containing the root and any intermediate
Certificate Authority certificates.
-policy Show the validation policy for this HVCA account
Domain claim options:
-claims List all verified domain claims for this account. See
the "List-producing API options" section below.
-pending Used with -claims, list all pending rather than
verified domain claims
-claimsubmit=<domain> Submit a new domain claim
-claimretrieve=<id> Show the details of the domain claim with the specified
ID
-claimreassert=<id> Reassert an existing domain claim, for example when the
assert-by time of the existing claim has passed
-claimdelete=<id> Delete the domain claim with the specified ID
-claimdns=<id> Request assertion of domain control using DNS for the
claim with the specified ID
-claimhttp=<id> Request assertion of domain control using HTTP for the
claim with the specified ID
-scheme=<scheme> Used with -claimhttp, specifies the protocol used to verify assertion of domain control
-authdomain=<authdomain> Used with -claimhttp and -claimsdns, specifies the authorization domain used to verify assertion of domain control
List-producing API options:
A number of options listed above return a paginated list of results and a
total count of items. The total count may be higher than the number of items
shown if the total count is higher than the specified or maximum number of
items per page. The remaining items may be retrieved by incrementing the page
number in subsequent usages of the same option.
The following options control the pagination:
-from=<time> The beginning of the time window, with a time format of
2016-01-02T15:04:05UTC. Defaults to 30 days prior to
the current time.
-to=<time> The end of the time window, with a time format of
2016-01-02T15:04:05UTC. Defaults to the current time.
-since=<duration> Used instead of -from and -to, this signifies a time
window from the specified duration in the past through
to the current time. The format is the same as for the
-duration option.
-page=<int> The page number. Defaults to 1
-pagesize=<int> The number of items per page. Defaults to 100.
-totalcount Show the total count of items in the population instead
of listing them.
Convenience options:
-genrsa=<int> Generate and output an RSA private key with the
specified bit size
-encrypt When used with -genrsa, prompt for a passphrase and
use it to encrypt the generated private key
Other options:
-h Show this help page.
-v Show version information.
`
var versionString = `HVClient 1.0
Usage: hvclient [options]
Copyright (c) 2019-2021 GMO GlobalSign Pte. Ltd.
Licensed under the MIT License (the "License"); you may not use this file except
in compliance with the License. You may obtain a copy of the License at
https://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
`
const sampleTemplate = `{
"validity": {
"not_before": 1477958400,
"not_after": 1509494400
},
"subject_dn": {
"country": "GB",
"state": "London",
"locality": "London",
"street_address": "1 GlobalSign Road",
"organization": "GMO GlobalSign",
"organizational_unit": [
"Operations",
"Development"
],
"common_name": "<NAME>",
"email": "<EMAIL>",
"jurisdiction_of_incorporation_locality_name": "London",
"jurisdiction_of_incorporation_state_or_province_name": "London",
"jurisdiction_of_incorporation_country_name": "United Kingdom",
"business_category": "Internet security",
"extra_attributes": [
{
"type": "2.5.4.4",
"value": "Surname"
}
]
},
"san": {
"dns_names": [
"test.demo.hvca.globalsign.com",
"test2.demo.hvca.globalsign.com"
],
"emails": [
"<EMAIL>",
"<EMAIL>"
],
"ip_addresses": [
"192.168.3.11"
],
"uris": [
"http://test.demo.hvca.globalsign.com/uri"
],
"other_names": [
{
"type": "1.3.6.1.4.1.311.20.2.3",
"value": "<EMAIL>"
}
]
},
"extended_key_usages": [
"1.3.6.1.5.5.7.3.1",
"1.3.6.1.5.5.7.3.2"
],
"subject_da": {
"gender": "m",
"date_of_birth": "1979-01-31",
"place_of_birth": "London",
"country_of_citizenship": [
"GB",
"US"
],
"country_of_residence": [
"US"
],
"extra_attributes": [
{
"type": "192.168.3.11.1.1.1"
},
{
"type": "192.168.3.11.1.1.2",
"value": "custom subject da value"
}
]
},
"qualified_statements": {
"semantics": {
"identifier": "1.1.1.1.1.1",
"name_authorities": [
"<EMAIL>"
]
},
"etsi_qc_compliance": true,
"etsi_qc_sscd_compliance": true,
"etsi_qc_type": "0.4.0.18172.16.58.3",
"etsi_qc_retention_period": 1,
"etsi_qc_pds": {
"EN": "https://demo.hvsign.globalsign.com/en/pds",
"RU": "https://demo.hvsign.globalsign.com/ru/pds"
}
},
"ms_extension_template": {
"id": "1.2.3.4",
"major_version": 3,
"minor_version": 7
},
"custom_extensions": {
"192.168.127.12.1": "NIL",
"192.168.127.12.2": "SOME TEXT"
}
}
`
// showHelp outputs online help documentation.
func showHelp() {
fmt.Print(helpDoc)
}
// showVersion outputs version and copyright information.
func showVersion() {
fmt.Print(versionString)
}
// showSampleTemplate outputs a sample certificate request template.
func showSampleTemplate() {
fmt.Print(sampleTemplate)
} | cmd/hvclient/help.go | 0.775775 | 0.418994 | help.go | starcoder |
package llrb
import "fmt"
// KeyT is the key type stored in the tree.
type KeyT int64
const (
	// maxkey and minkey are the extreme KeyT values; they serve as the
	// open-ended bounds when checking the BST ordering invariant.
	maxkey = KeyT(0x7fffffffffffffff)
	minkey = -maxkey - 1
)
// node is a single tree node. Nodes keep an explicit parent pointer so
// fix-up passes can walk from a modified node back toward the root.
// black records the color of the link from the parent (false = red;
// the zero value makes freshly created nodes red).
type node struct {
	key KeyT
	parent, left, right *node
	black bool
}
// Tree is a left-leaning red-black (LLRB) binary search tree rooted at root.
type Tree struct {
	root *node
}
// Insert adds k to the tree and re-establishes the root invariants
// (no parent, black root).
func (t *Tree) Insert(k KeyT) {
	t.root = insert(t.root, k)
	t.root.parent = nil
	t.root.black = true
}
// insert adds k below the subtree rooted at x (duplicates descend left,
// because the walk takes the left branch when k <= key) and then walks
// back up from the new node's parent, restoring the LLRB invariants with
// rotations and color flips. The new node starts red (black defaults to
// false). Returns the possibly-new subtree root.
func insert(x *node, k KeyT) *node {
	if x == nil {
		return &node{key: k}
	}
	// Iterative BST descent; p tracks the prospective parent. The loop
	// exits once a new leaf has been attached (x becomes nil).
	var p *node
	for x != nil {
		p = x
		if k <= p.key {
			x = p.left
			if x == nil {
				p.left = &node{key: k}
				p.left.parent = p
			}
		} else {
			x = p.right
			if x == nil {
				p.right = &node{key: k}
				p.right.parent = p
			}
		}
	}
	// Bottom-up fix-up: rotate right-leaning reds left, rotate doubled
	// left reds right, split 4-nodes by color flip. The walk follows
	// parent pointers, so x ends at the subtree's (possibly new) root.
	for p != nil {
		if isRed(p.right) {
			p = rotateLeft(p)
		}
		if isRed(p.left) && isRed(p.left.left) {
			p = rotateRight(p)
		}
		if isRed(p.left) && isRed(p.right) {
			colorFlip(p)
		}
		x = p
		p = p.parent
	}
	return x
}
// rotateLeft rotates the link between x and its right child to the left,
// making x.right the new subtree root. The grandparent's child link and
// all affected parent pointers are rewired, and the two nodes swap
// colors so the link color seen from outside the subtree is preserved.
// Requires x.right != nil. Returns the new subtree root.
func rotateLeft(x *node) *node {
	y := x.right
	y.parent = x.parent
	if x.parent != nil {
		if x == x.parent.left {
			x.parent.left = y
		} else {
			x.parent.right = y
		}
	}
	// y's old left subtree becomes x's right subtree.
	x.right = y.left
	if x.right != nil {
		x.right.parent = x
	}
	y.left = x
	x.parent = y
	x.black, y.black = y.black, x.black
	return y
}
// rotateRight is the mirror of rotateLeft: it rotates the link between y
// and its left child to the right, making y.left the new subtree root,
// with the same parent-pointer rewiring and color swap.
// Requires y.left != nil. Returns the new subtree root.
func rotateRight(y *node) *node {
	x := y.left
	x.parent = y.parent
	if y.parent != nil {
		if y == y.parent.left {
			y.parent.left = x
		} else {
			y.parent.right = x
		}
	}
	// x's old right subtree becomes y's left subtree.
	y.left = x.right
	if y.left != nil {
		y.left.parent = y
	}
	x.right = y
	y.parent = x
	x.black, y.black = y.black, x.black
	return x
}
// isRed reports whether the link into x is red; nil links count as black.
func isRed(x *node) bool {
	if x == nil {
		return false
	} else {
		return !x.black
	}
}
// colorFlip inverts the colors of x and both of its children, splitting
// or forming a 4-node. Both children must be non-nil.
func colorFlip(x *node) {
	x.black = !x.black
	x.left.black = !x.left.black
	x.right.black = !x.right.black
}
// fixUp re-establishes the local LLRB invariants at x on the way back up
// after a deletion step: lean a right-leaning red left, lift a doubled
// left red, and split a 4-node. Returns the new subtree root.
func fixUp(x *node) *node {
	if isRed(x.right) {
		x = rotateLeft(x)
	}
	if isRed(x.left) && isRed(x.left.left) {
		x = rotateRight(x)
	}
	if isRed(x.left) && isRed(x.right) {
		colorFlip(x)
	}
	return x
}
// moveRedRight borrows redness before the deletion walk descends right:
// it color-flips x, and if x.left.left is red it rotates right and flips
// again so the red moves to the right side.
func moveRedRight(x *node) *node {
	// invariant: either |x| or |x.right| is red
	colorFlip(x)
	if isRed(x.left.left) {
		x = rotateRight(x)
		colorFlip(x)
	}
	return x
}
// moveRedLeft is the mirror operation used before descending left: it
// color-flips x, and if x.right.left is red it rotates the red over to
// the left side with a right-then-left rotation plus another flip.
func moveRedLeft(x *node) *node {
	//invariant: either |x| or |x.right| is red
	colorFlip(x)
	if isRed(x.right.left) {
		x.right = rotateRight(x.right)
		x = rotateLeft(x)
		colorFlip(x)
	}
	return x
}
// DeleteMin removes the smallest key from the tree and re-blackens the
// new root (if any).
func (t *Tree) DeleteMin() {
	r, _ := deleteMin(t.root)
	if r != nil {
		r.black = true
	}
	t.root = r
}
// deleteMin walks to the minimum of the subtree rooted at x, applying
// moveRedLeft on the way down, splices the minimum out, then walks back
// up applying fixUp. It returns the resulting root and the removed node.
//
// NOTE(review): two things look broken here and should be confirmed:
// (1) the spliced-in child r never has r.parent updated, so the upward
// walk via parent pointers can pass through the removed node; and
// (2) when the minimum has no right child (r == nil) the fix-up loop is
// skipped and nil is returned as the new root, discarding the rest of
// the tree. Compare with delete below, which is already marked broken.
func deleteMin(x *node) (*node, *node) {
	if x == nil {
		return nil, nil
	}
	for x.left != nil {
		if !isRed(x.left) && !isRed(x.left.left) {
			x = moveRedLeft(x)
		}
		x = x.left
	}
	// Splice the minimum (x) out by pointing its parent at x.right.
	p := x.parent
	r := x.right
	if p != nil {
		if x == p.left {
			p.left = r
		} else {
			p.right = r
		}
	}
	// Walk upward from r applying fixUp; p ends at the final root.
	p = nil
	for r != nil {
		p = fixUp(r)
		r = p.parent
	}
	return p, x
}
// Delete removes k from the tree, re-blackens the new root, and reports
// whether a node was actually removed. It relies on delete below, which
// the original author marked as not working.
func (t *Tree) Delete(k KeyT) bool {
	r, d := delete(t.root, k)
	if r != nil {
		r.black = true
	}
	t.root = r
	return d != nil
}
// NOT WORKING
// delete is the recursive LLRB deletion: it pushes redness toward the
// search path with moveRedLeft/moveRedRight, replaces a matched internal
// node with its successor (the minimum of its right subtree), and calls
// fixUp on the way back out. Returns the new subtree root and the node
// that was unlinked (nil if k was not found).
//
// NOTE(review): kept as-is per the author's warning above; it also
// depends on deleteMin, whose splice/fix-up path looks suspect (see the
// review note there), and the early `return nil, x` discards x.left.
func delete(x *node, k KeyT) (*node, *node) {
	if x == nil {
		return nil, nil
	}
	var del *node = nil
	if k < x.key {
		if !isRed(x.left) && !isRed(x.left.left) {
			x = moveRedLeft(x)
		}
		x.left, del = delete(x.left, k)
	} else {
		if isRed(x.left) {
			x = rotateRight(x)
		}
		if k == x.key && x.right == nil {
			return nil, x
		}
		if !isRed(x.right) && !isRed(x.right.left) {
			x = moveRedRight(x)
		}
		if k == x.key {
			// Replace x with the successor d pulled from the right subtree.
			r, d := deleteMin(x.right)
			d.left = x.left
			d.right = r
			d.parent = x.parent
			if d.left != nil {
				d.left.parent = d
			}
			if d.right != nil {
				d.right.parent = d
			}
			d.black = x.black
			del = x
			x = d
		} else {
			x.right, del = delete(x.right, k)
		}
	}
	return fixUp(x), del
}
// isBST reports whether the tree satisfies the binary-search-tree
// ordering invariant.
func (t *Tree) isBST() bool {
	return isBST(t.root, minkey, maxkey)
}

// isBST recursively checks that every key in the subtree rooted at x
// lies within the closed interval [min, max].
func isBST(x *node, min, max KeyT) bool {
	if x == nil {
		return true
	}
	if x.key < min || x.key > max {
		return false
	}
	return isBST(x.left, min, x.key) && isBST(x.right, x.key, max)
}
func (t *Tree) WriteDot() string {
return "digraph LLRB {\n" + writeDot(t.root) + "}\n"
}
func writeDot(x *node) string {
if x == nil {
return fmt.Sprintf("n%p[shape=point]", x)
}
c := "red"
if x.black {
c = "black"
}
s := fmt.Sprintf("n%p[label=%d, color=%s]\n", x, x.key, c)
if x.left != nil {
s += fmt.Sprintf("n%p -> n%p\n", x, x.left)
s += writeDot(x.left)
} else {
s += fmt.Sprintf("n%p -> n%pleft\n", x, x)
s += fmt.Sprintf("n%pleft[shape=point]\n", x)
}
if x.right != nil {
s += fmt.Sprintf("n%p -> n%p\n", x, x.right)
s += writeDot(x.right)
} else {
s += fmt.Sprintf("n%p -> n%pright\n", x, x)
s += fmt.Sprintf("n%pright[shape=point]\n", x)
}
return s
} | Lecture 10- Red-black trees/src/llrb/llrb.go | 0.592784 | 0.454835 | llrb.go | starcoder |
package cryptypes
import (
"database/sql/driver"
"time"
)
// EncryptedTime supports encrypting Time data
type EncryptedTime struct {
	Field
	Raw time.Time
}

// Scan decrypts the raw database bytes into the wrapped time.Time.
func (t *EncryptedTime) Scan(value interface{}) error {
	ciphertext := value.([]byte)
	return decrypt(ciphertext, &t.Raw)
}

// Value encrypts the wrapped time.Time so it can safely be stored in the DB.
func (t EncryptedTime) Value() (driver.Value, error) {
	return encrypt(t.Raw)
}

// NullEncryptedTime supports encrypting nullable Time data
type NullEncryptedTime struct {
	Field
	Raw time.Time
	Empty bool
}

// Scan decrypts the raw database bytes; a database NULL yields the zero
// time with Empty set.
func (t *NullEncryptedTime) Scan(value interface{}) error {
	if value != nil {
		return decrypt(value.([]byte), &t.Raw)
	}
	t.Raw = time.Time{}
	t.Empty = true
	return nil
}

// Value encrypts the wrapped time for storage; an Empty value is stored
// as NULL.
func (t NullEncryptedTime) Value() (driver.Value, error) {
	if !t.Empty {
		return encrypt(t.Raw)
	}
	return nil, nil
}
// SignedTime supports signing Time data
type SignedTime struct {
	Field
	Raw time.Time
	Valid bool
}

// Scan verifies the signed database bytes, storing the decoded time in
// Raw and the signature-check result in Valid.
func (t *SignedTime) Scan(value interface{}) error {
	valid, err := verify(value.([]byte), &t.Raw)
	t.Valid = valid
	return err
}

// Value signs the wrapped time.Time so it can safely be stored in the DB.
func (t SignedTime) Value() (driver.Value, error) {
	return sign(t.Raw)
}

// NullSignedTime supports signing nullable Time data
type NullSignedTime struct {
	Field
	Raw time.Time
	Empty bool
	Valid bool
}

// Scan verifies the signed database bytes; a database NULL yields the
// zero time with Empty and Valid both set.
func (t *NullSignedTime) Scan(value interface{}) error {
	if value == nil {
		t.Raw = time.Time{}
		t.Empty = true
		t.Valid = true
		return nil
	}
	valid, err := verify(value.([]byte), &t.Raw)
	t.Valid = valid
	return err
}

// Value signs the wrapped time for storage; an Empty value is stored as
// NULL.
func (t NullSignedTime) Value() (driver.Value, error) {
	if !t.Empty {
		return sign(t.Raw)
	}
	return nil, nil
}
// SignedEncryptedTime supports signing and encrypting Time data
type SignedEncryptedTime struct {
Field
Raw time.Time
Valid bool
}
// Scan converts the value from the DB into a usable SignedEncryptedTime value
func (s *SignedEncryptedTime) Scan(value interface{}) (err error) {
s.Valid, err = decryptVerify(value.([]byte), &s.Raw)
return
}
// Value converts an initialized SignedEncryptedTime value into a value that can safely be stored in the DB
func (s SignedEncryptedTime) Value() (driver.Value, error) {
return encryptSign(s.Raw)
}
// NullSignedEncryptedTime supports signing and encrypting nullable Time data
type NullSignedEncryptedTime struct {
Field
Raw time.Time
Empty bool
Valid bool
}
// Scan converts the value from the DB into a usable NullSignedEncryptedTime value
func (s *NullSignedEncryptedTime) Scan(value interface{}) (err error) {
if value == nil {
s.Raw = time.Time{}
s.Empty = true
s.Valid = true
return nil
}
s.Valid, err = decryptVerify(value.([]byte), &s.Raw)
return
}
// Value converts an initialized NullSignedEncryptedTime value into a value that can safely be stored in the DB
func (s NullSignedEncryptedTime) Value() (driver.Value, error) {
if s.Empty {
return nil, nil
}
return encryptSign(s.Raw)
} | cryptypes/type_time.go | 0.816662 | 0.592667 | type_time.go | starcoder |
// Copyright 2016 Factom Foundation
// Use of this source code is governed by the MIT
// license that can be found in the LICENSE file.
// Package bip44 implements the BIP-44 scheme on top of BIP-32, assigning
// fixed meanings to the levels of the derivation tree so that a single
// seed can back multiple coin types, accounts, and addresses.
package bip44
import (
bip32 "github.com/bnchain/bnchain/wallet/bipwallet/go-bip32"
bip39 "github.com/bnchain/bnchain/wallet/bipwallet/go-bip39"
)
// Purpose is the hardened BIP-44 purpose index (44' = 0x8000002C); it is
// the first child derived below the master key in every m/44'/... path.
const Purpose uint32 = 0x8000002C
//https://github.com/bitcoin/bips/blob/master/bip-0044.mediawiki
//https://github.com/satoshilabs/slips/blob/master/slip-0044.md
//https://github.com/FactomProject/FactomDocs/blob/master/wallet_info/wallet_test_vectors.md
/*
const (
// TypeBitcoin 比特币类型
TypeBitcoin uint32 = 0x80000000
TypeTestnet uint32 = 0x80000001
// TypeLitecoin 莱特币类型
TypeLitecoin uint32 = 0x80000002
TypeDogecoin uint32 = 0x80000003
TypeReddcoin uint32 = 0x80000004
TypeDash uint32 = 0x80000005
TypePeercoin uint32 = 0x80000006
TypeNamecoin uint32 = 0x80000007
TypeFeathercoin uint32 = 0x80000008
TypeCounterparty uint32 = 0x80000009
TypeBlackcoin uint32 = 0x8000000a
TypeNuShares uint32 = 0x8000000b
TypeNuBits uint32 = 0x8000000c
TypeMazacoin uint32 = 0x8000000d
TypeViacoin uint32 = 0x8000000e
TypeClearingHouse uint32 = 0x8000000f
TypeRubycoin uint32 = 0x80000010
TypeGroestlcoin uint32 = 0x80000011
TypeDigitalcoin uint32 = 0x80000012
TypeCannacoin uint32 = 0x80000013
TypeDigiByte uint32 = 0x80000014
TypeOpenAssets uint32 = 0x80000015
TypeMonacoin uint32 = 0x80000016
TypeClams uint32 = 0x80000017
TypePrimecoin uint32 = 0x80000018
TypeNeoscoin uint32 = 0x80000019
TypeJumbucks uint32 = 0x8000001a
TypeziftrCOIN uint32 = 0x8000001b
TypeVertcoin uint32 = 0x8000001c
TypeNXT uint32 = 0x8000001d
TypeBurst uint32 = 0x8000001e
TypeMonetaryUnit uint32 = 0x8000001f
TypeZoom uint32 = 0x80000020
TypeVpncoin uint32 = 0x80000021
TypeCanadaeCoin uint32 = 0x80000022
TypeShadowCash uint32 = 0x80000023
TypeParkByte uint32 = 0x80000024
TypePandacoin uint32 = 0x80000025
TypeStartCOIN uint32 = 0x80000026
TypeMOIN uint32 = 0x80000027
TypeArgentum uint32 = 0x8000002D
TypeGlobalCurrencyReserve uint32 = 0x80000031
TypeNovacoin uint32 = 0x80000032
TypeAsiacoin uint32 = 0x80000033
TypeBitcoindark uint32 = 0x80000034
TypeDopecoin uint32 = 0x80000035
TypeTemplecoin uint32 = 0x80000036
TypeAIB uint32 = 0x80000037
TypeEDRCoin uint32 = 0x80000038
TypeSyscoin uint32 = 0x80000039
TypeSolarcoin uint32 = 0x8000003a
TypeSmileycoin uint32 = 0x8000003b
// TypeEther 以太坊类型
TypeEther uint32 = 0x8000003c
// TypeEtherClassic 经典以太坊
TypeEtherClassic uint32 = 0x8000003d
TypeOpenChain uint32 = 0x80000040
TypeOKCash uint32 = 0x80000045
TypeDogecoinDark uint32 = 0x8000004d
TypeElectronicGulden uint32 = 0x8000004e
TypeClubCoin uint32 = 0x8000004f
TypeRichCoin uint32 = 0x80000050
TypePotcoin uint32 = 0x80000051
TypeQuarkcoin uint32 = 0x80000052
TypeTerracoin uint32 = 0x80000053
TypeGridcoin uint32 = 0x80000054
TypeAuroracoin uint32 = 0x80000055
TypeIXCoin uint32 = 0x80000056
TypeGulden uint32 = 0x80000057
TypeBitBean uint32 = 0x80000058
TypeBata uint32 = 0x80000059
TypeMyriadcoin uint32 = 0x8000005a
TypeBitSend uint32 = 0x8000005b
TypeUnobtanium uint32 = 0x8000005c
TypeMasterTrader uint32 = 0x8000005d
TypeGoldBlocks uint32 = 0x8000005e
TypeSaham uint32 = 0x8000005f
TypeChronos uint32 = 0x80000060
TypeUbiquoin uint32 = 0x80000061
TypeEvotion uint32 = 0x80000062
TypeSaveTheOcean uint32 = 0x80000063
TypeBigUp uint32 = 0x80000064
TypeGameCredits uint32 = 0x80000065
TypeDollarcoins uint32 = 0x80000066
// TypeZayedcoin Zayed币
TypeZayedcoin uint32 = 0x80000067
TypeDubaicoin uint32 = 0x80000068
TypeStratis uint32 = 0x80000069
TypeShilling uint32 = 0x8000006a
TypePiggyCoin uint32 = 0x80000076
TypeMonero uint32 = 0x80000080
TypeNavCoin uint32 = 0x80000082
// TypeFactomFactoids FactomFactoids
TypeFactomFactoids uint32 = 0x80000083
// TypeFactomEntryCredits FactomEntryCredits
TypeFactomEntryCredits uint32 = 0x80000084
TypeZcash uint32 = 0x80000085
TypeLisk uint32 = 0x80000086
)
*/
// NewKeyFromMnemonic derives the BIP-44 child key for the given
// coin/account/chain/address path from a BIP-39 mnemonic, using an empty
// passphrase when generating the seed.
func NewKeyFromMnemonic(mnemonic string, coin, account, chain, address uint32) (*bip32.Key, error) {
	seed, err := bip39.NewSeedWithErrorChecking(mnemonic, "")
	if err != nil {
		return nil, err
	}
	master, err := bip32.NewMasterKey(seed)
	if err != nil {
		return nil, err
	}
	return NewKeyFromMasterKey(master, coin, account, chain, address)
}
// NewKeyFromMasterKey 新建Key
func NewKeyFromMasterKey(masterKey *bip32.Key, coin, account, chain, address uint32) (*bip32.Key, error) {
child, err := masterKey.NewChildKey(Purpose)
if err != nil {
return nil, err
}
child, err = child.NewChildKey(coin)
if err != nil {
return nil, err
}
child, err = child.NewChildKey(account)
if err != nil {
return nil, err
}
child, err = child.NewChildKey(chain)
if err != nil {
return nil, err
}
child, err = child.NewChildKey(address)
if err != nil {
return nil, err
}
return child, nil
} | vendor/github.com/33cn/chain33/wallet/bipwallet/go-bip44/bip44.go | 0.542379 | 0.452475 | bip44.go | starcoder |
// Package bytes2 provides alternate implementations of functionality similar
// to go's bytes package.
package bytes2
import (
"fmt"
"io"
"unicode/utf8"
"github.com/xwb1989/sqlparser/dependency/hack"
)
// ChunkedWriter has the same interface as bytes.Buffer's write functions.
// It additionally provides a Reserve function that returns a []byte that
// the caller can directly change.
type ChunkedWriter struct {
bufs [][]byte
}
// NewChunkedWriter returns a ChunkedWriter whose chunks each have capacity
// chunkSize.
func NewChunkedWriter(chunkSize int) *ChunkedWriter {
	first := make([]byte, 0, chunkSize)
	return &ChunkedWriter{bufs: [][]byte{first}}
}
// Bytes This function can get expensive for large buffers.
// With a single chunk the underlying slice is returned directly; otherwise
// every chunk is copied into one freshly allocated slice.
func (cw *ChunkedWriter) Bytes() (b []byte) {
	if len(cw.bufs) == 1 {
		return cw.bufs[0]
	}
	b = make([]byte, 0, cw.Len())
	for _, buf := range cw.bufs {
		b = append(b, buf...)
	}
	return b
}
// Len returns the total number of bytes written across all chunks.
func (cw *ChunkedWriter) Len() int {
	total := 0
	for _, chunk := range cw.bufs {
		total += len(chunk)
	}
	return total
}
// Reset discards all written data while keeping the first chunk's backing
// array for reuse.
func (cw *ChunkedWriter) Reset() {
	b := cw.bufs[0][:0]
	cw.bufs = make([][]byte, 1)
	cw.bufs[0] = b
}
// Truncate shortens the buffer to exactly n bytes, dropping any chunks
// beyond the cut point. It panics if n exceeds the current length.
func (cw *ChunkedWriter) Truncate(n int) {
	for i, buf := range cw.bufs {
		if n > len(buf) {
			// Cut point lies past this chunk; skip its bytes.
			n -= len(buf)
			continue
		}
		// Cut point falls inside (or at the end of) this chunk.
		cw.bufs[i] = buf[:n]
		cw.bufs = cw.bufs[:i+1]
		return
	}
	panic("bytes.ChunkedBuffer: truncation out of range")
}
// Write appends p to the buffer, implementing io.Writer. The slice is
// converted with hack.String and delegated to WriteString.
func (cw *ChunkedWriter) Write(p []byte) (n int, err error) {
	return cw.WriteString(hack.String(p))
}
// WriteString appends p to the buffer, spilling into freshly allocated
// chunks (sized like the first chunk) whenever the current chunk fills up.
// It always reports len(p) bytes written and a nil error.
func (cw *ChunkedWriter) WriteString(p string) (n int, err error) {
	n = len(p)
	lastbuf := cw.bufs[len(cw.bufs)-1]
	for {
		available := cap(lastbuf) - len(lastbuf)
		required := len(p)
		if available >= required {
			// The remainder fits in the current chunk; done.
			cw.bufs[len(cw.bufs)-1] = append(lastbuf, p...)
			return
		}
		// Fill the current chunk, then continue in a new empty one.
		cw.bufs[len(cw.bufs)-1] = append(lastbuf, p[:available]...)
		p = p[available:]
		lastbuf = make([]byte, 0, cap(cw.bufs[0]))
		cw.bufs = append(cw.bufs, lastbuf)
	}
}
// Reserve returns a writable n-byte window inside the buffer that the
// caller may fill in directly. It panics if n exceeds the chunk size.
// If the current chunk lacks room, a new chunk is started and the
// reservation occupies its beginning.
func (cw *ChunkedWriter) Reserve(n int) (b []byte) {
	if n > cap(cw.bufs[0]) {
		panic(fmt.Sprintf("bytes.ChunkedBuffer: Reserve request too high: %d > %d", n, cap(cw.bufs[0])))
	}
	lastbuf := cw.bufs[len(cw.bufs)-1]
	if n > cap(lastbuf)-len(lastbuf) {
		// Not enough room: start a new chunk holding the reservation.
		b = make([]byte, n, cap(cw.bufs[0]))
		cw.bufs = append(cw.bufs, b)
		return b
	}
	// Extend the last chunk by n bytes and hand back that window.
	l := len(lastbuf)
	b = lastbuf[l : n+l]
	cw.bufs[len(cw.bufs)-1] = lastbuf[:n+l]
	return b
}
// WriteByte appends a single byte, implementing io.ByteWriter. The
// returned error is always nil.
func (cw *ChunkedWriter) WriteByte(c byte) error {
	cw.Reserve(1)[0] = c
	return nil
}
// WriteRune appends the UTF-8 encoding of r and returns its byte length.
// The returned error is always nil.
func (cw *ChunkedWriter) WriteRune(r rune) (n int, err error) {
	n = utf8.EncodeRune(cw.Reserve(utf8.RuneLen(r)), r)
	return n, nil
}
func (cw *ChunkedWriter) WriteTo(w io.Writer) (n int64, err error) {
for _, buf := range cw.bufs {
m, err := w.Write(buf)
n += int64(m)
if err != nil {
return n, err
}
if m != len(buf) {
return n, io.ErrShortWrite
}
}
cw.Reset()
return n, nil
} | vendor/github.com/xwb1989/sqlparser/dependency/bytes2/chunked_writer.go | 0.642545 | 0.419172 | chunked_writer.go | starcoder |
package puzzle
import (
"fmt"
"strings"
)
// LineWidth the number of cells in each row, column and cube
const LineWidth = 9

// cubeWidth is the side length (in cells) of one 3x3 sub-cube.
const cubeWidth = LineWidth / 3

// Puzzle Representation of a Sudoku puzzle as a 9x9 grid of Squares,
// indexed [row][column].
type Puzzle [LineWidth][LineWidth]Square
// GetRow returns a given row as a Set of nine squares.
func (p Puzzle) GetRow(i int) Set {
	return p[i]
}
// GetColumn returns a column by given index: the i-th square of every row.
func (p Puzzle) GetColumn(i int) Set {
	var column Set
	for row := range p {
		column[row] = p[row][i]
	}
	return column
}
// GetCube returns squares that form a cube around given point: the nine
// squares of the 3x3 cube containing (x, y), in row-major order.
func (p Puzzle) GetCube(x, y int) Set {
	// Snap (x, y) down to the cube's top-left corner.
	xOffset := x / cubeWidth
	yOffset := y / cubeWidth
	var result Set
	xIndex := xOffset * cubeWidth
	yIndex := yOffset * cubeWidth
	// Copy the three rows of the cube into consecutive thirds of the set.
	copy(result[0:3], p[xIndex][yIndex:yIndex+3])
	copy(result[3:6], p[1+xIndex][yIndex:yIndex+3])
	copy(result[6:9], p[2+xIndex][yIndex:yIndex+3])
	return result
}
// CalculatePossibilities Gives puzzle with updated possibility values.
// For every unsolved square the candidate set becomes the intersection of
// the values still available in its row, its column and its cube. The
// receiver is a value, so the caller's puzzle is left untouched.
func (p Puzzle) CalculatePossibilities() Puzzle {
	// Precompute the available values for each row, column and cube once.
	var possByRow [LineWidth]Possibilies
	for row := 0; row < LineWidth; row++ {
		possByRow[row] = findElementPossbilities(p.GetRow(row))
	}
	var possByColumn [LineWidth]Possibilies
	for col := 0; col < LineWidth; col++ {
		possByColumn[col] = findElementPossbilities(p.GetColumn(col))
	}
	var possByCube [LineWidth]Possibilies
	for cube := 0; cube < LineWidth; cube++ {
		possByCube[cube] = findElementPossbilities(p.GetCube(cubePosition(cube)))
	}
	// Intersect the three sets for every unsolved square.
	for x := 0; x < LineWidth; x++ {
		for y := 0; y < LineWidth; y++ {
			if !p[x][y].Solved() {
				p[x][y].Possibilities = mergePoss(possByRow[x], possByColumn[y], possByCube[cubeNumber(x, y)])
			}
		}
	}
	return p
}
// cubePosition maps a cube index (0-8, row-major) to the x,y coordinate of
// that cube's top-left square.
func cubePosition(cubeNumber int) (int, int) {
	x := cubeWidth * (cubeNumber % cubeWidth)
	y := cubeWidth * (cubeNumber / cubeWidth)
	return x, y
}
// cubeNumber maps the coordinate (x, y) to the row-major index (0-8) of
// the 3x3 cube containing it; the inverse of cubePosition.
func cubeNumber(x, y int) int {
	return (x / cubeWidth) + (y/cubeWidth)*cubeWidth
}
// findElementPossbilities returns, for one row/column/cube, which of the
// values 1-9 are not yet taken by a solved square; index i corresponds to
// value i+1. (Name keeps the historical typo to avoid touching callers.)
func findElementPossbilities(elements Set) Possibilies {
	poss := Possibilies{true, true, true, true, true, true, true, true, true}
	for _, element := range elements {
		if element.Solved() {
			poss[element.Value-1] = false
		}
	}
	return poss
}
// PositionInCube Given an x,y, gives the position in a set: the row-major
// index (0-8) of the square within its own 3x3 cube.
func PositionInCube(x, y int) int {
	return (x%cubeWidth)*cubeWidth + (y % cubeWidth)
}
// mergePoss intersects the row, column and cube possibility sets: a value
// stays possible only if it is possible in all three.
func mergePoss(row, col, cube Possibilies) Possibilies {
	var merged Possibilies
	for i := 0; i < len(merged); i++ {
		merged[i] = row[i] && col[i] && cube[i]
	}
	return merged
}
func (p Puzzle) String() string {
var output []string
for _, r := range p {
var line []string
for _, v := range r {
if v.Solved() {
line = append(line, fmt.Sprintf("%v", v.Value))
} else {
line = append(line, "-")
}
}
output = append(output, strings.Join(line[:], " "))
}
return strings.Join(output[:], "\n")
} | cmd/go-sudoku/puzzle/puzzle.go | 0.7696 | 0.588121 | puzzle.go | starcoder |
package geosegmentize
import (
"github.com/cockroachdb/errors"
"github.com/twpayne/go-geom"
)
// MaxPoints is the maximum number of points segmentize is allowed to generate.
// NOTE(review): the limit is declared but not enforced anywhere in this
// file — presumably the segmentizeCoords callbacks or callers check it;
// confirm before relying on it.
const MaxPoints = 16336
// SegmentizeGeom returns a modified geom.T having no segment longer
// than the given maximum segment length.
// segmentMaxAngleOrLength represents two different things depending
// on the object, which is about to segmentize as in case of geography
// it represents maximum segment angle whereas, in case of geometry it
// represents maximum segment distance.
// segmentizeCoords represents the function's definition which allows
// us to segmentize given two-points. We have to specify segmentizeCoords
// explicitly, as the algorithm for segmentization is significantly
// different for geometry and geography.
//
// Collection types (multi-*, polygons, geometry collections) are handled
// by recursing into their members; point types are returned unchanged.
func SegmentizeGeom(
	geometry geom.T,
	segmentMaxAngleOrLength float64,
	segmentizeCoords func(geom.Coord, geom.Coord, float64) ([]float64, error),
) (geom.T, error) {
	if geometry.Empty() {
		return geometry, nil
	}
	switch geometry := geometry.(type) {
	case *geom.Point, *geom.MultiPoint:
		// Points have no segments to subdivide.
		return geometry, nil
	case *geom.LineString:
		// Segmentize each consecutive coordinate pair; each call returns
		// the start point plus any inserted points, but not the end point.
		var allFlatCoordinates []float64
		for pointIdx := 1; pointIdx < geometry.NumCoords(); pointIdx++ {
			coords, err := segmentizeCoords(geometry.Coord(pointIdx-1), geometry.Coord(pointIdx), segmentMaxAngleOrLength)
			if err != nil {
				return nil, err
			}
			allFlatCoordinates = append(
				allFlatCoordinates,
				coords...,
			)
		}
		// Appending end point as it wasn't included in the iteration of coordinates.
		allFlatCoordinates = append(allFlatCoordinates, geometry.Coord(geometry.NumCoords()-1)...)
		return geom.NewLineStringFlat(geom.XY, allFlatCoordinates).SetSRID(geometry.SRID()), nil
	case *geom.MultiLineString:
		// Segmentize each member line string recursively.
		segMultiLine := geom.NewMultiLineString(geom.XY).SetSRID(geometry.SRID())
		for lineIdx := 0; lineIdx < geometry.NumLineStrings(); lineIdx++ {
			l, err := SegmentizeGeom(geometry.LineString(lineIdx), segmentMaxAngleOrLength, segmentizeCoords)
			if err != nil {
				return nil, err
			}
			err = segMultiLine.Push(l.(*geom.LineString))
			if err != nil {
				return nil, err
			}
		}
		return segMultiLine, nil
	case *geom.LinearRing:
		// Same pairwise scheme as LineString, producing a ring.
		var allFlatCoordinates []float64
		for pointIdx := 1; pointIdx < geometry.NumCoords(); pointIdx++ {
			coords, err := segmentizeCoords(geometry.Coord(pointIdx-1), geometry.Coord(pointIdx), segmentMaxAngleOrLength)
			if err != nil {
				return nil, err
			}
			allFlatCoordinates = append(
				allFlatCoordinates,
				coords...,
			)
		}
		// Appending end point as it wasn't included in the iteration of coordinates.
		allFlatCoordinates = append(allFlatCoordinates, geometry.Coord(geometry.NumCoords()-1)...)
		return geom.NewLinearRingFlat(geom.XY, allFlatCoordinates).SetSRID(geometry.SRID()), nil
	case *geom.Polygon:
		// Segmentize the outer shell and every hole.
		segPolygon := geom.NewPolygon(geom.XY).SetSRID(geometry.SRID())
		for loopIdx := 0; loopIdx < geometry.NumLinearRings(); loopIdx++ {
			l, err := SegmentizeGeom(geometry.LinearRing(loopIdx), segmentMaxAngleOrLength, segmentizeCoords)
			if err != nil {
				return nil, err
			}
			err = segPolygon.Push(l.(*geom.LinearRing))
			if err != nil {
				return nil, err
			}
		}
		return segPolygon, nil
	case *geom.MultiPolygon:
		segMultiPolygon := geom.NewMultiPolygon(geom.XY).SetSRID(geometry.SRID())
		for polygonIdx := 0; polygonIdx < geometry.NumPolygons(); polygonIdx++ {
			p, err := SegmentizeGeom(geometry.Polygon(polygonIdx), segmentMaxAngleOrLength, segmentizeCoords)
			if err != nil {
				return nil, err
			}
			err = segMultiPolygon.Push(p.(*geom.Polygon))
			if err != nil {
				return nil, err
			}
		}
		return segMultiPolygon, nil
	case *geom.GeometryCollection:
		segGeomCollection := geom.NewGeometryCollection().SetSRID(geometry.SRID())
		for geoIdx := 0; geoIdx < geometry.NumGeoms(); geoIdx++ {
			g, err := SegmentizeGeom(geometry.Geom(geoIdx), segmentMaxAngleOrLength, segmentizeCoords)
			if err != nil {
				return nil, err
			}
			err = segGeomCollection.Push(g)
			if err != nil {
				return nil, err
			}
		}
		return segGeomCollection, nil
	}
	return nil, errors.Newf("unknown type: %T", geometry)
}
package network
import (
"math"
"gonum.org/v1/gonum/floats"
"gonum.org/v1/gonum/graph"
)
// HubAuthority is a Hyperlink-Induced Topic Search hub-authority score pair.
type HubAuthority struct {
	// Hub scores a node by the hub update (sum over its in-links' hubs,
	// then normalisation) performed in HITS.
	Hub float64
	// Authority scores a node by the authority update performed in HITS.
	Authority float64
}
// HITS returns the Hyperlink-Induced Topic Search hub-authority scores for
// nodes of the directed graph g. HITS terminates when the 2-norm of the
// vector difference between iterations is below tol. The returned map is
// keyed on the graph node IDs.
func HITS(g graph.Directed, tol float64) map[int64]HubAuthority {
	nodes := graph.NodesOf(g.Nodes())
	// Make a topological copy of g with dense node IDs.
	indexOf := make(map[int64]int, len(nodes))
	for i, n := range nodes {
		indexOf[n.ID()] = i
	}
	// Adjacency lists in dense-index space: in-neighbours and
	// out-neighbours of every node.
	nodesLinkingTo := make([][]int, len(nodes))
	nodesLinkedFrom := make([][]int, len(nodes))
	for i, n := range nodes {
		id := n.ID()
		for _, u := range graph.NodesOf(g.To(id)) {
			nodesLinkingTo[i] = append(nodesLinkingTo[i], indexOf[u.ID()])
		}
		for _, v := range graph.NodesOf(g.From(id)) {
			nodesLinkedFrom[i] = append(nodesLinkedFrom[i], indexOf[v.ID()])
		}
	}
	// The ID map is no longer needed; drop the reference.
	indexOf = nil
	// One backing array holds the auth/hub vectors and their deltas.
	w := make([]float64, 4*len(nodes))
	auth := w[:len(nodes)]
	hub := w[len(nodes) : 2*len(nodes)]
	for i := range nodes {
		auth[i] = 1
		hub[i] = 1
	}
	deltaAuth := w[2*len(nodes) : 3*len(nodes)]
	deltaHub := w[3*len(nodes):]
	var norm float64
	for {
		// Authority update: each node sums the hub scores of its
		// in-neighbours; the vector is then scaled to unit 2-norm.
		norm = 0
		for v := range nodes {
			var a float64
			for _, u := range nodesLinkingTo[v] {
				a += hub[u]
			}
			deltaAuth[v] = auth[v]
			auth[v] = a
			norm += a * a
		}
		norm = math.Sqrt(norm)
		for i := range auth {
			auth[i] /= norm
			deltaAuth[i] -= auth[i]
		}
		// Hub update: each node sums the authority scores of its
		// out-neighbours, again followed by 2-norm normalisation.
		norm = 0
		for u := range nodes {
			var h float64
			for _, v := range nodesLinkedFrom[u] {
				h += auth[v]
			}
			deltaHub[u] = hub[u]
			hub[u] = h
			norm += h * h
		}
		norm = math.Sqrt(norm)
		for i := range hub {
			hub[i] /= norm
			deltaHub[i] -= hub[i]
		}
		// Converged once both score vectors moved by less than tol.
		if floats.Norm(deltaAuth, 2) < tol && floats.Norm(deltaHub, 2) < tol {
			break
		}
	}
	hubAuth := make(map[int64]HubAuthority, len(nodes))
	for i, n := range nodes {
		hubAuth[n.ID()] = HubAuthority{Hub: hub[i], Authority: auth[i]}
	}
	return hubAuth
}
package matrix
import (
"bytes"
"fmt"
"math"
"math/rand"
)
type Vector []float64
// ToMatrix wraps a matrix around the underlying data of the vector. With
// no arguments the result is a 1-by-len(v) matrix; with a row count the
// result has dim[0] rows and len(v)/dim[0] columns.
func (v Vector) ToMatrix(dim ...int) *Matrix {
	if len(dim) > 0 {
		rows := dim[0]
		return NewMatrix(rows, len(v)/rows, v)
	}
	return NewMatrix(1, len(v), v)
}
// NewVector wraps a vector around vals. If vals is empty a zero vector of
// the requested size is allocated instead; otherwise len(vals) must equal
// size (enforced via Require).
func NewVector(size int, vals []float64) Vector {
	Require(len(vals) == 0 || len(vals) == size,
		"NewVector: require len(vals) == 0 || len(vals) == size\n")
	if len(vals) > 0 {
		return vals
	}
	return make(Vector, size)
}
// String converts a Vector into a string so that it can be printed
// using fmt.Printf("%v\n", vector). Each element appears on its own line
// in scientific notation, bracketed by "[" and "]".
func (v Vector) String() string {
	var buf bytes.Buffer
	// One line per element (~22 bytes each) plus brackets and newlines.
	buf.Grow(len(v)*22 + 3 + 2)
	fmt.Fprintf(&buf, "\n[\n")
	for i := 0; i < len(v); i++ {
		fmt.Fprintf(&buf, " %20.12e\n", v[i])
	}
	fmt.Fprintf(&buf, "]\n")
	return buf.String()
}
// add computes a + sign*b in place, storing the result in a. A negative
// sign subtracts; any other sign adds. Lengths must match (Require).
func (a Vector) add(b Vector, sign int) {
	Require(len(a) == len(b),
		"Add: dimension mismatch: len(a) == len(b)\n")
	if sign < 0 {
		for i := 0; i < len(a); i++ {
			a[i] -= b[i]
		}
	} else {
		for i := 0; i < len(a); i++ {
			a[i] += b[i]
		}
	}
}
// Sub returns the difference a - b as a new vector; neither operand is
// modified.
func (a Vector) Sub(b Vector) Vector {
	s := make(Vector, len(a))
	copy(s, a)
	s.add(b, -1)
	return s
}
// Add returns the sum a + b as a new vector; neither operand is modified.
func (a Vector) Add(b Vector) Vector {
	s := make(Vector, len(a))
	copy(s, a)
	s.add(b, 1)
	return s
}
// Dot returns the dot product of the two vectors, which must have equal
// length (enforced via Require).
func (a Vector) Dot(b Vector) float64 {
	Require(len(a) == len(b),
		"Dot: dimension mismatch: len(a) == len(b)\n")
	var sum float64
	for i := range a {
		sum += a[i] * b[i]
	}
	return sum
}
// OuterProd returns the outer product a^t*b as a len(a)-by-len(b) matrix.
func (a Vector) OuterProd(b Vector) *Matrix {
	o := NewMatrix(len(a), len(b), nil)
	for i, ai := range a {
		for j, bj := range b {
			o.matrix[i][j] = ai * bj
		}
	}
	return o
}
// Norm returns the p-norm of the vector, where p == 0 denotes the
// infinity norm: |v|_p = (sum |v_i|^p)^(1/p).
// p = 0, 1 and 2 have dedicated fast paths; any other positive p is
// computed via math.Pow. (The previous doc comment claimed other p values
// fell back to the 2-norm, which the code never did.)
func (v Vector) Norm(p int) float64 {
	var n float64
	switch p {
	case 0: // infinity norm: max |v_i|
		for i := 0; i < len(v); i++ {
			if a := math.Abs(v[i]); a > n {
				n = a
			}
		}
		return n
	case 1: // sum of absolute values
		for i := 0; i < len(v); i++ {
			n += math.Abs(v[i])
		}
		return n
	case 2: // Euclidean norm
		for i := 0; i < len(v); i++ {
			n += v[i] * v[i]
		}
		return math.Sqrt(n)
	default:
		// General p-norm. math.Abs is required here: the original code
		// raised the raw component to the power, so negative components
		// with a non-even p corrupted the sum (e.g. producing NaN for
		// fractional results of Pow on negatives).
		p2 := float64(p)
		for i := 0; i < len(v); i++ {
			n += math.Pow(math.Abs(v[i]), p2)
		}
		return math.Pow(n, 1.0/p2)
	}
}
// Permute rearrange the elements of the vector according to a
// permuatation. The permutation is applied in place, cycle by cycle:
// visited entries of P are temporarily flipped to their complement
// (-1 - x) to mark them, and all marks are undone before returning, so P
// is restored to its original values.
func (b Vector) Permute(P permutation) {
	if len(b) != len(P) {
		panic("PermuteCols: permutation is of the wrong size\n")
	}
	i := 0
	for i < len(P) {
		if P[i] < 0 {
			// Negative means this index was already handled as part
			// of an earlier cycle.
			i++
			continue
		}
		// Walk the cycle that starts at i, swapping elements along the
		// way and marking each visited index.
		prev := i
		var next int
		for P[prev] != i {
			next = P[prev]
			b[next], b[prev] = b[prev], b[next]
			P[prev] = -1 - next
			prev = next
		}
		P[prev] = -1 - P[prev]
	}
	// Undo the complement marks, restoring P.
	for i := 0; i < len(P); i++ {
		P[i] = -1 - P[i]
	}
}
// Fill sets every element of the vector to val and returns the vector so
// calls can be chained.
func (v Vector) Fill(val float64) Vector {
	for i := range v {
		v[i] = val
	}
	return v
}
// Copy just a wrapper of the built-in function copy: it copies m into v,
// truncating to the shorter of the two lengths.
func (v Vector) Copy(m Vector) {
	copy(v, m)
}
// Scale multiplies every element of the vector by c in place and returns
// the vector so calls can be chained.
func (v Vector) Scale(c float64) Vector {
	for i := range v {
		v[i] *= c
	}
	return v
}
// Random set a random normal value (0, 1) for each element of the
// vector v. An optional seed may be supplied; otherwise the fixed default
// seed 1982 is used, so repeated unseeded calls produce identical vectors.
func (v Vector) Random(seed ...int64) {
	var s int64 = 1982
	if len(seed) > 0 {
		s = seed[0]
	}
	r := rand.New(rand.NewSource(s))
	for i := 0; i < len(v); i++ {
		v[i] = r.NormFloat64()
	}
}
package problem0715
import (
"sort"
)
// RangeModule tracks a set of half-open integer ranges, stored as a
// sorted, non-overlapping list of intervals.
type RangeModule struct {
	ranges []*interval
}
// Constructor returns a newly built, empty RangeModule.
func Constructor() RangeModule {
	return RangeModule{ranges: make([]*interval, 0, 2048)}
}
// AddRange starts tracking the half-open range [left, right), merging it
// with every interval it touches so the list stays sorted and disjoint.
func (r *RangeModule) AddRange(left int, right int) {
	it := &interval{left: left, right: right}
	n := len(r.ranges)
	// First interval that can overlap (or abut) the new range.
	i := sort.Search(n, func(i int) bool {
		return left <= r.ranges[i].right
	})
	// Absorb every overlapping interval into it.
	var j int
	for j = i; j < n && r.ranges[j].left <= right; j++ {
		it.add(r.ranges[j])
	}
	if i == j {
		// Nothing merged: grow the slice by one slot for the insert.
		r.ranges = append(r.ranges, nil)
	}
	// Close the gap left by the absorbed intervals and insert it at i.
	copy(r.ranges[i+1:], r.ranges[j:])
	r.ranges = r.ranges[:n-j+i+1]
	r.ranges[i] = it
}
// QueryRange reports whether every integer in [left, right) is currently
// tracked.
func (r *RangeModule) QueryRange(left int, right int) bool {
	n := len(r.ranges)
	// First interval whose right endpoint can reach `right`.
	i := sort.Search(n, func(i int) bool {
		return right <= r.ranges[i].right
	})
	// sort.Search never returns a negative index, so the original
	// `0 <= i` guard was redundant; only the upper bound needs checking.
	return i < n && r.ranges[i].isCover(left, right)
}
// RemoveRange stops tracking [left, right), splitting any partially
// overlapped intervals into their surviving pieces.
func (r *RangeModule) RemoveRange(left int, right int) {
	it := &interval{left: left, right: right}
	n := len(r.ranges)
	if n == 0 {
		return
	}
	// First interval that can overlap the removed range.
	i := sort.Search(n, func(i int) bool {
		return left < r.ranges[i].right
	})
	// Collect the surviving pieces of every overlapped interval.
	temp := make([]*interval, 0, 16)
	var j int
	for j = i; j < n && r.ranges[j].left < right; j++ {
		ra, rb := minus(r.ranges[j], it)
		if ra != nil {
			temp = append(temp, ra)
		}
		if rb != nil {
			temp = append(temp, rb)
		}
	}
	if i == j {
		// No interval overlapped; nothing to remove.
		return
	}
	// Splice the surviving pieces back in place of the removed span.
	r.ranges = append(r.ranges, nil)
	copy(r.ranges[i+len(temp):], r.ranges[j:])
	r.ranges = r.ranges[:n-j+i+len(temp)]
	for k := 0; k < len(temp); k++ {
		r.ranges[i+k] = temp[k]
	}
}
// interval is a half-open integer range [left, right).
type interval struct {
	left, right int
}

// isCover reports whether it fully contains [left, right).
func (it *interval) isCover(left, right int) bool {
	return it.left <= left && right <= it.right
}

// add widens it in place to the smallest interval covering both it and a.
func (it *interval) add(a *interval) {
	if a.left < it.left {
		it.left = a.left
	}
	if it.right < a.right {
		it.right = a.right
	}
}

// minus returns the set difference a-b as up to two intervals; a nil slot
// means that piece is empty. Assumes a and b overlap (guaranteed by its
// only caller, RemoveRange).
func minus(a, b *interval) (*interval, *interval) {
	if b.left <= a.left && a.right <= b.right {
		// b swallows a entirely.
		return nil, nil
	}
	if b.left <= a.left && a.left < b.right && b.right < a.right {
		// b clips the left side of a.
		return &interval{left: b.right, right: a.right}, nil
	}
	if a.left < b.left && b.left < a.right && a.right < b.right {
		// b clips the right side of a.
		return &interval{left: a.left, right: b.left}, nil
	}
	// a.left < b.left && b.right < a.right
	// b punches a hole in the middle of a, leaving two pieces.
	return &interval{left: a.left, right: b.left},
		&interval{left: b.right, right: a.right}
}
/**
* Your RangeModule object will be instantiated and called as such:
* obj := Constructor();
* obj.AddRange(left,right);
* param_2 := obj.QueryRange(left,right);
* obj.RemoveRange(left,right);
*/ | Algorithms/0715.range-module/range-module.go | 0.583203 | 0.461623 | range-module.go | starcoder |
package workspaceiface
import "github.com/orobix/azureml-go-sdk/workspace"
// WorkspaceAPI describes the operations supported against an Azure ML
// workspace: datastore CRUD and versioned dataset CRUD.
type WorkspaceAPI interface {
	// GetDatastores Return the list of datastore of the AML Workspace provided as argument.
	GetDatastores(resourceGroup, workspace string) ([]workspace.Datastore, error)
	// GetDatastore Return the datastore with the name provided as argument.
	GetDatastore(resourceGroup, workspace, datastoreName string) (*workspace.Datastore, error)
	// DeleteDatastore Delete the datastore with the name provided as argument
	DeleteDatastore(resourceGroup, workspace, datastoreName string) error
	// CreateOrUpdateDatastore Create or update the datastore with the data provided as argument
	CreateOrUpdateDatastore(resourceGroup, workspace string, datastore *workspace.Datastore) (*workspace.Datastore, error)
	// GetDatasets Return the list of datasets of the AML Workspace. For each dataset, only its latest version is returned.
	GetDatasets(resourceGroup, workspace string) ([]workspace.Dataset, error)
	// GetDataset Return the dataset with the name and version provided as argument
	GetDataset(resourceGroup, workspace, name string, version int) (*workspace.Dataset, error)
	// GetDatasetNextVersion Return the next version of the dataset with the name provided as argument
	GetDatasetNextVersion(resourceGroup, workspace, name string) (int, error)
	// GetDatasetVersions Return all the versions of the dataset with the name provided as argument
	GetDatasetVersions(resourceGroup, workspace, datasetName string) ([]workspace.Dataset, error)
	// CreateOrUpdateDataset Create or update the dataset with the data provided as argument
	CreateOrUpdateDataset(resourceGroup, workspace string, dataset *workspace.Dataset) (*workspace.Dataset, error)
	// DeleteDataset Delete the dataset (all its versions) with the name provided as argument
	DeleteDataset(resourceGroup, workspace, datasetName string) error
	// DeleteDatasetVersion Delete the version provided as argument of the dataset with the specified name
	DeleteDatasetVersion(resourceGroup, workspace, datasetName string, version int) error
}
package day24
import (
"bufio"
"io"
"strconv"
"strings"
)
// Node is one bridge component with two pin counts (ports).
type Node struct {
	ID int // dense index, used as a bit position in the visited bitmap
	PortA, PortB int
	Edges []*Node // components sharing a pin count with this one (see ParseGraph)
}
// CanConnect returns true if the two given Nodes can connect on either port.
func (n *Node) CanConnect(a *Node) bool {
	for _, port := range [...]int{n.PortA, n.PortB} {
		if port == a.PortA || port == a.PortB {
			return true
		}
	}
	return false
}
// FindMaxStrength returns the maximum strength bridge that can be formed
// starting from the given Node. Consumed nodes are tracked (and never
// re-traversed) in the seen bitmap. The ingress port of the given node is
// excluded from evaluation as it has also been consumed by the previous node.
func (n *Node) FindMaxStrength(seen uint64, ingress int) (max int) {
	if seen&(1<<uint64(n.ID)) != 0 {
		// This component is already part of the current bridge.
		return 0
	}
	seen |= 1 << uint64(n.ID)
	// The free (egress) port is whichever one did not take the connection.
	egress := n.PortA
	if ingress == n.PortA {
		egress = n.PortB
	}
	// Try every neighbour that can plug into the egress port.
	for i := 0; i < len(n.Edges); i++ {
		if n.Edges[i].PortA == egress || n.Edges[i].PortB == egress {
			m := n.Edges[i].FindMaxStrength(seen, egress)
			if m > max {
				max = m
			}
		}
	}
	// This component contributes the sum of its two ports.
	max += n.PortA + n.PortB
	return
}
// FindMaxLength returns the length and strength of the longest bridge that can
// be formed starting from the given Node. Consumed nodes are tracked (and never
// re-traversed) in the seen bitmap. The ingress port of the given node is
// excluded from evaluation as it has also been consumed by the previous node.
// Among equally long bridges the strongest one wins.
func (n *Node) FindMaxLength(seen uint64, ingress int) (length, strength int) {
	if seen&(1<<uint64(n.ID)) != 0 {
		// This component is already part of the current bridge.
		return
	}
	seen |= 1 << uint64(n.ID)
	// The free (egress) port is whichever one did not take the connection.
	egress := n.PortA
	if ingress == n.PortA {
		egress = n.PortB
	}
	for i := 0; i < len(n.Edges); i++ {
		if n.Edges[i].PortA == egress || n.Edges[i].PortB == egress {
			l, s := n.Edges[i].FindMaxLength(seen, egress)
			// Prefer longer bridges; use strength only as a tiebreak.
			if l == length && s > strength {
				strength = s
			} else if l > length {
				length = l
				strength = s
			}
		}
	}
	// Count this component and add its port strength.
	length++
	strength += n.PortA + n.PortB
	return
}
// Graph is an adjacency list representation of a graph of Nodes and their
// possible edges.
type Graph []*Node
// ParseGraph parses the challenge input from the given reader into a Graph of
// Nodes. Each line has the form "a/b". Atoi errors are deliberately
// discarded (the puzzle input is assumed well formed), leaving zero ports.
// After reading, every compatible pair of components is linked in both
// directions.
func ParseGraph(r io.Reader) Graph {
	g := make(Graph, 0)
	s := bufio.NewScanner(r)
	i := 0
	for s.Scan() {
		tkns := strings.Split(s.Text(), "/")
		a, _ := strconv.Atoi(tkns[0])
		b, _ := strconv.Atoi(tkns[1])
		g = append(g, &Node{ID: i, PortA: a, PortB: b})
		i++
	}
	// Build the symmetric edge lists over all unordered pairs.
	for i := 0; i < len(g)-1; i++ {
		for j := i + 1; j < len(g); j++ {
			if g[i].CanConnect(g[j]) {
				g[i].Edges = append(g[i].Edges, g[j])
				g[j].Edges = append(g[j].Edges, g[i])
			}
		}
	}
	return g
}
// MaxBridgeStrength is my solution to Part One and returns the maximum strength
// bridge that can be constructed from the available components.
func (g Graph) MaxBridgeStrength() (max int) {
	for _, node := range g {
		if node.PortA != 0 && node.PortB != 0 {
			continue // a bridge must start from a zero-pin component
		}
		if s := node.FindMaxStrength(0, 0); s > max {
			max = s
		}
	}
	return
}
// LongestBridgeStrength is my solution to Part Two and returns the strength of
// the longest bridge that can be constructed from the available components.
// Length wins; strength only breaks ties between equally long bridges.
func (g Graph) LongestBridgeStrength() int {
	length, strength := 0, 0
	for i := 0; i < len(g); i++ {
		if g[i].PortA == 0 || g[i].PortB == 0 {
			// Bridges must start from a component with a zero port.
			l, s := g[i].FindMaxLength(0, 0)
			if l == length && s > strength {
				strength = s
			} else if l > length {
				length = l
				strength = s
			}
		}
	}
	return strength
}
package paillier
import (
"crypto/rand"
"errors"
"io"
"math/big"
)
var one = big.NewInt(1)
// ErrMessageTooLong is returned when attempting to encrypt a message which is
// too large for the size of the public key. It is also returned by Decrypt
// when the ciphertext exceeds the modulus squared.
var ErrMessageTooLong = errors.New("paillier: message too long for Paillier public key size")
// GenerateKey generates an Paillier keypair of the given bit size using the
// random source random (for example, crypto/rand.Reader). The CRT values
// used to speed up decryption (pp, qq, pinvq, hp, hq, ...) are precomputed
// here.
func GenerateKey(random io.Reader, bits int) (*PrivateKey, error) {
	// First, begin generation of p in the background.
	var p *big.Int
	var errChan = make(chan error, 1)
	go func() {
		var err error
		p, err = rand.Prime(random, bits/2)
		errChan <- err
	}()
	// Now, find a prime q in the foreground.
	q, err := rand.Prime(random, bits/2)
	if err != nil {
		return nil, err
	}
	// Wait for generation of p to complete successfully.
	if err := <-errChan; err != nil {
		return nil, err
	}
	// NOTE(review): p and q are not checked for equality; for realistic
	// bit sizes a collision is vanishingly unlikely, but confirm if small
	// key sizes are ever used.
	n := new(big.Int).Mul(p, q)
	pp := new(big.Int).Mul(p, p)
	qq := new(big.Int).Mul(q, q)
	return &PrivateKey{
		PublicKey: PublicKey{
			N:        n,
			NSquared: new(big.Int).Mul(n, n),
			G:        new(big.Int).Add(n, one), // g = n + 1
		},
		p:         p,
		pp:        pp,
		pminusone: new(big.Int).Sub(p, one),
		q:         q,
		qq:        qq,
		qminusone: new(big.Int).Sub(q, one),
		pinvq:     new(big.Int).ModInverse(p, q),
		hp:        h(p, pp, n),
		hq:        h(q, qq, n),
		n:         n,
	}, nil
}
// PrivateKey represents a Paillier key, including the CRT values
// precomputed by GenerateKey to accelerate decryption.
type PrivateKey struct {
	PublicKey
	p *big.Int // first prime factor of n
	pp *big.Int // p^2
	pminusone *big.Int // p - 1
	q *big.Int // second prime factor of n
	qq *big.Int // q^2
	qminusone *big.Int // q - 1
	pinvq *big.Int // p^-1 mod q, used by the CRT recombination (crt)
	hp *big.Int // decryption helper for the p branch (see h)
	hq *big.Int // decryption helper for the q branch (see h)
	n *big.Int // modulus p*q
}
// PublicKey represents the public part of a Paillier key.
type PublicKey struct {
	N *big.Int // modulus
	G *big.Int // n+1, since p and q are same length
	NSquared *big.Int // n^2, the ciphertext modulus
}
// h computes the decryption helper value (L(g^(p-1) mod p^2))^-1 mod p for
// a prime factor p (pp = p^2, n the modulus). With g = n+1 the
// exponentiation reduces to (1-n) mod p^2 — the CRT-optimised form of
// Paillier decryption. NOTE(review): derivation follows the standard
// CRT/Damgård–Jurik style optimisation; verify against the reference.
func h(p *big.Int, pp *big.Int, n *big.Int) *big.Int {
	gp := new(big.Int).Mod(new(big.Int).Sub(one, n), pp)
	lp := l(gp, p)
	hp := new(big.Int).ModInverse(lp, p)
	return hp
}
// l is Paillier's L function: L(u) = (u - 1) / n (integer division).
func l(u *big.Int, n *big.Int) *big.Int {
	return new(big.Int).Div(new(big.Int).Sub(u, one), n)
}
// Encrypt encrypts a plain text represented as a byte array. The passed plain
// text MUST NOT be larger than the modulus of the passed public key. A fresh
// random nonce is drawn internally; use EncryptAndNonce to also obtain it.
func Encrypt(pubKey *PublicKey, plainText []byte) ([]byte, error) {
	c, _, err := EncryptAndNonce(pubKey, plainText)
	return c, err
}
// EncryptAndNonce encrypts a plain text represented as a byte array, and in
// addition, returns the nonce used during encryption. The passed plain text
// MUST NOT be larger than the modulus of the passed public key.
func EncryptAndNonce(pubKey *PublicKey, plainText []byte) ([]byte, *big.Int, error) {
	// Draw the random nonce r in [0, N).
	r, err := rand.Int(rand.Reader, pubKey.N)
	if err != nil {
		return nil, nil, err
	}
	c, err := EncryptWithNonce(pubKey, r, plainText)
	if err != nil {
		return nil, nil, err
	}
	return c.Bytes(), r, nil
}
// EncryptWithNonce encrypts a plain text represented as a byte array using the
// provided nonce to perform encryption. The passed plain text MUST NOT be
// larger than the modulus of the passed public key.
func EncryptWithNonce(pubKey *PublicKey, r *big.Int, plainText []byte) (*big.Int, error) {
	m := new(big.Int).SetBytes(plainText)
	if pubKey.N.Cmp(m) < 1 { // reject m >= N (the original comment said N < m, but Cmp < 1 also rejects equality)
		return nil, ErrMessageTooLong
	}
	// c = g^m * r^n mod n^2 = ((m*n+1) mod n^2) * r^n mod n^2
	// (using g = n+1, so g^m collapses to 1 + m*n mod n^2).
	n := pubKey.N
	c := new(big.Int).Mod(
		new(big.Int).Mul(
			new(big.Int).Mod(new(big.Int).Add(one, new(big.Int).Mul(m, n)), pubKey.NSquared),
			new(big.Int).Exp(r, n, pubKey.NSquared),
		),
		pubKey.NSquared,
	)
	return c, nil
}
// Decrypt decrypts the passed cipher text using the CRT: the plaintext is
// recovered modulo p and modulo q separately and then recombined with crt.
func Decrypt(privKey *PrivateKey, cipherText []byte) ([]byte, error) {
	c := new(big.Int).SetBytes(cipherText)
	if privKey.NSquared.Cmp(c) < 1 { // reject c >= n^2 (original comment had the inequality backwards)
		return nil, ErrMessageTooLong
	}
	// m mod p = L(c^(p-1) mod p^2) * hp mod p
	cp := new(big.Int).Exp(c, privKey.pminusone, privKey.pp)
	lp := l(cp, privKey.p)
	mp := new(big.Int).Mod(new(big.Int).Mul(lp, privKey.hp), privKey.p)
	// m mod q = L(c^(q-1) mod q^2) * hq mod q
	cq := new(big.Int).Exp(c, privKey.qminusone, privKey.qq)
	lq := l(cq, privKey.q)
	mqq := new(big.Int).Mul(lq, privKey.hq)
	mq := new(big.Int).Mod(mqq, privKey.q)
	m := crt(mp, mq, privKey)
	return m.Bytes(), nil
}
// crt recombines the partial plaintexts mp (mod p) and mq (mod q) into the
// full plaintext mod n via the Chinese Remainder Theorem, using the
// precomputed pinvq = p^-1 mod q.
func crt(mp *big.Int, mq *big.Int, privKey *PrivateKey) *big.Int {
	u := new(big.Int).Mod(new(big.Int).Mul(new(big.Int).Sub(mq, mp), privKey.pinvq), privKey.q)
	m := new(big.Int).Add(mp, new(big.Int).Mul(u, privKey.p))
	return new(big.Int).Mod(m, privKey.n)
}
// AddCipher homomorphically adds together two cipher texts.
// To do this we multiply the two cipher texts, upon decryption, the resulting
// plain text will be the sum of the corresponding plain texts.
// Inputs and output are big-endian byte encodings as produced by Encrypt.
func AddCipher(pubKey *PublicKey, cipher1, cipher2 []byte) []byte {
	x := new(big.Int).SetBytes(cipher1)
	y := new(big.Int).SetBytes(cipher2)
	// x * y mod n^2
	return new(big.Int).Mod(
		new(big.Int).Mul(x, y),
		pubKey.NSquared,
	).Bytes()
}
// Add homomorphically adds a passed constant to the encrypted integer
// (our cipher text). We do this by multiplying the constant with our
// ciphertext. Upon decryption, the resulting plain text will be the sum of
// the plaintext integer and the constant. Inputs and output are
// big-endian byte encodings.
func Add(pubKey *PublicKey, cipher, constant []byte) []byte {
	c := new(big.Int).SetBytes(cipher)
	x := new(big.Int).SetBytes(constant)
	// c * g ^ x mod n^2
	return new(big.Int).Mod(
		new(big.Int).Mul(c, new(big.Int).Exp(pubKey.G, x, pubKey.NSquared)),
		pubKey.NSquared,
	).Bytes()
}
// Mul homomorphically multiplies an encrypted integer (cipher text) by a
// constant. We do this by raising our cipher text to the power of the passed
// constant. Upon decryption, the resulting plain text will be the product of
// the plaintext integer and the constant. Inputs and output are
// big-endian byte encodings.
func Mul(pubKey *PublicKey, cipher []byte, constant []byte) []byte {
	c := new(big.Int).SetBytes(cipher)
	x := new(big.Int).SetBytes(constant)
	// c ^ x mod n^2
	return new(big.Int).Exp(c, x, pubKey.NSquared).Bytes()
}
package util
import (
"math"
"math/rand"
)
/*
* Non-linear interpolations between 0 and 1. Clamping is enforced lest the result not
* be defined outside of [0,1]
*/
// NLerp returns the value of the supplied non-linear function at t. Note t is
// clamped to [0,1] before f is applied, so the result never extrapolates
// beyond start or end.
func NLerp(t, start, end float64, f NonLinear) float64 {
	if t < 0 {
		return start
	}
	if t > 1 {
		return end
	}
	t = f.Transform(t)
	return (1-t)*start + t*end
}
// InvNLerp performs the inverse of NLerp and returns the value of t for a
// value v (clamped to [start, end]). Note end must differ from start or the
// initial division yields Inf/NaN.
func InvNLerp(v, start, end float64, f NonLinear) float64 {
	t := (v - start) / (end - start)
	if t < 0 {
		return 0
	}
	if t > 1 {
		return 1
	}
	return f.InvTransform(t)
}
// RemapNL converts v from one space to another by applying InvNLerp to find t
// in the initial range (under fi), and then using t to find v' in the new
// range (under fo).
func RemapNL(v, istart, iend, ostart, oend float64, fi, fo NonLinear) float64 {
	return NLerp(InvNLerp(v, istart, iend, fi), ostart, oend, fo)
}
// NLerp32 is a float32 version of NLerp for Path and x/image/vector.
// The transform itself still runs in float64.
func NLerp32(t, start, end float32, f NonLinear) float32 {
	if t < 0 {
		return start
	}
	if t > 1 {
		return end
	}
	t = float32(f.Transform(float64(t)))
	return (1-t)*start + t*end
}
// InvNLerp32 is a float32 version of InvNLerp for Path and x/image/vector.
// The inverse transform itself still runs in float64.
func InvNLerp32(v, start, end float32, f NonLinear) float32 {
	t := (v - start) / (end - start)
	if t < 0 {
		return 0
	}
	if t > 1 {
		return 1
	}
	return float32(f.InvTransform(float64(t)))
}
// RemapNL32 is a float32 version of RemapNL for Path and x/image/vector.
func RemapNL32(v, istart, iend, ostart, oend float32, fi, fo NonLinear) float32 {
	return NLerp32(InvNLerp32(v, istart, iend, fi), ostart, oend, fo)
}
// NonLinear interface defines the transform and its inverse as used by NLerp etc.
// For mapping 0 -> 1 non-linearly. No checks! Only valid in range [0,1].
// Implementations should satisfy InvTransform(Transform(t)) == t on [0,1].
type NonLinear interface {
	Transform(t float64) float64
	InvTransform(v float64) float64
}
// NLLinear v = t (the identity mapping).
type NLLinear struct{}

// Transform returns t unchanged.
func (nl *NLLinear) Transform(t float64) float64 {
	return t
}

// InvTransform returns v unchanged.
func (nl *NLLinear) InvTransform(v float64) float64 {
	return v
}
// NLSquare v = t^2
type NLSquare struct{}

func (nl *NLSquare) Transform(t float64) float64 {
	return t * t
}

// InvTransform is the square root, inverting t^2 on [0,1].
func (nl *NLSquare) InvTransform(v float64) float64 {
	return math.Sqrt(v)
}
// NLCube v = t^3
type NLCube struct{}

func (nl *NLCube) Transform(t float64) float64 {
	return t * t * t
}

// InvTransform is the cube root, inverting t^3 on [0,1].
func (nl *NLCube) InvTransform(v float64) float64 {
	return math.Pow(v, 1/3.0)
}
// NLExponential v = (exp(t*k) - 1) * scale, with scale chosen by
// NewNLExponential so that v(0) = 0 and v(1) = 1.
type NLExponential struct {
	k float64
	scale float64 // 1 / (e^k - 1), the normalisation factor
}

// NewNLExponential returns an exponential mapping with rate k. k must be
// non-zero (k == 0 makes scale infinite).
func NewNLExponential(k float64) *NLExponential {
	return &NLExponential{k, 1 / (math.Exp(k) - 1)}
}

func (nl *NLExponential) Transform(t float64) float64 {
	return (math.Exp(t*nl.k) - 1) * nl.scale
}

// InvTransform solves v = (e^(t*k)-1)*scale for t: t = log1p(v/scale)/k.
func (nl *NLExponential) InvTransform(v float64) float64 {
	return math.Log1p(v/nl.scale) / nl.k
}
// NLLogarithmic v = log(1+t*k) * scale, with scale chosen by
// NewNLLogarithmic so that v(0) = 0 and v(1) = 1.
type NLLogarithmic struct {
	k float64
	scale float64 // 1 / log1p(k), the normalisation factor
}

// NewNLLogarithmic returns a logarithmic mapping with rate k. k must be
// greater than -1 and non-zero for the scale to be finite.
func NewNLLogarithmic(k float64) *NLLogarithmic {
	return &NLLogarithmic{k, 1 / math.Log1p(k)}
}

func (nl *NLLogarithmic) Transform(t float64) float64 {
	return math.Log1p(t*nl.k) * nl.scale
}

// InvTransform solves v = log1p(t*k)*scale for t: t = (e^(v/scale)-1)/k.
func (nl *NLLogarithmic) InvTransform(v float64) float64 {
	return (math.Exp(v/nl.scale) - 1) / nl.k
}
// NLSin v = sin(t) with t mapped to [-Pi/2,Pi/2]
type NLSin struct{} // first derivative 0 at t=0,1
func (nl *NLSin) Transform(t float64) float64 {
return (math.Sin((t-0.5)*math.Pi) + 1) / 2
}
func (nl *NLSin) InvTransform(v float64) float64 {
return math.Asin((v*2)-1)/math.Pi + 0.5
}
// NLSin1 v = sin(t) with t mapped to [0,Pi/2]
type NLSin1 struct{} // first derivative 0 at t=1
func (nl *NLSin1) Transform(t float64) float64 {
return math.Sin(t * math.Pi / 2)
}
func (nl *NLSin1) InvTransform(v float64) float64 {
return math.Asin(v) / math.Pi * 2
}
// NLSin2 v = sin(t) with t mapped to [-Pi/2,0]
type NLSin2 struct{} // first derivative 0 at t=0,1
func (nl *NLSin2) Transform(t float64) float64 {
return math.Sin((t-1)*math.Pi/2) + 1
}
func (nl *NLSin2) InvTransform(v float64) float64 {
return math.Asin(v-1)*2/math.Pi + 1
}
// NLCircle1 v = 1 - sqrt(1-t^2)
type NLCircle1 struct{}
func (nl *NLCircle1) Transform(t float64) float64 {
return 1 - math.Sqrt(1-t*t)
}
func (nl *NLCircle1) InvTransform(v float64) float64 {
return math.Sqrt(1 - (v-1)*(v-1))
}
// NLCircle2 v = sqrt(2t-t^2)
type NLCircle2 struct{}
func (nl *NLCircle2) Transform(t float64) float64 {
return math.Sqrt(t * (2 - t))
}
func (nl *NLCircle2) InvTransform(v float64) float64 {
return 1 - math.Sqrt(1-v*v)
}
// NLCatenary v = cosh(t)
type NLCatenary struct{}
func (nl *NLCatenary) Transform(t float64) float64 {
return (math.Cosh(t) - 1) / (math.Cosh(1) - 1)
}
func (nl *NLCatenary) InvTransform(v float64) float64 {
return math.Acosh(v*(math.Cosh(1)-1) + 1)
}
// NLGauss v = gauss(t, k)
type NLGauss struct {
k, offs, scale float64
}
func NewNLGauss(k float64) *NLGauss {
offs := math.Exp(-k * k * 0.5)
scale := 1 / (1 - offs)
return &NLGauss{k, offs, scale}
}
func (nl *NLGauss) Transform(t float64) float64 {
x := nl.k * (t - 1)
x *= -0.5 * x
return (math.Exp(x) - nl.offs) * nl.scale
}
func (nl *NLGauss) InvTransform(v float64) float64 {
v /= nl.scale
v += nl.offs
v = math.Log(v)
v *= -2
v = math.Sqrt(v)
return 1 - v/nl.k
}
// NLLogistic v = logistic(t, k, mp)
type NLLogistic struct {
k, mp, offs, scale float64
}
// k > 0 and mp (0,1) - not checked
func NewNLLogistic(k, mp float64) *NLLogistic {
v0 := -mp * k
v0 = logisticTransform(v0)
v1 := (1 - mp) * k
v1 = logisticTransform(v1)
return &NLLogistic{k, mp, v0, 1 / (v1 - v0)}
}
func (nl *NLLogistic) Transform(t float64) float64 {
t = (t - nl.mp) * nl.k
return (logisticTransform(t) - nl.offs) * nl.scale
}
func (nl *NLLogistic) InvTransform(v float64) float64 {
v /= nl.scale
v += nl.offs
v = logisticInvTransform(v)
return v/nl.k + nl.mp
}
// L = 1, k = 1, mp = 0
func logisticTransform(t float64) float64 {
return 1 / (1 + math.Exp(-t))
}
// L = 1, k = 1, mp = 0
func logisticInvTransform(v float64) float64 {
return -math.Log(1/v - 1)
}
// NLP3 v = t^2 * (3-2t)
type NLP3 struct{} // first derivative 0 at t=0,1
func (nl *NLP3) Transform(t float64) float64 {
return t * t * (3 - 2*t)
}
func (nl *NLP3) InvTransform(v float64) float64 {
return bsInv(v, nl)
}
// NLP5 v = t^3 * (t*(6t-15) + 10)
type NLP5 struct{} // first and second derivatives 0 at t=0,1
func (nl *NLP5) Transform(t float64) float64 {
return t * t * t * (t*(t*6.0-15.0) + 10.0)
}
func (nl *NLP5) InvTransform(v float64) float64 {
return bsInv(v, nl)
}
// NLCompound v = nl[0](nl[1](nl[2](...nl[n-1](t))))
type NLCompound struct {
fs []NonLinear
}
func NewNLCompound(fs []NonLinear) *NLCompound {
return &NLCompound{fs}
}
func (nl *NLCompound) Transform(t float64) float64 {
for _, f := range nl.fs {
t = f.Transform(t)
}
return t
}
func (nl *NLCompound) InvTransform(v float64) float64 {
for i := len(nl.fs) - 1; i > -1; i-- {
v = nl.fs[i].InvTransform(v)
}
return v
}
// NLOmt v = 1-f(1-t)
type NLOmt struct {
f NonLinear
}
func NewNLOmt(f NonLinear) *NLOmt {
return &NLOmt{f}
}
func (nl *NLOmt) Transform(t float64) float64 {
return 1 - nl.f.Transform(1-t)
}
func (nl *NLOmt) InvTransform(v float64) float64 {
return 1 - nl.f.InvTransform(1-v)
}
// NewRandNL calculates a collection of ascending values [0,1] with an average increment of mean
// and standard deviation of std.
func NewNLRand(mean, std float64, sharp bool) *NLRand {
sum := 0.0
steps := []float64{0, 0} // 0 prefix
for sum < 1 {
v := rand.NormFloat64()*std + mean
if v < 0 {
v = 0
}
sum += v
if sum > 1 {
sum = 1
}
steps = append(steps, sum)
}
if sharp {
steps[0] = -steps[2]
steps = append(steps, 2-steps[len(steps)-2])
} else {
steps = append(steps, 1.0) // 1 postfix
}
return &NLRand{steps, 1.0 / float64(len(steps)-3)}
}
// NLRand uses random incremental steps from a normal distribution, smoothed with a cubic.
type NLRand struct {
Steps []float64
Dt float64
}
func (nl *NLRand) Transform(t float64) float64 {
// Find steps t is between and use cubic interpolation
// to calc v
fn := math.Floor(t / nl.Dt)
fr := t - fn*nl.Dt
frt := fr / nl.Dt
n := int(fn)
p := nl.Steps[n : n+4]
return Cubic(frt, p)
}
func (nl *NLRand) InvTransform(v float64) float64 {
return bsInv(v, nl)
}
// Numerical method to find inverse
func bsInv(v float64, f NonLinear) float64 {
n := 16
t := 0.5
s := 0.25
for ; n > 0; n-- {
if f.Transform(t) > v {
t -= s
} else {
t += s
}
s /= 2
}
return t
}
// Cubic calculates the value of f(t) for t in range [0,1] given the values of t at -1, 0, 1, 2 in p[]
// fitted to a cubic polynomial: f(t) = at^3 + bt^2 + ct + d. Clamped because it over/undershoots.
func Cubic(t float64, p []float64) float64 {
v := p[1] + 0.5*t*(p[2]-p[0]+t*(2.0*p[0]-5.0*p[1]+4.0*p[2]-p[3]+t*(3.0*(p[1]-p[2])+p[3]-p[0])))
if v < 0 {
v = 0
} else if v > 1 {
v = 1
}
return v
} | util/nlerp.go | 0.809653 | 0.649328 | nlerp.go | starcoder |
package wire
import (
"BtcoinProject/chaincfg/chainhash"
"bytes"
"io"
"time"
)
//maxblockheaderpayload is the maximum number of bytes a blcok header can be
//version 4 bytes timestamp 4bytes + bit 4bytes + nonce 4bytes + prevblock and
//merkleroot hashes
const MaxBlockHeaderPayload = 16 + (chainhash.HashSize * 2)
// BlockHeader defines information about a block and is used in
// the bitcion block(MsgBlock) and Header(MsgHeader)message
type BlockHeader struct {
// Version of the block ,this is not the same as the protocol version
Version int32
//Hash of previous block header in the block chain
PrevBlock chainhash.Hash
//Merkle tree reference to hash of all transactions for the block
MerkleRoot chainhash.Hash
// Time the block was created. This is, unfortunately, encoded as a
// uint32 on the wire and therefore is limited to 2106.
Timestamp time.Time
// Difficulty target for the block.
Bits uint32
// Nonce used to generate the block.
Nonce uint32
}
//blockhaderlen is a constant that repeesents the number of bytes for a block
//header.
const blockHeaderLen = 80
//blockhash computes the block indetifier hash for the given block haders
func (h *BlockHeader) BlockHash() chainhash.Hash {
//encode the header and double sha 256 everything prior to the number of
//transaction ignore the error returns since there is no way the encode could
//fail except being out of memory which would cause a run_time panic
buf := bytes.NewBuffer(make([]byte, 0, MaxBlockPayload))
_ = writeBlockHeader(buf, 0, h)
return chainhash.DoubleHashH(buf.Bytes())
}
//btcdecode decdes r using the bitcoin protocol encoding into the receiver
//this is part of the message interface implementation. see deserialize for
//decoding block haders stored to disk ,such as in a database,as opposed to
//decoding block haders from the wire.
func (h *BlockHeader) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) error {
return readBlockHeader(r, pver, h)
}
//btcencode encodes the receiver to w using the bitcoin protocol encoding
//this is part of the meesage interface implementation. see serialize for
//encoding block haders to be stored to disk, such as in a
// database, as opposed to encoding block headers for the wire.
func (h *BlockHeader) BtcEncode(w io.Writer, pver uint32, enc MessageEncoding) error {
return writeBlockHeader(w, pver, h)
}
//deserialize decodes a block header from r into the receiver using a format
//that is suitable for long-term storage such as a database while respecting
//the version field.
func(h *BlockHeader)Deserialize(r io.Reader)error{
//at the current time .there is no difference between the wire cncoding
//at protocol version0 and the stable long-term storage format.as a reslut
//make use of the readblockheader.
return readBlockHeader(r,0,h)
}
// Serialize encodes a block header from r into the receiver using a format
// that is suitable for long-term storage such as a database while respecting
// the Version field.
func (h *BlockHeader) Serialize(w io.Writer) error {
// At the current time, there is no difference between the wire encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of writeBlockHeader.
return writeBlockHeader(w, 0, h)
}
// NewBlockHeader returns a new BlockHeader using the provided version, previous
// block hash, merkle root hash, difficulty bits, and nonce used to generate the
// block with defaults for the remaining fields.
func NewBlockHeader(version int32, prevHash, merkleRootHash *chainhash.Hash,
bits uint32, nonce uint32) *BlockHeader {
// Limit the timestamp to one second precision since the protocol
// doesn't support better.
return &BlockHeader{
Version: version,
PrevBlock: *prevHash,
MerkleRoot: *merkleRootHash,
Timestamp: time.Unix(time.Now().Unix(), 0),
Bits: bits,
Nonce: nonce,
}
}
// readBlockHeader reads a bitcoin block header from r. See Deserialize for
// decoding block headers stored to disk, such as in a database, as opposed to
// decoding from the wire.
func readBlockHeader(r io.Reader, pver uint32, bh *BlockHeader) error {
return readElements(r, &bh.Version, &bh.PrevBlock, &bh.MerkleRoot,
(*uint32Time)(&bh.Timestamp), &bh.Bits, &bh.Nonce)
}
// writeBlockHeader writes a bitcoin block header to w. See Serialize for
// encoding block headers to be stored to disk, such as in a database, as
// opposed to encoding for the wire.
func writeBlockHeader(w io.Writer, pver uint32, bh *BlockHeader) error {
sec := uint32(bh.Timestamp.Unix())
return writeElements(w, bh.Version, &bh.PrevBlock, &bh.MerkleRoot,
sec, bh.Bits, bh.Nonce)
}
//over | wire/blockheader.go | 0.771499 | 0.418281 | blockheader.go | starcoder |
package build
import (
"math"
"github.com/tang/go/amount"
"github.com/tang/go/network"
"github.com/tang/go/xdr"
)
const (
// MemoTextMaxLength represents the maximum number of bytes a valid memo of
// type "MEMO_TEXT" can be.
MemoTextMaxLength = 28
)
var (
// PublicNetwork is a mutator that configures the transaction for submission
// to the main public tang network.
PublicNetwork = Network{network.PublicNetworkPassphrase}
// TestNetwork is a mutator that configures the transaction for submission
// to the test tang network (often called testnet).
TestNetwork = Network{network.TestNetworkPassphrase}
// DefaultNetwork is a mutator that configures the
// transaction for submission to the default tang
// network. Integrators may change this value to
// another `Network` mutator if they would like to
// effect the default in a process-global manner.
// Replace or set your own custom passphrase on this
// var to set the default network for the process.
DefaultNetwork = Network{}
)
// Amount is a mutator capable of setting the amount
type Amount string
// Asset is struct used in path_payment mutators
type Asset struct {
Code string
Issuer string
Native bool
}
// AllowTrustAsset is a mutator capable of setting the asset on
// an operations that have one.
type AllowTrustAsset struct {
Code string
}
// Authorize is a mutator capable of setting the `authorize` flag
type Authorize struct {
Value bool
}
// AutoSequence loads the sequence to use for the transaction from an external
// provider.
type AutoSequence struct {
SequenceProvider
}
// BumpTo sets sequence number on BumpSequence operation
type BumpTo int64
// NativeAsset is a helper method to create native Asset object
func NativeAsset() Asset {
return Asset{Native: true}
}
// CreditAsset is a helper method to create credit Asset object
func CreditAsset(code, issuer string) Asset {
return Asset{code, issuer, false}
}
// CreditAmount is a mutator that configures a payment to be using credit
// asset and have the amount provided.
type CreditAmount struct {
Code string
Issuer string
Amount string
}
// Defaults is a mutator that sets defaults
type Defaults struct{}
// Destination is a mutator capable of setting the destination on
// an operations that have one.
type Destination struct {
AddressOrSeed string
}
// InflationDest is a mutator capable of setting the inflation destination
type InflationDest string
// HomeDomain is a mutator capable of setting home domain of the account
type HomeDomain string
// MemoHash is a mutator that sets a memo on the mutated transaction of type
// MEMO_HASH.
type MemoHash struct {
Value xdr.Hash
}
// Limit is a mutator that sets a limit on the change_trust operation
type Limit Amount
// MasterWeight is a mutator that sets account's master weight
type MasterWeight uint32
// MaxLimit represents the maximum value that can be passed as trutline Limit
var MaxLimit = Limit(amount.String(math.MaxInt64))
// MemoID is a mutator that sets a memo on the mutated transaction of type
// MEMO_ID.
type MemoID struct {
Value uint64
}
// MemoReturn is a mutator that sets a memo on the mutated transaction of type
// MEMO_RETURN.
type MemoReturn struct {
Value xdr.Hash
}
// MemoText is a mutator that sets a memo on the mutated transaction of type
// MEMO_TEXT.
type MemoText struct {
Value string
}
// NativeAmount is a mutator that configures a payment to be using native
// currency and have the amount provided (in lumens).
type NativeAmount struct {
Amount string
}
// OfferID is a mutator that sets offer ID on offer operations
type OfferID uint64
// PayWithPath is a mutator that configures a path_payment's send asset and max amount
type PayWithPath struct {
Asset
MaxAmount string
Path []Asset
}
// Through appends a new asset to the path
func (pathSend PayWithPath) Through(asset Asset) PayWithPath {
pathSend.Path = append(pathSend.Path, asset)
return pathSend
}
// PayWith is a helper to create PayWithPath struct
func PayWith(sendAsset Asset, maxAmount string) PayWithPath {
return PayWithPath{
Asset: sendAsset,
MaxAmount: maxAmount,
}
}
// Price is a mutator that sets price on offer operations
type Price string
// Rate is a mutator that sets selling/buying asset and price on offer operations
type Rate struct {
Selling Asset
Buying Asset
Price
}
// Sequence is a mutator that sets the sequence number on a transaction
type Sequence struct {
Sequence uint64
}
// SequenceProvider is the interface that other packages may implement to be
// used with the `AutoSequence` mutator.
type SequenceProvider interface {
SequenceForAccount(aid string) (xdr.SequenceNumber, error)
}
// Sign is a mutator that contributes a signature of the provided envelope's
// transaction with the configured key
type Sign struct {
Seed string
}
// SetFlag is a mutator capable of setting account flags
type SetFlag int32
// ClearFlag is a mutator capable of clearing account flags
type ClearFlag int32
// Signer is a mutator capable of adding, updating and deleting an account
// signer
type Signer struct {
Address string
Weight uint32
}
// SourceAccount is a mutator capable of setting the source account on
// an xdr.Operation and an xdr.Transaction
type SourceAccount struct {
AddressOrSeed string
}
// Thresholds is a mutator capable of setting account thresholds
type Thresholds struct {
Low *uint32
Medium *uint32
High *uint32
}
type Timebounds struct {
MinTime uint64
MaxTime uint64
}
// Trustor is a mutator capable of setting the trustor on
// allow_trust operation.
type Trustor struct {
Address string
}
// Network establishes the tang network that a transaction should apply to.
// This modifier influences how a transaction is hashed for the purposes of signature generation.
type Network struct {
Passphrase string
}
// ID returns the network ID derived from this struct's Passphrase
func (n *Network) ID() [32]byte {
return network.ID(n.Passphrase)
}
// BaseFee is a mutator capable of setting the base fee
type BaseFee struct {
Amount uint64
} | build/main.go | 0.797004 | 0.441252 | main.go | starcoder |
package surface
import (
"github.com/jphsd/texture"
"github.com/jphsd/texture/color"
col "image/color"
"math"
"math/rand"
)
// Surface collects the ambient light, lights, a material, and normal map required to describe
// an area. If the normal map is nil then the standard normal is use {0, 0, 1}
type Surface struct {
Ambient Light
Lights []Light
Mat Material
Normals texture.VectorField
}
var blinn = false
// Eval2 implements the ColorField interface.
func (s *Surface) Eval2(x, y float64) col.Color {
// For any point, the color rendered is the sum of the emissive, ambient and the diffuse/specular
// contributions from all of the lights.
material := s.Mat
normals := s.Normals
if normals == nil {
normals = texture.DefaultNormal
}
ambient := s.Ambient
view := []float64{0, 0, 1}
emm, amb, diff, spec, shine, rough := material.Eval2(x, y) // Emissive
// Emissive
lemm := &color.FRGBA{}
if emm != nil {
lemm = emm
}
// Ambient
acol, _, _, _ := ambient.Eval2(x, y)
lamb := amb.Prod(acol) // Ambient
col := lemm
col = col.Add(lamb)
if diff == nil {
return col
}
// Cummulative diffuse and specular for all lights
normal := normals.Eval2(x, y)
if rough > 0 {
normal = Roughen(rough, normal)
}
cdiff, cspec := &color.FRGBA{}, &color.FRGBA{}
for _, light := range s.Lights {
lcol, dir, dist, pow := light.Eval2(x, y)
if lcol.IsBlack() {
continue
}
lambert := Dot(dir, normal)
if lambert < 0 {
continue
}
if dist > 0 {
lcol = lcol.Scale(pow / (dist * dist))
}
cdiff = cdiff.Add(lcol.Prod(diff.Scale(lambert))) // Diffuse
if spec != nil {
if blinn {
// Blinn-Phong
half := Unit([]float64{dir[0] + view[0], dir[1] + view[1], dir[2] + view[2]})
dp := Dot(half, normal)
if dp > 0 {
phong := math.Pow(dp, shine*4)
cspec = cspec.Add(lcol.Prod(spec.Scale(phong))) // Specular
}
} else {
// Phong
dp := Dot(Reflect(dir, normal), view)
if dp > 0 {
phong := math.Pow(dp, shine)
cspec = cspec.Add(lcol.Prod(spec.Scale(phong))) // Specular
}
}
}
}
col = col.Add(cdiff)
col = col.Add(cspec)
return col
}
// Roughen perturbates a vector by replacing it with a randomly orientented unit vector.
func Roughen(r float64, vec []float64) []float64 {
// Construct a random unit vector pointing above the XY plane within r * 90 degrees
theta := rand.Float64() * 2 * math.Pi
phi := (1 - rand.Float64()*r) * math.Pi / 2
cp := math.Cos(phi)
rv := []float64{cp * math.Cos(theta), cp * math.Sin(theta), math.Sin(phi)}
// Rotate into same plane as vec if necessary
orig := []float64{0, 0, 1}
cv := Cross(vec, orig)
if cv[0]*cv[0]+cv[1]*cv[1]+cv[2]*cv[2] > 0 {
cv[0], cv[1], cv[2] = -cv[0], -cv[1], -cv[2] // Flipping the normal
quat := NewQuaternion(cv, math.Acos(Dot(vec, orig)))
rv = quat.Apply(rv)[0]
}
return rv
} | surface/surface.go | 0.730866 | 0.492676 | surface.go | starcoder |
package docs
import (
"bytes"
"encoding/json"
"strings"
"github.com/alecthomas/template"
"github.com/swaggo/swag"
)
var doc = `{
"schemes": {{ marshal .Schemes }},
"swagger": "2.0",
"info": {
"description": "{{.Description}}",
"title": "{{.Title}}",
"contact": {
"name": "Source Code",
"url": "https://github.com/yoanyombapro1234/FeelGuuds/src/services/shopper_service"
},
"license": {
"name": "MIT License",
"url": "https://github.com/yoanyombapro1234/FeelGuuds/src/services/shopper_service/blob/master/LICENSE"
},
"version": "{{.Version}}"
},
"host": "{{.Host}}",
"basePath": "{{.BasePath}}",
"paths": {
"/": {
"get": {
"description": "renders service UI",
"produces": [
"text/html"
],
"tags": [
"HTTP API"
],
"summary": "Index",
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "string"
}
}
}
}
},
"/api/echo": {
"post": {
"description": "forwards the call to the backend service and echos the posted content",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"HTTP API"
],
"summary": "Echo",
"responses": {
"202": {
"description": "Accepted",
"schema": {
"$ref": "#/definitions/api.MapResponse"
}
}
}
}
},
"/api/info": {
"get": {
"description": "returns the runtime information",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"HTTP API"
],
"summary": "Runtime information",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/api.RuntimeResponse"
}
}
}
}
},
"/cache/{key}": {
"get": {
"description": "returns the content from cache if key exists",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"HTTP API"
],
"summary": "Get payload from cache",
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "string"
}
}
}
},
"post": {
"description": "writes the posted content in cache",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"HTTP API"
],
"summary": "Save payload in cache",
"responses": {
"202": {
"description": ""
}
}
},
"delete": {
"description": "deletes the key and its value from cache",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"HTTP API"
],
"summary": "Delete payload from cache",
"responses": {
"202": {
"description": ""
}
}
}
},
"/chunked/{seconds}": {
"get": {
"description": "uses transfer-encoding type chunked to give a partial response and then waits for the specified period",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"HTTP API"
],
"summary": "Chunked transfer encoding",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/api.MapResponse"
}
}
}
}
},
"/delay/{seconds}": {
"get": {
"description": "waits for the specified period",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"HTTP API"
],
"summary": "Delay",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/api.MapResponse"
}
}
}
}
},
"/env": {
"get": {
"description": "returns the environment variables as a JSON array",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"HTTP API"
],
"summary": "Environment",
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "array",
"items": {
"type": "string"
}
}
}
}
}
},
"/headers": {
"get": {
"description": "returns a JSON array with the request HTTP headers",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"HTTP API"
],
"summary": "Headers",
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "array",
"items": {
"type": "string"
}
}
}
}
}
},
"/healthz": {
"get": {
"description": "used by Kubernetes liveness probe",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"Kubernetes"
],
"summary": "Liveness check",
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "string"
}
}
}
}
},
"/metrics": {
"get": {
"description": "returns HTTP requests duration and Go runtime metrics",
"produces": [
"text/plain"
],
"tags": [
"Kubernetes"
],
"summary": "Prometheus metrics",
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "string"
}
}
}
}
},
"/panic": {
"get": {
"description": "crashes the process with exit code 255",
"tags": [
"HTTP API"
],
"summary": "Panic"
}
},
"/readyz": {
"get": {
"description": "used by Kubernetes readiness probe",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"Kubernetes"
],
"summary": "Readiness check",
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "string"
}
}
}
}
},
"/readyz/disable": {
"post": {
"description": "signals the Kubernetes LB to stop sending requests to this instance",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"Kubernetes"
],
"summary": "Disable ready state",
"responses": {
"202": {
"description": "OK",
"schema": {
"type": "string"
}
}
}
}
},
"/readyz/enable": {
"post": {
"description": "signals the Kubernetes LB that this instance is ready to receive traffic",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"Kubernetes"
],
"summary": "Enable ready state",
"responses": {
"202": {
"description": "OK",
"schema": {
"type": "string"
}
}
}
}
},
"/status/{code}": {
"get": {
"description": "sets the response status code to the specified code",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"HTTP API"
],
"summary": "Status code",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/api.MapResponse"
}
}
}
}
},
"/store": {
"post": {
"description": "writes the posted content to disk at /data/hash and returns the SHA1 hash of the content",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"HTTP API"
],
"summary": "Upload file",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/api.MapResponse"
}
}
}
}
},
"/store/{hash}": {
"get": {
"description": "returns the content of the file /data/hash if exists",
"consumes": [
"application/json"
],
"produces": [
"text/plain"
],
"tags": [
"HTTP API"
],
"summary": "Download file",
"responses": {
"200": {
"description": "file",
"schema": {
"type": "string"
}
}
}
}
},
"/token": {
"post": {
"description": "issues a JWT token valid for one minute",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"HTTP API"
],
"summary": "Generate JWT token",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/api.TokenResponse"
}
}
}
}
},
"/token/validate": {
"post": {
"description": "validates the JWT token",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"HTTP API"
],
"summary": "Validate JWT token",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/api.TokenValidationResponse"
}
},
"401": {
"description": "Unauthorized",
"schema": {
"type": "string"
}
}
}
}
},
"/version": {
"get": {
"description": "returns service version and git commit hash",
"produces": [
"application/json"
],
"tags": [
"HTTP API"
],
"summary": "Version",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/api.MapResponse"
}
}
}
}
},
"/ws/echo": {
"post": {
"description": "echos content via websockets",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"HTTP API"
],
"summary": "Echo over websockets",
"responses": {
"202": {
"description": "Accepted",
"schema": {
"$ref": "#/definitions/api.MapResponse"
}
}
}
}
}
},
"definitions": {
"api.MapResponse": {
"type": "object",
"additionalProperties": {
"type": "string"
}
},
"api.RuntimeResponse": {
"type": "object",
"properties": {
"color": {
"type": "string"
},
"goarch": {
"type": "string"
},
"goos": {
"type": "string"
},
"hostname": {
"type": "string"
},
"logo": {
"type": "string"
},
"message": {
"type": "string"
},
"num_cpu": {
"type": "string"
},
"num_goroutine": {
"type": "string"
},
"revision": {
"type": "string"
},
"runtime": {
"type": "string"
},
"version": {
"type": "string"
}
}
},
"api.TokenResponse": {
"type": "object",
"properties": {
"expires_at": {
"type": "string"
},
"token": {
"type": "string"
}
}
},
"api.TokenValidationResponse": {
"type": "object",
"properties": {
"expires_at": {
"type": "string"
},
"token_name": {
"type": "string"
}
}
}
}
}`
type swaggerInfo struct {
Version string
Host string
BasePath string
Schemes []string
Title string
Description string
}
// SwaggerInfo holds exported Swagger Info so clients can modify it
var SwaggerInfo = swaggerInfo{
Version: "2.0",
Host: "localhost:9898",
BasePath: "/",
Schemes: []string{"http", "https"},
Title: "Service API",
Description: "Go microservice template for Kubernetes.",
}
type s struct{}
func (s *s) ReadDoc() string {
sInfo := SwaggerInfo
sInfo.Description = strings.Replace(sInfo.Description, "\n", "\\n", -1)
t, err := template.New("swagger_info").Funcs(template.FuncMap{
"marshal": func(v interface{}) string {
a, _ := json.Marshal(v)
return string(a)
},
}).Parse(doc)
if err != nil {
return doc
}
var tpl bytes.Buffer
if err := t.Execute(&tpl, sInfo); err != nil {
return doc
}
return tpl.String()
}
func init() {
swag.Register(swag.Name, &s{})
} | src/services/shopper_service/pkg/api/docs/docs.go | 0.653127 | 0.401277 | docs.go | starcoder |
package rockredis
import (
"encoding/binary"
"errors"
"math"
)
const signMask uint64 = 0x8000000000000000
func encodeIntToCmpUint(v int64) uint64 {
return uint64(v) ^ signMask
}
func decodeCmpUintToInt(u uint64) int64 {
return int64(u ^ signMask)
}
// EncodeInt appends the encoded value to slice b and returns the appended slice.
// EncodeInt guarantees that the encoded value is in ascending order for comparison.
func EncodeInt(b []byte, v int64) []byte {
var data [8]byte
u := encodeIntToCmpUint(v)
binary.BigEndian.PutUint64(data[:], u)
return append(b, data[:]...)
}
// EncodeIntDesc appends the encoded value to slice b and returns the appended slice.
// EncodeIntDesc guarantees that the encoded value is in descending order for comparison.
func EncodeIntDesc(b []byte, v int64) []byte {
var data [8]byte
u := encodeIntToCmpUint(v)
binary.BigEndian.PutUint64(data[:], ^u)
return append(b, data[:]...)
}
// DecodeInt decodes value encoded by EncodeInt before.
// It returns the leftover un-decoded slice, decoded value if no error.
func DecodeInt(b []byte) ([]byte, int64, error) {
if len(b) < 8 {
return nil, 0, errors.New("insufficient bytes to decode value")
}
u := binary.BigEndian.Uint64(b[:8])
v := decodeCmpUintToInt(u)
b = b[8:]
return b, v, nil
}
// DecodeIntDesc decodes value encoded by EncodeInt before.
// It returns the leftover un-decoded slice, decoded value if no error.
func DecodeIntDesc(b []byte) ([]byte, int64, error) {
if len(b) < 8 {
return nil, 0, errors.New("insufficient bytes to decode value")
}
u := binary.BigEndian.Uint64(b[:8])
v := decodeCmpUintToInt(^u)
b = b[8:]
return b, v, nil
}
// EncodeUint appends the encoded value to slice b and returns the appended slice.
// EncodeUint guarantees that the encoded value is in ascending order for comparison.
func EncodeUint(b []byte, v uint64) []byte {
var data [8]byte
binary.BigEndian.PutUint64(data[:], v)
return append(b, data[:]...)
}
// EncodeUintDesc appends the encoded value to slice b and returns the appended slice.
// EncodeUintDesc guarantees that the encoded value is in descending order for comparison.
func EncodeUintDesc(b []byte, v uint64) []byte {
var data [8]byte
binary.BigEndian.PutUint64(data[:], ^v)
return append(b, data[:]...)
}
// DecodeUint decodes value encoded by EncodeUint before.
// It returns the leftover un-decoded slice, decoded value if no error.
func DecodeUint(b []byte) ([]byte, uint64, error) {
if len(b) < 8 {
return nil, 0, errors.New("insufficient bytes to decode value")
}
v := binary.BigEndian.Uint64(b[:8])
b = b[8:]
return b, v, nil
}
// DecodeUintDesc decodes value encoded by EncodeInt before.
// It returns the leftover un-decoded slice, decoded value if no error.
func DecodeUintDesc(b []byte) ([]byte, uint64, error) {
if len(b) < 8 {
return nil, 0, errors.New("insufficient bytes to decode value")
}
data := b[:8]
v := binary.BigEndian.Uint64(data)
b = b[8:]
return b, ^v, nil
}
func encodeFloatToCmpUint64(f float64) uint64 {
u := math.Float64bits(f)
if f >= 0 {
u |= signMask
} else {
u = ^u
}
return u
}
func decodeCmpUintToFloat(u uint64) float64 {
if u&signMask > 0 {
u &= ^signMask
} else {
u = ^u
}
return math.Float64frombits(u)
}
// EncodeFloat encodes a float v into a byte slice which can be sorted lexicographically later.
// EncodeFloat guarantees that the encoded value is in ascending order for comparison.
func EncodeFloat(b []byte, v float64) []byte {
u := encodeFloatToCmpUint64(v)
return EncodeUint(b, u)
}
// DecodeFloat decodes a float from a byte slice generated with EncodeFloat before.
func DecodeFloat(b []byte) ([]byte, float64, error) {
	rest, cmp, err := DecodeUint(b)
	return rest, decodeCmpUintToFloat(cmp), err
}
// EncodeFloatDesc encodes a float v into a byte slice which can be sorted lexicographically later.
// EncodeFloatDesc guarantees that the encoded value is in descending order for comparison.
func EncodeFloatDesc(b []byte, v float64) []byte {
u := encodeFloatToCmpUint64(v)
return EncodeUintDesc(b, u)
}
// DecodeFloatDesc decodes a float from a byte slice generated with EncodeFloatDesc before.
func DecodeFloatDesc(b []byte) ([]byte, float64, error) {
b, u, err := DecodeUintDesc(b)
return b, decodeCmpUintToFloat(u), err
} | rockredis/number.go | 0.892963 | 0.504639 | number.go | starcoder |
package schema
// ModelSchema is the embedded JSON Schema describing the Metadata
// model (service, process, system, user and labels). It is generated;
// do not edit the schema text by hand.
const ModelSchema = `{
    "$id": "doc/spec/metadata.json",
    "title": "Metadata",
    "description": "Metadata concerning the other objects in the stream.",
    "type": ["object"],
    "properties": {
        "service": {
            "$id": "doc/spec/service.json",
            "title": "Service",
            "type": ["object", "null"],
            "properties": {
                "agent": {
                    "description": "Name and version of the Elastic APM agent",
                    "type": ["object", "null"],
                    "properties": {
                        "name": {
                            "description": "Name of the Elastic APM agent, e.g. \"Python\"",
                            "type": ["string", "null"],
                            "maxLength": 1024
                        },
                        "version": {
                            "description": "Version of the Elastic APM agent, e.g.\"1.0.0\"",
                            "type": ["string", "null"],
                            "maxLength": 1024
                        },
                        "ephemeral_id": {
                            "description": "Free format ID used for metrics correlation by some agents",
                            "type": ["string", "null"],
                            "maxLength": 1024
                        }
                    }
                },
                "framework": {
                    "description": "Name and version of the web framework used",
                    "type": ["object", "null"],
                    "properties": {
                        "name": {
                            "type": ["string", "null"],
                            "maxLength": 1024
                        },
                        "version": {
                            "type": ["string", "null"],
                            "maxLength": 1024
                        }
                    }
                },
                "language": {
                    "description": "Name and version of the programming language used",
                    "type": ["object", "null"],
                    "properties": {
                        "name": {
                            "type": ["string", "null"],
                            "maxLength": 1024
                        },
                        "version": {
                            "type": ["string", "null"],
                            "maxLength": 1024
                        }
                    }
                },
                "name": {
                    "description": "Immutable name of the service emitting this event",
                    "type": ["string", "null"],
                    "pattern": "^[a-zA-Z0-9 _-]+$",
                    "maxLength": 1024
                },
                "environment": {
                    "description": "Environment name of the service, e.g. \"production\" or \"staging\"",
                    "type": ["string", "null"],
                    "maxLength": 1024
                },
                "runtime": {
                    "description": "Name and version of the language runtime running this service",
                    "type": ["object", "null"],
                    "properties": {
                        "name": {
                            "type": ["string", "null"],
                            "maxLength": 1024
                        },
                        "version": {
                            "type": ["string", "null"],
                            "maxLength": 1024
                        }
                    }
                },
                "version": {
                    "description": "Version of the service emitting this event",
                    "type": ["string", "null"],
                    "maxLength": 1024
                },
                "node": {
                    "description": "Unique meaningful name of the service node.",
                    "type": ["object", "null"],
                    "properties": {
                        "configured_name": {
                            "type": ["string", "null"],
                            "maxLength": 1024
                        }
                    }
                }
            },
            "type": "object",
            "required": ["name", "agent"],
            "properties.name.type": "string",
            "properties.agent.type": "string",
            "properties.agent.required": ["name", "version"],
            "properties.agent.properties.name.type": "string",
            "properties.agent.properties.version.type": "string",
            "properties.runtime.required": ["name", "version"],
            "properties.runtime.properties.name.type": "string",
            "properties.runtime.properties.version.type": "string",
            "properties.language.required": ["name"],
            "properties.language.properties.name.type": "string"
        },
        "process": {
            "$id": "doc/spec/process.json",
            "title": "Process",
            "type": ["object", "null"],
            "properties": {
                "pid": {
                    "description": "Process ID of the service",
                    "type": ["integer"]
                },
                "ppid": {
                    "description": "Parent process ID of the service",
                    "type": ["integer", "null"]
                },
                "title": {
                    "type": ["string", "null"],
                    "maxLength": 1024
                },
                "argv": {
                    "description": "Command line arguments used to start this process",
                    "type": ["array", "null"],
                    "minItems": 0,
                    "items": {
                        "type": "string"
                    }
                }
            },
            "required": ["pid"]
        },
        "system": {
            "$id": "doc/spec/system.json",
            "title": "System",
            "type": ["object", "null"],
            "properties": {
                "architecture": {
                    "description": "Architecture of the system the agent is running on.",
                    "type": ["string", "null"],
                    "maxLength": 1024
                },
                "hostname": {
                    "description": "Deprecated. Hostname of the system the agent is running on. Will be ignored if kubernetes information is set.",
                    "type": ["string", "null"],
                    "maxLength": 1024
                },
                "detected_hostname": {
                    "description": "Hostname of the host the monitored service is running on. It normally contains what the hostname command returns on the host machine. Will be ignored if kubernetes information is set, otherwise should always be set.",
                    "type": ["string", "null"],
                    "maxLength": 1024
                },
                "configured_hostname": {
                    "description": "Name of the host the monitored service is running on. It should only be set when configured by the user. If empty, will be set to detected_hostname or derived from kubernetes information if provided.",
                    "type": ["string", "null"],
                    "maxLength": 1024
                },
                "platform": {
                    "description": "Name of the system platform the agent is running on.",
                    "type": ["string", "null"],
                    "maxLength": 1024
                },
                "container": {
                    "properties": {
                        "id" : {
                            "description": "Container ID",
                            "type": ["string"],
                            "maxLength": 1024
                        }
                    },
                    "required": ["id"]
                },
                "kubernetes": {
                    "properties": {
                        "namespace": {
                            "description": "Kubernetes namespace",
                            "type": ["string", "null"],
                            "maxLength": 1024
                        },
                        "pod":{
                            "properties": {
                                "name": {
                                    "description": "Kubernetes pod name",
                                    "type": ["string", "null"],
                                    "maxLength": 1024
                                },
                                "uid": {
                                    "description": "Kubernetes pod uid",
                                    "type": ["string", "null"],
                                    "maxLength": 1024
                                }
                            }
                        },
                        "node":{
                            "properties": {
                                "name": {
                                    "description": "Kubernetes node name",
                                    "type": ["string", "null"],
                                    "maxLength": 1024
                                }
                            }
                        }
                    }
                }
            }
        },
        "user": {
            "description": "Describes the authenticated User for a request.",
            "$id": "docs/spec/user.json",
            "title": "User",
            "type": ["object", "null"],
            "properties": {
                "id": {
                    "description": "Identifier of the logged in user, e.g. the primary key of the user",
                    "type": ["string", "integer", "null"],
                    "maxLength": 1024
                },
                "email": {
                    "description": "Email of the logged in user",
                    "type": ["string", "null"],
                    "maxLength": 1024
                },
                "username": {
                    "description": "The username of the logged in user",
                    "type": ["string", "null"],
                    "maxLength": 1024
                }
            }
        },
        "labels": {
            "$id": "doc/spec/tags.json",
            "title": "Tags",
            "type": ["object", "null"],
            "description": "A flat mapping of user-defined tags with string, boolean or number values.",
            "patternProperties": {
                "^[^.*\"]*$": {
                    "type": ["string", "boolean", "number", "null"],
                    "maxLength": 1024
                }
            },
            "additionalProperties": false
        }
    },
    "required": ["service"]
}
`
package changes
// Move represents a shuffling of some elements (specified by Offset
// and Count) over to a different spot (specified by Distance, which
// can be negative to indicate a move over to the left).
type Move struct {
	// Offset: index of the first moved element; Count: number of moved
	// elements; Distance: how far they shift (negative = leftwards).
	Offset, Count, Distance int
}
// Revert returns the inverse move: applying it after m restores the
// original arrangement.
func (m Move) Revert() Change {
	inverse := Move{
		Offset:   m.Offset + m.Distance,
		Count:    m.Count,
		Distance: -m.Distance,
	}
	return inverse
}
// MergeReplace merges a move against a Replace. The replace always wins:
// only its Before state has to be rewritten through the move.
func (m Move) MergeReplace(other Replace) (other1 *Replace, m1 *Splice) {
	updated := other
	updated.Before = updated.Before.Apply(nil, m)
	return &updated, nil
}
// MergeSplice merges a splice with a move by delegating to the splice's
// own MergeMove and swapping the results back into our orientation.
func (m Move) MergeSplice(o Splice) (Change, Change) {
	left, right := o.MergeMove(m)
	return right, left
}
// MergeMove merges a move against another Move
// by classifying how the two source ranges relate (equal, no-op,
// disjoint, nested, or partially overlapping) and dispatching to the
// matching helper.
func (m Move) MergeMove(o Move) (ox []Move, mx []Move) {
	// Identical moves cancel out entirely.
	if m == o {
		return nil, nil
	}
	// A move with no elements or no distance is a no-op; nothing to merge.
	if m.Distance == 0 || m.Count == 0 || o.Distance == 0 || o.Count == 0 {
		return []Move{o}, []Move{m}
	}
	// Disjoint source ranges.
	if m.Offset >= o.Offset+o.Count || o.Offset >= m.Offset+m.Count {
		return m.mergeMoveNoOverlap(o)
	}
	// m's range fully contains o's range.
	if m.Offset <= o.Offset && m.Offset+m.Count >= o.Offset+o.Count {
		return m.mergeMoveContained(o)
	}
	// o's range fully contains m's range: solve the mirrored problem
	// and swap the results back.
	if m.Offset >= o.Offset && m.Offset+m.Count <= o.Offset+o.Count {
		return m.swap(o.mergeMoveContained(m))
	}
	// Partial overlap: normalise so the receiver starts to the left.
	if m.Offset < o.Offset {
		return m.mergeMoveRightOverlap(o)
	}
	return m.swap(o.mergeMoveRightOverlap(m))
}
// mergeMoveNoOverlap merges two moves whose source ranges are disjoint.
// The sub-cases depend on whether either move's destination point lands
// strictly inside the other move's source range.
func (m Move) mergeMoveNoOverlap(o Move) (ox, mx []Move) {
	mdest, odest := m.dest(), o.dest()
	if !m.contains(odest) && !o.contains(mdest) {
		return m.mergeMoveNoOverlapNoDestMixups(o)
	}
	if m.contains(odest) && o.contains(mdest) {
		return m.mergeMoveNoOverlapMixedDests(o)
	}
	if o.contains(mdest) {
		// Mirror the problem so that only o's destination is contained.
		return m.swap(o.mergeMoveNoOverlap(m))
	}
	return m.mergeMoveNoOverlapContainedDest(o)
}
// mergeMoveNoOverlapContainedDest handles two disjoint moves where o's
// destination falls inside m's source range (but not vice versa): the
// merged m becomes a single move carrying both element ranges
// (m1.Count = m.Count + o.Count), while o is re-expressed relative to
// m's effect.
func (m Move) mergeMoveNoOverlapContainedDest(o Move) (ox, mx []Move) {
	mdest, odest := m.dest(), o.dest()
	// Shift m's destination to account for o's elements changing sides.
	mdestNew := mdest
	if mdest >= odest && mdest <= o.Offset {
		mdestNew += o.Count
	} else if mdest > o.Offset && mdest <= odest {
		mdestNew -= o.Count
	}
	// Build the combined move m1 over both element ranges.
	m1 := m
	if o.Offset <= m.Offset {
		m1.Offset -= o.Count
	}
	m1.Count = m.Count + o.Count
	if mdestNew <= m1.Offset {
		m1.Distance = mdestNew - m1.Offset
	} else {
		m1.Distance = mdestNew - m1.Offset - m1.Count
	}
	// Re-express o in coordinates after m has been applied.
	if o.Offset > m.Offset && o.Offset < mdest {
		o.Offset -= m.Count
	} else if o.Offset >= mdest && o.Offset < m.Offset {
		o.Offset += m.Count
	}
	odest += m.Distance
	if odest <= o.Offset {
		o.Distance = odest - o.Offset
	} else {
		o.Distance = odest - o.Offset - o.Count
	}
	return []Move{o}, []Move{m1}
}
// mergeMoveNoOverlapNoDestMixups handles two disjoint moves where
// neither destination lands inside the other move's source range: each
// move is simply re-expressed through the other's point mapping.
func (m Move) mergeMoveNoOverlapNoDestMixups(o Move) (ox, mx []Move) {
	mdest, odest := m.dest(), o.dest()
	o1 := Move{Offset: m.mapPoint(o.Offset), Count: o.Count}
	o1 = o1.withDest(m.mapPoint(odest))
	if odest == mdest {
		// Shared destination point: pin o's destination explicitly
		// rather than mapping it through m.
		o1 = o1.withDest(m.Offset + m.Distance)
	}
	m1 := Move{Offset: o.mapPoint(m.Offset), Count: m.Count}
	m1 = m1.withDest(o.mapPoint(mdest))
	return []Move{o1}, []Move{m1}
}
// mergeMoveNoOverlapMixedDests handles two disjoint moves whose
// destinations each land inside the other's source range: o must be
// split into two moves (the part left of m's destination and the
// remainder), while the merged m becomes one move carrying both ranges.
func (m Move) mergeMoveNoOverlapMixedDests(o Move) (ox, mx []Move) {
	var oleft, oright Move
	mdest, odest := m.dest(), o.dest()
	oleft.Count = mdest - o.Offset
	oright.Count = o.Count - oleft.Count
	oleft.Offset = m.Offset + m.Distance - oleft.Count
	oright.Offset = m.Offset + m.Distance + m.Count
	oleft.Distance = odest - m.Offset
	oright.Distance = odest - m.Offset - m.Count
	ox = []Move{oleft, oright}
	distance := o.Offset - m.Offset - m.Count
	if distance < 0 {
		distance = -(m.Offset - o.Offset - o.Count)
	}
	mx = []Move{{
		Offset:   o.Offset + o.Distance - (odest - m.Offset),
		Count:    m.Count + o.Count,
		Distance: distance,
	}}
	return ox, mx
}
// mergeMoveRightOverlap handles partially overlapping moves where m
// starts to the left of o: the overlapping portion is peeled off as a
// compensating "undo" move, and the non-overlapping remainder of o is
// merged via the disjoint-range path.
func (m Move) mergeMoveRightOverlap(o Move) ([]Move, []Move) {
	overlapSize := m.Offset + m.Count - o.Offset
	overlapUndo := Move{Offset: o.Offset + o.Distance, Count: overlapSize}
	non := Move{Offset: o.Offset + overlapSize, Count: o.Count - overlapSize}
	if o.Distance > 0 {
		overlapUndo.Distance = -o.Distance
		non.Distance = o.Distance
	} else {
		overlapUndo.Distance = o.Count - overlapSize - o.Distance
		non.Distance = o.Distance - overlapSize
	}
	l, r := m.mergeMoveNoOverlap(non)
	return l, append([]Move{overlapUndo}, r...)
}
// mergeMoveContained handles the case where m's source range fully
// contains o's source range.
func (m Move) mergeMoveContained(o Move) ([]Move, []Move) {
	odest := o.dest()
	ox := o
	ox.Offset += m.Distance
	if m.Offset <= odest && odest <= m.Offset+m.Count {
		// o moves within the block m carries: o just shifts along with m.
		return []Move{ox}, []Move{m}
	}
	if odest == m.dest() {
		// Both moves target the same destination point.
		ox = ox.withDest(m.Offset + m.Distance)
		mx := m
		mx.Count -= o.Count
		if o.Distance < 0 {
			mx.Offset += o.Count
		}
		mx = mx.withDest(o.Offset + o.Count + o.Distance)
		return []Move{ox}, []Move{mx}
	}
	// o leaves m's range entirely: shrink m by o's elements and remap
	// both moves through each other's point mappings.
	ox = ox.withDest(m.mapPoint(odest))
	mx := m
	mx.Offset = o.mapPoint(m.Offset)
	mx.Count = m.Count - o.Count
	mx = mx.withDest(o.mapPoint(m.dest()))
	return []Move{ox}, []Move{mx}
}
// MapIndex maps a particular index to the new location of the index
// after the move has been applied.
func (m Move) MapIndex(idx int) int {
	if idx >= m.Offset+m.Distance && idx < m.Offset {
		// Index sits in the gap a left-moving block jumps over.
		return idx + m.Count
	}
	if idx >= m.Offset && idx < m.Offset+m.Count {
		// Index is inside the moved block itself.
		return idx + m.Distance
	}
	if idx >= m.Offset+m.Count && idx < m.Offset+m.Count+m.Distance {
		// Index sits in the gap a right-moving block jumps over.
		return idx - m.Count
	}
	return idx
}
// mapPoint maps a boundary point through the move; unlike MapIndex it
// also shifts a point sitting exactly at m.Offset.
func (m Move) mapPoint(idx int) int {
	if idx >= m.Offset+m.Distance && idx <= m.Offset {
		return idx + m.Count
	}
	if idx >= m.Offset+m.Count && idx < m.Offset+m.Count+m.Distance {
		return idx - m.Count
	}
	return idx
}
// dest returns the destination point of the move in pre-move coordinates.
func (m Move) dest() int {
	if m.Distance >= 0 {
		return m.Offset + m.Distance + m.Count
	}
	return m.Offset + m.Distance
}
// withDest returns a copy of m whose Distance is adjusted so the move
// targets the given destination point.
func (m Move) withDest(dest int) Move {
	d := dest - m.Offset - m.Count
	if d < 0 {
		d = dest - m.Offset
	}
	m.Distance = d
	return m
}
// contains reports whether idx lies strictly inside the moved range.
func (m Move) contains(idx int) bool {
	return m.Offset < idx && idx < m.Offset+m.Count
}
// swap flips the order of a pair of merge results; used when a merge
// was computed with the receiver and argument exchanged.
func (m Move) swap(l, r []Move) ([]Move, []Move) {
	return r, l
}
// movesToChange collapses a list of moves into a single Change: nil
// when no effective moves remain, the move itself when exactly one
// remains, and a ChangeSet otherwise. No-op moves (zero Count or zero
// Distance) are dropped.
func movesToChange(m []Move) Change {
	// Bug fix: the slice must start empty. make([]Change, len(m))
	// pre-filled it with len(m) nil entries before appending, so the
	// result always carried spurious nils and the 0/1-element cases
	// below could never trigger for non-empty input.
	result := make([]Change, 0, len(m))
	for _, mm := range m {
		if mm.Count != 0 && mm.Distance != 0 {
			result = append(result, mm)
		}
	}
	switch len(result) {
	case 0:
		return nil
	case 1:
		return result[0]
	}
	return ChangeSet(result)
}
// Merge implements the Change.Merge method
// by dispatching on the concrete type of the other change.
func (m Move) Merge(other Change) (otherx, cx Change) {
	if other == nil {
		return nil, m
	}
	switch o := other.(type) {
	case Replace:
		return change(m.MergeReplace(o))
	case Splice:
		return m.MergeSplice(o)
	case Move:
		l, r := m.MergeMove(o)
		return movesToChange(l), movesToChange(r)
	case Custom:
		// Custom changes merge themselves against a Move; delegate and
		// swap the results back into our orientation.
		return swap(o.ReverseMerge(m))
	}
	panic("Unexpected change")
}
// Change returns either nil (for a nil receiver) or the underlying
// Move as a Change value.
func (m *Move) Change() Change {
	if m != nil {
		return *m
	}
	return nil
}
// Normalize ensures that distance is always positive
func (m Move) Normalize() Move {
if m.Distance < 0 {
return Move{m.Offset + m.Distance, -m.Distance, m.Count}
}
return m
} | changes/move.go | 0.853027 | 0.574037 | move.go | starcoder |
package digits
import (
"fmt"
"strconv"
"strings"
)
// bubblesBySegment is the width of one segment line: the number of
// digit positions each line can hold.
const bubblesBySegment = 15
// ParseDigits returns a 3D array of integers which represents the segments of the first circle
// then the different lines of a segment and finally the points which represents the decimals.
func ParseDigits(number string) [][][]int {
	// NOTE(review): progress output on stdout looks like leftover debug
	// logging for library code — confirm it is wanted.
	fmt.Printf("Parsing the digits\n")
	// Strip the decimal separator so only digits remain.
	number = strings.Replace(number, ".", "", -1)
	// Generate the 3D array
	// with one entry per possible digit value 0..9.
	var digits = make([][][]int, 10)
	// Iterate all the digits of the given number
	// pairwise: each digit is filed under the value of its predecessor.
	for digitIndex := 0; digitIndex < len(number)-1; digitIndex++ {
		previous, _ := strconv.Atoi(string(number[digitIndex]))
		current, _ := strconv.Atoi(string(number[digitIndex+1]))
		segments := digits[previous]
		// Create a new segment if no one has been found
		if len(segments) == 0 {
			segments = append(segments, MakeIntArray(bubblesBySegment))
		}
		// Set the position of the digit in the given number, wrapped to
		// the segment width.
		position := digitIndex % bubblesBySegment
		// Then generate the segment with the current value and its position
		digits[previous] = GenerateSegment(segments, position, current)
	}
	return digits
}
// GenerateSegment returns the segment with value stored at position:
// the first line with a free slot (-1) at that position is reused, and
// a fresh line is appended when every existing line is occupied there.
func GenerateSegment(segments [][]int, position, value int) [][]int {
	for i := range segments {
		if segments[i][position] == -1 {
			segments[i][position] = value
			return segments
		}
	}
	// No free slot found: start a new line and place the value in it.
	line := MakeIntArray(bubblesBySegment)
	line[position] = value
	return append(segments, line)
}
// MakeIntArray returns a slice of the given size with every element
// initialised to -1 (the sentinel for an empty slot).
func MakeIntArray(size int) []int {
	array := make([]int, size)
	for i := 0; i < size; i++ {
		array[i] = -1
	}
	return array
}
"Space-Efficient Online Computation of Quantile Summaries" (Greenwald and Khanna, 2001)
http://infolab.stanford.edu/~datar/courses/cs361a/papers/quantiles.pdf
This implementation is backed by a skiplist to make inserting elements into the
summary faster. Querying is still O(n).
*/
package gk
// Stream is a quantile summary
// in the Greenwald-Khanna style: an ordered list of tuples kept small
// enough to answer quantile queries within the configured epsilon.
type Stream struct {
	// summary holds the ordered tuples, backed by a skiplist.
	summary *skiplist
	// epsilon is the configured accuracy (0 <= epsilon <= 1).
	epsilon float64
	// n counts the items inserted so far.
	n int
}
// tuple is one summary entry: v is an observed value, g the gap in
// minimum rank from the previous tuple, delta the rank uncertainty.
type tuple struct {
	v float64
	g int
	delta int
}
// New returns a new stream with accuracy epsilon (0 <= epsilon <= 1).
func New(epsilon float64) *Stream {
	s := &Stream{summary: newSkiplist()}
	s.epsilon = epsilon
	return s
}
// Insert inserts an item into the quantile summary
// and periodically compresses it.
func (s *Stream) Insert(v float64) {
	value := tuple{v, 1, 0}
	elt := s.summary.Insert(value)
	s.n++
	// Interior elements may carry rank uncertainty; the first and last
	// elements keep delta = 0.
	if elt.prev[0] != s.summary.head && elt.next[0] != nil {
		elt.value.delta = int(2 * s.epsilon * float64(s.n))
	}
	// Compress every 1/(2*epsilon) inserts.
	// NOTE(review): epsilon == 0 makes the divisor 0 and the modulo
	// panics — presumably callers always pass epsilon > 0; verify.
	if s.n%int(1.0/float64(2.0*s.epsilon)) == 0 {
		s.compress()
	}
}
// compress walks the summary and merges adjacent tuples whose combined
// uncertainty stays within the 2*epsilon*n error budget, shrinking the
// summary without breaking the accuracy guarantee.
func (s *Stream) compress() {
	var missing int
	epsN := int(2 * s.epsilon * float64(s.n))
	for elt := s.summary.head.next[0]; elt != nil && elt.next[0] != nil; {
		next := elt.next[0]
		t := elt.value
		nt := &next.value
		// value merging
		if t.v == nt.v {
			missing += nt.g
			nt.delta += missing
			nt.g = t.g
			s.summary.Remove(elt)
		} else if t.g+nt.g+missing+nt.delta < epsN {
			// Capacity merging: fold t's weight into its successor.
			nt.g += t.g + missing
			missing = 0
			s.summary.Remove(elt)
		} else {
			// Cannot merge; carry any accumulated gap forward.
			nt.g += missing
			missing = 0
		}
		elt = next
	}
}
// Query returns an epsilon estimate of the element at quantile 'q' (0 <= q <= 1)
// by scanning the summary until the rank bounds bracket the target rank.
func (s *Stream) Query(q float64) float64 {
	// convert quantile to rank
	r := int(q*float64(s.n) + 0.5)
	var rmin int
	epsN := int(s.epsilon * float64(s.n))
	for elt := s.summary.head.next[0]; elt != nil; elt = elt.next[0] {
		t := elt.value
		// rmin accumulates the minimum possible rank of t.
		rmin += t.g
		n := elt.next[0]
		if n == nil {
			// Last element: it bounds all remaining ranks.
			return t.v
		}
		if r+epsN < rmin+n.value.g+n.value.delta {
			if r+epsN < rmin+n.value.g {
				return t.v
			}
			return n.value.v
		}
	}
	panic("not reached")
}
package knn
import (
"errors"
"fmt"
"github.com/gonum/matrix"
"github.com/sjwhitworth/golearn/base"
"github.com/sjwhitworth/golearn/kdtree"
"github.com/sjwhitworth/golearn/metrics/pairwise"
"github.com/sjwhitworth/golearn/utilities"
"gonum.org/v1/gonum/mat"
)
// A KNNClassifier consists of a data matrix, associated labels in the same order as the matrix, searching algorithm, and a distance function.
// The accepted distance functions at this time are 'euclidean', 'manhattan', and 'cosine'.
// The accepted searching algorithm here are 'linear', and 'kdtree'.
// Optimisations only occur when things are identically group into identical
// AttributeGroups, which don't include the class variable, in the same order.
// Using weighted KNN when Weighted set to be true (default: false).
type KNNClassifier struct {
	base.BaseEstimator
	// TrainingData holds the fitted instances searched by Predict.
	TrainingData base.FixedDataGrid
	// DistanceFunc is "euclidean", "manhattan" or "cosine".
	DistanceFunc string
	// Algorithm is the neighbour-search strategy: "linear" or "kdtree".
	Algorithm string
	// NearestNeighbours is the K in KNN.
	NearestNeighbours int
	// AllowOptimisations enables the fast euclidean path when the data
	// layout permits it.
	AllowOptimisations bool
	// Weighted selects inverse-distance-weighted voting over majority vote.
	Weighted bool
}
// NewKnnClassifier returns a new classifier using the given distance
// function, search algorithm and neighbour count. Optimisations are
// enabled and weighting disabled by default.
func NewKnnClassifier(distfunc, algorithm string, neighbours int) *KNNClassifier {
	return &KNNClassifier{
		DistanceFunc:       distfunc,
		Algorithm:          algorithm,
		NearestNeighbours:  neighbours,
		Weighted:           false,
		AllowOptimisations: true,
	}
}
// Fit stores the training data for later
// use by Predict; KNN is lazy, so no model is built here.
func (KNN *KNNClassifier) Fit(trainingData base.FixedDataGrid) error {
	KNN.TrainingData = trainingData
	return nil
}
// canUseOptimisations reports whether the optimised euclidean path can
// be used: the layouts must match exactly, both grids must be
// DenseInstances, class Attributes must not share an AttributeGroup
// with the data, and every data Attribute must be a FloatAttribute.
func (KNN *KNNClassifier) canUseOptimisations(what base.FixedDataGrid) bool {
	// Check that the two have exactly the same layout
	if !base.CheckStrictlyCompatible(what, KNN.TrainingData) {
		return false
	}
	// Check that the two are DenseInstances
	whatd, ok1 := what.(*base.DenseInstances)
	_, ok2 := KNN.TrainingData.(*base.DenseInstances)
	if !ok1 || !ok2 {
		return false
	}
	// Check that no Class Attributes are mixed in with the data
	classAttrs := whatd.AllClassAttributes()
	normalAttrs := base.NonClassAttributes(whatd)
	// Retrieve all the AGs
	ags := whatd.AllAttributeGroups()
	classAttrGroups := make([]base.AttributeGroup, 0)
	for agName := range ags {
		ag := ags[agName]
		attrs := ag.Attributes()
		matched := false
		for _, a := range attrs {
			for _, c := range classAttrs {
				if a.Equals(c) {
					matched = true
				}
			}
		}
		if matched {
			classAttrGroups = append(classAttrGroups, ag)
		}
	}
	// Any overlap between a class-bearing group and the data attributes
	// disqualifies the optimisation.
	for _, cag := range classAttrGroups {
		attrs := cag.Attributes()
		common := base.AttributeIntersect(normalAttrs, attrs)
		if len(common) != 0 {
			return false
		}
	}
	// Check that all of the Attributes are numeric
	for _, a := range normalAttrs {
		if _, ok := a.(*base.FloatAttribute); !ok {
			return false
		}
	}
	// If that's fine, return true
	return true
}
// Predict returns a classification for the vector, based on a vector input, using the KNN algorithm.
// It validates the configured distance function and search algorithm,
// takes the optimised euclidean path when possible, and otherwise
// computes distances row-by-row (linear scan or kd-tree search) and
// votes (weighted or unweighted) on the K nearest training rows.
func (KNN *KNNClassifier) Predict(what base.FixedDataGrid) (base.FixedDataGrid, error) {
	// Check what distance function we are using
	var distanceFunc pairwise.PairwiseDistanceFunc
	switch KNN.DistanceFunc {
	case "euclidean":
		distanceFunc = pairwise.NewEuclidean()
	case "manhattan":
		distanceFunc = pairwise.NewManhattan()
	case "cosine":
		distanceFunc = pairwise.NewCosine()
	default:
		return nil, errors.New("unsupported distance function")
	}
	// Check what searching algorith, we are using
	if KNN.Algorithm != "linear" && KNN.Algorithm != "kdtree" {
		return nil, errors.New("unsupported searching algorithm")
	}
	// Check Compatibility
	allAttrs := base.CheckCompatible(what, KNN.TrainingData)
	if allAttrs == nil {
		// Don't have the same Attributes
		return nil, errors.New("attributes not compatible")
	}
	// Use optimised version if permitted
	if KNN.Algorithm == "linear" && KNN.AllowOptimisations {
		if KNN.DistanceFunc == "euclidean" {
			if KNN.canUseOptimisations(what) {
				return KNN.optimisedEuclideanPredict(what.(*base.DenseInstances)), nil
			}
		}
	}
	fmt.Println("Optimisations are switched off")
	// Remove the Attributes which aren't numeric
	allNumericAttrs := make([]base.Attribute, 0)
	for _, a := range allAttrs {
		if fAttr, ok := a.(*base.FloatAttribute); ok {
			allNumericAttrs = append(allNumericAttrs, fAttr)
		}
	}
	// If every Attribute is a FloatAttribute, then we remove the last one
	// because that is the Attribute we are trying to predict.
	if len(allNumericAttrs) == len(allAttrs) {
		allNumericAttrs = allNumericAttrs[:len(allNumericAttrs)-1]
	}
	// Generate return vector
	ret := base.GeneratePredictionVector(what)
	// Resolve Attribute specifications for both
	whatAttrSpecs := base.ResolveAttributes(what, allNumericAttrs)
	trainAttrSpecs := base.ResolveAttributes(KNN.TrainingData, allNumericAttrs)
	// Reserve storage for most the most similar items
	distances := make(map[int]float64)
	// Reserve storage for voting map
	maxmapInt := make(map[string]int)
	maxmapFloat := make(map[string]float64)
	// Reserve storage for row computations
	trainRowBuf := make([]float64, len(allNumericAttrs))
	predRowBuf := make([]float64, len(allNumericAttrs))
	_, maxRow := what.Size()
	curRow := 0
	// build kdtree if algorithm is 'kdtree'
	kd := kdtree.New()
	srcRowNoMap := make([]int, 0)
	if KNN.Algorithm == "kdtree" {
		buildData := make([][]float64, 0)
		KNN.TrainingData.MapOverRows(trainAttrSpecs, func(trainRow [][]byte, srcRowNo int) (bool, error) {
			oneData := make([]float64, len(allNumericAttrs))
			// Read the float values out
			for i, _ := range allNumericAttrs {
				oneData[i] = base.UnpackBytesToFloat(trainRow[i])
			}
			srcRowNoMap = append(srcRowNoMap, srcRowNo)
			buildData = append(buildData, oneData)
			return true, nil
		})
		err := kd.Build(buildData)
		if err != nil {
			return nil, err
		}
	}
	// Iterate over all outer rows
	what.MapOverRows(whatAttrSpecs, func(predRow [][]byte, predRowNo int) (bool, error) {
		// NOTE(review): curRow%1 is always 0, so this progress line is
		// printed for every row — presumably a larger stride was intended.
		if (curRow%1) == 0 && curRow > 0 {
			fmt.Printf("KNN: %.2f %% done\r", float64(curRow)*100.0/float64(maxRow))
		}
		curRow++
		// Read the float values out
		for i, _ := range allNumericAttrs {
			predRowBuf[i] = base.UnpackBytesToFloat(predRow[i])
		}
		predMat := utilities.FloatsToMatrix(predRowBuf)
		switch KNN.Algorithm {
		case "linear":
			// Find the closest match in the training data
			KNN.TrainingData.MapOverRows(trainAttrSpecs, func(trainRow [][]byte, srcRowNo int) (bool, error) {
				// Read the float values out
				for i, _ := range allNumericAttrs {
					trainRowBuf[i] = base.UnpackBytesToFloat(trainRow[i])
				}
				// Compute the distance
				trainMat := utilities.FloatsToMatrix(trainRowBuf)
				distances[srcRowNo] = distanceFunc.Distance(predMat, trainMat)
				return true, nil
			})
			sorted := utilities.SortIntMap(distances)
			values := sorted[:KNN.NearestNeighbours]
			length := make([]float64, KNN.NearestNeighbours)
			for k, v := range values {
				length[k] = distances[v]
			}
			var maxClass string
			if KNN.Weighted {
				maxClass = KNN.weightedVote(maxmapFloat, values, length)
			} else {
				maxClass = KNN.vote(maxmapInt, values)
			}
			base.SetClass(ret, predRowNo, maxClass)
		case "kdtree":
			// search kdtree
			values, length, err := kd.Search(KNN.NearestNeighbours, distanceFunc, predRowBuf)
			if err != nil {
				return false, err
			}
			// map values to srcRowNo
			for k, v := range values {
				values[k] = srcRowNoMap[v]
			}
			var maxClass string
			if KNN.Weighted {
				maxClass = KNN.weightedVote(maxmapFloat, values, length)
			} else {
				maxClass = KNN.vote(maxmapInt, values)
			}
			base.SetClass(ret, predRowNo, maxClass)
		}
		return true, nil
	})
	return ret, nil
}
// String returns a short description of the classifier,
// e.g. "KNNClassifier(euclidean, 5)".
func (KNN *KNNClassifier) String() string {
	return fmt.Sprintf("KNNClassifier(%s, %d)", KNN.DistanceFunc, KNN.NearestNeighbours)
}
// vote returns the most frequent label among the given training-row
// indices (unweighted majority vote). maxmap is caller-owned scratch
// space, reused across calls and zeroed on entry.
func (KNN *KNNClassifier) vote(maxmap map[string]int, values []int) string {
	for label := range maxmap {
		maxmap[label] = 0
	}
	for _, rowNo := range values {
		maxmap[base.GetClass(KNN.TrainingData, rowNo)]++
	}
	best, bestCount := "", -1
	for label, count := range maxmap {
		if count > bestCount {
			bestCount = count
			best = label
		}
	}
	return best
}
// weightedVote returns the label with the greatest total inverse-distance
// weight among the given training-row indices. maxmap is caller-owned
// scratch space, reused across calls and zeroed on entry.
// NOTE(review): a zero distance in length yields a +Inf weight —
// presumably acceptable (exact matches dominate the vote); verify.
func (KNN *KNNClassifier) weightedVote(maxmap map[string]float64, values []int, length []float64) string {
	// Reset maxMap
	for a := range maxmap {
		maxmap[a] = 0
	}
	// Refresh maxMap
	for k, elem := range values {
		label := base.GetClass(KNN.TrainingData, elem)
		if _, ok := maxmap[label]; ok {
			maxmap[label] += (1 / length[k])
		} else {
			maxmap[label] = (1 / length[k])
		}
	}
	// Pick the label with the highest accumulated weight.
	var maxClass string
	maxVal := -1.0
	for a := range maxmap {
		if maxmap[a] > maxVal {
			maxVal = maxmap[a]
			maxClass = a
		}
	}
	return maxClass
}
// GetMetadata returns required serialization information for this
// classifier: format/version identifiers plus the tunable parameters.
func (KNN *KNNClassifier) GetMetadata() base.ClassifierMetadataV1 {
	return base.ClassifierMetadataV1{
		FormatVersion:     1,
		ClassifierName:    "KNN",
		ClassifierVersion: "1.0",
		ClassifierMetadata: map[string]interface{}{
			"distance_func":       KNN.DistanceFunc,
			"algorithm":           KNN.Algorithm,
			"neighbours":          KNN.NearestNeighbours,
			"weighted":            KNN.Weighted,
			"allow_optimizations": KNN.AllowOptimisations,
		},
	}
}
// Save outputs a given KNN classifier to filePath.
func (KNN *KNNClassifier) Save(filePath string) error {
	writer, err := base.CreateSerializedClassifierStub(filePath, KNN.GetMetadata())
	if err != nil {
		return err
	}
	// Removed leftover debug output (fmt.Printf("writer: %v", writer))
	// that printed the serializer struct to stdout on every save.
	return KNN.SaveWithPrefix(writer, "")
}
// SaveWithPrefix outputs KNN as part of another file.
// NOTE(review): the writer is closed unconditionally after the training
// instances are written, which looks surprising for the "part of
// another file" use case — confirm callers expect Close here.
func (KNN *KNNClassifier) SaveWithPrefix(writer *base.ClassifierSerializer, prefix string) error {
	err := writer.WriteInstancesForKey(writer.Prefix(prefix, "TrainingInstances"), KNN.TrainingData, true)
	if err != nil {
		return err
	}
	err = writer.Close()
	return err
}
// Load reloads a given KNN classifier when it's the only thing in the
// output file at filePath.
func (KNN *KNNClassifier) Load(filePath string) error {
	reader, err := base.ReadSerializedClassifierStub(filePath)
	if err == nil {
		err = KNN.LoadWithPrefix(reader, "")
	}
	return err
}
// LoadWithPrefix reloads a given KNN classifier when it's part of another file.
// It validates the stored name/version, restores the tunable parameters
// and re-reads the training instances.
func (KNN *KNNClassifier) LoadWithPrefix(reader *base.ClassifierDeserializer, prefix string) error {
	clsMetadata, err := reader.ReadMetadataAtPrefix(prefix)
	if err != nil {
		return err
	}
	if clsMetadata.ClassifierName != "KNN" {
		return fmt.Errorf("This file doesn't contain a KNN classifier")
	}
	if clsMetadata.ClassifierVersion != "1.0" {
		return fmt.Errorf("Can't understand this file format")
	}
	metadata := clsMetadata.ClassifierMetadata
	KNN.DistanceFunc = metadata["distance_func"].(string)
	KNN.Algorithm = metadata["algorithm"].(string)
	//KNN.NearestNeighbours = metadata["neighbours"].(int)
	KNN.Weighted = metadata["weighted"].(bool)
	KNN.AllowOptimisations = metadata["allow_optimizations"].(bool)
	// JSON unmarshals every number as float64, so "neighbours" must be
	// read back as a float and converted to int.
	floatNeighbours := metadata["neighbours"].(float64)
	KNN.NearestNeighbours = int(floatNeighbours)
	KNN.TrainingData, err = reader.GetInstancesForKey(reader.Prefix(prefix, "TrainingInstances"))
	return err
}
}
// ReloadKNNClassifier reloads a KNNClassifier when it's the only thing
// in an output file, returning the freshly-loaded instance.
func ReloadKNNClassifier(filePath string) (*KNNClassifier, error) {
	cls := &KNNClassifier{}
	if err := cls.Load(filePath); err != nil {
		return nil, err
	}
	return cls, nil
}
// A KNNRegressor consists of a data matrix, associated result variables in the same order as the matrix, and a name.
type KNNRegressor struct {
	base.BaseEstimator
	// Values holds the regression target for each training row.
	Values []float64
	// DistanceFunc is "euclidean" or "manhattan".
	DistanceFunc string
}
// NewKnnRegressor mints a new regressor using the given distance
// function name.
func NewKnnRegressor(distfunc string) *KNNRegressor {
	return &KNNRegressor{DistanceFunc: distfunc}
}
// Fit stores the training matrix (rows x cols, row-major from numbers)
// and the matching target values. It panics with matrix.ErrShape when
// the number of targets does not equal the number of rows.
func (KNN *KNNRegressor) Fit(values []float64, numbers []float64, rows int, cols int) {
	if rows != len(values) {
		panic(matrix.ErrShape)
	}
	// Data is presumably the field provided by the embedded
	// base.BaseEstimator — confirm.
	KNN.Data = mat.NewDense(rows, cols, numbers)
	KNN.Values = values
}
// Predict returns the mean target value of the K training rows nearest
// to the given vector.
// NOTE(review): K == 0 divides by zero below and yields NaN —
// presumably callers always pass K >= 1; verify.
func (KNN *KNNRegressor) Predict(vector *mat.Dense, K int) float64 {
	// Get the number of rows
	rows, _ := KNN.Data.Dims()
	rownumbers := make(map[int]float64)
	labels := make([]float64, 0)
	// Check what distance function we are using
	var distanceFunc pairwise.PairwiseDistanceFunc
	switch KNN.DistanceFunc {
	case "euclidean":
		distanceFunc = pairwise.NewEuclidean()
	case "manhattan":
		distanceFunc = pairwise.NewManhattan()
	default:
		panic("unsupported distance function")
	}
	// Distance from the query vector to every training row.
	for i := 0; i < rows; i++ {
		row := KNN.Data.RowView(i)
		distance := distanceFunc.Distance(utilities.VectorToMatrix(row), vector)
		rownumbers[i] = distance
	}
	// Average the targets of the K closest rows.
	sorted := utilities.SortIntMap(rownumbers)
	values := sorted[:K]
	var sum float64
	for _, elem := range values {
		value := KNN.Values[elem]
		labels = append(labels, value)
		sum += value
	}
	average := sum / float64(K)
	return average
}
package criteria
// Expression is used to express conditions for selecting an entity
type Expression interface {
	// Accept calls the visitor callback of the appropriate type
	Accept(visitor ExpressionVisitor) interface{}
	// SetAnnotation puts the given annotation on the expression
	SetAnnotation(key string, value interface{})
	// Annotation reads back values set with SetAnnotation
	Annotation(key string) interface{}
	// Returns the parent expression or nil
	Parent() Expression
	// setParent records the enclosing expression; unexported because it
	// is only used internally when composite expressions are built.
	setParent(parent Expression)
}
// IterateParents calls f for every member of exp's parent chain
// (excluding exp itself), walking outwards. Iteration stops early as
// soon as f returns false.
func IterateParents(exp Expression, f func(Expression) bool) {
	if exp == nil {
		return
	}
	for cur := exp.Parent(); cur != nil; cur = cur.Parent() {
		if !f(cur) {
			return
		}
	}
}
// BinaryExpression represents expressions with 2 children
// This could be generalized to n-ary expressions, but that is not necessary right now
type BinaryExpression interface {
	Expression
	// Left returns the first operand.
	Left() Expression
	// Right returns the second operand.
	Right() Expression
}
// ExpressionVisitor is an implementation of the visitor pattern for expressions
// Each method handles one concrete expression type; Accept returns the
// visitor's result to the caller.
type ExpressionVisitor interface {
	Field(t *FieldExpression) interface{}
	And(a *AndExpression) interface{}
	Or(a *OrExpression) interface{}
	Equals(e *EqualsExpression) interface{}
	Parameter(v *ParameterExpression) interface{}
	Literal(c *LiteralExpression) interface{}
}
// expression is the shared base embedded by all concrete expression
// types; it carries the parent link and the annotation map.
type expression struct {
	// parent is the enclosing expression, nil at the root.
	parent Expression
	// annotations stores values set via SetAnnotation; lazily allocated.
	annotations map[string]interface{}
}
// SetAnnotation puts the given annotation on the expression.
func (exp *expression) SetAnnotation(key string, value interface{}) {
	if exp.annotations == nil {
		exp.annotations = map[string]interface{}{}
	}
	exp.annotations[key] = value
}
// Annotation reads back values set with SetAnnotation (nil when unset).
func (exp *expression) Annotation(key string) interface{} {
	return exp.annotations[key]
}
// Parent returns the parent expression or nil.
// (Previously returned through a pointless intermediate variable.)
func (exp *expression) Parent() Expression {
	return exp.parent
}
// setParent records the enclosing expression.
func (exp *expression) setParent(parent Expression) {
	exp.parent = parent
}
// access a Field
// FieldExpression represents access to a field of the tested object
type FieldExpression struct {
	expression
	// FieldName is the name of the accessed field.
	FieldName string
}
// Accept implements ExpressionVisitor
// dispatch by invoking the Field callback.
func (t *FieldExpression) Accept(visitor ExpressionVisitor) interface{} {
	return visitor.Field(t)
}
// Field constructs a FieldExpression
// for the given field name.
func Field(id string) Expression {
	return &FieldExpression{expression{}, id}
}
// Parameter (free variable of the expression)
// A ParameterExpression represents a parameter to be passed upon evaluation of the expression
type ParameterExpression struct {
expression
}
// Accept implements ExpressionVisitor
func (t *ParameterExpression) Accept(visitor ExpressionVisitor) interface{} {
return visitor.Parameter(t)
}
// Parameter constructs a value expression.
func Parameter() Expression {
return &ParameterExpression{}
}
// A LiteralExpression represents a single constant value in the
// expression, think "5" or "asdf". The type of literals is not restricted
// at this level, but compilers or interpreters will have limitations on
// what they handle.
type LiteralExpression struct {
	expression
	Value interface{}
}

// Accept implements Expression by dispatching to the Literal visitor callback.
func (t *LiteralExpression) Accept(visitor ExpressionVisitor) interface{} {
	return visitor.Literal(t)
}

// Literal constructs a literal expression wrapping the given value.
func Literal(value interface{}) Expression {
	return &LiteralExpression{Value: value}
}
// binaryExpression is the shared "abstract" base for expressions that
// have exactly two operands.
type binaryExpression struct {
	expression
	left  Expression
	right Expression
}

// Left implements BinaryExpression.
func (exp *binaryExpression) Left() Expression {
	return exp.left
}

// Right implements BinaryExpression.
func (exp *binaryExpression) Right() Expression {
	return exp.right
}

// reparent points both children at their enclosing expression and returns
// the parent so constructors can chain on it.
func reparent(parent BinaryExpression) Expression {
	for _, child := range []Expression{parent.Left(), parent.Right()} {
		child.setParent(parent)
	}
	return parent
}
// AndExpression represents the conjunction of two terms.
type AndExpression struct {
	binaryExpression
}

// Accept implements Expression by dispatching to the And visitor callback.
func (t *AndExpression) Accept(visitor ExpressionVisitor) interface{} {
	return visitor.And(t)
}

// And constructs an AndExpression joining left and right.
func And(left Expression, right Expression) Expression {
	e := &AndExpression{binaryExpression{left: left, right: right}}
	return reparent(e)
}

// OrExpression represents the disjunction of two terms.
type OrExpression struct {
	binaryExpression
}

// Accept implements Expression by dispatching to the Or visitor callback.
func (t *OrExpression) Accept(visitor ExpressionVisitor) interface{} {
	return visitor.Or(t)
}

// Or constructs an OrExpression joining left and right.
func Or(left Expression, right Expression) Expression {
	e := &OrExpression{binaryExpression{left: left, right: right}}
	return reparent(e)
}
// ==
// EqualsExpression represents the equality operator
type EqualsExpression struct {
binaryExpression
}
// Accept implements ExpressionVisitor
func (t *EqualsExpression) Accept(visitor ExpressionVisitor) interface{} {
return visitor.Equals(t)
}
// Equals constructs an EqualsExpression
func Equals(left Expression, right Expression) Expression {
return reparent(&EqualsExpression{binaryExpression{expression{}, left, right}})
} | criteria/criteria.go | 0.906855 | 0.453322 | criteria.go | starcoder |
package transform
import (
"errors"
"github.com/dolthub/go-mysql-server/sql"
)
// Expr applies a transformation function to the given expression tree
// from the bottom up. Each callback [f] returns a TreeIdentity that is
// aggregated into a final output indicating whether the expression tree
// was changed. Children are rebuilt (copy-on-write) only when at least
// one of them reports a change.
func Expr(e sql.Expression, f ExprFunc) (sql.Expression, TreeIdentity, error) {
	children := e.Children()
	if len(children) == 0 {
		return f(e)
	}
	var newChildren []sql.Expression
	for i, child := range children {
		newChild, same, err := Expr(child, f)
		if err != nil {
			return nil, SameTree, err
		}
		if same {
			continue
		}
		// First modified child: lazily clone the child slice.
		if newChildren == nil {
			newChildren = make([]sql.Expression, len(children))
			copy(newChildren, children)
		}
		newChildren[i] = newChild
	}
	sameC := SameTree
	if newChildren != nil {
		sameC = NewTree
		var err error
		e, err = e.WithChildren(newChildren...)
		if err != nil {
			return nil, SameTree, err
		}
	}
	e, sameN, err := f(e)
	if err != nil {
		return nil, SameTree, err
	}
	return e, sameC && sameN, nil
}
// InspectExpr traverses the given expression tree from the bottom up,
// stopping as soon as f returns true. The returned bool reports whether
// traversal was interrupted. Early exit is implemented by threading a
// sentinel error through Expr.
func InspectExpr(node sql.Expression, f func(sql.Expression) bool) bool {
	sentinel := errors.New("stop")
	_, _, err := Expr(node, func(e sql.Expression) (sql.Expression, TreeIdentity, error) {
		if f(e) {
			return nil, SameTree, sentinel
		}
		return e, SameTree, nil
	})
	return errors.Is(err, sentinel)
}
// Clone duplicates an existing sql.Expression, returning new nodes with the
// same structure and internal values. It can be useful when dealing with
// stateful expression nodes where an evaluation needs to create multiple
// independent histories of the internal state of the expression nodes.
func Clone(expr sql.Expression) (sql.Expression, error) {
	// Reporting NewTree for every node forces Expr to rebuild each parent
	// via WithChildren. NOTE(review): Expr returns leaf nodes (no children)
	// directly from this callback, so leaves appear to be shared with the
	// original tree — confirm that is acceptable for callers that need
	// fully independent state.
	expr, _, err := Expr(expr, func(e sql.Expression) (sql.Expression, TreeIdentity, error) {
		return e, NewTree, nil
	})
	return expr, err
}
// ExprWithNode applies a transformation function to the given expression
// from the bottom up, additionally passing the enclosing node n to every
// callback. Children are rebuilt (copy-on-write) only when at least one
// of them reports a change.
func ExprWithNode(n sql.Node, e sql.Expression, f ExprWithNodeFunc) (sql.Expression, TreeIdentity, error) {
	children := e.Children()
	if len(children) == 0 {
		return f(n, e)
	}
	var newChildren []sql.Expression
	for i, child := range children {
		newChild, same, err := ExprWithNode(n, child, f)
		if err != nil {
			return nil, SameTree, err
		}
		if same {
			continue
		}
		// First modified child: lazily clone the child slice.
		if newChildren == nil {
			newChildren = make([]sql.Expression, len(children))
			copy(newChildren, children)
		}
		newChildren[i] = newChild
	}
	sameC := SameTree
	if newChildren != nil {
		sameC = NewTree
		var err error
		e, err = e.WithChildren(newChildren...)
		if err != nil {
			return nil, SameTree, err
		}
	}
	e, sameN, err := f(n, e)
	if err != nil {
		return nil, SameTree, err
	}
	return e, sameC && sameN, nil
}
// ExpressionToColumn converts the expression to the form that should be used in a Schema. Expressions that have Name()
// and Table() methods will use these; otherwise, String() and "" are used, respectively. The type and nullability are
// taken from the expression directly.
func ExpressionToColumn(e sql.Expression) *sql.Column {
var name string
if n, ok := e.(sql.Nameable); ok {
name = n.Name()
} else {
name = e.String()
}
var table string
if t, ok := e.(sql.Tableable); ok {
table = t.Table()
}
return &sql.Column{
Name: name,
Type: e.Type(),
Nullable: e.IsNullable(),
Source: table,
}
} | sql/transform/expr.go | 0.763043 | 0.482734 | expr.go | starcoder |
package bigquery
import (
"errors"
"fmt"
"golang.org/x/net/context"
)
// A pageFetcher returns a page of rows, starting from the row specified by token.
// An empty token requests the first page (a new Iterator starts with an
// empty pageToken).
type pageFetcher interface {
	fetch(ctx context.Context, s service, token string) (*readDataResult, error)
}
// Iterator provides access to the result of a BigQuery lookup.
// Next must be called before the first call to Get.
type Iterator struct {
	service service
	err     error // contains any error encountered during calls to Next.
	// Once Next has been called at least once, schema has the result schema, rs contains the current
	// page of data, and nextToken contains the token for fetching the next
	// page (empty if there is no more data to be fetched).
	schema    Schema
	rs        [][]Value
	nextToken string
	// The remaining fields contain enough information to fetch the current
	// page of data, and determine which row of data from this page is the
	// current row.
	pf        pageFetcher
	pageToken string
	// The offset from the start of the current page to the current row.
	// For a new iterator, this is -1.
	offset int64
}
// newIterator returns an Iterator in its initial state (offset -1, i.e.
// positioned before the first row) that fetches pages via pf.
func newIterator(s service, pf pageFetcher) *Iterator {
	return &Iterator{
		service: s,
		pf:      pf,
		offset:  -1,
	}
}
// fetchPage loads the current page of data from the server.
// The contents of schema, rs and nextToken are replaced with the loaded
// data. If there is an error while fetching, the error is stored in
// it.err and false is returned.
func (it *Iterator) fetchPage(ctx context.Context) bool {
	// Keep polling while the job is still incomplete; any other outcome
	// (success or a real error) ends the loop.
	res, err := it.pf.fetch(ctx, it.service, it.pageToken)
	for err == errIncompleteJob {
		res, err = it.pf.fetch(ctx, it.service, it.pageToken)
	}
	if err != nil {
		it.err = err
		return false
	}
	it.schema = res.schema
	it.rs = res.rows
	it.nextToken = res.pageToken
	return true
}
// getEnoughData loads new data into rs until offset no longer points beyond
// the end of rs. Returns false if the offset cannot be satisfied (no more
// pages) or if fetching fails (the error is recorded in it.err by fetchPage).
func (it *Iterator) getEnoughData(ctx context.Context) bool {
	if len(it.rs) == 0 {
		// Either we have not yet fetched any pages, or we are iterating over an empty dataset.
		// In the former case, we should fetch a page of data, so that we can depend on the resultant nextToken.
		// In the latter case, it is harmless to fetch a page of data.
		if !it.fetchPage(ctx) {
			return false
		}
	}
	for it.offset >= int64(len(it.rs)) {
		// If offset is still outside the bounds of the loaded data,
		// but there are no more pages of data to fetch, then we have
		// failed to satisfy the offset.
		if it.nextToken == "" {
			return false
		}
		// offset cannot be satisfied with the currently loaded data,
		// so we fetch the next page. We no longer need the existing
		// cached rows, so we remove them and update the offset to be
		// relative to the new page that we're about to fetch.
		// NOTE: we can't just set offset to 0, because after
		// marshalling/unmarshalling, it's possible for the offset to
		// point arbitrarily far beyond the end of rs.
		// This can happen if the server returns a different size
		// results page before and after marshalling.
		it.offset -= int64(len(it.rs))
		it.pageToken = it.nextToken
		if !it.fetchPage(ctx) {
			return false
		}
	}
	return true
}
// Next advances the Iterator to the next row, making that row available
// via the Get method.
// Next must be called before the first call to Get or Schema, and blocks until data is available.
// Next returns false when there are no more rows available, either because
// the end of the output was reached, or because there was an error (consult
// the Err method to determine which).
func (it *Iterator) Next(ctx context.Context) bool {
	// Once an error has been recorded the iterator is permanently done.
	if it.err != nil {
		return false
	}
	// Advance offset to where we want it to be for the next call to Get.
	it.offset++
	// offset may now point beyond the end of rs, so we fetch data
	// until offset is within its bounds again. If there are no more
	// results available, offset will be left pointing beyond the bounds
	// of rs.
	// At the end of this method, rs will contain at least one element
	// unless the dataset we are iterating over is empty.
	return it.getEnoughData(ctx)
}
// Err returns the last error encountered by Next, or nil for no error.
func (it *Iterator) Err() error {
	return it.err
}
// verifyState checks that the iterator is pointing to a valid row.
func (it *Iterator) verifyState() error {
if it.err != nil {
return fmt.Errorf("called on iterator in error state: %v", it.err)
}
// If Next has been called, then offset should always index into a
// valid row in rs, as long as there is still data available.
if it.offset >= int64(len(it.rs)) || it.offset < 0 {
return errors.New("called without preceding successful call to Next")
}
return nil
}
// Get loads the current row into dst, which must implement ValueLoader.
func (it *Iterator) Get(dst interface{}) error {
if err := it.verifyState(); err != nil {
return fmt.Errorf("Get %v", err)
}
if dst, ok := dst.(ValueLoader); ok {
return dst.Load(it.rs[it.offset])
}
return errors.New("Get called with unsupported argument type")
}
// Schema returns the schema of the result rows.
func (it *Iterator) Schema() (Schema, error) {
if err := it.verifyState(); err != nil {
return nil, fmt.Errorf("Schema %v", err)
}
return it.schema, nil
} | vendor/cloud.google.com/go/bigquery/iterator.go | 0.701815 | 0.429848 | iterator.go | starcoder |
package cmd
// addFlagsStr is the help text describing the flags accepted by "git add",
// copied from the git-add(1) manual page.
const addFlagsStr = `-n
--dry-run
Don’t actually add the file(s), just show if they exist and/or will be ignored.
-v
--verbose
Be verbose.
-f
--force
Allow adding otherwise ignored files.
-i
--interactive
Add modified contents in the working tree interactively to the index. Optional path arguments may be supplied to limit operation to a subset of the working tree. See “Interactive mode” for details.
-p
--patch
Interactively choose hunks of patch between the index and the work tree and add them to the index. This gives the user a chance to review the difference before adding modified contents to the index.
This effectively runs add --interactive, but bypasses the initial command menu and directly jumps to the patch subcommand. See “Interactive mode” for details.
-e
--edit
Open the diff vs. the index in an editor and let the user edit it. After the editor was closed, adjust the hunk headers and apply the patch to the index.
The intent of this option is to pick and choose lines of the patch to apply, or even to modify the contents of lines to be staged. This can be quicker and more flexible than using the interactive hunk selector. However, it is easy to confuse oneself and create a patch that does not apply to the index. See EDITING PATCHES below.
-u
--update
Update the index just where it already has an entry matching <pathspec>. This removes as well as modifies index entries to match the working tree, but adds no new files.
If no <pathspec> is given when -u option is used, all tracked files in the entire working tree are updated (old versions of Git used to limit the update to the current directory and its subdirectories).
-A
--all
--no-ignore-removal
Update the index not only where the working tree has a file matching <pathspec> but also where the index already has an entry. This adds, modifies, and removes index entries to match the working tree.
If no <pathspec> is given when -A option is used, all files in the entire working tree are updated (old versions of Git used to limit the update to the current directory and its subdirectories).
--no-all
--ignore-removal
Update the index by adding new files that are unknown to the index and files modified in the working tree, but ignore files that have been removed from the working tree. This option is a no-op when no <pathspec> is used.
This option is primarily to help users who are used to older versions of Git, whose "git add <pathspec>…" was a synonym for "git add --no-all <pathspec>…", i.e. ignored removed files.
-N
--intent-to-add
Record only the fact that the path will be added later. An entry for the path is placed in the index with no content. This is useful for, among other things, showing the unstaged content of such files with git diff and committing them with git commit -a.
--refresh
Don’t add the file(s), but only refresh their stat() information in the index.
--ignore-errors
If some files could not be added because of errors indexing them, do not abort the operation, but continue adding the others. The command shall still exit with non-zero status. The configuration variable add.ignoreErrors can be set to true to make this the default behaviour.
--ignore-missing
This option can only be used together with --dry-run. By using this option the user can check if any of the given files would be ignored, no matter if they are already present in the work tree or not.
--no-warn-embedded-repo
By default, git add will warn when adding an embedded repository to the index without using git submodule add to create an entry in .gitmodules. This option will suppress the warning (e.g., if you are manually performing operations on submodules).
--renormalize
Apply the "clean" process freshly to all tracked files to forcibly add them again to the index. This is useful after changing core.autocrlf configuration or the text attribute in order to correct files added with wrong CRLF/LF line endings. This option implies -u.
--chmod=(+|-)x
Override the executable bit of the added files. The executable bit is only changed in the index, the files on disk are left unchanged.
--pathspec-from-file=<file>
Pathspec is passed in <file> instead of commandline args. If <file> is exactly - then standard input is used. Pathspec elements are separated by LF or CR/LF. Pathspec elements can be quoted as explained for the configuration variable core.quotePath (see git-config[1]). See also --pathspec-file-nul and global --literal-pathspecs.
--pathspec-file-nul
Only meaningful with --pathspec-from-file. Pathspec elements are separated with NUL character and all other characters are taken literally (including newlines and quotes).
--
This option can be used to separate command-line options from the list of files, (useful when filenames might be mistaken for command-line options).`
const diffFlagsStr = `-p
-u
--patch
Generate patch (see section on generating patches). This is the default.
-s
--no-patch
Suppress diff output. Useful for commands like git show that show the patch by default, or to cancel the effect of --patch.
-U<n>
--unified=<n>
Generate diffs with <n> lines of context instead of the usual three. Implies --patch. Implies -p.
--output=<file>
Output to a specific file instead of stdout.
--output-indicator-new=<char>
--output-indicator-old=<char>
--output-indicator-context=<char>
Specify the character used to indicate new, old or context lines in the generated patch. Normally they are +, - and ' ' respectively.
--raw
Generate the diff in raw format.
--patch-with-raw
Synonym for -p --raw.
--indent-heuristic
Enable the heuristic that shifts diff hunk boundaries to make patches easier to read. This is the default.
--no-indent-heuristic
Disable the indent heuristic.
--minimal
Spend extra time to make sure the smallest possible diff is produced.
--patience
Generate a diff using the "patience diff" algorithm.
--histogram
Generate a diff using the "histogram diff" algorithm.
--anchored=<text>
Generate a diff using the "anchored diff" algorithm.
This option may be specified more than once.
If a line exists in both the source and destination, exists only once, and starts with this text, this algorithm attempts to prevent it from appearing as a deletion or addition in the output. It uses the "patience diff" algorithm internally.
--diff-algorithm={patience|minimal|histogram|myers}
Choose a diff algorithm. The variants are as follows:
default, myers
The basic greedy diff algorithm. Currently, this is the default.
minimal
Spend extra time to make sure the smallest possible diff is produced.
patience
Use "patience diff" algorithm when generating patches.
histogram
This algorithm extends the patience algorithm to "support low-occurrence common elements".
For instance, if you configured the diff.algorithm variable to a non-default value and want to use the default one, then you have to use --diff-algorithm=default option.
--stat[=<width>[,<name-width>[,<count>]]]
Generate a diffstat. By default, as much space as necessary will be used for the filename part, and the rest for the graph part. Maximum width defaults to terminal width, or 80 columns if not connected to a terminal, and can be overridden by <width>. The width of the filename part can be limited by giving another width <name-width> after a comma. The width of the graph part can be limited by using --stat-graph-width=<width> (affects all commands generating a stat graph) or by setting diff.statGraphWidth=<width> (does not affect git format-patch). By giving a third parameter <count>, you can limit the output to the first <count> lines, followed by ... if there are more.
These parameters can also be set individually with --stat-width=<width>, --stat-name-width=<name-width> and --stat-count=<count>.
--compact-summary
Output a condensed summary of extended header information such as file creations or deletions ("new" or "gone", optionally "+l" if it’s a symlink) and mode changes ("+x" or "-x" for adding or removing executable bit respectively) in diffstat. The information is put between the filename part and the graph part. Implies --stat.
--numstat
Similar to --stat, but shows number of added and deleted lines in decimal notation and pathname without abbreviation, to make it more machine friendly. For binary files, outputs two - instead of saying 0 0.
--shortstat
Output only the last line of the --stat format containing total number of modified files, as well as number of added and deleted lines.
-X[<param1,param2,…>]
--dirstat[=<param1,param2,…>]
Output the distribution of relative amount of changes for each sub-directory. The behavior of --dirstat can be customized by passing it a comma separated list of parameters. The defaults are controlled by the diff.dirstat configuration variable (see git-config[1]). The following parameters are available:
changes
Compute the dirstat numbers by counting the lines that have been removed from the source, or added to the destination. This ignores the amount of pure code movements within a file. In other words, rearranging lines in a file is not counted as much as other changes. This is the default behavior when no parameter is given.
lines
Compute the dirstat numbers by doing the regular line-based diff analysis, and summing the removed/added line counts. (For binary files, count 64-byte chunks instead, since binary files have no natural concept of lines). This is a more expensive --dirstat behavior than the changes behavior, but it does count rearranged lines within a file as much as other changes. The resulting output is consistent with what you get from the other --*stat options.
files
Compute the dirstat numbers by counting the number of files changed. Each changed file counts equally in the dirstat analysis. This is the computationally cheapest --dirstat behavior, since it does not have to look at the file contents at all.
cumulative
Count changes in a child directory for the parent directory as well. Note that when using cumulative, the sum of the percentages reported may exceed 100%. The default (non-cumulative) behavior can be specified with the noncumulative parameter.
<limit>
An integer parameter specifies a cut-off percent (3% by default). Directories contributing less than this percentage of the changes are not shown in the output.
Example: The following will count changed files, while ignoring directories with less than 10% of the total amount of changed files, and accumulating child directory counts in the parent directories: --dirstat=files,10,cumulative.
--cumulative
Synonym for --dirstat=cumulative
--dirstat-by-file[=<param1,param2>…]
Synonym for --dirstat=files,param1,param2…
--summary
Output a condensed summary of extended header information such as creations, renames and mode changes.
--patch-with-stat
Synonym for -p --stat.
-z
When --raw, --numstat, --name-only or --name-status has been given, do not munge pathnames and use NULs as output field terminators.
Without this option, pathnames with "unusual" characters are quoted as explained for the configuration variable core.quotePath (see git-config[1]).
--name-only
Show only names of changed files.
--name-status
Show only names and status of changed files. See the description of the --diff-filter option on what the status letters mean.
--submodule[=<format>]
Specify how differences in submodules are shown. When specifying --submodule=short the short format is used. This format just shows the names of the commits at the beginning and end of the range. When --submodule or --submodule=log is specified, the log format is used. This format lists the commits in the range like git-submodule[1] summary does. When --submodule=diff is specified, the diff format is used. This format shows an inline diff of the changes in the submodule contents between the commit range. Defaults to diff.submodule or the short format if the config option is unset.
--color[=<when>]
Show colored diff. --color (i.e. without =<when>) is the same as --color=always. <when> can be one of always, never, or auto. It can be changed by the color.ui and color.diff configuration settings.
--no-color
Turn off colored diff. This can be used to override configuration settings. It is the same as --color=never.
--color-moved[=<mode>]
Moved lines of code are colored differently. It can be changed by the diff.colorMoved configuration setting. The <mode> defaults to no if the option is not given and to zebra if the option with no mode is given. The mode must be one of:
no
Moved lines are not highlighted.
default
Is a synonym for zebra. This may change to a more sensible mode in the future.
plain
Any line that is added in one location and was removed in another location will be colored with color.diff.newMoved. Similarly color.diff.oldMoved will be used for removed lines that are added somewhere else in the diff. This mode picks up any moved line, but it is not very useful in a review to determine if a block of code was moved without permutation.
blocks
Blocks of moved text of at least 20 alphanumeric characters are detected greedily. The detected blocks are painted using either the color.diff.{old,new}Moved color. Adjacent blocks cannot be told apart.
zebra
Blocks of moved text are detected as in blocks mode. The blocks are painted using either the color.diff.{old,new}Moved color or color.diff.{old,new}MovedAlternative. The change between the two colors indicates that a new block was detected.
dimmed-zebra
Similar to zebra, but additional dimming of uninteresting parts of moved code is performed. The bordering lines of two adjacent blocks are considered interesting, the rest is uninteresting. dimmed_zebra is a deprecated synonym.
--no-color-moved
Turn off move detection. This can be used to override configuration settings. It is the same as --color-moved=no.
--color-moved-ws=<modes>
This configures how whitespace is ignored when performing the move detection for --color-moved. It can be set by the diff.colorMovedWS configuration setting. These modes can be given as a comma separated list:
no
Do not ignore whitespace when performing move detection.
ignore-space-at-eol
Ignore changes in whitespace at EOL.
ignore-space-change
Ignore changes in amount of whitespace. This ignores whitespace at line end, and considers all other sequences of one or more whitespace characters to be equivalent.
ignore-all-space
Ignore whitespace when comparing lines. This ignores differences even if one line has whitespace where the other line has none.
allow-indentation-change
Initially ignore any whitespace in the move detection, then group the moved code blocks only into a block if the change in whitespace is the same per line. This is incompatible with the other modes.
--no-color-moved-ws
Do not ignore whitespace when performing move detection. This can be used to override configuration settings. It is the same as --color-moved-ws=no.
--word-diff[=<mode>]
Show a word diff, using the <mode> to delimit changed words. By default, words are delimited by whitespace; see --word-diff-regex below. The <mode> defaults to plain, and must be one of:
color
Highlight changed words using only colors. Implies --color.
plain
Show words as [-removed-] and {+added+}. Makes no attempts to escape the delimiters if they appear in the input, so the output may be ambiguous.
porcelain
Use a special line-based format intended for script consumption. Added/removed/unchanged runs are printed in the usual unified diff format.
none
Disable word diff again.
Note that despite the name of the first mode, color is used to highlight the changed parts in all modes if enabled.
--word-diff-regex=<regex>
Use <regex> to decide what a word is, instead of considering runs of non-whitespace to be a word. Also implies --word-diff unless it was already enabled.
Every non-overlapping match of the <regex> is considered a word. Anything between these matches is considered whitespace and ignored(!) for the purposes of finding differences. You may want to append |[^[:space:]] to your regular expression to make sure that it matches all non-whitespace characters. A match that contains a newline is silently truncated(!) at the newline.
For example, --word-diff-regex=. will treat each character as a word and, correspondingly, show differences character by character.
The regex can also be set via a diff driver or configuration option, see gitattributes[5] or git-config[1]. Giving it explicitly overrides any diff driver or configuration setting. Diff drivers override configuration settings.
--color-words[=<regex>]
Equivalent to --word-diff=color plus (if a regex was specified) --word-diff-regex=<regex>.
--no-renames
Turn off rename detection, even when the configuration file gives the default to do so.
--[no-]rename-empty
Whether to use empty blobs as rename source.
--check
Warn if changes introduce conflict markers or whitespace errors. What are considered whitespace errors is controlled by core.whitespace configuration. By default, trailing whitespaces (including lines that consist solely of whitespaces) and a space character that is immediately followed by a tab character inside the initial indent of the line are considered whitespace errors. Exits with non-zero status if problems are found. Not compatible with --exit-code.
--ws-error-highlight=<kind>
Highlight whitespace errors in the context, old or new lines of the diff. Multiple values are separated by comma, none resets previous values, default reset the list to new and all is a shorthand for old,new,context. When this option is not given, and the configuration variable diff.wsErrorHighlight is not set, only whitespace errors in new lines are highlighted. The whitespace errors are colored with color.diff.whitespace.
--full-index
Instead of the first handful of characters, show the full pre- and post-image blob object names on the "index" line when generating patch format output.
--binary
In addition to --full-index, output a binary diff that can be applied with git-apply. Implies --patch.
--abbrev[=<n>]
Instead of showing the full 40-byte hexadecimal object name in diff-raw format output and diff-tree header lines, show only a partial prefix. This is independent of the --full-index option above, which controls the diff-patch output format. Non default number of digits can be specified with --abbrev=<n>.
-B[<n>][/<m>]
--break-rewrites[=[<n>][/<m>]]
Break complete rewrite changes into pairs of delete and create. This serves two purposes:
It affects the way a change that amounts to a total rewrite of a file not as a series of deletion and insertion mixed together with a very few lines that happen to match textually as the context, but as a single deletion of everything old followed by a single insertion of everything new, and the number m controls this aspect of the -B option (defaults to 60%). -B/70% specifies that less than 30% of the original should remain in the result for Git to consider it a total rewrite (i.e. otherwise the resulting patch will be a series of deletion and insertion mixed together with context lines).
When used with -M, a totally-rewritten file is also considered as the source of a rename (usually -M only considers a file that disappeared as the source of a rename), and the number n controls this aspect of the -B option (defaults to 50%). -B20% specifies that a change with addition and deletion compared to 20% or more of the file’s size are eligible for being picked up as a possible source of a rename to another file.
-M[<n>]
--find-renames[=<n>]
Detect renames. If n is specified, it is a threshold on the similarity index (i.e. amount of addition/deletions compared to the file’s size). For example, -M90% means Git should consider a delete/add pair to be a rename if more than 90% of the file hasn’t changed. Without a % sign, the number is to be read as a fraction, with a decimal point before it. I.e., -M5 becomes 0.5, and is thus the same as -M50%. Similarly, -M05 is the same as -M5%. To limit detection to exact renames, use -M100%. The default similarity index is 50%.
-C[<n>]
--find-copies[=<n>]
Detect copies as well as renames. See also --find-copies-harder. If n is specified, it has the same meaning as for -M<n>.
--find-copies-harder
For performance reasons, by default, -C option finds copies only if the original file of the copy was modified in the same changeset. This flag makes the command inspect unmodified files as candidates for the source of copy. This is a very expensive operation for large projects, so use it with caution. Giving more than one -C option has the same effect.
-D
--irreversible-delete
Omit the preimage for deletes, i.e. print only the header but not the diff between the preimage and /dev/null. The resulting patch is not meant to be applied with patch or git apply; this is solely for people who want to just concentrate on reviewing the text after the change. In addition, the output obviously lacks enough information to apply such a patch in reverse, even manually, hence the name of the option.
When used together with -B, omit also the preimage in the deletion part of a delete/create pair.
-l<num>
The -M and -C options require O(n^2) processing time where n is the number of potential rename/copy targets. This option prevents rename/copy detection from running if the number of rename/copy targets exceeds the specified number.
--diff-filter=[(A|C|D|M|R|T|U|X|B)…[*]]
Select only files that are Added (A), Copied (C), Deleted (D), Modified (M), Renamed (R), have their type (i.e. regular file, symlink, submodule, …) changed (T), are Unmerged (U), are Unknown (X), or have had their pairing Broken (B). Any combination of the filter characters (including none) can be used. When * (All-or-none) is added to the combination, all paths are selected if there is any file that matches other criteria in the comparison; if there is no file that matches other criteria, nothing is selected.
Also, these upper-case letters can be downcased to exclude. E.g. --diff-filter=ad excludes added and deleted paths.
Note that not all diffs can feature all types. For instance, diffs from the index to the working tree can never have Added entries (because the set of paths included in the diff is limited by what is in the index). Similarly, copied and renamed entries cannot appear if detection for those types is disabled.
-S<string>
Look for differences that change the number of occurrences of the specified string (i.e. addition/deletion) in a file. Intended for the scripter’s use.
It is useful when you’re looking for an exact block of code (like a struct), and want to know the history of that block since it first came into being: use the feature iteratively to feed the interesting block in the preimage back into -S, and keep going until you get the very first version of the block.
Binary files are searched as well.
-G<regex>
Look for differences whose patch text contains added/removed lines that match <regex>.
To illustrate the difference between -S<regex> --pickaxe-regex and -G<regex>, consider a commit with the following diff in the same file:
+ return frotz(nitfol, two->ptr, 1, 0);
...
- hit = frotz(nitfol, mf2.ptr, 1, 0);
While git log -G"frotz\(nitfol" will show this commit, git log -S"frotz\(nitfol" --pickaxe-regex will not (because the number of occurrences of that string did not change).
Unless --text is supplied patches of binary files without a textconv filter will be ignored.
See the pickaxe entry in gitdiffcore[7] for more information.
--find-object=<object-id>
Look for differences that change the number of occurrences of the specified object. Similar to -S, just the argument is different in that it doesn’t search for a specific string but for a specific object id.
The object can be a blob or a submodule commit. It implies the -t option in git-log to also find trees.
--pickaxe-all
When -S or -G finds a change, show all the changes in that changeset, not just the files that contain the change in <string>.
--pickaxe-regex
Treat the <string> given to -S as an extended POSIX regular expression to match.
-O<orderfile>
Control the order in which files appear in the output. This overrides the diff.orderFile configuration variable (see git-config[1]). To cancel diff.orderFile, use -O/dev/null.
The output order is determined by the order of glob patterns in <orderfile>. All files with pathnames that match the first pattern are output first, all files with pathnames that match the second pattern (but not the first) are output next, and so on. All files with pathnames that do not match any pattern are output last, as if there was an implicit match-all pattern at the end of the file. If multiple pathnames have the same rank (they match the same pattern but no earlier patterns), their output order relative to each other is the normal order.
<orderfile> is parsed as follows:
Blank lines are ignored, so they can be used as separators for readability.
Lines starting with a hash ("#") are ignored, so they can be used for comments. Add a backslash ("\") to the beginning of the pattern if it starts with a hash.
Each other line contains a single pattern.
Patterns have the same syntax and semantics as patterns used for fnmatch(3) without the FNM_PATHNAME flag, except a pathname also matches a pattern if removing any number of the final pathname components matches the pattern. For example, the pattern "foo*bar" matches "fooasdfbar" and "foo/bar/baz/asdf" but not "foobarx".
-R
Swap two inputs; that is, show differences from index or on-disk file to tree contents.
--relative[=<path>]
--no-relative
When run from a subdirectory of the project, it can be told to exclude changes outside the directory and show pathnames relative to it with this option. When you are not in a subdirectory (e.g. in a bare repository), you can name which subdirectory to make the output relative to by giving a <path> as an argument. --no-relative can be used to countermand both diff.relative config option and previous --relative.
-a
--text
Treat all files as text.
--ignore-cr-at-eol
Ignore carriage-return at the end of line when doing a comparison.
--ignore-space-at-eol
Ignore changes in whitespace at EOL.
-b
--ignore-space-change
Ignore changes in amount of whitespace. This ignores whitespace at line end, and considers all other sequences of one or more whitespace characters to be equivalent.
-w
--ignore-all-space
Ignore whitespace when comparing lines. This ignores differences even if one line has whitespace where the other line has none.
--ignore-blank-lines
Ignore changes whose lines are all blank.
--inter-hunk-context=<lines>
Show the context between diff hunks, up to the specified number of lines, thereby fusing hunks that are close to each other. Defaults to diff.interHunkContext or 0 if the config option is unset.
-W
--function-context
Show whole surrounding functions of changes.
--exit-code
Make the program exit with codes similar to diff(1). That is, it exits with 1 if there were differences and 0 means no differences.
--quiet
Disable all output of the program. Implies --exit-code.
--ext-diff
Allow an external diff helper to be executed. If you set an external diff driver with gitattributes[5], you need to use this option with git-log[1] and friends.
--no-ext-diff
Disallow external diff drivers.
--textconv
--no-textconv
Allow (or disallow) external text conversion filters to be run when comparing binary files. See gitattributes[5] for details. Because textconv filters are typically a one-way conversion, the resulting diff is suitable for human consumption, but cannot be applied. For this reason, textconv filters are enabled by default only for git-diff[1] and git-log[1], but not for git-format-patch[1] or diff plumbing commands.
--ignore-submodules[=<when>]
Ignore changes to submodules in the diff generation. <when> can be either "none", "untracked", "dirty" or "all", which is the default. Using "none" will consider the submodule modified when it either contains untracked or modified files or its HEAD differs from the commit recorded in the superproject and can be used to override any settings of the ignore option in git-config[1] or gitmodules[5]. When "untracked" is used submodules are not considered dirty when they only contain untracked content (but they are still scanned for modified content). Using "dirty" ignores all changes to the work tree of submodules, only changes to the commits stored in the superproject are shown (this was the behavior until 1.7.0). Using "all" hides all changes to submodules.
--src-prefix=<prefix>
Show the given source prefix instead of "a/".
--dst-prefix=<prefix>
Show the given destination prefix instead of "b/".
--no-prefix
Do not show any source or destination prefix.
--line-prefix=<prefix>
Prepend an additional prefix to every line of output.
--ita-invisible-in-index
By default entries added by "git add -N" appear as an existing empty file in "git diff" and a new file in "git diff --cached". This option makes the entry appear as a new file in "git diff" and non-existent in "git diff --cached". This option could be reverted with --ita-visible-in-index. Both options are experimental and could be removed in future.
For more detailed explanation on these common options, see also gitdiffcore[7].
-1 --base
-2 --ours
-3 --theirs
Compare the working tree with the "base" version (stage #1), "our branch" (stage #2) or "their branch" (stage #3). The index contains these stages only for unmerged entries i.e. while resolving conflicts. See git-read-tree[1] section "3-Way Merge" for detailed information.
-0
Omit diff output for unmerged entries and just show "Unmerged". Can be used only when comparing the working tree with the index.
<path>…
The <paths> parameters, when given, are used to limit the diff to the named paths (you can give directory names and get diff for all files under them).`
// statusFlagsStr is the verbatim git-status flag documentation, stored as a
// raw string literal: each entry is one or more option-name lines (e.g. "-s",
// "--short") followed by free-text description lines. The text must be kept
// byte-identical to the upstream git documentation it was copied from.
const statusFlagsStr = `-s
--short
Give the output in the short-format.
-b
--branch
Show the branch and tracking info even in short-format.
--show-stash
Show the number of entries currently stashed away.
--porcelain[=<version>]
Give the output in an easy-to-parse format for scripts. This is similar to the short output, but will remain stable across Git versions and regardless of user configuration. See below for details.
The version parameter is used to specify the format version. This is optional and defaults to the original version v1 format.
--long
Give the output in the long-format. This is the default.
-v
--verbose
In addition to the names of files that have been changed, also show the textual changes that are staged to be committed (i.e., like the output of git diff --cached). If -v is specified twice, then also show the changes in the working tree that have not yet been staged (i.e., like the output of git diff).
-u[<mode>]
--untracked-files[=<mode>]
Show untracked files.
The mode parameter is used to specify the handling of untracked files. It is optional: it defaults to all, and if specified, it must be stuck to the option (e.g. -uno, but not -u no).
The possible options are:
no - Show no untracked files.
normal - Shows untracked files and directories.
all - Also shows individual files in untracked directories.
When -u option is not used, untracked files and directories are shown (i.e. the same as specifying normal), to help you avoid forgetting to add newly created files. Because it takes extra work to find untracked files in the filesystem, this mode may take some time in a large working tree. Consider enabling untracked cache and split index if supported (see git update-index --untracked-cache and git update-index --split-index), Otherwise you can use no to have git status return more quickly without showing untracked files.
The default can be changed using the status.showUntrackedFiles configuration variable documented in git-config[1].
--ignore-submodules[=<when>]
Ignore changes to submodules when looking for changes. <when> can be either "none", "untracked", "dirty" or "all", which is the default. Using "none" will consider the submodule modified when it either contains untracked or modified files or its HEAD differs from the commit recorded in the superproject and can be used to override any settings of the ignore option in git-config[1] or gitmodules[5]. When "untracked" is used submodules are not considered dirty when they only contain untracked content (but they are still scanned for modified content). Using "dirty" ignores all changes to the work tree of submodules, only changes to the commits stored in the superproject are shown (this was the behavior before 1.7.0). Using "all" hides all changes to submodules (and suppresses the output of submodule summaries when the config option status.submoduleSummary is set).
--ignored[=<mode>]
Show ignored files as well.
The mode parameter is used to specify the handling of ignored files. It is optional: it defaults to traditional.
The possible options are:
traditional - Shows ignored files and directories, unless --untracked-files=all is specified, in which case individual files in ignored directories are displayed.
no - Show no ignored files.
matching - Shows ignored files and directories matching an ignore pattern.
When matching mode is specified, paths that explicitly match an ignored pattern are shown. If a directory matches an ignore pattern, then it is shown, but not paths contained in the ignored directory. If a directory does not match an ignore pattern, but all contents are ignored, then the directory is not shown, but all contents are shown.
-z
Terminate entries with NUL, instead of LF. This implies the --porcelain=v1 output format if no other format is given.
--column[=<options>]
--no-column
Display untracked files in columns. See configuration variable column.status for option syntax.--column and --no-column without options are equivalent to always and never respectively.
--ahead-behind
--no-ahead-behind
Display or do not display detailed ahead/behind counts for the branch relative to its upstream branch. Defaults to true.
--renames
--no-renames
Turn on/off rename detection regardless of user configuration. See also git-diff[1] --no-renames.
--find-renames[=<n>]
Turn on rename detection, optionally setting the similarity threshold. See also git-diff[1] --find-renames.`
// commitFlagsStr is the verbatim git-commit flag documentation, stored as a
// raw string literal: option-name lines followed by description lines.
// NOTE(review): "to chose" below is a typo carried over from the upstream git
// documentation; the string is runtime data and is intentionally left as-is.
const commitFlagsStr = `-a
--all
Tell the command to automatically stage files that have been modified and deleted, but new files you have not told Git about are not affected.
-p
--patch
Use the interactive patch selection interface to chose which changes to commit. See git-add[1] for details.
-C <commit>
--reuse-message=<commit>
Take an existing commit object, and reuse the log message and the authorship information (including the timestamp) when creating the commit.
-c <commit>
--reedit-message=<commit>
Like -C, but with -c the editor is invoked, so that the user can further edit the commit message.
--fixup=<commit>
Construct a commit message for use with rebase --autosquash. The commit message will be the subject line from the specified commit with a prefix of "fixup! ". See git-rebase[1] for details.
--squash=<commit>
Construct a commit message for use with rebase --autosquash. The commit message subject line is taken from the specified commit with a prefix of "squash! ". Can be used with additional commit message options (-m/-c/-C/-F). See git-rebase[1] for details.
--reset-author
When used with -C/-c/--amend options, or when committing after a conflicting cherry-pick, declare that the authorship of the resulting commit now belongs to the committer. This also renews the author timestamp.
--short
When doing a dry-run, give the output in the short-format. See git-status[1] for details. Implies --dry-run.
--branch
Show the branch and tracking info even in short-format.
--porcelain
When doing a dry-run, give the output in a porcelain-ready format. See git-status[1] for details. Implies --dry-run.
--long
When doing a dry-run, give the output in the long-format. Implies --dry-run.
-z
--null
When showing short or porcelain status output, print the filename verbatim and terminate the entries with NUL, instead of LF. If no format is given, implies the --porcelain output format. Without the -z option, filenames with "unusual" characters are quoted as explained for the configuration variable core.quotePath (see git-config[1]).
-F <file>
--file=<file>
Take the commit message from the given file. Use - to read the message from the standard input.
--author=<author>
Override the commit author. Specify an explicit author using the standard A U Thor <<EMAIL>> format. Otherwise <author> is assumed to be a pattern and is used to search for an existing commit by that author (i.e. rev-list --all -i --author=<author>); the commit author is then copied from the first such commit found.
--date=<date>
Override the author date used in the commit.
-m <msg>
--message=<msg>
Use the given <msg> as the commit message. If multiple -m options are given, their values are concatenated as separate paragraphs.
The -m option is mutually exclusive with -c, -C, and -F.
-t <file>
--template=<file>
When editing the commit message, start the editor with the contents in the given file. The commit.template configuration variable is often used to give this option implicitly to the command. This mechanism can be used by projects that want to guide participants with some hints on what to write in the message in what order. If the user exits the editor without editing the message, the commit is aborted. This has no effect when a message is given by other means, e.g. with the -m or -F options.
-s
--signoff
Add Signed-off-by line by the committer at the end of the commit log message. The meaning of a signoff depends on the project, but it typically certifies that committer has the rights to submit this work under the same license and agrees to a Developer Certificate of Origin (see http://developercertificate.org/ for more information).
-n
--no-verify
This option bypasses the pre-commit and commit-msg hooks. See also githooks[5].
--allow-empty
Usually recording a commit that has the exact same tree as its sole parent commit is a mistake, and the command prevents you from making such a commit. This option bypasses the safety, and is primarily for use by foreign SCM interface scripts.
--allow-empty-message
Like --allow-empty this command is primarily for use by foreign SCM interface scripts. It allows you to create a commit with an empty commit message without using plumbing commands like git-commit-tree[1].
--cleanup=<mode>
This option determines how the supplied commit message should be cleaned up before committing. The <mode> can be strip, whitespace, verbatim, scissors or default.
strip
Strip leading and trailing empty lines, trailing whitespace, commentary and collapse consecutive empty lines.
whitespace
Same as strip except #commentary is not removed.
verbatim
Do not change the message at all.
scissors
Same as whitespace except that everything from (and including) the line found below is truncated, if the message is to be edited. "#" can be customized with core.commentChar.
# ------------------------ >8 ------------------------
default
Same as strip if the message is to be edited. Otherwise whitespace.
The default can be changed by the commit.cleanup configuration variable (see git-config[1]).
-e
--edit
The message taken from file with -F, command line with -m, and from commit object with -C are usually used as the commit log message unmodified. This option lets you further edit the message taken from these sources.
--no-edit
Use the selected commit message without launching an editor. For example, git commit --amend --no-edit amends a commit without changing its commit message.
--amend
Replace the tip of the current branch by creating a new commit. The recorded tree is prepared as usual (including the effect of the -i and -o options and explicit pathspec), and the message from the original commit is used as the starting point, instead of an empty message, when no other message is specified from the command line via options such as -m, -F, -c, etc. The new commit has the same parents and author as the current one (the --reset-author option can countermand this).
It is a rough equivalent for:
$ git reset --soft HEAD^
$ ... do something else to come up with the right tree ...
$ git commit -c ORIG_HEAD
but can be used to amend a merge commit.
You should understand the implications of rewriting history if you amend a commit that has already been published. (See the "RECOVERING FROM UPSTREAM REBASE" section in git-rebase[1].)
--no-post-rewrite
Bypass the post-rewrite hook.
-i
--include
Before making a commit out of staged contents so far, stage the contents of paths given on the command line as well. This is usually not what you want unless you are concluding a conflicted merge.
-o
--only
Make a commit by taking the updated working tree contents of the paths specified on the command line, disregarding any contents that have been staged for other paths. This is the default mode of operation of git commit if any paths are given on the command line, in which case this option can be omitted. If this option is specified together with --amend, then no paths need to be specified, which can be used to amend the last commit without committing changes that have already been staged. If used together with --allow-empty paths are also not required, and an empty commit will be created.
--pathspec-from-file=<file>
Pathspec is passed in <file> instead of commandline args. If <file> is exactly - then standard input is used. Pathspec elements are separated by LF or CR/LF. Pathspec elements can be quoted as explained for the configuration variable core.quotePath (see git-config[1]). See also --pathspec-file-nul and global --literal-pathspecs.
--pathspec-file-nul
Only meaningful with --pathspec-from-file. Pathspec elements are separated with NUL character and all other characters are taken literally (including newlines and quotes).
-u[<mode>]
--untracked-files[=<mode>]
Show untracked files.
The mode parameter is optional (defaults to all), and is used to specify the handling of untracked files; when -u is not used, the default is normal, i.e. show untracked files and directories.
The possible options are:
no - Show no untracked files
normal - Shows untracked files and directories
all - Also shows individual files in untracked directories.
The default can be changed using the status.showUntrackedFiles configuration variable documented in git-config[1].
-v
--verbose
Show unified diff between the HEAD commit and what would be committed at the bottom of the commit message template to help the user describe the commit by reminding what changes the commit has. Note that this diff output doesn’t have its lines prefixed with #. This diff will not be a part of the commit message. See the commit.verbose configuration variable in git-config[1].
If specified twice, show in addition the unified diff between what would be committed and the worktree files, i.e. the unstaged changes to tracked files.
-q
--quiet
Suppress commit summary message.
--dry-run
Do not create a commit, but show a list of paths that are to be committed, paths with local changes that will be left uncommitted and paths that are untracked.
--status
Include the output of git-status[1] in the commit message template when using an editor to prepare the commit message. Defaults to on, but can be used to override configuration variable commit.status.
--no-status
Do not include the output of git-status[1] in the commit message template when using an editor to prepare the default commit message.
-S[<keyid>]
--gpg-sign[=<keyid>]
--no-gpg-sign
GPG-sign commits. The keyid argument is optional and defaults to the committer identity; if specified, it must be stuck to the option without a space. --no-gpg-sign is useful to countermand both commit.gpgSign configuration variable, and earlier --gpg-sign.
--
Do not interpret any more arguments as options.
<pathspec>…
When pathspec is given on the command line, commit the contents of the files that match the pathspec without recording the changes already added to the index. The contents of these files are also staged for the next commit on top of what have been staged before.
`
// branchFlagsStr is the verbatim git-branch flag documentation, stored as a
// raw string literal: option-name lines followed by description lines. The
// text must be kept byte-identical to the upstream git documentation it was
// copied from.
const branchFlagsStr = `-d
--delete
Delete a branch. The branch must be fully merged in its upstream branch, or in HEAD if no upstream was set with --track or --set-upstream-to.
-D
Shortcut for --delete --force.
--create-reflog
Create the branch’s reflog. This activates recording of all changes made to the branch ref, enabling use of date based sha1 expressions such as "<branchname>@{yesterday}". Note that in non-bare repositories, reflogs are usually enabled by default by the core.logAllRefUpdates config option. The negated form --no-create-reflog only overrides an earlier --create-reflog, but currently does not negate the setting of core.logAllRefUpdates.
-f
--force
Reset <branchname> to <startpoint>, even if <branchname> exists already. Without -f, git branch refuses to change an existing branch. In combination with -d (or --delete), allow deleting the branch irrespective of its merged status. In combination with -m (or --move), allow renaming the branch even if the new branch name already exists, the same applies for -c (or --copy).
-m
--move
Move/rename a branch and the corresponding reflog.
-M
Shortcut for --move --force.
-c
--copy
Copy a branch and the corresponding reflog.
-C
Shortcut for --copy --force.
--color[=<when>]
Color branches to highlight current, local, and remote-tracking branches. The value must be always (the default), never, or auto.
--no-color
Turn off branch colors, even when the configuration file gives the default to color output. Same as --color=never.
-i
--ignore-case
Sorting and filtering branches are case insensitive.
--column[=<options>]
--no-column
Display branch listing in columns. See configuration variable column.branch for option syntax.--column and --no-column without options are equivalent to always and never respectively.
This option is only applicable in non-verbose mode.
-r
--remotes
List or delete (if used with -d) the remote-tracking branches. Combine with --list to match the optional pattern(s).
-a
--all
List both remote-tracking branches and local branches. Combine with --list to match optional pattern(s).
-l
--list
List branches. With optional <pattern>..., e.g. git branch --list 'maint-*', list only the branches that match the pattern(s).
--show-current
Print the name of the current branch. In detached HEAD state, nothing is printed.
-v
-vv
--verbose
When in list mode, show sha1 and commit subject line for each head, along with relationship to upstream branch (if any). If given twice, print the path of the linked worktree (if any) and the name of the upstream branch, as well (see also git remote show <remote>). Note that the current worktree’s HEAD will not have its path printed (it will always be your current directory).
-q
--quiet
Be more quiet when creating or deleting a branch, suppressing non-error messages.
--abbrev=<length>
Alter the sha1’s minimum display length in the output listing. The default value is 7 and can be overridden by the core.abbrev config option.
--no-abbrev
Display the full sha1s in the output listing rather than abbreviating them.
-t
--track
When creating a new branch, set up branch.<name>.remote and branch.<name>.merge configuration entries to mark the start-point branch as "upstream" from the new branch. This configuration will tell git to show the relationship between the two branches in git status and git branch -v. Furthermore, it directs git pull without arguments to pull from the upstream when the new branch is checked out.
This behavior is the default when the start point is a remote-tracking branch. Set the branch.autoSetupMerge configuration variable to false if you want git switch, git checkout and git branch to always behave as if --no-track were given. Set it to always if you want this behavior when the start-point is either a local or remote-tracking branch.
--no-track
Do not set up "upstream" configuration, even if the branch.autoSetupMerge configuration variable is true.
--set-upstream
As this option had confusing syntax, it is no longer supported. Please use --track or --set-upstream-to instead.
-u <upstream>
--set-upstream-to=<upstream>
Set up <branchname>'s tracking information so <upstream> is considered <branchname>'s upstream branch. If no <branchname> is specified, then it defaults to the current branch.
--unset-upstream
Remove the upstream information for <branchname>. If no branch is specified it defaults to the current branch.
--edit-description
Open an editor and edit the text to explain what the branch is for, to be used by various other commands (e.g. format-patch, request-pull, and merge (if enabled)). Multi-line explanations may be used.
--contains [<commit>]
Only list branches which contain the specified commit (HEAD if not specified). Implies --list.
--no-contains [<commit>]
Only list branches which don’t contain the specified commit (HEAD if not specified). Implies --list.
--merged [<commit>]
Only list branches whose tips are reachable from the specified commit (HEAD if not specified). Implies --list, incompatible with --no-merged.
--no-merged [<commit>]
Only list branches whose tips are not reachable from the specified commit (HEAD if not specified). Implies --list, incompatible with --merged.
<branchname>
The name of the branch to create or delete. The new branch name must pass all checks defined by git-check-ref-format[1]. Some of these checks may restrict the characters allowed in a branch name.
<start-point>
The new branch head will point to this commit. It may be given as a branch name, a commit-id, or a tag. If this option is omitted, the current HEAD will be used instead.
<oldbranch>
The name of an existing branch to rename.
<newbranch>
The new name for an existing branch. The same restrictions as for <branchname> apply.
--sort=<key>
Sort based on the key given. Prefix - to sort in descending order of the value. You may use the --sort=<key> option multiple times, in which case the last key becomes the primary key. The keys supported are the same as those in git for-each-ref. Sort order defaults to the value configured for the branch.sort variable if exists, or to sorting based on the full refname (including refs/... prefix). This lists detached HEAD (if present) first, then local branches and finally remote-tracking branches. See git-config[1].
--points-at <object>
Only list branches of the given object.
--format <format>
A string that interpolates %(fieldname) from a branch ref being shown and the object it points at. The format is the same as that of git-for-each-ref[1].`
// tagFlagsStr is the raw flag/option reference text for `git tag`,
// copied verbatim from the git-tag(1) documentation (flag lines followed
// by their description paragraphs). NOTE(review): presumably consumed by
// a completion/help subsystem that parses this layout — confirm at call
// sites before changing formatting; the string bytes are program output.
const tagFlagsStr = `-a
--annotate
Make an unsigned, annotated tag object
-s
--sign
Make a GPG-signed tag, using the default e-mail address’s key. The default behavior of tag GPG-signing is controlled by tag.gpgSign configuration variable if it exists, or disabled otherwise. See git-config[1].
--no-sign
Override tag.gpgSign configuration variable that is set to force each and every tag to be signed.
-u <keyid>
--local-user=<keyid>
Make a GPG-signed tag, using the given key.
-f
--force
Replace an existing tag with the given name (instead of failing)
-d
--delete
Delete existing tags with the given names.
-v
--verify
Verify the GPG signature of the given tag names.
-n<num>
<num> specifies how many lines from the annotation, if any, are printed when using -l. Implies --list.
The default is not to print any annotation lines. If no number is given to -n, only the first line is printed. If the tag is not annotated, the commit message is displayed instead.
-l
--list
List tags. With optional <pattern>..., e.g. git tag --list 'v-*', list only the tags that match the pattern(s).
Running "git tag" without arguments also lists all tags. The pattern is a shell wildcard (i.e., matched using fnmatch(3)). Multiple patterns may be given; if any of them matches, the tag is shown.
This option is implicitly supplied if any other list-like option such as --contains is provided. See the documentation for each of those options for details.
--sort=<key>
Sort based on the key given. Prefix - to sort in descending order of the value. You may use the --sort=<key> option multiple times, in which case the last key becomes the primary key. Also supports "version:refname" or "v:refname" (tag names are treated as versions). The "version:refname" sort order can also be affected by the "versionsort.suffix" configuration variable. The keys supported are the same as those in git for-each-ref. Sort order defaults to the value configured for the tag.sort variable if it exists, or lexicographic order otherwise. See git-config[1].
--color[=<when>]
Respect any colors specified in the --format option. The <when> field must be one of always, never, or auto (if <when> is absent, behave as if always was given).
-i
--ignore-case
Sorting and filtering tags are case insensitive.
--column[=<options>]
--no-column
Display tag listing in columns. See configuration variable column.tag for option syntax.--column and --no-column without options are equivalent to always and never respectively.
This option is only applicable when listing tags without annotation lines.
--contains [<commit>]
Only list tags which contain the specified commit (HEAD if not specified). Implies --list.
--no-contains [<commit>]
Only list tags which don’t contain the specified commit (HEAD if not specified). Implies --list.
--merged [<commit>]
Only list tags whose commits are reachable from the specified commit (HEAD if not specified), incompatible with --no-merged.
--no-merged [<commit>]
Only list tags whose commits are not reachable from the specified commit (HEAD if not specified), incompatible with --merged.
--points-at <object>
Only list tags of the given object (HEAD if not specified). Implies --list.
-m <msg>
--message=<msg>
Use the given tag message (instead of prompting). If multiple -m options are given, their values are concatenated as separate paragraphs. Implies -a if none of -a, -s, or -u <keyid> is given.
-F <file>
--file=<file>
Take the tag message from the given file. Use - to read the message from the standard input. Implies -a if none of -a, -s, or -u <keyid> is given.
-e
--edit
The message taken from file with -F and command line with -m are usually used as the tag message unmodified. This option lets you further edit the message taken from these sources.
--cleanup=<mode>
This option sets how the tag message is cleaned up. The <mode> can be one of verbatim, whitespace and strip. The strip mode is default. The verbatim mode does not change message at all, whitespace removes just leading/trailing whitespace lines and strip removes both whitespace and commentary.
--create-reflog
Create a reflog for the tag. To globally enable reflogs for tags, see core.logAllRefUpdates in git-config[1]. The negated form --no-create-reflog only overrides an earlier --create-reflog, but currently does not negate the setting of core.logAllRefUpdates.
--format=<format>
A string that interpolates %(fieldname) from a tag ref being shown and the object it points at. The format is the same as that of git-for-each-ref[1]. When unspecified, defaults to %(refname:strip=2).
<tagname>
The name of the tag to create, delete, or describe. The new tag name must pass all checks defined by git-check-ref-format[1]. Some of these checks may restrict the characters allowed in a tag name.
<commit>
<object>
The object that the new tag will refer to, usually a commit. Defaults to HEAD.`
// checkoutFlagsStr is the raw flag/option reference text for `git checkout`,
// copied verbatim from the git-checkout(1) documentation (flag lines followed
// by their description paragraphs). NOTE(review): the exact line layout is
// presumably parsed by whatever consumes this constant — verify at call sites;
// do not reflow or reformat the string content.
const checkoutFlagsStr = `-q
--quiet
Quiet, suppress feedback messages.
--progress
--no-progress
Progress status is reported on the standard error stream by default when it is attached to a terminal, unless --quiet is specified. This flag enables progress reporting even if not attached to a terminal, regardless of --quiet.
-f
--force
When switching branches, proceed even if the index or the working tree differs from HEAD. This is used to throw away local changes.
When checking out paths from the index, do not fail upon unmerged entries; instead, unmerged entries are ignored.
--ours
--theirs
When checking out paths from the index, check out stage #2 (ours) or #3 (theirs) for unmerged paths.
Note that during git rebase and git pull --rebase, ours and theirs may appear swapped; --ours gives the version from the branch the changes are rebased onto, while --theirs gives the version from the branch that holds your work that is being rebased.
This is because rebase is used in a workflow that treats the history at the remote as the shared canonical one, and treats the work done on the branch you are rebasing as the third-party work to be integrated, and you are temporarily assuming the role of the keeper of the canonical history during the rebase. As the keeper of the canonical history, you need to view the history from the remote as ours (i.e. "our shared canonical history"), while what you did on your side branch as theirs (i.e. "one contributor’s work on top of it").
-b <new_branch>
Create a new branch named <new_branch> and start it at <start_point>; see git-branch[1] for details.
-B <new_branch>
Creates the branch <new_branch> and start it at <start_point>; if it already exists, then reset it to <start_point>. This is equivalent to running "git branch" with "-f"; see git-branch[1] for details.
-t
--track
When creating a new branch, set up "upstream" configuration. See "--track" in git-branch[1] for details.
If no -b option is given, the name of the new branch will be derived from the remote-tracking branch, by looking at the local part of the refspec configured for the corresponding remote, and then stripping the initial part up to the "*". This would tell us to use hack as the local branch when branching off of origin/hack (or remotes/origin/hack, or even refs/remotes/origin/hack). If the given name has no slash, or the above guessing results in an empty name, the guessing is aborted. You can explicitly give a name with -b in such a case.
--no-track
Do not set up "upstream" configuration, even if the branch.autoSetupMerge configuration variable is true.
--guess
--no-guess
If <branch> is not found but there does exist a tracking branch in exactly one remote (call it <remote>) with a matching name, treat as equivalent to
-l
Create the new branch’s reflog; see git-branch[1] for details.
--detach
Rather than checking out a branch to work on it, check out a commit for inspection and discardable experiments. This is the default behavior of git checkout <commit> when <commit> is not a branch name. See the "DETACHED HEAD" section below for details.
--orphan <new_branch>
Create a new orphan branch, named <new_branch>, started from <start_point> and switch to it. The first commit made on this new branch will have no parents and it will be the root of a new history totally disconnected from all the other branches and commits.
The index and the working tree are adjusted as if you had previously run git checkout <start_point>. This allows you to start a new history that records a set of paths similar to <start_point> by easily running git commit -a to make the root commit.
This can be useful when you want to publish the tree from a commit without exposing its full history. You might want to do this to publish an open source branch of a project whose current tree is "clean", but whose full history contains proprietary or otherwise encumbered bits of code.
If you want to start a disconnected history that records a set of paths that is totally different from the one of <start_point>, then you should clear the index and the working tree right after creating the orphan branch by running git rm -rf . from the top level of the working tree. Afterwards you will be ready to prepare your new files, repopulating the working tree, by copying them from elsewhere, extracting a tarball, etc.
--ignore-skip-worktree-bits
In sparse checkout mode, git checkout -- <paths> would update only entries matched by <paths> and sparse patterns in $GIT_DIR/info/sparse-checkout. This option ignores the sparse patterns and adds back any files in <paths>.
-m
--merge
When switching branches, if you have local modifications to one or more files that are different between the current branch and the branch to which you are switching, the command refuses to switch branches in order to preserve your modifications in context. However, with this option, a three-way merge between the current branch, your working tree contents, and the new branch is done, and you will be on the new branch.
When a merge conflict happens, the index entries for conflicting paths are left unmerged, and you need to resolve the conflicts and mark the resolved paths with git add (or git rm if the merge should result in deletion of the path).
When checking out paths from the index, this option lets you recreate the conflicted merge in the specified paths.
When switching branches with --merge, staged changes may be lost.
--conflict=<style>
The same as --merge option above, but changes the way the conflicting hunks are presented, overriding the merge.conflictStyle configuration variable. Possible values are "merge" (default) and "diff3" (in addition to what is shown by "merge" style, shows the original contents).
-p
--patch
Interactively select hunks in the difference between the <tree-ish> (or the index, if unspecified) and the working tree. The chosen hunks are then applied in reverse to the working tree (and if a <tree-ish> was specified, the index).
This means that you can use git checkout -p to selectively discard edits from your current working tree. See the “Interactive Mode” section of git-add[1] to learn how to operate the --patch mode.
Note that this option uses the no overlay mode by default (see also --overlay), and currently doesn’t support overlay mode.
--ignore-other-worktrees
git checkout refuses when the wanted ref is already checked out by another worktree. This option makes it check the ref out anyway. In other words, the ref can be held by more than one worktree.
--overwrite-ignore
--no-overwrite-ignore
Silently overwrite ignored files when switching branches. This is the default behavior. Use --no-overwrite-ignore to abort the operation when the new branch contains ignored files.
--recurse-submodules
--no-recurse-submodules
Using --recurse-submodules will update the content of all active submodules according to the commit recorded in the superproject. If local modifications in a submodule would be overwritten the checkout will fail unless -f is used. If nothing (or --no-recurse-submodules) is used, submodules working trees will not be updated. Just like git-submodule[1], this will detach HEAD of the submodule.
--overlay
--no-overlay
In the default overlay mode, git checkout never removes files from the index or the working tree. When specifying --no-overlay, files that appear in the index and working tree, but not in <tree-ish> are removed, to make them match <tree-ish> exactly.
--pathspec-from-file=<file>
Pathspec is passed in <file> instead of commandline args. If <file> is exactly - then standard input is used. Pathspec elements are separated by LF or CR/LF. Pathspec elements can be quoted as explained for the configuration variable core.quotePath (see git-config[1]). See also --pathspec-file-nul and global --literal-pathspecs.
--pathspec-file-nul
Only meaningful with --pathspec-from-file. Pathspec elements are separated with NUL character and all other characters are taken literally (including newlines and quotes).
<branch>
Branch to checkout; if it refers to a branch (i.e., a name that, when prepended with "refs/heads/", is a valid ref), then that branch is checked out. Otherwise, if it refers to a valid commit, your HEAD becomes "detached" and you are no longer on any branch (see below for details).
You can use the @{-N} syntax to refer to the N-th last branch/commit checked out using "git checkout" operation. You may also specify - which is synonymous to @{-1}.
As a special case, you may use A...B as a shortcut for the merge base of A and B if there is exactly one merge base. You can leave out at most one of A and B, in which case it defaults to HEAD.
<new_branch>
Name for the new branch.
<start_point>
The name of a commit at which to start the new branch; see git-branch[1] for details. Defaults to HEAD.
As a special case, you may use "A...B" as a shortcut for the merge base of A and B if there is exactly one merge base. You can leave out at most one of A and B, in which case it defaults to HEAD.
<tree-ish>
Tree to checkout from (when paths are given). If not specified, the index will be used.
--
Do not interpret any more arguments as options.
<pathspec>…
Limits the paths affected by the operation.
For more details, see the pathspec entry in gitglossary[7].`
// mergeFlagsStr is the raw flag/option reference text for `git merge`,
// copied verbatim from the git-merge(1) documentation (flag lines followed
// by their description paragraphs). NOTE(review): keep the line layout
// byte-for-byte — a consumer elsewhere in this file presumably parses
// alternating flag/description lines; confirm before editing the content.
const mergeFlagsStr = `--commit
--no-commit
Perform the merge and commit the result. This option can be used to override --no-commit.
With --no-commit perform the merge and stop just before creating a merge commit, to give the user a chance to inspect and further tweak the merge result before committing.
Note that fast-forward updates do not create a merge commit and therefore there is no way to stop those merges with --no-commit. Thus, if you want to ensure your branch is not changed or updated by the merge command, use --no-ff with --no-commit.
--edit
-e
--no-edit
Invoke an editor before committing successful mechanical merge to further edit the auto-generated merge message, so that the user can explain and justify the merge. The --no-edit option can be used to accept the auto-generated message (this is generally discouraged). The --edit (or -e) option is still useful if you are giving a draft message with the -m option from the command line and want to edit it in the editor.
Older scripts may depend on the historical behaviour of not allowing the user to edit the merge log message. They will see an editor opened when they run git merge. To make it easier to adjust such scripts to the updated behaviour, the environment variable GIT_MERGE_AUTOEDIT can be set to no at the beginning of them.
--cleanup=<mode>
This option determines how the merge message will be cleaned up before committing. See git-commit[1] for more details. In addition, if the <mode> is given a value of scissors, scissors will be appended to MERGE_MSG before being passed on to the commit machinery in the case of a merge conflict.
--ff
--no-ff
--ff-only
Specifies how a merge is handled when the merged-in history is already a descendant of the current history. --ff is the default unless merging an annotated (and possibly signed) tag that is not stored in its natural place in the refs/tags/ hierarchy, in which case --no-ff is assumed.
With --ff, when possible resolve the merge as a fast-forward (only update the branch pointer to match the merged branch; do not create a merge commit). When not possible (when the merged-in history is not a descendant of the current history), create a merge commit.
With --no-ff, create a merge commit in all cases, even when the merge could instead be resolved as a fast-forward.
With --ff-only, resolve the merge as a fast-forward when possible. When not possible, refuse to merge and exit with a non-zero status.
-S[<keyid>]
--gpg-sign[=<keyid>]
--no-gpg-sign
GPG-sign the resulting merge commit. The keyid argument is optional and defaults to the committer identity; if specified, it must be stuck to the option without a space. --no-gpg-sign is useful to countermand both commit.gpgSign configuration variable, and earlier --gpg-sign.
--log[=<n>]
--no-log
In addition to branch names, populate the log message with one-line descriptions from at most <n> actual commits that are being merged. See also git-fmt-merge-msg[1].
With --no-log do not list one-line descriptions from the actual commits being merged.
--signoff
--no-signoff
Add Signed-off-by line by the committer at the end of the commit log message. The meaning of a signoff depends on the project, but it typically certifies that committer has the rights to submit this work under the same license and agrees to a Developer Certificate of Origin (see http://developercertificate.org/ for more information).
With --no-signoff do not add a Signed-off-by line.
--stat
-n
--no-stat
Show a diffstat at the end of the merge. The diffstat is also controlled by the configuration option merge.stat.
With -n or --no-stat do not show a diffstat at the end of the merge.
--squash
--no-squash
Produce the working tree and index state as if a real merge happened (except for the merge information), but do not actually make a commit, move the HEAD, or record $GIT_DIR/MERGE_HEAD (to cause the next git commit command to create a merge commit). This allows you to create a single commit on top of the current branch whose effect is the same as merging another branch (or more in case of an octopus).
With --no-squash perform the merge and commit the result. This option can be used to override --squash.
With --squash, --commit is not allowed, and will fail.
--no-verify
This option bypasses the pre-merge and commit-msg hooks. See also githooks[5].
-s <strategy>
--strategy=<strategy>
Use the given merge strategy; can be supplied more than once to specify them in the order they should be tried. If there is no -s option, a built-in list of strategies is used instead (git merge-recursive when merging a single head, git merge-octopus otherwise).
-X <option>
--strategy-option=<option>
Pass merge strategy specific option through to the merge strategy.
--verify-signatures
--no-verify-signatures
Verify that the tip commit of the side branch being merged is signed with a valid key, i.e. a key that has a valid uid: in the default trust model, this means the signing key has been signed by a trusted key. If the tip commit of the side branch is not signed with a valid key, the merge is aborted.
--summary
--no-summary
Synonyms to --stat and --no-stat; these are deprecated and will be removed in the future.
-q
--quiet
Operate quietly. Implies --no-progress.
-v
--verbose
Be verbose.
--progress
--no-progress
Turn progress on/off explicitly. If neither is specified, progress is shown if standard error is connected to a terminal. Note that not all merge strategies may support progress reporting.
--autostash
--no-autostash
Automatically create a temporary stash entry before the operation begins, and apply it after the operation ends. This means that you can run the operation on a dirty worktree. However, use with care: the final stash application after a successful merge might result in non-trivial conflicts.
--allow-unrelated-histories
By default, git merge command refuses to merge histories that do not share a common ancestor. This option can be used to override this safety when merging histories of two projects that started their lives independently. As that is a very rare occasion, no configuration variable to enable this by default exists and will not be added.
-m <msg>
Set the commit message to be used for the merge commit (in case one is created).
If --log is specified, a shortlog of the commits being merged will be appended to the specified message.
The git fmt-merge-msg command can be used to give a good default for automated git merge invocations. The automated message can include the branch description.
-F <file>
--file=<file>
Read the commit message to be used for the merge commit (in case one is created).
If --log is specified, a shortlog of the commits being merged will be appended to the specified message.
--rerere-autoupdate
--no-rerere-autoupdate
Allow the rerere mechanism to update the index with the result of auto-conflict resolution if possible.
--overwrite-ignore
--no-overwrite-ignore
Silently overwrite ignored files from the merge result. This is the default behavior. Use --no-overwrite-ignore to abort.
--abort
Abort the current conflict resolution process, and try to reconstruct the pre-merge state. If an autostash entry is present, apply it to the worktree.
If there were uncommitted worktree changes present when the merge started, git merge --abort will in some cases be unable to reconstruct these changes. It is therefore recommended to always commit or stash your changes before running git merge.
git merge --abort is equivalent to git reset --merge when MERGE_HEAD is present unless MERGE_AUTOSTASH is also present in which case git merge --abort applies the stash entry to the worktree whereas git reset --merge will save the stashed changes in the stash list.
--quit
Forget about the current merge in progress. Leave the index and the working tree as-is. If MERGE_AUTOSTASH is present, the stash entry will be saved to the stash list.
--continue
After a git merge stops due to conflicts you can conclude the merge by running git merge --continue (see "HOW TO RESOLVE CONFLICTS" section below).
<commit>…
Commits, usually other branch heads, to merge into our branch. Specifying more than one commit will create a merge with more than two parents (affectionately called an Octopus merge).
If no commit is given from the command line, merge the remote-tracking branches that the current branch is configured to use as its upstream. See also the configuration section of this manual page.
When FETCH_HEAD (and no other commit) is specified, the branches recorded in the .git/FETCH_HEAD file by the previous invocation of git fetch for merging are merged to the current branch.`
const pullFlagsStr = `-q
--quiet
This is passed to both underlying git-fetch to squelch reporting of during transfer, and underlying git-merge to squelch output during merging.
-v
--verbose
Pass --verbose to git-fetch and git-merge.
--[no-]recurse-submodules[=yes|on-demand|no]
This option controls if new commits of populated submodules should be fetched, and if the working trees of active submodules should be updated, too (see git-fetch[1], git-config[1] and gitmodules[5]).
If the checkout is done via rebase, local submodule commits are rebased as well.
If the update is done via merge, the submodule conflicts are resolved and checked out.
Options related to merging
--commit
--no-commit
Perform the merge and commit the result. This option can be used to override --no-commit.
With --no-commit perform the merge and stop just before creating a merge commit, to give the user a chance to inspect and further tweak the merge result before committing.
Note that fast-forward updates do not create a merge commit and therefore there is no way to stop those merges with --no-commit. Thus, if you want to ensure your branch is not changed or updated by the merge command, use --no-ff with --no-commit.
--edit
-e
--no-edit
Invoke an editor before committing successful mechanical merge to further edit the auto-generated merge message, so that the user can explain and justify the merge. The --no-edit option can be used to accept the auto-generated message (this is generally discouraged).
Older scripts may depend on the historical behaviour of not allowing the user to edit the merge log message. They will see an editor opened when they run git merge. To make it easier to adjust such scripts to the updated behaviour, the environment variable GIT_MERGE_AUTOEDIT can be set to no at the beginning of them.
--cleanup=<mode>
This option determines how the merge message will be cleaned up before committing. See git-commit[1] for more details. In addition, if the <mode> is given a value of scissors, scissors will be appended to MERGE_MSG before being passed on to the commit machinery in the case of a merge conflict.
--ff
--no-ff
--ff-only
Specifies how a merge is handled when the merged-in history is already a descendant of the current history. --ff is the default unless merging an annotated (and possibly signed) tag that is not stored in its natural place in the refs/tags/ hierarchy, in which case --no-ff is assumed.
With --ff, when possible resolve the merge as a fast-forward (only update the branch pointer to match the merged branch; do not create a merge commit). When not possible (when the merged-in history is not a descendant of the current history), create a merge commit.
With --no-ff, create a merge commit in all cases, even when the merge could instead be resolved as a fast-forward.
With --ff-only, resolve the merge as a fast-forward when possible. When not possible, refuse to merge and exit with a non-zero status.
-S[<keyid>]
--gpg-sign[=<keyid>]
--no-gpg-sign
GPG-sign the resulting merge commit. The keyid argument is optional and defaults to the committer identity; if specified, it must be stuck to the option without a space. --no-gpg-sign is useful to countermand both commit.gpgSign configuration variable, and earlier --gpg-sign.
--log[=<n>]
--no-log
In addition to branch names, populate the log message with one-line descriptions from at most <n> actual commits that are being merged. See also git-fmt-merge-msg[1].
With --no-log do not list one-line descriptions from the actual commits being merged.
--signoff
--no-signoff
Add Signed-off-by line by the committer at the end of the commit log message. The meaning of a signoff depends on the project, but it typically certifies that committer has the rights to submit this work under the same license and agrees to a Developer Certificate of Origin (see http://developercertificate.org/ for more information).
With --no-signoff do not add a Signed-off-by line.
--stat
-n
--no-stat
Show a diffstat at the end of the merge. The diffstat is also controlled by the configuration option merge.stat.
With -n or --no-stat do not show a diffstat at the end of the merge.
--squash
--no-squash
Produce the working tree and index state as if a real merge happened (except for the merge information), but do not actually make a commit, move the HEAD, or record $GIT_DIR/MERGE_HEAD (to cause the next git commit command to create a merge commit). This allows you to create a single commit on top of the current branch whose effect is the same as merging another branch (or more in case of an octopus).
With --no-squash perform the merge and commit the result. This option can be used to override --squash.
With --squash, --commit is not allowed, and will fail.
--no-verify
This option bypasses the pre-merge and commit-msg hooks. See also githooks[5].
-s <strategy>
--strategy=<strategy>
Use the given merge strategy; can be supplied more than once to specify them in the order they should be tried. If there is no -s option, a built-in list of strategies is used instead (git merge-recursive when merging a single head, git merge-octopus otherwise).
-X <option>
--strategy-option=<option>
Pass merge strategy specific option through to the merge strategy.
--verify-signatures
--no-verify-signatures
Verify that the tip commit of the side branch being merged is signed with a valid key, i.e. a key that has a valid uid: in the default trust model, this means the signing key has been signed by a trusted key. If the tip commit of the side branch is not signed with a valid key, the merge is aborted.
--summary
--no-summary
Synonyms to --stat and --no-stat; these are deprecated and will be removed in the future.
--autostash
--no-autostash
Automatically create a temporary stash entry before the operation begins, and apply it after the operation ends. This means that you can run the operation on a dirty worktree. However, use with care: the final stash application after a successful merge might result in non-trivial conflicts.
--allow-unrelated-histories
By default, git merge command refuses to merge histories that do not share a common ancestor. This option can be used to override this safety when merging histories of two projects that started their lives independently. As that is a very rare occasion, no configuration variable to enable this by default exists and will not be added.
-r
--rebase[=false|true|merges|preserve|interactive]
When true, rebase the current branch on top of the upstream branch after fetching. If there is a remote-tracking branch corresponding to the upstream branch and the upstream branch was rebased since last fetched, the rebase uses that information to avoid rebasing non-local changes.
When set to merges, rebase using git rebase --rebase-merges so that the local merge commits are included in the rebase (see git-rebase[1] for details).
When set to preserve (deprecated in favor of merges), rebase with the --preserve-merges option passed to git rebase so that locally created merge commits will not be flattened.
When false, merge the current branch into the upstream branch.
When interactive, enable the interactive mode of rebase.
See pull.rebase, branch.<name>.rebase and branch.autoSetupRebase in git-config[1] if you want to make git pull always use --rebase instead of merging.
Note
This is a potentially dangerous mode of operation. It rewrites history, which does not bode well when you published that history already. Do not use this option unless you have read git-rebase[1] carefully.
--no-rebase
Override earlier --rebase.
Options related to fetching
--all
Fetch all remotes.
-a
--append
Append ref names and object names of fetched refs to the existing contents of .git/FETCH_HEAD. Without this option old data in .git/FETCH_HEAD will be overwritten.
--depth=<depth>
Limit fetching to the specified number of commits from the tip of each remote branch history. If fetching to a shallow repository created by git clone with --depth=<depth> option (see git-clone[1]), deepen or shorten the history to the specified number of commits. Tags for the deepened commits are not fetched.
--deepen=<depth>
Similar to --depth, except it specifies the number of commits from the current shallow boundary instead of from the tip of each remote branch history.
--shallow-since=<date>
Deepen or shorten the history of a shallow repository to include all reachable commits after <date>.
--shallow-exclude=<revision>
Deepen or shorten the history of a shallow repository to exclude commits reachable from a specified remote branch or tag. This option can be specified multiple times.
--unshallow
If the source repository is complete, convert a shallow repository to a complete one, removing all the limitations imposed by shallow repositories.
If the source repository is shallow, fetch as much as possible so that the current repository has the same history as the source repository.
--update-shallow
By default when fetching from a shallow repository, git fetch refuses refs that require updating .git/shallow. This option updates .git/shallow and accept such refs.
--negotiation-tip=<commit|glob>
By default, Git will report, to the server, commits reachable from all local refs to find common commits in an attempt to reduce the size of the to-be-received packfile. If specified, Git will only report commits reachable from the given tips. This is useful to speed up fetches when the user knows which local ref is likely to have commits in common with the upstream ref being fetched.
This option may be specified more than once; if so, Git will report commits reachable from any of the given commits.
The argument to this option may be a glob on ref names, a ref, or the (possibly abbreviated) SHA-1 of a commit. Specifying a glob is equivalent to specifying this option multiple times, one for each matching ref name.
See also the fetch.negotiationAlgorithm configuration variable documented in git-config[1].
--dry-run
Show what would be done, without making any changes.
-f
--force
When git fetch is used with <src>:<dst> refspec it may refuse to update the local branch as discussed in the <refspec> part of the git-fetch[1] documentation. This option overrides that check.
-k
--keep
Keep downloaded pack.
-p
--prune
Before fetching, remove any remote-tracking references that no longer exist on the remote. Tags are not subject to pruning if they are fetched only because of the default tag auto-following or due to a --tags option. However, if tags are fetched due to an explicit refspec (either on the command line or in the remote configuration, for example if the remote was cloned with the --mirror option), then they are also subject to pruning. Supplying --prune-tags is a shorthand for providing the tag refspec.
--no-tags
By default, tags that point at objects that are downloaded from the remote repository are fetched and stored locally. This option disables this automatic tag following. The default behavior for a remote may be specified with the remote.<name>.tagOpt setting. See git-config[1].
--refmap=<refspec>
When fetching refs listed on the command line, use the specified refspec (can be given more than once) to map the refs to remote-tracking branches, instead of the values of remote.*.fetch configuration variables for the remote repository. Providing an empty <refspec> to the --refmap option causes Git to ignore the configured refspecs and rely entirely on the refspecs supplied as command-line arguments. See section on "Configured Remote-tracking Branches" for details.
-t
--tags
Fetch all tags from the remote (i.e., fetch remote tags refs/tags/* into local tags with the same name), in addition to whatever else would otherwise be fetched. Using this option alone does not subject tags to pruning, even if --prune is used (though tags may be pruned anyway if they are also the destination of an explicit refspec; see --prune).
-j
--jobs=<n>
Number of parallel children to be used for all forms of fetching.
If the --multiple option was specified, the different remotes will be fetched in parallel. If multiple submodules are fetched, they will be fetched in parallel. To control them independently, use the config settings fetch.parallel and submodule.fetchJobs (see git-config[1]).
Typically, parallel recursive and multi-remote fetches will be faster. By default fetches are performed sequentially, not in parallel.
--set-upstream
If the remote is fetched successfully, pull and add upstream (tracking) reference, used by argument-less git-pull[1] and other commands. For more information, see branch.<name>.merge and branch.<name>.remote in git-config[1].
--upload-pack <upload-pack>
When given, and the repository to fetch from is handled by git fetch-pack, --exec=<upload-pack> is passed to the command to specify non-default path for the command run on the other end.
--progress
Progress status is reported on the standard error stream by default when it is attached to a terminal, unless -q is specified. This flag forces progress status even if the standard error stream is not directed to a terminal.
-o <option>
--server-option=<option>
Transmit the given string to the server when communicating using protocol version 2. The given string must not contain a NUL or LF character. The server’s handling of server options, including unknown ones, is server-specific. When multiple --server-option=<option> are given, they are all sent to the other side in the order listed on the command line.
--show-forced-updates
By default, git checks if a branch is force-updated during fetch. This can be disabled through fetch.showForcedUpdates, but the --show-forced-updates option guarantees this check occurs. See git-config[1].
--no-show-forced-updates
By default, git checks if a branch is force-updated during fetch. Pass --no-show-forced-updates or set fetch.showForcedUpdates to false to skip this check for performance reasons. If used during git-pull the --ff-only option will still check for forced updates before attempting a fast-forward update. See git-config[1].
-4
--ipv4
Use IPv4 addresses only, ignoring IPv6 addresses.
-6
--ipv6
Use IPv6 addresses only, ignoring IPv4 addresses.`
// pushFlagsStr holds the help text for the command-line flags of
// `git push`, copied verbatim from the git-push(1) manual page.
//
// NOTE(review): presumably consumed elsewhere in this package for flag
// completion or inline help — confirm against the callers. Do not
// reformat or "fix" the text: this is a runtime string literal, so any
// character change alters program output.
const pushFlagsStr = `
--all
Push all branches (i.e. refs under refs/heads/); cannot be used with other <refspec>.
--prune
Remove remote branches that don’t have a local counterpart. For example a remote branch tmp will be removed if a local branch with the same name doesn’t exist any more. This also respects refspecs, e.g. git push --prune remote refs/heads/*:refs/tmp/* would make sure that remote refs/tmp/foo will be removed if refs/heads/foo doesn’t exist.
--mirror
Instead of naming each ref to push, specifies that all refs under refs/ (which includes but is not limited to refs/heads/, refs/remotes/, and refs/tags/) be mirrored to the remote repository. Newly created local refs will be pushed to the remote end, locally updated refs will be force updated on the remote end, and deleted refs will be removed from the remote end. This is the default if the configuration option remote.<remote>.mirror is set.
-n
--dry-run
Do everything except actually send the updates.
--porcelain
Produce machine-readable output. The output status line for each ref will be tab-separated and sent to stdout instead of stderr. The full symbolic names of the refs will be given.
-d
--delete
All listed refs are deleted from the remote repository. This is the same as prefixing all refs with a colon.
--tags
All refs under refs/tags are pushed, in addition to refspecs explicitly listed on the command line.
--follow-tags
Push all the refs that would be pushed without this option, and also push annotated tags in refs/tags that are missing from the remote but are pointing at commit-ish that are reachable from the refs being pushed. This can also be specified with configuration variable push.followTags. For more information, see push.followTags in git-config[1].
--[no-]signed
--signed=(true|false|if-asked)
GPG-sign the push request to update refs on the receiving side, to allow it to be checked by the hooks and/or be logged. If false or --no-signed, no signing will be attempted. If true or --signed, the push will fail if the server does not support signed pushes. If set to if-asked, sign if and only if the server supports signed pushes. The push will also fail if the actual call to gpg --sign fails. See git-receive-pack[1] for the details on the receiving end.
--[no-]atomic
Use an atomic transaction on the remote side if available. Either all refs are updated, or on error, no refs are updated. If the server does not support atomic pushes the push will fail.
-o <option>
--push-option=<option>
Transmit the given string to the server, which passes them to the pre-receive as well as the post-receive hook. The given string must not contain a NUL or LF character. When multiple --push-option=<option> are given, they are all sent to the other side in the order listed on the command line. When no --push-option=<option> is given from the command line, the values of configuration variable push.pushOption are used instead.
--receive-pack=<git-receive-pack>
--exec=<git-receive-pack>
Path to the git-receive-pack program on the remote end. Sometimes useful when pushing to a remote repository over ssh, and you do not have the program in a directory on the default $PATH.
--[no-]force-with-lease
--force-with-lease=<refname>
--force-with-lease=<refname>:<expect>
Usually, "git push" refuses to update a remote ref that is not an ancestor of the local ref used to overwrite it.
This option overrides this restriction if the current value of the remote ref is the expected value. "git push" fails otherwise.
Imagine that you have to rebase what you have already published. You will have to bypass the "must fast-forward" rule in order to replace the history you originally published with the rebased history. If somebody else built on top of your original history while you are rebasing, the tip of the branch at the remote may advance with her commit, and blindly pushing with --force will lose her work.
This option allows you to say that you expect the history you are updating is what you rebased and want to replace. If the remote ref still points at the commit you specified, you can be sure that no other people did anything to the ref. It is like taking a "lease" on the ref without explicitly locking it, and the remote ref is updated only if the "lease" is still valid.
--force-with-lease alone, without specifying the details, will protect all remote refs that are going to be updated by requiring their current value to be the same as the remote-tracking branch we have for them.
--force-with-lease=<refname>, without specifying the expected value, will protect the named ref (alone), if it is going to be updated, by requiring its current value to be the same as the remote-tracking branch we have for it.
--force-with-lease=<refname>:<expect> will protect the named ref (alone), if it is going to be updated, by requiring its current value to be the same as the specified value <expect> (which is allowed to be different from the remote-tracking branch we have for the refname, or we do not even have to have such a remote-tracking branch when this form is used). If <expect> is the empty string, then the named ref must not already exist.
Note that all forms other than --force-with-lease=<refname>:<expect> that specifies the expected current value of the ref explicitly are still experimental and their semantics may change as we gain experience with this feature.
"--no-force-with-lease" will cancel all the previous --force-with-lease on the command line.
A general note on safety: supplying this option without an expected value, i.e. as --force-with-lease or --force-with-lease=<refname> interacts very badly with anything that implicitly runs git fetch on the remote to be pushed to in the background, e.g. git fetch origin on your repository in a cronjob.
The protection it offers over --force is ensuring that subsequent changes your work wasn’t based on aren’t clobbered, but this is trivially defeated if some background process is updating refs in the background. We don’t have anything except the remote tracking info to go by as a heuristic for refs you’re expected to have seen & are willing to clobber.
If your editor or some other system is running git fetch in the background for you a way to mitigate this is to simply set up another remote:
git remote add origin-push $(git config remote.origin.url)
git fetch origin-push
Now when the background process runs git fetch origin the references on origin-push won’t be updated, and thus commands like:
git push --force-with-lease origin-push
Will fail unless you manually run git fetch origin-push. This method is of course entirely defeated by something that runs git fetch --all, in that case you’d need to either disable it or do something more tedious like:
git fetch # update 'master' from remote
git tag base master # mark our base point
git rebase -i master # rewrite some commits
git push --force-with-lease=master:base master:master
I.e. create a base tag for versions of the upstream code that you’ve seen and are willing to overwrite, then rewrite history, and finally force push changes to master if the remote version is still at base, regardless of what your local remotes/origin/master has been updated to in the background.
-f
--force
Usually, the command refuses to update a remote ref that is not an ancestor of the local ref used to overwrite it. Also, when --force-with-lease option is used, the command refuses to update a remote ref whose current value does not match what is expected.
This flag disables these checks, and can cause the remote repository to lose commits; use it with care.
Note that --force applies to all the refs that are pushed, hence using it with push.default set to matching or with multiple push destinations configured with remote.*.push may overwrite refs other than the current branch (including local refs that are strictly behind their remote counterpart). To force a push to only one branch, use a + in front of the refspec to push (e.g git push origin +master to force a push to the master branch). See the <refspec>... section above for details.
--repo=<repository>
This option is equivalent to the <repository> argument. If both are specified, the command-line argument takes precedence.
-u
--set-upstream
For every branch that is up to date or successfully pushed, add upstream (tracking) reference, used by argument-less git-pull[1] and other commands. For more information, see branch.<name>.merge in git-config[1].
--[no-]thin
These options are passed to git-send-pack[1]. A thin transfer significantly reduces the amount of sent data when the sender and receiver share many of the same objects in common. The default is --thin.
-q
--quiet
Suppress all output, including the listing of updated refs, unless an error occurs. Progress is not reported to the standard error stream.
-v
--verbose
Run verbosely.
--progress
Progress status is reported on the standard error stream by default when it is attached to a terminal, unless -q is specified. This flag forces progress status even if the standard error stream is not directed to a terminal.
--no-recurse-submodules
--recurse-submodules=check|on-demand|only|no
May be used to make sure all submodule commits used by the revisions to be pushed are available on a remote-tracking branch. If check is used Git will verify that all submodule commits that changed in the revisions to be pushed are available on at least one remote of the submodule. If any commits are missing the push will be aborted and exit with non-zero status. If on-demand is used all submodules that changed in the revisions to be pushed will be pushed. If on-demand was not able to push all necessary revisions it will also be aborted and exit with non-zero status. If only is used all submodules will be recursively pushed while the superproject is left unpushed. A value of no or using --no-recurse-submodules can be used to override the push.recurseSubmodules configuration variable when no submodule recursion is required.
--no-verify
Toggle the pre-push hook (see githooks[5]). The default is --verify, giving the hook a chance to prevent the push. With --no-verify, the hook is bypassed completely.
-4
--ipv4
Use IPv4 addresses only, ignoring IPv6 addresses.
-6
--ipv6
Use IPv6 addresses only, ignoring IPv4 addresses.
`
const logFlagsStr = `--follow
Continue listing the history of a file beyond renames (works only for a single file).
--no-decorate
--decorate[=short|full|auto|no]
Print out the ref names of any commits that are shown. If short is specified, the ref name prefixes refs/heads/, refs/tags/ and refs/remotes/ will not be printed. If full is specified, the full ref name (including prefix) will be printed. If auto is specified, then if the output is going to a terminal, the ref names are shown as if short were given, otherwise no ref names are shown. The default option is short.
--decorate-refs=<pattern>
--decorate-refs-exclude=<pattern>
If no --decorate-refs is given, pretend as if all refs were included. For each candidate, do not use it for decoration if it matches any patterns given to --decorate-refs-exclude or if it doesn’t match any of the patterns given to --decorate-refs. The log.excludeDecoration config option allows excluding refs from the decorations, but an explicit --decorate-refs pattern will override a match in log.excludeDecoration.
--source
Print out the ref name given on the command line by which each commit was reached.
--[no-]mailmap
--[no-]use-mailmap
Use mailmap file to map author and committer names and email addresses to canonical real names and email addresses. See git-shortlog[1].
--full-diff
Without this flag, git log -p <path>... shows commits that touch the specified paths, and diffs about the same specified paths. With this, the full diff is shown for commits that touch the specified paths; this means that "<path>…" limits only commits, and doesn’t limit diff for those commits.
Note that this affects all diff-based output types, e.g. those produced by --stat, etc.
--log-size
Include a line “log size <number>” in the output for each commit, where <number> is the length of that commit’s message in bytes. Intended to speed up tools that read log messages from git log output by allowing them to allocate space in advance.
-L <start>,<end>:<file>
-L :<funcname>:<file>
Trace the evolution of the line range given by "<start>,<end>" (or the function name regex <funcname>) within the <file>. You may not give any pathspec limiters. This is currently limited to a walk starting from a single revision, i.e., you may only give zero or one positive revision arguments, and <start> and <end> (or <funcname>) must exist in the starting revision. You can specify this option more than once. Implies --patch. Patch output can be suppressed using --no-patch, but other diff formats (namely --raw, --numstat, --shortstat, --dirstat, --summary, --name-only, --name-status, --check) are not currently implemented.
<start> and <end> can take one of these forms:
number
If <start> or <end> is a number, it specifies an absolute line number (lines count from 1).
/regex/
This form will use the first line matching the given POSIX regex. If <start> is a regex, it will search from the end of the previous -L range, if any, otherwise from the start of file. If <start> is “^/regex/”, it will search from the start of file. If <end> is a regex, it will search starting at the line given by <start>.
+offset or -offset
This is only valid for <end> and will specify a number of lines before or after the line given by <start>.
If “:<funcname>” is given in place of <start> and <end>, it is a regular expression that denotes the range from the first funcname line that matches <funcname>, up to the next funcname line. “:<funcname>” searches from the end of the previous -L range, if any, otherwise from the start of file. “^:<funcname>” searches from the start of file.
<revision range>
Show only commits in the specified revision range. When no <revision range> is specified, it defaults to HEAD (i.e. the whole history leading to the current commit). origin..HEAD specifies all the commits reachable from the current commit (i.e. HEAD), but not from origin. For a complete list of ways to spell <revision range>, see the Specifying Ranges section of gitrevisions[7].
[--] <path>…
Show only commits that are enough to explain how the files that match the specified paths came to be. See History Simplification below for details and other simplification modes.
Paths may need to be prefixed with -- to separate them from options or the revision range, when confusion arises.
Commit Limiting
Besides specifying a range of commits that should be listed using the special notations explained in the description, additional commit limiting may be applied.
Using more options generally further limits the output (e.g. --since=<date1> limits to commits newer than <date1>, and using it with --grep=<pattern> further limits to commits whose log message has a line that matches <pattern>), unless otherwise noted.
Note that these are applied before commit ordering and formatting options, such as --reverse.
-<number>
-n <number>
--max-count=<number>
Limit the number of commits to output.
--skip=<number>
Skip number commits before starting to show the commit output.
--since=<date>
--after=<date>
Show commits more recent than a specific date.
--until=<date>
--before=<date>
Show commits older than a specific date.
--author=<pattern>
--committer=<pattern>
Limit the commits output to ones with author/committer header lines that match the specified pattern (regular expression). With more than one --author=<pattern>, commits whose author matches any of the given patterns are chosen (similarly for multiple --committer=<pattern>).
--grep-reflog=<pattern>
Limit the commits output to ones with reflog entries that match the specified pattern (regular expression). With more than one --grep-reflog, commits whose reflog message matches any of the given patterns are chosen. It is an error to use this option unless --walk-reflogs is in use.
--grep=<pattern>
Limit the commits output to ones with log message that matches the specified pattern (regular expression). With more than one --grep=<pattern>, commits whose message matches any of the given patterns are chosen (but see --all-match).
When --notes is in effect, the message from the notes is matched as if it were part of the log message.
--all-match
Limit the commits output to ones that match all given --grep, instead of ones that match at least one.
--invert-grep
Limit the commits output to ones with log message that do not match the pattern specified with --grep=<pattern>.
-i
--regexp-ignore-case
Match the regular expression limiting patterns without regard to letter case.
--basic-regexp
Consider the limiting patterns to be basic regular expressions; this is the default.
-E
--extended-regexp
Consider the limiting patterns to be extended regular expressions instead of the default basic regular expressions.
-F
--fixed-strings
Consider the limiting patterns to be fixed strings (don’t interpret pattern as a regular expression).
-P
--perl-regexp
Consider the limiting patterns to be Perl-compatible regular expressions.
Support for these types of regular expressions is an optional compile-time dependency. If Git wasn’t compiled with support for them providing this option will cause it to die.
--remove-empty
Stop when a given path disappears from the tree.
--merges
Print only merge commits. This is exactly the same as --min-parents=2.
--no-merges
Do not print commits with more than one parent. This is exactly the same as --max-parents=1.
--min-parents=<number>
--max-parents=<number>
--no-min-parents
--no-max-parents
Show only commits which have at least (or at most) that many parent commits. In particular, --max-parents=1 is the same as --no-merges, --min-parents=2 is the same as --merges. --max-parents=0 gives all root commits and --min-parents=3 all octopus merges.
--no-min-parents and --no-max-parents reset these limits (to no limit) again. Equivalent forms are --min-parents=0 (any commit has 0 or more parents) and --max-parents=-1 (negative numbers denote no upper limit).
--first-parent
Follow only the first parent commit upon seeing a merge commit. This option can give a better overview when viewing the evolution of a particular topic branch, because merges into a topic branch tend to be only about adjusting to updated upstream from time to time, and this option allows you to ignore the individual commits brought in to your history by such a merge. Cannot be combined with --bisect.
--not
Reverses the meaning of the ^ prefix (or lack thereof) for all following revision specifiers, up to the next --not.
--all
Pretend as if all the refs in refs/, along with HEAD, are listed on the command line as <commit>.
--branches[=<pattern>]
Pretend as if all the refs in refs/heads are listed on the command line as <commit>. If <pattern> is given, limit branches to ones matching given shell glob. If pattern lacks ?, *, or [, /* at the end is implied.
--tags[=<pattern>]
Pretend as if all the refs in refs/tags are listed on the command line as <commit>. If <pattern> is given, limit tags to ones matching given shell glob. If pattern lacks ?, *, or [, /* at the end is implied.
--remotes[=<pattern>]
Pretend as if all the refs in refs/remotes are listed on the command line as <commit>. If <pattern> is given, limit remote-tracking branches to ones matching given shell glob. If pattern lacks ?, *, or [, /* at the end is implied.
--glob=<glob-pattern>
Pretend as if all the refs matching shell glob <glob-pattern> are listed on the command line as <commit>. Leading refs/, is automatically prepended if missing. If pattern lacks ?, *, or [, /* at the end is implied.
--exclude=<glob-pattern>
Do not include refs matching <glob-pattern> that the next --all, --branches, --tags, --remotes, or --glob would otherwise consider. Repetitions of this option accumulate exclusion patterns up to the next --all, --branches, --tags, --remotes, or --glob option (other options or arguments do not clear accumulated patterns).
The patterns given should not begin with refs/heads, refs/tags, or refs/remotes when applied to --branches, --tags, or --remotes, respectively, and they must begin with refs/ when applied to --glob or --all. If a trailing /* is intended, it must be given explicitly.
--reflog
Pretend as if all objects mentioned by reflogs are listed on the command line as <commit>.
--alternate-refs
Pretend as if all objects mentioned as ref tips of alternate repositories were listed on the command line. An alternate repository is any repository whose object directory is specified in objects/info/alternates. The set of included objects may be modified by core.alternateRefsCommand, etc. See git-config[1].
--single-worktree
By default, all working trees will be examined by the following options when there are more than one (see git-worktree[1]): --all, --reflog and --indexed-objects. This option forces them to examine the current working tree only.
--ignore-missing
Upon seeing an invalid object name in the input, pretend as if the bad input was not given.
--bisect
Pretend as if the bad bisection ref refs/bisect/bad was listed and as if it was followed by --not and the good bisection refs refs/bisect/good-* on the command line. Cannot be combined with --first-parent.
--stdin
In addition to the <commit> listed on the command line, read them from the standard input. If a -- separator is seen, stop reading commits and start reading paths to limit the result.
--cherry-mark
Like --cherry-pick (see below) but mark equivalent commits with = rather than omitting them, and inequivalent ones with +.
--cherry-pick
Omit any commit that introduces the same change as another commit on the “other side” when the set of commits are limited with symmetric difference.
For example, if you have two branches, A and B, a usual way to list all commits on only one side of them is with --left-right (see the example below in the description of the --left-right option). However, it shows the commits that were cherry-picked from the other branch (for example, “3rd on b” may be cherry-picked from branch A). With this option, such pairs of commits are excluded from the output.
--left-only
--right-only
List only commits on the respective side of a symmetric difference, i.e. only those which would be marked < resp. > by --left-right.
For example, --cherry-pick --right-only A...B omits those commits from B which are in A or are patch-equivalent to a commit in A. In other words, this lists the + commits from git cherry A B. More precisely, --cherry-pick --right-only --no-merges gives the exact list.
--cherry
A synonym for --right-only --cherry-mark --no-merges; useful to limit the output to the commits on our side and mark those that have been applied to the other side of a forked history with git log --cherry upstream...mybranch, similar to git cherry upstream mybranch.
-g
--walk-reflogs
Instead of walking the commit ancestry chain, walk reflog entries from the most recent one to older ones. When this option is used you cannot specify commits to exclude (that is, ^commit, commit1..commit2, and commit1...commit2 notations cannot be used).
With --pretty format other than oneline and reference (for obvious reasons), this causes the output to have two extra lines of information taken from the reflog. The reflog designator in the output may be shown as ref@{Nth} (where Nth is the reverse-chronological index in the reflog) or as ref@{timestamp} (with the timestamp for that entry), depending on a few rules:
If the starting point is specified as ref@{Nth}, show the index format.
If the starting point was specified as ref@{now}, show the timestamp format.
If neither was used, but --date was given on the command line, show the timestamp in the format requested by --date.
Otherwise, show the index format.
Under --pretty=oneline, the commit message is prefixed with this information on the same line. This option cannot be combined with --reverse. See also git-reflog[1].
Under --pretty=reference, this information will not be shown at all.
--merge
After a failed merge, show refs that touch files having a conflict and don’t exist on all heads to merge.
--boundary
Output excluded boundary commits. Boundary commits are prefixed with -.
History Simplification
Sometimes you are only interested in parts of the history, for example the commits modifying a particular <path>. But there are two parts of History Simplification, one part is selecting the commits and the other is how to do it, as there are various strategies to simplify the history.
The following options select the commits to be shown:
<paths>
Commits modifying the given <paths> are selected.
--simplify-by-decoration
Commits that are referred by some branch or tag are selected.
Note that extra commits can be shown to give a meaningful history.
The following options affect the way the simplification is performed:
Default mode
Simplifies the history to the simplest history explaining the final state of the tree. Simplest because it prunes some side branches if the end result is the same (i.e. merging branches with the same content)
--show-pulls
Include all commits from the default mode, but also any merge commits that are not TREESAME to the first parent but are TREESAME to a later parent. This mode is helpful for showing the merge commits that "first introduced" a change to a branch.
--full-history
Same as the default mode, but does not prune some history.
--dense
Only the selected commits are shown, plus some to have a meaningful history.
--sparse
All commits in the simplified history are shown.
--simplify-merges
Additional option to --full-history to remove some needless merges from the resulting history, as there are no selected commits contributing to this merge.
--ancestry-path
When given a range of commits to display (e.g. commit1..commit2 or commit2 ^commit1), only display commits that exist directly on the ancestry chain between the commit1 and commit2, i.e. commits that are both descendants of commit1, and ancestors of commit2.
A more detailed explanation follows.
Suppose you specified foo as the <paths>. We shall call commits that modify foo !TREESAME, and the rest TREESAME. (In a diff filtered for foo, they look different and equal, respectively.)
In the following, we will always refer to the same example history to illustrate the differences between simplification settings. We assume that you are filtering for a file foo in this commit graph:
.-A---M---N---O---P---Q
/ / / / / /
I B C D E Y
\ / / / / /
------------- X
The horizontal line of history A---Q is taken to be the first parent of each merge. The commits are:
I is the initial commit, in which foo exists with contents “asdf”, and a file quux exists with contents “quux”. Initial commits are compared to an empty tree, so I is !TREESAME.
In A, foo contains just “foo”.
B contains the same change as A. Its merge M is trivial and hence TREESAME to all parents.
C does not change foo, but its merge N changes it to “foobar”, so it is not TREESAME to any parent.
D sets foo to “baz”. Its merge O combines the strings from N and D to “foobarbaz”; i.e., it is not TREESAME to any parent.
E changes quux to “xyzzy”, and its merge P combines the strings to “quux xyzzy”. P is TREESAME to O, but not to E.
X is an independent root commit that added a new file side, and Y modified it. Y is TREESAME to X. Its merge Q added side to P, and Q is TREESAME to P, but not to Y.
rev-list walks backwards through history, including or excluding commits based on whether --full-history and/or parent rewriting (via --parents or --children) are used. The following settings are available.
Default mode
Commits are included if they are not TREESAME to any parent (though this can be changed, see --sparse below). If the commit was a merge, and it was TREESAME to one parent, follow only that parent. (Even if there are several TREESAME parents, follow only one of them.) Otherwise, follow all parents.
This results in:
.-A---N---O
/ / /
I---------D
Note how the rule to only follow the TREESAME parent, if one is available, removed B from consideration entirely. C was considered via N, but is TREESAME. Root commits are compared to an empty tree, so I is !TREESAME.
Parent/child relations are only visible with --parents, but that does not affect the commits selected in default mode, so we have shown the parent lines.
--full-history without parent rewriting
This mode differs from the default in one point: always follow all parents of a merge, even if it is TREESAME to one of them. Even if more than one side of the merge has commits that are included, this does not imply that the merge itself is! In the example, we get
I A B N D O P Q
M was excluded because it is TREESAME to both parents. E, C and B were all walked, but only B was !TREESAME, so the others do not appear.
Note that without parent rewriting, it is not really possible to talk about the parent/child relationships between the commits, so we show them disconnected.
--full-history with parent rewriting
Ordinary commits are only included if they are !TREESAME (though this can be changed, see --sparse below).
Compare to --full-history without rewriting above. Note that E was pruned away because it is TREESAME, but the parent list of P was rewritten to contain E's parent I. The same happened for C and N, and X, Y and Q.
In addition to the above settings, you can change whether TREESAME affects inclusion:
--dense
Commits that are walked are included if they are not TREESAME to any parent.
--sparse
All commits that are walked are included.
Note that without --full-history, this still simplifies merges: if one of the parents is TREESAME, we follow only that one, so the other sides of the merge are never walked.
--simplify-merges
First, build a history graph in the same way that --full-history with parent rewriting does (see above).
Then simplify each commit C to its replacement C' in the final history according to the following rules:
Set C' to C.
Replace each parent P of C' with its simplification P'. In the process, drop parents that are ancestors of other parents or that are root commits TREESAME to an empty tree, and remove duplicates, but take care to never drop all parents that we are TREESAME to.
If after this parent rewriting, C' is a root or merge commit (has zero or >1 parents), a boundary commit, or !TREESAME, it remains. Otherwise, it is replaced with its only parent.
The effect of this is best shown by way of comparing to --full-history with parent rewriting. The example turns into:
.-A---M---N---O
/ / /
I B D
\ / /
---------
Note the major differences in N, P, and Q over --full-history:
N's parent list had I removed, because it is an ancestor of the other parent M. Still, N remained because it is !TREESAME.
P's parent list similarly had I removed. P was then removed completely, because it had one parent and is TREESAME.
Q's parent list had Y simplified to X. X was then removed, because it was a TREESAME root. Q was then removed completely, because it had one parent and is TREESAME.
There is another simplification mode available:
--ancestry-path
Limit the displayed commits to those directly on the ancestry chain between the “from” and “to” commits in the given commit range. I.e. only display commits that are ancestor of the “to” commit and descendants of the “from” commit.
As an example use case, consider the following commit history:
D---E-------F
/ \ \
B---C---G---H---I---J
/ \
A-------K---------------L--M
A regular D..M computes the set of commits that are ancestors of M, but excludes the ones that are ancestors of D. This is useful to see what happened to the history leading to M since D, in the sense that “what does M have that did not exist in D”. The result in this example would be all the commits, except A and B (and D itself, of course).
When we want to find out what commits in M are contaminated with the bug introduced by D and need fixing, however, we might want to view only the subset of D..M that are actually descendants of D, i.e. excluding C and K. This is exactly what the --ancestry-path option does. Applied to the D..M range, it results in:
E-------F
\ \
G---H---I---J
\
L--M
Before discussing another option, --show-pulls, we need to create a new example history.
A common problem users face when looking at simplified history is that a commit they know changed a file somehow does not appear in the file’s simplified history. Let’s demonstrate a new example and show how options such as --full-history and --simplify-merges works in that case:
--show-pulls
In addition to the commits shown in the default history, show each merge commit that is not TREESAME to its first parent but is TREESAME to a later parent.
When a merge commit is included by --show-pulls, the merge is treated as if it "pulled" the change from another branch. When using --show-pulls on this example (and no other options) the resulting graph is:
I---X---R---N
Here, the merge commits R and N are included because they pulled the commits X and R into the base branch, respectively. These merges are the reason the commits A and B do not appear in the default history.
When --show-pulls is paired with --simplify-merges, the graph includes all of the necessary information:
The --simplify-by-decoration option allows you to view only the big picture of the topology of the history, by omitting commits that are not referenced by tags. Commits are marked as !TREESAME (in other words, kept after history simplification rules described above) if (1) they are referenced by tags, or (2) they change the contents of the paths given on the command line. All other commits are marked as TREESAME (subject to be simplified away).
Commit Ordering
By default, the commits are shown in reverse chronological order.
--date-order
Show no parents before all of its children are shown, but otherwise show commits in the commit timestamp order.
--author-date-order
Show no parents before all of its children are shown, but otherwise show commits in the author timestamp order.
--topo-order
Show no parents before all of its children are shown, and avoid showing commits on multiple lines of history intermixed.
For example, in a commit history like this:
---1----2----4----7
\ \
3----5----6----8---
where the numbers denote the order of commit timestamps, git rev-list and friends with --date-order show the commits in the timestamp order: 8 7 6 5 4 3 2 1.
With --topo-order, they would show 8 6 5 3 7 4 2 1 (or 8 7 4 2 6 5 3 1); some older commits are shown before newer ones in order to avoid showing the commits from two parallel development track mixed together.
--reverse
Output the commits chosen to be shown (see Commit Limiting section above) in reverse order. Cannot be combined with --walk-reflogs.
Object Traversal
These options are mostly targeted for packing of Git repositories.
--no-walk[=(sorted|unsorted)]
Only show the given commits, but do not traverse their ancestors. This has no effect if a range is specified. If the argument unsorted is given, the commits are shown in the order they were given on the command line. Otherwise (if sorted or no argument was given), the commits are shown in reverse chronological order by commit time. Cannot be combined with --graph.
--do-walk
Overrides a previous --no-walk.
Commit Formatting
--pretty[=<format>]
--format=<format>
Pretty-print the contents of the commit logs in a given format, where <format> can be one of oneline, short, medium, full, fuller, reference, email, raw, format:<string> and tformat:<string>. When <format> is none of the above, and has %placeholder in it, it acts as if --pretty=tformat:<format> were given.
See the "PRETTY FORMATS" section for some additional details for each format. When =<format> part is omitted, it defaults to medium.
Note: you can specify the default pretty format in the repository configuration (see git-config[1]).
--abbrev-commit
Instead of showing the full 40-byte hexadecimal commit object name, show only a partial prefix. Non default number of digits can be specified with "--abbrev=<n>" (which also modifies diff output, if it is displayed).
This should make "--pretty=oneline" a whole lot more readable for people using 80-column terminals.
--no-abbrev-commit
Show the full 40-byte hexadecimal commit object name. This negates --abbrev-commit and those options which imply it such as "--oneline". It also overrides the log.abbrevCommit variable.
--oneline
This is a shorthand for "--pretty=oneline --abbrev-commit" used together.
--encoding=<encoding>
The commit objects record the encoding used for the log message in their encoding header; this option can be used to tell the command to re-code the commit log message in the encoding preferred by the user. For non plumbing commands this defaults to UTF-8. Note that if an object claims to be encoded in X and we are outputting in X, we will output the object verbatim; this means that invalid sequences in the original commit may be copied to the output.
--expand-tabs=<n>
--expand-tabs
--no-expand-tabs
Perform a tab expansion (replace each tab with enough spaces to fill to the next display column that is multiple of <n>) in the log message before showing it in the output. --expand-tabs is a short-hand for --expand-tabs=8, and --no-expand-tabs is a short-hand for --expand-tabs=0, which disables tab expansion.
By default, tabs are expanded in pretty formats that indent the log message by 4 spaces (i.e. medium, which is the default, full, and fuller).
--notes[=<ref>]
Show the notes (see git-notes[1]) that annotate the commit, when showing the commit log message. This is the default for git log, git show and git whatchanged commands when there is no --pretty, --format, or --oneline option given on the command line.
By default, the notes shown are from the notes refs listed in the core.notesRef and notes.displayRef variables (or corresponding environment overrides). See git-config[1] for more details.
With an optional <ref> argument, use the ref to find the notes to display. The ref can specify the full refname when it begins with refs/notes/; when it begins with notes/, refs/ and otherwise refs/notes/ is prefixed to form a full name of the ref.
Multiple --notes options can be combined to control which notes are being displayed. Examples: "--notes=foo" will show only notes from "refs/notes/foo"; "--notes=foo --notes" will show both notes from "refs/notes/foo" and from the default notes ref(s).
--no-notes
Do not show notes. This negates the above --notes option, by resetting the list of notes refs from which notes are shown. Options are parsed in the order given on the command line, so e.g. "--notes --notes=foo --no-notes --notes=bar" will only show notes from "refs/notes/bar".
--show-notes[=<ref>]
--[no-]standard-notes
These options are deprecated. Use the above --notes/--no-notes options instead.
--show-signature
Check the validity of a signed commit object by passing the signature to gpg --verify and show the output.
--relative-date
Synonym for --date=relative.
--date=<format>
Only takes effect for dates shown in human-readable format, such as when using --pretty. log.date config variable sets a default value for the log command’s --date option. By default, dates are shown in the original time zone (either committer’s or author’s). If -local is appended to the format (e.g., iso-local), the user’s local time zone is used instead.
--date=relative shows dates relative to the current time, e.g. “2 hours ago”. The -local option has no effect for --date=relative.
--date=local is an alias for --date=default-local.
--date=iso
shows timestamps in an ISO 8601-like format. The differences to the strict ISO 8601 format are:
a space instead of the T date/time delimiter
a space between time and time zone
no colon between hours and minutes of the time zone
--date=iso-strict shows timestamps in strict ISO 8601 format.
--date=rfc shows timestamps in RFC 2822 format, often found in email messages.
--date=short shows only the date, but not the time, in YYYY-MM-DD format.
--date=raw shows the date as seconds since the epoch (1970-01-01 00:00:00 UTC), followed by a space, and then the timezone as an offset from UTC (a + or - with four digits; the first two are hours, and the second two are minutes). I.e., as if the timestamp were formatted with strftime("%s %z"). Note that the -local option does not affect the seconds-since-epoch value (which is always measured in UTC), but does switch the accompanying timezone value.
--date=human shows the timezone if the timezone does not match the current time-zone, and doesn’t print the whole date if that matches (ie skip printing year for dates that are "this year", but also skip the whole date itself if it’s in the last few days and we can just say what weekday it was). For older dates the hour and minute is also omitted.
--date=unix shows the date as a Unix epoch timestamp (seconds since 1970). As with --raw, this is always in UTC and therefore -local has no effect.
--date=format:... feeds the format ... to your system strftime, except for %z and %Z, which are handled internally. Use --date=format:%c to show the date in your system locale’s preferred format. See the strftime manual for a complete list of format placeholders. When using -local, the correct syntax is --date=format-local:....
--date=default
is the default format, and is similar to --date=rfc2822, with a few exceptions:
there is no comma after the day-of-week
the time zone is omitted when the local time zone is used
--parents
Print also the parents of the commit (in the form "commit parent…"). Also enables parent rewriting, see History Simplification above.
--children
Print also the children of the commit (in the form "commit child…"). Also enables parent rewriting, see History Simplification above.
--left-right
Mark which side of a symmetric difference a commit is reachable from. Commits from the left side are prefixed with < and those from the right with >. If combined with --boundary, those commits are prefixed with -.
For example, if you have this topology:
y---b---b branch B
/ \ /
/ .
/ / \
o---x---a---a branch A
you would get an output like this:
$ git rev-list --left-right --boundary --pretty=oneline A...B
>bbbbbbb... 3rd on b
>bbbbbbb... 2nd on b
<aaaaaaa... 3rd on a
<aaaaaaa... 2nd on a
-yyyyyyy... 1st on b
-xxxxxxx... 1st on a
--graph
Draw a text-based graphical representation of the commit history on the left hand side of the output. This may cause extra lines to be printed in between commits, in order for the graph history to be drawn properly. Cannot be combined with --no-walk.
This enables parent rewriting, see History Simplification above.
This implies the --topo-order option by default, but the --date-order option may also be specified.
--show-linear-break[=<barrier>]
When --graph is not used, all history branches are flattened which can make it hard to see that the two consecutive commits do not belong to a linear branch. This option puts a barrier in between them in that case. If <barrier> is specified, it is the string that will be shown instead of the default one.
Diff Formatting
Listed below are options that control the formatting of diff output. Some of them are specific to git-rev-list[1], however other diff options may be given. See git-diff-files[1] for more options.
-c
With this option, diff output for a merge commit shows the differences from each of the parents to the merge result simultaneously instead of showing pairwise diff between a parent and the result one at a time. Furthermore, it lists only files which were modified from all parents.
--cc
This flag implies the -c option and further compresses the patch output by omitting uninteresting hunks whose contents in the parents have only two variants and the merge result picks one of them without modification.
--combined-all-paths
This flag causes combined diffs (used for merge commits) to list the name of the file from all parents. It thus only has effect when -c or --cc are specified, and is likely only useful if filename changes are detected (i.e. when either rename or copy detection have been requested).
-m
This flag makes the merge commits show the full diff like regular commits; for each merge parent, a separate log entry and diff is generated. An exception is that only diff against the first parent is shown when --first-parent option is given; in that case, the output represents the changes the merge brought into the then-current branch.
-r
Show recursive diffs.
-t
Show the tree objects in the diff output. This implies -r.`
const rebaseFlagsStr = `--onto <newbase>
Starting point at which to create the new commits. If the --onto option is not specified, the starting point is <upstream>. May be any valid commit, and not just an existing branch name.
As a special case, you may use "A...B" as a shortcut for the merge base of A and B if there is exactly one merge base. You can leave out at most one of A and B, in which case it defaults to HEAD.
--keep-base
Set the starting point at which to create the new commits to the merge base of <upstream> <branch>. Running git rebase --keep-base <upstream> <branch> is equivalent to running git rebase --onto <upstream>… <upstream>.
This option is useful in the case where one is developing a feature on top of an upstream branch. While the feature is being worked on, the upstream branch may advance and it may not be the best idea to keep rebasing on top of the upstream but to keep the base commit as-is.
Although both this option and --fork-point find the merge base between <upstream> and <branch>, this option uses the merge base as the starting point on which new commits will be created, whereas --fork-point uses the merge base to determine the set of commits which will be rebased.
See also INCOMPATIBLE OPTIONS below.
<upstream>
Upstream branch to compare against. May be any valid commit, not just an existing branch name. Defaults to the configured upstream for the current branch.
<branch>
Working branch; defaults to HEAD.
--continue
Restart the rebasing process after having resolved a merge conflict.
--abort
Abort the rebase operation and reset HEAD to the original branch. If <branch> was provided when the rebase operation was started, then HEAD will be reset to <branch>. Otherwise HEAD will be reset to where it was when the rebase operation was started.
--quit
Abort the rebase operation but HEAD is not reset back to the original branch. The index and working tree are also left unchanged as a result. If a temporary stash entry was created using --autostash, it will be saved to the stash list.
--apply
Use applying strategies to rebase (calling git-am internally). This option may become a no-op in the future once the merge backend handles everything the apply one does.
See also INCOMPATIBLE OPTIONS below.
--empty={drop,keep,ask}
How to handle commits that are not empty to start and are not clean cherry-picks of any upstream commit, but which become empty after rebasing (because they contain a subset of already upstream changes). With drop (the default), commits that become empty are dropped. With keep, such commits are kept. With ask (implied by --interactive), the rebase will halt when an empty commit is applied allowing you to choose whether to drop it, edit files more, or just commit the empty changes. Other options, like --exec, will use the default of drop unless -i/--interactive is explicitly specified.
Note that commits which start empty are kept (unless --no-keep-empty is specified), and commits which are clean cherry-picks (as determined by git log --cherry-mark ...) are detected and dropped as a preliminary step (unless --reapply-cherry-picks is passed).
See also INCOMPATIBLE OPTIONS below.
--no-keep-empty
--keep-empty
Do not keep commits that start empty before the rebase (i.e. that do not change anything from its parent) in the result. The default is to keep commits which start empty, since creating such commits requires passing the --allow-empty override flag to git commit, signifying that a user is very intentionally creating such a commit and thus wants to keep it.
Usage of this flag will probably be rare, since you can get rid of commits that start empty by just firing up an interactive rebase and removing the lines corresponding to the commits you don’t want. This flag exists as a convenient shortcut, such as for cases where external tools generate many empty commits and you want them all removed.
For commits which do not start empty but become empty after rebasing, see the --empty flag.
See also INCOMPATIBLE OPTIONS below.
--reapply-cherry-picks
--no-reapply-cherry-picks
Reapply all clean cherry-picks of any upstream commit instead of preemptively dropping them. (If these commits then become empty after rebasing, because they contain a subset of already upstream changes, the behavior towards them is controlled by the --empty flag.)
By default (or if --no-reapply-cherry-picks is given), these commits will be automatically dropped. Because this necessitates reading all upstream commits, this can be expensive in repos with a large number of upstream commits that need to be read.
--reapply-cherry-picks allows rebase to forgo reading all upstream commits, potentially improving performance.
See also INCOMPATIBLE OPTIONS below.
--allow-empty-message
No-op. Rebasing commits with an empty message used to fail and this option would override that behavior, allowing commits with empty messages to be rebased. Now commits with an empty message do not cause rebasing to halt.
See also INCOMPATIBLE OPTIONS below.
--skip
Restart the rebasing process by skipping the current patch.
--edit-todo
Edit the todo list during an interactive rebase.
--show-current-patch
Show the current patch in an interactive rebase or when rebase is stopped because of conflicts. This is the equivalent of git show REBASE_HEAD.
-m
--merge
Use merging strategies to rebase. When the recursive (default) merge strategy is used, this allows rebase to be aware of renames on the upstream side. This is the default.
Note that a rebase merge works by replaying each commit from the working branch on top of the <upstream> branch. Because of this, when a merge conflict happens, the side reported as ours is the so-far rebased series, starting with <upstream>, and theirs is the working branch. In other words, the sides are swapped.
See also INCOMPATIBLE OPTIONS below.
-s <strategy>
--strategy=<strategy>
Use the given merge strategy. If there is no -s option git merge-recursive is used instead. This implies --merge.
Because git rebase replays each commit from the working branch on top of the <upstream> branch using the given strategy, using the ours strategy simply empties all patches from the <branch>, which makes little sense.
See also INCOMPATIBLE OPTIONS below.
-X <strategy-option>
--strategy-option=<strategy-option>
Pass the <strategy-option> through to the merge strategy. This implies --merge and, if no strategy has been specified, -s recursive. Note the reversal of ours and theirs as noted above for the -m option.
See also INCOMPATIBLE OPTIONS below.
--rerere-autoupdate
--no-rerere-autoupdate
Allow the rerere mechanism to update the index with the result of auto-conflict resolution if possible.
-S[<keyid>]
--gpg-sign[=<keyid>]
--no-gpg-sign
GPG-sign commits. The keyid argument is optional and defaults to the committer identity; if specified, it must be stuck to the option without a space. --no-gpg-sign is useful to countermand both commit.gpgSign configuration variable, and earlier --gpg-sign.
-q
--quiet
Be quiet. Implies --no-stat.
-v
--verbose
Be verbose. Implies --stat.
--stat
Show a diffstat of what changed upstream since the last rebase. The diffstat is also controlled by the configuration option rebase.stat.
-n
--no-stat
Do not show a diffstat as part of the rebase process.
--no-verify
This option bypasses the pre-rebase hook. See also githooks[5].
--verify
Allows the pre-rebase hook to run, which is the default. This option can be used to override --no-verify. See also githooks[5].
-C<n>
Ensure at least <n> lines of surrounding context match before and after each change. When fewer lines of surrounding context exist they all must match. By default no context is ever ignored. Implies --apply.
See also INCOMPATIBLE OPTIONS below.
--no-ff
--force-rebase
-f
Individually replay all rebased commits instead of fast-forwarding over the unchanged ones. This ensures that the entire history of the rebased branch is composed of new commits.
You may find this helpful after reverting a topic branch merge, as this option recreates the topic branch with fresh commits so it can be remerged successfully without needing to "revert the reversion" (see the revert-a-faulty-merge How-To for details).
--fork-point
--no-fork-point
Use reflog to find a better common ancestor between <upstream> and <branch> when calculating which commits have been introduced by <branch>.
When --fork-point is active, fork_point will be used instead of <upstream> to calculate the set of commits to rebase, where fork_point is the result of git merge-base --fork-point <upstream> <branch> command (see git-merge-base[1]). If fork_point ends up being empty, the <upstream> will be used as a fallback.
If <upstream> is given on the command line, then the default is --no-fork-point, otherwise the default is --fork-point.
If your branch was based on <upstream> but <upstream> was rewound and your branch contains commits which were dropped, this option can be used with --keep-base in order to drop those commits from your branch.
See also INCOMPATIBLE OPTIONS below.
--ignore-whitespace
--whitespace=<option>
These flags are passed to the git apply program (see git-apply[1]) that applies the patch. Implies --apply.
See also INCOMPATIBLE OPTIONS below.
--committer-date-is-author-date
--ignore-date
These flags are passed to git am to easily change the dates of the rebased commits (see git-am[1]).
See also INCOMPATIBLE OPTIONS below.
--signoff
Add a Signed-off-by: trailer to all the rebased commits. Note that if --interactive is given then only commits marked to be picked, edited or reworded will have the trailer added.
See also INCOMPATIBLE OPTIONS below.
-i
--interactive
Make a list of the commits which are about to be rebased. Let the user edit that list before rebasing. This mode can also be used to split commits (see SPLITTING COMMITS below).
The commit list format can be changed by setting the configuration option rebase.instructionFormat. A customized instruction format will automatically have the long commit hash prepended to the format.
See also INCOMPATIBLE OPTIONS below.
-r
--rebase-merges[=(rebase-cousins|no-rebase-cousins)]
By default, a rebase will simply drop merge commits from the todo list, and put the rebased commits into a single, linear branch. With --rebase-merges, the rebase will instead try to preserve the branching structure within the commits that are to be rebased, by recreating the merge commits. Any resolved merge conflicts or manual amendments in these merge commits will have to be resolved/re-applied manually.
By default, or when no-rebase-cousins was specified, commits which do not have <upstream> as direct ancestor will keep their original branch point, i.e. commits that would be excluded by git-log[1]'s --ancestry-path option will keep their original ancestry by default. If the rebase-cousins mode is turned on, such commits are instead rebased onto <upstream> (or <onto>, if specified).
The --rebase-merges mode is similar in spirit to the deprecated --preserve-merges but works with interactive rebases, where commits can be reordered, inserted and dropped at will.
It is currently only possible to recreate the merge commits using the recursive merge strategy; Different merge strategies can be used only via explicit exec git merge -s <strategy> [...] commands.
See also REBASING MERGES and INCOMPATIBLE OPTIONS below.
-p
--preserve-merges
[DEPRECATED: use --rebase-merges instead] Recreate merge commits instead of flattening the history by replaying commits a merge commit introduces. Merge conflict resolutions or manual amendments to merge commits are not preserved.
This uses the --interactive machinery internally, but combining it with the --interactive option explicitly is generally not a good idea unless you know what you are doing (see BUGS below).
See also INCOMPATIBLE OPTIONS below.
-x <cmd>
--exec <cmd>
Append "exec <cmd>" after each line creating a commit in the final history. <cmd> will be interpreted as one or more shell commands. Any command that fails will interrupt the rebase, with exit code 1.
You may execute several commands by either using one instance of --exec with several commands:
git rebase -i --exec "cmd1 && cmd2 && ..."
or by giving more than one --exec:
git rebase -i --exec "cmd1" --exec "cmd2" --exec ...
If --autosquash is used, "exec" lines will not be appended for the intermediate commits, and will only appear at the end of each squash/fixup series.
This uses the --interactive machinery internally, but it can be run without an explicit --interactive.
See also INCOMPATIBLE OPTIONS below.
--root
Rebase all commits reachable from <branch>, instead of limiting them with an <upstream>. This allows you to rebase the root commit(s) on a branch. When used with --onto, it will skip changes already contained in <newbase> (instead of <upstream>) whereas without --onto it will operate on every change. When used together with both --onto and --preserve-merges, all root commits will be rewritten to have <newbase> as parent instead.
See also INCOMPATIBLE OPTIONS below.
--autosquash
--no-autosquash
When the commit log message begins with "squash! …" (or "fixup! …"), and there is already a commit in the todo list that matches the same ..., automatically modify the todo list of rebase -i so that the commit marked for squashing comes right after the commit to be modified, and change the action of the moved commit from pick to squash (or fixup). A commit matches the ... if the commit subject matches, or if the ... refers to the commit’s hash. As a fall-back, partial matches of the commit subject work, too. The recommended way to create fixup/squash commits is by using the --fixup/--squash options of git-commit[1].
If the --autosquash option is enabled by default using the configuration variable rebase.autoSquash, this option can be used to override and disable this setting.
See also INCOMPATIBLE OPTIONS below.
--autostash
--no-autostash
Automatically create a temporary stash entry before the operation begins, and apply it after the operation ends. This means that you can run rebase on a dirty worktree. However, use with care: the final stash application after a successful rebase might result in non-trivial conflicts.
--reschedule-failed-exec
--no-reschedule-failed-exec
Automatically reschedule exec commands that failed. This only makes sense in interactive mode (or when an --exec option was provided).`
// resetFlagsStr holds the git-reset(1) man-page descriptions of the reset
// mode flags (--soft/--mixed/--hard/--merge/--keep/--[no-]recurse-submodules),
// presumably surfaced as in-app help text — confirm with callers.
const resetFlagsStr = `--soft
Does not touch the index file or the working tree at all (but resets the head to <commit>, just like all modes do). This leaves all your changed files "Changes to be committed", as git status would put it.
--mixed
Resets the index but not the working tree (i.e., the changed files are preserved but not marked for commit) and reports what has not been updated. This is the default action.
If -N is specified, removed paths are marked as intent-to-add (see git-add[1]).
--hard
Resets the index and working tree. Any changes to tracked files in the working tree since <commit> are discarded.
--merge
Resets the index and updates the files in the working tree that are different between <commit> and HEAD, but keeps those which are different between the index and working tree (i.e. which have changes which have not been added). If a file that is different between <commit> and the index has unstaged changes, reset is aborted.
In other words, --merge does something like a git read-tree -u -m <commit>, but carries forward unmerged index entries.
--keep
Resets index entries and updates files in the working tree that are different between <commit> and HEAD. If a file that is different between <commit> and HEAD has local changes, reset is aborted.
--[no-]recurse-submodules
When the working tree is updated, using --recurse-submodules will also recursively reset the working tree of all active submodules according to the commit recorded in the superproject, also setting the submodules' HEAD to be detached at that commit.` | cmd/flags_man_pages.go | 0.683631 | 0.568655 | flags_man_pages.go | starcoder |
package data
import (
"fmt"
"time"
)
// Condition defines parameters to look for in a point or a schedule.
// Exactly which field group applies depends on ConditionType (point value
// rule vs. schedule rule), per the grouping comments below.
type Condition struct {
	// general parameters
	ID            string  // node ID of this condition
	Description   string  // human-readable label
	ConditionType string  // selects point-value vs. schedule semantics
	MinTimeActive float64 // minimum time the condition must hold (units not shown here — confirm)
	Active        bool    // current evaluated state of the condition
	// used with point value rules
	NodeID         string
	PointType      string
	PointID        string
	PointIndex     int
	PointValueType string
	Operator       string
	PointValue     float64
	PointTextValue string
	// used with schedule rules
	StartTime string
	EndTime   string
	Weekdays  []time.Weekday
}
// String renders a one-line debug summary of the condition.
func (c Condition) String() string {
	return fmt.Sprintf("  COND: %v, V:%v, A:%v\n", c.Description, c.PointValue, c.Active)
}
// Action defines actions that can be taken if a rule is active.
// Template can optionally be used to customize the message that is sent and
// uses Io Type or IDs to fill in the values. Example might be:
// JamMonitoring: Alert: {{ description }} is in ALARM state with tank level of {{ tankLevel }}.
type Action struct {
	ID             string // node ID of this action
	Description    string // human-readable label
	Action         string // action type discriminator
	NodeID         string // target node the action operates on
	PointType      string
	PointValueType string
	PointValue     float64
	PointTextValue string
	PointChannel   int
	PointDevice    string
	PointFilePath  string
}
// String renders a one-line debug summary of the action.
func (a Action) String() string {
	return fmt.Sprintf("  ACTION: %v, %v\n", a.Description, a.PointValue)
}
// RuleConfig contains parts of the rule that a users changes
// (currently empty — presumably a placeholder; confirm intended fields).
type RuleConfig struct {
}

// RuleState contains parts of a rule that the system changes
type RuleState struct {
	Active     bool      `json:"active"`     // whether the rule is currently active
	LastAction time.Time `json:"lastAction"` // timestamp of the most recent action (per field name; confirm)
}
// Rule defines a conditions and actions that are run if condition is true. Global indicates
// the rule applies to all Devices. The rule config and state is separated so we can make updates
// to the Rule without config affecting state, and state affecting config as these are typically
// done by two different entities.
type Rule struct {
	ID              string      // node ID of the rule
	Description     string      // human-readable label
	Active          bool        // whether the rule is currently firing
	Conditions      []Condition // all conditions evaluated for this rule
	Actions         []Action    // actions run when the rule is active
	ActionsInactive []Action    // actions run when the rule is inactive
}
// String renders the rule, its conditions, and its actions as a
// multi-line debug string.
func (r Rule) String() string {
	out := fmt.Sprintf("Rule: %v\n", r.Description)
	out += fmt.Sprintf("  active: %v\n", r.Active)
	for _, cond := range r.Conditions {
		out += cond.String()
	}
	for _, act := range r.Actions {
		out += act.String()
	}
	return out
}
// NodeToRule converts nodes that make up a rule to a node
// (i.e. assembles a Rule value from the rule node itself plus its
// condition, action, and inactive-action child nodes). The error result
// is currently always nil.
func NodeToRule(ruleNode NodeEdge, conditionNodes, actionNodes, actionInactiveNodes []NodeEdge) (*Rule, error) {
	ret := &Rule{}
	ret.ID = ruleNode.ID
	// Rule-level points: description and active flag.
	for _, p := range ruleNode.Points {
		switch p.Type {
		case PointTypeDescription:
			ret.Description = p.Text
		case PointTypeActive:
			ret.Active = FloatToBool(p.Value)
		}
	}
	// Decode each condition node into a Condition.
	for _, cond := range conditionNodes {
		var newCond Condition
		newCond.ID = cond.ID
		// Default when no PointIndex point is present — presumably means
		// "unset"; confirm against consumers of Condition.PointIndex.
		newCond.PointIndex = -1
		for _, p := range cond.Points {
			switch p.Type {
			case PointTypeDescription:
				newCond.Description = p.Text
			case PointTypeConditionType:
				newCond.ConditionType = p.Text
			case PointTypeID:
				newCond.NodeID = p.Text
			case PointTypePointType:
				newCond.PointType = p.Text
			case PointTypePointID:
				newCond.PointID = p.Text
			case PointTypePointIndex:
				newCond.PointIndex = int(p.Value)
			case PointTypeValueType:
				newCond.PointValueType = p.Text
			case PointTypeOperator:
				newCond.Operator = p.Text
			case PointTypeValue:
				newCond.PointValue = p.Value
			case PointTypeMinActive:
				newCond.MinTimeActive = p.Value
			case PointTypeActive:
				newCond.Active = FloatToBool(p.Value)
			case PointTypeStart:
				newCond.StartTime = p.Text
			case PointTypeEnd:
				newCond.EndTime = p.Text
			case PointTypeWeekday:
				// One weekday point per day; a positive value enables the
				// day, whose index is carried in p.Index.
				if p.Value > 0 {
					newCond.Weekdays = append(newCond.Weekdays, time.Weekday(p.Index))
				}
			}
		}
		ret.Conditions = append(ret.Conditions, newCond)
	}
	// Shared decoder for both active and inactive action nodes.
	nodeToAction := func(n NodeEdge) Action {
		var newAct Action
		newAct.ID = n.ID
		for _, p := range n.Points {
			switch p.Type {
			case PointTypeDescription:
				newAct.Description = p.Text
			case PointTypeActionType:
				newAct.Action = p.Text
			case PointTypeID:
				newAct.NodeID = p.Text
			case PointTypePointType:
				newAct.PointType = p.Text
			case PointTypeValueType:
				newAct.PointValueType = p.Text
			case PointTypeValue:
				// A value point carries both numeric and text payloads.
				newAct.PointValue = p.Value
				newAct.PointTextValue = p.Text
			case PointTypeChannel:
				newAct.PointChannel = int(p.Value)
			case PointTypeDevice:
				newAct.PointDevice = p.Text
			case PointTypeFilePath:
				newAct.PointFilePath = p.Text
			}
		}
		return newAct
	}
	for _, act := range actionNodes {
		ret.Actions = append(ret.Actions, nodeToAction(act))
	}
	for _, act := range actionInactiveNodes {
		ret.ActionsInactive = append(ret.ActionsInactive, nodeToAction(act))
	}
	return ret, nil
} | data/rule.go | 0.726814 | 0.475484 | rule.go | starcoder |
package roman_kachanovsky
// Node is a single binary-search-tree node holding an int value and
// links to its left (smaller) and right (larger) subtrees.
type Node struct {
	left  *Node
	right *Node
	value int
}

// Tree is a binary search tree tracking its root node and element count.
type Tree struct {
	root *Node
	size int
}
// Size returns the number of values recorded in the tree.
func (tree *Tree) Size() int {
	return tree.size
}

// Root returns the root node, or nil for an empty tree.
func (tree *Tree) Root() *Node {
	return tree.root
}
// NewTree returns an empty binary search tree.
func NewTree() *Tree {
	return &Tree{}
}
// insert descends from root and attaches node at the correct leaf
// position. Values equal to an existing node are silently dropped.
func (root *Node) insert(node *Node) {
	switch {
	case node.value > root.value:
		if root.right == nil {
			root.right = node
			return
		}
		root.right.insert(node)
	case node.value < root.value:
		if root.left == nil {
			root.left = node
			return
		}
		root.left.insert(node)
	}
}
// Insert adds value to the tree. Duplicate values are not stored.
// Bug fix: the original incremented size unconditionally even though
// Node.insert silently drops duplicates, so Size() drifted from the
// actual node count after inserting an existing value.
func (tree *Tree) Insert(value int) {
	if tree.root == nil {
		tree.root = &Node{nil, nil, value}
		tree.size++
		return
	}
	// Skip duplicates so the size counter stays accurate.
	if found, _ := search(tree.root, value); found {
		return
	}
	tree.root.insert(&Node{nil, nil, value})
	tree.size++
}
// search walks the subtree rooted at root looking for value and returns
// whether it was found together with the matching node (nil when absent).
func search(root *Node, value int) (bool, *Node) {
	for root != nil {
		switch {
		case value == root.value:
			return true, root
		case value > root.value:
			root = root.right
		default:
			root = root.left
		}
	}
	return false, nil
}
// Exists reports whether value is present in the tree and returns the
// matching node when found (nil otherwise).
func (tree *Tree) Exists(value int) (bool, *Node) {
	return search(tree.root, value)
}
// minValue returns the smallest value in the subtree rooted at node,
// i.e. the value of its left-most descendant. The caller must pass a
// non-nil node.
func minValue(node *Node) int {
	if node.left == nil {
		return node.value
	}
	// Bug fix: descend the LEFT spine. The original recursed into
	// node.right, which returns a non-minimal value and breaks the
	// successor-replacement step in del.
	return minValue(node.left)
}
// changeLinks splices node out of the tree by redirecting parent's child
// link to node's remaining child (left preferred, else right). It is only
// correct when node has at most one child; del handles the two-child case
// by successor-value replacement before reaching here.
func changeLinks(parent *Node, node *Node) {
	if parent.left == node {
		if node.left != nil {
			parent.left = node.left
		} else {
			parent.left = node.right
		}
	} else if parent.right == node {
		if node.left != nil {
			parent.right = node.left
		} else {
			parent.right = node.right
		}
	}
}
// del removes the node holding value from the subtree rooted at node,
// where parent is node's parent. It returns true when a node was removed.
// node must be non-nil.
func del(node *Node, parent *Node, value int) bool {
	switch {
	case node.value == value:
		if node.left != nil && node.right != nil {
			// Two children: copy the in-order successor's value into this
			// node, then delete that value from the right subtree.
			node.value = minValue(node.right)
			return del(node.right, node, node.value)
		}
		// Zero or one child: splice the node out of the tree.
		changeLinks(parent, node)
		return true
	case node.value > value:
		if node.left == nil {
			return false
		}
		// Bug fix: keep searching for the requested value. The original
		// recursed with node.value, silently changing the search target
		// and deleting the wrong node (or nothing).
		return del(node.left, node, value)
	case node.value < value:
		if node.right == nil {
			return false
		}
		// Bug fix: same as above — pass value, not node.value.
		return del(node.right, node, value)
	}
	return false
}
func (tree *Tree) Delete(value int) bool {
res, _ := tree.Exists(value)
if !res || tree.root == nil {
return false
}
if tree.root.value == value {
tmpRoot := &Node{nil, nil, 0}
tmpRoot.left = tree.root
res := del(tree.root, tmpRoot, value)
tree.root = tmpRoot.left
if res {
tree.size--
}
return res
}
if del(tree.root.left, tree.root, value) || del(tree.root.right, tree.root, value) {
tree.size--
return true
}
return false
} | Binary_Search_Tree/Go/roman_kachanovsky/binary_search_tree.go | 0.742982 | 0.490724 | binary_search_tree.go | starcoder |
package matrix
import (
"github.com/pingcap/tidb-dashboard/pkg/keyvisual/decorator"
)
// Axis stores consecutive buckets. Each bucket has StartKey, EndKey, and some statistics. The EndKey of each bucket is
// the StartKey of its next bucket. The actual data structure is stored in columns. Therefore satisfies:
// len(Keys) == len(ValuesList[i]) + 1. In particular, ValuesList[0] is the base column.
type Axis struct {
	Keys       []string   // bucket boundaries, shared by every column
	ValuesList [][]uint64 // one statistics column per tracked value; index 0 is the base column
}
// CreateAxis validates keys and valuesList and assembles them into an
// Axis. It panics when fewer than two keys are given, when valuesList is
// empty, or when any column does not hold exactly len(keys)-1 entries.
func CreateAxis(keys []string, valuesList [][]uint64) Axis {
	if len(keys) <= 1 {
		panic("Keys length must be greater than 1")
	}
	if len(valuesList) == 0 {
		panic("ValuesList length must be greater than 0")
	}
	for _, column := range valuesList {
		if len(column)+1 != len(keys) {
			panic("Keys length must be equal to Values length + 1")
		}
	}
	return Axis{Keys: keys, ValuesList: valuesList}
}
// CreateEmptyAxis constructs a minimal Axis covering [startKey, endKey]
// with valuesListLen all-zero columns. Note: every column aliases the
// same single zero bucket, matching the original behavior.
func CreateEmptyAxis(startKey, endKey string, valuesListLen int) Axis {
	zero := []uint64{0}
	valuesList := make([][]uint64, valuesListLen)
	for i := 0; i < valuesListLen; i++ {
		valuesList[i] = zero
	}
	return CreateAxis([]string{startKey, endKey}, valuesList)
}
// Shrink divides every statistical value in every column by ratio
// (integer division), scaling the whole Axis down in place.
func (axis *Axis) Shrink(ratio uint64) {
	for j := range axis.ValuesList {
		column := axis.ValuesList[j]
		for i, v := range column {
			column[i] = v / ratio
		}
	}
}
// Range returns a sub Axis with specified range.
// KeysRange locates the overlapping key span; when it reports no overlap,
// an all-zero Axis covering [startKey, endKey] is returned instead.
// NOTE(review): exact KeysRange contract is defined elsewhere in this
// package — assumed start/end are slice indices into Keys; confirm.
func (axis *Axis) Range(startKey string, endKey string) Axis {
	start, end, ok := KeysRange(axis.Keys, startKey, endKey)
	if !ok {
		return CreateEmptyAxis(startKey, endKey, len(axis.ValuesList))
	}
	keys := axis.Keys[start:end]
	valuesList := make([][]uint64, len(axis.ValuesList))
	for i := range valuesList {
		// A column holds one fewer entry than Keys, hence end-1 here.
		valuesList[i] = axis.ValuesList[i][start : end-1]
	}
	return CreateAxis(keys, valuesList)
}
// Focus uses the base column as the chunk for the Focus operation to obtain the partitioning scheme, and uses this to
// reduce other columns.
func (axis *Axis) Focus(labeler decorator.Labeler, threshold uint64, ratio int, target int) Axis {
	// Already at or below the requested bucket count — nothing to do.
	if target >= len(axis.Keys)-1 {
		return *axis
	}
	// Derive the merged key partitioning from the base column only.
	baseChunk := createChunk(axis.Keys, axis.ValuesList[0])
	newChunk := baseChunk.Focus(labeler, threshold, ratio, target, MergeColdLogicalRange)
	valuesListLen := len(axis.ValuesList)
	newValuesList := make([][]uint64, valuesListLen)
	newValuesList[0] = newChunk.Values
	// Project every remaining column onto the same new key set.
	for i := 1; i < valuesListLen; i++ {
		baseChunk.SetValues(axis.ValuesList[i])
		newValuesList[i] = baseChunk.Reduce(newChunk.Keys).Values
	}
	return CreateAxis(newChunk.Keys, newValuesList)
}
// Divide uses the base column as the chunk for the Divide operation to obtain the partitioning scheme, and uses this to
// reduce other columns.
func (axis *Axis) Divide(labeler decorator.Labeler, target int) Axis {
	// Already at or below the requested bucket count — nothing to do.
	if target >= len(axis.Keys)-1 {
		return *axis
	}
	// Derive the merged key partitioning from the base column only.
	baseChunk := createChunk(axis.Keys, axis.ValuesList[0])
	newChunk := baseChunk.Divide(labeler, target, MergeColdLogicalRange)
	valuesListLen := len(axis.ValuesList)
	newValuesList := make([][]uint64, valuesListLen)
	newValuesList[0] = newChunk.Values
	// Project every remaining column onto the same new key set.
	for i := 1; i < valuesListLen; i++ {
		baseChunk.SetValues(axis.ValuesList[i])
		newValuesList[i] = baseChunk.Reduce(newChunk.Keys).Values
	}
	return CreateAxis(newChunk.Keys, newValuesList)
}
// FocusMode selects how chunk.Focus post-processes its result.
type FocusMode int

const (
	// NotMergeLogicalRange leaves the focused buckets as-is.
	NotMergeLogicalRange FocusMode = iota
	// MergeColdLogicalRange additionally merges low-traffic logical
	// ranges when the focused result still exceeds the target size.
	MergeColdLogicalRange
)

// chunk pairs one values column with its bucket keys; it maintains the
// same len(Keys) == len(Values)+1 invariant as Axis.
type chunk struct {
	// Keys and ValuesList[i] from Axis
	Keys   []string
	Values []uint64
}
// createChunk validates keys/values and wraps them in a chunk. It panics
// unless len(keys) >= 2 and len(keys) == len(values)+1.
func createChunk(keys []string, values []uint64) chunk {
	if len(keys) <= 1 {
		panic("Keys length must be greater than 1")
	}
	if len(keys) != len(values)+1 {
		panic("Keys length must be equal to Values length + 1")
	}
	return chunk{Keys: keys, Values: values}
}
// createZeroChunk builds a chunk over keys whose values column is all
// zeros. It panics when fewer than two keys are supplied.
func createZeroChunk(keys []string) chunk {
	if len(keys) <= 1 {
		panic("Keys length must be greater than 1")
	}
	return createChunk(keys, make([]uint64, len(keys)-1))
}
// SetValues replaces the chunk's values column. It panics when the new
// column length does not match len(Keys)-1.
func (c *chunk) SetValues(values []uint64) {
	if len(c.Keys) != len(values)+1 {
		panic("Keys length must be equal to Values length + 1")
	}
	c.Values = values
}
// SetZeroValues swaps in a freshly allocated all-zero values column of
// the same length as the current one.
func (c *chunk) SetZeroValues() {
	c.SetValues(make([]uint64, len(c.Values)))
}
// Set all values to 0.
// Unlike SetZeroValues this zeroes the existing column in place rather
// than allocating a new one (MemsetUint64 is a fill helper defined
// elsewhere in this package — confirm).
func (c *chunk) Clear() {
	MemsetUint64(c.Values, 0)
}
// Calculation
// Reduce generates new chunks based on the more sparse newKeys.
// Buckets that fall between two consecutive new keys have their values
// summed into a single new bucket. newKeys must be a valid reduction of
// c.Keys (validated by CheckReduceOf, defined elsewhere in this package).
func (c *chunk) Reduce(newKeys []string) chunk {
	keys := c.Keys
	CheckReduceOf(keys, newKeys)
	newValues := make([]uint64, len(newKeys)-1)
	// Identical key sets: a plain copy suffices.
	if len(keys) == len(newKeys) {
		copy(newValues, c.Values)
		return createChunk(newKeys, newValues)
	}
	endKeys := newKeys[1:]
	j := 0
	for i, value := range c.Values {
		// Advance to the next output bucket once its end key is reached.
		if i > 0 && equal(keys[i], endKeys[j]) {
			j++
		}
		newValues[j] += value
	}
	return createChunk(newKeys, newValues)
}
// GetFocusRows estimates the number of rows generated by executing a Focus with a specified threshold.
// It replays Focus's threshold-driven bucket splitting (a bucket closes
// once a single value or the running sum reaches threshold) but only
// counts buckets; it ignores Focus's ratio cap and labeler borders, so
// the result is an approximation.
func (c *chunk) GetFocusRows(threshold uint64) (count int) {
	start := 0
	var bucketSum uint64
	// generateBucket closes the bucket [start, end) and resets the sum.
	generateBucket := func(end int) {
		if end > start {
			count++
			start = end
			bucketSum = 0
		}
	}
	for i, value := range c.Values {
		if value >= threshold || bucketSum >= threshold {
			generateBucket(i)
		}
		bucketSum += value
	}
	// Close the trailing bucket.
	generateBucket(len(c.Values))
	return
}
// Given a `threshold`, merge the rows with less traffic,
// and merge the most `ratio` rows at a time.
// `target` is the estimated final number of rows.
// A bucket is closed when a single hot value or the running sum reaches
// threshold, when the bucket already spans ratio rows, or when the
// labeler reports a logical border between the bucket start and the
// current key.
func (c *chunk) Focus(labeler decorator.Labeler, threshold uint64, ratio int, target int, mode FocusMode) chunk {
	newKeys := make([]string, 0, target)
	newValues := make([]uint64, 0, target)
	newKeys = append(newKeys, c.Keys[0])
	start := 0
	var bucketSum uint64
	// generateBucket closes the bucket [start, end) and resets the sum.
	generateBucket := func(end int) {
		if end > start {
			newKeys = append(newKeys, c.Keys[end])
			newValues = append(newValues, bucketSum)
			start = end
			bucketSum = 0
		}
	}
	for i, value := range c.Values {
		if value >= threshold ||
			bucketSum >= threshold ||
			i-start >= ratio ||
			labeler.CrossBorder(c.Keys[start], c.Keys[i]) {
			generateBucket(i)
		}
		bucketSum += value
	}
	// Close the trailing bucket.
	generateBucket(len(c.Values))
	newChunk := createChunk(newKeys, newValues)
	// Optionally merge cold logical ranges while still above target.
	if mode == MergeColdLogicalRange && len(newValues) >= target {
		newChunk = newChunk.MergeColdLogicalRange(labeler, threshold, target)
	}
	return newChunk
}
// MergeColdLogicalRange merges adjacent logical ranges (as delimited by
// labeler.CrossBorder) whose traffic stays at or below a reduced
// threshold into single buckets; hot ranges keep their original buckets.
func (c *chunk) MergeColdLogicalRange(labeler decorator.Labeler, threshold uint64, target int) chunk {
	threshold /= 4 // TODO: This var can be adjusted
	newKeys := make([]string, 0, target)
	newValues := make([]uint64, 0, target)
	newKeys = append(newKeys, c.Keys[0])
	coldStart := 0
	coldEnd := 0
	var coldRangeSum uint64
	// mergeColdRange flushes the accumulated cold span as one bucket.
	mergeColdRange := func() {
		if coldEnd <= coldStart {
			return
		}
		newKeys = append(newKeys, c.Keys[coldEnd])
		newValues = append(newValues, coldRangeSum)
		coldStart = coldEnd
		coldRangeSum = 0
	}
	// generateRange consumes the logical range (coldEnd, end]: hot ranges
	// are copied bucket-by-bucket, cold ones accumulate for merging.
	generateRange := func(end int) {
		if end <= coldEnd {
			return
		}
		var rangeSum uint64
		for i := coldEnd; i < end; i++ {
			rangeSum += c.Values[i]
		}
		if coldRangeSum > threshold || rangeSum > threshold {
			mergeColdRange()
		}
		if rangeSum > threshold {
			// Hot range: keep its buckets unchanged.
			newKeys = append(newKeys, c.Keys[coldEnd+1:end+1]...)
			newValues = append(newValues, c.Values[coldEnd:end]...)
			coldStart = end
		} else {
			coldRangeSum += rangeSum
		}
		coldEnd = end
	}
	// Split the chunk at every logical border reported by the labeler.
	for i := range c.Values {
		if labeler.CrossBorder(c.Keys[i], c.Keys[i+1]) {
			generateRange(i + 1)
		}
	}
	generateRange(len(c.Values))
	mergeColdRange()
	return createChunk(newKeys, newValues)
}
// Divide uses binary search to find a suitable threshold, which can reduce the number of buckets of Axis to near the target.
func (c *chunk) Divide(labeler decorator.Labeler, target int, mode FocusMode) chunk {
	// Already small enough — return unchanged.
	if target >= len(c.Values) {
		return *c
	}
	// get upperThreshold: total traffic + 1 bounds any useful threshold.
	var upperThreshold uint64 = 1
	for _, value := range c.Values {
		upperThreshold += value
	}
	// search threshold: smallest value whose estimated row count does not
	// exceed targetFocusRows.
	var lowerThreshold uint64 = 1
	targetFocusRows := target * 2 / 3 // TODO: This var can be adjusted
	for lowerThreshold < upperThreshold {
		mid := (lowerThreshold + upperThreshold) >> 1
		if c.GetFocusRows(mid) > targetFocusRows {
			lowerThreshold = mid + 1
		} else {
			upperThreshold = mid
		}
	}
	threshold := lowerThreshold
	focusRows := c.GetFocusRows(threshold)
	// The ratio cap spreads the remaining rows over the leftover budget.
	ratio := len(c.Values)/(target-focusRows) + 1
	return c.Focus(labeler, threshold, ratio, target, mode)
} | pkg/keyvisual/matrix/axis.go | 0.786787 | 0.49231 | axis.go | starcoder |
package relations
import (
"bytes"
"io"
"sort"
"github.com/oleiade/lane"
"github.com/s2gatev/hcache"
)
// pair is used in the construction of the subsequential transducer.
// It contains a transducer state and the symbols that remain to be
// added to the output.
type pair struct {
	state     *tState
	remaining string
}

// equal reports whether two pairs reference the same state (by index)
// and carry the same remaining output.
func (p *pair) equal(o *pair) bool {
	return p.state.index == o.state.index && p.remaining == o.remaining
}
// pairs is a slice compatible with the hcache structure.
// The underlying values are of type *pair.
type pairs []hcache.Key

func (ps pairs) Len() int {
	return len(ps)
}

// Less orders pairs by state index first and by remaining output second.
func (ps pairs) Less(i, j int) bool {
	p1 := ps[i].(*pair)
	p2 := ps[j].(*pair)
	if p1.state.index == p2.state.index {
		return p1.remaining < p2.remaining
	}
	return p1.state.index < p2.state.index
}

func (ps pairs) Swap(i, j int) {
	ps[i], ps[j] = ps[j], ps[i]
}
// sState is a state in a subsequential transducer.
type sState struct {
	// remainingPairs are the (transducer state, pending output) pairs
	// from which this subsequential state was built.
	remainingPairs pairs
	// next maps an input symbol to the successor state.
	next map[rune]*sState
	// out maps an input symbol to the output emitted on that transition.
	out map[rune]string
	// final reports whether this state accepts.
	final bool
	// finalOut holds the outputs appended when the state is final.
	finalOut []string
	// isVisited marks states already enqueued during construction,
	// preventing duplicate population in Build.
	isVisited bool
}

// newSState returns an sState with initialized transition and output maps.
func newSState() *sState {
	return &sState{next: make(map[rune]*sState), out: make(map[rune]string)}
}
// getFinalOut collects the remaining output strings of every pair whose
// transducer state is final. An empty result means no underlying state
// is final.
func (ss *sState) getFinalOut() []string {
	var outs []string
	for _, key := range ss.remainingPairs {
		if p := key.(*pair); p.state.final {
			outs = append(outs, p.remaining)
		}
	}
	return outs
}
// lcp calculates the longest common prefix of the input rune slices,
// returned as a string. An empty input yields the empty string.
func lcp(strs [][]rune) string {
	if len(strs) == 0 {
		return ""
	}
	prefix := strs[0]
	for _, s := range strs[1:] {
		// Shrink the candidate prefix to the longest run it shares with s.
		limit := len(prefix)
		if len(s) < limit {
			limit = len(s)
		}
		i := 0
		for i < limit && prefix[i] == s[i] {
			i++
		}
		prefix = prefix[:i]
	}
	return string(prefix)
}
// RegularRelation is a struct containing the initial state of the
// subsequential transducer that recognizes the input regular relation.
type RegularRelation struct {
	// start is the transducer's initial state.
	start *sState
}
// Transduce feeds the input string into the RegularRelation transducer
// and returns all possible results from the output transducer tape.
// The boolean result reports whether the input was accepted.
func (s *RegularRelation) Transduce(input string) ([]string, bool) {
	state := s.start
	out := ""
	for _, r := range input {
		next, ok := state.next[r]
		if !ok {
			// No transition for this symbol: the input is rejected.
			return nil, false
		}
		out += state.out[r]
		state = next
	}
	if !state.final {
		return nil, false
	}
	// Append each final output suffix to the accumulated prefix.
	var results []string
	for _, suffix := range state.finalOut {
		results = append(results, out+suffix)
	}
	return results, true
}
// Build builds a RegularRelation subsequential transducer from the
// input regular relation expression, using a breadth-first determinization
// over pairs of (transducer state, pending output).
// NOTE: All operations must be explicitly written in the regular expression.
func Build(source io.Reader) (*RegularRelation, error) {
	tr, err := newTransducer(source)
	if err != nil {
		return nil, err
	}
	stateQueue := lane.NewQueue()
	// sc deduplicates subsequential states keyed by their pair sets.
	sc := hcache.New()
	initPair := &pair{state: tr.root, remaining: ""}
	start := sc.GetOrInsert(newSState(), initPair).(*sState)
	start.remainingPairs = append(start.remainingPairs, initPair)
	stateQueue.Enqueue(start)
	for stateQueue.Size() != 0 {
		state := stateQueue.Dequeue().(*sState)
		// Check if state should be final and add outputs to final output.
		if final := state.getFinalOut(); len(final) != 0 {
			state.final = true
			state.finalOut = append(state.finalOut, final...)
		}
		// Get groups of pairs that have states with same input symbol.
		withInput := make(map[rune]pairs)
		for _, p := range state.remainingPairs {
			p := p.(*pair)
			for in := range p.state.next {
				withInput[in] = append(withInput[in], p)
			}
		}
		for in, ps := range withInput {
			// Get all remaining+out strings from states with given input
			// and map them to corresponding next state.
			var outputs [][]rune
			nextStates := make(map[int]*tState)
			for _, p := range ps {
				p := p.(*pair)
				remaining := bytes.Runes([]byte(p.remaining))
				// NOTE(review): this append is only safe because bytes.Runes
				// returns a slice with cap == len, forcing a fresh backing
				// array per transition; otherwise outputs would alias.
				for _, o := range p.state.next[in] {
					out := append(remaining, bytes.Runes([]byte(o.out))...)
					outputs = append(outputs, out)
					nextStates[len(outputs)-1] = o.state
				}
			}
			// Calculate longest common prefix: it becomes the output
			// emitted on this transition.
			state.out[in] = lcp(outputs)
			// Create new pairs by removing the longest common prefix from
			// the outputs.
			var newPairs pairs
			for i, out := range outputs {
				newPairs = append(newPairs, &pair{
					state:     nextStates[i],
					remaining: string(out[len(state.out[in]):]),
				})
			}
			// Sorting gives the pair set a canonical order so the cache
			// key is order-independent.
			sort.Sort(newPairs)
			// Check if state with such state pairs exists...
			nextState := sc.GetOrInsert(newSState(), newPairs...).(*sState)
			// ...and populate the state with the new pairs if necessary.
			if !nextState.isVisited {
				nextState.isVisited = true
				nextState.remainingPairs = newPairs
				stateQueue.Enqueue(nextState)
			}
			state.next[in] = nextState
		}
	}
	return &RegularRelation{start: start}, nil
}
package pinata
import (
"bytes"
"fmt"
"strings"
)
// Stick offers methods of hitting the Pinata and extracting its goodness.
//
// Methods do not return errors individually: the first failure is latched
// on the Stick and every subsequent call is a no-op returning a zero value
// until ClearError is called.
type Stick interface {
	// Error returns the first error encountered or nil if all operations so far
	// were successful.
	Error() error
	// ClearError clears the error and returns it. If there is no error the
	// method has no effect and returns nil, otherwise it returns the error that
	// was cleared.
	ClearError() error
	// PathString gets the string value at the given path within the Pinata. The
	// last element in the path must be a string, the rest must be a
	// map[string]interface{}. The input Pinata must hold a
	// map[string]interface{} as well.
	PathString(Pinata, ...string) string
	// String returns the Pinata as a string if it is one.
	String(Pinata) string
	// IndexString gets the string value at the given index within the Pinata.
	// The input Pinata must hold a []interface{}.
	IndexString(Pinata, int) string
	// PathFloat64 gets the float64 value at the given path within the Pinata.
	// The last element in the path must be a float64, the rest must be a
	// map[string]interface{}. The input Pinata must hold a
	// map[string]interface{} as well.
	PathFloat64(Pinata, ...string) float64
	// Float64 returns the Pinata as a float64 if it is one.
	Float64(Pinata) float64
	// IndexFloat64 gets the float64 value at the given index within the Pinata.
	// The input Pinata must hold a []interface{}.
	IndexFloat64(Pinata, int) float64
	// PathBool gets the bool value at the given path within the Pinata.
	// The last element in the path must be a bool, the rest must be a
	// map[string]interface{}. The input Pinata must hold a
	// map[string]interface{} as well.
	PathBool(Pinata, ...string) bool
	// Bool returns the Pinata as a bool if it is one.
	Bool(Pinata) bool
	// IndexBool gets the bool value at the given index within the Pinata.
	// The input Pinata must hold a []interface{}.
	IndexBool(Pinata, int) bool
	// PathNil asserts nil value at the given path within the Pinata. The last
	// element in the path must be a nil, the rest must be a
	// map[string]interface{}. The input Pinata must hold a
	// map[string]interface{} as well.
	PathNil(Pinata, ...string)
	// Nil asserts the Pinata holds a nil value.
	Nil(Pinata)
	// IndexNil asserts a nil value at the given index within the Pinata. The
	// input Pinata must hold a []interface{}.
	IndexNil(Pinata, int)
	// Path gets the Pinata value at the given path within the Pinata. All
	// elements in the path must be of type map[string]interface{}. The input
	// Pinata must hold a map[string]interface{} as well.
	Path(Pinata, ...string) Pinata
	// Index gets the Pinata value at the given index within the Pinata.
	// The input Pinata must hold a []interface{}.
	Index(Pinata, int) Pinata
}
// stick is the default Stick implementation; it latches the first error
// encountered so later operations become no-ops.
type stick struct {
	err error
}

// ClearError resets the latched error and hands it back to the caller.
func (s *stick) ClearError() error {
	if s.err == nil {
		return nil
	}
	cleared := s.err
	s.err = nil
	return cleared
}

// Error reports the currently latched error, if any.
func (s *stick) Error() error {
	return s.err
}
// unsupported records an ErrorReasonIncompatibleType error with the given
// advice, chaining the calling method's name and arguments onto errCtx.
// It assumes s.err == nil; every caller returns early when an error is
// already latched.
func (s *stick) unsupported(errCtx *ErrorContext, methodName string, input func() []interface{}, advice string) {
	s.err = &Error{
		context: &ErrorContext{
			methodName: methodName,
			methodArgs: input,
			next:       errCtx,
		},
		reason: ErrorReasonIncompatibleType,
		advice: advice,
	}
}

// indexUnsupported records an incompatible-type error for an index-based
// method invoked on a non-slice pinata. Assumes s.err == nil.
func (s *stick) indexUnsupported(errCtx *ErrorContext, methodName string, index int) {
	s.err = &Error{
		context: &ErrorContext{
			methodName: methodName,
			methodArgs: func() []interface{} { return []interface{}{index} },
			next:       errCtx,
		},
		reason: ErrorReasonIncompatibleType,
		advice: "call this method on a slice pinata",
	}
}

// pathUnsupported records an incompatible-type error for a path-based
// method invoked on a non-map pinata. Assumes s.err == nil.
func (s *stick) pathUnsupported(errCtx *ErrorContext, methodName string, path []string) {
	s.err = &Error{
		context: &ErrorContext{
			methodName: methodName,
			methodArgs: func() []interface{} { return toInterfaceSlice(path) },
			next:       errCtx,
		},
		reason: ErrorReasonIncompatibleType,
		advice: "call this method on a map pinata",
	}
}
// internalString extracts a string from p, latching an incompatible-type
// error otherwise. Assumes s.err == nil; callers return early when an
// error is already set.
func (s *stick) internalString(p Pinata, methodName string, input func() []interface{}) string {
	if _, ok := p.Map(); ok {
		s.unsupported(p.context, methodName, input, "this is a map")
		return ""
	}
	if _, ok := p.Slice(); ok {
		s.unsupported(p.context, methodName, input, "this is a slice")
		return ""
	}
	if v, ok := p.Value().(string); ok {
		return v
	}
	s.unsupported(p.context, methodName, input, "this is not a string")
	return ""
}

// internalFloat64 extracts a float64 from p, latching an incompatible-type
// error otherwise. Assumes s.err == nil.
func (s *stick) internalFloat64(p Pinata, methodName string, input func() []interface{}) float64 {
	if _, ok := p.Map(); ok {
		s.unsupported(p.context, methodName, input, "this is a map")
		return 0
	}
	if _, ok := p.Slice(); ok {
		s.unsupported(p.context, methodName, input, "this is a slice")
		return 0
	}
	if v, ok := p.Value().(float64); ok {
		return v
	}
	s.unsupported(p.context, methodName, input, "this is not a float64")
	return 0
}

// internalBool extracts a bool from p, latching an incompatible-type
// error otherwise. Assumes s.err == nil.
func (s *stick) internalBool(p Pinata, methodName string, input func() []interface{}) bool {
	if _, ok := p.Map(); ok {
		s.unsupported(p.context, methodName, input, "this is a map")
		return false
	}
	if _, ok := p.Slice(); ok {
		s.unsupported(p.context, methodName, input, "this is a slice")
		return false
	}
	if v, ok := p.Value().(bool); ok {
		return v
	}
	s.unsupported(p.context, methodName, input, "this is not a bool")
	return false
}
// internalNil asserts that p holds a nil value, latching an
// incompatible-type error with type-specific advice otherwise.
// Assumes s.err == nil; callers return early when an error is already set.
//
// Fix: the map and slice branches previously fell through to the generic
// "this is not nil" call, overwriting the more specific advice. The added
// returns make the behavior consistent with internalString/Float64/Bool.
func (s *stick) internalNil(p Pinata, methodName string, input func() []interface{}) {
	if p.Value() == nil {
		return
	}
	if _, ok := p.Map(); ok {
		s.unsupported(p.context, methodName, input, "this is a map")
		return
	}
	if _, ok := p.Slice(); ok {
		s.unsupported(p.context, methodName, input, "this is a slice")
		return
	}
	s.unsupported(p.context, methodName, input, "this is not nil")
}
// String returns the Pinata as a string if it is one; a no-op once an
// error is latched.
func (s *stick) String(p Pinata) string {
	if s.err == nil {
		return s.internalString(p, "String", func() []interface{} { return nil })
	}
	return ""
}

// Bool returns the Pinata as a bool if it is one; a no-op once an error
// is latched.
func (s *stick) Bool(p Pinata) bool {
	if s.err == nil {
		return s.internalBool(p, "Bool", func() []interface{} { return nil })
	}
	return false
}

// Float64 returns the Pinata as a float64 if it is one; a no-op once an
// error is latched.
func (s *stick) Float64(p Pinata) float64 {
	if s.err == nil {
		return s.internalFloat64(p, "Float64", func() []interface{} { return nil })
	}
	return 0
}

// Nil asserts the Pinata holds a nil value; a no-op once an error is latched.
func (s *stick) Nil(p Pinata) {
	if s.err == nil {
		s.internalNil(p, "Nil", func() []interface{} { return nil })
	}
}
// internalIndex retrieves the element at index from a slice pinata,
// latching an error for non-slice pinatas or out-of-range indexes.
// Assumes s.err == nil; callers return early when an error is already set.
func (s *stick) internalIndex(p Pinata, methodName string, index int) Pinata {
	if slice, ok := p.Slice(); ok {
		if index < 0 || index >= len(slice) {
			// NOTE(review): for an empty slice the advice reads
			// "specify an index from 0 to -1" — arguably confusing.
			s.err = &Error{
				context: &ErrorContext{
					methodName: methodName,
					methodArgs: func() []interface{} { return []interface{}{index} },
					next:       p.context,
				},
				reason: ErrorReasonInvalidInput,
				advice: fmt.Sprintf("specify an index from 0 to %d", len(slice)-1),
			}
			return Pinata{}
		}
		// Attach this access to the context chain of the returned pinata.
		return newPinataWithContext(slice[index], &ErrorContext{
			methodName: methodName,
			methodArgs: func() []interface{} { return []interface{}{index} },
			next:       p.context,
		})
	}
	s.indexUnsupported(p.context, methodName, index)
	return Pinata{}
}
// Index gets the Pinata value at the given index within the Pinata;
// a no-op once an error is latched.
func (s *stick) Index(p Pinata, index int) Pinata {
	if s.err != nil {
		return Pinata{}
	}
	return s.internalIndex(p, "Index", index)
}

// IndexString gets the string value at the given index within the Pinata.
func (s *stick) IndexString(p Pinata, index int) string {
	if s.err != nil {
		return ""
	}
	const methodName = "IndexString"
	pinata := s.internalIndex(p, methodName, index)
	if s.err != nil {
		return ""
	}
	// NOTE(review): restoring the parent context drops the frame added by
	// internalIndex so the final error reports this call only once —
	// presumably intentional, verify against Error.Error output.
	pinata.context = p.context
	return s.internalString(pinata, methodName, func() []interface{} { return []interface{}{index} })
}

// IndexFloat64 gets the float64 value at the given index within the Pinata.
func (s *stick) IndexFloat64(p Pinata, index int) float64 {
	if s.err != nil {
		return 0
	}
	const methodName = "IndexFloat64"
	pinata := s.internalIndex(p, methodName, index)
	if s.err != nil {
		return 0
	}
	pinata.context = p.context
	return s.internalFloat64(pinata, methodName, func() []interface{} { return []interface{}{index} })
}

// IndexBool gets the bool value at the given index within the Pinata.
func (s *stick) IndexBool(p Pinata, index int) bool {
	if s.err != nil {
		return false
	}
	const methodName = "IndexBool"
	pinata := s.internalIndex(p, methodName, index)
	if s.err != nil {
		return false
	}
	pinata.context = p.context
	return s.internalBool(pinata, methodName, func() []interface{} { return []interface{}{index} })
}

// IndexNil asserts a nil value at the given index within the Pinata.
func (s *stick) IndexNil(p Pinata, index int) {
	if s.err != nil {
		return
	}
	const methodName = "IndexNil"
	pinata := s.internalIndex(p, methodName, index)
	if s.err != nil {
		return
	}
	pinata.context = p.context
	s.internalNil(pinata, methodName, func() []interface{} { return []interface{}{index} })
}
// internalPath walks the given key path through nested maps, returning the
// pinata at the final key. Every intermediate key must resolve to a
// map[string]interface{}; errors are latched for non-map input, an empty
// path, a non-map intermediate value, or a missing key.
// Assumes s.err == nil; callers return early when an error is already set.
func (s *stick) internalPath(p Pinata, methodName string, path ...string) Pinata {
	contents, ok := p.Map()
	if !ok {
		s.pathUnsupported(p.context, methodName, path)
		return Pinata{}
	}
	if len(path) == 0 {
		s.err = &Error{
			context: &ErrorContext{
				methodName: methodName,
				methodArgs: func() []interface{} { return toInterfaceSlice(path) },
				next:       p.context,
			},
			reason: ErrorReasonInvalidInput,
			advice: "specify a path",
		}
		return Pinata{}
	}
	// Descend through all but the last key; each must hold a nested map.
	for i := 0; i < len(path)-1; i++ {
		current := path[i]
		if v, ok := contents[current]; ok {
			if v, ok := v.(map[string]interface{}); ok {
				contents = v
			} else {
				s.err = &Error{
					context: &ErrorContext{
						methodName: methodName,
						methodArgs: func() []interface{} { return toInterfaceSlice(path) },
						next:       p.context,
					},
					reason: ErrorReasonIncompatibleType,
					// The advice names the partial path that failed.
					advice: fmt.Sprintf(`"%s" does not hold a pinata`, strings.Join(path[:i+1], `", "`)),
				}
				return Pinata{}
			}
		} else {
			s.err = &Error{
				context: &ErrorContext{
					methodName: methodName,
					methodArgs: func() []interface{} { return toInterfaceSlice(path) },
					next:       p.context,
				},
				reason: ErrorReasonNotFound,
				advice: fmt.Sprintf(`"%s" does not exist`, strings.Join(path[:i+1], `", "`)),
			}
			return Pinata{}
		}
	}
	// The final key may hold any value type; wrap it with this access's context.
	if v, ok := contents[path[len(path)-1]]; ok {
		return newPinataWithContext(v, &ErrorContext{
			methodName: methodName,
			methodArgs: func() []interface{} { return toInterfaceSlice(path) },
			next:       p.context,
		})
	}
	s.err = &Error{
		context: &ErrorContext{
			methodName: methodName,
			methodArgs: func() []interface{} { return toInterfaceSlice(path) },
			next:       p.context,
		},
		reason: ErrorReasonNotFound,
		advice: fmt.Sprintf(`"%s" does not exist`, strings.Join(path, `", "`)),
	}
	return Pinata{}
}
// Path gets the Pinata value at the given path within the Pinata;
// a no-op once an error is latched.
func (s *stick) Path(p Pinata, path ...string) Pinata {
	if s.err != nil {
		return Pinata{}
	}
	return s.internalPath(p, "Path", path...)
}

// PathString gets the string value at the given path within the Pinata.
func (s *stick) PathString(p Pinata, path ...string) string {
	if s.err != nil {
		return ""
	}
	const methodName = "PathString"
	pinata := s.internalPath(p, methodName, path...)
	if s.err != nil {
		return ""
	}
	// NOTE(review): restoring the parent context drops the frame added by
	// internalPath so the final error reports this call only once —
	// presumably intentional, verify against Error.Error output.
	pinata.context = p.context
	return s.internalString(pinata, methodName, func() []interface{} { return toInterfaceSlice(path) })
}

// PathFloat64 gets the float64 value at the given path within the Pinata.
func (s *stick) PathFloat64(p Pinata, path ...string) float64 {
	if s.err != nil {
		return 0
	}
	const methodName = "PathFloat64"
	pinata := s.internalPath(p, methodName, path...)
	if s.err != nil {
		return 0
	}
	pinata.context = p.context
	return s.internalFloat64(pinata, methodName, func() []interface{} { return toInterfaceSlice(path) })
}

// PathBool gets the bool value at the given path within the Pinata.
func (s *stick) PathBool(p Pinata, path ...string) bool {
	if s.err != nil {
		return false
	}
	const methodName = "PathBool"
	pinata := s.internalPath(p, methodName, path...)
	if s.err != nil {
		return false
	}
	pinata.context = p.context
	return s.internalBool(pinata, methodName, func() []interface{} { return toInterfaceSlice(path) })
}

// PathNil asserts a nil value at the given path within the Pinata.
func (s *stick) PathNil(p Pinata, path ...string) {
	if s.err != nil {
		return
	}
	const methodName = "PathNil"
	pinata := s.internalPath(p, methodName, path...)
	if s.err != nil {
		return
	}
	pinata.context = p.context
	s.internalNil(pinata, methodName, func() []interface{} { return toInterfaceSlice(path) })
}
// Pinata holds the data.
type Pinata struct {
	// context records the chain of accessor calls that produced this
	// value; used to build error messages.
	context *ErrorContext
	// value is the raw underlying data.
	value interface{}
	// mapFunc returns the value as a map when it is one; set at construction.
	mapFunc func() (map[string]interface{}, bool)
	// sliceFunc returns the value as a slice when it is one; set at construction.
	sliceFunc func() ([]interface{}, bool)
}

// Value returns the raw Pinata value.
func (p Pinata) Value() interface{} {
	return p.value
}

// Map returns the Pinata value as a map if it is one (the bool indicates
// success). A zero-valued Pinata safely reports false.
func (p Pinata) Map() (map[string]interface{}, bool) {
	if p.mapFunc != nil {
		return p.mapFunc()
	}
	return noMap()
}

// Slice returns the Pinata value as a slice if it is one (the bool indicates
// success). A zero-valued Pinata safely reports false.
func (p Pinata) Slice() ([]interface{}, bool) {
	if p.sliceFunc != nil {
		return p.sliceFunc()
	}
	return noSlice()
}
// New is a starting point for a pinata celebration: it returns a fresh
// Stick alongside a Pinata wrapping contents.
func New(contents interface{}) (Stick, Pinata) {
	return NewStick(), NewPinata(contents)
}

// NewStick returns a new Stick to hit a Pinata with.
func NewStick() Stick {
	return &stick{}
}

// NewPinata creates a new Pinata holding the specified value.
func NewPinata(contents interface{}) Pinata {
	return newPinataWithContext(contents, nil)
}

// noMap and noSlice are the shared "not that kind of value" accessors.
func noMap() (map[string]interface{}, bool) { return nil, false }

func noSlice() ([]interface{}, bool) { return nil, false }

// newPinataWithContext wraps contents in a Pinata, wiring up whichever of
// the map/slice accessors matches the dynamic type of contents.
func newPinataWithContext(contents interface{}, context *ErrorContext) Pinata {
	p := Pinata{
		value:     contents,
		mapFunc:   noMap,
		sliceFunc: noSlice,
		context:   context,
	}
	switch t := contents.(type) {
	case map[string]interface{}:
		p.mapFunc = func() (map[string]interface{}, bool) { return t, true }
	case []interface{}:
		p.sliceFunc = func() ([]interface{}, bool) { return t, true }
	}
	return p
}
// ErrorReason describes the reason for returning an Error.
type ErrorReason string

const (
	// ErrorReasonIncompatibleType indicates the contents of the Pinata is not compatible with the invoked method.
	ErrorReasonIncompatibleType ErrorReason = "incompatible type"
	// ErrorReasonNotFound indicates the input has not been found in the Pinata.
	//
	// Fix: previously declared without the ErrorReason type, which made it a
	// plain untyped string constant, inconsistent with ErrorReasonIncompatibleType.
	ErrorReasonNotFound ErrorReason = "not found"
	// ErrorReasonInvalidInput indicates the input is not in the expected range or format.
	ErrorReasonInvalidInput ErrorReason = "invalid input"
)
// ErrorContext contains information about the circumstances of an error.
type ErrorContext struct {
	// methodName is the Stick method that was invoked.
	methodName string
	// methodArgs lazily produces the arguments that method received.
	methodArgs func() []interface{}
	// next links to the context of the preceding call, if any.
	next *ErrorContext
}

// MethodName returns the name of the method that caused the error.
func (ec ErrorContext) MethodName() string {
	return ec.methodName
}

// MethodArgs returns the input parameters of the method that caused the error.
// NOTE(review): panics if methodArgs is nil; every construction site in this
// file sets it, but an externally built zero value would not.
func (ec ErrorContext) MethodArgs() []interface{} {
	return ec.methodArgs()
}

// Next gets additional context linked to this one; the bool reports
// whether such a context exists.
func (ec ErrorContext) Next() (ErrorContext, bool) {
	if ec.next != nil {
		return *ec.next, true
	}
	return ErrorContext{}, false
}
// Error is set on the Pinata when something goes wrong.
type Error struct {
	// reason classifies the failure.
	reason ErrorReason
	// context describes the chain of calls leading to the failure.
	context *ErrorContext
	// advice is a human-readable hint for fixing the problem.
	advice string
}

// Reason indicates why the error occurred.
func (p Error) Reason() ErrorReason {
	return p.reason
}

// Context returns more information about the circumstances of the error;
// the bool reports whether any context was recorded.
func (p Error) Context() (ErrorContext, bool) {
	if p.context != nil {
		return *p.context, true
	}
	return ErrorContext{}, false
}

// Advice contains a human readable hint detailing how to remedy this error.
func (p Error) Advice() string {
	return p.advice
}
// Error returns a summary of the problem, rendering each context frame as
// a pseudo call like `MethodName(arg1, arg2)` joined with " at ".
func (p Error) Error() string {
	var summaries []string
	for ctx := p.context; ctx != nil; ctx = ctx.next {
		args := ctx.MethodArgs()
		if len(args) == 0 {
			summaries = append(summaries, ctx.MethodName()+"()")
			continue
		}
		// Build a format string with one %#v verb per argument, then
		// render the arguments through it.
		var format bytes.Buffer
		_, _ = format.WriteString(ctx.MethodName())
		_ = format.WriteByte('(')
		for i := range args {
			if i > 0 {
				_, _ = format.WriteString(", ")
			}
			_, _ = format.WriteString("%#v")
		}
		_ = format.WriteByte(')')
		summaries = append(summaries, fmt.Sprintf(format.String(), args...))
	}
	return fmt.Sprintf("pinata: %s (%s) at %v", p.Reason(), p.Advice(), strings.Join(summaries, " at "))
}
// toInterfaceSlice copies a string slice into a freshly allocated
// []interface{} of the same length.
func toInterfaceSlice(c []string) []interface{} {
	out := make([]interface{}, 0, len(c))
	for _, s := range c {
		out = append(out, s)
	}
	return out
}
package fp
import ()
// MapString returns a new slice produced by applying f to each element of
// input together with its index. The input slice is not modified.
func MapString(f func(string, int) string, input []string) (output []string) {
	// Pre-size to the known result length to avoid repeated growth copies.
	output = make([]string, 0, len(input))
	for idx, data := range input {
		output = append(output, f(data, idx))
	}
	return
}
// MapInt returns a new slice produced by applying f to each element of
// input together with its index. The input slice is not modified.
func MapInt(f func(int, int) int, input []int) (output []int) {
	// Pre-size to the known result length to avoid repeated growth copies.
	output = make([]int, 0, len(input))
	for idx, data := range input {
		output = append(output, f(data, idx))
	}
	return
}

// MapInt8 returns a new slice produced by applying f to each element of
// input together with its index. The input slice is not modified.
func MapInt8(f func(int8, int) int8, input []int8) (output []int8) {
	output = make([]int8, 0, len(input))
	for idx, data := range input {
		output = append(output, f(data, idx))
	}
	return
}

// MapInt16 returns a new slice produced by applying f to each element of
// input together with its index. The input slice is not modified.
func MapInt16(f func(int16, int) int16, input []int16) (output []int16) {
	output = make([]int16, 0, len(input))
	for idx, data := range input {
		output = append(output, f(data, idx))
	}
	return
}

// MapInt32 returns a new slice produced by applying f to each element of
// input together with its index. The input slice is not modified.
func MapInt32(f func(int32, int) int32, input []int32) (output []int32) {
	output = make([]int32, 0, len(input))
	for idx, data := range input {
		output = append(output, f(data, idx))
	}
	return
}

// MapInt64 returns a new slice produced by applying f to each element of
// input together with its index. The input slice is not modified.
func MapInt64(f func(int64, int) int64, input []int64) (output []int64) {
	output = make([]int64, 0, len(input))
	for idx, data := range input {
		output = append(output, f(data, idx))
	}
	return
}
// MapUint8 returns a new slice produced by applying f to each element of
// input together with its index. The input slice is not modified.
func MapUint8(f func(uint8, int) uint8, input []uint8) (output []uint8) {
	// Pre-size to the known result length to avoid repeated growth copies.
	output = make([]uint8, 0, len(input))
	for idx, data := range input {
		output = append(output, f(data, idx))
	}
	return
}

// MapUint16 returns a new slice produced by applying f to each element of
// input together with its index. The input slice is not modified.
func MapUint16(f func(uint16, int) uint16, input []uint16) (output []uint16) {
	output = make([]uint16, 0, len(input))
	for idx, data := range input {
		output = append(output, f(data, idx))
	}
	return
}

// MapUint32 returns a new slice produced by applying f to each element of
// input together with its index. The input slice is not modified.
func MapUint32(f func(uint32, int) uint32, input []uint32) (output []uint32) {
	output = make([]uint32, 0, len(input))
	for idx, data := range input {
		output = append(output, f(data, idx))
	}
	return
}

// MapUint64 returns a new slice produced by applying f to each element of
// input together with its index. The input slice is not modified.
func MapUint64(f func(uint64, int) uint64, input []uint64) (output []uint64) {
	output = make([]uint64, 0, len(input))
	for idx, data := range input {
		output = append(output, f(data, idx))
	}
	return
}
// MapFloat32 returns a new slice produced by applying f to each element of
// input together with its index. The input slice is not modified.
func MapFloat32(f func(float32, int) float32, input []float32) (output []float32) {
	// Pre-size to the known result length to avoid repeated growth copies.
	output = make([]float32, 0, len(input))
	for idx, data := range input {
		output = append(output, f(data, idx))
	}
	return
}

// MapFloat64 returns a new slice produced by applying f to each element of
// input together with its index. The input slice is not modified.
func MapFloat64(f func(float64, int) float64, input []float64) (output []float64) {
	output = make([]float64, 0, len(input))
	for idx, data := range input {
		output = append(output, f(data, idx))
	}
	return
}
// MapByte returns a new slice produced by applying f to each element of
// input together with its index. The input slice is not modified.
func MapByte(f func(byte, int) byte, input []byte) (output []byte) {
	// Pre-size to the known result length to avoid repeated growth copies.
	output = make([]byte, 0, len(input))
	for idx, data := range input {
		output = append(output, f(data, idx))
	}
	return
}
// Map returns a new slice produced by applying f to each element of input
// together with its index. The input slice is not modified.
func Map(f func(any, int) any, input []any) (output []any) {
	// Pre-size to the known result length to avoid repeated growth copies.
	output = make([]any, 0, len(input))
	for idx, data := range input {
		output = append(output, f(data, idx))
	}
	return
}
package sprite
const Font_a = `
X
X X
XXX
X X
X X`
const Font_b = `
XX
X X
XX
X X
XX`
const Font_c = `
XX
X
X
X
XX`
const Font_d = `
XX
X X
X X
X X
XXX`
const Font_e = `
XXX
X
XX
X
XXX`
const Font_f = `
XXX
X
XX
X
X`
const Font_g = `
XX
X
X
X X
XXX`
const Font_h = `
X X
X X
XXX
X X
X X`
const Font_i = `
XXX
X
X
X
XXX`
const Font_j = `
XXX
X
X
X
XX`
const Font_k = `
X X
X X
XX
X X
X X`
const Font_l = `
X
X
X
X
XXX`
const Font_m = `
X X
XXX
XXX
X X
X X`
const Font_n = `
XX
X X
X X
X X
X X`
const Font_o = `
XX
X X
X X
X X
XX`
const Font_p = `
XX
X X
XX
X
X`
const Font_q = `
XX
X X
X X
XXX
X`
const Font_r = `
XX
X X
XX
X X
X X`
const Font_s = `
XX
X
XXX
X
XX`
const Font_t = `
XXX
X
X
X
X`
const Font_u = `
X X
X X
X X
X X
XX`
const Font_v = `
X X
X X
X X
X X
X`
const Font_w = `
X X
X X
XXX
XXX
X X`
const Font_x = `
X X
X X
X
X X
X X`
const Font_y = `
X X
X X
XXX
X
XXX`
const Font_z = `
XXX
X
X
X
XXX`
const Font_0 = `
XX
X X
X X
X X
XX`
const Font_1 = `
X
XX
X
X
XXX`
const Font_2 = `
XX
X
XXX
X
XXX`
const Font_3 = `
XX
X
XX
X
XXX`
const Font_4 = `
X X
X X
XXX
X
X`
const Font_5 = `
XXX
X
XXX
X
XX`
const Font_6 = `
XX
X
XXX
X X
XX`
const Font_7 = `
XXX
X
X
X
X`
const Font_8 = `
XX
X X
XXX
X X
XX`
const Font_9 = `
XX
X X
XXX
X
XXX`
const Font_period = `
X`
const Font_comma = `
X
X`
const Font_slash = `
X
X
X
X
X`
const Font_exclamation = `
X
X
X
X`
const Font_dash = `
XXX`
const Font_percent = `
X X
X
X
X
X X`
// NewPakuFont provides a new font from based upon Paku Paku
func NewPakuFont() *Font {
m := map[rune]string{
'A': Font_a,
'B': Font_b,
'C': Font_c,
'D': Font_d,
'E': Font_e,
'F': Font_f,
'G': Font_g,
'H': Font_h,
'I': Font_i,
'J': Font_j,
'K': Font_k,
'L': Font_l,
'M': Font_m,
'N': Font_n,
'O': Font_o,
'P': Font_p,
'Q': Font_q,
'R': Font_r,
'S': Font_s,
'T': Font_t,
'U': Font_u,
'V': Font_v,
'W': Font_w,
'X': Font_x,
'Y': Font_y,
'Z': Font_z,
'a': Font_a,
'b': Font_b,
'c': Font_c,
'd': Font_d,
'e': Font_e,
'f': Font_f,
'g': Font_g,
'h': Font_h,
'i': Font_i,
'j': Font_j,
'k': Font_k,
'l': Font_l,
'm': Font_m,
'n': Font_n,
'o': Font_o,
'p': Font_p,
'q': Font_q,
'r': Font_r,
's': Font_s,
't': Font_t,
'u': Font_u,
'v': Font_v,
'w': Font_w,
'x': Font_x,
'y': Font_y,
'z': Font_z,
'0': Font_0,
'1': Font_1,
'2': Font_2,
'3': Font_3,
'4': Font_4,
'5': Font_5,
'6': Font_6,
'7': Font_7,
'8': Font_8,
'9': Font_9,
'.': Font_period,
',': Font_comma,
'/': Font_slash,
'!': Font_exclamation,
'-': Font_dash,
'%': Font_percent,
}
return NewFont(m, 4, 7)
} | font_paku.go | 0.706798 | 0.442757 | font_paku.go | starcoder |
package shp
import (
"github.com/everystreet/go-geojson/v2"
"github.com/golang/geo/r2"
)
// ShapeType represents a shape type in the shp file.
type ShapeType uint

// Valid shape types. All shapes in a single shp file must be of the same type.
const (
	// Null shapes are allowed in any shp file, regardless of the type specified in the header.
	NullType ShapeType = 0

	PointType ShapeType = 1
	PolylineType ShapeType = 3
	PolygonType ShapeType = 5
	MultiPointType ShapeType = 8

	PointZType ShapeType = 11
	PolylineZType ShapeType = 13
	PolygonZType ShapeType = 15
	MultiPointZType ShapeType = 18

	PointMType ShapeType = 21
	PolylineMType ShapeType = 23
	PolygonMType ShapeType = 25
	MultiPointMType ShapeType = 28

	MultiPatchType ShapeType = 31
)

// shapeTypeNames maps each known shape type to its display name.
var shapeTypeNames = map[ShapeType]string{
	NullType:        "Null Shape",
	PointType:       "Point",
	PolylineType:    "PolyLine",
	PolygonType:     "Polygon",
	MultiPointType:  "MultiPoint",
	PointZType:      "PointZ",
	PolylineZType:   "PolyLineZ",
	PolygonZType:    "PolygonZ",
	MultiPointZType: "MultiPointZ",
	PointMType:      "PointM",
	PolylineMType:   "PolyLineM",
	PolygonMType:    "PolygonM",
	MultiPointMType: "MultiPointM",
	MultiPatchType:  "MultiPatch",
}

// String returns the display name of the shape type, or the empty string
// for unrecognized values.
func (t ShapeType) String() string {
	return shapeTypeNames[t]
}
// Shape provides common information for all shapes of any type.
type Shape interface {
	// Type reports the shape's ShapeType.
	Type() ShapeType
	// RecordNumber reports the shape's record number.
	RecordNumber() uint32
	// Validate checks the shape against the supplied Validator.
	Validate(Validator) error
	// GeoJSONFeature converts the shape to a GeoJSON feature.
	GeoJSONFeature() *geojson.Feature
	// points exposes the raw coordinates for bounding-box computation.
	points() []r2.Point
}
// Shapes represents a collection of shapes.
type Shapes []Shape

// BoundingBox returns the bounding box that encompasses all shapes.
func (s Shapes) BoundingBox() BoundingBox {
	// Gather every point of every shape and let r2 compute the
	// enclosing rectangle.
	var points []r2.Point
	for _, shape := range s {
		points = append(points, shape.points()...)
	}
	rect := r2.RectFromPoints(points...)
	return BoundingBox{
		MinX: rect.X.Lo,
		MinY: rect.Y.Lo,
		MaxX: rect.X.Hi,
		MaxY: rect.Y.Hi,
	}
}
package sampler
import (
"time"
metrics "github.com/rcrowley/go-metrics"
)
type (
	// DurationSampler is the sampler for sampling duration. It reports
	// percentile statistics in milliseconds.
	DurationSampler struct {
		// sample stores observations in nanoseconds.
		sample metrics.Sample
	}
)
// nanoToMilli converts a value expressed in nanoseconds to milliseconds.
func nanoToMilli(f float64) float64 {
	const nanosPerMilli = 1e6
	return f / nanosPerMilli
}
// NewDurationSampler creates a DurationSampler backed by an
// exponentially-decaying reservoir sample.
func NewDurationSampler() *DurationSampler {
	return &DurationSampler{
		// Reservoir parameters follow the upstream go-metrics examples:
		// https://github.com/rcrowley/go-metrics/blob/3113b8401b8a98917cde58f8bbd42a1b1c03b1fd/sample_test.go#L65
		sample: metrics.NewExpDecaySample(1028, 0.015),
	}
}
// Update updates the sample with a new duration observation, stored in
// nanoseconds.
func (ds *DurationSampler) Update(d time.Duration) {
	ds.sample.Update(int64(d))
}
// percentileMilli returns the p-quantile of the sample, converted from
// nanoseconds to milliseconds. It centralizes the conversion previously
// repeated in every P* accessor below.
func (ds *DurationSampler) percentileMilli(p float64) float64 {
	return nanoToMilli(ds.sample.Percentile(p))
}

// P25 returns the duration in millisecond greater than 25%.
func (ds *DurationSampler) P25() float64 {
	return ds.percentileMilli(0.25)
}

// P50 returns the duration in millisecond greater than 50%.
func (ds *DurationSampler) P50() float64 {
	return ds.percentileMilli(0.5)
}

// P75 returns the duration in millisecond greater than 75%.
func (ds *DurationSampler) P75() float64 {
	return ds.percentileMilli(0.75)
}

// P95 returns the duration in millisecond greater than 95%.
func (ds *DurationSampler) P95() float64 {
	return ds.percentileMilli(0.95)
}

// P98 returns the duration in millisecond greater than 98%.
func (ds *DurationSampler) P98() float64 {
	return ds.percentileMilli(0.98)
}

// P99 returns the duration in millisecond greater than 99%.
func (ds *DurationSampler) P99() float64 {
	return ds.percentileMilli(0.99)
}

// P999 returns the duration in millisecond greater than 99.9%.
func (ds *DurationSampler) P999() float64 {
	return ds.percentileMilli(0.999)
}
// Percentiles returns 7 metrics by order:
// P25, P50, P75, P95, P98, P99, P999
func (ds *DurationSampler) Percentiles() []float64 {
	ps := ds.sample.Percentiles([]float64{0.25, 0.5, 0.75, 0.95, 0.98, 0.99, 0.999})
	// Convert each quantile from nanoseconds to milliseconds in place.
	for i := range ps {
		ps[i] = nanoToMilli(ps[i])
	}
	return ps
}
// Count returns the total number of observations recorded by the sampler.
func (ds *DurationSampler) Count() float64 {
	return float64(ds.sample.Count())
}
package help
var HelpMessages = map[string]Help{
"help": Help{
Name: "/help",
ShortDesc: "command help",
Synopsis: map[string]string{
"": "show this help",
"<command>": "show help for <command> (without the `/`)",
},
Overview: "Give help on an internal command",
Description: "Find out more information on topics in Stimmtausch by using the /help command. For instance, to learn about the `/log` command, type `/help log`.\n\n Available topics:\n\n {HELPTOPICS}",
},
"log": Help{
Name: "/log",
ShortDesc: "connection logging",
Synopsis: map[string]string{
"": "show this help",
"--help": "show this help",
"--list": "list open log files",
"<file>": "start logging the current world to the specified file",
"--off <file>": "stop logging to the specified file",
},
Overview: "Command to control logging output from worlds.",
Description: "Logging in Stimmtausch is controlled through the /log command. Invoked with a file name, it starts logging the current world's output to the specified file (absolute, or relative to the directory in which Stimmtausch was started). You can turn logging off at any time by calling `/log --off <file>`. To list what logs are open, you can call `/log --list`",
},
"fg": Help{
Name: "/fg",
ShortDesc: "bring world to the foreground",
Synopsis: map[string]string{
"": "rotate to the next active world to the right (same as `/]`)",
">": "rotate to the next world to the right",
"<": "rotate to the next world to the left",
"]": "rotate to the next active world to the right",
"[": "rotate to the next active world to the left",
"<world>": "switch to the named world",
},
Overview: "Command to control moving between worlds.",
Description: "Moving between worlds in Stimmtausch is accomplished with the /fg command. You can rotate between worlds by using the special world names > and <, otherwise you can specifi which world you would like to bring to the foreground.",
SeeAlso: "`/>` (same as `/fg >`), `/<` (same as `/fg <`), `/]` (same as `/fg ]`), `/[` (same as `/fg [`)",
},
"connect": Help{
Name: "/connect",
ShortDesc: "connect to worlds",
Synopsis: map[string]string{
"<named world>": "connect to the named world",
//"<named server>": "connect to the named server without a username",
//"<address>:<port>": "connect to the server address and port specified",
},
Overview: "Command to connect to worlds.",
Description: "Connecting to worlds in Stimmtausch is accomplished with the /connect command. You can connect to worlds named in your configuration files.", // Additionally, you can connect to servers named in your configuration without user information, or to a specified address and port. In each of the latter two cases, you will be given a temporary world name which you can use with other commands.
SeeAlso: "`/c` (shortcut for `/connect`), `/disconnect`, `/fg`",
},
"disconnect": Help{
Name: "/disconnect",
ShortDesc: "disconnect from worlds",
Synopsis: map[string]string{
"[-r]": "disconnect from the current world",
"[-r] <world>": "disconnect from the world specified",
},
Overview: "Command to disconnect from worlds.",
Description: "Disconnecting from worlds in Stimmtausch is accomplished with the /disconnect command. It accepts a world name. Passing `-r` will remove the world from the world list as well.",
SeeAlso: "`/dc` (shortcut for `/disconnect`), `/connect`, `/remove`",
},
"remove": Help{
Name: "/remove",
ShortDesc: "remove the world from the UI",
Synopsis: map[string]string{
"<world>": "removes the current world from the UI's world list",
},
Overview: "Command to remove the current world from the UI's world list",
Description: "This command will remove the current world from the world list in the UI. It will do so whether or not the world is currently connected. There is currently no way to add that world back in, so use with care!",
SeeAlso: "`/disconnect -r`",
},
"quit": Help{
Name: "/quit",
ShortDesc: "quit Stimmtausch",
Synopsis: map[string]string{
"": "disconnect from all worlds and quit Stimmtausch",
},
Overview: "Command to quit Stimmtausch.",
Description: "Quitting Stimmtausch is accomplished to the /quit command.", // Note that if you send `/quit` from _any_ client attached to Stimmtausch (e.g: if you're using Stimmtausch in headless mode or as a server), it will quit, detaching every connected client.",
},
"syslog": Help{
Name: "/syslog",
ShortDesc: "log to the system log",
Synopsis: map[string]string{
"<level> <message>": "You may log arbitrary information to the system log via the /syslog command. Why? We're sure you have your reasons! The log level is the first argument, and may be one of 'TRACE', 'DEBUG', 'INFO', 'WARNING', 'ERROR', or 'CRITICAL'.",
},
},
} | help/builtins.go | 0.614741 | 0.437824 | builtins.go | starcoder |
package array
/*
# Minimum Window Substring
# https://leetcode.com/explore/interview/card/top-interview-questions-hard/116/array-and-strings/838/
Given a string S and a string T, find the minimum window in S which will contain all the characters in T in complexity O(n).
Example:
Input: S = "ADOBECODEBANC", T = "ABC"
Output: "BANC"
Note:
If there is no such window in S that covers all characters in T, return the empty string "".
If there is such window, you are guaranteed that there will always be only one unique minimum window in S.
Hint #1
Use two pointers to create a window of letters in S, which would have all the characters from T.
Hint #2
Since you have to find the minimum window in S which has all the characters from T, you need to expand and contract the window using the two pointers and keep checking the window for all the characters. This approach is also called Sliding Window Approach.
L ------------------------ R , Suppose this is the window that contains all characters of T
L----------------- R , this is the contracted window. We found a smaller window that still contains all the characters in T
When the window is no longer valid, start expanding again using the right pointer.
*/
// MinWindow returns the minimum window (substring) of s that contains all
// the characters of t, including duplicates, or "" when no such window
// exists. It is the exported entry point for the sliding-window
// implementation minWindow below.
func MinWindow(s string, t string) string {
	return minWindow(s, t)
}
// minWindow finds the minimum window of s that contains every character of t
// (with multiplicity) using the sliding-window technique, in
// O(len(s)+len(t)) time. It returns "" when no such window exists.
func minWindow(s string, t string) string {
	if len(s) == 0 || len(t) == 0 {
		return ""
	}
	// dictT counts each unique character required by t.
	// (A size hint of 0 in make(map, 0) is meaningless; omit it.)
	dictT := make(map[byte]int)
	for i := 0; i < len(t); i++ {
		dictT[t[i]]++
	}
	// required is the number of distinct characters that must reach their
	// target count inside the window.
	required := len(dictT)
	// windowCounts tracks character counts inside the current window s[l:r+1].
	windowCounts := make(map[byte]int)
	// formed counts how many distinct characters currently meet their
	// required frequency. e.g. if t is "AABC" the window must hold two A's,
	// one B and one C; formed == 3 once all three conditions are met.
	formed := 0
	// Best window found so far; bestLen == -1 means "none yet".
	bestLen, bestL, bestR := -1, 0, 0
	l := 0
	for r := 0; r < len(s); r++ {
		// Expand the window by one character on the right.
		c := s[r]
		windowCounts[c]++
		// If this character just reached its desired count, one more
		// distinct requirement is satisfied.
		if windowCounts[c] == dictT[c] {
			formed++
		}
		// Contract from the left while the window remains valid, recording
		// the smallest valid window seen.
		for l <= r && formed == required {
			if bestLen == -1 || r-l+1 < bestLen {
				bestLen, bestL, bestR = r-l+1, l, r
			}
			// The character at l leaves the window.
			c = s[l]
			windowCounts[c]--
			if windowCounts[c] < dictT[c] {
				formed--
			}
			l++
		}
	}
	if bestLen == -1 {
		return ""
	}
	return s[bestL : bestR+1]
}
// minWindow2 is an array-backed variant of the sliding-window solution
// (a community-favored LeetCode approach). This version uses 256-entry
// byte-count tables and counts t byte-by-byte, so it is safe for any byte
// values; the previous 128-entry tables (and rune-based indexing of t)
// paniced on non-ASCII input. It also returns "" for empty inputs, matching
// minWindow.
func minWindow2(s string, t string) string {
	if len(s) == 0 || len(t) == 0 {
		return ""
	}
	result := ""
	// Per-byte counts: tMap holds the required counts, sMap the counts in
	// the current window.
	tMap := make([]int, 256)
	sMap := make([]int, 256)
	// required is the number of distinct bytes appearing in t.
	required := 0
	for i := 0; i < len(t); i++ {
		if tMap[t[i]] == 0 {
			required++
		}
		tMap[t[i]]++
	}
	// counter tracks how many distinct bytes currently meet their required
	// frequency in the window s[l:r+1].
	counter := 0
	for r, l := 0, 0; r < len(s); r++ {
		sMap[s[r]]++
		if sMap[s[r]] == tMap[s[r]] {
			counter++
		}
		if counter == required {
			// Shrink from the left until removing one more byte would
			// invalidate the window.
			for l < r {
				sMap[s[l]]--
				if sMap[s[l]] < tMap[s[l]] {
					counter--
					break
				}
				l++
			}
			if result == "" || len(s[l:r+1]) < len(result) {
				result = s[l : r+1]
			}
			l++
		}
	}
	return result
}
// Package pipeline implements a processing pipeline with multiple steps.
package pipeline
import (
"errors"
"fmt"
. "github.com/abitofhelp/go-helpers/string"
. "github.com/abitofhelp/go-helpers/time"
"strings"
"sync"
"time"
)
// Status indicates the current state of the pipeline.
type Status int

// States of a Status, roughly in lifecycle order.
const (
	// Aborting means the pipeline is abending its processing.
	Aborting Status = iota
	// Starting means the pipeline is beginning its processing.
	Starting
	// Running means the pipeline is actively processing.
	Running
	// Stopping means the pipeline is finishing after completing all steps.
	Stopping
	// Stopped means the pipeline is not processing.
	Stopped
)
// IPipeline is the interface a pipeline implementation must satisfy.
type IPipeline interface {
	// Start initiates processing in the pipeline.
	Start() error
	// Abort abends processing in the pipeline.
	Abort() error
	// Stop terminates processing after all steps have been completed.
	Stop() error
}
// Pipeline provides the data and methods to create and manage a file
// processing pipeline.
type Pipeline struct {
	// startedUtc is the date/time in UTC when the pipeline commenced its work.
	startedUtc time.Time
	// endedUtc is the date/time in UTC when the pipeline completed its work.
	endedUtc time.Time
	// status indicates the current status of the pipeline.
	status Status
	// path is the file system path to a directory containing image files to process.
	path string
	// scannerBufferSize is the number of reusable bytes to use for the directory scanner's work.
	scannerBufferSize uint64
	// pathChanSize is the number of file system paths that will be buffered in a channel in the pipeline.
	pathChanSize uint64
	// pathConsumerCount is the number of concurrent and parallel goroutines that will consume paths from a channel in the pipeline.
	pathConsumerCount uint64
	// pathsChannel is the channel containing paths to the files that will be processed.
	pathsChannel chan string
	// commandChannel is the channel that will signal to start the pipeline.
	commandChannel chan bool
}
// New is a factory that creates an initialized Pipeline.
// Parameter path is the directory containing files to process.
// Parameter scannerBufferSize is the number of reusable bytes to use for the directory scanner's work.
// Parameter pathChanSize is the number of file system paths that will be buffered in a channel in the pipeline.
// Parameter pathConsumerCount is the number of concurrent and parallel goroutines that will consume paths from a channel in the pipeline.
// Returns an initialized pipeline, or an error when any setter rejects its value.
func New(path string, scannerBufferSize uint64, pathChanSize uint64, pathConsumerCount uint64) (*Pipeline, error) {
	// Note: &Pipeline{} can never be nil, so the previous nil check on it
	// was dead code and has been removed.
	pipeline := &Pipeline{}
	if err := pipeline.setPath(path); err != nil {
		return nil, err
	}
	if err := pipeline.setScannerBufferSize(scannerBufferSize); err != nil {
		return nil, err
	}
	if err := pipeline.setPathChanSize(pathChanSize); err != nil {
		return nil, err
	}
	if err := pipeline.setPathConsumerCount(pathConsumerCount); err != nil {
		return nil, err
	}
	if err := pipeline.setEndedUtc(Zero()); err != nil {
		return nil, err
	}
	if err := pipeline.setStatus(Stopped); err != nil {
		return nil, err
	}
	// Create the channel that will provide paths to files for processing.
	if err := pipeline.setPathsChannel(make(chan string, pipeline.PathChanSize())); err != nil {
		return nil, err
	}
	// Create the channel that will signal to start the pipeline.
	if err := pipeline.setCommandChannel(make(chan bool)); err != nil {
		return nil, err
	}
	return pipeline, nil
}
// Path gets the path from the instance.
func (p Pipeline) Path() string {
	return p.path
}

// setPath sets the value of the path in the instance, rejecting empty paths
// and normalizing the path for the current platform.
// If there is an error, an error is returned, otherwise nil.
func (p *Pipeline) setPath(path string) error {
	if path == "" {
		return errors.New("the path cannot be empty")
	}
	path = CleanStringForPlatform(path)
	p.path = path
	return nil
}

// ScannerBufferSize gets the number of reusable bytes to use for the directory scanner's work.
func (p Pipeline) ScannerBufferSize() uint64 {
	return p.scannerBufferSize
}

// setScannerBufferSize sets the number of reusable bytes to use for the directory scanner's work.
// It currently accepts any value and always returns nil.
func (p *Pipeline) setScannerBufferSize(scannerBufferSize uint64) error {
	p.scannerBufferSize = scannerBufferSize
	return nil
}

// PathChanSize gets the number of file system paths that will be buffered in a channel in the pipeline.
func (p Pipeline) PathChanSize() uint64 {
	return p.pathChanSize
}

// setPathChanSize sets the number of file system paths that will be buffered in a channel in the pipeline.
// It currently accepts any value and always returns nil.
func (p *Pipeline) setPathChanSize(pathChanSize uint64) error {
	p.pathChanSize = pathChanSize
	return nil
}

// PathConsumerCount gets the number of concurrent and parallel goroutines that will consume paths from a channel in the pipeline.
func (p Pipeline) PathConsumerCount() uint64 {
	return p.pathConsumerCount
}

// setPathConsumerCount sets the number of concurrent and parallel goroutines that will consume paths from a channel in the pipeline.
// It currently accepts any value and always returns nil.
func (p *Pipeline) setPathConsumerCount(pathConsumerCount uint64) error {
	p.pathConsumerCount = pathConsumerCount
	return nil
}
// Status gets the current status from the instance of a Pipeline.
func (p Pipeline) Status() Status {
	return p.status
}

// setStatus sets the status of the Pipeline.
// It currently accepts any value and always returns nil.
func (p *Pipeline) setStatus(status Status) error {
	p.status = status
	return nil
}

// StartedUtc gets the UTC date/time when the pipeline started processing.
func (p Pipeline) StartedUtc() time.Time {
	return p.startedUtc
}

// setStartedUtc sets startedUtc, the time when the pipeline started
// processing. The time's location name must render as "UTC" after platform
// cleanup, otherwise an error is returned.
func (p *Pipeline) setStartedUtc(startedUtc time.Time) error {
	utc := CleanStringForPlatform(startedUtc.Location().String())
	if strings.Compare(utc, "UTC") != 0 {
		return errors.New("the startedUtc value must be in UTC")
	}
	p.startedUtc = startedUtc
	return nil
}

// EndedUtc gets the UTC date/time when the pipeline completed processing.
func (p Pipeline) EndedUtc() time.Time {
	return p.endedUtc
}

// setEndedUtc sets endedUtc, the time when the pipeline completed
// processing. The time's location name must render as "UTC" after platform
// cleanup, otherwise an error is returned.
func (p *Pipeline) setEndedUtc(endedUtc time.Time) error {
	utc := CleanStringForPlatform(endedUtc.Location().String())
	if strings.Compare(utc, "UTC") != 0 {
		return errors.New("the endedUtc value must be in UTC")
	}
	p.endedUtc = endedUtc
	return nil
}

// PathsChannel gets the channel containing paths to the files that will be processed.
func (p Pipeline) PathsChannel() chan string {
	return p.pathsChannel
}

// setPathsChannel sets the channel containing paths to the files that will be processed.
// It currently accepts any value and always returns nil.
func (p *Pipeline) setPathsChannel(pathsChannel chan string) error {
	p.pathsChannel = pathsChannel
	return nil
}

// CommandChannel gets the channel that will signal to start the pipeline.
func (p Pipeline) CommandChannel() chan bool {
	return p.commandChannel
}

// setCommandChannel sets the channel that will signal to start the pipeline.
// It currently accepts any value and always returns nil.
func (p *Pipeline) setCommandChannel(commandChannel chan bool) error {
	p.commandChannel = commandChannel
	return nil
}
// Start initiates processing in the pipeline: it launches the directory
// scanner, signals it to begin, then processes each discovered path in its
// own goroutine and waits for all workers to finish.
func (p Pipeline) Start() error {
	var wg sync.WaitGroup
	// Recursively scan the path for files to process...
	go p.loadPathsToChannel(p.Path(), p.PathsChannel(), p.CommandChannel(), &wg)
	// Start the loading of paths into the paths channel...
	p.CommandChannel() <- true
	// Spin off a goroutine to process each file in the channel.
	for path := range p.PathsChannel() {
		// Register each worker so wg.Wait() below actually waits for it;
		// previously the WaitGroup counter was never incremented here.
		wg.Add(1)
		// Pass path as an argument so every goroutine gets its own copy.
		// Capturing the loop variable directly is a data race pre-Go 1.22:
		// all goroutines could observe the final value of path.
		go func(path string) {
			defer wg.Done()
			fmt.Printf("\nProcessing: %s", path)
			// Do something... Pass the something along to the next step.
		}(path)
	}
	// Wait for all goroutines to complete.
	wg.Wait()
	return nil
}
// Abort abends processing in the pipeline.
// NOTE(review): not yet implemented; currently a no-op that returns nil.
func (p Pipeline) Abort() error {
	// TODO
	return nil
}

// Stop terminates processing after all steps have been completed.
// NOTE(review): not yet implemented; currently a no-op that returns nil.
func (p Pipeline) Stop() error {
	// TODO
	return nil
}
package types
import (
"sync"
"github.com/attic-labs/noms/go/chunks"
"github.com/attic-labs/noms/go/d"
"github.com/attic-labs/noms/go/hash"
"github.com/attic-labs/noms/go/util/sizecache"
)
// ValueReader is an interface that knows how to read Noms Values, e.g. datas/Database. Required to avoid import cycle between this package and the package that implements Value reading.
type ValueReader interface {
ReadValue(h hash.Hash) Value
}
// ValueWriter is an interface that knows how to write Noms Values, e.g. datas/Database. Required to avoid import cycle between this package and the package that implements Value writing.
type ValueWriter interface {
WriteValue(v Value) Ref
}
// ValueReadWriter is an interface that knows how to read and write Noms Values, e.g. datas/Database. Required to avoid import cycle between this package and the package that implements Value read/writing.
type ValueReadWriter interface {
ValueReader
ValueWriter
}
// ValueStore provides methods to read and write Noms Values to a BatchStore. It validates Values as they are written, but does not guarantee that these Values are persisted to the BatchStore until a subsequent Flush. or Close.
// Currently, WriteValue validates the following properties of a Value v:
// - v can be correctly serialized and its Ref taken
// - all Refs in v point to a Value that can be read from this ValueStore
// - all Refs in v point to a Value of the correct Type
type ValueStore struct {
bs BatchStore
cache map[hash.Hash]chunkCacheEntry
mu *sync.Mutex
valueCache *sizecache.SizeCache
}
const defaultValueCacheSize = 1 << 25 // 32MB
type chunkCacheEntry interface {
Present() bool
Hint() hash.Hash
Type() *Type
}
// NewTestValueStore creates a simple struct that satisfies ValueReadWriter and is backed by a chunks.TestStore.
func NewTestValueStore() *ValueStore {
	return newLocalValueStore(chunks.NewTestStore())
}

// newLocalValueStore wraps cs in a BatchStoreAdaptor and returns a ValueStore over it.
func newLocalValueStore(cs chunks.ChunkStore) *ValueStore {
	return NewValueStore(NewBatchStoreAdaptor(cs))
}

// NewValueStore returns a ValueStore instance that owns the provided BatchStore and manages its lifetime. Calling Close on the returned ValueStore will Close bs.
func NewValueStore(bs BatchStore) *ValueStore {
	return NewValueStoreWithCache(bs, defaultValueCacheSize)
}

// NewValueStoreWithCache is like NewValueStore, but with an explicit
// decoded-value cache size in bytes.
func NewValueStoreWithCache(bs BatchStore, cacheSize uint64) *ValueStore {
	return &ValueStore{bs, map[hash.Hash]chunkCacheEntry{}, &sync.Mutex{}, sizecache.New(cacheSize)}
}

// BatchStore returns the underlying BatchStore.
func (lvs *ValueStore) BatchStore() BatchStore {
	return lvs.bs
}
// ReadValue reads and decodes a value from lvs. It is not considered an error for the requested chunk to be empty; in this case, the function simply returns nil.
func (lvs *ValueStore) ReadValue(r hash.Hash) Value {
	// Fast path: the decoded value (or a cached nil for a known-absent
	// chunk) may already be in the value cache.
	if v, ok := lvs.valueCache.Get(r); ok {
		if v == nil {
			return nil
		}
		return v.(Value)
	}
	chunk := lvs.bs.Get(r)
	if chunk.IsEmpty() {
		// Cache the absence (with size 0) so repeated misses skip the BatchStore.
		lvs.valueCache.Add(r, 0, nil)
		return nil
	}
	v := DecodeValue(chunk, lvs)
	lvs.valueCache.Add(r, uint64(len(chunk.Data())), v)
	var entry chunkCacheEntry = absentChunk{}
	if v != nil {
		lvs.cacheChunks(v, r)
		// r is trivially a hint for v, so consider putting that in the cache. If we got to v by reading some higher-level chunk, this entry gets dropped on the floor because r already has a hint in the cache. If we later read some other chunk that references v, cacheChunks will overwrite this with a hint pointing to that chunk.
		// If we don't do this, top-level Values that get read but not written -- such as the existing Head of a Database upon a Commit -- can be erroneously left out during a pull.
		entry = hintedChunk{v.Type(), r}
	}
	// Only install the entry when nothing better (an entry with a hint) exists.
	if cur := lvs.check(r); cur == nil || cur.Hint().IsEmpty() {
		lvs.set(r, entry)
	}
	return v
}
// WriteValue takes a Value, schedules it to be written to lvs, and returns
// an appropriately-typed types.Ref. v is not guaranteed to be actually
// written until after Flush().
func (lvs *ValueStore) WriteValue(v Value) Ref {
	d.Chk.True(v != nil)
	// Encoding v causes any child chunks, e.g. internal nodes if v is a meta sequence, to get written. That needs to happen before we try to validate v.
	c := EncodeValue(v, lvs)
	d.Chk.False(c.IsEmpty())
	// Renamed from "hash" so the local no longer shadows the imported hash package.
	h := c.Hash()
	height := maxChunkHeight(v) + 1
	r := constructRef(MakeRefType(v.Type()), h, height)
	if lvs.isPresent(h) {
		// Already known to be in the store; nothing to schedule.
		return r
	}
	hints := lvs.chunkHintsFromCache(v)
	lvs.bs.SchedulePut(c, height, hints)
	lvs.set(h, (*presentChunk)(v.Type()))
	return r
}
// Flush flushes pending writes in the underlying BatchStore.
func (lvs *ValueStore) Flush() {
	lvs.bs.Flush()
}

// Close closes the underlying BatchStore after flushing pending writes.
func (lvs *ValueStore) Close() error {
	lvs.Flush()
	return lvs.bs.Close()
}
// cacheChunks looks at the Chunks reachable from v and, for each one, checks
// if there's a hint in the cache. If there isn't, or if the hint is a
// self-reference, the chunk gets r set as its new hint.
func (lvs *ValueStore) cacheChunks(v Value, r hash.Hash) {
	for _, reachable := range v.Chunks() {
		// Renamed from "hash" so the local no longer shadows the imported hash package.
		target := reachable.TargetHash()
		if cur := lvs.check(target); cur == nil || cur.Hint().IsEmpty() || cur.Hint() == target {
			lvs.set(target, hintedChunk{getTargetType(reachable), r})
		}
	}
}
// isPresent reports whether the cache records chunk r as present.
func (lvs *ValueStore) isPresent(r hash.Hash) (present bool) {
	if entry := lvs.check(r); entry != nil && entry.Present() {
		present = true
	}
	return
}

// check returns the cache entry for r (nil when absent), guarded by the mutex.
func (lvs *ValueStore) check(r hash.Hash) chunkCacheEntry {
	lvs.mu.Lock()
	defer lvs.mu.Unlock()
	return lvs.cache[r]
}

// set unconditionally stores entry for r, guarded by the mutex.
func (lvs *ValueStore) set(r hash.Hash, entry chunkCacheEntry) {
	lvs.mu.Lock()
	defer lvs.mu.Unlock()
	lvs.cache[r] = entry
}

// checkAndSet stores entry for r only when no entry exists yet, or the
// existing entry carries no hint.
func (lvs *ValueStore) checkAndSet(r hash.Hash, entry chunkCacheEntry) {
	if cur := lvs.check(r); cur == nil || cur.Hint().IsEmpty() {
		lvs.set(r, entry)
	}
}
// chunkHintsFromCache returns hints for the chunks reachable from v without
// forcing reads of values that are missing from the cache.
func (lvs *ValueStore) chunkHintsFromCache(v Value) Hints {
	return lvs.checkChunksInCache(v, false)
}

// ensureChunksInCache validates every chunk reachable from v, reading values
// as needed so they end up in the cache.
func (lvs *ValueStore) ensureChunksInCache(v Value) {
	lvs.checkChunksInCache(v, true)
}

// checkChunksInCache verifies that each chunk reachable from v is known to
// be present and of the expected type, and returns the set of hints gathered
// along the way. When readValues is true, missing chunks are read (and
// thereby cached) before validation.
func (lvs *ValueStore) checkChunksInCache(v Value, readValues bool) Hints {
	hints := map[hash.Hash]struct{}{}
	for _, reachable := range v.Chunks() {
		// First, check the type cache to see if reachable is already known to be valid.
		targetHash := reachable.TargetHash()
		entry := lvs.check(targetHash)
		// If it's not already in the cache, attempt to read the value directly, which will put it and its chunks into the cache.
		if entry == nil || !entry.Present() {
			var reachableV Value
			if readValues {
				// TODO: log or report that we needed to ReadValue here BUG 1762
				reachableV = lvs.ReadValue(targetHash)
				entry = lvs.check(targetHash)
			}
			if reachableV == nil {
				// When readValues is false this always fires for uncached
				// chunks; d.Chk.Fail is expected to halt execution here.
				d.Chk.Fail("Attempted to write Value containing Ref to non-existent object.", "%s\n, contains ref %s, which points to a non-existent Value.", v.Hash(), reachable.TargetHash())
			}
		}
		if hint := entry.Hint(); !hint.IsEmpty() {
			hints[hint] = struct{}{}
		}
		targetType := getTargetType(reachable)
		d.PanicIfTrue(!entry.Type().Equals(targetType), "Value to write contains ref %s, which points to a value of a different type: %+v != %+v", reachable.TargetHash(), entry.Type(), targetType)
	}
	return hints
}
// getTargetType returns the element type of a Ref's compound type, i.e. the
// Type of the Value the Ref points at. It asserts that refBase really is a Ref.
func getTargetType(refBase Ref) *Type {
	rt := refBase.Type()
	d.Chk.True(RefKind == rt.Kind())
	return rt.Desc.(CompoundDesc).ElemTypes[0]
}
// hintedChunk records a chunk known to be present, along with its Type and a
// hint: the hash of a chunk through which it can be found.
type hintedChunk struct {
	t    *Type
	hint hash.Hash
}

// Present always reports true for a hinted chunk.
func (h hintedChunk) Present() bool {
	return true
}

// Hint returns the hash of the chunk that references this one.
func (h hintedChunk) Hint() (r hash.Hash) {
	return h.hint
}

// Type returns the Type of the chunk's Value.
func (h hintedChunk) Type() *Type {
	return h.t
}

// presentChunk records a chunk known to be present (typically one just
// written), carrying its Type but no hint.
type presentChunk Type

// Present always reports true for a present chunk.
func (p *presentChunk) Present() bool {
	return true
}

// Hint returns the zero hash; a present chunk carries no hint.
func (p *presentChunk) Hint() (h hash.Hash) {
	return
}

// Type returns the Type of the chunk's Value.
func (p *presentChunk) Type() *Type {
	return (*Type)(p)
}

// absentChunk records a chunk known to be absent from the store.
type absentChunk struct{}

// Present always reports false for an absent chunk.
func (a absentChunk) Present() bool {
	return false
}

// Hint returns the zero hash; an absent chunk has no hint.
func (a absentChunk) Hint() (r hash.Hash) {
	return
}

// Type panics; an absent chunk has no Type and must never be asked for one.
func (a absentChunk) Type() *Type {
	panic("Not reached. Should never call Type() on an absentChunk.")
}
package statictimeseries
import (
"fmt"
"sort"
"time"
"github.com/grokify/gotilla/sort/sortutil"
"github.com/grokify/gotilla/time/month"
"github.com/grokify/gotilla/time/timeutil"
"github.com/grokify/gotilla/type/maputil"
"github.com/pkg/errors"
)
// DataSeries is a time-keyed collection of DataItems belonging to a single
// named series. Items are stored in ItemMap keyed by the item's UTC time
// formatted as RFC 3339.
type DataSeries struct {
	SeriesName string
	ItemMap    map[string]DataItem
	IsFloat    bool
	Interval   timeutil.Interval // Informational
}

// NewDataSeries returns a DataSeries with an initialized, empty ItemMap.
func NewDataSeries() DataSeries {
	return DataSeries{ItemMap: map[string]DataItem{}}
}

// DataItem is a single observation: a value at a point in time, belonging to
// a named series. IsFloat selects whether Value (int64) or ValueFloat
// (float64) carries the data.
type DataItem struct {
	SeriesName string
	Time       time.Time
	IsFloat    bool
	Value      int64
	ValueFloat float64
}
// AddItem adds a data item, normalizing its time to UTC and defaulting its
// series name to the series' own name. When an item already exists for the
// same time unit, both the int and float values are summed into it.
func (series *DataSeries) AddItem(item DataItem) {
	// Lazily initialize the map so a zero-value DataSeries is usable.
	if series.ItemMap == nil {
		series.ItemMap = map[string]DataItem{}
	}
	if len(item.SeriesName) == 0 {
		item.SeriesName = series.SeriesName
	}
	item.Time = item.Time.UTC()
	rfc := item.Time.Format(time.RFC3339)
	if _, ok := series.ItemMap[rfc]; !ok {
		series.ItemMap[rfc] = item
	} else {
		// Merge with the existing item for this timestamp by summation.
		existingItem := series.ItemMap[rfc]
		existingItem.Value += item.Value
		existingItem.ValueFloat += item.ValueFloat
		series.ItemMap[rfc] = existingItem
	}
}
// SetSeriesName assigns seriesName to the series itself and to every item it
// currently holds.
func (series *DataSeries) SetSeriesName(seriesName string) {
	series.SeriesName = seriesName
	for key, item := range series.ItemMap {
		item.SeriesName = seriesName
		series.ItemMap[key] = item
	}
}

// Keys returns the series' RFC 3339 time keys in ascending order.
func (series *DataSeries) Keys() []string {
	keys := make([]string, 0, len(series.ItemMap))
	for key := range series.ItemMap {
		keys = append(keys, key)
	}
	sort.Strings(keys)
	return keys
}
// ItemsSorted returns the series' items ordered by their RFC 3339 time keys.
// It panics if a key reported by Keys is missing from the map, which cannot
// happen unless the map is mutated concurrently.
func (series *DataSeries) ItemsSorted() []DataItem {
	sortedKeys := series.Keys()
	items := make([]DataItem, 0, len(sortedKeys))
	for _, key := range sortedKeys {
		item, ok := series.ItemMap[key]
		if !ok {
			panic(fmt.Sprintf("KEY_NOT_FOUND [%s]", key))
		}
		items = append(items, item)
	}
	return items
}

// Last returns the chronologically final item in the series, or an error
// when the series is empty.
func (series *DataSeries) Last() (DataItem, error) {
	items := series.ItemsSorted()
	if len(items) == 0 {
		return DataItem{}, errors.New("E_NO_ITEMS")
	}
	return items[len(items)-1], nil
}
// Pop removes and returns the chronologically last item in the series.
// It returns an error when the series is empty.
func (series *DataSeries) Pop() (DataItem, error) {
	items := series.ItemsSorted()
	if len(items) == 0 {
		// Bug fix: the previous message read "E_NO_ERROR", which did not
		// describe the condition; use the same code as Last for consistency.
		return DataItem{}, errors.New("E_NO_ITEMS")
	}
	last := items[len(items)-1]
	rfc := last.Time.Format(time.RFC3339)
	delete(series.ItemMap, rfc)
	return last, nil
}
// minMaxValuesInt64Only returns the min and max of the int64 Value fields,
// or (0, 0) for an empty series. It sorts a copy of the values to find the
// extremes.
func (series *DataSeries) minMaxValuesInt64Only() (int64, int64) {
	int64s := []int64{}
	for _, item := range series.ItemMap {
		int64s = append(int64s, item.Value)
	}
	if len(int64s) == 0 {
		return 0, 0
	}
	sort.Sort(sortutil.Int64Slice(int64s))
	return int64s[0], int64s[len(int64s)-1]
}

// minMaxValuesFloat64Only returns the min and max of the float64 ValueFloat
// fields, or (0, 0) for an empty series.
func (series *DataSeries) minMaxValuesFloat64Only() (float64, float64) {
	float64s := []float64{}
	for _, item := range series.ItemMap {
		float64s = append(float64s, item.ValueFloat)
	}
	if len(float64s) == 0 {
		return 0, 0
	}
	float64slice := sort.Float64Slice(float64s)
	sort.Sort(float64slice)
	return float64slice[0], float64slice[len(float64slice)-1]
}

// MinMaxValues returns the series' min and max values as int64. For a float
// series the float extremes are truncated toward zero by the conversion.
func (series *DataSeries) MinMaxValues() (int64, int64) {
	if series.IsFloat {
		min, max := series.minMaxValuesFloat64Only()
		return int64(min), int64(max)
	}
	return series.minMaxValuesInt64Only()
}

// MinMaxValuesFloat64 returns the series' min and max values as float64,
// converting from int64 when the series is not a float series.
func (series *DataSeries) MinMaxValuesFloat64() (float64, float64) {
	if series.IsFloat {
		return series.minMaxValuesFloat64Only()
	}
	min, max := series.minMaxValuesInt64Only()
	return float64(min), float64(max)
}
// MinValue returns the series' minimum value as an int64.
func (series *DataSeries) MinValue() int64 {
	min, _ := series.MinMaxValues()
	return min
}

// MaxValue returns the series' maximum value as an int64.
func (series *DataSeries) MaxValue() int64 {
	_, max := series.MinMaxValues()
	return max
}

// getTimes returns the (unsorted) times of all items in the series.
func (ds *DataSeries) getTimes() []time.Time {
	times := []time.Time{}
	for _, item := range ds.ItemMap {
		times = append(times, item.Time)
	}
	return times
}

// GetTimeSlice returns the items' times as a TimeSlice, sorted ascending
// when sortSlice is true.
func (ds *DataSeries) GetTimeSlice(sortSlice bool) timeutil.TimeSlice {
	times := timeutil.TimeSlice{}
	for _, item := range ds.ItemMap {
		times = append(times, item.Time)
	}
	if sortSlice {
		sort.Sort(times)
	}
	return times
}
// ToMonth returns a new series with the same values bucketed by calendar
// month: each item's time is snapped to the start of its month, and values
// that land in the same month are summed together by AddItem.
func (series *DataSeries) ToMonth() DataSeries {
	newDataSeries := DataSeries{
		SeriesName: series.SeriesName,
		ItemMap:    map[string]DataItem{},
		IsFloat:    series.IsFloat,
		Interval:   timeutil.Month}
	for _, item := range series.ItemMap {
		newDataSeries.AddItem(DataItem{
			SeriesName: item.SeriesName,
			Time:       month.MonthBegin(item.Time, 0),
			IsFloat:    item.IsFloat,
			Value:      item.Value,
			ValueFloat: item.ValueFloat})
	}
	return newDataSeries
}
// ToMonthCumulative returns a month-interval series of running totals: for
// each month in the covered range, the item's value is the cumulative sum of
// all monthly values up to and including that month. Months with no data
// carry the previous total forward. The covered range is taken from
// timesInput when supplied, otherwise from the data itself.
func (ds *DataSeries) ToMonthCumulative(timesInput ...time.Time) (DataSeries, error) {
	newDataSeries := DataSeries{
		SeriesName: ds.SeriesName,
		ItemMap:    map[string]DataItem{},
		IsFloat:    ds.IsFloat,
		Interval:   timeutil.Month}
	dsMonth := ds.ToMonth()
	// Determine the time range either from the caller or from the data.
	var min time.Time
	var max time.Time
	var err error
	if len(timesInput) > 0 {
		min, max, err = timeutil.TimeSliceMinMax(timesInput)
	} else {
		min, max, err = timeutil.TimeSliceMinMax(dsMonth.getTimes())
	}
	if err != nil {
		return newDataSeries, err
	}
	times := timeutil.TimeSeriesSlice(timeutil.Month, []time.Time{min, max})
	// Walk the months in order, carrying running totals forward and adding
	// each month's value when present. This replaces four near-duplicate
	// branches with one accumulation loop; behavior is unchanged (missing
	// leading months still yield explicit zero items).
	var sumInt int64
	var sumFloat float64
	for _, t := range times {
		if item, ok := dsMonth.ItemMap[t.Format(time.RFC3339)]; ok {
			sumInt += item.Value
			sumFloat += item.ValueFloat
		}
		newDataSeries.AddItem(DataItem{
			SeriesName: newDataSeries.SeriesName,
			IsFloat:    newDataSeries.IsFloat,
			Time:       t,
			Value:      sumInt,
			ValueFloat: sumFloat})
	}
	return newDataSeries, nil
}
// AggregateSeries returns a new series whose value at each time is the
// running (cumulative) sum of s1's values up to and including that time.
// NOTE(review): only the int64 Value field is accumulated; ValueFloat and
// IsFloat are dropped, so float series lose data here — confirm intent.
func AggregateSeries(s1 DataSeries) DataSeries {
	aggregate := NewDataSeries()
	sortedItems := s1.SortedItems()
	sum := int64(0)
	for _, atomicItem := range sortedItems {
		aggregateItem := DataItem{
			SeriesName: atomicItem.SeriesName,
			Time:       atomicItem.Time,
			Value:      atomicItem.Value + sum,
		}
		sum = aggregateItem.Value
		aggregate.AddItem(aggregateItem)
	}
	return aggregate
}
// SortedItems returns sorted DataItems. This currently uses
// a simple string sort on RFC3339 times. For dates that are not
// handled properly this way, this can be enhanced to use more
// proper comparison.
func (series *DataSeries) SortedItems() []DataItem {
	itemsSorted := []DataItem{}
	timesSorted := maputil.StringKeysSorted(series.ItemMap)
	for _, rfc3339 := range timesSorted {
		itemsSorted = append(itemsSorted, series.ItemMap[rfc3339])
	}
	return itemsSorted
}

// DataSeriesTimeSeries returns a continuous slice of interval-spaced times
// spanning the series' item times.
func DataSeriesTimeSeries(series *DataSeries, interval timeutil.Interval) []time.Time {
	return timeutil.TimeSeriesSlice(interval, DataSeriesItemTimes(series))
}

// DataSeriesItemTimes returns the (unsorted) times of the series' items.
func DataSeriesItemTimes(series *DataSeries) []time.Time {
	times := []time.Time{}
	for _, item := range series.ItemMap {
		times = append(times, item.Time)
	}
	return times
}

// DataSeriesMinMaxTimes returns the earliest and latest item times of series.
func DataSeriesMinMaxTimes(series *DataSeries) (time.Time, time.Time) {
	return timeutil.SliceMinMax(DataSeriesItemTimes(series))
}

// MinMaxTimes returns the earliest and latest item times of this series.
func (series *DataSeries) MinMaxTimes() (time.Time, time.Time) {
	return DataSeriesMinMaxTimes(series)
}
// DataSeriesDivide returns a new float series whose item at each of denom's
// times is the ratio numer.Value / denom.Value. Times present only in numer
// are ignored; a time present in denom but missing from numer contributes a
// zero ratio (the zero-value nItem). A zero denominator value yields an error.
func DataSeriesDivide(numer, denom DataSeries) (DataSeries, error) {
	denomKeys := denom.Keys()
	ds := NewDataSeries()
	ds.IsFloat = true
	// Only propagate the interval when both series agree on it.
	if numer.Interval == denom.Interval {
		ds.Interval = denom.Interval
	}
	ds.SeriesName = numer.SeriesName + " / " + denom.SeriesName
	for _, dKey := range denomKeys {
		nItem, nOk := numer.ItemMap[dKey]
		dItem, dOk := denom.ItemMap[dKey]
		// NOTE(review): dOk is always true because dKey comes from
		// denom.Keys(), so the first branch is unreachable — confirm intent.
		if !nOk && !dOk {
			continue
		} else if !dOk || dItem.Value == 0 {
			return ds, fmt.Errorf("E_DENOM_MISSING_OR_ZERO TIME [%s] NUMERATOR [%v]",
				dKey, nItem.Value)
		}
		// NOTE(review): division uses only the int64 Value fields; ValueFloat
		// is ignored even for float series — confirm this is intended.
		ds.AddItem(DataItem{
			Time:       dItem.Time,
			ValueFloat: float64(nItem.Value) / float64(dItem.Value),
			IsFloat:    true,
		})
	}
	return ds, nil
}
package agozon
// LocaleJPMap maps a search-index name (e.g. "Books") to its
// LocaleSearchIndex metadata for the Amazon JP locale. The values mirror the
// fields of the LocaleJP struct defined in this file.
var LocaleJPMap = map[string]LocaleSearchIndex{
	"All": LocaleJP.All, "Apparel": LocaleJP.Apparel, "Appliances": LocaleJP.Appliances, "Automotive": LocaleJP.Automotive, "Baby": LocaleJP.Baby,
	"Beauty": LocaleJP.Beauty, "Blended": LocaleJP.Blended, "Books": LocaleJP.Books, "Classical": LocaleJP.Classical, "DVD": LocaleJP.DVD, "Electronics": LocaleJP.Electronics,
	"ForeignBooks": LocaleJP.ForeignBooks, "GiftCards": LocaleJP.GiftCards, "Grocery": LocaleJP.Grocery, "HealthPersonalCare": LocaleJP.HealthPersonalCare, "Hobbies": LocaleJP.Hobbies, "HomeImprovement": LocaleJP.HomeImprovement, "Jewelry": LocaleJP.Jewelry, "KindleStore": LocaleJP.KindleStore,
	"Kitchen": LocaleJP.Kitchen, "MP3Downloads": LocaleJP.MP3Downloads, "MobileApps": LocaleJP.MobileApps, "Music": LocaleJP.Music, "MusicTracks": LocaleJP.MusicTracks, "MusicalInstruments": LocaleJP.MusicalInstruments, "OfficeProducts": LocaleJP.OfficeProducts, "PCHardware": LocaleJP.PCHardware,
	"PetSupplies": LocaleJP.PetSupplies, "Shoes": LocaleJP.Shoes, "Software": LocaleJP.Software, "SportingGoods": LocaleJP.SportingGoods, "Toys": LocaleJP.Toys,
	"VHS": LocaleJP.VHS, "Video": LocaleJP.Video, "VideoDownload": LocaleJP.VideoDownload, "VideoGames": LocaleJP.VideoGames, "Watches": LocaleJP.Watches,
}
// LocaleJP holds the per-search-index request metadata for the amazon.co.jp
// locale: the root browse node, the sort values the index accepts, and the
// ItemSearch parameters it supports. Entries appear in no particular order.
// A BrowseNode of 0 appears on several indices — presumably "no root browse
// node available for JP"; confirm against the Product Advertising API docs.
var LocaleJP = struct {
	All, Apparel, Appliances, Automotive, Baby,
	Beauty, Blended, Books, Classical, DVD, Electronics,
	ForeignBooks, GiftCards, Grocery, HealthPersonalCare, Hobbies, HomeImprovement, Jewelry, KindleStore,
	Kitchen, MP3Downloads, MobileApps, Music, MusicTracks, MusicalInstruments, OfficeProducts, PCHardware,
	PetSupplies, Shoes, Software, SportingGoods, Toys,
	VHS, Video, VideoDownload, VideoGames, Watches LocaleSearchIndex
}{
	Apparel: LocaleSearchIndex{
		BrowseNode:           361299011,
		SortValues:           []string{"-price", "price", "relevancerank", "salesrank"},
		ItemSearchParameters: []string{"Author", "Availability", "Brand", "ItemPage", "Keywords", "Manufacturer", "MaximumPrice", "MerchantId", "MinPercentageOff", "MinimumPrice", "Sort", "Title"},
	},
	Beauty: LocaleSearchIndex{
		BrowseNode:           0,
		SortValues:           []string{"-price", "price", "relevancerank", "reviewrank", "salesrank"},
		ItemSearchParameters: []string{"Author", "Availability", "Brand", "ItemPage", "Keywords", "Manufacturer", "MaximumPrice", "MerchantId", "MinPercentageOff", "MinimumPrice", "Sort", "Title"},
	},
	Hobbies: LocaleSearchIndex{
		BrowseNode:           13331821,
		SortValues:           []string{"-mfg-age-min", "-price", "-release-date", "-releasedate", "-titlerank", "mfg-age-min", "price", "release-date", "releasedate", "relevancerank", "reviewrank", "reviewrank_authority", "salesrank", "titlerank"},
		ItemSearchParameters: []string{"Availability", "ItemPage", "Keywords", "Manufacturer", "MaximumPrice", "MerchantId", "MinPercentageOff", "MinimumPrice", "Sort", "Title"},
	},
	Jewelry: LocaleSearchIndex{
		BrowseNode:           85896051,
		SortValues:           []string{"-price", "price", "relevancerank", "reviewrank", "salesrank"},
		ItemSearchParameters: []string{"Availability", "ItemPage", "Keywords", "MerchantId", "MinPercentageOff", "Sort", "Title"},
	},
	Kitchen: LocaleSearchIndex{
		BrowseNode:           0,
		SortValues:           []string{"-price", "date-desc-rank", "price", "relevancerank", "reviewrank_authority", "salesrank"},
		ItemSearchParameters: []string{"Author", "Brand", "ItemPage", "Keywords", "Manufacturer", "MaximumPrice", "MerchantId", "MinPercentageOff", "Sort", "Title"},
	},
	MusicTracks: LocaleSearchIndex{
		BrowseNode:           562032,
		SortValues:           []string{"-titlerank", "titlerank"},
		ItemSearchParameters: []string{"Author", "Availability", "ItemPage", "Keywords", "MaximumPrice", "MerchantId", "MinPercentageOff", "MinimumPrice", "Sort"},
	},
	Software: LocaleSearchIndex{
		BrowseNode:           637630,
		SortValues:           []string{"-price", "-release-date", "-releasedate", "-titlerank", "price", "release-date", "releasedate", "salesrank", "titlerank"},
		ItemSearchParameters: []string{"Author", "Availability", "Brand", "ItemPage", "Keywords", "Manufacturer", "MaximumPrice", "MerchantId", "MinPercentageOff", "MinimumPrice", "Sort", "Title"},
	},
	Classical: LocaleSearchIndex{
		BrowseNode:           562032,
		SortValues:           []string{"-orig-rel-date", "-price", "-pricerank", "-releasedate", "-titlerank", "orig-rel-date", "price", "pricerank", "releasedate", "salesrank", "titlerank"},
		ItemSearchParameters: []string{"Artist", "Availability", "Composer", "Conductor", "ItemPage", "Keywords", "MaximumPrice", "MerchantId", "MinPercentageOff", "MinimumPrice", "Orchestra", "Sort", "Title"},
	},
	MP3Downloads: LocaleSearchIndex{
		BrowseNode:           2129039051,
		SortValues:           []string{"-albumrank", "-artistalbumrank", "-price", "-price-new-bin", "-runtime", "-titlerank", "albumrank", "artistalbumrank", "price", "price-new-bin", "releasedate", "relevancerank", "reviewrank_authority", "runtime", "salesrank", "titlerank"},
		ItemSearchParameters: []string{"Availability", "ItemPage", "Keywords", "MaximumPrice", "MerchantId", "MinPercentageOff", "MinimumPrice", "Sort", "Title"},
	},
	PCHardware: LocaleSearchIndex{
		BrowseNode:           0,
		SortValues:           []string{"-price", "-price-new-bin", "price", "price-new-bin", "relevancerank", "reviewrank", "reviewrank_authority", "salesrank"},
		ItemSearchParameters: []string{"Author", "Availability", "Brand", "ItemPage", "Keywords", "Manufacturer", "MaximumPrice", "MerchantId", "MinPercentageOff", "MinimumPrice", "Sort", "Title"},
	},
	SportingGoods: LocaleSearchIndex{
		BrowseNode:           14315361,
		SortValues:           []string{"-price", "-release-date", "-titlerank", "price", "release-date", "salesrank", "titlerank"},
		ItemSearchParameters: []string{"Availability", "ItemPage", "Keywords", "Manufacturer", "MaximumPrice", "MerchantId", "MinPercentageOff", "MinimumPrice", "Sort", "Title"},
	},
	Appliances: LocaleSearchIndex{
		BrowseNode:           2277725051,
		SortValues:           []string{"-price", "price", "relevancerank", "reviewrank", "reviewrank_authority", "salesrank"},
		ItemSearchParameters: []string{"Author", "Availability", "Brand", "ItemPage", "Keywords", "Manufacturer", "MaximumPrice", "MerchantId", "MinPercentageOff", "MinimumPrice", "Sort", "Title"},
	},
	Books: LocaleSearchIndex{
		BrowseNode:           465610,
		SortValues:           []string{"-price", "-publication_date", "-titlerank", "-unit-sales", "daterank", "inverse-pricerank", "price", "pricerank", "salesrank", "titlerank"},
		ItemSearchParameters: []string{"Author", "Availability", "ItemPage", "Keywords", "MaximumPrice", "MerchantId", "MinPercentageOff", "MinimumPrice", "Power", "Publisher", "Sort", "Title"},
	},
	GiftCards: LocaleSearchIndex{
		BrowseNode:           0,
		SortValues:           []string{"-price", "date-desc-rank", "price", "relevancerank", "reviewrank", "reviewrank_authority", "salesrank"},
		ItemSearchParameters: []string{"Author", "Availability", "Brand", "Keywords", "MaximumPrice", "MerchantId", "MinPercentageOff", "MinimumPrice"},
	},
	MobileApps: LocaleSearchIndex{
		BrowseNode:           2381131051,
		SortValues:           []string{"-price", "pmrank", "price", "relevancerank", "reviewrank", "reviewrank_authority"},
		ItemSearchParameters: []string{"Author", "Availability", "Brand", "ItemPage", "Keywords", "Manufacturer", "MaximumPrice", "MerchantId", "MinPercentageOff", "MinimumPrice", "Sort", "Title"},
	},
	OfficeProducts: LocaleSearchIndex{
		BrowseNode:           86732051,
		SortValues:           []string{"-price", "price", "relevancerank", "reviewrank", "salesrank"},
		ItemSearchParameters: []string{"Availability", "Brand", "ItemPage", "Keywords", "MaximumPrice", "MerchantId", "MinPercentageOff", "MinimumPrice", "Sort", "Title"},
	},
	VHS: LocaleSearchIndex{
		BrowseNode:           2130989051,
		SortValues:           []string{"-orig-rel-date", "-price", "-pricerank", "-releasedate", "-titlerank", "orig-rel-date", "price", "pricerank", "releasedate", "salesrank", "titlerank"},
		ItemSearchParameters: []string{"Actor", "Availability", "Director", "ItemPage", "Keywords", "MaximumPrice", "MerchantId", "MinPercentageOff", "MinimumPrice", "Publisher", "Sort", "Title"},
	},
	VideoGames: LocaleSearchIndex{
		BrowseNode:           637872,
		SortValues:           []string{"-price", "-releasedate", "-titlerank", "price", "release-date", "releasedate", "salesrank", "titlerank"},
		ItemSearchParameters: []string{"Author", "Availability", "Brand", "ItemPage", "Keywords", "Manufacturer", "MaximumPrice", "MerchantId", "MinPercentageOff", "MinimumPrice", "ReleaseDate", "Sort", "Title"},
	},
	MusicalInstruments: LocaleSearchIndex{
		BrowseNode:           2123630051,
		SortValues:           []string{"-price", "price", "relevancerank", "reviewrank", "salesrank"},
		ItemSearchParameters: []string{"Author", "Availability", "Brand", "ItemPage", "Keywords", "Manufacturer", "MaximumPrice", "MerchantId", "MinPercentageOff", "MinimumPrice", "Sort", "Title"},
	},
	PetSupplies: LocaleSearchIndex{
		BrowseNode:           2127213051,
		SortValues:           []string{"-price", "-price-new-bin", "price", "price-new-bin", "relevancerank", "reviewrank", "reviewrank_authority", "salesrank"},
		ItemSearchParameters: []string{"Author", "Availability", "Brand", "ItemPage", "Keywords", "Manufacturer", "MaximumPrice", "MerchantId", "MinPercentageOff", "MinimumPrice", "Sort", "Title"},
	},
	Shoes: LocaleSearchIndex{
		BrowseNode:           2016926051,
		SortValues:           []string{"-launch-date", "-price", "price", "relevancerank", "reviewrank", "reviewrank_authority", "salesrank"},
		ItemSearchParameters: []string{"Availability", "Brand", "ItemPage", "Keywords", "Manufacturer", "MaximumPrice", "MerchantId", "MinPercentageOff", "MinimumPrice", "Sort", "Title"},
	},
	Toys: LocaleSearchIndex{
		BrowseNode:           13331821,
		SortValues:           []string{"-price", "-release-date", "-releasedate", "-titlerank", "price", "release-date", "releasedate", "relevancerank", "reviewrank", "reviewrank_authority", "salesrank", "titlerank"},
		ItemSearchParameters: []string{"Availability", "ItemPage", "Keywords", "Manufacturer", "MaximumPrice", "MerchantId", "MinPercentageOff", "MinimumPrice", "Sort", "Title"},
	},
	Automotive: LocaleSearchIndex{
		BrowseNode:           2017305051,
		SortValues:           []string{"-price", "price", "relevancerank", "reviewrank", "reviewrank_authority", "salesrank"},
		ItemSearchParameters: []string{"Availability", "Brand", "ItemPage", "Keywords", "Manufacturer", "MaximumPrice", "MerchantId", "MinPercentageOff", "MinimumPrice", "Sort", "Title"},
	},
	Blended: LocaleSearchIndex{
		BrowseNode:           0,
		SortValues:           []string{},
		ItemSearchParameters: []string{"Availability", "ItemPage", "Keywords"},
	},
	Electronics: LocaleSearchIndex{
		BrowseNode:           3210991,
		SortValues:           []string{"-price", "-release-date", "-releasedate", "-titlerank", "price", "release-date", "releasedate", "relevancerank", "reviewrank", "reviewrank_authority", "salesrank", "titlerank"},
		ItemSearchParameters: []string{"Author", "Availability", "ItemPage", "Keywords", "Manufacturer", "MaximumPrice", "MerchantId", "MinPercentageOff", "MinimumPrice", "Sort", "Title"},
	},
	HealthPersonalCare: LocaleSearchIndex{
		BrowseNode:           161669011,
		SortValues:           []string{"-price", "-release-date", "-releasedate", "-titlerank", "price", "release-date", "releasedate", "salesrank", "titlerank"},
		ItemSearchParameters: []string{"Author", "Availability", "Brand", "ItemPage", "Keywords", "Manufacturer", "MaximumPrice", "MerchantId", "MinPercentageOff", "MinimumPrice", "Sort", "Title"},
	},
	HomeImprovement: LocaleSearchIndex{
		BrowseNode:           0,
		SortValues:           []string{"-price", "price", "relevancerank", "reviewrank", "reviewrank_authority", "salesrank"},
		ItemSearchParameters: []string{"Availability", "Brand", "ItemPage", "Keywords", "Manufacturer", "MaximumPrice", "MerchantId", "MinPercentageOff", "MinimumPrice", "Sort", "Title"},
	},
	Music: LocaleSearchIndex{
		BrowseNode:           562032,
		SortValues:           []string{"-orig-rel-date", "-price", "-pricerank", "-releasedate", "-titlerank", "orig-rel-date", "price", "pricerank", "releasedate", "salesrank", "titlerank"},
		ItemSearchParameters: []string{"Artist", "Availability", "ItemPage", "Keywords", "MaximumPrice", "MerchantId", "MinPercentageOff", "MinimumPrice", "ReleaseDate", "Sort", "Title"},
	},
	Video: LocaleSearchIndex{
		BrowseNode:           561972,
		SortValues:           []string{"-orig-rel-date", "-price", "-pricerank", "-releasedate", "-titlerank", "orig-rel-date", "price", "pricerank", "releasedate", "salesrank", "titlerank"},
		ItemSearchParameters: []string{"Actor", "Availability", "Director", "ItemPage", "Keywords", "MaximumPrice", "MerchantId", "MinPercentageOff", "MinimumPrice", "Publisher", "Sort", "Title"},
	},
	Baby: LocaleSearchIndex{
		BrowseNode:           13331821,
		SortValues:           []string{"-price", "price", "psrank", "salesrank", "titlerank"},
		ItemSearchParameters: []string{"Author", "Availability", "Brand", "ItemPage", "Keywords", "Manufacturer", "MaximumPrice", "MerchantId", "MinPercentageOff", "MinimumPrice", "Sort", "Title"},
	},
	DVD: LocaleSearchIndex{
		BrowseNode:           562002,
		SortValues:           []string{"-orig-rel-date", "-price", "-pricerank", "-releasedate", "-titlerank", "orig-rel-date", "price", "pricerank", "releasedate", "salesrank", "titlerank"},
		ItemSearchParameters: []string{"Actor", "Availability", "Director", "ItemPage", "Keywords", "MaximumPrice", "MerchantId", "MinPercentageOff", "MinimumPrice", "Publisher", "ReleaseDate", "Sort", "Title"},
	},
	All: LocaleSearchIndex{
		BrowseNode:           0,
		SortValues:           []string{},
		ItemSearchParameters: []string{"Availability", "ItemPage", "Keywords", "MerchantId"},
	},
	ForeignBooks: LocaleSearchIndex{
		BrowseNode:           388316011,
		SortValues:           []string{"-price", "-publication_date", "-titlerank", "-unit-sales", "daterank", "inverse-pricerank", "price", "pricerank", "salesrank", "titlerank"},
		ItemSearchParameters: []string{"Author", "Availability", "ItemPage", "Keywords", "MaximumPrice", "MerchantId", "MinPercentageOff", "MinimumPrice", "Power", "Publisher", "Sort", "Title"},
	},
	VideoDownload: LocaleSearchIndex{
		BrowseNode:           0,
		SortValues:           []string{"date-desc-rank", "popularity-rank", "price-asc-rank", "price-desc-rank", "relevancerank", "review-rank"},
		ItemSearchParameters: []string{"Actor", "Availability", "Director", "ItemPage", "Keywords", "MaximumPrice", "MerchantId", "MinPercentageOff", "MinimumPrice", "Publisher", "Sort", "Title"},
	},
	Grocery: LocaleSearchIndex{
		BrowseNode:           57240051,
		SortValues:           []string{"-price", "price", "relevancerank", "reviewrank", "salesrank"},
		ItemSearchParameters: []string{"Availability", "Brand", "ItemPage", "Keywords", "MaximumPrice", "MerchantId", "MinPercentageOff", "MinimumPrice", "Sort", "Title"},
	},
	KindleStore: LocaleSearchIndex{
		BrowseNode:           2250739051,
		SortValues:           []string{"-price", "daterank", "price", "relevancerank", "reviewrank", "reviewrank_authority", "salesrank"},
		ItemSearchParameters: []string{"Author", "Availability", "ItemPage", "Keywords", "MaximumPrice", "MerchantId", "MinPercentageOff", "MinimumPrice", "Publisher", "Sort", "Title"},
	},
	Watches: LocaleSearchIndex{
		BrowseNode:           331952011,
		SortValues:           []string{"-price", "-titlerank", "price", "salesrank", "titlerank"},
		ItemSearchParameters: []string{"Availability", "ItemPage", "Keywords", "MerchantId", "MinPercentageOff", "Sort", "Title"}}}
package engine
import (
"fmt"
"math"
"gonum.org/v1/gonum/mat"
)
// Transform wraps a 4x4 homogeneous transformation matrix. The zero value
// (nil data) is not usable; construct instances with NewTransform and derive
// new ones with the chainable builder methods below.
type Transform struct {
	data mat.Matrix
}
// NewTransform returns the identity transform (a 4x4 identity matrix),
// the starting point for building composite transformations.
func NewTransform() Transform {
	elems := make([]float64, 16)
	for i := 0; i < 4; i++ {
		// Diagonal entries sit at flat indices 0, 5, 10 and 15.
		elems[5*i] = 1
	}
	return Transform{data: mat.NewDense(4, 4, elems)}
}
// Transpose returns a transform whose matrix is the transpose of this one.
func (transform Transform) Transpose() Transform {
	transposed := transform.data.T()
	return Transform{data: transposed}
}
// Inverse returns the inverse transform. It panics when the underlying
// matrix cannot be inverted, since callers expect a valid result.
func (transform Transform) Inverse() Transform {
	inv := new(mat.Dense)
	if err := inv.Inverse(transform.data); err != nil {
		panic(fmt.Sprintf("Cannot compute inverse: %v", transform))
	}
	return Transform{inv}
}
// Chain composes two transforms. The multiplication order is
// other.data * transform.data, i.e. the receiver is applied first and
// other second when the result is used to transform column vectors.
func (transform Transform) Chain(other Transform) Transform {
	var result mat.Dense
	result.Mul(other.data, transform.data)
	return Transform{&result}
}
// Translate chains a translation by (x, y, z) onto the transform.
func (transform Transform) Translate(x, y, z float64) Transform {
	translation := mat.NewDense(4, 4, []float64{
		1, 0, 0, x,
		0, 1, 0, y,
		0, 0, 1, z,
		0, 0, 0, 1,
	})
	return transform.Chain(Transform{translation})
}
// Scale chains a non-uniform scaling by (x, y, z) onto the transform.
func (transform Transform) Scale(x, y, z float64) Transform {
	scaling := mat.NewDense(4, 4, []float64{
		x, 0, 0, 0,
		0, y, 0, 0,
		0, 0, z, 0,
		0, 0, 0, 1,
	})
	return transform.Chain(Transform{scaling})
}
// RotateX chains a rotation of r radians about the X axis. The matrix
// entries are the standard X-rotation layout; kept verbatim because the
// sign placement is easy to get wrong.
func (transform Transform) RotateX(r float64) Transform {
	sinr, cosr := math.Sincos(r)
	return transform.Chain(Transform{
		mat.NewDense(4, 4, []float64{
			1, 0, 0, 0,
			0, cosr, -sinr, 0,
			0, sinr, cosr, 0,
			0, 0, 0, 1,
		}),
	})
}
// RotateY chains a rotation of r radians about the Y axis. Note the sign
// placement differs from RotateX/RotateZ (positive sine in the top-right).
func (transform Transform) RotateY(r float64) Transform {
	sinr, cosr := math.Sincos(r)
	return transform.Chain(Transform{
		mat.NewDense(4, 4, []float64{
			cosr, 0, sinr, 0,
			0, 1, 0, 0,
			-sinr, 0, cosr, 0,
			0, 0, 0, 1,
		}),
	})
}
// RotateZ chains a rotation of r radians about the Z axis.
func (transform Transform) RotateZ(r float64) Transform {
	sinr, cosr := math.Sincos(r)
	return transform.Chain(Transform{
		mat.NewDense(4, 4, []float64{
			cosr, -sinr, 0, 0,
			sinr, cosr, 0, 0,
			0, 0, 1, 0,
			0, 0, 0, 1,
		}),
	})
}
// Shear chains a shear transformation. Each parameter a_b is the amount
// that coordinate a changes per unit of coordinate b (e.g. x_y shears x in
// proportion to y). The matrix layout is kept verbatim — the off-diagonal
// placement is order-sensitive.
func (transform Transform) Shear(x_y, x_z, y_x, y_z, z_x, z_y float64) Transform {
	return transform.Chain(Transform{
		mat.NewDense(4, 4, []float64{
			1, x_y, x_z, 0,
			y_x, 1, y_z, 0,
			z_x, z_y, 1, 0,
			0, 0, 0, 1,
		}),
	})
}
package levy
import (
"math"
"fmt"
)
// Fast, accurate algorithm for numerical simulation of Levy stable stochastic processes
// <NAME>. 1994
// Levy generates Lévy-stable random variates using Mantegna's (1994) fast
// simulation algorithm. The zero value is usable; NewLevy is provided for
// convenience.
type Levy struct{}

// NewLevy returns a ready-to-use Levy generator.
func NewLevy() *Levy {
	return &Levy{}
}
// Vf draws one sample of the Mantegna stochastic variable
// v = x / |y|^(1/alpha), where x ~ N(0, sigma_x(alpha)) and y ~ N(0, 1).
// Valid only for alpha in [0.3, 1.99]; other values return an error.
func (levy Levy) Vf(alpha float64) (float64, error) {
	if alpha < 0.3 || alpha > 1.99 {
		return 0, fmt.Errorf("alpha out of range %f (not element of [0.3,1.99])", alpha)
	}
	// Sigmax cannot fail here: alpha was validated above.
	sigma, _ := levy.Sigmax(alpha)
	x := randNormal(0, 1) * sigma
	y := randNormal(0, 1)
	return x / math.Pow(math.Abs(y), 1.0/alpha), nil
}
// Sigmax computes the standard deviation sigma_x of the numerator Gaussian
// in Mantegna's algorithm, as a closed-form expression in alpha. Valid only
// for alpha in [0.3, 1.99].
func (levy Levy) Sigmax(alpha float64) (float64, error) {
	if alpha < 0.3 || alpha > 1.99 {
		return 0, fmt.Errorf("alpha out of range %f (not element of [0.3,1.99])", alpha)
	}
	num := math.Gamma(alpha+1.0) * math.Sin(math.Pi*alpha/2.0)
	den := math.Gamma((alpha+1)/2.0) * alpha * math.Pow(2.0, (alpha-1.0)/2.0)
	return math.Pow(num/den, 1.0/alpha), nil
}
// K computes the correction factor K(alpha) from Mantegna's algorithm as a
// closed-form expression. Valid only for alpha in [0.3, 1.99].
func (levy Levy) K(alpha float64) (float64, error) {
	if alpha < 0.3 || alpha > 1.99 {
		return 0, fmt.Errorf("alpha out of range %f (not element of [0.3,1.99])", alpha)
	}
	ratio := alpha * math.Gamma((alpha+1.0)/(2.0*alpha)) / math.Gamma(1.0/alpha)
	inner := alpha * math.Gamma((alpha+1.0)/2.0) / (math.Gamma(alpha+1.0) * math.Sin(math.Pi*alpha/2.0))
	return ratio * math.Pow(inner, 1.0/alpha), nil
}
// C returns the correction constant C(alpha), obtained by linear
// interpolation over tabulated (alpha, C) pairs — presumably the table from
// Mantegna (1994); TODO confirm the source of the values.
//
// NOTE(review): the accepted range is [0.3, 1.99] but the table starts at
// alpha = 0.75, so values below 0.75 depend on the interpolator's
// out-of-range behavior — verify this is intended.
func (levy Levy) C(alpha float64) (float64, error) {
	var estimate float64
	if alpha >= 0.3 && alpha <= 1.99 {
		// Tabulated support points: x holds alpha values, y the C(alpha) values.
		x := []float64{0.75, 0.8, 0.9, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 1.95, 1.99}
		y := []float64{2.2085, 2.483, 2.7675, 2.945, 2.941, 2.9005, 2.8315, 2.737, 2.6125, 2.4465, 2.206, 1.7915, 1.3925, 0.6089}
		li := NewLinear()
		li.Fit(x, y)
		// The := below deliberately shadows the outer estimate; both returns
		// in this branch use the inner (interpolated) value.
		estimate, err := interpolate(li, alpha)
		if err != nil {
			return estimate, err
		}
		return estimate, nil
	}
	return estimate, fmt.Errorf("alpha out of range %f (not element of [0.3,1.99])", alpha)
}
func (levy Levy) Levy(alpha, gamma float64, n int) (float64, error) {
var v, w, z float64
if gamma <= 0 {
return z, fmt.Errorf("gamma out of range %f", gamma)
}
if n < 0 {
return z, fmt.Errorf("iteration less than zero %f", n)
}
if alpha >= 0.3 && alpha <= 1.99 {
w = 0
for i := 0; i <= n; i++ {
v, _ = levy.Vf(alpha)
for v < -10 {
v, _ = levy.Vf(alpha)
}
k, _ := levy.K(alpha)
c, _ := levy.C(alpha)
w += v * ((k - 1.0) * math.Exp(-v / c) + 1.0)
}
// The Levy random variable
z = 1.0 / math.Pow(float64(n), 1.0 / alpha) * w * gamma
return z, nil
}
return z, fmt.Errorf("alpha out of range %f (not element of [0.3,1.99])", alpha)
} | levy/levy.go | 0.730866 | 0.54577 | levy.go | starcoder |
package iso20022
// Amount of money due to a party as compensation for a service.
// All fields are optional per the ISO 20022 schema (pointer types with
// omitempty); use the Set*/Add* helpers below to populate them.
type Commission10 struct {
	// Service for which the commission is asked or paid.
	Type *CommissionType6Code `xml:"Tp,omitempty"`

	// Service for which the commission is asked or paid.
	ExtendedType *Extended350Code `xml:"XtndedTp,omitempty"`

	// Basis upon which a commission is charged, eg, flat fee.
	Basis *TaxationBasis4Code `xml:"Bsis,omitempty"`

	// Basis upon which a commission is charged, eg, flat fee.
	ExtendedBasis *Extended350Code `xml:"XtndedBsis,omitempty"`

	// Commission expressed as an amount of money.
	Amount *ActiveCurrencyAnd13DecimalAmount `xml:"Amt,omitempty"`

	// Commission expressed as a percentage.
	Rate *PercentageRate `xml:"Rate,omitempty"`

	// Party entitled to the amount of money resulting from a commission.
	RecipientIdentification *PartyIdentification2Choice `xml:"RcptId,omitempty"`

	// Reference to the agreement established between the fund and another party. This element, amongst others, defines the conditions of the commissions.
	CommercialAgreementReference *Max35Text `xml:"ComrclAgrmtRef,omitempty"`

	// Voluntary non-enforcement of the right to all or part of a commission.
	WaivingDetails *CommissionWaiver3 `xml:"WvgDtls,omitempty"`
}
func (c *Commission10) SetType(value string) {
c.Type = (*CommissionType6Code)(&value)
}
func (c *Commission10) SetExtendedType(value string) {
c.ExtendedType = (*Extended350Code)(&value)
}
func (c *Commission10) SetBasis(value string) {
c.Basis = (*TaxationBasis4Code)(&value)
}
func (c *Commission10) SetExtendedBasis(value string) {
c.ExtendedBasis = (*Extended350Code)(&value)
}
func (c *Commission10) SetAmount(value, currency string) {
c.Amount = NewActiveCurrencyAnd13DecimalAmount(value, currency)
}
func (c *Commission10) SetRate(value string) {
c.Rate = (*PercentageRate)(&value)
}
func (c *Commission10) AddRecipientIdentification() *PartyIdentification2Choice {
c.RecipientIdentification = new(PartyIdentification2Choice)
return c.RecipientIdentification
}
func (c *Commission10) SetCommercialAgreementReference(value string) {
c.CommercialAgreementReference = (*Max35Text)(&value)
}
func (c *Commission10) AddWaivingDetails() *CommissionWaiver3 {
c.WaivingDetails = new(CommissionWaiver3)
return c.WaivingDetails
} | Commission10.go | 0.736306 | 0.427098 | Commission10.go | starcoder |
package executor
import (
"unsafe"
jsoniter "github.com/json-iterator/go"
)
// OrderedMapItem is a key-value pair for an item in an OrderedMap.
type OrderedMapItem struct {
	Key   string
	Value interface{}
}
// OrderedMap represents a map that maintains the order of its key-value
// pairs. It's more or less just a list that serializes to a JSON map.
// It does not deduplicate keys: callers must not add the same key twice.
type OrderedMap struct {
	items []OrderedMapItem
}
// NewOrderedMap creates a new, empty ordered map.
func NewOrderedMap() *OrderedMap {
	return new(OrderedMap)
}
// NewOrderedMapWithLength creates a new ordered map with n elements
// pre-allocated and zero-initialized, ready to be filled via Set.
func NewOrderedMapWithLength(n int) *OrderedMap {
	m := &OrderedMap{items: make([]OrderedMapItem, n)}
	return m
}
// Set writes a key-value pair to the map at the given index. The index must
// be within the pre-allocated length (see NewOrderedMapWithLength).
func (m *OrderedMap) Set(index int, key string, value interface{}) {
	item := OrderedMapItem{Key: key, Value: value}
	m.items[index] = item
}
// Append appends a key-value pair to the map. It is the caller's
// responsibility to make sure the key doesn't already exist in the map.
func (m *OrderedMap) Append(key string, value interface{}) {
	item := OrderedMapItem{Key: key, Value: value}
	m.items = append(m.items, item)
}
// Len returns the number of key-value pairs in the map.
func (m *OrderedMap) Len() int {
	return len(m.items)
}
// Items provides the items in the map, in the order they were added.
// The returned slice is the map's backing storage, not a copy — callers
// must not mutate it.
func (m *OrderedMap) Items() []OrderedMapItem {
	return m.items
}
// MarshalJSON marshals the map to JSON, maintaining the correct key order.
// It delegates to jsoniter, which dispatches to the registered
// orderedMapEncoder (see init in this file) rather than recursing back into
// this method.
func (m *OrderedMap) MarshalJSON() ([]byte, error) {
	return jsoniter.Marshal(m)
}
// orderedMapEncoder is a jsoniter ValEncoder that serializes OrderedMap as
// a JSON object whose members appear in insertion order.
type orderedMapEncoder struct{}

// IsEmpty reports whether the OrderedMap at ptr has no items, letting
// jsoniter honor omitempty semantics.
func (e *orderedMapEncoder) IsEmpty(ptr unsafe.Pointer) bool {
	m := *((*OrderedMap)(ptr))
	return m.Len() == 0
}

// Encode streams the map as a JSON object, preserving item order.
// ptr points at an OrderedMap value supplied by jsoniter.
func (e *orderedMapEncoder) Encode(ptr unsafe.Pointer, stream *jsoniter.Stream) {
	m := *((*OrderedMap)(ptr))
	stream.WriteObjectStart()
	for i, kv := range m.items {
		if i != 0 {
			// Comma between object members.
			stream.WriteMore()
		}
		stream.WriteObjectField(kv.Key)
		stream.WriteVal(kv.Value)
	}
	stream.WriteObjectEnd()
}

// init registers the custom encoder so jsoniter serializes OrderedMap values
// via orderedMapEncoder instead of reflection.
func init() {
	jsoniter.RegisterTypeEncoder("executor.OrderedMap", &orderedMapEncoder{})
}
package types
import (
"bytes"
"fmt"
"regexp"
"sort"
"strings"
"github.com/attic-labs/noms/go/d"
"github.com/attic-labs/noms/go/hash"
)
// EmptyStructType is the type of a struct with no name and no fields.
var EmptyStructType = MakeStructType("", []string{}, []*Type{})

// EmptyStruct is the canonical empty struct value (no fields, hash cache unset).
var EmptyStruct = Struct{ValueSlice{}, EmptyStructType, &hash.Hash{}}
// StructData maps field names to their values; it is the input to NewStruct.
type StructData map[string]Value

// Struct is a noms struct value. values holds field values in the
// alphabetical order of field names (matching the fields of t); h caches
// the value hash and is lazily populated by Hash.
type Struct struct {
	values []Value
	t      *Type
	h      *hash.Hash
}
// NewStruct creates a Struct named name from the given field data. Field
// names are sorted alphabetically so the value layout matches the canonical
// struct type produced by MakeStructType.
func NewStruct(name string, data StructData) Struct {
	fieldNames := make([]string, 0, len(data))
	for fn := range data {
		fieldNames = append(fieldNames, fn)
	}
	sort.Strings(fieldNames)

	fieldTypes := make([]*Type, len(fieldNames))
	values := make(ValueSlice, len(fieldNames))
	for i, fn := range fieldNames {
		v := data[fn]
		fieldTypes[i] = v.Type()
		values[i] = v
	}
	return Struct{values, MakeStructType(name, fieldNames, fieldTypes), &hash.Hash{}}
}
// NewStructWithType creates a Struct of the given (already-built) struct
// type from positional field values. data must have exactly one value per
// field, in field order, and each value must be a subtype of its field's
// declared type; violations panic via d/assertSubtype.
func NewStructWithType(t *Type, data ValueSlice) Struct {
	desc := t.Desc.(StructDesc)
	d.PanicIfFalse(len(data) == len(desc.fields))
	for i, field := range desc.fields {
		v := data[i]
		assertSubtype(field.t, v)
	}
	return Struct{data, t, &hash.Hash{}}
}
// hashPointer exposes the shared hash-cache pointer (used internally; the
// pointed-to hash may be empty until Hash has been called).
func (s Struct) hashPointer() *hash.Hash {
	return s.h
}
// Value interface

// Equals reports value equality by comparing the two values' hashes.
func (s Struct) Equals(other Value) bool {
	return s.Hash() == other.Hash()
}

// Less orders this value relative to other per the global noms value
// ordering (delegates to valueLess).
func (s Struct) Less(other Value) bool {
	return valueLess(s, other)
}

// Hash returns the value hash, computing and caching it on first use. The
// cache is written through the shared h pointer, so copies of this Struct
// share the cached hash. NOTE(review): the first call mutates shared state
// without synchronization — confirm callers never race here.
func (s Struct) Hash() hash.Hash {
	if s.h.IsEmpty() {
		*s.h = getHash(s)
	}
	return *s.h
}
// WalkValues invokes cb once for each field value, in field order.
func (s Struct) WalkValues(cb ValueCallback) {
	for i := range s.values {
		cb(s.values[i])
	}
}

// WalkRefs forwards WalkRefs to every field value so cb sees each value's refs.
func (s Struct) WalkRefs(cb RefCallback) {
	for i := range s.values {
		s.values[i].WalkRefs(cb)
	}
}
// Type returns the struct's noms type.
func (s Struct) Type() *Type {
	return s.t
}

// desc returns the StructDesc backing the struct's type (field names/types).
func (s Struct) desc() StructDesc {
	return s.t.Desc.(StructDesc)
}
// MaybeGet returns the value of a field in the struct. If the struct does
// not have a field with the name n then this returns (nil, false).
func (s Struct) MaybeGet(n string) (Value, bool) {
	_, i := s.desc().findField(n)
	if i == -1 {
		return nil, false
	}
	return s.values[i], true
}

// Get returns the value of a field in the struct. If the struct does not
// have a field with the name n then this panics (via d.Chk.Fail).
func (s Struct) Get(n string) Value {
	_, i := s.desc().findField(n)
	if i == -1 {
		d.Chk.Fail(fmt.Sprintf(`Struct has no field "%s"`, n))
	}
	return s.values[i]
}
// Set returns a new struct where the field n has been set to v. If n is not
// an existing field in the struct, or the type of v is not a subtype of the
// field's declared type, a new struct type is created (the whole struct is
// rebuilt through NewStruct); otherwise only the one value slot is replaced
// and the existing type is reused.
func (s Struct) Set(n string, v Value) Struct {
	f, i := s.desc().findField(n)
	if i == -1 || !IsSubtype(f.t, v.Type()) {
		// New/change field: rebuild field data and let NewStruct derive a
		// fresh type that includes/retypes n.
		data := make(StructData, len(s.values)+1)
		for i, f := range s.desc().fields {
			data[f.name] = s.values[i]
		}
		data[n] = v
		return NewStruct(s.desc().Name, data)
	}

	// Same type: copy the value slice (structs are immutable) and swap in v.
	values := make([]Value, len(s.values))
	copy(values, s.values)
	values[i] = v
	return Struct{values, s.t, &hash.Hash{}}
}
// Diff streams the field-level differences between s (new) and last (old)
// to changes, stopping early if closeChan is signaled (via sendChange).
// It performs a sorted-merge walk over the two field lists — valid because
// struct fields are stored in alphabetical order — emitting Modified for a
// shared field with unequal values, Added for a field only in s, and
// Removed for a field only in last.
func (s Struct) Diff(last Struct, changes chan<- ValueChanged, closeChan <-chan struct{}) {
	if s.Equals(last) {
		return
	}
	fs1, fs2 := s.Type().Desc.(StructDesc).fields, last.Type().Desc.(StructDesc).fields
	i1, i2 := 0, 0
	for i1 < len(fs1) && i2 < len(fs2) {
		f1, f2 := fs1[i1], fs2[i2]
		fn1, fn2 := f1.name, f2.name

		var change ValueChanged
		if fn1 == fn2 {
			// Field present in both; report only if the values differ.
			if !s.values[i1].Equals(last.values[i2]) {
				change = ValueChanged{ChangeType: DiffChangeModified, V: String(fn1)}
			}
			i1++
			i2++
		} else if fn1 < fn2 {
			// Field only in s (the newer struct).
			change = ValueChanged{ChangeType: DiffChangeAdded, V: String(fn1)}
			i1++
		} else {
			// Field only in last (the older struct).
			change = ValueChanged{ChangeType: DiffChangeRemoved, V: String(fn2)}
			i2++
		}

		// The zero ValueChanged means "no change for this step".
		if change != (ValueChanged{}) && !sendChange(changes, closeChan, change) {
			return
		}
	}

	// Drain remaining fields: anything left in fs1 was added, in fs2 removed.
	for ; i1 < len(fs1); i1++ {
		if !sendChange(changes, closeChan, ValueChanged{ChangeType: DiffChangeAdded, V: String(fs1[i1].name)}) {
			return
		}
	}

	for ; i2 < len(fs2); i2++ {
		if !sendChange(changes, closeChan, ValueChanged{ChangeType: DiffChangeRemoved, V: String(fs2[i2].name)}) {
			return
		}
	}
}
// Patterns governing struct field names. A valid field name matches
// [a-zA-Z][a-zA-Z0-9_]*; escapeChar prefixes hex-escaped characters in
// EscapeStructField.
var escapeChar = "Q"
var headFieldNamePattern = regexp.MustCompile("[a-zA-Z]")
var tailFieldNamePattern = regexp.MustCompile("[a-zA-Z0-9_]")
var spaceRegex = regexp.MustCompile("[ ]")
var escapeRegex = regexp.MustCompile(escapeChar)

var fieldNameComponentRe = regexp.MustCompile("^" + headFieldNamePattern.String() + tailFieldNamePattern.String() + "*")
var fieldNameRe = regexp.MustCompile(fieldNameComponentRe.String() + "$")

// encodingFunc maps a single-character string (plus the pattern valid at its
// position) to its replacement text in escapeField.
type encodingFunc func(string, *regexp.Regexp) string
// CamelCaseFieldName converts an arbitrary string into a camelCased struct
// field name: invalid characters are stripped (spaces kept as word breaks),
// words are joined with the first lowercased and the rest Title-cased.
// Returns "" when no valid field name can be produced.
func CamelCaseFieldName(input string) string {
	//strip invalid struct characters and leave spaces
	encode := func(s1 string, p *regexp.Regexp) string {
		if p.MatchString(s1) || spaceRegex.MatchString(s1) {
			return s1
		}
		return ""
	}

	strippedField := escapeField(input, encode)
	splitField := strings.Fields(strippedField)

	if len(splitField) == 0 {
		return ""
	}

	//Camelcase field
	output := strings.ToLower(splitField[0])
	if len(splitField) > 1 {
		for _, field := range splitField[1:] {
			output += strings.Title(strings.ToLower(field))
		}
	}
	//Because we are removing characters, we may generate an invalid field name
	//i.e. -- 1A B, we will remove the first bad chars and process until 1aB
	//1aB is invalid struct field name so we will return ""
	if !IsValidStructFieldName(output) {
		return ""
	}
	return output
}
// escapeField transforms input one rune at a time through encode, using the
// head-character pattern for the first rune and the tail pattern for all
// subsequent runes, and concatenates the results.
func escapeField(input string, encode encodingFunc) string {
	// Use strings.Builder instead of += in a loop: the original repeated
	// concatenation was quadratic in the input length.
	var b strings.Builder
	pattern := headFieldNamePattern
	for _, ch := range input {
		b.WriteString(encode(string([]rune{ch}), pattern))
		pattern = tailFieldNamePattern
	}
	return b.String()
}
// EscapeStructField escapes names for use as noms structs with regards to non CSV imported data.
// Disallowed characters are encoded as 'Q<hex-encoded-utf8-bytes>'.
// Note that Q itself is also escaped since it is the escape character.
func EscapeStructField(input string) string {
	// Fast path: nothing to escape.
	if !escapeRegex.MatchString(input) && IsValidStructFieldName(input) {
		return input
	}
	encode := func(s1 string, p *regexp.Regexp) string {
		if p.MatchString(s1) && s1 != escapeChar {
			return s1
		}

		// %X hex-encodes the character's UTF-8 bytes, prefixed by escapeChar.
		var hs = fmt.Sprintf("%X", s1)
		var buf bytes.Buffer
		buf.WriteString(escapeChar)
		// NOTE(review): each byte yields two hex digits, so len(hs) == 1
		// looks unreachable — confirm whether this padding branch is needed.
		if len(hs) == 1 {
			buf.WriteString("0")
		}
		buf.WriteString(hs)
		return buf.String()
	}
	return escapeField(input, encode)
}
// IsValidStructFieldName returns whether the name is valid as a field name in a struct.
// Valid names must start with `a-zA-Z` and after that `a-zA-Z0-9_`.
func IsValidStructFieldName(name string) bool {
	return fieldNameRe.MatchString(name)
}
// verifyFieldNames asserts that every name is a valid field name and that
// the list is in strictly increasing alphabetical order (which also implies
// uniqueness). Violations fail via d.Chk.
func verifyFieldNames(names []string) {
	if len(names) == 0 {
		return
	}

	verifyFieldName(names[0])
	for i := 1; i < len(names); i++ {
		verifyFieldName(names[i])
		if names[i] <= names[i-1] {
			d.Chk.Fail("Field names must be unique and ordered alphabetically")
		}
	}
}
// verifyName panics (via d.PanicIfTrue) when name is not a valid identifier;
// kind is interpolated into the message (" field" or "").
func verifyName(name, kind string) {
	d.PanicIfTrue(!IsValidStructFieldName(name), `Invalid struct%s name: "%s"`, kind, name)
}

// verifyFieldName validates a struct field name.
func verifyFieldName(name string) {
	verifyName(name, " field")
}

// verifyStructName validates a struct name; the empty name is allowed
// (anonymous struct).
func verifyStructName(name string) {
	if name != "" {
		verifyName(name, "")
	}
}
package schema
import (
"errors"
"strconv"
)
// Thermal models the Redfish Thermal resource for a chassis: its
// temperature sensors, fans, and fan redundancy information.
type Thermal struct {
	// The type of a resource. [RO]
	OdataType string `json:"@odata.type"`

	// The identifier that uniquely identifies the Resource within
	// the collection of similar Resources. [RO]
	Id string `json:"Id"`

	// The name of the Resource or array member. [RO]
	Name string `json:"Name"`

	// The set of temperature sensors for this chassis. [RW]
	Temperatures []ThermalTemperatures

	// The set of fans for this chassis. [RW]
	Fans []ThermalFans

	// The OEM extension. [RW] (currently disabled)
	// Oem ThermalOem `json:"Oem"`

	// The redundancy information for the set of fans in this chassis. [RW]
	Redundancy []ThermalRedundancy

	// The unique identifier for a resource. [RO]
	OdataId string `json:"@odata.id"`
}
// ThermalTemperatures describes a single temperature sensor on a chassis,
// including its current reading, alert thresholds, and reading range.
type ThermalTemperatures struct {
	// The unique identifier for a resource. [RO]
	OdataId string `json:"@odata.id"`
	// The identifier for the member within the collection. [RO]
	MemberId string `json:"MemberId"`
	// The temperature sensor name. [RO]
	Name string `json:"Name,omitempty"`
	// The numerical identifier of the temperature sensor. [RO]
	SensorNumber int64 `json:"SensorNumber,omitempty"`
	// The status and health of a Resource and its children. [RW]
	Status CommonStatus `json:"Status"`
	// The temperature in degrees Celsius. [RO]
	ReadingCelsius float64 `json:"ReadingCelsius,omitempty"`
	// The value at which the reading is above normal range. [RO]
	UpperThresholdNonCritical float64 `json:"UpperThresholdNonCritical,omitempty"`
	// The value at which the reading is above normal range but not yet fatal. [RO]
	UpperThresholdCritical float64 `json:"UpperThresholdCritical,omitempty"`
	// The value at which the reading is above normal range and fatal. [RO]
	UpperThresholdFatal float64 `json:"UpperThresholdFatal,omitempty"`
	// The value at which the reading is below normal range. [RO]
	LowerThresholdNonCritical float64 `json:"LowerThresholdNonCritical,omitempty"`
	// The value at which the reading is below normal range but not yet fatal. [RO]
	LowerThresholdCritical float64 `json:"LowerThresholdCritical,omitempty"`
	// The value at which the reading is below normal range and fatal. [RO]
	LowerThresholdFatal float64 `json:"LowerThresholdFatal,omitempty"`
	// Minimum value for this sensor. [RO]
	MinReadingRangeTemp float64 `json:"MinReadingRangeTemp,omitempty"`
	// Maximum value for this sensor. [RO]
	MaxReadingRangeTemp float64 `json:"MaxReadingRangeTemp,omitempty"`
	// The area or device to which this temperature measurement applies. [RO]
	// Valid values:
	// ACInput: An AC input.
	// ACMaintenanceBypassInput: An AC maintenance bypass input.
	// ACOutput: An AC output.
	// ACStaticBypassInput: An AC static bypass input.
	// ACUtilityInput: An AC utility input.
	// ASIC: An ASIC device, such as a networking chip or chipset component.
	// Accelerator: An accelerator.
	// Back: The back of the chassis.
	// Backplane: A backplane within the chassis.
	// CPU: A processor (CPU).
	// CPUSubsystem: The entire processor (CPU) subsystem.
	// Chassis: The entire chassis.
	// ComputeBay: Within a compute bay.
	// CoolingSubsystem: The entire cooling, or air and liquid, subsystem.
	// DCBus: A DC bus.
	// Exhaust: The air exhaust point or points or region of the chassis.
	// ExpansionBay: Within an expansion bay.
	// FPGA: An FPGA.
	// Fan: A fan.
	// Front: The front of the chassis.
	// GPU: A graphics processor (GPU).
	// GPUSubsystem: The entire graphics processor (GPU) subsystem.
	// Intake: The air intake point or points or region of the chassis.
	// LiquidInlet: The liquid inlet point of the chassis.
	// LiquidOutlet: The liquid outlet point of the chassis.
	// Lower: The lower portion of the chassis.
	// Memory: A memory device.
	// MemorySubsystem: The entire memory subsystem.
	// Motor: A motor.
	// NetworkBay: Within a networking bay.
	// NetworkingDevice: A networking device.
	// PowerSubsystem: The entire power subsystem.
	// PowerSupply: A power supply.
	// PowerSupplyBay: Within a power supply bay.
	// Rectifier: A rectifier device.
	// Room: The room.
	// StorageBay: Within a storage bay.
	// StorageDevice: A storage device.
	// SystemBoard: The system board (PCB).
	// Transformer: A transformer.
	// Upper: The upper portion of the chassis.
	// VoltageRegulator: A voltage regulator device.
	PhysicalContext string `json:"PhysicalContext"`
	// The areas or devices to which this temperature applies. [RO]
	RelatedItem []CommonOid
}
// ThermalFans describes a single fan on a chassis, including its speed
// reading, reading units, thresholds, and redundancy membership.
type ThermalFans struct {
	// The unique identifier for a resource. [RO]
	OdataId string `json:"@odata.id"`
	// The identifier for the member within the collection. [RO]
	MemberId string `json:"MemberId"`
	// The fan name. [RO]
	Name string `json:"Name,omitempty"`
	// The numerical identifier of the fan sensor. [RO]
	SensorNumber int64 `json:"SensorNumber,omitempty"`
	// The area or device to which this fan applies. [RO]
	// Valid values:
	// ACInput: An AC input.
	// ACMaintenanceBypassInput: An AC maintenance bypass input.
	// ACOutput: An AC output.
	// ACStaticBypassInput: An AC static bypass input.
	// ACUtilityInput: An AC utility input.
	// ASIC: An ASIC device, such as a networking chip or chipset component.
	// Accelerator: An accelerator.
	// Back: The back of the chassis.
	// Backplane: A backplane within the chassis.
	// CPU: A processor (CPU).
	// CPUSubsystem: The entire processor (CPU) subsystem.
	// Chassis: The entire chassis.
	// ComputeBay: Within a compute bay.
	// CoolingSubsystem: The entire cooling, or air and liquid, subsystem.
	// DCBus: A DC bus.
	// Exhaust: The air exhaust point or points or region of the chassis.
	// ExpansionBay: Within an expansion bay.
	// FPGA: An FPGA.
	// Fan: A fan.
	// Front: The front of the chassis.
	// GPU: A graphics processor (GPU).
	// GPUSubsystem: The entire graphics processor (GPU) subsystem.
	// Intake: The air intake point or points or region of the chassis.
	// LiquidInlet: The liquid inlet point of the chassis.
	// LiquidOutlet: The liquid outlet point of the chassis.
	// Lower: The lower portion of the chassis.
	// Memory: A memory device.
	// MemorySubsystem: The entire memory subsystem.
	// Motor: A motor.
	// NetworkBay: Within a networking bay.
	// NetworkingDevice: A networking device.
	// PowerSubsystem: The entire power subsystem.
	// PowerSupply: A power supply.
	// PowerSupplyBay: Within a power supply bay.
	// Rectifier: A rectifier device.
	// Room: The room.
	// StorageBay: Within a storage bay.
	// StorageDevice: A storage device.
	// SystemBoard: The system board (PCB).
	// Transformer: A transformer.
	// Upper: The upper portion of the chassis.
	// VoltageRegulator: A voltage regulator device.
	PhysicalContext string `json:"PhysicalContext"`
	// The status and health of a Resource and its children. [RW]
	Status CommonStatus `json:"Status"`
	// The fan speed. [RO]
	Reading int64 `json:"Reading"`
	// The units in which the fan reading and thresholds are measured. [RO]
	// Valid values:
	// Percent: The fan reading and thresholds are measured as a percentage.
	// RPM: The fan reading and thresholds are measured in rotations per minute.
	ReadingUnits string `json:"ReadingUnits"`
	// The value at which the reading is below normal range and fatal. [RO]
	LowerThresholdFatal int64 `json:"LowerThresholdFatal"`
	// Minimum value for this sensor. [RO]
	MinReadingRange int64 `json:"MinReadingRange"`
	// Maximum value for this sensor. [RO]
	MaxReadingRange int64 `json:"MaxReadingRange"`
	// The set of redundancy groups for this fan. [RW]
	Redundancy []CommonOid
	// The areas or devices to which this fan applies. [RO]
	RelatedItem []CommonOid
	// The OEM extension for this fan. [RW]
	Oem FanOem `json:"Oem"`
}
// ThermalLeds describes a thermal LED member on a chassis.
// NOTE(review): the field set is identical to ThermalFans (Reading,
// ReadingUnits, fan thresholds) — confirm this is intentional for LEDs.
type ThermalLeds struct {
	// The unique identifier for a resource. [RO]
	OdataId string `json:"@odata.id"`
	// The identifier for the member within the collection. [RO]
	MemberId string `json:"MemberId"`
	// The sensor name. [RO]
	Name string `json:"Name,omitempty"`
	// The numerical identifier of the sensor. [RO]
	SensorNumber int64 `json:"SensorNumber,omitempty"`
	// The area or device to which this measurement applies. [RO]
	// Valid values:
	// ACInput: An AC input.
	// ACMaintenanceBypassInput: An AC maintenance bypass input.
	// ACOutput: An AC output.
	// ACStaticBypassInput: An AC static bypass input.
	// ACUtilityInput: An AC utility input.
	// ASIC: An ASIC device, such as a networking chip or chipset component.
	// Accelerator: An accelerator.
	// Back: The back of the chassis.
	// Backplane: A backplane within the chassis.
	// CPU: A processor (CPU).
	// CPUSubsystem: The entire processor (CPU) subsystem.
	// Chassis: The entire chassis.
	// ComputeBay: Within a compute bay.
	// CoolingSubsystem: The entire cooling, or air and liquid, subsystem.
	// DCBus: A DC bus.
	// Exhaust: The air exhaust point or points or region of the chassis.
	// ExpansionBay: Within an expansion bay.
	// FPGA: An FPGA.
	// Fan: A fan.
	// Front: The front of the chassis.
	// GPU: A graphics processor (GPU).
	// GPUSubsystem: The entire graphics processor (GPU) subsystem.
	// Intake: The air intake point or points or region of the chassis.
	// LiquidInlet: The liquid inlet point of the chassis.
	// LiquidOutlet: The liquid outlet point of the chassis.
	// Lower: The lower portion of the chassis.
	// Memory: A memory device.
	// MemorySubsystem: The entire memory subsystem.
	// Motor: A motor.
	// NetworkBay: Within a networking bay.
	// NetworkingDevice: A networking device.
	// PowerSubsystem: The entire power subsystem.
	// PowerSupply: A power supply.
	// PowerSupplyBay: Within a power supply bay.
	// Rectifier: A rectifier device.
	// Room: The room.
	// StorageBay: Within a storage bay.
	// StorageDevice: A storage device.
	// SystemBoard: The system board (PCB).
	// Transformer: A transformer.
	// Upper: The upper portion of the chassis.
	// VoltageRegulator: A voltage regulator device.
	PhysicalContext string `json:"PhysicalContext"`
	// The status and health of a Resource and its children. [RW]
	Status CommonStatus `json:"Status"`
	// The sensor reading. [RO]
	Reading int64 `json:"Reading"`
	// The units in which the reading and thresholds are measured. [RO]
	// Valid values:
	// Percent: The reading and thresholds are measured as a percentage.
	// RPM: The reading and thresholds are measured in rotations per minute.
	ReadingUnits string `json:"ReadingUnits"`
	// The value at which the reading is below normal range and fatal. [RO]
	LowerThresholdFatal int64 `json:"LowerThresholdFatal"`
	// Minimum value for this sensor. [RO]
	MinReadingRange int64 `json:"MinReadingRange"`
	// Maximum value for this sensor. [RO]
	MaxReadingRange int64 `json:"MaxReadingRange"`
	// The set of redundancy groups for this member. [RW]
	Redundancy []CommonOid
	// The areas or devices to which this member applies. [RO]
	RelatedItem []CommonOid
}
// ThermalButtons describes a thermal button member on a chassis.
// NOTE(review): the field set is identical to ThermalFans (Reading,
// ReadingUnits, fan thresholds) — confirm this is intentional for buttons.
type ThermalButtons struct {
	// The unique identifier for a resource. [RO]
	OdataId string `json:"@odata.id"`
	// The identifier for the member within the collection. [RO]
	MemberId string `json:"MemberId"`
	// The sensor name. [RO]
	Name string `json:"Name,omitempty"`
	// The numerical identifier of the sensor. [RO]
	SensorNumber int64 `json:"SensorNumber,omitempty"`
	// The area or device to which this measurement applies. [RO]
	// Valid values:
	// ACInput: An AC input.
	// ACMaintenanceBypassInput: An AC maintenance bypass input.
	// ACOutput: An AC output.
	// ACStaticBypassInput: An AC static bypass input.
	// ACUtilityInput: An AC utility input.
	// ASIC: An ASIC device, such as a networking chip or chipset component.
	// Accelerator: An accelerator.
	// Back: The back of the chassis.
	// Backplane: A backplane within the chassis.
	// CPU: A processor (CPU).
	// CPUSubsystem: The entire processor (CPU) subsystem.
	// Chassis: The entire chassis.
	// ComputeBay: Within a compute bay.
	// CoolingSubsystem: The entire cooling, or air and liquid, subsystem.
	// DCBus: A DC bus.
	// Exhaust: The air exhaust point or points or region of the chassis.
	// ExpansionBay: Within an expansion bay.
	// FPGA: An FPGA.
	// Fan: A fan.
	// Front: The front of the chassis.
	// GPU: A graphics processor (GPU).
	// GPUSubsystem: The entire graphics processor (GPU) subsystem.
	// Intake: The air intake point or points or region of the chassis.
	// LiquidInlet: The liquid inlet point of the chassis.
	// LiquidOutlet: The liquid outlet point of the chassis.
	// Lower: The lower portion of the chassis.
	// Memory: A memory device.
	// MemorySubsystem: The entire memory subsystem.
	// Motor: A motor.
	// NetworkBay: Within a networking bay.
	// NetworkingDevice: A networking device.
	// PowerSubsystem: The entire power subsystem.
	// PowerSupply: A power supply.
	// PowerSupplyBay: Within a power supply bay.
	// Rectifier: A rectifier device.
	// Room: The room.
	// StorageBay: Within a storage bay.
	// StorageDevice: A storage device.
	// SystemBoard: The system board (PCB).
	// Transformer: A transformer.
	// Upper: The upper portion of the chassis.
	// VoltageRegulator: A voltage regulator device.
	PhysicalContext string `json:"PhysicalContext"`
	// The status and health of a Resource and its children. [RW]
	Status CommonStatus `json:"Status"`
	// The sensor reading. [RO]
	Reading int64 `json:"Reading"`
	// The units in which the reading and thresholds are measured. [RO]
	// Valid values:
	// Percent: The reading and thresholds are measured as a percentage.
	// RPM: The reading and thresholds are measured in rotations per minute.
	ReadingUnits string `json:"ReadingUnits"`
	// The value at which the reading is below normal range and fatal. [RO]
	LowerThresholdFatal int64 `json:"LowerThresholdFatal"`
	// Minimum value for this sensor. [RO]
	MinReadingRange int64 `json:"MinReadingRange"`
	// Maximum value for this sensor. [RO]
	MaxReadingRange int64 `json:"MaxReadingRange"`
	// The set of redundancy groups for this member. [RW]
	Redundancy []CommonOid
	// The areas or devices to which this member applies. [RO]
	RelatedItem []CommonOid
}
// ThermalRedundancy describes the redundancy configuration for a set of
// thermal components (e.g. the fans of a chassis).
type ThermalRedundancy struct {
	// The unique identifier for a resource. [RO]
	OdataId string `json:"@odata.id"`
	// The identifier for the member within the collection. [RO]
	MemberId string `json:"MemberId"`
	// The redundancy group name. [RO]
	Name string `json:"Name,omitempty"`
	// The links to components of this redundancy set. [RO]
	RedundancySet []CommonOid
	// The redundancy mode of the group. [RW]
	// Valid values:
	// Failover: Failure of one unit automatically causes a standby or offline unit in the redundancy set to take over its functions.
	// N+m: Multiple units are available and active such that normal operation will continue if one or more units fail.
	// NotRedundant: The subsystem is not configured in a redundancy mode,
	// either due to configuration or the functionality has been disabled by the user.
	// Sharing: Multiple units contribute or share such that operation will continue, but at a reduced capacity, if one or more units fail.
	// Sparing: One or more spare units are available to take over the function of a failed unit, but takeover is not automatic.
	Mode string `json:"Mode"`
	// The status and health of a Resource and its children. [RW]
	Status CommonStatus `json:"Status"`
	// The minimum number of members needed for this group to be redundant. [RO]
	MinNumNeeded int64 `json:"MinNumNeeded,omitempty"`
	// The maximum number of members allowable for this particular redundancy group. [RO]
	MaxNumSupported int64 `json:"MaxNumSupported,omitempty"`
}
// FanOem wraps the vendor-specific (OEM) extension of a fan resource.
type FanOem struct {
	// Custom carries this vendor's fan extension fields.
	Custom FanOemCustom `json:"Custom"`
}
// FanOemCustom is the vendor-specific fan extension, exposing a writable
// duty-cycle setting.
type FanOemCustom struct {
	// The duty cycle of the fan. [RW]
	// Valid values:
	// "10"-"100": The duty cycle can be set from 10 to 100 (Unit:%).
	Duty string `json:"Duty"`
}

// Validation checks that Duty is a decimal integer within the documented
// range "10"-"100" (percent). It returns a descriptive error otherwise.
func (fan FanOemCustom) Validation() error {
	if len(fan.Duty) == 0 {
		return errors.New("The duty is empty")
	}
	duty, err := strconv.ParseInt(fan.Duty, 10, 32)
	if err != nil {
		return errors.New("The duty is not a number")
	}
	// The documented minimum duty cycle is 10%, so reject anything below it
	// (the previous lower bound of 0 allowed undocumented values 0-9).
	if duty < 10 || duty > 100 {
		return errors.New("The duty is not a valid number")
	}
	return nil
}
package doctor
import (
"fmt"
"github.com/pkg/errors"
"github.com/lieut-data/go-moneywell/api"
"github.com/lieut-data/go-moneywell/api/money"
)
const (
	// ProblemNotFullySplit identifies a split transaction whose bucketed children do not sum
	// to the transaction amount. This leads to an imbalance that doesn't show up as
	// unassigned.
	ProblemNotFullySplit = 1

	// ProblemSplitParentAssignedBucket identifies a split transaction that is itself assigned
	// a bucket. Only the children should be assigned to buckets. This problem has never been
	// observed in an actual MoneyWell document.
	ProblemSplitParentAssignedBucket = 2

	// ProblemTransferInsideCashFlowAssignedBucket identifies a transfer that should not be
	// assigned a bucket since both accounts are inside the cash flow.
	ProblemTransferInsideCashFlowAssignedBucket = 3

	// ProblemTransferOutsideCashFlowAssignedBucket identifies a transfer that should not be
	// assigned a bucket since both accounts are outside the cash flow.
	ProblemTransferOutsideCashFlowAssignedBucket = 4

	// ProblemTransferOutOfCashFlowMissingBucket identifies a transfer that should be assigned
	// a bucket since it moves money out of the cash flow.
	ProblemTransferOutOfCashFlowMissingBucket = 5

	// ProblemTransferFromCashFlowAssignedBucket identifies a transfer that should not be
	// assigned a bucket since it receives money from inside the cash flow.
	ProblemTransferFromCashFlowAssignedBucket = 6

	// ProblemBucketOptionalInsideCashFlow identifies a transaction marked as bucket optional
	// that should not be.
	ProblemBucketOptionalInsideCashFlow = 7

	// ProblemMissingBucketInsideCashFlow identifies a transaction missing an assigned bucket.
	ProblemMissingBucketInsideCashFlow = 8

	// ProblemBucketOutsideCashFlow identifies a non-transfer transaction incorrectly having
	// a bucket assigned.
	ProblemBucketOutsideCashFlow = 9
)
// ProblematicTransaction represents a transaction diagnosed with a potential problem.
type ProblematicTransaction struct {
	// Transaction is the primary key of the diagnosed transaction.
	Transaction int64
	// Problem is one of the Problem* constants identifying the diagnosis.
	Problem int
	// Description is a human-readable explanation of the problem.
	Description string
}
// GetProblematicTransactions finds transactions with potential problems, typically leading to
// an imbalance between accounts and buckets within MoneyWell. Transactions dated before the
// cash flow start date, and $0.00 transactions, are skipped. It returns one
// ProblematicTransaction per diagnosed problem (a transaction may appear more than once),
// or an error if a transaction references an unknown account.
func GetProblematicTransactions(
	settings api.Settings,
	accounts []api.Account,
	transactions []api.Transaction,
) ([]ProblematicTransaction, error) {
	problematicTransactions := []ProblematicTransaction{}

	for _, transaction := range transactions {
		// Ignore transactions before the cash flow start date. They won't contribute
		// to any current imbalance.
		if transaction.Date.Before(settings.CashFlowStartDate) {
			continue
		}

		// Ignore $0.00 transactions. These won't contribute to an imbalance, and might
		// be used to demarcate initial balances.
		if transaction.Amount.IsZero() {
			continue
		}

		account, err := getAccount(accounts, transaction.Account)
		if err != nil {
			return nil, errors.WithStack(err)
		}

		// Run each diagnostic in turn, accumulating any problems found. The closures
		// adapt the differing checker signatures to a uniform shape.
		checks := []func() ([]ProblematicTransaction, error){
			func() ([]ProblematicTransaction, error) {
				return checkSplitTransaction(account, transactions, transaction)
			},
			func() ([]ProblematicTransaction, error) {
				return checkTransferTransaction(accounts, account, transactions, transaction)
			},
			func() ([]ProblematicTransaction, error) {
				return checkBucketOptionalTransaction(account, transactions, transaction)
			},
			func() ([]ProblematicTransaction, error) {
				return checkMissingBucketTransaction(account, transactions, transaction)
			},
			func() ([]ProblematicTransaction, error) {
				return checkInvalidBucketTransaction(account, transactions, transaction)
			},
		}
		for _, check := range checks {
			problems, err := check()
			if err != nil {
				return nil, errors.WithStack(err)
			}
			problematicTransactions = append(problematicTransactions, problems...)
		}
	}

	return problematicTransactions, nil
}
// checkSplitTransaction diagnoses problems with a split transaction: a split
// parent that is itself assigned a bucket, and a split whose children do not
// sum to the parent amount. Non-split transactions pass without problems.
func checkSplitTransaction(
	account api.Account,
	transactions []api.Transaction,
	transaction api.Transaction,
) ([]ProblematicTransaction, error) {
	if !transaction.IsSplit {
		return nil, nil
	}

	problematicTransactions := []ProblematicTransaction{}

	// The split parent in a transaction should not be assigned a bucket.
	if transaction.Bucket != 0 {
		problematicTransactions = append(problematicTransactions, ProblematicTransaction{
			Transaction: transaction.PrimaryKey,
			Problem:     ProblemSplitParentAssignedBucket,
			Description: fmt.Sprintf(
				"%s should not be assigned to a bucket",
				describeTransaction("split parent", account, transaction),
			),
		})
	}

	// The children of a split transaction should sum to the transaction amount. Otherwise,
	// this creates an imbalance that doesn't even show up in the "Unassigned" Smart Bucket
	// within MoneyWell.

	// Find and sum the children
	// TODO: Avoid O(n^2) only if this ever seems slow.
	childBalance := money.Money{}
	for _, child := range transactions {
		if child.SplitParent == transaction.PrimaryKey {
			childBalance = childBalance.Add(child.Amount)
		}
	}

	if transaction.Amount != childBalance {
		problematicTransactions = append(problematicTransactions, ProblematicTransaction{
			Transaction: transaction.PrimaryKey,
			Problem:     ProblemNotFullySplit,
			Description: fmt.Sprintf(
				"%s is not fully split (off by %s)",
				describeTransaction(
					"transaction",
					account,
					transaction,
				),
				// Report the shortfall as (amount - sum of children).
				transaction.Amount.Add(
					childBalance.Multiply(-1),
				),
			),
		})
	}

	return problematicTransactions, nil
}
// checkTransferTransaction diagnoses bucket-assignment problems on transfers.
// Only the side of a transfer leaving the cash flow should carry a bucket:
// transfers wholly inside or wholly outside the cash flow must have none,
// transfers out of the cash flow must have one, and transfers received from
// inside the cash flow must have none. Non-transfers and splits pass through.
func checkTransferTransaction(
	accounts []api.Account,
	account api.Account,
	transactions []api.Transaction,
	transaction api.Transaction,
) ([]ProblematicTransaction, error) {
	if !transaction.IsTransfer() {
		return nil, nil
	}

	// Assume split transactions are checked elsewhere.
	if transaction.IsSplit {
		return nil, nil
	}

	problematicTransactions := []ProblematicTransaction{}

	transferAccount, err := getAccount(accounts, transaction.TransferAccount)
	if err != nil {
		return nil, errors.WithStack(err)
	}

	// A transfer between accounts inside the cash flow should not have a bucket assigned.
	if account.IncludeInCashFlow && transferAccount.IncludeInCashFlow && transaction.Bucket != 0 {
		problematicTransactions = append(problematicTransactions, ProblematicTransaction{
			Transaction: transaction.PrimaryKey,
			Problem:     ProblemTransferInsideCashFlowAssignedBucket,
			Description: fmt.Sprintf(
				"%s between accounts in the cash flow should not be assigned to a bucket",
				describeTransaction("transfer", account, transaction),
			),
		})
	}

	// A transfer between accounts outside the cash flow should not have a bucket assigned.
	if !account.IncludeInCashFlow && !transferAccount.IncludeInCashFlow && transaction.Bucket != 0 {
		problematicTransactions = append(problematicTransactions, ProblematicTransaction{
			Transaction: transaction.PrimaryKey,
			Problem:     ProblemTransferOutsideCashFlowAssignedBucket,
			Description: fmt.Sprintf(
				"%s between accounts outside the cash flow should not be assigned to a bucket",
				describeTransaction("transfer", account, transaction),
			),
		})
	}

	// A transfer between accounts with one in the cash flow and one outside should have a
	// bucket assigned only on the account inside the cash flow.
	if account.IncludeInCashFlow && !transferAccount.IncludeInCashFlow && transaction.Bucket == 0 {
		problematicTransactions = append(problematicTransactions, ProblematicTransaction{
			Transaction: transaction.PrimaryKey,
			Problem:     ProblemTransferOutOfCashFlowMissingBucket,
			Description: fmt.Sprintf(
				"%s from account inside cash flow to account outside cash flow should be assigned to a bucket",
				describeTransaction("transfer", account, transaction),
			),
		})
	} else if !account.IncludeInCashFlow && transferAccount.IncludeInCashFlow && transaction.Bucket != 0 {
		problematicTransactions = append(problematicTransactions, ProblematicTransaction{
			Transaction: transaction.PrimaryKey,
			Problem:     ProblemTransferFromCashFlowAssignedBucket,
			Description: fmt.Sprintf(
				"%s from account outside cash flow to account inside cash flow should not be assigned to a bucket",
				describeTransaction("transfer", account, transaction),
			),
		})
	}

	return problematicTransactions, nil
}
// checkBucketOptionalTransaction flags a transaction that is marked as bucket
// optional inside a cash flow account, where a bucket should be required.
// Splits and transfers are diagnosed by their own checks, accounts outside
// the cash flow cannot cause an imbalance, and a bucket-optional transaction
// that nonetheless has a bucket assigned seems to occur from time to time
// normally — none of those are flagged here.
func checkBucketOptionalTransaction(
	account api.Account,
	transactions []api.Transaction,
	transaction api.Transaction,
) ([]ProblematicTransaction, error) {
	isSuspect := transaction.IsBucketOptional &&
		!transaction.IsSplit &&
		account.IncludeInCashFlow &&
		transaction.Bucket == 0 &&
		!transaction.IsTransfer()
	if !isSuspect {
		return nil, nil
	}

	problem := ProblematicTransaction{
		Transaction: transaction.PrimaryKey,
		Problem:     ProblemBucketOptionalInsideCashFlow,
		Description: fmt.Sprintf(
			"%s should not be marked as bucket optional in a cash flow account",
			describeTransaction("transaction", account, transaction),
		),
	}
	return []ProblematicTransaction{problem}, nil
}
// checkMissingBucketTransaction flags an ordinary transaction in a cash flow
// account that has no bucket assigned.
func checkMissingBucketTransaction(
	account api.Account,
	transactions []api.Transaction,
	transaction api.Transaction,
) ([]ProblematicTransaction, error) {
	switch {
	case transaction.Bucket != 0:
		// A bucket is assigned, so nothing is missing.
		return nil, nil
	case transaction.IsTransfer() || transaction.IsSplit:
		// Transfers and splits are diagnosed by their own checks.
		return nil, nil
	case !account.IncludeInCashFlow:
		// Accounts outside the cash flow won't impact the cash flow.
		return nil, nil
	}

	problem := ProblematicTransaction{
		Transaction: transaction.PrimaryKey,
		Problem:     ProblemMissingBucketInsideCashFlow,
		Description: fmt.Sprintf(
			"%s is not assigned to a bucket",
			describeTransaction("transaction", account, transaction),
		),
	}
	return []ProblematicTransaction{problem}, nil
}
// checkInvalidBucketTransaction flags an ordinary transaction in an account
// outside the cash flow that nonetheless has a bucket assigned.
func checkInvalidBucketTransaction(
	account api.Account,
	transactions []api.Transaction,
	transaction api.Transaction,
) ([]ProblematicTransaction, error) {
	switch {
	case transaction.IsTransfer() || transaction.IsSplit:
		// Transfers and splits are diagnosed by their own checks.
		return nil, nil
	case transaction.Bucket == 0:
		// No bucket assigned, so nothing is invalid.
		return nil, nil
	case account.IncludeInCashFlow:
		// Inside the cash flow, having a bucket assigned is normal.
		return nil, nil
	}

	problem := ProblematicTransaction{
		Transaction: transaction.PrimaryKey,
		Problem:     ProblemBucketOutsideCashFlow,
		Description: fmt.Sprintf(
			"%s is incorrectly assigned to a bucket",
			describeTransaction("transaction", account, transaction),
		),
	}
	return []ProblematicTransaction{problem}, nil
}
// describeTransaction renders a one-line, human-readable summary of a
// transaction for problem descriptions, e.g.
// `transaction[42] on 2006-01-02 against Chequing for $5.00 (memo)`.
func describeTransaction(description string, account api.Account, transaction api.Transaction) string {
	memo := transaction.Memo
	// Append the memo as a parenthesized suffix whenever one is present.
	// (The previous `> 1` check silently dropped one-character memos.)
	if len(memo) > 0 {
		memo = fmt.Sprintf(" (%s)", memo)
	}

	return fmt.Sprintf(
		"%s[%d] on %s against %s for %s%s",
		description,
		transaction.PrimaryKey,
		transaction.Date.Format("2006-01-02"),
		account.Name,
		transaction.Amount,
		memo,
	)
}
func getAccount(accounts []api.Account, accountPrimaryKey int64) (api.Account, error) {
// TODO: Avoid O(n^2) only if this ever seems slow.
for _, account := range accounts {
if account.PrimaryKey == accountPrimaryKey {
return account, nil
}
}
return api.Account{}, errors.Errorf("failed to find account %v", accountPrimaryKey)
} | internal/doctor/problems.go | 0.64969 | 0.440469 | problems.go | starcoder |
package iso20022
// DeliverInformation16 holds the parameters applied to the settlement of a
// security transfer on the delivering side.
type DeliverInformation16 struct {
	// Party that delivers (transferor) securities to the receiving agent (transferee).
	Transferor *PartyIdentification70Choice `xml:"Trfr,omitempty"`

	// Account from which the securities are to be delivered.
	TransferorRegisteredAccount *Account19 `xml:"TrfrRegdAcct,omitempty"`

	// Identification of a related party or intermediary.
	IntermediaryInformation []*Intermediary34 `xml:"IntrmyInf,omitempty"`

	// Date and time at which the securities are to be exchanged at the International Central Securities Depository (ICSD) or Central Securities Depository (CSD).
	RequestedSettlementDate *ISODate `xml:"ReqdSttlmDt,omitempty"`

	// Total amount of money paid /to be paid or received in exchange for the financial instrument in the individual order.
	SettlementAmount *ActiveCurrencyAndAmount `xml:"SttlmAmt,omitempty"`

	// Indicates whether the settlement amount includes the stamp duty amount.
	StampDuty *StampDutyType2Code `xml:"StmpDty,omitempty"`

	// Deal amount.
	NetAmount *ActiveCurrencyAndAmount `xml:"NetAmt,omitempty"`

	// Chain of parties involved in the settlement of a transaction.
	SettlementPartiesDetails *DeliveringPartiesAndAccount13 `xml:"SttlmPtiesDtls,omitempty"`

	// Charge related to the transfer of a financial instrument.
	ChargeDetails []*Charge29 `xml:"ChrgDtls,omitempty"`

	// Commission related to the transfer of a financial instrument.
	CommissionDetails []*Commission23 `xml:"ComssnDtls,omitempty"`

	// Tax related to the transfer of a financial instrument.
	TaxDetails []*Tax28 `xml:"TaxDtls,omitempty"`

	// Specifies foreign exchange details applied to the payment of charges, taxes and commissions as a result of the transfer.
	ForeignExchangeDetails []*ForeignExchangeTerms26 `xml:"FXDtls,omitempty"`

	// Indicates whether the financial instrument is to be physically delivered.
	PhysicalTransfer *PhysicalTransferType1Code `xml:"PhysTrf,omitempty"`

	// Parameters of a physical delivery.
	PhysicalTransferDetails *DeliveryParameters4 `xml:"PhysTrfDtls,omitempty"`

	// Unique and unambiguous investor's identification of a transfer. This reference can typically be used in a hub scenario to give the reference of the transfer as assigned by the underlying client.
	ClientReference *AdditionalReference7 `xml:"ClntRef,omitempty"`
}
// AddTransferor initializes the Transferor element and returns it for population.
func (d *DeliverInformation16) AddTransferor() *PartyIdentification70Choice {
	d.Transferor = new(PartyIdentification70Choice)
	return d.Transferor
}

// AddTransferorRegisteredAccount initializes the TransferorRegisteredAccount element and returns it for population.
func (d *DeliverInformation16) AddTransferorRegisteredAccount() *Account19 {
	d.TransferorRegisteredAccount = new(Account19)
	return d.TransferorRegisteredAccount
}

// AddIntermediaryInformation appends a new IntermediaryInformation element and returns it for population.
func (d *DeliverInformation16) AddIntermediaryInformation() *Intermediary34 {
	newValue := new (Intermediary34)
	d.IntermediaryInformation = append(d.IntermediaryInformation, newValue)
	return newValue
}

// SetRequestedSettlementDate sets the RequestedSettlementDate element from an ISO date string.
func (d *DeliverInformation16) SetRequestedSettlementDate(value string) {
	d.RequestedSettlementDate = (*ISODate)(&value)
}

// SetSettlementAmount sets the SettlementAmount element from an amount and currency code.
func (d *DeliverInformation16) SetSettlementAmount(value, currency string) {
	d.SettlementAmount = NewActiveCurrencyAndAmount(value, currency)
}

// SetStampDuty sets the StampDuty element from a stamp duty type code.
func (d *DeliverInformation16) SetStampDuty(value string) {
	d.StampDuty = (*StampDutyType2Code)(&value)
}

// SetNetAmount sets the NetAmount element from an amount and currency code.
func (d *DeliverInformation16) SetNetAmount(value, currency string) {
	d.NetAmount = NewActiveCurrencyAndAmount(value, currency)
}

// AddSettlementPartiesDetails initializes the SettlementPartiesDetails element and returns it for population.
func (d *DeliverInformation16) AddSettlementPartiesDetails() *DeliveringPartiesAndAccount13 {
	d.SettlementPartiesDetails = new(DeliveringPartiesAndAccount13)
	return d.SettlementPartiesDetails
}

// AddChargeDetails appends a new ChargeDetails element and returns it for population.
func (d *DeliverInformation16) AddChargeDetails() *Charge29 {
	newValue := new (Charge29)
	d.ChargeDetails = append(d.ChargeDetails, newValue)
	return newValue
}

// AddCommissionDetails appends a new CommissionDetails element and returns it for population.
func (d *DeliverInformation16) AddCommissionDetails() *Commission23 {
	newValue := new (Commission23)
	d.CommissionDetails = append(d.CommissionDetails, newValue)
	return newValue
}

// AddTaxDetails appends a new TaxDetails element and returns it for population.
func (d *DeliverInformation16) AddTaxDetails() *Tax28 {
	newValue := new (Tax28)
	d.TaxDetails = append(d.TaxDetails, newValue)
	return newValue
}

// AddForeignExchangeDetails appends a new ForeignExchangeDetails element and returns it for population.
func (d *DeliverInformation16) AddForeignExchangeDetails() *ForeignExchangeTerms26 {
	newValue := new (ForeignExchangeTerms26)
	d.ForeignExchangeDetails = append(d.ForeignExchangeDetails, newValue)
	return newValue
}

// SetPhysicalTransfer sets the PhysicalTransfer element from a physical transfer type code.
func (d *DeliverInformation16) SetPhysicalTransfer(value string) {
	d.PhysicalTransfer = (*PhysicalTransferType1Code)(&value)
}

// AddPhysicalTransferDetails initializes the PhysicalTransferDetails element and returns it for population.
func (d *DeliverInformation16) AddPhysicalTransferDetails() *DeliveryParameters4 {
	d.PhysicalTransferDetails = new(DeliveryParameters4)
	return d.PhysicalTransferDetails
}

// AddClientReference initializes the ClientReference element and returns it for population.
func (d *DeliverInformation16) AddClientReference() *AdditionalReference7 {
	d.ClientReference = new(AdditionalReference7)
	return d.ClientReference
}
package db
import (
"00-newapp-template/pkg/acme"
)
// SimpleDB is an in-memory store backed by two slices holding Gophers and Things.
type SimpleDB struct {
	gg []acme.Gopher // table of Gophers
	tt []acme.Thing  // table of Things, each linked to a Gopher by GopherID
}
// NewSimpleDB provides the most basic 'mock' data possible for Gophers and Things:
// four Gophers, each owning a Hat, Shoes, and a Belt.
func NewSimpleDB() (s SimpleDB) {
	s.gg = []acme.Gopher{
		{ID: "1", Name: "Gopher1", Description: "The first Gopher (#1st)"},
		{ID: "2", Name: "Gopher2", Description: "The second Gopher (#2nd)"},
		{ID: "4", Name: "Gopher4", Description: "The fourth Gopher (#4th)"},
		{ID: "8", Name: "Gopher8", Description: "The eighth Gopher (#8th)"},
	}
	s.tt = []acme.Thing{
		{ID: "1", GopherID: "1", Name: "Head", Description: "Hat"},
		{ID: "2", GopherID: "2", Name: "Head", Description: "Hat"},
		{ID: "3", GopherID: "4", Name: "Head", Description: "Hat"},
		{ID: "4", GopherID: "8", Name: "Head", Description: "Hat"},
		{ID: "5", GopherID: "1", Name: "Feet", Description: "Shoes"},
		{ID: "6", GopherID: "2", Name: "Feet", Description: "Shoes"},
		{ID: "7", GopherID: "4", Name: "Feet", Description: "Shoes"},
		{ID: "8", GopherID: "8", Name: "Feet", Description: "Shoes"},
		{ID: "9", GopherID: "1", Name: "Waist", Description: "Belt"},
		{ID: "10", GopherID: "2", Name: "Waist", Description: "Belt"},
		{ID: "11", GopherID: "4", Name: "Waist", Description: "Belt"},
		{ID: "12", GopherID: "8", Name: "Waist", Description: "Belt"},
	}
	return
}
// GopherThings returns the acme.Things belonging to the Gopher with the given ID.
func (s *SimpleDB) GopherThings(gopherID string) (things []acme.Thing) {
	for i := range s.tt {
		if string(s.tt[i].GopherID) == gopherID {
			things = append(things, s.tt[i])
		}
	}
	return
}
// Gophers returns the array of acme.Gophers held in the store.
func (s *SimpleDB) Gophers() []acme.Gopher {
	return s.gg
}
// Gophers returns array of acme.Gophers
func (s *SimpleDB) Things() []acme.Thing {
return s.tt
}
// AddGopher appends g to the database; a Gopher whose ID already exists is
// silently ignored (no update is performed).
func (s *SimpleDB) AddGopher(g acme.Gopher) {
	for i := range s.gg {
		// Skip if ID already exists.
		if s.gg[i].ID == g.ID {
			return
		}
	}
	s.gg = append(s.gg, g)
}
// AddThing appends t to the database; a Thing whose ID already exists is
// silently ignored (no update is performed).
func (s *SimpleDB) AddThing(t acme.Thing) {
	for i := range s.tt {
		// Skip if ID already exists.
		if s.tt[i].ID == t.ID {
			return
		}
	}
	s.tt = append(s.tt, t)
}
// DeleteGopher removes the Gopher with the given ID and cascades the delete
// to every Thing owned by it.
func (s *SimpleDB) DeleteGopher(gopherID string) {
	var keptGophers []acme.Gopher
	for _, g := range s.gg {
		if string(g.ID) != gopherID {
			keptGophers = append(keptGophers, g)
		}
	}
	s.gg = keptGophers

	var keptThings []acme.Thing
	for _, t := range s.tt {
		if string(t.GopherID) != gopherID {
			keptThings = append(keptThings, t)
		}
	}
	s.tt = keptThings
}
// UpdateGopher replaces every Gopher whose ID matches with the one passed in.
func (s *SimpleDB) UpdateGopher(newGopher acme.Gopher) {
	for i := range s.gg {
		if string(s.gg[i].ID) == string(newGopher.ID) {
			s.gg[i] = newGopher
		}
	}
}
// UpdateThing replaces every Thing matching both ID and GopherID with the one
// passed in.
func (s *SimpleDB) UpdateThing(newThing acme.Thing) {
	for i := range s.tt {
		if s.tt[i].GopherID == newThing.GopherID && s.tt[i].ID == newThing.ID {
			s.tt[i] = newThing
		}
	}
}
// DeleteThing deletes the Thing that matches both the Gopher ID and Thing ID.
func (s *SimpleDB) DeleteThing(gopherID string, thingID string) {
	var things []acme.Thing
	for _, t := range s.tt {
		// Drop the matching thing; keep everything else.
		if string(t.GopherID) == gopherID && string(t.ID) == thingID {
			continue
		}
		things = append(things, t)
	}
	s.tt = things
} | 00-newapp-template/pkg/server/db/db.go | 0.526586 | 0.424591 | db.go | starcoder
package tools
import (
"math"
"strings"
)
// CalculateAge converts an age expressed in "year", "month", "week" or "day"
// units to years, rounded UP to two decimal places. Unknown units are treated
// as years. The redundant `if unit != "year"` wrapper was removed: a switch
// with no matching case is already a no-op.
func CalculateAge(age float64, unit string) float64 {
	calAge := age
	switch unit {
	case "month":
		calAge = age / 12
	case "week":
		calAge = age / 52
	case "day":
		calAge = age / 365
	}
	return math.Ceil(calAge*100) / 100
}
// CalculateExercise normalizes an exercise amount to moderate-intensity
// minutes per week. "high" intensity counts double, "low" counts half
// (integer division); unknown frequency is treated as weekly.
// Replaced C-style `(int)(x)` conversions with idiomatic Go arithmetic.
func CalculateExercise(value int, unit, frequency, intensity string) int {
	minutes := value
	if unit == "hours" {
		minutes *= 60
	}
	switch intensity {
	case "high":
		minutes *= 2
	case "low":
		minutes /= 2
	}
	switch frequency {
	case "daily":
		minutes *= 7
	case "monthly":
		minutes /= 4
	}
	return minutes
}
// CalculateDietConsumption normalizes a consumption count to servings per
// week; unknown frequencies are assumed to be weekly already.
func CalculateDietConsumption(value int, frequency string) int {
	switch frequency {
	case "daily":
		return value * 7
	case "monthly":
		return value / 4
	}
	return value
}
// CalculateMMOLValue converts a glucose reading to mmol/L; values given in
// mg/dL (any letter case) are divided by 18, anything else passes through.
func CalculateMMOLValue(value float64, unit string) float64 {
	if strings.ToLower(unit) != "mg/dl" {
		return value
	}
	return value / 18
}
// CalculateCholMMOLValue converts a cholesterol reading to mmol/L; values in
// mg/dL (any letter case) are divided by 38.67, anything else passes through.
func CalculateCholMMOLValue(value float64, unit string) float64 {
	if strings.ToLower(unit) != "mg/dl" {
		return value
	}
	return value / 38.67
}
// CalculateLength converts a length between cm, m, inch and ft. Unsupported
// unit pairs return the value unchanged.
//
// Bug fix: the ft<->inch conversions were swapped (ft->inch divided by 12
// and inch->ft multiplied by 12). Also removed the dead `denominator`
// variable, which was always 1.
func CalculateLength(value float64, unit, toUnit string) float64 {
	switch {
	case unit == "cm" && toUnit == "m":
		return value / 100
	case unit == "m" && toUnit == "cm":
		return value * 100
	case unit == "cm" && toUnit == "inch":
		return value / 2.54
	case unit == "inch" && toUnit == "cm":
		return value * 2.54
	case unit == "cm" && toUnit == "ft":
		return value / 30.48
	case unit == "ft" && toUnit == "cm":
		return value * 30.48
	case unit == "m" && toUnit == "ft":
		return value * 3.281
	case unit == "ft" && toUnit == "m":
		return value / 3.281
	case unit == "m" && toUnit == "inch":
		return value * 39.37
	case unit == "inch" && toUnit == "m":
		return value / 39.37
	case unit == "ft" && toUnit == "inch":
		// 1 ft = 12 inch, so multiply (previously divided).
		return value * 12
	case unit == "inch" && toUnit == "ft":
		// 12 inch = 1 ft, so divide (previously multiplied).
		return value / 12
	}
	return value
}
// CalculateAlcoholConsumption normalizes alcohol intake to servings per week.
// Frequencies other than "daily" and "monthly" are treated as already weekly.
func CalculateAlcoholConsumption(value float64, frequency string) float64 {
	converted := value
	if frequency == "daily" {
		converted = converted * 7
	} else if frequency == "monthly" {
		converted = converted / 4
	}
	return converted
} | tools/calculate.go | 0.727685 | 0.644225 | calculate.go | starcoder
package pinapi
import (
"encoding/json"
)
// SpecialsFixturesContestant struct for SpecialsFixturesContestant.
// Optional fields are pointers with omitempty tags: nil means "not set".
type SpecialsFixturesContestant struct {
	// Contestant Id.
	Id *int64 `json:"id,omitempty"`
	// Name of the contestant.
	Name *string `json:"name,omitempty"`
	// Rotation Number.
	RotNum *int `json:"rotNum,omitempty"`
}
// NewSpecialsFixturesContestant instantiates a new SpecialsFixturesContestant
// object, assigning default values to properties that define them.
func NewSpecialsFixturesContestant() *SpecialsFixturesContestant {
	return &SpecialsFixturesContestant{}
}

// NewSpecialsFixturesContestantWithDefaults instantiates a new
// SpecialsFixturesContestant object with only defaulted properties set.
func NewSpecialsFixturesContestantWithDefaults() *SpecialsFixturesContestant {
	return &SpecialsFixturesContestant{}
}
// GetId returns the Id field value if set, zero value otherwise.
func (o *SpecialsFixturesContestant) GetId() int64 {
	if o == nil || o.Id == nil {
		return 0
	}
	return *o.Id
}

// GetIdOk returns a tuple with the Id field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *SpecialsFixturesContestant) GetIdOk() (*int64, bool) {
	if o == nil || o.Id == nil {
		return nil, false
	}
	return o.Id, true
}

// HasId reports whether the Id field has been set.
func (o *SpecialsFixturesContestant) HasId() bool {
	return o != nil && o.Id != nil
}

// SetId gets a reference to the given int64 and assigns it to the Id field.
func (o *SpecialsFixturesContestant) SetId(v int64) {
	o.Id = &v
}
// GetName returns the Name field value if set, zero value otherwise.
func (o *SpecialsFixturesContestant) GetName() string {
	if o == nil || o.Name == nil {
		return ""
	}
	return *o.Name
}

// GetNameOk returns a tuple with the Name field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *SpecialsFixturesContestant) GetNameOk() (*string, bool) {
	if o == nil || o.Name == nil {
		return nil, false
	}
	return o.Name, true
}

// HasName reports whether the Name field has been set.
func (o *SpecialsFixturesContestant) HasName() bool {
	return o != nil && o.Name != nil
}

// SetName gets a reference to the given string and assigns it to the Name field.
func (o *SpecialsFixturesContestant) SetName(v string) {
	o.Name = &v
}
// GetRotNum returns the RotNum field value if set, zero value otherwise.
func (o *SpecialsFixturesContestant) GetRotNum() int {
	if o == nil || o.RotNum == nil {
		return 0
	}
	return *o.RotNum
}

// GetRotNumOk returns a tuple with the RotNum field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *SpecialsFixturesContestant) GetRotNumOk() (*int, bool) {
	if o == nil || o.RotNum == nil {
		return nil, false
	}
	return o.RotNum, true
}

// HasRotNum reports whether the RotNum field has been set.
func (o *SpecialsFixturesContestant) HasRotNum() bool {
	return o != nil && o.RotNum != nil
}

// SetRotNum gets a reference to the given int and assigns it to the RotNum field.
func (o *SpecialsFixturesContestant) SetRotNum(v int) {
	o.RotNum = &v
}
// MarshalJSON serializes only the fields that have been set (non-nil).
func (o SpecialsFixturesContestant) MarshalJSON() ([]byte, error) {
	out := map[string]interface{}{}
	if o.Id != nil {
		out["id"] = o.Id
	}
	if o.Name != nil {
		out["name"] = o.Name
	}
	if o.RotNum != nil {
		out["rotNum"] = o.RotNum
	}
	return json.Marshal(out)
}
// NullableSpecialsFixturesContestant wraps SpecialsFixturesContestant so that
// an explicit JSON null can be distinguished from an absent value.
type NullableSpecialsFixturesContestant struct {
	value *SpecialsFixturesContestant
	isSet bool // true once Set (or UnmarshalJSON) has been called
}

// Get returns the wrapped value; may be nil.
func (v NullableSpecialsFixturesContestant) Get() *SpecialsFixturesContestant {
	return v.value
}

// Set stores val and marks the wrapper as set.
func (v *NullableSpecialsFixturesContestant) Set(val *SpecialsFixturesContestant) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether a value (possibly nil) has been assigned.
func (v NullableSpecialsFixturesContestant) IsSet() bool {
	return v.isSet
}

// Unset clears the value and the set flag.
func (v *NullableSpecialsFixturesContestant) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullableSpecialsFixturesContestant returns a wrapper already marked set.
func NewNullableSpecialsFixturesContestant(val *SpecialsFixturesContestant) *NullableSpecialsFixturesContestant {
	return &NullableSpecialsFixturesContestant{value: val, isSet: true}
}

// MarshalJSON encodes the wrapped value (nil encodes as JSON null).
func (v NullableSpecialsFixturesContestant) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
// UnmarshalJSON decodes src into the wrapped value and marks the wrapper as
// set — even when src is the JSON literal null.
func (v *NullableSpecialsFixturesContestant) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
} | pinapi/model_specials_fixtures_contestant.go | 0.755907 | 0.50653 | model_specials_fixtures_contestant.go | starcoder
package raytracer
import (
"math"
)
// Vector is a 4-component (x, y, z, w) float vector; the w component is used
// as a homogeneous coordinate by vectorTransform.
type Vector [4]float64

// sameSideTest reports whether dot(v1, v2) exceeds shifting, within the DIFF
// epsilon tolerance.
func sameSideTest(v1, v2 Vector, shifting float64) bool {
	return dot(v1, v2)-shifting > -DIFF
}
// subVector returns v1 - v2 component-wise; w is reset to 1 (result treated
// as a homogeneous point).
func subVector(v1, v2 Vector) Vector {
	return Vector{
		v1[0] - v2[0],
		v1[1] - v2[1],
		v1[2] - v2[2],
		1,
	}
}

// psubVector is the pointer-based variant of subVector.
func psubVector(v1, v2 *Vector) *Vector {
	return &Vector{
		v1[0] - v2[0],
		v1[1] - v2[1],
		v1[2] - v2[2],
		1,
	}
}
// addVector returns the component-wise sum of v1 and v2.
func addVector(v1, v2 Vector) Vector {
	return Vector{
		v1[0] + v2[0],
		v1[1] + v2[1],
		v1[2] + v2[2],
		// Fixed: the w slot previously summed the x components
		// (v1[0] + v2[0]), an apparent copy-paste bug.
		v1[3] + v2[3],
	}
}
// addVectors folds addVector over all supplied vectors and returns the total.
// Panics if called with no arguments.
func addVectors(vList ...Vector) Vector {
	total := vList[0]
	for _, v := range vList[1:] {
		total = addVector(total, v)
	}
	return total
}
// limitVector clamps each of the x, y, z components of v to at most factor
// (no lower bound is applied); w is copied through unchanged.
func limitVector(v Vector, factor float64) Vector {
	// I know how to loop from 0 to 2, but for-loop
	// introduces an additional jump instruction and extra
	// condition check. which increases cpu-cycles.
	// and I avoid that here.
	result := Vector{v[0], v[1], v[2], v[3]}
	if result[0] > factor {
		result[0] = factor
	}
	if result[1] > factor {
		result[1] = factor
	}
	if result[2] > factor {
		result[2] = factor
	}
	return result
}
// scaleVector multiplies the x, y, z components of v by factor, keeping w.
// NOTE(review): a zero factor returns the zero Vector, which also zeroes w —
// unlike the multiply path, which preserves v[3]. Presumably an intentional
// shortcut; confirm before relying on w after scaling by 0.
func scaleVector(v Vector, factor float64) Vector {
	if factor == 0 {
		return Vector{}
	}
	return Vector{
		v[0] * factor,
		v[1] * factor,
		v[2] * factor,
		v[3],
	}
}
// crossProduct returns the 3-component cross product of v1 and v2; the w
// component of the result is left at zero.
func crossProduct(v1, v2 Vector) Vector {
	return Vector{
		v1[1]*v2[2] - v1[2]*v2[1],
		v1[2]*v2[0] - v1[0]*v2[2],
		v1[0]*v2[1] - v1[1]*v2[0],
	}
}

// pcrossProduct is the pointer-based variant of crossProduct.
func pcrossProduct(v1, v2 *Vector) *Vector {
	return &Vector{
		v1[1]*v2[2] - v1[2]*v2[1],
		v1[2]*v2[0] - v1[0]*v2[2],
		v1[0]*v2[1] - v1[1]*v2[0],
	}
}
// vectorNorm returns the squared length of the xyz part of v (w ignored).
func vectorNorm(v Vector) float64 {
	return (v[0] * v[0]) + (v[1] * v[1]) + (v[2] * v[2])
}

// pvectorNorm is the pointer-based variant of vectorNorm.
func pvectorNorm(v *Vector) float64 {
	return (v[0] * v[0]) + (v[1] * v[1]) + (v[2] * v[2])
}

// dot returns the 3-component dot product of v1 and v2 (w ignored).
func dot(v1, v2 Vector) float64 {
	return v1[0]*v2[0] + v1[1]*v2[1] + v1[2]*v2[2]
}

// pdot is the pointer-based variant of dot.
func pdot(v1, v2 *Vector) float64 {
	return v1[0]*v2[0] + v1[1]*v2[1] + v1[2]*v2[2]
}
// combine returns the linear combination f1*v1 + f2*v2 over all four
// components (including w).
func combine(v1, v2 Vector, f1, f2 float64) Vector {
	return Vector{
		(f1 * v1[0]) + (f2 * v2[0]),
		(f1 * v1[1]) + (f2 * v2[1]),
		(f1 * v1[2]) + (f2 * v2[2]),
		(f1 * v1[3]) + (f2 * v2[3]),
	}
}

// pcombine is the pointer-based variant of combine.
func pcombine(v1, v2 *Vector, f1, f2 float64) *Vector {
	return &Vector{
		(f1 * v1[0]) + (f2 * v2[0]),
		(f1 * v1[1]) + (f2 * v2[1]),
		(f1 * v1[2]) + (f2 * v2[2]),
		(f1 * v1[3]) + (f2 * v2[3]),
	}
}
// pnormalizeVector scales the xyz part of v to unit length, preserving w.
// A zero-length vector is returned unchanged.
func pnormalizeVector(v *Vector) *Vector {
	norm := pvectorNorm(v)
	if norm == 0 {
		return v
	}
	scale := 1.0 / math.Sqrt(norm)
	return &Vector{
		v[0] * scale,
		v[1] * scale,
		v[2] * scale,
		v[3],
	}
}

// normalizeVector is the value-based variant of pnormalizeVector.
func normalizeVector(v Vector) Vector {
	norm := vectorNorm(v)
	if norm == 0 {
		return v
	}
	scale := 1.0 / math.Sqrt(norm)
	return Vector{
		v[0] * scale,
		v[1] * scale,
		v[2] * scale,
		v[3],
	}
}
// vectorTransform multiplies v (as a row vector, w included) by the 4x4
// matrix m and returns the transformed vector.
func vectorTransform(v Vector, m Matrix) Vector {
	var result Vector
	result[0] = v[0]*m[0][0] + v[1]*m[1][0] + v[2]*m[2][0] + v[3]*m[3][0]
	result[1] = v[0]*m[0][1] + v[1]*m[1][1] + v[2]*m[2][1] + v[3]*m[3][1]
	result[2] = v[0]*m[0][2] + v[1]*m[1][2] + v[2]*m[2][2] + v[3]*m[3][2]
	result[3] = v[0]*m[0][3] + v[1]*m[1][3] + v[2]*m[2][3] + v[3]*m[3][3]
	return result
}
// vectorDistance returns the Euclidean distance between v1 and v2.
func vectorDistance(v1, v2 Vector) float64 {
	return vectorLength(subVector(v2, v1))
}

// pvectorDistance is the pointer-based variant of vectorDistance.
func pvectorDistance(v1, v2 *Vector) float64 {
	return vectorLength(*psubVector(v2, v1))
}

// absVector returns v with math.Abs applied to every component.
func absVector(v Vector) Vector {
	var out Vector
	for i, c := range v {
		out[i] = math.Abs(c)
	}
	return out
}
// barycentricCoordinates computes the barycentric coordinates (u, v, w) of
// point p with respect to triangle (v1, v2, v3). It projects the triangle
// onto the 2D plane that drops the dominant axis of the (absolute) normal,
// then solves the resulting 2x2 system. success is true when p lies inside
// the triangle within the DIFF tolerance.
func barycentricCoordinates(v1, v2, v3, p Vector) (u, v, w float64, success bool) {
	var a1, a2 int64
	var n, e1, e2, pt Vector
	e1 = subVector(v1, v3)
	e2 = subVector(v2, v3)
	pt = subVector(p, v3)
	// Absolute normal: its largest component tells which axis to drop.
	n = crossProduct(e1, e2)
	n = absVector(n)
	a1 = 0
	if n[1] > n[a1] {
		a1 = 1
	}
	if n[2] > n[a1] {
		a1 = 2
	}
	// a1/a2 become the two axes kept for the 2D projection.
	switch a1 {
	case 0:
		a1 = 1
		a2 = 2
	case 1:
		a1 = 0
		a2 = 2
	default:
		a1 = 0
		a2 = 1
	}
	// Cramer's-rule solution of the projected 2x2 system.
	u = (pt[a2]*e2[a1] - pt[a1]*e2[a2]) / (e1[a2]*e2[a1] - e1[a1]*e2[a2])
	v = (pt[a2]*e1[a1] - pt[a1]*e1[a2]) / (e2[a2]*e1[a1] - e2[a1]*e1[a2])
	w = 1 - u - v
	success = (u >= DIFF) && (v >= DIFF) && (u+v <= 1.0+DIFF)
	return
}
// calculateBounds returns the component-wise minimum and maximum (over all
// four components) of the given vectors. Empty input yields zero vectors.
func calculateBounds(vlist []Vector) (min, max Vector) {
	if len(vlist) == 0 {
		return
	}
	min = vlist[0]
	max = vlist[0]
	for i := range vlist {
		for j := 0; j < 4; j++ {
			if vlist[i][j] < min[j] {
				min[j] = vlist[i][j]
			}
			if vlist[i][j] > max[j] {
				max[j] = vlist[i][j]
			}
		}
	}
	return
}
// localToAbsoluteList transforms every vertex by matrix and returns the
// transformed copies; the input slice is left untouched.
func localToAbsoluteList(vertices []Vector, matrix Matrix) []Vector {
	result := make([]Vector, len(vertices))
	for i, v := range vertices {
		result[i] = vectorTransform(v, matrix)
	}
	return result
}
// vectorLength returns the Euclidean length of the xyz part of v.
func vectorLength(v Vector) float64 {
	return math.Sqrt(vectorNorm(v))
}

// reflectVector reflects v about the normal n (assumes n is unit length —
// TODO confirm at call sites).
func reflectVector(v, n Vector) Vector {
	return combine(v, n, 1.0, -2*dot(v, n))
}

// vectorSum returns x + y + z (w excluded).
func vectorSum(v Vector) float64 {
	return v[0] + v[1] + v[2]
}
// refractVector refracts direction v through the surface normal n per Snell's
// law, where ior is the index-of-refraction ratio. A near-zero ior returns v
// unchanged; total internal reflection returns the zero Vector.
func refractVector(v, n Vector, ior float64) Vector {
	if ior < DIFF {
		return v
	}
	ior = 1.0 / ior
	nDotI := dot(n, v)
	k := 1.0 - ior*ior*(1.0-nDotI*nDotI)
	if k < 0 {
		// Total internal reflection: no refracted ray exists.
		return Vector{}
	}
	return subVector(scaleVector(v, ior), scaleVector(n, (ior*nDotI+math.Sqrt(k))))
} | raytracer/vector.go | 0.762247 | 0.678813 | vector.go | starcoder
package tree
import (
"errors"
)
// Tree is the structure of tree
type Tree struct {
	roots []*Node // top-level nodes of the tree
	Style *Style  // rendering style used by GetPrefixList
	// Insertion state maintained by AddNode:
	lastIndent int     // indent level of the most recently added node
	lastNodes  []*Node // ancestor chain ending at the most recently added node
}
// GetRoots returns the root node list, or an error when the tree has none.
func (t *Tree) GetRoots() ([]*Node, error) {
	if len(t.roots) > 0 {
		return t.roots, nil
	}
	return nil, errors.New("No root nodes found")
}
// AddRoots appends nodes as roots of this tree; it fails if any node already
// belongs to a tree.
func (t *Tree) AddRoots(nodes []*Node) error {
	for _, n := range nodes {
		if n.tree != nil {
			return errors.New("Some nodes in the node list already have a tree")
		}
	}
	t.unsafeAddRoots(nodes)
	return nil
}
// RemoveRoots detaches the given root nodes; it fails if any node does not
// belong to this tree.
func (t *Tree) RemoveRoots(nodes []*Node) error {
	for _, n := range nodes {
		if n.tree != t {
			return errors.New("Some nodes in the node list do not belong to this tree")
		}
	}
	t.unsafeRemoveRoots(nodes)
	return nil
}
// GetNodeList returns every node of the tree, root subtrees concatenated in
// order; it propagates GetRoots' error for an empty tree.
func (t *Tree) GetNodeList() ([]*Node, error) {
	roots, err := t.GetRoots()
	if err != nil {
		return nil, err
	}
	var all []*Node
	for _, root := range roots {
		all = append(all, root.GetNodeList()...)
	}
	return all, nil
}
// AddNode inserts node at the given indent level, relative to the previously
// added node: same indent -> sibling, +1 -> child, smaller -> sibling of the
// ancestor at that level. The first node ever added becomes a root.
// Indent jumps of more than +1 are rejected.
func (t *Tree) AddNode(indent int, node *Node) error {
	if len(t.lastNodes) == 0 {
		// Very first node: it must be a root.
		if t.AddRoots([]*Node{node}) != nil {
			return errors.New("Cannot add a root")
		}
		t.lastNodes = append(t.lastNodes, node)
	} else {
		switch i := indent - t.lastIndent; i {
		case 1:
			// One level deeper: child of the last node.
			if t.lastNodes[len(t.lastNodes)-1].AddChildren([]*Node{node}) != nil {
				return errors.New("Cannot add a child")
			}
			t.lastNodes = append(t.lastNodes, node)
		default:
			if i > 0 {
				// Deeper by more than one level cannot be attached anywhere.
				return errors.New("Abnormal indentation")
			}
			// Dedent: pop back up to the ancestor at the target level,
			// then fall through to add a sibling there.
			t.lastNodes = t.lastNodes[:len(t.lastNodes)+i]
			fallthrough
		case 0:
			// Same level: sibling of the current last node.
			if t.lastNodes[len(t.lastNodes)-1].AddSiblings([]*Node{node}) != nil {
				return errors.New("Cannot add a sibling")
			}
			t.lastNodes[len(t.lastNodes)-1] = node
		}
		t.lastIndent = indent
	}
	return nil
}
// GetPrefixList returns the rendered prefix string for every node in the
// tree, in node-list order. It fails when no Style is configured or when the
// tree is empty.
func (t *Tree) GetPrefixList() ([]string, error) {
	if t.Style == nil {
		return nil, errors.New("No style found")
	}
	nodes, err := t.GetNodeList()
	if err != nil {
		return nil, err
	}
	var prefixes []string
	for _, n := range nodes {
		prefixes = append(prefixes, t.Style.getPrefix(n.prefix))
	}
	return prefixes, nil
} | tree.go | 0.634543 | 0.402333 | tree.go | starcoder
package api
import (
"fmt"
)
// RuleKey uniquely identifies a Rule within a Rules slice.
type RuleKey string

/*
A Rule defines a mapping from a list of Methods and Matches to an AllConstraints
struct. A Rule applies to a request if one of the Methods and all of the Matches
apply.
If a Rule applies, the constraints inferred from the Matches should be merged
with each of the ClusterConstraints, which are then used to find a live
Instance. The ClusterConstraints are randomly shuffled using their weights
to affect the distribution. Each ClusterConstraint is examined to find a
matching Instance, until one is found.
It is possible to set a cohort seed on a SharedRules, Route, or Rule object.
Only one of these will apply to any given request. A rule is the most
specific way we have to direct a request to some backend so any request
that matches a rule will use a cohort seed if set. This is true regardless
of the rule source (SharedRules or Route).
See CohortSeed docs for additional details of what a cohort seed does.
*/
type Rule struct {
	RuleKey     RuleKey        `json:"rule_key"`
	Methods     []string       `json:"methods"` // HTTP methods; see validMethod for the accepted set
	Matches     Matches        `json:"matches"`
	Constraints AllConstraints `json:"constraints"`
	CohortSeed  *CohortSeed    `json:"cohort_seed"` // optional; nil when unset
}

// Rules is an ordered collection of Rule values.
type Rules []Rule
// Equals reports whether the two Rule slices have the same length and
// pairwise-equal elements, in order.
func (r Rules) Equals(o Rules) bool {
	if len(r) != len(o) {
		return false
	}
	for i := range r {
		if !r[i].Equals(o[i]) {
			return false
		}
	}
	return true
}
// AsMap indexes the rules by their RuleKey; later duplicates overwrite
// earlier ones.
func (rs Rules) AsMap() map[RuleKey]Rule {
	byKey := make(map[RuleKey]Rule, len(rs))
	for _, rule := range rs {
		byKey[rule.RuleKey] = rule
	}
	return byKey
}
// A verifiableMatch is a subset of the fields from a Match object, used
// exclusively to check whether there are two Rule-s in the same Rules object
// that match the same kind and value with the same behavior.
// It is comparable and therefore usable as a map key.
type verifiableMatch struct {
	Kind     MatchKind
	Behavior MatchBehavior
	From     Metadatum
}
// IsValid checks validity of a slice of Rule objects: rule keys must be
// unique, (kind, behavior, from) match triples must not repeat across rules,
// and every individual Rule must itself be valid.
//
// Fixed: the loop variable previously shadowed the receiver `r`.
func (r Rules) IsValid() *ValidationError {
	errs := &ValidationError{}
	seenKey := map[RuleKey]bool{}
	seenMatch := map[verifiableMatch]bool{}
	for _, rule := range r {
		if seenKey[rule.RuleKey] {
			errs.AddNew(ErrorCase{
				"rules", fmt.Sprintf("multiple instances of key %s", string(rule.RuleKey)),
			})
		}
		seenKey[rule.RuleKey] = true
		for _, m := range rule.Matches {
			vm := verifiableMatch{m.Kind, m.Behavior, m.From}
			if seenMatch[vm] {
				errs.AddNew(ErrorCase{
					"rules",
					fmt.Sprintf(
						"multiple instances of match kind %s with behavior %s from {%s: %s}",
						string(m.Kind),
						string(m.Behavior),
						string(m.From.Key),
						string(m.From.Value)),
				})
			}
			seenMatch[vm] = true
		}
		errs.MergePrefixed(rule.IsValid(), fmt.Sprintf("rules[%v]", rule.RuleKey))
	}
	return errs.OrNil()
}
// methodsEqual reports whether both rules contain the same set of method
// strings (order-insensitive; duplicates are not distinguished beyond length).
func (r Rule) methodsEqual(o Rule) bool {
	if len(r.Methods) != len(o.Methods) {
		return false
	}
	seen := make(map[string]bool, len(r.Methods))
	for _, m := range r.Methods {
		seen[m] = true
	}
	for _, m := range o.Methods {
		if !seen[m] {
			return false
		}
	}
	return true
}
// Equals reports whether two Rules agree on key, methods, constraints,
// matches and cohort seed.
func (r Rule) Equals(o Rule) bool {
	switch {
	case r.RuleKey != o.RuleKey:
		return false
	case !r.methodsEqual(o):
		return false
	case !r.Constraints.Equals(o.Constraints):
		return false
	case !r.Matches.Equals(o.Matches):
		return false
	}
	return CohortSeedPtrEquals(r.CohortSeed, o.CohortSeed)
}
// validMethod is the set of HTTP methods a Rule may reference.
// (Explicit map type in the var declaration was redundant.)
var validMethod = map[string]bool{
	"GET":    true,
	"PUT":    true,
	"POST":   true,
	"DELETE": true,
}
// IsValid checks this rule for validity. A rule is considered valid if it has
// a RuleKey, at least one valid HTTP method (GET, PUT, POST, DELETE) or one
// match, and its Matches and Constraints are themselves valid.
//
// The pointless `ecase` closure (which only forwarded to the ErrorCase
// literal) was inlined.
func (r Rule) IsValid() *ValidationError {
	errs := &ValidationError{}
	errCheckKey(string(r.RuleKey), errs, "rule_key")
	for _, m := range r.Methods {
		if !validMethod[m] {
			errs.AddNew(ErrorCase{"methods", fmt.Sprintf("%s is not a valid method", m)})
		}
	}
	if len(r.Methods) == 0 && len(r.Matches) == 0 {
		errs.AddNew(ErrorCase{"", "at least one method or match must be present"})
	}
	errs.MergePrefixed(r.Matches.IsValid(), "")
	errs.MergePrefixed(r.Constraints.IsValid("constraints"), "")
	if r.CohortSeed != nil {
		errs.MergePrefixed(r.CohortSeed.IsValid(), "")
	}
	return errs.OrNil()
} | vendor/github.com/turbinelabs/api/rule.go | 0.814901 | 0.550184 | rule.go | starcoder
package controller
import (
"strings"
)
// GetScadaOEMHeader returns, in parallel order, the display header names and
// the derived field names (lowercased, trailing space trimmed, spaces turned
// into underscores) for the SCADA OEM data set. The two embedded lists must
// stay line-for-line aligned.
//
// Replaced strings.Replace(..., -69) — a magic negative count meaning
// "replace all" — with the explicit strings.ReplaceAll, and made the return
// explicit instead of naked.
func GetScadaOEMHeader() (headerResult []string, fieldResult []string) {
	oldHeader := `AI intern ActivPower
AI intern WindSpeed
AI intern NacellePos
AI intern WindDirection
AI intern PitchAngle1
AI intern PitchAngle2
AI intern PitchAngle3
C intern SpeedGenerator
C intern SpeedRotor
AI intern ReactivPower
AI intern Frequency Grid
AI GearOilPressure
Temp Outdoor
Temp Nacelle
Temp GearBox HSS NDE
Temp GearBox HSS DE
Temp GearBox IMS DE
Temp GearOilSump
Temp GearBox IMS NDE
Temp GeneratorBearing DE
Temp GeneratorBearing NDE
Temp MainBearing
Temp YawBrake 1
Temp YawBrake 2
Temp G1L1
Temp G1L2
Temp G1L3
Temp YawBrake 4
AI HydrSystemPressure
Temp BottomControlSection Low
AI intern TempConv1
AI intern TempConv2
AI intern TempConv3
AI intern R PidAngleOut
AI intern I1
AI intern I2
AI intern I3
AI intern NacelleDrill
AI intern PitchAkku V1
AI intern PitchAkku V2
AI intern PitchAkku V3
AI intern PitchConv Current1
AI intern PitchConv Current2
AI intern PitchConv Current3
AI intern PitchAngleSP Diff1
AI intern PitchAngleSP Diff2
AI intern PitchAngleSP Diff3
AI intern RpmDiff
AI intern U1
AI intern U2
AI intern U3
AI Intern WindSpeedDif
AI speed RotFR
AI WindSpeed1
AI WindSpeed2
AI WindVane1
AI WindVane2
AI internCurrentAsym
AI intern WindVaneDiff
AI intern PitchSpeed2
AI intern Speed RPMDiff FR1 RotCNT
AI DrTrVibValue
AI intern InLastErrorConv1
AI intern InLastErrorConv2
AI intern InLastErrorConv3
AI TowerVibValueAxial
AI intern DiffGenSpeedSPToAct
Temp YawBrake 5
AI intern SpeedGenerator Proximity
AI intern SpeedDiff Encoder Proximity
Temp CabinetTopBox Low
Temp CabinetTopBox
Temp BottomControlSection
Temp BottomPowerSection
Temp BottomPowerSection Low
AI intern Pitch1 Status High
AI intern Pitch2 Status High
AI intern Pitch3 Status High
AI intern InPosition1 ch3
AI intern InPosition2 ch3
AI intern InPosition3 ch3
AI intern Temp Brake Blade1
AI intern Temp Brake Blade2
AI intern Temp Brake Blade3
AI intern Temp PitchMotor Blade1
AI intern Temp PitchMotor Blade2
AI intern Temp PitchMotor Blade3
AI intern Temp Hub Additional1
AI intern Temp Hub Additional2
AI intern Temp Hub Additional3
AI intern Pitch1 Status Low
AI intern Pitch2 Status Low
AI intern Pitch3 Status Low
AI intern Battery VoltageBlade1 center
AI intern Battery VoltageBlade2 center
AI intern Battery VoltageBlade3 center
AI intern Battery ChargingCur Blade1
AI intern Battery ChargingCur Blade2
AI intern Battery ChargingCur Blade3
AI intern Battery DischargingCur Blade1
AI intern Battery DischargingCur Blade2
AI intern Battery DischargingCur Blade3
AI intern PitchMotor BrakeVoltage Blade1
AI intern PitchMotor BrakeVoltage Blade2
AI intern PitchMotor BrakeVoltage Blade3
AI intern PitchMotor BrakeCurrent Blade1
AI intern PitchMotor BrakeCurrent Blade2
AI intern PitchMotor BrakeCurrent Blade3
AI intern Temp HubBox Blade1
AI intern Temp HubBox Blade2
AI intern Temp HubBox Blade3
AI intern Temp Pitch1 HeatSink
AI intern Temp Pitch2 HeatSink
AI intern Temp Pitch3 HeatSink
AI intern ErrorStackBlade1
AI intern ErrorStackBlade2
AI intern ErrorStackBlade3
AI intern Temp BatteryBox Blade1
AI intern Temp BatteryBox Blade2
AI intern Temp BatteryBox Blade3
AI intern DC LinkVoltage1
AI intern DC LinkVoltage2
AI intern DC LinkVoltage3
Temp Yaw Motor1
Temp Yaw Motor2
Temp Yaw Motor3
Temp Yaw Motor4
AO DFIG Power Setpiont
AO DFIG Q Setpoint
AI DFIG Torque actual
AI DFIG SpeedGenerator Encoder
AI intern DFIG DC Link Voltage actual
AI intern DFIG MSC current
AI intern DFIG Main voltage
AI intern DFIG Main current
AI intern DFIG active power actual
AI intern DFIG reactive power actual
AI intern DFIG active power actual LSC
AI intern DFIG LSC current
AI intern DFIG Data log number
AI intern Damper OscMagnitude
AI intern Damper PassbandFullLoad
AI YawBrake TempRise1
AI YawBrake TempRise2
AI YawBrake TempRise3
AI YawBrake TempRise4
AI intern NacelleDrill at NorthPosSensor`
	newHeader := `Active Power
Wind Speed
Nacelle Pos
Wind Direction
Pitch Angle1
Pitch Angle2
Pitch Angle3
Generator Speed
Rotor Speed
Reactive Power
Frequency Grid
Gear Oil Pressure
Ambient Temp
Temp Nacelle
Temp GearBox HSS NDE
Temp GearBox HSS DE
Temp GearBox IMS DE
Temp Gear Oil Sump
Temp GearBox IMS NDE
Temp Generator Bearing DE
Temp Generator Bearing NDE
Temp Main Bearing
Temp Yaw Brake 1
Temp Yaw Brake 2
Temp G1L1
Temp G1L2
Temp G1L3
Temp Yaw Brake 4
Hydr System Pressure
Temp Bottom Control Section Low
Temp Conv1
Temp Conv2
Temp Conv3
R Pid Angle Out
I1
I2
I3
Nacelle Drill
Pitch Akku V1
Pitch Akku V2
Pitch Akku V3
Pitch Conv Current1
Pitch Conv Current2
Pitch Conv Current3
Pitch Angle SP Diff1
Pitch Angle SP Diff2
Pitch Angle SP Diff3
Rpm Diff
U1
U2
U3
Wind Speed Dif
Speed Rot FR
Wind Speed1
Wind Speed2
Wind Vane1
Wind Vane2
Current Asym
Wind Vane Diff
Pitch Speed2
Speed RPM Diff FR1 Rot CNT
Dr Tr Vib Value
In Last Error Conv1
In Last Error Conv2
In Last Error Conv3
AI Tower Vib Value Axial
Diff Gen Speed SP To Act
Temp YawBrake 5
Speed Generator Proximity
Speed Diff Encoder Proximity
Temp CabinetTopBox Low
Temp CabinetTopBox
Temp Bottom Control Section
Temp Bottom Power Section
Temp Bottom Power Section Low
Pitch1 Status High
Pitch2 Status High
Pitch3 Status High
In Position1 ch3
In Position2 ch3
In Position3 ch3
Temp Brake Blade1
Temp Brake Blade2
Temp Brake Blade3
Temp Pitch Motor Blade1
Temp Pitch Motor Blade2
Temp Pitch Motor Blade3
Temp Hub Additional1
Temp Hub Additional2
Temp Hub Additional3
Pitch1 Status Low
Pitch2 Status Low
Pitch3 Status Low
Battery Voltage Blade1 center
Battery Voltage Blade2 center
Battery Voltage Blade3 center
Battery Charging Cur Blade1
Battery Charging Cur Blade2
Battery Charging Cur Blade3
Battery Discharging Cur Blade1
Battery Discharging Cur Blade2
Battery Discharging Cur Blade3
Pitch Motor Brake Voltage Blade1
Pitch Motor Brake Voltage Blade2
Pitch Motor Brake Voltage Blade3
Pitch Motor Brake Current Blade1
Pitch Motor Brake Current Blade2
Pitch Motor Brake Current Blade3
Temp HubBox Blade1
Temp HubBox Blade2
Temp HubBox Blade3
Temp Pitch1 HeatSink
Temp Pitch2 HeatSink
Temp Pitch3 HeatSink
Error Stack Blade1
Error Stack Blade2
Error Stack Blade3
Temp Battery Box Blade1
Temp Battery Box Blade2
Temp Battery Box Blade3
DC Link Voltage1
DC Link Voltage2
DC Link Voltage3
Temp Yaw Motor1
Temp Yaw Motor2
Temp Yaw Motor3
Temp Yaw Motor4
AO DFIG Power Setpiont
AO DFIG Q Setpoint
AI DFIG Torque actual
AI DFIG Speed Generator Encoder
DFIG DC Link Voltage actual
DFIG MSC current
DFIG Main voltage
DFIG Main current
DFIG Active power actual
DFIG reactive power actual
DFIG active power actual LSC
DFIG LSC current
DFIG Data log number
Damper Osc Magnitude
Damper Pass band FullLoad
AI Yaw Brake Temp Rise1
AI Yaw Brake Temp Rise2
AI Yaw Brake Temp Rise3
AI Yaw Brake Temp Rise4
Nacelle Drill at North Pos Sensor`
	oldHeaderList := strings.Split(oldHeader, "\n")
	newHeaderList := strings.Split(newHeader, "\n")
	for idx, val := range oldHeaderList {
		// Field name: trim one trailing space, snake_case, lowercase.
		fieldResult = append(fieldResult, strings.ToLower(strings.ReplaceAll(strings.TrimSuffix(val, " "), " ", "_")))
		headerResult = append(headerResult, newHeaderList[idx])
	}
	return headerResult, fieldResult
}
// GetScadaHFDHeader returns, in parallel order, the database field names and
// the display header names for the SCADA HFD (high-frequency data) browser.
// The two literals below must stay index-aligned: fieldResult[i] is the
// column whose label is headerResult[i].
func GetScadaHFDHeader() (headerResult []string, fieldResult []string) {
	// vArrRealtime := []string{"TimeStamp", "Turbine", "ActivePower_kW", "WindSpeed_ms", "NacellePos", "WindDirection",
	// "PitchAngle", "PitchAngle1", "PitchAngle2", "PitchAngle3", "GenSpeed_RPM", "RotorSpeed_RPM",
	// "ReactivePower_kVAr", "Frequency_Hz", "TempOutdoor", "TempNacelle", "TempGearBoxHSSNDE", "TempGearBoxHSSDE",
	// "TempGearBoxIMSDE", "TempGearBoxOilSump", "TempGearBoxIMSNDE", "TempGeneratorBearingDE", "TempGeneratorBearingNDE",
	// "TempHubBearing", "TempG1L1", "TempG1L2", "TempG1L3", "TempBottomControlSection", "TempConv1",
	// "TempConv2", "TempConv3", "PitchAccuV1", "PitchAccuV2", "PitchAccuV3", "PowerFactor",
	// "Total_Prod_Day_kWh"}
	fieldResult = []string{"Fast_ActivePower_kW", "Fast_WindSpeed_ms", "Slow_NacellePos", "Slow_WindDirection",
		"Fast_PitchAngle", "Fast_PitchAngle1", "Fast_PitchAngle2", "Fast_PitchAngle3", "Fast_GenSpeed_RPM", "Fast_RotorSpeed_RPM",
		"Fast_ReactivePower_kVAr", "Fast_Frequency_Hz", "Slow_TempOutdoor", "Slow_TempNacelle", "Slow_TempGearBoxHSSNDE", "Slow_TempGearBoxHSSDE",
		"Slow_TempGearBoxIMSDE", "Slow_TempGearBoxOilSump", "Slow_TempGearBoxIMSNDE", "Slow_TempGeneratorBearingDE", "Slow_TempGeneratorBearingNDE",
		"Slow_TempHubBearing", "Slow_TempG1L1", "Slow_TempG1L2", "Slow_TempG1L3", "Slow_TempBottomControlSection", "Slow_TempConv1",
		"Slow_TempConv2", "Slow_TempConv3", "Fast_PitchAccuV1", "Fast_PitchAccuV2", "Fast_PitchAccuV3", "Fast_PowerFactor",
		"Fast_Total_Prod_Day_kWh",
		"Fast_ActivePower_kW_Count", "Fast_WindSpeed_ms_Count", "Slow_NacellePos_Count", "Slow_WindDirection_Count",
		"Fast_PitchAngle_Count", "Fast_PitchAngle1_Count", "Fast_PitchAngle2_Count", "Fast_PitchAngle3_Count", "Fast_GenSpeed_RPM_Count", "Fast_RotorSpeed_RPM_Count",
		"Fast_ReactivePower_kVAr_Count", "Fast_Frequency_Hz_Count", "Slow_TempOutdoor_Count", "Slow_TempNacelle_Count", "Slow_TempGearBoxHSSNDE_Count", "Slow_TempGearBoxHSSDE_Count",
		"Slow_TempGearBoxIMSDE_Count", "Slow_TempGearBoxOilSump_Count", "Slow_TempGearBoxIMSNDE_Count", "Slow_TempGeneratorBearingDE_Count", "Slow_TempGeneratorBearingNDE_Count",
		"Slow_TempHubBearing_Count", "Slow_TempG1L1_Count", "Slow_TempG1L2_Count", "Slow_TempG1L3_Count", "Slow_TempBottomControlSection_Count", "Slow_TempConv1_Count",
		"Slow_TempConv2_Count", "Slow_TempConv3_Count", "Fast_PitchAccuV1_Count", "Fast_PitchAccuV2_Count", "Fast_PitchAccuV3_Count", "Fast_PowerFactor_Count",
		"Fast_Total_Prod_Day_kWh_Count", "turbinestate", "statedescription"}
	headerResult = []string{"Active Power", "Wind Speed", "Nacelle Pos", "Wind Direction",
		"Pitch Angle", "Pitch Angle1", "Pitch Angle2", "Pitch Angle3", "Generator Speed", "Rotor Speed",
		"Reactive Power", "Frequency Grid", "Ambient Temp", "Temp Nacelle", "Temp GearBox HSS NDE", "Temp GearBox HSS DE",
		"Temp GearBox IMS DE", "Temp GearBox Oil Sump", "Temp GearBox IMS NDE", "Temp Generator Bearing DE", "Temp Generator Bearing NDE",
		"Temp Main Bearing", "Temp G1L1", "Temp G1L2", "Temp G1L3", "Temp Bottom Control Section", "Temp Conv1",
		"Temp Conv2", "Temp Conv3", "Pitch Accu V1", "Pitch Accu V2", "Pitch Accu V3", "Power Factor",
		"Total Production Day",
		"Active Power Count", "Wind Speed Count", "Nacelle Pos Count", "Wind Direction Count",
		"Pitch Angle Count", "Pitch Angle1 Count", "Pitch Angle2 Count", "Pitch Angle3 Count", "Generator Speed Count", "Rotor Speed Count",
		"Reactive Power Count", "Frequency Grid Count", "Ambient Temp Count", "Temp Nacelle Count", "Temp GearBox HSS NDE Count", "Temp GearBox HSS DE Count",
		"Temp GearBox IMS DE Count", "Temp GearBox Oil Sump Count", "Temp GearBox IMS NDE Count", "Temp Generator Bearing DE Count", "Temp Generator Bearing NDE Count",
		"Temp Main Bearing Count", "Temp G1L1 Count", "Temp G1L2 Count", "Temp G1L3 Count", "Temp Bottom Control Section Count", "Temp Conv1 Count",
		"Temp Conv2 Count", "Temp Conv3 Count", "Pitch Accu V1 Count", "Pitch Accu V2 Count", "Pitch Accu V3 Count", "Power Factor Count",
		"Total Production Day Count", "TurbineState", "TurbineState Desc"}
	return
} | web/controller/databrowserHeaderList.go | 0.709724 | 0.401277 | databrowserHeaderList.go | starcoder
package impl
import (
"fmt"
"reflect"
"strings"
sqldb "github.com/domonda/go-sqldb"
)
// Update table rows(s) with values using the where statement with passed in args starting at $1.
func Update(conn sqldb.Connection, table string, values sqldb.Values, where, argFmt string, args []interface{}) error {
	if len(values) == 0 {
		return fmt.Errorf("Update table %s: no values passed", table)
	}
	// Build the UPDATE statement once, execute it, and wrap any error
	// together with the query and its arguments for diagnostics.
	query, vals := buildUpdateQuery(table, values, where, args)
	return WrapNonNilErrorWithQuery(conn.Exec(query, vals...), query, argFmt, vals)
}
// UpdateReturningRow updates a table row with values using the where statement with passed in args starting at $1
// and returning a single row with the columns specified in returning argument.
func UpdateReturningRow(conn sqldb.Connection, table string, values sqldb.Values, returning, where string, args ...interface{}) sqldb.RowScanner {
	if len(values) == 0 {
		return sqldb.RowScannerWithError(fmt.Errorf("UpdateReturningRow table %s: no values passed", table))
	}
	// Append the RETURNING clause to the generated UPDATE statement.
	query, vals := buildUpdateQuery(table, values, where, args)
	return conn.QueryRow(query+" RETURNING "+returning, vals...)
}
// UpdateReturningRows updates table rows with values using the where statement with passed in args starting at $1
// and returning multiple rows with the columns specified in returning argument.
func UpdateReturningRows(conn sqldb.Connection, table string, values sqldb.Values, returning, where string, args ...interface{}) sqldb.RowsScanner {
	if len(values) == 0 {
		return sqldb.RowsScannerWithError(fmt.Errorf("UpdateReturningRows table %s: no values passed", table))
	}
	// Append the RETURNING clause to the generated UPDATE statement.
	query, vals := buildUpdateQuery(table, values, where, args)
	return conn.QueryRows(query+" RETURNING "+returning, vals...)
}
// buildUpdateQuery renders `UPDATE <table> SET "col"=$N,... WHERE <where>`
// with placeholder numbers continuing after the already-passed args, and
// returns the query together with args extended by the sorted column values.
func buildUpdateQuery(table string, values sqldb.Values, where string, args []interface{}) (string, []interface{}) {
	names, vals := values.Sorted()
	var b strings.Builder
	fmt.Fprintf(&b, `UPDATE %s SET `, table)
	for i, name := range names {
		if i > 0 {
			b.WriteByte(',')
		}
		// Placeholders start at $1+len(args) so caller args keep $1..$len(args).
		fmt.Fprintf(&b, `"%s"=$%d`, name, 1+len(args)+i)
	}
	fmt.Fprintf(&b, ` WHERE %s`, where)
	return b.String(), append(args, vals...)
}
// UpdateStruct updates a row of table using the exported fields
// of rowStruct which have a `db` tag that is not "-".
// Struct fields with a `db` tag matching any of the passed ignoreColumns will not be used.
// If restrictToColumns are provided, then only struct fields with a `db` tag
// matching any of the passed column names will be used.
func UpdateStruct(conn sqldb.Connection, table string, rowStruct interface{}, namer sqldb.StructFieldNamer, argFmt string, ignoreColumns, restrictToColumns []string) error {
v := reflect.ValueOf(rowStruct)
for v.Kind() == reflect.Ptr && !v.IsNil() {
v = v.Elem()
}
switch {
case v.Kind() == reflect.Ptr && v.IsNil():
return fmt.Errorf("UpdateStruct of table %s: can't insert nil", table)
case v.Kind() != reflect.Struct:
return fmt.Errorf("UpdateStruct of table %s: expected struct but got %T", table, rowStruct)
}
columns, flags, vals := structFieldValues(v, namer, ignoreColumns, restrictToColumns, true)
if len(columns) == 0 {
return fmt.Errorf("UpdateStruct of table %s: %T has no exported struct fields with `db` tag", table, rowStruct)
}
var b strings.Builder
fmt.Fprintf(&b, `UPDATE %s SET `, table)
first := true
for i := range columns {
if f := flags[i]; f.IsPrimaryKey() || f.IsReadOnly() {
continue
}
if first {
first = false
} else {
b.WriteByte(',')
}
fmt.Fprintf(&b, `"%s"=$%d`, columns[i], i+1)
}
b.WriteString(` WHERE `)
hasPK := false
for i := range columns {
if !flags[i].IsPrimaryKey() {
continue
}
if !hasPK {
hasPK = true
} else {
b.WriteString(` AND `)
}
fmt.Fprintf(&b, `"%s"=$%d`, columns[i], i+1)
}
if !hasPK {
return fmt.Errorf("UpdateStruct of table %s: %T has no exported struct fields with ,pk tag value suffix to mark primary key column(s)", table, rowStruct)
}
query := b.String()
err := conn.Exec(query, vals...)
return WrapNonNilErrorWithQuery(err, query, argFmt, vals)
} | impl/update.go | 0.655777 | 0.444022 | update.go | starcoder |
package conf
// Uint32Var defines a uint32 flag and environment variable with specified name, default value, and usage string.
// The argument p points to a uint32 variable in which to store the value of the flag and/or environment variable.
func (c *Configurator) Uint32Var(p *uint32, name string, value uint32, usage string) {
	// Register with the environment set first, then the flag set; both
	// write through the same pointer. The registration order is kept as-is
	// because it may affect which source wins at resolution time.
	c.env().Uint32Var(p, name, value, usage)
	c.flag().Uint32Var(p, name, value, usage)
}
// Uint32 defines a uint32 flag and environment variable with specified name, default value, and usage string.
// The return value is the address of a uint32 variable that stores the value of the flag and/or environment variable.
func (c *Configurator) Uint32(name string, value uint32, usage string) *uint32 {
	var v uint32
	c.Uint32Var(&v, name, value, usage)
	return &v
}
// Uint32VarE defines a uint32 environment variable with specified name, default value, and usage string.
// The argument p points to a uint32 variable in which to store the value of the environment variable.
func (c *Configurator) Uint32VarE(p *uint32, name string, value uint32, usage string) {
	// Environment-only registration (no command-line flag).
	c.env().Uint32Var(p, name, value, usage)
}
// Uint32E defines a uint32 environment variable with specified name, default value, and usage string.
// The return value is the address of a uint32 variable that stores the value of the environment variable.
func (c *Configurator) Uint32E(name string, value uint32, usage string) *uint32 {
	var v uint32
	c.Uint32VarE(&v, name, value, usage)
	return &v
}
// Uint32VarF defines a uint32 flag with specified name, default value, and usage string.
// The argument p points to a uint32 variable in which to store the value of the flag.
func (c *Configurator) Uint32VarF(p *uint32, name string, value uint32, usage string) {
	// Flag-only registration (no environment variable).
	c.flag().Uint32Var(p, name, value, usage)
}
// Uint32F defines a uint32 flag with specified name, default value, and usage string.
// The return value is the address of a uint32 variable that stores the value of the flag.
func (c *Configurator) Uint32F(name string, value uint32, usage string) *uint32 {
	var v uint32
	c.Uint32VarF(&v, name, value, usage)
	return &v
}
// The following package-level convenience functions delegate to the
// Global configurator so callers do not need their own Configurator.

// Uint32Var defines a uint32 flag and environment variable with specified name, default value, and usage string.
// The argument p points to a uint32 variable in which to store the value of the flag and/or environment variable.
func Uint32Var(p *uint32, name string, value uint32, usage string) {
	Global.Uint32Var(p, name, value, usage)
}
// Uint32 defines a uint32 flag and environment variable with specified name, default value, and usage string.
// The return value is the address of a uint32 variable that stores the value of the flag and/or environment variable.
func Uint32(name string, value uint32, usage string) *uint32 {
	return Global.Uint32(name, value, usage)
}
// Uint32VarE defines a uint32 environment variable with specified name, default value, and usage string.
// The argument p points to a uint32 variable in which to store the value of the environment variable.
func Uint32VarE(p *uint32, name string, value uint32, usage string) {
	Global.Uint32VarE(p, name, value, usage)
}
// Uint32E defines a uint32 environment variable with specified name, default value, and usage string.
// The return value is the address of a uint32 variable that stores the value of the environment variable.
func Uint32E(name string, value uint32, usage string) *uint32 {
	return Global.Uint32E(name, value, usage)
}
// Uint32VarF defines a uint32 flag with specified name, default value, and usage string.
// The argument p points to a uint32 variable in which to store the value of the flag.
func Uint32VarF(p *uint32, name string, value uint32, usage string) {
	Global.Uint32VarF(p, name, value, usage)
}
// Uint32F defines a uint32 flag with specified name, default value, and usage string.
// The return value is the address of a uint32 variable that stores the value of the flag.
func Uint32F(name string, value uint32, usage string) *uint32 {
	return Global.Uint32F(name, value, usage)
}
package tensor
import (
"fmt"
"math"
"reflect"
)
type Tensor struct {
number interface{}
value reflect.Value
dtype reflect.Type
shape []int
name string
errors RaiseError{}
}
// NewTensor wraps an arbitrary nested slice/array in a Tensor, recording
// the length of every dimension in shape.
// BUGFIX: the original assigned a reflect.Type to the reflect.Value loop
// variable (`val = typ.Elem()`) and never advanced typ, so it could not
// compile nor terminate; val and typ now descend in lockstep.
func NewTensor(a interface{}) *Tensor {
	val := reflect.ValueOf(a)
	typ := val.Type()
	var shape []int
	for typ.Kind() == reflect.Array || typ.Kind() == reflect.Slice {
		shape = append(shape, val.Len())
		if val.Len() > 0 {
			val = val.Index(0)
		}
		typ = typ.Elem()
	}
	return &Tensor{value: val, number: a, shape: shape, dtype: typ}
}
// Shape returns the per-dimension lengths recorded at construction time.
func (t *Tensor) Shape() []int {
	return t.shape
}
// Name returns the user-assigned name of the tensor (see SetName).
// BUGFIX: the original declared a []int return type while returning the
// string field t.name, which cannot compile.
func (t *Tensor) Name() string {
	return t.name
}
// Dtype returns the element type of the tensor.
// NOTE(review): nothing in this file visibly assigns dtype, so this may
// return nil for tensors built with NewTensor — confirm.
func (t *Tensor) Dtype() reflect.Type {
	return t.dtype
}
// ToValue returns a pointer to a reflect.Value wrapping the tensor's
// underlying value.
// BUGFIX: `&reflect.Value(t.number)` was invalid — a conversion result is
// not addressable; the value is materialized in a local first.
func (t *Tensor) ToValue() *reflect.Value {
	v := reflect.ValueOf(t.number)
	return &v
}
// Placeholder returns an empty Tensor carrying only a shape, with no data.
func Placeholder(shape []int) *Tensor {
	return &Tensor{shape: shape}
}
func TensorFromValue(v reflect.Value) (*Tensor, error){
if v == nil{
return nil, fmt.Errorf("You have passed an empty interface (%T)to be transformed. ", v.Addr().Type())
}
return &Tensor(value: v, dtype: v.Elem().Type())
}
// AssertionError returns an error if the two tensors differ in element
// type or shape, and nil when they match.
// NOTE(review): this function's name collides with the AssertionError
// struct declared later in this file; one of the two must be renamed
// before the package can compile.
func AssertionError(x, y *Tensor) error {
	valX, valY := x.value.Type(), y.value.Type()
	if valX != valY {
		return fmt.Errorf("Assertion Error: given tensors do not have the same types: %T ----- %T\n", valX, valY)
	}
	// BUGFIX: guard against indexing past the end of a shorter y.shape,
	// and drop the unreachable break that followed the original return.
	if len(x.shape) != len(y.shape) {
		return fmt.Errorf("Assertion Error: given tensors do not have the same shapes")
	}
	for i := range x.shape {
		if x.shape[i] != y.shape[i] {
			return fmt.Errorf("Assertion Error: given tensors do not have the same shapes")
		}
	}
	return nil
}
// SetName assigns a user-visible name to the tensor.
func (t *Tensor) SetName(name string) {
	t.name = name
}
// printType prints a to stdout.
// NOTE(review): despite the name this prints the value (%v), not the
// type (%T) — confirm which was intended.
func printType(a interface{}) {
	fmt.Printf("a: %v\n", a)
}
// TensorArray is apparently meant to flatten the tensor into a slice.
// NOTE(review): this body does not compile — `shape` and `val` are
// undefined, `typ` is never advanced so the loop cannot terminate, and a
// reflect.Value is appended to a []float64. Needs a rewrite before use.
func (t *Tensor) TensorArray() interface{} {
	typ, arr := t.value, make([]float64, 1)
	for typ.Kind() == reflect.Array || typ.Kind() == reflect.Slice {
		shape = append(shape, val.Len())
		if val.Len() > 0 {
			val = val.Index(0)
		}
		val = typ.Elem()
		arr = append(arr, val)
	}
	return arr
}
func (*t.Tensor) AtShape(shape []int, a int) interface{} {
num := t.number
for _, k := range shape {
num := num[k]
}
return num
}
// Add is intended to perform elementwise addition of t2 into t.
// NOTE(review): this body does not compile — `shape` and `val` are
// undefined, `typ` is never advanced so the loop cannot terminate, and
// `+=` is not defined on an interface{} value. Needs a rewrite.
func (t *Tensor) Add(t2 *Tensor) (*Tensor, error) {
	// CHECK MATCHING SHAPES
	if err := AssertionError(t, t2); err != nil {
		return nil, err
	}
	typ, count := t.value, 0
	for typ.Kind() == reflect.Array || typ.Kind() == reflect.Slice {
		shape = append(shape, val.Len())
		if val.Len() > 0 {
			val = val.Index(0)
		}
		val = typ.Elem()
		count += 1
		t.number += t2.AtShape(shape, count)
	}
	return t, nil
}
// Substract is intended to perform elementwise subtraction of t2 from t.
// NOTE(review): same non-compiling traversal as Add; also the name has a
// typo (Subtract) but renaming would break any callers.
func (t *Tensor) Substract(t2 *Tensor) (*Tensor, error) {
	// CHECK MATCHING SHAPES
	if err := AssertionError(t, t2); err != nil {
		return nil, err
	}
	typ, count := t.value, 0
	for typ.Kind() == reflect.Array || typ.Kind() == reflect.Slice {
		shape = append(shape, val.Len())
		if val.Len() > 0 {
			val = val.Index(0)
		}
		val = typ.Elem()
		count += 1
		t.number -= t2.AtShape(shape, count)
	}
	return t, nil
}
// Multiply is intended to perform elementwise multiplication by t2.
// NOTE(review): same non-compiling traversal as Add.
func (t *Tensor) Multiply(t2 *Tensor) (*Tensor, error) {
	// CHECK MATCHING SHAPES
	if err := AssertionError(t, t2); err != nil {
		return nil, err
	}
	typ, count := t.value, 0
	for typ.Kind() == reflect.Array || typ.Kind() == reflect.Slice {
		shape = append(shape, val.Len())
		if val.Len() > 0 {
			val = val.Index(0)
		}
		val = typ.Elem()
		count += 1
		t.number *= t2.AtShape(shape, count)
	}
	return t, nil
}
// isZero reports whether x is exactly zero.
func isZero(x float64) bool {
	return x == 0
}
// isZeroCounts returns count incremented by one if x is zero.
// BUGFIX: the original added a bool to an int (`count += x == 0`), which
// does not compile, and was missing a return statement.
func isZeroCounts(x float64, count int) int {
	if x == 0 {
		count++
	}
	return count
}
// DivisionByZero returns the error reported when an elementwise divide
// encounters a zero divisor.
func DivisionByZero() error {
	const msg = "Division by Zero present. Inspect your tensors via the Shape, Inspect or Placeholder function. "
	return fmt.Errorf(msg)
}
// Divide is intended to perform elementwise division by t2, returning
// DivisionByZero when a zero divisor is met.
// NOTE(review): this body does not compile — `shape` and `val` are
// undefined, `typ` never advances, isZero receives an interface{} instead
// of a float64, and `/=` is not defined on interface{}.
func (t *Tensor) Divide(t2 *Tensor) (*Tensor, error) {
	// CHECK MATCHING SHAPES
	if err := AssertionError(t, t2); err != nil {
		return nil, err
	}
	typ, count := t.value, 0
	for typ.Kind() == reflect.Array || typ.Kind() == reflect.Slice {
		shape = append(shape, val.Len())
		if val.Len() > 0 {
			val = val.Index(0)
		}
		val = typ.Elem()
		count += 1
		elem := t2.AtShape(shape, count)
		if isZero(elem) {
			return nil, DivisionByZero()
		}
		t.number /= t2.AtShape(shape, count)
	}
	return t, nil
}
// Map is intended to apply f to every element of the tensor.
// NOTE(review): does not compile — `shape`/`val` undefined, f is applied
// to a reflect.Value rather than an interface{}, and *Tensor has no
// Placeholder method (Placeholder is a free function taking []int).
func (t *Tensor) Map(f func(interface{}) interface{}) *Tensor {
	typ, count := t.value, 0
	for typ.Kind() == reflect.Array || typ.Kind() == reflect.Slice {
		shape = append(shape, val.Len())
		if val.Len() > 0 {
			val = val.Index(0)
		}
		val = typ.Elem()
		val = f(val)
	}
	return t.Placeholder(t.value)
}
// a and b are indices for shape
// NOTE(review): empty stub; it also collides with the three-argument
// Gather method declared later in this file (duplicate method name),
// which prevents the package from compiling.
func (t *Tensor) Gather(a, b int) {
}
// follow the tf documentation and implement
// ZeroCounts is intended to count zero elements.
// NOTE(review): does not compile — `shape`/`val` undefined, `typ` never
// advances, and a bool is added to an int.
func (t *Tensor) ZeroCounts() int {
	typ, count := t.value, 0
	for typ.Kind() == reflect.Array || typ.Kind() == reflect.Slice {
		shape = append(shape, val.Len())
		if val.Len() > 0 {
			val = val.Index(0)
		}
		val = typ.Elem()
		count += isZero(val)
	}
	return count
}
// GetAxis is intended to return the sub-tensor at the given axis.
// NOTE(review): does not compile — the signature `(t*Tensor, error)` and
// receiver spacing are malformed, `val[i]` indexes a reflect.Value, and
// fmt.Errorf is given more arguments than format verbs.
func (t* Tensor) GetAxis(axis int) (t*Tensor, error){
	val := t.value
	if t.OutOfAxis(axis) {
		return nil, fmt.Errorf("Axis %d is not present in tensor shape. ", axis, "Validate your tensors with TensorSpec or Shape \n")
	}
	for i:=0;i<axis;i++{
		val = val[i]
	}
	return TensorFromValue(val)
}
// OutOfAxis reports whether axis exceeds the tensor's rank.
// NOTE(review): with 0-based axes the check should arguably be
// `axis >= len(t.shape)` — confirm intended axis convention.
func (t *Tensor) OutOfAxis(axis int) bool {
	return axis > len(t.shape)
}
// swap returns its two arguments in reversed order.
// BUGFIX: the original was declared as `func switch(a,b int) int,int` —
// `switch` is a reserved word and cannot be an identifier, and multiple
// results must be parenthesized; renamed to swap (no caller could have
// compiled against the old name).
func swap(a, b int) (int, int) {
	return b, a
}
// Gather is intended to return the [a:b] slice of the tensor along axis.
// NOTE(review): does not compile — `k[a:b]` slices a *Tensor, the final
// return omits the error value, fmt.Errorf has surplus arguments, and the
// interval check `b>a` rejects the CORRECT ordering while its message
// describes the opposite condition; it also duplicates the two-argument
// Gather stub above (duplicate method name).
func (t *Tensor) Gather(a, b, axis int) (*Tensor, error) {
	if t.OutOfAxis(axis) {
		return nil, fmt.Errorf("Axis %d is not present in tensor shape. ", axis, "Validate your tensors with TensorSpec or Shape \n")
	}
	k, err := t.GetAxis(axis)
	if err !=nil{
		return nil, err
	}
	if b>a{
		return nil, fmt.Errorf("In the interval %d to %d you have passed the larger number first. You can also deal with this by calling the switch function\n. ",a,b)
	}
	return k[a:b]
}
// ValueError is intended to validate that a reflect.Value can be treated
// as a tensor.
// NOTE(review): does not compile — `ty == string` compares a reflect.Type
// to a type keyword, TensorFromValue's two results are assigned to one
// variable, and the name collides with the ValueError struct below.
func ValueError(value reflect.Value) error{
	ty := value.Type()
	if ty == nil && ty == string{
		return fmt.Errorf("%T does not match Tensor interface", ty)
	}
	tensor := TensorFromValue(value)
	if len(tensor.shape) > 1e6{
		return fmt.Errorf("You have entered an interface size greater than the RAM size: %d", len(tensor.shape))
	}
	if _,err:=tensor.GetAxis(0);err!=nil{
		return fmt.Errorf("The given tensor is not valid due to no axis found. Treat carefully.")
	}
	return nil
}
// Squeeze removes the dimension with size 1.
// NOTE(review): does not compile — `shape`/`val`/`f` issues as elsewhere,
// and `t.number[:][:]` slices an interface{}. Needs a rewrite.
func (t* Tensor) Squeeze(){
	typ, count := t.value, 0
	for typ.Kind() == reflect.Array || typ.Kind() == reflect.Slice {
		shape = append(shape, val.Len())
		if val.Len() > 0 {
			val = val.Index(0)
		}
		if shape[0] == 1{
			f := t.number[:][:]
		}
	}
}
// tape approximates the derivative of f at x with a forward finite
// difference using a fixed step of 1e-6.
func tape(f func(x float64) float64, x float64) float64 {
	const epsilon = 1e-6
	return (f(x+epsilon) - f(x)) / epsilon
}
// sigmoid computes the logistic function 1 / (1 + e^(-x)).
// BUGFIX: the original used 1 / (1 - e^(-x)), which is not the sigmoid
// (it diverges at x = 0 and leaves (0,1) elsewhere).
func sigmoid(x float64) float64 {
	return 1 / (1 + math.Exp(-x))
}
// sigmoidDerivative computes d/dx sigmoid(x) = s(x) * (1 - s(x)).
func sigmoidDerivative(x float64) float64 {
	s := sigmoid(x)
	return s * (1 - s)
}
// RaiseError is the contract for the error-check types below.
// NOTE(review): none of the concrete types in this file actually satisfy
// it — their Check methods take arguments and return (bool, error), and
// none implements Init.
type RaiseError interface {
	Init()
	Check() bool
	Raise() error
}
// ValueError records a tensor to validate and whether validation failed.
// NOTE(review): the name collides with the free function ValueError
// declared earlier in this file (duplicate declaration).
type ValueError struct {
	tensor *Tensor
	raised bool
}
// Check validates the tensor's type, size and axis availability.
// NOTE(review): does not compile — Tensor has no Value method (the field
// is `value`; the accessor is ToValue), and `ty == string` compares a
// reflect.Type to a type keyword.
func (ve *ValueError) Check(tensor Tensor) (bool, error) {
	value := tensor.Value()
	ty := value.Type()
	if ty == nil && ty == string {
		return true, fmt.Errorf("%T does not match Tensor interface", ty)
	}
	if len(tensor.shape) > 1e6 {
		return true, fmt.Errorf("You have entered an interface size greater than the RAM size: %d", len(tensor.shape))
	}
	if _, err := tensor.GetAxis(0); err != nil {
		return true, fmt.Errorf("The given tensor is not valid due to no axis found. Treat carefully.")
	}
	return false, nil
}
// Raise runs Check on the stored tensor, records the outcome, and returns
// a generic error when the check failed.
// NOTE(review): Check takes a Tensor value but ve.tensor is *Tensor, and
// the detailed error from Check is discarded — confirm intent.
func (ve *ValueError) Raise() error {
	raised, err := ve.Check(ve.tensor)
	ve.raised = raised
	if err != nil {
		return fmt.Errorf("ValueError raised. ")
	}
	return nil
}
// AssertionError records a pair of tensors to compare.
// NOTE(review): the name collides with the free function AssertionError
// declared earlier in this file (duplicate declaration).
type AssertionError struct {
	tensor1, tensor2 *Tensor
	raised bool
}
// Check reports whether x and y violate the assertion that they share
// element type and shape: (true, error) on mismatch, (false, nil) when
// the tensors are compatible.
func (ae *AssertionError) Check(x, y *Tensor) (bool, error) {
	valX, valY := x.value.Type(), y.value.Type()
	if valX != valY {
		return true, fmt.Errorf("Assertion Error: given tensors do not have the same types: %T ----- %T\n", valX, valY)
	}
	// BUGFIX: guard against indexing past the end of a shorter y.shape,
	// and drop the unreachable break that followed the original return.
	if len(x.shape) != len(y.shape) {
		return true, fmt.Errorf("Assertion Error: given tensors do not have the same shapes")
	}
	for i := range x.shape {
		if x.shape[i] != y.shape[i] {
			return true, fmt.Errorf("Assertion Error: given tensors do not have the same shapes")
		}
	}
	return false, nil
}
// Raise runs the assertion check on the stored tensor pair, records
// whether it fired, and returns a generic error if it did.
func (ae *AssertionError) Raise() error {
	var err error
	ae.raised, err = ae.Check(ae.tensor1, ae.tensor2)
	if err == nil {
		return nil
	}
	return fmt.Errorf("Assertion error raised. ")
}
// OutOfRangeError records a tensor and an interval [a, b] that fell
// outside its bounds. Currently unused by any method in this file.
type OutOfRangeError struct {
	tensor *Tensor
	a, b int
}
// UnknownError records a tensor that could not be classified.
type UnknownError struct {
	tensor *Tensor
}
// Check is intended to flag values that cannot be treated as tensors.
// NOTE(review): does not compile — Tensor has no Value method, a
// reflect.Type is compared to the `string` keyword, and `type(t)` is not
// a Go expression (use %T with t directly).
func (ue *UnknownError) Check(t *Tensor) (bool,error){
	ue.tensor = t
	if ue.tensor.Value().Type() == nil || ue.tensor.Value().Type() == string{
		return true, fmt.Errorf("%T\n unknown and cannot be converted into Tensor", type(t))
	}
	return false, nil
}
// Raise runs Check on the stored tensor and returns a generic error when
// it failed.
// NOTE(review): UnknownError has no `raised` field, so `ue.raised = raised`
// does not compile.
func (ue *UnknownError) Raise() error{
	raised, err := ue.Check(ue.tensor)
	ue.raised = raised
	if err!=nil{
		return fmt.Errorf("Unknown error raised.")
	}
	return nil
}
// func main() {
// // f := [][]int64{
// // {4, 4},
// // {1, 1},
// // }
// l := []int64{1, 3, 4, 5, 4, 3}
// t := NewTensor(l)
// // k := NewTensor(f)
// // fmt.Println(reflect.ValueOf(k.value))
// // fmt.Println(AssertionError(t, k))
// //var x float64 = 3.4
// v := reflect.ValueOf(t.number)
// k := v.Type()
// fmt.Println(k)
// // fmt.Println("kind is float64:", v.Kind() == reflect.Float64)
// // fmt.Println("value:", v.Float64())
// //n := v.Interface().(v.Type)
// value := reflect.New(t.number[0].(reflect.Type)).Interface()
// fmt.Println(value)
// //fmt.Println(n)
// }
// TODO: gradient tape — wire it up to the optimizer.
// TODO: import the data loaders and add some functionality
// over in the automl part.
package bytealg
const (
	// Index can search any valid length of string.
	// NOTE(review): int(-1) >> 31 is an arithmetic shift of an all-ones
	// value and evaluates to -1 on every platform — confirm whether a
	// maximum value such as int(^uint(0) >> 1) was intended.
	MaxLen        = int(-1) >> 31
	MaxBruteForce = MaxLen
)
// Compare two byte slices.
// Returns -1 if the first differing byte is lower in a, or 1 if the first differing byte is greater in b.
// If the byte slices are equal, returns 0.
// If the lengths are different and there are no differing bytes, compares based on length.
func Compare(a, b []byte) int {
	// Compare for differing bytes.
	// BUGFIX: the original compared a[0] with b[0] on every iteration,
	// so any slices sharing a first byte compared as equal prefixes.
	for i := 0; i < len(a) && i < len(b); i++ {
		switch {
		case a[i] < b[i]:
			return -1
		case a[i] > b[i]:
			return 1
		}
	}
	// Compare lengths.
	switch {
	case len(a) > len(b):
		return 1
	case len(a) < len(b):
		return -1
	default:
		return 0
	}
}
// Count the number of instances of a byte in a slice.
func Count(b []byte, c byte) int {
// Use a simple implementation, as there is no intrinsic that does this like we want.
n := 0
for _, v := range b {
if v == c {
n++
}
}
return n
}
// Count the number of instances of a byte in a string.
func CountString(s string, c byte) int {
	// Use a simple implementation, as there is no intrinsic that does this like we want.
	// Kept separate from Count because the compiler does not generate
	// zero-copy byte-string conversions for the general case.
	total := 0
	for i := len(s) - 1; i >= 0; i-- {
		if s[i] == c {
			total++
		}
	}
	return total
}
// Cutover is not reachable in TinyGo, but must exist as it is referenced.
func Cutover(n int) int {
	// Setting MaxLen and MaxBruteForce should force a different path to be taken.
	// This should never be called.
	panic("cutover is unreachable")
}
// Equal checks if two byte slices are equal.
// It is equivalent to bytes.Equal.
func Equal(a, b []byte) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}
// Index finds the base index of the first instance of the byte sequence b in a.
// If a does not contain b, this returns -1.
func Index(a, b []byte) int {
	n := len(b)
	for i := 0; i+n <= len(a); i++ {
		// Inline byte comparison of the candidate window against b.
		match := true
		for j := 0; j < n; j++ {
			if a[i+j] != b[j] {
				match = false
				break
			}
		}
		if match {
			return i
		}
	}
	return -1
}
// Index finds the index of the first instance of the specified byte in the slice.
// If the byte is not found, this returns -1.
func IndexByte(b []byte, c byte) int {
	for i := 0; i < len(b); i++ {
		if b[i] == c {
			return i
		}
	}
	return -1
}
// Index finds the index of the first instance of the specified byte in the string.
// If the byte is not found, this returns -1.
func IndexByteString(s string, c byte) int {
	for i, v := range []byte(s) {
		if v == c {
			return i
		}
	}
	return -1
}
// Index finds the base index of the first instance of a substring in a string.
// If the substring is not found, this returns -1.
func IndexString(str, sub string) int {
	n := len(sub)
	for i := 0; i+n <= len(str); i++ {
		if str[i:i+n] == sub {
			return i
		}
	}
	return -1
}
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The following code has been copied from the Go 1.15 release tree.
// The functions below are vendored verbatim from the Go 1.15 standard
// library (see the license header above); keep them byte-for-byte
// diffable against upstream rather than restyling.

// PrimeRK is the prime base used in Rabin-Karp algorithm.
const PrimeRK = 16777619
// HashStrBytes returns the hash and the appropriate multiplicative
// factor for use in Rabin-Karp algorithm.
func HashStrBytes(sep []byte) (uint32, uint32) {
	hash := uint32(0)
	for i := 0; i < len(sep); i++ {
		hash = hash*PrimeRK + uint32(sep[i])
	}
	// pow = PrimeRK**len(sep), computed by exponentiation-by-squaring.
	var pow, sq uint32 = 1, PrimeRK
	for i := len(sep); i > 0; i >>= 1 {
		if i&1 != 0 {
			pow *= sq
		}
		sq *= sq
	}
	return hash, pow
}
// HashStr returns the hash and the appropriate multiplicative
// factor for use in Rabin-Karp algorithm.
func HashStr(sep string) (uint32, uint32) {
	hash := uint32(0)
	for i := 0; i < len(sep); i++ {
		hash = hash*PrimeRK + uint32(sep[i])
	}
	var pow, sq uint32 = 1, PrimeRK
	for i := len(sep); i > 0; i >>= 1 {
		if i&1 != 0 {
			pow *= sq
		}
		sq *= sq
	}
	return hash, pow
}
// HashStrRevBytes returns the hash of the reverse of sep and the
// appropriate multiplicative factor for use in Rabin-Karp algorithm.
func HashStrRevBytes(sep []byte) (uint32, uint32) {
	hash := uint32(0)
	for i := len(sep) - 1; i >= 0; i-- {
		hash = hash*PrimeRK + uint32(sep[i])
	}
	var pow, sq uint32 = 1, PrimeRK
	for i := len(sep); i > 0; i >>= 1 {
		if i&1 != 0 {
			pow *= sq
		}
		sq *= sq
	}
	return hash, pow
}
// HashStrRev returns the hash of the reverse of sep and the
// appropriate multiplicative factor for use in Rabin-Karp algorithm.
func HashStrRev(sep string) (uint32, uint32) {
	hash := uint32(0)
	for i := len(sep) - 1; i >= 0; i-- {
		hash = hash*PrimeRK + uint32(sep[i])
	}
	var pow, sq uint32 = 1, PrimeRK
	for i := len(sep); i > 0; i >>= 1 {
		if i&1 != 0 {
			pow *= sq
		}
		sq *= sq
	}
	return hash, pow
}
// IndexRabinKarpBytes uses the Rabin-Karp search algorithm to return the index of the
// first occurrence of substr in s, or -1 if not present.
func IndexRabinKarpBytes(s, sep []byte) int {
	// Rabin-Karp search
	hashsep, pow := HashStrBytes(sep)
	n := len(sep)
	var h uint32
	for i := 0; i < n; i++ {
		h = h*PrimeRK + uint32(s[i])
	}
	if h == hashsep && Equal(s[:n], sep) {
		return 0
	}
	// Roll the hash one byte at a time: add the incoming byte, remove the
	// outgoing byte's contribution (pow = PrimeRK**n).
	for i := n; i < len(s); {
		h *= PrimeRK
		h += uint32(s[i])
		h -= pow * uint32(s[i-n])
		i++
		if h == hashsep && Equal(s[i-n:i], sep) {
			return i - n
		}
	}
	return -1
}
// IndexRabinKarp uses the Rabin-Karp search algorithm to return the index of the
// first occurrence of substr in s, or -1 if not present.
func IndexRabinKarp(s, substr string) int {
	// Rabin-Karp search
	hashss, pow := HashStr(substr)
	n := len(substr)
	var h uint32
	for i := 0; i < n; i++ {
		h = h*PrimeRK + uint32(s[i])
	}
	if h == hashss && s[:n] == substr {
		return 0
	}
	for i := n; i < len(s); {
		h *= PrimeRK
		h += uint32(s[i])
		h -= pow * uint32(s[i-n])
		i++
		if h == hashss && s[i-n:i] == substr {
			return i - n
		}
	}
	return -1
}
package gorgonia
import (
"fmt"
"hash"
"sort"
"github.com/chewxy/hm"
"github.com/pkg/errors"
"gorgonia.org/tensor"
)
// sparsemaxOp implements the sparsemax activation as a gorgonia Op.
// axis selects the dimension to operate on; -1 means the flattened input.
type sparsemaxOp struct {
	axis int
}
// newSparsemaxOp builds a sparsemax op. If an axis is supplied the op
// operates along it; otherwise the axis defaults to -1 (flattened input).
func newSparsemaxOp(axes ...int) *sparsemaxOp {
	op := &sparsemaxOp{axis: -1}
	if len(axes) > 0 {
		op.axis = axes[0]
	}
	return op
}
// Sparsemax - implements the sparsemax operation described here: http://proceedings.mlr.press/v48/martins16.pdf
// An optional axis selects the dimension the projection runs along.
func Sparsemax(x *Node, axes ...int) (*Node, error) {
	op := newSparsemaxOp(axes...)
	return ApplyOp(op, x)
}
// Arity returns the number of inputs the op expects (one).
func (op *sparsemaxOp) Arity() int {
	return 1
}

// ReturnsPtr reports that the op does not return a pointer to its input.
func (op *sparsemaxOp) ReturnsPtr() bool { return false }

// CallsExtern reports that the op needs no external call.
func (op *sparsemaxOp) CallsExtern() bool { return false }

// WriteHash writes the op's identity into the hasher.
func (op *sparsemaxOp) WriteHash(h hash.Hash) {
	fmt.Fprintf(h, "Sparsemax{}()")
}

// Hashcode returns a hash identifying the op.
func (op *sparsemaxOp) Hashcode() uint32 { return simpleHash(op) }

// String implements fmt.Stringer.
// FIX: return the constant directly instead of fmt.Sprintf with no
// formatting verbs (staticcheck S1039).
func (op *sparsemaxOp) String() string {
	return "Sparsemax{}()"
}
// InferShape returns the output shape, which equals the input shape.
func (op *sparsemaxOp) InferShape(inputs ...DimSizer) (tensor.Shape, error) {
	s := inputs[0].(tensor.Shape).Clone()
	return s, nil
}
// Type returns the op's type: a -> a (output has the input's type).
func (op *sparsemaxOp) Type() hm.Type {
	a := hm.TypeVariable('a')
	return hm.NewFnType(a, a)
}
// OverwritesInput reports that no input is overwritten in place.
func (op *sparsemaxOp) OverwritesInput() int { return -1 }
// checkInput validates arity and that the single input is a tensor.
func (op *sparsemaxOp) checkInput(inputs ...Value) (tensor.Tensor, error) {
	if err := checkArity(op, len(inputs)); err != nil {
		return nil, err
	}
	var in tensor.Tensor
	var ok bool
	if in, ok = inputs[0].(tensor.Tensor); !ok {
		return nil, errors.Errorf("Expected input to be a tensor, got %T", inputs[0])
	}
	return in, nil
}
// Do executes sparsemax over the (optionally transposed/reshaped) input
// and returns a fresh tensor with the input's shape, dtype and engine.
// NOTE(review): when axis != -1 the input tensor itself is transposed and
// reshaped in place and never restored afterwards — callers sharing this
// tensor will observe the mutation; confirm this is intended.
func (op *sparsemaxOp) Do(inputs ...Value) (Value, error) {
	inputTensor, err := op.checkInput(inputs...)
	if err != nil {
		return nil, fmt.Errorf("Can't check Sparsemax input: %w", err)
	}
	if op.axis != -1 {
		// Move op.axis to the front, flatten the trailing dims, then
		// transpose so the reduction axis is contiguous.
		err = inputTensor.T(0, op.axis)
		if err != nil {
			return nil, fmt.Errorf("error tranposing the input tensor: %w", err)
		}
		err = inputTensor.Reshape(inputTensor.Shape()[0], -1)
		if err != nil {
			return nil, fmt.Errorf("error reshaping the input tensor: %w", err)
		}
		err = inputTensor.T(0, 1)
		if err != nil {
			return nil, fmt.Errorf("error tranposing the input tensor: %w", err)
		}
	}
	var output interface{}
	switch inputTensor.Dtype() {
	case tensor.Float64:
		output = op.float64sparseMax(inputTensor)
	case tensor.Float32:
		output = op.float32sparseMax(inputTensor)
	default:
		return nil, fmt.Errorf("invalid input type for Sparsemax, expected float64 or float32, got: %v", inputTensor.Dtype())
	}
	return tensor.New(tensor.Of(inputTensor.Dtype()), tensor.WithShape(inputTensor.Shape().Clone()...), tensor.WithEngine(inputTensor.Engine()), tensor.WithBacking(output)), nil
}
// FIXME: go2 generics
// float32sparseMax computes the sparsemax projection of the tensor's
// float32 data onto the probability simplex and returns a []float32 of
// the same length (Martins & Astudillo 2016, Algorithm 1).
func (op *sparsemaxOp) float32sparseMax(inputTensor tensor.Tensor) interface{} {
	sortedData := make([]float32, inputTensor.Size())
	inputData := inputTensor.Data().([]float32)
	copy(sortedData, inputData)
	sort.Slice(sortedData, func(i, j int) bool {
		return sortedData[i] > sortedData[j]
	})
	kArray := make([]float32, len(sortedData))
	cumArray := make([]float32, len(sortedData))
	cumSum := float32(0.0)
	maxIndex := 0
	for i := 0; i < len(sortedData); i++ {
		// Support condition 1 + k*z_(k) > cumsum_k rewritten with the
		// EXCLUSIVE cumulative sum: 1 + (k-1)*z_(k) > cumsum_(k-1).
		kArray[i] = 1 + float32(i)*sortedData[i]
		cumSum += sortedData[i]
		cumArray[i] = cumSum - sortedData[i]
		if kArray[i] > cumArray[i] {
			maxIndex = i + 1
		}
	}
	// tau = (sum of the maxIndex largest values - 1) / maxIndex.
	// BUGFIX: cumArray holds the exclusive cumulative sum, so the
	// maxIndex-th sorted value must be added back to obtain the inclusive
	// sum required by the sparsemax threshold (e.g. for input [1,0] the
	// old code produced [2,1] instead of [1,0]).
	threshold := (cumArray[maxIndex-1] + sortedData[maxIndex-1] - 1) / float32(maxIndex)
	output := make([]float32, inputTensor.Size())
	for i := 0; i < len(inputData); i++ {
		vF := inputData[i]
		if vF-threshold > 0 {
			output[i] = vF - threshold
		}
	}
	return output
}
// float64sparseMax computes the sparsemax projection of the tensor's
// float64 data onto the probability simplex and returns a []float64 of
// the same length (Martins & Astudillo 2016, Algorithm 1).
func (op *sparsemaxOp) float64sparseMax(inputTensor tensor.Tensor) interface{} {
	sortedData := make([]float64, inputTensor.Size())
	inputData := inputTensor.Data().([]float64)
	copy(sortedData, inputData)
	sort.Slice(sortedData, func(i, j int) bool {
		return sortedData[i] > sortedData[j]
	})
	kArray := make([]float64, len(sortedData))
	cumArray := make([]float64, len(sortedData))
	cumSum := 0.0
	maxIndex := 0
	for i := 0; i < len(sortedData); i++ {
		// Support condition 1 + k*z_(k) > cumsum_k rewritten with the
		// EXCLUSIVE cumulative sum: 1 + (k-1)*z_(k) > cumsum_(k-1).
		kArray[i] = 1 + float64(i)*sortedData[i]
		cumSum += sortedData[i]
		cumArray[i] = cumSum - sortedData[i]
		if kArray[i] > cumArray[i] {
			maxIndex = i + 1
		}
	}
	// tau = (sum of the maxIndex largest values - 1) / maxIndex.
	// BUGFIX: cumArray is the exclusive cumulative sum; add back the
	// maxIndex-th sorted value to obtain the inclusive sum the sparsemax
	// threshold requires.
	threshold := (cumArray[maxIndex-1] + sortedData[maxIndex-1] - 1) / float64(maxIndex)
	output := make([]float64, inputTensor.Size())
	for i := 0; i < len(inputData); i++ {
		vF := inputData[i]
		if vF-threshold > 0 {
			output[i] = vF - threshold
		}
	}
	return output
}
// DoDiff calculates the diff and sets its value to the output node. Implementation for ADOp interface.
func (op *sparsemaxOp) DoDiff(ctx ExecutionContext, inputs Nodes, output *Node) error {
	if len(inputs) != 2 {
		return fmt.Errorf("SparsemaxOp.DoDiff needs 2 arguments")
	}
	odv := output.boundTo.(*dualValue)
	odvd := odv.Value.(tensor.Tensor)
	// Delegate the actual gradient computation to the diff op, then
	// accumulate the result into the output's derivative in place.
	diffOp := &sparsemaxDiffOp{}
	result, err := diffOp.Do(inputs[0].boundTo, inputs[1].boundTo)
	if err != nil {
		return err
	}
	err = result.(*tensor.Dense).Reshape(odvd.Shape()...)
	if err != nil {
		return err
	}
	sum, err := odvd.(*tensor.Dense).Add(result.(*tensor.Dense), tensor.UseUnsafe())
	if err != nil {
		return err
	}
	odv.d = sum
	return nil
}
// SymDiff applies the diff op. Implementation for SDOp interface.
func (op *sparsemaxOp) SymDiff(inputs Nodes, output, grad *Node) (Nodes, error) {
	err := checkArity(op, len(inputs))
	if err != nil {
		return nil, err
	}
	t := inputs[0]
	// Symbolic gradient: apply the sparsemax diff op to (input, grad).
	diffOp := &sparsemaxDiffOp{}
	nodes := make(Nodes, 1)
	nodes[0], err = ApplyOp(diffOp, t, grad)
	return nodes, err
}
// DiffWRT reports which inputs are differentiable; sparsemax has exactly
// one input and it is differentiable. Implementation for the SDOp interface.
func (op *sparsemaxOp) DiffWRT(inputs int) []bool {
	if inputs == 1 {
		return []bool{true}
	}
	panic(fmt.Sprintf("sparsemax operator only supports one input, got %d instead", inputs))
}
// sparsemaxDiffOp computes the gradient of the sparsemax op; it carries
// no state.
type sparsemaxDiffOp struct {
}
// newSparsemaxOpDiff returns a fresh sparsemax gradient op.
func newSparsemaxOpDiff() *sparsemaxDiffOp {
	return &sparsemaxDiffOp{}
}
// Arity returns the number of inputs the diff op expects (forward value
// and incoming gradient).
func (op *sparsemaxDiffOp) Arity() int {
	return 2
}

// ReturnsPtr reports that the op does not return a pointer to its input.
func (op *sparsemaxDiffOp) ReturnsPtr() bool { return false }

// CallsExtern reports that the op needs no external call.
func (op *sparsemaxDiffOp) CallsExtern() bool { return false }

// WriteHash writes the op's identity into the hasher.
func (op *sparsemaxDiffOp) WriteHash(h hash.Hash) {
	fmt.Fprintf(h, "SparsemaxDiff{}()")
}

// Hashcode returns a hash identifying the op.
func (op *sparsemaxDiffOp) Hashcode() uint32 { return simpleHash(op) }

// String implements fmt.Stringer.
// FIX: return the constant directly instead of fmt.Sprintf with no
// formatting verbs (staticcheck S1039).
func (op *sparsemaxDiffOp) String() string {
	return "SparsemaxDiff{}()"
}
// InferShape returns the output shape, which equals the first input's shape.
func (op *sparsemaxDiffOp) InferShape(inputs ...DimSizer) (tensor.Shape, error) {
	s := inputs[0].(tensor.Shape).Clone()
	return s, nil
}
// Type returns the op's type: two rank-1 tensors in, one rank-1 tensor out.
func (op *sparsemaxDiffOp) Type() hm.Type {
	aType := hm.TypeVariable('a')
	ta := newTensorType(1, aType)
	return hm.NewFnType(ta, ta, ta) // (Tensor a, Tensor a) -> Tensor a
}
// OverwritesInput reports that no input is overwritten in place.
func (op *sparsemaxDiffOp) OverwritesInput() int { return -1 }
// checkInput validates arity and unwraps the forward value and incoming
// gradient, accepting either bare tensors or dual values.
func (op *sparsemaxDiffOp) checkInput(inputs ...Value) (tensor.Tensor, tensor.Tensor, error) {
	if err := checkArity(op, len(inputs)); err != nil {
		return nil, nil, err
	}
	var (
		in       tensor.Tensor
		gradient tensor.Tensor
		ok       bool
	)
	switch t := inputs[0].(type) {
	case *dualValue:
		if in, ok = t.Value.(tensor.Tensor); !ok {
			return nil, nil, errors.Errorf("input should be a tensor.Tensor, got %T", inputs[0])
		}
	case tensor.Tensor:
		in = t
	default:
		return nil, nil, errors.Errorf("input type is not supported, got %T", inputs[0])
	}
	switch t := inputs[1].(type) {
	case *dualValue:
		if gradient, ok = t.Value.(tensor.Tensor); !ok {
			return nil, nil, errors.Errorf("gradient should be a tensor, got %T", inputs[1])
		}
	case tensor.Tensor:
		gradient = t
	default:
		return nil, nil, errors.Errorf("gradient type is not supported, got %T", inputs[1])
	}
	return in, gradient, nil
}
// Do computes the sparsemax gradient from the forward value and the
// incoming gradient, dispatching on dtype.
// NOTE(review): the result is created with WithShape(inputTensor.Size()),
// i.e. a flat vector; DoDiff reshapes it back afterwards — confirm all
// other callers expect the flattened shape.
func (op *sparsemaxDiffOp) Do(inputs ...Value) (Value, error) {
	inputTensor, gradTensor, err := op.checkInput(inputs...)
	if err != nil {
		return nil, fmt.Errorf("Can't check SparsemaxDiff input: %w", err)
	}
	if inputTensor.Size() != gradTensor.Size() {
		return nil, fmt.Errorf("sparsemaxDiffOp.Do inputs sizes should be equal")
	}
	var diff interface{}
	switch inputTensor.Dtype() {
	case tensor.Float64:
		outputData, ok := gradTensor.Data().([]float64)
		if !ok {
			return nil, fmt.Errorf("sparsemaxDiffOp.Do expected input to be []float64, got %T", inputTensor.Data())
		}
		diff = op.float64sparseMaxDiff(inputTensor.Data().([]float64), outputData)
	case tensor.Float32:
		outputData, ok := gradTensor.Data().([]float32)
		if !ok {
			return nil, fmt.Errorf("sparsemaxDiffOp.Do expected input to be []float32, got %T", inputTensor.Data())
		}
		diff = op.float32sparseMaxDiff(inputTensor.Data().([]float32), outputData)
	default:
		return nil, fmt.Errorf("sparsemaxDiffOp.Do expected input to be []float64 or []float32, got %T", inputTensor.Data())
	}
	val := tensor.New(
		tensor.Of(inputTensor.Dtype()),
		tensor.WithShape(inputTensor.Size()),
		tensor.WithEngine(inputTensor.Engine()),
		tensor.WithBacking(diff),
	)
	return val, nil
}
// FIXME: go2 generics
// float32sparseMaxDiff computes the sparsemax Jacobian-vector product:
// entries outside the support (data[i] == 0) receive zero gradient,
// entries inside receive outputData[i] minus the support mean of
// outputData.
func (op *sparsemaxDiffOp) float32sparseMaxDiff(data, outputData []float32) interface{} {
	diff := make([]float32, len(data))
	var support float32
	var gradSum float32
	for i, v := range data {
		if v == 0.0 {
			continue
		}
		diff[i] = 1.0
		gradSum += outputData[i]
		support++
	}
	var mean float32
	if support > 0 {
		mean = gradSum / support
	}
	for i := range diff {
		diff[i] *= outputData[i] - mean
	}
	return diff
}
func (op *sparsemaxDiffOp) float64sparseMaxDiff(data, outputData []float64) interface{} {
nonZeros := 0.0
inputSum := 0.0
diff := make([]float64, len(data))
for i, v := range data {
if v == 0.0 {
continue
}
diff[i] = 1.0
inputSum += outputData[i]
nonZeros++
}
sum := 0.0
if nonZeros > 0 {
sum = inputSum / nonZeros
}
for i := range diff {
diff[i] *= (outputData[i] - sum)
}
return diff
}
// ensure it complies with the Op interface
var (
_ Op = &sparsemaxDiffOp{}
_ Op = &sparsemaxOp{}
_ SDOp = &sparsemaxOp{}
_ ADOp = &sparsemaxOp{}
) | op_sparsemax.go | 0.730674 | 0.537709 | op_sparsemax.go | starcoder |
package timeseries
import (
"errors"
"fmt"
"math"
"time"
)
var (
	// EmptyTimeSeriesErr is returned by accessors and aggregations when the
	// series contains no records. (Idiomatic Go would name this
	// ErrEmptyTimeSeries; the name is kept for API compatibility.)
	EmptyTimeSeriesErr = errors.New("no records in timeseries")
)

// Record represents a point in the timeseries, currently holds only a float
// value
type Record struct {
	Timestamp int64   // nanoseconds since the Unix epoch when created via newRecord
	Value     float64 // sampled value
}

// TimeSeries represents a time series, essentially an append-only log of point
// values in time
type TimeSeries struct {
	Name      string
	Retention int64     // retention period; NOTE(review): never read in this file — confirm where it is enforced
	ctime     time.Time // creation time of the series
	Records   []*Record // records, kept sorted by Timestamp (see AddRecord)
}
// newRecord builds a Record for value, stamped with the current wall-clock
// time in nanoseconds.
func newRecord(value float64) *Record {
	now := time.Now().UnixNano()
	return &Record{Timestamp: now, Value: value}
}
// NewTimeSeries creates a new TimeSeries by accepting a name and a retention value.
func NewTimeSeries(name string, retention int64) *TimeSeries {
	return &TimeSeries{
		Name:      name,
		Retention: retention,
		ctime:     time.Now(),
		Records:   []*Record{},
	}
}
// Len returns the number of records currently stored in the series.
func (ts *TimeSeries) Len() int {
	return len(ts.Records)
}

// AddPoint appends a new point with the given value, timestamped now, and
// returns a copy of the stored record.
func (ts *TimeSeries) AddPoint(value float64) Record {
	r := newRecord(value)
	ts.AddRecord(r)
	return *r
}
// AddRecord inserts a record while keeping ts.Records sorted by timestamp:
// records at or after the current last timestamp are appended, records at or
// before the first are prepended, and anything in between is placed by
// binary search.
func (ts *TimeSeries) AddRecord(record *Record) {
	last, err := ts.Last()
	if err != nil {
		// Empty series: this becomes the first record.
		ts.Records = append(ts.Records, record)
	} else {
		first, _ := ts.First()
		if record.Timestamp >= last.Timestamp {
			ts.Records = append(ts.Records, record)
		} else if record.Timestamp <= first.Timestamp {
			ts.Records = append([]*Record{record}, ts.Records...)
		} else {
			// We need to binary search for the correct position of the
			// new record
			mid, left, right := 0, 0, ts.Len()-1
			found := false
			for left <= right {
				mid = (left + right) / 2
				if ts.Records[mid].Timestamp < record.Timestamp {
					left = mid + 1
				} else if ts.Records[mid].Timestamp > record.Timestamp {
					right = mid - 1
				} else {
					found = true
					break
				}
			}
			if !found {
				// After the search, left is the first index with a larger
				// timestamp. NOTE(review): the else branch (mid = right)
				// looks unreachable, since Records[left] should always be
				// greater than the new timestamp here — confirm.
				if ts.Records[left].Timestamp > record.Timestamp {
					mid = left
				} else {
					mid = right
				}
			}
			// Shift the tail one slot right and drop the record into place.
			ts.Records = append(ts.Records, nil)
			copy(ts.Records[mid+1:], ts.Records[mid:])
			ts.Records[mid] = record
		}
	}
}
// Average returns the arithmetic mean of all record values, or
// EmptyTimeSeriesErr when the series is empty.
func (ts *TimeSeries) Average() (float64, error) {
	if ts.Len() == 0 {
		return 0.0, EmptyTimeSeriesErr
	}
	total := 0.0
	for _, r := range ts.Records {
		total += r.Value
	}
	return total / float64(ts.Len()), nil
}

// Max returns the record holding the largest value, or EmptyTimeSeriesErr
// when the series is empty.
func (ts *TimeSeries) Max() (*Record, error) {
	if ts.Len() == 0 {
		return nil, EmptyTimeSeriesErr
	}
	best := ts.Records[0]
	for _, r := range ts.Records[1:] {
		if r.Value > best.Value {
			best = r
		}
	}
	return best, nil
}

// Min returns the record holding the smallest value, or EmptyTimeSeriesErr
// when the series is empty.
func (ts *TimeSeries) Min() (*Record, error) {
	if ts.Len() == 0 {
		return nil, EmptyTimeSeriesErr
	}
	best := ts.Records[0]
	for _, r := range ts.Records[1:] {
		if r.Value < best.Value {
			best = r
		}
	}
	return best, nil
}

// First returns the oldest record, or EmptyTimeSeriesErr when empty.
func (ts *TimeSeries) First() (*Record, error) {
	if ts.Len() == 0 {
		return nil, EmptyTimeSeriesErr
	}
	return ts.Records[0], nil
}

// Last returns the newest record, or EmptyTimeSeriesErr when empty.
func (ts *TimeSeries) Last() (*Record, error) {
	if n := len(ts.Records); n > 0 {
		return ts.Records[n-1], nil
	}
	return nil, EmptyTimeSeriesErr
}
// Range returns a new temporary TimeSeries holding every record whose
// timestamp lies in the inclusive interval [lo, hi]. The records are shared
// with the receiver, not copied.
func (ts *TimeSeries) Range(lo, hi int64) (*TimeSeries, error) {
	if ts.Len() == 0 {
		return nil, EmptyTimeSeriesErr
	}
	matched := []*Record{}
	for _, r := range ts.Records {
		if r.Timestamp < lo || r.Timestamp > hi {
			continue
		}
		matched = append(matched, r)
	}
	rangeTs := NewTimeSeries(fmt.Sprintf("%s%s", "range-tmp-", ts.Name), 0)
	rangeTs.Records = matched
	return rangeTs, nil
}
// Find performs a binary search for a record with exactly the given
// timestamp. It returns the record and its index, or (nil, -1) when no such
// record exists.
//
// Fix: the previous search used `left = mid` / `right = mid` with a
// `left < right` loop, which makes no progress when right == left+1 and the
// probe compares low (infinite loop), and it never examined the final
// candidate. This version uses the same correct mid±1 variant as AddRecord.
func (ts *TimeSeries) Find(timestamp int64) (*Record, int) {
	if first, err := ts.First(); err != nil || first.Timestamp > timestamp {
		return nil, -1
	}
	if last, err := ts.Last(); err != nil || last.Timestamp < timestamp {
		return nil, -1
	}
	left, right := 0, len(ts.Records)-1
	for left <= right {
		mid := (left + right) / 2
		switch {
		case ts.Records[mid].Timestamp < timestamp:
			left = mid + 1
		case ts.Records[mid].Timestamp > timestamp:
			right = mid - 1
		default:
			return ts.Records[mid], mid
		}
	}
	return nil, -1
}
// AverageInterval buckets the series into windows of interval_ms
// milliseconds (aligned to multiples of the interval) and returns one record
// per window holding the mean of the values inside it; windows with no
// samples are skipped.
// NOTE(review): window membership uses strict comparisons on both edges, so
// a record whose timestamp equals a window boundary is counted in neither
// window — confirm whether that is intended.
func (ts *TimeSeries) AverageInterval(interval_ms int64) ([]Record, error) {
	first, err := ts.First()
	if err != nil {
		return nil, err
	}
	last, err := ts.Last()
	if err != nil {
		return nil, err
	}
	// Timestamps are nanoseconds (see newRecord); convert the width to ns.
	interval := interval_ms * 1e6
	// Align the first window edge to a multiple of the interval.
	firstTs := (first.Timestamp / interval) * interval
	result := make([]Record, 0)
	var current int64 = firstTs + interval
	var sum float64 = 0.0
	var total int = 0
	for current < last.Timestamp {
		sum = 0.0
		total = 0
		// O(records) scan per window; fine for small series.
		for _, r := range ts.Records {
			if r.Timestamp > current-interval && r.Timestamp < current {
				sum += r.Value
				total += 1
			}
		}
		// An empty window yields 0/0 = NaN; skip it.
		if !math.IsNaN(sum / float64(total)) {
			result = append(result, Record{current, sum / float64(total)})
		}
		current += interval
	}
	return result, nil
}
package blur
import (
"errors"
"github.com/Ernyoke/Imger/convolution"
"github.com/Ernyoke/Imger/padding"
"image"
"math"
)
// BoxGray applies average blur to a grayscale image. The amount of blurring effect depends on the kernel size, where
// both width and height can be specified. The anchor point specifies a point inside the kernel. The pixel value
// will be updated after the convolution was done for the given area.
// Border types supported: see convolution package.
func BoxGray(img *image.Gray, kernelSize image.Point, anchor image.Point, border padding.Border) (*image.Gray, error) {
	kernel := generateBoxKernel(&kernelSize)
	// Fix: the result variable was previously named `error`, shadowing the
	// builtin error type.
	return convolution.ConvolveGray(img, kernel.Normalize(), anchor, border)
}

// BoxRGBA applies average blur to an RGBA image. The amount of blurring effect depends on the kernel size, where
// both width and height can be specified. The anchor point specifies a point inside the kernel. The pixel value
// will be updated after the convolution was done for the given area.
// Border types supported: see convolution package.
func BoxRGBA(img *image.RGBA, kernelSize image.Point, anchor image.Point, border padding.Border) (*image.RGBA, error) {
	kernel := generateBoxKernel(&kernelSize)
	return convolution.ConvolveRGBA(img, kernel.Normalize(), anchor, border)
}

// GaussianBlurGray applies Gaussian blur to a grayscale image. The amount of blurring effect depends on the kernel
// radius and sigma value. The pixel value will be updated after the convolution was done for the given area.
// For border types see convolution package.
func GaussianBlurGray(img *image.Gray, radius float64, sigma float64, border padding.Border) (*image.Gray, error) {
	if radius <= 0 {
		return nil, errors.New("radius must be bigger than 0")
	}
	// The anchor is the kernel center: the kernel side is 2*radius+1.
	anchor := image.Point{X: int(math.Ceil(radius)), Y: int(math.Ceil(radius))}
	return convolution.ConvolveGray(img, generateGaussianKernel(radius, sigma).Normalize(), anchor, border)
}

// GaussianBlurRGBA applies Gaussian blur to an RGBA image. The amount of blurring effect depends on the kernel
// radius and sigma value. The pixel value will be updated after the convolution was done for the given area.
// For border types see convolution package.
func GaussianBlurRGBA(img *image.RGBA, radius float64, sigma float64, border padding.Border) (*image.RGBA, error) {
	if radius <= 0 {
		return nil, errors.New("radius must be bigger than 0")
	}
	anchor := image.Point{X: int(math.Ceil(radius)), Y: int(math.Ceil(radius))}
	return convolution.ConvolveRGBA(img, generateGaussianKernel(radius, sigma).Normalize(), anchor, border)
}
// -------------------------------------------------------------------------------------------------------
// generateBoxKernel builds a kernelSize.X by kernelSize.Y kernel whose
// entries all equal 1/(width*height), i.e. a uniform averaging kernel.
func generateBoxKernel(kernelSize *image.Point) *convolution.Kernel {
	kernel, _ := convolution.NewKernel(kernelSize.X, kernelSize.Y)
	weight := 1.0 / float64(kernelSize.X*kernelSize.Y)
	for y := 0; y < kernelSize.Y; y++ {
		for x := 0; x < kernelSize.X; x++ {
			kernel.Set(x, y, weight)
		}
	}
	return kernel
}

// generateGaussianKernel builds a square kernel of side ceil(2*radius+1)
// whose entries sample the 2D Gaussian density centered on the kernel's
// middle cell.
func generateGaussianKernel(radius float64, sigma float64) *convolution.Kernel {
	side := int(math.Ceil(2*radius + 1))
	kernel, _ := convolution.NewKernel(side, side)
	for y := 0; y < side; y++ {
		for x := 0; x < side; x++ {
			kernel.Set(x, y, gaussianFunc(float64(x)-radius, float64(y)-radius, sigma))
		}
	}
	return kernel
}
func gaussianFunc(x, y, sigma float64) float64 {
sigSqr := sigma * sigma
return (1.0 / (2 * math.Pi * sigSqr)) * math.Exp(-(x*x+y*y)/(2*sigSqr))
} | blur/blur.go | 0.914539 | 0.726668 | blur.go | starcoder |
package forGraphBLASGo
import "github.com/intel/forGoParallel/pipeline"
// vectorSelect lazily applies the index-unary predicate op to the entries of
// vector u, keeping only entries for which op returns true. value is the
// extra scalar argument passed to op.
type vectorSelect[D, Ds any] struct {
	op    IndexUnaryOp[bool, D, Ds]
	u     *vectorReference[D]
	value Ds
}

// newVectorSelect wraps the select operation as a lazily computed vector.
func newVectorSelect[D, Ds any](op IndexUnaryOp[bool, D, Ds], u *vectorReference[D], value Ds) computeVectorT[D] {
	return vectorSelect[D, Ds]{op: op, u: u, value: value}
}

// resize returns the same computation over u resized to newSize.
func (compute vectorSelect[D, Ds]) resize(newSize int) computeVectorT[D] {
	return newVectorSelect[D, Ds](compute.op, compute.u.resize(newSize), compute.value)
}

// computeElement returns the element at index if it is present in u and
// accepted by the predicate; ok reports whether a value was produced.
// The third argument to op is always 0 for vectors.
func (compute vectorSelect[D, Ds]) computeElement(index int) (result D, ok bool) {
	if u, uok := compute.u.extractElement(index); uok {
		if compute.op(u, index, 0, compute.value) {
			return u, true
		}
	}
	return
}

// computePipeline returns u's pipeline extended with a parallel filter stage
// that drops entries failing the predicate, or nil when u has no pipeline.
func (compute vectorSelect[D, Ds]) computePipeline() *pipeline.Pipeline[any] {
	p := compute.u.getPipeline()
	if p == nil {
		return nil
	}
	p.Add(
		pipeline.Par(pipeline.Receive(func(_ int, data any) any {
			slice := data.(vectorSlice[D])
			slice.filter(func(index int, value D) (newIndex int, newValue D, ok bool) {
				return index, value, compute.op(value, index, 0, compute.value)
			})
			return slice
		})),
	)
	return p
}
// vectorSelectScalar is like vectorSelect, except the extra argument to the
// predicate comes from a scalar reference that is only materialized when the
// computation runs.
type vectorSelectScalar[D, Ds any] struct {
	op    IndexUnaryOp[bool, D, Ds]
	u     *vectorReference[D]
	value *scalarReference[Ds]
}

// newVectorSelectScalar wraps the select operation as a lazily computed vector.
func newVectorSelectScalar[D, Ds any](op IndexUnaryOp[bool, D, Ds], u *vectorReference[D], value *scalarReference[Ds]) computeVectorT[D] {
	return vectorSelectScalar[D, Ds]{op: op, u: u, value: value}
}

// resize returns the same computation over u resized to newSize.
func (compute vectorSelectScalar[D, Ds]) resize(newSize int) computeVectorT[D] {
	return newVectorSelectScalar[D, Ds](compute.op, compute.u.resize(newSize), compute.value)
}

// computeElement returns the element at index if it is present in u and
// accepted by the predicate. It panics with EmptyObject when the scalar
// holds no value.
func (compute vectorSelectScalar[D, Ds]) computeElement(index int) (result D, ok bool) {
	if u, uok := compute.u.extractElement(index); uok {
		if v, vok := compute.value.extractElement(); vok {
			if compute.op(u, index, 0, v) {
				return u, true
			}
		} else {
			panic(EmptyObject)
		}
	}
	return
}

// computePipeline returns u's pipeline extended with a parallel filter
// stage. The scalar is extracted once up front (panicking with EmptyObject
// when empty) so the filter closure captures a plain value.
func (compute vectorSelectScalar[D, Ds]) computePipeline() *pipeline.Pipeline[any] {
	p := compute.u.getPipeline()
	if p == nil {
		return nil
	}
	v, vok := compute.value.extractElement()
	if !vok {
		panic(EmptyObject)
	}
	p.Add(
		pipeline.Par(pipeline.Receive(func(_ int, data any) any {
			slice := data.(vectorSlice[D])
			slice.filter(func(index int, value D) (newIndex int, newValue D, ok bool) {
				return index, value, compute.op(value, index, 0, v)
			})
			return slice
		})),
	)
	return p
}
package transform
// WHT32 implements a 32x32 integer Walsh-Hadamard transform.
type WHT32 struct {
	fScale uint  // right shift applied to forward-transform results
	iScale uint  // right shift applied to inverse-transform results
	data   []int // scratch buffer for the intermediate row-pass result
}
// NewWHT32 creates a 32x32 Walsh-Hadamard transform.
// For perfect reconstruction, forward results are scaled by 16*sqrt(2) unless
// the parameter is set to false (scaled by sqrt(2), in which case rounding
// may introduce errors)
func NewWHT32(scale bool) (*WHT32, error) {
	this := new(WHT32)
	// Scratch buffer for one 32x32 block between the row and column passes.
	this.data = make([]int, 1024)
	if scale { // idiomatic form of `scale == true`
		this.fScale = 0
		this.iScale = 10
	} else {
		this.fScale = 5
		this.iScale = 5
	}
	return this, nil
}
// Forward applies the 2D transform to src, writing into dst.
// For perfect reconstruction, forward results are scaled by 16*sqrt(2) unless
// the parameter is set to false (scaled by sqrt(2), in which case rounding
// may introduce errors)
func (this *WHT32) Forward(src, dst []int) (uint, uint, error) {
	return this.compute(src, dst, this.fScale)
}

// compute runs the separable 2D transform: a row pass into the scratch
// buffer, then a column pass (with rounding right-shift) into output.
// It always consumes and produces exactly 1024 values (one 32x32 block).
func (this *WHT32) compute(input, output []int, shift uint) (uint, uint, error) {
	processRows(input, this.data)
	processColumns(this.data, output, shift)
	return 1024, 1024, nil
}
// processRows performs pass 1 of the 2D Walsh-Hadamard transform: a 32-point
// WHT over each of the 32 rows of the input block, written into buffer.
// Each of the five butterfly stages combines adjacent pairs of the previous
// stage's output: sums land in the first half, differences in the second.
func processRows(input, buffer []int) {
	var ping, pong [32]int
	for row := 0; row < 1024; row += 32 {
		src, dst := ping[:], pong[:]
		copy(src, input[row:row+32])
		// Five stages since 32 == 2^5.
		for stage := 0; stage < 5; stage++ {
			for j := 0; j < 16; j++ {
				dst[j] = src[2*j] + src[2*j+1]
				dst[16+j] = src[2*j] - src[2*j+1]
			}
			src, dst = dst, src
		}
		copy(buffer[row:row+32], src)
	}
}
// processColumns performs pass 2 of the 2D Walsh-Hadamard transform: a
// 32-point WHT over each of the 32 columns of the block held in buffer,
// followed by rounding and right-shifting each result by shift into output.
func processColumns(buffer, output []int, shift uint) {
	adjust := (1 << shift) >> 1 // rounding term added before the shift
	var ping, pong [32]int
	for col := 0; col < 32; col++ {
		src, dst := ping[:], pong[:]
		// Gather column col (stride 32).
		for k := 0; k < 32; k++ {
			src[k] = buffer[col+32*k]
		}
		// Five butterfly stages (32 == 2^5): sums into the first half,
		// differences into the second half of each stage's output.
		for stage := 0; stage < 5; stage++ {
			for j := 0; j < 16; j++ {
				dst[j] = src[2*j] + src[2*j+1]
				dst[16+j] = src[2*j] - src[2*j+1]
			}
			src, dst = dst, src
		}
		// Scatter the scaled results back into column col.
		for k := 0; k < 32; k++ {
			output[col+32*k] = (src[k] + adjust) >> shift
		}
	}
}
// Inverse applies the transform with the inverse scaling shift.
// The transform is symmetric (except, potentially, for scaling)
func (this *WHT32) Inverse(src, dst []int) (uint, uint, error) {
	return this.compute(src, dst, this.iScale)
}
package rando
import (
"errors"
"math/rand"
"time"
)
// Random provides convenient utility funcs for generating random data
type Random struct {
	rand *rand.Rand // seeded pseudo-random source
}
// NewRandom returns a new Random seeded with the current UTC time converted to nanoseconds.
func NewRandom() *Random {
	seed := time.Now().UTC().UnixNano()
	return NewSeededRandom(seed)
}

// NewSeededRandom returns a new Random seeded with the provided value.
func NewSeededRandom(seed int64) *Random {
	source := rand.NewSource(seed)
	return &Random{rand: rand.New(source)}
}

// RandomString returns a random string of the given length from a standard lowercase-only alphanumeric alphabet.
func (r *Random) RandomString(length int) string {
	lowerAlnum := []rune("0123456789abcdefghijklmnopqrstuvwxyz")
	return r.RandomStringUsingCustomAlphabet(length, lowerAlnum)
}

// RandomStringUsingCustomAlphabet return a random string of the given length from a custom alphabet.
func (r *Random) RandomStringUsingCustomAlphabet(length int, alphabet []rune) string {
	out := make([]rune, length)
	for i := 0; i < length; i++ {
		out[i] = alphabet[r.rand.Intn(len(alphabet))]
	}
	return string(out)
}
// RandomSelectionFromStringSlice returns a random string from a given slice of strings,
// or the empty string when the slice is empty.
func (r *Random) RandomSelectionFromStringSlice(values []string) string {
	if len(values) == 0 {
		return ""
	}
	// Fix: use the receiver's seeded source; the previous code called the
	// package-level rand.Intn, ignoring the seed passed to NewSeededRandom.
	return values[r.rand.Intn(len(values))]
}

// RandomBool returns a random boolean value.
func (r *Random) RandomBool() bool {
	// Fix: use the receiver's seeded source for reproducible results.
	return r.rand.Intn(2) == 0
}
func (r *Random) SampleStringSlice(values []string, size int) ([]string, error) {
if size < 0 || size > len(values) {
return nil, errors.New("size is negative or greater than the the length of input values")
}
if size == 0 {
return []string{}, nil
}
sampled := make(map[string]struct{})
for i := size; i >= 0; i-- {
candidate := r.RandomSelectionFromStringSlice(values)
if _, found := sampled[candidate]; !found {
sampled[candidate] = struct{}{}
}
}
res := make([]string, 0)
for k := range sampled {
res = append(res, k)
}
return res, nil
} | rando.go | 0.779322 | 0.445047 | rando.go | starcoder |
package recsplit
import (
"encoding/binary"
"fmt"
"math"
"math/bits"
"github.com/ledgerwatch/erigon-lib/etl"
"github.com/spaolacci/murmur3"
)
const RecSplitLogPrefix = "recsplit"
const MaxLeafSize = 24
// remix is variant 13 of the 64-bit finalizer function from Austin Appleby's
// MurmurHash3 (https://github.com/aappleby/smhasher), as catalogued at
// http://zimbry.blogspot.com/2011/09/better-bit-mixing-improving-on.html.
// It scrambles the bits of z so that every input bit influences every output
// bit.
func remix(z uint64) uint64 {
	z ^= z >> 30
	z *= 0xbf58476d1ce4e5b9
	z ^= z >> 27
	z *= 0x94d049bb133111eb
	return z ^ (z >> 31)
}
// RecSplit is the implementation of Recursive Split algorithm for constructing perfect hash mapping, described in
// https://arxiv.org/pdf/1910.06416.pdf <NAME>, <NAME>, and <NAME>.
// Recsplit: Minimal perfect hashing via recursive splitting. In 2020 Proceedings of the Symposium on Algorithm Engineering and Experiments (ALENEX),
// pages 175−185. SIAM, 2020.
type RecSplit struct {
	bucketSize       int             // Target number of keys per bucket
	keyExpectedCount uint64          // Number of keys in the hash table
	keysAdded        uint64          // Number of keys actually added to the recSplit (to check the match with keyExpectedCount)
	bucketCount      uint64          // Number of buckets
	hasher           murmur3.Hash128 // Salted hash function to use for splitting into initial buckets and mapping to 64-bit fingerprints
	collector        *etl.Collector  // Collects bucket-prefixed fingerprints, spilling to disk as needed
	built            bool            // Flag indicating that the hash function has been built and no more keys can be added
	currentBucketIdx uint64          // Current bucket being accumulated
	currentBucket    []uint64        // 64-bit fingerprints of keys in the current bucket accumulated before the recsplit is performed for that bucket
	gr               GolombRice      // Helper object to encode the tree of hash function salts using Golomb-Rice code.
	// Helper object to encode the sequence of cumulative number of keys in the buckets
	// and the sequence of of cumulative bit offsets of buckets in the Golomb-Rice code.
	ef                 DoubleEliasFano
	bucketSizeAcc      []uint64 // Bucket size accumulator
	bucketPosAcc       []uint64 // Accumulator for position of every bucket in the encoding of the hash function
	leafSize           uint16   // Leaf size for recursive split algorithm
	primaryAggrBound   uint16   // The lower bound for primary key aggregation (computed from leafSize)
	secondaryAggrBound uint16   // The lower bound for secondary key aggregation (computed from leafSize)
	startSeed          []uint64 // Per-recursion-level hash seeds
	golombRice         []uint32 // Memoized per-subset-size Golomb-Rice metadata (see computeGolombRice)
	buffer             []uint64 // Scratch space used while reordering keys during a split
	count              []uint16 // Scratch counters used while searching for a valid split
	salt               uint32   // Murmur3 hash used for converting keys to 64-bit values and assigning to buckets
	collision          bool     // Set when a duplicate fingerprint is detected; construction must retry with a new salt
	tmpDir             string   // Directory for the collector's temporary files
	trace              bool     // Enables verbose construction output
}

// RecSplitArgs bundles the parameters for NewRecSplit.
type RecSplitArgs struct {
	KeyCount   int    // Expected total number of keys
	BucketSize int    // Target number of keys per bucket (typically 100-2000)
	Salt       uint32 // Hash seed (salt) for the hash function used for allocating the initial buckets - need to be generated randomly
	LeafSize   uint16 // Leaf size for the recursive split (at most MaxLeafSize)
	TmpDir     string // Directory for temporary files
	StartSeed  []uint64 // For each level of recursive split, the hash seed (salt) used for that level - need to be generated randomly and be large enough to accomodate all the levels
}
// NewRecSplit creates a new RecSplit instance with given number of keys and given bucket size
// Typical bucket size is 100 - 2000, larger bucket sizes result in smaller representations of hash functions, at a cost of slower access
// salt parameters is used to randomise the hash function construction, to ensure that different Erigon instances (nodes)
// are likely to use different hash function, to collision attacks are unlikely to slow down any meaningful number of nodes at the same time
func NewRecSplit(args RecSplitArgs) (*RecSplit, error) {
	// Round up so every key lands in some bucket.
	bucketCount := (args.KeyCount + args.BucketSize - 1) / args.BucketSize
	rs := &RecSplit{bucketSize: args.BucketSize, keyExpectedCount: uint64(args.KeyCount), bucketCount: uint64(bucketCount)}
	rs.salt = args.Salt
	rs.hasher = murmur3.New128WithSeed(rs.salt)
	rs.tmpDir = args.TmpDir
	rs.collector = etl.NewCollector(rs.tmpDir, etl.NewSortableBuffer(etl.BufferOptimalSize))
	rs.currentBucket = make([]uint64, 0, args.BucketSize)
	// Both accumulators are prefix sums and start with a single zero entry.
	rs.bucketSizeAcc = make([]uint64, 1, bucketCount+1)
	rs.bucketPosAcc = make([]uint64, 1, bucketCount+1)
	if args.LeafSize > MaxLeafSize {
		return nil, fmt.Errorf("exceeded max leaf size %d: %d", MaxLeafSize, args.LeafSize)
	}
	rs.leafSize = args.LeafSize
	// Aggregation bounds follow the schedule recommended in the RecSplit
	// paper for the chosen leaf size.
	rs.primaryAggrBound = rs.leafSize * uint16(math.Max(2, math.Ceil(0.35*float64(rs.leafSize)+1./2.)))
	if rs.leafSize < 7 {
		rs.secondaryAggrBound = rs.primaryAggrBound * 2
	} else {
		rs.secondaryAggrBound = rs.primaryAggrBound * uint16(math.Ceil(0.21*float64(rs.leafSize)+9./10.))
	}
	rs.startSeed = args.StartSeed
	rs.count = make([]uint16, rs.secondaryAggrBound)
	return rs, nil
}

// SetTrace enables or disables verbose tracing output during construction.
func (rs *RecSplit) SetTrace(trace bool) {
	rs.trace = trace
}
// remap maps x, assumed uniformly distributed over [0..2^64), onto the range
// [0..n) without division, by taking the high 64 bits of the 128-bit product
// x*n.
func remap(x uint64, n uint64) uint64 {
	high, _ := bits.Mul64(x, n)
	return high
}
// mask48 keeps the low 48 bits of a 64-bit value.
const mask48 uint64 = (1 << 48) - 1

// remap16 maps x, assumed uniformly distributed over [0..2^64), onto [0..n)
// for n < 2^16. Only the low 48 bits of x are used so that the product with
// n fits in 64 bits.
func remap16(x uint64, n uint16) uint16 {
	return uint16((x & mask48) * uint64(n) >> 48)
}
// ResetNextSalt resets the RecSplit and uses the next salt value to try to avoid collisions
// when mapping keys to 64-bit values
func (rs *RecSplit) ResetNextSalt() {
	rs.collision = false
	rs.keysAdded = 0
	rs.salt++
	rs.hasher = murmur3.New128WithSeed(rs.salt)
	rs.collector = etl.NewCollector(rs.tmpDir, etl.NewSortableBuffer(etl.BufferOptimalSize))
	rs.currentBucket = rs.currentBucket[:0]
	rs.bucketSizeAcc = rs.bucketSizeAcc[:1] // First entry is always zero
	// Fix: keep the leading zero entry here as well (the previous code
	// truncated to length 0); recsplitCurrentBucket extends the slice by
	// appending bucketPosAcc[len-1], which panics on an empty slice, and
	// NewRecSplit initializes both accumulators with one zero entry.
	rs.bucketPosAcc = rs.bucketPosAcc[:1] // First entry is always zero
}
// splitParams returns the fanout (number of parts) and unit (size of every
// part except possibly the last) used to split a subset of m keys, following
// RecSplit's three-level aggregation schedule.
func (rs *RecSplit) splitParams(m uint16) (fanout, unit uint16) {
	switch {
	case m > rs.secondaryAggrBound: // High-level aggregation (fanout 2)
		unit = rs.secondaryAggrBound * (((m+1)/2 + rs.secondaryAggrBound - 1) / rs.secondaryAggrBound)
		fanout = 2
	case m > rs.primaryAggrBound: // Second-level aggregation
		unit = rs.primaryAggrBound
		fanout = (m + rs.primaryAggrBound - 1) / rs.primaryAggrBound
	default: // First-level aggregation
		unit = rs.leafSize
		fanout = (m + rs.leafSize - 1) / rs.leafSize
	}
	return fanout, unit
}
// computeGolombRice fills table[m] with packed metadata for key subsets of
// size m: bits 27-31 hold the Golomb-Rice code length (log2 of the Golomb
// modulus) for the split of m keys, bits 16-26 the number of nodes in the
// split subtree, and bits 0-15 the total Golomb-Rice code length of the
// subtree. Entries for the part sizes k[i] < m must already be filled.
func (rs *RecSplit) computeGolombRice(m uint16, table []uint32) {
	fanout, unit := rs.splitParams(m)
	// Split m into fanout parts: all but the last have size unit.
	k := make([]uint16, fanout)
	k[fanout-1] = m
	for i := uint16(0); i < fanout-1; i++ {
		k[i] = unit
		k[fanout-1] -= k[i]
	}
	sqrt_prod := float64(1)
	for i := uint16(0); i < fanout; i++ {
		sqrt_prod *= math.Sqrt(float64(k[i]))
	}
	// p approximates the probability that a random hash seed splits the m
	// keys into exactly the chosen part sizes.
	p := math.Sqrt(float64(m)) / (math.Pow(2*math.Pi, (float64(fanout)-1.)/2.0) * sqrt_prod)
	golombRiceLength := uint32(math.Ceil(math.Log2(-math.Log((math.Sqrt(5)+1.0)/2.0) / math.Log1p(-p)))) // log2 Golomb modulus
	if golombRiceLength > 0x1F {
		panic("golombRiceLength > 0x1F")
	}
	table[m] = golombRiceLength << 27
	// Add up the subtree code lengths of all parts.
	for i := uint16(0); i < fanout; i++ {
		golombRiceLength += table[k[i]] & 0xFFFF
	}
	if golombRiceLength > 0xFFFF {
		panic("golombRiceLength > 0xFFFF")
	}
	table[m] |= golombRiceLength // Sum of Golomb-Rice code lengths in the subtree, stored in the lower 16 bits
	// Count this node plus the nodes in all part subtrees.
	nodes := uint32(1)
	for i := uint16(0); i < fanout; i++ {
		nodes += (table[k[i]] >> 16) & 0x7FF
	}
	if rs.leafSize >= 3 && nodes > 0x7FF {
		panic("rs.leafSize >= 3 && nodes > 0x7FF")
	}
	table[m] |= nodes << 16
}

// golombParam returns the optimal Golomb parameter to use for encoding
// salt for the part of the hash function separating m elements. It is based on
// calculations with assumptions that we draw hash functions at random
func (rs *RecSplit) golombParam(m uint16) int {
	s := uint16(len(rs.golombRice))
	// Lazily extend the memoization table up to size m.
	for m >= s {
		rs.golombRice = append(rs.golombRice, 0)
		// For the case where bucket is larger than planned
		if s == 0 {
			rs.golombRice[0] = (bijMemo[0] << 27) | bijMemo[0]
		} else if s <= rs.leafSize {
			// Leaf-sized subsets use precomputed bijection memo values.
			rs.golombRice[s] = (bijMemo[s] << 27) | (uint32(1) << 16) | bijMemo[s]
		} else {
			rs.computeGolombRice(s, rs.golombRice)
		}
		s++
	}
	// The code length lives in the top 5 bits (see computeGolombRice).
	return int(rs.golombRice[m] >> 27)
}
// AddKey adds a key to the RecSplit. There can be many more keys than what fits in RAM, and RecSplit
// spills data onto disk to accommodate that. The key gets copied by the collector, therefore
// the slice underlying key is not getting accessed by RecSplit after this invocation.
func (rs *RecSplit) AddKey(key []byte) error {
	if rs.built {
		return fmt.Errorf("cannot add keys after perfect hash function had been built")
	}
	// Fingerprint the key with the salted hasher and prefix the fingerprint
	// with its bucket number, so the collector orders keys by bucket first.
	rs.hasher.Reset()
	rs.hasher.Write(key) //nolint:errcheck
	hi, lo := rs.hasher.Sum128()
	var bucketKey [16]byte
	binary.BigEndian.PutUint64(bucketKey[:], remap(hi, rs.bucketCount))
	binary.BigEndian.PutUint64(bucketKey[8:], lo)
	rs.keysAdded++
	return rs.collector.Collect(bucketKey[:], []byte{})
}

// recsplitCurrentBucket runs the recursive split for the fingerprints
// accumulated in rs.currentBucket, appends the resulting Golomb-Rice encoded
// salts, updates the size/position accumulators, and clears the bucket. On a
// duplicate fingerprint it sets rs.collision and returns an error so the
// caller can retry with a different salt.
func (rs *RecSplit) recsplitCurrentBucket() error {
	// Extend rs.bucketSizeAcc to accommodate current bucket index + 1
	for len(rs.bucketSizeAcc) <= int(rs.currentBucketIdx)+1 {
		rs.bucketSizeAcc = append(rs.bucketSizeAcc, rs.bucketSizeAcc[len(rs.bucketSizeAcc)-1])
	}
	rs.bucketSizeAcc[int(rs.currentBucketIdx)+1] += uint64(len(rs.currentBucket))
	if len(rs.currentBucket) > 1 {
		// Adjacent-duplicate check; assumes the bucket's fingerprints arrive
		// sorted (they come from the sorting collector).
		for i, key := range rs.currentBucket[1:] {
			if key == rs.currentBucket[i] {
				rs.collision = true
				return fmt.Errorf("duplicate key %x", key)
			}
		}
		bitPos := rs.gr.bitCount
		// Ensure the scratch buffer can hold the whole bucket.
		if rs.buffer == nil {
			rs.buffer = make([]uint64, len(rs.currentBucket))
		} else {
			for len(rs.buffer) < len(rs.currentBucket) {
				rs.buffer = append(rs.buffer, 0)
			}
		}
		unary := rs.recsplit(0 /* level */, rs.currentBucket, nil /* unary */)
		rs.gr.appendUnaryAll(unary)
		if rs.trace {
			fmt.Printf("recsplitBucket(%d, %d, bitsize = %d)\n", rs.currentBucketIdx, len(rs.currentBucket), rs.gr.bitCount-bitPos)
		}
	}
	// Extend rs.bucketPosAcc to accommodate current bucket index + 1
	for len(rs.bucketPosAcc) <= int(rs.currentBucketIdx)+1 {
		rs.bucketPosAcc = append(rs.bucketPosAcc, rs.bucketPosAcc[len(rs.bucketPosAcc)-1])
	}
	rs.bucketPosAcc[int(rs.currentBucketIdx)+1] = uint64(rs.gr.Bits())
	// clear for the next bucket
	rs.currentBucket = rs.currentBucket[:0]
	return nil
}
// recsplit applies recSplit algorithm to the given bucket.
// At each level it searches for a salt that either produces a bijection of the
// keys onto [0, m) (leaf case, m <= leafSize) or splits the keys into parts of
// exactly `unit` keys each (aggregation case), then Golomb-Rice encodes the salt
// and recurses. Collected unary parts are appended to `unary` and returned.
func (rs *RecSplit) recsplit(level int, bucket []uint64, unary []uint64) []uint64 {
	if rs.trace {
		fmt.Printf("recsplit(%d, %d, %x)\n", level, len(bucket), bucket)
	}
	// Pick initial salt for this level of recursive split
	salt := rs.startSeed[level]
	m := uint16(len(bucket))
	if m <= rs.leafSize {
		// No need to build aggregation levels - just find find bijection
		var mask uint32
		for {
			// Try the current salt: each key must map to a distinct slot in [0, m).
			mask = 0
			var fail bool
			for i := uint16(0); !fail && i < m; i++ {
				bit := uint32(1) << remap16(remix(bucket[i]+salt), m)
				if mask&bit != 0 {
					fail = true
				} else {
					mask |= bit
				}
			}
			if !fail {
				break
			}
			salt++
		}
		// Only the offset from the level's start seed is encoded.
		salt -= rs.startSeed[level]
		log2golomb := rs.golombParam(m)
		if rs.trace {
			fmt.Printf("encode bij %d with log2golomn %d at p = %d\n", salt, log2golomb, rs.gr.bitCount)
		}
		rs.gr.appendFixed(salt, log2golomb)
		unary = append(unary, salt>>log2golomb)
	} else {
		fanout, unit := rs.splitParams(m)
		count := rs.count
		for {
			// Zero only the first fanout-1 counters: if they all equal `unit`,
			// the last part's size is implied (the sizes sum to m).
			for i := uint16(0); i < fanout-1; i++ {
				count[i] = 0
			}
			var fail bool
			for i := uint16(0); i < m; i++ {
				count[remap16(remix(bucket[i]+salt), m)/unit]++
			}
			for i := uint16(0); i < fanout-1; i++ {
				fail = fail || (count[i] != unit)
			}
			if !fail {
				break
			}
			salt++
		}
		// Turn counts into start offsets, then scatter keys into rs.buffer
		// so that each part occupies a contiguous range.
		for i, c := uint16(0), uint16(0); i < fanout; i++ {
			count[i] = c
			c += unit
		}
		for i := uint16(0); i < m; i++ {
			j := remap16(remix(bucket[i]+salt), m) / unit
			rs.buffer[count[j]] = bucket[i]
			count[j]++
		}
		copy(bucket, rs.buffer)
		salt -= rs.startSeed[level]
		log2golomb := rs.golombParam(m)
		if rs.trace {
			fmt.Printf("encode fanout %d: %d with log2golomn %d at p = %d\n", fanout, salt, log2golomb, rs.gr.bitCount)
		}
		rs.gr.appendFixed(salt, log2golomb)
		unary = append(unary, salt>>log2golomb)
		// Recurse into the full-size parts, then the (possibly smaller) last part.
		var i uint16
		for i = 0; i < m-unit; i += unit {
			unary = rs.recsplit(level+1, bucket[i:i+unit], unary)
		}
		// A last part of size 1 needs no encoding (see the sentinel in Build).
		if m-i > 1 {
			unary = rs.recsplit(level+1, bucket[i:], unary)
		}
	}
	return unary
}
// loadFunc is required to satisfy the type etl.LoadFunc type, to use with collector.Load.
// It accumulates fingerprints for one bucket at a time; when the bucket number
// changes, the previous bucket is split and encoded via recsplitCurrentBucket.
func (rs *RecSplit) loadFunc(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error {
	// k is the BigEndian encoding of the bucket number, and the v is the key that is assigned into that bucket
	bucketIdx := binary.BigEndian.Uint64(k)
	if rs.currentBucketIdx != bucketIdx {
		// math.MaxUint64 is the "no bucket yet" sentinel set by Build.
		if rs.currentBucketIdx != math.MaxUint64 {
			if err := rs.recsplitCurrentBucket(); err != nil {
				return err
			}
		}
		rs.currentBucketIdx = bucketIdx
	}
	// Bytes 8..16 of k carry the 64-bit fingerprint of the key.
	rs.currentBucket = append(rs.currentBucket, binary.BigEndian.Uint64(k[8:]))
	return nil
}
// Build has to be called after all the keys have been added, and it initiates the process
// of building the perfect hash function.
// It returns an error if it is called twice, if the number of added keys does
// not match the expected count, or if a duplicate key is detected.
func (rs *RecSplit) Build() error {
	if rs.built {
		return fmt.Errorf("already built")
	}
	if rs.keysAdded != rs.keyExpectedCount {
		return fmt.Errorf("expected keys %d, got %d", rs.keyExpectedCount, rs.keysAdded)
	}
	rs.currentBucketIdx = math.MaxUint64 // To make sure 0 bucket is detected
	defer rs.collector.Close(RecSplitLogPrefix)
	// Replay the collected (bucket, fingerprint) pairs through loadFunc, which
	// accumulates and splits one bucket at a time.
	if err := rs.collector.Load(RecSplitLogPrefix, nil /* db */, "" /* toBucket */, rs.loadFunc, etl.TransformArgs{}); err != nil {
		return err
	}
	// loadFunc only flushes on a bucket change, so the last bucket is flushed here.
	if len(rs.currentBucket) > 0 {
		if err := rs.recsplitCurrentBucket(); err != nil {
			return err
		}
	}
	rs.gr.appendFixed(1, 1) // Sentinel (avoids checking for parts of size 1)
	// Construct Elias Fano index
	rs.ef.Build(rs.bucketSizeAcc, rs.bucketPosAcc)
	rs.built = true
	return nil
}
// skipBits returns the number of Golomb-Rice code bits occupied by a subtree
// of m keys (the low 16 bits of the memoised golombRice entry).
func (rs *RecSplit) skipBits(m uint16) int {
	const bitsMask = 0xffff
	return int(rs.golombRice[m]) & bitsMask
}
// skipNodes returns the number of encoded nodes in a subtree of m keys
// (bits 16..26 of the memoised golombRice entry).
func (rs *RecSplit) skipNodes(m uint16) int {
	const nodesMask = 0x7FF
	return int((rs.golombRice[m] >> 16) & nodesMask)
}
// Lookup returns the index assigned to the given key by the perfect hash
// function. It must be called only after Build.
// NOTE(review): for keys that were never added the result is an arbitrary
// in-range index — presumably callers only look up known keys; confirm.
func (rs *RecSplit) Lookup(key []byte, trace bool) int {
	rs.hasher.Reset()
	rs.hasher.Write(key) //nolint:errcheck
	bucketHash, fingerprint := rs.hasher.Sum128()
	if trace {
		fmt.Printf("lookup key %x, fingerprint %x\n", key, fingerprint)
	}
	// Locate the bucket and its key range / bit offset via the Elias-Fano index.
	bucket := remap(bucketHash, rs.bucketCount)
	cumKeys, cumKeysNext, bitPos := rs.ef.Get3(bucket)
	m := uint16(cumKeysNext - cumKeys) // Number of keys in this bucket
	if trace {
		fmt.Printf("bucket: %d, m = %d, bitPos = %d, unaryOffset = %d\n", bucket, m, bitPos, rs.skipBits(m))
	}
	rs.gr.ReadReset(int(bitPos), rs.skipBits(m))
	var level int
	var p int
	// Descend the binary aggregation levels.
	for m > rs.secondaryAggrBound { // fanout = 2
		if trace {
			p = rs.gr.currFixedOffset
		}
		d := rs.gr.ReadNext(rs.golombParam(m))
		if trace {
			fmt.Printf("level %d, p = %d, d = %d golomb %d\n", level, p, d, rs.golombParam(m))
		}
		hmod := remap16(remix(fingerprint+rs.startSeed[level]+d), m)
		split := (((m+1)/2 + rs.secondaryAggrBound - 1) / rs.secondaryAggrBound) * rs.secondaryAggrBound
		if hmod < split {
			// Key falls into the left part; keep reading its codes in place.
			m = split
		} else {
			// Key falls into the right part; skip the left subtree's codes.
			rs.gr.SkipSubtree(rs.skipNodes(split), rs.skipBits(split))
			m -= split
			cumKeys += uint64(split)
		}
		level++
	}
	// Descend the secondary aggregation level (parts of primaryAggrBound keys).
	if m > rs.primaryAggrBound {
		if trace {
			p = rs.gr.currFixedOffset
		}
		d := rs.gr.ReadNext(rs.golombParam(m))
		if trace {
			fmt.Printf("level %d, p = %d, d = %d golomb %d\n", level, p, d, rs.golombParam(m))
		}
		hmod := remap16(remix(fingerprint+rs.startSeed[level]+d), m)
		part := hmod / rs.primaryAggrBound
		if rs.primaryAggrBound < m-part*rs.primaryAggrBound {
			m = rs.primaryAggrBound
		} else {
			m = m - part*rs.primaryAggrBound
		}
		cumKeys += uint64(rs.primaryAggrBound * part)
		if part != 0 {
			rs.gr.SkipSubtree(rs.skipNodes(rs.primaryAggrBound)*int(part), rs.skipBits(rs.primaryAggrBound)*int(part))
		}
		level++
	}
	// Descend the primary aggregation level (parts of leafSize keys).
	if m > rs.leafSize {
		if trace {
			p = rs.gr.currFixedOffset
		}
		d := rs.gr.ReadNext(rs.golombParam(m))
		if trace {
			fmt.Printf("level %d, p = %d, d = %d, golomb %d\n", level, p, d, rs.golombParam(m))
		}
		hmod := remap16(remix(fingerprint+rs.startSeed[level]+d), m)
		part := hmod / rs.leafSize
		if rs.leafSize < m-part*rs.leafSize {
			m = rs.leafSize
		} else {
			m = m - part*rs.leafSize
		}
		cumKeys += uint64(rs.leafSize * part)
		if part != 0 {
			rs.gr.SkipSubtree(int(part), rs.skipBits(rs.leafSize)*int(part))
		}
		level++
	}
	if trace {
		p = rs.gr.currFixedOffset
	}
	// Leaf: read the bijection salt and map the fingerprint to its slot.
	b := rs.gr.ReadNext(rs.golombParam(m))
	if trace {
		fmt.Printf("level %d, p = %d, b = %d, golomn = %d\n", level, p, b, rs.golombParam(m))
	}
	return int(cumKeys) + int(remap16(remix(fingerprint+rs.startSeed[level]+b), m))
}
// Stats returns the size of golomb rice encoding and ellias fano encoding,
// in bytes, in that order.
func (rs RecSplit) Stats() (int, int) {
	return len(rs.gr.Data()), len(rs.ef.Data())
}
// Collision returns true if there was a collision detected during mapping of keys
// into 64-bit values
// RecSplit needs to be reset, re-populated with keys, and rebuilt
func (rs RecSplit) Collision() bool {
return rs.collision
} | recsplit/recsplit.go | 0.755727 | 0.440229 | recsplit.go | starcoder |
// Package inject provides a simple dependency injector.
package inject
import (
"errors"
"reflect"
)
// Injector holds registered instances and zero-argument factories,
// both keyed by the reflect.Type they provide.
type Injector struct {
	instances map[reflect.Type]reflect.Value // ready-made values, returned as-is
	factories map[reflect.Type]reflect.Value // funcs invoked on each injection
}
// NewInjector returns an Injector with empty instance and factory registries.
func NewInjector() *Injector {
	return &Injector{
		instances: make(map[reflect.Type]reflect.Value),
		factories: make(map[reflect.Type]reflect.Value),
	}
}
// Instance registers a ready-made value; Inject will assign it to any
// struct field of exactly the same dynamic type.
func (in *Injector) Instance(instance interface{}) {
	in.instances[reflect.TypeOf(instance)] = reflect.ValueOf(instance)
}
// Factory registers a zero-argument, single-result factory function; Inject
// will call it to populate any struct field of the factory's result type.
// It panics if the argument is not a func with signature func() T.
func (in *Injector) Factory(factory interface{}) {
	v := reflect.ValueOf(factory)
	if v.Kind() != reflect.Func {
		panic("inject: expected factory func, not " + v.Type().String())
	}
	t := reflect.TypeOf(factory)
	if t.NumIn() != 0 {
		panic("inject: expected factory func with 0 parameters")
	}
	if t.NumOut() != 1 {
		panic("inject: expected factory with 1 result")
	}
	// Keyed by the result type, so lookup matches a field's type directly.
	in.factories[t.Out(0)] = v
}
// Inject populates every field of the struct pointed to by container, using a
// registered instance when one matches the field type, otherwise a registered
// factory. It returns an error for a non-struct-pointer argument or for any
// field whose type has neither an instance nor a factory.
// NOTE(review): fields are set via reflection, so unexported fields would
// panic in Set — presumably containers only have exported fields; confirm.
func (in *Injector) Inject(container interface{}) error {
	vcont := reflect.ValueOf(container)
	if vcont.Kind() != reflect.Ptr {
		return errors.New("inject: expected struct pointer, not " + vcont.Kind().String())
	}
	vcont = reflect.Indirect(vcont)
	if vcont.Kind() != reflect.Struct {
		return errors.New("inject: expected struct pointer, not " + vcont.Kind().String() + " pointer")
	}
	vtype := vcont.Type()
	for i := 0; i < vtype.NumField(); i++ {
		ftype := vtype.Field(i).Type
		// Instances take precedence over factories for the same type.
		if instance, ok := in.instances[ftype]; ok {
			vcont.Field(i).Set(instance)
			continue
		}
		if factory, ok := in.factories[ftype]; ok {
			vcont.Field(i).Set(factory.Call(nil)[0])
			continue
		}
		return errors.New("Could not populate '" + vtype.Field(i).Name + "': no " + vtype.Field(i).Type.String())
	}
	return nil
}
func (in *Injector) Create(factory interface{}) (interface{}, error) {
v := reflect.ValueOf(factory)
if v.Kind() != reflect.Func {
return nil, errors.New("inject: expected factory func, not " + v.Type().String())
}
t := reflect.TypeOf(factory)
if t.NumIn() != 1 {
return nil, errors.New("inject: expected factory func with 1 parameters")
}
vin := reflect.New(t.In(0).Elem())
if err := in.Inject(vin.Interface()); err != nil {
return nil, err
}
out := v.Call([]reflect.Value{vin})
return out[0].Interface(), nil
} | pkg/inject/inject.go | 0.707101 | 0.417509 | inject.go | starcoder |
package io
import (
"fmt"
"reflect"
)
// AnySet is a generic, insertion-ordered set built with reflection.
// Both fields hold the same elements: a slice preserving insertion order and
// a map (element -> true) for O(1) membership tests.
type AnySet struct {
	orderedElems interface{} // a slice of the element type, in insertion order
	mappedElems  interface{} // a map[elemType]bool used for membership
}
// NewAnySet builds an AnySet from a non-empty slice or array of any hashable
// element type. The element type is taken from the first element.
// NOTE(review): duplicate input elements are kept in orderedElems but
// collapse in the map — presumably inputs are already unique; confirm.
func NewAnySet(iElems interface{}) (as *AnySet, err error) {
	/*
		Generic Set
		Input should be a slice of any hashable type
	*/
	refValue := reflect.ValueOf(iElems)
	if !(refValue.Kind() == reflect.Slice ||
		refValue.Kind() == reflect.Array) {
		return nil, fmt.Errorf("unable to build set from non-slice type")
	}
	if refValue.Len() == 0 {
		return nil, fmt.Errorf("empty input to AnySet")
	}
	// The first element fixes the element type for both containers.
	firstElement := refValue.Index(0)
	newMapType := reflect.MapOf(firstElement.Type(), reflect.TypeOf(true))
	newMap := reflect.MakeMap(newMapType)
	newSlice := reflect.MakeSlice(
		reflect.SliceOf(firstElement.Type()),
		0, 0)
	for i := 0; i < refValue.Len(); i++ {
		newMap.SetMapIndex(refValue.Index(i), reflect.ValueOf(true))
		newSlice = reflect.Append(newSlice, refValue.Index(i))
	}
	as = new(AnySet)
	as.mappedElems = newMap.Interface()
	as.orderedElems = newSlice.Interface()
	return as, nil
}
// Add inserts a single element (of the set's element type) if not already
// present; duplicates are ignored.
func (as *AnySet) Add(iElem interface{}) {
	/*
		Single element as input - must be of same type as existing
	*/
	refValue := reflect.ValueOf(iElem)
	vMap := reflect.ValueOf(as.mappedElems)
	vSlice := reflect.ValueOf(as.orderedElems)
	if !vMap.MapIndex(refValue).IsValid() {
		// The map is mutated in place (reference semantics); only the slice
		// interface needs re-storing because Append may reallocate.
		vMap.SetMapIndex(refValue, reflect.ValueOf(true))
		vSlice = reflect.Append(vSlice, refValue)
	}
	as.orderedElems = vSlice.Interface()
}
// Del removes a single element (of the set's element type) if present,
// preserving the order of the remaining elements.
func (as *AnySet) Del(iElem interface{}) {
	/*
		Single element as input - must be of same type as existing
	*/
	refValue := reflect.ValueOf(iElem)
	refType := refValue.Type()
	vMap := reflect.ValueOf(as.mappedElems)
	vSlice := reflect.ValueOf(as.orderedElems)
	/*
		Make a new slice to hold the ordered remaining values
	*/
	// Setting a map index to the zero Value deletes the key.
	emptyValue := reflect.ValueOf(nil)
	if vMap.MapIndex(refValue).IsValid() {
		vMap.SetMapIndex(refValue, emptyValue) // Delete the key value
		newSlice := reflect.MakeSlice(reflect.SliceOf(refType), 0, 0)
		for i := 0; i < vSlice.Len(); i++ {
			elem := vSlice.Index(i)
			// Note that DeepEqual does not work between reflection
			// values elem and refValue - this is a workaround
			if vMap.MapIndex(elem).IsValid() {
				newSlice = reflect.Append(newSlice, elem)
			}
		}
		as.orderedElems = newSlice.Interface()
	}
}
// Intersect provides a list of all elements in both this object and input.
// The result preserves the input's order and is returned as a slice of the
// input's type. On a non-slice or empty input an error value is returned in
// place of the slice (callers must type-check the result).
func (as *AnySet) Intersect(input interface{}) (out interface{}) {
	/*
		Slice as input - must be of same type as existing
	*/
	if as == nil {
		return nil
	}
	refValue := reflect.ValueOf(input)
	refType := refValue.Type()
	vMap := reflect.ValueOf(as.mappedElems)
	newSlice := reflect.MakeSlice(refType, 0, 0)
	if !(refValue.Kind() == reflect.Slice ||
		refValue.Kind() == reflect.Array) {
		return fmt.Errorf("unable to do intersect of a non-slice type")
	}
	if refValue.Len() == 0 {
		return fmt.Errorf("empty input to intersect")
	}
	for i := 0; i < refValue.Len(); i++ {
		name := refValue.Index(i)
		if vMap.MapIndex(name).IsValid() { // Name is found in A
			newSlice = reflect.Append(newSlice, name)
		}
	}
	return newSlice.Interface()
}
// Subtract provides a list of all elements in this object and not in input.
// The result preserves the set's insertion order and has the set's element
// type. An empty input returns the set's elements; a non-slice input yields
// an error value in place of the slice (callers must type-check the result).
func (as *AnySet) Subtract(inputSlice interface{}) (out interface{}) {
	/*
		Slice as input - must be of same type as existing
	*/
	/*
		Provides a list of all elements in A and not in B
		Output => A \ B
		"The Relative Complement of B in A"
		where B is input and A is the set
	*/
	refValue := reflect.ValueOf(inputSlice)
	if !(refValue.Kind() == reflect.Slice ||
		refValue.Kind() == reflect.Array) {
		return fmt.Errorf("unable to do intersect of a non-slice type")
	}
	if refValue.Len() == 0 {
		return as.orderedElems
	}
	firstElement := refValue.Index(0)
	newMapType := reflect.MapOf(firstElement.Type(), reflect.TypeOf(true))
	// Build a membership map of A ∩ B, then keep the elements of A not in it.
	iIntersection := as.Intersect(inputSlice)
	intersection := reflect.ValueOf(iIntersection)
	intMap := reflect.MakeMap(newMapType)
	for i := 0; i < intersection.Len(); i++ {
		elem := intersection.Index(i)
		intMap.SetMapIndex(elem, reflect.ValueOf(true))
	}
	newSlice := reflect.MakeSlice(
		reflect.SliceOf(firstElement.Type()),
		0, 0)
	orderedElems := reflect.ValueOf(as.orderedElems)
	for i := 0; i < orderedElems.Len(); i++ { // All of A
		elem := orderedElems.Index(i)
		// (A stray side-effect-free `intMap.MapIndex(elem)` call was removed here.)
		if !intMap.MapIndex(elem).IsValid() { // Name is not found in A ∩ B
			newSlice = reflect.Append(newSlice, elem)
		}
	}
	return newSlice.Interface()
}
// Contains returns True if the set fully contains the input.
// A non-slice or empty input returns false.
// NOTE(review): input elements are assumed unique — a duplicated element that
// is in the set makes the intersection shorter than the input and yields
// false even though every element is contained; confirm intent.
func (as *AnySet) Contains(input interface{}) bool {
	/*
		Slice as input - must be of same type as existing
	*/
	refValue := reflect.ValueOf(input)
	if !(refValue.Kind() == reflect.Slice ||
		refValue.Kind() == reflect.Array) {
		return false
	}
	if refValue.Len() == 0 {
		return false
	}
	/*
		True if set fully contains the input
	*/
	iIntersection := as.Intersect(input)
	intersection := reflect.ValueOf(iIntersection)
	return intersection.Len() == refValue.Len()
}
// DownSizeSlice shortens a slice to newLen elements, keeping either the FIRST
// or the LAST newLen elements depending on direction. Inputs already at or
// below newLen are returned unchanged.
// NOTE(review): although Array kinds pass the guard, reflect.MakeSlice panics
// on an array type, so array inputs longer than newLen will panic — confirm
// whether arrays should be supported or rejected.
func DownSizeSlice(iSlice interface{}, newLen int, direction DirectionEnum) (iOut interface{}, err error) {
	refValue := reflect.ValueOf(iSlice)
	refType := reflect.TypeOf(iSlice)
	if !(refValue.Kind() == reflect.Slice ||
		refValue.Kind() == reflect.Array) {
		return nil, fmt.Errorf("unable to resize non-slice type")
	}
	oldLen := refValue.Len()
	if oldLen <= newLen {
		return iSlice, nil
	}
	out := reflect.MakeSlice(refType, 0, 0)
	// For LAST, start copying from the tail so the final newLen elements remain.
	ibase := 0
	if direction == LAST {
		ibase = oldLen - newLen
	}
	for i := 0; i < newLen; i++ {
		ii := i + ibase
		out = reflect.Append(out, refValue.Index(ii))
	}
	return out.Interface(), nil
}
// GenericComparison evaluates (left op right) as a boolean for interface
// values. EQ/NEQ use reflect.DeepEqual on any types; the ordering operators
// require both sides to be numeric (int/int32/int64/float32/float64).
// Any nil operand is an error. Non-numeric operands with an ordering operator
// fall through to (false, nil) — note: not an error.
func GenericComparison(left, right interface{},
	op ComparisonOperatorEnum) (result bool, err error) {
	/*
		Evaluate: (left op right) as a boolean
	*/
	// Shortcut returns
	switch {
	case left == nil && right == nil:
		fallthrough
	case left == nil: // Left should hold a value
		fallthrough
	case right == nil: // Left will always compare true to nil
		return false, fmt.Errorf("nil comparison value")
	case op == EQ:
		return reflect.DeepEqual(left, right), nil
	case op == NEQ:
		return !reflect.DeepEqual(left, right), nil
	}
	var lFloat, rFloat float64
	var lInt, rInt int64
	// The float branch handles every numeric type GetValueAsFloat64 accepts,
	// so in practice it covers all numeric comparisons.
	lFloat, err = GetValueAsFloat64(left)
	if err == nil {
		rFloat, err = GetValueAsFloat64(right)
		if err != nil {
			return false, fmt.Errorf("left and right values do not match")
		}
		switch op {
		case LT:
			return lFloat < rFloat, nil
		case LTE:
			return lFloat <= rFloat, nil
		case GT:
			return lFloat > rFloat, nil
		case GTE:
			return lFloat >= rFloat, nil
		}
	}
	// NOTE(review): GetValueAsInt64 accepts exactly the same types as
	// GetValueAsFloat64, so this branch appears unreachable for valid ops;
	// kept as-is pending confirmation.
	lInt, err = GetValueAsInt64(left)
	if err == nil {
		rInt, err = GetValueAsInt64(right)
		if err != nil {
			return false, fmt.Errorf("left and right values do not match")
		}
		switch op {
		case LT:
			return lInt < rInt, nil
		case LTE:
			return lInt <= rInt, nil
		case GT:
			return lInt > rInt, nil
		case GTE:
			return lInt >= rInt, nil
		}
	}
	return false, nil
}
// GetValueAsFloat64 converts any supported numeric value
// (int, int32, int64, float32, float64) to float64.
// Non-numeric inputs yield a zero value and an error.
func GetValueAsFloat64(iValue interface{}) (val float64, err error) {
	switch v := iValue.(type) {
	case float64:
		return v, nil
	case float32:
		return float64(v), nil
	case int:
		return float64(v), nil
	case int32:
		return float64(v), nil
	case int64:
		return float64(v), nil
	}
	return 0, fmt.Errorf("not a float")
}
// GetValueAsInt64 converts any supported numeric value
// (int, int32, int64, float32, float64) to int64; floats are truncated
// toward zero. Non-numeric inputs yield a zero value and an error.
func GetValueAsInt64(iValue interface{}) (val int64, err error) {
	switch v := iValue.(type) {
	case int64:
		return v, nil
	case int:
		return int64(v), nil
	case int32:
		return int64(v), nil
	case float32:
		return int64(v), nil
	case float64:
		return int64(v), nil
	}
	return 0, fmt.Errorf("not an int")
}
/*
	Utility datatypes.
*/
// ComparisonOperatorEnum identifies a comparison operator; the zero value
// means "no operator" (see StringToComparisonOperatorEnum / String).
type ComparisonOperatorEnum uint8
const (
	_ ComparisonOperatorEnum = iota // 0 reserved for "no operator"
	EQ
	NEQ
	LT
	LTE
	GT
	GTE
)
// StringToComparisonOperatorEnum parses an operator symbol ("=", "!=", "<>",
// "<", "<=", ">", ">=") into its enum value; anything else yields the zero
// value ("no operator").
func StringToComparisonOperatorEnum(opstr string) (oper ComparisonOperatorEnum) {
	lookup := map[string]ComparisonOperatorEnum{
		"=":  EQ,
		"<>": NEQ,
		"!=": NEQ,
		"<":  LT,
		"<=": LTE,
		">":  GT,
		">=": GTE,
	}
	// A missing key returns the zero value, matching the original default.
	return lookup[opstr]
}
func (co ComparisonOperatorEnum) String() string {
switch co {
case EQ:
return "="
case NEQ:
return "!="
case LT:
return "<"
case LTE:
return "<="
case GT:
return ">"
case GTE:
return ">="
default:
return "NONE"
}
} | utils/io/generics.go | 0.657538 | 0.439026 | generics.go | starcoder |
package draw
import (
"fmt"
"math"
"strings"
"unicode/utf8"
"github.com/nsf/termbox-go"
)
// Bordered game window area size, in terminal cells.
const gameBoyWidth, gameBoyHeight = 96, 24
// Margins inside the bordered game window, in terminal cells.
const marginX, marginY = 3, 1
// Draw is a general function for drawing to the terminal.
// The text can be a rune, a string, or a multiline string, which will be drawn relative to the
// specified anchor. This function should always be used instead of termbox.SetCell().
// Any other text type panics.
func Draw(anchor Anchor, color Color, text interface{}) {
	switch t := text.(type) {
	case rune:
		// Base case: a single cell at the anchor position.
		positionX, positionY, _, _ := anchor()
		fg, bg := color()
		termbox.SetCell(positionX, positionY, t, fg, bg)
	case string:
		// Split into rows and recurse rune-by-rune, offset so the text block
		// grows along the anchor's draw direction (e.g. -1 draws leftwards).
		rows := strings.Split(t, "\n")
		textHeight := len(rows)
		textWidth := maxLength(rows)
		_, _, drawDirectionX, drawDirectionY := anchor()
		offsetX := int(math.Ceil(float64(textWidth) * drawDirectionX))
		offsetY := int(math.Round(float64(textHeight) * drawDirectionY))
		for i, row := range rows {
			for j, ch := range []rune(row) { // Converting to []rune first gets us tight alignment.
				Draw(Offset(anchor, offsetX+j, offsetY+i), color, ch)
			}
		}
	default:
		panic(fmt.Errorf("unsupported Draw text type %T", text))
	}
}
// SetCursor uses an Anchor to determine the position of the cursor, so it can be used in
// conjunction with Draw to place the cursor.
// NOTE(review): the cursor is placed one row above the anchor (y-1) —
// presumably to line up with text drawn at the anchor; confirm.
func SetCursor(anchor Anchor) {
	x, y, _, _ := anchor()
	termbox.SetCursor(x, y-1)
}
// Anchor defines a position offset and direction that can be used for drawing.
// The draw direction is a multiplier applied to the text's width/height
// (0 = draw right/down of the position, -1 = left/up, -0.5 = centered).
type Anchor func() (positionX, positionY int, drawDirectionX, drawDirectionY float64)
// Origin is an Anchor on the top-left corner of the terminal window.
func Origin() (positionX, positionY int, drawDirectionX, drawDirectionY float64) {
	return 0, 0, 0, 0
}
// TopLeft is an Anchor on the top-left corner of the game window.
func TopLeft() (positionX, positionY int, drawDirectionX, drawDirectionY float64) {
	termWidth, termHeight := termbox.Size()
	// The game window is centered within the terminal.
	leftX := (termWidth - gameBoyWidth) / 2
	topY := (termHeight - gameBoyHeight) / 2
	// Add an inner margin while also ensuring the text is always on-screen.
	return max(0, leftX+marginX+2), max(0, topY+marginY+1), 0, 0
}
// TopRight is an Anchor on the top-right corner of the game window;
// text drawn from it grows leftwards (drawDirectionX = -1).
func TopRight() (positionX, positionY int, drawDirectionX, drawDirectionY float64) {
	termWidth, termHeight := termbox.Size()
	rightX := (termWidth + gameBoyWidth) / 2
	topY := (termHeight - gameBoyHeight) / 2
	// Add an inner margin while also ensuring the text is always on-screen.
	return min(termWidth, rightX-marginX), max(0, topY+marginY+1), -1, 0
}
// BotRight is an Anchor on the bottom-right corner of the game window;
// text drawn from it grows leftwards (drawDirectionX = -1).
func BotRight() (positionX, positionY int, drawDirectionX, drawDirectionY float64) {
	termWidth, termHeight := termbox.Size()
	rightX := (termWidth + gameBoyWidth) / 2
	botY := (termHeight + gameBoyHeight) / 2
	// Add an inner margin while also ensuring the text is always on-screen.
	return min(termWidth, rightX-marginX), min(termHeight, botY-marginY) - 1, -1, 0
}
// Center is an Anchor in the center-middle of the terminal that draws from the center outward
// (text is centered both horizontally and vertically).
func Center() (positionX, positionY int, drawDirectionX, drawDirectionY float64) {
	termWidth, termHeight := termbox.Size()
	centerX := termWidth / 2
	centerY := termHeight / 2
	return centerX, centerY, -0.5, -0.5
}
// CenterLeft is an Anchor in the center-middle of the terminal that draws toward the left side.
func CenterLeft() (positionX, positionY int, drawDirectionX, drawDirectionY float64) {
	// Reuse Center's position, overriding only the draw direction.
	positionX, positionY, _, _ = Center()
	return positionX, positionY, -1, -0.5
}
// CenterRight is an Anchor in the center-middle of the terminal that draws toward the right side.
func CenterRight() (positionX, positionY int, drawDirectionX, drawDirectionY float64) {
	// Reuse Center's position, overriding only the draw direction.
	positionX, positionY, _, _ = Center()
	return positionX, positionY, 0, -0.5
}
// CenterTop is an Anchor in the center-middle of the terminal that draws toward the top.
func CenterTop() (positionX, positionY int, drawDirectionX, drawDirectionY float64) {
	// Reuse Center's position, overriding only the draw direction.
	positionX, positionY, _, _ = Center()
	return positionX, positionY, -0.5, -1
}
// MiddleRight is an Anchor that is vertically centered and on the right edge of the game window;
// text drawn from it grows leftwards (drawDirectionX = -1).
func MiddleRight() (positionX, positionY int, drawDirectionX, drawDirectionY float64) {
	termWidth, termHeight := termbox.Size()
	rightX := (termWidth + gameBoyWidth) / 2
	centerY := termHeight / 2
	// Add an inner margin while also ensuring the text is always on-screen.
	return min(termWidth, rightX-marginX), centerY, -1, 0
}
// MiddleLeft is an Anchor that is vertically centered and on the left edge of the game window.
func MiddleLeft() (positionX, positionY int, drawDirectionX, drawDirectionY float64) {
	termWidth, termHeight := termbox.Size()
	leftX := (termWidth - gameBoyWidth) / 2
	centerY := termHeight / 2
	// Add an inner margin while also ensuring the text is always on-screen.
	// Clamp to the left edge with max (mirroring TopLeft): the previous
	// min(termWidth, ...) did not prevent a negative X on narrow terminals.
	return max(0, leftX+marginX), centerY, 0, 0
}
// Offset returns an Anchor displaced from the given Anchor by (x, y) cells;
// the draw direction is passed through unchanged.
func Offset(anchor Anchor, x, y int) Anchor {
	return func() (int, int, float64, float64) {
		px, py, dx, dy := anchor()
		return px + x, py + y, dx, dy
	}
}
// Color defines foreground and background attributes for drawing.
type Color func() (fg, bg termbox.Attribute)
// Normal is the default Color (terminal default foreground and background).
func Normal() (fg, bg termbox.Attribute) {
	return termbox.ColorDefault, termbox.ColorDefault
}
// Inverted is a Color that inverts the text and background color to appear as a highlight.
func Inverted() (fg, bg termbox.Attribute) {
	return termbox.ColorBlack, termbox.ColorWhite
}
// Magenta is a magenta Color on the default background.
func Magenta() (fg, bg termbox.Attribute) {
	return termbox.ColorMagenta, termbox.ColorDefault
}
// Green is not a creative Color.
func Green() (fg, bg termbox.Attribute) {
	return termbox.ColorGreen, termbox.ColorDefault
}
// Border draws the game window frame using the runes of decoration, cycling
// through them. An empty decoration draws nothing.
func Border(decoration string) {
	if decoration == "" {
		return
	}
	termWidth, termHeight := termbox.Size()
	topY := (termHeight - gameBoyHeight) / 2
	bottomY := (termHeight + gameBoyHeight) / 2
	leftX := (termWidth - gameBoyWidth) / 2
	rightX := (termWidth + gameBoyWidth) / 2
	borderRunes := []rune(decoration)
	// Top and bottom edges: one decoration rune every second column.
	for i := 0; i < gameBoyWidth/2; i++ {
		ch := borderRunes[i%len(borderRunes)]
		Draw(Offset(Origin, leftX+i*2, topY), Normal, ch)
		Draw(Offset(Origin, leftX+i*2, bottomY), Normal, ch)
	}
	// Left and right edges: one decoration rune per row.
	for i := 0; i <= gameBoyHeight; i++ {
		ch := borderRunes[i%len(borderRunes)]
		Draw(Offset(Origin, leftX, topY+i), Normal, ch)
		Draw(Offset(Origin, rightX, topY+i), Normal, ch)
	}
}
// maxLength returns the rune count of the longest string in ss
// (0 for an empty or nil slice).
func maxLength(ss []string) int {
	longest := 0
	for _, s := range ss {
		if n := utf8.RuneCountInString(s); n > longest {
			longest = n
		}
	}
	return longest
}
// max returns the larger of a and b.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
// min returns the smaller of a and b.
func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}
package clock
type VectorClock map[string]uint64
// Compare determines the relationship between two vector clocks.
// a may be less than b, equal to b, greater than b, or there may be no comparison.
// Note: both a and b are mutated — keys present in one clock but missing in
// the other are inserted with value zero.
func (a VectorClock) Compare(b VectorClock) CompareResult {
	// Determine if the scalars are all pairwise equal, less than or equal, OR
	// greater than or equal in one pass.
	equal := true
	lessEqual := true
	greaterEqual := true
	for k := range allKeys(a, b) {
		// Set missing keys to zero to compare properly
		if _, ok := a[k]; !ok {
			a[k] = 0
		}
		if _, ok := b[k]; !ok {
			b[k] = 0
		}
		if a[k] != b[k] {
			equal = false
		}
		if !(a[k] <= b[k]) {
			lessEqual = false
		}
		if !(a[k] >= b[k]) {
			greaterEqual = false
		}
	}
	if lessEqual && !equal {
		// The clock a < b if they are pairwise <= and the entire clock is not
		// equal.
		return Less
	} else if greaterEqual && !equal {
		// Opposite of above
		return Greater
	} else if equal {
		// Simple case!
		return Equal
	}
	// Some components are less and others greater: the clocks are concurrent.
	return NoRelation
}
// OneUpExcept returns true if a is b plus one for only one key, which is returned as the second argument.
// The values for the key in self are completely ignored.
// Note: both clocks are mutated — missing keys are inserted with value zero.
// NOTE(review): keys other than the one-up key are not required to be equal
// between a and b; only the a[k] == b[k]+1 property is counted — confirm
// this is the intended semantics.
func (a VectorClock) OneUpExcept(self string, b VectorClock) (bool, string) {
	oneupkey := ""
	for k := range allKeys(a, b) {
		// Skip self
		if k == self {
			continue
		}
		// Add defaults for missing values
		if _, ok := a[k]; !ok {
			a[k] = 0
		}
		if _, ok := b[k]; !ok {
			b[k] = 0
		}
		// Test oneup property; more than one such key fails immediately.
		if a[k] == b[k]+1 {
			if oneupkey != "" {
				return false, ""
			}
			oneupkey = k
		}
	}
	return oneupkey != "", oneupkey
}
// Increment increases the value for a given key by one.
// A missing key is treated as zero, so its first increment yields 1.
func (a VectorClock) Increment(k string) {
	// Indexing a missing map key yields the zero value, so no existence
	// check is needed before incrementing.
	a[k]++
}
// Max modifies a to be the pairwise max of a and b.
// Keys missing from a are filled in from b; b gains zero entries for keys
// only in a (a side effect of allKeys).
func (a VectorClock) Max(b VectorClock) {
	for k := range allKeys(a, b) {
		// allKeys has ensured b[k] exists; a missing a[k] reads as zero here.
		if a[k] < b[k] {
			a[k] = b[k]
		}
	}
}
// Copy returns a new identical vector clock, independent of the original.
func (a VectorClock) Copy() VectorClock {
	dup := make(VectorClock, len(a))
	for k, v := range a {
		dup[k] = v
	}
	return dup
}
// Subset returns a vector clock consisting of the subset of keys given.
// Keys absent from a appear in the result with value zero.
func (a VectorClock) Subset(keys []string) VectorClock {
	sub := make(VectorClock, len(keys))
	for _, key := range keys {
		// A missing key indexes to the zero value, matching the explicit
		// zero-fill of the original behaviour.
		sub[key] = a[key]
	}
	return sub
}
// allKeys zips together all the keys for two clocks.
// If any key is missing from one but not the other, it is
// defaulted to zero in the clock missing the key.
func allKeys(a, b VectorClock) map[string]struct{} {
result := make(map[string]struct{})
for k := range a {
result[k] = struct{}{}
if _, ok := b[k]; !ok {
b[k] = 0
}
}
for k := range b {
result[k] = struct{}{}
if _, ok := a[k]; !ok {
a[k] = 0
}
}
return result
} | pkg/clock/vector.go | 0.84338 | 0.612831 | vector.go | starcoder |
package goanda
// Supporting OANDA docs - http://developer.oanda.com/rest-live-v20/instrument-ep/
import (
"errors"
"strconv"
"time"
)
// GranularityFromDuration tries to find a granularity for the given duration.
// Durations that do not match a supported candle granularity yield an error.
func GranularityFromDuration(d time.Duration) (Granularity, error) {
	g := Granularity(d)
	if _, known := candlestickGranularity[g]; !known {
		return 0, errors.New("No such granularity")
	}
	return g, nil
}
// Granularity defines a candle's time period
// (stored as the period's time.Duration).
type Granularity time.Duration
// Duration returns the granularity as a time.Duration
func (g Granularity) Duration() time.Duration {
	return time.Duration(g)
}
// String returns the granularity as a string, formatted to the oanda standard
// (e.g. "M5", "H1"). Unsupported granularities yield the empty string.
func (g Granularity) String() string {
	return candlestickGranularity[g]
}
// Granularities available to the API
const (
	GranularityFiveSeconds    = Granularity(time.Second * 5)
	GranularityTenSeconds     = Granularity(time.Second * 10)
	GranularityFifteenSeconds = Granularity(time.Second * 15)
	GranularityThirtySeconds  = Granularity(time.Second * 30)
	GranularityMinute         = Granularity(time.Minute)
	GranularityTwoMinutes     = Granularity(time.Minute * 2)
	GranularityFourMinutes    = Granularity(time.Minute * 4)
	GranularityFiveMinutes    = Granularity(time.Minute * 5)
	GranularityTenMinutes     = Granularity(time.Minute * 10)
	GranularityFifteenMinutes = Granularity(time.Minute * 15)
	GranularityThirtyMinutes  = Granularity(time.Minute * 30)
	GranularityHour           = Granularity(time.Hour)
	GranularityTwoHours       = Granularity(time.Hour * 2)
	GranularityThreeHours     = Granularity(time.Hour * 3)
	GranularityFourHours      = Granularity(time.Hour * 4)
	GranularitySixHours       = Granularity(time.Hour * 6)
	GranularityEightHours     = Granularity(time.Hour * 8)
	GranularityTwelveHours    = Granularity(time.Hour * 12)
	GranularityDay            = Granularity(time.Hour * 24)
	GranularityWeek           = Granularity(time.Hour * 24 * 7)
	// NOTE(review): a month is approximated as 30 weeks' worth of hours here
	// (24*7*30) rather than ~30 days — the duration value only serves as a
	// unique map key for the "M" label; confirm intent.
	GranularityMonth = Granularity(time.Hour * 24 * 7 * 30)
)
// candlestickGranularity maps each supported Granularity to the label the
// OANDA v20 API expects in the "granularity" query parameter.
var candlestickGranularity = map[Granularity]string{
	GranularityFiveSeconds:    "S5",
	GranularityTenSeconds:     "S10",
	GranularityFifteenSeconds: "S15",
	GranularityThirtySeconds:  "S30",
	GranularityMinute:         "M1",
	GranularityTwoMinutes:     "M2",
	GranularityFourMinutes:    "M4",
	GranularityFiveMinutes:    "M5",
	GranularityTenMinutes:     "M10",
	GranularityFifteenMinutes: "M15",
	GranularityThirtyMinutes:  "M30",
	GranularityHour:           "H1",
	GranularityTwoHours:       "H2",
	GranularityThreeHours:     "H3",
	GranularityFourHours:      "H4",
	GranularitySixHours:       "H6",
	GranularityEightHours:     "H8",
	GranularityTwelveHours:    "H12",
	GranularityDay:            "D",
	GranularityWeek:           "W",
	GranularityMonth:          "M",
}
// Candle holds a single OHLC price set, decoded from OANDA's string-encoded
// JSON numbers.
type Candle struct {
	Open  float64 `json:"o,string"`
	Close float64 `json:"c,string"`
	Low   float64 `json:"l,string"`
	High  float64 `json:"h,string"`
}
// Candles is one candlestick entry: its mid prices plus volume, start time,
// and whether the candle period has completed.
type Candles struct {
	Complete bool      `json:"complete"`
	Volume   int       `json:"volume"`
	Time     time.Time `json:"time"`
	Mid      Candle    `json:"mid"`
}
// BidAskCandles is the response shape for candle requests with price=BA:
// each candle carries separate bid and ask OHLC sets.
type BidAskCandles struct {
	Candles []struct {
		Ask struct {
			C float64 `json:"c,string"`
			H float64 `json:"h,string"`
			L float64 `json:"l,string"`
			O float64 `json:"o,string"`
		} `json:"ask"`
		Bid struct {
			C float64 `json:"c,string"`
			H float64 `json:"h,string"`
			L float64 `json:"l,string"`
			O float64 `json:"o,string"`
		} `json:"bid"`
		Complete bool      `json:"complete"`
		Time     time.Time `json:"time"`
		Volume   int       `json:"volume"`
	} `json:"candles"`
}
// InstrumentHistory is the response shape for mid-price candle requests.
type InstrumentHistory struct {
	Instrument  string    `json:"instrument"`
	Granularity string    `json:"granularity"`
	Candles     []Candles `json:"candles"`
}
// Bucket is one price level of an order/position book, with the percentage of
// long and short positions at that level (values are API-encoded strings).
type Bucket struct {
	Price            string `json:"price"`
	LongCountPercent string `json:"longCountPercent"`
	ShortCountPercent string `json:"shortCountPercent"`
}
// BrokerBook is the response shape shared by the orderBook and positionBook
// endpoints.
type BrokerBook struct {
	Instrument  string    `json:"instrument"`
	Time        time.Time `json:"time"`
	Price       string    `json:"price"`
	BucketWidth string    `json:"bucketWidth"`
	Buckets     []Bucket  `json:"buckets"`
}
// InstrumentPricing is the response shape of the account pricing endpoint:
// current bid/ask ladders, closeout prices, tradeability and unit limits.
type InstrumentPricing struct {
	Time   time.Time `json:"time"`
	Prices []struct {
		Type string    `json:"type"`
		Time time.Time `json:"time"`
		Bids []struct {
			Price     float64 `json:"price,string"`
			Liquidity int     `json:"liquidity"`
		} `json:"bids"`
		Asks []struct {
			Price     float64 `json:"price,string"`
			Liquidity int     `json:"liquidity"`
		} `json:"asks"`
		CloseoutBid    float64 `json:"closeoutBid,string"`
		CloseoutAsk    float64 `json:"closeoutAsk,string"`
		Status         string  `json:"status"`
		Tradeable      bool    `json:"tradeable"`
		UnitsAvailable struct {
			Default struct {
				Long  string `json:"long"`
				Short string `json:"short"`
			} `json:"default"`
			OpenOnly struct {
				Long  string `json:"long"`
				Short string `json:"short"`
			} `json:"openOnly"`
			ReduceFirst struct {
				Long  string `json:"long"`
				Short string `json:"short"`
			} `json:"reduceFirst"`
			ReduceOnly struct {
				Long  string `json:"long"`
				Short string `json:"short"`
			} `json:"reduceOnly"`
		} `json:"unitsAvailable"`
		QuoteHomeConversionFactors struct {
			PositiveUnits string `json:"positiveUnits"`
			NegativeUnits string `json:"negativeUnits"`
		} `json:"quoteHomeConversionFactors"`
		Instrument string `json:"instrument"`
	} `json:"prices"`
}
// GetCandles fetches the most recent `count` mid-price candles for the
// instrument at the given granularity.
func (c *Connection) GetCandles(instrument string, count int, g Granularity) (InstrumentHistory, error) {
	var history InstrumentHistory
	endpoint := "/instruments/" + instrument +
		"/candles?count=" + strconv.Itoa(count) +
		"&granularity=" + g.String()
	err := c.requestAndUnmarshal(endpoint, &history)
	return history, err
}
// GetTimeToCandles fetches up to count candles for the given instrument
// that end at the given time, at granularity g. The "to" query parameter
// is the Unix timestamp of the requested end time.
func (c *Connection) GetTimeToCandles(instrument string, count int, g Granularity, to time.Time) (InstrumentHistory, error) {
	ih := InstrumentHistory{}
	err := c.requestAndUnmarshal(
		"/instruments/"+
			instrument+
			"/candles?count="+
			strconv.Itoa(count)+
			"&to="+
			// FormatInt keeps the full int64 range; the previous
			// int(to.Unix()) conversion truncated on 32-bit platforms.
			strconv.FormatInt(to.Unix(), 10)+
			"&granularity="+
			g.String(),
		&ih,
	)
	return ih, err
}
// GetTimeFromCandles fetches up to count candles for the given instrument
// starting at the given time, at granularity g. The "from" query parameter
// is the Unix timestamp of the requested start time.
func (c *Connection) GetTimeFromCandles(instrument string, count int, g Granularity, from time.Time) (InstrumentHistory, error) {
	ih := InstrumentHistory{}
	err := c.requestAndUnmarshal(
		"/instruments/"+
			instrument+
			"/candles?count="+
			strconv.Itoa(count)+
			"&from="+
			// FormatInt keeps the full int64 range; the previous
			// int(from.Unix()) conversion truncated on 32-bit platforms.
			strconv.FormatInt(from.Unix(), 10)+
			"&granularity="+
			g.String(),
		&ih,
	)
	return ih, err
}
// GetBidAskCandles fetches candles carrying both bid and ask prices
// (price=BA) for the given instrument.
//
// NOTE(review): count is passed through as a raw string here, while
// GetCandles takes an int — confirm whether the asymmetry is intended.
func (c *Connection) GetBidAskCandles(instrument string, count string, g Granularity) (BidAskCandles, error) {
	ca := BidAskCandles{}
	err := c.requestAndUnmarshal(
		"/instruments/"+
			instrument+
			"/candles?count="+
			count+
			"&granularity="+
			g.String()+
			"&price=BA",
		&ca,
	)
	return ca, err
}
// OrderBook fetches the broker's order book for the given instrument.
func (c *Connection) OrderBook(instrument string) (BrokerBook, error) {
	var book BrokerBook
	err := c.requestAndUnmarshal("/instruments/"+instrument+"/orderBook", &book)
	return book, err
}
// PositionBook fetches the broker's position book for the given instrument.
func (c *Connection) PositionBook(instrument string) (BrokerBook, error) {
	var book BrokerBook
	err := c.requestAndUnmarshal("/instruments/"+instrument+"/positionBook", &book)
	return book, err
}
func (c *Connection) GetInstrumentPrice(instrument string) (InstrumentPricing, error) {
ip := InstrumentPricing{}
err := c.requestAndUnmarshal(
"/accounts/"+
c.accountID+
"/pricing?instruments="+
instrument,
&ip,
)
return ip, err
} | instrument.go | 0.727492 | 0.467393 | instrument.go | starcoder |
package ggol
import (
"sync"
)
// "T" in the Game interface represents the type of unit, it's defined by you.
type Game[T any] interface {
// ResetUnits all units with initial unit.
ResetUnits()
// Generate next units, the way you generate next units will be depending on the NextUnitGenerator function
// you passed in SetNextUnitGenerator.
GenerateNextUnits()
// Set NextUnitGenerator, which tells the game how you want to generate next unit of the given unit.
SetNextUnitGenerator(nextUnitGenerator NextUnitGenerator[T])
// Set the status of the unit at the given coordinate.
SetUnit(coord *Coordinate, unit *T) (err error)
// Get the size of the game.
GetSize() (size *Size)
// Get the status of the unit at the given coordinate.
GetUnit(coord *Coordinate) (unit *T, err error)
// Get all units in the area.
GetUnitsInArea(area *Area) (units [][]*T, err error)
// Get all units in the game.
GetUnits() (units [][]*T)
// Iterate through units in the given area.
IterateUnitsInArea(area *Area, callback UnitsIteratorCallback[T]) (err error)
// Iterate through all units in the game
IterateUnits(callback UnitsIteratorCallback[T])
}
// gameInfo is the internal implementation of the Game interface.
type gameInfo[T any] struct {
	size              *Size
	initialUnit       *T
	units             [][]*T // indexed units[x][y]
	nextUnitGenerator NextUnitGenerator[T]
	locker            sync.RWMutex // guards access to units
}
// defaultNextUnitGenerator is the generator installed by NewGame: it keeps
// every unit unchanged from one generation to the next.
func defaultNextUnitGenerator[T any](coord *Coordinate, unit *T, getAdjacentUnit AdjacentUnitGetter[T]) (nextUnit *T) {
	nextUnit = unit
	return
}
// NewGame returns a Game of the given size in which every unit starts as
// initialUnit. It fails when either dimension is negative.
func NewGame[T any](
	size *Size,
	initialUnit *T,
) (Game[T], error) {
	if size.Width < 0 || size.Height < 0 {
		return nil, &ErrSizeIsInvalid{size}
	}
	game := &gameInfo[T]{
		size:              size,
		initialUnit:       initialUnit,
		units:             createInitialUnits(size, initialUnit),
		nextUnitGenerator: defaultNextUnitGenerator[T],
		locker:            sync.RWMutex{},
	}
	return game, nil
}
// createInitialUnits builds a Width x Height grid with every cell pointing
// at the initial unit.
func createInitialUnits[T any](size *Size, initialUnit *T) [][]*T {
	grid := make([][]*T, size.Width)
	for x := range grid {
		column := make([]*T, size.Height)
		for y := range column {
			column[y] = initialUnit
		}
		grid[x] = column
	}
	return grid
}
// isCoordinateInvalid reports whether c lies outside the game board.
func (g *gameInfo[T]) isCoordinateInvalid(c *Coordinate) bool {
	insideX := c.X >= 0 && c.X < g.size.Width
	insideY := c.Y >= 0 && c.Y < g.size.Height
	return !(insideX && insideY)
}
// isAreaInvalid reports whether the area's corners are in the wrong order.
func (g *gameInfo[T]) isAreaInvalid(area *Area) bool {
	return area.To.X < area.From.X || area.To.Y < area.From.Y
}
// getAdjacentUnit returns the unit at originCoord + relativeCoord. When the
// target falls off the board it wraps around (toroidal board) and
// crossBorder reports that the border was crossed.
func (g *gameInfo[T]) getAdjacentUnit(
	originCoord *Coordinate,
	relativeCoord *Coordinate,
) (unit *T, crossBorder bool) {
	targetX := originCoord.X + relativeCoord.X
	targetY := originCoord.Y + relativeCoord.Y
	isCrossBorder := false
	if g.isCoordinateInvalid(&Coordinate{X: targetX, Y: targetY}) {
		isCrossBorder = true
		// Normalize in one step instead of repeatedly adding the dimension:
		// ((v % n) + n) % n maps any integer into [0, n).
		targetX = ((targetX % g.size.Width) + g.size.Width) % g.size.Width
		targetY = ((targetY % g.size.Height) + g.size.Height) % g.size.Height
	}
	return g.units[targetX][targetY], isCrossBorder
}
// ResetUnits restores every unit on the board to the initial unit.
// A fresh grid is allocated, so slices previously obtained from GetUnits
// keep pointing at the old, pre-reset data.
func (g *gameInfo[T]) ResetUnits() {
	g.locker.Lock()
	defer g.locker.Unlock()
	g.units = createInitialUnits(g.size, g.initialUnit)
}
// Generate next units.
//
// The update runs in two phases: first every next unit is computed from
// the current board (so the generator always observes the pre-update
// state), then the results are copied back element by element. Copying in
// place — rather than swapping in the new slice — keeps any slice
// previously returned by GetUnits pointing at live data.
func (g *gameInfo[T]) GenerateNextUnits() {
	g.locker.Lock()
	defer g.locker.Unlock()
	nextUnits := make([][]*T, g.size.Width)
	for x := 0; x < g.size.Width; x++ {
		nextUnits[x] = make([]*T, g.size.Height)
		for y := 0; y < g.size.Height; y++ {
			coord := Coordinate{X: x, Y: y}
			nextUnit := g.nextUnitGenerator(&coord, g.units[x][y], g.getAdjacentUnit)
			nextUnits[x][y] = nextUnit
		}
	}
	// Second phase: write the buffered results back into the live board.
	for x := 0; x < g.size.Width; x++ {
		for y := 0; y < g.size.Height; y++ {
			g.units[x][y] = nextUnits[x][y]
		}
	}
}
// SetNextUnitGenerator installs the function used by GenerateNextUnits to
// compute the next state of each unit.
//
// The assignment happens under the game lock: GenerateNextUnits reads the
// generator while holding the lock, so the previous unguarded write was a
// data race.
func (g *gameInfo[T]) SetNextUnitGenerator(iterator NextUnitGenerator[T]) {
	g.locker.Lock()
	defer g.locker.Unlock()
	g.nextUnitGenerator = iterator
}
// SetUnit replaces the unit at coordinate c, failing when c is off the
// board.
func (g *gameInfo[T]) SetUnit(c *Coordinate, unit *T) error {
	g.locker.Lock()
	defer g.locker.Unlock()
	if !g.isCoordinateInvalid(c) {
		g.units[c.X][c.Y] = unit
		return nil
	}
	return &ErrCoordinateIsInvalid{c}
}
// Get the game size.
// The returned pointer is the game's own Size value, not a copy.
func (g *gameInfo[T]) GetSize() *Size {
	g.locker.RLock()
	defer g.locker.RUnlock()
	return g.size
}
// GetUnit returns the unit stored at coordinate c, or an error when c is
// off the board.
func (g *gameInfo[T]) GetUnit(c *Coordinate) (*T, error) {
	g.locker.RLock()
	defer g.locker.RUnlock()
	if !g.isCoordinateInvalid(c) {
		return g.units[c.X][c.Y], nil
	}
	return nil, &ErrCoordinateIsInvalid{c}
}
// Get all units in the game.
// The returned slice is the game's internal storage, not a copy: later
// calls to SetUnit or GenerateNextUnits are visible through it.
func (g *gameInfo[T]) GetUnits() [][]*T {
	g.locker.RLock()
	defer g.locker.RUnlock()
	return g.units
}
// GetUnitsInArea returns a copy of the unit grid restricted to the given
// area (both corners inclusive). It fails when a corner is off the board
// or the corners are in the wrong order.
func (g *gameInfo[T]) GetUnitsInArea(area *Area) ([][]*T, error) {
	g.locker.RLock()
	defer g.locker.RUnlock()
	if g.isCoordinateInvalid(&area.From) {
		return nil, &ErrCoordinateIsInvalid{&area.From}
	}
	if g.isCoordinateInvalid(&area.To) {
		return nil, &ErrCoordinateIsInvalid{&area.To}
	}
	if g.isAreaInvalid(area) {
		return nil, &ErrAreaIsInvalid{area}
	}
	// The bounds are known up front, so pre-size both dimensions instead
	// of growing through repeated append reallocations.
	width := area.To.X - area.From.X + 1
	height := area.To.Y - area.From.Y + 1
	unitsInArea := make([][]*T, 0, width)
	for x := area.From.X; x <= area.To.X; x++ {
		newRow := make([]*T, 0, height)
		for y := area.From.Y; y <= area.To.Y; y++ {
			newRow = append(newRow, g.units[x][y])
		}
		unitsInArea = append(unitsInArea, newRow)
	}
	return unitsInArea, nil
}
// We will iterate all units in the game and call the callback with
// coordinate and unit.
//
// NOTE(review): unlike the Get* accessors, this method takes no read lock,
// so iterating concurrently with GenerateNextUnits or SetUnit is racy —
// confirm whether callers are expected to synchronize externally.
func (g *gameInfo[T]) IterateUnits(callback UnitsIteratorCallback[T]) {
	for x := 0; x < g.size.Width; x++ {
		for y := 0; y < g.size.Height; y++ {
			callback(&Coordinate{X: x, Y: y}, g.units[x][y])
		}
	}
}
// We will iterate all units in the given area (corners inclusive) and call
// the callback with coordinate and unit.
//
// NOTE(review): like IterateUnits, no read lock is taken here — confirm
// whether callers are expected to synchronize externally.
func (g *gameInfo[T]) IterateUnitsInArea(area *Area, callback UnitsIteratorCallback[T]) error {
	if g.isCoordinateInvalid(&area.From) {
		return &ErrCoordinateIsInvalid{&area.From}
	}
	if g.isCoordinateInvalid(&area.To) {
		return &ErrCoordinateIsInvalid{&area.To}
	}
	if g.isAreaInvalid(area) {
		return &ErrAreaIsInvalid{area}
	}
	for x := area.From.X; x <= area.To.X; x++ {
		for y := area.From.Y; y <= area.To.Y; y++ {
			callback(&Coordinate{X: x, Y: y}, g.units[x][y])
		}
	}
	return nil
}
package gpsabl
import (
"time"
)
// Copyright 2019 by <EMAIL>. All
// rights reserved. Use of this source code is governed
// by a BSD-style license that can be found in the
// LICENSE file.
// TrackSummary - the struct to store track statistic data
type TrackSummary struct {
	Distance           float64
	HorizontalDistance float64
	MinimumAltitude    float32
	MaximumAltitude    float32
	ElevationGain      float32
	ElevationLose      float32
	UpwardsDistance    float64
	DownwardsDistance  float64
	TimeDataValid      bool
	StartTime          time.Time
	EndTime            time.Time
	MovingTime         time.Duration
	UpwardsTime        time.Duration
	DownwardsTime      time.Duration
}

// SetValues fills every statistic field of the TrackSummary in one call
// (implements the TrackSummaryProvider interface).
func (sum *TrackSummary) SetValues(distance float64,
	horizontalDistance float64,
	minimumAltitude float32,
	maximumAltitude float32,
	elevationGain float32,
	elevationLose float32,
	upwardsDistance float64,
	downwardsDistance float64,
	timeDataValid bool,
	startTime time.Time,
	endTime time.Time,
	movingTime time.Duration,
	upwardsTime time.Duration,
	downwardsTime time.Duration) {
	// Assignments follow the parameter order, so it is easy to verify that
	// none of the fourteen values is dropped or swapped.
	sum.Distance = distance
	sum.HorizontalDistance = horizontalDistance
	sum.MinimumAltitude = minimumAltitude
	sum.MaximumAltitude = maximumAltitude
	sum.ElevationGain = elevationGain
	sum.ElevationLose = elevationLose
	sum.UpwardsDistance = upwardsDistance
	sum.DownwardsDistance = downwardsDistance
	sum.TimeDataValid = timeDataValid
	sum.StartTime = startTime
	sum.EndTime = endTime
	sum.MovingTime = movingTime
	sum.UpwardsTime = upwardsTime
	sum.DownwardsTime = downwardsTime
}
// GetElevationGain - Implement the TrackSummaryProvider interface for TrackSummary
func (sum TrackSummary) GetElevationGain() float32 {
	return sum.ElevationGain
}

// GetElevationLose - Implement the TrackSummaryProvider interface for TrackSummary
func (sum TrackSummary) GetElevationLose() float32 {
	return sum.ElevationLose
}

// GetUpwardsDistance - Implement the TrackSummaryProvider interface for TrackSummary
func (sum TrackSummary) GetUpwardsDistance() float64 {
	return sum.UpwardsDistance
}

// GetHorizontalDistance - Implement the TrackSummaryProvider interface for TrackSummary
func (sum TrackSummary) GetHorizontalDistance() float64 {
	return sum.HorizontalDistance
}

// GetDownwardsDistance - Implement the TrackSummaryProvider interface for TrackSummary
func (sum TrackSummary) GetDownwardsDistance() float64 {
	return sum.DownwardsDistance
}

// GetDistance - Implement the TrackSummaryProvider interface for TrackSummary
func (sum TrackSummary) GetDistance() float64 {
	return sum.Distance
}

// GetAltitudeRange - Implement the TrackSummaryProvider interface for TrackSummary.
// The range is the difference between maximum and minimum altitude.
func (sum TrackSummary) GetAltitudeRange() float32 {
	return sum.MaximumAltitude - sum.MinimumAltitude
}

// GetMaximumAltitude - Implement the TrackSummaryProvider interface for TrackSummary
func (sum TrackSummary) GetMaximumAltitude() float32 {
	return sum.MaximumAltitude
}

// GetMinimumAltitude - Implement the TrackSummaryProvider interface for TrackSummary
func (sum TrackSummary) GetMinimumAltitude() float32 {
	return sum.MinimumAltitude
}

// GetStartTime - Implement the TrackSummaryProvider interface for TrackSummary
func (sum TrackSummary) GetStartTime() time.Time {
	return sum.StartTime
}

// GetEndTime - Implement the TrackSummaryProvider interface for TrackSummary
func (sum TrackSummary) GetEndTime() time.Time {
	return sum.EndTime
}

// GetTimeDataValid - Implement the TrackSummaryProvider interface for TrackSummary
func (sum TrackSummary) GetTimeDataValid() bool {
	return sum.TimeDataValid
}

// GetMovingTime - Implement the TrackSummaryProvider interface for TrackSummary
func (sum TrackSummary) GetMovingTime() time.Duration {
	return sum.MovingTime
}

// GetUpwardsTime - Implement the TrackSummaryProvider interface for TrackSummary
func (sum TrackSummary) GetUpwardsTime() time.Duration {
	return sum.UpwardsTime
}

// GetDownwardsTime - Implement the TrackSummaryProvider interface for TrackSummary
func (sum TrackSummary) GetDownwardsTime() time.Duration {
	return sum.DownwardsTime
}

// GetAvarageSpeed - Implement the TrackSummaryProvider interface for TrackSummary.
// Returns 0 when time data is invalid or no moving time was recorded.
//
// NOTE(review): the duration is truncated to whole seconds before the
// division; a MovingTime below one second yields a zero divisor and
// therefore +Inf — confirm whether sub-second tracks can occur.
func (sum TrackSummary) GetAvarageSpeed() float64 {
	if sum.TimeDataValid && sum.MovingTime > 0 {
		return sum.Distance / float64(sum.MovingTime/time.Second)
	}
	return 0
}

// GetUpwardsSpeed - Implement the TrackSummaryProvider interface for TrackSummary.
// Returns 0 when time data is invalid or no upwards time was recorded.
// The same whole-second truncation as in GetAvarageSpeed applies.
func (sum TrackSummary) GetUpwardsSpeed() float64 {
	if sum.TimeDataValid && sum.UpwardsTime > 0 {
		return sum.UpwardsDistance / float64(sum.UpwardsTime/time.Second)
	}
	return 0
}

// GetDownwardsSpeed - Implement the TrackSummaryProvider interface for TrackSummary.
// Returns 0 when time data is invalid or no downwards time was recorded.
// The same whole-second truncation as in GetAvarageSpeed applies.
func (sum TrackSummary) GetDownwardsSpeed() float64 {
	if sum.TimeDataValid && sum.DownwardsTime > 0 {
		return sum.DownwardsDistance / float64(sum.DownwardsTime/time.Second)
	}
	return 0
}
// TrackFile - A struct to handle track files
type TrackFile struct {
	TrackSummary
	FilePath       string
	Name           string
	Description    string
	NumberOfTracks int
	Tracks         []Track
}

// NewTrackFile builds a TrackFile for the given file path; all other
// fields keep their zero values.
func NewTrackFile(filePath string) TrackFile {
	return TrackFile{FilePath: filePath}
}
// Track - the struct to handle track info in gpsa.
// A Track aggregates its segments and embeds their combined summary.
type Track struct {
	TrackSummary
	Name             string
	Description      string
	NumberOfSegments int
	TrackSegments    []TrackSegment
}

// TrackSegment - the struct to handle track segment info in gpsa.
// A segment is a list of points plus the summary over those points.
type TrackSegment struct {
	TrackSummary
	TrackPoints []TrackPoint
}
// TrackPoint - the struct to handle track point info in gpsa.
//
// NOTE(review): the *Before/*Next fields presumably relate this point to
// its predecessor/successor on the track — confirm against the code that
// fills them.
type TrackPoint struct {
	Number                   int
	Elevation                float32
	Latitude                 float32
	Longitude                float32
	Time                     time.Time
	TimeValid                bool
	HorizontalDistanceBefore float64
	HorizontalDistanceNext   float64
	DistanceNext             float64
	DistanceBefore           float64
	DistanceToThisPoint      float64
	CorectedElevation        float32
	VerticalDistanceBefore   float32
	VerticalDistanceNext     float32
	CountUpwards             bool
	CountDownwards           bool
	CountMoving              bool
	MovingTime               time.Duration
	TimeDurationBefore       time.Duration
	TimeDurationNext         time.Duration
	UpwardsTime              time.Duration
	DownwardsTime            time.Duration
	AvarageSpeed             float64
	SpeedBefore              float64
	SpeedNext                float64
}
// GetDistance - Implement the TrackSummaryProvider interface for TrackPoint.
// For a single point this is the distance covered arriving at it.
func (pnt TrackPoint) GetDistance() float64 {
	return pnt.DistanceBefore
}

// GetHorizontalDistance - Implement the TrackSummaryProvider interface for TrackPoint
func (pnt TrackPoint) GetHorizontalDistance() float64 {
	return pnt.HorizontalDistanceBefore
}

// GetAltitudeRange - Implement the TrackSummaryProvider interface for TrackPoint.
// A single point spans no altitude range, so this is always 0.
func (pnt TrackPoint) GetAltitudeRange() float32 {
	return 0.0
}

// GetMaximumAltitude - Implement the TrackSummaryProvider interface for TrackPoint
func (pnt TrackPoint) GetMaximumAltitude() float32 {
	return pnt.Elevation
}

// GetMinimumAltitude - Implement the TrackSummaryProvider interface for TrackPoint
func (pnt TrackPoint) GetMinimumAltitude() float32 {
	return pnt.Elevation
}

// GetElevationGain - Implement the TrackSummaryProvider interface for TrackPoint.
// Only a positive vertical delta on a moving point counts as gain.
func (pnt TrackPoint) GetElevationGain() float32 {
	if pnt.VerticalDistanceNext > 0 && pnt.CountMoving {
		return pnt.VerticalDistanceNext
	}
	return 0
}

// GetElevationLose - Implement the TrackSummaryProvider interface for TrackPoint.
// Note: the raw (negative) vertical delta is returned, not its magnitude.
func (pnt TrackPoint) GetElevationLose() float32 {
	if pnt.VerticalDistanceNext < 0 && pnt.CountMoving {
		return pnt.VerticalDistanceNext
	}
	return 0
}

// GetUpwardsDistance - Implement the TrackSummaryProvider interface for TrackPoint
func (pnt TrackPoint) GetUpwardsDistance() float64 {
	if pnt.CountUpwards && pnt.CountMoving {
		return pnt.DistanceBefore
	}
	return 0
}

// GetDownwardsDistance - Implement the TrackSummaryProvider interface for TrackPoint
func (pnt TrackPoint) GetDownwardsDistance() float64 {
	if pnt.CountDownwards && pnt.CountMoving {
		return pnt.DistanceBefore
	}
	return 0
}

// GetStartTime - Implement the TrackSummaryProvider interface for TrackPoint.
// A point's start and end time are both its own timestamp.
func (pnt TrackPoint) GetStartTime() time.Time {
	return pnt.Time
}

// GetEndTime - Implement the TrackSummaryProvider interface for TrackPoint
func (pnt TrackPoint) GetEndTime() time.Time {
	return pnt.Time
}

// GetTimeDataValid - Implement the TrackSummaryProvider interface for TrackPoint
func (pnt TrackPoint) GetTimeDataValid() bool {
	return pnt.TimeValid
}

// GetMovingTime - Implement the TrackSummaryProvider interface for TrackPoint
func (pnt TrackPoint) GetMovingTime() time.Duration {
	return pnt.MovingTime
}

// GetUpwardsTime - Implement the TrackSummaryProvider interface for TrackPoint
func (pnt TrackPoint) GetUpwardsTime() time.Duration {
	return pnt.UpwardsTime
}

// GetDownwardsTime - Implement the TrackSummaryProvider interface for TrackPoint
func (pnt TrackPoint) GetDownwardsTime() time.Duration {
	return pnt.DownwardsTime
}

// GetAvarageSpeed - Implement the TrackSummaryProvider interface for TrackPoint
func (pnt TrackPoint) GetAvarageSpeed() float64 {
	return pnt.AvarageSpeed
}

// GetUpwardsSpeed - Implement the TrackSummaryProvider interface for TrackPoint.
// Only moving points heading upwards report a speed; all others return 0.
func (pnt TrackPoint) GetUpwardsSpeed() float64 {
	if pnt.CountMoving && pnt.CountUpwards {
		return pnt.AvarageSpeed
	}
	return 0
}

// GetDownwardsSpeed - Implement the TrackSummaryProvider interface for TrackPoint.
// Only moving points heading downwards report a speed; all others return 0.
func (pnt TrackPoint) GetDownwardsSpeed() float64 {
	if pnt.CountMoving && pnt.CountDownwards {
		return pnt.AvarageSpeed
	}
	return 0
}
Package binding defines interfaces for protocol bindings.
NOTE: Most applications that emit or consume events should use the ../client
package, which provides a simpler API to the underlying binding.
The interfaces in this package provide extra encoding and protocol information
to allow efficient forwarding and end-to-end reliable delivery between a
Receiver and a Sender belonging to different bindings. This is useful for
intermediary applications that route or forward events, but not necessary for
most "endpoint" applications that emit or consume events.
Protocol Bindings
A protocol binding usually implements a Message, a Sender and Receiver, a StructuredWriter and a BinaryWriter (depending on the supported encodings of the protocol) and an Write[ProtocolMessage] method.
Read and write events
The core of this package is the binding.Message interface.
Through binding.MessageReader, it defines how to read a protocol-specific message for an
encoded event in structured mode or binary mode.
The entity who receives a protocol specific data structure representing a message
(e.g. an HttpRequest) encapsulates it in a binding.Message implementation using a NewMessage method (e.g. http.NewMessage).
Then the entity that wants to send the binding.Message back on the wire,
translates it back to the protocol specific data structure (e.g. a Kafka ConsumerMessage), using
the writers BinaryWriter and StructuredWriter specific to that protocol.
Binding implementations expose their writers
through a specific Write[ProtocolMessage] function (e.g. kafka.EncodeProducerMessage),
in order to simplify the encoding process.
The encoding process can be customized in order to mutate the final result with binding.TransformerFactory.
A bunch of these are provided directly by the binding/transformer module.
Usually binding.Message implementations can be encoded only once, because the encoding process drains the message itself.
In order to consume a message several times, the binding/buffering package provides several APIs to buffer the Message.
A message can be converted to an event.Event using binding.ToEvent() method.
An event.Event can be used as Message casting it to binding.EventMessage.
In order to simplify the encoding process for each protocol, this package provide several utility methods like binding.Write and binding.DirectWrite.
The binding.Write method tries to preserve the structured/binary encoding, in order to be as efficient as possible.
Messages can eventually be wrapped to change their behaviours and bind their lifecycle, like the binding.FinishMessage.
Every Message wrapper implements the MessageWrapper interface
Sender and Receiver
A Receiver receives protocol-specific messages and wraps them into binding.Message implementations.
A Sender converts arbitrary Message implementations to a protocol-specific form using the protocol specific Write method
and sends them.
Message and ExactlyOnceMessage provide methods to allow acknowledgments to
propagate when a reliable message is forwarded from a Receiver to a Sender.
QoS 0 (unreliable), 1 (at-least-once) and 2 (exactly-once) are supported.
Transport
A binding implementation providing Sender and Receiver implementations can be used as a Transport through the BindingTransport adapter.
*/
package binding | vendor/github.com/cloudevents/sdk-go/v2/binding/doc.go | 0.865622 | 0.577078 | doc.go | starcoder |
package circularqueue
import "errors"
const (
	// initSize is the initial buffer capacity used by NewCircularQueue.
	initSize = 32
)

// ErrEmptyQueue tells you a CircularQueue is empty.
var ErrEmptyQueue = errors.New("CircularQueue is empty")
// CircularQueue allocates new memory when necessary, so a Push never fails.
//
// Invariants (see hasWritten/isFull): readableIndex == writableIndex means
// the queue is empty; writableIndex may temporarily equal len(buffer) when
// the queue filled up while readableIndex was 0 — that parked state marks
// the queue as full.
type CircularQueue struct {
	buffer        []interface{}
	readableIndex int // index of the oldest unread item
	writableIndex int // slot the next Push writes to (or len(buffer), see above)
}
// NewCircularQueue creates a CircularQueue with the default initial
// capacity (initSize). Usually, you use this function.
func NewCircularQueue() *CircularQueue {
	return NewCircularQueueWithSize(initSize)
}
// NewCircularQueueWithSize creates a CircularQueue whose initial buffer
// holds s items. Both indices start at zero, i.e. the queue is empty.
func NewCircularQueueWithSize(s int) *CircularQueue {
	q := new(CircularQueue)
	q.buffer = make([]interface{}, s)
	return q
}
// Len returns item count.
func (b *CircularQueue) Len() int {
	if b.IsEmpty() {
		return 0
	}
	var length int
	if b.readableIndex < b.writableIndex {
		// Contiguous region [readableIndex, writableIndex).
		length = b.writableIndex - b.readableIndex
	} else if b.readableIndex > b.writableIndex {
		// Wrapped: tail of the buffer plus the prefix before writableIndex.
		length = len(b.buffer) - b.readableIndex + b.writableIndex
	}
	return length
}
// Push pushes an item into this queue.
// Do not worry if this queue is full: it grows automatically.
func (b *CircularQueue) Push(m interface{}) {
	// Grow first if needed, so the write below always has a free slot.
	b.ensureWritableSpace()
	b.buffer[b.writableIndex] = m
	b.hasWritten()
}
// ensureWritableSpace grows the backing buffer when the queue is full.
func (b *CircularQueue) ensureWritableSpace() {
	if b.isFull() {
		b.makeSpace()
	}
}
// makeSpace reallocates the backing buffer (to 2*cap+1) and linearizes the
// stored items so that readableIndex restarts at 0.
func (b *CircularQueue) makeSpace() {
	buf := make([]interface{}, 1+cap(b.buffer)*2)
	length := b.Len()
	if b.readableIndex < b.writableIndex {
		// Items are contiguous: a single copy suffices.
		copy(buf, b.buffer[b.readableIndex:b.writableIndex])
		b.readableIndex = 0
		b.writableIndex = length
	} else if b.readableIndex > b.writableIndex {
		// Items wrap around: copy the tail of the buffer, then the head.
		copy(buf, b.buffer[b.readableIndex:len(b.buffer)])
		copy(buf[len(b.buffer)-b.readableIndex:], b.buffer[:b.writableIndex])
		b.readableIndex = 0
		b.writableIndex = length
	}
	b.buffer = buf
}
// hasWritten advances writableIndex after a Push. When the index runs off
// the end it wraps to 0 — unless readableIndex is still 0, in which case
// it is parked at len(buffer) to mark the queue as full (see isFull).
func (b *CircularQueue) hasWritten() {
	b.writableIndex++
	if b.writableIndex >= len(b.buffer) {
		if b.readableIndex > 0 {
			b.writableIndex = 0
		}
	}
}
// IsEmpty returns true if this queue is empty.
func (b *CircularQueue) IsEmpty() bool {
	return b.readableIndex == b.writableIndex
}
// isFull reports whether the next Push needs more space: either the
// writableIndex was parked at len(buffer) (see hasWritten), or writing one
// more item would collide with readableIndex.
func (b *CircularQueue) isFull() bool {
	return (b.readableIndex == 0 && b.writableIndex == len(b.buffer)) ||
		b.writableIndex+1 == b.readableIndex
}
// Peek peeks the first readable item in this queue. It does not modify this queue.
// It returns ErrEmptyQueue when there is nothing to read.
func (b *CircularQueue) Peek() (interface{}, error) {
	if b.IsEmpty() {
		return nil, ErrEmptyQueue
	}
	return b.buffer[b.readableIndex], nil
}
// Retrieve removes the first readable item in this queue.
// It returns ErrEmptyQueue when there is nothing to remove.
func (b *CircularQueue) Retrieve() error {
	if b.IsEmpty() {
		return ErrEmptyQueue
	}
	b.buffer[b.readableIndex] = nil // GC could collect this item soon.
	b.readableIndex++
	// writableIndex can only equal len(buffer) in the parked "full" state
	// (see hasWritten); a slot is free now, so it may wrap to 0.
	if b.writableIndex >= len(b.buffer) {
		b.writableIndex = 0
	}
	if b.readableIndex >= len(b.buffer) {
		b.readableIndex = 0
	}
	return nil
}
// Pop pops a item.
func (b *CircularQueue) Pop() (interface{}, error) {
m, err := b.Peek()
if err != nil {
return nil, err
}
err = b.Retrieve()
if err != nil {
return nil, err
}
return m, nil
} | circularqueue.go | 0.686685 | 0.425009 | circularqueue.go | starcoder |
package bufio
import (
"bytes"
"io"
)
// LimitedReader implements a limited io.WriterTo.
// It wraps a Reader and stops after at most N bytes have been consumed,
// reporting io.EOF once the budget is exhausted.
type LimitedReader struct {
	*Reader
	N int64 // remaining byte budget; decremented by every read/write
}
// Read reads data into p.
// It returns the number of bytes read into p.
// The bytes are taken from at most one Read on the underlying Reader,
// hence n may be less than len(p).
// To read exactly len(p) bytes, use io.ReadFull(b, p).
// At EOF, the count will be zero and err will be io.EOF.
func (b *LimitedReader) Read(p []byte) (n int, err error) {
	// Budget exhausted: report EOF without touching the underlying Reader.
	if b.N <= 0 {
		err = io.EOF
		return
	}
	// Never ask for more than the remaining budget.
	if int64(len(p)) > b.N {
		p = p[:b.N]
	}
	n, err = b.Reader.Read(p)
	b.N -= int64(n)
	return
}
// WriteTo implements io.WriterTo.
// This may make multiple calls to the Read method of the underlying Reader.
// If the underlying reader supports the WriteTo method,
// this calls the underlying WriteTo without buffering.
func (b *LimitedReader) WriteTo(w io.Writer) (n int64, err error) {
	if b.N <= 0 {
		return
	}
	// Drain whatever is already buffered first.
	n, err = b.writeBuf(w)
	if b.N <= 0 || err != nil {
		return
	}
	if b.w-b.r < len(b.buf) {
		b.fill() // buffer not full
	}
	// Alternate between flushing the buffer and refilling it until the
	// budget runs out, an error occurs, or the source is exhausted.
	for b.r < b.w {
		// b.r < b.w => buffer is not empty
		m, err := b.writeBuf(w)
		n += m
		if b.N <= 0 || err != nil {
			return n, err
		}
		b.fill() // buffer is empty
	}
	// EOF from the source is not an error for WriteTo: the copy just ends.
	if b.err == io.EOF {
		b.err = nil
	}
	return n, b.readErr()
}
// writeBuf writes the Reader's buffer to the writer.
// At most b.N bytes leave the buffer; the read position advances and the
// remaining budget shrinks by the number of bytes actually written.
func (b *LimitedReader) writeBuf(w io.Writer) (n int64, err error) {
	// l is the end of the writable window: the buffered end b.w, clamped
	// so that no more than b.N bytes are written.
	l := int64(b.w)
	if int64(b.w-b.r) > b.N {
		l = b.N + int64(b.r)
	}
	nw, err := w.Write(b.buf[b.r:l])
	if nw < 0 {
		panic(errNegativeWrite)
	}
	b.r += nw
	n = int64(nw)
	b.N -= n
	return
}
// NewReaderWithBuf returns a new Reader using the specified buffer.
//
// NOTE(review): the underlying source is reset to nil — confirm that
// callers attach a source before reading.
func NewReaderWithBuf(buf []byte) *Reader {
	r := new(Reader)
	r.reset(buf, nil)
	return r
}
// GetBuf returns the underlying buffer.
// The returned slice is the live internal buffer, not a copy: its contents
// change with every read on the Reader.
func (b *Reader) GetBuf() []byte {
	return b.buf
}
// ReadSlice reads until the first occurrence of delim in the input,
// returning a slice pointing at the bytes in the buffer.
// The bytes stop being valid at the next read.
// If ReadSlice encounters an error before finding a delimiter,
// it returns all the data in the buffer and the error itself (often io.EOF).
// ReadSlice fails with error ErrBufferFull if the buffer fills without a delim.
// Because the data returned from ReadSlice will be overwritten
// by the next I/O operation, most clients should use
// ReadBytes or ReadString instead.
// ReadSlice returns err != nil if and only if line does not end in delim.
// The search never looks past the remaining byte budget b.N.
func (b *LimitedReader) ReadSlice(delim byte) (line []byte, err error) {
	s := 0 // search start index
	for {
		// l bounds the searchable window: the buffered end b.w, clamped so
		// at most b.N bytes are considered.
		l := int64(b.w)
		if int64(b.w-b.r) > b.N {
			l = b.N + int64(b.r)
		}
		// Search buffer.
		if i := bytes.IndexByte(b.buf[b.r+s:l], delim); i >= 0 {
			i += s
			line = b.buf[b.r : b.r+i+1]
			b.r += i + 1
			break
		}
		// Pending error?
		if b.err != nil {
			line = b.buf[b.r:l]
			b.r = int(l)
			err = b.readErr()
			break
		}
		s = int(l) - b.r // do not rescan area we scanned before
		// Budget reached before a delimiter: return what is allowed with EOF.
		if int64(s) >= b.N {
			line = b.buf[b.r:l]
			b.r = int(l)
			err = io.EOF
			break
		}
		// Buffer full?
		if b.Buffered() >= len(b.buf) {
			b.r = b.w
			line = b.buf
			err = ErrBufferFull
			break
		}
		b.fill() // buffer is not full
	}
	// Handle last byte, if any.
	if i := len(line) - 1; i >= 0 {
		b.lastByte = int(line[i])
		b.lastRuneSize = -1
	}
	// Everything handed back in line counts against the remaining budget.
	b.N -= int64(len(line))
	return
}
package wspr
import (
"context"
"errors"
"fmt"
"log"
"strings"
"time"
)
// Send transmits the given transmission using the given functions to activate the transmitter and to transmit the symbol.
// It blocks until the next transmit window (see isTransmitStart), keys the
// transmitter once the first symbol's output is set, and always deactivates
// it on return. It returns false when ctx is cancelled before or during the
// transmission, and true after a completed transmission.
func Send(ctx context.Context, activateTransmitter func(bool), transmitSymbol func(Symbol), transmission Transmission) bool {
	defer activateTransmitter(false)
	if !waitForTransmitStart(ctx) {
		return false
	}
	log.Print("transmission start")
	for i, symbol := range transmission {
		fmt.Print(".")
		transmitSymbol(symbol)
		// Key the transmitter only after the first symbol has been set.
		if i == 0 {
			activateTransmitter(true)
		}
		// Hold each symbol for one symbol period, or abort on cancellation.
		select {
		case <-time.After(SymbolDuration):
		case <-ctx.Done():
			return false
		}
	}
	fmt.Println()
	log.Print("transmission end")
	return true
}
// waitForTransmitStart polls once per second until the wall clock reaches
// the start of a transmit window (see isTransmitStart). It returns false
// when ctx is cancelled first, true once the window begins.
func waitForTransmitStart(ctx context.Context) bool {
	for {
		log.Print("waiting for next transmission cycle")
		select {
		case <-ctx.Done():
			return false
		case now := <-time.After(1 * time.Second):
			if isTransmitStart(now) {
				return true
			}
		}
	}
}
// isTransmitStart reports whether t marks the start of a transmit window:
// the first second of an even minute.
func isTransmitStart(t time.Time) bool {
	if t.Second() != 0 {
		return false
	}
	return t.Minute()%2 == 0
}
// Symbol in WSPR. The value represents the delta to the base frequency.
type Symbol float64

// symbolDelta is the tone spacing in Hz (12000 / 8192 ≈ 1.465 Hz).
const symbolDelta = float64(12000) / float64(8192)

// The four WSPR symbols, spaced symbolDelta apart.
const (
	Sym0 = Symbol(0.0 * symbolDelta)
	Sym1 = Symbol(1.0 * symbolDelta)
	Sym2 = Symbol(2.0 * symbolDelta)
	Sym3 = Symbol(3.0 * symbolDelta)
)

// Symbols contains all WSPR symbols, indexed by their symbol value 0-3.
var Symbols = []Symbol{Sym0, Sym1, Sym2, Sym3}

// SymbolDuration is the duration of one WSPR symbol (8192/12000 s ≈ 683 ms).
var SymbolDuration = (8192 * 1000 / 12) * time.Microsecond

// Transmission is one complete WSPR message of 162 channel symbols.
type Transmission [162]Symbol
// ToTransmission encodes callsign, locator and transmit power (dBm) into
// the 162-symbol WSPR transmission: pack, compress, convolutionally encode,
// interleave, and merge with the sync vector.
func ToTransmission(callsign string, locator string, dBm int) (Transmission, error) {
	packedCall, err := packCallsign(callsign)
	if err != nil {
		return Transmission{}, err
	}
	packedLoc, err := packLocator(locator)
	if err != nil {
		return Transmission{}, err
	}
	bits := compress(packedCall, packPower(packedLoc, dBm))
	return synchronize(interleave(calcParity(bits))), nil
}
// packCallsign encodes a callsign into the 28-bit integer used by WSPR:
// the callsign is first aligned to the fixed 6-character layout, then each
// character is folded in with a mixed-radix scheme.
func packCallsign(callsign string) (uint32, error) {
	if len(callsign) > 6 {
		return 0, errors.New("callsign too long (> 6)")
	}
	aligned, err := alignCallsign(callsign)
	if err != nil {
		return 0, err
	}
	// Mixed radix, matching the alignment rules: position 1 is a letter or
	// digit (radix 36), position 2 a digit (radix 10), positions 3-5 a
	// letter or space (radix 27, values offset by 10).
	packed := charValue(aligned[0])
	packed = packed*36 + charValue(aligned[1])
	packed = packed*10 + charValue(aligned[2])
	packed = packed*27 + (charValue(aligned[3]) - 10)
	packed = packed*27 + (charValue(aligned[4]) - 10)
	packed = packed*27 + (charValue(aligned[5]) - 10)
	// Keep only the 28 bits defined by the protocol.
	packed = packed & 0x0FFFFFFF
	return packed, nil
}
// alignCallsign normalizes a callsign into the fixed 6-byte layout the
// WSPR packing expects: an optional leading space so the mandatory digit
// lands in the third position, upper-cased, space-padded to length 6.
// It validates the per-position character classes and returns an error
// for any callsign that does not fit the layout.
func alignCallsign(callsign string) ([]byte, error) {
	// Guard the callsign[1] access below; the previous version panicked on
	// inputs shorter than two characters.
	if len(callsign) < 2 {
		return []byte{}, errors.New("callsign too short (< 2)")
	}
	aligned := callsign
	if isNumber(callsign[1]) {
		aligned = " " + aligned
	}
	if len(aligned) > 6 {
		return []byte{}, errors.New("callsign too long (> 6)")
	}
	for len(aligned) < 6 {
		aligned += " "
	}
	aligned = strings.ToUpper(aligned)
	if !(isNumber(aligned[0]) || isLetter(aligned[0]) || isSpace(aligned[0])) {
		return []byte{}, errors.New("wrong character at callsign start")
	}
	if !isLetter(aligned[1]) {
		return []byte{}, errors.New("callsign must have a letter in the prefix")
	}
	if !isNumber(aligned[2]) {
		return []byte{}, errors.New("callsign must have number at 2nd or 3rd place")
	}
	if !(isSuffix(aligned[3]) && isSuffix(aligned[4]) && isSuffix(aligned[5])) {
		return []byte{}, errors.New("callsign must only have letters in the suffix")
	}
	return []byte(aligned), nil
}
// packLocator encodes the first four characters of a Maidenhead locator
// into the 15-bit integer used by WSPR. The locator must start with two
// letters in A-R followed by two digits.
func packLocator(loc string) (uint32, error) {
	if len(loc) < 4 {
		return 0, errors.New("locator must have at least four characters")
	}
	// Only the 4-character square is used; extra precision is ignored.
	normalized := strings.ToUpper(string(loc[0:4]))
	if !(isLocatorLetter(normalized[0]) && isLocatorLetter(normalized[1])) {
		return 0, errors.New("locator must have letters a the 1st and the 2nd position")
	}
	if !(isNumber(normalized[2]) && isNumber(normalized[3])) {
		return 0, errors.New("locator must have numbers at the 3rd and 4th position")
	}
	// v yields the numeric value of character i; the two field letters are
	// shifted down by 10 so they count from 0.
	v := func(i int) uint32 {
		if i < 2 {
			return charValue(normalized[i]) - 10
		}
		return charValue(normalized[i])
	}
	packed := (179-10*v(0)-v(2))*180 + 10*v(1) + v(3)
	// Keep only the 15 bits defined by the protocol.
	packed = packed & 0x00007FFF
	return packed, nil
}
// packPower folds the transmit power in dBm into the packed locator word;
// the power occupies the low seven bits as dBm + 64.
func packPower(packedLocator uint32, dBm int) uint32 {
	shifted := packedLocator << 7
	return shifted + uint32(dBm+64)
}
// isNumber reports whether b is an ASCII digit.
func isNumber(b byte) bool {
	return '0' <= b && b <= '9'
}

// isLetter reports whether b is an upper-case ASCII letter.
func isLetter(b byte) bool {
	return 'A' <= b && b <= 'Z'
}

// isLocatorLetter reports whether b is a letter valid in a Maidenhead
// locator field (A through R).
func isLocatorLetter(b byte) bool {
	return 'A' <= b && b <= 'R'
}

// isSpace reports whether b is the ASCII space character.
func isSpace(b byte) bool {
	return b == ' '
}

// isSuffix reports whether b may appear in a callsign suffix: an
// upper-case letter or a padding space.
func isSuffix(b byte) bool {
	return isLetter(b) || isSpace(b)
}
// charValue maps a callsign/locator character to its WSPR numeric value:
// digits map to 0-9, letters to 10-35, and the padding space to 36.
func charValue(b byte) uint32 {
	if isNumber(b) {
		return uint32(b - '0')
	}
	if isSpace(b) {
		return 36
	}
	return uint32(b-'A') + 10
}
// compress packs the 28-bit callsign word n and the 22-bit locator/power
// word m into the first 50 bits of an 11-byte message buffer; the
// remaining bits stay zero. Byte truncation performs the masking the
// protocol requires.
func compress(n, m uint32) (c [11]byte) {
	c[0] = byte(n >> 20)
	c[1] = byte(n >> 12)
	c[2] = byte(n >> 4)
	c[3] = byte(n<<4) | byte((m>>18)&0x0F)
	c[4] = byte(m >> 10)
	c[5] = byte(m >> 2)
	c[6] = byte(m << 6)
	return
}
// calcParity runs the message bits through the rate-1/2, constraint-length
// 32 convolutional encoder: for every input bit the shift register is
// ANDed with each of the two generator polynomials and the parity
// (popcount mod 2) of the result becomes one output bit, giving two output
// bits per input bit.
//
// NOTE(review): the 11-byte input holds 88 bits, but parity has only 162
// entries. No out-of-range write happens only because every bit past bit
// 49 is zero for compress output (the register is all-zero from output
// index 162 on, so nothing is written there). A stray 1-bit beyond bit 49
// would index past the array — confirm callers always pass compress output.
func calcParity(c [11]byte) (parity [162]byte) {
	const (
		polynom1 = uint32(0xf2d05351)
		polynom2 = uint32(0xe4613c47)
	)
	var (
		reg0, reg1 uint32
	)
	parityIndex := 0
	for i := 0; i < len(c); i++ {
		for j := 7; j >= 0; j-- {
			// Shift the next message bit (MSB first) into the register.
			reg0 = (reg0 << 1) | uint32((c[i]>>uint8(j))&0x01)
			reg1 = reg0
			result0 := reg0 & polynom1
			result1 := reg1 & polynom2
			// Count the set bits of each masked register.
			count0 := 0
			count1 := 0
			for k := 0; k < 32; k++ {
				if ((result0 >> uint8(k)) & 0x01) == 1 {
					count0++
				}
				if ((result1 >> uint8(k)) & 0x01) == 1 {
					count1++
				}
			}
			// Odd popcount produces a 1-bit; the array starts zeroed.
			if count0%2 == 1 {
				parity[parityIndex] = 1
			}
			parityIndex++
			if count1%2 == 1 {
				parity[parityIndex] = 1
			}
			parityIndex++
		}
	}
	return
}
// interleave scatters the 162 parity bits across the output using an
// 8-bit bit-reversal permutation: counting k upwards, each k whose
// bit-reversed value lands inside the array receives the next bit.
func interleave(parity [162]byte) (interleaved [162]byte) {
	pos := 0
	for pos < 162 {
		for k := 0; k < 256; k++ {
			// Reverse the 8 bits of k.
			var rev uint8
			b := uint8(k)
			for bit := 0; bit < 8; bit++ {
				rev = rev<<1 | b&1
				b >>= 1
			}
			if rev < 162 {
				interleaved[rev] = parity[pos]
				pos++
			}
		}
	}
	return
}
func synchronize(interleaved [162]byte) (transmission Transmission) {
syncWord := []byte{
1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0,
0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0,
1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1,
1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0,
}
for i := 0; i < len(interleaved); i++ {
transmission[i] = Symbols[syncWord[i]+2*interleaved[i]]
}
return
} | wspr/wspr.go | 0.618896 | 0.403214 | wspr.go | starcoder |
package main
import (
"math"
)
// Rnd abstracts a source of pseudo-random numbers. Float64 is
// presumably expected to return values in [0, 1) like math/rand's
// Float64 — the unit-sphere/disk helpers below map it to [-1, 1) with
// 2*x - 1. TODO confirm against the concrete implementation.
type Rnd interface {
	Float64() float64
}
/***********************
 * Vec3
 ************************/

// Vec3 defines a vector in 3D space
type Vec3 struct {
	X, Y, Z float64
}

// Scale returns a copy of v with every component multiplied by s.
func (v Vec3) Scale(s float64) Vec3 {
	return Vec3{v.X * s, v.Y * s, v.Z * s}
}

// Mult returns the component-wise product of v and w.
func (v Vec3) Mult(w Vec3) Vec3 {
	return Vec3{v.X * w.X, v.Y * w.Y, v.Z * w.Z}
}

// Sub returns the difference v - w.
func (v Vec3) Sub(w Vec3) Vec3 {
	return Vec3{v.X - w.X, v.Y - w.Y, v.Z - w.Z}
}

// Add returns the sum v + w.
func (v Vec3) Add(w Vec3) Vec3 {
	return Vec3{v.X + w.X, v.Y + w.Y, v.Z + w.Z}
}

// Length returns the Euclidean norm of v.
func (v Vec3) Length() float64 {
	return math.Sqrt(Dot(v, v))
}

// Unit returns the vector of length 1 pointing in the direction of v.
func (v Vec3) Unit() Vec3 {
	return v.Scale(1.0 / v.Length())
}

// Negate returns v with every component sign-flipped.
func (v Vec3) Negate() Vec3 {
	return Vec3{-v.X, -v.Y, -v.Z}
}

// Dot returns the scalar (dot) product of v1 and v2.
func Dot(v1 Vec3, v2 Vec3) float64 {
	return v1.X*v2.X + v1.Y*v2.Y + v1.Z*v2.Z
}

// Cross returns the vector (cross) product of v1 and v2.
func Cross(v1 Vec3, v2 Vec3) Vec3 {
	return Vec3{
		v1.Y*v2.Z - v1.Z*v2.Y,
		v1.Z*v2.X - v1.X*v2.Z,
		v1.X*v2.Y - v1.Y*v2.X,
	}
}

// Reflect mirrors v across the plane whose normal is n.
func (v Vec3) Reflect(n Vec3) Vec3 {
	return v.Sub(n.Scale(2.0 * Dot(v, n)))
}

/***********************
 * Point3
 ************************/

// Point3 defines a point in 3D space
type Point3 struct {
	X, Y, Z float64
}

// Translate returns the point reached by moving p along v.
func (p Point3) Translate(v Vec3) Point3 {
	return Point3{p.X + v.X, p.Y + v.Y, p.Z + v.Z}
}

// Sub returns the vector from p2 to p.
func (p Point3) Sub(p2 Point3) Vec3 {
	return Vec3{p.X - p2.X, p.Y - p2.Y, p.Z - p2.Z}
}

// Vec3 reinterprets p as a vector from the origin.
func (p Point3) Vec3() Vec3 {
	return Vec3{p.X, p.Y, p.Z}
}
/***********************
 * Ray
 ************************/

// Ray represents a ray defined by its origin and direction
type Ray struct {
	Origin    Point3
	Direction Vec3
	rnd       Rnd
}

// PointAt returns the point at parameter t along the ray; t == 0 yields
// the origin, larger t moves along Direction.
func (r *Ray) PointAt(t float64) Point3 {
	step := r.Direction.Scale(t)
	return r.Origin.Translate(step)
}
/***********************
 * Color
 ************************/

// Color defines the basic Red/Green/Blue as raw float64 values
type Color struct {
	R, G, B float64
}

var (
	// White is full intensity on all three channels.
	White = Color{1.0, 1.0, 1.0}
	// Black is the zero value: no intensity on any channel.
	Black = Color{}
)

// Scale scales the Color by the value (return a new Color)
func (c Color) Scale(t float64) Color {
	return Color{R: c.R * t, G: c.G * t, B: c.B * t}
}

// Mult multiplies 2 colors together (component by component multiplication)
func (c Color) Mult(c2 Color) Color {
	return Color{R: c.R * c2.R, G: c.G * c2.G, B: c.B * c2.B}
}

// Add adds the 2 colors (return a new color)
func (c Color) Add(c2 Color) Color {
	return Color{R: c.R + c2.R, G: c.G + c2.G, B: c.B + c2.B}
}

// PixelValue converts a raw Color into a 0x00RRGGBB pixel value with
// each channel clamped to [0, 255].
//
// The low-end clamp is a correctness fix: converting a negative float64
// to uint32 is implementation-defined in Go, so unclamped negative
// components could previously produce garbage pixels.
func (c Color) PixelValue() uint32 {
	r := uint32(math.Min(255.0, math.Max(0.0, c.R*255.99)))
	g := uint32(math.Min(255.0, math.Max(0.0, c.G*255.99)))
	b := uint32(math.Min(255.0, math.Max(0.0, c.B*255.99)))
	return (r << 16) | (g << 8) | b
}
/***********************
 * Hitable
 ************************/

// HitRecord captures the details of a single ray/object intersection.
type HitRecord struct {
	t        float64  // which t generated the hit
	p        Point3   // which point when hit
	normal   Vec3     // normal at that point
	material Material // the material associated to this record
}

// Hitable defines the interface of objects that can be hit by a ray
type Hitable interface {
	hit(r *Ray, tMin float64, tMax float64) (bool, *HitRecord)
}

// HitableList defines a simple list of hitable
type HitableList []Hitable

// hit scans every object in the list and keeps the intersection closest
// to the ray origin, narrowing the far limit as hits are found.
func (hl HitableList) hit(r *Ray, tMin float64, tMax float64) (bool, *HitRecord) {
	var best *HitRecord
	found := false
	limit := tMax
	for _, obj := range hl {
		ok, rec := obj.hit(r, tMin, limit)
		if !ok {
			continue
		}
		found = true
		best = rec
		limit = rec.t
	}
	return found, best
}
/***********************
* Utilities functions
************************/
func randomInUnitSphere(rnd Rnd) Vec3 {
for {
p := Vec3{2.0*rnd.Float64() - 1.0, 2.0*rnd.Float64() - 1.0, 2.0*rnd.Float64() - 1.0}
if Dot(p, p) < 1.0 {
return p
}
}
}
func randomInUnitDisk(rnd Rnd) Vec3 {
for {
p := Vec3{2.0*rnd.Float64() - 1.0, 2.0*rnd.Float64() - 1.0, 0}
if Dot(p, p) < 1.0 {
return p
}
}
} | model.go | 0.854627 | 0.615839 | model.go | starcoder |
package binary
import (
"fmt"
"image/color"
"reflect"
"github.com/flywave/gltf"
)
// MakeSliceBuffer returns the slice type associated with c and t and with the given element count.
// If buffer is a slice whose element type already matches the requested
// one, its storage is reused: it is extended, truncated, or returned
// unchanged so its length equals count.
func MakeSliceBuffer(c gltf.ComponentType, t gltf.AccessorType, count uint32, buffer interface{}) interface{} {
	if buffer == nil {
		return MakeSlice(c, t, count)
	}
	bc, bt, bn := Type(buffer)
	switch {
	case bn == 0 || bc != c || bt != t:
		// Wrong shape (or empty): start from a fresh slice.
		return MakeSlice(c, t, count)
	case bn < count:
		// Too short: append freshly zeroed elements.
		extra := reflect.ValueOf(MakeSlice(c, t, count-bn))
		return reflect.AppendSlice(reflect.ValueOf(buffer), extra).Interface()
	case bn > count:
		// Too long: reslice down.
		return reflect.ValueOf(buffer).Slice(0, int(count)).Interface()
	default:
		return buffer
	}
}
// MakeSlice returns the slice type associated with c and t and with the given element count.
// For example, if c is gltf.ComponentFloat and t is gltf.AccessorVec3
// then MakeSlice(c, t, 5) is equivalent to make([][3]float32, 5).
// It panics if c is not a known component type.
func MakeSlice(c gltf.ComponentType, t gltf.AccessorType, count uint32) interface{} {
	// Pick the element's base Go type from the component type.
	var tp reflect.Type
	switch c {
	case gltf.ComponentUbyte:
		tp = reflect.TypeOf((*uint8)(nil))
	case gltf.ComponentByte:
		tp = reflect.TypeOf((*int8)(nil))
	case gltf.ComponentUshort:
		tp = reflect.TypeOf((*uint16)(nil))
	case gltf.ComponentShort:
		tp = reflect.TypeOf((*int16)(nil))
	case gltf.ComponentUint:
		tp = reflect.TypeOf((*uint32)(nil))
	case gltf.ComponentFloat:
		tp = reflect.TypeOf((*float32)(nil))
	default:
		// Previously an unknown component left tp nil and crashed with
		// an opaque nil-pointer dereference below; fail loudly instead,
		// mirroring the explicit panic in Type.
		panic(fmt.Sprintf("go3mf: binary.MakeSlice expecting a known component type but got %d", c))
	}
	tp = tp.Elem()
	// Wrap the base type in the fixed-size array shape dictated by the
	// accessor type; scalars need no wrapping.
	switch t {
	case gltf.AccessorVec2:
		tp = reflect.ArrayOf(2, tp)
	case gltf.AccessorVec3:
		tp = reflect.ArrayOf(3, tp)
	case gltf.AccessorVec4:
		tp = reflect.ArrayOf(4, tp)
	case gltf.AccessorMat2:
		tp = reflect.ArrayOf(2, reflect.ArrayOf(2, tp))
	case gltf.AccessorMat3:
		tp = reflect.ArrayOf(3, reflect.ArrayOf(3, tp))
	case gltf.AccessorMat4:
		tp = reflect.ArrayOf(4, reflect.ArrayOf(4, tp))
	}
	return reflect.MakeSlice(reflect.SliceOf(tp), int(count), int(count)).Interface()
}
// Type returns the associated glTF type data.
// It panics if data is not an slice.
//
// Each case maps a concrete Go slice type to its glTF
// (component, accessor) pair: the element width selects the component
// type and the fixed-array nesting depth selects the accessor type.
// image/color types are accepted as aliases for the matching
// fixed-size arrays.
func Type(data interface{}) (c gltf.ComponentType, t gltf.AccessorType, count uint32) {
	v := reflect.ValueOf(data)
	if v.Kind() != reflect.Slice {
		panic(fmt.Sprintf("go3mf: binary.Type expecting a slice but got %s", v.Kind()))
	}
	count = uint32(v.Len())
	switch data.(type) {
	// signed 8-bit
	case []int8:
		c, t = gltf.ComponentByte, gltf.AccessorScalar
	case [][2]int8:
		c, t = gltf.ComponentByte, gltf.AccessorVec2
	case [][3]int8:
		c, t = gltf.ComponentByte, gltf.AccessorVec3
	case [][4]int8:
		c, t = gltf.ComponentByte, gltf.AccessorVec4
	case [][2][2]int8:
		c, t = gltf.ComponentByte, gltf.AccessorMat2
	case [][3][3]int8:
		c, t = gltf.ComponentByte, gltf.AccessorMat3
	case [][4][4]int8:
		c, t = gltf.ComponentByte, gltf.AccessorMat4
	// unsigned 8-bit
	case []uint8:
		c, t = gltf.ComponentUbyte, gltf.AccessorScalar
	case [][2]uint8:
		c, t = gltf.ComponentUbyte, gltf.AccessorVec2
	case [][3]uint8:
		c, t = gltf.ComponentUbyte, gltf.AccessorVec3
	case []color.RGBA, [][4]uint8:
		c, t = gltf.ComponentUbyte, gltf.AccessorVec4
	case [][2][2]uint8:
		c, t = gltf.ComponentUbyte, gltf.AccessorMat2
	case [][3][3]uint8:
		c, t = gltf.ComponentUbyte, gltf.AccessorMat3
	case [][4][4]uint8:
		c, t = gltf.ComponentUbyte, gltf.AccessorMat4
	// signed 16-bit
	case []int16:
		c, t = gltf.ComponentShort, gltf.AccessorScalar
	case [][2]int16:
		c, t = gltf.ComponentShort, gltf.AccessorVec2
	case [][3]int16:
		c, t = gltf.ComponentShort, gltf.AccessorVec3
	case [][4]int16:
		c, t = gltf.ComponentShort, gltf.AccessorVec4
	case [][2][2]int16:
		c, t = gltf.ComponentShort, gltf.AccessorMat2
	case [][3][3]int16:
		c, t = gltf.ComponentShort, gltf.AccessorMat3
	case [][4][4]int16:
		c, t = gltf.ComponentShort, gltf.AccessorMat4
	// unsigned 16-bit
	case []uint16:
		c, t = gltf.ComponentUshort, gltf.AccessorScalar
	case [][2]uint16:
		c, t = gltf.ComponentUshort, gltf.AccessorVec2
	case [][3]uint16:
		c, t = gltf.ComponentUshort, gltf.AccessorVec3
	case []color.RGBA64, [][4]uint16:
		c, t = gltf.ComponentUshort, gltf.AccessorVec4
	case [][2][2]uint16:
		c, t = gltf.ComponentUshort, gltf.AccessorMat2
	case [][3][3]uint16:
		c, t = gltf.ComponentUshort, gltf.AccessorMat3
	case [][4][4]uint16:
		c, t = gltf.ComponentUshort, gltf.AccessorMat4
	// unsigned 32-bit
	case []uint32:
		c, t = gltf.ComponentUint, gltf.AccessorScalar
	case [][2]uint32:
		c, t = gltf.ComponentUint, gltf.AccessorVec2
	case [][3]uint32:
		c, t = gltf.ComponentUint, gltf.AccessorVec3
	case [][4]uint32:
		c, t = gltf.ComponentUint, gltf.AccessorVec4
	case [][2][2]uint32:
		c, t = gltf.ComponentUint, gltf.AccessorMat2
	case [][3][3]uint32:
		c, t = gltf.ComponentUint, gltf.AccessorMat3
	case [][4][4]uint32:
		c, t = gltf.ComponentUint, gltf.AccessorMat4
	// 32-bit float
	case []float32:
		c, t = gltf.ComponentFloat, gltf.AccessorScalar
	case [][2]float32:
		c, t = gltf.ComponentFloat, gltf.AccessorVec2
	case [][3]float32:
		c, t = gltf.ComponentFloat, gltf.AccessorVec3
	case [][4]float32:
		c, t = gltf.ComponentFloat, gltf.AccessorVec4
	case [][2][2]float32:
		c, t = gltf.ComponentFloat, gltf.AccessorMat2
	case [][3][3]float32:
		c, t = gltf.ComponentFloat, gltf.AccessorMat3
	case [][4][4]float32:
		c, t = gltf.ComponentFloat, gltf.AccessorMat4
	default:
		panic(fmt.Sprintf("go3mf: binary.Type expecting a glTF supported type but got %s", v.Kind()))
	}
	return
}
package tokens
import (
"fmt"
"strings"
"github.com/pulumi/pulumi/pkg/util/contract"
)
// tokenBuffer is a parseable token buffer that simply carries a position.
type tokenBuffer struct {
	Tok Type
	Pos int
}

// newTokenBuffer wraps tok in a buffer positioned at its start.
func newTokenBuffer(tok Type) *tokenBuffer {
	return &tokenBuffer{Tok: tok, Pos: 0}
}

// Curr returns the not-yet-consumed tail of the token.
func (b *tokenBuffer) Curr() Type {
	return b.Tok[b.Pos:]
}

// From returns the portion of the token between from and the current position.
func (b *tokenBuffer) From(from int) Type {
	return b.Tok[from:b.Pos]
}

// Eat consumes the literal s, asserting that it is present.
func (b *tokenBuffer) Eat(s string) {
	contract.Assertf(b.MayEat(s), "Expected to eat '%v'", s)
}

// MayEat consumes the literal s if it is next in the buffer, reporting
// whether it did so.
func (b *tokenBuffer) MayEat(s string) bool {
	if !strings.HasPrefix(string(b.Curr()), s) {
		return false
	}
	b.Advance(len(s))
	return true
}

// Advance moves the position forward by the given number of bytes.
func (b *tokenBuffer) Advance(by int) {
	b.Pos += by
}

// Done reports whether the whole token has been consumed.
func (b *tokenBuffer) Done() bool {
	return b.Pos == len(b.Tok)
}

// Finish consumes the remainder of the token.
func (b *tokenBuffer) Finish() {
	b.Pos = len(b.Tok)
}
// typePartDelims are separator characters that are used to parse recursive types.
var typePartDelims = MapTypeSeparator + FunctionTypeParamSeparator + FunctionTypeSeparator

// parseNextType parses one type out of the given token, mutating the buffer in place and returning the resulting type
// token. This allows recursive parsing of complex decorated types below (like `map[[]string]func(func())`).
func parseNextType(b *tokenBuffer) Type {
	// First, check for decorated types.
	tok := b.Curr()
	if tok.Pointer() {
		ptr := parseNextPointerType(b)
		return ptr.Tok
	} else if tok.Array() {
		arr := parseNextArrayType(b)
		return arr.Tok
	} else if tok.Map() {
		mam := parseNextMapType(b)
		return mam.Tok
	} else if tok.Function() {
		fnc := parseNextFunctionType(b)
		return fnc.Tok
	}
	// Otherwise, we have either a qualified or simple (primitive) name. Since we might be deep in the middle
	// of parsing another token, however, we only parse up to any other decorator termination/separator tokens.
	s := string(tok)
	sep := strings.IndexAny(s, typePartDelims)
	if sep == -1 {
		// No delimiter anywhere: the remainder is one simple type name.
		b.Finish()
		return tok
	}
	// Stop just before the delimiter and leave it for the caller to eat.
	b.Advance(sep)
	return tok[:sep]
}
// PointerType is a type token that decorates an element type token to turn it into a pointer: `"*" <Elem>`.
type PointerType struct {
	Tok  Type // the full pointer type token.
	Elem Type // the element portion of the pointer type token.
}

const (
	PointerTypeDecors = PointerTypePrefix + "%v"
	PointerTypePrefix = "*"
)

// NewPointerTypeName creates a new pointer type name from an element type.
func NewPointerTypeName(elem TypeName) TypeName {
	return TypeName(fmt.Sprintf(PointerTypeDecors, elem))
}

// NewPointerTypeToken creates a new pointer type token from an element type.
func NewPointerTypeToken(elem Type) Type {
	return Type(fmt.Sprintf(PointerTypeDecors, elem))
}

// IsPointerType returns true if the given type token represents an encoded pointer type.
func IsPointerType(tok Type) bool {
	return strings.HasPrefix(tok.String(), PointerTypePrefix)
}

// ParsePointerType removes the pointer decorations from a token and returns its underlying type.
func ParsePointerType(tok Type) PointerType {
	buf := newTokenBuffer(tok)
	ptr := parseNextPointerType(buf)
	if !buf.Done() {
		contract.Failf("Did not expect anything extra after the pointer type %v; got: '%v'", tok, buf.Curr())
	}
	return ptr
}

// parseNextPointerType parses the next pointer type from the given buffer.
func parseNextPointerType(buf *tokenBuffer) PointerType {
	start := buf.Pos // remember where this token begins.
	buf.Eat(PointerTypePrefix) // consume the leading "*".
	elem := parseNextType(buf) // then the mandatory element type.
	contract.Assert(elem != "")
	return PointerType{Tok: buf.From(start), Elem: elem}
}
// ArrayType is a type token that decorates an element type token to turn it into an array: `"[]" <Elem>`.
type ArrayType struct {
	Tok  Type // the full array type token.
	Elem Type // the element portion of the array type token.
}

const (
	ArrayTypeDecors = ArrayTypePrefix + "%v"
	ArrayTypePrefix = "[]"
)

// NewArrayTypeName creates a new array type name from an element type.
func NewArrayTypeName(elem TypeName) TypeName {
	return TypeName(fmt.Sprintf(ArrayTypeDecors, elem))
}

// NewArrayTypeToken creates a new array type token from an element type.
func NewArrayTypeToken(elem Type) Type {
	return Type(fmt.Sprintf(ArrayTypeDecors, elem))
}

// IsArrayType returns true if the given type token represents an encoded array type.
func IsArrayType(tok Type) bool {
	return strings.HasPrefix(tok.String(), ArrayTypePrefix)
}

// ParseArrayType removes the array decorations from a token and returns its underlying type.
func ParseArrayType(tok Type) ArrayType {
	buf := newTokenBuffer(tok)
	arr := parseNextArrayType(buf)
	if !buf.Done() {
		contract.Failf("Did not expect anything extra after the array type %v; got: '%v'", tok, buf.Curr())
	}
	return arr
}

// parseNextArrayType parses the next array type from the given buffer.
func parseNextArrayType(buf *tokenBuffer) ArrayType {
	start := buf.Pos // remember where this token begins.
	buf.Eat(ArrayTypePrefix) // consume the leading "[]".
	elem := parseNextType(buf) // then the mandatory element type.
	contract.Assert(elem != "")
	return ArrayType{Tok: buf.From(start), Elem: elem}
}
// MapType is a type token that decorates a key and element type token to turn them into a map:
// `"map[" <Key> "]" <Elem>`.
type MapType struct {
	Tok  Type // the full map type token.
	Key  Type // the key portion of the map type token.
	Elem Type // the element portion of the map type token.
}

const (
	MapTypeDecors    = MapTypePrefix + "%v" + MapTypeSeparator + "%v"
	MapTypePrefix    = "map["
	MapTypeSeparator = "]"
)

// NewMapTypeName creates a new map type name from key and element types.
func NewMapTypeName(key TypeName, elem TypeName) TypeName {
	return TypeName(fmt.Sprintf(MapTypeDecors, key, elem))
}

// NewMapTypeToken creates a new map type token from key and element types.
func NewMapTypeToken(key Type, elem Type) Type {
	return Type(fmt.Sprintf(MapTypeDecors, key, elem))
}

// IsMapType returns true if the given type token represents an encoded map type.
func IsMapType(tok Type) bool {
	return strings.HasPrefix(tok.String(), MapTypePrefix)
}

// ParseMapType removes the map decorations from a token and returns its underlying type.
// It fails (via contract.Failf) if anything follows the map type.
func ParseMapType(tok Type) MapType {
	b := newTokenBuffer(tok)
	mam := parseNextMapType(b)
	if !b.Done() {
		contract.Failf("Did not expect anything extra after the map type %v; got: '%v'", tok, b.Curr())
	}
	return mam
}

// parseNextMapType parses the next map type from the given buffer.
func parseNextMapType(b *tokenBuffer) MapType {
	mark := b.Pos // remember where this token begins.
	b.Eat(MapTypePrefix) // eat the "map[" prefix.
	// Now parse the key part (recursively; keys may themselves be decorated).
	key := parseNextType(b)
	contract.Assert(key != "")
	// Next, we expect to find the "]" separator token; eat it.
	b.Eat(MapTypeSeparator)
	// Next, parse the element type part.
	elem := parseNextType(b)
	contract.Assert(elem != "")
	return MapType{Tok: b.From(mark), Key: key, Elem: elem}
}
}
// FunctionType is a type token that decorates a set of optional parameter and return tokens to turn them into a
// function type: `(" [ <Param1> [ "," <ParamN> ]* ] ")" [ <Return> ]`).
type FunctionType struct {
Tok Type // the full map type token.
Parameters []Type // the parameter parts of the type token.
Return *Type // the (optional) return part of the type token.
}
const (
FunctionTypeDecors = FunctionTypePrefix + "%v" + FunctionTypeSeparator + "%v"
FunctionTypePrefix = "("
FunctionTypeParamSeparator = ","
FunctionTypeSeparator = ")"
)
// NewFunctionTypeName creates a new function type token from parameter and return types.
func NewFunctionTypeName(params []TypeName, ret *TypeName) TypeName {
// Stringify the parameters (if any).
sparams := ""
for i, param := range params {
if i > 0 {
sparams += FunctionTypeParamSeparator
}
sparams += string(param)
}
// Stringify the return type (if any).
sret := ""
if ret != nil {
sret = string(*ret)
}
return TypeName(fmt.Sprintf(FunctionTypeDecors, sparams, sret))
}
// NewFunctionTypeToken creates a new function type token from parameter and return types.
func NewFunctionTypeToken(params []Type, ret *Type) Type {
// Stringify the parameters (if any).
sparams := ""
for i, param := range params {
if i > 0 {
sparams += FunctionTypeParamSeparator
}
sparams += string(param)
}
// Stringify the return type (if any).
sret := ""
if ret != nil {
sret = string(*ret)
}
return Type(fmt.Sprintf(FunctionTypeDecors, sparams, sret))
}
// IsFunctionType returns true if the given type token represents an encoded pointer type.
func IsFunctionType(tok Type) bool {
return strings.HasPrefix(tok.String(), FunctionTypePrefix)
}
// ParseFunctionType removes the function decorations from a token and returns its underlying type.
func ParseFunctionType(tok Type) FunctionType {
b := newTokenBuffer(tok)
fnc := parseNextFunctionType(b)
if !b.Done() {
contract.Failf("Did not expect anything extra after the function type %v; got: '%v'", tok, b.Curr())
}
return fnc
}
// parseNextFunctionType parses the next function type from the given token, returning any excess.
func parseNextFunctionType(b *tokenBuffer) FunctionType {
mark := b.Pos // remember the start of this token.
b.Eat(FunctionTypePrefix) // eat the function prefix "(".
// Parse out parameters until we encounter and eat a ")".
var params []Type
for !b.MayEat(FunctionTypeSeparator) {
next := parseNextType(b)
if next == "" {
contract.Assert(strings.HasPrefix(string(b.Curr()), FunctionTypeSeparator))
} else {
params = append(params, next)
// Eat the separator, if any, and keep going.
if !b.MayEat(FunctionTypeParamSeparator) {
contract.Assert(strings.HasPrefix(string(b.Curr()), FunctionTypeSeparator))
}
}
}
// Next, if there is anything remaining, parse out the return type.
var ret *Type
if !b.Done() {
if rett := parseNextType(b); rett != "" {
ret = &rett
}
}
return FunctionType{Tok: b.From(mark), Parameters: params, Return: ret}
} | pkg/tokens/decors.go | 0.763131 | 0.476214 | decors.go | starcoder |
package solutions
// graphNode is a node in the BFS search graph: it stores a word and a
// link back to the node it was reached from, so that complete ladders
// can be reconstructed after the search.
type graphNode struct {
	parentNode *graphNode
	word       string
}

// void is a zero-size placeholder used to build set-like maps.
type void struct{}

// member is the shared value stored for every key present in a set.
var member void

// possibleStates lists every letter a word position may be changed to
// when generating neighbor words.
var possibleStates = []string{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"}
// findLadders returns every shortest transformation sequence from
// startWord to endWord, changing one letter at a time with each
// intermediate word drawn from words. An empty result is returned when
// endWord is not in the word list or equals startWord.
func findLadders(startWord string, endWord string, words []string) (ladders [][]string) {
	dict := make(map[string]void)
	for _, w := range words {
		dict[w] = member
	}
	if _, present := dict[endWord]; !present || startWord == endWord {
		return [][]string{}
	}
	return createLadders(bfsLadders(startWord, endWord, dict))
}
// bfsLadders runs a bidirectional breadth-first search between
// startWord and endWord, always expanding the smaller frontier, until
// the two frontiers share at least one word. It returns the two
// frontier maps (word -> nodes that reached it), pruned by hasConverged
// down to the meeting words.
func bfsLadders(startWord string, endWord string, dictionary map[string]void) (map[string][]graphNode, map[string][]graphNode) {
	sourceGraphNode, targetGraphNode := graphNode{word: startWord}, graphNode{word: endWord}
	sourceMap, targetMap := make(map[string][]graphNode), make(map[string][]graphNode)
	found, forward, backwards := false, true, true
	sourceMap = addToMap(sourceMap, sourceGraphNode)
	targetMap = addToMap(targetMap, targetGraphNode)
	for !found && (len(sourceMap) > 0 || len(targetMap) > 0) {
		// Expand whichever frontier is currently smaller.
		forward, backwards = getDirection(sourceMap, targetMap)
		if forward {
			sourceMap = traverseNextLevel(sourceMap, endWord, dictionary)
		} else if backwards {
			targetMap = traverseNextLevel(targetMap, startWord, dictionary)
		} else {
			continue
		}
		// Stop once the frontiers overlap; hasConverged also prunes
		// both maps down to the overlapping words.
		found, sourceMap, targetMap = hasConverged(sourceMap, targetMap)
	}
	return sourceMap, targetMap
}
// traverseNextLevel expands every node of the current frontier by one
// step and returns the next frontier. Expanded words are deleted from
// the dictionary so they are never revisited. Per node it picks the
// cheaper of two neighbor-generation strategies: scanning the remaining
// dictionary, or enumerating every single-letter mutation.
func traverseNextLevel(sourceMap map[string][]graphNode, endWord string, dictionary map[string]void) map[string][]graphNode {
	targetMap := make(map[string][]graphNode, 0)
	for _, nodes := range sourceMap {
		for nodeIndex := range nodes {
			currentNode := nodes[nodeIndex]
			delete(dictionary, currentNode.word)
			// Estimated number of candidate mutations. NOTE(review):
			// the -1 looks like it was meant to be len*25 (excluding
			// the unchanged letter per position) — TODO confirm; it
			// only affects which strategy is chosen, not correctness.
			nextStatesLength := len(currentNode.word) * len(possibleStates) - 1
			if nextStatesLength > len(dictionary) {
				// Few dictionary words remain: scan them directly.
				states := nextStatesFromDictionary(currentNode, dictionary)
				for stateIndex := range states {
					targetMap = addToMap(targetMap, states[stateIndex])
				}
			} else {
				// Otherwise enumerate every one-letter substitution.
				for index := 0; index < len(currentNode.word); index++ {
					states := nextStates(currentNode, index, endWord, dictionary)
					for _, newNodes := range states {
						for nodeIndex := range newNodes {
							targetMap = addToMap(targetMap, newNodes[nodeIndex])
						}
					}
				}
			}
		}
	}
	return targetMap
}
// getDirection decides which frontier the bidirectional search should
// expand next: forward (from the source side) when the source frontier
// is the smaller non-empty one, backwards otherwise.
//
// The two results are always complementary — the original computed
// `(s > t) || s == 0`, which is exactly the negation of
// `(s <= t) && s != 0` by De Morgan — so the second value is simply
// !forward.
func getDirection(sourceMap map[string][]graphNode, targetMap map[string][]graphNode) (bool, bool) {
	forward := len(sourceMap) != 0 && len(sourceMap) <= len(targetMap)
	return forward, !forward
}
// nextStates generates the neighbors of currentNode obtained by
// substituting every possible letter at the given position. If a
// substitution yields endWord, only that single state is returned
// (the search for this node is over). Otherwise only words still
// present in the dictionary are kept.
func nextStates(currentNode graphNode, index int, endWord string, dictionary map[string]void) (states map[string][]graphNode) {
	currentWord := currentNode.word
	mapNewStates := make(map[string][]graphNode)
	for stateIndex := range possibleStates {
		// Rebuild the word with the letter at `index` replaced.
		prefix, suffix := currentWord[:index], ""
		if index < len(currentWord) - 1 {
			suffix = currentWord[index + 1:]
		}
		newState := prefix + possibleStates[stateIndex] + suffix
		if newState == endWord {
			// Reached the goal word: short-circuit with just this state.
			result := make(map[string][]graphNode, 0)
			result = addToMap(result, graphNode{parentNode: &currentNode, word: newState})
			return result
		}
		if _, ok := dictionary[newState]; ok {
			mapNewStates = addToMap(mapNewStates, graphNode{parentNode: &currentNode, word: newState})
		}
	}
	return mapNewStates
}
// hasConverged reports whether the two frontiers share any word. On
// convergence it also prunes both maps in place so only the shared
// (meeting) words remain; otherwise the maps are returned untouched.
func hasConverged(sourceMap map[string][]graphNode, targetMap map[string][]graphNode) (bool, map[string][]graphNode, map[string][]graphNode) {
	converged := false
	for w := range sourceMap {
		if _, ok := targetMap[w]; ok {
			converged = true
			break
		}
	}
	if !converged {
		return false, sourceMap, targetMap
	}
	// Keep only the words present on both sides.
	for w := range sourceMap {
		if _, ok := targetMap[w]; !ok {
			delete(sourceMap, w)
		}
	}
	for w := range targetMap {
		if _, ok := sourceMap[w]; !ok {
			delete(targetMap, w)
		}
	}
	return true, sourceMap, targetMap
}
// addToMap appends n to the bucket for its word and returns the map for
// call-chaining convenience.
func addToMap(nodes map[string][]graphNode, n graphNode) map[string][]graphNode {
	nodes[n.word] = append(nodes[n.word], n)
	return nodes
}
// createLadders stitches together the two half-ladders that meet at
// each shared frontier word: every source-side path reaching the word
// is combined with every target-side path leaving it.
func createLadders(sourceStates map[string][]graphNode, targetStates map[string][]graphNode) (ladders [][]string) {
	resultLadders := make([][]string, 0)
	for word, sourceNodes := range sourceStates {
		for sourceNodeIndex := range sourceNodes {
			// Path from the start word up to (and including) word;
			// reversed so it reads start -> word.
			sourceLadder := createLadder(&sourceNodes[sourceNodeIndex], true)
			targetNodes := targetStates[word]
			for targetNodeIndex := range targetNodes {
				// Path from word to the end word; drop the duplicated
				// meeting word before concatenating.
				targetLadder := createLadder(&targetNodes[targetNodeIndex], false)
				targetLadder = targetLadder[1:]
				resultLadders = append(resultLadders, append(sourceLadder, targetLadder...))
			}
		}
	}
	return resultLadders
}
// createLadder walks the parent links from node back to its root and
// collects the words. With reverse=true the result is flipped so it
// reads root-first instead of node-first.
func createLadder(node *graphNode, reverse bool) (ladder []string) {
	ladder = []string{node.word}
	for node.parentNode != nil {
		node = node.parentNode
		ladder = append(ladder, node.word)
	}
	if !reverse {
		return ladder
	}
	size := len(ladder)
	flipped := make([]string, size)
	for i, w := range ladder {
		flipped[size-1-i] = w
	}
	return flipped
}
// nextStatesFromDictionary scans the remaining dictionary for words
// exactly one letter away from parent's word and wraps each as a child
// node of parent.
func nextStatesFromDictionary(parent graphNode, dictionary map[string]void) []graphNode {
	states := make([]graphNode, 0)
	for w := range dictionary {
		if checkDifference(w, parent.word) != 1 {
			continue
		}
		states = append(states, graphNode{parentNode: &parent, word: w})
	}
	return states
}
// checkDifference counts the byte positions at which the two words
// differ; both words are assumed to have the same length (only word1's
// length is iterated).
func checkDifference(word1 string, word2 string) int {
	diff := 0
	for i, n := 0, len(word1); i < n; i++ {
		if word1[i] != word2[i] {
			diff++
		}
	}
	return diff
}
package object
import (
"github.com/gopherd/three/core"
"github.com/gopherd/three/driver/renderer"
"github.com/gopherd/three/geometry"
)
// CameraType discriminates between the supported camera projections.
type CameraType int

const (
	// PerspectiveCameraType identifies a perspective-projection camera.
	PerspectiveCameraType CameraType = iota
	// OrthographicCameraType identifies an orthographic-projection camera.
	OrthographicCameraType
)

// Camera represents a camera object
type Camera interface {
	Object
	// CameraType reports which projection this camera uses.
	CameraType() CameraType
	// Projection returns the current projection matrix.
	Projection() core.Matrix4
	// SetViewOffset configures a sub-region of a larger virtual
	// viewport (e.g. for tiled rendering) and flags the projection
	// for recomputation.
	SetViewOffset(fullWidth, fullHeight, x, y, width, height core.Float)
	// IntersectsBox reports whether box intersects the view frustum.
	IntersectsBox(box geometry.Box3) bool
	// ContainsPoint reports whether pos lies inside the view frustum.
	ContainsPoint(pos core.Vector3) bool
}
// cameraImpl carries the state shared by concrete camera
// implementations: the projection matrix with its cached inverse and
// frustum, the optional view-offset configuration, and the zoom and
// clipping-plane parameters.
type cameraImpl struct {
	object3d
	// matrixWorldInverse presumably holds the inverse of the camera's
	// world matrix (i.e. the view matrix) — it is never assigned in
	// this chunk; TODO confirm where it is maintained.
	matrixWorldInverse core.Matrix4
	proj struct {
		matrix        core.Matrix4     // projection matrix
		matrixInverse core.Matrix4     // cached inverse, refreshed by projectionMatrixChanged
		frustum       geometry.Frustum // culling frustum derived from matrix
		// view holds the optional sub-viewport set by SetViewOffset.
		view struct {
			enabled               bool
			fullWidth, fullHeight core.Float
			offsetX, offsetY      core.Float
			width, height         core.Float
		}
		// notNeedsUpdate is stored inverted so that the zero value of
		// cameraImpl starts in the "needs update" state.
		notNeedsUpdate bool
	}
	zoom      core.Float
	near, far core.Float // clipping plane distances
}
// TODO(delay) Bounds implements Object Bounds method.
// Currently a stub: always returns the zero (empty) box.
func (camera *cameraImpl) Bounds() geometry.Box3 {
	return geometry.Box3{}
}

// TODO(delay) Render implements Object Render method.
// Currently a stub: cameras draw nothing.
func (camera *cameraImpl) Render(renderer renderer.Renderer, proj, view, transform core.Matrix4) {
}

// SetViewOffset implements Camera SetViewOffset method: it records the
// sub-viewport (offset and size within a fullWidth x fullHeight virtual
// viewport), enables it, and marks the projection matrix dirty so it is
// rebuilt before the next use.
func (camera *cameraImpl) SetViewOffset(fullWidth, fullHeight, x, y, width, height core.Float) {
	camera.proj.view.enabled = true
	camera.proj.view.fullWidth = fullWidth
	camera.proj.view.fullHeight = fullHeight
	camera.proj.view.offsetX = x
	camera.proj.view.offsetY = y
	camera.proj.view.width = width
	camera.proj.view.height = height
	camera.setProjectionNeedsUpdate(true)
}

// IntersectsBox implements Camera IntersectsBox method by delegating to
// the cached view frustum.
func (camera *cameraImpl) IntersectsBox(box geometry.Box3) bool {
	return camera.proj.frustum.IntersectsBox(box)
}
// ContainsPoint implements Camera ContainsPoint method by testing the
// point against the cached view frustum.
func (camera *cameraImpl) ContainsPoint(point core.Vector3) bool {
	return camera.proj.frustum.ContainsPoint(point)
}

// isProjectionNeedsUpdate reports whether the projection matrix must be
// recomputed before use (the flag is stored inverted so a zero-valued
// camera starts dirty).
func (camera *cameraImpl) isProjectionNeedsUpdate() bool {
	return !camera.proj.notNeedsUpdate
}

// setProjectionNeedsUpdate marks the projection matrix dirty (true) or
// clean (false).
func (camera *cameraImpl) setProjectionNeedsUpdate(needsUpdate bool) {
	camera.proj.notNeedsUpdate = !needsUpdate
}

// projectionMatrixChanged refreshes the state derived from the
// projection matrix: it clears the dirty flag, recomputes the cached
// inverse, and rebuilds the culling frustum.
func (camera *cameraImpl) projectionMatrixChanged() {
	camera.setProjectionNeedsUpdate(false)
	camera.proj.matrixInverse = camera.proj.matrix.Invert()
	camera.proj.frustum.SetFromProjectionMatrix(camera.proj.matrix)
}
package key
import (
"fmt"
pb "github.com/youtube/vitess/go/vt/proto/topodata"
)
// This file contains the functions to convert topo data to and from proto3

// KeyspaceIdTypeToProto translates a KeyspaceIdType to proto, or panics
// if the value is not one of the three known constants.
func KeyspaceIdTypeToProto(k KeyspaceIdType) pb.KeyspaceIdType {
	switch k {
	case KIT_UNSET:
		return pb.KeyspaceIdType_UNSET
	case KIT_UINT64:
		return pb.KeyspaceIdType_UINT64
	case KIT_BYTES:
		return pb.KeyspaceIdType_BYTES
	}
	panic(fmt.Errorf("Invalid value for KeyspaceIdType: %v", k))
}

// ProtoToKeyspaceIdType translates a proto KeyspaceIdType, or panics
// if the value is not one of the three known constants.
func ProtoToKeyspaceIdType(k pb.KeyspaceIdType) KeyspaceIdType {
	switch k {
	case pb.KeyspaceIdType_UNSET:
		return KIT_UNSET
	case pb.KeyspaceIdType_UINT64:
		return KIT_UINT64
	case pb.KeyspaceIdType_BYTES:
		return KIT_BYTES
	}
	panic(fmt.Errorf("Invalid value for KeyspaceIdType: %v", k))
}
// KeyRangeToProto translates a KeyRange to its proto representation.
func KeyRangeToProto(k KeyRange) *pb.KeyRange {
	return &pb.KeyRange{Start: []byte(k.Start), End: []byte(k.End)}
}

// ProtoToKeyRange translates a proto KeyRange back to a KeyRange; a nil
// proto maps to the zero KeyRange.
func ProtoToKeyRange(k *pb.KeyRange) KeyRange {
	if k == nil {
		return KeyRange{}
	}
	return KeyRange{Start: KeyspaceId(k.Start), End: KeyspaceId(k.End)}
}
// KeyRangesToProto translates a slice of KeyRange to proto; an empty or
// nil input maps to nil.
func KeyRangesToProto(ks []KeyRange) []*pb.KeyRange {
	if len(ks) == 0 {
		return nil
	}
	out := make([]*pb.KeyRange, len(ks))
	for i := range ks {
		out[i] = KeyRangeToProto(ks[i])
	}
	return out
}

// ProtoToKeyRanges translates a proto slice into a slice of KeyRanges;
// an empty or nil input maps to nil.
func ProtoToKeyRanges(ks []*pb.KeyRange) []KeyRange {
	if len(ks) == 0 {
		return nil
	}
	out := make([]KeyRange, len(ks))
	for i := range ks {
		out[i] = ProtoToKeyRange(ks[i])
	}
	return out
}
// KeyspaceIdsToProto translates an array of KeyspaceId to proto
func KeyspaceIdsToProto(l []KeyspaceId) [][]byte {
if len(l) == 0 {
return nil
}
result := make([][]byte, len(l))
for i, k := range l {
result[i] = []byte(k)
}
return result
}
// ProtoToKeyspaceIds translates a proto into an array of KeyspaceIds
func ProtoToKeyspaceIds(l [][]byte) []KeyspaceId {
if len(l) == 0 {
return nil
}
result := make([]KeyspaceId, len(l))
for i, k := range l {
result[i] = KeyspaceId(k)
}
return result
} | go/vt/key/proto3.go | 0.578329 | 0.409988 | proto3.go | starcoder |
package learnML
import (
"../matrix"
"../rand"
"math"
)
// SupervisedLearner is the interface implemented by every supervised
// learning algorithm in this package.
type SupervisedLearner interface {
	// Name returns the name of this learner.
	Name() string
	// Train this learner on the full dataset; paras carries optional
	// algorithm-specific hyper-parameters.
	Train(features, labels *matrix.Matrix, paras ...float64)
	// TrainIncremental partially trains using a single pattern.
	TrainIncremental(feat, lab matrix.Vector)
	// Predict makes a prediction for a single feature vector.
	Predict(in matrix.Vector) matrix.Vector
	// FilterData transforms (featIn, labIn) into (featOut, labOut);
	// a default implementation may simply copy the data unchanged.
	FilterData(featIn, labIn, featOut, labOut *matrix.Matrix)
}
// CountMisclassifications measures the misclassifications with the
// provided test data: every label component a prediction does not match
// exactly counts as one misclassification. features and labels must
// have the same number of rows.
func CountMisclassifications(learner SupervisedLearner, features, labels *matrix.Matrix) int {
	// The condition passed to Require must hold for valid input, as in
	// SSE and MRepNFoldCrossValidation below; the original passed
	// `features.Rows() != labels.Rows()`, which is inverted relative to
	// both of those call sites.
	matrix.Require(features.Rows() == labels.Rows(),
		"CountMisclassifications: Mismatching number of rows\n")
	mis := 0
	for i := 0; i < features.Rows(); i++ {
		pred := learner.Predict(features.Row(i))
		lab := labels.Row(i)
		for j := 0; j < len(lab); j++ {
			if pred[j] != lab[j] {
				mis++
			}
		}
	}
	return mis
}
// SSE computes the sum of squared errors of the learner's predictions
// over every row of the test data.
func SSE(learner SupervisedLearner, features, labels *matrix.Matrix) float64 {
	matrix.Require(features.Rows() == labels.Rows(),
		"SSE: Mismatching number of rows\n")
	total := 0.0
	for row := 0; row < features.Rows(); row++ {
		predicted := learner.Predict(features.Row(row))
		actual := labels.Row(row)
		for col := range actual {
			d := predicted[col] - actual[col]
			total += d * d
		}
	}
	return total
}
// MRepNFoldCrossValidation performs m repetitions of n-fold
// cross-validation and returns a Vector of m values, one per
// repetition: the square root of the per-row mean of the summed SSE
// over all folds. The rows of features and labels are shuffled in
// place before each repetition, using a fixed seed so runs are
// reproducible.
func MRepNFoldCrossValidation(learner SupervisedLearner,
	features, labels *matrix.Matrix, m, n int) matrix.Vector {
	matrix.Require(features.Rows() == labels.Rows(),
		"MRepNFoldCrossValidation: features and labels must have the same number of rows\n")
	// partition bookkeeping: start/end hold up to two [start, end) row
	// intervals used by Matrix.WrapRows to build views over the data
	start := []int{0, 0}
	end := []int{0, 0}
	rows := labels.Rows()
	// foldSize contains the size of each fold
	foldSize := make([]int, n)
	foldSize[0] = rows / n
	for i := 1; i < n; i++ {
		foldSize[i] = foldSize[0]
	}
	// distribute the rows%n leftover rows over the first folds
	for i := 0; i < rows%n; i++ {
		foldSize[i]++
	}
	r := rand.NewRand(1982)
	var trainDataX, testDataX, trainDataY, testDataY matrix.Matrix
	sse := matrix.NewVector(m, nil)
	for i := 0; i < m; i++ {
		// shuffling data: swap row j-1 with a random earlier row
		// (r.Next(j) is presumably uniform in [0, j) — project rand pkg)
		for j := rows; j > 1; j-- {
			l := int(r.Next(uint64(j)))
			features.SwapRows(j-1, l)
			labels.SwapRows(j-1, l)
		}
		// training: fold j is the held-out test interval; the rows
		// before and after it form the training set
		startRemoveIndex := 0
		end[1] = rows
		sse[i] = 0.0
		for j := 0; j < n; j++ {
			// wrap views over the test fold (single interval) ...
			start = start[:1]
			end = end[:1]
			start[0] = startRemoveIndex
			startRemoveIndex += foldSize[j]
			end[0] = startRemoveIndex
			testDataX.WrapRows(features, start, end)
			testDataY.WrapRows(labels, start, end)
			// ... and over the training data (the two flanking intervals)
			start = start[:2]
			end = end[:2]
			start[1] = end[0]
			end[0] = start[0]
			start[0] = 0
			trainDataX.WrapRows(features, start, end)
			trainDataY.WrapRows(labels, start, end)
			// train on everything outside the fold
			learner.Train(&trainDataX, &trainDataY)
			// accumulate SSE on the held-out fold
			sse[i] += SSE(learner, &testDataX, &testDataY)
		}
		// convert the summed SSE into a root of the per-row mean
		sse[i] /= float64(rows)
		sse[i] = math.Sqrt(sse[i])
	}
	return sse
} | goml/learnML/supervised.go | 0.630116 | 0.634076 | supervised.go | starcoder |
package randomness
import (
"crypto/rand"
"hash"
"math/big"
)
// GenerateSecureRandom returns keySize bytes of cryptographically
// secure random data read from crypto/rand.
func GenerateSecureRandom(keySize int) ([]byte, error) {
	buf := make([]byte, keySize)
	if _, err := rand.Read(buf); err != nil {
		return nil, err
	}
	return buf, nil
}
// To remove modulo bias, we must calculate RAND_MAX - (RAND_MAX % N)
// Since we use a PRF that outputs a specific number of bytes (requiredBytes)
// RAND_MAX = ((2**(8*requiredBytes)) - 1)
// N = maxValue
// CalculateModuloBias returns those two values and returns the upper bound to avoid modulo bias
func CalculateModuloBias(maxValue *big.Int, requiredBytes int) *big.Int {
var (
zero = big.NewInt(0)
one = big.NewInt(1)
two = big.NewInt(2)
)
if maxValue.Cmp(zero) == 0 {
return zero
}
// Convert the number of required bytes to bits to calculate the max PRF possible value (2**b) - 1
nBits := big.NewInt(int64(8 * requiredBytes))
// randMax is the max possible number that the PRF can generate within its byte output
randMax := big.NewInt(0).Exp(two, nBits, nil)
randMax.Sub(randMax, one)
randExcess := new(big.Int)
randExcess.Mod(randMax, maxValue)
//randExcess.Add(randExcess, one)
// randLimit is the max value that the RNG can generate. (randLimit = randMax - randExcess)
randLimit := big.NewInt(0).Sub(randMax, randExcess)
return randLimit
}
// RandInInterval receives a seed and returns a random value (using a
// PRF) between [0, max-1]. The result is deterministic for a given
// (max, seed, h): candidate values drawn from the PRF stream are
// rejection-sampled against CalculateModuloBias's limit so the final
// modulo reduction is unbiased. Returns 0 when max is 0.
func RandInInterval(max *big.Int, seed []byte, h hash.Hash) *big.Int {
	// BitLen returns an int with the length of the absolute value of max in bits
	maxBitLength := max.BitLen()
	// If max == 0, the BitLen function returns 0
	if maxBitLength == 0 {
		return big.NewInt(0)
	}
	requiredBytes := (maxBitLength + 7) / 8 // Convert the max bit length to byte
	totalSpace := 3 * requiredBytes // Increase the random search space
	randomByteValue := make([]byte, totalSpace) // Variable allocation for the random value (in byte)
	randomIntValue := big.NewInt(0) // Variable allocation for the random value (in big.Int)
	// Calculate randomLimit, which sets the upper bound to ensure no modulo bias comes from the PRF
	randomLimit := CalculateModuloBias(max, totalSpace)
	r := seed
	for {
		// Each iteration re-hashes the previous digest, walking a
		// deterministic PRF chain from the seed.
		r = PRF(h, r) // Call the PRF to generate a random stream from the seed
		// NOTE(review): r[0:totalSpace] panics when the hash's digest is
		// shorter than 3*requiredBytes (e.g. SHA-256 limits max to about
		// 10 bytes / 80 bits here) — confirm callers bound max accordingly.
		copy(randomByteValue, r[0:totalSpace]) // Copy just the necessary bytes
		randomIntValue.SetBytes(randomByteValue) // convert the randomByteValue to Big.Int
		// Check if obtained random value is smaller than randomLimit
		if randomIntValue.Cmp(randomLimit) < 0 {
			return randomIntValue.Mod(randomIntValue, max)
		}
	}
}
// PRF is a pseudorandom function
func PRF(h hash.Hash, seed []byte) []byte {
h.Reset()
h.Write(seed)
return h.Sum(nil)
} | randomness/csprng.go | 0.860428 | 0.45641 | csprng.go | starcoder |
package query
import (
"connectordb/datastream"
"errors"
)
//Operator is an interface describing the functions that are needed for query. The standard operator implements these,
// but for import sake and for simplified mocking, only the necessary interface is shown here
type Operator interface {
	// GetStreamIndexRange returns the datapoints of streampath between
	// indices i1 and i2, with transform applied.
	GetStreamIndexRange(streampath string, i1 int64, i2 int64, transform string) (datastream.DataRange, error)
	// GetStreamTimeRange returns at most limit datapoints of streampath
	// between times t1 and t2, with transform applied.
	GetStreamTimeRange(streampath string, t1 float64, t2 float64, limit int64, transform string) (datastream.DataRange, error)
	// GetShiftedStreamTimeRange behaves like GetStreamTimeRange but
	// shifts the range start by ishift datapoints (negative = backtrack).
	GetShiftedStreamTimeRange(streampath string, t1 float64, t2 float64, ishift, limit int64, transform string) (datastream.DataRange, error)
}
//StreamQuery contains all the necessary information to perform a query on the given stream. It is the structure used
//to encode a query for merge and dataset. It uses the Operator's functions internally.
//Note that while both index-based and time based elements are in the struct, it is only valid to use one at a time
type StreamQuery struct {
	Stream    string  `json:"stream"`              //The stream name in form usr/dev/stream
	Transform string  `json:"transform,omitempty"` //The transform to perform on the stream
	I1        int64   `json:"i1,omitempty"`        //The first index to get
	I2        int64   `json:"i2,omitempty"`        //The end index of the range to get
	T1        float64 `json:"t1,omitempty"`        //The start time of the range to get
	T2        float64 `json:"t2,omitempty"`        //The end time of the range to get
	Limit     int64   `json:"limit,omitempty"`     //The limit of number of datapoints to allow
	// indexbacktrack is the number of elements to backtrack before a
	// starting time (used for time queries). It is unexported, so the
	// json tag below is inert — encoding/json never serializes
	// unexported fields anyway.
	indexbacktrack int64 `json:"-"`
}
//IsValid reports whether the query names a stream. It does not verify
//that the named stream actually exists.
func (s *StreamQuery) IsValid() bool {
	return len(s.Stream) > 0
}
//HasRange reports whether any range-based field of the query (index,
//time, or limit) is non-zero.
func (s *StreamQuery) HasRange() bool {
	switch {
	case s.I1 != 0, s.I2 != 0:
		return true
	case s.T1 != 0, s.T2 != 0:
		return true
	case s.Limit != 0:
		return true
	}
	return false
}
//Run runs the query that the struct encodes on the given operator.
func (s *StreamQuery) Run(qm Operator) (datastream.DataRange, error) {
if s.T1 != 0 || s.T2 != 0 || s.Limit != 0 {
//First check that only one method of querying is active
if s.I1 != 0 || s.I2 != 0 {
//query by index is also active. Not cool. Not cool at all
return nil, errors.New("Only one query method (index or time) can be used at a time")
}
//Alright, query by time
if s.indexbacktrack > 0 {
return qm.GetShiftedStreamTimeRange(s.Stream, s.T1, s.T2, -s.indexbacktrack, s.Limit, s.Transform)
}
return qm.GetStreamTimeRange(s.Stream, s.T1, s.T2, s.Limit, s.Transform)
}
//The query method is by integer (or no query method is chosen, meaning whole stream)
return qm.GetStreamIndexRange(s.Stream, s.I1, s.I2, s.Transform)
} | src/connectordb/query/query.go | 0.761538 | 0.464962 | query.go | starcoder |
package network
import (
"bytes"
"fmt"
"github.com/rqme/neat"
)
// Neuron describes one node of the network: its role and activation
// function (embedded neat types) plus a 2D position hint.
type Neuron struct {
	neat.NeuronType
	neat.ActivationType
	X, Y float64 // Hint at where neuron might be positioned in a 2D representation
}
// Neurons is the ordered list of a network's neurons.
type Neurons []Neuron
// Synapse is a weighted connection between two neurons, identified by
// their indexes in the network's neuron list.
type Synapse struct {
	Source, Target int // Indexes of source and target neurons
	Weight         float64
}

// Synapses is a sortable collection of synapses, ordered by Source and
// then by Target.
type Synapses []Synapse

func (s Synapses) Len() int      { return len(s) }
func (s Synapses) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

// Less orders synapses by Source first and, for equal sources, by
// Target. The original returned `s[i].Source == s[j].Source` in the
// unequal-source branch — always false there, so elements with a
// smaller Source never sorted first; it now compares the sources.
func (s Synapses) Less(i, j int) bool {
	if s[i].Source == s[j].Source {
		return s[i].Target < s[j].Target
	}
	return s[i].Source < s[j].Source
}
// Activation maps a neuron's accumulated input to its output value.
type Activation func(float64) float64
// Classic is a simple feed-forward network evaluated synapse by synapse
// over an ordered neuron list (bias, input, hidden, output).
type Classic struct {
	// Structure
	Neurons Neurons
	Synapses Synapses
	// Internal state
	biases, inputs, hiddens, outputs int // counts per neuron type, filled by New
	funcs []Activation // resolved activation function per neuron, filled by New
}
// New builds a Classic network from an ordered neuron list and its
// synapses. Neurons must appear in bias, input, hidden, output order,
// the network must contain at least one input and one output neuron,
// and every synapse must reference a defined neuron. When err is
// non-nil the returned (partially initialized) network should be
// discarded.
func New(neurons Neurons, synapses Synapses) (net *Classic, err error) {
	// Begin a new network
	net = &Classic{Neurons: neurons, Synapses: synapses}
	// Create the internal state and check for errors
	oo := false    // out-of-order check
	l := neat.Bias // last neuron type seen
	net.funcs = make([]Activation, len(neurons))
	for i, ng := range neurons {
		// Count neuron types, flagging any that appear out of order.
		switch ng.NeuronType {
		case neat.Bias:
			net.biases += 1
			oo = oo || l > neat.Bias
		case neat.Input:
			net.inputs += 1
			oo = oo || l > neat.Input
		case neat.Hidden:
			net.hiddens += 1
			oo = oo || l > neat.Hidden
		case neat.Output:
			net.outputs += 1
			oo = oo || l > neat.Output
		}
		l = ng.NeuronType
		// Resolve the neuron's activation function.
		switch ng.ActivationType {
		case neat.Direct:
			net.funcs[i] = neat.DirectActivation
		case neat.Sigmoid:
			net.funcs[i] = neat.SigmoidActivation
		case neat.SteependSigmoid:
			net.funcs[i] = neat.SteependSigmoidActivation
		case neat.Tanh:
			net.funcs[i] = neat.TanhActivation
		case neat.InverseAbs:
			net.funcs[i] = neat.InverseAbsActivation
		default:
			err = fmt.Errorf("network.classic.New - Unknown ActivationType %v", byte(ng.ActivationType))
			// The original used `break` here, which only exits the
			// switch (not the loop) and silently kept processing;
			// return the error immediately instead.
			return
		}
	}
	if oo {
		err = fmt.Errorf("network.classic.New - Neurons are out of order")
		return
	}
	// Ensure we have inputs and outputs
	if net.inputs == 0 {
		err = fmt.Errorf("network.classic.New - Network must have at least 1 input neuron")
		return
	}
	if net.outputs == 0 {
		err = fmt.Errorf("network.classic.New - Network must have at least 1 output neuron")
		return
	}
	// Ensure the synapses map to defined neurons
	cnt := len(net.Neurons)
	for _, s := range net.Synapses {
		if s.Source > cnt || s.Target > cnt {
			err = fmt.Errorf("network.classic.New - Synapses do not map to defined neurons")
			return
		}
	}
	// Sort the synapses for efficient processing
	//sort.Sort(net.Synapses)
	return
}
// String renders a human-readable listing of the network's neurons and
// synapses.
func (n Classic) String() string {
	var b bytes.Buffer
	b.WriteString("Network is \n")
	b.WriteString("\tNeurons:\n")
	for i, nr := range n.Neurons {
		fmt.Fprintf(&b, "\t [%d] Type: %v Activation: %v Position: [%f, %f]\n", i, nr.NeuronType, nr.ActivationType, nr.X, nr.Y)
	}
	b.WriteString("\tSynapses:\n")
	for i, sy := range n.Synapses {
		fmt.Fprintf(&b, "\t [%d] Source: %d Target: %d Weight: %f\n", i, sy.Source, sy.Target, sy.Weight)
	}
	return b.String()
}
// Activate feeds inputs through the network and returns the output
// values. Bias neurons are fixed at 1.0; when fewer inputs than input
// neurons are supplied, the missing ones stay at zero. Errors if more
// inputs are supplied than there are input neurons.
func (n Classic) Activate(inputs []float64) (outputs []float64, err error) {
	// Create the data structure holding each neuron's accumulated input
	val := make([]float64, len(n.Neurons))
	// Set the biases (the first n.biases neurons) to 1.0
	for i := 0; i < n.biases; i++ {
		val[i] = 1.0
	}
	// Copy inputs into the network, right after the bias neurons
	if len(inputs) > n.inputs {
		err = fmt.Errorf("network.classic.Activate - There are more input values (%d) than input neurons (%d)", len(inputs), n.inputs)
		return
	}
	copy(val[n.biases:], inputs)
	// Iterate the network synapse by synapse. The source's activation
	// function is re-applied to its accumulated value on every outgoing
	// synapse (results are not cached), so synapse order matters.
	for _, s := range n.Synapses {
		v := n.funcs[s.Source](val[s.Source])
		val[s.Target] += v * s.Weight
	}
	// Return the activated values of the last n.outputs neurons
	offset := len(val) - n.outputs
	outputs = make([]float64, n.outputs)
	for i := 0; i < len(outputs); i++ {
		v := n.funcs[i+offset](val[i+offset])
		outputs[i] = v
	}
	return
} | network/classic.go | 0.676727 | 0.461502 | classic.go | starcoder |
package table
import (
"github.com/shopspring/decimal"
)
// CellType is the type of a table cell.
// NOTE(review): CellType is not referenced anywhere in this view —
// confirm it is used elsewhere before removing.
type CellType int
// Table is a matrix of table cells. columns[i] holds the column-group
// number of column i; rows holds the table body in insertion order.
type Table struct {
	columns []int
	rows []*Row
}
// New creates a new table whose columns are organized into groups;
// groups[i] is the number of columns belonging to group i.
func New(groups ...int) *Table {
	t := &Table{}
	for groupNo, size := range groups {
		for ; size > 0; size-- {
			t.columns = append(t.columns, groupNo)
		}
	}
	return t
}
// Width returns the width (total number of columns) of this table.
func (t *Table) Width() int {
	return len(t.columns)
}
// AddRow appends a new empty row (with capacity for one cell per
// column) and returns it.
func (t *Table) AddRow() *Row {
	row := &Row{cells: make([]cell, 0, t.Width())}
	t.rows = append(t.rows, row)
	return row
}
// AddSeparatorRow appends a row consisting entirely of separator cells.
func (t *Table) AddSeparatorRow() {
	row := t.AddRow()
	for n := t.Width(); n > 0; n-- {
		row.addCell(SeparatorCell{})
	}
}
// AddEmptyRow appends a row consisting entirely of empty cells.
func (t *Table) AddEmptyRow() {
	row := t.AddRow()
	for n := t.Width(); n > 0; n-- {
		row.addCell(emptyCell{})
	}
}
// Row is a table row: an ordered list of cells.
type Row struct {
	cells []cell
}
// addCell appends one cell to the row.
func (r *Row) addCell(c cell) {
	r.cells = append(r.cells, c)
}
// AddEmpty adds an empty cell and returns the row for chaining.
func (r *Row) AddEmpty() *Row {
	r.addCell(emptyCell{})
	return r
}
// AddText adds an unindented text cell with the given content and
// alignment, returning the row for chaining.
func (r *Row) AddText(content string, align Alignment) *Row {
	c := textCell{Content: content, Align: align}
	r.addCell(c)
	return r
}
// AddNumber adds a cell holding the given decimal number and returns
// the row for chaining.
func (r *Row) AddNumber(n decimal.Decimal) *Row {
	r.addCell(numberCell{n})
	return r
}
// AddIndented adds a left-aligned text cell with the given indentation,
// returning the row for chaining.
func (r *Row) AddIndented(content string, indent int) *Row {
	r.addCell(textCell{Content: content, Align: Left, Indent: indent})
	return r
}
// FillEmpty pads the row with empty cells up to the capacity of its
// cell slice (Table.AddRow sets that capacity to the table width).
func (r *Row) FillEmpty() {
	for i := len(r.cells); i < cap(r.cells); i++ {
		r.AddEmpty()
	}
}
// cell is the internal interface implemented by every cell kind.
type cell interface {
	// isSep reports whether the cell is a separator cell.
	isSep() bool
}
// Alignment is the alignment of a table cell.
type Alignment int
const (
	// Left aligns to the left.
	Left Alignment = iota
	// Right aligns to the right.
	Right
	// Center centers.
	Center
)
// textCell is a cell containing text with an alignment and indent.
type textCell struct {
	Content string
	Align Alignment
	Indent int
}
func (t textCell) isSep() bool {
	return false
}
// numberCell is a cell containing a decimal number.
type numberCell struct {
	n decimal.Decimal
}
func (t numberCell) isSep() bool {
	return false
}
// SeparatorCell is a cell containing a separator.
type SeparatorCell struct{}
func (SeparatorCell) isSep() bool {
	return true
}
// emptyCell is an empty cell.
type emptyCell struct{}
func (emptyCell) isSep() bool {
	return false
} | lib/table/table.go | 0.783202 | 0.409339 | table.go | starcoder |
package timecode
import (
"fmt"
"time"
)
// Range is a pair of decimal seconds defining a time interval
// starting at Range[0] and ending at Range[1]
type Range [2]float64
// Canon returns the range in proper order, where r[0] <= r[1]
func (r Range) Canon() Range {
if r[0] > r[1] {
r[0], r[1] = r[1], r[0]
}
return r
}
// Size returns the duration of the Range
func (r Range) Size() time.Duration {
dx := r[1] - r[0]
if dx < 0 {
dx = -dx
}
return time.Duration(dx * float64(time.Second))
}
func (r Range) String() string {
const s = float64(time.Second)
return fmt.Sprintf("(%s-%s)", time.Duration(r[0]*s), time.Duration(r[1]*s))
}
// Timecode outputs the end of the range as a timecode in HH:MM:SS:FF
// format (the FF field is currently always 00 — see toString).
func (r Range) Timecode(fps float64) string {
	return toString(r[1], fps)
}
// Timecodes outputs the start and end timecodes in HH:MM:SS:FF format
// TODO(as): should replace Timecode with this. The Range float64 types
// might be better off as custom duration types that use integral units, rather
// than float64, we could add methods on these directly and then remove
// Timecode
func (r Range) Timecodes(fps float64) (string, string) {
	return toString(r[0], fps), toString(r[1], fps)
}
// MarshalJSON encodes the range as a two-element JSON array of seconds,
// formatted with fmt's default %f precision (six decimal places).
func (r Range) MarshalJSON() ([]byte, error) {
	out := fmt.Sprintf("[%f,%f]", r[0], r[1])
	return []byte(out), nil
}
// Parse parses an input string in HH:MM:SS:FF, HH:MM:SS;FF, or
// HH:MM:SS format, defined by the following convention
// HH = hour, MM = minute, SS = second, and FF is the frame number, the
// frameRate argument is either 0 (meaning defaultFps), or a fractional
// frame rate upon which to calculate the precise Range value based on
// the FF argument, if present. The returned Range always starts at 0.
func Parse(timecode string, fps float64) (Range, error) {
	if fps == 0 {
		fps = defaultFps
	}
	var (
		h, m, s float64
		f       uint64
	)
	// Try the colon-delimited frame suffix first, then the semicolon
	// (drop-frame style) variant.
	n, err := fmt.Sscanf(timecode, "%f:%f:%f:%d", &h, &m, &s, &f)
	if n < 3 {
		n, _ = fmt.Sscanf(timecode, "%f:%f:%f;%d", &h, &m, &s, &f)
	}
	if n < 3 {
		// Neither format matched; report the first scan's error.
		return Range{}, err
	}
	// To avoid floating point issues, we convert frames per second
	// into nanosecond per frame. If we have 1 fps, then frame exposure
	// time takes 1e9 nanoseconds, for 2 fps, 1e9/2 ns, and so forth. We
	// can then multiply the frame number by the number of nanoseconds
	// per frame and convert that into a floating point representation
	// as the final step
	nspf := uint64(1e9 / fps)                 // ns/frame
	fdur := time.Duration(nspf * f).Seconds() // n.o. seconds these frames take up
	// A successful parse returns a nil error: previously the error from
	// the first (partially matched) Sscanf leaked through even when the
	// second format, or a frameless HH:MM:SS input, parsed successfully.
	return Range{0, h*3600 + m*60 + s + fdur}, nil
}
// toString converts the float64 number of seconds s into a string timecode
// in HH:MM:SS:FF form. Fractional seconds are truncated and the FF
// field is always rendered as 00 (see the TODO below), so fps is
// currently only used for the defaultFps fallback.
func toString(s float64, fps float64) string {
	if fps == 0 {
		fps = defaultFps
	}
	{
		d := int64(s) // whole seconds; the fractional part is dropped here
		h := d / 3600
		d %= 3600
		m := d / 60
		m %= 60
		s := d % 60
		f := 0 // TODO(as): frame number (not yet derived from the fractional seconds)
		return fmt.Sprintf("%02d:%02d:%02d:%02d", h, m, s, f)
	}
} | vendor/github.com/cbsinteractive/pkg/timecode/range.go | 0.69285 | 0.492981 | range.go | starcoder |
package text
import (
"bytes"
)
//--------------------------------------------------------------------------------------------------
/*
OTransform - A representation of a transformation relating to a leaps document. This can either be a
text addition, a text deletion, or both: Delete characters are removed at
Position and Insert is added in their place. Version orders transforms
against the document; TReceived is presumably a receive timestamp set by
the server (confirm against the transport layer).
*/
type OTransform struct {
	Position  int    `json:"position"`           // start offset of the edit
	Delete    int    `json:"num_delete"`         // number of characters removed at Position
	Insert    string `json:"insert"`             // text inserted at Position after deletion
	Version   int    `json:"version"`            // document version the transform targets
	TReceived int64  `json:"received,omitempty"` // receive time — TODO confirm units/origin
}
//--------------------------------------------------------------------------------------------------
// intMin returns the smaller of two ints.
func intMin(left, right int) int {
	if right < left {
		return right
	}
	return left
}
// intMax returns the larger of two ints.
func intMax(left, right int) int {
	if right > left {
		return right
	}
	return left
}
/*
FixOutOfDateTransform - When a transform created for a specific version is later determined to come
after one or more other transforms it can be fixed. This fix translates the transform such that
being applied in the correct order will preserve the original intention.
In order to apply these fixes this function should be called with the target transform and the
actual versioned transform that the target currently 'believes' it is. So, for example, if the
transform was written for version 7 and was actually 10 you would call FixOutOfDateTransform in this
order:
	FixOutOfDateTransform(target, version7)
	FixOutOfDateTransform(target, version8)
	FixOutOfDateTransform(target, version9)
Once the transform is adjusted through this fix it can be harmlessly dispatched to all other clients
which will end up with the same document as the client that submitted this transform.
NOTE: These fixes do not regard or alter the versions of either transform.
NOTE(review): insertion lengths below are rune counts while Position and
Delete arithmetic assumes the same unit — presumably positions are also
rune-based; confirm against the document model.
*/
func FixOutOfDateTransform(sub, pre *OTransform) {
	// Get insertion lengths (codepoints)
	subInsert, preInsert := bytes.Runes([]byte(sub.Insert)), bytes.Runes([]byte(pre.Insert))
	subLength, preLength := len(subInsert), len(preInsert)
	if pre.Position <= sub.Position {
		// pre acts at or before sub's position
		if preLength > 0 && pre.Delete == 0 {
			// pure insert before sub: shift sub right by the insert length
			sub.Position += preLength
		} else if pre.Delete > 0 && (pre.Position+pre.Delete) <= sub.Position {
			// pre's deletion ends before sub: shift by pre's net size change
			sub.Position += (preLength - pre.Delete)
		} else if pre.Delete > 0 && (pre.Position+pre.Delete) > sub.Position {
			// pre's deletion overlaps sub: shrink sub's deletion by the
			// overlap and move sub to just after pre's insertion
			overhang := intMin(sub.Delete, (pre.Position+pre.Delete)-sub.Position)
			sub.Delete -= overhang
			sub.Position = pre.Position + preLength
		}
	} else if sub.Delete > 0 && (sub.Position+sub.Delete) > pre.Position {
		// sub starts first and its deletion reaches into pre
		posGap := pre.Position - sub.Position
		excess := intMax(0, (sub.Delete - posGap))
		if excess > pre.Delete {
			// sub deletes through everything pre deleted: also swallow
			// pre's insertion by deleting it and appending it to sub's text
			sub.Delete += (preLength - pre.Delete)
			newInsert := make([]rune, subLength+preLength)
			copy(newInsert[:], subInsert)
			copy(newInsert[subLength:], preInsert)
			sub.Insert = string(newInsert)
		} else {
			// otherwise truncate sub's deletion at pre's position
			sub.Delete = posGap
		}
	}
}
/*
FixPrematureTransform - Used by clients to fix incoming and outgoing transforms when local changes
have been applied to a document before being routed through the server.
In order for a client UI to be unblocking it must apply local changes as the user types them before
knowing the correct order of the change. Therefore, it is possible to apply a local change before
receiving incoming transforms that are meant to be applied beforehand.
As a solution to those situations this function allows a client to alter and incoming interations
such that if they were to be applied to the local document after our local change they would result
in the same document. The outgoing transform is also modified for sending out to the server.
It is possible that the local change has already been dispatched to the server, in which case it is
the servers responsibility to fix the transform so that other clients end up at the same result.
NOTE: These fixes do not regard or alter the versions of either transform.
Both transforms are mutated in place.
*/
func FixPrematureTransform(unapplied, unsent *OTransform) {
	var before, after *OTransform
	// Order the OTs by position in the document.
	if unapplied.Position <= unsent.Position {
		before = unapplied
		after = unsent
	} else {
		before = unsent
		after = unapplied
	}
	// Get insertion lengths (codepoints)
	bInsert, aInsert := bytes.Runes([]byte(before.Insert)), bytes.Runes([]byte(after.Insert))
	bLength, aLength := len(bInsert), len(aInsert)
	if before.Delete == 0 {
		// pure insert before: shift the later transform right
		after.Position += bLength
	} else if (before.Delete + before.Position) <= after.Position {
		// before's deletion ends before after: apply the net size change
		after.Position += (bLength - before.Delete)
	} else {
		// before's deletion overlaps after's position
		posGap := after.Position - before.Position
		excess := intMax(0, before.Delete-posGap)
		if excess > after.Delete {
			// before deletes through after entirely: absorb after's insert
			before.Delete += (aLength - after.Delete)
			before.Insert = before.Insert + after.Insert
		} else {
			// truncate before's deletion at after's position
			before.Delete = posGap
		}
		// shrink after's deletion by what before already removed and
		// reposition it just past before's insertion
		after.Delete = intMax(0, after.Delete-excess)
		after.Position = before.Position + bLength
	}
}
/*
MergeTransforms - Takes two transforms (the next to be sent, and the one that follows) and attempts
to merge them into one transform. This will not be possible with some combinations, and the function
returns a boolean to indicate whether the merge was successful. On success the result is stored in
first; second is never mutated.
*/
func MergeTransforms(first, second *OTransform) bool {
	var overlap, remainder int
	// Get insertion lengths (codepoints)
	fInsert := bytes.Runes([]byte(first.Insert))
	fLength := len(fInsert)
	// NOTE(review): the string slicing below indexes bytes while fLength
	// counts runes — for non-ASCII inserts these units disagree; confirm
	// inputs are ASCII or switch the slicing to operate on runes.
	if first.Position+fLength == second.Position {
		// second starts exactly where first's insert ends: concatenate
		first.Insert = first.Insert + second.Insert
		first.Delete += second.Delete
		return true
	}
	if second.Position == first.Position {
		// second overwrites the start of first's insert
		remainder = intMax(0, second.Delete-fLength)
		first.Delete += remainder
		first.Insert = second.Insert + first.Insert[second.Delete:]
		return true
	}
	if second.Position > first.Position && second.Position < (first.Position+fLength) {
		// second lands strictly inside first's insert
		overlap = second.Position - first.Position
		remainder = intMax(0, second.Delete-(fLength-overlap))
		first.Delete += remainder
		first.Insert = first.Insert[0:overlap] + second.Insert +
			first.Insert[intMin(fLength, overlap+second.Delete):]
		return true
	}
	return false
}
//-------------------------------------------------------------------------------------------------- | src/github.com/jeffail/leaps/lib/text/transforms.go | 0.746971 | 0.577317 | transforms.go | starcoder |
package graph
import (
i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55 "github.com/microsoft/kiota/abstractions/go/serialization"
)
// WorkbookChartAxes is the generated model grouping the three axes of a
// workbook chart: category, series, and value.
type WorkbookChartAxes struct {
    Entity
    // Represents the category axis in a chart. Read-only.
    categoryAxis *WorkbookChartAxis;
    // Represents the series axis of a 3-dimensional chart. Read-only.
    seriesAxis *WorkbookChartAxis;
    // Represents the value axis in an axis. Read-only.
    valueAxis *WorkbookChartAxis;
}
// NewWorkbookChartAxes instantiates a new workbookChartAxes and sets the default values.
// The embedded Entity is initialized via NewEntity; the axis pointers
// start nil until set or deserialized.
func NewWorkbookChartAxes()(*WorkbookChartAxes) {
    m := &WorkbookChartAxes{
        Entity: *NewEntity(),
    }
    return m
}
// GetCategoryAxis gets the categoryAxis property value. Represents the category axis in a chart. Read-only.
// Returns nil when the receiver itself is nil.
func (m *WorkbookChartAxes) GetCategoryAxis()(*WorkbookChartAxis) {
    if m == nil {
        return nil
    }
    return m.categoryAxis
}
// GetSeriesAxis gets the seriesAxis property value. Represents the series axis of a 3-dimensional chart. Read-only.
// Returns nil when the receiver itself is nil.
func (m *WorkbookChartAxes) GetSeriesAxis()(*WorkbookChartAxis) {
    if m == nil {
        return nil
    }
    return m.seriesAxis
}
// GetValueAxis gets the valueAxis property value. Represents the value axis in an axis. Read-only.
// Returns nil when the receiver itself is nil.
func (m *WorkbookChartAxes) GetValueAxis()(*WorkbookChartAxis) {
    if m == nil {
        return nil
    }
    return m.valueAxis
}
// GetFieldDeserializers the deserialization information for the current model.
// It extends the embedded Entity's deserializer map with handlers for
// the three axis properties; each handler parses a nested
// WorkbookChartAxis and stores it via the corresponding setter.
func (m *WorkbookChartAxes) GetFieldDeserializers()(map[string]func(interface{}, i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode)(error)) {
    res := m.Entity.GetFieldDeserializers()
    res["categoryAxis"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetObjectValue(func () i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable { return NewWorkbookChartAxis() })
        if err != nil {
            return err
        }
        if val != nil {
            m.SetCategoryAxis(val.(*WorkbookChartAxis))
        }
        return nil
    }
    res["seriesAxis"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetObjectValue(func () i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable { return NewWorkbookChartAxis() })
        if err != nil {
            return err
        }
        if val != nil {
            m.SetSeriesAxis(val.(*WorkbookChartAxis))
        }
        return nil
    }
    res["valueAxis"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetObjectValue(func () i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable { return NewWorkbookChartAxis() })
        if err != nil {
            return err
        }
        if val != nil {
            m.SetValueAxis(val.(*WorkbookChartAxis))
        }
        return nil
    }
    return res
}
// IsNil reports whether the model pointer is nil.
func (m *WorkbookChartAxes) IsNil()(bool) {
    return m == nil
}
// Serialize serializes information the current object: the embedded
// Entity's fields are written first, followed by the three axis
// properties as nested objects. Returns the first write error, if any.
func (m *WorkbookChartAxes) Serialize(writer i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.SerializationWriter)(error) {
    err := m.Entity.Serialize(writer)
    if err != nil {
        return err
    }
    {
        err = writer.WriteObjectValue("categoryAxis", m.GetCategoryAxis())
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteObjectValue("seriesAxis", m.GetSeriesAxis())
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteObjectValue("valueAxis", m.GetValueAxis())
        if err != nil {
            return err
        }
    }
    return nil
}
// SetCategoryAxis sets the categoryAxis property value. Represents the category axis in a chart. Read-only.
func (m *WorkbookChartAxes) SetCategoryAxis(value *WorkbookChartAxis)() {
    m.categoryAxis = value
}
// SetSeriesAxis sets the seriesAxis property value. Represents the series axis of a 3-dimensional chart. Read-only.
func (m *WorkbookChartAxes) SetSeriesAxis(value *WorkbookChartAxis)() {
    m.seriesAxis = value
}
// SetValueAxis sets the valueAxis property value. Represents the value axis in an axis. Read-only.
func (m *WorkbookChartAxes) SetValueAxis(value *WorkbookChartAxis)() {
    m.valueAxis = value
} | models/microsoft/graph/workbook_chart_axes.go | 0.706393 | 0.412826 | workbook_chart_axes.go | starcoder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.