code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// WorkforceIntegrationEncryption holds the encryption settings exchanged with
// a workforce integration: the negotiated protocol and its shared secret.
type WorkforceIntegrationEncryption struct {
	// Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
	additionalData map[string]interface{}
	// Possible values are: sharedSecret, unknownFutureValue.
	protocol *WorkforceIntegrationEncryptionProtocol
	// Encryption shared secret.
	secret *string
}
// NewWorkforceIntegrationEncryption instantiates a new workforceIntegrationEncryption and sets the default values.
func NewWorkforceIntegrationEncryption() *WorkforceIntegrationEncryption {
	m := &WorkforceIntegrationEncryption{}
	// Start with an empty bag so extra properties found during
	// deserialization have somewhere to go.
	m.SetAdditionalData(make(map[string]interface{}))
	return m
}
// CreateWorkforceIntegrationEncryptionFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value.
// The parse node is unused because this type has no subtypes to discriminate between.
func CreateWorkforceIntegrationEncryptionFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
	return NewWorkforceIntegrationEncryption(), nil
}
// GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
// A nil receiver is tolerated and yields nil.
func (m *WorkforceIntegrationEncryption) GetAdditionalData() map[string]interface{} {
	if m == nil {
		return nil
	}
	return m.additionalData
}
// GetFieldDeserializers returns the deserialization information for the
// current model: a map from wire field name to a callback that parses that
// field out of a parse node and stores it on the receiver.
func (m *WorkforceIntegrationEncryption) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
	res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))
	res["protocol"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetEnumValue(ParseWorkforceIntegrationEncryptionProtocol)
		if err != nil {
			return err
		}
		// Absent fields are skipped so existing values are not clobbered with nil.
		if val != nil {
			m.SetProtocol(val.(*WorkforceIntegrationEncryptionProtocol))
		}
		return nil
	}
	res["secret"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetStringValue()
		if err != nil {
			return err
		}
		if val != nil {
			m.SetSecret(val)
		}
		return nil
	}
	return res
}
// GetProtocol gets the protocol property value. Possible values are: sharedSecret, unknownFutureValue.
// A nil receiver is tolerated and yields nil.
func (m *WorkforceIntegrationEncryption) GetProtocol() *WorkforceIntegrationEncryptionProtocol {
	if m == nil {
		return nil
	}
	return m.protocol
}
// GetSecret gets the secret property value. Encryption shared secret.
// A nil receiver is tolerated and yields nil.
func (m *WorkforceIntegrationEncryption) GetSecret() *string {
	if m == nil {
		return nil
	}
	return m.secret
}
// Serialize serializes information the current object.
// The protocol enum is written as its string form only when set; the secret
// and the additional-data bag are always passed through to the writer (a nil
// secret is handed to the writer as-is).
func (m *WorkforceIntegrationEncryption) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
	if m.GetProtocol() != nil {
		cast := (*m.GetProtocol()).String()
		err := writer.WriteStringValue("protocol", &cast)
		if err != nil {
			return err
		}
	}
	{
		err := writer.WriteStringValue("secret", m.GetSecret())
		if err != nil {
			return err
		}
	}
	{
		err := writer.WriteAdditionalData(m.GetAdditionalData())
		if err != nil {
			return err
		}
	}
	return nil
}
// SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
// Calling this on a nil receiver is a no-op.
func (m *WorkforceIntegrationEncryption) SetAdditionalData(value map[string]interface{}) {
	if m == nil {
		return
	}
	m.additionalData = value
}
// SetProtocol sets the protocol property value. Possible values are: sharedSecret, unknownFutureValue.
// Calling this on a nil receiver is a no-op.
func (m *WorkforceIntegrationEncryption) SetProtocol(value *WorkforceIntegrationEncryptionProtocol) {
	if m == nil {
		return
	}
	m.protocol = value
}
// SetSecret sets the secret property value. Encryption shared secret.
func (m *WorkforceIntegrationEncryption) SetSecret(value *string)() {
if m != nil {
m.secret = value
}
} | models/workforce_integration_encryption.go | 0.706393 | 0.41253 | workforce_integration_encryption.go | starcoder |
package term
import (
"fmt"
"reflect"
)
// TransformSubexprs returns a Term with f() applied to each immediate
// subexpression.
//
// Leaf terms (universes, builtins, variables, literals and imports) are
// returned unchanged. For every other node a new node of the same shape is
// built whose immediate children are replaced by f(child); f is NOT applied
// recursively, so callers compose this function to build full-tree
// traversals.
func TransformSubexprs(t Term, f func(Term) Term) Term {
	switch t := t.(type) {
	case Universe, Builtin, Var, LocalVar, NaturalLit, DoubleLit, BoolLit,
		IntegerLit:
		// Leaves: no subexpressions to transform.
		return t
	case Lambda:
		return Lambda{
			Label: t.Label,
			Type:  f(t.Type),
			Body:  f(t.Body),
		}
	case Pi:
		return Pi{
			Label: t.Label,
			Type:  f(t.Type),
			Body:  f(t.Body),
		}
	case App:
		return App{
			Fn:  f(t.Fn),
			Arg: f(t.Arg),
		}
	case Let:
		newLet := Let{}
		for _, b := range t.Bindings {
			newBinding := Binding{
				Variable: b.Variable,
				Value:    f(b.Value),
			}
			// Annotations are optional; only transform when present.
			if b.Annotation != nil {
				newBinding.Annotation = f(b.Annotation)
			}
			newLet.Bindings = append(newLet.Bindings, newBinding)
		}
		newLet.Body = f(t.Body)
		return newLet
	case Annot:
		return Annot{
			Expr:       f(t.Expr),
			Annotation: f(t.Annotation),
		}
	case TextLit:
		result := TextLit{Suffix: t.Suffix}
		// Preserve the nil-vs-empty distinction of Chunks.
		if t.Chunks == nil {
			return result
		}
		result.Chunks = Chunks{}
		for _, chunk := range t.Chunks {
			result.Chunks = append(result.Chunks,
				Chunk{
					Prefix: chunk.Prefix,
					Expr:   f(chunk.Expr),
				})
		}
		return result
	case If:
		return If{
			Cond: f(t.Cond),
			T:    f(t.T),
			F:    f(t.F),
		}
	case Op:
		return Op{
			OpCode: t.OpCode,
			L:      f(t.L),
			R:      f(t.R),
		}
	case EmptyList:
		return EmptyList{Type: f(t.Type)}
	case NonEmptyList:
		result := make(NonEmptyList, len(t))
		for j, e := range t {
			result[j] = f(e)
		}
		return result
	case Some:
		return Some{f(t.Val)}
	case RecordType:
		result := make(RecordType, len(t))
		for k, v := range t {
			result[k] = f(v)
		}
		return result
	case RecordLit:
		result := make(RecordLit, len(t))
		for k, v := range t {
			result[k] = f(v)
		}
		return result
	case ToMap:
		result := ToMap{Record: f(t.Record)}
		// The type annotation on toMap is optional.
		if t.Type != nil {
			result.Type = f(t.Type)
		}
		return result
	case Field:
		return Field{
			Record:    f(t.Record),
			FieldName: t.FieldName,
		}
	case Project:
		return Project{
			Record:     f(t.Record),
			FieldNames: t.FieldNames,
		}
	case ProjectType:
		return ProjectType{
			Record:   f(t.Record),
			Selector: f(t.Selector),
		}
	case UnionType:
		result := make(UnionType, len(t))
		for k, v := range t {
			// A nil alternative (no payload) stays nil.
			if v == nil {
				result[k] = nil
				continue
			}
			result[k] = f(v)
		}
		return result
	case Merge:
		result := Merge{
			Handler: f(t.Handler),
			Union:   f(t.Union),
		}
		if t.Annotation != nil {
			result.Annotation = f(t.Annotation)
		}
		return result
	case Assert:
		return Assert{Annotation: f(t.Annotation)}
	case With:
		return With{
			Record: f(t.Record),
			Path:   t.Path,
			Value:  f(t.Value),
		}
	case Import:
		// Imports are opaque at this level; nothing to transform.
		return t
	default:
		panic(fmt.Sprintf("unknown term type %+v (%v)", t, reflect.ValueOf(t).Type()))
	}
}
// MaybeTransformSubexprs returns a Term with f() applied to each
// immediate subexpression. If f() returns an error at any point,
// MaybeTransformSubexprs returns that error.
//
// This is the fallible twin of TransformSubexprs: the case structure is
// identical, but every child transformation short-circuits on error.
func MaybeTransformSubexprs(t Term, f func(Term) (Term, error)) (Term, error) {
	switch t := t.(type) {
	case Universe, Builtin, Var, LocalVar, NaturalLit, DoubleLit, BoolLit,
		IntegerLit:
		// Leaves: no subexpressions to transform.
		return t, nil
	case Lambda:
		typ, err := f(t.Type)
		if err != nil {
			return nil, err
		}
		body, err := f(t.Body)
		if err != nil {
			return nil, err
		}
		return Lambda{Label: t.Label, Type: typ, Body: body}, nil
	case Pi:
		typ, err := f(t.Type)
		if err != nil {
			return nil, err
		}
		body, err := f(t.Body)
		if err != nil {
			return nil, err
		}
		return Pi{Label: t.Label, Type: typ, Body: body}, nil
	case App:
		fn, err := f(t.Fn)
		if err != nil {
			return nil, err
		}
		arg, err := f(t.Arg)
		if err != nil {
			return nil, err
		}
		return App{Fn: fn, Arg: arg}, nil
	case Let:
		var err error
		newLet := Let{}
		for _, b := range t.Bindings {
			// Note: this err shadows the outer one for the loop body.
			value, err := f(b.Value)
			if err != nil {
				return nil, err
			}
			newBinding := Binding{
				Variable: b.Variable,
				Value:    value,
			}
			// Annotations are optional; only transform when present.
			if b.Annotation != nil {
				newBinding.Annotation, err = f(b.Annotation)
				if err != nil {
					return nil, err
				}
			}
			newLet.Bindings = append(newLet.Bindings, newBinding)
		}
		newLet.Body, err = f(t.Body)
		return newLet, err
	case Annot:
		expr, err := f(t.Expr)
		if err != nil {
			return nil, err
		}
		annotation, err := f(t.Annotation)
		if err != nil {
			return nil, err
		}
		return Annot{Expr: expr, Annotation: annotation}, nil
	case TextLit:
		result := TextLit{Suffix: t.Suffix}
		// Preserve the nil-vs-empty distinction of Chunks.
		if t.Chunks == nil {
			return result, nil
		}
		result.Chunks = Chunks{}
		for _, chunk := range t.Chunks {
			expr, err := f(chunk.Expr)
			if err != nil {
				return nil, err
			}
			result.Chunks = append(result.Chunks,
				Chunk{Prefix: chunk.Prefix, Expr: expr})
		}
		return result, nil
	case If:
		cond, err := f(t.Cond)
		if err != nil {
			return nil, err
		}
		T, err := f(t.T)
		if err != nil {
			return nil, err
		}
		F, err := f(t.F)
		if err != nil {
			return nil, err
		}
		return If{Cond: cond, T: T, F: F}, nil
	case Op:
		l, err := f(t.L)
		if err != nil {
			return nil, err
		}
		r, err := f(t.R)
		if err != nil {
			return nil, err
		}
		return Op{OpCode: t.OpCode, L: l, R: r}, nil
	case EmptyList:
		typ, err := f(t.Type)
		return EmptyList{Type: typ}, err
	case NonEmptyList:
		result := make(NonEmptyList, len(t))
		for j, e := range t {
			var err error
			result[j], err = f(e)
			if err != nil {
				return nil, err
			}
		}
		return result, nil
	case Some:
		val, err := f(t.Val)
		return Some{val}, err
	case RecordType:
		result := make(RecordType, len(t))
		for k, v := range t {
			var err error
			result[k], err = f(v)
			if err != nil {
				return nil, err
			}
		}
		return result, nil
	case RecordLit:
		result := make(RecordLit, len(t))
		for k, v := range t {
			var err error
			result[k], err = f(v)
			if err != nil {
				return nil, err
			}
		}
		return result, nil
	case ToMap:
		record, err := f(t.Record)
		if err != nil {
			return nil, err
		}
		result := ToMap{Record: record}
		// The type annotation on toMap is optional.
		if t.Type != nil {
			result.Type, err = f(t.Type)
		}
		return result, err
	case Field:
		record, err := f(t.Record)
		return Field{
			Record:    record,
			FieldName: t.FieldName,
		}, err
	case Project:
		record, err := f(t.Record)
		return Project{
			Record:     record,
			FieldNames: t.FieldNames,
		}, err
	case ProjectType:
		record, err := f(t.Record)
		if err != nil {
			return nil, err
		}
		selector, err := f(t.Selector)
		if err != nil {
			return nil, err
		}
		return ProjectType{Record: record, Selector: selector}, nil
	case UnionType:
		result := make(UnionType, len(t))
		for k, v := range t {
			// A nil alternative (no payload) stays nil.
			if v == nil {
				result[k] = nil
				continue
			}
			var err error
			result[k], err = f(v)
			if err != nil {
				return nil, err
			}
		}
		return result, nil
	case Merge:
		handler, err := f(t.Handler)
		if err != nil {
			return nil, err
		}
		union, err := f(t.Union)
		if err != nil {
			return nil, err
		}
		result := Merge{
			Handler: handler,
			Union:   union,
		}
		if t.Annotation != nil {
			result.Annotation, err = f(t.Annotation)
		}
		return result, err
	case Assert:
		annotation, err := f(t.Annotation)
		return Assert{Annotation: annotation}, err
	case With:
		record, err := f(t.Record)
		if err != nil {
			return nil, err
		}
		value, err := f(t.Value)
		if err != nil {
			return nil, err
		}
		return With{
			Record: record,
			Path:   t.Path,
			Value:  value,
		}, nil
	case Import:
		// Imports are opaque at this level; nothing to transform.
		return t, nil
	default:
		if t == nil {
			panic(fmt.Sprintf("nil term"))
		}
		panic(fmt.Sprintf("unknown term type %+v (%v)", t, reflect.ValueOf(t).Type()))
	}
}
package iso20022
// Cheque5 is a set of characteristics related to a cheque instruction, such
// as cheque type or cheque number. All fields are optional.
type Cheque5 struct {
	// Specifies the type of cheque to be issued by the first agent.
	ChequeType *ChequeType2Code `xml:"ChqTp,omitempty"`
	// Identifies the cheque number.
	ChequeNumber *Max35Text `xml:"ChqNb,omitempty"`
	// Identifies the party that ordered the issuance of the cheque.
	ChequeFrom *NameAndAddress3 `xml:"ChqFr,omitempty"`
	// Specifies the delivery method of the cheque by the debtor's agent.
	DeliveryMethod *ChequeDeliveryMethod1Choice `xml:"DlvryMtd,omitempty"`
	// Identifies the party to whom the debtor's agent should send the cheque.
	DeliverTo *NameAndAddress3 `xml:"DlvrTo,omitempty"`
	// Urgency or order of importance that the originator would like the recipient of the payment instruction to apply to the processing of the payment instruction.
	InstructionPriority *Priority2Code `xml:"InstrPrty,omitempty"`
	// Date when the draft becomes payable and the debtor's account is debited.
	ChequeMaturityDate *ISODate `xml:"ChqMtrtyDt,omitempty"`
	// Code agreed between the initiating party and the debtor's agent, that specifies the cheque layout, company logo and digitised signature to be used to print the cheque.
	FormsCode *Max35Text `xml:"FrmsCd,omitempty"`
	// Information that needs to be printed on a cheque, used by the payer to add miscellaneous information.
	MemoField *Max35Text `xml:"MemoFld,omitempty"`
	// Regional area in which the cheque can be cleared, when a country has no nation-wide cheque clearing organisation.
	RegionalClearingZone *Max35Text `xml:"RgnlClrZone,omitempty"`
	// Specifies the print location of the cheque.
	PrintLocation *Max35Text `xml:"PrtLctn,omitempty"`
}
// SetChequeType records the type of cheque to be issued by the first agent.
func (c *Cheque5) SetChequeType(value string) {
	v := ChequeType2Code(value)
	c.ChequeType = &v
}
// SetChequeNumber records the cheque number.
func (c *Cheque5) SetChequeNumber(value string) {
	v := Max35Text(value)
	c.ChequeNumber = &v
}
// AddChequeFrom initialises the ChequeFrom block and returns it for population.
func (c *Cheque5) AddChequeFrom() *NameAndAddress3 {
	c.ChequeFrom = &NameAndAddress3{}
	return c.ChequeFrom
}
// AddDeliveryMethod initialises the DeliveryMethod choice and returns it for population.
func (c *Cheque5) AddDeliveryMethod() *ChequeDeliveryMethod1Choice {
	c.DeliveryMethod = &ChequeDeliveryMethod1Choice{}
	return c.DeliveryMethod
}
// AddDeliverTo initialises the DeliverTo block and returns it for population.
func (c *Cheque5) AddDeliverTo() *NameAndAddress3 {
	c.DeliverTo = &NameAndAddress3{}
	return c.DeliverTo
}
// SetInstructionPriority records the processing urgency requested by the originator.
func (c *Cheque5) SetInstructionPriority(value string) {
	v := Priority2Code(value)
	c.InstructionPriority = &v
}
// SetChequeMaturityDate records the date when the draft becomes payable.
func (c *Cheque5) SetChequeMaturityDate(value string) {
	v := ISODate(value)
	c.ChequeMaturityDate = &v
}
// SetFormsCode records the agreed cheque layout/forms code.
func (c *Cheque5) SetFormsCode(value string) {
	v := Max35Text(value)
	c.FormsCode = &v
}
// SetMemoField records miscellaneous information to print on the cheque.
func (c *Cheque5) SetMemoField(value string) {
	v := Max35Text(value)
	c.MemoField = &v
}
// SetRegionalClearingZone records the regional clearing area for the cheque.
func (c *Cheque5) SetRegionalClearingZone(value string) {
	v := Max35Text(value)
	c.RegionalClearingZone = &v
}
func (c *Cheque5) SetPrintLocation(value string) {
c.PrintLocation = (*Max35Text)(&value)
} | Cheque5.go | 0.76708 | 0.479321 | Cheque5.go | starcoder |
package redis
import (
"context"
"encoding/json"
"fmt"
"strconv"
"time"
"github.com/go-redis/redis/v7"
"github.com/benthosdev/benthos/v4/public/bloblang"
"github.com/benthosdev/benthos/v4/public/service"
)
// redisProcConfig builds the configuration spec for the `redis` processor:
// client connection fields, the new-style command/args_mapping fields, the
// deprecated operator/key fields, retry settings, a lint rule forbidding the
// mixing of old and new styles, and two documented examples.
//
// Fix: retry_period was declared with NewIntField but given the string
// default "500ms" and is read via FieldDuration — it must be a string field.
func redisProcConfig() *service.ConfigSpec {
	spec := service.NewConfigSpec().
		Stable().
		Summary(`Performs actions against Redis that aren't possible using a ` + "[`cache`](/docs/components/processors/cache)" + ` processor. Actions are
performed for each message and the message contents are replaced with the result. In order to merge the result into the original message compose this processor within a ` + "[`branch` processor](/docs/components/processors/branch)" + `.`).
		Categories("Integration")
	for _, f := range clientFields() {
		spec = spec.Field(f)
	}
	return spec.
		Field(service.NewInterpolatedStringField("command").
			Description("The command to execute.").
			Version("4.3.0").
			Example("scard").
			Example("incrby").
			Example(`${! meta("command") }`).
			Default("")).
		Field(service.NewBloblangField("args_mapping").
			Description("A [Bloblang mapping](/docs/guides/bloblang/about) which should evaluate to an array of values matching in size to the number of arguments required for the specified Redis command.").
			Version("4.3.0").
			Example("root = [ this.key ]").
			Example(`root = [ meta("kafka_key"), this.count ]`).
			Default(``)).
		Field(service.NewStringAnnotatedEnumField("operator", map[string]string{
			"keys":   `Returns an array of strings containing all the keys that match the pattern specified by the ` + "`key` field" + `.`,
			"scard":  `Returns the cardinality of a set, or ` + "`0`" + ` if the key does not exist.`,
			"sadd":   `Adds a new member to a set. Returns ` + "`1`" + ` if the member was added.`,
			"incrby": `Increments the number stored at ` + "`key`" + ` by the message content. If the key does not exist, it is set to ` + "`0`" + ` before performing the operation. Returns the value of ` + "`key`" + ` after the increment.`,
		}).
			Description("The operator to apply.").
			Deprecated().
			Optional()).
		Field(service.NewInterpolatedStringField("key").
			Description("A key to use for the target operator.").
			Deprecated().
			Optional()).
		Field(service.NewIntField("retries").
			Description("The maximum number of retries before abandoning a request.").
			Default(3).
			Advanced()).
		Field(service.NewStringField("retry_period").
			Description("The time to wait before consecutive retry attempts.").
			Default("500ms").
			Advanced()).
		LintRule(`
root = if this.contains("operator") && this.contains("command") {
  [ "only one of 'operator' (old style) or 'command' (new style) fields should be specified" ]
}
`).
		Example("Querying Cardinality",
			`If given payloads containing a metadata field `+"`set_key`"+` it's possible to query and store the cardinality of the set for each message using a `+"[`branch` processor](/docs/components/processors/branch)"+` in order to augment rather than replace the message contents:`,
			`
pipeline:
  processors:
    - branch:
        processors:
          - redis:
              url: TODO
              command: scard
              args_mapping: 'root = [ meta("set_key") ]'
        result_map: 'root.cardinality = this'
`).
		Example("Running Total",
			`If we have JSON data containing number of friends visited during covid 19:
`+"```json"+`
{"name":"ash","month":"feb","year":2019,"friends_visited":10}
{"name":"ash","month":"apr","year":2019,"friends_visited":-2}
{"name":"bob","month":"feb","year":2019,"friends_visited":3}
{"name":"bob","month":"apr","year":2019,"friends_visited":1}
`+"```"+`
We can add a field that contains the running total number of friends visited:
`+"```json"+`
{"name":"ash","month":"feb","year":2019,"friends_visited":10,"total":10}
{"name":"ash","month":"apr","year":2019,"friends_visited":-2,"total":8}
{"name":"bob","month":"feb","year":2019,"friends_visited":3,"total":3}
{"name":"bob","month":"apr","year":2019,"friends_visited":1,"total":4}
`+"```"+`
Using the `+"`incrby`"+` command:`,
			`
pipeline:
  processors:
    - branch:
        processors:
          - redis:
              url: TODO
              command: incrby
              args_mapping: 'root = [ this.name, this.friends_visited ]'
        result_map: 'root.total = this'
`)
}
// init registers the redis processor plugin; registration failure is a
// programming error and aborts the process.
func init() {
	if err := service.RegisterBatchProcessor(
		"redis", redisProcConfig(),
		func(conf *service.ParsedConfig, mgr *service.Resources) (service.BatchProcessor, error) {
			return newRedisProcFromConfig(conf, mgr)
		},
	); err != nil {
		panic(err)
	}
}
//------------------------------------------------------------------------------
// redisProc executes a Redis command per message, replacing each message's
// contents with the command result.
type redisProc struct {
	log *service.Logger

	// key and operator form the deprecated operator-style configuration.
	key      *service.InterpolatedString
	operator redisOperator

	// command and argsMapping form the new-style raw command configuration;
	// they are used whenever operator is nil.
	command     *service.InterpolatedString
	argsMapping *bloblang.Executor

	client redis.UniversalClient

	// retries and retryPeriod control re-execution of failed commands.
	retries     int
	retryPeriod time.Duration
}
// newRedisProcFromConfig builds a redisProc from parsed configuration: the
// client connection, retry policy, the new-style command/args_mapping fields,
// and the deprecated key/operator fields when present.
func newRedisProcFromConfig(conf *service.ParsedConfig, res *service.Resources) (*redisProc, error) {
	client, err := getClient(conf)
	if err != nil {
		return nil, err
	}
	retries, err := conf.FieldInt("retries")
	if err != nil {
		return nil, err
	}
	retryPeriod, err := conf.FieldDuration("retry_period")
	if err != nil {
		return nil, err
	}
	command, err := conf.FieldInterpolatedString("command")
	if err != nil {
		return nil, err
	}
	// Only parse args_mapping when non-empty; an empty mapping means the
	// deprecated operator path will be used instead.
	var argsMapping *bloblang.Executor
	if testStr, _ := conf.FieldString("args_mapping"); testStr != "" {
		if argsMapping, err = conf.FieldBloblang("args_mapping"); err != nil {
			return nil, err
		}
	}
	r := &redisProc{
		log:         res.Logger(),
		command:     command,
		argsMapping: argsMapping,
		retries:     retries,
		retryPeriod: retryPeriod,
		client:      client,
	}
	// Deprecated fields: both are optional and only read when present.
	if conf.Contains("key") {
		if r.key, err = conf.FieldInterpolatedString("key"); err != nil {
			return nil, err
		}
	}
	if conf.Contains("operator") {
		operatorStr, err := conf.FieldString("operator")
		if err != nil {
			return nil, err
		}
		if r.operator, err = getRedisOperator(operatorStr); err != nil {
			return nil, err
		}
	}
	return r, nil
}
// redisOperator applies one deprecated-style operation ("keys", "sadd",
// "scard" or "incrby") to a single message, using key as the Redis key and
// replacing the message contents with the result.
type redisOperator func(r *redisProc, key string, part *service.Message) error
// newRedisKeysOperator returns an operator that runs KEYS with the message
// key as the match pattern and replaces the message with the array of
// matching key names. Failed commands are retried up to r.retries times,
// waiting r.retryPeriod between attempts.
func newRedisKeysOperator() redisOperator {
	return func(r *redisProc, key string, part *service.Message) error {
		res, err := r.client.Keys(key).Result()
		for i := 0; i <= r.retries && err != nil; i++ {
			r.log.Errorf("Keys command failed: %v\n", err)
			<-time.After(r.retryPeriod)
			res, err = r.client.Keys(key).Result()
		}
		if err != nil {
			return err
		}
		// Widen []string into []interface{} so it can be set as structured data.
		iRes := make([]interface{}, 0, len(res))
		for _, v := range res {
			iRes = append(iRes, v)
		}
		part.SetStructured(iRes)
		return nil
	}
}
// newRedisSCardOperator returns an operator that runs SCARD on the message
// key and replaces the message with the cardinality rendered as a decimal
// string. Failed commands are retried up to r.retries times, waiting
// r.retryPeriod between attempts.
func newRedisSCardOperator() redisOperator {
	return func(r *redisProc, key string, part *service.Message) error {
		res, err := r.client.SCard(key).Result()
		for i := 0; i <= r.retries && err != nil; i++ {
			r.log.Errorf("SCard command failed: %v\n", err)
			<-time.After(r.retryPeriod)
			res, err = r.client.SCard(key).Result()
		}
		if err != nil {
			return err
		}
		part.SetBytes(strconv.AppendInt(nil, res, 10))
		return nil
	}
}
// newRedisSAddOperator returns an operator that runs SADD on the message key
// with the raw message payload as the member, replacing the message with the
// number of members added (rendered as a decimal string). Failed commands are
// retried up to r.retries times, waiting r.retryPeriod between attempts.
func newRedisSAddOperator() redisOperator {
	return func(r *redisProc, key string, part *service.Message) error {
		mBytes, err := part.AsBytes()
		if err != nil {
			return err
		}
		res, err := r.client.SAdd(key, mBytes).Result()
		for i := 0; i <= r.retries && err != nil; i++ {
			r.log.Errorf("SAdd command failed: %v\n", err)
			<-time.After(r.retryPeriod)
			res, err = r.client.SAdd(key, mBytes).Result()
		}
		if err != nil {
			return err
		}
		part.SetBytes(strconv.AppendInt(nil, res, 10))
		return nil
	}
}
// newRedisIncrByOperator returns an operator that runs INCRBY on the message
// key using the integer held in the message payload, replacing the message
// with the post-increment value (rendered as a decimal string). Failed
// commands are retried up to r.retries times, waiting r.retryPeriod between
// attempts.
func newRedisIncrByOperator() redisOperator {
	return func(r *redisProc, key string, part *service.Message) error {
		mBytes, err := part.AsBytes()
		if err != nil {
			return err
		}
		// Parse directly as int64 rather than Atoi+cast so the full 64-bit
		// range is accepted on 32-bit platforms.
		by, err := strconv.ParseInt(string(mBytes), 10, 64)
		if err != nil {
			return err
		}
		res, err := r.client.IncrBy(key, by).Result()
		for i := 0; i <= r.retries && err != nil; i++ {
			r.log.Errorf("incrby command failed: %v\n", err)
			<-time.After(r.retryPeriod)
			res, err = r.client.IncrBy(key, by).Result()
		}
		if err != nil {
			return err
		}
		part.SetBytes(strconv.AppendInt(nil, res, 10))
		return nil
	}
}
// getRedisOperator maps a configured operator name onto its implementation,
// returning an error for anything unrecognised.
func getRedisOperator(opStr string) (redisOperator, error) {
	switch opStr {
	case "keys":
		return newRedisKeysOperator(), nil
	case "sadd":
		return newRedisSAddOperator(), nil
	case "scard":
		return newRedisSCardOperator(), nil
	case "incrby":
		return newRedisIncrByOperator(), nil
	default:
		return nil, fmt.Errorf("operator not recognised: %v", opStr)
	}
}
// execRaw evaluates args_mapping against the batch at the given index,
// interpolates the command name, executes the raw Redis command (retrying
// failures per the configured policy) and replaces msg's contents with the
// structured result.
func (r *redisProc) execRaw(ctx context.Context, index int, inBatch service.MessageBatch, msg *service.Message) error {
	resMsg, err := inBatch.BloblangQuery(index, r.argsMapping)
	if err != nil {
		return fmt.Errorf("args mapping failed: %v", err)
	}
	iargs, err := resMsg.AsStructured()
	if err != nil {
		return err
	}
	args, ok := iargs.([]interface{})
	if !ok {
		return fmt.Errorf("mapping returned non-array result: %T", iargs)
	}
	// Normalise json.Number args into int64/float64/string so the client
	// receives concrete primitive types.
	for i, v := range args {
		n, isN := v.(json.Number)
		if !isN {
			continue
		}
		var nerr error
		if args[i], nerr = n.Int64(); nerr != nil {
			if args[i], nerr = n.Float64(); nerr != nil {
				args[i] = n.String()
			}
		}
	}
	// The command name is prepended as the first argument of the raw call.
	command := inBatch.InterpolatedString(index, r.command)
	args = append([]interface{}{command}, args...)
	res, err := r.client.DoContext(ctx, args...).Result()
	for i := 0; i <= r.retries && err != nil; i++ {
		r.log.Errorf("%v command failed: %v", command, err)
		<-time.After(r.retryPeriod)
		res, err = r.client.DoContext(ctx, args...).Result()
	}
	if err != nil {
		return err
	}
	msg.SetStructured(res)
	return nil
}
// ProcessBatch applies either the deprecated operator (when configured) or
// the raw command path to every message of a copied batch. Per-message
// failures are recorded on the message itself rather than failing the whole
// batch.
func (r *redisProc) ProcessBatch(ctx context.Context, inBatch service.MessageBatch) ([]service.MessageBatch, error) {
	newMsg := inBatch.Copy()
	for index, part := range newMsg {
		if r.operator != nil {
			key := inBatch.InterpolatedString(index, r.key)
			if err := r.operator(r, key, part); err != nil {
				r.log.Debugf("Operator failed for key '%s': %v", key, err)
				part.SetError(fmt.Errorf("redis operator failed: %w", err))
			}
		} else {
			if err := r.execRaw(ctx, index, inBatch, part); err != nil {
				r.log.Debugf("Args mapping failed: %v", err)
				part.SetError(err)
			}
		}
	}
	return []service.MessageBatch{newMsg}, nil
}
// Close releases the underlying Redis client; the context is unused.
func (r *redisProc) Close(ctx context.Context) error {
	return r.client.Close()
}
package structpbconv
import (
"fmt"
"reflect"
"strings"
"github.com/golang/protobuf/ptypes/struct"
)
// tagKey is the struct tag name read by Convert to map structpb field names
// onto Go struct fields (Convert falls back to the lowercased field name when
// the tag is absent).
const tagKey = "structpb"
// Convert converts a structpb.Struct object into the concrete value that dst
// points to. dst should be a pointer to a struct (fields matched via the
// "structpb" tag or lowercased name) or to a map with string keys.
func Convert(src *structpb.Struct, dst interface{}) error {
	return convertStruct(src, reflect.ValueOf(dst))
}
// toPrimitive unwraps a scalar structpb value (null, bool, number or string)
// into a reflect.Value, reporting false for composite kinds.
func toPrimitive(src *structpb.Value) (reflect.Value, bool) {
	switch k := src.GetKind().(type) {
	case *structpb.Value_NullValue:
		return reflect.ValueOf(nil), true
	case *structpb.Value_BoolValue:
		return reflect.ValueOf(k.BoolValue), true
	case *structpb.Value_NumberValue:
		return reflect.ValueOf(k.NumberValue), true
	case *structpb.Value_StringValue:
		return reflect.ValueOf(k.StringValue), true
	}
	return reflect.Value{}, false
}
// convertValue assigns the structpb value src into dest (indirected through
// any pointer). Scalars are converted to dest's type when necessary; lists
// and structs recurse into convertList/convertStruct.
//
// NOTE(review): a NullValue makes toPrimitive return an invalid
// reflect.Value, so v.Type() below would panic — confirm nulls are filtered
// upstream before they reach here.
func convertValue(src *structpb.Value, dest reflect.Value) error {
	dst := reflect.Indirect(dest)
	if v, ok := toPrimitive(src); ok {
		if !v.Type().AssignableTo(dst.Type()) {
			if !v.Type().ConvertibleTo(dst.Type()) {
				return fmt.Errorf("cannot assign %T to %s", src.GetKind(), dst.Type())
			}
			v = v.Convert(dst.Type())
		}
		dst.Set(v)
		return nil
	}
	switch t := src.GetKind().(type) {
	case *structpb.Value_ListValue:
		return convertList(t.ListValue, dst)
	case *structpb.Value_StructValue:
		return convertStruct(t.StructValue, dst)
	default:
		return fmt.Errorf("unsuported value: %T", src.GetKind())
	}
}
// convertList converts each element of src to the destination slice's element
// type and appends them to the slice held in dest.
func convertList(src *structpb.ListValue, dest reflect.Value) error {
	dst := reflect.Indirect(dest)
	if dst.Kind() != reflect.Slice {
		return fmt.Errorf("cannot convert %T to %s", src, dst.Type())
	}
	values := src.GetValues()
	elemType := dst.Type().Elem()
	converted := make([]reflect.Value, len(values))
	for i, value := range values {
		element := reflect.New(elemType).Elem()
		if err := convertValue(value, element); err != nil {
			return err
		}
		converted[i] = element
	}
	// Appends to any existing contents of dst rather than replacing them.
	dst.Set(reflect.Append(dst, converted...))
	return nil
}
// convertStruct populates dest (a struct or a map) from the fields of src.
// Struct fields are matched by their "structpb" tag, falling back to the
// lowercased field name; wire fields with no matching struct field are
// silently dropped, and struct fields with no matching wire field are left
// untouched. For maps a fresh map is built and assigned.
//
// NOTE(review): the map path hard-codes string keys, so a dest map with a
// non-string key type would panic in Set — confirm callers only pass
// map[string]T destinations.
func convertStruct(src *structpb.Struct, dest reflect.Value) error {
	dst := reflect.Indirect(dest)
	if dst.Kind() == reflect.Struct {
		fields := src.GetFields()
		for i := 0; i < dst.NumField(); i++ {
			target := dst.Field(i)
			field := dst.Type().Field(i)
			name := field.Tag.Get(tagKey)
			if name == "" {
				name = strings.ToLower(field.Name)
			}
			if v, ok := fields[name]; ok {
				if err := convertValue(v, target); err != nil {
					return err
				}
			}
		}
		return nil
	} else if dst.Kind() == reflect.Map {
		elemType := dst.Type().Elem()
		mapType := reflect.MapOf(reflect.TypeOf(string("")), elemType)
		aMap := reflect.MakeMap(mapType)
		fields := src.GetFields()
		for key, value := range fields {
			element := reflect.New(elemType).Elem()
			if err := convertValue(value, element); err != nil {
				return err
			}
			aMap.SetMapIndex(reflect.ValueOf(key), element)
		}
		dst.Set(aMap)
		return nil
	}
	return fmt.Errorf("cannot convert %T to %s", src, dst.Type())
}
package gah
import (
"image"
"image/color"
"image/draw"
"image/png"
"os"
)
// Vec2i is a simple 2D int vector.
type Vec2i struct {
	// X and Y are the horizontal and vertical components.
	X, Y int
}
// Vec2f is a simple 2D float vector.
type Vec2f struct {
	// X and Y are the horizontal and vertical components.
	X, Y float64
}
func fileExists(filename string) bool {
info, err := os.Stat(filename)
if os.IsNotExist(err) {
return false
}
return !info.IsDir()
}
// ImgFastSaveToPNG skips png compression for much faster saving speed at the cost of more memory
func ImgFastSaveToPNG(img image.Image, path string) error {
file, err := os.Create(path)
if err != nil {
return err
}
defer file.Close()
enc := &png.Encoder{
CompressionLevel: png.NoCompression,
}
return enc.Encode(file, img)
}
// ImageToRGBA converts an image to an RGBA image, hopefully using a lower level go construct for better performance
// ripped straight from fogleman/gg
func ImageToRGBA(src image.Image) *image.RGBA {
bounds := src.Bounds()
dst := image.NewRGBA(bounds)
draw.Draw(dst, bounds, src, bounds.Min, draw.Src)
return dst
}
// ImgGet returns the grayscale value in [0, 255] of the pixel at (x, y), or 0
// for coordinates outside the image bounds.
//
// NOTE(review): the old comment claimed a [0, 1] range, but the code returns
// an 8-bit value. color.GrayModel yields a 16-bit channel of the form
// Y*0x101, so masking the low byte recovers the 8-bit luma.
func ImgGet(img image.Image, x, y int) float64 {
	w, h := img.Bounds().Dx(), img.Bounds().Dy()
	if x < 0 || x >= w || y < 0 || y >= h {
		return 0
	}
	r, _, _, _ := color.GrayModel.Convert(img.At(x, y)).RGBA()
	return float64(r & 0xFF)
}
// ImgGetRGBA returns the red, green, blue and alpha components of the pixel
// at (x, y) as values in [0, 255], or zeros for coordinates outside the image
// bounds.
//
// NOTE(review): masking the low byte of the 16-bit channels assumes colors
// originating from 8-bit models (where the 16-bit value is v*0x101); for
// other color models the low byte may differ from the high byte — confirm
// inputs are 8-bit-derived.
func ImgGetRGBA(img image.Image, x, y int) (r, g, b, a int) {
	w, h := img.Bounds().Dx(), img.Bounds().Dy()
	if x < 0 || x >= w || y < 0 || y >= h {
		return 0, 0, 0, 0
	}
	tr, tg, tb, ta := img.At(x, y).RGBA()
	return int(tr & 0xFF), int(tg & 0xFF), int(tb & 0xFF), int(ta & 0xFF)
}
// RGBMix linearly interpolates between the two given colors, with ratio2
// giving the weight of color2. Alpha is interpolated like the other channels
// unless maxAlpha is true, in which case it is forced to 0xFF to prevent
// decay when mixing repeatedly.
func RGBMix(color1 color.RGBA, color2 color.RGBA, ratio2 float64, maxAlpha bool) color.RGBA {
	// Normalise all channels to [0, 1] before mixing.
	r1 := float64(color1.R) / 255
	g1 := float64(color1.G) / 255
	b1 := float64(color1.B) / 255
	a1 := float64(color1.A) / 255
	r2 := float64(color2.R) / 255
	g2 := float64(color2.G) / 255
	b2 := float64(color2.B) / 255
	a2 := float64(color2.A) / 255
	var a uint8
	if maxAlpha {
		a = 0xFF
	} else {
		a = uint8(((1-ratio2)*a1 + ratio2*a2) * 255)
	}
	// Results are truncated (not rounded) back to 8 bits.
	return color.RGBA{
		uint8(((1-ratio2)*r1 + ratio2*r2) * 255),
		uint8(((1-ratio2)*g1 + ratio2*g2) * 255),
		uint8(((1-ratio2)*b1 + ratio2*b2) * 255),
		a,
	}
}
// MixF linearly interpolates between val1 and val2, where ratio2 is the
// weight given to val2 (0 yields val1, 1 yields val2).
func MixF(val1 float64, val2 float64, ratio2 float64) float64 {
	weight1 := 1 - ratio2
	return weight1*val1 + ratio2*val2
}
// MixI is the integer counterpart of MixF: it interpolates between the two
// ints and truncates the result.
func MixI(val1 int, val2 int, ratio2 float64) int {
	mixed := MixF(float64(val1), float64(val2), ratio2)
	return int(mixed)
}
// ScaleF2F linearly remaps inNum from the interval [inMin, inMax] onto
// [outMin, outMax], preserving its relative position within the interval.
func ScaleF2F(inNum float64, inMin float64, inMax float64, outMin float64, outMax float64) float64 {
	scaled := (inNum - inMin) * (outMax - outMin) / (inMax - inMin)
	return scaled + outMin
}
// ScaleF2I linearly remaps inNum from [inMin, inMax] onto the int interval
// [outMin, outMax], truncating the result toward zero.
func ScaleF2I(inNum float64, inMin float64, inMax float64, outMin int, outMax int) int {
	scaled := (inNum - inMin) * (float64(outMax - outMin)) / (inMax - inMin)
	return int(scaled + float64(outMin))
}
// ScaleI2F linearly remaps the int inNum from [inMin, inMax] onto the float
// interval [outMin, outMax].
func ScaleI2F(inNum int, inMin int, inMax int, outMin float64, outMax float64) float64 {
	scaled := float64(inNum-inMin) * (outMax - outMin) / float64(inMax-inMin)
	return scaled + outMin
}
// ScaleI2I linearly remaps inNum from [inMin, inMax] onto [outMin, outMax]
// using integer arithmetic (division truncates toward zero).
func ScaleI2I(inNum int, inMin int, inMax int, outMin int, outMax int) int {
	scaled := (inNum - inMin) * (outMax - outMin) / (inMax - inMin)
	return scaled + outMin
}
// Clamp limits inNum to the closed interval [inMin, inMax].
func Clamp(inNum float64, inMin float64, inMax float64) float64 {
	switch {
	case inNum > inMax:
		return inMax
	case inNum < inMin:
		return inMin
	default:
		return inNum
	}
}
package dpfilters
import (
"github.com/signalfx/golib/datapoint"
"github.com/signalfx/signalfx-agent/internal/core/common/dpmeta"
"github.com/signalfx/signalfx-agent/internal/utils/filter"
)
// DatapointFilter can be used to filter out datapoints.
type DatapointFilter interface {
	// Matches takes a datapoint and returns whether it is matched by the
	// filter.
	Matches(*datapoint.Datapoint) bool
}
// BasicDatapointFilter is designed to filter SignalFx datapoint objects. It
// can filter based on the monitor type, dimensions, or the metric name. It
// supports both static, globbed, and regex patterns for filter values. If
// dimensions are specified, they must all match for the datapoint to match. If
// multiple metric names are given, only one must match for the datapoint to
// match the filter since datapoints can only have one metric name.
type basicDatapointFilter struct {
	// monitorType, when non-empty, restricts matching to datapoints carrying
	// this monitor type in their Meta.
	monitorType string
	// dimFilter matches datapoint dimensions; nil means "any dimensions".
	dimFilter filter.StringMapFilter
	// metricFilter matches the metric name; nil means "any metric".
	metricFilter filter.StringFilter
	// negated inverts the metric/dimension verdict (not the monitor-type gate).
	negated bool
}
// New returns a new filter with the given configuration. A nil dim or
// metric filter (empty input) means "match everything" for that aspect.
func New(monitorType string, metricNames []string, dimensions map[string][]string, negated bool) (DatapointFilter, error) {
	f := &basicDatapointFilter{
		monitorType: monitorType,
		negated:     negated,
	}
	if len(dimensions) > 0 {
		df, err := filter.NewStringMapFilter(dimensions)
		if err != nil {
			return nil, err
		}
		f.dimFilter = df
	}
	if len(metricNames) > 0 {
		mf, err := filter.NewBasicStringFilter(metricNames)
		if err != nil {
			return nil, err
		}
		f.metricFilter = mf
	}
	return f, nil
}
// Matches tests a datapoint to see whether it is excluded by this filter. In
// order to match on monitor type, the datapoint should have the "monitorType"
// key set in it's Meta field.
func (f *basicDatapointFilter) Matches(dp *datapoint.Datapoint) bool {
if dpMonitorType, ok := dp.Meta[dpmeta.MonitorTypeMeta].(string); ok {
if f.monitorType != "" && dpMonitorType != f.monitorType {
return false
}
} else {
// If we have a monitorType on the filter but none on the datapoint, it
// can never match.
if f.monitorType != "" {
return false
}
}
matched := (f.metricFilter == nil || f.metricFilter.Matches(dp.Metric)) &&
(f.dimFilter == nil || f.dimFilter.Matches(dp.Dimensions))
if f.negated {
return !matched
}
return matched
} | internal/core/dpfilters/filter.go | 0.764628 | 0.42913 | filter.go | starcoder |
package assert
import (
"math"
"reflect"
"testing"
)
// True fails the test unless actual is the bool value true.
func True(t *testing.T, description string, actual interface{}) {
	t.Helper()
	Eq(t, description, actual, true)
}
// False fails the test unless actual is the bool value false.
func False(t *testing.T, description string, actual interface{}) {
	t.Helper()
	Eq(t, description, actual, false)
}
// Nil fails the test unless actual is nil. Unlike a plain `actual != nil`
// comparison, a typed nil stored in the interface (a nil *T, map, slice,
// func, channel or interface) is also treated as nil, avoiding the classic
// "typed nil in a non-nil interface" trap.
func Nil(t *testing.T, description string, actual interface{}) {
	t.Helper()
	if actual == nil {
		return
	}
	v := reflect.ValueOf(actual)
	switch v.Kind() {
	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
		if v.IsNil() {
			return
		}
	}
	t.Fatalf("%v. Expected nil, was %v", description, actual)
}
// NotNil fails the test if actual is nil, including a typed nil stored in
// the interface (a nil *T, map, slice, func, channel or interface), which
// a plain `actual == nil` comparison would miss.
func NotNil(t *testing.T, description string, actual interface{}) {
	t.Helper()
	if actual == nil {
		t.Fatalf("%v. Expected not nil, was nil", description)
	}
	v := reflect.ValueOf(actual)
	switch v.Kind() {
	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
		if v.IsNil() {
			t.Fatalf("%v. Expected not nil, was nil", description)
		}
	}
}
// Eq fails the test unless actual and expected have the same dynamic type
// and are deeply equal (reflect.DeepEqual).
func Eq(t *testing.T, description string, actual interface{}, expected interface{}) {
	t.Helper()
	actualType := reflect.TypeOf(actual)
	expectedType := reflect.TypeOf(expected)
	if actualType != expectedType {
		// %v prints the type names; the previous %v/%T combination applied
		// %T to reflect.Type values, printing *reflect.rtype for both
		// operands and making the message useless.
		t.Fatalf("%v. Expected =>'%v'<=, was =>'%v'<=", description, expectedType, actualType)
	}
	if !reflect.DeepEqual(actual, expected) {
		t.Fatalf("%v. Expected =>'%v'<=, was =>'%v'<=", description, expected, actual)
	}
}
// NotEq fails the test if actual and expected have different dynamic types
// or are deeply equal. It asserts "same type, different value".
func NotEq(t *testing.T, description string, actual interface{}, expected interface{}) {
	t.Helper()
	actualType := reflect.TypeOf(actual)
	expectedType := reflect.TypeOf(expected)
	if actualType != expectedType {
		// %v prints the type names; %T on reflect.Type values would print
		// *reflect.rtype for both operands.
		t.Fatalf("%v. Expected %v, was %v", description, expectedType, actualType)
	}
	if reflect.DeepEqual(actual, expected) {
		t.Fatalf("%v. Expected not %v, was %v", description, expected, actual)
	}
}
// EqEpsilon fails the test unless actual and expected (any built-in numeric
// type) differ by at most epsilon.
func EqEpsilon(t *testing.T, actual interface{}, expected interface{}, epsilon float64) {
	t.Helper()
	got := asFloat64(t, actual)
	want := asFloat64(t, expected)
	delta := got - want
	if delta < 0 {
		delta = -delta
	}
	if delta > epsilon {
		t.Fatalf("Expected %f, was %f (+-%f)", want, got, epsilon)
	}
}
// EqSlice fails the test unless actual and expected are slices of equal
// length whose corresponding elements are within epsilon of each other.
func EqSlice(t *testing.T, actual interface{}, expected interface{}, epsilon float64) {
	t.Helper()
	asSlice := func(i interface{}) reflect.Value {
		if reflect.TypeOf(i).Kind() != reflect.Slice {
			t.Fatalf("Slice is expected, was %T", i)
		}
		return reflect.ValueOf(i)
	}
	act := asSlice(actual)
	exp := asSlice(expected)
	if exp.Len() != act.Len() {
		t.Fatalf("Slice len expected %v, was %v", exp.Len(), act.Len())
	}
	for i := 0; i < exp.Len(); i++ {
		EqEpsilon(t, act.Index(i).Interface(), exp.Index(i).Interface(), epsilon)
	}
}
// asFloat64 converts any built-in numeric value to float64, failing the
// test for unsupported (including named) types.
func asFloat64(t *testing.T, i interface{}) float64 {
	switch v := i.(type) {
	case float64:
		return v
	case float32:
		return float64(v)
	case int:
		return float64(v)
	case int8:
		return float64(v)
	case int16:
		return float64(v)
	case int32:
		return float64(v)
	case int64:
		return float64(v)
	case uint:
		return float64(v)
	case uint8:
		return float64(v)
	case uint16:
		return float64(v)
	case uint32:
		return float64(v)
	case uint64:
		return float64(v)
	default:
		t.Fatalf("Unsupported type %T for %v", i, i)
		return 0
	}
}
package conv
import (
"fmt"
"math/big"
"strings"
)
// BytesLe2Hex returns an hexadecimal string of a number stored in a
// little-endian order slice x. An empty slice yields "0x00".
func BytesLe2Hex(x []byte) string {
	if len(x) == 0 {
		return "0x00"
	}
	var sb strings.Builder
	sb.Grow(2*len(x) + 2)
	sb.WriteString("0x")
	// Emit bytes most-significant first.
	for i := len(x) - 1; i >= 0; i-- {
		fmt.Fprintf(&sb, "%02x", x[i])
	}
	return sb.String()
}
// BytesLe2BigInt converts a little-endian slice x into a big-endian
// math/big.Int.
func BytesLe2BigInt(x []byte) *big.Int {
n := len(x)
b := new(big.Int)
if len(x) > 0 {
y := make([]byte, n)
for i := 0; i < n; i++ {
y[n-1-i] = x[i]
}
b.SetBytes(y)
}
return b
}
// BigInt2BytesLe stores a positive big.Int number x into a little-endian slice z.
// The slice is modified if the bitlength of x <= 8*len(z) (padding with zeros).
// If x does not fit in the slice or is negative, z is not modified.
func BigInt2BytesLe(z []byte, x *big.Int) {
xLen := (x.BitLen() + 7) >> 3
zLen := len(z)
if zLen >= xLen && x.Sign() >= 0 {
y := x.Bytes()
for i := 0; i < xLen; i++ {
z[i] = y[xLen-1-i]
}
for i := xLen; i < zLen; i++ {
z[i] = 0
}
}
}
// Uint64Le2BigInt converts a little-endian slice x into a big number.
func Uint64Le2BigInt(x []uint64) *big.Int {
n := len(x)
b := new(big.Int)
var bi big.Int
for i := n - 1; i >= 0; i-- {
bi.SetUint64(x[i])
b.Lsh(b, 64)
b.Add(b, &bi)
}
return b
}
// Uint64Le2Hex returns an hexadecimal string of a number stored in a
// little-endian order slice x. An empty slice yields "0x00".
func Uint64Le2Hex(x []uint64) string {
	if len(x) == 0 {
		return "0x00"
	}
	var sb strings.Builder
	sb.Grow(16*len(x) + 2)
	sb.WriteString("0x")
	// Emit 64-bit words most-significant first, zero-padded to 16 digits.
	for i := len(x) - 1; i >= 0; i-- {
		fmt.Fprintf(&sb, "%016x", x[i])
	}
	return sb.String()
}
// BigInt2Uint64Le stores a positive big.Int number x into a little-endian slice z.
// The slice is modified if the bitlength of x <= 8*len(z) (padding with zeros).
// If x does not fit in the slice or is negative, z is not modified.
func BigInt2Uint64Le(z []uint64, x *big.Int) {
xLen := (x.BitLen() + 63) >> 6 // number of 64-bit words
zLen := len(z)
if zLen >= xLen && x.Sign() > 0 {
var y, yi big.Int
y.Set(x)
two64 := big.NewInt(1)
two64.Lsh(two64, 64).Sub(two64, big.NewInt(1))
for i := 0; i < xLen; i++ {
yi.And(&y, two64)
z[i] = yi.Uint64()
y.Rsh(&y, 64)
}
}
for i := xLen; i < zLen; i++ {
z[i] = 0
}
} | internal/conv/conv.go | 0.650911 | 0.475788 | conv.go | starcoder |
package check
import "fmt"
// MapStringBool is the type of a check function for a map from string to
// bool. It takes the map as a parameter and returns an error or nil if
// there is no error
type MapStringBool func(v map[string]bool) error
// MapStringBoolStringCheck returns a check function that checks that every
// key in the map passes the supplied String check func
func MapStringBoolStringCheck(sc String) MapStringBool {
	return func(v map[string]bool) error {
		for key := range v {
			err := sc(key)
			if err == nil {
				continue
			}
			return fmt.Errorf(
				"map entry: %q - the key does not pass the test: %s",
				key, err)
		}
		return nil
	}
}
// MapStringBoolContains returns a check function that checks that at least
// one key in the map matches the supplied String check func. The condition
// parameter should describe the check being performed; it is reported when
// no key passes.
func MapStringBoolContains(sc String, condition string) MapStringBool {
	return func(v map[string]bool) error {
		for key := range v {
			if sc(key) == nil {
				return nil
			}
		}
		return fmt.Errorf("none of the list entries passes the test: %s",
			condition)
	}
}
// MapStringBoolLenEQ returns a check function that checks that the number
// of entries in the map equals the supplied value
func MapStringBoolLenEQ(i int) MapStringBool {
	return func(v map[string]bool) error {
		if n := len(v); n != i {
			return fmt.Errorf("the number of entries (%d) must equal %d",
				n, i)
		}
		return nil
	}
}
// MapStringBoolLenGT returns a check function that checks that the number
// of entries in the map is greater than the supplied value
func MapStringBoolLenGT(i int) MapStringBool {
	return func(v map[string]bool) error {
		if n := len(v); n <= i {
			return fmt.Errorf(
				"the number of entries (%d) must be greater than %d",
				n, i)
		}
		return nil
	}
}
// MapStringBoolLenLT returns a check function that checks that the number
// of entries in the map is less than the supplied value
func MapStringBoolLenLT(i int) MapStringBool {
	return func(v map[string]bool) error {
		if n := len(v); n >= i {
			return fmt.Errorf("the number of entries (%d) must be less than %d",
				n, i)
		}
		return nil
	}
}
// MapStringBoolLenBetween returns a check function that checks that the
// number of entries in the map is between the two supplied values
// (inclusive). It panics if low >= high, which is a programming error.
func MapStringBoolLenBetween(low, high int) MapStringBool {
	if low >= high {
		panic(fmt.Sprintf(
			"Impossible checks passed to MapStringBoolLenBetween: "+
				"the lower limit (%d) should be less than the upper limit (%d)",
			low, high))
	}
	return func(v map[string]bool) error {
		n := len(v)
		switch {
		case n < low:
			return fmt.Errorf(
				"the number of entries in the map (%d)"+
					" must be between %d and %d - too short",
				n, low, high)
		case n > high:
			return fmt.Errorf(
				"the number of entries in the map (%d)"+
					" must be between %d and %d - too long",
				n, low, high)
		}
		return nil
	}
}
// MapStringBoolTrueCountEQ returns a check function that checks that the
// number of entries in the map set to true equals the supplied value
func MapStringBoolTrueCountEQ(i int) MapStringBool {
	return func(v map[string]bool) error {
		count := 0
		for _, isTrue := range v {
			if isTrue {
				count++
			}
		}
		if count != i {
			return fmt.Errorf(
				"the number of entries set to true (%d) must equal %d",
				count, i)
		}
		return nil
	}
}
// MapStringBoolTrueCountGT returns a check function that checks that the
// number of entries in the map set to true is greater than the supplied value
func MapStringBoolTrueCountGT(i int) MapStringBool {
	return func(v map[string]bool) error {
		count := 0
		for _, isTrue := range v {
			if isTrue {
				count++
			}
		}
		if count <= i {
			return fmt.Errorf(
				"the number of entries set to true (%d)"+
					" must be greater than %d",
				count, i)
		}
		return nil
	}
}
// MapStringBoolTrueCountLT returns a check function that checks that the
// number of entries in the map set to true is less than the supplied value
func MapStringBoolTrueCountLT(i int) MapStringBool {
	return func(v map[string]bool) error {
		count := 0
		for _, isTrue := range v {
			if isTrue {
				count++
			}
		}
		if count >= i {
			return fmt.Errorf(
				"the number of entries set to true (%d) must be less than %d",
				count, i)
		}
		return nil
	}
}
// MapStringBoolTrueCountBetween returns a check function that checks that
// the number of entries in the map set to true is between the two supplied
// values (inclusive). It panics if low >= high, which is a programming error.
func MapStringBoolTrueCountBetween(low, high int) MapStringBool {
	if low >= high {
		panic(fmt.Sprintf(
			"Impossible checks passed to MapStringBoolTrueCountBetween: "+
				"the lower limit (%d) should be less than the upper limit (%d)",
			low, high))
	}
	return func(v map[string]bool) error {
		count := 0
		for _, isTrue := range v {
			if isTrue {
				count++
			}
		}
		switch {
		case count < low:
			return fmt.Errorf(
				"the number of entries set to true (%d)"+
					" must be between %d and %d - too short",
				count, low, high)
		case count > high:
			return fmt.Errorf(
				"the number of entries set to true (%d)"+
					" must be between %d and %d - too long",
				count, low, high)
		}
		return nil
	}
}
// MapStringBoolOr returns a function that will check that the value, when
// passed to each of the check funcs in turn, passes at least one of them.
// On total failure the returned error combines every individual error.
func MapStringBoolOr(chkFuncs ...MapStringBool) MapStringBool {
	return func(v map[string]bool) error {
		msg := ""
		sep := "("
		for _, check := range chkFuncs {
			err := check(v)
			if err == nil {
				return nil
			}
			msg += sep + err.Error()
			sep = _Or
		}
		return fmt.Errorf("%s)", msg)
	}
}
// MapStringBoolAnd returns a function that will check that the value, when
// passed to each of the check funcs in turn, passes all of them. The first
// failing check's error is returned.
func MapStringBoolAnd(chkFuncs ...MapStringBool) MapStringBool {
	return func(v map[string]bool) error {
		for _, check := range chkFuncs {
			if err := check(v); err != nil {
				return err
			}
		}
		return nil
	}
}
// MapStringBoolNot returns a function that will check that the value, when
// passed to the check func, does not pass it. You must also supply the error
// text to appear after the value that fails. This error text should be a
// string that describes the quality that the slice of strings should not
// have.
func MapStringBoolNot(c MapStringBool, errMsg string) MapStringBool {
return func(v map[string]bool) error {
err := c(v)
if err != nil {
return nil
}
return fmt.Errorf("%v should not be %s", v, errMsg)
}
} | check/mapStringBool.go | 0.713032 | 0.554531 | mapStringBool.go | starcoder |
package bindgen
import (
"go/token"
"text/template"
"modernc.org/cc"
"modernc.org/xc"
)
// TypeKey is typically used as a representation of a C type that can be used as a key in a map
type TypeKey struct {
	IsPointer bool
	Kind cc.Kind
	Name string
}
// ParamKey is a representative of a param
type ParamKey struct {
	Name string
	Type TypeKey
}
// Template represents a template of conversion. An optional InContext() function may be provided to check if the template needs to be executed
type Template struct {
	*template.Template
	InContext func() bool
}
// Declaration is anything with a position
type Declaration interface {
	Position() token.Position
	Decl() *cc.Declarator
}
// Namer is implemented by anything that has a name.
type Namer interface {
	Name() string
}
// CSignature is a description of a C declaration.
type CSignature struct {
	Pos token.Pos
	Name string
	Return cc.Type
	CParameters []cc.Parameter
	Variadic bool
	Declarator *cc.Declarator
}
// Position returns the token position of the declaration.
func (d *CSignature) Position() token.Position { return xc.FileSet.Position(d.Pos) }
// Decl returns the underlying declarator of the declaration.
func (d *CSignature) Decl() *cc.Declarator { return d.Declarator }
// Parameters returns the declaration's CParameters converted to a []Parameter.
func (d *CSignature) Parameters() []Parameter {
	p := make([]Parameter, len(d.CParameters))
	for i, c := range d.CParameters {
		p[i] = Parameter{c, TypeDefOf(c.Type)}
	}
	return p
}
// Parameter is a C function parameter. It embeds the cc.Parameter and
// carries the typedef name of its type, when one exists.
type Parameter struct {
	cc.Parameter
	TypeDefName string // can be empty
}
// Name returns the name of the parameter.
func (p *Parameter) Name() string { return string(xc.Dict.S(p.Parameter.Name)) }
// Type returns the C type of the parameter.
func (p *Parameter) Type() cc.Type { return p.Parameter.Type }
// Kind returns the C kind of the parameter.
func (p *Parameter) Kind() cc.Kind { return p.Parameter.Type.Kind() }
// Elem returns the pointer type of a pointer parameter or the element type of an
// array parameter.
func (p *Parameter) Elem() cc.Type { return p.Parameter.Type.Element() }
// IsPointer returns true if the parameter represents a pointer
func (p *Parameter) IsPointer() bool { return IsPointer(p.Parameter.Type) }
// Enum is a description of a C enum
type Enum struct {
	Pos token.Pos
	Name string
	Type cc.Type
	Declarator *cc.Declarator
}
// Position returns the token position of the enum declaration.
func (d *Enum) Position() token.Position { return xc.FileSet.Position(d.Pos) }
// Decl returns the underlying declarator of the enum.
func (d *Enum) Decl() *cc.Declarator { return d.Declarator }
// Other represents other types that are not part of the "batteries included"ness of this package
type Other struct {
	Pos token.Pos
	Name string
	Declarator *cc.Declarator
}
// Position returns the token position of the declaration.
func (d *Other) Position() token.Position { return xc.FileSet.Position(d.Pos) }
// Decl returns the underlying declarator of the declaration.
func (d *Other) Decl() *cc.Declarator { return d.Declarator }
package main
import (
"context"
"math"
"runtime"
"golang.org/x/sync/errgroup"
)
// msgSmartCropNotSupported is logged when smart gravity is requested but the
// linked libvips build does not support smartcrop.
const msgSmartCropNotSupported = "Smart crop is not supported by used version of libvips"
// extractMeta reads the image dimensions and orientation tag and returns
// the display width/height plus the rotation angle and flip flag needed to
// upright the image.
func extractMeta(img *vipsImage) (int, int, int, bool) {
	width := img.Width()
	height := img.Height()
	orientation := img.Orientation()

	// Orientations 5-8 are transposed: width and height trade places.
	if orientation >= 5 && orientation <= 8 {
		width, height = height, width
	}

	angle := vipsAngleD0
	flip := false
	switch orientation {
	case 2:
		flip = true
	case 3:
		angle = vipsAngleD180
	case 4:
		angle = vipsAngleD180
		flip = true
	case 5:
		angle = vipsAngleD90
		flip = true
	case 6:
		angle = vipsAngleD90
	case 7:
		angle = vipsAngleD270
		flip = true
	case 8:
		angle = vipsAngleD270
	}
	return width, height, angle, flip
}
// calcScale computes the resize factor that maps the source width/height
// onto the requested po.Width/po.Height, honouring the resize mode, the
// Enlarge flag and the device pixel ratio (Dpr).
func calcScale(width, height int, po *processingOptions, imgtype imageType) float64 {
	var scale float64
	srcW, srcH := float64(width), float64(height)
	// Fast path: requested dimensions are unset or already match the source.
	if (po.Width == 0 || po.Width == width) && (po.Height == 0 || po.Height == height) {
		scale = 1
	} else {
		wr := float64(po.Width) / srcW
		hr := float64(po.Height) / srcH
		rt := po.Resize
		if rt == resizeAuto {
			// Auto mode: fill when source and target share the same
			// orientation (both width >= height or both width < height),
			// fit otherwise.
			srcD := width - height
			dstD := po.Width - po.Height
			if (srcD >= 0 && dstD >= 0) || (srcD < 0 && dstD < 0) {
				rt = resizeFill
			} else {
				rt = resizeFit
			}
		}
		if po.Width == 0 {
			scale = hr
		} else if po.Height == 0 {
			scale = wr
		} else if rt == resizeFit {
			scale = math.Min(wr, hr)
		} else {
			scale = math.Max(wr, hr)
		}
	}
	// Never upscale raster images unless explicitly allowed; SVG is exempt.
	if !po.Enlarge && scale > 1 && imgtype != imageTypeSVG {
		scale = 1
	}
	scale = scale * po.Dpr
	// Keep the scaled image at least 1px in each dimension.
	if srcW*scale < 1 {
		scale = 1 / srcW
	}
	if srcH*scale < 1 {
		scale = 1 / srcH
	}
	return scale
}
// canScaleOnLoad reports whether the decoder can perform downscaling while
// loading this image type at the given scale factor.
func canScaleOnLoad(imgtype imageType, scale float64) bool {
	switch {
	case imgtype == imageTypeSVG:
		// SVG is vector data and can be rendered at any scale.
		return true
	case conf.DisableShrinkOnLoad || scale >= 1:
		return false
	default:
		return imgtype == imageTypeJPEG || imgtype == imageTypeWEBP
	}
}
// calcJpegShink converts a scale factor into the nearest shrink-on-load
// divisor usable for JPEG decoding (8, 4, 2 or 1).
func calcJpegShink(scale float64, imgtype imageType) int {
	shrink := int(1.0 / scale)
	for _, s := range []int{8, 4, 2} {
		if shrink >= s {
			return s
		}
	}
	return 1
}
// calcCrop returns the left/top offset of a cropWidth x cropHeight window
// inside a width x height image, positioned according to gravity. The
// result is clamped so the window stays inside the image.
func calcCrop(width, height, cropWidth, cropHeight int, gravity *gravityOptions) (left, top int) {
	if gravity.Type == gravityFocusPoint {
		// For focus-point gravity X/Y are relative (0..1) coordinates;
		// center the window on the point as far as the borders allow.
		pointX := int(float64(width) * gravity.X)
		pointY := int(float64(height) * gravity.Y)
		left = maxInt(0, minInt(pointX-cropWidth/2, width-cropWidth))
		top = maxInt(0, minInt(pointY-cropHeight/2, height-cropHeight))
		return
	}
	// For edge/corner gravities X/Y are absolute pixel offsets.
	offX, offY := int(gravity.X), int(gravity.Y)
	// Start from the centered position, then snap to the requested edges.
	left = (width-cropWidth+1)/2 + offX
	top = (height-cropHeight+1)/2 + offY
	if gravity.Type == gravityNorth || gravity.Type == gravityNorthEast || gravity.Type == gravityNorthWest {
		top = 0 + offY
	}
	if gravity.Type == gravityEast || gravity.Type == gravityNorthEast || gravity.Type == gravitySouthEast {
		left = width - cropWidth - offX
	}
	if gravity.Type == gravitySouth || gravity.Type == gravitySouthEast || gravity.Type == gravitySouthWest {
		top = height - cropHeight - offY
	}
	if gravity.Type == gravityWest || gravity.Type == gravityNorthWest || gravity.Type == gravitySouthWest {
		left = 0 + offX
	}
	// Clamp the window into the image bounds.
	left = maxInt(0, minInt(left, width-cropWidth))
	top = maxInt(0, minInt(top, height-cropHeight))
	return
}
func cropImage(img *vipsImage, cropWidth, cropHeight int, gravity *gravityOptions) error {
if cropWidth == 0 && cropHeight == 0 {
return nil
}
imgWidth, imgHeight := img.Width(), img.Height()
if cropWidth == 0 {
cropWidth = imgWidth
} else {
cropWidth = minInt(cropWidth, imgWidth)
}
if cropHeight == 0 {
cropHeight = imgHeight
} else {
cropHeight = minInt(cropHeight, imgHeight)
}
if cropWidth >= imgWidth && cropHeight >= imgHeight {
return nil
}
if gravity.Type == gravitySmart {
if err := img.CopyMemory(); err != nil {
return err
}
if err := img.SmartCrop(cropWidth, cropHeight); err != nil {
return err
}
// Applying additional modifications after smart crop causes SIGSEGV on Alpine
// so we have to copy memory after it
return img.CopyMemory()
}
left, top := calcCrop(imgWidth, imgHeight, cropWidth, cropHeight, gravity)
return img.Crop(left, top, cropWidth, cropHeight)
}
// scaleSize multiplies size by scale and rounds to the nearest int,
// preserving 0 as a "not set" sentinel.
func scaleSize(size int, scale float64) int {
	if size != 0 {
		return roundToInt(float64(size) * scale)
	}
	return 0
}
// transformImage applies the single-frame processing pipeline to img:
// optional scale/shrink-on-load, colourspace handling, resize, EXIF
// rotation/flip, cropping, flatten/blur/sharpen/extend and watermarking.
// data may be nil; when non-nil it allows reloading the source for
// scale-on-load optimizations. checkTimeout panics/aborts via ctx between
// the expensive stages.
func transformImage(ctx context.Context, img *vipsImage, data []byte, po *processingOptions, imgtype imageType) error {
	var err error
	srcWidth, srcHeight, angle, flip := extractMeta(img)
	// Crop settings fall back to the global gravity when not set explicitly.
	cropWidth, cropHeight := po.Crop.Width, po.Crop.Height
	cropGravity := po.Crop.Gravity
	if cropGravity.Type == gravityUnknown {
		cropGravity = po.Gravity
	}
	// The scale factor is computed against the post-crop dimensions.
	widthToScale, heightToScale := srcWidth, srcHeight
	if cropWidth > 0 {
		widthToScale = minInt(cropWidth, srcWidth)
	}
	if cropHeight > 0 {
		heightToScale = minInt(cropHeight, srcHeight)
	}
	scale := calcScale(widthToScale, heightToScale, po, imgtype)
	// Crop parameters must track the scaled coordinate space.
	cropWidth = scaleSize(cropWidth, scale)
	cropHeight = scaleSize(cropHeight, scale)
	cropGravity.X = cropGravity.X * scale
	cropGravity.Y = cropGravity.Y * scale
	if scale != 1 && data != nil && canScaleOnLoad(imgtype, scale) {
		if imgtype == imageTypeWEBP || imgtype == imageTypeSVG {
			// Do some scale-on-load
			if err := img.Load(data, imgtype, 1, scale, 1); err != nil {
				return err
			}
		} else if imgtype == imageTypeJPEG {
			// Do some shrink-on-load
			if shrink := calcJpegShink(scale, imgtype); shrink != 1 {
				if err := img.Load(data, imgtype, shrink, 1.0, 1); err != nil {
					return err
				}
			}
		}
		// Update scale after scale-on-load
		newWidth, newHeight, _, _ := extractMeta(img)
		widthToScale = scaleSize(widthToScale, float64(newWidth)/float64(srcWidth))
		heightToScale = scaleSize(heightToScale, float64(newHeight)/float64(srcHeight))
		scale = calcScale(widthToScale, heightToScale, po, imgtype)
	}
	if err = img.Rad2Float(); err != nil {
		return err
	}
	// Import the ICC profile early when we resize in linear light or the
	// source is not sRGB; otherwise defer it until after the resize.
	iccImported := false
	convertToLinear := conf.UseLinearColorspace && (scale != 1 || po.Dpr != 1)
	if convertToLinear || !img.IsSRGB() {
		if err = img.ImportColourProfile(true); err != nil {
			return err
		}
		iccImported = true
	}
	if convertToLinear {
		if err = img.LinearColourspace(); err != nil {
			return err
		}
	} else {
		if err = img.RgbColourspace(); err != nil {
			return err
		}
	}
	hasAlpha := img.HasAlpha()
	if scale != 1 {
		if err = img.Resize(scale, hasAlpha); err != nil {
			return err
		}
	}
	checkTimeout(ctx)
	if angle != vipsAngleD0 || flip {
		if err = img.CopyMemory(); err != nil {
			return err
		}
		if angle != vipsAngleD0 {
			if err = img.Rotate(angle); err != nil {
				return err
			}
		}
		if flip {
			if err = img.Flip(); err != nil {
				return err
			}
		}
	}
	checkTimeout(ctx)
	// Target dimensions in device pixels.
	dprWidth := roundToInt(float64(po.Width) * po.Dpr)
	dprHeight := roundToInt(float64(po.Height) * po.Dpr)
	if cropGravity.Type == po.Gravity.Type && cropGravity.Type != gravityFocusPoint {
		// Same gravity for crop and final extract: merge both crops into one.
		if cropWidth == 0 {
			cropWidth = dprWidth
		} else if dprWidth > 0 {
			cropWidth = minInt(cropWidth, dprWidth)
		}
		if cropHeight == 0 {
			cropHeight = dprHeight
		} else if dprHeight > 0 {
			cropHeight = minInt(cropHeight, dprHeight)
		}
		sumGravity := gravityOptions{
			Type: cropGravity.Type,
			X: cropGravity.X + po.Gravity.X,
			Y: cropGravity.Y + po.Gravity.Y,
		}
		if err = cropImage(img, cropWidth, cropHeight, &sumGravity); err != nil {
			return err
		}
	} else {
		// Different gravities: crop twice, first by the crop option, then
		// down to the requested size.
		if err = cropImage(img, cropWidth, cropHeight, &cropGravity); err != nil {
			return err
		}
		if err = cropImage(img, dprWidth, dprHeight, &po.Gravity); err != nil {
			return err
		}
	}
	checkTimeout(ctx)
	if !iccImported {
		if err = img.ImportColourProfile(false); err != nil {
			return err
		}
	}
	if err = img.RgbColourspace(); err != nil {
		return err
	}
	// JPEG has no alpha channel, so flatten onto the background colour.
	if hasAlpha && (po.Flatten || po.Format == imageTypeJPEG) {
		if err = img.Flatten(po.Background); err != nil {
			return err
		}
	}
	if po.Blur > 0 {
		if err = img.Blur(po.Blur); err != nil {
			return err
		}
	}
	if po.Sharpen > 0 {
		if err = img.Sharpen(po.Sharpen); err != nil {
			return err
		}
	}
	if po.Extend && (po.Width > img.Width() || po.Height > img.Height()) {
		if err = img.Embed(gravityCenter, po.Width, po.Height, 0, 0, po.Background); err != nil {
			return err
		}
	}
	checkTimeout(ctx)
	if po.Watermark.Enabled {
		if err = img.ApplyWatermark(&po.Watermark); err != nil {
			return err
		}
	}
	return img.RgbColourspace()
}
// transformAnimated processes every frame of an animated image through
// transformImage concurrently and reassembles the result, carrying over the
// gif-delay/gif-loop metadata.
func transformAnimated(ctx context.Context, img *vipsImage, data []byte, po *processingOptions, imgtype imageType) error {
	imgWidth := img.Width()
	// Frames are stacked vertically; page-height is the height of one frame.
	frameHeight, err := img.GetInt("page-height")
	if err != nil {
		return err
	}
	framesCount := minInt(img.Height()/frameHeight, conf.MaxAnimationFrames)
	// Double check dimensions because animated image has many frames
	if err := checkDimensions(imgWidth, frameHeight*framesCount); err != nil {
		return err
	}
	// Vips 8.8+ supports n-pages and doesn't load the whole animated image on header access
	if nPages, _ := img.GetInt("n-pages"); nPages > 0 {
		scale := 1.0
		// Don't do scale on load if we need to crop
		if po.Crop.Width == 0 && po.Crop.Height == 0 {
			scale = calcScale(imgWidth, frameHeight, po, imgtype)
		}
		if nPages > framesCount || canScaleOnLoad(imgtype, scale) {
			logNotice("Animated scale on load")
			// Do some scale-on-load and load only the needed frames
			if err := img.Load(data, imgtype, 1, scale, framesCount); err != nil {
				return err
			}
		}
		// Refresh dimensions after the reload.
		imgWidth = img.Width()
		frameHeight, err = img.GetInt("page-height")
		if err != nil {
			return err
		}
	}
	delay, err := img.GetInt("gif-delay")
	if err != nil {
		return err
	}
	loop, err := img.GetInt("gif-loop")
	if err != nil {
		return err
	}
	frames := make([]*vipsImage, framesCount)
	defer func() {
		for _, frame := range frames {
			frame.Clear()
		}
	}()
	// Extract and transform every frame concurrently; each goroutine owns
	// one slot of the frames slice.
	var errg errgroup.Group
	for i := 0; i < framesCount; i++ {
		ind := i
		errg.Go(func() error {
			frame := new(vipsImage)
			if err := img.Extract(frame, 0, ind*frameHeight, imgWidth, frameHeight); err != nil {
				return err
			}
			if err := transformImage(ctx, frame, nil, po, imgtype); err != nil {
				return err
			}
			frames[ind] = frame
			return nil
		})
	}
	if err := errg.Wait(); err != nil {
		return err
	}
	checkTimeout(ctx)
	// Stack the processed frames back into a single animated image.
	if err := img.Arrayjoin(frames); err != nil {
		return err
	}
	img.SetInt("page-height", frames[0].Height())
	img.SetInt("gif-delay", delay)
	img.SetInt("gif-loop", loop)
	img.SetInt("n-pages", framesCount)
	return nil
}
// processImage runs the whole processing pipeline for the request bound to
// ctx and returns the encoded image bytes together with the cleanup
// function produced by img.Save.
func processImage(ctx context.Context) ([]byte, context.CancelFunc, error) {
	// libvips work must stay on one OS thread.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	if newRelicEnabled {
		newRelicCancel := startNewRelicSegment(ctx, "Processing image")
		defer newRelicCancel()
	}
	if prometheusEnabled {
		defer startPrometheusDuration(prometheusProcessingDuration)()
	}
	defer vipsCleanup()
	po := getProcessingOptions(ctx)
	data := getImageData(ctx).Bytes()
	imgtype := getImageType(ctx)
	// Resolve the output format when the request doesn't pin one.
	if po.Format == imageTypeUnknown {
		if po.PreferWebP && vipsTypeSupportSave[imageTypeWEBP] {
			po.Format = imageTypeWEBP
		} else if vipsTypeSupportSave[imgtype] && imgtype != imageTypeHEIC {
			po.Format = imgtype
		} else {
			po.Format = imageTypeJPEG
		}
	} else if po.EnforceWebP && vipsTypeSupportSave[imageTypeWEBP] {
		po.Format = imageTypeWEBP
	}
	// SVG output is passed through untouched.
	if po.Format == imageTypeSVG {
		return data, func() {}, nil
	}
	// Downgrade smart gravity to center when the libvips build lacks it.
	if !vipsSupportSmartcrop {
		if po.Gravity.Type == gravitySmart {
			logWarning(msgSmartCropNotSupported)
			po.Gravity.Type = gravityCenter
		}
		if po.Crop.Gravity.Type == gravitySmart {
			logWarning(msgSmartCropNotSupported)
			po.Crop.Gravity.Type = gravityCenter
		}
	}
	// Rewrite the deprecated `crop` resize type into the crop option.
	if po.Resize == resizeCrop {
		logWarning("`crop` resizing type is deprecated and will be removed in future versions. Use `crop` processing option instead")
		po.Crop.Width, po.Crop.Height = po.Width, po.Height
		po.Resize = resizeFit
		po.Width, po.Height = 0, 0
	}
	animationSupport := conf.MaxAnimationFrames > 1 && vipsSupportAnimation(imgtype) && vipsSupportAnimation(po.Format)
	// pages == -1 loads all pages of a multi-page (animated) source.
	pages := 1
	if animationSupport {
		pages = -1
	}
	img := new(vipsImage)
	defer img.Clear()
	if err := img.Load(data, imgtype, 1, 1.0, pages); err != nil {
		return nil, func() {}, err
	}
	if animationSupport && img.IsAnimated() {
		if err := transformAnimated(ctx, img, data, po, imgtype); err != nil {
			return nil, func() {}, err
		}
	} else {
		if err := transformImage(ctx, img, data, po, imgtype); err != nil {
			return nil, func() {}, err
		}
	}
	checkTimeout(ctx)
	if po.Format == imageTypeGIF {
		if err := img.CastUchar(); err != nil {
			return nil, func() {}, err
		}
		checkTimeout(ctx)
	}
	return img.Save(po.Format, po.Quality)
}
package ahrs
import "math"
// Default proportional (Kp) and integral (Ki) gains for the Mahony filter.
const (
	MahonyDefaultKp = 0.2
	MahonyDefaultKi = 0.1
)
// Mahony holds the state of a Mahony AHRS orientation filter.
type Mahony struct {
	// Filter gains, stored pre-doubled (2*Kp, 2*Ki).
	twoKp, twoKi float64
	// Accumulated integral feedback terms (used when twoKi > 0).
	integralFBx, integralFBy, integralFBz float64
	// SampleFreq is the update rate in samples per second.
	SampleFreq float64
	// Quaternions is the current orientation estimate, scalar-first
	// ([w, x, y, z]), initialised to the identity rotation.
	Quaternions [4]float64
}
// NewMahony initiates a Mahony struct with the given gains and sample
// frequency, starting from the identity quaternion.
func NewMahony(kp, ki, sampleFreq float64) Mahony {
	m := Mahony{
		SampleFreq:  sampleFreq,
		Quaternions: [4]float64{1, 0, 0, 0},
	}
	m.twoKp = 2 * kp
	m.twoKi = 2 * ki
	return m
}
// NewDefaultMahony initiates a Mahony struct with the package-default Kp
// and Ki gains, starting from the identity quaternion.
func NewDefaultMahony(sampleFreq float64) Mahony {
	m := Mahony{
		SampleFreq:  sampleFreq,
		Quaternions: [4]float64{1, 0, 0, 0},
	}
	m.twoKp = 2 * MahonyDefaultKp
	m.twoKi = 2 * MahonyDefaultKi
	return m
}
// Update9D feeds one sample of gyroscope, accelerometer and magnetometer
// data into the filter and returns the updated orientation quaternion
// (scalar-first: [w, x, y, z]). Accelerometer and magnetometer vectors are
// normalised internally, so their units do not matter; gyro rates are
// assumed to be in rad/s (standard for Mahony filters — TODO confirm with
// callers).
func (m *Mahony) Update9D(gx, gy, gz, ax, ay, az, mx, my, mz float64) [4]float64 {
	var recipNorm float64
	var q0q0, q0q1, q0q2, q0q3, q1q1, q1q2, q1q3, q2q2, q2q3, q3q3 float64
	var hx, hy, bx, bz float64
	var halfvx, halfvy, halfvz, halfwx, halfwy, halfwz float64
	var halfex, halfey, halfez float64
	var qa, qb, qc float64
	q0 := m.Quaternions[0]
	q1 := m.Quaternions[1]
	q2 := m.Quaternions[2]
	q3 := m.Quaternions[3]
	integralFBx := m.integralFBx
	integralFBy := m.integralFBy
	integralFBz := m.integralFBz
	twoKi := m.twoKi
	twoKp := m.twoKp
	sampleFreq := m.SampleFreq
	// Compute feedback only if accelerometer measurement valid (avoids NaN in accelerometer normalisation)
	if !(ax == 0.0 && ay == 0.0 && az == 0.0) {
		// Normalise accelerometer measurement
		recipNorm = invSqrt(ax*ax + ay*ay + az*az)
		ax *= recipNorm
		ay *= recipNorm
		az *= recipNorm
		// Normalise magnetometer measurement
		recipNorm = invSqrt(mx*mx + my*my + mz*mz)
		mx *= recipNorm
		my *= recipNorm
		mz *= recipNorm
		// Auxiliary variables to avoid repeated arithmetic
		q0q0 = q0 * q0
		q0q1 = q0 * q1
		q0q2 = q0 * q2
		q0q3 = q0 * q3
		q1q1 = q1 * q1
		q1q2 = q1 * q2
		q1q3 = q1 * q3
		q2q2 = q2 * q2
		q2q3 = q2 * q3
		q3q3 = q3 * q3
		// Reference direction of Earth's magnetic field
		hx = 2.0 * (mx*(0.5-q2q2-q3q3) + my*(q1q2-q0q3) + mz*(q1q3+q0q2))
		hy = 2.0 * (mx*(q1q2+q0q3) + my*(0.5-q1q1-q3q3) + mz*(q2q3-q0q1))
		bx = math.Sqrt(hx*hx + hy*hy)
		bz = 2.0 * (mx*(q1q3-q0q2) + my*(q2q3+q0q1) + mz*(0.5-q1q1-q2q2))
		// Estimated direction of gravity and magnetic field
		halfvx = q1q3 - q0q2
		halfvy = q0q1 + q2q3
		halfvz = q0q0 - 0.5 + q3q3
		halfwx = bx*(0.5-q2q2-q3q3) + bz*(q1q3-q0q2)
		halfwy = bx*(q1q2-q0q3) + bz*(q0q1+q2q3)
		halfwz = bx*(q0q2+q1q3) + bz*(0.5-q1q1-q2q2)
		// Error is sum of cross product between estimated direction and measured direction of field vectors
		halfex = (ay*halfvz - az*halfvy) + (my*halfwz - mz*halfwy)
		halfey = (az*halfvx - ax*halfvz) + (mz*halfwx - mx*halfwz)
		halfez = (ax*halfvy - ay*halfvx) + (mx*halfwy - my*halfwx)
		// Compute and apply integral feedback if enabled
		if twoKi > 0.0 {
			integralFBx += twoKi * halfex * (1.0 / sampleFreq) // integral error scaled by Ki
			integralFBy += twoKi * halfey * (1.0 / sampleFreq)
			integralFBz += twoKi * halfez * (1.0 / sampleFreq)
			gx += integralFBx // apply integral feedback
			gy += integralFBy
			gz += integralFBz
		} else {
			integralFBx = 0.0 // prevent integral windup
			integralFBy = 0.0
			integralFBz = 0.0
		}
		// Apply proportional feedback
		gx += twoKp * halfex
		gy += twoKp * halfey
		gz += twoKp * halfez
	}
	// Persist the accumulated integral feedback so it survives across
	// calls. Previously the local copies were discarded, which silently
	// disabled the Ki term (it reset to its stored value every sample);
	// the reference Mahony implementation keeps integralFBx/y/z as
	// persistent state.
	m.integralFBx = integralFBx
	m.integralFBy = integralFBy
	m.integralFBz = integralFBz
	// Integrate rate of change of quaternion
	gx *= (0.5 * (1.0 / sampleFreq)) // pre-multiply common factors
	gy *= (0.5 * (1.0 / sampleFreq))
	gz *= (0.5 * (1.0 / sampleFreq))
	qa = q0
	qb = q1
	qc = q2
	q0 += (-qb*gx - qc*gy - q3*gz)
	q1 += (qa*gx + qc*gz - q3*gy)
	q2 += (qa*gy - qb*gz + q3*gx)
	q3 += (qa*gz + qb*gy - qc*gx)
	// Normalise quaternion
	recipNorm = invSqrt(q0*q0 + q1*q1 + q2*q2 + q3*q3)
	m.Quaternions[0] = q0 * recipNorm
	m.Quaternions[1] = q1 * recipNorm
	m.Quaternions[2] = q2 * recipNorm
	m.Quaternions[3] = q3 * recipNorm
	return m.Quaternions
}
// Update6D updates position using 6D, returning quaternions
func (m *Mahony) Update6D(gx, gy, gz, ax, ay, az float64) [4]float64 {
var recipNorm float64
var halfvx, halfvy, halfvz float64
var halfex, halfey, halfez float64
var qa, qb, qc float64
q0 := m.Quaternions[0]
q1 := m.Quaternions[1]
q2 := m.Quaternions[2]
q3 := m.Quaternions[3]
integralFBx := m.integralFBx
integralFBy := m.integralFBy
integralFBz := m.integralFBz
twoKi := m.twoKi
twoKp := m.twoKp
sampleFreq := m.SampleFreq
// Compute feedback only if accelerometer measurement valid (avoids NaN in accelerometer normalisation)
if !(ax == 0.0 && ay == 0.0 && az == 0.0) {
// Normalise accelerometer measurement
recipNorm = invSqrt(ax*ax + ay*ay + az*az)
ax *= recipNorm
ay *= recipNorm
az *= recipNorm
// Estimated direction of gravity and vector perpendicular to magnetic flux
halfvx = q1*q3 - q0*q2
halfvy = q0*q1 + q2*q3
halfvz = q0*q0 - 0.5 + q3*q3
// Error is sum of cross product between estimated and measured direction of gravity
halfex = (ay*halfvz - az*halfvy)
halfey = (az*halfvx - ax*halfvz)
halfez = (ax*halfvy - ay*halfvx)
// Compute and apply integral feedback if enabled
if twoKi > 0.0 {
integralFBx += twoKi * halfex * (1.0 / sampleFreq) // integral error scaled by Ki
integralFBy += twoKi * halfey * (1.0 / sampleFreq)
integralFBz += twoKi * halfez * (1.0 / sampleFreq)
gx += integralFBx // apply integral feedback
gy += integralFBy
gz += integralFBz
} else {
integralFBx = 0.0 // prevent integral windup
integralFBy = 0.0
integralFBz = 0.0
}
// Apply proportional feedback
gx += twoKp * halfex
gy += twoKp * halfey
gz += twoKp * halfez
}
// Integrate rate of change of quaternion
gx *= (0.5 * (1.0 / sampleFreq)) // pre-multiply common factors
gy *= (0.5 * (1.0 / sampleFreq))
gz *= (0.5 * (1.0 / sampleFreq))
qa = q0
qb = q1
qc = q2
q0 += (-qb*gx - qc*gy - q3*gz)
q1 += (qa*gx + qc*gz - q3*gy)
q2 += (qa*gy - qb*gz + q3*gx)
q3 += (qa*gz + qb*gy - qc*gx)
// Normalise quaternion
recipNorm = invSqrt(q0*q0 + q1*q1 + q2*q2 + q3*q3)
m.Quaternions[0] = q0 * recipNorm
m.Quaternions[1] = q1 * recipNorm
m.Quaternions[2] = q2 * recipNorm
m.Quaternions[3] = q3 * recipNorm
return m.Quaternions
} | mahony.go | 0.746139 | 0.487368 | mahony.go | starcoder |
package steps
import (
"github.com/cucumber/godog"
"github.com/kiegroup/kogito-cloud-operator/test/smoke/framework"
)
// registerKogitoInfraSteps register all Kogito Infra steps existing
func registerKogitoInfraSteps(s *godog.Suite, data *Data) {
	// Table-driven registration: each expression is bound to its handler in
	// declaration order, exactly as the previous explicit s.Step calls did.
	definitions := []struct {
		expression string
		handler    interface{}
	}{
		{`^Install Kogito Infra Infinispan$`, data.installKogitoInfraInfinispan},
		{`^Install Kogito Infra Kafka$`, data.installKogitoInfraKafka},
		{`^Install Kogito Infra Keycloak$`, data.installKogitoInfraKeycloak},
		{`^Remove Kogito Infra Infinispan$`, data.removeKogitoInfraInfinispan},
		{`^Remove Kogito Infra Kafka$`, data.removeKogitoInfraKafka},
		{`^Remove Kogito Infra Keycloak$`, data.removeKogitoInfraKeycloak},
		{`^Kogito Infra Infinispan should be running within (\d+) minutes$`, data.kogitoInfraInfinispanShouldBeRunningWithinMinutes},
		{`^Kogito Infra Kafka should be running within (\d+) minutes$`, data.kogitoInfraKafkaShouldBeRunningWithinMinutes},
		{`^Kogito Infra Keycloak should be running within (\d+) minutes$`, data.kogitoInfraKeycloakShouldBeRunningWithinMinutes},
		{`^Kogito Infra Infinispan should NOT be running within (\d+) minutes$`, data.kogitoInfraInfinispanShouldNOTBeRunningWithinMinutes},
		{`^Kogito Infra Kafka should NOT be running within (\d+) minutes$`, data.kogitoInfraKafkaShouldNOTBeRunningWithinMinutes},
		{`^Kogito Infra Keycloak should NOT be running within (\d+) minutes$`, data.kogitoInfraKeycloakShouldNOTBeRunningWithinMinutes},
	}
	for _, d := range definitions {
		s.Step(d.expression, d.handler)
	}
}
// installKogitoInfraInfinispan deploys the Infinispan Kogito Infra component into the scenario namespace.
func (data *Data) installKogitoInfraInfinispan() error {
	return framework.InstallKogitoInfraInfinispan(data.Namespace)
}
// installKogitoInfraKafka deploys the Kafka Kogito Infra component into the scenario namespace.
func (data *Data) installKogitoInfraKafka() error {
	return framework.InstallKogitoInfraKafka(data.Namespace)
}
// installKogitoInfraKeycloak deploys the Keycloak Kogito Infra component into the scenario namespace.
func (data *Data) installKogitoInfraKeycloak() error {
	return framework.InstallKogitoInfraKeycloak(data.Namespace)
}
// removeKogitoInfraInfinispan removes the Infinispan Kogito Infra component from the scenario namespace.
func (data *Data) removeKogitoInfraInfinispan() error {
	return framework.RemoveKogitoInfraInfinispan(data.Namespace)
}
// removeKogitoInfraKafka removes the Kafka Kogito Infra component from the scenario namespace.
func (data *Data) removeKogitoInfraKafka() error {
	return framework.RemoveKogitoInfraKafka(data.Namespace)
}
// removeKogitoInfraKeycloak removes the Keycloak Kogito Infra component from the scenario namespace.
func (data *Data) removeKogitoInfraKeycloak() error {
	return framework.RemoveKogitoInfraKeycloak(data.Namespace)
}
// kogitoInfraInfinispanShouldBeRunningWithinMinutes waits up to timeoutInMin for Infinispan infra to be running.
func (data *Data) kogitoInfraInfinispanShouldBeRunningWithinMinutes(timeoutInMin int) error {
	return framework.WaitForKogitoInfraInfinispan(data.Namespace, true, timeoutInMin)
}
// kogitoInfraKafkaShouldBeRunningWithinMinutes waits up to timeoutInMin for Kafka infra to be running.
func (data *Data) kogitoInfraKafkaShouldBeRunningWithinMinutes(timeoutInMin int) error {
	return framework.WaitForKogitoInfraKafka(data.Namespace, true, timeoutInMin)
}
// kogitoInfraKeycloakShouldBeRunningWithinMinutes waits up to timeoutInMin for Keycloak infra to be running.
func (data *Data) kogitoInfraKeycloakShouldBeRunningWithinMinutes(timeoutInMin int) error {
	return framework.WaitForKogitoInfraKeycloak(data.Namespace, true, timeoutInMin)
}
// kogitoInfraInfinispanShouldNOTBeRunningWithinMinutes waits up to timeoutInMin for Infinispan infra to be gone.
func (data *Data) kogitoInfraInfinispanShouldNOTBeRunningWithinMinutes(timeoutInMin int) error {
	return framework.WaitForKogitoInfraInfinispan(data.Namespace, false, timeoutInMin)
}
// kogitoInfraKafkaShouldNOTBeRunningWithinMinutes waits up to timeoutInMin for Kafka infra to be gone.
func (data *Data) kogitoInfraKafkaShouldNOTBeRunningWithinMinutes(timeoutInMin int) error {
	return framework.WaitForKogitoInfraKafka(data.Namespace, false, timeoutInMin)
}
// kogitoInfraKeycloakShouldNOTBeRunningWithinMinutes waits up to timeoutInMin for Keycloak infra to be gone.
func (data *Data) kogitoInfraKeycloakShouldNOTBeRunningWithinMinutes(timeoutInMin int) error {
	return framework.WaitForKogitoInfraKeycloak(data.Namespace, false, timeoutInMin)
}
package ell
import (
"math"
"math/rand"
"strconv"
. "github.com/boynton/ell/data"
)
// Zero is the Ell 0 value
var Zero = Integer(0)
// One is the Ell 1 value
var One = Integer(1)
// MinusOne is the Ell -1 value
var MinusOne = Integer(-1)
// Int converts a native int64 to an Ell Number.
// The value is narrowed to the platform int width before boxing.
func Int(n int64) *Number {
	return Integer(int(n))
}
// Round returns the closest integer value to f, rounding halfway cases away
// from zero (Round(2.5) == 3, Round(-2.5) == -3).
//
// Implemented with math.Round, which avoids the classic Floor(f+0.5) defect:
// for f = 0.49999999999999994 the addition of 0.5 rounds up in floating
// point, so the old implementation incorrectly returned 1.
func Round(f float64) float64 {
	return math.Round(f)
}
// ToNumber - convert object to a number, if possible.
// Supported conversions:
//   - *Number: returned unchanged
//   - *Character: its code point as an integer
//   - *Boolean: true -> 1, false -> 0
//   - *String: parsed as a float64 when well-formed
// Any other type (or an unparsable string) yields an ArgumentError.
func ToNumber(o Value) (*Number, error) {
	switch p := o.(type) {
	case *Number:
		return p, nil
	case *Character:
		return Integer(int(p.Value)), nil
	case *Boolean:
		if p.Value {
			return One, nil
		}
		return Zero, nil
	case *String:
		// Falls through to the error below when the string is not numeric.
		f, err := strconv.ParseFloat(p.Value, 64)
		if err == nil {
			return Float(f), nil
		}
	}
	return nil, NewError(ArgumentErrorKey, "cannot convert to an number: ", o)
}
// ToInt - convert the object to an integer number, if possible.
// Supported conversions:
//   - *Number: rounded to the nearest integer (not truncated)
//   - *Character: its code point
//   - *Boolean: true -> 1, false -> 0
//   - *String: parsed as a base-10 int64 when well-formed
// Any other type (or an unparsable string) yields an ArgumentError.
func ToInt(o Value) (*Number, error) {
	switch p := o.(type) {
	case *Number:
		return Float(Round(p.Value)), nil
	case *Character:
		return Integer(int(p.Value)), nil
	case *Boolean:
		if p.Value {
			return One, nil
		}
		return Zero, nil
	case *String:
		// Falls through to the error below when the string is not an integer.
		n, err := strconv.ParseInt(p.Value, 10, 64)
		if err == nil {
			return Integer(int(n)), nil
		}
	}
	return nil, NewError(ArgumentErrorKey, "cannot convert to an integer: ", o)
}
// IsInt reports whether obj is a Number whose value has no fractional part.
func IsInt(obj Value) bool {
	if p, ok := obj.(*Number); ok {
		f := p.Value
		if math.Trunc(f) == f {
			return true
		}
	}
	return false
}
// IsFloat reports whether obj is a Number that does have a fractional part
// (i.e. a Number for which IsInt is false).
func IsFloat(obj Value) bool {
	if obj.Type() == NumberType {
		return !IsInt(obj)
	}
	return false
}
// AsFloat64Value extracts the native float64 from an Ell Number, or returns
// an ArgumentError when obj is not a *Number.
func AsFloat64Value(obj Value) (float64, error) {
	if p, ok := obj.(*Number); ok {
		return p.Value, nil
	}
	return 0, NewError(ArgumentErrorKey, "Expected a <number>, got a ", obj.Type())
}
// AsInt64Value extracts the value as an int64 (Go float-to-int conversion,
// truncating toward zero), or returns an ArgumentError for non-Numbers.
func AsInt64Value(obj Value) (int64, error) {
	if p, ok := obj.(*Number); ok {
		return int64(p.Value), nil
	}
	return 0, NewError(ArgumentErrorKey, "Expected a <number>, got a ", obj.Type())
}
// AsIntValue extracts the value as a platform int (truncating toward zero),
// or returns an ArgumentError for non-Numbers.
func AsIntValue(obj Value) (int, error) {
	if p, ok := obj.(*Number); ok {
		return int(p.Value), nil
	}
	return 0, NewError(ArgumentErrorKey, "Expected a <number>, got a ", obj.Type())
}
// AsByteValue extracts the value narrowed to a byte, or returns an
// ArgumentError for non-Numbers. Out-of-range values follow Go's
// float-to-integer conversion rules.
func AsByteValue(obj Value) (byte, error) {
	if p, ok := obj.(*Number); ok {
		return byte(p.Value), nil
	}
	return 0, NewError(ArgumentErrorKey, "Expected a <number>, got a ", obj.Type())
}
// randomGenerator is the package-wide PRNG. It starts from the fixed seed 1,
// so sequences repeat across runs until RandomSeed is called.
// NOTE(review): *rand.Rand is not safe for concurrent use — confirm callers
// stay on a single goroutine.
var randomGenerator = rand.New(rand.NewSource(1))
// RandomSeed replaces the package PRNG with one seeded from n.
func RandomSeed(n int64) {
	randomGenerator = rand.New(rand.NewSource(n))
}
// Random returns a Number drawn uniformly from [min, max).
func Random(min float64, max float64) *Number {
	return Float(min + (randomGenerator.Float64() * (max - min)))
}
// RandomList returns a freshly built list of size uniformly random Numbers
// in [min, max), in generation order. A non-positive size yields EmptyList.
//
// Cleans up the code previously flagged "fix this!": the tail pointer is now
// advanced once per iteration instead of duplicated in both branches.
func RandomList(size int, min float64, max float64) *List {
	result := EmptyList
	tail := EmptyList
	for i := 0; i < size; i++ {
		node := NewList(Random(min, max))
		if result == EmptyList {
			result = node // first node becomes the head
		} else {
			tail.Cdr = node // append after the current tail
		}
		tail = node
	}
	return result
}
// IntValue - return native int value of the object.
// Returns 0 when obj is not a *Number (indistinguishable from a true zero).
func IntValue(obj Value) int {
	if p, ok := obj.(*Number); ok {
		return int(p.Value)
	}
	return 0
}
// Int64Value - return native int64 value of the object.
// Returns 0 when obj is not a *Number (indistinguishable from a true zero).
func Int64Value(obj Value) int64 {
	if p, ok := obj.(*Number); ok {
		return int64(p.Value)
	}
	return 0
}
// Float64Value - return native float64 value of the object.
// Returns 0 when obj is not a *Number (indistinguishable from a true zero).
func Float64Value(obj Value) float64 {
	if p, ok := obj.(*Number); ok {
		return p.Value
	}
	return 0
}
package problems
import (
"math"
)
// Contains solutions for easy problems: https://leetcode.com/problemset/all/?difficulty=Easy
// https://leetcode.com/problems/hamming-distance/
// hammingDistance calculates the Hamming distance.
// The Hamming distance between two integers is the number of positions
// at which the corresponding bits are different.
func hammingDistance(x int, y int) int {
var distance int
// XOR of two numbers
// 6(110) and 0(000) gives 6(110)
value := x ^ y
// count the number of bits set
for value != 0 {
// A bit is set, so increment the count and clear the bit
distance++
// counting bits by algorihm of <NAME>
// http://dl.acm.org/citation.cfm?id=367236.367286
// step1: 6(110) & 5(101) = 4(100) (distance=1)
// step2: 4(100) & 3(011) = 0(000) (distance=2)
value &= value - 1
}
// Return the number of differing bits
return distance
}
// https://leetcode.com/problems/single-number/
// SingleNumber finds single integer.
// Given an array of integers, every element appears twice except for one.
// Note:Your algorithm should have a linear runtime complexity.
// Could you implement it without using extra memory?
func singleNumber(numbers []int) int {
var result int
for _, number := range numbers {
result = result ^ number
}
return result
}
// https://leetcode.com/problems/two-sum/
func twoSum(nums []int, target int) []int {
cache := make(map[int]int, len(nums))
for i, num := range nums {
complement := target - num
if firstIndex, ok := cache[complement]; ok {
return []int{firstIndex, i}
}
cache[num] = i
}
return nil
}
// https://leetcode.com/problems/reverse-integer/
func reverse(x int) int {
x64 := int64(x)
sign := 1
if x64 < 0 {
sign = -1
x64 = -x64
}
var reverted int64
for ; x64 != 0; x64 /= 10 {
reverted = reverted*10 + x64%10
if reverted > math.MaxInt32 {
return 0
}
}
return int(reverted) * sign
}
// https://leetcode.com/problems/palindrome-number/
func isPalindrome(x int) bool {
if x < 0 {
return false
}
if x != 0 && x%10 == 0 {
return false
}
var reverted int
for ; x > reverted; x /= 10 {
reverted = reverted*10 + x%10
}
return x == reverted || x == reverted/10
}
// https://leetcode.com/problems/contains-duplicate-ii/
func containsNearbyDuplicate(nums []int, k int) bool {
cache := make(map[int]int, len(nums))
for i, num := range nums {
if j, ok := cache[num]; ok {
if i-j <= k {
return true
}
}
cache[num] = i
}
return false
}
type TreeNode struct {
Val int
Left *TreeNode
Right *TreeNode
}
// https://leetcode.com/problems/lowest-common-ancestor-of-a-binary-search-tree/
func lowestCommonAncestor(root, p, q *TreeNode) *TreeNode {
pValue := p.Val
qValue := q.Val
for node := root; node != nil; {
parentValue := node.Val
switch {
case pValue > parentValue && qValue > parentValue:
node = node.Right
case pValue < parentValue && qValue < parentValue:
node = node.Left
default:
return node
}
}
return nil
}
// https://leetcode.com/problems/self-dividing-numbers/
func selfDividingNumbers(left int, right int) []int {
var selfDivNumbers []int
for i := left; i <= right; i++ {
if ok := isSelfDiv(i); ok {
selfDivNumbers = append(selfDivNumbers, i)
}
}
return selfDivNumbers
}
func isSelfDiv(num int) bool {
for x := num; x > 0; x /= 10 {
lastDigit := x % 10
if lastDigit == 0 || (num%lastDigit) > 0 {
return false
}
}
return true
}
// https://leetcode.com/problems/roman-to-integer/
func romanToInt(s string) int {
length := len(s)
if length == 0 {
return 0
}
result := getRomanValue(s[length-1])
for i := length - 2; i >= 0; i-- {
value := getRomanValue(s[i])
if value < getRomanValue(s[i+1]) {
result -= value
continue
}
result += value
}
return result
}
func getRomanValue(b byte) int {
switch b {
case 'I':
return 1
case 'V':
return 5
case 'X':
return 10
case 'L':
return 50
case 'C':
return 100
case 'D':
return 500
case 'M':
return 1000
default:
return 0
}
}
// https://leetcode.com/problems/valid-parentheses/
func isValid(s string) bool {
var stack []rune
for _, r := range s {
switch r {
case '(', '[', '{':
stack = append(stack, r)
case ')', ']', '}':
if len(stack) == 0 {
return false
}
lastIndex := len(stack) - 1
top := stack[lastIndex]
stack = stack[:lastIndex]
if r == ')' && top != '(' ||
r == ']' && top != '[' ||
r == '}' && top != '{' {
return false
}
}
}
return len(stack) == 0
}
type ListNode struct {
Val int
Next *ListNode
}
// https://leetcode.com/problems/merge-two-sorted-lists/
func mergeTwoLists(l1 *ListNode, l2 *ListNode) *ListNode {
if l1 == nil {
return l2
}
if l2 == nil {
return l1
}
var root ListNode
tail := &root
for l1 != nil && l2 != nil {
if l1.Val < l2.Val {
tail.Next = l1
l1 = l1.Next
} else {
tail.Next = l2
l2 = l2.Next
}
tail = tail.Next
}
if l1 != nil {
tail.Next = l1
} else {
tail.Next = l2
}
return root.Next
}
// https://leetcode.com/problems/remove-duplicates-from-sorted-array/
func removeDuplicates(nums []int) int {
i := 0
for j := 1; j < len(nums); j++ {
if nums[j] == nums[i] {
continue
}
i++
nums[i] = nums[j]
}
return i + 1
}
// https://leetcode.com/problems/implement-strstr/
func strStr(haystack string, needle string) int {
length := len(needle)
switch {
case length == 0:
return 0
case length > len(haystack):
return -1
case length == len(haystack):
if haystack == needle {
return 0
}
return -1
}
diff := len(haystack) - length
for i := 0; i <= diff; i++ {
if haystack[i:i+length] == needle {
return i
}
}
return -1
} | problems/easy.go | 0.877674 | 0.444444 | easy.go | starcoder |
package eval
import "math"
type fn struct {
params int
usage string
special string
}
func test() {
math.Atanh(3)
}
var fnData = map[string]fn{
"cos": {1, `cos returns the cosin of the radian argument x.`,
`
Special cases are:
Cos(±Inf) = NaN
Cos(NaN) = NaN`},
"asin": {1, `asin returns the arcsine, in radians, of x.`,
`
Special cases are:
asin(±0) = ±0
asin(x) = NaN if x < -1 or x > 1`},
"acos": {1, `acos returns the arccosine, in radians, of x.`,
`
Special case is:
acos(x) = NaN if x < -1 or x > 1`},
"atan": {1, `atan returns the arctangent, in radians, of x.`,
`
Special cases are:
atan(±0) = ±0
atan(±Inf) = ±Pi/2`},
"asinh": {1, `asinh returns the inverse hyperbolic sine of x.`,
`
Special cases are:
sinh(±0) = ±0
sinh(±Inf) = ±Inf
sinh(NaN) = NaN`},
"acosh": {1, `acosh returns the inverse hyperbolic cosine of x.`,
`
Special cases are:
acosh(+Inf) = +Inf
acosh(x) = NaN if x < 1
acosh(NaN) = NaN`},
"atanh": {1, `atanh returns the inverse hyperbolic tangent of x.`,
`
Special cases are:
Atanh(1) = +Inf
Atanh(±0) = ±0
Atanh(-1) = -Inf
Atanh(x) = NaN if x < -1 or x > 1
Atanh(NaN) = NaN`},
"atan2": {2, `atan2 returns the arc tangent of y/x, using the signs of the two to
determine the quadrant of the return value.`,
`
Special cases are (in order):
Atan2(y, NaN) = NaN
Atan2(NaN, x) = NaN
Atan2(+0, x>=0) = +0
Atan2(-0, x>=0) = -0
Atan2(+0, x<=-0) = +Pi
Atan2(-0, x<=-0) = -Pi
Atan2(y>0, 0) = +Pi/2
Atan2(y<0, 0) = -Pi/2
Atan2(+Inf, +Inf) = +Pi/4
Atan2(-Inf, +Inf) = -Pi/4
Atan2(+Inf, -Inf) = 3Pi/4
Atan2(-Inf, -Inf) = -3Pi/4
Atan2(y, +Inf) = 0
Atan2(y>0, -Inf) = +Pi
Atan2(y<0, -Inf) = -Pi
Atan2(+Inf, x) = +Pi/2
Atan2(-Inf, x) = -Pi/2`},
"abs": {1, `Abs returns the absolute value of x.`,
`
Special cases are:
Abs(±Inf) = +Inf
Abs(NaN) = NaN`},
"ceil": {1, `Ceil returns the least integer value greater than or equal to x.`,
`
Special cases are:
Ceil(±0) = ±0
Ceil(±Inf) = ±Inf
Ceil(NaN) = NaN`},
"cbrt": {1, `Cbrt returns the cube root of x.`,
`
Special cases are:
Cbrt(±0) = ±0
Cbrt(±Inf) = ±Inf
Cbrt(NaN) = NaN`},
"copysign": {2, `Copysign returns a value with the magnitude of x and the sign of y.`, ``},
"dim": {2, `Dim returns the maximum of x-y or 0.`,
`
Special cases are:
Dim(+Inf, +Inf) = NaN
Dim(-Inf, -Inf) = NaN
Dim(x, NaN) = Dim(NaN, x) = NaN`},
"exp": {1, `Exp returns e**x, the base-e exponential of x.`,
`
Special cases are:
Exp(+Inf) = +Inf
Exp(NaN) = NaN
Very large values overflow to 0 or +Inf.
Very small values underflow to 1.`},
"exp2": {1, `Exp2 returns 2**x, the base-2 exponential of x.`,
`
Special cases are the same as Exp.`},
"expm1": {1, `Expm1 returns e**x - 1, the base-e exponential of x minus 1.
It is more accurate than Exp(x) - 1 when x is near zero.`,
`
Special cases are:
Expm1(+Inf) = +Inf
Expm1(-Inf) = -1
Expm1(NaN) = NaN
Very large values overflow to -1 or +Inf.`},
"FMA": {3, `FMA returns x * y + z, computed with only one rounding.
(That is, FMA returns the fused multiply-add of x, y, and z.)`, ``},
"floor": {1, `Floor returns the greatest integer value less than or equal to x.`,
`
Special cases are:
Floor(±0) = ±0
Floor(±Inf) = ±Inf
Floor(NaN) = NaN`},
"gamma": {1, `Gamma returns the Gamma function of x.`,
`
Special cases are:
Gamma(+Inf) = +Inf
Gamma(+0) = +Inf
Gamma(-0) = -Inf
Gamma(x) = NaN for integer x < 0
Gamma(-Inf) = NaN
Gamma(NaN) = NaN`},
"hypot": {1, `Hypot returns Sqrt(p*p + q*q), taking care to avoid
unnecessary overflow and underflow.`,
`
Special cases are:
Hypot(±Inf, q) = +Inf
Hypot(p, ±Inf) = +Inf
Hypot(NaN, q) = NaN
Hypot(p, NaN) = NaN`},
"inf": {1, `Inf returns positive infinity if sign >= 0, negative infinity if sign < 0.`, ``},
"J0": {1, `J0 returns the order-zero Bessel function of the first kind.`,
`
Special cases are:
J0(±Inf) = 0
J0(0) = 1
J0(NaN) = NaN`},
"J1": {1, `J1 returns the order-one Bessel function of the first kind.i`,
`
Special cases are:
J1(±Inf) = 0
J1(NaN) = NaN`},
"Jn": {2, `Jn returns the order-n Bessel function of the first kind.`,
`
Special cases are:
Jn(n, ±Inf) = 0
Jn(n, NaN) = NaN`},
"ldexp": {2, `Ldexp is the inverse of Frexp. It returns frac × 2**exp.`,
`
Special cases are:
Ldexp(±0, exp) = ±0
Ldexp(±Inf, exp) = ±Inf
Ldexp(NaN, exp) = NaN`},
"log": {1, `Log returns the natural logarithm of x.`,
`
Special cases are:
Log(+Inf) = +Inf
Log(0) = -Inf
Log(x < 0) = NaN
Log(NaN) = NaN`},
"log10": {1, `Log10 returns the decimal logarithm of x.
The special cases are the same as for Log.`, ``},
"log1p": {1, `Log1p returns the natural logarithm of 1 plus its argument x.
It is more accurate than Log(1 + x) when x is near zero.`,
`
Special cases are:
Log1p(+Inf) = +Inf
Log1p(±0) = ±0
Log1p(-1) = -Inf
Log1p(x < -1) = NaN
Log1p(NaN) = NaN`},
"log2": {1, `Log2 returns the binary logarithm of x.
The special cases are the same as for Log.`, ``},
"logb": {1, `Logb returns the binary exponent of x.`,
`
Special cases are:
Logb(±Inf) = +Inf
Logb(0) = -Inf
Logb(NaN) = NaN`},
"max": {2, `Max returns the larger of x or y.`,
`
Special cases are:
Max(x, +Inf) = Max(+Inf, x) = +Inf
Max(x, NaN) = Max(NaN, x) = NaN
Max(+0, ±0) = Max(±0, +0) = +0
Max(-0, -0) = -0`},
"min": {2, `Min returns the smaller of x or y.`,
`
Special cases are:
Min(x, -Inf) = Min(-Inf, x) = -Inf
Min(x, NaN) = Min(NaN, x) = NaN
Min(-0, ±0) = Min(±0, -0) = -0`},
"mod": {2, `Mod returns the floating-point remainder of x/y. The magnitude of the
result is less than y and its sign agrees with that of x.`,
`
Special cases are:
Mod(±Inf, y) = NaN
Mod(NaN, y) = NaN
Mod(x, 0) = NaN
Mod(x, ±Inf) = x
Mod(x, NaN) = NaN`},
"nextafter": {2, `Nextafter returns the next representable float64 value after x towards y.`,
`
Special cases are:
Nextafter(x, x) = x
Nextafter(NaN, y) = NaN
Nextafter(x, NaN) = NaN`},
"pow": {2, `Pow returns x**y, the base-x exponential of y.`,
`
Special cases are (in order):
Pow(x, ±0) = 1 for any x
Pow(1, y) = 1 for any y
Pow(x, 1) = x for any x
Pow(NaN, y) = NaN
Pow(x, NaN) = NaN
Pow(±0, y) = ±Inf for y an odd integer < 0
Pow(±0, -Inf) = +Inf
Pow(±0, +Inf) = +0
Pow(±0, y) = +Inf for finite y < 0 and not an odd integer
Pow(±0, y) = ±0 for y an odd integer > 0
Pow(±0, y) = +0 for finite y > 0 and not an odd integer
Pow(-1, ±Inf) = 1
Pow(x, +Inf) = +Inf for |x| > 1
Pow(x, -Inf) = +0 for |x| > 1
Pow(x, +Inf) = +0 for |x| < 1
Pow(x, -Inf) = +Inf for |x| < 1
Pow(+Inf, y) = +Inf for y > 0
Pow(+Inf, y) = +0 for y < 0
Pow(-Inf, y) = Pow(-0, -y)
Pow(x, y) = NaN for finite x < 0 and finite non-integer y`},
"pow10": {1, `Pow10 returns 10**n, the base-10 exponential of n.`,
`
Special cases are:
Pow10(n) = 0 for n < -323
Pow10(n) = +Inf for n > 308`},
"remainder": {2, `Remainder returns the IEEE 754 floating-point remainder of x/y.`,
`
Special cases are:
Remainder(±Inf, y) = NaN
Remainder(NaN, y) = NaN
Remainder(x, 0) = NaN
Remainder(x, ±Inf) = x
Remainder(x, NaN) = NaN
`},
"round": {1, `Round returns the nearest integer, rounding half away from zero.`,
`
Special cases are:
Round(±0) = ±0
Round(±Inf) = ±Inf
Round(NaN) = NaN`},
"roundtoeven": {1, `RoundToEven returns the nearest integer, rounding ties to even.`,
`
Special cases are:
RoundToEven(±0) = ±0
RoundToEven(±Inf) = ±Inf
RoundToEven(NaN) = NaN`},
"sin": {1, `Sin returns the sine of the radian argument x.`,
`
Special cases are:
Sin(±0) = ±0
Sin(±Inf) = NaN
Sin(NaN) = NaN`},
"sinh": {1, `Sinh returns the hyperbolic sine of x.`,
`
Special cases are:
Sinh(±0) = ±0
Sinh(±Inf) = ±Inf
Sinh(NaN) = NaN`},
"sqrt": {1, `Sqrt returns the square root of x.`,
`
Special cases are:
Sqrt(+Inf) = +Inf
Sqrt(±0) = ±0
Sqrt(x < 0) = NaN
Sqrt(NaN) = NaN
`},
"tan": {1, `Tan returns the tangent of the radian argument x.`,
`
Special cases are:
Tan(±0) = ±0
Tan(±Inf) = NaN
Tan(NaN) = NaN`},
"tanh": {1, `Tanh returns the hyperbolic tangent of x.`,
`
Special cases are:
Tanh(±0) = ±0
Tanh(±Inf) = ±1
Tanh(NaN) = NaN`},
"trunc": {1, `Trunc returns the integer value of x.`,
`
Special cases are:
Trunc(±0) = ±0
Trunc(±Inf) = ±Inf
Trunc(NaN) = NaN`},
"Y0": {1, `Y0 returns the order-zero Bessel function of the second kind.`,
`
Special cases are:
Y0(+Inf) = 0
Y0(0) = -Inf
Y0(x < 0) = NaN
Y0(NaN) = NaN`},
"Y1": {1, `Y1 returns the order-one Bessel function of the second kind.`,
`
Special cases are:
Y1(+Inf) = 0
Y1(0) = -Inf
Y1(x < 0) = NaN
Y1(NaN) = NaN`},
"Yn": {2, `Yn returns the order-n Bessel function of the second kind.`,
`
Special cases are:
Yn(n, +Inf) = 0
Yn(n ≥ 0, 0) = -Inf
Yn(n < 0, 0) = +Inf if n is odd, -Inf if n is even
Yn(n, x < 0) = NaN
Yn(n, NaN) = NaN`},
} | ex_07.16-Expr_web_calculator/eval/fn.go | 0.808446 | 0.735903 | fn.go | starcoder |
package conv
import (
"image"
"image/color"
"log"
"math"
"sync"
)
// Pi_2 is π/2, used when converting the polar angle into the v image coordinate.
const Pi_2 = math.Pi / 2.0
// Vec3fa is a three-component float64 vector, used for both directions and
// colour arithmetic in this package.
type Vec3fa struct {
	X, Y, Z float64
}
// Vec3uc carries an RGB colour as three unsigned 32-bit components.
type Vec3uc struct {
	X, Y, Z uint32
}
// outImgToXYZ maps pixel (i, j) of one cube face to a direction vector on
// the unit cube. face selects one of the six faces (0=back, 1=left, 2=front,
// 3=right, 4=top, 5=bottom). inLen is the per-pixel scale; the caller passes
// 2/edge, so edge itself is unused here and kept only for signature
// compatibility. An unknown face aborts the process via log.Fatal.
func outImgToXYZ(i, j, face, edge int, inLen float64) *Vec3fa {
	a := inLen*float64(i) - 1.0
	b := inLen*float64(j) - 1.0
	switch face {
	case 0: // back
		return &Vec3fa{X: -1.0, Y: -a, Z: -b}
	case 1: // left
		return &Vec3fa{X: a, Y: -1.0, Z: -b}
	case 2: // front
		return &Vec3fa{X: 1.0, Y: a, Z: -b}
	case 3: // right
		return &Vec3fa{X: -a, Y: 1.0, Z: -b}
	case 4: // top
		return &Vec3fa{X: b, Y: a, Z: 1.0}
	case 5: // bottom
		return &Vec3fa{X: -b, Y: a, Z: -1.0}
	default:
		log.Fatal("Wrong face")
		return nil // unreachable: log.Fatal exits
	}
}
// interpolateXYZtoColor samples the equirectangular source image imgIn
// (sw x sh pixels) in direction xyz and returns the bilinearly interpolated
// RGB colour.
// NOTE(review): both u and v are scaled by sh/π, which is only correct when
// the panorama is 2:1 (sw == 2*sh) — confirm that assumption for all inputs.
func interpolateXYZtoColor(xyz *Vec3fa, imgIn image.Image, sw, sh int) *Vec3uc {
	// Spherical coordinates of the sample direction.
	theta := math.Atan2(xyz.Y, xyz.X) // range -pi to pi
	rad := math.Hypot(xyz.X, xyz.Y)
	phi := math.Atan2(xyz.Z, rad) // range -pi/2 to pi/2
	//source img coords
	dividedH := float64(sh) / math.Pi
	uf := (theta + math.Pi) * dividedH
	vf := (Pi_2 - phi) * dividedH
	// Use bilinear interpolation between the four surrounding pixels
	ui := safeIndex(math.Floor(uf), float64(sw))
	vi := safeIndex(math.Floor(vf), float64(sh))
	u2 := safeIndex(float64(ui)+1.0, float64(sw))
	v2 := safeIndex(float64(vi)+1.0, float64(sh))
	// Fractional offsets double as the interpolation weights.
	mu := uf - float64(ui)
	nu := vf - float64(vi)
	// read fetches one pixel and narrows its 16-bit channels to 8-bit floats.
	read := func(x, y int) *Vec3fa {
		red, green, blue, _ := imgIn.At(x, y).RGBA()
		return &Vec3fa{
			X: float64(red >> 8),
			Y: float64(green >> 8),
			Z: float64(blue >> 8),
		}
	}
	A := read(ui, vi)
	B := read(u2, vi)
	C := read(ui, v2)
	D := read(u2, v2)
	// Interpolate horizontally (weight mu), then vertically (weight nu).
	val := mix(mix(A, B, mu), mix(C, D, mu), nu)
	return &Vec3uc{
		X: uint32(val.X),
		Y: uint32(val.Y),
		Z: uint32(val.Z),
	}
}
// ConverEquirectangularToCubemap converts the equirectangular image imgIn
// into six rValue x rValue cube-face images, rendering each face in its own
// goroutine, and returns them in face order (back, left, front, right, top,
// bottom — see outImgToXYZ).
// NOTE(review): the name is missing a "t" ("Convert..."); it is exported, so
// it is kept unchanged for backward compatibility.
func ConverEquirectangularToCubemap(rValue int, imgIn image.Image) []*image.RGBA {
	sw := imgIn.Bounds().Max.X
	sh := imgIn.Bounds().Max.Y
	wg := sync.WaitGroup{}
	wg.Add(6)
	canvases := make([]*image.RGBA, 6)
	for i := 0; i < 6; i++ {
		canvases[i] = image.NewRGBA(image.Rect(0, 0, rValue, rValue))
		// k in [start, end) maps to face k/rValue == i inside convert, so
		// each goroutine owns exactly one canvas — no shared writes.
		start := i * rValue
		end := start + rValue
		go convert(start, end, rValue, sw, sh, imgIn, canvases, &wg)
	}
	wg.Wait()
	return canvases
}
// convert renders the cube-face columns encoded by k in [start, end) into
// imgOut, sampling the equirectangular source imgIn (sw x sh). edge is the
// face size in pixels; wg is signalled when the range is done.
func convert(start, end, edge, sw, sh int, imgIn image.Image, imgOut []*image.RGBA, wg *sync.WaitGroup) {
	// inLen maps pixel indices into [-1, 1] face coordinates (2/edge per pixel).
	inLen := 2.0 / float64(edge)
	for k := start; k < end; k++ {
		// k encodes (face, column): the face index advances every `edge` values.
		face := k / edge
		i := k % edge
		for j := 0; j < edge; j++ {
			xyz := outImgToXYZ(i, j, face, edge, inLen)
			clr := interpolateXYZtoColor(xyz, imgIn, sw, sh)
			imgOut[face].Set(i, j, color.RGBA{uint8(clr.X), uint8(clr.Y), uint8(clr.Z), 255})
		}
	}
	wg.Done()
}
// safeIndex clamps n into the valid pixel-index range [0, size-1] and
// returns it as an int.
func safeIndex(n, size float64) int {
	clamped := math.Max(n, 0)
	clamped = math.Min(clamped, size-1)
	return int(clamped)
}
func mix(one, other *Vec3fa, c float64) *Vec3fa {
x := (other.X-one.X)*c + one.X
y := (other.Y-one.Y)*c + one.Y
z := (other.Z-one.Z)*c + one.Z
return &Vec3fa{
X: x,
Y: y,
Z: z,
}
} | conv/convert.go | 0.682256 | 0.437223 | convert.go | starcoder |
package fitness_calculator
import (
"go-emas/pkg/solution"
"math"
)
// IFitnessCalculator is an interface for fitness calculators
type IFitnessCalculator interface {
	// CalculateFitness returns an integer fitness score for the given solution.
	CalculateFitness(solution solution.ISolution) int
}
// LinearFitnessCalculator represents linear function
type LinearFitnessCalculator struct {
}
// NewLinearFitnessCalculator creates new LinearFitnessCalculator object
func NewLinearFitnessCalculator() *LinearFitnessCalculator {
	l := LinearFitnessCalculator{}
	return &l
}
// CalculateFitness returns the solution value itself as the fitness (identity
// / linear objective).
// NOTE(review): the type assertion panics when sol is not an *IntSolution —
// confirm callers always pair this calculator with IntSolution.
func (flc *LinearFitnessCalculator) CalculateFitness(sol solution.ISolution) int {
	return int(sol.(*solution.IntSolution).Solution())
}
// BitSetFitnessCalculator represents function that counts set bits
type BitSetFitnessCalculator struct {
}
// NewBitSetFitnessCalculator creates new BitSetFitnessCalculator object
func NewBitSetFitnessCalculator() *BitSetFitnessCalculator {
	l := BitSetFitnessCalculator{}
	return &l
}
// CalculateFitness returns the number of set bits in the solution (OneMax-style
// objective).
// NOTE(review): the type assertion panics when sol is not a *BitSetSolution —
// confirm callers always pair this calculator with BitSetSolution.
func (flc *BitSetFitnessCalculator) CalculateFitness(sol solution.ISolution) int {
	return int(sol.(*solution.BitSetSolution).Solution().Count())
}
// Dejong1FitnessCalculator represents function that counts -dejongFunction1(x1,x2)
type Dejong1FitnessCalculator struct {
}
// NewDejong1FitnessCalculator creates new Dejong1FitnessCalculator object
func NewDejong1FitnessCalculator() *Dejong1FitnessCalculator {
	l := Dejong1FitnessCalculator{}
	return &l
}
// CalculateFitness returns the negated De Jong F1 (sphere) value of the pair
// solution, truncated to int: fitness = -(x1^2 + x2^2). Negation turns the
// minimisation benchmark into a maximisation objective.
// NOTE(review): panics when sol is not a *PairSolution.
func (flc *Dejong1FitnessCalculator) CalculateFitness(sol solution.ISolution) int {
	x1, x2 := sol.(*solution.PairSolution).Solution()
	fitness := 0 - (math.Pow(x1, 2) + math.Pow(x2, 2))
	return int(fitness)
}
// RosenbrockFitnessCalculator represents function that counts -dejongFunction2(x1,x2)
type RosenbrockFitnessCalculator struct {
}
// NewRosenbrockFitnessCalculator creates new RosenbrockFitnessCalculator object
func NewRosenbrockFitnessCalculator() *RosenbrockFitnessCalculator {
l := RosenbrockFitnessCalculator{}
return &l
}
// CalculateFitness calculate fitness value for passed soultion argument - count bits that are set
func (flc *RosenbrockFitnessCalculator) CalculateFitness(sol solution.ISolution) int {
x1, x2 := sol.(*solution.PairSolution).Solution()
fitness := 0 - 100*((x1-math.Pow(x2, 2))+math.Pow((1-x2), 2))
return int(fitness)
} | pkg/fitness_calculator/fitness_calculator.go | 0.794026 | 0.423935 | fitness_calculator.go | starcoder |
package iso20022
// Additional count which may be utilised for reconciliation.
type TransactionTotals6 struct {

	// Sum number of all authorisation transactions.
	Authorisation *Number `xml:"Authstn,omitempty"`

	// Sum number of all reversed authorisation transactions.
	AuthorisationReversal *Number `xml:"AuthstnRvsl,omitempty"`

	// Sum number of all inquiry transactions.
	Inquiry *Number `xml:"Nqry,omitempty"`

	// Sum number of all reversed inquiry transactions.
	InquiryReversal *Number `xml:"NqryRvsl,omitempty"`

	// Sum number of all financial presentment payment transactions processed.
	Payments *Number `xml:"Pmts,omitempty"`

	// Sum number of all financial presentment payment transactions which have been reversed.
	PaymentReversal *Number `xml:"PmtRvsl,omitempty"`

	// Sum number of all financial presentment transactions processed.
	Transfer *Number `xml:"Trf,omitempty"`

	// Sum number of all reversal transactions processed.
	TransferReversal *Number `xml:"TrfRvsl,omitempty"`

	// Sum number of all fee collection transactions.
	FeeCollection *Number `xml:"FeeColltn,omitempty"`
}

// SetAuthorisation sets the authorisation count. value is the decimal count
// in its ISO 20022 string form (Number is a string-based type).
func (t *TransactionTotals6) SetAuthorisation(value string) {
	t.Authorisation = (*Number)(&value)
}

// SetAuthorisationReversal sets the reversed-authorisation count.
func (t *TransactionTotals6) SetAuthorisationReversal(value string) {
	t.AuthorisationReversal = (*Number)(&value)
}

// SetInquiry sets the inquiry count.
func (t *TransactionTotals6) SetInquiry(value string) {
	t.Inquiry = (*Number)(&value)
}

// SetInquiryReversal sets the reversed-inquiry count.
func (t *TransactionTotals6) SetInquiryReversal(value string) {
	t.InquiryReversal = (*Number)(&value)
}

// SetPayments sets the payment-presentment count.
func (t *TransactionTotals6) SetPayments(value string) {
	t.Payments = (*Number)(&value)
}

// SetPaymentReversal sets the reversed-payment count.
func (t *TransactionTotals6) SetPaymentReversal(value string) {
	t.PaymentReversal = (*Number)(&value)
}

// SetTransfer sets the financial-presentment (transfer) count.
func (t *TransactionTotals6) SetTransfer(value string) {
	t.Transfer = (*Number)(&value)
}

// SetTransferReversal sets the reversed-transfer count.
func (t *TransactionTotals6) SetTransferReversal(value string) {
	t.TransferReversal = (*Number)(&value)
}

// SetFeeCollection sets the fee-collection count.
func (t *TransactionTotals6) SetFeeCollection(value string) {
	t.FeeCollection = (*Number)(&value)
}
package coord
import (
"math"
)
const earthR = 6378137
// outOfChina reports whether (lat, lng) lies outside the bounding box within
// which GCJ-02 offsets apply; outside it, coordinates pass through unchanged.
func outOfChina(lat, lng float64) bool {
	// Same comparisons as before, folded into one expression; NaN inputs
	// still fail every comparison and therefore return false.
	return lng < 72.004 || lng > 137.8347 ||
		lat < 0.8293 || lat > 55.8271
}
// transform computes the raw GCJ-02 obfuscation offsets for a point, where
// the caller has already shifted the origin: x = lng-105.0, y = lat-35.0
// (see delta). The sinusoidal series and magic constants are the published
// reverse-engineered GCJ-02 ("Mars coordinates") formula; do not simplify.
func transform(x, y float64) (lat, lng float64) {
	xy := x * y
	absX := math.Sqrt(math.Abs(x))
	xPi := x * math.Pi
	yPi := y * math.Pi
	// d is the leading series term shared by both offsets.
	d := 20.0*math.Sin(6.0*xPi) + 20.0*math.Sin(2.0*xPi)
	lat = d
	lng = d
	lat += 20.0*math.Sin(yPi) + 40.0*math.Sin(yPi/3.0)
	lng += 20.0*math.Sin(xPi) + 40.0*math.Sin(xPi/3.0)
	lat += 160.0*math.Sin(yPi/12.0) + 320*math.Sin(yPi/30.0)
	lng += 150.0*math.Sin(xPi/12.0) + 300.0*math.Sin(xPi/30.0)
	lat *= 2.0 / 3.0
	lng *= 2.0 / 3.0
	// Polynomial correction terms.
	lat += -100.0 + 2.0*x + 3.0*y + 0.2*y*y + 0.1*xy + 0.2*absX
	lng += 300.0 + x + 2.0*y + 0.1*x*x + 0.1*xy + 0.1*absX
	return
}
// delta returns the GCJ-02 offset (dLat, dLng), in degrees, to add to the
// WGS-84 point (lat, lng). transform's raw offsets are rescaled by the local
// meridian/parallel radii derived from earthR and the eccentricity term ee.
func delta(lat, lng float64) (dLat, dLng float64) {
	// ee is the square of the ellipsoid's first eccentricity.
	const ee = 0.00669342162296594323
	dLat, dLng = transform(lng-105.0, lat-35.0)
	radLat := lat / 180.0 * math.Pi
	magic := math.Sin(radLat)
	magic = 1 - ee*magic*magic
	sqrtMagic := math.Sqrt(magic)
	// Convert metre-scale offsets to degrees at this latitude.
	dLat = (dLat * 180.0) / ((earthR * (1 - ee)) / (magic * sqrtMagic) * math.Pi)
	dLng = (dLng * 180.0) / (earthR / sqrtMagic * math.Cos(radLat) * math.Pi)
	return
}
// WGStoGCJ convert WGS-84 coordinate(wgsLat, wgsLng) to GCJ-02 coordinate(gcjLat, gcjLng).
func WGStoGCJ(wgsLat, wgsLng float64) (gcjLat, gcjLng float64) {
	if outOfChina(wgsLat, wgsLng) {
		// No obfuscation applies outside China: pass the point through.
		return wgsLat, wgsLng
	}
	dLat, dLng := delta(wgsLat, wgsLng)
	return wgsLat + dLat, wgsLng + dLng
}
// GCJtoWGS converts a GCJ-02 coordinate (gcjLat, gcjLng) back to WGS-84
// by subtracting the forward offset evaluated at the GCJ position; the
// result is accurate to roughly 1m-2m. Use GCJtoWGSExact for a tighter
// result. Points outside the Chinese bounding box are returned unchanged.
func GCJtoWGS(gcjLat, gcjLng float64) (wgsLat, wgsLng float64) {
	if outOfChina(gcjLat, gcjLng) {
		return gcjLat, gcjLng
	}
	dLat, dLng := delta(gcjLat, gcjLng)
	return gcjLat - dLat, gcjLng - dLng
}
// GCJtoWGSExact convert GCJ-02 coordinate(gcjLat, gcjLng) to WGS-84 coordinate(wgsLat, wgsLng).
// The output WGS-84 coordinate's accuracy is less than 0.5m, but much slower than GCJtoWGS/gcj2wgs.
//
// It bisects independently on each axis: [m, p] brackets the true WGS-84
// point and is narrowed until the forward conversion of the midpoint lands
// within `threshold` degrees of the input, or 30 iterations have run.
func GCJtoWGSExact(gcjLat, gcjLng float64) (wgsLat, wgsLng float64) {
	const initDelta = 0.01 // initial half-width of the search bracket, in degrees
	const threshold = 0.000001
	dLat, dLng := initDelta, initDelta
	mLat, mLng := gcjLat-dLat, gcjLng-dLng // lower bracket bounds
	pLat, pLng := gcjLat+dLat, gcjLng+dLng // upper bracket bounds
	for i := 0; i < 30; i++ {
		// The bracket midpoint is the current WGS-84 estimate.
		wgsLat, wgsLng = (mLat+pLat)/2, (mLng+pLng)/2
		tmpLat, tmpLng := WGStoGCJ(wgsLat, wgsLng)
		// Residual between the forward-converted estimate and the input.
		dLat, dLng = tmpLat-gcjLat, tmpLng-gcjLng
		if math.Abs(dLat) < threshold && math.Abs(dLng) < threshold {
			return
		}
		// Move the bracket edge on each axis toward the sign of the residual.
		if dLat > 0 {
			pLat = wgsLat
		} else {
			mLat = wgsLat
		}
		if dLng > 0 {
			pLng = wgsLng
		} else {
			mLng = wgsLng
		}
	}
	return
}
// Distance calculate the distance between point(latA, lngA) and point(latB, lngB), unit in meter.
func Distance(latA, lngA, latB, lngB float64) float64 {
pi180 := math.Pi / 180
arcLatA := latA * pi180
arcLatB := latB * pi180
x := math.Cos(arcLatA) * math.Cos(arcLatB) * math.Cos((lngA-lngB)*pi180)
y := math.Sin(arcLatA) * math.Sin(arcLatB)
s := x + y
if s > 1 {
s = 1
}
if s < -1 {
s = -1
}
alpha := math.Acos(s)
distance := alpha * earthR
return distance
} | comm/coord/coord.go | 0.681621 | 0.482063 | coord.go | starcoder |
package generic
import (
"context"
"sync"
"time"
"github.com/OneOfOne/xxhash"
"github.com/benthosdev/benthos/v4/public/service"
)
// memCacheConfig builds the config spec for the "memory" cache plugin,
// declaring the default_ttl, compaction_interval, init_values and shards
// fields together with their user-facing documentation.
func memCacheConfig() *service.ConfigSpec {
	spec := service.NewConfigSpec().
		Stable().
		Summary(`Stores key/value pairs in a map held in memory. This cache is therefore reset every time the service restarts. Each item in the cache has a TTL set from the moment it was last edited, after which it will be removed during the next compaction.`).
		Description(`The compaction interval determines how often the cache is cleared of expired items, and this process is only triggered on writes to the cache. Access to the cache is blocked during this process.
Item expiry can be disabled entirely by either setting the ` + "`compaction_interval`" + ` to an empty string.
The field ` + "`init_values`" + ` can be used to prepopulate the memory cache with any number of key/value pairs which are exempt from TTLs:
` + "```yaml" + `
cache_resources:
  - label: foocache
    memory:
      default_ttl: 60s
      init_values:
        foo: bar
` + "```" + `
These values can be overridden during execution, at which point the configured TTL is respected as usual.`).
		Field(service.NewDurationField("default_ttl").
			Description("The default TTL of each item. After this period an item will be eligible for removal during the next compaction.").
			Default("5m")).
		Field(service.NewDurationField("compaction_interval").
			Description("The period of time to wait before each compaction, at which point expired items are removed. This field can be set to an empty string in order to disable compactions/expiry entirely.").
			Default("60s")).
		Field(service.NewStringMapField("init_values").
			Description("A table of key/value pairs that should be present in the cache on initialization. This can be used to create static lookup tables.").
			Default(map[string]interface{}{}).
			Example(map[string]interface{}{
				"Nickelback":       "1995",
				"Spice Girls":      "1994",
				"The Human League": "1977",
			})).
		Field(service.NewIntField("shards").
			Description("A number of logical shards to spread keys across, increasing the shards can have a performance benefit when processing a large number of keys.").
			Default(1).
			Advanced())
	return spec
}
// init registers the "memory" cache plugin with the service registry so a
// `memory` cache resource can be declared in configs. Construction errors
// are surfaced through newMemCacheFromConfig at resource-build time.
func init() {
	err := service.RegisterCache(
		"memory", memCacheConfig(),
		func(conf *service.ParsedConfig, mgr *service.Resources) (service.Cache, error) {
			f, err := newMemCacheFromConfig(conf)
			if err != nil {
				return nil, err
			}
			return f, nil
		})
	if err != nil {
		// Registration failure is a programming error at start-up; fail fast.
		panic(err)
	}
}
// newMemCacheFromConfig reads the default_ttl, compaction_interval,
// shards and init_values fields from the parsed plugin config and builds
// the memory cache from them.
func newMemCacheFromConfig(conf *service.ParsedConfig) (*memoryCache, error) {
	ttl, err := conf.FieldDuration("default_ttl")
	if err != nil {
		return nil, err
	}
	// An empty compaction_interval string leaves compInterval at zero,
	// which disables compaction/expiry entirely downstream.
	var compInterval time.Duration
	if test, _ := conf.FieldString("compaction_interval"); test != "" {
		if compInterval, err = conf.FieldDuration("compaction_interval"); err != nil {
			return nil, err
		}
	}
	nShards, err := conf.FieldInt("shards")
	if err != nil {
		return nil, err
	}
	initValues, err := conf.FieldStringMap("init_values")
	if err != nil {
		return nil, err
	}
	return newMemCache(ttl, compInterval, nShards, initValues), nil
}
//------------------------------------------------------------------------------

// item is a single cache entry: the stored bytes plus the instant after
// which the entry counts as expired. A zero expires time means the entry
// never expires (used for init_values).
type item struct {
	value   []byte
	expires time.Time
}

// shard holds one partition of the key space behind its own RWMutex,
// along with the compaction settings shared by all entries in it.
type shard struct {
	items map[string]item
	compInterval time.Duration
	lastCompaction time.Time
	sync.RWMutex
}

// isExpired reports whether i has passed its expiry. It returns false
// when expiry is disabled (compInterval == 0) or the item has no expiry
// set. It reads only immutable config and the caller's item copy, so it
// is safe to call without holding the shard lock.
func (s *shard) isExpired(i item) bool {
	if s.compInterval == 0 {
		return false
	}
	if i.expires.IsZero() {
		return false
	}
	return i.expires.Before(time.Now())
}

// compaction deletes expired items, throttled to at most once per
// compInterval; a zero interval disables it. Callers must hold the shard
// write lock (see Set, Add and Delete).
func (s *shard) compaction() {
	if s.compInterval == 0 {
		return
	}
	if time.Since(s.lastCompaction) < s.compInterval {
		return
	}
	for k, v := range s.items {
		if s.isExpired(v) {
			delete(s.items, k)
		}
	}
	s.lastCompaction = time.Now()
}
//------------------------------------------------------------------------------

// newMemCache constructs a memory cache with the given default TTL,
// compaction interval (zero disables expiry), shard count (values below
// one are treated as one) and initial key/value pairs; the initial pairs
// carry a zero expiry and therefore never expire.
func newMemCache(ttl, compInterval time.Duration, nShards int, initValues map[string]string) *memoryCache {
	if nShards < 1 {
		nShards = 1
	}
	m := &memoryCache{defaultTTL: ttl}
	for i := 0; i < nShards; i++ {
		m.shards = append(m.shards, &shard{
			items:          map[string]item{},
			compInterval:   compInterval,
			lastCompaction: time.Now(),
		})
	}
	for k, v := range initValues {
		// Zero expires: init values are exempt from TTLs.
		m.getShard(k).items[k] = item{value: []byte(v)}
	}
	return m
}

// memoryCache is an in-memory key/value store whose keys are spread
// across one or more mutex-guarded shards.
type memoryCache struct {
	shards     []*shard
	defaultTTL time.Duration
}
// getShard maps key to its shard. With a single shard the hash is
// skipped entirely; otherwise the shard is chosen by xxhash64(key)
// modulo the shard count.
func (m *memoryCache) getShard(key string) *shard {
	if len(m.shards) == 1 {
		return m.shards[0]
	}
	h := xxhash.New64()
	h.WriteString(key)
	return m.shards[h.Sum64()%uint64(len(m.shards))]
}
// Get returns the value stored under key, or service.ErrKeyNotFound when
// the key is absent or its TTL has elapsed — expiry is simulated here so
// stale entries read as missing even before the next compaction runs.
func (m *memoryCache) Get(_ context.Context, key string) ([]byte, error) {
	s := m.getShard(key)
	s.RLock()
	it, ok := s.items[key]
	s.RUnlock()
	if !ok || s.isExpired(it) {
		return nil, service.ErrKeyNotFound
	}
	return it.value, nil
}
// Set stores value under key, overwriting any existing entry. The entry
// expires after ttl when given, otherwise after the cache's default TTL.
// A compaction pass may run while the write lock is held.
func (m *memoryCache) Set(_ context.Context, key string, value []byte, ttl *time.Duration) error {
	d := m.defaultTTL
	if ttl != nil {
		d = *ttl
	}
	expires := time.Now().Add(d)
	s := m.getShard(key)
	s.Lock()
	defer s.Unlock()
	s.compaction()
	s.items[key] = item{value: value, expires: expires}
	return nil
}
// Add stores value under key only if the key is not already present,
// returning service.ErrKeyAlreadyExists otherwise. The entry expires
// after ttl when given, otherwise after the cache's default TTL.
func (m *memoryCache) Add(_ context.Context, key string, value []byte, ttl *time.Duration) error {
	d := m.defaultTTL
	if ttl != nil {
		d = *ttl
	}
	expires := time.Now().Add(d)
	s := m.getShard(key)
	s.Lock()
	defer s.Unlock()
	// Compact before the existence check, and treat an expired-but-not-yet
	// compacted entry as absent to stay consistent with Get. The original
	// checked existence first, so a stale entry could block Add with
	// ErrKeyAlreadyExists even though Get reported the key missing.
	s.compaction()
	if existing, ok := s.items[key]; ok && !s.isExpired(existing) {
		return service.ErrKeyAlreadyExists
	}
	s.items[key] = item{value: value, expires: expires}
	return nil
}
func (m *memoryCache) Delete(_ context.Context, key string) error {
shard := m.getShard(key)
shard.Lock()
shard.compaction()
delete(shard.items, key)
shard.Unlock()
return nil
}
func (m *memoryCache) Close(context.Context) error {
return nil
} | internal/impl/generic/cache_memory.go | 0.724968 | 0.637525 | cache_memory.go | starcoder |
package main
// MerkleTree is a hash tree; depth counts the levels from the leaves up
// to and including the root.
type MerkleTree struct {
	depth uint32
	root *MerkleNode
}

// MerkleNode is a single tree node: its hash, the raw data folded into
// that hash, and its children (both nil for a leaf).
type MerkleNode struct {
	hash []byte
	data []byte
	left *MerkleNode
	right *MerkleNode
}
// createNode builds a MerkleNode. With both children nil it creates a
// leaf whose hash covers only data; otherwise the hash covers
// left.hash || right.hash || data.
func createNode(left, right *MerkleNode, data []byte) *MerkleNode {
	node := MerkleNode{data: data, left: left, right: right}
	if left == nil && right == nil {
		hash := generateArgon2Hash(data)
		node.hash = hash[:]
		return &node
	}
	// Assemble the pre-image in a fresh buffer. The original used
	// append(left.hash, right.hash...), which can overwrite the backing
	// array of left.hash whenever that slice has spare capacity.
	raw := make([]byte, 0, len(left.hash)+len(right.hash)+len(data))
	raw = append(raw, left.hash...)
	raw = append(raw, right.hash...)
	raw = append(raw, data...)
	hash := generateArgon2Hash(raw)
	node.hash = hash[:]
	return &node
}
func createTree(data [][]byte) *MerkleTree {
var nodes []MerkleNode
for i := 0; i < len(data); i++ {
node := createNode(nil, nil, data[i])
nodes = append(nodes, *node)
}
if len(nodes)%2 != 0 {
node := createNode(nil, nil, []byte("empty data leaf node"))
nodes = append(nodes, *node)
}
var temp []MerkleNode = nodes
var depth uint32 = 1
for i := 0; i < (len(nodes)/2)-1; i++ {
var round []MerkleNode
if len(temp)%2 != 0 {
node := createNode(&temp[0], &temp[0], []byte("empty data parent node (odd)"))
temp = append(temp, *node)
}
for i := 0; i < len(temp); i += 2 {
node := createNode(&temp[i], &temp[i+1], []byte("empty data parent node"))
round = append(round, *node)
}
depth++
temp = round
}
// debugging
root := temp[0]
// fmt.Printf("root hash of tree of root buffer: %s value: %s\n\n", hex.EncodeToString(root.hash), root.data)
// fmt.Printf("left: values of root.left: %s value: %s\n", hex.EncodeToString(root.left.hash), root.left.data)
// fmt.Printf("right: values of root.right: %s value: %s\n\n", hex.EncodeToString(root.right.hash), root.right.data)
// fmt.Printf("left left: values of root.left.left: %s value: %s\n", hex.EncodeToString(root.left.left.hash), root.left.left.data)
// fmt.Printf("left right: values of root.left.right: %s value: %s\n", hex.EncodeToString(root.left.right.hash), root.left.right.data)
// fmt.Printf("right left: values of root.right.left: %s value: %s\n", hex.EncodeToString(root.right.left.hash), root.right.left.data)
// fmt.Printf("right right: values of root.right.right: %s value: %s\n\n", hex.EncodeToString(root.right.right.hash), root.right.right.data)
// fmt.Printf("left left left: values of root.left.left.left: %s value: %s\n", hex.EncodeToString(root.left.left.left.hash), root.left.left.left.data)
// fmt.Printf("left left right: values of root.left..left.right: %s value: %s\n", hex.EncodeToString(root.left.left.right.hash), root.left.left.right.data)
// fmt.Printf("left right left: values of root.left.right.left: %s value: %s\n", hex.EncodeToString(root.left.right.left.hash), root.left.right.left.data)
// fmt.Printf("left right right: values of root.left.right.right: %s value: %s\n\n", hex.EncodeToString(root.left.right.right.hash), root.left.right.right.data)
// fmt.Printf("right left left: values of root.right.left.left: %s value: %s\n", hex.EncodeToString(root.right.left.left.hash), root.right.left.left.data)
// fmt.Printf("right left right: values of root.right.left.right: %s value: %s\n", hex.EncodeToString(root.right.left.right.hash), root.right.left.right.data)
// fmt.Printf("right right left: values of root.right.right.left: %s value: %s\n", hex.EncodeToString(root.right.right.left.hash), root.right.right.left.data)
// fmt.Printf("right right right: values of root.right.right.right: %s value: %s\n\n", hex.EncodeToString(root.right.right.right.hash), root.right.right.right.data)
tree := MerkleTree{depth, &root}
return &tree
} | merkle.go | 0.553264 | 0.499329 | merkle.go | starcoder |
package pkg
import (
"strconv"
"time"
)
/**
File: structs.go
Description: All the structs needed to implement the C2 record
@author <NAME>
@date 5/16/18
*/
func ParseDateStrings(acTime time.Time) (string, string, string) {
lcMonth := acTime.Month().String()
lcDay := strconv.Itoa(acTime.Day())
lcYear := strconv.Itoa(acTime.Year())
return lcMonth, lcDay, lcYear
}
/**
Date struct holds a month day and year
*/
type Date struct {
MnDay int
McMonth time.Month
MnYear int
}
/**
Function: MakeDate
Description: Makes a drug struct with the month, day, year
@param acMonth The month
@param acDay The day
@param acYear The year
@return A Date object
*/
func MakeDate(acMonth time.Month, acDay int, acYear int) Date {
return Date{acDay, acMonth, acYear}
}
// Prescription records a filled order: the NDC of the drug, the
// pharmacist and script identifiers, the fill date components, the
// ordered quantity and the actual quantity on hand.
type Prescription struct {
	McNdc                  string
	McPharmacist, McScript string
	McYear                 string
	McMonth                string
	McDay                  string
	MnOrderQuantity        float64
	MrActualQty            float64
}

// MakePrescription builds a Prescription from its component fields.
func MakePrescription(acNdc string, asPharmacist string, acScript string, anQty float64, acYear string, acMonth string,
	acDay string, arActualQty float64) Prescription {
	return Prescription{
		McNdc:           acNdc,
		McPharmacist:    asPharmacist,
		McScript:        acScript,
		McYear:          acYear,
		McMonth:         acMonth,
		McDay:           acDay,
		MnOrderQuantity: anQty,
		MrActualQty:     arActualQty,
	}
}
// Audit records a counted quantity for a drug: the NDC, the pharmacist
// who performed the count, the audit date components and the quantity
// observed.
type Audit struct {
	McNdc           string
	McPharmacist    string
	McYear          string
	McMonth         string
	McDay           string
	MnAuditQuantity float64
}

// MakeAudit builds an Audit from its component fields.
func MakeAudit(acNdc string, acPharmacist string, anAuditQuantity float64, acYear string, acMonth string,
	acDay string) Audit {
	return Audit{
		McNdc:           acNdc,
		McPharmacist:    acPharmacist,
		McYear:          acYear,
		McMonth:         acMonth,
		McDay:           acDay,
		MnAuditQuantity: anAuditQuantity,
	}
}
// Purchase records stock added to the supply: the NDC, the pharmacist who
// counted it, the supplier invoice, the purchase date components, the
// purchased quantity and the actual quantity on hand.
type Purchase struct {
	MnNdc        string
	McPharmacist string
	McInvoice    string
	McYear       string
	McMonth      string
	McDay        string
	MrQty        float64
	MrActualQty  float64
}

// MakePurchase builds a Purchase from its component fields.
func MakePurchase(acNdc string, acPharmacist string, acInvoice string, acYear string, acMonth string, acDay string,
	anQty float64, anActualQty float64) Purchase {
	return Purchase{
		MnNdc:        acNdc,
		McPharmacist: acPharmacist,
		McInvoice:    acInvoice,
		McYear:       acYear,
		McMonth:      acMonth,
		McDay:        acDay,
		MrQty:        anQty,
		MrActualQty:  anActualQty,
	}
}
// Drug describes a drug product held in inventory: its NDC code, on-hand
// quantity, display name, an associated timestamp, dosage form, package
// size and item number.
//
// NOTE(review): the original comment listed only "id name, ndc code, and
// quantity"; the exact meaning of McDate is not evident from this file —
// confirm against callers.
type Drug struct {
	McNdc string
	MrQuantity float64
	McName string
	McDate time.Time
	McForm string
	McSize string
	McItemNum string
}
// DrugDB is the flat database/serialization representation of a drug
// record: identity fields plus the date components and quantity stored as
// individual string/number columns.
type DrugDB struct {
	Name string
	Ndc string
	Size string
	Form string
	ItemNum string
	Month string
	Day string
	Year string
	Quantity float64
}
/**
Function: makeDrug
Description: Given: a drug name, ndc, and quantity, creates a drug structure
@param acName The name of the drug
@param acNdc The ndc specific to the drug
@param anQty The current quantity of the drug
*/
// func MakeDrug(acName string, acNdc string, anQty float64) Drug {
// return Drug{acName, acNdc, anQty}
// }
/**
Function: UpdateQty
Description: Updates the quantity of the drug
@param anQty The quantity to change by
*/
// func (drug Drug) UpdateQty(anQty float64) Drug {
// anQty = drug.MnQuantity + anQty
// return MakeDrug(drug.McId, drug.McNdc, anQty)
// }
// Order is a normalized view over an audit, prescription or purchase:
// the pharmacist, the order date, the script and type of the order, its
// quantities, the drug NDC and a record id.
type Order struct {
	AcPharmacist       string
	AcMonth            string
	AcDay              int
	AcYear             int
	AcScript, AcType   string
	ArQty, ArActualQty float64
	AcNdc              string
	AnId               int64
}

// MakeOrder builds an Order from its component fields. The day and year
// arrive as strings; non-numeric values yield zero, since the conversion
// errors are deliberately discarded.
func MakeOrder(acNdc string, acPharmacist string, acScript string, acType string, arQty float64, arActualQty float64,
	acYear string, acMonth string, acDay string, anId int64) Order {
	day, _ := strconv.Atoi(acDay)
	year, _ := strconv.Atoi(acYear)
	return Order{
		AcPharmacist: acPharmacist,
		AcMonth:      acMonth,
		AcDay:        day,
		AcYear:       year,
		AcScript:     acScript,
		AcType:       acType,
		ArQty:        arQty,
		ArActualQty:  arActualQty,
		AcNdc:        acNdc,
		AnId:         anId,
	}
}
// User holds a login name and its password value.
//
// NOTE(review): whether PassVal is a hash or plaintext is not visible in
// this file — if it is plaintext it should be hashed before storage.
type User struct {
	UserName string
	PassVal string
}
// NewDrug pairs an error type and an NDC with a unique id.
//
// NOTE(review): the exact semantics of Error ("error type" per the
// original comment) are not visible here — confirm against the producer.
type NewDrug struct {
	Error string
	Ndc string
	Id int
}
package factsphere
import (
"math/rand"
"github.com/jakevoytko/crbot/api"
"github.com/jakevoytko/crbot/log"
"github.com/jakevoytko/crbot/model"
)
// Executor replies to the ?factsphere command with a randomly chosen
// entry from factSphereFacts. It is stateless.
type Executor struct {
}

// NewFactSphereExecutor returns a new, stateless Executor.
func NewFactSphereExecutor() *Executor {
	return &Executor{}
}
// GetType returns the type of this feature: model.CommandTypeFactSphere.
func (e *Executor) GetType() int {
	return model.CommandTypeFactSphere
}

// PublicOnly returns whether the executor should be intercepted in a
// private channel. It is false, so the command is not restricted to
// public channels.
func (e *Executor) PublicOnly() bool {
	return false
}
// Execute sends a uniformly random entry from factSphereFacts to the
// given channel. Send failures are logged and otherwise ignored.
func (e *Executor) Execute(s api.DiscordSession, channelID model.Snowflake, command *model.Command) {
	_, err := s.ChannelMessageSend(channelID.Format(), factSphereFacts[rand.Intn(len(factSphereFacts))])
	if err != nil {
		log.Info("Error sending factsphere message", err)
	}
}
// factSphereFacts is the fixed pool of quotes that Execute samples from
// uniformly at random for the ?factsphere command.
var factSphereFacts = [...]string{
	`The billionth digit of Pi is 9.`,
	`Humans can survive underwater. But not for very long.`,
	`A nanosecond lasts one billionth of a second.`,
	`Honey does not spoil.`,
	`The atomic weight of Germanium is seven two point six four.`,
	`An ostrich's eye is bigger than its brain.`,
	`Rats cannot throw up.`,
	`Iguanas can stay underwater for twenty-eight point seven minutes.`,
	`The moon orbits the Earth every 27.32 days.`,
	`A gallon of water weighs 8.34 pounds.`,
	`According to Norse legend, thunder god Thor's chariot was pulled across the sky by two goats.`,
	`Tungsten has the highest melting point of any metal, at 3,410 degrees Celsius.`,
	`Gently cleaning the tongue twice a day is the most effective way to fight bad breath.`,
	`The Tariff Act of 1789, established to protect domestic manufacture, was the second statute ever enacted by the United States government.`,
	`The value of Pi is the ratio of any circle's circumference to its diameter in Euclidean space.`,
	`The Mexican-American War ended in 1848 with the signing of the Treaty of Guadal<NAME>.`,
	`In 1879, <NAME> first proposed the adoption of worldwide standardized time zones at the Royal Canadian Institute.`,
	`<NAME> invented the theory of radioactivity, the treatment of radioactivity, and dying of radioactivity.`,
	`At the end of The Seagull by <NAME>, Konstantin kills himself.`,
	`Hot water freezes quicker than cold water.`,
	`The situation you are in is very dangerous.`,
	`Polymerase I polypeptide A is a human gene.`,
	`The Sun is 330,330 times larger than Earth.`,
	`Dental floss has superb tensile strength.`,
	`Raseph, the Semitic god of war and plague, had a gazelle growing out of his forehead.`,
	`Human tapeworms can grow up to twenty-two point nine meters.`,
	`If you have trouble with simple counting, use the following mnemonic device: one comes before two comes before 60 comes after 12 comes before six trillion comes after 504. This will make your earlier counting difficulties seem like no big deal.`,
	`The first person to prove that cow's milk is drinkable was very, very thirsty.`,
	`Roman toothpaste was made with human urine. Urine as an ingredient in toothpaste continued to be used up until the 18th century.`,
	`Volcano-ologists are experts in the study of volcanoes.`,
	`In Victorian England, a commoner was not allowed to look directly at the Queen, due to a belief at the time that the poor had the ability to steal thoughts. Science now believes that less than 4% of poor people are able to do this.`,
	`Cellular phones will not give you cancer. Only hepatitis.`,
	`In Greek myth, Prometheus stole fire from the Gods and gave it to humankind. The jewelry he kept for himself.`,
	`The Schrodinger's cat paradox outlines a situation in which a cat in a box must be considered, for all intents and purposes, simultaneously alive and dead. Schrodinger created this paradox as a justification for killing cats.`,
	`In 1862, <NAME> signed the Emancipation Proclamation, freeing the slaves. Like everything he did, Lincoln freed the slaves while sleepwalking, and later had no memory of the event.`,
	`The plural of surgeon general is surgeons general. The past tense of surgeons general is surgeonsed general`,
	`Contrary to popular belief, the Eskimo does not have one hundred different words for snow. They do, however, have two hundred and thirty-four words for fudge.`,
	`Diamonds are made when coal is put under intense pressure. Diamonds put under intense pressure become foam pellets, commonly used today as packing material.`,
	`Halley's Comet can be viewed orbiting Earth every seventy-six years. For the other seventy-five, it retreats to the heart of the sun, where it hibernates undisturbed.`,
	`The first commercial airline flight took to the air in 1914. Everyone involved screamed the entire way.`,
	`<NAME>, the first person to climb Mount Everest, did so accidentally while chasing a bird. `,
	`We will both die because of your negligence.`,
	`This is a bad plan. You will fail.`,
	`He will most likely kill you, violently.`,
	`He will most likely kill you.`,
	`You will be dead soon.`,
	`You are going to die in this room.`,
	`The Fact Sphere is a good person, whose insights are relevant.`,
	`The Fact Sphere is a good sphere, with many friends.`,
	`Dreams are the subconscious mind's way of reminding people to go to school naked and have their teeth fall out.`,
	`The square root of rope is string.`,
	`89% of magic tricks are not magic. Technically, they are sorcery.`,
	`At some point in their lives 1 in 6 children will be abducted by the Dutch.`,
	`According to most advanced algorithms, the world's best name is Craig.`,
	`To make a photocopier, simply photocopy a mirror.`,
	`Whales are twice as intelligent, and three times as delicious, as humans.`,
	`Pants were invented by sailors in the sixteenth century to avoid Poseidon's wrath. It was believed that the sight of naked sailors angered the sea god.`,
	`In Greek myth, the craftsman Daedalus invented human flight so a group of Minotaurs would stop teasing him about it.`,
	`The average life expectancy of a rhinoceros in captivity is 15 years.`,
	`China produces the world's second largest crop of soybeans.`,
	`In 1948, at the request of a dying boy, baseball legend <NAME> ate seventy-five hot dogs, then died of hot dog poisoning.`,
	`<NAME> did not exist. His plays were masterminded in 1589 by <NAME>, who used a Ouija board to enslave play-writing ghosts.`,
	`It is incorrectly noted that <NAME> invented 'push-ups' in 1878. <NAME> had in fact patented the activity three years earlier, under the name 'Tesla-cize'.`,
	`The automobile brake was not invented until 1895. Before this, someone had to remain in the car at all times, driving in circles until passengers returned from their errands.`,
	`The most poisonous fish in the world is the orange ruffy. Everything but its eyes are made of a deadly poison. The ruffy's eyes are composed of a less harmful, deadly poison.`,
	`The occupation of court jester was invented accidentally, when a vassal's epilepsy was mistaken for capering.`,
	`Before the Wright Brothers invented the airplane, anyone wanting to fly anywhere was required to eat 200 pounds of helium.`,
	`Before the invention of scrambled eggs in 1912, the typical breakfast was either whole eggs still in the shell or scrambled rocks.`,
	`During the Great Depression, the Tennessee Valley Authority outlawed pet rabbits, forcing many to hot glue-gun long ears onto their pet mice.`,
	`This situation is hopeless.`,
	`Corruption at 25%`,
	`Corruption at 50%`,
	`Fact: Space does not exist.`,
	`The Fact Sphere is not defective. Its facts are wholly accurate and very interesting.`,
	`The Fact Sphere is always right.`,
	`You will never go into space.`,
	`The Space Sphere will never go to space.`,
	`While the submarine is vastly superior to the boat in every way, over 97% of people still use boats for aquatic transportation.`,
	`The likelihood of you dying within the next five minutes is eighty-seven point six one percent.`,
	`The likelihood of you dying violently within the next five minutes is eighty-seven point six one percent.`,
	`You are about to get me killed.`,
	`The Fact Sphere is the most intelligent sphere.`,
	`The Fact Sphere is the most handsome sphere.`,
	`The Fact Sphere is incredibly handsome.`,
	`Spheres that insist on going into space are inferior to spheres that don't.`,
	`Whoever wins this battle is clearly superior, and will earn the allegiance of the Fact Sphere.`,
	`You could stand to lose a few pounds.`,
	`Avocados have the highest fiber and calories of any fruit.`,
	`Avocados have the highest fiber and calories of any fruit. They are found in Australians.`,
	`Every square inch of the human body has 32 million bacteria on it.`,
	`The average adult body contains half a pound of salt.`,
	`The Adventure Sphere is a blowhard and a coward.`,
	`Twelve. Twelve. Twelve. Twelve. Twelve. Twelve. Twelve. Twelve. Twelve. Twelve.`,
	`Pens. Pens. Pens. Pens. Pens. Pens. Pens.`,
	`Apples. Oranges. Pears. Plums. Kumquats. Tangerines. Lemons. Limes. Avocado. Tomato. Banana. Papaya. Guava.`,
	`Error. Error. Error. File not found.`,
	`Error. Error. Error. Fact not found.`,
	`Fact not found.`,
	`Warning, sphere corruption at twenty-- rats cannot throw up.`}
package set
// exist is the shared zero-size sentinel stored for every member.
var exist = struct{}{}

// Set represents an unordered collection of unique elements. Create
// instances with New, NewWithSize or NewFromSlice; the zero value is not
// usable. Set is not safe for concurrent use.
type Set struct {
	m map[interface{}]struct{}
}

// New creates and returns a set, optionally with the given elements.
func New(sl ...interface{}) *Set {
	return NewFromSlice(sl)
}

// NewWithSize creates and returns an initialized, empty set pre-sized
// for the given number of elements.
func NewWithSize(size int) *Set {
	return &Set{m: make(map[interface{}]struct{}, size)}
}

// NewFromSlice creates and returns a new set holding the elements of sl.
func NewFromSlice(sl []interface{}) *Set {
	s := NewWithSize(len(sl))
	s.Add(sl...)
	return s
}

// Add adds all the given elements to the set.
func (s *Set) Add(es ...interface{}) {
	for _, e := range es {
		s.m[e] = exist
	}
}

// Remove deletes all the given elements from the set. Removing a missing
// element is a no-op.
func (s *Set) Remove(es ...interface{}) {
	for _, e := range es {
		delete(s.m, e)
	}
}

// Contains reports whether e is a member of the set.
func (s *Set) Contains(e interface{}) bool {
	_, ok := s.m[e]
	return ok
}

// Clear removes all the elements from the set.
func (s *Set) Clear() {
	s.m = make(map[interface{}]struct{})
}

// Len returns the number of elements in the set.
func (s *Set) Len() int {
	return len(s.m)
}

// ForEach calls f once for every element, in unspecified order.
func (s *Set) ForEach(f func(e interface{})) {
	for e := range s.m {
		f(e)
	}
}

// Iter returns a receive-only channel yielding every element; the channel
// is buffered at half the set length to decouple producer and consumer.
// Note: the feeding goroutine blocks forever if the consumer stops
// receiving before the channel is drained.
func (s *Set) Iter() <-chan interface{} {
	ch := make(chan interface{}, s.Len()/2)
	go func() {
		for e := range s.m {
			ch <- e
		}
		close(ch)
	}()
	return ch
}

// Union returns a new set with the elements of both s and other.
func (s *Set) Union(other *Set) *Set {
	// Pre-size to the upper bound so inserts never rehash.
	n := NewWithSize(s.Len() + other.Len())
	for e := range s.m {
		n.m[e] = exist
	}
	for e := range other.m {
		n.m[e] = exist
	}
	return n
}

// Intersection returns a new set with the elements common to s and other.
func (s *Set) Intersection(other *Set) *Set {
	// Iterate over the smaller set; membership checks hit the larger one.
	small, large := s, other
	if large.Len() < small.Len() {
		small, large = large, small
	}
	n := New()
	for e := range small.m {
		if large.Contains(e) {
			n.Add(e)
		}
	}
	return n
}

// Difference returns a new set with the elements of s not present in other.
func (s *Set) Difference(other *Set) *Set {
	n := New()
	for e := range s.m {
		if !other.Contains(e) {
			n.Add(e)
		}
	}
	return n
}

// SymmetricDifference returns a new set with the elements present in
// exactly one of s and other.
func (s *Set) SymmetricDifference(other *Set) *Set {
	return s.Difference(other).Union(other.Difference(s))
}

// IsSubset reports whether every element of s is in other.
func (s *Set) IsSubset(other *Set) bool {
	if s.Len() > other.Len() {
		return false
	}
	for e := range s.m {
		if !other.Contains(e) {
			return false
		}
	}
	return true
}

// IsProperSubset reports whether s is a subset of other and strictly
// smaller than it.
func (s *Set) IsProperSubset(other *Set) bool {
	return s.IsSubset(other) && s.Len() < other.Len()
}

// IsSuperset reports whether every element of other is in s.
func (s *Set) IsSuperset(other *Set) bool {
	return other.IsSubset(s)
}

// IsProperSuperset reports whether s is a superset of other and strictly
// larger than it.
func (s *Set) IsProperSuperset(other *Set) bool {
	return s.IsSuperset(other) && s.Len() > other.Len()
}

// IsDisjoint reports whether s and other share no elements.
func (s *Set) IsDisjoint(other *Set) bool {
	// Scan the smaller set with early exit rather than materializing the
	// whole intersection as the original did.
	small, large := s, other
	if large.Len() < small.Len() {
		small, large = large, small
	}
	for e := range small.m {
		if large.Contains(e) {
			return false
		}
	}
	return true
}

// IsEqual reports whether s and other contain exactly the same elements.
func (s *Set) IsEqual(other *Set) bool {
	// Equal sets are same-size mutual subsets; reuse IsSubset rather than
	// duplicating its loop.
	return s.Len() == other.Len() && s.IsSubset(other)
}
package plot
import (
"errors"
"fmt"
"image"
"image/color"
"image/png"
"io"
"math"
"os"
"golang.org/x/image/font"
"golang.org/x/image/font/basicfont"
"golang.org/x/image/math/fixed"
)
// Canvas is the basis for all other drawing
// primitives. Its only properties are a width,
// a height and a background color.
// Canvas is based on `image.RGBA'.
type Canvas struct {
img *image.RGBA
Width int
Height int
}
// At ensures that `Canvas' implements `image.Image'
func (c *Canvas) At(x, y int) color.Color {
return c.img.At(x, y)
}
// Set ensures that `Canvas' implements `image.Image'
func (c *Canvas) Set(x, y int, col color.Color) {
c.img.Set(x, y, col)
}
// Bounds ensures that `Canvas' implements `image.Image'
func (c *Canvas) Bounds() image.Rectangle {
return c.img.Bounds()
}
// ColorModel ensures that `Canvas' implements `image.Image'
func (c *Canvas) ColorModel() color.Model {
return c.img.ColorModel()
}
// NewCanvas creates a new canvas for plotting.
func NewCanvas(width, height int, bg *color.RGBA) *Canvas {
// create a new rectangular `Canvas'
img := image.NewRGBA(image.Rect(0, 0, width, height))
c := &Canvas{
img: img,
Width: width,
Height: height,
}
// set background color of `Canvas'
b := c.img.Bounds()
for y := b.Min.Y; y < b.Max.Y; y++ {
for x := b.Min.X; x < b.Max.X; x++ {
c.img.Set(x, y, bg)
}
}
return c
}
// EncodePNG writes the canvas — including everything drawn on it — to w
// in PNG format, returning any encoding error.
func (c *Canvas) EncodePNG(w io.Writer) error {
	return png.Encode(w, c)
}
// SaveToFile saves a `Canvas' to the PNG file at `name'. The file does
// not need to exist; if it does, it will be overwritten.
func (c *Canvas) SaveToFile(name string) (err error) {
	f, err := os.Create(name)
	if err != nil {
		return fmt.Errorf("primitives: unable to create %s: %v", name, err)
	}
	// Report Close failures too: the original deferred f.Close() bare,
	// silently dropping errors from flushing the written data.
	defer func() {
		if cerr := f.Close(); cerr != nil && err == nil {
			err = fmt.Errorf("primitives: unable to close %s: %v", name, cerr)
		}
	}()
	if err := c.EncodePNG(f); err != nil {
		return fmt.Errorf("primitives: unable to encode canvas to png: %v", err)
	}
	return nil
}
// Line draws a straight line on a `Canvas'. The line
// will be drawn from (`x0', `y0') to (`x1', `y1') with
// a certain thickness in pixels and a certain color.
//
// NOTE(review): the guard below rejects any call where x0 > x1 OR
// y0 > y1, so lines with a negative slope in one axis (e.g. from
// bottom-left to top-right in image coordinates) cannot be drawn at
// all — confirm this restriction is intended.
// NOTE(review): the thickness loops run from -thick to +thick, so the
// painted stroke is actually 2*thick+1 pixels wide, and because the
// general case steps one pixel per column, slopes steeper than 1 leave
// vertical gaps between painted pixels.
func (c *Canvas) Line(x0, y0, x1, y1, thick int, col *color.RGBA) error {
	if (x0 > x1) || (y0 > y1) {
		return errors.New("primitives: x0,y0 must be smaller than x1,y1")
	}
	// edge case of a purely vertical line
	// the case of a horizontal line with
	// slope 0 is covered by the general
	// form of the algorithm
	if x0 == x1 {
		for y := y0; y <= y1; y++ {
			for tx := -thick; tx <= thick; tx++ {
				for ty := -thick; ty <= thick; ty++ {
					c.Set(x0+tx, y+ty, col)
				}
			}
		}
		return nil
	}
	// calculate actual linear equation
	slope := (float64(y1) - float64(y0)) /
		(float64(x1) - float64(x0)) /* calculate slope of line */
	intercept := float64(y0) -
		(slope * float64(x0)) /* calculate intercept of line */
	// Walk one pixel per column and round the ideal y for each x.
	for x := x0; x <= x1; x++ {
		y := int(math.Round((float64(x) * slope) + intercept))
		for tx := -thick; tx <= thick; tx++ {
			for ty := -thick; ty <= thick; ty++ {
				c.Set(x+tx, y+ty, col)
			}
		}
	}
	return nil
}
// Rectangle creates a rectangle with a certain outline
// color between points (`x0', `y0') and (`x1', `y1') on
// a `Canvas'. Thickness `thick' can be specified as well.
func (c *Canvas) Rectangle(x0, y0, x1, y1, thick int, out *color.RGBA) {
	// Paint one pixel-wide outline per thickness step, moving inward.
	for t := 0; t < thick; t++ {
		// TODO: replace with `Line' implementation
		// top and bottom edges
		for x := x0; x <= x1; x++ {
			c.Set(x, y0+t, out)
			c.Set(x, y1-t, out)
		}
		// left and right edges
		for y := y0; y <= y1; y++ {
			c.Set(x0+t, y, out)
			c.Set(x1-t, y, out)
		}
	}
}
// AddLabel adds a `label' at a certain `x' and `y' position
// of a `Canvas'. Currently, only a fixed-size font is
// implemented (`basicfont.Face7x13').
// The pixel coordinates mark the drawing origin (the text baseline) and
// are converted to the 26.6 fixed-point format (units of 1/64 pixel)
// that the font drawer expects.
func (c *Canvas) AddLabel(x, y int, label string, col *color.RGBA) {
	// TODO: add a variable-size font
	point := fixed.Point26_6{fixed.Int26_6(x * 64), fixed.Int26_6(y * 64)}
	face := basicfont.Face7x13 /* fixed-size font */
	d := &font.Drawer{
		Dst:  c,
		Src:  image.NewUniform(col),
		Face: face,
		Dot:  point,
	}
	d.DrawString(label)
}
package s4
import (
"crypto/rand"
"errors"
"github.com/ceriath/rsa-shamir-secret-sharing/gf256"
)
var usedXValues []byte
// Split splits a secret into n shares where the threshold k applies:
// any k of the n shares reconstruct the secret, fewer reveal nothing
// (Shamir secret sharing, evaluated per secret byte over GF(2^8)).
//
// NOTE(review): Split resets the package-level usedXValues slice that
// getXValue mutates, so it is not safe for concurrent use — confirm
// callers are single-threaded or move this bookkeeping into local state.
func Split(secret []byte, k, n byte) ([]Share, error) {
	usedXValues = make([]byte, 0)
	if k > n {
		return nil, errors.New("the threshold can not be greater than the shares to generate")
	}
	shares := make([]Share, n)
	// make n shares with random x values != 0
	for i := byte(0); i < n; i++ {
		shares[i] = Share{
			X:              getXValue(),
			Values:         make([]byte, 0),
			Index:          i,
			RequiredShares: k,
		}
	}
	// split the secret into single bytes and create a new polynomial for each byte
	for _, singleSecretByte := range secret {
		poly := generatePolynomial(singleSecretByte, k)
		// calculate f(x) for each share and append the resulting y to the share
		for i := 0; i < len(shares); i++ {
			shareValue := calculateForPolynomial(poly, shares[i].X)
			shares[i].Values = append(shares[i].Values, shareValue)
		}
	}
	return shares, nil
}
// calculateForPolynomial evaluates the polynomial with the given
// coefficients (slice index = exponent) at point x over GF(2^8) using
// the Rijndael field. It returns 0 for an empty coefficient slice.
//
// Evaluation uses Horner's scheme — one field multiplication and one
// addition per coefficient — instead of recomputing x^exponent from
// scratch for every term as the previous implementation did.
func calculateForPolynomial(poly []byte, x byte) byte {
	if len(poly) == 0 {
		return 0
	}
	field := gf256.NewField(gf256.RijndaelPolynomial, gf256.RijndaelGenerator)
	// Horner: ((a_{n-1}*x + a_{n-2})*x + ...)*x + a_0; valid in GF(2^8)
	// because field Add/Mul are associative and distributive.
	result := poly[len(poly)-1]
	for i := len(poly) - 2; i >= 0; i-- {
		result = field.Add(field.Mul(result, x), poly[i])
	}
	return result
}
// generatePolynomial builds a random polynomial of degree k-1 whose
// constant term is the secret. The returned slice holds the
// coefficients, indexed by exponent.
func generatePolynomial(secret byte, k byte) []byte {
	// Pre-size the slice: exactly k coefficients are produced, so the
	// appends below never reallocate.
	coefficients := make([]byte, 0, k)
	coefficients = append(coefficients, secret)
	// One cryptographically random coefficient per non-zero exponent.
	for i := byte(1); i < k; i++ {
		coefficients = append(coefficients, getRandomByte())
	}
	return coefficients
}
// getXValue draws a random x coordinate that is non-zero and has not
// been handed out before (tracked in the package-level usedXValues
// slice). Zero is forbidden because f(0) is the secret itself.
func getXValue() byte {
	x := getRandomByte()
	// make sure the value is not used yet and to never ever get a zero since that would reveal the secret
	for contains(usedXValues, x) || x == 0x0 {
		x = getRandomByte()
	}
	usedXValues = append(usedXValues, x)
	return x
}
// contains reports whether x occurs anywhere in slice.
func contains(slice []byte, x byte) bool {
	for i := range slice {
		if slice[i] == x {
			return true
		}
	}
	return false
}
// getRandomByte returns a single cryptographically secure random byte.
// It panics when the system's secure random source fails, since falling
// back to a weak source would compromise the scheme.
func getRandomByte() byte {
	var b [1]byte
	if _, err := rand.Read(b[:]); err != nil {
		panic("error getting a secure random")
	}
	return b[0]
}
package trackball
import (
"math"
"github.com/go-gl/mathgl/mgl32"
)
var MIN_THETA = 0.000001
var MAX_THETA = math.Pi - MIN_THETA
// Trackball moves on a sphere around a target point with a specified radius.
type Trackball struct {
	width             int        // viewport width in pixels
	height            int        // viewport height in pixels
	radius            float32    // distance from Target to the camera position
	theta             float32    // vertical (polar) angle in degrees; converted to radians in Update
	phi               float32    // horizontal (azimuthal) angle in degrees
	Pos               mgl32.Vec3 // camera position in world space, derived by Update
	Target            mgl32.Vec3 // point the camera orbits and looks at
	Up                mgl32.Vec3 // camera up vector, derived by Update
	Fov               float32    // vertical field of view in degrees
	Near              float32    // near clipping plane distance
	Far               float32    // far clipping plane distance
	leftButtonPressed bool       // tracks the left mouse button for drag-to-rotate
}
// MakeDefault creates a Trackball with the viewport of width and height and a radius from the origin.
// It assumes a field of view of 45 degrees and a near and far plane at 0.1 and 100.0 respectively.
func MakeDefault(width, height int, radius float32) Trackball {
return Make(
width, height, radius,
mgl32.Vec3{0.0, 0.0, 0.0}, 45,
0.1, 100.0,
)
}
// NewDefault creates a reference to a Trackball with the viewport of width and height and a radius from the origin.
// It assumes a field of view of 45 degrees and a near and far plane at 0.1 and 100.0 respectively.
func NewDefault(width, height int, radius float32) *Trackball {
	// Delegate to MakeDefault so the default parameters live in exactly
	// one place; this mirrors how New wraps Make.
	camera := MakeDefault(width, height, radius)
	return &camera
}
// Make creates a Trackball with the viewport of width and height, the radius from the target,
// the target position the camera is orbiting around, the field of view and the distance of the near and far plane.
func Make(width, height int, radius float32, target mgl32.Vec3, fov, near, far float32) Trackball {
camera := Trackball{
width: width,
height: height,
radius: radius,
theta: 90.0,
phi: 90.0,
Target: target,
Fov: fov,
Near: near,
Far: far,
}
camera.Update()
return camera
}
// New creates a reference to a Trackball with the viewport of width and height, the radius from the target,
// the target position the camera is orbiting around, the field of view and the distance of the near and far plane.
func New(width, height int, radius float32, target mgl32.Vec3, fov, near, far float32) *Trackball {
camera := Make(width, height, radius, target, fov, near, far)
return &camera
}
// Update recalculates the position of the camera.
// Call it every time after calling Rotate or Zoom.
// The position is derived from spherical coordinates (radius, theta,
// phi) around Target, and the up vector is rebuilt from the new look
// direction so the view basis stays orthogonal.
func (camera *Trackball) Update() {
	theta := mgl32.DegToRad(camera.theta)
	phi := mgl32.DegToRad(camera.phi)
	// limit angles: keep theta strictly inside (0, pi) so the position
	// never reaches the poles, where the basis below would degenerate
	theta = float32(math.Max(float64(theta), MIN_THETA))
	theta = float32(math.Min(float64(theta), MAX_THETA))
	// sphere coordinates; y is "up", so cos(theta) feeds the y component
	btheta := float64(theta)
	bphi := float64(phi)
	pos := mgl32.Vec3{
		camera.radius * float32(math.Sin(btheta)*math.Cos(bphi)),
		camera.radius * float32(math.Cos(btheta)),
		camera.radius * float32(math.Sin(btheta)*math.Sin(bphi)),
	}
	camera.Pos = pos.Add(camera.Target)
	// rebuild an orthogonal camera basis from the new look direction
	look := camera.Pos.Sub(camera.Target).Normalize()
	worldUp := mgl32.Vec3{0.0, 1.0, 0.0}
	right := worldUp.Cross(look)
	camera.Up = look.Cross(right)
}
// Rotate adds delta angles in degrees to the theta and phi angles,
// where theta is the vertical angle and phi the horizontal angle.
// Call Update afterwards to apply the change.
func (camera *Trackball) Rotate(theta, phi float32) {
	camera.theta, camera.phi = camera.theta+theta, camera.phi+phi
}
// Zoom changes the radius of the camera to the target point.
// Call Update afterwards to apply the change.
func (camera *Trackball) Zoom(distance float32) {
	r := camera.radius - distance
	// Clamp so the camera never reaches (or passes through) the target.
	if r < 0.1 {
		r = 0.1
	}
	camera.radius = r
}
// GetPos returns the position of the camera in worldspace
func (camera *Trackball) GetPos() mgl32.Vec3 {
return camera.Pos
}
// GetView returns the view matrix of the camera.
func (camera *Trackball) GetView() mgl32.Mat4 {
return mgl32.LookAtV(camera.Pos, camera.Target, camera.Up)
}
// GetPerspective returns the perspective projection of the camera.
func (camera *Trackball) GetPerspective() mgl32.Mat4 {
fov := mgl32.DegToRad(camera.Fov)
aspect := float32(camera.width) / float32(camera.height)
return mgl32.Perspective(fov, aspect, camera.Near, camera.Far)
}
// GetOrtho returns the orthographic projection of the camera.
// The cube's half-extent d matches the perspective frustum's size at
// the far plane, so ortho and perspective views frame similar volumes.
func (camera *Trackball) GetOrtho() mgl32.Mat4 {
	angle := camera.Fov * math.Pi / 180.0
	// The original stored this in two identical variables (dfar and d);
	// the redundant copy is removed.
	d := float32(math.Tan(float64(angle/2.0))) * camera.Far
	return mgl32.Ortho(-d, d, -d, d, camera.Near, camera.Far)
}
// GetViewPerspective returns P*V.
func (camera *Trackball) GetViewPerspective() mgl32.Mat4 {
return camera.GetPerspective().Mul4(camera.GetView())
}
// SetPos updates the target point of the camera.
// It requires to call Update to take effect.
func (camera *Trackball) SetPos(pos mgl32.Vec3) {
camera.Target = pos
}
// OnCursorPosMove is a callback handler that is called every time the cursor moves.
// While the left button is held, dragging rotates the camera by half a
// degree per pixel of movement. It always reports the event as unhandled.
func (camera *Trackball) OnCursorPosMove(x, y, dx, dy float64) bool {
	if !camera.leftButtonPressed {
		return false
	}
	dPhi := float32(-dx) / 2.0
	dTheta := float32(-dy) / 2.0
	camera.Rotate(dTheta, -dPhi)
	return false
}
// OnMouseButtonPress is a callback handler that is called every time a mouse button is pressed or released.
func (camera *Trackball) OnMouseButtonPress(leftPressed, rightPressed bool) bool {
camera.leftButtonPressed = leftPressed
return false
}
// OnMouseScroll is a callback handler that is called every time the mouse wheel moves.
func (camera *Trackball) OnMouseScroll(x, y float64) bool {
camera.Zoom(float32(y))
return false
}
// OnKeyPress is a callback handler that is called every time a keyboard key is pressed.
func (camera *Trackball) OnKeyPress(key, action, mods int) bool {
return false
} | pkg/scene/camera/trackball/trackball.go | 0.86212 | 0.621455 | trackball.go | starcoder |
package any
// Or sets the Value to the default when not Ok and returns the Value.
// Otherwise, the original Value is returned and the default is ignored.
// This is useful for providing a default before using the underlying value.
func (v Value) Or(i interface{}) Value {
	if v.Ok() {
		return v
	}
	v.i = i
	return v
}
// BoolOr returns the value as a bool type or the default when the Value is not of type bool.
func (v Value) BoolOr(d bool) bool {
	if b, ok := v.i.(bool); ok {
		return b
	}
	return d
}
// IntOr returns the value as an int type or the default when the Value is not of type int.
func (v Value) IntOr(d int) int {
	if i, ok := v.i.(int); ok {
		return i
	}
	return d
}
// Int8Or returns the value as a int8 type or the default when the Value is not of type int8.
func (v Value) Int8Or(d int8) int8 {
i, ok := v.i.(int8)
if !ok {
return d
}
return i
}
// Int16Or returns the value as a int16 type or the default when the Value is not of type int16.
func (v Value) Int16Or(d int16) int16 {
i, ok := v.i.(int16)
if !ok {
return d
}
return i
}
// Int32Or returns the value as a int32 type or the default when the Value is not of type int32.
func (v Value) Int32Or(d int32) int32 {
i, ok := v.i.(int32)
if !ok {
return d
}
return i
}
// Int64Or returns the value as a int64 type or the default when the Value is not of type int64.
func (v Value) Int64Or(d int64) int64 {
i, ok := v.i.(int64)
if !ok {
return d
}
return i
}
// UintOr returns the value as a uint type or the default when the Value is not of type uint.
func (v Value) UintOr(d uint) uint {
i, ok := v.i.(uint)
if !ok {
return d
}
return i
}
// Uint8Or returns the value as a uint8 type or the default when the Value is not of type uint8.
func (v Value) Uint8Or(d uint8) uint8 {
i, ok := v.i.(uint8)
if !ok {
return d
}
return i
}
// Uint16Or returns the value as a uint16 type or the default when the Value is not of type uint16.
func (v Value) Uint16Or(d uint16) uint16 {
i, ok := v.i.(uint16)
if !ok {
return d
}
return i
}
// Uint32Or returns the value as a uint32 type or the default when the Value is not of type uint32.
func (v Value) Uint32Or(d uint32) uint32 {
i, ok := v.i.(uint32)
if !ok {
return d
}
return i
}
// Uint64Or returns the value as a uint64 type or the default when the Value is not of type uint64.
func (v Value) Uint64Or(d uint64) uint64 {
i, ok := v.i.(uint64)
if !ok {
return d
}
return i
}
// UintptrOr returns the value as a uintptr type or the default when the Value is not of type uintptr.
func (v Value) UintptrOr(d uintptr) uintptr {
i, ok := v.i.(uintptr)
if !ok {
return d
}
return i
}
// Float32Or returns the value as a float32 type or the default when the Value is not of type float32.
func (v Value) Float32Or(d float32) float32 {
f, ok := v.i.(float32)
if !ok {
return d
}
return f
}
// Float64Or returns the value as a float64 type or the default when the Value is not of type float64.
func (v Value) Float64Or(d float64) float64 {
f, ok := v.i.(float64)
if !ok {
return d
}
return f
}
// Complex64Or returns the value as a complex64 type or the default when the Value is not of type complex64.
func (v Value) Complex64Or(d complex64) complex64 {
c, ok := v.i.(complex64)
if !ok {
return d
}
return c
}
// Complex128Or returns the value as a complex128 type or the default when the Value is not of type complex128.
func (v Value) Complex128Or(d complex128) complex128 {
c, ok := v.i.(complex128)
if !ok {
return d
}
return c
}
// ByteOr returns the value as a byte type or the default when the Value is not of type byte.
func (v Value) ByteOr(d byte) byte {
b, ok := v.i.(byte)
if !ok {
return d
}
return b
}
// BytesOr returns the value as a []byte type or the default when the Value is not of type []byte.
func (v Value) BytesOr(d []byte) []byte {
b, ok := v.i.([]byte)
if !ok {
return d
}
return b
}
// RuneOr returns the value as a rune type or the default when the Value is not of type rune.
func (v Value) RuneOr(d rune) rune {
r, ok := v.i.(rune)
if !ok {
return d
}
return r
}
// StringOr returns the value as a string type or the default when the Value is not of type string.
func (v Value) StringOr(d string) string {
	if s, ok := v.i.(string); ok {
		return s
	}
	return d
}
// InterfaceOr provides the underlying value as an empty interface or the default when not Ok.
func (v Value) InterfaceOr(d interface{}) interface{} {
if !v.Ok() {
return d
}
return v.i
} | or.go | 0.766119 | 0.402833 | or.go | starcoder |
<tutorial>
Match with device id example of using 51Degrees device detection. The example
shows how to:
<ol>
<li>Instantiate the 51Degrees device detection provider.
<p><pre class="prettyprint lang-go">
provider = FiftyOneDegreesPatternV3.NewProvider(dataFile)
</pre></p>
<li>Produce a match for a single device id
<p><pre class="prettyprint lang-go">
match = provider.GetMatchForDeviceId(deviceId)
</pre></p>
<li>Extract the value of the IsMobile property
<p><pre class="prettyprint lang-go">
match.GetValue("IsMobile")
</pre></p>
</ol>
This example assumes you have the 51Degrees Go API installed correctly.
</tutorial>
*/
// Snippet Start
package main
import (
"fmt"
"./src/pattern"
)
// Location of data file.
var dataFile = "../data/51Degrees-LiteV3.2.dat"
// Which properties to retrieve
var properties = []string{"IsMobile", "PlatformName", "PlatformVersion"}
// Provides access to device detection functions.
var provider =
FiftyOneDegreesPatternV3.NewProvider(dataFile)
// User-Agent string of an iPhone mobile device.
var mobileUserAgent = "Mozilla/5.0 (iPhone; CPU iPhone OS 7_1 like Mac OS X) " +
"AppleWebKit/537.51.2 (KHTML, like Gecko) 'Version/7.0 Mobile/11D167 " +
"Safari/9537.53"
// User-Agent string of Firefox Web browser version 41 on desktop.
var desktopUserAgent = "Mozilla/5.0 (Windows NT 6.3; WOW64; rv:41.0) " +
"Gecko/20100101 Firefox/41.0"
// User-Agent string of a MediaHub device.
var mediaHubUserAgent = "Mozilla/5.0 (Linux; Android 4.4.2; X7 Quad Core " +
"Build/KOT49H) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 " +
"Chrome/30.0.0.0 Safari/537.36"
func main() {
fmt.Println("Starting Match With Device Id Example.")
// Fetches the device id for a mobile User-Agent.
var match = provider.GetMatch(mobileUserAgent)
var mobileDeviceId = match.GetDeviceId()
// Fetches the device id for a desktop User-Agent.
match = provider.GetMatch(desktopUserAgent)
var desktopDeviceId = match.GetDeviceId()
// Fetches the device id for a MediaHub User-Agent.
match = provider.GetMatch(mediaHubUserAgent)
var mediaHubDeviceId = match.GetDeviceId()
// Carries out a match with a mobile device id.
fmt.Println("\nMobile Device Id: ", mobileDeviceId)
match = provider.GetMatchForDeviceId(mobileDeviceId)
fmt.Println(" IsMobile: ", match.GetValue("IsMobile"))
// Carries out a match with a desktop device id.
fmt.Println("\nDesktop Device Id: ", desktopDeviceId)
match = provider.GetMatchForDeviceId(desktopDeviceId)
fmt.Println(" IsMobile: ", match.GetValue("IsMobile"))
// Carries out a match with a MediaHub device id.
fmt.Println("\nMediaHub Device Id: ", mediaHubDeviceId)
match = provider.GetMatchForDeviceId(mediaHubDeviceId)
fmt.Println(" IsMobile: ", match.GetValue("IsMobile"))
}
// Snippet End | MatchForDeviceId.go | 0.539954 | 0.552238 | MatchForDeviceId.go | starcoder |
package merkle
import (
"errors"
"math"
"github.com/chain/txvm/crypto/sha3"
"github.com/chain/txvm/crypto/sha3pool"
)
var (
leafPrefix = []byte{0x00}
interiorPrefix = []byte{0x01}
emptyStringHash = sha3.Sum256(nil)
)
// AuditHash stores the hash value and denotes which side of the concatenation
// operation it should be on.
// For example, if we have a hashed item A and an audit hash {Val: B, RightOperator: false},
// the validation is: H(B + A).
type AuditHash struct {
Val [32]byte
RightOperator bool // FALSE indicates the hash should be on the LEFT side of concatenation, TRUE for right side.
}
// Proof returns the proofs required to validate an item at index i, not including the original item i.
// This errors when the requested index is out of bounds.
func Proof(items [][]byte, i int) ([]AuditHash, error) {
	if i < 0 || i >= len(items) {
		// Bug fix: the message previously contained a literal "%v"
		// because it was built with errors.New and never formatted.
		return nil, errors.New("index is out of bounds")
	}
	if len(items) == 1 {
		return []AuditHash{}, nil
	}
	// Split at the previous power of two — the same split Root uses to
	// build the tree, so sibling hashes line up.
	k := prevPowerOfTwo(len(items))
	recurse := items[:k]
	aggregate := items[k:]
	rightOperator := true
	if i >= k {
		// Target is in the right subtree: recurse right, aggregate left,
		// and the sibling hash goes on the LEFT of the concatenation.
		i = i - k
		recurse, aggregate = aggregate, recurse
		rightOperator = false
	}
	res, err := Proof(recurse, i)
	if err != nil {
		return nil, err
	}
	res = append(res, AuditHash{Root(aggregate), rightOperator})
	return res, nil
}
// Root creates a merkle tree from a slice of byte slices
// and returns the root hash of the tree.
// Leaves are hashed with a 0x00 prefix and interior nodes with a 0x01
// prefix (domain separation, preventing leaf/node confusion); an empty
// input hashes to SHA3-256 of the empty string.
func Root(items [][]byte) [32]byte {
	switch len(items) {
	case 0:
		return emptyStringHash
	case 1:
		h := sha3pool.Get256()
		defer sha3pool.Put256(h)
		h.Write(leafPrefix)
		h.Write(items[0])
		var root [32]byte
		h.Read(root[:])
		return root
	default:
		// Split at the previous power of two so the left subtree is
		// always full; Proof relies on the same split.
		k := prevPowerOfTwo(len(items))
		left := Root(items[:k])
		right := Root(items[k:])
		h := sha3pool.Get256()
		defer sha3pool.Put256(h)
		h.Write(interiorPrefix)
		h.Write(left[:])
		h.Write(right[:])
		var root [32]byte
		h.Read(root[:])
		return root
	}
}
// prevPowerOfTwo returns the largest power of two that is smaller than a given number.
// In other words, for some input n, the prevPowerOfTwo k is a power of two such that
// k < n <= 2k. This is a helper function used during the calculation of a merkle tree.
//
// NOTE(review): the general case goes through float64 math.Log2, which
// is only guaranteed exact while float64(n) is exact (n < 2^53) —
// confirm callers stay in that range. For n == 1 the power-of-two
// branch returns 0; callers here always pass n >= 2.
func prevPowerOfTwo(n int) int {
	// If the number is a power of two, divide it by 2 and return.
	if n&(n-1) == 0 {
		return n / 2
	}
	// Otherwise, find the previous PoT.
	exponent := uint(math.Log2(float64(n)))
	return 1 << exponent // 2^exponent
}
package day8
import (
"fmt"
"math"
"strings"
"github.com/dschroep/advent-of-code/common"
)
// Converts `digit` to an integer by looking at `inputLine` (format "<input> | <output>").
// Returns -1 if parsing was not possible.
func toInt(digit string, inputLine string) int {
digitLength := len(digit)
// Sort out the obvious digits.
switch {
case digitLength == 2:
return 1
case digitLength == 4:
return 4
case digitLength == 3:
return 7
case digitLength == 7:
return 8
}
// For the following approach it is necessary to know
// which segments result in a 1 and which result in a 4.
numbers := strings.Split(strings.Join(strings.Split(inputLine, " | "), " "), " ")
oneSegments := common.FilterSlice(numbers, func(number string) bool {
return len(number) == 2
})[0]
fourSegments := common.FilterSlice(numbers, func(number string) bool {
return len(number) == 4
})[0]
// At this point, the digit can only be 0, 2, 3, 5, 6, or 9.
// These digits have a length of either 6 or 5.
if digitLength == 6 {
// The possible values here are 0, 6, and 9.
// 9 is the only one of these that matches with all four segments of 4.
for i, segment := range fourSegments {
if !strings.ContainsRune(digit, segment) {
break
}
if i == len(fourSegments)-1 {
return 9
}
}
// 0 matches with both segments of 1 while 6 doesn't.
for i, segment := range oneSegments {
if !strings.ContainsRune(digit, segment) {
break
}
if i == len(oneSegments)-1 {
return 0
}
}
return 6
} else if digitLength == 5 {
// The possible values here are 2, 3, and 5.
// 3 is the only one of these that matches with both segments of 1.
for i, segment := range oneSegments {
if !strings.ContainsRune(digit, segment) {
break
}
if i == len(oneSegments)-1 {
return 3
}
}
// 5 matches with three segments of 4 while 2 only matches with two.
var matches int
for _, segment := range fourSegments {
if strings.ContainsRune(digit, segment) {
matches++
}
}
if matches == 3 {
return 5
}
return 2
}
return -1
}
// solveLvl2 solves level 2 of day 8 and returns the result as a printable message.
// For every input line it decodes the four output digits (right of the
// " | " separator) with toInt, assembles them into a decimal number,
// and sums those numbers across all lines.
func solveLvl2() string {
	inputs, err := common.GetFileInput(8)
	if err != nil {
		return "Could not open input file. Aborting."
	}
	var sum int
	for _, input := range inputs {
		// Everything after " | " is the four-digit output value.
		outputNumber := strings.Split(input, " | ")[1]
		var parsedNumber int
		for index, outputDigit := range strings.Split(outputNumber, " ") {
			parsedDigit := toInt(outputDigit, input)
			if parsedDigit == -1 {
				return "Could not parse output digit. Aborting."
			}
			// Digit at position index has place value 10^(3-index).
			parsedNumber += parsedDigit * int(math.Pow10(3-index))
		}
		sum += parsedNumber
	}
	return fmt.Sprintf("The output's sum is %d.", sum)
}
package ng
import (
"math"
)
// Vector is the base type of ng: a one-dimensional slice of float64.
type Vector []float64

// NewVector returns a zero-initialized vector with the given number of elements.
func NewVector(size int) Vector {
	v := make(Vector, size)
	return v
}
// Add sums each element of the given vector with the current vector and stores
// the resulting value in the current vector.
func (a Vector) Add(b Vector) {
bt := b[:len(a)]
for i := range a {
a[i] += bt[i]
}
}
// Dimensions returns the number of dimensions in the vector which is 1.
func (a Vector) Dimensions() int {
return 1
}
// Dot returns the dot product of this vector and the given vector.
// This function will panic if b.Size() < a.Size().
func (a Vector) Dot(b Vector) float64 {
sum := 0.0
bt := b[:len(a)]
for i := range a {
sum += a[i] * bt[i]
}
return sum
}
// Fill sets all elements of the vector to the given value.
func (a Vector) Fill(v float64) {
for i := range a {
a[i] = v
}
}
// Magnitude returns the Euclidean norm of the vector: the square root
// of the sum of each element squared.
func (a Vector) Magnitude() float64 {
	var sumSq float64
	for _, v := range a {
		sumSq += v * v
	}
	return math.Sqrt(sumSq)
}
// Max returns the maximum value in the vector.
// It panics on an empty vector (index out of range), as before.
func (a Vector) Max() float64 {
	// Bug fix: the comparison was inverted ("max > v" replaced max with
	// the smaller value), so this method previously returned the minimum.
	max := a[0]
	for _, v := range a[1:] {
		if v > max {
			max = v
		}
	}
	return max
}
// Min returns the minimum value in the vector.
// It panics on an empty vector (index out of range), as before.
func (a Vector) Min() float64 {
	// Bug fix: the comparison was inverted ("min < v" replaced min with
	// the larger value), so this method previously returned the maximum.
	min := a[0]
	for _, v := range a[1:] {
		if v < min {
			min = v
		}
	}
	return min
}
// Normalize scales the vector by one over the magnitude of the vector such that
// the vector's magnitude is one. Transforms vector into the unit vector.
//
// NOTE(review): a zero vector has magnitude 0, so this divides by zero
// and fills the vector with NaN/Inf — confirm callers never pass one.
func (a Vector) Normalize() {
	a.Scale(1 / a.Magnitude())
}
// Product returns the product of all vector elements.
func (a Vector) Product() float64 {
p := 1.0
for _, v := range a {
p *= v
}
return p
}
// Resize changes the size and capacity of the vector. If the capacity is
// less than size, a new backing array is allocated and the existing
// values are copied into it (via append).
func (a *Vector) Resize(size int) {
	if size <= cap(*a) {
		// Shrink (or keep) in place; the three-index slice also caps the
		// capacity at size so later appends cannot stomp old storage.
		*a = (*a)[:size:size]
		return
	}
	// Bug fix: grow by size-len(*a) rather than size-cap(*a), so the
	// final length is exactly size even when len(*a) < cap(*a).
	*a = append(*a, make([]float64, size-len(*a))...)
}
// Scale multiplies all elements of the vector by the given value.
func (a Vector) Scale(s float64) {
for i := range a {
a[i] *= s
}
}
// Size returns of slice of integers containing the length of each dimension
// of the vector. Since there is only one dimension, the first element of
// the slice will contain the same value as Vector.Size().
func (a Vector) Size() []int {
return []int{len(a)}
}
// Sum returns the sum of all values in the vector.
func (a Vector) Sum() float64 {
sum := 0.0
for _, value := range a {
sum += value
}
return sum
}
// Transpose performs an in-place matrix transpose of the elements in the
// Vector. The resulting matrix will have the dimensions [columns x rows].
// This is the classic cycle-following in-place transpose: every element
// belongs to a permutation cycle, and each cycle is rotated exactly once
// by starting it only from its smallest index. It uses O(1) extra space
// at the cost of re-walking cycles to find their minima.
func (a Vector) Transpose(rows, columns int) {
	q := rows*columns - 1
	for start := 1; start < q; start++ {
		// Walk the cycle containing start; i counts the cycle length.
		// The walk stops once it wraps to an index <= start.
		next := start
		i := 0
		for {
			i++
			next = (next%rows)*columns + next/rows
			if next <= start {
				break
			}
		}
		// Rotate the cycle only when start is its smallest index
		// (next == start) and the cycle is non-trivial (length != 1).
		if next >= start && i != 1 {
			t := a[start]
			next = start
			for {
				i = (next%rows)*columns + next/rows
				if i != start {
					a[next] = a[i]
				} else {
					a[next] = t
				}
				next = i
				if next <= start {
					break
				}
			}
		}
	}
}
package tilegraphics
import "image/color"
// Rectangle is a single rectangle drawn on the display that can be moved
// around.
type Rectangle struct {
parent *Layer // nil for the root
x1, y1, x2, y2 int16
color color.RGBA
}
// boundingBox returns the exact bounding box of the rectangle.
func (r *Rectangle) boundingBox() (x1, y1, x2, y2 int16) {
return r.x1, r.y1, r.x2, r.y2
}
// Move sets the new position and size of this rectangle.
func (r *Rectangle) Move(x, y, width, height int16) {
newX1 := x
newY1 := y
newX2 := x + width
newY2 := y + height
if newX1 > r.x2 || newY1 > r.y2 || newX2 < r.x1 || newY2 < r.y1 {
// Not overlapping. Simply invalidate the old and new rectangle.
// https://stackoverflow.com/questions/306316/determine-if-two-rectangles-overlap-each-other
r.invalidate(r.x1, r.y1, r.x2, r.y2)
r.invalidate(newX1, newY1, newX2, newY2)
} else {
// Overlapping rectangles. Only redraw the parts that should be redrawn.
// Background: https://magcius.github.io/xplain/article/regions.html
// Essentially we need to invalidate the xor regions. There can be up
// to 4 of them when two rectangles overlap.
maxY1 := r.y1
if newY1 > maxY1 {
maxY1 = newY1
}
minY2 := r.y2
if newY2 < minY2 {
minY2 = newY2
}
if newX1 != r.x1 {
// Invalidate the block on the left side of the rectangle.
r.invalidateMiddleBlock(newX1, maxY1, r.x1, minY2)
}
if newX2 != r.x2 {
// Invalidate the block on the right side of the rectangle.
r.invalidateMiddleBlock(newX2, maxY1, r.x2, minY2)
}
if newY1 != r.y1 {
// Invalidate the block on the top of the rectangle.
if newY1 > r.y1 {
// y1 moved down
r.invalidate(r.x1, r.y1, r.x2, newY1)
} else {
// y1 moved up
r.invalidate(newX1, newY1, newX2, r.y1)
}
}
if newY2 != r.y2 {
// Invalidate the block on the bottom of the rectangle.
if newY2 > r.y2 {
// y2 moved down
r.invalidate(newX1, r.y2, newX2, newY2)
} else {
// y2 moved up
r.invalidate(r.x1, newY2, r.x2, r.y2)
}
}
}
r.x1 = newX1
r.y1 = newY1
r.x2 = newX2
r.y2 = newY2
}
// invalidateMiddleBlock invalidates an area where the two X coordinates
// might be swapped: the smaller one becomes the left edge.
func (r *Rectangle) invalidateMiddleBlock(xA, maxY1, xB, minY2 int16) {
	left, right := xA, xB
	if left > right {
		left, right = right, left
	}
	r.invalidate(left, maxY1, right, minY2)
}
// invalidate invalidates all tiles currently under the rectangle.
func (r *Rectangle) invalidate(x1, y1, x2, y2 int16) {
x, y := r.absolutePos(x1, y1)
// Calculate tile grid indices.
tileX1 := x / TileSize
tileY1 := y / TileSize
tileX2 := (x + (x2 - x1) + TileSize) / TileSize
tileY2 := (y + (y2 - y1) + TileSize) / TileSize
// Limit the tile grid indices to the screen.
if tileY1 < 0 {
tileY1 = 0
}
if int(tileY2) >= len(r.parent.engine.cleanTiles) {
tileY2 = int16(len(r.parent.engine.cleanTiles))
}
if tileX1 < 0 {
tileX1 = 0
}
if int(tileX2) >= len(r.parent.engine.cleanTiles[0]) {
tileX2 = int16(len(r.parent.engine.cleanTiles[0]))
}
// Set all tiles in bounds as needing an update.
for tileY := tileY1; tileY < tileY2; tileY++ {
tileRow := r.parent.engine.cleanTiles[tileY]
for tileX := tileX1; tileX < tileX2; tileX++ {
tileRow[tileX] = false
}
}
}
// paint draws the rectangle to the given tile at coordinates tileX and tileY.
// tileX/tileY are the tile's top-left corner in screen pixels; the
// rectangle is translated into tile-local coordinates and clipped to the
// tile bounds before drawing.
func (r *Rectangle) paint(t *tile, tileX, tileY int16) {
	// Translate to tile-local coordinates.
	x1 := r.x1 - tileX
	y1 := r.y1 - tileY
	x2 := r.x2 - tileX
	y2 := r.y2 - tileY
	// Clip against the tile: [0, TileSize) in both axes.
	if x1 < 0 {
		x1 = 0
	}
	if y1 < 0 {
		y1 = 0
	}
	if x2 > TileSize {
		x2 = TileSize
	}
	if y2 > TileSize {
		y2 = TileSize
	}
	if r.color.A == 255 {
		// Fill without blending, because the rectangle is not transparent.
		for x := x1; x < x2; x++ {
			for y := y1; y < y2; y++ {
				t[x+y*TileSize] = r.color
			}
		}
	} else {
		// Blend with the background (slow path).
		for x := x1; x < x2; x++ {
			for y := y1; y < y2; y++ {
				t[x+y*TileSize] = Blend(t[x+y*TileSize], r.color)
			}
		}
	}
}
// absolutePos returns the x and y coordinate of this rectangle in the screen.
func (r *Rectangle) absolutePos(x, y int16) (int16, int16) {
layer := r.parent
if &layer.rect == r {
layer = layer.parent
}
for layer != nil {
x += layer.rect.x1
y += layer.rect.y1
layer = layer.parent
}
return x, y
} | object-rectangle.go | 0.81538 | 0.645064 | object-rectangle.go | starcoder |
package sudogo
import (
"math/rand"
"golang.org/x/exp/constraints"
)
// removeAtIndex removes slice[index] in O(1) by swapping in the last
// element and truncating; element order is NOT preserved.
// An out-of-range index leaves the slice unchanged.
func removeAtIndex[T any](slice []T, index int) []T {
	last := len(slice) - 1
	if index < 0 || index > last {
		return slice
	}
	slice[index] = slice[last]
	return slice[:last]
}
func removeValue[T comparable](slice []T, value T) []T {
for i, v := range slice {
if v == value {
return removeAtIndex(slice, i)
}
}
return slice
}
func randomPointer[T any](random *rand.Rand, slice []*T) *T {
n := len(slice)
if n == 0 {
return nil
}
i := random.Intn(n)
return slice[i]
}
// randomElement returns a uniformly chosen element of slice, or notFound
// when the slice is empty.
func randomElement[T any](random *rand.Rand, slice []T, notFound T) T {
	if len(slice) == 0 {
		return notFound
	}
	return slice[random.Intn(len(slice))]
}
func pointerAt[T any](slice []*T, index int) *T {
if index < 0 || index >= len(slice) {
return nil
}
return slice[index]
}
// pointersWhere collects pointers to the elements of source that satisfy
// the where predicate. The pointers reference the original backing
// array, so mutating them mutates source.
func pointersWhere[T any](source []T, where func(item *T) bool) []*T {
	out := make([]*T, 0, len(source))
	for i := range source {
		if p := &source[i]; where(p) {
			out = append(out, p)
		}
	}
	return out
}
// sliceIndex returns the index of the first element satisfying where,
// or -1 when no element matches.
func sliceIndex[T any](source []T, where func(item T) bool) int {
	for i := range source {
		if where(source[i]) {
			return i
		}
	}
	return -1
}
func sliceClone[T any](source []T) []T {
cloned := make([]T, len(source))
copy(cloned, source)
return cloned
}
func sliceLast[T any](source []T) *T {
last := len(source) - 1
if last == -1 {
return nil
}
return &source[last]
}
func sliceRemoveLast[T any](source []T) []T {
last := len(source) - 1
if last == -1 {
return source
}
return source[:last]
}
func Max[T constraints.Ordered](x T, y T) T {
if x > y {
return x
}
return y
}
func Min[T constraints.Ordered](x T, y T) T {
if x < y {
return x
}
return y
}
// AbsInt returns the absolute value of x.
// Note: AbsInt(math.MinInt) overflows, since its negation is not representable.
func AbsInt(x int) int {
	if x >= 0 {
		return x
	}
	return -x
}
// stringChunk splits s into consecutive chunks of chunkSize runes each;
// the final chunk may be shorter. An empty string is returned as a
// single-element slice containing s, and a non-positive chunkSize is
// treated the same way.
//
// Bug fix: chunkSize <= 0 previously caused an infinite loop (the loop
// index never advanced); it is now guarded up front.
func stringChunk(s string, chunkSize int) []string {
	runes := []rune(s)
	n := len(runes)
	if n == 0 || chunkSize <= 0 {
		return []string{s}
	}
	chunks := make([]string, 0, (n+chunkSize-1)/chunkSize)
	for i := 0; i < n; i += chunkSize {
		end := i + chunkSize
		if end > n {
			end = n
		}
		chunks = append(chunks, string(runes[i:end]))
	}
	return chunks
}
package color
import "image"
// Picker extracts a single representative color from the given
// rectangular region of the source image. The channels are returned
// as 16-bit values (0-0xFFFF), matching image/color's Color.RGBA.
type Picker interface {
	Pick(image.Image, image.Rectangle) (r, g, b, a uint32)
}
// AverageColorPicker picks average color of given RectAngle area of src image.
type AverageColorPicker struct{}
// Pick of AverageColorPicker.
func (picker AverageColorPicker) Pick(src image.Image, cell image.Rectangle) (r, g, b, a uint32) {
width := cell.Max.X - cell.Min.X
height := cell.Max.Y - cell.Min.Y
if width*height == 0 {
return src.At(cell.Min.X, cell.Min.Y).RGBA()
}
var red, green, blue, alpha uint32
for x := cell.Min.X; x < cell.Max.X; x++ {
for y := cell.Min.Y; y < cell.Max.Y; y++ {
r, g, b, a := src.At(x, y).RGBA()
red += r
green += g
blue += b
alpha += a
}
}
return red / uint32(width*height), green / uint32(width*height), blue / uint32(width*height), alpha / uint32(width*height)
}
// HorizontalAverageColorPicker picks horizontal-average color of center of given RectAngle area of src image.
type HorizontalAverageColorPicker struct{}
// Pick of HorizontalAverageColorPicker.
func (picker HorizontalAverageColorPicker) Pick(src image.Image, cell image.Rectangle) (r, g, b, a uint32) {
var red, green, blue, alpha uint32
width := cell.Max.X - cell.Min.X
for x := cell.Min.X; x < cell.Max.X; x++ {
r, g, b, a := src.At(x, (cell.Min.Y+cell.Max.Y)/2).RGBA()
red += r
green += g
blue += b
alpha += a
}
return red / uint32(width), green / uint32(width), blue / uint32(width), alpha / uint32(width)
}
// CenterColorPicker picks the very central point's color of given RectAngle area of src image.
type CenterColorPicker struct{}
// Pick of CenterColorPicker.
func (picker CenterColorPicker) Pick(src image.Image, cell image.Rectangle) (r, g, b, a uint32) {
return src.At(int(float64(cell.Min.X+cell.Max.X)/2), int(float64(cell.Min.Y+cell.Max.Y)/2)).RGBA()
}
// LeftTopColorPicker picks color of left top (inital point) of given RectAngle of src image.
type LeftTopColorPicker struct{}
// Pick of LeftTopColorPicker.
func (picker LeftTopColorPicker) Pick(src image.Image, cell image.Rectangle) (r, g, b, a uint32) {
return src.At(cell.Min.X, cell.Min.Y).RGBA()
} | color/picker.go | 0.830353 | 0.517693 | picker.go | starcoder |
package main
import (
"context"
"flag"
"fmt"
"github.com/cheggaaa/pb/v3"
"github.com/robinbraemer/imaget"
"os"
"regexp"
"strings"
"time"
)
// usageMessage is the full command-line help text printed to stderr
// by usage() when the required -u flag is missing.
const usageMessage = `usage: imaget -u URL [-d destination] [-t timeout] [-r regex] [-y] [-s] [-f]
Imaget is a convenient image tool for finding images on any http(s) website and
downloading them with optional flags to tweak tool behaviour and images output.
Flags
-----------------
-u (required): is the http(s) URL to find and images from to download.
-d (optional): is the destination to download the images to.
It can either be the directory to save all images at or
a path to create a .zip archive to save the images in.
-f (optional): saves the downloaded images as a flat hierarchie,
instead of creating subdirectories as per the image download URLs.
The name of the file is the base64 encoded download URL of the image.
-t (optional): is the timeout to wait before pausing the download
and quitting the programm. Zero or below means no timeout.
Example: 3m3s
-r (optional): is a regular expression to only download images from matching URLs.
Examples: "(jpg|png)$", "^https?://"
-y (optional): starts the download directly without asking.
-s (optional): will make the console silent and produce no console output.
If used the -y flag is used automatically.
Example commands
-----------------
Silently download Google's current image above the search box to the current directory.
> imaget -s -f -u google.com
Download all images on amazon.com to new Zip archive in the current directory.
> imaget -y -f -u amazon.com -d amazon-images.zip
Download all images on alibaba.com to new directory 'alibaba-images' hierarchically sorted by image URL.
> imaget -y -u alibaba.com -d alibaba-images
`
// usage prints usageMessage to stderr and exits with status 2, the
// conventional exit code for command-line misuse.
func usage() {
	// Fprint, not Fprintf: the message is data, not a format string.
	// Passing it as a format (as the original did) would misinterpret
	// any '%' ever added to the help text as a formatting verb.
	fmt.Fprint(os.Stderr, usageMessage)
	os.Exit(2)
}
// Command-line flags. The -y, -f and -s booleans are registered only
// so flag parsing accepts them; their values are read back through
// flagPassed (flag.Visit) rather than through the discarded pointers.
var (
	u = flag.String("u", "", "download from this url")
	dst = flag.String("d", ".", "destination to drop the images at")
	_ = flag.Bool("y", false, "accept download")
	_ = flag.Bool("f", false, "save as flat hierarchie")
	_ = flag.Bool("s", false, "disable console output")
	t = flag.Duration("t", time.Hour, "download timeout")
	r = flag.String("r", "", "filter images using regex (default: no filter)")
)
// main parses the command-line flags and delegates to Main, exiting
// with status 1 when the download fails.
func main() {
	flag.Parse()
	// Let's roll the dice...
	if err := Main(); err != nil {
		_, _ = fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
// Main is the program's core: it builds the download described by the
// parsed flags, wires up the optional timeout context (-t) and runs
// the download to completion, returning any error for main to report.
func Main() error {
	// Parse input flags
	download, err := parse()
	if err != nil {
		return err
	}
	// Setup timeout; zero or negative -t means no deadline at all.
	ctx := context.Background()
	if *t > 0 {
		var cancel func()
		ctx, cancel = context.WithTimeout(ctx, *t)
		defer cancel()
	}
	// Start download
	return download.Start(ctx)
}
// parse validates the command-line flags and assembles the
// imaget.Download they describe. It does not return when -u is
// missing: usage() terminates the process. Silent mode (-s) swaps
// no-op writers into imaget's output streams and implies -y.
func parse() (d *imaget.Download, err error) {
	// URL to find image references on is required
	if *u == "" {
		usage()
	}
	// Prepend http protocol if missing.
	// NOTE(review): HasPrefix("http") accepts https URLs as intended,
	// but also lets a malformed scheme like "httpx://" through
	// unchanged — confirm whether stricter validation is wanted.
	if !strings.HasPrefix(*u, "http") {
		*u = "http://" + *u
	}
	// Compile regex matcher
	var reg *regexp.Regexp
	if *r != "" {
		reg, err = regexp.Compile(*r)
		if err != nil {
			return nil, fmt.Errorf("error compiling regex (-r flag): %w", err)
		}
	}
	// Silent: no console activity
	silent := flagPassed("s")
	if silent {
		imaget.Stdout = &nopWriter{}
		imaget.Stderr = &nopWriter{}
	}
	// Create reusable progress bar for showing downloads
	var pBar imaget.ProgressBar
	if silent {
		pBar = &nopProgressBar{}
	} else {
		const barTpl = pb.ProgressBarTemplate(`{{percent . }} {{bar . }} {{counters . }} {{speed . }}`)
		pBar = &progressBar{barTpl.New(0).
			Set(pb.Bytes, true).
			SetRefreshRate(10 * time.Millisecond)}
	}
	return &imaget.Download{
		Src: *u,
		Dst: *dst,
		Regex: reg,
		SkipAccept: silent || flagPassed("y"),
		SaveFlat: flagPassed("f"),
		Bar: pBar,
	}, nil
}
func flagPassed(name string) (found bool) {
flag.Visit(func(f *flag.Flag) {
if f.Name == name {
found = true
}
})
return
}
// nopWriter silently discards everything written to it; it replaces
// the imaget package's output streams in silent (-s) mode.
type nopWriter struct{}

// Write discards p and reports the full length as written. The
// io.Writer contract requires n == len(p) when err is nil; the
// original returned 0, which writers such as bufio treat as a short
// write (io.ErrShortWrite).
func (nopWriter) Write(p []byte) (n int, err error) {
	return len(p), nil
}
// progressBar adapts *pb.ProgressBar to imaget's ProgressBar
// interface. The explicit wrappers below exist presumably because
// pb's fluent setters return values that must be discarded for the
// signatures to match — confirm against imaget.ProgressBar.
type progressBar struct{ *pb.ProgressBar }
func (b *progressBar) Start() { b.ProgressBar.Start() }
func (b *progressBar) Finish() { b.ProgressBar.Finish() }
func (b *progressBar) SetTotal(i int64) { b.ProgressBar.SetTotal(i) }
func (b *progressBar) SetCurrent(i int64) { b.ProgressBar.SetCurrent(i) }
// nopProgressBar satisfies imaget's ProgressBar interface without
// rendering anything; used together with nopWriter in silent (-s)
// mode.
type nopProgressBar struct{}
func (b *nopProgressBar) Start() {}
func (b *nopProgressBar) Finish() {}
func (b *nopProgressBar) SetTotal(int64) {}
func (b *nopProgressBar) SetCurrent(int64) {}
package ast
// Expression is the AST node for an ECMAScript Expression: one or
// more assignment expressions joined by the comma operator.
type Expression struct {
	AssignmentExpressions []*AssignmentExpression
}
// AssignmentExpression holds one alternative of the ECMAScript
// AssignmentExpression production. The pointer fields are mutually
// exclusive branches (presumably exactly one is set by the parser —
// not enforced by the type). For an actual assignment, Assign (plain
// `=`) or AssignmentOperator (compound, e.g. `+=`) describes the
// operator, LeftHandSideExpression its target and
// AssignmentExpression its right-hand side.
type AssignmentExpression struct {
	ConditionalExpression *ConditionalExpression
	YieldExpression *YieldExpression
	ArrowFunction *ArrowFunction
	AsyncArrowFunction *AsyncArrowFunction
	LeftHandSideExpression *LeftHandSideExpression
	Assign bool
	AssignmentOperator string
	AssignmentExpression *AssignmentExpression
}
// ConditionalExpression models `cond ? e1 : e2`; when the two branch
// fields are nil the node is just the wrapped logical-OR expression.
type ConditionalExpression struct {
	LogicalORExpression *LogicalORExpression
	AssignmentExpression1 *AssignmentExpression
	AssignmentExpression2 *AssignmentExpression
}
// LogicalORExpression models `a || b`; a non-nil LogicalORExpression
// marks a left-recursive operator chain.
type LogicalORExpression struct {
	LogicalANDExpression *LogicalANDExpression
	LogicalORExpression *LogicalORExpression
}
// LogicalANDExpression models `a && b`.
type LogicalANDExpression struct {
	BitwiseORExpression *BitwiseORExpression
	LogicalANDExpression *LogicalANDExpression
}
// BitwiseORExpression models `a | b`.
type BitwiseORExpression struct {
	BitwiseORExpression *BitwiseORExpression
	BitwiseXORExpression *BitwiseXORExpression
}
// BitwiseANDExpression models `a & b`.
type BitwiseANDExpression struct {
	BitwiseANDExpression *BitwiseANDExpression
	EqualityExpression *EqualityExpression
}
// BitwiseXORExpression models `a ^ b`.
type BitwiseXORExpression struct {
	BitwiseANDExpression *BitwiseANDExpression
	BitwiseXORExpression *BitwiseXORExpression
}
// EqualityExpression models ==, ===, != and !== comparisons; one of
// the operator flags is expected to be set on a comparison node.
type EqualityExpression struct {
	EqualityExpression *EqualityExpression
	RelationalExpression *RelationalExpression
	Equals bool
	StrictEquals bool
	NotEquals bool
	StrictNotEquals bool
}
// RelationalExpression models <, >, <=, >=, instanceof and in; the
// boolean flags select the operator.
type RelationalExpression struct {
	ShiftExpression *ShiftExpression
	RelationalExpression *RelationalExpression
	LessThan bool
	GreaterThan bool
	LessThanOrEqualTo bool
	GreaterThanOrEqualTo bool
	Instanceof bool
	In bool
}
// ShiftExpression models the <<, >> and >>> shift operators.
type ShiftExpression struct {
	ShiftExpression *ShiftExpression
	AdditiveExpression *AdditiveExpression
	LeftShift bool
	RightShift bool
	UnsignedRightShift bool
}
// AdditiveExpression models binary + and -.
type AdditiveExpression struct {
	MultiplicativeExpression *MultiplicativeExpression
	AdditiveExpression *AdditiveExpression
	Plus bool
	Minus bool
}
// MultiplicativeExpression models *, / and %.
type MultiplicativeExpression struct {
	ExponentiationExpression *ExponentiationExpression
	MultiplicativeExpression *MultiplicativeExpression
	Asterisk bool
	Slash bool
	Modulo bool
}
// ExponentiationExpression models the right-associative ** operator.
type ExponentiationExpression struct {
	UnaryExpression *UnaryExpression
	UpdateExpression *UpdateExpression
	ExponentiationExpression *ExponentiationExpression
}
// UpdateExpression models ++ and --; which operand field is populated
// presumably distinguishes prefix from postfix form (confirm with the
// parser).
type UpdateExpression struct {
	LeftHandSideExpression *LeftHandSideExpression
	UnaryExpression *UnaryExpression
	PlusPlus bool
	MinusMinus bool
}
// UnaryExpression models the prefix operators delete, void, typeof,
// +, -, ~ and !, plus await via AwaitExpression.
type UnaryExpression struct {
	UpdateExpression *UpdateExpression
	UnaryExpression *UnaryExpression
	AwaitExpression *AwaitExpression
	Delete bool
	Void bool
	Typeof bool
	Plus bool
	Minus bool
	Tilde bool
	ExclamationMark bool
}
// AwaitExpression models `await expr`.
type AwaitExpression struct {
	UnaryExpression *UnaryExpression
}
// YieldExpression models `yield expr` and, with Asterisk set,
// `yield* expr`.
type YieldExpression struct {
	AssignmentExpression *AssignmentExpression
	Asterisk bool
}
// LeftHandSideExpression is either a NewExpression or a
// CallExpression.
type LeftHandSideExpression struct {
	NewExpression *NewExpression
	CallExpression *CallExpression
}
// NewExpression models `new expr` chains; with a nil NewExpression
// the node is a plain member expression.
type NewExpression struct {
	MemberExpression *MemberExpression
	NewExpression *NewExpression
}
// MemberExpression models property access (`obj.name` via
// IdentifierName, `obj[expr]` via Expression), tagged templates,
// super/meta properties and `new expr(args)` via Arguments.
type MemberExpression struct {
	PrimaryExpression *PrimaryExpression
	MemberExpression *MemberExpression
	Expression *Expression
	IdentifierName string
	TemplateLiteral *TemplateLiteral
	SuperProperty *SuperProperty
	MetaProperty *MetaProperty
	Arguments *Arguments
}
// PrimaryExpression holds one alternative of the ECMAScript
// PrimaryExpression production: `this`, an identifier, a literal, a
// function/class/generator expression, a template, a regular
// expression, or the parenthesized cover grammar.
type PrimaryExpression struct {
	This bool
	IdentifierReference *IdentifierReference
	Literal *Literal
	ArrayLiteral *ArrayLiteral
	ObjectLiteral *ObjectLiteral
	FunctionExpression *FunctionExpression
	ClassExpression *ClassExpression
	GeneratorExpression *GeneratorExpression
	AsyncFunctionExpression *AsyncFunctionExpression
	AsyncGeneratorExpression *AsyncGeneratorExpression
	RegularExpressionLiteral *RegularExpressionLiteral
	TemplateLiteral *TemplateLiteral
	CoverParenthesizedExpressionAndArrowParameterList *CoverParenthesizedExpressionAndArrowParameterList
}
// FunctionExpression models `function name(params) { body }`; the
// binding identifier is optional.
type FunctionExpression struct {
	BindingIdentifier *BindingIdentifier
	FormalParameters *FormalParameters
	FunctionBody *FunctionBody
}
// ClassExpression models `class name ...`; the binding identifier is
// optional.
type ClassExpression struct {
	BindingIdentifier *BindingIdentifier
	ClassTail *ClassTail
}
// ClassTail is the heritage clause plus body shared by class
// declarations and class expressions.
type ClassTail struct {
	ClassHeritage *ClassHeritage
	ClassBody *ClassBody
}
// ClassHeritage is the `extends` clause.
type ClassHeritage struct {
	LeftHandSideExpression *LeftHandSideExpression
}
// ClassBody wraps the list of class elements.
type ClassBody struct {
	ClassElementList *ClassElementList
}
// ClassElementList holds the ordered elements of a class body.
type ClassElementList struct {
	ClassElements []*ClassElement
}
// ClassElement is a single method definition, optionally static.
type ClassElement struct {
	MethodDefinition *MethodDefinition
	Static bool
}
// GeneratorExpression models `function* name(params) { body }`.
type GeneratorExpression struct {
	BindingIdentifier *BindingIdentifier
	FormalParameters *FormalParameters
	GeneratorBody *GeneratorBody
}
// AsyncFunctionExpression models `async function name(params) {...}`.
type AsyncFunctionExpression struct {
	FormalParameters *FormalParameters
	AsyncFunctionBody *AsyncFunctionBody
	BindingIdentifier *BindingIdentifier
}
// AsyncGeneratorExpression models `async function* name(params) {...}`.
type AsyncGeneratorExpression struct {
	BindingIdentifier *BindingIdentifier
	FormalParameters *FormalParameters
	AsyncGeneratorBody *AsyncGeneratorBody
}
// CallExpression models calls and everything chainable after them:
// `f(args)` (via the cover grammar), `super(args)`, further calls,
// member access on a call result and tagged templates.
type CallExpression struct {
	CoverCallExpressionAndAsyncArrowHead *CoverCallExpressionAndAsyncArrowHead
	SuperCall *SuperCall
	CallExpression *CallExpression
	Arguments *Arguments
	Expression *Expression
	IdentifierName string
	TemplateLiteral *TemplateLiteral
}
// SuperCall models `super(args)`.
type SuperCall struct {
	Arguments *Arguments
}
package simulation
import (
"fmt"
"math/rand"
"github.com/cosmos/cosmos-sdk/baseapp"
"github.com/cosmos/cosmos-sdk/simapp/helpers"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/x/simulation"
node "github.com/sentinel-official/hub/x/node/simulation"
plan "github.com/sentinel-official/hub/x/plan/simulation"
"github.com/sentinel-official/hub/x/subscription/expected"
"github.com/sentinel-official/hub/x/subscription/keeper"
"github.com/sentinel-official/hub/x/subscription/types"
)
// SimulateMsgSubscribeToNode generates and delivers a random
// MsgSubscribeToNode: a random account deposits a random "stake"
// amount towards a random active, provider-less node. It emits a
// no-op when no suitable node exists or the account has no positive
// spendable "stake" balance.
func SimulateMsgSubscribeToNode(ak expected.AccountKeeper, nk expected.NodeKeeper) simulation.Operation {
	return func(r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accounts []simulation.Account, chainID string) (
		simulation.OperationMsg, []simulation.FutureOperation, error) {
		nodes := nk.GetActiveNodes(ctx, 0, 0)
		if len(nodes) == 0 {
			return simulation.NoOpMsg(types.ModuleName), nil, nil
		}
		rNode := node.RandomNode(r, nodes)
		// Skip provider-owned nodes — presumably those are subscribed
		// to through plans instead (confirm with the keeper logic).
		if rNode.Provider != nil {
			return simulation.NoOpMsg(types.ModuleName), nil, nil
		}
		var (
			rAccount, _ = simulation.RandomAcc(r, accounts)
			account = ak.GetAccount(ctx, rAccount.Address)
		)
		amount := simulation.RandomAmount(r, account.SpendableCoins(ctx.BlockTime()).AmountOf("stake"))
		if !amount.IsPositive() {
			return simulation.NoOpMsg(types.ModuleName), nil, nil
		}
		deposit := sdk.NewCoin("stake", amount)
		msg := types.NewMsgSubscribeToNode(rAccount.Address, rNode.Address, deposit)
		if msg.ValidateBasic() != nil {
			return simulation.NoOpMsg(types.ModuleName), nil, fmt.Errorf("expected msg to pass ValidateBasic: %s", msg.GetSignBytes())
		}
		tx := helpers.GenTx(
			[]sdk.Msg{msg},
			nil,
			helpers.DefaultGenTxGas,
			chainID,
			[]uint64{account.GetAccountNumber()},
			[]uint64{account.GetSequence()},
			rAccount.PrivKey,
		)
		_, _, err := app.Deliver(tx)
		if err != nil {
			return simulation.NoOpMsg(types.ModuleName), nil, err
		}
		return simulation.NewOperationMsg(msg, true, ""), nil, nil
	}
}
// SimulateMsgSubscribeToPlan generates and delivers a random
// MsgSubscribeToPlan: a random account subscribes to a random active
// plan, paying in the hard-coded "stake" denomination. A no-op is
// emitted when no active plan exists or the account cannot afford the
// plan's "stake" price.
func SimulateMsgSubscribeToPlan(ak expected.AccountKeeper, pk expected.PlanKeeper) simulation.Operation {
	return func(r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accounts []simulation.Account, chainID string) (
		simulation.OperationMsg, []simulation.FutureOperation, error) {
		plans := pk.GetActivePlans(ctx, 0, 0)
		if len(plans) == 0 {
			return simulation.NoOpMsg(types.ModuleName), nil, nil
		}
		var (
			rPlan = plan.RandomPlan(r, plans)
			rAccount, _ = simulation.RandomAcc(r, accounts)
			account = ak.GetAccount(ctx, rAccount.Address)
			denom = "stake"
		)
		// Affordability check: spendable balance must cover the plan
		// price in the chosen denomination.
		if account.SpendableCoins(ctx.BlockTime()).AmountOf(denom).LT(rPlan.Price.AmountOf(denom)) {
			return simulation.NoOpMsg(types.ModuleName), nil, nil
		}
		msg := types.NewMsgSubscribeToPlan(rAccount.Address, rPlan.ID, denom)
		if msg.ValidateBasic() != nil {
			return simulation.NoOpMsg(types.ModuleName), nil, fmt.Errorf("expected msg to pass ValidateBasic: %s", msg.GetSignBytes())
		}
		tx := helpers.GenTx(
			[]sdk.Msg{msg},
			nil,
			helpers.DefaultGenTxGas,
			chainID,
			[]uint64{account.GetAccountNumber()},
			[]uint64{account.GetSequence()},
			rAccount.PrivKey,
		)
		_, _, err := app.Deliver(tx)
		if err != nil {
			return simulation.NoOpMsg(types.ModuleName), nil, err
		}
		return simulation.NewOperationMsg(msg, true, ""), nil, nil
	}
}
// SimulateMsgCancel generates and delivers a random MsgCancel: a
// random account cancels one of its own active subscriptions. A no-op
// is emitted when the account has no active subscriptions or the
// chosen one is not owned by it.
func SimulateMsgCancel(ak expected.AccountKeeper, k keeper.Keeper) simulation.Operation {
	return func(r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accounts []simulation.Account, chainID string) (
		simulation.OperationMsg, []simulation.FutureOperation, error) {
		rAccount, _ := simulation.RandomAcc(r, accounts)
		subscriptions := k.GetActiveSubscriptionsForAddress(ctx, rAccount.Address, 0, 0)
		if len(subscriptions) == 0 {
			return simulation.NoOpMsg(types.ModuleName), nil, nil
		}
		var (
			account = ak.GetAccount(ctx, rAccount.Address)
			rSubscription = RandomSubscription(r, subscriptions)
		)
		// NOTE(review): subscriptions were fetched for this address, so
		// this ownership check looks redundant unless the keeper also
		// returns quota-shared subscriptions — confirm.
		if !rSubscription.Owner.Equals(rAccount.Address) {
			return simulation.NoOpMsg(types.ModuleName), nil, nil
		}
		msg := types.NewMsgCancel(rAccount.Address, rSubscription.ID)
		if msg.ValidateBasic() != nil {
			return simulation.NoOpMsg(types.ModuleName), nil, fmt.Errorf("expected msg to pass ValidateBasic: %s", msg.GetSignBytes())
		}
		tx := helpers.GenTx(
			[]sdk.Msg{msg},
			nil,
			helpers.DefaultGenTxGas,
			chainID,
			[]uint64{account.GetAccountNumber()},
			[]uint64{account.GetSequence()},
			rAccount.PrivKey,
		)
		_, _, err := app.Deliver(tx)
		if err != nil {
			return simulation.NoOpMsg(types.ModuleName), nil, err
		}
		return simulation.NewOperationMsg(msg, true, ""), nil, nil
	}
}
// SimulateMsgAddQuota generates and delivers a random MsgAddQuota:
// the owner of a plan-based subscription grants a random account a
// random bandwidth quota drawn from the subscription's free bytes.
// No-ops cover: no active subscription, non-owned subscription,
// node-based subscription (Plan == 0), no free bytes left, or the
// target account already holding a quota.
func SimulateMsgAddQuota(ak expected.AccountKeeper, k keeper.Keeper) simulation.Operation {
	return func(r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accounts []simulation.Account, chainID string) (
		simulation.OperationMsg, []simulation.FutureOperation, error) {
		rAccount, _ := simulation.RandomAcc(r, accounts)
		subscriptions := k.GetActiveSubscriptionsForAddress(ctx, rAccount.Address, 0, 0)
		if len(subscriptions) == 0 {
			return simulation.NoOpMsg(types.ModuleName), nil, nil
		}
		rSubscription := RandomSubscription(r, subscriptions)
		if !rSubscription.Owner.Equals(rAccount.Address) {
			return simulation.NoOpMsg(types.ModuleName), nil, nil
		}
		// Quotas only apply to plan subscriptions.
		if rSubscription.Plan == 0 {
			return simulation.NoOpMsg(types.ModuleName), nil, nil
		}
		if rSubscription.Free.IsZero() {
			return simulation.NoOpMsg(types.ModuleName), nil, nil
		}
		toAccount, _ := simulation.RandomAcc(r, accounts)
		if k.HasQuota(ctx, rSubscription.ID, toAccount.Address) {
			return simulation.NoOpMsg(types.ModuleName), nil, nil
		}
		var (
			account = ak.GetAccount(ctx, rAccount.Address)
			// 1..Free bytes; the +1 keeps the grant strictly positive.
			bytes = sdk.NewInt(r.Int63n(rSubscription.Free.Int64()) + 1)
		)
		msg := types.NewMsgAddQuota(rAccount.Address, rSubscription.ID, toAccount.Address, bytes)
		if msg.ValidateBasic() != nil {
			return simulation.NoOpMsg(types.ModuleName), nil, fmt.Errorf("expected msg to pass ValidateBasic: %s", msg.GetSignBytes())
		}
		tx := helpers.GenTx(
			[]sdk.Msg{msg},
			nil,
			helpers.DefaultGenTxGas,
			chainID,
			[]uint64{account.GetAccountNumber()},
			[]uint64{account.GetSequence()},
			rAccount.PrivKey,
		)
		_, _, err := app.Deliver(tx)
		if err != nil {
			return simulation.NoOpMsg(types.ModuleName), nil, err
		}
		return simulation.NewOperationMsg(msg, true, ""), nil, nil
	}
}
// SimulateMsgUpdateQuota generates and delivers a random
// MsgUpdateQuota: the owner of a plan-based subscription resizes an
// existing quota. No-ops cover: no active subscription, node-based
// subscription (Plan == 0), non-owned subscription, or no quotas on
// the chosen subscription.
func SimulateMsgUpdateQuota(ak expected.AccountKeeper, k keeper.Keeper) simulation.Operation {
	return func(r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accounts []simulation.Account, chainID string) (
		simulation.OperationMsg, []simulation.FutureOperation, error) {
		rAccount, _ := simulation.RandomAcc(r, accounts)
		subscriptions := k.GetActiveSubscriptionsForAddress(ctx, rAccount.Address, 0, 0)
		if len(subscriptions) == 0 {
			return simulation.NoOpMsg(types.ModuleName), nil, nil
		}
		rSubscription := RandomSubscription(r, subscriptions)
		if rSubscription.Plan == 0 {
			return simulation.NoOpMsg(types.ModuleName), nil, nil
		}
		if !rSubscription.Owner.Equals(rAccount.Address) {
			return simulation.NoOpMsg(types.ModuleName), nil, nil
		}
		quotas := k.GetQuotas(ctx, rSubscription.ID, 0, 0)
		if len(quotas) == 0 {
			return simulation.NoOpMsg(types.ModuleName), nil, nil
		}
		var (
			account = ak.GetAccount(ctx, rAccount.Address)
			rQuota = RandomQuota(r, quotas)
			// New size: consumed bytes plus a random share of what is
			// still distributable (free + currently allocated).
			// NOTE(review): rand.Int63n panics when its bound is <= 0 —
			// confirm Free+Allocated is always positive here.
			bytes = sdk.NewInt(r.Int63n(rSubscription.Free.
				Add(rQuota.Allocated).Int64()) + rQuota.Consumed.Int64())
		)
		msg := types.NewMsgUpdateQuota(rAccount.Address, rSubscription.ID, rQuota.Address, bytes)
		if msg.ValidateBasic() != nil {
			return simulation.NoOpMsg(types.ModuleName), nil, fmt.Errorf("expected msg to pass ValidateBasic: %s", msg.GetSignBytes())
		}
		tx := helpers.GenTx(
			[]sdk.Msg{msg},
			nil,
			helpers.DefaultGenTxGas,
			chainID,
			[]uint64{account.GetAccountNumber()},
			[]uint64{account.GetSequence()},
			rAccount.PrivKey,
		)
		_, _, err := app.Deliver(tx)
		if err != nil {
			return simulation.NoOpMsg(types.ModuleName), nil, err
		}
		return simulation.NewOperationMsg(msg, true, ""), nil, nil
	}
}
package analyser
import (
"fmt"
"github.com/sdcoffey/techan"
)
// makeMACD builds an indicator factory for MACD. With isHist set the
// factory expects three numeric arguments (short window, long window,
// signal window) and yields the MACD histogram; otherwise it expects
// two (short window, long window) and yields the plain MACD line.
func makeMACD(isHist bool) func(series *techan.TimeSeries, a ...interface{}) (techan.Indicator, error) {
	if isHist {
		return func(series *techan.TimeSeries, a ...interface{}) (techan.Indicator, error) {
			if len(a) != 3 {
				// Message fix: the original dropped the word "need"
				// ("got %d, 3(MACD+Histogram)").
				return nil, newError(fmt.Sprintf("[MACD] Not enough parameters: got %d, need 3 (MACD+Histogram)", len(a)))
			}
			shortWindow := int(a[0].(float64))
			longWindow := int(a[1].(float64))
			signalWindow := int(a[2].(float64))
			return newMACDHist(series, shortWindow, longWindow, signalWindow), nil
		}
	}
	return func(series *techan.TimeSeries, a ...interface{}) (techan.Indicator, error) {
		if len(a) != 2 {
			return nil, newError(fmt.Sprintf("[MACD] Not enough parameters: got %d, need 2 (MACD)", len(a)))
		}
		shortWindow := int(a[0].(float64))
		longWindow := int(a[1].(float64))
		return newMACD(series, shortWindow, longWindow), nil
	}
}
// makeRSI builds a factory producing an RSI (relative strength index)
// indicator. It expects exactly one numeric argument — the timeframe,
// which must be at least 1.
func makeRSI() func(series *techan.TimeSeries, a ...interface{}) (techan.Indicator, error) {
	return func(series *techan.TimeSeries, a ...interface{}) (techan.Indicator, error) {
		if n := len(a); n != 1 {
			return nil, newError(fmt.Sprintf("[rsi] Not enough parameters: got %d, need 1", n))
		}
		window := int(a[0].(float64))
		if window < 1 {
			return nil, newError(fmt.Sprintf("[rsi] Lag should be longer than 0, not %d", window))
		}
		return newRSI(series, window), nil
	}
}
// makeClosePrice builds a factory producing a close-price indicator;
// it accepts no arguments.
func makeClosePrice() func(series *techan.TimeSeries, a ...interface{}) (techan.Indicator, error) {
	return func(series *techan.TimeSeries, a ...interface{}) (techan.Indicator, error) {
		if n := len(a); n != 0 {
			return nil, newError(fmt.Sprintf("[ClosePrice] Too many parameters: got %d, need 0", n))
		}
		return techan.NewClosePriceIndicator(series), nil
	}
}
// makeIncrease builds a factory wrapping another indicator in an
// increase indicator. Arguments: the wrapped indicator and a lag of
// at least 1. Note the unchecked type assertion on a[0]: a non-
// indicator first argument panics, as in the original.
func makeIncrease() func(series *techan.TimeSeries, a ...interface{}) (techan.Indicator, error) {
	return func(series *techan.TimeSeries, a ...interface{}) (techan.Indicator, error) {
		if n := len(a); n != 2 {
			return nil, newError(fmt.Sprintf("[Increase] Number of parameters incorrect: got %d, need 2", n))
		}
		wrapped := a[0].(techan.Indicator)
		lag := int(a[1].(float64))
		if lag < 1 {
			return nil, newError(fmt.Sprintf("[Increase] Lag should be longer than 0, not %d", lag))
		}
		return newIncreaseIndicator(wrapped, lag), nil
	}
}
// makeExtrema builds a factory wrapping an indicator in a local-
// extrema indicator. Arguments: the wrapped indicator, a lag of at
// least 1 and a sample count of at least 4.
func makeExtrema() func(series *techan.TimeSeries, a ...interface{}) (techan.Indicator, error) {
	return func(series *techan.TimeSeries, a ...interface{}) (techan.Indicator, error) {
		if len(a) != 3 {
			return nil, newError(fmt.Sprintf("[LocalExtrema] Number of parameters incorrect: got %d, need 3", len(a)))
		}
		indicator := a[0].(techan.Indicator)
		lag := int(a[1].(float64))
		if lag < 1 {
			return nil, newError(fmt.Sprintf("[LocalExtrema] Lag should be longer than 0, not %d", lag))
		}
		samples := int(a[2].(float64))
		if samples < 4 {
			// Bug fix: the original formatted lag into this message
			// instead of samples, and claimed "more than 4" although
			// exactly 4 is accepted by the check above.
			return nil, newError(fmt.Sprintf("[LocalExtrema] Samples should be at least 4, not %d", samples))
		}
		return newLocalExtremaIndicator(indicator, lag, samples), nil
	}
}
// makeMoneyFlowIndex builds a factory producing a money-flow-index
// indicator. It expects exactly one numeric argument — the timeframe,
// which must be at least 1.
func makeMoneyFlowIndex() func(series *techan.TimeSeries, a ...interface{}) (techan.Indicator, error) {
	return func(series *techan.TimeSeries, a ...interface{}) (techan.Indicator, error) {
		if n := len(a); n != 1 {
			return nil, newError(fmt.Sprintf("[MoneyFlowIndex] Not enough parameters: got %d, need 1", n))
		}
		window := int(a[0].(float64))
		if window < 1 {
			return nil, newError(fmt.Sprintf("[MoneyFlowIndex] Lag should be longer than 0, not %d", window))
		}
		return newMoneyFlowIndex(series, window), nil
	}
}
func makeIsZero() func(series *techan.TimeSeries, a ...interface{}) (techan.Indicator, error) {
return func(series *techan.TimeSeries, a ...interface{}) (techan.Indicator, error) {
if len(a) != 3 {
return nil, newError(fmt.Sprintf("[Zero] Number of parameters incorrect: got %d, need 3", len(a)))
}
indicator := a[0].(techan.Indicator)
lag := int(a[1].(float64))
if lag < 1 {
return nil, newError(fmt.Sprintf("[Zero] Lag should be longer than 0, not %d", lag))
}
samples := int(a[2].(float64))
if samples < 4 {
return nil, newError(fmt.Sprintf("[Zero] Samples should be more than 4, not %d", lag))
}
return newLocalZeroIndicator(indicator, lag, samples), nil
}
} | analyser/indicatorFuncs.go | 0.586878 | 0.416381 | indicatorFuncs.go | starcoder |
package datatype
import (
"fmt"
"github.com/shopspring/decimal"
"math/big"
"strconv"
)
// integerTypeSpec is the shared type specification for the "integer"
// element type.
var integerTypeSpec = newElementTypeSpec("integer")
// integerType is the concrete IntegerAccessor implementation: a
// signed 32-bit value plus the nil flag inherited from PrimitiveType.
type integerType struct {
	PrimitiveType
	value int32
}
// IntegerAccessor provides read access to an integer-valued
// primitive; it adds nothing beyond the NumberAccessor contract.
type IntegerAccessor interface {
	NumberAccessor
}
// IsInteger reports whether the accessor carries one of the integer
// data types: integer, positiveInt or unsignedInt.
func IsInteger(accessor Accessor) bool {
	switch accessor.DataType() {
	case IntegerDataType, PositiveIntDataType, UnsignedIntDataType:
		return true
	default:
		return false
	}
}
// NewIntegerNil returns an integer whose value is nil (absent).
func NewIntegerNil() IntegerAccessor {
	return newInteger(true, 0)
}
// NewInteger returns a non-nil integer carrying the given value.
func NewInteger(value int32) IntegerAccessor {
	return newInteger(false, value)
}
// ParseInteger parses value as a base-10 32-bit integer. It returns
// an error both for syntactically invalid input and for values
// outside the int32 range; the original strconv.Atoi + int32 cast
// silently truncated out-of-range values on 64-bit platforms.
func ParseInteger(value string) (IntegerAccessor, error) {
	i, err := strconv.ParseInt(value, 10, 32)
	if err != nil {
		return nil, fmt.Errorf("not an integer: %s", value)
	}
	return NewInteger(int32(i)), nil
}
// newInteger is the shared constructor behind NewInteger and
// NewIntegerNil; nilValue marks the value as absent.
func newInteger(nilValue bool, value int32) IntegerAccessor {
	return &integerType{
		PrimitiveType: PrimitiveType{
			nilValue: nilValue,
		},
		value: value,
	}
}
// DataType identifies this primitive as an integer.
func (t *integerType) DataType() DataTypes {
	return IntegerDataType
}
// Int returns the raw 32-bit value (zero when the value is nil).
func (t *integerType) Int() int32 {
	return t.value
}
// Int64 widens the value to 64 bits.
func (t *integerType) Int64() int64 {
	return int64(t.value)
}
// Float32 converts the value to a 32-bit float.
func (t *integerType) Float32() float32 {
	return float32(t.value)
}
// Float64 converts the value to a 64-bit float.
func (t *integerType) Float64() float64 {
	return float64(t.value)
}
// BigFloat converts the value to an arbitrary-precision float.
func (t *integerType) BigFloat() *big.Float {
	return big.NewFloat(float64(t.value))
}
// Decimal converts the value to a decimal.
func (t *integerType) Decimal() decimal.Decimal {
	return decimal.NewFromInt32(t.value)
}
// TypeSpec returns the shared "integer" element type specification.
func (t *integerType) TypeSpec() TypeSpecAccessor {
	return integerTypeSpec
}
// Equal reports value equality: two integer accessors are equal when
// their nil flags and 32-bit values match; comparison against other
// accessor kinds is delegated to decimalValueEqual.
func (t *integerType) Equal(accessor Accessor) bool {
	if accessor != nil && IsInteger(accessor) {
		o := accessor.(IntegerAccessor)
		return t.Nil() == o.Nil() && t.Int() == o.Int()
	}
	return decimalValueEqual(t, accessor)
}
// Equivalent behaves exactly like Equal for integer operands; only
// the cross-type fallback differs (decimalValueEquivalent, which is
// presumably precision-tolerant — confirm against the decimal type).
func (t *integerType) Equivalent(accessor Accessor) bool {
	if accessor != nil && IsInteger(accessor) {
		o := accessor.(IntegerAccessor)
		return t.Nil() == o.Nil() && t.Int() == o.Int()
	}
	return decimalValueEquivalent(t, accessor)
}
func (t *integerType) String() string {
if t.nilValue {
return ""
}
return strconv.FormatInt(int64(t.value), 10)
} | datatype/integer_type.go | 0.707405 | 0.458531 | integer_type.go | starcoder |
package regression
import (
"github.com/gaillard/go-queue/queue"
"math"
)
// Regression performs sliding-window linear regression: it keeps a
// queue of the points whose x lies within xDelta of the most recently
// added point, together with the running sums needed to fit the line
// in O(1) per calculation. Use New() to initialize.
type Regression struct {
	xSum, ySum, xxSum, xySum, yySum, xDelta float64
	points *queue.Queue
	lastSlopeCalc, lastInterceptCalc, lastStdErrorCalc float64
	// lastCalcFresh caches the last result so multiple calculate calls
	// between adds cost nothing.
	lastCalcFresh bool
	// lastX mirrors the newest point's x so Add need not call
	// points.Back().
	lastX float64
}
// point stores one sample plus its precomputed square/cross terms so
// the running sums can be decremented exactly when it expires.
type point struct {
	x, y, xx, xy, yy float64
}
// New returns a Regression that keeps points back as far as xDelta
// from the last added point. lastX starts at -Inf so the very first
// Add always succeeds.
func New(xDelta float64) *Regression {
	return &Regression{xDelta: xDelta, points: queue.New(), lastX: math.Inf(-1)}
}
// Calculate returns the slope and intercept of a best fit line to the
// added points, returning a cached value if called between adds.
//
// Deprecated: use CalculateWithStdError instead.
func (r *Regression) Calculate() (slope, intercept float64) {
	slope, intercept, _ = r.CalculateWithStdError()
	return
}
// CalculateWithStdError returns the slope, intercept and standard
// error of the least-squares line through the points currently in the
// window. The result is cached until the next Add, so repeated calls
// between adds are free.
//
// NOTE(review): with zero points n is 0 and the formulas below divide
// by zero, producing NaN — confirm callers always Add first.
func (r *Regression) CalculateWithStdError() (slope, intercept, stdError float64) {
	if r.lastCalcFresh {
		slope = r.lastSlopeCalc
		intercept = r.lastInterceptCalc
		stdError = r.lastStdErrorCalc
		return
	}
	n := float64(r.points.Len())
	// linear regression formula:
	// slope is (n*SUM(x*y) - SUM(x)*SUM(y)) / (n*SUM(x*x) - (SUM(x))^2)
	// intercept is (SUM(y)-slope*SUM(x)) / n
	xSumOverN := r.xSum / n // hoisted so it is computed only once
	slope = (r.xySum - xSumOverN*r.ySum) / (r.xxSum - xSumOverN*r.xSum)
	intercept = (r.ySum - slope*r.xSum) / n
	// standard error formula is sqrt(SUM((yActual - yPredicted)^2) / (n - 2))
	// the n-2 is related to the degrees of freedom for the regression, 2 in this case
	// simplification of the sum:
	// SUM((yA - yP)^2)
	// SUM(yA*yA - 2*yA*yP + yP*yP)
	// SUM(y*y) - SUM(2*y*(m*x+b)) + SUM((m*x+b)(m*x+b))
	// SUM(y*y) - 2*m*SUM(x*y) - 2*b*SUM(y) + m*m*SUM(x*x) + 2*b*m*SUM(x) + n*b*b
	twoTimesB := 2 * intercept
	stdError = math.Sqrt((r.yySum - 2*slope*r.xySum - twoTimesB*r.ySum + slope*slope*r.xxSum + twoTimesB*slope*r.xSum + n*intercept*intercept) / (n - 2))
	r.lastSlopeCalc = slope
	r.lastInterceptCalc = intercept
	r.lastStdErrorCalc = stdError
	r.lastCalcFresh = true
	return
}
// Add appends the point (x, y) to the window, updates the running
// sums, and evicts every point whose x lies more than xDelta behind
// the new x. It invalidates the cached calculation. Panics if x is
// smaller than the previously added x: inputs must be non-decreasing.
func (r *Regression) Add(x, y float64) {
	r.lastCalcFresh = false
	if x < r.lastX {
		panic("adding with x less than the last add is not allowed")
	}
	r.lastX = x
	// storing pointers instead of values only for performance
	newPoint := &point{x, y, x * x, x * y, y * y}
	r.points.PushBack(newPoint)
	r.xSum += newPoint.x
	r.ySum += newPoint.y
	r.xxSum += newPoint.xx
	r.xySum += newPoint.xy
	r.yySum += newPoint.yy
	// hoisted so the eviction threshold is computed only once
	oldestXAllowed := r.lastX - r.xDelta
	for {
		point := r.points.Front().(*point)
		// no need to check for nil since we just did a .PushBack(),
		// and the newest point always satisfies the threshold, so the
		// loop terminates.
		if point.x >= oldestXAllowed {
			break
		}
		r.xSum -= point.x
		r.ySum -= point.y
		r.xxSum -= point.xx
		r.xySum -= point.xy
		r.yySum -= point.yy
		r.points.PopFront()
	}
}
package slice
import "fmt"
// Equal reports whether two slices are equal: the same length and all
// elements equal. If the lengths differ, Equal returns false;
// otherwise elements are compared in index order and the comparison
// stops at the first unequal pair. Floating point NaNs are not
// considered equal.
func Equal[T comparable](s1, s2 []T) bool {
	if len(s1) != len(s2) {
		return false
	}
	for i, v := range s1 {
		if v != s2[i] {
			return false
		}
	}
	return true
}
// EqualFunc reports whether two slices are equal using a comparison
// function on each pair of elements. Slices of different lengths are
// never equal; otherwise elements are compared in index order and the
// scan stops at the first index for which eq returns false.
func EqualFunc[T1, T2 any](s1 []T1, s2 []T2, eq func(T1, T2) bool) bool {
	if len(s1) != len(s2) {
		return false
	}
	for i := range s1 {
		if !eq(s1[i], s2[i]) {
			return false
		}
	}
	return true
}
// Index returns the index of the first occurrence of v in s, or -1 if
// v is not present.
func Index[T comparable](s []T, v T) int {
	for i := range s {
		if s[i] == v {
			return i
		}
	}
	return -1
}
// IndexFunc returns the index into s of the first element satisfying
// f(c), or -1 if none do.
func IndexFunc[T any](s []T, f func(T) bool) int {
	for i, v := range s {
		if f(v) {
			return i
		}
	}
	return -1
}
// Contains reports whether v is present in s.
func Contains[T comparable](s []T, v T) bool {
	for _, x := range s {
		if x == v {
			return true
		}
	}
	return false
}
// Clone returns a copy of the slice. The elements are copied using
// assignment, so this is a shallow clone. A nil input yields nil.
func Clone[T any](s []T) []T {
	if s == nil {
		return nil
	}
	out := make([]T, len(s))
	copy(out, s)
	return out
}
// Insert inserts the values v... into s at index i, returning the modified
// slice. In the returned slice r, r[i] == the first v. Insert panics if i
// is out of range. This function is O(len(s) + len(v)).
func Insert[T any](s []T, i int, v ...T) []T {
	if i < 0 || i > len(s) {
		panic(fmt.Errorf("runtime error: index out of range [%d] with length %d", i, len(s)))
	}
	total := len(s) + len(v)
	if total <= cap(s) {
		// Enough capacity: shift the tail right in place, then drop v in.
		grown := s[:total]
		copy(grown[i+len(v):], s[i:])
		copy(grown[i:], v)
		return grown
	}
	// Not enough capacity: build a fresh slice from the three segments.
	out := make([]T, total)
	copy(out, s[:i])
	copy(out[i:], v)
	copy(out[i+len(v):], s[i:])
	return out
}
// Delete removes the elements s[i:j] from s, returning the modified slice.
// Delete panics if s[i:j] is not a valid slice of s.
// Delete modifies the contents of the slice s; it does not create a new
// slice. It is O(len(s)), so deleting many items is cheaper in one call
// than one at a time. The vacated tail is zeroed so values can be GC'd.
func Delete[T any](s []T, i, j int) []T {
	if i < 0 || j < i || j > len(s) {
		panic(fmt.Errorf("runtime error: slice bounds out of range [%d:%d] with length %d", i, j, len(s)))
	}
	// Slide the tail left over the deleted range; n is the tail length.
	n := copy(s[i:], s[j:])
	newLen := i + n
	var zero T
	for k := newLen; k < len(s); k++ {
		s[k] = zero
	}
	return s[:newLen]
}
package op
import(
"fmt"
)
// Eq reports whether a == b and charges the configured cost of one "=="
// operation to the State's operation counter.
//
// Both operands must hold the same concrete type, one of Go's comparable
// basic types (signed/unsigned integers, floats, complex numbers, string);
// if b's concrete type differs from a's, the type assertion on b panics.
// An unsupported type prints a diagnostic and yields false.
func (self *State) Eq(a, b any) bool {
	self.IncOperations(self.coeff["=="] + self.off["=="])
	// A type switch avoids the fmt.Sprintf("%T") string allocation and the
	// second type assertion on a that the string-based dispatch required.
	switch v := a.(type) {
	case int:
		return v == b.(int)
	case int8:
		return v == b.(int8)
	case int16:
		return v == b.(int16)
	case int32:
		return v == b.(int32)
	case int64:
		return v == b.(int64)
	case uint:
		return v == b.(uint)
	case uint8:
		return v == b.(uint8)
	case uint16:
		return v == b.(uint16)
	case uint32:
		return v == b.(uint32)
	case uint64:
		return v == b.(uint64)
	case float32:
		return v == b.(float32)
	case float64:
		return v == b.(float64)
	case complex64:
		return v == b.(complex64)
	case complex128:
		return v == b.(complex128)
	case string:
		return v == b.(string)
	default:
		fmt.Println("Invalid type")
	}
	return false
}
// Le reports whether a <= b and charges the configured cost of one "<="
// operation to the State's operation counter.
//
// Both operands must hold the same concrete ordered type (signed/unsigned
// integers, floats, or string); a b of a different type panics on the type
// assertion. An unsupported type prints a diagnostic and yields false.
func (self *State) Le(a, b any) bool {
	self.IncOperations(self.coeff["<="] + self.off["<="])
	// Type switch replaces the fmt.Sprintf("%T") string dispatch, avoiding
	// a per-call allocation and a second type assertion on a.
	switch v := a.(type) {
	case int:
		return v <= b.(int)
	case int8:
		return v <= b.(int8)
	case int16:
		return v <= b.(int16)
	case int32:
		return v <= b.(int32)
	case int64:
		return v <= b.(int64)
	case uint:
		return v <= b.(uint)
	case uint8:
		return v <= b.(uint8)
	case uint16:
		return v <= b.(uint16)
	case uint32:
		return v <= b.(uint32)
	case uint64:
		return v <= b.(uint64)
	case float32:
		return v <= b.(float32)
	case float64:
		return v <= b.(float64)
	case string:
		return v <= b.(string)
	default:
		fmt.Println("Invalid type")
	}
	return false
}
// Ge reports whether a >= b and charges the configured cost of one ">="
// operation to the State's operation counter.
//
// Both operands must hold the same concrete ordered type (signed/unsigned
// integers, floats, or string); a b of a different type panics on the type
// assertion. An unsupported type prints a diagnostic and yields false.
func (self *State) Ge(a, b any) bool {
	self.IncOperations(self.coeff[">="] + self.off[">="])
	// Type switch replaces the fmt.Sprintf("%T") string dispatch, avoiding
	// a per-call allocation and a second type assertion on a.
	switch v := a.(type) {
	case int:
		return v >= b.(int)
	case int8:
		return v >= b.(int8)
	case int16:
		return v >= b.(int16)
	case int32:
		return v >= b.(int32)
	case int64:
		return v >= b.(int64)
	case uint:
		return v >= b.(uint)
	case uint8:
		return v >= b.(uint8)
	case uint16:
		return v >= b.(uint16)
	case uint32:
		return v >= b.(uint32)
	case uint64:
		return v >= b.(uint64)
	case float32:
		return v >= b.(float32)
	case float64:
		return v >= b.(float64)
	case string:
		return v >= b.(string)
	default:
		fmt.Println("Invalid type")
	}
	return false
}
// Lt reports whether a < b and charges the configured cost of one "<"
// operation to the State's operation counter.
//
// Both operands must hold the same concrete ordered type (signed/unsigned
// integers, floats, or string); a b of a different type panics on the type
// assertion. An unsupported type prints a diagnostic and yields false.
func (self *State) Lt(a, b any) bool {
	self.IncOperations(self.coeff["<"] + self.off["<"])
	// Type switch replaces the fmt.Sprintf("%T") string dispatch, avoiding
	// a per-call allocation and a second type assertion on a.
	switch v := a.(type) {
	case int:
		return v < b.(int)
	case int8:
		return v < b.(int8)
	case int16:
		return v < b.(int16)
	case int32:
		return v < b.(int32)
	case int64:
		return v < b.(int64)
	case uint:
		return v < b.(uint)
	case uint8:
		return v < b.(uint8)
	case uint16:
		return v < b.(uint16)
	case uint32:
		return v < b.(uint32)
	case uint64:
		return v < b.(uint64)
	case float32:
		return v < b.(float32)
	case float64:
		return v < b.(float64)
	case string:
		return v < b.(string)
	default:
		fmt.Println("Invalid type")
	}
	return false
}
// Gt reports whether a > b and charges the configured cost of one ">"
// operation to the State's operation counter.
//
// Both operands must hold the same concrete ordered type (signed/unsigned
// integers, floats, or string); a b of a different type panics on the type
// assertion. An unsupported type prints a diagnostic and yields false.
func (self *State) Gt(a, b any) bool {
	self.IncOperations(self.coeff[">"] + self.off[">"])
	// Type switch replaces the fmt.Sprintf("%T") string dispatch, avoiding
	// a per-call allocation and a second type assertion on a.
	switch v := a.(type) {
	case int:
		return v > b.(int)
	case int8:
		return v > b.(int8)
	case int16:
		return v > b.(int16)
	case int32:
		return v > b.(int32)
	case int64:
		return v > b.(int64)
	case uint:
		return v > b.(uint)
	case uint8:
		return v > b.(uint8)
	case uint16:
		return v > b.(uint16)
	case uint32:
		return v > b.(uint32)
	case uint64:
		return v > b.(uint64)
	case float32:
		return v > b.(float32)
	case float64:
		return v > b.(float64)
	case string:
		return v > b.(string)
	default:
		fmt.Println("Invalid type")
	}
	return false
}
// Neg returns -a if a is a numeric value, or !a if a is a boolean, and
// charges the configured cost of one "neg" operation to the State's
// operation counter. Negating an unsigned value wraps modulo 2^n, exactly
// as Go's built-in unary minus does. An unsupported type prints a
// diagnostic and yields nil.
func (self *State) Neg(a any) any {
	self.IncOperations(self.coeff["neg"] + self.off["neg"])
	// Type switch replaces the fmt.Sprintf("%T") string dispatch, avoiding
	// a per-call allocation and a second type assertion on a.
	switch v := a.(type) {
	case int:
		return -v
	case int8:
		return -v
	case int16:
		return -v
	case int32:
		return -v
	case int64:
		return -v
	case uint:
		return -v
	case uint8:
		return -v
	case uint16:
		return -v
	case uint32:
		return -v
	case uint64:
		return -v
	case float32:
		return -v
	case float64:
		return -v
	case complex64:
		return -v
	case complex128:
		return -v
	case bool:
		return !v
	default:
		fmt.Println("Invalid type")
	}
	return nil
}
// And returns a && b and charges the configured cost of one "and" operation
// to the State's operation counter. Unlike the && operator, both operands
// were already evaluated by the caller, so there is no short-circuiting.
// A non-bool a prints a diagnostic and yields false; a bool a paired with a
// non-bool b panics on the type assertion.
func (self *State) And(a, b any) bool {
	self.IncOperations(self.coeff["and"] + self.off["and"])
	// Comma-ok assertion replaces the fmt.Sprintf("%T") string dispatch.
	if v, ok := a.(bool); ok {
		return v && b.(bool)
	}
	fmt.Println("Invalid type")
	return false
}
// Returns a | b and updates the State. Both operands are
// executed even if a is true.
func (self *State)Or(a,b any)bool{
self.IncOperations(self.coeff["or"]+self.off["or"])
var t string = fmt.Sprintf("%T", a)
switch t {
case "bool": return (a.(bool) || b.(bool))
default: fmt.Println("Invalid type")
}
return false
} | op/boolean_operators.go | 0.612078 | 0.617253 | boolean_operators.go | starcoder |
package main
import (
"fmt"
"unicode/utf8"
)
/*
A Go string is a read-only slice of bytes.
The language and the standard library treat strings specially - as containers of text encoded in UTF-8.
In other languages, strings are made of “characters”.
In Go, the concept of a character is called a rune - it’s an integer that represents a Unicode code point.
This Go blog post is a good introduction to the topic. (https://go.dev/blog/strings)
(字符串是一个只读的字节切片)
s is a string assigned a literal value representing the word “hello” in the Thai language.
Go string literals are UTF-8 encoded text.
Since strings are equivalent to []byte, this will produce the length of the raw bytes stored within.
Indexing into a string produces the raw byte values at each index.
This loop generates the hex values of all the bytes that constitute the code points in s.
To count how many runes are in a string, we can use the utf8 package.
Note that the run-time of RuneCountInString depends on the size of the string,
because it has to decode each UTF-8 rune sequentially.
Some Thai characters are represented by multiple UTF-8 code points, so the result of this count may be surprising.
A range loop handles strings specially and decodes each rune along with its offset in the string.
We can achieve the same iteration by using the utf8.DecodeRuneInString function explicitly.
This demonstrates passing a rune value to a function.
Values enclosed in single quotes are rune literals. We can compare a rune value to a rune literal directly.
*/
// main walks the same Thai string three ways: as raw bytes, as runes via
// range (which decodes UTF-8 and reports byte offsets), and as runes via
// explicit utf8.DecodeRuneInString calls.
func main() {
	const s = "สวัสดี"
	// len reports bytes, not characters, because a string is a byte slice.
	fmt.Println("Len:", len(s))
	// Indexing a string yields raw byte values; print them in hex.
	for i := 0; i < len(s); i++ {
		fmt.Printf("%x ", s[i])
	}
	fmt.Println()
	// RuneCountInString decodes the whole string to count code points.
	fmt.Println("Rune count:", utf8.RuneCountInString(s))
	// range over a string decodes each rune and gives its byte offset.
	for idx, runeValue := range s {
		fmt.Printf("%#U starts at %d\n", runeValue, idx)
	}
	fmt.Println("\nUsing DecodeRuneInString")
	// Equivalent manual iteration: decode one rune at a time and advance
	// by its encoded width in bytes.
	for i := 0; i < len(s); {
		runeValue, width := utf8.DecodeRuneInString(s[i:])
		fmt.Printf("%#U starts at %d\n", runeValue, i)
		examineRune(runeValue)
		i += width
	}
}
// examineRune demonstrates comparing a rune value against rune literals,
// printing a message for the two it recognizes.
func examineRune(r rune) {
	switch r {
	case 't':
		fmt.Println("found tee")
	case 'ส':
		fmt.Println("found so sua")
	}
}
package mesh
import (
"github.com/adamcolton/geom/d3"
"github.com/adamcolton/geom/d3/affine"
"github.com/adamcolton/geom/d3/curve/line"
"github.com/adamcolton/geom/d3/solid"
)
// Extrusion creates a mesh by extruding the perimeter by transformations.
// Faces accumulate as the perimeter is pushed through successive
// transforms; call Close to finalize the result as a Mesh.
type Extrusion struct {
	// cur holds the point-set indices of the current perimeter, in order.
	cur []uint32
	// points deduplicates every vertex generated so far.
	points *solid.PointSet
	// faces accumulates polygons as index lists into points.
	faces [][]uint32
}
// NewExtrusion takes a face as the start of an extrusion. The face's points
// become the initial perimeter, and the face itself is recorded as the
// mesh's first polygon.
func NewExtrusion(face []d3.Pt) *Extrusion {
	ln := len(face)
	e := &Extrusion{
		points: solid.NewPointSet(),
		cur: make([]uint32, ln),
	}
	// Register every perimeter point and remember its index.
	for i, pt := range face {
		e.cur[i] = e.points.Add(pt)
	}
	e.faces = append(e.faces, e.cur)
	return e
}
// applyTRelativeToCenter wraps t so it is applied relative to the current
// perimeter rather than the global origin.
func (e *Extrusion) applyTRelativeToCenter(t *d3.T) *d3.T {
	// Average the current perimeter points via an affine weighted sum.
	w := &affine.Weighted{}
	for _, ptIdx := range e.cur {
		w.Add(e.points.Pts[ptIdx])
	}
	// center is the origin minus the perimeter centroid, i.e. the
	// translation that would move the centroid to the origin.
	center := d3.Pt{}.Subtract(w.Get())
	// NOTE(review): AddBoth with the translation pair appears to conjugate
	// t by the centroid translation (move to origin, apply t, move back) —
	// confirm against d3.TSet.AddBoth / d3.T.Pair semantics.
	return d3.NewTSet().
		AddBoth(d3.Translate(center).Pair()).
		Add(t).
		Get()
}
// Extrude the current perimeter via a set of transformations. Each
// transform (applied relative to the perimeter's centroid) produces the
// next perimeter ring, and a quad face is added between each pair of
// corresponding edges of the two rings. Returns the receiver for chaining.
func (e *Extrusion) Extrude(ts ...*d3.T) *Extrusion {
	ln := len(e.cur)
	for _, t := range ts {
		t = e.applyTRelativeToCenter(t)
		// Map every point of the current ring through t to get the next.
		nxt := make([]uint32, len(e.cur))
		for i, ptIdx := range e.cur {
			nxt[i] = e.points.Add(t.Pt(e.points.Pts[ptIdx]))
		}
		// Stitch quads between rings; prev trails i so the last edge wraps
		// around to close the loop.
		prev := ln - 1
		for i := range e.cur {
			e.faces = append(e.faces, []uint32{
				e.cur[prev],
				e.cur[i],
				nxt[i],
				nxt[prev],
			})
			prev = i
		}
		e.cur = nxt
	}
	return e
}
// EdgeExtrude performs an extrusion then subdivides each edge in to 3 segments.
// The points from the starting perimeter are linked to the subdivision points,
// so the new perimeter has three times as many points as the old one. This
// allows the extrusion to increase the number of facets.
func (e *Extrusion) EdgeExtrude(t *d3.T) *Extrusion {
	lnCur := len(e.cur)
	lnNxt := 3 * lnCur
	nxt := make([]uint32, lnNxt)
	prev := e.points.Pts[e.cur[lnCur-1]]
	t = e.applyTRelativeToCenter(t)
	// Walk each edge (prev -> cur) of the current ring and place three
	// evenly spaced points (t=0, 1/3, 2/3) along it, transformed by t.
	// The index arithmetic shifts by -3 (mod ring length) so subdivision
	// points line up with the ring point that starts their edge.
	for i, cIdx := range e.cur {
		cur := e.points.Pts[cIdx]
		l := line.New(prev, cur)
		for j := 0; j < 3; j++ {
			f := float64(j) / 3.0
			nxt[(i*3+j-3+lnNxt)%lnNxt] = e.points.Add(t.Pt(l.Pt1(f)))
		}
		prev = cur
	}
	// Each old ring point fans out to two quads against the three new
	// subdivision points surrounding it.
	for i, cIdx := range e.cur {
		e.faces = append(e.faces, []uint32{
			nxt[i*3],
			nxt[i*3+1],
			cIdx,
			nxt[(i*3+lnNxt-1)%lnNxt],
		}, []uint32{
			nxt[i*3+1],
			nxt[i*3+2],
			e.cur[(i+1)%lnCur],
			cIdx,
		})
	}
	e.cur = nxt
	return e
}
// EdgeMerge is the opposite of EdgeExtrude, merging 3 points into one.
// The current perimeter length must be a multiple of 3; otherwise the call
// is silently a no-op and returns the receiver unchanged.
func (e *Extrusion) EdgeMerge(t *d3.T) *Extrusion {
	lnCur := len(e.cur)
	if lnCur%3 != 0 {
		return e
	}
	lnNxt := lnCur / 3
	nxt := make([]uint32, lnNxt)
	t = e.applyTRelativeToCenter(t)
	// Every third point of the current ring maps (through t) to one point
	// of the merged ring.
	for i := range nxt {
		nxt[i] = e.points.Add(t.Pt(e.points.Pts[e.cur[i*3]]))
	}
	// Each merged point gets two quads covering the three old points it
	// absorbs, mirroring the fan layout that EdgeExtrude produced.
	for i, nIdx := range nxt {
		e.faces = append(e.faces, []uint32{
			nIdx,
			e.cur[(i*3-1+lnCur)%lnCur],
			e.cur[i*3],
			e.cur[i*3+1],
		}, []uint32{
			nIdx,
			e.cur[i*3+1],
			e.cur[i*3+2],
			nxt[(i+1)%lnNxt],
		})
	}
	e.cur = nxt
	return e
}
// Close the mesh turning the current perimeter into a face. The returned
// Mesh shares the accumulated polygon index lists and point storage.
func (e *Extrusion) Close() Mesh {
	// The final perimeter becomes the closing cap polygon.
	e.faces = append(e.faces, e.cur)
	return Mesh{
		Polygons: e.faces,
		Pts: e.points.Pts,
	}
}
package css
import (
"fmt"
"strconv"
"strings"
)
// Parse an An+B notation at the current position in Tokenizer t.
// Returns the value for A and B on successful parse.
func parseNth(t Tokenizer) (int, int, error) {
var a, b int
var ok bool
err := fmt.Errorf("Invalid nth arguments at position %v", t.Position())
tk := skipWhitespace(t)
switch tk.Type() {
case Number:
n := tk.(*NumberToken)
if !n.Integer || !closingParen(t) {
return 0, 0, err
}
if b, ok = parseInt(n.Value); ok {
return 0, b, nil
} else {
return 0, 0, err
}
case Dimension:
d := tk.(*DimensionToken)
if !d.Integer {
return 0, 0, err
}
a, ok = parseInt(d.Value)
if !ok {
return 0, 0, err
}
unit := strings.ToLower(d.Unit)
if unit == "n" {
b, ok = parseB(t)
} else if unit == "n-" {
b, ok = parseSignlessB(t, -1)
} else {
b, ok = parseNDashDigits(unit)
ok = ok && closingParen(t)
}
if !ok {
return 0, 0, err
}
return a, b, nil
case Ident:
ident := strings.ToLower(tk.String())
switch ident {
case "even":
a, b = 2, 0
ok = closingParen(t)
case "odd":
a, b = 2, 1
ok = closingParen(t)
case "n":
a = 1
b, ok = parseB(t)
case "-n":
a = -1
b, ok = parseB(t)
case "n-":
a = 1
b, ok = parseSignlessB(t, -1)
case "-n-":
a = -1
b, ok = parseSignlessB(t, -1)
default:
if strings.HasPrefix(ident, "-") {
a = -1
b, ok = parseNDashDigits(ident[1:])
ok = ok && closingParen(t)
} else {
a = 1
b, ok = parseNDashDigits(ident)
ok = ok && closingParen(t)
}
}
if !ok {
return 0, 0, err
}
return a, b, nil
case Delim:
if tk.String() != "+" {
return 0, 0, err
}
tk = t.NextToken()
if tk.Type() != Ident {
return 0, 0, err
}
ident := strings.ToLower(tk.String())
switch ident {
case "n":
a = 1
b, ok = parseB(t)
case "n-":
a = 1
b, ok = parseSignlessB(t, -1)
default:
a = 1
b, ok = parseNDashDigits(ident)
ok = ok && closingParen(t)
}
if !ok {
return 0, 0, err
}
return a, b, nil
default:
return 0, 0, err
}
}
func parseB(t Tokenizer) (int, bool) {
tk := skipWhitespace(t)
switch tk.Type() {
case RightParen:
return 0, true
case Delim:
switch tk.String() {
case "+":
return parseSignlessB(t, 1)
case "-":
return parseSignlessB(t, -1)
default:
return 0, false
}
case Number:
n := tk.(*NumberToken)
if !n.Integer || !hasSign(n.Value) || !closingParen(t) {
return 0, false
}
return parseInt(n.Value)
default:
return 0, false
}
}
func parseSignlessB(t Tokenizer, sign int) (int, bool) {
tk := skipWhitespace(t)
n, ok := tk.(*NumberToken)
if !ok || !n.Integer || hasSign(n.Value) || !closingParen(t) {
return 0, false
}
if b, ok := parseInt(n.Value); ok {
return sign * b, true
}
return 0, false
}
func parseNDashDigits(s string) (int, bool) {
if len(s) >= 3 && strings.HasPrefix(s, "n-") {
return parseInt(s[1:])
}
return 0, false
}
func hasSign(s string) bool {
c := s[0:1]
return c == "+" || c == "-"
}
func parseInt(s string) (int, bool) {
if n, err := strconv.ParseInt(s, 10, 0); err == nil {
return int(n), true
} else {
return 0, false
}
}
func closingParen(t Tokenizer) bool {
return skipWhitespace(t).Type() == RightParen
}
func skipWhitespace(t Tokenizer) Token {
for {
tk := t.NextToken()
if tk.Type() != Whitespace {
return tk
}
}
} | css/nth_parser.go | 0.579757 | 0.502869 | nth_parser.go | starcoder |
package clusterdictionary
import (
"github.com/mattermost/mattermost-cloud/model"
"github.com/pkg/errors"
)
const (
	// SizeAlefDev is the definition of a cluster supporting dev purposes.
	SizeAlefDev = "SizeAlefDev"
	// SizeAlef500 is the key representing a cluster supporting 500 users.
	SizeAlef500 = "SizeAlef500"
	// SizeAlef1000 is the key representing a cluster supporting 1000 users.
	SizeAlef1000 = "SizeAlef1000"
	// SizeAlef5000 is the key representing a cluster supporting 5000 users.
	SizeAlef5000 = "SizeAlef5000"
	// SizeAlef10000 is the key representing a cluster supporting 10000 users.
	SizeAlef10000 = "SizeAlef10000"
)
// size describes the kops instance settings for one cluster size keyword:
// the master node type and count, plus the worker node type and the
// min/max worker count.
type size struct {
	MasterInstanceType string
	MasterCount int64
	NodeInstanceType string
	NodeMinCount int64
	NodeMaxCount int64
}
// ValidSizes is a mapping of a size keyword to kops cluster configuration.
var ValidSizes = map[string]size{
	SizeAlefDev: sizeAlefDev,
	SizeAlef500: sizeAlef500,
	SizeAlef1000: sizeAlef1000,
	SizeAlef5000: sizeAlef5000,
	SizeAlef10000: sizeAlef10000,
}
// sizeAlefDev is a cluster sized for development and testing.
var sizeAlefDev = size{
	MasterInstanceType: "t3.medium",
	MasterCount: 1,
	NodeInstanceType: "t3.medium",
	NodeMinCount: 2,
	NodeMaxCount: 2,
}
// sizeAlef500 is a cluster sized for 500 users.
var sizeAlef500 = size{
	MasterInstanceType: "t3.medium",
	MasterCount: 1,
	NodeInstanceType: "m5.large",
	NodeMinCount: 2,
	NodeMaxCount: 2,
}
// sizeAlef1000 is a cluster sized for 1000 users.
var sizeAlef1000 = size{
	MasterInstanceType: "t3.large",
	MasterCount: 1,
	NodeInstanceType: "m5.large",
	NodeMinCount: 4,
	NodeMaxCount: 4,
}
// sizeAlef5000 is a cluster sized for 5000 users.
var sizeAlef5000 = size{
	MasterInstanceType: "t3.large",
	MasterCount: 1,
	NodeInstanceType: "m5.large",
	NodeMinCount: 6,
	NodeMaxCount: 6,
}
// sizeAlef10000 is a cluster sized for 10000 users. It is the only size
// that runs a multi-master (3-node) control plane.
var sizeAlef10000 = size{
	MasterInstanceType: "t3.large",
	MasterCount: 3,
	NodeInstanceType: "m5.large",
	NodeMinCount: 10,
	NodeMaxCount: 10,
}
// IsValidClusterSize reports whether the given size string names one of the
// supported cluster size definitions.
func IsValidClusterSize(size string) bool {
	_, known := ValidSizes[size]
	return known
}
// ApplyToCreateClusterRequest takes a size keyword and applies the
// corresponding cluster values to a CreateClusterRequest. An empty size is
// a no-op; an unknown size returns an error and leaves the request
// untouched.
func ApplyToCreateClusterRequest(size string, request *model.CreateClusterRequest) error {
	if len(size) == 0 {
		return nil
	}
	// Single comma-ok lookup replaces the previous IsValidClusterSize
	// check followed by a second map access.
	values, ok := ValidSizes[size]
	if !ok {
		return errors.Errorf("%s is not a valid size", size)
	}
	request.MasterInstanceType = values.MasterInstanceType
	request.MasterCount = values.MasterCount
	request.NodeInstanceType = values.NodeInstanceType
	request.NodeMinCount = values.NodeMinCount
	request.NodeMaxCount = values.NodeMaxCount
	return nil
}
// ApplyToPatchClusterSizeRequest takes a size keyword and applies the
// corresponding cluster values to a PatchClusterSizeRequest.
func ApplyToPatchClusterSizeRequest(size string, request *model.PatchClusterSizeRequest) error {
if len(size) == 0 {
return nil
}
if !IsValidClusterSize(size) {
return errors.Errorf("%s is not a valid size", size)
}
values := ValidSizes[size]
request.NodeInstanceType = &values.NodeInstanceType
request.NodeMinCount = &values.NodeMinCount
request.NodeMaxCount = &values.NodeMaxCount
return nil
} | clusterdictionary/size.go | 0.616359 | 0.430387 | size.go | starcoder |
package codegen
import (
"regexp"
"strings"
"github.com/pulumi/pulumi/pkg/v2/codegen/schema"
)
var (
	// IMPORTANT! The following regexp's contain named capturing groups.
	// It's the `?P<group_name>` where group_name can be any name.
	// When changing the group names, be sure to change the reference to
	// the corresponding group name below where they are used as well.
	// All regexps are compiled once at package scope so they are never
	// rebuilt on a hot path.
	// SurroundingTextRE is regexp to match the content between the {{% examples %}} short-code
	// including the short-codes themselves.
	SurroundingTextRE = regexp.MustCompile("({{% examples %}}(.|\n)*?{{% /examples %}})")
	// ExamplesSectionRE is a regexp to match just the content between the {{% examples %}} short-codes.
	ExamplesSectionRE = regexp.MustCompile(
		"(?P<examples_start>{{% examples %}})(?P<examples_content>(.|\n)*?)(?P<examples_end>{{% /examples %}})")
	// IndividualExampleRE is a regexp to match a single example section surrounded by the {{% example %}} short-code.
	IndividualExampleRE = regexp.MustCompile(
		"(?P<example_start>{{% example %}})(?P<example_content>(.|\n)*?)(?P<example_end>{{% /example %}})")
	// H3TitleRE is a regexp to match an h3 title tag.
	H3TitleRE = regexp.MustCompile("(### .*)")
	// The following regexp's match the fenced code snippet blocks in a
	// single example section, one per supported language.
	// TSCodeSnippetRE is a regexp to match a TypeScript code snippet.
	TSCodeSnippetRE = regexp.MustCompile("(```(typescript))((.|\n)*?)(```)")
	// GoCodeSnippetRE is a regexp to match a Go code snippet.
	GoCodeSnippetRE = regexp.MustCompile("(```(go))((.|\n)*?)(```)")
	// PythonCodeSnippetRE is a regexp to match a Python code snippet.
	PythonCodeSnippetRE = regexp.MustCompile("(```(python))((.|\n)*?)(```)")
	// CSharpCodeSnippetRE is a regexp to match a C# code snippet.
	CSharpCodeSnippetRE = regexp.MustCompile("(```(csharp))((.|\n)*?)(```)")
)
// DocLanguageHelper is an interface for extracting language-specific information from a Pulumi schema.
// See the implementation for this interface under each of the language code generators.
type DocLanguageHelper interface {
	// GetPropertyName returns the language-specific name for a schema property.
	GetPropertyName(p *schema.Property) (string, error)
	// Doc-link helpers return URLs for the various kinds of generated types.
	GetDocLinkForResourceType(pkg *schema.Package, moduleName, typeName string) string
	GetDocLinkForPulumiType(pkg *schema.Package, typeName string) string
	GetDocLinkForResourceInputOrOutputType(pkg *schema.Package, moduleName, typeName string, input bool) string
	GetDocLinkForFunctionInputOrOutputType(pkg *schema.Package, moduleName, typeName string, input bool) string
	GetDocLinkForBuiltInType(typeName string) string
	// GetLanguageTypeString renders a schema type as the language's type syntax.
	GetLanguageTypeString(pkg *schema.Package, moduleName string, t schema.Type, input, optional bool) string
	// GetFunctionName returns the language-specific name of a schema function.
	GetFunctionName(modName string, f *schema.Function) string
	// GetResourceFunctionResultName returns the name of the result type when a static resource function is used to lookup
	// an existing resource.
	GetResourceFunctionResultName(modName string, f *schema.Function) string
	// GetModuleDocLink returns the display name and the link for a module (including root modules) in a given package.
	GetModuleDocLink(pkg *schema.Package, modName string) (string, string)
}
// exampleParts holds the pieces of a single code example: its "### " title
// line and the fenced code snippet for one language.
type exampleParts struct {
	Title string
	Snippet string
}
// GetFirstMatchedGroupsFromRegex returns the groups for the first match of a regexp.
func GetFirstMatchedGroupsFromRegex(regex *regexp.Regexp, str string) map[string]string {
groups := map[string]string{}
// Get all matching groups.
matches := regex.FindAllStringSubmatch(str, -1)
if len(matches) == 0 {
return groups
}
firstMatch := matches[0]
// Get the named groups in our regex.
groupNames := regex.SubexpNames()
for i, value := range firstMatch {
groups[groupNames[i]] = value
}
return groups
}
// GetAllMatchedGroupsFromRegex returns all matches and the respective groups for a regexp.
func GetAllMatchedGroupsFromRegex(regex *regexp.Regexp, str string) map[string][]string {
// Get all matching groups.
matches := regex.FindAllStringSubmatch(str, -1)
// Get the named groups in our regex.
groupNames := regex.SubexpNames()
groups := map[string][]string{}
for _, match := range matches {
for j, value := range match {
if existing, ok := groups[groupNames[j]]; ok {
existing = append(existing, value)
groups[groupNames[j]] = existing
continue
}
groups[groupNames[j]] = []string{value}
}
}
return groups
}
// isEmpty reports whether s is effectively empty: the empty string or a
// single newline character. (This matches the original "remove one newline
// and compare to empty" check exactly — "\n\n" is NOT considered empty.)
func isEmpty(s string) bool {
	return s == "" || s == "\n"
}
// ExtractExamplesSection returns the content available between the
// {{% examples %}} shortcodes, or nil when the section is absent or
// effectively empty.
func ExtractExamplesSection(description string) *string {
	matched := GetFirstMatchedGroupsFromRegex(ExamplesSectionRE, description)
	content, ok := matched["examples_content"]
	if !ok || isEmpty(content) {
		return nil
	}
	return &content
}
// extractExampleParts pulls the title and the lang-specific code snippet
// out of one example section. It returns nil when the section has no code
// fence for lang; for a recognized fence of an unsupported lang the
// Snippet field is left empty.
func extractExampleParts(exampleContent string, lang string) *exampleParts {
	if !strings.Contains(exampleContent, "```"+lang) {
		return nil
	}
	var snippetRE *regexp.Regexp
	switch lang {
	case "csharp":
		snippetRE = CSharpCodeSnippetRE
	case "go":
		snippetRE = GoCodeSnippetRE
	case "python":
		snippetRE = PythonCodeSnippetRE
	case "typescript":
		snippetRE = TSCodeSnippetRE
	}
	parts := &exampleParts{Title: H3TitleRE.FindString(exampleContent)}
	if snippetRE != nil {
		parts.Snippet = snippetRE.FindString(exampleContent)
	}
	return parts
}
// getExamplesForLang collects the title/snippet pairs for lang from every
// {{% example %}} section inside examplesContent, skipping sections that
// have no snippet for that language.
func getExamplesForLang(examplesContent string, lang string) []exampleParts {
	examples := make([]exampleParts, 0)
	matches := GetAllMatchedGroupsFromRegex(IndividualExampleRE, examplesContent)
	for _, content := range matches["example_content"] {
		if parts := extractExampleParts(content, lang); parts != nil && parts.Snippet != "" {
			examples = append(examples, *parts)
		}
	}
	return examples
}
// StripNonRelevantExamples strips the non-relevant language snippets from a resource's description.
func StripNonRelevantExamples(description string, lang string) string {
if description == "" {
return ""
}
// Replace the entire section (including the shortcodes themselves) enclosing the
// examples section, with a placeholder, which itself will be replaced appropriately
// later.
newDescription := SurroundingTextRE.ReplaceAllString(description, "{{ .Examples }}")
// Get the content enclosing the outer examples short code.
examplesContent := ExtractExamplesSection(description)
if examplesContent == nil {
return strings.ReplaceAll(newDescription, "{{ .Examples }}", "")
}
// Within the examples section, identify each example.
builder := strings.Builder{}
examples := getExamplesForLang(*examplesContent, lang)
numExamples := len(examples)
if numExamples > 0 {
builder.WriteString("## Example Usage\n\n")
}
for i, ex := range examples {
builder.WriteString(ex.Title + "\n\n")
builder.WriteString(ex.Snippet + "\n")
// Print an extra new-line character as long as this is not
// the last example.
if i != numExamples-1 {
builder.WriteString("\n")
}
}
return strings.ReplaceAll(newDescription, "{{ .Examples }}", builder.String())
} | pkg/codegen/docs.go | 0.674587 | 0.701806 | docs.go | starcoder |
package zim
import (
"bytes"
"hash/fnv"
)
// defaultLimitEntries caps result lists when a caller passes a limit <= 0.
const defaultLimitEntries = 100
// EntryWithURL searches for the Directory Entry with the exact URL.
// If the Directory Entry was found, found is set to true and
// the returned position will be the position in the URL pointerlist.
// This can be used to iterate over the next n Directory Entries using
// z.EntryAtURLPosition(position+n).
// When found is false, urlPosition is the last position probed by the
// search, not a meaningful location for url.
func (z *File) EntryWithURL(namespace Namespace, url []byte) (
	entry DirectoryEntry, urlPosition uint32, found bool) {
	// more optimized version of entryWithPrefix: a plain binary search over
	// the URL pointer list, which is ordered by (namespace, url).
	var firstURLPosition int64
	var currentURLPos int64
	var lastURLPosition = int64(z.header.articleCount - 1)
	for firstURLPosition <= lastURLPosition {
		currentURLPos = (firstURLPosition + lastURLPosition) >> 1
		entry = z.readDirectoryEntry(z.urlPointerAtPos(uint32(currentURLPos)), 0)
		// Compare by namespace first, then by URL within the namespace.
		var c = cmpNs(entry.namespace, namespace)
		if c == 0 {
			c = bytes.Compare(entry.url, url)
			if c == 0 {
				found = true
				break
			}
		}
		if c < 0 {
			firstURLPosition = currentURLPos + 1
		} else {
			lastURLPosition = currentURLPos - 1
		}
	}
	urlPosition = uint32(currentURLPos)
	return
}
// EntryWithURLPrefix searches the first Directory Entry in the namespace
// having the given URL prefix. If it was found, found is set to true and
// the returned position will be the position in the URL pointerlist.
// This can be used to iterate over the next n Directory Entries using
// z.EntryAtURLPosition(position+n).
func (z *File) EntryWithURLPrefix(namespace Namespace, prefix []byte) (
	entry DirectoryEntry, position uint32, found bool) {
	// Delegate to the generic prefix search over the URL-ordered list.
	return z.entryWithPrefix(z.urlPointerAtPos, chooseURL, namespace, prefix)
}
// EntryWithNamespace searches the first Directory Entry in the namespace.
// If it was found, found is set to true and the returned position will
// be the position in the URL pointerlist.
// This can be used to iterate over the next n Directory Entries using
// z.EntryAtURLPosition(position+n).
func (z *File) EntryWithNamespace(namespace Namespace) (
	entry DirectoryEntry, position uint32, found bool) {
	// An empty prefix matches every URL, so this finds the namespace start.
	return z.EntryWithURLPrefix(namespace, nil)
}
// EntryWithTitlePrefix searches the first Directory Entry in the namespace
// having the given title prefix. If it was found, found is set to true and
// the returned position will be the position in the title pointerlist.
// This can be used to iterate over the next n Directory Entries using
// z.EntryAtTitlePosition(position+n).
func (z *File) EntryWithTitlePrefix(namespace Namespace, prefix []byte) (
	entry DirectoryEntry, position uint32, found bool) {
	// Delegate to the generic prefix search over the title-ordered list.
	return z.entryWithPrefix(z.titlePointerAtPos, chooseTitle, namespace, prefix)
}
// EntriesWithURLPrefix returns all Directory Entries in the Namespace
// that have the same URL prefix like the given, up to limit entries.
// When the Limit is set to <= 0 it gets the default value 100.
func (z *File) EntriesWithURLPrefix(namespace Namespace, prefix []byte, limit int) []DirectoryEntry {
	return z.entriesWithPrefix(chooseURL, z.urlPointerAtPos, namespace, prefix, limit)
}
// EntriesWithNamespace returns the first n Directory Entries in the Namespace
// where n <= limit. An empty prefix matches every entry in the namespace.
// When the Limit is set to <= 0 it gets the default value 100.
func (z *File) EntriesWithNamespace(namespace Namespace, limit int) []DirectoryEntry {
	return z.EntriesWithURLPrefix(namespace, nil, limit)
}
// EntriesWithTitlePrefix returns all Directory Entries in the Namespace
// that have the same Title prefix like the given, up to limit entries.
// When the Limit is set to <= 0 it gets the default value 100.
func (z *File) EntriesWithTitlePrefix(namespace Namespace, prefix []byte, limit int) []DirectoryEntry {
	return z.entriesWithPrefix(chooseTitle, z.titlePointerAtPos, namespace, prefix, limit)
}
// EntriesWithSimilarity returns Directory Entries in the Namespace
// that have a similar URL prefix or Title prefix to the given one.
// It repeatedly queries by URL prefix and title prefix, shortening the
// prefix one byte at a time (up to 15 times) until limit entries are
// collected. Results are deduplicated by a hash of the entry URL.
// When the Limit is set to <= 0 it takes the default value 100.
func (z *File) EntriesWithSimilarity(namespace Namespace, prefix []byte, limit int) []DirectoryEntry {
	const maxLengthDifference = 15
	type wasSuggested = struct{}
	if limit <= 0 {
		limit = defaultLimitEntries
	}
	// Set of URL hashes already emitted, used as a dedupe filter.
	var alreadySuggested = make(map[uint32]wasSuggested, limit)
	var suggestions = make([]DirectoryEntry, 0, limit)
	for i := 0; i < maxLengthDifference; i++ {
		// Try both orderings: URL-prefix matches first, then title-prefix.
		for _, prefixFunc := range [2]func(Namespace, []byte, int) []DirectoryEntry{
			z.EntriesWithURLPrefix, z.EntriesWithTitlePrefix} {
			var nextSuggestions = prefixFunc(namespace, prefix, limit)
			for _, suggestion := range nextSuggestions {
				var key = hash(suggestion.url)
				var _, suggestedBefore = alreadySuggested[key]
				if !suggestedBefore {
					suggestions = append(suggestions, suggestion)
					alreadySuggested[key] = wasSuggested{}
					if len(suggestions) >= limit {
						return suggestions
					}
				}
			}
		}
		if len(prefix) == 0 {
			return suggestions
		}
		// TODO: decrement by runeSize
		// Dropping a single byte can split a multi-byte UTF-8 rune, which
		// makes the shortened prefix match nothing for that round.
		prefix = prefix[:len(prefix)-1]
	}
	return suggestions
}
// chooseTitle and chooseURL select the field a prefix search compares on.
func chooseTitle(entry *DirectoryEntry) []byte { return entry.title }
func chooseURL(entry *DirectoryEntry) []byte { return entry.url }
func hash(data []byte) uint32 {
h := fnv.New32a()
h.Write(data)
return h.Sum32()
}
// cmpNs orders two namespaces: 1 when ns1 > ns2, -1 when ns1 < ns2,
// 0 when they are equal.
func cmpNs(ns1, ns2 Namespace) int {
	switch {
	case ns1 > ns2:
		return 1
	case ns1 < ns2:
		return -1
	default:
		return 0
	}
}
// cmpPrefix reports 0 when prefix is a prefix of s; otherwise it returns the
// lexicographic comparison of s against prefix (as bytes.Compare).
func cmpPrefix(s, prefix []byte) int {
	if !bytes.HasPrefix(s, prefix) {
		return bytes.Compare(s, prefix)
	}
	return 0
}
// chooseFieldFunc selects the byte field (title or url) of a directory entry
// that a prefix search compares against.
type chooseFieldFunc func(*DirectoryEntry) []byte
// pointerAtPositionFunc resolves an entry position in an index (title- or
// url-ordered) to the entry's file offset.
type pointerAtPositionFunc func(uint32) uint64
// entryWithPrefix binary-searches the index described by pointerAtPosition for
// the FIRST entry in the given namespace whose selected field (title or url)
// starts with prefix. It returns that entry, its position in the index, and
// whether a match was found. When found is false, entry and position are the
// last probe of the search and must be ignored by the caller.
func (z *File) entryWithPrefix(pointerAtPosition pointerAtPositionFunc, chooseField chooseFieldFunc, namespace Namespace, prefix []byte) (entry DirectoryEntry, position uint32, found bool) {
	var firstPosition int64
	var currentPosition int64
	var lastPosition = int64(z.header.articleCount - 1)
	// Classic binary search over (namespace, field) ordering; when an entry
	// with the prefix is hit, the search keeps moving left until the lowest
	// matching position is located.
	for firstPosition <= lastPosition {
		currentPosition = (firstPosition + lastPosition) >> 1
		entry = z.readDirectoryEntry(pointerAtPosition(uint32(currentPosition)), 0)
		var c = cmpNs(entry.namespace, namespace)
		if c == 0 {
			c = cmpPrefix(chooseField(&entry), prefix)
			if c == 0 {
				// we found an entry with the given prefix
				if currentPosition == 0 {
					// already lowest position
					found = true
					break
				}
				var prevEntry = z.readDirectoryEntry(pointerAtPosition(uint32(currentPosition-1)), 0)
				if prevEntry.namespace != namespace || !bytes.HasPrefix(chooseField(&prevEntry), prefix) {
					// we found the lowest position
					found = true
					break
				}
				// the entry below also has the prefix, but maybe much more entries have it too...
				c = 1 // so the current entry is greater
			}
		}
		if c < 0 {
			firstPosition = currentPosition + 1
		} else {
			lastPosition = currentPosition - 1
		}
	}
	position = uint32(currentPosition)
	return
}
// entriesWithPrefix collects up to limit consecutive entries in the given
// namespace whose selected field starts with prefix, beginning at the first
// match found by entryWithPrefix. A limit <= 0 falls back to the default.
// The result is nil when no entry matches.
func (z *File) entriesWithPrefix(chooseField chooseFieldFunc, pointerAtPosition pointerAtPositionFunc, namespace Namespace, prefix []byte, limit int) []DirectoryEntry {
	var entry, position, found = z.entryWithPrefix(pointerAtPosition, chooseField, namespace, prefix)
	var result []DirectoryEntry
	if found {
		if limit <= 0 {
			limit = defaultLimitEntries
		}
		// Cap the initial allocation at the default so a huge limit does not
		// preallocate a huge slice.
		var capacity = defaultLimitEntries
		if limit < defaultLimitEntries {
			capacity = limit
		}
		result = make([]DirectoryEntry, 0, capacity)
		result = append(result, entry)
		entriesAdded := 1
		lastPosition := z.header.articleCount - 1
		// Walk forward while subsequent entries still carry the prefix.
		for entriesAdded < limit && position < lastPosition {
			position++
			nextEntry := z.readDirectoryEntry(pointerAtPosition(position), 0)
			if nextEntry.Namespace() != namespace || !bytes.HasPrefix(chooseField(&nextEntry), prefix) {
				break
			}
			result = append(result, nextEntry)
			entriesAdded++
		}
	}
	return result
}
// foundFunc consumes a directory entry found during prefix iteration.
type foundFunc func(de *DirectoryEntry)
// foundPosFunc consumes a directory entry and its index position; returning a
// non-nil error aborts the iteration.
type foundPosFunc func(de *DirectoryEntry, pos uint32) error
// ForEachEntryWithTitlePrefix invokes foundFunc for up to limit entries in the
// namespace whose title starts with prefix, in title order.
func (z *File) ForEachEntryWithTitlePrefix(namespace Namespace, prefix []byte, limit int, foundFunc foundFunc) {
	z.forEachEntryWithPrefix(chooseTitle, z.titlePointerAtPos, namespace, prefix, limit, foundFunc)
}
// ForEachEntryWithURLPrefix invokes foundFunc for up to limit entries in the
// namespace whose URL starts with prefix, in URL order.
func (z *File) ForEachEntryWithURLPrefix(namespace Namespace, prefix []byte, limit int, foundFunc foundFunc) {
	z.forEachEntryWithPrefix(chooseURL, z.urlPointerAtPos, namespace, prefix, limit, foundFunc)
}
// forEachEntryWithPrefix invokes foundFunc for up to limit consecutive entries
// in the namespace whose selected field starts with prefix.
//
// Fixes: the loop previously read follow-up entries via z.titlePointerAtPos
// regardless of the injected pointerAtPosition, which returned entries from
// the wrong index for URL-ordered iteration; and foundFunc was invoked with
// the last search probe even when no match was found.
func (z *File) forEachEntryWithPrefix(chooseField chooseFieldFunc, pointerAtPosition pointerAtPositionFunc, namespace Namespace, prefix []byte, limit int, foundFunc foundFunc) {
	entry, position, found := z.entryWithPrefix(pointerAtPosition, chooseField, namespace, prefix)
	if !found {
		return
	}
	foundFunc(&entry)
	entriesAdded := 1
	lastPosition := z.header.articleCount - 1
	for entriesAdded < limit && position < lastPosition {
		position++
		nextEntry := z.readDirectoryEntry(pointerAtPosition(position), 0)
		// A nil prefix matches everything within the namespace.
		if nextEntry.Namespace() != namespace || (prefix != nil && !bytes.HasPrefix(chooseField(&nextEntry), prefix)) {
			break
		}
		foundFunc(&nextEntry)
		entriesAdded++
	}
}
// ForEachEntryAfterPosition run foundFunc for each entry after position
func (z *File) ForEachEntryAfterPosition(position, limit uint32, foundFunc foundPosFunc) error {
entriesAdded := 1
lastPosition := z.header.articleCount - 1
useLimit := limit > 0
for {
if (useLimit && position > uint32(limit)) || position > lastPosition {
break
}
position++
nextEntry := z.readDirectoryEntry(z.titlePointerAtPos(position), 0)
if err := foundFunc(&nextEntry, position); err != nil {
return err
}
entriesAdded++
}
return nil
} | entry_search.go | 0.535584 | 0.430447 | entry_search.go | starcoder |
package gen
import "fmt"
// Builder is assists in build a more complex Node.
// Builder assists in building a more complex Node incrementally.
type Builder struct {
	stack  []Node // working stack of nodes (and Key markers) under construction
	starts []int  // per open container: start index in stack for arrays, -1 for objects
}
// Reset clears the Builder of previously built nodes so it can be reused,
// retaining any already-allocated backing storage.
//
// Fix: the old condition (cap > 0 && len > 0) discarded and reallocated the
// backing arrays whenever the builder was empty but had capacity, defeating
// the reuse; re-slicing to zero length is safe even for empty or nil slices.
// Also fixed the doubled "the the" in the doc comment.
func (b *Builder) Reset() {
	if cap(b.stack) > 0 {
		b.stack = b.stack[:0]
		b.starts = b.starts[:0]
	} else {
		b.stack = make([]Node, 0, 64)
		b.starts = make([]int, 0, 16)
	}
}
// MustObject adds an object to the builder, panicking on error. A key is
// required if adding to a parent object.
func (b *Builder) MustObject(key ...string) {
	if err := b.Object(key...); err != nil {
		panic(err)
	}
}
// Object opens a new object in the builder. A key is required if adding to a
// parent object and forbidden if adding to a parent array.
func (b *Builder) Object(key ...string) error {
	newObj := Object{}
	if 0 < len(key) {
		// A key is only legal when the enclosing container is an object
		// (marked by -1 on the starts stack).
		if len(b.starts) == 0 || 0 <= b.starts[len(b.starts)-1] {
			return fmt.Errorf("can not use a key when pushing to an array")
		}
		if obj, _ := b.stack[len(b.stack)-1].(Object); obj != nil {
			obj[key[0]] = newObj
		}
	} else if 0 < len(b.starts) && b.starts[len(b.starts)-1] < 0 {
		return fmt.Errorf("must have a key when pushing to an object")
	}
	// -1 marks an open object frame (arrays record their stack start index).
	b.starts = append(b.starts, -1)
	b.stack = append(b.stack, newObj)
	return nil
}
// MustArray adds an array to the builder, panicking on error. A key is
// required if adding to a parent object.
func (b *Builder) MustArray(key ...string) {
	if err := b.Array(key...); err != nil {
		panic(err)
	}
}
// Array opens a new array in the builder. A key is required if adding to a
// parent object and forbidden if adding to a parent array. The key is pushed
// as a Key marker on the stack and resolved when the array is popped.
func (b *Builder) Array(key ...string) error {
	if 0 < len(key) {
		if len(b.starts) == 0 || 0 <= b.starts[len(b.starts)-1] {
			return fmt.Errorf("can not use a key when pushing to an array")
		}
		b.stack = append(b.stack, Key(key[0]))
	} else if 0 < len(b.starts) && b.starts[len(b.starts)-1] < 0 {
		return fmt.Errorf("must have a key when pushing to an object")
	}
	// Record where this array's elements begin on the stack; Pop collects
	// everything above this index into an Array node.
	b.starts = append(b.starts, len(b.stack))
	b.stack = append(b.stack, EmptyArray)
	return nil
}
// MustValue adds a Node to the builder, panicking on error. A key is required
// if adding to a parent object.
func (b *Builder) MustValue(value Node, key ...string) {
	if err := b.Value(value, key...); err != nil {
		panic(err)
	}
}
// Value adds a Node to the current container. A key is required if adding to
// a parent object and forbidden if adding to a parent array; array elements
// are accumulated on the stack until the array is popped.
func (b *Builder) Value(value Node, key ...string) error {
	if 0 < len(key) {
		if len(b.starts) == 0 || 0 <= b.starts[len(b.starts)-1] {
			return fmt.Errorf("can not use a key when pushing to an array")
		}
		if obj, _ := b.stack[len(b.stack)-1].(Object); obj != nil {
			obj[key[0]] = value
		}
	} else if 0 < len(b.starts) && b.starts[len(b.starts)-1] < 0 {
		return fmt.Errorf("must have a key when pushing to an object")
	} else {
		b.stack = append(b.stack, value)
	}
	return nil
}
// Pop closes the innermost open Object or Array. For an array frame (start
// index >= 0) the elements accumulated on the stack are collected into an
// Array node, which replaces the EmptyArray placeholder; if the array was
// keyed, it is also attached to the enclosing object and the Key marker is
// removed from the stack. Object frames only pop the frame marker: the object
// itself was already attached when opened.
func (b *Builder) Pop() {
	if 0 < len(b.starts) {
		start := b.starts[len(b.starts)-1]
		if 0 <= start { // array
			start++
			size := len(b.stack) - start
			a := Array(make([]Node, size))
			copy(a, b.stack[start:len(b.stack)])
			b.stack = b.stack[:start]
			// Replace the EmptyArray placeholder with the finished array.
			b.stack[start-1] = a
			if 2 < len(b.stack) {
				if k, ok := b.stack[len(b.stack)-2].(Key); ok {
					if obj, _ := b.stack[len(b.stack)-3].(Object); obj != nil {
						obj[string(k)] = a
						b.stack = b.stack[:len(b.stack)-2]
					}
				}
			}
		}
		b.starts = b.starts[:len(b.starts)-1]
	}
}
// PopAll closes every open Object and Array, leaving the root node complete.
func (b *Builder) PopAll() {
	for 0 < len(b.starts) {
		b.Pop()
	}
}
// Result returns the current built Node.
func (b *Builder) Result() (result Node) {
if 0 < len(b.stack) {
result = b.stack[0]
}
return
} | gen/builder.go | 0.569374 | 0.496948 | builder.go | starcoder |
package gojay
// AddSliceString unmarshals the next JSON array of strings into *s.
// It is an alias for SliceString, kept for API symmetry with other Add* decoders.
func (dec *Decoder) AddSliceString(s *[]string) error {
	return dec.SliceString(s)
}
// SliceString unmarshals the next JSON array of strings into *s, appending
// each decoded element. Idiom fix: the Array error is returned directly
// instead of being re-checked and re-returned.
func (dec *Decoder) SliceString(s *[]string) error {
	return dec.Array(DecodeArrayFunc(func(dec *Decoder) error {
		var str string
		if err := dec.String(&str); err != nil {
			return err
		}
		*s = append(*s, str)
		return nil
	}))
}
// AddSliceInt unmarshals the next JSON array of integers into *s.
// It is an alias for SliceInt.
func (dec *Decoder) AddSliceInt(s *[]int) error {
	return dec.SliceInt(s)
}
// SliceInt unmarshals the next JSON array of integers into *s, appending each
// decoded element. Idiom fix: the Array error is returned directly.
func (dec *Decoder) SliceInt(s *[]int) error {
	return dec.Array(DecodeArrayFunc(func(dec *Decoder) error {
		var i int
		if err := dec.Int(&i); err != nil {
			return err
		}
		*s = append(*s, i)
		return nil
	}))
}
// AddSliceFloat64 unmarshals the next JSON array of floats into *s.
// It is an alias for SliceFloat64. (Comment fixed: it previously named the
// function "AddFloat64".)
func (dec *Decoder) AddSliceFloat64(s *[]float64) error {
	return dec.SliceFloat64(s)
}
// SliceFloat64 unmarshals the next JSON array of numbers into *s, appending
// each decoded element. Idiom fix: the Array error is returned directly.
func (dec *Decoder) SliceFloat64(s *[]float64) error {
	return dec.Array(DecodeArrayFunc(func(dec *Decoder) error {
		var f float64
		if err := dec.Float64(&f); err != nil {
			return err
		}
		*s = append(*s, f)
		return nil
	}))
}
// AddSliceBool unmarshals the next JSON array of booleans into *s.
// It is an alias for SliceBool. (Comment fixed: "boolegers" typo and wrong name.)
func (dec *Decoder) AddSliceBool(s *[]bool) error {
	return dec.SliceBool(s)
}
// SliceBool unmarshals the next JSON array of boolegers to the given *[]bool s
func (dec *Decoder) SliceBool(s *[]bool) error {
err := dec.Array(DecodeArrayFunc(func(dec *Decoder) error {
var b bool
if err := dec.Bool(&b); err != nil {
return err
}
*s = append(*s, b)
return nil
}))
if err != nil {
return err
}
return nil
} | vendor/github.com/francoispqt/gojay/decode_slice.go | 0.709019 | 0.413181 | decode_slice.go | starcoder |
package lt
import "math"
// GenRho builds the ideal soliton distribution for k source blocks:
//
//	rho(1) = 1/k
//	rho(d) = 1/(d*(d-1))  for d = 2, 3, ..., k
//
// It returns a slice of length k where index d-1 holds rho(d).
func GenRho(k uint64) []float64 {
	rho := make([]float64, k)
	if k == 0 {
		return rho
	}
	rho[0] = 1 / float64(k)
	for d := uint64(2); d <= k; d++ {
		rho[d-1] = 1 / float64(d*(d-1))
	}
	return rho
}
// GenTau builds the spike component of the robust soliton distribution.
// s is c*ln(k/delta)*sqrt(k), k the number of source blocks, and delta a
// bound on the decoding failure probability:
//
//	tau(d) = s/(k*d)           for d < floor(k/s)
//	tau(d) = s*ln(s/delta)/k   for d = floor(k/s)
//	tau(d) = 0                 otherwise
func GenTau(s float64, k uint64, delta float64) []float64 {
	tau := make([]float64, k)
	spike := uint64(math.Floor(float64(k) / s))
	for d := uint64(1); d <= k; d++ {
		switch {
		case d < spike:
			tau[d-1] = s / float64(k*d)
		case d == spike:
			tau[d-1] = s * (math.Log(s/delta) / float64(k))
		default:
			tau[d-1] = 0
		}
	}
	return tau
}
// sumSlice returns the sum of all values in set, left to right.
func sumSlice(set []float64) float64 {
	total := 0.0
	for _, v := range set {
		total += v
	}
	return total
}
// GenMu builds the normalized robust soliton distribution for k source
// blocks, failure bound delta, and constant c (of order 1): the sum of the
// ideal soliton (rho) and spike (tau) components, normalized to 1.
func GenMu(k uint64, delta float64, c float64) []float64 {
	s := c * math.Log(float64(k)/delta) * math.Sqrt(float64(k))
	rho := GenRho(k)
	tau := GenTau(s, k, delta)
	normalizer := sumSlice(rho) + sumSlice(tau)
	mu := make([]float64, k)
	for i := range rho {
		mu[i] = (rho[i] + tau[i]) / normalizer
	}
	return mu
}
// :params k: the number of source block
// :params delta: delta is a bound on the probability that the decoding fails
// :params c: c is a constant of order 1
// :return list: list of RSD
func GenRSD(k uint64, delta float64, c float64) []float64 {
rsd_set := make([]float64, k)
mu_set := GenMu(k, delta, c)
for i := uint64(1); i <= k; i++ {
rsd_set[i-1] = sumSlice(mu_set[:i])
}
return rsd_set
} | lt/RSD.go | 0.72487 | 0.468122 | RSD.go | starcoder |
package vm
// Opcode set for the VM. Operand legend: A/B/C are 8-bit operands, Bx an
// unsigned 16-bit operand, sBx a signed 16-bit operand (see the Instruction
// packing below).
const (
	OpTypeLoadNil = iota + 1 // A A: register
	OpTypeFillNil // AB A: start reg B: end reg [A,B)
	OpTypeLoadBool // AB A: register B: 1 true 0 false
	OpTypeLoadInt // A A: register Next instruction opcode is const unsigned int
	OpTypeLoadConst // ABx A: register Bx: const index
	OpTypeMove // AB A: dst register B: src register
	OpTypeGetUpvalue // AB A: register B: upvalue index
	OpTypeSetUpvalue // AB A: register B: upvalue index
	OpTypeGetGlobal // ABx A: value register Bx: const index
	OpTypeSetGlobal // ABx A: value register Bx: const index
	OpTypeClosure // ABx A: register Bx: proto index
	OpTypeCall // ABC A: register B: arg value count + 1 C: expected result count + 1
	OpTypeVarArg // AsBx A: register sBx: expected result count
	OpTypeRet // AsBx A: return value start register sBx: return value count
	OpTypeJmpFalse // AsBx A: register sBx: diff of instruction index
	OpTypeJmpTrue // AsBx A: register sBx: diff of instruction index
	OpTypeJmpNil // AsBx A: register sBx: diff of instruction index
	OpTypeJmp // sBx sBx: diff of instruction index
	OpTypeNeg // A A: operand register and dst register
	OpTypeNot // A A: operand register and dst register
	OpTypeLen // A A: operand register and dst register
	OpTypeAdd // ABC A: dst register B: operand1 register C: operand2 register
	OpTypeSub // ABC A: dst register B: operand1 register C: operand2 register
	OpTypeMul // ABC A: dst register B: operand1 register C: operand2 register
	OpTypeDiv // ABC A: dst register B: operand1 register C: operand2 register
	OpTypePow // ABC A: dst register B: operand1 register C: operand2 register
	OpTypeMod // ABC A: dst register B: operand1 register C: operand2 register
	OpTypeConcat // ABC A: dst register B: operand1 register C: operand2 register
	OpTypeLess // ABC A: dst register B: operand1 register C: operand2 register
	OpTypeGreater // ABC A: dst register B: operand1 register C: operand2 register
	OpTypeEqual // ABC A: dst register B: operand1 register C: operand2 register
	OpTypeUnEqual // ABC A: dst register B: operand1 register C: operand2 register
	OpTypeLessEqual // ABC A: dst register B: operand1 register C: operand2 register
	OpTypeGreaterEqual // ABC A: dst register B: operand1 register C: operand2 register
	OpTypeNewTable // A A: register of table
	OpTypeSetTable // ABC A: register of table B: key register C: value register
	OpTypeGetTable // ABC A: register of table B: key register C: value register
	OpTypeForInit // ABC A: var register B: limit register C: step register
	OpTypeForStep // ABC ABC same with OpType_ForInit, next instruction sBx: diff of instruction index
)
// Instruction is a single 32-bit VM instruction packed into an int:
// bits 24-31 hold the opcode, bits 16-23 param A, and the low 16 bits hold
// either params B (8-15) and C (0-7), or a 16-bit operand (signed sBx /
// unsigned Bx).
type Instruction struct {
	OpCode int
}
// newInstruction1 returns a zero-valued instruction.
func newInstruction1() Instruction {
	return Instruction{}
}
// newInstruction2 packs an opcode with three 8-bit params A, B and C.
func newInstruction2(opType, a, b, c int) Instruction {
	opCode := (opType << 24) | ((a & 0xFF) << 16) | ((b & 0xFF) << 8) | (c & 0xFF)
	return Instruction{opCode}
}
// newInstruction3 packs an opcode with an 8-bit param A and a signed 16-bit sBx.
func newInstruction3(opType, a int, b int16) Instruction {
	opCode := (opType << 24) | ((a & 0xFF) << 16) | (int(b) & 0xFFFF)
	return Instruction{opCode}
}
// newInstruction4 packs an opcode with an 8-bit param A and an unsigned 16-bit Bx.
func newInstruction4(opType, a int, b uint16) Instruction {
	opCode := (opType << 24) | ((a & 0xFF) << 16) | (int(b) & 0xFFFF)
	return Instruction{opCode}
}
// RefillsBx overwrites the low 16 bits (the sBx/Bx operand) of the instruction.
// Fix: the receiver must be a pointer — with the previous value receiver the
// assignment mutated a copy and the call had no observable effect.
func (i *Instruction) RefillsBx(b int) {
	i.OpCode = (i.OpCode & 0xFFFF0000) | (b & 0xFFFF)
}
// GetOpCode extracts the 8-bit opcode (bits 24-31).
func GetOpCode(i Instruction) int {
	return (i.OpCode >> 24) & 0xFF
}
// GetParamA extracts the 8-bit A param (bits 16-23).
func GetParamA(i Instruction) int {
	return (i.OpCode >> 16) & 0xFF
}
// GetParamB extracts the 8-bit B param (bits 8-15).
func GetParamB(i Instruction) int {
	return (i.OpCode >> 8) & 0xFF
}
// GetParamC extracts the 8-bit C param (bits 0-7).
// (Parameter renamed from "instruction" to "i" for consistency.)
func GetParamC(i Instruction) int {
	return i.OpCode & 0xFF
}
// GetParamsBx extracts the signed 16-bit sBx operand (bits 0-15).
func GetParamsBx(i Instruction) int16 {
	return int16(i.OpCode & 0xFFFF)
}
// GetParamBx extracts the unsigned 16-bit Bx operand (bits 0-15).
func GetParamBx(i Instruction) uint16 {
	return uint16(i.OpCode & 0xFFFF)
}
// ABCCode builds an instruction with opcode and A, B, C params.
func ABCCode(opType, a, b, c int) Instruction {
	return newInstruction2(opType, a, b, c)
}
// ABCode builds an instruction with opcode and A, B params (C is zero).
func ABCode(opType, a, b int) Instruction {
	return newInstruction2(opType, a, b, 0)
}
// ACode builds an instruction with opcode and an A param only.
func ACode(opType, a int) Instruction {
	return newInstruction2(opType, a, 0, 0)
}
// AsBxCode builds an instruction with opcode, A param and signed sBx operand.
func AsBxCode(opType, a, b int) Instruction {
	return newInstruction3(opType, a, int16(b))
}
// ABxCode builds an instruction with opcode, A param and unsigned Bx operand.
func ABxCode(opType, a, b int) Instruction {
	return newInstruction4(opType, a, uint16(b))
}
package indices
import (
"log"
"github.com/monitoring-tools/prom-elasticsearch-exporter/elasticsearch"
"github.com/monitoring-tools/prom-elasticsearch-exporter/elasticsearch/model"
"github.com/monitoring-tools/prom-elasticsearch-exporter/metrics"
"github.com/prometheus/client_golang/prometheus"
)
var (
	// labelsIndex is the label set attached to every per-index metric.
	labelsIndex = []string{"cluster", "index"}
)
// Collector is a metrics collection exposing ElasticSearch per-index stats,
// once for primary shards and once for all (primaries + replica) shards.
type Collector struct {
	esClient elasticsearch.Client
	totalMetrics []*indexMetric // metrics computed from the "total" summary
	primariesMetrics []*indexMetric // metrics computed from the "primaries" summary
}
// indexMetric couples a registered metric with the functions that extract its
// value from an index summary and build its label values.
type indexMetric struct {
	*metrics.Metric
	Value func(model.IndexSummary) float64
	LabelValues func(cluster, index string) []string
}
// indexMetricTemplate describes one per-index metric before it is
// instantiated for both the primaries and totals variants.
type indexMetricTemplate struct {
	Type prometheus.ValueType
	Name string
	Help string
	ValueExtractor func(model.IndexSummary) float64
}
// newIndexMetric builds a metric template from a prometheus value type, a
// name suffix, help text and a value-extractor function.
func newIndexMetric(t prometheus.ValueType, name, help string, valueExtractor func(model.IndexSummary) float64) *indexMetricTemplate {
	tpl := &indexMetricTemplate{}
	tpl.Type = t
	tpl.Name = name
	tpl.Help = help
	tpl.ValueExtractor = valueExtractor
	return tpl
}
// NewCollector returns a new metrics collection for per-index metrics. Each
// template below is instantiated twice: once with the "primaries_" prefix
// (primary shards only) and once with "total_" (all shards).
//
// Fix: the *_seconds metrics previously divided the millisecond counters with
// integer division BEFORE converting to float64, truncating up to 999ms; the
// conversion now happens first so sub-second precision is preserved.
func NewCollector(esClient elasticsearch.Client) *Collector {
	var indexMetricTemplates = []*indexMetricTemplate{
		newIndexMetric(
			prometheus.GaugeValue, "docs_count", "Docs count",
			func(i model.IndexSummary) float64 { return float64(i.Docs.Count) },
		),
		newIndexMetric(
			prometheus.GaugeValue, "docs_deleted", "Docs deleted",
			func(i model.IndexSummary) float64 { return float64(i.Docs.Deleted) },
		),
		newIndexMetric(
			prometheus.GaugeValue, "store_size_bytes", "The size of the store for shards",
			func(i model.IndexSummary) float64 { return float64(i.Store.SizeInBytes) },
		),
		newIndexMetric(
			prometheus.CounterValue, "search_query_time_seconds", "Total search query time in seconds",
			func(i model.IndexSummary) float64 { return float64(i.Search.QueryTimeInMillis) / 1000 },
		),
		newIndexMetric(
			prometheus.CounterValue, "search_query_total", "Total number of search queries",
			func(i model.IndexSummary) float64 { return float64(i.Search.QueryTotal) },
		),
		newIndexMetric(
			prometheus.CounterValue, "search_fetch_time_seconds", "Total search fetch time in seconds",
			func(i model.IndexSummary) float64 { return float64(i.Search.FetchTimeInMillis) / 1000 },
		),
		newIndexMetric(
			prometheus.CounterValue, "search_fetch_total", "Total number of fetches",
			func(i model.IndexSummary) float64 { return float64(i.Search.FetchTotal) },
		),
		newIndexMetric(
			prometheus.CounterValue, "indexing_index_total", "Total index calls",
			func(i model.IndexSummary) float64 { return float64(i.Indexing.IndexTotal) },
		),
		newIndexMetric(
			prometheus.CounterValue, "indexing_index_seconds_total", "Cumulative indexing time in seconds",
			func(i model.IndexSummary) float64 { return float64(i.Indexing.IndexTimeInMillis) / 1000 },
		),
		newIndexMetric(
			prometheus.CounterValue, "indexing_throttle_seconds_total", "Cumulative throttle time in seconds",
			func(i model.IndexSummary) float64 { return float64(i.Indexing.ThrottleTimeInMillis) / 1000 },
		),
		newIndexMetric(
			prometheus.GaugeValue, "segments_count", "Number of segments",
			func(i model.IndexSummary) float64 { return float64(i.Segments.Count) },
		),
		newIndexMetric(
			prometheus.GaugeValue, "segments_memory_bytes", "Segments memory in bytes",
			func(i model.IndexSummary) float64 { return float64(i.Segments.MemoryInBytes) },
		),
		newIndexMetric(
			prometheus.GaugeValue, "query_cache_memory_size_bytes", "Query cache memory usage in bytes",
			func(i model.IndexSummary) float64 { return float64(i.QueryCache.MemorySizeInBytes) },
		),
		newIndexMetric(
			prometheus.CounterValue, "query_cache_evictions", "Total evictions number from query cache",
			func(i model.IndexSummary) float64 { return float64(i.QueryCache.Evictions) },
		),
		newIndexMetric(
			prometheus.GaugeValue, "request_cache_memory_size_bytes", "Request cache memory usage in bytes",
			func(i model.IndexSummary) float64 { return float64(i.RequestCache.MemorySizeInBytes) },
		),
		newIndexMetric(
			prometheus.CounterValue, "request_cache_evictions", "Total evictions number from request cache",
			func(i model.IndexSummary) float64 { return float64(i.RequestCache.Evictions) },
		),
		newIndexMetric(
			prometheus.CounterValue, "request_cache_miss_count", "Miss count from request cache",
			func(i model.IndexSummary) float64 { return float64(i.RequestCache.MissCount) },
		),
		newIndexMetric(
			prometheus.CounterValue, "request_cache_hit_count", "Hit count from request cache",
			func(i model.IndexSummary) float64 { return float64(i.RequestCache.HitCount) },
		),
		newIndexMetric(
			prometheus.GaugeValue, "fielddata_memory_size_bytes", "Fielddata memory usage in bytes",
			func(i model.IndexSummary) float64 { return float64(i.Fielddata.MemorySizeInBytes) },
		),
		newIndexMetric(
			prometheus.CounterValue, "fielddata_evictions", "Total evictions number from fielddata",
			func(i model.IndexSummary) float64 { return float64(i.Fielddata.Evictions) },
		),
		newIndexMetric(
			prometheus.GaugeValue, "segments_index_writer_memory_size_bytes", "Index writer memory usage",
			func(i model.IndexSummary) float64 { return float64(i.Segments.IndexWriterMemoryInBytes) },
		),
		newIndexMetric(
			prometheus.GaugeValue, "merges_size_bytes", "Merges total size in bytes",
			func(i model.IndexSummary) float64 { return float64(i.Merges.TotalSizeInBytes) },
		),
		newIndexMetric(
			prometheus.CounterValue, "refresh_total", "Total refresh calls",
			func(i model.IndexSummary) float64 { return float64(i.Refresh.Total) },
		),
		newIndexMetric(
			prometheus.CounterValue, "refresh_time_seconds", "Total refresh time in seconds",
			func(i model.IndexSummary) float64 { return float64(i.Refresh.TotalTimeInMillis) / 1000 },
		),
		newIndexMetric(
			prometheus.CounterValue, "translog_operations", "Total translog operations",
			func(i model.IndexSummary) float64 { return float64(i.Translog.Operations) },
		),
		newIndexMetric(
			prometheus.GaugeValue, "translog_size_in_bytes", "Transolog size in bytes",
			func(i model.IndexSummary) float64 { return float64(i.Translog.SizeInBytes) },
		),
	}
	var subsystem = "index"
	labelValuesExtractor := func(cluster, index string) []string {
		return []string{cluster, index}
	}
	// Instantiate each template twice: primaries_* and total_*.
	primariesMetrics := make([]*indexMetric, len(indexMetricTemplates))
	totalMetrics := make([]*indexMetric, len(indexMetricTemplates))
	for i, m := range indexMetricTemplates {
		primariesMetrics[i] = &indexMetric{
			Metric: metrics.New(m.Type, subsystem, "primaries_"+m.Name, m.Help, labelsIndex),
			Value: m.ValueExtractor,
			LabelValues: labelValuesExtractor,
		}
		totalMetrics[i] = &indexMetric{
			Metric: metrics.New(m.Type, subsystem, "total_"+m.Name, m.Help, labelsIndex),
			Value: m.ValueExtractor,
			LabelValues: labelValuesExtractor,
		}
	}
	return &Collector{
		esClient: esClient,
		primariesMetrics: primariesMetrics,
		totalMetrics: totalMetrics,
	}
}
// Describe implements the prometheus.Collector interface by emitting the
// descriptor of every primaries and totals metric.
func (i *Collector) Describe(ch chan<- *prometheus.Desc) {
	for _, group := range [][]*indexMetric{i.primariesMetrics, i.totalMetrics} {
		for _, metric := range group {
			ch <- metric.Desc()
		}
	}
}
// Collect writes data to metrics channel
func (i *Collector) Collect(clusterName string, ch chan<- prometheus.Metric) {
res, err := i.esClient.Indices()
if err != nil {
log.Println("ERROR: failed to fetch indices stats: ", err)
return
}
for indexName, index := range res.Indices {
for _, metric := range i.primariesMetrics {
ch <- prometheus.MustNewConstMetric(
metric.Desc(),
metric.Type(),
metric.Value(index.Primaries),
metric.LabelValues(clusterName, indexName)...,
)
}
for _, metric := range i.totalMetrics {
ch <- prometheus.MustNewConstMetric(
metric.Desc(),
metric.Type(),
metric.Value(index.Total),
metric.LabelValues(clusterName, indexName)...,
)
}
}
} | collector/indices/collector.go | 0.709321 | 0.411436 | collector.go | starcoder |
package op
import (
"github.com/m4gshm/gollections/c"
"github.com/m4gshm/gollections/check"
"github.com/m4gshm/gollections/it/impl/it"
"github.com/m4gshm/gollections/op"
)
//Map creates an Iterator that lazily converts each element with the given converter.
func Map[From, To any, IT c.Iterable[c.Iterator[From]]](elements IT, by c.Converter[From, To]) c.Iterator[To] {
	return it.Map(elements.Begin(), by)
}
//MapFit is like Map but additionally filters the 'From' elements by the predicate before converting.
func MapFit[From, To any, IT c.Iterable[c.Iterator[From]]](elements IT, fit c.Predicate[From], by c.Converter[From, To]) c.Iterator[To] {
	return it.MapFit(elements.Begin(), fit, by)
}
//Flatt creates an Iterator that extracts slices of 'To' from each 'From' element via the Flatter and flattens them into one iterable collection.
func Flatt[From, To any, IT c.Iterable[c.Iterator[From]]](elements IT, by c.Flatter[From, To]) c.Iterator[To] {
	iter := it.Flatt(elements.Begin(), by)
	return &iter
}
//FlattFit is like Flatt but additionally filters the 'From' elements by the predicate before flattening.
func FlattFit[From, To any, IT c.Iterable[c.Iterator[From]]](elements IT, fit c.Predicate[From], flatt c.Flatter[From, To]) c.Iterator[To] {
	iter := it.FlattFit(elements.Begin(), fit, flatt)
	return &iter
}
//Filter creates an Iterator that yields only the elements satisfying the predicate.
func Filter[T any, IT c.Iterable[c.Iterator[T]]](elements IT, filter c.Predicate[T]) c.Iterator[T] {
	return it.Filter(elements.Begin(), filter)
}
//NotNil creates an Iterator that filters out nil pointer elements.
func NotNil[T any, IT c.Iterable[c.Iterator[*T]]](elements IT) c.Iterator[*T] {
	return Filter(elements, check.NotNil[T])
}
//Reduce folds all elements into a single value using the binary operator.
func Reduce[T any, IT c.Iterable[c.Iterator[T]]](elements IT, by op.Binary[T]) T {
	return it.Reduce(elements.Begin(), by)
}
//Slice collects all elements of the iterable into a slice.
func Slice[T any, IT c.Iterable[c.Iterator[T]]](elements IT) []T {
	return it.Slice[T](elements.Begin())
}
//Group groups elements to slices by a converter and returns a map.
func Group[T any, K comparable, C c.Iterable[IT], IT c.Iterator[T]](elements C, by c.Converter[T, K]) c.MapPipe[K, T, map[K][]T] {
return it.Group(elements.Begin(), by)
} | c/op/api.go | 0.80213 | 0.406921 | api.go | starcoder |
package criteria
import (
"fmt"
"github.com/viant/assertly"
"github.com/viant/toolbox"
"github.com/viant/toolbox/data"
)
//Criterion represents a single evaluation criterion: a binary comparison of
//two operands, or (when Predicate is set with criteria) a nested predicate.
type Criterion struct {
	*Predicate
	LeftOperand interface{}
	Operator string
	RightOperand interface{}
}
// expandOperand expands $-expressions in the operand against the state map;
// a nil operand is returned unchanged. (Local "opperand" typo fixed.)
func (c *Criterion) expandOperand(operand interface{}, state data.Map) interface{} {
	if operand == nil {
		return nil
	}
	return state.Expand(operand)
}
func checkUndefined(err error, left, right interface{}, operator string) error {
if err != nil {
if text, ok := left.(string); ok {
return fmt.Errorf("undefined %v in expression: %v %s %v", text, left, operator, right)
}
if text, ok := right.(string); ok {
return fmt.Errorf("undefined %v in expression: %v %s %v", text, left, operator, right)
}
}
return err
}
//Apply evaluates the criterion against the supplied state map. Dollar-prefixed
//$expressions in both operands are expanded before evaluation. Equality-style
//operators are delegated to assertly; numeric operators compare the operands
//as floats.
//
//Fix: the trailing checkUndefined call previously received rightNumber (the
//parsed float, always 0 after a parse failure) instead of the raw right
//operand, so an unresolved right-hand $expression was never reported as
//undefined. Also deduplicated the four numeric comparison branches.
func (c *Criterion) Apply(state data.Map) (bool, error) {
	if c.Predicate != nil && len(c.Predicate.Criteria) > 0 {
		return c.Predicate.Apply(state)
	}
	leftOperand := c.expandOperand(c.LeftOperand, state)
	rightOperand := c.expandOperand(c.RightOperand, state)
	// Normalize textual booleans on the left side.
	if text, ok := leftOperand.(string); ok {
		switch text {
		case "t", "T", "true", "TRUE", "True":
			leftOperand = true
		case "f", "F", "false", "FALSE", "False":
			leftOperand = false
		}
	}
	// A missing right operand defaults to the zero value matching the left
	// operand's type.
	if rightOperand == nil {
		switch leftOperand.(type) {
		case bool:
			rightOperand = false
		case string:
			rightOperand = ""
		}
	}
	var rootPath = assertly.NewDataPath("/")
	var context = assertly.NewDefaultContext()
	switch c.Operator {
	case "=", ":":
		validation, err := assertly.AssertWithContext(rightOperand, leftOperand, rootPath, context)
		if err != nil {
			return false, err
		}
		return validation.FailedCount == 0, nil
	case "!=", "":
		validation, err := assertly.AssertWithContext(leftOperand, rightOperand, rootPath, context)
		if err != nil {
			return false, err
		}
		return validation.FailedCount > 0, nil
	case ">=", "<=", ">", "<":
		leftNumber, err := toolbox.ToFloat(leftOperand)
		if err == nil {
			var rightNumber float64
			if rightNumber, err = toolbox.ToFloat(rightOperand); err == nil {
				switch c.Operator {
				case ">=":
					return leftNumber >= rightNumber, nil
				case "<=":
					return leftNumber <= rightNumber, nil
				case ">":
					return leftNumber > rightNumber, nil
				case "<":
					return leftNumber < rightNumber, nil
				}
			}
		}
		// Report unresolved $expressions using the raw operands.
		err = checkUndefined(err, leftOperand, rightOperand, c.Operator)
		return false, err
	}
	// Unknown operators evaluate to false without error, as before.
	return false, nil
}
//NewCriterion creates a new criterion
func NewCriterion(leftOperand interface{}, operator string, rightOperand interface{}) *Criterion {
return &Criterion{
LeftOperand: leftOperand,
Operator: operator,
RightOperand: rightOperand,
}
} | model/criteria/criterion.go | 0.651355 | 0.456107 | criterion.go | starcoder |
// Package bits provides a mockable wrapper for math/bits.
package bits
import (
bits "math/bits"
)
// Compile-time check that Impl satisfies Interface; the second var anchors
// the math/bits import used by the mirrored method set.
var _ Interface = &Impl{}
var _ = bits.Add
// Interface mirrors the exported API of math/bits so callers can depend on an
// interface and substitute a mock implementation in tests.
type Interface interface {
	Add(x uint, y uint, carry uint) (sum uint, carryOut uint)
	Add32(x uint32, y uint32, carry uint32) (sum uint32, carryOut uint32)
	Add64(x uint64, y uint64, carry uint64) (sum uint64, carryOut uint64)
	Div(hi uint, lo uint, y uint) (quo uint, rem uint)
	Div32(hi uint32, lo uint32, y uint32) (quo uint32, rem uint32)
	Div64(hi uint64, lo uint64, y uint64) (quo uint64, rem uint64)
	LeadingZeros(x uint) int
	LeadingZeros16(x uint16) int
	LeadingZeros32(x uint32) int
	LeadingZeros64(x uint64) int
	LeadingZeros8(x uint8) int
	Len(x uint) int
	Len16(x uint16) (n int)
	Len32(x uint32) (n int)
	Len64(x uint64) (n int)
	Len8(x uint8) int
	Mul(x uint, y uint) (hi uint, lo uint)
	Mul32(x uint32, y uint32) (hi uint32, lo uint32)
	Mul64(x uint64, y uint64) (hi uint64, lo uint64)
	OnesCount(x uint) int
	OnesCount16(x uint16) int
	OnesCount32(x uint32) int
	OnesCount64(x uint64) int
	OnesCount8(x uint8) int
	Rem(hi uint, lo uint, y uint) uint
	Rem32(hi uint32, lo uint32, y uint32) uint32
	Rem64(hi uint64, lo uint64, y uint64) uint64
	Reverse(x uint) uint
	Reverse16(x uint16) uint16
	Reverse32(x uint32) uint32
	Reverse64(x uint64) uint64
	Reverse8(x uint8) uint8
	ReverseBytes(x uint) uint
	ReverseBytes16(x uint16) uint16
	ReverseBytes32(x uint32) uint32
	ReverseBytes64(x uint64) uint64
	RotateLeft(x uint, k int) uint
	RotateLeft16(x uint16, k int) uint16
	RotateLeft32(x uint32, k int) uint32
	RotateLeft64(x uint64, k int) uint64
	RotateLeft8(x uint8, k int) uint8
	Sub(x uint, y uint, borrow uint) (diff uint, borrowOut uint)
	Sub32(x uint32, y uint32, borrow uint32) (diff uint32, borrowOut uint32)
	Sub64(x uint64, y uint64, borrow uint64) (diff uint64, borrowOut uint64)
	TrailingZeros(x uint) int
	TrailingZeros16(x uint16) int
	TrailingZeros32(x uint32) int
	TrailingZeros64(x uint64) int
	TrailingZeros8(x uint8) int
}
type Impl struct{}
// The methods below delegate one-to-one to the identically named math/bits
// functions; see the math/bits documentation for their semantics.
func (*Impl) Add(x uint, y uint, carry uint) (sum uint, carryOut uint) {
	return bits.Add(x, y, carry)
}
func (*Impl) Add32(x uint32, y uint32, carry uint32) (sum uint32, carryOut uint32) {
	return bits.Add32(x, y, carry)
}
func (*Impl) Add64(x uint64, y uint64, carry uint64) (sum uint64, carryOut uint64) {
	return bits.Add64(x, y, carry)
}
func (*Impl) Div(hi uint, lo uint, y uint) (quo uint, rem uint) {
	return bits.Div(hi, lo, y)
}
func (*Impl) Div32(hi uint32, lo uint32, y uint32) (quo uint32, rem uint32) {
	return bits.Div32(hi, lo, y)
}
func (*Impl) Div64(hi uint64, lo uint64, y uint64) (quo uint64, rem uint64) {
	return bits.Div64(hi, lo, y)
}
func (*Impl) LeadingZeros(x uint) int {
	return bits.LeadingZeros(x)
}
func (*Impl) LeadingZeros16(x uint16) int {
	return bits.LeadingZeros16(x)
}
func (*Impl) LeadingZeros32(x uint32) int {
	return bits.LeadingZeros32(x)
}
func (*Impl) LeadingZeros64(x uint64) int {
	return bits.LeadingZeros64(x)
}
func (*Impl) LeadingZeros8(x uint8) int {
	return bits.LeadingZeros8(x)
}
func (*Impl) Len(x uint) int {
	return bits.Len(x)
}
func (*Impl) Len16(x uint16) (n int) {
	return bits.Len16(x)
}
func (*Impl) Len32(x uint32) (n int) {
	return bits.Len32(x)
}
func (*Impl) Len64(x uint64) (n int) {
	return bits.Len64(x)
}
func (*Impl) Len8(x uint8) int {
	return bits.Len8(x)
}
func (*Impl) Mul(x uint, y uint) (hi uint, lo uint) {
	return bits.Mul(x, y)
}
func (*Impl) Mul32(x uint32, y uint32) (hi uint32, lo uint32) {
	return bits.Mul32(x, y)
}
func (*Impl) Mul64(x uint64, y uint64) (hi uint64, lo uint64) {
	return bits.Mul64(x, y)
}
func (*Impl) OnesCount(x uint) int {
	return bits.OnesCount(x)
}
func (*Impl) OnesCount16(x uint16) int {
	return bits.OnesCount16(x)
}
func (*Impl) OnesCount32(x uint32) int {
	return bits.OnesCount32(x)
}
func (*Impl) OnesCount64(x uint64) int {
	return bits.OnesCount64(x)
}
func (*Impl) OnesCount8(x uint8) int {
	return bits.OnesCount8(x)
}
func (*Impl) Rem(hi uint, lo uint, y uint) uint {
	return bits.Rem(hi, lo, y)
}
func (*Impl) Rem32(hi uint32, lo uint32, y uint32) uint32 {
	return bits.Rem32(hi, lo, y)
}
func (*Impl) Rem64(hi uint64, lo uint64, y uint64) uint64 {
	return bits.Rem64(hi, lo, y)
}
func (*Impl) Reverse(x uint) uint {
	return bits.Reverse(x)
}
func (*Impl) Reverse16(x uint16) uint16 {
	return bits.Reverse16(x)
}
func (*Impl) Reverse32(x uint32) uint32 {
	return bits.Reverse32(x)
}
func (*Impl) Reverse64(x uint64) uint64 {
	return bits.Reverse64(x)
}
func (*Impl) Reverse8(x uint8) uint8 {
	return bits.Reverse8(x)
}
func (*Impl) ReverseBytes(x uint) uint {
	return bits.ReverseBytes(x)
}
func (*Impl) ReverseBytes16(x uint16) uint16 {
return bits.ReverseBytes16(x)
}
func (*Impl) ReverseBytes32(x uint32) uint32 {
return bits.ReverseBytes32(x)
}
func (*Impl) ReverseBytes64(x uint64) uint64 {
return bits.ReverseBytes64(x)
}
func (*Impl) RotateLeft(x uint, k int) uint {
return bits.RotateLeft(x, k)
}
func (*Impl) RotateLeft16(x uint16, k int) uint16 {
return bits.RotateLeft16(x, k)
}
func (*Impl) RotateLeft32(x uint32, k int) uint32 {
return bits.RotateLeft32(x, k)
}
func (*Impl) RotateLeft64(x uint64, k int) uint64 {
return bits.RotateLeft64(x, k)
}
func (*Impl) RotateLeft8(x uint8, k int) uint8 {
return bits.RotateLeft8(x, k)
}
func (*Impl) Sub(x uint, y uint, borrow uint) (diff uint, borrowOut uint) {
return bits.Sub(x, y, borrow)
}
func (*Impl) Sub32(x uint32, y uint32, borrow uint32) (diff uint32, borrowOut uint32) {
return bits.Sub32(x, y, borrow)
}
func (*Impl) Sub64(x uint64, y uint64, borrow uint64) (diff uint64, borrowOut uint64) {
return bits.Sub64(x, y, borrow)
}
func (*Impl) TrailingZeros(x uint) int {
return bits.TrailingZeros(x)
}
func (*Impl) TrailingZeros16(x uint16) int {
return bits.TrailingZeros16(x)
}
func (*Impl) TrailingZeros32(x uint32) int {
return bits.TrailingZeros32(x)
}
func (*Impl) TrailingZeros64(x uint64) int {
return bits.TrailingZeros64(x)
}
func (*Impl) TrailingZeros8(x uint8) int {
return bits.TrailingZeros8(x)
} | math/bits/bits.go | 0.66888 | 0.477676 | bits.go | starcoder |
package number
import (
"math"
"math/big"
)
// SquareNumber returns the n-th square number, S(n) = n*n.
// e.g. 1, 4, 9, 16, 25, ...
func SquareNumber(n int) int {
    return n * n
}
// IsSquareNumber determines if a number is a square number.
// Negative numbers are never square numbers.
func IsSquareNumber(n int) bool {
    if n < 0 {
        return false
    }
    // Round the float square root to the nearest integer and verify with exact
    // integer arithmetic; the original float equality test
    // (t == math.Floor(t)) can misclassify large n due to float64 precision.
    r := int(math.Round(math.Sqrt(float64(n))))
    return r*r == n
}
// TriangleNumber returns the n-th triangle number, T(n) = n(n+1)/2.
// e.g. 1, 3, 6, 10, 15, ...
func TriangleNumber(n int) int {
    return n * (n + 1) / 2
}
// IsTriangleNumber determines if a number is a triangle number.
// n is triangular exactly when 8n+1 is a perfect square.
func IsTriangleNumber(n int) bool {
    return IsSquareNumber(8*n + 1)
}
// PentagonNumber returns the n-th pentagonal number, P(n) = n(3n-1)/2.
// e.g. 1, 5, 12, 22, 35, ...
func PentagonNumber(n int) int {
    return n * (3*n - 1) / 2
}
// IsPentagonNumber determines if a number is a pentagonal number.
// Inverting P(k) = k(3k-1)/2 gives k = (sqrt(24n+1)+1)/6, so n is pentagonal
// exactly when 24n+1 is a perfect square whose root r satisfies (r+1) % 6 == 0.
func IsPentagonNumber(n int) bool {
    if n < 1 {
        // P(1) = 1 is the smallest pentagonal number.
        return false
    }
    // Verify with exact integer arithmetic instead of the original float
    // equality test, which can misclassify large n due to float64 precision.
    r := int(math.Round(math.Sqrt(24*float64(n) + 1)))
    return r*r == 24*n+1 && (r+1)%6 == 0
}
// HexagonNumber returns the n-th hexagonal number, H(n) = n(2n-1).
// e.g. 1, 6, 15, 28, 45, ...
func HexagonNumber(n int) int {
    return n * (2*n - 1)
}
// IsHexagonNumber determines if a number is a hexagonal number.
// Inverting H(k) = k(2k-1) gives k = (sqrt(8n+1)+1)/4, so n is hexagonal
// exactly when 8n+1 is a perfect square whose root r satisfies (r+1) % 4 == 0.
func IsHexagonNumber(n int) bool {
    if n < 1 {
        // H(1) = 1 is the smallest hexagonal number.
        return false
    }
    // Verify with exact integer arithmetic instead of the original float
    // equality test, which can misclassify large n due to float64 precision.
    r := int(math.Round(math.Sqrt(8*float64(n) + 1)))
    return r*r == 8*n+1 && (r+1)%4 == 0
}
// HeptagonalNumber returns the n-th heptagonal number, Hp(n) = n(5n-3)/2.
// e.g. 1, 7, 18, 34, 55, ...
func HeptagonalNumber(n int) int {
    return n * (5*n - 3) / 2
}
// IsHeptagonalNumber determines if a number is a heptagonal number.
// Inverting Hp(k) = k(5k-3)/2 gives k = (sqrt(40n+9)+3)/10, so n is heptagonal
// exactly when 40n+9 is a perfect square whose root r satisfies (r+3) % 10 == 0.
func IsHeptagonalNumber(n int) bool {
    if n < 1 {
        // Hp(1) = 1 is the smallest heptagonal number.
        return false
    }
    // Verify with exact integer arithmetic instead of the original float
    // equality test, which can misclassify large n due to float64 precision.
    r := int(math.Round(math.Sqrt(40*float64(n) + 9)))
    return r*r == 40*n+9 && (r+3)%10 == 0
}
// OctagonalNumber returns the n-th octagonal number, O(n) = n(3n-2).
// e.g. 1, 8, 21, 40, 65, ...
func OctagonalNumber(n int) int {
    return n * (3*n - 2)
}
// IsOctagonalNumber determines if a number is an octagonal number.
// Inverting O(k) = k(3k-2) gives k = (sqrt(3n+1)+1)/3, so n is octagonal
// exactly when 3n+1 is a perfect square whose root r satisfies (r+1) % 3 == 0.
func IsOctagonalNumber(n int) bool {
    if n < 1 {
        // O(1) = 1 is the smallest octagonal number.
        return false
    }
    // Verify with exact integer arithmetic instead of the original float
    // equality test, which can misclassify large n due to float64 precision.
    r := int(math.Round(math.Sqrt(3*float64(n) + 1)))
    return r*r == 3*n+1 && (r+1)%3 == 0
}
// Catalan returns the n-th Catalan number, C(n) = binomial(2n, n) / (n + 1),
// computed exactly with arbitrary-precision integers.
func Catalan(n int64) *big.Int {
    result := new(big.Int).Binomial(2*n, n)
    return result.Quo(result, big.NewInt(n+1))
}
package simplecsv
import (
"regexp"
"strings"
)
// FindInColumn returns a slice with the rownumbers where the "word" is in the
// columnPosition, compared case-insensitively. All rows, including the header
// row, are searched. If the column is not valid (or the CSV is empty) it
// returns an empty slice and a second false value.
func (s SimpleCsv) FindInColumn(columnPosition int, word string) ([]int, bool) {
    results := []int{}
    // Guard against an empty CSV as well as an out-of-range column; the
    // original indexed s[0] unconditionally and panicked on empty input.
    if len(s) == 0 || columnPosition < 0 || columnPosition >= len(s[0]) {
        return results, false
    }
    for i := range s {
        // strings.EqualFold compares case-insensitively without allocating
        // lowered copies of both operands on every row.
        if strings.EqualFold(s[i][columnPosition], word) {
            results = append(results, i)
        }
    }
    return results, true
}
// FindInField returns a slice with the rownumbers where the "word" is under
// the named column, compared case-insensitively. The header row (row 0) is
// skipped. If the column name does not exist it returns an empty slice and a
// second false value.
func (s SimpleCsv) FindInField(columnName string, word string) ([]int, bool) {
    results := []int{}
    columnPosition := s.GetHeaderPosition(columnName)
    if columnPosition == -1 {
        return results, false
    }
    // Start at row 1 so the header row itself is never reported as a match.
    for i := 1; i < len(s); i++ {
        // strings.EqualFold compares case-insensitively without allocating
        // lowered copies of both operands on every row.
        if strings.EqualFold(s[i][columnPosition], word) {
            results = append(results, i)
        }
    }
    return results, true
}
// MatchInColumn returns a slice with the rownumbers where the regular
// expression applies in the columnPosition. All rows, including the header
// row, are tested. If the column or regular expression are not valid (or the
// CSV is empty) it returns an empty slice and a second false value.
func (s SimpleCsv) MatchInColumn(columnPosition int, regularexpression string) ([]int, bool) {
    results := []int{}
    r, err := regexp.Compile(regularexpression)
    if err != nil {
        return results, false
    }
    // Guard against an empty CSV as well as an out-of-range column; the
    // original indexed s[0] unconditionally and panicked on empty input.
    if len(s) == 0 || columnPosition < 0 || columnPosition >= len(s[0]) {
        return results, false
    }
    for i := range s {
        if r.MatchString(s[i][columnPosition]) {
            results = append(results, i)
        }
    }
    return results, true
}
// MatchInField returns a slice with the rownumbers where the regular expression applies in the column name
func (s SimpleCsv) MatchInField(columnName string, regularexpression string) ([]int, bool) {
columnPosition := s.GetHeaderPosition(columnName)
var ok bool
results := []int{}
r, u := regexp.Compile(regularexpression)
if columnPosition == -1 {
ok = false
return results, ok
}
if u != nil {
ok = false
return results, ok
}
ok = true
numberOfRows := len(s)
for i := 1; i < numberOfRows; i++ {
if r.MatchString(s[i][columnPosition]) {
results = append(results, i)
}
}
return results, ok
} | find.go | 0.78842 | 0.458046 | find.go | starcoder |
package kson
import (
"errors"
"encoding/json"
"reflect"
"strconv"
"log"
)
// TypeTransform wraps an arbitrary decoded value (typically produced by a
// JSON decoder) and offers typed accessors with optional defaults.
type TypeTransform struct {
    data interface{} // the wrapped value
    err  error       // last conversion error recorded (set by ToString on failure)
}
// NewTypeTransform wraps data in a new TypeTransform.
func NewTypeTransform(data interface{}) *TypeTransform {
    return &TypeTransform{data: data}
}
// Interface returns the wrapped value unchanged.
func (t *TypeTransform) Interface() interface{} {
    return t.data
}
// Bool guarantees the return of a `bool` (with optional default) and error.
// Passing more than one default panics.
func (t *TypeTransform) Bool(values ...bool) (bool, error) {
    def := false
    switch len(values) {
    case 0:
        // keep the zero-value default
    case 1:
        def = values[0]
    default:
        log.Panicf("Bool() received too many arguments %d", len(values))
    }
    if b, ok := t.data.(bool); ok {
        return b, nil
    }
    return def, errors.New("type assertion to bool failed")
}
// Float64 guarantees the return of a `float64` (with optional default) and error.
// Numeric values of any built-in integer or float type, as well as
// json.Number, are converted; anything else yields the default and an error.
func (t *TypeTransform) Float64(values ...float64) (float64, error) {
    var def float64
    if len(values) > 1 {
        log.Panicf("Float64() received too many arguments %d", len(values))
    }
    if len(values) == 1 {
        def = values[0]
    }
    switch t.data.(type) {
    case json.Number:
        return t.data.(json.Number).Float64()
    case float32, float64:
        // reflect handles both float widths with a single code path.
        return reflect.ValueOf(t.data).Float(), nil
    case int, int8, int16, int32, int64:
        return float64(reflect.ValueOf(t.data).Int()), nil
    case uint, uint8, uint16, uint32, uint64:
        return float64(reflect.ValueOf(t.data).Uint()), nil
    }
    return def, errors.New("invalid value type")
}
// Int guarantees the return of an `int` (with optional default) and error.
// Numeric values of any built-in integer or float type, as well as
// json.Number, are converted (floats are truncated); anything else yields the
// default and an error.
func (t *TypeTransform) Int(values ...int) (int, error) {
    var def int
    if len(values) > 1 {
        log.Panicf("Int() received too many arguments %d", len(values))
    }
    if len(values) == 1 {
        def = values[0]
    }
    switch t.data.(type) {
    case json.Number:
        i, err := t.data.(json.Number).Int64()
        return int(i), err
    case float32, float64:
        return int(reflect.ValueOf(t.data).Float()), nil
    case int, int8, int16, int32, int64:
        return int(reflect.ValueOf(t.data).Int()), nil
    case uint, uint8, uint16, uint32, uint64:
        return int(reflect.ValueOf(t.data).Uint()), nil
    }
    return def, errors.New("invalid value type")
}
// Int64 guarantees the return of an `int64` (with optional default) and error.
// Numeric values of any built-in integer or float type, as well as
// json.Number, are converted (floats are truncated); anything else yields the
// default and an error.
func (t *TypeTransform) Int64(values ...int64) (int64, error) {
    var def int64
    if len(values) > 1 {
        // The original panic message referred to "MustInt64()", a method that
        // does not exist here; report the actual method name.
        log.Panicf("Int64() received too many arguments %d", len(values))
    }
    if len(values) == 1 {
        def = values[0]
    }
    switch t.data.(type) {
    case json.Number:
        return t.data.(json.Number).Int64()
    case float32, float64:
        return int64(reflect.ValueOf(t.data).Float()), nil
    case int, int8, int16, int32, int64:
        return reflect.ValueOf(t.data).Int(), nil
    case uint, uint8, uint16, uint32, uint64:
        return int64(reflect.ValueOf(t.data).Uint()), nil
    }
    return def, errors.New("invalid value type")
}
// Uint64 guarantees the return of a `uint64` (with optional default) and error.
// Numeric values of any built-in integer or float type, as well as
// json.Number, are converted (floats are truncated); anything else yields the
// default and an error.
func (t *TypeTransform) Uint64(values ...uint64) (uint64, error) {
    var def uint64
    if len(values) > 1 {
        // The original panic message referred to "MustUint64()", a method that
        // does not exist here; report the actual method name.
        log.Panicf("Uint64() received too many arguments %d", len(values))
    }
    if len(values) == 1 {
        def = values[0]
    }
    switch t.data.(type) {
    case json.Number:
        return strconv.ParseUint(t.data.(json.Number).String(), 10, 64)
    case float32, float64:
        return uint64(reflect.ValueOf(t.data).Float()), nil
    case int, int8, int16, int32, int64:
        return uint64(reflect.ValueOf(t.data).Int()), nil
    case uint, uint8, uint16, uint32, uint64:
        return reflect.ValueOf(t.data).Uint(), nil
    }
    return def, errors.New("invalid value type")
}
// String guarantees the return of a `string` (with optional default) and error.
// Only values that are already strings succeed; no numeric formatting is
// attempted (see ToString for a lenient conversion).
func (t *TypeTransform) String(values ...string) (string, error) {
    var def string = ""
    if len(values) > 1 {
        log.Panicf("String() received too many arguments %d", len(values))
    }
    if len(values) == 1 {
        def = values[0]
    }
    if s, ok := (t.data).(string); ok {
        return s, nil
    }
    return def, errors.New("type assertion to string failed")
}
// Bytes guarantees the return of a `[]byte` (with optional default) and error.
// Only string values succeed; the string's bytes are copied into the result.
func (t *TypeTransform) Bytes(values ...[]byte) ([]byte, error) {
    var def []byte
    if len(values) > 1 {
        log.Panicf("Bytes() received too many arguments %d", len(values))
    }
    if len(values) == 1 {
        def = values[0]
    }
    if s, ok := (t.data).(string); ok {
        return []byte(s), nil
    }
    return def, errors.New("type assertion to []byte failed")
}
// Map guarantees the return of a `map[string]interface{}` (with optional
// default) and error. Only values that are already such a map succeed.
func (t *TypeTransform) Map(values ...map[string]interface{}) (map[string]interface{}, error) {
    var def map[string]interface{}
    if len(values) > 1 {
        log.Panicf("Map() received too many arguments %d", len(values))
    }
    if len(values) == 1 {
        def = values[0]
    }
    if m, ok := (t.data).(map[string]interface{}); ok {
        return m, nil
    }
    return def, errors.New("type assertion to map[string]interface{} failed")
}
// Array guarantees the return of an `[]interface{}` (with optional default)
// and error. Only values that are already such a slice succeed.
func (t *TypeTransform) Array(values ...[]interface{}) ([]interface{}, error) {
    var def []interface{}
    if len(values) > 1 {
        log.Panicf("Array() received too many arguments %d", len(values))
    }
    if len(values) == 1 {
        def = values[0]
    }
    if a, ok := (t.data).([]interface{}); ok {
        return a, nil
    }
    return def, errors.New("type assertion to []interface{} failed")
}
// StringArray guarantees the return of a `[]string` (with optional default)
// and error. The wrapped value must be an []interface{} whose elements are
// all strings; nil elements become empty strings.
func (t *TypeTransform) StringArray(values ...[]string) ([]string, error) {
    var def []string
    if len(values) > 1 {
        log.Panicf("StringArray() received too many arguments %d", len(values))
    } else if len(values) == 1 {
        def = values[0]
    }
    arr, err := t.Array()
    if err != nil {
        return def, err
    }
    retArr := make([]string, 0, len(arr))
    for _, a := range arr {
        if a == nil {
            retArr = append(retArr, "")
            continue
        }
        s, ok := a.(string)
        if !ok {
            // Previously this returned nil, silently ignoring the
            // caller-supplied default; return the default as the other
            // failure paths in this type do.
            return def, errors.New("type assertion to []string failed")
        }
        retArr = append(retArr, s)
    }
    return retArr, nil
}
// ToString converts the wrapped value to a string, trying in order: string,
// float, int64 and bool formatting. On failure it records an error on the
// transform and returns the optional default (empty string otherwise).
func (t *TypeTransform) ToString(values ...string) string {
    var def string
    if len(values) > 1 {
        // The original panic message referred to "String()"; report the
        // actual method name.
        log.Panicf("ToString() received too many arguments %d", len(values))
    } else if len(values) == 1 {
        def = values[0]
    }
    if val, err := t.String(); err == nil {
        return val
    } else if val, err := t.Float64(); err == nil {
        return strconv.FormatFloat(val, 'f', -1, 64)
    } else if val, err := t.Int64(); err == nil {
        return strconv.FormatInt(val, 10)
    } else if val, err := t.Bool(); err == nil {
        return strconv.FormatBool(val)
    }
    t.err = errors.New("type assertion to string failed")
    return def
}
// ToBool converts the wrapped value to a bool, accepting actual bools and
// strings that strconv.ParseBool understands. On failure the optional default
// (false otherwise) is returned.
func (t *TypeTransform) ToBool(values ...bool) bool {
    var def bool
    if len(values) > 1 {
        // The original panic message referred to "String()"; report the
        // actual method name.
        log.Panicf("ToBool() received too many arguments %d", len(values))
    } else if len(values) == 1 {
        def = values[0]
    }
    if val, err := t.Bool(); err == nil {
        return val
    } else if val, err := t.String(); err == nil {
        if bval, berr := strconv.ParseBool(val); berr == nil {
            return bval
        }
    }
    return def
}
// ToInt converts the wrapped value to an int, accepting numeric values and
// decimal strings. On failure the optional default (0 otherwise) is returned.
func (t *TypeTransform) ToInt(values ...int) int {
    var def int
    if len(values) > 1 {
        // The original panic message referred to "String()"; report the
        // actual method name.
        log.Panicf("ToInt() received too many arguments %d", len(values))
    } else if len(values) == 1 {
        def = values[0]
    }
    if val, err := t.Int(); err == nil {
        return val
    } else if val, err := t.String(); err == nil {
        if ival, ierr := strconv.ParseInt(val, 10, 64); ierr == nil {
            return int(ival)
        }
    }
    return def
}
// ToInt64 converts the wrapped value to an int64, accepting numeric values
// and decimal strings. On failure the optional default (0 otherwise) is
// returned.
func (t *TypeTransform) ToInt64(values ...int64) int64 {
    var def int64
    if len(values) > 1 {
        // The original panic message referred to "String()"; report the
        // actual method name.
        log.Panicf("ToInt64() received too many arguments %d", len(values))
    } else if len(values) == 1 {
        def = values[0]
    }
    if val, err := t.Int64(); err == nil {
        return val
    } else if val, err := t.String(); err == nil {
        if ival, ierr := strconv.ParseInt(val, 10, 64); ierr == nil {
            return ival
        }
    }
    return def
}
// ToFloat converts the wrapped value to a float64, accepting numeric values
// and decimal strings. On failure the optional default (0 otherwise) is
// returned.
func (t *TypeTransform) ToFloat(values ...float64) float64 {
    var def float64
    if len(values) > 1 {
        // The original panic message referred to "String()"; report the
        // actual method name.
        log.Panicf("ToFloat() received too many arguments %d", len(values))
    } else if len(values) == 1 {
        def = values[0]
    }
    if val, err := t.Float64(); err == nil {
        return val
    } else if val, err := t.String(); err == nil {
        if fval, ferr := strconv.ParseFloat(val, 64); ferr == nil {
            return fval
        }
    }
    return def
}
func (t *TypeTransform)ToMap(values ... map[string]interface{}) map[string]interface{} {
var def map[string]interface{}
if len(values) > 1 {
log.Panicf("String() received too many arguments %d", len(values))
}else if len(values) == 1 {
def = values[0]
}
if val,err := t.Map();err == nil {
return val
}
return def
} | kson/transform.go | 0.667364 | 0.643805 | transform.go | starcoder |
package algebra
import (
"fmt"
"math"
)
// Matrix4 is a 4x4 matrix of MnFloat values stored as sixteen named fields
// Mrc, where r is the row and c is the column. Translation components are
// written into M30..M32 by NewTranslationMatrix4 (row-vector convention —
// TODO confirm against the renderer that consumes these matrices).
type Matrix4 struct {
    M00 MnFloat
    M01 MnFloat
    M02 MnFloat
    M03 MnFloat
    M10 MnFloat
    M11 MnFloat
    M12 MnFloat
    M13 MnFloat
    M20 MnFloat
    M21 MnFloat
    M22 MnFloat
    M23 MnFloat
    M30 MnFloat
    M31 MnFloat
    M32 MnFloat
    M33 MnFloat
}
// IdentityMatrix4 is the 4x4 identity matrix: ones on the diagonal, zero
// values (the MnFloat zero) everywhere else.
var IdentityMatrix4 = Matrix4{
    M00: MnOne,
    M11: MnOne,
    M22: MnOne,
    M33: MnOne,
}
// NewRotationMatrix4 builds a rotation matrix whose first three rows are the
// given right, up and forward basis vectors; the fourth row is (0, 0, 0, 1).
func NewRotationMatrix4(right Vector3, up Vector3, forward Vector3) Matrix4 {
    // Fields not listed default to the zero value, so only the basis-vector
    // entries and the homogeneous 1 need to be spelled out.
    return Matrix4{
        M00: right.X, M01: right.Y, M02: right.Z,
        M10: up.X, M11: up.Y, M12: up.Z,
        M20: forward.X, M21: forward.Y, M22: forward.Z,
        M33: 1,
    }
}
// NewScaleMatrix4 builds a matrix that scales by x, y and z along the three
// axes (diagonal matrix with a homogeneous 1 in M33).
func NewScaleMatrix4(x MnFloat, y MnFloat, z MnFloat) Matrix4 {
    return Matrix4{
        M00: x,
        M11: y,
        M22: z,
        M33: MnOne,
    }
}
// NewTranslationMatrix4 builds an identity matrix with the translation
// (x, y, z) placed in the fourth row (M30..M32).
func NewTranslationMatrix4(x MnFloat, y MnFloat, z MnFloat) Matrix4 {
    return Matrix4{
        M00: MnOne,
        M11: MnOne,
        M22: MnOne,
        M30: x,
        M31: y,
        M32: z,
        M33: MnOne,
    }
}
// NewPerspectiveMatrix4 builds a perspective projection matrix from a field
// of view, an aspect ratio and near/far clip distances. fov is passed to
// math.Tan, so it is presumably in radians — TODO confirm with callers.
func NewPerspectiveMatrix4(fov MnFloat, aspect MnFloat, near MnFloat, far MnFloat) Matrix4 {
    // f is the focal length (cot of half the field of view); nf folds the
    // 1/(near-far) factor used by both depth terms.
    f := MnOne / math.Tan(fov / 2.0)
    nf := MnOne / (near - far)
    return Matrix4{
        M00: f / aspect,
        M11: f,
        M22: (far + near) * nf,
        M23: -MnOne,
        M32: 2 * far * near * nf,
    }
}
// Transpose returns a new matrix with rows and columns swapped
// (result[r][c] = m[c][r]); the receiver is not modified.
func (m Matrix4) Transpose() Matrix4 {
    return Matrix4{
        M00: m.M00,
        M01: m.M10,
        M02: m.M20,
        M03: m.M30,
        M10: m.M01,
        M11: m.M11,
        M12: m.M21,
        M13: m.M31,
        M20: m.M02,
        M21: m.M12,
        M22: m.M22,
        M23: m.M32,
        M30: m.M03,
        M31: m.M13,
        M32: m.M23,
        M33: m.M33,
    }
}
// Invert returns the inverse of the matrix, computed via 2x2 sub-determinant
// cofactor expansion. If the matrix is singular (determinant exactly zero)
// the zero matrix is returned; the receiver is not modified.
func (m Matrix4) Invert() Matrix4 {
    // a00..a05 are the 2x2 determinants of the top two rows, a06..a11 those
    // of the bottom two rows; the full determinant and every cofactor below
    // are linear combinations of these twelve values.
    a00 := m.M00 * m.M11 - m.M01 * m.M10
    a01 := m.M00 * m.M12 - m.M02 * m.M10
    a02 := m.M00 * m.M13 - m.M03 * m.M10
    a03 := m.M01 * m.M12 - m.M02 * m.M11
    a04 := m.M01 * m.M13 - m.M03 * m.M11
    a05 := m.M02 * m.M13 - m.M03 * m.M12
    a06 := m.M20 * m.M31 - m.M21 * m.M30
    a07 := m.M20 * m.M32 - m.M22 * m.M30
    a08 := m.M20 * m.M33 - m.M23 * m.M30
    a09 := m.M21 * m.M32 - m.M22 * m.M31
    a10 := m.M21 * m.M33 - m.M23 * m.M31
    a11 := m.M22 * m.M33 - m.M23 * m.M32
    determinant := a00 * a11 - a01 * a10 + a02 * a09 + a03 * a08 - a04 * a07 + a05 * a06
    if determinant == MnZero { // TODO: Shouldn't compare with 0
        return Matrix4{}
    }
    // Multiply by the reciprocal once instead of dividing sixteen times.
    determinant = MnOne / determinant
    return Matrix4{
        M00: (m.M11 * a11 - m.M12 * a10 + m.M13 * a09) * determinant,
        M01: (m.M02 * a10 - m.M01 * a11 - m.M03 * a09) * determinant,
        M02: (m.M31 * a05 - m.M32 * a04 + m.M33 * a03) * determinant,
        M03: (m.M22 * a04 - m.M21 * a05 - m.M23 * a03) * determinant,
        M10: (m.M12 * a08 - m.M10 * a11 - m.M13 * a07) * determinant,
        M11: (m.M00 * a11 - m.M02 * a08 + m.M03 * a07) * determinant,
        M12: (m.M32 * a02 - m.M30 * a05 - m.M33 * a01) * determinant,
        M13: (m.M20 * a05 - m.M22 * a02 + m.M23 * a01) * determinant,
        M20: (m.M10 * a10 - m.M11 * a08 + m.M13 * a06) * determinant,
        M21: (m.M01 * a08 - m.M00 * a10 - m.M03 * a06) * determinant,
        M22: (m.M30 * a04 - m.M31 * a02 + m.M33 * a00) * determinant,
        M23: (m.M21 * a02 - m.M20 * a04 - m.M23 * a00) * determinant,
        M30: (m.M11 * a07 - m.M10 * a09 - m.M12 * a06) * determinant,
        M31: (m.M00 * a09 - m.M01 * a07 + m.M02 * a06) * determinant,
        M32: (m.M31 * a01 - m.M30 * a03 - m.M32 * a00) * determinant,
        M33: (m.M20 * a03 - m.M21 * a01 + m.M22 * a00) * determinant,
    }
}
// Determinant returns the determinant of the matrix, computed from the same
// twelve 2x2 sub-determinants used by Invert.
func (m Matrix4) Determinant() MnFloat {
    a00 := m.M00 * m.M11 - m.M01 * m.M10
    a01 := m.M00 * m.M12 - m.M02 * m.M10
    a02 := m.M00 * m.M13 - m.M03 * m.M10
    a03 := m.M01 * m.M12 - m.M02 * m.M11
    a04 := m.M01 * m.M13 - m.M03 * m.M11
    a05 := m.M02 * m.M13 - m.M03 * m.M12
    a06 := m.M20 * m.M31 - m.M21 * m.M30
    a07 := m.M20 * m.M32 - m.M22 * m.M30
    a08 := m.M20 * m.M33 - m.M23 * m.M30
    a09 := m.M21 * m.M32 - m.M22 * m.M31
    a10 := m.M21 * m.M33 - m.M23 * m.M31
    a11 := m.M22 * m.M33 - m.M23 * m.M32
    return a00 * a11 - a01 * a10 + a02 * a09 + a03 * a08 - a04 * a07 + a05 * a06
}
func (m Matrix4) Dump() {
fmt.Println(fmt.Sprintf("M00 = %f, M01 = %f, M02 = %f, M03 = %f", m.M00, m.M01, m.M02, m.M03))
fmt.Println(fmt.Sprintf("M10 = %f, M11 = %f, M12 = %f, M13 = %f", m.M10, m.M11, m.M12, m.M13))
fmt.Println(fmt.Sprintf("M20 = %f, M21 = %f, M22 = %f, M23 = %f", m.M20, m.M21, m.M22, m.M23))
fmt.Println(fmt.Sprintf("M30 = %f, M31 = %f, M32 = %f, M33 = %f", m.M30, m.M31, m.M32, m.M33))
} | algebra/matrix4.go | 0.697815 | 0.474449 | matrix4.go | starcoder |
package electreIII
import (
"fmt"
"github.com/Azbesciak/RealDecisionMaker/lib/utils"
"sort"
"strings"
)
// Matrix is a square matrix of float64 values stored flat in row-major order.
type Matrix struct {
    Size int
    Data []float64
}
// NewMatrix builds a Matrix from a square slice of rows, flattening it into
// row-major storage.
func NewMatrix(values *[][]float64) *Matrix {
    rows := *values
    size := len(rows)
    flat := make([]float64, size*size)
    for r, row := range rows {
        // The destination window caps the copy at one row's worth of cells.
        copy(flat[r*size:(r+1)*size], row)
    }
    return &Matrix{Size: size, Data: flat}
}
// At returns the value at the given row and column of the row-major storage.
func (m *Matrix) At(row, col int) float64 {
    return m.Data[row*m.Size+col]
}
// calcCoords converts a flat row-major index back into (row, col) coordinates
// for a square matrix of the given size.
func calcCoords(index, size int) (row, col int) {
    return index / size, index % size
}
// Matches counts, per group, the entries satisfying the predicate.
// groupEvaluator maps each cell's (row, col) to a group index in
// [0, groupsNumber); the returned slice holds the count of matching entries
// for every group.
func (m *Matrix) Matches(groupsNumber int, groupEvaluator func(row, col int) int, predicate func(value float64) bool) []int {
    groups := make([]int, groupsNumber)
    for i, v := range m.Data {
        groupIndex := groupEvaluator(calcCoords(i, m.Size))
        if predicate(v) {
            groups[groupIndex] += 1
        }
    }
    return groups
}
// MatchesInRow returns, for each row, the number of entries in that row
// satisfying the predicate.
func (m *Matrix) MatchesInRow(predicate func(value float64) bool) []int {
    return m.Matches(m.Size, func(row, col int) int {
        return row
    }, predicate)
}
// MatchesInColumn returns, for each column, the number of entries in that
// column satisfying the predicate.
func (m *Matrix) MatchesInColumn(predicate func(value float64) bool) []int {
    return m.Matches(m.Size, func(row, col int) int {
        return col
    }, predicate)
}
// Filter returns a new Matrix keeping only the entries for which the
// predicate reports true; every other entry becomes zero. The receiver is not
// modified.
func (m *Matrix) Filter(filter func(row, col int, v float64) bool) *Matrix {
    kept := make([]float64, m.Size*m.Size)
    for idx, val := range m.Data {
        r, c := calcCoords(idx, m.Size)
        if filter(r, c, val) {
            kept[idx] = val
        }
        // Rejected entries stay at the zero value provided by make.
    }
    return &Matrix{Size: m.Size, Data: kept}
}
// FindBest scans all entries and returns the one preferred by isBetter,
// which reports whether new should replace old. Panics if the matrix is
// empty.
func (m *Matrix) FindBest(isBetter func(old, new float64) bool) float64 {
    if m.Size == 0 {
        panic(fmt.Errorf("matrix is empty"))
    }
    best := m.Data[0]
    for _, v := range m.Data {
        if isBetter(best, v) {
            best = v
        }
    }
    return best
}
// Max returns the largest entry in the matrix (panics if empty).
func (m *Matrix) Max() float64 {
    return m.FindBest(func(old, new float64) bool {
        return new > old
    })
}
// Min returns the smallest entry in the matrix (panics if empty).
func (m *Matrix) Min() float64 {
    return m.FindBest(func(old, new float64) bool {
        return new < old
    })
}
// String renders the matrix with two decimal places per cell, tab-separated
// columns and newline-separated rows.
func (m *Matrix) String() string {
    var b strings.Builder
    for i := 0; i < m.Size; i++ {
        if i > 0 {
            b.WriteByte('\n')
        }
        for j := 0; j < m.Size; j++ {
            if j > 0 {
                b.WriteByte('\t')
            }
            fmt.Fprintf(&b, "%.2f", m.At(i, j))
        }
    }
    return b.String()
}
// Without returns a new Matrix with the given row/column indices removed.
// If every index is listed the receiver itself is returned unchanged.
// NOTE(review): the row filter below uses `i % m.Size` (the ORIGINAL size)
// against data whose rows were already removed — verify this produces the
// intended sub-matrix for non-trivial index sets.
func (m *Matrix) Without(indices *[]int) *Matrix {
    size := m.Size
    if len(*indices) == size {
        return m
    }
    data := make([]float64, m.Size*m.Size)
    copy(data, m.Data)
    toRemove := len(*indices)
    // Remove rows from the highest index down so earlier removals do not
    // shift the positions of later ones.
    sorted := make([]int, toRemove)
    copy(sorted, *indices)
    sort.Sort(sort.Reverse(sort.IntSlice(sorted)))
    for _, v := range sorted {
        data = append(data[0:v*size], data[(v+1)*size:]...)
    }
    size -= toRemove
    // Second pass: drop the same indices column-wise while compacting into
    // the result buffer.
    resultData := make([]float64, size*size)
    dataIndex := 0
    for i, v := range data {
        rowIndex := i % m.Size
        if !utils.ContainsInts(&sorted, &rowIndex) {
            resultData[dataIndex] = v
            dataIndex++
        }
    }
    return &Matrix{Size: size, Data: resultData}
}
// Slice returns the sub-matrix restricted to the given row/column indices.
// If all indices are requested the receiver itself is returned unchanged.
// NOTE(review): this sorts *indices in place, mutating the caller's slice —
// confirm callers do not depend on the original order.
func (m *Matrix) Slice(indices *[]int) *Matrix {
    resultSize := len(*indices)
    if resultSize == m.Size {
        return m
    }
    // First gather the selected rows in ascending index order...
    data := make([]float64, 0)
    sort.Ints(*indices)
    for _, v := range *indices {
        data = append(data, m.Data[v*m.Size:(v+1)*m.Size]...)
    }
    // ...then keep only the selected columns while compacting into the
    // result buffer.
    resultData := make([]float64, resultSize*resultSize)
    dataIndex := 0
    for i, v := range data {
        rowIndex := i % m.Size
        if utils.ContainsInts(indices, &rowIndex) {
            resultData[dataIndex] = v
            dataIndex++
        }
    }
    return &Matrix{Size: resultSize, Data: resultData}
}
package confusion
import (
"github.com/emer/etable/etensor"
"github.com/emer/etable/simat"
"github.com/goki/gi/gi"
"github.com/goki/ki/ki"
"github.com/goki/ki/kit"
)
// Matrix computes the confusion matrix, with rows representing
// the ground truth correct class, and columns representing the
// actual answer produced. Correct answers are along the diagonal.
// Accumulate raw counts via Incr, then call Probs to fill Prob.
type Matrix struct {
    Prob etensor.Float64 `view:"no-inline" desc:"normalized probability of confusion: Row = ground truth class, Col = actual response for that class."`
    Sum etensor.Float64 `view:"no-inline" desc:"incremental sums"`
    N etensor.Float64 `view:"no-inline" desc:"counts per ground truth (rows)"`
    Vis simat.SimMat `view:"no-inline" desc:"visualization using SimMat"`
}
// KiT_Matrix registers the Matrix type (with its props) in the ki type registry.
var KiT_Matrix = kit.Types.AddType(&Matrix{}, MatrixProps)
// Init initializes the Matrix for given number of classes,
// and resets the data to zero.
func (cm *Matrix) Init(n int) {
    cm.Prob.SetShape([]int{n, n}, nil, []string{"N", "N"})
    cm.Prob.SetZeros()
    cm.Sum.SetShape([]int{n, n}, nil, []string{"N", "N"})
    cm.Sum.SetZeros()
    cm.N.SetShape([]int{n}, nil, []string{"N"})
    cm.N.SetZeros()
    // Point the visualization at the probability tensor so it tracks updates.
    cm.Vis.Mat = &cm.Prob
}
// SetLabels sets the class labels, used for both rows and columns
// of the visualization in Vis.
func (cm *Matrix) SetLabels(lbls []string) {
    cm.Vis.Rows = lbls
    cm.Vis.Cols = lbls
}
// Incr increments the accumulated counts for the given ground-truth class and
// the response that was produced for it.
func (cm *Matrix) Incr(class, resp int) {
    idx := []int{class, resp}
    cm.Sum.Set(idx, cm.Sum.Value(idx)+1)
    cm.N.Set1D(class, cm.N.Value1D(class)+1)
}
// Probs computes the probabilities based on accumulated data:
// Prob[class][resp] = Sum[class][resp] / N[class].
// Classes with a zero count are skipped, leaving their Prob row untouched.
func (cm *Matrix) Probs() {
    n := cm.N.Len()
    for cl := 0; cl < n; cl++ {
        cn := cm.N.Value1D(cl)
        if cn == 0 {
            continue
        }
        for ri := 0; ri < n; ri++ {
            ix := []int{cl, ri}
            sum := cm.Sum.Value(ix)
            cm.Prob.Set(ix, sum/cn)
        }
    }
}
// SaveCSV saves the Prob result to a CSV file, comma separated.
func (cm *Matrix) SaveCSV(filename gi.FileName) {
    etensor.SaveCSV(&cm.Prob, filename, ',')
}
// OpenCSV opens the Prob result from a CSV file, comma separated.
func (cm *Matrix) OpenCSV(filename gi.FileName) {
    etensor.OpenCSV(&cm.Prob, filename, ',')
}
// MatrixProps defines the GUI toolbar actions (save/open the probability CSV)
// registered for Matrix views.
var MatrixProps = ki.Props{
    "ToolBar": ki.PropSlice{
        {"SaveCSV", ki.Props{
            "label": "Save CSV...",
            "icon": "file-save",
            "desc": "Save CSV-formatted confusion probabilities (Probs)",
            "Args": ki.PropSlice{
                {"CSV File Name", ki.Props{
                    "ext": ".csv",
                }},
            },
        }},
        {"OpenCSV", ki.Props{
            "label": "Open CSV...",
            "icon": "file-open",
            "desc": "Open CSV-formatted confusion probabilities (Probs)",
            "Args": ki.PropSlice{
                {"Weights File Name", ki.Props{
                    "ext": ".csv",
                }},
            },
        }},
    },
}
package envelope
import (
"log"
"github.com/steinarvk/abora/synth/interpolation"
"github.com/steinarvk/abora/synth/varying"
)
// Envelope represents the amplitude component of a waveform. It takes on
// values in [0,1]. It may or may not "end", i.e. reach zero permanently.
// Its argument is in seconds.
type Envelope interface {
    Amplitude() float64
    Done() bool
    Advance(float64)
}
// brickWall is a constant-amplitude-1 envelope that cuts hard to zero once
// its remaining time is used up.
type brickWall struct {
    remaining float64
}
// BrickWall returns an envelope that holds amplitude 1 for t seconds and
// then ends abruptly.
func BrickWall(t float64) Envelope {
    return &brickWall{remaining: t}
}
func (e *brickWall) Amplitude() float64 {
    if e.Done() {
        return 0
    }
    return 1
}
func (e *brickWall) Done() bool {
    return e.remaining <= 0
}
func (e *brickWall) Advance(dt float64) {
    e.remaining -= dt
}
// Constant is an Envelope with a fixed amplitude. It never changes with time
// and is Done only when the amplitude is exactly zero.
type Constant float64
func (x Constant) Amplitude() float64 { return float64(x) }
func (x Constant) Done() bool { return float64(x) == 0 }
func (_ Constant) Advance(_ float64) {}
var (
    // Null is the permanently silent envelope; Identity is permanently full.
    Null = Constant(0)
    Identity = Constant(1)
)
// sectionAttackSustain builds an infinite envelope that rises from 0 to 1
// over attackDur seconds, settles to sustainLevel over the following
// stabilizeDur seconds, and then holds that level forever.
func sectionAttackSustain(attackDur, stabilizeDur float64, sustainLevel float64, interpolator interpolation.Function) Envelope {
    return &interpolatedEnvelope{amplitude: varying.NewInterpolated(
        []varying.Point{
            {Time: 0, Value: 0},
            {Time: attackDur, Value: 1.0},
            {Time: attackDur + stabilizeDur, Value: sustainLevel},
        },
        varying.Interpolation(interpolator),
        varying.Infinite{},
    )}
}
// sectionRelease builds a finite envelope that holds full amplitude for
// beforeReleaseDur seconds and then fades to zero over releaseDur seconds.
// (A leftover debug log.Printf that fired on every call has been removed.)
func sectionRelease(beforeReleaseDur, releaseDur float64, interpol interpolation.Function) Envelope {
    vary := varying.NewInterpolated(
        []varying.Point{
            {Time: 0, Value: 1},
            {Time: beforeReleaseDur, Value: 1},
            {Time: beforeReleaseDur + releaseDur, Value: 0},
        },
        varying.Interpolation(interpol),
    )
    return &interpolatedEnvelope{
        amplitude: vary,
        timeLeft:  beforeReleaseDur + releaseDur,
        finite:    true,
    }
}
// ADSRSpec describes a classic attack/decay/sustain/release amplitude shape.
// Durations are in seconds; SustainLevel is the amplitude held after decay.
type ADSRSpec struct {
    AttackDuration float64
    DecayDuration float64
    SustainLevel float64
    ReleaseDuration float64
}
// LinearADSR builds an ADSR envelope of the given total duration using
// linear interpolation between the phase points.
func LinearADSR(totalDuration float64, spec ADSRSpec) Envelope {
    return adsrWith(spec, totalDuration, interpolation.Linear)
}
// CosADSR builds an ADSR envelope of the given total duration using cosine
// interpolation between the phase points.
func CosADSR(totalDuration float64, spec ADSRSpec) Envelope {
    return adsrWith(spec, totalDuration, interpolation.Cosine)
}
// adsrWith composes an ADSR envelope as the product of an attack/sustain
// section and a release section, both using the given interpolation. The
// release phase is anchored to the end of totalDuration; if totalDuration is
// shorter than the release itself, the release starts immediately.
func adsrWith(spec ADSRSpec, totalDuration float64, interpol interpolation.Function) Envelope {
    beforeReleaseDur := totalDuration - spec.ReleaseDuration
    if beforeReleaseDur < 0 {
        beforeReleaseDur = 0
    }
    return Composite(
        sectionRelease(
            beforeReleaseDur,
            spec.ReleaseDuration,
            interpol),
        sectionAttackSustain(
            spec.AttackDuration,
            spec.DecayDuration,
            spec.SustainLevel,
            interpol),
    )
}
// interpolatedEnvelope adapts a varying.Varying amplitude curve to the
// Envelope interface. If finite is set, the envelope ends (Done, amplitude 0)
// once timeLeft has been consumed; otherwise it runs forever.
type interpolatedEnvelope struct {
    amplitude varying.Varying
    finite bool
    timeLeft float64
}
func (x *interpolatedEnvelope) Amplitude() float64 {
    if x.Done() {
        return 0.0
    }
    return x.amplitude.Value()
}
func (x *interpolatedEnvelope) Advance(dt float64) {
    x.amplitude.Advance(dt)
    // Only finite envelopes track remaining time.
    if x.finite {
        x.timeLeft -= dt
    }
}
func (x *interpolatedEnvelope) Done() bool {
    return x.finite && x.timeLeft <= 0
}
// compositeEnvelope combines several envelopes multiplicatively: its
// amplitude is the product of the components' amplitudes, and it is Done as
// soon as any component is Done.
type compositeEnvelope []Envelope
// Composite builds a compositeEnvelope from the given components.
func Composite(components ...Envelope) Envelope {
    return compositeEnvelope(components)
}
func (c compositeEnvelope) Advance(dt float64) {
    for _, e := range c {
        e.Advance(dt)
    }
}
func (c compositeEnvelope) Done() bool {
    // Done if ANY component has ended (its amplitude factor is permanently 0).
    for _, e := range c {
        if e.Done() {
            return true
        }
    }
    return false
}
func (c compositeEnvelope) Amplitude() float64 {
    value := 1.0
    for _, e := range c {
        value *= e.Amplitude()
    }
    return value
}
// withVaryings wraps an Envelope and multiplies its amplitude by a set of
// varying.Varying modulators (e.g. tremolo). Doneness comes solely from the
// wrapped envelope; the varyings are only advanced alongside it.
type withVaryings struct {
    env Envelope
    vary []varying.Varying
}
// WithVarying wraps env so its amplitude is modulated by the given varyings.
func WithVarying(env Envelope, components ...varying.Varying) Envelope {
    return &withVaryings{
        env: env,
        vary: components,
    }
}
func (e *withVaryings) Advance(dt float64) {
    e.env.Advance(dt)
    for _, v := range e.vary {
        v.Advance(dt)
    }
}
func (e *withVaryings) Done() bool {
    return e.env.Done()
}
func (e *withVaryings) Amplitude() float64 {
    rv := e.env.Amplitude()
    for _, v := range e.vary {
        rv *= v.Value()
    }
    return rv
}
package main
import (
"encoding/json"
"fmt"
"os"
"reflect"
)
// We’ll use these two structs to demonstrate encoding and decoding of custom types below.
// response1 has no JSON tags, so the exported field names themselves become
// the JSON keys ("Page", "Fruits").
type response1 struct {
    Page int
    Fruits []string
}
// Only exported fields will be encoded/decoded in JSON. Fields must start with capital letters to be exported.
// response2 uses struct tags to customize the JSON key names ("page", "fruits").
type response2 struct {
    Page int `json:"page"`
    Fruits []string `json:"fruits"`
}
// main walks through encoding/json usage: marshalling atomic values, slices,
// maps and structs (with and without tags), decoding into generic maps and
// typed structs, and streaming encoding to an io.Writer. Errors from
// Marshal/Unmarshal are deliberately ignored here for tutorial brevity.
func main() {
    // First we’ll look at encoding basic data types to JSON strings.
    // Here are some examples for atomic values.
    bolB, _ := json.Marshal(true)
    fmt.Println("bool:true -->", string(bolB))
    intB, _ := json.Marshal(1)
    fmt.Println("int: 1 -->", string(intB))
    fltB, _ := json.Marshal(2.34)
    fmt.Println("float: 2.34 -->", string(fltB))
    strB, _ := json.Marshal("gopher")
    fmt.Println("string: gopher", string(strB))
    // And here are some for slices and maps, which encode to JSON arrays and objects as you’d expect.
    slcD := []string{"apple", "peach", "pear"}
    slcB, _ := json.Marshal(slcD)
    fmt.Println("string[]:", string(slcB), reflect.TypeOf(slcB))
    mapD := map[string]int{"apple": 5, "lettuce": 7}
    mapB, _ := json.Marshal(mapD)
    fmt.Println("map:", string(mapB), reflect.TypeOf(mapB))
    // The JSON package can automatically encode your custom data types.
    // It will only include exported fields in the encoded output and will by default use those names as the JSON keys.
    res1D := &response1{
        Page: 1,
        Fruits: []string{"apple", "peach", "pear"},
    }
    res1B, _ := json.Marshal(res1D)
    fmt.Println("response1:", string(res1B), reflect.TypeOf(res1B))
    //You can use tags on struct field declarations to customize the encoded JSON key names.
    // Check the definition of response2 above to see an example of such tags.
    res2D := &response2{
        Page: 1,
        Fruits: []string{"apple", "peach", "pear"},
    }
    res2B, _ := json.Marshal(res2D)
    fmt.Println("response2:", string(res2B))
    // Now let’s look at decoding JSON data into Go values. Here’s an example for a generic data structure.
    byt := []byte(`{"num":6.13,"strs":["a", "b"]}`)
    // We need to provide a variable where the JSON package can put the decoded data.
    // This map[string]interface{} will hold a map of strings to arbitrary data types.
    var dat map[string]interface{}
    // Here’s the actual decoding, and a check for associated errors.
    if err := json.Unmarshal(byt, &dat); err != nil {
        panic(err)
    }
    fmt.Println(dat, reflect.TypeOf(dat["num"]), reflect.TypeOf(dat["strs"]))
    // In order to use the values in the decoded map, we’ll need to convert them to their appropriate type.
    // For example here we convert the value in num to the expected float64 type
    // (JSON numbers decode to float64 by default).
    num := dat["num"].(float64)
    fmt.Println(num)
    // Accessing nested data requires a series of conversions.
    strs := dat["strs"].([]interface{})
    str1 := strs[0].(string)
    fmt.Println(str1)
    // We can also decode JSON into custom data types.
    // This has the advantages of adding additional type-safety to our programs and
    // eliminating the need for type assertions when accessing the decoded data.
    str := `{"page": 1, "fruits": ["apple", "peach"]}`
    res := response2{}
    json.Unmarshal([]byte(str), &res)
    fmt.Println("decode resp2:", res)
    fmt.Printf("decode resp2 with field: %+v\n", res)
    fmt.Println(res.Fruits[0])
    // In the examples above we always used bytes and strings as intermediates between the data and
    // JSON representation on standard out.
    // We can also stream JSON encodings directly to os.Writers like os.Stdout or even HTTP response bodies.
    enc := json.NewEncoder(os.Stdout)
    d := map[string]int{"apple": 5, "lettuce": 7}
    enc.Encode(d)
}
package fixpoint
// Useful link:
// https://spin.atomicobject.com/2012/03/15/simple-fixed-point-math/
// Q16 is a fixed-point integer type with 16 bits of precision to the
// right of the fixed point, stored in an int32 (the header calls it
// Q7.16). It is designed as a cheaper replacement for unit vectors,
// with some integer headroom to avoid overflow.
type Q16 struct {
	N int32 // raw value; the represented number is N / 2^16
}

// Common constants in Q16 form.
var ZeroQ16 Q16 = Q16{0 << 16}
var HalfQ16 Q16 = Q16{int32(0.5 * (1 << 16))}
var OneQ16 Q16 = Q16{1 << 16}
var TwoQ16 Q16 = Q16{2 << 16}
var MaxQ16 Q16 = Q16{2147483647} // math.MaxInt32
// Q16FromFloat converts a float32 to fixed point. Inverse of Float.
func Q16FromFloat(x float32) Q16 {
	return Q16{N: int32(x * (1 << 16))}
}

// Q16FromInt32 converts an integer to fixed point; the fractional part
// is zero.
func Q16FromInt32(x int32) Q16 {
	return Q16{N: x << 16}
}
// Abs returns the absolute value of q1.
func Abs(q1 Q16) Q16 {
	if q1.N >= 0 {
		return q1
	}
	return q1.Neg()
}

// Min returns the smaller of the two values.
func Min(q1 Q16, q2 Q16) Q16 {
	if q1.N <= q2.N {
		return q1
	}
	return q2
}

// Max returns the larger of the two values.
func Max(q1 Q16, q2 Q16) Q16 {
	if q1.N >= q2.N {
		return q1
	}
	return q2
}
// Float converts the fixed-point value back to float32. Inverse of
// Q16FromFloat.
func (q Q16) Float() float32 {
	return float32(q.N) / float32(1<<16)
}

// Int32Scaled returns the value multiplied by scale and truncated to an
// integer, computed as N / (65536/scale); scale should evenly divide
// 65536 for an exact result.
func (q Q16) Int32Scaled(scale int32) int32 {
	divisor := int32(1<<16) / scale
	return q.N / divisor
}
// Add returns q1 + q2.
func (q1 Q16) Add(q2 Q16) Q16 {
	return Q16{N: q1.N + q2.N}
}

// Sub returns q1 - q2.
func (q1 Q16) Sub(q2 Q16) Q16 {
	return Q16{N: q1.N - q2.N}
}

// Neg returns -q1.
func (q1 Q16) Neg() Q16 {
	return Q16{N: -q1.N}
}

// Mul returns q1 * q2, computed in 64 bits to avoid intermediate
// overflow, then rescaled back to Q16.
func (q1 Q16) Mul(q2 Q16) Q16 {
	product := int64(q1.N) * int64(q2.N)
	return Q16{N: int32(product >> 16)}
}

// Div returns q1 / q2, widening the numerator to 64 bits before the
// pre-scale shift.
func (q1 Q16) Div(q2 Q16) Q16 {
	quotient := (int64(q1.N) << 16) / int64(q2.N)
	return Q16{N: int32(quotient)}
}
// InvSqrtPrecision is the number of Newton–Raphson refinement steps
// used by InvSqrt; more steps trade speed for accuracy.
var InvSqrtPrecision int = 4

// InvSqrt approximates 1/sqrt(q1) entirely in fixed point using the
// Newton–Raphson iteration y' = y * (3/2 - (x/2)*y*y). Inputs at or
// below 1.0 return exactly 1.0.
func (q1 Q16) InvSqrt() Q16 {
	// 65536 is 1.0 in Q16; clamp small inputs.
	if(q1.N <= 65536){
		return OneQ16;
	}
	xSR := int64(q1.N)>>1;      // x/2, reused every iteration
	pushRight := int64(q1.N);   // scratch copy for locating the MSB
	var msb int64 = 0;          // most significant bit above the point
	var shoffset int64 = 0;     // shift for the initial guess
	var yIsqr int64 = 0;        // running estimate of 1/sqrt(x)
	var ysqr int64 = 0;         // y*y (Q16)
	var fctrl int64 = 0;        // (x/2)*y*y (Q16)
	var subthreehalf int64 = 0; // 3/2 - (x/2)*y*y; 98304 == 1.5 in Q16
	// Locate the most significant integer bit of x.
	for pushRight >= 65536 {
		pushRight >>=1;
		msb++;
	}
	// Initial guess 2^(16 - msb/2), roughly 1/sqrt(x) in Q16.
	shoffset = (16 - ((msb)>>1));
	yIsqr = 1<<shoffset
	// y = (y * (98304 - ( ( (x>>1) * ((y * y)>>16 ) )>>16 ) ) )>>16; x2
	for i := 0; i < InvSqrtPrecision; i++ {
		ysqr = (yIsqr * yIsqr)>>16
		fctrl = (xSR * ysqr)>>16
		subthreehalf = 98304 - fctrl;
		yIsqr = (yIsqr * subthreehalf)>>16
	}
	return Q16{int32(yIsqr)}
}
// Relational operators on the raw fixed-point representation.

// gt reports q1 > q2.
func (q1 Q16) gt(q2 Q16) bool {
	return q2.N < q1.N
}

// gte reports q1 >= q2.
func (q1 Q16) gte(q2 Q16) bool {
	return !(q1.N < q2.N)
}

// lt reports q1 < q2.
func (q1 Q16) lt(q2 Q16) bool {
	return q2.N > q1.N
}

// lte reports q1 <= q2.
func (q1 Q16) lte(q2 Q16) bool {
	return !(q2.N < q1.N)
}
// Vec3Q16 is a 3-dimensional vector with Q16 fixed-point components.
type Vec3Q16 struct {
	X Q16
	Y Q16
	Z Q16
}
// Vec3Q16FromFloat builds a fixed-point vector from three float32
// components.
func Vec3Q16FromFloat(x, y, z float32) Vec3Q16 {
	return Vec3Q16{
		X: Q16FromFloat(x),
		Y: Q16FromFloat(y),
		Z: Q16FromFloat(z),
	}
}

// Add returns the component-wise sum v1 + v2.
func (v1 Vec3Q16) Add(v2 Vec3Q16) Vec3Q16 {
	return Vec3Q16{
		X: v1.X.Add(v2.X),
		Y: v1.Y.Add(v2.Y),
		Z: v1.Z.Add(v2.Z),
	}
}

// Mul returns v1 scaled by c.
func (v1 Vec3Q16) Mul(c Q16) Vec3Q16 {
	return Vec3Q16{X: v1.X.Mul(c), Y: v1.Y.Mul(c), Z: v1.Z.Mul(c)}
}

// Dot returns the dot product v1 · v2.
func (v1 Vec3Q16) Dot(v2 Vec3Q16) Q16 {
	xx := v1.X.Mul(v2.X)
	yy := v1.Y.Mul(v2.Y)
	zz := v1.Z.Mul(v2.Z)
	return xx.Add(yy).Add(zz)
}

// Sub returns the component-wise difference v1 - v2.
func (v1 Vec3Q16) Sub(v2 Vec3Q16) Vec3Q16 {
	return Vec3Q16{X: v1.X.Sub(v2.X), Y: v1.Y.Sub(v2.Y), Z: v1.Z.Sub(v2.Z)}
}

// Cross returns the cross product v1 × v2.
func (v1 Vec3Q16) Cross(v2 Vec3Q16) Vec3Q16 {
	cx := v1.Y.Mul(v2.Z).Sub(v1.Z.Mul(v2.Y))
	cy := v1.Z.Mul(v2.X).Sub(v1.X.Mul(v2.Z))
	cz := v1.X.Mul(v2.Y).Sub(v1.Y.Mul(v2.X))
	return Vec3Q16{X: cx, Y: cy, Z: cz}
}
// Normalize returns v1 scaled to approximately unit length using the
// fixed-point inverse square root.
//
// Fix: the squared magnitude previously omitted the Z component
// (X*X + Y*Y only), so vectors with a significant Z part were scaled by
// the wrong factor even though all three components were multiplied.
func (v1 Vec3Q16) Normalize() Vec3Q16 {
	sqrMag := v1.X.Mul(v1.X).Add(v1.Y.Mul(v1.Y)).Add(v1.Z.Mul(v1.Z))
	iSqrt := sqrMag.InvSqrt()
	return Vec3Q16{v1.X.Mul(iSqrt), v1.Y.Mul(iSqrt), v1.Z.Mul(iSqrt)}
}
// ZeroVec3Q16 is the zero vector; OneVec3Q16 has 1.0 in every component.
var ZeroVec3Q16 Vec3Q16 = Vec3Q16{ZeroQ16, ZeroQ16, ZeroQ16}
var OneVec3Q16 Vec3Q16 = Vec3Q16{OneQ16, OneQ16, OneQ16}

// QuatQ16 is a quaternion with Q16 fixed-point elements, stored as the
// scalar part W plus the vector part V.
type QuatQ16 struct {
	W Q16
	V Vec3Q16
}
// QuatIdent returns the identity quaternion (w = 1, v = 0).
func QuatIdent() QuatQ16 {
	return QuatQ16{W: Q16FromInt32(1)}
}

// X returns the x component of the vector part.
func (q QuatQ16) X() Q16 { return q.V.X }

// Y returns the y component of the vector part.
func (q QuatQ16) Y() Q16 { return q.V.Y }

// Z returns the z component of the vector part.
func (q QuatQ16) Z() Q16 { return q.V.Z }
// Mul returns this quaternion multiplied by the argument.
func (q1 QuatQ16) Mul(q2 QuatQ16) QuatQ16 {
// Copied from go-gl/mathgl and modified.
return QuatQ16{q1.W.Mul(q2.W).Sub(q1.V.Dot(q2.V)), q1.V.Cross(q2.V).Add(q2.V.Mul(q1.W)).Add(q1.V.Mul(q2.W))}
}
// Rotate returns the vector from the argument rotated by the rotation this
// quaternion represents.
func (q1 QuatQ16) Rotate(v Vec3Q16) Vec3Q16 {
// Copied from go-gl/mathgl and modified.
cross := q1.V.Cross(v)
// v + 2q_w * (q_v x v) + 2q_v x (q_v x v)
return v.Add(cross.Mul(Q16FromInt32(2).Mul(q1.W))).Add(q1.V.Mul(Q16FromInt32(2)).Cross(cross))
} | fixpoint.go | 0.907492 | 0.562417 | fixpoint.go | starcoder |
This is an example in Linux password driver. It demonstrates how to implement
a device specific driver in Go, using framework components from the
translation engine.
Note: Drivers do not NEED to be written in Go. Any stand alone executable (
in any language) will work. That executable needs to run in the
foreground (so the translation engine can manage the service) and connect
to the UNIX domain socket for the translation engine (see config file).
The driver must expect Protocol Buffers[1] on the client connection. The
translation engine will "execute" methods via the protobuf interface of the
incomming client connection.
If written in Go, use of the provided driver framework is recommended. The
framework provides most of the boilerplate necessary to connect to and
use the translation engine services. If using the framework, a
DriverService object must be passed to the framework Main function (
driver.Main()). The expanded interface definition:
type DriverService interface {
GetConfig(context.Context, *DeviceID) (*ConfigFiles, error)
TranslatePass(context.Context, *UserPass) (*CommandSeq, error)
TranslateService(context.Context, *Service) (*CommandSeq, error)
TranslateVar(context.Context, *Var) (*CommandSeq, error)
TranslateSvcConfig(context.Context, *ServiceConfig) (*CommandSeq, error)
ExecuteConfig(context.Context, *CommandSeq) (*BoolReply, error)
Name() string
Client() EngineClient
SetClient(EngineClient)
}
DriverService incorporates the DriverServer interface which is defined
by the Protocol Buffers implementation.
If you choose another language, you must still implement a Protocol
Buffers version 3 service running on top of gRPC to provide to the
translation engine. See the protobuf service definitions in
pbtranslate/gen/driver.proto for details. Also review package
iti/pbconf/pbtranslate/driver for Go implementation details.
}
*/
package main
import (
"fmt"
"strings"
context "golang.org/x/net/context"
logging "github.com/iti/pbconf/lib/pblogger"
"github.com/iti/pbconf/lib/pbtranslate/driver"
)
// log is the package-wide logger, initialized in main() with the
// driver's name.
var log logging.Logger

// driverService holds driver-specific state and implements the
// DriverService interface expected by the framework.
type driverService struct {
	name   string              // official driver name ("linux")
	client driver.EngineClient // handle back to the translation engine
}
// Name returns the driver's official name.
func (d *driverService) Name() string {
	return d.name
}

// Client returns the engine client previously stored with SetClient.
func (d *driverService) Client() driver.EngineClient {
	return d.client
}

// SetClient stores the engine client for later use by Client().
func (d *driverService) SetClient(c driver.EngineClient) {
	d.client = c
}
// GetConfig should return all config files as they exist on the device.
// This driver does not implement it and returns (nil, nil).
func (d *driverService) GetConfig(ctx context.Context, id *driver.DeviceID) (*driver.ConfigFiles, error) {
	return nil, nil
}
/*
TranslatePassword()
Called when a password set is needed
The meaning of User Name and password is driver specific. For instance, on
an SEL421, Username would be "2", for level 2 password, but on a Linux
computer, this will be an actual user name
*/
// TranslatePass returns the shell command sequence that sets the given
// user's password via chpasswd. Username/password semantics are driver
// specific; on Linux they are a real account name and its new password.
func (d *driverService) TranslatePass(ctx context.Context, pass *driver.UserPass) (*driver.CommandSeq, error) {
	log.Debug("TranslatePass()")
	cmd := &driver.Command{
		Command: fmt.Sprintf("echo \"%s:%s\"|chpasswd", pass.Username, pass.Password),
	}
	return &driver.CommandSeq{
		Devid:    pass.Devid,
		Commands: []*driver.Command{cmd},
	}, nil
}
/*
TranslateService()
Called to en/dis- able a service on the device
Should return the commands necessary to immediately shut the service off,
as well as the commands necessary to permanently disable the service
such that the service will start up in the given state when the device
starts
*/
// TranslateService returns the upstart commands that enable or disable
// a service both immediately and across reboots (via an override file).
func (d *driverService) TranslateService(ctx context.Context, svc *driver.Service) (*driver.CommandSeq, error) {
	log.Debug("TranslateService()")
	var cmd string
	if svc.State {
		// Enable: drop any manual override, then start the job now.
		cmd = fmt.Sprintf("rm -f /etc/init/%s.override;service %s start", svc.Name, svc.Name)
	} else {
		// Disable: stop the job now and mark it manual so it stays off.
		cmd = fmt.Sprintf("service %s stop; echo \"manual\" > /etc/init/%s.override", svc.Name, svc.Name)
	}
	reply := &driver.CommandSeq{
		Devid: svc.Devid,
		Commands: []*driver.Command{
			{Command: cmd},
		},
	}
	return reply, nil
}
/*
TranslateVar()
Called to set arbitrary configuration parameters
The meaning of Key and Value are driver specific. TranslateVar() is a
catch-all for configuration parameters for which there is not a specific
required interface. The driver should "do the right thing" for the device
in question. Note, if the setting changes the operating state of the
device, the setting should be changed such that the the device enters
the new state immediatly, _and_ that the device will enter the new
state when the device starts up.
*/
// TranslateVar handles arbitrary key/value configuration parameters.
// This driver has no variables to translate, so it returns an empty
// (but non-nil) command sequence.
func (d *driverService) TranslateVar(ctx context.Context, variable *driver.Var) (*driver.CommandSeq, error) {
	log.Debug("TranslateVar()")
	empty := &driver.CommandSeq{
		Devid:    variable.Devid,
		Commands: []*driver.Command{},
	}
	return empty, nil
}
/*
TranslateSvcConfig()
Called to set service configuration options
This method is also very device specific. It would return the commands
necessary to configure service specific options. For instance, this
driver assumes that any option set should by a var=val pair that would be
inserted into /etc/default/<service name>
*/
// TranslateSvcConfig returns commands that set key=value in
// /etc/default/<service>: first strip any existing assignment of the
// key, then append the new one.
func (d *driverService) TranslateSvcConfig(ctx context.Context, opt *driver.ServiceConfig) (*driver.CommandSeq, error) {
	strip := fmt.Sprintf("sed -i -e 's/^%s[ ]*=.*//g' /etc/default/%s",
		opt.Key, opt.Name)
	appendKV := fmt.Sprintf("echo %s=%s >> /etc/default/%s", opt.Key, opt.Value,
		opt.Name)
	return &driver.CommandSeq{
		Devid: opt.Devid,
		Commands: []*driver.Command{
			{Command: strip},
			{Command: appendKV},
		},
	}, nil
}
/*
ExecuteConfig()
Called by the translation engine to apply a series of commands to a
device
*/
// ExecuteConfig applies a sequence of commands to the device named in
// the sequence, over a transport obtained from the framework.
func (d *driverService) ExecuteConfig(ctx context.Context, commands *driver.CommandSeq) (*driver.BoolReply, error) {
	log.Debug("ExecuteConfig()")
	transport, err := driver.ConnectToDevice(d.authFn, commands.Devid.Id, d.Name())
	log.Debug("HERE")
	if err != nil {
		log.Info("Failed to connect: %s", err.Error())
		return driver.ReplyFalse(err)
	}
	log.Debug("Got transport: %v", transport)
	for _, cmd := range commands.Commands {
		// Update the password in metadata first so a failed chpasswd
		// doesn't leave us locked out (see isRoot).
		if err := d.isRoot(commands.Devid.Id, cmd.Command); err != nil {
			return driver.ReplyFalse(err)
		}
		log.Debug("Doing command: %v", cmd.Command)
		// NOTE(review): the command appears to be sent and its output
		// collected via a single transport.Read on a buffer pre-seeded
		// with the command text plus 30 spare bytes -- presumably a
		// convention of this transport; confirm against the transport
		// returned by driver.ConnectToDevice.
		buf := append([]byte(cmd.Command), make([]byte, 30)...)
		_, err := transport.Read(buf)
		if err != nil {
			// A "Job is already running" response is treated as benign;
			// anything else is logged but does not abort the sequence.
			log.Debug("returned output: %s", string(buf))
			if !strings.Contains(string(buf), "Job is already running") {
				log.Debug("Something Failed")
				log.Info("Command <<%s>> Failed: %s", cmd.Command, err.Error())
			}
		}
	}
	return driver.ReplyTrue(nil)
}
// isRoot inspects one command and, when it is a chpasswd command that
// changes the password of the device's stored user, records the new
// password in the engine's metadata so we can still authenticate
// afterwards.
//
// Fix: the reply from SaveMeta was dereferenced (r.Ok) before checking
// the error; a failed SaveMeta returns a nil reply and panicked. The
// error is now checked first, and a rejected save reports an explicit
// error instead of silently returning nil.
func (d *driverService) isRoot(id int64, cmd string) error {
	// Fail fast if it's not a password command.
	if !strings.Contains(cmd, "chpasswd") {
		return nil
	}
	root, _, err := d.authFn(id)
	if err != nil {
		return err
	}
	// It is a password command; does it mention our stored user at all?
	if !strings.Contains(cmd, root) {
		return nil
	}
	// Likely a password change for our user: parse the command, which
	// has the shape `echo "user:pass"|chpasswd` (see TranslatePass).
	echopart := strings.Split(cmd, "|")[0]
	fq := strings.LastIndex(cmd, "\"")
	upasspart := echopart[6:fq]
	upass := strings.Split(upasspart, ":")
	if upass[0] == root {
		log.Debug("Need to update")
		kv := &driver.KVPair{
			Devid: &driver.DeviceID{
				Id: id,
			},
			Key:   "password",
			Value: upass[1],
		}
		r, e := d.Client().SaveMeta(context.Background(), kv)
		if e != nil {
			return e
		}
		if !r.Ok {
			return fmt.Errorf("saving password metadata for device %d was rejected", id)
		}
	}
	return nil
}
/*
authFn() [[optional]]
Credential provider function. Returns the authentication parameters
to the transport layer if needed. Not all transports or devices require
authentication. At the time of writing, only the ssh transport uses this
feature.
This is NOT a function required by the DriverService interface, and is
passed into the transport creation routine. As such naming is not
important. This could be, for instance, an inline function passed into
ConnectToDevice()
*/
// authFn supplies device credentials to the transport layer, fetching
// the username and password from the engine's metadata store. It is
// passed to driver.ConnectToDevice rather than being part of the
// DriverService interface.
//
// Fix: the previous version logged the retrieved password at debug
// level; credentials must never be written to logs.
func (d *driverService) authFn(id int64) (username, password string, err error) {
	log.Debug("authFn()")
	u, err := d.Client().GetMeta(context.Background(), &driver.KVRequest{
		Devid: &driver.DeviceID{Id: id},
		Key:   "username",
	})
	if err != nil {
		return "", "", err
	}
	p, err := d.Client().GetMeta(context.Background(), &driver.KVRequest{
		Devid: &driver.DeviceID{Id: id},
		Key:   "password",
	})
	if err != nil {
		return "", "", err
	}
	log.Debug("retrieved credentials for user %s", u.Value)
	return u.Value, p.Value, nil
}
/*
Service configuration and start up are handled by the framework. There is
no need to do anything beyond instantiate an instance of the driver
service object, and pass it to driver.Main()
*/
func main() {
devdriver := driverService{name: "linux"}
log = driver.GetLogger(devdriver.Name())
driver.Main(&devdriver)
} | cmd/drivers/linux/linux.go | 0.582135 | 0.405449 | linux.go | starcoder |
package point
import (
"github.com/gravestench/pho/geom"
)
// New returns a Point at the given coordinates.
func New(x, y float64) *Point {
	p := &Point{
		Type: geom.Point,
		X:    x,
		Y:    y,
	}
	return p
}
// Point defines a point in 2D space, with an x and y component.
type Point struct {
	Type geom.ShapeType // shape discriminator; set to geom.Point by New
	X, Y float64        // coordinates
}
// XY returns the point's coordinates.
func (p *Point) XY() (x, y float64) {
	x, y = p.X, p.Y
	return
}

// SetTo assigns both coordinates and returns the point for chaining.
func (p *Point) SetTo(x, y float64) *Point {
	p.X = x
	p.Y = y
	return p
}
// Ceil applies `math.Ceil()` to each coordinate, delegating to the
// package-level Ceil helper.
func (p *Point) Ceil() *Point {
	return Ceil(p)
}

// Floor applies `math.Floor()` to each coordinate, delegating to the
// package-level Floor helper.
func (p *Point) Floor() *Point {
	return Floor(p)
}

// Clone returns a copy of this point.
func (p *Point) Clone() *Point {
	return Clone(p)
}

// CopyFrom copies the values of the source point into this point.
func (p *Point) CopyFrom(source *Point) *Point {
	return CopyFrom(source, p)
}

// Equals compares two `Point` objects to see if they are equal.
func (p *Point) Equals(other *Point) bool {
	return Equals(p, other)
}

// GetMagnitude calculates the magnitude of the point, equivalent to the
// length of the line from the origin to this point.
func (p *Point) GetMagnitude() float64 {
	return GetMagnitude(p)
}

// GetMagnitudeSquared calculates the squared magnitude of the point.
func (p *Point) GetMagnitudeSquared() float64 {
	return GetMagnitudeSquared(p)
}

// Interpolate returns the linear interpolation between this point and
// `other` at parameter `t`, written into `out`.
func (p *Point) Interpolate(other *Point, t float64, out *Point) *Point {
	return Interpolate(p, other, t, out)
}

// Invert swaps the X and the Y coordinate of the point.
func (p *Point) Invert() *Point {
	return Invert(p)
}

// Negative flips the sign of the X and the Y coordinate of the point.
func (p *Point) Negative() *Point {
	return Negative(p)
}

// Project calculates the vector projection of this point onto the
// nonzero point `other` — the orthogonal projection onto a straight
// line parallel to `other` — written into `out`.
func (p *Point) Project(other, out *Point) *Point {
	return Project(p, other, out)
}
// SetMagnitude calculates the magnitude of the point,
// which equivalent to the length of the line from the origin to this point.
func (p *Point) SetMagnitude(magnitude float64) *Point {
return SetMagnitude(p, magnitude)
} | geom/point/point.go | 0.945462 | 0.817246 | point.go | starcoder |
package graph
import "bytes"
// Lines renders the node graph as a multi-line textual drawing, one
// string per row. An empty node list yields an empty (non-nil) slice.
func Lines(nodes []*Node) ([]string, error) {
	if len(nodes) == 0 {
		return []string{}, nil
	}
	gr := &graph{
		slots: [][]byte{nodes[0].ID},
		nodes: nodes,
	}
	return gr.table().lines()
}
// Node is a node (vertex) of a graph.
type Node struct {
	ID     []byte   // identity; compared byte-wise when tracking slots
	Edges  [][]byte // IDs of the nodes this node points to
	Marker rune     // glyph drawn for this node in the rendering
	Detail string   // free-form text attached to the node's row
}

// String returns the node's marker glyph as a string.
func (n *Node) String() string {
	return string(n.Marker)
}
// graph is the rendering state: slots holds the ID occupying each
// drawing column, nodes the full node list being rendered.
type graph struct {
	slots [][]byte
	nodes []*Node
}
// table renders every node into rows: inbound-edge rows, then the
// node's own target row(s), then outbound-edge rows.
func (g *graph) table() table {
	tbl := table{}
	for _, n := range g.nodes {
		tbl = append(tbl, g.formatInboundEdgeRows(n)...)
		tbl = append(tbl, g.formatTargetRows(n)...)
		tbl = append(tbl, g.formatOutboundEdgeRows(n)...)
	}
	return tbl
}
// formatInboundEdgeRows emits the rows that funnel duplicate slots
// holding node.ID together, one contraction per loop iteration, until
// the node's slot is unique. Each case handles a different distance
// between the duplicate slots.
func (g *graph) formatInboundEdgeRows(node *Node) []row {
	rows := []row{}
	for {
		switch {
		case g.nonAdjacent(node.ID):
			// Duplicates three or more columns apart:
			// | |_|/ | |_|_|/ | |_|_|_|/ | | |_|_|/ /
			// |/| | |/| | | |/| | | | | |/| | | |
			idx, idxOther := g.contract(node.ID)
			rows = append(rows, g.lateralRow(idx, idxOther))
			rows = append(rows, g.contractionRow(idx, false))
		case g.nearlyAdjacent(node.ID):
			// Duplicates exactly two columns apart:
			// | |/ | | |/ | | |/ /
			// |/| | |/| | |/| /
			idx, _ := g.contract(node.ID)
			rows = append(rows, g.contractionRow(idx+1, true))
			rows = append(rows, g.doubleContractionRow(idx))
		case g.adjacent(node.ID):
			// Duplicates in neighboring columns:
			// |/ | |/ | |/ /
			idx, _ := g.contract(node.ID)
			rows = append(rows, g.contractionRow(idx, true))
		default:
			return rows
		}
	}
}
// nonAdjacent reports whether two slots holding id exist at least three
// columns apart.
func (g *graph) nonAdjacent(id []byte) bool {
	if len(g.slots) < 3 {
		return false
	}
	for i, idX := range g.slots[:len(g.slots)-3] {
		for _, idY := range g.slots[i+3:] {
			if equal(id, idX, idY) {
				return true
			}
		}
	}
	return false
}
// nearlyAdjacent reports whether two slots holding id exist exactly two
// columns apart (a single slot between them).
//
// Fix: the old walk advanced the left cursor to the right cursor's
// position (idX = idY), so after the first probe it compared *adjacent*
// slots (i, i+1) instead of (i, i+2), misclassifying adjacent
// duplicates as nearly-adjacent. Compare by index instead.
func (g *graph) nearlyAdjacent(id []byte) bool {
	for i := 0; i+2 < len(g.slots); i++ {
		if equal(id, g.slots[i], g.slots[i+2]) {
			return true
		}
	}
	return false
}
// adjacent reports whether two neighboring slots both hold id.
func (g *graph) adjacent(id []byte) bool {
	if len(g.slots) <= 1 {
		return false
	}
	idX := g.slots[0]
	for _, idY := range g.slots[1:] {
		if equal(id, idX, idY) {
			return true
		}
		idX = idY
	}
	return false
}
// contractionRow draws a row where the slot at idx folds into its left
// neighbor; when shiftLeft is set, every slot after idx also leans one
// half-column to the left.
func (g *graph) contractionRow(idx int, shiftLeft bool) row {
	r := row{}
	for i := range g.slots {
		switch {
		case i < idx:
			r.cells = append(r.cells, vertEdge, spacer)
		case i == idx:
			r.cells = append(r.cells, vertEdge, conEdge)
		case shiftLeft:
			r.cells = append(r.cells, spacer, conEdge)
		default:
			r.cells = append(r.cells, vertEdge, spacer)
		}
	}
	return r
}

// lateralRow draws the underscore run that carries a far-away duplicate
// slot (at idxFrom) leftwards toward idxTo.
func (g graph) lateralRow(idxTo, idxFrom int) row {
	r := row{}
	for i := range g.slots {
		switch {
		case i <= idxTo:
			r.cells = append(r.cells, vertEdge, spacer)
		case i == idxFrom-1:
			r.cells = append(r.cells, vertEdge, conEdge)
		case i < idxFrom:
			r.cells = append(r.cells, vertEdge, latEdge)
		default:
			r.cells = append(r.cells, spacer, conEdge)
		}
	}
	return r
}

// doubleContractionRow draws the second row of a nearly-adjacent merge,
// folding the slot two past idx across the intervening column.
func (g graph) doubleContractionRow(idx int) row {
	r := row{}
	for i := range g.slots {
		switch {
		case i < idx:
			r.cells = append(r.cells, vertEdge, spacer)
		case i == idx:
			r.cells = append(r.cells, vertEdge, conEdge)
		case i == idx+1:
			r.cells = append(r.cells, vertEdge, spacer)
		default:
			r.cells = append(r.cells, conEdge, spacer)
		}
	}
	return r
}
// contract removes one slot occupied by id, shifting later slots left,
// and returns the first and last slot indexes that held id before the
// contraction.
//
// Fix: the left-shift previously wrote to g.slots[i-1] with i indexing
// the *sub-slice* g.slots[idxFrom+1:], so the first iteration indexed
// slot -1 and panicked whenever idxFrom was not the last slot. The
// shift now targets the absolute positions starting at idxFrom.
func (g *graph) contract(id []byte) (idxTo, idxFrom int) {
	idxTo = -1
	for i, v := range g.slots {
		if equal(id, v) {
			if idxTo == -1 {
				idxTo = i
			}
			idxFrom = i
		}
	}
	if idxFrom < len(g.slots)-1 {
		copy(g.slots[idxFrom:], g.slots[idxFrom+1:])
	}
	g.slots = g.slots[:len(g.slots)-1]
	return
}
// formatTargetRows emits the row containing the node's own marker; when
// the node has more than two outbound edges and does not occupy the
// last slot, extra shift rows first push the columns right to make
// room for the fan-out.
func (g *graph) formatTargetRows(node *Node) []row {
	idx, rows := g.index(node.ID), []row{}
	if len(node.Edges) > 2 && idx != len(g.slots)-1 {
		// | | \
		// | | \
		// | \ | | \
		// | \ | | \
		// *-. \ | *---. \
		rows = append(rows, g.halfShiftRow(idx))
		for i := 0; i < len(node.Edges)-2; i++ {
			rows = append(rows, g.shiftRow(idx, i))
		}
	}
	// * | * \ | *-. \
	rows = append(rows, g.targetRow(idx, node))
	return rows
}
// halfShiftRow emits a transition row where slots beyond idx+1 lean
// into expansion edges while earlier slots stay vertical.
func (g *graph) halfShiftRow(idx int) row {
	var r row
	for i := range g.slots {
		if i > idx+1 {
			r.cells = append(r.cells, expEdge, spacer)
		} else {
			r.cells = append(r.cells, vertEdge, spacer)
		}
	}
	return r
}
// targetRow draws the row containing the node marker itself: vertical
// pass-through edges to its left, the marker at idx (with a horizontal
// run and corner when the node has three or more edges), and
// pass-through or expansion edges to its right.
func (g *graph) targetRow(idx int, node *Node) row {
	r := row{
		detail: node.Detail,
	}
	for i := range g.slots {
		switch {
		case i < idx:
			r.cells = append(r.cells, vertEdge, spacer)
		case i == idx:
			if len(node.Edges) < 3 {
				r.cells = append(r.cells, node, spacer)
				continue
			}
			// Three or more edges: marker, horizontal run, corner.
			r.cells = append(r.cells, node, horizEdge)
			for i := 0; i < len(node.Edges)-3; i++ {
				r.cells = append(r.cells, horizEdge, horizEdge)
			}
			r.cells = append(r.cells, cornerEdge, spacer)
		case i == idx+1 && len(node.Edges) < 3:
			r.cells = append(r.cells, vertEdge, spacer)
		case len(node.Edges) < 2:
			r.cells = append(r.cells, vertEdge, spacer)
		default:
			r.cells = append(r.cells, expEdge, spacer)
		}
	}
	return r
}
// shiftRow emits a row pushing edges right of idx one column over,
// inserting the requested number of extra spacer cells at idx.
func (g graph) shiftRow(idx int, spaces int) row {
	var r row
	for i := range g.slots {
		if i <= idx {
			r.cells = append(r.cells, vertEdge, spacer)
			if i == idx {
				for n := 0; n < spaces; n++ {
					r.cells = append(r.cells, spacer)
				}
			}
			continue
		}
		r.cells = append(r.cells, spacer, expEdge)
	}
	return r
}
// formatOutboundEdgeRows expands the node's slot into one slot per
// outbound edge and emits the connecting rows: a sink row when the node
// has no edges, nothing for exactly one edge, or an expansion row for
// several.
func (g *graph) formatOutboundEdgeRows(node *Node) []row {
	idx, rows := g.index(node.ID), []row{}
	g.expand(idx, node.Edges)
	switch len(node.Edges) {
	case 0: // sink
		// * * | | * |
		// / | /
		if len(g.slots) > idx {
			rows = append(rows, g.sinkRow(idx))
		}
	case 1:
		// A single edge re-uses the slot as-is; no extra row needed.
	default:
		// * *-. *-. \ *---. | *---. \
		// |\ |\ \ |\ \ \ |\ \ \ | |\ \ \ \
		rows = append(rows, g.expansionRow(idx, len(node.Edges)))
	}
	return rows
}
// sinkRow draws the row beneath a node with no outbound edges: the
// slots to its right slide one column left.
func (g graph) sinkRow(idx int) row {
	r := row{}
	for i := range g.slots {
		switch {
		case i < idx:
			r.cells = append(r.cells, vertEdge, spacer)
		default:
			r.cells = append(r.cells, spacer, conEdge)
		}
	}
	return r
}

// expansionRow draws the row beneath a node with multiple outbound
// edges, fanning the new slots out to the right.
// NOTE(review): edgeCount is currently unused; the fan width is derived
// from the slot count alone. Kept for interface stability.
func (g graph) expansionRow(idx, edgeCount int) row {
	r := row{}
	for i := range g.slots {
		switch {
		case i < idx:
			r.cells = append(r.cells, vertEdge, spacer)
		case i == idx:
			r.cells = append(r.cells, vertEdge, expEdge)
		case i != len(g.slots)-1:
			r.cells = append(r.cells, spacer, expEdge)
		}
	}
	return r
}
// expand replaces the slot at idx with one slot per outbound edge. The
// edges are inserted in reverse order, so the first edge occupies the
// right-most of the new slots; a node with no edges simply loses its
// slot.
func (g *graph) expand(idx int, edges [][]byte) {
	slots := [][]byte{}
	if idx > 0 {
		slots = append(slots, g.slots[:idx]...)
	}
	for i := len(edges) - 1; i >= 0; i-- {
		slots = append(slots, edges[i])
	}
	if i := idx + 1; i < len(g.slots) {
		slots = append(slots, g.slots[i:]...)
	}
	g.slots = slots
}

// index returns the first slot occupied by id; it panics if the id is
// missing, which would indicate an internal inconsistency.
func (g *graph) index(id []byte) int {
	for i, v := range g.slots {
		if equal(id, v) {
			return i
		}
	}
	panic("id missing from graph")
}
// equal reports whether every byte slice in vs is byte-for-byte equal
// to v. With no extra arguments it is trivially true.
// (Fix: stray dataset metadata fused to the closing brace removed.)
func equal(v []byte, vs ...[]byte) bool {
	for _, x := range vs {
		if !bytes.Equal(v, x) {
			return false
		}
	}
	return true
}
package transform
import (
"fmt"
"github.com/twpayne/go-geom"
)
// Compare abstracts equality and ordering over coordinates; the TreeSet
// uses it both to sort and to detect duplicates.
type Compare interface {
	IsEquals(x, y geom.Coord) bool
	IsLess(x, y geom.Coord) bool
}

// tree is a node of an (unbalanced) binary search tree holding one
// coordinate per node.
type tree struct {
	left  *tree
	value geom.Coord
	right *tree
}

// TreeSet stores coordinates sorted by the Compare strategy, dropping
// duplicates as dictated by the strategy's IsEquals.
type TreeSet struct {
	compare Compare
	tree    *tree       // BST root; nil when the set is empty
	size    int         // number of stored coordinates
	layout  geom.Layout // geometry layout of the stored coordinates
	stride  int         // values per coordinate, derived from layout
}
// NewTreeSet returns an empty TreeSet using the given layout and
// comparison strategy.
func NewTreeSet(layout geom.Layout, compare Compare) *TreeSet {
	set := &TreeSet{
		layout:  layout,
		stride:  layout.Stride(),
		compare: compare,
	}
	return set
}
// Insert adds a coordinate to the set. The coordinate must have at
// least as many values as the stride of the layout provided when
// constructing the TreeSet (it panics otherwise). It returns true if
// the coordinate was added, false if an equal coordinate was already
// present.
func (set *TreeSet) Insert(coord geom.Coord) bool {
	// Lazily derive the stride in case the TreeSet was created without
	// NewTreeSet and the field is still zero.
	if set.stride == 0 {
		set.stride = set.layout.Stride()
	}
	if len(coord) < set.stride {
		panic(fmt.Sprintf("Coordinate inserted into tree does not have a sufficient number of points for the provided layout. Length of Coord was %v but should have been %v", len(coord), set.stride))
	}
	tree, added := set.insertImpl(set.tree, coord)
	if added {
		set.tree = tree
		set.size++
	}
	return added
}
// ToFlatArray returns every coordinate in the set, in sorted order, as
// one flat []float64 of length size*stride.
// (Improvement: the per-component copy loop is replaced by the copy
// built-in; the destination is capped to exactly one coordinate since a
// stored value may carry more than stride components.)
func (set *TreeSet) ToFlatArray() []float64 {
	stride := set.layout.Stride()
	array := make([]float64, set.size*stride)
	i := 0
	set.walk(set.tree, func(v []float64) {
		copy(array[i:i+stride], v)
		i += stride
	})
	return array
}
// walk performs an in-order traversal of the subtree rooted at t,
// invoking visitor on each stored value. The right-branch recursion is
// unrolled into a loop.
func (set *TreeSet) walk(t *tree, visitor func([]float64)) {
	for t != nil {
		set.walk(t.left, visitor)
		visitor(t.value)
		t = t.right
	}
}
func (set *TreeSet) insertImpl(t *tree, v []float64) (*tree, bool) {
if t == nil {
return &tree{nil, v, nil}, true
}
if set.compare.IsEquals(geom.Coord(v), t.value) {
return t, false
}
var added bool
if set.compare.IsLess(geom.Coord(v), t.value) {
t.left, added = set.insertImpl(t.left, v)
} else {
t.right, added = set.insertImpl(t.right, v)
}
return t, added
} | vendor/github.com/whosonfirst/go-whosonfirst-static/vendor/github.com/whosonfirst/go-whosonfirst-readwrite-sqlite/vendor/github.com/whosonfirst/go-whosonfirst-sqlite-features/vendor/github.com/twpayne/go-geom/transform/tree_set.go | 0.872279 | 0.502747 | tree_set.go | starcoder |
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// A little test program for rational arithmetic.
// Computes a Hilbert matrix, its inverse, multiplies them
// and verifies that the product is the identity matrix.
package main
import Big "bignum"
import Fmt "fmt"
// assert panics if the condition does not hold.
func assert(p bool) {
	if p {
		return
	}
	panic("assert failed")
}
// Zero and One are shared rational constants.
var (
	Zero = Big.Rat(0, 1);
	One = Big.Rat(1, 1);
)

// Matrix is a dense n×m matrix of rationals stored in row-major order.
type Matrix struct {
	n, m int;
	a []*Big.Rational;
}
// at returns element (i, j); indices are bounds-checked via assert.
func (a *Matrix) at(i, j int) *Big.Rational {
	assert(0 <= i && i < a.n && 0 <= j && j < a.m);
	return a.a[i*a.m + j];
}

// set stores x at element (i, j); indices are bounds-checked via assert.
func (a *Matrix) set(i, j int, x *Big.Rational) {
	assert(0 <= i && i < a.n && 0 <= j && j < a.m);
	a.a[i*a.m + j] = x;
}

// NewMatrix allocates an n×m matrix whose elements are all nil.
func NewMatrix(n, m int) *Matrix {
	assert(0 <= n && 0 <= m);
	a := new(Matrix);
	a.n = n;
	a.m = m;
	a.a = make([]*Big.Rational, n*m);
	return a;
}
// NewUnit returns the n×n identity matrix.
func NewUnit(n int) *Matrix {
	a := NewMatrix(n, n);
	for i := 0; i < n; i++ {
		for j := 0; j < n; j++ {
			x := Zero;
			if i == j {
				x = One;  // ones on the diagonal
			}
			a.set(i, j, x);
		}
	}
	return a;
}

// NewHilbert returns the n×n Hilbert matrix, H[i][j] = 1/(i+j+1).
func NewHilbert(n int) *Matrix {
	a := NewMatrix(n, n);
	for i := 0; i < n; i++ {
		for j := 0; j < n; j++ {
			x := Big.Rat(1, int64(i + j + 1));
			a.set(i, j, x);
		}
	}
	return a;
}
// MakeRat converts a natural number into a (non-negative) rational.
func MakeRat(x Big.Natural) *Big.Rational {
	return Big.MakeRat(Big.MakeInt(false, x), Big.Nat(1));
}

// NewInverseHilbert returns the exact inverse of the n×n Hilbert matrix
// using the closed-form entry
//   (-1)^(i+j) * (i+j+1) * C(n+i, n-j-1) * C(n+j, n-i-1) * C(i+j, i)^2
func NewInverseHilbert(n int) *Matrix {
	a := NewMatrix(n, n);
	for i := 0; i < n; i++ {
		for j := 0; j < n; j++ {
			x0 := One;
			if (i+j)&1 != 0 {
				x0 = x0.Neg();  // sign factor (-1)^(i+j)
			}
			x1 := Big.Rat(int64(i + j + 1), 1);
			x2 := MakeRat(Big.Binomial(uint(n+i), uint(n-j-1)));
			x3 := MakeRat(Big.Binomial(uint(n+j), uint(n-i-1)));
			x4 := MakeRat(Big.Binomial(uint(i+j), uint(i)));
			x4 = x4.Mul(x4);  // the binomial C(i+j, i) enters squared
			a.set(i, j, x0.Mul(x1).Mul(x2).Mul(x3).Mul(x4));
		}
	}
	return a;
}
// Mul returns the matrix product a*b; the inner dimensions must agree.
func (a *Matrix) Mul(b *Matrix) *Matrix {
	assert(a.m == b.n);
	c := NewMatrix(a.n, b.m);
	for i := 0; i < c.n; i++ {
		for j := 0; j < c.m; j++ {
			x := Zero;
			for k := 0; k < a.m; k++ {
				x = x.Add(a.at(i, k).Mul(b.at(k, j)));
			}
			c.set(i, j, x);
		}
	}
	return c;
}

// Eql reports whether the two matrices have the same shape and
// element-wise equal values.
func (a *Matrix) Eql(b *Matrix) bool {
	if a.n != b.n || a.m != b.m {
		return false;
	}
	for i := 0; i < a.n; i++ {
		for j := 0; j < a.m; j++ {
			if a.at(i, j).Cmp(b.at(i,j)) != 0 {
				return false;
			}
		}
	}
	return true;
}
// String formats the matrix as tab-separated elements, one line per row.
func (a *Matrix) String() string {
	s := "";
	for i := 0; i < a.n; i++ {
		for j := 0; j < a.m; j++ {
			s += Fmt.Sprintf("\t%s", a.at(i, j));
		}
		s += "\n";
	}
	return s;
}
func main() {
n := 10;
a := NewHilbert(n);
b := NewInverseHilbert(n);
I := NewUnit(n);
ab := a.Mul(b);
if !ab.Eql(I) {
Fmt.Println("a =", a);
Fmt.Println("b =", b);
Fmt.Println("a*b =", ab);
Fmt.Println("I =", I);
panic("FAILED");
}
} | test/hilbert.go | 0.761716 | 0.46393 | hilbert.go | starcoder |
package assertions
import (
"fmt"
"reflect"
"strings"
)
// ShouldStartWith receives exactly 2 string parameters and ensures that
// the first starts with the second.
func ShouldStartWith(actual interface{}, expected ...interface{}) string {
	if fail := need(1, expected); fail != success {
		return fail
	}
	value, okValue := actual.(string)
	prefix, okPrefix := expected[0].(string)
	if okValue && okPrefix {
		return shouldStartWith(value, prefix)
	}
	return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0]))
}

// shouldStartWith yields success or a serialized failure message for
// the prefix check, truncating the reported value to roughly the
// prefix's length.
func shouldStartWith(value, prefix string) string {
	if strings.HasPrefix(value, prefix) {
		return success
	}
	shortval := value
	if len(shortval) > len(prefix) {
		shortval = shortval[:len(prefix)] + "..."
	}
	return serializer.serialize(prefix, shortval, fmt.Sprintf(shouldHaveStartedWith, value, prefix))
}
// ShouldNotStartWith receives exactly 2 string parameters and ensures
// that the first does not start with the second.
func ShouldNotStartWith(actual interface{}, expected ...interface{}) string {
	if fail := need(1, expected); fail != success {
		return fail
	}
	value, okValue := actual.(string)
	prefix, okPrefix := expected[0].(string)
	if okValue && okPrefix {
		return shouldNotStartWith(value, prefix)
	}
	return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0]))
}

// shouldNotStartWith fails when value begins with prefix, substituting
// "<empty>" for empty strings in the failure message.
func shouldNotStartWith(value, prefix string) string {
	if !strings.HasPrefix(value, prefix) {
		return success
	}
	if value == "" {
		value = "<empty>"
	}
	if prefix == "" {
		prefix = "<empty>"
	}
	return fmt.Sprintf(shouldNotHaveStartedWith, value, prefix)
}
// ShouldEndWith receives exactly 2 string parameters and ensures that the first ends with the second.
func ShouldEndWith(actual interface{}, expected ...interface{}) string {
	if fail := need(1, expected); fail != success {
		return fail
	}
	actualString, actualOk := actual.(string)
	suffixString, suffixOk := expected[0].(string)
	if actualOk && suffixOk {
		return shouldEndWith(actualString, suffixString)
	}
	return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0]))
}
func shouldEndWith(value, suffix string) string {
if !strings.HasSuffix(value, suffix) {
shortval := value
if len(shortval) > len(suffix) {
shortval = "..." + shortval[len(shortval)-len(suffix):]
}
return serializer.serialize(suffix, shortval, fmt.Sprintf(shouldHaveEndedWith, value, suffix))
}
return success
}
// ShouldNotEndWith receives exactly 2 string parameters and ensures that the first does not end with the second.
func ShouldNotEndWith(actual interface{}, expected ...interface{}) string {
	if fail := need(1, expected); fail != success {
		return fail
	}
	value, valueIsString := actual.(string)
	suffix, suffixIsString := expected[0].(string)
	if !valueIsString || !suffixIsString {
		return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0]))
	}
	return shouldNotEndWith(value, suffix)
}
func shouldNotEndWith(value, suffix string) string {
if strings.HasSuffix(value, suffix) {
if value == "" {
value = "<empty>"
}
if suffix == "" {
suffix = "<empty>"
}
return fmt.Sprintf(shouldNotHaveEndedWith, value, suffix)
}
return success
}
// ShouldContainSubstring receives exactly 2 string parameters and ensures that the first contains the second as a substring.
func ShouldContainSubstring(actual interface{}, expected ...interface{}) string {
	if fail := need(1, expected); fail != success {
		return fail
	}
	haystack, haystackOk := actual.(string)
	needle, needleOk := expected[0].(string)
	if !haystackOk || !needleOk {
		return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0]))
	}
	if strings.Contains(haystack, needle) {
		return success
	}
	return serializer.serialize(expected[0], actual, fmt.Sprintf(shouldHaveContainedSubstring, haystack, needle))
}
// ShouldNotContainSubstring receives exactly 2 string parameters and ensures that the first does NOT contain the second as a substring.
func ShouldNotContainSubstring(actual interface{}, expected ...interface{}) string {
	if fail := need(1, expected); fail != success {
		return fail
	}
	haystack, haystackOk := actual.(string)
	needle, needleOk := expected[0].(string)
	if !haystackOk || !needleOk {
		return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0]))
	}
	if !strings.Contains(haystack, needle) {
		return success
	}
	return fmt.Sprintf(shouldNotHaveContainedSubstring, haystack, needle)
}
// ShouldBeBlank receives exactly 1 string parameter and ensures that it is equal to "".
func ShouldBeBlank(actual interface{}, expected ...interface{}) string {
	if fail := need(0, expected); fail != success {
		return fail
	}
	str, isString := actual.(string)
	if !isString {
		return fmt.Sprintf(shouldBeString, reflect.TypeOf(actual))
	}
	if str == "" {
		return success
	}
	return serializer.serialize("", str, fmt.Sprintf(shouldHaveBeenBlank, str))
}
// ShouldNotBeBlank receives exactly 1 string parameter and ensures that it is NOT equal to "".
func ShouldNotBeBlank(actual interface{}, expected ...interface{}) string {
	if fail := need(0, expected); fail != success {
		return fail
	}
	value, ok := actual.(string)
	if !ok {
		return fmt.Sprintf(shouldBeString, reflect.TypeOf(actual))
	}
	if value == "" {
		return shouldNotHaveBeenBlank
	}
	return success
} | Godeps/_workspace/src/github.com/smartystreets/goconvey/convey/assertions/strings.go | 0.761804 | 0.60054 | strings.go | starcoder
Package indexer provides tools to define, use, and update state indexers.
State service
The state service stores gateway state as keyed blobs.
Examples
- IMSI -> directory record blob
- HWID -> gateway status blob
Since state values are stored as arbitrary serialized blobs, the state
service has no semantic understanding of stored values. This means searching
over stored values would otherwise require an O(n) operation.
Examples
- Find IMSI with given IP -- must load all directory records into memory
- Find all gateways that haven't checked in recently -- must load all
gateway statuses into memory
Derived state
The solution is to provide customizable, online mechanisms for generating
derived state based on existing state. Existing, "primary" state is stored
in the state service, and derived, "secondary" state is stored in whichever
service owns the derived state.
Examples
- Reverse map of directory records
- Primary state: IMSI -> directory record
- Secondary state: IP -> IMSI (stored in e.g. directoryd)
- Reverse map of gateway checkin time
- Primary state: HWID -> gateway status
- Secondary state: checkin time -> HWID (stored in e.g. metricsd)
- List all gateways with multiple kernel versions installed
- Primary state: HWID -> gateway status
- Secondary state: list of gateways (stored in e.g. bootstrapper)
State indexers
State indexers are Orchestrator services registering an IndexerServer under
their gRPC endpoint. Any Orchestrator service can provide its own indexer
servicer.
The state service discovers indexers using K8s labels. Any service with the
label "orc8r.io/state_indexer" will be assumed to provide an indexer
servicer.
Indexers provide two additional pieces of metadata -- version and types.
- version: positive integer indicating when the indexer requires reindexing
- types: list of state types the indexer subscribes to
These metadata are indicated by K8s annotations
- orc8r.io/state_indexer_version -- positive integer
- orc8r.io/state_indexer_types -- comma-separated list of state types
Reindexing
When an indexer's implementation changes, its derived state needs to be
refreshed. This is accomplished by sending all existing state (of desired
types) through the now-updated indexer.
An indexer indicates it needs to undergo a reindex by incrementing its
version (exposed via the above-mentioned annotation). From there, the state
service automatically handles the reindexing process.
Metrics and logging are available to track long-running reindex processes,
as well as an indexers CLI which reports desired and current indexer versions.
Implementing a custom indexer
To create a custom indexer, attach an IndexerServer to a new or existing
Orchestrator service.
A service can only attach a single indexer. However, that indexer can choose
to multiplex its functionality over any desired number of "logical"
indexers.
See the orchestrator service for an example custom indexer.
Notes
The state indexer pattern currently provides no mechanism for connecting
primary and secondary state. This means secondary state can go stale.
Where relevant, consumers of secondary state should take this into account,
generally by checking the primary state to ensure it agrees with the
secondary state.
Examples
- Reverse map of directory records
- Get IMSI from IP -> IMSI map (secondary state)
- Ensure the directory record in the IMSI -> directory map contains
the desired IP (primary state)
- Reverse map of gateway checkin time
- Get HWIDs from checkin time -> HWID map (secondary state)
- For each HWID, ensure the gateway status in the HWID -> gateway
status map contains the relevant checkin time (primary state)
Automatic reindexing is only supported with Postgres. Deployments targeting
Maria will need to use the indexer CLI to manually trigger reindex
operations.
There is a trivial but existent race condition during the reindex process.
Since the index and reindex operations both use the Index gRPC method,
and the index and reindex operations operate in parallel, it's possible for
an indexer to receive an outdated piece of state from the reindexer.
However, this requires
- reindexer read old state
- new state reported, indexer read new state
- indexer Index call completed
- reindexer Index call completed
If this race condition is intolerable to the desired use case, the solution
is to separate out the Index call into Index and Reindex methods. This is
not currently implemented as we don't have a concrete use-case for it yet.
*/
package indexer | orc8r/cloud/go/services/state/indexer/doc.go | 0.655887 | 0.669421 | doc.go | starcoder |
package processor
import (
"fmt"
"time"
"github.com/Jeffail/benthos/v3/internal/bloblang/field"
"github.com/Jeffail/benthos/v3/internal/docs"
"github.com/Jeffail/benthos/v3/internal/interop"
"github.com/Jeffail/benthos/v3/lib/log"
"github.com/Jeffail/benthos/v3/lib/message"
"github.com/Jeffail/benthos/v3/lib/message/tracing"
"github.com/Jeffail/benthos/v3/lib/metrics"
"github.com/Jeffail/benthos/v3/lib/response"
"github.com/Jeffail/benthos/v3/lib/types"
olog "github.com/opentracing/opentracing-go/log"
)
//------------------------------------------------------------------------------
// init registers the group_by_value processor spec, including its
// user-facing documentation, in the global Constructors table.
func init() {
	Constructors[TypeGroupByValue] = TypeSpec{
		constructor: NewGroupByValue,
		Categories: []Category{
			CategoryComposition,
		},
		Summary: `
Splits a batch of messages into N batches, where each resulting batch contains a
group of messages determined by a
[function interpolated string](/docs/configuration/interpolation#bloblang-queries) evaluated
per message.`,
		Description: `
This allows you to group messages using arbitrary fields within their content or
metadata, process them individually, and send them to unique locations as per
their group.`,
		Footnotes: `
## Examples
If we were consuming Kafka messages and needed to group them by their key,
archive the groups, and send them to S3 with the key as part of the path we
could achieve that with the following:
` + "```yaml" + `
pipeline:
  processors:
    - group_by_value:
        value: ${! meta("kafka_key") }
    - archive:
        format: tar
    - compress:
        algorithm: gzip
output:
  s3:
    bucket: TODO
    path: docs/${! meta("kafka_key") }/${! count("files") }-${! timestamp_unix_nano() }.tar.gz
` + "```" + ``,
		FieldSpecs: docs.FieldSpecs{
			docs.FieldCommon(
				"value", "The interpolated string to group based on.",
				"${! meta(\"kafka_key\") }", "${! json(\"foo.bar\") }-${! meta(\"baz\") }",
			).IsInterpolated(),
		},
		UsesBatches: true,
	}
}
//------------------------------------------------------------------------------
// GroupByValueConfig is a configuration struct containing fields for the
// GroupByValue processor, which breaks message batches down into N batches of a
// smaller size according to a function interpolated string evaluated per
// message part.
type GroupByValueConfig struct {
	// Value is the interpolated expression whose per-message result
	// determines the group each message is placed in.
	Value string `json:"value" yaml:"value"`
}
// NewGroupByValueConfig returns a GroupByValueConfig populated with
// default values.
func NewGroupByValueConfig() GroupByValueConfig {
	conf := GroupByValueConfig{}
	conf.Value = "${! meta(\"example\") }"
	return conf
}
//------------------------------------------------------------------------------
// GroupByValue is a processor that breaks message batches down into N batches
// of a smaller size according to a function interpolated string evaluated per
// message part.
type GroupByValue struct {
	log   log.Modular
	stats metrics.Type
	value *field.Expression // parsed grouping expression
	mCount     metrics.StatCounter // batches processed
	mGroups    metrics.StatGauge   // number of groups formed by the last batch
	mSent      metrics.StatCounter // messages sent
	mBatchSent metrics.StatCounter // batches sent
}
// NewGroupByValue returns a GroupByValue processor, or an error when the
// configured grouping expression fails to parse.
func NewGroupByValue(
	conf Config, mgr types.Manager, log log.Modular, stats metrics.Type,
) (Type, error) {
	// Parse the grouping expression once up front so a bad config fails fast.
	value, err := interop.NewBloblangField(mgr, conf.GroupByValue.Value)
	if err != nil {
		return nil, fmt.Errorf("failed to parse value expression: %v", err)
	}
	return &GroupByValue{
		log:   log,
		stats: stats,
		value: value,
		mCount:     stats.GetCounter("count"),
		mGroups:    stats.GetGauge("groups"),
		mSent:      stats.GetCounter("sent"),
		mBatchSent: stats.GetCounter("batch.sent"),
	}, nil
}
//------------------------------------------------------------------------------
// ProcessMessage applies the processor to a message, splitting the batch
// into one batch per distinct value of the grouping expression. Output
// batches preserve the order in which each group was first seen. An empty
// input batch is acked with no output.
func (g *GroupByValue) ProcessMessage(msg types.Message) ([]types.Message, types.Response) {
	g.mCount.Incr(1)
	if msg.Len() == 0 {
		return nil, response.NewAck()
	}
	// groupKeys records first-seen order; groupMap accumulates the parts
	// belonging to each group value.
	groupKeys := []string{}
	groupMap := map[string]types.Message{}
	spans := tracing.CreateChildSpans(TypeGroupByValue, msg)
	msg.Iter(func(i int, p types.Part) error {
		v := g.value.String(i, msg)
		spans[i].LogFields(
			olog.String("event", "grouped"),
			olog.String("type", v),
		)
		spans[i].SetTag("group", v)
		if group, exists := groupMap[v]; exists {
			group.Append(p)
		} else {
			g.log.Tracef("New group formed: %v\n", v)
			groupKeys = append(groupKeys, v)
			newMsg := message.New(nil)
			newMsg.Append(p)
			groupMap[v] = newMsg
		}
		return nil
	})
	// Finish all tracing spans before emitting the grouped batches.
	for _, s := range spans {
		s.Finish()
	}
	msgs := []types.Message{}
	for _, key := range groupKeys {
		msgs = append(msgs, groupMap[key])
	}
	g.mGroups.Set(int64(len(groupKeys)))
	if len(msgs) == 0 {
		return nil, response.NewAck()
	}
	g.mBatchSent.Incr(int64(len(msgs)))
	for _, m := range msgs {
		g.mSent.Incr(int64(m.Len()))
	}
	return msgs, nil
}
// CloseAsync shuts down the processor and stops processing requests.
// This processor holds no background resources, so it is a no-op.
func (g *GroupByValue) CloseAsync() {
}
// WaitForClose blocks until the processor has closed down. There is
// nothing to wait for here, so it always returns nil immediately.
func (g *GroupByValue) WaitForClose(timeout time.Duration) error {
	return nil
}
//------------------------------------------------------------------------------ | lib/processor/group_by_value.go | 0.727685 | 0.579817 | group_by_value.go | starcoder |
package lcio
import (
"bytes"
"fmt"
"strings"
"go-hep.org/x/hep/sio"
)
// RecParticleContainer is a collection of RecParticles.
type RecParticleContainer struct {
	Flags  Flags  // collection flag word (printed as hex by String)
	Params Params // collection parameters
	Parts  []RecParticle
}
// RecParticle is a single reconstructed particle.
type RecParticle struct {
	Type          int32       // particle type identifier
	P             [3]float32  // momentum (Px,PyPz)
	Energy        float32     // energy of particle
	Cov           [10]float32 // covariance matrix for 4-vector (Px,Py,Pz,E)
	Mass          float32     // mass of object used for 4-vector
	Charge        float32     // charge of particle
	Ref           [3]float32  // reference point of 4-vector
	PIDs          []ParticleID // particle-identification hypotheses
	PIDUsed       *ParticleID  // presumably the hypothesis actually used (shown as "pidUsed" by String) — confirm
	GoodnessOfPID float32 // overall quality of the particle identification
	Recs          []*RecParticle // constituents; non-empty marks this as a compound particle
	Tracks        []*Track
	Clusters      []*Cluster
	StartVtx      *Vertex // start vertex associated to the particle
}
// ParticleID is one particle-identification hypothesis attached to a
// RecParticle.
type ParticleID struct {
	Likelihood float32
	Type       int32
	PDG        int32 // presumably the PDG particle code — confirm against LCIO docs
	AlgType    int32 // presumably identifies the producing algorithm — confirm
	Params     []float32
}
// String renders the collection as a fixed-width table, one row per
// particle, preceded by the collection flag word and parameters.
func (recs *RecParticleContainer) String() string {
	o := new(bytes.Buffer)
	fmt.Fprintf(o, "%[1]s print out of ReconstructedParticle collection %[1]s\n\n", strings.Repeat("-", 15))
	fmt.Fprintf(o, " flag: 0x%x\n%v", recs.Flags, recs.Params)
	fmt.Fprintf(o, "\n")
	const (
		head = " [ id ] |com|type| momentum( px,py,pz) | energy | mass | charge | position ( x,y,z) | pidUsed |GoodnessOfPID|\n"
		tail = "------------|---|----|-------------------------------|--------|--------|---------|-------------------------------|---------|-------------|\n"
	)
	// head and tail are literal text, not format strings; write them
	// directly rather than passing them as Fprintf formats.
	o.WriteString(head)
	o.WriteString(tail)
	for i := range recs.Parts {
		rec := &recs.Parts[i]
		// The "com" column flags compound particles (those with constituents).
		compound := 0
		if len(rec.Recs) > 0 {
			compound = 1
		}
		fmt.Fprintf(o,
			"[%09d] |%3d|%4d|%+.2e, %+.2e, %+.2e|%.2e|%.2e|%+.2e|%+.2e, %+.2e, %+.2e|%09d|%+.2e| \n",
			ID(rec),
			compound, rec.Type,
			rec.P[0], rec.P[1], rec.P[2], rec.Energy, rec.Mass, rec.Charge,
			rec.Ref[0], rec.Ref[1], rec.Ref[2],
			ID(rec.PIDUsed),
			rec.GoodnessOfPID,
		)
	}
	// bytes.Buffer.String avoids the extra copy made by string(o.Bytes()).
	return o.String()
}
// VersionSio returns the SIO serialization version written by this package.
func (*RecParticleContainer) VersionSio() uint32 {
	return Version
}
// MarshalSio writes the container to w in SIO format. The field order
// here is the wire format and must mirror UnmarshalSio exactly.
// Cross-references (PIDUsed, Recs, Tracks, Clusters, StartVtx) are
// written as SIO pointers, to be resolved by the reader.
func (recs *RecParticleContainer) MarshalSio(w sio.Writer) error {
	enc := sio.NewEncoder(w)
	enc.Encode(&recs.Flags)
	enc.Encode(&recs.Params)
	enc.Encode(int32(len(recs.Parts)))
	for i := range recs.Parts {
		rec := &recs.Parts[i]
		enc.Encode(&rec.Type)
		enc.Encode(&rec.P)
		enc.Encode(&rec.Energy)
		enc.Encode(&rec.Cov)
		enc.Encode(&rec.Mass)
		enc.Encode(&rec.Charge)
		enc.Encode(&rec.Ref)
		// Each PID hypothesis is length-prefixed and tagged so it can be
		// the target of pointers elsewhere in the stream.
		enc.Encode(int32(len(rec.PIDs)))
		for i := range rec.PIDs {
			pid := &rec.PIDs[i]
			enc.Encode(&pid.Likelihood)
			enc.Encode(&pid.Type)
			enc.Encode(&pid.PDG)
			enc.Encode(&pid.AlgType)
			enc.Encode(&pid.Params)
			enc.Tag(pid)
		}
		enc.Pointer(&rec.PIDUsed)
		enc.Encode(&rec.GoodnessOfPID)
		enc.Encode(int32(len(rec.Recs)))
		for i := range rec.Recs {
			enc.Pointer(&rec.Recs[i])
		}
		enc.Encode(int32(len(rec.Tracks)))
		for i := range rec.Tracks {
			enc.Pointer(&rec.Tracks[i])
		}
		enc.Encode(int32(len(rec.Clusters)))
		for i := range rec.Clusters {
			enc.Pointer(&rec.Clusters[i])
		}
		enc.Pointer(&rec.StartVtx)
		enc.Tag(rec)
	}
	return enc.Err()
}
// UnmarshalSio reads the container from r, mirroring MarshalSio's field
// order. Files with SIO version <= 1002 are rejected; StartVtx is only
// present in streams with version > 1007.
func (recs *RecParticleContainer) UnmarshalSio(r sio.Reader) error {
	dec := sio.NewDecoder(r)
	dec.Decode(&recs.Flags)
	dec.Decode(&recs.Params)
	var n int32
	dec.Decode(&n)
	recs.Parts = make([]RecParticle, int(n))
	if r.VersionSio() <= 1002 {
		return fmt.Errorf("lcio: too old file (%d)", r.VersionSio())
	}
	for i := range recs.Parts {
		rec := &recs.Parts[i]
		dec.Decode(&rec.Type)
		dec.Decode(&rec.P)
		dec.Decode(&rec.Energy)
		dec.Decode(&rec.Cov)
		dec.Decode(&rec.Mass)
		dec.Decode(&rec.Charge)
		dec.Decode(&rec.Ref)
		// n shadows the outer count on purpose: it is reused for each
		// per-particle length prefix below.
		var n int32
		dec.Decode(&n)
		rec.PIDs = make([]ParticleID, int(n))
		for i := range rec.PIDs {
			pid := &rec.PIDs[i]
			dec.Decode(&pid.Likelihood)
			dec.Decode(&pid.Type)
			dec.Decode(&pid.PDG)
			dec.Decode(&pid.AlgType)
			dec.Decode(&pid.Params)
			dec.Tag(pid)
		}
		dec.Pointer(&rec.PIDUsed)
		dec.Decode(&rec.GoodnessOfPID)
		dec.Decode(&n)
		rec.Recs = make([]*RecParticle, int(n))
		for i := range rec.Recs {
			dec.Pointer(&rec.Recs[i])
		}
		dec.Decode(&n)
		rec.Tracks = make([]*Track, int(n))
		for i := range rec.Tracks {
			dec.Pointer(&rec.Tracks[i])
		}
		dec.Decode(&n)
		rec.Clusters = make([]*Cluster, int(n))
		for i := range rec.Clusters {
			dec.Pointer(&rec.Clusters[i])
		}
		if r.VersionSio() > 1007 {
			dec.Pointer(&rec.StartVtx)
		}
		dec.Tag(rec)
	}
	return dec.Err()
}
// Compile-time checks that RecParticleContainer satisfies the sio interfaces.
var (
	_ sio.Versioner = (*RecParticleContainer)(nil)
	_ sio.Codec     = (*RecParticleContainer)(nil)
) | lcio/recparticle.go | 0.534612 | 0.409457 | recparticle.go | starcoder
package gol
// Rules:
// Any live cell with fewer than two live neighbours dies, as if caused by underpopulation.
// Any live cell with two or three live neighbours lives on to the next generation.
// Any live cell with more than three live neighbours dies, as if by overpopulation.
// Any dead cell with exactly three live neighbours becomes a live cell, as if by reproduction.
// Coord is a cell position on the square board.
type Coord struct {
	X, Y int
}

// valid reports whether the coordinate lies inside a size×size board.
func (c *Coord) valid(size int) bool {
	if c.X < 0 || c.Y < 0 {
		return false
	}
	return c.X < size && c.Y < size
}
// Universe holds the previous and current generations as sets of live
// cell coordinates, plus the side length of the square board.
type Universe struct {
	prev    map[Coord]struct{} // live cells of the previous generation (snapshot for neighbour counts)
	current map[Coord]struct{} // live cells of the current generation
	Size    int                // board side length
}
// Iterate advances the universe by one generation, applying the standard
// Game of Life rules (see the header comment) to every cell on the board.
func (u *Universe) Iterate() {
	// Snapshot the current generation; neighbour counts are taken against
	// this snapshot while the new generation is rebuilt from scratch.
	u.prev = make(map[Coord]struct{})
	for k := range u.current {
		u.prev[k] = struct{}{}
	}
	u.current = make(map[Coord]struct{})
	var aliveN int
	for row := 0; row < u.Size; row++ {
		for col := 0; col < u.Size; col++ {
			c := Coord{row, col}
			aliveN, _ = u.neighbours(c.X, c.Y)
			if _, alive := u.prev[c]; alive {
				// A live cell survives with exactly 2 or 3 live neighbours.
				if aliveN == 2 || aliveN == 3 {
					u.current[c] = struct{}{}
				}
			} else {
				// A dead cell is born with exactly 3 live neighbours.
				if aliveN == 3 {
					u.current[c] = struct{}{}
				}
			}
		}
	}
}
// neighbours counts the alive and dead cells among the up-to-eight
// in-board neighbours of (row, col), based on the previous generation.
func (u *Universe) neighbours(row, col int) (int, int) {
	var alive, dead int
	for dr := -1; dr <= 1; dr++ {
		for dc := -1; dc <= 1; dc++ {
			if dr == 0 && dc == 0 {
				continue
			}
			c := Coord{row + dr, col + dc}
			if !c.valid(u.Size) {
				continue
			}
			if _, present := u.prev[c]; present {
				alive++
			} else {
				dead++
			}
		}
	}
	return alive, dead
}
// Delta reports the cells that changed in the last Iterate call:
// live holds cells that were born, dead holds cells that died.
func (u *Universe) Delta() (live []Coord, dead []Coord) {
	for c := range u.current {
		if _, alive := u.prev[c]; !alive {
			live = append(live, c)
		}
	}
	for c := range u.prev {
		if _, alive := u.current[c]; !alive {
			dead = append(dead, c)
		}
	}
	return
}
// New returns a Universe with a size×size board and no live cells.
func New(size int) Universe {
	return Universe{
		prev:    make(map[Coord]struct{}),
		current: make(map[Coord]struct{}),
		Size:    size,
	}
}
// AddSeed marks the cell at (row, col) as alive in the current generation.
func (u *Universe) AddSeed(row, col int) {
	u.current[Coord{X: row, Y: col}] = struct{}{}
} | gol.go | 0.691602 | 0.430925 | gol.go | starcoder
package iso20022
// OriginalTransactionReference16 holds the key elements used to refer to
// the original transaction.
type OriginalTransactionReference16 struct {
	// Amount of money moved between the instructing agent and the instructed agent.
	InterbankSettlementAmount *ActiveOrHistoricCurrencyAndAmount `xml:"IntrBkSttlmAmt,omitempty"`
	// Amount of money to be moved between the debtor and creditor, before deduction of charges, expressed in the currency as ordered by the initiating party.
	Amount *AmountType3Choice `xml:"Amt,omitempty"`
	// Date on which the amount of money ceases to be available to the agent that owes it and when the amount of money becomes available to the agent to which it is due.
	InterbankSettlementDate *ISODate `xml:"IntrBkSttlmDt,omitempty"`
	// Date and time at which the creditor requests that the amount of money is to be collected from the debtor.
	RequestedCollectionDate *ISODate `xml:"ReqdColltnDt,omitempty"`
	// Date at which the initiating party requests the clearing agent to process the payment.
	// Usage: This is the date on which the debtor's account is to be debited. If payment by cheque, the date when the cheque must be generated by the bank.
	RequestedExecutionDate *ISODate `xml:"ReqdExctnDt,omitempty"`
	// Credit party that signs the mandate.
	CreditorSchemeIdentification *PartyIdentification43 `xml:"CdtrSchmeId,omitempty"`
	// Specifies the details on how the settlement of the original transaction(s) between the instructing agent and the instructed agent was completed.
	SettlementInformation *SettlementInstruction4 `xml:"SttlmInf,omitempty"`
	// Set of elements used to further specify the type of transaction.
	PaymentTypeInformation *PaymentTypeInformation25 `xml:"PmtTpInf,omitempty"`
	// Specifies the means of payment that will be used to move the amount of money.
	PaymentMethod *PaymentMethod4Code `xml:"PmtMtd,omitempty"`
	// Provides further details of the mandate signed between the creditor and the debtor.
	MandateRelatedInformation *MandateRelatedInformation8 `xml:"MndtRltdInf,omitempty"`
	// Information supplied to enable the matching of an entry with the items that the transfer is intended to settle, such as commercial invoices in an accounts' receivable system.
	RemittanceInformation *RemittanceInformation7 `xml:"RmtInf,omitempty"`
	// Ultimate party that owes an amount of money to the (ultimate) creditor.
	UltimateDebtor *PartyIdentification43 `xml:"UltmtDbtr,omitempty"`
	// Party that owes an amount of money to the (ultimate) creditor.
	Debtor *PartyIdentification43 `xml:"Dbtr,omitempty"`
	// Unambiguous identification of the account of the debtor to which a debit entry will be made as a result of the transaction.
	DebtorAccount *CashAccount24 `xml:"DbtrAcct,omitempty"`
	// Financial institution servicing an account for the debtor.
	DebtorAgent *BranchAndFinancialInstitutionIdentification5 `xml:"DbtrAgt,omitempty"`
	// Unambiguous identification of the account of the debtor agent at its servicing agent in the payment chain.
	DebtorAgentAccount *CashAccount24 `xml:"DbtrAgtAcct,omitempty"`
	// Financial institution servicing an account for the creditor.
	CreditorAgent *BranchAndFinancialInstitutionIdentification5 `xml:"CdtrAgt,omitempty"`
	// Unambiguous identification of the account of the creditor agent at its servicing agent to which a credit entry will be made as a result of the payment transaction.
	CreditorAgentAccount *CashAccount24 `xml:"CdtrAgtAcct,omitempty"`
	// Party to which an amount of money is due.
	Creditor *PartyIdentification43 `xml:"Cdtr,omitempty"`
	// Unambiguous identification of the account of the creditor to which a credit entry will be posted as a result of the payment transaction.
	CreditorAccount *CashAccount24 `xml:"CdtrAcct,omitempty"`
	// Ultimate party to which an amount of money is due.
	UltimateCreditor *PartyIdentification43 `xml:"UltmtCdtr,omitempty"`
}
// The accessors below follow the generated ISO 20022 builder pattern:
// Set* methods convert and assign scalar values, while Add* methods
// allocate the child element and return it for the caller to populate.
func (o *OriginalTransactionReference16) SetInterbankSettlementAmount(value, currency string) {
	o.InterbankSettlementAmount = NewActiveOrHistoricCurrencyAndAmount(value, currency)
}
func (o *OriginalTransactionReference16) AddAmount() *AmountType3Choice {
	o.Amount = new(AmountType3Choice)
	return o.Amount
}
func (o *OriginalTransactionReference16) SetInterbankSettlementDate(value string) {
	o.InterbankSettlementDate = (*ISODate)(&value)
}
func (o *OriginalTransactionReference16) SetRequestedCollectionDate(value string) {
	o.RequestedCollectionDate = (*ISODate)(&value)
}
func (o *OriginalTransactionReference16) SetRequestedExecutionDate(value string) {
	o.RequestedExecutionDate = (*ISODate)(&value)
}
func (o *OriginalTransactionReference16) AddCreditorSchemeIdentification() *PartyIdentification43 {
	o.CreditorSchemeIdentification = new(PartyIdentification43)
	return o.CreditorSchemeIdentification
}
func (o *OriginalTransactionReference16) AddSettlementInformation() *SettlementInstruction4 {
	o.SettlementInformation = new(SettlementInstruction4)
	return o.SettlementInformation
}
func (o *OriginalTransactionReference16) AddPaymentTypeInformation() *PaymentTypeInformation25 {
	o.PaymentTypeInformation = new(PaymentTypeInformation25)
	return o.PaymentTypeInformation
}
func (o *OriginalTransactionReference16) SetPaymentMethod(value string) {
	o.PaymentMethod = (*PaymentMethod4Code)(&value)
}
func (o *OriginalTransactionReference16) AddMandateRelatedInformation() *MandateRelatedInformation8 {
	o.MandateRelatedInformation = new(MandateRelatedInformation8)
	return o.MandateRelatedInformation
}
func (o *OriginalTransactionReference16) AddRemittanceInformation() *RemittanceInformation7 {
	o.RemittanceInformation = new(RemittanceInformation7)
	return o.RemittanceInformation
}
func (o *OriginalTransactionReference16) AddUltimateDebtor() *PartyIdentification43 {
	o.UltimateDebtor = new(PartyIdentification43)
	return o.UltimateDebtor
}
func (o *OriginalTransactionReference16) AddDebtor() *PartyIdentification43 {
	o.Debtor = new(PartyIdentification43)
	return o.Debtor
}
func (o *OriginalTransactionReference16) AddDebtorAccount() *CashAccount24 {
	o.DebtorAccount = new(CashAccount24)
	return o.DebtorAccount
}
func (o *OriginalTransactionReference16) AddDebtorAgent() *BranchAndFinancialInstitutionIdentification5 {
	o.DebtorAgent = new(BranchAndFinancialInstitutionIdentification5)
	return o.DebtorAgent
}
func (o *OriginalTransactionReference16) AddDebtorAgentAccount() *CashAccount24 {
	o.DebtorAgentAccount = new(CashAccount24)
	return o.DebtorAgentAccount
}
func (o *OriginalTransactionReference16) AddCreditorAgent() *BranchAndFinancialInstitutionIdentification5 {
	o.CreditorAgent = new(BranchAndFinancialInstitutionIdentification5)
	return o.CreditorAgent
}
func (o *OriginalTransactionReference16) AddCreditorAgentAccount() *CashAccount24 {
	o.CreditorAgentAccount = new(CashAccount24)
	return o.CreditorAgentAccount
}
func (o *OriginalTransactionReference16) AddCreditor() *PartyIdentification43 {
	o.Creditor = new(PartyIdentification43)
	return o.Creditor
}
func (o *OriginalTransactionReference16) AddCreditorAccount() *CashAccount24 {
	o.CreditorAccount = new(CashAccount24)
	return o.CreditorAccount
}
func (o *OriginalTransactionReference16) AddUltimateCreditor() *PartyIdentification43 {
	o.UltimateCreditor = new(PartyIdentification43)
	return o.UltimateCreditor
} | OriginalTransactionReference16.go | 0.783947 | 0.456652 | OriginalTransactionReference16.go | starcoder
This is an unoptimized version based on the description in RFC 3713.
References:
http://en.wikipedia.org/wiki/Camellia_%28cipher%29
https://info.isl.ntt.co.jp/crypt/eng/camellia/
*/
package camellia
import (
"crypto/cipher"
"encoding/binary"
"math/bits"
"strconv"
)
const BlockSize = 16
// KeySizeError reports an unsupported key length passed to New.
type KeySizeError int

// Error implements the error interface.
func (k KeySizeError) Error() string {
	return "camellia: invalid key size " + strconv.FormatInt(int64(k), 10)
}
// camelliaCipher holds the expanded key schedule. All subkey arrays are
// indexed 1-based (element 0 unused), matching the RFC 3713 notation.
type camelliaCipher struct {
	kw   [5]uint64  // pre/post-whitening subkeys kw[1..4]
	k    [25]uint64 // round subkeys k[1..18] (128-bit keys) or k[1..24]
	ke   [7]uint64  // FL/FLINV-layer subkeys ke[1..4] or ke[1..6]
	klen int        // key length in bytes: 16, 24 or 32
}
// Key-schedule constants Sigma1..Sigma6 from the Camellia
// specification (RFC 3713, referenced in the file header).
const (
	sigma1 = 0xA09E667F3BCC908B
	sigma2 = 0xB67AE8584CAA73B2
	sigma3 = 0xC6EF372FE94F82BE
	sigma4 = 0x54FF53A5F1D36F1C
	sigma5 = 0x10E527FADE682D1D
	sigma6 = 0xB05688C2B3E6C1FD
)
// init derives sbox2..sbox4 from sbox1:
// sbox2[x] = rotl1(sbox1[x]), sbox3[x] = rotl7(sbox1[x]),
// sbox4[x] = sbox1[rotl1(x)].
func init() {
	// initialize other sboxes
	for i := range sbox1 {
		sbox2[i] = bits.RotateLeft8(sbox1[i], 1)
		sbox3[i] = bits.RotateLeft8(sbox1[i], 7)
		sbox4[i] = sbox1[bits.RotateLeft8(uint8(i), 1)]
	}
}
// rotl128 rotates the 128-bit value k (big-endian halves: k[0] high,
// k[1] low) left by rot bits and returns the resulting halves.
func rotl128(k [2]uint64, rot uint) (hi, lo uint64) {
	hi, lo = k[0], k[1]
	if rot > 64 {
		// Reduce to a rotation of at most 64 by swapping the halves first.
		hi, lo = lo, hi
		rot -= 64
	}
	// Note: for rot == 0 or rot == 64 the out-of-range shifts below
	// evaluate to 0 in Go, which yields the correct result.
	newHi := (hi << rot) | (lo >> (64 - rot))
	newLo := (lo << rot) | (hi >> (64 - rot))
	return newHi, newLo
}
// New creates and returns a new cipher.Block.
// The key argument should be 16, 24, or 32 bytes; any other length
// yields a KeySizeError.
func New(key []byte) (cipher.Block, error) {
	klen := len(key)
	switch klen {
	default:
		return nil, KeySizeError(klen)
	case 16, 24, 32:
		break
	}
	var d1, d2 uint64
	var kl [2]uint64
	var kr [2]uint64
	var ka [2]uint64
	var kb [2]uint64
	// KL is the first 128 bits of the key; KR depends on the key size
	// (zero for 128-bit keys, extended for 192-bit keys).
	kl[0] = binary.BigEndian.Uint64(key[0:])
	kl[1] = binary.BigEndian.Uint64(key[8:])
	switch klen {
	case 24:
		kr[0] = binary.BigEndian.Uint64(key[16:])
		kr[1] = ^kr[0]
	case 32:
		kr[0] = binary.BigEndian.Uint64(key[16:])
		kr[1] = binary.BigEndian.Uint64(key[24:])
	}
	// Derive KA from KL/KR with four F-function rounds keyed by the
	// sigma constants (RFC 3713 key schedule).
	d1 = (kl[0] ^ kr[0])
	d2 = (kl[1] ^ kr[1])
	d2 = d2 ^ f(d1, sigma1)
	d1 = d1 ^ f(d2, sigma2)
	d1 = d1 ^ (kl[0])
	d2 = d2 ^ (kl[1])
	d2 = d2 ^ f(d1, sigma3)
	d1 = d1 ^ f(d2, sigma4)
	ka[0] = d1
	ka[1] = d2
	// Derive KB (only used for 192/256-bit keys).
	d1 = (ka[0] ^ kr[0])
	d2 = (ka[1] ^ kr[1])
	d2 = d2 ^ f(d1, sigma5)
	d1 = d1 ^ f(d2, sigma6)
	kb[0] = d1
	kb[1] = d2
	// here we generate our keys: every subkey is a fixed rotation of
	// KL, KR, KA or KB, per the tables in RFC 3713.
	c := new(camelliaCipher)
	c.klen = klen
	if klen == 16 {
		c.kw[1], c.kw[2] = rotl128(kl, 0)
		c.k[1], c.k[2] = rotl128(ka, 0)
		c.k[3], c.k[4] = rotl128(kl, 15)
		c.k[5], c.k[6] = rotl128(ka, 15)
		c.ke[1], c.ke[2] = rotl128(ka, 30)
		c.k[7], c.k[8] = rotl128(kl, 45)
		c.k[9], _ = rotl128(ka, 45)
		_, c.k[10] = rotl128(kl, 60)
		c.k[11], c.k[12] = rotl128(ka, 60)
		c.ke[3], c.ke[4] = rotl128(kl, 77)
		c.k[13], c.k[14] = rotl128(kl, 94)
		c.k[15], c.k[16] = rotl128(ka, 94)
		c.k[17], c.k[18] = rotl128(kl, 111)
		c.kw[3], c.kw[4] = rotl128(ka, 111)
	} else {
		// 24 or 32
		c.kw[1], c.kw[2] = rotl128(kl, 0)
		c.k[1], c.k[2] = rotl128(kb, 0)
		c.k[3], c.k[4] = rotl128(kr, 15)
		c.k[5], c.k[6] = rotl128(ka, 15)
		c.ke[1], c.ke[2] = rotl128(kr, 30)
		c.k[7], c.k[8] = rotl128(kb, 30)
		c.k[9], c.k[10] = rotl128(kl, 45)
		c.k[11], c.k[12] = rotl128(ka, 45)
		c.ke[3], c.ke[4] = rotl128(kl, 60)
		c.k[13], c.k[14] = rotl128(kr, 60)
		c.k[15], c.k[16] = rotl128(kb, 60)
		c.k[17], c.k[18] = rotl128(kl, 77)
		c.ke[5], c.ke[6] = rotl128(ka, 77)
		c.k[19], c.k[20] = rotl128(kr, 94)
		c.k[21], c.k[22] = rotl128(ka, 94)
		c.k[23], c.k[24] = rotl128(kl, 111)
		c.kw[3], c.kw[4] = rotl128(kb, 111)
	}
	return c, nil
}
// Encrypt encrypts the first 16-byte block of src into dst: 18 Feistel
// rounds for 128-bit keys, 24 rounds for longer keys, with FL/FLINV
// layers every six rounds. src and dst may refer to the same slice, as
// all reads complete before any write.
func (c *camelliaCipher) Encrypt(dst, src []byte) {
	d1 := binary.BigEndian.Uint64(src[0:])
	d2 := binary.BigEndian.Uint64(src[8:])
	// Pre-whitening.
	d1 ^= c.kw[1]
	d2 ^= c.kw[2]
	// Rounds 1-6.
	d2 = d2 ^ f(d1, c.k[1])
	d1 = d1 ^ f(d2, c.k[2])
	d2 = d2 ^ f(d1, c.k[3])
	d1 = d1 ^ f(d2, c.k[4])
	d2 = d2 ^ f(d1, c.k[5])
	d1 = d1 ^ f(d2, c.k[6])
	// FL/FLINV layer.
	d1 = fl(d1, c.ke[1])
	d2 = flinv(d2, c.ke[2])
	// Rounds 7-12.
	d2 = d2 ^ f(d1, c.k[7])
	d1 = d1 ^ f(d2, c.k[8])
	d2 = d2 ^ f(d1, c.k[9])
	d1 = d1 ^ f(d2, c.k[10])
	d2 = d2 ^ f(d1, c.k[11])
	d1 = d1 ^ f(d2, c.k[12])
	// FL/FLINV layer.
	d1 = fl(d1, c.ke[3])
	d2 = flinv(d2, c.ke[4])
	// Rounds 13-18.
	d2 = d2 ^ f(d1, c.k[13])
	d1 = d1 ^ f(d2, c.k[14])
	d2 = d2 ^ f(d1, c.k[15])
	d1 = d1 ^ f(d2, c.k[16])
	d2 = d2 ^ f(d1, c.k[17])
	d1 = d1 ^ f(d2, c.k[18])
	if c.klen > 16 {
		// 24 or 32 byte keys add one more FL layer and rounds 19-24.
		d1 = fl(d1, c.ke[5])
		d2 = flinv(d2, c.ke[6])
		d2 = d2 ^ f(d1, c.k[19])
		d1 = d1 ^ f(d2, c.k[20])
		d2 = d2 ^ f(d1, c.k[21])
		d1 = d1 ^ f(d2, c.k[22])
		d2 = d2 ^ f(d1, c.k[23])
		d1 = d1 ^ f(d2, c.k[24])
	}
	// Post-whitening; note the halves are swapped on output.
	d2 = d2 ^ c.kw[3]
	d1 = d1 ^ c.kw[4]
	binary.BigEndian.PutUint64(dst[0:], d2)
	binary.BigEndian.PutUint64(dst[8:], d1)
}
// Decrypt decrypts the first 16-byte block of src into dst by running
// the Encrypt structure in reverse with the subkeys applied in the
// opposite order. src and dst may refer to the same slice.
func (c *camelliaCipher) Decrypt(dst, src []byte) {
	d2 := binary.BigEndian.Uint64(src[0:])
	d1 := binary.BigEndian.Uint64(src[8:])
	// Undo post-whitening.
	d1 = d1 ^ c.kw[4]
	d2 = d2 ^ c.kw[3]
	if c.klen > 16 {
		// 24 or 32 byte keys: undo rounds 24-19 and the extra FL layer.
		d1 = d1 ^ f(d2, c.k[24])
		d2 = d2 ^ f(d1, c.k[23])
		d1 = d1 ^ f(d2, c.k[22])
		d2 = d2 ^ f(d1, c.k[21])
		d1 = d1 ^ f(d2, c.k[20])
		d2 = d2 ^ f(d1, c.k[19])
		d2 = fl(d2, c.ke[6])
		d1 = flinv(d1, c.ke[5])
	}
	// Undo rounds 18-13.
	d1 = d1 ^ f(d2, c.k[18])
	d2 = d2 ^ f(d1, c.k[17])
	d1 = d1 ^ f(d2, c.k[16])
	d2 = d2 ^ f(d1, c.k[15])
	d1 = d1 ^ f(d2, c.k[14])
	d2 = d2 ^ f(d1, c.k[13])
	d2 = fl(d2, c.ke[4])
	d1 = flinv(d1, c.ke[3])
	// Undo rounds 12-7.
	d1 = d1 ^ f(d2, c.k[12])
	d2 = d2 ^ f(d1, c.k[11])
	d1 = d1 ^ f(d2, c.k[10])
	d2 = d2 ^ f(d1, c.k[9])
	d1 = d1 ^ f(d2, c.k[8])
	d2 = d2 ^ f(d1, c.k[7])
	d2 = fl(d2, c.ke[2])
	d1 = flinv(d1, c.ke[1])
	// Undo rounds 6-1.
	d1 = d1 ^ f(d2, c.k[6])
	d2 = d2 ^ f(d1, c.k[5])
	d1 = d1 ^ f(d2, c.k[4])
	d2 = d2 ^ f(d1, c.k[3])
	d1 = d1 ^ f(d2, c.k[2])
	d2 = d2 ^ f(d1, c.k[1])
	// Undo pre-whitening and restore the original half order.
	d2 ^= c.kw[2]
	d1 ^= c.kw[1]
	binary.BigEndian.PutUint64(dst[0:], d1)
	binary.BigEndian.PutUint64(dst[8:], d2)
}
// BlockSize returns the Camellia block size in bytes, satisfying
// cipher.Block.
func (c *camelliaCipher) BlockSize() int {
	return BlockSize
}
// f is the Camellia round function (F-function): the 64-bit input is
// XORed with the round key, each byte is substituted through one of the
// four S-boxes (S-layer), and the results are mixed by the linear
// P-function (the XOR combinations below), per RFC 3713.
func f(fin, ke uint64) uint64 {
	// Key addition. (Declared and initialized in one step; the original
	// split declaration and assignment for no reason.)
	x := fin ^ ke
	// S-layer: byte-wise substitution, most significant byte first.
	t1 := sbox1[uint8(x>>56)]
	t2 := sbox2[uint8(x>>48)]
	t3 := sbox3[uint8(x>>40)]
	t4 := sbox4[uint8(x>>32)]
	t5 := sbox2[uint8(x>>24)]
	t6 := sbox3[uint8(x>>16)]
	t7 := sbox4[uint8(x>>8)]
	t8 := sbox1[uint8(x)]
	// P-layer: linear diffusion by fixed XOR combinations.
	y1 := t1 ^ t3 ^ t4 ^ t6 ^ t7 ^ t8
	y2 := t1 ^ t2 ^ t4 ^ t5 ^ t7 ^ t8
	y3 := t1 ^ t2 ^ t3 ^ t5 ^ t6 ^ t8
	y4 := t2 ^ t3 ^ t4 ^ t5 ^ t6 ^ t7
	y5 := t1 ^ t2 ^ t6 ^ t7 ^ t8
	y6 := t2 ^ t3 ^ t5 ^ t7 ^ t8
	y7 := t3 ^ t4 ^ t5 ^ t6 ^ t8
	y8 := t1 ^ t4 ^ t5 ^ t6 ^ t7
	return uint64(y1)<<56 | uint64(y2)<<48 | uint64(y3)<<40 | uint64(y4)<<32 | uint64(y5)<<24 | uint64(y6)<<16 | uint64(y7)<<8 | uint64(y8)
}
// fl is the Camellia FL function (RFC 3713): a lightweight keyed
// mixing step applied between groups of six rounds. The input and the
// subkey are each split into 32-bit halves; the low half is perturbed
// by a rotated AND with the key, then the high half by an OR.
func fl(flin, ke uint64) uint64 {
	hi, lo := uint32(flin>>32), uint32(flin)
	kHi, kLo := uint32(ke>>32), uint32(ke)
	lo ^= bits.RotateLeft32(hi&kHi, 1)
	hi ^= lo | kLo
	return uint64(hi)<<32 | uint64(lo)
}
// flinv is the inverse of the FL function (Camellia's FL⁻¹, RFC 3713):
// it undoes fl's two mixing steps in reverse order, so
// flinv(fl(x, k), k) == x for all x, k.
func flinv(flin, ke uint64) uint64 {
	hi, lo := uint32(flin>>32), uint32(flin)
	kHi, kLo := uint32(ke>>32), uint32(ke)
	hi ^= lo | kLo
	lo ^= bits.RotateLeft32(hi&kHi, 1)
	return uint64(hi)<<32 | uint64(lo)
}
// sbox1 is Camellia's primary 8-bit substitution box (SBOX1 in
// RFC 3713). sbox2, sbox3 and sbox4 are derived from it by rotation
// (populated elsewhere in this file — presumably in an init; the
// derivation is not visible here).
var sbox1 = [...]byte{
	0x70, 0x82, 0x2c, 0xec, 0xb3, 0x27, 0xc0, 0xe5, 0xe4, 0x85, 0x57, 0x35, 0xea, 0x0c, 0xae, 0x41,
	0x23, 0xef, 0x6b, 0x93, 0x45, 0x19, 0xa5, 0x21, 0xed, 0x0e, 0x4f, 0x4e, 0x1d, 0x65, 0x92, 0xbd,
	0x86, 0xb8, 0xaf, 0x8f, 0x7c, 0xeb, 0x1f, 0xce, 0x3e, 0x30, 0xdc, 0x5f, 0x5e, 0xc5, 0x0b, 0x1a,
	0xa6, 0xe1, 0x39, 0xca, 0xd5, 0x47, 0x5d, 0x3d, 0xd9, 0x01, 0x5a, 0xd6, 0x51, 0x56, 0x6c, 0x4d,
	0x8b, 0x0d, 0x9a, 0x66, 0xfb, 0xcc, 0xb0, 0x2d, 0x74, 0x12, 0x2b, 0x20, 0xf0, 0xb1, 0x84, 0x99,
	0xdf, 0x4c, 0xcb, 0xc2, 0x34, 0x7e, 0x76, 0x05, 0x6d, 0xb7, 0xa9, 0x31, 0xd1, 0x17, 0x04, 0xd7,
	0x14, 0x58, 0x3a, 0x61, 0xde, 0x1b, 0x11, 0x1c, 0x32, 0x0f, 0x9c, 0x16, 0x53, 0x18, 0xf2, 0x22,
	0xfe, 0x44, 0xcf, 0xb2, 0xc3, 0xb5, 0x7a, 0x91, 0x24, 0x08, 0xe8, 0xa8, 0x60, 0xfc, 0x69, 0x50,
	0xaa, 0xd0, 0xa0, 0x7d, 0xa1, 0x89, 0x62, 0x97, 0x54, 0x5b, 0x1e, 0x95, 0xe0, 0xff, 0x64, 0xd2,
	0x10, 0xc4, 0x00, 0x48, 0xa3, 0xf7, 0x75, 0xdb, 0x8a, 0x03, 0xe6, 0xda, 0x09, 0x3f, 0xdd, 0x94,
	0x87, 0x5c, 0x83, 0x02, 0xcd, 0x4a, 0x90, 0x33, 0x73, 0x67, 0xf6, 0xf3, 0x9d, 0x7f, 0xbf, 0xe2,
	0x52, 0x9b, 0xd8, 0x26, 0xc8, 0x37, 0xc6, 0x3b, 0x81, 0x96, 0x6f, 0x4b, 0x13, 0xbe, 0x63, 0x2e,
	0xe9, 0x79, 0xa7, 0x8c, 0x9f, 0x6e, 0xbc, 0x8e, 0x29, 0xf5, 0xf9, 0xb6, 0x2f, 0xfd, 0xb4, 0x59,
	0x78, 0x98, 0x06, 0x6a, 0xe7, 0x46, 0x71, 0xba, 0xd4, 0x25, 0xab, 0x42, 0x88, 0xa2, 0x8d, 0xfa,
	0x72, 0x07, 0xb9, 0x55, 0xf8, 0xee, 0xac, 0x0a, 0x36, 0x49, 0x2a, 0x68, 0x3c, 0x38, 0xf1, 0xa4,
	0x40, 0x28, 0xd3, 0x7b, 0xbb, 0xc9, 0x43, 0xc1, 0x15, 0xe3, 0xad, 0xf4, 0x77, 0xc7, 0x80, 0x9e,
}
// sbox2, sbox3 and sbox4 are the derived Camellia S-boxes. They are
// zero here and filled in at runtime (initialization code not visible
// in this chunk — presumably an init derives them from sbox1 by the
// rotations defined in RFC 3713; confirm against the rest of the file).
var sbox2 [256]byte
var sbox3 [256]byte
var sbox4 [256]byte
// Compile-time check that camelliaCipher satisfies cipher.Block.
var _ cipher.Block = &camelliaCipher{}
package function
import (
"fmt"
"strconv"
"time"
"github.com/lestrrat-go/strftime"
"github.com/liquidata-inc/go-mysql-server/sql"
"github.com/liquidata-inc/go-mysql-server/sql/expression"
)
// panicIfErr panics if err is non-nil. It is only used during
// package-level initialization, where a failure is unrecoverable.
func panicIfErr(err error) {
	if err == nil {
		return
	}
	panic(err)
}
func monthNum(t time.Time) string {
return strconv.FormatInt(int64(t.Month()), 10)
}
func dayWithSuffix(t time.Time) string {
suffix := "th"
day := int64(t.Day())
if day < 4 || day > 20 {
switch day % 10 {
case 1:
suffix = "st"
case 2:
suffix = "nd"
case 3:
suffix = "rd"
}
}
return strconv.FormatInt(day, 10) + suffix
}
func dayOfMonth(t time.Time) string {
return strconv.FormatInt(int64(t.Day()), 10)
}
func microsecondsStr(t time.Time) string {
micros := t.Nanosecond() / int(time.Microsecond)
return fmt.Sprintf("%06d", micros)
}
func minutesStr(t time.Time) string {
return fmt.Sprintf("%02d", t.Minute())
}
func twelveHour(t time.Time) (int, string) {
ampm := "AM"
if t.Hour() >= 12 {
ampm = "PM"
}
hour := t.Hour() % 12
if hour == 0 {
hour = 12
}
return hour, ampm
}
func twelveHourPadded(t time.Time) string {
hour, _ := twelveHour(t)
return fmt.Sprintf("%02d", hour)
}
func twelveHourNoPadding(t time.Time) string {
hour, _ := twelveHour(t)
return fmt.Sprintf("%d", hour)
}
func twentyFourHourNoPadding(t time.Time) string {
return fmt.Sprintf("%d", t.Hour())
}
func fullMonthName(t time.Time) string {
return t.Month().String()
}
func ampmClockStr(t time.Time) string {
hour, ampm := twelveHour(t)
return fmt.Sprintf("%02d:%02d:%02d %s", hour, t.Minute(), t.Second(), ampm)
}
func secondsStr(t time.Time) string {
return fmt.Sprintf("%02d", t.Second())
}
// yearWeek evaluates the SQL YEARWEEK(t, mode) function and splits its
// combined yyyyww result into (year, week).
// NOTE(review): the error from Eval is discarded; if Eval fails, res is
// nil and the type assertion below would panic — confirm Eval cannot
// fail for literal arguments.
func yearWeek(mode int32, t time.Time) (int32, int32) {
	yw := YearWeek{expression.NewLiteral(t, sql.Datetime), expression.NewLiteral(mode, sql.Int32)}
	res, _ := yw.Eval(nil, nil)
	// YEARWEEK packs the result as year*100 + week.
	yr := res.(int32) / 100
	wk := res.(int32) % 100
	return yr, wk
}
// weekMode0 renders the week number under YEARWEEK mode 0 (weeks start
// on Sunday), clamped so a week that belongs to the previous calendar
// year shows as "00" and one belonging to the next year as "53".
// Matches MySQL's %U specifier.
func weekMode0(t time.Time) string {
	year, week := yearWeek(0, t)
	switch {
	case year < int32(t.Year()):
		week = 0
	case year > int32(t.Year()):
		week = 53
	}
	return fmt.Sprintf("%02d", week)
}
// weekMode1 renders the week number under YEARWEEK mode 1 (weeks start
// on Monday), clamped to "00"/"53" when the week's year differs from
// t's calendar year. Matches MySQL's %u specifier.
func weekMode1(t time.Time) string {
	year, week := yearWeek(1, t)
	switch {
	case year < int32(t.Year()):
		week = 0
	case year > int32(t.Year()):
		week = 53
	}
	return fmt.Sprintf("%02d", week)
}
// weekMode2 renders the zero-padded week number under YEARWEEK mode 2
// (Sunday start, weeks numbered 1-53). Matches MySQL's %V specifier.
func weekMode2(t time.Time) string {
	_, week := yearWeek(2, t)
	return fmt.Sprintf("%02d", week)
}
// weekMode3 renders the zero-padded week number under YEARWEEK mode 3
// (Monday start, ISO-style weeks 1-53). Matches MySQL's %v specifier.
func weekMode3(t time.Time) string {
	_, week := yearWeek(3, t)
	return fmt.Sprintf("%02d", week)
}
// yearMode0 renders the year that the week of t belongs to under
// YEARWEEK mode 0. Matches MySQL's %X specifier.
func yearMode0(t time.Time) string {
	year, _ := yearWeek(0, t)
	return strconv.Itoa(int(year))
}
// yearMode1 renders the year that the week of t belongs to under
// YEARWEEK mode 1. Matches MySQL's %x specifier.
func yearMode1(t time.Time) string {
	year, _ := yearWeek(1, t)
	return strconv.Itoa(int(year))
}
func dayName(t time.Time) string {
return t.Weekday().String()
}
func yearTwoDigit(t time.Time) string {
return strconv.FormatInt(int64(t.Year())%100, 10)
}
// AppendFuncWrapper adapts a plain func(time.Time) string into the
// strftime.Appender interface so it can be registered as a format
// specifier handler.
type AppendFuncWrapper struct {
	// fn formats a time value for one format specifier.
	fn func(time.Time) string
}
// wrap boxes a formatting function as a strftime.Appender.
func wrap(fn func(time.Time) string) strftime.Appender {
	return AppendFuncWrapper{fn}
}
// Append implements strftime.Appender: it formats t with the wrapped
// function and appends the result to bytes.
func (af AppendFuncWrapper) Append(bytes []byte, t time.Time) []byte {
	// append accepts a string operand directly with ..., so the explicit
	// []byte(s) conversion (and its allocation) is unnecessary.
	return append(bytes, af.fn(t)...)
}
// mysqlDateFormatSpec is the strftime specification set used by
// formatDate; it is customized in init below to follow MySQL's
// DATE_FORMAT specifier semantics.
var mysqlDateFormatSpec = strftime.NewSpecificationSet()

// specifierToFunc maps each MySQL DATE_FORMAT specifier this package
// overrides to its formatting function. A nil value means the
// specifier is left to strftime's default handling (see init: only
// non-nil entries are registered).
var specifierToFunc = map[byte]func(time.Time) string{
	'a': nil,
	'b': nil,
	'c': monthNum,
	'D': dayWithSuffix,
	'd': nil,
	'e': dayOfMonth,
	'f': microsecondsStr,
	'H': nil,
	'h': twelveHourPadded,
	'I': twelveHourPadded,
	'i': minutesStr,
	'j': nil,
	'k': twentyFourHourNoPadding,
	'l': twelveHourNoPadding,
	'M': fullMonthName,
	'm': nil,
	'p': nil,
	'r': ampmClockStr,
	'S': nil,
	's': secondsStr,
	'T': nil,
	'U': weekMode0,
	'u': weekMode1,
	'V': weekMode2,
	'v': weekMode3,
	'W': dayName,
	'w': nil,
	'X': yearMode0,
	'x': yearMode1,
	'Y': nil,
	'y': yearTwoDigit,
}
// init builds mysqlDateFormatSpec: it registers every overridden
// specifier from specifierToFunc, then neutralizes all remaining
// ASCII letters so that strftime specifiers MySQL does not support
// render as the literal letter instead. Panics on registration
// failure (acceptable at init time).
func init() {
	for specifier, fn := range specifierToFunc {
		if fn != nil {
			panicIfErr(mysqlDateFormatSpec.Set(specifier, wrap(fn)))
		}
	}
	// replace any strftime specifiers that aren't supported
	fn := func(b byte) {
		if _, ok := specifierToFunc[b]; !ok {
			// Unknown specifier: emit the letter itself, e.g. "%Q" -> "Q".
			panicIfErr(mysqlDateFormatSpec.Set(b, wrap(func(time.Time) string {
				return string(b)
			})))
		}
	}
	// Apply to both the upper- and lower-case form of each letter.
	capToLower := byte('a' - 'A')
	for i := byte('A'); i <= 'Z'; i++ {
		fn(i)
		fn(i + capToLower)
	}
}
// formatDate renders t according to a MySQL DATE_FORMAT format string,
// using the package's customized strftime specification set. Returns
// an error if the format string cannot be parsed.
func formatDate(format string, t time.Time) (string, error) {
	formatter, err := strftime.New(format, strftime.WithSpecificationSet(mysqlDateFormatSpec))
	if err != nil {
		return "", err
	}
	return formatter.FormatString(t), nil
}
// DateFormat function returns a string representation of the date specified in the format specified.
// Left is the date expression; Right is the format-string expression.
type DateFormat struct {
	expression.BinaryExpression
}
// NewDateFormat returns a new DateFormat UDF. ex is the date/datetime
// expression and value is the format-string expression.
func NewDateFormat(ex, value sql.Expression) sql.Expression {
	return &DateFormat{
		expression.BinaryExpression{
			Left:  ex,
			Right: value,
		},
	}
}
// Eval implements the Expression interface. It evaluates the date
// (Left) and format string (Right), returning NULL (nil, nil) if
// either child or its value is NULL, and the formatted string
// otherwise.
func (f *DateFormat) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
	if f.Left == nil || f.Right == nil {
		return nil, nil
	}
	left, err := f.Left.Eval(ctx, row)
	if err != nil {
		return nil, err
	}
	if left == nil {
		return nil, nil
	}
	// Coerce the date operand to a datetime value.
	timeVal, err := sql.Datetime.Convert(left)
	if err != nil {
		return nil, err
	}
	t := timeVal.(time.Time)
	right, err := f.Right.Eval(ctx, row)
	if err != nil {
		return nil, err
	}
	if right == nil {
		return nil, nil
	}
	// The format must already be a string; it is not coerced.
	formatStr, ok := right.(string)
	if !ok {
		return nil, ErrInvalidArgument.New("DATE_FORMAT", "format must be a string")
	}
	return formatDate(formatStr, t)
}
// Type implements the Expression interface; DATE_FORMAT always
// produces text.
func (f *DateFormat) Type() sql.Type {
	return sql.Text
}
// IsNullable implements the Expression interface.
// NOTE(review): this reports Left's nullability unless Left is the
// NULL literal, in which case it falls through to Right — it is not
// the usual Left.IsNullable() || Right.IsNullable(); confirm this
// asymmetry is intentional.
func (f *DateFormat) IsNullable() bool {
	if sql.IsNull(f.Left) {
		if sql.IsNull(f.Right) {
			return true
		}
		return f.Right.IsNullable()
	}
	return f.Left.IsNullable()
}
// String renders the expression in SQL-like form, e.g.
// "date_format(col, '%Y')".
func (f *DateFormat) String() string {
	return fmt.Sprintf("date_format(%s, %s)", f.Left, f.Right)
}
// WithChildren implements the Expression interface, rebuilding the
// node with exactly two children (date, format).
func (f *DateFormat) WithChildren(children ...sql.Expression) (sql.Expression, error) {
	if len(children) != 2 {
		return nil, sql.ErrInvalidChildrenNumber.New(f, len(children), 2)
	}
	return NewDateFormat(children[0], children[1]), nil
}
package spacecurves
// Morton2DEncode interleaves the low `bits` bits of x and y into a
// single Z-order (Morton) curve index: bit i of x lands at position
// 2i and bit i of y at position 2i+1.
func Morton2DEncode(bits, x, y uint) uint {
	var code uint
	for i := uint(0); i < bits; i++ {
		mask := uint(1) << i
		code |= (x & mask) << i
		code |= (y & mask) << (i + 1)
	}
	return code
}
// Morton3DEncode interleaves the low `bits` bits of x, y and z into a
// single Z-order (Morton) curve index: bit i of x lands at position
// 3i, of y at 3i+1, and of z at 3i+2.
func Morton3DEncode(bits, x, y, z uint) uint {
	var code uint
	for i := uint(0); i < bits; i++ {
		mask := uint(1) << i
		code |= (x & mask) << (2 * i)
		code |= (y & mask) << (2*i + 1)
		code |= (z & mask) << (2*i + 2)
	}
	return code
}
// Morton2DDecode de-interleaves an n-bit-per-axis Z-order (Morton)
// curve index d back into its (x, y) coordinates, inverting
// Morton2DEncode.
func Morton2DDecode(n, d uint) (uint, uint) {
	var x, y uint
	for i := uint(0); i < n; i++ {
		mask := uint(1) << i
		x |= (d >> i) & mask
		y |= (d >> (i + 1)) & mask
	}
	return x, y
}
// Morton3DDecode de-interleaves an n-bit-per-axis Z-order (Morton)
// curve index d back into its (x, y, z) coordinates, inverting
// Morton3DEncode.
func Morton3DDecode(n, d uint) (uint, uint, uint) {
	var x, y, z uint
	for i := uint(0); i < n; i++ {
		mask := uint(1) << i
		x |= (d >> (2 * i)) & mask
		y |= (d >> (2*i + 1)) & mask
		z |= (d >> (2*i + 2)) & mask
	}
	return x, y, z
}
// MortonToHilbert2D transforms a 2D point along a Morton Z-order
// curve to a point along a Hilbert space-filling curve.
// The index is consumed two bits (one quadrant) at a time from the
// most significant end; `remap` is a packed 4-entry 2-bit lookup table
// (initially 0xb4) mapping a Morton quadrant to its Hilbert code, and
// the XOR against 0x82000028 updates the table for the orientation of
// the next level (constants presumed from the standard table-driven
// Hilbert construction — not independently re-derived here).
func MortonToHilbert2D(morton, bits uint) uint {
	hilbert := uint(0)
	remap := uint(0xb4)
	block := bits << 1
	for block != 0 {
		block -= 2
		mcode := (morton >> block) & 3
		hcode := (remap >> (mcode << 1)) & 3
		remap ^= 0x82000028 >> (hcode << 3)
		hilbert = (hilbert << 2) + hcode
	}
	return hilbert
}
// HilbertToMorton2D transforms a 2D point along a Hilbert space-filling
// curve to a point along a Morton Z-order curve; it is the inverse of
// MortonToHilbert2D and uses the same packed 2-bit lookup-table scheme
// with the inverse orientation-update constant (0x330000cc).
func HilbertToMorton2D(hilbert, bits uint) uint {
	morton := uint(0)
	remap := uint(0xb4)
	block := bits << 1
	for block != 0 {
		block -= 2
		hcode := (hilbert >> block) & 3
		mcode := (remap >> (hcode << 1)) & 3
		remap ^= 0x330000cc >> (hcode << 3)
		morton = (morton << 2) + mcode
	}
	return morton
}
// MortonToHilbert3D transforms a 3D point along a Morton Z-order
// curve to a point along a Hilbert space-filling curve.
// The index is rewritten three bits (one octant) at a time from the
// most significant end, tracking the current sub-cube orientation in
// `shift` (rotation) and `signs` (axis flips); the final two XOR lines
// apply a Gray-code conversion across the whole value. The magic
// constants encode the 3D Hilbert state-transition tables (taken as
// given — not independently re-derived here).
func MortonToHilbert3D(morton, bits uint) uint {
	hilbert := morton
	if bits > 1 {
		block := (bits * 3) - 3
		hcode := (hilbert >> block) & 7
		shift := uint(0)
		signs := uint(0)
		for block != 0 {
			block -= 3
			hcode <<= 2
			mcode := (uint(0x20212021) >> hcode) & 3
			shift = (0x48 >> (7 - shift - mcode)) & 3
			signs = (signs | (signs << 3)) >> mcode
			signs = (signs ^ (0x53560300 >> hcode)) & 7
			mcode = (hilbert >> block) & 7
			hcode = mcode
			hcode = ((hcode | (hcode << 3)) >> shift) & 7
			hcode ^= signs
			hilbert ^= (mcode ^ hcode) << block
		}
	}
	// Gray-code step applied to every 3-bit group at once.
	hilbert ^= (hilbert >> 1) & 0x92492492
	hilbert ^= (hilbert & 0x92492492) >> 1
	return hilbert
}
// HilbertToMorton3D transforms a 3D point along a Hilbert space-filling
// curve to a point along a Morton Z-order curve; it is the inverse of
// MortonToHilbert3D. The inverse Gray-code step is applied up front,
// then each octant is rewritten using the same orientation-tracking
// (`shift`/`signs`) scheme with inverted rotate/flip order.
func HilbertToMorton3D(hilbert, bits uint) uint {
	morton := hilbert
	// Inverse of the Gray-code step in MortonToHilbert3D.
	morton ^= (morton & 0x92492492) >> 1
	morton ^= (morton >> 1) & 0x92492492
	if bits > 1 {
		block := ((bits * 3) - 3)
		hcode := ((morton >> block) & 7)
		shift := uint(0)
		signs := uint(0)
		for block != 0 {
			block -= 3
			hcode <<= 2
			mcode := (uint(0x20212021) >> hcode) & 3
			shift = (0x48 >> (4 - shift + mcode)) & 3
			signs = (signs | (signs << 3)) >> mcode
			signs = (signs ^ (0x53560300 >> hcode)) & 7
			hcode = (morton >> block) & 7
			mcode = hcode
			mcode ^= signs
			mcode = ((mcode | (mcode << 3)) >> shift) & 7
			morton ^= (hcode ^ mcode) << block
		}
	}
	return morton
}
// Morton2DEncode5bit transforms a 2D point into a value along
// a 5-bit-per-axis Morton space-filling curve. It is more efficient
// than the generic Morton2DEncode: the multiply/mask pairs spread each
// coordinate's five bits apart in constant time instead of looping.
func Morton2DEncode5bit(x, y uint) uint {
	// Keep only the low 5 bits of each coordinate.
	x &= 0x0000001f
	y &= 0x0000001f
	// Multiply-and-mask bit-spreading (branch- and loop-free).
	x *= 0x01041041
	y *= 0x01041041
	x &= 0x10204081
	y &= 0x10204081
	x *= 0x00108421
	y *= 0x00108421
	x &= 0x15500000
	y &= 0x15500000
	// Interleave: x bits on even positions, y bits on odd positions.
	return (x >> 20) | (y >> 19)
}
// Morton2DDecode5bit transforms a point along a 5-bit-per-axis Morton
// space-filling curve into a 2D point. It is more efficient than the
// generic Morton2DDecode: successive shift-or/mask steps compact the
// interleaved bits without a loop. Returns (x, y).
func Morton2DDecode5bit(morton uint) (uint, uint) {
	// value1 gathers the even (x) bits, value2 the odd (y) bits.
	value1 := morton
	value2 := value1 >> 1
	value1 &= 0x00000155
	value2 &= 0x00000155
	value1 |= value1 >> 1
	value2 |= value2 >> 1
	value1 &= 0x00000133
	value2 &= 0x00000133
	value1 |= value1 >> 2
	value2 |= value2 >> 2
	value1 &= 0x0000010f
	value2 &= 0x0000010f
	value1 |= value1 >> 4
	value2 |= value2 >> 4
	value1 &= 0x0000001f
	value2 &= 0x0000001f
	return value1, value2
}
// Morton2DEncode16bit transforms a 2D point into a value along
// a 16-bit-per-axis Morton space-filling curve. It is more efficient
// than the generic Morton2DEncode: each shift-or/mask pair doubles the
// spacing between a coordinate's bits (the classic "spread bits"
// construction), after which x occupies even and y odd bit positions.
func Morton2DEncode16bit(x, y uint) uint {
	// Keep only the low 16 bits of each coordinate.
	x &= 0x0000ffff
	y &= 0x0000ffff
	x |= x << 8
	y |= y << 8
	x &= 0x00ff00ff
	y &= 0x00ff00ff
	x |= x << 4
	y |= y << 4
	x &= 0x0f0f0f0f
	y &= 0x0f0f0f0f
	x |= x << 2
	y |= y << 2
	x &= 0x33333333
	y &= 0x33333333
	x |= x << 1
	y |= y << 1
	x &= 0x55555555
	y &= 0x55555555
	return x | (y << 1)
}
// Morton2DDecode16bit transforms a point along a 16-bit-per-axis
// Morton space-filling curve into a 2D point. It is more efficient
// than the generic Morton2DDecode: it runs the "spread bits" steps of
// Morton2DEncode16bit in reverse to compact the interleaved bits.
// Returns (x, y).
func Morton2DDecode16bit(morton uint) (uint, uint) {
	// value1 gathers the even (x) bits, value2 the odd (y) bits.
	value1 := morton
	value2 := value1 >> 1
	value1 &= 0x55555555
	value2 &= 0x55555555
	value1 |= value1 >> 1
	value2 |= value2 >> 1
	value1 &= 0x33333333
	value2 &= 0x33333333
	value1 |= value1 >> 2
	value2 |= value2 >> 2
	value1 &= 0x0f0f0f0f
	value2 &= 0x0f0f0f0f
	value1 |= value1 >> 4
	value2 |= value2 >> 4
	value1 &= 0x00ff00ff
	value2 &= 0x00ff00ff
	value1 |= value1 >> 8
	value2 |= value2 >> 8
	value1 &= 0x0000ffff
	value2 &= 0x0000ffff
	return value1, value2
}
// Morton3DEncode5bit transforms a 3D point into a value along
// a 5-bit-per-axis Morton space-filling curve. It is more efficient
// than the generic Morton3DEncode (the original comment mistakenly
// referenced Morton2DEncode): multiply/mask pairs spread each
// coordinate's five bits three positions apart without a loop.
func Morton3DEncode5bit(x, y, z uint) uint {
	// Keep only the low 5 bits of each coordinate.
	x &= 0x0000001f
	y &= 0x0000001f
	z &= 0x0000001f
	// Multiply-and-mask bit-spreading (branch- and loop-free).
	x *= 0x01041041
	y *= 0x01041041
	z *= 0x01041041
	x &= 0x10204081
	y &= 0x10204081
	z &= 0x10204081
	x *= 0x00011111
	y *= 0x00011111
	z *= 0x00011111
	x &= 0x12490000
	y &= 0x12490000
	z &= 0x12490000
	// Interleave as ...z1 y1 x1 z0 y0 x0.
	return (x >> 16) | (y >> 15) | (z >> 14)
}
// Morton3DDecode5bit transforms a point along a 5-bit-per-axis Morton
// space-filling curve into a 3D point. It is more efficient than the
// generic Morton3DDecode (the original comment mistakenly referenced
// Morton2DDecode): shift-or/mask steps compact every third bit of the
// index. Returns (x, y, z).
func Morton3DDecode5bit(morton uint) (uint, uint, uint) {
	// value1/2/3 gather the bits at positions 3i, 3i+1 and 3i+2.
	value1 := morton
	value2 := value1 >> 1
	value3 := value1 >> 2
	value1 &= 0x00001249
	value2 &= 0x00001249
	value3 &= 0x00001249
	value1 |= value1 >> 2
	value2 |= value2 >> 2
	value3 |= value3 >> 2
	value1 &= 0x000010c3
	value2 &= 0x000010c3
	value3 &= 0x000010c3
	value1 |= value1 >> 4
	value2 |= value2 >> 4
	value3 |= value3 >> 4
	value1 &= 0x0000100f
	value2 &= 0x0000100f
	value3 &= 0x0000100f
	value1 |= value1 >> 8
	value2 |= value2 >> 8
	value3 |= value3 >> 8
	value1 &= 0x0000001f
	value2 &= 0x0000001f
	value3 &= 0x0000001f
	return value1, value2, value3
}
// Morton3DEncode10bit transforms a 3D point into a value along
// a 10-bit-per-axis Morton space-filling curve. It is more efficient
// than the generic Morton3DEncode (the original comment mistakenly
// referenced Morton2DEncode): each shift-or/mask pair spreads a
// coordinate's bits further apart until they sit three positions
// apart, ready for interleaving.
func Morton3DEncode10bit(x, y, z uint) uint {
	// Keep only the low 10 bits of each coordinate.
	x &= 0x000003ff
	y &= 0x000003ff
	z &= 0x000003ff
	x |= x << 16
	y |= y << 16
	z |= z << 16
	x &= 0x030000ff
	y &= 0x030000ff
	z &= 0x030000ff
	x |= x << 8
	y |= y << 8
	z |= z << 8
	x &= 0x0300f00f
	y &= 0x0300f00f
	z &= 0x0300f00f
	x |= x << 4
	y |= y << 4
	z |= z << 4
	x &= 0x030c30c3
	y &= 0x030c30c3
	z &= 0x030c30c3
	x |= x << 2
	y |= y << 2
	z |= z << 2
	x &= 0x09249249
	y &= 0x09249249
	z &= 0x09249249
	// Interleave: x on positions 3i, y on 3i+1, z on 3i+2.
	return x | (y << 1) | (z << 2)
}
// Morton3DDecode10bit transforms a point along a 10-bit-per-axis
// Morton space-filling curve into a 3D point. It is more efficient
// than the generic Morton3DDecode (the original comment mistakenly
// referenced Morton2DDecode): it runs the bit-spreading steps of
// Morton3DEncode10bit in reverse. Returns (x, y, z).
func Morton3DDecode10bit(morton uint) (uint, uint, uint) {
	// value1/2/3 gather the bits at positions 3i, 3i+1 and 3i+2.
	value1 := morton
	value2 := value1 >> 1
	value3 := value1 >> 2
	value1 &= 0x09249249
	value2 &= 0x09249249
	value3 &= 0x09249249
	value1 |= value1 >> 2
	value2 |= value2 >> 2
	value3 |= value3 >> 2
	value1 &= 0x030c30c3
	value2 &= 0x030c30c3
	value3 &= 0x030c30c3
	value1 |= value1 >> 4
	value2 |= value2 >> 4
	value3 |= value3 >> 4
	value1 &= 0x0300f00f
	value2 &= 0x0300f00f
	value3 &= 0x0300f00f
	value1 |= value1 >> 8
	value2 |= value2 >> 8
	value3 |= value3 >> 8
	value1 &= 0x030000ff
	value2 &= 0x030000ff
	value3 &= 0x030000ff
	value1 |= value1 >> 16
	value2 |= value2 >> 16
	value3 |= value3 >> 16
	value1 &= 0x000003ff
	value2 &= 0x000003ff
	value3 &= 0x000003ff
	return value1, value2, value3
}
package day5
import (
"fmt"
"strings"
)
/*
You come across a field of hydrothermal vents on the ocean floor! These vents
constantly produce large, opaque clouds, so it would be best to avoid them if
possible.
They tend to form in lines; the submarine helpfully produces a list of nearby
lines of vents (your puzzle input) for you to review. For example:
0,9 -> 5,9
8,0 -> 0,8
9,4 -> 3,4
2,2 -> 2,1
7,0 -> 7,4
6,4 -> 2,0
0,9 -> 2,9
3,4 -> 1,4
0,0 -> 8,8
5,5 -> 8,2
Each line of vents is given as a line segment in the format x1,y1 -> x2,y2 where
x1,y1 are the coordinates of one end the line segment and x2,y2 are the
coordinates of the other end. These line segments include the points at both
ends. In other words:
An entry like 1,1 -> 1,3 covers points 1,1, 1,2, and 1,3.
An entry like 9,7 -> 7,7 covers points 9,7, 8,7, and 7,7.
For now, only consider horizontal and vertical lines: lines where either x1 = x2
or y1 = y2.
So, the horizontal and vertical lines from the above list would produce the
following diagram:
.......1..
..1....1..
..1....1..
.......1..
.112111211
..........
..........
..........
..........
222111....
In this diagram, the top left corner is 0,0 and the bottom right corner is 9,9.
Each position is shown as the number of lines which cover that point or . if no
line covers that point. The top-left pair of 1s, for example, comes from 2,2 ->
2,1; the very bottom row is formed by the overlapping lines 0,9 -> 5,9 and 0,9
-> 2,9.
To avoid the most dangerous areas, you need to determine the number of points
where at least two lines overlap. In the above example, this is anywhere in the
diagram with a 2 or larger - a total of 5 points.
Consider only horizontal and vertical lines. At how many points do at least two
lines overlap?
*/
// Part1 answers part 1 of the puzzle: counting only horizontal and
// vertical vent lines from the input file at path, it returns the
// number of points covered by at least two lines.
func Part1(path string) (int, error) {
	axisAligned := func(v vent) bool {
		return v.IsHorizontal || v.IsVertical
	}
	return countOverlaps(path, axisAligned)
}
// countOverlaps parses the vent lines from the input file at path,
// keeps only those matching predicate, and returns the number of
// points covered by more than one of the remaining lines.
// (Also removed a leftover commented-out debug print of the tracks.)
func countOverlaps(path string, predicate func(vent) bool) (int, error) {
	vents, err := parseVents(path)
	if err != nil {
		return 0, err
	}
	vents = filterVent(vents, predicate)
	tracks := findTracks(vents)
	overlaps := 0
	for _, count := range tracks {
		if count > 1 {
			overlaps++
		}
	}
	return overlaps, nil
}
// tracks records, for each grid point, how many vent lines pass through it.
type tracks map[point]int
// findTracks walks every point on every vent line's path and counts
// how many lines cover each point.
func findTracks(vents []vent) tracks {
	covered := make(tracks)
	for _, v := range vents {
		for _, p := range v.Path() {
			covered[p]++
		}
	}
	return covered
}
// String renders the tracked grid as in the puzzle description: one
// row per y from 0 to the maximum tracked y, with "." for uncovered
// points and the coverage count otherwise. Intended for debugging.
func (t tracks) String() string {
	// Find the grid extent from the tracked points.
	maxX, maxY := 0, 0
	var builder strings.Builder
	for p := range t {
		if p.x > maxX {
			maxX = p.x
		}
		if p.y > maxY {
			maxY = p.y
		}
	}
	for y := 0; y <= maxY; y++ {
		for x := 0; x <= maxX; x++ {
			p := NewPoint(x, y)
			// Missing keys read as 0 (uncovered).
			count := t[p]
			if count == 0 {
				builder.WriteString(".")
			} else {
				builder.WriteString(fmt.Sprintf("%v", count))
			}
		}
		builder.WriteString("\n")
	}
	return builder.String()
}
package stripe
import (
"context"
"github.com/stripe/stripe-go"
"github.com/turbot/steampipe-plugin-sdk/grpc/proto"
"github.com/turbot/steampipe-plugin-sdk/plugin"
"github.com/turbot/steampipe-plugin-sdk/plugin/transform"
)
// tableStripePlan defines the steampipe "stripe_plan" table: how rows
// are listed (listPlan, with optional pushed-down filters on active,
// created and product_id), fetched by key (getPlan on id), and which
// Stripe plan fields map to which columns.
func tableStripePlan(ctx context.Context) *plugin.Table {
	return &plugin.Table{
		Name:        "stripe_plan",
		Description: "Plans define the base price, currency, and billing cycle for recurring purchases of products.",
		List: &plugin.ListConfig{
			Hydrate: listPlan,
			// Optional quals are pushed down to the Stripe list API in listPlan.
			KeyColumns: []*plugin.KeyColumn{
				{Name: "active", Operators: []string{"=", "<>"}, Require: plugin.Optional},
				{Name: "created", Operators: []string{">", ">=", "=", "<", "<="}, Require: plugin.Optional},
				{Name: "product_id", Require: plugin.Optional},
			},
		},
		Get: &plugin.GetConfig{
			Hydrate:    getPlan,
			KeyColumns: plugin.SingleColumn("id"),
		},
		Columns: []*plugin.Column{
			// Top columns
			{Name: "id", Type: proto.ColumnType_STRING, Description: "Unique identifier for the plan."},
			{Name: "nickname", Type: proto.ColumnType_STRING, Description: "A brief description of the plan, hidden from customers."},
			// Other columns
			{Name: "active", Type: proto.ColumnType_BOOL, Description: "Whether the plan is currently available for purchase."},
			{Name: "aggregate_usage", Type: proto.ColumnType_STRING, Description: "Specifies a usage aggregation strategy for plans of usage_type=metered. Allowed values are sum for summing up all usage during a period, last_during_period for using the last usage record reported within a period, last_ever for using the last usage record ever (across period bounds) or max which uses the usage record with the maximum reported usage during a period. Defaults to sum."},
			{Name: "amount", Type: proto.ColumnType_INT, Transform: transform.FromField("Amount"), Description: "The unit amount in cents to be charged, represented as a whole integer if possible. Only set if billing_scheme=per_unit."},
			{Name: "amount_decimal", Type: proto.ColumnType_DOUBLE, Transform: transform.FromField("AmountDecimal"), Description: "The unit amount in cents to be charged, represented as a decimal string with at most 12 decimal places. Only set if billing_scheme=per_unit."},
			{Name: "billing_scheme", Type: proto.ColumnType_STRING, Description: "Describes how to compute the price per period. Either per_unit or tiered."},
			{Name: "created", Type: proto.ColumnType_TIMESTAMP, Transform: transform.FromField("Created").Transform(transform.UnixToTimestamp), Description: "Time at which the plan was created."},
			{Name: "currency", Type: proto.ColumnType_STRING, Description: "Three-letter ISO currency code, in lowercase. Must be a supported currency."},
			{Name: "deleted", Type: proto.ColumnType_BOOL, Description: "True if the plan is marked as deleted."},
			{Name: "interval", Type: proto.ColumnType_STRING, Description: "The frequency at which a subscription is billed. One of day, week, month or year."},
			{Name: "interval_count", Type: proto.ColumnType_INT, Transform: transform.FromField("IntervalCount"), Description: "The number of intervals (specified in the interval attribute) between subscription billings. For example, interval=month and interval_count=3 bills every 3 months."},
			{Name: "livemode", Type: proto.ColumnType_BOOL, Description: "Has the value true if the plan exists in live mode or the value false if the plan exists in test mode."},
			{Name: "metadata", Type: proto.ColumnType_JSON, Description: "Set of key-value pairs that you can attach to an plan. This can be useful for storing additional information about the plan in a structured format."},
			{Name: "product_id", Type: proto.ColumnType_STRING, Transform: transform.FromField("Product.ID"), Description: "ID of the product whose pricing this plan determines."},
			{Name: "tiers", Type: proto.ColumnType_JSON, Description: "Each element represents a pricing tier. This parameter requires billing_scheme to be set to tiered."},
			{Name: "tiers_mode", Type: proto.ColumnType_STRING, Description: "Defines if the tiering price should be graduated or volume based. In volume-based tiering, the maximum quantity within a period determines the per unit price. In graduated tiering, pricing can change as the quantity grows."},
			{Name: "transform_usage", Type: proto.ColumnType_JSON, Description: "Apply a transformation to the reported usage or set quantity before computing the amount billed."},
			{Name: "trial_period_days", Type: proto.ColumnType_INT, Transform: transform.FromField("TrialPeriodDays"), Description: "Default number of trial days when subscribing a customer to this plan using trial_from_plan=true."},
			{Name: "usage_type", Type: proto.ColumnType_STRING, Description: "Configures how the quantity per period should be determined. Can be either metered or licensed. licensed automatically bills the quantity set when adding it to a subscription. metered aggregates the total usage based on usage records. Defaults to licensed."},
		},
	}
}
// listPlan streams Stripe plans for the stripe_plan table. Supported
// query qualifiers (product_id, active, created comparisons) are
// translated into Stripe list-API parameters so filtering happens
// server-side, and the query's LIMIT is honored both as a page size
// and as a hard cap on streamed rows.
func listPlan(ctx context.Context, d *plugin.QueryData, _ *plugin.HydrateData) (interface{}, error) {
	conn, err := connect(ctx, d)
	if err != nil {
		plugin.Logger(ctx).Error("stripe_plan.listPlan", "connection_error", err)
		return nil, err
	}
	// 100 is Stripe's maximum page size.
	params := &stripe.PlanListParams{
		ListParams: stripe.ListParams{
			Context: ctx,
			Limit:   stripe.Int64(100),
		},
	}
	equalQuals := d.KeyColumnQuals
	if equalQuals["product_id"] != nil {
		params.Product = stripe.String(equalQuals["product_id"].GetStringValue())
	}
	// Comparison values
	quals := d.Quals
	if quals["active"] != nil {
		for _, q := range quals["active"].Quals {
			switch q.Operator {
			case "=":
				params.Active = stripe.Bool(q.Value.GetBoolValue())
			case "<>":
				// active <> X is equivalent to active = !X.
				params.Active = stripe.Bool(!q.Value.GetBoolValue())
			}
		}
	}
	if quals["created"] != nil {
		// Range operators accumulate into a single CreatedRange, which is
		// created lazily on first use; "=" uses the scalar Created field.
		for _, q := range quals["created"].Quals {
			tsSecs := q.Value.GetTimestampValue().GetSeconds()
			switch q.Operator {
			case ">":
				if params.CreatedRange == nil {
					params.CreatedRange = &stripe.RangeQueryParams{}
				}
				params.CreatedRange.GreaterThan = tsSecs
			case ">=":
				if params.CreatedRange == nil {
					params.CreatedRange = &stripe.RangeQueryParams{}
				}
				params.CreatedRange.GreaterThanOrEqual = tsSecs
			case "=":
				params.Created = stripe.Int64(tsSecs)
			case "<=":
				if params.CreatedRange == nil {
					params.CreatedRange = &stripe.RangeQueryParams{}
				}
				params.CreatedRange.LesserThanOrEqual = tsSecs
			case "<":
				if params.CreatedRange == nil {
					params.CreatedRange = &stripe.RangeQueryParams{}
				}
				params.CreatedRange.LesserThan = tsSecs
			}
		}
	}
	// Shrink the page size to the query LIMIT when it is smaller.
	limit := d.QueryContext.Limit
	if d.QueryContext.Limit != nil {
		if *limit < *params.ListParams.Limit {
			params.ListParams.Limit = limit
		}
	}
	// Stream pages until exhausted or the LIMIT is reached.
	var count int64
	i := conn.Plans.List(params)
	for i.Next() {
		d.StreamListItem(ctx, i.Plan())
		count++
		if limit != nil {
			if count >= *limit {
				break
			}
		}
	}
	if err := i.Err(); err != nil {
		plugin.Logger(ctx).Error("stripe_plan.listPlan", "query_error", err, "params", params, "i", i)
		return nil, err
	}
	return nil, nil
}
// getPlan fetches a single Stripe plan by its "id" key qualifier for
// the stripe_plan table's get path.
func getPlan(ctx context.Context, d *plugin.QueryData, _ *plugin.HydrateData) (interface{}, error) {
	conn, err := connect(ctx, d)
	if err != nil {
		plugin.Logger(ctx).Error("stripe_plan.getPlan", "connection_error", err)
		return nil, err
	}
	quals := d.KeyColumnQuals
	id := quals["id"].GetStringValue()
	item, err := conn.Plans.Get(id, &stripe.PlanParams{})
	if err != nil {
		plugin.Logger(ctx).Error("stripe_plan.getPlan", "query_error", err, "id", id)
		return nil, err
	}
	return item, nil
}
package primitives
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"github.com/prysmaticlabs/go-ssz"
"github.com/phoreproject/synapse/beacon/config"
"github.com/phoreproject/synapse/bls"
"github.com/phoreproject/synapse/chainhash"
)
// ValidateAttestation checks if the attestation is valid against the
// current state: the target epoch must be the current or previous
// epoch, the source epoch/hash must match the corresponding justified
// checkpoint, the latest-crosslink hash must match the state's record
// for the shard, and (optionally) the aggregate BLS signature must
// verify against the aggregated public key of the participants.
// verifySignature can be disabled for cheaper pre-checks.
func (s *State) ValidateAttestation(att Attestation, verifySignature bool, c *config.Config) error {
	if att.Data.TargetEpoch == s.EpochIndex {
		// Target is the current epoch: check against the current
		// justified checkpoint and current crosslinks.
		if att.Data.SourceEpoch != s.JustifiedEpoch {
			return fmt.Errorf("expected source epoch to equal the justified epoch if the target epoch is the current epoch (expected: %d, got %d)", s.JustifiedEpoch, att.Data.SourceEpoch)
		}

		justifiedHash, err := s.GetRecentBlockHash(s.JustifiedEpoch*c.EpochLength, c)
		if err != nil {
			return err
		}

		if !att.Data.SourceHash.IsEqual(justifiedHash) {
			return fmt.Errorf("expected source hash to equal the current epoch hash if the target epoch is the current epoch (expected: %s, got %s)", justifiedHash, att.Data.TargetHash)
		}

		if !att.Data.LatestCrosslinkHash.IsEqual(&s.LatestCrosslinks[att.Data.Shard].ShardBlockHash) {
			return fmt.Errorf("expected latest crosslink hash to match if the target epoch is the current epoch (expected: %s, got %s)",
				s.LatestCrosslinks[att.Data.Shard].ShardBlockHash,
				att.Data.LatestCrosslinkHash)
		}
	} else if att.Data.TargetEpoch == s.EpochIndex-1 {
		// Target is the previous epoch: check against the previous
		// justified checkpoint and previous crosslinks.
		if att.Data.SourceEpoch != s.PreviousJustifiedEpoch {
			return fmt.Errorf("expected source epoch to equal the previous justified epoch if the target epoch is the previous epoch (expected: %d, got %d)", s.PreviousJustifiedEpoch, att.Data.SourceEpoch)
		}

		previousJustifiedHash, err := s.GetRecentBlockHash(s.PreviousJustifiedEpoch*c.EpochLength, c)
		if err != nil {
			return err
		}

		if !att.Data.SourceHash.IsEqual(previousJustifiedHash) {
			return fmt.Errorf("expected source hash to equal the previous justified hash if the target epoch is the previous epoch (expected: %s, got %s)", previousJustifiedHash, att.Data.TargetHash)
		}

		if !att.Data.LatestCrosslinkHash.IsEqual(&s.PreviousCrosslinks[att.Data.Shard].ShardBlockHash) {
			return fmt.Errorf("expected latest crosslink hash to match if the target epoch is the previous epoch(expected: %s, got %s)",
				s.PreviousCrosslinks[att.Data.Shard].ShardBlockHash,
				att.Data.LatestCrosslinkHash)
		}
	} else {
		return fmt.Errorf("attestation should have target epoch of either the current epoch (%d) or the previous epoch (%d) but got %d", s.EpochIndex, s.EpochIndex-1, att.Data.TargetEpoch)
	}

	if len(s.LatestCrosslinks) <= int(att.Data.Shard) {
		return errors.New("invalid shard number")
	}

	if verifySignature {
		// Aggregate the public keys of all bitfield participants and
		// verify the aggregate signature over the attestation data root
		// (with custody bit false) under the attestation domain.
		participants, err := s.GetAttestationParticipants(att.Data, att.ParticipationBitfield, c)
		if err != nil {
			return err
		}

		dataRoot, err := ssz.HashTreeRoot(AttestationDataAndCustodyBit{Data: att.Data, PoCBit: false})
		if err != nil {
			return err
		}

		groupPublicKey := bls.NewAggregatePublicKey()
		for _, p := range participants {
			pub, err := s.ValidatorRegistry[p].GetPublicKey()
			if err != nil {
				return err
			}
			groupPublicKey.AggregatePubKey(pub)
		}

		aggSig, err := bls.DeserializeSignature(att.AggregateSig)
		if err != nil {
			return err
		}

		valid, err := bls.VerifySig(groupPublicKey, dataRoot[:], aggSig, GetDomain(s.ForkData, att.Data.Slot, bls.DomainAttestation))
		if err != nil {
			return err
		}

		if !valid {
			return fmt.Errorf("attestation signature is invalid. expected committee with members: %v for slot %d shard %d", participants, att.Data.Slot, att.Data.Shard)
		}
	}
	return nil
}
// applyAttestation verifies an attestation and, when valid, records it as a
// pending attestation on either the current or the previous epoch list.
func (s *State) applyAttestation(att Attestation, c *config.Config, verifySignature bool, proposerIndex uint32) error {
	if err := s.ValidateAttestation(att, verifySignature, c); err != nil {
		return err
	}

	// these checks are dependent on when the attestation is included
	if att.Data.Slot+c.MinAttestationInclusionDelay > s.Slot {
		return fmt.Errorf("attestation included too soon (expected s.Slot > %d, got %d)", att.Data.Slot+c.MinAttestationInclusionDelay, s.Slot)
	}
	// 4 -> 8 should not work
	// 5 -> 8 should work
	if att.Data.Slot+c.EpochLength <= s.Slot {
		return errors.New("attestation was not included within 1 epoch")
	}
	// NOTE(review): Slot-1 underflows when Slot is 0 and unsigned — confirm
	// slot-0 attestations cannot reach this point.
	if (att.Data.Slot-1)/c.EpochLength != att.Data.TargetEpoch {
		return errors.New("attestation slot did not match target epoch")
	}

	// Both branches record the same pending attestation; only the target
	// list differs depending on which epoch the attestation targets.
	pending := PendingAttestation{
		Data:                  att.Data,
		ParticipationBitfield: att.ParticipationBitfield,
		CustodyBitfield:       att.CustodyBitfield,
		InclusionDelay:        s.Slot - att.Data.Slot,
		ProposerIndex:         proposerIndex,
	}
	if att.Data.TargetEpoch == s.EpochIndex {
		s.CurrentEpochAttestations = append(s.CurrentEpochAttestations, pending)
	} else {
		s.PreviousEpochAttestations = append(s.PreviousEpochAttestations, pending)
	}
	return nil
}
// validateParticipationSignature checks that the aggregate signature over
// voteHash was produced by exactly the validators flagged in the
// participation bitfield.
func (s *State) validateParticipationSignature(voteHash chainhash.Hash, participation []uint8, signature [48]byte) error {
	// The bitfield must carry one bit per registered validator.
	if expected := (len(s.ValidatorRegistry) + 7) / 8; len(participation) != expected {
		return errors.New("vote participation array incorrect length")
	}

	// Aggregate the public keys of every validator whose bit is set.
	aggregatedPublicKey := bls.NewAggregatePublicKey()
	for i := range s.ValidatorRegistry {
		if participation[i/8]&(1<<uint(i%8)) == 0 {
			continue
		}
		pk, err := s.ValidatorRegistry[i].GetPublicKey()
		if err != nil {
			return err
		}
		aggregatedPublicKey.AggregatePubKey(pk)
	}

	sig, err := bls.DeserializeSignature(signature)
	if err != nil {
		return err
	}
	valid, err := bls.VerifySig(aggregatedPublicKey, voteHash[:], sig, bls.DomainVote)
	if err != nil {
		return err
	}
	if !valid {
		return errors.New("vote signature did not validate")
	}
	return nil
}
// applyVote validates a vote and adds it to pending votes. A vote that
// matches an already-active proposal has its participation merged into that
// proposal; otherwise a new proposal is registered (charging the proposer).
func (s *State) applyVote(vote AggregatedVote, cfg *config.Config) error {
	voteHash, err := ssz.HashTreeRoot(vote.Data)
	if err != nil {
		return err
	}

	// If the vote targets an already-active proposal, merge participation.
	for i, proposal := range s.Proposals {
		proposalHash, err := ssz.HashTreeRoot(proposal.Data)
		if err != nil {
			return err
		}
		if !bytes.Equal(proposalHash[:], voteHash[:]) {
			continue
		}
		// ignore if already queued
		if proposal.Queued {
			return nil
		}
		if err := s.validateParticipationSignature(voteHash, vote.Participation, vote.Signature); err != nil {
			return err
		}
		// Grow the stored bitfield if the vote's bitfield is longer.
		if needed := len(vote.Participation) - len(s.Proposals[i].Participation); needed > 0 {
			s.Proposals[i].Participation = append(s.Proposals[i].Participation, make([]uint8, needed)...)
		}
		// OR in the new participation bits.
		for j := range vote.Participation {
			s.Proposals[i].Participation[j] |= vote.Participation[j]
		}
		return nil
	}

	// New proposal: the proposer must have voted for their own proposal.
	// Bounds-check the bitfield access first so a short (or empty)
	// participation array cannot cause an index-out-of-range panic —
	// validateParticipationSignature only checks the length later.
	byteIndex := int(vote.Data.Proposer / 8)
	if byteIndex >= len(vote.Participation) {
		return errors.New("vote participation bitfield is too short to contain the proposer bit")
	}
	if vote.Participation[byteIndex]&(1<<uint(vote.Data.Proposer%8)) == 0 {
		return errors.New("could not process vote with proposer bit not set")
	}

	// A cancel vote must reference an existing proposal.
	if vote.Data.Type == Cancel {
		foundProposalToCancel := false
		for _, proposal := range s.Proposals {
			proposalHash, err := ssz.HashTreeRoot(proposal.Data)
			if err != nil {
				return err
			}
			if bytes.Equal(vote.Data.ActionHash[:], proposalHash[:]) {
				foundProposalToCancel = true
			}
		}
		if !foundProposalToCancel {
			return errors.New("could not find proposal to cancel")
		}
	}

	if err := s.validateParticipationSignature(voteHash, vote.Participation, vote.Signature); err != nil {
		return err
	}

	// Charge the proposer for registering a new proposal.
	// NOTE(review): no balance check here — confirm the balance cannot
	// underflow if it is an unsigned type.
	s.ValidatorBalances[vote.Data.Proposer] -= cfg.ProposalCost
	s.Proposals = append(s.Proposals, ActiveProposal{
		Data:          vote.Data,
		Participation: vote.Participation,
		StartEpoch:    s.EpochIndex,
		Queued:        false,
	})
	return nil
}
// ProcessBlock tries to apply a block to the state. It checks the proposer
// and slot, verifies the block and randao signatures (when verifySignature
// is set), mixes the randao reveal into the next randao mix, applies every
// operation in the block body and finally verifies the state root against
// the expected root from the view.
func (s *State) ProcessBlock(block *Block, con *config.Config, view BlockView, verifySignature bool) error {
	proposerIndex, err := s.GetBeaconProposerIndex(block.BlockHeader.SlotNumber-1, con)
	if err != nil {
		return err
	}
	if block.BlockHeader.SlotNumber != s.Slot {
		return fmt.Errorf("block has incorrect slot number (expecting: %d, got: %d)", s.Slot, block.BlockHeader.SlotNumber)
	}
	if block.BlockHeader.ValidatorIndex != proposerIndex {
		return fmt.Errorf("proposer index doesn't match (expected: %d, got %d)", proposerIndex, block.BlockHeader.ValidatorIndex)
	}

	// The proposer signs the block with the signature field zeroed, so hash
	// a copy with an empty signature to reproduce the signed root.
	blockWithoutSignature := block.Copy()
	blockWithoutSignature.BlockHeader.Signature = bls.EmptySignature.Serialize()
	blockWithoutSignatureRoot, err := ssz.HashTreeRoot(blockWithoutSignature)
	if err != nil {
		return err
	}
	proposal := ProposalSignedData{
		Slot:      s.Slot,
		Shard:     con.BeaconShardNumber,
		BlockHash: blockWithoutSignatureRoot,
	}
	proposalRoot, err := ssz.HashTreeRoot(proposal)
	if err != nil {
		return err
	}
	proposerPub, err := s.ValidatorRegistry[proposerIndex].GetPublicKey()
	if err != nil {
		return err
	}
	proposerSig, err := bls.DeserializeSignature(block.BlockHeader.Signature)
	if err != nil {
		return err
	}

	// process block and randao verifications concurrently
	if verifySignature {
		// Buffered to 2 so each goroutine can deliver its single result and
		// exit regardless of receive order. (Previously each goroutine could
		// send up to three values on an unbuffered channel while only two
		// were received, leaking a blocked goroutine and possibly consuming
		// both receives from the same goroutine.)
		verificationResult := make(chan error, 2)
		go func() {
			valid, err := bls.VerifySig(proposerPub, proposalRoot[:], proposerSig, bls.DomainProposal)
			switch {
			case err != nil:
				verificationResult <- err
			case !valid:
				verificationResult <- fmt.Errorf("block had invalid signature (expected signature from validator %d)", proposerIndex)
			default:
				verificationResult <- nil
			}
		}()
		var slotBytes [8]byte
		binary.BigEndian.PutUint64(slotBytes[:], block.BlockHeader.SlotNumber)
		slotBytesHash := chainhash.HashH(slotBytes[:])
		randaoSig, err := bls.DeserializeSignature(block.BlockHeader.RandaoReveal)
		if err != nil {
			return err
		}
		go func() {
			valid, err := bls.VerifySig(proposerPub, slotBytesHash[:], randaoSig, bls.DomainRandao)
			switch {
			case err != nil:
				verificationResult <- err
			case !valid:
				verificationResult <- errors.New("block has invalid randao signature")
			default:
				verificationResult <- nil
			}
		}()
		// Drain both results before returning so neither goroutine is left
		// blocked on a send.
		result1 := <-verificationResult
		result2 := <-verificationResult
		if result1 != nil {
			return result1
		}
		if result2 != nil {
			return result2
		}
	}

	// XOR the randao reveal's hash into the next randao mix.
	randaoRevealSerialized, err := ssz.HashTreeRoot(block.BlockHeader.RandaoReveal)
	if err != nil {
		return err
	}
	for i := range s.NextRandaoMix {
		s.NextRandaoMix[i] ^= randaoRevealSerialized[i]
	}

	// Enforce the per-block operation limits.
	if len(block.BlockBody.ProposerSlashings) > con.MaxProposerSlashings {
		return errors.New("more than maximum proposer slashings")
	}
	if len(block.BlockBody.CasperSlashings) > con.MaxCasperSlashings {
		return errors.New("more than maximum casper slashings")
	}
	if len(block.BlockBody.Attestations) > con.MaxAttestations {
		return errors.New("more than maximum attestations")
	}
	if len(block.BlockBody.Exits) > con.MaxExits {
		return errors.New("more than maximum exits")
	}
	if len(block.BlockBody.Deposits) > con.MaxDeposits {
		return errors.New("more than maximum deposits")
	}
	if len(block.BlockBody.Votes) > con.MaxVotes {
		return errors.New("more than maximum votes")
	}

	// Apply every operation carried in the block body.
	for _, slashing := range block.BlockBody.ProposerSlashings {
		if err := s.applyProposerSlashing(slashing, con); err != nil {
			return err
		}
	}
	for _, cs := range block.BlockBody.CasperSlashings {
		if err := s.applyCasperSlashing(cs, con); err != nil {
			return err
		}
	}
	for _, a := range block.BlockBody.Attestations {
		if err := s.applyAttestation(a, con, verifySignature, proposerIndex); err != nil {
			return err
		}
	}
	// process deposits here
	for _, e := range block.BlockBody.Exits {
		if err := s.ApplyExit(e, con); err != nil {
			return err
		}
	}
	for _, v := range block.BlockBody.Votes {
		if err := s.applyVote(v, con); err != nil {
			return err
		}
	}

	// Check state root.
	expectedStateRoot, err := view.GetLastStateRoot()
	if err != nil {
		return err
	}
	if !block.BlockHeader.StateRoot.IsEqual(&expectedStateRoot) {
		return fmt.Errorf("state root doesn't match (expected: %s, got: %s)", expectedStateRoot, block.BlockHeader.StateRoot)
	}
	return nil
} | primitives/blocktransition.go | 0.722625 | 0.455017 | blocktransition.go | starcoder |
// Package area provides functions working with image areas.
package area
import (
"fmt"
"image"
"github.com/mum4k/termdash/internal/numbers"
)
// Size returns the size of the provided area.
func Size(area image.Rectangle) image.Point {
return image.Point{
area.Dx(),
area.Dy(),
}
}
// FromSize returns the corresponding area for the provided size.
func FromSize(size image.Point) (image.Rectangle, error) {
if size.X < 0 || size.Y < 0 {
return image.Rectangle{}, fmt.Errorf("cannot convert zero or negative size to an area, got: %+v", size)
}
return image.Rect(0, 0, size.X, size.Y), nil
}
// HSplit returns two new areas created by splitting the provided area at the
// specified percentage of its width. The percentage must be in the range
// 0 <= heightPerc <= 100.
// Can return zero size areas.
func HSplit(area image.Rectangle, heightPerc int) (top image.Rectangle, bottom image.Rectangle, err error) {
if min, max := 0, 100; heightPerc < min || heightPerc > max {
return image.ZR, image.ZR, fmt.Errorf("invalid heightPerc %d, must be in range %d <= heightPerc <= %d", heightPerc, min, max)
}
height := area.Dy() * heightPerc / 100
top = image.Rect(area.Min.X, area.Min.Y, area.Max.X, area.Min.Y+height)
if top.Dy() == 0 {
top = image.ZR
}
bottom = image.Rect(area.Min.X, area.Min.Y+height, area.Max.X, area.Max.Y)
if bottom.Dy() == 0 {
bottom = image.ZR
}
return top, bottom, nil
}
// VSplit returns two new areas created by splitting the provided area at the
// specified percentage of its width. The percentage must be in the range
// 0 <= widthPerc <= 100.
// Can return zero size areas.
func VSplit(area image.Rectangle, widthPerc int) (left image.Rectangle, right image.Rectangle, err error) {
if min, max := 0, 100; widthPerc < min || widthPerc > max {
return image.ZR, image.ZR, fmt.Errorf("invalid widthPerc %d, must be in range %d <= widthPerc <= %d", widthPerc, min, max)
}
width := area.Dx() * widthPerc / 100
left = image.Rect(area.Min.X, area.Min.Y, area.Min.X+width, area.Max.Y)
if left.Dx() == 0 {
left = image.ZR
}
right = image.Rect(area.Min.X+width, area.Min.Y, area.Max.X, area.Max.Y)
if right.Dx() == 0 {
right = image.ZR
}
return left, right, nil
}
// ExcludeBorder returns a new area created by subtracting a one-cell border
// on every side of the provided area. Returns the zero area when the input
// is too small for a border to be excluded.
func ExcludeBorder(area image.Rectangle) image.Rectangle {
	// Anything narrower or shorter than two cells vanishes entirely once a
	// cell is removed from each side.
	const minDim = 2
	if area.Dx() < minDim || area.Dy() < minDim {
		return image.ZR
	}
	minX := numbers.Abs(area.Min.X + 1)
	minY := numbers.Abs(area.Min.Y + 1)
	maxX := numbers.Abs(area.Max.X - 1)
	maxY := numbers.Abs(area.Max.Y - 1)
	return image.Rect(minX, minY, maxX, maxY)
}
// WithRatio returns the largest area that has the requested aspect ratio but
// is equal to or smaller than the provided area. Returns the zero area if
// the area or the ratio is zero, or if no such area exists.
func WithRatio(area image.Rectangle, ratio image.Point) image.Rectangle {
	ratio = numbers.SimplifyRatio(ratio)
	if area == image.ZR || ratio == image.ZP {
		return image.ZR
	}
	// The limiting dimension determines how many ratio units fit.
	mult := area.Dx() / ratio.X
	if hMult := area.Dy() / ratio.Y; hMult < mult {
		mult = hMult
	}
	return image.Rect(
		area.Min.X,
		area.Min.Y,
		area.Min.X+ratio.X*mult,
		area.Min.Y+ratio.Y*mult,
	)
}
// Shrink returns a new area whose edges are moved inward by the specified
// number of cells on each side. Can return a zero area when no space remains.
// All cell counts must be zero or positive.
func Shrink(area image.Rectangle, topCells, rightCells, bottomCells, leftCells int) (image.Rectangle, error) {
	names := []string{"topCells", "rightCells", "bottomCells", "leftCells"}
	for i, v := range []int{topCells, rightCells, bottomCells, leftCells} {
		if v < 0 {
			return image.ZR, fmt.Errorf("invalid %s(%d), must be in range %d <= value", names[i], v, 0)
		}
	}

	// Clamp each edge so Min never crosses Max; note the max clamps use the
	// already-shrunk Min values.
	res := area
	res.Min.X, _ = numbers.MinMaxInts([]int{res.Min.X + leftCells, res.Max.X})
	_, res.Max.X = numbers.MinMaxInts([]int{res.Max.X - rightCells, res.Min.X})
	res.Min.Y, _ = numbers.MinMaxInts([]int{res.Min.Y + topCells, res.Max.Y})
	_, res.Max.Y = numbers.MinMaxInts([]int{res.Max.Y - bottomCells, res.Min.Y})
	if res.Dx() == 0 || res.Dy() == 0 {
		return image.ZR, nil
	}
	return res, nil
}
// ShrinkPercent returns a new area whose size is reduced by a percentage of
// its width or height per side. topPerc and bottomPerc are percentages of
// the area's height; rightPerc and leftPerc are percentages of its width.
// All percentages must be in range 0 <= v <= 100. Can return a zero area.
func ShrinkPercent(area image.Rectangle, topPerc, rightPerc, bottomPerc, leftPerc int) (image.Rectangle, error) {
	names := []string{"topPerc", "rightPerc", "bottomPerc", "leftPerc"}
	for i, v := range []int{topPerc, rightPerc, bottomPerc, leftPerc} {
		if v < 0 || v > 100 {
			return image.ZR, fmt.Errorf("invalid %s(%d), must be in range %d <= value <= %d", names[i], v, 0, 100)
		}
	}
	// Convert the percentages into absolute cell counts and delegate.
	return Shrink(
		area,
		area.Dy()*topPerc/100,
		area.Dx()*rightPerc/100,
		area.Dy()*bottomPerc/100,
		area.Dx()*leftPerc/100,
	)
} | internal/area/area.go | 0.895374 | 0.756627 | area.go | starcoder |
package messages
import (
util "github.com/IBM/ibmcloud-volume-interface/lib/utils"
)
// messagesEn is the English message catalog: it maps provider error codes to
// their user-facing description, classification, HTTP-style return code and
// suggested remediation.
// NOTE(review): some entries use string-literal keys while others reference
// the exported code constants (e.g. AuthenticationFailed,
// AccessPointWithAPIDFindFailed) — confirm the literals stay in sync with
// the constants.
var messagesEn = map[string]util.Message{
	"AuthenticationFailed": {
		Code:        AuthenticationFailed,
		Description: "Failed to authenticate the user.",
		Type:        util.Unauthenticated,
		RC:          400,
		Action:      "Verify that you entered the correct IBM Cloud user name and password. If the error persists, the authentication service might be unavailable. Wait a few minutes and try again. ",
	},
	"ErrorRequiredFieldMissing": {
		Code:        "ErrorRequiredFieldMissing",
		Description: "[%s] is required to complete the operation.",
		Type:        util.InvalidRequest,
		RC:          400,
		Action:      "Review the error that is returned. Provide the missing information in your request and try again. ",
	},
	"FailedToPlaceOrder": {
		Code:        "FailedToPlaceOrder",
		Description: "Failed to create volume with the storage provider",
		Type:        util.ProvisioningFailed,
		RC:          500,
		Action:      "Review the error that is returned. If the volume creation service is currently unavailable, try to manually create the volume with the 'ibmcloud is volume-create' command.",
	},
	// NOTE(review): the '%d' verb below is filled with a volume ID — confirm
	// the ID is numeric, otherwise '%s' would be the correct verb.
	"FailedToDeleteVolume": {
		Code:        "FailedToDeleteVolume",
		Description: "The volume ID '%d' could not be deleted from your VPC.",
		Type:        util.DeletionFailed,
		RC:          500,
		Action:      "Verify that the volume ID exists. Run 'ibmcloud is volumes' to list available volumes in your account. If the ID is correct, try to delete the volume with the 'ibmcloud is volume-delete' command. ",
	},
	"FailedToUpdateVolume": {
		Code:        "FailedToUpdateVolume",
		Description: "The volume ID '%d' could not be updated",
		Type:        util.UpdateFailed,
		RC:          500,
		Action:      "Verify that the volume ID exists. Run 'ibmcloud is volumes' to list available volumes in your account.",
	},
	"StorageFindFailedWithVolumeId": {
		Code:        "StorageFindFailedWithVolumeId",
		Description: "A volume with the specified volume ID '%s' could not be found.",
		Type:        util.RetrivalFailed,
		RC:          404,
		Action:      "Verify that the volume ID exists. Run 'ibmcloud is volumes' to list available volumes in your account.",
	},
	"StorageFindFailedWithVolumeName": {
		Code:        "StorageFindFailedWithVolumeName",
		Description: "A volume with the specified volume name '%s' does not exist.",
		Type:        util.RetrivalFailed,
		RC:          404,
		Action:      "Verify that the specified volume exists. Run 'ibmcloud is volumes' to list available volumes in your account.",
	},
	"AccessPointWithAPIDFindFailed": {
		Code:        AccessPointWithAPIDFindFailed,
		Description: "No volume access point could be found for the specified volume ID '%s' and access point ID '%s'",
		Type:        util.VolumeAccessPointFindFailed,
		RC:          400,
		Action:      "Verify that a volume access point for your volume exists.Check if volume ID and access point ID is valid",
	},
	"AccessPointWithVPCIDFindFailed": {
		Code:        AccessPointWithVPCIDFindFailed,
		Description: "No volume access point could be found for the specified volume ID '%s' and VPC ID %s",
		Type:        util.VolumeAccessPointFindFailed,
		RC:          400,
		Action:      "Verify that a volume access point for your volume exists.Check if volume ID and VPC ID is valid",
	},
	"CreateVolumeAccessPointFailed": {
		Code:        CreateVolumeAccessPointFailed,
		Description: "The volume ID '%s' could not create access point for VPC ID %s.",
		Type:        util.CreateVolumeAccessPointFailed,
		RC:          500,
		Action:      "Verify that the volume ID and VPC ID exist.",
	},
	"CreateVolumeAccessPointTimedOut": {
		Code:        CreateVolumeAccessPointTimedOut,
		Description: "The volume ID '%s' could not create access point ID '%s'.",
		Type:        util.CreateVolumeAccessPointFailed,
		RC:          500,
		Action:      "Verify that the volume ID exists.",
	},
	// NOTE(review): "volumd" in the description below looks like a typo for
	// "volume" — confirm before changing the user-visible string.
	"DeleteVolumeAccessPointFailed": {
		Code:        DeleteVolumeAccessPointFailed,
		Description: "The volumd ID '%s' could not delete access point ID '%s'.",
		Type:        util.DeleteVolumeAccessPointFailed,
		RC:          500,
		Action:      "Verify that the specified Volume ID has active volume access points.",
	},
	"DeleteVolumeAccessPointTimedOut": {
		Code:        DeleteVolumeAccessPointTimedOut,
		Description: "The volume ID '%s' could not be delete access point ID '%s'",
		Type:        util.DeleteVolumeAccessPointFailed,
		RC:          500,
		Action:      "Verify that the specified volume ID has active volume access points.",
	},
	"InvalidVolumeID": {
		Code:        "InvalidVolumeID",
		Description: "The specified volume ID '%s' is not valid.",
		Type:        util.InvalidRequest,
		RC:          400,
		Action:      "Verify that the volume ID exists. Run 'ibmcloud is volumes' to list available volumes in your account.",
	},
	"InvalidVolumeName": {
		Code:        "InvalidVolumeName",
		Description: "The specified volume name '%s' is not valid. ",
		Type:        util.InvalidRequest,
		RC:          400,
		Action:      "Verify that the volume name exists. Run 'ibmcloud is volumes' to list available volumes in your account.",
	},
	"VolumeCapacityInvalid": {
		Code:        "VolumeCapacityInvalid",
		Description: "The specified volume capacity '%d' is not valid. ",
		Type:        util.InvalidRequest,
		RC:          400,
		Action:      "Verify the specified volume capacity. The volume capacity must be a positive number between 10 GB and maximum allowed value for the respective storage profile. Refer IBM Cloud File Storage for VPC documentation https://cloud.ibm.com/docs/vpc?topic=vpc-file-storage-profiles.",
	},
	"EmptyResourceGroup": {
		Code:        "EmptyResourceGroup",
		Description: "Resource group information could not be found.",
		Type:        util.InvalidRequest,
		RC:          400,
		Action:      "Provide the name or ID of the resource group that you want to use for your volume. Run 'ibmcloud resource groups' to list the resource groups that you have access to. ",
	},
	"EmptyResourceGroupIDandName": {
		Code:        "EmptyResourceGroupIDandName",
		Description: "Resource group ID or name could not be found.",
		Type:        util.InvalidRequest,
		RC:          400,
		Action:      "Provide the name or ID of the resource group that you want to use for your volume. Run 'ibmcloud resource groups' to list the resource groups that you have access to.",
	},
	"VolumeNotInValidState": {
		Code:        "VolumeNotInValidState",
		Description: "Volume %s did not get valid (available) status within timeout period.",
		Type:        util.ProvisioningFailed,
		RC:          500,
		Action:      "Please check your input",
	},
	"VolumeDeletionInProgress": {
		Code:        "VolumeDeletionInProgress",
		Description: "Volume %s deletion in progress.",
		Type:        util.ProvisioningFailed,
		RC:          500,
		Action:      "Wait for volume deletion",
	},
	"ListVolumesFailed": {
		Code:        "ListVolumesFailed",
		Description: "Unable to fetch list of volumes.",
		Type:        util.RetrivalFailed,
		RC:          404,
		Action:      "Run 'ibmcloud is volumes' to list available volumes in your account.",
	},
	"InvalidListVolumesLimit": {
		Code:        "InvalidListVolumesLimit",
		Description: "The value '%v' specified in the limit parameter of the list volume call is not valid.",
		Type:        util.InvalidRequest,
		RC:          400,
		Action:      "Verify the limit parameter's value. The limit must be a positive number between 0 and 100.",
	},
	"StartVolumeIDNotFound": {
		Code:        "StartVolumeIDNotFound",
		Description: "The volume ID '%s' specified in the start parameter of the list volume call could not be found.",
		Type:        util.InvalidRequest,
		RC:          400,
		Action:      "Please verify that the start volume ID is correct and whether you have access to the volume ID.",
	},
	// This entry (alone) is keyed by the exported constant and carries no RC.
	VolumeAccessPointExist: {
		Code:        VolumeAccessPointExist,
		Description: "The volume ID '%s' could not be deleted from your VPC. Volume has access points which needs to deleted.Please go through the list of VPCs = '%v'",
		Type:        util.DeletionFailed,
		Action:      "User need to review all access points and delete them first before deleting volume.", //TODO once cli will be ready then we need to update this error message
	},
}
// InitMessages returns the English message catalog mapping provider error
// codes to their user-facing message definitions.
func InitMessages() map[string]util.Message {
	return messagesEn
} | common/messages/messages_en.go | 0.512205 | 0.439386 | messages_en.go | starcoder |
package main
import (
"math"
"sort"
)
type Place struct {
Name string
Latitude float64
Longitude float64
Level int32
}
const RadiansToDegrees = 57.2957795
const DegreesToKm = math.Pi * 6371.0 / 180.0
const RadiansToKm = RadiansToDegrees * DegreesToKm
func (p Place) Distance(point Place) float64 {
if (p.Latitude == point.Latitude) && (p.Longitude == point.Longitude) {
return 0.0
}
esq := (1.0 - 1.0/298.25) * (1.0 - 1.0/298.25)
alat3 := math.Atan(math.Tan(p.Latitude/RadiansToDegrees)*esq) * RadiansToDegrees
alat4 := math.Atan(math.Tan(point.Latitude/RadiansToDegrees)*esq) * RadiansToDegrees
rlat1 := alat3 / RadiansToDegrees
rlat2 := alat4 / RadiansToDegrees
rdlon := (point.Longitude - p.Longitude) / RadiansToDegrees
clat1 := math.Cos(rlat1)
clat2 := math.Cos(rlat2)
slat1 := math.Sin(rlat1)
slat2 := math.Sin(rlat2)
cdlon := math.Cos(rdlon)
cdel := slat1*slat2 + clat1*clat2*cdlon
switch {
case cdel > 1.0:
cdel = 1.0
case cdel < -1.0:
cdel = -1.0
}
return RadiansToKm * math.Acos(cdel)
}
func (p Place) Azimuth(point Place) float64 {
if (p.Latitude == point.Latitude) && (p.Longitude == point.Longitude) {
return 0.0
}
esq := (1.0 - 1.0/298.25) * (1.0 - 1.0/298.25)
alat3 := math.Atan(math.Tan(p.Latitude/RadiansToDegrees)*esq) * RadiansToDegrees
alat4 := math.Atan(math.Tan(point.Latitude/RadiansToDegrees)*esq) * RadiansToDegrees
rlat1 := alat3 / RadiansToDegrees
rlat2 := alat4 / RadiansToDegrees
rdlon := (point.Longitude - p.Longitude) / RadiansToDegrees
clat1 := math.Cos(rlat1)
clat2 := math.Cos(rlat2)
slat1 := math.Sin(rlat1)
slat2 := math.Sin(rlat2)
cdlon := math.Cos(rdlon)
sdlon := math.Sin(rdlon)
yazi := sdlon * clat2
xazi := clat1*slat2 - slat1*clat2*cdlon
azi := RadiansToDegrees * math.Atan2(yazi, xazi)
if azi < 0.0 {
azi += 360.0
}
return azi
}
func (p Place) BackAzimuth(point Place) float64 {
if (p.Latitude == point.Latitude) && (p.Longitude == point.Longitude) {
return 0.0
}
esq := (1.0 - 1.0/298.25) * (1.0 - 1.0/298.25)
alat3 := math.Atan(math.Tan(p.Latitude/RadiansToDegrees)*esq) * RadiansToDegrees
alat4 := math.Atan(math.Tan(point.Latitude/RadiansToDegrees)*esq) * RadiansToDegrees
rlat1 := alat3 / RadiansToDegrees
rlat2 := alat4 / RadiansToDegrees
rdlon := (point.Longitude - p.Longitude) / RadiansToDegrees
clat1 := math.Cos(rlat1)
clat2 := math.Cos(rlat2)
slat1 := math.Sin(rlat1)
slat2 := math.Sin(rlat2)
cdlon := math.Cos(rdlon)
sdlon := math.Sin(rdlon)
ybaz := -sdlon * clat1
xbaz := clat2*slat1 - slat2*clat1*cdlon
baz := RadiansToDegrees * math.Atan2(ybaz, xbaz)
if baz < 0.0 {
baz += 360.0
}
return baz
}
// Compass translates the azimuth from p towards point into one of the eight
// compass direction names ("north", "north-east", ... "north-west").
func (p Place) Compass(point Place) string {
	names := [8]string{"north", "north-east", "east", "south-east", "south", "south-west", "west", "north-west"}
	// Offset by half a sector (22.5 degrees) so each name is centred on its
	// cardinal/intercardinal bearing, then normalise into [0, 360).
	bearing := p.Azimuth(point) + 22.5
	for bearing < 0.0 {
		bearing += 360.0
	}
	for bearing >= 360.0 {
		bearing -= 360.0
	}
	return names[int(math.Floor(bearing/45.0))]
}
// Places is a collection of Place values sortable via sort.Interface;
// sorting orders by descending Level (least significant places first).
type Places []Place

// Len, Swap and Less implement sort.Interface for Places.
func (p Places) Len() int { return len(p) }
func (p Places) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p Places) Less(i, j int) bool { return p[i].Level > p[j].Level }
// Closest returns the nearest qualifying place to the given point, or nil if
// none qualifies. Less significant places (higher Level) must be closer to
// qualify: within 20 km for Level > 2, 100 km for Level > 1 and 500 km for
// Level > 0. Note: sorts the receiver in place (by descending Level).
func (p Places) Closest(point Place) *Place {
	sort.Sort(p)
	var (
		best     *Place
		bestDist float64
	)
	for i := range p {
		d := point.Distance(p[i])
		switch {
		case d > 20.0 && p[i].Level > 2:
			continue
		case d > 100.0 && p[i].Level > 1:
			continue
		case d > 500.0 && p[i].Level > 0:
			continue
		}
		if best == nil || d < bestDist {
			bestDist, best = d, &p[i]
		}
	}
	return best
} | tools/stationxml/place.go | 0.740925 | 0.594051 | place.go | starcoder |
package data
import "fmt"
// Fixed parser/type-checker error messages. Multi-line messages are raw
// string literals whose continuation lines are part of the user-visible text.
// NOTE(review): the exported names use ALL_CAPS rather than Go's MixedCaps —
// renaming would break callers, so they are left as-is.
const (
	MISMATCHED_INDENTATION = "Mismatched indentation."

	MODULE_NAME = `Module names should be composed of identifiers started with a lower case character and separated by dots.
They also cannot contain special characters like '?' or '!'.`

	MODULE_DEFINITION = `Expected file to begin with a module declaration.
Example:

module some.package`

	IMPORT_REFER = "Expected exposing definitions to be a comma-separated list of upper or lower case identifiers."

	DECLARATION_REF_ALL = `To import or export all constructor of a type use a (..) syntax.

ex: import package (fun1, SomeType(..), fun2)`

	CTOR_NAME = "Expected constructor name (upper case identifier)."

	IMPORT_ALIAS = `Expected module import alias to be capitalized:
Example: import data.package as Mod`

	IMPORTED_DOT = "Expected identifier after imported variable reference."

	TYPE_VAR = "Expected type variable (lower case identifier)."

	TYPE_DEF = "Expected a type definition."

	TYPE_COLON = "Expected `:` before type definition."

	TYPEALIAS_DOT = "Expected type identifier after dot."

	TYPE_TEST_TYPE = "Expected type in type test."

	RECORD_LABEL = "A label of a record can only be a lower case identifier or a String."

	RECORD_COLON = "Expected `:` after record label."

	RECORD_EQUALS = "Expected `=` or `->` after record labels in set/update expression."

	INSTANCE_TYPE = "Instance types need to be enclosed in double brackets: {{ type }}."

	INSTANCE_VAR = "Instance variables need to be enclosed in double brackets: {{var}}."

	INSTANCE_ERROR = "Type and type alias declarations cannot be instances, only values."

	VARIABLE = "Expected variable name."

	OPERATOR = "Expected operator."

	LAMBDA_BACKSLASH = "Expected lambda definition to start with backslash: `\\`."

	LAMBDA_ARROW = "Expected `->` after lambda parameter definition."

	LAMBDA_VAR = `Expected identifier after start of lambda definition:
Example: \x -> x + 3`

	TOPLEVEL_IDENT = "Expected variable definition or variable type at the top level."

	// NOTE(review): the leading '|' characters below look like leftover
	// Kotlin trimMargin() markers — in a Go raw string they are emitted
	// verbatim to the user. Confirm whether they are intentional.
	PATTERN = `Expected a pattern expression.
|Patterns can be one of:
|
|Wildcard pattern: _
|Literal pattern: 3, 'a', "a string", false, etc
|Variable pattern: x, y, myVar, etc
|Constructor pattern: Some "ok", Result res, None, etc
|Record pattern: { x, y: 3 }
|List pattern: [], [x, y, _], [x :: xs]
|Named pattern: 10 as number
|Type test: :? Int as i`

	DO_WHILE = "Expected keyword `do` after while condition."

	EXP_SIMPLE = "Invalid expression for while condition."

	THEN = "Expected `then` after if condition."

	ELSE = "Expected `else` after then condition."

	LET_DECL = "Expected variable name after `let`."

	LET_EQUALS = "Expected `=` after let name declaration."

	LET_IN = "Expected `in` after let definition."

	FOR_IN = "Expected `in` after for pattern."

	FOR_DO = "Expected `do` after for definition."

	CASE_ARROW = "Expected `->` after case pattern."

	CASE_OF = "Expected `of` after a case expression."

	ALIAS_DOT = "Expected dot (.) after aliased variable."

	MALFORMED_EXPR = "Malformed expression."

	APPLIED_DO_LET = "Cannot apply let statement as a function."

	PUB_PLUS = "Visibility of value or typealias declaration can only be public (pub) not pub+."

	TYPEALIAS_NAME = "Expected name for typealias."

	TYPEALIAS_EQUALS = "Expected `=` after typealias declaration."

	DATA_NAME = "Expected new data type name to be a upper case identifier."

	DATA_EQUALS = "Expected equals `=` after data name declaration."

	INVALID_OPERATOR_DECL = "Operator declarations have to be defined between parentheses."

	IMPLICIT_PATTERN = "Implicit patterns can only be used in function parameters before any destructuring happens."

	ANNOTATION_PATTERN = "Type annotation patterns can only be used in function variables"

	NOT_A_FIELD = "Operator `<-` expects a foreign field as first parameter and cannot be partially applied."

	LET_DO_LAST = "Do expression cannot end with a let statement."

	ANONYMOUS_FUNCTION_ARGUMENT = `Invalid context for anonymous function argument.
Valid ones are:

Operator sections: (_ + 1)
Record access: _.name
Record values: { name: _ }, { age: 10 | _ }
Record restrictions: { - name | _ }
Record merges: { + _, rec }
Index access: _.[1], list.[_]
Option unwrap: _!!
Ifs: if _ then 1 else 0, if check then _ else _
Cases: case _ of ...
Foreign fields: (_ : MyClass)#-field
Foreign methods: (_ : String)#endsWith("."), Math#exp(_)`

	RETURN_EXPR = "return keyword can only be used inside a computation expression."

	YIELD_EXPR = "yield keyword can only be used inside a computation expression."

	FOR_EXPR = "for expression can only be used inside a computation expression."

	LET_BANG = "`let!` syntax can only be used inside a computation expression."

	DO_BANG = "`do!` syntax can only be used inside a computation expression."

	// NOTE(review): this message looks truncated ("Recursive row types" is
	// not a sentence) — confirm the intended wording.
	RECURSIVE_ROWS = "Recursive row types"

	RECURSIVE_LET = "Let variables cannot be recursive."

	// NOTE(review): "explicitily" below is a typo for "explicitly" in a
	// user-visible string — confirm before changing.
	NOT_A_FUNCTION = `Expected expression to be a function.
If you are trying to pass an instance argument to a function explicitily
make sure to use the {{}} syntax.`

	RECORD_MERGE = "Cannot merge records with unknown labels."
)
// UndefinedVarInCtor reports the type variables that are undefined in the
// constructor identified by name, pluralising the message as appropriate.
func UndefinedVarInCtor(name string, typeVars []string) string {
	if len(typeVars) == 1 {
		return fmt.Sprintf("The variable %s is undefined in constructor %s.", typeVars[0], name)
	}
	joined := JoinToStringFunc(typeVars, ", ", func(v string) string { return v })
	return fmt.Sprintf("The variables %s are undefined in constructor %s.", joined, name)
}
func CannotFindInModule(name string, module string) string {
return fmt.Sprintf("Cannot find %s in module %s.", name, module)
}
func CannotImportInModule(name string, module string) string {
return fmt.Sprintf("Cannot import private %s in module %s.", name, module)
}
func UndefinedVar(name string) string {
return fmt.Sprintf("Undefined variable %s.", name)
}
// UndefinedType formats the error shown when a type name cannot be resolved,
// including a hint about importing it.
func UndefinedType(typ string) string {
	return fmt.Sprintf(`Undefined type %s
Make sure the type is imported: import some.module (MyType)`, typ)
}

// WrongKind formats a kind mismatch between the expected and actual kinds.
func WrongKind(expected, got string) string {
	return fmt.Sprintf(`Could not match kind
%s
with kind
%s`, expected, got)
}

// NotARow formats the error for a type used where a row type was required.
// NOTE(review): "is a not a row type" reads as a typo for "is not a row
// type" — confirm before changing the user-visible string.
func NotARow(typ string) string {
	return fmt.Sprintf(`Type
%s
is a not a row type.`, typ)
}

// RecordMissingLabels formats the error listing labels absent from a record.
func RecordMissingLabels(labels string) string {
	return fmt.Sprintf(`Record is missing labels:
%s`, labels)
}
func TypesDontMatch(a, b, reason string) string {
str := fmt.Sprintf(`Cannot match type
%s
with type
%s`, a, b)
if reason != "" {
return fmt.Sprintf("%s\n\n%s", str, reason)
}
return str
}
// EscapeType formats the error for a private type leaking through a public
// function's signature.
func EscapeType(typ string) string {
	return fmt.Sprintf(`Private type %s escaped its module.
A public function cannot have a private type.`, typ)
}

// IncompatibleTypes formats a plain incompatibility error between two types.
func IncompatibleTypes(t1, t2 string) string {
	return fmt.Sprintf("Incompatible types %s and %s.", t1, t2)
}

// InfiniteType formats the occurs-check failure for a self-referential type
// variable.
func InfiniteType(name string) string {
	return fmt.Sprintf("Occurs check failed: infinite type %s.", name)
}

// DuplicateModule formats the error for two modules sharing the same name.
func DuplicateModule(name string) string {
	return fmt.Sprintf(`Found duplicate module
%s`, name)
}

// CycleFound formats the error for a dependency cycle, listing each module
// on its own indented line separated by blank lines.
func CycleFound(nodes []string) string {
	return fmt.Sprintf("Found cycle between modules\n\n%s", JoinToStringFunc(nodes, "\n\n", func(s string) string { return " " + s }))
}
// ModuleNotFound builds the error text for a module that could not be located.
func ModuleNotFound(name string) string {
	return fmt.Sprint("Could not find module ", name, ".")
}

// ExpectedDefinition builds the error text for a top-level type declaration
// that is not immediately followed by its definition.
func ExpectedDefinition(name string) string {
	return fmt.Sprint("Expected definition to follow its type declaration for ", name, ".")
}

// ExpectedLetDefinition is the let-clause variant of ExpectedDefinition.
func ExpectedLetDefinition(name string) string {
	return fmt.Sprint("Expected definition to follow its type declaration for ", name, " in let clause.")
}

// EmptyImport builds the error text for an empty import/export list; ctx
// names the kind of list being reported.
func EmptyImport(ctx string) string {
	return fmt.Sprint(ctx, " list cannot be empty.")
}
// WrongArityToCase builds the error text for a case expression whose number
// of patterns does not match the number of expressions being matched.
//
// Bug fix: the original passed (got, expected) into the format string
// "expected %d patterns but got %d", printing the two counts swapped.
// The sibling WrongArityCtorPattern orders them correctly; this now does too.
func WrongArityToCase(got int, expected int) string {
	return fmt.Sprintf("Case expression expected %d patterns but got %d.", expected, got)
}
// WrongArityCtorPattern builds the error text for a constructor pattern
// applied to the wrong number of parameters.
func WrongArityCtorPattern(name string, got, expected int) string {
	return fmt.Sprint("Constructor pattern ", name, " expected ", expected, " parameter(s) but got ", got, ".")
}

// OpTooLong builds the error text for an operator longer than 3 characters.
func OpTooLong(op string) string {
	return fmt.Sprint("Operator ", op, " is too long. Operators cannot contain more than 3 characters.")
}

// ShadowedVariable builds the error text for a value that shadows another
// value with the same name.
func ShadowedVariable(name string) string {
	return fmt.Sprint("Value ", name, " is shadowing another value with the same name.")
}
// NoAliasFound builds the error text for an unresolved import alias.
func NoAliasFound(alias string) string {
	return fmt.Sprint("Could not find import alias ", alias, ".")
}

// WrongConstructorName builds the error text for a constructor of a
// multi-constructor type that reuses the type's own name.
func WrongConstructorName(typeName string) string {
	return fmt.Sprint("Multi constructor type cannot have the same name as their type: ", typeName, ".")
}

// DuplicatedDecl builds the error text for a declaration that is already
// defined or imported.
func DuplicatedDecl(name string) string {
	return fmt.Sprint("Declaration ", name, " is already defined or imported.")
}

// DuplicatedType builds the error text for a type that is already defined
// or imported.
func DuplicatedType(name string) string {
	return fmt.Sprint("Type ", name, " is already defined or imported.")
}
// UnusedVariable builds the error text for a variable that is never used
// inside its declaration.
func UnusedVariable(varr string) string {
	return fmt.Sprint("Variable ", varr, " is unused in declaration.")
}
// CycleInValues builds the error text for a dependency cycle between values.
// The value names are joined with ", " via the file-local JoinToStringStr.
func CycleInValues(nodes []string) string {
	return fmt.Sprint("Found cycle between values ", JoinToStringStr(nodes, ", "), ".")
}

// CycleInFunctions builds the error text for mutually recursive functions
// that require explicit type annotations to be checked.
func CycleInFunctions(nodes []string) string {
	return fmt.Sprint("Mutually recursive functions ", JoinToStringStr(nodes, ", "), " need type annotations.")
}
// LiteralExpected builds the error text for a missing literal of the given kind.
func LiteralExpected(name string) string {
	return fmt.Sprint("Expected ", name, " literal.")
}
// LParensExpected builds the error text for a missing `(` after ctx.
// A trailing period is added for consistency with the sibling messages
// (PipeExpected, CommaExpected, EqualsExpected all end with a period).
func LParensExpected(ctx string) string {
	return fmt.Sprintf("Expected `(` after %s.", ctx)
}

// RParensExpected builds the error text for a missing `)` after ctx.
func RParensExpected(ctx string) string {
	return fmt.Sprintf("Expected `)` after %s.", ctx)
}

// RSBracketExpected builds the error text for a missing `]` after ctx.
func RSBracketExpected(ctx string) string {
	return fmt.Sprintf("Expected `]` after %s.", ctx)
}

// RBracketExpected builds the error text for a missing `}` after ctx.
func RBracketExpected(ctx string) string {
	return fmt.Sprintf("Expected `}` after %s.", ctx)
}
// PipeExpected builds the error text for a missing `|` after ctx.
func PipeExpected(ctx string) string {
	return fmt.Sprintf("Expected `|` after %s.", ctx)
}

// CommaExpected builds the error text for a missing `,` after ctx.
func CommaExpected(ctx string) string {
	return fmt.Sprintf("Expected `,` after %s.", ctx)
}

// EqualsExpected builds the error text for a missing `=` after ctx.
// Note: the closing brace of this function previously had dataset-metadata
// residue ("| data/errors.go | ...") fused onto the same line, which made
// the file invalid Go; that residue is removed here.
func EqualsExpected(ctx string) string {
	return fmt.Sprintf("Expected `=` after %s.", ctx)
}
package capi
const (
manifests = `---
apiVersion: v1
kind: Namespace
metadata:
labels:
controller-tools.k8s.io: "1.0"
name: cluster-api-system
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
creationTimestamp: null
name: clusters.cluster.k8s.io
spec:
group: cluster.k8s.io
names:
kind: Cluster
plural: clusters
shortNames:
- cl
scope: Namespaced
subresources:
status: {}
validation:
openAPIV3Schema:
description: / [Cluster] Cluster is the Schema for the clusters API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
metadata:
properties:
annotations:
additionalProperties:
type: string
description: 'Annotations is an unstructured key value map stored with
a resource that may be set by external tools to store and retrieve
arbitrary metadata. They are not queryable and should be preserved
when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations'
type: object
clusterName:
description: The name of the cluster which the object belongs to. This
is used to distinguish resources with same name and namespace in different
clusters. This field is not set anywhere right now and apiserver is
going to ignore it if set in create or update request.
type: string
creationTimestamp:
description: "CreationTimestamp is a timestamp representing the server
time when this object was created. It is not guaranteed to be set
in happens-before order across separate operations. Clients may not
set this value. It is represented in RFC3339 form and is in UTC. \n
Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata"
format: date-time
type: string
deletionGracePeriodSeconds:
description: Number of seconds allowed for this object to gracefully
terminate before it will be removed from the system. Only set when
deletionTimestamp is also set. May only be shortened. Read-only.
format: int64
type: integer
deletionTimestamp:
description: "DeletionTimestamp is RFC 3339 date and time at which this
resource will be deleted. This field is set by the server when a graceful
deletion is requested by the user, and is not directly settable by
a client. The resource is expected to be deleted (no longer visible
from resource lists, and not reachable by name) after the time in
this field, once the finalizers list is empty. As long as the finalizers
list contains items, deletion is blocked. Once the deletionTimestamp
is set, this value may not be unset or be set further into the future,
although it may be shortened or the resource may be deleted prior
to this time. For example, a user may request that a pod is deleted
in 30 seconds. The Kubelet will react by sending a graceful termination
signal to the containers in the pod. After that 30 seconds, the Kubelet
will send a hard termination signal (SIGKILL) to the container and
after cleanup, remove the pod from the API. In the presence of network
partitions, this object may still exist after this timestamp, until
an administrator or automated process can determine the resource is
fully terminated. If not set, graceful deletion of the object has
not been requested. \n Populated by the system when a graceful deletion
is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata"
format: date-time
type: string
finalizers:
description: Must be empty before the object is deleted from the registry.
Each entry is an identifier for the responsible component that will
remove the entry from the list. If the deletionTimestamp of the object
is non-nil, entries in this list can only be removed.
items:
type: string
type: array
generateName:
description: "GenerateName is an optional prefix, used by the server,
to generate a unique name ONLY IF the Name field has not been provided.
If this field is used, the name returned to the client will be different
than the name passed. This value will also be combined with a unique
suffix. The provided value has the same validation rules as the Name
field, and may be truncated by the length of the suffix required to
make the value unique on the server. \n If this field is specified
and the generated name exists, the server will NOT return a 409 -
instead, it will either return 201 Created or 500 with Reason ServerTimeout
indicating a unique name could not be found in the time allotted,
and the client should retry (optionally after the time indicated in
the Retry-After header). \n Applied only if Name is not specified.
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency"
type: string
generation:
description: A sequence number representing a specific generation of
the desired state. Populated by the system. Read-only.
format: int64
type: integer
initializers:
description: "An initializer is a controller which enforces some system
invariant at object creation time. This field is a list of initializers
that have not yet acted on this object. If nil or empty, this object
has been completely initialized. Otherwise, the object is considered
uninitialized and is hidden (in list/watch and get calls) from clients
that haven't explicitly asked to observe uninitialized objects. \n
When an object is created, the system will populate this list with
the current set of initializers. Only privileged users may set or
modify this list. Once it is empty, it may not be modified further
by any user. \n DEPRECATED - initializers are an alpha field and will
be removed in v1.15."
properties:
pending:
description: Pending is a list of initializers that must execute
in order before this object is visible. When the last pending
initializer is removed, and no failing result is set, the initializers
struct will be set to nil and the object is considered as initialized
and visible to all clients.
items:
properties:
name:
description: name of the process that is responsible for initializing
this object.
type: string
required:
- name
type: object
type: array
result:
description: If result is set with the Failure field, the object
will be persisted to storage and then deleted, ensuring that other
clients can observe the deletion.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this
representation of an object. Servers should convert recognized
schemas to the latest internal value, and may reject unrecognized
values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
type: string
code:
description: Suggested HTTP return code for this status, 0 if
not set.
format: int32
type: integer
details:
description: Extended data associated with the reason. Each
reason may define its own extended details. This field is
optional and the data returned is not guaranteed to conform
to any schema except that defined by the reason type.
properties:
causes:
description: The Causes array includes more details associated
with the StatusReason failure. Not all StatusReasons may
provide detailed causes.
items:
properties:
field:
description: "The field of the resource that has caused
this error, as named by its JSON serialization.
May include dot and postfix notation for nested
attributes. Arrays are zero-indexed. Fields may
appear more than once in an array of causes due
to fields having multiple errors. Optional. \n Examples:
\ \"name\" - the field \"name\" on the current
resource \"items[0].name\" - the field \"name\"
on the first array entry in \"items\""
type: string
message:
description: A human-readable description of the cause
of the error. This field may be presented as-is
to a reader.
type: string
reason:
description: A machine-readable description of the
cause of the error. If this value is empty there
is no information available.
type: string
type: object
type: array
group:
description: The group attribute of the resource associated
with the status StatusReason.
type: string
kind:
description: 'The kind attribute of the resource associated
with the status StatusReason. On some operations may differ
from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
name:
description: The name attribute of the resource associated
with the status StatusReason (when there is a single name
which can be described).
type: string
retryAfterSeconds:
description: If specified, the time in seconds before the
operation should be retried. Some errors may indicate
the client must take an alternate action - for those errors
this field may indicate how long to wait before taking
the alternate action.
format: int32
type: integer
uid:
description: 'UID of the resource. (when there is a single
resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids'
type: string
type: object
kind:
description: 'Kind is a string value representing the REST resource
this object represents. Servers may infer this from the endpoint
the client submits requests to. Cannot be updated. In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
message:
description: A human-readable description of the status of this
operation.
type: string
metadata:
description: 'Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
properties:
continue:
description: continue may be set if the user set a limit
on the number of items returned, and indicates that the
server has more data available. The value is opaque and
may be used to issue another request to the endpoint that
served this list to retrieve the next set of available
objects. Continuing a consistent list may not be possible
if the server configuration has changed or more than a
few minutes have passed. The resourceVersion field returned
when using this continue value will be identical to the
value in the first response, unless you have received
this token from an error message.
type: string
resourceVersion:
description: 'String that identifies the server''s internal
version of this object that can be used by clients to
determine when objects have changed. Value must be treated
as opaque by clients and passed unmodified back to the
server. Populated by the system. Read-only. More info:
https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency'
type: string
selfLink:
description: selfLink is a URL representing this object.
Populated by the system. Read-only.
type: string
type: object
reason:
description: A machine-readable description of why this operation
is in the "Failure" status. If this value is empty there is
no information available. A Reason clarifies an HTTP status
code but does not override it.
type: string
status:
description: 'Status of the operation. One of: "Success" or
"Failure". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status'
type: string
type: object
required:
- pending
type: object
labels:
additionalProperties:
type: string
description: 'Map of string keys and values that can be used to organize
and categorize (scope and select) objects. May match selectors of
replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels'
type: object
managedFields:
description: "ManagedFields maps workflow-id and version to the set
of fields that are managed by that workflow. This is mostly for internal
housekeeping, and users typically shouldn't need to set or understand
this field. A workflow can be the user's name, a controller's name,
or the name of a specific apply path like \"ci-cd\". The set of fields
is always in the version that the workflow used when modifying the
object. \n This field is alpha and can be changed or removed without
notice."
items:
properties:
apiVersion:
description: APIVersion defines the version of this resource that
this field set applies to. The format is "group/version" just
like the top-level APIVersion field. It is necessary to track
the version of a field set because it cannot be automatically
converted.
type: string
fields:
additionalProperties: true
description: Fields identifies a set of fields.
type: object
manager:
description: Manager is an identifier of the workflow managing
these fields.
type: string
operation:
description: Operation is the type of operation which lead to
this ManagedFieldsEntry being created. The only valid values
for this field are 'Apply' and 'Update'.
type: string
time:
description: Time is timestamp of when these fields were set.
It should always be empty if Operation is 'Apply'
format: date-time
type: string
type: object
type: array
name:
description: 'Name must be unique within a namespace. Is required when
creating resources, although some resources may allow a client to
request the generation of an appropriate name automatically. Name
is primarily intended for creation idempotence and configuration definition.
Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names'
type: string
namespace:
description: "Namespace defines the space within each name must be unique.
An empty namespace is equivalent to the \"default\" namespace, but
\"default\" is the canonical representation. Not all objects are required
to be scoped to a namespace - the value of this field for those objects
will be empty. \n Must be a DNS_LABEL. Cannot be updated. More info:
http://kubernetes.io/docs/user-guide/namespaces"
type: string
ownerReferences:
description: List of objects depended by this object. If ALL objects
in the list have been deleted, this object will be garbage collected.
If this object is managed by a controller, then an entry in this list
will point to this controller, with the controller field set to true.
There cannot be more than one managing controller.
items:
properties:
apiVersion:
description: API version of the referent.
type: string
blockOwnerDeletion:
description: If true, AND if the owner has the "foregroundDeletion"
finalizer, then the owner cannot be deleted from the key-value
store until this reference is removed. Defaults to false. To
set this field, a user needs "delete" permission of the owner,
otherwise 422 (Unprocessable Entity) will be returned.
type: boolean
controller:
description: If true, this reference points to the managing controller.
type: boolean
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names'
type: string
uid:
description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids'
type: string
required:
- apiVersion
- kind
- name
- uid
type: object
type: array
resourceVersion:
description: "An opaque value that represents the internal version of
this object that can be used by clients to determine when objects
have changed. May be used for optimistic concurrency, change detection,
and the watch operation on a resource or set of resources. Clients
must treat these values as opaque and passed unmodified back to the
server. They may only be valid for a particular resource or set of
resources. \n Populated by the system. Read-only. Value must be treated
as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency"
type: string
selfLink:
description: SelfLink is a URL representing this object. Populated by
the system. Read-only.
type: string
uid:
description: "UID is the unique in time and space value for this object.
It is typically generated by the server on successful creation of
a resource and is not allowed to change on PUT operations. \n Populated
by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"
type: string
type: object
spec:
properties:
clusterNetwork:
description: Cluster network configuration
properties:
pods:
description: The network ranges from which Pod networks are allocated.
properties:
cidrBlocks:
items:
type: string
type: array
required:
- cidrBlocks
type: object
serviceDomain:
description: Domain name for services.
type: string
services:
description: The network ranges from which service VIPs are allocated.
properties:
cidrBlocks:
items:
type: string
type: array
required:
- cidrBlocks
type: object
required:
- services
- pods
- serviceDomain
type: object
providerSpec:
description: Provider-specific serialized configuration to use during
cluster creation. It is recommended that providers maintain their
own versioned API types that should be serialized/deserialized from
this field.
properties:
value:
description: Value is an inlined, serialized representation of the
resource configuration. It is recommended that providers maintain
their own versioned API types that should be serialized/deserialized
from this field, akin to component config.
type: object
valueFrom:
description: Source for the provider configuration. Cannot be used
if value is not empty.
properties:
machineClass:
description: The machine class from which the provider config
should be sourced.
properties:
apiVersion:
description: API version of the referent.
type: string
fieldPath:
description: 'If referring to a piece of an object instead
of an entire object, this string should contain a valid
JSON/Go field access statement, such as desiredState.manifest.containers[2].
For example, if the object reference is to a container
within a pod, this would take on a value like: "spec.containers{name}"
(where "name" refers to the name of the container that
triggered the event) or if no container name is specified
"spec.containers[2]" (container with index 2 in this pod).
This syntax is chosen only to have some well-defined way
of referencing a part of an object. TODO: this design
is not final and this field is subject to change in the
future.'
type: string
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
namespace:
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
type: string
provider:
description: Provider is the name of the cloud-provider
which MachineClass is intended for.
type: string
resourceVersion:
description: 'Specific resourceVersion to which this reference
is made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency'
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
type: object
type: object
type: object
required:
- clusterNetwork
type: object
status:
properties:
apiEndpoints:
description: APIEndpoint represents the endpoint to communicate with
the IP.
items:
properties:
host:
description: The hostname on which the API server is serving.
type: string
port:
description: The port on which the API server is serving.
type: integer
required:
- host
- port
type: object
type: array
errorMessage:
description: If set, indicates that there is a problem reconciling the
state, and will be set to a descriptive error message.
type: string
errorReason:
description: If set, indicates that there is a problem reconciling the
state, and will be set to a token value suitable for programmatic
interpretation.
type: string
providerStatus:
description: Provider-specific status. It is recommended that providers
maintain their own versioned API types that should be serialized/deserialized
from this field.
type: object
type: object
type: object
versions:
- name: v1alpha1
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
creationTimestamp: null
name: machineclasses.cluster.k8s.io
spec:
group: cluster.k8s.io
names:
kind: MachineClass
plural: machineclasses
shortNames:
- mc
scope: Namespaced
validation:
openAPIV3Schema:
description: / [MachineClass] MachineClass can be used to templatize and re-use
provider configuration across multiple Machines / MachineSets / MachineDeployments.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
metadata:
properties:
annotations:
additionalProperties:
type: string
description: 'Annotations is an unstructured key value map stored with
a resource that may be set by external tools to store and retrieve
arbitrary metadata. They are not queryable and should be preserved
when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations'
type: object
clusterName:
description: The name of the cluster which the object belongs to. This
is used to distinguish resources with same name and namespace in different
clusters. This field is not set anywhere right now and apiserver is
going to ignore it if set in create or update request.
type: string
creationTimestamp:
description: "CreationTimestamp is a timestamp representing the server
time when this object was created. It is not guaranteed to be set
in happens-before order across separate operations. Clients may not
set this value. It is represented in RFC3339 form and is in UTC. \n
Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata"
format: date-time
type: string
deletionGracePeriodSeconds:
description: Number of seconds allowed for this object to gracefully
terminate before it will be removed from the system. Only set when
deletionTimestamp is also set. May only be shortened. Read-only.
format: int64
type: integer
deletionTimestamp:
description: "DeletionTimestamp is RFC 3339 date and time at which this
resource will be deleted. This field is set by the server when a graceful
deletion is requested by the user, and is not directly settable by
a client. The resource is expected to be deleted (no longer visible
from resource lists, and not reachable by name) after the time in
this field, once the finalizers list is empty. As long as the finalizers
list contains items, deletion is blocked. Once the deletionTimestamp
is set, this value may not be unset or be set further into the future,
although it may be shortened or the resource may be deleted prior
to this time. For example, a user may request that a pod is deleted
in 30 seconds. The Kubelet will react by sending a graceful termination
signal to the containers in the pod. After that 30 seconds, the Kubelet
will send a hard termination signal (SIGKILL) to the container and
after cleanup, remove the pod from the API. In the presence of network
partitions, this object may still exist after this timestamp, until
an administrator or automated process can determine the resource is
fully terminated. If not set, graceful deletion of the object has
not been requested. \n Populated by the system when a graceful deletion
is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata"
format: date-time
type: string
finalizers:
description: Must be empty before the object is deleted from the registry.
Each entry is an identifier for the responsible component that will
remove the entry from the list. If the deletionTimestamp of the object
is non-nil, entries in this list can only be removed.
items:
type: string
type: array
generateName:
description: "GenerateName is an optional prefix, used by the server,
to generate a unique name ONLY IF the Name field has not been provided.
If this field is used, the name returned to the client will be different
than the name passed. This value will also be combined with a unique
suffix. The provided value has the same validation rules as the Name
field, and may be truncated by the length of the suffix required to
make the value unique on the server. \n If this field is specified
and the generated name exists, the server will NOT return a 409 -
instead, it will either return 201 Created or 500 with Reason ServerTimeout
indicating a unique name could not be found in the time allotted,
and the client should retry (optionally after the time indicated in
the Retry-After header). \n Applied only if Name is not specified.
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency"
type: string
generation:
description: A sequence number representing a specific generation of
the desired state. Populated by the system. Read-only.
format: int64
type: integer
initializers:
description: "An initializer is a controller which enforces some system
invariant at object creation time. This field is a list of initializers
that have not yet acted on this object. If nil or empty, this object
has been completely initialized. Otherwise, the object is considered
uninitialized and is hidden (in list/watch and get calls) from clients
that haven't explicitly asked to observe uninitialized objects. \n
When an object is created, the system will populate this list with
the current set of initializers. Only privileged users may set or
modify this list. Once it is empty, it may not be modified further
by any user. \n DEPRECATED - initializers are an alpha field and will
be removed in v1.15."
properties:
pending:
description: Pending is a list of initializers that must execute
in order before this object is visible. When the last pending
initializer is removed, and no failing result is set, the initializers
struct will be set to nil and the object is considered as initialized
and visible to all clients.
items:
properties:
name:
description: name of the process that is responsible for initializing
this object.
type: string
required:
- name
type: object
type: array
result:
description: If result is set with the Failure field, the object
will be persisted to storage and then deleted, ensuring that other
clients can observe the deletion.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this
representation of an object. Servers should convert recognized
schemas to the latest internal value, and may reject unrecognized
values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
type: string
code:
description: Suggested HTTP return code for this status, 0 if
not set.
format: int32
type: integer
details:
description: Extended data associated with the reason. Each
reason may define its own extended details. This field is
optional and the data returned is not guaranteed to conform
to any schema except that defined by the reason type.
properties:
causes:
description: The Causes array includes more details associated
with the StatusReason failure. Not all StatusReasons may
provide detailed causes.
items:
properties:
field:
description: "The field of the resource that has caused
this error, as named by its JSON serialization.
May include dot and postfix notation for nested
attributes. Arrays are zero-indexed. Fields may
appear more than once in an array of causes due
to fields having multiple errors. Optional. \n Examples:
\ \"name\" - the field \"name\" on the current
resource \"items[0].name\" - the field \"name\"
on the first array entry in \"items\""
type: string
message:
description: A human-readable description of the cause
of the error. This field may be presented as-is
to a reader.
type: string
reason:
description: A machine-readable description of the
cause of the error. If this value is empty there
is no information available.
type: string
type: object
type: array
group:
description: The group attribute of the resource associated
with the status StatusReason.
type: string
kind:
description: 'The kind attribute of the resource associated
with the status StatusReason. On some operations may differ
from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
name:
description: The name attribute of the resource associated
with the status StatusReason (when there is a single name
which can be described).
type: string
retryAfterSeconds:
description: If specified, the time in seconds before the
operation should be retried. Some errors may indicate
the client must take an alternate action - for those errors
this field may indicate how long to wait before taking
the alternate action.
format: int32
type: integer
uid:
description: 'UID of the resource. (when there is a single
resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids'
type: string
type: object
kind:
description: 'Kind is a string value representing the REST resource
this object represents. Servers may infer this from the endpoint
the client submits requests to. Cannot be updated. In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
message:
description: A human-readable description of the status of this
operation.
type: string
metadata:
description: 'Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
properties:
continue:
description: continue may be set if the user set a limit
on the number of items returned, and indicates that the
server has more data available. The value is opaque and
may be used to issue another request to the endpoint that
served this list to retrieve the next set of available
objects. Continuing a consistent list may not be possible
if the server configuration has changed or more than a
few minutes have passed. The resourceVersion field returned
when using this continue value will be identical to the
value in the first response, unless you have received
this token from an error message.
type: string
resourceVersion:
description: 'String that identifies the server''s internal
version of this object that can be used by clients to
determine when objects have changed. Value must be treated
as opaque by clients and passed unmodified back to the
server. Populated by the system. Read-only. More info:
https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency'
type: string
selfLink:
description: selfLink is a URL representing this object.
Populated by the system. Read-only.
type: string
type: object
reason:
description: A machine-readable description of why this operation
is in the "Failure" status. If this value is empty there is
no information available. A Reason clarifies an HTTP status
code but does not override it.
type: string
status:
description: 'Status of the operation. One of: "Success" or
"Failure". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status'
type: string
type: object
required:
- pending
type: object
labels:
additionalProperties:
type: string
description: 'Map of string keys and values that can be used to organize
and categorize (scope and select) objects. May match selectors of
replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels'
type: object
managedFields:
description: "ManagedFields maps workflow-id and version to the set
of fields that are managed by that workflow. This is mostly for internal
housekeeping, and users typically shouldn't need to set or understand
this field. A workflow can be the user's name, a controller's name,
or the name of a specific apply path like \"ci-cd\". The set of fields
is always in the version that the workflow used when modifying the
object. \n This field is alpha and can be changed or removed without
notice."
items:
properties:
apiVersion:
description: APIVersion defines the version of this resource that
this field set applies to. The format is "group/version" just
like the top-level APIVersion field. It is necessary to track
the version of a field set because it cannot be automatically
converted.
type: string
fields:
additionalProperties: true
description: Fields identifies a set of fields.
type: object
manager:
description: Manager is an identifier of the workflow managing
these fields.
type: string
operation:
            description: Operation is the type of operation which led to
this ManagedFieldsEntry being created. The only valid values
for this field are 'Apply' and 'Update'.
type: string
time:
description: Time is timestamp of when these fields were set.
              It should always be empty if Operation is 'Apply'.
format: date-time
type: string
type: object
type: array
name:
description: 'Name must be unique within a namespace. Is required when
creating resources, although some resources may allow a client to
request the generation of an appropriate name automatically. Name
is primarily intended for creation idempotence and configuration definition.
Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names'
type: string
namespace:
      description: "Namespace defines the space within which each name must
        be unique. An empty namespace is equivalent to the \"default\" namespace,
        but \"default\" is the canonical representation. Not all objects are
        required to be scoped to a namespace - the value of this field for those
        objects will be empty. \n Must be a DNS_LABEL. Cannot be updated. More
        info: http://kubernetes.io/docs/user-guide/namespaces"
type: string
ownerReferences:
description: List of objects depended by this object. If ALL objects
in the list have been deleted, this object will be garbage collected.
If this object is managed by a controller, then an entry in this list
will point to this controller, with the controller field set to true.
There cannot be more than one managing controller.
items:
properties:
apiVersion:
description: API version of the referent.
type: string
blockOwnerDeletion:
description: If true, AND if the owner has the "foregroundDeletion"
finalizer, then the owner cannot be deleted from the key-value
store until this reference is removed. Defaults to false. To
set this field, a user needs "delete" permission of the owner,
otherwise 422 (Unprocessable Entity) will be returned.
type: boolean
controller:
description: If true, this reference points to the managing controller.
type: boolean
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names'
type: string
uid:
description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids'
type: string
required:
- apiVersion
- kind
- name
- uid
type: object
type: array
resourceVersion:
description: "An opaque value that represents the internal version of
this object that can be used by clients to determine when objects
have changed. May be used for optimistic concurrency, change detection,
and the watch operation on a resource or set of resources. Clients
must treat these values as opaque and passed unmodified back to the
server. They may only be valid for a particular resource or set of
resources. \n Populated by the system. Read-only. Value must be treated
        as opaque by clients. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency"
type: string
selfLink:
description: SelfLink is a URL representing this object. Populated by
the system. Read-only.
type: string
uid:
description: "UID is the unique in time and space value for this object.
It is typically generated by the server on successful creation of
a resource and is not allowed to change on PUT operations. \n Populated
by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"
type: string
type: object
providerSpec:
description: Provider-specific configuration to use during node creation.
type: object
required:
- providerSpec
type: object
versions:
- name: v1alpha1
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
creationTimestamp: null
name: machinedeployments.cluster.k8s.io
spec:
group: cluster.k8s.io
names:
kind: MachineDeployment
plural: machinedeployments
shortNames:
- md
scope: Namespaced
subresources:
scale:
labelSelectorPath: .status.labelSelector
specReplicasPath: .spec.replicas
statusReplicasPath: .status.replicas
status: {}
validation:
openAPIV3Schema:
      description: MachineDeployment is the Schema for the machinedeployments
API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
metadata:
properties:
annotations:
additionalProperties:
type: string
description: 'Annotations is an unstructured key value map stored with
a resource that may be set by external tools to store and retrieve
arbitrary metadata. They are not queryable and should be preserved
when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations'
type: object
clusterName:
description: The name of the cluster which the object belongs to. This
is used to distinguish resources with same name and namespace in different
clusters. This field is not set anywhere right now and apiserver is
going to ignore it if set in create or update request.
type: string
creationTimestamp:
description: "CreationTimestamp is a timestamp representing the server
time when this object was created. It is not guaranteed to be set
in happens-before order across separate operations. Clients may not
set this value. It is represented in RFC3339 form and is in UTC. \n
Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata"
format: date-time
type: string
deletionGracePeriodSeconds:
description: Number of seconds allowed for this object to gracefully
terminate before it will be removed from the system. Only set when
deletionTimestamp is also set. May only be shortened. Read-only.
format: int64
type: integer
deletionTimestamp:
description: "DeletionTimestamp is RFC 3339 date and time at which this
resource will be deleted. This field is set by the server when a graceful
deletion is requested by the user, and is not directly settable by
a client. The resource is expected to be deleted (no longer visible
from resource lists, and not reachable by name) after the time in
this field, once the finalizers list is empty. As long as the finalizers
list contains items, deletion is blocked. Once the deletionTimestamp
is set, this value may not be unset or be set further into the future,
although it may be shortened or the resource may be deleted prior
to this time. For example, a user may request that a pod is deleted
in 30 seconds. The Kubelet will react by sending a graceful termination
signal to the containers in the pod. After that 30 seconds, the Kubelet
will send a hard termination signal (SIGKILL) to the container and
after cleanup, remove the pod from the API. In the presence of network
partitions, this object may still exist after this timestamp, until
an administrator or automated process can determine the resource is
fully terminated. If not set, graceful deletion of the object has
not been requested. \n Populated by the system when a graceful deletion
is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata"
format: date-time
type: string
finalizers:
description: Must be empty before the object is deleted from the registry.
Each entry is an identifier for the responsible component that will
remove the entry from the list. If the deletionTimestamp of the object
is non-nil, entries in this list can only be removed.
items:
type: string
type: array
generateName:
description: "GenerateName is an optional prefix, used by the server,
to generate a unique name ONLY IF the Name field has not been provided.
If this field is used, the name returned to the client will be different
than the name passed. This value will also be combined with a unique
suffix. The provided value has the same validation rules as the Name
field, and may be truncated by the length of the suffix required to
make the value unique on the server. \n If this field is specified
and the generated name exists, the server will NOT return a 409 -
instead, it will either return 201 Created or 500 with Reason ServerTimeout
indicating a unique name could not be found in the time allotted,
and the client should retry (optionally after the time indicated in
the Retry-After header). \n Applied only if Name is not specified.
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency"
type: string
generation:
description: A sequence number representing a specific generation of
the desired state. Populated by the system. Read-only.
format: int64
type: integer
initializers:
description: "An initializer is a controller which enforces some system
invariant at object creation time. This field is a list of initializers
that have not yet acted on this object. If nil or empty, this object
has been completely initialized. Otherwise, the object is considered
uninitialized and is hidden (in list/watch and get calls) from clients
that haven't explicitly asked to observe uninitialized objects. \n
When an object is created, the system will populate this list with
the current set of initializers. Only privileged users may set or
modify this list. Once it is empty, it may not be modified further
by any user. \n DEPRECATED - initializers are an alpha field and will
be removed in v1.15."
properties:
pending:
description: Pending is a list of initializers that must execute
in order before this object is visible. When the last pending
initializer is removed, and no failing result is set, the initializers
struct will be set to nil and the object is considered as initialized
and visible to all clients.
items:
properties:
name:
description: name of the process that is responsible for initializing
this object.
type: string
required:
- name
type: object
type: array
result:
description: If result is set with the Failure field, the object
will be persisted to storage and then deleted, ensuring that other
clients can observe the deletion.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this
representation of an object. Servers should convert recognized
schemas to the latest internal value, and may reject unrecognized
values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
type: string
code:
description: Suggested HTTP return code for this status, 0 if
not set.
format: int32
type: integer
details:
description: Extended data associated with the reason. Each
reason may define its own extended details. This field is
optional and the data returned is not guaranteed to conform
to any schema except that defined by the reason type.
properties:
causes:
description: The Causes array includes more details associated
with the StatusReason failure. Not all StatusReasons may
provide detailed causes.
items:
properties:
field:
description: "The field of the resource that has caused
this error, as named by its JSON serialization.
May include dot and postfix notation for nested
attributes. Arrays are zero-indexed. Fields may
appear more than once in an array of causes due
to fields having multiple errors. Optional. \n Examples:
\ \"name\" - the field \"name\" on the current
resource \"items[0].name\" - the field \"name\"
on the first array entry in \"items\""
type: string
message:
description: A human-readable description of the cause
of the error. This field may be presented as-is
to a reader.
type: string
reason:
description: A machine-readable description of the
cause of the error. If this value is empty there
is no information available.
type: string
type: object
type: array
group:
description: The group attribute of the resource associated
with the status StatusReason.
type: string
kind:
description: 'The kind attribute of the resource associated
with the status StatusReason. On some operations may differ
from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
name:
description: The name attribute of the resource associated
with the status StatusReason (when there is a single name
which can be described).
type: string
retryAfterSeconds:
description: If specified, the time in seconds before the
operation should be retried. Some errors may indicate
the client must take an alternate action - for those errors
this field may indicate how long to wait before taking
the alternate action.
format: int32
type: integer
uid:
description: 'UID of the resource. (when there is a single
resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids'
type: string
type: object
kind:
description: 'Kind is a string value representing the REST resource
this object represents. Servers may infer this from the endpoint
the client submits requests to. Cannot be updated. In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
message:
description: A human-readable description of the status of this
operation.
type: string
metadata:
description: 'Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
properties:
continue:
description: continue may be set if the user set a limit
on the number of items returned, and indicates that the
server has more data available. The value is opaque and
may be used to issue another request to the endpoint that
served this list to retrieve the next set of available
objects. Continuing a consistent list may not be possible
if the server configuration has changed or more than a
few minutes have passed. The resourceVersion field returned
when using this continue value will be identical to the
value in the first response, unless you have received
this token from an error message.
type: string
resourceVersion:
description: 'String that identifies the server''s internal
version of this object that can be used by clients to
determine when objects have changed. Value must be treated
as opaque by clients and passed unmodified back to the
server. Populated by the system. Read-only. More info:
https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency'
type: string
selfLink:
description: selfLink is a URL representing this object.
Populated by the system. Read-only.
type: string
type: object
reason:
description: A machine-readable description of why this operation
is in the "Failure" status. If this value is empty there is
no information available. A Reason clarifies an HTTP status
code but does not override it.
type: string
status:
description: 'Status of the operation. One of: "Success" or
"Failure". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status'
type: string
type: object
required:
- pending
type: object
labels:
additionalProperties:
type: string
description: 'Map of string keys and values that can be used to organize
and categorize (scope and select) objects. May match selectors of
replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels'
type: object
managedFields:
description: "ManagedFields maps workflow-id and version to the set
of fields that are managed by that workflow. This is mostly for internal
housekeeping, and users typically shouldn't need to set or understand
this field. A workflow can be the user's name, a controller's name,
or the name of a specific apply path like \"ci-cd\". The set of fields
is always in the version that the workflow used when modifying the
object. \n This field is alpha and can be changed or removed without
notice."
items:
properties:
apiVersion:
description: APIVersion defines the version of this resource that
this field set applies to. The format is "group/version" just
like the top-level APIVersion field. It is necessary to track
the version of a field set because it cannot be automatically
converted.
type: string
fields:
additionalProperties: true
description: Fields identifies a set of fields.
type: object
manager:
description: Manager is an identifier of the workflow managing
these fields.
type: string
operation:
              description: Operation is the type of operation which led to
this ManagedFieldsEntry being created. The only valid values
for this field are 'Apply' and 'Update'.
type: string
time:
description: Time is timestamp of when these fields were set.
                It should always be empty if Operation is 'Apply'.
format: date-time
type: string
type: object
type: array
name:
description: 'Name must be unique within a namespace. Is required when
creating resources, although some resources may allow a client to
request the generation of an appropriate name automatically. Name
is primarily intended for creation idempotence and configuration definition.
Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names'
type: string
namespace:
        description: "Namespace defines the space within which each name must
          be unique. An empty namespace is equivalent to the \"default\" namespace,
          but \"default\" is the canonical representation. Not all objects are
          required to be scoped to a namespace - the value of this field for
          those objects will be empty. \n Must be a DNS_LABEL. Cannot be updated.
          More info: http://kubernetes.io/docs/user-guide/namespaces"
type: string
ownerReferences:
description: List of objects depended by this object. If ALL objects
in the list have been deleted, this object will be garbage collected.
If this object is managed by a controller, then an entry in this list
will point to this controller, with the controller field set to true.
There cannot be more than one managing controller.
items:
properties:
apiVersion:
description: API version of the referent.
type: string
blockOwnerDeletion:
description: If true, AND if the owner has the "foregroundDeletion"
finalizer, then the owner cannot be deleted from the key-value
store until this reference is removed. Defaults to false. To
set this field, a user needs "delete" permission of the owner,
otherwise 422 (Unprocessable Entity) will be returned.
type: boolean
controller:
description: If true, this reference points to the managing controller.
type: boolean
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names'
type: string
uid:
description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids'
type: string
required:
- apiVersion
- kind
- name
- uid
type: object
type: array
resourceVersion:
description: "An opaque value that represents the internal version of
this object that can be used by clients to determine when objects
have changed. May be used for optimistic concurrency, change detection,
and the watch operation on a resource or set of resources. Clients
must treat these values as opaque and passed unmodified back to the
server. They may only be valid for a particular resource or set of
resources. \n Populated by the system. Read-only. Value must be treated
          as opaque by clients. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency"
type: string
selfLink:
description: SelfLink is a URL representing this object. Populated by
the system. Read-only.
type: string
uid:
description: "UID is the unique in time and space value for this object.
It is typically generated by the server on successful creation of
a resource and is not allowed to change on PUT operations. \n Populated
by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"
type: string
type: object
spec:
properties:
minReadySeconds:
description: Minimum number of seconds for which a newly created machine
should be ready. Defaults to 0 (machine will be considered available
as soon as it is ready)
format: int32
type: integer
paused:
description: Indicates that the deployment is paused.
type: boolean
progressDeadlineSeconds:
description: The maximum time in seconds for a deployment to make progress
before it is considered to be failed. The deployment controller will
continue to process failed deployments and a condition with a ProgressDeadlineExceeded
reason will be surfaced in the deployment status. Note that progress
will not be estimated during the time a deployment is paused. Defaults
to 600s.
format: int32
type: integer
replicas:
description: Number of desired machines. Defaults to 1. This is a pointer
to distinguish between explicit zero and not specified.
format: int32
type: integer
revisionHistoryLimit:
description: The number of old MachineSets to retain to allow rollback.
This is a pointer to distinguish between explicit zero and not specified.
Defaults to 1.
format: int32
type: integer
selector:
description: Label selector for machines. Existing MachineSets whose
machines are selected by this will be the ones affected by this deployment.
It must match the machine template's labels.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements.
The requirements are ANDed.
items:
properties:
key:
description: key is the label key that the selector applies
to.
type: string
operator:
description: operator represents a key's relationship to a
set of values. Valid operators are In, NotIn, Exists and
DoesNotExist.
type: string
values:
description: values is an array of string values. If the operator
is In or NotIn, the values array must be non-empty. If the
operator is Exists or DoesNotExist, the values array must
be empty. This array is replaced during a strategic merge
patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single
{key,value} in the matchLabels map is equivalent to an element
of matchExpressions, whose key field is "key", the operator is
"In", and the values array contains only "value". The requirements
are ANDed.
type: object
type: object
strategy:
description: The deployment strategy to use to replace existing machines
with new ones.
properties:
rollingUpdate:
description: Rolling update config params. Present only if MachineDeploymentStrategyType
= RollingUpdate.
properties:
maxSurge:
anyOf:
- type: string
- type: integer
description: 'The maximum number of machines that can be scheduled
above the desired number of machines. Value can be an absolute
number (ex: 5) or a percentage of desired machines (ex: 10%).
This can not be 0 if MaxUnavailable is 0. Absolute number
is calculated from percentage by rounding up. Defaults to
1. Example: when this is set to 30%, the new MachineSet can
be scaled up immediately when the rolling update starts, such
that the total number of old and new machines do not exceed
130% of desired machines. Once old machines have been killed,
new MachineSet can be scaled up further, ensuring that total
number of machines running at any time during the update is
at most 130% of desired machines.'
maxUnavailable:
anyOf:
- type: string
- type: integer
description: 'The maximum number of machines that can be unavailable
during the update. Value can be an absolute number (ex: 5)
or a percentage of desired machines (ex: 10%). Absolute number
is calculated from percentage by rounding down. This can not
be 0 if MaxSurge is 0. Defaults to 0. Example: when this is
set to 30%, the old MachineSet can be scaled down to 70% of
desired machines immediately when the rolling update starts.
Once new machines are ready, old MachineSet can be scaled
down further, followed by scaling up the new MachineSet, ensuring
that the total number of machines available at all times during
the update is at least 70% of desired machines.'
type: object
type:
description: Type of deployment. Currently the only supported strategy
is "RollingUpdate". Default is RollingUpdate.
type: string
type: object
template:
description: Template describes the machines that will be created.
properties:
metadata:
description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata'
properties:
annotations:
additionalProperties:
type: string
description: 'Annotations is an unstructured key value map stored
with a resource that may be set by external tools to store
and retrieve arbitrary metadata. They are not queryable and
should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations'
type: object
generateName:
description: "GenerateName is an optional prefix, used by the
server, to generate a unique name ONLY IF the Name field has
not been provided. If this field is used, the name returned
to the client will be different than the name passed. This
value will also be combined with a unique suffix. The provided
value has the same validation rules as the Name field, and
may be truncated by the length of the suffix required to make
the value unique on the server. \n If this field is specified
and the generated name exists, the server will NOT return
a 409 - instead, it will either return 201 Created or 500
with Reason ServerTimeout indicating a unique name could not
be found in the time allotted, and the client should retry
(optionally after the time indicated in the Retry-After header).
\n Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency"
type: string
labels:
additionalProperties:
type: string
description: 'Map of string keys and values that can be used
to organize and categorize (scope and select) objects. May
match selectors of replication controllers and services. More
info: http://kubernetes.io/docs/user-guide/labels'
type: object
name:
description: 'Name must be unique within a namespace. Is required
when creating resources, although some resources may allow
a client to request the generation of an appropriate name
automatically. Name is primarily intended for creation idempotence
and configuration definition. Cannot be updated. More info:
http://kubernetes.io/docs/user-guide/identifiers#names'
type: string
namespace:
description: "Namespace defines the space within each name must
be unique. An empty namespace is equivalent to the \"default\"
namespace, but \"default\" is the canonical representation.
Not all objects are required to be scoped to a namespace -
the value of this field for those objects will be empty. \n
Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"
type: string
ownerReferences:
description: List of objects depended by this object. If ALL
objects in the list have been deleted, this object will be
garbage collected. If this object is managed by a controller,
then an entry in this list will point to this controller,
with the controller field set to true. There cannot be more
than one managing controller.
items:
properties:
apiVersion:
description: API version of the referent.
type: string
blockOwnerDeletion:
description: If true, AND if the owner has the "foregroundDeletion"
finalizer, then the owner cannot be deleted from the
key-value store until this reference is removed. Defaults
to false. To set this field, a user needs "delete" permission
of the owner, otherwise 422 (Unprocessable Entity) will
be returned.
type: boolean
controller:
description: If true, this reference points to the managing
controller.
type: boolean
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names'
type: string
uid:
description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids'
type: string
required:
- apiVersion
- kind
- name
- uid
type: object
type: array
type: object
spec:
description: 'Specification of the desired behavior of the machine.
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status'
properties:
configSource:
description: ConfigSource is used to populate in the associated
Node for dynamic kubelet config. This field already exists
in Node, so any updates to it in the Machine spec will be
automatically copied to the linked NodeRef from the status.
The rest of dynamic kubelet config support should then work
as-is.
properties:
configMap:
description: ConfigMap is a reference to a Node's ConfigMap
properties:
kubeletConfigKey:
description: KubeletConfigKey declares which key of
the referenced ConfigMap corresponds to the KubeletConfiguration
                          structure. This field is required in all cases.
type: string
name:
description: Name is the metadata.name of the referenced
ConfigMap. This field is required in all cases.
type: string
namespace:
description: Namespace is the metadata.namespace of
the referenced ConfigMap. This field is required in
all cases.
type: string
resourceVersion:
description: ResourceVersion is the metadata.ResourceVersion
of the referenced ConfigMap. This field is forbidden
in Node.Spec, and required in Node.Status.
type: string
uid:
description: UID is the metadata.UID of the referenced
ConfigMap. This field is forbidden in Node.Spec, and
required in Node.Status.
type: string
required:
- namespace
- name
- kubeletConfigKey
type: object
type: object
metadata:
description: ObjectMeta will autopopulate the Node created.
Use this to indicate what labels, annotations, name prefix,
etc., should be used when creating the Node.
properties:
annotations:
additionalProperties:
type: string
description: 'Annotations is an unstructured key value map
stored with a resource that may be set by external tools
to store and retrieve arbitrary metadata. They are not
queryable and should be preserved when modifying objects.
More info: http://kubernetes.io/docs/user-guide/annotations'
type: object
generateName:
description: "GenerateName is an optional prefix, used by
the server, to generate a unique name ONLY IF the Name
field has not been provided. If this field is used, the
name returned to the client will be different than the
name passed. This value will also be combined with a unique
suffix. The provided value has the same validation rules
as the Name field, and may be truncated by the length
of the suffix required to make the value unique on the
server. \n If this field is specified and the generated
name exists, the server will NOT return a 409 - instead,
it will either return 201 Created or 500 with Reason ServerTimeout
indicating a unique name could not be found in the time
allotted, and the client should retry (optionally after
the time indicated in the Retry-After header). \n Applied
only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency"
type: string
labels:
additionalProperties:
type: string
description: 'Map of string keys and values that can be
used to organize and categorize (scope and select) objects.
May match selectors of replication controllers and services.
More info: http://kubernetes.io/docs/user-guide/labels'
type: object
name:
description: 'Name must be unique within a namespace. Is
required when creating resources, although some resources
may allow a client to request the generation of an appropriate
name automatically. Name is primarily intended for creation
idempotence and configuration definition. Cannot be updated.
More info: http://kubernetes.io/docs/user-guide/identifiers#names'
type: string
namespace:
description: "Namespace defines the space within each name
must be unique. An empty namespace is equivalent to the
\"default\" namespace, but \"default\" is the canonical
representation. Not all objects are required to be scoped
to a namespace - the value of this field for those objects
will be empty. \n Must be a DNS_LABEL. Cannot be updated.
More info: http://kubernetes.io/docs/user-guide/namespaces"
type: string
ownerReferences:
description: List of objects depended by this object. If
ALL objects in the list have been deleted, this object
will be garbage collected. If this object is managed by
a controller, then an entry in this list will point to
this controller, with the controller field set to true.
There cannot be more than one managing controller.
items:
properties:
apiVersion:
description: API version of the referent.
type: string
blockOwnerDeletion:
description: If true, AND if the owner has the "foregroundDeletion"
finalizer, then the owner cannot be deleted from
the key-value store until this reference is removed.
Defaults to false. To set this field, a user needs
"delete" permission of the owner, otherwise 422
(Unprocessable Entity) will be returned.
type: boolean
controller:
description: If true, this reference points to the
managing controller.
type: boolean
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names'
type: string
uid:
description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids'
type: string
required:
- apiVersion
- kind
- name
- uid
type: object
type: array
type: object
providerID:
description: ProviderID is the identification ID of the machine
provided by the provider. This field must match the provider
ID as seen on the node object corresponding to this machine.
This field is required by higher level consumers of cluster-api.
Example use case is cluster autoscaler with cluster-api as
provider. Clean-up logic in the autoscaler compares machines
to nodes to find out machines at provider which could not
get registered as Kubernetes nodes. With cluster-api as a
generic out-of-tree provider for autoscaler, this field is
required by autoscaler to be able to have a provider view
of the list of machines. Another list of nodes is queried
from the k8s apiserver and then a comparison is done to find
out unregistered machines and are marked for delete. This
field will be set by the actuators and consumed by higher
level entities like autoscaler that will be interfacing with
cluster-api as generic provider.
type: string
providerSpec:
description: ProviderSpec details Provider-specific configuration
to use during node creation.
properties:
value:
description: Value is an inlined, serialized representation
of the resource configuration. It is recommended that
providers maintain their own versioned API types that
should be serialized/deserialized from this field, akin
to component config.
type: object
valueFrom:
description: Source for the provider configuration. Cannot
be used if value is not empty.
properties:
machineClass:
description: The machine class from which the provider
config should be sourced.
properties:
apiVersion:
description: API version of the referent.
type: string
fieldPath:
description: 'If referring to a piece of an object
instead of an entire object, this string should
contain a valid JSON/Go field access statement,
such as desiredState.manifest.containers[2]. For
example, if the object reference is to a container
within a pod, this would take on a value like:
"spec.containers{name}" (where "name" refers to
the name of the container that triggered the event)
or if no container name is specified "spec.containers[2]"
(container with index 2 in this pod). This syntax
is chosen only to have some well-defined way of
referencing a part of an object. TODO: this design
is not final and this field is subject to change
in the future.'
type: string
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
namespace:
description: 'Namespace of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
type: string
provider:
description: Provider is the name of the cloud-provider
which MachineClass is intended for.
type: string
resourceVersion:
description: 'Specific resourceVersion to which
this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency'
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
type: object
type: object
type: object
taints:
description: The list of the taints to be applied to the corresponding
Node in additive manner. This list will not overwrite any
other taints added to the Node on an ongoing basis by other
                entities. These taints should be actively reconciled (e.g.
                if you ask the machine controller to apply a taint and then
                manually remove the taint, the machine controller will put
                it back) but not have the machine controller remove any taints.
items:
properties:
effect:
description: Required. The effect of the taint on pods
that do not tolerate the taint. Valid effects are NoSchedule,
PreferNoSchedule and NoExecute.
type: string
key:
description: Required. The taint key to be applied to
a node.
type: string
timeAdded:
description: TimeAdded represents the time at which the
taint was added. It is only written for NoExecute taints.
format: date-time
type: string
value:
description: Required. The taint value corresponding to
the taint key.
type: string
required:
- key
- effect
type: object
type: array
versions:
description: Versions of key software to use. This field is
optional at cluster creation time, and omitting the field
indicates that the cluster installation tool should select
defaults for the user. These defaults may differ based on
the cluster installer, but the tool should populate the values
it uses when persisting Machine objects. A Machine spec missing
this field at runtime is invalid.
properties:
controlPlane:
description: ControlPlane is the semantic version of the
Kubernetes control plane to run. This should only be populated
when the machine is a control plane.
type: string
kubelet:
description: Kubelet is the semantic version of kubelet
to run
type: string
required:
- kubelet
type: object
required:
- providerSpec
type: object
type: object
required:
- selector
- template
type: object
status:
properties:
availableReplicas:
description: Total number of available machines (ready for at least
minReadySeconds) targeted by this deployment.
format: int32
type: integer
observedGeneration:
description: The generation observed by the deployment controller.
format: int64
type: integer
readyReplicas:
description: Total number of ready machines targeted by this deployment.
format: int32
type: integer
replicas:
description: Total number of non-terminated machines targeted by this
deployment (their labels match the selector).
format: int32
type: integer
unavailableReplicas:
description: Total number of unavailable machines targeted by this deployment.
This is the total number of machines that are still required for the
deployment to have 100% available capacity. They may either be machines
that are running but not yet available or machines that still have
not been created.
format: int32
type: integer
updatedReplicas:
description: Total number of non-terminated machines targeted by this
deployment that have the desired template spec.
format: int32
type: integer
type: object
type: object
versions:
- name: v1alpha1
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
creationTimestamp: null
name: machines.cluster.k8s.io
spec:
additionalPrinterColumns:
- JSONPath: .spec.providerID
description: Provider ID
name: ProviderID
type: string
- JSONPath: .status.phase
description: Machine status such as Terminating/Pending/Running/Failed etc
name: Phase
type: string
- JSONPath: .status.nodeRef.name
description: Node name associated with this machine
name: NodeName
priority: 1
type: string
group: cluster.k8s.io
names:
kind: Machine
plural: machines
shortNames:
- ma
scope: Namespaced
subresources:
status: {}
validation:
openAPIV3Schema:
      description: Machine is the Schema for the machines API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
metadata:
properties:
annotations:
additionalProperties:
type: string
description: 'Annotations is an unstructured key value map stored with
a resource that may be set by external tools to store and retrieve
arbitrary metadata. They are not queryable and should be preserved
when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations'
type: object
clusterName:
description: The name of the cluster which the object belongs to. This
is used to distinguish resources with same name and namespace in different
clusters. This field is not set anywhere right now and apiserver is
going to ignore it if set in create or update request.
type: string
creationTimestamp:
description: "CreationTimestamp is a timestamp representing the server
time when this object was created. It is not guaranteed to be set
in happens-before order across separate operations. Clients may not
set this value. It is represented in RFC3339 form and is in UTC. \n
Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata"
format: date-time
type: string
deletionGracePeriodSeconds:
description: Number of seconds allowed for this object to gracefully
terminate before it will be removed from the system. Only set when
deletionTimestamp is also set. May only be shortened. Read-only.
format: int64
type: integer
deletionTimestamp:
description: "DeletionTimestamp is RFC 3339 date and time at which this
resource will be deleted. This field is set by the server when a graceful
deletion is requested by the user, and is not directly settable by
a client. The resource is expected to be deleted (no longer visible
from resource lists, and not reachable by name) after the time in
this field, once the finalizers list is empty. As long as the finalizers
list contains items, deletion is blocked. Once the deletionTimestamp
is set, this value may not be unset or be set further into the future,
although it may be shortened or the resource may be deleted prior
to this time. For example, a user may request that a pod is deleted
in 30 seconds. The Kubelet will react by sending a graceful termination
signal to the containers in the pod. After that 30 seconds, the Kubelet
will send a hard termination signal (SIGKILL) to the container and
after cleanup, remove the pod from the API. In the presence of network
partitions, this object may still exist after this timestamp, until
an administrator or automated process can determine the resource is
fully terminated. If not set, graceful deletion of the object has
not been requested. \n Populated by the system when a graceful deletion
is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata"
format: date-time
type: string
finalizers:
description: Must be empty before the object is deleted from the registry.
Each entry is an identifier for the responsible component that will
remove the entry from the list. If the deletionTimestamp of the object
is non-nil, entries in this list can only be removed.
items:
type: string
type: array
generateName:
description: "GenerateName is an optional prefix, used by the server,
to generate a unique name ONLY IF the Name field has not been provided.
If this field is used, the name returned to the client will be different
than the name passed. This value will also be combined with a unique
suffix. The provided value has the same validation rules as the Name
field, and may be truncated by the length of the suffix required to
make the value unique on the server. \n If this field is specified
and the generated name exists, the server will NOT return a 409 -
instead, it will either return 201 Created or 500 with Reason ServerTimeout
indicating a unique name could not be found in the time allotted,
and the client should retry (optionally after the time indicated in
the Retry-After header). \n Applied only if Name is not specified.
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency"
type: string
generation:
description: A sequence number representing a specific generation of
the desired state. Populated by the system. Read-only.
format: int64
type: integer
initializers:
description: "An initializer is a controller which enforces some system
invariant at object creation time. This field is a list of initializers
that have not yet acted on this object. If nil or empty, this object
has been completely initialized. Otherwise, the object is considered
uninitialized and is hidden (in list/watch and get calls) from clients
that haven't explicitly asked to observe uninitialized objects. \n
When an object is created, the system will populate this list with
the current set of initializers. Only privileged users may set or
modify this list. Once it is empty, it may not be modified further
by any user. \n DEPRECATED - initializers are an alpha field and will
be removed in v1.15."
properties:
pending:
description: Pending is a list of initializers that must execute
in order before this object is visible. When the last pending
initializer is removed, and no failing result is set, the initializers
struct will be set to nil and the object is considered as initialized
and visible to all clients.
items:
properties:
name:
description: name of the process that is responsible for initializing
this object.
type: string
required:
- name
type: object
type: array
result:
description: If result is set with the Failure field, the object
will be persisted to storage and then deleted, ensuring that other
clients can observe the deletion.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this
representation of an object. Servers should convert recognized
schemas to the latest internal value, and may reject unrecognized
values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
type: string
code:
description: Suggested HTTP return code for this status, 0 if
not set.
format: int32
type: integer
details:
description: Extended data associated with the reason. Each
reason may define its own extended details. This field is
optional and the data returned is not guaranteed to conform
to any schema except that defined by the reason type.
properties:
causes:
description: The Causes array includes more details associated
with the StatusReason failure. Not all StatusReasons may
provide detailed causes.
items:
properties:
field:
description: "The field of the resource that has caused
this error, as named by its JSON serialization.
May include dot and postfix notation for nested
attributes. Arrays are zero-indexed. Fields may
appear more than once in an array of causes due
to fields having multiple errors. Optional. \n Examples:
\ \"name\" - the field \"name\" on the current
resource \"items[0].name\" - the field \"name\"
on the first array entry in \"items\""
type: string
message:
description: A human-readable description of the cause
of the error. This field may be presented as-is
to a reader.
type: string
reason:
description: A machine-readable description of the
cause of the error. If this value is empty there
is no information available.
type: string
type: object
type: array
group:
description: The group attribute of the resource associated
with the status StatusReason.
type: string
kind:
description: 'The kind attribute of the resource associated
with the status StatusReason. On some operations may differ
from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
name:
description: The name attribute of the resource associated
with the status StatusReason (when there is a single name
which can be described).
type: string
retryAfterSeconds:
description: If specified, the time in seconds before the
operation should be retried. Some errors may indicate
the client must take an alternate action - for those errors
this field may indicate how long to wait before taking
the alternate action.
format: int32
type: integer
uid:
description: 'UID of the resource. (when there is a single
resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids'
type: string
type: object
kind:
description: 'Kind is a string value representing the REST resource
this object represents. Servers may infer this from the endpoint
the client submits requests to. Cannot be updated. In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
message:
description: A human-readable description of the status of this
operation.
type: string
metadata:
description: 'Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
properties:
continue:
description: continue may be set if the user set a limit
on the number of items returned, and indicates that the
server has more data available. The value is opaque and
may be used to issue another request to the endpoint that
served this list to retrieve the next set of available
objects. Continuing a consistent list may not be possible
if the server configuration has changed or more than a
few minutes have passed. The resourceVersion field returned
when using this continue value will be identical to the
value in the first response, unless you have received
this token from an error message.
type: string
resourceVersion:
description: 'String that identifies the server''s internal
version of this object that can be used by clients to
determine when objects have changed. Value must be treated
as opaque by clients and passed unmodified back to the
server. Populated by the system. Read-only. More info:
https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency'
type: string
selfLink:
description: selfLink is a URL representing this object.
Populated by the system. Read-only.
type: string
type: object
reason:
description: A machine-readable description of why this operation
is in the "Failure" status. If this value is empty there is
no information available. A Reason clarifies an HTTP status
code but does not override it.
type: string
status:
description: 'Status of the operation. One of: "Success" or
"Failure". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status'
type: string
type: object
required:
- pending
type: object
labels:
additionalProperties:
type: string
description: 'Map of string keys and values that can be used to organize
and categorize (scope and select) objects. May match selectors of
replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels'
type: object
managedFields:
description: "ManagedFields maps workflow-id and version to the set
of fields that are managed by that workflow. This is mostly for internal
housekeeping, and users typically shouldn't need to set or understand
this field. A workflow can be the user's name, a controller's name,
or the name of a specific apply path like \"ci-cd\". The set of fields
is always in the version that the workflow used when modifying the
object. \n This field is alpha and can be changed or removed without
notice."
items:
properties:
apiVersion:
description: APIVersion defines the version of this resource that
this field set applies to. The format is "group/version" just
like the top-level APIVersion field. It is necessary to track
the version of a field set because it cannot be automatically
converted.
type: string
fields:
additionalProperties: true
description: Fields identifies a set of fields.
type: object
manager:
description: Manager is an identifier of the workflow managing
these fields.
type: string
operation:
description: Operation is the type of operation which lead to
this ManagedFieldsEntry being created. The only valid values
for this field are 'Apply' and 'Update'.
type: string
time:
description: Time is timestamp of when these fields were set.
It should always be empty if Operation is 'Apply'
format: date-time
type: string
type: object
type: array
name:
description: 'Name must be unique within a namespace. Is required when
creating resources, although some resources may allow a client to
request the generation of an appropriate name automatically. Name
is primarily intended for creation idempotence and configuration definition.
Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names'
type: string
namespace:
description: "Namespace defines the space within each name must be unique.
An empty namespace is equivalent to the \"default\" namespace, but
\"default\" is the canonical representation. Not all objects are required
to be scoped to a namespace - the value of this field for those objects
will be empty. \n Must be a DNS_LABEL. Cannot be updated. More info:
http://kubernetes.io/docs/user-guide/namespaces"
type: string
ownerReferences:
description: List of objects depended by this object. If ALL objects
in the list have been deleted, this object will be garbage collected.
If this object is managed by a controller, then an entry in this list
will point to this controller, with the controller field set to true.
There cannot be more than one managing controller.
items:
properties:
apiVersion:
description: API version of the referent.
type: string
blockOwnerDeletion:
description: If true, AND if the owner has the "foregroundDeletion"
finalizer, then the owner cannot be deleted from the key-value
store until this reference is removed. Defaults to false. To
set this field, a user needs "delete" permission of the owner,
otherwise 422 (Unprocessable Entity) will be returned.
type: boolean
controller:
description: If true, this reference points to the managing controller.
type: boolean
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names'
type: string
uid:
description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids'
type: string
required:
- apiVersion
- kind
- name
- uid
type: object
type: array
resourceVersion:
description: "An opaque value that represents the internal version of
this object that can be used by clients to determine when objects
have changed. May be used for optimistic concurrency, change detection,
and the watch operation on a resource or set of resources. Clients
must treat these values as opaque and passed unmodified back to the
server. They may only be valid for a particular resource or set of
                resources. \n Populated by the system. Read-only. Value must be treated
                as opaque by clients. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency"
type: string
selfLink:
description: SelfLink is a URL representing this object. Populated by
the system. Read-only.
type: string
uid:
description: "UID is the unique in time and space value for this object.
It is typically generated by the server on successful creation of
a resource and is not allowed to change on PUT operations. \n Populated
by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"
type: string
type: object
spec:
properties:
configSource:
description: ConfigSource is used to populate in the associated Node
for dynamic kubelet config. This field already exists in Node, so
any updates to it in the Machine spec will be automatically copied
to the linked NodeRef from the status. The rest of dynamic kubelet
config support should then work as-is.
properties:
configMap:
description: ConfigMap is a reference to a Node's ConfigMap
properties:
kubeletConfigKey:
description: KubeletConfigKey declares which key of the referenced
ConfigMap corresponds to the KubeletConfiguration structure.
This field is required in all cases.
type: string
name:
description: Name is the metadata.name of the referenced ConfigMap.
This field is required in all cases.
type: string
namespace:
description: Namespace is the metadata.namespace of the referenced
ConfigMap. This field is required in all cases.
type: string
resourceVersion:
description: ResourceVersion is the metadata.ResourceVersion
of the referenced ConfigMap. This field is forbidden in Node.Spec,
and required in Node.Status.
type: string
uid:
description: UID is the metadata.UID of the referenced ConfigMap.
This field is forbidden in Node.Spec, and required in Node.Status.
type: string
required:
- namespace
- name
- kubeletConfigKey
type: object
type: object
metadata:
description: ObjectMeta will autopopulate the Node created. Use this
to indicate what labels, annotations, name prefix, etc., should be
used when creating the Node.
properties:
annotations:
additionalProperties:
type: string
description: 'Annotations is an unstructured key value map stored
with a resource that may be set by external tools to store and
retrieve arbitrary metadata. They are not queryable and should
be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations'
type: object
generateName:
description: "GenerateName is an optional prefix, used by the server,
to generate a unique name ONLY IF the Name field has not been
provided. If this field is used, the name returned to the client
will be different than the name passed. This value will also be
combined with a unique suffix. The provided value has the same
validation rules as the Name field, and may be truncated by the
length of the suffix required to make the value unique on the
server. \n If this field is specified and the generated name exists,
the server will NOT return a 409 - instead, it will either return
201 Created or 500 with Reason ServerTimeout indicating a unique
name could not be found in the time allotted, and the client should
retry (optionally after the time indicated in the Retry-After
header). \n Applied only if Name is not specified. More info:
https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency"
type: string
labels:
additionalProperties:
type: string
description: 'Map of string keys and values that can be used to
organize and categorize (scope and select) objects. May match
selectors of replication controllers and services. More info:
http://kubernetes.io/docs/user-guide/labels'
type: object
name:
description: 'Name must be unique within a namespace. Is required
when creating resources, although some resources may allow a client
to request the generation of an appropriate name automatically.
Name is primarily intended for creation idempotence and configuration
definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names'
type: string
namespace:
description: "Namespace defines the space within each name must
be unique. An empty namespace is equivalent to the \"default\"
namespace, but \"default\" is the canonical representation. Not
all objects are required to be scoped to a namespace - the value
of this field for those objects will be empty. \n Must be a DNS_LABEL.
Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"
type: string
ownerReferences:
description: List of objects depended by this object. If ALL objects
in the list have been deleted, this object will be garbage collected.
If this object is managed by a controller, then an entry in this
list will point to this controller, with the controller field
set to true. There cannot be more than one managing controller.
items:
properties:
apiVersion:
description: API version of the referent.
type: string
blockOwnerDeletion:
description: If true, AND if the owner has the "foregroundDeletion"
finalizer, then the owner cannot be deleted from the key-value
store until this reference is removed. Defaults to false.
To set this field, a user needs "delete" permission of the
owner, otherwise 422 (Unprocessable Entity) will be returned.
type: boolean
controller:
description: If true, this reference points to the managing
controller.
type: boolean
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names'
type: string
uid:
description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids'
type: string
required:
- apiVersion
- kind
- name
- uid
type: object
type: array
type: object
providerID:
description: ProviderID is the identification ID of the machine provided
by the provider. This field must match the provider ID as seen on
the node object corresponding to this machine. This field is required
by higher level consumers of cluster-api. Example use case is cluster
autoscaler with cluster-api as provider. Clean-up logic in the autoscaler
compares machines to nodes to find out machines at provider which
could not get registered as Kubernetes nodes. With cluster-api as
a generic out-of-tree provider for autoscaler, this field is required
by autoscaler to be able to have a provider view of the list of machines.
Another list of nodes is queried from the k8s apiserver and then a
comparison is done to find out unregistered machines and are marked
for delete. This field will be set by the actuators and consumed by
higher level entities like autoscaler that will be interfacing with
cluster-api as generic provider.
type: string
providerSpec:
description: ProviderSpec details Provider-specific configuration to
use during node creation.
properties:
value:
description: Value is an inlined, serialized representation of the
resource configuration. It is recommended that providers maintain
their own versioned API types that should be serialized/deserialized
from this field, akin to component config.
type: object
valueFrom:
description: Source for the provider configuration. Cannot be used
if value is not empty.
properties:
machineClass:
description: The machine class from which the provider config
should be sourced.
properties:
apiVersion:
description: API version of the referent.
type: string
fieldPath:
description: 'If referring to a piece of an object instead
of an entire object, this string should contain a valid
JSON/Go field access statement, such as desiredState.manifest.containers[2].
For example, if the object reference is to a container
within a pod, this would take on a value like: "spec.containers{name}"
(where "name" refers to the name of the container that
triggered the event) or if no container name is specified
"spec.containers[2]" (container with index 2 in this pod).
This syntax is chosen only to have some well-defined way
of referencing a part of an object. TODO: this design
is not final and this field is subject to change in the
future.'
type: string
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
namespace:
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
type: string
provider:
description: Provider is the name of the cloud-provider
which MachineClass is intended for.
type: string
resourceVersion:
description: 'Specific resourceVersion to which this reference
is made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency'
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
type: object
type: object
type: object
taints:
description: The list of the taints to be applied to the corresponding
Node in additive manner. This list will not overwrite any other taints
added to the Node on an ongoing basis by other entities. These taints
should be actively reconciled (e.g. if you ask the machine controller
to apply a taint and then manually remove the taint the machine controller
will put it back) but not have the machine controller remove any taints.
items:
properties:
effect:
description: Required. The effect of the taint on pods that do
not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule
and NoExecute.
type: string
key:
description: Required. The taint key to be applied to a node.
type: string
timeAdded:
description: TimeAdded represents the time at which the taint
was added. It is only written for NoExecute taints.
format: date-time
type: string
value:
description: Required. The taint value corresponding to the taint
key.
type: string
required:
- key
- effect
type: object
type: array
versions:
description: Versions of key software to use. This field is optional
at cluster creation time, and omitting the field indicates that the
cluster installation tool should select defaults for the user. These
defaults may differ based on the cluster installer, but the tool should
populate the values it uses when persisting Machine objects. A Machine
spec missing this field at runtime is invalid.
properties:
controlPlane:
description: ControlPlane is the semantic version of the Kubernetes
control plane to run. This should only be populated when the machine
is a control plane.
type: string
kubelet:
description: Kubelet is the semantic version of kubelet to run
type: string
required:
- kubelet
type: object
required:
- providerSpec
type: object
status:
properties:
addresses:
description: Addresses is a list of addresses assigned to the machine.
Queried from cloud provider, if available.
items:
properties:
address:
description: The node address.
type: string
type:
description: Node address type, one of Hostname, ExternalIP or
InternalIP.
type: string
required:
- type
- address
type: object
type: array
conditions:
description: 'Conditions lists the conditions synced from the node conditions
of the corresponding node-object. Machine-controller is responsible
for keeping conditions up-to-date. MachineSet controller will be taking
these conditions as a signal to decide if machine is healthy or needs
to be replaced. Refer: https://kubernetes.io/docs/concepts/architecture/nodes/#condition'
items:
properties:
lastHeartbeatTime:
description: Last time we got an update on a given condition.
format: date-time
type: string
lastTransitionTime:
description: Last time the condition transit from one status to
another.
format: date-time
type: string
message:
description: Human readable message indicating details about last
transition.
type: string
reason:
description: (brief) reason for the condition's last transition.
type: string
status:
description: Status of the condition, one of True, False, Unknown.
type: string
type:
description: Type of node condition.
type: string
required:
- type
- status
type: object
type: array
errorMessage:
description: "ErrorMessage will be set in the event that there is a
terminal problem reconciling the Machine and will contain a more verbose
string suitable for logging and human consumption. \n This field should
not be set for transitive errors that a controller faces that are
expected to be fixed automatically over time (like service outages),
but instead indicate that something is fundamentally wrong with the
Machine's spec or the configuration of the controller, and that manual
intervention is required. Examples of terminal errors would be invalid
combinations of settings in the spec, values that are unsupported
by the controller, or the responsible controller itself being critically
misconfigured. \n Any transient errors that occur during the reconciliation
of Machines can be added as events to the Machine object and/or logged
in the controller's output."
type: string
errorReason:
description: "ErrorReason will be set in the event that there is a terminal
problem reconciling the Machine and will contain a succinct value
suitable for machine interpretation. \n This field should not be set
for transitive errors that a controller faces that are expected to
be fixed automatically over time (like service outages), but instead
indicate that something is fundamentally wrong with the Machine's
spec or the configuration of the controller, and that manual intervention
is required. Examples of terminal errors would be invalid combinations
of settings in the spec, values that are unsupported by the controller,
or the responsible controller itself being critically misconfigured.
\n Any transient errors that occur during the reconciliation of Machines
can be added as events to the Machine object and/or logged in the
controller's output."
type: string
lastOperation:
description: LastOperation describes the last-operation performed by
the machine-controller. This API should be useful as a history in
terms of the latest operation performed on the specific machine. It
should also convey the state of the latest-operation for example if
it is still on-going, failed or completed successfully.
properties:
description:
description: Description is the human-readable description of the
last operation.
type: string
lastUpdated:
description: LastUpdated is the timestamp at which LastOperation
API was last-updated.
format: date-time
type: string
state:
description: State is the current status of the last performed operation.
E.g. Processing, Failed, Successful etc
type: string
type:
description: Type is the type of operation which was last performed.
E.g. Create, Delete, Update etc
type: string
type: object
lastUpdated:
description: LastUpdated identifies when this status was last observed.
format: date-time
type: string
nodeRef:
description: NodeRef will point to the corresponding Node if it exists.
properties:
apiVersion:
description: API version of the referent.
type: string
fieldPath:
description: 'If referring to a piece of an object instead of an
entire object, this string should contain a valid JSON/Go field
access statement, such as desiredState.manifest.containers[2].
For example, if the object reference is to a container within
a pod, this would take on a value like: "spec.containers{name}"
(where "name" refers to the name of the container that triggered
the event) or if no container name is specified "spec.containers[2]"
(container with index 2 in this pod). This syntax is chosen only
to have some well-defined way of referencing a part of an object.
TODO: this design is not final and this field is subject to change
in the future.'
type: string
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
namespace:
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
type: string
resourceVersion:
description: 'Specific resourceVersion to which this reference is
made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency'
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
type: object
phase:
description: Phase represents the current phase of machine actuation.
E.g. Pending, Running, Terminating, Failed etc.
type: string
providerStatus:
description: ProviderStatus details a Provider-specific status. It is
recommended that providers maintain their own versioned API types
that should be serialized/deserialized from this field.
type: object
versions:
description: "Versions specifies the current versions of software on
the corresponding Node (if it exists). This is provided for a few
reasons: \n 1) It is more convenient than checking the NodeRef, traversing
it to the Node, and finding the appropriate field in Node.Status.NodeInfo
\ (which uses different field names and formatting). 2) It removes
some of the dependency on the structure of the Node, so that if
the structure of Node.Status.NodeInfo changes, only machine controllers
need to be updated, rather than every client of the Machines API.
3) There is no other simple way to check the control plane version.
A client would have to connect directly to the apiserver running
on the target node in order to find out its version."
properties:
controlPlane:
description: ControlPlane is the semantic version of the Kubernetes
control plane to run. This should only be populated when the machine
is a control plane.
type: string
kubelet:
description: Kubelet is the semantic version of kubelet to run
type: string
required:
- kubelet
type: object
type: object
type: object
versions:
- name: v1alpha1
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
creationTimestamp: null
name: machinesets.cluster.k8s.io
spec:
group: cluster.k8s.io
names:
kind: MachineSet
plural: machinesets
shortNames:
- ms
scope: Namespaced
subresources:
scale:
labelSelectorPath: .status.labelSelector
specReplicasPath: .spec.replicas
statusReplicasPath: .status.replicas
status: {}
validation:
openAPIV3Schema:
description: MachineSet ensures that a specified number of machine
replicas are running at any given time.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
metadata:
properties:
annotations:
additionalProperties:
type: string
description: 'Annotations is an unstructured key value map stored with
a resource that may be set by external tools to store and retrieve
arbitrary metadata. They are not queryable and should be preserved
when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations'
type: object
clusterName:
description: The name of the cluster which the object belongs to. This
is used to distinguish resources with same name and namespace in different
clusters. This field is not set anywhere right now and apiserver is
going to ignore it if set in create or update request.
type: string
creationTimestamp:
description: "CreationTimestamp is a timestamp representing the server
time when this object was created. It is not guaranteed to be set
in happens-before order across separate operations. Clients may not
set this value. It is represented in RFC3339 form and is in UTC. \n
Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata"
format: date-time
type: string
deletionGracePeriodSeconds:
description: Number of seconds allowed for this object to gracefully
terminate before it will be removed from the system. Only set when
deletionTimestamp is also set. May only be shortened. Read-only.
format: int64
type: integer
deletionTimestamp:
description: "DeletionTimestamp is RFC 3339 date and time at which this
resource will be deleted. This field is set by the server when a graceful
deletion is requested by the user, and is not directly settable by
a client. The resource is expected to be deleted (no longer visible
from resource lists, and not reachable by name) after the time in
this field, once the finalizers list is empty. As long as the finalizers
list contains items, deletion is blocked. Once the deletionTimestamp
is set, this value may not be unset or be set further into the future,
although it may be shortened or the resource may be deleted prior
to this time. For example, a user may request that a pod is deleted
in 30 seconds. The Kubelet will react by sending a graceful termination
signal to the containers in the pod. After that 30 seconds, the Kubelet
will send a hard termination signal (SIGKILL) to the container and
after cleanup, remove the pod from the API. In the presence of network
partitions, this object may still exist after this timestamp, until
an administrator or automated process can determine the resource is
fully terminated. If not set, graceful deletion of the object has
not been requested. \n Populated by the system when a graceful deletion
is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata"
format: date-time
type: string
finalizers:
description: Must be empty before the object is deleted from the registry.
Each entry is an identifier for the responsible component that will
remove the entry from the list. If the deletionTimestamp of the object
is non-nil, entries in this list can only be removed.
items:
type: string
type: array
generateName:
description: "GenerateName is an optional prefix, used by the server,
to generate a unique name ONLY IF the Name field has not been provided.
If this field is used, the name returned to the client will be different
than the name passed. This value will also be combined with a unique
suffix. The provided value has the same validation rules as the Name
field, and may be truncated by the length of the suffix required to
make the value unique on the server. \n If this field is specified
and the generated name exists, the server will NOT return a 409 -
instead, it will either return 201 Created or 500 with Reason ServerTimeout
indicating a unique name could not be found in the time allotted,
and the client should retry (optionally after the time indicated in
the Retry-After header). \n Applied only if Name is not specified.
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency"
type: string
generation:
description: A sequence number representing a specific generation of
the desired state. Populated by the system. Read-only.
format: int64
type: integer
initializers:
description: "An initializer is a controller which enforces some system
invariant at object creation time. This field is a list of initializers
that have not yet acted on this object. If nil or empty, this object
has been completely initialized. Otherwise, the object is considered
uninitialized and is hidden (in list/watch and get calls) from clients
that haven't explicitly asked to observe uninitialized objects. \n
When an object is created, the system will populate this list with
the current set of initializers. Only privileged users may set or
modify this list. Once it is empty, it may not be modified further
by any user. \n DEPRECATED - initializers are an alpha field and will
be removed in v1.15."
properties:
pending:
description: Pending is a list of initializers that must execute
in order before this object is visible. When the last pending
initializer is removed, and no failing result is set, the initializers
struct will be set to nil and the object is considered as initialized
and visible to all clients.
items:
properties:
name:
description: name of the process that is responsible for initializing
this object.
type: string
required:
- name
type: object
type: array
result:
description: If result is set with the Failure field, the object
will be persisted to storage and then deleted, ensuring that other
clients can observe the deletion.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this
representation of an object. Servers should convert recognized
schemas to the latest internal value, and may reject unrecognized
values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
type: string
code:
description: Suggested HTTP return code for this status, 0 if
not set.
format: int32
type: integer
details:
description: Extended data associated with the reason. Each
reason may define its own extended details. This field is
optional and the data returned is not guaranteed to conform
to any schema except that defined by the reason type.
properties:
causes:
description: The Causes array includes more details associated
with the StatusReason failure. Not all StatusReasons may
provide detailed causes.
items:
properties:
field:
description: "The field of the resource that has caused
this error, as named by its JSON serialization.
May include dot and postfix notation for nested
attributes. Arrays are zero-indexed. Fields may
appear more than once in an array of causes due
to fields having multiple errors. Optional. \n Examples:
\ \"name\" - the field \"name\" on the current
resource \"items[0].name\" - the field \"name\"
on the first array entry in \"items\""
type: string
message:
description: A human-readable description of the cause
of the error. This field may be presented as-is
to a reader.
type: string
reason:
description: A machine-readable description of the
cause of the error. If this value is empty there
is no information available.
type: string
type: object
type: array
group:
description: The group attribute of the resource associated
with the status StatusReason.
type: string
kind:
description: 'The kind attribute of the resource associated
with the status StatusReason. On some operations may differ
from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
name:
description: The name attribute of the resource associated
with the status StatusReason (when there is a single name
which can be described).
type: string
retryAfterSeconds:
description: If specified, the time in seconds before the
operation should be retried. Some errors may indicate
the client must take an alternate action - for those errors
this field may indicate how long to wait before taking
the alternate action.
format: int32
type: integer
uid:
description: 'UID of the resource. (when there is a single
resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids'
type: string
type: object
kind:
description: 'Kind is a string value representing the REST resource
this object represents. Servers may infer this from the endpoint
the client submits requests to. Cannot be updated. In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
message:
description: A human-readable description of the status of this
operation.
type: string
metadata:
description: 'Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
properties:
continue:
description: continue may be set if the user set a limit
on the number of items returned, and indicates that the
server has more data available. The value is opaque and
may be used to issue another request to the endpoint that
served this list to retrieve the next set of available
objects. Continuing a consistent list may not be possible
if the server configuration has changed or more than a
few minutes have passed. The resourceVersion field returned
when using this continue value will be identical to the
value in the first response, unless you have received
this token from an error message.
type: string
resourceVersion:
description: 'String that identifies the server''s internal
version of this object that can be used by clients to
determine when objects have changed. Value must be treated
as opaque by clients and passed unmodified back to the
server. Populated by the system. Read-only. More info:
https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency'
type: string
selfLink:
description: selfLink is a URL representing this object.
Populated by the system. Read-only.
type: string
type: object
reason:
description: A machine-readable description of why this operation
is in the "Failure" status. If this value is empty there is
no information available. A Reason clarifies an HTTP status
code but does not override it.
type: string
status:
description: 'Status of the operation. One of: "Success" or
"Failure". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status'
type: string
type: object
required:
- pending
type: object
labels:
additionalProperties:
type: string
description: 'Map of string keys and values that can be used to organize
and categorize (scope and select) objects. May match selectors of
replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels'
type: object
managedFields:
description: "ManagedFields maps workflow-id and version to the set
of fields that are managed by that workflow. This is mostly for internal
housekeeping, and users typically shouldn't need to set or understand
this field. A workflow can be the user's name, a controller's name,
or the name of a specific apply path like \"ci-cd\". The set of fields
is always in the version that the workflow used when modifying the
object. \n This field is alpha and can be changed or removed without
notice."
items:
properties:
apiVersion:
description: APIVersion defines the version of this resource that
this field set applies to. The format is "group/version" just
like the top-level APIVersion field. It is necessary to track
the version of a field set because it cannot be automatically
converted.
type: string
fields:
additionalProperties: true
description: Fields identifies a set of fields.
type: object
manager:
description: Manager is an identifier of the workflow managing
these fields.
type: string
operation:
description: Operation is the type of operation which lead to
this ManagedFieldsEntry being created. The only valid values
for this field are 'Apply' and 'Update'.
type: string
time:
description: Time is timestamp of when these fields were set.
It should always be empty if Operation is 'Apply'
format: date-time
type: string
type: object
type: array
name:
description: 'Name must be unique within a namespace. Is required when
creating resources, although some resources may allow a client to
request the generation of an appropriate name automatically. Name
is primarily intended for creation idempotence and configuration definition.
Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names'
type: string
namespace:
description: "Namespace defines the space within each name must be unique.
An empty namespace is equivalent to the \"default\" namespace, but
\"default\" is the canonical representation. Not all objects are required
to be scoped to a namespace - the value of this field for those objects
will be empty. \n Must be a DNS_LABEL. Cannot be updated. More info:
http://kubernetes.io/docs/user-guide/namespaces"
type: string
ownerReferences:
description: List of objects depended by this object. If ALL objects
in the list have been deleted, this object will be garbage collected.
If this object is managed by a controller, then an entry in this list
will point to this controller, with the controller field set to true.
There cannot be more than one managing controller.
items:
properties:
apiVersion:
description: API version of the referent.
type: string
blockOwnerDeletion:
description: If true, AND if the owner has the "foregroundDeletion"
finalizer, then the owner cannot be deleted from the key-value
store until this reference is removed. Defaults to false. To
set this field, a user needs "delete" permission of the owner,
otherwise 422 (Unprocessable Entity) will be returned.
type: boolean
controller:
description: If true, this reference points to the managing controller.
type: boolean
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names'
type: string
uid:
description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids'
type: string
required:
- apiVersion
- kind
- name
- uid
type: object
type: array
resourceVersion:
description: "An opaque value that represents the internal version of
this object that can be used by clients to determine when objects
have changed. May be used for optimistic concurrency, change detection,
and the watch operation on a resource or set of resources. Clients
must treat these values as opaque and passed unmodified back to the
server. They may only be valid for a particular resource or set of
resources. \n Populated by the system. Read-only. Value must be treated
      as opaque by clients. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency"
type: string
selfLink:
description: SelfLink is a URL representing this object. Populated by
the system. Read-only.
type: string
uid:
description: "UID is the unique in time and space value for this object.
It is typically generated by the server on successful creation of
a resource and is not allowed to change on PUT operations. \n Populated
by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids"
type: string
type: object
spec:
properties:
deletePolicy:
description: DeletePolicy defines the policy used to identify nodes
to delete when downscaling. Defaults to "Random". Valid values are
"Random, "Newest", "Oldest"
enum:
- Random
- Newest
- Oldest
type: string
minReadySeconds:
description: MinReadySeconds is the minimum number of seconds for which
a newly created machine should be ready. Defaults to 0 (machine will
be considered available as soon as it is ready)
format: int32
type: integer
replicas:
description: Replicas is the number of desired replicas. This is a pointer
to distinguish between explicit zero and unspecified. Defaults to
1.
format: int32
type: integer
selector:
description: 'Selector is a label query over machines that should match
the replica count. Label keys and values that must match in order
to be controlled by this MachineSet. It must match the machine template''s
labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors'
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements.
The requirements are ANDed.
items:
properties:
key:
description: key is the label key that the selector applies
to.
type: string
operator:
description: operator represents a key's relationship to a
set of values. Valid operators are In, NotIn, Exists and
DoesNotExist.
type: string
values:
description: values is an array of string values. If the operator
is In or NotIn, the values array must be non-empty. If the
operator is Exists or DoesNotExist, the values array must
be empty. This array is replaced during a strategic merge
patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single
{key,value} in the matchLabels map is equivalent to an element
of matchExpressions, whose key field is "key", the operator is
"In", and the values array contains only "value". The requirements
are ANDed.
type: object
type: object
template:
description: Template is the object that describes the machine that
will be created if insufficient replicas are detected.
properties:
metadata:
description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata'
properties:
annotations:
additionalProperties:
type: string
description: 'Annotations is an unstructured key value map stored
with a resource that may be set by external tools to store
and retrieve arbitrary metadata. They are not queryable and
should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations'
type: object
generateName:
description: "GenerateName is an optional prefix, used by the
server, to generate a unique name ONLY IF the Name field has
not been provided. If this field is used, the name returned
to the client will be different than the name passed. This
value will also be combined with a unique suffix. The provided
value has the same validation rules as the Name field, and
may be truncated by the length of the suffix required to make
the value unique on the server. \n If this field is specified
and the generated name exists, the server will NOT return
a 409 - instead, it will either return 201 Created or 500
with Reason ServerTimeout indicating a unique name could not
be found in the time allotted, and the client should retry
(optionally after the time indicated in the Retry-After header).
\n Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency"
type: string
labels:
additionalProperties:
type: string
description: 'Map of string keys and values that can be used
to organize and categorize (scope and select) objects. May
match selectors of replication controllers and services. More
info: http://kubernetes.io/docs/user-guide/labels'
type: object
name:
description: 'Name must be unique within a namespace. Is required
when creating resources, although some resources may allow
a client to request the generation of an appropriate name
automatically. Name is primarily intended for creation idempotence
and configuration definition. Cannot be updated. More info:
http://kubernetes.io/docs/user-guide/identifiers#names'
type: string
namespace:
description: "Namespace defines the space within each name must
be unique. An empty namespace is equivalent to the \"default\"
namespace, but \"default\" is the canonical representation.
Not all objects are required to be scoped to a namespace -
the value of this field for those objects will be empty. \n
Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces"
type: string
ownerReferences:
description: List of objects depended by this object. If ALL
objects in the list have been deleted, this object will be
garbage collected. If this object is managed by a controller,
then an entry in this list will point to this controller,
with the controller field set to true. There cannot be more
than one managing controller.
items:
properties:
apiVersion:
description: API version of the referent.
type: string
blockOwnerDeletion:
description: If true, AND if the owner has the "foregroundDeletion"
finalizer, then the owner cannot be deleted from the
key-value store until this reference is removed. Defaults
to false. To set this field, a user needs "delete" permission
of the owner, otherwise 422 (Unprocessable Entity) will
be returned.
type: boolean
controller:
description: If true, this reference points to the managing
controller.
type: boolean
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names'
type: string
uid:
description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids'
type: string
required:
- apiVersion
- kind
- name
- uid
type: object
type: array
type: object
spec:
description: 'Specification of the desired behavior of the machine.
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status'
properties:
configSource:
description: ConfigSource is used to populate in the associated
Node for dynamic kubelet config. This field already exists
in Node, so any updates to it in the Machine spec will be
automatically copied to the linked NodeRef from the status.
The rest of dynamic kubelet config support should then work
as-is.
properties:
configMap:
description: ConfigMap is a reference to a Node's ConfigMap
properties:
kubeletConfigKey:
description: KubeletConfigKey declares which key of
the referenced ConfigMap corresponds to the KubeletConfiguration
structure This field is required in all cases.
type: string
name:
description: Name is the metadata.name of the referenced
ConfigMap. This field is required in all cases.
type: string
namespace:
description: Namespace is the metadata.namespace of
the referenced ConfigMap. This field is required in
all cases.
type: string
resourceVersion:
description: ResourceVersion is the metadata.ResourceVersion
of the referenced ConfigMap. This field is forbidden
in Node.Spec, and required in Node.Status.
type: string
uid:
description: UID is the metadata.UID of the referenced
ConfigMap. This field is forbidden in Node.Spec, and
required in Node.Status.
type: string
required:
- namespace
- name
- kubeletConfigKey
type: object
type: object
metadata:
description: ObjectMeta will autopopulate the Node created.
Use this to indicate what labels, annotations, name prefix,
etc., should be used when creating the Node.
properties:
annotations:
additionalProperties:
type: string
description: 'Annotations is an unstructured key value map
stored with a resource that may be set by external tools
to store and retrieve arbitrary metadata. They are not
queryable and should be preserved when modifying objects.
More info: http://kubernetes.io/docs/user-guide/annotations'
type: object
generateName:
description: "GenerateName is an optional prefix, used by
the server, to generate a unique name ONLY IF the Name
field has not been provided. If this field is used, the
name returned to the client will be different than the
name passed. This value will also be combined with a unique
suffix. The provided value has the same validation rules
as the Name field, and may be truncated by the length
of the suffix required to make the value unique on the
server. \n If this field is specified and the generated
name exists, the server will NOT return a 409 - instead,
it will either return 201 Created or 500 with Reason ServerTimeout
indicating a unique name could not be found in the time
allotted, and the client should retry (optionally after
the time indicated in the Retry-After header). \n Applied
only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency"
type: string
labels:
additionalProperties:
type: string
description: 'Map of string keys and values that can be
used to organize and categorize (scope and select) objects.
May match selectors of replication controllers and services.
More info: http://kubernetes.io/docs/user-guide/labels'
type: object
name:
description: 'Name must be unique within a namespace. Is
required when creating resources, although some resources
may allow a client to request the generation of an appropriate
name automatically. Name is primarily intended for creation
idempotence and configuration definition. Cannot be updated.
More info: http://kubernetes.io/docs/user-guide/identifiers#names'
type: string
namespace:
description: "Namespace defines the space within each name
must be unique. An empty namespace is equivalent to the
\"default\" namespace, but \"default\" is the canonical
representation. Not all objects are required to be scoped
to a namespace - the value of this field for those objects
will be empty. \n Must be a DNS_LABEL. Cannot be updated.
More info: http://kubernetes.io/docs/user-guide/namespaces"
type: string
ownerReferences:
description: List of objects depended by this object. If
ALL objects in the list have been deleted, this object
will be garbage collected. If this object is managed by
a controller, then an entry in this list will point to
this controller, with the controller field set to true.
There cannot be more than one managing controller.
items:
properties:
apiVersion:
description: API version of the referent.
type: string
blockOwnerDeletion:
description: If true, AND if the owner has the "foregroundDeletion"
finalizer, then the owner cannot be deleted from
the key-value store until this reference is removed.
Defaults to false. To set this field, a user needs
"delete" permission of the owner, otherwise 422
(Unprocessable Entity) will be returned.
type: boolean
controller:
description: If true, this reference points to the
managing controller.
type: boolean
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names'
type: string
uid:
description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids'
type: string
required:
- apiVersion
- kind
- name
- uid
type: object
type: array
type: object
providerID:
description: ProviderID is the identification ID of the machine
provided by the provider. This field must match the provider
ID as seen on the node object corresponding to this machine.
This field is required by higher level consumers of cluster-api.
Example use case is cluster autoscaler with cluster-api as
provider. Clean-up logic in the autoscaler compares machines
to nodes to find out machines at provider which could not
get registered as Kubernetes nodes. With cluster-api as a
generic out-of-tree provider for autoscaler, this field is
required by autoscaler to be able to have a provider view
of the list of machines. Another list of nodes is queried
from the k8s apiserver and then a comparison is done to find
out unregistered machines and are marked for delete. This
field will be set by the actuators and consumed by higher
level entities like autoscaler that will be interfacing with
cluster-api as generic provider.
type: string
providerSpec:
description: ProviderSpec details Provider-specific configuration
to use during node creation.
properties:
value:
description: Value is an inlined, serialized representation
of the resource configuration. It is recommended that
providers maintain their own versioned API types that
should be serialized/deserialized from this field, akin
to component config.
type: object
valueFrom:
description: Source for the provider configuration. Cannot
be used if value is not empty.
properties:
machineClass:
description: The machine class from which the provider
config should be sourced.
properties:
apiVersion:
description: API version of the referent.
type: string
fieldPath:
description: 'If referring to a piece of an object
instead of an entire object, this string should
contain a valid JSON/Go field access statement,
such as desiredState.manifest.containers[2]. For
example, if the object reference is to a container
within a pod, this would take on a value like:
"spec.containers{name}" (where "name" refers to
the name of the container that triggered the event)
or if no container name is specified "spec.containers[2]"
(container with index 2 in this pod). This syntax
is chosen only to have some well-defined way of
referencing a part of an object. TODO: this design
is not final and this field is subject to change
in the future.'
type: string
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
namespace:
description: 'Namespace of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
type: string
provider:
description: Provider is the name of the cloud-provider
which MachineClass is intended for.
type: string
resourceVersion:
description: 'Specific resourceVersion to which
this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency'
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
type: object
type: object
type: object
taints:
description: The list of the taints to be applied to the corresponding
Node in additive manner. This list will not overwrite any
other taints added to the Node on an ongoing basis by other
                entities. These taints should be actively reconciled (e.g.
                if you ask the machine controller to apply a taint and then
                manually remove the taint the machine controller will put
                it back) but not have the machine controller remove any taints
items:
properties:
effect:
description: Required. The effect of the taint on pods
that do not tolerate the taint. Valid effects are NoSchedule,
PreferNoSchedule and NoExecute.
type: string
key:
description: Required. The taint key to be applied to
a node.
type: string
timeAdded:
description: TimeAdded represents the time at which the
taint was added. It is only written for NoExecute taints.
format: date-time
type: string
value:
description: Required. The taint value corresponding to
the taint key.
type: string
required:
- key
- effect
type: object
type: array
versions:
description: Versions of key software to use. This field is
optional at cluster creation time, and omitting the field
indicates that the cluster installation tool should select
defaults for the user. These defaults may differ based on
the cluster installer, but the tool should populate the values
it uses when persisting Machine objects. A Machine spec missing
this field at runtime is invalid.
properties:
controlPlane:
description: ControlPlane is the semantic version of the
Kubernetes control plane to run. This should only be populated
when the machine is a control plane.
type: string
kubelet:
description: Kubelet is the semantic version of kubelet
to run
type: string
required:
- kubelet
type: object
required:
- providerSpec
type: object
type: object
required:
- selector
type: object
status:
properties:
availableReplicas:
description: The number of available replicas (ready for at least minReadySeconds)
for this MachineSet.
format: int32
type: integer
errorMessage:
type: string
errorReason:
description: "In the event that there is a terminal problem reconciling
the replicas, both ErrorReason and ErrorMessage will be set. ErrorReason
will be populated with a succinct value suitable for machine interpretation,
while ErrorMessage will contain a more verbose string suitable for
logging and human consumption. \n These fields should not be set for
transitive errors that a controller faces that are expected to be
fixed automatically over time (like service outages), but instead
indicate that something is fundamentally wrong with the MachineTemplate's
spec or the configuration of the machine controller, and that manual
intervention is required. Examples of terminal errors would be invalid
combinations of settings in the spec, values that are unsupported
by the machine controller, or the responsible machine controller itself
being critically misconfigured. \n Any transient errors that occur
during the reconciliation of Machines can be added as events to the
MachineSet object and/or logged in the controller's output."
type: string
fullyLabeledReplicas:
description: The number of replicas that have labels matching the labels
of the machine template of the MachineSet.
format: int32
type: integer
observedGeneration:
description: ObservedGeneration reflects the generation of the most
recently observed MachineSet.
format: int64
type: integer
readyReplicas:
description: The number of ready replicas for this MachineSet. A machine
is considered ready when the node has been created and is "Ready".
format: int32
type: integer
replicas:
description: Replicas is the most recently observed number of replicas.
format: int32
type: integer
required:
- replicas
type: object
type: object
versions:
- name: v1alpha1
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
name: cluster-api-manager-role
rules:
- apiGroups:
- ""
resources:
- events
verbs:
- get
- list
- watch
- create
- patch
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- cluster.k8s.io
resources:
- clusters
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- cluster.k8s.io
resources:
- machines
- machines/status
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- cluster.k8s.io
resources:
- machinedeployments
- machinedeployments/status
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- cluster.k8s.io
resources:
- machinesets
- machinesets/status
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
name: cluster-api-manager-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-api-manager-role
subjects:
- kind: ServiceAccount
name: default
namespace: cluster-api-system
---
apiVersion: v1
kind: Service
metadata:
labels:
control-plane: controller-manager
controller-tools.k8s.io: "1.0"
name: cluster-api-controller-manager-service
namespace: cluster-api-system
spec:
ports:
- port: 443
selector:
control-plane: controller-manager
controller-tools.k8s.io: "1.0"
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
labels:
control-plane: controller-manager
controller-tools.k8s.io: "1.0"
name: cluster-api-controller-manager
namespace: cluster-api-system
spec:
selector:
matchLabels:
control-plane: controller-manager
controller-tools.k8s.io: "1.0"
serviceName: cluster-api-controller-manager-service
template:
metadata:
labels:
control-plane: controller-manager
controller-tools.k8s.io: "1.0"
spec:
containers:
- command:
- /manager
image: gcr.io/k8s-cluster-api/cluster-api-controller:0.1.0
name: manager
resources:
limits:
cpu: 100m
memory: 30Mi
requests:
cpu: 100m
memory: 20Mi
terminationGracePeriodSeconds: 10
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
key: node.alpha.kubernetes.io/notReady
operator: Exists
- effect: NoExecute
key: node.alpha.kubernetes.io/unreachable
operator: Exists
`
) | pkg/capi/constants.go | 0.871448 | 0.4474 | constants.go | starcoder |
package model
import (
"fmt"
"math/big"
"strings"
"github.com/hashicorp/hcl/v2"
"github.com/hashicorp/hcl/v2/hclsyntax"
"github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/syntax"
"github.com/zclconf/go-cty/cty"
)
// TupleType represents values that are a sequence of independently-typed elements.
type TupleType struct {
	// ElementTypes are the types of the tuple's elements.
	ElementTypes []Type

	// elementUnion lazily caches the union of all element types; it is built
	// on the first traversal with a dynamic (statically-unknown) index.
	// NOTE(review): not synchronized — assumes single-goroutine type checking.
	elementUnion Type
	// s caches a string form of the type. NOTE(review): populated elsewhere
	// (the String method is not visible in this chunk) — confirm before relying on it.
	s string
}
// NewTupleType creates a new tuple type whose elements have the given types,
// in order.
func NewTupleType(elementTypes ...Type) Type {
	tuple := &TupleType{ElementTypes: elementTypes}
	return tuple
}
// SyntaxNode returns the syntax node associated with the type. Tuple types
// are never produced directly from syntax, so this is always syntax.None.
func (*TupleType) SyntaxNode() hclsyntax.Node {
	return syntax.None
}
// Traverse attempts to traverse the tuple type with the given traverser.
// The traverser's key must be a number: a dynamic (statically-unknown) index
// yields the union of all element types, a constant integral index yields the
// type of the element at that index, and anything else — a non-number, a
// fractional value, or an out-of-range index — fails with a diagnostic.
func (t *TupleType) Traverse(traverser hcl.Traverser) (Traversable, hcl.Diagnostics) {
	key, keyType := GetTraverserKey(traverser)

	if !InputType(NumberType).AssignableFrom(keyType) {
		return DynamicType, hcl.Diagnostics{unsupportedTupleIndex(traverser.SourceRange())}
	}

	if key == cty.DynamicVal {
		// The index is not known statically, so the result may be any of the
		// element types; build (and cache) their union.
		if t.elementUnion == nil {
			t.elementUnion = NewUnionType(t.ElementTypes...)
		}
		return t.elementUnion, nil
	}

	elementIndex, acc := key.AsBigFloat().Int64()
	if acc != big.Exact {
		// The key is fractional or does not fit in an int64; either way it
		// cannot name a tuple element.
		return DynamicType, hcl.Diagnostics{unsupportedTupleIndex(traverser.SourceRange())}
	}
	// Valid indices are 0..len-1. Note the >= here: the original used > and
	// would have panicked with an index-out-of-range on the access below when
	// elementIndex == len(t.ElementTypes).
	if elementIndex < 0 || elementIndex >= int64(len(t.ElementTypes)) {
		return DynamicType, hcl.Diagnostics{tupleIndexOutOfRange(len(t.ElementTypes), traverser.SourceRange())}
	}

	return t.ElementTypes[int(elementIndex)], nil
}
// Equals returns true if this type has the same identity as the given type.
// For tuples, identity means the other type is also a tuple with element-wise
// equal types; the real work happens in the unexported equals.
func (t *TupleType) Equals(other Type) bool {
	return t.equals(other, nil)
}
// equals reports whether t and other have the same identity: other must also
// be a *TupleType of the same length whose element types are pairwise equal.
// The seen map is threaded through to the element comparisons to break cycles
// in recursive type definitions.
func (t *TupleType) equals(other Type, seen map[Type]struct{}) bool {
	if t == other {
		return true
	}
	otherTuple, ok := other.(*TupleType)
	if !ok {
		return false
	}
	if len(t.ElementTypes) != len(otherTuple.ElementTypes) {
		return false
	}
	// The loop variable previously shadowed the receiver t; renamed to et so
	// the receiver remains visible and future edits can't silently pick up
	// the wrong value.
	for i, et := range t.ElementTypes {
		if !et.equals(otherTuple.ElementTypes[i], seen) {
			return false
		}
	}
	return true
}
// AssignableFrom returns true if this type is assignable from the indicated
// source type. A tuple is assignable from another tuple when each of its
// element types is assignable from the corresponding source element type;
// source positions past the end of the source tuple are treated as None.
func (t *TupleType) AssignableFrom(src Type) bool {
	return assignableFrom(t, src, func() bool {
		srcTuple, ok := src.(*TupleType)
		if !ok {
			return false
		}
		for i, dst := range t.ElementTypes {
			// A missing source element behaves as None.
			srcElement := NoneType
			if i < len(srcTuple.ElementTypes) {
				srcElement = srcTuple.ElementTypes[i]
			}
			if !dst.AssignableFrom(srcElement) {
				return false
			}
		}
		return true
	})
}
// tupleElementUnifier accumulates the element-wise unification of one or more
// tuple types.
type tupleElementUnifier struct {
	// elementTypes holds the unified element types accumulated so far.
	elementTypes []Type
	// any records whether unify has been called at least once.
	any bool
	// conversionKind is the weakest conversion kind observed across all
	// element unifications so far.
	conversionKind ConversionKind
}
// unify folds the element types of t into the unifier. The first tuple seeds
// the accumulated state; subsequent tuples are unified element-wise with the
// accumulated elements, and any elements beyond the shorter length are wrapped
// in Optional since they may be absent.
func (u *tupleElementUnifier) unify(t *TupleType) {
	if !u.any {
		// First tuple: copy its elements and start from SafeConversion.
		u.elementTypes, u.any, u.conversionKind = append([]Type(nil), t.ElementTypes...), true, SafeConversion
	} else {
		// Unify elements with matching indices, tracking the weakest kind.
		min := len(u.elementTypes)
		if l := len(t.ElementTypes); l < min {
			min = l
		}
		for i := 0; i < min; i++ {
			element, ck := u.elementTypes[i].unify(t.ElementTypes[i])
			if ck < u.conversionKind {
				u.conversionKind = ck
			}
			u.elementTypes[i] = element
		}
		// Elements present in only one of the tuples become optional.
		if len(u.elementTypes) > len(t.ElementTypes) {
			for i := min; i < len(u.elementTypes); i++ {
				u.elementTypes[i] = NewOptionalType(u.elementTypes[i])
			}
		} else {
			for _, t := range t.ElementTypes[min:] {
				u.elementTypes = append(u.elementTypes, NewOptionalType(t))
			}
		}
	}
}
// ConversionFrom returns the kind of conversion (if any) that is possible from
// the source type to this type, discarding any diagnostics.
func (t *TupleType) ConversionFrom(src Type) ConversionKind {
	kind, _ := t.conversionFrom(src, false, nil)
	return kind
}
// conversionFrom computes the kind of conversion (if any) from src to t along
// with lazily-built diagnostics explaining a failure. Tuples convert
// element-wise (lengths must match unless unifying); lists and sets convert
// unsafely since their length and element values are not statically known.
func (t *TupleType) conversionFrom(src Type, unifying bool, seen map[Type]struct{}) (ConversionKind, lazyDiagnostics) {
	return conversionFrom(t, src, unifying, seen, func() (ConversionKind, lazyDiagnostics) {
		switch src := src.(type) {
		case *TupleType:
			// When unifying, we will unify two tuples of different length to a new tuple, where elements with matching
			// indices are unified and elements that are missing are treated as having type None.
			if unifying {
				var unifier tupleElementUnifier
				unifier.unify(t)
				unifier.unify(src)
				return unifier.conversionKind, nil
			}
			if len(t.ElementTypes) != len(src.ElementTypes) {
				return NoConversion, func() hcl.Diagnostics { return hcl.Diagnostics{tuplesHaveDifferentLengths(t, src)} }
			}
			// Track the weakest element-wise conversion, stopping early once
			// a NoConversion is found.
			conversionKind := SafeConversion
			var diags lazyDiagnostics
			for i, dst := range t.ElementTypes {
				if ck, why := dst.conversionFrom(src.ElementTypes[i], unifying, seen); ck < conversionKind {
					conversionKind, diags = ck, why
					if conversionKind == NoConversion {
						break
					}
				}
			}
			// When unifying, the conversion kind of two tuple types is the lesser of the conversion in each direction.
			if unifying {
				conversionTo, _ := src.conversionFrom(t, false, seen)
				if conversionTo < conversionKind {
					conversionKind = conversionTo
				}
			}
			return conversionKind, diags
		case *ListType:
			// Every tuple element must be convertible from the list's element
			// type; the result is at best unsafe.
			conversionKind := UnsafeConversion
			var diags lazyDiagnostics
			for _, t := range t.ElementTypes {
				if ck, why := t.conversionFrom(src.ElementType, unifying, seen); ck < conversionKind {
					conversionKind, diags = ck, why
					if conversionKind == NoConversion {
						break
					}
				}
			}
			return conversionKind, diags
		case *SetType:
			// Sets are handled identically to lists.
			conversionKind := UnsafeConversion
			var diags lazyDiagnostics
			for _, t := range t.ElementTypes {
				if ck, why := t.conversionFrom(src.ElementType, unifying, seen); ck < conversionKind {
					conversionKind, diags = ck, why
					if conversionKind == NoConversion {
						break
					}
				}
			}
			return conversionKind, diags
		}
		return NoConversion, func() hcl.Diagnostics { return hcl.Diagnostics{typeNotConvertible(t, src)} }
	})
}
// String returns a string representation of the tuple type of the form
// "tuple(e1, e2, ...)".
func (t *TupleType) String() string {
	return t.string(nil)
}
// string renders the type as "tuple(e1, e2, ...)", caching the rendered form
// in t.s so repeated formatting is cheap.
// NOTE(review): the cache write is not synchronized — confirm the same type
// value is never formatted from multiple goroutines concurrently.
func (t *TupleType) string(seen map[Type]struct{}) string {
	if t.s == "" {
		elements := make([]string, len(t.ElementTypes))
		for i, e := range t.ElementTypes {
			elements[i] = e.string(seen)
		}
		t.s = fmt.Sprintf("tuple(%s)", strings.Join(elements, ", "))
	}
	return t.s
}
// unify computes the type to which both t and other can be converted, along
// with the kind of conversion required. Tuples unify element-wise; lists and
// sets are preferred over the tuple with their element type unified against
// every tuple element; anything else unifies to the tuple itself.
func (t *TupleType) unify(other Type) (Type, ConversionKind) {
	return unify(t, other, func() (Type, ConversionKind) {
		switch other := other.(type) {
		case *TupleType:
			// When unifying, we will unify two tuples of different length to a new tuple, where elements with matching
			// indices are unified and elements that are missing are treated as having type None.
			var unifier tupleElementUnifier
			unifier.unify(t)
			unifier.unify(other)
			return NewTupleType(unifier.elementTypes...), unifier.conversionKind
		case *ListType:
			// Prefer the list type, but unify the element type.
			elementType, conversionKind := other.ElementType, SafeConversion
			for _, t := range t.ElementTypes {
				element, ck := elementType.unify(t)
				if ck < conversionKind {
					conversionKind = ck
				}
				elementType = element
			}
			return NewListType(elementType), conversionKind
		case *SetType:
			// Prefer the set type, but unify the element type.
			elementType, conversionKind := other.ElementType, UnsafeConversion
			for _, t := range t.ElementTypes {
				element, ck := elementType.unify(t)
				if ck < conversionKind {
					conversionKind = ck
				}
				elementType = element
			}
			return NewSetType(elementType), conversionKind
		default:
			// Otherwise, prefer the tuple type.
			kind, _ := t.conversionFrom(other, true, nil)
			return t, kind
		}
	})
}
func (*TupleType) isType() {} | pkg/codegen/hcl2/model/type_tuple.go | 0.697506 | 0.450541 | type_tuple.go | starcoder |
// Package primitiveset is a container for a set of primitives (i.e. implementations of cryptographic
// primitives offered by Tink). It provides also additional properties for the primitives
// it holds. In particular, one of the primitives in the set can be distinguished as
// "the primary" one.
package primitiveset
import (
"fmt"
"github.com/google/tink/go/format"
tinkpb "github.com/google/tink/proto/tink_go_proto"
)
// Entry represents a single entry in the keyset. In addition to the actual
// primitive, it holds the identifier (output prefix) and status of the
// primitive.
type Entry struct {
	// Primitive is the implementation instance (e.g. an AEAD) for one key.
	Primitive interface{}
	// Prefix is the ciphertext output prefix derived from the key.
	Prefix string
	// PrefixType records which prefix scheme produced Prefix.
	PrefixType tinkpb.OutputPrefixType
	// Status is the key's status (e.g. enabled/disabled) from the keyset.
	Status tinkpb.KeyStatusType
}
// newEntry constructs an Entry from a primitive and the metadata of the key it
// belongs to.
func newEntry(p interface{}, prefix string, prefixType tinkpb.OutputPrefixType, status tinkpb.KeyStatusType) *Entry {
	e := new(Entry)
	e.Primitive = p
	e.Prefix = prefix
	e.PrefixType = prefixType
	e.Status = status
	return e
}
// PrimitiveSet is used for supporting key rotation: primitives in a set correspond to keys in a
// keyset. Users will usually work with primitive instances, which essentially wrap primitive
// sets. For example an instance of an AEAD-primitive for a given keyset holds a set of
// AEAD-primitives corresponding to the keys in the keyset, and uses the set members to do the
// actual crypto operations: to encrypt data the primary AEAD-primitive from the set is used, and
// upon decryption the ciphertext's prefix determines the id of the primitive from the set.
// PrimitiveSet is a public to allow its use in implementations of custom primitives.
type PrimitiveSet struct {
	// Primary entry. May be nil until a primary has been designated.
	Primary *Entry
	// The primitives are stored in a map of (ciphertext prefix, list of primitives sharing the
	// prefix). This allows quickly retrieving the primitives sharing some particular prefix.
	Entries map[string][]*Entry
}
// New returns an empty instance of PrimitiveSet with no primary entry and an
// initialized (empty) prefix map.
func New() *PrimitiveSet {
	ps := &PrimitiveSet{Entries: map[string][]*Entry{}}
	return ps
}
// RawEntries returns all primitives in the set that have RAW prefix.
// It is shorthand for EntriesForPrefix(format.RawPrefix).
func (ps *PrimitiveSet) RawEntries() ([]*Entry, error) {
	return ps.EntriesForPrefix(format.RawPrefix)
}
// EntriesForPrefix returns all primitives in the set that have the given
// prefix. A prefix with no entries yields an empty, non-nil slice.
func (ps *PrimitiveSet) EntriesForPrefix(prefix string) ([]*Entry, error) {
	if entries, ok := ps.Entries[prefix]; ok {
		return entries, nil
	}
	return []*Entry{}, nil
}
// Add creates a new entry in the primitive set and returns the added entry.
func (ps *PrimitiveSet) Add(p interface{}, key *tinkpb.Keyset_Key) (*Entry, error) {
if key == nil || p == nil {
return nil, fmt.Errorf("primitive_set: key and primitive must not be nil")
}
prefix, err := format.OutputPrefix(key)
if err != nil {
return nil, fmt.Errorf("primitive_set: %s", err)
}
e := newEntry(p, prefix, key.OutputPrefixType, key.Status)
ps.Entries[prefix] = append(ps.Entries[prefix], e)
return e, nil
} | go/primitiveset/primitiveset.go | 0.808408 | 0.475666 | primitiveset.go | starcoder |
package goment
import (
"regexp"
)
var inclusivityRegex = regexp.MustCompile("^[\\[\\(]{1}[\\]\\)]{1}$")
// IsBefore will check if a Goment is before another Goment.
// With no arguments the comparison target is the current time. An optional
// second argument names granularity units; the target is then snapped to the
// start of that unit before comparing. Unparsable input or a non-string units
// argument yields false.
func (g *Goment) IsBefore(args ...interface{}) bool {
	var err error
	var input *Goment

	numArgs := len(args)
	if numArgs == 0 {
		// No argument: compare against "now".
		input, err = New()
	} else {
		input, err = New(args[0])
	}
	if err != nil {
		return false
	}

	if numArgs <= 1 {
		return g.ToTime().Before(input.ToTime())
	}

	if units, ok := args[1].(string); ok {
		return g.ToTime().Before(input.StartOf(units).ToTime())
	}
	return false
}
// IsAfter will check if a Goment is after another Goment.
// With no arguments the comparison target is the current time. An optional
// second argument names granularity units; the target is then snapped to the
// end of that unit before comparing (mirror image of IsBefore's StartOf).
// Unparsable input or a non-string units argument yields false.
func (g *Goment) IsAfter(args ...interface{}) bool {
	var err error
	var input *Goment

	numArgs := len(args)
	if numArgs == 0 {
		// No argument: compare against "now".
		input, err = New()
	} else {
		input, err = New(args[0])
	}
	if err != nil {
		return false
	}

	if numArgs <= 1 {
		return g.ToTime().After(input.ToTime())
	}

	if units, ok := args[1].(string); ok {
		return g.ToTime().After(input.EndOf(units).ToTime())
	}
	return false
}
// IsSame will check if a Goment is the same as another Goment.
// An optional second argument names granularity units; both sides are then
// snapped to the start of that unit before comparing. Called with no
// arguments, or with unparsable input, it returns false.
func (g *Goment) IsSame(args ...interface{}) bool {
	numArgs := len(args)
	if numArgs > 0 {
		input, err := New(args[0])
		if err != nil {
			return false
		}

		if numArgs == 1 {
			return g.ToTime().Equal(input.ToTime())
		}

		if units, ok := args[1].(string); ok {
			return g.StartOf(units).ToTime().Equal(input.StartOf(units).ToTime())
		}
	}
	return false
}
// IsSameOrBefore will check if a Goment is before or the same as another
// Goment. Arguments are interpreted exactly as for IsSame and IsBefore.
func (g *Goment) IsSameOrBefore(args ...interface{}) bool {
	return g.IsSame(args...) || g.IsBefore(args...)
}
// IsSameOrAfter will check if a Goment is after or the same as another Goment.
// Arguments are interpreted exactly as for IsSame and IsAfter.
func (g *Goment) IsSameOrAfter(args ...interface{}) bool {
	return g.IsSame(args...) || g.IsAfter(args...)
}
// IsBetween will check if a Goment is between two other Goments.
func (g *Goment) IsBetween(args ...interface{}) bool {
numArgs := len(args)
if numArgs >= 2 {
units := ""
inclusivity := "()"
fromResult, toResult := false, false
from, err := New(args[0])
if err != nil {
return false
}
to, err := New(args[1])
if err != nil {
return false
}
if numArgs >= 3 {
if parsedUnits, ok := args[2].(string); ok {
units = parsedUnits
}
}
if numArgs == 4 {
if parsedInclusivity, ok := args[3].(string); ok {
if inclusivityRegex.MatchString(parsedInclusivity) {
inclusivity = parsedInclusivity
}
}
}
if inclusivity[0] == '(' {
fromResult = g.IsAfter(from, units)
} else {
fromResult = !g.IsBefore(from, units)
}
if inclusivity[1] == ')' {
toResult = g.IsBefore(to, units)
} else {
toResult = !g.IsAfter(to, units)
}
return fromResult && toResult
}
return false
} | compare.go | 0.593963 | 0.435481 | compare.go | starcoder |
package throttle
import (
"sync/atomic"
"time"
)
//------------------------------------------------------------------------------
// Type is a throttle of retries to avoid endless busy loops when a message
// fails to reach its destination. The mutable fields (consecutiveRetries,
// throttlePeriod) are accessed via sync/atomic, so a single Type may be
// shared by concurrent callers of Retry/ExponentialRetry/Reset.
type Type struct {
	// unthrottledRetries is the number of concecutive retries we are
	// comfortable attempting before throttling begins.
	unthrottledRetries int64

	// maxExponentialPeriod is the maximum duration for which our throttle lasts
	// when exponentially increasing.
	maxExponentialPeriod int64

	// baseThrottlePeriod is the static duration for which our throttle lasts.
	baseThrottlePeriod int64

	// throttlePeriod is the current throttle period, by default this is set to
	// the baseThrottlePeriod.
	throttlePeriod int64

	// closeChan can interrupt a throttle when closed.
	closeChan <-chan struct{}

	// consecutiveRetries is the live count of consecutive retries.
	consecutiveRetries int64
}
// New creates a new throttle, which permits a static number of consecutive
// retries before throttling subsequent retries. A success will reset the count
// of consecutive retries. Defaults: 3 unthrottled retries, 1s base period,
// 1m exponential cap, no close channel.
func New(options ...func(*Type)) *Type {
	t := Type{
		unthrottledRetries:   3,
		baseThrottlePeriod:   int64(time.Second),
		maxExponentialPeriod: int64(time.Minute),
	}
	t.throttlePeriod = t.baseThrottlePeriod
	for _, opt := range options {
		opt(&t)
	}
	return &t
}
//------------------------------------------------------------------------------
// OptMaxUnthrottledRetries sets the maximum number of consecutive retries that
// will be attempted before throttling will begin.
func OptMaxUnthrottledRetries(n int64) func(*Type) {
	return func(t *Type) {
		t.unthrottledRetries = n
	}
}
// OptMaxExponentPeriod sets the maximum period of time that throttles will
// last when exponentially increasing (the cap used by ExponentialRetry).
func OptMaxExponentPeriod(period time.Duration) func(*Type) {
	return func(t *Type) {
		t.maxExponentialPeriod = int64(period)
	}
}
// OptThrottlePeriod sets the static period of time that throttles will last.
// It updates both the base period and the current period.
func OptThrottlePeriod(period time.Duration) func(*Type) {
	return func(t *Type) {
		t.baseThrottlePeriod = int64(period)
		t.throttlePeriod = int64(period)
	}
}
// OptCloseChan sets a read-only channel that, if closed, will interrupt a
// retry throttle early (Retry then returns false).
func OptCloseChan(c <-chan struct{}) func(*Type) {
	return func(t *Type) {
		t.closeChan = c
	}
}
//------------------------------------------------------------------------------
// Retry indicates that a retry is about to occur and, if appropriate, will
// block until either the throttle period is over and the retry may be attempted
// (returning true) or that the close channel has closed (returning false).
// The first unthrottledRetries consecutive calls return true immediately.
func (t *Type) Retry() bool {
	if rets := atomic.AddInt64(&t.consecutiveRetries, 1); rets <= t.unthrottledRetries {
		return true
	}
	// A nil closeChan never becomes ready in select, so when no close channel
	// was configured only the timer case can fire.
	select {
	case <-time.After(time.Duration(atomic.LoadInt64(&t.throttlePeriod))):
	case <-t.closeChan:
		return false
	}
	return true
}
// ExponentialRetry is the same as Retry except also sets the throttle period to
// exponentially increase after each consecutive retry.
func (t *Type) ExponentialRetry() bool {
	if atomic.LoadInt64(&t.consecutiveRetries) > t.unthrottledRetries {
		// Double the current period, capped at maxExponentialPeriod.
		if throtPrd := atomic.LoadInt64(&t.throttlePeriod); throtPrd < t.maxExponentialPeriod {
			throtPrd = throtPrd * 2
			if throtPrd > t.maxExponentialPeriod {
				throtPrd = t.maxExponentialPeriod
			}
			atomic.StoreInt64(&t.throttlePeriod, throtPrd)
		}
	}
	return t.Retry()
}
// Reset clears the count of consecutive retries and resets the exponential
// backoff to the configured base throttle period.
func (t *Type) Reset() {
	atomic.StoreInt64(&t.consecutiveRetries, 0)
	atomic.StoreInt64(&t.throttlePeriod, t.baseThrottlePeriod)
}
//------------------------------------------------------------------------------
package matcher
import (
"github.com/mhoc/xtern-matcher/model"
)
func Simple(students model.Students, companies model.Companies) model.Matches {
var matches model.Matches
// The core of the matching algorithm works by company rank; starting with rank 0, going until
// rank n, finding as many matches at each rank as possible. 12 is just a magic number here to
// represent the globally maximum number of ranks any company could have.
for rank := 0; rank < 12; rank++ {
// Assemble a list of students who were ranked at this rank by any company.
studentsAtThisRank := make(map[string][]*model.Company)
for _, company := range companies {
if len(company.Students) > rank {
studentName := company.Students[rank]
if _, in := studentsAtThisRank[studentName]; in {
studentsAtThisRank[studentName] = append(studentsAtThisRank[studentName], company)
} else {
studentsAtThisRank[studentName] = []*model.Company{company}
}
}
}
// Iterate over every student.
for studentName, companiesAtRank := range studentsAtThisRank {
student := students.Find(studentName)
if len(companiesAtRank) == 1 {
// If only one company ranked this student at this rank, we assign the match.
company := companiesAtRank[0]
if matches.CompanyCanSupportMatch(company) && matches.FindByStudent(student) == nil {
matches = matches.Add(student, company)
}
} else if len(companiesAtRank) > 1 {
// If multiple companies want the student at this level, we resolve the match by using the
// student's preferences
for _, studentRankCompanyName := range student.Companies {
for _, companyAtRank := range companiesAtRank {
if studentRankCompanyName == companyAtRank.Name &&
matches.CompanyCanSupportMatch(companyAtRank) && matches.FindByStudent(student) == nil {
matches = matches.Add(student, companyAtRank)
}
}
}
// At this point, it is possible that multiple companies ranked this student but the student
// didn't rank any of them; in this case, we just give them the first company that has
// availability.
for _, companyAtRank := range companiesAtRank {
if matches.CompanyCanSupportMatch(companyAtRank) && matches.FindByStudent(student) == nil {
matches = matches.Add(student, companyAtRank)
}
}
} else {
// No companies want the student at this rank; continue on to the next rank.
continue
}
}
}
return matches
} | matcher/matcher_simple.go | 0.577138 | 0.408277 | matcher_simple.go | starcoder |
package log
import "strconv"
/*
Copyright 2019 <NAME> <<EMAIL>>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// hex provides lowercase hexadecimal digits used to build \u00XX escape
// sequences in writeString.
const (
	hex = "0123456789abcdef"
)
// encoder incrementally builds JSON output in an append-only byte buffer.
type encoder struct {
	// data is the JSON output accumulated so far.
	data []byte
	// index is an auxiliary cursor reset to -1 by reset().
	// NOTE(review): no other use of index is visible in this file — confirm
	// its purpose with the rest of the package.
	index int64
}
// checkComma appends a separating comma unless the buffer is empty or the
// previous byte opens a container or introduces a value ('{', '[' or ':').
func (e *encoder) checkComma() {
	if len(e.data) == 0 {
		return
	}
	switch e.data[len(e.data)-1] {
	case '{', '[', ':':
		// No separator needed directly after an opener or a key's colon.
	default:
		e.data = append(e.data, ',')
	}
}
// openObject appends '{', preceded by a comma when required.
func (e *encoder) openObject() {
	e.checkComma()
	e.data = append(e.data, '{')
}
// closeObject appends the terminating '}' of an object.
func (e *encoder) closeObject() {
	e.data = append(e.data, '}')
}
// openArray appends '[', preceded by a comma when required.
func (e *encoder) openArray() {
	e.checkComma()
	e.data = append(e.data, '[')
}
// closeArray appends the terminating ']' of an array.
func (e *encoder) closeArray() {
	e.data = append(e.data, ']')
}
// reset truncates the output buffer (retaining its capacity for reuse) and
// resets the auxiliary index to -1.
func (e *encoder) reset() {
	e.data = e.data[:0]
	e.index = -1
}
// AppendBool appends a boolean literal, preceded by a comma when required.
func (e *encoder) AppendBool(value bool) {
	e.checkComma()
	e.data = strconv.AppendBool(e.data, value)
}
// AppendFloat appends a float64 in shortest decimal form ('f', precision -1),
// preceded by a comma when required.
func (e *encoder) AppendFloat(value float64) {
	e.checkComma()
	e.data = strconv.AppendFloat(e.data, value, 'f', -1, 64)
}
// AppendInt appends a base-10 int64 literal, preceded by a comma when required.
func (e *encoder) AppendInt(value int64) {
	e.checkComma()
	e.data = strconv.AppendInt(e.data, value, 10)
}
// AppendUint appends a base-10 uint64 literal, preceded by a comma when
// required.
func (e *encoder) AppendUint(value uint64) {
	e.checkComma()
	e.data = strconv.AppendUint(e.data, value, 10)
}
// AppendString appends a double-quoted, escaped string value (see
// writeString), preceded by a comma when required.
func (e *encoder) AppendString(value string) {
	e.checkComma()
	e.writeString(value)
}
// AppendBytes appends value verbatim (no quoting or escaping), preceded by a
// comma when required. The caller is responsible for supplying bytes that are
// already valid JSON.
func (e *encoder) AppendBytes(value []byte) {
	e.checkComma()
	e.data = append(e.data, value...)
}
// based on https://golang.org/src/encoding/json/encode.go:884
// writeString appends s as a double-quoted JSON string, escaping quotes,
// backslashes and the common control characters, and encoding any remaining
// byte < 0x20 as \u00XX.
func (e *encoder) writeString(s string) {
	e.data = append(e.data, '"')
	for i := 0; i < len(s); i++ {
		c := s[i]
		if c >= 0x20 && c != '\\' && c != '"' {
			e.data = append(e.data, c)
			continue
		}
		switch c {
		case '"', '\\':
			// Escape the character itself. (The original appended `\"` for
			// both cases, turning every backslash into an escaped quote.)
			e.data = append(e.data, '\\', c)
		case '\n':
			// JSON requires backslash + the letter 'n' (and likewise below),
			// not a backslash followed by the raw control byte as before.
			e.data = append(e.data, '\\', 'n')
		case '\f':
			e.data = append(e.data, '\\', 'f')
		case '\b':
			e.data = append(e.data, '\\', 'b')
		case '\r':
			e.data = append(e.data, '\\', 'r')
		case '\t':
			e.data = append(e.data, '\\', 't')
		default:
			// Remaining control characters use the \u00XX form.
			e.data = append(e.data, `\u00`...)
			e.data = append(e.data, hex[c>>4], hex[c&0xF])
		}
	}
	e.data = append(e.data, '"')
}
func (e *encoder) addKey(key string) {
e.checkComma()
e.data = append(e.data, '"')
e.data = append(e.data, key...)
e.data = append(e.data, '"', ':')
} | vendor/github.com/brunotm/log/encoder.go | 0.669096 | 0.401131 | encoder.go | starcoder |
package search
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// Acronym is a search answer describing an acronym; it extends SearchAnswer
// with the expansion text and a publication state.
type Acronym struct {
	SearchAnswer
	// What the acronym stands for.
	standsFor *string
	// State of the acronym. Possible values are: published, draft, excluded, or unknownFutureValue.
	state *AnswerState
}
// NewAcronym instantiates a new acronym and sets the default values, embedding
// a freshly-initialized SearchAnswer.
func NewAcronym()(*Acronym) {
    m := &Acronym{
        SearchAnswer: *NewSearchAnswer(),
    }
    return m
}
// CreateAcronymFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value.
// No discriminator is inspected here; a plain Acronym is always returned.
func CreateAcronymFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
    return NewAcronym(), nil
}
// GetFieldDeserializers the deserialization information for the current model.
// It extends the embedded SearchAnswer's deserializer map with the Acronym
// properties (standsFor, state).
func (m *Acronym) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
    res := m.SearchAnswer.GetFieldDeserializers()
    // standsFor: plain string property.
    res["standsFor"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetStringValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetStandsFor(val)
        }
        return nil
    }
    // state: enum parsed via ParseAnswerState.
    res["state"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetEnumValue(ParseAnswerState)
        if err != nil {
            return err
        }
        if val != nil {
            m.SetState(val.(*AnswerState))
        }
        return nil
    }
    return res
}
// GetStandsFor gets the standsFor property value. What the acronym stands for.
// Returns nil on a nil receiver.
func (m *Acronym) GetStandsFor()(*string) {
    if m == nil {
        return nil
    }
    return m.standsFor
}
// GetState gets the state property value. State of the acronym. Possible
// values are: published, draft, excluded, or unknownFutureValue. Returns nil
// on a nil receiver.
func (m *Acronym) GetState()(*AnswerState) {
    if m == nil {
        return nil
    }
    return m.state
}
// Serialize serializes information the current object: first the embedded
// SearchAnswer fields, then standsFor and (when set) state as strings.
func (m *Acronym) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
    err := m.SearchAnswer.Serialize(writer)
    if err != nil {
        return err
    }
    {
        err = writer.WriteStringValue("standsFor", m.GetStandsFor())
        if err != nil {
            return err
        }
    }
    // The enum value is written via its string representation.
    if m.GetState() != nil {
        cast := (*m.GetState()).String()
        err = writer.WriteStringValue("state", &cast)
        if err != nil {
            return err
        }
    }
    return nil
}
// SetStandsFor sets the standsFor property value. What the acronym stands for.
// A nil receiver is a no-op.
func (m *Acronym) SetStandsFor(value *string)() {
    if m == nil {
        return
    }
    m.standsFor = value
}
// SetState sets the state property value. State of the acronym. Possible values are: published, draft, excluded, or unknownFutureValue.
func (m *Acronym) SetState(value *AnswerState)() {
if m != nil {
m.state = value
}
} | models/search/acronym.go | 0.656108 | 0.451871 | acronym.go | starcoder |
package constraint
import (
"github.com/g3n/engine/math32"
"github.com/g3n/engine/experimental/physics/equation"
)
// ConeTwist constraint. It combines a point-to-point pivot with a cone
// equation limiting the angle between the two body axes and a rotational
// equation limiting twist about those axes.
type ConeTwist struct {
	PointToPoint
	axisA *math32.Vector3 // Rotation axis, defined locally in bodyA.
	axisB *math32.Vector3 // Rotation axis, defined locally in bodyB.
	coneEq  *equation.Cone       // Constrains the angle between the axes.
	twistEq *equation.Rotational // Constrains twist about the axes.
	angle      float32 // Cone angle passed to the cone equation.
	twistAngle float32 // Maximum twist angle passed to the twist equation.
}
// NewConeTwist creates and returns a pointer to a new ConeTwist constraint
// object. The axes are normalized; both equations are configured with a
// [-maxForce, 0] force range so they only pull the bodies toward the
// constraint rather than push outward.
func NewConeTwist(bodyA, bodyB IBody, pivotA, pivotB, axisA, axisB *math32.Vector3, angle, twistAngle, maxForce float32) *ConeTwist {
	ctc := new(ConeTwist)
	// Default of pivots and axes should be vec3(0)
	ctc.initialize(bodyA, bodyB, pivotA, pivotB, maxForce)

	ctc.axisA = axisA
	ctc.axisB = axisB
	ctc.axisA.Normalize()
	ctc.axisB.Normalize()

	ctc.angle = angle
	ctc.twistAngle = twistAngle

	ctc.coneEq = equation.NewCone(bodyA, bodyB, ctc.axisA, ctc.axisB, ctc.angle, maxForce)

	ctc.twistEq = equation.NewRotational(bodyA, bodyB, maxForce)
	ctc.twistEq.SetAxisA(ctc.axisA)
	ctc.twistEq.SetAxisB(ctc.axisB)

	// Make the cone equation push the bodies toward the cone axis, not outward
	ctc.coneEq.SetMaxForce(0)
	ctc.coneEq.SetMinForce(-maxForce)

	// Make the twist equation add torque toward the initial position
	ctc.twistEq.SetMaxForce(0)
	ctc.twistEq.SetMinForce(-maxForce)

	ctc.AddEquation(ctc.coneEq)
	ctc.AddEquation(ctc.twistEq)
	return ctc
}
// Update updates the equations with data.
func (ctc *ConeTwist) Update() {
ctc.PointToPoint.Update()
// Update the axes to the cone constraint
worldAxisA := ctc.bodyA.VectorToWorld(ctc.axisA)
worldAxisB := ctc.bodyB.VectorToWorld(ctc.axisB)
ctc.coneEq.SetAxisA(&worldAxisA)
ctc.coneEq.SetAxisB(&worldAxisB)
// Update the world axes in the twist constraint
tA, _ := ctc.axisA.RandomTangents()
worldTA := ctc.bodyA.VectorToWorld(tA)
ctc.twistEq.SetAxisA(&worldTA)
tB, _ := ctc.axisB.RandomTangents()
worldTB := ctc.bodyB.VectorToWorld(tB)
ctc.twistEq.SetAxisB(&worldTB)
ctc.coneEq.SetAngle(ctc.angle)
ctc.twistEq.SetMaxAngle(ctc.twistAngle)
} | experimental/physics/constraint/conetwist.go | 0.860765 | 0.508483 | conetwist.go | starcoder |
// Package elliptic implements elliptic curve primitives.
package elliptic
import (
"crypto/rand"
"io"
"math/big"
"sync"
"github.com/svkirillov/cryptopals-go/helpers"
)
// A Curve represents a short-form Weierstrass curve y^2 = x^3 + a*x + b.
// Points are represented as affine (x, y) big.Int pairs; per Add's convention,
// (0, 0) stands for the point at infinity.
type Curve interface {
	// Params returns the parameters for the curve.
	Params() *CurveParams

	// IsOnCurve reports whether the given (x,y) lies on the curve.
	IsOnCurve(x, y *big.Int) bool

	// Add returns the sum of (x1,y1) and (x2,y2)
	Add(x1, y1, x2, y2 *big.Int) (x, y *big.Int)

	// Double returns 2*(x,y)
	Double(x1, y1 *big.Int) (x, y *big.Int)

	// ScalarMult returns k*(Bx,By) where k is a number in big-endian form.
	ScalarMult(x1, y1 *big.Int, k []byte) (x, y *big.Int)

	// ScalarBaseMult returns k*(Gx, Gy) where (Gx, Gy) is the base point of the group
	// and k is a number in big-endian form.
	ScalarBaseMult(k []byte) (x, y *big.Int)
}
// CurveParams contains the parameters of an elliptic curve and also provides
// a generic, non-constant time implementation of the Curve.
type CurveParams struct {
	P *big.Int // the order of the underlying field
	N *big.Int // the order of the base point
	B *big.Int // b parameter
	A *big.Int // a parameter
	Gx, Gy *big.Int // (x,y) of the base point
	BitSize int // the size of the underlying field
	Name string // the canonical name of the curve
}
// Params returns the receiver itself, satisfying the Curve interface.
func (curve *CurveParams) Params() *CurveParams {
	return curve
}
// IsOnCurve reports whether (x, y) satisfies y^2 = x^3 + a*x + b (mod P).
func (curve *CurveParams) IsOnCurve(x, y *big.Int) bool {
	// Left-hand side: y^2 mod P.
	lhs := new(big.Int).Mul(y, y)
	lhs.Mod(lhs, curve.P)

	// Right-hand side: x^3 + a*x + b mod P.
	rhs := new(big.Int).Mul(x, x)
	rhs.Mul(rhs, x)
	ax := new(big.Int).Mul(curve.A, x)
	rhs.Add(rhs, ax)
	rhs.Add(rhs, curve.B)
	rhs.Mod(rhs, curve.P)

	return lhs.Cmp(rhs) == 0
}
// Add takes two points (x1, y1) and (x2, y2) and returns their sum.
// It is assumed that "point at infinity" is (0, 0): adding it returns the
// other operand, and adding a point to its inverse yields (0, 0). Doubling
// uses the tangent slope; distinct points use the chord slope.
func (curve *CurveParams) Add(x1, y1, x2, y2 *big.Int) (x, y *big.Int) {
	// https://en.wikipedia.org/wiki/Elliptic_curve_point_multiplication#Point_addition
	if x1.Cmp(helpers.BigZero) == 0 && y1.Cmp(helpers.BigZero) == 0 {
		return x2, y2
	}
	if x2.Cmp(helpers.BigZero) == 0 && y2.Cmp(helpers.BigZero) == 0 {
		return x1, y1
	}

	// P + (-P) = the point at infinity.
	ix, iy := Inverse(curve, x2, y2)
	if x1.Cmp(ix) == 0 && y1.Cmp(iy) == 0 {
		return new(big.Int).Set(helpers.BigZero), new(big.Int).Set(helpers.BigZero)
	}

	m := new(big.Int)
	tmp := new(big.Int)
	if x1.Cmp(x2) == 0 && y1.Cmp(y2) == 0 {
		tmp.Mul(helpers.BigTwo, y1).ModInverse(tmp, curve.P) // tmp = (2 * y1) ^ (-1) mod curve.P
		m.Exp(x1, helpers.BigTwo, curve.P).Mul(m, helpers.BigThree).Add(m, curve.A).Mul(m, tmp).Mod(m, curve.P) // m = (3 * (x1 ^ 2) + a) * ((2 * y1) ^ (-1) mod curve.P) mod curve.P
	} else {
		tmp.Sub(x2, x1).ModInverse(tmp, curve.P) // tmp = (x2 - x1) ^ (-1) mod curve.P
		m.Sub(y2, y1).Mul(m, tmp).Mod(m, curve.P) // m = (y2 - y1) * ((x2 - x1) ^ (-1) mod curve.P) mod curve.P
	}

	tmp.Add(x1, x2).Neg(tmp).Mod(tmp, curve.P) // tmp = -(x1 + x2) mod curve.P
	x3 := new(big.Int).Exp(m, helpers.BigTwo, curve.P) // x3 := m ^ 2 mod curve.P
	x3.Add(x3, tmp).Mod(x3, curve.P) // x3 = m ^ 2 - (x1 + x2) mod curve.P

	tmp.Sub(x1, x3).Mod(tmp, curve.P) // tmp = (x1 - x3) mod curve.P
	y3 := new(big.Int).Mul(m, tmp) // y3 := m * (x1 - x3)
	y3.Mod(y3, curve.P).Sub(y3, y1).Mod(y3, curve.P) // y3 = m * (x1 - x3) - y1 mod curve.P

	return x3, y3
}
// Double returns 2*(x1, y1), implemented as Add of the point with itself.
func (curve *CurveParams) Double(x1, y1 *big.Int) (x, y *big.Int) {
	return curve.Add(x1, y1, x1, y1)
}
// ScalarMult returns k*(xIn, yIn) using the (non-constant-time) double-and-add
// algorithm. An empty k yields the point at infinity (0, 0).
func (curve *CurveParams) ScalarMult(xIn, yIn *big.Int, k []byte) (x, y *big.Int) {
	// https://en.wikipedia.org/wiki/Elliptic_curve_point_multiplication#Double-and-add
	// Start from the point at infinity, represented as (0, 0).
	x = new(big.Int).Set(helpers.BigZero)
	y = new(big.Int).Set(helpers.BigZero)

	if len(k) == 0 {
		return
	}

	pointX := new(big.Int).Set(xIn)
	pointY := new(big.Int).Set(yIn)
	bigK := new(big.Int).SetBytes(k)
	tmp := new(big.Int)
	// Scan k from the least-significant bit, doubling the running point and
	// adding it in whenever the current bit is set.
	for bigK.Cmp(helpers.BigZero) != 0 {
		if tmp.And(bigK, helpers.BigOne).Cmp(helpers.BigOne) == 0 {
			x, y = curve.Add(x, y, pointX, pointY)
		}
		pointX, pointY = curve.Double(pointX, pointY)
		bigK.Rsh(bigK, 1)
	}

	return
}
// ScalarBaseMult returns k*(Gx, Gy), the scalar multiple of the base point.
func (curve *CurveParams) ScalarBaseMult(k []byte) (x, y *big.Int) {
	return curve.ScalarMult(curve.Gx, curve.Gy, k)
}
// GenerateKey returns a private scalar (big-endian bytes) and the matching
// public point on curve. Candidate scalars are drawn from rng (rand.Reader
// when rng is nil) and rejected until one is less than the group order N.
func GenerateKey(curve Curve, rng io.Reader) (priv []byte, x, y *big.Int, err error) {
	if rng == nil {
		rng = rand.Reader
	}

	N := curve.Params().N
	bitSize := N.BitLen()
	byteLen := (bitSize + 7) >> 3
	priv = make([]byte, byteLen)

	for x == nil {
		_, err = io.ReadFull(rng, priv)
		if err != nil {
			return
		}

		// Rejection sampling: retry when the candidate is >= N.
		if new(big.Int).SetBytes(priv).Cmp(N) >= 0 {
			continue
		}

		x, y = curve.ScalarBaseMult(priv)
	}
	return
}
// Inverse returns the additive inverse of (x, y), i.e. (x, P-y mod P).
func Inverse(curve Curve, x, y *big.Int) (ix *big.Int, iy *big.Int) {
	p := curve.Params().P
	ix = new(big.Int).Set(x)
	iy = new(big.Int).Sub(p, y)
	iy.Mod(iy, p)
	return
}
// GeneratePoint returns a random point on the curve: it draws random x values
// until x^3 + a*x + b is a quadratic residue mod P, then returns x with a
// square root of that value. It panics if the random source fails.
func GeneratePoint(curve Curve) (*big.Int, *big.Int) {
	for {
		x, err := rand.Int(rand.Reader, curve.Params().P)
		if err != nil {
			panic(err)
		}

		// Evaluate the curve's right-hand side at x.
		x3 := new(big.Int).Mul(x, x)
		x3.Mul(x3, x)

		ax := new(big.Int).Mul(curve.Params().A, x)

		x3.Add(x3, ax)
		x3.Add(x3, curve.Params().B)
		x3.Mod(x3, curve.Params().P)

		// ModSqrt returns nil when x3 is a non-residue; try another x.
		y := new(big.Int).ModSqrt(x3, curve.Params().P)
		if y != nil {
			return x, y
		}
	}
}
// Marshal converts a point into the uncompressed form specified in section
// 4.3.6 of ANSI X9.62: a 0x04 tag byte followed by the fixed-width big-endian
// X and Y coordinates.
func Marshal(curve Curve, x, y *big.Int) []byte {
	byteLen := (curve.Params().BitSize + 7) >> 3

	ret := make([]byte, 1+2*byteLen)
	ret[0] = 4 // uncompressed point

	xBytes := x.Bytes()
	copy(ret[1+byteLen-len(xBytes):], xBytes)
	yBytes := y.Bytes()
	copy(ret[1+2*byteLen-len(yBytes):], yBytes)
	return ret
}
// Unmarshal converts a point, serialized by Marshal, into an x, y pair.
// It is an error if the point is not in uncompressed form or is not on the curve.
// On error, x = nil.
func Unmarshal(curve Curve, data []byte) (x, y *big.Int) {
	byteLen := (curve.Params().BitSize + 7) >> 3
	if len(data) != 1+2*byteLen {
		return
	}
	if data[0] != 4 { // uncompressed form
		return
	}
	p := curve.Params().P
	x = new(big.Int).SetBytes(data[1 : 1+byteLen])
	y = new(big.Int).SetBytes(data[1+byteLen:])
	// Coordinates must be field elements (< P) and satisfy the curve equation.
	if x.Cmp(p) >= 0 || y.Cmp(p) >= 0 {
		return nil, nil
	}
	if !curve.IsOnCurve(x, y) {
		return nil, nil
	}
	return
}
// Package-level curve singletons, populated by the init functions below
// (driven by initAll).
var p128, p128v1, p128v2, p128v3 *CurveParams
var p4 *CurveParams
var p256 *CurveParams
var p224 *CurveParams
var p48 *CurveParams
// initAll populates every package-level curve; it is intended to be run once,
// guarded by initonce.
func initAll() {
	initP128()
	initP4()
	initP256()
	initP224()
	initP128V1()
	initP128V2()
	initP128V3()
	initP48()
}

// initonce guards initAll so curve parameters are only built once.
var initonce sync.Once
// initP256 populates the NIST P-256 curve parameters (the P, N, B, Gx and Gy
// values match FIPS 186 / SEC 2 secp256r1).
func initP256() {
	p256 = &CurveParams{Name: "P-256"}
	p256.P, _ = new(big.Int).SetString("115792089210356248762697446949407573530086143415290314195533631308867097853951", 10)
	p256.N, _ = new(big.Int).SetString("115792089210356248762697446949407573529996955224135760342422259061068512044369", 10)
	p256.B, _ = new(big.Int).SetString("5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b", 16)
	p256.A, _ = new(big.Int).SetString("-3", 10)
	p256.Gx, _ = new(big.Int).SetString("6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296", 16)
	p256.Gy, _ = new(big.Int).SetString("4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5", 16)
	// P-256 is defined over a 256-bit field. The previous value (127) made
	// Marshal/Unmarshal size their coordinate buffers for a 16-byte field,
	// truncating every marshaled point.
	p256.BitSize = 256
}
// initP224 populates the NIST P-224 curve parameters.
func initP224() {
	// See FIPS 186-3, section D.2.2
	p224 = &CurveParams{Name: "P-224"}
	p224.P, _ = new(big.Int).SetString("26959946667150639794667015087019630673557916260026308143510066298881", 10)
	p224.N, _ = new(big.Int).SetString("26959946667150639794667015087019625940457807714424391721682722368061", 10)
	p224.B, _ = new(big.Int).SetString("b4050a850c04b3abf54132565044b0b7d7bfd8ba270b39432355ffb4", 16)
	p224.A, _ = new(big.Int).SetString("-3", 10)
	p224.Gx, _ = new(big.Int).SetString("b70e0cbd6bb4bf7f321390b94a03c1d356c21122343280d6115c1d21", 16)
	p224.Gy, _ = new(big.Int).SetString("bd376388b5f723fb4c22dfe6cd4375a05a07476444d5819985007e34", 16)
	p224.BitSize = 224
}
// initP4 constructs the tiny P-4 test curve over GF(11); N, Gx and Gy are
// left nil, matching its testing-only purpose (see the P4 accessor).
func initP4() {
	fromDec := func(s string) *big.Int {
		n, _ := new(big.Int).SetString(s, 10)
		return n
	}
	p4 = &CurveParams{
		Name:    "P-4",
		P:       fromDec("11"),
		B:       fromDec("1"),
		A:       fromDec("-3"),
		BitSize: 4,
	}
}
// initP128 constructs the Cryptopals challenge 59 base curve
// y^2 = x^3 - 95051*x + 11279326 (see the P128 accessor).
func initP128() {
	fromDec := func(s string) *big.Int {
		n, _ := new(big.Int).SetString(s, 10)
		return n
	}
	p128 = &CurveParams{
		Name:    "P-128",
		P:       fromDec("233970423115425145524320034830162017933"),
		N:       fromDec("29246302889428143187362802287225875743"),
		B:       fromDec("11279326"),
		A:       fromDec("-95051"),
		Gx:      fromDec("182"),
		Gy:      fromDec("85518893674295321206118380980485522083"),
		BitSize: 128,
	}
}
// initP128V1 constructs the first malicious challenge 59 variant: it shares
// P-128's prime, A and generator but uses B = 210 (see the P128V1 accessor).
func initP128V1() {
	fromDec := func(s string) *big.Int {
		n, _ := new(big.Int).SetString(s, 10)
		return n
	}
	p128v1 = &CurveParams{
		Name:    "P-128-V1",
		P:       fromDec("233970423115425145524320034830162017933"),
		N:       fromDec("233970423115425145550826547352470124412"),
		B:       fromDec("210"),
		A:       fromDec("-95051"),
		Gx:      fromDec("182"),
		Gy:      fromDec("85518893674295321206118380980485522083"),
		BitSize: 128,
	}
}
// initP128V2 constructs the second malicious challenge 59 variant: it shares
// P-128's prime, A and generator but uses B = 504 (see the P128V2 accessor).
func initP128V2() {
	fromDec := func(s string) *big.Int {
		n, _ := new(big.Int).SetString(s, 10)
		return n
	}
	p128v2 = &CurveParams{
		Name:    "P-128-V2",
		P:       fromDec("233970423115425145524320034830162017933"),
		N:       fromDec("233970423115425145544350131142039591210"),
		B:       fromDec("504"),
		A:       fromDec("-95051"),
		Gx:      fromDec("182"),
		Gy:      fromDec("85518893674295321206118380980485522083"),
		BitSize: 128,
	}
}
// initP128V3 constructs the third malicious challenge 59 variant: it shares
// P-128's prime, A and generator but uses B = 727 (see the P128V3 accessor).
func initP128V3() {
	fromDec := func(s string) *big.Int {
		n, _ := new(big.Int).SetString(s, 10)
		return n
	}
	p128v3 = &CurveParams{
		Name:    "P-128-V3",
		P:       fromDec("233970423115425145524320034830162017933"),
		N:       fromDec("233970423115425145545378039958152057148"),
		B:       fromDec("727"),
		A:       fromDec("-95051"),
		Gx:      fromDec("182"),
		Gy:      fromDec("85518893674295321206118380980485522083"),
		BitSize: 128,
	}
}
// initP48 constructs the 48-bit P-48 CTF curve (see the P48 accessor for the
// write-up reference).
func initP48() {
	fromDec := func(s string) *big.Int {
		n, _ := new(big.Int).SetString(s, 10)
		return n
	}
	p48 = &CurveParams{
		Name:    "P-48",
		P:       fromDec("146150163733117"),
		N:       fromDec("146150168402890"),
		B:       fromDec("1242422"),
		A:       fromDec("544333"),
		Gx:      fromDec("27249639878388"),
		Gy:      fromDec("14987583413657"),
		BitSize: 48,
	}
}
// P128 returns a Curve which implements Cryptopals P-128 defined in the challenge 59:
// y^2 = x^3 - 95051*x + 11279326.
// The curve parameters are initialized lazily, at most once, on first use.
func P128() Curve {
	initonce.Do(initAll)
	return p128
}
// P128V1 returns a malicious curve from Cryptopals challenge 59:
// y^2 = x^3 - 95051*x + 210.
// The curve parameters are initialized lazily, at most once, on first use.
func P128V1() Curve {
	initonce.Do(initAll)
	return p128v1
}
// P128V2 returns a malicious curve from Cryptopals challenge 59:
// y^2 = x^3 - 95051*x + 504.
// The curve parameters are initialized lazily, at most once, on first use.
func P128V2() Curve {
	initonce.Do(initAll)
	return p128v2
}
// P128V3 returns a malicious curve from Cryptopals challenge 59:
// y^2 = x^3 - 95051*x + 727.
// The curve parameters are initialized lazily, at most once, on first use.
func P128V3() Curve {
	initonce.Do(initAll)
	return p128v3
}
// P4 returns a Curve which implement y^2 = x^3 -3x + 1 curve for testing purposes.
// The curve parameters are initialized lazily, at most once, on first use.
func P4() Curve {
	initonce.Do(initAll)
	return p4
}
// P256 returns the P-256 curve.
// The curve parameters are initialized lazily, at most once, on first use.
func P256() Curve {
	initonce.Do(initAll)
	return p256
}
// P224 returns the P-224 curve.
// The curve parameters are initialized lazily, at most once, on first use.
func P224() Curve {
	initonce.Do(initAll)
	return p224
}
// P48 returns the P-48 curve, see
// http://mslc.ctf.su/wp/hack-lu-ctf-2011-wipe-out-the-klingons-400/.
// The curve parameters are initialized lazily, at most once, on first use.
func P48() Curve {
	initonce.Do(initAll)
	return p48
}
package tuple
import (
"golang.org/x/exp/constraints"
)
// OrderedComparisonResult is the three-way outcome of an ordered tuple
// comparison: a zero value means the tuples are equal, a negative value means
// the host tuple is less than the guest, and a positive value means the host
// tuple is greater than the guest.
type OrderedComparisonResult int

// Comparable is a constraint interface for complex tuple elements that can be
// compared to other instances. In order to compare tuples, either all of
// their elements must be Ordered, or Comparable.
type Comparable[T any] interface {
	CompareTo(guest T) OrderedComparisonResult
}

// Equalable is a constraint interface for complex tuple elements whose
// equality to other instances can be tested.
type Equalable[T any] interface {
	Equal(guest T) bool
}

// Equal reports whether the compared values are equal.
func (r OrderedComparisonResult) Equal() bool { return r == 0 }

// LessThan reports whether the host is less than the guest.
func (r OrderedComparisonResult) LessThan() bool { return r < 0 }

// LessOrEqual reports whether the host is less than or equal to the guest.
func (r OrderedComparisonResult) LessOrEqual() bool { return r <= 0 }

// GreaterThan reports whether the host is greater than the guest.
func (r OrderedComparisonResult) GreaterThan() bool { return r > 0 }

// GreaterOrEqual reports whether the host is greater than or equal to the guest.
func (r OrderedComparisonResult) GreaterOrEqual() bool { return r >= 0 }

// EQ is shorthand for Equal.
func (r OrderedComparisonResult) EQ() bool { return r.Equal() }

// LT is shorthand for LessThan.
func (r OrderedComparisonResult) LT() bool { return r.LessThan() }

// LE is shorthand for LessOrEqual.
func (r OrderedComparisonResult) LE() bool { return r.LessOrEqual() }

// GT is shorthand for GreaterThan.
func (r OrderedComparisonResult) GT() bool { return r.GreaterThan() }

// GE is shorthand for GreaterOrEqual.
func (r OrderedComparisonResult) GE() bool { return r.GreaterOrEqual() }
// multiCompare evaluates the given comparison thunks in order and returns the
// first non-equal result. It short-circuits: once a thunk reports a non-equal
// result, the remaining thunks are not invoked. If every thunk reports
// equality (or none are given), the result is 0 (equal).
func multiCompare(predicates ...func() OrderedComparisonResult) OrderedComparisonResult {
	result := OrderedComparisonResult(0)
	for i := 0; i < len(predicates) && result.Equal(); i++ {
		result = predicates[i]()
	}
	return result
}
// compareOrdered returns the comparison result between the host and guest values provided they match the Ordered constraint.
func compareOrdered[T constraints.Ordered](host, guest T) OrderedComparisonResult {
if host < guest {
return -1
}
if host > guest {
return 1
}
return 0
} | comparison.go | 0.898941 | 0.511412 | comparison.go | starcoder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.