code
stringlengths
114
1.05M
path
stringlengths
3
312
quality_prob
float64
0.5
0.99
learning_prob
float64
0.2
1
filename
stringlengths
3
168
kind
stringclasses
1 value
package query // GoColumnType represents the GO type that corresponds to a database column type GoColumnType int const ( ColTypeUnknown GoColumnType = iota ColTypeBytes ColTypeString ColTypeInteger ColTypeUnsigned ColTypeInteger64 ColTypeUnsigned64 ColTypeDateTime ColTypeFloat ColTypeDouble ColTypeBool ) // String returns the constant type name as a string func (g GoColumnType) String() string { switch g { case ColTypeUnknown: return "ColTypeUnknown" case ColTypeBytes: return "ColTypeBytes" case ColTypeString: return "ColTypeString" case ColTypeInteger: return "ColTypeInteger" case ColTypeUnsigned: return "ColTypeUnsigned" case ColTypeInteger64: return "ColTypeInteger64" case ColTypeUnsigned64: return "ColTypeUnsigned64" case ColTypeDateTime: return "ColTypeDateTime" case ColTypeFloat: return "ColTypeFloat" case ColTypeDouble: return "ColTypeDouble" case ColTypeBool: return "ColTypeBool" } return "" } // GoType returns the actual GO type as go code func (g GoColumnType) GoType() string { switch g { case ColTypeUnknown: return "Unknown" case ColTypeBytes: return "[]byte" case ColTypeString: return "string" case ColTypeInteger: return "int" case ColTypeUnsigned: return "uint" case ColTypeInteger64: return "int64" case ColTypeUnsigned64: return "uint64" case ColTypeDateTime: return "datetime.DateTime" case ColTypeFloat: return "float32" // always internally represent with max bits case ColTypeDouble: return "float64" // always internally represent with max bits case ColTypeBool: return "bool" } return "" } // DefaultValue returns a string that represents the GO default value for the corresponding type func (g GoColumnType) DefaultValue() string { switch g { case ColTypeUnknown: return "" case ColTypeBytes: return "" case ColTypeString: return "\"\"" case ColTypeInteger: return "0" case ColTypeUnsigned: return "0" case ColTypeInteger64: return "0" case ColTypeUnsigned64: return "0" case ColTypeDateTime: return "datetime.DateTime{}" /* v, _ := 
goradd.DateTime{}.MarshalText() s := string(v[:]) return fmt.Sprintf("%#v", s)*/ case ColTypeFloat: return "0.0" // always internally represent with max bits case ColTypeDouble: return "0.0" // always internally represent with max bits case ColTypeBool: return "false" } return "" } func ColTypeFromGoTypeString(name string) GoColumnType { switch name { case "Unknown": return ColTypeUnknown case "[]byte": return ColTypeBytes case "string": return ColTypeString case "int": return ColTypeInteger case "uint": return ColTypeUnsigned case "int64": return ColTypeInteger64 case "uint64": return ColTypeUnsigned64 case "datetime.DateTime": return ColTypeDateTime case "float32": return ColTypeFloat case "float64": return ColTypeDouble case "bool": return ColTypeBool default: panic("unknown column go type " + name) } }
pkg/orm/query/goColumnType.go
0.806967
0.510435
goColumnType.go
starcoder
package visitorgen

import "strings"

// simplified ast - when reading the golang ast of the ast.go file, we translate the golang ast objects
// to this much simpler format, that contains only the necessary information and no more
type (
	// SourceFile contains all important lines from an ast.go file
	SourceFile struct {
		lines []Sast
	}

	// Sast or simplified AST, is a representation of the ast.go lines we are interested in
	Sast interface {
		toSastString() string
	}

	// InterfaceDeclaration represents a declaration of an interface. This is used to keep track of which types
	// need to be handled by the visitor framework
	InterfaceDeclaration struct {
		name, block string
	}

	// TypeAlias is used whenever we see a `type XXX YYY` - XXX is the new name for YYY.
	// Note that YYY could be an array or a reference
	TypeAlias struct {
		name string
		typ  Type
	}

	// FuncDeclaration represents a function declaration. These are tracked to know which types implement interfaces.
	FuncDeclaration struct {
		receiver    *Field
		name, block string
		arguments   []*Field
	}

	// StructDeclaration represents a struct. It contains the fields and their types
	StructDeclaration struct {
		name   string
		fields []*Field
	}

	// Field is a field in a struct - a name with a type tuple
	Field struct {
		name string
		typ  Type
	}

	// Type represents a type in the golang type system. Used to keep track of type we need to handle,
	// and the types of fields.
	Type interface {
		toTypString() string
		rawTypeName() string
	}

	// TypeString is a raw type name, such as `string`
	TypeString struct {
		typName string
	}

	// Ref is a reference to something, such as `*string`
	Ref struct {
		inner Type
	}

	// Array is an array of things, such as `[]string`
	Array struct {
		inner Type
	}
)

var _ Sast = (*InterfaceDeclaration)(nil)
var _ Sast = (*StructDeclaration)(nil)
var _ Sast = (*FuncDeclaration)(nil)
var _ Sast = (*TypeAlias)(nil)

var _ Type = (*TypeString)(nil)
var _ Type = (*Ref)(nil)
var _ Type = (*Array)(nil)

// String returns a textual representation of the SourceFile. This is for testing purposes
func (t *SourceFile) String() string {
	// strings.Builder avoids the quadratic cost of += concatenation in a loop.
	var sb strings.Builder
	for _, l := range t.lines {
		sb.WriteString(l.toSastString())
		sb.WriteString("\n")
	}
	return sb.String()
}

func (t *Ref) toTypString() string {
	return "*" + t.inner.toTypString()
}

func (t *Array) toTypString() string {
	return "[]" + t.inner.toTypString()
}

func (t *TypeString) toTypString() string {
	return t.typName
}

func (f *FuncDeclaration) toSastString() string {
	var receiver string
	if f.receiver != nil {
		receiver = "(" + f.receiver.String() + ") "
	}
	// Join the argument list once instead of appending ", " manually per element.
	args := make([]string, 0, len(f.arguments))
	for _, arg := range f.arguments {
		args = append(args, arg.String())
	}
	return "func " + receiver + f.name + "(" + strings.Join(args, ", ") + ") {" + blockInNewLines(f.block) + "}"
}

func (i *InterfaceDeclaration) toSastString() string {
	return "type " + i.name + " interface {" + blockInNewLines(i.block) + "}"
}

func (a *TypeAlias) toSastString() string {
	return "type " + a.name + " " + a.typ.toTypString()
}

func (s *StructDeclaration) toSastString() string {
	var block strings.Builder
	for _, f := range s.fields {
		block.WriteString("\t")
		block.WriteString(f.String())
		block.WriteString("\n")
	}
	return "type " + s.name + " struct {" + blockInNewLines(block.String()) + "}"
}

// blockInNewLines wraps a non-empty block body in leading/trailing newlines.
func blockInNewLines(block string) string {
	if block == "" {
		return ""
	}
	return "\n" + block + "\n"
}

// String returns a string representation of a field
func (f *Field) String() string {
	if f.name != "" {
		return f.name + " " + f.typ.toTypString()
	}
	return f.typ.toTypString()
}

func (t *TypeString) rawTypeName() string {
	return t.typName
}

func (t *Ref) rawTypeName() string {
	return t.inner.rawTypeName()
}

func (t *Array) rawTypeName() string {
	return t.inner.rawTypeName()
}
go/vt/sqlparser/visitorgen/sast.go
0.669529
0.560192
sast.go
starcoder
package migration import ( "database/sql" "fmt" "magma/orc8r/cloud/go/sqorc" "github.com/pkg/errors" ) func DropNewTables(tx *sql.Tx) error { tablesToDrop := []string{ NetworksTable, NetworkConfigTable, EntityTable, EntityAssocTable, EntityAclTable, deviceServiceTable, StateServiceTable, } for _, tableName := range tablesToDrop { _, err := tx.Exec(fmt.Sprintf("DROP TABLE IF EXISTS %s CASCADE", tableName)) if err != nil { return errors.Wrapf(err, "failed to drop table %s", tableName) } } return nil } func SetupTables(tx *sql.Tx, builder sqorc.StatementBuilder) error { // device service tables _, err := builder.CreateTable(deviceServiceTable). IfNotExists(). Column(BlobNidCol).Type(sqorc.ColumnTypeText).NotNull().EndColumn(). Column(BlobTypeCol).Type(sqorc.ColumnTypeText).NotNull().EndColumn(). Column(BlobKeyCol).Type(sqorc.ColumnTypeText).NotNull().EndColumn(). Column(BlobValCol).Type(sqorc.ColumnTypeBytes).EndColumn(). Column(BlobVerCol).Type(sqorc.ColumnTypeInt).NotNull().Default(0).EndColumn(). PrimaryKey(BlobNidCol, BlobTypeCol, BlobKeyCol). RunWith(tx). Exec() if err != nil { return errors.Wrap(err, "failed to create devices table") } // state service tables _, err = builder.CreateTable(StateServiceTable). IfNotExists(). Column(BlobNidCol).Type(sqorc.ColumnTypeText).NotNull().EndColumn(). Column(BlobTypeCol).Type(sqorc.ColumnTypeText).NotNull().EndColumn(). Column(BlobKeyCol).Type(sqorc.ColumnTypeText).NotNull().EndColumn(). Column(BlobValCol).Type(sqorc.ColumnTypeBytes).EndColumn(). Column(BlobVerCol).Type(sqorc.ColumnTypeInt).NotNull().Default(0).EndColumn(). PrimaryKey(BlobNidCol, BlobTypeCol, BlobKeyCol). RunWith(tx). Exec() if err != nil { return errors.Wrap(err, "failed to create states table") } // configurator tables _, err = builder.CreateTable(NetworksTable). IfNotExists(). Column(NwIDCol).Type(sqorc.ColumnTypeText).PrimaryKey().EndColumn(). Column(NwNameCol).Type(sqorc.ColumnTypeText).EndColumn(). 
Column(NwDescCol).Type(sqorc.ColumnTypeText).EndColumn(). Column(NwVerCol).Type(sqorc.ColumnTypeInt).NotNull().Default(0).EndColumn(). RunWith(tx). Exec() if err != nil { return errors.Wrap(err, "failed to create networks table") } _, err = builder.CreateTable(NetworkConfigTable). IfNotExists(). Column(NwcIDCol).Type(sqorc.ColumnTypeText).EndColumn(). Column(NwcTypeCol).Type(sqorc.ColumnTypeText).NotNull().EndColumn(). Column(NwcValCol).Type(sqorc.ColumnTypeBytes).EndColumn(). PrimaryKey(NwcIDCol, NwcTypeCol). ForeignKey(NetworksTable, map[string]string{NwcIDCol: NwIDCol}, sqorc.ColumnOnDeleteCascade). RunWith(tx). Exec() if err != nil { return errors.Wrap(err, "failed to create network configs table") } // Create an internal-only primary key (UUID) for entities. // This keeps index size in control and supporting table schemas simpler. _, err = builder.CreateTable(EntityTable). IfNotExists(). Column(EntPkCol).Type(sqorc.ColumnTypeText).PrimaryKey().EndColumn(). Column(EntNidCol).Type(sqorc.ColumnTypeText).EndColumn(). Column(EntTypeCol).Type(sqorc.ColumnTypeText).NotNull().EndColumn(). Column(EntKeyCol).Type(sqorc.ColumnTypeText).NotNull().EndColumn(). Column(EntGidCol).Type(sqorc.ColumnTypeText).NotNull().EndColumn(). Column(EntNameCol).Type(sqorc.ColumnTypeText).EndColumn(). Column(EntDescCol).Type(sqorc.ColumnTypeText).EndColumn(). Column(EntPidCol).Type(sqorc.ColumnTypeText).EndColumn(). Column(EntConfCol).Type(sqorc.ColumnTypeBytes).EndColumn(). Column(EntVerCol).Type(sqorc.ColumnTypeInt).NotNull().Default(0).EndColumn(). Unique(EntNidCol, EntKeyCol, EntTypeCol). Unique(EntPidCol). ForeignKey(NetworksTable, map[string]string{EntNidCol: NwIDCol}, sqorc.ColumnOnDeleteCascade). RunWith(tx). Exec() if err != nil { return errors.Wrap(err, "failed to create entities table") } _, err = builder.CreateTable(EntityAssocTable). IfNotExists(). Column(AFrCol).Type(sqorc.ColumnTypeText).EndColumn(). Column(AToCol).Type(sqorc.ColumnTypeText).EndColumn(). 
PrimaryKey(AFrCol, AToCol). ForeignKey(EntityTable, map[string]string{AFrCol: EntPkCol}, sqorc.ColumnOnDeleteCascade). ForeignKey(EntityTable, map[string]string{AToCol: EntPkCol}, sqorc.ColumnOnDeleteCascade). RunWith(tx). Exec() if err != nil { return errors.Wrap(err, "failed to create entity assoc table") } _, err = builder.CreateTable(EntityAclTable). IfNotExists(). Column(AclIdCol).Type(sqorc.ColumnTypeText).PrimaryKey().EndColumn(). Column(AclEntCol).Type(sqorc.ColumnTypeText).EndColumn(). Column(AclScopeCol).Type(sqorc.ColumnTypeText).NotNull().EndColumn(). Column(AclPermCol).Type(sqorc.ColumnTypeInt).NotNull().EndColumn(). Column(AclTypeCol).Type(sqorc.ColumnTypeText).NotNull().EndColumn(). Column(AclIdFilterCol).Type(sqorc.ColumnTypeText).EndColumn(). Column(AclVerCol).Type(sqorc.ColumnTypeInt).NotNull().Default(0).EndColumn(). ForeignKey(EntityTable, map[string]string{AclEntCol: EntPkCol}, sqorc.ColumnOnDeleteCascade). RunWith(tx). Exec() if err != nil { return errors.Wrap(err, "failed to create entity acl table") } // Create indexes (index is not implicitly created on a referencing FK) _, err = builder.CreateIndex("graph_id_idx"). IfNotExists(). On(EntityTable). Columns(EntGidCol). RunWith(tx). Exec() if err != nil { return errors.Wrap(err, "failed to create graph ID index") } _, err = builder.CreateIndex("acl_ent_pk_idx"). IfNotExists(). On(EntityAclTable). Columns(AclEntCol). RunWith(tx). Exec() if err != nil { return errors.Wrap(err, "failed to create acl ent PK index") } // Create internal network(s) _, err = builder.Insert(NetworksTable). Columns(NwIDCol, NwNameCol, NwDescCol). Values(InternalNetworkID, internalNetworkName, internalNetworkDescription). OnConflict(nil, NwIDCol). RunWith(tx). Exec() if err != nil { return errors.Wrap(err, "error creating internal networks") } return nil }
orc8r/cloud/go/tools/migrations/m003_configurator/migration/migration_tables.go
0.516595
0.583797
migration_tables.go
starcoder
package swagger const ( Collection = `{ "swagger": "2.0", "info": { "title": "collection.proto", "version": "version not set" }, "schemes": [ "http", "https" ], "consumes": [ "application/json" ], "produces": [ "application/json" ], "paths": { "/exp/collection": { "get": { "operationId": "ListCollections", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/expCollections" } } }, "tags": [ "CollectionService" ] } }, "/exp/collection/{id}": { "get": { "operationId": "GetCollection", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/expCollection" } } }, "parameters": [ { "name": "id", "in": "path", "required": true, "type": "integer", "format": "int32" } ], "tags": [ "CollectionService" ] } } }, "definitions": { "expCollection": { "type": "object", "properties": { "Id": { "type": "integer", "format": "int32" }, "Title": { "type": "string" }, "Image": { "type": "string" }, "Website": { "type": "string" }, "ContactEmail": { "type": "string" }, "BriefDescription": { "type": "string", "title": "Why should users view your collection?" }, "LongDescription": { "type": "string", "title": "Why should users continue and view your lessons?" 
}, "Type": { "type": "string" }, "Tier": { "type": "string" }, "CollectionFile": { "type": "string" }, "Lessons": { "type": "array", "items": { "$ref": "#/definitions/expLessonSummary" } } } }, "expCollections": { "type": "object", "properties": { "collections": { "type": "array", "items": { "$ref": "#/definitions/expCollection" } } } }, "expLessonSummary": { "type": "object", "properties": { "lessonId": { "type": "integer", "format": "int32" }, "lessonName": { "type": "string" }, "lessonDescription": { "type": "string" } } } } } ` Curriculum = `{ "swagger": "2.0", "info": { "title": "curriculum.proto", "version": "version not set" }, "schemes": [ "http", "https" ], "consumes": [ "application/json" ], "produces": [ "application/json" ], "paths": { "/exp/curriculum": { "get": { "operationId": "GetCurriculumInfo", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/expCurriculumInfo" } } }, "tags": [ "CurriculumService" ] } } }, "definitions": { "expCurriculumInfo": { "type": "object", "properties": { "Name": { "type": "string" }, "Description": { "type": "string" }, "Website": { "type": "string" } }, "description": "Use this to return only metadata about the installed curriculum." 
} } } ` Kubelab = `{ "swagger": "2.0", "info": { "title": "kubelab.proto", "version": "version not set" }, "schemes": [ "http", "https" ], "consumes": [ "application/json" ], "produces": [ "application/json" ], "paths": {}, "definitions": { "expConnection": { "type": "object", "properties": { "A": { "type": "string" }, "B": { "type": "string" } } }, "expEndpoint": { "type": "object", "properties": { "Name": { "type": "string" }, "Image": { "type": "string" }, "ConfigurationType": { "type": "string", "title": "Validation for this field will be done post-validation" }, "AdditionalPorts": { "type": "array", "items": { "type": "integer", "format": "int32" }, "title": "Handles any ports not explicitly mentioned in a presentation" }, "Presentations": { "type": "array", "items": { "$ref": "#/definitions/expPresentation" } }, "Host": { "type": "string" } } }, "expKubeLab": { "type": "object", "properties": { "Namespace": { "type": "string" }, "CreateRequest": { "$ref": "#/definitions/expLessonScheduleRequest" }, "Networks": { "type": "array", "items": { "type": "string" } }, "Pods": { "type": "array", "items": { "type": "string" } }, "Services": { "type": "array", "items": { "type": "string" } }, "Ingresses": { "type": "array", "items": { "type": "string" } }, "status": { "$ref": "#/definitions/expStatus" }, "ReachableEndpoints": { "type": "array", "items": { "type": "string" } }, "CurrentStage": { "type": "integer", "format": "int32" } } }, "expKubeLabs": { "type": "object", "properties": { "Items": { "type": "object", "additionalProperties": { "$ref": "#/definitions/expKubeLab" } } } }, "expLesson": { "type": "object", "properties": { "LessonId": { "type": "integer", "format": "int32" }, "Stages": { "type": "array", "items": { "$ref": "#/definitions/expLessonStage" } }, "LessonName": { "type": "string" }, "Endpoints": { "type": "array", "items": { "$ref": "#/definitions/expEndpoint" } }, "Connections": { "type": "array", "items": { "$ref": "#/definitions/expConnection" } 
}, "Category": { "type": "string" }, "LessonDiagram": { "type": "string" }, "LessonVideo": { "type": "string" }, "Tier": { "type": "string" }, "Prereqs": { "type": "array", "items": { "type": "integer", "format": "int32" } }, "Tags": { "type": "array", "items": { "type": "string" } }, "Collection": { "type": "integer", "format": "int32" }, "Description": { "type": "string" }, "Slug": { "type": "string", "title": "This is meant to fill: \"How well do you know \u003cslug\u003e?\"" }, "LessonFile": { "type": "string" }, "LessonDir": { "type": "string" } } }, "expLessonScheduleRequest": { "type": "object", "properties": { "Lesson": { "$ref": "#/definitions/expLesson" }, "OperationType": { "type": "integer", "format": "int32" }, "Uuid": { "type": "string" }, "Stage": { "type": "integer", "format": "int32" }, "Created": { "type": "string", "format": "date-time" } } }, "expLessonStage": { "type": "object", "properties": { "Id": { "type": "integer", "format": "int32" }, "Description": { "type": "string" }, "LabGuide": { "type": "string" }, "JupyterLabGuide": { "type": "boolean", "format": "boolean" }, "VerifyCompleteness": { "type": "boolean", "format": "boolean" }, "VerifyObjective": { "type": "string" } } }, "expPresentation": { "type": "object", "properties": { "Name": { "type": "string" }, "Port": { "type": "integer", "format": "int32" }, "Type": { "type": "string" } } }, "expStatus": { "type": "string", "enum": [ "DONOTUSE", "INITIAL_BOOT", "CONFIGURATION", "READY" ], "default": "DONOTUSE" } } } ` Lesson = `{ "swagger": "2.0", "info": { "title": "lesson.proto", "version": "version not set" }, "schemes": [ "http", "https" ], "consumes": [ "application/json" ], "produces": [ "application/json" ], "paths": { "/exp/lesson": { "get": { "summary": "Retrieve all Lessons with filter", "operationId": "ListLessons", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/expLessons" } } }, "parameters": [ { "name": "Category", "in": 
"query", "required": false, "type": "string" } ], "tags": [ "LessonService" ] } }, "/exp/lesson/{id}": { "get": { "operationId": "GetLesson", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/expLesson" } } }, "parameters": [ { "name": "id", "in": "path", "required": true, "type": "integer", "format": "int32" } ], "tags": [ "LessonService" ] } }, "/exp/lesson/{id}/prereqs": { "get": { "operationId": "GetAllLessonPrereqs", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/expLessonPrereqs" } } }, "parameters": [ { "name": "id", "in": "path", "required": true, "type": "integer", "format": "int32" } ], "tags": [ "LessonService" ] } } }, "definitions": { "expConnection": { "type": "object", "properties": { "A": { "type": "string" }, "B": { "type": "string" } } }, "expEndpoint": { "type": "object", "properties": { "Name": { "type": "string" }, "Image": { "type": "string" }, "ConfigurationType": { "type": "string", "title": "Validation for this field will be done post-validation" }, "AdditionalPorts": { "type": "array", "items": { "type": "integer", "format": "int32" }, "title": "Handles any ports not explicitly mentioned in a presentation" }, "Presentations": { "type": "array", "items": { "$ref": "#/definitions/expPresentation" } }, "Host": { "type": "string" } } }, "expLesson": { "type": "object", "properties": { "LessonId": { "type": "integer", "format": "int32" }, "Stages": { "type": "array", "items": { "$ref": "#/definitions/expLessonStage" } }, "LessonName": { "type": "string" }, "Endpoints": { "type": "array", "items": { "$ref": "#/definitions/expEndpoint" } }, "Connections": { "type": "array", "items": { "$ref": "#/definitions/expConnection" } }, "Category": { "type": "string" }, "LessonDiagram": { "type": "string" }, "LessonVideo": { "type": "string" }, "Tier": { "type": "string" }, "Prereqs": { "type": "array", "items": { "type": "integer", "format": "int32" } }, 
"Tags": { "type": "array", "items": { "type": "string" } }, "Collection": { "type": "integer", "format": "int32" }, "Description": { "type": "string" }, "Slug": { "type": "string", "title": "This is meant to fill: \"How well do you know \u003cslug\u003e?\"" }, "LessonFile": { "type": "string" }, "LessonDir": { "type": "string" } } }, "expLessonPrereqs": { "type": "object", "properties": { "prereqs": { "type": "array", "items": { "type": "integer", "format": "int32" } } } }, "expLessonStage": { "type": "object", "properties": { "Id": { "type": "integer", "format": "int32" }, "Description": { "type": "string" }, "LabGuide": { "type": "string" }, "JupyterLabGuide": { "type": "boolean", "format": "boolean" }, "VerifyCompleteness": { "type": "boolean", "format": "boolean" }, "VerifyObjective": { "type": "string" } } }, "expLessons": { "type": "object", "properties": { "lessons": { "type": "array", "items": { "$ref": "#/definitions/expLesson" } } } }, "expPresentation": { "type": "object", "properties": { "Name": { "type": "string" }, "Port": { "type": "integer", "format": "int32" }, "Type": { "type": "string" } } } } } ` Livelesson = `{ "swagger": "2.0", "info": { "title": "livelesson.proto", "version": "version not set" }, "schemes": [ "http", "https" ], "consumes": [ "application/json" ], "produces": [ "application/json" ], "paths": { "/*": { "get": { "operationId": "HealthCheck", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/expHealthCheckMessage" } } }, "tags": [ "LiveLessonsService" ] } }, "/exp/livelesson": { "post": { "summary": "Request a lab is created, or request the UUID of one that already exists for these parameters.", "operationId": "RequestLiveLesson", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/expLessonUUID" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/expLessonParams" } } ], "tags": 
[ "LiveLessonsService" ] } }, "/exp/livelesson/{id}": { "get": { "summary": "Retrieve details about a lesson", "operationId": "GetLiveLesson", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/expLiveLesson" } } }, "parameters": [ { "name": "id", "in": "path", "required": true, "type": "string" } ], "tags": [ "LiveLessonsService" ] } }, "/exp/livelesson/{id}/verify": { "post": { "operationId": "RequestVerification", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/expVerificationTaskUUID" } } }, "parameters": [ { "name": "id", "in": "path", "required": true, "type": "string" }, { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/expLessonUUID" } } ], "tags": [ "LiveLessonsService" ] } }, "/exp/verification/{id}": { "get": { "operationId": "GetVerification", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/expVerificationTask" } } }, "parameters": [ { "name": "id", "in": "path", "required": true, "type": "string" } ], "tags": [ "LiveLessonsService" ] } } }, "definitions": { "expEndpoint": { "type": "object", "properties": { "Name": { "type": "string" }, "Image": { "type": "string" }, "ConfigurationType": { "type": "string", "title": "Validation for this field will be done post-validation" }, "AdditionalPorts": { "type": "array", "items": { "type": "integer", "format": "int32" }, "title": "Handles any ports not explicitly mentioned in a presentation" }, "Presentations": { "type": "array", "items": { "$ref": "#/definitions/expPresentation" } }, "Host": { "type": "string" } } }, "expHealthCheckMessage": { "type": "object" }, "expKillLiveLessonStatus": { "type": "object", "properties": { "success": { "type": "boolean", "format": "boolean" } } }, "expLessonParams": { "type": "object", "properties": { "lessonId": { "type": "integer", "format": "int32" }, "sessionId": { "type": "string" }, 
"lessonStage": { "type": "integer", "format": "int32" } } }, "expLessonUUID": { "type": "object", "properties": { "id": { "type": "string" } } }, "expLiveLesson": { "type": "object", "properties": { "LessonUUID": { "type": "string" }, "LessonId": { "type": "integer", "format": "int32" }, "LiveEndpoints": { "type": "object", "additionalProperties": { "$ref": "#/definitions/expEndpoint" } }, "LessonStage": { "type": "integer", "format": "int32" }, "LabGuide": { "type": "string" }, "JupyterLabGuide": { "type": "boolean", "format": "boolean" }, "LiveLessonStatus": { "$ref": "#/definitions/expStatus" }, "createdTime": { "type": "string", "format": "date-time" }, "LessonDiagram": { "type": "string" }, "LessonVideo": { "type": "string" }, "Error": { "type": "boolean", "format": "boolean" }, "HealthyTests": { "type": "integer", "format": "int32" }, "TotalTests": { "type": "integer", "format": "int32" } }, "description": "A provisioned lab without the scheduler details. The server will translate from an underlying type\n(i.e. KubeLab) into this, so only the abstract, relevant details are presented." 
}, "expLiveLessons": { "type": "object", "properties": { "items": { "type": "object", "additionalProperties": { "$ref": "#/definitions/expLiveLesson" } } } }, "expPresentation": { "type": "object", "properties": { "Name": { "type": "string" }, "Port": { "type": "integer", "format": "int32" }, "Type": { "type": "string" } } }, "expSession": { "type": "object", "properties": { "id": { "type": "string" } } }, "expSessions": { "type": "object", "properties": { "sessions": { "type": "array", "items": { "$ref": "#/definitions/expSession" } } } }, "expStatus": { "type": "string", "enum": [ "DONOTUSE", "INITIAL_BOOT", "CONFIGURATION", "READY" ], "default": "DONOTUSE" }, "expSyringeState": { "type": "object", "properties": { "Livelessons": { "type": "object", "additionalProperties": { "$ref": "#/definitions/expLiveLesson" }, "title": "Map that contains a mapping of UUIDs to LiveLesson messages" } } }, "expVerificationTask": { "type": "object", "properties": { "liveLessonId": { "type": "string" }, "liveLessonStage": { "type": "integer", "format": "int32" }, "success": { "type": "boolean", "format": "boolean" }, "working": { "type": "boolean", "format": "boolean" }, "message": { "type": "string" }, "completed": { "type": "string", "format": "date-time" } } }, "expVerificationTaskUUID": { "type": "object", "properties": { "id": { "type": "string" } } } } } ` Syringeinfo = `{ "swagger": "2.0", "info": { "title": "syringeinfo.proto", "version": "version not set" }, "schemes": [ "http", "https" ], "consumes": [ "application/json" ], "produces": [ "application/json" ], "paths": { "/exp/syringeinfo": { "get": { "operationId": "GetSyringeInfo", "responses": { "200": { "description": "A successful response.", "schema": { "$ref": "#/definitions/expSyringeInfo" } } }, "tags": [ "SyringeInfoService" ] } } }, "definitions": { "expSyringeInfo": { "type": "object", "properties": { "buildSha": { "type": "string" }, "antidoteSha": { "type": "string" }, "imageVersion": { "type": "string" } } 
} } } ` )
api/exp/swagger/swagger.pb.go
0.692746
0.401981
swagger.pb.go
starcoder
package geom

import "math"

// A Vec3 represents a vector with coordinates X, Y and Z in 3-dimensional
// euclidean space.
type Vec3 struct {
	X, Y, Z float32
}

var (
	// V3Zero is the zero vector (0,0,0).
	V3Zero = Vec3{0, 0, 0}
	// V3Unit is the unit vector (1,1,1).
	V3Unit = Vec3{1, 1, 1}
	// V3UnitX is the x-axis unit vector (1,0,0).
	V3UnitX = Vec3{1, 0, 0}
	// V3UnitY is the y-axis unit vector (0,1,0).
	V3UnitY = Vec3{0, 1, 0}
	// V3UnitZ is the z-axis unit vector (0,0,1).
	V3UnitZ = Vec3{0, 0, 1}
)

// V3 is shorthand for Vec3{X: x, Y: y, Z: z}.
func V3(x, y, z float32) Vec3 {
	return Vec3{x, y, z}
}

// Add returns the vector v+w.
func (v Vec3) Add(w Vec3) Vec3 {
	return V3(v.X+w.X, v.Y+w.Y, v.Z+w.Z)
}

// Sub returns the vector v-w.
func (v Vec3) Sub(w Vec3) Vec3 {
	return V3(v.X-w.X, v.Y-w.Y, v.Z-w.Z)
}

// Mul returns the vector v*s.
func (v Vec3) Mul(s float32) Vec3 {
	return V3(v.X*s, v.Y*s, v.Z*s)
}

// Div returns the vector v/s.
func (v Vec3) Div(s float32) Vec3 {
	return V3(v.X/s, v.Y/s, v.Z/s)
}

// Neg returns the negated vector of v.
func (v Vec3) Neg() Vec3 {
	return V3(-v.X, -v.Y, -v.Z)
}

// Dot returns the dot (a.k.a. scalar) product of v and w.
func (v Vec3) Dot(w Vec3) float32 {
	return v.X*w.X + v.Y*w.Y + v.Z*w.Z
}

// Cross returns the cross product of v and w.
func (v Vec3) Cross(w Vec3) Vec3 {
	return Vec3{
		v.Y*w.Z - v.Z*w.Y,
		v.Z*w.X - v.X*w.Z,
		v.X*w.Y - v.Y*w.X,
	}
}

// CompMul returns the component-wise multiplication of two vectors.
func (v Vec3) CompMul(w Vec3) Vec3 {
	return V3(v.X*w.X, v.Y*w.Y, v.Z*w.Z)
}

// CompDiv returns the component-wise division of two vectors.
func (v Vec3) CompDiv(w Vec3) Vec3 {
	return V3(v.X/w.X, v.Y/w.Y, v.Z/w.Z)
}

// SqDist returns the square of the euclidean distance between two vectors.
func (v Vec3) SqDist(w Vec3) float32 {
	d := v.Sub(w)
	return d.Dot(d)
}

// Dist returns the euclidean distance between two vectors.
func (v Vec3) Dist(w Vec3) float32 {
	return float32(math.Sqrt(float64(v.SqDist(w))))
}

// SqLen returns the square of the length (euclidean norm) of a vector.
func (v Vec3) SqLen() float32 {
	return v.X*v.X + v.Y*v.Y + v.Z*v.Z
}

// Len returns the length (euclidean norm) of a vector.
func (v Vec3) Len() float32 {
	return float32(math.Sqrt(float64(v.SqLen())))
}

// Norm returns the normalized vector of a vector.
func (v Vec3) Norm() Vec3 {
	l := v.Len()
	return V3(v.X/l, v.Y/l, v.Z/l)
}

// Reflect returns the reflection vector of v given a normal n.
func (v Vec3) Reflect(n Vec3) Vec3 {
	d := 2 * v.Dot(n)
	return v.Sub(n.Mul(d))
}

// Lerp returns the linear interpolation between v and w by amount t.
// The amount t is usually a value between 0 and 1. If t=0 v will be
// returned; if t=1 w will be returned.
func (v Vec3) Lerp(w Vec3, t float32) Vec3 {
	return V3(lerp(v.X, w.X, t), lerp(v.Y, w.Y, t), lerp(v.Z, w.Z, t))
}

// Min returns a vector with each component set to the lesser value
// of the corresponding component pair of v and w.
func (v Vec3) Min(w Vec3) Vec3 {
	x := math.Min(float64(v.X), float64(w.X))
	y := math.Min(float64(v.Y), float64(w.Y))
	z := math.Min(float64(v.Z), float64(w.Z))
	return V3(float32(x), float32(y), float32(z))
}

// Max returns a vector with each component set to the greater value
// of the corresponding component pair of v and w.
func (v Vec3) Max(w Vec3) Vec3 {
	x := math.Max(float64(v.X), float64(w.X))
	y := math.Max(float64(v.Y), float64(w.Y))
	z := math.Max(float64(v.Z), float64(w.Z))
	return V3(float32(x), float32(y), float32(z))
}

// Transform transforms vector v with 4x4 matrix m.
func (v Vec3) Transform(m *Mat4) Vec3 {
	return Vec3{
		m[0][0]*v.X + m[1][0]*v.Y + m[2][0]*v.Z + m[3][0],
		m[0][1]*v.X + m[1][1]*v.Y + m[2][1]*v.Z + m[3][1],
		m[0][2]*v.X + m[1][2]*v.Y + m[2][2]*v.Z + m[3][2],
	}
}

// NearEq returns whether v and w are approximately equal. This relation is not
// transitive in general. The tolerance for the floating-point components is
// ±1e-5.
func (v Vec3) NearEq(w Vec3) bool {
	return nearEq(v.X, w.X, epsilon) &&
		nearEq(v.Y, w.Y, epsilon) &&
		nearEq(v.Z, w.Z, epsilon)
}

// String returns a string representation of v like "(3.25, -1.5, 1.2)".
func (v Vec3) String() string {
	return "(" + str(v.X) + ", " + str(v.Y) + ", " + str(v.Z) + ")"
}
vec3.go
0.911805
0.683697
vec3.go
starcoder
package collections

import (
	"fmt"
	"reflect"
)

// Append appends from to a slice to and returns the resulting slice.
// If length of from is one and the only element is a slice of same type as to,
// it will be appended.
//
// to may be nil, a typed nil, or an empty slice; in those cases the result is
// built from the from arguments alone. When element types are incompatible,
// the result falls back to a []interface{} containing all elements.
func Append(to interface{}, from ...interface{}) (interface{}, error) {
	tov, toIsNil := indirect(reflect.ValueOf(to))
	toIsNil = toIsNil || to == nil
	var tot reflect.Type
	if !toIsNil {
		if tov.Kind() != reflect.Slice {
			return nil, fmt.Errorf("expected a slice, got %T", to)
		}
		tot = tov.Type().Elem()
		// NOTE: toIsNil is reused here to mean "to is empty", not just nil.
		toIsNil = tov.Len() == 0
		if len(from) == 1 {
			fromv := reflect.ValueOf(from[0])
			if fromv.Kind() == reflect.Slice {
				if toIsNil {
					// If we get nil []string, we just return the []string
					return from[0], nil
				}
				fromt := reflect.TypeOf(from[0]).Elem()
				// If we get []string []string, we append the from slice to to
				if tot == fromt {
					return reflect.AppendSlice(tov, fromv).Interface(), nil
				} else if !fromt.AssignableTo(tot) {
					// Fall back to a []interface{} slice.
					return appendToInterfaceSliceFromValues(tov, fromv)
				}
				// fromt is assignable to tot (but not identical): fall
				// through to the element-by-element append loop below.
			}
		}
	}
	if toIsNil {
		// Slice is defined elsewhere in this package — presumably it builds
		// a slice from the variadic arguments; verify against its definition.
		return Slice(from...), nil
	}
	for _, f := range from {
		fv := reflect.ValueOf(f)
		if !fv.Type().AssignableTo(tot) {
			// Fall back to a []interface{} slice.
			return appendToInterfaceSlice(tov, from...)
		}
		tov = reflect.Append(tov, fv)
	}
	return tov.Interface(), nil
}

// appendToInterfaceSliceFromValues flattens two reflected slices into a single
// []interface{}, preserving order (all of slice1 first, then slice2).
func appendToInterfaceSliceFromValues(slice1, slice2 reflect.Value) ([]interface{}, error) {
	var tos []interface{}
	for _, slice := range []reflect.Value{slice1, slice2} {
		for i := 0; i < slice.Len(); i++ {
			tos = append(tos, slice.Index(i).Interface())
		}
	}
	return tos, nil
}

// appendToInterfaceSlice copies the reflected slice tov into a []interface{}
// and appends the from elements after it.
func appendToInterfaceSlice(tov reflect.Value, from ...interface{}) ([]interface{}, error) {
	var tos []interface{}
	for i := 0; i < tov.Len(); i++ {
		tos = append(tos, tov.Index(i).Interface())
	}
	tos = append(tos, from...)
	return tos, nil
}

// indirect is borrowed from the Go stdlib: 'text/template/exec.go'
// TODO(bep) consolidate
//
// It walks down pointers and interfaces until reaching a concrete value,
// reporting isNil=true if any link in the chain is nil. Interfaces with
// methods are not unwrapped further.
func indirect(v reflect.Value) (rv reflect.Value, isNil bool) {
	for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() {
		if v.IsNil() {
			return v, true
		}
		if v.Kind() == reflect.Interface && v.NumMethod() > 0 {
			break
		}
	}
	return v, false
}
common/collections/append.go
0.563618
0.449272
append.go
starcoder
package gridspech import "strings" // TileCoordSet represents a mathematical set of coordinates. type TileCoordSet struct { set map[TileCoord]struct{} } // NewTileCoordSet returns a TileCoordSet containing only tiles. func NewTileCoordSet(tiles ...TileCoord) TileCoordSet { var cs TileCoordSet for _, tile := range tiles { cs.Add(tile) } return cs } // Init initializes the tilecoordset. func (ts *TileCoordSet) checkInit() { if ts.set == nil { ts.set = make(map[TileCoord]struct{}) } } // Add adds t to the TileSet ts. func (ts *TileCoordSet) Add(t TileCoord) { ts.checkInit() ts.set[t] = struct{}{} } // Has returns if ts contains t. func (ts TileCoordSet) Has(t TileCoord) bool { _, ok := ts.set[t] return ok } // Remove removes t from ts. func (ts *TileCoordSet) Remove(t TileCoord) { ts.checkInit() delete(ts.set, t) } // RemoveIf removes each value for which pred returns true. func (ts *TileCoordSet) RemoveIf(pred func(coord TileCoord) bool) { for tile := range ts.set { if pred(tile) { ts.Remove(tile) } } } // RemoveAll removes all of the elements in o from ts (making ts the intersection of ts and o) func (ts *TileCoordSet) RemoveAll(o TileCoordSet) { if ts.Len() < o.Len() { for tile := range ts.set { if o.Has(tile) { ts.Remove(tile) } } } else { for tile := range o.set { if ts.Has(tile) { ts.Remove(tile) } } } } // Len returns the number of tiles in ts. func (ts TileCoordSet) Len() int { return len(ts.set) } // Merge adds all tiles in other into ts. func (ts *TileCoordSet) Merge(other TileCoordSet) { ts.checkInit() for tile := range other.set { ts.set[tile] = struct{}{} } } // Eq returns if ts contains exactly the same contents as other. func (ts TileCoordSet) Eq(other TileCoordSet) bool { if ts.Len() != other.Len() { return false } for tile := range ts.set { if !other.Has(tile) { return false } } return true } // Iter returns an iterator for this TileSet. 
func (ts TileCoordSet) Iter() <-chan TileCoord { iter := make(chan TileCoord, 5) go func() { for tile := range ts.set { iter <- tile } close(iter) }() return iter } // Slice returns a slice representation of ts func (ts TileCoordSet) Slice() []TileCoord { slice := make([]TileCoord, 0, len(ts.set)) for tile := range ts.set { slice = append(slice, tile) } return slice } // ToTileSet converts ts into a TileSet func (ts TileCoordSet) ToTileSet(fn func(t TileCoord) Tile) TileSet { var result TileSet for val := range ts.set { result.Add(fn(val)) } return result } func (ts TileCoordSet) String() string { slice := ts.Slice() var maxX, maxY int for _, tile := range slice { if tile.X > maxX { maxX = tile.X } if tile.Y > maxY { maxY = tile.Y } } maxX++ maxY++ tilesAt := make([][]bool, maxX) for x := range tilesAt { tilesAt[x] = make([]bool, maxY) } for _, v := range slice { tilesAt[v.X][v.Y] = true } var sb strings.Builder sb.WriteByte('{') for y := maxY - 1; y >= 0; y-- { for x := 0; x < maxX; x++ { if tilesAt[x][y] { sb.WriteByte('x') } else { sb.WriteByte(' ') } } if y > 0 { sb.WriteByte('|') } } sb.WriteByte('}') return sb.String() } // MultiLineString returns a string representation of this tileset on multiple lines func (ts TileCoordSet) MultiLineString() string { next := ts.String() next = next[1 : len(next)-1] next = strings.ReplaceAll(next, "|", "\n") next += "\n" return next }
tileCoordSet.go
0.827166
0.445952
tileCoordSet.go
starcoder
package levenshtein

import "math"

// DamerauDistance computes the Damerau-Levenshtein distance (optimal string
// alignment variant) between source and target, with a maximum allowable
// distance threshold.
//
// It returns -1 when the distance exceeds threshold; otherwise it returns
// the distance. Only three rows of the dynamic-programming matrix are kept,
// so extra memory is O(min(len(source), len(target))).
func DamerauDistance(source string, target string, threshold int) int {
	length1 := len(source)
	length2 := len(target)

	// Trivial rejection: the distance is at least the difference in lengths.
	if length1-length2 > threshold || length2-length1 > threshold {
		return -1
	}

	// Ensure source is the shorter string so the row buffers are minimal.
	if length1 > length2 {
		swapStr(&source, &target)
		swapInt(&length1, &length2)
	}

	// An empty source needs exactly length2 insertions; the rejection test
	// above has already verified length2 <= threshold.
	// (Previously this case fell through and wrongly returned -1.)
	if length1 == 0 {
		return length2
	}

	maxi := length1
	maxj := length2

	dCurrent := make([]int, maxi+1)
	dMinus1 := make([]int, maxi+1)
	dMinus2 := make([]int, maxi+1)
	var dSwap []int

	for i := 0; i <= maxi; i++ {
		dCurrent[i] = i
	}

	jm1, im1, im2 := 0, 0, -1

	for j := 1; j <= maxj; j++ {
		// Rotate the three row buffers.
		dSwap = dMinus2
		dMinus2 = dMinus1
		dMinus1 = dCurrent
		dCurrent = dSwap

		// minDistance tracks the smallest value in the current row so we can
		// stop once every alignment exceeds the threshold. MaxInt32 replaces
		// the former magic 10000, which broke thresholds >= 10000.
		minDistance := math.MaxInt32
		dCurrent[0] = j
		im1 = 0
		im2 = -1

		for i := 1; i <= maxi; i++ {
			cost := 1
			if source[im1] == target[jm1] {
				cost = 0
			}

			del := dCurrent[im1] + 1
			ins := dMinus1[i] + 1
			sub := dMinus1[im1] + cost

			// Fastest branch structure for the minimum of three ints.
			var min int
			if del > ins {
				if ins > sub {
					min = sub
				} else {
					min = ins
				}
			} else {
				if del > sub {
					min = sub
				} else {
					min = del
				}
			}

			// Transposition of two adjacent characters.
			if i > 1 && j > 1 && source[im2] == target[jm1] && source[im1] == target[j-2] {
				min = minInt(min, dMinus2[im2]+cost)
			}

			dCurrent[i] = min
			if min < minDistance {
				minDistance = min
			}
			im1++
			im2++
		}
		jm1++

		// Every alignment in this row already exceeds the threshold, so the
		// final distance must as well.
		if minDistance > threshold {
			return -1
		}
	}

	result := dCurrent[maxi]
	if result > threshold {
		return -1
	}
	return result
}

// minInt returns the smaller of two ints.
func minInt(x, y int) int {
	if x <= y {
		return x
	}
	return y
}

// swapStr swaps the strings pointed to by str1 and str2.
func swapStr(str1, str2 *string) {
	*str1, *str2 = *str2, *str1
}

// swapInt swaps the ints pointed to by int1 and int2.
func swapInt(int1, int2 *int) {
	*int1, *int2 = *int2, *int1
}
algorithm/levenshtein/levenshtein_damerau.go
0.883751
0.634925
levenshtein_damerau.go
starcoder
package main

// # Bulk Process Manager

// **BPM is a Process manager for [nodejs](http://nodejs.org) projects**

// **Author: [<NAME>](http://eladyarkoni.com)**

// <div style="text-align: center;">
// <img src="https://nodejs.org/static/images/logos/nodejs-new-pantone-black.png" height="100">
// <img src="https://gquintana.github.io//images/logos/golang.png" height="100">
// <img src="https://roma-kvs.org/images/benchmark/leveldb-logo.png" height="100">
// </div>
// <br/>

// BPM is a production process manager for Node.js applications developed in [go language](http://golang.org).
// It allows you to keep applications alive forever and to run them in a cluster mode.
// BPM gives you a set of tools to manage your node production processes through command line or even remotely.

// ## Behind The Scenes

// BPM is using a local http server as a god of all nodejs processes.
// The bpm command will start the server if it's not started yet and communicate with it through rest api calls.
// The BPM local http server is using Google LevelDB, a fast key-value storage library, to store your projects data and status.
// Eventually, the bpm http server monitors your running nodejs processes. You can use the http server remotely to take full control over your nodejs projects.

// Any bpm command line will do the following:

// 1. Starts the local http server if it's not started yet
// 2. Makes a rest api call
// 3. Gets a Response
// 4. Prints the response to command line

// ## Install

// BPM is using [NPM](https://www.npmjs.com/) for the installation process.

// ```
// $ npm install bulk-pm -g
// ```

// ## Usage

// ### Adding a new nodejs project to BPM

// This command adds the nodejs project working directory to BPM.

// ```
// $ bpm add <node_project_directory>
// ```

// The node project must have the package.json file with the main script configured.
// BPM is using the main script to start the process.

// ```
// {
//	"name": "node-project-name",
//	"description": "node project description...",
//	...
//	...
//	"main": "index.js"
// }
// ```

// ### Start Node Project

// This command starts the nodejs project processes.

// ```
// $ bpm start <package_name> [cluster_processes_number]
// ```

// **cluster_processes_number**: The number of processes to start the project in cluster mode.

// * If cluster_processes_number is not defined or 0, then, the node project will be started in normal mode.

// ### Stop Node Project

// This command stops the nodejs project processes.

// ```
// $ bpm stop <package_name>
// ```

// ### Get Status

// This command gets the status of all nodejs projects that are managed in BPM.

// ```
// $ bpm status
// ```

// ## Development Roadmap

// BPM is going to be the ultimate solution for managing NodeJS projects on production environment.
// Here are some of the features that are going to be developed in the near future:

// 1. Process resources monitoring (Memory, CPU...)
// 2. Autoscaling support
// 3. Alerts configuration for DevOps
// 4. Deploy: Deploy nodejs projects to remote servers.
// 5. BPM Center Orchestrator: Manage nodejs projects over multiple servers

// And more...

// ## License

// BSD-2-Clause.
doc.go
0.647575
0.407569
doc.go
starcoder
package linear import ( "errors" "fmt" "github.com/bayesiangopher/bayesiangopher/core" "gonum.org/v1/gonum/mat" "log" "math" ) var ( SVDDecompositionError = errors.New("ошибка во время SVD разложения") SVDResultComputeError = errors.New("вектор b посчитан неправильно") CoefBeforeFittingError = errors.New("обучить") ) // LRtype specifies the treatment of solving type LRtype int const ( // QR specifies solving with QR decomposition QR LRtype = 1 << (iota + 1) // SVD specifies solving with SVD decomposition SVD ) // LinearRegression is main struct for linear regression package type LR struct { regressand *mat.VecDense regressors *mat.Dense parameterVector *mat.VecDense method LRtype targetColumn int } // LinearRegression prepare structure and data for fitting func LinearRegression(train core.Train, targetColumn int, method LRtype) *LR { lr := LR{} lr.method = method lr.targetColumn = targetColumn lr.regressors = mat.NewDense(len(*train), (*train)[0].Elements - 1, nil) lr.regressand = mat.NewVecDense(len(*train), nil) for index, row := range *train { for idx, element := range row.Data { if idx == targetColumn { lr.regressand.SetVec(index, element) } else { if idx > targetColumn { idx -= 1} lr.regressors.Set(index, idx, element) } } } return &lr } // Fit train to LR func (lr *LR) Fit() (err error) { // Make fitting: switch { case lr.method&QR != 0: lr.parameterVector = qrRegressionSolver(lr.regressors, lr.regressand) case lr.method&SVD != 0: lr.parameterVector, err = svdRegressionSolver(lr.regressors, lr.regressand) if err != nil { log.Fatal(err) } default: lr.parameterVector, err = svdRegressionSolver(lr.regressors, lr.regressand) if err != nil { log.Fatal(err) } } return nil } // Predict wrapper for PredictTrain and PredictDense func (lr *LR) Predict(testTrainR core.Train, testTrainM *mat.Dense) (predictResult *mat.VecDense) { if testTrainR == nil && testTrainM == nil { log.Fatal("не передано данных") } if testTrainR == nil { return lr.PredictDense(testTrainM) } else 
{ return lr.PredictTrain(testTrainR) } } // Predict does predict by Train func (lr *LR) PredictTrain(testTrain core.Train) (predictResult *mat.VecDense) { testTrainMatrix := core.MakeMatrixFromTrain(testTrain) r, c := testTrainMatrix.Dims() predictResult = mat.NewVecDense(r, nil) var result float64 for i := 0; i < r; i++ { result = 0 for j := 0; j < c; j++ { result += testTrainMatrix.At(i,j) * lr.parameterVector.AtVec(j) } predictResult.SetVec(i, result) } return } // Predict does predict by Dense func (lr *LR) PredictDense(testTrain *mat.Dense) (predictResult *mat.VecDense) { r, c := testTrain.Dims() predictResult = mat.NewVecDense(r, nil) var result float64 for i := 0; i < r; i++ { result = 0 for j := 0; j < c; j++ { result += testTrain.At(i,j) * lr.parameterVector.AtVec(j) } predictResult.SetVec(i, result) } return } // DeterminationCoefficient return coefficient of determination func (lr *LR) DeterminationCoefficient(testTrain core.Train) (coef float64) { // The most general definition of the coefficient of determination is: // 1 - SSres/SStot, // SSres - the residual sum of squares; // SStot - the total sum of squares (proportional to the variance of the data). 
if lr.parameterVector == nil { log.Fatal(CoefBeforeFittingError) } regressors := mat.NewDense(len(*testTrain), (*testTrain)[0].Elements - 1, nil) regressand := mat.NewVecDense(len(*testTrain), nil) for index, row := range *testTrain { for idx, element := range row.Data { if idx == lr.targetColumn { regressand.SetVec(index, element) } else { if idx > lr.targetColumn { idx -= 1} regressors.Set(index, idx, element) } } } var meanRegresandsValue float64 core.VecPrint(regressand) r, _ := regressand.Dims() for i := 0; i < r; i++ { meanRegresandsValue += regressand.AtVec(i) } meanRegresandsValue /= float64(r) fmt.Println(meanRegresandsValue) predictResult := lr.PredictDense(regressors) var SStot float64 for i := 0; i < r; i++ { SStot += math.Pow(regressand.AtVec(i) - meanRegresandsValue, 2) } fmt.Println(SStot) var SSres float64 for i := 0; i < r; i++ { SSres += math.Pow(regressand.AtVec(i) - predictResult.AtVec(i), 2) } fmt.Println(SSres) return 1 - SSres / SStot } func Min(values []float64) (min float64, err error) { if len(values) == 0 { return 0, errors.New("пустой слайс") } min = values[0] for _, v := range values { if v < min { min = v } } return min, nil } func qrRegressionSolver(A *mat.Dense, y *mat.VecDense) (b *mat.VecDense){ _, c := A.Dims() b = mat.NewVecDense(c, nil) // QR decomposition is often used to solve the linear least squares problem // b = R' * QT * y ~ R * b = QT * y => give us b. // Q - orthogonal matrix m x n; // R - upper triangular matrix n x n. 
// (see - https://en.wikipedia.org/wiki/QR_decomposition) var QR mat.QR var Q, R, Qt, Qty mat.Dense QR.Factorize(A) QR.QTo(&Q) QR.RTo(&R) Qt.Clone(Q.T()) Qty.Mul(&Qt, y) // Now find b: for i := c - 1; i >= 0; i-- { b.SetVec(i, Qty.At(i, 0)) for j := i + 1; j < c; j++ { b.SetVec(i, b.AtVec(i) - b.AtVec(j) * R.At(i, j)) } b.SetVec(i, b.AtVec(i) / R.At(i, i)) } return } func svdRegressionSolver(A *mat.Dense, y *mat.VecDense) (b *mat.VecDense, err error) { r, c := A.Dims() b = mat.NewVecDense(c, nil) // SVD decomposition is often used to solve the linear least squares problem // b = X+ * y, where X+ = U * D+ * VT, where: // D+ - generalized inverse of E, where // E - is a diagonal m × n matrix with non-negative real numbers on the diagonal; // U - is an m × m unitary matrix; // VT - transposed n × n unitary matrix. // (see - https://en.wikipedia.org/wiki/Singular_value_decomposition) var SVD mat.SVD var V, U mat.Dense SVD.Factorize(A, mat.SVDFull) SVD.VTo(&V) SVD.UTo(&U) container := SVD.Values(nil) if vr, vc := V.Dims(); vr != c && vc != c { return nil, SVDDecompositionError } if ur, uc := U.Dims(); ur != r && uc != r { return nil, SVDDecompositionError } D := mat.NewDense(r, c, nil) for idx, element := range container { D.Set(idx, idx, 1 / element) } // Now find b: var Vt, DVt, UDVt mat.Dense Vt.Clone(V.T()) DVt.Mul(D, &Vt) UDVt.Mul(&U, &DVt) var temp mat.Dense temp.Mul(UDVt.T(), y) if _, c := temp.Dims(); c != 1 { return nil, SVDResultComputeError } view := temp.ColView(0) for i := 0; i < view.Len(); i++ { b.SetVec(i, view.AtVec(i)) } return }
supervisedlearning/regressions/linear/linear.go
0.61057
0.448668
linear.go
starcoder
package data

import "math"

// ----------------------------------------------------------------------------
// (X,Y), (U,V)

// XYUVer wraps the Len and XYUV methods.
type XYUVer interface {
	// Len returns the number of x, y, u, v quadruples.
	Len() int

	// XYUV returns an x, y, u, v quadruple.
	XYUV(int) (x, y, u, v float64)
}

// XYUVRange returns the minimum and maximum x, y, u and v values.
// For an empty data set every minimum is +Inf and every maximum is -Inf.
func XYUVRange(xyuvs XYUVer) (xmin, xmax, ymin, ymax, umin, umax, vmin, vmax float64) {
	xmin, xmax = math.Inf(1), math.Inf(-1)
	ymin, ymax = math.Inf(1), math.Inf(-1)
	umin, umax = math.Inf(1), math.Inf(-1)
	vmin, vmax = math.Inf(1), math.Inf(-1)
	for i := 0; i < xyuvs.Len(); i++ {
		x, y, u, v := xyuvs.XYUV(i)
		xmin, xmax = math.Min(xmin, x), math.Max(xmax, x)
		ymin, ymax = math.Min(ymin, y), math.Max(ymax, y)
		umin, umax = math.Min(umin, u), math.Max(umax, u)
		vmin, vmax = math.Min(vmin, v), math.Max(vmax, v)
	}
	return xmin, xmax, ymin, ymax, umin, umax, vmin, vmax
}

// XYUVs implements the XYUVer interface.
type XYUVs []struct{ X, Y, U, V float64 }

// Len returns the number of x, y, u, v quadruples.
func (d XYUVs) Len() int { return len(d) }

// XYUV returns the i'th x, y, u, v quadruple.
func (d XYUVs) XYUV(i int) (x, y, u, v float64) { return d[i].X, d[i].Y, d[i].U, d[i].V }

// ----------------------------------------------------------------------------
// Text

// XYTexter wraps the Len and XYText methods.
type XYTexter interface {
	// Len returns the number of data points.
	Len() int

	// XYText returns a coordinate (x, y) and a string.
	XYText(int) (x, y float64, t string)
}

// XYTexts implements the XYTexter interface.
type XYTexts []struct {
	X, Y float64
	Text string
}

// Len returns the number of data points.
func (d XYTexts) Len() int { return len(d) }

// XYText returns the i'th coordinate and its string.
func (d XYTexts) XYText(i int) (x, y float64, text string) {
	return d[i].X, d[i].Y, d[i].Text
}

// ----------------------------------------------------------------------------
// Boxplot

// Boxplotter wraps the Len and Boxplot methods.
type Boxplotter interface {
	// Len returns the number of boxes.
	Len() int

	// Boxplot returns the data for the i'th boxplot.
	Boxplot(i int) (x, min, q1, median, q3, max float64, outlier []float64)
}

// Boxplots implements the Boxplotter interface.
type Boxplots []struct {
	X                        float64
	Min, Q1, Median, Q3, Max float64
	Outlier                  []float64
}

// Len returns the number of boxes.
func (b Boxplots) Len() int { return len(b) }

// Boxplot returns the data for the i'th boxplot.
func (b Boxplots) Boxplot(i int) (x, min, q1, median, q3, max float64, outlier []float64) {
	return b[i].X, b[i].Min, b[i].Q1, b[i].Median, b[i].Q3, b[i].Max, b[i].Outlier
}
data/data.go
0.806281
0.487795
data.go
starcoder
package game // Universe contains the current generation of a universe, as well as other // important information about it. type Universe struct { current, alternate [][]int topLeft [2]int generation int } // NewUniverse creates a new universe and populates it, using the specified slice. func NewUniverse(initial [][]int) Universe { if isEmpty(initial) { return emptyUniverse(0) } lenX, lenY := len(initial[0]), len(initial) capX, capY := 2*lenX, 2*lenY current := make([][]int, lenY, capY) alternate := make([][]int, lenY, capY) for y := range current { current[y] = make([]int, lenX, capX) alternate[y] = make([]int, lenX, capX) } for y := range initial { copy(current[y], initial[y]) } universe := Universe{current, alternate, [2]int{0, 0}, 0} universe.ensurePadding(4, 4, 4) return universe } // Evolve calculates the next n generations of the universe. func (u *Universe) Evolve(padding, generations int) { for n := 0; n < generations; n++ { u.evolveUniverse() u.ensurePadding(1, 2*padding, padding) } } // CurrentState returns a copy of the current generation with p rows/columns of // padding. The second return value is the position upper left corner of the copy. 
func (u Universe) CurrentState(p int) ([][]int, [2]int) { state := make([][]int, len(u.current)) for y := range u.current { state[y] = make([]int, len(u.current[y])) copy(state[y], u.current[y]) } old, new := padding(state), [4]int{} state, new = ensurePadding(state, old, p, p, p) return state, calculateTopLeft(u.topLeft, old, new) } // Generation returns the number of the current generation of a Universe func (u Universe) Generation() int { return u.generation } func emptyUniverse(generation int) Universe { return Universe{[][]int{[]int{0}}, [][]int{[]int{0}}, [2]int{0, 0}, generation} } func (u *Universe) evolveUniverse() { for y := range u.current { for x, cell := range u.current[y] { if cell != 0 { u.increaseNeighbourhood(x, y) } } } for y := range u.alternate { for x, cell := range u.alternate[y] { if cell == 3 { u.alternate[y][x] = 1 } else if cell == 2 && u.current[y][x] == 1 { u.alternate[y][x] = 1 } else { u.alternate[y][x] = 0 } u.current[y][x] = 0 } } u.current, u.alternate = u.alternate, u.current u.generation++ } func (u Universe) increaseNeighbourhood(x, y int) { for dy := -1; dy <= 1; dy++ { for dx := -1; dx <= 1; dx++ { if dx == 0 && dy == 0 { continue } if x+dx >= 0 && y+dy >= 0 && x+dx < len(u.current[0]) && y+dy < len(u.current) { u.alternate[y+dy][x+dx]++ } } } } func calculateTopLeft(topLeft [2]int, oldPadding, newPadding [4]int) [2]int { return [2]int{topLeft[0] + (oldPadding[2] - newPadding[2]), topLeft[1] + (oldPadding[0] - newPadding[0])} } func isEmpty(s [][]int) bool { for y := range s { for _, c := range s[y] { if c > 0 { return false } } } return true }
Conways-Game/src/game/game.go
0.781914
0.535159
game.go
starcoder
package imaging import ( "image" "image/color" "math" ) func affineTransform(x, y float64, data []float64) (xout, yout float64) { a := data[:] a0 := a[0] a1 := a[1] a2 := a[2] a3 := a[3] a4 := a[4] a5 := a[5] xin := float64(x) + 0.5 yin := float64(y) + 0.5 xout = a0*xin + a1*yin + a2 yout = a3*xin + a4*yin + a5 return } func perspectiveTransform(x, y float64, data []float64) (xout, yout float64) { a := data[:] a0 := a[0] a1 := a[1] a2 := a[2] a3 := a[3] a4 := a[4] a5 := a[5] a6 := a[6] a7 := a[7] xin := float64(x) + 0.5 yin := float64(y) + 0.5 xout = (a0*xin + a1*yin + a2) / (a6*xin + a7*yin + 1) yout = (a3*xin + a4*yin + a5) / (a6*xin + a7*yin + 1) return } func quadTransform(x, y float64, data []float64) (xout, yout float64) { a := data[:] a0 := a[0] a1 := a[1] a2 := a[2] a3 := a[3] a4 := a[4] a5 := a[5] a6 := a[6] a7 := a[7] xin := float64(x) + 0.5 yin := float64(y) + 0.5 xout = a0 + a1*xin + a2*yin + a3*xin*yin yout = a4 + a5*xin + a6*yin + a7*xin*yin return } func precomputeWeightsForTransform(dstSize, srcSize int, scale float64, filter ResampleFilter) [][]indexWeight { du := scale if scale < 1.0 { scale = 1.0 } ru := math.Ceil(scale * filter.Support) out := make([][]indexWeight, dstSize) tmp := make([]indexWeight, 0, dstSize*int(ru+2)*2) for v := 0; v < dstSize; v++ { fu := (float64(v)+0.5)*du - 0.5 begin := int(math.Ceil(fu - ru)) if begin < 0 { begin = 0 } end := int(math.Floor(fu + ru)) if end > srcSize-1 { end = srcSize - 1 } var sum float64 for u := begin; u <= end; u++ { w := filter.Kernel((float64(u) - fu) / scale) if w != 0 { sum += w tmp = append(tmp, indexWeight{index: u, weight: w}) } } if sum != 0 { for i := range tmp { tmp[i].weight /= sum } } out[v] = tmp tmp = tmp[len(tmp):] } return out } func filterApply(img image.Image, outImage []uint8, x, y float64, xw, yw [][]indexWeight, filter ResampleFilter) bool { if filter.Support == 0.0 { c := img.At(int(x+0.5), int(y+0.5)) r, g, b, a := c.RGBA() outImage[0] = uint8(r * 255 / 65535) outImage[1] = 
uint8(g * 255 / 65535) outImage[2] = uint8(b * 255 / 65535) outImage[3] = uint8(a * 255 / 65535) return true } xx, yy := int(x+0.5), int(y+0.5) var r, g, b, a float64 for _, w := range xw[xx] { s := img.At(w.index/2, int(y+0.5)) cr, cg, cb, ca := s.RGBA() aw := float64(ca) * w.weight r += float64(cr) * aw g += float64(cg) * aw b += float64(cb) * aw a += aw } for _, h := range yw[yy] { s := img.At(int(x+0.5), h.index/2) cr, cg, cb, ca := s.RGBA() aw := float64(ca) * h.weight r += float64(cr) * aw g += float64(cg) * aw b += float64(cb) * aw a += aw } if a != 0 { aInv := 1 / a outImage[0] = clamp(r * 255 / 65535 * aInv) outImage[1] = clamp(g * 255 / 65535 * aInv) outImage[2] = clamp(b * 255 / 65535 * aInv) outImage[3] = clamp(a * 255 / 65535) } return true } type ImagingTransformMap func(x, y float64, data []float64) (xout, yout float64) func genericTransform(img image.Image, outImage *image.NRGBA, x0, y0, x1, y1 float64, transform ImagingTransformMap, data []float64, filter ResampleFilter, fill bool, fillColor color.Color) { srcW := img.Bounds().Dx() srcH := img.Bounds().Dy() if srcW <= 0 || srcH <= 0 { return } if x0 < 0 { x0 = 0 } if y0 < 0 { y0 = 0 } if x1 > float64(srcW) { x1 = float64(srcW) } if y1 > float64(srcH) { y1 = float64(srcH) } if x0 == 0 && y0 == 0 && float64(srcW) == x1 && float64(srcH) == y1 { *outImage = *Clone(img) } dstW, dstH := int(x1-x0), int(y1-y0) var dst *image.NRGBA if outImage == nil { dst = image.NewNRGBA(image.Rect(0, 0, dstW, dstH)) } else { dst = outImage } var xx, yy float64 xw, yw := transform(float64(dstW), float64(dstH), data) x_ws := precomputeWeightsForTransform(srcW, dstW, float64(dstW)/xw, filter) y_ws := precomputeWeightsForTransform(srcH, dstH, float64(dstH)/yw, filter) scanLine := make([]uint8, 4) for y := y0; y < y1; y++ { for x := x0; x < x1; x++ { xx, yy = transform(float64(x-x0), float64(y-y0), data) if !filterApply(img, scanLine, xx, yy, x_ws, y_ws, filter) { if fill { r, g, b, a := fillColor.RGBA() scanLine[0], 
scanLine[1], scanLine[2], scanLine[3] = uint8(r), uint8(g), uint8(b), uint8(a) } } j := int(y)*outImage.Stride + int(x)*4 d := dst.Pix[j : j+4 : j+4] copy(d, scanLine) } } outImage = dst } func imagingTransform(img image.Image, outImage *image.NRGBA, method TransformsMethod, x0, y0, x1, y1 float64, data []float64, filter ResampleFilter, fill bool, fillColor color.Color) { var transform ImagingTransformMap switch method { case AFFINE: transform = affineTransform break case PERSPECTIVE: transform = perspectiveTransform break case QUAD: transform = quadTransform break default: return } genericTransform(img, outImage, x0, y0, x1, y1, transform, data, filter, fill, fillColor) } type TransformsMethod uint32 const ( AFFINE TransformsMethod = 0 EXTENT TransformsMethod = 1 PERSPECTIVE TransformsMethod = 2 QUAD TransformsMethod = 3 MESH TransformsMethod = 4 ) func transformer(box [4]float64, image image.Image, outImage *image.NRGBA, method TransformsMethod, data []float64, filter ResampleFilter, fill bool, fillColor color.Color) { w := box[2] - box[0] h := box[3] - box[1] if method == AFFINE { data = data[0:6] } else if method == EXTENT { x0, y0, x1, y1 := data[0], data[1], data[2], data[3] xs := (x1 - x0) / float64(w) ys := (y1 - y0) / float64(h) method = AFFINE data = []float64{xs, 0, x0, 0, ys, y0} } else if method == PERSPECTIVE { data = data[0:8] } else if method == QUAD { nw := data[0:2] sw := data[2:4] se := data[4:6] ne := data[6:8] x0, y0 := nw[0], nw[1] As := 1.0 / float64(w) At := 1.0 / float64(h) data = []float64{ x0, (ne[0] - x0) * As, (sw[0] - x0) * At, (se[0] - sw[0] - ne[0] + x0) * As * At, y0, (ne[1] - y0) * As, (sw[1] - y0) * At, (se[1] - sw[1] - ne[1] + y0) * As * At, } } else { panic("unknown transformation method") } imagingTransform(image, outImage, method, box[0], box[1], box[2], box[3], data, filter, fill, fillColor) } func Transform(dst image.Image, width, height int, method TransformsMethod, data interface{}, filter ResampleFilter, fill bool, 
fillcolor color.Color) *image.NRGBA { im := image.NewNRGBA(image.Rect(0, 0, width, height)) if method == MESH { if qdata, ok := data.(map[[4]float64][]float64); !ok { return nil } else { for box, quad := range qdata { transformer(box, dst, im, QUAD, quad, filter, fill, fillcolor) } } } else { transformer([4]float64{0, 0, float64(width), float64(height)}, dst, im, method, data.([]float64), filter, fill, fillcolor) } return im }
geometry.go
0.649023
0.558748
geometry.go
starcoder
package main import ( "flag" "fmt" "os" ) var usage = `Usage: ep [OPTIONS...] COMMAND [COMMAND-OPTS...] ABOUT "ep" is a simple command line utility to distribute parallel work to a set of worker subprocesses. "ep" can be used as prefix of another command that reads lines from standard input, for example a list of files to work on or some data to parse. All output from all workers is displayed to standard output, along with any errors on stderr. "ep" will automatically start one worked per available CPU and (on Linux) assign a CPU affinity to pin each worker to one physical CPU. "ep" can also be run on several hosts at the same time. In this case, you need to instruct "ep" of the total number of "ep" instances you intend to run and the relative number of each instance. In this case, "ep" will automatically partition the input between the instances. Input data must be the same on all hosts, or some data might never be processed. WORKERS Workers must be designed to do only two basic things: 1. Read data to work on from standard input and exit when the input is over; 2. Print errors to stderr and any useful output to stdout (or anywhere the user specifies via flags.) Workers must not exit immediately by cycle through the input until completion. Workers don't need to be parallel themselves. A simple sequential worker will produce optimal results. Workers should also be able to read null-byte separated lines of input. MULTI-NODE PARTITIONING When running in multi-node mode ("-w" and "-wg" command line options), the data in input is ignored or processed depending on the result of a basic hashing of the data itself (currenty, the numeric sum of all bytes.) This guarantees that no two nodes process the same data and a negligible overhead. 
EXAMPLES * Single node, multiple processes, using the example utility "epsum" to print chechsums of files: $ find /usr/bin -type f | ep epsum -t sum512 * Multiple nodes (three nodes), sharing the same filesystem: host1$ find /data/shared -type f | ep -w 1 -wg 3 epsum -t sum512 >sums1 host2$ find /data/shared -type f | ep -w 2 -wg 3 epsum -t sum512 >sums2 host3$ find /data/shared -type f | ep -w 3 -wg 3 epsum -t sum512 >sums3 $ cat sums? > sums OPTIONS ` func init() { flag.Usage = func() { fmt.Fprintln(os.Stderr, usage) flag.PrintDefaults() } }
cmd/ep/doc.go
0.583203
0.409073
doc.go
starcoder
package msgraph // RatingGermanyMoviesType undocumented type RatingGermanyMoviesType int const ( // RatingGermanyMoviesTypeVAllAllowed undocumented RatingGermanyMoviesTypeVAllAllowed RatingGermanyMoviesType = 0 // RatingGermanyMoviesTypeVAllBlocked undocumented RatingGermanyMoviesTypeVAllBlocked RatingGermanyMoviesType = 1 // RatingGermanyMoviesTypeVGeneral undocumented RatingGermanyMoviesTypeVGeneral RatingGermanyMoviesType = 2 // RatingGermanyMoviesTypeVAgesAbove6 undocumented RatingGermanyMoviesTypeVAgesAbove6 RatingGermanyMoviesType = 3 // RatingGermanyMoviesTypeVAgesAbove12 undocumented RatingGermanyMoviesTypeVAgesAbove12 RatingGermanyMoviesType = 4 // RatingGermanyMoviesTypeVAgesAbove16 undocumented RatingGermanyMoviesTypeVAgesAbove16 RatingGermanyMoviesType = 5 // RatingGermanyMoviesTypeVAdults undocumented RatingGermanyMoviesTypeVAdults RatingGermanyMoviesType = 6 ) // RatingGermanyMoviesTypePAllAllowed returns a pointer to RatingGermanyMoviesTypeVAllAllowed func RatingGermanyMoviesTypePAllAllowed() *RatingGermanyMoviesType { v := RatingGermanyMoviesTypeVAllAllowed return &v } // RatingGermanyMoviesTypePAllBlocked returns a pointer to RatingGermanyMoviesTypeVAllBlocked func RatingGermanyMoviesTypePAllBlocked() *RatingGermanyMoviesType { v := RatingGermanyMoviesTypeVAllBlocked return &v } // RatingGermanyMoviesTypePGeneral returns a pointer to RatingGermanyMoviesTypeVGeneral func RatingGermanyMoviesTypePGeneral() *RatingGermanyMoviesType { v := RatingGermanyMoviesTypeVGeneral return &v } // RatingGermanyMoviesTypePAgesAbove6 returns a pointer to RatingGermanyMoviesTypeVAgesAbove6 func RatingGermanyMoviesTypePAgesAbove6() *RatingGermanyMoviesType { v := RatingGermanyMoviesTypeVAgesAbove6 return &v } // RatingGermanyMoviesTypePAgesAbove12 returns a pointer to RatingGermanyMoviesTypeVAgesAbove12 func RatingGermanyMoviesTypePAgesAbove12() *RatingGermanyMoviesType { v := RatingGermanyMoviesTypeVAgesAbove12 return &v } // RatingGermanyMoviesTypePAgesAbove16 
returns a pointer to RatingGermanyMoviesTypeVAgesAbove16 func RatingGermanyMoviesTypePAgesAbove16() *RatingGermanyMoviesType { v := RatingGermanyMoviesTypeVAgesAbove16 return &v } // RatingGermanyMoviesTypePAdults returns a pointer to RatingGermanyMoviesTypeVAdults func RatingGermanyMoviesTypePAdults() *RatingGermanyMoviesType { v := RatingGermanyMoviesTypeVAdults return &v }
v1.0/RatingGermanyMoviesTypeEnum.go
0.585338
0.600511
RatingGermanyMoviesTypeEnum.go
starcoder
package main import "fmt" // GraphNode is a single node in a graph list type GraphNode struct { name string value float64 } // Edge represents an edge between two vertices type Edge struct { src string dest string value float64 } // graph is a data structure which will be holding a graph type graph map[string][]GraphNode // addVertexToGraph adds a vertex to graph func (g graph) addVertexToGraph(vtx string) { if g[vtx] != nil { return } g[vtx] = make([]GraphNode, 0) } // addEdgeToGraph adds an edge to a graph func (g graph) addEdgeToGraph(fromVtx, toVtx string, edgeValue float64) { if g[fromVtx] == nil { // check if initial vertex exists return } for i := range g[fromVtx] { // check if edge already exists if g[fromVtx][i].name == toVtx { return } } if g[toVtx] == nil { // create new destination vertext if it does not exists g[toVtx] = make([]GraphNode, 0) fmt.Println("\n-- Destination vertex " + toVtx + " created. --") } g[fromVtx] = append(g[fromVtx], GraphNode{name: toVtx, value: edgeValue}) g[toVtx] = append(g[toVtx], GraphNode{name: fromVtx, value: edgeValue}) uniqueEdges = append(uniqueEdges, Edge{src: fromVtx, dest: toVtx, value: edgeValue}) } // addVertex asks user to enter vertex name func (g graph) addVertex() { var vtxName string fmt.Print("Enter the name of vertex: ") fmt.Scanf("%s\n", &vtxName) g.addVertexToGraph(vtxName) } // addEdge asks user to enter necessary values before adding an edge to graph func (g graph) addEdge() { var fromVtx, toVtx string var edgeValue float64 fmt.Print("Enter the initial vertex name: ") fmt.Scanf("%s\n", &fromVtx) fmt.Print("Enter the destination vertex name: ") fmt.Scanf("%s\n", &toVtx) fmt.Print("Enter the weight of edge: ") fmt.Scanf("%d\n", &edgeValue) g.addEdgeToGraph(fromVtx, toVtx, edgeValue) } // removeEdgeFromGraph removes an edge from graph func (g graph) removeEdgeFromGraph(fromVtx, toVtx string) { if g[fromVtx] == nil || g[toVtx] == nil { fmt.Println("\n-- Edge between " + fromVtx + " and " + toVtx + " does not 
exist. --") return } for i := range g[fromVtx] { if g[fromVtx][i].name == toVtx { if i == 0 { g[fromVtx] = g[fromVtx][1:len(g[fromVtx])] } else if i == (len(g[fromVtx]) - 1) { g[fromVtx] = g[fromVtx][0:(len(g[fromVtx]) - 1)] } else { initial := g[fromVtx][0:i] final := g[fromVtx][i+1 : len(g[fromVtx])] g[fromVtx] = append(initial, final...) } break } } for i := range g[toVtx] { if g[toVtx][i].name == fromVtx { if i == 0 { g[toVtx] = g[toVtx][1:len(g[toVtx])] } else if i == (len(g[toVtx]) - 1) { g[toVtx] = g[toVtx][0:(len(g[toVtx]) - 1)] } else { initial := g[toVtx][0:i] final := g[toVtx][i+1 : len(g[toVtx])] g[toVtx] = append(initial, final...) } break } } } // DFS traverses a graph using dfs technique func (g graph) DFS(start string) bool { visited := make(map[string]bool) parent := "-1" if g[start] == nil { fmt.Println("\n-- No vertex named " + start + " present in graph. --") return false } check := g.dfsHelper(start, visited, parent) return check } // dfsHelper recursively calls itself to solve dfs traversal func (g graph) dfsHelper(vtx string, visited map[string]bool, parent string) bool { visited[vtx] = true for i := range g[vtx] { if visited[g[vtx][i].name] && parent != g[vtx][i].name && parent != vtx { return true } if !visited[g[vtx][i].name] { parent = vtx check := g.dfsHelper(g[vtx][i].name, visited, parent) if check { return check } } } return false } // simpleDisplay simply displays a graph func (g graph) simpleDisplay() { fmt.Println("") for i := range g { fmt.Print(i, " => ") for j := range g[i] { fmt.Print(g[i][j]) } fmt.Println("") } }
algorithms/graphs/kruskals/graph.go
0.583559
0.52616
graph.go
starcoder
package metrics import "sync" // Observer is an interface that generalizes a group of metrics // that are receiving a set of numeric values in time type Observer interface { Observe(float64) } // HistogramBucketOptions an options to fine-tune histogram buckets type HistogramBucketOptions struct { Type string Buckets []string } // HistogramOptions a set of options for histogram metric type HistogramOptions struct { Namespace string Subsystem string Name string Desc string Tags []string Buckets HistogramBucketOptions } // NewHistogramWithTags Histogram constructor for DefaultMetrics global func NewHistogramWithTags(opts HistogramOptions) (*GroupTagHistogram, error) { return DefaultMetrics.NewHistogramWithTags(opts) } // MustNewHistogramWithTags constructor with embedded panic func MustNewHistogramWithTags(opts HistogramOptions) *GroupTagHistogram { g, err := DefaultMetrics.NewHistogramWithTags(opts) if err != nil { panic("Metric Error: " + err.Error()) } return g } // TagObserver interface for appending tags to metrics type TagObserver interface { WithTags(map[string]string) (Observer, error) WithLabels(...string) Observer } // GroupTagHistogram a group of different Histograms from different metric systems type GroupTagHistogram struct { tagobservers []TagObserver options HistogramOptions registred map[uint64]*GroupObserver registredLock sync.Mutex } // AddHistogram is appending new histogram to set func (gth *GroupTagHistogram) AddHistogram(o TagObserver) { gth.tagobservers = append(gth.tagobservers, o) } // WithTags makes a group with given tags (label-value pairs) func (gth *GroupTagHistogram) WithTags(tags map[string]string) (*GroupObserver, error) { gh := &GroupObserver{} for _, tc := range gth.tagobservers { c, err := tc.WithTags(tags) if err != nil { return nil, err } gh.AddObserver(c) } return gh, nil } // WithLabels make a group with given labels func (gth *GroupTagHistogram) WithLabels(labels ...string) *GroupObserver { h := secureHash.GetHash(labels) 
gth.registredLock.Lock() defer gth.registredLock.Unlock() gh, ok := gth.registred[h] if ok { return gh } gh = &GroupObserver{} for _, tc := range gth.tagobservers { c := tc.WithLabels(labels...) gh.AddObserver(c) } gth.registred[h] = gh return gh } // GroupObserver is a set of Observers that within the same tags type GroupObserver struct { observers []Observer } // AddObserver adds observer func (g *GroupObserver) AddObserver(o Observer) { g.observers = append(g.observers, o) } // Observe calls the metrics ultimately func (g *GroupObserver) Observe(f float64) { for _, c := range g.observers { c.Observe(f) } }
metrics/observer.go
0.696268
0.411288
observer.go
starcoder
package hole import ( "math/rand" "strings" ) var ( notes = [...][2]string{ {"C", "B♯"}, {"C♯", "D♭"}, {"D", "D"}, {"D♯", "E♭"}, {"E", "F♭"}, {"F", "E♯"}, {"F♯", "G♭"}, {"G", "G"}, {"G♯", "A♭"}, {"A", "A"}, {"A♯", "B♭"}, {"B", "C♭"}, } triadTypes = [...]string{ "°", "m", "", "+", } triadSteps = [...][2]int{ {3, 3}, {3, 4}, {4, 3}, {4, 4}, } orderings = [...][3]int{ {0, 1, 2}, {0, 2, 1}, {1, 0, 2}, {1, 2, 0}, {2, 0, 1}, {2, 1, 0}, } ) func letterVal(note string) byte { return note[0] - 'A' } func genNotes(rootIdx int, rootNote string, steps [2]int) []string { thirdIdx := (rootIdx + steps[0]) % 12 fifthIdx := (rootIdx + steps[0] + steps[1]) % 12 thirdNote := notes[thirdIdx][0] fifthNote := notes[fifthIdx][0] // Enforce strict spelling. The third should be 2 letters // above the root, and the fifth should be 4 letters above, // wrapping at G if (letterVal(rootNote)+2)%7 != letterVal(thirdNote) { thirdNote = notes[thirdIdx][1] } if (letterVal(rootNote)+4)%7 != letterVal(fifthNote) { fifthNote = notes[fifthIdx][1] } // Return empty if strict spelling is impossible if (letterVal(rootNote)+2)%7 != letterVal(thirdNote) || (letterVal(rootNote)+4)%7 != letterVal(fifthNote) { return []string{} } return []string{rootNote, thirdNote, fifthNote} } func musicalChords() ([]string, string) { var tests []test // Skip a random combination for anti-cheese skipNum := rand.Intn(61) combNum := 0 for rootIdx, rootNames := range notes { // Loop once for each unique name the note has uniqueNames := 2 if rootNames[0] == rootNames[1] { uniqueNames = 1 } for _, rootNote := range rootNames[:uniqueNames] { for triadIdx, triad := range triadTypes { steps := triadSteps[triadIdx] chordNotes := genNotes(rootIdx, rootNote, steps) if len(chordNotes) > 0 { if skipNum != combNum { chord := rootNote + triad for _, ordering := range orderings { rearrangedNotes := []string{chordNotes[ordering[0]], chordNotes[ordering[1]], chordNotes[ordering[2]]} tests = append(tests, test{ strings.Join(rearrangedNotes, " 
"), chord, }) } } combNum++ } } } } // Cut 3 tests. return outputTests(shuffle(tests)[3:]) }
hole/musical-chords.go
0.517083
0.453322
musical-chords.go
starcoder
package cluster import ( "context" "fmt" "github.com/elastic/terraform-provider-elasticstack/internal/clients" "github.com/elastic/terraform-provider-elasticstack/internal/utils" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func DataSourceSnapshotRespository() *schema.Resource { commonStdSettings := map[string]*schema.Schema{ "max_number_of_snapshots": { Description: "Maximum number of snapshots the repository can contain.", Type: schema.TypeInt, Computed: true, }, } commonSettings := map[string]*schema.Schema{ "chunk_size": { Description: "Maximum size of files in snapshots.", Type: schema.TypeString, Computed: true, }, "compress": { Description: "If true, metadata files, such as index mappings and settings, are compressed in snapshots.", Type: schema.TypeBool, Computed: true, }, "max_snapshot_bytes_per_sec": { Description: "Maximum snapshot creation rate per node.", Type: schema.TypeString, Computed: true, }, "max_restore_bytes_per_sec": { Description: "Maximum snapshot restore rate per node.", Type: schema.TypeString, Computed: true, }, "readonly": { Description: "If true, the repository is read-only.", Type: schema.TypeBool, Computed: true, }, } //-- repos specific settings fsSettings := map[string]*schema.Schema{ "location": { Description: "Location of the shared filesystem used to store and retrieve snapshots.", Type: schema.TypeString, Computed: true, }, } urlSettings := map[string]*schema.Schema{ "url": { Description: "URL location of the root of the shared filesystem repository.", Type: schema.TypeString, Computed: true, }, "http_max_retries": { Description: "Maximum number of retries for http and https URLs.", Type: schema.TypeInt, Computed: true, }, "http_socket_timeout": { Description: "Maximum wait time for data transfers over a connection.", Type: schema.TypeString, Computed: true, }, } gcsSettings := map[string]*schema.Schema{ "bucket": { Description: "The name of the bucket to be 
used for snapshots.", Type: schema.TypeString, Computed: true, }, "client": { Description: "The name of the client to use to connect to Google Cloud Storage.", Type: schema.TypeString, Computed: true, }, "base_path": { Description: "Specifies the path within the bucket to the repository data. Defaults to the root of the bucket.", Type: schema.TypeString, Computed: true, }, } azureSettings := map[string]*schema.Schema{ "container": { Description: "Container name. You must create the Azure container before creating the repository.", Type: schema.TypeString, Computed: true, }, "client": { Description: "Azure named client to use.", Type: schema.TypeString, Computed: true, }, "base_path": { Description: "Specifies the path within the container to the repository data.", Type: schema.TypeString, Computed: true, }, "location_mode": { Description: "Location mode. `primary_only` or `secondary_only`. See: https://docs.microsoft.com/en-us/azure/storage/common/storage-redundancy", Type: schema.TypeString, Computed: true, }, } s3Settings := map[string]*schema.Schema{ "bucket": { Description: "Name of the S3 bucket to use for snapshots.", Type: schema.TypeString, Computed: true, }, "client": { Description: "The name of the S3 client to use to connect to S3.", Type: schema.TypeString, Computed: true, }, "base_path": { Description: "Specifies the path to the repository data within its bucket.", Type: schema.TypeString, Computed: true, }, "server_side_encryption": { Description: "When true, files are encrypted server-side using AES-256 algorithm.", Type: schema.TypeBool, Computed: true, }, "buffer_size": { Description: "Minimum threshold below which the chunk is uploaded using a single request.", Type: schema.TypeString, Computed: true, }, "canned_acl": { Description: "The S3 repository supports all S3 canned ACLs.", Type: schema.TypeString, Computed: true, }, "storage_class": { Description: "Sets the S3 storage class for objects stored in the snapshot repository.", Type: 
schema.TypeString, Computed: true, }, } hdfsSettings := map[string]*schema.Schema{ "uri": { Description: `The uri address for hdfs. ex: "hdfs://<host>:<port>/".`, Type: schema.TypeString, Computed: true, }, "path": { Description: "The file path within the filesystem where data is stored/loaded.", Type: schema.TypeString, Computed: true, }, "load_defaults": { Description: "Whether to load the default Hadoop configuration or not.", Type: schema.TypeBool, Computed: true, }, } //-- snapRepoSchema := map[string]*schema.Schema{ "name": { Description: "Name of the snapshot repository.", Type: schema.TypeString, Required: true, }, "type": { Description: "Repository type.", Type: schema.TypeString, Computed: true, }, "fs": { Description: "Shared filesystem repository. Set only if the type of the fetched repo is `fs`.", Type: schema.TypeList, Computed: true, Elem: &schema.Resource{ Schema: utils.MergeSchemaMaps(commonSettings, commonStdSettings, fsSettings), }, }, "url": { Description: "URL repository. Set only if the type of the fetched repo is `url`.", Type: schema.TypeList, Computed: true, Elem: &schema.Resource{ Schema: utils.MergeSchemaMaps(commonSettings, commonStdSettings, urlSettings), }, }, "gcs": { Description: "Google Cloud Storage service as a repository. Set only if the type of the fetched repo is `gcs`.", Type: schema.TypeList, Computed: true, Elem: &schema.Resource{ Schema: utils.MergeSchemaMaps(commonSettings, gcsSettings), }, }, "azure": { Description: "Azure Blob storage as a repository. Set only if the type of the fetched repo is `azure`.", Type: schema.TypeList, Computed: true, Elem: &schema.Resource{ Schema: utils.MergeSchemaMaps(commonSettings, azureSettings), }, }, "s3": { Description: "AWS S3 as a repository. Set only if the type of the fetched repo is `s3`.", Type: schema.TypeList, Computed: true, Elem: &schema.Resource{ Schema: utils.MergeSchemaMaps(commonSettings, s3Settings), }, }, "hdfs": { Description: "HDFS File System as a repository. 
Set only if the type of the fetched repo is `hdfs`.", Type: schema.TypeList, Computed: true, Elem: &schema.Resource{ Schema: utils.MergeSchemaMaps(commonSettings, hdfsSettings), }, }, } utils.AddConnectionSchema(snapRepoSchema) return &schema.Resource{ Description: "Gets information about the registered snapshot repositories.", ReadContext: dataSourceSnapRepoRead, Schema: snapRepoSchema, } } func dataSourceSnapRepoRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics client, err := clients.NewApiClient(d, meta) if err != nil { return diag.FromErr(err) } repoName := d.Get("name").(string) id, diags := client.ID(repoName) if diags.HasError() { return diags } currentRepo, diags := client.GetElasticsearchSnapshotRepository(repoName) if diags.HasError() { return diags } // get the schema of the Elem of the current repo type schemaSettings := DataSourceSnapshotRespository().Schema[currentRepo.Type].Elem.(*schema.Resource).Schema settings, err := flattenRepoSettings(currentRepo, schemaSettings) if err != nil { diags = append(diags, diag.Diagnostic{ Severity: diag.Error, Summary: "Unable to parse snapshot repository settings.", Detail: fmt.Sprintf(`Unable to parse settings returned by ES API: %v`, err), }) return diags } if err := d.Set(currentRepo.Type, settings); err != nil { return diag.FromErr(err) } if err := d.Set("type", currentRepo.Type); err != nil { return diag.FromErr(err) } d.SetId(id.String()) return diags }
internal/elasticsearch/cluster/snapshot_repository_data_source.go
0.68784
0.453988
snapshot_repository_data_source.go
starcoder
package main import ( "math/bits" ) type Processor struct { reg [4]uint8 // A, X, Y, Z flag [4]bool // zero, negative, carry, overflow ptr uint ram *Memory stack *Stack } // read and execute one instruction from the memory func (cpu Processor) Cycle () { // read the byte at the specified location inst := cpu.ram.GetByte(cpu.ptr) reg := inst & 0x3 // register to use cpu.ptr += 1 // move to next byte // lower values are NOP if between(0x40, inst, 0xA8) { if inst < 0x48 { // STR if inst < 0x44 { // STR from registers addr := cpu.ram.GetAddress(cpu.ptr) cpu.reg[reg] = uint8(cpu.ram.GetByte(addr)) cpu.ptr += 2 } else { // STR with indexing cpu.reg[ 0 ] = uint8(cpu.readMR(inst)) } } else if inst < 0x54 { // LOD var val uint if inst < 0x4C { // LOD in registers addr := cpu.ram.GetAddress(cpu.ptr) val = cpu.ram.GetByte(addr) cpu.reg[reg] = uint8(val) cpu.ptr += 2 } else if inst < 0x50 { // LOD with indexing val = cpu.readMR(inst) cpu.reg[ 0 ] = uint8(val) } else { // LOD numbers val = cpu.ram.GetByte(cpu.ptr) cpu.reg[reg] = uint8(val) cpu.ptr += 1 } cpu.upFlags(val) } else if inst < 0x58 { // PSH cpu.stack.Push(cpu.reg[reg]) } else if inst < 0x5C { // PLL cpu.reg[reg] = cpu.stack.Pull() } else if inst < 0x60 { // JMP & RTN if inst < 0x5E { // JMP addr := cpu.ram.GetAddress(cpu.ptr) if inst == 0x5D {cpu.stack.PushAddress(cpu.ptr + 1)} cpu.ptr = addr } else { // RTN cpu.ptr = cpu.stack.PullAddress() } } else if inst < 0x70 { // TRS reg2 := (inst & 0xC) >> 2 cpu.reg[reg] = cpu.reg[reg2] } else if inst < 0xA8 { // unary operations val := cpu.readMR(inst) if inst < 0x78 { val += 1 // INC } else if inst < 0x80 { val -= 1 // DEC } else if inst < 0x88 { val <<= 1 // SHL } else if inst < 0x90 { val >>= 1 // SHR } else if inst < 0x98 { val = uint(bits.RotateLeft8(uint8(val), 1)) // ROL } else if inst < 0xA0 { val = uint(bits.RotateLeft8(uint8(val), -1)) // ROR } else { val = ^val // NOT } cpu.writeMR(inst, val) cpu.upFlags(val) } } else if between(0xB0, inst, 0xF0) { if inst < 0xB8 
{ // BRC // detect if branching cond := cpu.flag[reg] not := inst >= 0xB4 if (cond && !not) || (!cond && not) { cpu.ptr = cpu.ram.GetAddress(cpu.ptr) } else { cpu.ptr += 2 } } else if inst < 0xBC { // SET cpu.flag[reg] = true } else if inst < 0xC0 { // CLR cpu.flag[reg] = false } else if inst < 0xE8 { // operations that store result in accumulator val1 := uint(cpu.reg[0]) val2 := cpu.readMR(inst) if inst < 0xC8 { val1 += val2 // ADD } else if inst < 0xD0 { val1 -= val2 // SUB } else if inst < 0xD8 { val1 &= val2 // AND } else if inst < 0xE0 { val1 |= val2 // IOR } else { val1 ^= val2 // XOR } cpu.reg[0] = uint8(val1) cpu.upFlags(val1) } else { // CMP } } } // helper to read from memory or registers func (cpu Processor) readMR (inst uint) uint { reg := inst & 0x3 if (inst & 0x4) == 0 { // read from a register return uint(cpu.reg[reg]) } else { addr := cpu.ram.GetAddress(cpu.ptr) if reg == 0 { // read from memory return cpu.ram.GetByte(addr) } else { // read from memory with index return cpu.ram.GetByte(addr + uint(cpu.reg[reg])) } cpu.ptr += 2 } return 0 } // helper to write to memory or registers func (cpu Processor) writeMR (inst, value uint) { reg := inst & 0x3 if (inst & 0x4) == 0 { // write to a register cpu.reg[reg] = uint8(value) } else { addr := cpu.ram.GetAddress(cpu.ptr) if reg == 0 { // write to memory cpu.ram.Write(addr, value) } else { // write to memory with index cpu.ram.Write(addr + uint(cpu.reg[reg]), value) } cpu.ptr += 2 } } // set base flags func (cpu Processor) upFlags (value uint) { cpu.flag[0] = value == 0 cpu.flag[1] = (value & 0x080) != 0 cpu.flag[2] = (value & 0x100) != 0 } // specify if the pointer has reached the end of memory func (cpu Processor) ReachedEnd () bool { if cpu.ptr >= 0x10000 { cpu.ptr = 0x0 return true } return false } func between (min, val, max uint) bool { return min <= val && val < max }
processor.go
0.560012
0.446193
processor.go
starcoder
package validator import ( "bytes" "fmt" "reflect" "sort" "strconv" "strings" "sync" "unicode/utf8" ) const tagName string = "valid" // Validator contruct type Validator struct { Translator *Translator Attributes map[string]string CustomMessage map[string]string } var loadValidatorOnce *Validator var once sync.Once // New returns a new instance of 'valid' with sane defaults. func New() *Validator { once.Do(func() { loadValidatorOnce = &Validator{} }) return loadValidatorOnce } // newValidator returns a new instance of 'valid' with sane defaults. func newValidator() *Validator { once.Do(func() { loadValidatorOnce = &Validator{} }) return loadValidatorOnce } // Between check The field under validation must have a size between the given min and max. Strings, numerics, arrays, and files are evaluated in the same fashion as the size rule. func Between(v reflect.Value, params []string) bool { if len(params) != 2 { return false } switch v.Kind() { case reflect.String: min, _ := ToInt(params[0]) max, _ := ToInt(params[1]) return BetweenString(v.String(), min, max) case reflect.Slice, reflect.Map, reflect.Array: min, _ := ToInt(params[0]) max, _ := ToInt(params[1]) return DigitsBetweenInt64(int64(v.Len()), min, max) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: min, _ := ToInt(params[0]) max, _ := ToInt(params[1]) return DigitsBetweenInt64(v.Int(), min, max) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: min, _ := ToUint(params[0]) max, _ := ToUint(params[1]) return DigitsBetweenUint64(v.Uint(), min, max) case reflect.Float32, reflect.Float64: min, _ := ToFloat(params[0]) max, _ := ToFloat(params[1]) return DigitsBetweenFloat64(v.Float(), min, max) } panic(fmt.Sprintf("validator: Between unsupport Type %T", v.Interface())) } // DigitsBetween check The field under validation must have a length between the given min and max. 
func DigitsBetween(v reflect.Value, params []string) bool { if len(params) != 2 { return false } switch v.Kind() { case reflect.String, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: min, _ := ToInt(params[0]) max, _ := ToInt(params[1]) var value string switch v.Kind() { case reflect.String: value = v.String() case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: value = ToString(v.Int()) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: value = ToString(v.Uint()) } if value == "" || !IsNumeric(value) { return false } return BetweenString(value, min, max) } panic(fmt.Sprintf("validator: DigitsBetween unsupport Type %T", v.Interface())) } // Size The field under validation must have a size matching the given value. // For string data, value corresponds to the number of characters. // For numeric data, value corresponds to a given integer value. // For an array | map | slice, size corresponds to the count of the array | map | slice. func Size(v reflect.Value, param []string) bool { switch v.Kind() { case reflect.String: p, _ := ToInt(param[0]) return compareString(v.String(), p, "==") case reflect.Slice, reflect.Map, reflect.Array: p, _ := ToInt(param[0]) return compareInt64(int64(v.Len()), p, "==") case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: p, _ := ToInt(param[0]) return compareInt64(v.Int(), p, "==") case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: p, _ := ToUint(param[0]) return compareUint64(v.Uint(), p, "==") case reflect.Float32, reflect.Float64: p, _ := ToFloat(param[0]) return compareFloat64(v.Float(), p, "==") } panic(fmt.Sprintf("validator: Size unsupport Type %T", v.Interface())) } // Max is the validation function for validating if the current field's value is less than or equal to the param's value. 
func Max(v reflect.Value, param []string) bool { switch v.Kind() { case reflect.String: p, _ := ToInt(param[0]) return compareString(v.String(), p, "<=") case reflect.Slice, reflect.Map, reflect.Array: p, _ := ToInt(param[0]) return compareInt64(int64(v.Len()), p, "<=") case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: p, _ := ToInt(param[0]) return compareInt64(v.Int(), p, "<=") case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: p, _ := ToUint(param[0]) return compareUint64(v.Uint(), p, "<=") case reflect.Float32, reflect.Float64: p, _ := ToFloat(param[0]) return compareFloat64(v.Float(), p, "<=") } panic(fmt.Sprintf("validator: Max unsupport Type %T", v.Interface())) } // Min is the validation function for validating if the current field's value is greater than or equal to the param's value. func Min(v reflect.Value, param []string) bool { switch v.Kind() { case reflect.String: p, _ := ToInt(param[0]) return compareString(v.String(), p, ">=") case reflect.Slice, reflect.Map, reflect.Array: p, _ := ToInt(param[0]) return compareInt64(int64(v.Len()), p, ">=") case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: p, _ := ToInt(param[0]) return compareInt64(v.Int(), p, ">=") case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: p, _ := ToUint(param[0]) return compareUint64(v.Uint(), p, ">=") case reflect.Float32, reflect.Float64: p, _ := ToFloat(param[0]) return compareFloat64(v.Float(), p, ">=") } panic(fmt.Sprintf("validator: Min unsupport Type %T", v.Interface())) } // Same is the validation function for validating if the current field's value euqal the param's value. 
func Same(v reflect.Value, anotherField reflect.Value) bool { if v.Kind() != anotherField.Kind() { panic(fmt.Sprintf("validator: Same The two fields must be of the same type %T, %T", v.Interface(), anotherField.Interface())) } switch v.Kind() { case reflect.String: return v.String() == anotherField.String() case reflect.Slice, reflect.Map, reflect.Array: return compareInt64(int64(v.Len()), int64(anotherField.Len()), "==") case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return compareInt64(v.Int(), anotherField.Int(), "==") case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: return compareUint64(v.Uint(), anotherField.Uint(), "==") case reflect.Float32, reflect.Float64: return compareFloat64(v.Float(), anotherField.Float(), "==") } panic(fmt.Sprintf("validator: Lt unsupport Type %T", v.Interface())) } // Lt is the validation function for validating if the current field's value is less than the param's value. func Lt(v reflect.Value, anotherField reflect.Value) bool { if v.Kind() != anotherField.Kind() { panic(fmt.Sprintf("validator: Lt The two fields must be of the same type %T, %T", v.Interface(), anotherField.Interface())) } switch v.Kind() { case reflect.String: return compareString(v.String(), int64(utf8.RuneCountInString(anotherField.String())), "<") case reflect.Slice, reflect.Map, reflect.Array: return compareInt64(int64(v.Len()), int64(anotherField.Len()), "<") case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return compareInt64(v.Int(), anotherField.Int(), "<") case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: return compareUint64(v.Uint(), anotherField.Uint(), "<") case reflect.Float32, reflect.Float64: return compareFloat64(v.Float(), anotherField.Float(), "<") } panic(fmt.Sprintf("validator: Lt unsupport Type %T", v.Interface())) } // Lte is the validation function for validating if the current field's 
value is less than or equal to the param's value. func Lte(v reflect.Value, anotherField reflect.Value) bool { if v.Kind() != anotherField.Kind() { panic(fmt.Sprintf("validator: Lte The two fields must be of the same type %T, %T", v.Interface(), anotherField.Interface())) } switch v.Kind() { case reflect.String: return compareString(v.String(), int64(utf8.RuneCountInString(anotherField.String())), "<=") case reflect.Slice, reflect.Map, reflect.Array: return compareInt64(int64(v.Len()), int64(anotherField.Len()), "<=") case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return compareInt64(v.Int(), anotherField.Int(), "<=") case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: return compareUint64(v.Uint(), anotherField.Uint(), "<=") case reflect.Float32, reflect.Float64: return compareFloat64(v.Float(), anotherField.Float(), "<=") } panic(fmt.Sprintf("validator: Lte unsupport Type %T", v.Interface())) } // Gt is the validation function for validating if the current field's value is greater than to the param's value. 
func Gt(v reflect.Value, anotherField reflect.Value) bool { if v.Kind() != anotherField.Kind() { panic(fmt.Sprintf("validator: Gt The two fields must be of the same type %T, %T", v.Interface(), anotherField.Interface())) } switch v.Kind() { case reflect.String: return compareString(v.String(), int64(utf8.RuneCountInString(anotherField.String())), ">") case reflect.Slice, reflect.Map, reflect.Array: return compareInt64(int64(v.Len()), int64(anotherField.Len()), ">") case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return compareInt64(v.Int(), anotherField.Int(), ">") case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: return compareUint64(v.Uint(), anotherField.Uint(), ">") case reflect.Float32, reflect.Float64: return compareFloat64(v.Float(), anotherField.Float(), ">") } panic(fmt.Sprintf("validator: Gt unsupport Type %T", v.Interface())) } // Gte is the validation function for validating if the current field's value is greater than or equal to the param's value. 
func Gte(v reflect.Value, anotherField reflect.Value) bool { if v.Kind() != anotherField.Kind() { panic(fmt.Sprintf("validator: Gte The two fields must be of the same type %T, %T", v.Interface(), anotherField.Interface())) } switch v.Kind() { case reflect.String: return compareString(v.String(), int64(utf8.RuneCountInString(anotherField.String())), ">=") case reflect.Slice, reflect.Map, reflect.Array: return compareInt64(int64(v.Len()), int64(anotherField.Len()), ">=") case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return compareInt64(v.Int(), anotherField.Int(), ">=") case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: return compareUint64(v.Uint(), anotherField.Uint(), ">=") case reflect.Float32, reflect.Float64: return compareFloat64(v.Float(), anotherField.Float(), ">=") } panic(fmt.Sprintf("validator: Gte unsupport Type %T", v.Interface())) } // Distinct is the validation function for validating an attribute is unique among other values. 
func Distinct(v reflect.Value) bool { switch v.Kind() { case reflect.Slice, reflect.Map, reflect.Array: typ := v.Elem() switch typ.Kind() { case reflect.String: return DistinctString(v.Interface().([]string)) case reflect.Int: return DistinctInt(v.Interface().([]int)) case reflect.Int8: return DistinctInt8(v.Interface().([]int8)) case reflect.Int16: return DistinctInt16(v.Interface().([]int16)) case reflect.Int32: return DistinctInt32(v.Interface().([]int32)) case reflect.Int64: return DistinctInt64(v.Interface().([]int64)) case reflect.Float32: return DistinctFloat32(v.Interface().([]float32)) case reflect.Float64: return DistinctFloat64(v.Interface().([]float64)) case reflect.Uint: return DistinctUint(v.Interface().([]uint)) case reflect.Uint8: return DistinctUint8(v.Interface().([]uint8)) case reflect.Uint16: return DistinctUint16(v.Interface().([]uint16)) case reflect.Uint32: return DistinctUint32(v.Interface().([]uint32)) case reflect.Uint64: return DistinctUint64(v.Interface().([]uint64)) } } panic(fmt.Sprintf("validator: Distinct unsupport Type %T", v.Interface())) } func validateStruct(s interface{}, jsonNamespace []byte, structNamespace []byte) error { if s == nil { return nil } var err error val := reflect.ValueOf(s) if val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr { val = val.Elem() } // we only accept structs if val.Kind() != reflect.Struct { return fmt.Errorf("function only accepts structs; got %s", val.Kind()) } var errs Errors fields := cachedTypefields(val.Type()) for _, f := range fields { valuefield := val.Field(f.index[0]) err := newTypeValidator(valuefield, &f, val, jsonNamespace, structNamespace) if err != nil { errs = append(errs, err) } } if len(errs) > 0 { err = errs } return err } // ValidateStruct use tags for fields. // result will be equal to `false` if there are any errors. 
func ValidateStruct(s interface{}) error { newValidator() return validateStruct(s, nil, nil) } func newTypeValidator(v reflect.Value, f *field, o reflect.Value, jsonNamespace []byte, structNamespace []byte) (resultErr error) { if !v.IsValid() || f.omitEmpty && Empty(v) { return nil } name := string(append(jsonNamespace, f.nameBytes...)) structName := string(append(structNamespace, f.structName...)) if err := checkRequired(v, f, o, name, structName); err != nil { return err } for _, tag := range f.validTags { if validatefunc, ok := CustomTypeRuleMap.Get(tag.name); ok { if result := validatefunc(v, o, tag); !result { return &Error{ Name: name, StructName: structName, Err: formatsMessages(tag, v, f, o), Tag: tag.name, } } } } switch v.Kind() { case reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64, reflect.String: for _, tag := range f.validTags { if err := checkDependentRules(tag, f, v, o, name, structName); err != nil { return err } if validfunc, ok := RuleMap[tag.name]; ok { isValid := validfunc(v) if !isValid { return &Error{ Name: name, StructName: structName, Err: formatsMessages(tag, v, f, o), Tag: tag.name, } } } if validfunc, ok := ParamRuleMap[tag.name]; ok { isValid := validfunc(v, tag.params) if !isValid { return &Error{ Name: name, StructName: structName, Err: formatsMessages(tag, v, f, o), Tag: tag.name, } } } switch v.Kind() { case reflect.String: if validfunc, ok := StringRulesMap[tag.name]; ok { isValid := validfunc(v.String()) if !isValid { return &Error{ Name: name, StructName: structName, Err: formatsMessages(tag, v, f, o), Tag: tag.name, } } } } } return nil case reflect.Map: if v.Type().Key().Kind() != reflect.String { return &UnsupportedTypeError{v.Type()} } for _, tag := range f.validTags { if err := checkDependentRules(tag, f, v, o, name, structName); err != nil { return err } if 
validfunc, ok := ParamRuleMap[tag.name]; ok { isValid := validfunc(v, tag.params) if !isValid { return &Error{ Name: name, StructName: structName, Err: formatsMessages(tag, v, f, o), Tag: tag.name, } } } } var sv stringValues sv = v.MapKeys() sort.Sort(sv) for _, k := range sv { var err error value := v.MapIndex(k) if value.Kind() == reflect.Interface { value = value.Elem() } if value.Kind() == reflect.Struct || value.Kind() == reflect.Ptr { newJSONNamespace := append(append(jsonNamespace, f.nameBytes...), '.') newJSONNamespace = append(append(newJSONNamespace, []byte(k.String())...), '.') newstructNamespace := append(append(structNamespace, f.structNameBytes...), '.') newstructNamespace = append(append(newstructNamespace, []byte(k.String())...), '.') err = validateStruct(value.Interface(), newJSONNamespace, newstructNamespace) if err != nil { return err } } } return nil case reflect.Slice, reflect.Array: for _, tag := range f.validTags { if err := checkDependentRules(tag, f, v, o, name, structName); err != nil { return err } if validfunc, ok := ParamRuleMap[tag.name]; ok { isValid := validfunc(v, tag.params) if !isValid { return &Error{ Name: name, StructName: structName, Err: formatsMessages(tag, v, f, o), Tag: tag.name, } } } } for i := 0; i < v.Len(); i++ { var err error value := v.Index(i) if value.Kind() == reflect.Interface { value = value.Elem() } if value.Kind() == reflect.Struct || value.Kind() == reflect.Ptr { newJSONNamespace := append(append(jsonNamespace, f.nameBytes...), '.') newJSONNamespace = append(append(newJSONNamespace, []byte(strconv.Itoa(i))...), '.') newStructNamespace := append(append(structNamespace, f.structNameBytes...), '.') newStructNamespace = append(append(newStructNamespace, []byte(strconv.Itoa(i))...), '.') err = validateStruct(v.Index(i).Interface(), newJSONNamespace, newStructNamespace) if err != nil { return err } } } return nil case reflect.Interface: // If the value is an interface then encode its element if v.IsNil() { return 
nil } return validateStruct(v.Interface(), jsonNamespace, structNamespace) case reflect.Ptr: // If the value is a pointer then check its element if v.IsNil() { return nil } jsonNamespace = append(append(jsonNamespace, f.nameBytes...), '.') structNamespace = append(append(structNamespace, f.structNameBytes...), '.') return validateStruct(v.Interface(), jsonNamespace, structNamespace) case reflect.Struct: jsonNamespace = append(append(jsonNamespace, f.nameBytes...), '.') structNamespace = append(append(structNamespace, f.structNameBytes...), '.') return validateStruct(v.Interface(), jsonNamespace, structNamespace) default: return &UnsupportedTypeError{v.Type()} } } // Empty determine whether a variable is empty func Empty(v reflect.Value) bool { switch v.Kind() { case reflect.String, reflect.Array: return v.Len() == 0 case reflect.Map, reflect.Slice: return v.Len() == 0 || v.IsNil() case reflect.Bool: return !v.Bool() case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return v.Int() == 0 case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: return v.Uint() == 0 case reflect.Float32, reflect.Float64: return v.Float() == 0 case reflect.Interface, reflect.Ptr: return v.IsNil() } return reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) } // Error returns string equivalent for reflect.Type func (e *UnsupportedTypeError) Error() string { return "validator: unsupported type: " + e.Type.String() } func (sv stringValues) Len() int { return len(sv) } func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) } func (sv stringValues) get(i int) string { return sv[i].String() } // Required check value required when anotherField str is a member of the set of strings params func Required(v reflect.Value) bool { return !Empty(v) } // RequiredIf check value required when anotherField str is a member of the set 
of strings params func RequiredIf(v reflect.Value, anotherField reflect.Value, params []string, tag *ValidTag) bool { if anotherField.Kind() == reflect.Interface || anotherField.Kind() == reflect.Ptr { anotherField = anotherField.Elem() } if !anotherField.IsValid() { return true } switch anotherField.Kind() { case reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64, reflect.String: value := ToString(anotherField) if InString(value, params) { if Empty(v) { if tag != nil { if tag.messageParameter == nil { tag.messageParameter = make(messageParameterMap) } tag.messageParameter["value"] = value } return false } } case reflect.Map: values := []string{} var sv stringValues sv = anotherField.MapKeys() sort.Sort(sv) for _, k := range sv { value := v.MapIndex(k) if value.Kind() == reflect.Interface || value.Kind() == reflect.Ptr { value = value.Elem() } if value.Kind() != reflect.Struct { values = append(values, ToString(value.Interface())) } else { panic(fmt.Sprintf("validator: RequiredIf unsupport Type %T", value.Interface())) } } for _, value := range values { if InString(value, params) { if Empty(v) { if tag != nil { if tag.messageParameter == nil { tag.messageParameter = make(messageParameterMap) } tag.messageParameter["value"] = value } return false } } } case reflect.Slice, reflect.Array: values := []string{} for i := 0; i < v.Len(); i++ { value := v.Index(i) if value.Kind() == reflect.Interface || value.Kind() == reflect.Ptr { value = value.Elem() } if value.Kind() != reflect.Struct { values = append(values, ToString(value.Interface())) } else { panic(fmt.Sprintf("validator: RequiredIf unsupport Type %T", value.Interface())) } } for _, value := range values { if InString(value, params) { if Empty(v) { if tag != nil { if tag.messageParameter == nil { tag.messageParameter = make(messageParameterMap) } 
tag.messageParameter["value"] = value } return false } } } default: panic(fmt.Sprintf("validator: RequiredIf unsupport Type %T", anotherField.Interface())) } return true } // RequiredUnless check value required when anotherField str is a member of the set of strings params func RequiredUnless(v reflect.Value, anotherField reflect.Value, params []string) bool { if anotherField.Kind() == reflect.Interface || anotherField.Kind() == reflect.Ptr { anotherField = anotherField.Elem() } if !anotherField.IsValid() { return true } switch anotherField.Kind() { case reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64, reflect.String: value := ToString(anotherField) if !InString(value, params) { if Empty(v) { return false } } case reflect.Map: values := []string{} var sv stringValues sv = anotherField.MapKeys() sort.Sort(sv) for _, k := range sv { value := v.MapIndex(k) if value.Kind() == reflect.Interface || value.Kind() == reflect.Ptr { value = value.Elem() } if value.Kind() != reflect.Struct { values = append(values, ToString(value.Interface())) } else { panic(fmt.Sprintf("validator: requiredUnless unsupport Type %T", value.Interface())) } } for _, value := range values { if !InString(value, params) { if Empty(v) { return false } } } case reflect.Slice, reflect.Array: values := []string{} for i := 0; i < v.Len(); i++ { value := v.Index(i) if value.Kind() == reflect.Interface || value.Kind() == reflect.Ptr { value = value.Elem() } if value.Kind() != reflect.Struct { values = append(values, ToString(value.Interface())) } else { panic(fmt.Sprintf("validator: requiredUnless unsupport Type %T", value.Interface())) } } for _, value := range values { if !InString(value, params) { if Empty(v) { return false } } } default: panic(fmt.Sprintf("validator: requiredUnless unsupport Type %T", anotherField.Interface())) } return 
true } // allFailingRequired validate that an attribute exists when all other attributes do not. func allFailingRequired(parameters []string, v reflect.Value) bool { for _, p := range parameters { anotherField, err := findField(p, v) if err != nil { continue } if !Empty(anotherField) { return false } } return true } // anyFailingRequired determine if any of the given attributes fail the required test. func anyFailingRequired(parameters []string, v reflect.Value) bool { for _, p := range parameters { anotherField, err := findField(p, v) if err != nil { return true } if Empty(anotherField) { return true } } return false } func checkRequired(v reflect.Value, f *field, o reflect.Value, name string, structName string) error { for _, tag := range f.requiredTags { isError := false switch tag.name { case "required": isError = !Required(v) case "requiredIf": anotherField, err := findField(tag.params[0], o) if err == nil && len(tag.params) >= 2 && !RequiredIf(v, anotherField, tag.params[1:], tag) { isError = true } case "requiredUnless": anotherField, err := findField(tag.params[0], o) if err == nil && len(tag.params) >= 2 && !RequiredUnless(v, anotherField, tag.params[1:]) { isError = true } case "requiredWith": if !RequiredWith(tag.params, v) { isError = true } case "requiredWithAll": if !RequiredWithAll(tag.params, v) { isError = true } case "requiredWithout": if !RequiredWithout(tag.params, v) { isError = true } case "requiredWithoutAll": if !RequiredWithoutAll(tag.params, v) { isError = true } } if isError { return &Error{ Name: name, StructName: structName, Err: formatsMessages(tag, v, f, o), Tag: tag.name, } } } return nil } // RequiredWith The field under validation must be present and not empty only if any of the other specified fields are present. 
func RequiredWith(otherFields []string, v reflect.Value) bool { if !allFailingRequired(otherFields, v) { return Required(v) } return true } // RequiredWithAll The field under validation must be present and not empty only if all of the other specified fields are present. func RequiredWithAll(otherFields []string, v reflect.Value) bool { if !anyFailingRequired(otherFields, v) { return Required(v) } return true } // RequiredWithout The field under validation must be present and not empty only when any of the other specified fields are not present. func RequiredWithout(otherFields []string, v reflect.Value) bool { if anyFailingRequired(otherFields, v) { return Required(v) } return true } // RequiredWithoutAll The field under validation must be present and not empty only when all of the other specified fields are not present. func RequiredWithoutAll(otherFields []string, v reflect.Value) bool { if allFailingRequired(otherFields, v) { return Required(v) } return true } func formatsMessages(validTag *ValidTag, v reflect.Value, f *field, o reflect.Value) error { validator := newValidator() var message string var ok bool if message, ok = validator.CustomMessage[f.structName+"."+validTag.messageName]; ok { return fmt.Errorf(message) } if validator.Translator != nil { message = validator.Translator.Trans(f.structName, validTag.messageName, f.attribute) message = replaceAttributes(message, "", validTag.messageParameter) } else { message, ok = MessageMap[validTag.messageName] if ok { attribute := f.attribute if customAttribute, ok := validator.Attributes[f.structName]; ok { attribute = customAttribute } message = replaceAttributes(message, attribute, validTag.messageParameter) } } if message != "" { if shouldReplaceRequiredWith(validTag.name) { message = replaceRequiredWith(message, validTag.params, validator) } if shouldReplaceRequiredIf(validTag.name) { message = replaceRequiredIf(message, o, validTag.params[0], validator) } if validTag.name == "same" { message = 
replaceSame(message, o, validTag.params[0], validator) } return fmt.Errorf(message) } return fmt.Errorf("validator: undefined message : %s", validTag.messageName) } func replaceAttributes(message string, attribute string, messageParameter messageParameterMap) string { message = strings.Replace(message, ":attribute", attribute, -1) for key, value := range messageParameter { message = strings.Replace(message, ":"+key, value, -1) } return message } func replaceRequiredWith(message string, attributes []string, validator *Validator) string { first := true var buff bytes.Buffer for _, v := range attributes { if first { first = false } else { buff.WriteByte(' ') buff.WriteByte('/') buff.WriteByte(' ') } if validator.Translator != nil { if customAttribute, ok := validator.Translator.attributes[validator.Translator.locale][v]; ok { buff.WriteString(customAttribute) continue } } if customAttribute, ok := validator.Attributes[v]; ok { buff.WriteString(customAttribute) continue } buff.WriteString(v) } return strings.Replace(message, ":values", buff.String(), -1) } func shouldReplaceRequiredWith(tag string) bool { switch tag { case "requiredWith", "requiredWithAll", "requiredWithout", "requiredWithoutAll": return true default: return false } } func getDisplayableAttribute(o reflect.Value, attribute string, validator *Validator) string { attributes := strings.Split(attribute, ".") if len(attributes) > 0 { attribute = o.Type().Name() + attributes[0] } else { attribute = strings.Join(attributes[len(attributes)-2:], ".") } if validator.Translator != nil { if customAttribute, ok := validator.Translator.attributes[validator.Translator.locale][attribute]; ok { return customAttribute } } if customAttribute, ok := validator.Attributes[attribute]; ok { return customAttribute } return attributes[len(attributes)-1] } func replaceSame(message string, o reflect.Value, attribute string, validator *Validator) string { other := getDisplayableAttribute(o, attribute, validator) return 
strings.Replace(message, ":other", other, -1) } func replaceRequiredIf(message string, o reflect.Value, attribute string, validator *Validator) string { other := getDisplayableAttribute(o, attribute, validator) return strings.Replace(message, ":other", other, -1) } func shouldReplaceRequiredIf(tag string) bool { switch tag { case "requiredIf", "requiredUnless": return true default: return false } } func findField(fieldName string, v reflect.Value) (reflect.Value, error) { fields := strings.Split(fieldName, ".") current := v.FieldByName(fields[0]) i := 1 if len(fields) > i { for true { if current.Kind() == reflect.Interface || current.Kind() == reflect.Ptr { current = current.Elem() } if !current.IsValid() { return current, fmt.Errorf("validator: findField Struct is nil") } name := fields[i] current = current.FieldByName(name) if i == len(fields)-1 { break } i++ } } return current, nil } func checkDependentRules(validTag *ValidTag, f *field, v reflect.Value, o reflect.Value, name string, structName string) error { isValid := true var anotherField reflect.Value var err error switch validTag.name { case "gt", "gte", "lt", "lte", "same": anotherField, err = findField(validTag.params[0], o) if err != nil { return nil } } switch validTag.name { case "gt": isValid = Gt(v, anotherField) case "gte": isValid = Gte(v, anotherField) case "lt": isValid = Lt(v, anotherField) case "lte": isValid = Lte(v, anotherField) case "same": isValid = Same(v, anotherField) } if !isValid { return &Error{ Name: name, StructName: structName, Err: formatsMessages(validTag, v, f, o), Tag: validTag.name, } } return nil }
validator.go
0.728265
0.476458
validator.go
starcoder
package clustering import ( "errors" "math" "reflect" "log" "fmt" ) type Coordinate interface { GetValue()interface{} SetValue(interface{})(error) PoweredDistanceTo(Coordinate,float64)(float64) GetAbsoluteDistanceTo(Coordinate)(float64) AddValue(interface{})() NormalizeValue(float64)() GetZeroValue()(interface{}) } func NewCoordinate(value interface{})(c Coordinate){ switch v:=value.(type){ case float64: c = NewFloat(v) case int: c = NewFloat(float64(v)) } return } type floatCoordinate float64 func NewFloat(value float64)(coordinate *floatCoordinate){ coordinate = new(floatCoordinate) *coordinate = floatCoordinate(value) return } func (c *floatCoordinate) GetValue()(interface{}){ return float64(*c) } func (c *floatCoordinate) SetValue(value interface{})(err error){ if value == nil { *c = floatCoordinate(0) } else if v,ok := value.(float64); ok { *c = floatCoordinate(v) } else if v,ok := value.(floatCoordinate); ok { *c = v } else if v,ok := value.(*floatCoordinate); ok { *c = *v } else { err = errors.New("Wrong type used; Value must be a float64 or nil.") } return } func (c *floatCoordinate) PoweredDistanceTo(other Coordinate, exponend float64)(distance float64) { if c.ValueOfEqualType(other) { // we can calculate the dinstace between two points of equal value type if other == nil { distance = math.MaxFloat64 } else { distance = math.Pow(float64(*c) - other.GetValue().(float64),exponend) } } else { log.Fatal("Coordinates of different type cannot be distance compared...") } return } func (c *floatCoordinate) GetAbsoluteDistanceTo(other Coordinate)(distance float64){ if c.ValueOfEqualType(other) { // we can calculate the dinstace between two points of equal value type if other == nil { distance = math.MaxFloat64 } else { distance = math.Abs(float64(*c) - other.GetValue().(float64)) } } else { log.Fatal("Coordinates of different type cannot be distance compared...") } return } func (c *floatCoordinate) AddValue(value interface{})() { if v, ok := value.(float64); ok { 
*c += floatCoordinate(v) } } func (c *floatCoordinate) NormalizeValue(denominator float64)() { if denominator != float64(0) { *c /= floatCoordinate(denominator) } } func (c *floatCoordinate) GetZeroValue()(interface{}){return float64(0)} func (c *floatCoordinate) ValueOfEqualType(other Coordinate)(equality bool){ if other != nil { if reflect.TypeOf(c.GetValue()) == reflect.TypeOf(other.GetValue()) { // types of value fields are equal equality = true } else { switch other.GetValue().(type) { case int: equality = true case uint: equality = true default: equality = false } } } return } func (c *floatCoordinate) String()(string){ return fmt.Sprintf("%.2f",*c) }
auxiliary/clustering/coordinate.go
0.549157
0.407746
coordinate.go
starcoder
package iso20022 // Chain of parties involved in the settlement of a transaction, including receipts and deliveries, book transfers, treasury deals, or other activities, resulting in the movement of a security or amount of money from one account to another. type SettlementParties2 struct { // First party in the settlement chain. In a plain vanilla settlement, it is the Central Securities Depository where the counterparty requests to receive the financial instrument or from where the counterparty delivers the financial instruments. Depository *PartyIdentification36 `xml:"Dpstry,omitempty"` // Party that, in a settlement chain interacts with the depository. Party1 *PartyIdentificationAndAccount16 `xml:"Pty1,omitempty"` // Party that, in a settlement chain interacts with the party 1. Party2 *PartyIdentificationAndAccount16 `xml:"Pty2,omitempty"` // Party that, in a settlement chain interacts with the party 2. Party3 *PartyIdentificationAndAccount16 `xml:"Pty3,omitempty"` // Party that, in a settlement chain interacts with the party 3. Party4 *PartyIdentificationAndAccount16 `xml:"Pty4,omitempty"` // Party that, in a settlement chain interacts with the party 4. 
Party5 *PartyIdentificationAndAccount16 `xml:"Pty5,omitempty"` } func (s *SettlementParties2) AddDepository() *PartyIdentification36 { s.Depository = new(PartyIdentification36) return s.Depository } func (s *SettlementParties2) AddParty1() *PartyIdentificationAndAccount16 { s.Party1 = new(PartyIdentificationAndAccount16) return s.Party1 } func (s *SettlementParties2) AddParty2() *PartyIdentificationAndAccount16 { s.Party2 = new(PartyIdentificationAndAccount16) return s.Party2 } func (s *SettlementParties2) AddParty3() *PartyIdentificationAndAccount16 { s.Party3 = new(PartyIdentificationAndAccount16) return s.Party3 } func (s *SettlementParties2) AddParty4() *PartyIdentificationAndAccount16 { s.Party4 = new(PartyIdentificationAndAccount16) return s.Party4 } func (s *SettlementParties2) AddParty5() *PartyIdentificationAndAccount16 { s.Party5 = new(PartyIdentificationAndAccount16) return s.Party5 }
SettlementParties2.go
0.679604
0.498047
SettlementParties2.go
starcoder
package generalized_suffix_tree import "strings" type Tree struct { Root *Node } type Node struct { Start int Children map[string]*Node } const Terminators = "0123456789" // Can be any characters in any order. // Construction func NewGST(a ...string) *Tree { root := &Node{ -1, map[string]*Node{}, } for i, s := range a { terminator := Terminators[i : i+1] insertString(root, s+terminator) } return &Tree{root} } func insertString(n *Node, s string) { for i := range s { insertSuffix(n, s[i:], i) } } func insertSuffix(n *Node, suffix string, start int) { if len(suffix) == 0 { return } for edge, child := range n.Children { p := commonPrefixLen(edge, suffix) if p == 0 { continue } if p == len(edge) { // The suffix contains the entire edge. // Insert the trimmed suffix one level down. insertSuffix(child, suffix[p:], start) return } // There is a partial match between the edge and the suffix. // Split the edge in two. mid := &Node{ -1, map[string]*Node{}, } mid.Children[edge[p:]] = child delete(n.Children, edge) n.Children[edge[:p]] = mid // Insert the trimmed suffix one level down. insertSuffix(mid, suffix[p:], start) return } newNode := &Node{ start, map[string]*Node{}, } n.Children[suffix] = newNode } func commonPrefixLen(a, b string) int { i := 0 for i < len(a) && i < len(b) && a[i] == b[i] { i++ } return i } // Operations type Callback func(string, map[int][]int) func (n *Node) DepthFirstSearch(prefix string, f Callback) map[int][]int { starts := map[int][]int{} if len(n.Children) == 0 { // Leaf node. terminator := prefix[len(prefix)-1:] index := strings.Index(Terminators, terminator) starts[index] = []int{n.Start} } else { // Non-leaf node. for edge, child := range n.Children { childStarts := child.DepthFirstSearch(prefix+edge, f) merge(starts, childStarts) } } if f != nil { f(prefix, starts) } return starts } func merge(dst, src map[int][]int) { for k, v := range src { dst[k] = append(dst[k], v...) 
} } // Debugging // func (t *Tree) Log() { // log(t.Root, 0) // } // func log(n *Node, indent int) { // fmt.Printf("%s(%d)\n", strings.Repeat(" ", indent), n.Start) // for edge, child := range n.Children { // fmt.Printf("%s%s\n", strings.Repeat(" ", indent+2), edge) // log(child, indent+4) // } // }
generalized_suffix_tree/go/generalized_suffix_tree.go
0.501465
0.424293
generalized_suffix_tree.go
starcoder
package server import ( "fmt" "sync" ) type neighborManager struct { neighbors []*neighbor neighborsMu sync.Mutex } func newNeighborManager() *neighborManager { return &neighborManager{ neighbors: make([]*neighbor, 0), } } func (nm *neighborManager) addNeighbor(n *neighbor) error { nm.neighborsMu.Lock() defer nm.neighborsMu.Unlock() for i := range nm.neighbors { if nm.neighbors[i].vrfID == n.vrfID && nm.neighbors[i].peerAddress == n.peerAddress { return fmt.Errorf("Unable to add neighbor %s on VRF %d: exists", n.peerAddress, n.vrfID) } } nm.neighbors = append(nm.neighbors, n) return nil } func (nm *neighborManager) getNeighbor(vrfID uint64, addr [16]byte) *neighbor { nm.neighborsMu.Lock() defer nm.neighborsMu.Unlock() for i := range nm.neighbors { if nm.neighbors[i].vrfID == vrfID && nm.neighbors[i].peerAddress == addr { return nm.neighbors[i] } } return nil } func (nm *neighborManager) neighborDown(vrfID uint64, addr [16]byte) error { nm.neighborsMu.Lock() defer nm.neighborsMu.Unlock() return nm._neighborDown(vrfID, addr) } func (nm *neighborManager) _neighborDown(vrfID uint64, addr [16]byte) error { for i := range nm.neighbors { if nm.neighbors[i].vrfID != vrfID || nm.neighbors[i].peerAddress != addr { continue } if nm.neighbors[i].fsm.ipv4Unicast != nil { nm.neighbors[i].fsm.ipv4Unicast.bmpDispose() } if nm.neighbors[i].fsm.ipv6Unicast != nil { nm.neighbors[i].fsm.ipv6Unicast.bmpDispose() } nm.neighbors = append(nm.neighbors[:i], nm.neighbors[i+1:]...) return nil } return fmt.Errorf("Neighbor %d/%v not found", vrfID, addr) } func (nm *neighborManager) disposeAll() { nm.neighborsMu.Lock() defer nm.neighborsMu.Unlock() for len(nm.neighbors) > 0 { nm._neighborDown(nm.neighbors[0].vrfID, nm.neighbors[0].peerAddress) } } func (nm *neighborManager) list() []*neighbor { nm.neighborsMu.Lock() defer nm.neighborsMu.Unlock() ret := make([]*neighbor, len(nm.neighbors)) for i := range nm.neighbors { ret[i] = nm.neighbors[i] } return ret }
protocols/bgp/server/bmp_neighbor_manager.go
0.585575
0.437223
bmp_neighbor_manager.go
starcoder
package main const ( helpConnect = `Connects to a beanstalk server. With no arguments, will try to connect to the 127.0.0.1:11300. Can also provide host and port arguments. To connect to a beanstalk server on <HOST> using port 11300: connect <HOST> To connect to a beanstalk server on <HOST> using port <PORT>: connect <HOST> <PORT> Will error if a connection cannot be established` helpDelete = `Deletes a job with the specified id: delete <ID> This command is available via the 'del' and 'dj' aliases.` helpDeleteAll = `Deletes all %s jobs on the current tube. Can also delete jobs not on the current tube by passing a tube argument: delete-%s <TUBE> This command is available via the 'd%c' alias` helpDisconnect = `Disconnects from the currently connected beanstalk server` helpInfo = `Provides information, including hostname and port, about the current connection` helpKick = `Kicks all jobs from the current tube. Alternatively the number of jobs can be specified as an argument: kick <NUM_JOBS>` helpListTubes = `List the tubes for the connected beanstalk server. Outputs a table of results, display tube, and details of the number of ready, delayed and buried jobs. This command is available via the 'lt' and 'list' aliases` helpPeek = `Looks at the job at the front of the %s queue. A tube argument can also be provided, otherwise uses the current active tube: peek-%s <TUBE> This command is available via the 'p%c' alias` helpPut = `Opens an editor and allows data to be put onto the current tube. Alternatively a tube can be provided: put <TUBE> Will first attempt to open an editor defined with the $EDITOR environment variable, otherwise defaults to vi.` helpStats = `Displays statistics for the connected beanstalk server` helpStatsJob = `Displays statistics for the specified job: stats-job <JOB> This command is available via the 'sj' alias` helpStatsTube = `Displays stats for the current tube. 
Alternatively a tube argument can be provided: stats-tube <TUBE> This command is available via the 'st' alias` helpUse = `Change the current tube in use: use <TUBE> This command is available via the 'ut' alias` helpVersion = `Displays beany version information` )
help.go
0.704364
0.40869
help.go
starcoder
package mu import ( "math" ) // ODEs func RungeKutta(dy func(float64, float64) float64, tf float64, t float64, y float64, h float64) float64 { if tf <= t { return y } rk := []float64{h*dy(t, y)} for i := 1 ; i < 3 ; i++ { rk = append(rk, h*dy(t + h/2, y + rk[i-1]/2)) } rk = append(rk, h*dy(t + h, y + rk[len(rk)-1])) y += (rk[0] + 2*rk[1] + 2*rk[2] + rk[3])/6 t += h return RungeKutta(dy, tf, t, y, h) } func Heun(dy func(float64, float64) float64, tf float64, t float64, y float64, h float64) float64 { if tf <= t { return y } y += h/2*(dy(t, y) + dy(t + h, y + h*dy(t, y))) t += h return Heun(dy, tf, t, y, h) } func Midpoint(dy func(float64, float64) float64, tf float64, t float64, y float64, h float64) float64 { if tf <= t { return y } y += h*dy(t + h/2, y + h/2*dy(t, y)) t += h return Midpoint(dy, tf, t, y, h) } func Euler(dy func(float64, float64) float64, tf float64, t float64, y float64, h float64) float64 { // should be used for comparison/education/curiosity only for t < tf { t += h y += h*dy(t, y) } return y } // Integrals func Trapezoid(f func(float64) float64, a float64, b float64, n int) float64 { h := (b - a)/float64(n) var sum float64 = 0 x := []float64{} for i := 0 ; i < n + 1 ; i++ { x = append(x, a + h*float64(i)) } for i := 1 ; i < n ; i++ { sum += f(x[i]) } return h*(sum + (f(a) + f(b))/2) } func Simpson(f func(float64) float64, a float64, b float64, n int) float64 { h := (b - a)/float64(2*n) x := []float64{} sum := []float64{0, 0} for i := 0 ; i < 2*n ; i++ { x = append(x, a + h*float64(i)) } for i := 0 ; i < n ; i++ { sum[0] += f(x[2*i]) } for i := 1 ; i < n ; i++ { sum[1] += f(x[2*i+1]) } return h/3*(f(a) + f(b) + 4*sum[0] + 2*sum[1]) } // First Order Differentiation func ForwardDifff(f func(float64) float64, x float64, h float64) float64 { return (f(x + h) - f(x))/h } func CenteredDiff(f func(float64) float64, x float64, h float64) float64 { return (f(x + h) - f(x - h))/2*h } // Higher Order Differentiation func NewtonDiff(f func(float64) float64, 
x float64, h float64, n uint64) float64 { if n == 0 { return f(x) } sum := 0.0 var k uint64 = 0 for k <= n { sum += math.Pow(-1, float64(k) + float64(n))*Binomial(n, k)*f(x + float64(k)*h) k++ } return sum }
analysis.go
0.781289
0.582164
analysis.go
starcoder
package compute import ( "log" "math" "sort" "sync" "time" "github.com/gbl08ma/sqalx" "github.com/underlx/disturbancesmlx/types" ) var rootSqalxNode sqalx.Node var mainLog *log.Logger // Initialize initializes the package func Initialize(snode sqalx.Node, log *log.Logger) { rootSqalxNode = snode mainLog = log } // TripsScatterplotNumTripsVsAvgSpeedPoint represents a datapoint for the TripsScatterplotNumTripsVsAvgSpeed scatterplot type TripsScatterplotNumTripsVsAvgSpeedPoint struct { DayOfWeek time.Weekday Hour int NumUsers int AverageSpeed float64 } // TripsScatterplotNumTripsVsAvgSpeed returns data for a scatterplot showing possible relations between the number of users in the network and the average trip speed func TripsScatterplotNumTripsVsAvgSpeed(node sqalx.Node, fromTime time.Time, toTime time.Time, threads int) ([]TripsScatterplotNumTripsVsAvgSpeedPoint, error) { tx, err := node.Beginx() if err != nil { return []TripsScatterplotNumTripsVsAvgSpeedPoint{}, err } defer tx.Commit() // read-only tx tripIDs, err := types.GetTripIDsBetween(tx, fromTime, toTime) if err != nil { return []TripsScatterplotNumTripsVsAvgSpeedPoint{}, err } if len(tripIDs) == 0 { return []TripsScatterplotNumTripsVsAvgSpeedPoint{}, nil } points := []TripsScatterplotNumTripsVsAvgSpeedPoint{} processTrips := func(tripIDsPart []string) ([]TripsScatterplotNumTripsVsAvgSpeedPoint, error) { tx, err := rootSqalxNode.Beginx() if err != nil { return []TripsScatterplotNumTripsVsAvgSpeedPoint{}, err } defer tx.Commit() // read-only tx // instantiate each trip from DB individually // (instead of using types.GetTrips) // to reduce memory usage thisPoints := []TripsScatterplotNumTripsVsAvgSpeedPoint{} for _, tripID := range tripIDsPart { trip, err := types.GetTrip(tx, tripID) if err != nil { return []TripsScatterplotNumTripsVsAvgSpeedPoint{}, err } if trip.EndTime.Sub(trip.StartTime) > 2*time.Hour { continue } point := TripsScatterplotNumTripsVsAvgSpeedPoint{ DayOfWeek: trip.StartTime.Weekday(), Hour: 
trip.StartTime.Hour(), } simTrips, err := trip.SimultaneousTripIDs(tx, 2*time.Hour) if err != nil { return []TripsScatterplotNumTripsVsAvgSpeedPoint{}, err } point.NumUsers = len(simTrips) if err != nil { return []TripsScatterplotNumTripsVsAvgSpeedPoint{}, err } // round to multiples of 5 so there aren't too many "buckets" on the axis point.NumUsers = int(math.Round(float64(point.NumUsers)/5) * 5) point.AverageSpeed, _, _, err = trip.AverageSpeed(tx) if err != nil || point.AverageSpeed > 50 || point.AverageSpeed < 10 || math.IsNaN(point.AverageSpeed) { continue } // round to units so there aren't too many "buckets" on the axis point.AverageSpeed = math.Round(point.AverageSpeed) thisPoints = append(thisPoints, point) } return thisPoints, nil } var wg sync.WaitGroup var appendMutex sync.Mutex launched := 0 for index := 0; index < len(tripIDs); index += 1000 { wg.Add(1) go func(start, end int) { if end > len(tripIDs) { end = len(tripIDs) } p, err := processTrips(tripIDs[start:end]) if err != nil { mainLog.Fatalln(err) } appendMutex.Lock() points = append(points, p...) appendMutex.Unlock() mainLog.Println("TripsScatterplotNumTripsVsAvgSpeed processed", start, "to", end-1) wg.Done() }(index, index+1000) launched++ if launched >= threads { wg.Wait() launched = 0 } } wg.Wait() sort.Slice(points, func(i, j int) bool { if points[i].DayOfWeek == points[j].DayOfWeek { if points[i].Hour == points[j].Hour { return points[i].NumUsers < points[j].NumUsers } return points[i].Hour < points[j].Hour } return points[i].DayOfWeek < points[j].DayOfWeek }) return points, nil }
compute/compute.go
0.572125
0.406214
compute.go
starcoder
// Package planner contains a query planner for Rego queries. package planner import ( "fmt" "github.com/open-policy-agent/opa/ast" "github.com/open-policy-agent/opa/internal/ir" ) type planiter func() error type binaryiter func(ir.Local, ir.Local) error // Planner implements a query planner for Rego queries. type Planner struct { strings []ir.StringConst blocks []ir.Block curr *ir.Block vars map[ast.Var]ir.Local queries []ast.Body ltarget ir.Local lcurr ir.Local } // New returns a new Planner object. func New() *Planner { return &Planner{ lcurr: ir.Input + 1, vars: map[ast.Var]ir.Local{ ast.InputRootDocument.Value.(ast.Var): ir.Input, }, } } // WithQueries sets the query set to generate a plan for. func (p *Planner) WithQueries(queries []ast.Body) *Planner { p.queries = queries return p } // Plan returns a IR plan for the policy query. func (p *Planner) Plan() (*ir.Policy, error) { for _, q := range p.queries { p.curr = &ir.Block{} defined := false if err := p.planQuery(q, 0, func() error { p.appendStmt(ir.ReturnStmt{ Code: ir.Defined, }) defined = true return nil }); err != nil { return nil, err } if defined { p.blocks = append(p.blocks, *p.curr) } } p.blocks = append(p.blocks, ir.Block{ Stmts: []ir.Stmt{ ir.ReturnStmt{ Code: ir.Undefined, }, }, }) policy := ir.Policy{ Static: ir.Static{ Strings: p.strings, }, Plan: ir.Plan{ Blocks: p.blocks, }, } return &policy, nil } func (p *Planner) planQuery(q ast.Body, index int, iter planiter) error { if index >= len(q) { return iter() } return p.planExpr(q[index], func() error { return p.planQuery(q, index+1, iter) }) } // TODO(tsandall): improve errors to include location information. 
func (p *Planner) planExpr(e *ast.Expr, iter planiter) error { if e.Negated { return p.planNot(e, iter) } if len(e.With) > 0 { return fmt.Errorf("with keyword not implemented") } if e.IsCall() { return p.planExprCall(e, iter) } return p.planExprTerm(e, iter) } func (p *Planner) planNot(e *ast.Expr, iter planiter) error { cond := p.newLocal() p.appendStmt(ir.MakeBooleanStmt{ Value: true, Target: cond, }) not := ir.NotStmt{ Cond: cond, } prev := p.curr p.curr = &not.Block if err := p.planExpr(e.Complement(), func() error { p.appendStmt(ir.AssignBooleanStmt{ Value: false, Target: cond, }) return nil }); err != nil { return err } p.curr = prev p.appendStmt(not) truth := p.newLocal() p.appendStmt(ir.MakeBooleanStmt{ Value: true, Target: truth, }) p.appendStmt(ir.EqualStmt{ A: cond, B: truth, }) return iter() } func (p *Planner) planExprTerm(e *ast.Expr, iter planiter) error { return p.planTerm(e.Terms.(*ast.Term), func() error { falsy := p.newLocal() p.appendStmt(ir.MakeBooleanStmt{ Value: false, Target: falsy, }) p.appendStmt(ir.NotEqualStmt{ A: p.ltarget, B: falsy, }) return iter() }) } func (p *Planner) planExprCall(e *ast.Expr, iter planiter) error { switch e.Operator().String() { case ast.Equality.Name: return p.planUnify(e.Operand(0), e.Operand(1), iter) case ast.Equal.Name: return p.planBinaryExpr(e, func(a, b ir.Local) error { p.appendStmt(ir.EqualStmt{ A: a, B: b, }) return iter() }) case ast.LessThan.Name: return p.planBinaryExpr(e, func(a, b ir.Local) error { p.appendStmt(ir.LessThanStmt{ A: a, B: b, }) return iter() }) case ast.LessThanEq.Name: return p.planBinaryExpr(e, func(a, b ir.Local) error { p.appendStmt(ir.LessThanEqualStmt{ A: a, B: b, }) return iter() }) case ast.GreaterThan.Name: return p.planBinaryExpr(e, func(a, b ir.Local) error { p.appendStmt(ir.GreaterThanStmt{ A: a, B: b, }) return iter() }) case ast.GreaterThanEq.Name: return p.planBinaryExpr(e, func(a, b ir.Local) error { p.appendStmt(ir.GreaterThanEqualStmt{ A: a, B: b, }) return iter() 
}) case ast.NotEqual.Name: return p.planBinaryExpr(e, func(a, b ir.Local) error { p.appendStmt(ir.NotEqualStmt{ A: a, B: b, }) return iter() }) default: return fmt.Errorf("%v operator not implemented", e.Operator()) } } func (p *Planner) planUnify(a, b *ast.Term, iter planiter) error { switch va := a.Value.(type) { case ast.Null, ast.Boolean, ast.Number, ast.String, ast.Ref: return p.planTerm(a, func() error { return p.planUnifyLocal(p.ltarget, b, iter) }) case ast.Var: return p.planUnifyVar(va, b, iter) case ast.Array: switch vb := b.Value.(type) { case ast.Var: return p.planUnifyVar(vb, a, iter) case ast.Ref: return p.planTerm(b, func() error { return p.planUnifyLocalArray(p.ltarget, va, iter) }) case ast.Array: if len(va) == len(vb) { return p.planUnifyArraysRec(va, vb, 0, iter) } return nil } case ast.Object: switch vb := b.Value.(type) { case ast.Var: return p.planUnifyVar(vb, a, iter) case ast.Ref: return p.planTerm(b, func() error { return p.planUnifyLocalObject(p.ltarget, va, iter) }) case ast.Object: if va.Len() == vb.Len() { return p.planUnifyObjectsRec(va, vb, va.Keys(), 0, iter) } return nil } } return fmt.Errorf("not implemented: unify(%v, %v)", a, b) } func (p *Planner) planUnifyVar(a ast.Var, b *ast.Term, iter planiter) error { if la, ok := p.vars[a]; ok { return p.planUnifyLocal(la, b, iter) } return p.planTerm(b, func() error { target := p.newLocal() p.vars[a] = target p.appendStmt(ir.AssignVarStmt{ Source: p.ltarget, Target: target, }) return iter() }) } func (p *Planner) planUnifyLocal(a ir.Local, b *ast.Term, iter planiter) error { switch vb := b.Value.(type) { case ast.Null, ast.Boolean, ast.Number, ast.String, ast.Ref: return p.planTerm(b, func() error { p.appendStmt(ir.EqualStmt{ A: a, B: p.ltarget, }) return iter() }) case ast.Var: if lv, ok := p.vars[vb]; ok { p.appendStmt(ir.EqualStmt{ A: a, B: lv, }) return iter() } lv := p.newLocal() p.vars[vb] = lv p.appendStmt(ir.AssignVarStmt{ Source: a, Target: lv, }) return iter() case ast.Array: 
return p.planUnifyLocalArray(a, vb, iter) case ast.Object: return p.planUnifyLocalObject(a, vb, iter) } return fmt.Errorf("not implemented: unifyLocal(%v, %v)", a, b) } func (p *Planner) planUnifyLocalArray(a ir.Local, b ast.Array, iter planiter) error { p.appendStmt(ir.IsArrayStmt{ Source: a, }) blen := p.newLocal() alen := p.newLocal() p.appendStmt(ir.LenStmt{ Source: a, Target: alen, }) p.appendStmt(ir.MakeNumberIntStmt{ Value: int64(len(b)), Target: blen, }) p.appendStmt(ir.EqualStmt{ A: alen, B: blen, }) lkey := p.newLocal() p.appendStmt(ir.MakeNumberIntStmt{ Target: lkey, }) lval := p.newLocal() return p.planUnifyLocalArrayRec(a, 0, b, lkey, lval, iter) } func (p *Planner) planUnifyLocalArrayRec(a ir.Local, index int, b ast.Array, lkey, lval ir.Local, iter planiter) error { if len(b) == index { return iter() } p.appendStmt(ir.AssignIntStmt{ Value: int64(index), Target: lkey, }) p.appendStmt(ir.DotStmt{ Source: a, Key: lkey, Target: lval, }) return p.planUnifyLocal(lval, b[index], func() error { return p.planUnifyLocalArrayRec(a, index+1, b, lkey, lval, iter) }) } func (p *Planner) planUnifyLocalObject(a ir.Local, b ast.Object, iter planiter) error { p.appendStmt(ir.IsObjectStmt{ Source: a, }) blen := p.newLocal() alen := p.newLocal() p.appendStmt(ir.LenStmt{ Source: a, Target: alen, }) p.appendStmt(ir.MakeNumberIntStmt{ Value: int64(b.Len()), Target: blen, }) p.appendStmt(ir.EqualStmt{ A: alen, B: blen, }) lkey := p.newLocal() lval := p.newLocal() bkeys := b.Keys() return p.planUnifyLocalObjectRec(a, 0, bkeys, b, lkey, lval, iter) } func (p *Planner) planUnifyLocalObjectRec(a ir.Local, index int, keys []*ast.Term, b ast.Object, lkey, lval ir.Local, iter planiter) error { if index == len(keys) { return iter() } return p.planTerm(keys[index], func() error { p.appendStmt(ir.AssignVarStmt{ Source: p.ltarget, Target: lkey, }) p.appendStmt(ir.DotStmt{ Source: a, Key: lkey, Target: lval, }) return p.planUnifyLocal(lval, b.Get(keys[index]), func() error { return 
p.planUnifyLocalObjectRec(a, index+1, keys, b, lkey, lval, iter) }) }) } func (p *Planner) planUnifyArraysRec(a, b ast.Array, index int, iter planiter) error { if index == len(a) { return iter() } return p.planUnify(a[index], b[index], func() error { return p.planUnifyArraysRec(a, b, index+1, iter) }) } func (p *Planner) planUnifyObjectsRec(a, b ast.Object, keys []*ast.Term, index int, iter planiter) error { if index == len(keys) { return iter() } aval := a.Get(keys[index]) bval := b.Get(keys[index]) if aval == nil || bval == nil { return nil } return p.planUnify(aval, bval, func() error { return p.planUnifyObjectsRec(a, b, keys, index+1, iter) }) } func (p *Planner) planBinaryExpr(e *ast.Expr, iter binaryiter) error { return p.planTerm(e.Operand(0), func() error { a := p.ltarget return p.planTerm(e.Operand(1), func() error { b := p.ltarget return iter(a, b) }) }) } func (p *Planner) planTerm(t *ast.Term, iter planiter) error { switch v := t.Value.(type) { case ast.Null: return p.planNull(v, iter) case ast.Boolean: return p.planBoolean(v, iter) case ast.Number: return p.planNumber(v, iter) case ast.String: return p.planString(v, iter) case ast.Var: return p.planVar(v, iter) case ast.Ref: return p.planRef(v, iter) case ast.Array: return p.planArray(v, iter) case ast.Object: return p.planObject(v, iter) default: return fmt.Errorf("%v term not implemented", ast.TypeName(v)) } } func (p *Planner) planNull(null ast.Null, iter planiter) error { target := p.newLocal() p.appendStmt(ir.MakeNullStmt{ Target: target, }) p.ltarget = target return iter() } func (p *Planner) planBoolean(b ast.Boolean, iter planiter) error { target := p.newLocal() p.appendStmt(ir.MakeBooleanStmt{ Value: bool(b), Target: target, }) p.ltarget = target return iter() } func (p *Planner) planNumber(num ast.Number, iter planiter) error { i, ok := num.Int() if !ok { return fmt.Errorf("float values not implemented") } i64 := int64(i) target := p.newLocal() p.appendStmt(ir.MakeNumberIntStmt{ Value: i64, 
Target: target, }) p.ltarget = target return iter() } func (p *Planner) planString(str ast.String, iter planiter) error { index := p.appendStringConst(string(str)) target := p.newLocal() p.appendStmt(ir.MakeStringStmt{ Index: index, Target: target, }) p.ltarget = target return iter() } func (p *Planner) planVar(v ast.Var, iter planiter) error { if _, ok := p.vars[v]; !ok { p.vars[v] = p.newLocal() } p.ltarget = p.vars[v] return iter() } func (p *Planner) planArray(arr ast.Array, iter planiter) error { larr := p.newLocal() p.appendStmt(ir.MakeArrayStmt{ Capacity: int32(len(arr)), Target: larr, }) return p.planArrayRec(arr, 0, larr, iter) } func (p *Planner) planArrayRec(arr ast.Array, index int, larr ir.Local, iter planiter) error { if index == len(arr) { return iter() } return p.planTerm(arr[index], func() error { p.appendStmt(ir.ArrayAppendStmt{ Value: p.ltarget, Array: larr, }) return p.planArrayRec(arr, index+1, larr, iter) }) } func (p *Planner) planObject(obj ast.Object, iter planiter) error { lobj := p.newLocal() p.appendStmt(ir.MakeObjectStmt{ Target: lobj, }) return p.planObjectRec(obj, 0, obj.Keys(), lobj, iter) } func (p *Planner) planObjectRec(obj ast.Object, index int, keys []*ast.Term, lobj ir.Local, iter planiter) error { if index == len(keys) { return iter() } return p.planTerm(keys[index], func() error { lkey := p.ltarget return p.planTerm(obj.Get(keys[index]), func() error { lval := p.ltarget p.appendStmt(ir.ObjectInsertStmt{ Key: lkey, Value: lval, Object: lobj, }) return p.planObjectRec(obj, index+1, keys, lobj, iter) }) }) } func (p *Planner) planRef(ref ast.Ref, iter planiter) error { if !ref[0].Equal(ast.InputRootDocument) { return fmt.Errorf("%v root document not implemented", ref[0]) } p.ltarget = p.vars[ast.InputRootDocument.Value.(ast.Var)] return p.planRefRec(ref, 1, iter) } func (p *Planner) planRefRec(ref ast.Ref, index int, iter planiter) error { if len(ref) == index { return iter() } switch v := ref[index].Value.(type) { case 
ast.Null, ast.Boolean, ast.Number, ast.String: source := p.ltarget return p.planTerm(ref[index], func() error { key := p.ltarget target := p.newLocal() p.appendStmt(ir.DotStmt{ Source: source, Key: key, Target: target, }) p.ltarget = target return p.planRefRec(ref, index+1, iter) }) case ast.Var: if _, ok := p.vars[v]; !ok { return p.planScan(ref, index, func() error { return p.planRefRec(ref, index+1, iter) }) } p.ltarget = p.vars[v] return p.planRefRec(ref, index+1, iter) default: return fmt.Errorf("%v reference operand not implemented", ast.TypeName(ref[index].Value)) } } func (p *Planner) planScan(ref ast.Ref, index int, iter planiter) error { source := p.ltarget return p.planVar(ref[index].Value.(ast.Var), func() error { key := p.ltarget cond := p.newLocal() value := p.newLocal() p.appendStmt(ir.MakeBooleanStmt{ Value: false, Target: cond, }) scan := ir.ScanStmt{ Source: source, Key: key, Value: value, } prev := p.curr p.curr = &scan.Block p.ltarget = value if err := iter(); err != nil { return err } p.appendStmt(ir.AssignBooleanStmt{ Value: true, Target: cond, }) p.curr = prev p.appendStmt(scan) truth := p.newLocal() p.appendStmt(ir.MakeBooleanStmt{ Value: true, Target: truth, }) p.appendStmt(ir.EqualStmt{ A: cond, B: truth, }) return nil }) } func (p *Planner) appendStmt(s ir.Stmt) { p.curr.Stmts = append(p.curr.Stmts, s) } func (p *Planner) appendStringConst(s string) int { index := len(p.strings) p.strings = append(p.strings, ir.StringConst{ Value: s, }) return index } func (p *Planner) newLocal() ir.Local { x := p.lcurr p.lcurr++ return x }
vendor/github.com/open-policy-agent/opa/internal/planner/planner.go
0.63341
0.482124
planner.go
starcoder
Mac Cheese Grater Plate http://saccade.com/blog/2019/06/how-to-make-apples-mac-pro-holes/ */ //----------------------------------------------------------------------------- package main import ( "log" "math" "github.com/deadsy/sdfx/render" "github.com/deadsy/sdfx/sdf" ) //----------------------------------------------------------------------------- // material shrinkage var shrink = 1.0 / 0.999 // PLA ~0.1% //var shrink = 1.0/0.995; // ABS ~0.5% //----------------------------------------------------------------------------- // colSpace returns the space between columns func colSpace(radius float64) float64 { return (4.0 * radius) / math.Sqrt(3.0) } // rowSpace returns the space between rows func rowSpace(radius float64) float64 { return 2.0 * radius } // xOffset returns the x-offset between adjacent rows func xOffset(radius float64) float64 { return (2.0 * radius) / math.Sqrt(3.0) } // yOffset returns the y-offset between adjacent rows func yOffset(radius float64) float64 { return (2.0 * radius) / 3.0 } // zOffset returns the z-offset between ball grids func zOffset(radius float64) float64 { return (4.0 * radius) / 3.0 } //----------------------------------------------------------------------------- // ballRow returns a ball row func ballRow(ncol int, radius float64) (sdf.SDF3, error) { space := colSpace(radius) x := sdf.V3{-0.5 * ((float64(ncol) - 1) * space), 0, 0} dx := sdf.V3{space, 0, 0} var balls []sdf.SDF3 s, err := sdf.Sphere3D(radius) if err != nil { return nil, err } for i := 0; i < ncol; i++ { balls = append(balls, sdf.Transform3D(s, sdf.Translate3d(x))) x = x.Add(dx) } return sdf.Union3D(balls...), nil } // ballGrid returns a ball grid func ballGrid( ncol int, // number of columns nrow int, // number of rows radius float64, // radius of ball ) (sdf.SDF3, error) { space := rowSpace(radius) x := sdf.V3{0, -0.5 * ((float64(nrow) - 1) * space), 0} dy0 := sdf.V3{-xOffset(radius), space, 0} dy1 := sdf.V3{xOffset(radius), space, 0} var rows []sdf.SDF3 s, err 
:= ballRow(ncol, radius) if err != nil { return nil, err } for i := 0; i < nrow; i++ { rows = append(rows, sdf.Transform3D(s, sdf.Translate3d(x))) if i%2 == 0 { x = x.Add(dy0) } else { x = x.Add(dy1) } } return sdf.Union3D(rows...), nil } // macCheeseGrater returns a Apple Mac style cheese grater plate. func macCheeseGrater( ncol int, // number of columns nrow int, // number of rows radius float64, // radius of ball ) (sdf.SDF3, error) { dx := sdf.V3{xOffset(radius), yOffset(radius), zOffset(radius)}.MulScalar(0.5) g, err := ballGrid(ncol, nrow, radius) if err != nil { return nil, err } g0 := sdf.Transform3D(g, sdf.Translate3d(dx.Neg())) g1 := sdf.Transform3D(g, sdf.Translate3d(dx)) balls := sdf.Union3D(g0, g1) pX := colSpace(radius) * (float64(ncol) - 1) pY := rowSpace(radius) * (float64(nrow) - 1) pZ := 0.5 * colSpace(radius) plate, err := sdf.Box3D(sdf.V3{pX, pY, pZ}, 0) if err != nil { return nil, err } return sdf.Difference3D(plate, balls), nil } //----------------------------------------------------------------------------- func main() { s, err := macCheeseGrater(15, 6, 10.0) if err != nil { log.Fatalf("error: %s", err) } render.RenderSTL(sdf.ScaleUniform3D(s, shrink), 500, "mcg.stl") } //-----------------------------------------------------------------------------
examples/mcg/main.go
0.728845
0.487002
main.go
starcoder
package aoc2020 /* --- Day 25: Combo Breaker --- You finally reach the check-in desk. Unfortunately, their registration systems are currently offline, and they cannot check you in. Noticing the look on your face, they quickly add that tech support is already on the way! They even created all the room keys this morning; you can take yours now and give them your room deposit once the registration system comes back online. The room key is a small RFID card. Your room is on the 25th floor and the elevators are also temporarily out of service, so it takes what little energy you have left to even climb the stairs and navigate the halls. You finally reach the door to your room, swipe your card, and - beep - the light turns red. Examining the card more closely, you discover a phone number for tech support. "Hello! How can we help you today?" You explain the situation. "Well, it sounds like the card isn't sending the right command to unlock the door. If you go back to the check-in desk, surely someone there can reset it for you." Still catching your breath, you describe the status of the elevator and the exact number of stairs you just had to climb. "I see! Well, your only other option would be to reverse-engineer the cryptographic handshake the card does with the door and then inject your own commands into the data stream, but that's definitely impossible." You thank them for their time. Unfortunately for the door, you know a thing or two about cryptographic handshakes. The handshake used by the card and the door involves an operation that transforms a subject number. To transform a subject number, start with the value 1. Then, a number of times called the loop size, perform the following steps: Set the value to itself multiplied by the subject number. Set the value to the remainder after dividing the value by 20201227. The card always uses a specific, secret loop size when it transforms a subject number. The door always uses a different, secret loop size. 
The cryptographic handshake works like this: The card transforms the subject number of 7 according to the card's secret loop size. The result is called the card's public key. The door transforms the subject number of 7 according to the door's secret loop size. The result is called the door's public key. The card and door use the wireless RFID signal to transmit the two public keys (your puzzle input) to the other device. Now, the card has the door's public key, and the door has the card's public key. Because you can eavesdrop on the signal, you have both public keys, but neither device's loop size. The card transforms the subject number of the door's public key according to the card's loop size. The result is the encryption key. The door transforms the subject number of the card's public key according to the door's loop size. The result is the same encryption key as the card calculated. If you can use the two public keys to determine each device's loop size, you will have enough information to calculate the secret encryption key that the card and door use to communicate; this would let you send the unlock command directly to the door! For example, suppose you know that the card's public key is 5764801. With a little trial and error, you can work out that the card's loop size must be 8, because transforming the initial subject number of 7 with a loop size of 8 produces 5764801. Then, suppose you know that the door's public key is 17807724. By the same process, you can determine that the door's loop size is 11, because transforming the initial subject number of 7 with a loop size of 11 produces 17807724. At this point, you can use either device's loop size with the other device's public key to calculate the encryption key. Transforming the subject number of 17807724 (the door's public key) with a loop size of 8 (the card's loop size) produces the encryption key, 14897079. 
(Transforming the subject number of 5764801 (the card's public key) with a
loop size of 11 (the door's loop size) produces the same encryption key:
14897079.)

What encryption key is the handshake trying to establish?
*/

import (
	"fmt"
	"time"

	goutils "github.com/simonski/goutils"
)

// DAY_25_DIVIDE_BY is the modulus applied after every transform step.
const DAY_25_DIVIDE_BY = 20201227

// Worked-example values from the puzzle text.
const DAY_25_TEST_CARD_PUBLIC_KEY = 5764801
const DAY_25_TEST_CARD_LOOP_SIZE = 8
const DAY_25_TEST_CARD_SUBJECT_NUMBER = 7

// Puzzle input: the two eavesdropped public keys.
const DAY_25_CARD_PUBLIC_KEY = 335121
const DAY_25_DOOR_PUBLIC_KEY = 363891

// AOC_2020_25 is the entrypoint
func AOC_2020_25(cli *goutils.CLI) {
	AOC_2020_25_part1_attempt1(cli)
	AOC_2020_25_part2_attempt1(cli)
}

// AOC_2020_25_part1_attempt1 recovers the card's secret loop size from its
// public key, derives the shared encryption key from the door's public key,
// and prints the key plus the elapsed time.
// (This was previously an empty stub that only timed itself.)
func AOC_2020_25_part1_attempt1(cli *goutils.CLI) {
	start := time.Now()
	cardLoopSize := FindLoopSize(DAY_25_TEST_CARD_SUBJECT_NUMBER, DAY_25_CARD_PUBLIC_KEY)
	encryptionKey := FindPrivateKey(DAY_25_DOOR_PUBLIC_KEY, cardLoopSize)
	end := time.Now()
	fmt.Printf("Encryption key: %v\n", encryptionKey)
	fmt.Printf("%v\n", end.Sub(start))
}

// AOC_2020_25_part2_attempt1 is intentionally empty — Day 25 has no second
// puzzle. It prints the (near-zero) elapsed time for symmetry with the
// other days.
func AOC_2020_25_part2_attempt1(cli *goutils.CLI) {
	start := time.Now()
	end := time.Now()
	fmt.Printf("%v\n", end.Sub(start))
}

// FindLoopSize returns the loop size at which repeatedly transforming the
// value by subject_number (modulo DAY_25_DIVIDE_BY) first yields target.
//
// NOTE: the previous comment claimed a 1000-iteration circuit breaker, but
// none was ever implemented — this loops until target is found, so an
// unreachable target never terminates. The every-10000-iterations progress
// print has been dropped as debug noise.
func FindLoopSize(subject_number int, target int) int {
	value := 1
	for loop_size := 1; ; loop_size++ {
		value = (value * subject_number) % DAY_25_DIVIDE_BY
		if value == target {
			return loop_size
		}
	}
}

// FindPrivateKey transforms subject_number max_loop_size times and returns
// the result: the shared encryption key when given the other party's public
// key and loop size. Unlike the original (which looped forever in that
// case), max_loop_size <= 0 now returns 1, i.e. zero transforms applied.
func FindPrivateKey(subject_number int, max_loop_size int) int {
	value := 1
	for i := 0; i < max_loop_size; i++ {
		value = (value * subject_number) % DAY_25_DIVIDE_BY
	}
	return value
}
app/aoc2020/aoc2020_25.go
0.582491
0.652338
aoc2020_25.go
starcoder
package constants import "strings" var Countries = strings.TrimPrefix(` /** Country represents a world country. */ export class Country { // The country's emoji flag. flag: string; // The country's name. name: string; // Two-letter country code (ISO 3166-1 alpha-2). code: string; // The country's abbreviation. abbr: string; constructor(code: string) { const country = COUNTRIES[code]; this.flag = country.flag; this.name = country.name; this.code = country.code; this.abbr = country.abbr; } } /** COUNTRIES represents a map of countries. */ export const COUNTRIES: Record<string, Country> = { 'AF': { flag: '🇦🇫', name: 'Afghanistan', abbr: 'AF', code: '93' } as Country, 'AX': { flag: '🇦🇽', name: 'Alland Islands', abbr: 'AX', code: '358' } as Country, 'AL': { flag: '🇦🇱', name: 'Albania', abbr: 'AL', code: '355' } as Country, 'DZ': { flag: '🇩🇿', name: 'Algeria', abbr: 'DZ', code: '213' } as Country, 'AS': { flag: '🇦🇸', name: 'American Samoa', abbr: 'AS', code: '1-684' } as Country, 'AD': { flag: '🇦🇩', name: 'Andorra', abbr: 'AD', code: '376' } as Country, 'AO': { flag: '🇦🇴', name: 'Angola', abbr: 'AO', code: '244' } as Country, 'AI': { flag: '🇦🇮', name: 'Anguilla', abbr: 'AI', code: '1-264' } as Country, 'AQ': { flag: '🇦🇶', name: 'Antarctica', abbr: 'AQ', code: '672' } as Country, 'AG': { flag: '🇦🇬', name: 'Antigua and Barbuda', abbr: 'AG', code: '1-268' } as Country, 'AR': { flag: '🇦🇷', name: 'Argentina', abbr: 'AR', code: '54' } as Country, 'AM': { flag: '🇦🇲', name: 'Armenia', abbr: 'AM', code: '374' } as Country, 'AW': { flag: '🇦🇼', name: 'Aruba', abbr: 'AW', code: '297' } as Country, 'AU': { flag: '🇦🇺', name: 'Australia', abbr: 'AU', code: '61' } as Country, 'AT': { flag: '🇦🇹', name: 'Austria', abbr: 'AT', code: '43' } as Country, 'AZ': { flag: '🇦🇿', name: 'Azerbaijan', abbr: 'AZ', code: '994' } as Country, 'BS': { flag: '🇧🇸', name: 'Bahamas', abbr: 'BS', code: '1-242' } as Country, 'BH': { flag: '🇧🇭', name: 'Bahrain', abbr: 'BH', code: '973' } as Country, 'BD': { 
flag: '🇧🇩', name: 'Bangladesh', abbr: 'BD', code: '880' } as Country, 'BB': { flag: '🇧🇧', name: 'Barbados', abbr: 'BB', code: '1-246' } as Country, 'BY': { flag: '🇧🇾', name: 'Belarus', abbr: 'BY', code: '375' } as Country, 'BE': { flag: '🇧🇪', name: 'Belgium', abbr: 'BE', code: '32' } as Country, 'BZ': { flag: '🇧🇿', name: 'Belize', abbr: 'BZ', code: '501' } as Country, 'BJ': { flag: '🇧🇯', name: 'Benin', abbr: 'BJ', code: '229' } as Country, 'BM': { flag: '🇧🇲', name: 'Bermuda', abbr: 'BM', code: '1-441' } as Country, 'BT': { flag: '🇧🇹', name: 'Bhutan', abbr: 'BT', code: '975' } as Country, 'BO': { flag: '🇧🇴', name: 'Bolivia', abbr: 'BO', code: '591' } as Country, 'BA': { flag: '🇧🇦', name: 'Bosnia and Herzegovina', abbr: 'BA', code: '387' } as Country, 'BW': { flag: '🇧🇼', name: 'Botswana', abbr: 'BW', code: '267' } as Country, 'BV': { flag: '🇧🇻', name: 'Bouvet Island', abbr: 'BV', code: '47' } as Country, 'BR': { flag: '🇧🇷', name: 'Brazil', abbr: 'BR', code: '55' } as Country, 'IO': { flag: '🇮🇴', name: 'British Indian Ocean Territory', abbr: 'IO', code: '246', }, 'VG': { flag: '🇻🇬', name: 'British Virgin Islands', abbr: 'VG', code: '1-284' } as Country, 'BN': { flag: '🇧🇳', name: 'Brunei Darussalam', abbr: 'BN', code: '673' } as Country, 'BG': { flag: '🇧🇬', name: 'Bulgaria', abbr: 'BG', code: '359' } as Country, 'BF': { flag: '🇧🇫', name: 'Burkina Faso', abbr: 'BF', code: '226' } as Country, 'BI': { flag: '🇧🇮', name: 'Burundi', abbr: 'BI', code: '257' } as Country, 'KH': { flag: '🇰🇭', name: 'Cambodia', abbr: 'KH', code: '855' } as Country, 'CM': { flag: '🇨🇲', name: 'Cameroon', abbr: 'CM', code: '237' } as Country, 'CA': { flag: '🇨🇦', name: 'Canada', abbr: 'CA', code: '1' } as Country, 'CV': { flag: '🇨🇻', name: 'Cape Verde', abbr: 'CV', code: '238' } as Country, 'KY': { flag: '🇰🇾', name: 'Cayman Islands', abbr: 'KY', code: '1-345' } as Country, 'CF': { flag: '🇨🇫', name: 'Central African Republic', abbr: 'CF', code: '236' } as Country, 'TD': { flag: '🇹🇩', name: 'Chad', 
abbr: 'TD', code: '235' } as Country, 'CL': { flag: '🇨🇱', name: 'Chile', abbr: 'CL', code: '56' } as Country, 'CN': { flag: '🇨🇳', name: 'China', abbr: 'CN', code: '86' } as Country, 'CX': { flag: '🇨🇽', name: 'Christmas Island', abbr: 'CX', code: '61' } as Country, 'CC': { flag: '🇨🇨', name: 'Cocos (Keeling) Islands', abbr: 'CC', code: '61' } as Country, 'CO': { flag: '🇨🇴', name: 'Colombia', abbr: 'CO', code: '57' } as Country, 'KM': { flag: '🇰🇲', name: 'Comoros', abbr: 'KM', code: '269' } as Country, 'CD': { flag: '🇨🇩', name: 'Congo, Democratic Republic of the', abbr: 'CD', code: '243', }, 'CG': { flag: '🇨🇬', name: 'Congo, Republic of the', abbr: 'CG', code: '242', }, 'CK': { flag: '🇨🇰', name: 'Cook Islands', abbr: 'CK', code: '682' } as Country, 'CR': { flag: '🇨🇷', name: 'Costa Rica', abbr: 'CR', code: '506' } as Country, 'CI': { flag: '🇨🇮', name: "Cote d'Ivoire", abbr: 'CI', code: '225' }, 'HR': { flag: '🇭🇷', name: 'Croatia', abbr: 'HR', code: '385' } as Country, 'CU': { flag: '🇨🇺', name: 'Cuba', abbr: 'CU', code: '53' } as Country, 'CW': { flag: '🇨🇼', name: 'Curacao', abbr: 'CW', code: '599' } as Country, 'CY': { flag: '🇨🇾', name: 'Cyprus', abbr: 'CY', code: '357' } as Country, 'CZ': { flag: '🇨🇿', name: 'Czech Republic', abbr: 'CZ', code: '420' } as Country, 'DK': { flag: '🇩🇰', name: 'Denmark', abbr: 'DK', code: '45' } as Country, 'DJ': { flag: '🇩🇯', name: 'Djibouti', abbr: 'DJ', code: '253' } as Country, 'DM': { flag: '🇩🇲', name: 'Dominica', abbr: 'DM', code: '1-767' } as Country, 'DO': { flag: '🇩🇴', name: 'Dominican Republic', abbr: 'DO', code: '1-809' } as Country, 'EC': { flag: '🇪🇨', name: 'Ecuador', abbr: 'EC', code: '593' } as Country, 'EG': { flag: '🇪🇬', name: 'Egypt', abbr: 'EG', code: '20' } as Country, 'SV': { flag: '🇸🇻', name: 'El Salvador', abbr: 'SV', code: '503' } as Country, 'GQ': { flag: '🇬🇶', name: 'Equatorial Guinea', abbr: 'GQ', code: '240' } as Country, 'ER': { flag: '🇪🇷', name: 'Eritrea', abbr: 'ER', code: '291' } as Country, 'EE': { flag: 
'🇪🇪', name: 'Estonia', abbr: 'EE', code: '372' } as Country, 'ET': { flag: '🇪🇹', name: 'Ethiopia', abbr: 'ET', code: '251' } as Country, 'FK': { flag: '🇫🇰', name: 'Falkland Islands (Malvinas)', abbr: 'FK', code: '500' } as Country, 'FO': { flag: '🇫🇴', name: 'Faroe Islands', abbr: 'FO', code: '298' } as Country, 'FJ': { flag: '🇫🇯', name: 'Fiji', abbr: 'FJ', code: '679' } as Country, 'FI': { flag: '🇫🇮', name: 'Finland', abbr: 'FI', code: '358' } as Country, 'FR': { flag: '🇫🇷', name: 'France', abbr: 'FR', code: '33' } as Country, 'GF': { flag: '🇬🇫', name: 'French Guiana', abbr: 'GF', code: '594' } as Country, 'PF': { flag: '🇵🇫', name: 'French Polynesia', abbr: 'PF', code: '689' } as Country, 'TF': { flag: '🇹🇫', name: 'French Southern Territories', abbr: 'TF', code: '262' } as Country, 'GA': { flag: '🇬🇦', name: 'Gabon', abbr: 'GA', code: '241' } as Country, 'GM': { flag: '🇬🇲', name: 'Gambia', abbr: 'GM', code: '220' } as Country, 'GE': { flag: '🇬🇪', name: 'Georgia', abbr: 'GE', code: '995' } as Country, 'DE': { flag: '🇩🇪', name: 'Germany', abbr: 'DE', code: '49' } as Country, 'GH': { flag: '🇬🇭', name: 'Ghana', abbr: 'GH', code: '233' } as Country, 'GI': { flag: '🇬🇮', name: 'Gibraltar', abbr: 'GI', code: '350' } as Country, 'GR': { flag: '🇬🇷', name: 'Greece', abbr: 'GR', code: '30' } as Country, 'GL': { flag: '🇬🇱', name: 'Greenland', abbr: 'GL', code: '299' } as Country, 'GD': { flag: '🇬🇩', name: 'Grenada', abbr: 'GD', code: '1-473' } as Country, 'GP': { flag: '🇬🇵', name: 'Guadeloupe', abbr: 'GP', code: '590' } as Country, 'GU': { flag: '🇬🇺', name: 'Guam', abbr: 'GU', code: '1-671' } as Country, 'GT': { flag: '🇬🇹', name: 'Guatemala', abbr: 'GT', code: '502' } as Country, 'GG': { flag: '🇬🇬', name: 'Guernsey', abbr: 'GG', code: '44' } as Country, 'GW': { flag: '🇬🇼', name: 'Guinea-Bissau', abbr: 'GW', code: '245' } as Country, 'GN': { flag: '🇬🇳', name: 'Guinea', abbr: 'GN', code: '224' } as Country, 'GY': { flag: '🇬🇾', name: 'Guyana', abbr: 'GY', code: '592' } as Country, 
'HT': { flag: '🇭🇹', name: 'Haiti', abbr: 'HT', code: '509' } as Country, 'HM': { flag: '🇭🇲', name: 'Heard Island and McDonald Islands', abbr: 'HM', code: '672', }, 'VA': { flag: '🇻🇦', name: 'Holy See (Vatican City State)', abbr: 'VA', code: '379', }, 'HN': { flag: '🇭🇳', name: 'Honduras', abbr: 'HN', code: '504' } as Country, 'HK': { flag: '🇭🇰', name: 'Hong Kong', abbr: 'HK', code: '852' } as Country, 'HU': { flag: '🇭🇺', name: 'Hungary', abbr: 'HU', code: '36' } as Country, 'IS': { flag: '🇮🇸', name: 'Iceland', abbr: 'IS', code: '354' } as Country, 'IN': { flag: '🇮🇳', name: 'India', abbr: 'IN', code: '91' } as Country, 'ID': { flag: '🇮🇩', name: 'Indonesia', abbr: 'ID', code: '62' } as Country, 'IR': { flag: '🇮🇷', name: 'Iran, Islamic Republic of', abbr: 'IR', code: '98' } as Country, 'IQ': { flag: '🇮🇶', name: 'Iraq', abbr: 'IQ', code: '964' } as Country, 'IE': { flag: '🇮🇪', name: 'Ireland', abbr: 'IE', code: '353' } as Country, 'IM': { flag: '🇮🇲', name: 'Isle of Man', abbr: 'IM', code: '44' } as Country, 'IL': { flag: '🇮🇱', name: 'Israel', abbr: 'IL', code: '972' } as Country, 'IT': { flag: '🇮🇹', name: 'Italy', abbr: 'IT', code: '39' } as Country, 'JM': { flag: '🇯🇲', name: 'Jamaica', abbr: 'JM', code: '1-876' } as Country, 'JP': { flag: '🇯🇵', name: 'Japan', abbr: 'JP', code: '81' } as Country, 'JE': { flag: '🇯🇪', name: 'Jersey', abbr: 'JE', code: '44' } as Country, 'JO': { flag: '🇯🇴', name: 'Jordan', abbr: 'JO', code: '962' } as Country, 'KZ': { flag: '🇰🇿', name: 'Kazakhstan', abbr: 'KZ', code: '7' } as Country, 'KE': { flag: '🇰🇪', name: 'Kenya', abbr: 'KE', code: '254' } as Country, 'KI': { flag: '🇰🇮', name: 'Kiribati', abbr: 'KI', code: '686' } as Country, 'KP': { flag: '🇰🇵', name: "Korea, Democratic People's Republic of", abbr: 'KP', code: '850', }, 'KR': { flag: '🇰🇷', name: 'Korea, Republic of', abbr: 'KR', code: '82' } as Country, 'XK': { flag: '🇽🇰', name: 'Kosovo', abbr: 'XK', code: '383' } as Country, 'KW': { flag: '🇰🇼', name: 'Kuwait', abbr: 'KW', code: '965' 
} as Country, 'KG': { flag: '🇰🇬', name: 'Kyrgyzstan', abbr: 'KG', code: '996' } as Country, 'LA': { flag: '🇱🇦', name: "Lao People's Democratic Republic", abbr: 'LA', code: '856', }, 'LV': { flag: '🇱🇻', name: 'Latvia', abbr: 'LV', code: '371' } as Country, 'LB': { flag: '🇱🇧', name: 'Lebanon', abbr: 'LB', code: '961' } as Country, 'LS': { flag: '🇱🇸', name: 'Lesotho', abbr: 'LS', code: '266' } as Country, 'LR': { flag: '🇱🇷', name: 'Liberia', abbr: 'LR', code: '231' } as Country, 'LY': { flag: '🇱🇾', name: 'Libya', abbr: 'LY', code: '218' } as Country, 'LI': { flag: '🇱🇮', name: 'Liechtenstein', abbr: 'LI', code: '423' } as Country, 'LT': { flag: '🇱🇹', name: 'Lithuania', abbr: 'LT', code: '370' } as Country, 'LU': { flag: '🇱🇺', name: 'Luxembourg', abbr: 'LU', code: '352' } as Country, 'MO': { flag: '🇲🇴', name: 'Macao', abbr: 'MO', code: '853' } as Country, 'MK': { flag: '🇲🇰', name: 'Macedonia, the Former Yugoslav Republic of', abbr: 'MK', code: '389', }, 'MG': { flag: '🇲🇬', name: 'Madagascar', abbr: 'MG', code: '261' } as Country, 'MW': { flag: '🇲🇼', name: 'Malawi', abbr: 'MW', code: '265' } as Country, 'MY': { flag: '🇲🇾', name: 'Malaysia', abbr: 'MY', code: '60' } as Country, 'MV': { flag: '🇲🇻', name: 'Maldives', abbr: 'MV', code: '960' } as Country, 'ML': { flag: '🇲🇱', name: 'Mali', abbr: 'ML', code: '223' } as Country, 'MT': { flag: '🇲🇹', name: 'Malta', abbr: 'MT', code: '356' } as Country, 'MH': { flag: '🇲🇭', name: 'Marshall Islands', abbr: 'MH', code: '692' } as Country, 'MQ': { flag: '🇲🇶', name: 'Martinique', abbr: 'MQ', code: '596' } as Country, 'MR': { flag: '🇲🇷', name: 'Mauritania', abbr: 'MR', code: '222' } as Country, 'MU': { flag: '🇲🇺', name: 'Mauritius', abbr: 'MU', code: '230' } as Country, 'YT': { flag: '🇾🇹', name: 'Mayotte', abbr: 'YT', code: '262' } as Country, 'MX': { flag: '🇲🇽', name: 'Mexico', abbr: 'MX', code: '52' } as Country, 'FM': { flag: '🇫🇲', name: 'Micronesia, Federated States of', abbr: 'FM', code: '691', }, 'MD': { flag: '🇲🇩', name: 
'Moldova, Republic of', abbr: 'MD', code: '373' } as Country, 'MC': { flag: '🇲🇨', name: 'Monaco', abbr: 'MC', code: '377' } as Country, 'MN': { flag: '🇲🇳', name: 'Mongolia', abbr: 'MN', code: '976' } as Country, 'ME': { flag: '🇲🇪', name: 'Montenegro', abbr: 'ME', code: '382' } as Country, 'MS': { flag: '🇲🇸', name: 'Montserrat', abbr: 'MS', code: '1-664' } as Country, 'MA': { flag: '🇲🇦', name: 'Morocco', abbr: 'MA', code: '212' } as Country, 'MZ': { flag: '🇲🇿', name: 'Mozambique', abbr: 'MZ', code: '258' } as Country, 'MM': { flag: '🇲🇲', name: 'Myanmar', abbr: 'MM', code: '95' } as Country, 'NA': { flag: '🇳🇦', name: 'Namibia', abbr: 'NA', code: '264' } as Country, 'NR': { flag: '🇳🇷', name: 'Nauru', abbr: 'NR', code: '674' } as Country, 'NP': { flag: '🇳🇵', name: 'Nepal', abbr: 'NP', code: '977' } as Country, 'NL': { flag: '🇳🇱', name: 'Netherlands', abbr: 'NL', code: '31' } as Country, 'NC': { flag: '🇳🇨', name: 'New Caledonia', abbr: 'NC', code: '687' } as Country, 'NZ': { flag: '🇳🇿', name: 'New Zealand', abbr: 'NZ', code: '64' } as Country, 'NI': { flag: '🇳🇮', name: 'Nicaragua', abbr: 'NI', code: '505' } as Country, 'NE': { flag: '🇳🇪', name: 'Niger', abbr: 'NE', code: '227' } as Country, 'NG': { flag: '🇳🇬', name: 'Nigeria', abbr: 'NG', code: '234' } as Country, 'NU': { flag: '🇳🇺', name: 'Niue', abbr: 'NU', code: '683' } as Country, 'NF': { flag: '🇳🇫', name: 'Norfolk Island', abbr: 'NF', code: '672' } as Country, 'MP': { flag: '🇲🇵', name: 'Northern Mariana Islands', abbr: 'MP', code: '1-670' } as Country, 'NO': { flag: '🇳🇴', name: 'Norway', abbr: 'NO', code: '47' } as Country, 'OM': { flag: '🇴🇲', name: 'Oman', abbr: 'OM', code: '968' } as Country, 'PK': { flag: '🇵🇰', name: 'Pakistan', abbr: 'PK', code: '92' } as Country, 'PW': { flag: '🇵🇼', name: 'Palau', abbr: 'PW', code: '680' } as Country, 'PS': { flag: '🇵🇸', name: 'Palestine, State of', abbr: 'PS', code: '970' } as Country, 'PA': { flag: '🇵🇦', name: 'Panama', abbr: 'PA', code: '507' } as Country, 'PG': { flag: 
'🇵🇬', name: 'Papua New Guinea', abbr: 'PG', code: '675' } as Country, 'PY': { flag: '🇵🇾', name: 'Paraguay', abbr: 'PY', code: '595' } as Country, 'PE': { flag: '🇵🇪', name: 'Peru', abbr: 'PE', code: '51' } as Country, 'PH': { flag: '🇵🇭', name: 'Philippines', abbr: 'PH', code: '63' } as Country, 'PN': { flag: '🇵🇳', name: 'Pitcairn', abbr: 'PN', code: '870' } as Country, 'PL': { flag: '🇵🇱', name: 'Poland', abbr: 'PL', code: '48' } as Country, 'PT': { flag: '🇵🇹', name: 'Portugal', abbr: 'PT', code: '351' } as Country, 'PR': { flag: '🇵🇷', name: 'Puerto Rico', abbr: 'PR', code: '1' } as Country, 'QA': { flag: '🇶🇦', name: 'Qatar', abbr: 'QA', code: '974' } as Country, 'RE': { flag: '🇷🇪', name: 'Reunion', abbr: 'RE', code: '262' } as Country, 'RO': { flag: '🇷🇴', name: 'Romania', abbr: 'RO', code: '40' } as Country, 'RU': { flag: '🇷🇺', name: 'Russian Federation', abbr: 'RU', code: '7' } as Country, 'RW': { flag: '🇷🇼', name: 'Rwanda', abbr: 'RW', code: '250' } as Country, 'BL': { flag: '🇧🇱', name: 'Saint Barthelemy', abbr: 'BL', code: '590' } as Country, 'SH': { flag: '🇸🇭', name: 'Saint Helena', abbr: 'SH', code: '290' } as Country, 'KN': { flag: '🇰🇳', name: 'Saint Kitts and Nevis', abbr: 'KN', code: '1-869' } as Country, 'LC': { flag: '🇱🇨', name: 'Saint Lucia', abbr: 'LC', code: '1-758' } as Country, 'MF': { flag: '🇲🇫', name: 'Saint Martin (French part)', abbr: 'MF', code: '590' } as Country, 'PM': { flag: '🇵🇲', name: 'Saint Pierre and Miquelon', abbr: 'PM', code: '508' } as Country, 'VC': { flag: '🇻🇨', name: 'Saint Vincent and the Grenadines', abbr: 'VC', code: '1-784', }, 'WS': { flag: '🇼🇸', name: 'Samoa', abbr: 'WS', code: '685' } as Country, 'SM': { flag: '🇸🇲', name: 'San Marino', abbr: 'SM', code: '378' } as Country, 'ST': { flag: '🇸🇹', name: 'Sao Tome and Principe', abbr: 'ST', code: '239' } as Country, 'SA': { flag: '🇸🇦', name: 'Saudi Arabia', abbr: 'SA', code: '966' } as Country, 'SN': { flag: '🇸🇳', name: 'Senegal', abbr: 'SN', code: '221' } as Country, 'RS': { 
flag: '🇷🇸', name: 'Serbia', abbr: 'RS', code: '381' } as Country, 'SC': { flag: '🇸🇨', name: 'Seychelles', abbr: 'SC', code: '248' } as Country, 'SL': { flag: '🇸🇱', name: 'Sierra Leone', abbr: 'SL', code: '232' } as Country, 'SG': { flag: '🇸🇬', name: 'Singapore', abbr: 'SG', code: '65' } as Country, 'SX': { flag: '🇸🇽', name: 'Sint Maarten (Dutch part)', abbr: 'SX', code: '1-721' } as Country, 'SK': { flag: '🇸🇰', name: 'Slovakia', abbr: 'SK', code: '421' } as Country, 'SI': { flag: '🇸🇮', name: 'Slovenia', abbr: 'SI', code: '386' } as Country, 'SB': { flag: '🇸🇧', name: 'Solomon Islands', abbr: 'SB', code: '677' } as Country, 'SO': { flag: '🇸🇴', name: 'Somalia', abbr: 'SO', code: '252' } as Country, 'ZA': { flag: '🇿🇦', name: 'South Africa', abbr: 'ZA', code: '27' } as Country, 'GS': { flag: '🇬🇸', name: 'South Georgia and the South Sandwich Islands', abbr: 'GS', code: '500', }, 'SS': { flag: '🇸🇸', name: 'South Sudan', abbr: 'SS', code: '211' } as Country, 'ES': { flag: '🇪🇸', name: 'Spain', abbr: 'ES', code: '34' } as Country, 'LK': { flag: '🇱🇰', name: 'Sri Lanka', abbr: 'LK', code: '94' } as Country, 'SD': { flag: '🇸🇩', name: 'Sudan', abbr: 'SD', code: '249' } as Country, 'SR': { flag: '🇸🇷', name: 'Suriname', abbr: 'SR', code: '597' } as Country, 'SJ': { flag: '🇸🇯', name: 'Svalbard and Jan Mayen', abbr: 'SJ', code: '47' } as Country, 'SZ': { flag: '🇸🇿', name: 'Swaziland', abbr: 'SZ', code: '268' } as Country, 'SE': { flag: '🇸🇪', name: 'Sweden', abbr: 'SE', code: '46' } as Country, 'CH': { flag: '🇨🇭', name: 'Switzerland', abbr: 'CH', code: '41' } as Country, 'SY': { flag: '🇸🇾', name: 'Syrian Arab Republic', abbr: 'SY', code: '963' } as Country, 'TW': { flag: '🇹🇼', name: 'Taiwan, Province of China', abbr: 'TW', code: '886' } as Country, 'TJ': { flag: '🇹🇯', name: 'Tajikistan', abbr: 'TJ', code: '992' } as Country, 'TH': { flag: '🇹🇭', name: 'Thailand', abbr: 'TH', code: '66' } as Country, 'TL': { flag: '🇹🇱', name: 'Timor-Leste', abbr: 'TL', code: '670' } as Country, 'TG': { 
flag: '🇹🇬', name: 'Togo', abbr: 'TG', code: '228' } as Country, 'TK': { flag: '🇹🇰', name: 'Tokelau', abbr: 'TK', code: '690' } as Country, 'TO': { flag: '🇹🇴', name: 'Tonga', abbr: 'TO', code: '676' } as Country, 'TT': { flag: '🇹🇹', name: 'Trinidad and Tobago', abbr: 'TT', code: '1-868' } as Country, 'TN': { flag: '🇹🇳', name: 'Tunisia', abbr: 'TN', code: '216' } as Country, 'TR': { flag: '🇹🇷', name: 'Turkey', abbr: 'TR', code: '90' } as Country, 'TM': { flag: '🇹🇲', name: 'Turkmenistan', abbr: 'TM', code: '993' } as Country, 'TC': { flag: '🇹🇨', name: 'Turks and Caicos Islands', abbr: 'TC', code: '1-649' } as Country, 'TV': { flag: '🇹🇻', name: 'Tuvalu', abbr: 'TV', code: '688' } as Country, 'UG': { flag: '🇺🇬', name: 'Uganda', abbr: 'UG', code: '256' } as Country, 'UA': { flag: '🇺🇦', name: 'Ukraine', abbr: 'UA', code: '380' } as Country, 'AE': { flag: '🇦🇪', name: 'United Arab Emirates', abbr: 'AE', code: '971' } as Country, 'GB': { flag: '🇬🇧', name: 'United Kingdom', abbr: 'GB', code: '44' } as Country, 'TZ': { flag: '🇹🇿', name: 'United Republic of Tanzania', abbr: 'TZ', code: '255' } as Country, 'US': { flag: '🇺🇲', name: 'United States', abbr: 'US', code: '1' } as Country, 'UY': { flag: '🇺🇾', name: 'Uruguay', abbr: 'UY', code: '598' } as Country, 'VI': { flag: '🇻🇮', name: 'US Virgin Islands', abbr: 'VI', code: '1-340' } as Country, 'UZ': { flag: '🇺🇿', name: 'Uzbekistan', abbr: 'UZ', code: '998' } as Country, 'VU': { flag: '🇻🇺', name: 'Vanuatu', abbr: 'VU', code: '678' } as Country, 'VE': { flag: '🇻🇪', name: 'Venezuela', abbr: 'VE', code: '58' } as Country, 'VN': { flag: '🇻🇳', name: 'Vietnam', abbr: 'VN', code: '84' } as Country, 'WF': { flag: '🇼🇫', name: 'Wallis and Futuna', abbr: 'WF', code: '681' } as Country, 'EH': { flag: '🇪🇭', name: 'Western Sahara', abbr: 'EH', code: '212' } as Country, 'YE': { flag: '🇾🇪', name: 'Yemen', abbr: 'YE', code: '967' } as Country, 'ZM': { flag: '🇿🇲', name: 'Zambia', abbr: 'ZM', code: '260' } as Country, 'ZW': { flag: '🇿🇼', name: 
'Zimbabwe', abbr: 'ZW', code: '263' } as Country, };`, "\n")
src/gen/typescript/constants/countries.go
0.574753
0.513912
countries.go
starcoder
package bit // Note the use of << to create an untyped constant. const bitsPerWord = 32 << uint(^uint(0)>>63) // Implementation-specific size of int and uint in bits. const BitsPerWord = bitsPerWord // either 32 or 64 // Implementation-specific integer limit values. const ( MaxInt = 1<<(BitsPerWord-1) - 1 // either 1<<31 - 1 or 1<<63 - 1 MinInt = -MaxInt - 1 // either -1 << 31 or -1 << 63 MaxUint = 1<<BitsPerWord - 1 // either 1<<32 - 1 or 1<<64 - 1 ) // MinPos returns the position of the minimum nonzero bit in w, w ≠ 0. // It panics for w = 0. func MinPos(w uint64) int { // “Using de Bruijn Sequences to Index a 1 in a Computer Word”, // Leiserson, Prokop, and Randall, MIT, 1998. if w == 0 { panic("bit: MinPos(0) undefined") } // w & -w clears all bits except the one at minimum position p. // Hence, the multiplication below is equivalent to b26<<p. // A table lookup translates the 64 possible outcomes into // the exptected answer. return bitPos[((w&-w)*b26)>>58] } // A sequence, starting with 6 zeros, that contains all possible // 6-bit patterns as subseqences, a.k.a. De Bruijn B(2, 6). const b26 uint64 = 0x022fdd63cc95386d var bitPos [64]int func init() { for p := uint(0); p < 64; p++ { bitPos[b26<<p>>58] = int(p) } } // MaxPos returns the position of the maximum nonzero bit in w, w ≠ 0. // It panics for w = 0. func MaxPos(w uint64) int { if w == 0 { panic("bit: MaxPos(0) undefined") } // Fill word with ones on the right, e.g. 0x0000f308 -> 0x0000ffff. w |= w >> 1 w |= w >> 2 w |= w >> 4 w |= w >> 8 w |= w >> 16 w |= w >> 32 return Count(w) - 1 } // Count returns the number of nonzero bits in w. func Count(w uint64) int { // “Software Optimization Guide for AMD64 Processors”, Section 8.6. const maxw = 1<<64 - 1 const bpw = 64 // Compute the count for each 2-bit group. 
// Example using 16-bit word w = 00,01,10,11,00,01,10,11 // w - (w>>1) & 01,01,01,01,01,01,01,01 = 00,01,01,10,00,01,01,10 w -= (w >> 1) & (maxw / 3) // Add the count of adjacent 2-bit groups and store in 4-bit groups: // w & 0011,0011,0011,0011 + w>>2 & 0011,0011,0011,0011 = 0001,0011,0001,0011 w = w&(maxw/15*3) + (w>>2)&(maxw/15*3) // Add the count of adjacent 4-bit groups and store in 8-bit groups: // (w + w>>4) & 00001111,00001111 = 00000100,00000100 w += w >> 4 w &= maxw / 255 * 15 // Add all 8-bit counts with a multiplication and a shift: // (w * 00000001,00000001) >> 8 = 00001000 w *= maxw / 255 w >>= (bpw/8 - 1) * 8 return int(w) }
vendor/github.com/andybalholm/go-bit/funcs.go
0.752286
0.526038
funcs.go
starcoder
package parser

import (
	"time"
)

// Parser handles the parsing from time.Time to various language formats.
type Parser interface {
	// ParseTime parses the time.
	ParseTime(time.Time)
	// Digit day of month
	Day() uint8
	// Digit day of month with leading 0 if there's only one digit.
	PaddedDay() string
	// Digit month of a year
	Month() uint8
	// Digit month of a year with leading 0 if there's only one digit.
	PaddedMonth() string
	// 4 digit year
	Year() uint16
	// Last two digit of a Year. e.g. year of 1990 will return 90
	YearShort() uint8
	// Hour of the day
	Hour() uint8
	// Hour of the day with leading 0
	PaddedHour() string
	// Minute of an Hour
	Minute() uint8
	// Minute of an Hour with leading 0
	PaddedMinute() string
	// Second of a minute
	Second() uint8
	// Second from unix
	SecondUnix() int64
	// Second of a minute with leading 0
	PaddedSecond() string
	// Long Day. Like `Monday` or `Wednesday`
	DayLong() string
	// Short day. Like `Mon` or `Wed`
	DayShort() string
	// Full Month. Like `January` or `December`
	MonthLong() string
	// Short Month. Like `Jan` or `Dec`
	MonthShort() string
}

// baseParser is the shared struct between parsers. It caches every
// broken-down component of a time.Time so accessor methods are cheap.
type baseParser struct {
	t            time.Time
	year         uint16
	yearShort    uint8
	day          uint8
	paddedDay    string
	month        uint8
	paddedMonth  string
	hour         uint8
	paddedHour   string
	minute       uint8
	paddedMinute string
	second       uint8
	secondUnix   int64
	paddedSecond string
}

// newBaseParser precomputes all components of t.
//
// Fix: the zero-padded fields now use time.Time.Format with explicit
// reference layouts ("02", "01", "15", "04", "05") instead of index-slicing
// the output of time.Time.String(). String() is documented as a debugging
// aid rather than a stable serialized representation, so relying on fixed
// character positions in it was fragile. The produced values are identical.
func newBaseParser(t time.Time) baseParser {
	return baseParser{
		t:            t,
		day:          uint8(t.Day()),
		paddedDay:    t.Format("02"),
		hour:         uint8(t.Hour()),
		paddedHour:   t.Format("15"),
		minute:       uint8(t.Minute()),
		paddedMinute: t.Format("04"),
		second:       uint8(t.Second()),
		paddedSecond: t.Format("05"),
		secondUnix:   t.Unix(),
		month:        uint8(t.Month()),
		paddedMonth:  t.Format("01"),
		year:         uint16(t.Year()),
		yearShort:    uint8(t.Year() % 100),
	}
}

// ParseTime re-initializes the parser from t.
func (b *baseParser) ParseTime(t time.Time) {
	*b = newBaseParser(t)
}

// Day returns the day of the month.
func (b baseParser) Day() uint8 { return b.day }

// PaddedDay returns the day of the month, zero-padded to two digits.
func (b baseParser) PaddedDay() string { return b.paddedDay }

// Month returns the month number (January = 1).
func (b baseParser) Month() uint8 { return b.month }

// PaddedMonth returns the month number, zero-padded to two digits.
func (b baseParser) PaddedMonth() string { return b.paddedMonth }

// Year returns the 4-digit year.
func (b baseParser) Year() uint16 { return b.year }

// YearShort returns the last two digits of the year.
func (b baseParser) YearShort() uint8 { return b.yearShort }

// Hour returns the hour of the day (0-23).
func (b baseParser) Hour() uint8 { return b.hour }

// PaddedHour returns the hour, zero-padded to two digits.
func (b baseParser) PaddedHour() string { return b.paddedHour }

// Minute returns the minute of the hour.
func (b baseParser) Minute() uint8 { return b.minute }

// PaddedMinute returns the minute, zero-padded to two digits.
func (b baseParser) PaddedMinute() string { return b.paddedMinute }

// Second returns the second of the minute.
func (b baseParser) Second() uint8 { return b.second }

// PaddedSecond returns the second, zero-padded to two digits.
func (b baseParser) PaddedSecond() string { return b.paddedSecond }

// SecondUnix returns the Unix timestamp in seconds.
func (b baseParser) SecondUnix() int64 { return b.secondUnix }
parser/parser.go
0.731059
0.521898
parser.go
starcoder
package main

// usage is the top-level help text for pginsight.
//
// NOTE(review): this text lists "disk relations" while diskUsage below lists
// "disk tables" — confirm the actual subcommand name and align the two.
const usage = `
Usage:
  ./pginsight <cmdname> [--flags]

Commands:
  index usage       Shows which indexes are being scanned and how many tuples are fetched
  index unused      Shows the indexes which haven't been scanned
  index duplicate   Finds indexes which index on the same key(s)
  disk db           Shows disk usage for all databases accessible from the configured user
  disk relations    Show disk usage for all relations in the configured database
  cache total       Shows the total number of cache hits in the database
  cache tables      Breakdown of cache hits per table
  queries           Shows the slowest 10 queries.
`

// indexUsage describes the subcommands of "index".
const indexUsage = `
Usage:
  ./pginsight index <subcommand>

Commands:
  index usage       Shows which indexes are being scanned and how many tuples are fetched
  index unused      Shows the indexes which haven't been scanned
  index bloat       Measure index bloat for all tables
`

// idxBloatFields explains the columns of the index-bloat report.
// (Fixed user-facing typos: "Estitmation" -> "Estimation",
// "Which composed of" -> "which is composed of", "based of" -> "based on".)
const idxBloatFields = `
Fields Info:
  Extra Size:      Estimation of the amount of bytes not used in the table, which is composed of
                   fillfactor, bloat, and alignment padding spaces
  Extra Ratio:     Estimated ratio of the real size used by Extra Size
  Fillfactor:      http://www.postgresql.org/docs/9.4/static/sql-createtable.html#SQL-CREATETABLE-STORAGE-PARAMETERS
  Bloat Size:      Estimated size of the bloat without the extra space kept for the fillfactor
  Bloat Ratio:     Estimated ratio of the real size used by Bloat Size
  Not Applicable:  If true, do not trust the stats

Credits:
  This report is based on https://github.com/ioguix/pgsql-bloat-estimation
`

// cacheUsage describes the subcommands of "cache".
const cacheUsage = `
Usage:
  ./pginsight cache <subcommand>

Commands:
  cache total   Shows the total number of cache hits in the database
  cache tables  Breakdown of cache hits per table
`

// cacheFields explains the columns of the cache reports.
const cacheFields = `
Fields Info:
  Heap Read:  Number of disk blocks read.
  Heap Hit:   Number of buffer hits.
  Hit Ratio:  Ratio of cache hits.
`

// queriesHelp explains the columns of the slow-queries report.
const queriesHelp = `
Fields Info:
  Calls:           Number of times this query got executed
  Total Time:      Cumulative sum of time spent executing this statement, in minutes
  Avg Time/Query:  Average time spent executing this statement per query
  Rows:            Total number of rows retrieved or affected by the statement
  Hit Ratio:       Ratio of cache hits.
`

// diskUsage describes the subcommands of "disk".
// NOTE(review): see the subcommand-name mismatch noted on usage above.
const diskUsage = `
Usage:
  ./pginsight disk <subcommand>

Commands:
  disk db      Shows disk usage for all databases accessible from the configured user
  disk tables  Show disk usage for all tables in the configured database
`
help.go
0.773045
0.615767
help.go
starcoder
package metrics

import (
	"time"

	kitmetrics "github.com/go-kit/kit/metrics"
)

// defaultTimingUnit is the resolution we'll use for all duration measurements.
const defaultTimingUnit = time.Millisecond

// DurationTimer acts as a stopwatch, sending observations to a wrapped histogram.
// It's a bit of helpful syntax sugar for h.Observe(time.Since(x)), with a specified
// time duration unit.
type DurationTimer struct {
	h kitmetrics.Histogram // destination histogram for observations
	t time.Time            // instant the timer was started
	d time.Duration        // unit that measured durations are divided by
}

// NewDurationTimer wraps the given histogram and records the current time.
// It defaults to time.Millisecond units.
func NewDurationTimer(h kitmetrics.Histogram) *DurationTimer {
	return &DurationTimer{
		h: h,
		t: time.Now(),
		d: defaultTimingUnit,
	}
}

// ObserveDuration captures the number of time units since the timer was
// constructed, and forwards that observation to the histogram.
func (t *DurationTimer) ObserveDuration() {
	measureSince(t.h, t.t, time.Now(), t.d)
}

// MeasureSince takes a Histogram and initial time and generates
// an observation for the total duration of the operation. It's
// intended to be called via defer, e.g. defer MeasureSince(h, time.Now()).
func MeasureSince(h kitmetrics.Histogram, t0 time.Time) {
	measureSince(h, t0, time.Now(), defaultTimingUnit)
}

// measureSince is the underlying code for supporting both MeasureSince
// and DurationTimer.ObserveDuration.
func measureSince(h kitmetrics.Histogram, t0, t1 time.Time, unit time.Duration) {
	d := t1.Sub(t0)
	// Clamp negative spans (e.g. wall-clock adjustments) to zero so the
	// histogram never receives a negative observation.
	if d < 0 {
		d = 0
	}
	// Integer division truncates: spans shorter than one unit observe as 0.
	h.Observe(float64(d / unit))
}

// MonotonicTimer emits metrics periodically until it is stopped.
type MonotonicTimer struct {
	DurationTimer
	cancel chan struct{} // closed by Finish to stop the reporting goroutine
}

// NewMonotonicTimer takes a histogram and units like DurationTimer, as well
// as a frequency on which to report statistics. It starts the reporting
// goroutine immediately; the goroutine owns the ticker and stops it on exit.
func NewMonotonicTimer(h kitmetrics.Histogram, d, frequency time.Duration) *MonotonicTimer {
	t := newUnstartedMonotonicTimer(h, d)
	ticker := time.NewTicker(frequency)
	go t.start(ticker.Stop, ticker.C)
	return t
}

// newUnstartedMonotonicTimer builds the timer without launching the
// reporting goroutine.
func newUnstartedMonotonicTimer(h kitmetrics.Histogram, d time.Duration) *MonotonicTimer {
	return &MonotonicTimer{
		DurationTimer: DurationTimer{
			h: h,
			t: time.Now(),
			d: d,
		},
		cancel: make(chan struct{}),
	}
}

// start forwards one observation per tick until cancel is closed, then runs
// stop (the ticker's Stop) before returning.
func (t *MonotonicTimer) start(stop func(), nowc <-chan time.Time) {
	defer stop()
	for {
		select {
		case <-t.cancel:
			return
		case <-nowc:
			t.ObserveDuration()
		}
	}
}

// Finish stops the ongoing reports of duration and makes one final Observation.
// NOTE(review): the reporting goroutine may be mid-ObserveDuration when the
// final observation fires, so the Histogram is assumed safe for concurrent
// use — confirm against the implementations passed in.
func (t *MonotonicTimer) Finish() {
	close(t.cancel)
	t.ObserveDuration()
}
go-kit/metrics/timer.go
0.8119
0.466846
timer.go
starcoder
package geomfn import ( "math" "github.com/cockroachdb/cockroach/pkg/geo" "github.com/cockroachdb/errors" "github.com/twpayne/go-geom" ) // SnapToGrid snaps all coordinates in the Geometry to the given grid size, // offset by the given origin. It will remove duplicate points from the results. // If the resulting geometry is invalid, it will be converted to it's EMPTY form. func SnapToGrid(g geo.Geometry, origin geom.Coord, gridSize geom.Coord) (geo.Geometry, error) { if len(origin) != 4 { return geo.Geometry{}, errors.Newf("origin must be 4D") } if len(gridSize) != 4 { return geo.Geometry{}, errors.Newf("gridSize must be 4D") } if g.Empty() { return g, nil } geomT, err := g.AsGeomT() if err != nil { return geo.Geometry{}, err } retGeomT, err := snapToGrid(geomT, origin, gridSize) if err != nil { return geo.Geometry{}, err } return geo.MakeGeometryFromGeomT(retGeomT) } func snapCoordinateToGrid( l geom.Layout, dst []float64, src []float64, origin geom.Coord, gridSize geom.Coord, ) { dst[0] = snapOrdinateToGrid(src[0], origin[0], gridSize[0]) dst[1] = snapOrdinateToGrid(src[1], origin[1], gridSize[1]) if l.ZIndex() != -1 { dst[l.ZIndex()] = snapOrdinateToGrid(src[l.ZIndex()], origin[l.ZIndex()], gridSize[l.ZIndex()]) } if l.MIndex() != -1 { dst[l.MIndex()] = snapOrdinateToGrid(src[l.MIndex()], origin[l.MIndex()], gridSize[l.MIndex()]) } } func snapOrdinateToGrid( ordinate float64, originOrdinate float64, gridSizeOrdinate float64, ) float64 { if gridSizeOrdinate == 0 { return ordinate } return math.RoundToEven((ordinate-originOrdinate)/gridSizeOrdinate)*gridSizeOrdinate + originOrdinate } func snapToGrid(t geom.T, origin geom.Coord, gridSize geom.Coord) (geom.T, error) { if t.Empty() { return t, nil } t, err := applyOnCoordsForGeomT(t, func(l geom.Layout, dst []float64, src []float64) error { snapCoordinateToGrid(l, dst, src, origin, gridSize) return nil }) if err != nil { return nil, err } return removeConsecutivePointsFromGeomT(t) }
pkg/geo/geomfn/snap_to_grid.go
0.75037
0.434101
snap_to_grid.go
starcoder
package editor import ( "fmt" "github.com/elpinal/coco3/editor/register" "github.com/elpinal/coco3/screen" ) type nvCommon struct { streamSet *editor count int } type normal struct { nvCommon regName rune } func newNormalWithRegister(s streamSet, e *editor, regName rune) *normal { return &normal{ nvCommon: nvCommon{ streamSet: s, editor: e, }, regName: regName, } } func newNormal(s streamSet, e *editor) *normal { return newNormalWithRegister(s, e, register.Unnamed) } func norm() modeChanger { return func(b *balancer) (moder, error) { return newNormal(b.streamSet, b.editor), nil } } func (e *normal) Mode() mode { return modeNormal } func (e *normal) Run() (end continuity, next modeChanger, err error) { r, _, err := e.streamSet.in.ReadRune() if err != nil { return end, next, err } for ('1' <= r && r <= '9') || (e.count != 0 && r == '0') { e.count = e.count*10 + int(r-'0') r1, _, err := e.streamSet.in.ReadRune() if err != nil { return end, next, err } r = r1 } if e.count == 0 { e.count = 1 } if e.regName == 0 { e.regName = register.Unnamed } if cmd, ok := normalCommands[r]; ok { if m := cmd(e); m != nil { next = m } e.count = 0 if e.pos == len(e.buf) { e.move(e.pos - 1) } } e.regName = 0 return } func (e *normal) Runes() []rune { return e.editor.buf } func (e *normal) Position() int { return e.editor.pos } func (e *normal) Message() []rune { return nil } func (e *normal) Highlight() *screen.Hi { return nil } type normalCommand func(*normal) modeChanger var normalCommands = map[rune]normalCommand{ CharCtrlR: (*normal).redoCmd, '"': (*normal).handleRegister, ':': (*normal).commandline, '|': (*normal).column, '/': (*normal).search, '?': (*normal).searchBackward, '+': (*normal).increment, '-': (*normal).decrement, '~': (*normal).switchCase, '[': (*normal).prevUnmatched, ']': (*normal).nextUnmatched, '%': (*normal).moveToMatch, '$': (*normal).endline, '^': (*normal).beginlineNonBlank, '0': (*normal).beginline, 'A': (*normal).appendAtEnd, 'B': (*normal).wordBackNonBlank, 
'C': (*normal).changeToEnd, 'D': (*normal).deleteToEnd, 'E': (*normal).wordEndNonBlank, 'F': (*normal).searchCharacterBackward, 'I': (*normal).insertFirstNonBlank, 'N': (*normal).previous, 'P': (*normal).putHere, 'R': (*normal).replaceMode, 'T': (*normal).searchCharacterBackwardAfter, 'V': (*normal).visualLine, 'W': (*normal).wordNonBlank, 'X': (*normal).deleteBefore, 'Y': (*normal).yankToEnd, 'a': (*normal).appendAfter, 'b': (*normal).wordBack, 'c': (*normal).changeOp, 'd': (*normal).deleteOp, 'e': (*normal).wordEnd, 'f': (*normal).searchCharacter, 'g': (*normal).gCmd, 'h': (*normal).left, 'i': (*normal).edit, 'j': (*normal).down, 'k': (*normal).up, 'l': (*normal).right, 'n': (*normal).next, 'p': (*normal).put1, 'q': (*normal).record, 'r': (*normal).replace, 's': (*normal).siegeOp, 't': (*normal).searchCharacterBefore, 'u': (*normal).undoCmd, 'v': (*normal).visual, 'w': (*normal).word, 'x': (*normal).deleteUnder, 'y': (*normal).yankOp, } func (e *nvCommon) endline() (_ modeChanger) { e.move(len(e.buf)) return } func (e *nvCommon) beginline() (_ modeChanger) { e.move(0) return } func (e *nvCommon) beginlineNonBlank() (_ modeChanger) { i := e.indexFunc(isWhitespace, 0, false) if i < 0 { return e.endline() } e.move(i) return } func (e *nvCommon) wordBack() (_ modeChanger) { for i := 0; i < e.count; i++ { e.wordBackward() } return } func (e *nvCommon) wordBackNonBlank() (_ modeChanger) { for i := 0; i < e.count; i++ { e.wordBackwardNonBlank() } return } func (e *normal) changeOp() modeChanger { return opPend(OpChange, e.count, e.regName) } func (e *normal) deleteOp() modeChanger { return opPend(OpDelete, e.count, e.regName) } func (e *normal) yankOp() modeChanger { return opPend(OpYank, e.count, e.regName) } func (e *nvCommon) left() (_ modeChanger) { e.move(e.pos - e.count) return } func ins(rightmost bool) modeChanger { return func(b *balancer) (moder, error) { if rightmost { // Revert to the rightmost position. 
b.pos = len(b.buf) } return newInsert(b.streamSet, b.editor, b.s, b.conf), nil } } func (e *normal) insertFromBeginning() modeChanger { e.move(0) return ins(e.pos == len(e.buf)) } func (e *normal) appendAtEnd() modeChanger { e.move(len(e.buf)) return ins(e.pos == len(e.buf)) } func (e *normal) insertFirstNonBlank() modeChanger { _ = e.beginlineNonBlank() return ins(e.pos == len(e.buf)) } func (e *normal) appendAfter() modeChanger { e.move(e.pos + 1) return ins(e.pos == len(e.buf)) } func (e *normal) edit() modeChanger { return ins(e.pos == len(e.buf)) } func (e *normal) down() (_ modeChanger) { if e.age >= len(e.history)-e.count { return } e.history[e.age] = e.buf e.age += e.count e.buf = e.history[e.age] e.pos = len(e.buf) return } func (e *normal) up() (_ modeChanger) { if e.age <= e.count-1 { return } if e.age == len(e.history) { e.history = append(e.history, e.buf) } else { e.history[e.age] = e.buf } e.age -= e.count e.buf = e.history[e.age] e.pos = len(e.buf) return } func (e *nvCommon) right() (_ modeChanger) { e.move(e.pos + e.count) return } func (e *normal) putHere() (_ modeChanger) { for i := 0; i < e.count; i++ { e.put(e.regName, e.pos) e.move(e.pos - 1) } e.undoTree.add(e.buf) return } func (e *normal) put1() (_ modeChanger) { for i := 0; i < e.count; i++ { e.put(e.regName, e.pos+1) e.move(e.pos + len(e.Read(e.regName))) } e.undoTree.add(e.buf) return } func (e *normal) replace() (_ modeChanger) { r, _, _ := e.streamSet.in.ReadRune() s := make([]rune, e.count) for i := 0; i < e.count; i++ { s[i] = r } e.editor.replace(s, e.pos) e.move(e.pos + e.count - 1) e.undoTree.add(e.buf) return } func (e *nvCommon) word() (_ modeChanger) { for i := 0; i < e.count; i++ { e.wordForward() } return } func (e *nvCommon) wordNonBlank() (_ modeChanger) { for i := 0; i < e.count; i++ { e.wordForwardNonBlank() } return } func (e *nvCommon) wordEnd() (_ modeChanger) { for i := 0; i < e.count; i++ { e.editor.wordEnd() } return } func (e *nvCommon) wordEndNonBlank() (_ 
modeChanger) { for i := 0; i < e.count; i++ { e.editor.wordEndNonBlank() } return } func (e *normal) deleteUnder() (_ modeChanger) { from, to := e.pos, e.pos+e.count e.yank(e.regName, from, to) e.delete(from, to) e.undoTree.add(e.buf) return } func (e *normal) deleteBefore() (_ modeChanger) { from, to := e.pos-e.count, e.pos e.yank(e.regName, from, to) e.delete(from, to) e.undoTree.add(e.buf) return } func (e *normal) deleteToEnd() (_ modeChanger) { from, to := e.pos, len(e.buf) e.yank(e.regName, from, to) e.delete(from, to) e.undoTree.add(e.buf) return } func (e *normal) changeToEnd() modeChanger { _ = e.deleteToEnd() return ins(e.pos == len(e.buf)) } func (e *normal) yankToEnd() (_ modeChanger) { e.yank(e.regName, e.pos, len(e.buf)) return } func (e *nvCommon) searchCharacter() (_ modeChanger) { r, _, err := e.streamSet.in.ReadRune() if err != nil { return } pos := e.pos for i := 0; i < e.count; i++ { i, err := e.charSearch(r) if err != nil { e.move(pos) return } e.move(i) } return } func (e *nvCommon) searchCharacterBackward() (_ modeChanger) { r, _, err := e.streamSet.in.ReadRune() if err != nil { return } pos := e.pos for i := 0; i < e.count; i++ { i, err := e.charSearchBackward(r) if err != nil { e.move(pos) return } e.move(i) } return } func (e *nvCommon) searchCharacterBefore() (_ modeChanger) { r, _, err := e.streamSet.in.ReadRune() if err != nil { return } pos := e.pos for i := 0; i < e.count; i++ { i, err := e.charSearchBefore(r) if err != nil { e.move(pos) return } e.move(i + 1) } e.move(e.pos - 1) return } func (e *nvCommon) searchCharacterBackwardAfter() (_ modeChanger) { r, _, err := e.streamSet.in.ReadRune() if err != nil { return } pos := e.pos for i := 0; i < e.count; i++ { i, err := e.charSearchBackwardAfter(r) if err != nil { e.move(pos) return } e.move(i - 1) } e.move(e.pos + 1) return } func (e *normal) gCmd() (_ modeChanger) { r, _, err := e.streamSet.in.ReadRune() if err != nil { return } switch r { case 'u': return opPend(OpLower, e.count, 
e.regName) case 'U': return opPend(OpUpper, e.count, e.regName) case '~': return opPend(OpSwitchCase, e.count, e.regName) case '/': return e.searchHistory() case 'I': return e.insertFromBeginning() case 'e': return e.wordEndBackward() case 'E': return e.wordEndBackwardNonBlank() } return } func (e *normal) undoCmd() (_ modeChanger) { for i := 0; i < e.count; i++ { e.undo() } return } func (e *normal) redoCmd() (_ modeChanger) { for i := 0; i < e.count; i++ { e.redo() } return } func (e *normal) replaceMode() modeChanger { return func(b *balancer) (moder, error) { return newReplace(b.streamSet, b.editor), nil } } func (e *normal) handleRegister() (_ modeChanger) { r, _, err := e.streamSet.in.ReadRune() if err != nil { return } if !register.IsValid(r) { return } return func(b *balancer) (moder, error) { return newNormalWithRegister(b.streamSet, b.editor, r), nil } } func (e *normal) commandline() modeChanger { return func(b *balancer) (moder, error) { return newCommandline(b.streamSet, b.editor), nil } } func (e *normal) visual() modeChanger { return func(b *balancer) (moder, error) { return newVisual(b.streamSet, b.editor), nil } } func (e *normal) visualLine() modeChanger { return func(b *balancer) (moder, error) { return newVisualLine(b.streamSet, b.editor), nil } } func (e *nvCommon) column() (_ modeChanger) { e.move(constrain(e.count-1, 0, len(e.buf))) return } func (e *normal) search() modeChanger { return func(b *balancer) (moder, error) { return newSearch(b.streamSet, b.editor, searchForward), nil } } func (e *normal) searchBackward() modeChanger { return func(b *balancer) (moder, error) { return newSearch(b.streamSet, b.editor, searchBackward), nil } } func (e *nvCommon) next() (_ modeChanger) { for i := 0; i < e.count; i++ { e.move(e.editor.next()) } return } func (e *nvCommon) previous() (_ modeChanger) { for i := 0; i < e.count; i++ { e.move(e.editor.previous()) } return } func (e *normal) searchHistory() (_ modeChanger) { return func(b *balancer) (moder, 
error) { return newSearch(b.streamSet, b.editor, searchHistoryForward), nil } } func (e *normal) indexNumber() int { if len(e.buf) == 0 { return -1 } i0 := e.pos for ; 0 < i0; i0-- { r := e.buf[i0] if !('0' <= r && r <= '9') { break } } var negative bool for i, r := range e.buf[i0:] { if '0' <= r && r <= '9' { if negative { i-- } return i + i0 } if !negative && r == '-' { negative = true } else if negative { // a digit does not follow '-', so the '-' is not a minus sign. negative = false } } return -1 } func (e *normal) parseNumber(i int) (a int, l int) { i0 := i if e.buf[i] == '-' { i0++ defer func() { a *= -1 }() } for n := i0; n < len(e.buf); n++ { r := int(e.buf[n]) if '0' <= r && r <= '9' { a = 10*a + r - '0' continue } return a, n - i } return a, len(e.buf) - i } func (e *normal) updateNumber(f func(int) int) { i := e.indexNumber() if i < 0 { return } n, l := e.parseNumber(i) e.delete(i, i+l) s := fmt.Sprint(f(n)) e.insert([]rune(s), i) e.move(i + len(s) - 1) } func (e *normal) increment() (_ modeChanger) { e.updateNumber(func(n int) int { return n + e.count }) return } func (e *normal) decrement() (_ modeChanger) { e.updateNumber(func(n int) int { return n - e.count }) return } func (e *normal) switchCase() (_ modeChanger) { e.editor.switchCase(e.pos, e.pos+e.count) e.move(e.pos + e.count) return } func (e *normal) siegeOp() (_ modeChanger) { return opPend(OpSiege, e.count, e.regName) } func (e *nvCommon) prevUnmatched() (_ modeChanger) { r, _, _ := e.in.ReadRune() var rp, lp rune switch r { case '(': rp = '(' lp = ')' case '{': rp = '{' lp = '}' default: return } i := e.searchLeft(rp, lp) if i < 0 { return } e.move(i) return } func (e *nvCommon) nextUnmatched() (_ modeChanger) { r, _, _ := e.in.ReadRune() var rp, lp rune switch r { case ')': rp = '(' lp = ')' case '}': rp = '{' lp = '}' default: return } i := e.searchRight(rp, lp) if i < 0 { return } e.move(i) return } func isParen(r rune) bool { switch r { case '(', ')', '[', ']', '{', '}': return true } 
return false } func getRightParen(r rune) rune { return map[rune]rune{ '(': ')', '[': ']', '{': '}', }[r] } func getLeftParen(r rune) rune { return map[rune]rune{ ')': '(', ']': '[', '}': '{', }[r] } func (e *nvCommon) moveToMatch() (_ modeChanger) { i := e.indexFunc(isParen, e.pos, true) if i < 0 { return } initPos := e.pos e.move(i) p := e.buf[i] switch p { case '(', '[', '{': i := e.searchRight(p, getRightParen(p)) if i < 0 { e.move(initPos) return } e.move(i) case ')', ']', '}': i := e.searchLeft(getLeftParen(p), p) if i < 0 { e.move(initPos) return } e.move(i) } return } func (e *nvCommon) wordEndBackwardNonBlank() (_ modeChanger) { for i := 0; i < e.count; i++ { e.editor.wordEndBackwardNonBlank() } return } func (e *nvCommon) wordEndBackward() (_ modeChanger) { for i := 0; i < e.count; i++ { e.editor.wordEndBackward() } return } func isAlphanum(r rune) bool { switch { case '0' <= r && r <= '9', 'a' <= r && r <= 'z', 'A' <= r && r <= 'Z': return true } return false } func (e *nvCommon) record() (_ modeChanger) { if !e.in.record { e.in.Record() return } s := e.in.Stop() r, _, _ := e.in.ReadRune() if !isAlphanum(r) || r != '"' { return } e.Register(r, s) return }
editor/normal.go
0.521959
0.419291
normal.go
starcoder
package path_sum

import (
	"bytes"
	"sort"
	"strconv"
)

/*
112. Path Sum
https://leetcode-cn.com/problems/path-sum

Given a binary tree and a target sum, decide whether the tree contains a
root-to-leaf path whose node values add up to the target.

Note: a leaf is a node with no children.

Example: with sum = 22 and the tree

	      5
	     / \
	    4   8
	   /   / \
	  11  13  4
	 /  \      \
	7    2      1

return true, because the root-to-leaf path 5->4->11->2 sums to 22.
*/

type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}

// Plain DFS: subtract the current node's value from the target and recurse;
// at a leaf, the remaining target must equal the leaf's value.
func hasPathSum(root *TreeNode, sum int) bool {
	if root == nil {
		return false
	}
	if root.Left == nil && root.Right == nil {
		return root.Val == sum
	}
	return hasPathSum(root.Left, sum-root.Val) || hasPathSum(root.Right, sum-root.Val)
}

/*
Variant: 113. Path Sum II
https://leetcode-cn.com/problems/path-sum-ii

Given a binary tree and a target sum, return all root-to-leaf paths whose
node values add up to the target.

Note: a leaf is a node with no children.

Example: with sum = 22 and the tree

	      5
	     / \
	    4   8
	   /   / \
	  11  13  4
	 /  \    / \
	7    2  5   1

return [ [5,4,11,2], [5,8,4,5] ].
*/

/*
Track the current path in a slice while walking the tree; when a leaf is
reached and the running sum equals the target, record the path. Because the
slice shares its backing array across recursive calls, a deep copy is made
before appending it to the result.
*/
func pathSum(root *TreeNode, sum int) [][]int {
	var result [][]int
	var path []int
	prefixSum := 0
	var dfs func(*TreeNode)
	dfs = func(node *TreeNode) {
		if node == nil {
			return
		}
		path = append(path, node.Val)
		prefixSum += node.Val
		if node.Left == nil && node.Right == nil && prefixSum == sum {
			// Deep-copy: path's backing array is mutated by later calls.
			tmp := make([]int, len(path))
			_ = copy(tmp, path)
			result = append(result, tmp)
		}
		dfs(node.Left)
		dfs(node.Right)
		// Backtrack.
		path = path[:len(path)-1]
		prefixSum -= node.Val
	}
	dfs(root)
	return result
}

/*
Variant: the tree is a multi-way tree stored in arrays, Trie-like.

Assume there are k nodes numbered 0 to k-1; the number is the node id.
caps[i] is the value of node i.
relations is an adjacency map: key is a node id, value is the slice of its
children's ids.
Given sum, return every path from the root (id 0) to a leaf whose values add
up to sum. Each path is rendered as a string, and the final result is sorted
in non-increasing string order.
*/
func getPath(caps []int, relations map[int][]int, sum int) []string {
	var result []string
	var path []int
	prefixSum := 0
	var dfs func(nodeId int)
	dfs = func(nodeId int) {
		path = append(path, caps[nodeId])
		prefixSum += caps[nodeId]
		if len(relations[nodeId]) == 0 && prefixSum == sum {
			result = append(result, parsePath(path))
		}
		for _, c := range relations[nodeId] {
			dfs(c)
		}
		path = path[:len(path)-1]
		prefixSum -= caps[nodeId]
	}
	dfs(0)
	sort.Slice(result, func(i, j int) bool {
		return result[i] > result[j]
	})
	return result
}

// parsePath renders a path as space-separated values, without a trailing space.
func parsePath(path []int) string {
	buf := bytes.NewBuffer(nil)
	for _, v := range path {
		buf.WriteString(strconv.Itoa(v))
		buf.WriteString(" ")
	}
	result := buf.String()
	return result[:len(result)-1]
}

/*
Variant: what if a path need not start at the root nor end at a leaf?

437. Path Sum III
https://leetcode-cn.com/problems/path-sum-iii

Each node of the binary tree holds an integer. Count the paths whose values
add up to the given target. A path need not start at the root or end at a
leaf, but it must go downwards (parent to child only).

The tree has at most 1000 nodes with values in [-1000000, 1000000].

Example: root = [10,5,-3,3,2,null,11,3,-2,null,1], sum = 8

	      10
	     /  \
	    5   -3
	   / \    \
	  3   2   11
	 / \   \
	3  -2   1

returns 3; the paths summing to 8 are:
1. 5 -> 3
2. 5 -> 2 -> 1
3. -3 -> 11
*/

// Recursive solution; relatively expensive because of repeated work.
func pathSumCount(root *TreeNode, sum int) int {
	if root == nil {
		return 0
	}
	result := countPrefix(root, sum)
	result += pathSumCount(root.Left, sum)
	result += pathSumCount(root.Right, sum)
	return result
}

// countPrefix returns the number of paths starting at root whose values add
// up to sum — recursive version.
func countPrefix(root *TreeNode, sum int) int {
	if root == nil {
		return 0
	}
	result := 0
	if root.Val == sum {
		result = 1
	}
	result += countPrefix(root.Left, sum-root.Val)
	result += countPrefix(root.Right, sum-root.Val)
	return result
}

/*
Another implementation of countPrefix: a variable prefixSum records the sum
along the current path, and is backtracked after leaving a node. This idea
leads to the pathSumCount0 implementation below.
*/
func countPrefix1(root *TreeNode, sum int) int {
	prefixSum := 0
	result := 0
	var dfs func(node *TreeNode)
	dfs = func(node *TreeNode) {
		if node == nil {
			return
		}
		prefixSum += node.Val
		if prefixSum == sum {
			result++
		}
		dfs(node.Left)
		dfs(node.Right)
		// Backtrack.
		prefixSum -= node.Val
	}
	dfs(root)
	return result
}

/*
Prefix-sum technique, as in problem 560 (Subarray Sum Equals K):
if an ancestor x of node y satisfies prefixSum(x) = prefixSum(y) - sum, then
the path from x down to y sums to exactly sum.

A hash map records, for the current root-to-node path, how many times each
prefix sum has occurred, which avoids the repeated work of the recursive
version.
*/
func pathSumCount0(root *TreeNode, sum int) int {
	counts := make(map[int]int, 0) // key: prefix sum, value: number of occurrences on the current path
	counts[0] = 1                  // one empty prefix, so a path starting at the root (node value alone equals sum) is counted
	res := 0
	prefixSum := 0
	var dfs func(*TreeNode)
	dfs = func(node *TreeNode) {
		if node == nil {
			return
		}
		prefixSum += node.Val       // prefix sum from the root down to the current node
		res += counts[prefixSum-sum] // every earlier node with prefix sum prefixSum-sum starts a path ending here with total sum
		counts[prefixSum]++
		dfs(node.Left)
		dfs(node.Right)
		// Backtrack.
		counts[prefixSum]--
		prefixSum -= node.Val
	}
	dfs(root)
	return res
}
solutions/path-sum/d.go
0.52975
0.463505
d.go
starcoder
package sqlbuilder import ( "strings" ) // Update returns a new UPDATE statement with the default dialect. func Update() UpdateStatement { return UpdateStatement{dialect: DefaultDialect} } type updateSet struct { col string arg interface{} raw bool } // UpdateStatement represents an UPDATE statement. type UpdateStatement struct { dialect Dialect table string sets []updateSet wheres []where args []interface{} } // Dialect returns a new statement with dialect set to 'dialect'. func (s UpdateStatement) Dialect(dialect Dialect) UpdateStatement { s.dialect = dialect return s } // Table returns a new statement with the table to update set to 'table'. func (s UpdateStatement) Table(table string) UpdateStatement { s.table = table return s } // Set returns a new statement with column 'col' set to value 'val'. func (s UpdateStatement) Set(col string, val interface{}) UpdateStatement { s.sets = append(s.sets, updateSet{col: col, arg: val, raw: false}) return s } // SetSQL returns a new statement with column 'col' set to SQL expression 'sql'. func (s UpdateStatement) SetSQL(col string, sql string) UpdateStatement { s.sets = append(s.sets, updateSet{col: col, arg: sql, raw: true}) return s } // Where returns a new statement with condition 'cond'. // Multiple Where() are combined with AND. func (s UpdateStatement) Where(cond string, args ...interface{}) UpdateStatement { s.wheres = append(s.wheres, where{cond, args}) return s } // Build builds the SQL query. It returns the query and the argument slice. 
func (s UpdateStatement) Build() (query string, args []interface{}) { if len(s.sets) == 0 { panic("sqlbuilder: no columns set") } query = "UPDATE " + s.table + " SET " var sets []string idx := 0 for _, set := range s.sets { var arg string if set.raw { arg = set.arg.(string) } else { arg = s.dialect.Placeholder(idx) idx++ args = append(args, set.arg) } sets = append(sets, set.col+" = "+arg) } query += strings.Join(sets, ", ") if len(s.wheres) > 0 { var sqls []string for _, w := range s.wheres { sql := "(" + w.sql + ")" for _, arg := range w.args { p := s.dialect.Placeholder(idx) idx++ sql = strings.Replace(sql, "?", p, 1) sqls = append(sqls, sql) args = append(args, arg) } } query += " WHERE " + strings.Join(sqls, " AND ") } return }
vendor/github.com/thcyron/sqlbuilder/update.go
0.666497
0.418103
update.go
starcoder
package main

import (
	"strconv"
	"strings"
)

// point is an x,y coordinate pair.
type point struct {
	x uint16
	y uint16
}

// ParsePipeMap parses a string representation of a map of pipes.
// Accepts diagonal lines on a 45 degree angle if allowDiagonal is set.
// Returns a map of points and the times they overlap.
//
// NOTE(review): a degenerate line "a,b -> a,b" counts its single point
// twice, since both endpoints are incremented — confirm whether the input
// can contain such lines.
func ParsePipeMap(input []string, allowDiagonal bool) map[point]int16 {
	overlaps := make(map[point]int16)
	isValid := nonDiagonalCheck
	if allowDiagonal {
		isValid = diagonalCheck
	}
	for _, line := range input {
		ends := strings.Split(line, " -> ")
		x1, y1 := parseCoord(ends[0])
		x2, y2 := parseCoord(ends[1])
		if !isValid(x1, y1, x2, y2) {
			continue
		}
		incrementPoint(overlaps, x1, y1)
		incrementPoint(overlaps, x2, y2)
		if !nonDiagonalCheck(x1, y1, x2, y2) {
			processDiagonalLine(overlaps, x1, y1, x2, y2)
			continue
		}
		// Fill interior points; at most one of these loops runs for a
		// non-degenerate horizontal or vertical line.
		for i := x1 + 1; i < x2; i++ {
			incrementPoint(overlaps, i, y1)
		}
		for i := y1 + 1; i < y2; i++ {
			incrementPoint(overlaps, x1, i)
		}
		for i := x2 + 1; i < x1; i++ {
			incrementPoint(overlaps, i, y1)
		}
		for i := y2 + 1; i < y1; i++ {
			incrementPoint(overlaps, x1, i)
		}
	}
	return overlaps
}

// parseCoord splits an "x,y" pair into int16 coordinates.
// Parse errors are ignored, matching the original best-effort behavior.
func parseCoord(s string) (int16, int16) {
	parts := strings.Split(s, ",")
	x, _ := strconv.Atoi(parts[0])
	y, _ := strconv.Atoi(parts[1])
	return int16(x), int16(y)
}

// incrementPoint increments the overlap count for (x, y) in the point map.
func incrementPoint(pointMap map[point]int16, x int16, y int16) {
	pointMap[point{uint16(x), uint16(y)}]++
}

// nonDiagonalCheck only allows lines that are horizontal or vertical.
func nonDiagonalCheck(x1 int16, y1 int16, x2 int16, y2 int16) bool {
	return x1 == x2 || y1 == y2
}

// diagonalCheck only allows lines that are horizontal, vertical or
// on a 45 degree angle.
func diagonalCheck(x1 int16, y1 int16, x2 int16, y2 int16) bool {
	dx := x1 - x2
	if dx < 0 {
		dx = -dx
	}
	dy := y1 - y2
	if dy < 0 {
		dy = -dy
	}
	return x1 == x2 || y1 == y2 || dx == dy
}

// processDiagonalLine adds the interior points of a 45 degree line from
// (x1,y1) to (x2,y2) to the point map; the endpoints are handled by the caller.
func processDiagonalLine(pointMap map[point]int16, x1 int16, y1 int16, x2 int16, y2 int16) {
	xStep, yStep := int16(1), int16(1)
	steps := x2 - x1
	if steps < 0 {
		steps = -steps
		xStep = -1
	}
	if y2 < y1 {
		yStep = -1
	}
	for i := int16(1); i < steps; i++ {
		incrementPoint(pointMap, x1+i*xStep, y1+i*yStep)
	}
}
day5.go
0.656438
0.520131
day5.go
starcoder
package pricing import ( "fmt" "github.com/transcom/mymove/pkg/models" ) //same values used in each parse and verify function const feeColIndexStart int = 6 // start at column 6 to get the rates const feeRowIndexStart int = 10 // start at row 10 to get the rates const originPriceAreaIDColumn int = 2 const originPriceAreaColumn int = 3 const destinationPriceAreaIDColumn int = 4 const destinationPriceAreaColumn int = 5 // parseOconusToOconusPrices: parser for 3a) OCONUS to OCONUS Prices var parseOconusToOconusPrices processXlsxSheet = func(params ParamConfig, sheetIndex int, logger Logger) (interface{}, error) { // XLSX Sheet consts const xlsxDataSheetNum int = 10 // 3a) OCONUS TO OCONUS Prices if xlsxDataSheetNum != sheetIndex { return nil, fmt.Errorf("parseOconusToOconusPrices expected to process sheet %d, but received sheetIndex %d", xlsxDataSheetNum, sheetIndex) } prefixPrinter := newDebugPrefix("StageOconusToOconusPrice") var oconusToOconusPrices []models.StageOconusToOconusPrice sheet := params.XlsxFile.Sheets[xlsxDataSheetNum] for rowIndex := feeRowIndexStart; rowIndex < sheet.MaxRow; rowIndex++ { colIndex := feeColIndexStart // For each Rate Season for _, r := range rateSeasons { oconusToOconusPrice := models.StageOconusToOconusPrice{ OriginIntlPriceAreaID: mustGetCell(sheet, rowIndex, originPriceAreaIDColumn), OriginIntlPriceArea: mustGetCell(sheet, rowIndex, originPriceAreaColumn), DestinationIntlPriceAreaID: mustGetCell(sheet, rowIndex, destinationPriceAreaIDColumn), DestinationIntlPriceArea: mustGetCell(sheet, rowIndex, destinationPriceAreaColumn), Season: r, } oconusToOconusPrice.HHGShippingLinehaulPrice = mustGetCell(sheet, rowIndex, colIndex) colIndex++ oconusToOconusPrice.UBPrice = mustGetCell(sheet, rowIndex, colIndex) prefixPrinter.Printf("%+v\n", oconusToOconusPrice) oconusToOconusPrices = append(oconusToOconusPrices, oconusToOconusPrice) colIndex += 2 // skip 1 column (empty column) before starting next Rate type } } return oconusToOconusPrices, 
nil } // parseConusToOconusPrices: parser for 3b) CONUS to OCONUS Prices var parseConusToOconusPrices processXlsxSheet = func(params ParamConfig, sheetIndex int, logger Logger) (interface{}, error) { // XLSX Sheet consts const xlsxDataSheetNum int = 11 // 3b) CONUS TO OCONUS Prices if xlsxDataSheetNum != sheetIndex { return nil, fmt.Errorf("parseConusToOconusPrices expected to process sheet %d, but received sheetIndex %d", xlsxDataSheetNum, sheetIndex) } prefixPrinter := newDebugPrefix("StageConusToOconusPrice") var conusToOconusPrices []models.StageConusToOconusPrice sheet := params.XlsxFile.Sheets[xlsxDataSheetNum] for rowIndex := feeRowIndexStart; rowIndex < sheet.MaxRow; rowIndex++ { colIndex := feeColIndexStart // For each Rate Season for _, r := range rateSeasons { conusToOconusPrice := models.StageConusToOconusPrice{ OriginDomesticPriceAreaCode: mustGetCell(sheet, rowIndex, originPriceAreaIDColumn), OriginDomesticPriceArea: mustGetCell(sheet, rowIndex, originPriceAreaColumn), DestinationIntlPriceAreaID: mustGetCell(sheet, rowIndex, destinationPriceAreaIDColumn), DestinationIntlPriceArea: mustGetCell(sheet, rowIndex, destinationPriceAreaColumn), Season: r, } conusToOconusPrice.HHGShippingLinehaulPrice = mustGetCell(sheet, rowIndex, colIndex) colIndex++ conusToOconusPrice.UBPrice = mustGetCell(sheet, rowIndex, colIndex) prefixPrinter.Printf("%+v\n", conusToOconusPrice) conusToOconusPrices = append(conusToOconusPrices, conusToOconusPrice) colIndex += 2 // skip 1 column (empty column) before starting next Rate type } } return conusToOconusPrices, nil } // parseOconusToConusPrices: parser for 3c) OCONUS to CONUS Prices var parseOconusToConusPrices processXlsxSheet = func(params ParamConfig, sheetIndex int, logger Logger) (interface{}, error) { // XLSX Sheet consts const xlsxDataSheetNum int = 12 // 3c) OCONUS TO CONUS Prices if xlsxDataSheetNum != sheetIndex { return nil, fmt.Errorf("parseOconusToConusPrices expected to process sheet %d, but received sheetIndex 
%d", xlsxDataSheetNum, sheetIndex) } prefixPrinter := newDebugPrefix("StageOconusToConusPrice") var oconusToConusPrices []models.StageOconusToConusPrice sheet := params.XlsxFile.Sheets[xlsxDataSheetNum] for rowIndex := feeRowIndexStart; rowIndex < sheet.MaxRow; rowIndex++ { colIndex := feeColIndexStart // For each Rate Season for _, r := range rateSeasons { oconusToConusPrice := models.StageOconusToConusPrice{ OriginIntlPriceAreaID: mustGetCell(sheet, rowIndex, originPriceAreaIDColumn), OriginIntlPriceArea: mustGetCell(sheet, rowIndex, originPriceAreaColumn), DestinationDomesticPriceAreaCode: mustGetCell(sheet, rowIndex, destinationPriceAreaIDColumn), DestinationDomesticPriceArea: mustGetCell(sheet, rowIndex, destinationPriceAreaColumn), Season: r, } oconusToConusPrice.HHGShippingLinehaulPrice = mustGetCell(sheet, rowIndex, colIndex) colIndex++ oconusToConusPrice.UBPrice = mustGetCell(sheet, rowIndex, colIndex) prefixPrinter.Printf("%+v\n", oconusToConusPrice) oconusToConusPrices = append(oconusToConusPrices, oconusToConusPrice) colIndex += 2 // skip 1 column (empty column) before starting next Rate type } } return oconusToConusPrices, nil } func verifyInternationalPrices(params ParamConfig, sheetIndex int, xlsxSheetNum int) error { // XLSX Sheet consts xlsxDataSheetNum := xlsxSheetNum // Check headers const headerIndexStart = feeRowIndexStart - 3 const verifyHeaderIndexEnd = headerIndexStart + 2 const repeatingHeaderIndexStart = feeRowIndexStart - 2 const verifyHeaderIndexEnd2 = repeatingHeaderIndexStart + 2 if xlsxDataSheetNum != sheetIndex { return fmt.Errorf("verifyInternationalPrices expected to process sheet %d, but received sheetIndex %d", xlsxDataSheetNum, sheetIndex) } // Verify header strings repeatingHeaders := []string{ "HHG Shipping / Linehaul Price (except SIT) (per cwt)", "UB Price (except SIT) (per cwt)", } sheet := params.XlsxFile.Sheets[xlsxDataSheetNum] for dataRowIndex := headerIndexStart; dataRowIndex < verifyHeaderIndexEnd; dataRowIndex++ { 
colIndex := feeColIndexStart // For each Rate Season for _, r := range rateSeasons { verificationLog := fmt.Sprintf(" , verfication for row index: %d, colIndex: %d, rateSeasons %v", dataRowIndex, colIndex, r) if dataRowIndex == 0 { if xlsxSheetNum == 10 { if "OriginIntlPriceAreaID" != removeWhiteSpace(mustGetCell(sheet, dataRowIndex, originPriceAreaIDColumn)) { return fmt.Errorf("format error: Header <OriginIntlPriceAreaID> is missing got <%s> instead\n%s", removeWhiteSpace(mustGetCell(sheet, dataRowIndex, originPriceAreaIDColumn)), verificationLog) } if "OriginIntlPriceArea(PPIRA)" != removeWhiteSpace(mustGetCell(sheet, dataRowIndex, originPriceAreaColumn)) { return fmt.Errorf("format error: Header <OriginIntlPriceArea(PPIRA)> is missing got <%s> instead\n%s", removeWhiteSpace(mustGetCell(sheet, dataRowIndex, originPriceAreaColumn)), verificationLog) } if "DestinationIntlPriceAreaID" != removeWhiteSpace(mustGetCell(sheet, dataRowIndex, destinationPriceAreaIDColumn)) { return fmt.Errorf("format error: Header <DestinationIntlPriceAreaID> is missing got <%s> instead\n%s", removeWhiteSpace(mustGetCell(sheet, dataRowIndex, destinationPriceAreaIDColumn)), verificationLog) } if "DestinationIntlPriceArea(PPIRA)" != removeWhiteSpace(mustGetCell(sheet, dataRowIndex, destinationPriceAreaColumn)) { return fmt.Errorf("format error: Header <DestinationIntlPriceArea(PPIRA)> is missing got <%s> instead\n%s", removeWhiteSpace(mustGetCell(sheet, dataRowIndex, destinationPriceAreaColumn)), verificationLog) } } if xlsxSheetNum == 11 { if "OriginDomesticPriceAreaCode" != removeWhiteSpace(mustGetCell(sheet, dataRowIndex, originPriceAreaIDColumn)) { return fmt.Errorf("format error: Header <OriginDomesticPriceAreaCode> is missing got <%s> instead\n%s", removeWhiteSpace(mustGetCell(sheet, dataRowIndex, originPriceAreaIDColumn)), verificationLog) } if "OriginDomesticPriceArea(PPDRA)" != removeWhiteSpace(mustGetCell(sheet, dataRowIndex, originPriceAreaColumn)) { return fmt.Errorf("format 
error: Header <OriginDomesticPriceArea(PPDRA)> is missing got <%s> instead\n%s", removeWhiteSpace(mustGetCell(sheet, dataRowIndex, originPriceAreaColumn)), verificationLog) } if "DestinationIntlPriceAreaID" != removeWhiteSpace(mustGetCell(sheet, dataRowIndex, destinationPriceAreaIDColumn)) { return fmt.Errorf("format error: Header <DestinationIntlPriceAreaID> is missing got <%s> instead\n%s", removeWhiteSpace(mustGetCell(sheet, dataRowIndex, destinationPriceAreaIDColumn)), verificationLog) } if "DestinationIntlPriceArea(PPIRA)" != removeWhiteSpace(mustGetCell(sheet, dataRowIndex, destinationPriceAreaColumn)) { return fmt.Errorf("format error: Header <DestinationIntlPriceArea(PPIRA)> is missing got <%s> instead\n%s", removeWhiteSpace(mustGetCell(sheet, dataRowIndex, destinationPriceAreaColumn)), verificationLog) } } if xlsxSheetNum == 12 { if "OriginIntlPriceAreaID" != removeWhiteSpace(mustGetCell(sheet, dataRowIndex, originPriceAreaIDColumn)) { return fmt.Errorf("format error: Header <OriginIntlPriceAreaID> is missing got <%s> instead\n%s", removeWhiteSpace(mustGetCell(sheet, dataRowIndex, originPriceAreaIDColumn)), verificationLog) } if "OriginInternationalPriceArea(PPIRA)" != removeWhiteSpace(mustGetCell(sheet, dataRowIndex, originPriceAreaColumn)) { return fmt.Errorf("format error: Header <OriginInternationalPriceArea(PPIRA)> is missing got <%s> instead\n%s", removeWhiteSpace(mustGetCell(sheet, dataRowIndex, originPriceAreaColumn)), verificationLog) } if "DestinationDomesticPriceAreaCode" != removeWhiteSpace(mustGetCell(sheet, dataRowIndex, destinationPriceAreaIDColumn)) { return fmt.Errorf("format error: Header <DestinationDomesticPriceAreaCode> is missing got <%s> instead\n%s", removeWhiteSpace(mustGetCell(sheet, dataRowIndex, destinationPriceAreaIDColumn)), verificationLog) } if "DestinationDomesticPriceArea(PPDRA)" != removeWhiteSpace(mustGetCell(sheet, dataRowIndex, destinationPriceAreaColumn)) { return fmt.Errorf("format error: Header 
<DestinationDomesticPriceArea(PPDRA)> is missing got <%s> instead\n%s", removeWhiteSpace(mustGetCell(sheet, dataRowIndex, destinationPriceAreaColumn)), verificationLog) } } for repeatingRowIndex := repeatingHeaderIndexStart; repeatingRowIndex < verifyHeaderIndexEnd2; repeatingRowIndex++ { if repeatingRowIndex == 0 { colIndex := feeColIndexStart for _, repeatingHeader := range repeatingHeaders { if removeWhiteSpace(repeatingHeader) != removeWhiteSpace(mustGetCell(sheet, repeatingRowIndex, colIndex)) { return fmt.Errorf("format error: Header contains <%s> is missing got <%s> instead\n%s", removeWhiteSpace(repeatingHeader), removeWhiteSpace(mustGetCell(sheet, repeatingRowIndex, colIndex)), verificationLog) } colIndex++ } } else if dataRowIndex == 1 { if "EXAMPLE" != removeWhiteSpace(mustGetCell(sheet, repeatingRowIndex, originPriceAreaColumn)) { return fmt.Errorf("format error: Filler text <EXAMPLE> is missing got <%s> instead\n%s", removeWhiteSpace(mustGetCell(sheet, repeatingRowIndex, originPriceAreaColumn)), verificationLog) } } } } } } return nil } var verifyIntlOconusToOconusPrices verifyXlsxSheet = func(params ParamConfig, sheetIndex int) error { const xlsxSheetNum = 10 return verifyInternationalPrices(params, sheetIndex, xlsxSheetNum) } var verifyIntlConusToOconusPrices verifyXlsxSheet = func(params ParamConfig, sheetIndex int) error { const xlsxSheetNum = 11 return verifyInternationalPrices(params, sheetIndex, xlsxSheetNum) } var verifyIntlOconusToConusPrices verifyXlsxSheet = func(params ParamConfig, sheetIndex int) error { const xlsxSheetNum = 12 return verifyInternationalPrices(params, sheetIndex, xlsxSheetNum) }
pkg/parser/pricing/parse_international_prices.go
0.556641
0.592608
parse_international_prices.go
starcoder
package types import ( "encoding/json" "fmt" "math/big" "strings" "github.com/thetatoken/theta/common" ) var ( Zero *big.Int Hundred *big.Int ) func init() { Zero = big.NewInt(0) Hundred = big.NewInt(100) } type Coins struct { ThetaWei *big.Int TFuelWei *big.Int } type CoinsJSON struct { ThetaWei *common.JSONBig `json:"thetawei"` TFuelWei *common.JSONBig `json:"tfuelwei"` } func NewCoinsJSON(coin Coins) CoinsJSON { return CoinsJSON{ ThetaWei: (*common.JSONBig)(coin.ThetaWei), TFuelWei: (*common.JSONBig)(coin.TFuelWei), } } func (c CoinsJSON) Coins() Coins { return Coins{ ThetaWei: (*big.Int)(c.ThetaWei), TFuelWei: (*big.Int)(c.TFuelWei), } } func (c Coins) MarshalJSON() ([]byte, error) { return json.Marshal(NewCoinsJSON(c)) } func (c *Coins) UnmarshalJSON(data []byte) error { var a CoinsJSON if err := json.Unmarshal(data, &a); err != nil { return err } *c = a.Coins() return nil } // NewCoins is a convenient method for creating small amount of coins. func NewCoins(theta int64, tfuel int64) Coins { return Coins{ ThetaWei: big.NewInt(theta), TFuelWei: big.NewInt(tfuel), } } func (coins Coins) String() string { return fmt.Sprintf("%v %v, %v %v", coins.ThetaWei, DenomThetaWei, coins.TFuelWei, DenomTFuelWei) } func (coins Coins) IsValid() bool { return coins.IsNonnegative() } func (coins Coins) NoNil() Coins { theta := coins.ThetaWei if theta == nil { theta = big.NewInt(0) } tfuel := coins.TFuelWei if tfuel == nil { tfuel = big.NewInt(0) } return Coins{ ThetaWei: theta, TFuelWei: tfuel, } } // CalculatePercentage function calculates amount of coins for the given the percentage func (coins Coins) CalculatePercentage(percentage uint) Coins { c := coins.NoNil() p := big.NewInt(int64(percentage)) theta := new(big.Int) theta.Mul(c.ThetaWei, p) theta.Div(theta, Hundred) tfuel := new(big.Int) tfuel.Mul(c.TFuelWei, p) tfuel.Div(tfuel, Hundred) return Coins{ ThetaWei: theta, TFuelWei: tfuel, } } // Currently appends an empty coin ... 
func (coinsA Coins) Plus(coinsB Coins) Coins { cA := coinsA.NoNil() cB := coinsB.NoNil() theta := new(big.Int) theta.Add(cA.ThetaWei, cB.ThetaWei) tfuel := new(big.Int) tfuel.Add(cA.TFuelWei, cB.TFuelWei) return Coins{ ThetaWei: theta, TFuelWei: tfuel, } } func (coins Coins) Negative() Coins { c := coins.NoNil() theta := new(big.Int) theta.Neg(c.ThetaWei) tfuel := new(big.Int) tfuel.Neg(c.TFuelWei) return Coins{ ThetaWei: theta, TFuelWei: tfuel, } } func (coinsA Coins) Minus(coinsB Coins) Coins { return coinsA.Plus(coinsB.Negative()) } func (coinsA Coins) IsGTE(coinsB Coins) bool { diff := coinsA.Minus(coinsB) return diff.IsNonnegative() } func (coins Coins) IsZero() bool { c := coins.NoNil() return c.ThetaWei.Cmp(Zero) == 0 && c.TFuelWei.Cmp(Zero) == 0 } func (coinsA Coins) IsEqual(coinsB Coins) bool { cA := coinsA.NoNil() cB := coinsB.NoNil() return cA.ThetaWei.Cmp(cB.ThetaWei) == 0 && cA.TFuelWei.Cmp(cB.TFuelWei) == 0 } func (coins Coins) IsPositive() bool { c := coins.NoNil() return (c.ThetaWei.Cmp(Zero) > 0 && c.TFuelWei.Cmp(Zero) >= 0) || (c.ThetaWei.Cmp(Zero) >= 0 && c.TFuelWei.Cmp(Zero) > 0) } func (coins Coins) IsNonnegative() bool { c := coins.NoNil() return c.ThetaWei.Cmp(Zero) >= 0 && c.TFuelWei.Cmp(Zero) >= 0 } // ParseCoinAmount parses a string representation of coin amount. func ParseCoinAmount(in string) (*big.Int, bool) { inWei := false if len(in) > 3 && strings.EqualFold("wei", in[len(in)-3:]) { inWei = true in = in[:len(in)-3] } f, ok := new(big.Float).SetPrec(1024).SetString(in) if !ok || f.Sign() < 0 { return nil, false } if !inWei { f = f.Mul(f, new(big.Float).SetPrec(1024).SetUint64(1e18)) } ret, _ := f.Int(nil) return ret, true }
VM/ledger/types/coin.go
0.693992
0.485661
coin.go
starcoder
package models import ( i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e "time" i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization" ) // SynchronizationTaskExecution type SynchronizationTaskExecution struct { // Identifier of the job run. activityIdentifier *string // Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well. additionalData map[string]interface{} // Count of processed entries that were assigned for this application. countEntitled *int64 // Count of processed entries that were assigned for provisioning. countEntitledForProvisioning *int64 // Count of entries that were escrowed (errors). countEscrowed *int64 // Count of entries that were escrowed, including system-generated escrows. countEscrowedRaw *int64 // Count of exported entries. countExported *int64 // Count of entries that were expected to be exported. countExports *int64 // Count of imported entries. countImported *int64 // Count of imported delta-changes. countImportedDeltas *int64 // Count of imported delta-changes pertaining to reference changes. countImportedReferenceDeltas *int64 // If an error was encountered, contains a synchronizationError object with details. error SynchronizationErrorable // Code summarizing the result of this run. Possible values are: Succeeded, Failed, EntryLevelErrors. state *SynchronizationTaskExecutionResult // Time when this job run began. The Timestamp type represents date and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is 2014-01-01T00:00:00Z. timeBegan *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time // Time when this job run ended. The Timestamp type represents date and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is 2014-01-01T00:00:00Z. 
timeEnded *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time } // NewSynchronizationTaskExecution instantiates a new synchronizationTaskExecution and sets the default values. func NewSynchronizationTaskExecution()(*SynchronizationTaskExecution) { m := &SynchronizationTaskExecution{ } m.SetAdditionalData(make(map[string]interface{})); return m } // CreateSynchronizationTaskExecutionFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value func CreateSynchronizationTaskExecutionFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) { return NewSynchronizationTaskExecution(), nil } // GetActivityIdentifier gets the activityIdentifier property value. Identifier of the job run. func (m *SynchronizationTaskExecution) GetActivityIdentifier()(*string) { if m == nil { return nil } else { return m.activityIdentifier } } // GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well. func (m *SynchronizationTaskExecution) GetAdditionalData()(map[string]interface{}) { if m == nil { return nil } else { return m.additionalData } } // GetCountEntitled gets the countEntitled property value. Count of processed entries that were assigned for this application. func (m *SynchronizationTaskExecution) GetCountEntitled()(*int64) { if m == nil { return nil } else { return m.countEntitled } } // GetCountEntitledForProvisioning gets the countEntitledForProvisioning property value. Count of processed entries that were assigned for provisioning. func (m *SynchronizationTaskExecution) GetCountEntitledForProvisioning()(*int64) { if m == nil { return nil } else { return m.countEntitledForProvisioning } } // GetCountEscrowed gets the countEscrowed property value. 
Count of entries that were escrowed (errors). func (m *SynchronizationTaskExecution) GetCountEscrowed()(*int64) { if m == nil { return nil } else { return m.countEscrowed } } // GetCountEscrowedRaw gets the countEscrowedRaw property value. Count of entries that were escrowed, including system-generated escrows. func (m *SynchronizationTaskExecution) GetCountEscrowedRaw()(*int64) { if m == nil { return nil } else { return m.countEscrowedRaw } } // GetCountExported gets the countExported property value. Count of exported entries. func (m *SynchronizationTaskExecution) GetCountExported()(*int64) { if m == nil { return nil } else { return m.countExported } } // GetCountExports gets the countExports property value. Count of entries that were expected to be exported. func (m *SynchronizationTaskExecution) GetCountExports()(*int64) { if m == nil { return nil } else { return m.countExports } } // GetCountImported gets the countImported property value. Count of imported entries. func (m *SynchronizationTaskExecution) GetCountImported()(*int64) { if m == nil { return nil } else { return m.countImported } } // GetCountImportedDeltas gets the countImportedDeltas property value. Count of imported delta-changes. func (m *SynchronizationTaskExecution) GetCountImportedDeltas()(*int64) { if m == nil { return nil } else { return m.countImportedDeltas } } // GetCountImportedReferenceDeltas gets the countImportedReferenceDeltas property value. Count of imported delta-changes pertaining to reference changes. func (m *SynchronizationTaskExecution) GetCountImportedReferenceDeltas()(*int64) { if m == nil { return nil } else { return m.countImportedReferenceDeltas } } // GetError gets the error property value. If an error was encountered, contains a synchronizationError object with details. 
func (m *SynchronizationTaskExecution) GetError()(SynchronizationErrorable) { if m == nil { return nil } else { return m.error } } // GetFieldDeserializers the deserialization information for the current model func (m *SynchronizationTaskExecution) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) { res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) res["activityIdentifier"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { val, err := n.GetStringValue() if err != nil { return err } if val != nil { m.SetActivityIdentifier(val) } return nil } res["countEntitled"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { val, err := n.GetInt64Value() if err != nil { return err } if val != nil { m.SetCountEntitled(val) } return nil } res["countEntitledForProvisioning"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { val, err := n.GetInt64Value() if err != nil { return err } if val != nil { m.SetCountEntitledForProvisioning(val) } return nil } res["countEscrowed"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { val, err := n.GetInt64Value() if err != nil { return err } if val != nil { m.SetCountEscrowed(val) } return nil } res["countEscrowedRaw"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { val, err := n.GetInt64Value() if err != nil { return err } if val != nil { m.SetCountEscrowedRaw(val) } return nil } res["countExported"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { val, err := n.GetInt64Value() if err != nil { return err } if val != nil { m.SetCountExported(val) } return nil } res["countExports"] = func (n 
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { val, err := n.GetInt64Value() if err != nil { return err } if val != nil { m.SetCountExports(val) } return nil } res["countImported"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { val, err := n.GetInt64Value() if err != nil { return err } if val != nil { m.SetCountImported(val) } return nil } res["countImportedDeltas"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { val, err := n.GetInt64Value() if err != nil { return err } if val != nil { m.SetCountImportedDeltas(val) } return nil } res["countImportedReferenceDeltas"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { val, err := n.GetInt64Value() if err != nil { return err } if val != nil { m.SetCountImportedReferenceDeltas(val) } return nil } res["error"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { val, err := n.GetObjectValue(CreateSynchronizationErrorFromDiscriminatorValue) if err != nil { return err } if val != nil { m.SetError(val.(SynchronizationErrorable)) } return nil } res["state"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { val, err := n.GetEnumValue(ParseSynchronizationTaskExecutionResult) if err != nil { return err } if val != nil { m.SetState(val.(*SynchronizationTaskExecutionResult)) } return nil } res["timeBegan"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { val, err := n.GetTimeValue() if err != nil { return err } if val != nil { m.SetTimeBegan(val) } return nil } res["timeEnded"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { val, err := n.GetTimeValue() if err != nil { return err } if val != nil { m.SetTimeEnded(val) } return nil } return res } // GetState gets the state 
property value. Code summarizing the result of this run. Possible values are: Succeeded, Failed, EntryLevelErrors. func (m *SynchronizationTaskExecution) GetState()(*SynchronizationTaskExecutionResult) { if m == nil { return nil } else { return m.state } } // GetTimeBegan gets the timeBegan property value. Time when this job run began. The Timestamp type represents date and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is 2014-01-01T00:00:00Z. func (m *SynchronizationTaskExecution) GetTimeBegan()(*i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time) { if m == nil { return nil } else { return m.timeBegan } } // GetTimeEnded gets the timeEnded property value. Time when this job run ended. The Timestamp type represents date and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is 2014-01-01T00:00:00Z. func (m *SynchronizationTaskExecution) GetTimeEnded()(*i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time) { if m == nil { return nil } else { return m.timeEnded } } // Serialize serializes information the current object func (m *SynchronizationTaskExecution) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) { { err := writer.WriteStringValue("activityIdentifier", m.GetActivityIdentifier()) if err != nil { return err } } { err := writer.WriteInt64Value("countEntitled", m.GetCountEntitled()) if err != nil { return err } } { err := writer.WriteInt64Value("countEntitledForProvisioning", m.GetCountEntitledForProvisioning()) if err != nil { return err } } { err := writer.WriteInt64Value("countEscrowed", m.GetCountEscrowed()) if err != nil { return err } } { err := writer.WriteInt64Value("countEscrowedRaw", m.GetCountEscrowedRaw()) if err != nil { return err } } { err := writer.WriteInt64Value("countExported", m.GetCountExported()) if err != nil { return err } } 
{ err := writer.WriteInt64Value("countExports", m.GetCountExports()) if err != nil { return err } } { err := writer.WriteInt64Value("countImported", m.GetCountImported()) if err != nil { return err } } { err := writer.WriteInt64Value("countImportedDeltas", m.GetCountImportedDeltas()) if err != nil { return err } } { err := writer.WriteInt64Value("countImportedReferenceDeltas", m.GetCountImportedReferenceDeltas()) if err != nil { return err } } { err := writer.WriteObjectValue("error", m.GetError()) if err != nil { return err } } if m.GetState() != nil { cast := (*m.GetState()).String() err := writer.WriteStringValue("state", &cast) if err != nil { return err } } { err := writer.WriteTimeValue("timeBegan", m.GetTimeBegan()) if err != nil { return err } } { err := writer.WriteTimeValue("timeEnded", m.GetTimeEnded()) if err != nil { return err } } { err := writer.WriteAdditionalData(m.GetAdditionalData()) if err != nil { return err } } return nil } // SetActivityIdentifier sets the activityIdentifier property value. Identifier of the job run. func (m *SynchronizationTaskExecution) SetActivityIdentifier(value *string)() { if m != nil { m.activityIdentifier = value } } // SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well. func (m *SynchronizationTaskExecution) SetAdditionalData(value map[string]interface{})() { if m != nil { m.additionalData = value } } // SetCountEntitled sets the countEntitled property value. Count of processed entries that were assigned for this application. func (m *SynchronizationTaskExecution) SetCountEntitled(value *int64)() { if m != nil { m.countEntitled = value } } // SetCountEntitledForProvisioning sets the countEntitledForProvisioning property value. Count of processed entries that were assigned for provisioning. 
func (m *SynchronizationTaskExecution) SetCountEntitledForProvisioning(value *int64)() { if m != nil { m.countEntitledForProvisioning = value } } // SetCountEscrowed sets the countEscrowed property value. Count of entries that were escrowed (errors). func (m *SynchronizationTaskExecution) SetCountEscrowed(value *int64)() { if m != nil { m.countEscrowed = value } } // SetCountEscrowedRaw sets the countEscrowedRaw property value. Count of entries that were escrowed, including system-generated escrows. func (m *SynchronizationTaskExecution) SetCountEscrowedRaw(value *int64)() { if m != nil { m.countEscrowedRaw = value } } // SetCountExported sets the countExported property value. Count of exported entries. func (m *SynchronizationTaskExecution) SetCountExported(value *int64)() { if m != nil { m.countExported = value } } // SetCountExports sets the countExports property value. Count of entries that were expected to be exported. func (m *SynchronizationTaskExecution) SetCountExports(value *int64)() { if m != nil { m.countExports = value } } // SetCountImported sets the countImported property value. Count of imported entries. func (m *SynchronizationTaskExecution) SetCountImported(value *int64)() { if m != nil { m.countImported = value } } // SetCountImportedDeltas sets the countImportedDeltas property value. Count of imported delta-changes. func (m *SynchronizationTaskExecution) SetCountImportedDeltas(value *int64)() { if m != nil { m.countImportedDeltas = value } } // SetCountImportedReferenceDeltas sets the countImportedReferenceDeltas property value. Count of imported delta-changes pertaining to reference changes. func (m *SynchronizationTaskExecution) SetCountImportedReferenceDeltas(value *int64)() { if m != nil { m.countImportedReferenceDeltas = value } } // SetError sets the error property value. If an error was encountered, contains a synchronizationError object with details. 
func (m *SynchronizationTaskExecution) SetError(value SynchronizationErrorable)() { if m != nil { m.error = value } } // SetState sets the state property value. Code summarizing the result of this run. Possible values are: Succeeded, Failed, EntryLevelErrors. func (m *SynchronizationTaskExecution) SetState(value *SynchronizationTaskExecutionResult)() { if m != nil { m.state = value } } // SetTimeBegan sets the timeBegan property value. Time when this job run began. The Timestamp type represents date and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is 2014-01-01T00:00:00Z. func (m *SynchronizationTaskExecution) SetTimeBegan(value *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time)() { if m != nil { m.timeBegan = value } } // SetTimeEnded sets the timeEnded property value. Time when this job run ended. The Timestamp type represents date and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is 2014-01-01T00:00:00Z. func (m *SynchronizationTaskExecution) SetTimeEnded(value *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time)() { if m != nil { m.timeEnded = value } }
models/synchronization_task_execution.go
0.701713
0.424114
synchronization_task_execution.go
starcoder
package main import ( "fmt" "strings" "github.com/theatlasroom/advent-of-code/go/utils" ) /** --- Day 6: Custom Customs --- As your flight approaches the regional airport where you'll switch to a much larger plane, customs declaration forms are distributed to the passengers. The form asks a series of 26 yes-or-no questions marked a through z. All you need to do is identify the questions for which anyone in your group answers "yes". Since your group is just you, this doesn't take very long. However, the person sitting next to you seems to be experiencing a language barrier and asks if you can help. For each of the people in their group, you write down the questions for which they answer "yes", one per line. For example: abcx abcy abcz In this group, there are 6 questions to which anyone answered "yes": a, b, c, x, y, and z. (Duplicate answers to the same question don't count extra; each question counts at most once.) Another group asks for your help, then another, and eventually you've collected answers from every group on the plane (your puzzle input). Each group's answers are separated by a blank line, and within each group, each person's answers are on a single line. For example: abc a b c ab ac a a a a b This list represents answers from five groups: The first group contains one person who answered "yes" to 3 questions: a, b, and c. The second group contains three people; combined, they answered "yes" to 3 questions: a, b, and c. The third group contains two people; combined, they answered "yes" to 3 questions: a, b, and c. The fourth group contains four people; combined, they answered "yes" to only 1 question, a. The last group contains one person who answered "yes" to only 1 question, b. In this example, the sum of these counts is 3 + 3 + 3 + 1 + 1 = 11. For each group, count the number of questions to which anyone answered "yes". What is the sum of those counts? 
*/ type answers map[string]bool type group struct { responses string answers completeAnswers int } type customsGroups []group func (g group) countAnswers() int { return len(g.answers) } func (cg customsGroups) sumAnswers() int { count := 0 for _, g := range cg { count += g.countAnswers() } return count } func (cg customsGroups) sumCompleteAnswers() int { count := 0 for _, g := range cg { count += g.completeAnswers } return count } func parseGroupResponses(str string) (answers, int) { participants := len(strings.Split(str, " ")) completed := 0 a := make(answers) unique := make(map[string]int) for _, x := range str { char := string(x) if char != " " { a[char] = true unique[char]++ } } for _, y := range unique { if y >= participants { completed++ } } return a, completed } func parseData(data string) customsGroups { var cg customsGroups groupAnswers := strings.Split(strings.Replace(data, "\n", " ", -1), " ") for _, responses := range groupAnswers { answers, completeAnswers := parseGroupResponses(responses) cg = append(cg, group{responses, answers, completeAnswers}) } return cg } func main() { utils.Banner(utils.BannerConfig{Year: 2020, Day: 6}) data := utils.LoadDataAsString("6.txt") groups := parseData(data) fmt.Println(groups.sumAnswers()) fmt.Println(groups.sumCompleteAnswers()) }
go/2020/6.go
0.665628
0.529263
6.go
starcoder
package configs import ( "errors" "fmt" "math/big" "os" "reflect" "strconv" "strings" ) // MustLoadWithPrefix loads the environment variables into a struct. // It panics if any of the environment variables' values can't be // coerced into the type defined on the struct. func MustLoadWithPrefix(container interface{}, prefix string) { err := LoadWithPrefix(container, prefix) if err != nil { panic(err) } } // LoadWithPrefix loads the values of environment variables into a struct. // It returns an error if any of the environment variable values don't match // the type defined on the struct. func LoadWithPrefix(container interface{}, prefix string) error { return visit(container, loader(prefix)) } // loader returns a visitor which populates the struct's properties with // environment variables. func loader(prefix string) visitor { return visitor(func(environment string, value reflect.Value) *visitError { environment = prefix + environment environmentValue, isSet := os.LookupEnv(environment) if !isSet { return nil } switch value.Kind() { case reflect.Bool: return parseAndSetBool(environment, value, environmentValue) case reflect.Int: return parseAndSetInt(environment, value, environmentValue) case reflect.Uint64: return parseAndSetUInt(environment, value, environmentValue, 64) case reflect.Uint32: return parseAndSetUInt(environment, value, environmentValue, 32) case reflect.Uint16: return parseAndSetUInt(environment, value, environmentValue, 16) case reflect.Uint8: return parseAndSetUInt(environment, value, environmentValue, 8) case reflect.String: value.SetString(environmentValue) return nil case reflect.Slice: switch value.Type().Elem().Kind() { case reflect.String: value.Set(reflect.ValueOf(parseCommaSeparatedStrings(environmentValue))) return nil case reflect.Int: return parseAndSetIntSlice(environment, value, environmentValue) default: panic(fmt.Sprintf("loadEnvironmentVisitor() is not yet implement for slices of type %v", value.Type().Elem().Kind())) } case 
reflect.Struct: switch value.Type().String() { case "big.Int": return parseAndSetBigInt(environment, value, environmentValue) default: panic("loadEnvironmentVisitor() hasn't yet implemented parsing for type " + value.Type().String()) } case reflect.Ptr: switch value.Type().String() { case "*big.Int": return parseAndSetBigIntPointer(environment, value, environmentValue) default: panic("loadEnvironmentVisitor() hasn't yet implemented parsing for type " + value.Type().String()) } default: panic("loadEnvironmentVisitor() hasn't yet implemented parsing for type " + value.String()) } }) } func parseAndSetBool(env string, toSet reflect.Value, value string) *visitError { switch value { case "true": toSet.SetBool(true) case "false": toSet.SetBool(false) default: return &visitError{ error: errors.New(`must be "true" or "false"`), Key: env, } } return nil } func parseAndSetInt(env string, toSet reflect.Value, value string) *visitError { parsed, err := parseInt(value) if err != nil { return &visitError{ error: errors.New("must be an int"), Key: env, } } toSet.SetInt(parsed) return nil } func parseAndSetUInt(env string, toSet reflect.Value, value string, bitSize int) *visitError { parsed, err := strconv.ParseUint(value, 10, bitSize) if casted, ok := err.(*strconv.NumError); ok && casted != nil { if casted.Err == strconv.ErrRange { return &visitError{ error: fmt.Errorf("has a max value of %d", parsed), Key: env, } } if _, err := strconv.ParseInt(value, 10, 64); err == nil { return &visitError{ error: errors.New("has a min value of 0"), Key: env, } } return &visitError{ error: errors.New("must be a uint" + strconv.FormatInt(int64(bitSize), 10)), Key: env, } } toSet.SetUint(parsed) return nil } func parseInt(value string) (int64, error) { return strconv.ParseInt(value, 10, 64) } func parseAndSetBigInt(env string, toSet reflect.Value, value string) *visitError { parsed, ok := parseBigInt(value) if !ok { return &visitError{ error: errors.New("must be a base-10 big.Int"), Key: env, } 
} toSet.Set(reflect.ValueOf(parsed)) return nil } func parseAndSetBigIntPointer(env string, toSet reflect.Value, value string) *visitError { parsed, ok := parseBigInt(value) if !ok { return &visitError{ error: errors.New("must be a base-10 big.Int"), Key: env, } } toSet.Set(reflect.ValueOf(&parsed)) return nil } func parseBigInt(value string) (big.Int, bool) { parsed := big.Int{} _, ok := parsed.SetString(value, 10) return parsed, ok } func parseCommaSeparatedStrings(value string) []string { if value == "" { return nil } return strings.Split(value, ",") } func parseAndSetIntSlice(env string, toSet reflect.Value, value string) *visitError { parsed, err := parseCommaSeparatedInts(value) if err != nil { return &visitError{ error: err, Key: env, } } toSet.Set(reflect.ValueOf(parsed)) return nil } func parseCommaSeparatedInts(value string) ([]int, error) { if value == "" { return nil, nil } stringSlice := strings.Split(value, ",") intSlice := make([]int, len(stringSlice)) for i := 0; i < len(stringSlice); i++ { parsed, err := strconv.Atoi(stringSlice[i]) if err != nil { return nil, fmt.Errorf(`must be a comma-separated list of ints: index %d is invalid`, i) } intSlice[i] = parsed } return intSlice, nil }
load.go
0.605916
0.520984
load.go
starcoder
package main import ( "math/rand" . "github.com/jakecoffman/cp" "github.com/jakecoffman/cp/examples" ) const ( bevel = 1 ) func randUnitCircle() Vector { v := Vector{rand.Float64()*2.0 - 1.0, rand.Float64()*2.0 - 1.0} if v.LengthSq() < 1.0 { return v } return randUnitCircle() } var simpleTerrainVerts = []Vector{ {350.00, 425.07}, {336.00, 436.55}, {272.00, 435.39}, {258.00, 427.63}, {225.28, 420.00}, {202.82, 396.00}, {191.81, 388.00}, {189.00, 381.89}, {173.00, 380.39}, {162.59, 368.00}, {150.47, 319.00}, {128.00, 311.55}, {119.14, 286.00}, {126.84, 263.00}, {120.56, 227.00}, {141.14, 178.00}, {137.52, 162.00}, {146.51, 142.00}, {156.23, 136.00}, {158.00, 118.27}, {170.00, 100.77}, {208.43, 84.00}, {224.00, 69.65}, {249.30, 68.00}, {257.00, 54.77}, {363.00, 45.94}, {374.15, 54.00}, {386.00, 69.60}, {413.00, 70.73}, {456.00, 84.89}, {468.09, 99.00}, {467.09, 123.00}, {464.92, 135.00}, {469.00, 141.03}, {497.00, 148.67}, {513.85, 180.00}, {509.56, 223.00}, {523.51, 247.00}, {523.00, 277.00}, {497.79, 311.00}, {478.67, 348.00}, {467.90, 360.00}, {456.76, 382.00}, {432.95, 389.00}, {417.00, 411.32}, {373.00, 433.19}, {361.00, 430.02}, {350.00, 425.07}, } func addCircle(space *Space, index int, radius float64) { mass := radius * radius / 25.0 body := space.AddBody(NewBody(mass, MomentForCircle(mass, 0, radius, Vector{}))) body.SetPosition(randUnitCircle().Mult(180)) shape := space.AddShape(NewCircle(body, radius, Vector{})) shape.SetElasticity(0) shape.SetFriction(0.9) } func update(space *Space, dt float64) { space.Step(dt) } func simpleTerrain() *Space { space := NewSpace() space.Iterations = 10 space.SetGravity(Vector{0, -100}) space.SetCollisionSlop(0.5) offset := Vector{-320, -240} for i := 0; i < len(simpleTerrainVerts)-1; i++ { a := simpleTerrainVerts[i] b := simpleTerrainVerts[i+1] space.AddShape(NewSegment(space.StaticBody, a.Add(offset), b.Add(offset), 0)) } return space } func simpleTerrainCircles_1000() *Space { space := simpleTerrain() for i := 0; i < 
1000; i++ { addCircle(space, i, 5) } return space } func main() { examples.Main(simpleTerrainCircles_1000(), 1.0/60.0, update, examples.DefaultDraw) }
examples/bench/bench.go
0.633864
0.579817
bench.go
starcoder
package synthesizer // EqualTemperedNote type EqualTemperedNote float64 // These constants represent the frequncies for the equal-tempered scale tuned to A4 = 440Hz const ( C0 EqualTemperedNote = 16.35 C0S EqualTemperedNote = 17.32 D0 EqualTemperedNote = 18.35 D0S EqualTemperedNote = 19.45 E0 EqualTemperedNote = 20.60 F0 EqualTemperedNote = 21.83 F0S EqualTemperedNote = 23.12 G0 EqualTemperedNote = 24.50 G0S EqualTemperedNote = 25.96 A0 EqualTemperedNote = 27.50 A0S EqualTemperedNote = 29.14 B0 EqualTemperedNote = 30.87 C1 EqualTemperedNote = 32.70 C1S EqualTemperedNote = 34.65 D1 EqualTemperedNote = 36.71 D1S EqualTemperedNote = 38.89 E1 EqualTemperedNote = 41.20 F1 EqualTemperedNote = 43.65 F1S EqualTemperedNote = 46.25 G1 EqualTemperedNote = 49.00 G1S EqualTemperedNote = 51.91 A1 EqualTemperedNote = 55.00 A1S EqualTemperedNote = 58.27 B1 EqualTemperedNote = 61.74 C2 EqualTemperedNote = 65.41 C2S EqualTemperedNote = 69.30 D2 EqualTemperedNote = 73.42 D2S EqualTemperedNote = 77.78 E2 EqualTemperedNote = 82.41 F2 EqualTemperedNote = 87.31 F2S EqualTemperedNote = 92.50 G2 EqualTemperedNote = 98.00 G2S EqualTemperedNote = 103.83 A2 EqualTemperedNote = 110.00 A2S EqualTemperedNote = 116.54 B2 EqualTemperedNote = 123.47 C3 EqualTemperedNote = 130.81 C3S EqualTemperedNote = 138.59 D3 EqualTemperedNote = 146.83 D3S EqualTemperedNote = 155.56 E3 EqualTemperedNote = 164.81 F3 EqualTemperedNote = 174.61 F3S EqualTemperedNote = 185.00 G3 EqualTemperedNote = 196.00 G3S EqualTemperedNote = 207.65 A3 EqualTemperedNote = 220.00 A3S EqualTemperedNote = 233.08 B3 EqualTemperedNote = 246.94 C4 EqualTemperedNote = 261.63 C4S EqualTemperedNote = 277.18 D4 EqualTemperedNote = 293.66 D4S EqualTemperedNote = 311.13 E4 EqualTemperedNote = 329.63 F4 EqualTemperedNote = 349.23 F4S EqualTemperedNote = 369.99 G4 EqualTemperedNote = 392.00 G4S EqualTemperedNote = 415.30 A4 EqualTemperedNote = 440.00 A4S EqualTemperedNote = 466.16 B4 EqualTemperedNote = 493.88 C5 EqualTemperedNote = 523.25 C5S 
EqualTemperedNote = 554.37 D5 EqualTemperedNote = 587.33 D5S EqualTemperedNote = 622.25 E5 EqualTemperedNote = 659.25 F5 EqualTemperedNote = 698.46 F5S EqualTemperedNote = 739.99 G5 EqualTemperedNote = 783.99 G5S EqualTemperedNote = 830.61 A5 EqualTemperedNote = 880.00 A5S EqualTemperedNote = 932.33 B5 EqualTemperedNote = 987.77 C6 EqualTemperedNote = 1046.50 C6S EqualTemperedNote = 1108.73 D6 EqualTemperedNote = 1174.66 D6S EqualTemperedNote = 1244.51 E6 EqualTemperedNote = 1318.51 F6 EqualTemperedNote = 1396.91 F6S EqualTemperedNote = 1479.98 G6 EqualTemperedNote = 1567.98 G6S EqualTemperedNote = 1661.22 A6 EqualTemperedNote = 1760.00 A6S EqualTemperedNote = 1864.66 B6 EqualTemperedNote = 1975.53 C7 EqualTemperedNote = 2093.00 C7S EqualTemperedNote = 2217.46 D7 EqualTemperedNote = 2349.32 D7S EqualTemperedNote = 2489.02 E7 EqualTemperedNote = 2636.02 F7 EqualTemperedNote = 2793.83 F7S EqualTemperedNote = 2959.96 G7 EqualTemperedNote = 3135.96 G7S EqualTemperedNote = 3322.44 A7 EqualTemperedNote = 3520.00 A7S EqualTemperedNote = 3729.31 B7 EqualTemperedNote = 3951.07 C8 EqualTemperedNote = 4186.01 C8S EqualTemperedNote = 4434.92 D8 EqualTemperedNote = 4698.63 D8S EqualTemperedNote = 4978.03 E8 EqualTemperedNote = 5274.04 F8 EqualTemperedNote = 5587.65 F8S EqualTemperedNote = 5919.91 G8 EqualTemperedNote = 6271.93 G8S EqualTemperedNote = 6644.88 A8 EqualTemperedNote = 7040.00 A8S EqualTemperedNote = 7458.62 B8 EqualTemperedNote = 7902.13 )
synthesizer/constants.go
0.665302
0.832169
constants.go
starcoder
package task import ( "github.com/twinj/uuid" "time" ) /* Task represents a task or a subtask. Tasks can be stand-alone todo items or they can be broken down into subtasks, which can then have their own subtasks, and so on. Subtasks have parents which they can be grouped into. Tasks can have multiple parents to cover the possibility of a single task accomplishing two parent tasks, for example "clean room" can be a subtask for "clean house" as well as "prepare for parents' visit". A task with no parents is called a "root" task. */ type Task struct { ID string `json:"id"` Name string `json:"name"` Complete bool `json:complete` CreatedDate int64 `json:"createdDate"` ModifiedDate int64 `json:"modifiedDate"` DueDate int64 `json:"dueDate"` Categories []string `json:"categories"` Parents []*Task `json:"-"` Subtasks []*Task `json:"subtasks"` } /* NewTask creates a new task with the assigned parents. The new task is in an incomplete state, with no subtasks of it's own. Passing in nil or an empty array as the argument for parent means it has no parent(s) and so it is a root task. */ func NewTask(name string, parents []*Task) *Task { if parents == nil { parents = make([]*Task, 0) } now := time.Now().Unix() newTask := &Task{ ID: uuid.NewV4().String(), Name: name, Complete: false, CreatedDate: now, ModifiedDate: now, DueDate: 0, Categories: make([]string, 0), Parents: parents, Subtasks: make([]*Task, 0), } for _, parent := range parents { parent.AddSubtask(newTask) } return newTask } /* IsRootTask returns true if the task is a root task. A root task is one that has no parents. */ func (t Task) IsRootTask() bool { return len(t.Parents) == 0 } /* MarkAsComplete marks a task and all of it's subtasks as complete. It the task itself is a subtask of a parent task and the task was previously the only remaining task to be completed, the parent will be marked as complete as well. This works all the way up the chain. 
If this method is called on a task already marked as complete, nothing happens. */ func (t *Task) MarkAsComplete() { if t.Complete { return } t.Complete = true for _, subtask := range t.Subtasks { subtask.MarkAsComplete() } for _, parent := range t.Parents { if parent.allSubtasksAreComplete() { parent.MarkAsComplete() } } } /* MarkAsIncomplete marks a task, as well as all parents up it's chain, as incomplete. If the task is already incomplete, nothing happens. */ func (t *Task) MarkAsIncomplete() { if !t.Complete { return } t.Complete = false for _, parent := range t.Parents { parent.MarkAsIncomplete() } } /* SetComplete is a convenience method/shortcut for marking a task as complete or incomplete. It simply delegates the work to either MarkAsComplete() or MarkAsIncomplete() as appropriate. */ func (t *Task) SetComplete(complete bool) { if complete { t.MarkAsComplete() } else { t.MarkAsIncomplete() } } /* AddSubtask adds a subtask to a task. If the subtask is incomplete, the task will be marked as incomplete as well. If the provided task is already a listed subtask, nothing happens. */ func (t *Task) AddSubtask(subtask *Task) { if findTaskInSlice(t.Subtasks, subtask.ID) != -1 { return } if t.Complete && !subtask.Complete { t.MarkAsIncomplete() } t.Subtasks = append(t.Subtasks, subtask) } /* AddParent adds a parent to a task. If the task is incomplete, it marks the new parent as incomplete as well. If the parent is already included in the list of parents, nothing happens. */ func (t *Task) AddParent(parent *Task) { if findTaskInSlice(t.Parents, parent.ID) != -1 { return } if !t.Complete && parent.Complete { parent.MarkAsIncomplete() } t.Parents = append(t.Parents, parent) parent.AddSubtask(t) } /* Delete removes the current Node and all subtasks from the Task structure. Similar to MarkAsComplete() any parent tasks are then marked as complete if they have no remaining incomplete subtasks. 
*/ func (t *Task) Delete() { t.MarkAsComplete() for _, subtask := range t.Subtasks { subtask.Delete() } for _, parent := range t.Parents { parent.Subtasks = deleteFromSliceByID(parent.Subtasks, t.ID) } } /* allSubtasksAreComplete returns true if all subtasks of the task have been marked as complete. Also returns true if there are no subtasks. */ func (t Task) allSubtasksAreComplete() bool { for _, subtask := range t.Subtasks { if !subtask.Complete { return false } } return true }
task/task.go
0.508788
0.421909
task.go
starcoder
package tmux import ( "bytes" "fmt" "io" "strings" "github.com/arl/gitstatus" ) const truncateSymbol string = "..." // Config is the configuration of the Git status tmux formatter. type Config struct { // Symbols contains the symbols printed before the Git status components. Symbols symbols // Styles contains the tmux style strings for symbols and Git status // components. Styles styles // Layout sets the output format of the Git status. Layout []string `yaml:",flow"` // Options contains additional configuration options. Options options } type symbols struct { Branch string // Branch is the string shown before local branch name. HashPrefix string // HasPrefix is the string shown before a SHA1 ref. Ahead string // Ahead is the string shown before the ahead count for the local/upstream branch divergence. Behind string // Behind is the string shown before the behind count for the local/upstream branch divergence. Staged string // Staged is the string shown before the count of staged files. Conflict string // Conflict is the string shown before the count of files with conflicts. Modified string // Modified is the string shown before the count of modified files. Untracked string // Untracked is the string shown before the count of untracked files. Stashed string // Stashed is the string shown before the count of stash entries. Clean string // Clean is the string shown when the working tree is clean. } type styles struct { Clear string // Clear is the style string that clears all styles. State string // State is the style string printed before eventual special state. Branch string // Branch is the style string printed before the local branch. Remote string // Remote is the style string printed before the upstream branch. Staged string // Staged is the style string printed before the staged files count. Conflict string // Conflict is the style string printed before the conflict count. Modified string // Modified is the style string printed before the modified files count. 
Untracked string // Untracked is the style string printed before the untracked files count. Stashed string // Stashed is the style string printed before the stash entries count. Clean string // Clean is the style string printed before the clean symbols. Divergence string // Divergence is the style string printed before divergence count/symbols. } type options struct { // BranchMaxLen is the maximum displayed length for local and remote branch names. BranchMaxLen int `yaml:"branch_max_len"` } // DefaultCfg is the default tmux configuration. var DefaultCfg = Config{ Symbols: symbols{ Branch: "⎇ ", Staged: "● ", Conflict: "✖ ", Modified: "✚ ", Untracked: "… ", Stashed: "⚑ ", Clean: "✔", Ahead: "↑·", Behind: "↓·", HashPrefix: ":", }, Styles: styles{ Clear: "#[fg=default]", State: "#[fg=red,bold]", Branch: "#[fg=white,bold]", Remote: "#[fg=cyan]", Divergence: "#[fg=default]", Staged: "#[fg=green,bold]", Conflict: "#[fg=red,bold]", Modified: "#[fg=red,bold]", Untracked: "#[fg=magenta,bold]", Stashed: "#[fg=cyan,bold]", Clean: "#[fg=green,bold]", }, Layout: []string{"branch", "..", "remote-branch", "divergence", " - ", "flags"}, Options: options{ BranchMaxLen: 0, }, } // A Formater formats git status to a tmux style string. type Formater struct { Config b bytes.Buffer st *gitstatus.Status } // Truncates branch name if longer than maxlen. If isremote, the leading // "<remote>/" is ignored when counting length. func truncateBranchName(name string, maxlen int, isremote bool) string { remoteName := "" branchName := name const ( idxRemote = 0 idxBranch = 1 numItems = 2 ) if isremote { a := strings.SplitAfterN(name, "/", numItems) if len(a) == numItems { remoteName = a[idxRemote] branchName = a[idxBranch] } } // To count length of characters and extract substring from UTF-8 strings. 
branchNameRune := []rune(branchName) truncateSymbolRune := []rune(truncateSymbol) if maxlen > 0 && maxlen < len(branchNameRune) { nameLen := maxlen - len(truncateSymbolRune) if nameLen > 0 { branchName = string(branchNameRune[:nameLen]) + truncateSymbol } else { branchName = string(truncateSymbolRune[:maxlen]) } } return remoteName + branchName } // Format writes st as json into w. func (f *Formater) Format(w io.Writer, st *gitstatus.Status) error { f.st = st f.clear() // overall working tree state if f.st.IsInitial { fmt.Fprintf(w, "%s%s [no commits yet]", f.Styles.Branch, truncateBranchName(f.st.LocalBranch, f.Options.BranchMaxLen, false)) f.flags() _, err := f.b.WriteTo(w) return err } f.format() _, err := f.b.WriteTo(w) return err } func (f *Formater) format() { for _, item := range f.Layout { switch item { case "branch": f.specialState() case "remote": f.remote() case "remote-branch": f.remoteBranch() case "divergence": f.divergence() case "flags": f.flags() default: f.clear() f.b.WriteString(item) } } } func (f *Formater) specialState() { f.clear() switch f.st.State { case gitstatus.Rebasing: fmt.Fprintf(&f.b, "%s[rebase] ", f.Styles.State) case gitstatus.AM: fmt.Fprintf(&f.b, "%s[am] ", f.Styles.State) case gitstatus.AMRebase: fmt.Fprintf(&f.b, "%s[am-rebase] ", f.Styles.State) case gitstatus.Merging: fmt.Fprintf(&f.b, "%s[merge] ", f.Styles.State) case gitstatus.CherryPicking: fmt.Fprintf(&f.b, "%s[cherry-pick] ", f.Styles.State) case gitstatus.Reverting: fmt.Fprintf(&f.b, "%s[revert] ", f.Styles.State) case gitstatus.Bisecting: fmt.Fprintf(&f.b, "%s[bisect] ", f.Styles.State) case gitstatus.Default: fmt.Fprintf(&f.b, "%s%s", f.Styles.Branch, f.Symbols.Branch) } f.currentRef() } func (f *Formater) remote() { if f.st.RemoteBranch == "" { return } f.clear() fmt.Fprintf(&f.b, "%s%s", f.Styles.Remote, truncateBranchName(f.st.RemoteBranch, f.Options.BranchMaxLen, true)) f.divergence() } func (f *Formater) remoteBranch() { if f.st.RemoteBranch != "" { f.clear() 
fmt.Fprintf(&f.b, "%s%s", f.Styles.Remote, truncateBranchName(f.st.RemoteBranch, f.Options.BranchMaxLen, true)) } } func (f *Formater) divergence() { if f.st.BehindCount == 0 && f.st.AheadCount == 0 { return } f.clear() f.b.WriteByte(' ') fmt.Fprintf(&f.b, "%s", f.Styles.Divergence) if f.st.BehindCount != 0 { fmt.Fprintf(&f.b, "%s%d", f.Symbols.Behind, f.st.BehindCount) } if f.st.AheadCount != 0 { fmt.Fprintf(&f.b, "%s%d", f.Symbols.Ahead, f.st.AheadCount) } } func (f *Formater) clear() { // clear global style f.b.WriteString(f.Styles.Clear) } func (f *Formater) currentRef() { f.clear() if f.st.IsDetached { fmt.Fprintf(&f.b, "%s%s%s", f.Styles.Branch, f.Symbols.HashPrefix, f.st.HEAD) return } fmt.Fprintf(&f.b, "%s%s", f.Styles.Branch, truncateBranchName(f.st.LocalBranch, f.Options.BranchMaxLen, false)) } func (f *Formater) flags() { if f.st.IsClean { f.clear() fmt.Fprintf(&f.b, "%s%s", f.Styles.Clean, f.Symbols.Clean) return } var flags []string if f.st.NumStaged != 0 { flags = append(flags, fmt.Sprintf("%s%s%d", f.Styles.Staged, f.Symbols.Staged, f.st.NumStaged)) } if f.st.NumConflicts != 0 { flags = append(flags, fmt.Sprintf("%s%s%d", f.Styles.Conflict, f.Symbols.Conflict, f.st.NumConflicts)) } if f.st.NumModified != 0 { flags = append(flags, fmt.Sprintf("%s%s%d", f.Styles.Modified, f.Symbols.Modified, f.st.NumModified)) } if f.st.NumStashed != 0 { flags = append(flags, fmt.Sprintf("%s%s%d", f.Styles.Stashed, f.Symbols.Stashed, f.st.NumStashed)) } if f.st.NumUntracked != 0 { flags = append(flags, fmt.Sprintf("%s%s%d", f.Styles.Untracked, f.Symbols.Untracked, f.st.NumUntracked)) } if len(flags) > 0 { f.clear() f.b.WriteString(strings.Join(flags, " ")) } }
tmux/formater.go
0.608827
0.406509
formater.go
starcoder
package anvil import ( "errors" "fmt" "reflect" "strconv" "strings" ) type ( // mode of (non-)skipping empty values mode int // Anvil executor structure Anvil struct { //Mode behavior for skipping empty values Mode mode //Glue string to glue fields Glue string // modifier it's a list of functions used as a rule // to find out empty or not empty value of a field with given type and // type representation // exported type key as a key and list of functions to execute. modifier map[string]func(f reflect.Value) (interface{}, bool, error) // collection of []{key => value} items []Item deep int // reserved for a future features } // Item field with typed value as a result of notation Item struct { Key string Value interface{} } ) const ( // NoSkipEmpty fields with empty values NoSkipEmpty mode = iota // SkipEmpty fields with empty values SkipEmpty ) // RegisterModifierFunc - assign a modifier function // to extract value of given type as // result of callback function used (value, isEmpty, error) where // value is an interface{} value, isEmpty - valuable for // behaviour Mode, and error if error occurred, used to stop execution func (s *Anvil) RegisterModifierFunc(t interface{}, mod func(f reflect.Value) (interface{}, bool, error)) *Anvil { if s.modifier == nil { s.modifier = make(map[string]func(f reflect.Value) (interface{}, bool, error)) } s.modifier[reflect.TypeOf(t).String()] = mod return s } // Notation of go type as a list of []Item // where key is a string and value is a typed interface value func Notation(source interface{}, behaviour mode, glue string) ([]Item, error) { if source == nil { return nil, nil } s := &Anvil{ Glue: glue, Mode: behaviour, modifier: make(map[string]func(f reflect.Value) (interface{}, bool, error)), } return s.notation("", reflect.ValueOf(source), false) } // Notation of go type as a list of []Item // where key is a string and value is a typed interface value func (s *Anvil) Notation(sample interface{}) ([]Item, error) { if sample == nil 
{ return nil, nil } return s.notation("", reflect.ValueOf(sample), false) } // notation structure nested func (s *Anvil) notation(key string, v reflect.Value, title bool) (items []Item, err error) { var ( value interface{} empty = true skip = s.Mode == SkipEmpty ) // get value by pointer if it is v = reflect.Indirect(v) // set default prefix for a field if len(key) < 1 { key = v.Type().Name() } switch v.Kind() { case reflect.Invalid: return nil, errors.New("anvil:invalid value of " + v.Type().Name()) case reflect.Array: if v.Len() < 1 { break } if value, empty, err = s.modify(v); err != nil { break } if !empty { break } for i := 0; i < v.Len(); i++ { n, err := s.notation(arrayPrefix(key, i), v.Index(i), true) if err != nil { return nil, err } if len(n) < 1 { continue } items = append(items, n...) } case reflect.Slice: if v.IsNil() { break } if value, empty, err = s.modify(v); err != nil { break } if v.Len() < 1 { break } for i := 0; i < v.Len(); i++ { if v.Index(i).CanAddr() { n, err := s.notation(arrayPrefix(key, i), reflect.Indirect(v.Index(i).Addr()), true) if err != nil { return nil, err } if len(n) < 1 { continue } items = append(items, n...) } } case reflect.Struct: if value, empty, err = s.modify(v); err != nil { return nil, err } if !empty { break } l := v.NumField() for i := 0; i < l; i++ { f := reflect.Indirect(v.Field(i)) // skip invalid field if f.Kind() == reflect.Invalid { continue } n, err := s.notation(s.key(key, v.Type().Field(i), false), f, true) if err != nil { return nil, err } if len(n) < 1 { continue } items = append(items, n...) 
} case reflect.Interface: if !v.Elem().IsValid() { break } n, err := s.notation(key, v.Elem(), true) if err != nil { return nil, err } if len(n) < 1 { break } items = n case reflect.Int: value, empty = int(v.Int()), v.Int() == 0 case reflect.Int8: value, empty = int8(v.Int()), v.Int() == 0 case reflect.Int16: value, empty = int16(v.Int()), v.Int() == 0 case reflect.Int32: value, empty = int32(v.Int()), v.Int() == 0 case reflect.Int64: value, empty = v.Int(), v.Int() == 0 case reflect.Float32: value, empty = float32(v.Float()), v.Float() == .0 case reflect.Float64: value, empty = v.Float(), v.Float() == 0 case reflect.Uint: value, empty = uint(v.Uint()), v.Uint() == 0 case reflect.Uint8: value, empty = uint8(v.Uint()), v.Uint() == 0 case reflect.Uint16: value, empty = uint16(v.Uint()), v.Uint() == 0 case reflect.Uint32: value, empty = uint32(v.Uint()), v.Uint() == 0 case reflect.Uint64: value, empty = v.Uint(), v.Uint() == 0 case reflect.Bool: value, empty = v.Bool(), !v.Bool() case reflect.String: value, empty = v.String(), len(v.String()) < 1 case reflect.Map: if v.IsNil() || v.Len() < 1 { break } keys := v.MapKeys() for i := range keys { n, err := s.notation(mapPrefix(key, keys[i]), v.MapIndex(keys[i]), true) if err != nil { return nil, err } if len(n) < 1 { continue } items = append(items, n...) 
} case reflect.Complex64: value = complex64(v.Complex()) empty = complex64(reflect.Zero(v.Type()).Complex()) == value case reflect.Complex128: value = v.Complex() empty = reflect.Zero(v.Type()).Complex() == value case reflect.Uintptr, reflect.Ptr, reflect.UnsafePointer: fallthrough default: return nil, errors.New("anvil:not implemented for " + v.Kind().String()) } if len(items) > 0 { return items, err } if empty && skip { return nil, err } return append(s.items, Item{Key: key, Value: value}), err } // modify - call modifier function if presented for a given type func (s *Anvil) modify(v reflect.Value) (interface{}, bool, error) { var err error defer func() { if r := recover(); r != nil { err = fmt.Errorf("anvil: %v on appendix call", r) } }() if fn, ok := s.modifier[v.Type().String()]; ok { return fn(v) } return nil, true, err } // key of field func (s *Anvil) key(pref string, v reflect.StructField, omit bool) string { if omit { return pref } var title string json, ok := v.Tag.Lookup("json") if !ok || len(json) < 1 { title = v.Name } else { tags := strings.Split(json, ",") if len(tags[0]) > 1 || tags[0] != "-" { title = tags[0] } } if len(title) < 1 { title = v.Name } return pref + s.Glue + title } // arrayPrefix - make a notation prefix for a slice/array fields func arrayPrefix(pref string, idx int) string { return pref + "[" + strconv.Itoa(idx) + "]" } // mapPrefix - make a notation prefix for a map fields func mapPrefix(pref string, idx reflect.Value) string { var val string switch idx.Kind() { case reflect.String: val = idx.String() case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: val = strconv.FormatInt(idx.Int(), 10) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: val = strconv.FormatUint(idx.Uint(), 10) case reflect.Float32: val = strconv.FormatFloat(idx.Float(), 'f', -1, 32) case reflect.Float64: val = strconv.FormatFloat(idx.Float(), 'f', -1, 64) case reflect.Bool: val = 
strconv.FormatBool(idx.Bool()) } return pref + "[" + val + "]" }
anvil.go
0.520984
0.476519
anvil.go
starcoder
package model import ( "math" "strconv" "time" "github.com/prometheus/common/model" "github.com/timescale/promscale/pkg/prompb" ) type SamplesInfo struct { Labels *Labels SeriesID SeriesID Samples []prompb.Sample } // SeriesID represents a globally unique id for the series. This should be equivalent // to the PostgreSQL type in the series table (currently BIGINT). type SeriesID int64 func (s SeriesID) String() string { return strconv.FormatInt(int64(s), 10) } // SampleInfoIterator is an iterator over a collection of sampleInfos that returns // data in the format expected for the data table row. type SampleInfoIterator struct { SampleInfos []SamplesInfo SampleInfoIndex int SampleIndex int MinSeen int64 } // NewSampleInfoIterator is the constructor func NewSampleInfoIterator() SampleInfoIterator { si := SampleInfoIterator{SampleInfos: make([]SamplesInfo, 0)} si.ResetPosition() return si } //Append adds a sample info to the back of the iterator func (t *SampleInfoIterator) Append(s SamplesInfo) { t.SampleInfos = append(t.SampleInfos, s) } //ResetPosition resets the iteration position to the beginning func (t *SampleInfoIterator) ResetPosition() { t.SampleIndex = -1 t.SampleInfoIndex = 0 t.MinSeen = math.MaxInt64 } // Next returns true if there is another row and makes the next row data // available to Values(). When there are no more rows available or an error // has occurred it returns false. 
func (t *SampleInfoIterator) Next() bool { t.SampleIndex++ if t.SampleInfoIndex < len(t.SampleInfos) && t.SampleIndex >= len(t.SampleInfos[t.SampleInfoIndex].Samples) { t.SampleInfoIndex++ t.SampleIndex = 0 } return t.SampleInfoIndex < len(t.SampleInfos) } // Values returns the values for the current row func (t *SampleInfoIterator) Values() (time.Time, float64, SeriesID) { info := t.SampleInfos[t.SampleInfoIndex] sample := info.Samples[t.SampleIndex] if t.MinSeen > sample.Timestamp { t.MinSeen = sample.Timestamp } return model.Time(sample.Timestamp).Time(), sample.Value, info.SeriesID } // Err returns any error that has been encountered by the CopyFromSource. If // this is not nil *Conn.CopyFrom will abort the copy. func (t *SampleInfoIterator) Err() error { return nil }
pkg/pgmodel/model/samples.go
0.859693
0.44083
samples.go
starcoder
package bow import ( "fmt" "sort" ) // InnerJoin joins columns of two Bows on common columns and rows. // The Metadata of the two Bows are also joined by appending keys and values. func (b *bow) InnerJoin(other Bow) Bow { left := b right, ok := other.(*bow) if !ok { panic("bow.InnerJoin: non bow object passed as argument") } if left.NumCols() == 0 && right.NumCols() == 0 { return left.NewSlice(0, 0) } if left.NumCols() > 0 && right.NumCols() == 0 { return left.NewSlice(0, 0) } if left.NumCols() == 0 && right.NumCols() > 0 { return right.NewSlice(0, 0) } // Get common columns indices commonCols := getCommonCols(left, right) // Get common rows indices commonRows := getCommonRows(left, right, commonCols) // Prepare new Series Slice newNumCols := left.NumCols() + right.NumCols() - len(commonCols) newSeries := make([]Series, newNumCols) newNumRows := len(commonRows.l) innerFillLeftBowCols(&newSeries, left, right, newNumRows, commonRows) innerFillRightBowCols(&newSeries, left, right, newNumRows, newNumCols, commonCols, commonRows) // Join Metadata var keys, values []string keys = append(keys, left.Schema().Metadata().Keys()...) keys = append(keys, right.Schema().Metadata().Keys()...) values = append(values, left.Schema().Metadata().Values()...) values = append(values, right.Schema().Metadata().Values()...) newBow, err := NewBowWithMetadata( NewMetadata(keys, values), newSeries...) if err != nil { panic(fmt.Errorf("bow.InnerJoin: %w", err)) } return newBow } // OuterJoin joins columns of two Bows on common columns, and keeps all rows. // The Metadata of the two Bows are also joined by appending keys and values. 
func (b *bow) OuterJoin(other Bow) Bow { left := b right, ok := other.(*bow) if !ok { panic("bow.OuterJoin: non bow object passed as argument") } // Get common columns indices commonCols := getCommonCols(left, right) // Get common rows indices commonRows := getCommonRows(left, right, commonCols) // Compute new rows number var uniquesLeft, uniquesRight int if len(commonRows.l) > 0 { uniquesLeft, uniquesRight = 1, 1 sortedLeft := make([]int, len(commonRows.l)) sortedRight := make([]int, len(commonRows.l)) copy(sortedLeft, commonRows.l) copy(sortedRight, commonRows.r) sort.Ints(sortedLeft) sort.Ints(sortedRight) for i := 0; i < len(commonRows.l)-1; i++ { if sortedLeft[i] != sortedLeft[i+1] { uniquesLeft++ } if sortedRight[i] != sortedRight[i+1] { uniquesRight++ } } } newNumRows := left.NumRows() + right.NumRows() + len(commonRows.l) - uniquesLeft - uniquesRight // Prepare new Series Slice newNumCols := left.NumCols() + right.NumCols() - len(commonCols) newSeries := make([]Series, newNumCols) outerFillLeftBowCols(&newSeries, left, right, newNumRows, uniquesLeft, commonCols, commonRows) outerFillRightBowCols(&newSeries, left, right, newNumCols, newNumRows, uniquesLeft, commonCols, commonRows) // Join Metadata var keys, values []string keys = append(keys, left.Schema().Metadata().Keys()...) keys = append(keys, right.Schema().Metadata().Keys()...) values = append(values, left.Schema().Metadata().Values()...) values = append(values, right.Schema().Metadata().Values()...) newBow, err := NewBowWithMetadata( NewMetadata(keys, values), newSeries...) 
if err != nil { panic(fmt.Errorf("bow.OuterJoin: %w", err)) } return newBow } // getCommonCols returns in key column names and corresponding buffers on left / right schemas func getCommonCols(left, right Bow) map[string][]Buffer { commonCols := make(map[string][]Buffer) for _, lField := range left.Schema().Fields() { rFields, commonCol := right.Schema().FieldsByName(lField.Name) if !commonCol { continue } if len(rFields) > 1 { panic(fmt.Errorf( "bow.Join: too many columns have the same name: right:%+v left:%+v", right.String(), left.String())) } rField := rFields[0] if rField.Type.ID() != lField.Type.ID() { panic(fmt.Errorf( "bow.Join: left and right bow on join columns are of incompatible types: %s", lField.Name)) } commonCols[lField.Name] = []Buffer{ left.NewBufferFromCol(left.Schema().FieldIndices(lField.Name)[0]), right.NewBufferFromCol(right.Schema().FieldIndices(lField.Name)[0])} } return commonCols } type CommonRows struct { l, r []int } func getCommonRows(left, right Bow, commonColBufs map[string][]Buffer) CommonRows { var commonRows CommonRows if len(commonColBufs) == 0 { return commonRows } for leftRow := 0; leftRow < left.NumRows(); leftRow++ { for rightRow := 0; rightRow < right.NumRows(); rightRow++ { isRowCommon := true for _, colBufs := range commonColBufs { if colBufs[0].GetValue(leftRow) != colBufs[1].GetValue(rightRow) { isRowCommon = false continue } } if isRowCommon { commonRows.l = append(commonRows.l, leftRow) commonRows.r = append(commonRows.r, rightRow) } } } return commonRows }
bowjoin.go
0.704465
0.527317
bowjoin.go
starcoder
package kpath import ( "errors" "strings" ) type kpath struct { Part string // current key part Path string // remaining path More bool // there is more path to parse } // Split parses a kpath string into an array of parts. // Kpath parts are delimited either by a period (ex: partA.partB) or brackets and quotes: (ex: partA["partB"]). // To index an array, use brackets with an numeric index (ex: partA[0]). // All delimiters may be used in combination (ex: partA.partB["partC"].partD[1]). // Period delimiting is preferred when indexing a map, unless the part contains a period, in which case brackets and // quotes should be used. func Split(path string) ([]string, error) { var s []string for { r, err := parse(path) if err != nil { return s, err } s = append(s, r.Part) path = r.Path if !r.More { break } } return s, nil } // parse extracts the first part in a kpath string, returning the part, the remaining path, and whether the remaining // path is expected to have more parts. // If more=true and path="", the next parse call should error (usually because of a trailing delimiter). func parse(path string) (kpath, error) { var r kpath if path == "" { return r, errors.New("empty path") } if path[0] == '[' { var i int if len(path) < 2 { return r, errors.New("unclosed array index in path") } if path[1] == '"' { // explicit string map index i = strings.Index(path, "\"]") if i < 0 { return r, errors.New("unclosed map index in path") } r.Part = path[2:i] i += 2 } else { // array index i = strings.IndexRune(path, ']') if i < 0 { return r, errors.New("unclosed array index in path") } r.Part = path[1:i] i++ } if len(path) > i { r.More = true if path[i] == '.' { // exlude delimiter r.Path = path[i+1:] } else { // include delimiter r.Path = path[i:] } } return r, nil } // implicit string map index for i := 0; i < len(path); i++ { if path[i] == '.' 
{ // exlude delimiter r.Part = path[:i] r.Path = path[i+1:] r.More = true return r, nil } if path[i] == '[' { // include delimiter r.Part = path[:i] r.Path = path[i:] r.More = true return r, nil } } // entire path is the last part r.Part = path return r, nil }
pkg/kpath/kpath.go
0.561696
0.439687
kpath.go
starcoder
package stl import ( "io" "unsafe" ) func Sizeof[T any]() int { var v T return int(unsafe.Sizeof(v)) } func SizeOfMany[T any](cnt int) int { var v T return int(unsafe.Sizeof(v)) * cnt } type Bytes struct { Data []byte Offset []uint32 Length []uint32 } type Vector[T any] interface { // Close free the vector allocated memory // Caller must call Close() or a memory leak will occur Close() // Clone deep copy data from offset to offset+length and create a new vector Clone(offset, length int) Vector[T] // ReadBytes reads a serialized buffer and initializes the vector using the buf // as its initial contents. // If share is true, vector release allocated memory and use the buf and its data storage // If share is false, vector will copy the data from buf to its own data storage ReadBytes(buf *Bytes, share bool) // Reset resets the buffer to be empty // but it retains the underlying storage for use by future writes Reset() // IsView returns true if the vector shares the data storage with external buffer IsView() bool // Bytes returns the underlying data storage buffer Bytes() *Bytes // Data returns the underlying data storage buffer // For Vector[[]byte], it only returns the data buffer Data() []byte // DataWindow returns a data window [offset, offset+length) DataWindow(offset, length int) []byte // Slice returns the underlying data storage of type T Slice() []T SliceWindow(offset, length int) []T // Get returns the specified element value at i Get(i int) (v T) // Append appends a element into the vector // If the prediction length is large than Capacity, it will cause the underlying memory reallocation. // Reallocation: // 1. Apply a new memory node from allocator // 2. Copy existing data into new buffer // 3. Swap owned memory node // 4. 
Free old memory node Append(v T) // Append appends many elements into the vector AppendMany(vals ...T) // Append updates a element at i to a new value // For T=[]byte, Update may introduce a underlying memory reallocation Update(i int, v T) // Delete deletes a element at i Delete(i int) (deleted T) // Delete deletes elements in [offset, offset+length) RangeDelete(offset, length int) // Returns the underlying memory allocator GetAllocator() MemAllocator // Returns the capacity, which is always >= Length(). // It is related to the number of elements. Same as C++ std::vector::capacity Capacity() int // Returns the number of elements in the vertor Length() int // Return the space allocted Allocated() int String() string Desc() string // WriteTo writes data to w until the buffer is drained or an error occurs WriteTo(io.Writer) (int64, error) // ReadFrom reads data from r until EOF and appends it to the buffer, growing // the buffer as needed. ReadFrom(io.Reader) (int64, error) }
pkg/vm/engine/tae/stl/types.go
0.635675
0.402245
types.go
starcoder
package shp import ( "encoding/binary" "fmt" "github.com/pkg/errors" ) // Polyline is an ordered set of verticies that consists of one or more parts, where a part is one or more Point. type Polyline struct { BoundingBox BoundingBox Parts []Part number uint32 } // Part is a sequence of Points. type Part []Point // DecodePolyline parses a single polyline shape, but does not validate its complicance with the spec. func DecodePolyline(buf []byte, num uint32) (*Polyline, error) { return decodePolyline(buf, num, nil) } // DecodePolylineP parses a single polyline shape with the specified precision, // but does not validate its complicance with the spec. func DecodePolylineP(buf []byte, num uint32, precision uint) (*Polyline, error) { return decodePolyline(buf, num, &precision) } // Type is PolylineType. func (p *Polyline) Type() ShapeType { return PolylineType } // RecordNumber returns the position in the shape file. func (p *Polyline) RecordNumber() uint32 { return p.number } // Polygon has the same syntax as a Polyline, but the parts should be unbroken rings. type Polygon Polyline // DecodePolygon decodes a single polygon shape, but does not validate its complicance with the spec. func DecodePolygon(buf []byte, num uint32) (*Polygon, error) { p, err := DecodePolyline(buf, num) if err != nil { return nil, err } return (*Polygon)(p), nil } // DecodePolygonP decodes a single polygon shape with the specified precision, // but does not validate its complicance with the spec. func DecodePolygonP(buf []byte, num uint32, precision uint) (*Polygon, error) { p, err := DecodePolylineP(buf, num, precision) if err != nil { return nil, err } return (*Polygon)(p), nil } // Type is PolygonType. func (p *Polygon) Type() ShapeType { return PolygonType } // RecordNumber returns the position in the shape file. 
func (p *Polygon) RecordNumber() uint32 { return p.number } func decodePolyline(buf []byte, num uint32, precision *uint) (*Polyline, error) { var box *BoundingBox var err error if precision == nil { if box, err = DecodeBoundingBox(buf[0:]); err != nil { return nil, err } } else { if box, err = DecodeBoundingBoxP(buf[0:], *precision); err != nil { return nil, err } } const minBytes = 40 if len(buf) < minBytes { return nil, fmt.Errorf("expecting %d bytes but only have %d", minBytes, len(buf)) } numParts := binary.LittleEndian.Uint32(buf[32:36]) numPoints := binary.LittleEndian.Uint32(buf[36:40]) numBytes := minBytes + (numParts * 4) + (numPoints * 16) if len(buf) < int(numBytes) { return nil, fmt.Errorf("expecting %d bytes but only have %d", numBytes, len(buf)) } out := &Polyline{ BoundingBox: *box, Parts: make([]Part, numParts), number: num, } parts := make([]uint32, numParts) for i := range parts { n := minBytes + (i * 4) parts[i] = binary.LittleEndian.Uint32(buf[n : n+4]) } var point func([]byte, uint32) (*Point, error) if precision == nil { point = DecodePoint } else { point = func(buf []byte, num uint32) (*Point, error) { return DecodePointP(buf, num, *precision) } } pointsOffset := int(minBytes + (numParts * 4)) for i, start := range parts { var end uint32 if i == len(parts)-1 { end = numPoints } else { end = parts[i+1] } out.Parts[i] = make(Part, end-start) for j := 0; j < len(out.Parts[i]); j++ { x := int(start) + j p, err := point(buf[pointsOffset+(x*16):pointsOffset+(x*16)+16], num) if err != nil { return nil, errors.Wrap(err, "failed to decode point") } p.box = box out.Parts[i][j] = *p } } return out, nil }
shp/polyline.go
0.779867
0.69416
polyline.go
starcoder
package entity import ( "time" ) type CameraMap map[string]Camera func (m CameraMap) Get(name string) Camera { if result, ok := m[name]; ok { return result } return *NewCamera(name, "") } func (m CameraMap) Pointer(name string) *Camera { if result, ok := m[name]; ok { return &result } return NewCamera(name, "") } var CameraFixtures = CameraMap{ "apple-iphone-se": { ID: 1000000, CameraSlug: "apple-iphone-se", CameraName: "Apple iPhone SE", CameraMake: "Apple", CameraModel: "iPhone SE", CameraType: "", CameraDescription: "", CameraNotes: "", CreatedAt: time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC), UpdatedAt: time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC), DeletedAt: nil, }, "canon-eos-5d": { ID: 1000001, CameraSlug: "canon-eos-5d", CameraName: "Canon EOS 5D", CameraMake: "Canon", CameraModel: "EOS 5D", CameraType: "", CameraDescription: "", CameraNotes: "", CreatedAt: time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC), UpdatedAt: time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC), DeletedAt: nil, }, "canon-eos-7d": { ID: 1000002, CameraSlug: "canon-eos-7d", CameraName: "Canon EOS 7D", CameraMake: "Canon", CameraModel: "EOS 7D", CameraType: "", CameraDescription: "", CameraNotes: "", CreatedAt: time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC), UpdatedAt: time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC), DeletedAt: nil, }, "canon-eos-6d": { ID: 1000003, CameraSlug: "canon-eos-6d", CameraModel: "EOS 6D", CameraMake: "Canon", CameraType: "", CameraDescription: "", CameraNotes: "", CreatedAt: time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC), UpdatedAt: time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC), DeletedAt: nil, }, "apple-iphone-6": { ID: 1000004, CameraSlug: "apple-iphone-6", CameraName: "Apple iPhone 6", CameraMake: "Apple", CameraModel: "iPhone 6", CameraType: "", CameraDescription: "", CameraNotes: "", CreatedAt: time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC), UpdatedAt: time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC), DeletedAt: nil, }, "apple-iphone-7": { ID: 1000005, CameraSlug: "apple-iphone-7", 
CameraName: "Apple iPhone 7", CameraMake: "Apple", CameraModel: "iPhone 7", CameraType: "", CameraDescription: "", CameraNotes: "", CreatedAt: Timestamp(), UpdatedAt: Timestamp(), DeletedAt: nil, }, } // CreateCameraFixtures inserts known entities into the database for testing. func CreateCameraFixtures() { for _, entity := range CameraFixtures { Db().Create(&entity) } }
internal/entity/camera_fixtures.go
0.712132
0.501953
camera_fixtures.go
starcoder
package shared import ( "fmt" "reflect" "strings" ) // Intertuple defines an interface for manipulating tuples. type Intertuple interface { Length() int GetFieldAt(i int) interface{} SetFieldAt(i int, val interface{}) } // Tuple contains a set of fields, where fields can be any primitive or type. // A tuple is used to store information which is placed in a tuple space. type Tuple struct { Fields []interface{} // Field of the tuple. } // CreateTuple create a tuple according to the values in the fields. func CreateTuple(fields ...interface{}) Tuple { tf := make([]interface{}, len(fields)) copy(tf, fields) tuple := Tuple{tf} return tuple } // CreateTupleFromTemplate reads a template and returns a new tuple tp. // CreateTupleFromTemplate extracts values from any pointers it finds in template t. func CreateTupleFromTemplate(t ...interface{}) (tp Tuple) { fields := make([]interface{}, len(t)) for i, value := range t { if reflect.TypeOf(value).Kind() == reflect.Ptr { fields[i] = (reflect.ValueOf(value).Elem().Interface()).(interface{}) } else { fields[i] = value } } tp = CreateTuple(fields...) return tp } // Length returns the amount of fields of the tuple. func (t *Tuple) Length() int { return len((*t).Fields) } // GetFieldAt returns the i'th field of the tuple. func (t *Tuple) GetFieldAt(i int) interface{} { return (*t).Fields[i] } // SetFieldAt sets the i'th field of the tuple to the value of val. func (t *Tuple) SetFieldAt(i int, val interface{}) { (*t).Fields[i] = val } // Match pattern matches the tuple against the template tp. // Match discriminates between encapsulated formal fields and actual fields. // Match returns true if the template matches the tuple, and false otherwise. func (t *Tuple) Match(tp Template) bool { if (*t).Length() != tp.Length() { return false } else if (*t).Length() == 0 && tp.Length() == 0 { return true } // Run through corresponding fields of tuple and template to see if they are // matching. 
for i := 0; i < tp.Length(); i++ { tf := (*t).GetFieldAt(i) tpf := tp.GetFieldAt(i) // Check if the field of the template is an encapsulated formal or actual field. if reflect.TypeOf(tpf) == reflect.TypeOf(TypeField{}) { if reflect.TypeOf(tf) != tpf.(TypeField).GetType() { return false } } else if !reflect.DeepEqual(tf, tpf) { return false } } return true } // GetParenthesisType returns a pair of strings that encapsulates the tuple. // GetParenthesisType is used in the String() method. func (t Tuple) GetParenthesisType() (string, string) { return "(", ")" } // GetDelimiter returns the delimiter used to seperated the tuple fields. // GetParenthesisType is used in the String() method. func (t Tuple) GetDelimiter() string { return ", " } // String returns a print friendly representation of the tuple. func (t Tuple) String() string { ld, rd := t.GetParenthesisType() delim := t.GetDelimiter() strs := make([]string, t.Length()) for i, _ := range strs { field := t.GetFieldAt(i) if field != nil { if reflect.TypeOf(field).Kind() == reflect.String { strs[i] = fmt.Sprintf("%s%s%s", "\"", field, "\"") } else { strs[i] = fmt.Sprintf("%v", field) } } else { strs[i] = "nil" } } return fmt.Sprintf("%s%s%s", ld, strings.Join(strs, delim), rd) } // WriteToVariables will overwrite the values pointed to by pointers with // the values contained in the tuple. // WriteToVariables will ignore unaddressable pointers. // TODO: There should be placed a lock around the variables that are being // changed, to ensure that mix of two tuple are written to the variables. func (t *Tuple) WriteToVariables(params ...interface{}) { for i, param := range params { if reflect.TypeOf(param).Kind() == reflect.Ptr { value := reflect.ValueOf(param).Elem() if value.CanSet() { value.Set(reflect.ValueOf((*t).GetFieldAt(i))) } } } }
shared/tuple.go
0.659624
0.486636
tuple.go
starcoder
package observer import ( "fmt" "math" "math/rand" "github.com/gonum/floats" ) const ( // RandSeed Индекс для генератора случайных чисел. RandSeed = 512 minHeatIndexTemperature = 27 ) // Интерфейс визуального элемента. type displayer interface { Display() string } // DisplayElement Интерфейс визуального элемента, способного быть наблюдателем. type DisplayElement interface { displayer observer } // Визуальный элемент текущего состояния. type currentConditionsDisplay struct { temperature float64 humidity float64 weatherData subject } // Update Обновить данные. func (c *currentConditionsDisplay) Update(observable subject, data *Measurements) string { if data != nil { c.temperature = data.Temperature c.humidity = data.Humidity } else { wd, ok := observable.(WeatherDater) if ok { c.temperature = wd.GetTemperature() c.humidity = wd.GetHumidity() } } return c.Display() } // Display Вывести информацию. func (c *currentConditionsDisplay) Display() string { text := "Current conditions:\n" text += fmt.Sprintf("\tTemperature: %.1f\n", c.temperature) text += fmt.Sprintf("\tHumidity: %.1f\n", c.humidity) return text } // NewCurrentConditionsDisplay Создать визуальный элемент текущего состояния. func NewCurrentConditionsDisplay(weatherData subject) DisplayElement { display := new(currentConditionsDisplay) display.weatherData = weatherData display.weatherData.RegisterObserver(display) return display } // Визуальный элемент статистики. type statisticsDisplay struct { temperature []float64 humidity []float64 pressure []float64 weatherData subject } // Update Обновить данные. 
func (s *statisticsDisplay) Update(observable subject, data *Measurements) string { if data != nil { s.temperature = append(s.temperature, data.Temperature) s.humidity = append(s.humidity, data.Humidity) s.pressure = append(s.pressure, data.Pressure) } else { wd, ok := observable.(WeatherDater) if ok { s.temperature = append(s.temperature, wd.GetTemperature()) s.humidity = append(s.humidity, wd.GetHumidity()) s.pressure = append(s.pressure, wd.GetPressure()) } } return s.Display() } // Display Вывести информацию. func (s *statisticsDisplay) Display() string { var ( temperatureMax, temperatureMin, temperatureAvg float64 humidityMax, humidityMin, humidityAvg float64 pressureMax, pressureMin, pressureAvg float64 ) if len(s.temperature) > 0 { temperatureMax = floats.Max(s.temperature) temperatureMin = floats.Min(s.temperature) temperatureAvg = floats.Sum(s.temperature) / float64(len(s.temperature)) } if len(s.humidity) > 0 { humidityMax = floats.Max(s.humidity) humidityMin = floats.Min(s.humidity) humidityAvg = floats.Sum(s.humidity) / float64(len(s.humidity)) } if len(s.pressure) > 0 { pressureMax = floats.Max(s.pressure) pressureMin = floats.Min(s.pressure) pressureAvg = floats.Sum(s.pressure) / float64(len(s.pressure)) } text := "Statistics:\n" text += fmt.Sprintf("\tTemperature (min/max/avg): %.1f/%.1f/%.1f\n", temperatureMin, temperatureMax, temperatureAvg) text += fmt.Sprintf("\tHumidity (min/max/avg): %.1f/%.1f/%.1f\n", humidityMin, humidityMax, humidityAvg) text += fmt.Sprintf("\tPressure (min/max/avg): %.1f/%.1f/%.1f\n", pressureMin, pressureMax, pressureAvg) return text } // NewStatisticsDisplay Создать визуальный элемент статистики. func NewStatisticsDisplay(weatherData subject) DisplayElement { display := new(statisticsDisplay) display.weatherData = weatherData display.weatherData.RegisterObserver(display) return display } // Визуальный элемент прогноза. 
type forecastDisplay struct {
	temperature float64
	humidity    float64
	pressure    float64
	weatherData subject
}

// Update refreshes the stored values, derives a forecast from them, and
// returns the rendered display. When data is nil, the values are pulled
// from the observable subject, provided it implements WeatherDater.
func (f *forecastDisplay) Update(observable subject, data *Measurements) string {
	if data != nil {
		f.temperature = data.Temperature
		f.humidity = data.Humidity
		f.pressure = data.Pressure
	} else {
		wd, ok := observable.(WeatherDater)
		if ok {
			f.temperature = wd.GetTemperature()
			f.humidity = wd.GetHumidity()
			f.pressure = wd.GetPressure()
		}
	}
	f.makeForecast()
	return f.Display()
}

// Display renders the forecast as text.
func (f *forecastDisplay) Display() string {
	text := "Forecast:\n"
	text += fmt.Sprintf("\tTemperature: %.1f\n", f.temperature)
	text += fmt.Sprintf("\tHumidity: %.1f\n", f.humidity)
	text += fmt.Sprintf("\tPressure: %.1f\n", f.pressure)
	return text
}

// makeForecast scales every stored value by a pseudo-random coefficient.
// NOTE(review): the generator is re-seeded with the same constant on every
// call, so the same three coefficients are produced each time.
func (f *forecastDisplay) makeForecast() {
	rand.Seed(RandSeed)
	f.temperature = f.getCoefficient() * f.temperature
	f.humidity = f.getCoefficient() * f.humidity
	f.pressure = f.getCoefficient() * f.pressure
}

// getCoefficient returns a pseudo-random factor in [0.7, 1.3).
func (f *forecastDisplay) getCoefficient() float64 {
	// nolint:gosec // Example
	return 0.7 + rand.Float64()*(1.3-0.7)
}

// NewForecastDisplay creates the forecast visual element and registers it
// as an observer on weatherData.
func NewForecastDisplay(weatherData subject) DisplayElement {
	display := new(forecastDisplay)
	display.weatherData = weatherData
	display.weatherData.RegisterObserver(display)
	return display
}

// heatIndexDisplay is the visual element showing the heat index.
type heatIndexDisplay struct {
	temperature float64
	humidity    float64
	weatherData subject
}

// Update refreshes the stored values and returns the rendered display.
// When data is nil, the values are pulled from the observable subject,
// provided it implements WeatherDater.
func (h *heatIndexDisplay) Update(observable subject, data *Measurements) string {
	if data != nil {
		h.temperature = data.Temperature
		h.humidity = data.Humidity
	} else {
		wd, ok := observable.(WeatherDater)
		if ok {
			h.temperature = wd.GetTemperature()
			h.humidity = wd.GetHumidity()
		}
	}
	return h.Display()
}

// Display renders the heat index as text.
func (h *heatIndexDisplay) Display() string {
	// Below the threshold no heat index is shown at all.
	if h.temperature < minHeatIndexTemperature {
		return ""
	}
	// Regression coefficients of the heat-index polynomial computed from
	// temperature and humidity below.
	c1 := -8.78469475556
	c2 := 1.61139411
	c3 := 2.33854883889
	c4 := -0.14611605
	c5 := -0.012308094
	c6 := -0.0164248277778
	c7 := 0.002211732
	c8 := 0.00072546
	c9 := -0.000003582
	heatIndex := c1 + c2*h.temperature + c3*h.humidity +
		c4*h.temperature*h.humidity + c5*math.Pow(h.temperature, 2) +
		c6*math.Pow(h.humidity, 2) + c7*math.Pow(h.temperature, 2)*h.humidity +
		c8*h.temperature*math.Pow(h.humidity, 2) +
		c9*math.Pow(h.temperature, 2)*math.Pow(h.humidity, 2)
	return fmt.Sprintf("Heat index: %.1f\n", heatIndex)
}

// NewHeatIndexDisplay creates the heat-index visual element and registers
// it as an observer on weatherData.
func NewHeatIndexDisplay(weatherData subject) DisplayElement {
	display := new(heatIndexDisplay)
	display.weatherData = weatherData
	display.weatherData.RegisterObserver(display)
	return display
}
pkg/behavioral/observer/displays.go
0.591487
0.593197
displays.go
starcoder
package creational

import "fmt"

/*
Summary: Factory pattern is used to create product's objects without specifying concrete types.
NewProduct() method is used to create the concrete product instead of &Product{}

Example: Parking lot service. This service has a persistent storage as a dependency and Factory pattern helps in doing dependency injection.

Benefit:
1. Dependency Injection/Unit testable: All dependencies are passed as interfaces in the factory method as parameters, making the code unit-testable/mockable.
2. Reusability: All clients can just call the Factory method to make the object.
3. Extensibility: All client codes remain untouched, when we add some new things in object making. Only one method changes.
*/

// ParkingType enumerates the kinds of parking the factory can create.
type ParkingType int

const (
	Street ParkingType = iota
	Mall
)

// Repository is the external dependency of Parking (persistent storage),
// passed to the factory so it can be mocked in tests.
type Repository interface {
	Create()
	Update()
}

// Parking is the product interface produced by the factory.
type Parking interface {
	ParkVehicle()
	UnParkVehicle()
	DisplayFreeSlots()
}

// NewParking is the factory method that makes store be passed
// as an external dependency which can be mocked. It is the single
// function that would be used by all clients for creating all
// kinds of parking: onstreet, mall, etc..
func NewParking(parkType ParkingType, store Repository) Parking { switch parkType { case Street: return newOnStreetParking(store) case Mall: return newMallParking(store) } return newMallParking(store) } type streetParking struct { store Repository } func newOnStreetParking(store Repository) *streetParking { return &streetParking{store} } func (p *streetParking) ParkVehicle() { fmt.Println("street parked") } func (p *streetParking) UnParkVehicle() { fmt.Println("street unparked") } func (p *streetParking) DisplayFreeSlots() { fmt.Println("street free slots") } type mallParking struct { store Repository } func newMallParking(store Repository) *mallParking { return &mallParking{store} } func (p *mallParking) ParkVehicle() { fmt.Println("mall parked") } func (p *mallParking) UnParkVehicle() { fmt.Println("mall unparked") } func (p *mallParking) DisplayFreeSlots() { fmt.Println("mall free slots") }
creational/factory_method.go
0.728459
0.417271
factory_method.go
starcoder
package set

import (
	"reflect"
	"sort"
)

// Set is a generic unordered collection of unique keys, backed by a map
// with zero-width empty-struct values.
type Set[Key comparable] map[Key]struct{}

// New creates a new set that contains all the given keys.
func New[Key comparable](keys ...Key) Set[Key] {
	if len(keys) == 0 {
		return make(Set[Key])
	}
	resultset := make(Set[Key], len(keys))
	for i := range keys {
		resultset[keys[i]] = struct{}{}
	}
	return resultset
}

// Equal checks if sets are equal: ⋂(a, b, sets) = a
func Equal[Key comparable](a, b Set[Key], sets ...Set[Key]) bool {
	if !reflect.DeepEqual(a, b) {
		return false
	}
	for i := range sets {
		if !reflect.DeepEqual(a, sets[i]) {
			return false
		}
	}
	return true
}

// Disjoint checks if sets are disjoint: ⋂(a, b, sets) = ∅
func Disjoint[Key comparable](a, b Set[Key], sets ...Set[Key]) bool {
	// Use the smallest set to check the others.
	sorted_sets := sortSetsByLength(a, b, sets...)
	candidate := sorted_sets[0]
	// Any empty set in the arguments means the sets are disjoint.
	if len(candidate) == 0 {
		return true
	}
	others := sorted_sets[1:]
outer:
	for k := range candidate {
		for i := range others {
			if !others[i].has(k) {
				continue outer
			}
		}
		// k is present in every set, so the intersection is non-empty.
		return false
	}
	return true
}

// IsSubsetOf returns a pair of values:
//   - ok=true if s is a subset of other
//   - proper=true if s is a proper subset (i.e., !Equal(s, other)))
func (s Set[Key]) IsSubsetOf(other Set[Key]) (ok bool, proper bool) {
	if len(s) > len(other) {
		return false, false
	}
	if len(s) > 0 {
		for k := range s {
			if !other.has(k) {
				return false, false
			}
		}
	}
	return true, len(s) < len(other)
}

// IsSupersetOf returns a pair of values:
//   - ok=true if s is a superset of other
//   - proper=true if s is a proper superset (i.e., !Equal(s, other)))
func (s Set[Key]) IsSupersetOf(other Set[Key]) (ok bool, proper bool) {
	return other.IsSubsetOf(s)
}

// Copy creates a deep copy of the set. Will never return nil.
func (s Set[Key]) Copy() Set[Key] {
	resultset := make(Set[Key], len(s))
	for k := range s {
		resultset[k] = struct{}{}
	}
	return resultset
}

// Contains checks if the set contains all of the given keys.
func (s Set[Key]) Contains(key Key, keys ...Key) bool {
	if !s.has(key) {
		return false
	}
	for i := range keys {
		if !s.has(keys[i]) {
			return false
		}
	}
	return true
}

// Add adds keys to the set.
func (s Set[Key]) Add(key Key, keys ...Key) {
	s[key] = struct{}{}
	for i := range keys {
		s[keys[i]] = struct{}{}
	}
}

// Del deletes keys from the set.
func (s Set[Key]) Del(key Key, keys ...Key) {
	delete(s, key)
	for i := range keys {
		delete(s, keys[i])
	}
}

// Union is the union of all the sets: ⋃(a, b, sets) = a ∪ b ∪ sets[0] ∪ sets[1] ...
func Union[Key comparable](a, b Set[Key], sets ...Set[Key]) Set[Key] {
	resultset := a.Copy()
	resultset.Update(b, sets...)
	return resultset
}

// Update is like Union, but modifies the set in place.
func (s Set[Key]) Update(a Set[Key], sets ...Set[Key]) {
	for k := range a {
		s[k] = struct{}{}
	}
	for i := range sets {
		for k := range sets[i] {
			s[k] = struct{}{}
		}
	}
}

// Intersection is the intersection of all the sets: ⋂(a, b, sets) = a ∩ b ∩ sets[0] ∩ sets[1] ...
func Intersection[Key comparable](a, b Set[Key], sets ...Set[Key]) Set[Key] {
	// The result will be empty if any of the sets are empty.
	if len(a) == 0 || len(b) == 0 {
		return make(Set[Key])
	}
	// Use the smallest set as the candidate result.
	sorted_sets := sortSetsByLength(a, b, sets...)
	candidate := sorted_sets[0]
	// Any empty set in the arguments produces an empty intersection.
	if len(candidate) == 0 {
		return make(Set[Key])
	}
	others := sorted_sets[1:]
	resultset := make(Set[Key], len(candidate))
outer:
	for k := range candidate {
		for i := range others {
			if !others[i].has(k) {
				continue outer
			}
		}
		resultset[k] = struct{}{}
	}
	return resultset
}

// Intersect is like Intersection, but modifies the set in place.
func (s Set[Key]) Intersect(a Set[Key], sets ...Set[Key]) {
	// The result will be empty if this set is empty.
	if len(s) == 0 {
		return
	}
	// Clear the set if any of the arguments is an empty set.
	// NOTE(review): appending to the variadic parameter can write into the
	// caller's backing array when it has spare capacity — TODO confirm this
	// aliasing is acceptable (same pattern in Difference and Remove).
	sets = append(sets, a)
	for i := range sets {
		if len(sets[i]) == 0 {
			s.clear()
			return
		}
	}
	// Collect keys to remove first; deleting while ranging a map is legal
	// in Go but collecting keeps the loop logic simple.
	rm := make([]Key, 0, len(s))
outer:
	for k := range s {
		for i := range sets {
			if !sets[i].has(k) {
				rm = append(rm, k)
				continue outer
			}
		}
	}
	for i := range rm {
		delete(s, rm[i])
	}
}

// Difference is the difference of all the sets: a ∖ b ∖ sets[0] ∖ sets[1] ...
func Difference[Key comparable](a, b Set[Key], sets ...Set[Key]) Set[Key] {
	// The result will be empty if the first set is empty.
	if len(a) == 0 {
		return make(Set[Key])
	}
	sets = append(sets, b)
	resultset := make(Set[Key], len(a))
outer:
	for k := range a {
		for i := range sets {
			if sets[i].has(k) {
				continue outer
			}
		}
		resultset[k] = struct{}{}
	}
	return resultset
}

// Remove is like Difference, but modifies the set in place.
func (s Set[Key]) Remove(a Set[Key], sets ...Set[Key]) {
	// The result will be empty if this set is empty.
	if len(s) == 0 {
		return
	}
	sets = append(sets, a)
	rm := make([]Key, 0, len(s))
outer:
	for k := range s {
		for i := range sets {
			if sets[i].has(k) {
				rm = append(rm, k)
				continue outer
			}
		}
	}
	for i := range rm {
		delete(s, rm[i])
	}
}

// SymmetricDifference is the symmetric difference of all the sets:
// ⋃(a, b, sets) ∖ ⋂(a, b, sets).
func SymmetricDifference[Key comparable](a, b Set[Key], sets ...Set[Key]) Set[Key] {
	return Difference(Union(a, b, sets...), Intersection(a, b, sets...))
}

// SymmetricRemove is like SymmetricDifference, but modifies the set in place.
func (s Set[Key]) SymmetricRemove(a Set[Key], sets ...Set[Key]) {
	// The symmetric difference of a set with itself is the empty set.
	// NOTE(review): &s == &a compares the addresses of the two local
	// parameters and is therefore always false, so this shortcut never
	// fires. The general path below still produces the correct (empty)
	// result when s and a refer to the same map.
	if &s == &a {
		s.clear()
		return
	}
	rm := s.Copy()
	rm.Intersect(a, sets...)
	s.Update(a, sets...)
	s.Remove(rm)
}

// clear removes every key, keeping the map allocated.
func (s Set[Key]) clear() {
	for k := range s {
		delete(s, k)
	}
}

// has reports whether key is a member of the set.
func (s Set[Key]) has(key Key) bool {
	_, exists := s[key]
	return exists
}

// sortSetsByLength returns all argument sets ordered by ascending length.
func sortSetsByLength[Key comparable](a, b Set[Key], sets ...Set[Key]) []Set[Key] {
	// Do *not* modify the function argument array, copy it before sorting.
	if len(sets) == 0 {
		if len(b) < len(a) {
			return []Set[Key]{b, a}
		}
		return []Set[Key]{a, b}
	} else {
		sorted := make([]Set[Key], 0, 2+len(sets))
		sorted = append(append(append(sorted, a), b), sets...)
		sort.Slice(sorted, func(i, j int) bool {
			return len(sorted[i]) < len(sorted[j])
		})
		return sorted
	}
}
set/set.go
0.745676
0.490846
set.go
starcoder
package analysis

import (
	"fmt"
	"math"
	"strings"

	"github.com/AlessandroPomponio/go-gibberish/consts"
	"github.com/AlessandroPomponio/go-gibberish/structs"
)

// AverageTransitionProbability returns the probability of
// generating the input string digraph by digraph according
// to the occurrences matrix.
func AverageTransitionProbability(line string, occurrences [][]float64, position map[rune]int) (float64, error) {
	logProb := 0.0
	transitionCt := 0.0
	for _, pair := range GetDigraphs(line) {
		firstPosition, firstRuneFound := position[pair.First]
		if !firstRuneFound {
			return -1, fmt.Errorf("AverageTransitionProbability: unable to find the position of the rune %s", string(pair.First))
		}
		secondPosition, secondRuneFound := position[pair.Second]
		if !secondRuneFound {
			// Bug fix: report the rune that is actually missing (pair.Second);
			// the original message incorrectly named pair.First.
			return -1, fmt.Errorf("AverageTransitionProbability: unable to find the position of the rune %s", string(pair.Second))
		}
		logProb += occurrences[firstPosition][secondPosition]
		transitionCt++
	}
	// Guard the division: inputs with fewer than two accepted runes yield
	// no digraphs at all.
	if transitionCt == 0 {
		transitionCt = 1
	}
	return math.Exp(logProb / transitionCt), nil
}

// GetDigraphs returns pairs of adjacent runes, after
// normalizing the input line.
func GetDigraphs(line string) []structs.Digraph {
	runes := Normalize(line)
	if len(runes) == 0 {
		return []structs.Digraph{}
	}
	digraphs := make([]structs.Digraph, len(runes)-1)
	for i := 0; i < len(runes)-1; i++ {
		digraphs[i] = structs.Digraph{First: runes[i], Second: runes[i+1]}
	}
	return digraphs
}

// Normalize returns the subset of runes in the line
// that are in the accepted characters. This helps
// keeping the model relatively small by ignoring
// punctuation, symbols, etc.
func Normalize(line string) []rune {
	line = strings.ToLower(line)
	result := make([]rune, 0, len(line))
	for _, r := range line {
		if strings.ContainsRune(consts.AcceptedCharacters, r) {
			result = append(result, r)
		}
	}
	return result
}

// MaxForSlice returns the maximum value in a
// float64 slice.
func MaxForSlice(slice []float64) float64 {
	// Start from the most negative representable value so any element wins.
	best := -math.MaxFloat64
	for i := range slice {
		if slice[i] > best {
			best = slice[i]
		}
	}
	return best
}

// MinForSlice returns the minimum value in
// a float64 slice.
func MinForSlice(slice []float64) float64 {
	// Start from the largest representable value so any element wins.
	best := math.MaxFloat64
	for i := range slice {
		if slice[i] < best {
			best = slice[i]
		}
	}
	return best
}
analysis/analysis.go
0.778102
0.432183
analysis.go
starcoder
package stathat

import (
	"encoding/json"
	"net/http"
	"net/url"
	"strconv"
	"time"
)

// GetOptions are passed into Get to provide optional values
type GetOptions struct {
	Start    *time.Time
	Period   string
	Interval string
	Summary  bool
}

// Dataset is a named series of datapoints returned by the export API.
type Dataset struct {
	Name      string
	Timeframe string
	Points    []Datapoint
}

// Datapoint is a single timestamped value. Time is populated from
// Timestamp after decoding and is not part of the JSON payload.
type Datapoint struct {
	Timestamp int64 `json:"time"`
	Value     float64
	Time      time.Time `json:"-"`
}

// Get returns data for a stat.
// If `start` is nil, StatHat will use their default of `start = now - period`.
// `period` is the span of time from which to return data, starting at `start` and ending at `start+period`. This is in the same format as `interval`.
// `interval` is in the format of `1h` meaning "one hour". Other units available are in the set `[mhdwMy]`.
// If `summary` is true, `interval` is ignored. The `summary` flag causes this to instead provide a daily summary for the requested `period` in full days.
// If `summary` is true but `period` is not, the default of `1w` is used.
// See https://www.stathat.com/manual/export for accepted time unit format for `interval` and `period`.
// `stats` can be range from one to five stats. Stats listed beyond five are ignored.
func (s StatHat) Get(opts GetOptions, stats ...string) ([]Dataset, error) { rawurl := s.apiPrefix() + `/data/` if len(stats) == 0 { return nil, ErrNotFound // FIXME: maybe not the best error to return } // (StatHat only cares about the first five stats requested and ignores any extra) for i := 0; i < 5 && i < len(stats); i++ { rawurl += url.PathEscape(stats[i]) + "/" } u, err := url.Parse(rawurl) if err != nil { return nil, err } q := u.Query() if opts.Start != nil && !opts.Start.IsZero() { q.Add("start", strconv.FormatInt(opts.Start.Unix(), 10)) } if opts.Summary { if len(opts.Period) == 0 { opts.Period = "1w" } q.Add("summary", opts.Period) } else { if len(opts.Period) > 0 || len(opts.Interval) > 0 { q.Add("t", opts.Period+opts.Interval) } } u.RawQuery = q.Encode() rawurl = u.String() req, err := http.NewRequest(http.MethodGet, rawurl, nil) if err != nil { return nil, err } resp, err := httpDo(req) if err != nil { return nil, err } defer resp.Body.Close() var ds []Dataset j := json.NewDecoder(resp.Body) err = j.Decode(&ds) for dsI := range ds { for point := range ds[dsI].Points { ds[dsI].Points[point].Time = time.Unix(ds[dsI].Points[point].Timestamp, 0) } } return ds, err }
get.go
0.725551
0.439687
get.go
starcoder
package main

import (
	"github.com/go-gl/gl/v2.1/gl"
)

var (
	bulletMaxHP   = 1
	bulletMass    = 0.5
	bulletColSize = 20.0
	bulletScale   = vertex{x: 10, y: 10, z: 10}
	bulletModel   polyModel
)

// bullet is a small projectile shot by players or enemies.
type bullet struct {
	uidGenerator
	loc      vertex
	moveDir  vertex
	speed    float64
	rot      float64
	alliance alliance
	harmful  collision
	hp       int
}

// bulletPosData is positional data for a bullet, used to initialize.
type bulletPosData struct {
	loc     vertex
	rot     float64
	moveDir vertex
	speed   float64
}

// newBullet builds a bullet from positional data and an alliance, starting
// at full hit points.
func newBullet(data bulletPosData, alliance alliance) *bullet {
	bul := new(bullet)
	bul.hp = bulletMaxHP
	bul.loc = data.loc
	bul.rot = data.rot
	bul.moveDir = data.moveDir
	bul.speed = data.speed
	bul.alliance = alliance
	return bul
}

// tick advances the bullet along moveDir, scaled by speed and the frame
// delta, then refreshes its collision box. It never spawns new entities.
func (b *bullet) tick() []entity {
	b.loc.x += (b.speed * mainWindow.delta) * b.moveDir.x
	b.loc.y += (b.speed * mainWindow.delta) * b.moveDir.y
	b.updateCols()
	return []entity{}
}

// draw renders the bullet model at its current location, rotation and scale.
func (b *bullet) draw() {
	gl.PushMatrix()
	rotateOnPoint(b.rot, b.loc)
	gl.Translated(b.loc.x, b.loc.y, b.loc.z)
	gl.Scaled(bulletScale.x, bulletScale.y, bulletScale.z)
	bulletModel.draw()
	gl.PopMatrix()
}

// collision reacts to contact: the bullet loses a hit point when it hits an
// unaligned bouncer or a vulnerable entity of a different alliance.
func (b *bullet) collision(yours, other collision) {
	switch other.typ {
	case bouncer:
		if other.alliance == unaligned {
			// We bumped into a wall or something
			b.hp--
		}
	case vulnerable:
		if other.alliance != b.alliance {
			// We probably hit an enemy
			b.hp--
		}
	}
}

// updateCols recomputes the harmful collision box, a square of side
// bulletColSize centered on the bullet's location.
func (b *bullet) updateCols() {
	b.harmful.alliance = b.alliance
	b.harmful.typ = harmful
	b.harmful.bounding = bounding{
		vertex{b.loc.x - bulletColSize/2.0, b.loc.y - bulletColSize/2.0, 0.0},
		vertex{b.loc.x + bulletColSize/2.0, b.loc.y + bulletColSize/2.0, 0.0}}
}

// collisions exposes the bullet's single (harmful) collision volume.
func (b *bullet) collisions() []collision {
	return []collision{
		b.harmful}
}

// location returns the bullet's current position.
func (b *bullet) location() vertex {
	return b.loc
}

// deletable reports whether the bullet is spent (no hit points left).
func (b *bullet) deletable() bool {
	return b.hp <= 0
}

// mass returns the bullet's constant mass.
func (b *bullet) mass() float64 {
	return bulletMass
}
bullet.go
0.709422
0.40248
bullet.go
starcoder
package gpsabl

import (
	"math"
	"time"
)

// Copyright 2019 by <EMAIL>. All
// rights reserved. Use of this source code is governed
// by a BSD-style license that can be found in the
// LICENSE file.

// CompareFloat64With4Digits - Compare two float64 to 4 digits after decimal
func CompareFloat64With4Digits(in1, in2 float64) bool {
	return RoundFloat64To4Digits(in1) == RoundFloat64To4Digits(in2)
}

// RoundFloat64To4Digits - Rounds a float64 to 4 digits after decimal
func RoundFloat64To4Digits(in float64) float64 {
	return math.Round(in*10000) / 10000
}

// RoundFloat64To2Digits - Rounds a float64 to 2 digits after decimal
func RoundFloat64To2Digits(in float64) float64 {
	return math.Round(in*100) / 100
}

// OutputLine - Represents one line in the output
type OutputLine struct {
	Name string
	Data TrackSummaryProvider
}

// NewOutputLine - Get a new OutputLine struct
func NewOutputLine(name string, data TrackSummaryProvider) *OutputLine {
	ret := OutputLine{}
	ret.Data = data
	ret.Name = name
	return &ret
}

// TrackDataArrays - The tracks data in arrays, sorted by values not the line
type TrackDataArrays struct {
	AllTimeDataValid    bool
	Distances           []float64
	HorizontalDistances []float64
	ElevationGains      []float64
	ElevationLoses      []float64
	AltitudeRanges      []float64
	MinimumAltitudes    []float64
	MaximumAltitudes    []float64
	UpwardsDistances    []float64
	DownwardsDistances  []float64
	StartTimes          []time.Time
	EndTimes            []time.Time
	Durations           []time.Duration
	MovingTimes         []time.Duration
	UpwardsTimes        []time.Duration
	DownwardsTimes      []time.Duration
	AverageSpeeds       []float64
	UpwardsSpeeds       []float64
	DownwardsSpeeds     []float64
}

// TrackStatisticSummaryData - Contains statistic data from a bunch of tracks
type TrackStatisticSummaryData struct {
	AllTimeDataValid bool
	InputTackCount   int
	Sum              ExtendedTrackSummary
	Average          ExtendedTrackSummary
	Minimum          ExtendedTrackSummary
	Maximum          ExtendedTrackSummary
}

// ExtendedTrackSummary - The TrackSummary extended by the duration
type ExtendedTrackSummary struct {
	TrackSummary
	Duration       time.Duration
	AverageSpeed   float64
	UpwardsSpeed   float64
	DownwardsSpeed float64
	AltitudeRange  float64
}

// GetTrackDataArrays - Get the tracks data in arrays, sorted by values not the line.
// Time-based arrays are only filled when every input line has valid time data.
func GetTrackDataArrays(lines []OutputLine) TrackDataArrays {
	ret := TrackDataArrays{}
	ret.AllTimeDataValid = allTimeDataValid(lines)
	for _, line := range lines {
		info := line.Data
		ret.Distances = append(ret.Distances, info.GetDistance())
		ret.HorizontalDistances = append(ret.HorizontalDistances, info.GetHorizontalDistance())
		ret.ElevationGains = append(ret.ElevationGains, float64(info.GetElevationGain()))
		ret.ElevationLoses = append(ret.ElevationLoses, float64(info.GetElevationLose()))
		ret.AltitudeRanges = append(ret.AltitudeRanges, float64(info.GetAltitudeRange()))
		ret.MinimumAltitudes = append(ret.MinimumAltitudes, float64(info.GetMinimumAltitude()))
		ret.MaximumAltitudes = append(ret.MaximumAltitudes, float64(info.GetMaximumAltitude()))
		ret.UpwardsDistances = append(ret.UpwardsDistances, info.GetUpwardsDistance())
		ret.DownwardsDistances = append(ret.DownwardsDistances, info.GetDownwardsDistance())
		if ret.AllTimeDataValid {
			ret.Durations = append(ret.Durations, info.GetEndTime().Sub(info.GetStartTime()))
			ret.StartTimes = append(ret.StartTimes, info.GetStartTime())
			ret.EndTimes = append(ret.EndTimes, info.GetEndTime())
			ret.MovingTimes = append(ret.MovingTimes, info.GetMovingTime())
			ret.UpwardsTimes = append(ret.UpwardsTimes, info.GetUpwardsTime())
			ret.DownwardsTimes = append(ret.DownwardsTimes, info.GetDownwardsTime())
			ret.AverageSpeeds = append(ret.AverageSpeeds, info.GetAvarageSpeed())
			ret.UpwardsSpeeds = append(ret.UpwardsSpeeds, info.GetUpwardsSpeed())
			ret.DownwardsSpeeds = append(ret.DownwardsSpeeds, info.GetDownwardsSpeed())
		}
	}
	return ret
}

// GetStatisticSummaryData - Get the TrackStatisticSummaryData of the input tracks.
// NOTE(review): an empty lines slice panics here (division by zero track
// count and data[0] access in the min/max helpers) — TODO confirm callers
// always pass at least one line.
func GetStatisticSummaryData(lines []OutputLine) TrackStatisticSummaryData {
	ret := TrackStatisticSummaryData{}
	arrays := GetTrackDataArrays(lines)
	ret.AllTimeDataValid = arrays.AllTimeDataValid
	ret.InputTackCount = len(lines)
	ret.Sum.Distance = sumFloat64Array(arrays.Distances)
	ret.Average.Distance = ret.Sum.Distance / float64(ret.InputTackCount)
	ret.Minimum.Distance = minFloat64Array(arrays.Distances)
	ret.Maximum.Distance = maxFloat64Array(arrays.Distances)
	ret.Sum.DownwardsDistance = sumFloat64Array(arrays.DownwardsDistances)
	ret.Average.DownwardsDistance = ret.Sum.DownwardsDistance / float64(ret.InputTackCount)
	ret.Minimum.DownwardsDistance = minFloat64Array(arrays.DownwardsDistances)
	ret.Maximum.DownwardsDistance = maxFloat64Array(arrays.DownwardsDistances)
	ret.Sum.ElevationGain = float32(sumFloat64Array(arrays.ElevationGains))
	ret.Average.ElevationGain = ret.Sum.ElevationGain / float32(ret.InputTackCount)
	ret.Minimum.ElevationGain = float32(minFloat64Array(arrays.ElevationGains))
	ret.Maximum.ElevationGain = float32(maxFloat64Array(arrays.ElevationGains))
	ret.Sum.ElevationLose = float32(sumFloat64Array(arrays.ElevationLoses))
	ret.Average.ElevationLose = ret.Sum.ElevationLose / float32(ret.InputTackCount)
	// Since ElevationLose is negative, the absolute biggest number is the minimum
	ret.Minimum.ElevationLose = float32(maxFloat64Array(arrays.ElevationLoses))
	// Since ElevationLose is negative, the absolute smallest number is the maximum
	ret.Maximum.ElevationLose = float32(minFloat64Array(arrays.ElevationLoses))
	ret.Sum.HorizontalDistance = sumFloat64Array(arrays.HorizontalDistances)
	ret.Average.HorizontalDistance = ret.Sum.HorizontalDistance / float64(ret.InputTackCount)
	ret.Minimum.HorizontalDistance = minFloat64Array(arrays.HorizontalDistances)
	ret.Maximum.HorizontalDistance = maxFloat64Array(arrays.HorizontalDistances)
	ret.Sum.UpwardsDistance = sumFloat64Array(arrays.UpwardsDistances)
	ret.Average.UpwardsDistance = ret.Sum.UpwardsDistance / float64(ret.InputTackCount)
	ret.Minimum.UpwardsDistance = minFloat64Array(arrays.UpwardsDistances)
	ret.Maximum.UpwardsDistance = maxFloat64Array(arrays.UpwardsDistances)
	// AltitudeRange has no Sum field; only the average is derived from it.
	sumRange := sumFloat64Array(arrays.AltitudeRanges)
	ret.Average.AltitudeRange = sumRange / float64(ret.InputTackCount)
	ret.Minimum.AltitudeRange = minFloat64Array(arrays.AltitudeRanges)
	ret.Maximum.AltitudeRange = maxFloat64Array(arrays.AltitudeRanges)
	ret.Minimum.MinimumAltitude = float32(minFloat64Array(arrays.MinimumAltitudes))
	ret.Minimum.MaximumAltitude = float32(minFloat64Array(arrays.MaximumAltitudes))
	ret.Maximum.MinimumAltitude = float32(maxFloat64Array(arrays.MinimumAltitudes))
	ret.Maximum.MaximumAltitude = float32(maxFloat64Array(arrays.MaximumAltitudes))
	ret.Maximum.TimeDataValid = ret.AllTimeDataValid
	ret.Minimum.TimeDataValid = ret.AllTimeDataValid
	ret.Average.TimeDataValid = ret.AllTimeDataValid
	ret.Sum.TimeDataValid = ret.AllTimeDataValid
	// Time-based statistics are only meaningful when every track carries
	// valid time data (the arrays are empty otherwise).
	if ret.AllTimeDataValid {
		ret.Sum.DownwardsTime = sumTimeDurationArray(arrays.DownwardsTimes)
		ret.Average.DownwardsTime = averageDuration(ret.Sum.DownwardsTime, ret.InputTackCount)
		ret.Minimum.DownwardsTime = minTimeDurationArray(arrays.DownwardsTimes)
		ret.Maximum.DownwardsTime = maxTimeDurationArray(arrays.DownwardsTimes)
		ret.Sum.Duration = sumTimeDurationArray(arrays.Durations)
		ret.Average.Duration = averageDuration(ret.Sum.Duration, ret.InputTackCount)
		ret.Minimum.Duration = minTimeDurationArray(arrays.Durations)
		ret.Maximum.Duration = maxTimeDurationArray(arrays.Durations)
		ret.Sum.MovingTime = sumTimeDurationArray(arrays.MovingTimes)
		ret.Average.MovingTime = averageDuration(ret.Sum.MovingTime, ret.InputTackCount)
		ret.Minimum.MovingTime = minTimeDurationArray(arrays.MovingTimes)
		ret.Maximum.MovingTime = maxTimeDurationArray(arrays.MovingTimes)
		ret.Sum.UpwardsTime = sumTimeDurationArray(arrays.UpwardsTimes)
		ret.Average.UpwardsTime = averageDuration(ret.Sum.UpwardsTime, ret.InputTackCount)
		ret.Minimum.UpwardsTime = minTimeDurationArray(arrays.UpwardsTimes)
		ret.Maximum.UpwardsTime = maxTimeDurationArray(arrays.UpwardsTimes)
		speedSum := sumFloat64Array(arrays.AverageSpeeds)
		ret.Average.AverageSpeed = speedSum / float64(ret.InputTackCount)
		ret.Maximum.AverageSpeed = maxFloat64Array(arrays.AverageSpeeds)
		ret.Minimum.AverageSpeed = minFloat64Array(arrays.AverageSpeeds)
		speedSum = sumFloat64Array(arrays.UpwardsSpeeds)
		ret.Average.UpwardsSpeed = speedSum / float64(ret.InputTackCount)
		ret.Maximum.UpwardsSpeed = maxFloat64Array(arrays.UpwardsSpeeds)
		ret.Minimum.UpwardsSpeed = minFloat64Array(arrays.UpwardsSpeeds)
		speedSum = sumFloat64Array(arrays.DownwardsSpeeds)
		ret.Average.DownwardsSpeed = speedSum / float64(ret.InputTackCount)
		ret.Minimum.DownwardsSpeed = minFloat64Array(arrays.DownwardsSpeeds)
		ret.Maximum.DownwardsSpeed = maxFloat64Array(arrays.DownwardsSpeeds)
		ret.Maximum.StartTime = maxTimeArray(arrays.StartTimes)
		ret.Maximum.EndTime = maxTimeArray(arrays.EndTimes)
		ret.Minimum.StartTime = minTimeArray(arrays.StartTimes)
		ret.Minimum.EndTime = minTimeArray(arrays.EndTimes)
	}
	return ret
}

// OutputContainsLineByTimeStamps - tell if a track with the same start and endtime already is in the output buffer
func OutputContainsLineByTimeStamps(output []OutputLine, newLine OutputLine) bool {
	// Don't tread all lines with no valid time values as duplicates
	if newLine.Data.GetTimeDataValid() == false {
		return false
	}
	newLineStartTime := newLine.Data.GetStartTime()
	newLineEndTime := newLine.Data.GetEndTime()
	for _, outLine := range output {
		outLineStartTime := outLine.Data.GetStartTime()
		outLineEndTime := outLine.Data.GetEndTime()
		if outLineStartTime == newLineStartTime && outLineEndTime == newLineEndTime {
			return true
		}
	}
	return false
}

// averageDuration divides a duration sum by count using integer nanoseconds.
func averageDuration(sum time.Duration, count int) time.Duration {
	timeSumNanoSec := int64(sum)
	avrDurationNanoSec := timeSumNanoSec / int64(count)
	return time.Duration(avrDurationNanoSec)
}

// allTimeDataValid reports whether every line carries valid time data.
func allTimeDataValid(lines []OutputLine) bool {
	for _, line := range lines {
		if line.Data.GetTimeDataValid() == false {
			return false
		}
	}
	return true
}

// sumFloat64Array adds up all values; an empty slice yields 0.
func sumFloat64Array(data []float64) float64 {
	ret := 0.0
	for _, value := range data {
		ret += value
	}
	return ret
}

// minFloat64Array returns the smallest value.
// NOTE(review): panics on an empty slice (data[0]); all min/max helpers
// below share this precondition.
func minFloat64Array(data []float64) float64 {
	min := data[0]
	for _, value := range data {
		if value < min {
			min = value
		}
	}
	return min
}

// maxFloat64Array returns the largest value (non-empty input required).
func maxFloat64Array(data []float64) float64 {
	max := data[0]
	for _, value := range data {
		if value > max {
			max = value
		}
	}
	return max
}

// sumTimeDurationArray adds up all durations; an empty slice yields 0.
func sumTimeDurationArray(data []time.Duration) time.Duration {
	var ret time.Duration
	for _, value := range data {
		ret += value
	}
	return ret
}

// minTimeDurationArray returns the shortest duration (non-empty input required).
func minTimeDurationArray(data []time.Duration) time.Duration {
	min := data[0]
	for _, value := range data {
		if value < min {
			min = value
		}
	}
	return min
}

// maxTimeDurationArray returns the longest duration (non-empty input required).
func maxTimeDurationArray(data []time.Duration) time.Duration {
	max := data[0]
	for _, value := range data {
		if value > max {
			max = value
		}
	}
	return max
}

// minTimeArray returns the earliest instant (non-empty input required).
func minTimeArray(data []time.Time) time.Time {
	min := data[0]
	for _, value := range data {
		if value.Before(min) {
			min = value
		}
	}
	return min
}

// maxTimeArray returns the latest instant (non-empty input required).
func maxTimeArray(data []time.Time) time.Time {
	max := data[0]
	for _, value := range data {
		if max.Before(value) {
			max = value
		}
	}
	return max
}
src/tobi.backfrak.de/internal/gpsabl/MathHelper.go
0.903417
0.486332
MathHelper.go
starcoder
package main

import (
	"fmt"

	"github.com/faiface/pixel"
	"github.com/faiface/pixel/pixelgl"
	"github.com/strangedev/vroom/algebra"
	"github.com/strangedev/vroom/gfx"
	"github.com/faiface/pixel/imdraw"
	"github.com/rs/xid"
)

// SteeringIntent holds the steering input applied on every Tick.
type SteeringIntent struct {
	SteerRadians float64
	Acceleration float64
}

// Car is a drivable entity with position, heading, velocity and a sprite.
type Car struct {
	Position    *algebra.Vector2
	Orientation *algebra.Vector2
	Velocity    float64
	Steering    *SteeringIntent
	tickCount   uint64
	id          string
	Sprite      *pixel.Sprite
}

// NewCar returns a car at the origin, facing +x, with a unique id and no
// sprite attached.
func NewCar() Car {
	id := xid.New()
	return Car{
		Position:    &algebra.Vector2{},
		Orientation: &algebra.Vector2{1, 0},
		Velocity:    0,
		Steering:    &SteeringIntent{},
		id:          id.String(),
	}
}

// NewCarWithGfx returns a new car with its sprite loaded from disk.
// Panics if the sprite asset cannot be loaded.
func NewCarWithGfx() (c Car) {
	c = NewCar()
	pic, err := gfx.LoadPicture("assets/sprites/turtle-small.png")
	if err != nil {
		panic(err)
	}
	c.Sprite = pixel.NewSprite(pic, pic.Bounds())
	return
}

// GetTransformationMatrix builds the sprite transform: scale, then rotate
// to the car's heading, then translate to its position.
func (c *Car) GetTransformationMatrix() (m pixel.Matrix) {
	// Negative because the angle is measured from the +x reference axis.
	carAngle := -c.Orientation.AngleTo(algebra.Vector2{1, 0})
	carPosition := c.Position.ToPixelVec()
	m = pixel.IM
	m = m.Scaled(pixel.ZV, 0.5)
	m = m.Rotated(pixel.ZV, carAngle)
	m = m.Moved(carPosition)
	return
}

// Draw renders the car's bounding box (green outline) and its sprite.
func (c *Car) Draw(win *pixelgl.Window) {
	bounds := c.GetBoundingBox()
	imd := imdraw.New(nil)
	imd.Color = pixel.RGB(0, 1, 0)
	imd.Push(
		bounds.Ul.ToPixelVec(),
		bounds.Ur.ToPixelVec(),
	)
	imd.Line(2)
	imd.Push(
		bounds.Ul.ToPixelVec(),
		bounds.Dl.ToPixelVec(),
	)
	imd.Line(2)
	imd.Push(
		bounds.Ur.ToPixelVec(),
		bounds.Dr.ToPixelVec(),
	)
	imd.Line(2)
	imd.Push(
		bounds.Dl.ToPixelVec(),
		bounds.Dr.ToPixelVec(),
	)
	imd.Line(2)
	imd.Draw(win)
	c.Sprite.Draw(win, c.GetTransformationMatrix())
}

// Tick integrates the steering intent over dt: accelerate, move along the
// current orientation, then rotate by the steering angle.
func (c *Car) Tick(dt float64) {
	c.Velocity += c.Steering.Acceleration * dt
	c.Position.AddInPlace(c.Orientation.Scale(c.Velocity))
	c.Orientation.RotateInPlace(c.Steering.SteerRadians)
	c.tickCount++
}

// Print dumps the car's state to stdout for debugging.
func (c Car) Print() {
	fmt.Printf("Car: %p (%v ticks)\n", &c, c.tickCount)
	fmt.Printf(" Pos:\t%v\n", *c.Position)
	fmt.Printf(" Ori:\t%v\n", *c.Orientation)
	fmt.Printf(" Vel:\t%v\n", c.Velocity)
	fmt.Println(" Steer:")
	fmt.Printf(" Acc:\t%v\n", c.Steering.Acceleration)
	fmt.Printf(" Rad:\t%v\n", c.Steering.SteerRadians)
	fmt.Println("---")
}

// GetBoundingBox returns an axis-aligned 10x10 square around the position.
func (c *Car) GetBoundingBox() algebra.Rectangle {
	return algebra.Rectangle{
		Ul: algebra.Vector2{c.Position[0] - 5, c.Position[1] + 5},
		Ur: algebra.Vector2{c.Position[0] + 5, c.Position[1] + 5},
		Dl: algebra.Vector2{c.Position[0] - 5, c.Position[1] - 5},
		Dr: algebra.Vector2{c.Position[0] + 5, c.Position[1] - 5},
	}
}

// GetPosition returns a copy of the car's position.
func (c *Car) GetPosition() algebra.Vector2 {
	return *(c.Position)
}

// MoveTo teleports the car to v.
func (c *Car) MoveTo(v algebra.Vector2) {
	c.Position = &v
}

// GetId returns the car's unique identifier.
func (c *Car) GetId() string {
	return c.id
}
car.go
0.644337
0.416322
car.go
starcoder
package sunspec

import (
	"errors"
	"regexp"
)

// Model defines a instantiated sunspec model.
type Model interface {
	// Group defines a sunspec container for points.
	Group
	// ID returns the models identifier as defined by the first point "ID".
	ID() Uint16
	// Length returns the model length as defined by the second point "L".
	Length() Uint16
}

// Definition describes the behavior of a model reference, which can be instantiated.
type Definition interface {
	// ID returns the model´s identifier.
	ID() uint16
	// Instance derives a new useable model from the definition.
	Instance(adr uint16, callback func(pts []Point) error) (Model, error)
}

// ModelDef is the definition of a sunspec Model.
type ModelDef struct {
	Id          uint16   `json:"id"`
	Group       GroupDef `json:"group"`
	Label       string   `json:"label,omitempty"`
	Description string   `json:"desc,omitempty"`
	Detail      string   `json:"detail,omitempty"`
	Notes       string   `json:"notes,omitempty"`
	Comments    []string `json:"comments,omitempty"`
}

// Compile-time check that *ModelDef satisfies Definition.
var _ Definition = (*ModelDef)(nil)

// ID returns the definitions model identifier.
func (def *ModelDef) ID() uint16 {
	return def.Id
}

// Instance derives a new useable Model from the definition.
func (def *ModelDef) Instance(adr uint16, callback func(pts []Point) error) (Model, error) {
	m := &model{}
	// iterate recursively instantiates a group definition and its nested
	// groups, advancing adr as points are laid out.
	var iterate func(def GroupDef) (Group, error)
	iterate = func(def GroupDef) (Group, error) {
		g := &group{
			name:   def.Name,
			atomic: bool(def.Atomic),
		}
		// The first group created becomes the model's root group.
		if m.group == nil {
			m.group = g
		}
		// Instantiate each point definition; Count may repeat a point.
		for _, def := range def.Points {
			for c := m.count(def.Count); c != 0; c-- {
				g.points = append(g.points, def.Instance(adr, g))
				adr = ceil(g.points.Last())
			}
		}
		if callback != nil {
			if err := callback(g.points); err != nil {
				return nil, err
			}
		}
		// Recurse into nested group definitions; Count may repeat a group.
		for _, def := range def.Groups {
			for c := m.count(def.Count); c != 0; c-- {
				x, err := iterate(def)
				if err != nil {
					return nil, err
				}
				g.groups = append(g.groups, x)
			}
		}
		return g, nil
	}
	if _, err := iterate(def.Group); err != nil {
		return nil, err
	}
	m.ID().Set(def.Id)
	// Model length excludes the two header points ("ID" and "L").
	m.Length().Set(m.Quantity() - 2)
	return m, nil
}

// model is internally used to build out a usable model.
type model struct{ *group }

// count returns the number of occurrences of a point or group in the model.
// A numeric c is used directly; a string c names a previously instantiated
// numeric point whose current value supplies the count; anything else is 1.
func (m *model) count(c interface{}) uint16 {
	switch v := c.(type) {
	case int:
		return uint16(v)
	case float64:
		return uint16(v)
	case string:
		for _, p := range m.Points() {
			if p.Name() == v {
				switch p := p.(type) {
				case Int16:
					return uint16(p.Get())
				case Int32:
					return uint16(p.Get())
				case Int64:
					return uint16(p.Get())
				case Uint16:
					return uint16(p.Get())
				case Uint32:
					return uint16(p.Get())
				case Uint64:
					return uint16(p.Get())
				case Acc16:
					return uint16(p.Get())
				case Acc32:
					return uint16(p.Get())
				case Acc64:
					return uint16(p.Get())
				case Count:
					return uint16(p.Get())
				}
			}
		}
	}
	return 1
}

// ID returns the models identifier as defined by the first point "ID".
func (m *model) ID() Uint16 {
	if id := m.Points().Point("ID"); id != nil {
		return id.(Uint16)
	}
	return nil
}

// Length returns the model length as defined by the second point "L".
func (m *model) Length() Uint16 { if l := m.Points().Point("L"); l != nil { return l.(Uint16) } return nil } // Verify validates the given model, checking for its compliance regarding the official sunspec specification. func Verify(m Model) error { if m.Length().Get()+2 != m.Quantity() { return errors.New("sunspec: Identifier L does not correlate with model quantity") } adr := m.Address() // spec ref 4.2.1 "An ID MUST consist of only alphanumeric characters // and the underscore character" - applies to group, point and symbol r, _ := regexp.Compile("^([[:alnum:]]|_)+$") return iterate(m, func(g Group) error { switch { case g.Address() != adr: return errors.New("sunspec: the given address range is not continuous") case !r.Match([]byte(g.Name())): return errors.New("sunspec: the name is violating the specifications definition") case g.Points() == nil: return errors.New("sunspec: the group is missing it´s point definition") } for _, p := range g.Points() { switch { case p.Address() != adr: return errors.New("sunspec: the given address range is not continuous") case !r.Match([]byte(p.Name())): return errors.New("sunspec: the name is violating the specifications definition") } adr += p.Quantity() } return nil }) } // Models is a collection wrapper for multiple models. // Offering functionalities applicable for them. type Models []Model // First returns the first model from the collection. func (mls Models) First() Model { return mls[0] } // Last returns the last model from the collection. func (mls Models) Last() Model { return mls[len(mls)-1] } // Model returns the first immediate model identified by id. func (mls Models) Model(id uint16) Model { for _, m := range mls { if m.ID().Get() == id { return m } } return nil } // Models returns all models from the device. // If ids are omitted all models are returned. func (mls Models) Models(ids ...uint16) Models { if len(ids) == 0 { return append(Models(nil), mls...) 
} col := make(Models, 0, len(ids)) for _, m := range mls { for _, id := range ids { if m.ID().Get() == id { col = append(col, m) break } } } return col } // Index returns the merged indexes of all models in the collection. func (mls Models) Index() []Index { idx := make([]Index, 0, len(mls)) for _, m := range mls { idx = append(idx, m) } return merge(idx) }
model.go
0.789437
0.401424
model.go
starcoder
package unityai

// Matrix4x4f is a 4x4 float32 matrix stored column-major:
// element (row, col) lives at m_Data[row + col*4], so indices 12..14 are
// the translation column.
type Matrix4x4f struct {
	m_Data [16]float32
}

// SetTR initializes the matrix from a rotation q and a translation pos.
func (this *Matrix4x4f) SetTR(pos Vector3f, q Quaternionf) {
	QuaternionToMatrix4(q, this)
	this.m_Data[12] = pos.x
	this.m_Data[13] = pos.y
	this.m_Data[14] = pos.z
}

// SetTRS initializes the matrix from rotation q, scale s (applied to the
// rotation's basis columns) and translation pos.
func (this *Matrix4x4f) SetTRS(pos Vector3f, q Quaternionf, s Vector3f) {
	QuaternionToMatrix4(q, this)
	// Scale each basis column of the rotation part.
	this.m_Data[0] *= s.x
	this.m_Data[1] *= s.x
	this.m_Data[2] *= s.x
	this.m_Data[4] *= s.y
	this.m_Data[5] *= s.y
	this.m_Data[6] *= s.y
	this.m_Data[8] *= s.z
	this.m_Data[9] *= s.z
	this.m_Data[10] *= s.z
	this.m_Data[12] = pos.x
	this.m_Data[13] = pos.y
	this.m_Data[14] = pos.z
}

// GetLossyScale returns the per-axis scale as the magnitude of each basis
// axis (lossy: shear/negative-scale information is not recoverable).
func (this *Matrix4x4f) GetLossyScale() Vector3f {
	var result Vector3f
	result.x = Magnitude(this.GetAxisX())
	result.y = Magnitude(this.GetAxisY())
	result.z = Magnitude(this.GetAxisZ())
	return result
}

// SetTRInverse initializes the matrix as the inverse of the transform
// (rotate by q, then translate by pos): inverse rotation composed with
// translation by -pos.
func (this *Matrix4x4f) SetTRInverse(pos Vector3f, q Quaternionf) {
	QuaternionToMatrix4(InverseQuaternion(q), this)
	var v Vector3f
	v.x = -pos.x
	v.y = -pos.y
	v.z = -pos.z
	this.Translate(v)
}

// Get returns element (row, col); storage is column-major.
func (this *Matrix4x4f) Get(row, col int) float32 {
	return this.m_Data[row+col*4]
}

// Set stores data at element (row, col).
func (this *Matrix4x4f) Set(row, col int, data float32) {
	this.m_Data[row+col*4] = data
}

// Translate post-multiplies the matrix by a translation: the fourth
// column becomes M * (inTrans, 1).
func (this *Matrix4x4f) Translate(inTrans Vector3f) {
	d := this.Get(0, 0)*inTrans.x + this.Get(0, 1)*inTrans.y + this.Get(0, 2)*inTrans.z + this.Get(0, 3)
	this.Set(0, 3, d)
	d = this.Get(1, 0)*inTrans.x + this.Get(1, 1)*inTrans.y + this.Get(1, 2)*inTrans.z + this.Get(1, 3)
	this.Set(1, 3, d)
	d = this.Get(2, 0)*inTrans.x + this.Get(2, 1)*inTrans.y + this.Get(2, 2)*inTrans.z + this.Get(2, 3)
	this.Set(2, 3, d)
	d = this.Get(3, 0)*inTrans.x + this.Get(3, 1)*inTrans.y + this.Get(3, 2)*inTrans.z + this.Get(3, 3)
	this.Set(3, 3, d)
}

// MultiplyPoint3 transforms v as a point (translation applied, w assumed 1;
// no perspective divide).
func (this *Matrix4x4f) MultiplyPoint3(v Vector3f) Vector3f {
	var res Vector3f
	res.x = this.m_Data[0]*v.x + this.m_Data[4]*v.y + this.m_Data[8]*v.z + this.m_Data[12]
	res.y = this.m_Data[1]*v.x + this.m_Data[5]*v.y + this.m_Data[9]*v.z + this.m_Data[13]
	res.z = this.m_Data[2]*v.x + this.m_Data[6]*v.y + this.m_Data[10]*v.z + this.m_Data[14]
	return res
}

// MultiplyVector3 transforms v as a direction (translation ignored).
func (this *Matrix4x4f) MultiplyVector3(v Vector3f) Vector3f {
	var res Vector3f
	res.x = this.m_Data[0]*v.x + this.m_Data[4]*v.y + this.m_Data[8]*v.z
	res.y = this.m_Data[1]*v.x + this.m_Data[5]*v.y + this.m_Data[9]*v.z
	res.z = this.m_Data[2]*v.x + this.m_Data[6]*v.y + this.m_Data[10]*v.z
	return res
}

// Scale scales the first three columns (including their w rows) by the
// corresponding component of scale.
func (this *Matrix4x4f) Scale(scale Vector3f) {
	this.m_Data[0+0*4] *= scale.x
	this.m_Data[1+0*4] *= scale.x
	this.m_Data[2+0*4] *= scale.x
	this.m_Data[3+0*4] *= scale.x
	this.m_Data[0+1*4] *= scale.y
	this.m_Data[1+1*4] *= scale.y
	this.m_Data[2+1*4] *= scale.y
	this.m_Data[3+1*4] *= scale.y
	this.m_Data[0+2*4] *= scale.z
	this.m_Data[1+2*4] *= scale.z
	this.m_Data[2+2*4] *= scale.z
	this.m_Data[3+2*4] *= scale.z
}

// GetAxisX returns the first basis column.
func (this *Matrix4x4f) GetAxisX() Vector3f {
	return NewVector3f(this.Get(0, 0), this.Get(1, 0), this.Get(2, 0))
}

// GetAxisY returns the second basis column.
func (this *Matrix4x4f) GetAxisY() Vector3f {
	return NewVector3f(this.Get(0, 1), this.Get(1, 1), this.Get(2, 1))
}

// GetAxisZ returns the third basis column.
func (this *Matrix4x4f) GetAxisZ() Vector3f {
	return NewVector3f(this.Get(0, 2), this.Get(1, 2), this.Get(2, 2))
}

// Matrix3x3f is a 3x3 float32 matrix stored column-major:
// element (row, col) lives at m_Data[row + col*3].
type Matrix3x3f struct {
	m_Data [9]float32
}

// Get returns element (row, column); storage is column-major.
func (this *Matrix3x3f) Get(row, column int) float32 {
	return this.m_Data[row+(column*3)]
}

// MultiplyPoint3 transforms v; identical to MultiplyVector3 since a 3x3
// matrix carries no translation.
func (this *Matrix3x3f) MultiplyPoint3(v Vector3f) Vector3f {
	return this.MultiplyVector3(v)
}

// MultiplyVector3 returns M * v.
func (this *Matrix3x3f) MultiplyVector3(v Vector3f) Vector3f {
	var res Vector3f
	res.x = this.m_Data[0]*v.x + this.m_Data[3]*v.y + this.m_Data[6]*v.z
	res.y = this.m_Data[1]*v.x + this.m_Data[4]*v.y + this.m_Data[7]*v.z
	res.z = this.m_Data[2]*v.x + this.m_Data[5]*v.y + this.m_Data[8]*v.z
	return res
}

// GetColumn returns column i as a vector.
func (this *Matrix3x3f) GetColumn(i int) Vector3f {
	return NewVector3f(this.Get(0, i), this.Get(1, i), this.Get(2, i))
}

// GetAxisX returns the first basis column.
func (this *Matrix3x3f) GetAxisX() Vector3f {
	return NewVector3f(this.Get(0, 0), this.Get(1, 0), this.Get(2, 0))
}

// GetAxisY returns the second basis column.
func (this *Matrix3x3f) GetAxisY() Vector3f {
	return NewVector3f(this.Get(0, 1), this.Get(1, 1), this.Get(2, 1))
}

// GetAxisZ returns the third basis column.
func (this *Matrix3x3f) GetAxisZ() Vector3f {
	return NewVector3f(this.Get(0, 2), this.Get(1, 2), this.Get(2, 2))
}
matrix.go
0.777638
0.612976
matrix.go
starcoder
package gcs

import (
	"io"
)

// bitWriter appends individual bits to a growing byte slice, MSB-first
// within each byte. The invariant: next is a single-bit mask marking the
// next free bit of the last byte; next == 0 means the last byte is full
// (or no byte exists yet).
type bitWriter struct {
	bytes []byte
	p     *byte // Pointer to last byte
	next  byte  // Next bit to write or skip
}

// writeOne writes a one bit to the bit stream.
func (b *bitWriter) writeOne() {
	if b.next == 0 {
		// Start a fresh byte with its MSB already set.
		b.bytes = append(b.bytes, 1<<7)
		b.p = &b.bytes[len(b.bytes)-1]
		b.next = 1 << 6
		return
	}
	*b.p |= b.next
	b.next >>= 1
}

// writeZero writes a zero bit to the bit stream.
func (b *bitWriter) writeZero() {
	if b.next == 0 {
		// Start a fresh zero byte; its MSB is the zero just written.
		b.bytes = append(b.bytes, 0)
		b.p = &b.bytes[len(b.bytes)-1]
		b.next = 1 << 6
		return
	}
	// The bit is already zero; just advance the cursor.
	b.next >>= 1
}

// writeNBits writes n number of LSB bits of data to the bit stream in big
// endian format. Panics if n > 64.
func (b *bitWriter) writeNBits(data uint64, n uint) {
	if n > 64 {
		panic("gcs: cannot write more than 64 bits of a uint64")
	}
	// Left-justify so the bits to emit sit at the top of the word.
	data <<= 64 - n
	// If byte is partially written, fill the rest
	for n > 0 {
		if b.next == 0 {
			break
		}
		if data&(1<<63) != 0 {
			b.writeOne()
		} else {
			b.writeZero()
		}
		n--
		data <<= 1
	}
	if n == 0 {
		return
	}
	// Write 8 bits at a time. (Safe to append directly: b.next is 0 here,
	// so the byte boundary is aligned.)
	for n >= 8 {
		b.bytes = append(b.bytes, byte(data>>56))
		n -= 8
		data <<= 8
	}
	// Write the remaining bits.
	for n > 0 {
		if data&(1<<63) != 0 {
			b.writeOne()
		} else {
			b.writeZero()
		}
		n--
		data <<= 1
	}
}

// bitReader consumes a byte slice bit-by-bit, MSB-first within each byte.
// bytes[0] is always the byte currently being read; next is the mask of
// the next unread bit in it.
type bitReader struct {
	bytes []byte
	next  byte // next bit to read in bytes[0]
}

// newBitReader returns a reader positioned at the first bit of bitstream.
func newBitReader(bitstream []byte) bitReader {
	return bitReader{
		bytes: bitstream,
		next:  1 << 7,
	}
}

// readUnary returns the number of unread sequential one bits before the next
// zero bit. Errors with io.EOF if no zero bits are encountered.
func (b *bitReader) readUnary() (uint64, error) {
	var value uint64
	for {
		if len(b.bytes) == 0 {
			// Note: the partial count accumulated so far is returned
			// alongside io.EOF.
			return value, io.EOF
		}
		for b.next != 0 {
			bit := b.bytes[0] & b.next
			b.next >>= 1
			if bit == 0 {
				return value, nil
			}
			value++
		}
		// Current byte exhausted; advance to the next one.
		b.bytes = b.bytes[1:]
		b.next = 1 << 7
	}
}

// readNBits reads n number of LSB bits of data from the bit stream in big
// endian format. Panics if n > 64.
func (b *bitReader) readNBits(n uint) (uint64, error) {
	if n > 64 {
		panic("gcs: cannot read more than 64 bits as a uint64")
	}
	if n == 0 {
		return 0, nil
	}
	if len(b.bytes) == 0 {
		return 0, io.EOF
	}
	var value uint64
	// If byte is partially read, read the rest
	if b.next != 1<<7 {
		for n > 0 {
			if b.next == 0 {
				// Finished the partial byte; re-align and fall through.
				b.next = 1 << 7
				b.bytes = b.bytes[1:]
				break
			}
			n--
			if b.bytes[0]&b.next != 0 {
				value |= 1 << n
			}
			b.next >>= 1
		}
	}
	if n == 0 {
		return value, nil
	}
	// Read 8 bits at a time. (Aligned here: b.next == 1<<7.)
	for n >= 8 {
		if len(b.bytes) == 0 {
			return 0, io.EOF
		}
		n -= 8
		value |= uint64(b.bytes[0]) << n
		b.bytes = b.bytes[1:]
	}
	if len(b.bytes) == 0 {
		if n != 0 {
			return 0, io.EOF
		}
		return value, nil
	}
	// Read the remaining bits.
	for n > 0 {
		n--
		if b.bytes[0]&b.next != 0 {
			value |= 1 << n
		}
		b.next >>= 1
	}
	return value, nil
}
gcs/bits.go
0.526343
0.483222
bits.go
starcoder
package parser import ( "errors" "strconv" ) type interpol struct { t float64 u float64 } func ParseDigitalIn(ch uint8, data []byte) uint8 { hex := parseHexDigit(32, data) * 16 + parseHexDigit(33, data) if (hex & (1 << ch)) > 0 { return 1 } return 0 } // Sensor type A func ParseADCSensorA(ch int, data []byte) (ret float64) { points := []interpol { interpol { 20, 2.47 }, interpol { 25, 2.37 }, interpol { 30, 2.27 }, interpol { 35, 2.17 }, interpol { 40, 2.06 }, interpol { 45, 1.94 }, interpol { 50, 1.82 }, interpol { 55, 1.70 }, interpol { 60, 1.57 }, interpol { 65, 1.46 }, interpol { 70, 1.34 }, interpol { 75, 1.23 }, interpol { 80, 1.13 }, } volt := parseADCVolt(ch, data) return interpolate(volt, points) } func interpolate(u float64, table []interpol) float64 { if u >= table[0].u { return table[0].t } for index := 0; index < len(table); index++ { if (table[index].u < u) { interval := table[index-1].u - table[index].u a := table[index-1].u - u frac := a / interval t_interval := table[index-1].t - table[index].t return table[index-1].t - t_interval * frac } } return table[len(table)-1].t } // Sensor type B func ParseADCSensorB(ch int, data []byte) (ret float64) { points := []interpol { interpol { -20, 4.54 }, interpol { -15, 4.42 }, interpol { -10, 4.29 }, interpol { -5, 4.13 }, interpol { 0, 3.96 }, interpol { 5, 3.77 }, interpol { 10, 3.56 }, interpol { 15, 3.34 }, interpol { 20, 3.05 }, } volt := parseADCVolt(ch, data) return interpolate(volt, points) } // Sensor type C func ParseADCSensorC(ch int, data []byte) (ret float64) { points := []interpol { interpol { 20, 2.60 }, interpol { 25, 2.47 }, interpol { 30, 2.34 }, interpol { 35, 2.20 }, interpol { 40, 2.06 }, interpol { 45, 1.91 }, interpol { 50, 1.77 }, interpol { 55, 1.63 }, interpol { 60, 1.49 }, interpol { 65, 1.36 }, interpol { 70, 1.23 }, interpol { 75, 1.12 }, interpol { 80, 1.01 }, } volt := parseADCVolt(ch, data) return interpolate(volt, points) } func parseADCVolt(ch int, data []byte) (ret float64) { 
return float64(parseADC(ch, data)) * (4.97 / 1024.0) } func parseADC(ch int, data []byte) (ret uint16) { index := ch * 4 ret = parseHexDigit(index, data) * 256 ret += parseHexDigit(index + 1, data) * 16 ret += parseHexDigit(index + 2, data) return } func parseHexDigit(index int, data []byte) uint16 { val := uint16(data[index]) if val > 57 { return val - 87 } return val - 48 } func IsSmallHexDigit(data []byte, index int) error { c := data[index] if c < 48 || c > 52 { return errors.New("char at index " + strconv.Itoa(index) + " must be a valid small hex digit (0-3) but was " + string(c)) } return nil } func IsHexDigit(data []byte, index int) error { c := data[index] if c < 48 || (c > 57 && (c < 97 || c > 102)) { return errors.New("char at index " + strconv.Itoa(index) + " must be a valid lowercase hex digit (0-9,a-f) but was " + string(c)) } return nil }
raspberry/gopath/src/b00lduck/datalogger/serial/parser/parser.go
0.614394
0.505066
parser.go
starcoder
package flatbuffers

import (
	"math"
)

// All codecs in this file read/write fixed-width little-endian scalars.
// SizeUint64 and SizeInt64 used below are byte-size constants declared
// elsewhere in this package.

type (
	// A SOffsetT stores a signed offset into arbitrary data.
	SOffsetT int32
	// A UOffsetT stores an unsigned offset into vector data.
	UOffsetT uint32
	// A VOffsetT stores an unsigned offset in a vtable.
	VOffsetT uint16
)

const (
	// VtableMetadataFields is the count of metadata fields in each vtable.
	VtableMetadataFields = 2
)

// GetByte decodes a little-endian byte from a byte slice.
func GetByte(buf []byte) byte {
	return byte(GetUint8(buf))
}

// GetBool decodes a little-endian bool from a byte slice.
// Only the exact value 1 decodes as true.
func GetBool(buf []byte) bool {
	return buf[0] == 1
}

// GetUint8 decodes a little-endian uint8 from a byte slice.
func GetUint8(buf []byte) (n uint8) {
	n = uint8(buf[0])
	return
}

// GetUint16 decodes a little-endian uint16 from a byte slice.
func GetUint16(buf []byte) (n uint16) {
	n |= uint16(buf[0])
	n |= uint16(buf[1]) << 8
	return
}

// GetUint32 decodes a little-endian uint32 from a byte slice.
func GetUint32(buf []byte) (n uint32) {
	n |= uint32(buf[0])
	n |= uint32(buf[1]) << 8
	n |= uint32(buf[2]) << 16
	n |= uint32(buf[3]) << 24
	return
}

// GetUint64 decodes a little-endian uint64 from a byte slice.
func GetUint64(buf []byte) (n uint64) {
	n |= uint64(buf[0])
	n |= uint64(buf[1]) << 8
	n |= uint64(buf[2]) << 16
	n |= uint64(buf[3]) << 24
	n |= uint64(buf[4]) << 32
	n |= uint64(buf[5]) << 40
	n |= uint64(buf[6]) << 48
	n |= uint64(buf[7]) << 56
	return
}

// GetInt8 decodes a little-endian int8 from a byte slice.
func GetInt8(buf []byte) (n int8) {
	n = int8(buf[0])
	return
}

// GetInt16 decodes a little-endian int16 from a byte slice.
func GetInt16(buf []byte) (n int16) {
	n |= int16(buf[0])
	n |= int16(buf[1]) << 8
	return
}

// GetInt32 decodes a little-endian int32 from a byte slice.
func GetInt32(buf []byte) (n int32) {
	n |= int32(buf[0])
	n |= int32(buf[1]) << 8
	n |= int32(buf[2]) << 16
	n |= int32(buf[3]) << 24
	return
}

// GetInt64 decodes a little-endian int64 from a byte slice.
func GetInt64(buf []byte) (n int64) {
	n |= int64(buf[0])
	n |= int64(buf[1]) << 8
	n |= int64(buf[2]) << 16
	n |= int64(buf[3]) << 24
	n |= int64(buf[4]) << 32
	n |= int64(buf[5]) << 40
	n |= int64(buf[6]) << 48
	n |= int64(buf[7]) << 56
	return
}

// GetFloat32 decodes a little-endian float32 from a byte slice.
func GetFloat32(buf []byte) float32 {
	x := GetUint32(buf)
	return math.Float32frombits(x)
}

// GetFloat64 decodes a little-endian float64 from a byte slice.
func GetFloat64(buf []byte) float64 {
	x := GetUint64(buf)
	return math.Float64frombits(x)
}

// GetUOffsetT decodes a little-endian UOffsetT from a byte slice.
// (Decodes via GetInt32 and reinterprets the bits as unsigned.)
func GetUOffsetT(buf []byte) UOffsetT {
	return UOffsetT(GetInt32(buf))
}

// GetSOffsetT decodes a little-endian SOffsetT from a byte slice.
func GetSOffsetT(buf []byte) SOffsetT {
	return SOffsetT(GetInt32(buf))
}

// GetVOffsetT decodes a little-endian VOffsetT from a byte slice.
func GetVOffsetT(buf []byte) VOffsetT {
	return VOffsetT(GetUint16(buf))
}

// WriteByte encodes a little-endian uint8 into a byte slice.
func WriteByte(buf []byte, n byte) {
	WriteUint8(buf, uint8(n))
}

// WriteBool encodes a little-endian bool into a byte slice.
func WriteBool(buf []byte, b bool) {
	buf[0] = 0
	if b {
		buf[0] = 1
	}
}

// WriteUint8 encodes a little-endian uint8 into a byte slice.
func WriteUint8(buf []byte, n uint8) {
	buf[0] = byte(n)
}

// WriteUint16 encodes a little-endian uint16 into a byte slice.
func WriteUint16(buf []byte, n uint16) {
	buf[0] = byte(n)
	buf[1] = byte(n >> 8)
}

// WriteUint32 encodes a little-endian uint32 into a byte slice.
func WriteUint32(buf []byte, n uint32) {
	buf[0] = byte(n)
	buf[1] = byte(n >> 8)
	buf[2] = byte(n >> 16)
	buf[3] = byte(n >> 24)
}

// WriteUint64 encodes a little-endian uint64 into a byte slice.
func WriteUint64(buf []byte, n uint64) {
	for i := uint(0); i < uint(SizeUint64); i++ {
		buf[i] = byte(n >> (i * 8))
	}
}

// WriteInt8 encodes a little-endian int8 into a byte slice.
func WriteInt8(buf []byte, n int8) {
	buf[0] = byte(n)
}

// WriteInt16 encodes a little-endian int16 into a byte slice.
func WriteInt16(buf []byte, n int16) {
	buf[0] = byte(n)
	buf[1] = byte(n >> 8)
}

// WriteInt32 encodes a little-endian int32 into a byte slice.
func WriteInt32(buf []byte, n int32) {
	buf[0] = byte(n)
	buf[1] = byte(n >> 8)
	buf[2] = byte(n >> 16)
	buf[3] = byte(n >> 24)
}

// WriteInt64 encodes a little-endian int64 into a byte slice.
func WriteInt64(buf []byte, n int64) {
	for i := uint(0); i < uint(SizeInt64); i++ {
		buf[i] = byte(n >> (i * 8))
	}
}

// WriteFloat32 encodes a little-endian float32 into a byte slice.
func WriteFloat32(buf []byte, n float32) {
	WriteUint32(buf, math.Float32bits(n))
}

// WriteFloat64 encodes a little-endian float64 into a byte slice.
func WriteFloat64(buf []byte, n float64) {
	WriteUint64(buf, math.Float64bits(n))
}

// WriteVOffsetT encodes a little-endian VOffsetT into a byte slice.
func WriteVOffsetT(buf []byte, n VOffsetT) {
	WriteUint16(buf, uint16(n))
}

// WriteSOffsetT encodes a little-endian SOffsetT into a byte slice.
func WriteSOffsetT(buf []byte, n SOffsetT) {
	WriteInt32(buf, int32(n))
}

// WriteUOffsetT encodes a little-endian UOffsetT into a byte slice.
func WriteUOffsetT(buf []byte, n UOffsetT) {
	WriteUint32(buf, uint32(n))
}
vendor/github.com/elastic/beats/vendor/github.com/google/flatbuffers/go/encode.go
0.765944
0.420481
encode.go
starcoder
package leetcode

import (
	"container/heap"
	"math"
	"sort"
)

/*
You are given two integer arrays nums1 and nums2 sorted in ascending order and an integer k.
Define a pair (u,v) which consists of one element from the first array and one element from the second array.
Find the k pairs (u1,v1),(u2,v2) ...(uk,vk) with the smallest sums.

Example 1:
```
Given nums1 = [1,7,11], nums2 = [2,4,6], k = 3
Return: [1,2],[1,4],[1,6]
The first 3 pairs are returned from the sequence: [1,2],[1,4],[1,6],[7,2],[7,4],[11,2],[7,6],[11,4],[11,6]
```

Example 2:
```
Given nums1 = [1,1,2], nums2 = [1,2,3], k = 2
Return: [1,1],[1,1]
The first 2 pairs are returned from the sequence: [1,1],[1,1],[1,2],[2,1],[1,2],[2,2],[1,3],[1,3],[2,3]
```

Example 3:
```
Given nums1 = [1,2], nums2 = [3], k = 3
Return: [1,3],[2,3]
All possible pairs are returned from the sequence: [1,3],[2,3]
```
*/

// Sum couples one element from each array with their precomputed sum.
type Sum struct {
	n1, n2 int
	sum    int
}

// Sums implements sort.Interface, ordering pairs by ascending sum.
type Sums []*Sum

func (s Sums) Len() int           { return len(s) }
func (s Sums) Less(i, j int) bool { return s[i].sum < s[j].sum }
func (s Sums) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

// KSmallestPairs is the brute-force solution: materialize all len1*len2
// pairs, sort by sum, return the first k. O(nm log(nm)) time, O(nm) space.
func KSmallestPairs(nums1 []int, nums2 []int, k int) [][]int {
	len1, len2 := len(nums1), len(nums2)
	if len1 == 0 || len2 == 0 || k <= 0 {
		return nil
	}
	sums := make(Sums, 0, len1*len2)
	for _, n1 := range nums1 {
		for _, n2 := range nums2 {
			sums = append(sums, &Sum{
				n1:  n1,
				n2:  n2,
				sum: n1 + n2,
			})
		}
	}
	sort.Sort(sums)
	if k > len(sums) {
		k = len(sums)
	}
	res := make([][]int, k)
	for i := 0; i < k; i++ {
		s := sums[i]
		res[i] = []int{s.n1, s.n2}
	}
	return res
}

// KSmallestPairs2 avoids the priority queue: per output pair it scans all
// rows for the minimal frontier sum. O(k*len1) time, O(len1) extra space.
// https://discuss.leetcode.com/topic/50527/java-10ms-solution-no-priority-queue
func KSmallestPairs2(nums1 []int, nums2 []int, k int) [][]int {
	len1, len2 := len(nums1), len(nums2)
	if len1 == 0 || len2 == 0 || k <= 0 {
		return nil
	}
	if m := len1 * len2; k > m {
		k = m
	}
	// index[i] is a sliding frontier: the next unconsumed position in
	// nums2 for row nums1[i]. (Translated from the original Chinese
	// comment: "index acts like a bump, sliding backwards".)
	index := make([]int, len1)
	ret := make([][]int, 0, k)
	for ; k > 0; k-- {
		minVal := int(math.MaxInt32)
		in := -1
		for i, n := range nums1 {
			ind := index[i]
			if ind >= len2 {
				continue
			}
			if s := n + nums2[ind]; s < minVal {
				minVal = s
				in = i
			}
		}
		if in == -1 {
			// Every row exhausted before k pairs were produced.
			break
		}
		ret = append(ret, []int{nums1[in], nums2[index[in]]})
		index[in]++
	}
	return ret
}

// Item is one heap entry: a (row, col) index pair into nums1/nums2.
type Item struct {
	value    []int // The value of the item; arbitrary.
	priority int   // The priority of the item in the queue.
	// The index is needed by update and is maintained by the heap.Interface methods.
	index int // The index of the item in the heap.
}

// A PriorityQueue implements heap.Interface and holds Items.
// Note: Less uses ">", so this is a MAX-heap on priority; callers store
// NEGATED sums to get min-sum-first behavior.
type PriorityQueue []*Item

func (pq PriorityQueue) Len() int { return len(pq) }

func (pq PriorityQueue) Less(i, j int) bool {
	// We want Pop to give us the highest, not lowest, priority so we use greater than here.
	return pq[i].priority > pq[j].priority
}

func (pq PriorityQueue) Swap(i, j int) {
	pq[i], pq[j] = pq[j], pq[i]
	pq[i].index = i
	pq[j].index = j
}

func (pq *PriorityQueue) Push(x interface{}) {
	n := len(*pq)
	item := x.(*Item)
	item.index = n
	*pq = append(*pq, item)
}

func (pq *PriorityQueue) Pop() interface{} {
	old := *pq
	n := len(old)
	item := old[n-1]
	item.index = -1 // for safety
	*pq = old[0 : n-1]
	return item
}

// KSmallestPairs3 uses a lazy heap expansion: popping (i, j) pushes (i, j+1),
// and (i+1, 0) when j == 0, so at most O(k) cells are ever materialized.
func KSmallestPairs3(nums1 []int, nums2 []int, k int) [][]int {
	len1, len2 := len(nums1), len(nums2)
	if len1 == 0 || len2 == 0 || k <= 0 {
		return nil
	}
	if m := len1 * len2; k > m {
		k = m
	}
	ret := make([][]int, 0, k)
	queue := make(PriorityQueue, 0, k)
	// NOTE(review): this seed push uses a POSITIVE priority while every
	// later push negates the sum. It is harmless only because this item is
	// alone in the queue when popped; for consistency it should be
	// -(nums1[0] + nums2[0]).
	heap.Push(&queue, &Item{
		value:    []int{0, 0},
		priority: nums1[0] + nums2[0],
	})
	for k > 0 && queue.Len() > 0 {
		item := heap.Pop(&queue).(*Item)
		i, j := item.value[0], item.value[1]
		//fmt.Println("pop\t", "i:", i, "\tj:", j)
		ret = append(ret, []int{nums1[i], nums2[j]})
		k--
		if j+1 < len2 {
			heap.Push(&queue, &Item{
				value:    []int{i, j + 1},
				priority: -(nums1[i] + nums2[j+1]),
			})
			//fmt.Println("push\t", "i:", i, "\tj:", j+1, "\tprio:", nums1[i]+nums2[j+1])
		}
		// Only the first column spawns the next row, so each (i, 0) is
		// pushed exactly once.
		if j == 0 && i+1 < len1 {
			heap.Push(&queue, &Item{
				value:    []int{i + 1, 0},
				priority: -(nums1[i+1] + nums2[0]),
			})
			//fmt.Println("push\t", "i:", i+1, "\tj:", 0, "\tprio:", nums1[i+1]+nums2[0])
		}
	}
	return ret
}

// KSmallestPairs4 seeds the heap with min(k, len1) rows up front, then
// advances only along columns. O(k log(min(n, k))) time.
// https://discuss.leetcode.com/topic/50908/heap-java-o-k-log-min-n-k-8ms-solution-with-briefly-explanation
func KSmallestPairs4(nums1 []int, nums2 []int, k int) [][]int {
	len1, len2 := len(nums1), len(nums2)
	if len1 == 0 || len2 == 0 || k <= 0 {
		return nil
	}
	if m := len1 * len2; k > m {
		k = m
	}
	ret := make([][]int, 0, k)
	m := int(math.Min(float64(k), float64(len1)))
	queue := make(PriorityQueue, m)
	// Seed: pair every candidate row of nums1 with nums2[0].
	// (Translated from the original Chinese comment.)
	for i := 0; i < m; i++ {
		queue[i] = &Item{
			value:    []int{i, 0},
			priority: -(nums1[i] + nums2[0]),
			index:    i,
		}
	}
	heap.Init(&queue)
	for i := 0; i < k; i++ {
		item := heap.Pop(&queue).(*Item)
		row, col := item.value[0], item.value[1]
		//fmt.Println("pop\t", "row:", row, "\tcol:", col)
		if col < len2-1 {
			heap.Push(&queue, &Item{
				value:    []int{row, col + 1},
				priority: -(nums1[row] + nums2[col+1]),
			})
			//fmt.Println("push\t", "row:", row, "\tcol:", col+1, "\tprio:", nums1[row]+nums2[col+1])
		}
		ret = append(ret, []int{nums1[row], nums2[col]})
	}
	return ret
}
impl-go/k-pairs-with-smallest-sums.go
0.544075
0.864539
k-pairs-with-smallest-sums.go
starcoder
package main

import (
	"fmt"
)

/*
Given two non-empty linked lists representing two non-negative integers.
The digits are stored in reverse order, and each of their nodes contains a single digit.
Add the two numbers and return the sum as a linked list.

Input: l1 = [2,4,3], l2 = [5,6,4]
Output: [7,0,8]
Explanation: 342 + 465 = 807.

Example 2:
Input: l1 = [0], l2 = [0]
Output: [0]

Example 3:
Input: l1 = [9,9,9,9,9,9,9], l2 = [9,9,9,9]
Output: [8,9,9,9,0,0,0,1]
*/

// ListNode is a singly linked list node holding one decimal digit.
type ListNode struct {
	Val  int
	Next *ListNode
}

// addTwoNumbers adds two non-negative integers stored as reversed digit
// lists and returns the sum in the same representation.
//
// Rewritten iteratively: the original recursive helper duplicated the
// carry/sum logic across four nearly identical nil-combination branches
// and addTwoNumbers itself dereferenced both inputs unconditionally
// (panicking on nil). Treating an exhausted list as contributing 0
// collapses every case into one loop and tolerates nil inputs.
func addTwoNumbers(l1 *ListNode, l2 *ListNode) *ListNode {
	dummy := &ListNode{} // sentinel head, discarded before returning
	tail := dummy
	carry := 0
	for l1 != nil || l2 != nil || carry != 0 {
		sum := carry
		if l1 != nil {
			sum += l1.Val
			l1 = l1.Next
		}
		if l2 != nil {
			sum += l2.Val
			l2 = l2.Next
		}
		carry = sum / 10
		tail.Next = &ListNode{Val: sum % 10}
		tail = tail.Next
	}
	return dummy.Next
}

// convertListNodeToString renders a digit list as "[v1,v2,...]".
func convertListNodeToString(total *ListNode) string {
	output := "["
	for t := total; t != nil; t = t.Next {
		if t.Next != nil {
			output += fmt.Sprintf("%+v,", t.Val)
		} else {
			output += fmt.Sprintf("%+v", t.Val)
		}
	}
	output += "]"
	return output
}

// buildListNodeFromSlice builds a digit list from a non-empty slice,
// preserving element order. (Iterative; replaces the recursive helper.)
func buildListNodeFromSlice(l1 []int) *ListNode {
	head := &ListNode{Val: l1[0]}
	tail := head
	for _, v := range l1[1:] {
		tail.Next = &ListNode{Val: v}
		tail = tail.Next
	}
	return head
}

func main() {
	var (
		l1 *ListNode
		l2 *ListNode
	)
	l1 = buildListNodeFromSlice([]int{2, 4, 3})
	l2 = buildListNodeFromSlice([]int{5, 6, 4})
	fmt.Println(convertListNodeToString(l1) + " + " + convertListNodeToString(l2) + " = " + convertListNodeToString(addTwoNumbers(l1, l2)))

	l1 = buildListNodeFromSlice([]int{9, 9, 9, 9, 9, 9, 9})
	l2 = buildListNodeFromSlice([]int{9, 9, 9, 9})
	fmt.Println(convertListNodeToString(l1) + " + " + convertListNodeToString(l2) + " = " + convertListNodeToString(addTwoNumbers(l1, l2)))

	l1 = buildListNodeFromSlice([]int{0})
	l2 = buildListNodeFromSlice([]int{0})
	fmt.Println(convertListNodeToString(l1) + " + " + convertListNodeToString(l2) + " = " + convertListNodeToString(addTwoNumbers(l1, l2)))

	l1 = buildListNodeFromSlice([]int{9, 9, 9})
	l2 = buildListNodeFromSlice([]int{2})
	fmt.Println(convertListNodeToString(l1) + " + " + convertListNodeToString(l2) + " = " + convertListNodeToString(addTwoNumbers(l1, l2)))
}
Graphs/AddTwoNumbers/main.go
0.612773
0.454835
main.go
starcoder
package samples func init() { sampleDataProposalCreateOperation[52] = `{ "expiration_time": "2016-10-09T10:19:01", "extensions": [], "fee": { "amount": 2326143, "asset_id": "1.3.0" }, "fee_paying_account": "1.2.111576", "proposed_ops": [ { "op": [ 6, { "account": "1.2.132053", "active": { "account_auths": [ [ "1.2.159", 1 ], [ "1.2.285", 1 ], [ "1.2.11243", 1 ], [ "1.2.11709", 1 ], [ "1.2.12287", 1 ], [ "1.2.12376", 1 ], [ "1.2.22458", 1 ], [ "1.2.36590", 1 ], [ "1.2.111576", 1 ], [ "1.2.125488", 1 ] ], "address_auths": [], "key_auths": [], "weight_threshold": 5 }, "extensions": {}, "fee": { "amount": 30126, "asset_id": "1.3.0" }, "new_options": { "extensions": [], "memo_key": "<KEY>", "num_committee": 0, "num_witness": 0, "votes": [], "voting_account": "1.2.5" }, "owner": { "account_auths": [ [ "1.2.159", 1 ], [ "1.2.285", 1 ], [ "1.2.11243", 1 ], [ "1.2.12287", 1 ], [ "1.2.12376", 1 ], [ "1.2.22458", 1 ], [ "1.2.36590", 1 ], [ "1.2.111576", 4 ], [ "1.2.125488", 4 ] ], "address_auths": [], "key_auths": [], "weight_threshold": 9 } } ] } ] }` } //end of file
gen/samples/proposalcreateoperation_52.go
0.527317
0.448306
proposalcreateoperation_52.go
starcoder
package prayertime import ( "github.com/buildscientist/prayertime/julian" "github.com/buildscientist/prayertime/trig" "math" "strconv" "time" ) var methodParams = make(map[int][]float64) var PrayerTimeNames = []string{FAJR, SUNRISE, DHUHR, ASR, SUNSET, MAGHRIB, ISHA} var julianDate float64 var prayerTimesCurrent []float64 var Offsets = [7]int{0, 0, 0, 0, 0, 0, 0} func init() { //Prayer Time Method Parameters methodParams[JAFARI] = []float64{16, 0, 4, 0, 14} methodParams[KARACHI] = []float64{18, 1, 0, 0, 18} methodParams[ISNA] = []float64{15, 1, 0, 0, 15} methodParams[MWL] = []float64{18, 1, 0, 0, 17} methodParams[MAKKAH] = []float64{18.5, 1, 0, 1, 90} methodParams[EGYPT] = []float64{19.5, 1, 0, 0, 17.5} methodParams[TEHRAN] = []float64{17.7, 0, 4.5, 0, 14} methodParams[CUSTOM] = []float64{18, 1, 0, 0, 17} } type PrayerLocale struct { latitude, longitude, timezone float64 PrayerCalcMethod, AsrJuristic, AdjustHighLats, TimeFormat int } func New(latitude, longitude, timezone float64) PrayerLocale { return PrayerLocale{latitude, longitude, timezone, ISNA, SHAFII, NONE, TIME_12} } //Prayer Time Calculation functions func sunPosition(julianDate float64) (position []float64) { var daysFromJulianEpoch = julianDate - 2451545.0 var meanSunAnomaly = trig.FixAngle(357.529 + (0.98560028 * daysFromJulianEpoch)) var meanSunLongitude = trig.FixAngle(280.459 + (0.98564736 * daysFromJulianEpoch)) var geoCentricElipticSunLongitude = trig.FixAngle(meanSunLongitude + (1.915 * trig.DegreeSin(meanSunAnomaly)) + (0.020 * trig.DegreeSin(2*meanSunAnomaly))) var meanObliquityEcliptic = 23.439 - (0.00000036 * daysFromJulianEpoch) var sunDeclination = trig.DegreeArcSin(trig.DegreeSin(meanObliquityEcliptic) * trig.DegreeSin(geoCentricElipticSunLongitude)) var rightAscension = (trig.DegreeArcTan2(trig.DegreeCos(meanObliquityEcliptic)*trig.DegreeSin(geoCentricElipticSunLongitude), trig.DegreeCos(geoCentricElipticSunLongitude))) / 15.0 rightAscension = trig.FixHour(rightAscension) var 
equationOfTime = meanSunLongitude/15.0 - rightAscension return []float64{sunDeclination, equationOfTime} } func equationOfTime(julianDate float64) float64 { var equationOfTime = sunPosition(julianDate)[1] return equationOfTime } func sunDeclination(julianDate float64) float64 { var declinationAngle = sunPosition(julianDate)[0] return declinationAngle } func computeMidDay(time float64) float64 { var currentTime = equationOfTime(julianDate + time) return trig.FixHour(12 - currentTime) } func computeTime(prayTime *PrayerLocale, angle, time float64) float64 { var D = sunDeclination(julianDate + time) var Z = computeMidDay(time) var beg = -trig.DegreeSin(angle) - trig.DegreeSin(D)*trig.DegreeSin(prayTime.latitude) var mid = trig.DegreeCos(D) * trig.DegreeCos(prayTime.latitude) var v = trig.DegreeArcCos(beg/mid) / 15.0 if angle > 90 { return Z - v } return Z + v } func computeAsr(prayTime *PrayerLocale, step, time float64) float64 { var D = sunDeclination(julianDate + time) var G = -trig.DegreeArcCot(step + trig.DegreeTan(math.Abs(prayTime.latitude-D))) return computeTime(prayTime, G, time) } func timeDifference(timeOne, timeTwo float64) float64 { return trig.FixHour(timeTwo - timeOne) } func getDatePrayerTimes(prayTime *PrayerLocale, year, month, day int) []string { julianDate = julian.ConvertFromGregToJul(year, month, day) var longitudinalDiff = prayTime.longitude / (15.0 * 24.0) julianDate = julianDate - longitudinalDiff return computeDayTimes(prayTime) } func CalculatePrayerTimes(prayTime *PrayerLocale, today time.Time) []string { var year = today.Year() var month = int(today.Month()) var day = today.Day() return getDatePrayerTimes(prayTime, year, month, day) } func setCustomParams(params []float64, prayTime *PrayerLocale) { for x := 0; x < 5; x++ { if params[x] == -1 { params[x] = methodParams[prayTime.PrayerCalcMethod][x] methodParams[CUSTOM] = params } else { methodParams[CUSTOM][x] = params[x] } } prayTime.PrayerCalcMethod = CUSTOM } func 
setPrayerAngle(prayerName string, angle float64, prayTime *PrayerLocale) { switch { case prayerName == FAJR: setCustomParams([]float64{angle, -1, -1, -1, -1}, prayTime) case prayerName == MAGHRIB: setCustomParams([]float64{-1, 0, angle, -1, -1}, prayTime) case prayerName == ISHA: setCustomParams([]float64{-1, -1, -1, 0, angle}, prayTime) } } func setPrayerMinutes(prayerName string, minutes float64, prayTime *PrayerLocale) { switch { case prayerName == MAGHRIB: setCustomParams([]float64{-1, 1, minutes, -1, -1}, prayTime) case prayerName == ISHA: setCustomParams([]float64{-1, -1, -1, 1, minutes}, prayTime) } } func floatToTime(time float64, useSuffix, twentyFourHourFormat bool) string { if math.IsNaN(time) { return INVALID_TIME } var result, suffix string time = trig.FixHour(time + 0.5/60.0) var hours = int(math.Floor(time)) var minutes = math.Floor((time - float64(hours)) * 60.0) if useSuffix { switch { case hours >= 12: suffix = "PM" default: suffix = "AM" } if !twentyFourHourFormat { hours = (((hours + 12) - 1) % 12) + 1 //Note the order of operations } switch { case (hours >= 0 && hours <= 9) && (minutes >= 0 && minutes <= 9): result = "0" + strconv.Itoa(hours) + ":0" + strconv.Itoa(int(minutes)) + " " + suffix case hours >= 0 && hours <= 9: result = "0" + strconv.Itoa(hours) + ":" + strconv.Itoa(int(minutes)) + " " + suffix case minutes >= 0 && minutes <= 9: result = strconv.Itoa(hours) + ":0" + strconv.Itoa(int(minutes)) + " " + suffix default: result = strconv.Itoa(hours) + ":" + strconv.Itoa(int(minutes)) + " " + suffix } } else { if !twentyFourHourFormat { hours = (((hours + 12) - 1) % 12) + 1 //Note the order of operations } switch { case (hours >= 0 && hours <= 9) && (minutes >= 0 && minutes <= 9): result = "0" + strconv.Itoa(hours) + ":0" + strconv.Itoa(int(minutes)) case hours >= 0 && hours <= 9: result = "0" + strconv.Itoa(hours) + ":" + strconv.Itoa(int(minutes)) case minutes >= 0 && minutes <= 9: result = strconv.Itoa(hours) + ":0" + 
strconv.Itoa(int(minutes)) default: result = strconv.Itoa(hours) + ":" + strconv.Itoa(int(minutes)) } } return result } func dayPortion(times []float64) []float64 { for x := 0; x < 7; x++ { times[x] /= 24 } return times } func computePrayerTime(prayTime *PrayerLocale, times []float64) []float64 { var time = dayPortion(times) var angle = 180 - methodParams[prayTime.PrayerCalcMethod][0] var fajr = computeTime(prayTime, angle, time[0]) var sunrise = computeTime(prayTime, 180-0.833, time[1]) var dhuhr = computeMidDay(time[2]) var asr = computeAsr(prayTime, float64(1+prayTime.AsrJuristic), time[3]) var sunset = computeTime(prayTime, 0.833, time[4]) var maghrib = computeTime(prayTime, methodParams[prayTime.PrayerCalcMethod][2], time[5]) var isha = computeTime(prayTime, methodParams[prayTime.PrayerCalcMethod][4], time[6]) var computedPrayerTimes = []float64{fajr, sunrise, dhuhr, asr, sunset, maghrib, isha} return computedPrayerTimes } func adjustTimes(prayTime *PrayerLocale, times []float64) []float64 { for x := 0; x < len(times); x++ { times[x] = times[x] + (prayTime.timezone - (prayTime.longitude / 15)) } times[2] = times[2] + float64(DHUHR_MINUTES/60) if methodParams[prayTime.PrayerCalcMethod][1] == 1 { times[5] = times[4] + methodParams[prayTime.PrayerCalcMethod][2]/60 } if methodParams[prayTime.PrayerCalcMethod][3] == 1 { times[6] = times[5] + methodParams[prayTime.PrayerCalcMethod][4]/60 } if prayTime.AdjustHighLats != 0 { times = adjustHighLatTimes(times, prayTime) } return times } // Adjust Fajr, Isha and Maghrib for locations in higher latitudes func adjustHighLatTimes(times []float64, prayTime *PrayerLocale) []float64 { var nightTime = timeDifference(times[4], times[1]) var fajrDiff = nightPortion(methodParams[prayTime.PrayerCalcMethod][0], prayTime) * nightTime if math.IsNaN(times[0]) || timeDifference(times[0], times[1]) > fajrDiff { times[0] = times[1] - fajrDiff } var ishaAngle float64 if methodParams[prayTime.PrayerCalcMethod][3] == 0 { ishaAngle = 
methodParams[prayTime.PrayerCalcMethod][4] } else { ishaAngle = 18.0 } var ishaDiff = nightPortion(ishaAngle, prayTime) * nightTime if math.IsNaN(times[6]) || timeDifference(times[4], times[6]) > ishaDiff { times[6] = times[4] + ishaDiff } var maghribAngle float64 if methodParams[prayTime.PrayerCalcMethod][1] == 0 { maghribAngle = methodParams[prayTime.PrayerCalcMethod][2] } else { maghribAngle = 4.0 } var maghribDiff = nightPortion(maghribAngle, prayTime) * nightTime if math.IsNaN(times[5]) || timeDifference(times[4], times[5]) > maghribDiff { times[5] = times[4] + maghribDiff } return times } func nightPortion(angle float64, prayTime *PrayerLocale) float64 { var calc = 0.0 switch { case prayTime.AdjustHighLats == ANGLE_BASED: calc = angle / 60.0 case prayTime.AdjustHighLats == MIDNIGHT: calc = 0.5 case prayTime.AdjustHighLats == ONE_SEVENTH: calc = 0.14286 } return calc } func tune(offsetTimes []int) { for x := 0; x < len(offsetTimes); x++ { Offsets[x] = offsetTimes[x] } } func tuneTimes(times []float64) []float64 { for x := 0; x < len(times); x++ { times[x] = times[x] + float64(Offsets[x]/60.0) } return times } func adjustTimesFormat(times []float64, prayTime *PrayerLocale) []string { var result []string if prayTime.TimeFormat == 3 { for index := range times { result = append(result, strconv.FormatFloat(times[index], 'f', -1, 64)) } return result } for x := 0; x < 7; x++ { switch { case prayTime.TimeFormat == TIME_12: result = append(result, floatToTime(times[x], true, false)) case prayTime.TimeFormat == TIME_12_NO_SUFFIX: result = append(result, floatToTime(times[x], false, false)) case prayTime.TimeFormat == TIME_24: result = append(result, floatToTime(times[x], false, true)) } } return result } func computeDayTimes(prayTime *PrayerLocale) []string { var times = []float64{5, 6, 12, 13, 18, 18, 18} for x := 1; x <= NUMBER_OF_ITERATIONS; x++ { times = computePrayerTime(prayTime, times) } times = adjustTimes(prayTime, times) times = tuneTimes(times) return 
adjustTimesFormat(times, prayTime) }
prayertime.go
0.615435
0.453746
prayertime.go
starcoder
package utils import ( "log" "math" "strconv" ) // NextIndex finds next valid index in given collection. // It prevents index out of bound by wrapping back to 0 func NextIndex(collection []string, index int) int { max := len(collection) - 1 if index >= max { return 0 } return index + 1 } // StringPrefixSum returns a list of prefix sum for string type. // e.g, "text" -> ["", "t", "te", "tex", "text"] func StringPrefixSum(text string) []string { sum := make([]string, 0) for i := range text { sum = append(sum, text[0:i]) } return append(sum, text) } // TernaryString is a simple implementation of ternary operator. // Only use this for trivial assignments as all expressions will be evaluated func TernaryString(condition bool, ifTrue, ifFalse string) string { if condition { return ifTrue } return ifFalse } // TernaryBool is a simple implementation of ternary operator. // Only use this for trivial assignments as all expressions will be evaluated func TernaryBool(condition, ifTrue, ifFalse bool) bool { if condition { return ifTrue } return ifFalse } // TernaryInt is a simple implementation of ternary operator. // Only use this for trivial assignments as all expressions will be evaluated func TernaryInt(condition bool, ifTrue, ifFalse int) int { if condition { return ifTrue } return ifFalse } // MultiplyUint8 multiplies uint8 inputs without wrapping func MultiplyUint8(a, b uint8) uint8 { if a == 0 || b == 0 { return 0 } if math.MaxUint8/a >= b { return a * b } return math.MaxUint8 } // MinusUint8 minuses uint8 inputs without wrapping func MinusUint8(a, b uint8) uint8 { if a > b { return a - b } return 0 } // ParseToStrings will try to parse dynamic data into a list of strings. 
// It assumes the underlying data to be a list of float64 func ParseToStrings(data interface{}) []string { var ( result = make([]string, 0) values, ok = data.([]interface{}) ) if !ok { log.Printf("Failed to parse %v into collection.\n", data) return result } for _, value := range values { if parsed, ok := value.(float64); !ok { log.Printf("Failed to parse %v into float64.\n", value) } else { result = append(result, strconv.Itoa(int(parsed))) } } return result } // FloatFromMap retrieves value from a map and parse it into float64. // If the value does not exist or the value is not float64, 0 will be returned func FloatFromMap(table map[string]interface{}, key string) (float64, bool) { if temp, ok := table[key]; !ok { return 0, false } else if value, ok := temp.(float64); !ok { return 0, false } else { return value, true } } // StringFromMap retrieves value from a map and parse it into string. // If the value does not exist or the value is not string, empty string will be returned func StringFromMap(table map[string]interface{}, key string) (string, bool) { if temp, ok := table[key]; !ok { return "", false } else if value, ok := temp.(string); !ok { return "", false } else { return value, true } } // StringsToUint8s maps a slice of strings into a slice of uint8s func StringsToUint8s(data []string) []uint8 { result := make([]uint8, len(data)) for i, value := range data { parsed, err := strconv.Atoi(value) if err != nil { result[i] = 0 } else { result[i] = uint8(parsed) } } return result }
utils/genericutils.go
0.715623
0.401394
genericutils.go
starcoder
package raycaster import ( "github.com/mattkimber/gorender/internal/geometry" "github.com/mattkimber/gorender/internal/voxelobject" "math" ) func castFpRay(object voxelobject.ProcessedVoxelObject, loc0 geometry.Vector3, loc geometry.Vector3, ray geometry.Vector3, limits geometry.Vector3, flipY bool) (result RayResult) { if collision, loc, approachedBB := castRayToCandidate(object, loc, ray, limits, flipY); collision { lx, ly, lz, isRecovered := recoverNonSurfaceVoxel(object, loc, ray, limits, flipY) return RayResult{ X: lx, Y: ly, Z: lz, IsRecovered: isRecovered, HasGeometry: true, Depth: int(loc0.Subtract(loc).Length()), ApproachedBoundingBox: approachedBB, } } else if approachedBB { return RayResult{ApproachedBoundingBox: true} } return } func castRayToCandidate(object voxelobject.ProcessedVoxelObject, loc geometry.Vector3, ray geometry.Vector3, limits geometry.Vector3, flipY bool) (bool, geometry.Vector3, bool) { i, fi := 0, 0.0 bSizeY := object.Size.Y - 1 loc0 := loc approachedBB := false for { // CanTerminate is an expensive check but we don't need to run it every cycle if i%4 == 0 && canTerminateRay(loc, ray, limits) { break } if isInsideBoundingVolume(loc, limits) { approachedBB = true lx, ly, lz := int(loc.X), int(loc.Y), int(loc.Z) if flipY { ly = bSizeY - ly } if object.Elements[lx][ly][lz].Index != 0 { return true, loc, approachedBB } } else if !approachedBB && isNearlyInsideBoundingVolume(loc, limits) { approachedBB = true } i++ fi++ loc = loc0.Add(ray.MultiplyByConstant(fi)) } return false, geometry.Vector3{}, approachedBB } // Attempt to recover a non-surface voxel by taking a more DDA-like approach where we trace backward up the ray // starting with X, then Y, then Z, then repeat until we find a surface voxel or bail. 
func recoverNonSurfaceVoxel(object voxelobject.ProcessedVoxelObject, loc geometry.Vector3, ray geometry.Vector3, limits geometry.Vector3, flipY bool) (lx int, ly int, lz int, isRecovered bool) { bSizeY := object.Size.Y - 1 lx, ly, lz = int(loc.X), int(loc.Y), int(loc.Z) if flipY { ly = bSizeY - ly } if isInsideBoundingVolume(loc, limits) && object.Elements[lx][ly][lz].IsSurface { return } // Signify this voxel was recovered isRecovered = true // Check always checks a 9 voxel "halo" check := make([]geometry.Point, 9) checkOrder := []int{4, 1, 7, 3, 5, 0, 2, 6, 8} loc0 := loc x, y, z := ray.X, ray.Y, ray.Z for i := 0; i < 10; i++ { lx, ly, lz = int(loc.X), int(loc.Y), int(loc.Z) if flipY { ly = bSizeY - ly } for j := 0; j < 3; j++ { if math.Abs(x) > math.Abs(y) && math.Abs(x) > math.Abs(z) { // X-major for k := 0; k < 9; k++ { check[k] = geometry.Point{X: lx, Y: ly - 1 + (k % 3), Z: lz - 1 + (k / 3)} } x = 0 } else if math.Abs(y) > math.Abs(x) && math.Abs(y) > math.Abs(z) { // Y-major for k := 0; k < 9; k++ { check[k] = geometry.Point{X: lx - 1 + (k % 3), Y: ly, Z: lz - 1 + (k / 3)} } y = 0 } else if math.Abs(z) > math.Abs(x) && math.Abs(z) > math.Abs(y) { // Z-major for k := 0; k < 9; k++ { check[k] = geometry.Point{X: lx - 1 + (k % 3), Y: ly - 1 + (k / 3), Z: lz} } z = 0 } for k := 0; k < 9; k++ { point := check[checkOrder[k]] pointF := geometry.Vector3{X: float64(point.X), Y: float64(point.Y), Z: float64(point.Z)} lx, ly, lz = point.X, point.Y, point.Z if isInsideBoundingVolume(pointF, limits) { if object.Elements[lx][ly][lz].IsSurface { return } } } if x == 0 && y == 0 && z == 0 { x, y, z = ray.X, ray.Y, ray.Z } } loc = loc.Subtract(ray.Normalise()) } lx, ly, lz = int(loc0.X), int(loc0.Y), int(loc0.Z) if flipY { ly = bSizeY - ly } return } func getIntersectionWithBounds(loc, ray, limits geometry.Vector3) geometry.Vector3 { if canTerminateRay(loc, ray, limits) { return loc } loc = loc.Add(getIntersectionVector(ray.X, loc.X, limits.X, ray)) loc = 
loc.Add(getIntersectionVector(ray.Y, loc.Y, limits.Y, ray)) return loc } func getIntersectionVector(rayDimension, locDimension, limitDimension float64, ray geometry.Vector3) geometry.Vector3 { dist := -1.0 if rayDimension > 0.1 { dist = -locDimension } if rayDimension < -0.1 { dist = limitDimension - locDimension } if dist > 0 { return ray.MultiplyByConstant(dist / rayDimension) } return geometry.Zero() } func isNearlyInsideBoundingVolume(loc geometry.Vector3, limits geometry.Vector3) bool { // We are within 3 voxels of the bounding box, which is considered "approached" return loc.X >= -3 && loc.Y >= -3 && loc.Z >= -3 && loc.X < limits.X+3 && loc.Y < limits.Y+3 && loc.Z < limits.Z+3 } func isInsideBoundingVolume(loc geometry.Vector3, limits geometry.Vector3) bool { return loc.X >= 0 && loc.Y >= 0 && loc.Z >= 0 && loc.X < limits.X && loc.Y < limits.Y && loc.Z < limits.Z } func canTerminateRay(loc geometry.Vector3, ray geometry.Vector3, limits geometry.Vector3) bool { return (loc.X < 0 && ray.X <= 0) || (loc.Y < 0 && ray.Y <= 0) || (loc.Z < 0 && ray.Z <= 0) || (loc.X > limits.X && ray.X >= 0) || (loc.Y > limits.Y && ray.Y >= 0) || (loc.Z > limits.Z && ray.Z >= 0) }
internal/raycaster/fp.go
0.898553
0.561395
fp.go
starcoder
package spec_util import ( "github.com/golang/glog" "github.com/golang/protobuf/proto" pb "github.com/akitasoftware/akita-ir/go/api_spec" ) // A "view" into RefMap that keeps track of the prefix MethodTemplates in order // to avoid returning references that are not useful. A reference is defined as // "unuseful" if it: // - Refers to yet another reference (we will also return the other // reference, so the new reference is redundant). RefMap may return // references to references in response values, so we should filter these // out. // - Refers to a skipped optional field // This cuts down on the number of possible sequences we can generate for a // given data type. type RefMapView struct { m RefMap prefix []*pb.MethodTemplate } func NewRefMapView(m RefMap, prefix []*pb.MethodTemplate) *RefMapView { return &RefMapView{ m: m, prefix: prefix, } } func (v *RefMapView) Copy() *RefMapView { n := &RefMapView{ m: v.m, prefix: make([]*pb.MethodTemplate, len(v.prefix)), } for i, t := range v.prefix { n.prefix[i] = t } return n } func (v *RefMapView) HasRefs(dID DataTypeID) bool { return v.m.HasRefs(dID) } func (v *RefMapView) GetDataRefs(dID DataTypeID, aa *pb.AkitaAnnotations) []*pb.MethodDataRef { refs := make([]*pb.MethodDataRef, 0) for _, ref := range v.m.GetDataRefs(dID, aa) { if isUsefulMethodDataRef(v.prefix[ref.GetMethodIndex()], ref) { refs = append(refs, ref) } } return refs } // Returns the set of data specs that can be filled using the data that's // currently fillable. In most cases, if the arg is fillable, the returned // result is identical. However, if the arg contains oneof fields, all fillable // options are expanded and returned as separate results, where each result no // longer has oneof fields. func (v *RefMapView) GetFillableArgs(arg *pb.Data) []*pb.Data { // Note: we always consider arguments of prefix methods to be valid refs for // filling args. 
This is useful in scenarios such as setting a password on a // file and later using that password to read the file. The API won't echo // back your password in the result. if v.HasRefs(DataToTypeID(arg)) { return []*pb.Data{arg} } switch x := arg.Value.(type) { case *pb.Data_Primitive: if x.Primitive.GetAkitaAnnotations().GetIsFree() { return []*pb.Data{arg} } var fixedValuesLen int switch y := x.Primitive.Value.(type) { case *pb.Primitive_BoolValue: fixedValuesLen = len(y.BoolValue.GetType().GetFixedValues()) case *pb.Primitive_BytesValue: fixedValuesLen = len(y.BytesValue.GetType().GetFixedValues()) case *pb.Primitive_StringValue: fixedValuesLen = len(y.StringValue.GetType().GetFixedValues()) case *pb.Primitive_Int32Value: fixedValuesLen = len(y.Int32Value.GetType().GetFixedValues()) case *pb.Primitive_Int64Value: fixedValuesLen = len(y.Int64Value.GetType().GetFixedValues()) case *pb.Primitive_Uint32Value: fixedValuesLen = len(y.Uint32Value.GetType().GetFixedValues()) case *pb.Primitive_Uint64Value: fixedValuesLen = len(y.Uint64Value.GetType().GetFixedValues()) case *pb.Primitive_DoubleValue: fixedValuesLen = len(y.DoubleValue.GetType().GetFixedValues()) case *pb.Primitive_FloatValue: fixedValuesLen = len(y.FloatValue.GetType().GetFixedValues()) } // Normally, having fixed values does not imply that the arg is fillable. // However, if there is only one fixed value, then the arg is effectively // free. if fixedValuesLen == 1 { return []*pb.Data{arg} } return nil case *pb.Data_Struct: // Map key to list of fillable data for the key. 
alts := make(map[string][]*pb.Data, len(x.Struct.Fields)) for k, field := range x.Struct.Fields { f := v.GetFillableArgs(field) if f == nil { return nil } alts[k] = f } fillableArgs := FlattenAlternatives(alts) results := make([]*pb.Data, 0, len(fillableArgs)) for _, fields := range fillableArgs { results = append(results, &pb.Data{ Value: &pb.Data_Struct{&pb.Struct{Fields: fields}}, Meta: arg.Meta, }) } return results case *pb.Data_List: if len(x.List.Elems) > 0 { results := []*pb.Data{} for _, f := range v.GetFillableArgs(x.List.Elems[0]) { results = append(results, &pb.Data{ Value: &pb.Data_List{&pb.List{Elems: []*pb.Data{f}}}, Meta: arg.Meta, }) } return results } glog.Errorf("Did not expect nil list %s", proto.MarshalTextString(arg)) return nil case *pb.Data_Optional: return []*pb.Data{arg} case *pb.Data_Oneof: results := []*pb.Data{} for _, option := range x.Oneof.Options { results = append(results, v.GetFillableArgs(option)...) } for _, r := range results { r.Meta = MergeOneOfMeta(arg.Meta, r.Meta) } return results default: glog.Errorf("Unsupported data type in GetFillableArgs: %T", x) return nil } } func isUsefulMethodDataRef(mt *pb.MethodTemplate, ref *pb.MethodDataRef) bool { switch r := ref.Ref.(type) { case *pb.MethodDataRef_ArgRef: if arg, ok := mt.GetArgTemplates()[r.ArgRef.GetKey()]; ok { return isUsefulDataRef(arg, r.ArgRef.GetDataRef()) } else { // The argument might be missing because it's optional and skipped. return false } case *pb.MethodDataRef_ResponseRef: // We don't know the response values at sequence generation time, and it's // impossible to have references in response, so this reference is always // useful. 
return true default: glog.Errorf("unrecognized ref type %T, default to useful ref", r) return true } } func isUsefulDataRef(d *pb.DataTemplate, ref *pb.DataRef) bool { switch x := d.ValueTemplate.(type) { case *pb.DataTemplate_StructTemplate: if r, ok := ref.ValueRef.(*pb.DataRef_StructRef); ok { return isUsefulStructRef(x.StructTemplate, r.StructRef) } else { glog.Errorf("got value_ref type %T for struct template, assuming non-useful ref", ref.ValueRef) return false } case *pb.DataTemplate_ListTemplate: if r, ok := ref.ValueRef.(*pb.DataRef_ListRef); ok { return isUsefulListRef(x.ListTemplate, r.ListRef) } else { glog.Errorf("got value_ref type %T for list template, assuming non-useful ref", ref.ValueRef) return false } case *pb.DataTemplate_Value: if resolved, err := GetDataRef(ref, x.Value); err != nil { glog.Errorf("GetDataRef failed, default to non-useful ref: %v", err) return false } else { if opt, isOptional := resolved.Value.(*pb.Data_Optional); isOptional { if _, isNone := opt.Optional.Value.(*pb.Optional_None); isNone { return false } } } return true case *pb.DataTemplate_Ref: return false case *pb.DataTemplate_OptionalTemplate: return isUsefulDataRef(x.OptionalTemplate.GetValueTemplate(), ref) default: glog.Errorf("unrecognized value_template type %T, defaulting to useful ref", x) return true } } func isUsefulStructRef(s *pb.StructTemplate, ref *pb.StructRef) bool { switch r := ref.Ref.(type) { case *pb.StructRef_FullStruct: // TODO: A full struct is only useful if all fields don't contain any refs. // Current implementation is a conservative estimate. return true case *pb.StructRef_FieldRef: if field, ok := s.GetFieldTemplates()[r.FieldRef.GetKey()]; ok { return isUsefulDataRef(field, r.FieldRef.GetDataRef()) } else { // The struct field might be missing because it's optional and skipped. 
return false } default: glog.Errorf("unsupported StructRef type %T, default to useful ref", r) return true } } func isUsefulListRef(l *pb.ListTemplate, ref *pb.ListRef) bool { switch r := ref.Ref.(type) { case *pb.ListRef_FullList: // TODO: A full list is only useful if all elems don't contain any refs. // Current implementation is a conservative estimate. return true case *pb.ListRef_ElemRef: if r.ElemRef.GetIndex() < 0 || r.ElemRef.GetIndex() >= int32(len(l.ElemTemplates)) { // The list elem might be missing because it's optional and skipped. return false } return isUsefulDataRef(l.ElemTemplates[r.ElemRef.GetIndex()], r.ElemRef.GetDataRef()) default: glog.Errorf("unsupported ListRef type %T, default to useful ref", r) return true } }
spec_util/ref_map_view.go
0.661704
0.400046
ref_map_view.go
starcoder
package pdfcpu import "fmt" type dim struct { w, h int } // AspectRatio returns the relation between width and height. func (d dim) AspectRatio() float64 { return float64(d.w) / float64(d.h) } // Landscape returns true if d is in landscape mode. func (d dim) Landscape() bool { return d.AspectRatio() > 1 } // Portrait returns true if d is in portrait mode. func (d dim) Portrait() bool { return d.AspectRatio() < 1 } func (d dim) String() string { return fmt.Sprintf("%dx%d points", d.w, d.h) } // PaperSize is a map of known paper sizes in user units (=72 dpi pixels). var PaperSize = map[string]*dim{ // ISO 216:1975 A "4A0": {4768, 6741}, // 66 1/4" x 93 5/8" 1682 x 2378 mm "2A0": {3370, 4768}, // 46 3/4" x 66 1/4" 1189 x 1682 mm "A0": {2384, 3370}, // 33" x 46 3/4" 841 x 1189 mm "A1": {1684, 2384}, // 23 3/8" x 33" 594 x 841 mm "A2": {1191, 1684}, // 16 1/2" x 23 3/8" 420 x 594 mm "A3": {842, 1191}, // 11 3/4" x 16 1/2" 297 x 420 mm "A4": {595, 842}, // 8 1/4" x 11 3/4" 210 x 297 mm "A5": {420, 595}, // 5 7/8" x 8 1/4" 148 x 210 mm "A6": {298, 420}, // 4 1/8" x 5 7/8" 105 x 148 mm "A7": {210, 298}, // 2 7/8" x 4 1/8" 74 x 105 mm "A8": {147, 210}, // 2" x 2 7/8" 52 x 74 mm "A9": {105, 147}, // 1 1/2" x 2" 37 x 52 mm "A10": {74, 105}, // 1" x 1 1/2" 26 x 37 mm // ISO 216:1975 B "B0+": {3170, 4479}, // 44" x 62 1/4" 1118 x 1580 mm "B0": {2835, 4008}, // 39 3/8" x 55 3/4" 1000 x 1414 mm "B1+": {2041, 2892}, // 28 3/8" x 40 1/8" 720 x 1020 mm "B1": {2004, 2835}, // 27 3/4" x 39 3/8" 707 x 1000 mm "B2+": {1474, 2041}, // 20 1/2" x 28 3/8" 520 x 720 mm "B2": {1417, 2004}, // 19 3/4" x 27 3/4" 500 x 707 mm "B3": {1001, 1417}, // 13 7/8" x 19 3/4" 353 x 500 mm "B4": {709, 1001}, // 9 7/8" x 13 7/8" 250 x 353 mm "B5": {499, 709}, // 7" x 9 7/8" 176 x 250 mm "B6": {354, 499}, // 4 7/8" x 7" 125 x 176 mm "B7": {249, 354}, // 3 1/2" x 4 7/8" 88 x 125 mm "B8": {176, 249}, // 2 1/2" x 3 1/2" 62 x 88 mm "B9": {125, 176}, // 1 3/4" x 2 1/2" 44 x 62 mm "B10": {88, 125}, // 1 1/4" x 1 
3/4" 31 x 44 mm // ISO 269:1985 envelopes aka ISO C "C0": {2599, 3677}, // 36" x 51" 917 x 1297 mm "C1": {1837, 2599}, // 25 1/2" x 36" 648 x 917 mm "C2": {1298, 1837}, // 18" x 25 1/2" 458 x 648 mm "C3": {918, 1298}, // 12 3/4" x 18" 324 x 458 mm "C4": {649, 918}, // 9" x 12 3/4" 229 x 324 mm "C5": {459, 649}, // 6 3/8" x 9" 162 x 229 mm "C6": {323, 459}, // 4 1/2" x 6 3/8" 114 x 162 mm "C7": {230, 323}, // 3 3/16" x 4 1/2" 81 x 114 mm "C8": {162, 230}, // 2 1/4" x 3 3/16 57 x 81 mm "C9": {113, 162}, // 1 5/8" x 2 1/4" 40 x 57 mm "C10": {79, 113}, // 1 1/8" x 1 5/8" 28 x 40 mm // ISO 217:2013 untrimmed raw paper "RA0": {2438, 3458}, // 33.9" x 48.0" 860 x 1220 mm "RA1": {1729, 2438}, // 24.0" x 33.9" 610 x 860 mm "RA2": {1219, 1729}, // 16.9" x 24.0" 430 x 610 mm "RA3": {865, 1219}, // 12.0" x 16.9" 305 x 430 mm "RA4": {610, 865}, // 8.5" x 12.0" 215 x 305 mm "SRA0": {2551, 3628}, // 35.4" x 50.4" 900 x 1280 mm "SRA1": {1814, 2551}, // 25.2" x 35.4" 640 x 900 mm "SRA2": {1276, 1814}, // 17.7" x 25.2" 450 x 640 mm "SRA3": {907, 1276}, // 12.6" x 17.7" 320 x 450 mm "SRA4": {638, 907}, // 8.9" x 12.6" 225 x 320 mm "SRA1+": {2835, 4008}, // 26.0" x 36.2" 660 x 920 mm "SRA2+": {1361, 1843}, // 18.9" x 25.6" 480 x 650 mm "SRA3+": {907, 1304}, // 12.6" x 18.1" 320 x 460 mm "SRA3++": {2835, 4008}, // 12.6" x 18.3" 320 x 464 mm // American "SuperB": {936, 1368}, // 13" x 19" "B+": {936, 1368}, "Tabloid": {791, 1225}, // 11" x 17" ANSIB, DobleCarta "ExtraTabloid": {865, 1296}, // 12" x 18" ARCHB, Arch2 "Ledger": {1225, 791}, // 17" x 11" ANSIB "Legal": {612, 1009}, // 8 1/2" x 14" "GovLegal": {612, 936}, // 8 1/2" x 13" "Oficio": {612, 936}, "Folio": {612, 936}, "Letter": {612, 791}, // 8 1/2" x 11" ANSIA "Carta": {612, 791}, "AmericanQuarto": {612, 791}, "DobleCarta": {791, 1225}, // 11" x 17" Tabloid, ANSIB "GovLetter": {576, 757}, // 8" x 10 1/2" "Executive": {522, 756}, // 7 1/4" x 10 1/2" "HalfLetter": {397, 612}, // 5 1/2" x 8 1/2" "Memo": {397, 612}, "Statement": 
{397, 612}, "Stationary": {397, 612}, "JuniorLegal": {360, 576}, // 5" x 8" "IndexCard": {360, 576}, "Photo": {288, 432}, // 4" x 6" // ANSI/ASME Y14.1 "ANSIA": {612, 791}, // 8 1/2" x 11" Letter, Carta, AmericanQuarto "ANSIB": {791, 1225}, // 11" x 17" Ledger, Tabloid, DobleCarta "ANSIC": {1225, 1585}, // 17" x 22" "ANSID": {1585, 2449}, // 22" x 34" "ANSIE": {2449, 3170}, // 34" x 44" "ANSIF": {2016, 2880}, // 28" x 40" // ANSI/ASME Y14.1 Architectural series "ARCHA": {649, 865}, // 9" x 12" Arch 1 "ARCHB": {865, 1296}, // 12" x 18" Arch 2, ExtraTabloide "ARCHC": {1296, 1729}, // 18" x 24" Arch 3 "ARCHD": {1729, 2591}, // 24" x 36" Arch 4 "ARCHE": {2591, 3456}, // 36" x 48" Arch 6 "ARCHE1": {2160, 3025}, // 30" x 42" Arch 5 "ARCHE2": {1871, 2736}, // 26" x 38" "ARCHE3": {1945, 2809}, // 27" x 39" "Arch1": {649, 865}, // 9" x 12" ARCHA "Arch2": {865, 1296}, // 12" x 18" ARCHB, ExtraTabloide "Arch3": {1296, 1729}, // 18" x 24" ARCHC "Arch4": {1729, 2591}, // 24" x 36" ARCHD "Arch5": {2160, 3025}, // 30" x 42" ARCHE1 "Arch6": {2591, 3456}, // 36" x 48" ARCHE // American Uncut "Bond": {1584, 1224}, // 22" x 17" "Book": {2736, 1800}, // 38" x 25" "Cover": {1872, 1440}, // 26" x 20" "Index": {2196, 1836}, // 30 1/2" x 25 1/2" "Newsprint": {2592, 1728}, // 36" x 24" "Tissue": {2592, 1728}, "Offset": {2736, 1800}, // 38" x 25" "Text": {2736, 1800}, // English Uncut "Crown": {1170, 1512}, // 16 1/4" x 21" "DoubleCrown": {1440, 2160}, // 20" x 30" "Quad": {2160, 2880}, // 30" x 40" "Demy": {1242, 1620}, // 17 3/4" x 22 1/2" "DoubleDemy": {1620, 2556}, // 22 1/2" x 35 1/2" "Medium": {1314, 1656}, // 18 1/4" x 23" "Royal": {1440, 1804}, // 20" x 25 1/16" "SuperRoyal": {1512, 1944}, // 21" x 27" "DoublePott": {1080, 1800}, // 15" x 25" "DoublePost": {1368, 2196}, // 19" x 30 1/2" "Foolscap": {972, 1224}, // 13 1/2" x 17" "DoubleFoolscap": {1224, 1944}, // 17" x 27" "F4": {595, 935}, // 8 1/4" x 13" // GB/T 148-1997 D Series China "D0": {2166, 3016}, // 29.9" x 41.9" 764 x 
1064 mm "D1": {1508, 2155}, // 20.9" x 29.9" 532 x 760 mm "D2": {1077, 1497}, // 15.0" x 20.8" 380 x 528 mm "D3": {748, 1066}, // 10.4" x 14.8" 264 x 376 mm "D4": {533, 737}, // 7.4" x 10.2" 188 x 260 mm "D5": {369, 522}, // 5.1" x 7.2" 130 x 184 mm "D6": {261, 357}, // 3.6" x 5.0" 92 x 126 mm "RD0": {2231, 3096}, // 31.0" x 43.0" 787 x 1092 mm "RD1": {1548, 2231}, // 21.5" x 31.0" 546 x 787 mm "RD2": {1114, 1548}, // 15.5" x 21.5" 393 x 546 mm "RD3": {774, 1114}, // 10.7" x 15.5" 273 x 393 mm "RD4": {556, 774}, // 7.7" x 10.7" 196 x 273 mm "RD5": {386, 556}, // 5.4" x 7.7" 136 x 196 mm "RD6": {278, 386}, // 3.9" x 5.4" 98 x 136 mm // Japanese B-series variant "JIS-B0": {2920, 4127}, // 40.55" x 57.32" 1030 x 1456 mm "JIS-B1": {2064, 2920}, // 28.66" x 40.55" 728 x 1030 mm "JIS-B2": {1460, 2064}, // 20.28" x 28.66" 515 x 728 mm "JIS-B3": {1032, 1460}, // 14.33" x 20.28" 364 x 515 mm "JIS-B4": {729, 1032}, // 10.12" x 14.33" 257 x 364 mm "JIS-B5": {516, 729}, // 7.17" x 10.12" 182 x 257 mm "JIS-B6": {363, 516}, // 5.04" x 7.17" 128 x 182 mm "JIS-B7": {258, 363}, // 3.58" x 5.04" 91 x 128 mm "JIS-B8": {181, 258}, // 2.52" x 3.58" 64 x 91 mm "JIS-B9": {127, 181}, // 1.77" x 2.52" 45 x 64 mm "JIS-B10": {91, 127}, // 1.26" x 1.77" 32 x 45 mm "JIS-B11": {63, 91}, // 0.87" x 1.26" 22 x 32 mm "JIS-B12": {45, 63}, // 0.63" x 0.87" 16 x 22 mm "Shirokuban4": {748, 1074}, // 10.39" x 14.92" 264 x 379 mm "Shirokuban5": {536, 742}, // 7.44" x 10.31" 189 x 262 mm "Shirokuban6": {360, 533}, // 5.00" x 7.40" 127 x 188 mm "Kiku4": {644, 868}, // 8.94" x 12.05" 227 x 306 mm "Kiku5": {428, 644}, // 5.95" x 8.94" 151 x 227 mm "AB": {595, 729}, // 8.27" x 10.12" 210 x 257 mm "B40": {292, 516}, // 4.06" x 7.17" 103 x 182 mm "Shikisen": {238, 420}, // 3.31" x 5.83" 84 x 148 mm }
pkg/pdfcpu/paperSize.go
0.672009
0.478102
paperSize.go
starcoder
package avro import ( "errors" "fmt" "reflect" ) // Reader is an interface that may be implemented to avoid using runtime reflection during deserialization. // Implementing it is optional and may be used as an optimization. Falls back to using reflection if not implemented. type Reader interface { Read(dec Decoder) error } // DatumReader is an interface that is responsible for reading structured data according to schema from a decoder type DatumReader interface { // Reads a single structured entry using this DatumReader according to provided Schema. // Accepts a value to fill with data and a Decoder to read from. Given value MUST be of pointer type. // May return an error indicating a read failure. Read(interface{}, Decoder) error // Sets the schema for this DatumReader to know the data structure. // Note that it must be called before calling Read. SetSchema(Schema) } // Generic Avro enum representation. This is still subject to change and may be rethought. type GenericEnum struct { // Avro enum symbols. Symbols []string symbolsToIndex map[string]int32 index int32 } // Returns a new GenericEnum that uses provided enum symbols. func NewGenericEnum(symbols []string) *GenericEnum { symbolsToIndex := make(map[string]int32) for index, symbol := range symbols { symbolsToIndex[symbol] = int32(index) } return &GenericEnum{ Symbols: symbols, symbolsToIndex: symbolsToIndex, } } // Gets the numeric value for this enum. func (this *GenericEnum) GetIndex() int32 { return this.index } // Gets the string value for this enum (e.g. symbol). func (this *GenericEnum) Get() string { return this.Symbols[this.index] } // Sets the numeric value for this enum. func (this *GenericEnum) SetIndex(index int32) { this.index = index } // Sets the string value for this enum (e.g. symbol). // Panics if the given symbol does not exist in this enum. 
func (this *GenericEnum) Set(symbol string) { if index, exists := this.symbolsToIndex[symbol]; !exists { panic("Unknown enum symbol") } else { this.index = index } } // SpecificDatumReader implements DatumReader and is used for filling Go structs with data. // Each value passed to Read is expected to be a pointer. type SpecificDatumReader struct { schema Schema } // Creates a new SpecificDatumReader. func NewSpecificDatumReader() *SpecificDatumReader { return &SpecificDatumReader{} } // Sets the schema for this SpecificDatumReader to know the data structure. // Note that it must be called before calling Read. func (this *SpecificDatumReader) SetSchema(schema Schema) { this.schema = schema } // Reads a single structured entry using this SpecificDatumReader. // Accepts a Go struct with exported fields to fill with data and a Decoder to read from. Given value MUST be of // pointer type. Field names should match field names in Avro schema but be exported (e.g. "some_value" in Avro // schema is expected to be Some_value in struct) or you may provide Go struct tags to explicitly show how // to map fields (e.g. if you want to map "some_value" field of type int to SomeValue in Go struct you should define // your struct field as follows: SomeValue int32 `avro:"some_field"`). // May return an error indicating a read failure. 
func (this *SpecificDatumReader) Read(v interface{}, dec Decoder) error { if reader, ok := v.(Reader); ok { return reader.Read(dec) } rv := reflect.ValueOf(v) if rv.Kind() != reflect.Ptr || rv.IsNil() { return errors.New("Not applicable for non-pointer types or nil") } if this.schema == nil { return SchemaNotSet } sch := this.schema.(*RecordSchema) for i := 0; i < len(sch.Fields); i++ { field := sch.Fields[i] this.findAndSet(v, field, dec) } return nil } func (this *SpecificDatumReader) findAndSet(v interface{}, field *SchemaField, dec Decoder) error { structField, err := findField(reflect.ValueOf(v), field.Name) if err != nil { return err } value, err := this.readValue(field.Type, structField, dec) if err != nil { return err } this.setValue(field, structField, value) return nil } func (this *SpecificDatumReader) readValue(field Schema, reflectField reflect.Value, dec Decoder) (reflect.Value, error) { switch field.Type() { case Null: return reflect.ValueOf(nil), nil case Boolean: return this.mapPrimitive(func() (interface{}, error) { return dec.ReadBoolean() }) case Int: return this.mapPrimitive(func() (interface{}, error) { return dec.ReadInt() }) case Long: return this.mapPrimitive(func() (interface{}, error) { return dec.ReadLong() }) case Float: return this.mapPrimitive(func() (interface{}, error) { return dec.ReadFloat() }) case Double: return this.mapPrimitive(func() (interface{}, error) { return dec.ReadDouble() }) case Bytes: return this.mapPrimitive(func() (interface{}, error) { return dec.ReadBytes() }) case String: return this.mapPrimitive(func() (interface{}, error) { return dec.ReadString() }) case Array: return this.mapArray(field, reflectField, dec) case Enum: return this.mapEnum(field, dec) case Map: return this.mapMap(field, reflectField, dec) case Union: return this.mapUnion(field, reflectField, dec) case Fixed: return this.mapFixed(field, dec) case Record: return this.mapRecord(field, reflectField, dec) case Recursive: return 
this.mapRecord(field.(*RecursiveSchema).Actual, reflectField, dec) } return reflect.ValueOf(nil), fmt.Errorf("Unknown field type: %v", field.Type()) } func (this *SpecificDatumReader) setValue(field *SchemaField, where reflect.Value, what reflect.Value) { zero := reflect.Value{} if zero != what { where.Set(what) } } func (this *SpecificDatumReader) mapPrimitive(reader func() (interface{}, error)) (reflect.Value, error) { if value, err := reader(); err != nil { return reflect.ValueOf(value), err } else { return reflect.ValueOf(value), nil } } func (this *SpecificDatumReader) mapArray(field Schema, reflectField reflect.Value, dec Decoder) (reflect.Value, error) { if arrayLength, err := dec.ReadArrayStart(); err != nil { return reflect.ValueOf(arrayLength), err } else { array := reflect.MakeSlice(reflectField.Type(), 0, 0) for { arrayPart := reflect.MakeSlice(reflectField.Type(), int(arrayLength), int(arrayLength)) var i int64 = 0 for ; i < arrayLength; i++ { val, err := this.readValue(field.(*ArraySchema).Items, arrayPart.Index(int(i)), dec) if err != nil { return reflect.ValueOf(arrayLength), err } pointer := reflectField.Type().Elem().Kind() == reflect.Ptr if pointer && val.Kind() != reflect.Ptr { val = val.Addr() } else if !pointer && val.Kind() == reflect.Ptr { val = val.Elem() } arrayPart.Index(int(i)).Set(val) } //concatenate arrays concatArray := reflect.MakeSlice(reflectField.Type(), array.Len()+int(arrayLength), array.Cap()+int(arrayLength)) reflect.Copy(concatArray, array) reflect.Copy(concatArray, arrayPart) array = concatArray arrayLength, err = dec.ArrayNext() if err != nil { return reflect.ValueOf(arrayLength), err } else if arrayLength == 0 { break } } return array, nil } } func (this *SpecificDatumReader) mapMap(field Schema, reflectField reflect.Value, dec Decoder) (reflect.Value, error) { if mapLength, err := dec.ReadMapStart(); err != nil { return reflect.ValueOf(mapLength), err } else { resultMap := reflect.MakeMap(reflectField.Type()) for { var i 
int64 = 0 for ; i < mapLength; i++ { key, err := this.readValue(&StringSchema{}, reflectField, dec) if err != nil { return reflect.ValueOf(mapLength), err } val, err := this.readValue(field.(*MapSchema).Values, reflectField, dec) if err != nil { return reflect.ValueOf(mapLength), nil } if val.Kind() == reflect.Ptr { resultMap.SetMapIndex(key, val.Elem()) } else { resultMap.SetMapIndex(key, val) } } mapLength, err = dec.MapNext() if err != nil { return reflect.ValueOf(mapLength), err } else if mapLength == 0 { break } } return resultMap, nil } } func (this *SpecificDatumReader) mapEnum(field Schema, dec Decoder) (reflect.Value, error) { if enumIndex, err := dec.ReadEnum(); err != nil { return reflect.ValueOf(enumIndex), err } else { enum := NewGenericEnum(field.(*EnumSchema).Symbols) enum.SetIndex(enumIndex) return reflect.ValueOf(enum), nil } } func (this *SpecificDatumReader) mapUnion(field Schema, reflectField reflect.Value, dec Decoder) (reflect.Value, error) { if unionType, err := dec.ReadInt(); err != nil { return reflect.ValueOf(unionType), err } else { union := field.(*UnionSchema).Types[unionType] return this.readValue(union, reflectField, dec) } } func (this *SpecificDatumReader) mapFixed(field Schema, dec Decoder) (reflect.Value, error) { fixed := make([]byte, field.(*FixedSchema).Size) if err := dec.ReadFixed(fixed); err != nil { return reflect.ValueOf(fixed), err } return reflect.ValueOf(fixed), nil } func (this *SpecificDatumReader) mapRecord(field Schema, reflectField reflect.Value, dec Decoder) (reflect.Value, error) { var t reflect.Type switch reflectField.Kind() { case reflect.Ptr, reflect.Array, reflect.Map, reflect.Slice, reflect.Chan: t = reflectField.Type().Elem() default: t = reflectField.Type() } record := reflect.New(t).Interface() recordSchema := field.(*RecordSchema) for i := 0; i < len(recordSchema.Fields); i++ { this.findAndSet(record, recordSchema.Fields[i], dec) } return reflect.ValueOf(record), nil } // GenericDatumReader implements 
DatumReader and is used for filling GenericRecords or other Avro supported types // (full list is: interface{}, bool, int32, int64, float32, float64, string, slices of any type, maps with string keys // and any values, GenericEnums) with data. // Each value passed to Read is expected to be a pointer. type GenericDatumReader struct { schema Schema } // Creates a new GenericDatumReader. func NewGenericDatumReader() *GenericDatumReader { return &GenericDatumReader{} } // Sets the schema for this GenericDatumReader to know the data structure. // Note that it must be called before calling Read. func (this *GenericDatumReader) SetSchema(schema Schema) { this.schema = schema } // Reads a single entry using this GenericDatumReader. // Accepts a value to fill with data and a Decoder to read from. Given value MUST be of pointer type. // May return an error indicating a read failure. func (this *GenericDatumReader) Read(v interface{}, dec Decoder) error { rv := reflect.ValueOf(v) if rv.Kind() != reflect.Ptr || rv.IsNil() { return errors.New("Not applicable for non-pointer types or nil") } rv = rv.Elem() if this.schema == nil { return SchemaNotSet } //read the value value, err := this.readValue(this.schema, dec) if err != nil { return err } newValue := reflect.ValueOf(value) // dereference the value if needed if newValue.Kind() == reflect.Ptr { newValue = newValue.Elem() } //set the new value rv.Set(newValue) return nil } func (this *GenericDatumReader) findAndSet(record *GenericRecord, field *SchemaField, dec Decoder) error { value, err := this.readValue(field.Type, dec) if err != nil { return err } switch typedValue := value.(type) { case *GenericEnum: if typedValue.GetIndex() >= int32(len(typedValue.Symbols)) { return errors.New("Enum index invalid!") } record.Set(field.Name, typedValue.Symbols[typedValue.GetIndex()]) default: record.Set(field.Name, value) } return nil } func (this *GenericDatumReader) readValue(field Schema, dec Decoder) (interface{}, error) { switch 
field.Type() { case Null: return nil, nil case Boolean: return dec.ReadBoolean() case Int: return dec.ReadInt() case Long: return dec.ReadLong() case Float: return dec.ReadFloat() case Double: return dec.ReadDouble() case Bytes: return dec.ReadBytes() case String: return dec.ReadString() case Array: return this.mapArray(field, dec) case Enum: return this.mapEnum(field, dec) case Map: return this.mapMap(field, dec) case Union: return this.mapUnion(field, dec) case Fixed: return this.mapFixed(field, dec) case Record: return this.mapRecord(field, dec) case Recursive: return this.mapRecord(field.(*RecursiveSchema).Actual, dec) } return nil, fmt.Errorf("Unknown field type: %v", field.Type()) } func (this *GenericDatumReader) mapArray(field Schema, dec Decoder) ([]interface{}, error) { if arrayLength, err := dec.ReadArrayStart(); err != nil { return nil, err } else { array := make([]interface{}, 0) for { arrayPart := make([]interface{}, arrayLength, arrayLength) var i int64 = 0 for ; i < arrayLength; i++ { val, err := this.readValue(field.(*ArraySchema).Items, dec) if err != nil { return nil, err } arrayPart[i] = val } //concatenate arrays concatArray := make([]interface{}, len(array)+int(arrayLength), cap(array)+int(arrayLength)) copy(concatArray, array) copy(concatArray, arrayPart) array = concatArray arrayLength, err = dec.ArrayNext() if err != nil { return nil, err } else if arrayLength == 0 { break } } return array, nil } } func (this *GenericDatumReader) mapEnum(field Schema, dec Decoder) (*GenericEnum, error) { if enumIndex, err := dec.ReadEnum(); err != nil { return nil, err } else { enum := NewGenericEnum(field.(*EnumSchema).Symbols) enum.SetIndex(enumIndex) return enum, nil } } func (this *GenericDatumReader) mapMap(field Schema, dec Decoder) (map[string]interface{}, error) { if mapLength, err := dec.ReadMapStart(); err != nil { return nil, err } else { resultMap := make(map[string]interface{}) for { var i int64 = 0 for ; i < mapLength; i++ { key, err := 
this.readValue(&StringSchema{}, dec) if err != nil { return nil, err } val, err := this.readValue(field.(*MapSchema).Values, dec) if err != nil { return nil, nil } resultMap[key.(string)] = val } mapLength, err = dec.MapNext() if err != nil { return nil, err } else if mapLength == 0 { break } } return resultMap, nil } } func (this *GenericDatumReader) mapUnion(field Schema, dec Decoder) (interface{}, error) { if unionType, err := dec.ReadInt(); err != nil { return nil, err } else { union := field.(*UnionSchema).Types[unionType] return this.readValue(union, dec) } } func (this *GenericDatumReader) mapFixed(field Schema, dec Decoder) ([]byte, error) { fixed := make([]byte, field.(*FixedSchema).Size) if err := dec.ReadFixed(fixed); err != nil { return nil, err } return fixed, nil } func (this *GenericDatumReader) mapRecord(field Schema, dec Decoder) (*GenericRecord, error) { record := NewGenericRecord(field) recordSchema := field.(*RecordSchema) for i := 0; i < len(recordSchema.Fields); i++ { this.findAndSet(record, recordSchema.Fields[i], dec) } return record, nil }
datum_reader.go
0.808332
0.444866
datum_reader.go
starcoder
package timeutil import ( "strings" "time" ) const ( // OneSecond is the number of millisecond for a second OneSecond int64 = 1000 // OneMinute is the number of millisecond for a minute OneMinute = 60 * OneSecond // OneHour is the number of millisecond for an hour OneHour = 60 * OneMinute // OneDay is the number of millisecond for a day OneDay = 24 * OneHour // OneWeek is the number of millisecond for a week OneWeek = 7 * OneDay // OneMonth is the number of millisecond for a month OneMonth = 31 * OneDay // OneYear is the number of millisecond for a year OneYear = 365 * OneDay //TODO ???? dataTimeFormat1 = "20060102 15:04:05" dataTimeFormat2 = "2006-01-02 15:04:05" dataTimeFormat3 = "2006/01/02 15:04:05" ) // FormatTimestamp returns timestamp format based on layout func FormatTimestamp(timestamp int64, layout string) string { t := time.Unix(timestamp/1000, 0) return t.Format(layout) } // ParseTimestamp parses timestamp str value based on layout using local zone func ParseTimestamp(timestampStr string, layout ...string) (int64, error) { var format string if len(layout) > 0 { format = layout[0] } else { switch { case strings.Index(timestampStr, "-") > 0: format = dataTimeFormat2 case strings.Index(timestampStr, "/") > 0: format = dataTimeFormat3 default: format = dataTimeFormat1 } } tm, err := time.ParseInLocation(format, timestampStr, time.Local) if err != nil { return 0, err } return tm.UnixNano() / 1000000, nil } // Now returns t as a Unix time, the number of millisecond elapsed // since January 1, 1970 UTC. The result does not depend on the // location associated with t. 
func Now() int64 { return time.Now().UnixNano() / 1000000 } // CalPointCount calculates point counts between start time and end time by interval func CalPointCount(startTime, endTime, interval int64) int { diff := endTime - startTime pointCount := diff / interval if diff%interval > 0 { pointCount++ } if pointCount == 0 { pointCount = 1 } return int(pointCount) } func CalIntervalRatio(queryInterval, storageInterval int64) int { if queryInterval < storageInterval { return 1 } return int(queryInterval / storageInterval) }
pkg/timeutil/time.go
0.540439
0.436022
time.go
starcoder
package goose4 import ( "encoding/json" "time" ) // Test provides a way of having an API pass it's own healthcheck tests, // https://github.com/beamly/SE4/blob/master/SE4.md#healthcheck) // into goose4 to be run for the `/healthcheck/` endpoints. These are run in parallel // and so tests which rely on one another/ sequentialness are not allowed type Test struct { // A simple name to help identify tests from one another // there is no enforcement of uniqueness- it is left to the developer // to ensure these names make sense Name string `json:"test_name"` // RequiredForASG toggles whether the result of this Test is taken into account when checking ASG status RequiredForASG bool `json:"-"` // RequiredForGTG toggles whether the result of this Test is taken into account when checking GTG status RequiredForGTG bool `json:"-"` // F is a function which returns true for successful or false for a failure F func() bool `json:"-"` // The following are overwritten on whatsit Result string `json:"test_result"` Duration string `json:"duration_millis"` TestTime time.Time `json:"tested_at"` } func (t *Test) run() bool { t.TestTime = time.Now() success := t.F() if success { t.Result = "passed" } else { t.Result = "failed" } t.Duration = time.Since(t.TestTime).String() return success } // Healthcheck provides a full view of healthchecks and whether they fail or not type Healthcheck struct { ReportTime time.Time `json:"report_as_of"` Duration string `json:"report_duration"` Tests []Test `json:"tests"` } // NewHealthcheck creates a new Healthcheck func NewHealthcheck(t []Test) Healthcheck { return Healthcheck{ Tests: t, } } const ( testAll = iota testASGOnly testGTGOnly ) // All runs all tests; both RequiredByGTG and RequiredByASG options are ignored func (h *Healthcheck) All() (output []byte, errors bool, err error) { return h.executeTests(testAll) } // GTG runs tests that have RequiredByGTG option enabled func (h *Healthcheck) GTG() (output []byte, errors bool, err error) { return 
h.executeTests(testGTGOnly) } // ASG runs tests that have RequiredByASG option enabled func (h *Healthcheck) ASG() (output []byte, errors bool, err error) { return h.executeTests(testASGOnly) } func (h *Healthcheck) executeTests(mode int) ([]byte, bool, error) { h.ReportTime = time.Now() var errs bool bchan := make(chan Test) testList := h.getTestsByMode(mode) if len(testList) > 0 { for _, t := range testList { go func(t0 Test) { if !t0.run() { errs = true } bchan <- t0 }(t) } count := 1 completedTests := []Test{} for t := range bchan { completedTests = append(completedTests, t) if count == len(testList) { break } count++ } h.Tests = completedTests } h.Duration = time.Since(h.ReportTime).String() j, err := json.Marshal(h) return j, errs, err } func (h *Healthcheck) getTestsByMode(mode int) (filteredTests []Test) { for _, t := range h.Tests { switch mode { case testASGOnly: if t.RequiredForASG { filteredTests = append(filteredTests, t) } case testGTGOnly: if t.RequiredForGTG { filteredTests = append(filteredTests, t) } case testAll: filteredTests = append(filteredTests, t) } } return }
healthcheck.go
0.776453
0.497192
healthcheck.go
starcoder
package condition import ( "encoding/json" "github.com/Jeffail/benthos/v3/lib/log" "github.com/Jeffail/benthos/v3/lib/message" "github.com/Jeffail/benthos/v3/lib/metrics" "github.com/Jeffail/benthos/v3/lib/types" ) //------------------------------------------------------------------------------ func init() { Constructors[TypeAny] = TypeSpec{ constructor: NewAny, Description: ` Any is a condition that tests a child condition against each message of a batch individually. If any message passes the child condition then this condition also passes. For example, if we wanted to check that at least one message of a batch contains the word 'foo' we could use this config: ` + "``` yaml" + ` any: text: operator: contains arg: foo ` + "```" + ``, sanitiseConfigFunc: func(conf Config) (interface{}, error) { if conf.Any.Config == nil { return struct{}{}, nil } return SanitiseConfig(*conf.Any.Config) }, } } //------------------------------------------------------------------------------ // AnyConfig is a configuration struct containing fields for the Any condition. type AnyConfig struct { *Config `yaml:",inline" json:",inline"` } // NewAnyConfig returns a AnyConfig with default values. func NewAnyConfig() AnyConfig { return AnyConfig{ Config: nil, } } // MarshalJSON prints an empty object instead of nil. func (m AnyConfig) MarshalJSON() ([]byte, error) { if m.Config != nil { return json.Marshal(m.Config) } return json.Marshal(struct{}{}) } // MarshalYAML prints an empty object instead of nil. func (m AnyConfig) MarshalYAML() (interface{}, error) { if m.Config != nil { return *m.Config, nil } return struct{}{}, nil } // UnmarshalJSON ensures that when parsing child config it is initialised. func (m *AnyConfig) UnmarshalJSON(bytes []byte) error { if m.Config == nil { nConf := NewConfig() m.Config = &nConf } return json.Unmarshal(bytes, m.Config) } // UnmarshalYAML ensures that when parsing child config it is initialised. 
func (m *AnyConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { if m.Config == nil { nConf := NewConfig() m.Config = &nConf } return unmarshal(m.Config) } //------------------------------------------------------------------------------ // Any is a condition that returns the logical or of all children. type Any struct { child Type mCount metrics.StatCounter mTrue metrics.StatCounter mFalse metrics.StatCounter } // NewAny returns an Any condition. func NewAny( conf Config, mgr types.Manager, log log.Modular, stats metrics.Type, ) (Type, error) { childConf := conf.Any.Config if childConf == nil { newConf := NewConfig() childConf = &newConf } child, err := New(*childConf, mgr, log.NewModule(".any"), metrics.Namespaced(stats, "any")) if err != nil { return nil, err } return &Any{ child: child, mCount: stats.GetCounter("count"), mTrue: stats.GetCounter("true"), mFalse: stats.GetCounter("false"), }, nil } //------------------------------------------------------------------------------ // Check attempts to check a message part against a configured condition. func (c *Any) Check(msg types.Message) bool { c.mCount.Incr(1) for i := 0; i < msg.Len(); i++ { if c.child.Check(message.Lock(msg, i)) { c.mTrue.Incr(1) return true } } c.mFalse.Incr(1) return false } //------------------------------------------------------------------------------
lib/condition/any.go
0.736116
0.630002
any.go
starcoder
package bs

import "github.com/rannoch/cldr"

// calendar holds the CLDR calendar data (date/time patterns plus month, day
// and day-period names) for the Bosnian ("bs") locale. Empty
// *FormatNameValue{} literals mean the CLDR data defines no names for that
// width in this locale.
var calendar = cldr.Calendar{
	Formats: cldr.CalendarFormats{
		// Pattern letters follow CLDR conventions; in the DateTime patterns
		// {1} is the date placeholder and {0} the time placeholder.
		Date:     cldr.CalendarDateFormat{Full: "EEEE, dd. MMMM y.", Long: "dd. MMMM y.", Medium: "dd. MMM. y.", Short: "dd.MM.yy."},
		Time:     cldr.CalendarDateFormat{Full: "HH:mm:ss zzzz", Long: "HH:mm:ss z", Medium: "HH:mm:ss", Short: "HH:mm"},
		DateTime: cldr.CalendarDateFormat{Full: "{1} 'u' {0}", Long: "{1} 'u' {0}", Medium: "{1} {0}", Short: "{1} {0}"},
	},
	FormatNames: cldr.CalendarFormatNames{
		Months: cldr.CalendarMonthFormatNames{
			Abbreviated: cldr.CalendarMonthFormatNameValue{Jan: "jan", Feb: "feb", Mar: "mar", Apr: "apr", May: "maj", Jun: "jun", Jul: "jul", Aug: "aug", Sep: "sep", Oct: "okt", Nov: "nov", Dec: "dec"},
			Narrow:      cldr.CalendarMonthFormatNameValue{Jan: "j", Feb: "f", Mar: "m", Apr: "a", May: "m", Jun: "j", Jul: "j", Aug: "a", Sep: "s", Oct: "o", Nov: "n", Dec: "d"},
			Short:       cldr.CalendarMonthFormatNameValue{},
			Wide:        cldr.CalendarMonthFormatNameValue{Jan: "januar", Feb: "februar", Mar: "mart", Apr: "april", May: "maj", Jun: "juni", Jul: "juli", Aug: "august", Sep: "septembar", Oct: "oktobar", Nov: "novembar", Dec: "decembar"},
		},
		Days: cldr.CalendarDayFormatNames{
			Abbreviated: cldr.CalendarDayFormatNameValue{Sun: "ned", Mon: "pon", Tue: "uto", Wed: "sri", Thu: "čet", Fri: "pet", Sat: "sub"},
			Narrow:      cldr.CalendarDayFormatNameValue{Sun: "n", Mon: "p", Tue: "u", Wed: "s", Thu: "č", Fri: "p", Sat: "s"},
			Short:       cldr.CalendarDayFormatNameValue{Sun: "ned", Mon: "pon", Tue: "uto", Wed: "sri", Thu: "čet", Fri: "pet", Sat: "sub"},
			Wide:        cldr.CalendarDayFormatNameValue{Sun: "nedjelja", Mon: "ponedjeljak", Tue: "utorak", Wed: "srijeda", Thu: "četvrtak", Fri: "petak", Sat: "subota"},
		},
		Periods: cldr.CalendarPeriodFormatNames{
			Abbreviated: cldr.CalendarPeriodFormatNameValue{},
			Narrow:      cldr.CalendarPeriodFormatNameValue{AM: "prijepodne", PM: "popodne"},
			Short:       cldr.CalendarPeriodFormatNameValue{},
			Wide:        cldr.CalendarPeriodFormatNameValue{AM: "prije podne", PM: "popodne"},
		},
	},
}
resources/locales/bs/calendar.go
0.508544
0.431884
calendar.go
starcoder
// affine2d provides affine transformation for 2D graphics.
// This code comes from nanovgo with gopher.js optimization added.
// https://github.com/shibukawa/nanovgo
package affine2d

import (
	"github.com/rkusa/gm/math32"
)

// Scala is the element type of vectors and matrices.
// Scala is a float32 on regular environments, and float64 for Gopher.js
// (https://github.com/gopherjs/gopherjs#performance-tips).
// NOTE(review): in this file Scala is float32; the float64 Gopher.js variant
// is presumably selected by a build-tagged sibling file — confirm.
type Scala float32

const (
	// Pi is a constant of Pi value in Scala type.
	Pi Scala = Scala(math32.Pi)
)

// The following functions can be used to make calculations on 2x3 transformation matrices.

// Matrix is a 2x3 affine transformation matrix represented as [6]Scala,
// laid out column-major as {a, b, c, d, tx, ty}:
//
//	| a c tx |
//	| b d ty |
type Matrix [6]Scala

// IdentityMatrix returns the identity transform.
func IdentityMatrix() Matrix {
	return Matrix{1.0, 0.0, 0.0, 1.0, 0.0, 0.0}
}

// TranslateMatrix returns a translation matrix for offset (tx, ty).
func TranslateMatrix(tx, ty Scala) Matrix {
	return Matrix{1.0, 0.0, 0.0, 1.0, tx, ty}
}

// ScaleMatrix returns a scale matrix with factors (sx, sy).
func ScaleMatrix(sx, sy Scala) Matrix {
	return Matrix{sx, 0.0, 0.0, sy, 0.0, 0.0}
}

// RotateMatrix returns a rotation matrix. Angle is specified in radians.
func RotateMatrix(a Scala) Matrix {
	sin, cos := math32.Sincos(float32(a))
	return Matrix{Scala(cos), Scala(sin), Scala(-sin), Scala(cos), 0.0, 0.0}
}

// SkewXMatrix returns a skew-x matrix. Angle is specified in radians.
func SkewXMatrix(a Scala) Matrix {
	return Matrix{1.0, 0.0, Scala(math32.Tan(float32(a))), 1.0, 0.0, 0.0}
}

// SkewYMatrix returns a skew-y matrix. Angle is specified in radians.
func SkewYMatrix(a Scala) Matrix {
	return Matrix{1.0, Scala(math32.Tan(float32(a))), 0.0, 1.0, 0.0, 0.0}
}

// Multiply returns the product A*B where A is the receiver and B is s.
// Matrix is an array value type, so t is a copy and the caller's matrix is
// not modified; the mutated copy is returned.
func (t Matrix) Multiply(s Matrix) Matrix {
	// t0/t2/t4 are staged in temporaries because t[0], t[2] and t[4] are
	// still read by the t[1]/t[3]/t[5] computations below.
	t0 := t[0]*s[0] + t[1]*s[2]
	t2 := t[2]*s[0] + t[3]*s[2]
	t4 := t[4]*s[0] + t[5]*s[2] + s[4]
	t[1] = t[0]*s[1] + t[1]*s[3]
	t[3] = t[2]*s[1] + t[3]*s[3]
	t[5] = t[4]*s[1] + t[5]*s[3] + s[5]
	t[0] = t0
	t[2] = t2
	t[4] = t4
	return t
}

// PreMultiply returns the product B*A where A is the receiver and B is s.
func (t Matrix) PreMultiply(s Matrix) Matrix {
	return s.Multiply(t)
}

// Inverse returns the inverse of the transform. If the determinant is
// near zero (|det| < 1e-6, i.e. the matrix is singular or close to it),
// the identity matrix is returned instead.
func (t Matrix) Inverse() Matrix {
	det := t[0]*t[3] - t[2]*t[1]
	if det > -1e-6 && det < 1e-6 {
		return IdentityMatrix()
	}
	invdet := 1.0 / det
	return Matrix{
		t[3] * invdet,
		-t[1] * invdet,
		-t[2] * invdet,
		t[0] * invdet,
		(t[2]*t[5] - t[3]*t[4]) * invdet,
		(t[1]*t[4] - t[0]*t[5]) * invdet,
	}
}

// TransformPoint transforms a point by given TransformMatrix.
func (t Matrix) TransformPoint(sx, sy Scala) (dx, dy Scala) {
	dx = sx*t[0] + sy*t[2] + t[4]
	dy = sx*t[1] + sy*t[3] + t[5]
	return
}

// ToMat3x4 expands the 2x3 matrix into a 3x4 matrix (12 values, each row
// padded with 0/1 homogeneous coordinates), e.g. for uploading to a GPU.
func (t Matrix) ToMat3x4() []Scala {
	return []Scala{
		t[0], t[1], 0.0, 0.0,
		t[2], t[3], 0.0, 0.0,
		t[4], t[5], 1.0, 0.0,
	}
}

// getAverageScale returns the mean of the scale factors along the two
// transformed axes (lengths of the matrix's column vectors).
func (t Matrix) getAverageScale() Scala {
	sx := math32.Sqrt(float32(t[0]*t[0] + t[2]*t[2]))
	sy := math32.Sqrt(float32(t[1]*t[1] + t[3]*t[3]))
	return Scala((sx + sy) * 0.5)
}
affine2d.go
0.874573
0.867261
affine2d.go
starcoder
package blockchain

import (
	"time"

	"github.com/incognitochain/incognito-chain/common"
	"github.com/incognitochain/incognito-chain/incognitokey"
	"github.com/incognitochain/incognito-chain/metadata"
)

// ShardToBeaconPool buffers shard-to-beacon blocks, keyed by shard ID,
// until the beacon chain consumes them.
type ShardToBeaconPool interface {
	RemoveBlock(map[byte]uint64)
	//GetFinalBlock() map[byte][]ShardToBeaconBlock
	AddShardToBeaconBlock(*ShardToBeaconBlock) (uint64, uint64, error)
	//ValidateShardToBeaconBlock(ShardToBeaconBlock) error
	GetValidBlockHash() map[byte][]common.Hash
	GetValidBlock(map[byte]uint64) map[byte][]*ShardToBeaconBlock
	GetValidBlockHeight() map[byte][]uint64
	GetLatestValidPendingBlockHeight() map[byte]uint64
	GetBlockByHeight(shardID byte, height uint64) *ShardToBeaconBlock
	SetShardState(map[byte]uint64)
	GetAllBlockHeight() map[byte][]uint64
	RevertShardToBeaconPool(s byte, height uint64)
}

// CrossShardPool buffers cross-shard blocks, keyed by the originating
// shard ID.
type CrossShardPool interface {
	AddCrossShardBlock(*CrossShardBlock) (map[byte]uint64, byte, error)
	GetValidBlock(map[byte]uint64) map[byte][]*CrossShardBlock
	GetLatestValidBlockHeight() map[byte]uint64
	GetValidBlockHeight() map[byte][]uint64
	GetBlockByHeight(_shardID byte, height uint64) *CrossShardBlock
	RemoveBlockByHeight(map[byte]uint64)
	UpdatePool() map[byte]uint64
	GetAllBlockHeight() map[byte][]uint64
	RevertCrossShardPool(uint64)
	FindBeaconHeightForCrossShardBlock(beaconHeight uint64, fromShardID byte, crossShardBlockHeight uint64) (uint64, error)
}

// ShardPool buffers blocks for a single shard chain.
type ShardPool interface {
	RemoveBlock(height uint64)
	AddShardBlock(block *ShardBlock) error
	GetValidBlockHash() []common.Hash
	GetValidBlock() []*ShardBlock
	GetValidBlockHeight() []uint64
	GetLatestValidBlockHeight() uint64
	SetShardState(height uint64)
	RevertShardPool(uint64)
	GetAllBlockHeight() []uint64
	GetPendingBlockHeight() []uint64
	Start(chan struct{})
}

// BeaconPool buffers blocks for the beacon chain.
type BeaconPool interface {
	RemoveBlock(height uint64)
	AddBeaconBlock(block *BeaconBlock) error
	GetValidBlock() []*BeaconBlock
	GetValidBlockHeight() []uint64
	SetBeaconState(height uint64)
	GetBeaconState() uint64
	// NOTE(review): method name is missing an "a" ("RevertBeaconPool") — it
	// is part of the public contract, so renaming would break implementers.
	RevertBeconPool(height uint64)
	GetAllBlockHeight() []uint64
	Start(chan struct{})
	GetPendingBlockHeight() []uint64
}

// TxPool is the mempool view used by block production.
type TxPool interface {
	// LastUpdated returns the last time a transaction was added to or
	// removed from the source pool.
	LastUpdated() time.Time

	// MiningDescs returns a slice of mining descriptors for all the
	// transactions in the source pool.
	MiningDescs() []*metadata.TxDesc

	// HaveTransaction returns whether or not the passed transaction hash
	// exists in the source pool.
	HaveTransaction(hash *common.Hash) bool

	// RemoveTx remove tx from tx resource
	RemoveTx(txs []metadata.Transaction, isInBlock bool)

	RemoveCandidateList([]string)

	EmptyPool() bool

	MaybeAcceptTransactionForBlockProducing(metadata.Transaction, int64) (*metadata.TxDesc, error)

	ValidateTxList(txs []metadata.Transaction) error

	//CheckTransactionFee
	// CheckTransactionFee(tx metadata.Transaction) (uint64, error)

	// Check tx validate by it self
	// ValidateTxByItSelf(tx metadata.Transaction) bool
}

// FeeEstimator observes accepted blocks to estimate transaction fees.
type FeeEstimator interface {
	RegisterBlock(block *ShardBlock) error
}

// ChainInterface abstracts a single chain (beacon or shard) for the
// consensus engine: committee info, block creation, validation, insertion.
type ChainInterface interface {
	GetChainName() string
	GetConsensusType() string
	GetLastBlockTimeStamp() int64
	GetMinBlkInterval() time.Duration
	GetMaxBlkCreateTime() time.Duration
	IsReady() bool
	GetActiveShardNumber() int
	GetPubkeyRole(pubkey string, round int) (string, byte)
	CurrentHeight() uint64
	GetCommitteeSize() int
	GetCommittee() []incognitokey.CommitteePublicKey
	GetPubKeyCommitteeIndex(string) int
	GetLastProposerIndex() int
	UnmarshalBlock(blockString []byte) (common.BlockInterface, error)
	CreateNewBlock(round int) (common.BlockInterface, error)
	InsertBlk(block common.BlockInterface) error
	InsertAndBroadcastBlock(block common.BlockInterface) error
	// ValidateAndInsertBlock(block common.BlockInterface) error
	ValidateBlockSignatures(block common.BlockInterface, committee []incognitokey.CommitteePublicKey) error
	ValidatePreSignBlock(block common.BlockInterface) error
	GetShardID() int
}

// BestStateInterface exposes the current best-state of a chain.
// NOTE(review): GetLastBlockTimeStamp returns uint64 here but int64 on
// ChainInterface — confirm whether the mismatch is intentional.
type BestStateInterface interface {
	GetLastBlockTimeStamp() uint64
	GetBlkMinInterval() time.Duration
	GetBlkMaxCreateTime() time.Duration
	CurrentHeight() uint64
	GetCommittee() []string
	GetLastProposerIdx() int
}
blockchain/interface.go
0.538498
0.441372
interface.go
starcoder
// NOTE(review): this file follows the Pulumi SDK code-generation layout;
// presumably it is regenerated from the Google API spec — confirm before
// hand-editing, as manual changes would be overwritten.
package v1beta1

import (
	"context"
	"reflect"

	"github.com/pkg/errors"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// Creates a new Connectivity Test. After you create a test, the reachability analysis is performed as part of the long running operation, which completes when the analysis completes. If the endpoint specifications in `ConnectivityTest` are invalid (for example, containing non-existent resources in the network, or you don't have read permissions to the network configurations of listed projects), then the reachability result returns a value of `UNKNOWN`. If the endpoint specifications in `ConnectivityTest` are incomplete, the reachability result returns a value of AMBIGUOUS. For more information, see the Connectivity Test documentation.
// Auto-naming is currently not supported for this resource.
type ConnectivityTest struct {
	pulumi.CustomResourceState

	// The time the test was created.
	CreateTime pulumi.StringOutput `pulumi:"createTime"`
	// The user-supplied description of the Connectivity Test. Maximum of 512 characters.
	Description pulumi.StringOutput `pulumi:"description"`
	// Destination specification of the Connectivity Test. You can use a combination of destination IP address, Compute Engine VM instance, or VPC network to uniquely identify the destination location. Even if the destination IP address is not unique, the source IP location is unique. Usually, the analysis can infer the destination endpoint from route information. If the destination you specify is a VM instance and the instance has multiple network interfaces, then you must also specify either a destination IP address or VPC network to identify the destination interface. A reachability analysis proceeds even if the destination location is ambiguous. However, the result can include endpoints that you don't intend to test.
	Destination EndpointResponseOutput `pulumi:"destination"`
	// The display name of a Connectivity Test.
	DisplayName pulumi.StringOutput `pulumi:"displayName"`
	// Resource labels to represent user-provided metadata.
	Labels pulumi.StringMapOutput `pulumi:"labels"`
	// Unique name of the resource using the form: `projects/{project_id}/locations/global/connectivityTests/{test}`
	Name pulumi.StringOutput `pulumi:"name"`
	// The probing details of this test from the latest run, present for applicable tests only. The details are updated when creating a new test, updating an existing test, or triggering a one-time rerun of an existing test.
	ProbingDetails ProbingDetailsResponseOutput `pulumi:"probingDetails"`
	// IP Protocol of the test. When not provided, "TCP" is assumed.
	Protocol pulumi.StringOutput `pulumi:"protocol"`
	// The reachability details of this test from the latest run. The details are updated when creating a new test, updating an existing test, or triggering a one-time rerun of an existing test.
	ReachabilityDetails ReachabilityDetailsResponseOutput `pulumi:"reachabilityDetails"`
	// Other projects that may be relevant for reachability analysis. This is applicable to scenarios where a test can cross project boundaries.
	RelatedProjects pulumi.StringArrayOutput `pulumi:"relatedProjects"`
	// Source specification of the Connectivity Test. You can use a combination of source IP address, virtual machine (VM) instance, or Compute Engine network to uniquely identify the source location. Examples: If the source IP address is an internal IP address within a Google Cloud Virtual Private Cloud (VPC) network, then you must also specify the VPC network. Otherwise, specify the VM instance, which already contains its internal IP address and VPC network information. If the source of the test is within an on-premises network, then you must provide the destination VPC network. If the source endpoint is a Compute Engine VM instance with multiple network interfaces, the instance itself is not sufficient to identify the endpoint. So, you must also specify the source IP address or VPC network. A reachability analysis proceeds even if the source location is ambiguous. However, the test result may include endpoints that you don't intend to test.
	Source EndpointResponseOutput `pulumi:"source"`
	// The time the test's configuration was updated.
	UpdateTime pulumi.StringOutput `pulumi:"updateTime"`
}

// NewConnectivityTest registers a new resource with the given unique name, arguments, and options.
func NewConnectivityTest(ctx *pulumi.Context,
	name string, args *ConnectivityTestArgs, opts ...pulumi.ResourceOption) (*ConnectivityTest, error) {
	if args == nil {
		return nil, errors.New("missing one or more required arguments")
	}

	// Destination, Name, Source and TestId are required by the API; fail
	// fast before attempting registration.
	if args.Destination == nil {
		return nil, errors.New("invalid value for required argument 'Destination'")
	}
	if args.Name == nil {
		return nil, errors.New("invalid value for required argument 'Name'")
	}
	if args.Source == nil {
		return nil, errors.New("invalid value for required argument 'Source'")
	}
	if args.TestId == nil {
		return nil, errors.New("invalid value for required argument 'TestId'")
	}
	var resource ConnectivityTest
	err := ctx.RegisterResource("google-native:networkmanagement/v1beta1:ConnectivityTest", name, args, &resource, opts...)
	if err != nil {
		return nil, err
	}
	return &resource, nil
}

// GetConnectivityTest gets an existing ConnectivityTest resource's state with the given name, ID, and optional
// state properties that are used to uniquely qualify the lookup (nil if not required).
func GetConnectivityTest(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *ConnectivityTestState, opts ...pulumi.ResourceOption) (*ConnectivityTest, error) {
	var resource ConnectivityTest
	err := ctx.ReadResource("google-native:networkmanagement/v1beta1:ConnectivityTest", name, id, state, &resource, opts...)
	if err != nil {
		return nil, err
	}
	return &resource, nil
}

// Input properties used for looking up and filtering ConnectivityTest resources.
type connectivityTestState struct {
}

// ConnectivityTestState is the public (empty) state-lookup type for
// GetConnectivityTest.
type ConnectivityTestState struct {
}

func (ConnectivityTestState) ElementType() reflect.Type {
	return reflect.TypeOf((*connectivityTestState)(nil)).Elem()
}

// connectivityTestArgs is the plain-value form of the constructor arguments.
type connectivityTestArgs struct {
	// The user-supplied description of the Connectivity Test. Maximum of 512 characters.
	Description *string `pulumi:"description"`
	// Destination specification of the Connectivity Test. You can use a combination of destination IP address, Compute Engine VM instance, or VPC network to uniquely identify the destination location. Even if the destination IP address is not unique, the source IP location is unique. Usually, the analysis can infer the destination endpoint from route information. If the destination you specify is a VM instance and the instance has multiple network interfaces, then you must also specify either a destination IP address or VPC network to identify the destination interface. A reachability analysis proceeds even if the destination location is ambiguous. However, the result can include endpoints that you don't intend to test.
	Destination Endpoint `pulumi:"destination"`
	// Resource labels to represent user-provided metadata.
	Labels map[string]string `pulumi:"labels"`
	// Unique name of the resource using the form: `projects/{project_id}/locations/global/connectivityTests/{test}`
	Name    string  `pulumi:"name"`
	Project *string `pulumi:"project"`
	// IP Protocol of the test. When not provided, "TCP" is assumed.
	Protocol *string `pulumi:"protocol"`
	// Other projects that may be relevant for reachability analysis. This is applicable to scenarios where a test can cross project boundaries.
	RelatedProjects []string `pulumi:"relatedProjects"`
	// Source specification of the Connectivity Test. You can use a combination of source IP address, virtual machine (VM) instance, or Compute Engine network to uniquely identify the source location. Examples: If the source IP address is an internal IP address within a Google Cloud Virtual Private Cloud (VPC) network, then you must also specify the VPC network. Otherwise, specify the VM instance, which already contains its internal IP address and VPC network information. If the source of the test is within an on-premises network, then you must provide the destination VPC network. If the source endpoint is a Compute Engine VM instance with multiple network interfaces, the instance itself is not sufficient to identify the endpoint. So, you must also specify the source IP address or VPC network. A reachability analysis proceeds even if the source location is ambiguous. However, the test result may include endpoints that you don't intend to test.
	Source Endpoint `pulumi:"source"`
	TestId string   `pulumi:"testId"`
}

// The set of arguments for constructing a ConnectivityTest resource.
type ConnectivityTestArgs struct {
	// The user-supplied description of the Connectivity Test. Maximum of 512 characters.
	Description pulumi.StringPtrInput
	// Destination specification of the Connectivity Test. You can use a combination of destination IP address, Compute Engine VM instance, or VPC network to uniquely identify the destination location. Even if the destination IP address is not unique, the source IP location is unique. Usually, the analysis can infer the destination endpoint from route information. If the destination you specify is a VM instance and the instance has multiple network interfaces, then you must also specify either a destination IP address or VPC network to identify the destination interface. A reachability analysis proceeds even if the destination location is ambiguous. However, the result can include endpoints that you don't intend to test.
	Destination EndpointInput
	// Resource labels to represent user-provided metadata.
	Labels pulumi.StringMapInput
	// Unique name of the resource using the form: `projects/{project_id}/locations/global/connectivityTests/{test}`
	Name    pulumi.StringInput
	Project pulumi.StringPtrInput
	// IP Protocol of the test. When not provided, "TCP" is assumed.
	Protocol pulumi.StringPtrInput
	// Other projects that may be relevant for reachability analysis. This is applicable to scenarios where a test can cross project boundaries.
	RelatedProjects pulumi.StringArrayInput
	// Source specification of the Connectivity Test. You can use a combination of source IP address, virtual machine (VM) instance, or Compute Engine network to uniquely identify the source location. Examples: If the source IP address is an internal IP address within a Google Cloud Virtual Private Cloud (VPC) network, then you must also specify the VPC network. Otherwise, specify the VM instance, which already contains its internal IP address and VPC network information. If the source of the test is within an on-premises network, then you must provide the destination VPC network. If the source endpoint is a Compute Engine VM instance with multiple network interfaces, the instance itself is not sufficient to identify the endpoint. So, you must also specify the source IP address or VPC network. A reachability analysis proceeds even if the source location is ambiguous. However, the test result may include endpoints that you don't intend to test.
	Source EndpointInput
	TestId pulumi.StringInput
}

func (ConnectivityTestArgs) ElementType() reflect.Type {
	return reflect.TypeOf((*connectivityTestArgs)(nil)).Elem()
}

// ConnectivityTestInput is the Pulumi input interface for ConnectivityTest
// values.
type ConnectivityTestInput interface {
	pulumi.Input

	ToConnectivityTestOutput() ConnectivityTestOutput
	ToConnectivityTestOutputWithContext(ctx context.Context) ConnectivityTestOutput
}

func (*ConnectivityTest) ElementType() reflect.Type {
	return reflect.TypeOf((**ConnectivityTest)(nil)).Elem()
}

func (i *ConnectivityTest) ToConnectivityTestOutput() ConnectivityTestOutput {
	return i.ToConnectivityTestOutputWithContext(context.Background())
}

func (i *ConnectivityTest) ToConnectivityTestOutputWithContext(ctx context.Context) ConnectivityTestOutput {
	return pulumi.ToOutputWithContext(ctx, i).(ConnectivityTestOutput)
}

// ConnectivityTestOutput is the Pulumi output wrapper for ConnectivityTest.
type ConnectivityTestOutput struct{ *pulumi.OutputState }

func (ConnectivityTestOutput) ElementType() reflect.Type {
	return reflect.TypeOf((**ConnectivityTest)(nil)).Elem()
}

func (o ConnectivityTestOutput) ToConnectivityTestOutput() ConnectivityTestOutput {
	return o
}

func (o ConnectivityTestOutput) ToConnectivityTestOutputWithContext(ctx context.Context) ConnectivityTestOutput {
	return o
}

func init() {
	pulumi.RegisterInputType(reflect.TypeOf((*ConnectivityTestInput)(nil)).Elem(), &ConnectivityTest{})
	pulumi.RegisterOutputType(ConnectivityTestOutput{})
}
sdk/go/google/networkmanagement/v1beta1/connectivityTest.go
0.811489
0.441914
connectivityTest.go
starcoder
package quantile

import (
	"math"
	"sort"

	"github.com/caio/go-tdigest"
)

// algorithm is the minimal quantile-estimator interface shared by the
// approximate (t-digest) and exact implementations: feed values with Add,
// query with Quantile(q) for q in [0, 1].
type algorithm interface {
	Add(value float64) error
	Quantile(q float64) float64
}

// newTDigest returns an approximate streaming estimator backed by a t-digest
// with the given compression.
func newTDigest(compression float64) (algorithm, error) {
	return tdigest.New(tdigest.Compression(compression))
}

// exactAlgorithmR7 stores every sample and computes exact quantiles using the
// Hyndman & Fan R7 definition (the default of Excel and NumPy).
type exactAlgorithmR7 struct {
	xs     []float64 // all samples; sorted in place on first Quantile call
	sorted bool      // true while xs is sorted (invalidated by Add)
}

// newExactR7 returns an exact R7 estimator. The compression argument is
// unused; it exists so all estimator constructors share one signature.
func newExactR7(compression float64) (algorithm, error) {
	return &exactAlgorithmR7{xs: make([]float64, 0, 100), sorted: false}, nil
}

// Add records a sample and marks the cached sort order stale. Never fails.
func (e *exactAlgorithmR7) Add(value float64) error {
	e.xs = append(e.xs, value)
	e.sorted = false
	return nil
}

// Quantile returns the R7 sample quantile of all values added so far, or NaN
// when no samples exist.
func (e *exactAlgorithmR7) Quantile(q float64) float64 {
	size := len(e.xs)

	// No information
	if size == 0 {
		return math.NaN()
	}

	// Sort the array if necessary
	if !e.sorted {
		sort.Float64s(e.xs)
		e.sorted = true
	}

	// Get the quantile index and the fraction to the neighbor
	// Hyndman & Fan; Sample Quantiles in Statistical Packages; The American Statistician vol 50; pp 361-365; 1996 -- R7
	// Same as Excel and Numpy.
	N := float64(size)
	n := q * (N - 1)
	i, gamma := math.Modf(n)
	j := int(i)
	if j < 0 {
		return e.xs[0]
	}
	// BUG FIX: the guard was previously `j >= size`, which let j == size-1
	// (q == 1.0, or any q with a single sample) fall through to the
	// interpolation and read xs[j+1] out of range, panicking. Clamp to the
	// maximum instead; gamma is 0 at q == 1.0 so the value is unchanged.
	if j >= size-1 {
		return e.xs[size-1]
	}

	// Linear interpolation
	return e.xs[j] + gamma*(e.xs[j+1]-e.xs[j])
}

// exactAlgorithmR8 stores every sample and computes exact quantiles using the
// Hyndman & Fan R8 definition (approximately median-unbiased).
type exactAlgorithmR8 struct {
	xs     []float64 // all samples; sorted in place on first Quantile call
	sorted bool      // true while xs is sorted (invalidated by Add)
}

// newExactR8 returns an exact R8 estimator. The compression argument is
// unused; it exists so all estimator constructors share one signature.
func newExactR8(compression float64) (algorithm, error) {
	return &exactAlgorithmR8{xs: make([]float64, 0, 100), sorted: false}, nil
}

// Add records a sample and marks the cached sort order stale. Never fails.
func (e *exactAlgorithmR8) Add(value float64) error {
	e.xs = append(e.xs, value)
	e.sorted = false
	return nil
}

// Quantile returns the R8 sample quantile of all values added so far, or NaN
// when no samples exist.
func (e *exactAlgorithmR8) Quantile(q float64) float64 {
	size := len(e.xs)

	// No information
	if size == 0 {
		return math.NaN()
	}

	// Sort the array if necessary
	if !e.sorted {
		sort.Float64s(e.xs)
		e.sorted = true
	}

	// Get the quantile index and the fraction to the neighbor
	// Hyndman & Fan; Sample Quantiles in Statistical Packages; The American Statistician vol 50; pp 361-365; 1996 -- R8
	// Indices are zero-base here but one-based in the paper
	N := float64(size)
	n := q*(N+1.0/3.0) - (2.0 / 3.0)
	i, gamma := math.Modf(n)
	j := int(i)
	if j < 0 {
		return e.xs[0]
	}
	// BUG FIX: as in R7, the guard was `j >= size`, so for q near 1 the R8
	// index j == size-1 (with gamma > 0) fell through and read xs[j+1] out
	// of range, panicking. Clamping to the maximum is the standard R8
	// boundary treatment.
	if j >= size-1 {
		return e.xs[size-1]
	}

	// Linear interpolation
	return e.xs[j] + gamma*(e.xs[j+1]-e.xs[j])
}
plugins/aggregators/quantile/algorithms.go
0.770292
0.482246
algorithms.go
starcoder
package model

import (
	"math"
	"time"

	"k8s.io/autoscaler/vertical-pod-autoscaler/recommender/util"
)

// ContainerUsageSample is a measure of resource usage of a container over some
// interval.
type ContainerUsageSample struct {
	// Start of the measurement interval.
	MeasureStart time.Time
	// Average CPU usage in cores.
	CPUUsage float64
	// Randomly sampled instant memory usage in bytes.
	MemoryUsage float64
}

// ContainerState stores information about a single container instance.
// It holds the recent history of CPU and memory utilization.
// * CPU is stored in form of a distribution (histogram).
//   Currently we're using fixed weight samples in the CPU histogram (i.e. old
//   and fresh samples are equally important). Old samples are never deleted.
//   TODO: Add exponential decaying of weights over time to address this.
// * Memory is stored for the period of length MemoryAggregationWindowLength in
//   the form of usage peaks, one value per MemoryAggregationInterval.
//   For example if window legth is one week and aggregation interval is one day
//   it will store 7 peaks, one per day, for the last week.
// Note: samples are added to intervals based on their start timestamps.
type ContainerState struct {
	// Distribution of CPU usage. The measurement unit is 1 CPU core.
	CPUUsage util.Histogram
	// Memory peaks stored in the intervals belonging to the aggregation window
	// (one value per interval). The measurement unit is a byte.
	MemoryUsagePeaks util.FloatSlidingWindow
	// End time of the most recent interval covered by the aggregation window.
	windowEnd time.Time
	// Start of the latest usage sample that was aggregated.
	lastSampleStart time.Time
}

// NewContainerState returns a new, empty ContainerState.
// Both timestamps start at the Unix epoch so that the first real sample is
// always "newer" than lastSampleStart and triggers a window reset.
func NewContainerState() *ContainerState {
	return &ContainerState{
		util.NewHistogram(CPUHistogramOptions), // CPUUsage
		util.NewFloatSlidingWindow( // memoryUsagePeaks
			int(MemoryAggregationWindowLength / MemoryAggregationInterval)),
		time.Unix(0, 0),
		time.Unix(0, 0)}
}

// isValid reports whether the sample's measurements are in legal range
// (non-negative CPU and memory).
func (sample *ContainerUsageSample) isValid() bool {
	return sample.CPUUsage >= 0.0 && sample.MemoryUsage >= 0.0
}

// AddSample adds a usage sample to the given ContainerState. Requires samples
// to be passed in chronological order (i.e. in order of growing MeasureStart).
// Invalid samples (out of order or measure out of legal range) are discarded.
// Returns true if the sample was aggregated, false if it was discarded.
// Note: usage samples don't hold their end timestamp / duration. They are
// implicitly assumed to be disjoint when aggregating.
func (container *ContainerState) AddSample(sample *ContainerUsageSample) bool {
	ts := sample.MeasureStart
	if !sample.isValid() || !ts.After(container.lastSampleStart) {
		return false // Discard invalid or out-of-order samples.
	}

	if !ts.Before(container.windowEnd.Add(MemoryAggregationWindowLength)) {
		// The gap between this sample and the previous interval is so
		// large that the whole sliding window gets reset.
		// This also happens on the first memory usage sample.
		container.MemoryUsagePeaks.Clear()
		container.windowEnd = ts.Add(MemoryAggregationInterval)
	} else {
		for !ts.Before(container.windowEnd) {
			// Shift the memory aggregation window to the next interval.
			// Each pushed 0.0 opens a fresh (initially empty) interval peak.
			container.MemoryUsagePeaks.Push(0.0)
			container.windowEnd =
				container.windowEnd.Add(MemoryAggregationInterval)
		}
	}
	// Update the memory peak for the current interval.
	if container.MemoryUsagePeaks.Head() == nil {
		// Window is empty.
		container.MemoryUsagePeaks.Push(0.0)
	}
	// Head() returns a pointer into the window, so the peak is updated
	// in place with the max of the current peak and this sample.
	*container.MemoryUsagePeaks.Head() = math.Max(
		*container.MemoryUsagePeaks.Head(), sample.MemoryUsage)

	// Update the CPU usage distribution.
	container.CPUUsage.AddSample(sample.CPUUsage, 1.0)
	container.lastSampleStart = ts
	return true
}
0.586168
0.479808
container.go
starcoder
// Package math wraps the float64 standard library in float32 helpers and
// adds interpolation/clamping utilities commonly used in UI code.
package math

import (
	"math"
)

// R2D converts radians to degrees.
// Pi is a package-level Scala/float32 constant defined elsewhere in this
// package (not visible in this file).
func R2D(r float32) float32 {
	return 180.0 * r / Pi
}

// D2R converts degrees to radians.
func D2R(r float32) float32 {
	return Pi * r / 180.0
}

// Absf returns the absolute value of v.
func Absf(v float32) float32 {
	if v < 0 {
		return -v
	} else {
		return v
	}
}

// Round rounds v to the nearest int.
// NOTE(review): the behavior is asymmetric — negatives round half away from
// zero (int(v - 0.5)), while positives use an epsilon of 0.4999999 so an
// exact +0.5 rounds DOWN. Presumably the epsilon compensates for float32
// representation error; confirm the asymmetry is intended.
func Round(v float32) int {
	if v < 0 {
		return int(v - 0.5)
	} else {
		return int(v + 0.4999999)
	}
}

// Sinf is float32 math.Sin.
func Sinf(v float32) float32 {
	return float32(math.Sin(float64(v)))
}

// Cosf is float32 math.Cos.
func Cosf(v float32) float32 {
	return float32(math.Cos(float64(v)))
}

// Tanf is float32 math.Tan.
func Tanf(v float32) float32 {
	return float32(math.Tan(float64(v)))
}

// Asinf is float32 math.Asin.
func Asinf(v float32) float32 {
	return float32(math.Asin(float64(v)))
}

// Acosf is float32 math.Acos.
func Acosf(v float32) float32 {
	return float32(math.Acos(float64(v)))
}

// Atanf is float32 math.Atan.
func Atanf(v float32) float32 {
	return float32(math.Atan(float64(v)))
}

// Sqrtf is float32 math.Sqrt.
func Sqrtf(v float32) float32 {
	return float32(math.Sqrt(float64(v)))
}

// Powf is float32 math.Pow (v raised to the power e).
func Powf(v, e float32) float32 {
	return float32(math.Pow(float64(v), float64(e)))
}

// Lerp linearly interpolates between ints a and b by fraction s,
// truncating the interpolated offset toward zero.
func Lerp(a, b int, s float32) int {
	r := float32(b - a)
	return a + int(r*s)
}

// Lerpf linearly interpolates between a and b by fraction s.
func Lerpf(a, b float32, s float32) float32 {
	r := b - a
	return a + r*s
}

// Ramp maps s from the range [a, b] to [0, 1], unclamped
// (values outside [a, b] map outside [0, 1]).
func Ramp(s float32, a, b float32) float32 {
	return (s - a) / (b - a)
}

// RampSat maps s from the range [a, b] to [0, 1], clamped to [0, 1].
func RampSat(s float32, a, b float32) float32 {
	return Saturate((s - a) / (b - a))
}

// Saturate clamps x to [0, 1].
func Saturate(x float32) float32 {
	return Clampf(x, 0, 1)
}

// SmoothStep maps s from [a, b] to [0, 1] with the cubic Hermite
// ease curve 3x² - 2x³, clamped at both ends.
func SmoothStep(s float32, a, b float32) float32 {
	x := RampSat(s, a, b)
	return x * x * (3 - 2*x)
}

// Clamp restricts x to the range [min, max].
func Clamp(x, min, max int) int {
	switch {
	case x < min:
		return min
	case x > max:
		return max
	default:
		return x
	}
}

// Clampf restricts x to the range [min, max].
func Clampf(x, min, max float32) float32 {
	switch {
	case x < min:
		return min
	case x > max:
		return max
	default:
		return x
	}
}

// Min returns the smallest of the given values.
// With no arguments it returns MaxInt (a package constant defined
// elsewhere in this package).
func Min(values ...int) int {
	m := MaxInt
	for _, v := range values {
		if v < m {
			m = v
		}
	}
	return m
}

// Minf returns the smallest of the given values.
// With no arguments it returns math.MaxFloat32.
func Minf(values ...float32) float32 {
	m := float32(math.MaxFloat32)
	for _, v := range values {
		if v < m {
			m = v
		}
	}
	return m
}

// Max returns the largest of the given values.
// With no arguments it returns MinInt (a package constant defined
// elsewhere in this package).
func Max(values ...int) int {
	m := MinInt
	for _, v := range values {
		if v > m {
			m = v
		}
	}
	return m
}

// Maxf returns the largest of the given values.
// With no arguments it returns -math.MaxFloat32.
func Maxf(values ...float32) float32 {
	m := float32(-math.MaxFloat32)
	for _, v := range values {
		if v > m {
			m = v
		}
	}
	return m
}

// Mod returns a modulo b with the result normalized into [0, b) for
// positive b (unlike Go's % operator, which can return negatives).
func Mod(a, b int) int {
	x := a % b
	if x < 0 {
		return x + b
	} else {
		return x
	}
}
Beam/go/vendor/github.com/google/gxui/math/math.go
0.827096
0.610628
math.go
starcoder
package graph import "math/rand" // NamedVertices contains example named vertices var NamedVertices = []string{"A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M"} // DAG returns a weighted (random costs) directed acyclic graph. func DAG() *Graph { g := New() rand.Seed(1) g.AddVertices(NamedVertices) g.AddEdge("A", "D", rand.Intn(20)) g.AddEdge("B", "D", rand.Intn(20)) g.AddEdge("C", "A", rand.Intn(20)) g.AddEdge("C", "B", rand.Intn(20)) g.AddEdges("D", []string{"H", "G"}, []int{rand.Intn(20), rand.Intn(20)}) g.AddEdges("E", []string{"A", "D", "F"}, []int{rand.Intn(20), rand.Intn(20), rand.Intn(20)}) g.AddEdges("F", []string{"K", "J"}, []int{rand.Intn(20), rand.Intn(20)}) g.AddEdge("G", "I", rand.Intn(20)) g.AddEdges("H", []string{"I", "J"}, []int{rand.Intn(20), rand.Intn(20)}) g.AddEdge("I", "J", rand.Intn(20)) g.AddEdges("J", []string{"L", "M"}, []int{rand.Intn(20), rand.Intn(20)}) g.AddEdge("K", "J", rand.Intn(20)) return g } // WithCycle returns a graph with a simple cycle func WithCycle() *Graph { g := New() rand.Seed(1) g.AddVertices([]string{"A", "B", "C"}) g.AddEdge("A", "B", rand.Intn(20)) g.AddEdge("B", "C", rand.Intn(20)) g.AddEdge("C", "A", rand.Intn(20)) return g } // Directed returns a directed graph (with possible cycles) func Directed(disconnected bool) *Graph { g := New() rand.Seed(1) g.AddVertices(NamedVertices) g.AddVertices(NamedVertices) g.AddEdges("A", []string{"B", "C"}, []int{rand.Intn(20), rand.Intn(20)}) g.AddEdges("B", []string{"D"}, []int{rand.Intn(20)}) g.AddEdges("C", []string{"E", "L"}, []int{rand.Intn(20), rand.Intn(20)}) if !disconnected { g.AddEdges("D", []string{"F"}, []int{rand.Intn(20)}) g.AddEdges("E", []string{"H"}, []int{rand.Intn(20)}) } else { g.AddEdges("D", []string{}, []int{}) g.AddEdges("E", []string{}, []int{}) } g.AddEdges("F", []string{"G", "I"}, []int{rand.Intn(20), rand.Intn(20)}) g.AddEdges("G", []string{}, []int{}) g.AddEdges("H", []string{"F"}, []int{rand.Intn(20)}) g.AddEdges("I", []string{"K", 
"G"}, []int{rand.Intn(20), rand.Intn(20)}) g.AddEdges("J", []string{"F", "H"}, []int{rand.Intn(20), rand.Intn(20)}) g.AddEdges("K", []string{"J"}, []int{rand.Intn(20)}) if !disconnected { g.AddEdges("L", []string{"H"}, []int{rand.Intn(20)}) g.AddEdges("M", []string{"B", "D", "G"}, []int{rand.Intn(20), rand.Intn(20), rand.Intn(20)}) } else { g.AddEdges("L", []string{}, []int{}) g.AddEdges("M", []string{"B", "D"}, []int{rand.Intn(20), rand.Intn(20)}) } return g }
struct/graph/adjacency/examplegraph.go
0.665519
0.499268
examplegraph.go
starcoder
package blackjack import "github.com/shopspring/decimal" // DoubleRule represents different double rule variants. type DoubleRule int // When is it possible for the player to double their hand? const ( DoubleAny DoubleRule = iota DoubleOnly9_10_11 DoubleOnly10_11 ) //go:generate stringer -type=DoubleRule // SurrenderRule represents different surrender rule variants. type SurrenderRule int // Surrender rule options. const ( NoSurrender SurrenderRule = iota EarlySurrender LateSurrender ) //go:generate stringer -type=SurrenderRule // Rules represent the game rules and mechanics. type Rules interface { NumDecks() uint DealerHitSoft17() bool Surrender() SurrenderRule CanSplit([]Hand) bool Double() DoubleRule DoubleAfterSplit() bool BlackjackAfterSplit() bool NoHoleCard() bool OriginalBetsOnly() bool BlackjackRatio() decimal.Decimal DealerWinsTie() bool PerfectPair() bool PerfectPairRatio() (mixed, same, perfect int) } // Game rules in different casino's. var ( HollandCasino Rules = holland{} TapTapBoom Rules = tapTapBoom{} ) // AvailableRules is a slice that hold all available game rules. 
var AvailableRules = []Rules{ HollandCasino, TapTapBoom, } type holland struct{} func (holland) NumDecks() uint { return 6 } func (holland) DealerHitSoft17() bool { return true } func (holland) Surrender() SurrenderRule { return NoSurrender } func (holland) CanSplit([]Hand) bool { return true } func (holland) Double() DoubleRule { return DoubleOnly9_10_11 } func (holland) DoubleAfterSplit() bool { return true } func (holland) BlackjackAfterSplit() bool { return true } func (holland) NoHoleCard() bool { return true } func (holland) OriginalBetsOnly() bool { return false } func (holland) BlackjackRatio() decimal.Decimal { return decimal.New(15, -1) } func (holland) DealerWinsTie() bool { return false } func (holland) PerfectPair() bool { return true } func (holland) PerfectPairRatio() (m, s, p int) { return 6, 12, 25 } type tapTapBoom struct{} func (tapTapBoom) NumDecks() uint { return 6 } // guess func (tapTapBoom) DealerHitSoft17() bool { return true } func (tapTapBoom) Surrender() SurrenderRule { return NoSurrender } func (tapTapBoom) CanSplit(h []Hand) bool { return len(h) == 1 } func (tapTapBoom) Double() DoubleRule { return DoubleAny } func (tapTapBoom) DoubleAfterSplit() bool { return true } func (tapTapBoom) BlackjackAfterSplit() bool { return true } func (tapTapBoom) NoHoleCard() bool { return false } func (tapTapBoom) OriginalBetsOnly() bool { return false } func (tapTapBoom) BlackjackRatio() decimal.Decimal { return decimal.New(15, -1) } func (tapTapBoom) DealerWinsTie() bool { return false } func (tapTapBoom) PerfectPair() bool { return false } func (tapTapBoom) PerfectPairRatio() (m, s, p int) { return }
blackjack/rules.go
0.586049
0.48749
rules.go
starcoder
package siesta import ( "errors" "fmt" "io" "net" "strconv" "strings" "sync" "time" ) // InvalidOffset is a constant that is used to denote an invalid or uninitialized offset. const InvalidOffset int64 = -1 // Connector is an interface that should provide ways to clearly interact with Kafka cluster and hide all broker management stuff from user. type Connector interface { // GetTopicMetadata is primarily used to discover leaders for given topics and how many partitions these topics have. // Passing it an empty topic list will retrieve metadata for all topics in a cluster. GetTopicMetadata(topics []string) (*MetadataResponse, error) // GetAvailableOffset issues an offset request to a specified topic and partition with a given offset time. // More on offset time here - https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-OffsetRequest GetAvailableOffset(topic string, partition int32, offsetTime int64) (int64, error) // Fetch issues a single fetch request to a broker responsible for a given topic and partition and returns a FetchResponse that contains messages starting from a given offset. Fetch(topic string, partition int32, offset int64) (*FetchResponse, error) // GetOffset gets the offset for a given group, topic and partition from Kafka. A part of new offset management API. GetOffset(group string, topic string, partition int32) (int64, error) // CommitOffset commits the offset for a given group, topic and partition to Kafka. A part of new offset management API. CommitOffset(group string, topic string, partition int32, offset int64) error GetLeader(topic string, partition int32) (BrokerLink, error) // Tells the Connector to close all existing connections and stop. // This method is NOT blocking but returns a channel which will get a single value once the closing is finished. 
Close() <-chan bool } // ConnectorConfig is used to pass multiple configuration values for a Connector type ConnectorConfig struct { // BrokerList is a bootstrap list to discover other brokers in a cluster. At least one broker is required. BrokerList []string // ReadTimeout is a timeout to read the response from a TCP socket. ReadTimeout time.Duration // WriteTimeout is a timeout to write the request to a TCP socket. WriteTimeout time.Duration // ConnectTimeout is a timeout to connect to a TCP socket. ConnectTimeout time.Duration // Sets whether the connection should be kept alive. KeepAlive bool // A keep alive period for a TCP connection. KeepAliveTimeout time.Duration // Maximum number of open connections for a connector. MaxConnections int // Maximum number of open connections for a single broker for a connector. MaxConnectionsPerBroker int // Maximum fetch size in bytes which will be used in all Consume() calls. FetchSize int32 // The minimum amount of data the server should return for a fetch request. If insufficient data is available the request will block FetchMinBytes int32 // The maximum amount of time the server will block before answering the fetch request if there isn't sufficient data to immediately satisfy FetchMinBytes FetchMaxWaitTime int32 // Number of retries to get topic metadata. MetadataRetries int // Backoff value between topic metadata requests. MetadataBackoff time.Duration // Number of retries to commit an offset. CommitOffsetRetries int // Backoff value between commit offset requests. CommitOffsetBackoff time.Duration // Number of retries to get consumer metadata. ConsumerMetadataRetries int // Backoff value between consumer metadata requests. ConsumerMetadataBackoff time.Duration // ClientID that will be used by a connector to identify client requests by broker. ClientID string } // NewConnectorConfig returns a new ConnectorConfig with sane defaults. 
func NewConnectorConfig() *ConnectorConfig { return &ConnectorConfig{ ReadTimeout: 5 * time.Second, WriteTimeout: 5 * time.Second, ConnectTimeout: 5 * time.Second, KeepAlive: true, KeepAliveTimeout: 1 * time.Minute, MaxConnections: 5, MaxConnectionsPerBroker: 5, FetchMinBytes: 1, FetchSize: 1024000, FetchMaxWaitTime: 1000, MetadataRetries: 5, MetadataBackoff: 200 * time.Millisecond, CommitOffsetRetries: 5, CommitOffsetBackoff: 200 * time.Millisecond, ConsumerMetadataRetries: 15, ConsumerMetadataBackoff: 500 * time.Millisecond, ClientID: "siesta", } } // Validate validates this ConnectorConfig. Returns a corresponding error if the ConnectorConfig is invalid and nil otherwise. func (cc *ConnectorConfig) Validate() error { if cc == nil { return errors.New("Please provide a ConnectorConfig.") } if len(cc.BrokerList) == 0 { return errors.New("BrokerList must have at least one broker.") } if cc.ReadTimeout < time.Millisecond { return errors.New("ReadTimeout must be at least 1ms.") } if cc.WriteTimeout < time.Millisecond { return errors.New("WriteTimeout must be at least 1ms.") } if cc.ConnectTimeout < time.Millisecond { return errors.New("ConnectTimeout must be at least 1ms.") } if cc.KeepAliveTimeout < time.Millisecond { return errors.New("KeepAliveTimeout must be at least 1ms.") } if cc.MaxConnections < 1 { return errors.New("MaxConnections cannot be less than 1.") } if cc.MaxConnectionsPerBroker < 1 { return errors.New("MaxConnectionsPerBroker cannot be less than 1.") } if cc.FetchSize < 1 { return errors.New("FetchSize cannot be less than 1.") } if cc.MetadataRetries < 0 { return errors.New("MetadataRetries cannot be less than 0.") } if cc.MetadataBackoff < time.Millisecond { return errors.New("MetadataBackoff must be at least 1ms.") } if cc.CommitOffsetRetries < 0 { return errors.New("CommitOffsetRetries cannot be less than 0.") } if cc.CommitOffsetBackoff < time.Millisecond { return errors.New("CommitOffsetBackoff must be at least 1ms.") } if 
cc.ConsumerMetadataRetries < 0 { return errors.New("ConsumerMetadataRetries cannot be less than 0.") } if cc.ConsumerMetadataBackoff < time.Millisecond { return errors.New("ConsumerMetadataBackoff must be at least 1ms.") } if cc.ClientID == "" { return errors.New("ClientId cannot be empty.") } return nil } // DefaultConnector is a default (and only one for now) Connector implementation for Siesta library. type DefaultConnector struct { config ConnectorConfig leaders map[string]map[int32]*brokerLink links []*brokerLink bootstrapLinks []*brokerLink lock sync.Mutex //offset coordination part offsetCoordinators map[string]int32 } // NewDefaultConnector creates a new DefaultConnector with a given ConnectorConfig. May return an error if the passed config is invalid. func NewDefaultConnector(config *ConnectorConfig) (*DefaultConnector, error) { if err := config.Validate(); err != nil { return nil, err } connector := &DefaultConnector{ config: *config, leaders: make(map[string]map[int32]*brokerLink), offsetCoordinators: make(map[string]int32), } return connector, nil } // Returns a string representation of this DefaultConnector. func (dc *DefaultConnector) String() string { return "Default Connector" } // GetTopicMetadata is primarily used to discover leaders for given topics and how many partitions these topics have. // Passing it an empty topic list will retrieve metadata for all topics in a cluster. func (dc *DefaultConnector) GetTopicMetadata(topics []string) (*MetadataResponse, error) { for i := 0; i <= dc.config.MetadataRetries; i++ { if metadata, err := dc.getMetadata(topics); err == nil { return metadata, nil } Debugf(dc, "GetTopicMetadata for %s failed after %d try", topics, i) time.Sleep(dc.config.MetadataBackoff) } return nil, fmt.Errorf("Could not get topic metadata for %s after %d retries", topics, dc.config.MetadataRetries) } // GetAvailableOffset issues an offset request to a specified topic and partition with a given offset time. 
func (dc *DefaultConnector) GetAvailableOffset(topic string, partition int32, offsetTime int64) (int64, error) { request := new(OffsetRequest) request.AddPartitionOffsetRequestInfo(topic, partition, offsetTime, 1) response, err := dc.sendToAllAndReturnFirstSuccessful(request, dc.offsetValidator) if response != nil { return response.(*OffsetResponse).PartitionErrorAndOffsets[topic][partition].Offsets[0], err } return -1, err } // Fetch issues a single fetch request to a broker responsible for a given topic and partition and returns a FetchResponse that contains messages starting from a given offset. func (dc *DefaultConnector) Fetch(topic string, partition int32, offset int64) (*FetchResponse, error) { response, err := dc.tryFetch(topic, partition, offset) if err != nil { return response, err } if response.Error(topic, partition) == ErrNotLeaderForPartition { Infof(dc, "Sent a fetch reqest to a non-leader broker. Refleshing metadata for topic %s and retrying the request", topic) dc.refreshMetadata([]string{topic}) response, err = dc.tryFetch(topic, partition, offset) } return response, err } func (dc *DefaultConnector) tryFetch(topic string, partition int32, offset int64) (*FetchResponse, error) { link := dc.getLeader(topic, partition) if link == nil { leader, err := dc.tryGetLeader(topic, partition, dc.config.MetadataRetries) if err != nil { return nil, err } link = leader } request := new(FetchRequest) request.MinBytes = dc.config.FetchMinBytes request.MaxWait = dc.config.FetchMaxWaitTime request.AddFetch(topic, partition, offset, dc.config.FetchSize) bytes, err := dc.syncSendAndReceive(link, request) if err != nil { dc.removeLeader(topic, partition) return nil, err } decoder := NewBinaryDecoder(bytes) response := new(FetchResponse) decodingErr := response.Read(decoder) if decodingErr != nil { dc.removeLeader(topic, partition) Errorf(dc, "Could not decode a FetchResponse. 
Reason: %s", decodingErr.Reason()) return nil, decodingErr.Error() } return response, nil } // GetOffset gets the offset for a given group, topic and partition from Kafka. A part of new offset management API. func (dc *DefaultConnector) GetOffset(group string, topic string, partition int32) (int64, error) { coordinator, err := dc.getOffsetCoordinator(group) if err != nil { return InvalidOffset, err } request := NewOffsetFetchRequest(group) request.AddOffset(topic, partition) bytes, err := dc.syncSendAndReceive(coordinator, request) if err != nil { return InvalidOffset, err } response := new(OffsetFetchResponse) decodingErr := dc.decode(bytes, response) if decodingErr != nil { Errorf(dc, "Could not decode an OffsetFetchResponse. Reason: %s", decodingErr.Reason()) return InvalidOffset, decodingErr.Error() } topicOffsets, exist := response.Offsets[topic] if !exist { return InvalidOffset, fmt.Errorf("OffsetFetchResponse does not contain information about requested topic") } if offset, exists := topicOffsets[partition]; !exists { return InvalidOffset, fmt.Errorf("OffsetFetchResponse does not contain information about requested partition") } else if offset.Error != ErrNoError { return InvalidOffset, offset.Error } else { return offset.Offset, nil } } // CommitOffset commits the offset for a given group, topic and partition to Kafka. A part of new offset management API. 
func (dc *DefaultConnector) CommitOffset(group string, topic string, partition int32, offset int64) error { for i := 0; i <= dc.config.CommitOffsetRetries; i++ { if err := dc.tryCommitOffset(group, topic, partition, offset); err == nil { return nil } Debugf(dc, "Failed to commit offset %d for group %s, topic %s, partition %d after %d try", offset, group, topic, partition, i) time.Sleep(dc.config.CommitOffsetBackoff) } return fmt.Errorf("Could not get commit offset %d for group %s, topic %s, partition %d after %d retries", offset, group, topic, partition, dc.config.CommitOffsetRetries) } func (dc *DefaultConnector) GetLeader(topic string, partition int32) (BrokerLink, error) { link := dc.getLeader(topic, partition) if link == nil { link, err := dc.tryGetLeader(topic, partition, dc.config.MetadataRetries) if err != nil { return nil, err } return link, nil } return link, nil } // Close tells the Connector to close all existing connections and stop. // This method is NOT blocking but returns a channel which will get a single value once the closing is finished. 
func (dc *DefaultConnector) Close() <-chan bool { closed := make(chan bool) go func() { dc.closeBrokerLinks() for _, link := range dc.bootstrapLinks { link.stop <- true } dc.bootstrapLinks = nil dc.links = nil closed <- true }() return closed } func (dc *DefaultConnector) closeBrokerLinks() { for _, link := range dc.links { link.stop <- true } } func (dc *DefaultConnector) refreshMetadata(topics []string) { if len(dc.bootstrapLinks) == 0 { for i := 0; i < len(dc.config.BrokerList); i++ { broker := dc.config.BrokerList[i] hostPort := strings.Split(broker, ":") if len(hostPort) != 2 { panic(fmt.Sprintf("incorrect broker connection string: %s", broker)) } port, err := strconv.Atoi(hostPort[1]) if err != nil { panic(fmt.Sprintf("incorrect port in broker connection string: %s", broker)) } dc.bootstrapLinks = append(dc.bootstrapLinks, NewBrokerLink(&Broker{ID: -1, Host: hostPort[0], Port: int32(port)}, dc.config.KeepAlive, dc.config.KeepAliveTimeout, dc.config.MaxConnectionsPerBroker)) } } response, err := dc.sendToAllLinks(dc.links, NewMetadataRequest(topics), dc.topicMetadataValidator(topics)) if err != nil { Warnf(dc, "Could not get topic metadata from all known brokers, trying bootstrap brokers...") if response, err = dc.sendToAllLinks(dc.bootstrapLinks, NewMetadataRequest(topics), dc.topicMetadataValidator(topics)); err != nil { Errorf(dc, "Could not get topic metadata from all known brokers") return } } dc.refreshLeaders(response.(*MetadataResponse)) } func (dc *DefaultConnector) refreshLeaders(response *MetadataResponse) { brokers := make(map[int32]*brokerLink) for _, broker := range response.Brokers { brokers[broker.ID] = NewBrokerLink(broker, dc.config.KeepAlive, dc.config.KeepAliveTimeout, dc.config.MaxConnectionsPerBroker) } if len(brokers) != 0 && len(response.TopicsMetadata) != 0 { dc.closeBrokerLinks() dc.links = make([]*brokerLink, 0) } for _, metadata := range response.TopicsMetadata { for _, partitionMetadata := range metadata.PartitionsMetadata { if 
leader, exists := brokers[partitionMetadata.Leader]; exists { dc.putLeader(metadata.Topic, partitionMetadata.PartitionID, leader) } else { Warnf(dc, "Topic Metadata response has no leader present for topic %s, parition %d", metadata.Topic, partitionMetadata.PartitionID) //TODO: warn about incomplete broker list } } } } func (dc *DefaultConnector) getMetadata(topics []string) (*MetadataResponse, error) { response, err := dc.sendToAllAndReturnFirstSuccessful(NewMetadataRequest(topics), dc.topicMetadataValidator(topics)) if response != nil { return response.(*MetadataResponse), err } return nil, err } func (dc *DefaultConnector) tryGetLeader(topic string, partition int32, retries int) (*brokerLink, error) { for i := 0; i <= retries; i++ { dc.refreshMetadata([]string{topic}) if link := dc.getLeader(topic, partition); link != nil { return link, nil } time.Sleep(dc.config.MetadataBackoff) } return nil, fmt.Errorf("Could not get leader for %s:%d after %d retries", topic, partition, retries) } func (dc *DefaultConnector) getLeader(topic string, partition int32) *brokerLink { leadersForTopic, exists := dc.leaders[topic] if !exists { return nil } return leadersForTopic[partition] } func (dc *DefaultConnector) putLeader(topic string, partition int32, leader *brokerLink) { Tracef(dc, "putLeader for topic %s, partition %d - %s", topic, partition, leader.broker) dc.lock.Lock() defer dc.lock.Unlock() if _, exists := dc.leaders[topic]; !exists { dc.leaders[topic] = make(map[int32]*brokerLink) } exists := false for _, link := range dc.links { if *link.broker == *leader.broker { exists = true break } } if !exists { dc.links = append(dc.links, leader) } dc.leaders[topic][partition] = leader } func (dc *DefaultConnector) removeLeader(topic string, partition int32) { dc.lock.Lock() defer dc.lock.Unlock() if leadersForTopic, exists := dc.leaders[topic]; exists { delete(leadersForTopic, partition) } } func (dc *DefaultConnector) refreshOffsetCoordinator(group string) error { for i := 0; 
i <= dc.config.ConsumerMetadataRetries; i++ { if err := dc.tryRefreshOffsetCoordinator(group); err == nil { return nil } Debugf(dc, "Failed to get consumer coordinator for group %s after %d try", group, i) time.Sleep(dc.config.ConsumerMetadataBackoff) } return fmt.Errorf("Could not get consumer coordinator for group %s after %d retries", group, dc.config.ConsumerMetadataRetries) } func (dc *DefaultConnector) tryRefreshOffsetCoordinator(group string) error { request := NewConsumerMetadataRequest(group) response, err := dc.sendToAllAndReturnFirstSuccessful(request, dc.consumerMetadataValidator) if err != nil { Infof(dc, "Could not get consumer metadata from all known brokers") return err } dc.offsetCoordinators[group] = response.(*ConsumerMetadataResponse).Coordinator.ID return nil } func (dc *DefaultConnector) getOffsetCoordinator(group string) (*brokerLink, error) { coordinatorID, exists := dc.offsetCoordinators[group] if !exists { err := dc.refreshOffsetCoordinator(group) if err != nil { return nil, err } coordinatorID = dc.offsetCoordinators[group] } Debugf(dc, "Offset coordinator for group %s: %d", group, coordinatorID) var brokerLink *brokerLink for _, link := range dc.links { if link.broker.ID == coordinatorID { brokerLink = link break } } if brokerLink == nil { return nil, fmt.Errorf("Could not find broker with node id %d", coordinatorID) } return brokerLink, nil } func (dc *DefaultConnector) tryCommitOffset(group string, topic string, partition int32, offset int64) error { coordinator, err := dc.getOffsetCoordinator(group) if err != nil { return err } request := NewOffsetCommitRequest(group) request.AddOffset(topic, partition, offset, time.Now().Unix(), "") bytes, err := dc.syncSendAndReceive(coordinator, request) if err != nil { return err } response := new(OffsetCommitResponse) decodingErr := dc.decode(bytes, response) if decodingErr != nil { Errorf(dc, "Could not decode an OffsetCommitResponse. 
Reason: %s", decodingErr.Reason()) return decodingErr.Error() } topicErrors, exist := response.CommitStatus[topic] if !exist { return fmt.Errorf("OffsetCommitResponse does not contain information about requested topic") } if partitionError, exist := topicErrors[partition]; !exist { return fmt.Errorf("OffsetCommitResponse does not contain information about requested partition") } else if partitionError != ErrNoError { return partitionError } return nil } func (dc *DefaultConnector) decode(bytes []byte, response Response) *DecodingError { decoder := NewBinaryDecoder(bytes) decodingErr := response.Read(decoder) if decodingErr != nil { Errorf(dc, "Could not decode a response. Reason: %s", decodingErr.Reason()) return decodingErr } return nil } func (dc *DefaultConnector) sendToAllAndReturnFirstSuccessful(request Request, check func([]byte) Response) (Response, error) { if len(dc.links) == 0 { dc.refreshMetadata(nil) } response, err := dc.sendToAllLinks(dc.links, request, check) if err != nil { response, err = dc.sendToAllLinks(dc.bootstrapLinks, request, check) } return response, err } func (dc *DefaultConnector) sendToAllLinks(links []*brokerLink, request Request, check func([]byte) Response) (Response, error) { if len(links) == 0 { return nil, errors.New("Empty broker list") } responses := make(chan *rawResponseAndError, len(links)) for i := 0; i < len(links); i++ { link := links[i] go func() { bytes, err := dc.syncSendAndReceive(link, request) responses <- &rawResponseAndError{bytes, link, err} }() } var response *rawResponseAndError for i := 0; i < len(links); i++ { response = <-responses if response.err == nil { if checkResult := check(response.bytes); checkResult != nil { return checkResult, nil } response.err = errors.New("Check result did not pass") } // Infof(dc, "Could not process request with broker %s:%d", response.link.broker.Host, response.link.broker.Port) } return nil, response.err } func (dc *DefaultConnector) syncSendAndReceive(link *brokerLink, 
request Request) ([]byte, error) { id, conn, err := link.GetConnection() if err != nil { link.Failed() return nil, err } if err := dc.send(id, conn, request); err != nil { link.Failed() return nil, err } bytes, err := dc.receive(conn) if err != nil { link.Failed() return nil, err } link.Succeeded() link.connectionPool.Return(conn) return bytes, err } func (dc *DefaultConnector) send(correlationID int32, conn *net.TCPConn, request Request) error { writer := NewRequestHeader(correlationID, dc.config.ClientID, request) bytes := make([]byte, writer.Size()) encoder := NewBinaryEncoder(bytes) writer.Write(encoder) conn.SetWriteDeadline(time.Now().Add(dc.config.WriteTimeout)) _, err := conn.Write(bytes) return err } func (dc *DefaultConnector) receive(conn *net.TCPConn) ([]byte, error) { conn.SetReadDeadline(time.Now().Add(dc.config.ReadTimeout)) header := make([]byte, 8) _, err := io.ReadFull(conn, header) if err != nil { return nil, err } decoder := NewBinaryDecoder(header) length, err := decoder.GetInt32() if err != nil { return nil, err } response := make([]byte, length-4) _, err = io.ReadFull(conn, response) if err != nil { return nil, err } return response, nil } func (dc *DefaultConnector) topicMetadataValidator(topics []string) func(bytes []byte) Response { return func(bytes []byte) Response { response := new(MetadataResponse) err := dc.decode(bytes, response) if err != nil { return nil } if len(topics) > 0 { for _, topic := range topics { var topicMetadata *TopicMetadata for _, topicMetadata = range response.TopicsMetadata { if topicMetadata.Topic == topic { break } } if topicMetadata.Error != ErrNoError { Infof(dc, "Topic metadata err: %s", topicMetadata.Error) return nil } for _, partitionMetadata := range topicMetadata.PartitionsMetadata { if partitionMetadata.Error != ErrNoError && partitionMetadata.Error != ErrReplicaNotAvailable { Infof(dc, "Partition metadata err: %s", partitionMetadata.Error) return nil } } } } return response } } func (dc 
*DefaultConnector) consumerMetadataValidator(bytes []byte) Response { response := new(ConsumerMetadataResponse) err := dc.decode(bytes, response) if err != nil || response.Error != ErrNoError { return nil } return response } func (dc *DefaultConnector) offsetValidator(bytes []byte) Response { response := new(OffsetResponse) err := dc.decode(bytes, response) if err != nil { return nil } for _, offsets := range response.PartitionErrorAndOffsets { for _, offset := range offsets { if offset.Error != ErrNoError { return nil } } } return response } type BrokerLink interface { Failed() Succeeded() GetConnection() (int32, *net.TCPConn, error) ReturnConnection(*net.TCPConn) } type brokerLink struct { sync.RWMutex broker *Broker connectionPool *connectionPool lastConnectTime time.Time lastSuccessfulConnectTime time.Time failedAttempts int correlationIds chan int32 stop chan bool } func NewBrokerLink(broker *Broker, keepAlive bool, keepAliveTimeout time.Duration, maxConnectionsPerBroker int) *brokerLink { brokerConnect := fmt.Sprintf("%s:%d", broker.Host, broker.Port) correlationIds := make(chan int32) stop := make(chan bool) go correlationIDGenerator(correlationIds, stop) return &brokerLink{ broker: broker, connectionPool: newConnectionPool(brokerConnect, maxConnectionsPerBroker, keepAlive, keepAliveTimeout), correlationIds: correlationIds, stop: stop, } } func (bl *brokerLink) Failed() { bl.Lock() defer bl.Unlock() bl.lastConnectTime = time.Now() bl.failedAttempts++ } func (bl *brokerLink) Succeeded() { bl.Lock() defer bl.Unlock() timestamp := time.Now() bl.lastConnectTime = timestamp bl.lastSuccessfulConnectTime = timestamp } func (bl *brokerLink) ReturnConnection(conn *net.TCPConn) { bl.connectionPool.Return(conn) } func (bl *brokerLink) GetConnection() (int32, *net.TCPConn, error) { correlationID := <-bl.correlationIds conn, err := bl.connectionPool.Borrow() return correlationID, conn, err } func correlationIDGenerator(out chan int32, stop chan bool) { var correlationID 
int32 for { select { case out <- correlationID: correlationID++ case <-stop: return } } } type rawResponseAndError struct { bytes []byte link BrokerLink err error }
Godeps/_workspace/src/github.com/elodina/siesta/connector.go
0.742048
0.416144
connector.go
starcoder
package pvss import ( "math/big" "github.com/jinzhu/copier" "github.com/torusresearch/torus-common/common" "github.com/torusresearch/torus-common/secp256k1" pcmn "github.com/torusresearch/torus-node/common" ) // Mobile Proactive Secret Sharing // Generate random polynomial // Note: this does not have a y-intercept of 0 func generateRandomPolynomial(threshold int) *pcmn.PrimaryPolynomial { coeff := make([]big.Int, threshold) for i := 0; i < threshold; i++ { //randomly choose coeffs coeff[i] = *RandomBigInt() } return &pcmn.PrimaryPolynomial{Coeff: coeff, Threshold: threshold} } func genPolyForTarget(x int, threshold int) *pcmn.PrimaryPolynomial { tempPoly := generateRandomPolynomial(threshold) var poly pcmn.PrimaryPolynomial _ = copier.Copy(&poly, &tempPoly) valAtX := polyEval(*tempPoly, x) temp := new(big.Int).Sub(&poly.Coeff[0], valAtX) poly.Coeff[0].Mod(temp, secp256k1.GeneratorOrder) return &poly } // LagrangePolys is used in PSS // When each share is subshared, each share is associated with a commitment polynomial // we then choose k such subsharings to form the refreshed shares and secrets // those refreshed shares are lagrange interpolated, but they also correspond to a langrage // interpolated polynomial commitment that is different from the original commitment // here, we calculate this interpolated polynomial commitment func LagrangePolys(indexes []int, polys [][]common.Point) (res []common.Point) { if len(polys) == 0 { return } if len(indexes) != len(polys) { return } for l := 0; l < len(polys[0]); l++ { sum := common.Point{X: *big.NewInt(int64(0)), Y: *big.NewInt(int64(0))} for j, index := range indexes { lambda := new(big.Int).SetInt64(int64(1)) upper := new(big.Int).SetInt64(int64(1)) lower := new(big.Int).SetInt64(int64(1)) for _, otherIndex := range indexes { if otherIndex != index { tempUpper := big.NewInt(int64(0)) tempUpper.Sub(tempUpper, big.NewInt(int64(otherIndex))) upper.Mul(upper, tempUpper) upper.Mod(upper, secp256k1.GeneratorOrder) 
tempLower := big.NewInt(int64(index)) tempLower.Sub(tempLower, big.NewInt(int64(otherIndex))) tempLower.Mod(tempLower, secp256k1.GeneratorOrder) lower.Mul(lower, tempLower) lower.Mod(lower, secp256k1.GeneratorOrder) } } // finite field division inv := new(big.Int) inv.ModInverse(lower, secp256k1.GeneratorOrder) lambda.Mul(upper, inv) lambda.Mod(lambda, secp256k1.GeneratorOrder) tempPt := common.BigIntToPoint(secp256k1.Curve.ScalarMult(&polys[j][l].X, &polys[j][l].Y, lambda.Bytes())) sum = common.BigIntToPoint(secp256k1.Curve.Add(&tempPt.X, &tempPt.Y, &sum.X, &sum.Y)) } res = append(res, sum) } return }
pvss/pss.go
0.627038
0.513729
pss.go
starcoder
package enigma // ReflectorModel specifies the type of reflector (different Enigma models supported different reflectors) type ReflectorModel string // all supported reflector models const ( UkwK ReflectorModel = "K" UkwA ReflectorModel = "A" UkwB ReflectorModel = "B" UkwC ReflectorModel = "C" UkwBThin ReflectorModel = "BThin" UkwCThin ReflectorModel = "CThin" UkwD ReflectorModel = "D" UkwT ReflectorModel = "T" ) // IsThin shows whether this reflector model is thin, or normal size, // only thin reflectors can fit to the small slot in 4-rotor Enigma model func (r ReflectorModel) IsThin() bool { return reflectorDefinitions[r].isThin } // IsMovable shows if this reflector model can rotate func (r ReflectorModel) IsMovable() bool { return reflectorDefinitions[r].isMovable } // IsRewirable shows if this reflector model can be custom-rewired func (r ReflectorModel) IsRewirable() bool { return reflectorDefinitions[r].isRewirable } func (r ReflectorModel) getWiring() string { return reflectorDefinitions[r].wiring } type reflectorDefinition struct { isRewirable bool isMovable bool isThin bool wiring string } var reflectorDefinitions = map[ReflectorModel]reflectorDefinition{ UkwK: { isRewirable: false, isMovable: true, isThin: false, wiring: "IMETCGFRAYSQBZXWLHKDVUPOJN", }, UkwA: { isRewirable: false, isMovable: false, isThin: false, wiring: "EJMZALYXVBWFCRQUONTSPIKHGD", }, UkwB: { isRewirable: false, isMovable: false, isThin: false, wiring: "YRUHQSLDPXNGOKMIEBFZCWVJAT", }, UkwC: { isRewirable: false, isMovable: false, isThin: false, wiring: "FVPJIAOYEDRZXWGCTKUQSBNMHL", }, UkwBThin: { isRewirable: false, isMovable: false, isThin: true, wiring: "ENKQAUYWJICOPBLMDXZVFTHRGS", }, UkwCThin: { isRewirable: false, isMovable: false, isThin: true, wiring: "RDOBJNTKVEHMLFCWZAXGYIPSUQ", }, UkwD: { isRewirable: true, isMovable: false, isThin: false, wiring: "FOWULAQYSRTEZVBXGJIKDNCPHM", // corresponds to the wiring "AV BO CT DM EZ FN GX HQ IS KR LU PW" }, UkwT: { isRewirable: false, 
isMovable: false, isThin: false, wiring: "GEKPBTAUMOCNILJDXZYFHWVQSR", }, }
reflector_definition.go
0.721645
0.446374
reflector_definition.go
starcoder
package day03 import ( "math" ) const ( BearEast = iota BearSouth BearWest BearNorth ) type Position struct { X, Y int } func (p *Position) Move(bearing, moves int) { switch bearing { case BearNorth: p.Y -= moves case BearEast: p.X += moves case BearSouth: p.Y += moves case BearWest: p.X -= moves } } func (p Position) Neighbours() []Position { neighbours := []int{-1, 0, 1} positions := make([]Position, 0, len(neighbours)^2-1) for _, xOffset := range neighbours { for _, yOffset := range neighbours { // ignore centre (self) if xOffset == 0 && yOffset == 0 { continue } positions = append(positions, Position{ X: p.X + xOffset, Y: p.Y + yOffset, }) } } return positions } func Part1(targetValue int) int { var ( side, sideLength int position Position ) cellValue := 1 for cellValue < targetValue { side++ // the length increments every other side if side%2 == 0 { sideLength++ } var moves int if cellValue+sideLength <= targetValue { // jump to next corner moves = sideLength } else { // jump to target value moves = targetValue - cellValue } bearing := side % 4 position.Move(bearing, moves) cellValue += moves } // calculate manhatten distance return int(math.Abs(float64(position.X)) + math.Abs(float64(position.Y))) } func Part2(targetAfterValue int) int { var ( side, sideLength int cellValue int position Position ) // pre-populate first cell because it has no neighbours yet cells := map[Position]int{ position: 1, } for cellValue <= targetAfterValue { side++ // the length increments every other side if side%2 == 0 { sideLength++ } bearing := side % 4 for moves := 0; moves < sideLength && cellValue <= targetAfterValue; moves++ { position.Move(bearing, 1) // sum neighbours cellValue = 0 for _, neighbour := range position.Neighbours() { if val, ok := cells[neighbour]; ok { cellValue += val } } cells[position] = cellValue } } return cellValue }
2017/day03/day03.go
0.67971
0.447038
day03.go
starcoder
package semver const ( LessThan = iota - 1 Equal GreaterThan ) func (a *Version) LessThan(b *Version) bool { return LessThan == compare(a, b) } func (a *Version) GreaterThan(b *Version) bool { return GreaterThan == compare(a, b) } func (a *Version) LessThanOrEqual(b *Version) bool { comparison := compare(a, b) return LessThan == comparison || Equal == comparison } func (a *Version) GreaterThanOrEqual(b *Version) bool { comparison := compare(a, b) return GreaterThan == comparison || Equal == comparison } func (a *Version) Equal(b *Version) bool { return Equal == compare(a, b) } func (a *Version) NotEqual(b *Version) bool { return Equal != compare(a, b) } func (a *Version) Compare(b *Version, operator string) bool { switch operator { case ">": return GreaterThan == compare(a, b) case ">=": comparison := compare(a, b) return GreaterThan == comparison || Equal == comparison case "<": return LessThan == compare(a, b) case "<=": comparison := compare(a, b) return LessThan == comparison || Equal == comparison case "==", "=": return Equal == compare(a, b) case "!=", "<>": return Equal != compare(a, b) } return false } func compare(a *Version, b *Version) int { if d := comparePart(a.major(), b.major()); d != Equal { return d } if d := comparePart(a.minor(), b.minor()); d != Equal { return d } if d := comparePart(a.patch(), b.patch()); d != Equal { return d } if d := compareStability(a.stability(), b.stability()); d != Equal { return d } aPre := a.pre() bPre := b.pre() if aPre > bPre { return GreaterThan } if aPre < bPre { return LessThan } return Equal } func compareStability(a string, b string) int { matches := map[string]int{"dev": 1, "alpha": 2, "beta": 3, "RC": 4, "stable": 5, "": 5} if matches[a] > matches[b] { return GreaterThan } if matches[a] < matches[b] { return LessThan } return Equal } func comparePart(a int, b int) int { if a > b { return GreaterThan } if b > a { return LessThan } return Equal }
compare.go
0.815637
0.432003
compare.go
starcoder
package types var SubtleCyphers []Cypher = []Cypher{ Cypher{Name: "Analeptic", Level: "1d6 + 2", Effect: ` Restores $(LEVEL) to the user’s Speed Pool.`}, Cypher{Name: "Best tool", Level: "1d6", Effect: ` Provides an additional asset for any one task using a tool, even if that means exceeding the normal limit of two assets.`}, Cypher{Name: "Burst of speed", Level: "1d6", Effect: ` For one minute, a user who normally can move a short distance as an action can move a long distance instead.`}, Cypher{Name: "Contingent activator", Level: "1d6 + 2", Effect: ` If the device is activated in conjunction with another cypher, the user can specify a condition under which the linked cypher will activate. The linked cypher retains the contingent command until it is used (either normally or contingently). For example, when this cypher is linked to a cypher that provides a form of healing or protection, the user could specify that the linked cypher will activate if they become damaged to a certain degree or are subject to a particular dangerous circumstance. Until the linked cypher is used, this cypher continues to count toward the maximum number of cyphers a PC can carry.`}, Cypher{Name: "Curative", Level: "1d6 + 2", Effect: ` Restores $(LEVEL) to the user’s Might Pool.`}, Cypher{Name: "Darksight", Level: "1d6", Effect: ` Grants the ability to see in the dark for $(LEVEL * 5) hour(s).`}, Cypher{Name: "Disarm", Level: "1d6 + 1", Effect: ` One NPC within immediate range whose level is lower than $(LEVEL) drops whatever they are holding.`}, Cypher{Name: "Eagleseye", Level: "1d6", Effect: ` Grants the ability to see ten times as far as normal for $(LEVEL) hour(s).`}, Cypher{Name: "Effect resistance", Level: "1d6 + 1", Effect: ` Provides a chance for additional resistance to directly damaging effects of all kinds, such as fire, lightning, and the like, for one day. (It does not provide resistance to blunt force, slashing, or piercing attacks.) 
If the level of the effect is $(LEVEL) or lower, the user gains an additional defense roll to avoid it. On a successful defense roll, treat the attack as if the user had succeeded on their regular defense roll. (If the user is an NPC, a PC attacking them with this kind of effect must succeed on two attack rolls to harm them.)`}, Cypher{Name: "Effort enhancer (combat)", Level: "1d6 + 1", Effect: ` For the next hour, the user can apply one free level of Effort to any task (including a combat task) without spending points from a Pool. The free level of Effort provided by this cypher does not count toward the maximum amount of Effort a character can normally apply to one task. Once this free level of Effort is used, the effect of the cypher ends.`}, Cypher{Name: "Effort enhancer (noncombat)", Level: "1d6", Effect: ` For the next hour, the user can apply one free level of Effort to a noncombat task without spending points from a Pool. The level of Effort provided by this cypher does not count toward the maximum amount of Effort a character can normally apply to one task. Once this free level of Effort is used, the effect of the cypher ends.`}, Cypher{Name: "Enduring shield", Level: "1d6 + 4", Effect: ` For the next day, the user has an asset to Speed defense rolls.`}, Cypher{Name: "Intellect booster", Level: "1d6 + 2", Effect: ` Adds 1 to the user’s Intellect Edge for one hour (or 2 if the cypher is level 5 or higher).`}, Cypher{Name: "Intelligence enhancement", Level: "1d6", Effect: ` All of the user’s tasks involving intelligent deduction—such as playing chess, inferring a connection between clues, solving a mathematical problem, finding a bug in computer code, and so on—are eased by two steps for one hour. In the subsequent hour, the strain hinders the same tasks by two steps.`}, Cypher{Name: "Knowledge enhancement", Level: "1d6", Effect: ` For the next day, the character has training in a predetermined skill (or two skills if the cypher is level 5 or higher). 
The skill could be anything (including something specific to the operation of a particular device), or roll a d100 to choose a common skill. 01–10 Melee attacks 11–20 Ranged attacks 21–40 One type of academic or esoteric lore (biology, history, magic, and so on) 41–50 Repairing (sometimes specific to one device) 51–60 Crafting (usually specific to one thing) 61–70 Persuasion 71–75 Healing 76–80 Speed defense 81–85 Intellect defense 86–90 Swimming 91–95 Riding 96–00 Sneaking`}, Cypher{Name: "Meditation aid", Level: "1d6 + 2", Effect: ` Restores $(LEVEL) to the user’s Intellect Pool.`}, Cypher{Name: "Mind stabilizer", Level: "1d6", Effect: ` The user gains +5 to Armor against Intellect damage.`}, Cypher{Name: "Motion sensor", Level: "1d6 + 2", Effect: ` For $(LEVEL) hour(s), the user knows when any movement occurs within short range, and when large creatures or objects move within long range (the cypher distinguishes between the two). It also indicates the number and size of the creatures or objects in motion.`}, Cypher{Name: "Nutrition and hydration", Level: "1d6 + 1", Effect: ` The user can go without food and water for $(LEVEL) days level without ill effect.`}, Cypher{Name: "Perfect memory", Level: "1d6", Effect: ` Allows the user to mentally record everything they see for $(LEVEL * 30) seconds and store the recording permanently in their long-term memory. This cypher is useful for watching someone pick a specific lock, enter a complex code, or do something else that happens quickly.`}, Cypher{Name: "Perfection", Level: "1d6 + 2", Effect: ` The user treats their next action as if they had rolled a natural 20.`}, Cypher{Name: "Reflex enhancer", Level: "1d6", Effect: ` All tasks involving manual dexterity—such as pickpocketing, lockpicking, juggling, operating on a patient, defusing a bomb, and so on—are eased by two steps for one hour.`}, Cypher{Name: "Rejuvenator", Level: "1d6 + 2", Effect: ` Restores $(LEVEL) points to one random stat Pool. 
01–50 Might Pool 51–75 Speed Pool 76–00 Intellect Pool`}, Cypher{Name: "Remembering", Level: "1d6", Effect: ` Allows the user to recall any one experience they’ve ever had. The experience can be no longer than one minute per cypher level, but the recall is perfect, so (for example) if they saw someone dial a phone, they will remember the number.`}, Cypher{Name: "Repel", Level: "1d6 + 1", Effect: ` One NPC within immediate range who is of a level lower than $(LEVEL) decides to leave, using their next five rounds to move away quickly.`}, Cypher{Name: "Secret", Level: "1d6 + 2", Effect: ` The user can ask the GM one question and get a general answer. The GM assigns a level to the question, so the more obscure the answer, the more difficult the task. Generally, knowledge that a PC could find by looking somewhere other than their current location is level 1, and obscure knowledge of the past is level 7. Gaining knowledge of the future is level 10, and such knowledge is always open to interpretation. The cypher cannot provide an answer to a question above level $(LEVEL).`}, Cypher{Name: "Skill boost", Level: "1d6", Effect: ` Dramatically but temporarily alters the user’s mind and body so they can ease one specific kind of physical action by three steps. Once activated, this boost can be used $(LEVEL) times , but only within a twenty-four-hour period. The boost takes effect each time the action is performed. For example, a level 3 cypher boosts the first three times that action is attempted. Roll a d100 to determine the action. 
01–15 Melee attack 16–30 Ranged attack 31–40 Speed defense 41–50 Might defense 51–60 Intellect defense 61–68 Jumping 69–76 Climbing 77–84 Running 85–92 Swimming 93–94 Sneaking 95–96 Balancing 97–98 Perceiving 99 Carrying 00 Escaping`}, Cypher{Name: "Speed boost", Level: "1d6 + 2", Effect: ` Adds 1 to the user’s Speed Edge for one hour (adds 2 if the cypher is level 5 or higher).`}, Cypher{Name: "Stim", Level: "1d6", Effect: ` Eases the user’s next action taken by three steps.`}, Cypher{Name: "Strength boost", Level: "1d6 + 2", Effect: ` Adds 1 to Might Edge for one hour (or 2 if the cypher is level 5 or higher).`}, Cypher{Name: "Strength enhancer", Level: "1d6", Effect: ` All noncombat tasks involving raw strength—such as breaking down a door, lifting a heavy boulder, forcing open elevator doors, competing in a weightlifting competition, and so on—are eased by two steps for one hour.`}, Cypher{Name: "<NAME>", Level: "1d6 + 4", Effect: ` For the next hour, the user regains 1 point lost to damage per round, up to a total number of points equal to $(LEVEL * 2). As each point is regained, they choose which Pool to add it to. If all their Pools are at maximum, the regeneration pauses until they take more damage, at which point it begins again (if any time remains in the hour) until the duration expires.`}, }
types/SubtleCyphers.go
0.782621
0.560974
SubtleCyphers.go
starcoder
package main import ( "fmt" "math/rand" ) // Start by building a prototype that generates 10 random tickets and displays // them in a tabular format with a nice header...The table should have four columns: // * The spaceline company providing the service // * The duration in days for the trip to Mars (one-way) // * Whether the price covers a return trip // * The price in millions of dollars // For each ticket, randomly select one of the following spacelines: Space Adventures, // SpaceX, or Virgin Galactic. // Use October 13, 2020 as the departure date for all tickets. Mars will be 62,100,000 km // away from Earth at the time. // Randomly choose the speed the ship will travel, from 16 to 30 km/s. This will determine // the duration for the trip to Mars and also the ticket price. Make faster ships more // expensive, ranging in price from $36 million to $50 million. Double the price for round trips. func main() { // Distance to Mars in km on Oct 13, 2020 const distance = 62100000 // Begin the report with header fmt.Println("Spaceline Days Trip type Price") fmt.Println("=========================================") // Spaceline, if trip is round-trip or one-way var company, tripType string // Determine price (millions USD), speed of the ship (km/s), and duration of trip (days) var price, speed, duration int for i := 0; i < 10; i++ { switch num := rand.Intn(3); num { case 0: company = "Space Adventures" case 1: company = "SpaceX " case 2: company = "Virgin Galactic " } // Get spaceship speed which will vary from 16 - 30 km/s speed = rand.Intn(14) + 16 // km/s // Min - max price = $14M price = speed - 16 + 36 // Get one way duration in days duration = distance / (speed * 3600 * 24) if num := rand.Intn(2); num == 0 { tripType = "One-way " } else { tripType = "Round-trip" price *= 2 } fmt.Printf("%s %v %s %v\n", company, duration, tripType, price) } fmt.Println("=========================================") }
lesson05/ticket-to-mars.go
0.564819
0.487978
ticket-to-mars.go
starcoder
package arrays // StringContains function checks if a string element is present in a slice. // false is returned if the given slice is nil. func StringContains(slice []string, element string) bool { return StringIndexOf(slice, element) != -1 } // RuneContains function checks if a rune element is present in a slice. // false is returned if the given slice is nil. func RuneContains(slice []rune, element rune) bool { return RuneIndexOf(slice, element) != -1 } // ByteContains function checks if a byte element is present in a slice. // false is returned if the given slice is nil. func ByteContains(slice []byte, element byte) bool { return ByteIndexOf(slice, element) != -1 } // UintContains function checks if a uint element is present in a slice. // false is returned if the given slice is nil. func UintContains(slice []uint, element uint) bool { return UintIndexOf(slice, element) != -1 } // IntContains function checks if a int element is present in a slice. // false is returned if the given slice is nil. func IntContains(slice []int, element int) bool { return IntIndexOf(slice, element) != -1 } // Int8Contains function checks if a int8 element is present in a slice. // false is returned if the given slice is nil. func Int8Contains(slice []int8, element int8) bool { return Int8IndexOf(slice, element) != -1 } // Uint8Contains function checks if a uint8 element is present in a slice. // false is returned if the given slice is nil. func Uint8Contains(slice []uint8, element uint8) bool { return Uint8IndexOf(slice, element) != -1 } // Int16Contains function checks if a int16 element is present in a slice. // false is returned if the given slice is nil. func Int16Contains(slice []int16, element int16) bool { return Int16IndexOf(slice, element) != -1 } // Uint16Contains function checks if a uint16 element is present in a slice. // false is returned if the given slice is nil. 
func Uint16Contains(slice []uint16, element uint16) bool { return Uint16IndexOf(slice, element) != -1 } // Int32Contains function checks if a int32 element is present in a slice. // false is returned if the given slice is nil. func Int32Contains(slice []int32, element int32) bool { return Int32IndexOf(slice, element) != -1 } // Uint32Contains function checks if a uint32 element is present in a slice. // false is returned if the given slice is nil. func Uint32Contains(slice []uint32, element uint32) bool { return Uint32IndexOf(slice, element) != -1 } // Int64Contains function checks if a int64 element is present in a slice. // false is returned if the given slice is nil. func Int64Contains(slice []int64, element int64) bool { return Int64IndexOf(slice, element) != -1 } // Uint64Contains function checks if a uint64 element is present in a slice. // false is returned if the given slice is nil. func Uint64Contains(slice []uint64, element uint64) bool { return Uint64IndexOf(slice, element) != -1 }
arrays/contains.go
0.891037
0.516595
contains.go
starcoder