// NOTE: dataset-export metadata rows removed (non-Go residue from the source dump).
package battleship
import "io"
// NewGame returns a new zero game object, filled in with default values.
// TODO(stub): not implemented yet; currently returns an empty G.
func NewGame() *G {
	/* Fill in this Function */
	return &G{}
}
// G represents the game, and the state of the game. This is the main
// structure the run loop should interact with.
// TODO(stub): no fields have been defined yet.
type G struct{}
// PlaceShip allows the runner of the game to add ships to the game. The game
// must verify that it is not allowing ships to be placed that overlap.
// TODO(stub): currently a no-op that always reports success.
func (g *G) PlaceShip(ship ShipType, position string, vertical bool) error {
	/* Fill in this Function */
	return nil
}
// Guess allows the player to enter in the guesses for the round. The guess
// function must return an error if the incorrect number of guesses has been
// entered. It should also return which guesses hit something, and if any
// ships were sunk, the positions of those ships.
// TODO(stub): currently returns zero values for all results.
func (g *G) Guess(positions []string) (hits []string, sank map[ShipType][]string, err error) {
	/* Fill in this Function */
	return
}
// ShipsLeft returns which ships have not been sunk yet.
// TODO(stub): always returns an empty (non-nil) slice.
func (g *G) ShipsLeft() []ShipType {
	/* Fill in this Function */
	return []ShipType{}
}
// Score returns the current score for the game.
// TODO(stub): always returns 0.
func (g *G) Score() int {
	/* Fill in this Function */
	return 0
}
// PrintBoard prints the representation of the board to the given io.Writer.
// TODO(stub): currently writes nothing and always reports success.
func (g *G) PrintBoard(w io.Writer) error {
	/* Fill in this Function */
	return nil
}
// Round returns the current round the game is in.
// TODO(stub): always returns 1.
func (g *G) Round() int {
	/* Fill in this Function */
	return 1
}
// TotalRounds returns the total number of rounds this game is allowed to go to.
// TODO(stub): always returns 0.
func (g *G) TotalRounds() int {
	/* Fill in this Function */
	return 0
}
// Ended returns whether or not the game is still playable. Use the DidWin
// function to figure out if the user won or the computer.
// TODO(stub): always returns false.
func (g *G) Ended() bool {
	/* Fill in this Function */
	return false
}
// DidWin returns whether or not the user has won the game.
// TODO(stub): always returns false.
func (g *G) DidWin() bool {
	/* Fill in this Function */
	return false
}
// ShowBoard tells the game object that we want to show the board with all the ships revealed.
func (g *G) ShowBoard() {
/* Fill in this Function */
return
} | battleship/game.go | 0.74872 | 0.479991 | game.go | starcoder |
package gompatible
import (
"go/types"
)
// FuncChange represents a change between functions.
type FuncChange struct {
	Before *Func // the function before the change; nil when the function was added (see Kind)
	After  *Func // the function after the change; nil when the function was removed (see Kind)
}
// TypesObject returns the go/types object of the function before the change.
// NOTE(review): this dereferences fc.Before unconditionally, so it panics for
// an added function (fc.Before == nil per Kind) — confirm callers never hit
// that case.
func (fc FuncChange) TypesObject() types.Object {
	return fc.Before.Types
}
// ShowBefore renders the declaration of the function before the change, or
// the empty string when there is no function or no doc info to render from.
func (fc FuncChange) ShowBefore() string {
	if f := fc.Before; f != nil && f.Doc != nil {
		return f.Package.showASTNode(f.Doc.Decl)
	}
	return ""
}
// ShowAfter renders the declaration of the function after the change, or
// the empty string when there is no function or no doc info to render from.
func (fc FuncChange) ShowAfter() string {
	if f := fc.After; f != nil && f.Doc != nil {
		return f.Package.showASTNode(f.Doc.Decl)
	}
	return ""
}
// Kind classifies the change between the two functions as unchanged, added,
// removed, compatible, or breaking. Cases are evaluated in order, so the nil
// checks below guarantee both sides are non-nil for the later comparisons.
func (fc FuncChange) Kind() ChangeKind {
	switch {
	case fc.Before == nil && fc.After == nil:
		// might not happen
		return ChangeUnchanged
	case fc.Before == nil:
		return ChangeAdded
	case fc.After == nil:
		return ChangeRemoved
	// We deliberately avoid types.Identical: functions are identified by
	// their signatures only, ignoring parameter/result names and the
	// underlying details of the types, so the following is NOT used:
	// case types.Identical(fc.Before.Types.Type().Underlying(), fc.After.Types.Type().Underlying()):
	case identicalSansNames(fc.Before.Types, fc.After.Types):
		return ChangeUnchanged
	case fc.isCompatible():
		return ChangeCompatible
	default:
		return ChangeBreaking
	}
}
// identicalSansNames compares two functions to check if their types are identical
// according to the names. e.g.
// - It does not care if the names of the parameters or return values differ
// - It does not care if the implementations of the types differ
func identicalSansNames(fa, fb *types.Func) bool {
// must always succeed
sigA := fa.Type().(*types.Signature)
sigB := fb.Type().(*types.Signature)
var (
lenParams = sigA.Params().Len()
lenResults = sigA.Results().Len()
)
if sigB.Params().Len() != lenParams {
return false
}
if sigB.Results().Len() != lenResults {
return false
}
for i := 0; i < lenParams; i++ {
if types.TypeString(sigA.Params().At(i).Type(), nil) != types.TypeString(sigB.Params().At(i).Type(), nil) {
return false
}
}
for i := 0; i < lenResults; i++ {
if types.TypeString(sigA.Results().At(i).Type(), nil) != types.TypeString(sigB.Results().At(i).Type(), nil) {
return false
}
}
return true
}
// sigParamsCompatible determines if the parameter parts of two signatures of
// functions are compatible. They are compatible if either:
//   - The numbers of parameters are equal and the types of the parameters are
//     compatible for each of them, or
//   - The latter signature has exactly one extra parameter and that extra
//     parameter is variadic.
func sigParamsCompatible(s1, s2 *types.Signature) bool {
	extra := tuplesCompatibleExtra(s1.Params(), s2.Params(), cmpLower)
	switch {
	case extra == nil:
		// s2 params are incompatible with s1 params
		return false
	case len(extra) == 0:
		// s2 params are compatible with s1 params
		return true
	case len(extra) == 1:
		// Compatible only when the single extra parameter is what made the
		// signature variadic.
		return !s1.Variadic() && s2.Variadic()
	}
	return false
}
// sigResultsCompatible determines if the result parts of two signatures of
// functions are compatible.
func sigResultsCompatible(s1, s2 *types.Signature) bool {
	// A function that returned nothing remains compatible regardless of what
	// results were added later.
	if s1.Results().Len() == 0 {
		return true
	}
	// Otherwise every result must match pairwise and nothing may be appended.
	extra := tuplesCompatibleExtra(s1.Results(), s2.Results(), cmpUpper)
	return extra != nil && len(extra) == 0
}
// tuplesCompatibleExtra compares two tuples element-wise and returns the
// trailing variables of p2 that have no counterpart in p1. It returns nil
// when p1 is longer than p2 or when any paired types compare as neither
// equal nor in the accepted direction.
func tuplesCompatibleExtra(p1, p2 *types.Tuple, typeDirection cmp) []*types.Var {
	n1, n2 := p1.Len(), p2.Len()
	if n1 > n2 {
		return nil
	}
	// Every shared position must be equal or compatible in typeDirection.
	for i := 0; i < n1; i++ {
		c := cmpTypes(p1.At(i).Type(), p2.At(i).Type())
		if c != cmpEqual && c != typeDirection {
			return nil
		}
	}
	// Collect the extra tail of p2 (empty, non-nil, when lengths match).
	extra := make([]*types.Var, 0, n2-n1)
	for i := n1; i < n2; i++ {
		extra = append(extra, p2.At(i))
	}
	return extra
}
func (fc FuncChange) isCompatible() bool {
if fc.Before == nil || fc.After == nil {
return false
}
typeBefore, typeAfter := fc.Before.Types.Type(), fc.After.Types.Type()
if typeBefore == nil || typeAfter == nil {
return false
}
sigBefore, sigAfter := typeBefore.(*types.Signature), typeAfter.(*types.Signature)
if sigParamsCompatible(sigBefore, sigAfter) == false {
return false
}
if sigResultsCompatible(sigBefore, sigAfter) == false {
return false
}
return true
} | func.go | 0.743634 | 0.464719 | func.go | starcoder |
package mysql_db
import (
flatbuffers "github.com/google/flatbuffers/go"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/go-mysql-server/sql/mysql_db/serial"
)
// serializePrivilegeTypes writes the given PrivilegeTypes into the flatbuffer
// Builder using the given flatbuffer start function, and returns the offset.
// This helper is shared by PrivilegeSetColumn, PrivilegeSetTable, and
// PrivilegeSetDatabase.
func serializePrivilegeTypes(b *flatbuffers.Builder, StartPTVector func(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT, pts []sql.PrivilegeType) flatbuffers.UOffsetT {
	// The vector is a set of indexes, so write order is irrelevant.
	StartPTVector(b, len(pts))
	for _, priv := range pts {
		b.PrependInt32(int32(priv))
	}
	return b.EndVector(len(pts))
}
// TODO: should have a generic serialize strings helper method if used in future
// serializeVectorOffsets writes the given offsets slice to the flatbuffer
// Builder using the given start vector function, and returns the offset.
// Because the builder prepends, callers must pass offsets already reversed
// (or order-independent sets).
func serializeVectorOffsets(b *flatbuffers.Builder, StartVector func(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT, offsets []flatbuffers.UOffsetT) flatbuffers.UOffsetT {
	StartVector(b, len(offsets))
	for _, off := range offsets {
		b.PrependUOffsetT(off)
	}
	return b.EndVector(len(offsets))
}
// serializeGlobalDynamic writes the given set of dynamic-privilege names to
// the flatbuffer builder, and returns the offset of the resulting string
// vector. Map iteration order is random, but the vector is treated as a set,
// so ordering does not matter here.
func serializeGlobalDynamic(b *flatbuffers.Builder, strs map[string]struct{}) flatbuffers.UOffsetT {
	// Write strings, and save offsets
	i := 0
	offsets := make([]flatbuffers.UOffsetT, len(strs))
	for str := range strs {
		offsets[i] = b.CreateString(str)
		i++
	}
	// Write string offsets (order is irrelevant for this set)
	return serializeVectorOffsets(b, serial.PrivilegeSetStartGlobalDynamicVector, offsets)
}
// serializeColumns writes the given PrivilegeSetColumns into the flatbuffer
// Builder, and returns the offset of the resulting vector.
func serializeColumns(b *flatbuffers.Builder, columns []PrivilegeSetColumn) flatbuffers.UOffsetT {
	// Write column variables, and save offsets. Child objects (name string,
	// privs vector) are created before PrivilegeSetColumnStart.
	offsets := make([]flatbuffers.UOffsetT, len(columns))
	for i, column := range columns {
		name := b.CreateString(column.Name())
		privs := serializePrivilegeTypes(b, serial.PrivilegeSetColumnStartPrivsVector, column.ToSlice())
		serial.PrivilegeSetColumnStart(b)
		serial.PrivilegeSetColumnAddName(b, name)
		serial.PrivilegeSetColumnAddPrivs(b, privs)
		offsets[len(offsets)-i-1] = serial.PrivilegeSetColumnEnd(b) // reverse order
	}
	// Write column offsets (already reversed)
	return serializeVectorOffsets(b, serial.PrivilegeSetTableStartColumnsVector, offsets)
}
// serializeTables writes the given PrivilegeSetTables into the flatbuffer
// Builder, and returns the offset of the resulting vector.
func serializeTables(b *flatbuffers.Builder, tables []PrivilegeSetTable) flatbuffers.UOffsetT {
	// Write table variables, and save offsets. Child objects (name, privs,
	// columns) are created before PrivilegeSetTableStart.
	offsets := make([]flatbuffers.UOffsetT, len(tables))
	for i, table := range tables {
		name := b.CreateString(table.Name())
		privs := serializePrivilegeTypes(b, serial.PrivilegeSetTableStartPrivsVector, table.ToSlice())
		cols := serializeColumns(b, table.GetColumns())
		serial.PrivilegeSetTableStart(b)
		serial.PrivilegeSetTableAddName(b, name)
		serial.PrivilegeSetTableAddPrivs(b, privs)
		serial.PrivilegeSetTableAddColumns(b, cols)
		offsets[len(offsets)-i-1] = serial.PrivilegeSetTableEnd(b) // reverse order
	}
	// Write table offsets (order already reversed)
	return serializeVectorOffsets(b, serial.PrivilegeSetDatabaseStartTablesVector, offsets)
}
// serializeDatabases writes the given Privilege Set Databases into the
// flatbuffer Builder, and returns the offset of the resulting vector.
func serializeDatabases(b *flatbuffers.Builder, databases []PrivilegeSetDatabase) flatbuffers.UOffsetT {
	// Write database variables, and save offsets. Child objects (name, privs,
	// tables) are created before PrivilegeSetDatabaseStart.
	offsets := make([]flatbuffers.UOffsetT, len(databases))
	for i, database := range databases {
		name := b.CreateString(database.Name())
		privs := serializePrivilegeTypes(b, serial.PrivilegeSetDatabaseStartPrivsVector, database.ToSlice())
		tables := serializeTables(b, database.GetTables())
		serial.PrivilegeSetDatabaseStart(b)
		serial.PrivilegeSetDatabaseAddName(b, name)
		serial.PrivilegeSetDatabaseAddPrivs(b, privs)
		serial.PrivilegeSetDatabaseAddTables(b, tables)
		offsets[len(offsets)-i-1] = serial.PrivilegeSetDatabaseEnd(b) // reverse order
	}
	// Write database offsets (order already reversed)
	return serializeVectorOffsets(b, serial.PrivilegeSetStartDatabasesVector, offsets)
}
// serializePrivilegeSet writes the given PrivilegeSet into the flatbuffer
// Builder, and returns the offset.
func serializePrivilegeSet(b *flatbuffers.Builder, ps *PrivilegeSet) flatbuffers.UOffsetT {
	// Write privilege set variables, and save offsets (child objects must be
	// created before PrivilegeSetStart)
	globalStatic := serializePrivilegeTypes(b, serial.PrivilegeSetStartGlobalStaticVector, ps.ToSlice())
	globalDynamic := serializeGlobalDynamic(b, ps.globalDynamic)
	databases := serializeDatabases(b, ps.GetDatabases())
	// Write PrivilegeSet
	serial.PrivilegeSetStart(b)
	serial.PrivilegeSetAddGlobalStatic(b, globalStatic)
	serial.PrivilegeSetAddGlobalDynamic(b, globalDynamic)
	serial.PrivilegeSetAddDatabases(b, databases)
	return serial.PrivilegeSetEnd(b)
}
// serializeAttributes dereferences and writes the given string pointer to the
// flatbuffer builder and returns the offset. Returns 0 for the offset when
// the pointer is nil; this causes the accessor to also return nil when
// loading.
func serializeAttributes(b *flatbuffers.Builder, attributes *string) flatbuffers.UOffsetT {
	if attributes == nil {
		return 0
	}
	return b.CreateString(*attributes)
}
// serializeUser writes the given Users into the flatbuffer Builder, and
// returns the offset of the resulting vector.
func serializeUser(b *flatbuffers.Builder, users []*User) flatbuffers.UOffsetT {
	// Write user variables, and save offsets. Child objects (strings, nested
	// privilege set) are created before UserStart.
	offsets := make([]flatbuffers.UOffsetT, len(users))
	for i, user := range users {
		userName := b.CreateString(user.User)
		host := b.CreateString(user.Host)
		privilegeSet := serializePrivilegeSet(b, &user.PrivilegeSet)
		plugin := b.CreateString(user.Plugin)
		password := b.CreateString(user.Password)
		attributes := serializeAttributes(b, user.Attributes)
		serial.UserStart(b)
		serial.UserAddUser(b, userName)
		serial.UserAddHost(b, host)
		serial.UserAddPrivilegeSet(b, privilegeSet)
		serial.UserAddPlugin(b, plugin)
		serial.UserAddPassword(b, password)
		// Timestamp stored as Unix seconds
		serial.UserAddPasswordLastChanged(b, user.PasswordLastChanged.Unix())
		serial.UserAddLocked(b, user.Locked)
		serial.UserAddAttributes(b, attributes)
		offsets[len(users)-i-1] = serial.UserEnd(b) // reverse order
	}
	// Write user offsets (already in reverse order)
	return serializeVectorOffsets(b, serial.MySQLDbStartUserVector, offsets)
}
func serializeRoleEdge(b *flatbuffers.Builder, roleEdges []*RoleEdge) flatbuffers.UOffsetT {
offsets := make([]flatbuffers.UOffsetT, len(roleEdges))
for i, roleEdge := range roleEdges {
// Serialize each of the member vars in RoleEdge and save their offsets
fromHost := b.CreateString(roleEdge.FromHost)
fromUser := b.CreateString(roleEdge.FromUser)
toHost := b.CreateString(roleEdge.ToHost)
toUser := b.CreateString(roleEdge.ToUser)
// Start RoleEdge
serial.RoleEdgeStart(b)
// Write their offsets to flatbuffer builder
serial.RoleEdgeAddFromHost(b, fromHost)
serial.RoleEdgeAddFromUser(b, fromUser)
serial.RoleEdgeAddToHost(b, toHost)
serial.RoleEdgeAddToUser(b, toUser)
// Write WithAdminOption (boolean value doesn't need offset)
serial.RoleEdgeAddWithAdminOption(b, roleEdge.WithAdminOption)
// End RoleEdge
offset := serial.RoleEdgeEnd(b)
offsets[len(roleEdges)-i-1] = offset // reverse order
}
// Write role_edges vector (already in reversed order)
return serializeVectorOffsets(b, serial.MySQLDbStartRoleEdgesVector, offsets)
} | sql/mysql_db/mysql_db_serialize.go | 0.553505 | 0.487429 | mysql_db_serialize.go | starcoder |
package main
/**
题目介绍:https://www.nowcoder.com/practice/e3769a5f49894d49b871c09cadd13a61?tpId=117&tqId=1006010&tab=answerKey
* 题目描述
设计LRU缓存结构,该结构在构造时确定大小,假设大小为K,并有如下两个功能
set(key, value):将记录(key, value)插入该结构
get(key):返回key对应的value值
[要求]
set和get方法的时间复杂度为O(1)
某个key的set或get操作一旦发生,认为这个key的记录成了最常使用的。
当缓存的大小超过K时,移除最不经常使用的记录,即set或get最久远的。
若opt=1,接下来两个整数x, y,表示set(x, y)
若opt=2,接下来一个整数x,表示get(x),若x未出现过或已被移除,则返回-1
对于每个操作2,输出一个答案
* @Description:
* @Params:
* @date: 2021/2/9
*/
// main is the entry point; it currently does nothing — this file only hosts
// the LRU exercise stub and the LRUCache implementation below.
func main() {
}
/**
 * LRU design (stub — not yet implemented).
 * @param operators [][]int the ops: per the problem statement above,
 *                  {1,x,y} means set(x, y) and {2,x} means get(x)
 * @param k int the cache capacity
 * @return []int the answer for every get operation (-1 when absent)
 */
func LRU( operators [][]int , k int ) []int {
	// write code here
	return nil
}
/**
https://leetcode-cn.com/problems/lru-cache-lcci/solution/goshuang-xiang-lian-biao-map-shi-xian-lru-by-pengt/
*/
// LinkNode is a node of the doubly linked list used by LRUCache; it stores a
// cache entry's key and value together with its two neighbors.
type LinkNode struct{
	key, value int
	pre, next *LinkNode
}
// LRUCache is a fixed-capacity least-recently-used cache: a key→node map for
// O(1) lookup plus a doubly linked list (with sentinel head/tail nodes) that
// keeps the most recently used entries next to head and evicts from tail.
type LRUCache struct {
	m map[int]*LinkNode
	capacity int
	head, tail *LinkNode
}
// Constructor creates an LRUCache holding at most capacity entries, wiring
// up the sentinel head and tail nodes of the empty list.
func Constructor(capacity int) LRUCache {
	head := &LinkNode{key: -1, value: -1}
	tail := &LinkNode{key: -1, value: -1}
	head.next, tail.pre = tail, head
	return LRUCache{
		m:        make(map[int]*LinkNode),
		capacity: capacity,
		head:     head,
		tail:     tail,
	}
}
// AddNode inserts node right after the head sentinel, marking it the most
// recently used entry. (Receiver renamed from the non-idiomatic `this`.)
func (c *LRUCache) AddNode(node *LinkNode) {
	node.pre = c.head
	node.next = c.head.next
	c.head.next = node
	node.next.pre = node
}
// RemoveNode unlinks node from the doubly linked list. (Receiver renamed
// from the non-idiomatic `this`.)
func (c *LRUCache) RemoveNode(node *LinkNode) {
	node.pre.next = node.next
	node.next.pre = node.pre
}
// MoveToHead relinks node to the front of the list, marking it most recently
// used. (Receiver renamed from the non-idiomatic `this`.)
func (c *LRUCache) MoveToHead(node *LinkNode) {
	c.RemoveNode(node)
	c.AddNode(node)
}
// Get returns the value cached under key and refreshes the entry as most
// recently used, or returns -1 when the key is absent. (Receiver renamed
// from the non-idiomatic `this`; else-after-return flattened.)
func (c *LRUCache) Get(key int) int {
	node, ok := c.m[key]
	if !ok {
		return -1
	}
	c.MoveToHead(node)
	return node.value
}
// Put stores value under key. An existing entry is updated in place and
// refreshed as most recently used; otherwise a new entry is inserted,
// evicting the least recently used node (the one before the tail sentinel)
// when the cache is already full. (Receiver renamed from the non-idiomatic
// `this`; happy path flattened with an early return.)
func (c *LRUCache) Put(key int, value int) {
	if node, ok := c.m[key]; ok {
		node.value = value
		c.MoveToHead(node)
		return
	}
	if len(c.m) >= c.capacity {
		lru := c.tail.pre
		delete(c.m, lru.key)
		c.RemoveNode(lru)
	}
	n := &LinkNode{key: key, value: value}
	c.m[key] = n
	c.AddNode(n)
}
/**
* Your LRUCache object will be instantiated and called as such:
* obj := Constructor(capacity);
* param_1 := obj.Get(key);
* obj.Put(key,value);
*/
// Author: pengtuo
// Link: https://leetcode-cn.com/problems/lru-cache-lcci/solution/goshuang-xiang-lian-biao-map-shi-xian-lru-by-pengt/
// Source: LeetCode (力扣)
// Copyright belongs to the author. Contact the author for authorization for commercial reuse; credit the source for non-commercial reuse.
package kubernetes
import (
"encoding/json"
"fmt"
"testing"
"github.com/ingrammicro/cio/api/types"
"github.com/ingrammicro/cio/utils"
"github.com/stretchr/testify/assert"
)
// ListClustersMocked wires a mock Concerto service whose GET returns the
// given clusters and asserts ListClusters succeeds and round-trips them.
func ListClustersMocked(t *testing.T, clustersIn []*types.Cluster) []*types.Cluster {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewClusterService(cs)
	assert.Nil(err, "Couldn't load cluster service")
	assert.NotNil(ds, "Cluster service not instanced")
	// to json
	dIn, err := json.Marshal(clustersIn)
	assert.Nil(err, "Clusters test data corrupted")
	// call service
	cs.On("Get", APIPathKubernetesClusters).Return(dIn, 200, nil)
	clustersOut, err := ds.ListClusters()
	assert.Nil(err, "Error getting clusters")
	assert.Equal(clustersIn, clustersOut, "ListClusters returned different clusters")
	return clustersOut
}

// ListClustersFailErrMocked makes the mocked GET return a transport-level
// error and asserts ListClusters propagates it with nil output.
func ListClustersFailErrMocked(t *testing.T, clustersIn []*types.Cluster) []*types.Cluster {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewClusterService(cs)
	assert.Nil(err, "Couldn't load cluster service")
	assert.NotNil(ds, "Cluster service not instanced")
	// to json
	dIn, err := json.Marshal(clustersIn)
	assert.Nil(err, "Clusters test data corrupted")
	// call service
	cs.On("Get", APIPathKubernetesClusters).Return(dIn, 200, fmt.Errorf("mocked error"))
	clustersOut, err := ds.ListClusters()
	assert.NotNil(err, "We are expecting an error")
	assert.Nil(clustersOut, "Expecting nil output")
	assert.Equal(err.Error(), "mocked error", "Error should be 'mocked error'")
	return clustersOut
}

// ListClustersFailStatusMocked makes the mocked GET return a non-2xx status
// (499) and asserts ListClusters surfaces it as an error with nil output.
func ListClustersFailStatusMocked(t *testing.T, clustersIn []*types.Cluster) []*types.Cluster {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewClusterService(cs)
	assert.Nil(err, "Couldn't load cluster service")
	assert.NotNil(ds, "Cluster service not instanced")
	// to json
	dIn, err := json.Marshal(clustersIn)
	assert.Nil(err, "Clusters test data corrupted")
	// call service
	cs.On("Get", APIPathKubernetesClusters).Return(dIn, 499, nil)
	clustersOut, err := ds.ListClusters()
	assert.NotNil(err, "We are expecting an status code error")
	assert.Nil(clustersOut, "Expecting nil output")
	assert.Contains(err.Error(), "499", "Error should contain http code 499")
	return clustersOut
}

// ListClustersFailJSONMocked makes the mocked GET return an invalid JSON
// payload and asserts ListClusters fails to unmarshal it.
func ListClustersFailJSONMocked(t *testing.T, clustersIn []*types.Cluster) []*types.Cluster {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewClusterService(cs)
	assert.Nil(err, "Couldn't load cluster service")
	assert.NotNil(ds, "Cluster service not instanced")
	// wrong json
	dIn := []byte{10, 20, 30}
	// call service
	cs.On("Get", APIPathKubernetesClusters).Return(dIn, 200, nil)
	clustersOut, err := ds.ListClusters()
	assert.NotNil(err, "We are expecting a marshalling error")
	assert.Nil(clustersOut, "Expecting nil output")
	assert.Contains(err.Error(), "invalid character", "Error message should include the string 'invalid character'")
	return clustersOut
}
// GetClusterMocked wires a mock Concerto service whose GET returns the given
// cluster and asserts GetCluster succeeds and round-trips it.
func GetClusterMocked(t *testing.T, clusterIn *types.Cluster) *types.Cluster {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewClusterService(cs)
	assert.Nil(err, "Couldn't load cluster service")
	assert.NotNil(ds, "Cluster service not instanced")
	// to json
	dIn, err := json.Marshal(clusterIn)
	assert.Nil(err, "Cluster test data corrupted")
	// call service
	cs.On("Get", fmt.Sprintf(APIPathKubernetesCluster, clusterIn.ID)).Return(dIn, 200, nil)
	clusterOut, err := ds.GetCluster(clusterIn.ID)
	assert.Nil(err, "Error getting cluster")
	assert.Equal(*clusterIn, *clusterOut, "GetCluster returned different cluster")
	return clusterOut
}

// GetClusterFailErrMocked makes the mocked GET return a transport-level
// error and asserts GetCluster propagates it with nil output.
func GetClusterFailErrMocked(t *testing.T, clusterIn *types.Cluster) *types.Cluster {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewClusterService(cs)
	assert.Nil(err, "Couldn't load cluster service")
	assert.NotNil(ds, "Cluster service not instanced")
	// to json
	dIn, err := json.Marshal(clusterIn)
	assert.Nil(err, "Cluster test data corrupted")
	// call service
	cs.On("Get", fmt.Sprintf(APIPathKubernetesCluster, clusterIn.ID)).Return(dIn, 200, fmt.Errorf("mocked error"))
	clusterOut, err := ds.GetCluster(clusterIn.ID)
	assert.NotNil(err, "We are expecting an error")
	assert.Nil(clusterOut, "Expecting nil output")
	assert.Equal(err.Error(), "mocked error", "Error should be 'mocked error'")
	return clusterOut
}

// GetClusterFailStatusMocked makes the mocked GET return a non-2xx status
// (499) and asserts GetCluster surfaces it as an error with nil output.
func GetClusterFailStatusMocked(t *testing.T, clusterIn *types.Cluster) *types.Cluster {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewClusterService(cs)
	assert.Nil(err, "Couldn't load cluster service")
	assert.NotNil(ds, "Cluster service not instanced")
	// to json
	dIn, err := json.Marshal(clusterIn)
	assert.Nil(err, "Cluster test data corrupted")
	// call service
	cs.On("Get", fmt.Sprintf(APIPathKubernetesCluster, clusterIn.ID)).Return(dIn, 499, nil)
	clusterOut, err := ds.GetCluster(clusterIn.ID)
	assert.NotNil(err, "We are expecting an status code error")
	assert.Nil(clusterOut, "Expecting nil output")
	assert.Contains(err.Error(), "499", "Error should contain http code 499")
	return clusterOut
}

// GetClusterFailJSONMocked makes the mocked GET return an invalid JSON
// payload and asserts GetCluster fails to unmarshal it.
func GetClusterFailJSONMocked(t *testing.T, clusterIn *types.Cluster) *types.Cluster {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewClusterService(cs)
	assert.Nil(err, "Couldn't load cluster service")
	assert.NotNil(ds, "Cluster service not instanced")
	// wrong json
	dIn := []byte{10, 20, 30}
	// call service
	cs.On("Get", fmt.Sprintf(APIPathKubernetesCluster, clusterIn.ID)).Return(dIn, 200, nil)
	clusterOut, err := ds.GetCluster(clusterIn.ID)
	assert.NotNil(err, "We are expecting a marshalling error")
	assert.Nil(clusterOut, "Expecting nil output")
	assert.Contains(err.Error(), "invalid character", "Error message should include the string 'invalid character'")
	return clusterOut
}
// CreateClusterMocked wires a mock Concerto service whose POST echoes the
// given cluster and asserts CreateCluster succeeds and round-trips it.
func CreateClusterMocked(t *testing.T, clusterIn *types.Cluster) *types.Cluster {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewClusterService(cs)
	assert.Nil(err, "Couldn't load cluster service")
	assert.NotNil(ds, "Cluster service not instanced")
	// convertMap
	mapIn, err := utils.ItemConvertParams(*clusterIn)
	assert.Nil(err, "Cluster test data corrupted")
	// to json
	dOut, err := json.Marshal(clusterIn)
	assert.Nil(err, "Cluster test data corrupted")
	// call service
	cs.On("Post", APIPathKubernetesClusters, mapIn).Return(dOut, 200, nil)
	clusterOut, err := ds.CreateCluster(mapIn)
	assert.Nil(err, "Error creating cluster")
	assert.Equal(clusterIn, clusterOut, "CreateCluster returned different cluster")
	return clusterOut
}

// CreateClusterFailErrMocked makes the mocked POST return a transport-level
// error and asserts CreateCluster propagates it with nil output.
func CreateClusterFailErrMocked(t *testing.T, clusterIn *types.Cluster) *types.Cluster {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewClusterService(cs)
	assert.Nil(err, "Couldn't load cluster service")
	assert.NotNil(ds, "Cluster service not instanced")
	// convertMap
	mapIn, err := utils.ItemConvertParams(*clusterIn)
	assert.Nil(err, "Cluster test data corrupted")
	// to json
	dOut, err := json.Marshal(clusterIn)
	assert.Nil(err, "Cluster test data corrupted")
	// call service
	cs.On("Post", APIPathKubernetesClusters, mapIn).Return(dOut, 200, fmt.Errorf("mocked error"))
	clusterOut, err := ds.CreateCluster(mapIn)
	assert.NotNil(err, "We are expecting an error")
	assert.Nil(clusterOut, "Expecting nil output")
	assert.Equal(err.Error(), "mocked error", "Error should be 'mocked error'")
	return clusterOut
}

// CreateClusterFailStatusMocked makes the mocked POST return a non-2xx
// status (499) and asserts CreateCluster surfaces it as an error.
func CreateClusterFailStatusMocked(t *testing.T, clusterIn *types.Cluster) *types.Cluster {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewClusterService(cs)
	assert.Nil(err, "Couldn't load cluster service")
	assert.NotNil(ds, "Cluster service not instanced")
	// convertMap
	mapIn, err := utils.ItemConvertParams(*clusterIn)
	assert.Nil(err, "Cluster test data corrupted")
	// to json
	dOut, err := json.Marshal(clusterIn)
	assert.Nil(err, "Cluster test data corrupted")
	// call service
	cs.On("Post", APIPathKubernetesClusters, mapIn).Return(dOut, 499, nil)
	clusterOut, err := ds.CreateCluster(mapIn)
	assert.NotNil(err, "We are expecting an status code error")
	assert.Nil(clusterOut, "Expecting nil output")
	assert.Contains(err.Error(), "499", "Error should contain http code 499")
	return clusterOut
}

// CreateClusterFailJSONMocked makes the mocked POST return an invalid JSON
// payload and asserts CreateCluster fails to unmarshal it.
func CreateClusterFailJSONMocked(t *testing.T, clusterIn *types.Cluster) *types.Cluster {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewClusterService(cs)
	assert.Nil(err, "Couldn't load cluster service")
	assert.NotNil(ds, "Cluster service not instanced")
	// convertMap
	mapIn, err := utils.ItemConvertParams(*clusterIn)
	assert.Nil(err, "Cluster test data corrupted")
	// wrong json
	dIn := []byte{10, 20, 30}
	// call service
	cs.On("Post", APIPathKubernetesClusters, mapIn).Return(dIn, 200, nil)
	clusterOut, err := ds.CreateCluster(mapIn)
	assert.NotNil(err, "We are expecting a marshalling error")
	assert.Nil(clusterOut, "Expecting nil output")
	assert.Contains(err.Error(), "invalid character", "Error message should include the string 'invalid character'")
	return clusterOut
}
// UpdateClusterMocked wires a mock Concerto service whose PUT echoes the
// given cluster and asserts UpdateCluster succeeds and round-trips it.
func UpdateClusterMocked(t *testing.T, clusterIn *types.Cluster) *types.Cluster {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewClusterService(cs)
	assert.Nil(err, "Couldn't load cluster service")
	assert.NotNil(ds, "Cluster service not instanced")
	// convertMap
	mapIn, err := utils.ItemConvertParams(*clusterIn)
	assert.Nil(err, "Cluster test data corrupted")
	// to json
	dOut, err := json.Marshal(clusterIn)
	assert.Nil(err, "Cluster test data corrupted")
	// call service
	cs.On("Put", fmt.Sprintf(APIPathKubernetesCluster, clusterIn.ID), mapIn).Return(dOut, 200, nil)
	clusterOut, err := ds.UpdateCluster(clusterIn.ID, mapIn)
	assert.Nil(err, "Error updating cluster")
	assert.Equal(clusterIn, clusterOut, "UpdateCluster returned different cluster")
	return clusterOut
}

// UpdateClusterFailErrMocked makes the mocked PUT return a transport-level
// error and asserts UpdateCluster propagates it with nil output.
func UpdateClusterFailErrMocked(t *testing.T, clusterIn *types.Cluster) *types.Cluster {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewClusterService(cs)
	assert.Nil(err, "Couldn't load cluster service")
	assert.NotNil(ds, "Cluster service not instanced")
	// convertMap
	mapIn, err := utils.ItemConvertParams(*clusterIn)
	assert.Nil(err, "Cluster test data corrupted")
	// to json
	dOut, err := json.Marshal(clusterIn)
	assert.Nil(err, "Cluster test data corrupted")
	// call service
	cs.On("Put", fmt.Sprintf(APIPathKubernetesCluster, clusterIn.ID), mapIn).
		Return(dOut, 200, fmt.Errorf("mocked error"))
	clusterOut, err := ds.UpdateCluster(clusterIn.ID, mapIn)
	assert.NotNil(err, "We are expecting an error")
	assert.Nil(clusterOut, "Expecting nil output")
	assert.Equal(err.Error(), "mocked error", "Error should be 'mocked error'")
	return clusterOut
}

// UpdateClusterFailStatusMocked makes the mocked PUT return a non-2xx status
// (499) and asserts UpdateCluster surfaces it as an error.
func UpdateClusterFailStatusMocked(t *testing.T, clusterIn *types.Cluster) *types.Cluster {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewClusterService(cs)
	assert.Nil(err, "Couldn't load cluster service")
	assert.NotNil(ds, "Cluster service not instanced")
	// convertMap
	mapIn, err := utils.ItemConvertParams(*clusterIn)
	assert.Nil(err, "Cluster test data corrupted")
	// to json
	dOut, err := json.Marshal(clusterIn)
	assert.Nil(err, "Cluster test data corrupted")
	// call service
	cs.On("Put", fmt.Sprintf(APIPathKubernetesCluster, clusterIn.ID), mapIn).Return(dOut, 499, nil)
	clusterOut, err := ds.UpdateCluster(clusterIn.ID, mapIn)
	assert.NotNil(err, "We are expecting an status code error")
	assert.Nil(clusterOut, "Expecting nil output")
	assert.Contains(err.Error(), "499", "Error should contain http code 499")
	return clusterOut
}

// UpdateClusterFailJSONMocked makes the mocked PUT return an invalid JSON
// payload and asserts UpdateCluster fails to unmarshal it.
func UpdateClusterFailJSONMocked(t *testing.T, clusterIn *types.Cluster) *types.Cluster {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewClusterService(cs)
	assert.Nil(err, "Couldn't load cluster service")
	assert.NotNil(ds, "Cluster service not instanced")
	// convertMap
	mapIn, err := utils.ItemConvertParams(*clusterIn)
	assert.Nil(err, "Cluster test data corrupted")
	// wrong json
	dIn := []byte{10, 20, 30}
	// call service
	cs.On("Put", fmt.Sprintf(APIPathKubernetesCluster, clusterIn.ID), mapIn).Return(dIn, 200, nil)
	clusterOut, err := ds.UpdateCluster(clusterIn.ID, mapIn)
	assert.NotNil(err, "We are expecting a marshalling error")
	assert.Nil(clusterOut, "Expecting nil output")
	assert.Contains(err.Error(), "invalid character", "Error message should include the string 'invalid character'")
	return clusterOut
}
// DeleteClusterMocked wires a mock Concerto service whose DELETE echoes the
// given cluster and asserts DeleteCluster succeeds and round-trips it.
func DeleteClusterMocked(t *testing.T, clusterIn *types.Cluster) {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewClusterService(cs)
	assert.Nil(err, "Couldn't load cluster service")
	assert.NotNil(ds, "Cluster service not instanced")
	// to json
	dIn, err := json.Marshal(clusterIn)
	assert.Nil(err, "Cluster test data corrupted")
	// call service
	cs.On("Delete", fmt.Sprintf(APIPathKubernetesCluster, clusterIn.ID)).Return(dIn, 200, nil)
	clusterOut, err := ds.DeleteCluster(clusterIn.ID)
	assert.Nil(err, "Error deleting cluster")
	assert.Equal(clusterIn, clusterOut, "DeleteCluster returned different cluster")
}

// DeleteClusterFailErrMocked makes the mocked DELETE return a transport-level
// error and asserts DeleteCluster propagates it with nil output.
func DeleteClusterFailErrMocked(t *testing.T, clusterIn *types.Cluster) {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewClusterService(cs)
	assert.Nil(err, "Couldn't load cluster service")
	assert.NotNil(ds, "Cluster service not instanced")
	// to json
	dIn, err := json.Marshal(clusterIn)
	assert.Nil(err, "Cluster test data corrupted")
	// call service
	cs.On("Delete", fmt.Sprintf(APIPathKubernetesCluster, clusterIn.ID)).Return(dIn, 200, fmt.Errorf("mocked error"))
	clusterOut, err := ds.DeleteCluster(clusterIn.ID)
	assert.NotNil(err, "We are expecting an error")
	assert.Nil(clusterOut, "Expecting nil output")
	assert.Equal(err.Error(), "mocked error", "Error should be 'mocked error'")
}

// DeleteClusterFailStatusMocked makes the mocked DELETE return a non-2xx
// status (499) and asserts DeleteCluster surfaces it as an error.
func DeleteClusterFailStatusMocked(t *testing.T, clusterIn *types.Cluster) {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewClusterService(cs)
	assert.Nil(err, "Couldn't load cluster service")
	assert.NotNil(ds, "Cluster service not instanced")
	// to json
	dIn, err := json.Marshal(clusterIn)
	assert.Nil(err, "Cluster test data corrupted")
	// call service
	cs.On("Delete", fmt.Sprintf(APIPathKubernetesCluster, clusterIn.ID)).Return(dIn, 499, nil)
	clusterOut, err := ds.DeleteCluster(clusterIn.ID)
	assert.NotNil(err, "We are expecting an status code error")
	assert.Nil(clusterOut, "Expecting nil output")
	assert.Contains(err.Error(), "499", "Error should contain http code 499")
}

// DeleteClusterFailJSONMocked makes the mocked DELETE return an invalid JSON
// payload and asserts DeleteCluster fails to unmarshal it.
func DeleteClusterFailJSONMocked(t *testing.T, clusterIn *types.Cluster) {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewClusterService(cs)
	assert.Nil(err, "Couldn't load cluster service")
	assert.NotNil(ds, "Cluster service not instanced")
	// wrong json
	dIn := []byte{10, 20, 30}
	// call service
	cs.On("Delete", fmt.Sprintf(APIPathKubernetesCluster, clusterIn.ID)).Return(dIn, 200, nil)
	clusterOut, err := ds.DeleteCluster(clusterIn.ID)
	assert.NotNil(err, "We are expecting a marshalling error")
	assert.Nil(clusterOut, "Expecting nil output")
	assert.Contains(err.Error(), "invalid character", "Error message should include the string 'invalid character'")
}
// RetryClusterMocked tests the happy path of RetryCluster: the cluster the
// API returns must be handed back to the caller unchanged.
func RetryClusterMocked(t *testing.T, clusterIn *types.Cluster) *types.Cluster {
	assert := assert.New(t)

	// build the service on top of a mocked Concerto client
	mock := &utils.MockConcertoService{}
	svc, err := NewClusterService(mock)
	assert.Nil(err, "Couldn't load cluster service")
	assert.NotNil(svc, "Cluster service not instanced")

	// derive the request parameters and the canned response from the fixture
	mapIn, err := utils.ItemConvertParams(*clusterIn)
	assert.Nil(err, "Cluster test data corrupted")
	payload, err := json.Marshal(clusterIn)
	assert.Nil(err, "Cluster test data corrupted")

	// a successful PUT must round-trip the cluster unchanged
	mock.On("Put", fmt.Sprintf(APIPathKubernetesClusterRetry, clusterIn.ID), mapIn).Return(payload, 200, nil)
	clusterOut, err := svc.RetryCluster(clusterIn.ID, mapIn)
	assert.Nil(err, "Error retrying cluster")
	assert.Equal(clusterIn, clusterOut, "RetryCluster returned different cluster")
	return clusterOut
}
// RetryClusterFailErrMocked tests RetryCluster when the underlying client
// call returns an error: the error must be propagated and no cluster returned.
func RetryClusterFailErrMocked(t *testing.T, clusterIn *types.Cluster) *types.Cluster {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewClusterService(cs)
	assert.Nil(err, "Couldn't load cluster service")
	assert.NotNil(ds, "Cluster service not instanced")
	// convertMap
	mapIn, err := utils.ItemConvertParams(*clusterIn)
	assert.Nil(err, "Cluster test data corrupted")
	// to json
	dOut, err := json.Marshal(clusterIn)
	assert.Nil(err, "Cluster test data corrupted")
	// call service
	cs.On("Put", fmt.Sprintf(APIPathKubernetesClusterRetry, clusterIn.ID), mapIn).
		Return(dOut, 200, fmt.Errorf("mocked error"))
	clusterOut, err := ds.RetryCluster(clusterIn.ID, mapIn)
	assert.NotNil(err, "We are expecting an error")
	assert.Nil(clusterOut, "Expecting nil output")
	// testify's Equal takes (expected, actual): the literal goes first so a
	// failure message labels both values correctly.
	assert.Equal("mocked error", err.Error(), "Error should be 'mocked error'")
	return clusterOut
}
// RetryClusterFailStatusMocked tests RetryCluster when the API answers with
// a non-2xx status: an error mentioning the status code must be returned.
func RetryClusterFailStatusMocked(t *testing.T, clusterIn *types.Cluster) *types.Cluster {
	assert := assert.New(t)

	// build the service on top of a mocked Concerto client
	mock := &utils.MockConcertoService{}
	svc, err := NewClusterService(mock)
	assert.Nil(err, "Couldn't load cluster service")
	assert.NotNil(svc, "Cluster service not instanced")

	// derive the request parameters and the canned response from the fixture
	mapIn, err := utils.ItemConvertParams(*clusterIn)
	assert.Nil(err, "Cluster test data corrupted")
	payload, err := json.Marshal(clusterIn)
	assert.Nil(err, "Cluster test data corrupted")

	// a 499 status from the API must surface as an error
	mock.On("Put", fmt.Sprintf(APIPathKubernetesClusterRetry, clusterIn.ID), mapIn).Return(payload, 499, nil)
	clusterOut, err := svc.RetryCluster(clusterIn.ID, mapIn)
	assert.NotNil(err, "We are expecting an status code error")
	assert.Nil(clusterOut, "Expecting nil output")
	assert.Contains(err.Error(), "499", "Error should contain http code 499")
	return clusterOut
}
// RetryClusterFailJSONMocked tests RetryCluster when the API body is not
// valid JSON: unmarshalling must fail and the error must be reported.
func RetryClusterFailJSONMocked(t *testing.T, clusterIn *types.Cluster) *types.Cluster {
	assert := assert.New(t)

	// build the service on top of a mocked Concerto client
	mock := &utils.MockConcertoService{}
	svc, err := NewClusterService(mock)
	assert.Nil(err, "Couldn't load cluster service")
	assert.NotNil(svc, "Cluster service not instanced")

	// request parameters derived from the fixture
	mapIn, err := utils.ItemConvertParams(*clusterIn)
	assert.Nil(err, "Cluster test data corrupted")

	// a payload that cannot be decoded as JSON
	garbage := []byte{10, 20, 30}

	mock.On("Put", fmt.Sprintf(APIPathKubernetesClusterRetry, clusterIn.ID), mapIn).Return(garbage, 200, nil)
	clusterOut, err := svc.RetryCluster(clusterIn.ID, mapIn)
	assert.NotNil(err, "We are expecting a marshalling error")
	assert.Nil(clusterOut, "Expecting nil output")
	assert.Contains(err.Error(), "invalid character", "Error message should include the string 'invalid character'")
	return clusterOut
}
// DiscardClusterMocked tests the happy path of DiscardCluster: a 200 response
// must complete without error.
func DiscardClusterMocked(t *testing.T, clusterIn *types.Cluster) {
	assert := assert.New(t)

	// serialize the fixture up front; the mock hands it back verbatim
	payload, err := json.Marshal(clusterIn)
	assert.Nil(err, "Cluster test data corrupted")

	// build the service on top of a mocked Concerto client
	mock := &utils.MockConcertoService{}
	svc, err := NewClusterService(mock)
	assert.Nil(err, "Couldn't load cluster service")
	assert.NotNil(svc, "Cluster service not instanced")

	// a successful DELETE reports no error
	mock.On("Delete", fmt.Sprintf(APIPathKubernetesClusterDiscard, clusterIn.ID)).Return(payload, 200, nil)
	err = svc.DiscardCluster(clusterIn.ID)
	assert.Nil(err, "Error discarding cluster")
}
// DiscardClusterFailErrMocked tests DiscardCluster when the underlying client
// call returns an error: the error must be propagated to the caller.
func DiscardClusterFailErrMocked(t *testing.T, clusterIn *types.Cluster) {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewClusterService(cs)
	assert.Nil(err, "Couldn't load cluster service")
	assert.NotNil(ds, "Cluster service not instanced")
	// to json
	dIn, err := json.Marshal(clusterIn)
	assert.Nil(err, "Cluster test data corrupted")
	// call service
	cs.On("Delete", fmt.Sprintf(APIPathKubernetesClusterDiscard, clusterIn.ID)).
		Return(dIn, 200, fmt.Errorf("mocked error"))
	err = ds.DiscardCluster(clusterIn.ID)
	assert.NotNil(err, "We are expecting an error")
	// testify's Equal takes (expected, actual): the literal goes first so a
	// failure message labels both values correctly.
	assert.Equal("mocked error", err.Error(), "Error should be 'mocked error'")
}
// DiscardClusterFailStatusMocked tests DiscardCluster when the API answers
// with a non-2xx status: an error mentioning the status code must be returned.
func DiscardClusterFailStatusMocked(t *testing.T, clusterIn *types.Cluster) {
	assert := assert.New(t)

	// serialize the fixture up front; the mock hands it back verbatim
	payload, err := json.Marshal(clusterIn)
	assert.Nil(err, "Cluster test data corrupted")

	// build the service on top of a mocked Concerto client
	mock := &utils.MockConcertoService{}
	svc, err := NewClusterService(mock)
	assert.Nil(err, "Couldn't load cluster service")
	assert.NotNil(svc, "Cluster service not instanced")

	// a 499 status from the API must surface as an error
	mock.On("Delete", fmt.Sprintf(APIPathKubernetesClusterDiscard, clusterIn.ID)).Return(payload, 499, nil)
	err = svc.DiscardCluster(clusterIn.ID)
	assert.NotNil(err, "We are expecting an status code error")
	assert.Contains(err.Error(), "499", "Error should contain http code 499")
}
// GetClusterPlanMocked tests the happy path of GetClusterPlan: the plan the
// API returns must be handed back to the caller unchanged.
func GetClusterPlanMocked(t *testing.T, clusterPlanID string, clusterPlanIn *types.ClusterPlan) *types.ClusterPlan {
	assert := assert.New(t)

	// serialize the fixture up front; the mock hands it back verbatim
	payload, err := json.Marshal(clusterPlanIn)
	assert.Nil(err, "ClusterPlan test data corrupted")

	// build the service on top of a mocked Concerto client
	mock := &utils.MockConcertoService{}
	svc, err := NewClusterService(mock)
	assert.Nil(err, "Couldn't load cluster service")
	assert.NotNil(svc, "Cluster service not instanced")

	// a successful GET must round-trip the plan unchanged
	mock.On("Get", fmt.Sprintf(APIPathKubernetesClusterPlan, clusterPlanID)).Return(payload, 200, nil)
	clusterPlanOut, err := svc.GetClusterPlan(clusterPlanID)
	assert.Nil(err, "Error getting cluster plan")
	assert.Equal(clusterPlanIn, clusterPlanOut, "GetClusterPlan returned different cluster plan")
	return clusterPlanOut
}
// GetClusterPlanFailErrMocked tests GetClusterPlan when the underlying client
// call returns an error: the error must be propagated and no plan returned.
func GetClusterPlanFailErrMocked(
	t *testing.T,
	clusterPlanID string,
	clusterPlanIn *types.ClusterPlan,
) *types.ClusterPlan {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewClusterService(cs)
	assert.Nil(err, "Couldn't load cluster service")
	assert.NotNil(ds, "Cluster service not instanced")
	// to json
	dIn, err := json.Marshal(clusterPlanIn)
	assert.Nil(err, "ClusterPlan test data corrupted")
	// call service
	cs.On("Get", fmt.Sprintf(APIPathKubernetesClusterPlan, clusterPlanID)).
		Return(dIn, 200, fmt.Errorf("mocked error"))
	clusterPlanOut, err := ds.GetClusterPlan(clusterPlanID)
	assert.NotNil(err, "We are expecting an error")
	assert.Nil(clusterPlanOut, "Expecting nil output")
	// testify's Equal takes (expected, actual): the literal goes first so a
	// failure message labels both values correctly.
	assert.Equal("mocked error", err.Error(), "Error should be 'mocked error'")
	return clusterPlanOut
}
// GetClusterPlanFailStatusMocked tests GetClusterPlan when the API answers
// with a non-2xx status: an error mentioning the status code must be returned.
func GetClusterPlanFailStatusMocked(
	t *testing.T,
	clusterPlanID string,
	clusterPlanIn *types.ClusterPlan,
) *types.ClusterPlan {
	assert := assert.New(t)

	// serialize the fixture up front; the mock hands it back verbatim
	payload, err := json.Marshal(clusterPlanIn)
	assert.Nil(err, "ClusterPlan test data corrupted")

	// build the service on top of a mocked Concerto client
	mock := &utils.MockConcertoService{}
	svc, err := NewClusterService(mock)
	assert.Nil(err, "Couldn't load cluster service")
	assert.NotNil(svc, "Cluster service not instanced")

	// a 499 status from the API must surface as an error
	mock.On("Get", fmt.Sprintf(APIPathKubernetesClusterPlan, clusterPlanID)).Return(payload, 499, nil)
	clusterPlanOut, err := svc.GetClusterPlan(clusterPlanID)
	assert.NotNil(err, "We are expecting an status code error")
	assert.Nil(clusterPlanOut, "Expecting nil output")
	assert.Contains(err.Error(), "499", "Error should contain http code 499")
	return clusterPlanOut
}
// GetClusterPlanFailJSONMocked test mocked function
func GetClusterPlanFailJSONMocked(
t *testing.T,
clusterPlanID string,
clusterPlanIn *types.ClusterPlan,
) *types.ClusterPlan {
assert := assert.New(t)
// wire up
cs := &utils.MockConcertoService{}
ds, err := NewClusterService(cs)
assert.Nil(err, "Couldn't load cluster service")
assert.NotNil(ds, "Cluster service not instanced")
// wrong json
dIn := []byte{10, 20, 30}
// call service
cs.On("Get", fmt.Sprintf(APIPathKubernetesClusterPlan, clusterPlanID)).Return(dIn, 200, nil)
clusterPlanOut, err := ds.GetClusterPlan(clusterPlanID)
assert.NotNil(err, "We are expecting a marshalling error")
assert.Nil(clusterPlanOut, "Expecting nil output")
assert.Contains(err.Error(), "invalid character", "Error message should include the string 'invalid character'")
return clusterPlanOut
} | api/kubernetes/clusters_api_mocked.go | 0.731155 | 0.489076 | clusters_api_mocked.go | starcoder |
package vetsql
// Signed and unsigned bounds for the MySQL integer column types
// (TINYINT, SMALLINT, MEDIUMINT, INT and BIGINT). minU is the shared
// lower bound for all unsigned types.
const (
	minU = 0

	minInt  = -2147483648
	maxInt  = 2147483647
	maxUInt = 4294967295

	minTinyInt  = -128
	maxTinyInt  = 127
	maxUTinyInt = 255

	minSmallInt  = -32768
	maxSmallInt  = 32767
	maxUSmallInt = 65535

	minMediumInt  = -8388608
	maxMediumInt  = 8388607
	maxUMediumInt = 16777215

	minBigInt  = -9223372036854775808
	maxBigInt  = 9223372036854775807
	maxUBigInt = 18446744073709551615
)
// ValidSQLInt ...
func ValidSQLInt(value int) bool {
if value <= maxInt && value >= minInt {
return true
}
return false
}
// ValidSQLUInt ...
func ValidSQLUInt(value int) bool {
if value <= maxUInt && value >= minU {
return true
}
return false
}
// ValidSQLTinyInt ...
func ValidSQLTinyInt(value int) bool {
if value <= maxTinyInt && value >= minTinyInt {
return true
}
return false
}
// ValidSQLUTinyInt ...
func ValidSQLUTinyInt(value int) bool {
if value <= maxUTinyInt && value >= minU {
return true
}
return false
}
// ValidSQLSmallInt ...
func ValidSQLSmallInt(value int) bool {
if value <= maxSmallInt && value >= minSmallInt {
return true
}
return false
}
// ValidSQLUSmallInt ...
func ValidSQLUSmallInt(value int) bool {
if value <= maxUSmallInt && value >= minU {
return true
}
return false
}
// ValidSQLMediumInt ...
func ValidSQLMediumInt(value int) bool {
if value <= maxMediumInt && value >= minMediumInt {
return true
}
return false
}
// ValidSQLUMediumInt ...
func ValidSQLUMediumInt(value int) bool {
if value <= maxUMediumInt && value >= minU {
return true
}
return false
}
// ValidSQLBigInt ...
func ValidSQLBigInt(value int) bool {
if value <= maxBigInt && value >= minBigInt {
return true
}
return false
}
// ValidSQLUBigInt reports whether value fits in an unsigned SQL BIGINT column.
// A uint64 is never negative, so the lower-bound check the other validators
// carry is dropped here (it was vacuously true).
func ValidSQLUBigInt(value uint64) bool {
	return value <= maxUBigInt
}
// Maximum byte lengths for the MySQL BLOB/TEXT column families.
const (
	tinyBlobLimit   = 255
	blobLimit       = 65535
	mediumBlobLimit = 16777215
	longBlobLimit   = 4294967295
)
// ExceedsCharLimit reports whether str is longer than limit. Note that
// len(str) counts bytes, not runes, which matches how MySQL sizes the
// BLOB/TEXT column families.
func ExceedsCharLimit(str string, limit int) bool {
	return len(str) > limit
}
// ExceedsBlobLimit ...
func ExceedsBlobLimit(str string) bool {
return ExceedsCharLimit(str, blobLimit)
}
// ExceedsTinyBlobLimit ...
func ExceedsTinyBlobLimit(str string) bool {
return ExceedsCharLimit(str, tinyBlobLimit)
}
// ExceedsMediumBlobLimit ...
func ExceedsMediumBlobLimit(str string) bool {
return ExceedsCharLimit(str, mediumBlobLimit)
}
// ExceedsLongBlobLimit ...
func ExceedsLongBlobLimit(str string) bool {
return ExceedsCharLimit(str, longBlobLimit)
}
// ExceedsTextLimit ...
func ExceedsTextLimit(str string) bool {
return ExceedsBlobLimit(str)
}
// ExceedsTinyTextLimit ...
func ExceedsTinyTextLimit(str string) bool {
return ExceedsTinyBlobLimit(str)
}
// ExceedsMediumTextLimit ...
func ExceedsMediumTextLimit(str string) bool {
return ExceedsMediumBlobLimit(str)
}
// ExceedsLongTextLimit ...
func ExceedsLongTextLimit(str string) bool {
return ExceedsLongBlobLimit(str)
} | govetsql.go | 0.637369 | 0.430387 | govetsql.go | starcoder |
package main
import (
"encoding/csv"
"fmt"
"io"
"log"
"math"
"os"
"strconv"
)
// squaredError returns the squared difference between a single observation
// and its prediction — the per-sample term of the mean-squared-error loss.
func squaredError(observation, prediction float64) float64 {
	return math.Pow(observation-prediction, 2)
}
// sgdTrain fits a simple linear model y = w*x + b to the given feature and
// response slices using full-batch gradient descent.
//
// lr is the learning rate and epochs the number of passes over the data. It
// returns the fitted weight, the bias, and the mean squared error computed at
// the start of the final epoch. features and response must have equal length.
func sgdTrain(features, response []float64, lr float64, epochs int) (float64, float64, float64) {
	// Initialize the weight and bias.
	w := 0.0
	b := 0.0

	// Number of observations, as a float for the averaging below.
	n := float64(len(response))

	// Reuse one prediction buffer across epochs; the original allocated a
	// fresh slice via append on every epoch.
	predictions := make([]float64, len(features))

	loss := 0.0
	for i := 0; i < epochs; i++ {
		// Current model predictions.
		for idx, x := range features {
			predictions[idx] = w*x + b
		}

		// Mean squared error for this epoch.
		loss = 0.0
		for idx, p := range predictions {
			d := response[idx] - p
			loss += d * d / n
		}

		// Output some info to standard out so we know
		// how training is progressing.
		if i%10 == 0 {
			fmt.Printf("Epoch %d, Loss %0.4f\n", i, loss)
		}

		// Gradients of the MSE with respect to w and b.
		wGradient := 0.0
		bGradient := 0.0
		for idx, p := range predictions {
			wGradient += -(2 / n) * (features[idx] * (response[idx] - p))
			bGradient += -(2 / n) * (response[idx] - p)
		}

		// Gradient descent step.
		w = w - (lr * wGradient)
		b = b - (lr * bGradient)
	}

	return w, b, loss
}
func main() {
// Open the Advertising data set.
f, err := os.Open("../data/training.csv")
if err != nil {
log.Fatal(err)
}
defer f.Close()
// Create a new CSV reader reading from the opened file.
reader := csv.NewReader(f)
// We should have 4 fields per line. By setting
// FieldsPerRecord to 4, we can validate that each of
// the rows in our CSV has the correct number of fields.
reader.FieldsPerRecord = 4
// features and response will hold our successfully parsed
// TV and Sales values respectively.
line := 1
var features []float64
var response []float64
for {
// Read in a row. Check if we are at the end of
// the file.
record, err := reader.Read()
if err == io.EOF {
break
}
// Skip the header.
if line == 1 {
line++
continue
}
// If we had a parsing error, log the error
// and move on.
if err != nil {
log.Println(err)
line++
continue
}
// Try to parse the values we want (the TV and Sales values)
// as floats.
var tv float64
if tv, err = strconv.ParseFloat(record[0], 64); err != nil {
log.Printf("Unexpected type in TV column at line %d\n", line)
fmt.Println(record)
line++
continue
}
var sales float64
if sales, err = strconv.ParseFloat(record[3], 64); err != nil {
log.Printf("Unexpected type in Sales column at line %d\n", line)
line++
continue
}
// Append the records to our feature and response slices.
features = append(features, tv)
response = append(response, sales)
line++
}
// Train our linear regression model.
w, b, _ := sgdTrain(features, response, 0.1, 200)
// Print our results.
fmt.Printf("\nRegression Formula:\ny = %0.2f * x + %0.2f\n\n", w, b)
} | linear_regression/example3/example3.go | 0.823435 | 0.512449 | example3.go | starcoder |
package xiao
import (
"github.com/OpenWhiteBox/primitives/encoding"
"github.com/OpenWhiteBox/primitives/matrix"
)
// When you compose a matrix with 16-by-16 blocks with ShiftRows, you get a matrix with one 16-by-8 block in each
// column. The value in blockPos at position i is the vertical position of the ith block.
var blockPos = []int{0, 6, 5, 3, 2, 0, 7, 5, 4, 2, 1, 7, 6, 4, 3, 1}

// blockOfInverse builds one candidate 32-by-32 block of the inverse layer:
// the inverse MixColumns matrix with the chosen row swaps applied,
// left-composed with a block-diagonal "noise" matrix built from the selected
// S-box self-equivalences.
//
// swap[i] == 1 swaps rows 16*i..16*i+7 with rows 16*i+8..16*i+15 of
// unMixColumn; eqs selects, per 8-row band, which entry of the package-level
// equivalences table supplies the noise column.
func blockOfInverse(swap [2]int, eqs [4]int) matrix.Matrix {
	unmixcol := unMixColumn.Dup()
	// Swap chosen rows.
	for i, ok := range swap {
		if ok == 1 {
			for row := 16 * i; row < 16*i+8; row++ {
				unmixcol[row], unmixcol[8+row] = unmixcol[8+row], unmixcol[row]
			}
		}
	}
	// Generate matrix corresponding to self-equivalence noise from S-box.
	// Only column i of each 8-row band is populated, from the first byte of
	// each row of the chosen equivalence.
	noise := matrix.GenerateEmpty(32, 32)
	for i, eq := range eqs {
		for row := 0; row < 8; row++ {
			noise[8*i+row][i] = equivalences[eq][row][0]
		}
	}
	return noise.Compose(unmixcol)
}
// affineLayer implements methods for disambiguating an affine layer of the SPN.
type affineLayer encoding.BlockAffine

// Encode applies the affine layer in the forward direction, delegating to the
// underlying encoding.BlockAffine.
func (al affineLayer) Encode(in [16]byte) [16]byte {
	return encoding.BlockAffine(al).Encode(in)
}

// Decode applies the affine layer in the inverse direction, delegating to the
// underlying encoding.BlockAffine.
func (al affineLayer) Decode(in [16]byte) [16]byte {
	return encoding.BlockAffine(al).Decode(in)
}

// leftCompose composes a Block encoding on the left.
// The result replaces the receiver in place. The error from
// DecomposeBlockAffine is discarded — presumably the composition of affine
// encodings is always itself affine here; TODO confirm.
func (al *affineLayer) leftCompose(left encoding.Block) {
	temp, _ := encoding.DecomposeBlockAffine(encoding.ComposedBlocks{
		left, encoding.BlockAffine(*al),
	})
	*al = affineLayer(temp)
}

// rightCompose composes a Block encoding on the right.
// The result replaces the receiver in place; see leftCompose regarding the
// discarded decomposition error.
func (al *affineLayer) rightCompose(right encoding.Block) {
	temp, _ := encoding.DecomposeBlockAffine(encoding.ComposedBlocks{
		encoding.BlockAffine(*al), right,
	})
	*al = affineLayer(temp)
}
// findPermutation is called on the first affine layer. It returns the permutation matrix corresponding to the row
// permutation that has occurred to the first layer.
//
// It reassembles the 8-row groups of the forward matrix into an unpermuted
// order (by bucketing on each group's Height) and composes the permuted
// matrix with the inverse of that reassembly.
func (al *affineLayer) findPermutation() matrix.Matrix {
	permed := (*al).BlockLinear.Forwards
	unpermed := matrix.Matrix{}
	// For each height band i, collect the 8-row group whose leading row's
	// Height falls inside [16*i, 16*(i+1)).
	for i := 0; i < 8; i++ {
		for pos := 0; pos < 16; pos++ {
			if h := permed[8*pos].Height(); 16*i <= h && h < 16*(i+1) {
				unpermed = append(unpermed, permed[8*pos:8*(pos+1)]...)
			}
		}
	}
	// NOTE(review): the Invert error is discarded — presumably the
	// reassembled matrix is always invertible here; verify.
	unpermed, _ = unpermed.Invert()
	perm := permed.Compose(unpermed)
	return perm
}
// cleanLeft gets the last affine layer back to a matrix with 16-by-16 blocks along the diagonal, times ShiftRows, times
// MixColumns and returns the matrix on the input encoding that it used to do this.
func (al *affineLayer) cleanLeft() encoding.Block {
	inverse := matrix.GenerateEmpty(128, 128)
	mixcols := matrix.GenerateEmpty(128, 128)
	// Combine individual blocks of the inverse matrix into the full inverse matrix. Also build the matrix corresponding
	// to the full-block MixColumns operation. Each of the four recovered
	// 32-row blocks is copied in at a 4-byte column offset per block.
	for block := 0; block < 4; block++ {
		inv := al.findBlockOfInverse(block)
		for row := 0; row < 32; row++ {
			copy(inverse[32*block+row][4*block:], inv[row])
			copy(mixcols[32*block+row][4*block:], mixColumn[row])
		}
	}
	// Apply inverse∘MixColumns on the left of the layer and hand the caller
	// the inverse encoding so the overall construction stays unchanged.
	out := encoding.NewBlockLinear(inverse.Compose(mixcols))
	al.leftCompose(out)
	return encoding.InverseBlock{out}
}
// findBlockOfInverse finds any S-box transpositions or self-equivalence noise that may be hiding in the given block of
// the last affine layer and returns them.
//
// It brute-forces the full candidate space: 2 row-swap bits and four
// self-equivalence indices in [0, 8), i.e. 2*2*8^4 = 16384 candidates, and
// returns the first one consistent with the layer. Panics if none matches,
// which would indicate the layer is not of the expected form.
func (al *affineLayer) findBlockOfInverse(block int) matrix.Matrix {
	for swap1 := 0; swap1 < 2; swap1++ {
		for swap2 := 0; swap2 < 2; swap2++ {
			for p1 := 0; p1 < 8; p1++ {
				for p2 := 0; p2 < 8; p2++ {
					for p3 := 0; p3 < 8; p3++ {
						for p4 := 0; p4 < 8; p4++ {
							cand := blockOfInverse([2]int{swap1, swap2}, [4]int{p1, p2, p3, p4})
							if al.isBlockOfInverse(block, cand) {
								return cand
							}
						}
					}
				}
			}
		}
	}
	panic("Could not find block of inverse!")
}
// isBlockOfInverse takes a candidate solution for the given block of the matrix and returns true if it is valid and
// false if it isn't.
func (al *affineLayer) isBlockOfInverse(block int, cand matrix.Matrix) bool {
	// Pad matrix: place the 32-row candidate at row offset 32*block inside a
	// 128-row, 32-column matrix, zero everywhere else.
	inv := matrix.GenerateEmpty(32*block, 32)
	for _, row := range cand {
		inv = append(inv, row)
	}
	for row := 0; row < 96-32*block; row++ {
		inv = append(inv, matrix.NewRow(32))
	}
	// Test if this is consistent with inverse: every 8-row group of the
	// transposed product must have its leading row's Height inside the band
	// dictated by blockPos, with nothing set past that band.
	// NOTE(review): the 2*(pos+1) slice index appears to be the byte offset
	// of bit 16*(pos+1) — confirm against matrix.Row's layout.
	res := (*al).BlockLinear.Forwards.Compose(inv).Transpose()
	for i := 0; i < 4; i++ {
		row, pos := res[8*i], blockPos[4*block+i]
		if h := row.Height(); !(16*pos <= h && h < 16*(pos+1)) {
			return false
		}
		if !row[2*(pos+1):].IsZero() {
			return false
		}
	}
	return true
}
// getBlock returns the 8-by-8 block of the affine layer at the given position.
func (al *affineLayer) getBlock(row, col int) matrix.Matrix {
out := matrix.Matrix{}
for i := 0; i < 8; i++ {
out = append(out, matrix.Row{al.BlockLinear.Forwards[8*row+i][col]})
}
return out
} | cryptanalysis/xiao/affine.go | 0.760473 | 0.537588 | affine.go | starcoder |
package assert
import (
"testing"
"time"
"github.com/ppapapetrou76/go-testing/internal/pkg/types"
)
// AssertableTime is the assertable structure for time.Time values.
type AssertableTime struct {
	t *testing.T // test context used to report assertion failures
	actual types.TimeValue // the value under assertion, wrapped for comparisons
}

// ThatTime returns an AssertableTime structure initialized with the test reference and the actual value to assert.
// It is the entry point of the fluent API: ThatTime(t, v).IsSameAs(w)...
func ThatTime(t *testing.T, actual time.Time) AssertableTime {
	t.Helper()
	return AssertableTime{
		t: t,
		actual: types.NewTimeValue(actual),
	}
}
// IsSameAs asserts if the expected time.Time is equal to the assertable time.Time value
// It errors the tests if the compared values (actual VS expected) are not equal.
// The receiver is returned so assertions can be chained.
func (a AssertableTime) IsSameAs(expected time.Time) AssertableTime {
	a.t.Helper()
	if a.actual.IsNotSameAs(expected) {
		a.t.Error(shouldBeEqual(a.actual, expected))
	}
	return a
}

// IsAlmostSameAs asserts if the expected time.Time is almost equal to the assertable time.Time value
// It errors the tests if the compared values (actual VS expected) are not equal.
// The tolerance of "almost" is defined by types.TimeValue.IsAlmostSameAs.
func (a AssertableTime) IsAlmostSameAs(expected time.Time) AssertableTime {
	a.t.Helper()
	if !a.actual.IsAlmostSameAs(expected) {
		a.t.Error(shouldBeAlmostSame(a.actual, expected))
	}
	return a
}

// IsNotTheSameAs asserts if the expected time.Time is not equal to the assertable time.Time value
// It errors the tests if the compared values (actual VS expected) are equal.
func (a AssertableTime) IsNotTheSameAs(expected time.Time) AssertableTime {
	a.t.Helper()
	if a.actual.IsSameAs(expected) {
		a.t.Error(shouldNotBeEqual(a.actual, expected))
	}
	return a
}

// IsBefore asserts that the assertable time.Time value is before the expected
// value, and errors the test otherwise.
// NOTE(review): the failure message comes from shouldBeGreater, which looks
// like a copy-paste from a numeric assertion — confirm it reads correctly
// for times.
func (a AssertableTime) IsBefore(expected time.Time) AssertableTime {
	a.t.Helper()
	if !a.actual.IsBefore(expected) {
		a.t.Error(shouldBeGreater(a.actual, expected))
	}
	return a
}

// IsAfter asserts that the assertable time.Time value is after the expected
// value, and errors the test otherwise.
// NOTE(review): the failure message comes from shouldBeGreaterOrEqual — as
// with IsBefore, confirm the wording matches the "after" semantics.
func (a AssertableTime) IsAfter(expected time.Time) AssertableTime {
	a.t.Helper()
	if !a.actual.IsAfter(expected) {
		a.t.Error(shouldBeGreaterOrEqual(a.actual, expected))
	}
	return a
}

// IsDefined asserts if the expected time.Time is defined.
// It errors the tests if the value is not defined.
func (a AssertableTime) IsDefined() AssertableTime {
	a.t.Helper()
	if a.actual.IsNotDefined() {
		a.t.Error(shouldBeDefined(a.actual))
	}
	return a
}
// IsNotDefined asserts if the expected time.Time is not defined.
// It errors the tests if the value is defined.
func (a AssertableTime) IsNotDefined() AssertableTime {
a.t.Helper()
if a.actual.IsDefined() {
a.t.Error(shouldNotBeDefined(a.actual))
}
return a
} | assert/time.go | 0.81637 | 0.782372 | time.go | starcoder |
package vector
import (
"fmt"
"github.com/go-gl/mathgl/mgl32"
"math"
)
// Vector2d is a two-dimensional vector with float64 components.
type Vector2d struct {
	X, Y float64
}

// NewVec2d returns a vector with the given components.
func NewVec2d(x, y float64) Vector2d {
	return Vector2d{x, y}
}

// NewVec2dRad returns a vector of the given length pointing at angle rad (radians).
func NewVec2dRad(rad, length float64) Vector2d {
	return Vector2d{math.Cos(rad) * length, math.Sin(rad) * length}
}

// X32 returns the X component narrowed to float32.
func (v Vector2d) X32() float32 {
	return float32(v.X)
}

// Y32 returns the Y component narrowed to float32.
func (v Vector2d) Y32() float32 {
	return float32(v.Y)
}

// AsVec3 returns the vector as an mgl32.Vec3 with Z = 0.
func (v Vector2d) AsVec3() mgl32.Vec3 {
	return mgl32.Vec3{float32(v.X), float32(v.Y), 0}
}

// AsVec4 returns the vector as a homogeneous mgl32.Vec4 (Z = 0, W = 1).
func (v Vector2d) AsVec4() mgl32.Vec4 {
	return mgl32.Vec4{float32(v.X), float32(v.Y), 0, 1}
}

// String formats the vector as "XxY".
func (v Vector2d) String() string {
	return fmt.Sprintf("%fx%f", v.X, v.Y)
}
// Add returns the component-wise sum v + v1.
func (v Vector2d) Add(v1 Vector2d) Vector2d {
	return Vector2d{v.X + v1.X, v.Y + v1.Y}
}

// AddS returns v with the scalars x and y added to its components.
func (v Vector2d) AddS(x, y float64) Vector2d {
	return Vector2d{v.X + x, v.Y + y}
}

// Sub returns the component-wise difference v - v1.
func (v Vector2d) Sub(v1 Vector2d) Vector2d {
	return Vector2d{v.X - v1.X, v.Y - v1.Y}
}

// SubS returns v with the scalars x and y subtracted from its components.
func (v Vector2d) SubS(x, y float64) Vector2d {
	return Vector2d{v.X - x, v.Y - y}
}

// Mult returns the component-wise (Hadamard) product of v and v1.
func (v Vector2d) Mult(v1 Vector2d) Vector2d {
	return Vector2d{v.X * v1.X, v.Y * v1.Y}
}

// Mid returns the midpoint between v and v1.
func (v Vector2d) Mid(v1 Vector2d) Vector2d {
	return Vector2d{(v.X + v1.X) / 2, (v.Y + v1.Y) / 2}
}

// Dot returns the dot product of v and v1.
func (v Vector2d) Dot(v1 Vector2d) float64 {
	return v.X*v1.X + v.Y*v1.Y
}

// Dst returns the Euclidean distance between v and v1.
func (v Vector2d) Dst(v1 Vector2d) float64 {
	x := v1.X - v.X
	y := v1.Y - v.Y
	return math.Sqrt(x*x + y*y)
}

// DstSq returns the squared Euclidean distance between v and v1,
// avoiding the square root when only comparisons are needed.
func (v Vector2d) DstSq(v1 Vector2d) float64 {
	x := v1.X - v.X
	y := v1.Y - v.Y
	return x*x + y*y
}

// Angle returns the direction of v in degrees.
func (v Vector2d) Angle() float64 {
	return v.AngleR() * 180 / math.Pi
}

// AngleR returns the direction of v in radians, as given by atan2.
func (v Vector2d) AngleR() float64 {
	return math.Atan2(v.Y, v.X)
}

// Nor returns v normalized to unit length. Vectors with squared length
// below epsilon are returned unchanged to avoid dividing by ~0.
func (v Vector2d) Nor() Vector2d {
	length := v.LenSq()
	if length < epsilon {
		return v
	}
	length = math.Sqrt(length)
	return Vector2d{v.X / length, v.Y / length}
}
// AngleRV returns the direction, in radians, of the vector pointing from v1 to v.
func (v Vector2d) AngleRV(v1 Vector2d) float64 {
	return math.Atan2(v.Y-v1.Y, v.X-v1.X)
}

// Lerp linearly interpolates between v (t=0) and v1 (t=1).
func (v Vector2d) Lerp(v1 Vector2d, t float64) Vector2d {
	return Vector2d{
		(v1.X-v.X)*t + v.X,
		(v1.Y-v.Y)*t + v.Y,
	}
}

// Rotate returns v rotated by rad radians about the origin.
func (v Vector2d) Rotate(rad float64) Vector2d {
	cos := math.Cos(rad)
	sin := math.Sin(rad)
	return Vector2d{
		v.X*cos - v.Y*sin,
		v.X*sin + v.Y*cos,
	}
}

// Len returns the Euclidean length of v.
func (v Vector2d) Len() float64 {
	return math.Sqrt(v.X*v.X + v.Y*v.Y)
}

// LenSq returns the squared length of v.
func (v Vector2d) LenSq() float64 {
	return v.X*v.X + v.Y*v.Y
}

// Scl returns v scaled by mag.
func (v Vector2d) Scl(mag float64) Vector2d {
	return Vector2d{v.X * mag, v.Y * mag}
}

// Abs returns v with both components made non-negative.
func (v Vector2d) Abs() Vector2d {
	return Vector2d{math.Abs(v.X), math.Abs(v.Y)}
}

// Copy returns a copy of v. (Vector2d is a value type, so this is
// equivalent to plain assignment.)
func (v Vector2d) Copy() Vector2d {
	return Vector2d{v.X, v.Y}
}

// Copy32 returns v converted to the float32 Vector2f type.
func (v Vector2d) Copy32() Vector2f {
	return Vector2f{float32(v.X), float32(v.Y)}
}
func IsStraightLine64(a, b, c Vector2d) bool {
return math.Abs((b.Y-a.Y)*(c.X-a.X)-(b.X-a.X)*(c.Y-a.Y)) < 0.001
}
func AngleBetween64(centre, p1, p2 Vector2d) float64 { //nolint:misspell
a := centre.Dst(p1)
b := centre.Dst(p2)
c := p1.Dst(p2)
return math.Acos((a*a + b*b - c*c) / (2 * a * b))
} | framework/math/vector/vector2d.go | 0.872198 | 0.753625 | vector2d.go | starcoder |
package parser
import "fmt"
// state is the state of the parsing FSA.
type state int

// The parser moves strictly forward through these states; see the after*
// methods below for the transitions.
const (
	// psEmpty states that we haven't read anything yet.
	psEmpty state = iota
	// psPreTest states that we haven't hit the actual test yet.
	// In this state, we feed any line that isn't the 'Test' line to the implementation.
	psPreTest
	// psPreamble states that we're in the pre-state matter.
	psPreamble
	// psState states that we're in a state block.
	// We expect the specific number of states defined in the preamble.
	psState
	// psSummary states that we're expecting to read the summary tag.
	psSummary
	// psPostamble states that we're in the post-summary matter.
	// This is the only valid final state; see checkFinalState.
	psPostamble
)
// afterBegin advances the parser state after determining the observation is non-empty.
// It returns an error if the parser was not in the empty state.
func (p *parser) afterBegin() error {
	return p.transition(psEmpty, psPreTest)
}

// afterPreTest advances the parser state after finishing the pre-test matter.
// It returns an error if the parser was not in the pre-test state.
func (p *parser) afterPreTest() error {
	return p.transition(psPreTest, psPreamble)
}
// afterPreamble advances the parser state after parsing the state count.
// It fails if the parser was not in the preamble; otherwise the next state
// is chosen from nstates by setStateCount.
func (p *parser) afterPreamble(nstates uint64) error {
	if err := p.checkState(psPreamble); err != nil {
		return err
	}
	p.setStateCount(nstates)
	return nil
}
// setStateCount sets the state and state count of the parser according to
// nstates: zero states skips straight to expecting the summary, otherwise
// the parser expects nstates state blocks.
func (p *parser) setStateCount(nstates uint64) {
	if nstates == 0 {
		p.state = psSummary
		return
	}
	p.state = psState
	p.nstates = nstates
}
// afterStateLine advances the parser state after a state line, decrementing
// the number of outstanding state blocks; once it reaches zero the parser
// moves on to expect the summary.
//
// Bug fix: the original returned nil on a failed state check, silently
// swallowing the ErrBadTransition error.
func (p *parser) afterStateLine() error {
	if err := p.checkState(psState); err != nil {
		return err
	}
	p.nstates--
	if p.nstates == 0 {
		p.state = psSummary
	}
	return nil
}
// afterSummary advances the parser state after reading the summary tag.
// It returns an error if the parser was not expecting the summary.
func (p *parser) afterSummary() error {
	return p.transition(psSummary, psPostamble)
}

// transition handles a simple state transition between from and to.
// It returns an error if the current state isn't from.
// Note that the state is set to `to` unconditionally, even when the check
// fails; the error only records that the source state was unexpected.
func (p *parser) transition(from, to state) error {
	err := p.checkState(from)
	p.state = to
	return err
}
// checkFinalState checks to see if the parser has ended in an appropriate state, and returns an error if not.
// Only psPostamble is a valid final state; every other state maps to a
// specific sentinel error describing what the input was missing.
func (p *parser) checkFinalState() error {
	switch p.state {
	case psEmpty:
		return ErrInputEmpty
	case psPreTest:
		return ErrNoTest
	case psPreamble:
		return ErrNoStates
	case psState:
		// Still inside the state blocks: report how many were outstanding.
		return fmt.Errorf("%w: %d state(s) remain", ErrNotEnoughStates, p.nstates)
	case psSummary:
		return ErrNoSummary
	case psPostamble:
		return nil
	default:
		// Unreachable unless the state enum gains a value this switch
		// doesn't cover.
		return fmt.Errorf("%w: %v", ErrBadState, p.state)
	}
}
// checkState returns with an error if the current automaton state isn't want.
func (p *parser) checkState(want state) error {
if p.state != want {
return fmt.Errorf("%w: got=%v, want=%v", ErrBadTransition, p.state, want)
}
return nil
} | internal/serviceimpl/backend/herdstyle/parser/fsa.go | 0.605099 | 0.556821 | fsa.go | starcoder |
package export
import (
color2 "github.com/RH12503/Triangula/color"
"github.com/RH12503/Triangula/geom"
"github.com/RH12503/Triangula/image"
"github.com/RH12503/Triangula/normgeom"
"github.com/RH12503/Triangula/rasterize"
"github.com/RH12503/Triangula/render"
"github.com/RH12503/Triangula/triangulation"
"github.com/fogleman/gg"
"image/color"
"math"
)
// WriteEffectPNG saves a PNG of a result with an effect applied
func WriteEffectPNG(filename string, points normgeom.NormPointGroup, img image.Data, pixelScale float64, gradient bool) error {
imageW, imageH := img.Size()
w := multAndRound(imageW, pixelScale)
h := multAndRound(imageH, pixelScale)
dc := gg.NewContext(w, h)
dc.SetColor(color.White)
dc.DrawRectangle(0, 0, float64(w), float64(h))
dc.Fill()
triangles := triangulation.Triangulate(points, imageW, imageH)
triangleData := render.TrianglesOnImage(triangles, img)
for i, _ := range triangleData {
tri := triangles[i]
points := tri.Points
y2y3 := points[1].Y - points[2].Y
x3x2 := points[2].X - points[1].X
x1x3 := points[0].X - points[2].X
y1y3 := points[0].Y - points[2].Y
y3y1 := points[2].Y - points[0].Y
dcol := float64(y2y3*x1x3 + x3x2*y1y3)
avg0 := color2.AverageRGB{}
avg1 := color2.AverageRGB{}
avg2 := color2.AverageRGB{}
rasterize.DDATriangle(tri, func(x, y int) {
xx3 := x - points[2].X
yy3 := y - points[2].Y
// Calculate Barymetric coordinates for filling gradients
l0 := math.Max(float64(y2y3*xx3+x3x2*yy3)/dcol, 0)
l1 := math.Max(float64(y3y1*xx3+x1x3*yy3)/dcol, 0)
l2 := math.Max(1-l0-l1, 0)
max := math.Max(l0, math.Max(l1, l2))
col := img.RGBAt(x, y)
if max == l0 {
avg0.Add(col)
} else if max == l1 {
avg1.Add(col)
} else {
avg2.Add(col)
}
})
c0 := avg0.Average()
c1 := avg1.Average()
c2 := avg2.Average()
// Prevent blank triangles
if avg0.Count() == 0 {
c0 = img.RGBAt(min(points[0].X, imageW-1), min(points[0].Y, imageH-1))
}
if avg1.Count() == 0 {
c1 = img.RGBAt(min(points[1].X, imageW-1), min(points[1].Y, imageH-1))
}
if avg2.Count() == 0 {
c2 = img.RGBAt(min(points[2].X, imageW-1), min(points[2].Y, imageH-1))
}
scaledTri := geom.NewTriangle(
multAndRound(points[0].X, pixelScale),
multAndRound(points[0].Y, pixelScale),
multAndRound(points[1].X, pixelScale),
multAndRound(points[1].Y, pixelScale),
multAndRound(points[2].X, pixelScale),
multAndRound(points[2].Y, pixelScale),
)
rasterize.DDATriangle(scaledTri, func(x, y int) {
xx3 := float64(x)/pixelScale - float64(points[2].X)
yy3 := float64(y)/pixelScale - float64(points[2].Y)
l0 := math.Max((float64(y2y3)*xx3+float64(x3x2)*yy3)/dcol, 0)
l1 := math.Max((float64(y3y1)*xx3+float64(x1x3)*yy3)/dcol, 0)
l2 := math.Max(1-l0-l1, 0)
if gradient {
dc.SetColor(color.RGBA{
R: uint8(scale(math.Min(c0.R*l0+c1.R*l1+c2.R*l2, 1), 255)),
G: uint8(scale(math.Min(c0.G*l0+c1.G*l1+c2.G*l2, 1), 255)),
B: uint8(scale(math.Min(c0.B*l0+c1.B*l1+c2.B*l2, 1), 255)),
A: 255,
})
} else {
max := math.Max(l0, math.Max(l1, l2))
if max == l0 {
dc.SetColor(color.RGBA{
R: uint8(scale(c0.R, 255)),
G: uint8(scale(c0.G, 255)),
B: uint8(scale(c0.B, 255)),
A: 255,
})
} else if max == l1 {
dc.SetColor(color.RGBA{
R: uint8(scale(c1.R, 255)),
G: uint8(scale(c1.G, 255)),
B: uint8(scale(c1.B, 255)),
A: 255,
})
} else {
dc.SetColor(color.RGBA{
R: uint8(scale(c2.R, 255)),
G: uint8(scale(c2.G, 255)),
B: uint8(scale(c2.B, 255)),
A: 255,
})
}
}
dc.SetPixel(x, y)
})
}
err := dc.SavePNG(filename)
return err
} | export/effect.go | 0.597021 | 0.416797 | effect.go | starcoder |
package main
import (
"log"
"math/rand"
"github.com/go-gl/gl/v4.1-core/gl"
)
var (
	// squarePoints is the unit-square template (two right triangles) in
	// normalized device coordinates; every cell's quad is derived from it.
	squarePoints = []float32{
		// Bottom left right-angle triangle
		-1, 1, 0,
		1, -1, 0,
		-1, -1, 0,
		// Top right right-angle triangle
		-1, 1, 0,
		1, 1, 0,
		1, -1, 0,
	}
	// squarePointCount is the number of vertices (3 floats per vertex).
	squarePointCount = int32(len(squarePoints) / 3)
)
// cell is one square of the Game of Life board.
type cell struct {
	drawable uint32 // VAO handle for this cell's quad
	x int // column index on the board
	y int // row index on the board
	alive bool // state for the current tick
	nextState bool // state computed for the next tick
}
// checkState advances the cell one tick: it first adopts the state that was
// computed during the previous pass, then applies Conway's rules to derive
// nextState for the following tick.
func (c *cell) checkState(cells [][]*cell) {
	// Commit the state computed on the previous pass.
	c.alive = c.nextState
	liveCount := c.liveNeighbors(cells)
	// Conway's four rules collapse to a single expression:
	// 1. live cell with fewer than two live neighbours dies (underpopulation)
	// 2. live cell with two or three live neighbours lives on
	// 3. live cell with more than three live neighbours dies (overpopulation)
	// 4. dead cell with exactly three live neighbours becomes alive (reproduction)
	c.nextState = liveCount == 3 || (c.alive && liveCount == 2)
}
// liveNeighbors returns the number of live neighbors for a cell.
// The board wraps around at the edges (toroidal topology).
func (c *cell) liveNeighbors(cells [][]*cell) int {
	offsets := [8][2]int{
		{-1, 0},  // left
		{1, 0},   // right
		{0, 1},   // up
		{0, -1},  // down
		{-1, 1},  // top-left
		{1, 1},   // top-right
		{-1, -1}, // bottom-left
		{1, -1},  // bottom-right
	}
	count := 0
	for _, off := range offsets {
		x, y := c.x+off[0], c.y+off[1]
		// Wrap coordinates that fall off either edge of the board.
		if x == len(cells) {
			x = 0
		} else if x == -1 {
			x = len(cells) - 1
		}
		if y == len(cells[x]) {
			y = 0
		} else if y == -1 {
			y = len(cells[x]) - 1
		}
		if cells[x][y].alive {
			count++
		}
	}
	return count
}
// draw draws the cell if it is alive.
// It binds the cell's VAO and issues the draw call; the shader program is
// assumed to be bound already by the caller.
func (c *cell) draw() {
	if !c.alive {
		return
	}
	gl.BindVertexArray(c.drawable)
	gl.DrawArrays(gl.TRIANGLES, 0, squarePointCount)
}
// newCell initializes and returns a cell with the given x/y coordinates.
// It copies the unit-square template and scales/offsets each vertex so the
// quad occupies slot (x, y) of the rows x columns grid in NDC space.
func newCell(x, y int) *cell {
	points := make([]float32, len(squarePoints), len(squarePoints))
	copy(points, squarePoints)
	for i := 0; i < len(points); i++ {
		var factor float32
		var size float32
		switch i % 3 {
		case 0:
			// X component: one column wide.
			size = 1.0 / float32(columns)
			factor = float32(x) * size
		case 1:
			// Y component: one row tall.
			size = 1.0 / float32(rows)
			factor = float32(y) * size
		default:
			// Z component stays untouched.
			continue
		}
		// Map the cell-local [factor, factor+size] range into [-1, 1] NDC:
		// negative template coords get the near edge, positive the far edge.
		if points[i] < 0 {
			points[i] = (factor * 2) - 1
		} else {
			points[i] = ((factor + size) * 2) - 1
		}
	}
	return &cell{
		drawable: makeVao(points),
		x:        x,
		y:        y,
	}
}
// makeCells creates the cell matrix and sets the initial state of the game.
func makeCells(seed int64, threshold float64) [][]*cell {
log.Printf("Using seed=%v, threshold=%v", seed, threshold)
rand.Seed(seed)
cells := make([][]*cell, rows, rows)
for x := 0; x < rows; x++ {
for y := 0; y < columns; y++ {
c := newCell(x, y)
c.alive = rand.Float64() < threshold
c.nextState = c.alive
cells[x] = append(cells[x], c)
}
}
return cells
} | cell.go | 0.663996 | 0.52074 | cell.go | starcoder |
package day267
// Chessboard is a slice of strings. The first string represents the top row
// and the last string represents the bottom row of the board.
// The top is the black player's side and the bottom is the white player's side.
// Empty squares are '.', the black king is 'K', and white pieces use the
// letters B, N, P, R and Q.
type Chessboard []string
// IsSoloBlackKingInCheck answers if a solo black king is in check.
// All other pieces on the board are white pieces.
func IsSoloBlackKingInCheck(board Chessboard) bool {
	kR, kC := findBlackKing(board)
	for r, row := range board {
		for c := range row {
			// Pick the attack predicate for the piece on this square.
			var attacks func(Chessboard, int, int, int, int) bool
			switch row[c] {
			case 'B':
				attacks = bishop
			case 'N':
				attacks = knight
			case 'P':
				attacks = pawn
			case 'R':
				attacks = rook
			case 'Q':
				attacks = queen
			default:
				continue
			}
			if attacks(board, r, c, kR, kC) {
				return true
			}
		}
	}
	return false
}
// knight reports whether a knight at (nR, nC) attacks the square (kR, kC).
func knight(_ Chessboard, nR, nC, kR, kC int) bool {
	jumps := [8][2]int{
		{-2, 1}, {-1, 2}, {1, 2}, {2, 1},
		{2, -1}, {1, -2}, {-1, -2}, {-2, -1},
	}
	for _, j := range jumps {
		if nR+j[0] == kR && nC+j[1] == kC {
			return true
		}
	}
	return false
}
// pawn reports whether a white pawn at (pR, pC) attacks the square (kR, kC).
// White pawns capture one row up (toward row 0), one column to either side.
func pawn(_ Chessboard, pR, pC, kR, kC int) bool {
	if pR-1 != kR {
		return false
	}
	return pC-1 == kC || pC+1 == kC
}
// queen reports whether a queen at (qR, qC) attacks the square (kR, kC);
// a queen combines rook and bishop movement.
func queen(board Chessboard, qR, qC, kR, kC int) bool {
	return rook(board, qR, qC, kR, kC) || bishop(board, qR, qC, kR, kC)
}
// rook reports whether a rook at (rR, rC) attacks the king at (kR, kC).
// A rook attacks along its rank or file when no piece stands between them.
// Unlike the original, only the direction toward the king is walked instead
// of scanning all four directions.
func rook(board Chessboard, rR, rC, kR, kC int) bool {
	if rC == kC { // same file: walk vertically toward the king
		step := 1
		if kR < rR {
			step = -1
		}
		for r := rR + step; r != kR; r += step {
			if board[r][rC] != '.' {
				return false // blocked by an intervening piece
			}
		}
		return true
	}
	if rR == kR { // same rank: walk horizontally toward the king
		step := 1
		if kC < rC {
			step = -1
		}
		for c := rC + step; c != kC; c += step {
			if board[rR][c] != '.' {
				return false // blocked by an intervening piece
			}
		}
		return true
	}
	// Not on a shared rank or file: no rook attack possible.
	return false
}
// bishop reports whether a bishop at (bR, bC) attacks the king at (kR, kC).
// A bishop attacks along a diagonal when no piece stands between them.
// Only the diagonal toward the king is walked instead of scanning all four.
func bishop(board Chessboard, bR, bC, kR, kC int) bool {
	stepR, stepC := 1, 1
	if kR < bR {
		stepR = -1
	}
	if kC < bC {
		stepC = -1
	}
	// The squares share a diagonal iff the absolute row and column distances
	// are equal and non-zero.
	if (kR-bR)*stepR != (kC-bC)*stepC || kR == bR {
		return false
	}
	for r, c := bR+stepR, bC+stepC; r != kR; r, c = r+stepR, c+stepC {
		if board[r][c] != '.' {
			return false // blocked by an intervening piece
		}
	}
	return true
}
func findBlackKing(board Chessboard) (row int, col int) {
for row := range board {
for col := range board[row] {
if board[row][col] == 'K' {
return row, col
}
}
}
return
} | day267/problem.go | 0.725843 | 0.566139 | problem.go | starcoder |
package physics2d
import (
"github.com/ByteArena/box2d"
"github.com/PucklaMotzer09/GoHomeEngine/src/gohome"
"github.com/PucklaMotzer09/mathgl/mgl32"
"github.com/PucklaMotzer09/tmx"
"runtime"
"strconv"
"strings"
"sync"
)
var (
	// Defines how big one meter is, in pixels; every conversion helper
	// below multiplies or divides by this factor.
	PIXEL_PER_METER float32 = 100.0
	// Defines the size of the world (can be used to remove everything outside of the world)
	WORLD_SIZE mgl32.Vec2
	// The number of velocity iterations of Box2D's solver per step
	VELOCITY_ITERATIONS = 6
	// The number of position iterations of Box2D's solver per step
	POSITION_ITERATIONS = 2
)
// Converts a physics scalar value (meters) to a pixel value
func ScalarToPixel(v float64) float32 {
	return float32(v) * PIXEL_PER_METER
}
// Converts a pixel scalar value to a physics value (meters)
func ScalarToBox2D(v float32) float64 {
	return float64(v / PIXEL_PER_METER)
}
// Converts a pixel Vec2 to a physics Vec2 (component-wise scaling only)
func Vec2ToB2Vec2(vec mgl32.Vec2) box2d.B2Vec2 {
	return box2d.B2Vec2{
		ScalarToBox2D(vec[0]),
		ScalarToBox2D(vec[1]),
	}
}
// Converts a physics Vec2 to a pixel Vec2 (component-wise scaling only)
func B2Vec2ToVec2(vec box2d.B2Vec2) mgl32.Vec2 {
	return mgl32.Vec2{
		ScalarToPixel(vec.X),
		ScalarToPixel(vec.Y),
	}
}
// Converts a physics direction into a pixel direction.
// Directions flip the Y axis between the two spaces.
func ToPixelDirection(vec box2d.B2Vec2) mgl32.Vec2 {
	vec1 := B2Vec2ToVec2(vec)
	vec1[1] = -vec1[1]
	return vec1
}
// Converts a pixel direction into a physics direction (Y axis flipped)
func ToBox2DDirection(vec mgl32.Vec2) box2d.B2Vec2 {
	vec[1] = -vec[1]
	return Vec2ToB2Vec2(vec)
}
// Converts physics coordinates to pixel coordinates.
// Positions are mirrored across the world height, moving the vertical
// origin between the two coordinate systems.
func ToPixelCoordinates(vec box2d.B2Vec2) mgl32.Vec2 {
	vec1 := B2Vec2ToVec2(vec)
	vec1[1] = WORLD_SIZE[1] - vec1[1]
	return vec1
}
// Converts pixel coordinates to physics coordinates (inverse of the above)
func ToBox2DCoordinates(vec mgl32.Vec2) box2d.B2Vec2 {
	vec[1] = WORLD_SIZE[1] - vec[1]
	return Vec2ToB2Vec2(vec)
}
// Converts a pixel angle (degrees) to a physics angle (radians)
func ToBox2DAngle(angle float32) float64 {
	return float64(mgl32.DegToRad(angle))
}
// Converts a physics angle (radians) to a pixel angle (degrees)
func ToPixelAngle(angle float64) float32 {
	return mgl32.RadToDeg(float32(angle))
}
// The manager that handles all 2D physics
type PhysicsManager2D struct {
	// The Box2D world
	World box2d.B2World
	// Whether the world is paused (no movement)
	Paused bool
	// Connectors that are synchronized after every simulation step.
	connectors []*PhysicsConnector2D
}
// Initialises the world and everything using a gravity value given in
// pixel-space direction (it is converted to Box2D's convention).
// Also captures the native resolution as the world size.
func (this *PhysicsManager2D) Init(gravity mgl32.Vec2) {
	this.World = box2d.MakeB2World(ToBox2DDirection(gravity))
	WORLD_SIZE = gohome.Render.GetNativeResolution()
	this.Paused = false
	gohome.ErrorMgr.Log("Physics", "Box2D", "Initialized!")
}
// Gets called every frame and updates the physics.
// Steps the Box2D world by delta_time, then synchronizes every registered
// connector. Does nothing while Paused is set.
func (this *PhysicsManager2D) Update(delta_time float32) {
	if this.Paused {
		return
	}
	this.World.Step(float64(delta_time), int(VELOCITY_ITERATIONS), int(POSITION_ITERATIONS))
	if len(this.connectors) != 0 {
		if runtime.GOOS != "android" {
			// Fan out connector updates, one goroutine each, and wait.
			var wg sync.WaitGroup
			wg.Add(len(this.connectors))
			for _, c := range this.connectors {
				go func(_c *PhysicsConnector2D) {
					_c.Update()
					wg.Done()
				}(c)
			}
			wg.Wait()
		} else {
			// On Android the connectors are updated serially instead.
			for _, c := range this.connectors {
				c.Update()
			}
		}
	}
}
// Creates a box that can move around.
// pos is the box center and size its extents, both in pixel coordinates.
func (this *PhysicsManager2D) CreateDynamicBox(pos mgl32.Vec2, size mgl32.Vec2) *box2d.B2Body {
	bodyDef := box2d.MakeB2BodyDef()
	bodyDef.Type = box2d.B2BodyType.B2_dynamicBody
	bodyDef.Position = ToBox2DCoordinates(pos)
	shape := box2d.MakeB2PolygonShape()
	shape.SetAsBox(ScalarToBox2D(size[0])/2.0, ScalarToBox2D(size[1])/2.0)
	body := this.World.CreateBody(&bodyDef)
	body.CreateFixture(&shape, 1.0)
	return body
}
// Creates a box that sticks to a position.
// Static fixtures here use density 1, friction 2 and no restitution.
func (this *PhysicsManager2D) CreateStaticBox(pos mgl32.Vec2, size mgl32.Vec2) *box2d.B2Body {
	bodyDef := box2d.MakeB2BodyDef()
	bodyDef.Type = box2d.B2BodyType.B2_staticBody
	bodyDef.Position = ToBox2DCoordinates(pos)
	fdef := box2d.MakeB2FixtureDef()
	fdef.Density = 1.0
	fdef.Friction = 2.0
	fdef.Restitution = 0.0
	shape := box2d.MakeB2PolygonShape()
	shape.SetAsBox(ScalarToBox2D(size[0])/2.0, ScalarToBox2D(size[1])/2.0)
	fdef.Shape = &shape
	body := this.World.CreateBody(&bodyDef)
	body.CreateFixtureFromDef(&fdef)
	return body
}
// Creates a circle that can move around.
// pos is the circle center in pixel coordinates, radius in pixels.
func (this *PhysicsManager2D) CreateDynamicCircle(pos mgl32.Vec2, radius float32) *box2d.B2Body {
	bodyDef := box2d.MakeB2BodyDef()
	bodyDef.Type = box2d.B2BodyType.B2_dynamicBody
	bodyDef.Position = ToBox2DCoordinates(pos)
	shape := box2d.MakeB2CircleShape()
	shape.SetRadius(ScalarToBox2D(radius))
	body := this.World.CreateBody(&bodyDef)
	body.CreateFixture(&shape, 1.0)
	return body
}
// Creates a circle that sticks to a position.
// Same fixture parameters as CreateStaticBox.
func (this *PhysicsManager2D) CreateStaticCircle(pos mgl32.Vec2, radius float32) *box2d.B2Body {
	bodyDef := box2d.MakeB2BodyDef()
	bodyDef.Type = box2d.B2BodyType.B2_staticBody
	bodyDef.Position = ToBox2DCoordinates(pos)
	fdef := box2d.MakeB2FixtureDef()
	fdef.Density = 1.0
	fdef.Friction = 2.0
	fdef.Restitution = 0.0
	shape := box2d.MakeB2CircleShape()
	shape.SetRadius(ScalarToBox2D(radius))
	fdef.Shape = &shape
	body := this.World.CreateBody(&bodyDef)
	body.CreateFixtureFromDef(&fdef)
	return body
}
// Returns a new debug drawer with bodies and joints enabled by default.
func (this *PhysicsManager2D) GetDebugDraw() PhysicsDebugDraw2D {
	return PhysicsDebugDraw2D{
		mgr:        this,
		DrawBodies: true,
		DrawJoints: true,
		DrawAABBs:  false,
		Visible:    true,
	}
}
// Converts the objects of a tmx map layer to static collision objects.
// Every layer whose name contains layerName is processed; each map object
// becomes a static body according to its shape (ellipse, polygon, polyline
// or rectangle). Point, text and tile objects are skipped.
func (this *PhysicsManager2D) LayerToCollision(tiledmap *gohome.TiledMap, layerName string) (bodies []*box2d.B2Body) {
	layers := tiledmap.Layers
	for i := 0; i < len(layers); i++ {
		l := layers[i]
		if !strings.Contains(l.Name, layerName) {
			continue
		}
		objs := l.Objects
		if len(objs) == 0 {
			continue
		}
		// Layer offsets are optional in tmx; default to zero.
		var lx, ly float64 = 0.0, 0.0
		if l.OffsetX != nil {
			lx = *l.OffsetX
		}
		if l.OffsetY != nil {
			ly = *l.OffsetY
		}
		for j := 0; j < len(objs); j++ {
			o := objs[j]
			if o.Ellipse != nil {
				if o.Width != nil && o.Height != nil {
					bodies = append(bodies, this.CreateEllipse(lx+o.X, ly+o.Y, *o.Width, *o.Height))
				}
			} else if o.Polygon != nil {
				bodies = append(bodies, this.CreatePolygon(lx+o.X, ly+o.Y, o.Polygon))
			} else if o.Polyline != nil {
				bodies = append(bodies, this.CreatePolyline(lx+o.X, ly+o.Y, o.Polyline))
			} else if o.Point == nil && o.Text == nil && o.GID == nil {
				// Plain object with a size: treat it as a rectangle.
				if o.Width != nil && o.Height != nil {
					bodies = append(bodies, this.CreateRectangle(lx+o.X, ly+o.Y, *o.Width, *o.Height))
				}
			}
		}
	}
	return
}
// Creates an "ellipse" as a static circle whose radius is the mean of half
// the width and half the height (Box2D has no true ellipse shape).
func (this *PhysicsManager2D) CreateEllipse(X, Y, Width, Height float64) *box2d.B2Body {
	radius := float32((Width + Height) / 2.0 / 2.0)
	pos := [2]float32{float32(X) + radius, float32(Y) + radius}
	return this.CreateStaticCircle(pos, radius)
}
// Creates a static rectangle; (X, Y) is the top-left corner in pixels.
func (this *PhysicsManager2D) CreateRectangle(X, Y, Width, Height float64) *box2d.B2Body {
	size := mgl32.Vec2{float32(Width), float32(Height)}
	pos := mgl32.Vec2{float32(X), float32(Y)}.Add(size.Mul(0.5))
	return this.CreateStaticBox(pos, size)
}
// Creates a polygon from a tmx polygon.
// Box2D polygons support at most 8 vertices; larger polygons are rejected
// with an error log and a nil return.
func (this *PhysicsManager2D) CreatePolygon(X, Y float64, poly *tmx.Polygon) *box2d.B2Body {
	points := strings.Split(poly.Points, " ")
	if len(points) > 8 {
		gohome.ErrorMgr.Error("Physics", "Box2D", "Couldn't create collision polygon: It has more than 8 vertices")
		return nil
	}
	vertices := make([]mgl32.Vec2, len(points))
	b2vertices := make([]box2d.B2Vec2, len(points))
	for i := 0; i < len(points); i++ {
		// NOTE(review): ParseFloat errors are silently ignored; malformed
		// tmx points become (0, 0) vertices — consider logging them.
		point := strings.Split(points[i], ",")
		x, _ := strconv.ParseFloat(point[0], 32)
		y, _ := strconv.ParseFloat(point[1], 32)
		vertices[i][0] = float32(x)
		vertices[i][1] = float32(y)
	}
	for i := 0; i < len(vertices); i++ {
		b2vertices[i] = ToBox2DDirection(vertices[i])
	}
	bodyDef := box2d.MakeB2BodyDef()
	bodyDef.Type = box2d.B2BodyType.B2_staticBody
	bodyDef.Position = ToBox2DCoordinates([2]float32{float32(X), float32(Y)})
	fdef := box2d.MakeB2FixtureDef()
	fdef.Density = 1.0
	fdef.Friction = 2.0
	fdef.Restitution = 0.0
	shape := box2d.MakeB2PolygonShape()
	shape.Set(b2vertices, len(b2vertices))
	fdef.Shape = &shape
	body := this.World.CreateBody(&bodyDef)
	body.CreateFixtureFromDef(&fdef)
	return body
}
// Creates a polyline from a tmx polyline as a static Box2D chain shape
// (no vertex-count limit, unlike CreatePolygon).
func (this *PhysicsManager2D) CreatePolyline(X, Y float64, line *tmx.Polyline) *box2d.B2Body {
	bodyDef := box2d.MakeB2BodyDef()
	bodyDef.Type = box2d.B2BodyType.B2_staticBody
	bodyDef.Position = ToBox2DCoordinates([2]float32{float32(X), float32(Y)})
	shape := box2d.MakeB2ChainShape()
	points := strings.Split(line.Points, " ")
	vertices := make([]mgl32.Vec2, len(points))
	b2vertices := make([]box2d.B2Vec2, len(points))
	for i := 0; i < len(points); i++ {
		// NOTE(review): parse errors are ignored here as well.
		point := strings.Split(points[i], ",")
		x, _ := strconv.ParseFloat(point[0], 32)
		y, _ := strconv.ParseFloat(point[1], 32)
		vertices[i][0] = float32(x)
		vertices[i][1] = float32(y)
	}
	for i := 0; i < len(vertices); i++ {
		b2vertices[i] = ToBox2DDirection(vertices[i])
	}
	fdef := box2d.MakeB2FixtureDef()
	fdef.Friction = 2.0
	fdef.Density = 1.0
	fdef.Restitution = 0.0
	shape.CreateChain(b2vertices, len(b2vertices))
	fdef.Shape = &shape
	body := this.World.CreateBody(&bodyDef)
	body.CreateFixtureFromDef(&fdef)
	return body
}
// Destroys all bodies and the world, and clears the connector list.
// The manager must be re-initialised with Init before further use.
func (this *PhysicsManager2D) Terminate() {
	for b := this.World.GetBodyList(); b != nil; b = b.GetNext() {
		this.World.DestroyBody(b)
	}
	this.World.Destroy()
	this.connectors = this.connectors[:0]
}
package a
import (
"fmt"
"strings"
)
// Color represents a color in RGBA format.
// Each channel is an 8-bit value; A == 255 means fully opaque.
type Color struct {
	R, G, B, A byte
}
// NewColor creates a Color from flexible arguments:
//   - a single color hex string (see ParseHexColor),
//   - a single byte for a grayscale color,
//   - three byte values for an opaque RGB color,
//   - four byte values for an RGBA color.
// Anything else yields black.
func NewColor(params ...interface{}) Color {
	switch len(params) {
	case 1:
		switch v := params[0].(type) {
		case string:
			return ParseHexColor(v)
		case byte:
			return Color{R: v, G: v, B: v, A: 255}
		default:
			return BlackColor()
		}
	case 3:
		return Color{
			R: requireByte(params[0]),
			G: requireByte(params[1]),
			B: requireByte(params[2]),
			A: 255,
		}
	case 4:
		return Color{
			R: requireByte(params[0]),
			G: requireByte(params[1]),
			B: requireByte(params[2]),
			A: requireByte(params[3]),
		}
	default:
		return BlackColor()
	}
}
// ParseHexColor parses a color hex string in format #rrggbbaa, #rrggbb,
// #rgba or #rgb into a Color. Malformed or unprefixed input yields black.
func ParseHexColor(hex string) Color {
	c := BlackColor()
	if !strings.HasPrefix(hex, "#") {
		return c
	}
	switch len(hex) {
	case 9: // #rrggbbaa
		_, _ = fmt.Sscanf(hex, "#%02x%02x%02x%02x", &c.R, &c.G, &c.B, &c.A)
	case 7: // #rrggbb (alpha stays 255)
		_, _ = fmt.Sscanf(hex, "#%02x%02x%02x", &c.R, &c.G, &c.B)
	case 5: // #rgba shorthand: expand each nibble, e.g. 0xf -> 0xff.
		// Fix: the original forgot the x17 nibble expansion in this case,
		// inconsistent with the #rgb branch below.
		_, _ = fmt.Sscanf(hex, "#%1x%1x%1x%1x", &c.R, &c.G, &c.B, &c.A)
		c.R *= 17
		c.G *= 17
		c.B *= 17
		c.A *= 17
	case 4: // #rgb shorthand: expand each nibble
		_, _ = fmt.Sscanf(hex, "#%1x%1x%1x", &c.R, &c.G, &c.B)
		c.R *= 17
		c.G *= 17
		c.B *= 17
	}
	return c
}
// GetHex returns the color as "#rrggbb" when fully opaque,
// "#rrggbbaa" otherwise.
func (c *Color) GetHex() string {
	if c.A != 255 {
		return fmt.Sprintf("#%02x%02x%02x%02x", c.R, c.G, c.B, c.A)
	}
	return fmt.Sprintf("#%02x%02x%02x", c.R, c.G, c.B)
}
// Equals reports whether other is a Color or *Color with identical channels.
func (c *Color) Equals(other interface{}) bool {
	switch o := other.(type) {
	case Color:
		return c.R == o.R && c.G == o.G && c.B == o.B && c.A == o.A
	case *Color:
		return c.R == o.R && c.G == o.G && c.B == o.B && c.A == o.A
	default:
		return false
	}
}
//region Stringable implementation
// ToString returns the color's hex representation (see GetHex).
func (c *Color) ToString() string {
	return c.GetHex()
}
// FromString replaces the color with the one parsed from the hex string src.
func (c *Color) FromString(src string) {
	*c = ParseHexColor(src)
}
//endregion
//region Mappable implementation
// ToMap serializes the channels into a map with keys "r", "g", "b", "a".
func (c *Color) ToMap() SiMap {
	return map[string]interface{}{
		"r": c.R,
		"g": c.G,
		"b": c.B,
		"a": c.A,
	}
}
// FromMap restores the channels from a map produced by ToMap.
// Missing or non-numeric entries become 0 (see requireByte).
func (c *Color) FromMap(siMap SiMap) {
	c.R = requireByte(siMap["r"])
	c.G = requireByte(siMap["g"])
	c.B = requireByte(siMap["b"])
	c.A = requireByte(siMap["a"])
}
//endregion
// EncodeToByteArray serializes the color as the 4 bytes [R, G, B, A].
func (c *Color) EncodeToByteArray() []byte {
	return []byte{c.R, c.G, c.B, c.A}
}
// Returns Vector4 with normalized color values: each channel is mapped from
// [0, 255] to [0, 1] with x=R, y=G, z=B, w=A.
func (c *Color) Normalize() Vector4 {
	x := float32(c.R) / 255
	y := float32(c.G) / 255
	z := float32(c.B) / 255
	w := float32(c.A) / 255
	return NewVector4(x, y, z, w)
}
//region color presets
// Convenience constructors for common colors; all fully opaque except
// TransparentColor.
func BlackColor() Color {
	return NewColor(0, 0, 0, 255)
}
func WhiteColor() Color {
	return NewColor(255, 255, 255, 255)
}
func RedColor() Color {
	return NewColor(255, 0, 0, 255)
}
func GreenColor() Color {
	return NewColor(0, 255, 0, 255)
}
func BlueColor() Color {
	return NewColor(0, 0, 255, 255)
}
func TransparentColor() Color {
	return NewColor(0, 0, 0, 0)
}
func PinkColor() Color {
	return NewColor(255,192,203, 255)
}
// requireByte coerces a numeric interface value to a byte, truncating
// fractional parts and wrapping values outside [0, 255] via Go's integer
// conversion rules. Non-numeric values yield 0.
func requireByte(num interface{}) byte {
	// Idiomatic type switch binds the concrete value, avoiding the
	// repeated num.(T) assertions of the original.
	switch v := num.(type) {
	case byte:
		return v
	case int:
		return byte(v)
	case int32:
		return byte(v)
	case int64:
		return byte(v)
	case float32:
		return byte(v)
	case float64:
		return byte(v)
	default:
		return 0
	}
}
package problem
import (
"github.com/water-vapor/euclidea-solver/pkg/geom"
"math"
)
// Problem 1: Angle Of 60 Degree ("Angel" in the identifier and statement
// title is a typo for "Angle"; both are kept for compatibility).
// Given a horizontal half line from the origin, construct the line through
// the origin at 60 degrees to it (through (1, sqrt(3))).
func angelOf60Degree() *Statement {
	problem := geom.NewBoard()
	pt1 := geom.NewPoint(0, 0)
	pt2 := geom.NewPoint(1, math.Sqrt(3))
	problem.AddPoint(pt1)
	problem.HalfLines.Add(geom.NewHalfLineFromDirection(pt1, geom.NewVector2D(1, 0)))
	target := NewTarget()
	target.Lines.Add(geom.NewLineFromTwoPoints(pt1, pt2))
	sequences := map[string]string{"E": "OOI"}
	return NewStatement(problem, target, sequences, "1.1 Angel Of 60 Degree")
}
// Problem 2: Perpendicular Bisector
// Given a horizontal segment, construct its perpendicular bisector.
func perpendicularBisector() *Statement {
	problem := geom.NewBoard()
	pt1 := geom.NewPoint(-1, 0)
	pt2 := geom.NewPoint(1, 0)
	s := geom.NewSegment(pt1, pt2)
	l := s.Bisector()
	problem.AddSegment(s)
	target := NewTarget()
	target.Lines.Add(l)
	sequences := map[string]string{"E": "OOI"}
	return NewStatement(problem, target, sequences, "1.2 Perpendicular Bisector")
}
// Problem 3: Midpoint
// Given two points (no segment), construct their midpoint (the origin).
func midpoint() *Statement {
	problem := geom.NewBoard()
	pt1 := geom.NewPoint(-1, 0)
	pt2 := geom.NewPoint(1, 0)
	problem.AddPoint(pt1)
	problem.AddPoint(pt2)
	pt3 := geom.NewPoint(0, 0)
	target := NewTarget()
	target.Points.Add(pt3)
	sequences := map[string]string{"E": "I+"}
	return NewStatement(problem, target, sequences, "1.3 Midpoint")
}
// Problem 4: Circle in Square
// Given the unit-ish square with corners (±1, ±1), construct its inscribed
// circle (centered at the origin with radius 1).
func circleInSquare() *Statement {
	board := geom.NewBoard()
	bl := geom.NewPoint(-1, -1) // bottom-left
	tl := geom.NewPoint(-1, 1)  // top-left
	tr := geom.NewPoint(1, 1)   // top-right
	br := geom.NewPoint(1, -1)  // bottom-right
	board.AddPoint(bl)
	board.AddPoint(tl)
	board.AddPoint(tr)
	board.AddPoint(br)
	board.AddSegment(geom.NewSegment(bl, tl))
	board.AddSegment(geom.NewSegment(tl, tr))
	board.AddSegment(geom.NewSegment(tr, br))
	board.AddSegment(geom.NewSegment(br, bl))
	goal := NewTarget()
	goal.Circles.Add(geom.NewCircleByRadius(geom.NewPoint(0, 0), 1))
	seqs := map[string]string{"E": "I+O"}
	return NewStatement(board, goal, seqs, "1.4 Circle in Square")
}
// Problem 5: Rhombus in Rectangle
// Given a 3 x sqrt(3) rectangle, construct the two slanted sides of the
// inscribed rhombus that shares the rectangle's horizontal sides.
func rhombusInRectangle() *Statement {
	board := geom.NewBoard()
	bl := geom.NewPoint(0, 0)
	br := geom.NewPoint(3, 0)
	tr := geom.NewPoint(3, math.Sqrt(3))
	tl := geom.NewPoint(0, math.Sqrt(3))
	board.AddPoint(bl)
	board.AddPoint(br)
	board.AddPoint(tr)
	board.AddPoint(tl)
	board.AddSegment(geom.NewSegment(bl, br))
	board.AddSegment(geom.NewSegment(br, tr))
	board.AddSegment(geom.NewSegment(tr, tl))
	board.AddSegment(geom.NewSegment(tl, bl))
	goal := NewTarget()
	p20 := geom.NewPoint(2, 0)
	p1s := geom.NewPoint(1, math.Sqrt(3))
	goal.Lines.Add(geom.NewLineFromTwoPoints(bl, p1s))
	goal.Lines.Add(geom.NewLineFromTwoPoints(p20, tr))
	seqs := map[string]string{"E": "+II"}
	return NewStatement(board, goal, seqs, "1.5 Rhombus in Rectangle")
}
// Problem 6: Circle Center
// Given only a circle, construct its center point.
func circleCenter() *Statement {
	problem := geom.NewBoard()
	pt1 := geom.NewPoint(0, 0)
	c := geom.NewCircleByRadius(pt1, 2)
	problem.AddCircle(c)
	target := NewTarget()
	target.Points.Add(pt1)
	sequences := map[string]string{"E": "OOOII", "L": "++"}
	return NewStatement(problem, target, sequences, "1.6 Circle Center")
}
// Problem 7: Inscribed Square
// Given a circle, its center, and the top point (0, 2), construct the other
// three vertices and two sides of the inscribed square.
func inscribedSquare() *Statement {
	// the last two unnecessary lines are removed
	problem := geom.NewBoard()
	pt1 := geom.NewPoint(0, 0)
	c := geom.NewCircleByRadius(pt1, 2)
	problem.AddCircle(c)
	problem.AddPoint(pt1)
	problem.AddPoint(geom.NewPoint(0, 2))
	target := NewTarget()
	pt2 := geom.NewPoint(2, 0)
	pt3 := geom.NewPoint(-2, 0)
	pt4 := geom.NewPoint(0, -2)
	target.Points.Add(pt2)
	target.Points.Add(pt3)
	target.Points.Add(pt4)
	target.Lines.Add(geom.NewLineFromTwoPoints(pt2, pt4))
	target.Lines.Add(geom.NewLineFromTwoPoints(pt3, pt4))
	sequences := map[string]string{"E": "OOIII", "L": "I+IIII"}
	return NewStatement(problem, target, sequences, "1.7 Inscribed Square")
}
package decoder
import (
"fmt"
"math"
"github.com/rqme/neat"
)
// Special case: 1 layer of nodes in this case, examine nodes to separate out "vitural layers" by neuron type
// othewise connect every neuron in one layer to the subsequent layer
type HyperNEATSettings interface {
SubstrateLayers() []SubstrateNodes // Substrate definitions
WeightRange() float64 // Weight range for new connections
}
type HyperNEAT struct {
HyperNEATSettings
CppnDecoder neat.Decoder
}
// Outputs 0..len(substrate layers) = weights. if len(outputs) = 2* that number, second set is activation function, 3rd is bias connection? Need flags for these
// n = number of layers - 1
// first n = weights
// flags for bias outputs = 1 or 2 meaning use outputs starting at 1*n or 2*n
// activation, too.
// Decode builds a substrate-network phenome from genome g: the genome is
// first decoded into a CPPN, which is then queried with every
// (source position, target position) pair to produce connection weights.
func (d *HyperNEAT) Decode(g neat.Genome) (p neat.Phenome, err error) {
	// Validate the number of inputs and outputs
	if err = d.validate(g); err != nil {
		return
	}
	// Decode the CPPN
	var cppn neat.Phenome
	cppn, err = d.CppnDecoder.Decode(g)
	if err != nil {
		return nil, err
	}
	// Create a new Substrate, pre-sizing for all nodes and the fully
	// connected layer-to-layer connection count.
	layers := d.SubstrateLayers()
	ncnt := len(layers[0])
	ccnt := 0
	for i := 1; i < len(layers); i++ {
		ncnt += len(layers[i])
		ccnt += len(layers[i]) * len(layers[i-1])
	}
	s := &Substrate{
		Nodes: make([]SubstrateNode, 0, ncnt),
		Conns: make([]SubstrateConn, 0, ccnt),
	}
	// Add the nodes to the substrate, assigning sequential ids.
	i := 0
	for _, l := range layers {
		// TODO: Should I sort the nodes by position in the network?
		for j, n := range l {
			l[j].id = i
			s.Nodes = append(s.Nodes, n)
			i += 1
		}
	}
	// Create connections: CPPN output l-1 supplies the weights for the
	// connections into layer l.
	var outputs []float64 // output from the Cppn
	wr := d.WeightRange()
	for l := 1; l < len(layers); l++ {
		for _, src := range layers[l-1] {
			for _, tgt := range layers[l] {
				outputs, err = cppn.Activate(append(src.Position, tgt.Position...))
				if err != nil {
					return nil, err
				}
				// Magnitudes below 0.2 are pruned; the rest are rescaled
				// from (0.2, 1] into (0, WeightRange], keeping the sign.
				w := math.Abs(outputs[l-1])
				if w > 0.2 {
					s.Conns = append(s.Conns, SubstrateConn{
						Source: src.id,
						Target: tgt.id,
						Weight: math.Copysign((w-0.2)*wr/0.8, outputs[l-1]),
					})
				}
			}
		}
	}
	// Return the new network
	var net neat.Network
	net, err = s.Decode()
	if err != nil {
		return nil, err
	}
	p = Phenome{g, net}
	return
}
// validate checks that the genome's input/output counts and the configured
// substrate layers are mutually consistent before decoding.
func (d *HyperNEAT) validate(g neat.Genome) error {
	var icnt, ocnt int
	for _, n := range g.Nodes {
		if n.NeuronType == neat.Input {
			icnt += 1
		} else if n.NeuronType == neat.Output {
			// Outputs must be able to produce negative values (connection
			// weights are signed), so an activation whose range is NaN or
			// whose minimum is non-negative is rejected.
			min, max := n.ActivationType.Range()
			found := false
			switch {
			case math.IsNaN(min), math.IsNaN(max):
				found = true
			case min >= 0:
				found = true
			}
			if found {
				return fmt.Errorf("Invalid activation type for output: %s [%f, %f]", n.ActivationType, min, max)
			}
			ocnt += 1
		}
	}
	// Every substrate node must have the same position dimensionality.
	layers := d.SubstrateLayers()
	cnt := len(layers[0][0].Position)
	for i, l := range layers {
		for j, n := range l {
			if len(n.Position) != cnt {
				return fmt.Errorf("Inconsistent position length in substrate layer %d node %d. Expected %d but found %d.", i, j, cnt, len(n.Position))
			}
		}
	}
	// NOTE(review): this comparison fires when the genome has MORE inputs
	// than the 2*cnt coordinates the substrate supplies, yet the message
	// says "Insufficient". Either the condition or the wording looks
	// inverted — confirm the intended direction before changing it.
	if cnt*2 < icnt {
		return fmt.Errorf("Insufficient number of inputs to decode substrate. Need %d but have %d", cnt*2, icnt)
	}
	if ocnt < len(layers)-1 {
		return fmt.Errorf("Insufficient number of outputs to decode substrate. Need %d but have %d", len(layers)-1, ocnt)
	}
	return nil
}
package mm
import (
"fmt"
"math"
)
// Box3 is an axis-aligned bounding box in 3D space, stored as its minimum
// and maximum corner points.
type Box3 struct {
	Min *Vector3
	Max *Vector3
}
// NewBox3 returns a box in the "empty" state: Min starts above Max
// (Vector3_Max / Vector3_Min), so the first expansion sets real bounds
// (see MakeEmpty and IsEmpty).
func NewBox3() *Box3 {
	return &Box3{
		Vector3_Max(),
		Vector3_Min(),
	}
}
// String implements fmt.Stringer for debugging output.
func (b *Box3) String() string {
	return fmt.Sprintf("&Box3{Min: %s, Max: %s}", b.Min, b.Max)
}
// Set copies min and max into the box and returns it for chaining.
func (b *Box3) Set(min, max *Vector3) *Box3 {
	b.Min.Copy(min)
	b.Max.Copy(max)
	return b
}
// SetFromArray sets the box to tightly bound every (x, y, z) triple in
// array and returns it for chaining.
func (b *Box3) SetFromArray(array []float64) *Box3 {
	minX := math.MaxFloat64
	minY := math.MaxFloat64
	minZ := math.MaxFloat64
	// Fix: the max accumulators must start at the most negative float, not
	// math.SmallestNonzeroFloat64 (a tiny *positive* number) as before —
	// otherwise arrays of all-negative coordinates produced a wrong Max.
	maxX := -math.MaxFloat64
	maxY := -math.MaxFloat64
	maxZ := -math.MaxFloat64
	for i, l := 0, len(array); i < l; i += 3 {
		x := array[i]
		y := array[i+1]
		z := array[i+2]
		if x < minX {
			minX = x
		}
		if y < minY {
			minY = y
		}
		if z < minZ {
			minZ = z
		}
		if x > maxX {
			maxX = x
		}
		if y > maxY {
			maxY = y
		}
		if z > maxZ {
			maxZ = z
		}
	}
	b.Min.Set(minX, minY, minZ)
	b.Max.Set(maxX, maxY, maxZ)
	return b
}
// SetFromPoints resizes the box to exactly bound every point in points.
func (b *Box3) SetFromPoints(points []*Vector3) *Box3 {
	b.MakeEmpty()
	for _, point := range points {
		b.ExpandByPoint(point)
	}
	return b
}
// SetFromCenterAndSize places the box so it is centered on center with the
// edge lengths given by size.
func (b *Box3) SetFromCenterAndSize(center, size *Vector3) *Box3 {
	v1 := NewVector3()
	halfSize := v1.Copy(size).MultiplyScalar(0.5)
	b.Min.Copy(center).Sub(halfSize)
	b.Max.Copy(center).Add(halfSize)
	return b
}
//TODO
/*func (b *Box3) SetFromObject(object Object) *Box3 {
// Computes the world-axis-aligned bounding box of an object (including its children),
// accounting for both the object's, and children's, world transforms
v1 := NewVector3()
object.UpdateMatrixWorld(true)
b.MakeEmpty()
object.Traverse(func(node Object) {
var geometry *Geometry
switch n := node.(type) {
case *Mesh:
geometry = n.Geometry
}
if geometry == nil {
return
}
switch geo := ToGeneric(geometry).(type) {
case BufferGeometry:
attribute := geo.Attributes.Position
if attribute == nil {
return
}
var array []float64
var offset, stride uint32
switch attribute.(type) {
case InterleavedBufferAttribute:
iba := attribute.(InterleavedBufferAttribute)
array = iba.data.array
offset = iba.offset
stride = iba.data.stride
default:
array = attribute.(Attribute).Array
offset = 0
stride = 3
}
for i := offset; i < len(array); i += stride {
v1.FromArray(array, i)
v1.ApplyMatrix4(node.MatrixWorld)
b.expandByPoint(v1)
}
case Geometry:
vertices := geo.Vertices
for _, vertex := range vertices {
v1.Copy(vertex)
v1.ApplyMatrix4(node.(*Object3D).MatrixWorld)
b.ExpandByPoint(v1)
}
}
})
return b
}*/
// Clone returns a new box with the same bounds.
func (b *Box3) Clone() *Box3 {
	return NewBox3().Copy(b)
}
// Copy copies the bounds of src into b and returns b.
func (b *Box3) Copy(src *Box3) *Box3 {
	b.Min.Copy(src.Min)
	b.Max.Copy(src.Max)
	return b
}
// MakeEmpty resets the box to the inverted (empty) state used by NewBox3.
func (b *Box3) MakeEmpty() *Box3 {
	b.Min = Vector3_Max()
	b.Max = Vector3_Min()
	return b
}
// IsEmpty reports whether the box contains no volume.
func (b *Box3) IsEmpty() bool {
	// b is a more robust check for empty than ( volume <= 0 ) because volume can get positive with two negative axes
	return (b.Max.X < b.Min.X) || (b.Max.Y < b.Min.Y) || (b.Max.Z < b.Min.Z)
}
// Center writes the box's center point into target, allocating one when nil.
func (b *Box3) Center(target *Vector3) *Vector3 {
	if nil == target {
		target = NewVector3()
	}
	return target.AddVectors(b.Min, b.Max).MultiplyScalar(0.5)
}
// Size writes the box's edge lengths into target, allocating one when nil.
func (b *Box3) Size(target *Vector3) *Vector3 {
	if nil == target {
		target = NewVector3()
	}
	return target.SubVectors(b.Max, b.Min)
}
// ExpandByPoint grows the box just enough to contain point.
func (b *Box3) ExpandByPoint(point *Vector3) *Box3 {
	b.Min.Min(point)
	b.Max.Max(point)
	return b
}
// ExpandByVector grows the box by vector on each side.
func (b *Box3) ExpandByVector(vector *Vector3) *Box3 {
	b.Min.Sub(vector)
	b.Max.Add(vector)
	return b
}
// ExpandByScalar grows the box by scalar in every direction.
func (b *Box3) ExpandByScalar(scalar float64) *Box3 {
	b.Min.AddScalar(-scalar)
	b.Max.AddScalar(scalar)
	return b
}
// ContainsPoint reports whether point lies inside or on the box boundary.
func (b *Box3) ContainsPoint(point *Vector3) bool {
	outside := point.X < b.Min.X || point.X > b.Max.X ||
		point.Y < b.Min.Y || point.Y > b.Max.Y ||
		point.Z < b.Min.Z || point.Z > b.Max.Z
	return !outside
}
// ContainsBox reports whether box lies entirely within b (boundaries touch
// counts as contained).
func (b *Box3) ContainsBox(box *Box3) bool {
	return b.Min.X <= box.Min.X && box.Max.X <= b.Max.X &&
		b.Min.Y <= box.Min.Y && box.Max.Y <= b.Max.Y &&
		b.Min.Z <= box.Min.Z && box.Max.Z <= b.Max.Z
}
// GetParameter writes into target the normalized position of point within
// the box: (0,0,0) at Min, (1,1,1) at Max. Allocates target when nil.
func (b *Box3) GetParameter(point, target *Vector3) *Vector3 {
	// This can potentially have a divide by zero if the box
	// has a size dimension of 0.
	if nil == target {
		target = NewVector3()
	}
	return target.Set(
		(point.X-b.Min.X)/(b.Max.X-b.Min.X),
		(point.Y-b.Min.Y)/(b.Max.Y-b.Min.Y),
		(point.Z-b.Min.Z)/(b.Max.Z-b.Min.Z),
	)
}
// IntersectsBox reports whether the two boxes overlap, using the six
// axis-aligned separating planes.
func (b *Box3) IntersectsBox(other *Box3) bool {
	separated := other.Max.X < b.Min.X || other.Min.X > b.Max.X ||
		other.Max.Y < b.Min.Y || other.Min.Y > b.Max.Y ||
		other.Max.Z < b.Min.Z || other.Min.Z > b.Max.Z
	return !separated
}
//TODO
/*func (b *Box3) IntersectsSphere(sphere *Sphere) bool {
closestPoint := NewVector3()
// Find the point on the AABB closest to the sphere center.
b.ClampPoint(sphere.Center, closestPoint)
// If that point is inside the sphere, the AABB and sphere intersect.
return closestPoint.DistanceToSquared(sphere.Center) <= (sphere.Radius * sphere.Radius)
}*/
//TODO
/*func (b *Box3) IntersectsPlane(plane *Plane) bool {
// We compute the minimum and maximum dot product values. If those values
// are on the same side (back or front) of the plane, then there is no intersection.
var min, max float64
if plane.Normal.X > 0 {
min = plane.Normal.X * b.Min.X
max = plane.Normal.X * b.Max.X
} else {
min = plane.Normal.X * b.Max.X
max = plane.Normal.X * b.Min.X
}
if plane.Normal.Y > 0 {
min += plane.Normal.Y * b.Min.Y
max += plane.Normal.Y * b.Max.Y
} else {
min += plane.Normal.Y * b.Max.Y
max += plane.Normal.Y * b.Min.Y
}
if plane.Normal.Z > 0 {
min += plane.Normal.Z * b.Min.Z
max += plane.Normal.Z * b.Max.Z
} else {
min += plane.Normal.Z * b.Max.Z
max += plane.Normal.Z * b.Min.Z
}
return (min <= plane.Constant && max >= plane.Constant)
}*/
// ClampPoint clamps point into the box and stores the result in target.
// A nil target is replaced by a freshly allocated Vector3.
func (b *Box3) ClampPoint(point, target *Vector3) *Vector3 {
	if target == nil {
		target = NewVector3()
	}
	return target.Copy(point).Clamp(b.Min, b.Max)
}
// DistanceToPoint returns the distance from point to the nearest point on
// the box; it is zero when point lies inside b.
func (b *Box3) DistanceToPoint(point *Vector3) float64 {
	clamped := NewVector3().Copy(point).Clamp(b.Min, b.Max)
	return clamped.Sub(point).Length()
}
//TODO
/*func (b *Box3) GetBoundingSphere(target *Sphere) *Sphere {
v1 := NewVector3()
if nil == target {
target = NewSphere()
}
target.Center = b.Center(nil)
target.Radius = b.Size(v1).Length() * 0.5
return target
}*/
// Intersect shrinks b in place to the overlap of b and other and returns b.
// If the boxes do not overlap, b is made fully empty.
func (b *Box3) Intersect(other *Box3) *Box3 {
	b.Min.Max(other.Min)
	b.Max.Min(other.Max)
	// ensure that if there is no overlap, the result is fully empty, not slightly empty with non-inf/+inf values that will cause subsequent intersects to erroneously return valid values.
	if b.IsEmpty() {
		b.MakeEmpty()
	}
	return b
}
// Union grows b in place so that it encloses both its previous extent and
// other, returning b.
func (b *Box3) Union(other *Box3) *Box3 {
	b.Max.Max(other.Max)
	b.Min.Min(other.Min)
	return b
}
// ApplyMatrix4 transforms the box by matrix and refits b to the
// axis-aligned bounds of the eight transformed corner points, returning b.
func (b *Box3) ApplyMatrix4(matrix *Matrix4) *Box3 {
	// Transform of an empty box is an empty box. Check before doing any
	// allocation: the original allocated eight vectors it then discarded.
	if b.IsEmpty() {
		return b
	}
	points := make([]*Vector3, 8)
	for i := range points {
		points[i] = NewVector3()
	}
	// NOTE: I am using a binary pattern to specify all 2^3 combinations below
	points[0].Set(b.Min.X, b.Min.Y, b.Min.Z).ApplyMatrix4(matrix) // 000
	points[1].Set(b.Min.X, b.Min.Y, b.Max.Z).ApplyMatrix4(matrix) // 001
	points[2].Set(b.Min.X, b.Max.Y, b.Min.Z).ApplyMatrix4(matrix) // 010
	points[3].Set(b.Min.X, b.Max.Y, b.Max.Z).ApplyMatrix4(matrix) // 011
	points[4].Set(b.Max.X, b.Min.Y, b.Min.Z).ApplyMatrix4(matrix) // 100
	points[5].Set(b.Max.X, b.Min.Y, b.Max.Z).ApplyMatrix4(matrix) // 101
	points[6].Set(b.Max.X, b.Max.Y, b.Min.Z).ApplyMatrix4(matrix) // 110
	points[7].Set(b.Max.X, b.Max.Y, b.Max.Z).ApplyMatrix4(matrix) // 111
	return b.SetFromPoints(points)
}
// Translate shifts both corners of the box by offset and returns b.
func (b *Box3) Translate(offset *Vector3) *Box3 {
	b.Max.Add(offset)
	b.Min.Add(offset)
	return b
}
func (b *Box3) Equals(box *Box3) bool {
return b.Min.Equals(box.Min) && b.Max.Equals(box.Max)
} | box3.go | 0.710025 | 0.605595 | box3.go | starcoder |
package main
/*
Write a program to sort an array of integers. The program should partition the array into 4 parts,
each of which is sorted by a different goroutine. Each partition should be of approximately equal size.
Then the main goroutine should merge the 4 sorted subarrays into one large sorted array.
The program should prompt the user to input a series of integers.
Each goroutine which sorts ¼ of the array should print the subarray that it will sort.
When sorting is complete, the main goroutine should print the entire sorted list.
*/
import (
"bufio"
"fmt"
"os"
"sort"
"strconv"
"strings"
)
// main reads integers from stdin, splits them round-robin into
// partitionSize groups, sorts each group in its own goroutine, and merges
// the sorted groups into one fully sorted slice which is printed at the end.
func main() {
	const partitionSize = 4
	chl := make(chan []int, partitionSize)
	groups := readInputIntoGroups(partitionSize)
	// Launch ALL sorting goroutines before receiving, so the partitions
	// are actually sorted concurrently. The original received from the
	// channel inside the same loop iteration, which waited for each
	// goroutine to finish before starting the next one.
	for i := 0; i < len(groups); i++ {
		go sortSlice(groups[i], chl)
	}
	var results []int
	for range groups {
		results = mergeResult(results, <-chl)
	}
	fmt.Println("Final sorted:", results)
}
// readInputIntoGroups prompts for a space-separated list of integers on
// stdin and distributes them round-robin into the given number of groups.
// The process exits with status 2 on the first non-integer token.
func readInputIntoGroups(groups int) [][]int {
	ints := make([][]int, groups)
	scanner := bufio.NewScanner(os.Stdin)
	fmt.Println("Please enter a series of integers> ")
	scanner.Scan()
	line := strings.TrimSpace(scanner.Text())
	for i, token := range strings.Split(line, " ") {
		parsed, err := strconv.Atoi(token)
		if err != nil {
			fmt.Println("Wrong input: ", token)
			os.Exit(2)
		}
		bucket := i % groups
		ints[bucket] = append(ints[bucket], parsed)
	}
	return ints
}
// sortSlice prints the partition it was handed, sorts it in place in
// ascending order, and sends the sorted slice back on c.
func sortSlice(ints []int, c chan []int) {
	fmt.Println("Sorting", ints)
	sort.Ints(ints)
	c <- ints
}
// mergeResult merges two individually sorted int slices into a single
// sorted slice and returns it.
func mergeResult(results []int, sortedSlice []int) []int {
	// Preallocate the exact final capacity to avoid repeated growth copies.
	merged := make([]int, 0, len(results)+len(sortedSlice))
	i, j := 0, 0
	// Standard two-pointer merge: repeatedly take the smaller head element.
	for i < len(sortedSlice) && j < len(results) {
		if sortedSlice[i] < results[j] {
			merged = append(merged, sortedSlice[i])
			i++
		} else {
			merged = append(merged, results[j])
			j++
		}
	}
	// At most one of the inputs still has a tail; append the leftovers.
	merged = append(merged, sortedSlice[i:]...)
	merged = append(merged, results[j:]...)
	return merged
}
// copySlice appends results[indx:] onto temporary and returns the extended
// slice. An index at or beyond len(results) appends nothing.
func copySlice(indx int, results []int, temporary []int) []int {
	if indx >= len(results) {
		return temporary
	}
	return append(temporary, results[indx:]...)
}
package api
import (
"encoding/binary"
"log"
)
// Copy returns an ImageData with a separate Data slice copied from the original.
func (img ImageData) Copy() ImageData {
	dup := append([]byte(nil), img.Data...)
	return ImageData{
		BitsPerPixel: img.BitsPerPixel,
		Size_:        &Size2DI{X: img.Size_.X, Y: img.Size_.Y},
		Data:         dup,
	}
}

// assertBPP checks for the expected pixel size and panics if it doesn't match.
func (img ImageData) assertBPP(count int32) {
	if img.BitsPerPixel == count {
		return
	}
	log.Panicf("bad BitsPerPixel, expected %v got %v", count, img.BitsPerPixel)
}

// Bits returns a bit-indexed version of the ImageData.
// It panics if ImageData.BitsPerPixel != 1.
func (img ImageData) Bits() ImageDataBits {
	img.assertBPP(1)
	return ImageDataBits{imageData{*img.Size_, img.Data}}
}
// ImageDataBits is a bit-indexed version of ImageData.
// Bits are packed MSB-first, eight per byte, in row-major order.
type ImageDataBits struct {
	imageData
}

// NewImageDataBits returns an empty bit-indexed ImageData of the given size.
// The backing slice is rounded up to a whole number of bytes.
func NewImageDataBits(w, h int32) ImageDataBits {
	// w and h are already int32; the original's int32(w)/int32(h)
	// conversions were redundant.
	size := Size2DI{w, h}
	data := make([]byte, (w*h+7)/8)
	return ImageDataBits{imageData{size, data}}
}

// Get returns the bit value at (x, y).
// True if the bit is set, false if not or if (x, y) is out of bounds.
func (img ImageDataBits) Get(x, y int32) bool {
	if img.InBounds(x, y) {
		i := img.offset(x, y)
		// Byte index plus an MSB-first mask for the bit within that byte.
		i, bit := i/8, byte(1<<(7-(uint(i)%8)))
		return img.data[i]&bit != 0
	}
	return false
}

// Copy returns an ImageDataBits with a separate data slice copied from the original.
func (img ImageDataBits) Copy() ImageDataBits {
	data := make([]byte, len(img.data))
	copy(data, img.data)
	return ImageDataBits{imageData{img.size, data}}
}

// Set updates the bit at (x, y) to the given value.
// If (x, y) is out of bounds it does nothing.
func (img ImageDataBits) Set(x, y int32, value bool) {
	if img.InBounds(x, y) {
		i := img.offset(x, y)
		i, bit := i/8, byte(1<<(7-(uint(i)%8)))
		if value {
			img.data[i] |= bit // set
		} else {
			img.data[i] &^= bit // clear
		}
	}
}

// ToBytes converts a bitmap into a bytemap with false -> 0 and true -> 255.
func (img ImageDataBits) ToBytes() ImageDataBytes {
	bytes := ImageDataBytes{imageData{
		img.size,
		make([]byte, img.Width()*img.Height()),
	}}
	for y := int32(0); y < img.Height(); y++ {
		for x := int32(0); x < img.Width(); x++ {
			if img.Get(x, y) {
				bytes.Set(x, y, 255)
			}
		}
	}
	return bytes
}
// Bytes returns a byte-indexed version of the ImageData.
// It panics if ImageData.BitsPerPixel != 8.
func (img ImageData) Bytes() ImageDataBytes {
	img.assertBPP(8)
	return ImageDataBytes{imageData{*img.Size_, img.Data}}
}

// ImageDataBytes is a byte-indexed version of ImageData, one byte per
// pixel in row-major order.
type ImageDataBytes struct {
	imageData
}

// NewImageDataBytes returns an empty byte-indexed ImageData of the given size.
func NewImageDataBytes(w, h int32) ImageDataBytes {
	// w and h are already int32; the original's int32(w)/int32(h)
	// conversions were redundant.
	size := Size2DI{w, h}
	data := make([]byte, w*h)
	return ImageDataBytes{imageData{size, data}}
}

// Copy returns an ImageDataBytes with a separate data slice copied from the original.
func (img ImageDataBytes) Copy() ImageDataBytes {
	data := make([]byte, len(img.data))
	copy(data, img.data)
	return ImageDataBytes{imageData{img.size, data}}
}

// Get returns the byte value at (x, y).
// If (x, y) is out of bounds it returns 0.
func (img ImageDataBytes) Get(x, y int32) byte {
	if img.InBounds(x, y) {
		return img.data[img.offset(x, y)]
	}
	return 0
}

// Set updates the byte at (x, y) to the given value.
// If (x, y) is out of bounds it does nothing.
func (img ImageDataBytes) Set(x, y int32, value byte) {
	if img.InBounds(x, y) {
		img.data[img.offset(x, y)] = value
	}
}
// Ints returns an int32-indexed version of the ImageData.
// It panics if ImageData.BitsPerPixel != 32.
func (img ImageData) Ints() ImageDataInt32 {
	img.assertBPP(32)
	return ImageDataInt32{imageData{*img.Size_, img.Data}}
}

// ImageDataInt32 is an int32-indexed version of ImageData.
// LittleEndian byte ordering is assumed.
type ImageDataInt32 struct {
	imageData
}

// NewImageDataInts returns an empty int32-indexed ImageData of the given size.
func NewImageDataInts(w, h int32) ImageDataInt32 {
	// w and h are already int32; the original's int32(w)/int32(h)
	// conversions were redundant. Four bytes per pixel.
	size := Size2DI{w, h}
	data := make([]byte, w*h*4)
	return ImageDataInt32{imageData{size, data}}
}

// Copy returns an ImageDataInt32 with a separate data slice copied from the original.
func (img ImageDataInt32) Copy() ImageDataInt32 {
	data := make([]byte, len(img.data))
	copy(data, img.data)
	return ImageDataInt32{imageData{img.size, data}}
}

// Get returns the int32 value at (x, y).
// If (x, y) is out of bounds it returns 0.
func (img ImageDataInt32) Get(x, y int32) int32 {
	if img.InBounds(x, y) {
		i := img.offset(x, y)
		// Assuming this is LE byte order...
		return int32(binary.LittleEndian.Uint32(img.data[4*i : 4*(i+1)]))
	}
	return 0
}

// Set updates the int32 at (x, y) to the given value.
// If (x, y) is out of bounds it does nothing.
func (img ImageDataInt32) Set(x, y int32, value int32) {
	if img.InBounds(x, y) {
		i := img.offset(x, y)
		// Assuming this is LE byte order...
		binary.LittleEndian.PutUint32(img.data[4*i:4*(i+1)], uint32(value))
	}
}
// imageData contains common data and methods used by the typed versions above.
type imageData struct {
	size Size2DI // image dimensions in pixels
	data []byte  // pixel storage; interpretation depends on the wrapper type
}

// Width is the horizontal size of the data.
func (img imageData) Width() int32 {
	return img.size.X
}

// Height is the vertical size of the data.
func (img imageData) Height() int32 {
	return img.size.Y
}

// InBounds checks that the coordinates fall within the valid range for the image.
func (img imageData) InBounds(x, y int32) bool {
	return 0 <= x && x < img.Width() && 0 <= y && y < img.Height()
}

// offset converts XY coordinates into a linear offset into the ImageData.
// The layout is row-major: consecutive x values are adjacent in memory.
func (img imageData) offset(x, y int32) int32 {
	// Image data is stored with an upper left origin
	return x + y*img.Width()
}
package vehicle
import "github.com/dpb587/go-schemaorg"
// Schema.org properties describing vehicles. Each entry wraps the
// corresponding schema.org property name; the comments below are taken
// from the schema.org vocabulary descriptions.
var (
	// Indicates whether the vehicle has been used for special purposes, like
	// commercial rental, driving school, or as a taxi. The legislation in many
	// countries requires this information to be revealed when offering a car for
	// sale.
	VehicleSpecialUsage = schemaorg.NewProperty("vehicleSpecialUsage")

	// The total number of forward gears available for the transmission system of
	// the vehicle.</p>
	//
	// <p>Typical unit code(s): C62
	NumberOfForwardGears = schemaorg.NewProperty("numberOfForwardGears")

	// The total distance travelled by the particular vehicle since its initial
	// production, as read from its odometer.</p>
	//
	// <p>Typical unit code(s): KMT for kilometers, SMI for statute miles
	MileageFromOdometer = schemaorg.NewProperty("mileageFromOdometer")

	// The color or color combination of the interior of the vehicle.
	VehicleInteriorColor = schemaorg.NewProperty("vehicleInteriorColor")

	// The position of the steering wheel or similar device (mostly for cars).
	SteeringPosition = schemaorg.NewProperty("steeringPosition")

	// Information about the engine or engines of the vehicle.
	VehicleEngine = schemaorg.NewProperty("vehicleEngine")

	// The release date of a vehicle model (often used to differentiate versions of
	// the same make and model).
	VehicleModelDate = schemaorg.NewProperty("vehicleModelDate")

	// The number of doors.</p>
	//
	// <p>Typical unit code(s): C62
	NumberOfDoors = schemaorg.NewProperty("numberOfDoors")

	// A short text indicating the configuration of the vehicle, e.g. '5dr hatchback
	// ST 2.5 MT 225 hp' or 'limited edition'.
	VehicleConfiguration = schemaorg.NewProperty("vehicleConfiguration")

	// The type of fuel suitable for the engine or engines of the vehicle. If the
	// vehicle has only one engine, this property can be attached directly to the
	// vehicle.
	FuelType = schemaorg.NewProperty("fuelType")

	// The Vehicle Identification Number (VIN) is a unique serial number used by the
	// automotive industry to identify individual motor vehicles.
	VehicleIdentificationNumber = schemaorg.NewProperty("vehicleIdentificationNumber")

	// <p>The amount of fuel consumed for traveling a particular distance or
	// temporal duration with the given vehicle (e.g. liters per 100 km).</p>
	//
	// <ul>
	// <li>Note 1: There are unfortunately no standard unit codes for liters per 100
	// km. Use <a class="localLink" href="http://schema.org/unitText">unitText</a>
	// to indicate the unit of measurement, e.g. L/100 km.</li>
	// <li>Note 2: There are two ways of indicating the fuel consumption, <a
	// class="localLink"
	// href="http://schema.org/fuelConsumption">fuelConsumption</a> (e.g. 8 liters
	// per 100 km) and <a class="localLink"
	// href="http://schema.org/fuelEfficiency">fuelEfficiency</a> (e.g. 30 miles per
	// gallon). They are reciprocal.</li>
	// <li>Note 3: Often, the absolute value is useful only when related to driving
	// speed ("at 80 km/h") or usage pattern ("city traffic"). You can use <a
	// class="localLink" href="http://schema.org/valueReference">valueReference</a>
	// to link the value for the fuel consumption to another value.</li>
	// </ul>
	//
	FuelConsumption = schemaorg.NewProperty("fuelConsumption")

	// The number of owners of the vehicle, including the current one.</p>
	//
	// <p>Typical unit code(s): C62
	NumberOfPreviousOwners = schemaorg.NewProperty("numberOfPreviousOwners")

	// <p>The distance traveled per unit of fuel used; most commonly miles per
	// gallon (mpg) or kilometers per liter (km/L).</p>
	//
	// <ul>
	// <li>Note 1: There are unfortunately no standard unit codes for miles per
	// gallon or kilometers per liter. Use <a class="localLink"
	// href="http://schema.org/unitText">unitText</a> to indicate the unit of
	// measurement, e.g. mpg or km/L.</li>
	// <li>Note 2: There are two ways of indicating the fuel consumption, <a
	// class="localLink"
	// href="http://schema.org/fuelConsumption">fuelConsumption</a> (e.g. 8 liters
	// per 100 km) and <a class="localLink"
	// href="http://schema.org/fuelEfficiency">fuelEfficiency</a> (e.g. 30 miles per
	// gallon). They are reciprocal.</li>
	// <li>Note 3: Often, the absolute value is useful only when related to driving
	// speed ("at 80 km/h") or usage pattern ("city traffic"). You can use <a
	// class="localLink" href="http://schema.org/valueReference">valueReference</a>
	// to link the value for the fuel economy to another value.</li>
	// </ul>
	//
	FuelEfficiency = schemaorg.NewProperty("fuelEfficiency")

	// The number of axles.</p>
	//
	// <p>Typical unit code(s): C62
	NumberOfAxles = schemaorg.NewProperty("numberOfAxles")

	// The type or material of the interior of the vehicle (e.g. synthetic fabric,
	// leather, wood, etc.). While most interior types are characterized by the
	// material used, an interior type can also be based on vehicle usage or target
	// audience.
	VehicleInteriorType = schemaorg.NewProperty("vehicleInteriorType")

	// A textual description of known damages, both repaired and unrepaired.
	KnownVehicleDamages = schemaorg.NewProperty("knownVehicleDamages")

	// The number or type of airbags in the vehicle.
	NumberOfAirbags = schemaorg.NewProperty("numberOfAirbags")

	// The number of passengers that can be seated in the vehicle, both in terms of
	// the physical space available, and in terms of limitations set by law.</p>
	//
	// <p>Typical unit code(s): C62 for persons.
	VehicleSeatingCapacity = schemaorg.NewProperty("vehicleSeatingCapacity")

	// The available volume for cargo or luggage. For automobiles, this is usually
	// the trunk volume.</p>
	//
	// <p>Typical unit code(s): LTR for liters, FTQ for cubic foot/feet</p>
	//
	// <p>Note: You can use <a class="localLink"
	// href="http://schema.org/minValue">minValue</a> and <a class="localLink"
	// href="http://schema.org/maxValue">maxValue</a> to indicate ranges.
	CargoVolume = schemaorg.NewProperty("cargoVolume")

	// The type of component used for transmitting the power from a rotating power
	// source to the wheels or other relevant component(s) ("gearbox" for cars).
	VehicleTransmission = schemaorg.NewProperty("vehicleTransmission")

	// The date of the first registration of the vehicle with the respective public
	// authorities.
	DateVehicleFirstRegistered = schemaorg.NewProperty("dateVehicleFirstRegistered")

	// The date the item e.g. vehicle was purchased by the current owner.
	PurchaseDate = schemaorg.NewProperty("purchaseDate")

	// The date of production of the item, e.g. vehicle.
	ProductionDate = schemaorg.NewProperty("productionDate")

	// The drive wheel configuration, i.e. which roadwheels will receive torque from
	// the vehicle's engine via the drivetrain.
	DriveWheelConfiguration = schemaorg.NewProperty("driveWheelConfiguration")
)
package types
import (
"math"
"strconv"
)
// Color3 represents a color in RGB space. Channels are expected in [0, 1].
type Color3 struct {
	R, G, B float32
}

// NewColor3FromRGB returns a Color3 from the given red, green, and blue
// components, each having an interval of [0, 255].
func NewColor3FromRGB(r, g, b int) Color3 {
	return Color3{
		R: float32(r) / 255,
		G: float32(g) / 255,
		B: float32(b) / 255,
	}
}

// NewColor3FromHSV returns a Color3 from the given hue, saturation, and value,
// each in the interval [0, 1].
func NewColor3FromHSV(h, s, v float64) (c Color3) {
	// Alternative HSV->RGB formulation: each channel is v reduced by a
	// piecewise-linear function of the hue.
	var k float64
	k = math.Mod(5+h*6, 6)
	c.R = float32(v - v*s*math.Max(0, math.Min(math.Min(k, 4-k), 1)))
	k = math.Mod(3+h*6, 6)
	c.G = float32(v - v*s*math.Max(0, math.Min(math.Min(k, 4-k), 1)))
	k = math.Mod(1+h*6, 6)
	c.B = float32(v - v*s*math.Max(0, math.Min(math.Min(k, 4-k), 1)))
	return c
}

// Lerp returns a Color3 linearly interpolated between *c* and *goal*, according
// to *alpha*, which has an interval of [0, 1].
func (c Color3) Lerp(goal Color3, alpha float64) Color3 {
	a := float32(alpha)
	na := 1 - a
	return Color3{
		R: na*c.R + a*goal.R,
		G: na*c.G + a*goal.G,
		B: na*c.B + a*goal.B,
	}
}

// ToHSV returns the hue, saturation, and value of the color.
// Achromatic colors (r == g == b, including black) report hue 0 and
// saturation 0; the original code divided by a zero delta in that case,
// producing NaN for the hue.
func (c Color3) ToHSV() (h, s, v float64) {
	r, g, b := float64(c.R), float64(c.G), float64(c.B)
	min := math.Min(math.Min(r, g), b)
	max := math.Max(math.Max(r, g), b)
	delta := max - min
	v = max
	if delta == 0 {
		// Hue and saturation are undefined for greys; report 0 instead of
		// the NaN the 0/0 divisions below would produce.
		return 0, 0, v
	}
	s = delta / max
	switch max {
	case r:
		h = ((g - b) / delta) / 6
	case g:
		h = (2 + (b-r)/delta) / 6
	case b:
		h = (4 + (r-g)/delta) / 6
	}
	if h <= 0 {
		h++ // wrap non-positive hues into (0, 1]
	}
	return h, s, v
}

// Type returns a string that identifies the type.
func (Color3) Type() string {
	return "Color3"
}

// String returns a human-readable string representation of the value.
func (c Color3) String() string {
	var b []byte
	b = strconv.AppendFloat(b, float64(c.R), 'g', -1, 32)
	b = append(b, ", "...)
	b = strconv.AppendFloat(b, float64(c.G), 'g', -1, 32)
	b = append(b, ", "...)
	b = strconv.AppendFloat(b, float64(c.B), 'g', -1, 32)
	return string(b)
}
// Copy returns a copy of the value.
// Color3 is a plain value type, so returning the receiver by value is
// already a full copy.
func (c Color3) Copy() PropValue {
	return c
}
package archive
import "math"
// GameVariableLimits describes limits of a game variable.
type GameVariableLimits struct {
	Minimum int16
	Maximum int16
}

// GameVariableInfo describes a variable in the game state of the archive.
type GameVariableInfo struct {
	// InitValue is nil if the initial value is system dependent.
	InitValue *int16
	// Name is the short identifier for the variable.
	Name string
	// Description is an optional text with further information on the variable.
	Description string
	// Limits may be provided to describe the range of possible values.
	Limits *GameVariableLimits
	// ValueNames may be set for enumerated values.
	ValueNames map[int16]string
	// Hardcoded variables are fixed for a new game.
	Hardcoded bool
}

// ResetValueInt returns the expected reset value for integer variables.
// A nil InitValue resets to zero.
func (info GameVariableInfo) ResetValueInt() int16 {
	if v := info.InitValue; v != nil {
		return *v
	}
	return 0
}

// ResetValueBool returns the expected reset value for boolean variables.
// A nil InitValue resets to false.
func (info GameVariableInfo) ResetValueBool() bool {
	return info.InitValue != nil && *info.InitValue != 0
}

// GameVariableInfoProvider provides lookup functionality for integer and boolean variables.
type GameVariableInfoProvider interface {
	IntegerVariable(index int) GameVariableInfo
	BooleanVariable(index int) GameVariableInfo
}

// GameVariableInfoFor returns a new instance with given name.
func GameVariableInfoFor(name string) GameVariableInfo {
	return GameVariableInfo{Name: name}
}

// At returns a copy of the information with the given initial value set.
func (info GameVariableInfo) At(value int16) GameVariableInfo {
	info.InitValue = &value
	return info
}

// HardcodedConfig returns a copy marked as a hardcoded configuration value.
func (info GameVariableInfo) HardcodedConfig() GameVariableInfo {
	info.Hardcoded = true
	return info
}

// HardcodedAt returns a copy marked as hardcoded with the given initial value.
func (info GameVariableInfo) HardcodedAt(value int16) GameVariableInfo {
	return info.At(value).HardcodedConfig()
}

// Enumerated returns a copy with the given value names attached.
func (info GameVariableInfo) Enumerated(names map[int16]string) GameVariableInfo {
	info.ValueNames = names
	return info
}

// Boolean returns an enumerated information with No/Yes as possible values.
func (info GameVariableInfo) Boolean() GameVariableInfo {
	return info.Enumerated(map[int16]string{0: "No", 1: "Yes"})
}

// LimitedBy returns a copy constrained to the given minimum and maximum values.
func (info GameVariableInfo) LimitedBy(min, max int16) GameVariableInfo {
	info.Limits = &GameVariableLimits{Minimum: min, Maximum: max}
	return info
}

// DescribedAs returns a copy with the given text as description.
func (info GameVariableInfo) DescribedAs(text string) GameVariableInfo {
	info.Description = text
	return info
}

// GameVariables is a lookup map for information on game variables.
type GameVariables map[int]GameVariableInfo

// Lookup returns the information for given index. If the index is not known, nil is returned.
func (vars GameVariables) Lookup(index int) *GameVariableInfo {
	if info, known := vars[index]; known {
		return &info
	}
	return nil
}
// Shared description texts reused by several variable definitions below.
const (
	securityValueDescription = "The current security value is re-calculated whenever a level is loaded up."
	randomCodeDescription    = "If both codes are equal at the start of a new game, they will be randomized."
	highscoreCodeDescription = "The highscore values are combined as one 32-bit integer, used for the MFD game MCOM."
)

// engineIntegerVariables maps integer variable indices to their metadata.
// Indices absent from the map are unused by the engine.
var engineIntegerVariables = GameVariables{
	2: GameVariableInfoFor("Plastique explosion counter").At(0).LimitedBy(0, 100).
		DescribedAs("This value is incremented for each exploding plastique."),
	9:  GameVariableInfoFor("Plot counter").At(0),
	13: GameVariableInfoFor("Difficulty: Mission").HardcodedConfig().LimitedBy(0, 3),
	14: GameVariableInfoFor("Difficulty: Cyber").HardcodedConfig().LimitedBy(0, 3),
	15: GameVariableInfoFor("Difficulty: Combat").HardcodedConfig().LimitedBy(0, 3),
	16: GameVariableInfoFor("Security Value: Level 0").HardcodedAt(0).LimitedBy(0, 1000).DescribedAs(securityValueDescription),
	17: GameVariableInfoFor("Security Value: Level 1").HardcodedAt(0).LimitedBy(0, 1000).DescribedAs(securityValueDescription),
	18: GameVariableInfoFor("Security Value: Level 2").HardcodedAt(0).LimitedBy(0, 1000).DescribedAs(securityValueDescription),
	19: GameVariableInfoFor("Security Value: Level 3").HardcodedAt(0).LimitedBy(0, 1000).DescribedAs(securityValueDescription),
	20: GameVariableInfoFor("Security Value: Level 4").HardcodedAt(0).LimitedBy(0, 1000).DescribedAs(securityValueDescription),
	21: GameVariableInfoFor("Security Value: Level 5").HardcodedAt(0).LimitedBy(0, 1000).DescribedAs(securityValueDescription),
	22: GameVariableInfoFor("Security Value: Level 6").HardcodedAt(0).LimitedBy(0, 1000).DescribedAs(securityValueDescription),
	23: GameVariableInfoFor("Security Value: Level 7").HardcodedAt(0).LimitedBy(0, 1000).DescribedAs(securityValueDescription),
	24: GameVariableInfoFor("Security Value: Level 8").HardcodedAt(0).LimitedBy(0, 1000).DescribedAs(securityValueDescription),
	25: GameVariableInfoFor("Security Value: Level 9").HardcodedAt(0).LimitedBy(0, 1000).DescribedAs(securityValueDescription),
	26: GameVariableInfoFor("Security Value: Level 10").HardcodedAt(0).LimitedBy(0, 1000).DescribedAs(securityValueDescription),
	27: GameVariableInfoFor("Security Value: Level 11").HardcodedAt(0).LimitedBy(0, 1000).DescribedAs(securityValueDescription),
	28: GameVariableInfoFor("Security Value: Level 12").HardcodedAt(0).LimitedBy(0, 1000).DescribedAs(securityValueDescription),
	29: GameVariableInfoFor("Security Value: Level 13").HardcodedAt(0).LimitedBy(0, 1000).DescribedAs(securityValueDescription),
	30: GameVariableInfoFor("Difficulty: Puzzle").HardcodedConfig().LimitedBy(0, 3),
	31: GameVariableInfoFor("Random Code 1").At(0).DescribedAs(randomCodeDescription),
	32: GameVariableInfoFor("Random Code 2").At(0).DescribedAs(randomCodeDescription),
	41: GameVariableInfoFor("Music Volume").HardcodedConfig().LimitedBy(0, 100),
	42: GameVariableInfoFor("Video Gamma").HardcodedConfig().LimitedBy(0, math.MaxInt16),
	43: GameVariableInfoFor("SFX Volume").HardcodedConfig().LimitedBy(0, 100),
	44: GameVariableInfoFor("Mouse Handedness").HardcodedConfig().Enumerated(
		map[int16]string{
			0: "Right Handed",
			1: "Left Handed",
		}),
	45: GameVariableInfoFor("Game: Highscore (pt1)").At(0).DescribedAs(highscoreCodeDescription),
	46: GameVariableInfoFor("Game: Highscore (pt2)").At(0).DescribedAs(highscoreCodeDescription),
	47: GameVariableInfoFor("Double-Click Speed").HardcodedConfig().LimitedBy(0, math.MaxInt16),
	48: GameVariableInfoFor("Language").HardcodedConfig().Enumerated(map[int16]string{
		0: "Default",
		1: "French",
		2: "German",
	}),
	49: GameVariableInfoFor("Audiolog Volume").HardcodedConfig().LimitedBy(0, 100),
	50: GameVariableInfoFor("Screen Mode").HardcodedConfig(),
	51: GameVariableInfoFor("Joystick Sensitivity").At(0x100).LimitedBy(0, 0x100),
	52: GameVariableInfoFor("Show Fullscreen Icons").HardcodedConfig().Boolean(),
	53: GameVariableInfoFor("Audio Messages").HardcodedConfig().Enumerated(map[int16]string{
		0: "Text Only",
		1: "Speech Only",
		2: "Text and Speech",
	}),
	54: GameVariableInfoFor("Show Fullscreen Vitals").HardcodedConfig().Boolean(),
	55: GameVariableInfoFor("Show Map Notes").HardcodedConfig().Boolean(),
	56: GameVariableInfoFor("Game: Wing Level").At(0).LimitedBy(0, 100),
	57: GameVariableInfoFor("HUD Color Bank").HardcodedConfig().LimitedBy(0, 2),
	58: GameVariableInfoFor("Audio Channels").HardcodedConfig().Enumerated(map[int16]string{
		0: "2",
		1: "4",
		2: "8",
	}),
}
// engineBooleanVariables maps boolean variable indices to their metadata.
// Indices absent from the map are unused by the engine.
var engineBooleanVariables = GameVariables{
	0: GameVariableInfoFor("Always False").At(0).
		DescribedAs("This variable should always stay at 'False'.\n" +
			"Default conditions assume boolean var 0 is zero to not block.\n" +
			"For example, default doors would all become locked if it were 'True'."),
	10: GameVariableInfoFor("Status HW: Delta Launch Enable").At(0),
	11: GameVariableInfoFor("Status HW: Alpha Launch Enable").At(0),
	12: GameVariableInfoFor("Status HW: Beta Launch Enable").At(0),
	15: GameVariableInfoFor("Status HW: Beta Launched").At(0),
	20: GameVariableInfoFor("Reactor on Destruct").At(0).
		DescribedAs("Note: Rumble will stop if boolean var 152 is 'True'."),
	145: GameVariableInfoFor("On-Line Help"),
	152: GameVariableInfoFor("Self-Destruct Rumble Stop").At(0).
		DescribedAs("Disables rumble caused by boolean var 20."),
	153: GameVariableInfoFor("Four or more plastique exploded").At(0).
		DescribedAs("Set to 'True' if integer variable 2 reached 4 (or higher)."),
	300: GameVariableInfoFor("New Message Flag").At(0),
}
// EngineIntegerVariable returns a variable info for integer variables.
// If the given index is not used by the engine, nil is returned.
func EngineIntegerVariable(index int) *GameVariableInfo {
	return engineIntegerVariables.Lookup(index)
}

// EngineBooleanVariable returns a variable info for boolean variables.
// If the given index is not used by the engine, nil is returned.
func EngineBooleanVariable(index int) *GameVariableInfo {
	return engineBooleanVariables.Lookup(index)
}
// IsRandomIntegerVariable returns true for the special variables that are
// randomized: indices 31 and 32 ("Random Code 1"/"Random Code 2").
func IsRandomIntegerVariable(index int) bool {
	switch index {
	case 31, 32:
		return true
	}
	return false
}
// EngineVariables is a collector of engine-specific variable accessors.
type EngineVariables struct{}
var unusedVar = GameVariableInfoFor("(unused)").At(0)
// IntegerVariable returns a variable info for given index.
func (vars EngineVariables) IntegerVariable(index int) GameVariableInfo {
varInfo := EngineIntegerVariable(index)
if varInfo == nil {
return unusedVar
}
return *varInfo
}
// BooleanVariable returns a variable info for given index.
func (vars EngineVariables) BooleanVariable(index int) GameVariableInfo {
varInfo := EngineBooleanVariable(index)
if varInfo == nil {
return unusedVar
}
return *varInfo
} | ss1/content/archive/Variables.go | 0.816004 | 0.54359 | Variables.go | starcoder |
package corgi
import (
"os"
"time"
"strconv"
)
// predefineVariables lists the engine's built-in variables, pairing each
// variable name with the getter used to resolve it.
// Entries flagged VARIABLE_CHANGEABLE may change between evaluations; the
// trailing "env_" entry is resolved per key with flag VARIABLE_UNKNOWN.
// (Reformatted to gofmt style; the redundant &Variable in the []*Variable
// literal elements is elided.)
var predefineVariables = []*Variable{
	{Name: "hostname", Get: predefineVariableHostname, Flags: VARIABLE_CHANGEABLE},
	{Name: "time_local", Get: predefineVariableTimeLocal, Flags: VARIABLE_CHANGEABLE},
	{Name: "pid", Get: predefineVariablePID, Flags: VARIABLE_CHANGEABLE},
	{Name: "pwd", Get: predefineVariablePWD, Flags: VARIABLE_CHANGEABLE},
	{Name: "year", Get: predefineVariableTime, Flags: VARIABLE_CHANGEABLE},
	{Name: "month", Get: predefineVariableTime, Flags: VARIABLE_CHANGEABLE},
	{Name: "week", Get: predefineVariableTime, Flags: VARIABLE_CHANGEABLE},
	{Name: "day", Get: predefineVariableTime, Flags: VARIABLE_CHANGEABLE},
	{Name: "hour", Get: predefineVariableTime, Flags: VARIABLE_CHANGEABLE},
	{Name: "minute", Get: predefineVariableTime, Flags: VARIABLE_CHANGEABLE},
	{Name: "zone", Get: predefineVariableTime, Flags: VARIABLE_CHANGEABLE},
	{Name: "second", Get: predefineVariableTime, Flags: VARIABLE_CHANGEABLE},
	{Name: "time", Get: predefineVariableTime, Flags: VARIABLE_CHANGEABLE},
	{Name: "env_", Get: predefineVariableENV, Flags: VARIABLE_UNKNOWN},
}
// predefineVariableTime resolves one component of the current wall-clock
// time ("time", "year", "month", "week", "day", "hour", "minute",
// "second", "zone"). Unknown components are reported as not found.
// Values are never cacheable because the clock keeps moving.
func predefineVariableTime(value *VariableValue, _ interface{}, component string) error {
	now := time.Now()
	value.NotFound = false
	value.Cacheable = false
	switch component {
	case "time":
		value.Value = strconv.FormatInt(now.Unix(), 10)
	case "year":
		value.Value = strconv.Itoa(now.Year())
	case "month":
		value.Value = strconv.Itoa(int(now.Month()))
	case "week":
		// time.Weekday.String() yields the full English day name
		// ("Sunday", ...); the first three letters match the original
		// abbreviation table exactly (Sun, Mon, Tue, Wed, Thu, Fri, Sat).
		value.Value = now.Weekday().String()[:3]
	case "day":
		value.Value = strconv.Itoa(now.Day())
	case "hour":
		value.Value = strconv.Itoa(now.Hour())
	case "minute":
		value.Value = strconv.Itoa(now.Minute())
	case "second":
		value.Value = strconv.Itoa(now.Second())
	case "zone":
		value.Value, _ = now.Zone()
	default:
		// Unknown component: not found, and that fact is cacheable.
		value.NotFound = true
		value.Cacheable = true
	}
	return nil
}
// predefineVariableHostname resolves the machine's hostname. The result is
// cacheable on success; on failure the lookup error is propagated.
func predefineVariableHostname(value *VariableValue, _ interface{}, _ string) error {
	// Early return on error keeps the happy path unindented (the original
	// used an else branch after a return).
	name, err := os.Hostname()
	if err != nil {
		value.NotFound = true
		value.Cacheable = false
		return err
	}
	value.Value = name
	value.NotFound = false
	value.Cacheable = true
	return nil
}
// predefineVariableTimeLocal resolves the local-time predefined variable,
// formatted like a Common Log Format timestamp. Never cached because the
// clock keeps moving.
func predefineVariableTimeLocal(value *VariableValue, _ interface{}, _ string) error {
	value.NotFound = false
	value.Cacheable = false
	value.Value = time.Now().Format("02/Jan/2006:15:04:05 -0700")
	return nil
}
// predefineVariablePID resolves the process-id predefined variable. The PID
// is fixed for the lifetime of the process, so the value is cacheable.
func predefineVariablePID(value *VariableValue, _ interface{}, _ string) error {
	value.Value = strconv.Itoa(os.Getpid())
	value.NotFound = false
	value.Cacheable = true
	return nil
}
// predefineVariablePWD resolves the working-directory predefined variable.
// Never cached, since the process can chdir at any time; on failure the
// variable is marked not-found and the OS error is returned.
func predefineVariablePWD(value *VariableValue, _ interface{}, _ string) error {
	value.Cacheable = false
	dir, err := os.Getwd()
	if err != nil {
		value.NotFound = true
		return err
	}
	value.Value = dir
	value.NotFound = false
	return nil
}
func predefineVariableENV(value *VariableValue, _ interface{}, key string) error {
val := os.Getenv(key)
value.Cacheable = false
if val == "" {
value.NotFound = true
} else {
value.NotFound = false
value.Value = val
}
return nil
} | predefine.go | 0.533884 | 0.4016 | predefine.go | starcoder |
package world
import (
"fmt"
"math"
"github.com/go-gl/mathgl/mgl32"
"github.com/samuelyuan/openbiohazard2/fileio"
)
// RemoveCollisionEntity returns collisionEntities with the entity whose
// ScaIndex equals entityId removed; if no entity matches, the input slice is
// returned unchanged.
//
// The previous implementation appended into the local slice header, so the
// caller never saw the shortened slice (while the shared backing array was
// still shuffled in place). Callers should assign the result back:
// entities = RemoveCollisionEntity(entities, id). Existing call statements
// that ignore the return value still compile, but do not remove anything.
func RemoveCollisionEntity(collisionEntities []fileio.CollisionEntity, entityId int) []fileio.CollisionEntity {
	for i, entity := range collisionEntities {
		if entity.ScaIndex == entityId {
			fmt.Println("Removing collision entity id ", entityId)
			return append(collisionEntities[:i], collisionEntities[i+1:]...)
		}
	}
	return collisionEntities
}
// CheckCollision returns the first collision entity on the player's floor
// whose 2D footprint (in the X/Z plane) contains newPosition, or nil when
// the position is clear. Shapes are axis-aligned rectangles, right
// triangles in four orientations, circles and ellipses; the climb-up (9),
// jump-down (10), slope and stairs shapes all use the plain rectangle test,
// so their previously duplicated cases are merged into one.
//
// The returned pointer refers to a copy of the matching slice element (the
// loop variable), matching the original behavior; mutating through it does
// not modify collisionEntities.
func CheckCollision(newPosition mgl32.Vec3, collisionEntities []fileio.CollisionEntity) *fileio.CollisionEntity {
	// Floor the player currently stands on; each boundary only applies to
	// the floors flagged in its FloorCheck array.
	// NOTE(review): playerFloorNum indexes FloorCheck without a bounds
	// check — confirm positions outside the mapped floor range cannot
	// reach this function.
	playerFloorNum := int(math.Round(float64(newPosition.Y()) / fileio.FLOOR_HEIGHT_UNIT))
	for _, entity := range collisionEntities {
		// The boundary is on a different floor than the player
		if !entity.FloorCheck[playerFloorNum] {
			continue
		}
		switch entity.Shape {
		case 0, 9, 10, fileio.SCA_TYPE_SLOPE, fileio.SCA_TYPE_STAIRS:
			// Axis-aligned rectangle. Shape 0 is a plain wall; 9/10 are the
			// climb-up/jump-down regions; 11/12 are slopes and stairs. All
			// five use an identical containment test.
			corner1 := mgl32.Vec3{float32(entity.X), 0, float32(entity.Z)}
			corner2 := mgl32.Vec3{float32(entity.X), 0, float32(entity.Z) + float32(entity.Density)}
			corner3 := mgl32.Vec3{float32(entity.X) + float32(entity.Width), 0, float32(entity.Z) + float32(entity.Density)}
			corner4 := mgl32.Vec3{float32(entity.X) + float32(entity.Width), 0, float32(entity.Z)}
			if isPointInRectangle(newPosition, corner1, corner2, corner3, corner4) {
				return &entity
			}
		case 1:
			// Triangle \\|
			vertex1 := mgl32.Vec3{float32(entity.X), 0, float32(entity.Z + entity.Density)}
			vertex2 := mgl32.Vec3{float32(entity.X + entity.Width), 0, float32(entity.Z + entity.Density)}
			vertex3 := mgl32.Vec3{float32(entity.X + entity.Width), 0, float32(entity.Z)}
			if isPointInTriangle(newPosition, vertex1, vertex2, vertex3) {
				return &entity
			}
		case 2:
			// Triangle |/
			vertex1 := mgl32.Vec3{float32(entity.X), 0, float32(entity.Z)}
			vertex2 := mgl32.Vec3{float32(entity.X), 0, float32(entity.Z + entity.Density)}
			vertex3 := mgl32.Vec3{float32(entity.X + entity.Width), 0, float32(entity.Z + entity.Density)}
			if isPointInTriangle(newPosition, vertex1, vertex2, vertex3) {
				return &entity
			}
		case 3:
			// Triangle /|
			vertex1 := mgl32.Vec3{float32(entity.X), 0, float32(entity.Z)}
			vertex2 := mgl32.Vec3{float32(entity.X + entity.Width), 0, float32(entity.Z + entity.Density)}
			vertex3 := mgl32.Vec3{float32(entity.X + entity.Width), 0, float32(entity.Z)}
			if isPointInTriangle(newPosition, vertex1, vertex2, vertex3) {
				return &entity
			}
		case 6:
			// Circle inscribed in the entity's bounding square.
			radius := float32(entity.Width) / 2.0
			center := mgl32.Vec3{float32(entity.X) + radius, 0, float32(entity.Z) + radius}
			if isPointInCircle(newPosition, center, radius) {
				return &entity
			}
		case 7:
			// Ellipse, rectangle with rounded corners on the x-axis
			majorAxis := float32(entity.Width) / 2.0
			minorAxis := float32(entity.Density) / 2.0
			center := mgl32.Vec3{float32(entity.X) + majorAxis, 0, float32(entity.Z) + minorAxis}
			if isPointInEllipseXAxisMajor(newPosition, center, majorAxis, minorAxis) {
				return &entity
			}
		case 8:
			// Ellipse, rectangle with rounded corners on the z-axis
			majorAxis := float32(entity.Density) / 2.0
			minorAxis := float32(entity.Width) / 2.0
			center := mgl32.Vec3{float32(entity.X) + minorAxis, 0, float32(entity.Z) + majorAxis}
			if isPointInEllipseZAxisMajor(newPosition, center, majorAxis, minorAxis) {
				return &entity
			}
		}
	}
	return nil
}
// CheckRamp reports whether the collision entity is a ramp, i.e. a slope or
// a staircase.
func CheckRamp(entity *fileio.CollisionEntity) bool {
	switch entity.Shape {
	case fileio.SCA_TYPE_SLOPE, fileio.SCA_TYPE_STAIRS:
		return true
	default:
		return false
	}
}
// CheckNearbyBoxClimb reports whether the player is within 1000 units of any
// climbable rectangle: shape 9 (climb up) or shape 10 (climb/jump down).
// The two shapes previously had byte-identical duplicated bodies; they are
// merged into one case.
func CheckNearbyBoxClimb(playerPosition mgl32.Vec3, collisionEntities []fileio.CollisionEntity) bool {
	const climbDistance = 1000.0
	px := float64(playerPosition.X())
	pz := float64(playerPosition.Z())
	for _, entity := range collisionEntities {
		switch entity.Shape {
		case 9, 10:
			// Normalize the rectangle so min <= max on both axes even if
			// Width/Density happen to be negative.
			rectMinX := math.Min(float64(entity.X), float64(entity.X)+float64(entity.Width))
			rectMaxX := math.Max(float64(entity.X), float64(entity.X)+float64(entity.Width))
			rectMinZ := math.Min(float64(entity.Z), float64(entity.Z)+float64(entity.Density))
			rectMaxZ := math.Max(float64(entity.Z), float64(entity.Z)+float64(entity.Density))
			// Point-to-rectangle distance. Each axis gap is clamped at
			// zero so a point inside the rectangle measures distance 0;
			// the previous formula squared the negative interior gaps and
			// could report a large spurious distance for interior points.
			dx := math.Max(0, math.Max(rectMinX-px, px-rectMaxX))
			dz := math.Max(0, math.Max(rectMinZ-pz, pz-rectMaxZ))
			if math.Sqrt(dx*dx+dz*dz) <= climbDistance {
				return true
			}
		}
	}
	return false
}
// isPointInTriangle reports whether point lies inside the triangle
// corner1-corner2-corner3 (in the X/Z plane) using the barycentric area
// test: P is inside ABC iff area(PBC)+area(APC)+area(ABP) equals area(ABC),
// within a small tolerance for floating-point error.
func isPointInTriangle(point mgl32.Vec3, corner1 mgl32.Vec3, corner2 mgl32.Vec3, corner3 mgl32.Vec3) bool {
	total := triangleArea(corner1, corner2, corner3)
	subAreas := triangleArea(point, corner2, corner3) +
		triangleArea(corner1, point, corner3) +
		triangleArea(corner1, corner2, point)
	return math.Abs(float64(total-subAreas)) <= 0.01
}
// triangleArea returns the area of the triangle p1-p2-p3 projected onto the
// X/Z plane (half the absolute cross product, i.e. the shoelace formula).
func triangleArea(p1 mgl32.Vec3, p2 mgl32.Vec3, p3 mgl32.Vec3) float32 {
	cross := p1.X()*(p2.Z()-p3.Z()) + p2.X()*(p3.Z()-p1.Z()) + p3.X()*(p1.Z()-p2.Z())
	return float32(math.Abs(float64(cross / 2.0)))
}
// isPointInRectangle reports whether point lies strictly inside the
// quadrilateral corner1..corner4 (in the X/Z plane). The point is inside iff
// the cross product of each edge with the vector to the point has the same
// strict sign for all four edges; a zero cross product (point exactly on an
// edge) counts as outside, matching the original strict comparisons.
func isPointInRectangle(point mgl32.Vec3, corner1 mgl32.Vec3, corner2 mgl32.Vec3, corner3 mgl32.Vec3, corner4 mgl32.Vec3) bool {
	corners := [4]mgl32.Vec3{corner1, corner2, corner3, corner4}
	allPositive, allNegative := true, true
	for i := 0; i < 4; i++ {
		a := corners[i]
		b := corners[(i+1)%4]
		cross := (b.X()-a.X())*(point.Z()-a.Z()) - (b.Z()-a.Z())*(point.X()-a.X())
		allPositive = allPositive && cross > 0
		allNegative = allNegative && cross < 0
	}
	return allPositive || allNegative
}
// isPointInCircle reports whether point lies within (or exactly on) the
// circle of the given center and radius.
func isPointInCircle(point mgl32.Vec3, circleCenter mgl32.Vec3, radius float32) bool {
	return point.Sub(circleCenter).Len() <= radius
}
// isPointInEllipseXAxisMajor reports whether point lies inside the ellipse
// whose major axis runs along X. Standard ellipse equation:
// (dx/a)^2 + (dz/b)^2 <= 1.
func isPointInEllipseXAxisMajor(point mgl32.Vec3, ellipseCenter mgl32.Vec3, majorAxis float32, minorAxis float32) bool {
	normX := math.Pow(float64(point.X()-ellipseCenter.X()), 2) / float64(majorAxis*majorAxis)
	normZ := math.Pow(float64(point.Z()-ellipseCenter.Z()), 2) / float64(minorAxis*minorAxis)
	return normX+normZ <= 1.0
}
func isPointInEllipseZAxisMajor(point mgl32.Vec3, ellipseCenter mgl32.Vec3, majorAxis float32, minorAxis float32) bool {
xDistance := math.Pow(float64(point.X()-ellipseCenter.X()), 2) / float64(minorAxis*minorAxis)
zDistance := math.Pow(float64(point.Z()-ellipseCenter.Z()), 2) / float64(majorAxis*majorAxis)
return xDistance+zDistance <= 1.0
} | world/collision.go | 0.711932 | 0.514888 | collision.go | starcoder |
package mocks
import "time"
// MetricsProvider implements a mock ActivityPub metrics provider.
// Every method is an intentional no-op: the type exists so tests can satisfy
// the metrics interface without recording or asserting anything.
type MetricsProvider struct{}
// OutboxPostTime records the time it takes to post a message to the outbox.
func (m *MetricsProvider) OutboxPostTime(value time.Duration) {
}
// OutboxResolveInboxesTime records the time it takes to resolve inboxes for an outbox post.
func (m *MetricsProvider) OutboxResolveInboxesTime(value time.Duration) {
}
// InboxHandlerTime records the time it takes to handle an activity posted to the inbox.
func (m *MetricsProvider) InboxHandlerTime(activityType string, value time.Duration) {
}
// WriteAnchorTime records the time it takes to write an anchor credential and post an 'Offer' activity.
func (m *MetricsProvider) WriteAnchorTime(value time.Duration) {
}
// ProcessWitnessedAnchorCredentialTime records the time it takes to process a witnessed anchor credential
// by publishing it to the Observer and posting a 'Create' activity.
func (m *MetricsProvider) ProcessWitnessedAnchorCredentialTime(value time.Duration) {
}
// AddOperationTime records the time it takes to add an operation to the queue.
func (m *MetricsProvider) AddOperationTime(value time.Duration) {
}
// BatchCutTime records the time it takes to cut an operation batch.
func (m *MetricsProvider) BatchCutTime(value time.Duration) {
}
// BatchRollbackTime records the time it takes to roll back an operation batch (in case of a transient error).
func (m *MetricsProvider) BatchRollbackTime(value time.Duration) {
}
// ProcessAnchorTime records the time it takes for the Observer to process an anchor credential.
func (m *MetricsProvider) ProcessAnchorTime(value time.Duration) {
}
// ProcessDIDTime records the time it takes for the Observer to process a DID.
func (m *MetricsProvider) ProcessDIDTime(value time.Duration) {
}
// CASWriteTime records the time it takes to write a document to CAS.
func (m *MetricsProvider) CASWriteTime(value time.Duration) {
}
// CASResolveTime records the time it takes to resolve a document from CAS.
func (m *MetricsProvider) CASResolveTime(value time.Duration) {
}
// BatchAckTime records the time to acknowledge all of the operations that are removed from the queue.
func (m *MetricsProvider) BatchAckTime(value time.Duration) {
}
// BatchNackTime records the time to nack all of the operations that are to be placed back on the queue.
func (m *MetricsProvider) BatchNackTime(value time.Duration) {
}
// WitnessAnchorCredentialTime records the time it takes for a verifiable credential to gather proofs from all
// required witnesses (according to witness policy). The start time is when the verifiable credential is issued
// and the end time is the time that the witness policy is satisfied.
func (m *MetricsProvider) WitnessAnchorCredentialTime(value time.Duration) {
}
// DocumentCreateUpdateTime records the time it takes the REST handler to process a create/update operation.
func (m *MetricsProvider) DocumentCreateUpdateTime(value time.Duration) {
}
// DocumentResolveTime records the time it takes the REST handler to resolve a document.
func (m *MetricsProvider) DocumentResolveTime(value time.Duration) {
}
// OutboxIncrementActivityCount increments the number of activities of the given type posted to the outbox.
func (m *MetricsProvider) OutboxIncrementActivityCount(activityType string) {
}
// CASIncrementCacheHitCount increments the number of CAS cache hits.
func (m *MetricsProvider) CASIncrementCacheHitCount() {
}
// CASIncrementCacheMissCount increments the number of CAS cache misses.
func (m *MetricsProvider) CASIncrementCacheMissCount() {
}
// BatchSize records the size of an operation batch.
func (m *MetricsProvider) BatchSize(float64) {
} | pkg/mocks/metricsprovider.go | 0.705582 | 0.450359 | metricsprovider.go | starcoder |
package enigma
import (
"fmt"
)
// Enigma represents the whole Enigma machine
// G represents the game, and the state of the game. This is the main structure the run loop should interact with.
type Enigma struct {
	Model                // embedded machine model; supplies slot, rotor and reflector capabilities
	plugboard  plugboard // letter-pair swaps applied before and after the rotor pass (if the model has one)
	entryWheel etw       // entry wheel (ETW) mapping between keyboard contacts and rotor contacts
	rotors     []rotor   // installed rotors, indexed by RotorSlot (Right = 0, Middle = 1, ...)
	reflector  reflector // reflector that turns the signal back through the rotors
}
// RotorSlot represents the slot for the rotor. Most Enigmas had three;
// the value doubles as the rotor's index in Enigma.rotors.
type RotorSlot int

// all available rotor slots
const (
	Right  RotorSlot = 0 // rightmost rotor (the first one the signal enters)
	Middle RotorSlot = 1
	Left   RotorSlot = 2
	Fourth RotorSlot = 3 // extra slot on four-rotor models
)
// NewEnigma creates the given Enigma machine model with the default settings
// (usually everything on "zero" position). An unsupported model, or a model
// whose default rotor set cannot be installed, is reported as an error.
func NewEnigma(model Model) (Enigma, error) {
	if !model.exists() {
		return Enigma{}, fmt.Errorf("unsupported model %s", model)
	}
	e := Enigma{
		Model:      model,
		plugboard:  newPlugboard(model.HasPlugboard()),
		entryWheel: newEtw(model.getEtwWiring()),
		rotors:     []rotor{},
		reflector:  newReflector(model.getDefaultReflectorModel()),
	}
	// Select the default rotors for every slot. A failure here means the
	// built-in model definition is inconsistent; surface it as an error
	// (the function already returns one) instead of panicking, so callers
	// decide how to react.
	if err := e.RotorsSelect(e.getDefaultRotorModels()); err != nil {
		return Enigma{}, fmt.Errorf("failed to select default rotors in %s model: %w", e.GetName(), err)
	}
	return e, nil
}
// NewEnigmaWithSetup creates a new Enigma machine and applies the full
// configuration: rotor selection/positions, reflector setup, and plugboard
// wiring. Each piece of configuration is applied only when the caller
// actually provided it.
func NewEnigmaWithSetup(model Model, rotors map[RotorSlot]RotorConfig, reflector ReflectorConfig, plugboard string) (Enigma, error) {
	e, err := NewEnigma(model)
	if err != nil {
		return Enigma{}, err
	}
	if len(rotors) > 0 {
		if setupErr := e.RotorsSetup(rotors); setupErr != nil {
			return Enigma{}, fmt.Errorf("failed to setup rotors: %w", setupErr)
		}
	}
	if !reflector.isEmpty() {
		if setupErr := e.ReflectorSetup(reflector); setupErr != nil {
			return Enigma{}, fmt.Errorf("failed to setup reflector: %w", setupErr)
		}
	}
	if plugboard != "" {
		if setupErr := e.PlugboardSetup(plugboard); setupErr != nil {
			return Enigma{}, fmt.Errorf("failed to setup plugboard: %w", setupErr)
		}
	}
	return e, nil
}
// GetReflectorModel returns the reflector model currently placed in this
// Enigma machine.
func (e *Enigma) GetReflectorModel() ReflectorModel {
	return e.reflector.model
}
// -------------------------------------- SETUP --------------------------------------
// RotorsSetup fully configures all rotors in this Enigma machine.
// The setup is two-phase: first the requested rotor models are merged over
// the currently installed ones and validated/instantiated together (so
// duplicate or unsupported selections are rejected before anything is
// mutated), then wheel and ring positions are applied per slot. The
// machine's rotors are only replaced once everything succeeded.
func (e *Enigma) RotorsSetup(config map[RotorSlot]RotorConfig) error {
	rotorModels := map[RotorSlot]RotorModel{}
	for i, rotor := range e.rotors {
		rotorModels[e.rotorIndexToSlot(i)] = rotor.model // fill with current values
	}
	for slot, rotorConfig := range config {
		if !e.HasRotorSlot(slot) {
			return fmt.Errorf("unsupported rotor slot %d", slot)
		}
		rotorModels[slot] = rotorConfig.Model
	}
	rotors, err := e.getRotors(rotorModels)
	if err != nil {
		return fmt.Errorf("failed to set rotor models: %w", err)
	}
	// Apply wheel/ring positions only where explicitly configured; zero
	// values mean "leave at the rotor's default".
	for slot, rotorConfig := range config {
		if rotorConfig.WheelPosition != 0 {
			if err = rotors[e.rotorSlotToIndex(slot)].setWheelPosition(rotorConfig.WheelPosition); err != nil {
				return fmt.Errorf("failed to set wheel position for rotor %s: %w", rotorConfig.Model, err)
			}
		}
		if rotorConfig.RingPosition != 0 {
			if err = rotors[e.rotorSlotToIndex(slot)].setRingPosition(rotorConfig.RingPosition); err != nil {
				return fmt.Errorf("failed to set ring position for rotor %s: %w", rotorConfig.Model, err)
			}
		}
	}
	e.rotors = rotors
	return nil
}
// RotorsSelect places the given rotor models into the given slots on this
// Enigma machine. Nothing is changed unless the whole selection is valid.
func (e *Enigma) RotorsSelect(rotorModels map[RotorSlot]RotorModel) error {
	rotors, err := e.getRotors(rotorModels)
	if err != nil {
		return err
	}
	e.rotors = rotors
	return nil
}
// getRotors validates the requested rotor selection and instantiates one
// rotor per slot. Every available slot must be populated, each slot must be
// supported by the current model, each rotor model must be allowed in its
// slot, and no rotor model may be used twice.
func (e *Enigma) getRotors(rotorModels map[RotorSlot]RotorModel) ([]rotor, error) {
	availableSlots := e.GetAvailableRotorSlots()
	if len(rotorModels) != len(availableSlots) {
		return nil, fmt.Errorf("%s model has %d rotors, but %d rotors selected", e.GetName(), len(availableSlots), len(rotorModels))
	}
	rotors := make([]rotor, len(availableSlots))
	isDuplicateModel := map[RotorModel]struct{}{}
	for slot, rotorModel := range rotorModels {
		// can only populate slots supported by the current model
		if !e.HasRotorSlot(slot) {
			return nil, fmt.Errorf("unsupported rotor slot %d", slot)
		}
		// can only place supported rotor to the slot
		if !e.supportsRotorModel(rotorModel, slot) {
			return nil, fmt.Errorf("%s model does not support rotor %s in slot %d", e.GetName(), rotorModel, slot)
		}
		// handle duplicates
		if _, ok := isDuplicateModel[rotorModel]; ok {
			return nil, fmt.Errorf("cannot select the rotor %s twice", rotorModel)
		}
		// all good, add the rotor; go through rotorSlotToIndex for
		// consistency with every other slot access in this type
		rotors[e.rotorSlotToIndex(slot)] = newRotor(rotorModel)
		isDuplicateModel[rotorModel] = struct{}{}
	}
	return rotors, nil
}
// RotorSetWheel sets the wheel position (rotation) of the rotor in the given
// slot. Fails when the model does not have that slot or the position is
// rejected by the rotor.
func (e *Enigma) RotorSetWheel(slot RotorSlot, position byte) error {
	if !e.HasRotorSlot(slot) {
		return fmt.Errorf("unsupported rotor slot %d", slot)
	}
	target := &e.rotors[e.rotorSlotToIndex(slot)]
	return target.setWheelPosition(position)
}
// RotorSetRing adjusts the ring setting (ringstellung) of the rotor in the
// given slot. Fails when the model does not have that slot or the position
// is rejected by the rotor.
func (e *Enigma) RotorSetRing(slot RotorSlot, position int) error {
	if !e.HasRotorSlot(slot) {
		return fmt.Errorf("unsupported rotor slot %d", slot)
	}
	target := &e.rotors[e.rotorSlotToIndex(slot)]
	return target.setRingPosition(position)
}
// RotorsReset resets every rotor to its starting (wheel) position. This is
// necessary before encoding/decoding another message, because the rotors
// advance after every encoded letter.
func (e *Enigma) RotorsReset() {
	for i := range e.rotors {
		e.rotors[i].reset()
	}
}
// ReflectorSetup fully configures the reflector in this Enigma machine:
// model (optional, keeps the current one when empty), wheel position
// (optional) and custom wiring (optional). The machine's reflector is only
// replaced once every step succeeded.
func (e *Enigma) ReflectorSetup(config ReflectorConfig) error {
	model := config.Model
	if model == "" {
		model = e.reflector.model // use current if not specified
	}
	ref, err := e.getReflector(model)
	if err != nil {
		return fmt.Errorf("failed to select reflector: %w", err)
	}
	// zero position / empty wiring mean "leave at the reflector's default"
	if config.WheelPosition != 0 {
		if err = ref.setWheelPosition(config.WheelPosition); err != nil {
			return fmt.Errorf("failed to set reflector position: %w", err)
		}
	}
	if config.Wiring != "" {
		if err = ref.setWiring(config.Wiring); err != nil {
			return fmt.Errorf("failed to rewire reflector: %w", err)
		}
	}
	e.reflector = ref
	return nil
}
// ReflectorSelect places the given reflector model into this Enigma machine.
// The current reflector is kept when the model is not supported.
func (e *Enigma) ReflectorSelect(reflectorModel ReflectorModel) error {
	ref, err := e.getReflector(reflectorModel)
	if err != nil {
		return err
	}
	e.reflector = ref
	return nil
}
// getReflector instantiates the given reflector model after checking that
// the current machine model supports it.
func (e *Enigma) getReflector(reflectorModel ReflectorModel) (reflector, error) {
	if e.supportsReflectorModel(reflectorModel) {
		return newReflector(reflectorModel), nil
	}
	return reflector{}, fmt.Errorf("%s model does not support reflector %s", e.GetName(), reflectorModel)
}
// ReflectorSetWheel sets the reflector in this Enigma machine to the given
// position (only for movable reflectors). The error from the reflector is
// returned directly; the previous if-err-return-err/return-nil dance was
// equivalent but noisier.
func (e *Enigma) ReflectorSetWheel(position byte) error {
	return e.reflector.setWheelPosition(position)
}
// ReflectorRewire changes the internal wiring of the reflector in this
// Enigma machine (only for rewirable reflectors). The error from the
// reflector is returned directly.
func (e *Enigma) ReflectorRewire(wiring string) error {
	return e.reflector.setWiring(wiring)
}
// PlugboardSetup configures the plugboard (if supported by this Enigma
// model) from the given plug-pair configuration string.
func (e *Enigma) PlugboardSetup(plugConfig string) error {
	if e.HasPlugboard() {
		return e.plugboard.setup(plugConfig)
	}
	return fmt.Errorf("%s model does not have a plugboard", e.GetName())
}
// rotorSlotToIndex converts a RotorSlot to its index in the rotors slice
// (the two are numerically identical; the conversion exists for clarity).
func (e *Enigma) rotorSlotToIndex(slot RotorSlot) int {
	return int(slot)
}

// rotorIndexToSlot is the inverse of rotorSlotToIndex.
func (e *Enigma) rotorIndexToSlot(index int) RotorSlot {
	return RotorSlot(index)
}
// -------------------------------------- ENCODING --------------------------------------
// Encode encodes the given text. To decode, reset the rotors (RotorsReset)
// and run Encode again on the encoded text — Enigma encryption is its own
// inverse.
func (e *Enigma) Encode(text string) (string, error) {
	result, _, err := e.doEncode(text)
	return result, err
}
// EncodeVerbose is used for debugging the encoding process: it returns the
// detailed per-letter encryption sequences instead of just the encrypted
// text.
func (e *Enigma) EncodeVerbose(text string) ([]EncryptionSequence, error) {
	_, sequences, err := e.doEncode(text)
	return sequences, err
}
// doEncode encodes text letter by letter, returning both the encoded string
// and the per-letter encryption sequences. Encoding stops at the first
// unsupported letter, with an error naming it.
func (e *Enigma) doEncode(text string) (string, []EncryptionSequence, error) {
	result := make([]byte, len(text))
	sequences := make([]EncryptionSequence, len(text))
	// Iterate over bytes, not runes: ranging over a string yields runes, and
	// the previous byte(letter) truncated multi-byte characters (e.g. U+0141
	// 'Ł' truncates to 0x41 'A'), silently encoding the wrong letter and
	// leaving gaps in result. Individual non-ASCII bytes are now rejected by
	// translate instead.
	for i := 0; i < len(text); i++ {
		sequence, err := e.translate(text[i])
		if err != nil {
			return "", nil, fmt.Errorf("failed to encode letter \"%s\": %w", string(rune(text[i])), err)
		}
		result[i] = sequence.GetResult()
		sequences[i] = sequence
	}
	return string(result), sequences, nil
}
// translate runs a single key press through the full encryption path:
// plugboard -> ETW -> rotors (right to left) -> reflector -> rotors (left to
// right) -> ETW -> plugboard, recording every intermediate letter in an
// EncryptionSequence. As on the real machine, the rotors step before the
// letter is translated.
func (e *Enigma) translate(in byte) (EncryptionSequence, error) {
	letter, ok := Alphabet.charToInt(in)
	if !ok {
		return EncryptionSequence{}, fmt.Errorf("unsupported letter")
	}
	// rotate the rotors first and start sequence
	e.rotate()
	sequence := EncryptionSequence{}
	sequence.start(e.rotors, letter)
	// I. plugboard -> ETW
	if e.HasPlugboard() {
		letter = e.plugboard.translate(letter)
		sequence.addStep("plugboard", letter)
	}
	// II. ETW -> rotors
	letter = e.entryWheel.translateIn(letter)
	sequence.addStep("etw", letter)
	// III. rotors -> reflector (reverse order of rotors, the letter goes from right to left)
	slots := e.GetAvailableRotorSlots()
	for _, slot := range slots {
		slotIndex := e.rotorSlotToIndex(slot)
		letter = e.rotors[slotIndex].translateIn(letter)
		sequence.addStep(fmt.Sprintf("rotor %d", slotIndex+1), letter)
	}
	// IV. reflector -> rotors
	letter = e.reflector.translate(letter)
	sequence.addStep("reflector", letter)
	// V. rotors -> ETW (same rotors in the opposite order and direction)
	for i := len(slots) - 1; i >= 0; i-- {
		slotIndex := e.rotorSlotToIndex(slots[i])
		letter = e.rotors[slotIndex].translateOut(letter)
		sequence.addStep(fmt.Sprintf("rotor %d", slotIndex+1), letter)
	}
	// VI. ETW -> plugboard
	letter = e.entryWheel.translateOut(letter)
	sequence.addStep("etw", letter)
	// VII. plugboard -> output bulb
	if e.HasPlugboard() {
		letter = e.plugboard.translate(letter)
		sequence.addStep("plugboard", letter)
	}
	sequence.finish(letter)
	return sequence, nil
}
// rotate advances the rotors exactly as a key press would: the right rotor
// always steps; the middle rotor steps when the right rotor's notch was
// engaged; and when the middle rotor's notch is engaged the left rotor steps
// and the middle rotor steps again — the historical "double-stepping"
// anomaly. Both notch checks are captured before any rotor moves.
func (e *Enigma) rotate() {
	// determine which rotors should be rotated in this step
	rotateMiddle := e.rotors[e.rotorSlotToIndex(Right)].shouldRotateNext()
	rotateLeft := e.rotors[e.rotorSlotToIndex(Middle)].shouldRotateNext()
	e.rotors[e.rotorSlotToIndex(Right)].rotate() // always rotate the right rotor
	if rotateMiddle {
		e.rotors[e.rotorSlotToIndex(Middle)].rotate()
	}
	if rotateLeft {
		// double-stepping - middle rotor rotates again if left rotor rotates
		e.rotors[e.rotorSlotToIndex(Middle)].rotate()
		e.rotors[e.rotorSlotToIndex(Left)].rotate()
	}
}
package elastic
// The geo_distance facet is a facet providing information for ranges of
// distances from a provided geo_point including count of the number of hits
// that fall within each range, and aggregation information (like total).
// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets-geo-distance-facet.html
type GeoDistanceFacet struct {
	facetFilter    Filter                  // optional filter serialized as facet_filter
	global         *bool                   // nil = unset; serialized as global when set
	nested         string                  // nested path, serialized when non-empty
	mode           string                  // facet mode, serialized when non-empty
	fieldName      string                  // geo_point field the distances are measured from
	valueFieldName string                  // optional value_field for the aggregation
	lat            float64                 // origin latitude (used unless geoHash is set)
	lon            float64                 // origin longitude (used unless geoHash is set)
	geoHash        string                  // origin as a geohash; takes precedence over lat/lon
	geoDistance    string                  // serialized as distance_type when non-empty
	unit           string                  // distance unit for the ranges
	params         map[string]interface{}  // script parameters for valueScript
	valueScript    string                  // optional value_script for the aggregation
	lang           string                  // script language, only serialized with valueScript
	entries        []geoDistanceFacetEntry // configured distance ranges
}
// NewGeoDistanceFacet creates a new GeoDistanceFacet with empty script
// parameters and no distance ranges; configure it via the chainable setter
// methods.
func NewGeoDistanceFacet() GeoDistanceFacet {
	return GeoDistanceFacet{
		params:  make(map[string]interface{}),
		entries: make([]geoDistanceFacetEntry, 0),
	}
}
// FacetFilter sets the filter serialized as facet_filter. All setters on
// GeoDistanceFacet use value receivers and return the modified copy, so
// calls can be chained.
func (f GeoDistanceFacet) FacetFilter(filter Facet) GeoDistanceFacet {
	f.facetFilter = filter
	return f
}

// Global controls whether the facet is computed globally.
func (f GeoDistanceFacet) Global(global bool) GeoDistanceFacet {
	f.global = &global
	return f
}

// Nested sets the nested path for facets over nested documents.
func (f GeoDistanceFacet) Nested(nested string) GeoDistanceFacet {
	f.nested = nested
	return f
}

// Mode sets the facet mode.
func (f GeoDistanceFacet) Mode(mode string) GeoDistanceFacet {
	f.mode = mode
	return f
}

// Field sets the name of the geo_point field distances are measured from.
func (f GeoDistanceFacet) Field(fieldName string) GeoDistanceFacet {
	f.fieldName = fieldName
	return f
}

// ValueField sets the field serialized as value_field.
func (f GeoDistanceFacet) ValueField(valueFieldName string) GeoDistanceFacet {
	f.valueFieldName = valueFieldName
	return f
}

// ValueScript sets the script serialized as value_script.
func (f GeoDistanceFacet) ValueScript(valueScript string) GeoDistanceFacet {
	f.valueScript = valueScript
	return f
}

// Lang sets the script language (only serialized when a value script is set).
func (f GeoDistanceFacet) Lang(lang string) GeoDistanceFacet {
	f.lang = lang
	return f
}

// ScriptParam adds a named parameter passed to the value script.
func (f GeoDistanceFacet) ScriptParam(name string, value interface{}) GeoDistanceFacet {
	f.params[name] = value
	return f
}
// Point sets the origin of the distance measurements as a lat/lon pair.
func (f GeoDistanceFacet) Point(lat, lon float64) GeoDistanceFacet {
	f.lat = lat
	f.lon = lon
	return f
}

// Lat sets the origin latitude.
func (f GeoDistanceFacet) Lat(lat float64) GeoDistanceFacet {
	f.lat = lat
	return f
}

// Lon sets the origin longitude.
func (f GeoDistanceFacet) Lon(lon float64) GeoDistanceFacet {
	f.lon = lon
	return f
}

// GeoHash sets the origin as a geohash; when set, it takes precedence over
// the lat/lon pair in the serialized output.
func (f GeoDistanceFacet) GeoHash(geoHash string) GeoDistanceFacet {
	f.geoHash = geoHash
	return f
}

// GeoDistance sets the distance computation type, serialized as
// distance_type.
func (f GeoDistanceFacet) GeoDistance(geoDistance string) GeoDistanceFacet {
	f.geoDistance = geoDistance
	return f
}
// AddRange adds a distance range bounded on both ends.
func (f GeoDistanceFacet) AddRange(from, to float64) GeoDistanceFacet {
	f.entries = append(f.entries, geoDistanceFacetEntry{From: from, To: to})
	return f
}

// AddUnboundedTo adds a range with a lower bound and no upper bound
// (everything from `from` outwards).
func (f GeoDistanceFacet) AddUnboundedTo(from float64) GeoDistanceFacet {
	f.entries = append(f.entries, geoDistanceFacetEntry{From: from, To: nil})
	return f
}

// AddUnboundedFrom adds a range with an upper bound and no lower bound
// (everything up to `to`).
func (f GeoDistanceFacet) AddUnboundedFrom(to float64) GeoDistanceFacet {
	f.entries = append(f.entries, geoDistanceFacetEntry{From: nil, To: to})
	return f
}

// Unit sets the distance unit used for the ranges.
func (f GeoDistanceFacet) Unit(distanceUnit string) GeoDistanceFacet {
	f.unit = distanceUnit
	return f
}
// addFilterFacetAndGlobal writes the options common to all facet types
// (facet_filter, nested, global, mode) into source, emitting each key only
// when it was configured.
func (f GeoDistanceFacet) addFilterFacetAndGlobal(source map[string]interface{}) {
	if f.facetFilter != nil {
		source["facet_filter"] = f.facetFilter.Source()
	}
	if f.nested != "" {
		source["nested"] = f.nested
	}
	if f.global != nil {
		source["global"] = *f.global
	}
	if f.mode != "" {
		source["mode"] = f.mode
	}
}
// Source builds the JSON-serializable representation of the facet: a map
// with the common facet options plus a "geo_distance" object holding the
// origin (geohash takes precedence over lat/lon), optional value field or
// script, the configured ranges, and the unit/distance type.
func (f GeoDistanceFacet) Source() interface{} {
	source := make(map[string]interface{})
	f.addFilterFacetAndGlobal(source)
	opts := make(map[string]interface{})
	source["geo_distance"] = opts
	// the origin is keyed by the field name: either a geohash string or a
	// [lat, lon] pair
	if f.geoHash != "" {
		opts[f.fieldName] = f.geoHash
	} else {
		opts[f.fieldName] = []float64{f.lat, f.lon}
	}
	if f.valueFieldName != "" {
		opts["value_field"] = f.valueFieldName
	}
	// lang and params are only meaningful together with a value script
	if f.valueScript != "" {
		opts["value_script"] = f.valueScript
		if f.lang != "" {
			opts["lang"] = f.lang
		}
		if len(f.params) > 0 {
			opts["params"] = f.params
		}
	}
	ranges := make([]interface{}, 0)
	for _, ent := range f.entries {
		r := make(map[string]interface{})
		// bounds of any type other than the listed numerics or string are
		// silently dropped from the serialized range
		if ent.From != nil {
			switch from := ent.From.(type) {
			case int, int16, int32, int64, float32, float64:
				r["from"] = from
			case string:
				r["from"] = from
			}
		}
		if ent.To != nil {
			switch to := ent.To.(type) {
			case int, int16, int32, int64, float32, float64:
				r["to"] = to
			case string:
				r["to"] = to
			}
		}
		ranges = append(ranges, r)
	}
	opts["ranges"] = ranges
	if f.unit != "" {
		opts["unit"] = f.unit
	}
	if f.geoDistance != "" {
		opts["distance_type"] = f.geoDistance
	}
	return source
}
// geoDistanceFacetEntry holds a single distance range of the facet.
type geoDistanceFacetEntry struct {
	From interface{} // lower bound (numeric or string), or nil for an open lower end
	To   interface{} // upper bound (numeric or string), or nil for an open upper end
}
package aoc2020
/*
Day 08 - Handheld Halting
https://adventofcode.com/2020/day/8
Your flight to the major airline hub reaches cruising altitude without incident. While you consider checking the in-flight menu for one of those drinks that come with a little umbrella, you are interrupted by the kid sitting next to you.
Their handheld game console won't turn on! They ask if you can take a look.
You narrow the problem down to a strange infinite loop in the boot code (your puzzle input) of the device. You should be able to fix it, but first you need to be able to run the code in isolation.
The boot code is represented as a text file with one instruction per line of text. Each instruction consists of an operation (acc, jmp, or nop) and an argument (a signed number like +4 or -20).
acc increases or decreases a single global value called the accumulator by the value given in the argument. For example, acc +7 would increase the accumulator by 7. The accumulator starts at 0. After an acc instruction, the instruction immediately below it is executed next.
jmp jumps to a new instruction relative to itself. The next instruction to execute is found using the argument as an offset from the jmp instruction; for example, jmp +2 would skip the next instruction, jmp +1 would continue to the instruction immediately below it, and jmp -20 would cause the instruction 20 lines above to be executed next.
nop stands for No OPeration - it does nothing. The instruction immediately below it is executed next.
For example, consider the following program:
nop +0
acc +1
jmp +4
acc +3
jmp -3
acc -99
acc +1
jmp -4
acc +6
These instructions are visited in this order:
nop +0 | 1
acc +1 | 2, 8(!)
jmp +4 | 3
acc +3 | 6
jmp -3 | 7
acc -99 |
acc +1 | 4
jmp -4 | 5
acc +6 |
First, the nop +0 does nothing. Then, the accumulator is increased from 0 to 1 (acc +1) and jmp +4 sets the next instruction to the other acc +1 near the bottom. After it increases the accumulator from 1 to 2, jmp -4 executes, setting the next instruction to the only acc +3. It sets the accumulator to 5, and jmp -3 causes the program to continue back at the first acc +1.
This is an infinite loop: with this sequence of jumps, the program will run forever. The moment the program tries to run any instruction a second time, you know it will never terminate.
Immediately before the program would run an instruction a second time, the value in the accumulator is 5.
Run your copy of the boot code. Immediately before any instruction is executed a second time, what value is in the accumulator?
*/
import (
"fmt"
"os"
"strconv"
"strings"
goutils "github.com/simonski/goutils"
)
// Y2020D08P1 is the entrypoint for 2020 day 8 part 1.
func (app *Application) Y2020D08P1() {
	AOC_2020_08_part1_attempt1(app)
}
// Y2020D08P2 is the entrypoint for 2020 day 8 part 2.
func (app *Application) Y2020D08P2() {
	AOC_2020_08_part2_attempt1(app)
}
// AOC_2020_08_part1_attempt1 solves part 1: step the boot program until an
// instruction is about to execute a second time (the start of the infinite
// loop) and print the accumulator at that point.
func AOC_2020_08_part1_attempt1(app *Application) {
	cli := app.CLI
	filename := cli.GetFileExistsOrDie("-input")
	p := NewProgramFromFilename(filename)
	p.Debug()
	// Step until the current instruction has already executed once — the
	// first repeat marks the infinite loop. (`for true` replaced by the
	// idiomatic bare `for`.)
	for {
		p.Step()
		instruction := p.GetCurrentInstruction()
		if instruction.ExecutionCount == 1 {
			// then it has run once.
			fmt.Printf("Accumulator is now %v, index is %v, instruction is %v\n", p.Accumulator, p.Index, instruction)
			break
		}
	}
}
// AOC_2020_08_part2_attempt1 solves part 2: exactly one jmp<->nop flip makes
// the program terminate. Try flipping each jmp to nop, then each nop to jmp,
// until a patched program runs to completion; print the accumulator and exit.
// The two previously duplicated trial loops share the runPatched helper.
func AOC_2020_08_part2_attempt1(app *Application) {
	cli := app.CLI
	filename := cli.GetFileExistsOrDie("-input")
	p := NewProgramFromFilename(filename)
	for _, idx := range p.FindInstructionIndexes("jmp") {
		if acc, terminated := runPatched(filename, idx, "nop"); terminated {
			fmt.Printf("jmp->nop at index %v fixes our program, Accumulator is %v\n", idx, acc)
			os.Exit(0)
		} else {
			fmt.Printf("jmp->nop at index %v causes a loop.\n", idx)
		}
	}
	for _, idx := range p.FindInstructionIndexes("nop") {
		if acc, terminated := runPatched(filename, idx, "jmp"); terminated {
			fmt.Printf("nop->jmp at index %v fixes our program, Accumulator is %v\n", idx, acc)
			os.Exit(0)
		} else {
			fmt.Printf("nop->jmp at index %v causes a loop.\n", idx)
		}
	}
}

// runPatched loads a fresh copy of the program from filename, overwrites the
// operation at instruction index with newOp, and runs it until it either
// terminates or re-enters an instruction (a loop). It returns the final
// accumulator value and whether the program ran to completion.
func runPatched(filename string, index int, newOp string) (int, bool) {
	p := NewProgramFromFilename(filename)
	p.GetInstructionAtIndex(index).Operation = newOp
	for {
		if !p.Step() {
			return p.Accumulator, false // looped
		}
		if p.IsComplete() {
			return p.Accumulator, true
		}
	}
}
// Program is a loaded boot-code program: an ordered list of instructions
// plus the machine state (accumulator, instruction pointer, step counter).
type Program struct {
	Instructions []*Instruction // parsed instructions, in file order
	Accumulator  int            // running accumulator, modified by "acc"
	Index        int            // index of the next instruction to execute
	CurrentStep  int            // number of steps executed so far
}
// FindInstructionIndexes returns the position of every instruction whose
// Operation matches the given operation (e.g. "jmp" or "nop").
func (p *Program) FindInstructionIndexes(operation string) []int {
	results := make([]int, 0)
	for index, instruction := range p.Instructions {
		if instruction.Operation == operation {
			results = append(results, index)
		}
	}
	return results
}
// Size returns the number of instructions in the program.
func (p *Program) Size() int {
	return len(p.Instructions)
}

// GetInstructionAtIndex returns the instruction at the given index.
// The index is not bounds-checked; an out-of-range index panics.
func (p *Program) GetInstructionAtIndex(index int) *Instruction {
	return p.Instructions[index]
}

// GetCurrentInstruction returns the Instruction at the current Index.
func (p *Program) GetCurrentInstruction() *Instruction {
	return p.Instructions[p.Index]
}
// Reset returns the Program to its original state: Index, Accumulator and
// CurrentStep back to zero, and every instruction's execution bookkeeping cleared.
func (p *Program) Reset() {
	p.Accumulator = 0
	p.Index = 0
	p.CurrentStep = 0
	for _, instruction := range p.Instructions {
		instruction.ExecutionCount = 0
		instruction.ExecutedOnStep = 0
	}
}
// Step executes the current instruction, moving Index to the next value.
// It returns true if this was the instruction's first execution, and false
// if the instruction has now executed more than once (an infinite loop).
func (p *Program) Step() bool {
	p.CurrentStep++
	instruction := p.GetCurrentInstruction()
	switch instruction.Operation {
	case "acc":
		// Increase or decrease the accumulator by the argument, then advance.
		p.Accumulator += instruction.Argument
		p.Index++
	case "jmp":
		// Jump relative to the current instruction.
		p.Index += instruction.Argument
	case "nop":
		p.Index++
	}
	instruction.ExecutionCount++
	instruction.ExecutedOnStep = p.CurrentStep
	return instruction.ExecutionCount == 1
}
// IsComplete reports whether the program has terminated, i.e. the instruction
// pointer has moved past the final instruction. The comparison is >= rather
// than == so that a jmp that overshoots the end still counts as terminated
// instead of leaving the program in a state where GetCurrentInstruction panics.
func (p *Program) IsComplete() bool {
	return p.Index >= len(p.Instructions)
}
// Debug prints every instruction in the program along with its execution stats.
func (p *Program) Debug() {
	for _, instruction := range p.Instructions {
		instruction.Debug()
	}
}
// NewProgram parses the given lines into a Program, skipping blank lines.
func NewProgram(lines []string) *Program {
	instructions := make([]*Instruction, 0)
	for _, line := range lines {
		if strings.TrimSpace(line) == "" {
			continue
		}
		instructions = append(instructions, NewInstruction(line))
	}
	return &Program{Instructions: instructions, Accumulator: 0, Index: 0}
}
// NewProgramFromFilename reads the file at filename line by line and parses
// it into a Program.
func NewProgramFromFilename(filename string) *Program {
	lines := goutils.Load_file_to_strings(filename)
	return NewProgram(lines)
}
// Instruction is a single boot-code instruction: an operation ("acc", "jmp"
// or "nop") and its signed integer argument, plus bookkeeping about when and
// how often it has been executed.
type Instruction struct {
	Operation      string // "acc", "jmp" or "nop"
	Argument       int    // signed argument, e.g. +4 or -3
	ExecutionCount int    // how many times this instruction has run
	ExecutedOnStep int    // the program step on which it last ran
}

// Debug prints the instruction and its execution statistics to stdout.
func (i *Instruction) Debug() {
	fmt.Printf("[%v] [%v] %v %v\n", i.ExecutionCount, i.ExecutedOnStep, i.Operation, i.Argument)
}
func NewInstruction(line string) *Instruction {
line = strings.TrimSpace(line)
splits := strings.Split(line, " ")
operation := splits[0]
argument, _ := strconv.Atoi(splits[1])
i := Instruction{Operation: operation, Argument: argument, ExecutionCount: 0}
return &i
} | app/aoc2020/aoc2020_08.go | 0.685844 | 0.7003 | aoc2020_08.go | starcoder |
package streams
import "io"
// ByteFilter filters all intercepted bytes based on the passed ByteFilterFunc. It should
// be safe to use either a stateful or idempotent function in this. However you should avoid reuse
// of a stateful ByteFilterFunc as correct behavior is difficult and error prone to implement.
type ByteFilter struct {
	bffn ByteFilterFunc // predicate deciding which bytes to keep
}

// NewByteFilter wraps the given ByteFilterFunc in a filtering Interceptor.
func NewByteFilter(bffn ByteFilterFunc) Interceptor {
	return &ByteFilter{
		bffn: bffn,
	}
}
// InterceptWrite filters p through the ByteFilterFunc before writing: only
// bytes for which the predicate returns true are passed on to w.
func (bf *ByteFilter) InterceptWrite(w io.Writer, p []byte) (n int, err error) {
	kept := p[:0] // reuse p's backing array; safe because we only shrink it
	for _, b := range p {
		if !bf.bffn(b) {
			continue
		}
		kept = append(kept, b)
	}
	return w.Write(kept)
}
// InterceptRead reads from r into p, then filters the bytes just read in
// place, keeping only those the ByteFilterFunc accepts. The returned n is the
// number of bytes kept, which may be smaller than the number actually read.
// An io.EOF from the underlying read is passed through alongside kept bytes;
// any other read error is returned immediately without filtering.
func (bf *ByteFilter) InterceptRead(r io.Reader, p []byte) (n int, err error) {
	if n, err = r.Read(p); err != nil && err != io.EOF {
		return
	}
	newP := p[:0] // this way we reuse the underlying buffer safely
	for _, b := range p[:n] {
		if bf.bffn(b) {
			newP = append(newP, b)
		}
	}
	return len(newP), err
}
// CompiledByteFilter prebuilds an array based on the output of the passed
// ByteFilterFunc, using this array as a fast lookup to filter all intercepted bytes. However as
// this assumes that the passed ByteFilterFunc is idempotent or stateless in nature, it has
// undefined behavior if this is not the case.
func CompiledByteFilter(bytefilterFunc ByteFilterFunc) Interceptor {
	return NewByteFilter(CompileByteFilterFunc(bytefilterFunc))
}

// ByteFilterFunc is used to filter bytes, returning true if the byte should be retained. This can be
// a stateful or stateless function. However you should avoid mutating or reuse of a stateful
// ByteFilterFunc as correct behavior is difficult and error prone to implement.
type ByteFilterFunc func(byte) bool
// CompileByteFilterFunc prebuilds an array based on the passed ByteFilterFunc. Using this array to
// perform a fast lookup filter of bytes. However as this assumes that the passed ByteFilterFunc is
// idempotent or stateless in nature, it has undefined behvaior if this is not the case.
func CompileByteFilterFunc(byteFilterFunc ByteFilterFunc) ByteFilterFunc {
// This compiles this into a a very fast array lookup
var byteMap [256]bool
for i := range byteMap {
byteMap[i] = byteFilterFunc(byte(i))
}
return func(b byte) bool {
return byteMap[b]
}
} | byte_filter.go | 0.763131 | 0.464719 | byte_filter.go | starcoder |
package kneedle
import (
"math"
"github.com/pkg/errors"
)
/*
Given set of values, look for the elbow/knee points.
See paper: "Finding a Kneedle in a Haystack: Detecting Knee Points in System Behavior"
@author Jagatheesan
*/
// findCandidateIndices finds the indices of all local minimum or local
// maximum y-values in data, where findMinima selects whether to look for
// local minima (true) or local maxima (false). Each element of data is an
// (x, y) pair; the first and last points can never be candidates.
func findCandidateIndices(data [][]float64, findMinima bool) (candidates []int) {
	// A coordinate is a candidate if both of its neighbours have y-values
	// strictly greater (for minima) or strictly smaller (for maxima) than its own.
	for i := 1; i < len(data)-1; i++ {
		prev := data[i-1][1]
		cur := data[i][1]
		next := data[i+1][1]
		var isCandidate bool
		if findMinima {
			isCandidate = prev > cur && next > cur
		} else {
			isCandidate = prev < cur && next < cur
		}
		if isCandidate {
			candidates = append(candidates, i)
		}
	}
	return
}
// findElbowIndex returns the index of the value with the largest absolute
// magnitude, i.e. the most exaggerated elbow/knee point. It returns 0 for
// empty input.
func findElbowIndex(data []float64) (bestIdx int) {
	bestScore := 0.0
	for i, v := range data {
		if score := math.Abs(v); score > bestScore {
			bestScore, bestIdx = score, i
		}
	}
	return bestIdx
}
// prepare prepares the data by smoothing, then normalising into the unit
// range 0-1, and finally subtracting the normalised x-value from the
// normalised y-value, where smoothingWindow is the size of the smoothing window.
// NOTE(review): the errors returned by gaussianSmooth2d and minmaxNormalise
// are silently discarded here — confirm they cannot fail for valid input.
func prepare(data [][]float64, smoothingWindow int) (normalisedData [][]float64) {
	//smooth the data to make local minimum/maximum easier to find (this is Step 1 in the paper)
	smoothedData, _ := gaussianSmooth2d(data, smoothingWindow)
	//prepare the data into the unit range (step 2 of paper)
	normalisedData, _ = minmaxNormalise(smoothedData)
	//subtract normalised x from normalised y (this is step 3 in the paper)
	for i := 0; i < len(normalisedData); i++ {
		normalisedData[i][1] = normalisedData[i][1] - normalisedData[i][0]
	}
	return
}
// computeAverageVarianceX returns the mean spacing between consecutive
// x-values in data (the average x step; despite the name it is not a
// statistical variance).
func computeAverageVarianceX(data [][]float64) float64 {
	sum := 0.0
	for i := 1; i < len(data); i++ {
		sum += data[i][0] - data[i-1][0]
	}
	return sum / float64(len(data)-1)
}
// Run takes in a 2D slice containing data where knee or elbow points need to
// be found. s is the number of "flat" points required before considering a
// point a knee or elbow. smoothingWindow is the window size for the Gaussian
// kernel average smoother (try 3 to begin with). findElbows selects elbows
// (true) or knees (false). It returns the original (x, y) points that were
// confirmed as elbows/knees.
func Run(data [][]float64, s int, smoothingWindow int, findElbows bool) (localMinMaxPts [][]float64, err error) {
	if(len(data) == 0){
		err = errors.New("Cannot find elbow or knee points in empty data.")
		return
	}
	if(len(data[0]) != 2){
		err = errors.New("Cannot run Kneedle, this method expects all data to be 2d.")
		return
	}
	//do steps 1,2,3 of the paper in the prepare method
	normalisedData := prepare(data, smoothingWindow)
	//find candidate indices (this is step 4 in the paper)
	candidateIndices := findCandidateIndices(normalisedData, findElbows)
	//go through each candidate index, i, and see if the indices after i satisfy the threshold requirement
	//(this is step 5 in the paper)
	step := computeAverageVarianceX(normalisedData)
	if findElbows{
		step = step * float64(s)
	} else {
		step = step * -float64(s)
	}
	//check each candidate to see if it is a real elbow/knee
	//(this is step 6 in the paper)
	for i := 0; i < len(candidateIndices); i++{
		candidateIdx := candidateIndices[i]
		// Only points up to the next candidate (or the end of data) are examined.
		var endIdx int
		if i + 1 < len(candidateIndices){
			endIdx = candidateIndices[i+1]
		} else {
			endIdx = len(data)
		}
		// A candidate is confirmed if any later point in its window crosses
		// the threshold derived from the mean x-spacing scaled by s.
		threshold := normalisedData[candidateIdx][1] + step
		for j := candidateIdx + 1; j < endIdx; j++{
			var isRealElbowOrKnee bool
			if findElbows{
				isRealElbowOrKnee = normalisedData[j][1] > threshold
			} else {
				isRealElbowOrKnee = normalisedData[j][1] < threshold
			}
			if isRealElbowOrKnee {
				localMinMaxPts = append(localMinMaxPts, data[candidateIdx])
				break
			}
		}
	}
	return
}
package bingo
import (
"encoding/base64"
"errors"
"math/rand"
"strconv"
"strings"
"time"
)
type (
	// Game represents a bingo game. The zero value can be used to start a new game.
	Game struct {
		numbers      [MaxNumber - MinNumber + 1]Number // all possible numbers, shuffled; drawn ones form the prefix
		numbersDrawn int                               // how many of numbers have been drawn so far
	}

	// Resetter resets games to valid, shuffled states. It can be seeded to
	// predictably reset the next game.
	Resetter interface {
		// Reset resets the game.
		Reset(g *Game)
		// Seed sets the GameResetter to reset the next game from a starting point.
		Seed(seed int64)
	}

	// shuffler is the internal implementation of GameResetter.
	// It uses a random source to randomly swap numbers when shuffling.
	shuffler struct {
		*rand.Rand
		swap func(numbers []Number) func(i, j int) // builds the swap callback for rand.Shuffle
	}
)
// GameResetter shuffles the game numbers. It is seeded to the time it is
// created; seed it explicitly (via Seed) only when deterministic games are
// needed, such as in tests.
var GameResetter Resetter = &shuffler{
	Rand: rand.New(rand.NewSource(time.Now().UnixNano())),
	// swap returns a closure exchanging two elements of numbers, as rand.Shuffle requires.
	swap: func(numbers []Number) func(i, j int) {
		return func(i, j int) {
			numbers[i], numbers[j] = numbers[j], numbers[i]
		}
	},
}
// NumbersLeft reports how many available numbers in the game can still be drawn.
func (g Game) NumbersLeft() int {
	g.normalizeNumbersDrawn()
	return len(g.numbers) - g.numbersDrawn
}

// DrawnNumbers returns the numbers in the game that have been drawn, in draw order.
func (g Game) DrawnNumbers() []Number {
	g.normalizeNumbersDrawn()
	return g.numbers[:g.numbersDrawn]
}
// DrawNumber moves the next available number to DrawnNumbers.
// The game is reset (reshuffled) first if no numbers have been drawn yet;
// once every number has been drawn, further calls are no-ops.
func (g *Game) DrawNumber() {
	g.normalizeNumbersDrawn()
	switch {
	case g.numbersDrawn == 0:
		GameResetter.Reset(g)
		g.numbersDrawn = 1
	case g.numbersDrawn < len(g.numbers):
		g.numbersDrawn++
	}
}
// DrawnNumberColumns partitions the drawn numbers by column, preserving the
// order in which they were drawn within each column.
func (g Game) DrawnNumberColumns() map[int][]Number {
	cols := make(map[int][]Number, 5)
	for _, n := range g.DrawnNumbers() {
		col := n.Column()
		cols[col] = append(cols[col], n)
	}
	return cols
}
// PreviousNumberDrawn is the last number drawn, or 0 if no numbers have been drawn.
func (g Game) PreviousNumberDrawn() Number {
	g.normalizeNumbersDrawn()
	if g.numbersDrawn == 0 {
		return 0
	}
	return g.numbers[g.numbersDrawn-1]
}
// Reset clears drawn numbers and resets/shuffles all the possible available
// numbers. To shuffle into a predictable order, call the Resetter's Seed
// method with a constant value first (the shuffler has its own rand source,
// so the package-level rand seed has no effect here).
func (s *shuffler) Reset(g *Game) {
	// Fill with 1..N, then shuffle in place.
	for i := range g.numbers {
		g.numbers[i] = Number(i + 1)
	}
	s.Rand.Shuffle(len(g.numbers), s.swap(g.numbers[:]))
	g.numbersDrawn = 0
}
// normalizeNumbersDrawn clamps numbersDrawn to the valid range [0, len(numbers)].
func (g *Game) normalizeNumbersDrawn() {
	if g.numbersDrawn < 0 {
		g.numbersDrawn = 0
	} else if g.numbersDrawn > len(g.numbers) {
		g.numbersDrawn = len(g.numbers)
	}
}
// ID encodes the game into an easy-to-transport string of the form
// "<numbersDrawn>-<base64url(numbers)>"; a fresh game encodes as "0".
// It returns an error if the game's numbers are not a valid set.
func (g Game) ID() (string, error) {
	g.normalizeNumbersDrawn()
	switch {
	case g.numbersDrawn == 0:
		return "0", nil
	case !numbers(g.numbers[:]).Valid():
		return "", errors.New("game has duplicate/invalid numbers")
	}
	// Each Number fits in a byte, so the array can be serialized directly.
	data := make([]byte, len(g.numbers))
	for i, n := range g.numbers {
		data[i] = byte(n)
	}
	nums := base64.URLEncoding.EncodeToString(data)
	id := strconv.Itoa(g.numbersDrawn) + "-" + nums
	return id, nil
}
// GameFromID creates a game from the identifying string produced by Game.ID:
// "0" for a fresh game, otherwise "<numbersDrawn>-<base64url(numbers)>".
func GameFromID(id string) (*Game, error) {
	i := strings.IndexAny(id, "-")
	switch {
	case id == "0":
		return new(Game), nil
	case i < 0, i >= len(id):
		return nil, errors.New("could not split id string into numbersDrawn and numbers")
	}
	numbersDrawnStr, numsStr := id[:i], id[i+1:]
	numbersDrawn, err := strconv.Atoi(numbersDrawnStr)
	if err != nil {
		return nil, errors.New("parsing numbersLeft: " + err.Error())
	}
	data, err := base64.URLEncoding.DecodeString(numsStr)
	if err != nil {
		return nil, errors.New("decoding game numbers: " + err.Error())
	}
	var g Game
	if len(data) != len(g.numbers) {
		return nil, errors.New("decoded numbers too large/small")
	}
	for i, n := range data {
		g.numbers[i] = Number(n)
	}
	if !numbers(g.numbers[:]).Valid() {
		return nil, errors.New("game has duplicate/invalid numbers")
	}
	// NOTE(review): numbersDrawn is not range-checked here; the accessors
	// clamp it via normalizeNumbersDrawn on use — confirm that is intended.
	g.numbersDrawn = numbersDrawn
	return &g, nil
}
package aut
import (
"bufio"
"bytes"
"io"
"strconv"
)
// Scanner is a lexical scanner.
type Scanner struct {
	r   *bufio.Reader // buffered input source
	pos TokenPos      // current position: character offset plus per-line history
}

// NewScanner returns a new instance of Scanner reading from r.
func NewScanner(r io.Reader) *Scanner {
	return &Scanner{r: bufio.NewReader(r), pos: TokenPos{Char: 0, Lines: []int{}}}
}
// read reads the next rune from the buffered reader and advances the
// position, recording the finished line's length on '\n' so unread can
// restore it. Returns rune(0) (eof) if the end is reached or an error occurs.
func (s *Scanner) read() rune {
	ch, _, err := s.r.ReadRune()
	if err != nil {
		return eof
	}
	if ch == '\n' {
		s.pos.Lines = append(s.pos.Lines, s.pos.Char)
		s.pos.Char = 0
	} else {
		s.pos.Char++
	}
	return ch
}
// unread places the previously read rune back on the reader and rewinds the
// position, popping the saved line length if the last rune was a newline.
// NOTE(review): calling unread without a preceding successful read of a
// newline would index an empty Lines slice and panic — callers must always
// pair unread with a read.
func (s *Scanner) unread() {
	_ = s.r.UnreadRune()
	if s.pos.Char == 0 {
		s.pos.Char = s.pos.Lines[len(s.pos.Lines)-1]
		s.pos.Lines = s.pos.Lines[:len(s.pos.Lines)-1]
	} else {
		s.pos.Char--
	}
}
// Scan returns the next token parsed from the input.
//
// Bug fix: the previous implementation captured the token end position via a
// deferred assignment to a local variable, but the defer ran only after the
// return value had already been constructed — so every token's end position
// was the zero TokenPos. The end is now taken from s.pos at the point each
// token is built.
func (s *Scanner) Scan() Token {
	ch := s.read()
	if isWhitespace(ch) {
		s.skipWhitespace()
		ch = s.read()
	}
	if isLetter(ch) {
		s.unread()
		return s.scanUnquoted()
	}
	if isDigit(ch) {
		s.unread()
		return s.scanDigit()
	}
	// Single-rune tokens start and end at the current position.
	startPos := s.pos
	switch ch {
	case eof:
		return &SymToken{t: 0, start: startPos, end: s.pos}
	case ',':
		return &SymToken{t: COMMA, start: startPos, end: s.pos}
	case '(':
		return &SymToken{t: LPAREN, start: startPos, end: s.pos}
	case ')':
		return &SymToken{t: RPAREN, start: startPos, end: s.pos}
	case '*': // special case for unquoted.
		return &LabelToken{str: "*", start: startPos, end: s.pos}
	case '"': // Quoted string.
		s.unread()
		return s.scanQuoted()
	}
	return &SymToken{t: ILLEGAL, start: startPos, end: s.pos}
}
// scanUnquoted scans an unquoted identifier/keyword starting at the current
// position, returning a DES keyword token or a LabelToken.
// Bug fix: the end position is now captured when the token is built; the
// previous deferred assignment ran after the return value was constructed,
// so end was always the zero TokenPos.
func (s *Scanner) scanUnquoted() Token {
	var buf bytes.Buffer
	startPos := s.pos
	buf.WriteRune(s.read())
	for {
		if ch := s.read(); ch == eof {
			break
		} else if !isUnquoted(ch) {
			s.unread()
			break
		} else {
			_, _ = buf.WriteRune(ch)
		}
	}
	if buf.String() == "des" {
		return &SymToken{t: DES, start: startPos, end: s.pos}
	}
	return &LabelToken{str: buf.String(), start: startPos, end: s.pos}
}
// scanDigit scans a run of decimal digits into a DigitsToken; if the digits
// fail to parse as an int it returns an ILLEGAL token.
// Bug fix: the end position is now captured when the token is built; the
// previous deferred assignment ran too late, leaving end as the zero TokenPos.
func (s *Scanner) scanDigit() Token {
	var buf bytes.Buffer
	startPos := s.pos
	for {
		if ch := s.read(); ch == eof {
			break
		} else if !isDigit(ch) {
			s.unread()
			break
		} else {
			_, _ = buf.WriteRune(ch)
		}
	}
	if i, err := strconv.Atoi(buf.String()); err == nil {
		return &DigitsToken{num: i, start: startPos, end: s.pos}
	}
	return &SymToken{t: ILLEGAL, start: startPos, end: s.pos}
}
// scanQuoted scans a double-quoted string into a LabelToken. Because quotes
// may legitimately appear inside the string, end-of-string is detected
// heuristically: a closing '"' followed (after optional whitespace) by ','.
// Bug fix: the end position is now captured when the token is built; the
// previous deferred assignment ran too late, leaving end as the zero TokenPos.
func (s *Scanner) scanQuoted() Token {
	var buf bytes.Buffer
	startPos := s.pos
	buf.WriteRune(s.read())
QUOTESEARCH:
	for {
		if ch := s.read(); ch == eof {
			break
		} else if ch == '"' {
			var searchBuf bytes.Buffer
			var nextRune rune
			searchBuf.WriteRune(ch)
			for nextRune = s.read(); isWhitespace(nextRune); nextRune = s.read() {
				searchBuf.WriteRune(nextRune)
			}
			if nextRune == ',' {
				// BUG(nickng): Heuristic to detect end of quoted string, could give wrong end-of-quote if string includes '" ,'
				s.unread()
				buf.WriteRune('"') // Put a final quote back in.
				break QUOTESEARCH
			} else {
				s.unread()
				buf.WriteString(searchBuf.String())
			}
		} else {
			_, _ = buf.WriteRune(ch)
		}
	}
	return &LabelToken{str: buf.String(), start: startPos, end: s.pos}
}
func (s *Scanner) skipWhitespace() {
for {
if ch := s.read(); ch == eof {
break
} else if !isWhitespace(ch) {
s.unread()
break
}
}
} | scanner.go | 0.585931 | 0.501526 | scanner.go | starcoder |
package influxql
import (
"bytes"
"container/heap"
"fmt"
"math"
"sort"
"time"
)
/*
This file contains iterator implementations for each function call available
in InfluxQL. Call iterators are separated into two groups:
1. Map/reduce-style iterators - these are passed to IteratorCreator so that
processing can be at the low-level storage and aggregates are returned.
2. Raw aggregate iterators - these require the full set of data for a window.
These are handled by the select() function and raw points are streamed in
from the low-level storage.
There are helpers to aid in building aggregate iterators. For simple map/reduce
iterators, you can use the reduceIterator types and pass a reduce function. This
reduce function is passed a previous and current value and the new timestamp,
value, and auxiliary fields are returned from it.
For raw aggregate iterators, you can use the reduceSliceIterators which pass
in a slice of all points to the function and return a point. For more complex
iterator types, you may need to create your own iterators by hand.
Once your iterator is complete, you'll need to add it to the NewCallIterator()
function if it is to be available to IteratorCreators and add it to the select()
function to allow it to be included during planning.
*/
// NewCallIterator returns a new iterator for a Call.
// It dispatches on the call name found in opt.Expr and wraps input in the
// matching aggregating iterator; unknown function names yield an error.
func NewCallIterator(input Iterator, opt IteratorOptions) (Iterator, error) {
	name := opt.Expr.(*Call).Name
	switch name {
	case "count":
		return newCountIterator(input, opt)
	case "min":
		return newMinIterator(input, opt)
	case "max":
		return newMaxIterator(input, opt)
	case "sum":
		return newSumIterator(input, opt)
	case "first":
		return newFirstIterator(input, opt)
	case "last":
		return newLastIterator(input, opt)
	case "mean":
		return newMeanIterator(input, opt)
	default:
		return nil, fmt.Errorf("unsupported function call: %s", name)
	}
}
// newCountIterator returns an iterator for operating on a count() call.
// Counts always emit integer points: each input type pairs its reducer with
// an integer emitter, seeded with a zero-count point at ZeroTime.
func newCountIterator(input Iterator, opt IteratorOptions) (Iterator, error) {
	// FIXME: Wrap iterator in int-type iterator and always output int value.
	switch input := input.(type) {
	case FloatIterator:
		createFn := func() (FloatPointAggregator, IntegerPointEmitter) {
			fn := NewFloatFuncIntegerReducer(FloatCountReduce, &IntegerPoint{Value: 0, Time: ZeroTime})
			return fn, fn
		}
		return &floatReduceIntegerIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil
	case IntegerIterator:
		createFn := func() (IntegerPointAggregator, IntegerPointEmitter) {
			fn := NewIntegerFuncReducer(IntegerCountReduce, &IntegerPoint{Value: 0, Time: ZeroTime})
			return fn, fn
		}
		return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil
	case StringIterator:
		createFn := func() (StringPointAggregator, IntegerPointEmitter) {
			fn := NewStringFuncIntegerReducer(StringCountReduce, &IntegerPoint{Value: 0, Time: ZeroTime})
			return fn, fn
		}
		return &stringReduceIntegerIterator{input: newBufStringIterator(input), opt: opt, create: createFn}, nil
	case BooleanIterator:
		createFn := func() (BooleanPointAggregator, IntegerPointEmitter) {
			fn := NewBooleanFuncIntegerReducer(BooleanCountReduce, &IntegerPoint{Value: 0, Time: ZeroTime})
			return fn, fn
		}
		return &booleanReduceIntegerIterator{input: newBufBooleanIterator(input), opt: opt, create: createFn}, nil
	default:
		return nil, fmt.Errorf("unsupported count iterator type: %T", input)
	}
}
// FloatCountReduce returns the running count of points at ZeroTime.
func FloatCountReduce(prev *IntegerPoint, curr *FloatPoint) (int64, int64, []interface{}) {
	var count int64 = 1
	if prev != nil {
		count = prev.Value + 1
	}
	return ZeroTime, count, nil
}

// IntegerCountReduce returns the running count of points at ZeroTime.
func IntegerCountReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) {
	var count int64 = 1
	if prev != nil {
		count = prev.Value + 1
	}
	return ZeroTime, count, nil
}

// StringCountReduce returns the running count of points at ZeroTime.
func StringCountReduce(prev *IntegerPoint, curr *StringPoint) (int64, int64, []interface{}) {
	var count int64 = 1
	if prev != nil {
		count = prev.Value + 1
	}
	return ZeroTime, count, nil
}

// BooleanCountReduce returns the running count of points at ZeroTime.
func BooleanCountReduce(prev *IntegerPoint, curr *BooleanPoint) (int64, int64, []interface{}) {
	var count int64 = 1
	if prev != nil {
		count = prev.Value + 1
	}
	return ZeroTime, count, nil
}
// newMinIterator returns an iterator for operating on a min() call.
// NOTE(review): there is no StringIterator case here, so min() over string
// input is rejected by the default branch — confirm that is intended.
func newMinIterator(input Iterator, opt IteratorOptions) (Iterator, error) {
	switch input := input.(type) {
	case FloatIterator:
		createFn := func() (FloatPointAggregator, FloatPointEmitter) {
			fn := NewFloatFuncReducer(FloatMinReduce, nil)
			return fn, fn
		}
		return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil
	case IntegerIterator:
		createFn := func() (IntegerPointAggregator, IntegerPointEmitter) {
			fn := NewIntegerFuncReducer(IntegerMinReduce, nil)
			return fn, fn
		}
		return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil
	case BooleanIterator:
		createFn := func() (BooleanPointAggregator, BooleanPointEmitter) {
			fn := NewBooleanFuncReducer(BooleanMinReduce, nil)
			return fn, fn
		}
		return &booleanReduceBooleanIterator{input: newBufBooleanIterator(input), opt: opt, create: createFn}, nil
	default:
		return nil, fmt.Errorf("unsupported min iterator type: %T", input)
	}
}
// FloatMinReduce returns the minimum value between prev & curr, preferring
// the earlier timestamp on ties.
func FloatMinReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) {
	keepPrev := prev != nil && !(curr.Value < prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time))
	if keepPrev {
		return prev.Time, prev.Value, prev.Aux
	}
	return curr.Time, curr.Value, curr.Aux
}

// IntegerMinReduce returns the minimum value between prev & curr, preferring
// the earlier timestamp on ties.
func IntegerMinReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) {
	keepPrev := prev != nil && !(curr.Value < prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time))
	if keepPrev {
		return prev.Time, prev.Value, prev.Aux
	}
	return curr.Time, curr.Value, curr.Aux
}

// BooleanMinReduce returns the minimum value between prev & curr
// (false < true), preferring the earlier timestamp on ties.
func BooleanMinReduce(prev, curr *BooleanPoint) (int64, bool, []interface{}) {
	keepPrev := prev != nil && !((curr.Value != prev.Value && !curr.Value) || (curr.Value == prev.Value && curr.Time < prev.Time))
	if keepPrev {
		return prev.Time, prev.Value, prev.Aux
	}
	return curr.Time, curr.Value, curr.Aux
}
// newMaxIterator returns an iterator for operating on a max() call.
// NOTE(review): there is no StringIterator case here, so max() over string
// input is rejected by the default branch — confirm that is intended.
func newMaxIterator(input Iterator, opt IteratorOptions) (Iterator, error) {
	switch input := input.(type) {
	case FloatIterator:
		createFn := func() (FloatPointAggregator, FloatPointEmitter) {
			fn := NewFloatFuncReducer(FloatMaxReduce, nil)
			return fn, fn
		}
		return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil
	case IntegerIterator:
		createFn := func() (IntegerPointAggregator, IntegerPointEmitter) {
			fn := NewIntegerFuncReducer(IntegerMaxReduce, nil)
			return fn, fn
		}
		return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil
	case BooleanIterator:
		createFn := func() (BooleanPointAggregator, BooleanPointEmitter) {
			fn := NewBooleanFuncReducer(BooleanMaxReduce, nil)
			return fn, fn
		}
		return &booleanReduceBooleanIterator{input: newBufBooleanIterator(input), opt: opt, create: createFn}, nil
	default:
		return nil, fmt.Errorf("unsupported max iterator type: %T", input)
	}
}
// FloatMaxReduce returns the maximum value between prev & curr, preferring
// the earlier timestamp on ties.
func FloatMaxReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) {
	keepPrev := prev != nil && !(curr.Value > prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time))
	if keepPrev {
		return prev.Time, prev.Value, prev.Aux
	}
	return curr.Time, curr.Value, curr.Aux
}

// IntegerMaxReduce returns the maximum value between prev & curr, preferring
// the earlier timestamp on ties.
func IntegerMaxReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) {
	keepPrev := prev != nil && !(curr.Value > prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time))
	if keepPrev {
		return prev.Time, prev.Value, prev.Aux
	}
	return curr.Time, curr.Value, curr.Aux
}

// BooleanMaxReduce returns the maximum value between prev & curr
// (true > false), preferring the earlier timestamp on ties.
func BooleanMaxReduce(prev, curr *BooleanPoint) (int64, bool, []interface{}) {
	keepPrev := prev != nil && !((curr.Value != prev.Value && curr.Value) || (curr.Value == prev.Value && curr.Time < prev.Time))
	if keepPrev {
		return prev.Time, prev.Value, prev.Aux
	}
	return curr.Time, curr.Value, curr.Aux
}
// newSumIterator returns an iterator for operating on a sum() call.
// Only float and integer inputs are summable; the seed point carries a zero
// value at ZeroTime so empty windows still emit 0.
func newSumIterator(input Iterator, opt IteratorOptions) (Iterator, error) {
	switch input := input.(type) {
	case FloatIterator:
		createFn := func() (FloatPointAggregator, FloatPointEmitter) {
			fn := NewFloatFuncReducer(FloatSumReduce, &FloatPoint{Value: 0, Time: ZeroTime})
			return fn, fn
		}
		return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil
	case IntegerIterator:
		createFn := func() (IntegerPointAggregator, IntegerPointEmitter) {
			fn := NewIntegerFuncReducer(IntegerSumReduce, &IntegerPoint{Value: 0, Time: ZeroTime})
			return fn, fn
		}
		return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil
	default:
		return nil, fmt.Errorf("unsupported sum iterator type: %T", input)
	}
}
// FloatSumReduce returns the sum of the prev value & curr value.
func FloatSumReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) {
	if prev != nil {
		return prev.Time, prev.Value + curr.Value, nil
	}
	return ZeroTime, curr.Value, nil
}

// IntegerSumReduce returns the sum of the prev value & curr value.
func IntegerSumReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) {
	if prev != nil {
		return prev.Time, prev.Value + curr.Value, nil
	}
	return ZeroTime, curr.Value, nil
}
// newFirstIterator returns an iterator for operating on a first() call.
// All four point types are supported; the reducer keeps the point with the
// earliest timestamp.
func newFirstIterator(input Iterator, opt IteratorOptions) (Iterator, error) {
	switch input := input.(type) {
	case FloatIterator:
		createFn := func() (FloatPointAggregator, FloatPointEmitter) {
			fn := NewFloatFuncReducer(FloatFirstReduce, nil)
			return fn, fn
		}
		return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil
	case IntegerIterator:
		createFn := func() (IntegerPointAggregator, IntegerPointEmitter) {
			fn := NewIntegerFuncReducer(IntegerFirstReduce, nil)
			return fn, fn
		}
		return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil
	case StringIterator:
		createFn := func() (StringPointAggregator, StringPointEmitter) {
			fn := NewStringFuncReducer(StringFirstReduce, nil)
			return fn, fn
		}
		return &stringReduceStringIterator{input: newBufStringIterator(input), opt: opt, create: createFn}, nil
	case BooleanIterator:
		createFn := func() (BooleanPointAggregator, BooleanPointEmitter) {
			fn := NewBooleanFuncReducer(BooleanFirstReduce, nil)
			return fn, fn
		}
		return &booleanReduceBooleanIterator{input: newBufBooleanIterator(input), opt: opt, create: createFn}, nil
	default:
		return nil, fmt.Errorf("unsupported first iterator type: %T", input)
	}
}
// FloatFirstReduce returns the point with the earliest time, preferring the
// larger value when timestamps are equal.
func FloatFirstReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) {
	useCurr := prev == nil || curr.Time < prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value)
	if !useCurr {
		return prev.Time, prev.Value, prev.Aux
	}
	return curr.Time, curr.Value, curr.Aux
}

// IntegerFirstReduce returns the point with the earliest time, preferring the
// larger value when timestamps are equal.
func IntegerFirstReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) {
	useCurr := prev == nil || curr.Time < prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value)
	if !useCurr {
		return prev.Time, prev.Value, prev.Aux
	}
	return curr.Time, curr.Value, curr.Aux
}

// StringFirstReduce returns the point with the earliest time, preferring the
// lexicographically larger value when timestamps are equal.
func StringFirstReduce(prev, curr *StringPoint) (int64, string, []interface{}) {
	useCurr := prev == nil || curr.Time < prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value)
	if !useCurr {
		return prev.Time, prev.Value, prev.Aux
	}
	return curr.Time, curr.Value, curr.Aux
}

// BooleanFirstReduce returns the point with the earliest time, preferring a
// false value over a true one when timestamps are equal.
func BooleanFirstReduce(prev, curr *BooleanPoint) (int64, bool, []interface{}) {
	useCurr := prev == nil || curr.Time < prev.Time || (curr.Time == prev.Time && !curr.Value && prev.Value)
	if !useCurr {
		return prev.Time, prev.Value, prev.Aux
	}
	return curr.Time, curr.Value, curr.Aux
}
// newLastIterator returns an iterator for operating on a last() call.
// All four point types are supported; the reducer keeps the point with the
// latest timestamp.
func newLastIterator(input Iterator, opt IteratorOptions) (Iterator, error) {
	switch input := input.(type) {
	case FloatIterator:
		createFn := func() (FloatPointAggregator, FloatPointEmitter) {
			fn := NewFloatFuncReducer(FloatLastReduce, nil)
			return fn, fn
		}
		return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil
	case IntegerIterator:
		createFn := func() (IntegerPointAggregator, IntegerPointEmitter) {
			fn := NewIntegerFuncReducer(IntegerLastReduce, nil)
			return fn, fn
		}
		return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil
	case StringIterator:
		createFn := func() (StringPointAggregator, StringPointEmitter) {
			fn := NewStringFuncReducer(StringLastReduce, nil)
			return fn, fn
		}
		return &stringReduceStringIterator{input: newBufStringIterator(input), opt: opt, create: createFn}, nil
	case BooleanIterator:
		createFn := func() (BooleanPointAggregator, BooleanPointEmitter) {
			fn := NewBooleanFuncReducer(BooleanLastReduce, nil)
			return fn, fn
		}
		return &booleanReduceBooleanIterator{input: newBufBooleanIterator(input), opt: opt, create: createFn}, nil
	default:
		return nil, fmt.Errorf("unsupported last iterator type: %T", input)
	}
}
// FloatLastReduce returns the last point sorted by time; ties on time keep
// the larger value.
func FloatLastReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) {
	if prev != nil && (prev.Time > curr.Time || (prev.Time == curr.Time && prev.Value >= curr.Value)) {
		return prev.Time, prev.Value, prev.Aux
	}
	return curr.Time, curr.Value, curr.Aux
}
// IntegerLastReduce returns the last point sorted by time; ties on time keep
// the larger value.
func IntegerLastReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) {
	if prev != nil && (prev.Time > curr.Time || (prev.Time == curr.Time && prev.Value >= curr.Value)) {
		return prev.Time, prev.Value, prev.Aux
	}
	return curr.Time, curr.Value, curr.Aux
}
// StringLastReduce returns the last point sorted by time; ties on time keep
// the lexicographically larger value.
func StringLastReduce(prev, curr *StringPoint) (int64, string, []interface{}) {
	if prev != nil && (prev.Time > curr.Time || (prev.Time == curr.Time && prev.Value >= curr.Value)) {
		return prev.Time, prev.Value, prev.Aux
	}
	return curr.Time, curr.Value, curr.Aux
}
// BooleanLastReduce returns the last point sorted by time; ties on time
// prefer a true value over a false one.
func BooleanLastReduce(prev, curr *BooleanPoint) (int64, bool, []interface{}) {
	if prev != nil && (prev.Time > curr.Time || (prev.Time == curr.Time && (prev.Value || !curr.Value))) {
		return prev.Time, prev.Value, prev.Aux
	}
	return curr.Time, curr.Value, curr.Aux
}
// NewDistinctIterator returns an iterator for operating on a distinct() call.
// It dispatches on the concrete point type of the input and creates a
// DistinctReducer per group/window; the reducer is both aggregator and
// emitter.
func NewDistinctIterator(input Iterator, opt IteratorOptions) (Iterator, error) {
	switch input := input.(type) {
	case FloatIterator:
		createFn := func() (FloatPointAggregator, FloatPointEmitter) {
			fn := NewFloatDistinctReducer()
			return fn, fn
		}
		return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil
	case IntegerIterator:
		createFn := func() (IntegerPointAggregator, IntegerPointEmitter) {
			fn := NewIntegerDistinctReducer()
			return fn, fn
		}
		return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil
	case StringIterator:
		createFn := func() (StringPointAggregator, StringPointEmitter) {
			fn := NewStringDistinctReducer()
			return fn, fn
		}
		return &stringReduceStringIterator{input: newBufStringIterator(input), opt: opt, create: createFn}, nil
	case BooleanIterator:
		createFn := func() (BooleanPointAggregator, BooleanPointEmitter) {
			fn := NewBooleanDistinctReducer()
			return fn, fn
		}
		return &booleanReduceBooleanIterator{input: newBufBooleanIterator(input), opt: opt, create: createFn}, nil
	default:
		return nil, fmt.Errorf("unsupported distinct iterator type: %T", input)
	}
}
// newMeanIterator returns an iterator for operating on a mean() call.
// Only float and integer inputs are supported; note the integer case emits
// float points, since a mean of integers is generally fractional.
func newMeanIterator(input Iterator, opt IteratorOptions) (Iterator, error) {
	switch input := input.(type) {
	case FloatIterator:
		createFn := func() (FloatPointAggregator, FloatPointEmitter) {
			fn := NewFloatMeanReducer()
			return fn, fn
		}
		return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil
	case IntegerIterator:
		// Integer input, float output: the reducer aggregates integers but
		// emits a float mean.
		createFn := func() (IntegerPointAggregator, FloatPointEmitter) {
			fn := NewIntegerMeanReducer()
			return fn, fn
		}
		return &integerReduceFloatIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil
	default:
		return nil, fmt.Errorf("unsupported mean iterator type: %T", input)
	}
}
// NewMedianIterator returns an iterator for operating on a median() call.
// It is the exported wrapper around newMedianIterator.
func NewMedianIterator(input Iterator, opt IteratorOptions) (Iterator, error) {
	return newMedianIterator(input, opt)
}
// newMedianIterator returns an iterator for operating on a median() call.
// Only float and integer inputs are supported; the integer case emits float
// points because the median of an even-sized integer window is fractional.
func newMedianIterator(input Iterator, opt IteratorOptions) (Iterator, error) {
	switch input := input.(type) {
	case FloatIterator:
		createFn := func() (FloatPointAggregator, FloatPointEmitter) {
			fn := NewFloatSliceFuncReducer(FloatMedianReduceSlice)
			return fn, fn
		}
		return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil
	case IntegerIterator:
		createFn := func() (IntegerPointAggregator, FloatPointEmitter) {
			fn := NewIntegerSliceFuncFloatReducer(IntegerMedianReduceSlice)
			return fn, fn
		}
		return &integerReduceFloatIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil
	default:
		return nil, fmt.Errorf("unsupported median iterator type: %T", input)
	}
}
// FloatMedianReduceSlice returns the median value within a window.
// The input is sorted by value in place; an even-sized window yields the mean
// of the two middle values, and the result carries no timestamp.
func FloatMedianReduceSlice(a []FloatPoint) []FloatPoint {
	if len(a) == 1 {
		return a
	}
	// OPTIMIZE(benbjohnson): Use getSortedRange() from v0.9.5.1.
	sort.Sort(floatPointsByValue(a))
	mid := len(a) / 2
	if len(a)%2 != 0 {
		return []FloatPoint{{Time: ZeroTime, Value: a[mid].Value}}
	}
	lo, hi := a[mid-1].Value, a[mid].Value
	return []FloatPoint{{Time: ZeroTime, Value: lo + (hi-lo)/2}}
}
// IntegerMedianReduceSlice returns the median value within a window as a
// float point. The input is sorted by value in place; an even-sized window
// yields the mean of the two middle values.
func IntegerMedianReduceSlice(a []IntegerPoint) []FloatPoint {
	if len(a) == 1 {
		return []FloatPoint{{Time: ZeroTime, Value: float64(a[0].Value)}}
	}
	// OPTIMIZE(benbjohnson): Use getSortedRange() from v0.9.5.1.
	sort.Sort(integerPointsByValue(a))
	mid := len(a) / 2
	if len(a)%2 != 0 {
		return []FloatPoint{{Time: ZeroTime, Value: float64(a[mid].Value)}}
	}
	lo, hi := a[mid-1].Value, a[mid].Value
	return []FloatPoint{{Time: ZeroTime, Value: float64(lo) + float64(hi-lo)/2}}
}
// NewModeIterator returns an iterator for operating on a mode() call.
// It dispatches on the concrete point type of the input and wraps the
// matching *ModeReduceSlice function in a SliceFuncReducer.
func NewModeIterator(input Iterator, opt IteratorOptions) (Iterator, error) {
	switch input := input.(type) {
	case FloatIterator:
		createFn := func() (FloatPointAggregator, FloatPointEmitter) {
			fn := NewFloatSliceFuncReducer(FloatModeReduceSlice)
			return fn, fn
		}
		return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil
	case IntegerIterator:
		createFn := func() (IntegerPointAggregator, IntegerPointEmitter) {
			fn := NewIntegerSliceFuncReducer(IntegerModeReduceSlice)
			return fn, fn
		}
		return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil
	case StringIterator:
		createFn := func() (StringPointAggregator, StringPointEmitter) {
			fn := NewStringSliceFuncReducer(StringModeReduceSlice)
			return fn, fn
		}
		return &stringReduceStringIterator{input: newBufStringIterator(input), opt: opt, create: createFn}, nil
	case BooleanIterator:
		createFn := func() (BooleanPointAggregator, BooleanPointEmitter) {
			fn := NewBooleanSliceFuncReducer(BooleanModeReduceSlice)
			return fn, fn
		}
		return &booleanReduceBooleanIterator{input: newBufBooleanIterator(input), opt: opt, create: createFn}, nil
	default:
		// Was "unsupported median iterator type" — copy-paste from the
		// median constructor; this is the mode iterator.
		return nil, fmt.Errorf("unsupported mode iterator type: %T", input)
	}
}
// FloatModeReduceSlice returns the mode value within a window.
// The input is sorted by value in place so equal values form runs; the
// longest run wins, and frequency ties are broken by comparing the run's
// starting timestamp against the timestamp last recorded for the best run.
// The returned point carries no timestamp.
func FloatModeReduceSlice(a []FloatPoint) []FloatPoint {
	if len(a) == 1 {
		return a
	}
	// (removed leftover debug print that was commented out here)
	sort.Sort(floatPointsByValue(a))
	mostFreq := 0
	currFreq := 0
	currMode := a[0].Value
	mostMode := a[0].Value
	mostTime := a[0].Time
	currTime := a[0].Time
	for _, p := range a {
		if p.Value != currMode {
			// A new run of equal values starts at p.
			currFreq = 1
			currMode = p.Value
			currTime = p.Time
			continue
		}
		currFreq++
		// Skip unless the current run is strictly longer, or equally long
		// without a later start time.
		if mostFreq > currFreq || (mostFreq == currFreq && currTime > mostTime) {
			continue
		}
		mostFreq = currFreq
		mostMode = p.Value
		mostTime = p.Time
	}
	return []FloatPoint{{Time: ZeroTime, Value: mostMode}}
}
// IntegerModeReduceSlice returns the mode value within a window.
// Equal values are grouped into runs by sorting in place; the longest run
// wins, with frequency ties broken against the run's starting timestamp.
func IntegerModeReduceSlice(a []IntegerPoint) []IntegerPoint {
	if len(a) == 1 {
		return a
	}
	sort.Sort(integerPointsByValue(a))
	var (
		bestFreq, runFreq int
		runVal            = a[0].Value
		bestVal           = a[0].Value
		bestTime          = a[0].Time
		runTime           = a[0].Time
	)
	for _, p := range a {
		if p.Value != runVal {
			runFreq, runVal, runTime = 1, p.Value, p.Time
			continue
		}
		runFreq++
		if runFreq > bestFreq || (runFreq == bestFreq && runTime <= bestTime) {
			bestFreq, bestVal, bestTime = runFreq, p.Value, p.Time
		}
	}
	return []IntegerPoint{{Time: ZeroTime, Value: bestVal}}
}
// StringModeReduceSlice returns the mode value within a window.
// Equal values are grouped into runs by sorting in place; the longest run
// wins, with frequency ties broken against the run's starting timestamp.
func StringModeReduceSlice(a []StringPoint) []StringPoint {
	if len(a) == 1 {
		return a
	}
	sort.Sort(stringPointsByValue(a))
	var (
		bestFreq, runFreq int
		runVal            = a[0].Value
		bestVal           = a[0].Value
		bestTime          = a[0].Time
		runTime           = a[0].Time
	)
	for _, p := range a {
		if p.Value != runVal {
			runFreq, runVal, runTime = 1, p.Value, p.Time
			continue
		}
		runFreq++
		if runFreq > bestFreq || (runFreq == bestFreq && runTime <= bestTime) {
			bestFreq, bestVal, bestTime = runFreq, p.Value, p.Time
		}
	}
	return []StringPoint{{Time: ZeroTime, Value: bestVal}}
}
// BooleanModeReduceSlice returns the mode value within a window.
// A tie between true and false resolves to true; the returned point carries
// no timestamp (so it is not the point with the oldest timestamp).
func BooleanModeReduceSlice(a []BooleanPoint) []BooleanPoint {
	if len(a) == 1 {
		return a
	}
	trues := 0
	for _, p := range a {
		if p.Value {
			trues++
		}
	}
	// true wins when it occurs at least as often as false.
	return []BooleanPoint{{Time: ZeroTime, Value: 2*trues >= len(a)}}
}
// newStddevIterator returns an iterator for operating on a stddev() call.
// Float and integer inputs produce a float stddev; string input is accepted
// but always reduces to "" (see StringStddevReduceSlice).
func newStddevIterator(input Iterator, opt IteratorOptions) (Iterator, error) {
	switch input := input.(type) {
	case FloatIterator:
		createFn := func() (FloatPointAggregator, FloatPointEmitter) {
			fn := NewFloatSliceFuncReducer(FloatStddevReduceSlice)
			return fn, fn
		}
		return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil
	case IntegerIterator:
		// Integer input emits float points: stddev is generally fractional.
		createFn := func() (IntegerPointAggregator, FloatPointEmitter) {
			fn := NewIntegerSliceFuncFloatReducer(IntegerStddevReduceSlice)
			return fn, fn
		}
		return &integerReduceFloatIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil
	case StringIterator:
		createFn := func() (StringPointAggregator, StringPointEmitter) {
			fn := NewStringSliceFuncReducer(StringStddevReduceSlice)
			return fn, fn
		}
		return &stringReduceStringIterator{input: newBufStringIterator(input), opt: opt, create: createFn}, nil
	default:
		return nil, fmt.Errorf("unsupported stddev iterator type: %T", input)
	}
}
// FloatStddevReduceSlice returns the sample standard deviation of a window,
// skipping NaN values. Windows with fewer than two points yield a nil point.
// NOTE(review): if every value is NaN, count stays 0 and the division by
// count-1 produces a non-finite result — confirm upstream never sends
// all-NaN windows.
func FloatStddevReduceSlice(a []FloatPoint) []FloatPoint {
	if len(a) < 2 {
		return []FloatPoint{{Time: ZeroTime, Nil: true}}
	}
	// Running (Welford-style) mean over the non-NaN values.
	var (
		mean  float64
		count int
	)
	for i := range a {
		if math.IsNaN(a[i].Value) {
			continue
		}
		count++
		mean += (a[i].Value - mean) / float64(count)
	}
	// Sum of squared deviations from that mean.
	var sumSq float64
	for i := range a {
		if !math.IsNaN(a[i].Value) {
			sumSq += math.Pow(a[i].Value-mean, 2)
		}
	}
	return []FloatPoint{{
		Time:  ZeroTime,
		Value: math.Sqrt(sumSq / float64(count-1)),
	}}
}
// IntegerStddevReduceSlice returns the sample standard deviation of a window
// as a float point. Windows with fewer than two points yield a nil point.
func IntegerStddevReduceSlice(a []IntegerPoint) []FloatPoint {
	if len(a) < 2 {
		return []FloatPoint{{Time: ZeroTime, Nil: true}}
	}
	// Running mean over all values.
	var (
		mean  float64
		count int
	)
	for i := range a {
		count++
		mean += (float64(a[i].Value) - mean) / float64(count)
	}
	// Sum of squared deviations from that mean.
	var sumSq float64
	for i := range a {
		sumSq += math.Pow(float64(a[i].Value)-mean, 2)
	}
	return []FloatPoint{{
		Time:  ZeroTime,
		Value: math.Sqrt(sumSq / float64(count-1)),
	}}
}
// StringStddevReduceSlice always returns "" — stddev has no meaning for
// strings, but the reducer must still emit a point.
func StringStddevReduceSlice(a []StringPoint) []StringPoint {
	result := StringPoint{Time: ZeroTime, Value: ""}
	return []StringPoint{result}
}
// newSpreadIterator returns an iterator for operating on a spread() call.
// Only float and integer inputs are supported; spread is max-min over a
// window.
func newSpreadIterator(input Iterator, opt IteratorOptions) (Iterator, error) {
	switch input := input.(type) {
	case FloatIterator:
		createFn := func() (FloatPointAggregator, FloatPointEmitter) {
			fn := NewFloatSliceFuncReducer(FloatSpreadReduceSlice)
			return fn, fn
		}
		return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil
	case IntegerIterator:
		createFn := func() (IntegerPointAggregator, IntegerPointEmitter) {
			fn := NewIntegerSliceFuncReducer(IntegerSpreadReduceSlice)
			return fn, fn
		}
		return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil
	default:
		return nil, fmt.Errorf("unsupported spread iterator type: %T", input)
	}
}
// FloatSpreadReduceSlice returns the spread (max - min) within a window.
// Assumes a is non-empty, as guaranteed by the slice reducer.
func FloatSpreadReduceSlice(a []FloatPoint) []FloatPoint {
	lo, hi := a[0].Value, a[0].Value
	for i := 1; i < len(a); i++ {
		lo = math.Min(lo, a[i].Value)
		hi = math.Max(hi, a[i].Value)
	}
	return []FloatPoint{{Time: ZeroTime, Value: hi - lo}}
}
// IntegerSpreadReduceSlice returns the spread (max - min) within a window.
// Assumes a is non-empty, as guaranteed by the slice reducer.
func IntegerSpreadReduceSlice(a []IntegerPoint) []IntegerPoint {
	lo, hi := a[0].Value, a[0].Value
	for i := 1; i < len(a); i++ {
		switch v := a[i].Value; {
		case v < lo:
			lo = v
		case v > hi:
			hi = v
		}
	}
	return []IntegerPoint{{Time: ZeroTime, Value: hi - lo}}
}
// newTopIterator returns an iterator for operating on a top() call.
// n is the number of points requested; tags lists Aux indices used to
// deduplicate by tag combination before selecting.
func newTopIterator(input Iterator, opt IteratorOptions, n *IntegerLiteral, tags []int) (Iterator, error) {
	switch input := input.(type) {
	case FloatIterator:
		// The reduce-slice closure is shared across windows; only the reducer
		// wrapping it is created per group/window.
		aggregateFn := NewFloatTopReduceSliceFunc(int(n.Val), tags, opt.Interval)
		createFn := func() (FloatPointAggregator, FloatPointEmitter) {
			fn := NewFloatSliceFuncReducer(aggregateFn)
			return fn, fn
		}
		return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil
	case IntegerIterator:
		aggregateFn := NewIntegerTopReduceSliceFunc(int(n.Val), tags, opt.Interval)
		createFn := func() (IntegerPointAggregator, IntegerPointEmitter) {
			fn := NewIntegerSliceFuncReducer(aggregateFn)
			return fn, fn
		}
		return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil
	default:
		return nil, fmt.Errorf("unsupported top iterator type: %T", input)
	}
}
// NewFloatTopReduceSliceFunc returns a reduce-slice function selecting the
// top n values within a window. tags lists Aux indices used to keep only the
// best point per unique tag combination; interval decides whether result
// timestamps are zeroed (interval grouping) or preserved and time-sorted.
func NewFloatTopReduceSliceFunc(n int, tags []int, interval Interval) FloatReduceSliceFunc {
	return func(a []FloatPoint) []FloatPoint {
		// Filter by tags if they exist: keep the highest value per tag
		// combination, breaking ties by earliest time.
		if len(tags) > 0 {
			a = filterFloatByUniqueTags(a, tags, func(cur, p *FloatPoint) bool {
				return p.Value > cur.Value || (p.Value == cur.Value && p.Time < cur.Time)
			})
		}
		// If we ask for more elements than exist, restrict n to be the length of the array.
		size := n
		if size > len(a) {
			size = len(a)
		}
		// Construct a heap preferring higher values and breaking ties
		// based on the earliest time for a point.
		h := floatPointsSortBy(a, func(a, b *FloatPoint) bool {
			if a.Value != b.Value {
				return a.Value > b.Value
			}
			return a.Time < b.Time
		})
		heap.Init(h)
		// Pop the first n elements and then sort by time.
		points := make([]FloatPoint, 0, size)
		for i := 0; i < size; i++ {
			p := heap.Pop(h).(FloatPoint)
			points = append(points, p)
		}
		// Either zero out all timestamps or sort the points by time,
		// depending on if a time interval was given or not.
		if !interval.IsZero() {
			for i := range points {
				points[i].Time = ZeroTime
			}
		} else {
			sort.Stable(floatPointsByTime(points))
		}
		return points
	}
}
// NewIntegerTopReduceSliceFunc returns a reduce-slice function selecting the
// top n values within a window. tags lists Aux indices used to keep only the
// best point per unique tag combination; interval decides whether result
// timestamps are zeroed (interval grouping) or preserved and time-sorted.
func NewIntegerTopReduceSliceFunc(n int, tags []int, interval Interval) IntegerReduceSliceFunc {
	return func(a []IntegerPoint) []IntegerPoint {
		// Filter by tags if they exist: keep the highest value per tag
		// combination, breaking ties by earliest time.
		if len(tags) > 0 {
			a = filterIntegerByUniqueTags(a, tags, func(cur, p *IntegerPoint) bool {
				return p.Value > cur.Value || (p.Value == cur.Value && p.Time < cur.Time)
			})
		}
		// If we ask for more elements than exist, restrict n to be the length of the array.
		size := n
		if size > len(a) {
			size = len(a)
		}
		// Construct a heap preferring higher values and breaking ties
		// based on the earliest time for a point.
		h := integerPointsSortBy(a, func(a, b *IntegerPoint) bool {
			if a.Value != b.Value {
				return a.Value > b.Value
			}
			return a.Time < b.Time
		})
		heap.Init(h)
		// Pop the first n elements and then sort by time.
		points := make([]IntegerPoint, 0, size)
		for i := 0; i < size; i++ {
			p := heap.Pop(h).(IntegerPoint)
			points = append(points, p)
		}
		// Either zero out all timestamps or sort the points by time,
		// depending on if a time interval was given or not.
		if !interval.IsZero() {
			for i := range points {
				points[i].Time = ZeroTime
			}
		} else {
			sort.Stable(integerPointsByTime(points))
		}
		return points
	}
}
// newBottomIterator returns an iterator for operating on a bottom() call.
// n is the number of points requested; tags lists Aux indices used to
// deduplicate by tag combination before selecting.
func newBottomIterator(input Iterator, opt IteratorOptions, n *IntegerLiteral, tags []int) (Iterator, error) {
	switch input := input.(type) {
	case FloatIterator:
		aggregateFn := NewFloatBottomReduceSliceFunc(int(n.Val), tags, opt.Interval)
		createFn := func() (FloatPointAggregator, FloatPointEmitter) {
			fn := NewFloatSliceFuncReducer(aggregateFn)
			return fn, fn
		}
		return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil
	case IntegerIterator:
		aggregateFn := NewIntegerBottomReduceSliceFunc(int(n.Val), tags, opt.Interval)
		createFn := func() (IntegerPointAggregator, IntegerPointEmitter) {
			fn := NewIntegerSliceFuncReducer(aggregateFn)
			return fn, fn
		}
		return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil
	default:
		return nil, fmt.Errorf("unsupported bottom iterator type: %T", input)
	}
}
// NewFloatBottomReduceSliceFunc returns a reduce-slice function selecting the
// bottom n values within a window. tags lists Aux indices used to keep only
// the best point per unique tag combination; interval decides whether result
// timestamps are zeroed (interval grouping) or preserved and time-sorted.
func NewFloatBottomReduceSliceFunc(n int, tags []int, interval Interval) FloatReduceSliceFunc {
	return func(a []FloatPoint) []FloatPoint {
		// Filter by tags if they exist: keep the lowest value per tag
		// combination, breaking ties by earliest time.
		if len(tags) > 0 {
			a = filterFloatByUniqueTags(a, tags, func(cur, p *FloatPoint) bool {
				return p.Value < cur.Value || (p.Value == cur.Value && p.Time < cur.Time)
			})
		}
		// If we ask for more elements than exist, restrict n to be the length of the array.
		size := n
		if size > len(a) {
			size = len(a)
		}
		// Construct a heap preferring lower values and breaking ties
		// based on the earliest time for a point.
		h := floatPointsSortBy(a, func(a, b *FloatPoint) bool {
			if a.Value != b.Value {
				return a.Value < b.Value
			}
			return a.Time < b.Time
		})
		heap.Init(h)
		// Pop the first n elements and then sort by time.
		points := make([]FloatPoint, 0, size)
		for i := 0; i < size; i++ {
			p := heap.Pop(h).(FloatPoint)
			points = append(points, p)
		}
		// Either zero out all timestamps or sort the points by time,
		// depending on if a time interval was given or not.
		if !interval.IsZero() {
			for i := range points {
				points[i].Time = ZeroTime
			}
		} else {
			sort.Stable(floatPointsByTime(points))
		}
		return points
	}
}
// NewIntegerBottomReduceSliceFunc returns a reduce-slice function selecting
// the bottom n values within a window. tags lists Aux indices used to keep
// only the best point per unique tag combination; interval decides whether
// result timestamps are zeroed (interval grouping) or preserved and
// time-sorted.
func NewIntegerBottomReduceSliceFunc(n int, tags []int, interval Interval) IntegerReduceSliceFunc {
	return func(a []IntegerPoint) []IntegerPoint {
		// Filter by tags if they exist: keep the lowest value per tag
		// combination, breaking ties by earliest time.
		if len(tags) > 0 {
			a = filterIntegerByUniqueTags(a, tags, func(cur, p *IntegerPoint) bool {
				return p.Value < cur.Value || (p.Value == cur.Value && p.Time < cur.Time)
			})
		}
		// If we ask for more elements than exist, restrict n to be the length of the array.
		size := n
		if size > len(a) {
			size = len(a)
		}
		// Construct a heap preferring lower values and breaking ties
		// based on the earliest time for a point.
		h := integerPointsSortBy(a, func(a, b *IntegerPoint) bool {
			if a.Value != b.Value {
				return a.Value < b.Value
			}
			return a.Time < b.Time
		})
		heap.Init(h)
		// Pop the first n elements and then sort by time.
		points := make([]IntegerPoint, 0, size)
		for i := 0; i < size; i++ {
			p := heap.Pop(h).(IntegerPoint)
			points = append(points, p)
		}
		// Either zero out all timestamps or sort the points by time,
		// depending on if a time interval was given or not.
		if !interval.IsZero() {
			for i := range points {
				points[i].Time = ZeroTime
			}
		} else {
			sort.Stable(integerPointsByTime(points))
		}
		return points
	}
}
// filterFloatByUniqueTags keeps one point per unique combination of the Aux
// values at the given tag indices. When two points share a key, cmpFunc
// decides whether the new point p should replace the current winner cur.
// The returned order is unspecified (map iteration).
func filterFloatByUniqueTags(a []FloatPoint, tags []int, cmpFunc func(cur, p *FloatPoint) bool) []FloatPoint {
	pointMap := make(map[string]FloatPoint)
	// Reuse one buffer for key construction instead of allocating a new
	// bytes.Buffer per point; String() copies, so reuse is safe.
	var keyBuf bytes.Buffer
	for i := range a {
		p := a[i]
		keyBuf.Reset()
		for j, index := range tags {
			if j > 0 {
				keyBuf.WriteByte(',')
			}
			fmt.Fprintf(&keyBuf, "%s", p.Aux[index])
		}
		key := keyBuf.String()
		if cur, ok := pointMap[key]; !ok || cmpFunc(&cur, &p) {
			pointMap[key] = p
		}
	}
	// Recreate the original array with our new filtered list.
	points := make([]FloatPoint, 0, len(pointMap))
	for _, p := range pointMap {
		points = append(points, p)
	}
	return points
}
// filterIntegerByUniqueTags keeps one point per unique combination of the
// Aux values at the given tag indices. When two points share a key, cmpFunc
// decides whether the new point p should replace the current winner cur.
// The returned order is unspecified (map iteration).
func filterIntegerByUniqueTags(a []IntegerPoint, tags []int, cmpFunc func(cur, p *IntegerPoint) bool) []IntegerPoint {
	pointMap := make(map[string]IntegerPoint)
	// Reuse one buffer for key construction instead of allocating a new
	// bytes.Buffer per point; String() copies, so reuse is safe.
	var keyBuf bytes.Buffer
	for i := range a {
		p := a[i]
		keyBuf.Reset()
		for j, index := range tags {
			if j > 0 {
				keyBuf.WriteByte(',')
			}
			fmt.Fprintf(&keyBuf, "%s", p.Aux[index])
		}
		key := keyBuf.String()
		if cur, ok := pointMap[key]; !ok || cmpFunc(&cur, &p) {
			pointMap[key] = p
		}
	}
	// Recreate the original array with our new filtered list.
	points := make([]IntegerPoint, 0, len(pointMap))
	for _, p := range pointMap {
		points = append(points, p)
	}
	return points
}
// newPercentileIterator returns an iterator for operating on a percentile()
// call. Only float and integer inputs are supported; the percentile
// reduce-slice closure is shared across windows.
func newPercentileIterator(input Iterator, opt IteratorOptions, percentile float64) (Iterator, error) {
	switch input := input.(type) {
	case FloatIterator:
		floatPercentileReduceSlice := NewFloatPercentileReduceSliceFunc(percentile)
		createFn := func() (FloatPointAggregator, FloatPointEmitter) {
			fn := NewFloatSliceFuncReducer(floatPercentileReduceSlice)
			return fn, fn
		}
		return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil
	case IntegerIterator:
		integerPercentileReduceSlice := NewIntegerPercentileReduceSliceFunc(percentile)
		createFn := func() (IntegerPointAggregator, IntegerPointEmitter) {
			fn := NewIntegerSliceFuncReducer(integerPercentileReduceSlice)
			return fn, fn
		}
		return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil
	default:
		return nil, fmt.Errorf("unsupported percentile iterator type: %T", input)
	}
}
// NewFloatPercentileReduceSliceFunc returns a reduce-slice function selecting
// the nearest-rank percentile value within a window, sorting by value in
// place. Out-of-range ranks (e.g. percentile <= 0 on small windows) yield nil.
func NewFloatPercentileReduceSliceFunc(percentile float64) FloatReduceSliceFunc {
	return func(a []FloatPoint) []FloatPoint {
		// Nearest-rank index; depends only on the window length, so it can be
		// computed before sorting.
		idx := int(math.Floor(float64(len(a))*percentile/100.0+0.5)) - 1
		if idx < 0 || idx >= len(a) {
			return nil
		}
		sort.Sort(floatPointsByValue(a))
		p := a[idx]
		return []FloatPoint{{Time: p.Time, Value: p.Value, Aux: p.Aux}}
	}
}
// NewIntegerPercentileReduceSliceFunc returns a reduce-slice function
// selecting the nearest-rank percentile value within a window, sorting by
// value in place. Out-of-range ranks yield nil.
// NOTE(review): unlike the float version, the result's Time is ZeroTime
// rather than the selected point's time — confirm whether this asymmetry is
// intentional.
func NewIntegerPercentileReduceSliceFunc(percentile float64) IntegerReduceSliceFunc {
	return func(a []IntegerPoint) []IntegerPoint {
		// Nearest-rank index; depends only on the window length, so it can be
		// computed before sorting.
		idx := int(math.Floor(float64(len(a))*percentile/100.0+0.5)) - 1
		if idx < 0 || idx >= len(a) {
			return nil
		}
		sort.Sort(integerPointsByValue(a))
		p := a[idx]
		return []IntegerPoint{{Time: ZeroTime, Value: p.Value, Aux: p.Aux}}
	}
}
// newDerivativeIterator returns an iterator for operating on a derivative()
// call. interval is the normalization unit; isNonNegative clamps results for
// non_negative_derivative(). Both supported input types emit float points,
// using streaming (not buffered-window) iterators.
func newDerivativeIterator(input Iterator, opt IteratorOptions, interval Interval, isNonNegative bool) (Iterator, error) {
	switch input := input.(type) {
	case FloatIterator:
		createFn := func() (FloatPointAggregator, FloatPointEmitter) {
			fn := NewFloatDerivativeReducer(interval, isNonNegative, opt.Ascending)
			return fn, fn
		}
		return newFloatStreamFloatIterator(input, createFn, opt), nil
	case IntegerIterator:
		createFn := func() (IntegerPointAggregator, FloatPointEmitter) {
			fn := NewIntegerDerivativeReducer(interval, isNonNegative, opt.Ascending)
			return fn, fn
		}
		return newIntegerStreamFloatIterator(input, createFn, opt), nil
	default:
		return nil, fmt.Errorf("unsupported derivative iterator type: %T", input)
	}
}
// newDifferenceIterator returns an iterator for operating on a difference()
// call. Integer input stays integer (exact subtraction); both paths use
// streaming iterators.
func newDifferenceIterator(input Iterator, opt IteratorOptions) (Iterator, error) {
	switch input := input.(type) {
	case FloatIterator:
		createFn := func() (FloatPointAggregator, FloatPointEmitter) {
			fn := NewFloatDifferenceReducer()
			return fn, fn
		}
		return newFloatStreamFloatIterator(input, createFn, opt), nil
	case IntegerIterator:
		createFn := func() (IntegerPointAggregator, IntegerPointEmitter) {
			fn := NewIntegerDifferenceReducer()
			return fn, fn
		}
		return newIntegerStreamIntegerIterator(input, createFn, opt), nil
	default:
		return nil, fmt.Errorf("unsupported difference iterator type: %T", input)
	}
}
// newElapsedIterator returns an iterator for operating on a elapsed() call.
// All point types are supported and all emit integer points (time deltas in
// units of interval), via streaming iterators.
func newElapsedIterator(input Iterator, opt IteratorOptions, interval Interval) (Iterator, error) {
	switch input := input.(type) {
	case FloatIterator:
		createFn := func() (FloatPointAggregator, IntegerPointEmitter) {
			fn := NewFloatElapsedReducer(interval)
			return fn, fn
		}
		return newFloatStreamIntegerIterator(input, createFn, opt), nil
	case IntegerIterator:
		createFn := func() (IntegerPointAggregator, IntegerPointEmitter) {
			fn := NewIntegerElapsedReducer(interval)
			return fn, fn
		}
		return newIntegerStreamIntegerIterator(input, createFn, opt), nil
	case BooleanIterator:
		createFn := func() (BooleanPointAggregator, IntegerPointEmitter) {
			fn := NewBooleanElapsedReducer(interval)
			return fn, fn
		}
		return newBooleanStreamIntegerIterator(input, createFn, opt), nil
	case StringIterator:
		createFn := func() (StringPointAggregator, IntegerPointEmitter) {
			fn := NewStringElapsedReducer(interval)
			return fn, fn
		}
		return newStringStreamIntegerIterator(input, createFn, opt), nil
	default:
		return nil, fmt.Errorf("unsupported elapsed iterator type: %T", input)
	}
}
// newMovingAverageIterator returns an iterator for operating on a
// moving_average() call with window size n. Integer input emits float points
// since the average is generally fractional; both paths are streaming.
func newMovingAverageIterator(input Iterator, n int, opt IteratorOptions) (Iterator, error) {
	switch input := input.(type) {
	case FloatIterator:
		createFn := func() (FloatPointAggregator, FloatPointEmitter) {
			fn := NewFloatMovingAverageReducer(n)
			return fn, fn
		}
		return newFloatStreamFloatIterator(input, createFn, opt), nil
	case IntegerIterator:
		createFn := func() (IntegerPointAggregator, FloatPointEmitter) {
			fn := NewIntegerMovingAverageReducer(n)
			return fn, fn
		}
		return newIntegerStreamFloatIterator(input, createFn, opt), nil
	default:
		return nil, fmt.Errorf("unsupported moving average iterator type: %T", input)
	}
}
// newHoltWintersIterator returns an iterator for operating on a elapsed() call.
func newHoltWintersIterator(input Iterator, opt IteratorOptions, h, m int, includeFitData bool, interval time.Duration) (Iterator, error) {
switch input := input.(type) {
case FloatIterator:
createFn := func() (FloatPointAggregator, FloatPointEmitter) {
fn := NewFloatHoltWintersReducer(h, m, includeFitData, interval)
return fn, fn
}
return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil
case IntegerIterator:
createFn := func() (IntegerPointAggregator, FloatPointEmitter) {
fn := NewFloatHoltWintersReducer(h, m, includeFitData, interval)
return fn, fn
}
return &integerReduceFloatIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil
default:
return nil, fmt.Errorf("unsupported elapsed iterator type: %T", input)
}
} | influxql/call_iterator.go | 0.741768 | 0.476397 | call_iterator.go | starcoder |
package trie
// Trie is a trie. It doesn't support unicode strings (keys are treated as
// raw byte sequences).
type Trie struct {
	name     byte                // byte this node represents; 0 marks the root
	terminal bool                // true if a stored string ends at this node
	children []*Trie             // child nodes, unordered; searched linearly
	pfxMap   map[string][]string // lazily-built cache of WithPrefix results
}
// getNode returns the child of t labeled c, or nil if no such child exists.
// Children are scanned linearly.
func (t *Trie) getNode(c byte) *Trie {
	for i := range t.children {
		if child := t.children[i]; child.name == c {
			return child
		}
	}
	return nil
}
// addChar returns the child of t labeled c, creating and appending it first
// if it does not already exist.
func (t *Trie) addChar(c byte) *Trie {
	if existing := t.getNode(c); existing != nil {
		return existing
	}
	child := &Trie{name: c}
	t.children = append(t.children, child)
	return child
}
// Add adds the string to the trie, creating intermediate nodes as needed and
// marking the final node terminal.
func (t *Trie) Add(s string) {
	node := t
	for i := 0; i < len(s); i++ {
		node = node.addChar(s[i])
	}
	node.terminal = true
}
// HasPrefix returns whether any entry in the trie has s as its prefix.
func (t *Trie) HasPrefix(s string) bool {
	node := t
	for i := 0; i < len(s); i++ {
		node = node.getNode(s[i])
		if node == nil {
			return false
		}
	}
	return true
}
// HasString returns whether the given string is in the trie as a
// complete entry (i.e. the walk ends on a terminal node).
func (t *Trie) HasString(s string) bool {
	node := t
	for i := 0; i < len(s); i++ {
		node = node.getNode(s[i])
		if node == nil {
			return false
		}
	}
	return node.terminal
}
// substrings returns every terminal string reachable from t, each prefixed
// with t's own name byte. NOTE(review): when called on the root, whose name
// is 0, every result is prefixed with a NUL byte — confirm callers account
// for this (oldWithPrefix("") hits this case).
func (t *Trie) substrings() []string {
	prefix := string(t.name)
	var out []string
	for _, child := range t.children {
		for _, suffix := range child.substrings() {
			out = append(out, prefix+suffix)
		}
	}
	if t.terminal {
		out = append(out, prefix)
	}
	return out
}
// subbytes appends to *strs every terminal string reachable from t, each
// prefixed by soFar. Sibling subtrees may overwrite each other's tail of the
// shared soFar backing array, which is safe because string(soFar) copies
// before the next sibling writes. The root's zero-valued name is skipped so
// results are not prefixed with a NUL byte, matching matchBytes' treatment
// of name 0 as "". (Previously the root's 0 byte leaked into every
// WithPrefix("") result.)
func (t *Trie) subbytes(soFar []byte, strs *[]string) {
	if t.name > 0 {
		soFar = append(soFar, t.name)
	}
	for _, n := range t.children {
		n.subbytes(soFar, strs)
	}
	if t.terminal {
		*strs = append(*strs, string(soFar))
	}
}
// subtrie returns the node reached by walking s from t, or nil if the walk
// falls off the trie.
func (t *Trie) subtrie(s string) *Trie {
	node := t
	for _, c := range []byte(s) {
		node = node.getNode(c)
		if node == nil {
			return nil
		}
	}
	return node
}
// WithPrefix returns all entries in the trie that begin with the given
// prefix. Results are memoized per prefix in pfxMap; the cache is never
// invalidated, so callers must not rely on fresh results after a later Add.
func (t *Trie) WithPrefix(s string) []string {
	if t.pfxMap == nil {
		t.pfxMap = make(map[string][]string)
	}
	if m, ok := t.pfxMap[s]; ok {
		return m
	}
	var strs []string
	if len(s) == 0 {
		t.subbytes([]byte{}, &strs)
		t.pfxMap[s] = strs
		return strs
	}
	n := t.subtrie(s)
	if n == nil {
		// Cache the miss too; previously absent prefixes were re-walked on
		// every call, defeating the memoization.
		t.pfxMap[s] = nil
		return nil
	}
	for _, n := range n.children {
		n.subbytes([]byte(s), &strs)
	}
	t.pfxMap[s] = strs
	return strs
}
// oldWithPrefix is the previous WithPrefix implementation, kept only as a
// benchmark baseline. It rebuilds strings via substrings() with no caching.
// NOTE(review): for s == "" it returns root.substrings(), whose entries are
// prefixed by the root's NUL name byte — a quirk preserved for comparability.
func (t *Trie) oldWithPrefix(s string) []string {
	if len(s) == 0 {
		return t.substrings()
	}
	n := t.subtrie(s)
	if n == nil {
		return nil
	}
	var strs []string
	// substrings() already includes n's own name byte, so only the prefix up
	// to (but not including) the last character is prepended.
	for _, str := range n.substrings() {
		strs = append(strs, s[:len(s)-1]+str)
	}
	return strs
}
func (t *Trie) matchBytes(bl [][]byte, c []byte, mat *[]string) {
if t.name > 0 {
// 0 is ""
c = append(c, t.name)
}
if len(bl) == 0 {
if t.terminal {
*mat = append(*mat, string(c))
}
return
}
for _, r := range bl[0] {
n := t.getNode(r)
if n == nil {
continue
}
n.matchBytes(bl[1:], c, mat)
}
}
// Matches returns all the entries in the trie which match the given byte
// list: each returned string has its first character drawn from bl[0], its
// second from bl[1], and so on. An empty byte list matches nothing.
func (t *Trie) Matches(bl [][]byte) []string {
	if len(bl) == 0 {
		return nil
	}
	var matches []string
	t.matchBytes(bl, nil, &matches)
	return matches
}
// kept for benchmark
func (t *Trie) oldMatches(bl [][]byte) []string {
var name string
if t.name > 0 {
// the 0 code point is ""
name = string(t.name)
}
if len(bl) == 0 && t.terminal {
return []string{name}
}
if len(bl) == 0 {
return nil
}
var rtn []string
for _, r := range bl[0] {
n := t.getNode(r)
if n == nil {
continue
}
for _, mat := range n.Matches(bl[1:]) {
rtn = append(rtn, name+mat)
}
}
return rtn
} | trie/trie.go | 0.665084 | 0.427576 | trie.go | starcoder |
package vmath
import (
"math"
"math/rand"
"strconv"
)
// Vector3 is a three-dimensional vector represented by X, Y and Z values.
type Vector3 struct {
	X float64 // x component
	Y float64 // y component
	Z float64 // z component
}
// NewVector3 creates a new vector3 with the given component values.
func NewVector3(x float64, y float64, z float64) *Vector3 {
	return &Vector3{X: x, Y: y, Z: z}
}
// NewEmptyVector3 creates a new zero-valued vector3.
func NewEmptyVector3() *Vector3 {
	return new(Vector3)
}
// NewRandomVector3 creates a vector3 whose components are drawn
// independently and uniformly from [min, max) using math/rand's global
// source.
func NewRandomVector3(min float64, max float64) *Vector3 {
	var delta = max - min
	return NewVector3(rand.Float64() * delta + min, rand.Float64() * delta + min, rand.Float64() * delta + min)
}
// Set assigns the vector's components.
func (v *Vector3) Set(x float64, y float64, z float64) {
	v.X, v.Y, v.Z = x, y, z
}
// Add accumulates b into v component-wise (v += b).
func (v *Vector3) Add(b *Vector3) {
	v.X, v.Y, v.Z = v.X+b.X, v.Y+b.Y, v.Z+b.Z
}

// Sub subtracts b from v component-wise (v -= b).
func (v *Vector3) Sub(b *Vector3) {
	v.X, v.Y, v.Z = v.X-b.X, v.Y-b.Y, v.Z-b.Z
}

// Mul multiplies v by b component-wise (Hadamard product, v *= b).
func (v *Vector3) Mul(b *Vector3) {
	v.X, v.Y, v.Z = v.X*b.X, v.Y*b.Y, v.Z*b.Z
}
// Divide divides v by b component-wise (v /= b).
//
// Bug fix: this method previously multiplied the components (a copy-paste of
// Mul, down to its doc comment), so "divide" silently behaved as a second
// multiply.
func (v *Vector3) Divide(b *Vector3) {
	v.X /= b.X
	v.Y /= b.Y
	v.Z /= b.Z
}
// MulScalar scales every component of v by b.
func (v *Vector3) MulScalar(b float64) {
	v.X, v.Y, v.Z = v.X*b, v.Y*b, v.Z*b
}

// DivideScalar divides every component of v by b.
func (v *Vector3) DivideScalar(b float64) {
	v.X, v.Y, v.Z = v.X/b, v.Y/b, v.Z/b
}

// Sqrt replaces each component of v with its square root.
func (v *Vector3) Sqrt() {
	v.X, v.Y, v.Z = math.Sqrt(v.X), math.Sqrt(v.Y), math.Sqrt(v.Z)
}

// Normalize rescales v to unit length, keeping its direction.
// NOTE(review): a zero-length vector yields NaN components here — confirm
// callers never normalize the zero vector.
func (v *Vector3) Normalize() {
	v.DivideScalar(v.Length())
}
// Reflect returns v mirrored about the normal n: v - 2*dot(v, n)*n.
func Reflect(v *Vector3, n *Vector3) *Vector3 {
	scaled := n.Clone()
	scaled.MulScalar(2.0 * Dot(v, n))
	out := v.Clone()
	out.Sub(scaled)
	return out
}
// Refract computes the refraction of v at a surface with the given normal
// using Snell's law. refractionRatio is Ni/No, the ratio of the refractive
// index on the incoming side to the one on the outgoing side. On success the
// refracted direction is written into refracted and true is returned; false
// signals total internal reflection (no refracted ray exists).
func Refract(v *Vector3, normal *Vector3, refractionRatio float64, refracted *Vector3) bool {
	unit := v.UnitVector()
	cos := Dot(unit, normal)
	discriminant := 1.0 - math.Pow(refractionRatio, 2)*(1-math.Pow(cos, 2))
	if discriminant <= 0 {
		return false
	}
	along := normal.Clone()
	along.MulScalar(cos)
	unit.Sub(along)
	unit.MulScalar(refractionRatio)
	perp := normal.Clone()
	perp.MulScalar(math.Sqrt(discriminant))
	unit.Sub(perp)
	refracted.Copy(unit)
	return true
}
// Schlick is Christophe Schlick's polynomial approximation of the Fresnel
// reflectance: real glass reflects more at grazing angles (a window viewed
// edge-on becomes a mirror). cosine is the cosine of the incidence angle.
func Schlick(cosine float64, reflectiveIndex float64) float64 {
	r0 := math.Pow((1-reflectiveIndex)/(1+reflectiveIndex), 2)
	return r0 + (1-r0)*math.Pow(1-cosine, 5)
}
// RandomInUnitDisk returns a random point strictly inside the unit disk on
// the z=0 plane, via rejection sampling. Used to spread ray origins over a
// disk around the camera instead of a single point.
func RandomInUnitDisk() *Vector3 {
	p := NewVector3(0, 0, 0)
	for {
		p.Set(rand.Float64()*2.0-1.0, rand.Float64()*2.0-1.0, 0.0)
		if Dot(p, p) < 1.0 {
			return p
		}
	}
}
// RandomInUnitSphere returns a random point strictly inside the unit
// sphere, via rejection sampling of the enclosing cube.
func RandomInUnitSphere() *Vector3 {
	p := NewVector3(0, 0, 0)
	for {
		p.Set(rand.Float64()*2.0-1.0, rand.Float64()*2.0-1.0, rand.Float64()*2.0-1.0)
		if p.SquaredLength() < 1.0 {
			return p
		}
	}
}
// Dot returns the scalar (inner) product of a and b.
func Dot(a *Vector3, b *Vector3) float64 {
	return a.X*b.X + a.Y*b.Y + a.Z*b.Z
}

// Cross returns the vector (cross) product a × b.
func Cross(a *Vector3, b *Vector3) *Vector3 {
	return NewVector3(
		a.Y*b.Z-a.Z*b.Y,
		-(a.X*b.Z - a.Z*b.X),
		a.X*b.Y-a.Y*b.X)
}
// Return a copy of the vector
func (v *Vector3) Clone() *Vector3 {
return NewVector3(v.X, v.Y, v.Z)
}
// Copy the context of another vector to this one
func (v *Vector3) Copy(b *Vector3) {
v.X = b.X
v.Y = b.Y
v.Z = b.Z
}
// Create a new copy vector with a unit length vector with the same direction as this one.
func (v *Vector3) UnitVector() *Vector3 {
var unit = v.Clone()
unit.DivideScalar(v.Length())
return unit
}
// Length of the vector
func (v *Vector3) Length() float64 {
return math.Sqrt(v.X*v.X + v.Y*v.Y + v.Z*v.Z)
}
// Squared length of the vector (useful for comparisons, avoids the squaredroot calc).
func (v *Vector3) SquaredLength() float64 {
return v.X*v.X + v.Y*v.Y + v.Z*v.Z
}
// Generate a string with the vector values
func (v *Vector3) ToString() string {
return "(" + strconv.FormatFloat(v.X, 'f', -1, 64) + ", " + strconv.FormatFloat(v.Y, 'f', -1, 64) + ", " + strconv.FormatFloat(v.Z, 'f', -1, 64) + ")"
} | vmath/vector3.go | 0.875335 | 0.684584 | vector3.go | starcoder |
package packed
// Efficient sequential read/write of packed integers.
// BulkOperationPacked18 is the decoder specialisation for values packed with
// 18 bits each; the embedded BulkOperationPacked supplies the generic
// per-value operations.
type BulkOperationPacked18 struct {
*BulkOperationPacked
}
// newBulkOperationPacked18 returns the BulkOperation specialised for
// 18 bits per value.
func newBulkOperationPacked18() BulkOperation {
	op := &BulkOperationPacked18{newBulkOperationPacked(18)}
	return op
}
// decodeLongToInt unpacks 18-bit values from 64-bit blocks into values, one
// group of nine blocks (576 bits = exactly 32 values) per iteration. Values
// are stored most-significant bits first, so a value may straddle two
// consecutive blocks.
func (op *BulkOperationPacked18) decodeLongToInt(blocks []int64, values []int32, iterations int) {
	const mask = 1<<18 - 1
	blocksOffset, valuesOffset := 0, 0
	for i := 0; i < iterations; i++ {
		var rem uint64     // leftover low-order bits of the previous block
		remBits := uint(0) // number of bits currently held in rem
		for k := 0; k < 9; k++ {
			w := uint64(blocks[blocksOffset])
			blocksOffset++
			used := uint(0)
			if remBits > 0 {
				// A value straddles the block boundary: its high part is in
				// rem, its low part at the top of this block.
				need := 18 - remBits
				values[valuesOffset] = int32(rem<<need | w>>(64-need))
				valuesOffset++
				used = need
				remBits = 0
			}
			for 64-used >= 18 {
				values[valuesOffset] = int32(w >> (64 - used - 18) & mask)
				valuesOffset++
				used += 18
			}
			remBits = 64 - used
			rem = w & (uint64(1)<<remBits - 1)
		}
	}
}
// DecodeByteToInt unpacks 18-bit values from byte-aligned input: every
// group of 9 bytes holds exactly four values, packed big-endian with the
// most-significant bits first.
func (op *BulkOperationPacked18) DecodeByteToInt(blocks []byte, values []int32, iterations int) {
	const mask = 1<<18 - 1
	blocksOffset, valuesOffset := 0, 0
	for i := 0; i < iterations; i++ {
		var acc uint64 // bit accumulator; high-order bits arrive first
		bits := uint(0)
		for k := 0; k < 9; k++ {
			acc = acc<<8 | uint64(blocks[blocksOffset])
			blocksOffset++
			bits += 8
			for bits >= 18 {
				bits -= 18
				values[valuesOffset] = int32(acc >> bits & mask)
				valuesOffset++
			}
		}
	}
}
// DecodeLongToLong unpacks 18-bit values from 64-bit blocks into values,
// one group of nine blocks (576 bits = exactly 32 values) per iteration.
// Values are stored most-significant bits first, so a value may straddle
// two consecutive blocks.
func (op *BulkOperationPacked18) DecodeLongToLong(blocks []int64, values []int64, iterations int) {
	const mask = 1<<18 - 1
	blocksOffset, valuesOffset := 0, 0
	for i := 0; i < iterations; i++ {
		var rem uint64     // leftover low-order bits of the previous block
		remBits := uint(0) // number of bits currently held in rem
		for k := 0; k < 9; k++ {
			w := uint64(blocks[blocksOffset])
			blocksOffset++
			used := uint(0)
			if remBits > 0 {
				// A value straddles the block boundary: its high part is in
				// rem, its low part at the top of this block.
				need := 18 - remBits
				values[valuesOffset] = int64(rem<<need | w>>(64-need))
				valuesOffset++
				used = need
				remBits = 0
			}
			for 64-used >= 18 {
				values[valuesOffset] = int64(w >> (64 - used - 18) & mask)
				valuesOffset++
				used += 18
			}
			remBits = 64 - used
			rem = w & (uint64(1)<<remBits - 1)
		}
	}
}
func (op *BulkOperationPacked18) decodeByteToLong(blocks []byte, values []int64, iterations int) {
blocksOffset, valuesOffset := 0, 0
for i := 0; i < iterations; i++ {
byte0 := blocks[blocksOffset]
blocksOffset++
byte1 := blocks[blocksOffset]
blocksOffset++
byte2 := blocks[blocksOffset]
blocksOffset++
values[valuesOffset] = int64((int64(byte0) << 10) | (int64(byte1) << 2) | int64(uint8(byte2)>>6))
valuesOffset++
byte3 := blocks[blocksOffset]
blocksOffset++
byte4 := blocks[blocksOffset]
blocksOffset++
values[valuesOffset] = int64((int64(byte2&63) << 12) | (int64(byte3) << 4) | int64(uint8(byte4)>>4))
valuesOffset++
byte5 := blocks[blocksOffset]
blocksOffset++
byte6 := blocks[blocksOffset]
blocksOffset++
values[valuesOffset] = int64((int64(byte4&15) << 14) | (int64(byte5) << 6) | int64(uint8(byte6)>>2))
valuesOffset++
byte7 := blocks[blocksOffset]
blocksOffset++
byte8 := blocks[blocksOffset]
blocksOffset++
values[valuesOffset] = int64((int64(byte6&3) << 16) | (int64(byte7) << 8) | int64(byte8))
valuesOffset++
}
} | core/util/packed/bulkOperation18.go | 0.598195 | 0.671793 | bulkOperation18.go | starcoder |
package vector
import (
"fmt"
"math"
)
// Vector is a three-component float32 vector.
type Vector struct {
X float32 // x component
Y float32 // y component
Z float32 // z component
}
// String formats the vector as "(x,y,z)", implementing fmt.Stringer.
func (v *Vector) String() string {
	return fmt.Sprintf("(%g,%g,%g)", v.X, v.Y, v.Z)
}
// VGet builds a Vector from its three components.
func VGet(x float32, y float32, z float32) Vector {
	return Vector{X: x, Y: y, Z: z}
}

// VAdd returns the component-wise sum lhs + rhs.
func VAdd(lhs Vector, rhs Vector) Vector {
	return Vector{
		X: lhs.X + rhs.X,
		Y: lhs.Y + rhs.Y,
		Z: lhs.Z + rhs.Z,
	}
}

// VSub returns the component-wise difference lhs - rhs.
func VSub(lhs Vector, rhs Vector) Vector {
	return Vector{
		X: lhs.X - rhs.X,
		Y: lhs.Y - rhs.Y,
		Z: lhs.Z - rhs.Z,
	}
}
// VSize returns the Euclidean length of v.
func VSize(v Vector) float32 {
	return float32(math.Sqrt(float64(v.X*v.X + v.Y*v.Y + v.Z*v.Z)))
}

// VSquareSize returns the squared length of v; cheaper than VSize when
// only comparing magnitudes, since it skips the square root.
func VSquareSize(v Vector) float32 {
	return v.X*v.X + v.Y*v.Y + v.Z*v.Z
}
// VNorm returns v scaled to unit length.
// NOTE(review): a zero-length input produces non-finite components — confirm
// callers never normalize the zero vector.
func VNorm(v Vector) Vector {
	size := VSize(v)
	return Vector{X: v.X / size, Y: v.Y / size, Z: v.Z / size}
}

// VScale returns v with every component multiplied by scale.
func VScale(v Vector, scale float32) Vector {
	return Vector{X: v.X * scale, Y: v.Y * scale, Z: v.Z * scale}
}
// VDot returns the scalar (inner) product of lhs and rhs.
func VDot(lhs Vector, rhs Vector) float32 {
	return lhs.X*rhs.X + lhs.Y*rhs.Y + lhs.Z*rhs.Z
}

// VCross returns the cross product lhs × rhs.
func VCross(lhs Vector, rhs Vector) Vector {
	return Vector{
		X: lhs.Y*rhs.Z - lhs.Z*rhs.Y,
		Y: lhs.Z*rhs.X - lhs.X*rhs.Z,
		Z: lhs.X*rhs.Y - lhs.Y*rhs.X,
	}
}
// VAverage returns the arithmetic mean of the given vectors.
//
// Robustness fix: an empty (or nil) slice previously divided by zero and
// returned NaN components; it now returns the zero vector.
func VAverage(v []Vector) Vector {
	var sum Vector
	if len(v) == 0 {
		return sum
	}
	for _, p := range v {
		sum = VAdd(sum, p)
	}
	return VScale(sum, 1.0/float32(len(v)))
}
// VAngleV returns the elevation (pitch) of v above the XZ plane, in
// radians; a near-zero-length vector yields 0.
func VAngleV(v Vector) float32 {
	length := VSize(v)
	if length < 1.0E-8 {
		return 0.0
	}
	sin := v.Y / length
	return float32(math.Asin(float64(sin)))
}

// VAngleH returns the heading (yaw) of v around the Y axis, in radians;
// a vector whose XZ projection is near zero yields 0. The angle is negated
// when Z >= 0 (caller-specific sign convention).
func VAngleH(v Vector) float32 {
	proj := VGet(v.X, 0.0, v.Z)
	projLen := VSize(proj)
	if projLen < 1.0E-8 {
		return 0.0
	}
	cos := v.X / projLen
	angle := float32(math.Acos(float64(cos)))
	if v.Z >= 0.0 {
		angle *= (-1.0)
	}
	return angle
}
func VGetFromAngles(v_rotate float32, h_rotate float32) Vector {
var ret Vector
ret.X = float32(math.Cos(float64(h_rotate)))
ret.Y = float32(math.Sin(float64(v_rotate)))
ret.Z = float32(-math.Sin(float64(h_rotate)))
ret = VNorm(ret)
return ret
} | vector/vector.go | 0.847179 | 0.461441 | vector.go | starcoder |
package statistics
import (
"encoding/binary"
"math"
"time"
"github.com/cznic/mathutil"
"github.com/whtcorpsinc/BerolinaSQL/allegrosql"
"github.com/whtcorpsinc/milevadb/stochastikctx/stmtctx"
"github.com/whtcorpsinc/milevadb/types"
)
// calcFraction estimates, under the continuous-value assumption, what
// fraction of the interval [lower, upper] lies within [lower, value].
// Values outside the interval clamp to 0 or 1, and degenerate or
// numerically undefined cases fall back to 0.5.
func calcFraction(lower, upper, value float64) float64 {
	switch {
	case upper <= lower:
		return 0.5
	case value <= lower:
		return 0
	case value >= upper:
		return 1
	}
	frac := (value - lower) / (upper - lower)
	if math.IsNaN(frac) || math.IsInf(frac, 0) || frac < 0 || frac > 1 {
		return 0.5
	}
	return frac
}
// convertCausetToScalar maps a causet value onto a float64 scalar in an
// order-preserving way so histogram code can interpolate between bucket
// bounds. commonPfxLen is the number of leading bytes shared by the bucket
// bounds; it is stripped before converting string/bytes values. Unsupported
// kinds and conversion failures map to 0.
func convertCausetToScalar(value *types.Causet, commonPfxLen int) float64 {
switch value.HoTT() {
case types.HoTTMysqlDecimal:
scalar, err := value.GetMysqlDecimal().ToFloat64()
if err != nil {
return 0
}
return scalar
case types.HoTTMysqlTime:
// Times are measured as the duration since the minimum value of the
// same time type, so values of one type stay mutually comparable.
valueTime := value.GetMysqlTime()
var minTime types.Time
switch valueTime.Type() {
case allegrosql.TypeDate:
minTime = types.NewTime(types.MinDatetime, allegrosql.TypeDate, types.DefaultFsp)
case allegrosql.TypeDatetime:
minTime = types.NewTime(types.MinDatetime, allegrosql.TypeDatetime, types.DefaultFsp)
case allegrosql.TypeTimestamp:
minTime = types.MinTimestamp
}
sc := &stmtctx.StatementContext{TimeZone: types.BoundTimezone}
return float64(valueTime.Sub(sc, &minTime).Duration)
case types.HoTTString, types.HoTTBytes:
// Only the bytes after the common prefix carry ordering information.
bytes := value.GetBytes()
if len(bytes) <= commonPfxLen {
return 0
}
return convertBytesToScalar(bytes[commonPfxLen:])
default:
// do not know how to convert
return 0
}
}
// PreCalculateScalar converts the lower and upper to scalar. When the causet type is HoTTString or HoTTBytes, we also
// calculate their common prefix length, because when a value falls between lower and upper, the common prefix
// of lower and upper equals to the common prefix of the lower, upper and the value. For some simple types like `Int64`,
// we do not convert it because we can directly infer the scalar value.
func (hg *Histogram) PreCalculateScalar() {
	// Idiom fix: the bucket count was previously kept in a variable named
	// `len`, shadowing the builtin for the rest of the function.
	n := hg.Len()
	if n == 0 {
		return
	}
	switch hg.GetLower(0).HoTT() {
	case types.HoTTMysqlDecimal, types.HoTTMysqlTime:
		hg.scalars = make([]scalar, n)
		for i := 0; i < n; i++ {
			hg.scalars[i] = scalar{
				lower: convertCausetToScalar(hg.GetLower(i), 0),
				upper: convertCausetToScalar(hg.GetUpper(i), 0),
			}
		}
	case types.HoTTBytes, types.HoTTString:
		hg.scalars = make([]scalar, n)
		for i := 0; i < n; i++ {
			lower, upper := hg.GetLower(i), hg.GetUpper(i)
			common := commonPrefixLength(lower.GetBytes(), upper.GetBytes())
			hg.scalars[i] = scalar{
				commonPfxLen: common,
				lower:        convertCausetToScalar(lower, common),
				upper:        convertCausetToScalar(upper, common),
			}
		}
	}
}
// calcFraction returns the fraction of bucket `index` that lies at or below
// value, dispatching on the value's kind; unknown kinds fall back to 0.5.
func (hg *Histogram) calcFraction(index int, value *types.Causet) float64 {
	lo, hi := hg.Bounds.GetRow(2*index), hg.Bounds.GetRow(2*index+1)
	switch value.HoTT() {
	case types.HoTTFloat32:
		return calcFraction(float64(lo.GetFloat32(0)), float64(hi.GetFloat32(0)), float64(value.GetFloat32()))
	case types.HoTTFloat64:
		return calcFraction(lo.GetFloat64(0), hi.GetFloat64(0), value.GetFloat64())
	case types.HoTTInt64:
		return calcFraction(float64(lo.GetInt64(0)), float64(hi.GetInt64(0)), float64(value.GetInt64()))
	case types.HoTTUint64:
		return calcFraction(float64(lo.GetUint64(0)), float64(hi.GetUint64(0)), float64(value.GetUint64()))
	case types.HoTTMysqlDuration:
		return calcFraction(float64(lo.GetDuration(0, 0).Duration), float64(hi.GetDuration(0, 0).Duration), float64(value.GetMysqlDuration().Duration))
	case types.HoTTMysqlDecimal, types.HoTTMysqlTime:
		// Bucket bound scalars are pre-computed by PreCalculateScalar.
		return calcFraction(hg.scalars[index].lower, hg.scalars[index].upper, convertCausetToScalar(value, 0))
	case types.HoTTBytes, types.HoTTString:
		return calcFraction(hg.scalars[index].lower, hg.scalars[index].upper, convertCausetToScalar(value, hg.scalars[index].commonPfxLen))
	}
	return 0.5
}
// commonPrefixLength returns the number of leading bytes shared by lower
// and upper.
func commonPrefixLength(lower, upper []byte) int {
	limit := len(lower)
	if len(upper) < limit {
		limit = len(upper)
	}
	for i := 0; i < limit; i++ {
		if lower[i] != upper[i] {
			return i
		}
	}
	return limit
}
// convertBytesToScalar views a byte string as a big-endian base-256 number,
// using at most its first 8 bytes; shorter inputs are zero-padded on the
// right, which preserves lexicographic ordering.
func convertBytesToScalar(value []byte) float64 {
	var padded [8]byte
	copy(padded[:], value)
	return float64(binary.BigEndian.Uint64(padded[:]))
}
// calcFraction4Causets computes the fraction of [lower, upper] lying within
// [lower, value] for causets of the same kind; unknown kinds fall back to
// 0.5.
func calcFraction4Causets(lower, upper, value *types.Causet) float64 {
	switch value.HoTT() {
	case types.HoTTFloat32:
		return calcFraction(float64(lower.GetFloat32()), float64(upper.GetFloat32()), float64(value.GetFloat32()))
	case types.HoTTFloat64:
		return calcFraction(lower.GetFloat64(), upper.GetFloat64(), value.GetFloat64())
	case types.HoTTInt64:
		return calcFraction(float64(lower.GetInt64()), float64(upper.GetInt64()), float64(value.GetInt64()))
	case types.HoTTUint64:
		return calcFraction(float64(lower.GetUint64()), float64(upper.GetUint64()), float64(value.GetUint64()))
	case types.HoTTMysqlDuration:
		return calcFraction(float64(lower.GetMysqlDuration().Duration), float64(upper.GetMysqlDuration().Duration), float64(value.GetMysqlDuration().Duration))
	case types.HoTTMysqlDecimal, types.HoTTMysqlTime:
		return calcFraction(convertCausetToScalar(lower, 0), convertCausetToScalar(upper, 0), convertCausetToScalar(value, 0))
	case types.HoTTBytes, types.HoTTString:
		// Strip the bounds' common prefix from all three operands so the
		// scalar conversion spends its 8 bytes on distinguishing bytes.
		pfx := commonPrefixLength(lower.GetBytes(), upper.GetBytes())
		return calcFraction(convertCausetToScalar(lower, pfx), convertCausetToScalar(upper, pfx), convertCausetToScalar(value, pfx))
	}
	return 0.5
}
// maxNumStep caps how many discrete values enumRangeValues will materialise.
const maxNumStep = 10
// enumRangeValues expands the range (low, high), each end optionally
// exclusive, into the explicit list of values it contains — provided both
// ends have the same kind and the range holds fewer than maxNumStep values;
// otherwise it returns nil. Supported kinds: int64, uint64, duration and
// time. Durations and times are stepped at the coarser of the two fsp
// precisions (dates step by whole days).
func enumRangeValues(low, high types.Causet, lowExclude, highExclude bool) []types.Causet {
if low.HoTT() != high.HoTT() {
return nil
}
// exclude counts the open endpoints, which shrink the value count.
exclude := 0
if lowExclude {
exclude++
}
if highExclude {
exclude++
}
switch low.HoTT() {
case types.HoTTInt64:
// Overflow check.
lowVal, highVal := low.GetInt64(), high.GetInt64()
if lowVal <= 0 && highVal >= 0 {
if lowVal < -maxNumStep || highVal > maxNumStep {
return nil
}
}
remaining := highVal - lowVal
if remaining >= maxNumStep+1 {
return nil
}
remaining = remaining + 1 - int64(exclude)
if remaining >= maxNumStep {
return nil
}
values := make([]types.Causet, 0, remaining)
startValue := lowVal
if lowExclude {
startValue++
}
for i := int64(0); i < remaining; i++ {
values = append(values, types.NewIntCauset(startValue+i))
}
return values
case types.HoTTUint64:
remaining := high.GetUint64() - low.GetUint64()
if remaining >= maxNumStep+1 {
return nil
}
remaining = remaining + 1 - uint64(exclude)
if remaining >= maxNumStep {
return nil
}
values := make([]types.Causet, 0, remaining)
startValue := low.GetUint64()
if lowExclude {
startValue++
}
for i := uint64(0); i < remaining; i++ {
values = append(values, types.NewUintCauset(startValue+i))
}
return values
case types.HoTTMysqlDuration:
// Step by one unit of the coarser fractional-second precision.
lowDur, highDur := low.GetMysqlDuration(), high.GetMysqlDuration()
fsp := mathutil.MaxInt8(lowDur.Fsp, highDur.Fsp)
stepSize := int64(math.Pow10(int(types.MaxFsp-fsp))) * int64(time.Microsecond)
lowDur.Duration = lowDur.Duration.Round(time.Duration(stepSize))
remaining := int64(highDur.Duration-lowDur.Duration)/stepSize + 1 - int64(exclude)
if remaining >= maxNumStep {
return nil
}
startValue := int64(lowDur.Duration)
if lowExclude {
startValue += stepSize
}
values := make([]types.Causet, 0, remaining)
for i := int64(0); i < remaining; i++ {
values = append(values, types.NewDurationCauset(types.Duration{Duration: time.Duration(startValue + i*stepSize), Fsp: fsp}))
}
return values
case types.HoTTMysqlTime:
lowTime, highTime := low.GetMysqlTime(), high.GetMysqlTime()
if lowTime.Type() != highTime.Type() {
return nil
}
fsp := mathutil.MaxInt8(lowTime.Fsp(), highTime.Fsp())
var stepSize int64
sc := &stmtctx.StatementContext{TimeZone: time.UTC}
if lowTime.Type() == allegrosql.TypeDate {
// Dates step by whole days, truncated to midnight.
stepSize = 24 * int64(time.Hour)
lowTime.SetCoreTime(types.FromDate(lowTime.Year(), lowTime.Month(), lowTime.Day(), 0, 0, 0, 0))
} else {
// Datetime/timestamp step by one unit of the coarser precision.
var err error
lowTime, err = lowTime.RoundFrac(sc, fsp)
if err != nil {
return nil
}
stepSize = int64(math.Pow10(int(types.MaxFsp-fsp))) * int64(time.Microsecond)
}
remaining := int64(highTime.Sub(sc, &lowTime).Duration)/stepSize + 1 - int64(exclude)
if remaining >= maxNumStep {
return nil
}
startValue := lowTime
var err error
if lowExclude {
startValue, err = lowTime.Add(sc, types.Duration{Duration: time.Duration(stepSize), Fsp: fsp})
if err != nil {
return nil
}
}
values := make([]types.Causet, 0, remaining)
for i := int64(0); i < remaining; i++ {
value, err := startValue.Add(sc, types.Duration{Duration: time.Duration(i * stepSize), Fsp: fsp})
if err != nil {
return nil
}
values = append(values, types.NewTimeCauset(value))
}
return values
}
return nil
}
package curves
import (
"github.com/wieku/danser-go/app/bmath"
"github.com/wieku/danser-go/framework/math/vector"
"sort"
)
// minPartWidth is the smallest leftover segment width kept when trimming a
// curve to a desired length (see NewMultiCurveT).
const minPartWidth = 0.0001
// MultiCurve flattens a curve described by control points and a type code
// ("L", "P", "B" or "C") into a chain of Linear segments addressed by arc
// length.
type MultiCurve struct {
sections []float32 // cumulative segment lengths; sections[0]=0, sections[len(lines)]=length
lines []Linear // the flattened segments
length float32 // total arc length of all segments
firstPoint vector.Vector2f // fallback position for a curve with no segments
}
// NewMultiCurve flattens the control points into linear segments according
// to typ: "P" a circular arc through three points, "L" a polyline, "B"
// bezier spans separated by duplicated control points, "C" a Catmull-Rom
// spline. Fewer than three points always degrades to "L".
func NewMultiCurve(typ string, points []vector.Vector2f) *MultiCurve {
lines := make([]Linear, 0)
if len(points) < 3 {
typ = "L"
}
switch typ {
case "P":
lines = append(lines, ApproximateCircularArc(points[0], points[1], points[2], 0.125)...)
case "L":
for i := 0; i < len(points)-1; i++ {
lines = append(lines, NewLinear(points[i], points[i+1]))
}
case "B":
// A repeated control point splits the list into independent bezier
// spans; the final span ends at the last point.
lastIndex := 0
for i := 0; i < len(points); i++ {
multi := i < len(points)-2 && points[i] == points[i+1]
if multi || i == len(points)-1 {
subPoints := points[lastIndex : i+1]
if len(subPoints) > 2 {
lines = append(lines, ApproximateBezier(subPoints)...)
} else if len(subPoints) == 2 {
lines = append(lines, NewLinear(subPoints[0], subPoints[1]))
}
if multi {
i++
}
lastIndex = i
}
}
case "C":
// Catmull-Rom needs a point before and after each span; duplicate the
// endpoints when they are not already doubled.
if points[0] != points[1] {
points = append([]vector.Vector2f{points[0]}, points...)
}
if points[len(points)-1] != points[len(points)-2] {
points = append(points, points[len(points)-1])
}
for i := 0; i < len(points)-3; i++ {
lines = append(lines, ApproximateCatmullRom(points[i:i+4], 50)...)
}
}
// Total length plus the cumulative section table used for arc-length
// addressing in PointAt/getLineAt.
length := float32(0.0)
for _, l := range lines {
length += l.GetLength()
}
firstPoint := points[0]
sections := make([]float32, len(lines)+1)
sections[0] = 0.0
prev := float32(0.0)
for i := 0; i < len(lines); i++ {
prev += lines[i].GetLength()
sections[i+1] = prev
}
return &MultiCurve{sections, lines, length, firstPoint}
}
// NewMultiCurveT builds a MultiCurve like NewMultiCurve and then trims it
// from the end so its total length is (approximately) desiredLength,
// shortening or dropping trailing segments as needed.
func NewMultiCurveT(typ string, points []vector.Vector2f, desiredLength float64) *MultiCurve {
mCurve := NewMultiCurve(typ, points)
// diff is how much arc length still has to be removed from the tail.
diff := float64(mCurve.length) - desiredLength
for len(mCurve.lines) > 0 {
line := mCurve.lines[len(mCurve.lines)-1]
if float64(line.GetLength()) > diff+minPartWidth {
// The remaining excess fits inside this segment: cut it short
// (degenerate zero-length segments are left untouched).
if line.Point1 != line.Point2 {
pt := line.PointAt((line.GetLength() - float32(diff)) / line.GetLength())
mCurve.lines[len(mCurve.lines)-1] = NewLinear(line.Point1, pt)
}
break
}
// The whole segment is surplus: drop it and keep trimming.
diff -= float64(line.GetLength())
mCurve.lines = mCurve.lines[:len(mCurve.lines)-1]
}
// Recompute the total length and the cumulative section table for the
// trimmed segment list.
mCurve.length = 0.0
for _, l := range mCurve.lines {
mCurve.length += l.GetLength()
}
mCurve.sections = make([]float32, len(mCurve.lines)+1)
mCurve.sections[0] = 0.0
prev := float32(0.0)
for i := 0; i < len(mCurve.lines); i++ {
prev += mCurve.lines[i].GetLength()
mCurve.sections[i+1] = prev
}
return mCurve
}
// PointAt returns the position at normalised progress t in [0, 1] along the
// whole curve, measured by arc length; an empty curve yields its first
// control point.
func (mCurve *MultiCurve) PointAt(t float32) vector.Vector2f {
	if len(mCurve.lines) == 0 {
		return mCurve.firstPoint
	}
	target := mCurve.length * bmath.ClampF32(t, 0.0, 1.0)
	ends := mCurve.sections[1:]
	idx := sort.Search(len(ends), func(i int) bool {
		return ends[i] >= target
	})
	idx = bmath.MinI(idx, len(mCurve.lines)-1)
	segStart, segEnd := mCurve.sections[idx], mCurve.sections[idx+1]
	return mCurve.lines[idx].PointAt((target - segStart) / (segEnd - segStart))
}
// GetLength returns the total arc length of the curve.
func (mCurve *MultiCurve) GetLength() float32 {
	return mCurve.length
}

// GetStartAngle returns the tangent angle at the very start of the curve,
// or 0 when the curve has no segments.
func (mCurve *MultiCurve) GetStartAngle() float32 {
	if len(mCurve.lines) == 0 {
		return 0.0
	}
	return mCurve.lines[0].GetStartAngle()
}
// getLineAt returns the linear segment containing normalised progress t;
// an empty curve yields a zero-valued Linear.
//
// Consistency/robustness fix: clamp the binary-search result the same way
// PointAt does, so accumulated floating-point error in the section table
// (or t == 1) can never index one past the last segment.
func (mCurve *MultiCurve) getLineAt(t float32) Linear {
	if len(mCurve.lines) == 0 {
		return Linear{}
	}
	desiredWidth := mCurve.length * bmath.ClampF32(t, 0.0, 1.0)
	withoutFirst := mCurve.sections[1:]
	index := sort.Search(len(withoutFirst), func(i int) bool {
		return withoutFirst[i] >= desiredWidth
	})
	index = bmath.MinI(index, len(mCurve.lines)-1)
	return mCurve.lines[index]
}
func (mCurve *MultiCurve) GetStartAngleAt(t float32) float32 {
if len(mCurve.lines) == 0 {
return 0
}
return mCurve.getLineAt(t).GetStartAngle()
}
func (mCurve *MultiCurve) GetEndAngle() float32 {
if len(mCurve.lines) > 0 {
return mCurve.lines[len(mCurve.lines)-1].GetEndAngle()
}
return 0.0
}
func (mCurve *MultiCurve) GetEndAngleAt(t float32) float32 {
if len(mCurve.lines) == 0 {
return 0
}
return mCurve.getLineAt(t).GetEndAngle()
}
func (mCurve *MultiCurve) GetLines() []Linear {
return mCurve.lines
} | framework/math/curves/multicurve.go | 0.636692 | 0.451266 | multicurve.go | starcoder |
package graph
import (
i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55 "github.com/microsoft/kiota/abstractions/go/serialization"
)
// PrintJobConfiguration holds the configurable settings of a print job.
// Generated model type; unexported fields are populated via the setters and
// exposed through the Get* accessors.
type PrintJobConfiguration struct {
// Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
additionalData map[string]interface{};
// Whether the printer should collate pages when printing multiple copies of a multi-page document.
collate *bool;
// The color mode the printer should use to print the job. Valid values are described in the table below. Read-only.
colorMode *PrintColorMode;
// The number of copies that should be printed. Read-only.
copies *int32;
// The resolution to use when printing the job, expressed in dots per inch (DPI). Read-only.
dpi *int32;
// The duplex mode the printer should use when printing the job. Valid values are described in the table below. Read-only.
duplexMode *PrintDuplexMode;
// The orientation to use when feeding media into the printer. Valid values are described in the following table. Read-only.
feedOrientation *PrinterFeedOrientation;
// Finishing processes to use when printing.
finishings []PrintFinishing;
// Presumably whether PDF content is scaled to fit the page — undocumented in the source metadata; confirm against the service documentation.
fitPdfToPage *bool;
// The input bin (tray) to use when printing. See the printer's capabilities for a list of supported input bins.
inputBin *string;
// The margin settings to use when printing.
margin *PrintMargin;
// The media size to use when printing. Supports standard size names for ISO and ANSI media sizes. Valid values listed in the printerCapabilities topic.
mediaSize *string;
// The default media (such as paper) type to print the document on.
mediaType *string;
// The direction to lay out pages when multiple pages are being printed per sheet. Valid values are described in the following table.
multipageLayout *PrintMultipageLayout;
// The orientation setting the printer should use when printing the job. Valid values are described in the following table.
orientation *PrintOrientation;
// The output bin to place completed prints into. See the printer's capabilities for a list of supported output bins.
outputBin *string;
// The page ranges to print. Read-only.
pageRanges []IntegerRange;
// The number of document pages to print on each sheet.
pagesPerSheet *int32;
// The print quality to use when printing the job. Valid values are described in the table below. Read-only.
quality *PrintQuality;
// Specifies how the printer should scale the document data to fit the requested media. Valid values are described in the following table.
scaling *PrintScaling;
}
// NewPrintJobConfiguration instantiates a new printJobConfiguration and sets the default values.
func NewPrintJobConfiguration()(*PrintJobConfiguration) {
	m := &PrintJobConfiguration{}
	m.SetAdditionalData(make(map[string]interface{}))
	return m
}
// GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
// Idiom fix (applies to every accessor below): Go convention avoids `else`
// after a terminating branch; the nil-receiver guard now returns early.
func (m *PrintJobConfiguration) GetAdditionalData()(map[string]interface{}) {
	if m == nil {
		return nil
	}
	return m.additionalData
}
// GetCollate gets the collate property value. Whether the printer should collate pages when printing multiple copies of a multi-page document.
func (m *PrintJobConfiguration) GetCollate()(*bool) {
	if m == nil {
		return nil
	}
	return m.collate
}
// GetColorMode gets the colorMode property value. The color mode the printer should use to print the job. Valid values are described in the table below. Read-only.
func (m *PrintJobConfiguration) GetColorMode()(*PrintColorMode) {
	if m == nil {
		return nil
	}
	return m.colorMode
}
// GetCopies gets the copies property value. The number of copies that should be printed. Read-only.
func (m *PrintJobConfiguration) GetCopies()(*int32) {
	if m == nil {
		return nil
	}
	return m.copies
}
// GetDpi gets the dpi property value. The resolution to use when printing the job, expressed in dots per inch (DPI). Read-only.
func (m *PrintJobConfiguration) GetDpi()(*int32) {
	if m == nil {
		return nil
	}
	return m.dpi
}
// GetDuplexMode gets the duplexMode property value. The duplex mode the printer should use when printing the job. Valid values are described in the table below. Read-only.
func (m *PrintJobConfiguration) GetDuplexMode()(*PrintDuplexMode) {
	if m == nil {
		return nil
	}
	return m.duplexMode
}
// GetFeedOrientation gets the feedOrientation property value. The orientation to use when feeding media into the printer. Valid values are described in the following table. Read-only.
func (m *PrintJobConfiguration) GetFeedOrientation()(*PrinterFeedOrientation) {
	if m == nil {
		return nil
	}
	return m.feedOrientation
}
// GetFinishings gets the finishings property value. Finishing processes to use when printing.
func (m *PrintJobConfiguration) GetFinishings()([]PrintFinishing) {
	if m == nil {
		return nil
	}
	return m.finishings
}
// GetFitPdfToPage gets the fitPdfToPage property value.
func (m *PrintJobConfiguration) GetFitPdfToPage()(*bool) {
	if m == nil {
		return nil
	}
	return m.fitPdfToPage
}
// GetInputBin gets the inputBin property value. The input bin (tray) to use when printing. See the printer's capabilities for a list of supported input bins.
func (m *PrintJobConfiguration) GetInputBin()(*string) {
	if m == nil {
		return nil
	}
	return m.inputBin
}
// GetMargin gets the margin property value. The margin settings to use when printing.
func (m *PrintJobConfiguration) GetMargin()(*PrintMargin) {
	if m == nil {
		return nil
	}
	return m.margin
}
// GetMediaSize gets the mediaSize property value. The media size to use when printing. Supports standard size names for ISO and ANSI media sizes. Valid values listed in the printerCapabilities topic.
func (m *PrintJobConfiguration) GetMediaSize()(*string) {
	if m == nil {
		return nil
	}
	return m.mediaSize
}
// GetMediaType gets the mediaType property value. The default media (such as paper) type to print the document on.
func (m *PrintJobConfiguration) GetMediaType()(*string) {
if m == nil {
return nil
} else {
return m.mediaType
}
}
// GetMultipageLayout gets the multipageLayout property value. The direction to lay out pages when multiple pages are being printed per sheet. Valid values are described in the following table.
func (m *PrintJobConfiguration) GetMultipageLayout()(*PrintMultipageLayout) {
if m == nil {
return nil
} else {
return m.multipageLayout
}
}
// GetOrientation gets the orientation property value. The orientation setting the printer should use when printing the job. Valid values are described in the following table.
func (m *PrintJobConfiguration) GetOrientation()(*PrintOrientation) {
if m == nil {
return nil
} else {
return m.orientation
}
}
// GetOutputBin gets the outputBin property value. The output bin to place completed prints into. See the printer's capabilities for a list of supported output bins.
func (m *PrintJobConfiguration) GetOutputBin()(*string) {
if m == nil {
return nil
} else {
return m.outputBin
}
}
// GetPageRanges gets the pageRanges property value. The page ranges to print. Read-only.
func (m *PrintJobConfiguration) GetPageRanges()([]IntegerRange) {
if m == nil {
return nil
} else {
return m.pageRanges
}
}
// GetPagesPerSheet gets the pagesPerSheet property value. The number of document pages to print on each sheet.
func (m *PrintJobConfiguration) GetPagesPerSheet()(*int32) {
if m == nil {
return nil
} else {
return m.pagesPerSheet
}
}
// GetQuality gets the quality property value. The print quality to use when printing the job. Valid values are described in the table below. Read-only.
func (m *PrintJobConfiguration) GetQuality()(*PrintQuality) {
if m == nil {
return nil
} else {
return m.quality
}
}
// GetScaling gets the scaling property value. Specifies how the printer should scale the document data to fit the requested media. Valid values are described in the following table.
func (m *PrintJobConfiguration) GetScaling()(*PrintScaling) {
if m == nil {
return nil
} else {
return m.scaling
}
}
// GetFieldDeserializers returns the deserialization information for the current
// model: a map from JSON field name to a parser that reads that field from a
// ParseNode and stores it on the receiver. Every parser skips absent (nil) values.
func (m *PrintJobConfiguration) GetFieldDeserializers()(map[string]func(interface{}, i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode)(error)) {
    res := make(map[string]func(interface{}, i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode)(error))
    res["collate"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetBoolValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetCollate(val)
        }
        return nil
    }
    res["colorMode"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetEnumValue(ParsePrintColorMode)
        if err != nil {
            return err
        }
        if val != nil {
            m.SetColorMode(val.(*PrintColorMode))
        }
        return nil
    }
    res["copies"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetInt32Value()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetCopies(val)
        }
        return nil
    }
    res["dpi"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetInt32Value()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetDpi(val)
        }
        return nil
    }
    res["duplexMode"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetEnumValue(ParsePrintDuplexMode)
        if err != nil {
            return err
        }
        if val != nil {
            m.SetDuplexMode(val.(*PrintDuplexMode))
        }
        return nil
    }
    res["feedOrientation"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetEnumValue(ParsePrinterFeedOrientation)
        if err != nil {
            return err
        }
        if val != nil {
            m.SetFeedOrientation(val.(*PrinterFeedOrientation))
        }
        return nil
    }
    res["finishings"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetCollectionOfEnumValues(ParsePrintFinishing)
        if err != nil {
            return err
        }
        if val != nil {
            // Re-box the parsed collection into a concretely typed slice.
            res := make([]PrintFinishing, len(val))
            for i, v := range val {
                res[i] = *(v.(*PrintFinishing))
            }
            m.SetFinishings(res)
        }
        return nil
    }
    res["fitPdfToPage"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetBoolValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetFitPdfToPage(val)
        }
        return nil
    }
    res["inputBin"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetStringValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetInputBin(val)
        }
        return nil
    }
    res["margin"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetObjectValue(func () i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable { return NewPrintMargin() })
        if err != nil {
            return err
        }
        if val != nil {
            m.SetMargin(val.(*PrintMargin))
        }
        return nil
    }
    res["mediaSize"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetStringValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetMediaSize(val)
        }
        return nil
    }
    res["mediaType"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetStringValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetMediaType(val)
        }
        return nil
    }
    res["multipageLayout"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetEnumValue(ParsePrintMultipageLayout)
        if err != nil {
            return err
        }
        if val != nil {
            m.SetMultipageLayout(val.(*PrintMultipageLayout))
        }
        return nil
    }
    res["orientation"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetEnumValue(ParsePrintOrientation)
        if err != nil {
            return err
        }
        if val != nil {
            m.SetOrientation(val.(*PrintOrientation))
        }
        return nil
    }
    res["outputBin"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetStringValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetOutputBin(val)
        }
        return nil
    }
    res["pageRanges"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetCollectionOfObjectValues(func () i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable { return NewIntegerRange() })
        if err != nil {
            return err
        }
        if val != nil {
            // Re-box the parsed collection into a concretely typed slice.
            res := make([]IntegerRange, len(val))
            for i, v := range val {
                res[i] = *(v.(*IntegerRange))
            }
            m.SetPageRanges(res)
        }
        return nil
    }
    res["pagesPerSheet"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetInt32Value()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetPagesPerSheet(val)
        }
        return nil
    }
    res["quality"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetEnumValue(ParsePrintQuality)
        if err != nil {
            return err
        }
        if val != nil {
            m.SetQuality(val.(*PrintQuality))
        }
        return nil
    }
    res["scaling"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetEnumValue(ParsePrintScaling)
        if err != nil {
            return err
        }
        if val != nil {
            m.SetScaling(val.(*PrintScaling))
        }
        return nil
    }
    return res
}
// IsNil reports whether the receiver is nil.
func (m *PrintJobConfiguration) IsNil()(bool) {
    return m == nil
}
// Serialize writes the current object's fields to the given SerializationWriter.
// Enum-typed fields are emitted as their string form and skipped entirely when
// nil; any remaining additional data is written last.
func (m *PrintJobConfiguration) Serialize(writer i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.SerializationWriter)(error) {
    {
        err := writer.WriteBoolValue("collate", m.GetCollate())
        if err != nil {
            return err
        }
    }
    if m.GetColorMode() != nil {
        cast := (*m.GetColorMode()).String()
        err := writer.WriteStringValue("colorMode", &cast)
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteInt32Value("copies", m.GetCopies())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteInt32Value("dpi", m.GetDpi())
        if err != nil {
            return err
        }
    }
    if m.GetDuplexMode() != nil {
        cast := (*m.GetDuplexMode()).String()
        err := writer.WriteStringValue("duplexMode", &cast)
        if err != nil {
            return err
        }
    }
    if m.GetFeedOrientation() != nil {
        cast := (*m.GetFeedOrientation()).String()
        err := writer.WriteStringValue("feedOrientation", &cast)
        if err != nil {
            return err
        }
    }
    if m.GetFinishings() != nil {
        err := writer.WriteCollectionOfStringValues("finishings", SerializePrintFinishing(m.GetFinishings()))
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteBoolValue("fitPdfToPage", m.GetFitPdfToPage())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteStringValue("inputBin", m.GetInputBin())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteObjectValue("margin", m.GetMargin())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteStringValue("mediaSize", m.GetMediaSize())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteStringValue("mediaType", m.GetMediaType())
        if err != nil {
            return err
        }
    }
    if m.GetMultipageLayout() != nil {
        cast := (*m.GetMultipageLayout()).String()
        err := writer.WriteStringValue("multipageLayout", &cast)
        if err != nil {
            return err
        }
    }
    if m.GetOrientation() != nil {
        cast := (*m.GetOrientation()).String()
        err := writer.WriteStringValue("orientation", &cast)
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteStringValue("outputBin", m.GetOutputBin())
        if err != nil {
            return err
        }
    }
    if m.GetPageRanges() != nil {
        cast := make([]i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable, len(m.GetPageRanges()))
        for i, v := range m.GetPageRanges() {
            // Copy the element so the Parsable pointer does not alias the loop variable.
            temp := v
            cast[i] = i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable(&temp)
        }
        err := writer.WriteCollectionOfObjectValues("pageRanges", cast)
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteInt32Value("pagesPerSheet", m.GetPagesPerSheet())
        if err != nil {
            return err
        }
    }
    if m.GetQuality() != nil {
        cast := (*m.GetQuality()).String()
        err := writer.WriteStringValue("quality", &cast)
        if err != nil {
            return err
        }
    }
    if m.GetScaling() != nil {
        cast := (*m.GetScaling()).String()
        err := writer.WriteStringValue("scaling", &cast)
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteAdditionalData(m.GetAdditionalData())
        if err != nil {
            return err
        }
    }
    return nil
}
// SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *PrintJobConfiguration) SetAdditionalData(value map[string]interface{})() {
if m != nil {
m.additionalData = value
}
}
// SetCollate sets the collate property value. Whether the printer should collate pages wehen printing multiple copies of a multi-page document.
func (m *PrintJobConfiguration) SetCollate(value *bool)() {
if m != nil {
m.collate = value
}
}
// SetColorMode sets the colorMode property value. The color mode the printer should use to print the job. Valid values are described in the table below. Read-only.
func (m *PrintJobConfiguration) SetColorMode(value *PrintColorMode)() {
if m != nil {
m.colorMode = value
}
}
// SetCopies sets the copies property value. The number of copies that should be printed. Read-only.
func (m *PrintJobConfiguration) SetCopies(value *int32)() {
if m != nil {
m.copies = value
}
}
// SetDpi sets the dpi property value. The resolution to use when printing the job, expressed in dots per inch (DPI). Read-only.
func (m *PrintJobConfiguration) SetDpi(value *int32)() {
if m != nil {
m.dpi = value
}
}
// SetDuplexMode sets the duplexMode property value. The duplex mode the printer should use when printing the job. Valid values are described in the table below. Read-only.
func (m *PrintJobConfiguration) SetDuplexMode(value *PrintDuplexMode)() {
if m != nil {
m.duplexMode = value
}
}
// SetFeedOrientation sets the feedOrientation property value. The orientation to use when feeding media into the printer. Valid values are described in the following table. Read-only.
func (m *PrintJobConfiguration) SetFeedOrientation(value *PrinterFeedOrientation)() {
if m != nil {
m.feedOrientation = value
}
}
// SetFinishings sets the finishings property value. Finishing processes to use when printing.
func (m *PrintJobConfiguration) SetFinishings(value []PrintFinishing)() {
if m != nil {
m.finishings = value
}
}
// SetFitPdfToPage sets the fitPdfToPage property value.
func (m *PrintJobConfiguration) SetFitPdfToPage(value *bool)() {
if m != nil {
m.fitPdfToPage = value
}
}
// SetInputBin sets the inputBin property value. The input bin (tray) to use when printing. See the printer's capabilities for a list of supported input bins.
func (m *PrintJobConfiguration) SetInputBin(value *string)() {
if m != nil {
m.inputBin = value
}
}
// SetMargin sets the margin property value. The margin settings to use when printing.
func (m *PrintJobConfiguration) SetMargin(value *PrintMargin)() {
if m != nil {
m.margin = value
}
}
// SetMediaSize sets the mediaSize property value. The media size to use when printing. Supports standard size names for ISO and ANSI media sizes. Valid values listed in the printerCapabilities topic.
func (m *PrintJobConfiguration) SetMediaSize(value *string)() {
if m != nil {
m.mediaSize = value
}
}
// SetMediaType sets the mediaType property value. The default media (such as paper) type to print the document on.
func (m *PrintJobConfiguration) SetMediaType(value *string)() {
if m != nil {
m.mediaType = value
}
}
// SetMultipageLayout sets the multipageLayout property value. The direction to lay out pages when multiple pages are being printed per sheet. Valid values are described in the following table.
func (m *PrintJobConfiguration) SetMultipageLayout(value *PrintMultipageLayout)() {
if m != nil {
m.multipageLayout = value
}
}
// SetOrientation sets the orientation property value. The orientation setting the printer should use when printing the job. Valid values are described in the following table.
func (m *PrintJobConfiguration) SetOrientation(value *PrintOrientation)() {
if m != nil {
m.orientation = value
}
}
// SetOutputBin sets the outputBin property value. The output bin to place completed prints into. See the printer's capabilities for a list of supported output bins.
func (m *PrintJobConfiguration) SetOutputBin(value *string)() {
if m != nil {
m.outputBin = value
}
}
// SetPageRanges sets the pageRanges property value. The page ranges to print. Read-only.
func (m *PrintJobConfiguration) SetPageRanges(value []IntegerRange)() {
if m != nil {
m.pageRanges = value
}
}
// SetPagesPerSheet sets the pagesPerSheet property value. The number of document pages to print on each sheet.
func (m *PrintJobConfiguration) SetPagesPerSheet(value *int32)() {
if m != nil {
m.pagesPerSheet = value
}
}
// SetQuality sets the quality property value. The print quality to use when printing the job. Valid values are described in the table below. Read-only.
func (m *PrintJobConfiguration) SetQuality(value *PrintQuality)() {
if m != nil {
m.quality = value
}
}
// SetScaling sets the scaling property value. Specifies how the printer should scale the document data to fit the requested media. Valid values are described in the following table.
func (m *PrintJobConfiguration) SetScaling(value *PrintScaling)() {
if m != nil {
m.scaling = value
}
} | models/microsoft/graph/print_job_configuration.go | 0.718989 | 0.422207 | print_job_configuration.go | starcoder |
package zltest
import "time"
// Entries represents zerolog log entries.
type Entries struct {
    e []*Entry // Captured log entries.
    t T        // Test manager.
}
// Get returns the list of Entry in Entries.
func (ets Entries) Get() []*Entry {
    return ets.e
}
// ExpStr tests that at least one log entry has key, its value is a
// string and it's equal to exp.
func (ets Entries) ExpStr(key string, exp string) {
    ets.exp(func(e *Entry) string { return e.expStr(key, exp) })
}
// NotExpStr tests that no log entry has key, its value is a
// string and it's equal to exp.
func (ets Entries) NotExpStr(key string, exp string) {
    ets.notExp(func(e *Entry) string { return e.expStr(key, exp) })
}
// ExpTime tests that at least one log entry has key, its value is a
// string representing time in zerolog.TimeFieldFormat and it's equal
// to exp.
func (ets Entries) ExpTime(key string, exp time.Time) {
    ets.exp(func(e *Entry) string { return e.expTime(key, exp) })
}
// NotExpTime tests that no log entry has key, its value is a
// string representing time in zerolog.TimeFieldFormat and it's equal
// to exp.
func (ets Entries) NotExpTime(key string, exp time.Time) {
    ets.notExp(func(e *Entry) string { return e.expTime(key, exp) })
}
// ExpDur tests that at least one log entry has key and its value is
// equal to exp time.Duration. The duration value in the entry is
// multiplied by zerolog.DurationFieldUnit before the comparison.
func (ets Entries) ExpDur(key string, exp time.Duration) {
    ets.exp(func(e *Entry) string { return e.expDur(key, exp) })
}
// NotExpDur tests that no log entry has key and its value is
// equal to exp time.Duration. The duration value in the entry is
// multiplied by zerolog.DurationFieldUnit before the comparison.
func (ets Entries) NotExpDur(key string, exp time.Duration) {
    ets.notExp(func(e *Entry) string { return e.expDur(key, exp) })
}
// ExpBool tests that at least one entry has a key, its value is
// boolean and equal to exp.
func (ets Entries) ExpBool(key string, exp bool) {
    ets.exp(func(e *Entry) string { return e.expBool(key, exp) })
}
// NotExpBool tests that no log entry has a key, its value is
// boolean and equal to exp.
func (ets Entries) NotExpBool(key string, exp bool) {
    ets.notExp(func(e *Entry) string { return e.expBool(key, exp) })
}
// ExpMsg tests that at least one log entry message field
// (zerolog.MessageFieldName) is equal to exp.
func (ets Entries) ExpMsg(exp string) {
    ets.exp(func(e *Entry) string { return e.expMsg(exp) })
}
// NotExpMsg tests that no log entry message field
// (zerolog.MessageFieldName) is equal to exp.
func (ets Entries) NotExpMsg(exp string) {
    ets.notExp(func(e *Entry) string { return e.expMsg(exp) })
}
// ExpNum tests that at least one log entry has key and its numerical
// value is equal to exp.
func (ets Entries) ExpNum(key string, exp float64) {
    ets.exp(func(e *Entry) string { return e.expNum(key, exp) })
}
// NotExpNum tests that no log entry has key and its numerical
// value is equal to exp.
func (ets Entries) NotExpNum(key string, exp float64) {
    ets.notExp(func(e *Entry) string { return e.expNum(key, exp) })
}
// exp fails the test unless f reports a match (an empty string result)
// for at least one captured entry.
func (ets Entries) exp(f func(*Entry) string) {
    for _, entry := range ets.Get() {
        if f(entry) == "" {
            return
        }
    }
    ets.t.Error("No matching log entry was found")
}
func (ets Entries) notExp(f func(*Entry) string) {
e := ets.Get()
for ent := range e {
if f(e[ent]) == "" {
ets.t.Error("Matching log entry was found")
}
}
} | entries.go | 0.617513 | 0.654936 | entries.go | starcoder |
package service
import (
"github.com/Jeffail/benthos/v3/lib/metrics"
)
// Metrics allows plugin authors to emit custom metrics from components that are
// exported the same way as native Benthos metrics.
type Metrics struct {
    t metrics.Type // Underlying native metrics aggregator the custom metrics are registered with.
}
// newReverseAirGapMetrics wraps a native metrics.Type in the public Metrics facade.
func newReverseAirGapMetrics(t metrics.Type) *Metrics {
    return &Metrics{t}
}
// NewCounter creates a new counter metric with a name and variant list of label
// keys.
func (m *Metrics) NewCounter(name string, labelKeys ...string) *MetricCounter {
    return &MetricCounter{m.t.GetCounterVec(name, labelKeys)}
}
// NewTimer creates a new timer metric with a name and variant list of label
// keys.
func (m *Metrics) NewTimer(name string, labelKeys ...string) *MetricTimer {
    return &MetricTimer{m.t.GetTimerVec(name, labelKeys)}
}
// NewGauge creates a new gauge metric with a name and variant list of label
// keys.
func (m *Metrics) NewGauge(name string, labelKeys ...string) *MetricGauge {
    return &MetricGauge{m.t.GetGaugeVec(name, labelKeys)}
}
//------------------------------------------------------------------------------
// MetricCounter represents a counter metric of a given name and labels.
type MetricCounter struct {
    cv metrics.StatCounterVec
}
// Incr increments a counter metric by an amount, the number of label values
// must match the number and order of labels specified when the counter was
// created.
func (c *MetricCounter) Incr(count int64, labelValues ...string) {
    // The underlying vec's error is deliberately discarded.
    _ = c.cv.With(labelValues...).Incr(count)
}
// MetricTimer represents a timing metric of a given name and labels.
type MetricTimer struct {
    tv metrics.StatTimerVec
}
// Timing adds a delta to a timing metric, the number of label values must match
// the number and order of labels specified when the timing was created.
func (t *MetricTimer) Timing(delta int64, labelValues ...string) {
    // The underlying vec's error is deliberately discarded.
    _ = t.tv.With(labelValues...).Timing(delta)
}
// MetricGauge represents a gauge metric of a given name and labels.
type MetricGauge struct {
gv metrics.StatGaugeVec
}
// Set a gauge metric, the number of label values must match the number and
// order of labels specified when the gauge was created.
func (g *MetricGauge) Set(value int64, labelValues ...string) {
_ = g.gv.With(labelValues...).Set(value)
} | public/x/service/metrics.go | 0.851614 | 0.407805 | metrics.go | starcoder |
package primitives
import (
"bytes"
"fmt"
)
// ProtocolVersion is a uint32-based primitive type.
type ProtocolVersion uint32

// String formats the value as lowercase hexadecimal.
func (v ProtocolVersion) String() string {
    return fmt.Sprintf("%x", uint32(v))
}

// Equal reports whether both values are the same.
func (v ProtocolVersion) Equal(other ProtocolVersion) bool {
    return v == other
}

// KeyForMap returns a representation usable as a map key.
func (v ProtocolVersion) KeyForMap() uint32 {
    return uint32(v)
}

// VirtualChainId is a uint32-based primitive type.
type VirtualChainId uint32

// String formats the value as lowercase hexadecimal.
func (v VirtualChainId) String() string {
    return fmt.Sprintf("%x", uint32(v))
}

// Equal reports whether both values are the same.
func (v VirtualChainId) Equal(other VirtualChainId) bool {
    return v == other
}

// KeyForMap returns a representation usable as a map key.
func (v VirtualChainId) KeyForMap() uint32 {
    return uint32(v)
}

// BlockHeight is a uint64-based primitive type.
type BlockHeight uint64

// String formats the value as lowercase hexadecimal.
func (v BlockHeight) String() string {
    return fmt.Sprintf("%x", uint64(v))
}

// Equal reports whether both values are the same.
func (v BlockHeight) Equal(other BlockHeight) bool {
    return v == other
}

// KeyForMap returns a representation usable as a map key.
func (v BlockHeight) KeyForMap() uint64 {
    return uint64(v)
}

// TimestampNano is a uint64-based primitive type.
type TimestampNano uint64

// String formats the value as lowercase hexadecimal.
func (v TimestampNano) String() string {
    return fmt.Sprintf("%x", uint64(v))
}

// Equal reports whether both values are the same.
func (v TimestampNano) Equal(other TimestampNano) bool {
    return v == other
}

// KeyForMap returns a representation usable as a map key.
func (v TimestampNano) KeyForMap() uint64 {
    return uint64(v)
}

// TimestampSeconds is a uint32-based primitive type.
type TimestampSeconds uint32

// String formats the value as lowercase hexadecimal.
func (v TimestampSeconds) String() string {
    return fmt.Sprintf("%x", uint32(v))
}

// Equal reports whether both values are the same.
func (v TimestampSeconds) Equal(other TimestampSeconds) bool {
    return v == other
}

// KeyForMap returns a representation usable as a map key.
func (v TimestampSeconds) KeyForMap() uint32 {
    return uint32(v)
}
// NodeAddress is a []byte-based primitive type.
type NodeAddress []byte

// String formats the bytes as lowercase hexadecimal.
func (a NodeAddress) String() string {
    return fmt.Sprintf("%x", []byte(a))
}

// Equal reports whether both byte sequences are identical.
func (a NodeAddress) Equal(other NodeAddress) bool {
    return bytes.Equal(a, other)
}

// KeyForMap returns a representation usable as a map key.
func (a NodeAddress) KeyForMap() string {
    return string(a)
}

// ClientAddress is a []byte-based primitive type.
type ClientAddress []byte

// String formats the bytes as lowercase hexadecimal.
func (a ClientAddress) String() string {
    return fmt.Sprintf("%x", []byte(a))
}

// Equal reports whether both byte sequences are identical.
func (a ClientAddress) Equal(other ClientAddress) bool {
    return bytes.Equal(a, other)
}

// KeyForMap returns a representation usable as a map key.
func (a ClientAddress) KeyForMap() string {
    return string(a)
}
// ContractName is a string-based primitive type.
type ContractName string

// String returns the name as a plain string.
// Previously this used fmt.Sprintf with the value as the format string,
// which corrupted names containing '%' verbs (flagged by go vet).
func (x ContractName) String() string {
    return string(x)
}

// Equal reports whether both names are identical.
func (x ContractName) Equal(y ContractName) bool {
    return x == y
}

// KeyForMap returns a representation usable as a map key.
func (x ContractName) KeyForMap() string {
    return string(x)
}
// MethodName is a string-based primitive type.
type MethodName string

// String returns the name as a plain string.
// Previously this used fmt.Sprintf with the value as the format string,
// which corrupted names containing '%' verbs (flagged by go vet).
func (x MethodName) String() string {
    return string(x)
}

// Equal reports whether both names are identical.
func (x MethodName) Equal(y MethodName) bool {
    return x == y
}

// KeyForMap returns a representation usable as a map key.
func (x MethodName) KeyForMap() string {
    return string(x)
}
// EventName is a string-based primitive type.
type EventName string

// String returns the name as a plain string.
// Previously this used fmt.Sprintf with the value as the format string,
// which corrupted names containing '%' verbs (flagged by go vet).
func (x EventName) String() string {
    return string(x)
}

// Equal reports whether both names are identical.
func (x EventName) Equal(y EventName) bool {
    return x == y
}

// KeyForMap returns a representation usable as a map key.
func (x EventName) KeyForMap() string {
    return string(x)
}
// ExecutionContextId is a []byte-based primitive type.
type ExecutionContextId []byte

// String formats the bytes as lowercase hexadecimal.
func (b ExecutionContextId) String() string {
    return fmt.Sprintf("%x", []byte(b))
}

// Equal reports whether both byte sequences are identical.
func (b ExecutionContextId) Equal(other ExecutionContextId) bool {
    return bytes.Equal(b, other)
}

// KeyForMap returns a representation usable as a map key.
func (b ExecutionContextId) KeyForMap() string {
    return string(b)
}

// LeanHelixMessageContent is a []byte-based primitive type.
type LeanHelixMessageContent []byte

// String formats the bytes as lowercase hexadecimal.
func (b LeanHelixMessageContent) String() string {
    return fmt.Sprintf("%x", []byte(b))
}

// Equal reports whether both byte sequences are identical.
func (b LeanHelixMessageContent) Equal(other LeanHelixMessageContent) bool {
    return bytes.Equal(b, other)
}

// KeyForMap returns a representation usable as a map key.
func (b LeanHelixMessageContent) KeyForMap() string {
    return string(b)
}

// MerkleTreeProof is a []byte-based primitive type.
type MerkleTreeProof []byte

// String formats the bytes as lowercase hexadecimal.
func (b MerkleTreeProof) String() string {
    return fmt.Sprintf("%x", []byte(b))
}

// Equal reports whether both byte sequences are identical.
func (b MerkleTreeProof) Equal(other MerkleTreeProof) bool {
    return bytes.Equal(b, other)
}

// KeyForMap returns a representation usable as a map key.
func (b MerkleTreeProof) KeyForMap() string {
    return string(b)
}

// LeanHelixBlockProof is a []byte-based primitive type.
type LeanHelixBlockProof []byte

// String formats the bytes as lowercase hexadecimal.
func (b LeanHelixBlockProof) String() string {
    return fmt.Sprintf("%x", []byte(b))
}

// Equal reports whether both byte sequences are identical.
func (b LeanHelixBlockProof) Equal(other LeanHelixBlockProof) bool {
    return bytes.Equal(b, other)
}

// KeyForMap returns a representation usable as a map key.
func (b LeanHelixBlockProof) KeyForMap() string {
    return string(b)
}

// PackedReceiptProof is a []byte-based primitive type.
type PackedReceiptProof []byte

// String formats the bytes as lowercase hexadecimal.
func (b PackedReceiptProof) String() string {
    return fmt.Sprintf("%x", []byte(b))
}

// Equal reports whether both byte sequences are identical.
func (b PackedReceiptProof) Equal(other PackedReceiptProof) bool {
    return bytes.Equal(b, other)
}

// KeyForMap returns a representation usable as a map key.
func (b PackedReceiptProof) KeyForMap() string {
    return string(b)
}

// PackedEventsArray is a []byte-based primitive type.
type PackedEventsArray []byte

// String formats the bytes as lowercase hexadecimal.
func (b PackedEventsArray) String() string {
    return fmt.Sprintf("%x", []byte(b))
}

// Equal reports whether both byte sequences are identical.
func (b PackedEventsArray) Equal(other PackedEventsArray) bool {
    return bytes.Equal(b, other)
}

// KeyForMap returns a representation usable as a map key.
func (b PackedEventsArray) KeyForMap() string {
    return string(b)
}

// PackedArgumentArray is a []byte-based primitive type.
type PackedArgumentArray []byte

// String formats the bytes as lowercase hexadecimal.
func (b PackedArgumentArray) String() string {
    return fmt.Sprintf("%x", []byte(b))
}

// Equal reports whether both byte sequences are identical.
func (b PackedArgumentArray) Equal(other PackedArgumentArray) bool {
    return bytes.Equal(b, other)
}

// KeyForMap returns a representation usable as a map key.
func (b PackedArgumentArray) KeyForMap() string {
    return string(b)
}
// Weight is a uint64-based primitive type.
type Weight uint64

// String formats the value as lowercase hexadecimal.
func (v Weight) String() string {
    return fmt.Sprintf("%x", uint64(v))
}

// Equal reports whether both values are the same.
func (v Weight) Equal(other Weight) bool {
    return v == other
}

// KeyForMap returns a representation usable as a map key.
func (v Weight) KeyForMap() uint64 {
    return uint64(v)
}
package raytrc
import (
"image"
"image/color"
"image/png"
"math"
"os"
)
// Vector - struct holding X Y Z values of a 3D vector
type Vector struct {
    X, Y, Z float64
}

// Add - componentwise sum of two vectors
func (v Vector) Add(w Vector) Vector {
    return Vector{v.X + w.X, v.Y + w.Y, v.Z + w.Z}
}

// Sub - componentwise difference v - w
func (v Vector) Sub(w Vector) Vector {
    return Vector{v.X - w.X, v.Y - w.Y, v.Z - w.Z}
}

// MultiplyByScalar - scales every component by s
func (v Vector) MultiplyByScalar(s float64) Vector {
    return Vector{v.X * s, v.Y * s, v.Z * s}
}

// Length - Euclidean length (magnitude) of the vector
func (v Vector) Length() float64 {
    return math.Sqrt(v.Dot(v))
}

// Dot - dot product of two vectors
func (v Vector) Dot(w Vector) float64 {
    return v.X*w.X + v.Y*w.Y + v.Z*w.Z
}

// Cross - cross product of two vectors
func (v Vector) Cross(w Vector) Vector {
    return Vector{
        v.Y*w.Z - v.Z*w.Y,
        v.Z*w.X - v.X*w.Z,
        v.X*w.Y - v.Y*w.X,
    }
}

// Normalize - unit vector pointing in the same direction.
// NOTE(review): a zero vector produces non-finite components (division by zero length).
func (v Vector) Normalize() Vector {
    return v.MultiplyByScalar(1. / v.Length())
}
// Scene represents the s on which the scene is projected as a 2D picture
type Scene struct {
Width, Height int
Img *image.RGBA
}
// NewScene returns a new Scene
func NewScene(width int, height int) *Scene {
return &Scene{
Width: width,
Height: height,
Img: image.NewRGBA(image.Rect(0, 0, width, height)),
}
}
// EachPixel traverses the image s and calls the provided function for each pixel
func (s *Scene) EachPixel(colorFunction func(int, int) color.RGBA) {
for x := 0; x < s.Width; x++ {
for y := 0; y < s.Height; y++ {
s.setPixel(x, y, colorFunction(x, y))
}
}
}
// Save exports the image to hdd
func (s *Scene) Save(filename string) {
f, err := os.Create(filename)
if err != nil {
panic(err)
}
defer f.Close()
png.Encode(f, s.Img)
}
func (s *Scene) setPixel(x int, y int, color color.RGBA) {
s.Img.Set(x, y, color)
}
/*
func main() {
var width = 200
var height = 400
scene := NewScene(width, height)
scene.EachPixel(func(x, y int) color.RGBA {
return color.RGBA{
uint8(x * 255 / width),
uint8(y * 255 / height),
150,
255,
}
})
scene.Save(fmt.Sprintf("./renders/%d.png", time.Now().Unix()))
}
*/
package graphics
import (
"fmt"
mgl "github.com/go-gl/mathgl/mgl32"
"github.com/inkyblackness/shocked-client/opengl"
)
// bitmapTextureVertexShaderSource projects each 2D vertex through the model,
// view and projection matrices and forwards the UV coordinate unchanged to
// the fragment stage.
var bitmapTextureVertexShaderSource = `
#version 150
precision mediump float;
in vec2 vertexPosition;
in vec2 uvPosition;
uniform mat4 modelMatrix;
uniform mat4 viewMatrix;
uniform mat4 projectionMatrix;
out vec2 uv;
void main(void) {
	gl_Position = projectionMatrix * viewMatrix * modelMatrix * vec4(vertexPosition, 0.0, 1.0);
	uv = uvPosition;
}
`
// bitmapTextureFragmentShaderSource resolves each bitmap texel through the
// palette texture: the texel's alpha channel is used as the palette index,
// and fully transparent texels (a == 0.0) are discarded.
var bitmapTextureFragmentShaderSource = `
#version 150
precision mediump float;
uniform sampler2D palette;
uniform sampler2D bitmap;
in vec2 uv;
out vec4 fragColor;
void main(void) {
	vec4 pixel = texture(bitmap, uv);
	if (pixel.a > 0.0) {
		fragColor = texture(palette, vec2(pixel.a, 0.5));
	} else {
		discard;
	}
}
`
// BitmapTextureRenderer renders bitmapped textures based on a palette.
type BitmapTextureRenderer struct {
    renderContext           *RenderContext // Provides the OpenGL interface.
    program                 uint32         // Linked shader program handle.
    vao                     *opengl.VertexArrayObject
    vertexPositionBuffer    uint32 // Buffer of interleaved position+UV vertex data (2+2 floats per vertex).
    vertexPositionAttrib    int32  // Attribute location of vertexPosition.
    uvPositionAttrib        int32  // Attribute location of uvPosition.
    modelMatrixUniform      opengl.Matrix4Uniform
    viewMatrixUniform       opengl.Matrix4Uniform
    projectionMatrixUniform opengl.Matrix4Uniform
    paletteUniform          int32   // Sampler uniform location for the palette texture.
    bitmapUniform           int32   // Sampler uniform location for the bitmap texture.
    paletteTexture          Texture // Palette texture supplied at construction time.
}
// NewBitmapTextureRenderer returns a new instance of a texture renderer for bitmaps.
// It compiles and links the bitmap shader program (panicking on failure) and
// configures the vertex array object for the interleaved vertex layout.
func NewBitmapTextureRenderer(renderContext *RenderContext, paletteTexture Texture) *BitmapTextureRenderer {
    gl := renderContext.OpenGl()
    program, programErr := opengl.LinkNewStandardProgram(gl, bitmapTextureVertexShaderSource, bitmapTextureFragmentShaderSource)

    if programErr != nil {
        panic(fmt.Errorf("BitmapTextureRenderer shader failed: %v", programErr))
    }
    renderer := &BitmapTextureRenderer{
        renderContext:           renderContext,
        program:                 program,
        vao:                     opengl.NewVertexArrayObject(gl, program),
        vertexPositionBuffer:    gl.GenBuffers(1)[0],
        vertexPositionAttrib:    gl.GetAttribLocation(program, "vertexPosition"),
        uvPositionAttrib:        gl.GetAttribLocation(program, "uvPosition"),
        modelMatrixUniform:      opengl.Matrix4Uniform(gl.GetUniformLocation(program, "modelMatrix")),
        viewMatrixUniform:       opengl.Matrix4Uniform(gl.GetUniformLocation(program, "viewMatrix")),
        projectionMatrixUniform: opengl.Matrix4Uniform(gl.GetUniformLocation(program, "projectionMatrix")),
        paletteTexture:          paletteTexture,
        paletteUniform:          gl.GetUniformLocation(program, "palette"),
        bitmapUniform:           gl.GetUniformLocation(program, "bitmap")}

    renderer.vao.WithSetter(func(gl opengl.OpenGl) {
        // Interleaved layout: 2 position floats followed by 2 UV floats per
        // vertex, so the stride is 4 floats (16 bytes).
        floatSize := int(4)
        stride := int32(4 * floatSize)

        gl.EnableVertexAttribArray(uint32(renderer.vertexPositionAttrib))
        gl.EnableVertexAttribArray(uint32(renderer.uvPositionAttrib))
        gl.BindBuffer(opengl.ARRAY_BUFFER, renderer.vertexPositionBuffer)
        gl.VertexAttribOffset(uint32(renderer.vertexPositionAttrib), 2, opengl.FLOAT, false, stride, 0*floatSize)
        gl.VertexAttribOffset(uint32(renderer.uvPositionAttrib), 2, opengl.FLOAT, false, stride, 2*floatSize)
        gl.BindBuffer(opengl.ARRAY_BUFFER, 0)
    })

    return renderer
}
// Dispose clears any resources: the vertex array object, the vertex buffer,
// and the shader program. The palette texture is not owned by this renderer
// and is left untouched.
func (renderer *BitmapTextureRenderer) Dispose() {
	gl := renderer.renderContext.OpenGl()

	renderer.vao.Dispose()
	gl.DeleteBuffers([]uint32{renderer.vertexPositionBuffer})
	gl.DeleteProgram(renderer.program)
}
// Render implements the TextureRenderer interface.
// It uploads a unit quad (two triangles) whose UVs cover textureRect, then
// draws it with the palette on texture unit 0 and the bitmap on unit 1.
func (renderer *BitmapTextureRenderer) Render(modelMatrix *mgl.Mat4, texture Texture, textureRect Rectangle) {
	gl := renderer.renderContext.OpenGl()

	{
		// Interleaved [x, y, u, v] per vertex; six vertices form the two
		// triangles of the unit square. Rebuilt every call because the
		// requested textureRect can change between draws.
		baseRect := RectByCoord(0, 0, 1.0, 1.0)
		var vertices = []float32{
			baseRect.Left(), baseRect.Top(), textureRect.Left(), textureRect.Top(),
			baseRect.Left(), baseRect.Bottom(), textureRect.Left(), textureRect.Bottom(),
			baseRect.Right(), baseRect.Top(), textureRect.Right(), textureRect.Top(),

			baseRect.Right(), baseRect.Top(), textureRect.Right(), textureRect.Top(),
			baseRect.Left(), baseRect.Bottom(), textureRect.Left(), textureRect.Bottom(),
			baseRect.Right(), baseRect.Bottom(), textureRect.Right(), textureRect.Bottom()}

		gl.BindBuffer(opengl.ARRAY_BUFFER, renderer.vertexPositionBuffer)
		gl.BufferData(opengl.ARRAY_BUFFER, len(vertices)*4, vertices, opengl.STATIC_DRAW)
		gl.BindBuffer(opengl.ARRAY_BUFFER, 0)
	}
	renderer.vao.OnShader(func() {
		renderer.modelMatrixUniform.Set(gl, modelMatrix)
		renderer.viewMatrixUniform.Set(gl, renderer.renderContext.ViewMatrix())
		renderer.projectionMatrixUniform.Set(gl, renderer.renderContext.ProjectionMatrix())

		// Texture unit 0: palette lookup table.
		textureUnit := int32(0)
		gl.ActiveTexture(opengl.TEXTURE0 + uint32(textureUnit))
		gl.BindTexture(opengl.TEXTURE_2D, renderer.paletteTexture.Handle())
		gl.Uniform1i(renderer.paletteUniform, textureUnit)

		// Texture unit 1: bitmap whose alpha channel indexes the palette.
		textureUnit = 1
		gl.ActiveTexture(opengl.TEXTURE0 + uint32(textureUnit))
		gl.Uniform1i(renderer.bitmapUniform, textureUnit)
		gl.BindTexture(opengl.TEXTURE_2D, texture.Handle())

		gl.DrawArrays(opengl.TRIANGLES, 0, 6)
	})
}
package main
import (
"fmt"
"sort"
)
// infinite is the largest value representable by int (all ones shifted right
// once to clear the sign bit, i.e. math.MaxInt). It marks vertices whose
// shortest distance from the origin is still unknown or unreachable.
const infinite = int(^uint(0) >> 1)
// DijkstraTable pairs a vertex with the weight of the currently-known
// shortest path reaching it; used to rank unvisited vertices by distance.
type DijkstraTable struct {
	Vertex *Vertex
	Weight int
}
// NewTable returns an empty distance table mapping vertices to the weight of
// the shortest currently-known path from the origin.
func (g *Graph) NewTable() map[*Vertex]int {
	// Pre-size for one entry per vertex (InitializeTable fills them all)
	// instead of the previous meaningless zero-size hint.
	return make(map[*Vertex]int, len(g.Vertices))
}
// InitializeTable seeds the distance table: every vertex starts out at
// infinite (distance unknown / possibly unreachable), and the origin vertex
// starts at zero. The same map is returned for call-chaining convenience.
func (g *Graph) InitializeTable(table map[*Vertex]int, origin *Vertex) map[*Vertex]int {
	for vertex := range g.Vertices {
		table[vertex] = infinite
	}
	table[origin] = 0
	return table
}
// Dijkstra computes the shortest-path weight from origin to every vertex in
// the graph and returns the resulting distance table. Vertices that cannot
// be reached from origin keep the sentinel value `infinite`.
func (g *Graph) Dijkstra(origin *Vertex) map[*Vertex]int {
	// Table tracking the shortest known distance to every vertex.
	table := g.NewTable()
	// All distances start at infinite except the origin, which starts at 0.
	table = g.InitializeTable(table, origin)
	// Set of vertices whose shortest distance has been finalized.
	visited := make(map[*Vertex]struct{}, len(g.Vertices))
	for range g.Vertices {
		// Pick the closest vertex that has not been finalized yet.
		vertex := GetClosestVertex(table, visited)
		// Bug fix: only relax edges of reachable vertices. Previously
		// `infinite + e.Weight` overflowed int, producing negative
		// distances that corrupted the table for disconnected graphs.
		if table[vertex] != infinite {
			// Relax every edge leaving this vertex.
			for _, e := range g.GetNeighborEdges(vertex) {
				distance := table[vertex] + e.Weight
				// Keep the smaller of the known and the newly found distance.
				if distance < table[e.To] {
					table[e.To] = distance
				}
			}
		}
		// Mark current vertex as finalized.
		visited[vertex] = struct{}{}
	}
	return table
}
// GetClosestVertex returns the unvisited vertex with the smallest known
// distance in the table. It assumes at least one unvisited vertex exists.
func GetClosestVertex(table map[*Vertex]int, visited map[*Vertex]struct{}) *Vertex {
	candidates := make([]*DijkstraTable, 0, len(table))
	// Collect every vertex that has not been finalized yet.
	for vertex, weight := range table {
		if _, seen := visited[vertex]; seen {
			continue
		}
		candidates = append(candidates, &DijkstraTable{Vertex: vertex, Weight: weight})
	}
	// Rank candidates by ascending weight; the head is the closest vertex.
	sort.Slice(candidates, func(a, b int) bool {
		return candidates[a].Weight < candidates[b].Weight
	})
	return candidates[0].Vertex
}
// GetNeighborEdges returns every edge in the graph that leaves the given
// vertex (i.e. whose From endpoint is the vertex).
func (g *Graph) GetNeighborEdges(vertex *Vertex) []*Edge {
	neighbors := make([]*Edge, 0)
	for edge := range g.Edges {
		if edge.From != vertex {
			continue
		}
		neighbors = append(neighbors, edge)
	}
	return neighbors
}
// PrintShortestPaths prints the shortest paths from origin vertex to all others.
func (g *Graph) PrintShortestPaths(origin *Vertex) {
spaths := fmt.Sprintf("\nShortest Paths:")
for v, w := range g.Dijkstra(origin) {
spaths += fmt.Sprintf("\n\tfrom %s to %s: %d", origin.Label, v.Label, w)
}
fmt.Println(spaths)
} | go/graph/dijsktra.go | 0.750004 | 0.473231 | dijsktra.go | starcoder |
// +build gofuzz
package bn256
import (
"bytes"
"math/big"
cloudflare "github.com/matrix/go-matrix/crypto/bn256/cloudflare"
google "github.com/matrix/go-matrix/crypto/bn256/google"
)
// FuzzAdd fuzzes bn256 addition between the Google and Cloudflare libraries.
// Input layout: two marshalled G1 points, 64 bytes each (128 bytes total).
// Returns 0 (uninteresting input) per go-fuzz convention; panics signal a
// divergence between the two implementations.
func FuzzAdd(data []byte) int {
	// Ensure we have enough data in the first place
	if len(data) != 128 {
		return 0
	}
	// Ensure both libs can parse the first curve point.
	// Both must agree on whether the bytes are a valid point.
	xc := new(cloudflare.G1)
	_, errc := xc.Unmarshal(data[:64])

	xg := new(google.G1)
	_, errg := xg.Unmarshal(data[:64])

	if (errc == nil) != (errg == nil) {
		panic("parse mismatch")
	} else if errc != nil {
		return 0
	}
	// Ensure both libs can parse the second curve point
	yc := new(cloudflare.G1)
	_, errc = yc.Unmarshal(data[64:])

	yg := new(google.G1)
	_, errg = yg.Unmarshal(data[64:])

	if (errc == nil) != (errg == nil) {
		panic("parse mismatch")
	} else if errc != nil {
		return 0
	}
	// Add the two points and ensure they result in the same output
	rc := new(cloudflare.G1)
	rc.Add(xc, yc)

	rg := new(google.G1)
	rg.Add(xg, yg)

	if !bytes.Equal(rc.Marshal(), rg.Marshal()) {
		panic("add mismatch")
	}
	return 0
}
// FuzzMul fuzzes bn256 scalar multiplication between the Google and Cloudflare
// libraries.
// Input layout: one marshalled G1 point (64 bytes) followed by a 32-byte
// big-endian scalar (96 bytes total). Panics signal a divergence.
func FuzzMul(data []byte) int {
	// Ensure we have enough data in the first place
	if len(data) != 96 {
		return 0
	}
	// Ensure both libs can parse the curve point
	pc := new(cloudflare.G1)
	_, errc := pc.Unmarshal(data[:64])

	pg := new(google.G1)
	_, errg := pg.Unmarshal(data[:64])

	if (errc == nil) != (errg == nil) {
		panic("parse mismatch")
	} else if errc != nil {
		return 0
	}
	// Multiply the point by the scalar and ensure both libraries produce
	// the same output.
	rc := new(cloudflare.G1)
	rc.ScalarMult(pc, new(big.Int).SetBytes(data[64:]))

	rg := new(google.G1)
	rg.ScalarMult(pg, new(big.Int).SetBytes(data[64:]))

	if !bytes.Equal(rc.Marshal(), rg.Marshal()) {
		panic("scalar mul mismatch")
	}
	return 0
}
// FuzzPair fuzzes the bn256 pairing check between the Google and Cloudflare
// libraries.
// Input layout: a marshalled G1 point (64 bytes) followed by a marshalled G2
// twist point (128 bytes; 192 bytes total). Panics signal a divergence.
func FuzzPair(data []byte) int {
	// Ensure we have enough data in the first place
	if len(data) != 192 {
		return 0
	}
	// Ensure both libs can parse the curve point
	pc := new(cloudflare.G1)
	_, errc := pc.Unmarshal(data[:64])

	pg := new(google.G1)
	_, errg := pg.Unmarshal(data[:64])

	if (errc == nil) != (errg == nil) {
		panic("parse mismatch")
	} else if errc != nil {
		return 0
	}
	// Ensure both libs can parse the twist point
	tc := new(cloudflare.G2)
	_, errc = tc.Unmarshal(data[64:])

	tg := new(google.G2)
	_, errg = tg.Unmarshal(data[64:])

	if (errc == nil) != (errg == nil) {
		panic("parse mismatch")
	} else if errc != nil {
		return 0
	}
	// Pair the two points and ensure they result in the same output
	if cloudflare.PairingCheck([]*cloudflare.G1{pc}, []*cloudflare.G2{tc}) != google.PairingCheck([]*google.G1{pg}, []*google.G2{tg}) {
		panic("pair mismatch")
	}
	return 0
}
package govcd
/*
* Copyright 2020 VMware, Inc. All rights reserved. Licensed under the Apache v2 License.
*/
import (
"fmt"
"regexp"
"github.com/kr/pretty"
)
// A conditionDef is the data being carried by the filter engine when
// performing comparisons. conditionType selects the match function;
// stored holds that function's input (regexp, name, date expression, ...).
type conditionDef struct {
	conditionType string      // it's one of SupportedFilters
	stored        interface{} // Any value as handled by the filter being used
}

// A dateCondition can evaluate a date expression (e.g. ">= 2020-01-01").
type dateCondition struct {
	dateExpression string
}

// A regexpCondition is a generic filter that is the basis for other filters
// that require a regular expression.
type regexpCondition struct {
	regExpression *regexp.Regexp
}

// an ipCondition is a condition that compares an IP using a regexp
type ipCondition regexpCondition

// a nameCondition is a condition that compares a name using a regexp
type nameCondition regexpCondition

// a metadataRegexpCondition compares the values corresponding to the given
// metadata key using a regexp
type metadataRegexpCondition struct {
	key           string
	regExpression *regexp.Regexp
}

// a parentCondition compares the entity parent name with the one stored
type parentCondition struct {
	parentName string
}

// a parentIdCondition compares the entity parent ID with the one stored
type parentIdCondition struct {
	parentId string
}
// matchParent matches the wanted parent name (passed in 'stored') to the parent of the queryItem
// Input:
//   - stored: the data of the condition (a parentCondition)
//   - item: a QueryItem
//
// Returns:
//   - bool: the result of the comparison
//   - string: a description of the operation
//   - error: an error when the input is not as expected
func matchParent(stored, item interface{}) (bool, string, error) {
	condition, ok := stored.(parentCondition)
	if !ok {
		return false, "", fmt.Errorf("stored value is not a Parent condition (%# v)", pretty.Formatter(stored))
	}
	queryItem, ok := item.(QueryItem)
	if !ok {
		return false, "", fmt.Errorf("item is not a queryItem searchable by parent: %# v", pretty.Formatter(item))
	}
	parent := queryItem.GetParentName()
	// Reuse the already-fetched parent name instead of calling GetParentName a second time.
	return condition.parentName == parent, fmt.Sprintf("%s == %s", condition.parentName, parent), nil
}
// matchParentId compares the wanted parent ID (carried in 'stored') with the
// parent ID of the query item. Both IDs are normalized through extractUuid
// before comparing, so differently formatted IDs compare equal.
// Input:
//   - stored: the data of the condition (a parentIdCondition)
//   - item: a QueryItem
//
// Returns:
//   - bool: the result of the comparison
//   - string: a description of the operation
//   - error: an error when the input is not as expected
func matchParentId(stored, item interface{}) (bool, string, error) {
	condition, ok := stored.(parentIdCondition)
	if !ok {
		return false, "", fmt.Errorf("stored value is not a parent ID condition (%# v)", pretty.Formatter(stored))
	}
	queryItem, ok := item.(QueryItem)
	if !ok {
		return false, "", fmt.Errorf("item is not a queryItem searchable by parent ID: %# v", pretty.Formatter(item))
	}
	wanted := extractUuid(condition.parentId)
	actual := extractUuid(queryItem.GetParentId())
	return wanted == actual, fmt.Sprintf("%s =~ %s", wanted, actual), nil
}
// matchName evaluates the name regular expression (carried in 'stored')
// against the name of the query item.
// Input:
//   - stored: the data of the condition (a nameCondition)
//   - item: a QueryItem
//
// Returns:
//   - bool: the result of the comparison
//   - string: a description of the operation
//   - error: an error when the input is not as expected
func matchName(stored, item interface{}) (bool, string, error) {
	condition, ok := stored.(nameCondition)
	if !ok {
		return false, "", fmt.Errorf("stored value is not a Name Regexp (%# v)", pretty.Formatter(stored))
	}
	queryItem, ok := item.(QueryItem)
	if !ok {
		return false, "", fmt.Errorf("item is not a queryItem searchable by regex: %# v", pretty.Formatter(item))
	}
	name := queryItem.GetName()
	matched := condition.regExpression.MatchString(name)
	return matched, fmt.Sprintf("%s =~ %s", condition.regExpression.String(), name), nil
}
// matchIp matches an IP (passed in 'stored') to the IP of the queryItem
// Input:
//   - stored: the data of the condition (an ipCondition)
//   - item: a QueryItem
//
// Returns:
//   - bool: the result of the comparison
//   - string: a description of the operation
//   - error: an error when the input is not as expected (including items
//     that do not carry an IP at all)
func matchIp(stored, item interface{}) (bool, string, error) {
	re, ok := stored.(ipCondition)
	if !ok {
		return false, "", fmt.Errorf("stored value is not a Condition Regexp (%# v)", pretty.Formatter(stored))
	}
	queryItem, ok := item.(QueryItem)
	if !ok {
		return false, "", fmt.Errorf("item is not a queryItem searchable by Ip: %# v", pretty.Formatter(item))
	}
	ip := queryItem.GetIp()
	if ip == "" {
		return false, "", fmt.Errorf("%s %s doesn't have an IP", queryItem.GetType(), queryItem.GetName())
	}
	// Reuse the already-fetched IP instead of calling GetIp a second time.
	return re.regExpression.MatchString(ip), fmt.Sprintf("%s =~ %s", re.regExpression.String(), ip), nil
}
// matchDate matches a date expression (passed in 'stored') to the date of the queryItem
// Input:
//   - stored: the data of the condition (a dateCondition)
//   - item: a QueryItem
//
// Returns:
//   - bool: the result of the comparison
//   - string: a description of the operation
//   - error: an error when the input is not as expected
func matchDate(stored, item interface{}) (bool, string, error) {
	expr, ok := stored.(dateCondition)
	if !ok {
		return false, "", fmt.Errorf("stored value is not a condition date (%# v)", pretty.Formatter(stored))
	}
	queryItem, ok := item.(QueryItem)
	if !ok {
		return false, "", fmt.Errorf("item is not a queryItem searchable by date: %# v", pretty.Formatter(item))
	}
	// Items without a date simply do not match; note this is NOT reported
	// as an error (unlike matchIp) and the description string stays empty.
	if queryItem.GetDate() == "" {
		return false, "", nil
	}

	result, err := compareDate(expr.dateExpression, queryItem.GetDate())
	return result, fmt.Sprintf("%s %s", queryItem.GetDate(), expr.dateExpression), err
}
// matchMetadata matches a value (passed in 'stored') to the metadata value retrieved from queryItem
// Input:
//   - stored: the data of the condition (a metadataRegexpCondition: a metadata
//     key plus a regexp for its value)
//   - item: a QueryItem
//
// Returns:
//   - bool: the result of the comparison
//   - string: a description of the operation
//   - error: an error when the input is not as expected
func matchMetadata(stored, item interface{}) (bool, string, error) {
	re, ok := stored.(metadataRegexpCondition)
	if !ok {
		return false, "", fmt.Errorf("stored value is not a Metadata condition (%# v)", pretty.Formatter(stored))
	}
	queryItem, ok := item.(QueryItem)
	if !ok {
		return false, "", fmt.Errorf("item is not a queryItem searchable by Metadata: %# v", pretty.Formatter(item))
	}
	// Match the regexp against the value stored under the condition's key.
	return re.regExpression.MatchString(queryItem.GetMetadataValue(re.key)), fmt.Sprintf("metadata: %s -> %s", re.key, re.regExpression.String()), nil
}
package header
import (
"encoding/binary"
"github.com/brewlin/net-protocol/tcpip"
)
// ICMPv6 represents an ICMPv6 header stored in a byte array.
// Accessors read/write directly into the underlying bytes; the header layout
// follows RFC 4443 (type, code, checksum, then message body).
type ICMPv6 []byte

const (
	// ICMPv6MinimumSize is the minimum size of a valid ICMP packet.
	ICMPv6MinimumSize = 4

	// ICMPv6ProtocolNumber is the ICMP transport protocol number.
	ICMPv6ProtocolNumber tcpip.TransportProtocolNumber = 58

	// ICMPv6NeighborSolicitMinimumSize is the minimum size of a
	// neighbor solicitation packet.
	ICMPv6NeighborSolicitMinimumSize = ICMPv6MinimumSize + 4 + 16

	// ICMPv6NeighborAdvertSize is size of a neighbor advertisement.
	ICMPv6NeighborAdvertSize = 32

	// ICMPv6EchoMinimumSize is the minimum size of a valid ICMP echo packet.
	ICMPv6EchoMinimumSize = 8

	// ICMPv6DstUnreachableMinimumSize is the minimum size of a valid ICMP
	// destination unreachable packet.
	ICMPv6DstUnreachableMinimumSize = ICMPv6MinimumSize + 4

	// ICMPv6PacketTooBigMinimumSize is the minimum size of a valid ICMP
	// packet-too-big packet.
	ICMPv6PacketTooBigMinimumSize = ICMPv6MinimumSize + 4
)
// ICMPv6Type is the ICMP type field described in RFC 4443 and friends.
type ICMPv6Type byte

// Typical values of ICMPv6Type defined in RFC 4443.
const (
	ICMPv6DstUnreachable ICMPv6Type = 1
	ICMPv6PacketTooBig   ICMPv6Type = 2
	ICMPv6TimeExceeded   ICMPv6Type = 3
	ICMPv6ParamProblem   ICMPv6Type = 4
	ICMPv6EchoRequest    ICMPv6Type = 128
	ICMPv6EchoReply      ICMPv6Type = 129

	// Neighbor Discovery Protocol (NDP) messages, see RFC 4861.
	ICMPv6RouterSolicit   ICMPv6Type = 133
	ICMPv6RouterAdvert    ICMPv6Type = 134
	ICMPv6NeighborSolicit ICMPv6Type = 135
	ICMPv6NeighborAdvert  ICMPv6Type = 136
	ICMPv6RedirectMsg     ICMPv6Type = 137
)

// Values for ICMP code as defined in RFC 4443.
const (
	ICMPv6PortUnreachable = 4
)
// Type is the ICMP type field (first byte of the header).
func (b ICMPv6) Type() ICMPv6Type { return ICMPv6Type(b[0]) }

// SetType sets the ICMP type field.
func (b ICMPv6) SetType(t ICMPv6Type) { b[0] = byte(t) }

// Code is the ICMP code field (second byte). Its meaning depends on the
// value of Type.
func (b ICMPv6) Code() byte { return b[1] }

// SetCode sets the ICMP code field.
func (b ICMPv6) SetCode(c byte) { b[1] = c }

// Checksum is the ICMP checksum field, stored big-endian at bytes 2-3.
func (b ICMPv6) Checksum() uint16 {
	return binary.BigEndian.Uint16(b[2:])
}

// SetChecksum stores the given (pre-computed) ICMP checksum big-endian at
// bytes 2-3.
func (b ICMPv6) SetChecksum(checksum uint16) {
	binary.BigEndian.PutUint16(b[2:], checksum)
}

// SourcePort implements Transport.SourcePort.
// ICMP has no port concept, so this is a stub returning 0.
func (ICMPv6) SourcePort() uint16 {
	return 0
}

// DestinationPort implements Transport.DestinationPort.
// ICMP has no port concept, so this is a stub returning 0.
func (ICMPv6) DestinationPort() uint16 {
	return 0
}

// SetSourcePort implements Transport.SetSourcePort. No-op for ICMP.
func (ICMPv6) SetSourcePort(uint16) {
}

// SetDestinationPort implements Transport.SetDestinationPort. No-op for ICMP.
func (ICMPv6) SetDestinationPort(uint16) {
}

// Payload implements Transport.Payload: the message body following the
// 4-byte ICMPv6 header.
func (b ICMPv6) Payload() []byte {
	return b[ICMPv6MinimumSize:]
}
package matchers
import (
"fmt"
"reflect"
"regexp"
"strings"
logger "github.com/ViaQ/logerr/log"
"github.com/onsi/gomega/types"
"github.com/openshift/cluster-logging-operator/test"
testtypes "github.com/openshift/cluster-logging-operator/test/helpers/types"
)
// LogMatcher is a Gomega matcher that checks whether an actual log record
// fits an expected log "template" field by field (with wildcard support,
// see compareLogLogic).
type LogMatcher struct {
	expected interface{} // the template the actual log is compared against
	field    string      // name of the first mismatching field, for failure messages
}

// FitLogFormatTemplate returns a Gomega matcher that succeeds when the
// actual log fits the given template log.
func FitLogFormatTemplate(expected interface{}) types.GomegaMatcher {
	return &LogMatcher{
		expected: expected,
	}
}
// Match implements types.GomegaMatcher. It requires actual to have exactly
// the same Go type as the template, then delegates the field-by-field
// comparison to CompareLog, remembering the first failing field for the
// failure messages.
func (m *LogMatcher) Match(actual interface{}) (success bool, err error) {
	if reflect.TypeOf(m.expected) != reflect.TypeOf(actual) {
		return false, fmt.Errorf("matcher expects to compare same log types")
	}
	m.field, success, err = CompareLog(m.expected, actual)
	return success, err
}
// FailureMessage implements types.GomegaMatcher, reporting the first field
// (recorded by Match) that prevented the actual log from fitting the template.
func (m *LogMatcher) FailureMessage(actual interface{}) (message string) {
	return fmt.Sprintf("Expected\n\t%s\nto fit \n\t%s\nFailed field is: %s", test.JSONString(actual), test.JSONString(m.expected), m.field)
}

// NegatedFailureMessage implements types.GomegaMatcher for the negated case.
func (m *LogMatcher) NegatedFailureMessage(actual interface{}) (message string) {
	return fmt.Sprintf("Expected\n\t%s\nto not fit \n\t%s\nFailed field is: %s", test.JSONString(actual), test.JSONString(m.expected), m.field)
}
// isNil reports whether i is nil, including the "typed nil" case where a
// non-nil interface holds a nil pointer, map, chan, slice, or func.
// Bug fixes vs. the previous version:
//   - reflect.Array was listed in the IsNil cases, but Value.IsNil panics
//     for arrays (arrays can never be nil) — any array argument crashed.
//   - nil funcs and unsafe pointers were reported as non-nil; both kinds
//     are valid for Value.IsNil and are now handled.
func isNil(i interface{}) bool {
	if i == nil {
		return true
	}
	switch reflect.TypeOf(i).Kind() {
	case reflect.Ptr, reflect.Map, reflect.Chan, reflect.Slice,
		reflect.Func, reflect.UnsafePointer:
		return reflect.ValueOf(i).IsNil()
	}
	return false
}
// DeepFields walks the exported fields of a struct value recursively and
// returns the flattened field values together with their underscore-joined
// path names (e.g. "Parent_Child"). Special cases:
//   - fields of struct type "Timing" are skipped entirely
//   - fields of struct type "Time" (e.g. time.Time) are kept as leaf values
//     instead of being descended into
//   - nil pointer fields are skipped; non-nil pointers are dereferenced and
//     descended into
//   - unexported fields (not CanInterface) are skipped
func DeepFields(iface interface{}, namePrefix string) ([]reflect.Value, []string) {
	values := make([]reflect.Value, 0)
	names := make([]string, 0)

	ifv := reflect.ValueOf(iface)
	ift := reflect.TypeOf(iface)

	for i := 0; i < ift.NumField(); i++ {
		v := ifv.Field(i)
		n := namePrefix + ifv.Type().Field(i).Name
		// Skip unexported fields: Interface() would panic on them.
		if !v.CanInterface() {
			continue
		}
		switch v.Kind() {
		case reflect.Array:
			// Arrays are treated as leaf values, not descended into.
			values = append(values, v)
			names = append(names, n)
		case reflect.Struct:
			typename := v.Type().Name()
			if typename == "Timing" {
				// Timing structs are intentionally excluded from comparison.
				break
			}
			if typename != "Time" {
				// Recurse into nested structs, extending the name path.
				moreFields, moreNames := DeepFields(v.Interface(), n+"_")
				values = append(values, moreFields...)
				names = append(names, moreNames...)
			} else {
				// time.Time is compared as a single leaf value.
				values = append(values, v)
				names = append(names, n)
			}
		case reflect.Ptr:
			// Only follow non-nil pointers; nil ones are simply absent.
			if !isNil(v.Interface()) {
				elm := v.Elem()
				moreFields, moreNames := DeepFields(elm.Interface(), n+"_")
				values = append(values, moreFields...)
				names = append(names, moreNames...)
			}
		default:
			values = append(values, v)
			names = append(names, n)
		}
	}

	return values, names
}
// compareLogLogic decides whether a single template field value accepts the
// actual field value. Besides exact (stringified) equality it supports a
// small wildcard language in the template:
//   - "**optional**"            — field may hold anything, including nothing
//   - "*", "[*]", "map[*:*]"    — any non-empty value / array / map
//   - "[]", "0", zero time      — any non-empty value for empty-typed defaults
//   - "regex:<pattern>"         — actual value must match the pattern
//   - OptionalInt               — delegated to OptionalInt.IsSatisfiedBy
//
// Returns true when the actual value is acceptable for this template field.
func compareLogLogic(name string, templateValue interface{}, value interface{}) bool {
	// All comparisons are done on the fmt.Sprintf("%v") string forms.
	templateValueString := fmt.Sprintf("%v", templateValue)
	valueString := fmt.Sprintf("%v", value)
	if reflect.TypeOf(templateValue).Name() == "OptionalInt" {
		expValue := templateValue.(testtypes.OptionalInt)
		actValue := value.(testtypes.OptionalInt)
		logger.V(3).Info("CompareLogLogic: OptionalInt for", "name", name, "value", valueString, "exp", expValue, "act", actValue)
		return expValue.IsSatisfiedBy(actValue)
	}
	if templateValueString == valueString { // Same value is ok
		logger.V(3).Info("CompareLogLogic: Same value for", "name", name, "value", valueString)
		return true
	}
	if templateValueString == "**optional**" {
		logger.V(3).Info("CompareLogLogic: Optional value for **optional** ", "fieldname", name, "value", value)
		return true
	}
	if templateValueString == "*" && valueString != "" { // Any value, not Nil is ok if template value is "*"
		logger.V(3).Info("CompareLogLogic: Any value for * ", "fieldname", name, "value", value)
		return true
	}
	if templateValueString == "[*]" && valueString != "" { // Any array
		logger.V(3).Info("CompareLogLogic: Any value for array[*] ", "fieldname", name, "value", value)
		return true
	}
	if templateValueString == "map[*:*]" && valueString != "" { // Any map
		logger.V(3).Info("CompareLogLogic: Any value for map[*] ", "fieldname", name, "value", value)
		return true
	}
	if templateValueString == "[]" && valueString != "[]" { // Any non-empty array when the template holds an empty one
		logger.V(3).Info("CompareLogLogic: Any value for * ", "name", name, "value", valueString)
		return true
	}
	if templateValueString == "0" && valueString != "" { // Any value when the template holds a zero number
		logger.V(3).Info("CompareLogLogic: Any value for * ", "name", name, "value", valueString)
		return true
	}
	if templateValueString == "0001-01-01 00:00:00 +0000 UTC" && valueString != "" { // Any time value not Nil is ok if template value is empty time
		logger.V(3).Info("CompareLogLogic: Any value for 'empty time' ", "name", name, "value", valueString)
		return true
	}
	if strings.HasPrefix(templateValueString, "regex:") { // Template prefixed "regex:" matches by regular expression
		match, _ := regexp.MatchString(templateValueString[6:], valueString)
		if match {
			logger.V(3).Info("CompareLogLogic: Fit regex ", "fieldname", name, "value", value)
			return true
		}
	}
	logger.V(3).Info("CompareLogLogic: Mismatch !!!", "fieldname", name, "templateValue", templateValueString, "value", valueString)
	return false
}
// CompareLog flattens both the template and the actual log via DeepFields and
// checks, for every template field carrying a value, that the corresponding
// log field is accepted by compareLogLogic.
// Returns the name of the first failing field (empty on success), whether the
// log fits the template, and an error (currently always nil).
func CompareLog(template interface{}, log interface{}) (string, bool, error) {
	logFieldValues, logFieldNames := DeepFields(log, "")
	// templateString := test.JSONLine(template)
	// logger.V(3).Info("Marshalled", "template", templateString)
	// allLog := &logtypes.AllLog{}
	// test.MustUnmarshal(templateString, allLog)
	// logger.V(3).Info("Unmarshled", "template", template)
	templateFieldValues, templateFieldNames := DeepFields(template, "")
	logger.V(3).Info("Template", "names", templateFieldNames)
	for i := range templateFieldNames {
		templateFieldValue := templateFieldValues[i].Interface()
		templateFieldName := templateFieldNames[i]
		foundMatchFields := false
		// Find the log field with the same flattened name as the template field.
		for j := range logFieldValues {
			logFieldValue := logFieldValues[j].Interface()
			logFieldName := logFieldNames[j]
			if templateFieldName == logFieldName {
				foundMatchFields = true
				logger.V(3).Info("CompareLog: comparing", "name", templateFieldName)
				// Nil template fields mean "don't care" and are skipped.
				if !isNil(templateFieldValue) { // Are we interested this field?
					if templateFieldValues[j].Kind() == reflect.Ptr { // Skip skeleton structure fields
						logger.V(3).Info("CompareLog: skipping skeleton", "name", templateFieldName)
						break
					}
					if compareLogLogic(templateFieldName, templateFieldValue, logFieldValue) {
						break
					}
					// First mismatch ends the comparison.
					return templateFieldName, false, nil
				} else {
					logger.V(3).Info("CompareLog: skipping not interesting field", "name", templateFieldName)
					break // If this is not an interesting field
				}
			}
		}
		if !foundMatchFields {
			// Template fields absent from the log are tolerated.
			logger.V(3).Info("CompareLog: skipping field, not found in log", "name", templateFieldName)
		}
	}
	return "", true, nil
}
package game
import (
"time"
"github.com/oakmound/lowrez17/game/forceSpace"
"github.com/oakmound/lowrez17/game/layers"
"github.com/oakmound/oak/collision"
"github.com/oakmound/oak/physics"
"github.com/oakmound/oak/render"
)
// NetLeft returns an attack that sweeps a net 90 degrees counter-clockwise
// on the entity's left side. A goroutine animates the net sprite rotating
// about the entity, while hurt boxes with the given label are laid along the
// arc immediately; hit targets are pushed backwards (opposite p.Dir).
func NetLeft(label collision.Label) func(*Entity) {
	return func(p *Entity) {
		PlayAt("NetLight", p.X(), p.Y())
		// Knockback force points opposite to the entity's facing.
		fv := physics.NewForceVector(p.Dir.Copy().Rotate(180), 3)
		basePos := p.CenterPos()
		rot := p.Dir.Copy().Rotate(-130)
		net := render.NewReverting(images["net"].Copy().Modify(render.FlipY))
		render.Draw(net, layers.DebugLayer)
		// Animate the net sprite sweeping the same arc; rot is copied so the
		// goroutine's rotation is independent of the hurt-box loop below.
		go func(rot physics.Vector) {
			for a := 0; a < 90; a += 10 {
				pos := basePos.Copy()
				rot.Rotate(10)
				NetRotateAbout(net, pos, basePos, rot.Angle())
				time.Sleep(20 * time.Millisecond)
			}
			net.UnDraw()
		}(rot.Copy())
		// Place short-lived hurt boxes along the 90-degree arc right away.
		for a := 0; a < 90; a += 10 {
			pos := basePos.Copy().Add(rot.Copy().Scale(6))
			forceSpace.NewHurtBox(pos.X(), pos.Y(), 5, 5, 75*time.Millisecond, label, fv, false)
			rot.Rotate(10)
		}
	}
}
// NetRight mirrors NetLeft: a 90-degree clockwise net sweep on the entity's
// right side, with the sprite animated in a goroutine and hurt boxes laid
// along the arc immediately.
func NetRight(label collision.Label) func(*Entity) {
	return func(p *Entity) {
		PlayAt("NetLight", p.X(), p.Y())
		// Knockback force points opposite to the entity's facing.
		fv := physics.NewForceVector(p.Dir.Copy().Rotate(180), 3)
		basePos := p.CenterPos()
		rot := p.Dir.Copy().Rotate(130)
		net := render.NewReverting(images["net"].Copy())
		render.Draw(net, layers.DebugLayer)
		// Animate the sprite over the arc; rot is copied so this rotation is
		// independent of the hurt-box loop below.
		go func(rot physics.Vector) {
			for a := 0; a < 90; a += 10 {
				pos := basePos.Copy()
				rot.Rotate(-10)
				NetRotateAbout(net, pos, basePos, rot.Angle())
				time.Sleep(20 * time.Millisecond)
			}
			net.UnDraw()
		}(rot.Copy())
		// Place short-lived hurt boxes along the 90-degree arc right away.
		for a := 0; a < 90; a += 10 {
			pos := basePos.Copy().Add(rot.Copy().Scale(6))
			forceSpace.NewHurtBox(pos.X(), pos.Y(), 5, 5, 75*time.Millisecond, label, fv, false)
			rot.Rotate(-10)
		}
	}
}
// NetRotateAbout rotates the net renderable to the given angle and positions
// it offset from pos along that angle, shifting it by its own width/height
// when it ends up left of / above the center so the sprite stays anchored to
// the swing's pivot.
func NetRotateAbout(r *render.Reverting, pos, center physics.Vector, angle float64) {
	// Revert to the base sprite before applying the new rotation so
	// rotations do not accumulate across frames.
	r.RevertAndModify(1, render.Rotate(int(-angle)))
	pos2 := pos.Copy().Add(physics.AngleVector(angle).Scale(3))
	r.SetPos(pos2.X(), pos2.Y())
	w, h := r.GetDims()
	// Re-anchor when the sprite falls left of or above the pivot.
	if pos2.X() < center.X()-1 {
		r.ShiftX(float64(-w))
	}
	if pos2.Y() < center.Y()-1 {
		r.ShiftY(float64(-h))
	}
}
// NetTwirl returns the heavy net attack: a 260-degree clockwise spin around
// the entity. One goroutine animates the sprite, a second lays hurt boxes
// along the spin; each goroutine gets its own copies of the base position
// and rotation so their timings stay independent.
func NetTwirl(label collision.Label) func(*Entity) {
	return func(p *Entity) {
		PlayAt("NetHeavy", p.X(), p.Y())
		basePos := p.CenterPos()
		rot := p.Dir.Copy().Rotate(-10)
		// Sprite animation goroutine.
		go func(basePos, rot physics.Vector) {
			net := render.NewReverting(images["net"].Copy())
			render.Draw(net, layers.DebugLayer)
			for a := 0; a < 260; a += 10 {
				pos := basePos.Copy()
				rot.Rotate(-10)
				NetRotateAbout(net, pos, basePos, rot.Angle())
				time.Sleep(20 * time.Millisecond)
			}
			net.UnDraw()
		}(basePos.Copy(), rot.Copy())
		// Hurt-box goroutine: each box pushes targets tangentially
		// (90 degrees off the current spoke direction).
		go func(basePos, rot physics.Vector) {
			for a := 0; a < 260; a += 10 {
				pos := basePos.Copy().Add(rot.Copy().Scale(6))
				fv := physics.NewForceVector(rot.Copy().Rotate(90), 3)
				forceSpace.NewHurtBox(pos.X(), pos.Y(), 8, 8, 150*time.Millisecond, label, fv)
				rot.Rotate(-10)
				time.Sleep(5 * time.Millisecond)
			}
		}(basePos, rot)
		// Net's cooldown is too fast for this to be meaningful
		// go timing.DoAfter(NetTwirlCooldown, func() {
		// 	PlayAt("NetReady", p.X(), p.Y())
		// })
	}
}
package main
import (
"math"
. "github.com/jakecoffman/cp"
"github.com/jakecoffman/cp/examples"
)
// main builds a demo space showcasing every chipmunk constraint type in a
// 4x3 grid of walled cells, plus a sprung car, then hands the space to the
// example runner.
func main() {
	space := NewSpace()
	space.Iterations = 10
	space.SetGravity(Vector{0, -100})
	space.SleepTimeThreshold = 0.5

	// Segment endpoints come in pairs: five horizontal rows then five
	// vertical columns, carving the screen into demo cells.
	walls := []Vector{
		{-320, 240}, {320, 240},
		{-320, 120}, {320, 120},
		{-320, 0}, {320, 0},
		{-320, -120}, {320, -120},
		{-320, -240}, {320, -240},

		{-320, -240}, {-320, 240},
		{-160, -240}, {-160, 240},
		{0, -240}, {0, 240},
		{160, -240}, {160, 240},
		{320, -240}, {320, 240},
	}

	for i := 0; i < len(walls)-1; i += 2 {
		shape := space.AddShape(NewSegment(space.StaticBody, walls[i], walls[i+1], 0))
		shape.SetElasticity(1)
		shape.SetFriction(1)
		shape.SetFilter(examples.NotGrabbableFilter)
	}

	var boxOffset Vector
	var body1, body2 *Body

	// Positions of the two bodies within each demo cell.
	posA := Vector{50, 60}
	posB := Vector{110, 60}

	// Pin Joints - Link shapes with a solid bar or pin.
	// Keeps the anchor points the same distance apart from when the joint was created.
	boxOffset = Vector{-320, -240}
	body1 = addBall(space, posA, boxOffset)
	body2 = addBall(space, posB, boxOffset)
	space.AddConstraint(NewPinJoint(body1, body2, Vector{15, 0}, Vector{-15, 0}))

	// Slide Joints - Like pin joints but with a min/max distance.
	// Can be used for a cheap approximation of a rope.
	boxOffset = Vector{-160, -240}
	body1 = addBall(space, posA, boxOffset)
	body2 = addBall(space, posB, boxOffset)
	space.AddConstraint(NewSlideJoint(body1, body2, Vector{15, 0}, Vector{-15, 0}, 20, 40))

	// Pivot Joints - Holds the two anchor points together. Like a swivel.
	boxOffset = Vector{0, -240}
	body1 = addBall(space, posA, boxOffset)
	body2 = addBall(space, posB, boxOffset)
	space.AddConstraint(NewPivotJoint(body1, body2, boxOffset.Add(Vector{80, 60})))
	// cpPivotJointNew() takes its anchor parameter in world coordinates. The anchors are calculated from that
	// cpPivotJointNew2() lets you specify the two anchor points explicitly

	// Groove Joints - Like a pivot joint, but one of the anchors is a line segment that the pivot can slide in
	boxOffset = Vector{160, -240}
	body1 = addBall(space, posA, boxOffset)
	body2 = addBall(space, posB, boxOffset)
	space.AddConstraint(NewGrooveJoint(body1, body2, Vector{30, 30}, Vector{30, -30}, Vector{-30, 0}))

	// Damped Springs
	boxOffset = Vector{-320, -120}
	body1 = addBall(space, posA, boxOffset)
	body2 = addBall(space, posB, boxOffset)
	space.AddConstraint(NewDampedSpring(body1, body2, Vector{15, 0}, Vector{-15, 0}, 20, 5, 0.3))

	// Damped Rotary Springs
	boxOffset = Vector{-160, -120}
	body1 = addBar(space, posA, boxOffset)
	body2 = addBar(space, posB, boxOffset)
	space.AddConstraint(NewPivotJoint(body1, space.StaticBody, boxOffset.Add(posA)))
	space.AddConstraint(NewPivotJoint(body2, space.StaticBody, boxOffset.Add(posB)))
	space.AddConstraint(NewDampedRotarySpring(body1, body2, 0, 3000, 60))

	// Rotary Limit Joint
	boxOffset = Vector{0, -120}
	body1 = addLever(space, posA, boxOffset)
	body2 = addLever(space, posB, boxOffset)
	space.AddConstraint(NewPivotJoint(body1, space.StaticBody, boxOffset.Add(posA)))
	space.AddConstraint(NewPivotJoint(body2, space.StaticBody, boxOffset.Add(posB)))
	// Hold their rotation within 90 degrees of each other.
	space.AddConstraint(NewRotaryLimitJoint(body1, body2, -math.Pi/2.0, math.Pi/2.0))

	// Ratchet Joint - A rotary ratchet, like a socket wrench
	boxOffset = Vector{160, -120}
	body1 = addLever(space, posA, boxOffset)
	body2 = addLever(space, posB, boxOffset)
	space.AddConstraint(NewPivotJoint(body1, space.StaticBody, boxOffset.Add(posA)))
	space.AddConstraint(NewPivotJoint(body2, space.StaticBody, boxOffset.Add(posB)))
	// Ratchet every 90 degrees
	space.AddConstraint(NewRatchetJoint(body1, body2, 0, math.Pi/2.0))

	// Gear Joint - Maintain a specific angular velocity ratio
	boxOffset = Vector{-320, 0}
	body1 = addBar(space, posA, boxOffset)
	body2 = addBar(space, posB, boxOffset)
	space.AddConstraint(NewPivotJoint(body1, space.StaticBody, boxOffset.Add(posA)))
	space.AddConstraint(NewPivotJoint(body2, space.StaticBody, boxOffset.Add(posB)))
	// Force one to spin 2x as fast as the other
	space.AddConstraint(NewGearJoint(body1, body2, 0, 2))

	// Simple Motor - Maintain a specific angular relative velocity
	boxOffset = Vector{-160, 0}
	body1 = addBar(space, posA, boxOffset)
	body2 = addBar(space, posB, boxOffset)
	space.AddConstraint(NewPivotJoint(body1, space.StaticBody, boxOffset.Add(posA)))
	space.AddConstraint(NewPivotJoint(body2, space.StaticBody, boxOffset.Add(posB)))
	// Make them spin at 1/2 revolution per second in relation to each other.
	space.AddConstraint(NewSimpleMotor(body1, body2, math.Pi))

	// Make a car with some nice soft suspension
	boxOffset = Vector{}
	wheel1 := addWheel(space, posA, boxOffset)
	wheel2 := addWheel(space, posB, boxOffset)
	chassis := addChassis(space, Vector{80, 100}, boxOffset)

	space.AddConstraint(NewGrooveJoint(chassis, wheel1, Vector{-30, -10}, Vector{-30, -40}, Vector{}))
	space.AddConstraint(NewGrooveJoint(chassis, wheel2, Vector{30, -10}, Vector{30, -40}, Vector{}))

	space.AddConstraint(NewDampedSpring(chassis, wheel1, Vector{-30, 0}, Vector{}, 50, 20, 10))
	space.AddConstraint(NewDampedSpring(chassis, wheel2, Vector{30, 0}, Vector{}, 50, 20, 10))

	examples.Main(space, 1.0/60.0, update, examples.DefaultDraw)
}
// addBall adds a dynamic circle body (radius 15, mass 1) to the space at
// pos+boxOffset, attaches its collision shape, and returns the body.
func addBall(space *Space, pos, boxOffset Vector) *Body {
    const (
        radius = 15.0
        mass   = 1.0
    )
    ball := space.AddBody(NewBody(mass, MomentForCircle(mass, 0, radius, Vector{})))
    ball.SetPosition(pos.Add(boxOffset))
    circle := space.AddShape(NewCircle(ball, radius, Vector{}))
    circle.SetElasticity(0)
    circle.SetFriction(0.7)
    return ball
}
// addLever adds a short segment body (mass 1, half-length 15) to the space,
// positioned at pos+boxOffset shifted down by 15, and returns the body.
func addLever(space *Space, pos, boxOffset Vector) *Body {
    const mass = 1.0
    top, bottom := Vector{0, 15}, Vector{0, -15}
    lever := space.AddBody(NewBody(mass, MomentForSegment(mass, top, bottom, 0)))
    lever.SetPosition(pos.Add(boxOffset.Add(Vector{0, -15})))
    seg := space.AddShape(NewSegment(lever, top, bottom, 5))
    seg.SetElasticity(0)
    seg.SetFriction(0.7)
    return lever
}
// addBar adds a longer segment body (mass 2, half-length 30) to the space at
// pos+boxOffset and returns it. The shape filter places it in group 1 so
// bars sharing the group do not collide with each other.
func addBar(space *Space, pos, boxOffset Vector) *Body {
    const mass = 2.0
    top, bottom := Vector{0, 30}, Vector{0, -30}
    bar := space.AddBody(NewBody(mass, MomentForSegment(mass, top, bottom, 0)))
    bar.SetPosition(pos.Add(boxOffset))
    seg := space.AddShape(NewSegment(bar, top, bottom, 5))
    seg.SetElasticity(0)
    seg.SetFriction(0.7)
    seg.SetFilter(NewShapeFilter(1, ALL_CATEGORIES, ALL_CATEGORIES))
    return bar
}
// addWheel adds a circle body (radius 15, mass 1) intended as a car wheel,
// sharing filter group 1 with the chassis so wheel and chassis never collide.
func addWheel(space *Space, pos, boxOffset Vector) *Body {
    const (
        radius = 15.0
        mass   = 1.0
    )
    wheel := space.AddBody(NewBody(mass, MomentForCircle(mass, 0, radius, Vector{})))
    wheel.SetPosition(pos.Add(boxOffset))
    circle := space.AddShape(NewCircle(wheel, radius, Vector{}))
    circle.SetElasticity(0)
    circle.SetFriction(0.7)
    circle.SetFilter(NewShapeFilter(1, ALL_CATEGORIES, ALL_CATEGORIES))
    return wheel
}
// addChassis adds an 80x30 box body (mass 5) used as the car chassis,
// sharing filter group 1 with the wheels so the car parts do not collide.
func addChassis(space *Space, pos, boxOffset Vector) *Body {
    const (
        mass   = 5.0
        width  = 80.0
        height = 30.0
    )
    chassis := space.AddBody(NewBody(mass, MomentForBox(mass, width, height)))
    chassis.SetPosition(pos.Add(boxOffset))
    box := space.AddShape(NewBox(chassis, width, height, 0))
    box.SetElasticity(0)
    box.SetFriction(0.7)
    box.SetFilter(NewShapeFilter(1, ALL_CATEGORIES, ALL_CATEGORIES))
    return chassis
}
// update draws the caption for every demo box and then advances the physics
// simulation by dt seconds.
func update(space *Space, dt float64) {
    captions := []struct {
        origin Vector
        text   string
    }{
        {Vector{-320, -240}, "Pin Joints"},
        {Vector{-160, -240}, "Slide Joints"},
        {Vector{0, -240}, "Pivot Joints"},
        {Vector{160, -240}, "Groove Joints"},
        {Vector{-320, -120}, "Damped Spring"},
        {Vector{-160, -120}, "Damped Rotary Spring"},
        {Vector{0, -120}, "Rotary Limit Joint"},
        {Vector{160, -120}, "Ratchet Joints"},
        {Vector{-320, 0}, "Gear Joint"},
        {Vector{-160, 0}, "Simple Motor"},
        {Vector{0, 0}, "Car"},
    }
    for _, c := range captions {
        draw(c.origin, c.text)
    }
    space.Step(dt)
}
func draw(v Vector, words string) {
examples.DrawString(Vector{v.X + 10, v.Y + 105}, words)
} | examples/joints/joints.go | 0.654895 | 0.59131 | joints.go | starcoder |
package types
import (
"regexp"
"strings"
"unicode"
"github.com/tableauio/tableau/internal/atom"
"github.com/tableauio/tableau/proto/tableaupb"
"google.golang.org/protobuf/encoding/prototext"
)
// Compiled matchers for the tableau type-specifier syntax; populated in init
// below from the pattern fragments defined after them.
var mapRegexp *regexp.Regexp        // map<KeyType,ValueType>
var listRegexp *regexp.Regexp       // [ElemType]ColumnType
var keyedListRegexp *regexp.Regexp  // [ElemType]<KeyType>
var structRegexp *regexp.Regexp     // {StructType}ColumnType
var enumRegexp *regexp.Regexp       // enum<EnumType>
var propRegexp *regexp.Regexp       // trailing |{...} property block
var boringIntegerRegexp *regexp.Regexp // integers written as floats, e.g. "1.00"
// refer: https://github.com/google/re2/wiki/Syntax
// Optional trailing property block appended to a type specifier.
const rawPropGroup = `(\|\{.+\})?` // e.g.: |{range:"1,10" refer:"XXXConf.ID"}
// Characters permitted inside a type token (identifiers plus nesting syntax).
const typeCharSet = `[0-9A-Za-z,_>< \[\]\.\{\}]`
const typeGroup = `(` + typeCharSet + `+)`
const looseTypeGroup = typeGroup + `?` // `x?`: zero or one x, prefer one
const ungreedyTypeGroup = `(` + typeCharSet + `*?)`
// TypeGroup is the non-greedy capture group for a type token, exported for
// callers that assemble their own patterns from this syntax.
const TypeGroup = ungreedyTypeGroup
// init compiles the package's type-specifier matchers once at load time.
// The patterns are anchored at the start (`^`) except propRegexp, which may
// match anywhere in the specifier.
func init() {
    mapRegexp = regexp.MustCompile(`^map<` + typeGroup + `,` + typeGroup + `>` + rawPropGroup)            // e.g.: map<uint32,Type>
    listRegexp = regexp.MustCompile(`^\[` + ungreedyTypeGroup + `\]` + typeGroup + rawPropGroup)          // e.g.: [Type]uint32
    keyedListRegexp = regexp.MustCompile(`^\[` + ungreedyTypeGroup + `\]<` + typeGroup + `>` + rawPropGroup) // e.g.: [Type]<uint32>
    structRegexp = regexp.MustCompile(`^\{` + ungreedyTypeGroup + `\}` + looseTypeGroup + rawPropGroup)   // e.g.: {Type}uint32
    enumRegexp = regexp.MustCompile(`^enum<` + typeGroup + `>` + rawPropGroup)                            // e.g.: enum<Type>
    propRegexp = regexp.MustCompile(`\|?\{(.+)\}`) // e.g.: |{range:"1,10" refer:"XXXConf.ID"}
    // trim float to integer after(include) dot, e.g: 0.0, 1.0, 1.00 ...
    // refer: https://stackoverflow.com/questions/638565/parsing-scientific-notation-sensibly
    boringIntegerRegexp = regexp.MustCompile(`([-+]?[0-9]+)\.0+$`)
}
// MatchMap returns the submatches of a map specifier (e.g. "map<uint32,Type>"),
// or nil if text is not a map specifier.
func MatchMap(text string) []string {
    return mapRegexp.FindStringSubmatch(text)
}

// IsMap reports whether text is a map type specifier.
func IsMap(text string) bool {
    return MatchMap(text) != nil
}

// MatchList returns the submatches of a list specifier (e.g. "[Type]uint32"),
// or nil if text is not a list specifier.
func MatchList(text string) []string {
    return listRegexp.FindStringSubmatch(text)
}

// IsList reports whether text is a list type specifier.
func IsList(text string) bool {
    return MatchList(text) != nil
}

// MatchKeyedList returns the submatches of a keyed-list specifier
// (e.g. "[Type]<uint32>"), or nil if text is not a keyed list.
func MatchKeyedList(text string) []string {
    return keyedListRegexp.FindStringSubmatch(text)
}

// IsKeyedList reports whether text is a keyed-list type specifier.
func IsKeyedList(text string) bool {
    return MatchKeyedList(text) != nil
}

// MatchStruct returns the submatches of a struct specifier
// (e.g. "{Type}uint32"), or nil if text is not a struct specifier.
func MatchStruct(text string) []string {
    return structRegexp.FindStringSubmatch(text)
}

// IsStruct reports whether text is a struct type specifier.
func IsStruct(text string) bool {
    return MatchStruct(text) != nil
}

// MatchEnum returns the submatches of an enum specifier (e.g. "enum<Type>"),
// or nil if text is not an enum specifier.
func MatchEnum(text string) []string {
    return enumRegexp.FindStringSubmatch(text)
}

// IsEnum reports whether text is an enum type specifier.
func IsEnum(text string) bool {
    return MatchEnum(text) != nil
}

// MatchProp returns the submatches of a trailing property block
// (e.g. `|{range:"1,10"}`), or nil if none is present.
func MatchProp(text string) []string {
    return propRegexp.FindStringSubmatch(text)
}

// MatchBoringInteger returns the submatches of an integer written in float
// form (e.g. "1.00" captures "1"), or nil if text is not such a value.
func MatchBoringInteger(text string) []string {
    return boringIntegerRegexp.FindStringSubmatch(text)
}
// ParseProp extracts the property block (e.g. `|{range:"1,10"}`) from a type
// specifier and unmarshals it as a prototext FieldProp. It returns nil when no
// property block is present, when the block is empty, or when it fails to
// parse (the parse error is logged).
func ParseProp(text string) *tableaupb.FieldProp {
    matches := propRegexp.FindStringSubmatch(text)
    if len(matches) == 0 {
        return nil
    }
    propText := strings.TrimSpace(matches[1])
    if propText == "" {
        return nil
    }
    prop := &tableaupb.FieldProp{}
    if err := prototext.Unmarshal([]byte(propText), prop); err != nil {
        atom.Log.Errorf("parse prop failed: %s", err)
        return nil
    }
    return prop
}
// BelongToFirstElement returns true if the name has specified `prefix+"1"`
// and the next character is not digit.
func BelongToFirstElement(name, prefix string) bool {
firstElemPrefix := prefix + "1"
nextCharPos := len(firstElemPrefix)
if strings.HasPrefix(name, firstElemPrefix) {
if len(name) > len(firstElemPrefix) {
char := name[nextCharPos]
return !unicode.IsDigit(rune(char))
}
}
return false
}
// Kind categorizes a parsed type token into one of the broad protobuf-style
// type families used by this package.
type Kind int

const (
    ScalarKind  Kind = iota // scalar value types (bool, ints, floats, string, bytes, ...)
    EnumKind                // enum types
    ListKind                // repeated fields
    MapKind                 // map fields
    MessageKind             // message (struct) types
)
// typeKindMap maps a type token, as written in a sheet type specifier, to its
// Kind category. Tokens absent from the map (e.g. message type names) are not
// classified here; see IsScalarType.
//
// Declared as a package-level composite literal rather than being populated in
// an init() function: the contents are static data, so this form avoids an
// init-ordering dependency and a window where the variable is a nil map.
var typeKindMap = map[string]Kind{
    "bool": ScalarKind,
    // NOTE(review): "enum" is categorized as ScalarKind, not EnumKind —
    // confirm this is intentional (preserved from the original mapping).
    "enum":     ScalarKind,
    "int32":    ScalarKind,
    "sint32":   ScalarKind,
    "uint32":   ScalarKind,
    "int64":    ScalarKind,
    "sint64":   ScalarKind,
    "uint64":   ScalarKind,
    "sfixed32": ScalarKind,
    "fixed32":  ScalarKind,
    "float":    ScalarKind,
    "sfixed64": ScalarKind,
    "fixed64":  ScalarKind,
    "double":   ScalarKind,
    "string":   ScalarKind,
    "bytes":    ScalarKind,
    "repeated": ListKind,
    "map":      MapKind,
}
func IsScalarType(t string) bool {
if kind, ok := typeKindMap[t]; ok {
return kind == ScalarKind
}
return false
} | internal/types/types.go | 0.606732 | 0.418994 | types.go | starcoder |
package v1alpha1 // import "istio.io/api/rbac/v1alpha1"
/*
Istio RBAC (Role Based Access Control) defines ServiceRole and ServiceRoleBinding
objects.
A ServiceRole specification includes a list of rules (permissions). Each rule has
the following standard fields:
* services: a list of services.
* methods: HTTP methods. In the case of gRPC, this field is ignored because the value is always "POST".
* paths: HTTP paths or gRPC methods. Note that gRPC methods should be
presented in the form of "packageName.serviceName/methodName".
In addition to the standard fields, operators can use custom fields in the "constraints"
section. The name of a custom field must match one of the "properties" in the "action" part
of the "authorization" template (https://github.com/istio/istio/blob/master/mixer/template/authorization/template.proto).
For example, suppose we define an instance of the "authorization" template, named "requestcontext".
```yaml
apiVersion: "config.istio.io/v1alpha1"
kind: authorization
metadata:
name: requestcontext
namespace: istio-system
spec:
subject:
user: source.user | ""
groups: ""
properties:
service: source.service | ""
namespace: source.namespace | ""
action:
namespace: destination.namespace | ""
service: destination.service | ""
method: request.method | ""
path: request.path | ""
properties:
version: request.headers["version"] | ""
```
Below is an example of ServiceRole object "product-viewer", which has "read" ("GET" and "HEAD")
access to "products.svc.cluster.local" service at versions "v1" and "v2". "path" is not specified,
so it applies to any path in the service.
```yaml
apiVersion: "config.istio.io/v1alpha1"
kind: ServiceRole
metadata:
name: products-viewer
namespace: default
spec:
rules:
- services: ["products.svc.cluster.local"]
methods: ["GET", "HEAD"]
constraints:
- key: "version"
value: ["v1", "v2"]
```
A ServiceRoleBinding specification includes two parts:
* "roleRef" refers to a ServiceRole object in the same namespace.
* A list of "subjects" that are assigned the roles.
A subject is represented with a set of "properties". The name of a property must match one of
the fields ("user" or "groups" or one of the "properties") in the "subject" part of the "authorization"
template (https://github.com/istio/istio/blob/master/mixer/template/authorization/template.proto).
Below is an example of ServiceRoleBinding object "test-binding-products", which binds two subjects
to ServiceRole "product-viewer":
* User "<EMAIL>"
* "reviews" service in "abc" namespace.
```yaml
apiVersion: "config.istio.io/v1alpha1"
kind: ServiceRoleBinding
metadata:
name: test-binding-products
namespace: default
spec:
subjects:
- user: <EMAIL>
- properties:
service: "reviews"
namespace: "abc"
roleRef:
kind: ServiceRole
name: "products-viewer"
```
*/
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// RbacConfig_Mode is the generated Go enum for the RbacConfig Mode protobuf
// enum. Generated by protoc-gen-go; do not edit by hand.
type RbacConfig_Mode int32

const (
    // Disable Istio RBAC completely, any other config in RbacConfig will be ignored and Istio RBAC policies
    // will not be enforced.
    RbacConfig_OFF RbacConfig_Mode = 0
    // Enable Istio RBAC for all services and namespaces.
    RbacConfig_ON RbacConfig_Mode = 1
    // Enable Istio RBAC only for services and namespaces specified in the inclusion field. Any other
    // services and namespaces not in the inclusion field will not be enforced by Istio RBAC policies.
    RbacConfig_ON_WITH_INCLUSION RbacConfig_Mode = 2
    // Enable Istio RBAC for all services and namespaces except those specified in the exclusion field. Any other
    // services and namespaces not in the exclusion field will be enforced by Istio RBAC policies.
    RbacConfig_ON_WITH_EXCLUSION RbacConfig_Mode = 3
)

// Generated name<->value lookup tables for the enum.
var RbacConfig_Mode_name = map[int32]string{
    0: "OFF",
    1: "ON",
    2: "ON_WITH_INCLUSION",
    3: "ON_WITH_EXCLUSION",
}
var RbacConfig_Mode_value = map[string]int32{
    "OFF":               0,
    "ON":                1,
    "ON_WITH_INCLUSION": 2,
    "ON_WITH_EXCLUSION": 3,
}

// String returns the enum value's protobuf name.
func (x RbacConfig_Mode) String() string {
    return proto.EnumName(RbacConfig_Mode_name, int32(x))
}

// EnumDescriptor returns the raw file descriptor and the enum's path in it.
func (RbacConfig_Mode) EnumDescriptor() ([]byte, []int) {
    return fileDescriptor_rbac_665e05e3b5f24d86, []int{5, 0}
}
// ServiceRole specification contains a list of access rules (permissions).
// This represent the "Spec" part of the ServiceRole object. The name and namespace
// of the ServiceRole is specified in "metadata" section of the ServiceRole object.
//
// Generated by protoc-gen-go; do not edit by hand.
type ServiceRole struct {
    // Required. The set of access rules (permissions) that the role has.
    Rules                []*AccessRule `protobuf:"bytes,1,rep,name=rules" json:"rules,omitempty"`
    XXX_NoUnkeyedLiteral struct{}      `json:"-"`
    XXX_unrecognized     []byte        `json:"-"`
    XXX_sizecache        int32         `json:"-"`
}

func (m *ServiceRole) Reset()         { *m = ServiceRole{} }
func (m *ServiceRole) String() string { return proto.CompactTextString(m) }
func (*ServiceRole) ProtoMessage()    {}
func (*ServiceRole) Descriptor() ([]byte, []int) {
    return fileDescriptor_rbac_665e05e3b5f24d86, []int{0}
}

// Generated wire-format plumbing; not intended to be called directly.
func (m *ServiceRole) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_ServiceRole.Unmarshal(m, b)
}
func (m *ServiceRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_ServiceRole.Marshal(b, m, deterministic)
}
func (dst *ServiceRole) XXX_Merge(src proto.Message) {
    xxx_messageInfo_ServiceRole.Merge(dst, src)
}
func (m *ServiceRole) XXX_Size() int {
    return xxx_messageInfo_ServiceRole.Size(m)
}
func (m *ServiceRole) XXX_DiscardUnknown() {
    xxx_messageInfo_ServiceRole.DiscardUnknown(m)
}

var xxx_messageInfo_ServiceRole proto.InternalMessageInfo

// GetRules returns the rules, or nil if the receiver is nil (nil-safe accessor).
func (m *ServiceRole) GetRules() []*AccessRule {
    if m != nil {
        return m.Rules
    }
    return nil
}
// AccessRule defines a permission to access a list of services.
//
// Generated by protoc-gen-go; do not edit by hand.
type AccessRule struct {
    // Required. A list of service names.
    // Exact match, prefix match, and suffix match are supported for service names.
    // For example, the service name "bookstore.mtv.cluster.local" matches
    // "bookstore.mtv.cluster.local" (exact match), or "bookstore*" (prefix match),
    // or "*.mtv.cluster.local" (suffix match).
    // If set to ["*"], it refers to all services in the namespace.
    Services []string `protobuf:"bytes,1,rep,name=services" json:"services,omitempty"`
    // Optional. A list of HTTP paths or gRPC methods.
    // gRPC methods must be presented as fully-qualified name in the form of
    // packageName.serviceName/methodName.
    // Exact match, prefix match, and suffix match are supported for paths.
    // For example, the path "/books/review" matches
    // "/books/review" (exact match), or "/books/*" (prefix match),
    // or "*/review" (suffix match).
    // If not specified, it applies to any path.
    Paths []string `protobuf:"bytes,2,rep,name=paths" json:"paths,omitempty"`
    // Optional. A list of HTTP methods (e.g., "GET", "POST").
    // It is ignored in gRPC case because the value is always "POST".
    // If set to ["*"] or not specified, it applies to any method.
    Methods []string `protobuf:"bytes,3,rep,name=methods" json:"methods,omitempty"`
    // Optional. Extra constraints in the ServiceRole specification.
    // The above ServiceRole examples shows an example of constraint "version".
    Constraints          []*AccessRule_Constraint `protobuf:"bytes,4,rep,name=constraints" json:"constraints,omitempty"`
    XXX_NoUnkeyedLiteral struct{}                 `json:"-"`
    XXX_unrecognized     []byte                   `json:"-"`
    XXX_sizecache        int32                    `json:"-"`
}

func (m *AccessRule) Reset()         { *m = AccessRule{} }
func (m *AccessRule) String() string { return proto.CompactTextString(m) }
func (*AccessRule) ProtoMessage()    {}
func (*AccessRule) Descriptor() ([]byte, []int) {
    return fileDescriptor_rbac_665e05e3b5f24d86, []int{1}
}

// Generated wire-format plumbing; not intended to be called directly.
func (m *AccessRule) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_AccessRule.Unmarshal(m, b)
}
func (m *AccessRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_AccessRule.Marshal(b, m, deterministic)
}
func (dst *AccessRule) XXX_Merge(src proto.Message) {
    xxx_messageInfo_AccessRule.Merge(dst, src)
}
func (m *AccessRule) XXX_Size() int {
    return xxx_messageInfo_AccessRule.Size(m)
}
func (m *AccessRule) XXX_DiscardUnknown() {
    xxx_messageInfo_AccessRule.DiscardUnknown(m)
}

var xxx_messageInfo_AccessRule proto.InternalMessageInfo

// Nil-safe generated accessors.
func (m *AccessRule) GetServices() []string {
    if m != nil {
        return m.Services
    }
    return nil
}

func (m *AccessRule) GetPaths() []string {
    if m != nil {
        return m.Paths
    }
    return nil
}

func (m *AccessRule) GetMethods() []string {
    if m != nil {
        return m.Methods
    }
    return nil
}

func (m *AccessRule) GetConstraints() []*AccessRule_Constraint {
    if m != nil {
        return m.Constraints
    }
    return nil
}
// Definition of a custom constraint. The key of a custom constraint must match
// one of the "properties" in the "action" part of the "authorization" template
// (https://github.com/istio/istio/blob/master/mixer/template/authorization/template.proto).
//
// Generated by protoc-gen-go; do not edit by hand.
type AccessRule_Constraint struct {
    // Key of the constraint.
    Key string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"`
    // List of valid values for the constraint.
    // Exact match, prefix match, and suffix match are supported for constraint values.
    // For example, the value "v1alpha2" matches
    // "v1alpha2" (exact match), or "v1*" (prefix match),
    // or "*alpha2" (suffix match).
    Values               []string `protobuf:"bytes,2,rep,name=values" json:"values,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}

func (m *AccessRule_Constraint) Reset()         { *m = AccessRule_Constraint{} }
func (m *AccessRule_Constraint) String() string { return proto.CompactTextString(m) }
func (*AccessRule_Constraint) ProtoMessage()    {}
func (*AccessRule_Constraint) Descriptor() ([]byte, []int) {
    return fileDescriptor_rbac_665e05e3b5f24d86, []int{1, 0}
}

// Generated wire-format plumbing; not intended to be called directly.
func (m *AccessRule_Constraint) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_AccessRule_Constraint.Unmarshal(m, b)
}
func (m *AccessRule_Constraint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_AccessRule_Constraint.Marshal(b, m, deterministic)
}
func (dst *AccessRule_Constraint) XXX_Merge(src proto.Message) {
    xxx_messageInfo_AccessRule_Constraint.Merge(dst, src)
}
func (m *AccessRule_Constraint) XXX_Size() int {
    return xxx_messageInfo_AccessRule_Constraint.Size(m)
}
func (m *AccessRule_Constraint) XXX_DiscardUnknown() {
    xxx_messageInfo_AccessRule_Constraint.DiscardUnknown(m)
}

var xxx_messageInfo_AccessRule_Constraint proto.InternalMessageInfo

// Nil-safe generated accessors.
func (m *AccessRule_Constraint) GetKey() string {
    if m != nil {
        return m.Key
    }
    return ""
}

func (m *AccessRule_Constraint) GetValues() []string {
    if m != nil {
        return m.Values
    }
    return nil
}
// ServiceRoleBinding assigns a ServiceRole to a list of subjects.
// This represents the "Spec" part of the ServiceRoleBinding object. The name and namespace
// of the ServiceRoleBinding is specified in "metadata" section of the ServiceRoleBinding
// object.
//
// Generated by protoc-gen-go; do not edit by hand.
type ServiceRoleBinding struct {
    // Required. List of subjects that are assigned the ServiceRole object.
    Subjects []*Subject `protobuf:"bytes,1,rep,name=subjects" json:"subjects,omitempty"`
    // Required. Reference to the ServiceRole object.
    RoleRef              *RoleRef `protobuf:"bytes,2,opt,name=roleRef" json:"roleRef,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}

func (m *ServiceRoleBinding) Reset()         { *m = ServiceRoleBinding{} }
func (m *ServiceRoleBinding) String() string { return proto.CompactTextString(m) }
func (*ServiceRoleBinding) ProtoMessage()    {}
func (*ServiceRoleBinding) Descriptor() ([]byte, []int) {
    return fileDescriptor_rbac_665e05e3b5f24d86, []int{2}
}

// Generated wire-format plumbing; not intended to be called directly.
func (m *ServiceRoleBinding) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_ServiceRoleBinding.Unmarshal(m, b)
}
func (m *ServiceRoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_ServiceRoleBinding.Marshal(b, m, deterministic)
}
func (dst *ServiceRoleBinding) XXX_Merge(src proto.Message) {
    xxx_messageInfo_ServiceRoleBinding.Merge(dst, src)
}
func (m *ServiceRoleBinding) XXX_Size() int {
    return xxx_messageInfo_ServiceRoleBinding.Size(m)
}
func (m *ServiceRoleBinding) XXX_DiscardUnknown() {
    xxx_messageInfo_ServiceRoleBinding.DiscardUnknown(m)
}

var xxx_messageInfo_ServiceRoleBinding proto.InternalMessageInfo

// Nil-safe generated accessors.
func (m *ServiceRoleBinding) GetSubjects() []*Subject {
    if m != nil {
        return m.Subjects
    }
    return nil
}

func (m *ServiceRoleBinding) GetRoleRef() *RoleRef {
    if m != nil {
        return m.RoleRef
    }
    return nil
}
// Subject defines an identity or a group of identities. The identity is either a user or
// a group or identified by a set of "properties". The name of the "properties" must match
// the "properties" in the "subject" part of the "authorization" template
// (https://github.com/istio/istio/blob/master/mixer/template/authorization/template.proto).
//
// Generated by protoc-gen-go; do not edit by hand.
type Subject struct {
    // Optional. The user name/ID that the subject represents.
    User string `protobuf:"bytes,1,opt,name=user" json:"user,omitempty"`
    // Optional. The group that the subject belongs to.
    Group string `protobuf:"bytes,2,opt,name=group" json:"group,omitempty"`
    // Optional. The set of properties that identify the subject.
    // In the above ServiceRoleBinding example, the second subject has two properties:
    //     service: "reviews"
    //     namespace: "abc"
    Properties           map[string]string `protobuf:"bytes,3,rep,name=properties" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
    XXX_NoUnkeyedLiteral struct{}          `json:"-"`
    XXX_unrecognized     []byte            `json:"-"`
    XXX_sizecache        int32             `json:"-"`
}

func (m *Subject) Reset()         { *m = Subject{} }
func (m *Subject) String() string { return proto.CompactTextString(m) }
func (*Subject) ProtoMessage()    {}
func (*Subject) Descriptor() ([]byte, []int) {
    return fileDescriptor_rbac_665e05e3b5f24d86, []int{3}
}

// Generated wire-format plumbing; not intended to be called directly.
func (m *Subject) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_Subject.Unmarshal(m, b)
}
func (m *Subject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_Subject.Marshal(b, m, deterministic)
}
func (dst *Subject) XXX_Merge(src proto.Message) {
    xxx_messageInfo_Subject.Merge(dst, src)
}
func (m *Subject) XXX_Size() int {
    return xxx_messageInfo_Subject.Size(m)
}
func (m *Subject) XXX_DiscardUnknown() {
    xxx_messageInfo_Subject.DiscardUnknown(m)
}

var xxx_messageInfo_Subject proto.InternalMessageInfo

// Nil-safe generated accessors.
func (m *Subject) GetUser() string {
    if m != nil {
        return m.User
    }
    return ""
}

func (m *Subject) GetGroup() string {
    if m != nil {
        return m.Group
    }
    return ""
}

func (m *Subject) GetProperties() map[string]string {
    if m != nil {
        return m.Properties
    }
    return nil
}
// RoleRef refers to a role object.
//
// Generated by protoc-gen-go; do not edit by hand.
type RoleRef struct {
    // Required. The type of the role being referenced.
    // Currently, "ServiceRole" is the only supported value for "kind".
    Kind string `protobuf:"bytes,1,opt,name=kind" json:"kind,omitempty"`
    // Required. The name of the ServiceRole object being referenced.
    // The ServiceRole object must be in the same namespace as the ServiceRoleBinding
    // object.
    Name                 string   `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}

func (m *RoleRef) Reset()         { *m = RoleRef{} }
func (m *RoleRef) String() string { return proto.CompactTextString(m) }
func (*RoleRef) ProtoMessage()    {}
func (*RoleRef) Descriptor() ([]byte, []int) {
    return fileDescriptor_rbac_665e05e3b5f24d86, []int{4}
}

// Generated wire-format plumbing; not intended to be called directly.
func (m *RoleRef) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_RoleRef.Unmarshal(m, b)
}
func (m *RoleRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_RoleRef.Marshal(b, m, deterministic)
}
func (dst *RoleRef) XXX_Merge(src proto.Message) {
    xxx_messageInfo_RoleRef.Merge(dst, src)
}
func (m *RoleRef) XXX_Size() int {
    return xxx_messageInfo_RoleRef.Size(m)
}
func (m *RoleRef) XXX_DiscardUnknown() {
    xxx_messageInfo_RoleRef.DiscardUnknown(m)
}

var xxx_messageInfo_RoleRef proto.InternalMessageInfo

// Nil-safe generated accessors.
func (m *RoleRef) GetKind() string {
    if m != nil {
        return m.Kind
    }
    return ""
}

func (m *RoleRef) GetName() string {
    if m != nil {
        return m.Name
    }
    return ""
}
// RbacConfig defines the global config to control Istio RBAC behavior.
// This Custom Resource is a singleton where only one Custom Resource should be created globally in
// the mesh and the namespace should be the same to other Istio components, which usually is istio-system.
// Note: This is enforced in both istioctl and server side, new Custom Resource will be rejected if found any
// existing one, the user should either delete the existing one or change the existing one directly.
//
// Below is an example of RbacConfig object "istio-rbac-config" which enables Istio RBAC for all
// services in the default namespace.
//
// ```yaml
// apiVersion: "config.istio.io/v1alpha1"
// kind: RbacConfig
// metadata:
//   name: istio-rbac-config
//   namespace: istio-system
// spec:
//   mode: ON_WITH_INCLUSION
//   inclusion:
//     namespaces: [ "default" ]
// ```
//
// Generated by protoc-gen-go; do not edit by hand.
type RbacConfig struct {
    // Istio RBAC mode.
    Mode RbacConfig_Mode `protobuf:"varint,1,opt,name=mode,enum=istio.rbac.v1alpha1.RbacConfig_Mode" json:"mode,omitempty"`
    // A list of services or namespaces that should be enforced by Istio RBAC policies. Note: This field have
    // effect only when mode is ON_WITH_INCLUSION and will be ignored for any other modes.
    Inclusion *RbacConfig_Target `protobuf:"bytes,2,opt,name=inclusion" json:"inclusion,omitempty"`
    // A list of services or namespaces that should not be enforced by Istio RBAC policies. Note: This field have
    // effect only when mode is ON_WITH_EXCLUSION and will be ignored for any other modes.
    Exclusion            *RbacConfig_Target `protobuf:"bytes,3,opt,name=exclusion" json:"exclusion,omitempty"`
    XXX_NoUnkeyedLiteral struct{}           `json:"-"`
    XXX_unrecognized     []byte             `json:"-"`
    XXX_sizecache        int32              `json:"-"`
}

func (m *RbacConfig) Reset()         { *m = RbacConfig{} }
func (m *RbacConfig) String() string { return proto.CompactTextString(m) }
func (*RbacConfig) ProtoMessage()    {}
func (*RbacConfig) Descriptor() ([]byte, []int) {
    return fileDescriptor_rbac_665e05e3b5f24d86, []int{5}
}

// Generated wire-format plumbing; not intended to be called directly.
func (m *RbacConfig) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_RbacConfig.Unmarshal(m, b)
}
func (m *RbacConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_RbacConfig.Marshal(b, m, deterministic)
}
func (dst *RbacConfig) XXX_Merge(src proto.Message) {
    xxx_messageInfo_RbacConfig.Merge(dst, src)
}
func (m *RbacConfig) XXX_Size() int {
    return xxx_messageInfo_RbacConfig.Size(m)
}
func (m *RbacConfig) XXX_DiscardUnknown() {
    xxx_messageInfo_RbacConfig.DiscardUnknown(m)
}

var xxx_messageInfo_RbacConfig proto.InternalMessageInfo

// GetMode returns the configured mode; a nil receiver yields RbacConfig_OFF.
func (m *RbacConfig) GetMode() RbacConfig_Mode {
    if m != nil {
        return m.Mode
    }
    return RbacConfig_OFF
}

// Nil-safe generated accessors.
func (m *RbacConfig) GetInclusion() *RbacConfig_Target {
    if m != nil {
        return m.Inclusion
    }
    return nil
}

func (m *RbacConfig) GetExclusion() *RbacConfig_Target {
    if m != nil {
        return m.Exclusion
    }
    return nil
}
// Target defines a list of services or namespaces.
//
// Generated by protoc-gen-go; do not edit by hand.
type RbacConfig_Target struct {
    // A list of services.
    Services []string `protobuf:"bytes,1,rep,name=services" json:"services,omitempty"`
    // A list of namespaces.
    Namespaces           []string `protobuf:"bytes,2,rep,name=namespaces" json:"namespaces,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}

func (m *RbacConfig_Target) Reset()         { *m = RbacConfig_Target{} }
func (m *RbacConfig_Target) String() string { return proto.CompactTextString(m) }
func (*RbacConfig_Target) ProtoMessage()    {}
func (*RbacConfig_Target) Descriptor() ([]byte, []int) {
    return fileDescriptor_rbac_665e05e3b5f24d86, []int{5, 0}
}

// Generated wire-format plumbing; not intended to be called directly.
func (m *RbacConfig_Target) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_RbacConfig_Target.Unmarshal(m, b)
}
func (m *RbacConfig_Target) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_RbacConfig_Target.Marshal(b, m, deterministic)
}
func (dst *RbacConfig_Target) XXX_Merge(src proto.Message) {
    xxx_messageInfo_RbacConfig_Target.Merge(dst, src)
}
func (m *RbacConfig_Target) XXX_Size() int {
    return xxx_messageInfo_RbacConfig_Target.Size(m)
}
func (m *RbacConfig_Target) XXX_DiscardUnknown() {
    xxx_messageInfo_RbacConfig_Target.DiscardUnknown(m)
}

var xxx_messageInfo_RbacConfig_Target proto.InternalMessageInfo

// Nil-safe generated accessors.
func (m *RbacConfig_Target) GetServices() []string {
    if m != nil {
        return m.Services
    }
    return nil
}

func (m *RbacConfig_Target) GetNamespaces() []string {
    if m != nil {
        return m.Namespaces
    }
    return nil
}
// init registers every generated message, map, and enum type with the proto
// runtime under its fully-qualified protobuf name.
func init() {
    proto.RegisterType((*ServiceRole)(nil), "istio.rbac.v1alpha1.ServiceRole")
    proto.RegisterType((*AccessRule)(nil), "istio.rbac.v1alpha1.AccessRule")
    proto.RegisterType((*AccessRule_Constraint)(nil), "istio.rbac.v1alpha1.AccessRule.Constraint")
    proto.RegisterType((*ServiceRoleBinding)(nil), "istio.rbac.v1alpha1.ServiceRoleBinding")
    proto.RegisterType((*Subject)(nil), "istio.rbac.v1alpha1.Subject")
    proto.RegisterMapType((map[string]string)(nil), "istio.rbac.v1alpha1.Subject.PropertiesEntry")
    proto.RegisterType((*RoleRef)(nil), "istio.rbac.v1alpha1.RoleRef")
    proto.RegisterType((*RbacConfig)(nil), "istio.rbac.v1alpha1.RbacConfig")
    proto.RegisterType((*RbacConfig_Target)(nil), "istio.rbac.v1alpha1.RbacConfig.Target")
    proto.RegisterEnum("istio.rbac.v1alpha1.RbacConfig_Mode", RbacConfig_Mode_name, RbacConfig_Mode_value)
}

// init registers the raw (gzipped) file descriptor under the .proto file path.
func init() { proto.RegisterFile("rbac/v1alpha1/rbac.proto", fileDescriptor_rbac_665e05e3b5f24d86) }
// fileDescriptor_rbac_665e05e3b5f24d86 is the gzip-compressed
// FileDescriptorProto for rbac/v1alpha1/rbac.proto. Machine-generated data —
// never edit these bytes by hand.
var fileDescriptor_rbac_665e05e3b5f24d86 = []byte{
	// 530 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xd1, 0x6a, 0x13, 0x41,
	0x14, 0x75, 0x77, 0xd3, 0xa4, 0xb9, 0x01, 0x8d, 0x63, 0x95, 0x25, 0x14, 0x0d, 0x8b, 0x48, 0x11,
	0xd9, 0x90, 0x88, 0xa5, 0x08, 0x3e, 0xd8, 0x24, 0xc5, 0x40, 0x4c, 0x64, 0x52, 0x51, 0x7c, 0x29,
	0x93, 0xcd, 0x34, 0x19, 0xbb, 0xd9, 0x59, 0x66, 0x76, 0x83, 0xfd, 0x01, 0xbf, 0xcb, 0x2f, 0xd1,
	0x5f, 0x91, 0x99, 0xc9, 0x6c, 0xa2, 0xc4, 0x16, 0xdf, 0xee, 0x3d, 0xf7, 0x9e, 0xc3, 0xb9, 0x87,
	0xdd, 0x01, 0x5f, 0x4c, 0x49, 0xd4, 0x5a, 0xb5, 0x49, 0x9c, 0x2e, 0x48, 0xbb, 0xa5, 0xba, 0x30,
	0x15, 0x3c, 0xe3, 0xe8, 0x01, 0x93, 0x19, 0xe3, 0xa1, 0x46, 0xec, 0x3c, 0xe8, 0x41, 0x6d, 0x42,
	0xc5, 0x8a, 0x45, 0x14, 0xf3, 0x98, 0xa2, 0x57, 0xb0, 0x27, 0xf2, 0x98, 0x4a, 0xdf, 0x69, 0x7a,
	0x47, 0xb5, 0xce, 0x93, 0x70, 0x07, 0x27, 0x7c, 0x1b, 0x45, 0x54, 0x4a, 0x9c, 0xc7, 0x14, 0x9b,
	0xed, 0xe0, 0xa7, 0x03, 0xb0, 0x41, 0x51, 0x03, 0xf6, 0xa5, 0x11, 0x35, 0x42, 0x55, 0x5c, 0xf4,
	0xe8, 0x00, 0xf6, 0x52, 0x92, 0x2d, 0xa4, 0xef, 0xea, 0x81, 0x69, 0x90, 0x0f, 0x95, 0x25, 0xcd,
	0x16, 0x7c, 0x26, 0x7d, 0x4f, 0xe3, 0xb6, 0x45, 0x43, 0xa8, 0x45, 0x3c, 0x91, 0x99, 0x20, 0x2c,
	0xc9, 0xa4, 0x5f, 0xd2, 0xbe, 0x9e, 0xdf, 0xe2, 0x2b, 0xec, 0x16, 0x14, 0xbc, 0x4d, 0x6f, 0x1c,
	0x03, 0x6c, 0x46, 0xa8, 0x0e, 0xde, 0x15, 0xbd, 0xf6, 0x9d, 0xa6, 0x73, 0x54, 0xc5, 0xaa, 0x44,
	0x8f, 0xa0, 0xbc, 0x22, 0x71, 0x4e, 0xad, 0xbd, 0x75, 0x17, 0x7c, 0x77, 0x00, 0x6d, 0xe5, 0x74,
	0xca, 0x92, 0x19, 0x4b, 0xe6, 0xe8, 0x04, 0xf6, 0x65, 0x3e, 0xfd, 0x4a, 0xa3, 0xcc, 0x26, 0x76,
	0xb8, 0xd3, 0xd9, 0xc4, 0x2c, 0xe1, 0x62, 0x1b, 0x1d, 0x43, 0x45, 0xf0, 0x98, 0x62, 0x7a, 0xe9,
	0xbb, 0x4d, 0xe7, 0x9f, 0x44, 0x6c, 0x76, 0xb0, 0x5d, 0x0e, 0x7e, 0x38, 0x50, 0x59, 0xab, 0x21,
	0x04, 0xa5, 0x5c, 0x52, 0xb1, 0xf6, 0xaf, 0x6b, 0x15, 0xef, 0x5c, 0xf0, 0x3c, 0xd5, 0xaa, 0x55,
	0x6c, 0x1a, 0x34, 0x04, 0x48, 0x05, 0x4f, 0xa9, 0xc8, 0x18, 0x35, 0x09, 0xd7, 0x3a, 0x2f, 0x6e,
	0x72, 0x1a, 0x7e, 0x28, 0xd6, 0xfb, 0x49, 0x26, 0xae, 0xf1, 0x16, 0xbf, 0xf1, 0x06, 0xee, 0xfd,
	0x35, 0xde, 0x91, 0xe4, 0x01, 0xec, 0xe9, 0xec, 0xac, 0x11, 0xdd, 0xbc, 0x76, 0x4f, 0x9c, 0xa0,
	0x0d, 0x95, 0xf5, 0x59, 0xea, 0x82, 0x2b, 0x96, 0xcc, 0xec, 0x05, 0xaa, 0x56, 0x58, 0x42, 0x96,
	0x96, 0xa7, 0xeb, 0xe0, 0x97, 0x0b, 0x80, 0xa7, 0x24, 0xea, 0xf2, 0xe4, 0x92, 0xa9, 0xd8, 0x4b,
	0x4b, 0x3e, 0xa3, 0x9a, 0x76, 0xb7, 0xf3, 0x74, 0x77, 0x72, 0xc5, 0x7a, 0xf8, 0x9e, 0xcf, 0x28,
	0xd6, 0x0c, 0xd4, 0x83, 0x2a, 0x4b, 0xa2, 0x38, 0x97, 0x8c, 0x27, 0xeb, 0xe0, 0x9f, 0xdd, 0x46,
	0x3f, 0x27, 0x62, 0x4e, 0x33, 0xbc, 0x21, 0x2a, 0x15, 0xfa, 0xcd, 0xaa, 0x78, 0xff, 0xa7, 0x52,
	0x10, 0x1b, 0x3d, 0x28, 0x1b, 0xf0, 0xc6, 0xff, 0xe5, 0x31, 0x80, 0x8a, 0x40, 0xa6, 0x24, 0x2a,
	0xbe, 0xca, 0x2d, 0x24, 0xe8, 0x43, 0x49, 0xdd, 0x87, 0x2a, 0xe0, 0x8d, 0xcf, 0xce, 0xea, 0x77,
	0x50, 0x19, 0xdc, 0xf1, 0xa8, 0xee, 0xa0, 0x87, 0x70, 0x7f, 0x3c, 0xba, 0xf8, 0x34, 0x38, 0x7f,
	0x77, 0x31, 0x18, 0x75, 0x87, 0x1f, 0x27, 0x83, 0xf1, 0xa8, 0xee, 0x6e, 0xc3, 0xfd, 0xcf, 0x16,
	0xf6, 0x4e, 0x0f, 0xbf, 0x34, 0xcc, 0x01, 0x8c, 0xb7, 0x48, 0xca, 0x5a, 0x7f, 0xbc, 0x22, 0xd3,
	0xb2, 0x7e, 0x41, 0x5e, 0xfe, 0x0e, 0x00, 0x00, 0xff, 0xff, 0xcc, 0xa7, 0x9a, 0xb8, 0x5d, 0x04,
	0x00, 0x00,
}
package ioc
// ComponentState represents what state (stopped, running) or transition between states (stopping, starting) a component is currently in.
type ComponentState int

// The lifecycle states a component moves through. They are explicitly typed
// as ComponentState (the original declared them as untyped ints, which
// defeated the purpose of declaring the type at all).
const (
	// StoppedState indicates that a component has stopped
	StoppedState ComponentState = iota
	// StoppingState indicates that a component is in the process of stopping
	StoppingState
	// StartingState indicates that a component is in the process of starting
	StartingState
	// AwaitingAccessState indicates that a component is available for connections from external sources
	AwaitingAccessState
	// RunningState indicates that a component is running normally
	RunningState
	// SuspendingState indicates that a component is in the process of being suspended
	SuspendingState
	// SuspendedState indicates that a component has been suspended and is effectively paused
	SuspendedState
	// ResumingState indicates that a component in the process of being resumed from a suspended state
	ResumingState
)
// ProtoComponents is a wrapping structure for a list of ProtoComponents and FrameworkDependencies that is required when starting Granitic.
// A ProtoComponents structure is built by the grnc-bind tool.
type ProtoComponents struct {
	// ProtoComponents to be finalised and stored in the IoC container.
	Components []*ProtoComponent

	// FrameworkDependencies are instructions to inject components into built-in Granitic components to alter their behaviour.
	// The structure is map[framework component name]map[field name]component name to inject.
	FrameworkDependencies map[string]map[string]string

	// FrameworkConfig is a Base64 encoded version of the JSON files found in resource/facility-config.
	FrameworkConfig *string
}
// Clear removes the reference to the ProtoComponent objects held in this object, encouraging garbage collection.
// Call this once the container has finished constructing real components from the protos.
func (pc *ProtoComponents) Clear() {
	pc.Components = nil
}
// NewProtoComponents creates a wrapping structure around a list of
// ProtoComponents, the framework dependency instructions and the serialised
// framework configuration.
func NewProtoComponents(pc []*ProtoComponent, fd map[string]map[string]string, ser *string) *ProtoComponents {
	return &ProtoComponents{
		Components:            pc,
		FrameworkDependencies: fd,
		FrameworkConfig:       ser,
	}
}
// CreateProtoComponent creates a new ProtoComponent wrapping the supplied
// instance under the supplied unique component name.
func CreateProtoComponent(componentInstance interface{}, componentName string) *ProtoComponent {
	return &ProtoComponent{
		Component: &Component{
			Name:     componentName,
			Instance: componentInstance,
		},
	}
}
// A ProtoComponent is a partially configured component that will be hosted in the Granitic IoC container once
// it is fully configured. Typically ProtoComponents are created using the grnc-bind tool.
type ProtoComponent struct {
	// The name of a component and the component instance (a pointer to an instantiated struct).
	Component *Component

	// A map of fields on the component instance and the names of other components that should be injected into those fields.
	Dependencies map[string]string

	// A map of fields on the component instance and the config-path that will contain the configuration that should be injected into the field.
	ConfigPromises map[string]string

	// A map of default values for fields if a config promise is not fulfilled.
	DefaultValues map[string]string
}
// AddDependency requests that the container injects another component into the specified field during the configure phase of
// container startup. The dependency map is created lazily on first use.
func (pc *ProtoComponent) AddDependency(fieldName, componentName string) {
	if pc.Dependencies == nil {
		pc.Dependencies = map[string]string{}
	}

	pc.Dependencies[fieldName] = componentName
}

// AddConfigPromise requests that the container injects the config value at the specified path into the specified field during the configure phase of
// container startup. The promise map is created lazily on first use.
func (pc *ProtoComponent) AddConfigPromise(fieldName, configPath string) {
	if pc.ConfigPromises == nil {
		pc.ConfigPromises = map[string]string{}
	}

	pc.ConfigPromises[fieldName] = configPath
}
// AddDefaultValue records an untyped default value to use if a config promise
// is not fulfilled. The map is created lazily on first use.
func (pc *ProtoComponent) AddDefaultValue(fieldName, value string) {
	if pc.DefaultValues == nil {
		pc.DefaultValues = map[string]string{}
	}

	pc.DefaultValues[fieldName] = value
}
// HasDefaultValue returns true if a default value has been registered for the supplied field.
// Uses the comma-ok idiom so that a default that was explicitly registered as
// the empty string is still reported as present (the original compared the
// value against "", which misreported such defaults as absent).
func (pc *ProtoComponent) HasDefaultValue(fieldName string) bool {
	_, ok := pc.DefaultValues[fieldName]
	return ok
}
// DefaultValue returns the default value recorded for the supplied field or the empty string if it hasn't been set.
// Indexing a nil map safely yields the zero value in Go, so the explicit nil
// check in the original was redundant and has been removed.
func (pc *ProtoComponent) DefaultValue(fieldName string) string {
	return pc.DefaultValues[fieldName]
}
// A Component is an instance of a struct with a name that is unique within your application.
type Component struct {
	// A pointer to a struct
	Instance interface{}

	// A name for this component that is unique within your application
	Name string
}

// Components is a type definition for a slice of components to allow sorting.
type Components []*Component

// Len returns the number of components in the slice (part of sort.Interface).
func (s Components) Len() int { return len(s) }

// Swap exchanges the position of the components at the specified indexes (part of sort.Interface).
func (s Components) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

// ByName allows a slice of components to be sorted by name; it embeds
// Components to inherit Len and Swap and supplies its own Less.
type ByName struct{ Components }

// Less returns true if the the component at index i has a name the lexicographically proceeds that at j
func (s ByName) Less(i, j int) bool { return s.Components[i].Name < s.Components[j].Name }
// ComponentNamer is implemented by components where the component's instance needs to be aware of its own component name.
// The container can use this to push the registered name into the instance.
type ComponentNamer interface {
	// ComponentName returns the name of the component
	ComponentName() string
	// SetComponentName injects the component's name
	SetComponentName(name string)
}
// NewComponent creates a new Component with the supplied name and instance
func NewComponent(name string, instance interface{}) *Component {
c := new(Component)
c.Instance = instance
c.Name = name
return c
} | ioc/component.go | 0.737158 | 0.448728 | component.go | starcoder |
package nune
import (
"fmt"
"math"
"reflect"
"strings"
)
// String returns a string representation of the Tensor.
// If the Tensor carries an error it panics in interactive mode and returns
// the placeholder "Tensor(error)" otherwise.
func (t Tensor[T]) String() string {
	if t.Err != nil {
		if EnvConfig.Interactive {
			panic(t.Err)
		}
		return "Tensor(error)"
	}

	template := "Tensor({})"
	f := newFmtState(template, t)

	// The original wrapped this in fmt.Sprintf("%s", s), which is a no-op
	// on a string (staticcheck S1025); return the string directly.
	return strings.Replace(template, "{}", fmtTensor(t, f), 1)
}
// fmtTensor formats the Tensor into a string. A rank-0 Tensor renders as its
// bare scalar; higher ranks render as a bracketed list, excerpted when the
// leading dimension exceeds FmtConfig.Excerpt.
func fmtTensor[T Number](t Tensor[T], s fmtState) string {
	if t.Rank() == 0 {
		return fmtNum(t.Scalar(), s)
	}

	var sb strings.Builder
	sb.WriteString("[")
	if t.Size(0) > FmtConfig.Excerpt {
		sb.WriteString(fmtExcerpted(t, s))
	} else {
		sb.WriteString(fmtComplete(t, s))
	}
	sb.WriteString("]")

	return sb.String()
}
// fmtNum formats a numeric type into a string, right-aligned in a column of
// s.width characters. Floats use FmtConfig.Precision decimal places; bytes
// render as their character when FmtConfig.Btoa is enabled; everything else
// renders as a decimal integer.
func fmtNum[T Number](x T, s fmtState) string {
	switch reflect.ValueOf(x).Kind() {
	case reflect.Float32, reflect.Float64:
		return fmt.Sprintf("%*.*f", s.width, FmtConfig.Precision, float64(x))
	case reflect.Uint8:
		if FmtConfig.Btoa {
			// The original wrapped this in fmt.Sprintf("%s", ...), a no-op
			// on a string (staticcheck S1025).
			return string(byte(x))
		}
		fallthrough
	default:
		return fmt.Sprintf("%*d", s.width, int64(x))
	}
}
// fmtExcerpted formats an excerpted representation of
// a Tensor into a string: the first Excerpt/2 and last Excerpt/2 leading
// entries, separated by an ellipsis.
func fmtExcerpted[T Number](t Tensor[T], s fmtState) string {
	var b strings.Builder
	var f string

	// Leading half; strip the surrounding brackets added by fmtTensor
	// since this excerpt is spliced into the caller's brackets.
	f = fmtTensor(t.Slice(0, FmtConfig.Excerpt/2), s)
	f = f[1 : len(f)-1]
	b.WriteString(f)

	if t.Rank() == 1 {
		b.WriteString(", ..., ")
	} else {
		// For higher ranks the ellipsis goes on its own padded line.
		b.WriteString("\n")
		b.WriteString(strings.Repeat(" ", s.pad+1))
		b.WriteString("...,\n")
		b.WriteString(strings.Repeat(" ", s.pad+1))
	}

	// Trailing half, also with brackets stripped.
	f = fmtTensor(t.Slice(t.Size(0)-FmtConfig.Excerpt/2, t.Size(0)), s)
	f = f[1 : len(f)-1]
	b.WriteString(f)

	return b.String()
}
// fmtComplete formats a complete representation of
// a Tensor into a string, rendering every entry along the leading dimension.
func fmtComplete[T Number](t Tensor[T], s fmtState) string {
	var b strings.Builder

	for i := 0; i < t.Size(0); i++ {
		if t.Rank() == 1 {
			// Innermost dimension: entries separated by ", ".
			b.WriteString(fmtTensor(t.Index(i), s))
			if i < t.Size(0)-1 {
				b.WriteString(", ")
			}
		} else {
			// Nested dimension: recurse one level deeper, then separate
			// rows with s.esc newlines and left padding.
			b.WriteString(fmtTensor(t.Index(i), s.update()))
			if i < t.Size(0)-1 {
				b.WriteString(strings.Repeat("\n", s.esc))
				b.WriteString(strings.Repeat(" ", s.pad+1))
			}
		}
	}

	return b.String()
}
// A fmtState holds the format configurations while formatting a Tensor.
type fmtState struct {
	// depth: current nesting level; esc: newlines between rows at this level;
	// pad: left padding in spaces; width: numeric column width.
	depth, esc, pad, width int
}

// update prepares all the fmtState configurations for the next format call,
// one nesting level deeper. The value receiver means the caller's state is
// left untouched.
func (f fmtState) update() fmtState {
	f.depth++
	f.esc--
	f.pad++

	return f
}
// newFmtState returns a new fmtState configured for a base Tensor
// representation: depth zero, one row-separating newline per remaining rank,
// padding derived from the template and numeric width from the Tensor's
// extrema.
func newFmtState[T Number](fmt string, t Tensor[T]) fmtState {
	return fmtState{
		depth: 0,
		esc:   t.Rank() - 1,
		pad:   cfgPad(fmt),
		width: cfgWidth(t),
	}
}
// cfgPad configures the padding from a base Tensor representation: the number
// of characters preceding the "{}" placeholder in the template (the whole
// string's length if the placeholder is absent).
func cfgPad(s string) int {
	if i := strings.Index(s, "{}"); i >= 0 {
		return i
	}
	return len(s)
}
// cfgWidth configures the numeric types' width from a given Tensor.
func cfgWidth[T Number](t Tensor[T]) int {
// find min and max numbers
var min, max T = t.Min().Scalar(), t.Max().Scalar()
// set x to min OR max, whichever has more numbers
x := T(math.Max(math.Abs(float64(min)), math.Abs(float64(max))))
var l int
switch reflect.ValueOf(x).Kind() {
case reflect.Float32, reflect.Float64:
l = len(fmt.Sprintf("%.*f", FmtConfig.Precision, float64(x)))
case reflect.Uint8:
if FmtConfig.Btoa {
l = 1
}
fallthrough
default:
l = len(fmt.Sprintf("%d", int64(x)))
}
if min < 0 {
l++
}
return l
} | string.go | 0.731346 | 0.497742 | string.go | starcoder |
package twistededwards
import (
"math/bits"
"github.com/consensys/gurvy/bn256/fr"
)
// Point point on a twisted Edwards curve, in affine coordinates (x, y).
type Point struct {
	X, Y fr.Element
}

// PointProj point in projective coordinates (X:Y:Z), where the affine point
// is (X/Z, Y/Z).
type PointProj struct {
	X, Y, Z fr.Element
}

// Set sets p to p1 and return it
func (p *PointProj) Set(p1 *PointProj) *PointProj {
	p.X.Set(&p1.X)
	p.Y.Set(&p1.Y)
	p.Z.Set(&p1.Z)
	return p
}

// NewPoint creates a new instance of Point from affine coordinates x, y.
// No on-curve check is performed; use IsOnCurve for that.
func NewPoint(x, y fr.Element) Point {
	return Point{x, y}
}
// IsOnCurve checks if a point is on the twisted Edwards curve, i.e. whether
// a*x^2 + y^2 == 1 + d*x^2*y^2 for the curve parameters a, d.
func (p *Point) IsOnCurve() bool {

	ecurve := GetEdwardsCurve()

	var lhs, rhs, tmp fr.Element

	// lhs = a*x^2 + y^2
	tmp.Mul(&p.Y, &p.Y)
	lhs.Mul(&p.X, &p.X).
		Mul(&lhs, &ecurve.A).
		Add(&lhs, &tmp)

	// rhs = 1 + d*x^2*y^2
	tmp.Mul(&p.X, &p.X).
		Mul(&tmp, &p.Y).
		Mul(&tmp, &p.Y).
		Mul(&tmp, &ecurve.D)
	rhs.SetOne().Add(&rhs, &tmp)

	// NOTE(review): both sides are converted with ToRegular before comparing —
	// presumably to compare outside Montgomery representation. If fr.Element's
	// Equal is representation-safe, a direct lhs.Equal(&rhs) would suffice;
	// confirm against the fr package before simplifying.
	lhsreg := lhs.ToRegular()
	rhsreg := rhs.ToRegular()

	return rhsreg.Equal(&lhsreg)
}
// Add adds two points (x,y), (u,v) on a twisted Edwards curve with parameters a, d
// using the affine addition law:
//
//	x3 = (x1*y2 + y1*x2) / (1 + d*x1*x2*y1*y2)
//	y3 = (y1*y2 - a*x1*x2) / (1 - d*x1*x2*y1*y2)
//
// modifies p
func (p *Point) Add(p1, p2 *Point) *Point {

	ecurve := GetEdwardsCurve()

	var xu, yv, xv, yu, dxyuv, one, denx, deny fr.Element
	pRes := new(Point)

	// numerator of x3: x1*y2 + y1*x2
	xv.Mul(&p1.X, &p2.Y)
	yu.Mul(&p1.Y, &p2.X)
	pRes.X.Add(&xv, &yu)

	// numerator of y3: y1*y2 - a*x1*x2
	xu.Mul(&p1.X, &p2.X).Mul(&xu, &ecurve.A)
	yv.Mul(&p1.Y, &p2.Y)
	pRes.Y.Sub(&yv, &xu)

	// shared denominator term d*x1*x2*y1*y2
	dxyuv.Mul(&xv, &yu).Mul(&dxyuv, &ecurve.D)
	one.SetOne()
	denx.Add(&one, &dxyuv)
	deny.Sub(&one, &dxyuv)

	p.X.Div(&pRes.X, &denx)
	p.Y.Div(&pRes.Y, &deny)

	return p
}

// Double doubles point (x,y) on a twisted Edwards curve with parameters a, d
// by adding the point to itself; modifies p.
func (p *Point) Double(p1 *Point) *Point {
	p.Add(p1, p1)
	return p
}
// FromProj sets p in affine from p1 in projective: (X/Z, Y/Z).
// Z must be non-zero.
func (p *Point) FromProj(p1 *PointProj) *Point {
	p.X.Div(&p1.X, &p1.Z)
	p.Y.Div(&p1.Y, &p1.Z)
	return p
}

// FromAffine sets p in projective from p1 in affine: (x, y, 1).
func (p *PointProj) FromAffine(p1 *Point) *PointProj {
	p.X.Set(&p1.X)
	p.Y.Set(&p1.Y)
	p.Z.SetOne()
	return p
}
// Add adds points in projective coordinates using the "add-2008-bbjlp"
// formulas:
//
//	A = Z1*Z2; B = A^2; C = X1*X2; D = Y1*Y2; E = d*C*D
//	F = B - E; G = B + E
//	X3 = A*F*((X1+Y1)*(X2+Y2) - C - D)
//	Y3 = A*G*(D - a*C)
//	Z3 = F*G
//
// cf https://hyperelliptic.org/EFD/g1p/auto-twisted-projective.html
func (p *PointProj) Add(p1, p2 *PointProj) *PointProj {

	var res PointProj

	ecurve := GetEdwardsCurve()

	var A, B, C, D, E, F, G, H, I fr.Element
	A.Mul(&p1.Z, &p2.Z)
	B.Square(&A)
	C.Mul(&p1.X, &p2.X)
	D.Mul(&p1.Y, &p2.Y)
	E.Mul(&ecurve.D, &C).Mul(&E, &D)
	F.Sub(&B, &E)
	G.Add(&B, &E)
	H.Add(&p1.X, &p1.Y)
	I.Add(&p2.X, &p2.Y)
	res.X.Mul(&H, &I).
		Sub(&res.X, &C).
		Sub(&res.X, &D).
		// BUG FIX: the formula requires A = Z1*Z2 here; the original used
		// p1.Z, which is only equal to A when Z2 == 1.
		Mul(&res.X, &A).
		Mul(&res.X, &F)
	H.Mul(&ecurve.A, &C)
	res.Y.Sub(&D, &H).
		// BUG FIX: the original multiplied by the RECEIVER's Z (p.Z), which
		// is only correct when p == p1 and Z2 == 1 (as happens to hold in
		// ScalarMul). The formula requires A = Z1*Z2.
		Mul(&res.Y, &A).
		Mul(&res.Y, &G)
	res.Z.Mul(&F, &G)

	p.Set(&res)
	return p
}
// Double doubles a point in projective coordinates using the
// "dbl-2008-bbjlp" formulas:
//
//	B = (X1+Y1)^2; C = X1^2; D = Y1^2; E = a*C; F = E + D; H = Z1^2
//	J = F - 2H
//	X3 = (B - C - D)*J; Y3 = F*(E - D); Z3 = F*J
//
// cf https://hyperelliptic.org/EFD/g1p/auto-twisted-projective.html
func (p *PointProj) Double(p1 *PointProj) *PointProj {

	var res PointProj

	ecurve := GetEdwardsCurve()

	var B, C, D, E, F, H, J, tmp fr.Element

	B.Add(&p1.X, &p1.Y).Square(&B)
	C.Square(&p1.X)
	D.Square(&p1.Y)
	E.Mul(&ecurve.A, &C)
	F.Add(&E, &D)
	H.Square(&p1.Z)
	tmp.Double(&H)
	J.Sub(&F, &tmp)
	res.X.Sub(&B, &C).
		Sub(&res.X, &D).
		Mul(&res.X, &J)
	res.Y.Sub(&E, &D).Mul(&res.Y, &F)
	res.Z.Mul(&F, &J)

	p.Set(&res)
	return p
}
// ScalarMul scalar multiplication of a point
// p1 points on the twisted Edwards curve
// c parameters of the twisted Edwards curve
// scal scalar NOT in Montgomery form
// modifies p
//
// Implements a plain double-and-add, scanning the scalar's limbs from the
// most significant word down (the hard-coded 4 is fr.Element's limb count).
func (p *Point) ScalarMul(p1 *Point, scalar fr.Element) *Point {

	var resProj, p1Proj PointProj
	// Start from the identity (0, 1, 1).
	resProj.X.SetZero()
	resProj.Y.SetOne()
	resProj.Z.SetOne()

	p1Proj.FromAffine(p1)

	// NOTE(review): fr.Element limbs appear to be uint64 (scalar[i] is masked
	// with uint64 shifts below), but bits.UintSize is 32 on 32-bit platforms,
	// which would skip the upper half of every limb there. Presumably this
	// should be a constant 64 — confirm against the fr package before changing,
	// as the bits import would then become unused.
	const wordSize = bits.UintSize

	for i := 4 - 1; i >= 0; i-- {
		for j := 0; j < wordSize; j++ {
			resProj.Double(&resProj)
			// Extract bit (wordSize-1-j) of limb i, MSB first.
			b := (scalar[i] & (uint64(1) << uint64(wordSize-1-j))) >> uint64(wordSize-1-j)
			if b == 1 {
				resProj.Add(&resProj, &p1Proj)
			}
		}
	}

	p.FromProj(&resProj)

	return p
}
package calc
import (
"bytes"
"errors"
"math/big"
)
// associativity of an operator.
type associativity int

// associativity values.
const (
	leftassociative associativity = iota
	rightassociative
)

// operator is a binary operator.
type operator struct {
	precedence    int           // binding strength; higher binds tighter
	associativity associativity // tie-break for equal precedence
	// apply computes z = x OP y and returns z.
	apply func(*big.Int, *big.Int, *big.Int) *big.Int
}

// operators supported by the calculator. Note '/' uses big.Int.Div
// (Euclidean division) and '^' shares precedence with '*'/'/' but is
// right-associative, which is what makes the tie-break in yard.operator
// evaluate it first when adjacent.
var operators = map[byte]operator{
	'^': {precedence: 3, associativity: rightassociative, apply: func(z, x, y *big.Int) *big.Int { return z.Exp(x, y, nil) }},
	'*': {precedence: 3, associativity: leftassociative, apply: (*big.Int).Mul},
	'/': {precedence: 3, associativity: leftassociative, apply: (*big.Int).Div},
	'+': {precedence: 2, associativity: leftassociative, apply: (*big.Int).Add},
	'-': {precedence: 2, associativity: leftassociative, apply: (*big.Int).Sub},
}
// yard implements the "shunting yard" algorithm: two stacks, one of operand
// values and one of pending operators.
type yard struct {
	operands  []*big.Int
	operators []operator
}

// operand pushes a new operand x onto the operand stack.
func (y *yard) operand(x *big.Int) {
	y.operands = append(y.operands, x)
}
// operator pushes a new operator, first applying any stacked operators that
// bind at least as tightly (respecting associativity for equal precedence).
func (y *yard) operator(op operator) error {
	// Pop higher precedence operators.
	for len(y.operators) > 0 {
		top := y.peek()
		// Stop popping once the stacked operator binds more loosely, or
		// binds equally but the incoming operator is right-associative.
		if top.precedence < op.precedence || (top.precedence == op.precedence && op.associativity != leftassociative) {
			break
		}
		if err := y.apply(top); err != nil {
			return err
		}
		y.pop()
	}

	// Push operator on the stack.
	y.operators = append(y.operators, op)
	return nil
}

// apply applies op to the top two operands, replacing them with the result.
// Returns an error if fewer than two operands are available.
func (y *yard) apply(op operator) error {
	n := len(y.operands)
	if n < 2 {
		return errors.New("too few operands")
	}
	z := new(big.Int)
	op.apply(z, y.operands[n-2], y.operands[n-1])
	y.operands = append(y.operands[:n-2], z)
	return nil
}
// result finalizes the evaluation by applying all pending operators and
// returns the single remaining operand, or an error if the expression was
// malformed.
func (y *yard) result() (*big.Int, error) {
	for len(y.operators) > 0 {
		if err := y.apply(y.pop()); err != nil {
			return nil, err
		}
	}
	if len(y.operands) != 1 {
		return nil, errors.New("wrong operand count")
	}
	return y.operands[0], nil
}

// peek returns the operator at the top of the stack without removing it.
// The stack must be non-empty.
func (y *yard) peek() operator {
	return y.operators[len(y.operators)-1]
}

// pop removes and returns the operator at the top of the stack.
// The stack must be non-empty.
func (y *yard) pop() operator {
	top := len(y.operators) - 1
	op := y.operators[top]
	y.operators = y.operators[:top]
	return op
}
// Eval evaluates the arithmetic expression and returns its value as a
// *big.Int. The scanner alternates between expecting an operand (a number)
// and an operator, feeding each into the shunting yard.
func Eval(expr string) (*big.Int, error) {
	b := []byte(expr)
	y := &yard{}
	operand := true
	for len(b) > 0 {
		// Skip this character?
		if skip(b[0]) {
			b = b[1:]
			continue
		}

		// Expect an operand.
		if operand {
			x, rest, err := number(b)
			if err != nil {
				return nil, err
			}
			y.operand(x)
			b = rest
			operand = false
			continue
		}

		// Expect an operator.
		op, ok := operators[b[0]]
		if !ok {
			return nil, errors.New("expected operator")
		}
		if err := y.operator(op); err != nil {
			return nil, err
		}
		b = b[1:]
		operand = true
	}

	return y.result()
}
// number parses a number from the front of b, returning the value and the
// remaining bytes. It accepts an optional leading '-', and "0b"/"0x" prefixes
// for binary/hex literals; the actual conversion is delegated to
// big.Int.SetString with base 0 (which understands those prefixes).
func number(b []byte) (*big.Int, []byte, error) {
	// Find the end.
	i := 0
	if len(b) > 0 && b[0] == '-' {
		i++
	}
	isdigit := isdecimal
	switch {
	case bytes.HasPrefix(b[i:], []byte("0b")):
		isdigit = isbinary
		i += 2
	case bytes.HasPrefix(b[i:], []byte("0x")):
		isdigit = ishex
		i += 2
	}
	for ; i < len(b) && isdigit(b[i]); i++ {
	}

	// Parse.
	x, ok := new(big.Int).SetString(string(b[:i]), 0)
	if !ok {
		return nil, nil, errors.New("expected number")
	}

	return x, b[i:], nil
}
// skip reports whether b should be skipped by the scanner.
// Only the plain space character is skipped; tabs and other whitespace are
// treated as errors by the caller.
func skip(b byte) bool {
	return b == ' '
}
// isdecimal reports whether b is an ASCII decimal digit.
func isdecimal(b byte) bool {
	// Unsigned byte arithmetic: anything below '0' wraps to a large value.
	return b-'0' < 10
}

// ishex reports whether b is a decimal digit or a lowercase hex letter a-f.
func ishex(b byte) bool {
	switch {
	case isdecimal(b):
		return true
	case 'a' <= b && b <= 'f':
		return true
	default:
		return false
	}
}

// isbinary reports whether b is a binary digit.
func isbinary(b byte) bool {
	switch b {
	case '0', '1':
		return true
	}
	return false
}
package climate
import (
"context"
"github.com/ironarachne/world/pkg/geography/region"
"github.com/ironarachne/world/pkg/geometry"
)
// Climate is a geographic climate.
type Climate struct {
	CloudCover             int    `json:"cloud_cover"`             // 0-99
	WindStrength           int    `json:"wind_strength"`           // 0-99
	WindDirection          int    `json:"wind_direction"`          // 0-7, compass octant
	PrecipitationAmount    int    `json:"precipitation_amount"`    // 0-99
	PrecipitationFrequency int    `json:"precipitation_frequency"` // 0-99
	PrecipitationType      string `json:"precipitation_type"`      // "rain" or "snow"
}
// DescribeClouds gives a textual description for the clouds, bucketing
// CloudCover into five qualitative bands.
func (c Climate) DescribeClouds() string {
	switch {
	case c.CloudCover < 10:
		return "no clouds"
	case c.CloudCover < 30:
		return "few clouds"
	case c.CloudCover < 50:
		return "some clouds"
	case c.CloudCover < 70:
		return "many clouds"
	default:
		return "frequently overcast skies"
	}
}
// Generate procedurally generates a geographic climate based on a region.
// The ctx parameter is currently unused but kept for interface consistency
// with other generators in this package.
func Generate(ctx context.Context, r region.Region) Climate {
	c := Climate{}

	// Design notes on how the derived values relate:
	// cloud cover increases further away from mountains
	// cloud cover increases if mountains are to the east
	// cloud cover increases with temperature
	// cloud cover increases with wind strength
	// wind strength increases with temperature
	// wind strength increases closer to mountains
	// wind moves away from mountains
	// wind slows down going uphill
	// wind speeds up going downhill

	c.WindDirection = geometry.OppositeDirection(r.NearestMountainsDirection)
	c.WindStrength = getWindStrength(r.NearestMountainsDistance, r.NearestOceanDistance)
	c.CloudCover = getCloudCover(r.Temperature, c.WindStrength, r.NearestMountainsDistance)
	c.PrecipitationAmount = getPrecipitationAmount(r.Temperature, r.Humidity)
	c.PrecipitationFrequency = getPrecipitationFrequency(c.CloudCover, c.PrecipitationAmount)
	c.PrecipitationType = getPrecipitationType(r.Temperature)

	return c
}
// getCloudCover derives cloud cover from temperature, wind strength and
// distance to the nearest mountains, capped at 99.
func getCloudCover(temperature int, windStrength int, mountainDistance int) int {
	cover := temperature/3 + windStrength/3 + mountainDistance/2
	if cover > 99 {
		return 99
	}
	return cover
}

// getPrecipitationAmount derives precipitation volume from temperature and
// humidity, capped at 99.
func getPrecipitationAmount(temperature int, humidity int) int {
	amount := temperature/2 + int(float64(humidity)*0.7)
	if amount > 99 {
		return 99
	}
	return amount
}

// getPrecipitationFrequency derives how often precipitation occurs from the
// cloud cover and precipitation amount.
func getPrecipitationFrequency(cloudCover int, amount int) int {
	return cloudCover/3 + amount/3
}

// getPrecipitationType returns "snow" for cold regions, "rain" otherwise.
func getPrecipitationType(temperature int) string {
	switch {
	case temperature < 30:
		return "snow"
	default:
		return "rain"
	}
}

// getWindStrength derives wind strength from distances to the nearest
// mountains and ocean, capped at 99.
func getWindStrength(mountainDistance int, oceanDistance int) int {
	strength := mountainDistance/2 + oceanDistance/4
	if strength > 99 {
		return 99
	}
	return strength
}
package matcher
import (
"errors"
"fmt"
"reflect"
"runtime/debug"
"github.com/onsi/gomega/format"
errorsutil "github.com/onsi/gomega/gstruct/errors"
"github.com/onsi/gomega/types"
)
// Simplify element matcher
// See https://github.com/onsi/gomega/blob/master/gstruct/elements.go
// MatchSlice succeeds if every element of a slice matches the element matcher it maps to through the id function, and every element matcher is matched.
func MatchSlice(elements Elements) types.GomegaMatcher {
	m := &SliceMatcher{
		Elements: elements,
	}
	return m
}

// SliceMatcher is a NestingMatcher that applies custom matchers to each element of a slice mapped
// by the Identifier function.
// TODO: Extend this to work with arrays & maps (map the key) as well.
type SliceMatcher struct {
	// Matchers for each element.
	Elements Elements

	// State: failures collected by the most recent Match call.
	failures []error
}

// Elements is a positional list of matchers, one per slice element.
type Elements []types.GomegaMatcher
// Match implements gomega.Matcher: it succeeds when actual is a slice and
// every element matches its positional matcher. Per-element failures are
// recorded for FailureMessage/Failures.
func (m *SliceMatcher) Match(actual interface{}) (success bool, err error) {
	// Guard against nil: reflect.TypeOf(nil) returns a nil Type, and calling
	// Kind on it would panic in the original code.
	if actual == nil || reflect.TypeOf(actual).Kind() != reflect.Slice {
		return false, fmt.Errorf("%v is type %T, expected slice", actual, actual)
	}

	m.failures = m.matchElements(actual)
	if len(m.failures) > 0 {
		return false, nil
	}
	return true, nil
}
// matchElements runs each positional matcher against the corresponding slice
// element and collects one nested error per mismatch. A length mismatch
// short-circuits with a single error.
func (m *SliceMatcher) matchElements(actual interface{}) (errs []error) {
	// Provide more useful error messages in the case of a panic.
	defer func() {
		if err := recover(); err != nil {
			errs = append(errs, fmt.Errorf("panic checking %+v: %v\n%s", actual, err, debug.Stack()))
		}
	}()

	val := reflect.ValueOf(actual)
	length := val.Len()
	if len(m.Elements) != length {
		errs = append(errs, fmt.Errorf("unexpected slice length, expected: %v, actual: %v", len(m.Elements), length))
		return errs
	}

	for i := 0; i < length; i++ {
		element := val.Index(i).Interface()
		matcher := m.Elements[i]
		match, err := matcher.Match(element)
		if match {
			continue
		}
		if err == nil {
			// Prefer the nested matcher's own collected failures when
			// available; otherwise fall back to its failure message.
			if nesting, ok := matcher.(errorsutil.NestingMatcher); ok {
				err = errorsutil.AggregateError(nesting.Failures())
			} else {
				err = errors.New(matcher.FailureMessage(element))
			}
		}
		// Tag the error with the element's index, e.g. "[3]".
		errs = append(errs, errorsutil.Nest(fmt.Sprintf("[%v]", i), err))
	}
	return errs
}
// FailureMessage implements types.GomegaMatcher, aggregating the per-element
// failures recorded by the last Match call.
func (m *SliceMatcher) FailureMessage(actual interface{}) (message string) {
	failure := errorsutil.AggregateError(m.failures)
	return format.Message(actual, fmt.Sprintf("to match elements: %v", failure))
}

// NegatedFailureMessage implements types.GomegaMatcher.
func (m *SliceMatcher) NegatedFailureMessage(actual interface{}) (message string) {
	return format.Message(actual, "not to match elements")
}

// Failures returns the failures recorded by the last Match call
// (implements errorsutil.NestingMatcher).
func (m *SliceMatcher) Failures() []error {
	return m.failures
}
package vibe
import (
"github.com/spf13/cast"
"time"
)
// Each getter below comes in two forms: a package-level function that
// delegates to the default instance `v`, and a method on *Vibe that fetches
// the raw value and coerces it with the cast package (which returns the
// type's zero value on conversion failure).

// GetString returns the value associated with the key as a string.
func GetString(key string) string { return v.GetString(key) }
func (v *Vibe) GetString(key string) string {
	return cast.ToString(v.Get(key))
}

// GetBool returns the value associated with the key as a boolean.
func GetBool(key string) bool { return v.GetBool(key) }
func (v *Vibe) GetBool(key string) bool {
	return cast.ToBool(v.Get(key))
}

// GetInt returns the value associated with the key as an integer.
func GetInt(key string) int { return v.GetInt(key) }
func (v *Vibe) GetInt(key string) int {
	return cast.ToInt(v.Get(key))
}

// GetInt32 returns the value associated with the key as an integer.
func GetInt32(key string) int32 { return v.GetInt32(key) }
func (v *Vibe) GetInt32(key string) int32 {
	return cast.ToInt32(v.Get(key))
}

// GetInt64 returns the value associated with the key as an integer.
func GetInt64(key string) int64 { return v.GetInt64(key) }
func (v *Vibe) GetInt64(key string) int64 {
	return cast.ToInt64(v.Get(key))
}

// GetUint returns the value associated with the key as an unsigned integer.
func GetUint(key string) uint { return v.GetUint(key) }
func (v *Vibe) GetUint(key string) uint {
	return cast.ToUint(v.Get(key))
}

// GetUint32 returns the value associated with the key as an unsigned integer.
func GetUint32(key string) uint32 {
	return v.GetUint32(key)
}
func (v *Vibe) GetUint32(key string) uint32 {
	return cast.ToUint32(v.Get(key))
}

// GetUint64 returns the value associated with the key as an unsigned integer.
func GetUint64(key string) uint64 {
	return v.GetUint64(key)
}
func (v *Vibe) GetUint64(key string) uint64 {
	return cast.ToUint64(v.Get(key))
}

// GetFloat64 returns the value associated with the key as a float64.
func GetFloat64(key string) float64 {
	return v.GetFloat64(key)
}
func (v *Vibe) GetFloat64(key string) float64 {
	return cast.ToFloat64(v.Get(key))
}

// GetTime returns the value associated with the key as time.
func GetTime(key string) time.Time {
	return v.GetTime(key)
}
func (v *Vibe) GetTime(key string) time.Time {
	return cast.ToTime(v.Get(key))
}
package render
import (
"image"
"image/draw"
"github.com/oakmound/oak/v3/alg/floatgeom"
"github.com/oakmound/oak/v3/render/mod"
)
// CompositeM Types display all of their parts at the same time,
// and respect the positions of their parts as relative to the
// position of the composite itself
type CompositeM struct {
	LayeredPoint
	// rs holds the component renderables, drawn in slice order.
	rs []Modifiable
}
// NewCompositeM creates a CompositeM at the origin from the supplied
// component renderables.
func NewCompositeM(sl ...Modifiable) *CompositeM {
	return &CompositeM{
		LayeredPoint: NewLayeredPoint(0, 0, 0),
		rs:           sl,
	}
}
// AppendOffset adds a new offset modifiable to the CompositeM, positioning
// it at p relative to the composite.
func (cs *CompositeM) AppendOffset(r Modifiable, p floatgeom.Point2) {
	r.SetPos(p.X(), p.Y())
	cs.Append(r)
}

// Append adds a renderable as is to the CompositeM
func (cs *CompositeM) Append(r Modifiable) {
	cs.rs = append(cs.rs, r)
}

// Prepend adds a new renderable to the front of the CompositeMR.
func (cs *CompositeM) Prepend(r Modifiable) {
	cs.rs = append([]Modifiable{r}, cs.rs...)
}

// SetIndex places a renderable at a certain point in the CompositeMs renderable slice.
// NOTE(review): unlike AddOffset, this does no bounds check and will panic on
// an out-of-range index — confirm whether callers guarantee the range.
func (cs *CompositeM) SetIndex(i int, r Modifiable) {
	cs.rs[i] = r
}
// Slice creates a new CompositeM as a subslice of the existing CompositeM.
// No Modifiables will be copied, and the original will not be modified.
// Bounds are clamped: start below zero, end beyond the length, and a start
// beyond end (which would have panicked in the original) are all tolerated.
func (cs *CompositeM) Slice(start, end int) *CompositeM {
	if start < 0 {
		start = 0
	}
	if end > len(cs.rs) {
		end = len(cs.rs)
	}
	if start > end {
		start = end
	}
	newRs := cs.rs[start:end]
	return &CompositeM{
		LayeredPoint: cs.LayeredPoint.Copy(),
		rs:           newRs,
	}
}
// Len returns the number of renderables in this CompositeM.
func (cs *CompositeM) Len() int {
	return len(cs.rs)
}

// AddOffset sets the position of the renderable at index i to p (relative to
// the composite). Out-of-range indices are silently ignored.
func (cs *CompositeM) AddOffset(i int, p floatgeom.Point2) {
	if i < len(cs.rs) {
		cs.rs[i].SetPos(p.X(), p.Y())
	}
}

// SetOffsets applies the initial offsets to the entire CompositeM, matching
// offsets to renderables by index; extra offsets are ignored.
func (cs *CompositeM) SetOffsets(vs ...floatgeom.Point2) {
	for i, v := range vs {
		if i < len(cs.rs) {
			cs.rs[i].SetPos(v.X(), v.Y())
		}
	}
}

// Get returns a renderable at the given index within the CompositeM.
// NOTE(review): no bounds check — an out-of-range index panics.
func (cs *CompositeM) Get(i int) Modifiable {
	return cs.rs[i]
}
// Draw draws the CompositeM with some offset from its logical position
// (and therefore sub renderables logical positions).
func (cs *CompositeM) Draw(buff draw.Image, xOff, yOff float64) {
	for _, c := range cs.rs {
		c.Draw(buff, cs.X()+xOff, cs.Y()+yOff)
	}
}

// Undraw stops the CompositeM from being drawn, undrawing every component
// as well.
func (cs *CompositeM) Undraw() {
	cs.layer = Undraw
	for _, c := range cs.rs {
		c.Undraw()
	}
}

// GetRGBA always returns nil from Composites; a composite has no single
// backing image (see ToSprite to flatten one).
func (cs *CompositeM) GetRGBA() *image.RGBA {
	return nil
}
// Modify applies mods to every component of the CompositeM and returns the
// composite itself.
func (cs *CompositeM) Modify(ms ...mod.Mod) Modifiable {
	for _, r := range cs.rs {
		r.Modify(ms...)
	}
	return cs
}

// Filter filters each component part of this CompositeM by all of the inputs.
func (cs *CompositeM) Filter(fs ...mod.Filter) {
	for _, r := range cs.rs {
		r.Filter(fs...)
	}
}
// ToSprite converts the composite into a sprite by drawing each layer in order
// and overwriting lower layered pixels. The sprite is sized to the bounding
// box of all components (positions truncated to ints).
func (cs *CompositeM) ToSprite() *Sprite {
	var maxW, maxH int
	// First pass: find the bounding box.
	for _, r := range cs.rs {
		x, y := int(r.X()), int(r.Y())
		w, h := r.GetDims()
		if x+w > maxW {
			maxW = x + w
		}
		if y+h > maxH {
			maxH = y + h
		}
	}
	sp := NewEmptySprite(cs.X(), cs.Y(), maxW, maxH)
	// Second pass: flatten every component into the sprite.
	for _, r := range cs.rs {
		r.Draw(sp, 0, 0)
	}
	return sp
}
// Copy makes a new CompositeM with the same renderables
func (cs *CompositeM) Copy() Modifiable {
cs2 := new(CompositeM)
cs2.layer = cs.layer
cs2.Vector = cs.Vector.Copy()
cs2.rs = make([]Modifiable, len(cs.rs))
for i, v := range cs.rs {
cs2.rs[i] = v.Copy()
}
return cs2
} | render/compositeM.go | 0.737914 | 0.476823 | compositeM.go | starcoder |
package espressopp
import (
"io"
"github.com/alecthomas/participle"
"github.com/alecthomas/participle/lexer"
"github.com/alecthomas/participle/lexer/ebnf"
"github.com/alecthomas/repr"
)
// Term is a single terminal in an expression: an identifier, a literal
// (integer, decimal, string, date, time, datetime, or bool), or a macro
// invocation. Exactly one alternative is populated after parsing.
type Term struct {
	Identifier *string ` @Ident`
	Integer *int `| @Int`
	Decimal *float64 `| @Float`
	String *string `| @String`
	Date *string `| @Date`
	Time *string `| @Time`
	DateTime *string `| @DateTime`
	Bool *string `| @Bool`
	Macro *Macro `| @@`
}
// Macro is a named macro invocation ("#name") with an optional
// parenthesized, comma-separated argument list of terms.
type Macro struct {
	Name string `@Macro`
	Args []*Term `("(" (@@ ("," @@)*)? ")")?`
}
// Math is a binary arithmetic expression over two terms using the
// keyword operators add, sub, mul, or div.
type Math struct {
	Term1 *Term `@@`
	Op string `@("add" | "sub" | "mul" | "div")`
	Term2 *Term `@@`
}
// TermOrMath is either a bare arithmetic expression, a parenthesized
// arithmetic expression, or a single term.
type TermOrMath struct {
	Math *Math ` @@`
	SubMath *Math `| "(" @@ ")"`
	Term *Term `| @@`
}
// Equality compares two term/math operands with eq or neq.
type Equality struct {
	TermOrMath1 *TermOrMath `@@`
	Op string `@("eq" | "neq")`
	TermOrMath2 *TermOrMath `@@`
}
// Comparison orders two term/math operands with gt, gte, lt, or lte.
type Comparison struct {
	TermOrMath1 *TermOrMath `@@`
	Op string `@("gt" | "gte" | "lt" | "lte")`
	TermOrMath2 *TermOrMath `@@`
}
// Range is a "x between a and b" inclusion test.
type Range struct {
	TermOrMath1 *TermOrMath `@@`
	Between string `@("between")`
	TermOrMath2 *TermOrMath `@@`
	And string `@("and")`
	TermOrMath3 *TermOrMath `@@`
}
// Match is a string predicate: startswith, endswith, or contains.
type Match struct {
	Term1 *Term `@@`
	Op string `@("startswith" | "endswith" | "contains")`
	Term2 *Term `@@`
}
// Is is an "is"/"is not" predicate in either explicit-value or
// implicit-value form.
type Is struct {
	IsWithExplicitValue *IsWithExplicitValue ` @@`
	IsWithImplicitValue *IsWithImplicitValue `| @@`
}
// IsWithExplicitValue matches "ident is [not] true|false|null".
type IsWithExplicitValue struct {
	Ident string `@Ident`
	Not bool `"is" @("not")?`
	Value string `@("true" | "false" | "null")`
}
// IsWithImplicitValue matches "is [not] ident".
type IsWithImplicitValue struct {
	Not bool `"is" @("not")?`
	Ident string `@Ident`
}
// SubExpression is an optionally negated, parenthesized group of one or
// more expressions.
type SubExpression struct {
	Not bool `@("not")?`
	Expressions []*Expression `"(" @@+ ")"`
}
// Expression is one element of an expression sequence: a logical
// connective (and/or) or one of the predicate forms. Exactly one
// alternative is populated after parsing.
type Expression struct {
	Op *string ` @("and" | "or")`
	SubExpression *SubExpression `| @@`
	Comparison *Comparison `| @@`
	Equality *Equality `| @@`
	Range *Range `| @@`
	Match *Match `| @@`
	Is *Is `| @@`
}
// Grammar is the set of structural rules that govern the composition of an
// Espresso++ expression: one or more expressions in sequence.
type Grammar struct {
	Expressions []*Expression `@@+`
}
// parser is the part of an interpreter that attaches meaning by classifying strings
// of tokens from the input Espresso++ expression as particular non-terminals
// and by building the parse tree.
type parser struct {
	// espressoppParser is the underlying participle parser built for Grammar.
	espressoppParser *participle.Parser
}
var (
	// espressoppLexer tokenizes Espresso++ input according to the EBNF
	// grammar below: comments, quoted date/time/datetime literals,
	// booleans, identifiers, "#"-prefixed macros, quoted strings,
	// numbers, punctuation, and elidable whitespace.
	espressoppLexer = lexer.Must(ebnf.New(`
Comment = "//" { "\u0000"…"\uffff"-"\n" } .
Date = "\"" date "\"" | "'" date "'" .
Time = "\"" time "\"" | "'" time "'" .
DateTime = "\"" date "T" time [ "+" digit digit ] "\"" | "'" date "T" time [ "+" digit digit ] "'" .
Bool = "true" | "false" .
Ident = ident .
Macro = "#" ident .
String = "\"" { "\u0000"…"\uffff"-"\""-"\\" | "\\" any } "\"" | "'" { "\u0000"…"\uffff"-"'"-"\\" | "\\" any } "'" .
Int = [ "-" | "+" ] digit { digit } .
Float = ("." | digit) {"." | digit} .
Punct = "!"…"/" | ":"…"@" | "["…` + "\"`\"" + ` | "{"…"~" .
Whitespace = " " | "\t" | "\n" | "\r" .
alpha = "a"…"z" | "A"…"Z" .
digit = "0"…"9" .
any = "\u0000"…"\uffff" .
ident = (alpha | "_") { "_" | alpha | digit } .
date = digit digit digit digit "-" digit digit "-" digit digit .
time = digit digit ":" digit digit ":" digit digit [ "." { digit } ] .
`))
)
// newParser creates a new instance of parser, wiring the custom lexer,
// unquoting of string-like tokens, elision of whitespace/comments, and a
// lookahead of 2 tokens to disambiguate the grammar's alternatives.
func newParser() *parser {
	return &parser{
		espressoppParser: participle.MustBuild(&Grammar{},
			participle.Lexer(espressoppLexer),
			participle.Unquote("String", "Date", "Time", "DateTime"),
			participle.Elide("Whitespace", "Comment"),
			participle.UseLookahead(2)),
	}
}
// parse reads Espresso++ expressions from r and returns the resulting
// grammar along with any parse error.
func (p *parser) parse(r io.Reader) (*Grammar, error) {
	var g Grammar
	if err := p.espressoppParser.Parse(r, &g); err != nil {
		return &g, err
	}
	return &g, nil
}
// string returns a string representation of g.
func (p *parser) string(g *Grammar) string {
return repr.String(g, repr.Hide(&lexer.Position{}))
} | parser.go | 0.671794 | 0.406509 | parser.go | starcoder |
// Package kmeans implements Lloyd's k-means clustering for ℝⁿ data.
package kmeans
import (
"code.google.com/p/biogo.cluster/cluster"
"errors"
"math/rand"
)
// point is a vector of coordinates in ℝⁿ.
type point []float64
// V returns the coordinates of the point.
func (p point) V() []float64 { return p }
// value is a single data point together with its weight and its current
// cluster assignment.
type value struct {
	point
	// w is the value's weight (1 for unweighted data).
	w float64
	// cluster is the index of the center this value is assigned to.
	cluster int
}
// Weight returns the weight of the value.
func (v *value) Weight() float64 { return v.w }
// Cluster returns the index of the cluster the value is assigned to.
func (v *value) Cluster() int { return v.cluster }
// center is a cluster center: its coordinates, the accumulated weight
// and count of its members, and the member indices.
type center struct {
	point
	w float64
	count int
	indices cluster.Indices
}
// zero resets the center for reaccumulation: its coordinates are zeroed
// in place (reusing the backing slice) and the weight, count, and
// membership are cleared.
func (c *center) zero() {
	p := c.point
	for i := range p {
		p[i] = 0
	}
	c.w = 0
	c.count = 0
	c.indices = nil
}
// Members returns the indices of the values assigned to this center.
func (c *center) Members() cluster.Indices { return c.indices }
// Kmeans implements clustering of ℝⁿ data according to the Lloyd k-means algorithm.
type Kmeans struct {
	// dims is the dimensionality shared by all values.
	dims int
	// values holds the data points and their cluster assignments.
	values []value
	// means holds the current cluster centers.
	means []center
}
// New creates a new k-means object populated with data from an Interface value, data.
func New(data cluster.Interface) (*Kmeans, error) {
	values, dims, err := convert(data)
	if err != nil {
		return nil, err
	}
	km := &Kmeans{dims: dims, values: values}
	return km, nil
}
// convert renders data to the internal float64 representation for a Kmeans.
// It rejects empty data and mismatched dimensionality, and records each
// value's weight (1 when data does not implement cluster.Weighter).
func convert(data cluster.Interface) ([]value, int, error) {
	n := data.Len()
	if n == 0 {
		return nil, 0, errors.New("kmeans: no data")
	}
	va := make([]value, n)
	dim := len(data.Values(0))
	for i := 0; i < n; i++ {
		vec := data.Values(i)
		if len(vec) != dim {
			return nil, 0, errors.New("kmeans: mismatched dimensions")
		}
		va[i] = value{point: append(point(nil), vec...), w: 1}
	}
	if w, ok := data.(cluster.Weighter); ok {
		for i := range va {
			va[i].w = w.Weight(i)
		}
	}
	return va, dim, nil
}
// Seed generates the initial means for the k-means algorithm according to the k-means++
// algorithm: the first center is a uniformly random data point, and each
// subsequent center is sampled with probability proportional to the
// squared distance from the nearest existing center.
//
// NOTE(review): nearest scans all k centers, including those not yet
// seeded (still zero vectors), so distances to the origin can influence
// the sampling — confirm this is intended.
func (km *Kmeans) Seed(k int) {
	km.means = make([]center, k)
	for i := range km.means {
		km.means[i].point = make(point, km.dims)
	}
	// First center: a uniformly random data point.
	copy(km.means[0].point, km.values[rand.Intn(len(km.values))].point)
	if k == 1 {
		return
	}
	d := make([]float64, len(km.values))
	for i := 1; i < k; i++ {
		// d[j] is point j's squared distance to its nearest center.
		sum := 0.
		for j, v := range km.values {
			_, min := km.nearest(v.point)
			d[j] = min
			sum += d[j]
		}
		// Sample index j with probability d[j]/sum.
		target := rand.Float64() * sum
		j := 0
		for sum = d[0]; sum < target; sum += d[j] {
			j++
		}
		copy(km.means[i].point, km.values[j].point)
	}
}
// SetCenters sets the locations of the centers to c, copying each
// center's coordinates.
func (km *Kmeans) SetCenters(c []cluster.Center) {
	means := make([]center, len(c))
	for i := range c {
		means[i] = center{point: append(point(nil), c[i].V()...)}
	}
	km.means = means
}
// Find the nearest center to the point v. Returns c, the index of the nearest center
// and min, the square of the distance from v to that center.
func (km *Kmeans) nearest(v point) (c int, min float64) {
	// Seed min with the squared distance to center 0.
	var ad float64
	for j := range v {
		ad = v[j] - km.means[0].point[j]
		min += ad * ad
	}
	// Scan the remaining centers for a closer one.
	for i := 1; i < len(km.means); i++ {
		var d float64
		for j := range v {
			ad = v[j] - km.means[i].point[j]
			d += ad * ad
		}
		if d < min {
			min = d
			c = i
		}
	}
	return c, min
}
// Cluster runs a clustering of the data using the k-means algorithm.
// Seed or SetCenters must be called first; otherwise an error is returned.
//
// NOTE(review): a center that loses all members ends an iteration with
// w == 0, making inv infinite and its coordinates NaN/Inf — confirm
// callers guarantee non-empty clusters.
func (km *Kmeans) Cluster() error {
	if len(km.means) == 0 {
		return errors.New("kmeans: no centers")
	}
	// Initial assignment: attach every value to its nearest center.
	for i, v := range km.values {
		n, _ := km.nearest(v.point)
		km.values[i].cluster = n
	}
	// Lloyd iterations: recompute centers as weighted means of their
	// members, then reassign; stop when no assignment changes.
	for {
		for i := range km.means {
			km.means[i].zero()
		}
		// Accumulate weighted coordinate sums per center.
		for _, v := range km.values {
			for j := range km.means[v.cluster].point {
				km.means[v.cluster].point[j] += v.point[j] * v.w
			}
			km.means[v.cluster].w += v.w
			km.means[v.cluster].count++
		}
		// Divide by total weight to obtain the new means.
		for i := range km.means {
			inv := 1 / km.means[i].w
			for j := range km.means[i].point {
				km.means[i].point[j] *= inv
			}
		}
		// Reassign values and count changes.
		deltas := 0
		for i, v := range km.values {
			if n, _ := km.nearest(v.point); n != v.cluster {
				deltas++
				km.values[i].cluster = n
			}
		}
		if deltas == 0 {
			break
		}
	}
	return nil
}
// Total calculates the total sum of squares for the data relative to the data mean.
// The mean is unweighted, regardless of per-value weights.
func (km *Kmeans) Total() float64 {
	// Compute the mean of all values.
	p := make([]float64, km.dims)
	for _, v := range km.values {
		for j := range p {
			p[j] += v.point[j]
		}
	}
	inv := 1 / float64(len(km.values))
	for j := range p {
		p[j] *= inv
	}
	// Sum squared distances of every value from the mean.
	var ss float64
	for _, v := range km.values {
		for j := range p {
			d := p[j] - v.point[j]
			ss += d * d
		}
	}
	return ss
}
// Within calculates the sum of squares within each cluster.
// Returns nil if Cluster has not been called.
func (km *Kmeans) Within() []float64 {
	if km.means == nil {
		return nil
	}
	ss := make([]float64, len(km.means))
	// Accumulate each value's squared distance to its assigned center.
	for _, v := range km.values {
		for j := range v.point {
			d := km.means[v.cluster].point[j] - v.point[j]
			ss[v.cluster] += d * d
		}
	}
	return ss
}
// Centers returns the k centers determined by a previous call to Cluster.
// As a side effect, each center's membership list is (re)built from the
// current value assignments.
func (km *Kmeans) Centers() []cluster.Center {
	// Collect member indices per center, pre-sized from the last counts.
	c := make([]cluster.Indices, len(km.means))
	for i := range c {
		c[i] = make([]int, 0, km.means[i].count)
	}
	for i, v := range km.values {
		c[v.cluster] = append(c[v.cluster], i)
	}
	// Attach membership and expose the centers via the interface type.
	cs := make([]cluster.Center, len(km.means))
	for i := range km.means {
		km.means[i].indices = c[i]
		cs[i] = &km.means[i]
	}
	return cs
}
// Values returns a slice of the values in the Kmeans.
func (km *Kmeans) Values() []cluster.Value {
vs := make([]cluster.Value, len(km.values))
for i := range km.values {
vs[i] = &km.values[i]
}
return vs
} | ML/kmeans.go | 0.87289 | 0.638525 | kmeans.go | starcoder |
package compute
import (
"bytes"
"compress/gzip"
"io/ioutil"
"github.com/golang/protobuf/proto" //nolint need to update to new protobuf api
protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
"github.com/pkg/errors"
log "github.com/unchartedsoftware/plog"
"github.com/uncharted-distil/distil-compute/pipeline"
)
const (
	// unknownAPIVersion is returned when the embedded proto version
	// cannot be read.
	unknownAPIVersion = "unknown"
	// Task and sub-task types taken from the D3M LL problem schema - these are what is used internally within the
	// application at the server and client level to capture the model building task type.
	// For communication with TA2, these need to be translated via the `ConvertXXXFromTA3ToTA2` methods below.
	// ForecastingTask represents timeseries forcasting
	ForecastingTask = "forecasting"
	// ClassificationTask represents a classification task on image, timeseries or basic tabular data
	ClassificationTask = "classification"
	// RegressionTask represents a regression task on image, timeseries or basic tabular data
	RegressionTask = "regression"
	// ClusteringTask represents an unsupervised clustering task on image, timeseries or basic tabular data
	ClusteringTask = "clustering"
	// LinkPredictionTask represents a link prediction task on graph data
	LinkPredictionTask = "linkPrediction"
	// VertexClassificationTask represents a vertex nomination task on graph data
	VertexClassificationTask = "vertexClassification"
	// VertexNominationTask represents a vertex nomination task on graph data
	VertexNominationTask = "vertexNomination"
	// CommunityDetectionTask represents an unsupervised community detection task on graph data
	CommunityDetectionTask = "communityDetection"
	// GraphMatchingTask represents an unsupervised matching task on graph data
	GraphMatchingTask = "graphMatching"
	// CollaborativeFilteringTask represents a collaborative filtering recommendation task on basic tabular data
	CollaborativeFilteringTask = "collaborativeFiltering"
	// ObjectDetectionTask represents an object detection task on image data
	ObjectDetectionTask = "objectDetection"
	// SemiSupervisedTask represents a semi-supervised classification task on tabular data
	SemiSupervisedTask = "semiSupervised"
	// BinaryTask represents task involving a single binary value for each prediction
	BinaryTask = "binary"
	// MultiClassTask represents a task involving a multi class value for each prediction
	MultiClassTask = "multiClass"
	// MultiLabelTask represents a task involving multiple lables for each each prediction
	MultiLabelTask = "multiLabel"
	// UnivariateTask represents a task involving predictions on a single variable
	UnivariateTask = "univariate"
	// MultivariateTask represents a task involving predictions on multiple variables
	MultivariateTask = "multivariate"
	// OverlappingTask represents a task involving overlapping predictions
	OverlappingTask = "overlapping"
	// NonOverlappingTask represents a task involving non-overlapping predictions
	NonOverlappingTask = "nonOverlapping"
	// TabularTask represents a task involving tabular data
	TabularTask = "tabular"
	// RelationalTask represents a task involving relational data
	RelationalTask = "relational"
	// ImageTask represents a task involving image data
	ImageTask = "image"
	// AudioTask represents a task involving audio data
	AudioTask = "audio"
	// VideoTask represents a task involving video data
	VideoTask = "video"
	// SpeechTask represents a task involving speech data
	SpeechTask = "speech"
	// TextTask represents a task involving text data
	TextTask = "text"
	// GraphTask represents a task involving graph data
	GraphTask = "graph"
	// MultiGraphTask represents a task involving multiple graph data
	MultiGraphTask = "multigraph"
	// TimeSeriesTask represents a task involving timeseries data
	TimeSeriesTask = "timeseries"
	// GroupedTask represents a task involving grouped data
	GroupedTask = "grouped"
	// GeospatialTask represents a task involving geospatial data
	GeospatialTask = "geospatial"
	// RemoteSensingTask represents a task involving remote sensing data
	RemoteSensingTask = "remoteSensing"
	// LupiTask represents a task involving LUPI (Learning Using Priveleged Information) data
	LupiTask = "lupi"
	// UndefinedTask is a flag for undefined/unknown task values
	UndefinedTask = "undefined"
	// UndefinedMetric is a flag for undefined/unknown metric values
	UndefinedMetric = "undefined"
	// Value types accepted by the TA2 level
	// CSVURIValueType denotes a CSV file URI at the TA2 level
	CSVURIValueType = "CSV_URI"
	// DatasetURIValueType denotes a D3M dataset file URI at the TA2 level
	DatasetURIValueType = "DATASET_URI"
	// ParquetURIValueType denotes a parquet file URI at the TA2 level
	ParquetURIValueType = "PARQUET_URI"
	// RawValueType denotes a raw numeric value
	RawValueType = "RAW"
	// HoldoutEvaluationMethod indicates a hold out model evaluation at the TA2 level
	HoldoutEvaluationMethod = "HOLDOUT"
)
var (
	// cached ta3ta2 API version
	apiVersion string
	// problemMetricMap maps D3M problem schema metric names to TA2
	// metric enum strings.
	problemMetricMap = map[string]string{
		"accuracy": "ACCURACY",
		"precision": "PRECISION",
		"recall": "RECALL",
		"f1": "F1",
		"f1Micro": "F1_MICRO",
		"f1Macro": "F1_MACRO",
		"rocAuc": "ROC_AUC",
		"rocAucMicro": "ROC_AUC_MICRO",
		"rocAucMacro": "ROC_AUC_MACRO",
		"meanSquaredError": "MEAN_SQUARED_ERROR",
		"rootMeanSquaredError": "ROOT_MEAN_SQUARED_ERROR",
		"rootMeanSquaredErrorAvg": "ROOT_MEAN_SQUARED_ERROR_AVG",
		"meanAbsoluteError": "MEAN_ABSOLUTE_ERROR",
		"rSquared": "R_SQUARED",
		"normalizedMutualInformation": "NORMALIZED_MUTUAL_INFORMATION",
		"jaccardSimilarityScore": "JACCARD_SIMILARITY_SCORE",
		"precisionAtTopK": "PRECISION_AT_TOP_K",
		"objectDetectionAP": "OBJECT_DETECTION_AVERAGE_PRECISION",
	}
	// problemTaskMap maps internal task keywords to TA2 task enum strings.
	problemTaskMap = map[string]string{
		ClassificationTask: "CLASSIFICATION",
		RegressionTask: "REGRESSION",
		ClusteringTask: "CLUSTERING",
		LinkPredictionTask: "LINK_PREDICTION",
		VertexNominationTask: "VERTEX_NOMINATION",
		VertexClassificationTask: "VERTEX_CLASSIFICATION",
		CommunityDetectionTask: "COMMUNITY_DETECTION",
		GraphMatchingTask: "GRAPH_MATCHING",
		ForecastingTask: "FORECASTING",
		CollaborativeFilteringTask: "COLLABORATIVE_FILTERING",
		ObjectDetectionTask: "OBJECT_DETECTION",
		SemiSupervisedTask: "SEMISUPERVISED",
		BinaryTask: "BINARY",
		MultiClassTask: "MULTICLASS",
		MultiLabelTask: "MULTILABEL",
		UnivariateTask: "UNIVARIATE",
		MultivariateTask: "MULTIVARIATE",
		OverlappingTask: "OVERLAPPING",
		NonOverlappingTask: "NONOVERLAPPING",
		TabularTask: "TABULAR",
		RelationalTask: "RELATIONAL",
		ImageTask: "IMAGE",
		AudioTask: "AUDIO",
		VideoTask: "VIDEO",
		SpeechTask: "SPEECH",
		TextTask: "TEXT",
		GraphTask: "GRAPH",
		MultiGraphTask: "MULTIGRAPH",
		TimeSeriesTask: "TIME_SERIES",
		GroupedTask: "GROUPED",
		GeospatialTask: "GEOSPATIAL",
		RemoteSensingTask: "REMOTE_SENSING",
		LupiTask: "LUPI",
	}
	// defaultTaskMetricMap gives the default problem metric for each
	// task keyword that has one.
	defaultTaskMetricMap = map[string]string{
		ClassificationTask: "f1Macro",
		RegressionTask: "meanAbsoluteError",
		ClusteringTask: "normalizedMutualInformation",
		LinkPredictionTask: "accuracy",
		VertexNominationTask: "accuracy",
		CommunityDetectionTask: "accuracy",
		GraphMatchingTask: "accuracy",
		ForecastingTask: "rSquared",
		CollaborativeFilteringTask: "rSquared",
		ObjectDetectionTask: "objectDetectionAP",
	}
	// metricScoreMultiplier is 1 when a higher score is better for the
	// metric, -1 when lower is better (see GetMetricScoreMultiplier).
	metricScoreMultiplier = map[string]float64{
		"ACCURACY": 1,
		"PRECISION": 1,
		"RECALL": 1,
		"F1": 1,
		"F1_MICRO": 1,
		"F1_MACRO": 1,
		"ROC_AUC": 1,
		"ROC_AUC_MICRO": 1,
		"ROC_AUC_MACRO": 1,
		"MEAN_SQUARED_ERROR": -1,
		"ROOT_MEAN_SQUARED_ERROR": -1,
		"ROOT_MEAN_SQUARED_ERROR_AVG": -1,
		"MEAN_ABSOLUTE_ERROR": -1,
		"R_SQUARED": 1,
		"NORMALIZED_MUTUAL_INFORMATION": 1,
		"JACCARD_SIMILARITY_SCORE": 1,
		"PRECISION_AT_TOP_K": 1,
		"OBJECT_DETECTION_AVERAGE_PRECISION": 1,
	}
	// metricLabel maps TA2 metric enums to short display labels.
	metricLabel = map[string]string{
		"ACCURACY": "Accuracy",
		"PRECISION": "Precision",
		"RECALL": "Recall",
		"F1": "F1",
		"F1_MICRO": "F1 Micro",
		"F1_MACRO": "F1 Macro",
		"ROC_AUC": "ROC AUC",
		"ROC_AUC_MICRO": "ROC AUC Micro",
		"ROC_AUC_MACRO": "ROC AUC Macro",
		"MEAN_SQUARED_ERROR": "MSE",
		"ROOT_MEAN_SQUARED_ERROR": "RMSE",
		"ROOT_MEAN_SQUARED_ERROR_AVG": "RMSE Avg",
		"MEAN_ABSOLUTE_ERROR": "MAE",
		"R_SQUARED": "R Squared",
		"NORMALIZED_MUTUAL_INFORMATION": "Normalized MI",
		"JACCARD_SIMILARITY_SCORE": "Jaccard Similarity",
		"PRECISION_AT_TOP_K": "Precision Top K",
		"OBJECT_DETECTION_AVERAGE_PRECISION": "Avg Precision",
	}
)
// ConvertProblemMetricToTA2 converts a problem schema metric to a TA2 metric.
// Unknown metrics map to the empty string.
func ConvertProblemMetricToTA2(metric string) string {
	if ta2, ok := problemMetricMap[metric]; ok {
		return ta2
	}
	return ""
}
// ConvertProblemTaskToTA2 converts a problem schema task keyword to a
// TA2 task. Unknown keywords map to the empty string.
// (The previous doc comment and parameter name said "metric"; the
// function looks up problemTaskMap, so it converts task keywords.)
func ConvertProblemTaskToTA2(task string) string {
	return problemTaskMap[task]
}
// GetMetricScoreMultiplier returns a weight to determine whether a higher or
// lower score is `better`: 1 when higher is better, -1 when lower is.
// Unknown metrics return 0.
func GetMetricScoreMultiplier(metric string) float64 {
	return metricScoreMultiplier[metric]
}
// GetMetricLabel returns a label string for a metric.
// Unknown metrics return the empty string.
func GetMetricLabel(metric string) string {
	return metricLabel[metric]
}
// GetDefaultTaskMetricsTA3 returns the default TA3 metrics for a supplied
// list of TA3 task keywords. Keywords without a default metric are skipped.
func GetDefaultTaskMetricsTA3(taskKeywords []string) []string {
	metrics := []string{}
	for _, keyword := range taskKeywords {
		metric, ok := defaultTaskMetricMap[keyword]
		if !ok {
			continue
		}
		metrics = append(metrics, metric)
	}
	return metrics
}
// ConvertMetricsFromTA3ToTA2 converts metrics from TA3 to TA2 values.
// Unrecognized metrics are logged and passed through as UndefinedMetric.
// posLabel is set on every resulting metric.
func ConvertMetricsFromTA3ToTA2(metrics []string, posLabel string) []*pipeline.ProblemPerformanceMetric {
	var res []*pipeline.ProblemPerformanceMetric
	for _, metric := range metrics {
		ta2Metric := ConvertProblemMetricToTA2(metric)
		if ta2Metric == "" {
			log.Warnf("unrecognized metric ('%s'), defaulting to undefined", metric)
			ta2Metric = UndefinedMetric
		}
		res = append(res, &pipeline.ProblemPerformanceMetric{
			Metric: ta2Metric,
			PosLabel: posLabel,
		})
	}
	return res
}
// ConvertTaskKeywordsFromTA3ToTA2 converts task keywords from TA3 to TA2,
// substituting UndefinedTask (with a warning) for any unrecognized keyword.
func ConvertTaskKeywordsFromTA3ToTA2(taskKeywords []string) []string {
	result := make([]string, 0, len(taskKeywords))
	for _, taskKeyword := range taskKeywords {
		ta2Task := ConvertProblemTaskToTA2(taskKeyword)
		if ta2Task == "" {
			log.Warnf("unrecognized task type ('%s'), defaulting to undefined", taskKeyword)
			ta2Task = UndefinedTask
		}
		result = append(result, ta2Task)
	}
	return result
}
// ConvertTargetFeaturesTA3ToTA2 creates a problem target from a target name.
// The target is placed on the default resource with target index 0.
func ConvertTargetFeaturesTA3ToTA2(target string, columnIndex int) []*pipeline.ProblemTarget {
	return []*pipeline.ProblemTarget{
		{
			ColumnName: target,
			ResourceId: DefaultResourceID,
			TargetIndex: 0,
			ColumnIndex: int32(columnIndex),
		},
	}
}
// ConvertDatasetTA3ToTA2 converts a dataset name from TA3 to TA2.
// Currently an identity mapping.
func ConvertDatasetTA3ToTA2(dataset string) string {
	return dataset
}
// GetAPIVersion retrieves the ta3-ta2 API version embedded in the pipeline_core.proto file. This is
// a non-trivial operation, so the value is cached for quick access.
// On any failure the constant unknownAPIVersion is returned (and not cached).
//
// NOTE(review): the apiVersion cache is read and written without
// synchronization; concurrent first calls race — confirm this is only
// invoked from a single goroutine during startup.
func GetAPIVersion() string {
	if apiVersion != "" {
		return apiVersion
	}
	// Get the raw file descriptor bytes
	fileDesc := proto.FileDescriptor(pipeline.E_ProtocolVersion.Filename) //nolint need to update to new protobuf api
	if fileDesc == nil {
		log.Warnf("failed to find file descriptor for %v", pipeline.E_ProtocolVersion.Filename) //nolint need to update to new protobuf api
		return unknownAPIVersion
	}
	// Open a gzip reader and decompress
	r, err := gzip.NewReader(bytes.NewReader(fileDesc))
	if err != nil {
		log.Warn(errors.Wrap(err, "failed to open gzip reader"))
		return unknownAPIVersion
	}
	defer r.Close()
	b, err := ioutil.ReadAll(r)
	if err != nil {
		log.Warn(errors.Wrap(err, "failed to decompress descriptor"))
		return unknownAPIVersion
	}
	// Unmarshall the bytes from the proto format
	fd := &protobuf.FileDescriptorProto{}
	if err := proto.Unmarshal(b, fd); err != nil {
		log.Warn(errors.Wrap(err, "malformed FileDescriptorProto"))
		return unknownAPIVersion
	}
	// Fetch the extension from the FileDescriptorOptions message
	ex, err := proto.GetExtension(fd.GetOptions(), pipeline.E_ProtocolVersion)
	if err != nil {
		log.Warn(errors.Wrap(err, "failed to fetch extension"))
		return unknownAPIVersion
	}
	apiVersion = *ex.(*string)
	return apiVersion
}
package optimize
import (
"math"
"sort"
"github.com/ivan-pindrop/hclust/matrixop"
"github.com/ivan-pindrop/hclust/typedef"
)
// constraints records which leaves must sit on the outer left and right
// edges of a node's subtree during reordering; -1 means unconstrained.
type constraints struct {
	left int
	right int
}
// leafs holds the leaf indices found under a node's left (a) and
// right (b) subtrees.
type leafs struct {
	a []int
	b []int
}
// Optimal implements the "fast" leaf optimization approach of Bar-Joseph et al.
// 2001. See Figure 4.
// aSortOrder and bSortOrder are leaf indices sorted ascending by their
// scores in nodeScoresA/nodeScoresB; minDist is a lower bound on the
// distance between any leaf pair across the two subtrees. Because the
// orders are ascending, the loops terminate early once no remaining pair
// can beat the current best score.
func optimal(aSortOrder, bSortOrder []int, minDist float64, nodeScoresA map[int]float64, nodeScoresB map[int]float64, dist [][]float64) (score float64) {
	// Current best maximal score.
	score = math.MaxFloat64
	for _, leftIndex := range aSortOrder {
		ma := nodeScoresA[leftIndex]
		// No later left leaf can improve: its score only grows, and the
		// best possible partner/distance are already bounded below.
		if ma+nodeScoresB[bSortOrder[0]]+minDist >= score {
			return
		}
		for _, rightIndex := range bSortOrder {
			mb := nodeScoresB[rightIndex]
			// Remaining right leaves only have larger scores.
			if ma+mb+minDist >= score {
				break
			}
			currDist := ma + mb + dist[leftIndex][rightIndex]
			if score > currDist {
				score = currDist
			}
		}
	}
	return
}
// maxInt returns the larger of two integers.
// (Doc comment previously began with "MaxInt", which does not match the
// unexported identifier per Go doc-comment convention.)
func maxInt(a, b int) int {
	if a > b {
		return a
	}
	return b
}
// minInt returns the smaller of two integers.
// (Doc comment previously began with "MinInt", which does not match the
// unexported identifier per Go doc-comment convention.)
func minInt(a, b int) int {
	if a < b {
		return a
	}
	return b
}
// sortMap returns the keys of unsortedMap ordered by ascending value.
// (The previous doc comment incorrectly said the sort was by key.)
// Keys with equal values are returned in unspecified order: map
// iteration order is random and sort.Slice is not stable.
func sortMap(unsortedMap map[int]float64) []int {
	type kv struct {
		key int
		value float64
	}
	// Flatten the map into key/value pairs.
	pairs := make([]kv, 0, len(unsortedMap))
	for k, v := range unsortedMap {
		pairs = append(pairs, kv{key: k, value: v})
	}
	sort.Slice(pairs, func(i, j int) bool {
		return pairs[i].value < pairs[j].value
	})
	// Extract the keys in sorted-value order.
	sortOrder := make([]int, len(pairs))
	for i, p := range pairs {
		sortOrder[i] = p.key
	}
	return sortOrder
}
// shouldIgnore returns a predicate reporting whether a node should be
// skipped during optimization because it would require at least
// `threshold` leaf-pair comparisons. A threshold of 0 disables the
// check: the returned predicate always reports false.
func shouldIgnore(threshold int) (ignoreFunc func(comparisons int) bool) {
	if threshold == 0 {
		return func(int) bool {
			return false
		}
	}
	return func(comparisons int) bool {
		return comparisons >= threshold
	}
}
// Optimize optimizes the leaf ordering of a dendrogram using the method
// of Bar-Joseph, et al. 2001.
// dist is the pairwise leaf distance matrix. ignore, when non-zero, is a
// comparison-count threshold above which a node's ordering is not
// exhaustively optimized (see shouldIgnore).
func Optimize(dendrogram []typedef.SubCluster, dist [][]float64, ignore int) (optimized []typedef.SubCluster) {
	// Number of nodes.
	n := len(dendrogram)
	// Get leafs beneath each node and group them into two pools: leafs on the left (a)
	// go into one slice and leafs on the right (b) go into a second slice.
	nodeLeafs := make(map[int]leafs, n)
	for _, cluster := range dendrogram {
		// Get first group of leafs.
		aLeafs := make([]int, 0)
		if cluster.Leafa <= n { // If Leaf is a leaf.
			aLeafs = append(aLeafs, cluster.Leafa)
		} else { // If Leaf is a node.
			aLeafs = append(aLeafs, nodeLeafs[cluster.Leafa].a...)
			aLeafs = append(aLeafs, nodeLeafs[cluster.Leafa].b...)
		}
		// Get second group of leafs.
		bLeafs := make([]int, 0)
		if cluster.Leafb <= n {
			bLeafs = append(bLeafs, cluster.Leafb)
		} else {
			bLeafs = append(bLeafs, nodeLeafs[cluster.Leafb].a...)
			bLeafs = append(bLeafs, nodeLeafs[cluster.Leafb].b...)
		}
		nodeLeafs[cluster.Node] = leafs{a: aLeafs, b: bLeafs}
	}
	// Initialize score map and set zero values for leafs. This is a 3D map with
	// the first dimension corresponding a node and the second and third
	// dimensions corresponding to leaf pairs. The 2D leaf will be the left most
	// leaf of a pair and the 3D leaf will be its rightmost pair. The float64
	// value is the between-leaf distance for that pair.
	m := make(map[int]map[int]map[int]float64, 2*n+1)
	for i := 0; i <= n; i++ {
		m[i] = make(map[int]map[int]float64, 1)
		m[i][i] = make(map[int]float64, 1)
		m[i][i][i] = 0
	}
	ignoreFunc := shouldIgnore(ignore)
	// Calculate optimal ordering score for each node.
	for _, cluster := range dendrogram {
		node := cluster.Node
		numLeafsA := len(nodeLeafs[node].a)
		numLeafsB := len(nodeLeafs[node].b)
		// Initialize 2D and 3D maps.
		m[node] = make(map[int]map[int]float64, numLeafsA+numLeafsB)
		for _, leaf := range nodeLeafs[node].a {
			m[node][leaf] = make(map[int]float64, numLeafsB)
		}
		for _, leaf := range nodeLeafs[node].b {
			m[node][leaf] = make(map[int]float64, numLeafsA)
		}
		// Determine if a node should be optimized and calculate minimum distance
		// between a leaf and potential b leafs if so.
		shouldIgnore := ignoreFunc(numLeafsA * numLeafsB)
		minDist := math.MaxFloat64
		if !shouldIgnore {
			for _, aLeaf := range nodeLeafs[node].a {
				for _, bLeaf := range nodeLeafs[node].b {
					if dist[aLeaf][bLeaf] < minDist {
						minDist = dist[aLeaf][bLeaf]
					}
				}
			}
		}
		// Iterate over leafs in pool a and b and generate scores.
		for _, aLeaf := range nodeLeafs[node].a {
			// Sort left nodes scores.
			aSortOrder := sortMap(m[cluster.Leafa][aLeaf])
			for _, bLeaf := range nodeLeafs[node].b {
				// Sort right nodes scores.
				bSortOrder := sortMap(m[cluster.Leafb][bLeaf])
				// Calculate score for current node. Ignored nodes just take
				// the best subtree scores without a cross-subtree search.
				var optScore float64
				if !shouldIgnore {
					optScore = optimal(aSortOrder, bSortOrder, minDist, m[cluster.Leafa][aLeaf], m[cluster.Leafb][bLeaf], dist)
				} else {
					optScore = m[cluster.Leafa][aLeaf][aSortOrder[0]] + m[cluster.Leafb][bLeaf][bSortOrder[0]]
				}
				// Scores are symmetric in the outer leaf pair.
				m[node][aLeaf][bLeaf] = optScore
				m[node][bLeaf][aLeaf] = optScore
			}
		}
	}
	// Re-order dendrogram.
	optimized = make([]typedef.SubCluster, n)
	copy(optimized, dendrogram)
	// Constraints contains the left and right contraints for each node. -1 is used
	// to indicate there is no constraint.
	constrain := make(map[int]constraints, n)
	constrain[dendrogram[n-1].Node] = constraints{left: -1, right: -1}
	// Iterate over nodes and reorder as needed, from the root down.
	for i := n - 1; i >= 0; i-- {
		node := dendrogram[i].Node
		// Find best leaf pair.
		minDiff := math.MaxFloat64
		var outerA, outerB int
		if constrain[node].left >= 0 {
			// Left edge fixed: choose the best right partner.
			for leafb, value := range m[node][constrain[node].left] {
				if value < minDiff {
					minDiff = value
					outerB = leafb
				}
			}
			outerA = constrain[node].left
		} else if constrain[node].right >= 0 {
			// Right edge fixed: choose the best left partner.
			for leafa, value := range m[node][constrain[node].right] {
				if value < minDiff {
					minDiff = value
					outerA = leafa
				}
			}
			outerB = constrain[node].right
		} else { // For top node.
			for leafa := range m[node] {
				for leafb, value := range m[node][leafa] {
					if value < minDiff {
						minDiff = value
						// Setting the leaf with the lower index as the left leaf for a
						// consistent procedure.
						outerA = minInt(leafa, leafb)
						outerB = maxInt(leafa, leafb)
					}
				}
			}
		}
		// Check if outerA leaf is already in left pool, if not switch left and
		// right leafs.
		leafAIndex := matrixop.SliceIndex(len(nodeLeafs[node].a), func(j int) bool { return nodeLeafs[node].a[j] == outerA })
		if leafAIndex < 0 {
			optimized[i] = typedef.SubCluster{
				Leafa: dendrogram[i].Leafb,
				Leafb: dendrogram[i].Leafa,
				Lengtha: dendrogram[i].Lengthb,
				Lengthb: dendrogram[i].Lengtha,
				Node: dendrogram[i].Node,
			}
		} else {
			optimized[i] = typedef.SubCluster{
				Leafa: dendrogram[i].Leafa,
				Leafb: dendrogram[i].Leafb,
				Lengtha: dendrogram[i].Lengtha,
				Lengthb: dendrogram[i].Lengthb,
				Node: dendrogram[i].Node,
			}
		}
		// Set contraints for subnodes.
		if optimized[i].Leafa > n {
			constrain[optimized[i].Leafa] = constraints{left: outerA, right: -1}
		}
		if optimized[i].Leafb > n {
			constrain[optimized[i].Leafb] = constraints{left: -1, right: outerB}
		}
	}
	return
}
package ssz
import (
"encoding/binary"
"errors"
)
// Proof represents a merkle proof against a general index.
type Proof struct {
	// Index is the generalized index of the proven leaf.
	Index int
	// Leaf is the value being proven.
	Leaf []byte
	// Hashes are the sibling hashes along the path to the root.
	Hashes [][]byte
}
// Multiproof represents a merkle proof of several leaves.
type Multiproof struct {
	Indices []int
	Leaves [][]byte
	Hashes [][]byte
}
// Compress returns a new proof with zero hashes omitted.
// See `CompressedMultiproof` for more info.
func (p *Multiproof) Compress() *CompressedMultiproof {
	compressed := &CompressedMultiproof{
		Indices: p.Indices,
		Leaves: p.Leaves,
		Hashes: make([][]byte, 0, len(p.Hashes)),
		ZeroLevels: make([]int, 0, len(p.Hashes)),
	}
	for _, h := range p.Hashes {
		// Known zero-subtree hashes are dropped; only their level is
		// recorded (as a nil placeholder in Hashes) so the verifier can
		// regenerate them.
		if l, ok := zeroHashLevels[string(h)]; ok {
			compressed.ZeroLevels = append(compressed.ZeroLevels, l)
			compressed.Hashes = append(compressed.Hashes, nil)
		} else {
			compressed.Hashes = append(compressed.Hashes, h)
		}
	}
	return compressed
}
// CompressedMultiproof represents a compressed merkle proof of several leaves.
// Compression is achieved by omitting zero hashes (and their hashes). `ZeroLevels`
// contains information which helps the verifier fill in those hashes.
type CompressedMultiproof struct {
	Indices []int
	Leaves [][]byte
	// Hashes contains a nil entry for every omitted zero hash.
	Hashes [][]byte
	ZeroLevels []int // Stores the level for every omitted zero hash in the proof
}
// Decompress returns a new multiproof, filling in the omitted
// zero hashes. See `CompressedMultiProof` for more info.
func (c *CompressedMultiproof) Decompress() *Multiproof {
	hashes := make([][]byte, len(c.Hashes))
	zi := 0
	for i, h := range c.Hashes {
		if h != nil {
			hashes[i] = h
			continue
		}
		// nil marks an omitted zero hash; restore it from its level.
		hashes[i] = zeroHashes[c.ZeroLevels[zi]][:]
		zi++
	}
	return &Multiproof{
		Indices: c.Indices,
		Leaves: c.Leaves,
		Hashes: hashes,
	}
}
// Node represents a node in the tree
// backing of a SSZ object. A leaf has a value and no children; a branch
// has two children and no value.
type Node struct {
	left *Node
	right *Node
	value []byte
}
// NewNodeWithValue initializes a leaf node holding the given value.
func NewNodeWithValue(value []byte) *Node {
	return &Node{value: value}
}
// NewNodeWithLR initializes a branch node with the given children.
func NewNodeWithLR(left, right *Node) *Node {
	return &Node{left: left, right: right}
}
// TreeFromChunks constructs a tree from leaf values.
// The number of leaves should be a power of 2.
func TreeFromChunks(chunks [][]byte) (*Node, error) {
	if !isPowerOfTwo(len(chunks)) {
		return nil, errors.New("Number of leaves should be a power of 2")
	}
	leaves := make([]*Node, 0, len(chunks))
	for _, c := range chunks {
		leaves = append(leaves, &Node{value: c})
	}
	return TreeFromNodes(leaves)
}
// TreeFromNodes constructs a tree from leaf nodes.
// This is useful for merging subtrees.
// The number of leaves should be a power of 2.
func TreeFromNodes(leaves []*Node) (*Node, error) {
	numLeaves := len(leaves)
	// Trivial trees short-circuit the general construction.
	if numLeaves == 1 {
		return leaves[0], nil
	}
	if numLeaves == 2 {
		return NewNodeWithLR(leaves[0], leaves[1]), nil
	}
	if !isPowerOfTwo(numLeaves) {
		return nil, errors.New("Number of leaves should be a power of 2")
	}
	// Build bottom-up over an implicit 1-based heap layout: node i has
	// children 2i and 2i+1, and the last numLeaves slots hold the leaves.
	numNodes := numLeaves*2 - 1
	nodes := make([]*Node, numNodes)
	for i := numNodes; i > 0; i-- {
		// Is a leaf
		if i > numNodes-numLeaves {
			nodes[i-1] = leaves[i-numLeaves]
		} else {
			// Is a branch node
			nodes[i-1] = &Node{left: nodes[(i*2)-1], right: nodes[(i*2+1)-1], value: nil}
		}
	}
	return nodes[0], nil
}
// TreeFromNodesWithMixin builds a tree over leaves padded with empty
// leaves up to limit, then mixes in the element count num as the
// root's right sibling (SSZ list hashing).
// The limit must be a power of 2 and the leaves must fit: the old
// code silently dropped any leaves beyond limit.
func TreeFromNodesWithMixin(leaves []*Node, num, limit int) (*Node, error) {
	numLeaves := len(leaves)
	if !isPowerOfTwo(limit) {
		return nil, errors.New("Size of tree should be a power of 2")
	}
	if numLeaves > limit {
		return nil, errors.New("Number of leaves exceeds the tree limit")
	}
	allLeaves := make([]*Node, limit)
	// A single shared empty leaf is fine: nodes are only read while hashing.
	emptyLeaf := NewNodeWithValue(make([]byte, 32))
	for i := 0; i < limit; i++ {
		if i < numLeaves {
			allLeaves[i] = leaves[i]
		} else {
			allLeaves[i] = emptyLeaf
		}
	}
	mainTree, err := TreeFromNodes(allLeaves)
	if err != nil {
		return nil, err
	}
	// Mixin len
	countLeaf := LeafFromUint64(uint64(num))
	return NewNodeWithLR(mainTree, countLeaf), nil
}
// Get fetches a node with the given general index.
// The general index encodes the root-to-node path in its bits, most
// significant bit (after the leading 1) first: 0 = left, 1 = right.
func (n *Node) Get(index int) (*Node, error) {
	pathLen := getPathLength(index)
	cur := n
	for i := pathLen - 1; i >= 0; i-- {
		if isRight := getPosAtLevel(index, i); isRight {
			cur = cur.right
		} else {
			cur = cur.left
		}
		if cur == nil {
			return nil, errors.New("Node not found in tree")
		}
	}
	return cur, nil
}
// Hash returns the hash of the subtree with the given Node as its root.
// If root has no children, it returns root's value (not its hash).
func (n *Node) Hash() []byte {
	// TODO: handle special cases: empty root, one non-empty node
	return hashNode(n)
}

// hashNode recursively computes the subtree hash: a leaf contributes
// its raw value, a branch hashes the concatenation of both children's
// hashes. Panics on a node with exactly one child (malformed tree).
func hashNode(n *Node) []byte {
	// Leaf
	if n.left == nil && n.right == nil {
		return n.value
	}
	// Only one child
	if n.left == nil || n.right == nil {
		panic("Tree incomplete")
	}
	return hashFn(append(hashNode(n.left), hashNode(n.right)...))
}
// Prove returns a list of sibling values and hashes needed
// to compute the root hash for a given general index.
// The hashes are ordered bottom-up (deepest sibling first).
// Preallocating and assigning by level index replaces the old
// prepend-per-level pattern, which reallocated the slice each step.
func (n *Node) Prove(index int) (*Proof, error) {
	pathLen := getPathLength(index)
	proof := &Proof{Index: index}
	hashes := make([][]byte, pathLen)
	cur := n
	for i := pathLen - 1; i >= 0; i-- {
		var siblingHash []byte
		if isRight := getPosAtLevel(index, i); isRight {
			siblingHash = hashNode(cur.left)
			cur = cur.right
		} else {
			siblingHash = hashNode(cur.right)
			cur = cur.left
		}
		// Level i's sibling lands at slot i: the old code prepended,
		// so the hash computed last (deepest level, i == 0) came first.
		hashes[i] = siblingHash
		if cur == nil {
			return nil, errors.New("Node not found in tree")
		}
	}
	proof.Hashes = hashes
	proof.Leaf = cur.value
	return proof, nil
}
// ProveMulti returns a single proof covering several general indices:
// the requested leaf values plus the hashes of every additional
// subtree needed to recompute the root.
func (n *Node) ProveMulti(indices []int) (*Multiproof, error) {
	// getRequiredIndices yields the sibling subtrees that are not
	// derivable from the requested leaves themselves.
	reqIndices := getRequiredIndices(indices)
	proof := &Multiproof{Indices: indices, Leaves: make([][]byte, len(indices)), Hashes: make([][]byte, len(reqIndices))}
	for i, gi := range indices {
		node, err := n.Get(gi)
		if err != nil {
			return nil, err
		}
		proof.Leaves[i] = node.value
	}
	for i, gi := range reqIndices {
		cur, err := n.Get(gi)
		if err != nil {
			return nil, err
		}
		proof.Hashes[i] = hashNode(cur)
	}
	return proof, nil
}
// LeafFromUint64 returns a 32-byte leaf holding i little-endian.
func LeafFromUint64(i uint64) *Node {
	buf := make([]byte, 32)
	binary.LittleEndian.PutUint64(buf[:8], i)
	return NewNodeWithValue(buf)
}

// LeafFromUint32 returns a 32-byte leaf holding i little-endian.
func LeafFromUint32(i uint32) *Node {
	buf := make([]byte, 32)
	binary.LittleEndian.PutUint32(buf[:4], i)
	return NewNodeWithValue(buf)
}

// LeafFromUint16 returns a 32-byte leaf holding i little-endian.
func LeafFromUint16(i uint16) *Node {
	buf := make([]byte, 32)
	binary.LittleEndian.PutUint16(buf[:2], i)
	return NewNodeWithValue(buf)
}

// LeafFromUint8 returns a 32-byte leaf holding i in its first byte.
func LeafFromUint8(i uint8) *Node {
	buf := make([]byte, 32)
	buf[0] = byte(i)
	return NewNodeWithValue(buf)
}

// LeafFromBool returns a 32-byte leaf: first byte 1 for true, 0 for false.
func LeafFromBool(b bool) *Node {
	buf := make([]byte, 32)
	if b {
		buf[0] = 1
	}
	return NewNodeWithValue(buf)
}
// LeafFromBytes returns a leaf whose value is b zero-padded to 32
// bytes. The input is always copied: the old version aliased b when
// len(b) == 32 and appended into b's backing array when b had spare
// capacity, so later writes by the caller could corrupt the tree.
func LeafFromBytes(b []byte) *Node {
	if len(b) > 32 {
		panic("Unimplemented")
	}
	buf := make([]byte, 32)
	copy(buf, b)
	return NewNodeWithValue(buf)
}

// EmptyLeaf returns a leaf holding 32 zero bytes.
func EmptyLeaf() *Node {
	return NewNodeWithValue(zeroBytes[:32])
}
// LeavesFromUint64 packs items into 32-byte chunks (four uint64s per
// chunk, little-endian, zero-padded) and returns one leaf per chunk.
func LeavesFromUint64(items []uint64) []*Node {
	if len(items) == 0 {
		return []*Node{}
	}
	numLeaves := (len(items)*8 + 31) / 32 // ceil(total bytes / 32)
	buf := make([]byte, numLeaves*32)
	for i, v := range items {
		binary.LittleEndian.PutUint64(buf[i*8:(i+1)*8], v)
	}
	leaves := make([]*Node, numLeaves)
	for i := 0; i < numLeaves; i++ {
		v := buf[i*32 : (i+1)*32]
		leaves[i] = NewNodeWithValue(v)
	}
	return leaves
}
// isPowerOfTwo reports whether n is a positive power of two.
// The n > 0 guard matters: the bit trick alone wrongly accepts 0,
// which callers then feed into make([]*Node, 2*0-1) and panic.
func isPowerOfTwo(n int) bool {
	return n > 0 && (n&(n-1)) == 0
}
package configuration
import (
"fmt"
"reflect"
"strconv"
"strings"
"time"
)
// sliceSeparator delimits individual items in string-encoded slice
// values, e.g. "a;b;c".
const sliceSeparator = ";"
// SetField sets field with `valStr` value (converts to the proper type beforehand).
// Pointer fields are allocated and set via setPtrValue; everything
// else goes through setValue.
func SetField(field reflect.StructField, v reflect.Value, valStr string) error {
	if v.Kind() == reflect.Ptr {
		return setPtrValue(field.Type, v, valStr)
	}
	return setValue(field.Type, v, valStr)
}
func setValue(t reflect.Type, v reflect.Value, val string) (err error) {
switch t.Kind() {
case reflect.String:
v.SetString(val)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32:
i, _ := strconv.ParseInt(val, 10, 64)
v.SetInt(i)
case reflect.Int64:
setInt64(v, val)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
i, _ := strconv.ParseUint(val, 10, 64)
v.SetUint(i)
case reflect.Float32, reflect.Float64:
f, _ := strconv.ParseFloat(val, 64)
v.SetFloat(f)
case reflect.Bool:
b, _ := strconv.ParseBool(val)
v.SetBool(b)
case reflect.Slice:
err = setSlice(t, v, val)
default:
err = fmt.Errorf("unsupported type: %v", v.Kind().String())
}
return
}
// setInt64 stores an int64 parsed from val into v; time.Duration
// fields accept human-readable input such as "1h30m".
// NOTE(review): parse errors are discarded here, so invalid input
// silently stores 0 — callers cannot distinguish "0" from garbage.
func setInt64(v reflect.Value, val string) {
	// special case for parsing human readable input for time.Duration
	if _, ok := v.Interface().(time.Duration); ok {
		d, _ := time.ParseDuration(val)
		v.SetInt(int64(d))
		return
	}
	// regular int64 case
	i, _ := strconv.ParseInt(val, 10, 64)
	v.SetInt(i)
}
// setSlice parses a sliceSeparator-delimited string and stores the
// resulting slice in v. Blank items are skipped. Parse failures and an
// effectively empty input are reported (previously per-item parse
// errors were swallowed, leaving zero values in the slice; the error
// message also misspelled "empty").
func setSlice(t reflect.Type, v reflect.Value, val string) error {
	var items []string
	for _, item := range strings.Split(val, sliceSeparator) {
		if item = strings.TrimSpace(item); item != "" {
			items = append(items, item)
		}
	}
	size := len(items)
	if size == 0 {
		return fmt.Errorf("setSlice: got empty slice")
	}
	slice := reflect.MakeSlice(t, size, size)
	elemKind := t.Elem().Kind()
	for i, item := range items {
		el := slice.Index(i)
		switch elemKind {
		case reflect.String:
			el.SetString(item)
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
			n, err := strconv.ParseInt(item, 10, 64)
			if err != nil {
				return fmt.Errorf("setSlice: %w", err)
			}
			el.SetInt(n)
		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
			n, err := strconv.ParseUint(item, 10, 64)
			if err != nil {
				return fmt.Errorf("setSlice: %w", err)
			}
			el.SetUint(n)
		case reflect.Float32, reflect.Float64:
			f, err := strconv.ParseFloat(item, 64)
			if err != nil {
				return fmt.Errorf("setSlice: %w", err)
			}
			el.SetFloat(f)
		case reflect.Bool:
			b, err := strconv.ParseBool(item)
			if err != nil {
				return fmt.Errorf("setSlice: %w", err)
			}
			el.SetBool(b)
		default:
			return fmt.Errorf("setSlice: unsupported type of slice item: %v", elemKind.String())
		}
	}
	v.Set(slice)
	return nil
}
func setPtrValue(t reflect.Type, v reflect.Value, val string) (err error) {
switch t.Elem().Name() {
case reflect.Int.String(): // doesn't care about 32bit systems
if i64, err := strconv.ParseInt(val, 10, 64); err == nil {
i := int(i64)
v.Set(reflect.ValueOf(&i))
}
case reflect.Int8.String():
if i64, err := strconv.ParseInt(val, 10, 8); err == nil {
i8 := int8(i64)
v.Set(reflect.ValueOf(&i8))
}
case reflect.Int16.String():
if i64, err := strconv.ParseInt(val, 10, 16); err == nil {
i16 := int16(i64)
v.Set(reflect.ValueOf(&i16))
}
case reflect.Int32.String():
if i64, err := strconv.ParseInt(val, 10, 32); err == nil {
i32 := int32(i64)
v.Set(reflect.ValueOf(&i32))
}
case reflect.Int64.String():
if i64, err := strconv.ParseInt(val, 10, 64); err == nil {
v.Set(reflect.ValueOf(&i64))
}
case reflect.Uint.String(): // doesn't care about 32bit systems
if ui64, err := strconv.ParseUint(val, 10, 64); err == nil {
ui := uint(ui64)
v.Set(reflect.ValueOf(&ui))
}
case reflect.Uint8.String():
if ui64, err := strconv.ParseUint(val, 10, 8); err == nil {
ui8 := uint8(ui64)
v.Set(reflect.ValueOf(&ui8))
}
case reflect.Uint16.String():
if ui64, err := strconv.ParseUint(val, 10, 16); err == nil {
ui16 := uint16(ui64)
v.Set(reflect.ValueOf(&ui16))
}
case reflect.Uint32.String():
if ui64, err := strconv.ParseUint(val, 10, 32); err == nil {
ui32 := uint32(ui64)
v.Set(reflect.ValueOf(&ui32))
}
case reflect.Uint64.String():
if ui64, err := strconv.ParseUint(val, 10, 64); err == nil {
v.Set(reflect.ValueOf(&ui64))
}
case reflect.Float32.String():
if f64, err := strconv.ParseFloat(val, 32); err == nil {
f32 := float32(f64)
v.Set(reflect.ValueOf(&f32))
}
case reflect.Float64.String():
if f64, err := strconv.ParseFloat(val, 64); err == nil {
v.Set(reflect.ValueOf(&f64))
}
case reflect.String.String():
if len(val) > 0 {
v.Set(reflect.ValueOf(&val))
}
case reflect.Bool.String():
if b, err := strconv.ParseBool(val); err == nil {
v.Set(reflect.ValueOf(&b))
}
default:
err = fmt.Errorf("setPtrValue: unsupported type: %v", t.Kind().String())
}
return
} | fieldSetter.go | 0.655336 | 0.407157 | fieldSetter.go | starcoder |
package life
import (
"image"
"image/color"
"time"
"fyne.io/fyne"
"fyne.io/fyne/canvas"
"fyne.io/fyne/theme"
"fyne.io/fyne/widget"
"github.com/fyne-io/examples/img/icon"
)
// board holds the Game of Life grid: cells[y][x] is true when the cell
// at column x, row y is alive.
type board struct {
	cells  [][]bool
	width  int
	height int
}
// ifAlive reports the cell at (x, y) as 1 when alive and 0 otherwise.
// Coordinates outside the board count as dead, which lets
// countNeighbours probe all eight neighbours without bounds checks.
func (b *board) ifAlive(x, y int) int {
	inBounds := x >= 0 && x < b.width && y >= 0 && y < b.height
	if inBounds && b.cells[y][x] {
		return 1
	}
	return 0
}
// countNeighbours returns how many of the eight cells surrounding
// (x, y) are alive; off-board neighbours count as dead via ifAlive.
func (b *board) countNeighbours(x, y int) int {
	sum := 0
	for dy := -1; dy <= 1; dy++ {
		for dx := -1; dx <= 1; dx++ {
			if dx == 0 && dy == 0 {
				continue // skip the cell itself
			}
			sum += b.ifAlive(x+dx, y+dy)
		}
	}
	return sum
}
// nextGen computes the next generation into a fresh grid, leaving the
// current board untouched. Conway's rules: a live cell survives with
// 2 or 3 neighbours; a dead cell is born with exactly 3.
func (b *board) nextGen() [][]bool {
	state := make([][]bool, b.height)
	for y := 0; y < b.height; y++ {
		state[y] = make([]bool, b.width)
		for x := 0; x < b.width; x++ {
			n := b.countNeighbours(x, y)
			if b.cells[y][x] {
				state[y][x] = n == 2 || n == 3
			} else {
				state[y][x] = n == 3
			}
		}
	}
	return state
}
// renderState copies a computed generation (as returned by nextGen)
// into the board's cells.
func (b *board) renderState(state [][]bool) {
	for y, row := range state {
		copy(b.cells[y], row)
	}
}
// load seeds the board with two classic patterns: a Gosper glider gun
// in the top-left area and a lightweight spaceship lower down.
func (b *board) load() {
	// gun
	b.cells[5][1] = true
	b.cells[5][2] = true
	b.cells[6][1] = true
	b.cells[6][2] = true

	b.cells[3][13] = true
	b.cells[3][14] = true
	b.cells[4][12] = true
	b.cells[4][16] = true
	b.cells[5][11] = true
	b.cells[5][17] = true
	b.cells[6][11] = true
	b.cells[6][15] = true
	b.cells[6][17] = true
	b.cells[6][18] = true
	b.cells[7][11] = true
	b.cells[7][17] = true
	b.cells[8][12] = true
	b.cells[8][16] = true
	b.cells[9][13] = true
	b.cells[9][14] = true

	b.cells[1][25] = true
	b.cells[2][23] = true
	b.cells[2][25] = true
	b.cells[3][21] = true
	b.cells[3][22] = true
	b.cells[4][21] = true
	b.cells[4][22] = true
	b.cells[5][21] = true
	b.cells[5][22] = true
	b.cells[6][23] = true
	b.cells[6][25] = true
	b.cells[7][25] = true

	b.cells[3][35] = true
	b.cells[3][36] = true
	b.cells[4][35] = true
	b.cells[4][36] = true

	// spaceship
	b.cells[34][2] = true
	b.cells[34][3] = true
	b.cells[34][4] = true
	b.cells[34][5] = true
	b.cells[35][1] = true
	b.cells[35][5] = true
	b.cells[36][5] = true
	b.cells[37][1] = true
	b.cells[37][4] = true
}
// newBoard allocates an all-dead 60x50 board.
func newBoard() *board {
	b := &board{nil, 60, 50}
	b.cells = make([][]bool, b.height)
	for y := 0; y < b.height; y++ {
		b.cells[y] = make([]bool, b.width)
	}
	return b
}
// game is the Fyne widget wrapping a board; it implements the
// CanvasObject geometry/visibility contract plus tap and key input.
type game struct {
	board    *board
	paused   bool
	size     fyne.Size
	position fyne.Position
	hidden   bool
}
// Size returns the widget's current size.
func (g *game) Size() fyne.Size {
	return g.size
}

// Resize stores the new size and re-runs layout.
func (g *game) Resize(size fyne.Size) {
	g.size = size
	widget.Renderer(g).Layout(size)
}

// Position returns the widget's current position.
func (g *game) Position() fyne.Position {
	return g.position
}

// Move stores the new position and re-runs layout.
func (g *game) Move(pos fyne.Position) {
	g.position = pos
	widget.Renderer(g).Layout(g.size)
}

// MinSize delegates to the renderer's minimum size.
func (g *game) MinSize() fyne.Size {
	return widget.Renderer(g).MinSize()
}
// Visible reports whether the widget is currently shown.
// Note the negation: hidden == true means NOT visible. The previous
// version returned g.hidden directly, inverting the result.
func (g *game) Visible() bool {
	return !g.hidden
}

// Show marks the widget as visible.
func (g *game) Show() {
	g.hidden = false
}

// Hide marks the widget as hidden.
func (g *game) Hide() {
	g.hidden = true
}
// gameRenderer draws the board as a raster image, caching the RGBA
// buffer between frames and the theme colours between theme changes.
type gameRenderer struct {
	render   *canvas.Raster
	objects  []fyne.CanvasObject
	imgCache *image.RGBA
	aliveColor color.Color
	deadColor  color.Color
	game *game
}
// MinSize reserves 10 pixels per board cell.
func (g *gameRenderer) MinSize() fyne.Size {
	return fyne.NewSize(g.game.board.width*10, g.game.board.height*10)
}

// Layout resizes the raster to fill the widget.
func (g *gameRenderer) Layout(size fyne.Size) {
	g.render.Resize(size)
}

// ApplyTheme refreshes the cached alive/dead colours from the theme.
func (g *gameRenderer) ApplyTheme() {
	g.aliveColor = theme.TextColor()
	g.deadColor = theme.BackgroundColor()
}

// BackgroundColor is the colour drawn behind the raster.
func (g *gameRenderer) BackgroundColor() color.Color {
	return theme.BackgroundColor()
}

// Refresh requests a repaint of the raster.
func (g *gameRenderer) Refresh() {
	canvas.Refresh(g.render)
}

// Objects returns the canvas objects this renderer manages.
func (g *gameRenderer) Objects() []fyne.CanvasObject {
	return g.objects
}

// Destroy releases renderer resources; nothing to clean up here.
func (g *gameRenderer) Destroy() {
}
// draw renders the board into a w×h RGBA image, reusing the cached
// image while the size is unchanged. Each pixel is mapped back to its
// board cell, so cells scale with the widget.
func (g *gameRenderer) draw(w, h int) image.Image {
	img := g.imgCache
	if img == nil || img.Bounds().Size().X != w || img.Bounds().Size().Y != h {
		img = image.NewRGBA(image.Rect(0, 0, w, h))
		g.imgCache = img
	}
	for y := 0; y < h; y++ {
		for x := 0; x < w; x++ {
			xpos, ypos := g.game.cellForCoord(x, y, w, h)
			if xpos < g.game.board.width && ypos < g.game.board.height && g.game.board.cells[ypos][xpos] {
				img.Set(x, y, g.aliveColor)
			} else {
				img.Set(x, y, g.deadColor)
			}
		}
	}
	return img
}
// CreateRenderer builds the widget renderer backing this game.
func (g *game) CreateRenderer() fyne.WidgetRenderer {
	renderer := &gameRenderer{game: g}
	render := canvas.NewRaster(renderer.draw)
	renderer.render = render
	renderer.objects = []fyne.CanvasObject{render}
	renderer.ApplyTheme()
	return renderer
}
// cellForCoord translates a pixel position (x, y) inside a w×h render
// area into board cell coordinates (column, row).
func (g *game) cellForCoord(x, y, w, h int) (int, int) {
	cellX := int(float64(g.board.width) * (float64(x) / float64(w)))
	cellY := int(float64(g.board.height) * (float64(y) / float64(h)))
	return cellX, cellY
}
// run resumes the simulation.
func (g *game) run() {
	g.paused = false
}

// stop pauses the simulation.
func (g *game) stop() {
	g.paused = true
}

// toggleRun flips between running and paused.
func (g *game) toggleRun() {
	g.paused = !g.paused
}
// animate runs the simulation in a background goroutine, advancing one
// generation six times per second unless paused. Ranging over the
// ticker channel replaces the old single-case select (identical
// behaviour, simpler code).
// NOTE(review): the goroutine has no stop signal and runs for the
// process lifetime; fine for a demo, but a quit channel plus
// tick.Stop would be needed to shut it down cleanly.
func (g *game) animate() {
	go func() {
		tick := time.NewTicker(time.Second / 6)
		for range tick.C {
			if g.paused {
				continue
			}
			state := g.board.nextGen()
			g.board.renderState(state)
			widget.Refresh(g)
		}
	}()
}
// typedRune pauses/resumes the simulation when space is pressed.
func (g *game) typedRune(r rune) {
	if r == ' ' {
		g.toggleRun()
	}
}
// Tapped toggles the cell under the pointer and repaints.
func (g *game) Tapped(ev *fyne.PointEvent) {
	xpos, ypos := g.cellForCoord(ev.Position.X, ev.Position.Y, g.size.Width, g.size.Height)
	// Ignore taps outside the widget / board.
	if ev.Position.X < 0 || ev.Position.Y < 0 || xpos >= g.board.width || ypos >= g.board.height {
		return
	}
	g.board.cells[ypos][xpos] = !g.board.cells[ypos][xpos]
	widget.Refresh(g)
}

// TappedSecondary is required by the tappable interface; right-clicks
// are ignored.
func (g *game) TappedSecondary(ev *fyne.PointEvent) {
}
// newGame wraps a board in a game widget.
func newGame(b *board) *game {
	g := &game{board: b}
	return g
}

// Show starts a new game of life
func Show(app fyne.App) {
	board := newBoard()
	board.load()
	game := newGame(board)

	window := app.NewWindow("Life")
	window.SetIcon(icon.LifeBitmap)
	window.SetContent(game)
	window.Canvas().SetOnTypedRune(game.typedRune)

	// start the board animation before we show the window - it will block
	game.animate()
	window.Show()
}
package ahtree
import "crypto/sha256"
// VerifyInclusion checks that iLeaf at 1-based position i is included
// in the tree of size j whose root is jRoot, using the sibling hashes
// in iproof. Requires 1 <= i <= j, and a non-empty proof when i < j.
func VerifyInclusion(iproof [][sha256.Size]byte, i, j uint64, iLeaf, jRoot [sha256.Size]byte) bool {
	if i > j || i == 0 || (i < j && len(iproof) == 0) {
		return false
	}
	ciRoot := EvalInclusion(iproof, i, j, iLeaf)
	return jRoot == ciRoot
}
// EvalInclusion recomputes the root of a size-j tree from leaf i and
// its sibling hashes, climbing one level per proof entry.
func EvalInclusion(iproof [][sha256.Size]byte, i, j uint64, iLeaf [sha256.Size]byte) [sha256.Size]byte {
	// Work with 0-based positions.
	i1 := i - 1
	j1 := j - 1
	ciRoot := iLeaf
	// Reusable node-hash input: prefix byte + left hash + right hash.
	b := [1 + sha256.Size*2]byte{NodePrefix}
	for _, h := range iproof {
		if i1%2 == 0 && i1 != j1 {
			// Current node is a left child (and not the rightmost node
			// on this level): the sibling goes on the right.
			copy(b[1:], ciRoot[:])
			copy(b[sha256.Size+1:], h[:])
		} else {
			copy(b[1:], h[:])
			copy(b[sha256.Size+1:], ciRoot[:])
		}
		ciRoot = sha256.Sum256(b[:])
		i1 >>= 1
		j1 >>= 1
	}
	return ciRoot
}
// VerifyConsistency checks that the tree with root iRoot at size i is
// a prefix of the tree with root jRoot at size j, given the
// consistency proof cproof. For i == j an empty proof is valid iff
// the roots match.
func VerifyConsistency(cproof [][sha256.Size]byte, i, j uint64, iRoot, jRoot [sha256.Size]byte) bool {
	if i > j || i == 0 || (i < j && len(cproof) == 0) {
		return false
	}
	if i == j && len(cproof) == 0 {
		return iRoot == jRoot
	}
	ciRoot, cjRoot := EvalConsistency(cproof, i, j)
	return iRoot == ciRoot && jRoot == cjRoot
}
// EvalConsistency recomputes both the old (size i) and new (size j)
// roots from a consistency proof. cproof must be non-empty;
// VerifyConsistency guards this before calling.
func EvalConsistency(cproof [][sha256.Size]byte, i, j uint64) ([sha256.Size]byte, [sha256.Size]byte) {
	fn := i - 1
	sn := j - 1
	// Drop common trailing 1-bits: start at the node where the old
	// tree's rightmost path joins a complete subtree.
	for fn%2 == 1 {
		fn >>= 1
		sn >>= 1
	}
	// The first proof entry seeds both reconstructions.
	ciRoot, cjRoot := cproof[0], cproof[0]
	b := [1 + sha256.Size*2]byte{NodePrefix}
	for _, h := range cproof[1:] {
		if fn%2 == 1 || fn == sn {
			// Left sibling: extends both the old and the new root.
			copy(b[1:], h[:])
			copy(b[1+sha256.Size:], ciRoot[:])
			ciRoot = sha256.Sum256(b[:])
			copy(b[1+sha256.Size:], cjRoot[:])
			cjRoot = sha256.Sum256(b[:])
			for fn%2 == 0 && fn != 0 {
				fn >>= 1
				sn >>= 1
			}
		} else {
			// Right sibling: only affects the new (larger) root.
			copy(b[1:], cjRoot[:])
			copy(b[1+sha256.Size:], h[:])
			cjRoot = sha256.Sum256(b[:])
		}
		fn >>= 1
		sn >>= 1
	}
	return ciRoot, cjRoot
}
// VerifyLastInclusion checks that leaf is the last (i-th, 1-based)
// leaf of the tree whose root is root.
func VerifyLastInclusion(iproof [][sha256.Size]byte, i uint64, leaf, root [sha256.Size]byte) bool {
	if i == 0 {
		return false
	}
	return root == EvalLastInclusion(iproof, i, leaf)
}
func EvalLastInclusion(iproof [][sha256.Size]byte, i uint64, leaf [sha256.Size]byte) [sha256.Size]byte {
i1 := i - 1
root := leaf
b := [1 + sha256.Size*2]byte{NodePrefix}
for _, h := range iproof {
copy(b[1:], h[:])
copy(b[sha256.Size+1:], root[:])
root = sha256.Sum256(b[:])
i1 >>= 1
}
return root
} | embedded/ahtree/verification.go | 0.566978 | 0.589835 | verification.go | starcoder |
package main
import (
"flag"
"fmt"
"math"
)
// https://projecteuler.net/problem=7
// We can offset unncessary computation by setting the prime check startpoint
// using the prime count function
// https://mathworld.wolfram.com/PrimeNumberTheorem.html
// For simplicity, the table listed in the @findPrimeAtIndex function is used to approximate the occurrence
// Additional study on implementing the code to generate the table is for the viewers discretion to do so
// Not too efficient, overflows on n=17
// Implements Wilson's theorem on prime validation
// Implementation guide: https://codegolf.stackexchange.com/a/94385
// primeCountingFunction counts primes k with k <= n using Wilson's
// theorem (p ≡ 1 mod k when k is prime, given the factorial-style
// accumulator p seeded by the caller).
// NOTE(review): as the file header says, p grows as a product of
// squares and overflows int64 almost immediately (around n = 17); the
// Println is leftover debug output. main does not call this function.
func primeCountingFunction(n int64, k int64, p int64) int{
	count := 0
	for k <= n{
		// A prime is valid if p%k == 1
		// and not if p%k == 0
		if p % k == 1 {
			count ++
		}
		fmt.Println(k, p, count)
		p *= k * k
		k += 1
	}
	return count
}
// isPrime reports whether n is prime by trial division up to sqrt(n).
// Values below 2 are not prime — the original accepted 0, 1 and every
// negative number. Using i*i <= n also avoids the float64 sqrt.
func isPrime(n int) bool {
	if n < 2 {
		return false
	}
	for i := 2; i*i <= n; i++ {
		if n%i == 0 {
			return false
		}
	}
	return true
}
// findPrimeAtIndex returns the n-th prime (1-based). It fast-forwards
// using a hard-coded table of prime counts π(10^k), jumping to the
// largest power of ten whose count does not exceed n, then counts
// upward one candidate at a time with isPrime.
func findPrimeAtIndex(n int) int{
	// Using prime counting function table values
	// Full table available here: https://mathworld.wolfram.com/PrimeCountingFunction.html
	// Each index here corresponds to a count up to 10 of the power of that index
	// 1 - 10, 2 -100, 3 - 1000 etc.
	primeCount := []int{4, 25, 168, 1229, 9592, 78498, 664579}
	// Starting pair: π(10) = 4 primes at or below 10.
	availPrimeCount := 4
	nCountValue := 10
	for idx, num := range primeCount {
		if n < num{
			break
		}
		availPrimeCount = num
		nCountValue = int(math.Pow(10, float64(idx+1)))
	}
	fmt.Println("Prime count, Current Value: ", availPrimeCount, nCountValue)
	// Walk forward until n primes have been seen.
	for n > availPrimeCount {
		nCountValue++
		if isPrime(nCountValue) {
			availPrimeCount++
		}
	}
	return nCountValue
}
// main reads -number from the command line and prints the n-th prime.
func main(){
	maxNumPtr := flag.Int("number", 20, "Integer for the max range")
	flag.Parse()
	fmt.Println("Prime is ", findPrimeAtIndex(*maxNumPtr))
}
package geometry
import (
"github.com/g3n/engine/geometry"
"github.com/g3n/engine/graphic"
"github.com/g3n/engine/light"
"github.com/g3n/engine/material"
"github.com/g3n/engine/math32"
"github.com/g3n/g3nd/app"
"github.com/g3n/g3nd/demos"
"math"
)
// Torus is the demo state: the animated wireframe torus and its
// normals helper, both updated every frame in Render.
type Torus struct {
	torus1  *graphic.Mesh
	normals *graphic.NormalsHelper
}

// init registers this demo under the "geometry.torus" key.
func init() {
	demos.Map["geometry.torus"] = &Torus{}
}
// Initialize builds the demo scene: three coloured directional lights
// plus four tori showing different segment counts, materials and arc
// angles, along with axis and normals helpers.
func (t *Torus) Initialize(a *app.App) {
	// Add directional red light from right
	l1 := light.NewDirectional(&math32.Color{1, 0, 0}, 1.0)
	l1.SetPosition(1, 0, 0)
	a.Scene().Add(l1)

	// Add directional green light from top
	l2 := light.NewDirectional(&math32.Color{0, 1, 0}, 1.0)
	l2.SetPosition(0, 1, 0)
	a.Scene().Add(l2)

	// Add directional blue light from front
	l3 := light.NewDirectional(&math32.Color{0, 0, 1}, 1.0)
	l3.SetPosition(0, 0, 1)
	a.Scene().Add(l3)

	// Add torus at upper-left (low segment count, double-sided wireframe)
	geom1 := geometry.NewTorus(1, 0.25, 8, 8, 2*math.Pi)
	mat1 := material.NewStandard(&math32.Color{0, 0, 0.5})
	t.torus1 = graphic.NewMesh(geom1, mat1)
	mat1.SetWireframe(true)
	mat1.SetSide(material.SideDouble)
	t.torus1.SetPosition(-2, 1.5, 0)
	a.Scene().Add(t.torus1)

	// Add torus at upper-right
	geom2 := geometry.NewTorus(1, 0.25, 32, 32, 2*math.Pi)
	mat2 := material.NewStandard(&math32.Color{1, 1, 1})
	torus2 := graphic.NewMesh(geom2, mat2)
	torus2.SetPosition(2, 1.5, 0)
	a.Scene().Add(torus2)

	// Add torus at bottom-left
	geom3 := geometry.NewTorus(1, 0.25, 32, 32, 2*math.Pi)
	mat3 := material.NewStandard(&math32.Color{0.5, 0.5, 0.5})
	torus3 := graphic.NewMesh(geom3, mat3)
	torus3.SetPosition(-2, -1.5, 0)
	a.Scene().Add(torus3)

	// Add torus at bottom-right (three-quarter arc, double-sided)
	geom4 := geometry.NewTorus(1, 0.25, 64, 64, 3*math.Pi/2)
	mat4 := material.NewStandard(&math32.Color{0.5, 0.5, 0.5})
	mat4.SetSide(material.SideDouble)
	torus4 := graphic.NewMesh(geom4, mat4)
	torus4.SetPosition(2, -1.5, 0)
	a.Scene().Add(torus4)

	// Adds axis helper
	axis := graphic.NewAxisHelper(2)
	a.Scene().Add(axis)

	// Adds normals helper
	t.normals = graphic.NewNormalsHelper(t.torus1, 0.5, &math32.Color{0, 1, 0}, 1)
	a.Scene().Add(t.normals)
}
// Render spins the wireframe torus and keeps its normals helper in
// sync, scaling by the frame delta for frame-rate-independent speed.
func (t *Torus) Render(a *app.App) {
	// Rotate at 1 rotation each 5 seconds
	delta := a.FrameDeltaSeconds() * 2 * math32.Pi / 5
	t.torus1.RotateZ(delta)
	t.normals.Update()
}
// Package livelog provides a buffer that can be simultaneously written to by
// one writer and read from by many readers.
package livelog
import (
"io"
"sync"
)
// MaxBufferSize caps the total bytes a Buffer retains; writes past
// this limit are silently truncated (see Buffer.Write).
const MaxBufferSize = 2 << 20 // 2MB of output is way more than we expect.
// Buffer is a WriteCloser that provides multiple Readers that each yield the same data.
// It is safe to Write to a Buffer while Readers consume that data.
// Its zero value is a ready-to-use buffer.
type Buffer struct {
	mu   sync.Mutex // Guards the fields below.
	wake *sync.Cond // Created on demand by reader.
	buf  []byte
	eof  bool
	// lastID is a monotonically increasing id handed to each new reader.
	lastID int
}
// Write appends data to the Buffer.
// It will wake any blocked Readers.
// Data beyond MaxBufferSize is dropped, but the reported byte count is
// always len(b2), so callers such as io.Copy never see a short write.
func (b *Buffer) Write(b2 []byte) (int, error) {
	b.mu.Lock()
	defer b.mu.Unlock()
	b2len := len(b2)
	if len(b.buf)+b2len > MaxBufferSize {
		b2 = b2[:MaxBufferSize-len(b.buf)]
	}
	b.buf = append(b.buf, b2...)
	b.wakeReaders()
	return b2len, nil
}
// Close signals EOF to all Readers; it never fails.
func (b *Buffer) Close() error {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.eof = true
	b.wakeReaders()
	return nil
}

// wakeReaders wakes any sleeping readers.
// b.mu must be held when calling.
func (b *Buffer) wakeReaders() {
	if b.wake != nil {
		b.wake.Broadcast()
	}
}
// Bytes returns a copy of the underlying buffer.
func (b *Buffer) Bytes() []byte {
	b.mu.Lock()
	defer b.mu.Unlock()
	return append([]byte(nil), b.buf...)
}

// String returns a copy of the underlying buffer as a string.
func (b *Buffer) String() string {
	b.mu.Lock()
	defer b.mu.Unlock()
	return string(b.buf)
}
// Reader initializes and returns a ReadCloser that will emit the
// entire buffer from the beginning.
// It is safe to call Read and Close concurrently.
func (b *Buffer) Reader() io.ReadCloser {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.lastID++
	return &reader{buf: b, id: b.lastID}
}

// reader is one consumer's cursor into a Buffer.
type reader struct {
	buf    *Buffer
	id     int // Read-only.
	read   int // Bytes read; accessed by only the Read method.
	closed bool // Guarded by buf.mu.
}
// Read copies previously unread bytes into b, blocking on a condition
// variable (shared through the Buffer, created lazily) until data
// arrives, the writer closes, or this reader is closed.
func (r *reader) Read(b []byte) (int, error) {
	r.buf.mu.Lock()
	defer r.buf.mu.Unlock()

	// Wait for data or writer EOF or reader closed.
	for len(r.buf.buf) == r.read && !r.buf.eof && !r.closed {
		if r.buf.wake == nil {
			r.buf.wake = sync.NewCond(&r.buf.mu)
		}
		r.buf.wake.Wait()
	}

	// Return EOF if writer reported EOF or this reader is closed.
	if (len(r.buf.buf) == r.read && r.buf.eof) || r.closed {
		return 0, io.EOF
	}

	// Emit some data.
	n := copy(b, r.buf.buf[r.read:])
	r.read += n
	return n, nil
}
// Close makes subsequent (and pending) Reads on this reader return
// io.EOF, without affecting the Buffer or its other readers.
func (r *reader) Close() error {
	r.buf.mu.Lock()
	defer r.buf.mu.Unlock()
	r.closed = true
	// Wake any sleeping readers to unblock a pending read on this reader.
	// (For other open readers this will be a no-op.)
	r.buf.wakeReaders()
	return nil
}
// Package coordsparser is a library for parsing (geographic) coordinates in various string formats
package coordsparser
import (
"fmt"
"regexp"
"strconv"
)
// Parse parses a coordinate string and returns a lat/lng pair or an
// error. Every supported notation is tried in order — decimal degrees,
// hemisphere+degrees, hemisphere+degrees+minutes, then
// hemisphere+degrees+minutes+seconds — and the first match wins.
func Parse(s string) (float64, float64, error) {
	parsers := []func(string) (float64, float64, error){
		ParseD,
		ParseHD,
		ParseHDM,
		ParseHDMS,
	}
	for _, parse := range parsers {
		if lat, lng, err := parse(s); err == nil {
			return lat, lng, nil
		}
	}
	return 0, 0, fmt.Errorf("Cannot parse coordinates: %s", s)
}
// dRe matches "D.DDDD sep D.DDDD"; compiled once at package level so
// each call no longer pays regexp compilation.
var dRe = regexp.MustCompile(`^\s*([+-]?[\d\.]+)\s*(,|;|:|\s)\s*([+-]?[\d\.]+)\s*$`)

// ParseD parses a coordinate string of the form "D.DDDD D.DDDD"
// (separators: comma, semicolon, colon or whitespace) and returns a
// lat/lng pair or an error. Ranges are validated: lat in [-90, 90],
// lng in [-180, 180].
func ParseD(s string) (float64, float64, error) {
	matches := dRe.FindStringSubmatch(s)
	if matches == nil {
		return 0, 0, fmt.Errorf("Cannot parse 'D' string: %s", s)
	}
	lat, err := strconv.ParseFloat(matches[1], 64)
	if err != nil || lat < -90 || lat > 90 {
		return 0, 0, fmt.Errorf("Cannot parse 'D' string: %s", s)
	}
	lng, err := strconv.ParseFloat(matches[3], 64)
	if err != nil || lng < -180 || lng > 180 {
		return 0, 0, fmt.Errorf("Cannot parse 'D' string: %s", s)
	}
	return lat, lng, nil
}
// hdRe matches "H D.DDDD H D.DDDD"; compiled once at package level.
var hdRe = regexp.MustCompile(`^\s*([NnSs])\s*([\d\.]+)\s+([EeWw])\s*([\d\.]+)\s*$`)

// ParseHD parses a coordinate string of the form "H D.DDDD H D.DDDD"
// (hemisphere-prefixed decimal degrees, e.g. "N 52.5 E 13.4") and
// returns a lat/lng pair or an error. S and W yield negative values.
func ParseHD(s string) (float64, float64, error) {
	matches := hdRe.FindStringSubmatch(s)
	if matches == nil {
		return 0, 0, fmt.Errorf("Cannot parse 'HD' string: %s", s)
	}
	lat, err := strconv.ParseFloat(matches[2], 64)
	if err != nil || lat > 90 {
		return 0, 0, fmt.Errorf("Cannot parse 'HD' string: %s", s)
	}
	if matches[1] == "S" || matches[1] == "s" {
		lat = -lat
	}
	lng, err := strconv.ParseFloat(matches[4], 64)
	if err != nil || lng > 180 {
		return 0, 0, fmt.Errorf("Cannot parse 'HD' string: %s", s)
	}
	if matches[3] == "W" || matches[3] == "w" {
		lng = -lng
	}
	return lat, lng, nil
}
// hdmRe matches "H D M.MMM H D M.MMM"; compiled once at package level.
var hdmRe = regexp.MustCompile(`^\s*([NnSs])\s*([\d]+)\s+([\d.]+)\s+([EeWw])\s*([\d]+)\s+([\d.]+)\s*$`)

// ParseHDM parses a coordinate string of the form "H D M.MMM H D M.MMM"
// and returns a lat/lng pair or an error. The combined magnitude is
// validated: the old version accepted e.g. "N 90 30.0" (90.5 degrees)
// because only the whole-degree part was range-checked.
func ParseHDM(s string) (float64, float64, error) {
	matches := hdmRe.FindStringSubmatch(s)
	if matches == nil {
		return 0, 0, fmt.Errorf("Cannot parse 'HDM' string: %s", s)
	}
	latDeg, err := strconv.ParseFloat(matches[2], 64)
	if err != nil || latDeg > 90 {
		return 0, 0, fmt.Errorf("Cannot parse 'HDM' string: %s", s)
	}
	latMin, err := strconv.ParseFloat(matches[3], 64)
	if err != nil || latMin >= 60 {
		return 0, 0, fmt.Errorf("Cannot parse 'HDM' string: %s", s)
	}
	lat := latDeg + latMin/60.0
	if lat > 90 {
		return 0, 0, fmt.Errorf("Cannot parse 'HDM' string: %s", s)
	}
	if matches[1] == "S" || matches[1] == "s" {
		lat = -lat
	}
	lngDeg, err := strconv.ParseFloat(matches[5], 64)
	if err != nil || lngDeg > 180 {
		return 0, 0, fmt.Errorf("Cannot parse 'HDM' string: %s", s)
	}
	lngMin, err := strconv.ParseFloat(matches[6], 64)
	if err != nil || lngMin >= 60 {
		return 0, 0, fmt.Errorf("Cannot parse 'HDM' string: %s", s)
	}
	lng := lngDeg + lngMin/60.0
	if lng > 180 {
		return 0, 0, fmt.Errorf("Cannot parse 'HDM' string: %s", s)
	}
	if matches[4] == "W" || matches[4] == "w" {
		lng = -lng
	}
	return lat, lng, nil
}
// hdmsRe matches "H D M S.SSS H D M S.SSS"; compiled once at package level.
var hdmsRe = regexp.MustCompile(`^\s*([NnSs])\s*([\d]+)\s+([\d]+)\s+([\d.]+)\s+([EeWw])\s*([\d]+)\s+([\d]+)\s+([\d.]+)\s*$`)

// ParseHDMS parses a coordinate string of the form
// "H D M S.SSS H D M S.SSS" and returns a lat/lng pair or an error.
// The combined magnitude is validated, rejecting inputs whose minutes
// or seconds push the value past 90/180 degrees.
func ParseHDMS(s string) (float64, float64, error) {
	matches := hdmsRe.FindStringSubmatch(s)
	if matches == nil {
		return 0, 0, fmt.Errorf("Cannot parse 'HDMS' string: %s", s)
	}
	latDeg, err := strconv.ParseFloat(matches[2], 64)
	if err != nil || latDeg > 90 {
		return 0, 0, fmt.Errorf("Cannot parse 'HDMS' string: %s", s)
	}
	latMin, err := strconv.ParseFloat(matches[3], 64)
	if err != nil || latMin >= 60 {
		return 0, 0, fmt.Errorf("Cannot parse 'HDMS' string: %s", s)
	}
	latSec, err := strconv.ParseFloat(matches[4], 64)
	if err != nil || latSec >= 60 {
		return 0, 0, fmt.Errorf("Cannot parse 'HDMS' string: %s", s)
	}
	lat := latDeg + latMin/60.0 + latSec/3600.0
	if lat > 90 {
		return 0, 0, fmt.Errorf("Cannot parse 'HDMS' string: %s", s)
	}
	if matches[1] == "S" || matches[1] == "s" {
		lat = -lat
	}
	lngDeg, err := strconv.ParseFloat(matches[6], 64)
	if err != nil || lngDeg > 180 {
		return 0, 0, fmt.Errorf("Cannot parse 'HDMS' string: %s", s)
	}
	lngMin, err := strconv.ParseFloat(matches[7], 64)
	if err != nil || lngMin >= 60 {
		return 0, 0, fmt.Errorf("Cannot parse 'HDMS' string: %s", s)
	}
	lngSec, err := strconv.ParseFloat(matches[8], 64)
	if err != nil || lngSec >= 60 {
		return 0, 0, fmt.Errorf("Cannot parse 'HDMS' string: %s", s)
	}
	lng := lngDeg + lngMin/60.0 + lngSec/3600.0
	if lng > 180 {
		return 0, 0, fmt.Errorf("Cannot parse 'HDMS' string: %s", s)
	}
	if matches[5] == "W" || matches[5] == "w" {
		lng = -lng
	}
	return lat, lng, nil
}
package test_version1
import (
"testing"
"github.com/pip-services-users/pip-clients-roles-go/version1"
"github.com/pip-services3-go/pip-services3-commons-go/data"
"github.com/stretchr/testify/assert"
)
// RolesClientFixtureV1 bundles the shared test scenarios that every
// IRolesClientV1 implementation (direct, HTTP, gRPC, ...) must pass.
type RolesClientFixtureV1 struct {
	Client version1.IRolesClientV1
}

// ROLES is the default role set used across the fixture's tests.
var ROLES = []string{"Role 1", "Role 2", "Role 3"}

// NewRolesClientFixtureV1 creates a fixture around the given client.
func NewRolesClientFixtureV1(client version1.IRolesClientV1) *RolesClientFixtureV1 {
	return &RolesClientFixtureV1{
		Client: client,
	}
}
// clear revokes every granted role so each test starts from a clean
// slate; errors are deliberately ignored (best-effort cleanup).
func (c *RolesClientFixtureV1) clear() {
	page, _ := c.Client.GetRolesByFilter("", nil, nil)
	if page != nil {
		for _, v := range page.Data {
			roles := v.(*version1.UserRolesV1)
			c.Client.RevokeRoles("", roles.Id, roles.Roles)
		}
	}
}
// TestGetAndSetRoles verifies that SetRoles replaces a party's roles
// and that they can be read back directly and through a filter query.
func (c *RolesClientFixtureV1) TestGetAndSetRoles(t *testing.T) {
	c.clear()
	defer c.clear()

	// Update party roles
	roles, err := c.Client.SetRoles("", "1", ROLES)
	assert.Nil(t, err)
	assert.True(t, len(roles) == 3)

	// Read and check party roles
	roles, err = c.Client.GetRolesById("", "1")
	assert.Nil(t, err)
	assert.True(t, len(roles) == 3)

	// Get roles by filter
	page, err1 := c.Client.GetRolesByFilter("", data.NewFilterParamsFromTuples("roles", ROLES), nil)
	assert.Nil(t, err1)
	assert.NotNil(t, page)
	assert.True(t, len(page.Data) >= 1)
}
// TestGrantAndRevokeRoles verifies that GrantRoles accumulates roles
// (idempotently for duplicates) and RevokeRoles removes only the
// requested ones.
func (c *RolesClientFixtureV1) TestGrantAndRevokeRoles(t *testing.T) {
	c.clear()
	defer c.clear()

	// Grant roles first time
	roles, err := c.Client.GrantRoles("", "1", []string{"Role 1"})
	assert.Nil(t, err)
	assert.Len(t, roles, 1)
	assert.Contains(t, roles, "Role 1")

	// Grant roles second time
	roles, err = c.Client.GrantRoles("", "1", []string{"Role 1", "Role 2", "Role 3"})
	assert.Nil(t, err)
	assert.Len(t, roles, 3)
	assert.Contains(t, roles, "Role 1")
	assert.Contains(t, roles, "Role 2")
	assert.Contains(t, roles, "Role 3")

	// Revoke roles first time
	roles, err = c.Client.RevokeRoles("", "1", []string{"Role 1"})
	assert.Nil(t, err)
	assert.Len(t, roles, 2)
	assert.Contains(t, roles, "Role 2")
	assert.Contains(t, roles, "Role 3")

	// Get roles
	roles, err = c.Client.GetRolesById("", "1")
	assert.Nil(t, err)
	assert.True(t, len(roles) == 2)
	assert.Contains(t, roles, "Role 2")
	assert.Contains(t, roles, "Role 3")

	// Revoke roles second time
	roles, err = c.Client.RevokeRoles("", "1", []string{"Role 1", "Role 2"})
	assert.Nil(t, err)
	assert.Len(t, roles, 1)
	assert.Contains(t, roles, "Role 3")
}
// TestAuthorize verifies that Authorize requires ALL requested roles
// to be granted, not just any of them.
func (c *RolesClientFixtureV1) TestAuthorize(t *testing.T) {
	c.clear()
	defer c.clear()

	// Grant roles
	roles, err := c.Client.GrantRoles("", "1", []string{"Role 1", "Role 2"})
	assert.Nil(t, err)
	assert.Len(t, roles, 2)

	// Authorize positively
	auth, err1 := c.Client.Authorize("", "1", []string{"Role 1"})
	assert.Nil(t, err1)
	assert.True(t, auth)

	// Authorize negatively: "Role 3" was never granted.
	auth, err1 = c.Client.Authorize("", "1", []string{"Role 2", "Role 3"})
	assert.Nil(t, err1)
	assert.False(t, auth)
}
// Package spreadsheet provides simple a interface to read spreadsheet files
// including XLSX and CSV.
package spreadsheet
import (
"fmt"
"path/filepath"
"strconv"
"strings"
"github.com/pkg/errors"
)
// Open opens a spreadsheet file.
// filePath is the path to the spreadsheet file to be opened; supported
// types are XLSX and CSV (as described in RFC 4180), matched by
// case-insensitive extension. A nil pointer and an error are returned
// for unsupported files — including extension-less paths, on which the
// old code sliced ext[1:] and panicked.
func Open(filePath string) (*Spreadsheet, error) {
	switch strings.ToLower(filepath.Ext(filePath)) {
	case ".csv":
		return openCSVFile(filePath)
	case ".xlsx":
		return openXLSXFile(filePath)
	default:
		return nil, fmt.Errorf("input file is not a valid spreadsheet file")
	}
}
// Spreadsheet is a high-level interface for spreadsheet files including XLSX,
// XLS and CSV. A Spreadsheet contains one or more sheets of data.
type Spreadsheet struct {
	Sheets []*Sheet // For CSV there's only 1 sheet, named "Sheet 1".
	SheetsByName map[string]*Sheet // Lookup of the same sheets keyed by sheet name.
}
// sheetsByName builds a name-to-sheet lookup table from ss.Sheets.
func (ss *Spreadsheet) sheetsByName() map[string]*Sheet {
	byName := map[string]*Sheet{}
	for _, sh := range ss.Sheets {
		byName[sh.Name] = sh
	}
	return byName
}
// Sheet is a single page in a spreadsheet.
type Sheet struct {
	Name string // Sheet name; for CSV files this is "Sheet 1".
	Rows []*Row // Rows of the sheet, in top-to-bottom order.
}

// Cell returns the j-th cell in the i-th row of sheet s.
// Both indices are zero-based; out-of-range indices panic.
func (s *Sheet) Cell(i int, j int) *Cell {
	return s.Rows[i].Cells[j]
}

// Row is a single row of data in a spreadsheet, containing multiple cells.
type Row struct {
	Cells []*Cell // Cells of the row, in left-to-right order.
}

// Cell holds a single piece of data in a spreadsheet.
type Cell struct {
	dataType CellDataType // Primitive type of the stored value.
	data string // Raw value, kept in its textual representation.
}
// CellDataType represents the primitive data type in spreadsheet cells.
type CellDataType uint8

// CellDataType constants.
const (
	String CellDataType = iota
	Numeric
	Bool
)

// Name returns name of cell data type t; unrecognized values yield "Unknown".
func (t CellDataType) Name() string {
	switch t {
	case Bool:
		return "Bool"
	case Numeric:
		return "Numeric"
	case String:
		return "String"
	}
	return "Unknown"
}
// Type returns the data type of cell c.
func (c *Cell) Type() CellDataType {
	return c.dataType
}

// Is returns true if data type of cell c is the same with the given type t.
func (c *Cell) Is(t CellDataType) bool {
	return c.dataType == t
}

// String returns the cell data as a string.
// The raw textual value is returned regardless of the cell's data type.
func (c *Cell) String() string {
	return c.data
}
// Float returns the cell data as a float.
// If the cell data is not a numeric value, an error is returned.
func (c *Cell) Float() (float64, error) {
	if !c.Is(Numeric) {
		return 0, fmt.Errorf("cell data is not numeric")
	}
	value, parseErr := strconv.ParseFloat(c.data, 64)
	if parseErr != nil {
		return 0, errors.Wrap(parseErr, "failed to convert cell data to number")
	}
	return value, nil
}
// Int returns the cell data as an int.
// If the cell data is not a numeric value, an error is returned.
func (c *Cell) Int() (int, error) {
	if !c.Is(Numeric) {
		return 0, fmt.Errorf("cell data is not numeric")
	}
	// Parse as float64 first (so fractional textual forms such as "3.0"
	// still convert), then truncate to int.
	value, parseErr := strconv.ParseFloat(c.data, 64)
	if parseErr != nil {
		return 0, errors.Wrap(parseErr, "failed to convert cell data to number")
	}
	return int(value), nil
}
// Int64 returns the cell data as an int64.
// If the cell data is not a numeric value, an error is returned.
func (c *Cell) Int64() (int64, error) {
	if !c.Is(Numeric) {
		return 0, fmt.Errorf("cell data is not numeric")
	}
	// Exact integer strings keep their full int64 precision.
	if i, err := strconv.ParseInt(c.data, 10, 64); err == nil {
		return i, nil
	}
	// Fall back to float parsing for consistency with Int and Float:
	// Numeric cells may carry a fractional textual form (e.g. "3.0") that
	// ParseInt rejects even though the cell is marked Numeric.
	f, err := strconv.ParseFloat(c.data, 64)
	if err != nil {
		return 0, errors.Wrap(err, "failed to convert cell data to number")
	}
	return int64(f), nil
}
// Bool returns the cell data as a bool.
// If the cell data is not a bool value, an error is returned.
func (c *Cell) Bool() (bool, error) {
	if !c.Is(Bool) {
		return false, fmt.Errorf("cell data is not bool")
	}
	b, err := strconv.ParseBool(c.data)
	if err != nil {
		return false, errors.Wrap(err, "failed to convert cell data to bool")
	}
	return b, nil
}
package cloudsmith_api
// EntitlementsPartialUpdate holds the writable fields for a partial update of
// an entitlement token; zero-valued fields are omitted from the JSON request
// body (all fields are tagged omitempty).
type EntitlementsPartialUpdate struct {
	// If enabled, the token will allow downloads based on configured restrictions (if any).
	IsActive bool `json:"is_active,omitempty"`
	// The maximum download bandwidth allowed for the token. Values are expressed as the selected unit of bandwidth. Please note that since downloads are calculated asynchronously (after the download happens), the limit may not be imposed immediately but at a later point.
	LimitBandwidth int32 `json:"limit_bandwidth,omitempty"`
	// Unit in which LimitBandwidth is expressed (see LimitBandwidth).
	LimitBandwidthUnit string `json:"limit_bandwidth_unit,omitempty"`
	// The starting date/time the token is allowed to be used from.
	LimitDateRangeFrom string `json:"limit_date_range_from,omitempty"`
	// The ending date/time the token is allowed to be used until.
	LimitDateRangeTo string `json:"limit_date_range_to,omitempty"`
	// The maximum number of unique clients allowed for the token. Please note that since clients are calculated asynchronously (after the download happens), the limit may not be imposed immediately but at a later point.
	LimitNumClients int32 `json:"limit_num_clients,omitempty"`
	// The maximum number of downloads allowed for the token. Please note that since downloads are calculated asynchronously (after the download happens), the limit may not be imposed immediately but at a later point.
	LimitNumDownloads int32 `json:"limit_num_downloads,omitempty"`
	// The package-based search query to apply to restrict downloads to. This uses the same syntax as the standard search used for repositories, and also supports boolean logic operators such as OR/AND/NOT and parentheses for grouping. This will still allow access to non-package files, such as metadata.
	LimitPackageQuery string `json:"limit_package_query,omitempty"`
	// The path-based search query to apply to restrict downloads to. This supports boolean logic operators such as OR/AND/NOT and parentheses for grouping. The path evaluated does not include the domain name, the namespace, the entitlement code used, the package format, etc. and it always starts with a forward slash.
	LimitPathQuery string `json:"limit_path_query,omitempty"`
	// Free-form metadata attached to the token (schema not specified here).
	Metadata interface{} `json:"metadata,omitempty"`
	// Name assigned to the entitlement token.
	Name string `json:"name,omitempty"`
	// The time at which the scheduled reset period has elapsed and the token limits were automatically reset to zero.
	ScheduledResetAt string `json:"scheduled_reset_at,omitempty"`
	// Period governing the automatic limit reset (see ScheduledResetAt).
	ScheduledResetPeriod string `json:"scheduled_reset_period,omitempty"`
	// The entitlement token value itself.
	Token string `json:"token,omitempty"`
}
} | bindings/go/src/entitlements_partial_update.go | 0.787319 | 0.400251 | entitlements_partial_update.go | starcoder |
package main
//https://projecteuler.net/problem=9
import (
"fmt"
"math"
"flag"
)
// BinetFormula computes the n-th Fibonacci number via Binet's closed form,
// F(n) = round(phi^n / sqrt(5)), printing the result as a side effect.
func BinetFormula(n float64) float64 {
	root5 := math.Sqrt(5)
	goldenRatio := (1 + root5) / 2
	result := math.Round(math.Pow(goldenRatio, n) / root5)
	fmt.Printf("Value at n: %f in fibonnaci sequence is: %f\n", n, result)
	return result
}
// getTriple derives a candidate Pythagorean triple from four consecutive
// Fibonacci numbers starting at index n:
//   a = F(n)*F(n+3), b = 2*F(n+1)*F(n+2), c = F(n+1)^2 + F(n+2)^2.
func getTriple(n float64) [3]float64 {
	a := BinetFormula(n) * BinetFormula(n+3)
	b := 2 * BinetFormula(n+1) * BinetFormula(n+2)
	c := math.Pow(BinetFormula(n+1), 2) + math.Pow(BinetFormula(n+2), 2)
	return [3]float64{a, b, c}
}
// validateConditionals reports whether (firstTriple, secondTriple,
// thirdTriple) is a Pythagorean triple (a^2 + b^2 == c^2) whose components
// sum exactly to maxSum.
func validateConditionals(maxSum float64, firstTriple float64, secondTriple float64, thirdTriple float64) bool {
	isPythagorean := math.Pow(firstTriple, 2)+math.Pow(secondTriple, 2) == math.Pow(thirdTriple, 2)
	sumsToTarget := firstTriple+secondTriple+thirdTriple == maxSum
	return isPythagorean && sumsToTarget
}
// getPythagoreanTriple searches Fibonacci-derived triples for one whose
// components sum to maxSum; returns {0, 0, 0} when no candidate matches.
func getPythagoreanTriple(maxSum float64) [3]float64 {
	for n := float64(0); n < math.Sqrt(maxSum); n++ {
		candidate := getTriple(n)
		if validateConditionals(maxSum, candidate[0], candidate[1], candidate[2]) {
			// Re-derive the triple (reprinting the Fibonacci values),
			// matching the original behavior.
			return getTriple(n)
		}
	}
	return [3]float64{0, 0, 0}
}
// getPythagoreanTripleSquared searches triples of the parameterized form
// (2n, n^2-1, n^2+1) for one whose components sum to maxSum; returns
// {0, 0, 0} when none is found.
func getPythagoreanTripleSquared(maxSum float64) [3]float64 {
	for n := float64(0); n < math.Sqrt(maxSum); n++ {
		a := 2 * n
		b := math.Pow(n, 2) - 1
		c := math.Pow(n, 2) + 1
		if validateConditionals(maxSum, a, b, c) {
			return [3]float64{a, b, c}
		}
	}
	return [3]float64{0, 0, 0}
}
// getPythagoreanTripleBruteForce scans all pairs (i, j) with k = n-i-j and
// returns the first Pythagorean triple summing to n, or {0, 0, 0} if none
// exists.
func getPythagoreanTripleBruteForce(n float64) [3]float64 {
	for i := float64(1); i < n; i++ {
		for j := float64(0); j < n; j++ {
			k := n - i - j
			// fmt.Println("Using the following values: ", i, j, k, n)
			if validateConditionals(n, i, j, k) {
				return [3]float64{i, j, k}
			}
		}
	}
	return [3]float64{0, 0, 0}
}
func main(){
maxNumPtr := flag.Float64("number", 80, "Integer for the max range")
flag.Parse()
solution := getPythagoreanTripleBruteForce(*maxNumPtr)
fmt.Println("Product array is ", solution)
fmt.Println("Answer is: ", int(solution[0] * solution[1] * solution[2]))
} | Problem_9_Euler/main.go | 0.711631 | 0.471771 | main.go | starcoder |
package main
import (
"bytes"
"crypto/sha256"
"encoding/binary"
"fmt"
"math"
"math/big"
"time"
)
const Difficulty = 16 // Static difficulty for PoW calculation (number of leading zero bits required of a valid hash; see Mine)

var Empty [sha256.Size]byte // All zero sha256 value, used as the "previous block hash" of the genesis block
// encodeUint64 encodes a uint64 to big endian notation. This code uses big
// endian in order to make the resulting values more readable for humans.
func encodeUint64(x uint64) []byte {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, x)
	return buf
}
// Block represents a single block in the blockchain. It is linked to the prior
// block via the PreviousBlockHash.
type Block struct {
	Timestamp int64 // Unix seconds at which the block was created (set by NewBlock)
	Data []byte // Blockchain data
	PreviousBlockHash []byte // Previous block hash in order link blocks
	Hash []byte // PoW hash of this block; zero until Mine succeeds
	Nonce uint64 // Nonce used to calculate Hash
}
// NewBlock returns a block that is linked to previousBlockHash.
// The block is not yet mined: Hash and Nonce stay at their zero values until
// Mine succeeds.
func NewBlock(data, previousBlockHash []byte) Block {
	return Block{
		Timestamp:         time.Now().Unix(),
		Data:              data,
		PreviousBlockHash: previousBlockHash,
	}
}
// Verify ensures that the block is valid by hashing timestamp, data and nonce.
// It recomputes the block hash from its fields and compares it to Hash.
func (b Block) Verify() bool {
	payload := bytes.Join([][]byte{
		encodeUint64(uint64(b.Timestamp)),
		b.Data,
		b.PreviousBlockHash,
		encodeUint64(b.Nonce),
	}, []byte{})
	digest := sha256.Sum256(payload)
	return bytes.Equal(digest[:], b.Hash)
}
// Mine attempts to mine the block by scanning nonces in [start, end).
// On success it stores the winning hash and nonce on the block; otherwise an
// error is returned.
func (b *Block) Mine(difficulty uint, start, end uint64) error {
	// A hash is a solution when, interpreted as a big-endian integer, it is
	// strictly below 2^(256-difficulty).
	target := new(big.Int).Lsh(big.NewInt(1), 256-difficulty)
	timestamp := encodeUint64(uint64(b.Timestamp))
	nonce := make([]byte, 8)
	var candidate big.Int
	for i := start; i < end; i++ {
		binary.BigEndian.PutUint64(nonce, i)
		hash := sha256.Sum256(bytes.Join([][]byte{timestamp, b.Data,
			b.PreviousBlockHash, nonce}, []byte{}))
		candidate.SetBytes(hash[:])
		if candidate.Cmp(target) == -1 {
			b.Hash = hash[:]
			b.Nonce = i
			return nil
		}
	}
	return fmt.Errorf("no solution for block")
}
// Blockchain is the blockchain context that houses an array of blocks.
// Blocks are added through Append; the last element is the current tip.
type Blockchain struct {
	blocks []*Block // Chain in append order.
}
// Append adds a block, if valid, to the end of the blockchain.
// The block must link to the current tip (or to the all-zero hash for the
// genesis block) and must pass Verify.
func (b *Blockchain) Append(blk *Block) error {
	expected := Empty[:] // Genesis: chain is empty.
	if len(b.blocks) > 0 {
		expected = b.blocks[len(b.blocks)-1].Hash
	}
	if !bytes.Equal(expected, blk.PreviousBlockHash) {
		return fmt.Errorf("block does not link to previous block %x %x",
			expected, blk.PreviousBlockHash)
	}
	if !blk.Verify() {
		return fmt.Errorf("can't append invalid block")
	}
	b.blocks = append(b.blocks, blk)
	return nil
}
// PrepareBlock returns a block template based on the current height of the
// blockchain. The template links to the current tip (or the all-zero hash
// when the chain is empty) and must still be mined before it can be appended.
func (b *Blockchain) PrepareBlock(data []byte) *Block {
	tip := Empty[:] // Genesis: chain is empty.
	if n := len(b.blocks); n > 0 {
		tip = b.blocks[n-1].Hash
	}
	blk := NewBlock(data, tip)
	return &blk
}
// Block returns a copy of the block at the specified block height.
// An error is returned when the height is out of range.
func (b Blockchain) Block(block int) (Block, error) {
	// The previous check used `block > len(b.blocks)`, which let
	// block == len(b.blocks) (and negative values) through and paniced on
	// the index below; reject both explicitly.
	if block < 0 || block >= len(b.blocks) {
		return Block{}, fmt.Errorf("invalid block: %v", block)
	}
	return *b.blocks[block], nil
}
// Len returns the current blockchain height (the number of appended blocks).
func (b Blockchain) Len() int {
	return len(b.blocks)
}
// NewBlockChain returns a blockchain context that has a genesis block.
func NewBlockChain(data []byte) (*Blockchain, error) {
b := &Blockchain{}
blk := b.PrepareBlock(data)
err := blk.Mine(Difficulty, 0, math.MaxUint64)
if err != nil {
return nil, err
}
err = b.Append(blk)
if err != nil {
return nil, err
}
return b, nil
} | 1_1_pow/blockchain.go | 0.756807 | 0.512876 | blockchain.go | starcoder |
package iso20022
// Chain of parties involved in the settlement of a transaction, including receipts and deliveries, book transfers, treasury deals, or other activities, resulting in the movement of a security or amount of money from one account to another.
// Every party slot is optional and lazily allocated by the matching Add* method.
type SettlementParties36 struct {

	// First party in the settlement chain. In a plain vanilla settlement, it is the Central Securities Depository where the counterparty requests to receive the financial instrument or from where the counterparty delivers the financial instruments.
	Depository *PartyIdentification75 `xml:"Dpstry,omitempty"`

	// Party that, in a settlement chain interacts with the depository.
	Party1 *PartyIdentificationAndAccount106 `xml:"Pty1,omitempty"`

	// Party that, in a settlement chain interacts with the party 1.
	Party2 *PartyIdentificationAndAccount106 `xml:"Pty2,omitempty"`

	// Party that, in a settlement chain interacts with the party 2.
	Party3 *PartyIdentificationAndAccount106 `xml:"Pty3,omitempty"`

	// Party that, in a settlement chain interacts with the party 3.
	Party4 *PartyIdentificationAndAccount106 `xml:"Pty4,omitempty"`

	// Party that, in a settlement chain interacts with the party 4.
	Party5 *PartyIdentificationAndAccount106 `xml:"Pty5,omitempty"`
}
// AddDepository allocates the Depository party and returns it so the caller
// can populate it.
func (s *SettlementParties36) AddDepository() *PartyIdentification75 {
	depository := &PartyIdentification75{}
	s.Depository = depository
	return depository
}
// AddParty1 allocates the Party1 slot and returns it so the caller can
// populate it.
func (s *SettlementParties36) AddParty1() *PartyIdentificationAndAccount106 {
	party := &PartyIdentificationAndAccount106{}
	s.Party1 = party
	return party
}
// AddParty2 allocates the Party2 slot and returns it so the caller can
// populate it.
func (s *SettlementParties36) AddParty2() *PartyIdentificationAndAccount106 {
	party := &PartyIdentificationAndAccount106{}
	s.Party2 = party
	return party
}
// AddParty3 allocates the Party3 slot and returns it so the caller can
// populate it.
func (s *SettlementParties36) AddParty3() *PartyIdentificationAndAccount106 {
	party := &PartyIdentificationAndAccount106{}
	s.Party3 = party
	return party
}
// AddParty4 allocates the Party4 slot and returns it so the caller can
// populate it.
func (s *SettlementParties36) AddParty4() *PartyIdentificationAndAccount106 {
	party := &PartyIdentificationAndAccount106{}
	s.Party4 = party
	return party
}
func (s *SettlementParties36) AddParty5() *PartyIdentificationAndAccount106 {
s.Party5 = new(PartyIdentificationAndAccount106)
return s.Party5
} | SettlementParties36.go | 0.680666 | 0.490602 | SettlementParties36.go | starcoder |
package funk
import (
"reflect"
)
// Intersect returns the intersection between two collections.
// Both arguments must be collections of the same type; the result keeps the
// order (and any duplicates) of matching elements as they appear in y.
func Intersect(x interface{}, y interface{}) interface{} {
	if !IsCollection(x) {
		panic("First parameter must be a collection")
	}
	if !IsCollection(y) {
		panic("Second parameter must be a collection")
	}

	xVal := reflect.ValueOf(x)
	yVal := reflect.ValueOf(y)
	if NotEqual(xVal.Type(), yVal.Type()) {
		panic("Parameters must have the same type")
	}

	// Index the elements of x for O(1) membership tests.
	seen := make(map[interface{}]struct{}, xVal.Len())
	for i := 0; i < xVal.Len(); i++ {
		seen[xVal.Index(i).Interface()] = struct{}{}
	}

	out := reflect.MakeSlice(reflect.SliceOf(xVal.Type().Elem()), 0, 0)
	for i := 0; i < yVal.Len(); i++ {
		if _, ok := seen[yVal.Index(i).Interface()]; ok {
			out = reflect.Append(out, yVal.Index(i))
		}
	}
	return out.Interface()
}
// IntersectString returns the intersection between two collections of string.
// The result preserves the order (and duplicates) of matching elements in y;
// it is empty when either input is empty.
func IntersectString(x []string, y []string) []string {
	if len(x) == 0 || len(y) == 0 {
		return []string{}
	}

	inX := make(map[string]struct{}, len(x))
	for _, s := range x {
		inX[s] = struct{}{}
	}

	result := []string{}
	for _, s := range y {
		if _, found := inX[s]; found {
			result = append(result, s)
		}
	}
	return result
}
// Difference returns the difference between two collections: the first
// result holds the elements of x absent from y, the second the elements of
// y absent from x. Both arguments must be collections of the same type.
//
// NOTE(review): each membership test delegates to Contains, which appears
// to scan linearly, making this O(n*m) — confirm before using on large inputs.
func Difference(x interface{}, y interface{}) (interface{}, interface{}) {
	if !IsCollection(x) {
		panic("First parameter must be a collection")
	}
	if !IsCollection(y) {
		panic("Second parameter must be a collection")
	}

	xValue := reflect.ValueOf(x)
	xType := xValue.Type()
	yValue := reflect.ValueOf(y)
	yType := yValue.Type()

	if NotEqual(xType, yType) {
		panic("Parameters must have the same type")
	}

	leftSlice := reflect.MakeSlice(reflect.SliceOf(xType.Elem()), 0, 0)
	rightSlice := reflect.MakeSlice(reflect.SliceOf(yType.Elem()), 0, 0)

	// x \ y
	for i := 0; i < xValue.Len(); i++ {
		v := xValue.Index(i).Interface()
		if !Contains(y, v) {
			leftSlice = reflect.Append(leftSlice, xValue.Index(i))
		}
	}
	// y \ x
	for i := 0; i < yValue.Len(); i++ {
		v := yValue.Index(i).Interface()
		if !Contains(x, v) {
			rightSlice = reflect.Append(rightSlice, yValue.Index(i))
		}
	}
	return leftSlice.Interface(), rightSlice.Interface()
}
// DifferenceString returns the difference between two collections of strings.
func DifferenceString(x []string, y []string) ([]string, []string) {
leftSlice := []string{}
rightSlice := []string{}
for _, v := range x {
if ContainsString(y, v) == false {
leftSlice = append(leftSlice, v)
}
}
for _, v := range y {
if ContainsString(x, v) == false {
rightSlice = append(rightSlice, v)
}
}
return leftSlice, rightSlice
} | vendor/github.com/thoas/go-funk/intersection.go | 0.801548 | 0.491578 | intersection.go | starcoder |
package models
import (
i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e "time"
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// SignInActivity
type SignInActivity struct {
// Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
additionalData map[string]interface{}
// The last non-interactive sign-in date for a specific user. You can use this field to calculate the last time a client signed in to the directory on behalf of a user. Because some users may use clients to access tenant resources rather than signing into your tenant directly, you can use the non-interactive sign-in date to along with lastSignInDateTime to identify inactive users. The timestamp represents date and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is: '2014-01-01T00:00:00Z'. Azure AD maintains non-interactive sign-ins going back to May 2020. For more information about using the value of this property, see Manage inactive user accounts in Azure AD.
lastNonInteractiveSignInDateTime *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time
// Request identifier of the last non-interactive sign-in performed by this user.
lastNonInteractiveSignInRequestId *string
// The last interactive sign-in date and time for a specific user. You can use this field to calculate the last time a user signed in to the directory with an interactive authentication method. This field can be used to build reports, such as inactive users. The timestamp represents date and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is: '2014-01-01T00:00:00Z'. Azure AD maintains interactive sign-ins going back to April 2020. For more information about using the value of this property, see Manage inactive user accounts in Azure AD.
lastSignInDateTime *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time
// Request identifier of the last interactive sign-in performed by this user.
lastSignInRequestId *string
}
// NewSignInActivity instantiates a new signInActivity and sets the default values.
func NewSignInActivity()(*SignInActivity) {
m := &SignInActivity{
}
m.SetAdditionalData(make(map[string]interface{}));
return m
}
// CreateSignInActivityFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value
func CreateSignInActivityFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
return NewSignInActivity(), nil
}
// GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *SignInActivity) GetAdditionalData()(map[string]interface{}) {
if m == nil {
return nil
} else {
return m.additionalData
}
}
// GetFieldDeserializers the deserialization information for the current model
func (m *SignInActivity) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))
res["lastNonInteractiveSignInDateTime"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetTimeValue()
if err != nil {
return err
}
if val != nil {
m.SetLastNonInteractiveSignInDateTime(val)
}
return nil
}
res["lastNonInteractiveSignInRequestId"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetLastNonInteractiveSignInRequestId(val)
}
return nil
}
res["lastSignInDateTime"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetTimeValue()
if err != nil {
return err
}
if val != nil {
m.SetLastSignInDateTime(val)
}
return nil
}
res["lastSignInRequestId"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetLastSignInRequestId(val)
}
return nil
}
return res
}
// GetLastNonInteractiveSignInDateTime gets the lastNonInteractiveSignInDateTime property value. The last non-interactive sign-in date for a specific user. You can use this field to calculate the last time a client signed in to the directory on behalf of a user. Because some users may use clients to access tenant resources rather than signing into your tenant directly, you can use the non-interactive sign-in date to along with lastSignInDateTime to identify inactive users. The timestamp represents date and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is: '2014-01-01T00:00:00Z'. Azure AD maintains non-interactive sign-ins going back to May 2020. For more information about using the value of this property, see Manage inactive user accounts in Azure AD.
func (m *SignInActivity) GetLastNonInteractiveSignInDateTime()(*i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time) {
if m == nil {
return nil
} else {
return m.lastNonInteractiveSignInDateTime
}
}
// GetLastNonInteractiveSignInRequestId gets the lastNonInteractiveSignInRequestId property value. Request identifier of the last non-interactive sign-in performed by this user.
func (m *SignInActivity) GetLastNonInteractiveSignInRequestId()(*string) {
if m == nil {
return nil
} else {
return m.lastNonInteractiveSignInRequestId
}
}
// GetLastSignInDateTime gets the lastSignInDateTime property value. The last interactive sign-in date and time for a specific user. You can use this field to calculate the last time a user signed in to the directory with an interactive authentication method. This field can be used to build reports, such as inactive users. The timestamp represents date and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is: '2014-01-01T00:00:00Z'. Azure AD maintains interactive sign-ins going back to April 2020. For more information about using the value of this property, see Manage inactive user accounts in Azure AD.
func (m *SignInActivity) GetLastSignInDateTime()(*i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time) {
if m == nil {
return nil
} else {
return m.lastSignInDateTime
}
}
// GetLastSignInRequestId gets the lastSignInRequestId property value. Request identifier of the last interactive sign-in performed by this user.
func (m *SignInActivity) GetLastSignInRequestId()(*string) {
if m == nil {
return nil
} else {
return m.lastSignInRequestId
}
}
// Serialize serializes information the current object
func (m *SignInActivity) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
{
err := writer.WriteTimeValue("lastNonInteractiveSignInDateTime", m.GetLastNonInteractiveSignInDateTime())
if err != nil {
return err
}
}
{
err := writer.WriteStringValue("lastNonInteractiveSignInRequestId", m.GetLastNonInteractiveSignInRequestId())
if err != nil {
return err
}
}
{
err := writer.WriteTimeValue("lastSignInDateTime", m.GetLastSignInDateTime())
if err != nil {
return err
}
}
{
err := writer.WriteStringValue("lastSignInRequestId", m.GetLastSignInRequestId())
if err != nil {
return err
}
}
{
err := writer.WriteAdditionalData(m.GetAdditionalData())
if err != nil {
return err
}
}
return nil
}
// SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *SignInActivity) SetAdditionalData(value map[string]interface{})() {
if m != nil {
m.additionalData = value
}
}
// SetLastNonInteractiveSignInDateTime sets the lastNonInteractiveSignInDateTime property value. The last non-interactive sign-in date for a specific user. You can use this field to calculate the last time a client signed in to the directory on behalf of a user. Because some users may use clients to access tenant resources rather than signing into your tenant directly, you can use the non-interactive sign-in date to along with lastSignInDateTime to identify inactive users. The timestamp represents date and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is: '2014-01-01T00:00:00Z'. Azure AD maintains non-interactive sign-ins going back to May 2020. For more information about using the value of this property, see Manage inactive user accounts in Azure AD.
func (m *SignInActivity) SetLastNonInteractiveSignInDateTime(value *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time)() {
if m != nil {
m.lastNonInteractiveSignInDateTime = value
}
}
// SetLastNonInteractiveSignInRequestId sets the lastNonInteractiveSignInRequestId property value. Request identifier of the last non-interactive sign-in performed by this user.
func (m *SignInActivity) SetLastNonInteractiveSignInRequestId(value *string)() {
if m != nil {
m.lastNonInteractiveSignInRequestId = value
}
}
// SetLastSignInDateTime sets the lastSignInDateTime property value. The last interactive sign-in date and time for a specific user. You can use this field to calculate the last time a user signed in to the directory with an interactive authentication method. This field can be used to build reports, such as inactive users. The timestamp represents date and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is: '2014-01-01T00:00:00Z'. Azure AD maintains interactive sign-ins going back to April 2020. For more information about using the value of this property, see Manage inactive user accounts in Azure AD.
func (m *SignInActivity) SetLastSignInDateTime(value *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time)() {
if m != nil {
m.lastSignInDateTime = value
}
}
// SetLastSignInRequestId sets the lastSignInRequestId property value. Request identifier of the last interactive sign-in performed by this user.
func (m *SignInActivity) SetLastSignInRequestId(value *string)() {
if m != nil {
m.lastSignInRequestId = value
}
} | models/sign_in_activity.go | 0.793626 | 0.418459 | sign_in_activity.go | starcoder |
package year2018day06
import (
"strings"
"github.com/alokmenghrajani/adventofcode2021/utils"
"github.com/alokmenghrajani/adventofcode2021/utils/grids"
)
func Part1(input string) int {
grid := grids.NewGrid(-1)
nodesX := []int{}
nodesY := []int{}
lines := strings.Split(input, "\n")
for n, line := range lines {
pieces := strings.Split(line, ", ")
x := utils.MustAtoi(pieces[0])
nodesX = append(nodesX, x)
y := utils.MustAtoi(pieces[1])
nodesY = append(nodesY, y)
grid.Set(x, y, n)
}
// fill the grid
minX, maxX := grid.SizeX()
minY, maxY := grid.SizeY()
for x := minX - 1; x <= maxX+1; x++ {
for y := minY - 1; y <= maxY+1; y++ {
t := find(x, y, nodesX, nodesY)
grid.Set(x, y, t)
}
}
// exclude all the borders
excluded := make([]bool, len(nodesX))
for x := minX - 1; x <= maxX+1; x++ {
t := grid.Get(x, minY-1).(int)
if t != -1 {
excluded[t] = true
}
t = grid.Get(x, maxY+1).(int)
if t != -1 {
excluded[t] = true
}
}
for y := minY - 1; y <= maxY+1; y++ {
t := grid.Get(minX-1, y).(int)
if t != -1 {
excluded[t] = true
}
t = grid.Get(maxX+1, y).(int)
if t != -1 {
excluded[t] = true
}
}
// find the max
max := 0
for i := 0; i < len(excluded); i++ {
if excluded[i] {
continue
}
count := 0
for x := minX - 1; x <= maxX+1; x++ {
for y := minY - 1; y <= maxY+1; y++ {
t := grid.Get(x, y).(int)
if t == i {
count++
}
}
}
if count > max {
max = count
}
}
return max
}
func find(x, y int, nodesX, nodesY []int) int {
bestDistance := -1
bestN := 0
bestNode := -1
for i := 0; i < len(nodesX); i++ {
d := utils.Abs(x-nodesX[i]) + utils.Abs(y-nodesY[i])
if d < bestDistance || bestDistance == -1 {
bestDistance = d
bestN = 1
bestNode = i
} else if d == bestDistance {
bestN++
}
}
if bestN > 1 {
return -1
}
return bestNode
}
func Part2(input string) int {
grid := grids.NewGrid(false)
nodesX := []int{}
nodesY := []int{}
lines := strings.Split(input, "\n")
for n, line := range lines {
pieces := strings.Split(line, ", ")
x := utils.MustAtoi(pieces[0])
nodesX = append(nodesX, x)
y := utils.MustAtoi(pieces[1])
nodesY = append(nodesY, y)
grid.Set(x, y, n)
}
// count
r := 0
minX, maxX := grid.SizeX()
minY, maxY := grid.SizeY()
for x := minX - 10000; x <= maxX+10000; x++ {
for y := minY - 10000; y <= maxY+10000; y++ {
t := sumDistances(x, y, nodesX, nodesY)
if t < 10000 {
r++
}
}
}
return r
}
func sumDistances(x, y int, nodesX, nodesY []int) int {
r := 0
for i := 0; i < len(nodesX); i++ {
r += utils.Abs(x-nodesX[i]) + utils.Abs(y-nodesY[i])
}
return r
} | 2018/year2018day06/day06.go | 0.554229 | 0.473049 | day06.go | starcoder |
package golden
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"path/filepath"
"testing"
"github.com/spf13/afero"
"github.com/stretchr/testify/assert"
)
// G is the object that assertions are made on.
type G struct {
	t testing.TB
	fs afero.Fs
	// ShouldUpdate makes Assert rewrite the golden file with the received
	// data instead of comparing against it.
	ShouldUpdate bool
	// FixtureDir, FixturePrefix and FixtureSuffix determine a fixture's
	// path: FixtureDir/FixturePrefix+name+FixtureSuffix.
	FixtureDir string
	FixturePrefix string
	FixtureSuffix string
}
// New creates a new, ready to use G.
// Defaults: fixtures live in "testdata" with a ".golden" suffix and no
// prefix, on the real OS filesystem.
func New(t testing.TB) G {
	return newG(t)
}
// newG constructs a G backed by the OS filesystem with the default fixture
// location settings.
func newG(t testing.TB) G {
	g := G{t: t, fs: afero.NewOsFs()}
	g.FixtureDir = "testdata"
	g.FixturePrefix = ""
	g.FixtureSuffix = ".golden"
	return g
}
// Assert asserts, that the given byte slice has the same content as a file,
// whose path is derived from the given name (usually
// "testdata/"+name+".golden"). When ShouldUpdate is set, the fixture is
// (re)written with got instead of being compared.
func (g G) Assert(name string, got []byte) {
	if !g.ShouldUpdate {
		assert.NoError(g.t, g.readAndCompare(name, got))
		return
	}
	assert.NoError(g.t, g.write(name, got))
}
// AssertStruct asserts, that the given struct has the same content as a file,
// whose path is derived from the given name (usually
// "testdata/"+name+".golden"). This is ensured by JSON-encoding the given
// struct and comparing it against the file's contents.
func (g G) AssertStruct(name string, got interface{}) {
	var encoded bytes.Buffer
	if err := json.NewEncoder(&encoded).Encode(got); err != nil {
		g.t.Errorf("unable to encode instance of %T: %v", got, err)
		return
	}
	g.Assert(name, encoded.Bytes())
}
// write stores data as the fixture for name, creating parent directories as
// needed (see writeFile).
func (g G) write(name string, data []byte) error {
	path := g.computeFilePath(name)
	return g.writeFile(path, data)
}
// readAndCompare loads the fixture for name and asserts its contents equal
// got. The returned error reports read failures only; content mismatches are
// reported through the embedded testing.TB.
func (g G) readAndCompare(name string, got []byte) error {
	want, err := g.readFile(g.computeFilePath(name))
	if err != nil {
		return fmt.Errorf("read file: %w", err)
	}
	assert.Equal(g.t, want, got)
	return nil
}
// readFile reads the entire file at path from the configured filesystem.
func (g G) readFile(path string) ([]byte, error) {
	f, err := g.fs.Open(path)
	if err != nil {
		return nil, fmt.Errorf("open: %w", err)
	}
	defer func() { _ = f.Close() }()

	contents, err := ioutil.ReadAll(f)
	if err != nil {
		return nil, fmt.Errorf("read all: %w", err)
	}
	return contents, nil
}
func (g G) writeFile(path string, data []byte) error {
err := g.fs.MkdirAll(filepath.Dir(path), 0755)
if err != nil {
return fmt.Errorf("mkdir all: %w", err)
}
file, err := g.fs.Create(path)
if err != nil {
return fmt.Errorf("create: %w", err)
}
defer func() { _ = file.Close() }()
n, err := file.Write(data)
if err != nil {
return fmt.Errorf("write: %w", err)
}
if n != len(data) {
return fmt.Errorf("could only write %d of %d bytes", n, len(data))
}
return nil
}
func (g G) computeFilePath(name string) string {
return filepath.Join(g.FixtureDir, g.FixturePrefix+name+g.FixtureSuffix)
} | golden.go | 0.644113 | 0.425187 | golden.go | starcoder |
package parser
import (
"bytes"
"path"
"path/filepath"
)
// isInclude parses {{...}}[...], that contains a path between the {{, the [...] syntax contains
// an address to select which lines to include. It is treated as an opaque string and just given
// to readInclude.
//
// On success it returns the path, the (possibly nil) address and the number
// of input bytes consumed; on any syntax mismatch it returns ("", nil, 0).
func (p *Parser) isInclude(data []byte) (filename string, address []byte, consumed int) {
	i := skipCharN(data, 0, ' ', 3) // start with up to 3 spaces
	if len(data[i:]) < 3 {
		return "", nil, 0
	}
	// must open with "{{"
	if data[i] != '{' || data[i+1] != '{' {
		return "", nil, 0
	}
	start := i + 2

	// find the end delimiter
	i = skipUntilChar(data, i, '}')
	if i+1 >= len(data) {
		return "", nil, 0
	}
	end := i
	i++
	if data[i] != '}' { // second '}' of the closing "}}"
		return "", nil, 0
	}
	filename = string(data[start:end])

	if i+1 < len(data) && data[i+1] == '[' { // potential address specification
		start := i + 2
		end = skipUntilChar(data, start, ']')
		if end >= len(data) {
			return "", nil, 0
		}
		address = data[start:end]
		return filename, address, end + 1
	}
	return filename, address, i + 1
}
// readInclude resolves an include via the user-supplied ReadIncludeFn hook.
// When no hook is configured the include silently expands to nothing (nil).
func (p *Parser) readInclude(from, file string, address []byte) []byte {
	if p.Opts.ReadIncludeFn != nil {
		return p.Opts.ReadIncludeFn(from, file, address)
	}
	return nil
}

// isCodeInclude parses <{{...}} which is similar to isInclude the returned bytes are, however wrapped in a code block.
func (p *Parser) isCodeInclude(data []byte) (filename string, address []byte, consumed int) {
	i := skipCharN(data, 0, ' ', 3) // start with up to 3 spaces
	if len(data[i:]) < 3 {
		return "", nil, 0
	}
	if data[i] != '<' {
		return "", nil, 0
	}
	start := i

	// delegate the {{...}}[...] part to isInclude, then account for the
	// bytes consumed here (the leading spaces and the '<').
	filename, address, consumed = p.isInclude(data[i+1:])
	if consumed == 0 {
		return "", nil, 0
	}
	return filename, address, start + consumed + 1
}
// readCodeInclude acts like include except the returned bytes are wrapped in a fenced code block.
// The file's extension (without its leading dot) becomes the fence's info string.
func (p *Parser) readCodeInclude(from, file string, address []byte) []byte {
	data := p.readInclude(from, file, address)
	if data == nil {
		return nil
	}

	buf := &bytes.Buffer{}
	buf.WriteString("```")
	if ext := path.Ext(file); ext != "" { // starts with a dot
		buf.WriteString(" " + ext[1:] + "\n")
	} else {
		buf.WriteByte('\n')
	}
	buf.Write(data)
	buf.WriteString("```\n")
	return buf.Bytes()
}
// incStack hold the current stack of chained includes. Each value is the containing
// path of the file being parsed.
type incStack struct {
	stack []string
}

// newIncStack returns an empty include stack.
func newIncStack() *incStack {
	return &incStack{stack: []string{}}
}

// Push records the directory containing p as the new top of the stack.
// Absolute paths are taken as-is; relative paths are resolved against the
// directory currently on top.
func (i *incStack) Push(p string) {
	if path.IsAbs(p) {
		i.stack = append(i.stack, path.Dir(p))
		return
	}
	base := ""
	if n := len(i.stack); n > 0 {
		base = i.stack[n-1]
	}
	i.stack = append(i.stack, path.Dir(filepath.Join(base, p)))
}

// Pop discards the top of the stack; it is a no-op on an empty stack.
func (i *incStack) Pop() {
	if n := len(i.stack); n > 0 {
		i.stack = i.stack[:n-1]
	}
}

// Last returns the top of the stack, or "" when the stack is empty.
func (i *incStack) Last() string {
	if n := len(i.stack); n > 0 {
		return i.stack[n-1]
	}
	return ""
}
package g3d
import (
"errors"
"math"
"github.com/angelsolaorbaiceta/inkgeom/nums"
)
var (
	// Canonical unit vectors along the three axes. The MakeVersor errors are
	// discarded because these fixed components are known to be non-zero.
	IVersor, _ = MakeVersor(1, 0, 0)
	JVersor, _ = MakeVersor(0, 1, 0)
	KVersor, _ = MakeVersor(0, 0, 1)
	// Zero is the zero vector.
	Zero = MakeVector(0, 0, 0)
)

var (
	// ErrZeroVersor happens when a versor is created either from all zero projections or by
	// normalizing a zero vector.
	ErrZeroVersor = errors.New("can't create a versor if all components are zero")
	// ErrZeroVector results from an operation that requires a vector with a non-zero length.
	ErrZeroVector = errors.New("can't use a vector with zero length")
)

// A Vector is a direction with length in space.
// Vectors have three projections: X, Y and Z.
type Vector struct {
	x, y, z float64
}

// MakeVector creates a new vector given the X, Y and Z projections.
func MakeVector(x, y, z float64) *Vector {
	return &Vector{x, y, z}
}
// MakeNonZeroVector creates a new vector given the X, Y and Z projections, or returns an ErrZeroVector
// error if the three of them are zero, as this would result in a vector with zero length.
func MakeNonZeroVector(x, y, z float64) (*Vector, error) {
	length := computeLength(x, y, z)
	if nums.IsCloseToZero(length) {
		return nil, ErrZeroVector
	}
	return MakeVector(x, y, z), nil
}

// MakeVersor creates a versor (a vector of unit length) given the vector components X, Y and Z.
// Returns an error if all three components are zero, as the zero vector can't be normalized.
func MakeVersor(x, y, z float64) (*Vector, error) {
	length := computeLength(x, y, z)
	if nums.IsCloseToZero(length) {
		return nil, ErrZeroVersor
	}
	// normalize every component by the vector's length
	return &Vector{x / length, y / length, z / length}, nil
}
// X is the vector's projection in the X axis.
func (v *Vector) X() float64 {
	return v.x
}

// Y is the vector's projection in the Y axis.
func (v *Vector) Y() float64 {
	return v.y
}

// Z is the vector's projection in the Z axis.
func (v *Vector) Z() float64 {
	return v.z
}

// Length is the magnitude of the vector.
func (v *Vector) Length() float64 {
	return computeLength(v.x, v.y, v.z)
}

// IsVersor evaluates to true if the vector has a length of 1.
func (v *Vector) IsVersor() bool {
	return nums.IsCloseToOne(v.Length())
}

// IsZero returns true if all X, Y and Z componets of this vector are zero.
func (v *Vector) IsZero() bool {
	return nums.IsCloseToZero(v.x) && nums.IsCloseToZero(v.y) && nums.IsCloseToZero(v.z)
}

// ToVersor returns a versor with the same direction as this vector.
// Returns an error if all three components are zero, as the zero vector can't be normalized.
func (v *Vector) ToVersor() (*Vector, error) {
	if v.IsVersor() {
		// already unit length: avoid renormalizing
		return v, nil
	}
	return MakeVersor(v.x, v.y, v.z)
}

// Opposite returns a new vector in the opposite direction as this one.
func (v *Vector) Opposite() *Vector {
	return MakeVector(-v.x, -v.y, -v.z)
}

// Scaled creates a new vector with the projections scaled the given factor.
func (v *Vector) Scaled(factor float64) *Vector {
	return MakeVector(v.x*factor, v.y*factor, v.z*factor)
}

// IsParallelTo checks whether this and other vectors have the same direction (are parallel).
// Parallelism is detected by a (near-)zero cross product.
func (v *Vector) IsParallelTo(other *Vector) bool {
	return v.CrossTimes(other).IsZero()
}

// IsPerpendicularTo checks whether this and other vectors have perpendicular directions,
// detected by a (near-)zero dot product.
func (v *Vector) IsPerpendicularTo(other *Vector) bool {
	return nums.IsCloseToZero(v.DotTimes(other))
}

// Equals checks whether this and other vector have equal X, Y and Z projections.
// Comparison is delegated to the package-level projectablesEqual helper.
func (v *Vector) Equals(other *Vector) bool {
	return projectablesEqual(v, other)
}
// computeLength returns the Euclidean norm of the vector (x, y, z).
func computeLength(x, y, z float64) float64 {
	squared := x*x + y*y + z*z
	return math.Sqrt(squared)
}
package main
// MinHeap is a binary min-heap of graph nodes ordered by estimatedDistance,
// augmented with an id -> slice-index map so arbitrary nodes can be located
// (and re-sifted) without a linear scan.
type MinHeap struct {
	array               []*Node
	nodePositionsInHeap map[string]int
}

// newMinHeap indexes every node by id and heapifies the given slice in place.
func newMinHeap(array []*Node) *MinHeap {
	nodePositionsInHeap := map[string]int{}
	for i, node := range array {
		nodePositionsInHeap[node.id] = i
	}
	heap := &MinHeap{array: array, nodePositionsInHeap: nodePositionsInHeap}
	heap.buildHeap()
	return heap
}

// IsEmpty reports whether the heap holds no nodes.
func (h *MinHeap) IsEmpty() bool {
	return len(h.array) == 0
}

// Remove pops and returns the node with the smallest estimatedDistance,
// or nil when the heap is empty. The root is swapped with the last element,
// the slice is truncated, and the new root is sifted down.
// O(log(n)) time | O(1) space
func (h *MinHeap) Remove() *Node {
	if h.IsEmpty() {
		return nil
	}
	h.swap(0, len(h.array)-1)
	peeked := h.array[len(h.array)-1]
	h.array = h.array[0 : len(h.array)-1]
	delete(h.nodePositionsInHeap, peeked.id)
	h.siftDown(0, len(h.array)-1)
	return peeked
}

// Update restores the heap property after node's estimatedDistance changed.
// NOTE(review): only siftUp is performed, which assumes distances only ever
// decrease (as in Dijkstra/A*) — confirm callers never increase a key.
// O(log(n)) time | O(1) space
func (h *MinHeap) Update(node *Node) {
	h.siftUp(h.nodePositionsInHeap[node.id])
}

// Insert appends node at the end of the array and sifts it up into place.
// O(log(n)) time | O(1) space
func (h *MinHeap) Insert(node *Node) {
	h.array = append(h.array, node)
	h.nodePositionsInHeap[node.id] = len(h.array) - 1
	h.siftUp(len(h.array) - 1)
}
// siftUp moves the element at index toward the root while it is smaller than
// its parent.
// O(log(n)) time | O(1) space
func (h *MinHeap) siftUp(index int) {
	parentIndex := (index - 1) / 2
	for index > 0 && h.array[index].estimatedDistance < h.array[parentIndex].estimatedDistance {
		h.swap(index, parentIndex)
		index = parentIndex
		parentIndex = (index - 1) / 2
	}
}

// siftDown moves the element at start toward the leaves, repeatedly swapping
// it with its smaller child, until the heap property holds. end is the last
// valid index; -1 is used as a "no right child" sentinel below.
// O(log(n)) time | O(1) space
func (h *MinHeap) siftDown(start, end int) {
	leftChildIdx := start*2 + 1
	for leftChildIdx <= end {
		rightChildIdx := -1
		if start*2+2 <= end {
			rightChildIdx = start*2 + 2
		}
		// pick the smaller of the two children
		indexToSwap := leftChildIdx
		c1Distance := h.array[leftChildIdx].estimatedDistance
		if rightChildIdx > -1 && h.array[rightChildIdx].estimatedDistance < c1Distance {
			indexToSwap = rightChildIdx
		}
		if h.array[indexToSwap].estimatedDistance < h.array[start].estimatedDistance {
			h.swap(start, indexToSwap)
			start = indexToSwap
			leftChildIdx = start*2 + 1
		} else {
			return
		}
	}
}

// buildHeap heapifies the whole array by sifting down from the last parent.
// NOTE(review): the loop starts at first+1, one past the last parent
// ((len-2)/2); sifting a leaf is a no-op so this is harmless but looks like
// an off-by-one — confirm.
// O(n) time | O(1) space
func (h *MinHeap) buildHeap() {
	first := (len(h.array) - 2) / 2
	for index := first + 1; index >= 0; index-- {
		h.siftDown(index, len(h.array)-1)
	}
}

// containsNode reports whether node is currently tracked by the heap.
func (h *MinHeap) containsNode(node *Node) bool {
	_, found := h.nodePositionsInHeap[node.id]
	return found
}
func (h MinHeap) swap(i, j int) {
h.nodePositionsInHeap[h.array[i].id] = j
h.nodePositionsInHeap[h.array[j].id] = i
h.array[i], h.array[j] = h.array[j], h.array[i]
} | src/famous-algorithms/a-star/go/heap.go | 0.652684 | 0.528168 | heap.go | starcoder |
package compchain
import (
"github.com/abc-inc/goava/primitives/bools"
"github.com/abc-inc/goava/primitives/floats"
"github.com/abc-inc/goava/primitives/ints"
"github.com/abc-inc/goava/primitives/uints"
"strings"
)
// active is the chain state in which no comparison has yet produced a
// non-zero result: every Compare* call actually performs its comparison and
// may transition the chain to a terminal less/greater state via classify.
type active struct{}

// CompareFunc compares left and right with the supplied comparator.
func (c active) CompareFunc(left, right interface{}, cmp func(l, r interface{}) int) ComparisonChain {
	return classify(cmp(left, right))
}

func (c active) CompareInt(left, right int) ComparisonChain {
	return classify(ints.Compare(left, right))
}

func (c active) CompareInt8(left, right int8) ComparisonChain {
	return classify(ints.Compare8(left, right))
}

func (c active) CompareInt16(left, right int16) ComparisonChain {
	return classify(ints.Compare16(left, right))
}

func (c active) CompareInt32(left, right int32) ComparisonChain {
	return classify(ints.Compare32(left, right))
}

func (c active) CompareInt64(left, right int64) ComparisonChain {
	return classify(ints.Compare64(left, right))
}

func (c active) CompareUInt(left, right uint) ComparisonChain {
	return classify(uints.Compare(left, right))
}

func (c active) CompareUInt8(left, right uint8) ComparisonChain {
	return classify(uints.Compare8(left, right))
}

func (c active) CompareUInt16(left, right uint16) ComparisonChain {
	return classify(uints.Compare16(left, right))
}

func (c active) CompareUInt32(left, right uint32) ComparisonChain {
	return classify(uints.Compare32(left, right))
}

func (c active) CompareUInt64(left, right uint64) ComparisonChain {
	return classify(uints.Compare64(left, right))
}

func (c active) CompareUIntPtr(left, right uintptr) ComparisonChain {
	return classify(uints.ComparePtr(left, right))
}

func (c active) CompareFloat32(left, right float32) ComparisonChain {
	return classify(floats.Compare32(left, right))
}

func (c active) CompareFloat64(left, right float64) ComparisonChain {
	return classify(floats.Compare64(left, right))
}

// CompareTrueFirst sorts true before false, hence the swapped arguments.
func (c active) CompareTrueFirst(left, right bool) ComparisonChain {
	return classify(bools.Compare(right, left)) // reversed
}

// CompareFalseFirst sorts false before true (natural bool order).
func (c active) CompareFalseFirst(left, right bool) ComparisonChain {
	return classify(bools.Compare(left, right))
}

func (c active) CompareString(left, right string) ComparisonChain {
	return classify(strings.Compare(left, right))
}

// Result reports 0 because no comparison in the chain has differed yet.
func (c active) Result() int {
	return 0
}
func classify(result int) ComparisonChain {
switch {
case result < 0:
return less
case result > 0:
return greater
default:
return a
}
} | collect/compchain/active.go | 0.701304 | 0.433382 | active.go | starcoder |
package evaluation
import (
"fmt"
"regexp"
"strings"
)
// String type for clause attribute evaluation
type String string

// NewString creates a string with the object value.
// It returns an error (wrapping ErrWrongTypeAssertion) when value is not a
// Go string.
func NewString(value interface{}) (String, error) {
	str, ok := value.(string)
	if ok {
		newStr := String(str)
		return newStr, nil
	}
	return "", fmt.Errorf("%v: cant cast to a string", ErrWrongTypeAssertion)
}

// String implement Stringer interface
func (s String) String() string {
	return string(s)
}

// stringOperator applies fn to every string element of values and reports
// whether any of them matches; non-string elements are skipped.
// (The previous comment claimed only the first element was used — the loop
// actually ORs over all of them.)
func stringOperator(values []interface{}, fn func(string) bool) bool {
	if len(values) > 0 {
		for _, val := range values {
			data, ok := val.(string)
			if !ok {
				continue
			}
			if fn(data) {
				return true
			}
		}
	}
	return false
}
// StartsWith check if the string starts with any of the values.
func (s String) StartsWith(values []interface{}) bool {
	return stringOperator(values, func(c string) bool {
		return strings.HasPrefix(string(s), c)
	})
}

// EndsWith check if the string ends with any of the values.
func (s String) EndsWith(values []interface{}) bool {
	return stringOperator(values, func(c string) bool {
		return strings.HasSuffix(string(s), c)
	})
}

// Match check if the string match the regex value; regex compile errors are
// treated as "no match".
func (s String) Match(values []interface{}) bool {
	return stringOperator(values, func(c string) bool {
		if matched, err := regexp.MatchString(c, string(s)); err == nil {
			return matched
		}
		return false
	})
}

// Contains check if the string contains the value.
func (s String) Contains(values []interface{}) bool {
	return stringOperator(values, func(c string) bool {
		return strings.Contains(string(s), c)
	})
}

// EqualSensitive check if the string and value are equal (case sensitive).
func (s String) EqualSensitive(values []interface{}) bool {
	return stringOperator(values, func(c string) bool {
		return string(s) == c
	})
}

// Equal check if the string and value are equal (case insensitive).
func (s String) Equal(values []interface{}) bool {
	return stringOperator(values, func(c string) bool {
		return strings.EqualFold(string(s), c)
	})
}

// GreaterThan checks if the string is greater than the value
// (lexicographic comparison of the lowercased strings).
func (s String) GreaterThan(values []interface{}) bool {
	return stringOperator(values, func(c string) bool {
		return strings.ToLower(string(s)) > strings.ToLower(c)
	})
}

// GreaterThanEqual checks if the string is greater or equal than the value.
func (s String) GreaterThanEqual(values []interface{}) bool {
	return stringOperator(values, func(c string) bool {
		return strings.ToLower(string(s)) >= strings.ToLower(c)
	})
}

// LessThan checks if the string is less than the value.
func (s String) LessThan(values []interface{}) bool {
	return stringOperator(values, func(c string) bool {
		return strings.ToLower(string(s)) < strings.ToLower(c)
	})
}

// LessThanEqual checks if the string is less or equal than the value.
func (s String) LessThanEqual(values []interface{}) bool {
	return stringOperator(values, func(c string) bool {
		return strings.ToLower(string(s)) <= strings.ToLower(c)
	})
}
// In checks if the string exist in slice of strings (value)
func (s String) In(values []interface{}) bool {
for _, x := range values {
if strings.EqualFold(string(s), x.(string)) {
return true
}
}
return false
} | string.go | 0.72487 | 0.48182 | string.go | starcoder |
package udf
// ExtentInterface abstracts the different extent encodings (short, small,
// long, extended) used by UDF allocation descriptors.
// NOTE(review): all current implementations define SetLength on a value
// receiver, so the assignment mutates a copy and is effectively a no-op —
// confirm whether these should be pointer receivers.
type ExtentInterface interface {
	GetLocation() uint64
	GetLength() uint32
	SetLength(uint32)
	GetPartition() uint16
	IsNotRecorded() bool
	HasExtended() bool
}

// Extent is a short allocation descriptor: 32-bit length + 32-bit location.
type Extent struct {
	Length   uint32
	Location uint32
}

// GetPartition always reports partition 0 for short extents.
func (e Extent) GetPartition() uint16 {
	return 0
}

func (e Extent) GetLocation() uint64 {
	return uint64(e.Location)
}

func (e Extent) GetLength() uint32 {
	return e.Length
}

// SetLength assigns the length.
// NOTE(review): value receiver — the caller's Extent is unchanged.
func (e Extent) SetLength(length uint32) {
	e.Length = length
}

// IsNotRecorded checks the extent-type flag bits against the two
// "not recorded" states (constants defined elsewhere in the package).
func (e Extent) IsNotRecorded() bool {
	return (e.Length & UDF_EXTENT_FLAG_MASK) == EXT_NOT_RECORDED_ALLOCATED || (e.Length & UDF_EXTENT_FLAG_MASK) == EXT_NOT_RECORDED_NOT_ALLOCATED
}

// HasExtended reports whether the top two type bits equal 3.
func (e Extent) HasExtended() bool {
	return (e.Length >> 30) == 3
}

// NewExtent decodes a short allocation descriptor from little-endian bytes
// (rl_u32 is a package-level little-endian reader).
func NewExtent(b []byte) Extent {
	return Extent{
		Length:   rl_u32(b[0:]),
		Location: rl_u32(b[4:]),
	}
}

// ExtentSmall is a compact descriptor: 16-bit length + 48-bit location.
type ExtentSmall struct {
	Length   uint16
	Location uint64
}

func (e ExtentSmall) GetPartition() uint16 {
	return 0
}

func (e ExtentSmall) GetLocation() uint64 {
	return uint64(e.Location)
}

func (e ExtentSmall) GetLength() uint32 {
	return uint32(e.Length)
}

// SetLength truncates length to 16 bits and assigns it.
// NOTE(review): value receiver — the caller's ExtentSmall is unchanged.
func (e ExtentSmall) SetLength(length uint32) {
	e.Length = uint16(length)
}

func (e ExtentSmall) IsNotRecorded() bool {
	return false
}

// HasExtended shifts the 16-bit length right by 30.
// NOTE(review): a uint16 shifted by 30 is always 0, so this always returns
// false — looks like a copy/paste bug from the 32-bit variant; confirm.
func (e ExtentSmall) HasExtended() bool {
	return (e.Length >> 30) == 3
}

// NewExtentSmall decodes a compact descriptor (16-bit length, 48-bit
// location) from little-endian bytes.
func NewExtentSmall(b []byte) ExtentSmall {
	return ExtentSmall{
		Length:   rl_u16(b[0:]),
		Location: rl_u48(b[2:]),
	}
}
// ExtentLong is a long allocation descriptor: 32-bit length plus a logical
// block address (block number + partition reference).
type ExtentLong struct {
	Length   uint32
	Location LbAddr
}

func (e ExtentLong) GetPartition() uint16 {
	return e.Location.PartitionReferenceNumber
}

func (e ExtentLong) GetLocation() uint64 {
	return uint64(e.Location.LogicalBlockNumber)
}

func (e ExtentLong) GetLength() uint32 {
	return e.Length
}

// SetLength assigns the length.
// NOTE(review): value receiver — the caller's ExtentLong is unchanged.
func (e ExtentLong) SetLength(length uint32) {
	e.Length = length
}

func (e ExtentLong) HasExtended() bool {
	return (e.Length >> 30) == 3
}

func (e ExtentLong) IsNotRecorded() bool {
	return (e.Length & UDF_EXTENT_FLAG_MASK) == EXT_NOT_RECORDED_ALLOCATED || (e.Length & UDF_EXTENT_FLAG_MASK) == EXT_NOT_RECORDED_NOT_ALLOCATED
}

// NewExtentLong decodes a long allocation descriptor from little-endian bytes.
func NewExtentLong(b []byte) ExtentLong {
	return ExtentLong{
		Length:   rl_u32(b[0:]),
		Location: new(LbAddr).FromBytes(b[4:]),
	}
}

// ExtentExtended is an extended allocation descriptor carrying separate
// extent, recorded and information lengths.
type ExtentExtended struct {
	ExtentLength   uint32
	RecordedLength uint32
	InfoLength     uint32
	Location       LbAddr
}

func (e ExtentExtended) GetPartition() uint16 {
	return e.Location.PartitionReferenceNumber
}

func (e ExtentExtended) GetLocation() uint64 {
	return uint64(e.Location.LogicalBlockNumber)
}

// GetLength returns the information length (not ExtentLength).
func (e ExtentExtended) GetLength() uint32 {
	return e.InfoLength
}

// SetLength assigns the information length.
// NOTE(review): value receiver — the caller's ExtentExtended is unchanged.
func (e ExtentExtended) SetLength(length uint32) {
	e.InfoLength = length
}

// HasExtended inspects the top bits of GetLength(), i.e. of InfoLength.
// NOTE(review): presumably the type flags live in ExtentLength — confirm
// which field carries them.
func (e ExtentExtended) HasExtended() bool {
	return (e.GetLength() >> 30) == 3
}

func (e ExtentExtended) IsNotRecorded() bool {
	return false
}

// NewExtentExtended decodes an extended allocation descriptor from
// little-endian bytes.
func NewExtentExtended(b []byte) ExtentExtended {
	return ExtentExtended{
		ExtentLength:   rl_u32(b[0:]),
		RecordedLength: rl_u32(b[4:]),
		InfoLength:     rl_u32(b[8:]),
		Location:       new(LbAddr).FromBytes(b[12:]),
	}
}

// AED is an Allocation Extent Descriptor: a generic descriptor header
// followed by the previous extent location and the descriptors' length.
type AED struct {
	Descriptor                       Descriptor
	PreviousAllocationExtentLocation uint32
	LengthOfAllocationDescriptors    uint32
}

// FromBytes decodes the AED in place (the embedded Descriptor occupies the
// first 16 bytes) and returns a copy of it.
func (a *AED) FromBytes(b []byte) AED {
	a.Descriptor.FromBytes(b)
	a.PreviousAllocationExtentLocation = rl_u32(b[16:])
	a.LengthOfAllocationDescriptors = rl_u32(b[20:])
	return *a
}

// LbAddr is a logical block address: block number within a partition.
type LbAddr struct {
	LogicalBlockNumber       uint32
	PartitionReferenceNumber uint16
}

// FromBytes decodes a 6-byte lb_addr (4-byte block number, 2-byte partition
// reference) in place and returns a copy of it.
func (l *LbAddr) FromBytes(data []byte) LbAddr {
	l.LogicalBlockNumber = rl_u32(data[0:])
	l.PartitionReferenceNumber = rl_u16(data[4:])
	return *l
}
package curves
import (
math2 "github.com/wieku/danser-go/bmath"
"math"
)
// Bezier is an n-degree Bézier curve over the given control points that
// supports approximately arc-length parameterized evaluation. The last*
// fields cache the state of the previous PointAt query so that consecutive,
// nearby queries walk incrementally instead of restarting from t=0.
type Bezier struct {
	points           []math2.Vector2d
	ApproxLength     float64 // approximate arc length, computed in NewBezier
	lengthCalculated bool    // NOTE(review): never set in this file — confirm it is used elsewhere
	lastPos          math2.Vector2d
	lastC            float64 // last natural parameter reached by PointAt
	lastWidth        float64 // arc length accumulated up to lastPos
	lastT            float64 // NOTE(review): never set in this file
}

// NewBezier builds the curve and estimates its arc length by sampling the
// curve at ceil(control-polyline length) evenly spaced parameter values and
// summing the segment lengths.
func NewBezier(points []math2.Vector2d) *Bezier {
	bz := &Bezier{points: points, lastPos: points[0]}

	// length of the control polyline, used as the sample count
	pointLength := 0.0
	for i := 1; i < len(points); i++ {
		pointLength += points[i].Dst(points[i-1])
	}

	pointLength = math.Ceil(pointLength)
	for i := 1; i <= int(pointLength); i++ {
		bz.ApproxLength += bz.NPointAt(float64(i) / pointLength).Dst(bz.NPointAt(float64(i-1) / pointLength))
	}
	return bz
}

// NPointAt evaluates the curve at parameter t in [0, 1] using the Bernstein
// basis (the "natural", non-arc-length parameterization).
func (bz Bezier) NPointAt(t float64) math2.Vector2d {
	x := 0.0
	y := 0.0
	n := len(bz.points) - 1
	for i := 0; i <= n; i++ {
		b := bernstein(int64(i), int64(n), t)
		x += bz.points[i].X * b
		y += bz.points[i].Y * b
	}
	return math2.NewVec2d(x, y)
}
// It's not a neat solution, but it works.
// This calculates a point on the bezier with (approximately) constant
// velocity: the desired arc length is ApproxLength*t, and the cached cursor
// (lastC/lastPos/lastWidth) is stepped forward or backward in natural
// parameter until the accumulated arc length reaches it.
func (bz *Bezier) PointAt(t float64) math2.Vector2d {
	desiredWidth := bz.ApproxLength * t
	if desiredWidth == bz.lastWidth {
		return bz.lastPos
	} else if desiredWidth > bz.lastWidth {
		// walk forward along the curve
		for bz.lastWidth < desiredWidth {
			pt := bz.NPointAt(bz.lastC)
			lsW := bz.lastWidth + pt.Dst(bz.lastPos)
			if lsW > desiredWidth {
				// overshoot: rewind the parameter one step and stop
				bz.lastC -= 1.0 / float64(bz.ApproxLength*2-1)
				return bz.lastPos
			}
			bz.lastWidth = lsW
			bz.lastPos = pt
			bz.lastC += 1.0 / float64(bz.ApproxLength*2-1)
		}
	} else {
		// walk backward along the curve
		for bz.lastWidth > desiredWidth {
			pt := bz.NPointAt(bz.lastC)
			lsW := bz.lastWidth - pt.Dst(bz.lastPos)
			if lsW < desiredWidth {
				bz.lastC += 1.0 / float64(bz.ApproxLength*2-1)
				return bz.lastPos
			}
			bz.lastWidth = lsW
			bz.lastPos = pt
			bz.lastC -= 1.0 / float64(bz.ApproxLength*2-1)
		}
	}
	return bz.lastPos
}

// GetLength returns the precomputed approximate arc length.
func (bz Bezier) GetLength() float64 {
	return bz.ApproxLength
}

// GetStartAngle is the angle from the first control point toward a point one
// arc-length unit into the curve.
func (bz Bezier) GetStartAngle() float64 {
	return bz.points[0].AngleRV(bz.NPointAt(1.0 / bz.ApproxLength))
}

// GetEndAngle is the angle from the last control point toward a point one
// arc-length unit before the end of the curve.
func (bz Bezier) GetEndAngle() float64 {
	return bz.points[len(bz.points)-1].AngleRV(bz.NPointAt((bz.ApproxLength - 1) / bz.ApproxLength))
}
// min returns the smaller of a and b.
func min(a, b int64) int64 {
	if b < a {
		return b
	}
	return a
}
// BinomialCoefficient computes "n choose k" with the multiplicative formula,
// exploiting the symmetry C(n, k) == C(n, n-k) to minimize iterations.
// Out-of-range k (negative or greater than n) yields 0.
func BinomialCoefficient(n, k int64) int64 {
	if k < 0 || k > n {
		return 0
	}
	if k == 0 || k == n {
		return 1
	}
	if n-k < k {
		k = n - k
	}
	result := int64(1)
	for i := int64(0); i < k; i++ {
		result = result * (n - i) / (i + 1)
	}
	return result
}
// bernstein evaluates the i-th Bernstein basis polynomial of degree n at t:
// C(n, i) * t^i * (1-t)^(n-i).
func bernstein(i, n int64, t float64) float64 {
	coeff := float64(BinomialCoefficient(n, i))
	return coeff * math.Pow(t, float64(i)) * math.Pow(1.0-t, float64(n-i))
}
// calcLength is empty and never called.
// NOTE(review): dead code — likely a leftover; consider removing together
// with the unused lengthCalculated/lastT fields.
func calcLength() {
}

// GetPoints samples num points along the (approximately) arc-length
// parameterized curve, from t=0 up to t=1.
// NOTE(review): the value receiver means PointAt's incremental cache updates
// happen on a copy and are discarded when this call returns — confirm this
// is intended.
func (ln Bezier) GetPoints(num int) []math2.Vector2d {
	t0 := 1 / float64(num-1)
	points := make([]math2.Vector2d, num)
	t := 0.0
	for i := 0; i < num; i += 1 {
		points[i] = ln.PointAt(t)
		t += t0
	}
	return points
}
package physics
import (
"fmt"
"math"
"math/big"
"strings"
"time"
"git.sr.ht/~kisom/proxima/rat"
)
var (
	// lyInMeters is the number of meters in one light-year.
	lyInMeters = rat.Float(9.46073047258e+15)
	// oneAU is the number of meters in one astronomical unit.
	oneAU = rat.Int64(149597870691)
	// Thresholds used by DistanceString to pick a display unit.
	hundredthLY = LightyearsToMeters(0.01)
	hundredthAU = AstronomicalUnit(0.01)
	tenthAU     = AstronomicalUnit(0.1)
)

// LightyearsToMeters convert lightyears to meters.
func LightyearsToMeters(ly float64) *big.Rat {
	lyf := rat.Float(ly)
	return rat.Mul(lyf, lyInMeters)
}

// MetersToLightyears converts meters to lightyears.
func MetersToLightyears(x *big.Rat) float64 {
	ly := rat.Div(x, lyInMeters)
	lyf, _ := ly.Float64()
	return lyf
}

// VelocityToPercentC returns the percent lightspeed for a given velocity
// (C is a package-level constant defined elsewhere).
func VelocityToPercentC(v *big.Rat) float64 {
	pct, _ := rat.Div(v, C).Float64()
	return pct
}

// PercentCToVelocity computes the velocity in m/s from a percent of lightspeed.
func PercentCToVelocity(pct float64) *big.Rat {
	cfrac := rat.Float(pct)
	return rat.Mul(cfrac, C)
}

// AstronomicalUnit return the number of meters for a given number of AU.
func AstronomicalUnit(au float64) *big.Rat {
	return rat.Mul(rat.Float(au), oneAU)
}

// ToAstronomicalUnit converts a distance in meters to AU.
func ToAstronomicalUnit(x *big.Rat) *big.Rat {
	return rat.Div(x, oneAU)
}

// DistanceString prints out the distance in a human-readable form, choosing
// light-years above 0.01 ly, AU above 0.01 au (with 1 or 2 decimals), and
// kilometers otherwise.
func DistanceString(d *big.Rat) string {
	switch {
	case d.Cmp(hundredthLY) > 0:
		return fmt.Sprintf("%0.4f ly", MetersToLightyears(d))
	// technically, au should be used in relation to the sun...
	case d.Cmp(tenthAU) > 0:
		return fmt.Sprintf("%s au", rat.Div(d, oneAU).FloatString(1))
	case d.Cmp(hundredthAU) > 0:
		return fmt.Sprintf("%s au", rat.Div(d, oneAU).FloatString(2))
	default:
		return rat.Div(d, rat.K).FloatString(1) + " km"
	}
}
const (
secondsInDay = 86400
secondsInYear = 365.25 * secondsInDay
)
// TimeString prints out a time duration in a human-readable form. The
// Go stdlib version stops at seconds, but for printing clock drift, we'll
// need to go further.
func TimeString(dur time.Duration) string {
var s []string
delta := math.Round(dur.Seconds())
years := delta / secondsInYear
if years >= 1 {
s = append(s, fmt.Sprintf("%0.0fy", math.Floor(years)))
delta -= math.Floor(years) * secondsInYear
}
days := delta / secondsInDay
if days >= 1 {
s = append(s, fmt.Sprintf("%0.0fd", math.Floor(days)))
delta -= math.Floor(days) * secondsInDay
}
if years < 1 && days < 7 {
hours := delta / 3600.0
if hours > 0 {
s = append(s, fmt.Sprintf("%0.0fh", math.Floor(hours)))
delta -= math.Floor(hours) * 3600.0
}
mins := delta / 60.0
if mins > 0 {
s = append(s, fmt.Sprintf("%0.0fm", math.Floor(mins)))
delta -= math.Floor(mins) * 60.0
}
if delta > 0 {
s = append(s, fmt.Sprintf("%0.0fs", math.Floor(delta)))
}
if len(s) == 0 {
return "0s"
}
}
return strings.Join(s, " ")
} | physics/conv.go | 0.826397 | 0.550245 | conv.go | starcoder |
package proj
import (
"github.com/everystreet/go-proj/v6/cproj"
"github.com/golang/geo/r2"
"github.com/golang/geo/r3"
"github.com/golang/geo/s1"
"github.com/golang/geo/s2"
)
// XY is a 2D cartesian coordinate.
// Each component is serialized as 8 bytes of an IEEE double inside the
// PJ_COORD byte array: X at offsets 0-7, Y at 8-15 (float64ToDoubleBytes /
// doubleBytesToFloat64 are package-level converters).
type XY r2.Point

// PutCoordinate updates coord with the values in c.
func (c XY) PutCoordinate(coord *cproj.PJ_COORD) {
	for data, i := float64ToDoubleBytes(c.X), 0; i < 8; i++ {
		coord[i+0] = data[i]
	}
	for data, i := float64ToDoubleBytes(c.Y), 0; i < 8; i++ {
		coord[i+8] = data[i]
	}
}

// FromCoordinate updates c with the values in coord.
func (c *XY) FromCoordinate(coord cproj.PJ_COORD) {
	c.X = doubleBytesToFloat64(coord[0:8])
	c.Y = doubleBytesToFloat64(coord[8:16])
}

// XYZ is a 3D cartesian coordinate (X at bytes 0-7, Y at 8-15, Z at 16-23).
type XYZ r3.Vector

// PutCoordinate updates coord with the values in c.
func (c XYZ) PutCoordinate(coord *cproj.PJ_COORD) {
	for data, i := float64ToDoubleBytes(c.X), 0; i < 8; i++ {
		coord[i+0] = data[i]
	}
	for data, i := float64ToDoubleBytes(c.Y), 0; i < 8; i++ {
		coord[i+8] = data[i]
	}
	for data, i := float64ToDoubleBytes(c.Z), 0; i < 8; i++ {
		coord[i+16] = data[i]
	}
}

// FromCoordinate updates c with the values in coord.
func (c *XYZ) FromCoordinate(coord cproj.PJ_COORD) {
	c.X = doubleBytesToFloat64(coord[0:8])
	c.Y = doubleBytesToFloat64(coord[8:16])
	c.Z = doubleBytesToFloat64(coord[16:24])
}

// XYZT is a 3D cartesian coordinate with a time component (T at bytes 24-31).
type XYZT struct {
	XYZ
	T float64
}

// PutCoordinate updates coord with the values in c.
func (c XYZT) PutCoordinate(coord *cproj.PJ_COORD) {
	c.XYZ.PutCoordinate(coord)
	for data, i := float64ToDoubleBytes(c.T), 0; i < 8; i++ {
		coord[i+24] = data[i]
	}
}

// FromCoordinate updates c with the values in coord.
func (c *XYZT) FromCoordinate(coord cproj.PJ_COORD) {
	c.XYZ.FromCoordinate(coord)
	c.T = doubleBytesToFloat64(coord[24:32])
}

// LP is a geodetic coordinate expressed in radians
// (longitude at bytes 0-7, latitude at 8-15).
type LP s2.LatLng

// PutCoordinate updates coord with the values in c.
func (c LP) PutCoordinate(coord *cproj.PJ_COORD) {
	for data, i := float64ToDoubleBytes(c.Lng.Radians()), 0; i < 8; i++ {
		coord[i+0] = data[i]
	}
	for data, i := float64ToDoubleBytes(c.Lat.Radians()), 0; i < 8; i++ {
		coord[i+8] = data[i]
	}
}

// FromCoordinate updates c with the values in coord.
func (c *LP) FromCoordinate(coord cproj.PJ_COORD) {
	c.Lng = s1.Angle(doubleBytesToFloat64(coord[0:8]))
	c.Lat = s1.Angle(doubleBytesToFloat64(coord[8:16]))
}

// LPZ is a geodetic coordinate expressed in radians, with a vertical
// component (Z at bytes 16-23).
type LPZ struct {
	LP
	Z float64
}

// PutCoordinate updates coord with the values in c.
func (c LPZ) PutCoordinate(coord *cproj.PJ_COORD) {
	c.LP.PutCoordinate(coord)
	for data, i := float64ToDoubleBytes(c.Z), 0; i < 8; i++ {
		coord[i+16] = data[i]
	}
}

// FromCoordinate updates c with the values in coord.
func (c *LPZ) FromCoordinate(coord cproj.PJ_COORD) {
	c.LP.FromCoordinate(coord)
	c.Z = doubleBytesToFloat64(coord[16:24])
}

// LPZT is a geodetic coordinate expressed in radians, with vertical and time
// components (T at bytes 24-31).
type LPZT struct {
	LPZ
	T float64
}

// PutCoordinate updates coord with the values in c.
func (c LPZT) PutCoordinate(coord *cproj.PJ_COORD) {
	c.LPZ.PutCoordinate(coord)
	for data, i := float64ToDoubleBytes(c.T), 0; i < 8; i++ {
		coord[i+24] = data[i]
	}
}

// FromCoordinate updates c with the values in coord.
func (c *LPZT) FromCoordinate(coord cproj.PJ_COORD) {
	c.LPZ.FromCoordinate(coord)
	c.T = doubleBytesToFloat64(coord[24:32])
}
package core
import (
"math"
"github.com/Laughs-In-Flowers/warhola/lib/canvas"
)
type ResampleFilter = canvas.ResampleFilter
// stringToFilter maps a filter name to the corresponding package-level
// ResampleFilter; unknown names fall back to NearestNeighbor.
// NOTE(review): the case strings here do not all match the filters' own name
// fields (e.g. the case "mitchellnetravali" vs the MitchellNetravali filter's
// name string "mitchelnetravali" below) — confirm which spelling is
// canonical before relying on round-tripping names.
func stringToFilter(s string) ResampleFilter {
	switch s {
	case "box":
		return Box
	case "linear":
		return Linear
	case "gaussian":
		return Gaussian
	case "mitchellnetravali":
		return MitchellNetravali
	case "catmullrom":
		return CatmullRom
	case "lanczos":
		return Lanczos
	case "bartlett":
		return Bartlett
	case "hermite":
		return Hermite
	case "bspline":
		return BSpline
	case "hann":
		return Hann
	case "hamming":
		return Hamming
	case "blackman":
		return Blackman
	case "welch":
		return Welch
	case "cosine":
		return Cosine
	}
	return NearestNeighbor
}
// Filter table. Each ResampleFilter literal is (name, support radius,
// kernel); the kernel is evaluated over [-support, support].
var (
	NearestNeighbor = canvas.NearestNeighbor

	// Box: constant kernel over [-0.5, 0.5].
	Box = ResampleFilter{
		"box",
		0.5,
		func(x float64) float64 {
			if math.Abs(x) < 0.5 {
				return 1
			}
			return 0
		},
	}

	Linear = canvas.Linear

	// Gaussian: 2^(-(2x)^2) rescaled so the kernel reaches 0 at |x| = 1.
	Gaussian = ResampleFilter{
		"gaussian",
		1.0,
		func(x float64) float64 {
			x = math.Abs(x)
			if x < 1.0 {
				exp := 2.0
				x *= 2.0
				y := math.Pow(0.5, math.Pow(x, exp))
				base := math.Pow(0.5, math.Pow(2, exp))
				return (y - base) / (1 - base)
			}
			return 0
		},
	}

	// MitchellNetravali: BC-spline with b = c = 1/3.
	// NOTE(review): the name string "mitchelnetravali" is missing an 'l'
	// compared with the "mitchellnetravali" case in stringToFilter — confirm.
	// NOTE(review): this kernel and CatmullRom's duplicate the bcspline
	// formula defined at the bottom of this file (modulo the x <= 2.0
	// boundary) — consider consolidating.
	MitchellNetravali = ResampleFilter{
		"mitchelnetravali",
		2.0,
		func(x float64) float64 {
			b := 1.0 / 3
			c := 1.0 / 3
			var w [4]float64
			x = math.Abs(x)
			if x < 1.0 {
				w[0] = 0
				w[1] = 6 - 2*b
				w[2] = (-18 + 12*b + 6*c) * x * x
				w[3] = (12 - 9*b - 6*c) * x * x * x
			} else if x <= 2.0 {
				w[0] = 8*b + 24*c
				w[1] = (-12*b - 48*c) * x
				w[2] = (6*b + 30*c) * x * x
				w[3] = (-b - 6*c) * x * x * x
			} else {
				return 0
			}
			return (w[0] + w[1] + w[2] + w[3]) / 6
		},
	}

	// CatmullRom: BC-spline with b = 0, c = 0.5.
	CatmullRom = ResampleFilter{
		"catmullrom",
		2.0,
		func(x float64) float64 {
			b := 0.0
			c := 0.5
			var w [4]float64
			x = math.Abs(x)
			if x < 1.0 {
				w[0] = 0
				w[1] = 6 - 2*b
				w[2] = (-18 + 12*b + 6*c) * x * x
				w[3] = (12 - 9*b - 6*c) * x * x * x
			} else if x <= 2.0 {
				w[0] = 8*b + 24*c
				w[1] = (-12*b - 48*c) * x
				w[2] = (6*b + 30*c) * x * x
				w[3] = (-b - 6*c) * x * x * x
			} else {
				return 0
			}
			return (w[0] + w[1] + w[2] + w[3]) / 6
		},
	}

	// Lanczos: 3-lobed windowed sinc.
	Lanczos = ResampleFilter{
		"lanczos",
		3.0,
		func(x float64) float64 {
			x = math.Abs(x)
			if x == 0 {
				return 1.0
			} else if x < 3.0 {
				return (3.0 * math.Sin(math.Pi*x) * math.Sin(math.Pi*(x/3.0))) / (math.Pi * math.Pi * x * x)
			}
			return 0.0
		},
	}

	// The following are sinc kernels under various windows (support 3).
	Bartlett = ResampleFilter{
		"bartlett",
		3.0,
		func(x float64) float64 {
			x = math.Abs(x)
			if x < 3.0 {
				return sinc(x) * (3.0 - x) / 3.0
			}
			return 0
		},
	}

	// Hermite: BC-spline with b = c = 0.
	Hermite = ResampleFilter{
		"hermite",
		1.0,
		func(x float64) float64 {
			x = math.Abs(x)
			if x < 1.0 {
				return bcspline(x, 0.0, 0.0)
			}
			return 0
		},
	}

	// BSpline: BC-spline with b = 1, c = 0.
	BSpline = ResampleFilter{
		"bspline",
		2.0,
		func(x float64) float64 {
			x = math.Abs(x)
			if x < 2.0 {
				return bcspline(x, 1.0, 0.0)
			}
			return 0
		},
	}

	Hann = ResampleFilter{
		"hann",
		3.0,
		func(x float64) float64 {
			x = math.Abs(x)
			if x < 3.0 {
				return sinc(x) * (0.5 + 0.5*math.Cos(math.Pi*x/3.0))
			}
			return 0
		},
	}

	Hamming = ResampleFilter{
		"hamming",
		3.0,
		func(x float64) float64 {
			x = math.Abs(x)
			if x < 3.0 {
				return sinc(x) * (0.54 + 0.46*math.Cos(math.Pi*x/3.0))
			}
			return 0
		},
	}

	Blackman = ResampleFilter{
		"blackman",
		3.0,
		func(x float64) float64 {
			x = math.Abs(x)
			if x < 3.0 {
				return sinc(x) * (0.42 - 0.5*math.Cos(math.Pi*x/3.0+math.Pi) + 0.08*math.Cos(2.0*math.Pi*x/3.0))
			}
			return 0
		},
	}

	Welch = ResampleFilter{
		"welch",
		3.0,
		func(x float64) float64 {
			x = math.Abs(x)
			if x < 3.0 {
				return sinc(x) * (1.0 - (x * x / 9.0))
			}
			return 0
		},
	}

	Cosine = ResampleFilter{
		"cosine",
		3.0,
		func(x float64) float64 {
			x = math.Abs(x)
			if x < 3.0 {
				return sinc(x) * math.Cos((math.Pi/2.0)*(x/3.0))
			}
			return 0
		},
	}

	// ResampleFilters lists every filter defined above, in declaration order.
	ResampleFilters = []ResampleFilter{
		NearestNeighbor,
		Box,
		Linear,
		Gaussian,
		MitchellNetravali,
		CatmullRom,
		Lanczos,
		Bartlett,
		Hermite,
		BSpline,
		Hann,
		Hamming,
		Blackman,
		Welch,
		Cosine,
	}
)
func sinc(x float64) float64 {
if x == 0 {
return 1
}
return math.Sin(math.Pi*x) / (math.Pi * x)
}
func bcspline(x, b, c float64) float64 {
x = math.Abs(x)
if x < 1.0 {
return ((12-9*b-6*c)*x*x*x + (-18+12*b+6*c)*x*x + (6 - 2*b)) / 6
}
if x < 2.0 {
return ((-b-6*c)*x*x*x + (6*b+30*c)*x*x + (-12*b-48*c)*x + (8*b + 24*c)) / 6
}
return 0
} | lib/core/resample_filter.go | 0.567098 | 0.484197 | resample_filter.go | starcoder |
package waf
/*
This file contains operations and types specific to WAF Custom Rule Sets.
*/
import (
"fmt"
)
// CustomRuleSetDetail is a detailed representation of a custom rule set.
// A custom rule set defines custom threat assessment criteria.
type CustomRuleSetDetail struct {
	// Directives contains the custom rules.
	// Each directive object defines a custom rule via the sec_rule object.
	// You may create up to 10 custom rules.
	Directives []Directive `json:"directive"`
	// Name indicates the name of the custom rule set.
	Name string `json:"name,omitempty"`
}
// CustomRuleSetLight is a lightweight representation of a Custom Rule Set,
// as returned when listing rule sets (see GetAllCustomRuleSets).
type CustomRuleSetLight struct {
	// ID indicates the system-defined ID for the custom rule set.
	ID string `json:"id"`
	// LastModifiedDate indicates the date and time at which the custom rule
	// set was last modified.
	// Syntax:
	// 	MM/DD/YYYYhh:mm:ss [AM|PM]
	LastModifiedDate string `json:"last_modified_date"`
	// Name indicates the name of the custom rule set.
	Name string `json:"name"`
}
// Directive wraps a single custom rule.
// Each directive object defines a custom rule via the sec_rule object.
type Directive struct {
	// SecRule defines the custom rule.
	SecRule SecRule `json:"sec_rule"`
}
// SecRule defines a custom rule.
type SecRule struct {
	// Action determines whether the string identified in a variable object
	// will be transformed and the metadata that will be assigned to
	// malicious traffic.
	Action Action `json:"action"`
	// ChainedRules contains additional criteria that must be satisfied to
	// identify a malicious request.
	ChainedRules []ChainedRule `json:"chained_rule,omitempty"`
	// Name indicates the name assigned to this custom rule.
	Name string `json:"name,omitempty"`
	// Operator indicates the comparison that will be performed against the
	// request element(s) identified within a variable object.
	Operator Operator `json:"operator"`
	// Variables contains criteria that identifies a request element.
	Variables []Variable `json:"variable"`
}
/*
Action determines whether the value derived from the
request element identified in a variable object will be transformed
and the metadata that will be used to identify malicious traffic.
*/
type Action struct {
/*
Determines the custom ID that will be assigned to this custom rule.
This custom ID is exposed via the Threats Dashboard.
Valid values fall within this range: 66000000 - 66999999
Note: This field is only applicable for the action object that
resides in the root of the sec_rule object.
Default Value: Random number
*/
ID string `json:"id,omitempty"`
/*
Determines the rule message that will be assigned to this custom rule.
This message is exposed via the Threats Dashboard.
Note: This field is only applicable for the action object that resides
in the root of the sec_rule object.
Default Value: Blank
*/
Message string `json:"msg,omitempty"`
/*
Determines the set of transformations that will be applied to the value
derived from the request element identified in a variable object
(i.e., source value).
Transformations are always applied to the source value, regardless of
the number of transformations that have been defined.
Valid values are:
NONE: Indicates that the source value should not be modified.
LOWERCASE: Indicates that the source value should be converted to
lowercase characters.
URLDECODE: Indicates that the source value should be URL decoded.
This transformation is useful when the source value has
been URL encoded twice.
REMOVENULLS: Indicates that null values should be removed from
the source value.
Note: A criterion is satisfied if the source value or any of the
modified string values meet the conditions defined by the operator object.
*/
Transformations []string `json:"t,omitempty"`
}
// ChainedRule describes an additional set of criteria that must be
// satisfied in order to identify a malicious request. Each object within
// the chained_rule array is one such set.
type ChainedRule struct {
	// Action determines whether the string value derived from the request
	// element identified in a variable object will be transformed and the
	// metadata that will be used to identify malicious traffic.
	Action Action `json:"action"`
	// Operator indicates the comparison that will be performed on the string
	// value(s) derived from the request element(s) defined within the
	// variable array.
	Operator Operator `json:"operator"`
	// Variables identifies each request element for which a comparison will
	// be made.
	Variables []Variable `json:"variable"`
}
// Variable identifies a request element for which a comparison will be
// made, via the properties below. (Entries of the SecRule/ChainedRule
// variable arrays.)
type Variable struct {
	/*
		Determines the request element that will be assessed.
		Valid values are:
			ARGS_POST |
			GEO |
			QUERY_STRING |
			REMOTE_ADDR |
			REQUEST_BODY |
			REQUEST_COOKIES |
			REQUEST_HEADERS |
			REQUEST_METHOD |
			REQUEST_URI

		Note: If a request element consists of one or more key-value pairs,
		then you may identify a key via a match object.
		If is_count has been disabled, then you may identify a specific
		value via the operator object.
	*/
	Type string `json:"type"`
	// Contains comparison settings for the request element identified by the
	// type property.
	Matches []Match `json:"match,omitempty"`
	/*
		Determines whether a comparison will be performed between the operator
		object and a string value or the number of matches found.
		Valid values are:
		true: A counter will increment whenever the request element defined by
		this variable object is found. The operator object will perform a
		comparison against this number.
		** Note: If you enable is_count, then you must also set the type
		property to EQ.**
		false: The operator object will perform a comparison against the string
		value derived from the request element defined by this variable object.
	*/
	IsCount bool `json:"is_count,omitempty"`
}
// Operator describes the comparison that will be performed on the request
// element(s) defined within a variable object using its properties:
type Operator struct {
/*
Indicates whether a condition will be satisfied when the value derived
from the request element defined within a variable object matches or
does not match the value property.
Valid values are:
True: Does not match
False: Matches
*/
IsNegated bool `json:"is_negated,omitempty"`
/*
Indicates how the system will interpret the comparison between the value
property and the value derived from the request element defined within
a variable object.
Valid values are:
RX:Indicates that the string value derived from the request element
must satisfy the regular expression defined in the value property.
STREQ: Indicates that the string value derived from the request
element must be an exact match to the value property.
CONTAINS: Indicates that the value property must contain the string
value derived from the request element.
BEGINSWITH: Indicates that the value property must start with the
string value derived from the request element.
ENDSWITH: Indicates that the value property must end with the string
value derived from the request element.
EQ: Indicates that the number derived from the variable object must
be an exact match to the value property.
Note: You should only use EQ when the is_count property
has been enabled.
IPMATCH: Requires that the request's IP address either be contained
by an IP block or be an exact match to an IP address defined in
the values property. Only use IPMATCH with the
REMOTE_ADDR variable.
*/
Type string `json:"type"`
/*
Indicates a value that will be compared against the string or number
value derived from the request element defined within a variable object.
Note: If you are identifying traffic via a URL path (REQUEST_URI),
then you should specify a URL path pattern that starts directly after
the hostname. Exclude a protocol or a hostname when defining this property.
Sample values:
/marketing
/800001/mycustomerorigin
*/
Value string `json:"value,omitempty"`
}
// Match determines the comparison conditions for the request element
// identified by the enclosing Variable's type property.
type Match struct {
	/*
		Determines whether this condition is satisfied when the request element
		identified by the variable object is found or not found.
			True: Not found
			False: Found
	*/
	IsNegated bool `json:"is_negated,omitempty"`
	/*
		Determines whether the value property will be interpreted as a
		regular expression. Valid values are:
			True: Regular expression
			False: Default value. Literal value.
	*/
	IsRegex bool `json:"is_regex,omitempty"`
	/*
		Restricts the match condition defined by the type property to
		the specified value.

		Example:
		If the type property is set to REQUEST_HEADERS and this property is
		set to User-Agent, then this match condition is restricted to the
		User-Agent request header.
		If the value property is omitted, then this match condition applies
		to all request headers.
	*/
	Value string `json:"value,omitempty"`
}
// GetCustomRuleSetResponse represents the response from the WAF API when
// retrieving a custom rule set.
type GetCustomRuleSetResponse struct {
	// ID indicates the system-defined ID of the retrieved custom rule set.
	ID string

	CustomRuleSetDetail

	// LastModifiedDate indicates the date and time at which the custom rule
	// set was last modified.
	// Syntax:
	// 	MM/DD/YYYYhh:mm:ss [AM|PM]
	LastModifiedDate string `json:"last_modified_date"`
}
// AddCustomRuleSetResponse represents the response from the WAF API when
// adding a new custom rule set.
type AddCustomRuleSetResponse struct {
	AddRuleResponse
}
// DeleteCustomRuleSetResponse represents the response from the WAF API when
// deleting a custom rule set.
type DeleteCustomRuleSetResponse struct {
	// ID indicates the system-defined ID of the deleted custom rule set.
	ID string

	WAFResponse
}
// UpdateCustomRuleSetRequest identifies a custom rule set configuration to
// replace; the embedded detail describes a valid custom rule set.
type UpdateCustomRuleSetRequest struct {
	CustomRuleSetDetail
}
// UpdateCustomRuleSetResponse contains the response from the WAF API when
// updating a custom rule set.
type UpdateCustomRuleSetResponse struct {
	UpdateRuleResponse
}
// AddCustomRuleSet creates a custom rule set that defines custom threat
// assessment criteria for the given account.
func (svc *WAFService) AddCustomRuleSet(
	customRuleSet CustomRuleSetDetail,
	accountNumber string,
) (*AddCustomRuleSetResponse, error) {
	endpoint := fmt.Sprintf("/v2/mcc/customers/%s/waf/v1.0/rules", accountNumber)

	req, err := svc.Client.BuildRequest("POST", endpoint, customRuleSet)
	if err != nil {
		return nil, fmt.Errorf("AddCustomRuleSet: %v", err)
	}

	resp := &AddCustomRuleSetResponse{}
	if _, err = svc.Client.SendRequest(req, &resp); err != nil {
		return nil, fmt.Errorf("AddCustomRuleSet: %v", err)
	}
	return resp, nil
}
// GetAllCustomRuleSets retrieves the list of custom rule sets for the
// account. A custom rule set allows you to define custom threat assessment
// criterion.
func (svc *WAFService) GetAllCustomRuleSets(
	accountNumber string,
) ([]CustomRuleSetLight, error) {
	endpoint := fmt.Sprintf("/v2/mcc/customers/%s/waf/v1.0/rules", accountNumber)

	req, err := svc.Client.BuildRequest("GET", endpoint, nil)
	if err != nil {
		return nil, fmt.Errorf("GetAllCustomRuleSets: %v", err)
	}

	ruleSets := &[]CustomRuleSetLight{}
	if _, err = svc.Client.SendRequest(req, &ruleSets); err != nil {
		return nil, fmt.Errorf("GetAllCustomRuleSets: %v", err)
	}
	return *ruleSets, nil
}
// DeleteCustomRuleSet deletes the custom rule set identified by
// customRuleID.
func (svc *WAFService) DeleteCustomRuleSet(
	accountNumber string,
	customRuleID string,
) (*DeleteCustomRuleSetResponse, error) {
	endpoint := fmt.Sprintf("/v2/mcc/customers/%s/waf/v1.0/rules/%v",
		accountNumber,
		customRuleID,
	)

	req, err := svc.Client.BuildRequest("DELETE", endpoint, nil)
	if err != nil {
		return nil, fmt.Errorf("DeleteCustomRuleSet: %v", err)
	}

	resp := &DeleteCustomRuleSetResponse{}
	if _, err = svc.Client.SendRequest(req, &resp); err != nil {
		return nil, fmt.Errorf("DeleteCustomRuleSet: %v", err)
	}
	return resp, nil
}
// GetCustomRuleSet retrieves the custom rule set identified by
// customRuleID.
func (svc *WAFService) GetCustomRuleSet(
	accountNumber string,
	customRuleID string,
) (*GetCustomRuleSetResponse, error) {
	endpoint := fmt.Sprintf("/v2/mcc/customers/%s/waf/v1.0/rules/%v",
		accountNumber,
		customRuleID,
	)

	req, err := svc.Client.BuildRequest("GET", endpoint, nil)
	if err != nil {
		return nil, fmt.Errorf("GetCustomRuleSet: %v", err)
	}

	resp := &GetCustomRuleSetResponse{}
	if _, err = svc.Client.SendRequest(req, &resp); err != nil {
		return nil, fmt.Errorf("GetCustomRuleSet: %v", err)
	}
	return resp, nil
}
//UpdateCustomRuleSet that defines custom threat assessment criteria.
func (svc *WAFService) UpdateCustomRuleSet(
accountNumber string,
ID string,
customRuleSet UpdateCustomRuleSetRequest,
) (*UpdateCustomRuleSetResponse, error) {
url := fmt.Sprintf("/v2/mcc/customers/%s/waf/v1.0/rules/%s",
accountNumber,
ID,
)
request, err := svc.Client.BuildRequest("PUT", url, customRuleSet)
if err != nil {
return nil,
fmt.Errorf("waf -> custom_rule.go -> UpdateCustomRuleSet: %v", err)
}
var parsedResponse = &UpdateCustomRuleSetResponse{}
_, err = svc.Client.SendRequest(request, &parsedResponse)
if err != nil {
return nil,
fmt.Errorf("waf -> custom_rule.go -> UpdateCustomRuleSet: %v", err)
}
return parsedResponse, nil
} | edgecast/waf/custom_rule.go | 0.654122 | 0.465995 | custom_rule.go | starcoder |
package i6502
// Register offsets of the ACIA 6551 relative to its base address, as
// dispatched on by ReadByte and WriteByte.
const (
	aciaData = iota // data register: read = received byte, write = transmit
	aciaStatus      // status register; a write triggers a programmatic reset
	aciaCommand     // command register
	aciaControl     // control register
)
/*
	Acia6551 emulates the ACIA 6551 Serial IO chip.

	This Asynchronous Communications Interface Adapter can be directly
	attached to the 6502's address and data busses. It provides serial IO.

	External programs push received data in through the io.Writer side;
	bytes the CPU writes for transmission are published on the output
	channel (and latched for the io.Reader side).
*/
type Acia6551 struct {
	rx byte // receiver data register (last byte received)
	tx byte // transmitter data register (last byte written by the CPU)

	commandData byte // last value written to the command register
	controlData byte // last value written to the control register

	rxFull  bool // receiver holds an unread byte
	txEmpty bool // transmitter data register is empty

	rxIrqEnabled bool // receiver interrupt enabled (derived from command reg bit 1)
	txIrqEnabled bool // transmitter interrupt enabled (derived from command reg bits 2-3)

	overrun bool // a received byte arrived before the previous one was read

	output chan []byte // transmitted bytes are published here
}
// NewAcia6551 returns a freshly reset ACIA 6551 that publishes transmitted
// bytes on the supplied output channel. The returned error is currently
// always nil.
func NewAcia6551(output chan []byte) (*Acia6551, error) {
	acia := &Acia6551{output: output}
	acia.Reset()
	return acia, nil
}
// Size returns the number of addressable locations this chip occupies on
// the bus.
func (a *Acia6551) Size() uint16 {
	// We only have 4 addresses: Data, Status, Command and Control.
	return 0x04
}
// Reset emulates a hardware reset: both data registers are cleared, the
// transmitter is marked empty, interrupts are disabled and the command and
// control registers are zeroed.
func (a *Acia6551) Reset() {
	a.rx = 0
	a.tx = 0
	a.rxFull = false
	a.txEmpty = true

	a.rxIrqEnabled = false
	a.txIrqEnabled = false
	a.overrun = false

	a.setCommand(0)
	a.setControl(0)
}
// setControl stores a write to the control register. The baud-rate/word
// format settings it encodes are only stored, not otherwise emulated.
func (a *Acia6551) setControl(data byte) {
	a.controlData = data
}
// setCommand stores a write to the command register and derives the
// interrupt-enable flags from it. Bits 2-3 form the transmitter control;
// on the 6551 the transmitter interrupt is only enabled for the bit
// pattern 01 (bit 2 set, bit 3 clear).
//
// NOTE(review): the receiver-interrupt bit (bit 1) on the real 6551 is
// active-low (set = IRQ disabled); this emulation treats it as
// active-high — confirm the intended polarity.
func (a *Acia6551) setCommand(data byte) {
	a.commandData = data

	a.rxIrqEnabled = (data & 0x02) != 0
	// Bug fix: the previous check `(data & 0x08) != 1` was always true
	// (data&0x08 is either 0x00 or 0x08, never 1), so bit 3 was ignored.
	// Transmitter interrupts must be disabled whenever bit 3 is set.
	a.txIrqEnabled = ((data & 0x04) != 0) && ((data & 0x08) == 0)
}
// statusRegister assembles the 6551 status byte from the emulated flags:
//
//	bit 2 (0x04): overrun
//	bit 3 (0x08): receiver data register full
//	bit 4 (0x10): transmitter data register empty
//
// All other bits read as 0.
func (a *Acia6551) statusRegister() byte {
	status := byte(0)

	if a.rxFull {
		status |= 0x08
	}

	if a.txEmpty {
		status |= 0x10
	}

	if a.overrun {
		status |= 0x04
	}

	return status
}
// Read implements io.Reader, for external programs to read TX'ed data from
// the serial output. It is non-blocking: when p is empty or no byte is
// pending, it returns n == 0 with a nil error.
func (a *Acia6551) Read(p []byte) (n int, err error) {
	if len(p) == 0 || a.txEmpty {
		return 0, nil
	}

	a.txEmpty = true
	p[0] = a.tx

	// TODO: Handle txInterrupt
	return 1, nil
}
// Write implements io.Writer, for external programs to feed received data
// into the ACIA's RX register one byte at a time. It never fails.
func (a *Acia6551) Write(p []byte) (n int, err error) {
	for i := 0; i < len(p); i++ {
		a.rxWrite(p[i])
	}
	return len(p), nil
}
// ReadByte is used by the AddressBus to read a register of the ACIA 6551.
// Reads of unmapped offsets return 0x00. Note that reading the data
// register has the side effect of clearing the receiver-full and overrun
// flags (see rxRead).
func (a *Acia6551) ReadByte(address uint16) byte {
	switch address {
	case aciaData:
		return a.rxRead()
	case aciaStatus:
		return a.statusRegister()
	case aciaCommand:
		return a.commandData
	case aciaControl:
		return a.controlData
	}

	return 0x00
}
// WriteByte is used by the AddressBus to write to a register of the
// ACIA 6551. Writing any value to the status register performs a
// programmatic reset; writes to unmapped offsets are ignored.
func (a *Acia6551) WriteByte(address uint16, data byte) {
	switch address {
	case aciaData:
		a.txWrite(data)
	case aciaStatus:
		a.Reset()
	case aciaCommand:
		a.setCommand(data)
	case aciaControl:
		a.setControl(data)
	}
}
// rxRead services a CPU read of the data register: it clears the
// receiver-full and overrun flags and returns the received byte.
func (a *Acia6551) rxRead() byte {
	a.overrun = false
	a.rxFull = false
	return a.rx
}
// rxWrite places an incoming byte into the receiver data register. If the
// previous byte has not been read yet, the overrun flag is raised; the old
// byte is overwritten either way.
func (a *Acia6551) rxWrite(data byte) {
	// Oh no, overrun. Set the appropriate status
	if a.rxFull {
		a.overrun = true
	}

	a.rx = data
	a.rxFull = true

	// TODO: Interrupts
}
// txWrite services a CPU write to the data register: the byte is published
// on the output channel and latched into the transmitter register.
//
// NOTE(review): txEmpty is deliberately(?) left true (see the commented-out
// line below), which means Read() never observes this byte — confirm
// whether that is intended now that bytes go straight to the channel.
func (a *Acia6551) txWrite(data byte) {
	a.output <- []byte{data}
	a.tx = data

	// a.txEmpty = false
}
package measure
import (
"io"
"time"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/query"
"github.com/ipfs/go-metrics-interface"
)
// Histogram bucket layouts shared by every latency and size metric that
// New registers.
var (
	// sort latencies in buckets with following upper bounds, in seconds
	datastoreLatencyBuckets = []float64{1e-4, 1e-3, 1e-2, 1e-1, 1}

	// sort sizes in buckets with following upper bounds, in bytes
	datastoreSizeBuckets = []float64{1 << 6, 1 << 12, 1 << 18, 1 << 24}
)
// New wraps the datastore, providing metrics on the operations. The
// metrics are registered with names starting with prefix and a dot.
func New(prefix string, ds datastore.Datastore) *measure {
m := &measure{
backend: ds,
putNum: metrics.New(prefix+".put_total", "Total number of Datastore.Put calls").Counter(),
putErr: metrics.New(prefix+".put.errors_total", "Number of errored Datastore.Put calls").Counter(),
putLatency: metrics.New(prefix+".put.latency_seconds",
"Latency distribution of Datastore.Put calls").Histogram(datastoreLatencyBuckets),
putSize: metrics.New(prefix+".put.size_bytes",
"Size distribution of stored byte slices").Histogram(datastoreSizeBuckets),
syncNum: metrics.New(prefix+".sync_total", "Total number of Datastore.Sync calls").Counter(),
syncErr: metrics.New(prefix+".sync.errors_total", "Number of errored Datastore.Sync calls").Counter(),
syncLatency: metrics.New(prefix+".sync.latency_seconds",
"Latency distribution of Datastore.Sync calls").Histogram(datastoreLatencyBuckets),
getNum: metrics.New(prefix+".get_total", "Total number of Datastore.Get calls").Counter(),
getErr: metrics.New(prefix+".get.errors_total", "Number of errored Datastore.Get calls").Counter(),
getLatency: metrics.New(prefix+".get.latency_seconds",
"Latency distribution of Datastore.Get calls").Histogram(datastoreLatencyBuckets),
getSize: metrics.New(prefix+".get.size_bytes",
"Size distribution of retrieved byte slices").Histogram(datastoreSizeBuckets),
hasNum: metrics.New(prefix+".has_total", "Total number of Datastore.Has calls").Counter(),
hasErr: metrics.New(prefix+".has.errors_total", "Number of errored Datastore.Has calls").Counter(),
hasLatency: metrics.New(prefix+".has.latency_seconds",
"Latency distribution of Datastore.Has calls").Histogram(datastoreLatencyBuckets),
getsizeNum: metrics.New(prefix+".getsize_total", "Total number of Datastore.GetSize calls").Counter(),
getsizeErr: metrics.New(prefix+".getsize.errors_total", "Number of errored Datastore.GetSize calls").Counter(),
getsizeLatency: metrics.New(prefix+".getsize.latency_seconds",
"Latency distribution of Datastore.GetSize calls").Histogram(datastoreLatencyBuckets),
deleteNum: metrics.New(prefix+".delete_total", "Total number of Datastore.Delete calls").Counter(),
deleteErr: metrics.New(prefix+".delete.errors_total", "Number of errored Datastore.Delete calls").Counter(),
deleteLatency: metrics.New(prefix+".delete.latency_seconds",
"Latency distribution of Datastore.Delete calls").Histogram(datastoreLatencyBuckets),
queryNum: metrics.New(prefix+".query_total", "Total number of Datastore.Query calls").Counter(),
queryErr: metrics.New(prefix+".query.errors_total", "Number of errored Datastore.Query calls").Counter(),
queryLatency: metrics.New(prefix+".query.latency_seconds",
"Latency distribution of Datastore.Query calls").Histogram(datastoreLatencyBuckets),
checkNum: metrics.New(prefix+".check_total", "Total number of Datastore.Check calls").Counter(),
checkErr: metrics.New(prefix+".check.errors_total", "Number of errored Datastore.Check calls").Counter(),
checkLatency: metrics.New(prefix+".check.latency_seconds",
"Latency distribution of Datastore.Check calls").Histogram(datastoreLatencyBuckets),
scrubNum: metrics.New(prefix+".scrub_total", "Total number of Datastore.Scrub calls").Counter(),
scrubErr: metrics.New(prefix+".scrub.errors_total", "Number of errored Datastore.Scrub calls").Counter(),
scrubLatency: metrics.New(prefix+".scrub.latency_seconds",
"Latency distribution of Datastore.Scrub calls").Histogram(datastoreLatencyBuckets),
gcNum: metrics.New(prefix+".gc_total", "Total number of Datastore.CollectGarbage calls").Counter(),
gcErr: metrics.New(prefix+".gc.errors_total", "Number of errored Datastore.CollectGarbage calls").Counter(),
gcLatency: metrics.New(prefix+".gc.latency_seconds",
"Latency distribution of Datastore.CollectGarbage calls").Histogram(datastoreLatencyBuckets),
duNum: metrics.New(prefix+".du_total", "Total number of Datastore.DiskUsage calls").Counter(),
duErr: metrics.New(prefix+".du.errors_total", "Number of errored Datastore.DiskUsage calls").Counter(),
duLatency: metrics.New(prefix+".du.latency_seconds",
"Latency distribution of Datastore.DiskUsage calls").Histogram(datastoreLatencyBuckets),
batchPutNum: metrics.New(prefix+".batchput_total", "Total number of Batch.Put calls").Counter(),
batchPutErr: metrics.New(prefix+".batchput.errors_total", "Number of errored Batch.Put calls").Counter(),
batchPutLatency: metrics.New(prefix+".batchput.latency_seconds",
"Latency distribution of Batch.Put calls").Histogram(datastoreLatencyBuckets),
batchPutSize: metrics.New(prefix+".batchput.size_bytes",
"Size distribution of byte slices put into batches").Histogram(datastoreSizeBuckets),
batchDeleteNum: metrics.New(prefix+".batchdelete_total", "Total number of Batch.Delete calls").Counter(),
batchDeleteErr: metrics.New(prefix+".batchdelete.errors_total", "Number of errored Batch.Delete calls").Counter(),
batchDeleteLatency: metrics.New(prefix+".batchdelete.latency_seconds",
"Latency distribution of Batch.Delete calls").Histogram(datastoreLatencyBuckets),
batchCommitNum: metrics.New(prefix+".batchcommit_total", "Total number of Batch.Commit calls").Counter(),
batchCommitErr: metrics.New(prefix+".batchcommit.errors_total", "Number of errored Batch.Commit calls").Counter(),
batchCommitLatency: metrics.New(prefix+".batchcommit.latency_seconds",
"Latency distribution of Batch.Commit calls").Histogram(datastoreLatencyBuckets),
}
return m
}
type measure struct {
backend datastore.Datastore
putNum metrics.Counter
putErr metrics.Counter
putLatency metrics.Histogram
putSize metrics.Histogram
syncNum metrics.Counter
syncErr metrics.Counter
syncLatency metrics.Histogram
getNum metrics.Counter
getErr metrics.Counter
getLatency metrics.Histogram
getSize metrics.Histogram
hasNum metrics.Counter
hasErr metrics.Counter
hasLatency metrics.Histogram
getsizeNum metrics.Counter
getsizeErr metrics.Counter
getsizeLatency metrics.Histogram
deleteNum metrics.Counter
deleteErr metrics.Counter
deleteLatency metrics.Histogram
queryNum metrics.Counter
queryErr metrics.Counter
queryLatency metrics.Histogram
checkNum metrics.Counter
checkErr metrics.Counter
checkLatency metrics.Histogram
scrubNum metrics.Counter
scrubErr metrics.Counter
scrubLatency metrics.Histogram
gcNum metrics.Counter
gcErr metrics.Counter
gcLatency metrics.Histogram
duNum metrics.Counter
duErr metrics.Counter
duLatency metrics.Histogram
batchPutNum metrics.Counter
batchPutErr metrics.Counter
batchPutLatency metrics.Histogram
batchPutSize metrics.Histogram
batchDeleteNum metrics.Counter
batchDeleteErr metrics.Counter
batchDeleteLatency metrics.Histogram
batchCommitNum metrics.Counter
batchCommitErr metrics.Counter
batchCommitLatency metrics.Histogram
}
// recordLatency observes the time elapsed since start on h. It is intended
// to be deferred at the top of each instrumented method.
func recordLatency(h metrics.Histogram, start time.Time) {
	h.Observe(time.Since(start).Seconds())
}
// Put records call-count, value-size, latency and error metrics around the
// underlying Datastore.Put.
func (m *measure) Put(key datastore.Key, value []byte) error {
	defer recordLatency(m.putLatency, time.Now())
	m.putNum.Inc()
	m.putSize.Observe(float64(len(value)))

	if err := m.backend.Put(key, value); err != nil {
		m.putErr.Inc()
		return err
	}
	return nil
}
// Sync records call-count, latency and error metrics around the underlying
// Datastore.Sync.
func (m *measure) Sync(prefix datastore.Key) error {
	defer recordLatency(m.syncLatency, time.Now())
	m.syncNum.Inc()

	if err := m.backend.Sync(prefix); err != nil {
		m.syncErr.Inc()
		return err
	}
	return nil
}
// Get records call-count, latency and error metrics around the underlying
// Datastore.Get, and observes the size of successfully retrieved values.
// datastore.ErrNotFound is deliberately not counted as an error.
func (m *measure) Get(key datastore.Key) (value []byte, err error) {
	defer recordLatency(m.getLatency, time.Now())
	m.getNum.Inc()
	value, err = m.backend.Get(key)
	switch err {
	case nil:
		m.getSize.Observe(float64(len(value)))
	case datastore.ErrNotFound:
		// Not really an error.
	default:
		m.getErr.Inc()
	}
	return value, err
}
// Has records call-count, latency and error metrics around the underlying
// Datastore.Has.
func (m *measure) Has(key datastore.Key) (exists bool, err error) {
	defer recordLatency(m.hasLatency, time.Now())
	m.hasNum.Inc()

	exists, err = m.backend.Has(key)
	if err != nil {
		m.hasErr.Inc()
	}
	return
}
// GetSize records call-count, latency and error metrics around the
// underlying Datastore.GetSize. As with Get, datastore.ErrNotFound is not
// counted as an error.
func (m *measure) GetSize(key datastore.Key) (size int, err error) {
	defer recordLatency(m.getsizeLatency, time.Now())
	m.getsizeNum.Inc()
	size, err = m.backend.GetSize(key)
	switch err {
	case nil, datastore.ErrNotFound:
		// Not really an error.
	default:
		m.getsizeErr.Inc()
	}
	return size, err
}
// Delete records call-count, latency and error metrics around the
// underlying Datastore.Delete.
func (m *measure) Delete(key datastore.Key) error {
	defer recordLatency(m.deleteLatency, time.Now())
	m.deleteNum.Inc()

	if err := m.backend.Delete(key); err != nil {
		m.deleteErr.Inc()
		return err
	}
	return nil
}
// Query records call-count, latency and error metrics around the
// underlying Datastore.Query.
func (m *measure) Query(q query.Query) (query.Results, error) {
	defer recordLatency(m.queryLatency, time.Now())
	m.queryNum.Inc()

	res, err := m.backend.Query(q)
	if err == nil {
		return res, nil
	}
	m.queryErr.Inc()
	return res, err
}
// Check forwards to the backend's Check when it implements
// datastore.CheckedDatastore; otherwise it is a metered no-op.
func (m *measure) Check() error {
	defer recordLatency(m.checkLatency, time.Now())
	m.checkNum.Inc()

	c, ok := m.backend.(datastore.CheckedDatastore)
	if !ok {
		return nil
	}
	err := c.Check()
	if err != nil {
		m.checkErr.Inc()
	}
	return err
}
// Scrub forwards to the backend's Scrub when it implements
// datastore.ScrubbedDatastore; otherwise it is a metered no-op.
func (m *measure) Scrub() error {
	defer recordLatency(m.scrubLatency, time.Now())
	m.scrubNum.Inc()

	c, ok := m.backend.(datastore.ScrubbedDatastore)
	if !ok {
		return nil
	}
	err := c.Scrub()
	if err != nil {
		m.scrubErr.Inc()
	}
	return err
}
// CollectGarbage forwards to the backend's CollectGarbage when it
// implements datastore.GCDatastore; otherwise it is a metered no-op.
func (m *measure) CollectGarbage() error {
	defer recordLatency(m.gcLatency, time.Now())
	m.gcNum.Inc()

	c, ok := m.backend.(datastore.GCDatastore)
	if !ok {
		return nil
	}
	err := c.CollectGarbage()
	if err != nil {
		m.gcErr.Inc()
	}
	return err
}
// DiskUsage records call-count, latency and error metrics around
// datastore.DiskUsage applied to the wrapped backend.
func (m *measure) DiskUsage() (uint64, error) {
	defer recordLatency(m.duLatency, time.Now())
	m.duNum.Inc()
	size, err := datastore.DiskUsage(m.backend)
	if err != nil {
		m.duErr.Inc()
	}
	return size, err
}
// measuredBatch wraps a datastore.Batch, recording metrics on the parent
// measure for every batch operation.
type measuredBatch struct {
	b datastore.Batch // the underlying batch being instrumented
	m *measure        // owner; holds the metric instruments
}
// Batch returns a metered batch when the backend supports batching;
// otherwise it returns datastore.ErrBatchUnsupported.
func (m *measure) Batch() (datastore.Batch, error) {
	bds, ok := m.backend.(datastore.Batching)
	if !ok {
		return nil, datastore.ErrBatchUnsupported
	}

	batch, err := bds.Batch()
	if err != nil {
		return nil, err
	}
	return &measuredBatch{b: batch, m: m}, nil
}
// Put records batch-put count, size, latency and error metrics, then
// delegates to the wrapped batch.
func (mt *measuredBatch) Put(key datastore.Key, val []byte) error {
	defer recordLatency(mt.m.batchPutLatency, time.Now())
	mt.m.batchPutNum.Inc()
	mt.m.batchPutSize.Observe(float64(len(val)))

	if err := mt.b.Put(key, val); err != nil {
		mt.m.batchPutErr.Inc()
		return err
	}
	return nil
}
// Delete records batch-delete count, latency and error metrics, then
// delegates to the wrapped batch.
func (mt *measuredBatch) Delete(key datastore.Key) error {
	defer recordLatency(mt.m.batchDeleteLatency, time.Now())
	mt.m.batchDeleteNum.Inc()

	if err := mt.b.Delete(key); err != nil {
		mt.m.batchDeleteErr.Inc()
		return err
	}
	return nil
}
// Commit records batch-commit count, latency and error metrics, then
// delegates to the wrapped batch.
func (mt *measuredBatch) Commit() error {
	defer recordLatency(mt.m.batchCommitLatency, time.Now())
	mt.m.batchCommitNum.Inc()

	if err := mt.b.Commit(); err != nil {
		mt.m.batchCommitErr.Inc()
		return err
	}
	return nil
}
func (m *measure) Close() error {
if c, ok := m.backend.(io.Closer); ok {
return c.Close()
}
return nil
} | tmp1/go-ds-measure@v0.1.0/measure.go | 0.622459 | 0.485417 | measure.go | starcoder |
package plaid
import (
"encoding/json"
"time"
)
// LinkTokenCreateRequestUser An object specifying information about the end user who will be linking their account.
type LinkTokenCreateRequestUser struct {
// A unique ID representing the end user. Typically this will be a user ID number from your application. Personally identifiable information, such as an email address or phone number, should not be used in the `client_user_id`. It is currently used as a means of searching logs for the given user in the Plaid Dashboard.
ClientUserId string `json:"client_user_id"`
// The user's full legal name. This is an optional field used in the [returning user experience](https://plaid.com/docs/link/returning-user) to associate Items to the user.
LegalName *string `json:"legal_name,omitempty"`
// The user's phone number in [E.164](https://en.wikipedia.org/wiki/E.164) format. This field is optional, but required to enable the [returning user experience](https://plaid.com/docs/link/returning-user).
PhoneNumber *string `json:"phone_number,omitempty"`
// The date and time the phone number was verified in [ISO 8601](https://wikipedia.org/wiki/ISO_8601) format (`YYYY-MM-DDThh:mm:ssZ`). This field is optional, but required to enable any [returning user experience](https://plaid.com/docs/link/returning-user). Only pass a verification time for a phone number that you have verified. If you have performed verification but don’t have the time, you may supply a signal value of the start of the UNIX epoch. Example: `2020-01-01T00:00:00Z`
PhoneNumberVerifiedTime *time.Time `json:"phone_number_verified_time,omitempty"`
// The user's email address. This field is optional, but required to enable the [pre-authenticated returning user flow](https://plaid.com/docs/link/returning-user/#enabling-the-returning-user-experience).
EmailAddress *string `json:"email_address,omitempty"`
// The date and time the email address was verified in [ISO 8601](https://wikipedia.org/wiki/ISO_8601) format (`YYYY-MM-DDThh:mm:ssZ`). This is an optional field used in the [returning user experience](https://plaid.com/docs/link/returning-user). Only pass a verification time for an email address that you have verified. If you have performed verification but don’t have the time, you may supply a signal value of the start of the UNIX epoch. Example: `2020-01-01T00:00:00Z`
EmailAddressVerifiedTime *time.Time `json:"email_address_verified_time,omitempty"`
// To be provided in the format \"ddd-dd-dddd\". This field is optional and will support not-yet-implemented functionality for new products.
Ssn *string `json:"ssn,omitempty"`
// To be provided in the format \"yyyy-mm-dd\". This field is optional and will support not-yet-implemented functionality for new products.
DateOfBirth *string `json:"date_of_birth,omitempty"`
}
// NewLinkTokenCreateRequestUser instantiates a new LinkTokenCreateRequestUser object
// with the required client_user_id set; optional properties are left unset.
// The set of arguments will change when the set of required properties is changed.
func NewLinkTokenCreateRequestUser(clientUserId string) *LinkTokenCreateRequestUser {
	u := LinkTokenCreateRequestUser{ClientUserId: clientUserId}
	return &u
}
// NewLinkTokenCreateRequestUserWithDefaults instantiates a new
// LinkTokenCreateRequestUser object with only defaulted properties set.
// It does not guarantee that properties required by the API are set.
func NewLinkTokenCreateRequestUserWithDefaults() *LinkTokenCreateRequestUser {
	return &LinkTokenCreateRequestUser{}
}
// GetClientUserId returns the ClientUserId field value, or the zero value
// when the receiver is nil.
func (o *LinkTokenCreateRequestUser) GetClientUserId() string {
	if o != nil {
		return o.ClientUserId
	}
	return ""
}
// GetClientUserIdOk returns a pointer to the ClientUserId field value and
// a boolean reporting whether the value is available.
func (o *LinkTokenCreateRequestUser) GetClientUserIdOk() (*string, bool) {
	if o != nil {
		return &o.ClientUserId, true
	}
	return nil, false
}
// SetClientUserId sets the required ClientUserId field to the given value.
func (o *LinkTokenCreateRequestUser) SetClientUserId(v string) {
	o.ClientUserId = v
}
// GetLegalName returns the LegalName field value if set, zero value
// otherwise.
func (o *LinkTokenCreateRequestUser) GetLegalName() string {
	if o != nil && o.LegalName != nil {
		return *o.LegalName
	}
	return ""
}
// GetLegalNameOk returns a tuple with the LegalName field value if set,
// nil otherwise, and a boolean to check if the value has been set.
func (o *LinkTokenCreateRequestUser) GetLegalNameOk() (*string, bool) {
	if o != nil && o.LegalName != nil {
		return o.LegalName, true
	}
	return nil, false
}
// HasLegalName returns a boolean reporting whether the LegalName field has
// been set.
func (o *LinkTokenCreateRequestUser) HasLegalName() bool {
	return o != nil && o.LegalName != nil
}
// SetLegalName stores a pointer to a copy of the given string in the
// LegalName field.
func (o *LinkTokenCreateRequestUser) SetLegalName(v string) {
	o.LegalName = &v
}
// GetPhoneNumber returns the PhoneNumber field value if set, zero value otherwise.
func (o *LinkTokenCreateRequestUser) GetPhoneNumber() string {
	// Optional field: empty string when unset or on a nil receiver.
	if o != nil && o.PhoneNumber != nil {
		return *o.PhoneNumber
	}
	return ""
}
// GetPhoneNumberOk returns a pointer to the PhoneNumber field (nil if unset)
// and a boolean reporting whether the field has been set.
func (o *LinkTokenCreateRequestUser) GetPhoneNumberOk() (*string, bool) {
	if o != nil && o.PhoneNumber != nil {
		return o.PhoneNumber, true
	}
	return nil, false
}
// HasPhoneNumber reports whether the PhoneNumber field has been set.
func (o *LinkTokenCreateRequestUser) HasPhoneNumber() bool {
	// Idiomatic Go: return the condition directly rather than
	// the if/return-true/return-false form.
	return o != nil && o.PhoneNumber != nil
}
// SetPhoneNumber stores a reference to the given string in the PhoneNumber field.
func (o *LinkTokenCreateRequestUser) SetPhoneNumber(v string) {
	value := v // address of a local copy of the argument
	o.PhoneNumber = &value
}
// GetPhoneNumberVerifiedTime returns the PhoneNumberVerifiedTime field value if
// set, zero value otherwise.
func (o *LinkTokenCreateRequestUser) GetPhoneNumberVerifiedTime() time.Time {
	// Optional field: the time.Time zero value when unset or on a nil receiver.
	if o != nil && o.PhoneNumberVerifiedTime != nil {
		return *o.PhoneNumberVerifiedTime
	}
	return time.Time{}
}
// GetPhoneNumberVerifiedTimeOk returns a pointer to the PhoneNumberVerifiedTime
// field (nil if unset) and a boolean reporting whether the field has been set.
func (o *LinkTokenCreateRequestUser) GetPhoneNumberVerifiedTimeOk() (*time.Time, bool) {
	if o != nil && o.PhoneNumberVerifiedTime != nil {
		return o.PhoneNumberVerifiedTime, true
	}
	return nil, false
}
// HasPhoneNumberVerifiedTime reports whether the PhoneNumberVerifiedTime field
// has been set.
func (o *LinkTokenCreateRequestUser) HasPhoneNumberVerifiedTime() bool {
	// Idiomatic Go: return the condition directly rather than
	// the if/return-true/return-false form.
	return o != nil && o.PhoneNumberVerifiedTime != nil
}
// SetPhoneNumberVerifiedTime stores a reference to the given time.Time in the
// PhoneNumberVerifiedTime field.
func (o *LinkTokenCreateRequestUser) SetPhoneNumberVerifiedTime(v time.Time) {
	value := v // address of a local copy of the argument
	o.PhoneNumberVerifiedTime = &value
}
// GetEmailAddress returns the EmailAddress field value if set, zero value otherwise.
func (o *LinkTokenCreateRequestUser) GetEmailAddress() string {
	// Optional field: empty string when unset or on a nil receiver.
	if o != nil && o.EmailAddress != nil {
		return *o.EmailAddress
	}
	return ""
}
// GetEmailAddressOk returns a pointer to the EmailAddress field (nil if unset)
// and a boolean reporting whether the field has been set.
func (o *LinkTokenCreateRequestUser) GetEmailAddressOk() (*string, bool) {
	if o != nil && o.EmailAddress != nil {
		return o.EmailAddress, true
	}
	return nil, false
}
// HasEmailAddress reports whether the EmailAddress field has been set.
func (o *LinkTokenCreateRequestUser) HasEmailAddress() bool {
	// Idiomatic Go: return the condition directly rather than
	// the if/return-true/return-false form.
	return o != nil && o.EmailAddress != nil
}
// SetEmailAddress stores a reference to the given string in the EmailAddress field.
func (o *LinkTokenCreateRequestUser) SetEmailAddress(v string) {
	value := v // address of a local copy of the argument
	o.EmailAddress = &value
}
// GetEmailAddressVerifiedTime returns the EmailAddressVerifiedTime field value
// if set, zero value otherwise.
func (o *LinkTokenCreateRequestUser) GetEmailAddressVerifiedTime() time.Time {
	// Optional field: the time.Time zero value when unset or on a nil receiver.
	if o != nil && o.EmailAddressVerifiedTime != nil {
		return *o.EmailAddressVerifiedTime
	}
	return time.Time{}
}
// GetEmailAddressVerifiedTimeOk returns a pointer to the EmailAddressVerifiedTime
// field (nil if unset) and a boolean reporting whether the field has been set.
func (o *LinkTokenCreateRequestUser) GetEmailAddressVerifiedTimeOk() (*time.Time, bool) {
	if o != nil && o.EmailAddressVerifiedTime != nil {
		return o.EmailAddressVerifiedTime, true
	}
	return nil, false
}
// HasEmailAddressVerifiedTime reports whether the EmailAddressVerifiedTime
// field has been set.
func (o *LinkTokenCreateRequestUser) HasEmailAddressVerifiedTime() bool {
	// Idiomatic Go: return the condition directly rather than
	// the if/return-true/return-false form.
	return o != nil && o.EmailAddressVerifiedTime != nil
}
// SetEmailAddressVerifiedTime stores a reference to the given time.Time in the
// EmailAddressVerifiedTime field.
func (o *LinkTokenCreateRequestUser) SetEmailAddressVerifiedTime(v time.Time) {
	value := v // address of a local copy of the argument
	o.EmailAddressVerifiedTime = &value
}
// GetSsn returns the Ssn field value if set, zero value otherwise.
func (o *LinkTokenCreateRequestUser) GetSsn() string {
	// Optional field: empty string when unset or on a nil receiver.
	if o != nil && o.Ssn != nil {
		return *o.Ssn
	}
	return ""
}
// GetSsnOk returns a pointer to the Ssn field (nil if unset) and a boolean
// reporting whether the field has been set.
func (o *LinkTokenCreateRequestUser) GetSsnOk() (*string, bool) {
	if o != nil && o.Ssn != nil {
		return o.Ssn, true
	}
	return nil, false
}
// HasSsn reports whether the Ssn field has been set.
func (o *LinkTokenCreateRequestUser) HasSsn() bool {
	// Idiomatic Go: return the condition directly rather than
	// the if/return-true/return-false form.
	return o != nil && o.Ssn != nil
}
// SetSsn stores a reference to the given string in the Ssn field.
func (o *LinkTokenCreateRequestUser) SetSsn(v string) {
	value := v // address of a local copy of the argument
	o.Ssn = &value
}
// GetDateOfBirth returns the DateOfBirth field value if set, zero value otherwise.
func (o *LinkTokenCreateRequestUser) GetDateOfBirth() string {
	// Optional field: empty string when unset or on a nil receiver.
	if o != nil && o.DateOfBirth != nil {
		return *o.DateOfBirth
	}
	return ""
}
// GetDateOfBirthOk returns a pointer to the DateOfBirth field (nil if unset)
// and a boolean reporting whether the field has been set.
func (o *LinkTokenCreateRequestUser) GetDateOfBirthOk() (*string, bool) {
	if o != nil && o.DateOfBirth != nil {
		return o.DateOfBirth, true
	}
	return nil, false
}
// HasDateOfBirth reports whether the DateOfBirth field has been set.
func (o *LinkTokenCreateRequestUser) HasDateOfBirth() bool {
	// Idiomatic Go: return the condition directly rather than
	// the if/return-true/return-false form.
	return o != nil && o.DateOfBirth != nil
}
// SetDateOfBirth stores a reference to the given string in the DateOfBirth field.
func (o *LinkTokenCreateRequestUser) SetDateOfBirth(v string) {
	value := v // address of a local copy of the argument
	o.DateOfBirth = &value
}
// MarshalJSON serializes the request-user payload. The required client_user_id
// is always emitted; optional fields are emitted only when set (non-nil).
func (o LinkTokenCreateRequestUser) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	// client_user_id is required, so it is serialized unconditionally.
	// (The generated `if true` guard was vacuous and has been removed.)
	toSerialize["client_user_id"] = o.ClientUserId
	if o.LegalName != nil {
		toSerialize["legal_name"] = o.LegalName
	}
	if o.PhoneNumber != nil {
		toSerialize["phone_number"] = o.PhoneNumber
	}
	if o.PhoneNumberVerifiedTime != nil {
		toSerialize["phone_number_verified_time"] = o.PhoneNumberVerifiedTime
	}
	if o.EmailAddress != nil {
		toSerialize["email_address"] = o.EmailAddress
	}
	if o.EmailAddressVerifiedTime != nil {
		toSerialize["email_address_verified_time"] = o.EmailAddressVerifiedTime
	}
	if o.Ssn != nil {
		toSerialize["ssn"] = o.Ssn
	}
	if o.DateOfBirth != nil {
		toSerialize["date_of_birth"] = o.DateOfBirth
	}
	return json.Marshal(toSerialize)
}
// NullableLinkTokenCreateRequestUser wraps a LinkTokenCreateRequestUser pointer
// together with a flag that distinguishes "explicitly set (possibly to JSON
// null)" from "never set" — a distinction a bare pointer cannot express.
type NullableLinkTokenCreateRequestUser struct {
value *LinkTokenCreateRequestUser // wrapped value; nil when null or unset
isSet bool                        // true once Set or UnmarshalJSON has been called
}
// Get returns the wrapped value (nil when unset or explicitly null).
func (v NullableLinkTokenCreateRequestUser) Get() *LinkTokenCreateRequestUser {
return v.value
}
// Set stores val and marks the wrapper as set.
func (v *NullableLinkTokenCreateRequestUser) Set(val *LinkTokenCreateRequestUser) {
	v.isSet = true
	v.value = val
}
// IsSet reports whether Set or UnmarshalJSON has been called on this wrapper.
func (v NullableLinkTokenCreateRequestUser) IsSet() bool {
return v.isSet
}
// Unset clears both the wrapped value and the set flag.
func (v *NullableLinkTokenCreateRequestUser) Unset() {
	v.isSet = false
	v.value = nil
}
// NewNullableLinkTokenCreateRequestUser returns a wrapper holding val,
// already marked as set.
func NewNullableLinkTokenCreateRequestUser(val *LinkTokenCreateRequestUser) *NullableLinkTokenCreateRequestUser {
	n := NullableLinkTokenCreateRequestUser{value: val, isSet: true}
	return &n
}
// MarshalJSON encodes the wrapped value; a nil value encodes as JSON null.
func (v NullableLinkTokenCreateRequestUser) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
// UnmarshalJSON decodes src into the wrapped value and marks the wrapper as set.
// NOTE: isSet becomes true even if decoding fails (generator convention —
// callers distinguish "field present" from "decode succeeded" via the error).
func (v *NullableLinkTokenCreateRequestUser) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.