code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package bwmf
import (
"fmt"
"log"
"math/rand"
"os"
"strconv"
"time"
"github.com/golang/protobuf/proto"
"github.com/taskgraph/taskgraph"
pb "github.com/taskgraph/taskgraph/example/bwmf/proto"
"github.com/taskgraph/taskgraph/filesystem"
"github.com/taskgraph/taskgraph/op"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
/*
The block-wise matrix factorization task is designed for carrying out block-wise matrix
factorization under a variety of criteria (loss functions) and constraints (non-negativity,
for example).
The main idea behind bwmf is the following:
We have K tasks that each handle both a row task and a column task in alternation. Each task
reads two copies of the data: one row shard and one column shard of A. It either hosts
one shard of D and a full copy of T, or one shard of T and a full copy of D, depending on
the epoch of the iteration. "A full copy" consists of computation results from itself and
all "children".
Topology: the topology differs from task to task. Each task considers itself the parent
and all others children.
*/
// bwmfTask holds two shards of the original matrices (row and column), one shard of D,
// and one shard of T. It works differently for odd and even epochs:
// During an odd epoch, 1. it fetches all T shards from the other tasks and finds a better
// value for the local shard of D; 2. after it is done, it lets everyone know. Vice versa
// for an even epoch.
// Task 0 monitors the progress and is responsible for starting the work of a new epoch.
type bwmfTask struct {
	framework  taskgraph.Framework // callbacks into the taskgraph runtime
	epoch      uint64              // current epoch; parity decides whether D or T is updated
	taskID     uint64
	logger     *log.Logger
	numOfTasks uint64 // total number of tasks in the job
	numIters   uint64 // number of D/T alternations; the job runs 2*numIters epochs
	curIter    uint64
	rowShard    *pb.SparseMatrixShard // this task's shard of A, split by rows
	columnShard *pb.SparseMatrixShard // this task's shard of A, split by columns
	dShard      *pb.DenseMatrixShard  // local shard of factor D
	tShard      *pb.DenseMatrixShard  // local shard of factor T
	peerShards  map[uint64]*pb.DenseMatrixShard // shards fetched from peers in the current epoch, keyed by block ID
	peerUpdated map[uint64]bool                 // which tasks have reported "done" for the current epoch
	dims        *dimensions
	config      *Config
	latentDim   int
	fsClient    filesystem.Client
	// optimization toolkits
	dLoss        *KLDivLoss // loss used when optimizing dShard (built lazily on first use)
	tLoss        *KLDivLoss // loss used when optimizing tShard (built lazily on first use)
	optimizer    *op.ProjectedGradient
	stopCriteria op.StopCriteria
	// event handling: exported callbacks post onto these channels; the run()
	// goroutine consumes them, serializing all state mutation without locks.
	epochChange chan *event
	getT        chan *event
	getD        chan *event
	dataReady   chan *event
	updateDone  chan *event
	metaReady   chan *event
	exitChan    chan *event
}
// event is the single message type exchanged between the exported callbacks
// and the run() event loop. Only the fields relevant to a given channel are
// populated; the rest stay at their zero values.
type event struct {
	ctx     context.Context
	epoch   uint64      // target epoch (epochChange only)
	request *pb.Request // incoming gRPC request (getT/getD only)
	retT    chan *pb.Response // reply channel for a T-shard request; closed on failure
	retD    chan *pb.Response // reply channel for a D-shard request; closed on failure
	fromID  uint64            // originating task (dataReady/metaReady)
	method  string            // gRPC method name of the data request (dataReady)
	output  proto.Message     // deserialized response payload (dataReady)
}
// dimensions collects the matrix sizes used throughout the task.
// m and n are the local row/column shard heights and k is the latent
// dimension; M and N are the corresponding global totals, initialized to -1
// and computed lazily once all peer shards have arrived.
type dimensions struct {
	m, n, k int
	M, N    int
}
// initData loads this task's row and column shards of the input matrix from
// the filesystem and allocates randomly initialized D and T shards.
// Shard files are addressed as "<configured path>.<taskID>".
// A load failure is fatal: the task cannot make progress without its data.
func (t *bwmfTask) initData() {
	var rsErr, csErr error
	t.rowShard, rsErr = LoadSparseShard(t.fsClient, t.config.IOConf.IDPath+"."+strconv.Itoa(int(t.taskID)))
	if rsErr != nil {
		t.logger.Panicf("Failed load rowShard. %s", rsErr)
	}
	t.columnShard, csErr = LoadSparseShard(t.fsClient, t.config.IOConf.ITPath+"."+strconv.Itoa(int(t.taskID)))
	if csErr != nil {
		t.logger.Panicf("Failed load columnShard. %s", csErr)
	}
	// m/n are the local shard heights, k the latent dimension. The global
	// totals M/N are unknown until peer shards arrive, hence the -1 sentinel.
	t.dims = &dimensions{
		m: len(t.rowShard.Row),
		n: len(t.columnShard.Row),
		k: t.latentDim,
		M: -1,
		N: -1,
	}
	// initDenseShard never returns a non-nil error, so it is safe to ignore.
	t.dShard, _ = initDenseShard(t.dims.m, t.dims.k)
	t.tShard, _ = initDenseShard(t.dims.n, t.dims.k)
}
// initOptUtil builds the projected-gradient optimizer shared by the D and T
// updates. The projection box is sized to the larger of the two shards so one
// optimizer instance can serve either update.
func (t *bwmfTask) initOptUtil() {
	projLen := t.dims.n * t.dims.k
	if t.dims.m > t.dims.n {
		projLen = t.dims.m * t.dims.k
	}
	// Box constraints: keep every parameter strictly positive (>= 1e-8) and
	// bounded above (<= 1e20).
	upper := op.NewAllTheSameParameter(1e20, projLen)
	lower := op.NewAllTheSameParameter(1e-8, projLen)
	t.optimizer = op.NewProjectedGradient(
		op.NewProjection(upper, lower),
		t.config.OptConf.Beta,
		t.config.OptConf.Sigma,
		t.config.OptConf.Alpha,
	)
}
// initDenseShard allocates an l-by-k dense shard filled with uniform random
// values in [0, 1) as a starting point for factorization.
// The error result is always nil; it is kept for signature compatibility with
// the shard loaders.
func initDenseShard(l, k int) (*pb.DenseMatrixShard, error) {
	shard := &pb.DenseMatrixShard{
		Row: make([]*pb.DenseMatrixShard_DenseRow, l),
	}
	for i := range shard.Row {
		row := &pb.DenseMatrixShard_DenseRow{At: make([]float32, k)}
		for j := range row.At {
			row.At[j] = rand.Float32()
		}
		shard.Row[i] = row
	}
	return shard, nil
}
// Init brings the task up to speed from scratch or after recovery: it loads
// the data shards, builds the optimizer, allocates the event channels, and
// starts the single event-handling goroutine.
func (t *bwmfTask) Init(taskID uint64, framework taskgraph.Framework) {
	t.taskID = taskID
	t.framework = framework
	t.logger = log.New(os.Stdout, "", log.Ldate|log.Ltime|log.Lshortfile)
	t.initData()
	t.initOptUtil()
	t.epochChange = make(chan *event, 1)
	t.getT = make(chan *event, 1)
	t.getD = make(chan *event, 1)
	// Buffered for one response per task so DataReady never blocks the caller.
	t.dataReady = make(chan *event, t.numOfTasks)
	t.updateDone = make(chan *event, 1)
	t.metaReady = make(chan *event, 1)
	t.exitChan = make(chan *event)
	go t.run()
}
// getDShard wraps the local D shard in a response tagged with this task's ID.
func (t *bwmfTask) getDShard() *pb.Response {
	resp := &pb.Response{BlockId: t.taskID}
	resp.Shard = t.dShard
	return resp
}

// getTShard wraps the local T shard in a response tagged with this task's ID.
func (t *bwmfTask) getTShard() *pb.Response {
	resp := &pb.Response{BlockId: t.taskID}
	resp.Shard = t.tShard
	return resp
}
// run is the task's single event loop. All mutable task state is owned by
// this goroutine; the exported callbacks only post events onto channels,
// which serializes state access without locks. It returns when exitChan is
// closed by Exit.
func (t *bwmfTask) run() {
	for {
		select {
		case epochChange := <-t.epochChange:
			t.doEnterEpoch(epochChange.ctx, epochChange.epoch)
		case req := <-t.getT:
			t.logger.Printf("trying to serve T shard, task %d, epoch %d", t.taskID, t.epoch)
			// A dead gRPC context means the requester has moved on; closing
			// the reply channel signals failure to the blocked RPC handler.
			err := t.framework.CheckGRPCContext(req.ctx)
			if err != nil {
				close(req.retT)
				break
			}
			// We only return the data shard of previous epoch. So it always exists.
			req.retT <- t.getTShard()
		case req := <-t.getD:
			t.logger.Printf("trying to serve D shard, task %d, epoch %d", t.taskID, t.epoch)
			err := t.framework.CheckGRPCContext(req.ctx)
			if err != nil {
				close(req.retD)
				break
			}
			req.retD <- t.getDShard()
		case dataReady := <-t.dataReady:
			t.doDataReady(dataReady.ctx, dataReady.fromID, dataReady.method, dataReady.output)
		case done := <-t.updateDone:
			// Local optimization finished; tell the master so it can track
			// epoch-wide progress.
			t.notifyMaster(done.ctx)
		case notify := <-t.metaReady:
			t.notifyUpdate(notify.ctx, notify.fromID)
		case <-t.exitChan:
			return
		}
	}
}
// Exit stops the event loop and persists the final D and T shards.
// NOTE(review): finish() runs on the caller's goroutine while run() may still
// be draining; the shards are presumably stable at exit time — confirm the
// framework only calls Exit after all updates complete.
func (t *bwmfTask) Exit() {
	close(t.exitChan)
	t.finish()
}
// finish writes the final T and D shards to their configured output paths,
// suffixed with the task ID. Save failures are logged but not fatal.
func (t *bwmfTask) finish() {
	suffix := "." + strconv.Itoa(int(t.taskID))
	if err := SaveDenseShard(t.fsClient, t.tShard, t.config.IOConf.OTPath+suffix); err != nil {
		t.logger.Printf("Save tShard for task %d failed with error: %v", t.taskID, err)
	}
	if err := SaveDenseShard(t.fsClient, t.dShard, t.config.IOConf.ODPath+suffix); err != nil {
		t.logger.Printf("Save dShard for task %d failed with error: %v", t.taskID, err)
	}
	t.logger.Println("Finished. Waiting for the framework to stop the task...")
}
// EnterEpoch is the framework callback for an epoch transition; it forwards
// the event to the run() loop, which does the actual work in doEnterEpoch.
func (t *bwmfTask) EnterEpoch(ctx context.Context, epoch uint64) {
	t.epochChange <- &event{ctx: ctx, epoch: epoch}
}
// doEnterEpoch resets per-epoch state, rebuilds the stop criteria, and starts
// fetching the peers' shards: full D on even epochs (to update T), full T on
// odd epochs (to update D).
func (t *bwmfTask) doEnterEpoch(ctx context.Context, epoch uint64) {
	t.logger.Printf("doEnterEpoch, task %d, epoch %d", t.taskID, epoch)
	t.peerShards = map[uint64]*pb.DenseMatrixShard{}
	t.peerUpdated = map[uint64]bool{}
	t.epoch = epoch
	// Fresh criteria each epoch so the fixed-count and timeout budgets restart.
	t.stopCriteria = op.MakeComposedCriterion(
		op.MakeFixCountStopCriteria(t.config.OptConf.FixedCnt),
		op.MakeGradientNormStopCriteria(t.config.OptConf.GradTol),
		op.MakeTimeoutCriterion(300*time.Second),
	)
	method := "/proto.BlockData/GetTShard"
	if epoch%2 == 0 {
		method = "/proto.BlockData/GetDShard"
	}
	t.fetchShards(ctx, method)
}
// fetchShards issues an asynchronous data request for the given gRPC method
// to every neighbor in this epoch's topology; responses arrive via DataReady.
func (t *bwmfTask) fetchShards(ctx context.Context, method string) {
	peers := t.framework.GetTopology().GetNeighbors("Neighbors", t.epoch)
	for _, peer := range peers {
		t.framework.DataRequest(ctx, peer, method, &pb.Request{})
	}
}
// DataReady is the framework callback for a completed data request; it
// forwards the response to the run() loop for handling in doDataReady.
func (t *bwmfTask) DataReady(ctx context.Context, fromID uint64, method string, output proto.Message) {
	t.dataReady <- &event{ctx: ctx, fromID: fromID, method: method, output: output}
}
// doDataReady records a peer's shard response. Once responses from all tasks
// have arrived it kicks off this epoch's local optimization on a separate
// goroutine — updating T from the full D on even epochs, and D from the full
// T on odd epochs — so the event loop stays responsive.
func (t *bwmfTask) doDataReady(ctx context.Context, fromID uint64, method string, output proto.Message) {
	t.logger.Printf("doDataReady, task %d, from %d, epoch %d, method %s", t.taskID, fromID, t.epoch, method)
	resp, bOk := output.(*pb.Response)
	if !bOk {
		// An unexpected message type is a programming error, not a data error.
		t.logger.Panicf("doDataReady, corruption in proto.Message.")
	}
	t.peerShards[resp.BlockId] = resp.Shard
	if len(t.peerShards) == int(t.numOfTasks) {
		if t.epoch%2 == 0 {
			t.logger.Printf("Full D ready, task %d, epoch %d", t.taskID, t.epoch)
			// XXX Starting an intensive computation.
			go t.updateTShard(ctx)
		} else {
			t.logger.Printf("Full T ready, task %d, epoch %d", t.taskID, t.epoch)
			// XXX Starting an intensive computation.
			go t.updateDShard(ctx)
		}
	}
}
// updateDShard optimizes the local D shard against the full T gathered from
// all peers (held in peerShards after an odd-epoch fetch). It runs on its own
// goroutine and reports completion via updateDone.
// On first use it lazily computes the global dimension N (total rows across
// all peer shards) and allocates the loss's working buffer; both are reused
// in later epochs.
func (t *bwmfTask) updateDShard(ctx context.Context) {
	if t.dims.N == -1 {
		t.dims.N = 0
		for _, m := range t.peerShards {
			t.dims.N += len(m.Row)
		}
		wh := make([][]float32, t.dims.N)
		for i, _ := range wh {
			wh[i] = make([]float32, t.dims.m)
		}
		// NOTE(review): the loss is built with m = global N and n = local m;
		// presumably KLDivLoss's (m, n) mean (rows of W, rows of the shard
		// being optimized) — confirm against KLDivLoss's definition.
		t.dLoss = &KLDivLoss{
			V:  t.rowShard,
			WH: wh,
			m:  t.dims.N,
			n:  t.dims.m,
			k:  t.dims.k,
		}
	}
	t.dLoss.W = NewBlocksParameter(&t.peerShards)
	param := NewSingleBlockParameter(t.dShard)
	loss, optErr := t.optimizer.Minimize(t.dLoss, t.stopCriteria, param)
	if optErr != nil {
		t.logger.Panicf("Failed minimizing over dShard: %s", optErr)
		// handle re-run
		// XXX(baigang) just kill the framework and wait for restarting?
	}
	t.logger.Printf("Updated dShard, loss is %f", loss)
	t.updateDone <- &event{ctx: ctx}
}
// updateTShard optimizes the local T shard against the full D gathered from
// all peers (held in peerShards after an even-epoch fetch). It runs on its
// own goroutine and reports completion via updateDone.
// On first use it lazily computes the global dimension M (total rows across
// all peer shards) and allocates the loss's working buffer; both are reused
// in later epochs.
func (t *bwmfTask) updateTShard(ctx context.Context) {
	if t.dims.M == -1 {
		t.dims.M = 0
		for _, m := range t.peerShards {
			t.dims.M += len(m.Row)
		}
		wh := make([][]float32, t.dims.M)
		for i := range wh {
			wh[i] = make([]float32, t.dims.n)
		}
		t.tLoss = &KLDivLoss{
			V:  t.columnShard,
			WH: wh,
			m:  t.dims.M,
			n:  t.dims.n,
			k:  t.dims.k,
		}
	}
	t.tLoss.W = NewBlocksParameter(&t.peerShards)
	param := NewSingleBlockParameter(t.tShard)
	loss, optErr := t.optimizer.Minimize(t.tLoss, t.stopCriteria, param)
	if optErr != nil {
		// The original Panicf call had no format verb for optErr, which
		// produced a garbled "%!(EXTRA ...)" message.
		t.logger.Panicf("Failed minimizing over tShard: %s", optErr)
		// TODO handle re-run
	}
	t.logger.Printf("Updated tShard, loss is %f", loss)
	t.updateDone <- &event{ctx: ctx}
}
// notifyMaster flags "done" on the Master link so the master task can count
// finished updates for the current epoch.
func (t *bwmfTask) notifyMaster(ctx context.Context) {
	t.framework.FlagMeta(ctx, "Master", "done")
}
// CreateOutputMessage returns a fresh protobuf message the framework uses to
// deserialize the reply of the given data-request method. Both shard methods
// reply with a pb.Response. An unknown method indicates a programming error
// and panics with the offending name (the original panicked with an empty,
// uninformative message).
func (t *bwmfTask) CreateOutputMessage(method string) proto.Message {
	switch method {
	case "/proto.BlockData/GetDShard", "/proto.BlockData/GetTShard":
		return new(pb.Response)
	}
	panic("CreateOutputMessage: unknown method " + method)
}
// CreateServer builds the gRPC server that exposes this task's BlockData
// service (GetDShard/GetTShard) to its peers.
func (t *bwmfTask) CreateServer() *grpc.Server {
	server := grpc.NewServer()
	pb.RegisterBlockDataServer(server, t)
	return server
}
// GetTShard is the gRPC handler serving this task's T shard. It delegates to
// the event loop and reports an error when the reply channel is closed, which
// means the epoch moved on before the request could be served.
func (t *bwmfTask) GetTShard(ctx context.Context, request *pb.Request) (*pb.Response, error) {
	reply := make(chan *pb.Response, 1)
	t.getT <- &event{ctx: ctx, request: request, retT: reply}
	if resp, ok := <-reply; ok {
		return resp, nil
	}
	return nil, fmt.Errorf("epoch changed!")
}

// GetDShard is the gRPC handler serving this task's D shard; see GetTShard.
func (t *bwmfTask) GetDShard(ctx context.Context, request *pb.Request) (*pb.Response, error) {
	reply := make(chan *pb.Response, 1)
	t.getD <- &event{ctx: ctx, request: request, retD: reply}
	if resp, ok := <-reply; ok {
		return resp, nil
	}
	return nil, fmt.Errorf("epoch changed!")
}
// MetaReady is the framework callback for a peer's metadata flag (a "done"
// notification); it forwards the event to the run() loop. linkType and meta
// are not needed downstream and are dropped.
func (t *bwmfTask) MetaReady(ctx context.Context, fromID uint64, linkType, meta string) {
	t.metaReady <- &event{ctx: ctx, fromID: fromID}
}
func (t *bwmfTask) notifyUpdate(ctx context.Context, fromID uint64) {
t.logger.Printf("notifyUpdate, task %d, from %d, epoch %d", t.taskID, fromID, t.epoch)
t.peerUpdated[fromID] = true
if len(t.peerUpdated) == int(t.numOfTasks) {
t.logger.Printf("All tasks update done, epoch %d", t.epoch)
if t.epoch < 2*t.numIters {
t.framework.IncEpoch(ctx)
} else {
t.framework.ShutdownJob()
}
}
} | example/bwmf/bwmf_task.go | 0.534612 | 0.440168 | bwmf_task.go | starcoder |
package pps
// particleGrid is a spatial hash: particles are bucketed into square cells of
// side cellSize so neighbour queries only inspect nearby cells.
type particleGrid struct {
	cellSize int
	cells    map[gridCell][]*Particle
}

// gridCell identifies one cell by its integer grid coordinates.
type gridCell struct {
	x, y int
}
// add returns the cell offset from c by (x, y).
func (c gridCell) add(x, y int) gridCell {
	return gridCell{c.x + x, c.y + y}
}

// makeParticleGrid creates an empty grid whose square cells have side cellSize.
func makeParticleGrid(cellSize int) particleGrid {
	g := particleGrid{cellSize: cellSize}
	g.cells = make(map[gridCell][]*Particle)
	return g
}
// addParticle inserts p into the cell containing its current position.
func (g *particleGrid) addParticle(p *Particle) {
	g.add(g.cell(p.Pos), p)
}

// add appends p to cell c's bucket.
func (g *particleGrid) add(c gridCell, p *Particle) {
	g.cells[c] = append(g.cells[c], p)
}
// remove deletes particle p from cell c's bucket, if present. Only the first
// matching entry is removed and the order of the rest is preserved.
func (g *particleGrid) remove(c gridCell, p *Particle) {
	bucket := g.cells[c]
	for i := range bucket {
		if bucket[i] == p {
			g.cells[c] = append(bucket[:i], bucket[i+1:]...)
			return
		}
	}
}
// updatePos moves particle p to pos, migrating it between cells when the move
// crosses a cell boundary.
func (g *particleGrid) updatePos(p *Particle, pos Vec2) {
	from, to := g.cell(p.Pos), g.cell(pos)
	if from != to {
		g.remove(from, p)
		g.add(to, p)
	}
	p.Pos = pos
}

// cell maps a position to the grid cell containing it.
func (g *particleGrid) cell(pos Vec2) gridCell {
	x := int(pos.X) / g.cellSize
	y := int(pos.Y) / g.cellSize
	return gridCell{x: x, y: y}
}
// neighbours returns the number of neighbours within radius r on the left
// side (L) and on the right side (R) of a particle p, as well as the number
// of close neighbours NClose, which are within radius rClose.
// The left side and the right side are semicircles with the radius r.
// The sum N=L+R is the total number of neighbours within this radius.
//
// Only the 3x3 block of cells around p's cell is scanned, so results are
// exact only when r (and rClose) do not exceed the grid's cellSize —
// NOTE(review): no assertion enforces this; confirm callers guarantee it.
func (g *particleGrid) neighbours(p *Particle, r, rClose float64) (L, R, NClose int) {
	center := g.cell(p.Pos)
	for dx := -1; dx <= 1; dx++ {
		for dy := -1; dy <= 1; dy++ {
			for _, np := range g.cells[center.add(dx, dy)] {
				if p == np {
					continue
				}
				pos := p.Pos
				nPos := np.Pos
				d := pos.dist(nPos)
				if d <= r {
					// Side is decided against the ray from p along its
					// heading p.Dir().
					if isLeft(nPos, pos, pos.add(p.Dir())) {
						L++
					} else {
						R++
					}
					if d <= rClose {
						NClose++
					}
				}
			}
		}
	}
	return
}
// isLeft reports whether point p is to the left side of the line
// through a and b.
func isLeft(p, a, b Vec2) bool {
return (p.X-a.X)*(b.Y-a.Y)-(p.Y-a.Y)*(b.X-a.X) > 0
} | grid.go | 0.805785 | 0.66264 | grid.go | starcoder |
package timef
import "strings"
// format maps a predefined-format name to its layout string, expressed in the
// time package's reference-time components.
type format map[string]string
// Format is the list of predefined layout strings, keyed by format name.
// Names encode the component order (year at begin/end), year width (long or
// short), and the separator/precision variant.
//
// Fixed: the FormatDateYearAtEnd* entries previously joined components as
// {day, year, month}, putting the year in the middle, which contradicted both
// the key names and every other *YearAtEnd variant; they now use
// {day, month, year}.
var Format = format{
	// Format for date year at begin plus timestamp
	FormatDateLongYearAtBegin11: strings.Join([]string{LongYear, ZeroMonth, ZeroDay}, "-") + " " + strings.Join([]string{Hour, ZeroMinute}, ":"),
	FormatDateLongYearAtBegin12: strings.Join([]string{LongYear, ZeroMonth, ZeroDay}, "/") + " " + strings.Join([]string{Hour, ZeroMinute}, ":"),
	FormatDateLongYearAtBegin13: strings.Join([]string{LongYear, ZeroMonth, ZeroDay}, ".") + " " + strings.Join([]string{Hour, ZeroMinute}, ":"),
	FormatDateLongYearAtBegin14: strings.Join([]string{LongYear, ZeroMonth, ZeroDay}, "") + " " + strings.Join([]string{Hour, ZeroMinute}, ":"),
	FormatDateLongYearAtBegin21: strings.Join([]string{LongYear, ZeroMonth, ZeroDay}, "-") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":"),
	FormatDateLongYearAtBegin22: strings.Join([]string{LongYear, ZeroMonth, ZeroDay}, "/") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":"),
	FormatDateLongYearAtBegin23: strings.Join([]string{LongYear, ZeroMonth, ZeroDay}, ".") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":"),
	FormatDateLongYearAtBegin24: strings.Join([]string{LongYear, ZeroMonth, ZeroDay}, "") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":"),
	FormatDateLongYearAtBegin31: strings.Join([]string{LongYear, ZeroMonth, ZeroDay}, "-") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":") + "." + Milli,
	FormatDateLongYearAtBegin32: strings.Join([]string{LongYear, ZeroMonth, ZeroDay}, "/") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":") + "." + Milli,
	FormatDateLongYearAtBegin33: strings.Join([]string{LongYear, ZeroMonth, ZeroDay}, ".") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":") + "." + Milli,
	FormatDateLongYearAtBegin34: strings.Join([]string{LongYear, ZeroMonth, ZeroDay}, "") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":") + "." + Milli,
	FormatDateLongYearAtBegin41: strings.Join([]string{LongYear, ZeroMonth, ZeroDay}, "-") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":") + "." + Micro,
	FormatDateLongYearAtBegin42: strings.Join([]string{LongYear, ZeroMonth, ZeroDay}, "/") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":") + "." + Micro,
	FormatDateLongYearAtBegin43: strings.Join([]string{LongYear, ZeroMonth, ZeroDay}, ".") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":") + "." + Micro,
	FormatDateLongYearAtBegin44: strings.Join([]string{LongYear, ZeroMonth, ZeroDay}, "") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":") + "." + Micro,
	FormatDateLongYearAtBegin51: strings.Join([]string{LongYear, ZeroMonth, ZeroDay}, "-") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":") + "." + Nano,
	FormatDateLongYearAtBegin52: strings.Join([]string{LongYear, ZeroMonth, ZeroDay}, "/") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":") + "." + Nano,
	FormatDateLongYearAtBegin53: strings.Join([]string{LongYear, ZeroMonth, ZeroDay}, ".") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":") + "." + Nano,
	FormatDateLongYearAtBegin54: strings.Join([]string{LongYear, ZeroMonth, ZeroDay}, "") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":") + "." + Nano,
	FormatDateYearAtBegin11:     strings.Join([]string{Year, ZeroMonth, ZeroDay}, "-") + " " + strings.Join([]string{Hour, ZeroMinute}, ":"),
	FormatDateYearAtBegin12:     strings.Join([]string{Year, ZeroMonth, ZeroDay}, "/") + " " + strings.Join([]string{Hour, ZeroMinute}, ":"),
	FormatDateYearAtBegin13:     strings.Join([]string{Year, ZeroMonth, ZeroDay}, ".") + " " + strings.Join([]string{Hour, ZeroMinute}, ":"),
	FormatDateYearAtBegin14:     strings.Join([]string{Year, ZeroMonth, ZeroDay}, "") + " " + strings.Join([]string{Hour, ZeroMinute}, ":"),
	FormatDateYearAtBegin21:     strings.Join([]string{Year, ZeroMonth, ZeroDay}, "-") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":"),
	FormatDateYearAtBegin22:     strings.Join([]string{Year, ZeroMonth, ZeroDay}, "/") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":"),
	FormatDateYearAtBegin23:     strings.Join([]string{Year, ZeroMonth, ZeroDay}, ".") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":"),
	FormatDateYearAtBegin24:     strings.Join([]string{Year, ZeroMonth, ZeroDay}, "") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":"),
	FormatDateYearAtBegin31:     strings.Join([]string{Year, ZeroMonth, ZeroDay}, "-") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":") + "." + Milli,
	FormatDateYearAtBegin41:     strings.Join([]string{Year, ZeroMonth, ZeroDay}, "-") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":") + "." + Micro,
	FormatDateYearAtBegin51:     strings.Join([]string{Year, ZeroMonth, ZeroDay}, "-") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":") + "." + Nano,
	// Format for date year at end plus timestamp
	FormatDateLongYearAtEnd11: strings.Join([]string{ZeroDay, ZeroMonth, LongYear}, "-") + " " + strings.Join([]string{Hour, ZeroMinute}, ":"),
	FormatDateLongYearAtEnd12: strings.Join([]string{ZeroDay, ZeroMonth, LongYear}, "/") + " " + strings.Join([]string{Hour, ZeroMinute}, ":"),
	FormatDateLongYearAtEnd13: strings.Join([]string{ZeroDay, ZeroMonth, LongYear}, ".") + " " + strings.Join([]string{Hour, ZeroMinute}, ":"),
	FormatDateLongYearAtEnd14: strings.Join([]string{ZeroDay, ZeroMonth, LongYear}, "") + " " + strings.Join([]string{Hour, ZeroMinute}, ":"),
	FormatDateLongYearAtEnd21: strings.Join([]string{ZeroDay, ZeroMonth, LongYear}, "-") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":"),
	FormatDateLongYearAtEnd22: strings.Join([]string{ZeroDay, ZeroMonth, LongYear}, "/") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":"),
	FormatDateLongYearAtEnd23: strings.Join([]string{ZeroDay, ZeroMonth, LongYear}, ".") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":"),
	FormatDateLongYearAtEnd24: strings.Join([]string{ZeroDay, ZeroMonth, LongYear}, "") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":"),
	FormatDateLongYearAtEnd31: strings.Join([]string{ZeroDay, ZeroMonth, LongYear}, "-") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":") + "." + Milli,
	FormatDateLongYearAtEnd32: strings.Join([]string{ZeroDay, ZeroMonth, LongYear}, "/") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":") + "." + Milli,
	FormatDateLongYearAtEnd33: strings.Join([]string{ZeroDay, ZeroMonth, LongYear}, ".") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":") + "." + Milli,
	FormatDateLongYearAtEnd34: strings.Join([]string{ZeroDay, ZeroMonth, LongYear}, "") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":") + "." + Milli,
	FormatDateLongYearAtEnd41: strings.Join([]string{ZeroDay, ZeroMonth, LongYear}, "-") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":") + "." + Micro,
	FormatDateLongYearAtEnd42: strings.Join([]string{ZeroDay, ZeroMonth, LongYear}, "/") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":") + "." + Micro,
	FormatDateLongYearAtEnd43: strings.Join([]string{ZeroDay, ZeroMonth, LongYear}, ".") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":") + "." + Micro,
	FormatDateLongYearAtEnd44: strings.Join([]string{ZeroDay, ZeroMonth, LongYear}, "") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":") + "." + Micro,
	FormatDateLongYearAtEnd51: strings.Join([]string{ZeroDay, ZeroMonth, LongYear}, "-") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":") + "." + Nano,
	FormatDateLongYearAtEnd52: strings.Join([]string{ZeroDay, ZeroMonth, LongYear}, "/") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":") + "." + Nano,
	FormatDateLongYearAtEnd53: strings.Join([]string{ZeroDay, ZeroMonth, LongYear}, ".") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":") + "." + Nano,
	FormatDateLongYearAtEnd54: strings.Join([]string{ZeroDay, ZeroMonth, LongYear}, "") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":") + "." + Nano,
	FormatDateYearAtEnd11:     strings.Join([]string{ZeroDay, ZeroMonth, Year}, "-") + " " + strings.Join([]string{Hour, ZeroMinute}, ":"),
	FormatDateYearAtEnd12:     strings.Join([]string{ZeroDay, ZeroMonth, Year}, "/") + " " + strings.Join([]string{Hour, ZeroMinute}, ":"),
	FormatDateYearAtEnd13:     strings.Join([]string{ZeroDay, ZeroMonth, Year}, ".") + " " + strings.Join([]string{Hour, ZeroMinute}, ":"),
	FormatDateYearAtEnd14:     strings.Join([]string{ZeroDay, ZeroMonth, Year}, "") + " " + strings.Join([]string{Hour, ZeroMinute}, ":"),
	FormatDateYearAtEnd21:     strings.Join([]string{ZeroDay, ZeroMonth, Year}, "-") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":"),
	FormatDateYearAtEnd22:     strings.Join([]string{ZeroDay, ZeroMonth, Year}, "/") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":"),
	FormatDateYearAtEnd23:     strings.Join([]string{ZeroDay, ZeroMonth, Year}, ".") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":"),
	FormatDateYearAtEnd24:     strings.Join([]string{ZeroDay, ZeroMonth, Year}, "") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":"),
	FormatDateYearAtEnd31:     strings.Join([]string{ZeroDay, ZeroMonth, Year}, "-") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":") + "." + Milli,
	FormatDateYearAtEnd41:     strings.Join([]string{ZeroDay, ZeroMonth, Year}, "-") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":") + "." + Micro,
	FormatDateYearAtEnd51:     strings.Join([]string{ZeroDay, ZeroMonth, Year}, "-") + " " + strings.Join([]string{Hour, ZeroMinute, ZeroSecond}, ":") + "." + Nano,
	// Format for day year at begin
	FormatDayLongYearAtBegin1: strings.Join([]string{LongYear, ZeroMonth, ZeroDay}, "-"),
	FormatDayLongYearAtBegin2: strings.Join([]string{LongYear, ZeroMonth, ZeroDay}, "/"),
	FormatDayLongYearAtBegin3: strings.Join([]string{LongYear, ZeroMonth, ZeroDay}, "."),
	FormatDayLongYearAtBegin4: strings.Join([]string{LongYear, ZeroMonth, ZeroDay}, ""),
	FormatDayYearAtBegin1:     strings.Join([]string{Year, ZeroMonth, ZeroDay}, "-"),
	FormatDayYearAtBegin2:     strings.Join([]string{Year, ZeroMonth, ZeroDay}, "/"),
	FormatDayYearAtBegin3:     strings.Join([]string{Year, ZeroMonth, ZeroDay}, "."),
	FormatDayYearAtBegin4:     strings.Join([]string{Year, ZeroMonth, ZeroDay}, ""),
	// Format for day year at end
	FormatDayLongYearAtEnd1: strings.Join([]string{ZeroDay, ZeroMonth, LongYear}, "-"),
	FormatDayLongYearAtEnd2: strings.Join([]string{ZeroDay, ZeroMonth, LongYear}, "/"),
	FormatDayLongYearAtEnd3: strings.Join([]string{ZeroDay, ZeroMonth, LongYear}, "."),
	FormatDayLongYearAtEnd4: strings.Join([]string{ZeroDay, ZeroMonth, LongYear}, ""),
	FormatDayYearAtEnd1:     strings.Join([]string{ZeroDay, ZeroMonth, Year}, "-"),
	FormatDayYearAtEnd2:     strings.Join([]string{ZeroDay, ZeroMonth, Year}, "/"),
	FormatDayYearAtEnd3:     strings.Join([]string{ZeroDay, ZeroMonth, Year}, "."),
	FormatDayYearAtEnd4:     strings.Join([]string{ZeroDay, ZeroMonth, Year}, ""),
}
// Contains returns true if value exists
func (f *format) Contains(value string) bool {
if _, ok := (*f)[value]; ok {
return true
}
return false
} | format.go | 0.532911 | 0.545104 | format.go | starcoder |
package optimization
import (
"math"
"math/rand"
"sort"
"time"
)
// init seeds the global math/rand source so repeated runs explore different
// play-outs. NOTE(review): rand.Seed is deprecated as of Go 1.20, and a
// second-resolution seed repeats for processes started within the same second.
func init() {
	rand.Seed(time.Now().Unix())
}
// MCTS performs a Monte Carlo Tree Search guided by the UCB1 selection rule.
// first is the root state, simulations bounds the length of each random
// play-out, c is the exploration constant, and limit caps the wall-clock time
// spent searching. It returns the best path found as the chain of
// most-visited children from the root.
//
// Fixes over the original: the UCB1 exploration term divided inside the
// logarithm (sqrt(2*ln(parent/visits)) instead of sqrt(2*ln(parent)/visits)),
// and the score update dereferenced node.parent unconditionally, which
// panicked when the initial state had no possible moves. The O(n log n)
// sorts for picking the best child were also replaced with linear max scans,
// which no longer mutate child order.
func MCTS(first *State, simulations int, c float64, limit time.Duration) []*Node {
	start := time.Now()
	root := &MCTSNode{
		state:        first,
		untriedMoves: first.PossibleNextMoves(),
	}
	for time.Since(start) < limit {
		node := root
		// Selection - descend to the child with the highest selection score
		// until reaching a node that still has untried moves (or a leaf).
		for len(node.untriedMoves) == 0 && len(node.children) > 0 {
			best := node.children[0]
			for _, ch := range node.children[1:] {
				if ch.selectionScore > best.selectionScore {
					best = ch
				}
			}
			node = best
		}
		// Expansion - apply one random untried move, adding a new child node.
		if len(node.untriedMoves) > 0 {
			i := rand.Intn(len(node.untriedMoves))
			move := node.untriedMoves[i]
			node.untriedMoves = append(node.untriedMoves[:i], node.untriedMoves[i+1:]...)
			newState := node.state.Apply(move)
			child := &MCTSNode{
				parent:       node,
				state:        newState,
				untriedMoves: newState.PossibleNextMoves(),
			}
			node.children = append(node.children, child)
			node = child
		}
		// Simulation - play out random moves from the new state.
		sim := node.state
		for j := 0; j < simulations; j++ {
			moves := sim.PossibleNextMoves()
			if len(moves) == 0 {
				break
			}
			sim = sim.Apply(moves[rand.Intn(len(moves))])
		}
		// Backpropagation - fold the play-out result into every ancestor.
		outcome := float64(sim.Evaluation())
		for p := node; p != nil; p = p.parent {
			p.totalOutcome += outcome
			p.visits++
		}
		// Refresh the UCB1 score of the node just expanded. The root has no
		// parent (reached only when the initial state has no moves), so skip it.
		if node.parent != nil {
			winRatio := node.totalOutcome / float64(node.visits)
			explore := math.Sqrt(2 * math.Log(float64(node.parent.visits)) / float64(node.visits))
			node.selectionScore = winRatio + c*explore
		}
	}
	// Extract the principal variation by repeatedly following the
	// most-visited child.
	var path []*Node
	for current := root; len(current.children) > 0; {
		best := current.children[0]
		for _, ch := range current.children[1:] {
			if ch.visits > best.visits {
				best = ch
			}
		}
		path = append(path, best.state.At)
		current = best
	}
	return path
}
// MCTSNode is one node of the search tree built by MCTS.
type MCTSNode struct {
	parent         *MCTSNode // nil for the root
	state          *State
	totalOutcome   float64 // sum of play-out evaluations backpropagated through this node
	visits         uint64  // number of backpropagations through this node
	untriedMoves   []Move  // moves not yet expanded into children
	children       []*MCTSNode
	selectionScore float64 // cached UCB1 score used during selection
}
package main
import "strings"
// Path represents a filesystem path as a slice of components. An absolute
// path is marked by a leading "" component (so "/a/b" is ["", "a", "b"]).
type Path []string
// splitIntoPathInner appends the '/'-separated components of path to p using
// a two-state scanner and returns the result.
// state 0: skipping over one or more consecutive '/' separators.
// state 1: inside a component starting at s; the component is emitted when a
// '/' or the end of input (c < 0) is reached.
// Note that starting in state 1 (as SplitIntoPath does) emits a leading empty
// component when path begins with '/'.
func splitIntoPathInner(p Path, path string, state int) Path {
	s := 0
	i := 0
	c := 0
	for c >= 0 {
		// c is the current byte, or -1 once the input is exhausted.
		if i < len(path) {
			c = int(path[i])
		} else {
			c = -1
		}
		switch state {
		case 0:
			if c == '/' {
				i++
			} else {
				state = 1
				s = i
			}
		case 1:
			if c == '/' || c < 0 {
				p = append(p, path[s:i])
				state = 0
			} else {
				i++
			}
		}
	}
	return p
}
// SplitIntoPathAsAbs splits path into components rooted at "" so the result
// is an absolute path. An empty input yields an empty Path.
func SplitIntoPathAsAbs(path string) Path {
	if len(path) == 0 {
		return Path{}
	}
	return splitIntoPathInner(Path{""}, path, 0)
}
// SplitIntoPath splits path into components, preserving a leading empty
// component when path starts with '/'. An empty input yields an empty Path.
func SplitIntoPath(path string) Path {
	if len(path) == 0 {
		return Path{}
	}
	return splitIntoPathInner(Path{}, path, 1)
}
// Canonicalize resolves "." and ".." components and returns the result as a
// new Path. ".." never pops past the leading "" marker of an absolute path.
func (p Path) Canonicalize() Path {
	out := make(Path, 0, len(p))
	for _, seg := range p {
		if seg == "." {
			continue
		}
		if seg == ".." {
			if n := len(out); n > 0 && out[n-1] != "" {
				out = out[:n-1]
			}
			continue
		}
		out = append(out, seg)
	}
	return out
}
// IsEmpty returns true if current path has no components.
func (p Path) IsEmpty() bool {
	return len(p) == 0
}

// IsRoot returns true if current path is exactly the root ("/"), i.e. a
// single empty component.
func (p Path) IsRoot() bool {
	return len(p) == 1 && p[0] == ""
}

// IsAbs returns true if current path is absolute (leading "" component).
func (p Path) IsAbs() bool {
	return len(p) > 0 && p[0] == ""
}
// Join concatenates p and another into a new Path. A leading "" (absolute
// marker) on another is dropped so the result keeps p's rootedness.
//
// The result is always freshly allocated: the previous implementation used
// append(p, ...) directly, which could silently overwrite the shared backing
// array of another slice derived from p when p had spare capacity.
func (p Path) Join(another Path) Path {
	tail := another
	if len(another) > 0 && another[0] == "" {
		tail = another[1:]
	}
	joined := make(Path, 0, len(p)+len(tail))
	joined = append(joined, p...)
	return append(joined, tail...)
}
// String converts a path into a string by joining its components with "/".
// An absolute path's leading "" component naturally yields a leading slash.
func (p Path) String() string {
	return strings.Join(p, "/")
}
// IsPrefixed reports whether another is a component-wise prefix of p.
func (p Path) IsPrefixed(another Path) bool {
	if len(another) > len(p) {
		return false
	}
	for i := range another {
		if another[i] != p[i] {
			return false
		}
	}
	return true
}
// Prefix returns p without its final component. The root path returns the
// root; a single relative component reduces to the empty path.
func (p Path) Prefix() Path {
	switch len(p) {
	case 0:
		return p
	case 1:
		if p[0] == "" {
			return Path{""}
		}
		return Path{}
	}
	return p[:len(p)-1]
}
// BasePart returns the final component of p as a one-element Path. The root
// path returns the root; a single relative component yields the empty path.
func (p Path) BasePart() Path {
	switch len(p) {
	case 0:
		return p
	case 1:
		if p[0] == "" {
			return Path{""}
		}
		return Path{}
	}
	return p[len(p)-1:]
}
// Base returns the final path component as a string. The root path yields
// "/"; empty paths and single-component relative paths yield "".
func (p Path) Base() string {
	switch len(p) {
	case 0:
		return ""
	case 1:
		if p[0] == "" {
			return "/"
		}
		return ""
	}
	return p[len(p)-1]
}
// Equal returns true if current path and another one (passed as parameter) are equal
func (p Path) Equal(p2 Path) bool {
if len(p) != len(p2) {
return false
}
for i := 0; i < len(p); i++ {
if p[i] != p2[i] {
return false
}
}
return true
} | path.go | 0.59514 | 0.458046 | path.go | starcoder |
package stmt
import "github.com/lindb/lindb/pkg/function"
// Expr represents an interface for all expression types.
type Expr interface {
	// expr is an unexported marker method: only types in this package can
	// implement Expr, making the set of expression types a closed union.
	expr()
}

// TagFilter represents a tag filter for searching time series.
type TagFilter interface {
	// TagKey returns the filter's tag key
	TagKey() string
}
// SelectItem represents a select item from select statement
type SelectItem struct {
	Expr  Expr
	Alias string // optional "AS" alias; empty when none was given
}

// FieldExpr represents a field name for select list
type FieldExpr struct {
	Name string
}

// CallExpr represents a function call expression
type CallExpr struct {
	Type   function.Type
	Params []Expr // function arguments, in call order
}

// ParenExpr represents a parenthesized expression
type ParenExpr struct {
	Expr Expr
}

// BinaryExpr represents an operations with two expressions
type BinaryExpr struct {
	Left, Right Expr
	Operator    BinaryOP
}

// EqualsExpr represents an equals expression
type EqualsExpr struct {
	Key   string
	Value string
}

// InExpr represents an in expression
type InExpr struct {
	Key    string
	Values []string // candidate values the tag may match
}

// LikeExpr represents a like expression
type LikeExpr struct {
	Key   string
	Value string
}

// RegexExpr represents a regular expression
type RegexExpr struct {
	Key    string
	Regexp string // regular-expression pattern matched against the tag value
}

// NotExpr represents a not expression
type NotExpr struct {
	Expr Expr
}
// The empty expr() bodies below are marker implementations: they tag each
// type as a member of the Expr union without adding behavior.
func (e *SelectItem) expr() {}
func (e *FieldExpr) expr()  {}
func (e *CallExpr) expr()   {}
func (e *ParenExpr) expr()  {}
func (e *BinaryExpr) expr() {}
func (e *NotExpr) expr()    {}
func (e *EqualsExpr) expr() {}
func (e *InExpr) expr()     {}
func (e *LikeExpr) expr()   {}
func (e *RegexExpr) expr()  {}
// TagKey returns the equals filter's tag key
func (e *EqualsExpr) TagKey() string { return e.Key }

// TagKey returns the in filter's tag key
func (e *InExpr) TagKey() string { return e.Key }

// TagKey returns the like filter's tag key
func (e *LikeExpr) TagKey() string { return e.Key }

// TagKey returns the regex filter's tag key
func (e *RegexExpr) TagKey() string { return e.Key }
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// UnifiedRoleScheduleInstanceBase is the base model shared by role schedule
// instances. NOTE(review): this type appears to be auto-generated by Kiota
// (see the serialization import); presumably manual edits will be overwritten
// on regeneration — confirm before modifying.
type UnifiedRoleScheduleInstanceBase struct {
	Entity
	// Read-only property with details of the app specific scope when the assignment scope is app specific. Containment entity.
	appScope AppScopeable
	// Identifier of the app-specific scope when the assignment scope is app-specific. The scope of an assignment determines the set of resources for which the principal has been granted access. App scopes are scopes that are defined and understood by this application only. Use / for tenant-wide app scopes. Use directoryScopeId to limit the scope to particular directory objects, for example, administrative units.
	appScopeId *string
	// The directory object that is the scope of the assignment. Enables the retrieval of the directory object using $expand at the same time as getting the role assignment. Read-only.
	directoryScope DirectoryObjectable
	// Identifier of the directory object representing the scope of the assignment. The scope of an assignment determines the set of resources for which the principal has been granted access. Directory scopes are shared scopes stored in the directory that are understood by multiple applications. Use / for tenant-wide scope. Use appScopeId to limit the scope to an application only.
	directoryScopeId *string
	// The principal that is getting a role assignment through the request. Enables the retrieval of the principal using $expand at the same time as getting the role assignment. Read-only.
	principal DirectoryObjectable
	// Identifier of the principal to which the assignment is being granted to. Can be a group or a user.
	principalId *string
	// The roleDefinition for the assignment. Enables the retrieval of the role definition using $expand at the same time as getting the role assignment. The roleDefinition.Id is automatically expanded.
	roleDefinition UnifiedRoleDefinitionable
	// Identifier of the unifiedRoleDefinition the assignment is for. Read only. Supports $filter (eq).
	roleDefinitionId *string
}
// NewUnifiedRoleScheduleInstanceBase instantiates a new unifiedRoleScheduleInstanceBase and sets the default values.
func NewUnifiedRoleScheduleInstanceBase()(*UnifiedRoleScheduleInstanceBase) {
    return &UnifiedRoleScheduleInstanceBase{
        Entity: *NewEntity(),
    }
}
// CreateUnifiedRoleScheduleInstanceBaseFromDiscriminatorValue creates a new instance of the appropriate class based on the discriminator value.
// The parse node is currently unused: this base type has no subtype discrimination here.
func CreateUnifiedRoleScheduleInstanceBaseFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
    return NewUnifiedRoleScheduleInstanceBase(), nil
}
// GetAppScope gets the appScope property value. Read-only property with details of the app specific scope when the assignment scope is app specific. Containment entity.
func (m *UnifiedRoleScheduleInstanceBase) GetAppScope()(AppScopeable) {
    // Guard clause keeps the generated SDK's nil-receiver tolerance while
    // avoiding the non-idiomatic else-after-return.
    if m == nil {
        return nil
    }
    return m.appScope
}
// GetAppScopeId gets the appScopeId property value. Identifier of the app-specific scope when the assignment scope is app-specific. The scope of an assignment determines the set of resources for which the principal has been granted access. App scopes are scopes that are defined and understood by this application only. Use / for tenant-wide app scopes. Use directoryScopeId to limit the scope to particular directory objects, for example, administrative units.
func (m *UnifiedRoleScheduleInstanceBase) GetAppScopeId()(*string) {
    if m == nil {
        return nil
    }
    return m.appScopeId
}
// GetDirectoryScope gets the directoryScope property value. The directory object that is the scope of the assignment. Enables the retrieval of the directory object using $expand at the same time as getting the role assignment. Read-only.
func (m *UnifiedRoleScheduleInstanceBase) GetDirectoryScope()(DirectoryObjectable) {
    if m == nil {
        return nil
    }
    return m.directoryScope
}
// GetDirectoryScopeId gets the directoryScopeId property value. Identifier of the directory object representing the scope of the assignment. The scope of an assignment determines the set of resources for which the principal has been granted access. Directory scopes are shared scopes stored in the directory that are understood by multiple applications. Use / for tenant-wide scope. Use appScopeId to limit the scope to an application only.
func (m *UnifiedRoleScheduleInstanceBase) GetDirectoryScopeId()(*string) {
    if m == nil {
        return nil
    }
    return m.directoryScopeId
}
// GetFieldDeserializers returns the deserialization information for the current model.
// Each entry consumes one JSON field and stores the parsed value through the
// matching setter; nil values coming from the payload leave the field untouched.
func (m *UnifiedRoleScheduleInstanceBase) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
    // Start with the embedded Entity's deserializers and add this type's fields.
    res := m.Entity.GetFieldDeserializers()
    res["appScope"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetObjectValue(CreateAppScopeFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            m.SetAppScope(val.(AppScopeable))
        }
        return nil
    }
    res["appScopeId"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetStringValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetAppScopeId(val)
        }
        return nil
    }
    res["directoryScope"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetObjectValue(CreateDirectoryObjectFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            m.SetDirectoryScope(val.(DirectoryObjectable))
        }
        return nil
    }
    res["directoryScopeId"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetStringValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetDirectoryScopeId(val)
        }
        return nil
    }
    res["principal"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetObjectValue(CreateDirectoryObjectFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            m.SetPrincipal(val.(DirectoryObjectable))
        }
        return nil
    }
    res["principalId"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetStringValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetPrincipalId(val)
        }
        return nil
    }
    res["roleDefinition"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetObjectValue(CreateUnifiedRoleDefinitionFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            m.SetRoleDefinition(val.(UnifiedRoleDefinitionable))
        }
        return nil
    }
    res["roleDefinitionId"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetStringValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetRoleDefinitionId(val)
        }
        return nil
    }
    return res
}
// GetPrincipal gets the principal property value. The principal that is getting a role assignment through the request. Enables the retrieval of the principal using $expand at the same time as getting the role assignment. Read-only.
func (m *UnifiedRoleScheduleInstanceBase) GetPrincipal()(DirectoryObjectable) {
    // Guard clause keeps the generated SDK's nil-receiver tolerance while
    // avoiding the non-idiomatic else-after-return.
    if m == nil {
        return nil
    }
    return m.principal
}
// GetPrincipalId gets the principalId property value. Identifier of the principal to which the assignment is being granted to. Can be a group or a user.
func (m *UnifiedRoleScheduleInstanceBase) GetPrincipalId()(*string) {
    if m == nil {
        return nil
    }
    return m.principalId
}
// GetRoleDefinition gets the roleDefinition property value. The roleDefinition for the assignment. Enables the retrieval of the role definition using $expand at the same time as getting the role assignment. The roleDefinition.Id is automatically expanded.
func (m *UnifiedRoleScheduleInstanceBase) GetRoleDefinition()(UnifiedRoleDefinitionable) {
    if m == nil {
        return nil
    }
    return m.roleDefinition
}
// GetRoleDefinitionId gets the roleDefinitionId property value. Identifier of the unifiedRoleDefinition the assignment is for. Read only. Supports $filter (eq).
func (m *UnifiedRoleScheduleInstanceBase) GetRoleDefinitionId()(*string) {
    if m == nil {
        return nil
    }
    return m.roleDefinitionId
}
// Serialize writes every field of the current object with the supplied writer,
// delegating to the embedded Entity first; field order matches the generated original.
func (m *UnifiedRoleScheduleInstanceBase) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
    if err := m.Entity.Serialize(writer); err != nil {
        return err
    }
    if err := writer.WriteObjectValue("appScope", m.GetAppScope()); err != nil {
        return err
    }
    if err := writer.WriteStringValue("appScopeId", m.GetAppScopeId()); err != nil {
        return err
    }
    if err := writer.WriteObjectValue("directoryScope", m.GetDirectoryScope()); err != nil {
        return err
    }
    if err := writer.WriteStringValue("directoryScopeId", m.GetDirectoryScopeId()); err != nil {
        return err
    }
    if err := writer.WriteObjectValue("principal", m.GetPrincipal()); err != nil {
        return err
    }
    if err := writer.WriteStringValue("principalId", m.GetPrincipalId()); err != nil {
        return err
    }
    if err := writer.WriteObjectValue("roleDefinition", m.GetRoleDefinition()); err != nil {
        return err
    }
    if err := writer.WriteStringValue("roleDefinitionId", m.GetRoleDefinitionId()); err != nil {
        return err
    }
    return nil
}
// SetAppScope sets the appScope property value. Read-only property with details of the app specific scope when the assignment scope is app specific. Containment entity.
func (m *UnifiedRoleScheduleInstanceBase) SetAppScope(value AppScopeable)() {
if m != nil {
m.appScope = value
}
}
// SetAppScopeId sets the appScopeId property value. Identifier of the app-specific scope when the assignment scope is app-specific. The scope of an assignment determines the set of resources for which the principal has been granted access. App scopes are scopes that are defined and understood by this application only. Use / for tenant-wide app scopes. Use directoryScopeId to limit the scope to particular directory objects, for example, administrative units.
func (m *UnifiedRoleScheduleInstanceBase) SetAppScopeId(value *string)() {
if m != nil {
m.appScopeId = value
}
}
// SetDirectoryScope sets the directoryScope property value. The directory object that is the scope of the assignment. Enables the retrieval of the directory object using $expand at the same time as getting the role assignment. Read-only.
func (m *UnifiedRoleScheduleInstanceBase) SetDirectoryScope(value DirectoryObjectable)() {
if m != nil {
m.directoryScope = value
}
}
// SetDirectoryScopeId sets the directoryScopeId property value. Identifier of the directory object representing the scope of the assignment. The scope of an assignment determines the set of resources for which the principal has been granted access. Directory scopes are shared scopes stored in the directory that are understood by multiple applications. Use / for tenant-wide scope. Use appScopeId to limit the scope to an application only.
func (m *UnifiedRoleScheduleInstanceBase) SetDirectoryScopeId(value *string)() {
if m != nil {
m.directoryScopeId = value
}
}
// SetPrincipal sets the principal property value. The principal that is getting a role assignment through the request. Enables the retrieval of the principal using $expand at the same time as getting the role assignment. Read-only.
func (m *UnifiedRoleScheduleInstanceBase) SetPrincipal(value DirectoryObjectable)() {
if m != nil {
m.principal = value
}
}
// SetPrincipalId sets the principalId property value. Identifier of the principal to which the assignment is being granted to. Can be a group or a user.
func (m *UnifiedRoleScheduleInstanceBase) SetPrincipalId(value *string)() {
if m != nil {
m.principalId = value
}
}
// SetRoleDefinition sets the roleDefinition property value. The roleDefinition for the assignment. Enables the retrieval of the role definition using $expand at the same time as getting the role assignment. The roleDefinition.Id is automatically expanded.
func (m *UnifiedRoleScheduleInstanceBase) SetRoleDefinition(value UnifiedRoleDefinitionable)() {
if m != nil {
m.roleDefinition = value
}
}
// SetRoleDefinitionId sets the roleDefinitionId property value. Identifier of the unifiedRoleDefinition the assignment is for. Read only. Supports $filter (eq).
func (m *UnifiedRoleScheduleInstanceBase) SetRoleDefinitionId(value *string)() {
if m != nil {
m.roleDefinitionId = value
}
} | models/unified_role_schedule_instance_base.go | 0.692746 | 0.413892 | unified_role_schedule_instance_base.go | starcoder |
package model
import (
"fmt"
"gopkg.in/yaml.v2"
"io/ioutil"
)
// Types mirroring the YAML structure of an Ekara environment descriptor.
type (
	// yaml tag for the proxy details
	yamlProxy struct {
		Http string `yaml:"http_proxy"`
		Https string `yaml:"https_proxy"`
		NoProxy string `yaml:"no_proxy"`
	}
	// yaml tag for stuff to be copied on volumes
	yamlCopy struct {
		// Once indicates if the copy should be done only on one node matching the targeted labels
		Once bool
		// The volume path where to copy the content
		Path string
		// Labels to restrict the copy to some node sets
		yamlLabel `yaml:",inline"`
		// The list of path patterns identifying content to be copied
		Sources []string `yaml:"sources"`
	}
	// yaml tag for parameters
	yamlParams struct {
		Params map[string]interface{} `yaml:",omitempty"`
	}
	// yaml tag for variables
	yamlVars struct {
		Vars map[string]interface{} `yaml:",omitempty"`
	}
	// yaml tag for authentication parameters
	yamlAuth struct {
		Auth map[string]string `yaml:",omitempty"`
	}
	// yaml tag for environment variables
	yamlEnv struct {
		Env map[string]string `yaml:",omitempty"`
	}
	// yaml tag for labels on nodesets
	yamlLabel struct {
		Labels map[string]string `yaml:",omitempty"`
	}
	// yaml tag for custom playbooks
	yamlPlaybooks struct {
		Playbooks map[string]string `yaml:",omitempty"`
	}
	// yaml tag for a component
	yamlComponent struct {
		// The source repository where the component lives
		Repository string
		// The ref (branch or tag) of the component to use
		Ref string
		// The authentication parameters
		yamlAuth `yaml:",inline"`
	}
	// yaml tag for a volume and its parameters
	yamlVolume struct {
		// The mounting path of the created volume
		Path string
		// The parameters required to create the volume (typically provider dependent)
		yamlParams `yaml:",inline"`
	}
	// yaml tag for a shared volume content
	yamlVolumeContent struct {
		// The component holding the content to copy into the volume
		Component string
		// The path of the content to copy
		Path string
	}
	// yaml reference to a provider
	yamlProviderRef struct {
		Name string
		// The overriding provider parameters
		yamlParams `yaml:",inline"`
		// The overriding provider environment variables
		yamlEnv `yaml:",inline"`
		// The overriding provider proxy
		Proxy yamlProxy
	}
	// yaml reference to the orchestrator
	yamlOrchestratorRef struct {
		// The overriding orchestrator parameters
		yamlParams `yaml:",inline"`
		// The overriding orchestrator environment variables
		yamlEnv `yaml:",inline"`
	}
	// yaml reference to a task
	yamlTaskRef struct {
		// The referenced task
		Task string
		// Prefix, optional string used to prefix the stored hook results.
		Prefix string
		// The overriding parameters
		yamlParams `yaml:",inline"`
		// The overriding environment variables
		yamlEnv `yaml:",inline"`
	}
	// yaml tag for hooks
	yamlHook struct {
		// Hooks to be executed before the corresponding process step
		Before []yamlTaskRef `yaml:",omitempty"`
		// Hooks to be executed after the corresponding process step
		After []yamlTaskRef `yaml:",omitempty"`
	}
	// yaml tag for the Ekara platform section of the descriptor
	yamlEkara struct {
		// Base for all non-absolute components
		Base string `yaml:",omitempty"`
		// Parent component
		Parent yamlComponent
		// Components declared
		Components map[string]yamlComponent
		// The list of path patterns where to apply the template mechanism
		Templates []string
		// The list of custom playbooks
		yamlPlaybooks `yaml:",inline"`
	}
	// yaml tag for a node set
	yamlNode struct {
		// The number of instances to create within the node set
		Instances int
		// The provider used to create the node set and its settings
		Provider yamlProviderRef
		// The orchestrator settings for this node set
		Orchestrator yamlOrchestratorRef
		// The volumes attached to this node set
		Volumes []yamlVolume
		// The Hooks to be executed while creating the node set
		Hooks struct {
			Create yamlHook `yaml:",omitempty"`
			Destroy yamlHook `yaml:",omitempty"`
		} `yaml:",omitempty"`
		// The labels associated with the nodeset
		yamlLabel `yaml:",inline"`
	}
	// Definition of the Ekara environment
	yamlEnvironment struct {
		// The name of the environment
		Name string
		// The qualifier of the environment
		Qualifier string `yaml:",omitempty"`
		// The description of the environment
		Description string `yaml:",omitempty"`
		// The Ekara platform used to interact with the environment
		Ekara yamlEkara
		// The descriptor variables
		yamlVars `yaml:",inline"`
		// Tasks which can be run on the created environment
		Tasks map[string]struct {
			// Name of the task component
			Component string
			// The task parameters
			yamlParams `yaml:",inline"`
			// The task environment variables
			yamlEnv `yaml:",inline"`
			// The name of the playbook to launch the task
			Playbook string `yaml:",omitempty"`
			// The Hooks to be executed in addition to the main task playbook
			Hooks struct {
				Execute yamlHook `yaml:",omitempty"`
			} `yaml:",omitempty"`
		}
		// Global definition of the orchestrator to install on the environment
		Orchestrator struct {
			// Name of the orchestrator component
			Component string
			// The orchestrator parameters
			yamlParams `yaml:",inline"`
			// The orchestrator environment variables
			yamlEnv `yaml:",inline"`
		}
		// The list of all cloud providers required to create the environment
		Providers map[string]struct {
			// Name of the provider component
			Component string
			// The provider parameters
			yamlParams `yaml:",inline"`
			// The provider environment variables
			yamlEnv `yaml:",inline"`
			// The provider proxy
			Proxy yamlProxy
		}
		// The list of node sets to create
		Nodes map[string]yamlNode
		// Software stacks to be installed on the environment
		Stacks map[string]struct {
			// Name of the stack component
			Component string
			// The name of the stacks on which this one depends
			Dependencies []string `yaml:",omitempty"`
			// The Hooks to be executed while deploying the stack
			Hooks struct {
				Deploy yamlHook `yaml:",omitempty"`
			} `yaml:",omitempty"`
			// The parameters
			yamlParams `yaml:",inline"`
			// The environment variables
			yamlEnv `yaml:",inline"`
			// The stack content to be copied on volumes
			Copies map[string]yamlCopy
			// Custom playbook
			Playbook string
		}
		// Global hooks
		Hooks struct {
			Init yamlHook `yaml:",omitempty"`
			Create yamlHook `yaml:",omitempty"`
			Install yamlHook `yaml:",omitempty"`
			Deploy yamlHook `yaml:",omitempty"`
			Delete yamlHook `yaml:",omitempty"`
		} `yaml:",omitempty"`
		// Global volumes
		Volumes map[string]struct {
			Content []yamlVolumeContent `yaml:",omitempty"`
		} `yaml:",omitempty"`
	}
	// Reduced view of the descriptor used to resolve component references only
	yamlRefs struct {
		Ekara yamlEkara
		yamlVars `yaml:",inline"`
		Orchestrator struct {
			Component string
		}
		Providers map[string]struct {
			Component string
		}
		Nodes map[string]struct {
			Provider struct {
				Name string
			}
		}
		Stacks map[string]struct {
			Component string
		}
		Tasks map[string]struct {
			Component string
		}
	}
)
// parseYaml reads the descriptor at path, resolves its "vars:" section into
// the template context, applies templating to the whole content and
// unmarshals the result into out.
// Errors are wrapped with %w so callers can unwrap the underlying cause.
func parseYaml(path string, tplC *TemplateContext, out interface{}) error {
	// Read descriptor content
	content, err := ioutil.ReadFile(path)
	if err != nil {
		return err
	}
	// Parse just the "vars:" section of the descriptor and fill the template context with it
	if err := parseVars(content, tplC); err != nil {
		return fmt.Errorf("yaml error in %s: %w", path, err)
	}
	// Template the content of the environment descriptor with the updated template context.
	// The path is included for consistency with the other error cases.
	templated, err := tplC.Execute(string(content))
	if err != nil {
		return fmt.Errorf("template error in %s: %w", path, err)
	}
	// Unmarshal the resulting YAML into the output structure
	if err := yaml.Unmarshal([]byte(templated), out); err != nil {
		return fmt.Errorf("yaml error in %s: %w", path, err)
	}
	return nil
}
// parseVars parses the "vars:" section of the descriptor
func parseVars(content []byte, tplC *TemplateContext) error {
readVars := func(b []byte) (yamlVars, error) {
yVars := yamlVars{}
err := yaml.Unmarshal(b, &yVars)
if err != nil {
return yVars, err
}
return yVars, nil
}
// Read raw vars
vars, err := readVars(content)
if err != nil {
return err
}
// Marshal variables to be able to template only this part
onlyVars, err := yaml.Marshal(vars)
if err != nil {
return err
}
// Apply template
templatedVars, err := tplC.Execute(string(onlyVars))
if err != nil {
return err
}
// Read templated vars the templated vars again to merge them into the template context
vars, err = readVars([]byte(templatedVars))
if err != nil {
return err
}
if len(vars.Vars) > 0 {
tplC.addVars(vars.Vars)
}
return nil
} | model/yaml.go | 0.590189 | 0.453867 | yaml.go | starcoder |
package data
// FeedPtr represents the dynamic metadata value in which a feed is providing the value.
// When a Meta field holds a FeedPtr instead of a literal, the value is sourced
// in real time from the referenced data feed.
type FeedPtr struct {
	FeedID string `json:"feed,omitempty"`
}
// Meta contains information on an entities metadata table. Metadata key/value
// pairs are used by a records' filter pipeline during a dns query.
// All values can be a feed id as well, indicating real-time updates of these values.
// Structure/Precedence of metadata tables:
//   - Record
//   - Meta <- lowest precedence in filter
//   - Region(s)
//   - Meta <- middle precedence in filter chain
//   - ...
//   - Answer(s)
//   - Meta <- highest precedence in filter chain
//   - ...
//   - ...
type Meta struct {
	// STATUS
	// Indicates whether or not entity is considered 'up'
	// bool or FeedPtr.
	Up interface{} `json:"up,omitempty"`
	// Indicates the number of active connections.
	// Values must be positive.
	// int or FeedPtr.
	Connections interface{} `json:"connections,omitempty"`
	// Indicates the number of active requests (HTTP or otherwise).
	// Values must be positive.
	// int or FeedPtr.
	Requests interface{} `json:"requests,omitempty"`
	// Indicates the "load average".
	// Values must be positive, and will be rounded to the nearest tenth.
	// float64 or FeedPtr.
	LoadAvg interface{} `json:"loadavg,omitempty"`
	// The Job ID of a Pulsar telemetry gathering job and routing granularities
	// to associate with.
	// string or FeedPtr.
	Pulsar interface{} `json:"pulsar,omitempty"`
	// GEOGRAPHICAL
	// Must be between -90.0 and +90.0 where negative
	// indicates South and positive indicates North.
	// e.g., the latitude of the datacenter where a server resides.
	// float64 or FeedPtr.
	Latitude interface{} `json:"latitude,omitempty"`
	// Must be between -180.0 and +180.0 where negative
	// indicates West and positive indicates East.
	// e.g., the longitude of the datacenter where a server resides.
	// float64 or FeedPtr.
	Longitude interface{} `json:"longitude,omitempty"`
	// Valid geographic regions are: 'US-EAST', 'US-CENTRAL', 'US-WEST',
	// 'EUROPE', 'ASIAPAC', 'SOUTH-AMERICA', 'AFRICA'.
	// e.g., the rough geographic location of the Datacenter where a server resides.
	// []string or FeedPtr.
	Georegion interface{} `json:"georegion,omitempty"`
	// Countries must be specified as ISO3166 2-character country code(s).
	// []string or FeedPtr.
	Country interface{} `json:"country,omitempty"`
	// State(s) must be specified as standard 2-character state code(s).
	// []string or FeedPtr.
	USState interface{} `json:"us_state,omitempty"`
	// Canadian Province(s) must be specified as standard 2-character province
	// code(s).
	// []string or FeedPtr.
	CAProvince interface{} `json:"ca_province,omitempty"`
	// INFORMATIONAL
	// Notes to indicate any necessary details for operators.
	// Up to 256 characters in length.
	// string or FeedPtr.
	Note interface{} `json:"note,omitempty"`
	// NETWORK
	// IP (v4 and v6) prefixes in CIDR format ("a.b.c.d/mask").
	// May include up to 1000 prefixes.
	// e.g., "1.2.3.4/24"
	// []string or FeedPtr.
	IPPrefixes interface{} `json:"ip_prefixes,omitempty"`
	// Autonomous System (AS) number(s).
	// May include up to 1000 AS numbers.
	// []string or FeedPtr.
	ASN interface{} `json:"asn,omitempty"`
	// TRAFFIC
	// Indicates the "priority tier".
	// Lower values indicate higher priority.
	// Values must be positive.
	// int or FeedPtr.
	Priority interface{} `json:"priority,omitempty"`
	// Indicates a weight.
	// Filters that use weights normalize them.
	// Any positive values are allowed.
	// Values between 0 and 100 are recommended for simplicity's sake.
	// float64 or FeedPtr.
	Weight interface{} `json:"weight,omitempty"`
	// Indicates a "low watermark" to use for load shedding.
	// The value should depend on the metric used to determine
	// load (e.g., loadavg, connections, etc).
	// int or FeedPtr.
	LowWatermark interface{} `json:"low_watermark,omitempty"`
	// Indicates a "high watermark" to use for load shedding.
	// The value should depend on the metric used to determine
	// load (e.g., loadavg, connections, etc).
	// int or FeedPtr.
	HighWatermark interface{} `json:"high_watermark,omitempty"`
}
package caesar
import (
"errors"
"strings"
"github.com/stripedpajamas/caesar/runes"
)
// Bifid represents the Bifid cipher
// and conforms to the Cipher interface.
// The type is stateless; the key is supplied per call.
// https://en.wikipedia.org/wiki/Bifid_cipher
type Bifid struct{}
// Encrypt operates on a plaintext string and a key string.
// The function constructs an alphabet square from the key,
// and obtains substitution values from it. The substitution values
// are transposed and the transposed values are converted back into letters.
func (b Bifid) Encrypt(plaintext, key string) (string, error) {
	kb := newKeyblock(key)
	staging := b.initialProcess(plaintext, kb)
	// Fractionation: write all row coordinates into the first half of the
	// flat sequence and all column coordinates into the second half.
	transposed := make([]int, len(staging)*2)
	for i := 0; i < len(staging); i++ {
		transposed[i] = staging[i].row
		transposed[i+len(staging)] = staging[i].col
	}
	// Re-pair adjacent numbers of the flattened sequence into new
	// row/column coordinates.
	pairs := make([]location, len(staging))
	pairIdx := 0
	for i := 0; i < len(transposed); i += 2 {
		pairs[pairIdx] = location{row: transposed[i], col: transposed[i+1]}
		pairIdx++
	}
	return b.pairsToValues(pairs, kb)
}
// Decrypt operates on a ciphertext string and a key string.
// The function constructs an alphabet square from the key,
// and obtains substitution values from it. The substitution values
// are de-transposed into values that are looked up in the square
// to obtain the original plaintext string.
func (b Bifid) Decrypt(ciphertext, key string) (string, error) {
	kb := newKeyblock(key)
	staging := b.initialProcess(ciphertext, kb)
	// Flatten the ciphertext coordinates pairwise: row, col, row, col, ...
	flat := make([]int, len(staging)*2)
	for i, loc := range staging {
		flat[2*i] = loc.row
		flat[(2*i)+1] = loc.col
	}
	// Undo the encryption transposition: plaintext rows came from the first
	// half of the flat sequence, columns from the second half (i, i+len).
	pairs := make([]location, len(staging))
	for i := 0; i < len(pairs); i++ {
		pairs[i] = location{row: flat[i], col: flat[i+len(staging)]}
	}
	return b.pairsToValues(pairs, kb)
}
// initialProcess cleans the input and maps each letter to its row/column
// location in the key square.
func (b Bifid) initialProcess(input string, kb *keyblock) []location {
	cleanInput := runes.Clean(input)
	staging := make([]location, len(cleanInput))
	// NOTE(review): i is a byte offset (range over string) and staging is
	// sized in bytes. Any position that is skipped below (non-letter, lookup
	// miss) — or any byte skipped by a multi-byte rune — keeps the zero-value
	// location {0,0}, which still maps to a square cell. This presumably
	// relies on runes.Clean returning only single-byte letters — confirm.
	for i, r := range cleanInput {
		if !runes.IsLetter(r) {
			continue
		}
		loc, err := kb.getLocation(runes.ToUpper(r))
		if err != nil {
			// somehow a letter that isn't in the keyblock
			// skip it
			continue
		}
		staging[i] = loc
	}
	return staging
}
// pairsToValues converts row/column coordinate pairs back into letters by
// looking each pair up in the key square.
func (b Bifid) pairsToValues(pairs []location, kb *keyblock) (string, error) {
	// convert pairs into letters
	var out strings.Builder
	for _, loc := range pairs {
		r, err := kb.getValue(loc)
		if err != nil {
			// Every pair was produced from a valid square location, so a
			// failed reverse lookup means internal corruption.
			return "", errors.New("error converting transposed pairs into letters")
		}
		out.WriteRune(r)
	}
	return out.String(), nil
}
package kyberk2so
// byteopsLoad32 returns the 32-bit unsigned integer encoded little-endian in
// the first four bytes of x.
func byteopsLoad32(x []byte) uint32 {
	return uint32(x[0]) |
		uint32(x[1])<<8 |
		uint32(x[2])<<16 |
		uint32(x[3])<<24
}
// byteopsLoad24 returns a 32-bit unsigned integer holding the little-endian
// value of the first three bytes of x.
func byteopsLoad24(x []byte) uint32 {
	return uint32(x[0]) |
		uint32(x[1])<<8 |
		uint32(x[2])<<16
}
// byteopsCbd computes a polynomial with coefficients distributed
// according to a centered binomial distribution with parameter eta,
// given an array of uniformly random bytes.
func byteopsCbd(buf []byte, paramsK int) poly {
	var t, d uint32
	var a, b int16
	var r poly
	switch paramsK {
	case 2:
		// K=2 path: 3 bytes of randomness yield 4 coefficients.
		for i := 0; i < paramsN/4; i++ {
			t = byteopsLoad24(buf[3*i:])
			// Sum bit-triples in parallel: 0x00249249 has every third bit
			// set, so d accumulates the popcount of each 3-bit group.
			d = t & 0x00249249
			d = d + ((t >> 1) & 0x00249249)
			d = d + ((t >> 2) & 0x00249249)
			// Each coefficient is the difference of two such bit-sums.
			for j := 0; j < 4; j++ {
				a = int16((d >> (6*j + 0)) & 0x7)
				b = int16((d >> (6*j + paramsETAK512)) & 0x7)
				r[4*i+j] = a - b
			}
		}
	default:
		// K=3/4 path: 4 bytes of randomness yield 8 coefficients.
		for i := 0; i < paramsN/8; i++ {
			t = byteopsLoad32(buf[4*i:])
			// Sum bit-pairs in parallel: 0x55555555 has every second bit set.
			d = t & 0x55555555
			d = d + ((t >> 1) & 0x55555555)
			for j := 0; j < 8; j++ {
				a = int16((d >> (4*j + 0)) & 0x3)
				b = int16((d >> (4*j + paramsETAK768K1024)) & 0x3)
				r[8*i+j] = a - b
			}
		}
	}
	return r
}
// byteopsMontgomeryReduce computes a Montgomery reduction; given
// a 32-bit integer `a`, returns `a * R^-1 mod Q` where `R=2^16`.
func byteopsMontgomeryReduce(a int32) int16 {
	// u = a * Q^-1 mod 2^16; the int16 conversion performs the truncation.
	u := int16(a * int32(paramsQinv))
	// a - u*Q is divisible by 2^16; its high half is the reduced result.
	t := int32(u) * int32(paramsQ)
	t = a - t
	t >>= 16
	return int16(t)
}
// byteopsBarrettReduce computes a Barrett reduction; given
// a 16-bit integer `a`, returns a 16-bit integer congruent to
// `a mod Q` in {0,...,Q}.
func byteopsBarrettReduce(a int16) int16 {
	var t int16
	// v approximates 2^26/Q (rounded), so t below approximates a/Q.
	var v int16 = int16(((uint32(1) << 26) + uint32(paramsQ/2)) / uint32(paramsQ))
	t = int16(int32(v) * int32(a) >> 26)
	t = t * int16(paramsQ)
	return a - t
}
// byteopsCSubQ conditionally subtracts Q from a, branch-free.
func byteopsCSubQ(a int16) int16 {
	a = a - int16(paramsQ)
	// If the subtraction went negative, the arithmetic shift turns the sign
	// bit into an all-ones mask and Q is added back.
	a = a + ((a >> 15) & int16(paramsQ))
	return a
}
package fp
// DropWhile returns a fresh copy of l without its longest prefix of elements satisfying p.
func (l BoolArray) DropWhile(p func(bool) bool) BoolArray {
	i := 0
	for i < len(l) && p(l[i]) {
		i++
	}
	rest := make([]bool, len(l)-i)
	copy(rest, l[i:])
	return rest
}
// DropWhile returns a fresh copy of l without its longest prefix of elements satisfying p.
func (l StringArray) DropWhile(p func(string) bool) StringArray {
	i := 0
	for i < len(l) && p(l[i]) {
		i++
	}
	rest := make([]string, len(l)-i)
	copy(rest, l[i:])
	return rest
}
// DropWhile returns a fresh copy of l without its longest prefix of elements satisfying p.
func (l IntArray) DropWhile(p func(int) bool) IntArray {
	i := 0
	for i < len(l) && p(l[i]) {
		i++
	}
	rest := make([]int, len(l)-i)
	copy(rest, l[i:])
	return rest
}
// DropWhile returns a fresh copy of l without its longest prefix of elements satisfying p.
func (l Int64Array) DropWhile(p func(int64) bool) Int64Array {
	i := 0
	for i < len(l) && p(l[i]) {
		i++
	}
	rest := make([]int64, len(l)-i)
	copy(rest, l[i:])
	return rest
}
// DropWhile returns a fresh copy of l without its longest prefix of elements satisfying p.
func (l ByteArray) DropWhile(p func(byte) bool) ByteArray {
	i := 0
	for i < len(l) && p(l[i]) {
		i++
	}
	rest := make([]byte, len(l)-i)
	copy(rest, l[i:])
	return rest
}
// DropWhile returns a fresh copy of l without its longest prefix of elements satisfying p.
func (l RuneArray) DropWhile(p func(rune) bool) RuneArray {
	i := 0
	for i < len(l) && p(l[i]) {
		i++
	}
	rest := make([]rune, len(l)-i)
	copy(rest, l[i:])
	return rest
}
// DropWhile returns a fresh copy of l without its longest prefix of elements satisfying p.
func (l Float32Array) DropWhile(p func(float32) bool) Float32Array {
	i := 0
	for i < len(l) && p(l[i]) {
		i++
	}
	rest := make([]float32, len(l)-i)
	copy(rest, l[i:])
	return rest
}
// DropWhile returns a fresh copy of l without its longest prefix of elements satisfying p.
func (l Float64Array) DropWhile(p func(float64) bool) Float64Array {
	i := 0
	for i < len(l) && p(l[i]) {
		i++
	}
	rest := make([]float64, len(l)-i)
	copy(rest, l[i:])
	return rest
}
// DropWhile returns a fresh copy of l without its longest prefix of elements satisfying p.
func (l AnyArray) DropWhile(p func(Any) bool) AnyArray {
	i := 0
	for i < len(l) && p(l[i]) {
		i++
	}
	rest := make([]Any, len(l)-i)
	copy(rest, l[i:])
	return rest
}
// DropWhile returns a fresh copy of l without its longest prefix of elements satisfying p.
func (l Tuple2Array) DropWhile(p func(Tuple2) bool) Tuple2Array {
	i := 0
	for i < len(l) && p(l[i]) {
		i++
	}
	rest := make([]Tuple2, len(l)-i)
	copy(rest, l[i:])
	return rest
}
func (l BoolArrayArray) DropWhile(p func([]bool) bool) BoolArrayArray {
size := len(l)
var n int
for n = 0; n < size && p(l[n]); n ++ {}
acc := make([][]bool, size - n)
copy(acc, l[n: size])
return acc
}
func (l StringArrayArray) DropWhile(p func([]string) bool) StringArrayArray {
size := len(l)
var n int
for n = 0; n < size && p(l[n]); n ++ {}
acc := make([][]string, size - n)
copy(acc, l[n: size])
return acc
}
// DropWhile returns a copy of l without its longest leading prefix of
// elements satisfying p.
func (l IntArrayArray) DropWhile(p func([]int) bool) IntArrayArray {
	i := 0
	for i < len(l) && p(l[i]) {
		i++
	}
	rest := make([][]int, len(l)-i)
	copy(rest, l[i:])
	return rest
}
// DropWhile returns a copy of l without its longest leading prefix of
// elements satisfying p.
func (l Int64ArrayArray) DropWhile(p func([]int64) bool) Int64ArrayArray {
	i := 0
	for i < len(l) && p(l[i]) {
		i++
	}
	rest := make([][]int64, len(l)-i)
	copy(rest, l[i:])
	return rest
}
// DropWhile returns a copy of l without its longest leading prefix of
// elements satisfying p.
func (l ByteArrayArray) DropWhile(p func([]byte) bool) ByteArrayArray {
	i := 0
	for i < len(l) && p(l[i]) {
		i++
	}
	rest := make([][]byte, len(l)-i)
	copy(rest, l[i:])
	return rest
}
// DropWhile returns a copy of l without its longest leading prefix of
// elements satisfying p.
func (l RuneArrayArray) DropWhile(p func([]rune) bool) RuneArrayArray {
	i := 0
	for i < len(l) && p(l[i]) {
		i++
	}
	rest := make([][]rune, len(l)-i)
	copy(rest, l[i:])
	return rest
}
// DropWhile returns a copy of l without its longest leading prefix of
// elements satisfying p.
func (l Float32ArrayArray) DropWhile(p func([]float32) bool) Float32ArrayArray {
	i := 0
	for i < len(l) && p(l[i]) {
		i++
	}
	rest := make([][]float32, len(l)-i)
	copy(rest, l[i:])
	return rest
}
// DropWhile returns a copy of l without its longest leading prefix of
// elements satisfying p.
func (l Float64ArrayArray) DropWhile(p func([]float64) bool) Float64ArrayArray {
	i := 0
	for i < len(l) && p(l[i]) {
		i++
	}
	rest := make([][]float64, len(l)-i)
	copy(rest, l[i:])
	return rest
}
// DropWhile returns a copy of l without its longest leading prefix of
// elements satisfying p.
func (l AnyArrayArray) DropWhile(p func([]Any) bool) AnyArrayArray {
	i := 0
	for i < len(l) && p(l[i]) {
		i++
	}
	rest := make([][]Any, len(l)-i)
	copy(rest, l[i:])
	return rest
}
func (l Tuple2ArrayArray) DropWhile(p func([]Tuple2) bool) Tuple2ArrayArray {
size := len(l)
var n int
for n = 0; n < size && p(l[n]); n ++ {}
acc := make([][]Tuple2, size - n)
copy(acc, l[n: size])
return acc
} | fp/bootstrap_array_dropwhile.go | 0.684475 | 0.556761 | bootstrap_array_dropwhile.go | starcoder |
package zipcodes
import (
"bufio"
"fmt"
"log"
"math"
"os"
"strconv"
"strings"
)
const (
	// Mean Earth radii used by the haversine helpers below.
	earthRadiusKm = 6371 // kilometers
	earthRadiusMi = 3958 // miles
)
// ZipCodeLocation struct represents each line of the dataset
type ZipCodeLocation struct {
	ZipCode string    // postal code (dataset field 1)
	PlaceName string  // locality name (dataset field 2)
	AdminName string  // administrative area name (dataset field 3)
	Lat float64       // latitude in decimal degrees (dataset field 9)
	Lon float64       // longitude in decimal degrees (dataset field 10)
}
// Zipcodes contains the whole list of structs representing
// the zipcode dataset
type Zipcodes struct {
	DatasetList map[string]ZipCodeLocation // entries keyed by zipcode string
}
// New loads the dataset at datasetPath and returns a Zipcodes handle
// wrapping the parsed entries, or an error when loading fails.
func New(datasetPath string) (*Zipcodes, error) {
	dataset, err := LoadDataset(datasetPath)
	if err != nil {
		return nil, err
	}
	return &dataset, nil
}
// Lookup looks for a zipcode inside the map interface.
// It returns a pointer to a copy of the stored entry, or a zero-value
// entry plus an error when the zipcode is not in the dataset.
func (zc *Zipcodes) Lookup(zipCode string) (*ZipCodeLocation, error) {
	// Use the two-value map lookup so a missing key is distinguished from
	// a stored entry that happens to equal the zero value (the previous
	// struct-equality check conflated the two).
	foundedZipcode, ok := zc.DatasetList[zipCode]
	if !ok {
		return &ZipCodeLocation{}, fmt.Errorf("zipcodes: zipcode %s not found !", zipCode)
	}
	return &foundedZipcode, nil
}
// DistanceInKm returns the line of sight distance between two zipcodes in Kilometers
// (haversine great-circle distance; see CalculateDistance).
func (zc *Zipcodes) DistanceInKm(zipCodeA string, zipCodeB string) (float64, error) {
	return zc.CalculateDistance(zipCodeA, zipCodeB, earthRadiusKm)
}
// DistanceInMiles returns the line of sight distance between two zipcodes in Miles
// (haversine great-circle distance; see CalculateDistance).
func (zc *Zipcodes) DistanceInMiles(zipCodeA string, zipCodeB string) (float64, error) {
	return zc.CalculateDistance(zipCodeA, zipCodeB, earthRadiusMi)
}
// CalculateDistance returns the line of sight distance between two
// zipcodes, in the unit implied by the supplied sphere radius.
func (zc *Zipcodes) CalculateDistance(zipCodeA string, zipCodeB string, radius float64) (float64, error) {
	from, err := zc.Lookup(zipCodeA)
	if err != nil {
		return 0, err
	}
	to, err := zc.Lookup(zipCodeB)
	if err != nil {
		return 0, err
	}
	return DistanceBetweenPoints(from.Lat, from.Lon, to.Lat, to.Lon, radius), nil
}
// DistanceInKmToZipCode calculates the distance in kilometers between a
// zipcode and an arbitrary latitude/longitude pair.
func (zc *Zipcodes) DistanceInKmToZipCode(zipCode string, latitude, longitude float64) (float64, error) {
	loc, err := zc.Lookup(zipCode)
	if err != nil {
		return 0, err
	}
	return DistanceBetweenPoints(loc.Lat, loc.Lon, latitude, longitude, earthRadiusKm), nil
}
// DistanceInMilToZipCode calculates the distance in miles between a
// zipcode and an arbitrary latitude/longitude pair.
func (zc *Zipcodes) DistanceInMilToZipCode(zipCode string, latitude, longitude float64) (float64, error) {
	loc, err := zc.Lookup(zipCode)
	if err != nil {
		return 0, err
	}
	return DistanceBetweenPoints(loc.Lat, loc.Lon, latitude, longitude, earthRadiusMi), nil
}
// GetZipcodesWithinKmRadius returns every other zipcode in the dataset
// lying within radius kilometers of the given zipcode.
func (zc *Zipcodes) GetZipcodesWithinKmRadius(zipCode string, radius float64) ([]string, error) {
	loc, err := zc.Lookup(zipCode)
	if err != nil {
		return []string{}, err
	}
	return zc.FindZipcodesWithinRadius(loc, radius, earthRadiusKm), nil
}
// GetZipcodesWithinMlRadius returns every other zipcode in the dataset
// lying within radius miles of the given zipcode.
func (zc *Zipcodes) GetZipcodesWithinMlRadius(zipCode string, radius float64) ([]string, error) {
	loc, err := zc.Lookup(zipCode)
	if err != nil {
		return []string{}, err
	}
	return zc.FindZipcodesWithinRadius(loc, radius, earthRadiusMi), nil
}
// FindZipcodesWithinRadius returns the zipcodes of all dataset entries
// (excluding location itself) closer than maxRadius to location, using
// a sphere of the given earthRadius for the distance computation.
func (zc *Zipcodes) FindZipcodesWithinRadius(location *ZipCodeLocation, maxRadius float64, earthRadius float64) []string {
	matches := []string{}
	for _, candidate := range zc.DatasetList {
		if candidate.ZipCode == location.ZipCode {
			continue
		}
		if DistanceBetweenPoints(location.Lat, location.Lon, candidate.Lat, candidate.Lon, earthRadius) < maxRadius {
			matches = append(matches, candidate.ZipCode)
		}
	}
	return matches
}
// hsin computes the haversine of angle t (radians): sin²(t/2).
func hsin(t float64) float64 {
	half := t / 2
	return math.Pow(math.Sin(half), 2)
}
// degreesToRadians converts an angle expressed in degrees to radians.
func degreesToRadians(degrees float64) float64 {
	scaled := degrees * math.Pi
	return scaled / 180
}
// DistanceBetweenPoints returns the haversine great-circle distance
// between two latitude/longitude pairs on a sphere of the given radius,
// rounded to two decimal places.
func DistanceBetweenPoints(latitude1, longitude1, latitude2, longitude2 float64, radius float64) float64 {
	phi1 := degreesToRadians(latitude1)
	lam1 := degreesToRadians(longitude1)
	phi2 := degreesToRadians(latitude2)
	lam2 := degreesToRadians(longitude2)
	// Haversine formula: a is the squared half-chord length between the
	// two points, c the angular separation.
	a := hsin(phi2-phi1) + math.Cos(phi1)*math.Cos(phi2)*hsin(lam2-lam1)
	c := 2 * math.Atan2(math.Sqrt(a), math.Sqrt(1-a))
	dist := c * radius
	return math.Round(dist*100) / 100
}
// LoadDataset reads and loads the dataset into a map interface
func LoadDataset(datasetPath string) (Zipcodes, error) {
file, err := os.Open(datasetPath)
if err != nil {
log.Fatal(err)
return Zipcodes{}, fmt.Errorf("zipcodes: error while opening file %v", err)
}
defer file.Close()
scanner := bufio.NewScanner(file)
zipcodeMap := Zipcodes{DatasetList: make(map[string]ZipCodeLocation)}
for scanner.Scan() {
splittedLine := strings.Split(scanner.Text(), "\t")
if len(splittedLine) != 12 {
return Zipcodes{}, fmt.Errorf("zipcodes: file line does not have 12 fields")
}
lat, errLat := strconv.ParseFloat(splittedLine[9], 64)
if errLat != nil {
return Zipcodes{}, fmt.Errorf("zipcodes: error while converting %s to Latitude", splittedLine[9])
}
lon, errLon := strconv.ParseFloat(splittedLine[10], 64)
if errLon != nil {
return Zipcodes{}, fmt.Errorf("zipcodes: error while converting %s to Longitude", splittedLine[10])
}
zipcodeMap.DatasetList[splittedLine[1]] = ZipCodeLocation{
ZipCode: splittedLine[1],
PlaceName: splittedLine[2],
AdminName: splittedLine[3],
Lat: lat,
Lon: lon,
}
}
if err := scanner.Err(); err != nil {
return Zipcodes{}, fmt.Errorf("zipcodes: error while opening file %v", err)
}
return zipcodeMap, nil
} | zipcodes.go | 0.799716 | 0.51251 | zipcodes.go | starcoder |
package ray
import (
"math"
)
// Sphere is a canonical sphere, centered at the origin, of radius 1.
// World-space position, orientation, and size come from the embedded
// Transform; shading properties come from the embedded Surface.
type Sphere struct {
	Transform // local<->global coordinate mapping
	Surface   // material / shading parameters
	name string // optional identifier, set via SetName
}
// NewSphere instantiates a unit sphere with the identity transform and
// the default surface.
func NewSphere() *Sphere {
	sph := new(Sphere)
	sph.Transform = IDTransform
	sph.Surface = DefaultSurface
	return sph
}
// SetName records the sphere's name, prefixed with "sphere:" so the
// object kind is visible wherever the name is printed.
func (s *Sphere) SetName(name string) {
	s.name = "sphere:" + name
}
// Name returns the sphere's name as stored by SetName
// (including the "sphere:" prefix).
func (s *Sphere) Name() string {
	return s.name
}
// Surf returns a pointer to the sphere's surface so callers can
// inspect or mutate the material in place.
func (s *Sphere) Surf() *Surface {
	return &s.Surface
}
// Translate applies a translation to the sphere and returns the
// receiver to allow call chaining.
func (s *Sphere) Translate(x, y, z float64) *Sphere {
	s.Transform.Translate(x, y, z)
	return s
}
// RotateX applies a rotation around the x-axis to the sphere and
// returns the receiver to allow call chaining.
func (s *Sphere) RotateX(x float64) *Sphere {
	s.Transform.RotateX(x)
	return s
}
// RotateY applies a rotation around the y-axis to the sphere and
// returns the receiver to allow call chaining.
func (s *Sphere) RotateY(y float64) *Sphere {
	s.Transform.RotateY(y)
	return s
}
// RotateZ applies a rotation around the z-axis to the sphere and
// returns the receiver to allow call chaining.
func (s *Sphere) RotateZ(z float64) *Sphere {
	s.Transform.RotateZ(z)
	return s
}
// Scale applies a (possibly non-uniform) scaling transform to the
// sphere and returns the receiver to allow call chaining.
func (s *Sphere) Scale(x, y, z float64) *Sphere {
	s.Transform.Scale(x, y, z)
	return s
}
// Intersect finds the intersection point (if any) between a global ray
// and this sphere, and the normal at the intersection point.
// It returns nil when the ray misses the sphere or every root lies
// behind (or effectively at) the ray origin.
func (s *Sphere) Intersect(r Ray) *Hit {
	// Work in local coordinates, where the sphere is the unit sphere at
	// the origin.
	locRay := s.RayToLocal(r)
	dir := locRay.dir
	p := locRay.pt
	// Substituting p + t*dir into x²+y²+z² = 1 yields the quadratic
	// a·t² + b·t + c = 0 solved below.
	a := square(dir[X]) + square(dir[Y]) + square(dir[Z])
	b := 2 * (dir[X]*p[X] + dir[Y]*p[Y] + dir[Z]*p[Z])
	c := square(p[X]) + square(p[Y]) + square(p[Z]) - 1
	delta := square(b) - 4*a*c
	// Small or negative discriminant: the ray misses or merely grazes.
	if delta < BigEpsilon {
		return nil
	}
	sqd := math.Sqrt(delta)
	aa := 2 * a
	t := (sqd - b) / aa
	t2 := (-b - sqd) / aa
	// Keep the smallest root strictly in front of the ray origin.
	if t > Epsilon && t2 > Epsilon {
		t = math.Min(t, t2)
	} else if t2 > Epsilon {
		t = t2
	} else if t < Epsilon {
		return nil
	}
	var h Hit
	//log.Printf("gr=%v lr=%v", r, locRay)
	h.globRay = r
	h.locRay = locRay
	// Local hit point; for a unit sphere at the origin the local normal
	// direction equals the hit point's position vector.
	lp := Point3{p[X] + t*dir[X], p[Y] + t*dir[Y], p[Z] + t*dir[Z]}
	h.locNorm.pt = lp
	h.locNorm.dir = Vector3(lp)
	gp := s.PointToGlobal(lp)
	h.globNorm.pt = gp
	// Pick two auxiliary points A and B on the tangent plane at lp
	// (plane x·lp.x + y·lp.y + z·lp.z + d = 0), dividing by the largest
	// coordinate of lp for numerical stability.
	d := -(square(lp[X]) + square(lp[Y]) + square(lp[Z]))
	var A, B Point3
	//log.Printf("INTERSECT lp=%v gp=%v d=%v t=%v ray=%v locray=%v delta=%f", lp, gp, d, t, r, locRay, delta)
	absx := math.Abs(lp[X])
	absy := math.Abs(lp[Y])
	absz := math.Abs(lp[Z])
	switch {
	case absx >= absy && absx >= absz:
		A = Point3{-(lp[Z] + d) / lp[X], 0, 1}
		B = Point3{-(lp[Y] + d) / lp[X], 1, 0}
	case absy >= absx && absy >= absz:
		A = Point3{0, -(lp[Z] + d) / lp[Y], 1}
		B = Point3{1, -(lp[X] + d) / lp[Y], 0}
	default:
		A = Point3{0, 1, -(lp[Y] + d) / lp[Z]}
		B = Point3{1, 0, -(lp[X] + d) / lp[Z]}
	}
	//log.Printf("local A=%v B=%v", A, B)
	// Map the tangent points to global space and cross the two tangent
	// vectors to get the global normal; transforming tangents (rather
	// than the local normal) keeps normals correct under non-uniform
	// scaling.
	A = s.PointToGlobal(A)
	B = s.PointToGlobal(B)
	//log.Printf("gA=%v gB=%v", A, B)
	u := NewVec(gp, A)
	v := NewVec(gp, B)
	h.globNorm.dir = u.Cross(v)
	//log.Printf("globNorm: %v", h.globNorm.dir)
	h.globNorm.Normalize()
	// Orient the global normal to point away from the (transformed)
	// sphere center.
	o := s.PointToGlobal(Origin)
	w := Vector3{gp[X] - o[X], gp[Y] - o[Y], gp[Z] - o[Z]}
	//log.Printf("SPHERE u=%v v=%v gn=%v w=%v dot(gn,gp)=%f", u, v, h.globNorm.dir, w, h.globNorm.dir.Dot(w))
	if h.globNorm.dir.Dot(w) < 0 {
		h.globNorm.dir.Reverse()
	}
	h.Surface = &s.Surface
	return &h
}
/*
func debug(r Ray) bool {
if r.x == 300 && r.y == 220 {
return true
}
return false
}
*/
// MinMax returns the axis-aligned bounding box of the canonical unit
// sphere in local coordinates.
func (s *Sphere) MinMax() (Point3, Point3) {
	return Point3{-1, -1, -1}, Point3{1, 1, 1}
}
package hashmap
import (
"container/list"
"fmt"
)
// dataLen is the fixed number of hash buckets in the underlying array.
const dataLen uint32 = 37
// HashMap struct provides associative array semantics for string key/value pairs
// Implementation provides O(1) set, get & remove operations in average case
// Based on an underlying array of linked lists
type HashMap struct {
	len uint32 // number of entries currently stored
	data []*list.List // dataLen buckets, each a list of hashMapEntry values
}
// Internal struct for HashMap entries; stored by value in the bucket lists.
type hashMapEntry struct {
	key   string
	value string
}
// Internal struct for keeping track of HashMap iteration
// Warning: not stable if the HashMap mutates in the middle of iteration
type hashMapIter struct {
	hm *HashMap // map being iterated
	index uint32 // current bucket index
	node *list.Element // current element within the bucket; nil means iteration finished
}
// New returns an initialized HashMap with every bucket ready for use.
func New() *HashMap {
	buckets := make([]*list.List, dataLen)
	for i := range buckets {
		buckets[i] = list.New()
	}
	return &HashMap{data: buckets}
}
// Return the current number of elements in the HashMap - O(1)
func (hm HashMap) Len() uint32 {
	return hm.len
}
// Set the HashMap entry with `key` to `value`.
// Overwrites an existing entry with the same `key`, or inserts a new one.
// O(1) in average case, O(N) in worst case (all entries under same hash).
func (hm *HashMap) Set(key, value string) {
	l := hm.findList(key)
	_, node := hm.findEntry(key, l)
	if node != nil {
		// Write through the list element itself. findEntry's first return
		// value points at a local copy made by its type assertion, so the
		// previous `entry.value = value` mutated that copy and the stored
		// entry was never updated.
		node.Value = hashMapEntry{key, value}
	} else {
		l.PushBack(hashMapEntry{key, value})
		hm.len++
		// TODO: grow data slice when it gets crowded
	}
}
// Get returns the value of the entry stored under `key`, or an error
// when no such entry exists.
// O(1) in average case, O(N) in worst case (all entries under same hash).
func (hm HashMap) Get(key string) (string, error) {
	bucket := hm.findList(key)
	entry, _ := hm.findEntry(key, bucket)
	if entry == nil {
		return "", fmt.Errorf("No entry with key %s", key)
	}
	return entry.value, nil
}
// Remove deletes the entry stored under `key`, if present; otherwise it
// does nothing.
// O(1) in average case, O(N) in worst case (all entries under same hash).
func (hm *HashMap) Remove(key string) {
	bucket := hm.findList(key)
	if _, node := hm.findEntry(key, bucket); node != nil {
		bucket.Remove(node)
		hm.len--
	}
}
// findList returns the bucket (linked list) that `key` hashes into.
func (hm HashMap) findList(key string) *list.List {
	return hm.data[hm.hashKey(key)]
}
// Internal function for finding the list element within a specific list for a given key
// Returns both the "parsed" hashMapEntry and the list element itself
// Returns (nil, nil) if key doesn't exist in the list
//
// NOTE(review): the returned *hashMapEntry points at a local copy made
// by the type assertion, so writes through it do NOT reach the stored
// list element — mutate node.Value instead.
func (hm HashMap) findEntry(key string, l *list.List) (*hashMapEntry, *list.Element) {
	for node := l.Front(); node != nil; node = node.Next() {
		entry := node.Value.(hashMapEntry)
		if entry.key == key {
			return &entry, node
		}
	}
	return nil, nil
}
// hashKey maps a string key to a bucket index using a simple polynomial
// rolling hash (base 199) over the key's runes, reduced modulo the
// bucket count.
func (hm HashMap) hashKey(key string) uint32 {
	var h uint32
	for _, r := range key {
		h = h*199 + uint32(r)
	}
	return h % uint32(len(hm.data))
}
// Return a new iterator over the HashMap, positioned at the first entry.
//
// NOTE(review): the value receiver means the iterator captures a
// pointer to a local copy of the HashMap; iteration still observes the
// entries because the copy shares the same underlying bucket slice —
// confirm before changing the receiver or the data layout.
func (hm HashMap) StartIter() (iter hashMapIter) {
	iter = hashMapIter{&hm, 0, hm.data[0].Front()}
	iter.findNext()
	return
}
// Return true if Next() is expected to succeed
// (i.e. the iterator has not yet run past the last entry).
func (iter hashMapIter) HasNext() bool {
	return iter.node != nil
}
// Next returns the current key/value pair from the underlying HashMap
// and advances the iterator. Call HasNext first: calling Next past the
// end dereferences a nil node.
func (iter *hashMapIter) Next() (key, value string) {
	current := iter.node.Value.(hashMapEntry)
	iter.node = iter.node.Next()
	iter.findNext()
	return current.key, current.value
}
// Internal function for advancing the iterator to the next item
func (iter *hashMapIter) findNext() {
for iter.node == nil {
iter.index++
if iter.index >= uint32(len(iter.hm.data)) {
break
}
iter.node = iter.hm.data[iter.index].Front()
}
} | DataStruct/HashMap/go/hashmap.go | 0.685423 | 0.409929 | hashmap.go | starcoder |
package validation
import (
"github.com/overline-mining/gool/src/common"
"github.com/overline-mining/gool/src/olhash"
p2p_pb "github.com/overline-mining/gool/src/protos"
"go.uber.org/zap"
"math/big"
"sort"
)
// ValidateBlockRange checks that startingBlock followed by blocks forms
// a valid chain of ordered pairs. It returns (true, -1) on success, or
// (false, i) where i is the index in blocks of the first bad link,
// scanning from the newest pair backwards.
func ValidateBlockRange(startingBlock *p2p_pb.BcBlock, blocks []*p2p_pb.BcBlock) (bool, int) {
	for i := len(blocks) - 1; i > 0; i-- {
		if !OrderedBlockPairIsValid(blocks[i-1], blocks[i]) {
			return false, i
		}
	}
	if !OrderedBlockPairIsValid(startingBlock, blocks[0]) {
		return false, 0
	}
	return true, -1
}
// globalContiguityProblems whitelists specific historical block-height
// pairs that are known to break the contiguity rules on the live chain
// and must be accepted as-is.
func globalContiguityProblems(low, high *p2p_pb.BcBlock) bool {
	return ((low.GetHeight() == uint64(641452) && high.GetHeight() == uint64(641453)) || // stuck chain catch up
		(low.GetHeight() == uint64(1771220) && high.GetHeight() == uint64(1771221)) || // stuck chain catch up
		(low.GetHeight() == uint64(2921635) && high.GetHeight() == uint64(2921636))) // multiple rovers out of order
}
// btcContiguityProblems reports known BTC-rover contiguity exceptions
// for the given block pair; none are currently known.
func btcContiguityProblems(low, high *p2p_pb.BcBlock) bool {
	return false
}
// ethContiguityProblems whitelists historical block heights where the
// ETH rover's indexed headers are known to be missing or out of order
// on the live chain; pairs touching these heights skip the ETH
// contiguity check.
func ethContiguityProblems(low, high *p2p_pb.BcBlock) bool {
	return ((low.GetHeight() == uint64(6825682) || high.GetHeight() == uint64(6825682)) || // 6825682 eth is missing blocks 14426493 - 14426497
		(low.GetHeight() == uint64(6823254) || high.GetHeight() == uint64(6823254)) || // 6823254 eth is missing blocks 14425278 - 14425279
		(low.GetHeight() == uint64(6820589) || high.GetHeight() == uint64(6820589)) || // 6820589 eth is missing blocks 14423772 - 14423775
		(low.GetHeight() == uint64(6817490) || high.GetHeight() == uint64(6817490)) || // 6817490 eth is missing block 14422525
		(low.GetHeight() == uint64(6809276) || high.GetHeight() == uint64(6809276)) || // 6809276 eth is missing blocks 14419404 - 14419405
		(low.GetHeight() == uint64(6778570) || high.GetHeight() == uint64(6778570)) || // 6778570 eth is missing blocks 14407958 - 14407966
		(low.GetHeight() == uint64(6776209) || high.GetHeight() == uint64(6776209)) || // 6776209 eth is missing blocks 14406915 - 14406917
		(low.GetHeight() == uint64(6772753) || high.GetHeight() == uint64(6772753)) || // 6772753 eth is missing blocks 14404645 - 14404648
		(low.GetHeight() == uint64(6761621) || high.GetHeight() == uint64(6761621)) || // 6761621 eth is missing blocks 14398965 - 14398968
		(low.GetHeight() == uint64(6754138) || high.GetHeight() == uint64(6754138)) || // 6754138 eth is missing block 14395958
		(low.GetHeight() == uint64(6682094) || high.GetHeight() == uint64(6682094)) || // 6682094 eth is missing blocks 14365666 - 14365671
		(low.GetHeight() == uint64(6657585) && high.GetHeight() == uint64(6657586)) || // 6657585 eth does not increment to 6657586
		(low.GetHeight() == uint64(6638444) && high.GetHeight() == uint64(6638445)) || // 6638444 eth does not increment to 6638445
		(low.GetHeight() == uint64(6627338) && high.GetHeight() == uint64(6627339)) || // 6627338 eth does not increment to 6627339
		(low.GetHeight() == uint64(6620449) && high.GetHeight() == uint64(6620450)) || // 6620449 does not increment to 6620450
		(low.GetHeight() == uint64(6620262) && high.GetHeight() == uint64(6620263)) || // 6620262 does not increment to 6620263
		(low.GetHeight() == uint64(6611014) && high.GetHeight() == uint64(6611015)) || // 6611014 eth does not increment to 6611015
		(low.GetHeight() == uint64(6539050) && high.GetHeight() == uint64(6539051)) || // 6539050 does not increment to 6539051
		(low.GetHeight() == uint64(6457430) && high.GetHeight() == uint64(6457431)) || // 6457430 does not increment to 6457431
		(low.GetHeight() == uint64(6453532) && high.GetHeight() == uint64(6453533)) || // 6453532 eth does not point to 6453533 hash
		(low.GetHeight() == uint64(6392780) && high.GetHeight() == uint64(6392781)) || // 6392780 eth does not increment to 6392781
		(low.GetHeight() == uint64(6389354) && high.GetHeight() == uint64(6389355)) || // 6389354 eth does not increment to 6389355
		(low.GetHeight() == uint64(6075150) && high.GetHeight() == uint64(6075151)) || // 6075150 eth is not hash pointed to by 6075151
		(low.GetHeight() == uint64(6072723) && high.GetHeight() == uint64(6072724)) || // 6072723 eth does not increment to 6072724
		(low.GetHeight() == uint64(6061792) && high.GetHeight() == uint64(6061793)) || // 6061792 eth is not hash pointed to by 6061793
		(low.GetHeight() == uint64(6061585) && high.GetHeight() == uint64(6061586)) || // 6061585 eth is not hash pointed to by 6061586
		(low.GetHeight() == uint64(6058196) && high.GetHeight() == uint64(6058197)) || // 6058196 eth is not hash pointed to by 6058197
		(low.GetHeight() == uint64(6013776) && high.GetHeight() == uint64(6013777)) || // 6013776 eth does not increment to 6013777
		(low.GetHeight() == uint64(6005529) && high.GetHeight() == uint64(6005530)) || // 6005529 eth does not increment to 6005530
		(low.GetHeight() == uint64(5944824) && high.GetHeight() == uint64(5944825)) || // 5944824 eth is not hash pointed to by 5944825
		(low.GetHeight() == uint64(5943292) && high.GetHeight() == uint64(5943293)) || // 5943292 eth does not increment to 5943293
		(low.GetHeight() == uint64(5943089) && high.GetHeight() == uint64(5943090)) || // 5943089 eth is not hash pointed to by 5943090
		(low.GetHeight() == uint64(5943048) && high.GetHeight() == uint64(5943049)) || // 5943048 eth is not hash pointed to by 5943049
		(low.GetHeight() == uint64(5942422) && high.GetHeight() == uint64(5942423)) || // 5942422 eth does not increment to 5942423
		(low.GetHeight() == uint64(5761152) && high.GetHeight() == uint64(5761153)) || // 5761152 eth does not increment to 5761153
		(low.GetHeight() == uint64(5124029) && high.GetHeight() == uint64(5124030)) || // 5124029 eth does not increment to 5124030
		(low.GetHeight() == uint64(5124024) && high.GetHeight() == uint64(5124025)) || // 5124024 eth does not increment to 5124025
		(low.GetHeight() == uint64(5124020) && high.GetHeight() == uint64(5124021)) || // 5124020 eth does not increment to 5124021
		(low.GetHeight() == uint64(5123986) && high.GetHeight() == uint64(5123987)) || // 5123986 eth does not increment to 5123987
		(low.GetHeight() == uint64(5123968) && high.GetHeight() == uint64(5123969)) || // 5123968 eth does not increment to 5123969
		(low.GetHeight() == uint64(5123963) && high.GetHeight() == uint64(5123964)) || // 5123963 eth does not increment to 5123964
		(low.GetHeight() == uint64(3378927) || high.GetHeight() == uint64(3378927)) || // 3378927 eth missing 12206231
		(low.GetHeight() == uint64(3121968) && high.GetHeight() == uint64(3121969)) || // 3121968 eth does not increment to 3121969
		(low.GetHeight() == uint64(2820590) && high.GetHeight() == uint64(2820591))) // 2820590 eth does not increment to 2820591
}
// lskContiguityProblems whitelists historical block heights where the
// LSK rover's indexed headers are known to be missing or out of order
// on the live chain; pairs touching these heights skip the LSK
// contiguity check.
func lskContiguityProblems(low, high *p2p_pb.BcBlock) bool {
	return ((low.GetHeight() == uint64(6826628) || high.GetHeight() == uint64(6826628)) || // 6826628 lsk is missing blocks 18052855 - 18052863
		(low.GetHeight() == uint64(6825682) || high.GetHeight() == uint64(6825682)) || // 6825682 lsk is missing blocks 18052147 - 18052155
		(low.GetHeight() == uint64(6610732) && high.GetHeight() == uint64(6610733)) || // 6610732 lsk does not increment to 6610733
		(low.GetHeight() == uint64(6249671) && high.GetHeight() == uint64(6249672)) || // 6249671 lsk does not increment to 6249672
		(low.GetHeight() == uint64(5958529) && high.GetHeight() == uint64(5958530)) || // 5958529 lsk does not increment to 5958530
		(low.GetHeight() == uint64(5957773) && high.GetHeight() == uint64(5957774)) || // 5957773 lsk does not increment to 5957774
		(low.GetHeight() == uint64(5950202) && high.GetHeight() == uint64(5950203)) || // 5950202 lsk does not increment to 5950203
		(low.GetHeight() == uint64(5945645) && high.GetHeight() == uint64(5945646)) || // 5945645 lsk does not increment to 5945646
		(low.GetHeight() == uint64(5741082) && high.GetHeight() == uint64(5741083)) || // 5741082 lsk does not increment to 5741083
		(low.GetHeight() == uint64(5032519) && high.GetHeight() == uint64(5032520)) || // 5032519 lsk does not increment to 5032520
		(low.GetHeight() == uint64(4860129) && high.GetHeight() == uint64(4860130)) || // 4860129 lsk does not increment to 4860130
		(low.GetHeight() == uint64(4851654) && high.GetHeight() == uint64(4851655)) || // 4851654 lsk does not increment to 4851655
		(low.GetHeight() == uint64(2927292) && high.GetHeight() == uint64(2927293)) || // 2927292 lsk does not increment to 2927293
		(low.GetHeight() == uint64(2921767) && high.GetHeight() == uint64(2921768)) || // 2921767 lsk does not increment to 2921768
		(low.GetHeight() == uint64(2877093) && high.GetHeight() == uint64(2877094))) // 2877093 lsk does not increment to 2877094
}
// neoContiguityProblems whitelists historical block heights where the
// NEO rover's indexed headers are known to be missing or out of order
// on the live chain; pairs touching these heights skip the NEO
// contiguity check.
func neoContiguityProblems(low, high *p2p_pb.BcBlock) bool {
	return ((low.GetHeight() == uint64(6686205) || high.GetHeight() == uint64(6686205)) || // 6686205 neo is missing block 8900857
		(low.GetHeight() == uint64(5937782) && high.GetHeight() == uint64(5937783)) || // 5937782 neo does not increment to 5937783
		(low.GetHeight() == uint64(5936375) && high.GetHeight() == uint64(5936376)) || // 5936375 neo does not increment to 5936376
		(low.GetHeight() == uint64(2927292) && high.GetHeight() == uint64(2927293)) || // 2927292 neo does not increment to 2927293
		(low.GetHeight() == uint64(2921767) && high.GetHeight() == uint64(2921768))) // 2921767 neo does not increment to 2921768
}
// wavContiguityProblems whitelists historical block heights where the
// WAV rover's indexed headers are known to be missing or out of order
// on the live chain; pairs touching these heights skip the WAV
// contiguity check.
func wavContiguityProblems(low, high *p2p_pb.BcBlock) bool {
	return ((low.GetHeight() == uint64(6823182) || high.GetHeight() == uint64(6823182)) || // 6823182 wav is missing block 3037758
		(low.GetHeight() == uint64(6776437) || high.GetHeight() == uint64(6776437)) || // 6776437 wav is missing blocks 3033379 - 3033382
		(low.GetHeight() == uint64(6644362) && high.GetHeight() == uint64(6644363)) || // 6644362 wav does not increment to 6644363
		(low.GetHeight() == uint64(6641219) && high.GetHeight() == uint64(6641220)) || // 6641219 wav does not increment to 6641220
		(low.GetHeight() == uint64(6633144) && high.GetHeight() == uint64(6633145)) || // 6633144 wav does not increment to 6633145
		(low.GetHeight() == uint64(6625383) && high.GetHeight() == uint64(6625384)) || // 6625383 wav does not increment to 6625384
		(low.GetHeight() == uint64(6621066) && high.GetHeight() == uint64(6621067)) || // 6621066 wav does not increment to 6621067
		(low.GetHeight() == uint64(6616183) && high.GetHeight() == uint64(6616184)) || // 6616183 wav does not increment to 6616184
		(low.GetHeight() == uint64(6571760) && high.GetHeight() == uint64(6571761)) || // 6571760 wav does not increment to 6571761
		(low.GetHeight() == uint64(6463192) && high.GetHeight() == uint64(6463193)) || // 6463192 wav does not increment to 6463193
		(low.GetHeight() == uint64(6448350) && high.GetHeight() == uint64(6448351)) || // 6448350 wav does not increment to 6448351
		(low.GetHeight() == uint64(6229663) && high.GetHeight() == uint64(6229664)) || // 6229663 wav does not increment to 6229664
		(low.GetHeight() == uint64(6133666) && high.GetHeight() == uint64(6133667)) || // 6133666 wav does not increment to 6133667
		(low.GetHeight() == uint64(6078335) && high.GetHeight() == uint64(6078336)) || // 6078335 wav does not increment to 6078336
		(low.GetHeight() == uint64(5948727) && high.GetHeight() == uint64(5948728)) || // 5948727 wav does not increment to 5948728
		(low.GetHeight() == uint64(5944634) && high.GetHeight() == uint64(5944635)) || // 5944634 wav does not increment to 5944635
		(low.GetHeight() == uint64(5908377) && high.GetHeight() == uint64(5908378)) || // 5908377 wav does not increment to 5908378
		(low.GetHeight() == uint64(5890170) && high.GetHeight() == uint64(5890171)) || // 5890170 wav does not increment to 5890171
		(low.GetHeight() == uint64(5890169) && high.GetHeight() == uint64(5890170)) || // 5890169 wav does not increment to 5890170
		(low.GetHeight() == uint64(5870867) && high.GetHeight() == uint64(5870868)) || // 5870867 wav does not increment to 5870868
		(low.GetHeight() == uint64(5792335) && high.GetHeight() == uint64(5792336)) || // 5792335 wav does not increment to 5792336
		(low.GetHeight() == uint64(5760218) && high.GetHeight() == uint64(5760219))) // 5760218 wav does not increment to 5760219
}
// OrderedBlockPairIsValid reports whether high is a valid direct
// successor of low under the lenient rules. The difficulty and distance
// progression checks are still executed (for their debug logging) but
// their results are discarded via `|| true`, so only the structural
// pair/contiguity check can fail here.
func OrderedBlockPairIsValid(low, high *p2p_pb.BcBlock) bool {
	return (orderedBlockPairIsValid(low, high, false) &&
		(validateDifficultyProgression(low, high) || true) &&
		(validateDistanceProgression(low, high) || true))
}
// OrderedBlockPairIsValidStrict reports whether high is a valid direct
// successor of low under the strict rules: the structural check runs in
// strict mode and the difficulty and distance progressions must both
// actually hold.
func OrderedBlockPairIsValidStrict(low, high *p2p_pb.BcBlock) bool {
	return (orderedBlockPairIsValid(low, high, true) &&
		validateDifficultyProgression(low, high) &&
		validateDistanceProgression(low, high))
}
// orderedBlockPairIsValid checks that high directly follows low (height
// increment and previous-hash linkage) and that every rover's indexed
// header range is contiguous across the pair, modulo known historical
// exceptions. When isStrict is false, contiguity breaks at or above
// nitPickedValidationHeight are logged and tolerated.
func orderedBlockPairIsValid(low, high *p2p_pb.BcBlock, isStrict bool) bool {
	const nitPickedValidationHeight = uint64(6850000)
	if (high.GetHeight()-1 != low.GetHeight()) || (high.GetPreviousHash() != low.GetHash()) {
		return false
	}
	// do not validate headers if comparing to genesis block, or when the
	// pair is a known global exception
	if low.GetHeight() == 1 || globalContiguityProblems(low, high) {
		return true
	}
	// chainOK factors the five identical per-rover stanzas the original
	// repeated: a known historical exception or a contiguous header
	// range passes outright; otherwise the break is logged and tolerated
	// only in lenient mode at or above the nit-picked height.
	chainOK := func(chain string, knownException bool, lowHeaders, highHeaders []*p2p_pb.BlockchainHeader) bool {
		if knownException || HeaderRangeIsContiguous(lowHeaders, highHeaders) {
			return true
		}
		if low.GetHeight() >= nitPickedValidationHeight && !isStrict {
			zap.S().Warnf("Problem in %s contiguity spanning blocks %v %v -> %v %v", chain, low.GetHeight(), common.BriefHash(low.GetHash()), high.GetHeight(), common.BriefHash(high.GetHash()))
			return true
		}
		return false
	}
	lowHeaders := low.GetBlockchainHeaders()
	highHeaders := high.GetBlockchainHeaders()
	// Same evaluation order and short-circuiting as the original chain.
	return chainOK("BTC", btcContiguityProblems(low, high), lowHeaders.GetBtc(), highHeaders.GetBtc()) &&
		chainOK("ETH", ethContiguityProblems(low, high), lowHeaders.GetEth(), highHeaders.GetEth()) &&
		chainOK("LSK", lskContiguityProblems(low, high), lowHeaders.GetLsk(), highHeaders.GetLsk()) &&
		chainOK("NEO", neoContiguityProblems(low, high), lowHeaders.GetNeo(), highHeaders.GetNeo()) &&
		chainOK("WAV", wavContiguityProblems(low, high), lowHeaders.GetWav(), highHeaders.GetWav())
}
// isSameHeightOrChained reports whether two child-chain headers sit at
// the same height or are hash-linked in either direction.
func isSameHeightOrChained(low, high *p2p_pb.BlockchainHeader) bool {
	if low.GetHeight() == high.GetHeight() {
		return true
	}
	if high.GetHeight()-1 == low.GetHeight() && high.GetPreviousHash() == low.GetHash() {
		return true
	}
	return low.GetHeight()-1 == high.GetHeight() && low.GetPreviousHash() == high.GetHash()
}
// isChained decides whether two internally-contiguous header ranges can
// plausibly follow each other, by comparing their endpoint heights.
//
// NOTE(review): the four height-window cases below jointly cover every
// possible ordering of the endpoint heights, so once both ranges are
// non-empty this function always returns true — confirm whether a
// stricter overlap test was intended.
func isChained(low, high []*p2p_pb.BlockchainHeader) bool {
	if isSameHeightOrChained(low[len(low)-1], high[0]) {
		return true
	}
	if low[0].GetHeight() <= high[0].GetHeight() && low[len(low)-1].GetHeight() <= high[len(high)-1].GetHeight() {
		return true
	}
	if high[0].GetHeight() <= low[0].GetHeight() && high[len(high)-1].GetHeight() <= low[len(low)-1].GetHeight() {
		return true
	}
	if low[0].GetHeight() <= high[0].GetHeight() && low[len(low)-1].GetHeight() >= high[len(high)-1].GetHeight() {
		return true
	}
	if high[0].GetHeight() <= low[0].GetHeight() && high[len(high)-1].GetHeight() >= low[len(low)-1].GetHeight() {
		return true
	}
	return false
}
// HeaderRangeIsContiguous reports whether each of the two header ranges
// is internally hash-linked and the ranges can be chained together.
func HeaderRangeIsContiguous(low, high []*p2p_pb.BlockchainHeader) bool {
	// special case: one header on each side only needs the pairwise
	// height/hash relation
	if len(low) == 1 && len(high) == 1 {
		return isSameHeightOrChained(low[0], high[0])
	}
	linked := func(headers []*p2p_pb.BlockchainHeader) bool {
		for i := 1; i < len(headers); i++ {
			if headers[i].GetPreviousHash() != headers[i-1].GetHash() {
				return false
			}
		}
		return true
	}
	if !linked(low) || !linked(high) {
		return false
	}
	return isChained(low, high)
}
// validateDifficultyProgression recomputes the difficulty high should
// carry — from low's difficulty, the pair's timestamps, and the newly
// indexed child-chain headers — and compares it with the difficulty
// high actually claims.
func validateDifficultyProgression(low, high *p2p_pb.BcBlock) bool {
	// Blocks below height 4 predate the difficulty schedule and pass.
	if low.GetHeight() < 4 && high.GetHeight() < 4 {
		return true
	}
	totalHeightChange := GetNewIndexedHeightChange(low, high)
	expectedDifficulty, _ := new(big.Int).SetString(high.GetDifficulty(), 10)
	bigMinDiff := new(big.Int).SetUint64(olhash.MIN_DIFFICULTY)
	lastBlockDiff, _ := new(big.Int).SetString(low.GetDifficulty(), 10)
	newestHeader := GetNewestIndexedBlockHeader(high)
	firstDiff := olhash.GetDifficultyPreExp(
		high.GetTimestamp(), low.GetTimestamp(),
		lastBlockDiff, bigMinDiff,
		totalHeightChange,
		newestHeader,
	)
	finalDiff := olhash.GetExpFactorDiff(firstDiff, low.GetHeight())
	result := finalDiff.Cmp(expectedDifficulty) == 0
	if !result {
		zap.S().Debugf("%v -> expectedDifficulty: %v, calculatedDifficulty: %v", high.GetHeight(), high.GetDifficulty(), finalDiff.String())
	}
	// note that the original js code does not require this to be
	// true since it tests that a pointer to an object is not null
	// rather than the value of the object
	return result
}
// headerHeightDiff returns the signed height change between the last header
// of `high` and the last header of `low`. Both slices must be non-empty.
func headerHeightDiff(low, high []*p2p_pb.BlockchainHeader) int64 {
	newest := high[len(high)-1]
	oldest := low[len(low)-1]
	return int64(newest.GetHeight()) - int64(oldest.GetHeight())
}
// GetNewestIndexedBlockHeader returns the most recently timestamped
// child-chain header indexed by the block, across all five rovered chains.
//
// NOTE(review): if every header list is empty this indexes into an empty
// slice and panics — confirm callers guarantee at least one header.
func GetNewestIndexedBlockHeader(block *p2p_pb.BcBlock) *p2p_pb.BlockchainHeader {
	headers := block.GetBlockchainHeaders()
	headersFlat := make([]*p2p_pb.BlockchainHeader, 0)
	headersFlat = append(headersFlat, headers.GetBtc()...)
	headersFlat = append(headersFlat, headers.GetEth()...)
	headersFlat = append(headersFlat, headers.GetLsk()...)
	headersFlat = append(headersFlat, headers.GetNeo()...)
	headersFlat = append(headersFlat, headers.GetWav()...)
	// Stable sort by timestamp; the last element is the newest header, with
	// ties resolving to the last occurrence in btc/eth/lsk/neo/wav order.
	sort.SliceStable(headersFlat, func(i, j int) bool {
		return headersFlat[i].GetTimestamp() < headersFlat[j].GetTimestamp()
	})
	return headersFlat[len(headersFlat)-1]
}
// GetNewIndexedHeightChange sums, across every indexed child chain, how many
// blocks `high` has indexed beyond `low` (per headerHeightDiff).
func GetNewIndexedHeightChange(low, high *p2p_pb.BcBlock) int64 {
	lowHeaders := low.GetBlockchainHeaders()
	highHeaders := high.GetBlockchainHeaders()
	var total int64
	total += headerHeightDiff(lowHeaders.GetBtc(), highHeaders.GetBtc())
	total += headerHeightDiff(lowHeaders.GetEth(), highHeaders.GetEth())
	total += headerHeightDiff(lowHeaders.GetLsk(), highHeaders.GetLsk())
	total += headerHeightDiff(lowHeaders.GetNeo(), highHeaders.GetNeo())
	total += headerHeightDiff(lowHeaders.GetWav(), highHeaders.GetWav())
	return total
}
// validateDistanceProgression checks that high.TotalDistance equals
// low.TotalDistance + high.Distance. Several hard-coded escape hatches accept
// historical blocks that violate the rule (early chain, two specific hashes,
// a height window, and miners that under-counted their advantage).
func validateDistanceProgression(low, high *p2p_pb.BcBlock) bool {
	const (
		// Two specific historical blocks accepted unconditionally.
		PASS_HASH1 = "ce9f9e8b316de889a76d5d70295cedaf8f0894992ee485d5ecf04fea56b2ca62"
		PASS_HASH2 = "a6eb1f0605ac811a148a9cd864baabe80267765829fad9aca048b9b8ef7f2ab3"
		// Blocks below both this height and timestamp predate the rule.
		SOFT_HEIGHT = uint64(40000)
		SOFT_TIMESTAMP = uint64(1584771657)
		// Height bounds for the relaxed-validation windows below.
		PASS_HEIGHT1 = uint64(2469110)
		PASS_HEIGHT2 = uint64(2499000)
		PASS_HEIGHT3 = uint64(5000000)
	)
	if high.GetHeight() < SOFT_HEIGHT && high.GetTimestamp() < SOFT_TIMESTAMP {
		return true
	}
	if high.GetHash() == PASS_HASH1 || high.GetHash() == PASS_HASH2 {
		return true
	}
	// NOTE(review): SetString errors are ignored; malformed decimal strings
	// would leave nil *big.Int values — confirm upstream validation.
	expectedDistance, _ := new(big.Int).SetString(high.GetTotalDistance(), 10)
	lowDistance, _ := new(big.Int).SetString(low.GetTotalDistance(), 10)
	addedDistance, _ := new(big.Int).SetString(high.GetDistance(), 10)
	calcDistance := new(big.Int).Add(lowDistance, addedDistance)
	matched := (expectedDistance.Cmp(calcDistance) == 0)
	if !matched {
		// Relaxations for known historical mismatches.
		if (high.GetHeight() > PASS_HEIGHT1) && (high.GetHeight() < PASS_HEIGHT2) {
			return true
		}
		// miner did not calculate advantage correctly
		directDist := new(big.Int).Sub(expectedDistance, lowDistance)
		if directDist.Cmp(addedDistance) == -1 {
			return true
		}
		if expectedDistance.Cmp(lowDistance) == 1 && high.GetHeight() < PASS_HEIGHT3 {
			return true
		}
	}
	if !matched {
		zap.S().Debugf("%v -> %v, %v != %v", low.GetHeight(), high.GetHeight(), expectedDistance.String(), calcDistance.String())
	}
	// note that the original js code does not require this to be
	// true since it tests that a pointer to an object is not null
	// rather than the value of the object
	return matched
}
package pgsql
import (
"database/sql"
"database/sql/driver"
"strconv"
)
// LsegArrayFromFloat64Array2Array2Slice returns a driver.Valuer that produces a PostgreSQL lseg[] from the given Go [][2][2]float64.
// Each element is encoded as "[(x1,y1),(x2,y2)]"; a nil slice becomes SQL NULL.
func LsegArrayFromFloat64Array2Array2Slice(val [][2][2]float64) driver.Valuer {
	return lsegArrayFromFloat64Array2Array2Slice{val: val}
}

// LsegArrayToFloat64Array2Array2Slice returns an sql.Scanner that converts a PostgreSQL lseg[] into a Go [][2][2]float64 and sets it to val.
// A SQL NULL source leaves *val unchanged.
func LsegArrayToFloat64Array2Array2Slice(val *[][2][2]float64) sql.Scanner {
	return lsegArrayToFloat64Array2Array2Slice{val: val}
}
// lsegArrayFromFloat64Array2Array2Slice wraps a [][2][2]float64 so it can be
// passed as a PostgreSQL lseg[] parameter.
type lsegArrayFromFloat64Array2Array2Slice struct {
	val [][2][2]float64
}

// Value implements driver.Valuer. It renders the wrapped slice as a
// PostgreSQL lseg[] literal, e.g. {"[(x1,y1),(x2,y2)]",...}. A nil slice maps
// to SQL NULL and an empty slice to an empty array literal.
func (v lsegArrayFromFloat64Array2Array2Slice) Value() (driver.Value, error) {
	switch {
	case v.val == nil:
		return nil, nil
	case len(v.val) == 0:
		return []byte{'{', '}'}, nil
	}

	// 15 bytes covers the fixed syntax of one `"[(x,y),(x,y)]"` element; the
	// float digits themselves may grow the buffer past this capacity hint.
	capacity := len(v.val)*15 + (len(v.val) - 1) + 2
	out := make([]byte, 1, capacity)
	out[0] = '{'
	for _, seg := range v.val {
		out = append(out, '"', '[', '(')
		out = strconv.AppendFloat(out, seg[0][0], 'f', -1, 64)
		out = append(out, ',')
		out = strconv.AppendFloat(out, seg[0][1], 'f', -1, 64)
		out = append(out, ')', ',', '(')
		out = strconv.AppendFloat(out, seg[1][0], 'f', -1, 64)
		out = append(out, ',')
		out = strconv.AppendFloat(out, seg[1][1], 'f', -1, 64)
		out = append(out, ')', ']', '"', ',')
	}
	out[len(out)-1] = '}' // overwrite the trailing comma
	return out, nil
}
// lsegArrayToFloat64Array2Array2Slice wraps a destination slice pointer for
// scanning a PostgreSQL lseg[] value.
type lsegArrayToFloat64Array2Array2Slice struct {
	val *[][2][2]float64
}

// Scan implements sql.Scanner. It parses a PostgreSQL lseg[] literal into a
// [][2][2]float64 and stores it in *v.val. A NULL source (nil data) leaves
// *v.val unchanged and returns nil.
func (v lsegArrayToFloat64Array2Array2Slice) Scan(src interface{}) error {
	data, err := srcbytes(src)
	if err != nil {
		return err
	} else if data == nil {
		return nil
	}
	// pgParseLsegArray yields, per lseg, the four coordinate byte slices
	// indexed as [point][coordinate].
	elems := pgParseLsegArray(data)
	lsegs := make([][2][2]float64, len(elems))
	for i := 0; i < len(elems); i++ {
		if lsegs[i][0][0], err = strconv.ParseFloat(string(elems[i][0][0]), 64); err != nil {
			return err
		}
		if lsegs[i][0][1], err = strconv.ParseFloat(string(elems[i][0][1]), 64); err != nil {
			return err
		}
		if lsegs[i][1][0], err = strconv.ParseFloat(string(elems[i][1][0]), 64); err != nil {
			return err
		}
		if lsegs[i][1][1], err = strconv.ParseFloat(string(elems[i][1][1]), 64); err != nil {
			return err
		}
	}
	*v.val = lsegs
	return nil
}
package graph
// A Subgraph is a Graph that consists of a subset of the nodes and
// vertices from another, underlying Graph. Node and edge indexes on a
// Subgraph are local to it, not to the underlying Graph.
type Subgraph interface {
	Graph

	// Underlying returns the underlying graph that this is a
	// subgraph of.
	Underlying() Graph

	// NodeMap transduces a node property map on the underlying
	// graph into a node property map on this graph.
	NodeMap(underlyingMap func(node int) interface{}) func(node int) interface{}

	// EdgeMap transduces an edge property map on the underlying
	// graph into an edge property map on this graph.
	EdgeMap(underlyingMap func(node, edge int) interface{}) func(node, edge int) interface{}
}
// SubgraphKeep returns a subgraph of g that keeps the given nodes and
// edges. Subgraph node i corresponds to nodes[i] in g.
// It panics if a node is out of range for g or listed twice.
//
// NOTE(review): edges are not validated — an Edge whose Node is absent from
// `nodes` silently maps to subgraph node 0 via the zero-value map lookup.
// Confirm callers only pass edges between kept nodes.
func SubgraphKeep(g Graph, nodes []int, edges []Edge) Subgraph {
	// Create old-to-new node mapping.
	gNodes := g.NumNodes()
	oldToNew := make(map[int]int, len(nodes))
	for newNode, oldNode := range nodes {
		if oldNode < 0 || oldNode >= gNodes {
			panic("node not in underlying graph")
		}
		if _, ok := oldToNew[oldNode]; ok {
			panic("duplicate node")
		}
		oldToNew[oldNode] = newNode
	}
	// Construct new nodes.
	newNodes := make([]listSubgraphNode, len(nodes))
	for i, oldNode := range nodes {
		newNodes[i].oldNode = oldNode
	}
	// Map old edge indexes to new node IDs.
	for _, oldEdge := range edges {
		newNode := &newNodes[oldToNew[oldEdge.Node]]
		oldTo := g.Out(oldEdge.Node)[oldEdge.Edge]
		newTo := oldToNew[oldTo]
		newNode.out = append(newNode.out, newTo)
		newNode.oldEdges = append(newNode.oldEdges, oldEdge.Edge)
	}
	return &listSubgraph{g, newNodes}
}
// SubgraphRemove returns a subgraph of g that removes the given nodes
// and edges from g, as well as all edges incident to those nodes.
// Remaining nodes are renumbered densely in their original order.
func SubgraphRemove(g Graph, nodes []int, edges []Edge) Subgraph {
	// Collect the set of nodes and edges to remove.
	rmNodes := make(map[int]struct{}, len(nodes))
	for _, node := range nodes {
		rmNodes[node] = struct{}{}
	}
	rmEdges := make(map[Edge]struct{}, len(edges))
	for _, edge := range edges {
		rmEdges[edge] = struct{}{}
	}
	// Create new-to-old and old-to-new node mappings.
	newNodes := make([]listSubgraphNode, 0, g.NumNodes()-len(rmNodes))
	oldToNew := make(map[int]int, cap(newNodes))
	for oldNode := 0; oldNode < g.NumNodes(); oldNode++ {
		if _, ok := rmNodes[oldNode]; ok {
			continue
		}
		newNode := len(newNodes)
		newNodes = append(newNodes, listSubgraphNode{oldNode: oldNode})
		oldToNew[oldNode] = newNode
	}
	// Create edge mappings, dropping edges that were removed explicitly or
	// that touch a removed node on either end.
	for i := range newNodes {
		newNode := &newNodes[i]
		oldNode := newNode.oldNode
		oldOut := g.Out(oldNode)
		for j, oldNode2 := range oldOut {
			if _, ok := rmNodes[oldNode2]; ok {
				// Target node removed.
				continue
			}
			if _, ok := rmEdges[Edge{oldNode, j}]; ok {
				// Edge removed.
				continue
			}
			newNode.out = append(newNode.out, oldToNew[oldNode2])
			newNode.oldEdges = append(newNode.oldEdges, j)
		}
	}
	return &listSubgraph{g, newNodes}
}
// listSubgraph is the adjacency-list Subgraph implementation returned by
// SubgraphKeep and SubgraphRemove.
type listSubgraph struct {
	underlying Graph
	nodes []listSubgraphNode
}

// listSubgraphNode records one subgraph node's local adjacency plus the
// mapping back to the underlying graph.
type listSubgraphNode struct {
	out []int // Adjacency list
	oldNode int // Node ID in underlying graph
	oldEdges []int // New edge index -> old edge index
}
// NumNodes returns the number of nodes kept in the subgraph.
func (s *listSubgraph) NumNodes() int {
	return len(s.nodes)
}

// Out returns the adjacency list (subgraph node IDs) of the given node.
func (s *listSubgraph) Out(node int) []int {
	return s.nodes[node].out
}

// Underlying returns the graph this subgraph was derived from.
func (s *listSubgraph) Underlying() Graph {
	return s.underlying
}

// NodeMap adapts a property map keyed by underlying-graph node IDs into one
// keyed by subgraph node IDs.
func (s *listSubgraph) NodeMap(underlyingMap func(node int) interface{}) func(node int) interface{} {
	return func(node int) interface{} {
		return underlyingMap(s.nodes[node].oldNode)
	}
}

// EdgeMap adapts a property map keyed by underlying (node, edge) pairs into
// one keyed by subgraph (node, edge) pairs.
func (s *listSubgraph) EdgeMap(underlyingMap func(node, edge int) interface{}) func(node, edge int) interface{} {
	return func(node, edge int) interface{} {
		newNode := &s.nodes[node]
		return underlyingMap(newNode.oldNode, newNode.oldEdges[edge])
	}
}
package statsdtest
import (
"bytes"
"fmt"
"strings"
)
// Stat contains the raw and extracted stat information from a stat that was
// sent by the RecordingSender. Raw will always have the content that was
// consumed for this specific stat and Parsed will be set if no errors were hit
// pulling information out of it.
type Stat struct {
	Raw    []byte // exact bytes consumed for this stat
	Stat   string // stat name (text before the ':')
	Value  string // recorded value
	Tag    string // statsd type tag
	Rate   string // sample rate without the '@' prefix; empty when absent
	Parsed bool   // true when all components were extracted successfully
}

// String fulfils the stringer interface.
// Note it renders "name value rate" and omits Tag; Rate may be empty.
func (s *Stat) String() string {
	return fmt.Sprintf("%s %s %s", s.Stat, s.Value, s.Rate)
}

// ParseStats takes a sequence of bytes destined for a Statsd server and parses
// it out into one or more Stat structs. Each struct includes both the raw
// bytes (copied, so the src []byte may be reused if desired) as well as each
// component it was able to parse out. If parsing was incomplete Stat.Parsed
// will be set to false but no error is returned / kept.
func ParseStats(src []byte) Stats {
	// Copy src so the caller may reuse its buffer.
	d := make([]byte, len(src))
	copy(d, src)

	// standard protocol indicates one stat per line
	entries := bytes.Split(d, []byte{'\n'})
	result := make(Stats, len(entries))
	for i, e := range entries {
		result[i] = Stat{Raw: e}
		ss := &result[i]

		// ':' delineates the stat name from the stat data
		marker := bytes.IndexByte(e, ':')
		if marker == -1 {
			continue
		}
		ss.Stat = string(e[0:marker])

		// stat data follows ':' with the form {value}|{type tag}[|@{sample rate}]
		e = e[marker+1:]
		marker = bytes.IndexByte(e, '|')
		if marker == -1 {
			continue
		}
		ss.Value = string(e[:marker])

		e = e[marker+1:]
		marker = bytes.IndexByte(e, '|')
		if marker == -1 {
			// no sample rate
			ss.Tag = string(e)
		} else {
			ss.Tag = string(e[:marker])
			e = e[marker+1:]
			if len(e) == 0 || e[0] != '@' {
				// sample rate should be prefixed with '@'; bail otherwise
				continue
			}
			ss.Rate = string(e[1:])
		}
		ss.Parsed = true
	}
	return result
}

// Stats is a slice of Stat.
type Stats []Stat
// Unparsed returns any stats that were unable to be completely parsed.
func (s Stats) Unparsed() Stats {
var r Stats
for _, e := range s {
if !e.Parsed {
r = append(r, e)
}
}
return r
}
// CollectNamed returns all data sent for a given stat name.
func (s Stats) CollectNamed(statName string) Stats {
return s.Collect(func(e Stat) bool {
return e.Stat == statName
})
}
// Collect gathers all stats that make some predicate true.
func (s Stats) Collect(pred func(Stat) bool) Stats {
var r Stats
for _, e := range s {
if pred(e) {
r = append(r, e)
}
}
return r
}
// Values returns the values associated with this Stats object.
func (s Stats) Values() []string {
if len(s) == 0 {
return nil
}
r := make([]string, len(s))
for i, e := range s {
r[i] = e.Value
}
return r
}
// String fulfils the stringer interface
func (s Stats) String() string {
if len(s) == 0 {
return ""
}
r := make([]string, len(s))
for i, e := range s {
r[i] = e.String()
}
return strings.Join(r, "\n")
} | vendor/github.com/cactus/go-statsd-client/statsd/statsdtest/stat.go | 0.681303 | 0.415195 | stat.go | starcoder |
package generation
import (
"math"
"github.com/flowmatters/openwater-core/conv/rough"
"github.com/flowmatters/openwater-core/conv/units"
"github.com/flowmatters/openwater-core/data"
)
/*OW-SPEC
BankErosion:
inputs:
downstreamFlowVolume:
totalVolume:
states:
parameters:
riparianVegPercent:
maxRiparianVegEffectiveness:
soilErodibility:
bankErosionCoeff:
linkSlope:
bankFullFlow:
bankMgtFactor:
sedBulkDensity:
bankHeight:
linkLength:
dailyFlowPowerFactor:
longTermAvDailyFlow:
soilPercentFine:
durationInSeconds: '[1,86400] Timestep, default=86400'
outputs:
bankErosionFine: kg
bankErosionCoarse: kg
implementation:
function: bankErosion
type: scalar
lang: go
outputs: params
init:
zero: true
lang: go
tags:
sediment generation
*/
// Does this return the same value every timestep? (Name suggests it does!)
func meanAnnualBankErosion(riparianVegPercent, maxRiparianVegEffectiveness, soilErodibility, bankErosionCoeff,
linkSlope, bankFullFlow, bankMgtFactor, sedBulkDensity, bankHeight, linkLength float64) float64 {
densityWater := 1000.0 // kg.m^-3
gravity := 9.81 // m.s^-2
BankErodability := (1 - math.Min((riparianVegPercent/100), (maxRiparianVegEffectiveness/100))) * (soilErodibility / 100)
RetreatRate_MperYr := bankErosionCoeff * densityWater * gravity * linkSlope * bankFullFlow * bankMgtFactor
massConversion := sedBulkDensity * bankHeight * linkLength
result := massConversion * RetreatRate_MperYr * BankErodability
return result
}
// bankErosion writes per-cell fine and coarse bank-erosion loads (kg/s) by
// scaling the constant mean-annual erosion estimate with a per-cell daily
// discharge factor, then splitting by the fine-sediment percentage.
func bankErosion(downstreamFlowVolume, totalVolume data.ND1Float64,
	riparianVegPercent, maxRiparianVegEffectiveness, soilErodibility, bankErosionCoeff,
	linkSlope, bankFullFlow, bankMgtFactor, sedBulkDensity, bankHeight, linkLength,
	dailyFlowPowerFactor, longTermAvDailyFlow, soilPercentFine, durationInSeconds float64,
	bankErosionFine, bankErosionCoarse data.ND1Float64) {
	idx := []int{0}
	n := downstreamFlowVolume.Len1()
	// The annual estimate depends only on parameters; compute it once.
	meanAnnual := meanAnnualBankErosion(riparianVegPercent, maxRiparianVegEffectiveness, soilErodibility, bankErosionCoeff,
		linkSlope, bankFullFlow, bankMgtFactor, sedBulkDensity, bankHeight, linkLength)
	// implementation of formula 4.20 in specification document - page 22
	// bank erosion calculated as per this formula is in tonnes per day
	for i := 0; i < n; i++ {
		idx[0] = i
		LinkDischargeFactor := 0.0
		outflow := downstreamFlowVolume.Get(idx)
		// No flow (or no long-term average to normalize by) means no erosion.
		if totalVolume.Get(idx) <= 0 || outflow <= 0 || longTermAvDailyFlow <= 0 {
			LinkDischargeFactor = 0
		} else {
			// convert to daily m3 before raising to power
			LinkDischargeFactor = math.Pow(outflow*durationInSeconds, dailyFlowPowerFactor) / longTermAvDailyFlow
		}
		// mainChannelArea := /*mainChannelStreamDimensions.*/contribArea_Km
		BankErosion_TperDay := (meanAnnual * LinkDischargeFactor) / rough.DAYS_PER_YEAR
		// Convert tonnes/day to kg/s over the timestep, then split the load
		// into fine and coarse fractions.
		BankErosionTotal_kg_per_Second := BankErosion_TperDay * units.TONNES_TO_KG / durationInSeconds
		bankErosionFine_Kg_per_Second := BankErosionTotal_kg_per_Second * (soilPercentFine * units.PERCENT_TO_PROPORTION)
		bankErosionCoarse_Kg_per_Second := BankErosionTotal_kg_per_Second * (1 - (soilPercentFine * units.PERCENT_TO_PROPORTION))
		bankErosionFine.Set(idx, bankErosionFine_Kg_per_Second)
		bankErosionCoarse.Set(idx, bankErosionCoarse_Kg_per_Second)
	}
}
package types
import (
"reflect"
"strconv"
)
// Base slice types used to Convert arbitrary named slice types down to their
// underlying slice type before appending.
var (
	stringSliceType = reflect.TypeOf([]string(nil))
	intSliceType = reflect.TypeOf([]int(nil))
	int64SliceType = reflect.TypeOf([]int64(nil))
	float64SliceType = reflect.TypeOf([]float64(nil))
)

// sliceAppenders maps an element reflect.Kind (used as the array index) to a
// specialized appender for slices of that kind; nil entries fall back to the
// generic per-element path in ArrayAppender.
var sliceAppenders = []AppenderFunc{
	reflect.Bool: nil,
	reflect.Int: appendIntSliceValue,
	reflect.Int8: nil,
	reflect.Int16: nil,
	reflect.Int32: nil,
	reflect.Int64: appendInt64SliceValue,
	reflect.Uint: nil,
	reflect.Uint8: nil,
	reflect.Uint16: nil,
	reflect.Uint32: nil,
	reflect.Uint64: nil,
	reflect.Uintptr: nil,
	reflect.Float32: nil,
	reflect.Float64: appendFloat64SliceValue,
	reflect.Complex64: nil,
	reflect.Complex128: nil,
	reflect.Array: nil,
	reflect.Chan: nil,
	reflect.Func: nil,
	reflect.Interface: nil,
	reflect.Map: nil,
	reflect.Ptr: nil,
	reflect.Slice: nil,
	reflect.String: appendStringSliceValue,
	reflect.Struct: nil,
	reflect.UnsafePointer: nil,
}
// ArrayAppender returns an AppenderFunc that renders a Go slice value as a
// PostgreSQL array literal. A specialized appender is used when one exists
// for the element kind; otherwise each element is appended with the generic
// Appender for the element type. A nil slice renders as NULL. Quote level 1
// wraps the whole literal in single quotes; elements are appended at quote
// level 2 (presumably element-level quoting — confirm against Appender).
func ArrayAppender(typ reflect.Type) AppenderFunc {
	elemType := typ.Elem()
	// Fast path: specialized appender for this element kind.
	if appender := sliceAppenders[elemType.Kind()]; appender != nil {
		return appender
	}
	appendElem := Appender(elemType)
	return func(b []byte, v reflect.Value, quote int) []byte {
		if v.IsNil() {
			return AppendNull(b, quote)
		}
		if quote == 1 {
			b = append(b, '\'')
		}
		b = append(b, '{')
		for i := 0; i < v.Len(); i++ {
			elem := v.Index(i)
			b = appendElem(b, elem, 2)
			b = append(b, ',')
		}
		if v.Len() > 0 {
			b[len(b)-1] = '}' // Replace trailing comma.
		} else {
			b = append(b, '}')
		}
		if quote == 1 {
			b = append(b, '\'')
		}
		return b
	}
}
// appendStringSliceValue adapts appendStringSlice to the AppenderFunc shape
// by converting the reflect value down to a plain []string.
func appendStringSliceValue(b []byte, v reflect.Value, quote int) []byte {
	ss := v.Convert(stringSliceType).Interface().([]string)
	return appendStringSlice(b, ss, quote)
}

// appendStringSlice appends a PostgreSQL array literal for strings to b.
// A nil slice renders as NULL; quote level 1 wraps the literal in single
// quotes, and each element is appended at quote level 2.
func appendStringSlice(b []byte, ss []string, quote int) []byte {
	if ss == nil {
		return AppendNull(b, quote)
	}
	if quote == 1 {
		b = append(b, '\'')
	}
	b = append(b, '{')
	for _, s := range ss {
		b = AppendString(b, s, 2)
		b = append(b, ',')
	}
	if len(ss) > 0 {
		b[len(b)-1] = '}' // Replace trailing comma.
	} else {
		b = append(b, '}')
	}
	if quote == 1 {
		b = append(b, '\'')
	}
	return b
}

// appendIntSliceValue adapts appendIntSlice to the AppenderFunc shape by
// converting the reflect value down to a plain []int.
func appendIntSliceValue(b []byte, v reflect.Value, quote int) []byte {
	ints := v.Convert(intSliceType).Interface().([]int)
	return appendIntSlice(b, ints, quote)
}

// appendIntSlice appends a PostgreSQL array literal for ints to b.
// A nil slice renders as NULL; quote level 1 wraps the literal in single
// quotes.
func appendIntSlice(b []byte, ints []int, quote int) []byte {
	if ints == nil {
		return AppendNull(b, quote)
	}
	if quote == 1 {
		b = append(b, '\'')
	}
	b = append(b, '{')
	for _, n := range ints {
		b = strconv.AppendInt(b, int64(n), 10)
		b = append(b, ',')
	}
	if len(ints) > 0 {
		b[len(b)-1] = '}' // Replace trailing comma.
	} else {
		b = append(b, '}')
	}
	if quote == 1 {
		b = append(b, '\'')
	}
	return b
}
func appendInt64SliceValue(b []byte, v reflect.Value, quote int) []byte {
ints := v.Convert(int64SliceType).Interface().([]int64)
return appendInt64Slice(b, ints, quote)
}
func appendInt64Slice(b []byte, ints []int64, quote int) []byte {
if ints == nil {
return AppendNull(b, quote)
}
if quote == 1 {
b = append(b, '\'')
}
b = append(b, "{"...)
for _, n := range ints {
b = strconv.AppendInt(b, n, 10)
b = append(b, ',')
}
if len(ints) > 0 {
b[len(b)-1] = '}' // Replace trailing comma.
} else {
b = append(b, '}')
}
if quote == 1 {
b = append(b, '\'')
}
return b
}
func appendFloat64SliceValue(b []byte, v reflect.Value, quote int) []byte {
floats := v.Convert(float64SliceType).Interface().([]float64)
return appendFloat64Slice(b, floats, quote)
}
func appendFloat64Slice(b []byte, floats []float64, quote int) []byte {
if floats == nil {
return AppendNull(b, quote)
}
if quote == 1 {
b = append(b, '\'')
}
b = append(b, "{"...)
for _, n := range floats {
b = appendFloat(b, n)
b = append(b, ',')
}
if len(floats) > 0 {
b[len(b)-1] = '}' // Replace trailing comma.
} else {
b = append(b, '}')
}
if quote == 1 {
b = append(b, '\'')
}
return b
} | vendor/gopkg.in/pg.v4/types/append_array.go | 0.535341 | 0.401072 | append_array.go | starcoder |
package instructionsexplanation
// InstructionExplanation pairs the human-readable description of an
// instruction with an explanation of how its parameter is interpreted.
type InstructionExplanation struct {
	Instruction string // what the instruction does
	Param       string // how its parameter is interpreted
}

// instructionExplanations maps each instruction mnemonic to its explanation.
// Composite-literal element types are elided (gofmt -s style); the strings
// are unchanged.
var instructionExplanations = map[string]InstructionExplanation{
	"nop": {
		Instruction: "No operation",
		Param:       "No param needed",
	},
	"copy": {
		Instruction: "Copies the value from memory to the ACC register",
		Param:       "The parameter refers to the memory address",
	},
	"store": {
		Instruction: "Stores the value of the ACC register in memory",
		Param:       "The parameter refers to the memory address",
	},
	"add": {
		Instruction: "Adds a memory value to the ACC register and stores the result in ACC",
		Param:       "The parameter refers to the memory address",
	},
	"sub": {
		Instruction: "Subtracts a memory value from the value of the ACC register and stores the result in ACC",
		Param:       "The parameter refers to the memory address",
	},
	"input": {
		Instruction: "Inputs the input value into memory",
		Param:       "The parameter refers to the memory address",
	},
	"output": {
		Instruction: "Outputs a memory value",
		Param:       "The parameter refers to the memory address",
	},
	"kill": {
		Instruction: "Kills the program (you will need this instruction to tell the computer that the program ended)",
		Param:       "No param needed",
	},
	"jmp": {
		Instruction: "Jumps to the a instruction",
		Param:       "The parameter can be either a label or a number that refers to a instruction (Warning: the index of a instruction can be different from the index of the line where the instruction is)",
	},
	"jg": {
		Instruction: "Jumps to the a instruction if ACC register is greater than zero",
		Param:       "The parameter can be either a label or a number that refers to a instruction (Warning: the index of a instruction can be different from the index of the line where the instruction is)",
	},
	"je": {
		Instruction: "Jumps to the a instruction if ACC register is equal to zero",
		Param:       "The parameter can be either a label or a number that refers to a instruction (Warning: the index of a instruction can be different from the index of the line where the instruction is)",
	},
	"jl": {
		Instruction: "Jumps to the a instruction if ACC register is less than zero",
		Param:       "The parameter can be either a label or a number that refers to a instruction (Warning: the index of a instruction can be different from the index of the line where the instruction is)",
	},
}
package assert
import (
"bytes"
"reflect"
"testing"
)
// NoError fails the test immediately when err is non-nil.
func NoError(t testing.TB, err error) {
	if err == nil {
		return
	}
	t.Helper()
	t.Fatalf("%+v", err)
}

// Error fails the test immediately when err is nil.
func Error(t testing.TB, err error) {
	if err != nil {
		return
	}
	t.Helper()
	t.Fatal("expected an error")
}
// Equal asserts that a and b are equal. Comparable values are checked with
// ==, both directly and after literalConvert normalizes differing concrete
// types (e.g. int32(3) vs uint64(3) both become uint64(3)); otherwise it
// falls back to deepEqual (a []byte-aware reflect.DeepEqual).
func Equal(t testing.TB, a, b interface{}) {
	if ta, tb := reflect.TypeOf(a), reflect.TypeOf(b); ta != nil && tb != nil {
		if ta.Comparable() && tb.Comparable() {
			if a == b || literalConvert(a) == literalConvert(b) {
				return
			}
		}
	}
	if deepEqual(a, b) {
		return
	}
	t.Helper()
	t.Fatalf("%#v != %#v", a, b)
}
// DeepEqual fails the test unless a and b are deeply equal (with the
// package's []byte-aware comparison).
func DeepEqual(t testing.TB, a, b interface{}) {
	if deepEqual(a, b) {
		return
	}
	t.Helper()
	t.Fatalf("%#v != %#v", a, b)
}
// That fails the test when v is false.
func That(t testing.TB, v bool) {
	if v {
		return
	}
	t.Helper()
	t.Fatal("expected condition failed")
}

// True fails the test when v is false.
func True(t testing.TB, v bool) {
	if v {
		return
	}
	t.Helper()
	t.Fatal("expected condition failed")
}

// False fails the test when v is true.
func False(t testing.TB, v bool) {
	if !v {
		return
	}
	t.Helper()
	t.Fatal("expected condition failed")
}
// Nil asserts that a is nil: either the untyped nil interface, or a typed
// nil chan/func/interface/map/pointer/slice. A value that cannot be nil
// fails immediately with a dedicated message.
func Nil(t testing.TB, a interface{}) {
	if a == nil {
		return
	}
	rv := reflect.ValueOf(a)
	if !canNil(rv) {
		t.Helper()
		t.Fatalf("%#v cannot be nil", a)
	}
	if !rv.IsNil() {
		t.Helper()
		t.Fatalf("%#v != nil", a)
	}
}

// NotNil asserts that a is not nil. Values that cannot be nil (ints,
// strings, structs, ...) always pass.
func NotNil(t testing.TB, a interface{}) {
	if a == nil {
		t.Helper()
		t.Fatal("expected not nil")
	}
	rv := reflect.ValueOf(a)
	if !canNil(rv) {
		return
	}
	if rv.IsNil() {
		t.Helper()
		t.Fatalf("%#v == nil", a)
	}
}
// deepEqual compares a and b with reflect.DeepEqual, except that a pair of
// []byte values is compared with bytes.Equal (so nil and empty compare
// equal for byte slices).
func deepEqual(a, b interface{}) bool {
	if ab, aok := a.([]byte); aok {
		if bb, bok := b.([]byte); bok {
			return bytes.Equal(ab, bb)
		}
	}
	return reflect.DeepEqual(a, b)
}
func canNil(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
return true
}
return false
}
// literalConvert normalizes a value so that different concrete types holding
// the same literal compare equal under ==: named string/bool/float/complex
// types collapse to their base type, signed ints >= 0 and all unsigned ints
// collapse to uint64, and negative signed ints stay int64. Any other kind is
// returned as its reflect.Value.
func literalConvert(val interface{}) interface{} {
	switch val := reflect.ValueOf(val); val.Kind() {
	case reflect.Bool:
		return val.Bool()
	case reflect.String:
		return val.Convert(reflect.TypeOf("")).Interface()
	case reflect.Float32, reflect.Float64:
		return val.Float()
	case reflect.Complex64, reflect.Complex128:
		return val.Complex()
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		if asInt := val.Int(); asInt < 0 {
			return asInt
		}
		// Non-negative signed values unify with unsigned ones as uint64.
		return val.Convert(reflect.TypeOf(uint64(0))).Uint()
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return val.Uint()
	default:
		return val
	}
}
package kinematics
import (
"math"
"github.com/tab58/v1/spatial/pkg/geometry"
)
// Transform3D is a 3x3 matrix that encodes a transformation.
// It embeds *geometry.Matrix3D, so all matrix operations remain available.
type Transform3D struct {
	*geometry.Matrix3D
}
// rotation3DFromAxisAngle returns the nine elements of the axis-angle
// rotation matrix, ordered as Matrix3D.SetElements expects (a00..a02,
// a10..a12, a20..a22 — see Set3DRotation, which computes the same values).
// The axis is cloned and normalized before use.
func rotation3DFromAxisAngle(axis geometry.Vector3DReader, angle float64) [9]float64 {
	elements := [9]float64{}
	u := axis.Clone()
	u.Normalize()
	ux, uy, uz := u.GetX(), u.GetY(), u.GetZ()
	c := math.Cos(angle)
	s := math.Sin(angle)
	c1 := 1.0 - c
	// First row.
	elements[0] = c + ux*ux*c1
	elements[1] = ux*uy*c1 - uz*s
	elements[2] = ux*uz*c1 + uy*s
	// Second row.
	elements[3] = ux*uy*c1 + uz*s
	elements[4] = c + uy*uy*c1
	elements[5] = uy*uz*c1 - ux*s
	// Third row.
	elements[6] = ux*uz*c1 - uy*s
	elements[7] = uy*uz*c1 + ux*s
	elements[8] = c + uz*uz*c1
	return elements
}
// Set3DRotation sets the matrix to a 3D rotation about the specified axis and angle.
// The axis is normalized internally. It always returns nil.
func (m *Transform3D) Set3DRotation(axis geometry.Vector3DReader, angle float64) error {
	// Delegate to rotation3DFromAxisAngle so the axis-angle math lives in
	// exactly one place; the two previously duplicated the same element-by-
	// element computation.
	e := rotation3DFromAxisAngle(axis, angle)
	m.Matrix3D.SetElements(e[0], e[1], e[2], e[3], e[4], e[5], e[6], e[7], e[8])
	return nil
}
// Set3DXRotation sets the matrix to a 3D rotation about x-axis with the specified angle.
// It always returns nil.
func (m *Transform3D) Set3DXRotation(angle float64) error {
	c := math.Cos(angle)
	s := math.Sin(angle)
	m.Matrix3D.SetElements(1, 0, 0, 0, c, -s, 0, s, c)
	return nil
}

// Set3DYRotation sets the matrix to a 3D rotation about y-axis with the specified angle.
// It always returns nil.
func (m *Transform3D) Set3DYRotation(angle float64) error {
	c := math.Cos(angle)
	s := math.Sin(angle)
	m.Matrix3D.SetElements(c, 0, s, 0, 1, 0, -s, 0, c)
	return nil
}

// Set3DZRotation sets the matrix to a 3D rotation about z-axis with the specified angle.
// It always returns nil.
func (m *Transform3D) Set3DZRotation(angle float64) error {
	c := math.Cos(angle)
	s := math.Sin(angle)
	m.Matrix3D.SetElements(c, -s, 0, s, c, 0, 0, 0, 1)
	return nil
}
// Set3DScaling sets the matrix to a scaling matrix whose diagonal entries are
// v's components.
func (m *Transform3D) Set3DScaling(v geometry.Vector3DReader) error {
	x, y, z := v.GetX(), v.GetY(), v.GetZ()
	return m.Matrix3D.SetElements(x, 0, 0, 0, y, 0, 0, 0, z)
}

// Set3DMirror sets the matrix to encode a mirror operation for a vector about a line direction defined by the given vector n.
// This is the Householder form I - 2*n*n^T. Note n is not normalized here,
// so the result is only a true reflection when n is unit length — confirm
// callers guarantee that.
func (m *Transform3D) Set3DMirror(n geometry.Vector3DReader) error {
	n1, n2, n3 := n.GetX(), n.GetY(), n.GetZ()
	n1sq := n1 * n1
	n1n2 := n1 * n2
	n1n3 := n1 * n3
	n2sq := n2 * n2
	n2n3 := n2 * n3
	n3sq := n3 * n3
	return m.Matrix3D.SetElements(1-2*n1sq, -2*n1n2, -2*n1n3, -2*n1n2, 1-2*n2sq, -2*n2n3, -2*n1n3, -2*n2n3, 1-2*n3sq)
}
// Package pipeline provides abstraction of pipeline and stages. It is a basic tool to convert TAR/TAR GZ file into TFRecord file.
package pipeline
import (
"io"
"github.com/NVIDIA/go-tfdata/tfdata/archive"
"github.com/NVIDIA/go-tfdata/tfdata/core"
"github.com/NVIDIA/go-tfdata/tfdata/filter"
"github.com/NVIDIA/go-tfdata/tfdata/transform"
)
type (
	// TarStage produces the initial core.SampleReader, typically from an archive.
	TarStage func() (core.SampleReader, error)
	// SamplesStage transforms core.Samples: consumes a core.SampleReader and produces a core.SampleReader.
	SamplesStage func(core.SampleReader) core.SampleReader
	// Sample2TFExampleStage transforms core.Samples into core.TFExamples: consumes a core.SampleReader and produces a core.TFExampleReader.
	Sample2TFExampleStage func(core.SampleReader) core.TFExampleReader
	// TFExamplesStage transforms core.TFExamples: consumes a core.TFExampleReader and produces a core.TFExampleReader.
	TFExamplesStage func(core.TFExampleReader) core.TFExampleReader
	// TFRecordStage consumes a core.TFExampleReader, typically writing TFRecords.
	TFRecordStage func(core.TFExampleReader) error
	// DefaultPipeline represents TAR file to TFRecord file conversion with intermediate
	// transformations on core.Sample and core.TFExample.
	DefaultPipeline struct {
		tarStage TarStage
		samplesStage SamplesStage // optional stage - consumes the same type as produces
		sample2ExampleStage Sample2TFExampleStage
		tfExamplesStage TFExamplesStage // optional stage - consumes the same type as produces
		tfRecordStage TFRecordStage
	}
)
// TransformSamples adds transforming core.Samples according to tfs as pipeline's SamplesStage.
// Transformations will be executed in order of appearance in tfs.
func (p *DefaultPipeline) TransformSamples(tfs ...transform.SampleTransformation) *DefaultPipeline {
	return p.WithSamplesStage(func(r core.SampleReader) core.SampleReader {
		return transform.NewSampleTransformer(r, tfs...)
	})
}

// TransformTFExamples adds transforming core.TFExamples according to tfs as pipeline's TFExamplesStage.
// Transformations will be executed in order of appearance in tfs.
func (p *DefaultPipeline) TransformTFExamples(tfs ...transform.TFExampleTransformation) *DefaultPipeline {
	return p.WithTFExamplesStage(func(r core.TFExampleReader) core.TFExampleReader {
		return transform.NewTFExampleTransformer(r, tfs...)
	})
}

// FilterEmptySamples adds filtering of empty core.Samples as pipeline's SamplesStage.
func (p *DefaultPipeline) FilterEmptySamples() *DefaultPipeline {
	return p.WithSamplesStage(filter.EmptySamples)
}

// FilterEmptyTFExamples adds filtering of empty core.TFExamples as pipeline's TFExamplesStage.
func (p *DefaultPipeline) FilterEmptyTFExamples() *DefaultPipeline {
	return p.WithTFExamplesStage(filter.EmptyExamples)
}
// FromTar adds reading core.Samples from input as input was a TAR file.
// The archive reader is constructed lazily when Do runs the TarStage.
func (p *DefaultPipeline) FromTar(input io.Reader) *DefaultPipeline {
	return p.WithTarStage(func() (core.SampleReader, error) {
		return archive.NewTarReader(input)
	})
}

// FromTarGz adds reading core.Samples from input as input was a TAR GZ file.
// The archive reader is constructed lazily when Do runs the TarStage.
func (p *DefaultPipeline) FromTarGz(input io.Reader) *DefaultPipeline {
	return p.WithTarStage(func() (core.SampleReader, error) {
		return archive.NewTarGzReader(input)
	})
}
// ToTFRecord writes TFExamples to the specified writer in TFRecord format.
// If numWorkers is provided, all pipeline transformations will be done
// asynchronously. It assumes that all underlying Readers are async-safe.
// All default Readers, Transformations, Selections are async-safe.
func (p *DefaultPipeline) ToTFRecord(w io.Writer, numWorkers ...int) *DefaultPipeline {
	return p.WithTFRecordStage(func(reader core.TFExampleReader) error {
		writer := core.NewTFRecordWriter(w)
		if len(numWorkers) > 0 {
			// Only the first numWorkers value is used.
			return writer.WriteMessagesAsync(reader, numWorkers[0])
		}
		return writer.WriteMessages(reader)
	})
}

// SampleToTFExample converts Samples to TFExamples. TypesMap defines what are actual sample types.
// For each (key, mappedType) pair from TypesMap, TFExample will have feature[key] = value, where
// value is sample[key] converted into type mappedType.
// If m is not provided, each entry from value will be converted to BytesList.
// If m provided, but sample has key which is not present in TypesMap, value will be converted to BytesList.
func (p *DefaultPipeline) SampleToTFExample(m ...core.TypesMap) *DefaultPipeline {
	return p.WithSample2TFExampleStage(func(sr core.SampleReader) core.TFExampleReader {
		return transform.SamplesToTFExample(sr, m...)
	})
}
// Do executes pipeline based on specified stages. The TarStage,
// Sample2TFExampleStage, and TFRecordStage must be set before calling Do
// (they are invoked unconditionally); the samples and TFExamples stages are
// optional and skipped when unset.
func (p *DefaultPipeline) Do() error {
	// prepare pipeline
	sReader, err := p.tarStage()
	if err != nil {
		return err
	}
	if p.samplesStage != nil {
		sReader = p.samplesStage(sReader)
	}
	exReader := p.sample2ExampleStage(sReader)
	if p.tfExamplesStage != nil {
		exReader = p.tfExamplesStage(exReader)
	}
	// The whole pipeline is ready, start doing the job
	return p.tfRecordStage(exReader)
}
// NewPipeline returns an empty DefaultPipeline, ready to be configured with
// the With*Stage setters (or the From*/To* convenience methods).
func NewPipeline() *DefaultPipeline {
	p := DefaultPipeline{}
	return &p
}
// WithTarStage defines the TarStage of a pipeline, i.e. how the raw archive
// input is opened into a SampleReader. Overrides any previously set value.
func (p *DefaultPipeline) WithTarStage(stage TarStage) *DefaultPipeline {
	p.tarStage = stage
	return p
}
// WithSamplesStage defines the SamplesStage of a pipeline. If a SamplesStage
// has already been set, the resulting stage chains the transformations in the
// order they were registered.
func (p *DefaultPipeline) WithSamplesStage(stage SamplesStage) *DefaultPipeline {
	if p.samplesStage == nil {
		p.samplesStage = stage
		return p
	}
	prev := p.samplesStage
	p.samplesStage = func(reader core.SampleReader) core.SampleReader {
		return stage(prev(reader))
	}
	return p
}
// WithSample2TFExampleStage defines the Sample2TFExampleStage of a pipeline,
// i.e. how Samples are converted to TFExamples. Overrides previous value.
func (p *DefaultPipeline) WithSample2TFExampleStage(stage Sample2TFExampleStage) *DefaultPipeline {
	p.sample2ExampleStage = stage
	return p
}
// WithTFExamplesStage defines the TFExamplesStage of a pipeline. If a
// TFExamplesStage has already been set, the resulting stage chains the
// transformations in the order they were registered.
func (p *DefaultPipeline) WithTFExamplesStage(stage TFExamplesStage) *DefaultPipeline {
	if p.tfExamplesStage == nil {
		p.tfExamplesStage = stage
		return p
	}
	prev := p.tfExamplesStage
	p.tfExamplesStage = func(reader core.TFExampleReader) core.TFExampleReader {
		return stage(prev(reader))
	}
	return p
}
// WithTFRecordStage defines TFRecordStage of a pipeline. Overrides previous value.
func (p *DefaultPipeline) WithTFRecordStage(stage TFRecordStage) *DefaultPipeline {
p.tfRecordStage = stage
return p
} | tfdata/pipeline/pipeline.go | 0.892281 | 0.540924 | pipeline.go | starcoder |
package main
import (
"fmt"
"strconv"
"strings"
"github.com/kindermoumoute/adventofcode/pkg/execute"
"github.com/kindermoumoute/adventofcode/pkg"
)
// run solves both puzzle parts and returns them as strings.
func run(input string) (interface{}, interface{}) {
	r := &Registers{r: make([]int, 4)}
	// Part 2 opcode numbers map to this (pre-deduced) ordering.
	ops := []func(int, int, int){r.eqri, r.banr, r.bori, r.mulr, r.seti, r.bani, r.muli, r.gtrr, r.setr, r.addi, r.gtir, r.borr, r.addr, r.eqrr, r.gtri, r.eqir}

	samples, program := parse(input)

	// Part 1: count samples compatible with at least three operations.
	part1Score := 0
	for _, sample := range samples {
		matches := 0
		for _, op := range ops {
			r.SetRegisters(sample.in)
			op(sample.a, sample.b, sample.c)
			if r.Equals(sample.out) {
				matches++
			}
		}
		if matches >= 3 {
			part1Score++
		}
	}

	// Part 2: execute the test program from a zeroed register file.
	r.SetRegisters([]int{0, 0, 0, 0})
	for _, ins := range program {
		ops[ins.op](ins.a, ins.b, ins.c)
	}

	return strconv.Itoa(part1Score), strconv.Itoa(r.r[0])
}
// Part1 is one observed sample for part 1: the register state before (in) and
// after (out) executing the encoded instruction "op a b c".
type Part1 struct {
	in []int
	out []int
	op, a, b, c int
}
// Instruction is one line of the part-2 test program: opcode number op with
// operands a, b and destination register c.
type Instruction struct {
	op int
	a, b, c int
}
// parse splits the puzzle input into the observed samples (part 1) and the
// test program (part 2).
func parse(s string) ([]Part1, []Instruction) {
	part := strings.Split(s, "\n\n\n\n")
	part1 := strings.Split(part[0], "\n")
	part2 := strings.Split(part[1], "\n")
	// Each sample spans four lines: Before, instruction, After, blank.
	// BUG FIX: size with ceiling division instead of len/4+1, which allocated a
	// spurious zero-valued trailing sample whenever len(part1) was a multiple
	// of 4 (its nil in/out slices would panic later in run).
	p1 := make([]Part1, (len(part1)+3)/4)
	for i := 0; i < len(part1); i += 4 {
		p1[i/4].in = make([]int, 4)
		n, err := fmt.Sscanf(part1[i], "Before: [%d,%d,%d,%d]", &p1[i/4].in[0], &p1[i/4].in[1], &p1[i/4].in[2], &p1[i/4].in[3])
		pkg.Check(err)
		if n != 4 {
			// BUG FIX: the message previously reported "0 args expected".
			panic(fmt.Errorf("%d args expected in scanf, got %d", 4, n))
		}
		n, err = fmt.Sscanf(part1[i+1], "%d %d %d %d", &p1[i/4].op, &p1[i/4].a, &p1[i/4].b, &p1[i/4].c)
		pkg.Check(err)
		if n != 4 {
			panic(fmt.Errorf("%d args expected in scanf, got %d", 4, n))
		}
		p1[i/4].out = make([]int, 4)
		n, err = fmt.Sscanf(part1[i+2], "After: [%d, %d, %d, %d]", &p1[i/4].out[0], &p1[i/4].out[1], &p1[i/4].out[2], &p1[i/4].out[3])
		pkg.Check(err)
		if n != 4 {
			panic(fmt.Errorf("%d args expected in scanf, got %d", 4, n))
		}
	}
	p2 := make([]Instruction, len(part2))
	for i := range part2 {
		n, err := fmt.Sscanf(part2[i], "%d %d %d %d", &p2[i].op, &p2[i].a, &p2[i].b, &p2[i].c)
		pkg.Check(err)
		if n != 4 {
			panic(fmt.Errorf("%d args expected in scanf, got %d", 4, n))
		}
	}
	return p1, p2
}
// main wires run into the shared advent-of-code puzzle runner.
func main() {
	execute.Run(run, nil, puzzle, true)
}
// Registers models the device's four registers and its opcode methods.
type Registers struct {
	ip int // instruction pointer; unused in this day's solution — TODO confirm
	r []int
}
// SetRegisters overwrites the register file with the first four values of init.
func (r *Registers) SetRegisters(init []int) {
	for i := range r.r {
		r.r[i] = init[i]
	}
}
// Equals reports whether all four registers match out.
func (r *Registers) Equals(out []int) bool {
	for i := 0; i < 4; i++ {
		if r.r[i] != out[i] {
			return false
		}
	}
	return true
}
// setr copies register a into register c (b is ignored).
func (r *Registers) setr(a, b, c int) {
	r.r[c] = r.r[a]
}
// seti stores the immediate value a into register c (b is ignored).
func (r *Registers) seti(a, b, c int) {
	r.r[c] = a
}
// addr stores the sum of registers a and b into register c.
func (r *Registers) addr(a, b, c int) {
	r.r[c] = r.r[a] + r.r[b]
}
// addi stores the sum of register a and immediate b into register c.
func (r *Registers) addi(a, b, c int) {
	r.r[c] = r.r[a] + b
}
// mulr stores the product of registers a and b into register c.
func (r *Registers) mulr(a, b, c int) {
	r.r[c] = r.r[a] * r.r[b]
}
// muli stores the product of register a and immediate b into register c.
func (r *Registers) muli(a, b, c int) {
	r.r[c] = r.r[a] * b
}
// banr stores the bitwise AND of registers a and b into register c.
func (r *Registers) banr(a, b, c int) {
	r.r[c] = r.r[a] & r.r[b]
}
// bani stores the bitwise AND of register a and immediate b into register c.
func (r *Registers) bani(a, b, c int) {
	r.r[c] = r.r[a] & b
}
// borr stores the bitwise OR of registers a and b into register c.
func (r *Registers) borr(a, b, c int) {
	r.r[c] = r.r[a] | r.r[b]
}
// bori stores the bitwise OR of register a and immediate b into register c.
func (r *Registers) bori(a, b, c int) {
	r.r[c] = r.r[a] | b
}
// eqrr sets register c to 1 if registers a and b are equal, else 0.
func (r *Registers) eqrr(a, b, c int) {
	result := 0
	if r.r[a] == r.r[b] {
		result = 1
	}
	r.r[c] = result
}
// eqri sets register c to 1 if register a equals immediate b, else 0.
func (r *Registers) eqri(a, b, c int) {
	result := 0
	if r.r[a] == b {
		result = 1
	}
	r.r[c] = result
}
// eqir sets register c to 1 if immediate a equals register b, else 0.
func (r *Registers) eqir(a, b, c int) {
	result := 0
	if a == r.r[b] {
		result = 1
	}
	r.r[c] = result
}
// gtrr sets register c to 1 if register a is greater than register b, else 0.
func (r *Registers) gtrr(a, b, c int) {
	result := 0
	if r.r[a] > r.r[b] {
		result = 1
	}
	r.r[c] = result
}
// gtir sets register c to 1 if immediate a is greater than register b, else 0.
func (r *Registers) gtir(a, b, c int) {
	result := 0
	if a > r.r[b] {
		result = 1
	}
	r.r[c] = result
}
func (r *Registers) gtri(a, b, c int) {
if b < r.r[a] {
r.r[c] = 1
return
}
r.r[c] = 0
} | 2018/day16/main.go | 0.533154 | 0.440229 | main.go | starcoder |
package detection
import (
"github.com/nodejayes/geolib/pkg/geometry"
)
// PointInPolygon takes a geometry.Point and a geometry.Polygon and determines
// whether the point resides inside the polygon. The polygon can be convex or
// concave, and holes are accounted for. When ignoreBoundary is true, points
// lying exactly on the outer ring are treated as outside.
// FIX: doc comment referred to a nonexistent BooleanPointInPolygon; replaced
// `== false` with the idiomatic negation.
func PointInPolygon(point *geometry.Point, polygon *geometry.Polygon, ignoreBoundary bool) bool {
	bbox := getBoundingBox(polygon)
	// Quick elimination if point is not inside bbox
	if !InBBox(point, bbox) {
		return false
	}
	// normalize to multipolygon
	polys := [][][][]float64{polygon.Coordinates}
	insidePoly := false
	for i := 0; i < len(polys) && !insidePoly; i++ {
		// check if it is in the outer ring first
		if InRing(point, geometry.NewLine(polys[i][0], point.CRS), ignoreBoundary) {
			inHole := false
			k := 1
			// check for the point in any of the holes
			for k < len(polys[i]) && !inHole {
				// A point inside a hole is outside the polygon; a hole's
				// boundary still belongs to the polygon, hence !ignoreBoundary.
				if InRing(point, geometry.NewLine(polys[i][k], point.CRS), !ignoreBoundary) {
					inHole = true
				}
				k++
			}
			if !inHole {
				insidePoly = true
			}
		}
	}
	return insidePoly
}
// InRing reports whether pt lies inside the closed ring described by ringGeom,
// using an even-odd ray-casting test. Points exactly on the ring's boundary
// return !ignoreBoundary.
func InRing(ptGeom *geometry.Point, ringGeom *geometry.Line, ignoreBoundary bool) bool {
	pt := ptGeom.Coordinates
	ring := ringGeom.Coordinates
	ringLen := len(ring)
	isInside := false
	// Drop a duplicated closing coordinate so each vertex is visited once.
	if ring[0][0] == ring[ringLen-1][0] && ring[0][1] == ring[ringLen-1][1] {
		ring = ring[0 : ringLen-1]
		ringLen = len(ring)
	}
	i := 0
	j := ringLen - 1
	for i < ringLen {
		xi := ring[i][0]
		yi := ring[i][1]
		xj := ring[j][0]
		yj := ring[j][1]
		// Zero cross product plus the two bounding products => pt lies on
		// segment (i, j).
		onBoundary := (pt[1]*(xi-xj)+yi*(xj-pt[0])+yj*(pt[0]-xi) == 0) &&
			((xi-pt[0])*(xj-pt[0]) <= 0) && ((yi-pt[1])*(yj-pt[1]) <= 0)
		if onBoundary {
			return !ignoreBoundary
		}
		// Toggle inside-ness for each edge crossed by a horizontal ray from pt.
		intersect := ((yi > pt[1]) != (yj > pt[1])) &&
			(pt[0] < (xj-xi)*(pt[1]-yi)/(yj-yi)+xi)
		if intersect {
			isInside = !isInside
		}
		j = i
		i++
	}
	return isInside
}
// InBBox checks whether a Point lies within a BoundingBox (boundary inclusive).
// BUG FIX: the original compared the X coordinate against MaxX with `<=`
// reversed and mixed axes (MaxX against Coordinates[1], MinY against
// Coordinates[0]), so the test was wrong for almost any box.
func InBBox(pt *geometry.Point, bbox *geometry.BoundingBox) bool {
	return bbox.MinX <= pt.Coordinates[0] &&
		pt.Coordinates[0] <= bbox.MaxX &&
		bbox.MinY <= pt.Coordinates[1] &&
		pt.Coordinates[1] <= bbox.MaxY
}
func getBoundingBox(polygonGeom *geometry.Polygon) *geometry.BoundingBox {
polygon := polygonGeom.Coordinates
gn := len(polygon)
if gn == 0 {
panic("invalid Polygon")
}
box := []float64{polygon[0][0][0], polygon[0][0][0], polygon[0][0][1], polygon[0][0][1]}
for i := 0; i < gn; i++ {
// Polygons
for j := 0; j < len(polygon[i]); j++ {
// Vertices
if polygon[i][j][0] < box[0] {
box[0] = polygon[i][j][0]
}
if polygon[i][j][0] > box[1] {
box[1] = polygon[i][j][0]
}
if polygon[i][j][1] < box[2] {
box[2] = polygon[i][j][1]
}
if polygon[i][j][1] > box[3] {
box[3] = polygon[i][j][1]
}
}
}
return geometry.NewBoundingBox(box[0], box[1], box[2], box[3], polygonGeom.CRS)
} | pkg/detection/inpolygon.go | 0.81899 | 0.5835 | inpolygon.go | starcoder |
package fixedwidth
import (
"errors"
"fmt"
"io"
"github.com/mattmc3/goetl"
"github.com/mattmc3/gofurther/slicex"
"github.com/mattmc3/gofurther/stringsx"
)
// Formatter defines a function signature for formatting fixed width
// values when writing them.
type Formatter func(fwdef *FieldDef, value string) (string, error)
// ErrTruncatedValue indicates a value was too long and required truncation.
var ErrTruncatedValue = errors.New("the value is too long and must be truncated")
// FormatLeft left-aligns value within the field, padding the right side with
// spaces, and truncates the data if it exceeds the field length. If the data
// was truncated, ErrTruncatedValue is returned alongside the truncated value.
func FormatLeft(fwdef *FieldDef, value string) (string, error) {
	if len(value) <= fwdef.Length {
		return stringsx.AlignLeft(value, ' ', fwdef.Length), nil
	}
	if fwdef.Length <= 0 {
		return "", ErrTruncatedValue
	}
	return value[:fwdef.Length], ErrTruncatedValue
}
// FormatRight right-aligns value within the field, padding the left side with
// spaces, and truncates the data if it exceeds the field length. If the data
// was truncated, ErrTruncatedValue is returned alongside the truncated value.
func FormatRight(fwdef *FieldDef, value string) (string, error) {
	if len(value) <= fwdef.Length {
		return stringsx.AlignRight(value, ' ', fwdef.Length), nil
	}
	if fwdef.Length <= 0 {
		return "", ErrTruncatedValue
	}
	return value[:fwdef.Length], ErrTruncatedValue
}
// FieldDef represents a fixed width field definition.
type FieldDef struct {
	// FieldName identifies the field.
	FieldName string
	// Offset is the field's starting position within a record.
	Offset int
	// Length is the field's width.
	Length int
	// Formatter optionally formats values written for this field.
	Formatter Formatter
}
// FormatValue applies this FieldDef's Formatter to value. A nil Formatter
// returns value unchanged.
func (it *FieldDef) FormatValue(value string) (string, error) {
	if f := it.Formatter; f != nil {
		return f(it, value)
	}
	return value, nil
}
// NewFieldDef constructs a FieldDef with the given name, offset and length
// and no Formatter.
func NewFieldDef(fieldName string, offset int, length int) *FieldDef {
	return &FieldDef{
		FieldName: fieldName,
		Offset:    offset,
		Length:    length,
	}
}
// GenerateFieldDefs returns field definitions built from parallel slices of
// field names and lengths. Negative lengths are clamped to zero, and offsets
// are assigned cumulatively in declaration order.
func GenerateFieldDefs(fieldNames []string, fieldLengths []int) ([]FieldDef, error) {
	if len(fieldLengths) != len(fieldNames) {
		return nil, errors.New("number of field names and lengths differ")
	}
	if len(fieldNames) == 0 {
		return nil, errors.New("no field names and lengths specified")
	}
	fwfields := make([]FieldDef, len(fieldNames))
	offset := 0
	for i, length := range fieldLengths {
		if length < 0 {
			length = 0
		}
		fwfields[i] = FieldDef{
			FieldName: fieldNames[i],
			Offset:    offset,
			Length:    length,
		}
		offset += length
	}
	return fwfields, nil
}
// Reader handles reading from a fixed width file.
type Reader struct {
	reader io.Reader
	// Fields describes the fixed width layout used to slice each record.
	Fields []FieldDef
	// recordType tags the records this reader produces.
	recordType string
	// recordNumber tracks the current record index (see the commented-out
	// ReadNext below) — TODO confirm once ReadNext is reinstated.
	recordNumber int
}
// NewReader constructs a Reader over rdr using the supplied field definitions.
func NewReader(recordType string, rdr io.Reader, fields []FieldDef) *Reader {
	return &Reader{
		reader:     rdr,
		Fields:     fields,
		recordType: recordType,
	}
}
// NewReaderByLengths constructs a Reader whose fields are derived from the
// given field lengths; names are auto-generated and offsets are cumulative.
func NewReaderByLengths(recordType string, rdr io.Reader, fieldLengths []int) *Reader {
	fwfields := make([]FieldDef, len(fieldLengths))
	offset := 0
	for i, l := range fieldLengths {
		if l < 0 {
			l = 0
		}
		fwfields[i] = FieldDef{
			FieldName: goetl.FieldNumStyle(i),
			Offset:    offset,
			Length:    l,
		}
		// BUG FIX: offset was never advanced, leaving every field at offset 0
		// (compare GenerateFieldDefs, which does increment it).
		offset += l
	}
	return NewReader(recordType, rdr, fwfields)
}
// ReadNext reads the next record if one is available. When the end of the data
// set is reached, the EndOfRecords error is returned.
// func (it *Reader) ReadNext() ([]interface{}, error) {
// idx := &it.recordNumber
// *idx++
// var rec []string
// var result []interface{}
// var err error
// rec, err = it.reader.Read()
// result = slicex.ObjectifyStrings(rec)
// if err != nil {
// if err == io.EOF {
// return nil, EndOfRecords
// }
// return result, err
// }
// if *idx == 1 {
// if it.hasHeader {
// it.fieldNames[it.recordType] = rec
// } else {
// it.fieldNames[it.recordType] = GenerateColumnNames(len(result), ColNumStyle)
// }
// }
// return result, nil
// }
// Writer handles writing to a fixed width destination.
type Writer struct {
	writer io.Writer
	// recordType is the only record type this writer accepts.
	recordType string
	// records counts records written; reported by RecordsWritten.
	records int
}
// NewWriter constructs a Writer that only accepts records of recordType.
func NewWriter(recordType string, wtr io.Writer) *Writer {
	// BUG FIX: recordType was previously dropped on the floor, so Write would
	// reject every record whose type was non-empty.
	return &Writer{writer: wtr, recordType: recordType}
}
// Write outputs the record provided to a fixed width flatfile destination.
// Records whose type does not match the writer's configured type are rejected.
// The first write error aborts the record and is returned.
func (it *Writer) Write(recordType string, record []interface{}) error {
	if recordType != it.recordType {
		return fmt.Errorf("only able to write records of type '%v'; received '%v'", it.recordType, recordType)
	}
	byterecs := slicex.Byteify(record)
	for _, buf := range byterecs {
		if _, err := it.writer.Write(buf); err != nil {
			return err
		}
	}
	// BUG FIX: track the record count so RecordsWritten reports real totals;
	// previously the counter was never incremented.
	it.records++
	return nil
}
// RecordsWritten returns the total number of records added by the writer.
func (it *Writer) RecordsWritten() int {
return it.records
} | fixedwidth/fixedwidth.go | 0.579638 | 0.427217 | fixedwidth.go | starcoder |
package filters
import (
"github.com/golang/glog"
"image"
"image/color"
"math"
"github.com/go-gl/mathgl/mgl64"
)
// StraightLine is an image filter that draws a straight line segment.
type StraightLine struct {
	// From, To are the start and end points of the line.
	From, To image.Point
	// Color specifies the line color.
	Color color.Color
	// Thickness specifies the line width in pixels.
	Thickness float64
	// Rectangle enclosing straight line.
	rect image.Rectangle
	// rebaseMatrix maps image coordinates into the segment's local frame
	// (origin at From, segment along the x axis); built in SetPoints.
	rebaseMatrix mgl64.Mat3
	// Length of the From->To vector, computed in SetPoints.
	vectorLength float64
}
// NewStraightLine creates a new straight line given its endpoints, color and
// thickness.
func NewStraightLine(from, to image.Point, color color.Color, thickness float64) *StraightLine {
	line := &StraightLine{Color: color, Thickness: thickness}
	line.SetPoints(from, to)
	return line
}
const (
	// NOTE(review): these arrow-head factors are not referenced anywhere in
	// the visible code — confirm they are used elsewhere before removing.
	straightLineHeadLengthFactor = 10.0
	straightLineHeadWidthFactor  = 6.0
)
// SetPoints sets the segment endpoints and precomputes the enclosing
// rectangle, the segment length, and the change-of-basis matrix used by at().
func (c *StraightLine) SetPoints(from, to image.Point) {
	if to.X == from.X && to.Y == from.Y {
		to.X += 1 // ensure the line is at least one pixel long so it is visible
	}
	c.From, c.To = from, to
	// Canon guarantees Min <= Max regardless of drag direction.
	c.rect = image.Rectangle{Min: from, Max: to}.Canon()
	// Grow the rectangle by the thickness so the stroke's edges are included
	// at both endpoints after the coordinate transform.
	headExtraPixels := int(c.Thickness)
	c.rect.Min.X -= headExtraPixels
	c.rect.Min.Y -= headExtraPixels
	c.rect.Max.X += headExtraPixels
	c.rect.Max.Y += headExtraPixels
	// Vector from the start point to the end point (recomputed on every drag).
	delta := c.To.Sub(c.From)
	vector := mgl64.Vec2{float64(delta.X), float64(delta.Y)}
	// Segment length.
	c.vectorLength = vector.Len()
	// Unit direction of the segment.
	direction := vector.Mul(1.0 / c.vectorLength)
	// Angle of the segment relative to the x axis.
	angle := math.Atan2(direction.Y(), direction.X())
	glog.V(2).Infof("SetPoints(from=%v, to=%v): delta=%v, length=%.0f, angle=%5.1f",
		from, to, delta, c.vectorLength, mgl64.RadToDeg(angle))
	// rebaseMatrix maps image coordinates into the segment's frame:
	// translate From to the origin, then rotate the segment onto the x axis.
	c.rebaseMatrix = mgl64.HomogRotate2D(-angle)
	c.rebaseMatrix = c.rebaseMatrix.Mul3(
		mgl64.Translate2D(float64(-c.From.X), float64(-c.From.Y)))
}
// at is the per-pixel function given to the filterImage object: it returns the
// line color for pixels on the segment and the underlying color otherwise.
func (c *StraightLine) at(x, y int, under color.Color) color.Color {
	// Fast reject pixels outside the enclosing rectangle.
	if x > c.rect.Max.X || x < c.rect.Min.X || y > c.rect.Max.Y || y < c.rect.Min.Y {
		return under
	}
	// Move to coordinates on the segment defined from c.From to c.To.
	homogPoint := mgl64.Vec3{float64(x), float64(y), 1.0} // Homogeneous coordinates.
	if glog.V(3) {
		// Debug overlay: paint rows/columns through the endpoints yellow.
		if math.Abs(homogPoint.Y()-float64(c.To.Y)) < 2 || math.Abs(homogPoint.X()-float64(c.To.X)) < 2 {
			return Yellow
		}
		if math.Abs(homogPoint.Y()-float64(c.From.Y)) < 2 || math.Abs(homogPoint.X()-float64(c.From.X)) < 2 {
			return Yellow
		}
	}
	homogPoint = c.rebaseMatrix.Mul3x1(homogPoint)
	if glog.V(3) {
		// Debug overlay: paint the transformed axes green.
		if math.Abs(homogPoint.Y()) < 3 {
			return Green
		}
		if math.Abs(homogPoint.X()) < 1 {
			return Green
		}
		if math.Abs(homogPoint.X()-c.vectorLength) < 1 {
			return Green
		}
	}
	// In the rebased frame the segment runs along the x axis from 0 to
	// vectorLength; a pixel is on the stroke when within half a thickness of it.
	if homogPoint.X() < 0 {
		return under
	}
	if homogPoint.X() < c.vectorLength {
		if math.Abs(homogPoint.Y()) < c.Thickness/2 {
			return c.Color
		}
	}
	return under
}
// Apply 接口ImageFilter的实现.
// 实现方式,若是需要绘制的图,就替换为当先选中的颜色,若是不是就返回背景颜色 under
func (c *StraightLine) Apply(image image.Image) image.Image {
return &filterImage{image, c.at}
} | filters/straight_line.go | 0.549882 | 0.429489 | straight_line.go | starcoder |
package onshape
import (
"encoding/json"
)
// BTPTopLevelTypeDeclaration287 struct for BTPTopLevelTypeDeclaration287.
// All own fields are optional pointers so that unset values can be
// distinguished from zero values during JSON (de)serialization.
// NOTE(review): this looks like generated API-client code — prefer
// regenerating over hand-editing.
type BTPTopLevelTypeDeclaration287 struct {
	BTPTopLevelNode286
	BtType *string `json:"btType,omitempty"`
	Name *BTPIdentifier8 `json:"name,omitempty"`
	SpaceAfterVersion *BTPSpace10 `json:"spaceAfterVersion,omitempty"`
	Version *BTPLiteralNumber258 `json:"version,omitempty"`
}
// NewBTPTopLevelTypeDeclaration287 instantiates a new
// BTPTopLevelTypeDeclaration287 object, assigning any defined default values
// and ensuring API-required properties are set.
func NewBTPTopLevelTypeDeclaration287() *BTPTopLevelTypeDeclaration287 {
	return &BTPTopLevelTypeDeclaration287{}
}
// NewBTPTopLevelTypeDeclaration287WithDefaults instantiates a new
// BTPTopLevelTypeDeclaration287 object with only defined default values
// assigned; API-required properties are not guaranteed to be set.
func NewBTPTopLevelTypeDeclaration287WithDefaults() *BTPTopLevelTypeDeclaration287 {
	return &BTPTopLevelTypeDeclaration287{}
}
// GetBtType returns the BtType field value if set, zero value otherwise.
func (o *BTPTopLevelTypeDeclaration287) GetBtType() string {
	if o != nil && o.BtType != nil {
		return *o.BtType
	}
	var zero string
	return zero
}
// GetBtTypeOk returns a tuple with the BtType field value if set, nil
// otherwise, and a boolean reporting whether the value has been set.
func (o *BTPTopLevelTypeDeclaration287) GetBtTypeOk() (*string, bool) {
	if o != nil && o.BtType != nil {
		return o.BtType, true
	}
	return nil, false
}
// HasBtType reports whether the BtType field has been set.
func (o *BTPTopLevelTypeDeclaration287) HasBtType() bool {
	return o != nil && o.BtType != nil
}
// SetBtType gets a reference to the given string and assigns it to the BtType field.
// The stored pointer refers to a copy of v, not to the caller's variable.
func (o *BTPTopLevelTypeDeclaration287) SetBtType(v string) {
	o.BtType = &v
}
// GetName returns the Name field value if set, zero value otherwise.
func (o *BTPTopLevelTypeDeclaration287) GetName() BTPIdentifier8 {
	if o != nil && o.Name != nil {
		return *o.Name
	}
	var zero BTPIdentifier8
	return zero
}
// GetNameOk returns a tuple with the Name field value if set, nil otherwise,
// and a boolean reporting whether the value has been set.
func (o *BTPTopLevelTypeDeclaration287) GetNameOk() (*BTPIdentifier8, bool) {
	if o != nil && o.Name != nil {
		return o.Name, true
	}
	return nil, false
}
// HasName reports whether the Name field has been set.
func (o *BTPTopLevelTypeDeclaration287) HasName() bool {
	return o != nil && o.Name != nil
}
// SetName gets a reference to the given BTPIdentifier8 and assigns it to the Name field.
// The stored pointer refers to a copy of v, not to the caller's variable.
func (o *BTPTopLevelTypeDeclaration287) SetName(v BTPIdentifier8) {
	o.Name = &v
}
// GetSpaceAfterVersion returns the SpaceAfterVersion field value if set, zero
// value otherwise.
func (o *BTPTopLevelTypeDeclaration287) GetSpaceAfterVersion() BTPSpace10 {
	if o != nil && o.SpaceAfterVersion != nil {
		return *o.SpaceAfterVersion
	}
	var zero BTPSpace10
	return zero
}
// GetSpaceAfterVersionOk returns a tuple with the SpaceAfterVersion field
// value if set, nil otherwise, and a boolean reporting whether it was set.
func (o *BTPTopLevelTypeDeclaration287) GetSpaceAfterVersionOk() (*BTPSpace10, bool) {
	if o != nil && o.SpaceAfterVersion != nil {
		return o.SpaceAfterVersion, true
	}
	return nil, false
}
// HasSpaceAfterVersion reports whether the SpaceAfterVersion field has been set.
func (o *BTPTopLevelTypeDeclaration287) HasSpaceAfterVersion() bool {
	return o != nil && o.SpaceAfterVersion != nil
}
// SetSpaceAfterVersion gets a reference to the given BTPSpace10 and assigns it to the SpaceAfterVersion field.
// The stored pointer refers to a copy of v, not to the caller's variable.
func (o *BTPTopLevelTypeDeclaration287) SetSpaceAfterVersion(v BTPSpace10) {
	o.SpaceAfterVersion = &v
}
// GetVersion returns the Version field value if set, zero value otherwise.
func (o *BTPTopLevelTypeDeclaration287) GetVersion() BTPLiteralNumber258 {
	if o != nil && o.Version != nil {
		return *o.Version
	}
	var zero BTPLiteralNumber258
	return zero
}
// GetVersionOk returns a tuple with the Version field value if set, nil
// otherwise, and a boolean reporting whether the value has been set.
func (o *BTPTopLevelTypeDeclaration287) GetVersionOk() (*BTPLiteralNumber258, bool) {
	if o != nil && o.Version != nil {
		return o.Version, true
	}
	return nil, false
}
// HasVersion reports whether the Version field has been set.
func (o *BTPTopLevelTypeDeclaration287) HasVersion() bool {
	return o != nil && o.Version != nil
}
// SetVersion gets a reference to the given BTPLiteralNumber258 and assigns it to the Version field.
// The stored pointer refers to a copy of v, not to the caller's variable.
func (o *BTPTopLevelTypeDeclaration287) SetVersion(v BTPLiteralNumber258) {
	o.Version = &v
}
// MarshalJSON serializes the embedded BTPTopLevelNode286 first, then overlays
// this type's own optional fields onto the resulting map.
func (o BTPTopLevelTypeDeclaration287) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	serializedBTPTopLevelNode286, errBTPTopLevelNode286 := json.Marshal(o.BTPTopLevelNode286)
	if errBTPTopLevelNode286 != nil {
		return []byte{}, errBTPTopLevelNode286
	}
	errBTPTopLevelNode286 = json.Unmarshal([]byte(serializedBTPTopLevelNode286), &toSerialize)
	if errBTPTopLevelNode286 != nil {
		return []byte{}, errBTPTopLevelNode286
	}
	if o.BtType != nil {
		toSerialize["btType"] = o.BtType
	}
	if o.Name != nil {
		toSerialize["name"] = o.Name
	}
	if o.SpaceAfterVersion != nil {
		toSerialize["spaceAfterVersion"] = o.SpaceAfterVersion
	}
	if o.Version != nil {
		toSerialize["version"] = o.Version
	}
	return json.Marshal(toSerialize)
}
// NullableBTPTopLevelTypeDeclaration287 wraps a BTPTopLevelTypeDeclaration287
// pointer and tracks whether it has been explicitly set, distinguishing
// "unset" from "set to nil" during JSON round-trips.
type NullableBTPTopLevelTypeDeclaration287 struct {
	value *BTPTopLevelTypeDeclaration287
	isSet bool
}
// Get returns the wrapped value (nil when unset).
func (v NullableBTPTopLevelTypeDeclaration287) Get() *BTPTopLevelTypeDeclaration287 {
	return v.value
}
// Set stores val and marks the wrapper as set.
func (v *NullableBTPTopLevelTypeDeclaration287) Set(val *BTPTopLevelTypeDeclaration287) {
	v.value = val
	v.isSet = true
}
// IsSet reports whether a value (possibly nil) has been explicitly stored.
func (v NullableBTPTopLevelTypeDeclaration287) IsSet() bool {
	return v.isSet
}
// Unset clears the wrapped value and marks the wrapper as unset.
func (v *NullableBTPTopLevelTypeDeclaration287) Unset() {
	v.value = nil
	v.isSet = false
}
// NewNullableBTPTopLevelTypeDeclaration287 wraps val and marks it as set.
func NewNullableBTPTopLevelTypeDeclaration287(val *BTPTopLevelTypeDeclaration287) *NullableBTPTopLevelTypeDeclaration287 {
	return &NullableBTPTopLevelTypeDeclaration287{value: val, isSet: true}
}
// MarshalJSON encodes the wrapped value; an unset or nil value encodes as null.
func (v NullableBTPTopLevelTypeDeclaration287) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
func (v *NullableBTPTopLevelTypeDeclaration287) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | onshape/model_btp_top_level_type_declaration_287.go | 0.695855 | 0.49762 | model_btp_top_level_type_declaration_287.go | starcoder |
package sqlutil
import (
"database/sql"
"fmt"
"reflect"
"time"
"github.com/grafana/grafana-plugin-sdk-go/data"
)
// FrameConverter defines how to convert the scanned value into a value that
// can be put into a dataframe field (OutputFieldType).
type FrameConverter struct {
	// FieldType is the type that is created for the dataframe field.
	// The value returned from `ConverterFunc` must match this type, otherwise the data package will panic.
	FieldType data.FieldType
	// ConverterFunc defines how to convert the scanned `InputScanType` to the supplied `FieldType`.
	// `in` is always supplied as a pointer, as it is scanned as a pointer, even if `InputScanType` is not a pointer.
	// For example, if `InputScanType` is `string`, then `in` is `*string`.
	ConverterFunc func(in interface{}) (interface{}, error)
}
// StringConverter can be used to store types not supported by
// a Frame into a *string. When scanning, if a SQL row's InputScanType's Kind
// and InputScanKind match that returned by the sql response, then the
// conversion func will be run on the row.
// Note, a Converter should be favored over a StringConverter as not all SQL rows can be scanned into a string.
// This type is only here for backwards compatibility.
type StringConverter struct {
	// Name is an optional property that can be used to identify a converter
	Name string
	// InputScanKind is the reflect.Kind matched against the scanned value's
	// kind — NOTE(review): a reflect.Type might be a stricter match; confirm.
	InputScanKind reflect.Kind
	// InputTypeName is the case-sensitive driver type name this converter matches.
	InputTypeName string
	// Conversion func may be nil to do no additional operations on the string conversion.
	ConversionFunc func(in *string) (*string, error)
	// If the Replacer is not nil, the replacement will be performed.
	Replacer *StringFieldReplacer
}
// Note: StringConverter is perhaps better understood as []byte. However, currently
// the Vector type ([][]byte) is not supported. https://github.com/grafana/grafana-plugin-sdk-go/issues/57
// StringFieldReplacer is used to replace a *string Field in a Frame. The type
// returned by the ReplaceFunc must match the type of elements of VectorType.
// Both properties must be non-nil.
// Note, a Converter should be favored over a StringConverter as not all SQL rows can be scanned into a string.
// This type is only here for backwards compatibility.
type StringFieldReplacer struct {
	// OutputFieldType is the dataframe field type produced by ReplaceFunc.
	OutputFieldType data.FieldType
	// ReplaceFunc converts the scanned string into the output value.
	ReplaceFunc func(in *string) (interface{}, error)
}
// ToConverter turns this StringConverter into a Converter that scans through
// a sql.NullString.
func (s StringConverter) ToConverter() Converter {
	c := Converter{
		Name:          s.Name,
		InputScanType: reflect.TypeOf(sql.NullString{}),
		InputTypeName: s.InputTypeName,
	}
	c.FrameConverter = StringFrameConverter(s)
	return c
}
// StringFrameConverter creates a FrameConverter from a StringConverter. NULL
// values map to nil; otherwise the optional ConversionFunc and then the
// optional Replacer are applied.
func StringFrameConverter(s StringConverter) FrameConverter {
	f := data.FieldTypeNullableString
	if s.Replacer != nil {
		f = s.Replacer.OutputFieldType
	}
	return FrameConverter{
		FieldType: f,
		ConverterFunc: func(in interface{}) (interface{}, error) {
			ns := in.(*sql.NullString)
			if !ns.Valid {
				return nil, nil
			}
			v := &ns.String
			if s.ConversionFunc != nil {
				converted, err := s.ConversionFunc(v)
				if err != nil {
					return nil, err
				}
				v = converted
			}
			// BUG FIX: guard against a nil Replacer before dereferencing it;
			// previously converters without a Replacer panicked here.
			if s.Replacer != nil && s.Replacer.ReplaceFunc != nil {
				return s.Replacer.ReplaceFunc(v)
			}
			return v, nil
		},
	}
}
// ToConverters creates a slice of Converters from a slice of StringConverters,
// preserving order.
func ToConverters(s ...StringConverter) []Converter {
	converters := make([]Converter, 0, len(s))
	for _, sc := range s {
		converters = append(converters, sc.ToConverter())
	}
	return converters
}
// Converter is used to convert known types returned in sql.Row to a type usable in a dataframe.
type Converter struct {
	// Name is the name of the converter that is used to distinguish them when debugging or parsing log output
	Name string
	// InputScanType is the type that is used when (*sql.Rows).Scan(...) is called.
	// Some drivers require certain data types to be used when scanning data from sql rows, and this type should reflect that.
	InputScanType reflect.Type
	// InputTypeName is the case-sensitive name that must match the type that this converter matches
	InputTypeName string
	// FrameConverter defines how to convert the scanned value into a value that can be put into a dataframe
	FrameConverter FrameConverter
}
// DefaultConverterFunc assumes that the scanned value, in, is already a type that can be put into a dataframe.
func DefaultConverterFunc(t reflect.Type) func(in interface{}) (interface{}, error) {
return func(in interface{}) (interface{}, error) {
inType := reflect.TypeOf(in)
if inType == reflect.PtrTo(t) {
n := reflect.ValueOf(in)
return n.Elem().Interface(), nil
}
return in, nil
}
}
// NewDefaultConverter creates a Converter for the named type t. Types whose
// slice form is not a valid dataframe field type fall back to scanning
// through sql.NullString into a nullable string field. Known nullable types
// reuse the predefined Null*Converters when nullable is true.
func NewDefaultConverter(name string, nullable bool, t reflect.Type) Converter {
	slice := reflect.MakeSlice(reflect.SliceOf(t), 0, 0).Interface()
	if !data.ValidFieldType(slice) {
		return Converter{
			Name:          fmt.Sprintf("[%s] String converter", t),
			InputScanType: reflect.TypeOf(sql.NullString{}),
			InputTypeName: name,
			FrameConverter: FrameConverter{
				FieldType: data.FieldTypeNullableString,
				ConverterFunc: func(in interface{}) (interface{}, error) {
					v := in.(*sql.NullString)
					if !v.Valid {
						return (*string)(nil), nil
					}
					f := v.String
					return &f, nil
				},
			},
		}
	}
	v := reflect.New(t)
	var fieldType data.FieldType
	// NOTE(review): reflect.New(t).Type() is always reflect.PtrTo(t), so this
	// condition is always true and the else branch below looks unreachable —
	// confirm whether t.Kind() == reflect.Ptr was intended.
	if v.Type() == reflect.PtrTo(t) {
		v = v.Elem()
		fieldType = data.FieldTypeFor(v.Interface())
	} else {
		fieldType = data.FieldTypeFor(v.Interface()).NullableType()
	}
	if nullable {
		if converter, ok := NullConverters[t]; ok {
			return converter
		}
	}
	return Converter{
		Name:          fmt.Sprintf("Default converter for %s", name),
		InputScanType: t,
		InputTypeName: name,
		FrameConverter: FrameConverter{
			FieldType:     fieldType,
			ConverterFunc: DefaultConverterFunc(t),
		},
	}
}
// Predefined converters for common nullable SQL column types. Each scans
// through the corresponding database/sql Null* wrapper and produces a
// nullable dataframe field (nil for SQL NULL).
var (
	// NullStringConverter creates a *string using the scan type of `sql.NullString`
	NullStringConverter = Converter{
		Name:          "nullable string converter",
		InputScanType: reflect.TypeOf(sql.NullString{}),
		InputTypeName: "STRING",
		FrameConverter: FrameConverter{
			FieldType: data.FieldTypeNullableString,
			ConverterFunc: func(n interface{}) (interface{}, error) {
				v := n.(*sql.NullString)
				if !v.Valid {
					return (*string)(nil), nil
				}
				f := v.String
				return &f, nil
			},
		},
	}
	// NullDecimalConverter creates a *float64 using the scan type of `sql.NullFloat64`
	NullDecimalConverter = Converter{
		Name:          "NULLABLE decimal converter",
		InputScanType: reflect.TypeOf(sql.NullFloat64{}),
		InputTypeName: "DOUBLE",
		FrameConverter: FrameConverter{
			FieldType: data.FieldTypeNullableFloat64,
			ConverterFunc: func(n interface{}) (interface{}, error) {
				v := n.(*sql.NullFloat64)
				if !v.Valid {
					return (*float64)(nil), nil
				}
				f := v.Float64
				return &f, nil
			},
		},
	}
	// NullInt64Converter creates a *int64 using the scan type of `sql.NullInt64`
	NullInt64Converter = Converter{
		Name:          "NULLABLE int64 converter",
		InputScanType: reflect.TypeOf(sql.NullInt64{}),
		InputTypeName: "INTEGER",
		FrameConverter: FrameConverter{
			FieldType: data.FieldTypeNullableInt64,
			ConverterFunc: func(n interface{}) (interface{}, error) {
				v := n.(*sql.NullInt64)
				if !v.Valid {
					return (*int64)(nil), nil
				}
				f := v.Int64
				return &f, nil
			},
		},
	}
	// NullInt32Converter creates a *int32 using the scan type of `sql.NullInt32`
	NullInt32Converter = Converter{
		Name:          "NULLABLE int32 converter",
		InputScanType: reflect.TypeOf(sql.NullInt32{}),
		InputTypeName: "INTEGER",
		FrameConverter: FrameConverter{
			FieldType: data.FieldTypeNullableInt32,
			ConverterFunc: func(n interface{}) (interface{}, error) {
				v := n.(*sql.NullInt32)
				if !v.Valid {
					return (*int32)(nil), nil
				}
				f := v.Int32
				return &f, nil
			},
		},
	}
	// NullTimeConverter creates a *time.time using the scan type of `sql.NullTime`
	NullTimeConverter = Converter{
		Name:          "NULLABLE time.Time converter",
		InputScanType: reflect.TypeOf(sql.NullTime{}),
		InputTypeName: "TIMESTAMP",
		FrameConverter: FrameConverter{
			FieldType: data.FieldTypeNullableTime,
			ConverterFunc: func(n interface{}) (interface{}, error) {
				v := n.(*sql.NullTime)
				if !v.Valid {
					return (*time.Time)(nil), nil
				}
				f := v.Time
				return &f, nil
			},
		},
	}
	// NullBoolConverter creates a *bool using the scan type of `sql.NullBool`
	NullBoolConverter = Converter{
		Name:          "nullable bool converter",
		InputScanType: reflect.TypeOf(sql.NullBool{}),
		InputTypeName: "BOOLEAN",
		FrameConverter: FrameConverter{
			FieldType: data.FieldTypeNullableBool,
			ConverterFunc: func(n interface{}) (interface{}, error) {
				v := n.(*sql.NullBool)
				if !v.Valid {
					return (*bool)(nil), nil
				}
				// NOTE(review): unlike the converters above, this returns a
				// pointer into the scanned NullBool rather than a copy —
				// confirm the scan target is not reused between rows.
				return &v.Bool, nil
			},
		},
	}
)
// NullConverters is a map of data type names (from reflect.TypeOf(...).String()) to converters
// Converters supplied here are used as defaults for fields that do not have a supplied Converter
var NullConverters = map[reflect.Type]Converter{
reflect.TypeOf(float64(0)): NullDecimalConverter,
reflect.TypeOf(int64(0)): NullInt64Converter,
reflect.TypeOf(int32(0)): NullInt32Converter,
reflect.TypeOf(""): NullStringConverter,
reflect.TypeOf(time.Time{}): NullTimeConverter,
reflect.TypeOf(false): NullBoolConverter,
} | data/sqlutil/converter.go | 0.771112 | 0.624036 | converter.go | starcoder |
package flag
import (
"time"
"flag"
)
// FlagSet represents an optionally scoped *flag.FlagSet.
type FlagSet interface {
	// Scope creates a new scoped FlagSet. The name of any flag
	// added to the new FlagSet is prefixed with the given
	// prefix. In the flag's usage, the expression "{{NAME}}" is
	// replaced with the given description.
	Scope(prefix, description string) FlagSet

	// GetScope retrieves this FlagSet's scoping prefix, including
	// a trailing period.
	GetScope() string

	// Unwrap returns the flag.FlagSet underlying this FlagSet.
	Unwrap() *flag.FlagSet

	// Var defines a flag with the specified name and usage. The
	// flag's type and value are derived from value. See
	// flag.FlagSet.Var for more information.
	Var(value flag.Value, name string, usage string)

	// HostPortVar defines a HostPort flag with the specified name,
	// default value, and usage string. The argument hp points to a
	// HostPort variable in which to store the value of the flag.
	// The flag accepts "host:port" strings.
	HostPortVar(hp *HostPort, name string, value HostPort, usage string)

	// HostPort defines a HostPort flag with the specified name,
	// default value, and usage string. The return value is the
	// address of a HostPort variable that stores the value of the
	// flag. The flag accepts "host:port" strings.
	HostPort(name string, value HostPort, usage string) *HostPort

	// BoolVar defines a bool flag with the specified name,
	// default value, and usage. The flag's value is stored in p.
	BoolVar(p *bool, name string, value bool, usage string)

	// Bool defines a bool flag with the specified name,
	// default value, and usage. The return value is a pointer
	// to a variable that stores the flag's value.
	Bool(name string, value bool, usage string) *bool

	// DurationVar defines a time.Duration flag with the specified name,
	// default value, and usage. The flag's value is stored in p.
	DurationVar(p *time.Duration, name string, value time.Duration, usage string)

	// Duration defines a time.Duration flag with the specified name,
	// default value, and usage. The return value is a pointer
	// to a variable that stores the flag's value.
	Duration(name string, value time.Duration, usage string) *time.Duration

	// Float64Var defines a float64 flag with the specified name,
	// default value, and usage. The flag's value is stored in p.
	Float64Var(p *float64, name string, value float64, usage string)

	// Float64 defines a float64 flag with the specified name,
	// default value, and usage. The return value is a pointer
	// to a variable that stores the flag's value.
	Float64(name string, value float64, usage string) *float64

	// IntVar defines an int flag with the specified name,
	// default value, and usage. The flag's value is stored in p.
	IntVar(p *int, name string, value int, usage string)

	// Int defines an int flag with the specified name,
	// default value, and usage. The return value is a pointer
	// to a variable that stores the flag's value.
	Int(name string, value int, usage string) *int

	// Int64Var defines an int64 flag with the specified name,
	// default value, and usage. The flag's value is stored in p.
	Int64Var(p *int64, name string, value int64, usage string)

	// Int64 defines an int64 flag with the specified name,
	// default value, and usage. The return value is a pointer
	// to a variable that stores the flag's value.
	Int64(name string, value int64, usage string) *int64

	// StringVar defines a string flag with the specified name,
	// default value, and usage. The flag's value is stored in p.
	StringVar(p *string, name string, value string, usage string)

	// String defines a string flag with the specified name,
	// default value, and usage. The return value is a pointer
	// to a variable that stores the flag's value.
	String(name string, value string, usage string) *string

	// UintVar defines a uint flag with the specified name,
	// default value, and usage. The flag's value is stored in p.
	UintVar(p *uint, name string, value uint, usage string)

	// Uint defines a uint flag with the specified name,
	// default value, and usage. The return value is a pointer
	// to a variable that stores the flag's value.
	Uint(name string, value uint, usage string) *uint

	// Uint64Var defines a uint64 flag with the specified name,
	// default value, and usage. The flag's value is stored in p.
	Uint64Var(p *uint64, name string, value uint64, usage string)

	// Uint64 defines a uint64 flag with the specified name,
	// default value, and usage. The return value is a pointer
	// to a variable that stores the flag's value.
	Uint64(name string, value uint64, usage string) *uint64
}
package chaincfg
import (
"github.com/p9c/pod/pkg/log"
"strings"
chainhash "github.com/p9c/pod/pkg/chain/hash"
)
// String returns the hostname of the DNS seed in human-readable form.
// It makes DNSSeed satisfy the fmt.Stringer interface.
func (d DNSSeed) String() string {
	return d.Host
}
// Register registers the network parameters for a Bitcoin network. It may
// return ErrDuplicateNet if the network is already registered, either from a
// previous Register call or because it is one of the default networks.
//
// Network parameters should be registered into this package by a main package
// as early as possible. Library packages may then look up networks or network
// parameters based on inputs and work regardless of whether the network is
// standard or not.
func Register(params *Params) error {
	if _, dup := registeredNets[params.Net]; dup {
		return ErrDuplicateNet
	}
	registeredNets[params.Net] = struct{}{}
	pubKeyHashAddrIDs[params.PubKeyHashAddrID] = struct{}{}
	scriptHashAddrIDs[params.ScriptHashAddrID] = struct{}{}
	hdPrivToPubKeyIDs[params.HDPrivateKeyID] = params.HDPublicKeyID[:]

	// A valid Bech32-encoded segwit address always begins with the
	// human-readable part for the given net followed by '1'.
	bech32SegwitPrefixes[params.Bech32HRPSegwit+"1"] = struct{}{}

	return nil
}
// mustRegister behaves like Register but panics when registration fails.
// It is intended solely for use from package init functions.
func mustRegister(params *Params) {
	err := Register(params)
	if err != nil {
		panic("failed to register network: " + err.Error())
	}
}
// IsPubKeyHashAddrID reports whether id is a known prefix byte for a
// pay-to-pubkey-hash address on any default or registered network. It is used
// when decoding an address string into a specific address type. Callers should
// also check IsScriptHashAddrID and decide whether the address is a pubkey
// hash address, a script hash address, neither, or undeterminable (when both
// report true).
func IsPubKeyHashAddrID(id byte) bool {
	_, found := pubKeyHashAddrIDs[id]
	return found
}
// IsScriptHashAddrID reports whether id is a known prefix byte for a
// pay-to-script-hash address on any default or registered network. It is used
// when decoding an address string into a specific address type. Callers should
// also check IsPubKeyHashAddrID and decide whether the address is a pubkey
// hash address, a script hash address, neither, or undeterminable (when both
// report true).
func IsScriptHashAddrID(id byte) bool {
	_, found := scriptHashAddrIDs[id]
	return found
}
// IsBech32SegwitPrefix reports whether prefix (case-insensitively) is a known
// prefix for segwit addresses on any default or registered network. It is
// used when decoding an address string into a specific address type.
func IsBech32SegwitPrefix(prefix string) bool {
	_, found := bech32SegwitPrefixes[strings.ToLower(prefix)]
	return found
}
// HDPrivateKeyToPublicKeyID accepts a private hierarchical deterministic
// extended key id and returns the associated public key id. When the provided
// id is not registered, the ErrUnknownHDKeyID error is returned.
func HDPrivateKeyToPublicKeyID(id []byte) ([]byte, error) {
	// HD key ids are always exactly four bytes.
	if len(id) != 4 {
		return nil, ErrUnknownHDKeyID
	}

	var key [4]byte
	copy(key[:], id)
	if pub, ok := hdPrivToPubKeyIDs[key]; ok {
		return pub, nil
	}
	return nil, ErrUnknownHDKeyID
}
// newHashFromStr converts the passed big-endian hex string into a
// chainhash.Hash. It differs from the chainhash version only in that it
// panics on error, since it must only ever be called with hard-coded (and
// therefore known-good) hashes.
func newHashFromStr(hexStr string) *chainhash.Hash {
	h, err := chainhash.NewHashFromStr(hexStr)
	if err == nil {
		return h
	}
	log.ERROR(err)
	// Panicking is acceptable here: the only way this can fail is a mistake
	// in a hard-coded hash, so any failure is 100% predictable and occurs
	// at package init time.
	panic(err)
}
// init registers every default network so lookups work out of the box.
func init() {
	// Register all default networks when the package is initialized.
	mustRegister(&MainNetParams)
	mustRegister(&TestNet3Params)
	mustRegister(&RegressionTestParams)
	mustRegister(&SimNetParams)
}
package shamir
import (
"crypto/rand"
"crypto/subtle"
"fmt"
mathrand "math/rand"
"time"
)
const (
	// ShareOverhead is the byte size overhead of each share produced by
	// Split compared to the secret: one trailing byte holding the share's
	// x coordinate tag.
	ShareOverhead = 1
)
// polynomial represents a polynomial of arbitrary degree over GF(2^8); the
// coefficient of x^i is stored at index i.
type polynomial struct {
	coefficients []uint8
}

// makePolynomial builds a random polynomial of the given degree whose
// constant term (the intercept) is the provided value and whose remaining
// coefficients are drawn from crypto/rand.
func makePolynomial(intercept, degree uint8) (polynomial, error) {
	out := polynomial{
		coefficients: make([]byte, degree+1),
	}

	// The constant term carries the value the polynomial hides.
	out.coefficients[0] = intercept

	// Fill the higher-order coefficients with cryptographically secure
	// random bytes.
	if _, err := rand.Read(out.coefficients[1:]); err != nil {
		return out, err
	}
	return out, nil
}
// evaluate returns the value of the polynomial at x in GF(2^8).
func (p *polynomial) evaluate(x uint8) uint8 {
	// At the origin the result is simply the constant term.
	if x == 0 {
		return p.coefficients[0]
	}

	// Horner's method, working down from the highest-order coefficient.
	deg := len(p.coefficients) - 1
	result := p.coefficients[deg]
	for i := deg - 1; i >= 0; i-- {
		result = add(mult(result, x), p.coefficients[i])
	}
	return result
}
// interpolatePolynomial takes N sample points and returns the value at the
// given x using Lagrange interpolation over GF(2^8).
func interpolatePolynomial(xSamples, ySamples []uint8, x uint8) uint8 {
	limit := len(xSamples)
	var result, basis uint8
	for i := 0; i < limit; i++ {
		basis = 1
		for j := 0; j < limit; j++ {
			if i == j {
				continue
			}
			// In GF(2^8) addition and subtraction are both XOR, so
			// add() here computes the differences (x - x_j) and
			// (x_i - x_j) of the Lagrange basis term.
			num := add(x, xSamples[j])
			denom := add(xSamples[i], xSamples[j])
			term := div(num, denom)
			basis = mult(basis, term)
		}
		group := mult(ySamples[i], basis)
		result = add(result, group)
	}
	return result
}
// div divides two numbers in GF(2^8) using the discrete log/exp tables:
// a/b = exp((log(a) - log(b)) mod 255). The result for a == 0 is forced to
// zero with constant-time selection.
func div(a, b uint8) uint8 {
	if b == 0 {
		// leaks some timing information but we don't care anyways as this
		// should never happen, hence the panic
		panic("divide by zero")
	}

	var goodVal, zero uint8
	log_a := logTable[a]
	log_b := logTable[b]
	// Exponents live in Z/255 (the multiplicative group order); normalize
	// a negative difference back into range.
	diff := (int(log_a) - int(log_b)) % 255
	if diff < 0 {
		diff += 255
	}

	ret := expTable[diff]

	// Ensure we return zero if a is zero but aren't subject to timing attacks
	goodVal = ret
	if subtle.ConstantTimeByteEq(a, 0) == 1 {
		ret = zero
	} else {
		ret = goodVal
	}
	return ret
}
// mult multiplies two numbers in GF(2^8) using the discrete log/exp tables:
// a*b = exp((log(a) + log(b)) mod 255). Results for a zero operand are
// forced to zero with constant-time selection.
func mult(a, b uint8) (out uint8) {
	var goodVal, zero uint8
	log_a := logTable[a]
	log_b := logTable[b]
	sum := (int(log_a) + int(log_b)) % 255

	ret := expTable[sum]

	// Ensure we return zero if either a or b are zero but aren't subject to
	// timing attacks
	goodVal = ret
	if subtle.ConstantTimeByteEq(a, 0) == 1 {
		ret = zero
	} else {
		ret = goodVal
	}

	if subtle.ConstantTimeByteEq(b, 0) == 1 {
		ret = zero
	} else {
		// This operation does not do anything logically useful. It
		// only ensures a constant number of assignments to thwart
		// timing attacks.
		goodVal = zero
	}

	return ret
}
// add combines two numbers in GF(2^8). Addition in GF(2^8) is XOR, which is
// its own inverse, so this can also be used for subtraction.
func add(a, b uint8) uint8 {
	return a ^ b
}
// Split takes an arbitrarily long secret and generates a `parts`
// number of shares, `threshold` of which are required to reconstruct
// the secret. The parts and threshold must be at least 2, and less
// than 256. The returned shares are each one byte longer than the secret
// as they attach a tag used to reconstruct the secret.
func Split(secret []byte, parts, threshold int) ([][]byte, error) {
	// Sanity check the input.
	if parts < threshold {
		return nil, fmt.Errorf("parts cannot be less than threshold")
	}
	if parts > 255 {
		return nil, fmt.Errorf("parts cannot exceed 255")
	}
	if threshold < 2 {
		return nil, fmt.Errorf("threshold must be at least 2")
	}
	if threshold > 255 {
		return nil, fmt.Errorf("threshold cannot exceed 255")
	}
	if len(secret) == 0 {
		return nil, fmt.Errorf("cannot split an empty secret")
	}

	// Generate a random permutation of x coordinates. A private source is
	// used instead of seeding math/rand's global source: reseeding the
	// global source is a process-wide side effect, and Seed is deprecated
	// as of Go 1.20. The x coordinates are public; secrecy comes from the
	// crypto/rand polynomial coefficients in makePolynomial.
	rnd := mathrand.New(mathrand.NewSource(time.Now().UnixNano()))
	xCoordinates := rnd.Perm(255)

	// Allocate the output array and place each share's x coordinate in its
	// final byte. The layout of each share is {y1, y2, .., yN, x}; x is
	// offset by one so it can never be zero.
	out := make([][]byte, parts)
	for idx := range out {
		out[idx] = make([]byte, len(secret)+1)
		out[idx][len(secret)] = uint8(xCoordinates[idx]) + 1
	}

	// Construct a random polynomial for each byte of the secret.
	// Because we are using a field of size 256, we can only represent
	// a single byte as the intercept of the polynomial, so we must
	// use a new polynomial for each byte.
	for idx, val := range secret {
		p, err := makePolynomial(val, uint8(threshold-1))
		if err != nil {
			return nil, err
		}

		// Generate a `parts` number of (x,y) pairs. The x value is
		// encoded once as the final byte of each share rather than
		// stored alongside every y.
		for i := 0; i < parts; i++ {
			x := uint8(xCoordinates[i]) + 1
			y := p.evaluate(x)
			out[i][idx] = y
		}
	}

	// Return the encoded shares.
	return out, nil
}
// Combine is used to reverse a Split and reconstruct a secret
// once a `threshold` number of parts are available.
func Combine(parts [][]byte) ([]byte, error) {
// Verify enough parts provided
if len(parts) < 2 {
return nil, fmt.Errorf("less than two parts cannot be used to reconstruct the secret")
}
// Verify the parts are all the same length
firstPartLen := len(parts[0])
if firstPartLen < 2 {
return nil, fmt.Errorf("parts must be at least two bytes")
}
for i := 1; i < len(parts); i++ {
if len(parts[i]) != firstPartLen {
return nil, fmt.Errorf("all parts must be the same length")
}
}
// Create a buffer to store the reconstructed secret
secret := make([]byte, firstPartLen-1)
// Buffer to store the samples
x_samples := make([]uint8, len(parts))
y_samples := make([]uint8, len(parts))
// Set the x value for each sample and ensure no x_sample values are the same,
// otherwise div() can be unhappy
checkMap := map[byte]bool{}
for i, part := range parts {
samp := part[firstPartLen-1]
if exists := checkMap[samp]; exists {
return nil, fmt.Errorf("duplicate part detected")
}
checkMap[samp] = true
x_samples[i] = samp
}
// Reconstruct each byte
for idx := range secret {
// Set the y value for each sample
for i, part := range parts {
y_samples[i] = part[idx]
}
// Interpolate the polynomial and compute the value at 0
val := interpolatePolynomial(x_samples, y_samples, 0)
// Evaluate the 0th value to get the intercept
secret[idx] = val
}
return secret, nil
} | shamir/shamir.go | 0.839306 | 0.547887 | shamir.go | starcoder |
package base45
import (
"bytes"
"encoding/binary"
"math"
"net/url"
)
/*
Chapter references:
[1] https://datatracker.ietf.org/doc/draft-faltstrom-base45/
2021-07-01 draft-faltstrom-base45-07
*/
/*
[1] Chapter 4:
A 45-character subset of US-ASCII is used; the 45 characters usable
in a QR code in Alphanumeric mode. Base45 encodes 2 bytes in 3
characters, compared to Base64, which encodes 3 bytes in 4
characters.
[1] Chapter 4.2:
The Alphanumeric mode is defined to use 45 characters as specified in
this alphabet.
*/
// Alphabet defines the 45 characters usable in base 45 encoding; the value of
// a base-45 digit is its index in this table ([1] chapter 4.2).
var Alphabet = []byte{
	'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B',
	'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
	'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
	' ', '$', '%', '*', '+', '-', '.', '/', ':',
}

// encodeSingleByte encodes one trailing byte as two base-45 characters.
// Per [1] chapter 4 the byte a is interpreted as an unsigned 8-bit integer
// and written as a = c + 45*d, least significant digit first.
func encodeSingleByte(b byte) []byte {
	v := int(b)
	return []byte{Alphabet[v%45], Alphabet[(v/45)%45]}
}

// encodeTwoBytes encodes a big-endian pair of bytes as three base-45
// characters. Per [1] chapter 4 the 16-bit value n = a*256 + b is written as
// n = c + 45*d + 45*45*e, least significant digit first.
func encodeTwoBytes(pair []byte) []byte {
	n := binary.BigEndian.Uint16(pair)
	return []byte{
		Alphabet[n%45],
		Alphabet[(n/45)%45],
		Alphabet[(n/(45*45))%45],
	}
}

// Encode encodes the given bytes to base 45: each pair of input bytes becomes
// three output characters and a trailing odd byte becomes two ([1] chapter 4).
// An empty input produces an empty result.
func Encode(in []byte) []byte {
	// Two output bytes per input byte is a slightly generous but simple
	// capacity estimate.
	out := make([]byte, 0, len(in)*2)

	i := 0
	for ; i+2 <= len(in); i += 2 {
		out = append(out, encodeTwoBytes(in[i:i+2])...)
	}
	if i < len(in) {
		out = append(out, encodeSingleByte(in[i])...)
	}
	return out
}
// EncodeURLSafe encodes the given bytes to a URL-safe (query-safe) string.
// If an empty input is given, an empty result will be returned.
func EncodeURLSafe(in []byte) string {
	/*
		[1] Chapter 6:
		It should be noted that the resulting string after encoding to Base45
		might include non-URL-safe characters so if the URL including the
		Base45 encoded data has to be URL safe, one has to use %-encoding.
	*/
	// Query-escaping is used here (rather than path-escaping through
	// url.URL.String) so that the output is the exact inverse of the
	// url.QueryUnescape performed by DecodeURLSafe. Path-escaping leaves
	// '+' — a base-45 alphabet character — unescaped, which QueryUnescape
	// would then decode to a space and silently corrupt the payload; it can
	// also prepend "./" when the encoded data starts with ':'.
	return url.QueryEscape(string(Encode(in)))
}
// decodeTwoBytes decodes two base-45 characters into a single byte; it is
// used for very short or trailing base-45 data. Per [1] chapter 4 the digits
// [c d] represent the value c + 45*d.
func decodeTwoBytes(dst, src []byte) error {
	// Decode already validated src against the alphabet, so these lookups
	// cannot return -1 here.
	c := bytes.IndexByte(Alphabet, src[0])
	d := bytes.IndexByte(Alphabet, src[1])
	val := c + d*45

	// Reject encodings whose value cannot fit into one byte (a possible
	// overflow attack).
	if val > math.MaxUint8 {
		return ErrInvalidEncodedDataOverflow
	}
	copy(dst, []byte{byte(val)})
	return nil
}
// decodeThreeBytes decodes three base-45 characters into two bytes. Per [1]
// chapter 4 the digits [c d e] represent the 16-bit value
// n = c + 45*d + 45*45*e (least significant digit first), which is written to
// dst in big-endian order.
func decodeThreeBytes(dst, src []byte) error {
	// Decode already validated src against the alphabet, so these lookups
	// cannot return -1 here.
	c := bytes.IndexByte(Alphabet, src[0])
	d := bytes.IndexByte(Alphabet, src[1])
	e := bytes.IndexByte(Alphabet, src[2])
	val := c + d*45 + e*45*45

	// Reject encodings whose value cannot fit into two bytes (a possible
	// overflow attack).
	if val > math.MaxUint16 {
		return ErrInvalidEncodedDataOverflow
	}
	binary.BigEndian.PutUint16(dst, uint16(val))
	return nil
}
// Decode reads base-45 encoded bytes and returns the decoded bytes. It
// returns ErrEmptyInput for an empty input, ErrInvalidEncodingCharacters when
// a character outside the alphabet is present ([1] chapter 6), and
// ErrInvalidLength when the input cannot be partitioned into three-character
// groups plus an optional trailing two-character group.
func Decode(in []byte) ([]byte, error) {
	if len(in) == 0 {
		return nil, ErrEmptyInput
	}

	// [1] chapter 6: implementations MUST reject data containing characters
	// outside the base alphabet.
	for _, c := range in {
		if bytes.IndexByte(Alphabet, c) < 0 {
			return nil, ErrInvalidEncodingCharacters
		}
	}

	// [1] chapter 4: an even number of decoded bytes encodes to a length
	// divisible by 3; an odd number leaves a trailing two-character group.
	// Hence valid lengths are 0 or 2 modulo 3.
	if r := len(in) % 3; r != 0 && r != 2 {
		return nil, ErrInvalidLength
	}

	// The decoded form is always shorter than the encoded form, so len(in)
	// is a safe upper bound for the output buffer.
	out := make([]byte, len(in))
	written := 0

	i := 0
	for ; i+3 <= len(in); i += 3 {
		// Three characters in, two bytes out.
		if err := decodeThreeBytes(out[written:written+2], in[i:i+3]); err != nil {
			return nil, err
		}
		written += 2
	}
	if i < len(in) {
		// A trailing two-character group yields one byte.
		if err := decodeTwoBytes(out[written:written+1], in[i:i+2]); err != nil {
			return nil, err
		}
		written++
	}
	return out[:written], nil
}
// DecodeURLSafe reads the given url encoded base 45 encoded data and returns the decoded bytes.
// If an empty input is given, ErrEmptyInput is returned.
func DecodeURLSafe(in string) ([]byte, error) {
/*
[1] Chapter 6:
It should be noted that the resulting string after encoding to Base45
might include non-URL-safe characters so if the URL including the
Base45 encoded data has to be URL safe, one has to use %-encoding.
*/
enc, err := url.QueryUnescape(in)
if err != nil {
return nil, ErrInvalidURLSafeEscaping
}
dec, err := Decode([]byte(enc))
if err != nil {
return nil, err
}
return dec, nil
} | base45.go | 0.810891 | 0.541773 | base45.go | starcoder |
package rfc4757
import (
"crypto/hmac"
"crypto/rand"
"crypto/rc4"
"errors"
"fmt"
"gopkg.in/L11R/gokrb5.v7/crypto/etype"
)
// EncryptData encrypts the data provided using methods specific to the etype
// provided as defined in RFC 4757 (RC4-HMAC).
func EncryptData(key, data []byte, e etype.EType) ([]byte, error) {
	if expected := e.GetKeyByteSize(); len(key) != expected {
		return []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", expected, len(key))
	}

	c, err := rc4.NewCipher(key)
	if err != nil {
		return []byte{}, fmt.Errorf("error creating RC4 cipher: %v", err)
	}

	// RC4 is a stream cipher: XOR the keystream over a copy of the input so
	// the caller's slice is left untouched.
	ciphertext := make([]byte, len(data))
	copy(ciphertext, data)
	c.XORKeyStream(ciphertext, ciphertext)
	c.Reset()
	return ciphertext, nil
}
// DecryptData decrypts the data provided using the methods specific to the
// etype provided as defined in RFC 4757. RC4 is symmetric, so decryption is
// the same keystream XOR as encryption.
func DecryptData(key, data []byte, e etype.EType) ([]byte, error) {
	return EncryptData(key, data, e)
}
// EncryptMessage encrypts the message provided using the methods specific to the etype provided as defined in RFC 4757.
// The encrypted data is concatenated with its RC4 header containing integrity checksum and confounder to create an encrypted message.
func EncryptMessage(key, data []byte, usage uint32, export bool, e etype.EType) ([]byte, error) {
	// Random confounder prepended to the plaintext before hashing/encrypting.
	confounder := make([]byte, e.GetConfounderByteSize()) // size = 8
	_, err := rand.Read(confounder)
	if err != nil {
		return []byte{}, fmt.Errorf("error generating confounder: %v", err)
	}
	// Key derivation: K2 = HMAC(K1, message type derived from usage), the
	// checksum is HMAC(K2, confounder||data), and the encryption key is
	// K3 = HMAC(K2, checksum).
	k1 := key
	k2 := HMAC(k1, UsageToMSMsgType(usage))
	toenc := append(confounder, data...)
	chksum := HMAC(k2, toenc)
	k3 := HMAC(k2, chksum)
	ed, err := EncryptData(k3, toenc, e)
	if err != nil {
		return []byte{}, fmt.Errorf("error encrypting data: %v", err)
	}
	// Output layout: checksum || encrypted(confounder || data).
	// NOTE(review): the export parameter is unused here — presumably reserved
	// for export-weakened (40-bit) keys; confirm against callers.
	msg := append(chksum, ed...)
	return msg, nil
}
// DecryptMessage decrypts the message provided using the methods specific to the etype provided as defined in RFC 4757.
// The integrity of the message is also verified.
func DecryptMessage(key, data []byte, usage uint32, export bool, e etype.EType) ([]byte, error) {
	// Message layout: checksum || ciphertext.
	checksum := data[:e.GetHMACBitLength()/8]
	ct := data[e.GetHMACBitLength()/8:]
	// Derive the integrity (k2) and encryption (k3) keys from the base key
	// and the received checksum.
	_, k2, k3 := deriveKeys(key, checksum, usage, export)
	pt, err := DecryptData(k3, ct, e)
	if err != nil {
		return []byte{}, fmt.Errorf("error decrypting data: %v", err)
	}
	if !VerifyIntegrity(k2, pt, data, e) {
		return []byte{}, errors.New("integrity checksum incorrect")
	}
	// Strip the confounder prepended during encryption before returning the
	// plaintext.
	return pt[e.GetConfounderByteSize():], nil
}
// VerifyIntegrity checks the integrity checksum of the data matches that calculated from the decrypted data.
func VerifyIntegrity(key, pt, data []byte, e etype.EType) bool {
chksum := HMAC(key, pt)
return hmac.Equal(chksum, data[:e.GetHMACBitLength()/8])
} | crypto/rfc4757/encryption.go | 0.764979 | 0.415847 | encryption.go | starcoder |
package core
import (
"encoding/binary"
"hash/fnv"
"math"
"github.com/raviqqe/hamt"
)
// NumberType represents a number in the language.
// It will perhaps be represented by DEC64 in a future release.
type NumberType float64

// eval evaluates a value into a WHNF (weak head normal form). A number is
// already fully evaluated, so it returns itself.
func (n *NumberType) eval() Value {
	return n
}

// NewNumber creates a thunk containing a number value.
func NewNumber(n float64) *NumberType {
	m := NumberType(n)
	return &m
}
// Add sums up numbers of arguments.
var Add = newCommutativeOperator(0, func(n, m NumberType) NumberType { return n + m })

// Sub subtracts arguments of the second to the last from the first one as numbers.
var Sub = newInverseOperator(func(n, m NumberType) NumberType { return n - m })

// Mul multiplies numbers of arguments.
var Mul = newCommutativeOperator(1, func(n, m NumberType) NumberType { return n * m })

// Div divides the first argument by arguments of the second to the last one by one.
var Div = newInverseOperator(func(n, m NumberType) NumberType { return n / m })

// FloorDiv divides like Div but floors the result of each division.
var FloorDiv = newInverseOperator(func(n, m NumberType) NumberType {
	return NumberType(math.Floor(float64(n / m)))
})

// Mod calculates the remainder of dividing the first argument by the second.
var Mod = newBinaryOperator(math.Mod)

// Pow calculates an exponentiation from a base of the first argument and an
// exponent of the second argument.
var Pow = newBinaryOperator(math.Pow)
// newCommutativeOperator builds a variadic numeric function that folds every
// element of its "nums" list argument into the given identity value with
// combine (used for Add and Mul).
func newCommutativeOperator(identity NumberType, combine func(n, m NumberType) NumberType) Value {
	return NewLazyFunction(
		NewSignature(nil, "nums", nil, ""),
		func(vs ...Value) Value {
			list, err := EvalList(vs[0])
			if err != nil {
				return err
			}

			acc := identity
			for !list.Empty() {
				num, err := EvalNumber(list.First())
				if err != nil {
					return err
				}
				acc = combine(acc, num)

				if list, err = EvalList(list.Rest()); err != nil {
					return err
				}
			}

			return &acc
		})
}
// newInverseOperator builds a variadic numeric function that starts from its
// first argument and folds each remaining element of the "nums" list in with
// f (used for Sub, Div and FloorDiv).
func newInverseOperator(f func(n, m NumberType) NumberType) Value {
	return NewLazyFunction(
		NewSignature([]string{"initial"}, "nums", nil, ""),
		func(vs ...Value) Value {
			acc, err := EvalNumber(vs[0])
			if err != nil {
				return err
			}

			list, err := EvalList(vs[1])
			if err != nil {
				return err
			}

			for !list.Empty() {
				num, err := EvalNumber(list.First())
				if err != nil {
					return err
				}
				acc = f(acc, num)

				if list, err = EvalList(list.Rest()); err != nil {
					return err
				}
			}

			return &acc
		})
}
// newBinaryOperator builds a strict two-argument numeric function from a
// float64 binary operation (used for Mod and Pow).
func newBinaryOperator(f func(n, m float64) float64) Value {
	return NewStrictFunction(
		NewSignature([]string{"first", "second"}, "", nil, ""),
		func(vs ...Value) Value {
			var nums [2]NumberType
			for i, v := range vs {
				num, err := EvalNumber(v)
				if err != nil {
					return err
				}
				nums[i] = num
			}
			return NewNumber(f(float64(nums[0]), float64(nums[1])))
		})
}
// IsInt checks if a number value is an integer, i.e. has no fractional part.
// Note that it returns false for NaN and infinities, since math.Mod yields
// NaN in those cases.
func IsInt(n NumberType) bool {
	return math.Mod(float64(n), 1) == 0
}
// compare orders two numbers: -1 when n is smaller than the other number,
// 1 when greater, and 0 when they are equal.
func (n *NumberType) compare(c comparable) int {
	m := *c.(*NumberType)

	switch {
	case *n < m:
		return -1
	case *n > m:
		return 1
	default:
		return 0
	}
}
// ordered is a marker method declaring NumberType an ordered value type.
func (*NumberType) ordered() {}
// Hash hashes a number by feeding its big-endian float64 representation
// through a 32-bit FNV-1 hash.
func (n *NumberType) Hash() uint32 {
	h := fnv.New32()

	// Writing to an in-memory FNV hash cannot fail; an error here would be a
	// programming bug, hence the panic.
	if err := binary.Write(h, binary.BigEndian, float64(*n)); err != nil {
		panic(err)
	}

	return h.Sum32()
}
// Equal checks equality of numbers; it returns false when the other hamt
// entry is not a number.
func (n *NumberType) Equal(e hamt.Entry) bool {
	m, ok := e.(*NumberType)
	return ok && *n == *m
}
// string yields the printable representation of the number as a Value.
func (n *NumberType) string() Value {
	return sprint(*n)
}
package main
import (
"github.com/adevinta/vulcan-report"
)
// vulns maps a check identifier to the report.Vulnerability emitted when the
// corresponding DMARC misconfiguration is detected. Several Description
// strings are built from concatenated literals; each literal that ends a
// sentence or word must carry a trailing space so the joined text reads
// correctly (missing spaces and a "sintax" typo have been fixed below).
var vulns = map[string]report.Vulnerability{
	"dmarc-not-found": report.Vulnerability{
		CWEID:   358,
		Summary: "DMARC DNS Record Not Found",
		Description: "No DMARC policy has been found for this domain.\nA DMARC " +
			"(Domain-based Message Authentication, Reporting and Conformance) policy allows you " +
			"to indicate that email messages from this domain are protected by SPF and DKIM, " +
			"and tells recipients what to do if neither of those authentication methods passes, " +
			"such as junk or reject the message. DMARC limits or eliminates your user's exposure to " +
			"potentially fraudulent and harmful messages, such as phishing. DMARC also provides a way " +
			"for recipients to automatically report back to you about messages that fail DMARC " +
			"evaluation, so that you will be able to know if your email address is being used in " +
			"phishing attacks or if some of your legitimate emails are being marked as spam.",
		Score: report.SeverityThresholdLow,
		ImpactDetails: "An attacker may be able to send email messages that appear to originate " +
			"from this domain without your knowledge, which can be used to perform very convincing " +
			"phishing attacks against your users.",
		Recommendations: []string{
			"Create a DMARC DNS TXT record beginning with 'v=DMARC1'",
			"For easy DMARC deployment in AWS Route53, check our CloudFormation template in References",
		},
		References: []string{
			"https://dmarc.org/",
			"https://en.wikipedia.org/wiki/DMARC",
			"https://tools.ietf.org/html/rfc7489#section-6.3",
		},
	},
	"multiple-dmarc-found": report.Vulnerability{
		CWEID:   358,
		Summary: "DMARC Multiple Records Found",
		// Trailing space added after "domain" so the joined text does not
		// read "domaincontains".
		Description: "Multiple DMARC policy records have been found for this domain.\nIf a domain " +
			"contains multiple DMARC records, DMARC will not be processed at all.",
		Score: report.SeverityThresholdLow,
		ImpactDetails: "An attacker may be able to send email messages that appear to originate " +
			"from this domain without your knowledge, which can be used to perform very convincing " +
			"phishing attacks against your users.",
		Recommendations: []string{
			"Create a single DMARC record",
		},
		References: []string{
			"https://dmarc.org/",
			"https://en.wikipedia.org/wiki/DMARC",
			"https://tools.ietf.org/html/rfc7489",
		},
	},
	"unable-to-parse-tags": report.Vulnerability{
		CWEID:   358,
		Summary: "DMARC Unable To Parse Tags",
		// Trailing space added after "domain" (same join bug as above).
		Description: "Some tags present in the DMARC policy record for this domain are invalid.\nIf a domain " +
			"contains invalid DMARC tags or tag values, DMARC will not be processed at all.",
		Score: report.SeverityThresholdLow,
		ImpactDetails: "An attacker may be able to send email messages that appear to originate " +
			"from this domain without your knowledge, which can be used to perform very convincing " +
			"phishing attacks against your users.",
		Recommendations: []string{
			"Review the DMARC record and fix/remove any invalid tags/values",
		},
		References: []string{
			"https://dmarc.org/",
			"https://en.wikipedia.org/wiki/DMARC",
			"https://tools.ietf.org/html/rfc7489#section-6.3",
		},
	},
	"v-and-p-invalid-or-missing": report.Vulnerability{
		CWEID:   358,
		Summary: "DMARC 'v' And 'p' Are Invalid",
		Description: "Tags 'v' and 'p' are missing or are invalid. A DMARC policy record MUST comply with the formal specification " +
			"in that the 'v' and 'p' tags MUST be present and MUST appear in that order.",
		Score: report.SeverityThresholdLow,
		ImpactDetails: "An attacker may be able to send email messages that appear to originate " +
			"from this domain without your knowledge, which can be used to perform very convincing " +
			"phishing attacks against your users.",
		References: []string{
			"https://dmarc.org/",
			"https://en.wikipedia.org/wiki/DMARC",
			"https://tools.ietf.org/html/rfc7489#section-6.3",
		},
		Recommendations: []string{
			// Typo fix: "sintax" -> "syntax".
			"Review the record syntax and ensure: That 'v' and 'p' are present, have valid values and that they appear in that exact order.",
		},
	},
	"tag-v-wrong-value": report.Vulnerability{
		CWEID:   358,
		Summary: "DMARC Tag 'v' Has Wrong Value",
		// Trailing space added after "first" so the joined text does not
		// read "firsttag".
		Description: "Version (plain-text; REQUIRED). Identifies the record retrieved " +
			"as a DMARC record. It MUST have the value of 'DMARC1'. The value " +
			"of this tag MUST match precisely; if it does not or it is absent, " +
			"the entire retrieved record MUST be ignored. It MUST be the first " +
			"tag in the list.",
		Score: report.SeverityThresholdLow,
		ImpactDetails: "An attacker may be able to send email messages that appear to originate " +
			"from this domain without your knowledge, which can be used to perform very convincing " +
			"phishing attacks against your users.",
		References: []string{
			"https://dmarc.org/",
			"https://en.wikipedia.org/wiki/DMARC",
			"https://tools.ietf.org/html/rfc7489#section-6.3",
		},
		Recommendations: []string{
			"Review the record syntax and ensure that tag 'v' is set to 'DMARC1'",
		},
	},
	"tag-p-wrong-value": report.Vulnerability{
		CWEID:   358,
		Summary: "DMARC Tag 'p' Has Wrong Value",
		Description: "The value of tag 'p' is not a valid one. Requested Mail Receiver policy (plain-text; REQUIRED for policy " +
			"records). Indicates the policy to be enacted by the Receiver at " +
			"the request of the Domain Owner. Policy applies to the domain " +
			"queried and to subdomains, unless subdomain policy is explicitly " +
			"described using the 'sp' tag. This tag is mandatory for policy " +
			"records only, but not for third-party reporting records. Possible values are as follows: " +
			"none: The Domain Owner requests no specific action be taken regarding delivery of messages. " +
			"quarantine: The Domain Owner wishes to have email that fails the " +
			"DMARC mechanism check be treated by Mail Receivers as " +
			"suspicious. Depending on the capabilities of the Mail " +
			"Receiver, this can mean 'place into spam folder', 'scrutinize " +
			"with additional intensity', and/or 'flag as suspicious'. " +
			"reject: The Domain Owner wishes for Mail Receivers to reject " +
			"email that fails the DMARC mechanism check.",
		Score: report.SeverityThresholdLow,
		ImpactDetails: "An attacker may be able to send email messages that appear to originate " +
			"from this domain without your knowledge, which can be used to perform very convincing " +
			"phishing attacks against your users.",
		References: []string{
			"https://dmarc.org/",
			"https://en.wikipedia.org/wiki/DMARC",
			"https://tools.ietf.org/html/rfc7489#section-6.3",
		},
		Recommendations: []string{
			"Review the record syntax and ensure that tag 'p' is set to one of the following values : 'none', 'quarantine' or 'reject'",
		},
	},
	"tag-p-is-none": report.Vulnerability{
		CWEID:   358,
		Summary: "DMARC Tag 'p' Set To 'none'",
		Description: "The value of tag 'p' is configured as 'none'. It should be set to 'reject'. " +
			"none: The Domain Owner requests no specific action be taken regarding delivery of messages. " +
			"reject: The Domain Owner wishes for Mail Receivers to reject " +
			"email that fails the DMARC mechanism check.",
		Score: report.SeverityThresholdLow,
		ImpactDetails: "An attacker may be able to send email messages that appear to originate " +
			"from this domain without your knowledge, which can be used to perform very convincing " +
			"phishing attacks against your users.",
		References: []string{
			"https://dmarc.org/",
			"https://en.wikipedia.org/wiki/DMARC",
			"https://tools.ietf.org/html/rfc7489",
		},
		Recommendations: []string{
			"Set tag 'p' to be 'reject'",
		},
	},
	"tag-p-is-quarantine": report.Vulnerability{
		CWEID:   358,
		Summary: "DMARC Tag 'p' Set To 'quarantine'",
		Description: "The value of tag 'p' is configured as 'quarantine'. It should be set to 'reject'. " +
			"none: The Domain Owner requests no specific action be taken regarding delivery of messages. " +
			"quarantine: The Domain Owner wishes to have email that fails the " +
			"DMARC mechanism check be treated by Mail Receivers as " +
			"suspicious. Depending on the capabilities of the Mail " +
			"Receiver, this can mean 'place into spam folder', 'scrutinize " +
			"with additional intensity', and/or 'flag as suspicious'. ",
		Score: report.SeverityThresholdLow,
		ImpactDetails: "An attacker may be able to send email messages that appear to originate " +
			"from this domain without your knowledge, which can be used to perform very convincing " +
			"phishing attacks against your users.",
		References: []string{
			"https://dmarc.org/",
			"https://en.wikipedia.org/wiki/DMARC",
			"https://tools.ietf.org/html/rfc7489",
		},
		Recommendations: []string{
			"Set tag 'p' to be 'reject'",
		},
	},
	"tag-rua-not-configured": report.Vulnerability{
		CWEID:   358,
		Summary: "DMARC Tag 'rua' Missing",
		Description: "The tag 'rua' is not explicitly configured. " +
			"rua: Addresses to which aggregate feedback is to be sent (comma-" +
			"separated plain-text list of DMARC URIs; OPTIONAL).",
		Score: report.SeverityThresholdNone,
		References: []string{
			"https://dmarc.org/",
			"https://en.wikipedia.org/wiki/DMARC",
			"https://tools.ietf.org/html/rfc7489",
		},
		Recommendations: []string{
			"Explicitly define the value of tag 'rua'",
		},
	},
	"tag-ruf-not-configured": report.Vulnerability{
		CWEID:   358,
		Summary: "DMARC Tag 'ruf' Missing",
		Description: "The tag 'ruf' is not explicitly configured. " +
			"ruf: Addresses to which message-specific failure information is to " +
			"be reported (comma-separated plain-text list of DMARC URIs; " +
			"OPTIONAL). If present, the Domain Owner is requesting Mail " +
			"Receivers to send detailed failure reports about messages that " +
			"fail the DMARC evaluation in specific ways (see the 'fo' tag " +
			"above). The format of the message to be generated MUST follow the " +
			"format specified for the 'rf' tag. ",
		Score: report.SeverityThresholdNone,
		References: []string{
			"https://dmarc.org/",
			"https://en.wikipedia.org/wiki/DMARC",
			"https://tools.ietf.org/html/rfc7489",
		},
		Recommendations: []string{
			"Explicitly define the value of tag 'ruf'",
		},
	},
	"tag-pct-not-100": report.Vulnerability{
		CWEID:   358,
		Summary: "DMARC Tag 'pct' Is Not Set To '100'",
		Description: "Selective DMARC policy 'pct' is not set to '100'. " +
			"pct: (plain-text integer between 0 and 100, inclusive; OPTIONAL; " +
			"default is 100). Percentage of messages from the Domain Owner's " +
			"mail stream to which the DMARC policy is to be applied. ",
		Score: report.SeverityThresholdNone,
		References: []string{
			"https://dmarc.org/",
			"https://en.wikipedia.org/wiki/DMARC",
			"https://tools.ietf.org/html/rfc7489",
		},
		Recommendations: []string{
			"Set tag 'pct' to be '100'",
		},
	},
	"tag-rua-not-valid-mailto": report.Vulnerability{
		CWEID:   358,
		Summary: "DMARC Tag 'rua' Is Invalid",
		Description: "The 'rua' tag has an invalid value. It should be a comma-separated plain-text list of DMARC URIs. " +
			"rua: Addresses to which aggregate feedback is to be sent (comma-" +
			"separated plain-text list of DMARC URIs; OPTIONAL).",
		Score: report.SeverityThresholdLow,
		ImpactDetails: "An attacker may be able to send email messages that appear to originate " +
			"from this domain without your knowledge, which can be used to perform very convincing " +
			"phishing attacks against your users.",
		References: []string{
			"https://dmarc.org/",
			"https://en.wikipedia.org/wiki/DMARC",
			"https://tools.ietf.org/html/rfc7489",
		},
		Recommendations: []string{
			"Review the record syntax and ensure that tag 'rua' is a valid list of mail addresses",
		},
	},
	"tag-ruf-not-valid-mailto": report.Vulnerability{
		CWEID:   358,
		Summary: "DMARC Tag 'ruf' Is Invalid",
		// Trailing space added after "URIs." so the joined text does not
		// read "URIs.ruf:".
		Description: "The 'ruf' tag has an invalid value. It should be a comma-separated plain-text list of DMARC URIs. " +
			"ruf: Addresses to which message-specific failure information is to " +
			"be reported (comma-separated plain-text list of DMARC URIs; " +
			"OPTIONAL).",
		Score: report.SeverityThresholdLow,
		ImpactDetails: "An attacker may be able to send email messages that appear to originate " +
			"from this domain without your knowledge, which can be used to perform very convincing " +
			"phishing attacks against your users.",
		References: []string{
			"https://dmarc.org/",
			"https://en.wikipedia.org/wiki/DMARC",
			"https://tools.ietf.org/html/rfc7489",
		},
		Recommendations: []string{
			"Review the record syntax and ensure that tag 'ruf' is a valid list of mail addresses",
		},
	},
	"tag-adkim-not-valid": report.Vulnerability{
		CWEID:   358,
		Summary: "DMARC Tag 'adkim' Is Invalid",
		Description: "The 'adkim' tag has an invalid value. " +
			"adkim: (plain-text; OPTIONAL; default is 'r'.) Indicates whether " +
			"strict or relaxed DKIM Identifier Alignment mode is required by " +
			"the Domain Owner. Valid values are as follows: [r: relaxed mode, s: strict mode]",
		Score: report.SeverityThresholdLow,
		ImpactDetails: "An attacker may be able to send email messages that appear to originate " +
			"from this domain without your knowledge, which can be used to perform very convincing " +
			"phishing attacks against your users.",
		References: []string{
			"https://dmarc.org/",
			"https://en.wikipedia.org/wiki/DMARC",
			"https://tools.ietf.org/html/rfc7489",
		},
		Recommendations: []string{
			"Review the record syntax and ensure that tag 'adkim' is set to one of the following values : 'r' or 's'",
		},
	},
	"tag-aspf-not-valid": report.Vulnerability{
		CWEID:   358,
		Summary: "DMARC Tag 'aspf' Is Invalid",
		Description: "The 'aspf' tag has an invalid value. " +
			"aspf: (plain-text; OPTIONAL; default is 'r'.) Indicates whether " +
			"strict or relaxed SPF Identifier Alignment mode is required by the " +
			"Domain Owner. Valid values are as follows: [r: relaxed mode, s: strict mode] ",
		Score: report.SeverityThresholdLow,
		ImpactDetails: "An attacker may be able to send email messages that appear to originate " +
			"from this domain without your knowledge, which can be used to perform very convincing " +
			"phishing attacks against your users.",
		References: []string{
			"https://dmarc.org/",
			"https://en.wikipedia.org/wiki/DMARC",
			"https://tools.ietf.org/html/rfc7489",
		},
		Recommendations: []string{
			"Review the record syntax and ensure that tag 'aspf' is set to one of the following values : 'r' or 's'",
		},
	},
	"tag-sp-not-valid": report.Vulnerability{
		CWEID:   358,
		Summary: "DMARC Tag 'sp' Is Invalid",
		Description: "The 'sp' tag has an invalid value. " +
			"sp: Requested Mail Receiver policy for all subdomains (plain-text; " +
			"OPTIONAL). Indicates the policy to be enacted by the Receiver at " +
			"the request of the Domain Owner. It applies only to subdomains of " +
			"the domain queried and not to the domain itself. Possible values are as follows: " +
			"none: The Domain Owner requests no specific action be taken regarding delivery of messages. " +
			"quarantine: The Domain Owner wishes to have email that fails the " +
			"DMARC mechanism check be treated by Mail Receivers as " +
			"suspicious. Depending on the capabilities of the Mail " +
			"Receiver, this can mean 'place into spam folder', 'scrutinize " +
			"with additional intensity', and/or 'flag as suspicious'. " +
			"reject: The Domain Owner wishes for Mail Receivers to reject " +
			"email that fails the DMARC mechanism check.",
		Score: report.SeverityThresholdLow,
		ImpactDetails: "An attacker may be able to send email messages that appear to originate " +
			"from this domain without your knowledge, which can be used to perform very convincing " +
			"phishing attacks against your users.",
		References: []string{
			"https://dmarc.org/",
			"https://en.wikipedia.org/wiki/DMARC",
			"https://tools.ietf.org/html/rfc7489",
		},
		Recommendations: []string{
			"Review the record syntax and ensure that tag 'sp' is set to one of the following values: 'none', 'quarantine' or 'reject'",
		},
	},
	"tag-fo-not-valid": report.Vulnerability{
		CWEID:   358,
		Summary: "DMARC Tag 'fo' Is Invalid",
		// Sentence break added after "'0')" so the joined text does not
		// read "'0')Provides".
		Description: "The 'fo' tag has an invalid value. " +
			"fo: Failure reporting options (plain-text; OPTIONAL; default is '0'). " +
			"Provides requested options for generation of failure reports. " +
			"Report generators MAY choose to adhere to the requested options. " +
			"This tag's content MUST be ignored if a 'ruf' tag is not " +
			"also specified. The value of this tag is a colon-separated list " +
			"of characters that indicate failure reporting options as follows: " +
			"0: Generate a DMARC failure report if all underlying authentication mechanisms fail to produce an aligned 'pass' result. " +
			"1: Generate a DMARC failure report if any underlying authentication mechanism produced something other than an aligned 'pass' result. " +
			"d: Generate a DKIM failure report if the message had a signature that failed evaluation, regardless of its alignment. " +
			"s: Generate an SPF failure report if the message failed SPF evaluation, regardless of its alignment. ",
		Score: report.SeverityThresholdLow,
		ImpactDetails: "An attacker may be able to send email messages that appear to originate " +
			"from this domain without your knowledge, which can be used to perform very convincing " +
			"phishing attacks against your users.",
		References: []string{
			"https://dmarc.org/",
			"https://en.wikipedia.org/wiki/DMARC",
			"https://tools.ietf.org/html/rfc7489",
		},
		Recommendations: []string{
			"Review the record syntax and ensure that tag 'fo' is set to one of the following values: '0', '1', 'd' or 's'",
		},
	},
	"tag-rf-not-valid": report.Vulnerability{
		CWEID:   358,
		Summary: "DMARC Tag 'rf' Is Invalid",
		Description: "The 'rf' tag has an invalid value. " +
			"rf: Format to be used for message-specific failure reports (colon- " +
			"separated plain-text list of values; OPTIONAL; default is 'afrf'). " +
			"The value of this tag is a list of one or more report formats as " +
			"requested by the Domain Owner to be used when a message fails both " +
			"[SPF] and [DKIM] tests to report details of the individual " +
			"failure. For this version, only 'afrf' (the auth-failure report " +
			"type) is presently supported. ",
		Score: report.SeverityThresholdLow,
		ImpactDetails: "An attacker may be able to send email messages that appear to originate " +
			"from this domain without your knowledge, which can be used to perform very convincing " +
			"phishing attacks against your users.",
		References: []string{
			"https://dmarc.org/",
			"https://en.wikipedia.org/wiki/DMARC",
			"https://tools.ietf.org/html/rfc7489",
		},
		Recommendations: []string{
			"Review the record syntax and ensure that tag 'rf' is set to 'afrf'",
		},
	},
	"tag-ri-not-valid": report.Vulnerability{
		CWEID:   358,
		Summary: "DMARC Tag 'ri' Is Invalid",
		Description: "The 'ri' tag has an invalid value. " +
			"ri: Interval requested between aggregate reports (plain-text 32-bit " +
			"unsigned integer; OPTIONAL; default is 86400). Indicates a " +
			"request to Receivers to generate aggregate reports separated by no " +
			"more than the requested number of seconds. ",
		Score: report.SeverityThresholdLow,
		ImpactDetails: "An attacker may be able to send email messages that appear to originate " +
			"from this domain without your knowledge, which can be used to perform very convincing " +
			"phishing attacks against your users.",
		References: []string{
			"https://dmarc.org/",
			"https://en.wikipedia.org/wiki/DMARC",
			"https://tools.ietf.org/html/rfc7489",
		},
		Recommendations: []string{
			"Review the record syntax and ensure that tag 'ri' is set to an integer",
		},
	},
} | cmd/vulcan-dmarc/dmarc_vulnerabilities.go | 0.640299 | 0.41941 | dmarc_vulnerabilities.go | starcoder |
package peruntest
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/hyperledger-labs/perun-node"
)
// AssertAPIError checks that e is a non-nil API error with the given
// category and code, and that its message contains every phrase in msgs.
func AssertAPIError(t *testing.T, e perun.APIError, categ perun.ErrorCategory, code perun.ErrorCode, msgs ...string) {
	t.Helper()
	require.Error(t, e)
	assert.Equal(t, categ, e.Category())
	assert.Equal(t, code, e.Code())
	for _, want := range msgs {
		assert.Contains(t, e.Message(), want)
	}
}
// AssertErrInfoPeerRequestTimedOut checks that info is a
// perun.ErrInfoPeerRequestTimedOut carrying the given peer alias and timeout.
func AssertErrInfoPeerRequestTimedOut(t *testing.T, info interface{}, peerAlias, timeout string) {
	t.Helper()
	got, ok := info.(perun.ErrInfoPeerRequestTimedOut)
	require.True(t, ok)
	assert.Equal(t, peerAlias, got.PeerAlias)
	assert.Equal(t, timeout, got.Timeout)
}
// AssertErrInfoPeerRejected checks that info is a perun.ErrInfoPeerRejected
// carrying the given peer alias and rejection reason.
func AssertErrInfoPeerRejected(t *testing.T, info interface{}, peerAlias, reason string) {
	t.Helper()
	got, ok := info.(perun.ErrInfoPeerRejected)
	require.True(t, ok)
	assert.Equal(t, peerAlias, got.PeerAlias)
	assert.Equal(t, reason, got.Reason)
}
// AssertErrInfoPeerNotFunded checks that info is a perun.ErrInfoPeerNotFunded
// carrying the given peer alias.
func AssertErrInfoPeerNotFunded(t *testing.T, info interface{}, peerAlias string) {
	t.Helper()
	got, ok := info.(perun.ErrInfoPeerNotFunded)
	require.True(t, ok)
	assert.Equal(t, peerAlias, got.PeerAlias)
}
// AssertErrInfoUserResponseTimedOut checks that info is a
// perun.ErrInfoUserResponseTimedOut and that the response was received only
// after the expiry had passed.
func AssertErrInfoUserResponseTimedOut(t *testing.T, info interface{}) {
	t.Helper()
	got, ok := info.(perun.ErrInfoUserResponseTimedOut)
	require.True(t, ok)
	assert.Less(t, got.Expiry, got.ReceivedAt)
}
// AssertErrInfoResourceNotFound checks that info is a
// perun.ErrInfoResourceNotFound carrying the given resource type and ID.
func AssertErrInfoResourceNotFound(t *testing.T, info interface{}, resourceType perun.ResourceType, id string) {
	t.Helper()
	got, ok := info.(perun.ErrInfoResourceNotFound)
	require.True(t, ok)
	assert.Equal(t, string(resourceType), got.Type)
	assert.Equal(t, id, got.ID)
}
// AssertErrInfoResourceExists checks that info is a
// perun.ErrInfoResourceExists carrying the given resource type and ID.
func AssertErrInfoResourceExists(t *testing.T, info interface{}, resourceType perun.ResourceType, id string) {
	t.Helper()
	got, ok := info.(perun.ErrInfoResourceExists)
	require.True(t, ok)
	assert.Equal(t, string(resourceType), got.Type)
	assert.Equal(t, id, got.ID)
}
// AssertErrInfoInvalidArgument checks that info is a
// perun.ErrInfoInvalidArgument carrying the given argument name and value.
// The requirement text is only logged, as it is informational.
func AssertErrInfoInvalidArgument(t *testing.T, info interface{}, name perun.ArgumentName, value string) {
	t.Helper()
	got, ok := info.(perun.ErrInfoInvalidArgument)
	require.True(t, ok)
	assert.Equal(t, string(name), got.Name)
	assert.Equal(t, value, got.Value)
	t.Log("requirement:", got.Requirement)
}
// AssertErrInfoFailedPreCondUnclosedChs checks that info is a
// perun.ErrInfoFailedPreCondUnclosedChs carrying exactly the given channel
// infos.
func AssertErrInfoFailedPreCondUnclosedChs(t *testing.T, info interface{}, chInfos []perun.ChInfo) {
	t.Helper()
	got, ok := info.(perun.ErrInfoFailedPreCondUnclosedChs)
	require.True(t, ok)
	assert.Equal(t, chInfos, got.ChInfos)
}
// AssertErrInfoInvalidConfig checks that info is a
// perun.ErrInfoInvalidConfig carrying the given config entry name and value.
func AssertErrInfoInvalidConfig(t *testing.T, info interface{}, name, value string) {
	t.Helper()
	got, ok := info.(perun.ErrInfoInvalidConfig)
	require.True(t, ok)
	assert.Equal(t, name, got.Name)
	assert.Equal(t, value, got.Value)
}
// AssertErrInfoInvalidContracts tests if additional info field is of
// correct type and has expected values.
//
// NOTE(review): only the type of info is currently checked; the passed
// contractErrInfos are not compared against the actual ones (see TODO
// below). The commented-out assertion also misuses assert.Len, which takes
// the object itself rather than its length — verify before enabling.
func AssertErrInfoInvalidContracts(t *testing.T, info interface{}, contractErrInfos []perun.ContractErrInfo) {
	t.Helper()
	_, ok := info.(perun.ErrInfoInvalidContracts)
	require.True(t, ok)
	// TODO: compare the two list based on matching keys.
	// assert.Len(t, len(contractErrInfos), len(addInfo.ContractErrInfos))
}
// AssertErrInfoTxTimedOut checks that info is a perun.ErrInfoTxTimedOut
// carrying the given transaction type, ID and timeout.
func AssertErrInfoTxTimedOut(t *testing.T, info interface{}, txType, txID, txTimeout string) {
	t.Helper()
	got, ok := info.(perun.ErrInfoTxTimedOut)
	require.True(t, ok)
	assert.Equal(t, txType, got.TxType)
	assert.Equal(t, txID, got.TxID)
	assert.Equal(t, txTimeout, got.TxTimeout)
}
// AssertErrInfoChainNotReachable tests if additional info field is of
// correct type and has expected values.
// Fails the test when info is not a perun.ErrInfoChainNotReachable or when
// its ChainURL differs from the expected one.
func AssertErrInfoChainNotReachable(t *testing.T, info interface{}, chainURL string) {
	t.Helper()
	addInfo, ok := info.(perun.ErrInfoChainNotReachable)
	require.True(t, ok)
	assert.Equal(t, chainURL, addInfo.ChainURL)
} | peruntest/assert.go | 0.619126 | 0.568985 | assert.go | starcoder |
Package builder provides methods to build admission webhooks.
The following are 2 examples for building mutating webhook and validating webhook.
webhook1, err := NewWebhookBuilder().
Mutating().
Operations(admissionregistrationv1beta1.Create).
ForType(&corev1.Pod{}).
WithManager(mgr).
Handlers(mutatingHandler11, mutatingHandler12).
Build()
if err != nil {
// handle error
}
webhook2, err := NewWebhookBuilder().
Validating().
Operations(admissionregistrationv1beta1.Create, admissionregistrationv1beta1.Update).
ForType(&appsv1.Deployment{}).
WithManager(mgr).
Handlers(validatingHandler21).
Build()
if err != nil {
// handle error
}
Note: To build a webhook for a CRD, you need to ensure the manager uses the scheme that understands your CRD.
This is necessary, because if the scheme doesn't understand your CRD types, the decoder won't be able to decode
the CR object from the admission review request.
The following snippet shows how to register CRD types with manager's scheme.
mgr, err := manager.New(cfg, manager.Options{})
if err != nil {
// handle error
}
// SchemeGroupVersion is group version used to register these objects
SchemeGroupVersion = schema.GroupVersion{Group: "crew.k8s.io", Version: "v1"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme
SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
// Register your CRD types.
SchemeBuilder.Register(&Kraken{}, &KrakenList{})
// Register your CRD types with the manager's scheme.
err = SchemeBuilder.AddToScheme(mgr.GetScheme())
if err != nil {
// handle error
}
There are more options for configuring a webhook, e.g. Name, Path, FailurePolicy, and NamespaceSelector.
Here is another example:
webhook3, err := NewWebhookBuilder().
Name("foo.example.com").
Path("/mutatepods").
Mutating().
Operations(admissionregistrationv1beta1.Create).
ForType(&corev1.Pod{}).
FailurePolicy(admissionregistrationv1beta1.Fail).
WithManager(mgr).
Handlers(mutatingHandler31, mutatingHandler32).
Build()
if err != nil {
// handle error
}
For most users, we recommend to use Operations and ForType instead of Rules to construct a webhook,
since it is more intuitive and easier to pass the target operations to the Operations method and
an empty target object to the ForType method than to pass a complex RuleWithOperations struct to the Rules method.
Rules may be useful for some more advanced use cases like subresources, wildcard resources etc.
Here is an example:
webhook4, err := NewWebhookBuilder().
Validating().
Rules(admissionregistrationv1beta1.RuleWithOperations{
Operations: []admissionregistrationv1beta1.OperationType{admissionregistrationv1beta1.Create},
Rule: admissionregistrationv1beta1.Rule{
APIGroups: []string{"apps", "batch"},
APIVersions: []string{"v1"},
Resources: []string{"*"},
},
}).
WithManager(mgr).
Handlers(validatingHandler41).
Build()
if err != nil {
// handle error
}
*/
package builder | vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/builder/doc.go | 0.654011 | 0.483953 | doc.go | starcoder |
package r1
import (
"fmt"
"math"
)
// Interval represents a closed interval on ℝ.
// Zero-length intervals (where Lo == Hi) represent single points.
// If Lo > Hi then the interval is empty.
type Interval struct {
	Lo, Hi float64
}

// EmptyInterval returns an empty interval.
func EmptyInterval() Interval { return Interval{1, 0} }

// IntervalFromPoint returns an interval representing a single point.
func IntervalFromPoint(p float64) Interval { return Interval{p, p} }

// IsEmpty reports whether the interval is empty.
func (i Interval) IsEmpty() bool { return i.Lo > i.Hi }

// Equal returns true iff the interval contains the same points as oi.
func (i Interval) Equal(oi Interval) bool {
	return i == oi || i.IsEmpty() && oi.IsEmpty()
}

// Center returns the midpoint of the interval.
// It is undefined for empty intervals.
func (i Interval) Center() float64 { return 0.5 * (i.Lo + i.Hi) }

// Length returns the length of the interval.
// The length of an empty interval is negative.
func (i Interval) Length() float64 { return i.Hi - i.Lo }

// Contains returns true iff the interval contains p.
func (i Interval) Contains(p float64) bool { return i.Lo <= p && p <= i.Hi }

// ContainsInterval returns true iff the interval contains oi.
func (i Interval) ContainsInterval(oi Interval) bool {
	if oi.IsEmpty() {
		return true
	}
	return i.Lo <= oi.Lo && oi.Hi <= i.Hi
}

// InteriorContains returns true iff the interval strictly contains p.
func (i Interval) InteriorContains(p float64) bool {
	return i.Lo < p && p < i.Hi
}

// InteriorContainsInterval returns true iff the interval strictly contains oi.
func (i Interval) InteriorContainsInterval(oi Interval) bool {
	if oi.IsEmpty() {
		return true
	}
	return i.Lo < oi.Lo && oi.Hi < i.Hi
}

// Intersects returns true iff the interval contains any points in common with oi.
func (i Interval) Intersects(oi Interval) bool {
	if i.Lo <= oi.Lo {
		return oi.Lo <= i.Hi && oi.Lo <= oi.Hi // oi.Lo ∈ i and oi is not empty
	}
	return i.Lo <= oi.Hi && i.Lo <= i.Hi // i.Lo ∈ oi and i is not empty
}

// InteriorIntersects returns true iff the interior of the interval contains any points in common with oi, including the latter's boundary.
func (i Interval) InteriorIntersects(oi Interval) bool {
	// i must have a non-empty interior (i.Lo < i.Hi), oi must be non-empty
	// (oi.Lo <= oi.Hi), and the two must overlap beyond a boundary point.
	// BUG FIX: the last condition previously read "oi.Lo <= i.Hi", which is
	// implied by the first condition and wrongly accepted empty oi.
	return oi.Lo < i.Hi && i.Lo < oi.Hi && i.Lo < i.Hi && oi.Lo <= oi.Hi
}

// Intersection returns the interval containing all points common to i and j.
func (i Interval) Intersection(j Interval) Interval {
	// Empty intervals do not need to be special-cased.
	return Interval{
		Lo: math.Max(i.Lo, j.Lo),
		Hi: math.Min(i.Hi, j.Hi),
	}
}

// AddPoint returns the interval expanded so that it contains the given point.
func (i Interval) AddPoint(p float64) Interval {
	if i.IsEmpty() {
		return Interval{p, p}
	}
	if p < i.Lo {
		return Interval{p, i.Hi}
	}
	if p > i.Hi {
		return Interval{i.Lo, p}
	}
	return i
}

// ClampPoint returns the closest point in the interval to the given point "p".
// The interval must be non-empty.
func (i Interval) ClampPoint(p float64) float64 {
	return math.Max(i.Lo, math.Min(i.Hi, p))
}

// Expanded returns an interval that has been expanded on each side by margin.
// If margin is negative, then the function shrinks the interval on
// each side by margin instead. The resulting interval may be empty. Any
// expansion of an empty interval remains empty.
func (i Interval) Expanded(margin float64) Interval {
	if i.IsEmpty() {
		return i
	}
	return Interval{i.Lo - margin, i.Hi + margin}
}

// Union returns the smallest interval that contains this interval and the given interval.
func (i Interval) Union(other Interval) Interval {
	if i.IsEmpty() {
		return other
	}
	if other.IsEmpty() {
		return i
	}
	return Interval{math.Min(i.Lo, other.Lo), math.Max(i.Hi, other.Hi)}
}

func (i Interval) String() string { return fmt.Sprintf("[%.7f, %.7f]", i.Lo, i.Hi) }

// epsilon is a small number that represents a reasonable level of noise between two
// values that can be considered to be equal.
const epsilon = 1e-14

// ApproxEqual reports whether the interval can be transformed into the
// given interval by moving each endpoint a small distance.
// The empty interval is considered to be positioned arbitrarily on the
// real line, so any interval with a small enough length will match
// the empty interval.
func (i Interval) ApproxEqual(other Interval) bool {
	if i.IsEmpty() {
		return other.Length() <= 2*epsilon
	}
	if other.IsEmpty() {
		return i.Length() <= 2*epsilon
	}
	return math.Abs(other.Lo-i.Lo) <= epsilon &&
		math.Abs(other.Hi-i.Hi) <= epsilon
}
// BUG(dsymonds): The major differences from the C++ version are:
// - a few other miscellaneous operations | Godeps/_workspace/src/github.com/golang/geo/r1/interval.go | 0.891575 | 0.664734 | interval.go | starcoder |
package core
import (
"fmt"
)
// TypeParser parses a specific DataType, returning an error or nil if the parsing was successful.
type TypeParser interface {
	// Parse extracts the value from d, returning a non-nil error when d is
	// not of the type this parser expects.
	Parse(d DataType) error
}
// SetStringFunc is a function that sets a string value; it receives the
// value extracted by a StringTypeParser.
type SetStringFunc func(string)

// SetIntFunc is a function that sets an integer value; it receives the
// value extracted by an IntTypeParser.
type SetIntFunc func(int)

// SetFloatFunc is a function that sets a floating point value.
type SetFloatFunc func(float64)
// StringTypeParser is a TypeParser that expects String DataTypes and forwards
// the parsed value to its setter function.
type StringTypeParser struct {
	setter SetStringFunc
}

// Parse parses the DataType expecting it to be a String type; it returns an
// error when the value is of any other type.
func (parser StringTypeParser) Parse(d DataType) error {
	value, ok := AsString(d)
	if !ok {
		return fmt.Errorf("Error parsing type of %#v as a String", d)
	}
	parser.setter(value)
	return nil
}

// NewStringTypeParser creates a StringTypeParser using the given setter.
func NewStringTypeParser(setter SetStringFunc) *StringTypeParser {
	return &StringTypeParser{setter: setter}
}

// NewStringTypeParserToVar creates a StringTypeParser that writes the parsed
// value into the given string pointer.
func NewStringTypeParserToVar(variable *string) *StringTypeParser {
	return &StringTypeParser{
		setter: func(value string) { *variable = value },
	}
}
// IntTypeParser is a TypeParser that expects Integer DataTypes and forwards
// the parsed value to its setter function.
type IntTypeParser struct {
	setter SetIntFunc
}

// Parse parses the DataType expecting it to be an Integer type; it returns an
// error when the value is of any other type.
func (parser IntTypeParser) Parse(d DataType) error {
	value, ok := AsInt(d)
	if !ok {
		return fmt.Errorf("Error parsing type of %#v as an Integer", d)
	}
	parser.setter(value)
	return nil
}

// NewIntTypeParser creates an IntTypeParser using the given setter.
func NewIntTypeParser(setter SetIntFunc) *IntTypeParser {
	return &IntTypeParser{setter: setter}
}

// NewIntTypeParserToVar creates an IntTypeParser that writes the parsed
// value into the given int pointer.
func NewIntTypeParserToVar(variable *int) *IntTypeParser {
	return &IntTypeParser{
		setter: func(value int) { *variable = value },
	}
}
// FloatTypeParser is a TypeParser that expects Float DataTypes and forwards
// the parsed value to its setter function.
type FloatTypeParser struct {
	setter SetFloatFunc
}

// Parse parses the DataType expecting it to be a Float type; it returns an
// error when the value is of any other type.
func (parser FloatTypeParser) Parse(d DataType) error {
	value, ok := AsFloat(d)
	if !ok {
		return fmt.Errorf("Error parsing type of %#v as a Float", d)
	}
	parser.setter(value)
	return nil
}

// NewFloatTypeParser creates a FloatTypeParser using the given setter.
func NewFloatTypeParser(setter SetFloatFunc) *FloatTypeParser {
	return &FloatTypeParser{setter: setter}
}

// NewFloatTypeParserToVar creates a FloatTypeParser that writes the parsed
// value into the given float64 pointer.
func NewFloatTypeParserToVar(variable *float64) *FloatTypeParser {
	return &FloatTypeParser{
		setter: func(value float64) { *variable = value },
	}
}
// DxfParseable is the base abstraction for any tag-composed element of a DXF
// file. It supplies the shared boilerplate for parsing a slice of tags and
// reporting errors, driven by a map from tag code to TypeParser.
type DxfParseable struct {
	tagParsers map[int]TypeParser
}

// Init installs the parser map used by Parse, replacing any previous one.
func (element *DxfParseable) Init(parsers map[int]TypeParser) {
	element.tagParsers = parsers
}

// Update merges the given parsers into the current map. When no parsers are
// configured yet, the map is adopted as-is.
func (element *DxfParseable) Update(parsers map[int]TypeParser) {
	if len(element.tagParsers) == 0 {
		element.tagParsers = parsers
		return
	}
	for key, value := range parsers {
		element.tagParsers[key] = value
	}
}
// Parse parses the slice of tags using the configured parser map.
// Returns an error if any error happens during the process, otherwise it returns nil.
func (element *DxfParseable) Parse(tags TagSlice) error {
for _, tag := range tags.RegularTags() {
if parser, ok := element.tagParsers[tag.Code]; ok {
err := parser.Parse(tag.Value)
if err != nil {
return err
}
} else {
Log.Printf("Discarding tag: %+v\n", tag.ToString())
}
}
return nil
} | core/dxf_parseable.go | 0.720368 | 0.532851 | dxf_parseable.go | starcoder |
package date
import (
"fmt"
"regexp"
"strconv"
"strings"
"time"
)
// day is the length of one calendar day expressed as a time.Duration.
const day = time.Hour * 24
// daysOfWeek maps lowercase English day names to their time.Weekday values.
var daysOfWeek = map[string]time.Weekday{
	"monday": time.Monday,
	"tuesday": time.Tuesday,
	"wednesday": time.Wednesday,
	"thursday": time.Thursday,
	"friday": time.Friday,
	"saturday": time.Saturday,
	"sunday": time.Sunday,
}
// init registers the date-parsing rules defined in this file.
//
// NOTE(review): RuleTime is defined below but is not registered here —
// confirm whether that omission is intentional.
func init() {
	// Register the rules
	RegisterRule(RuleToday)
	RegisterRule(RuleTomorrow)
	RegisterRule(RuleDayOfWeek)
	RegisterRule(RuleNaturalDate)
	RegisterRule(RuleDate)
}
// RuleToday checks for "today" or "tonight" in the given sentence and, when
// one of them is present, resolves the date to the current time. The zero
// time.Time signals that no date was found.
func RuleToday(sentence string) time.Time {
	if regexp.MustCompile("today|tonight").FindString(sentence) == "" {
		return time.Time{}
	}
	return time.Now()
}
// RuleTomorrow checks for "tomorrow" and "after tomorrow" in the given
// sentence and resolves them to now+1 day and now+2 days respectively.
// The zero time.Time signals that no date was found.
func RuleTomorrow(sentence string) time.Time {
	match := regexp.MustCompile(`(after )?tomorrow`).FindString(sentence)
	if match == "" {
		return time.Time{}
	}
	// One day ahead for "tomorrow", two for "after tomorrow".
	offset := 24 * time.Hour
	if strings.Contains(match, "after") {
		offset *= 2
	}
	return time.Now().Add(offset)
}
// RuleDayOfWeek checks for a day-of-week name, optionally prefixed with
// "next", and resolves it to the upcoming occurrence of that day ("next"
// pushes the result one additional week). The zero time.Time signals that
// no date was found.
func RuleDayOfWeek(sentence string) time.Time {
	match := regexp.MustCompile(`((next )?(monday|tuesday|wednesday|thursday|friday|saturday|sunday))`).FindString(sentence)
	if match == "" {
		return time.Time{}
	}
	// Resolve the matched name to its numeric weekday by scanning all seven days.
	var target int
	for weekday := time.Sunday; weekday <= time.Saturday; weekday++ {
		if strings.Contains(match, strings.ToLower(weekday.String())) {
			target = int(weekday)
		}
	}
	// Days until the requested weekday; roll into next week when it has
	// already passed (or is today).
	offset := target - int(time.Now().Weekday())
	if offset <= 0 {
		offset += 7
	}
	// "next <day>" means one additional week.
	if strings.Contains(match, "next") {
		offset += 7
	}
	return time.Now().Add(time.Duration(offset) * 24 * time.Hour)
}
// RuleNaturalDate checks for dates written in natural language ("25 of
// december", or just a month name) and resolves them to the next matching
// date. A bare month resolves to the first day of its next occurrence.
// The zero time.Time signals that no date was found.
func RuleNaturalDate(sentence string) time.Time {
	monthRegex := regexp.MustCompile(
		`january|february|march|april|may|june|july|august|september|october|november|december`,
	)
	dayRegex := regexp.MustCompile(`\d{2}|\d`)
	month := monthRegex.FindString(sentence)
	dayStr := dayRegex.FindString(sentence)
	if dayStr == "" && month == "" {
		return time.Time{}
	}
	parsedMonth, _ := time.Parse("January", month)
	parsedDay, _ := strconv.Atoi(dayStr)
	if dayStr == "" {
		// Month only: count the months ahead (wrapping a year if the month
		// already passed) and land on the first day of that month.
		monthsAhead := parsedMonth.Month() - time.Now().Month()
		if monthsAhead <= 0 {
			monthsAhead += 12
		}
		return time.Now().AddDate(0, int(monthsAhead), -time.Now().Day()+1)
	}
	// Assemble a concrete date in the current year...
	stamp := fmt.Sprintf("%d-%02d-%02d", time.Now().Year(), parsedMonth.Month(), parsedDay)
	date, err := time.Parse("2006-01-02", stamp)
	if err != nil {
		return time.Time{}
	}
	// ...and push it a year out when it has already gone by.
	if time.Now().After(date) {
		date = date.AddDate(1, 0, 0)
	}
	return date
}
// RuleDate checks for dates written as mm/dd and resolves them to the next
// occurrence of that month/day. The zero time.Time signals that no date was
// found or that the matched text was not a valid date.
func RuleDate(sentence string) time.Time {
	match := regexp.MustCompile(`(\d{2}|\d)/(\d{2}|\d)`).FindString(sentence)
	if match == "" {
		return time.Time{}
	}
	date, err := time.Parse("01/02", match)
	if err != nil {
		return time.Time{}
	}
	// The parsed year is 0: shift into the current year, then bump one more
	// year when that day has already gone by.
	date = date.AddDate(time.Now().Year(), 0, 0)
	if time.Now().After(date) {
		date = date.AddDate(1, 0, 0)
	}
	return date
}
// RuleTime checks for an hour written like "9pm" or "9:30 am" and returns it
// as a clock-only time.Time (reference date). The zero time.Time signals
// that no time was found.
func RuleTime(sentence string) time.Time {
	match := regexp.MustCompile(`(\d{2}|\d)(:\d{2}|\d)?( )?(pm|am)`).FindString(sentence)
	if match == "" {
		return time.Time{}
	}
	meridiem := "am"
	if strings.Contains(match, "pm") {
		meridiem = "pm"
	}
	if strings.Contains(match, ":") {
		// Hours and minutes: zero-pad both fields and parse as hh:mm am/pm.
		parts := strings.Split(regexp.MustCompile(`(\d{2}|\d):(\d{2}|\d)`).FindString(match), ":")
		parsed, _ := time.Parse("03:04 pm", fmt.Sprintf("%02s:%02s %s", parts[0], parts[1], meridiem))
		return parsed
	}
	// Hours only.
	hour := regexp.MustCompile(`\d{2}|\d`).FindString(match)
	parsed, _ := time.Parse("03 pm", fmt.Sprintf("%02s %s", hour, meridiem))
	return parsed
}
package vector
import (
"fmt"
"math"
"github.com/jeinfeldt/raytracer/raytracing/util"
)
// Vector3 represents a vector with three coordinates.
type Vector3 struct {
	x, y, z float64
}

// New is a factory method to create a new vector with three positions.
func New(x, y, z float64) Vector3 {
	return Vector3{x, y, z}
}

// NewEmpty is a factory method to create the zero vector (0, 0, 0).
func NewEmpty() Vector3 {
	return New(0, 0, 0)
}

// X returns the first coordinate of the vector.
func (v *Vector3) X() float64 { return v.x }

// Y returns the second coordinate of the vector.
func (v *Vector3) Y() float64 { return v.y }

// Z returns the third coordinate of the vector.
func (v *Vector3) Z() float64 { return v.z }

// Add accumulates the coordinates of other into v, in place.
func (v *Vector3) Add(other Vector3) {
	v.x += other.x
	v.y += other.y
	v.z += other.z
}

// Sub subtracts the coordinates of other from v, in place.
func (v *Vector3) Sub(other Vector3) {
	v.x -= other.x
	v.y -= other.y
	v.z -= other.z
}

// Mul scales every coordinate of v by the constant factor, in place.
func (v *Vector3) Mul(factor float64) {
	v.x *= factor
	v.y *= factor
	v.z *= factor
}

// Div divides every coordinate of v by the constant factor, in place.
func (v *Vector3) Div(factor float64) {
	v.x /= factor
	v.y /= factor
	v.z /= factor
}

// Length returns the Euclidean length of the vector, i.e. the square root of
// the sum of the squared components.
func (v *Vector3) Length() float64 {
	return math.Sqrt(v.LengthSquared())
}

// LengthSquared returns the squared Euclidean length (avoids the sqrt).
func (v *Vector3) LengthSquared() float64 {
	sum := v.x * v.x
	sum += v.y * v.y
	sum += v.z * v.z
	return sum
}

// String renders the vector as "{X:…, Y:…, Z:…}".
func (v *Vector3) String() string {
	return fmt.Sprintf("{X:%f, Y:%f, Z:%f}", v.x, v.y, v.z)
}

// Copy returns a new, independent vector with the same coordinates.
func (v *Vector3) Copy() Vector3 {
	return New(v.x, v.y, v.z)
}
// Random returns a randomely initialised vector
func Random() Vector3 {
return Vector3{
x: util.RandFloat(),
y: util.RandFloat(),
z: util.RandFloat()}
}
// RandomClamp returns a randomely initialised vector with
// min and max values
func RandomClamp(min, max float64) Vector3 {
return Vector3{
x: util.RandClampFloat(min, max),
y: util.RandClampFloat(min, max),
z: util.RandClampFloat(min, max)}
}
// RandomUnit returns a randomely initialised vector within unit sphere
// meaning length 1
func RandomUnit() Vector3 {
a := util.RandClampFloat(0, 2*util.Pi)
z := util.RandClampFloat(-1, 1)
r := math.Sqrt(1 - z*z)
return Vector3{
x: r * math.Cos(a),
y: r * math.Sin(a),
z: z,
}
}
// RandomUnitDisk returns a randomely initialised vector within unit disk
func RandomUnitDisk() Vector3 {
for {
p := Vector3{
x: util.RandClampFloat(-1, 1),
y: util.RandClampFloat(-1, 1),
z: 0,
}
if p.LengthSquared() >= 1 {
continue
}
return p
}
} | raytracing/vector/vector.go | 0.908428 | 0.802401 | vector.go | starcoder |
package internal
import (
"github.com/boombuler/barcode/code128"
"github.com/boombuler/barcode/qr"
"github.com/eyesore/maroto/pkg/props"
"github.com/jung-kurt/gofpdf"
"github.com/jung-kurt/gofpdf/contrib/barcode"
)
// Code is the abstraction which deals of how to add QrCodes or Barcode in a PDF
type Code interface {
	// AddQr draws a QR code for the payload inside the cell described by the
	// layout arguments.
	AddQr(code string, marginTop float64, indexCol float64, qtdCols float64, colHeight float64, prop props.Rect)
	// AddBar draws a code128 barcode; it returns an error when encoding the
	// payload fails.
	AddBar(code string, marginTop float64, indexCol float64, qtdCols float64, colHeight float64, prop props.Barcode) (err error)
}
// code is the gofpdf-backed implementation of Code; math supplies the
// cell-layout calculations.
type code struct {
	pdf gofpdf.Pdf
	math Math
}
// NewCode create a Code
//
// NOTE(review): this exported constructor returns the unexported *code type;
// external callers can only use the result through the Code interface.
func NewCode(pdf gofpdf.Pdf, math Math) *code {
	return &code{
		pdf,
		math,
	}
}
// AddQr draws a QR code (high error correction, unicode mode) inside the
// current cell, positioned either centered or per the Rect properties, and
// offset vertically by marginTop.
func (s *code) AddQr(code string, marginTop float64, indexCol float64, qtdCols float64, colHeight float64, prop props.Rect) {
	key := barcode.RegisterQR(s.pdf, code, qr.H, qr.Unicode)
	colWidth := s.math.GetWidthPerCol(qtdCols)
	var x, y, w, h float64
	if prop.Center {
		x, y, w, h = s.math.GetRectCenterColProperties(colWidth, colWidth, qtdCols, colHeight, indexCol, prop.Percent)
	} else {
		x, y, w, h = s.math.GetRectNonCenterColProperties(colWidth, colWidth, qtdCols, colHeight, indexCol, prop)
	}
	barcode.Barcode(s.pdf, key, x, y+marginTop, w, h, false)
}
// AddBar create a Barcode inside a cell
func (s *code) AddBar(code string, marginTop float64, indexCol float64, qtdCols float64, colHeight float64, prop props.Barcode) (err error) {
bcode, err := code128.Encode(code)
if err != nil {
return
}
actualWidthPerCol := s.math.GetWidthPerCol(qtdCols)
heightPercentFromWidth := prop.Proportion.Height / prop.Proportion.Width
var x, y, w, h float64
if prop.Center {
x, y, w, h = s.math.GetRectCenterColProperties(actualWidthPerCol, actualWidthPerCol*heightPercentFromWidth, qtdCols, colHeight, indexCol, prop.Percent)
} else {
rectProps := props.Rect{Left: prop.Left, Top: prop.Top, Center: prop.Center, Percent: prop.Percent}
x, y, w, h = s.math.GetRectNonCenterColProperties(actualWidthPerCol, actualWidthPerCol*heightPercentFromWidth, qtdCols, colHeight, indexCol, rectProps)
}
barcode.Barcode(s.pdf, barcode.Register(bcode), x, y+marginTop, w, h, false)
return
} | internal/code.go | 0.758958 | 0.448668 | code.go | starcoder |
package simple
import (
"k8s.io/kubernetes/third_party/forked/gonum/graph"
)
// DirectedAcyclicGraph implements graph.Directed using UndirectedGraph,
// which only stores one edge for any node pair. Direction is recovered from
// each stored edge's From/To endpoints.
type DirectedAcyclicGraph struct {
	*UndirectedGraph
}
// NewDirectedAcyclicGraph returns a DAG backed by a fresh UndirectedGraph.
// The self and absent values are forwarded to NewUndirectedGraph
// (presumably the self-edge and absent-edge weights — confirm there).
func NewDirectedAcyclicGraph(self, absent float64) *DirectedAcyclicGraph {
	return &DirectedAcyclicGraph{
		UndirectedGraph: NewUndirectedGraph(self, absent),
	}
}
// HasEdgeFromTo reports whether a directed edge exists from u to v, i.e. an
// edge is stored between them whose From endpoint is u.
func (g *DirectedAcyclicGraph) HasEdgeFromTo(u, v graph.Node) bool {
	if edge := g.UndirectedGraph.EdgeBetween(u, v); edge != nil {
		return edge.From().ID() == u.ID()
	}
	return false
}
// From returns all nodes reachable from n along an edge that leaves n.
// It returns nil when n is not in the graph.
func (g *DirectedAcyclicGraph) From(n graph.Node) []graph.Node {
	if !g.Has(n) {
		return nil
	}
	id := n.ID()
	successors := make([]graph.Node, 0, g.UndirectedGraph.edges[id].Len())
	g.UndirectedGraph.edges[id].Visit(func(neighbor int, edge graph.Edge) {
		if edge.From().ID() == id {
			successors = append(successors, g.UndirectedGraph.nodes[edge.To().ID()])
		}
	})
	return successors
}
// VisitFrom calls visitor for each successor of n (nodes on edges leaving n),
// stopping once the visitor returns false.
//
// Fix: previously, when the visitor returned false the code only returned
// from the per-edge callback — that skipped the current edge, but Visit kept
// iterating and the visitor kept being invoked, violating the shouldContinue
// contract. A stop flag now suppresses all further visitor calls. (The
// underlying Visit iteration cannot be aborted through its API, but no
// visitor call is made after it requests termination.)
func (g *DirectedAcyclicGraph) VisitFrom(n graph.Node, visitor func(neighbor graph.Node) (shouldContinue bool)) {
	if !g.Has(n) {
		return
	}
	fid := n.ID()
	stopped := false
	g.UndirectedGraph.edges[fid].Visit(func(neighbor int, edge graph.Edge) {
		if stopped || edge.From().ID() != fid {
			return
		}
		if !visitor(g.UndirectedGraph.nodes[edge.To().ID()]) {
			stopped = true
		}
	})
}
// To returns all nodes that reach n along an edge that enters n.
// It returns nil when n is not in the graph.
func (g *DirectedAcyclicGraph) To(n graph.Node) []graph.Node {
	if !g.Has(n) {
		return nil
	}
	id := n.ID()
	predecessors := make([]graph.Node, 0, g.UndirectedGraph.edges[id].Len())
	g.UndirectedGraph.edges[id].Visit(func(neighbor int, edge graph.Edge) {
		if edge.To().ID() == id {
			predecessors = append(predecessors, g.UndirectedGraph.nodes[edge.From().ID()])
		}
	})
	return predecessors
}
func (g *DirectedAcyclicGraph) VisitTo(n graph.Node, visitor func(neighbor graph.Node) (shouldContinue bool)) {
if !g.Has(n) {
return
}
tid := n.ID()
g.UndirectedGraph.edges[n.ID()].Visit(func(neighbor int, edge graph.Edge) {
if edge.To().ID() == tid {
if !visitor(g.UndirectedGraph.nodes[edge.From().ID()]) {
return
}
}
})
} | third_party/forked/gonum/graph/simple/directed_acyclic.go | 0.63023 | 0.484685 | directed_acyclic.go | starcoder |
package benchmark
import (
"reflect"
"testing"
)
// The is<X>ToUint64FuncCalibrated helpers report whether the given supplier
// has been recorded (via the matching set<X>ToUint64FuncCalibrated function)
// as calibrated for <X>-to-uint64 benchmarks. The supplier's code pointer,
// obtained through reflection, serves as the registry key.
func isBoolToUint64FuncCalibrated(supplier func() bool) bool {
	return isCalibrated(reflect.Bool, reflect.Uint64, reflect.ValueOf(supplier).Pointer())
}
func isIntToUint64FuncCalibrated(supplier func() int) bool {
	return isCalibrated(reflect.Int, reflect.Uint64, reflect.ValueOf(supplier).Pointer())
}
func isInt8ToUint64FuncCalibrated(supplier func() int8) bool {
	return isCalibrated(reflect.Int8, reflect.Uint64, reflect.ValueOf(supplier).Pointer())
}
func isInt16ToUint64FuncCalibrated(supplier func() int16) bool {
	return isCalibrated(reflect.Int16, reflect.Uint64, reflect.ValueOf(supplier).Pointer())
}
func isInt32ToUint64FuncCalibrated(supplier func() int32) bool {
	return isCalibrated(reflect.Int32, reflect.Uint64, reflect.ValueOf(supplier).Pointer())
}
func isInt64ToUint64FuncCalibrated(supplier func() int64) bool {
	return isCalibrated(reflect.Int64, reflect.Uint64, reflect.ValueOf(supplier).Pointer())
}
func isUintToUint64FuncCalibrated(supplier func() uint) bool {
	return isCalibrated(reflect.Uint, reflect.Uint64, reflect.ValueOf(supplier).Pointer())
}
func isUint8ToUint64FuncCalibrated(supplier func() uint8) bool {
	return isCalibrated(reflect.Uint8, reflect.Uint64, reflect.ValueOf(supplier).Pointer())
}
func isUint16ToUint64FuncCalibrated(supplier func() uint16) bool {
	return isCalibrated(reflect.Uint16, reflect.Uint64, reflect.ValueOf(supplier).Pointer())
}
func isUint32ToUint64FuncCalibrated(supplier func() uint32) bool {
	return isCalibrated(reflect.Uint32, reflect.Uint64, reflect.ValueOf(supplier).Pointer())
}
func isUint64ToUint64FuncCalibrated(supplier func() uint64) bool {
	return isCalibrated(reflect.Uint64, reflect.Uint64, reflect.ValueOf(supplier).Pointer())
}
// The set<X>ToUint64FuncCalibrated helpers record the given supplier as
// calibrated for <X>-to-uint64 benchmarks, keyed by the supplier's code
// pointer obtained through reflection.
func setBoolToUint64FuncCalibrated(supplier func() bool) {
	setCalibrated(reflect.Bool, reflect.Uint64, reflect.ValueOf(supplier).Pointer())
}
func setIntToUint64FuncCalibrated(supplier func() int) {
	setCalibrated(reflect.Int, reflect.Uint64, reflect.ValueOf(supplier).Pointer())
}
func setInt8ToUint64FuncCalibrated(supplier func() int8) {
	setCalibrated(reflect.Int8, reflect.Uint64, reflect.ValueOf(supplier).Pointer())
}
func setInt16ToUint64FuncCalibrated(supplier func() int16) {
	setCalibrated(reflect.Int16, reflect.Uint64, reflect.ValueOf(supplier).Pointer())
}
func setInt32ToUint64FuncCalibrated(supplier func() int32) {
	setCalibrated(reflect.Int32, reflect.Uint64, reflect.ValueOf(supplier).Pointer())
}
func setInt64ToUint64FuncCalibrated(supplier func() int64) {
	setCalibrated(reflect.Int64, reflect.Uint64, reflect.ValueOf(supplier).Pointer())
}
func setUintToUint64FuncCalibrated(supplier func() uint) {
	setCalibrated(reflect.Uint, reflect.Uint64, reflect.ValueOf(supplier).Pointer())
}
func setUint8ToUint64FuncCalibrated(supplier func() uint8) {
	setCalibrated(reflect.Uint8, reflect.Uint64, reflect.ValueOf(supplier).Pointer())
}
func setUint16ToUint64FuncCalibrated(supplier func() uint16) {
	setCalibrated(reflect.Uint16, reflect.Uint64, reflect.ValueOf(supplier).Pointer())
}
func setUint32ToUint64FuncCalibrated(supplier func() uint32) {
	setCalibrated(reflect.Uint32, reflect.Uint64, reflect.ValueOf(supplier).Pointer())
}
func setUint64ToUint64FuncCalibrated(supplier func() uint64) {
	setCalibrated(reflect.Uint64, reflect.Uint64, reflect.ValueOf(supplier).Pointer())
}
// BoolToUint64Func benchmarks a function with the signature:
// func(bool) uint64
// ID: B-11-1
// Panics unless both the supplier and this supplier/benchmark pairing have
// been calibrated beforehand.
func BoolToUint64Func(b *testing.B, supplier func() bool, toUint64Func func(bool) uint64) {
	if !isBoolSupplierCalibrated(supplier) {
		panic("supplier function not calibrated")
	}
	if !isBoolToUint64FuncCalibrated(supplier) {
		panic("BoolToUint64Func not calibrated with this supplier")
	}
	for i, count := 0, b.N; i < count; i++ {
		toUint64Func(supplier())
	}
}
// IntToUint64Func benchmarks a function with the signature:
// func(int) uint64
// ID: B-11-2
// Panics unless both the supplier and this supplier/benchmark pairing have
// been calibrated beforehand.
func IntToUint64Func(b *testing.B, supplier func() int, toUint64Func func(int) uint64) {
	if !isIntSupplierCalibrated(supplier) {
		panic("supplier function not calibrated")
	}
	if !isIntToUint64FuncCalibrated(supplier) {
		panic("IntToUint64Func not calibrated with this supplier")
	}
	for i, count := 0, b.N; i < count; i++ {
		toUint64Func(supplier())
	}
}
// Int8ToUint64Func benchmarks a function with the signature:
// func(int8) uint64
// ID: B-11-3
// Panics unless both the supplier and this supplier/benchmark pairing have
// been calibrated beforehand.
func Int8ToUint64Func(b *testing.B, supplier func() int8, toUint64Func func(int8) uint64) {
	if !isInt8SupplierCalibrated(supplier) {
		panic("supplier function not calibrated")
	}
	if !isInt8ToUint64FuncCalibrated(supplier) {
		panic("Int8ToUint64Func not calibrated with this supplier")
	}
	for i, count := 0, b.N; i < count; i++ {
		toUint64Func(supplier())
	}
}
// Int16ToUint64Func benchmarks a function with the signature:
// func(int16) uint64
// ID: B-11-4
// Panics unless both the supplier and this supplier/benchmark pairing have
// been calibrated beforehand.
func Int16ToUint64Func(b *testing.B, supplier func() int16, toUint64Func func(int16) uint64) {
	if !isInt16SupplierCalibrated(supplier) {
		panic("supplier function not calibrated")
	}
	if !isInt16ToUint64FuncCalibrated(supplier) {
		panic("Int16ToUint64Func not calibrated with this supplier")
	}
	for i, count := 0, b.N; i < count; i++ {
		toUint64Func(supplier())
	}
}
// Int32ToUint64Func benchmarks a function with the signature:
// func(int32) uint64
// ID: B-11-5
// Panics unless both the supplier and this supplier/benchmark pairing have
// been calibrated beforehand.
func Int32ToUint64Func(b *testing.B, supplier func() int32, toUint64Func func(int32) uint64) {
	if !isInt32SupplierCalibrated(supplier) {
		panic("supplier function not calibrated")
	}
	if !isInt32ToUint64FuncCalibrated(supplier) {
		panic("Int32ToUint64Func not calibrated with this supplier")
	}
	for i, count := 0, b.N; i < count; i++ {
		toUint64Func(supplier())
	}
}
// Int64ToUint64Func benchmarks a function with the signature:
// func(int64) uint64
// ID: B-11-6
// Panics unless both the supplier and this supplier/benchmark pairing have
// been calibrated beforehand.
func Int64ToUint64Func(b *testing.B, supplier func() int64, toUint64Func func(int64) uint64) {
	if !isInt64SupplierCalibrated(supplier) {
		panic("supplier function not calibrated")
	}
	if !isInt64ToUint64FuncCalibrated(supplier) {
		panic("Int64ToUint64Func not calibrated with this supplier")
	}
	for i, count := 0, b.N; i < count; i++ {
		toUint64Func(supplier())
	}
}
// UintToUint64Func benchmarks a function with the signature:
// func(uint) uint64
// ID: B-11-7
// Panics unless both the supplier and this supplier/benchmark pairing have
// been calibrated beforehand.
func UintToUint64Func(b *testing.B, supplier func() uint, toUint64Func func(uint) uint64) {
	if !isUintSupplierCalibrated(supplier) {
		panic("supplier function not calibrated")
	}
	if !isUintToUint64FuncCalibrated(supplier) {
		panic("UintToUint64Func not calibrated with this supplier")
	}
	for i, count := 0, b.N; i < count; i++ {
		toUint64Func(supplier())
	}
}
// Uint8ToUint64Func benchmarks a function with the signature:
// func(uint8) uint64
// ID: B-11-8
// Panics unless both the supplier and this supplier/benchmark pairing have
// been calibrated beforehand.
func Uint8ToUint64Func(b *testing.B, supplier func() uint8, toUint64Func func(uint8) uint64) {
	if !isUint8SupplierCalibrated(supplier) {
		panic("supplier function not calibrated")
	}
	if !isUint8ToUint64FuncCalibrated(supplier) {
		panic("Uint8ToUint64Func not calibrated with this supplier")
	}
	for i, count := 0, b.N; i < count; i++ {
		toUint64Func(supplier())
	}
}
// Uint16ToUint64Func benchmarks a function with the signature:
// func(uint16) uint64
// ID: B-11-9
// Panics unless both the supplier and this supplier/benchmark pairing have
// been calibrated beforehand.
func Uint16ToUint64Func(b *testing.B, supplier func() uint16, toUint64Func func(uint16) uint64) {
	if !isUint16SupplierCalibrated(supplier) {
		panic("supplier function not calibrated")
	}
	if !isUint16ToUint64FuncCalibrated(supplier) {
		panic("Uint16ToUint64Func not calibrated with this supplier")
	}
	for i, count := 0, b.N; i < count; i++ {
		toUint64Func(supplier())
	}
}
// Uint32ToUint64Func benchmarks a function with the signature:
// func(uint32) uint64
// ID: B-11-10
// Panics unless both the supplier and this supplier/benchmark pairing have
// been calibrated beforehand.
func Uint32ToUint64Func(b *testing.B, supplier func() uint32, toUint64Func func(uint32) uint64) {
	if !isUint32SupplierCalibrated(supplier) {
		panic("supplier function not calibrated")
	}
	if !isUint32ToUint64FuncCalibrated(supplier) {
		panic("Uint32ToUint64Func not calibrated with this supplier")
	}
	for i, count := 0, b.N; i < count; i++ {
		toUint64Func(supplier())
	}
}
// Uint64ToUint64Func benchmarks a function with the signature:
// func(uint64) uint64
// ID: B-11-11
func Uint64ToUint64Func(b *testing.B, supplier func() uint64, toUint64Func func(uint64) uint64) {
if !isUint64SupplierCalibrated(supplier) {
panic("supplier function not calibrated")
}
if !isUint64ToUint64FuncCalibrated(supplier) {
panic("Uint64ToUint64Func not calibrated with this supplier")
}
for i, count := 0, b.N; i < count; i++ {
toUint64Func(supplier())
}
} | common/benchmark/11_to_uint64_func.go | 0.713931 | 0.726644 | 11_to_uint64_func.go | starcoder |
package test_persistence
import (
"testing"
cdata "github.com/pip-services3-go/pip-services3-commons-go/data"
data1 "github.com/pip-templates-services/pip-service-data-go/data/version1"
persist "github.com/pip-templates-services/pip-service-data-go/persistence"
"github.com/stretchr/testify/assert"
)
// EntitiesPersistenceFixture bundles three sample entities with a persistence
// implementation so that the same CRUD and filtering scenarios can be run
// against any IEntitiesPersistence backend.
type EntitiesPersistenceFixture struct {
	Entity1 data1.EntityV1
	Entity2 data1.EntityV1
	Entity3 data1.EntityV1
	persistence persist.IEntitiesPersistence
}
// NewEntitiesPersistenceFixture creates a fixture around the given
// persistence with three predefined entities: two on site "1" (types 1 and 2)
// and one on site "2" (type 3), named "00001".."00003".
func NewEntitiesPersistenceFixture(persistence persist.IEntitiesPersistence) *EntitiesPersistenceFixture {
	c := EntitiesPersistenceFixture{}
	c.Entity1 = data1.EntityV1{
		Id: "1",
		Name: "00001",
		Type: data1.Type1,
		SiteId: "1",
		Content: "ABC",
	}
	c.Entity2 = data1.EntityV1{
		Id: "2",
		Name: "00002",
		Type: data1.Type2,
		SiteId: "1",
		Content: "XYZ",
	}
	c.Entity3 = data1.EntityV1{
		Id: "3",
		Name: "00003",
		Type: data1.Type3,
		SiteId: "2",
		Content: "DEF",
	}
	c.persistence = persistence
	return &c
}
// testCreateEntities stores the three fixture entities through the
// persistence, asserting that each created record echoes back the
// Name/SiteId/Type/Content fields that were sent.
func (c *EntitiesPersistenceFixture) testCreateEntities(t *testing.T) {
	// Create the first entity
	entity, err := c.persistence.Create("", &c.Entity1)
	assert.Nil(t, err)
	assert.NotNil(t, entity)
	assert.Equal(t, c.Entity1.Name, entity.Name)
	assert.Equal(t, c.Entity1.SiteId, entity.SiteId)
	assert.Equal(t, c.Entity1.Type, entity.Type)
	assert.Equal(t, c.Entity1.Content, entity.Content)
	// Create the second entity
	entity, err = c.persistence.Create("", &c.Entity2)
	assert.Nil(t, err)
	assert.NotNil(t, entity)
	assert.Equal(t, c.Entity2.Name, entity.Name)
	assert.Equal(t, c.Entity2.SiteId, entity.SiteId)
	assert.Equal(t, c.Entity2.Type, entity.Type)
	assert.Equal(t, c.Entity2.Content, entity.Content)
	// Create the third entity
	entity, err = c.persistence.Create("", &c.Entity3)
	assert.Nil(t, err)
	assert.NotNil(t, entity)
	assert.Equal(t, c.Entity3.Name, entity.Name)
	assert.Equal(t, c.Entity3.SiteId, entity.SiteId)
	assert.Equal(t, c.Entity3.Type, entity.Type)
	assert.Equal(t, c.Entity3.Content, entity.Content)
}
// TestCrudOperations runs the full create/read/update/delete cycle against
// the persistence: create three entities, page through them, update one,
// fetch it by name, delete it, and verify it is gone.
//
// NOTE(review): assert (unlike require) does not stop the test on failure, so
// the page.Data[0] dereference below panics if GetPageByFilter returned an
// empty page — consider require for the preceding assertions.
func (c *EntitiesPersistenceFixture) TestCrudOperations(t *testing.T) {
	var entity1 data1.EntityV1
	// Create items
	c.testCreateEntities(t)
	// Get all entities
	page, err := c.persistence.GetPageByFilter("", cdata.NewEmptyFilterParams(), cdata.NewEmptyPagingParams())
	assert.Nil(t, err)
	assert.NotNil(t, page)
	assert.Len(t, page.Data, 3)
	entity1 = *page.Data[0]
	// Update the entity
	entity1.Content = "ABC"
	entity, err := c.persistence.Update("", &entity1)
	assert.Nil(t, err)
	assert.NotNil(t, entity)
	assert.Equal(t, entity1.Id, entity.Id)
	assert.Equal(t, "ABC", entity.Content)
	// Get entity by name
	entity, err = c.persistence.GetOneByName("", entity1.Name)
	assert.Nil(t, err)
	assert.NotNil(t, entity)
	assert.Equal(t, entity1.Id, entity.Id)
	// Delete the entity
	entity, err = c.persistence.DeleteById("", entity1.Id)
	assert.Nil(t, err)
	assert.NotNil(t, entity)
	assert.Equal(t, entity1.Id, entity.Id)
	// Try to get deleted entity
	entity, err = c.persistence.GetOneById("", entity1.Id)
	assert.Nil(t, err)
	assert.Nil(t, entity)
}
func (c *EntitiesPersistenceFixture) TestGetWithFilters(t *testing.T) {
// Create items
c.testCreateEntities(t)
// Filter by id
page, err := c.persistence.GetPageByFilter("",
cdata.NewFilterParamsFromTuples(
"id", "1",
),
cdata.NewEmptyPagingParams())
assert.Nil(t, err)
assert.Len(t, page.Data, 1)
// Filter by udi
page, err = c.persistence.GetPageByFilter(
"",
cdata.NewFilterParamsFromTuples(
"name", "00002",
),
cdata.NewEmptyPagingParams())
assert.Nil(t, err)
assert.Len(t, page.Data, 1)
// Filter by udis
page, err = c.persistence.GetPageByFilter(
"",
cdata.NewFilterParamsFromTuples(
"names", "00001,00003",
),
cdata.NewEmptyPagingParams())
assert.Nil(t, err)
assert.Len(t, page.Data, 2)
// Filter by site_id
page, err = c.persistence.GetPageByFilter(
"",
cdata.NewFilterParamsFromTuples(
"site_id", "1",
),
cdata.NewEmptyPagingParams())
assert.Nil(t, err)
assert.Len(t, page.Data, 2)
} | test/persistence/EntitiesPersistenceFixture.go | 0.639511 | 0.547706 | EntitiesPersistenceFixture.go | starcoder |
package tetra3d
import "math"
type Quaternion struct {
X, Y, Z, W float64
}
func NewQuaternion(x, y, z, w float64) *Quaternion {
return &Quaternion{x, y, z, w}
}
func (quat *Quaternion) Clone() *Quaternion {
return NewQuaternion(quat.X, quat.Y, quat.Z, quat.W)
}
// func (quat *Quaternion) Slerp(other *Quaternion, percent float64) *Quaternion {
// if percent <= 0 {
// return quat.Clone()
// } else if percent >= 1 {
// return other.Clone()
// }
// newQuat := quat.Clone()
// angle := quat.Dot(other)
// if math.Abs(angle) >= 1 {
// return newQuat
// }
// sinHalfTheta := math.Sqrt(1 - angle*angle)
// halfTheta := math.Atan2(sinHalfTheta, angle)
// if angle < 0 {
// newQuat.W = -other.W
// newQuat.X = -other.X
// newQuat.Y = -other.Y
// newQuat.Z = -other.Z
// }
// if angle >= 1 {
// return quat.Clone()
// }
// ratioA := math.Sin((1-percent)*halfTheta) / sinHalfTheta
// ratioB := math.Sin(percent*halfTheta) / sinHalfTheta
// newQuat.W = quat.W*ratioA + other.W*ratioB
// newQuat.X = quat.X*ratioA + other.X*ratioB
// newQuat.Y = quat.Y*ratioA + other.Y*ratioB
// newQuat.Z = quat.Z*ratioA + other.Z*ratioB
// return newQuat
// }
func (quat *Quaternion) Lerp(end *Quaternion, percent float64) *Quaternion {
if percent <= 0 {
return quat.Clone()
} else if percent >= 1 {
return end.Clone()
}
if quat.Dot(end) < 0 {
end = end.Negated()
}
x := quat.X - percent*(quat.X-end.X)
y := quat.Y - percent*(quat.Y-end.Y)
z := quat.Z - percent*(quat.Z-end.Z)
w := quat.W - percent*(quat.W-end.W)
return NewQuaternion(x, y, z, w)
}
func (quat *Quaternion) Dot(other *Quaternion) float64 {
return quat.X*other.X + quat.Y*other.Y + quat.Z*other.Z + quat.W*other.W
}
// Magnitude returns the length of the Quaternion: the square root of the sum
// of its squared components.
func (quat *Quaternion) Magnitude() float64 {
	sum := quat.X * quat.X
	sum += quat.Y * quat.Y
	sum += quat.Z * quat.Z
	sum += quat.W * quat.W
	return math.Sqrt(sum)
}
// Normalized returns a copy of the Quaternion scaled to unit length.
// A zero-magnitude quaternion is returned unchanged (as a copy) rather than
// producing NaN components from a division by zero.
func (quat *Quaternion) Normalized() *Quaternion {
	newQuat := quat.Clone()
	m := newQuat.Magnitude()
	if m == 0 {
		return newQuat
	}
	newQuat.X /= m
	newQuat.Y /= m
	newQuat.Z /= m
	newQuat.W /= m
	return newQuat
}
func (quat *Quaternion) Negated() *Quaternion {
return NewQuaternion(-quat.X, -quat.Y, -quat.Z, -quat.W)
} | quaternion.go | 0.552298 | 0.422564 | quaternion.go | starcoder |
// Package action provides the interface and utilities for funnctions which
// takes a context and returns an error on failure.
package action
import (
"context"
"time"
"chromiumos/tast/errors"
"chromiumos/tast/testing"
)
// Action is a function that takes a context and returns an error.
type Action = func(context.Context) error
// Named gives a name to an action. It logs when an action starts,
// and if the action fails, tells you the name of the failing action.
func Named(name string, fn Action) Action {
	return func(ctx context.Context) error {
		testing.ContextLogf(ctx, "Start action %s", name)
		err := fn(ctx)
		if err == nil {
			return nil
		}
		return errors.Wrapf(err, "failed action %s", name)
	}
}
// Combine combines a list of functions from Context to error into one function.
// Combine adds the name of the operation into the error message to clarify the step.
// It is recommended to start the name of operations with a verb, e.g.,
//   "open Downloads and right click a folder"
// Then the failure msg would be like:
//   "failed to open Downloads and right click a folder on step ..."
func Combine(name string, steps ...Action) Action {
	return func(ctx context.Context) error {
		for i := range steps {
			if err := steps[i](ctx); err != nil {
				return errors.Wrapf(err, "failed to %s on step %d", name, i+1)
			}
		}
		return nil
	}
}
// Retry returns a function that retries a given action if it returns an error.
// The action will be executed up to n times, including the first attempt.
// The last error will be returned; any earlier errors are logged.
// Between attempts it sleeps for the specified interval.
func Retry(n int, action Action, interval time.Duration) Action {
	return retryWithLogging(n, action, interval, true)
}
// RetrySilently returns a function that retries a given action if it returns an error.
// The action will be executed up to n times, including the first attempt.
// The last error will be returned; any earlier errors are silently discarded.
// Between attempts it sleeps for the specified interval.
func RetrySilently(n int, action Action, interval time.Duration) Action {
	return retryWithLogging(n, action, interval, false)
}
// retryWithLogging implements Retry and RetrySilently: it runs action up to n
// times, sleeping for interval between attempts, and returns the last error
// (nil on success). When verboseLog is true, each failed attempt — and an
// eventual success after at least one retry — is logged.
func retryWithLogging(n int, action Action, interval time.Duration, verboseLog bool) Action {
	return func(ctx context.Context) error {
		var err error
		for i := 0; i < n; i++ {
			if err = action(ctx); err == nil {
				// Log the eventual success explicitly to avoid confusion:
				// earlier retry logs are sometimes mistaken for errors.
				if i > 0 && verboseLog {
					testing.ContextLogf(ctx, "Retry succeed in attempt %d", i+1)
				}
				return nil
			}
			if verboseLog {
				testing.ContextLogf(ctx, "Retry failed attempt %d: %v", i+1, err)
			}
			// Sleep between attempts, but not after the final one.
			if i < n-1 {
				if err := testing.Sleep(ctx, interval); err != nil && verboseLog {
					testing.ContextLog(ctx, "Failed to sleep between retry iterations: ", err)
				}
			}
		}
		return err
	}
}
// IfSuccessThen returns a function that runs action only if the first function succeeds.
// The function returns an error only if preFunc succeeds but action fails;
// it returns nil in all other situations.
// Example:
//   dialog := nodewith.Name("Dialog").Role(role.Dialog)
//   button := nodewith.Name("Ok").Role(role.Button).Ancestor(dialog)
//   ui := uiauto.New(tconn)
//   if err := action.IfSuccessThen(ui.Withtimeout(5*time.Second).WaitUntilExists(dialog), ui.LeftClick(button))(ctx); err != nil {
//     ...
//   }
func IfSuccessThen(preFunc, action Action) Action {
	return func(ctx context.Context) error {
		if err := preFunc(ctx); err != nil {
			testing.ContextLogf(ctx, "The prefunc failed, the action was not executed, the error was: %s", err)
			return nil
		}
		return action(ctx)
	}
}
// Sleep returns a function that sleeps for the specified duration.
func Sleep(duration time.Duration) Action {
return func(ctx context.Context) error {
return testing.Sleep(ctx, duration)
}
} | src/chromiumos/tast/common/action/action.go | 0.727589 | 0.439567 | action.go | starcoder |
// Package day11 solves AoC 2020 day 11.
package day11
import (
"github.com/fis/aoc/glue"
"github.com/fis/aoc/util"
)
// init registers this package's solver for AoC 2020 day 11, with '.' as the
// level's empty byte.
func init() {
	glue.RegisterSolver(2020, 11, glue.LevelSolver{Solver: solve, Empty: '.'})
}
// solve computes both parts of day 11: the fixed-point occupied-seat count
// using adjacent neighbors with tolerance 4 (part 1), and line-of-sight
// neighbors with tolerance 5 (part 2).
func solve(level *util.Level) ([]string, error) {
	fp1 := fixedPoint(level, nearMap, 4)
	fp2 := fixedPoint(level, farMap, 5)
	return glue.Ints(fp1, fp2), nil
}
// fixedPoint runs the seating cellular automaton until it stops changing and
// returns the number of occupied seats in the stable state. mapper produces
// the neighbor index table; tolerance is the occupied-neighbor count at which
// a seat empties.
func fixedPoint(level *util.Level, mapper func(*util.Level) [][8]int, tolerance int) (occupied int) {
	neigh := mapper(level)
	cur := make([]bool, len(neigh))
	next := make([]bool, len(neigh))
	for simulate(cur, next, neigh, tolerance) {
		cur, next = next, cur
	}
	// When simulate reports no change, next holds a state identical to cur,
	// so counting next is correct regardless of the number of swaps.
	for _, seat := range next {
		if seat {
			occupied++
		}
	}
	return occupied
}
// nearMap builds a neighbor table for the adjacency rule: for each cell it
// lists the dense indices of the up-to-8 directly adjacent seat ('L') cells,
// padding unused slots with -1.
func nearMap(level *util.Level) [][8]int {
	// First pass: assign every cell the level reports a dense index.
	offsets := make(map[util.P]int)
	level.Range(func(x, y int, _ byte) {
		offsets[util.P{x, y}] = len(offsets)
	})
	// Second pass: record each cell's adjacent seats by index.
	neigh := make([][8]int, len(offsets))
	level.Range(func(x, y int, _ byte) {
		p := util.P{x, y}
		n, ni := &neigh[offsets[p]], 0
		for _, np := range p.Neigh8() {
			if level.At(np.X, np.Y) == 'L' {
				n[ni] = offsets[np]
				ni++
			}
		}
		for ; ni < 8; ni++ {
			n[ni] = -1
		}
	})
	return neigh
}
// farMap builds a neighbor table for the line-of-sight rule: for each cell it
// casts a ray in each of the 8 directions and records the first seat ('L')
// visible within the level bounds, padding unused slots with -1.
func farMap(level *util.Level) [][8]int {
	offsets := make(map[util.P]int)
	level.Range(func(x, y int, _ byte) {
		offsets[util.P{x, y}] = len(offsets)
	})
	neigh := make([][8]int, len(offsets))
	level.Range(func(x, y int, _ byte) {
		n, ni := &neigh[offsets[util.P{x, y}]], 0
		// Neigh8 of the origin yields the 8 unit direction vectors.
		for _, d := range (util.P{0, 0}).Neigh8() {
			for i := 1; level.InBounds(x+i*d.X, y+i*d.Y); i++ {
				if level.At(x+i*d.X, y+i*d.Y) == 'L' {
					n[ni] = offsets[util.P{x + i*d.X, y + i*d.Y}]
					ni++
					break
				}
			}
		}
		for ; ni < 8; ni++ {
			n[ni] = -1
		}
	})
	return neigh
}
// simulate advances the seating automaton one step. It reads occupancy from
// in, writes the next state to out (an empty seat fills when it has zero
// occupied neighbors; an occupied seat empties at >= tolerance occupied
// neighbors), and reports whether any seat changed. Neighbor lists in neigh
// are terminated by -1.
func simulate(in, out []bool, neigh [][8]int, tolerance int) (changed bool) {
	for i, adj := range neigh {
		occupied := 0
		for k := 0; k < 8 && adj[k] >= 0; k++ {
			if in[adj[k]] {
				occupied++
			}
		}
		state := in[i]
		switch {
		case !state && occupied == 0:
			state, changed = true, true
		case state && occupied >= tolerance:
			state, changed = false, true
		}
		out[i] = state
	}
	return changed
}
package model
import (
"encoding/json"
"errors"
"sort"
"strings"
"time"
)
// ElementInArray reports whether element is present (by ==) in array.
func ElementInArray(element interface{}, array []interface{}) bool {
	for i := range array {
		if array[i] == element {
			return true
		}
	}
	return false
}
// RemoveElementFrom2D removes the row at index from array by swapping it with
// the last row and truncating. If the array was ordered before, it will lose
// that order.
func RemoveElementFrom2D(array [][]interface{}, index int) [][]interface{} {
	last := len(array) - 1
	array[index], array[last] = array[last], array[index]
	return array[:last]
}
// Sorts a 2D array by the specified column index in the specified direction. Panics if index is out of bounds.
// Allowed direction values are Desc and Asc. Supported types are nil, time.Time, string, json.Number and bool.
// Errors are given if direction or type are unknown.
func Sort2D(array [][]interface{}, index int, direction Direction) error {
if direction != Desc && direction != Asc {
return errors.New("unknown direction")
}
errFlag := false
sort.Slice(array, func(i, j int) bool {
if errFlag {
return false
}
if array[i][index] == nil && array[j][index] == nil {
return true
}
if array[i][index] == nil {
return direction == Asc
}
if array[j][index] == nil {
return direction == Desc
}
_, ok := array[i][index].(time.Time)
if ok {
if direction == Desc {
return array[i][index].(time.Time).After(array[j][index].(time.Time))
} else {
return array[i][index].(time.Time).Before(array[j][index].(time.Time))
}
}
_, ok = array[i][index].(string)
if ok {
if direction == Desc {
return strings.Compare(array[i][index].(string), array[j][index].(string)) > 0
} else {
return strings.Compare(array[i][index].(string), array[j][index].(string)) < 0
}
}
_, ok = array[i][index].(json.Number)
if ok {
valI, err := array[i][index].(json.Number).Float64()
if err != nil {
errFlag = true
return false
}
valJ, err := array[j][index].(json.Number).Float64()
if err != nil {
errFlag = true
return false
}
if direction == Desc {
return valI > valJ
} else {
return valI < valJ
}
}
_, ok = array[i][index].(bool)
if ok {
if array[i][index].(bool) == array[j][index].(bool) {
return true
}
if direction == Desc {
return array[i][index].(bool)
} else {
return !array[i][index].(bool)
}
}
errFlag = true
return false
})
if errFlag {
return errors.New("slice could not be sorted")
}
return nil
} | pkg/api/model/util.go | 0.550366 | 0.457258 | util.go | starcoder |
package predicate
import (
"fmt"
"strings"
"github.com/influxdata/influxdb"
"github.com/influxdata/influxql"
)
// buffer is a fixed ring of the three most recently scanned tokens, giving
// the parser a small amount of pushback/lookahead.
type buffer [3]struct {
	tok influxql.Token // last read token
	pos influxql.Pos   // last read pos
	lit string         // last read literal
}
// parser converts a predicate expression such as
//   (a = "a" or b != "b") and c = "efg"
// into a tree of predicate nodes.
type parser struct {
	sc *influxql.Scanner
	i int // buffer index
	n int // number of unscanned tokens buffered for replay
	openParen int // depth of currently open parentheses
	buf buffer
}
// scan returns the next token from the underlying scanner.
// If a token has been unscanned then read that instead.
func (p *parser) scan() (tok influxql.Token, pos influxql.Pos, lit string) {
	// If we have a token on the buffer, then return it.
	if p.n > 0 {
		p.n--
		return p.curr()
	}
	// Move the ring-buffer position forward and save the freshly read token.
	p.i = (p.i + 1) % len(p.buf)
	buf := &p.buf[p.i]
	buf.tok, buf.pos, buf.lit = p.sc.Scan()
	return p.curr()
}
// unscan pushes the previously read token back so the next scan returns it
// again.
func (p *parser) unscan() {
	p.n++
}
// curr returns the last read token, accounting for any tokens that have been
// pushed back with unscan.
func (p *parser) curr() (tok influxql.Token, pos influxql.Pos, lit string) {
	buf := &p.buf[(p.i-p.n+len(p.buf))%len(p.buf)]
	return buf.tok, buf.pos, buf.lit
}
// scanIgnoreWhitespace scans the next non-whitespace token.
// It skips at most one WS token; this assumes the scanner coalesces
// consecutive whitespace into a single WS token — TODO confirm.
func (p *parser) scanIgnoreWhitespace() (tok influxql.Token, pos influxql.Pos, lit string) {
	tok, pos, lit = p.scan()
	if tok == influxql.WS {
		tok, pos, lit = p.scan()
	}
	return
}
// Parse parses the predicate statement sts into a predicate node tree.
// An empty statement yields a nil node and nil error.
func Parse(sts string) (n Node, err error) {
	if sts == "" {
		return nil, nil
	}
	p := new(parser)
	p.sc = influxql.NewScanner(strings.NewReader(sts))
	return p.parseLogicalNode()
}
// parseLogicalNode parses a (possibly parenthesized) conjunction of tag rules
// and returns the resulting predicate node. OR is recognized but rejected as
// unsupported, and unbalanced parentheses are reported as errors.
func (p *parser) parseLogicalNode() (Node, error) {
	n := new(LogicalNode)
	for {
		tok, pos, _ := p.scanIgnoreWhitespace()
		switch tok {
		case influxql.NUMBER:
			fallthrough
		case influxql.INTEGER:
			fallthrough
		case influxql.NAME:
			fallthrough
		case influxql.IDENT:
			// A bare identifier/value starts a tag rule; push the token back
			// and let parseTagRuleNode consume it.
			p.unscan()
			tr, err := p.parseTagRuleNode()
			if err != nil {
				return *n, err
			}
			if n.Children[0] == nil {
				n.Children[0] = tr
			} else {
				n.Children[1] = tr
			}
		case influxql.AND:
			n.Operator = LogicalAnd
			if n.Children[1] == nil {
				continue
			}
			// Both child slots are full: nest the current node as the left
			// operand of a fresh AND node.
			var n1 Node
			var err error
			if tokNext := p.peekTok(); tokNext == influxql.LPAREN {
				n1, err = p.parseLogicalNode()
			} else {
				n1, err = p.parseTagRuleNode()
			}
			if err != nil {
				return *n, err
			}
			n = &LogicalNode{
				Children: [2]Node{*n, n1},
				Operator: LogicalAnd,
			}
		case influxql.OR:
			return *n, &influxdb.Error{
				Code: influxdb.EInvalid,
				Msg:  fmt.Sprintf("the logical operator OR is not supported yet at position %d", pos.Char),
			}
		case influxql.LPAREN:
			p.openParen++
			currParen := p.openParen
			n1, err := p.parseLogicalNode()
			if err != nil {
				return *n, err
			}
			// The recursive call consumes the matching RPAREN, which must
			// bring openParen back below currParen.
			if p.openParen != currParen-1 {
				// Plain string literal: fmt.Sprintf with a constant,
				// verb-free format is flagged by staticcheck (S1039).
				return *n, &influxdb.Error{
					Code: influxdb.EInvalid,
					Msg:  "extra ( seen",
				}
			}
			if n.Children[0] == nil {
				n.Children[0] = n1
			} else {
				n.Children[1] = n1
			}
		case influxql.RPAREN:
			p.openParen--
			fallthrough
		case influxql.EOF:
			if p.openParen < 0 {
				return *n, &influxdb.Error{
					Code: influxdb.EInvalid,
					Msg:  "extra ) seen",
				}
			}
			// A single child means no AND was involved; unwrap it.
			if n.Children[1] == nil {
				return n.Children[0], nil
			}
			return *n, nil
		default:
			return *n, &influxdb.Error{
				Code: influxdb.EInvalid,
				Msg:  fmt.Sprintf("bad logical expression, at position %d", pos.Char),
			}
		}
	}
}
// parseTagRuleNode parses a single "key op value" comparison into a
// TagRuleNode. Only the = operator is currently supported; !=, =~ and !~ are
// recognized but rejected as unsupported.
func (p *parser) parseTagRuleNode() (TagRuleNode, error) {
	n := new(TagRuleNode)
	// scan the key
	tok, pos, lit := p.scanIgnoreWhitespace()
	switch tok {
	case influxql.IDENT:
		n.Key = lit
	case influxql.NAME:
		n.Key = "name"
	default:
		return *n, &influxdb.Error{
			Code: influxdb.EInvalid,
			Msg: fmt.Sprintf("bad tag key, at position %d", pos.Char),
		}
	}
	// scan the operator
	tok, pos, _ = p.scanIgnoreWhitespace()
	switch tok {
	case influxql.EQ:
		n.Operator = influxdb.Equal
		goto scanRegularTagValue
	case influxql.NEQ:
		fallthrough
	case influxql.EQREGEX:
		fallthrough
	case influxql.NEQREGEX:
		return *n, &influxdb.Error{
			Code: influxdb.EInvalid,
			Msg: fmt.Sprintf("operator: %q at position: %d is not supported yet", tok.String(), pos.Char),
		}
	default:
		return *n, &influxdb.Error{
			Code: influxdb.EInvalid,
			Msg: fmt.Sprintf("invalid operator %q at position: %d", tok.String(), pos.Char),
		}
	}
	// scan the value
scanRegularTagValue:
	tok, pos, lit = p.scanIgnoreWhitespace()
	switch tok {
	case influxql.SUB:
		// Leading minus sign: remember it and scan the magnitude next.
		n.Value = "-"
		goto scanRegularTagValue
	case influxql.IDENT:
		fallthrough
	case influxql.DURATIONVAL:
		fallthrough
	case influxql.NUMBER:
		fallthrough
	case influxql.INTEGER:
		// += preserves a previously scanned "-" prefix.
		n.Value += lit
		return *n, nil
	case influxql.TRUE:
		n.Value = "true"
		return *n, nil
	case influxql.FALSE:
		n.Value = "false"
		return *n, nil
	default:
		return *n, &influxdb.Error{
			Code: influxdb.EInvalid,
			Msg: fmt.Sprintf("bad tag value: %q, at position %d", lit, pos.Char),
		}
	}
}
// peekRune returns the next rune that would be read by the scanner.
func (p *parser) peekTok() influxql.Token {
tok, _, _ := p.scanIgnoreWhitespace()
if tok != influxql.EOF {
p.unscan()
}
return tok
} | predicate/parser.go | 0.635222 | 0.403626 | parser.go | starcoder |
package dl
import "fmt"
// TimeOffset is the elapsed time. TimeOffset is measured in milliseconds.
type TimeOffset int64

// Format satisfies interface fmt.Formatter by delegating to formatUnit with
// the "ms" unit label.
func (time TimeOffset) Format(f fmt.State, c rune) { formatUnit(f, c, "ms", time, int64(time)) }
// Speed is the vehicle speed. Speed is measured in meters per second
type Speed float64
// Format satisfies interface fmt.Formatter
func (speed Speed) Format(f fmt.State, c rune) { formatUnit(f, c, "m/s", speed, float64(speed)) }
// SpeedAccuracy is the accuracy of the speed measurement. SpeedAccuracy is measured in millimeters per second
type SpeedAccuracy int
// Format satisfies interface fmt.Formatter
func (accuracy SpeedAccuracy) Format(f fmt.State, c rune) {
formatUnit(f, c, "mm/s", accuracy, int(accuracy))
}
// Coordinate is a single point on either the lattitudinal or longitudinal axis. Coordinate is measured in degrees
type Coordinate float64
// Format satisfies interface fmt.Formatter
func (coordinate Coordinate) Format(f fmt.State, c rune) {
formatUnit(f, c, "°", coordinate, float64(coordinate))
}
// GPSAccuracy is the accuracy of the GPS coordinates. GPSAccuracy is measured in millimeters
type GPSAccuracy int
// Format satisfies interface fmt.Formatter
func (accuracy GPSAccuracy) Format(f fmt.State, c rune) {
formatUnit(f, c, "mm", accuracy, int(accuracy))
}
// Heading is the direction something is headed. In the case of course information the heading is the direction the vehicle is moving. For lap markers, the heading is direction the marker is pointing. Heading is measured in degrees
type Heading float64
// Format satisfies interface fmt.Formatter
func (heading Heading) Format(f fmt.State, c rune) { formatUnit(f, c, "°", heading, float64(heading)) }
// HeadingAccuracy is the accuracy of the GPS heading. HeadingAccuracy is measured in degrees
type HeadingAccuracy float64
// Format satisfies interface fmt.Formatter
func (accuracy HeadingAccuracy) Format(f fmt.State, c rune) {
formatUnit(f, c, "°", accuracy, float64(accuracy))
}
// Acceleration is the acceleration of the vehicle in a given direction. Acceleration is measured in standard gravity
type Acceleration float64
// Format satisfies interface fmt.Formatter
func (acceleration Acceleration) Format(f fmt.State, c rune) {
formatUnit(f, c, "G", acceleration, float64(acceleration))
}
// GPSTime is the number of milliseconds since midnight between Saturday and Sunday. GPSTime is measured in milliseconds
type GPSTime uint32
// Format satisfies interface fmt.Formatter
func (gpsTime GPSTime) Format(f fmt.State, c rune) { formatUnit(f, c, "ms", gpsTime, uint32(gpsTime)) }
// Voltage is a measurement sampled from the anlog inputs of the data logger. Voltage is measured in millivolts
type Voltage int
// Format satisfies interface fmt.Formatter
func (voltage Voltage) Format(f fmt.State, c rune) { formatUnit(f, c, "mV", voltage, int(voltage)) }
// Frequency is a measurement sampled from the frequency inputs of the data logger. Frequency is measured in hertz
type Frequency float64
// Format satisfies interface fmt.Formatter
func (freq Frequency) Format(f fmt.State, c rune) { formatUnit(f, c, "hz", freq, float64(freq)) }
// Altitude is the height above sea level as measured by GPS. Altitude is measured in millimeters
type Altitude int
// Format satisfies interface fmt.Formatter
func (altitude Altitude) Format(f fmt.State, c rune) { formatUnit(f, c, "mm", altitude, int(altitude)) }
// AltitudeAccuracy is the accuracy of the altitude measurement. AltitudeAccuracy is measured in millimeters
type AltitudeAccuracy int
// Format satisfies interface fmt.Formatter
func (accuracy AltitudeAccuracy) Format(f fmt.State, c rune) {
formatUnit(f, c, "mm", accuracy, int(accuracy))
} | unit_types.go | 0.856647 | 0.73065 | unit_types.go | starcoder |
package rgass
import (
"errors"
)
// Model represents all nodes in the RGASS
type Model struct {
head *Node // A sentinel head node
tail *Node // A sentinel tail node
table map[ID]*Node // A map of node IDs to nodes
}
// NewModel creates a new Model with linked sentinel head and tail nodes.
// Note: only the head sentinel is registered in the ID table.
func NewModel() Model {
	m := Model{table: make(map[ID]*Node)}
	head := &Node{Sentinel: true}
	tail := &Node{Sentinel: true}
	m.table[head.ID] = head
	head.Next = tail
	tail.Prev = head
	m.head = head
	m.tail = tail
	return m
}
// Get returns the node associated with the given ID in the model, and
// whether it was found.
func (m *Model) Get(id ID) (*Node, bool) {
	node, ok := m.table[id]
	return node, ok
}
// Head returns the sentinel head node in the model.
func (m *Model) Head() *Node {
	return m.head
}
// FindNode finds a node given a root node ID and an offset (Algorithm 5, pp4).
// It descends through split nodes, rebasing pos into the chosen sub-node at
// each step, and returns the leaf node containing the position.
func (m *Model) FindNode(tarID ID, pos int) (tarNode *Node, err error) {
	tarNode, ok := m.Get(tarID)
	if !ok {
		return tarNode, errors.New("Target node not in model")
	}
	if pos > tarNode.Length() {
		return tarNode, errors.New("Position outside of target node")
	}
	for tarNode.Split {
		if pos <= tarNode.List[0].Length() {
			tarNode = tarNode.List[0]
		} else if pos <= tarNode.List[1].ID.Offset+tarNode.List[1].Length() {
			pos -= tarNode.List[0].Length()
			tarNode = tarNode.List[1]
		} else if tarNode.List[2] != nil {
			pos -= (tarNode.List[0].Length() + tarNode.List[1].Length())
			tarNode = tarNode.List[2]
		}
		// NOTE(review): if pos falls past List[1] while List[2] is nil, no
		// branch fires and this loop never terminates — confirm that case is
		// impossible by construction.
	}
	return tarNode, err
}
// InsertAfter inserts the given nodes after tarNode (totally ordered by ID).
// Each new node is registered in the ID table and linked into the list; an
// error is returned if tarNode is unknown or a new node's ID already exists.
func (m *Model) InsertAfter(tarNode *Node, newNodes ...*Node) error {
	if _, ok := m.Get(tarNode.ID); !ok {
		return errors.New("Target node not in model")
	}
	for _, newNode := range newNodes {
		if _, ok := m.Get(newNode.ID); ok {
			return errors.New("Node already in model")
		}
		m.table[newNode.ID] = newNode
		// Advance the insertion point while newNode's ID compares less than
		// the successor's. NOTE(review): verify this direction matches the
		// intended total order of concurrent insertions.
		for nextNode := tarNode.Next; nextNode != m.tail; nextNode = nextNode.Next {
			if newNode.ID.Compare(nextNode.ID) == -1 {
				tarNode = nextNode
			} else {
				break
			}
		}
		linkAfter(tarNode, newNode)
		tarNode = newNode
	}
	return nil
}
// Replace links newNodes into the list immediately after tarNode and
// registers them in the ID table (the original node remains linked; callers
// presumably mark it split elsewhere — TODO confirm). It returns an error if
// tarNode is unknown or if the first new node's ID is already present, and
// does nothing when no replacement nodes are supplied.
func (m *Model) Replace(tarNode *Node, newNodes ...*Node) error {
	if _, ok := m.Get(tarNode.ID); !ok {
		return errors.New("Target node not in model")
	}
	// Guard the empty variadic call, which previously caused an
	// index-out-of-range panic on newNodes[0].
	if len(newNodes) == 0 || newNodes[0] == nil {
		return nil
	}
	firstNewNode := newNodes[0]
	if _, ok := m.Get(firstNewNode.ID); ok {
		return errors.New("Node already in model")
	}
	m.table[firstNewNode.ID] = firstNewNode
	linkAfter(tarNode, firstNewNode)
	tarNode = firstNewNode
	for _, newNode := range newNodes[1:] {
		m.table[newNode.ID] = newNode
		linkAfter(tarNode, newNode)
		tarNode = newNode
	}
	return nil
}
// Iter iterates over all nodes in the model, starting at (and including) the
// sentinel head node and stopping before the sentinel tail.
// NOTE(review): the channel is unbuffered, so a consumer that stops reading
// before the channel is drained leaks the sending goroutine — confirm all
// callers read to completion.
func (m *Model) Iter() <-chan *Node {
	ch := make(chan *Node)
	go func() {
		for node := m.head; node != m.tail; node = node.Next {
			ch <- node
		}
		close(ch)
	}()
	return ch
}
func linkAfter(tarNode *Node, newNode *Node) {
newNode.Next = tarNode.Next
newNode.Prev = tarNode
newNode.Next.Prev = newNode
tarNode.Next = newNode
} | rgass/model.go | 0.704973 | 0.476275 | model.go | starcoder |
package node
import (
"sort"
"github.com/insolar/insolar/insolar"
)
// Accessor provides read-only indexed access to the nodes of a single
// Snapshot: by reference, short ID, address, and (for powered nodes) role.
type Accessor struct {
	snapshot *Snapshot
	refIndex map[insolar.Reference]insolar.NetworkNode
	sidIndex map[insolar.ShortNodeID]insolar.NetworkNode
	addrIndex map[string]insolar.NetworkNode
	roleIndex map[insolar.StaticRole]*refSet
	// should be removed in future
	active []insolar.NetworkNode
}
// GetActiveNodeByShortID returns the active node with the given short ID, or
// nil if there is none.
func (a *Accessor) GetActiveNodeByShortID(shortID insolar.ShortNodeID) insolar.NetworkNode {
	return a.sidIndex[shortID]
}

// GetActiveNodeByAddr returns the active node with the given address, or nil
// if there is none.
func (a *Accessor) GetActiveNodeByAddr(address string) insolar.NetworkNode {
	return a.addrIndex[address]
}

// GetActiveNodes returns a copy of the snapshot's active node list.
func (a *Accessor) GetActiveNodes() []insolar.NetworkNode {
	result := make([]insolar.NetworkNode, len(a.active))
	copy(result, a.active)
	return result
}

// GetActiveNode returns the active node with the given reference, or nil.
func (a *Accessor) GetActiveNode(ref insolar.Reference) insolar.NetworkNode {
	return a.refIndex[ref]
}

// GetWorkingNode returns the active node with the given reference, but only
// if it has non-zero power; otherwise nil.
func (a *Accessor) GetWorkingNode(ref insolar.Reference) insolar.NetworkNode {
	node := a.GetActiveNode(ref)
	if node == nil || node.GetPower() == 0 {
		return nil
	}
	return node
}
// GetWorkingNodes returns a copy of the snapshot's working node list, sorted
// by node reference.
func (a *Accessor) GetWorkingNodes() []insolar.NetworkNode {
	workingList := a.snapshot.nodeList[ListWorking]
	result := make([]insolar.NetworkNode, len(workingList))
	copy(result, workingList)
	sort.Slice(result, func(i, j int) bool {
		return result[i].ID().Compare(result[j].ID()) < 0
	})
	return result
}
// GetSnapshotActiveNodes returns the snapshot's joiner, idle, working and
// leaving nodes concatenated, in that order, as a single freshly allocated
// slice.
func GetSnapshotActiveNodes(snapshot *Snapshot) []insolar.NetworkNode {
	joining := snapshot.nodeList[ListJoiner]
	idle := snapshot.nodeList[ListIdle]
	working := snapshot.nodeList[ListWorking]
	leaving := snapshot.nodeList[ListLeaving]

	result := make([]insolar.NetworkNode, 0, len(joining)+len(idle)+len(working)+len(leaving))
	result = append(result, joining...)
	result = append(result, idle...)
	result = append(result, working...)
	result = append(result, leaving...)
	return result
}
// addToIndex registers node in the reference, short-ID and address indexes.
// Nodes with zero power are deliberately excluded from the role index.
func (a *Accessor) addToIndex(node insolar.NetworkNode) {
	a.refIndex[node.ID()] = node
	a.sidIndex[node.ShortID()] = node
	a.addrIndex[node.Address()] = node
	if node.GetPower() == 0 {
		return
	}
	list, ok := a.roleIndex[node.Role()]
	if !ok {
		list = newRefSet()
	}
	list.Add(node.ID())
	a.roleIndex[node.Role()] = list
}
func NewAccessor(snapshot *Snapshot) *Accessor {
result := &Accessor{
snapshot: snapshot,
refIndex: make(map[insolar.Reference]insolar.NetworkNode),
sidIndex: make(map[insolar.ShortNodeID]insolar.NetworkNode),
roleIndex: make(map[insolar.StaticRole]*refSet),
addrIndex: make(map[string]insolar.NetworkNode),
}
result.active = GetSnapshotActiveNodes(snapshot)
for _, node := range result.active {
result.addToIndex(node)
}
return result
} | network/node/accessor.go | 0.550124 | 0.421314 | accessor.go | starcoder |
package env
import (
"os"
"strconv"
)
// Get parses an string from the environment variable key parameter. If the environment
// variable is empty, the defaultValue parameter is returned.
func Get(key string, defaultValue string) string {
r := os.Getenv(key)
if r == "" {
return defaultValue
}
return r
}
// GetInt parses an int from the environment variable key parameter. If the environment
// variable is empty or fails to parse, the defaultValue parameter is returned.
func GetInt(key string, defaultValue int) int {
r := os.Getenv(key)
i, err := strconv.Atoi(r)
if err != nil {
return defaultValue
}
return i
}
// GetInt8 parses an int8 from the environment variable key parameter. If the environment
// variable is empty or fails to parse, the defaultValue parameter is returned.
func GetInt8(key string, defaultValue int8) int8 {
r := os.Getenv(key)
i, err := strconv.ParseInt(r, 10, 8)
if err != nil {
return defaultValue
}
return int8(i)
}
// GetInt16 parses an int16 from the environment variable key parameter. If the environment
// variable is empty or fails to parse, the defaultValue parameter is returned.
func GetInt16(key string, defaultValue int16) int16 {
r := os.Getenv(key)
i, err := strconv.ParseInt(r, 10, 16)
if err != nil {
return defaultValue
}
return int16(i)
}
// GetInt32 parses an int32 from the environment variable key parameter. If the environment
// variable is empty or fails to parse, the defaultValue parameter is returned.
func GetInt32(key string, defaultValue int32) int32 {
r := os.Getenv(key)
i, err := strconv.ParseInt(r, 10, 32)
if err != nil {
return defaultValue
}
return int32(i)
}
// GetInt64 parses an int64 from the environment variable key parameter. If the environment
// variable is empty or fails to parse, the defaultValue parameter is returned.
func GetInt64(key string, defaultValue int64) int64 {
r := os.Getenv(key)
i, err := strconv.ParseInt(r, 10, 64)
if err != nil {
return defaultValue
}
return i
}
// GetUInt parses a uint from the environment variable key parameter. If the environment
// variable is empty or fails to parse, the defaultValue parameter is returned.
func GetUInt(key string, defaultValue uint) uint {
r := os.Getenv(key)
i, err := strconv.ParseUint(r, 10, 32)
if err != nil {
return defaultValue
}
return uint(i)
}
// GetUInt8 parses a uint8 from the environment variable key parameter. If the environment
// variable is empty or fails to parse, the defaultValue parameter is returned.
func GetUInt8(key string, defaultValue uint8) uint8 {
r := os.Getenv(key)
i, err := strconv.ParseUint(r, 10, 8)
if err != nil {
return defaultValue
}
return uint8(i)
}
// GetUInt16 parses a uint16 from the environment variable key parameter. If the environment
// variable is empty or fails to parse, the defaultValue parameter is returned.
func GetUInt16(key string, defaultValue uint16) uint16 {
r := os.Getenv(key)
i, err := strconv.ParseUint(r, 10, 16)
if err != nil {
return defaultValue
}
return uint16(i)
}
// GetUInt32 parses a uint32 from the environment variable key parameter. If the environment
// variable is empty or fails to parse, the defaultValue parameter is returned.
func GetUInt32(key string, defaultValue uint32) uint32 {
r := os.Getenv(key)
i, err := strconv.ParseUint(r, 10, 32)
if err != nil {
return defaultValue
}
return uint32(i)
}
// GetUInt64 parses a uint64 from the environment variable key parameter. If the environment
// variable is empty or fails to parse, the defaultValue parameter is returned.
func GetUInt64(key string, defaultValue uint64) uint64 {
r := os.Getenv(key)
i, err := strconv.ParseUint(r, 10, 64)
if err != nil {
return defaultValue
}
return uint64(i)
}
// GetFloat32 parses a float32 from the environment variable key parameter. If the environment
// variable is empty or fails to parse, the defaultValue parameter is returned.
func GetFloat32(key string, defaultValue float32) float32 {
r := os.Getenv(key)
f, err := strconv.ParseFloat(r, 32)
if err != nil {
return defaultValue
}
return float32(f)
}
// GetFloat64 parses a float64 from the environment variable key parameter. If the environment
// variable is empty or fails to parse, the defaultValue parameter is returned.
func GetFloat64(key string, defaultValue float64) float64 {
r := os.Getenv(key)
f, err := strconv.ParseFloat(r, 64)
if err != nil {
return defaultValue
}
return f
}
// GetBool parses a bool from the environment variable key parameter. If the environment
// variable is empty or fails to parse, the defaultValue parameter is returned.
func GetBool(key string, defaultValue bool) bool {
r := os.Getenv(key)
b, err := strconv.ParseBool(r)
if err != nil {
return defaultValue
}
return b
}
// Set sets the environment variable for the key provided using the value provided.
func Set(key string, value string) error {
return os.Setenv(key, value)
}
// SetInt sets the environment variable to a string formatted int value
func SetInt(key string, value int) error {
return os.Setenv(key, strconv.Itoa(value))
}
// SetInt8 sets the environment variable to a string formatted int8 value.
func SetInt8(key string, value int8) error {
return os.Setenv(key, strconv.FormatInt(int64(value), 10))
}
// SetInt16 sets the environment variable to a string formatted int16 value.
func SetInt16(key string, value int16) error {
return os.Setenv(key, strconv.FormatInt(int64(value), 10))
}
// SetInt32 sets the environment variable to a string formatted int32 value.
func SetInt32(key string, value int32) error {
return os.Setenv(key, strconv.FormatInt(int64(value), 10))
}
// SetInt64 sets the environment variable to a string formatted int64 value.
func SetInt64(key string, value int64) error {
return os.Setenv(key, strconv.FormatInt(value, 10))
}
// SetUInt sets the environment variable to a string formatted uint value
func SetUInt(key string, value uint) error {
return os.Setenv(key, strconv.FormatUint(uint64(value), 10))
}
// SetUInt8 sets the environment variable to a string formatted uint8 value
func SetUInt8(key string, value uint8) error {
return os.Setenv(key, strconv.FormatUint(uint64(value), 10))
}
// SetUInt16 sets the environment variable to a string formatted uint16 value
func SetUInt16(key string, value uint16) error {
return os.Setenv(key, strconv.FormatUint(uint64(value), 10))
}
// SetUInt32 sets the environment variable to a string formatted uint32 value
func SetUInt32(key string, value uint32) error {
return os.Setenv(key, strconv.FormatUint(uint64(value), 10))
}
// SetUInt64 sets the environment variable to a string formatted uint64 value
func SetUInt64(key string, value uint64) error {
return os.Setenv(key, strconv.FormatUint(value, 10))
}
// SetBool sets the environment variable to a string formatted bool value.
func SetBool(key string, value bool) error {
return os.Setenv(key, strconv.FormatBool(value))
} | pkg/env/env.go | 0.721743 | 0.444022 | env.go | starcoder |
package starlarktruth
import (
"fmt"
"sort"
"strings"
"go.starlark.net/starlark"
"go.starlark.net/syntax"
)
type (
	// attr is the implementation of a single proposition method; it is
	// invoked on the subject T with the method's positional arguments.
	attr func(t *T, args ...starlark.Value) (starlark.Value, error)
	// attrs maps a proposition method name to its implementation.
	attrs map[string]attr
)
var (
methods0args = attrs{
"contains_no_duplicates": containsNoDuplicates,
"in_order": inOrder,
"is_callable": isCallable,
"is_empty": isEmpty,
"is_false": isFalse,
"is_falsy": isFalsy,
"is_finite": isFinite,
"is_nan": isNaN,
"is_negative_infinity": isNegativeInfinity,
"is_non_zero": isNonZero,
"is_none": isNone,
"is_not_callable": isNotCallable,
"is_not_empty": isNotEmpty,
"is_not_finite": isNotFinite,
"is_not_nan": isNotNaN,
"is_not_negative_infinity": isNotNegativeInfinity,
"is_not_none": isNotNone,
"is_not_positive_infinity": isNotPositiveInfinity,
"is_ordered": isOrdered,
"is_positive_infinity": isPositiveInfinity,
"is_strictly_ordered": isStrictlyOrdered,
"is_true": isTrue,
"is_truthy": isTruthy,
"is_zero": isZero,
}
methods1arg = attrs{
"contains": contains,
"contains_all_in": containsAllIn,
"contains_any_in": containsAnyIn,
"contains_exactly_elements_in": containsExactlyElementsIn,
"contains_exactly_items_in": containsExactlyItemsIn,
"contains_key": containsKey,
"contains_match": containsMatch,
"contains_none_in": containsNoneIn,
"does_not_contain": doesNotContain,
"does_not_contain_key": doesNotContainKey,
"does_not_contain_match": doesNotContainMatch,
"does_not_have_attribute": doesNotHaveAttribute,
"does_not_match": doesNotMatch,
"ends_with": endsWith,
"has_attribute": hasAttribute,
"has_length": hasLength,
"has_size": hasSize,
"is_at_least": isAtLeast,
"is_at_most": isAtMost,
"is_equal_to": isEqualTo,
"is_greater_than": isGreaterThan,
"is_in": isIn,
"is_less_than": isLessThan,
"is_not_equal_to": isNotEqualTo,
"is_not_in": isNotIn,
"is_not_of_type": isNotOfType,
"is_not_within": isNotWithin,
"is_of_type": isOfType,
"is_ordered_according_to": isOrderedAccordingTo,
"is_strictly_ordered_according_to": isStrictlyOrderedAccordingTo,
"is_within": isWithin,
"matches": matches,
"named": named,
"of": of,
"starts_with": startsWith,
}
methods2args = attrs{
"contains_item": containsItem,
"does_not_contain_item": doesNotContainItem,
}
methodsNargs = attrs{
"contains_all_of": containsAllOf,
"contains_any_of": containsAnyOf,
"contains_exactly": containsExactly,
"contains_none_of": containsNoneOf,
"is_any_of": isAnyOf,
"is_none_of": isNoneOf,
}
methods = []attrs{
methodsNargs,
methods0args,
methods1arg,
methods2args,
}
attrNames = func() []string {
count := 0
for _, ms := range methods {
count += len(ms)
}
names := make([]string, 0, count)
for _, ms := range methods {
for name := range ms {
names = append(names, name)
}
}
sort.Strings(names)
return names
}()
)
// findAttr looks up a proposition method by name and returns it together
// with its positional-argument count: 0, 1 or 2 for the fixed-arity tables,
// or -1 for variadic methods. An unknown name yields (nil, 0).
func findAttr(name string) (attr, int) {
	// methods[0] is methodsNargs, so methods[1:] is exactly the 0-, 1- and
	// 2-argument maps in order: the slice index i doubles as the arity.
	for i, ms := range methods[1:] {
		if m, ok := ms[name]; ok {
			return m, i
		}
	}
	// Variadic methods are signalled with -1 so the caller skips unpacking.
	if m, ok := methodsNargs[name]; ok {
		return m, -1
	}
	return nil, 0
}
// LocalThreadKeyForClose is used by Close() and internally to check subjects
// are eventually resolved.
var LocalThreadKeyForClose = Default
// εCallFrame is the sentinel stored under LocalThreadKeyForClose once a
// subject has been resolved; its impossible position (-1, -1) distinguishes
// it from the real call frame recorded for an unresolved subject.
var εCallFrame = starlark.CallFrame{Pos: syntax.Position{Line: -1, Col: -1}}
// Close ensures that all created subjects were eventually resolved.
// Otherwise it returns an error pinpointing the UnresolvedError position.
// A subject is considered resolved when at least one proposition has been
// executed on it. An unresolved or dangling assertion is almost certainly a
// test author error.
func Close(th *starlark.Thread) (err error) {
	// Any stored call frame other than the εCallFrame sentinel marks an
	// unresolved subject; its recorded position pinpoints the dangling call.
	if c, ok := th.Local(LocalThreadKeyForClose).(starlark.CallFrame); ok && c != εCallFrame {
		err = UnresolvedError(c.Pos.String())
	}
	return
}
// Asserted returns whether all assert.that(x)... call chains were properly terminated.
// NOTE(review): this only checks that *some* call frame was recorded under
// LocalThreadKeyForClose; unlike Close it does not compare against the
// εCallFrame sentinel, so it returns true even with an unresolved subject
// outstanding — confirm that this is intentional.
func Asserted(th *starlark.Thread) bool {
	_, ok := th.Local(LocalThreadKeyForClose).(starlark.CallFrame)
	return ok
}
// builtinAttr resolves a proposition method by name and wraps it in a
// starlark.Builtin bound to the subject t. The wrapper unpacks arguments
// according to the method's arity, marks the subject resolved for most
// propositions, and sanity-checks the return value conventions.
// Returning (nil, nil) means "no such attribute" per the starlark HasAttrs
// contract.
func builtinAttr(t *T, name string) (starlark.Value, error) {
	method, nArgs := findAttr(name)
	if method == nil {
		return nil, nil // no such method
	}
	impl := func(thread *starlark.Thread, b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
		if err := t.registerValues(thread); err != nil {
			return nil, err
		}
		bName := b.Name()
		// argz collects the positional arguments actually passed through to
		// the method; how it is filled depends on the declared arity.
		var argz []starlark.Value
		switch nArgs {
		case -1:
			// Variadic: forward all positionals verbatim, reject keywords.
			if len(kwargs) > 0 {
				return nil, fmt.Errorf("%s: unexpected keyword arguments", bName)
			}
			argz = []starlark.Value(args)
		case 0:
			if err := starlark.UnpackPositionalArgs(bName, args, kwargs, nArgs); err != nil {
				return nil, err
			}
		case 1:
			var arg1 starlark.Value
			if err := starlark.UnpackPositionalArgs(bName, args, kwargs, nArgs, &arg1); err != nil {
				return nil, err
			}
			argz = []starlark.Value{arg1}
		case 2:
			var arg1, arg2 starlark.Value
			if err := starlark.UnpackPositionalArgs(bName, args, kwargs, nArgs, &arg1, &arg2); err != nil {
				return nil, err
			}
			argz = []starlark.Value{arg1, arg2}
		default:
			// findAttr only ever yields -1, 0, 1 or 2; anything else is a
			// programming error in the method tables.
			err := fmt.Errorf("unexpected #args for %s.that(%s).%q(): %d", Default, t.actual.String(), name, nArgs)
			return nil, err
		}
		// contains_all* / contains_exactly* propositions must return a *T
		// prepared for a chained .in_order() call.
		providesInOrder := false ||
			strings.HasPrefix(bName, "contains_all") ||
			strings.HasPrefix(bName, "contains_exactly")
		deferred := false
		switch bName {
		case "named":
		case "is_within":
		case "is_not_within":
			// These three return a subject to keep chaining, so they must
			// NOT mark the subject as resolved.
		default:
			// Marks the current subject as having been adequately asserted.
			defer thread.SetLocal(LocalThreadKeyForClose, εCallFrame)
			deferred = true
		}
		ret, err := method(t, argz...)
		switch err {
		case nil:
			if providesInOrder {
				if tt, ok := ret.(*T); !ok {
					panic("unreachable: call should return t for .in_order()")
				} else if tt.forOrdering == nil {
					panic("unreachable: call should prepare for .in_order()")
				}
			} else if deferred && ret != starlark.None {
				panic(fmt.Sprintf("unreachable: call should return None, not: %T", ret))
			}
			return ret, nil
		case errUnhandled:
			// The method could not interpret the actual value's type.
			return nil, t.unhandled(bName, argz...)
		default:
			return nil, err
		}
	}
	return starlark.NewBuiltin(name, impl).BindReceiver(t), nil
}
package applier
import (
"fmt"
"github.com/skeema/skeema/fs"
"github.com/skeema/skeema/workspace"
"github.com/skeema/tengo"
)
// VerifyDiff verifies the result of all AlterTable values found in
// diff.TableDiffs, confirming that applying the corresponding ALTER would
// bring a table from the version currently in the instance to the version
// specified in the filesystem.
func VerifyDiff(diff *tengo.SchemaDiff, t *Target) error {
	if !wantVerify(diff, t) {
		return nil
	}
	// If diff contains no ALTER TABLEs, nothing to verify
	altersInDiff := diff.FilteredTableDiffs(tengo.DiffTypeAlter)
	if len(altersInDiff) == 0 {
		return nil
	}
	// Build a set of statement modifiers that will yield matching CREATE TABLE
	// statements in all edge cases.
	mods := tengo.StatementModifiers{
		NextAutoInc:            tengo.NextAutoIncIgnore,
		StrictIndexOrder:       true, // needed since we must get the SHOW CREATE TABLEs to match
		StrictForeignKeyNaming: true, // ditto
		AllowUnsafe:            true, // needed since we're just running against the temp schema
		SkipPreDropAlters:      true, // needed to ignore DROP PARTITION generated just to speed up DROP TABLE
		Flavor:                 t.Instance.Flavor(),
	}
	if major, minor, _ := t.Instance.Version(); major > 5 || minor > 5 {
		// avoid having MySQL ignore index changes that are simply reordered, but only
		// legal syntax in 5.6+
		mods.AlgorithmClause = "copy"
	}
	// Gather CREATE and ALTER for modified tables, and put into a LogicalSchema,
	// which we then materialize into a real schema using a workspace
	logicalSchema := &fs.LogicalSchema{
		CharSet:   t.Dir.Config.Get("default-character-set"),
		Collation: t.Dir.Config.Get("default-collation"),
		Creates:   make(map[tengo.ObjectKey]*fs.Statement),
		Alters:    make([]*fs.Statement, 0),
	}
	// expected maps table name -> the desired ("to") definition, so we can
	// compare the workspace result table-by-table afterwards.
	expected := make(map[string]*tengo.Table)
	for _, td := range altersInDiff {
		stmt, err := td.Statement(mods)
		// Skip no-op or errored ALTERs; only verifiable statements are staged.
		if stmt != "" && err == nil {
			expected[td.From.Name] = td.To
			logicalSchema.AddStatement(&fs.Statement{
				Type:       fs.StatementTypeCreate,
				Text:       td.From.CreateStatement,
				ObjectType: tengo.ObjectTypeTable,
				ObjectName: td.From.Name,
			})
			logicalSchema.AddStatement(&fs.Statement{
				Type:       fs.StatementTypeAlter,
				Text:       stmt,
				ObjectType: tengo.ObjectTypeTable,
				ObjectName: td.From.Name,
			})
		}
	}
	opts, err := workspace.OptionsForDir(t.Dir, t.Instance)
	if err != nil {
		return err
	}
	wsSchema, err := workspace.ExecLogicalSchema(logicalSchema, opts)
	// A per-statement failure inside the workspace run also counts as a
	// verification failure; surface the first one.
	if err == nil && len(wsSchema.Failures) > 0 {
		err = wsSchema.Failures[0]
	}
	if err != nil {
		return fmt.Errorf("Diff verification failure: %s", err.Error())
	}
	// Compare the create statements of the "to" side of the diff with the create
	// statements from the workspace. In doing so we must ignore differences in
	// next-auto-inc value (which intentionally is often not updated) as well as
	// the entirety of the partitioning clause (since the partition list is
	// intentionally never modified).
	actualTables := wsSchema.TablesByName()
	for name, toTable := range expected {
		// Simply compare partitioning *status*
		expectPartitioned := (toTable.Partitioning != nil)
		actualPartitioned := (actualTables[name].Partitioning != nil)
		if expectPartitioned != actualPartitioned {
			return fmt.Errorf("Diff verification failure on table %s\nEXPECTED PARTITIONING STATUS POST-ALTER: %t\nACTUAL PARTITIONING STATUS POST-ALTER: %t\nRun command again with --skip-verify if this discrepancy is safe to ignore", name, expectPartitioned, actualPartitioned)
		}
		expectCreate := toTable.CreateStatement
		actualCreate := actualTables[name].CreateStatement
		// Strip the partitioning clause before comparing, then normalize away
		// the next-auto-increment value on both sides.
		if expectPartitioned {
			expectCreate = toTable.UnpartitionedCreateStatement(mods.Flavor)
			actualCreate = actualTables[name].UnpartitionedCreateStatement(mods.Flavor)
		}
		expectCreate, _ = tengo.ParseCreateAutoInc(expectCreate)
		actualCreate, _ = tengo.ParseCreateAutoInc(actualCreate)
		if expectCreate != actualCreate {
			return fmt.Errorf("Diff verification failure on table %s\n\nEXPECTED POST-ALTER:\n%s\n\nACTUAL POST-ALTER:\n%s\n\nRun command again with --skip-verify if this discrepancy is safe to ignore", name, expectCreate, actualCreate)
		}
	}
	return nil
}
func wantVerify(diff *tengo.SchemaDiff, t *Target) bool {
return t.Dir.Config.GetBool("verify") && len(diff.TableDiffs) > 0 && !t.briefOutput()
} | applier/verifier.go | 0.557604 | 0.402069 | verifier.go | starcoder |
package fields
import "time"
// Package level versions
// SetInt64 instructs update to set given field to the provided value.
// Result is equivalent to:
// field = value
func SetInt64(field string, value int64) *Update {
	return Set(field, value)
}
// SetInt instructs update to set given field to the provided value.
// Result is equivalent to:
// field = value
func SetInt(field string, value int) *Update {
	return Set(field, value)
}
// SetInt32 instructs update to set given field to the provided value.
// Result is equivalent to:
// field = value
func SetInt32(field string, value int32) *Update {
	return Set(field, value)
}
// SetString instructs update to set given field to the provided value.
// Result is equivalent to:
// field = value
func SetString(field string, value string) *Update {
	return Set(field, value)
}
// SetBytes instructs update to set given field to the provided value.
// Result is equivalent to:
// field = value
func SetBytes(field string, value []byte) *Update {
	return Set(field, value)
}
// SetFloat32 instructs update to set given field to the provided value.
// Result is equivalent to:
// field = value
func SetFloat32(field string, value float32) *Update {
	return Set(field, value)
}
// SetFloat64 instructs update to set given field to the provided value.
// Result is equivalent to:
// field = value
func SetFloat64(field string, value float64) *Update {
	return Set(field, value)
}
// SetTime instructs update to set given field to the provided *time.Time value.
// Result is equivalent to:
// field = value
func SetTime(field string, value *time.Time) *Update {
	return Set(field, value)
}
// Scoped versions
// SetInt64 instructs update to set given field to the provided value
// Result is equivalent to:
// field = value
func (u *Update) SetInt64(field string, value int64) *Update {
return u.Set(field, value)
}
// SetInt instructs update to set given field to the provided value
// Result is equivalent to:
// field = value
func (u *Update) SetInt(field string, value int) *Update {
return Set(field, value)
}
// SetInt32 instructs update to set given field to the provided value
// Result is equivalent to:
// field = value
func (u *Update) SetInt32(field string, value int32) *Update {
return Set(field, value)
}
// SetString instructs update to set given field to the provided value
// Result is equivalent to:
// field = value
func (u *Update) SetString(field string, value string) *Update {
return Set(field, value)
}
// SetBytes instructs update to set given field to the provided value
// Result is equivalent to:
// field = value
func (u *Update) SetBytes(field string, value []byte) *Update {
return Set(field, value)
}
// SetFloat32 instructs update to set given field to the provided value
// Result is equivalent to:
// field = value
func (u *Update) SetFloat32(field string, value float32) *Update {
return Set(field, value)
}
// SetFloat64 instructs update to set given field to the provided value
// Result is equivalent to:
// field = value
func (u *Update) SetFloat64(field string, value float64) *Update {
return Set(field, value)
}
// SetTime composes 'equal' operation. from time.Time value.
// Result is equivalent to:
// field = value
func (u *Update) SetTime(field string, value *time.Time) *Update {
return Set(field, value)
} | fields/set.go | 0.839175 | 0.45538 | set.go | starcoder |
package imagequant
import (
"image/color"
"unsafe"
)
/*
#include "libimagequant.h"
*/
import "C"
// Result wraps a libimagequant quantization result (liq_result).
// Callers must not use this object once Release has been called on the parent
// Image struct.
type Result struct {
	p *C.struct_liq_result // owned C handle; freed by Release
	im *Image // source image; must outlive this Result
}
// Enables/disables dithering in liq_write_remapped_image(). Dithering level must be between 0 and 1 (inclusive).
// Dithering level 0 enables fast non-dithered remapping. Otherwise a variation of Floyd-Steinberg error diffusion is used.
// Precision of the dithering algorithm depends on the speed setting, see Attributes.SetSpeed()
func (this *Result) SetDitheringLevel(dither_level float32) error {
	return translateError(C.liq_set_dithering_level(this.p, C.float(dither_level)))
}
// Returns mean square error of quantization (square of difference between pixel values in the source image and its remapped version).
// Alpha channel, gamma correction and approximate importance of pixels is taken into account, so the result isn't exactly the mean square error of all channels.
// For most images MSE 1-5 is excellent. 7-10 is OK. 20-30 will have noticeable errors. 100 is awful.
// This function may return -1 if the value is not available (this happens when a high speed has been requested, the image hasn't been remapped yet, and quality limit hasn't been set, see Attributes.SetSpeed() and Attributes.SetQuality()).
// The value is not updated when multiple images are remapped, it applies only to the image used in liq_image_quantize() or the first image that has been remapped.
func (this *Result) GetQuantizationError() float64 {
	return float64(C.liq_get_quantization_error(this.p))
}
// Returns mean square error of last remapping done (square of difference between pixel values in the remapped image and its remapped version).
// Alpha channel and gamma correction are taken into account, so the result isn't exactly the mean square error of all channels.
// This function may return -1 if the value is not available (this happens when a high speed has been requested or the image hasn't been remapped yet).
func (this *Result) GetRemappingError() float64 {
	return float64(C.liq_get_remapping_error(this.p))
}
// Analoguous to Result.GetQuantizationError(), but returns quantization error as quality value in the same 0-100 range that is used by Attributes.SetQuality().
func (this *Result) GetQuantizationQuality() float64 {
	return float64(C.liq_get_quantization_quality(this.p))
}
// Analoguous to Result.GetRemappingError(), but returns quantization error as quality value in the same 0-100 range that is used by Attributes.SetQuality().
func (this *Result) GetRemappingQuality() float64 {
	return float64(C.liq_get_remapping_quality(this.p))
}
// Sets gamma correction for generated palette and remapped image.
// Must be > 0 and < 1, e.g. 0.45455 for gamma 1/2.2 in PNG images.
// By default output gamma is same as gamma of the input image.
func (this *Result) SetOutputGamma(gamma float64) error {
	return translateError(C.liq_set_output_gamma(this.p, C.double(gamma)))
}
// GetImageWidth returns the source image width in pixels.
// Reads the cached Go-side value rather than calling into C.
func (this *Result) GetImageWidth() int {
	// C.liq_image_get_width
	return this.im.w
}
// GetImageHeight returns the source image height in pixels.
// Reads the cached Go-side value rather than calling into C.
func (this *Result) GetImageHeight() int {
	// C.liq_image_get_height
	return this.im.h
}
// GetOutputGamma returns the gamma that will be applied to the output,
// see SetOutputGamma.
func (this *Result) GetOutputGamma() float64 {
	return float64(C.liq_get_output_gamma(this.p))
}
// Remaps the image to palette and writes its pixels to the given []byte, 1 pixel per byte.
// Each byte is an index into the palette returned by GetPalette.
// Returns ErrUseAfterFree if the parent Image has already been released.
func (this *Result) WriteRemappedImage() ([]byte, error) {
	if this.im.released {
		return nil, ErrUseAfterFree
	}
	// One palette index per pixel, so the buffer is width*height bytes.
	buff_size := this.im.w * this.im.h
	buff := make([]byte, buff_size)
	buffP := unsafe.Pointer(&buff[0])
	iqe := C.liq_write_remapped_image(this.p, this.im.p, buffP, C.size_t(buff_size))
	if iqe != C.LIQ_OK {
		return nil, translateError(iqe)
	}
	return buff, nil
}
// GetPalette returns a color.Palette optimized for the image that has been
// quantized or remapped (final refinements are applied to the palette during
// remapping). It's valid to call this method before remapping, if you don't
// plan to remap any images or want to use the same palette for multiple images.
func (this *Result) GetPalette() color.Palette {
	pal := C.liq_get_palette(this.p) // copy struct content
	entries := make([]color.Color, int(pal.count))
	for i := range entries {
		e := pal.entries[i]
		entries[i] = color.RGBA{
			R: uint8(e.r),
			G: uint8(e.g),
			B: uint8(e.b),
			A: uint8(e.a),
		}
	}
	return entries
}
// Free memory. Callers must not use this object after Release has been called.
// Releases only the C result handle; the parent Image is freed separately.
func (this *Result) Release() {
	C.liq_result_destroy(this.p)
}
package metrics
// Histograms calculate distribution statistics from a series of int64 values.
// All statistics are derived from the underlying Sample, which may retain
// only a subset of the observed values depending on its sampling strategy.
type Histogram interface {
	Clear()
	Count() int64
	Max() int64
	Mean() float64
	Min() int64
	Percentile(float64) float64
	Percentiles([]float64) []float64
	Sample() Sample
	Snapshot() Histogram
	StdDev() float64
	Sum() int64
	Update(int64)
	Variance() float64
}
// GetOrRegisterHistogram returns an existing Histogram or constructs and
// registers a new StandardHistogram. A nil Registry falls back to
// DefaultRegistry.
func GetOrRegisterHistogram(name string, r Registry, s Sample) Histogram {
	if nil == r {
		r = DefaultRegistry
	}
	return r.GetOrRegister(name, func() Histogram { return NewHistogram(s) }).(Histogram)
}
// NewHistogram constructs a new StandardHistogram from a Sample.
// When UseNilMetrics is set, a no-op NilHistogram is returned instead.
func NewHistogram(s Sample) Histogram {
	if UseNilMetrics {
		return NilHistogram{}
	}
	return &StandardHistogram{sample: s}
}
// NewRegisteredHistogram constructs and registers a new StandardHistogram from
// a Sample. A nil Registry falls back to DefaultRegistry.
func NewRegisteredHistogram(name string, r Registry, s Sample) Histogram {
	c := NewHistogram(s)
	if nil == r {
		r = DefaultRegistry
	}
	r.Register(name, c)
	return c
}
// HistogramSnapshot is a read-only copy of another Histogram.
// Mutating methods (Clear, Update) panic.
type HistogramSnapshot struct {
	sample *SampleSnapshot
}
// Clear panics.
func (*HistogramSnapshot) Clear() {
	panic("Clear called on a HistogramSnapshot")
}
// Count returns the number of samples recorded at the time the snapshot was
// taken.
func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() }
// Max returns the maximum value in the sample at the time the snapshot was
// taken.
func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() }
// Mean returns the mean of the values in the sample at the time the snapshot
// was taken.
func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() }
// Min returns the minimum value in the sample at the time the snapshot was
// taken.
func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() }
// Percentile returns an arbitrary percentile of values in the sample at the
// time the snapshot was taken.
func (h *HistogramSnapshot) Percentile(p float64) float64 {
	return h.sample.Percentile(p)
}
// Percentiles returns a slice of arbitrary percentiles of values in the sample
// at the time the snapshot was taken.
func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 {
	return h.sample.Percentiles(ps)
}
// Sample returns the Sample underlying the histogram.
func (h *HistogramSnapshot) Sample() Sample { return h.sample }
// Snapshot returns the snapshot.
func (h *HistogramSnapshot) Snapshot() Histogram { return h }
// StdDev returns the standard deviation of the values in the sample at the
// time the snapshot was taken.
func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() }
// Sum returns the sum in the sample at the time the snapshot was taken.
func (h *HistogramSnapshot) Sum() int64 { return h.sample.Sum() }
// Update panics.
func (*HistogramSnapshot) Update(int64) {
	panic("Update called on a HistogramSnapshot")
}
// Variance returns the variance of inputs at the time the snapshot was taken.
func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() }
// NilHistogram is a no-op Histogram, used when UseNilMetrics is enabled.
type NilHistogram struct{}
// Clear is a no-op.
func (NilHistogram) Clear() {}
// Count is a no-op.
func (NilHistogram) Count() int64 { return 0 }
// Max is a no-op.
func (NilHistogram) Max() int64 { return 0 }
// Mean is a no-op.
func (NilHistogram) Mean() float64 { return 0.0 }
// Min is a no-op.
func (NilHistogram) Min() int64 { return 0 }
// Percentile is a no-op.
func (NilHistogram) Percentile(p float64) float64 { return 0.0 }
// Percentiles is a no-op; it returns a zeroed slice matching len(ps).
func (NilHistogram) Percentiles(ps []float64) []float64 {
	return make([]float64, len(ps))
}
// Sample is a no-op.
func (NilHistogram) Sample() Sample { return NilSample{} }
// Snapshot is a no-op.
func (NilHistogram) Snapshot() Histogram { return NilHistogram{} }
// StdDev is a no-op.
func (NilHistogram) StdDev() float64 { return 0.0 }
// Sum is a no-op.
func (NilHistogram) Sum() int64 { return 0 }
// Update is a no-op.
func (NilHistogram) Update(v int64) {}
// Variance is a no-op.
func (NilHistogram) Variance() float64 { return 0.0 }
// StandardHistogram is the standard implementation of a Histogram and uses a
// Sample to bound its memory use. All statistics delegate to the Sample.
type StandardHistogram struct {
	sample Sample
}
// Clear clears the histogram and its sample.
func (h *StandardHistogram) Clear() { h.sample.Clear() }
// Count returns the number of samples recorded since the histogram was last
// cleared.
func (h *StandardHistogram) Count() int64 { return h.sample.Count() }
// Max returns the maximum value in the sample.
func (h *StandardHistogram) Max() int64 { return h.sample.Max() }
// Mean returns the mean of the values in the sample.
func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() }
// Min returns the minimum value in the sample.
func (h *StandardHistogram) Min() int64 { return h.sample.Min() }
// Percentile returns an arbitrary percentile of the values in the sample.
func (h *StandardHistogram) Percentile(p float64) float64 {
	return h.sample.Percentile(p)
}
// Percentiles returns a slice of arbitrary percentiles of the values in the
// sample.
func (h *StandardHistogram) Percentiles(ps []float64) []float64 {
	return h.sample.Percentiles(ps)
}
// Sample returns the Sample underlying the histogram.
func (h *StandardHistogram) Sample() Sample { return h.sample }
// Snapshot returns a read-only copy of the histogram.
func (h *StandardHistogram) Snapshot() Histogram {
	return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)}
}
// StdDev returns the standard deviation of the values in the sample.
func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() }
// Sum returns the sum in the sample.
func (h *StandardHistogram) Sum() int64 { return h.sample.Sum() }
// Update samples a new value.
func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) }
// Variance returns the variance of the values in the sample.
func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() }
package iso20022
// Description of the financial instrument.
// FinancialInstrumentAttributes46 is an ISO 20022 message component; the XML
// element names are carried in the struct tags.
type FinancialInstrumentAttributes46 struct {
	// Identifies the financial instrument.
	SecurityIdentification *SecurityIdentification14 `xml:"SctyId"`
	// Quantity of entitled intermediate securities based on the balance of underlying securities.
	Quantity *DecimalNumber `xml:"Qty,omitempty"`
	// Specifies whether terms of the event allow resale of the rights.
	RenounceableEntitlementStatusType *RenounceableEntitlementStatusTypeFormat1Choice `xml:"RnncblEntitlmntStsTp,omitempty"`
	// Specifies how fractions resulting from derived securities will be processed or how prorated decisions will be rounding, if provided with a pro ration rate.
	FractionDisposition *FractionDispositionType15Choice `xml:"FrctnDspstn,omitempty"`
	// Quantity of intermediate securities awarded for a given quantity of underlying security.
	IntermediateSecuritiesToUnderlyingRatio *QuantityToQuantityRatio1 `xml:"IntrmdtSctiesToUndrlygRatio,omitempty"`
	// Last reported/known price of a financial instrument in a market.
	MarketPrice *AmountPrice2 `xml:"MktPric,omitempty"`
	// Date on which an order expires or at which a privilege or offer terminates.
	ExpiryDate *DateFormat16Choice `xml:"XpryDt"`
	// Date of the posting (credit or debit) to the account.
	PostingDate *DateFormat16Choice `xml:"PstngDt"`
	// Period during which intermediate or outturn securities are tradable in a secondary market.
	TradingPeriod *Period4 `xml:"TradgPrd,omitempty"`
	// Balance of uninstructed position.
	UninstructedBalance *BalanceFormat1Choice `xml:"UinstdBal,omitempty"`
	// Balance of instructed position.
	InstructedBalance *BalanceFormat1Choice `xml:"InstdBal,omitempty"`
}
// AddSecurityIdentification allocates the SecurityIdentification component
// and returns it for further population.
func (f *FinancialInstrumentAttributes46) AddSecurityIdentification() *SecurityIdentification14 {
	f.SecurityIdentification = new(SecurityIdentification14)
	return f.SecurityIdentification
}
// SetQuantity sets Quantity from its string representation.
func (f *FinancialInstrumentAttributes46) SetQuantity(value string) {
	f.Quantity = (*DecimalNumber)(&value)
}
// AddRenounceableEntitlementStatusType allocates and returns the
// RenounceableEntitlementStatusType component.
func (f *FinancialInstrumentAttributes46) AddRenounceableEntitlementStatusType() *RenounceableEntitlementStatusTypeFormat1Choice {
	f.RenounceableEntitlementStatusType = new(RenounceableEntitlementStatusTypeFormat1Choice)
	return f.RenounceableEntitlementStatusType
}
// AddFractionDisposition allocates and returns the FractionDisposition component.
func (f *FinancialInstrumentAttributes46) AddFractionDisposition() *FractionDispositionType15Choice {
	f.FractionDisposition = new(FractionDispositionType15Choice)
	return f.FractionDisposition
}
// AddIntermediateSecuritiesToUnderlyingRatio allocates and returns the
// IntermediateSecuritiesToUnderlyingRatio component.
func (f *FinancialInstrumentAttributes46) AddIntermediateSecuritiesToUnderlyingRatio() *QuantityToQuantityRatio1 {
	f.IntermediateSecuritiesToUnderlyingRatio = new(QuantityToQuantityRatio1)
	return f.IntermediateSecuritiesToUnderlyingRatio
}
// AddMarketPrice allocates and returns the MarketPrice component.
func (f *FinancialInstrumentAttributes46) AddMarketPrice() *AmountPrice2 {
	f.MarketPrice = new(AmountPrice2)
	return f.MarketPrice
}
// AddExpiryDate allocates and returns the ExpiryDate component.
func (f *FinancialInstrumentAttributes46) AddExpiryDate() *DateFormat16Choice {
	f.ExpiryDate = new(DateFormat16Choice)
	return f.ExpiryDate
}
// AddPostingDate allocates and returns the PostingDate component.
func (f *FinancialInstrumentAttributes46) AddPostingDate() *DateFormat16Choice {
	f.PostingDate = new(DateFormat16Choice)
	return f.PostingDate
}
// AddTradingPeriod allocates and returns the TradingPeriod component.
func (f *FinancialInstrumentAttributes46) AddTradingPeriod() *Period4 {
	f.TradingPeriod = new(Period4)
	return f.TradingPeriod
}
// AddUninstructedBalance allocates and returns the UninstructedBalance component.
func (f *FinancialInstrumentAttributes46) AddUninstructedBalance() *BalanceFormat1Choice {
	f.UninstructedBalance = new(BalanceFormat1Choice)
	return f.UninstructedBalance
}
// AddInstructedBalance allocates and returns the InstructedBalance component.
func (f *FinancialInstrumentAttributes46) AddInstructedBalance() *BalanceFormat1Choice {
	f.InstructedBalance = new(BalanceFormat1Choice)
	return f.InstructedBalance
}
package imagecashletter
import (
"encoding/json"
"fmt"
"strings"
"time"
"unicode/utf8"
)
// Errors specific to a ReturnDetailAddendumB Record
// ReturnDetailAddendumB is Record Type 33: an addendum to a Return Detail
// record carrying payor-bank-specific information.
type ReturnDetailAddendumB struct {
	// ID is a client defined string used as a reference to this record.
	ID string `json:"id"`
	// RecordType defines the type of record. Always "33" for this record;
	// unexported, restored by NewReturnDetailAddendumB/UnmarshalJSON.
	recordType string
	// PayorBankName is short name of the institution by or through which the item is payable.
	PayorBankName string `json:"payorBankName"`
	// AuxiliaryOnUs identifies a code used on commercial checks at the discretion of the payor bank.
	AuxiliaryOnUs string `json:"auxiliaryOnUs"`
	// PayorBankSequenceNumber is a number that identifies the item at the payor bank.
	PayorBankSequenceNumber string `json:"payorBankSequenceNumber"`
	// PayorBankBusinessDate is The year, month, and day the payor bank processed the Return Record.
	// Format: YYYYMMDD, where: YYYY year, MM month, DD day
	// Values:
	// YYYY 1993 through 9999
	// MM 01 through 12
	// DD 01 through 31
	PayorBankBusinessDate time.Time `json:"payorBankBusinessDate"`
	// PayorAccountName is the account name from payor bank records.
	PayorAccountName string `json:"payorAccountName"`
	// validator is composed for imagecashletter data validation
	validator
	// converters is composed for imagecashletter to golang Converters
	converters
}
// NewReturnDetailAddendumB returns a new ReturnDetailAddendumB with default values for non exported fields
func NewReturnDetailAddendumB() ReturnDetailAddendumB {
	rdAddendumB := ReturnDetailAddendumB{}
	rdAddendumB.setRecordType()
	return rdAddendumB
}
// setRecordType stamps the fixed record type "33". Safe on a nil receiver
// so callers never need a guard.
func (rdAddendumB *ReturnDetailAddendumB) setRecordType() {
	if rdAddendumB == nil {
		return
	}
	rdAddendumB.recordType = "33"
}
// Parse takes the input record string and parses the ReturnDetailAddendumB values
// from its fixed character positions. Records shorter than 80 runes are
// silently ignored (the struct is left unchanged).
func (rdAddendumB *ReturnDetailAddendumB) Parse(record string) {
	if utf8.RuneCountInString(record) < 80 {
		return // line too short
	}
	// Character position 1-2, Always "33"
	rdAddendumB.setRecordType()
	// 03-20
	rdAddendumB.PayorBankName = rdAddendumB.parseStringField(record[2:20])
	// 21-35
	rdAddendumB.AuxiliaryOnUs = rdAddendumB.parseStringField(record[20:35])
	// 36-50
	rdAddendumB.PayorBankSequenceNumber = rdAddendumB.parseStringField(record[35:50])
	// 51-58
	rdAddendumB.PayorBankBusinessDate = rdAddendumB.parseYYYYMMDDDate(record[50:58])
	// 59-80
	rdAddendumB.PayorAccountName = rdAddendumB.parseStringField(record[58:80])
}
// UnmarshalJSON decodes the JSON representation and restores the unexported
// recordType, which cannot round-trip through JSON.
func (rdAddendumB *ReturnDetailAddendumB) UnmarshalJSON(data []byte) error {
	// The Alias type strips this method to avoid infinite recursion into
	// UnmarshalJSON during decoding.
	type Alias ReturnDetailAddendumB
	aux := struct {
		*Alias
	}{
		(*Alias)(rdAddendumB),
	}
	if err := json.Unmarshal(data, &aux); err != nil {
		return err
	}
	rdAddendumB.setRecordType()
	return nil
}
// String writes the ReturnDetailAddendumB struct to a string.
// The output is the fixed-width 80-character record layout used by Parse.
func (rdAddendumB *ReturnDetailAddendumB) String() string {
	var buf strings.Builder
	buf.Grow(80)
	buf.WriteString(rdAddendumB.recordType)
	buf.WriteString(rdAddendumB.PayorBankNameField())
	buf.WriteString(rdAddendumB.AuxiliaryOnUsField())
	buf.WriteString(rdAddendumB.PayorBankSequenceNumberField())
	buf.WriteString(rdAddendumB.PayorBankBusinessDateField())
	buf.WriteString(rdAddendumB.PayorAccountNameField())
	return buf.String()
}
// Validate performs imagecashletter format rule checks on the record and returns an error if not Validated
// The first error encountered is returned and stops the parsing.
func (rdAddendumB *ReturnDetailAddendumB) Validate() error {
	if err := rdAddendumB.fieldInclusion(); err != nil {
		return err
	}
	if rdAddendumB.recordType != "33" {
		msg := fmt.Sprintf(msgRecordType, 33)
		return &FieldError{FieldName: "recordType", Value: rdAddendumB.recordType, Msg: msg}
	}
	if err := rdAddendumB.isAlphanumericSpecial(rdAddendumB.PayorBankName); err != nil {
		return &FieldError{FieldName: "PayorBankName", Value: rdAddendumB.PayorBankName, Msg: err.Error()}
	}
	if err := rdAddendumB.isAlphanumericSpecial(rdAddendumB.PayorAccountName); err != nil {
		return &FieldError{FieldName: "PayorAccountName", Value: rdAddendumB.PayorAccountName, Msg: err.Error()}
	}
	return nil
}
// fieldInclusion validate mandatory fields are not default values. If fields are
// invalid the Electronic Exchange will be returned.
func (rdAddendumB *ReturnDetailAddendumB) fieldInclusion() error {
	if rdAddendumB.recordType == "" {
		return &FieldError{FieldName: "recordType",
			Value: rdAddendumB.recordType,
			Msg:   msgFieldInclusion + ", did you use ReturnDetailAddendumB()?"}
	}
	// NOTE(review): this compares the padded 15-character field against a
	// space literal; confirm the literal has the full padded width so an
	// empty sequence number is actually detected.
	if rdAddendumB.PayorBankSequenceNumberField() == "               " {
		return &FieldError{FieldName: "PayorBankSequenceNumber",
			Value: rdAddendumB.PayorBankSequenceNumber,
			Msg:   msgFieldInclusion + ", did you use ReturnDetailAddendumB()?"}
	}
	if rdAddendumB.PayorBankBusinessDate.IsZero() {
		return &FieldError{FieldName: "PayorBankBusinessDate",
			Value: rdAddendumB.PayorBankBusinessDate.String(),
			Msg:   msgFieldInclusion + ", did you use ReturnDetailAddendumB()?"}
	}
	return nil
}
// PayorBankNameField gets the PayorBankName field, space-padded to 18 characters.
func (rdAddendumB *ReturnDetailAddendumB) PayorBankNameField() string {
	return rdAddendumB.alphaField(rdAddendumB.PayorBankName, 18)
}
// AuxiliaryOnUsField gets the AuxiliaryOnUs field, formatted to 15 characters
// via nbsmField (presumably numeric-blank-special MICR formatting — see the
// shared converters).
func (rdAddendumB *ReturnDetailAddendumB) AuxiliaryOnUsField() string {
	return rdAddendumB.nbsmField(rdAddendumB.AuxiliaryOnUs, 15)
}
// PayorBankSequenceNumberField gets the PayorBankSequenceNumber field,
// space-padded to 15 characters.
func (rdAddendumB *ReturnDetailAddendumB) PayorBankSequenceNumberField() string {
	return rdAddendumB.alphaField(rdAddendumB.PayorBankSequenceNumber, 15)
}
// PayorBankBusinessDateField gets the PayorBankBusinessDate in YYYYMMDD format
func (rdAddendumB *ReturnDetailAddendumB) PayorBankBusinessDateField() string {
	return rdAddendumB.formatYYYYMMDDDate(rdAddendumB.PayorBankBusinessDate)
}
// PayorAccountNameField gets the PayorAccountName field, space-padded to 22
// characters.
func (rdAddendumB *ReturnDetailAddendumB) PayorAccountNameField() string {
	return rdAddendumB.alphaField(rdAddendumB.PayorAccountName, 22)
}
package spatialindex
import (
"errors"
"math"
"sort"
"sync"
)
// Point represents an object in 2D space
type Point struct {
	ID uint64 // unique identifier supplied by the caller
	X, Y int64 // coordinates; the full int64 range is valid
}
// Grid is a statically set series of slices that Points get put into
type Grid struct {
	mtx *sync.RWMutex // guards buckets and allPoints
	buckets [][][]Point // precision x precision buckets; rows allocated lazily
	allPoints map[uint64]*Point // index of every stored point by ID
}
// NewGrid returns a Grid with precision top-level buckets per axis; the
// nested bucket slices are allocated lazily on first use. A precision below
// 1 yields nil.
func NewGrid(precision int) *Grid {
	if precision < 1 {
		return nil
	}
	g := &Grid{
		mtx:       new(sync.RWMutex),
		buckets:   make([][][]Point, precision),
		allPoints: map[uint64]*Point{},
	}
	return g
}
// Package level errors
var (
	// ErrDuplicateID is returned by Add when the id is already stored.
	ErrDuplicateID = errors.New("id already exists")
	// ErrInvalidID is returned when the referenced id is not in the grid.
	ErrInvalidID = errors.New("id does not exist")
	// ErrNotEnoughNeighbors is presumably returned by neighbor queries
	// defined elsewhere in this package when the requested count cannot be
	// satisfied.
	ErrNotEnoughNeighbors = errors.New("not enough neighbors")
)
// calculateBucket maps a coordinate pair onto bucket indices in
// [0, diameter). The full int64 axis is compressed by a fixed scale factor
// and shifted so the origin lands in the middle bucket.
func calculateBucket(x, y, diameter int64) (xb, yb int64) {
	// Scale chosen so that dividing any int64 by it stays within ±diameter/2.
	scale := 2 * (1 + (math.MaxInt64 / diameter))
	half := diameter / 2
	xb = half + x/scale
	yb = half + y/scale
	return xb, yb
}
// Add inserts a new Point with the given id and coordinates into the
// appropriate bucket. Returns ErrDuplicateID when the id is already stored.
func (g *Grid) Add(id uint64, x, y int64) error {
	g.mtx.Lock()
	defer g.mtx.Unlock()
	if _, ok := g.allPoints[id]; ok {
		return ErrDuplicateID
	}
	xb, yb := calculateBucket(x, y, int64(len(g.buckets)))
	// Allocate the bucket column lazily.
	if g.buckets[xb] == nil {
		g.buckets[xb] = make([][]Point, len(g.buckets))
	}
	p := Point{ID: id, X: x, Y: y}
	g.buckets[xb][yb] = append(g.buckets[xb][yb], p)
	g.allPoints[id] = &p
	return nil
}
// Move relocates an existing Point to (x, y), rebucketing it when the new
// position falls into a different bucket. Returns ErrInvalidID when no point
// with the given id exists; moving to the current position is a no-op.
//
// Fix vs. original: when the new position mapped to the SAME bucket as the
// old one, neither the bucket entry nor the allPoints entry was updated, so
// the point silently kept its stale coordinates. Both copies are now updated.
func (g *Grid) Move(id uint64, x, y int64) error {
	g.mtx.Lock()
	defer g.mtx.Unlock()
	point, exists := g.allPoints[id]
	if !exists {
		return ErrInvalidID
	}
	if point.X == x && point.Y == y {
		return nil
	}
	diameter := int64(len(g.buckets))
	xb1, yb1 := calculateBucket(point.X, point.Y, diameter)
	xb2, yb2 := calculateBucket(x, y, diameter)
	if xb1 == xb2 && yb1 == yb2 {
		// Same bucket: update the coordinates in place in both copies.
		bucket := g.buckets[xb1][yb1]
		for i := range bucket {
			if bucket[i].ID == id {
				bucket[i].X, bucket[i].Y = x, y
				break
			}
		}
		point.X, point.Y = x, y
		return nil
	}
	// Different bucket: remove from the old cell, insert into the new one.
	if g.buckets[xb2] == nil {
		g.buckets[xb2] = make([][]Point, diameter)
	}
	oldBucket := g.buckets[xb1][yb1]
	for i := range oldBucket {
		if oldBucket[i].ID == id {
			g.buckets[xb1][yb1] = append(oldBucket[:i], oldBucket[i+1:]...)
			break
		}
	}
	moved := Point{ID: id, X: x, Y: y}
	g.buckets[xb2][yb2] = append(g.buckets[xb2][yb2], moved)
	g.allPoints[id] = &moved
	return nil
}
// Delete removes the Point with the given id from its bucket and from the
// id index. Returns ErrInvalidID when the id is unknown.
func (g *Grid) Delete(id uint64) error {
	g.mtx.Lock()
	defer g.mtx.Unlock()
	point, ok := g.allPoints[id]
	if !ok {
		return ErrInvalidID
	}
	xb, yb := calculateBucket(point.X, point.Y, int64(len(g.buckets)))
	bucket := g.buckets[xb][yb]
	for i := range bucket {
		if bucket[i].ID == id {
			g.buckets[xb][yb] = append(bucket[:i], bucket[i+1:]...)
			break
		}
	}
	delete(g.allPoints, id)
	return nil
}
// Reset empties every bucket (keeping allocated bucket storage for reuse)
// and discards the id index.
func (g *Grid) Reset() {
	g.mtx.Lock()
	defer g.mtx.Unlock()
	for _, row := range g.buckets {
		for y, cell := range row {
			if len(cell) > 0 {
				// Truncate in place so the backing array is reused.
				row[y] = cell[:0]
			}
		}
	}
	g.allPoints = map[uint64]*Point{}
}
// Ring sides, in the order the neighbor searches scan them. center is the
// origin bucket itself; the rest name the eight directions around it.
const (
	center = iota
	bottom
	bottomLeft
	left
	topLeft
	top
	topRight
	right
	bottomRight
)

// adjustBucket offsets (xb, yb) by distance buckets in the direction named
// by side. The boolean result reports whether the shifted bucket lies inside
// the grid; out-of-range results are pinned to math.MinInt64 (fell below 0)
// or math.MaxInt64 (reached diameter or beyond). Panics on an unknown side.
func adjustBucket(side, xb, yb, distance, diameter int64) (int64, int64, bool) {
	var dx, dy int64
	switch side {
	case center:
		// stay put
	case bottom:
		dy = -1
	case bottomLeft:
		dx, dy = -1, -1
	case left:
		dx = -1
	case topLeft:
		dx, dy = -1, 1
	case top:
		dy = 1
	case topRight:
		dx, dy = 1, 1
	case right:
		dx = 1
	case bottomRight:
		dx, dy = 1, -1
	default:
		panic("InvalidParameter")
	}
	xb += dx * distance
	yb += dy * distance
	if xb < 0 || yb < 0 {
		return math.MinInt64, math.MinInt64, false
	}
	if xb >= diameter || yb >= diameter {
		return math.MaxInt64, math.MaxInt64, false
	}
	return xb, yb, true
}
// getClosestPoint scans square shells of buckets around originPoint,
// widening the radius one bucket at a time, and returns the Point with the
// smallest Euclidean distance found in the first shell that contains any
// candidate. When checkID is true the point carrying originPoint's own ID
// is skipped (NearestNeighbor semantics); when false an exact-position
// match may be returned. Returns nil when nothing is found.
// Caller must hold g.mtx (at least for reading).
func (g *Grid) getClosestPoint(originPoint *Point, checkID bool) *Point {
	var (
		valid bool
		bestPoint, otherPoint *Point
		xb, yb, side int64
		hypotenuse, bestHypotenuse float64
	)
	xbStart, ybStart := calculateBucket(originPoint.X, originPoint.Y, int64(len(g.buckets)))
	for distance := int64(1); distance < int64(len(g.buckets)); distance++ {
		for side = 0; side < 9; side++ {
			// The center bucket only needs inspecting on the first pass;
			// later distances only look at the ring border.
			if side == 0 && distance != 1 {
				continue
			}
			xb, yb, valid = adjustBucket(side, xbStart, ybStart, distance, int64(len(g.buckets)))
			if !valid || g.buckets[xb] == nil || g.buckets[xb][yb] == nil {
				continue
			}
			for i := range g.buckets[xb][yb] {
				otherPoint = &g.buckets[xb][yb][i]
				if checkID && otherPoint.ID == originPoint.ID {
					continue
				}
				hypotenuse = math.Hypot(float64(originPoint.X-otherPoint.X),
					float64(originPoint.Y-otherPoint.Y))
				// bestHypotenuse == 0 doubles as the "nothing found yet"
				// sentinel; a genuine zero-distance hit still wins because
				// the second clause is then true as well.
				if hypotenuse < bestHypotenuse || bestHypotenuse == 0 {
					bestHypotenuse = hypotenuse
					bestPoint = otherPoint
				}
			}
		}
		// NOTE(review): stopping at the first non-empty shell is an
		// approximation — a point just inside the next shell can be closer
		// than a corner hit in this one. Presumably acceptable here; confirm.
		if bestPoint != nil {
			break
		}
	}
	return bestPoint
}
// ClosestPoint returns the Point nearest to (x, y), regardless of how far
// away it is; the result may even share the exact position. Returns
// ErrNotEnoughNeighbors when the grid holds no points in reach.
func (g *Grid) ClosestPoint(x, y int64) (Point, error) {
	probe := Point{X: x, Y: y}
	g.mtx.RLock()
	found := g.getClosestPoint(&probe, false)
	g.mtx.RUnlock()
	if found == nil {
		return Point{}, ErrNotEnoughNeighbors
	}
	return *found, nil
}
// NearestNeighbor returns the Point nearest to the point with the given id
// (excluding that point itself). Returns ErrInvalidID for an unknown id and
// ErrNotEnoughNeighbors when no other point is found.
func (g *Grid) NearestNeighbor(id uint64) (Point, error) {
	g.mtx.RLock()
	origin, ok := g.allPoints[id]
	if !ok {
		g.mtx.RUnlock()
		return Point{}, ErrInvalidID
	}
	nearest := g.getClosestPoint(origin, true)
	g.mtx.RUnlock()
	if nearest == nil {
		return Point{}, ErrNotEnoughNeighbors
	}
	return *nearest, nil
}
// distanceVectors pairs candidate Points with their distances from a common
// origin so they can be ordered by proximity via sort.Sort.
type distanceVectors struct {
	points    []Point
	distances []float64
}

// createDistanceVectors computes the Euclidean distance from origin to each
// entry of queryPoints and bundles both slices for sorting.
func createDistanceVectors(origin *Point, queryPoints []Point) distanceVectors {
	dv := distanceVectors{
		points:    queryPoints,
		distances: make([]float64, len(queryPoints)),
	}
	for i, qp := range queryPoints {
		dv.distances[i] = math.Hypot(float64(origin.X-qp.X),
			float64(origin.Y-qp.Y))
	}
	return dv
}

// Len implements sort.Interface.
func (dv distanceVectors) Len() int { return len(dv.distances) }

// Swap implements sort.Interface, keeping points and distances in lockstep.
func (dv distanceVectors) Swap(i, j int) {
	dv.points[i], dv.points[j] = dv.points[j], dv.points[i]
	dv.distances[i], dv.distances[j] = dv.distances[j], dv.distances[i]
}

// Less implements sort.Interface: nearer points sort first.
func (dv distanceVectors) Less(i, j int) bool { return dv.distances[i] < dv.distances[j] }
// NearestNeighbors returns multiple adjacent Points in order of proximity.
// If unable to fulfill the requested number it will return a slice containing
// an unspecified number of Points and a non-nill error value.
func (g *Grid) NearestNeighbors(id uint64, num int64) ([]Point, error) {
g.mtx.RLock()
origin, exists := g.allPoints[id]
if !exists {
g.mtx.RUnlock()
return []Point{}, ErrInvalidID
}
xbStart, ybStart := calculateBucket(origin.X, origin.Y, int64(len(g.buckets)))
var points []Point
if len(g.buckets[xbStart][ybStart]) > 1 {
points = make([]Point, 0, len(g.buckets[xbStart][ybStart])-1)
for _, obj := range g.buckets[xbStart][ybStart] {
if obj.ID != id {
points = append(points, obj)
}
}
}
distanceVectors := createDistanceVectors(origin, points)
sort.Sort(distanceVectors)
points = distanceVectors.points
if int64(len(points)) >= num {
return points[:num], nil
}
var (
valid bool
otherPoints []Point
xb, yb, side int64
)
for distance := int64(1); distance < int64(len(g.buckets)); distance++ {
otherPoints = []Point{}
for side = 1; side < 9; side++ {
xb, yb, valid = adjustBucket(side, xbStart, ybStart, distance, int64(len(g.buckets)))
if !valid {
continue
}
if len(g.buckets[xb][yb]) > 0 {
otherPoints = append(otherPoints, g.buckets[xb][yb]...)
}
}
distanceVectors = createDistanceVectors(origin, otherPoints)
sort.Sort(distanceVectors)
points = append(points, distanceVectors.points...)
if int64(len(points)) >= num {
g.mtx.RUnlock()
return points[:num], nil
}
}
g.mtx.RUnlock()
if len(points) != 0 {
return points, ErrNotEnoughNeighbors
}
return points, ErrNotEnoughNeighbors
} | grid.go | 0.631594 | 0.529932 | grid.go | starcoder |
package gkgen
import (
"errors"
"fmt"
"reflect"
"strconv"
"strings"
)
// SetValidator generates code that will verify a fields does Set one of an allowed set of values
// The SetValidator will look at the field or the dereferenced value of the field
// nil values for a field are not considered invalid
type SetValidator struct {
name string
}
// NewSetValidator holds the SetValidator state
func NewSetValidator() *SetValidator {
return &SetValidator{name: "Set"}
}
// Generate generates validation code
func (s *SetValidator) Generate(sType reflect.Type, fieldStruct reflect.StructField, params []string) (string, error) {
if len(params) == 0 {
return "", errors.New("Set validation requires at least 1 parameter")
}
field := fieldStruct.Type
switch field.Kind() {
case reflect.String:
conditions := make([]string, len(params))
for i, param := range params {
conditions[i] = fmt.Sprintf(`s.%[1]s == "%[2]s"`, fieldStruct.Name, param)
}
condition := strings.Join(conditions, " || ")
return fmt.Sprintf(`
if s.%[1]s != "" && !(%[2]s) {
errors%[1]s = append(errors%[1]s, errors.New("%[1]s must equal %[3]s"))
}`, fieldStruct.Name, condition, strings.Join(params, " or ")), nil
case reflect.Int, reflect.Int32, reflect.Int64:
conditions := make([]string, len(params))
for i, param := range params {
intParam, err := strconv.Atoi(param)
if err != nil {
return "", fmt.Errorf("Expected a set of Ints, but got param %q", param)
}
conditions[i] = fmt.Sprintf(`s.%[1]s == %[2]d`, fieldStruct.Name, intParam)
}
condition := strings.Join(conditions, " || ")
return fmt.Sprintf(`
if s.%[1]s != 0 && !(%[2]s) {
errors%[1]s = append(errors%[1]s, errors.New("%[1]s must equal %[3]s"))
}`, fieldStruct.Name, condition, strings.Join(params, " or ")), nil
case reflect.Ptr:
field = field.Elem()
switch field.Kind() {
case reflect.String:
conditions := make([]string, len(params))
for i, param := range params {
conditions[i] = fmt.Sprintf(`*s.%[1]s == "%[2]s"`, fieldStruct.Name, param)
}
condition := strings.Join(conditions, " || ")
return fmt.Sprintf(`
if s.%[1]s != nil && !(%[2]s) {
errors%[1]s = append(errors%[1]s, errors.New("%[1]s must equal %[3]s"))
}`, fieldStruct.Name, condition, strings.Join(params, " or ")), nil
case reflect.Int, reflect.Int32, reflect.Int64:
conditions := make([]string, len(params))
for i, param := range params {
intParam, err := strconv.Atoi(param)
if err != nil {
return "", fmt.Errorf("Expected a set of Ints, but got param %q", param)
}
conditions[i] = fmt.Sprintf(`*s.%[1]s == %[2]d`, fieldStruct.Name, intParam)
}
condition := strings.Join(conditions, " || ")
return fmt.Sprintf(`
if s.%[1]s != nil && !(%[2]s) {
errors%[1]s = append(errors%[1]s, errors.New("%[1]s must equal %[3]s"))
}`, fieldStruct.Name, condition, strings.Join(params, " or ")), nil
default:
return "", fmt.Errorf("Set does not work on type '%s'", field.Kind())
}
default:
return "", fmt.Errorf("Set does not work on type '%s'", field.Kind())
}
}
// Name returns the validator's registered name ("Set"), giving read access
// to the unexported name field.
func (s *SetValidator) Name() string {
	return s.name
}
package html
import "github.com/guillermo/golazy/lazyview/nodes"
// A creates a new a (anchor) element.
func A(options ...interface{}) nodes.Element {
	return nodes.NewElement("a", options...)
}
// Abbr creates a new abbr (abbreviation) element.
func Abbr(options ...interface{}) nodes.Element {
	return nodes.NewElement("abbr", options...)
}
// Acronym creates a new acronym element.
func Acronym(options ...interface{}) nodes.Element {
	return nodes.NewElement("acronym", options...)
}
// Address Creates a new address element
func Address(options ...interface{}) nodes.Element {
return nodes.NewElement("address", options...)
}
// Applet Creates a new applet element
func Applet(options ...interface{}) nodes.Element {
return nodes.NewElement("applet", options...)
}
// Area Creates a new area element
func Area(options ...interface{}) nodes.Element {
return nodes.NewElement("area", options...)
}
// Article Creates a new article element
func Article(options ...interface{}) nodes.Element {
return nodes.NewElement("article", options...)
}
// Aside Creates a new aside element
func Aside(options ...interface{}) nodes.Element {
return nodes.NewElement("aside", options...)
}
// Audio Creates a new audio element
func Audio(options ...interface{}) nodes.Element {
return nodes.NewElement("audio", options...)
}
// B Creates a new b element
func B(options ...interface{}) nodes.Element {
return nodes.NewElement("b", options...)
}
// Base Creates a new base element
func Base(options ...interface{}) nodes.Element {
return nodes.NewElement("base", options...)
}
// Basefont Creates a new basefont element
func Basefont(options ...interface{}) nodes.Element {
return nodes.NewElement("basefont", options...)
}
// Bb Creates a new bb element
func Bb(options ...interface{}) nodes.Element {
return nodes.NewElement("bb", options...)
}
// Bdi Creates a new bdi element
func Bdi(options ...interface{}) nodes.Element {
return nodes.NewElement("bdi", options...)
}
// Bdo Creates a new bdo element
func Bdo(options ...interface{}) nodes.Element {
return nodes.NewElement("bdo", options...)
}
// Bgsound Creates a new bgsound element
func Bgsound(options ...interface{}) nodes.Element {
return nodes.NewElement("bgsound", options...)
}
// Big Creates a new big element
func Big(options ...interface{}) nodes.Element {
return nodes.NewElement("big", options...)
}
// Blink Creates a new blink element
func Blink(options ...interface{}) nodes.Element {
return nodes.NewElement("blink", options...)
}
// Blockquote Creates a new blockquote element
func Blockquote(options ...interface{}) nodes.Element {
return nodes.NewElement("blockquote", options...)
}
// Body Creates a new body element
func Body(options ...interface{}) nodes.Element {
return nodes.NewElement("body", options...)
}
// Br Creates a new br element
func Br(options ...interface{}) nodes.Element {
return nodes.NewElement("br", options...)
}
// Button Creates a new button element
func Button(options ...interface{}) nodes.Element {
return nodes.NewElement("button", options...)
}
// Canvas Creates a new canvas element
func Canvas(options ...interface{}) nodes.Element {
return nodes.NewElement("canvas", options...)
}
// Caption Creates a new caption element
func Caption(options ...interface{}) nodes.Element {
return nodes.NewElement("caption", options...)
}
// Center Creates a new center element
func Center(options ...interface{}) nodes.Element {
return nodes.NewElement("center", options...)
}
// Cite Creates a new cite element
func Cite(options ...interface{}) nodes.Element {
return nodes.NewElement("cite", options...)
}
// Code Creates a new code element
func Code(options ...interface{}) nodes.Element {
return nodes.NewElement("code", options...)
}
// Col Creates a new col element
func Col(options ...interface{}) nodes.Element {
return nodes.NewElement("col", options...)
}
// Colgroup Creates a new colgroup element
func Colgroup(options ...interface{}) nodes.Element {
return nodes.NewElement("colgroup", options...)
}
// Command Creates a new command element
func Command(options ...interface{}) nodes.Element {
return nodes.NewElement("command", options...)
}
// Content Creates a new content element
func Content(options ...interface{}) nodes.Element {
return nodes.NewElement("content", options...)
}
// Data Creates a new data element
func Data(options ...interface{}) nodes.Element {
return nodes.NewElement("data", options...)
}
// Datagrid Creates a new datagrid element
func Datagrid(options ...interface{}) nodes.Element {
return nodes.NewElement("datagrid", options...)
}
// Datalist Creates a new datalist element
func Datalist(options ...interface{}) nodes.Element {
return nodes.NewElement("datalist", options...)
}
// Dd Creates a new dd element
func Dd(options ...interface{}) nodes.Element {
return nodes.NewElement("dd", options...)
}
// Del Creates a new del element
func Del(options ...interface{}) nodes.Element {
return nodes.NewElement("del", options...)
}
// Details Creates a new details element
func Details(options ...interface{}) nodes.Element {
return nodes.NewElement("details", options...)
}
// Dfn Creates a new dfn element
func Dfn(options ...interface{}) nodes.Element {
return nodes.NewElement("dfn", options...)
}
// Dialog Creates a new dialog element
func Dialog(options ...interface{}) nodes.Element {
return nodes.NewElement("dialog", options...)
}
// Dir Creates a new dir element
func Dir(options ...interface{}) nodes.Element {
return nodes.NewElement("dir", options...)
}
// Div Creates a new div element
func Div(options ...interface{}) nodes.Element {
return nodes.NewElement("div", options...)
}
// Dl Creates a new dl element
func Dl(options ...interface{}) nodes.Element {
return nodes.NewElement("dl", options...)
}
// Dt Creates a new dt element
func Dt(options ...interface{}) nodes.Element {
return nodes.NewElement("dt", options...)
}
// Em Creates a new em element
func Em(options ...interface{}) nodes.Element {
return nodes.NewElement("em", options...)
}
// Embed Creates a new embed element
func Embed(options ...interface{}) nodes.Element {
return nodes.NewElement("embed", options...)
}
// Eventsource Creates a new eventsource element
func Eventsource(options ...interface{}) nodes.Element {
return nodes.NewElement("eventsource", options...)
}
// Fieldset Creates a new fieldset element
func Fieldset(options ...interface{}) nodes.Element {
return nodes.NewElement("fieldset", options...)
}
// Figcaption Creates a new figcaption element
func Figcaption(options ...interface{}) nodes.Element {
return nodes.NewElement("figcaption", options...)
}
// Figure Creates a new figure element
func Figure(options ...interface{}) nodes.Element {
return nodes.NewElement("figure", options...)
}
// Font Creates a new font element
func Font(options ...interface{}) nodes.Element {
return nodes.NewElement("font", options...)
}
// Footer Creates a new footer element
func Footer(options ...interface{}) nodes.Element {
return nodes.NewElement("footer", options...)
}
// Form Creates a new form element
func Form(options ...interface{}) nodes.Element {
return nodes.NewElement("form", options...)
}
// Frame Creates a new frame element
func Frame(options ...interface{}) nodes.Element {
return nodes.NewElement("frame", options...)
}
// Frameset Creates a new frameset element
func Frameset(options ...interface{}) nodes.Element {
return nodes.NewElement("frameset", options...)
}
// H1 Creates a new h1 element
func H1(options ...interface{}) nodes.Element {
return nodes.NewElement("h1", options...)
}
// Head Creates a new head element
func Head(options ...interface{}) nodes.Element {
return nodes.NewElement("head", options...)
}
// Header Creates a new header element
func Header(options ...interface{}) nodes.Element {
return nodes.NewElement("header", options...)
}
// Hgroup Creates a new hgroup element
func Hgroup(options ...interface{}) nodes.Element {
return nodes.NewElement("hgroup", options...)
}
// Hr Creates a new hr element
func Hr(options ...interface{}) nodes.Element {
return nodes.NewElement("hr", options...)
}
// Html Creates a new html element
func Html(options ...interface{}) nodes.Element {
return nodes.NewElement("html", options...)
}
// I Creates a new i element
func I(options ...interface{}) nodes.Element {
return nodes.NewElement("i", options...)
}
// Iframe Creates a new iframe element
func Iframe(options ...interface{}) nodes.Element {
return nodes.NewElement("iframe", options...)
}
// Image Creates a new image element
func Image(options ...interface{}) nodes.Element {
return nodes.NewElement("image", options...)
}
// Img Creates a new img element
func Img(options ...interface{}) nodes.Element {
return nodes.NewElement("img", options...)
}
// Input Creates a new input element
func Input(options ...interface{}) nodes.Element {
return nodes.NewElement("input", options...)
}
// Ins Creates a new ins element
func Ins(options ...interface{}) nodes.Element {
return nodes.NewElement("ins", options...)
}
// Isindex Creates a new isindex element
func Isindex(options ...interface{}) nodes.Element {
return nodes.NewElement("isindex", options...)
}
// Kbd Creates a new kbd element
func Kbd(options ...interface{}) nodes.Element {
return nodes.NewElement("kbd", options...)
}
// Keygen Creates a new keygen element
func Keygen(options ...interface{}) nodes.Element {
return nodes.NewElement("keygen", options...)
}
// Label Creates a new label element
func Label(options ...interface{}) nodes.Element {
return nodes.NewElement("label", options...)
}
// Legend Creates a new legend element
func Legend(options ...interface{}) nodes.Element {
return nodes.NewElement("legend", options...)
}
// Li Creates a new li element
func Li(options ...interface{}) nodes.Element {
return nodes.NewElement("li", options...)
}
// Link Creates a new link element
func Link(options ...interface{}) nodes.Element {
return nodes.NewElement("link", options...)
}
// Main Creates a new main element
func Main(options ...interface{}) nodes.Element {
return nodes.NewElement("main", options...)
}
// Map Creates a new map element
func Map(options ...interface{}) nodes.Element {
return nodes.NewElement("map", options...)
}
// Mark Creates a new mark element
func Mark(options ...interface{}) nodes.Element {
return nodes.NewElement("mark", options...)
}
// Marquee Creates a new marquee element
func Marquee(options ...interface{}) nodes.Element {
return nodes.NewElement("marquee", options...)
}
// Menu Creates a new menu element
func Menu(options ...interface{}) nodes.Element {
return nodes.NewElement("menu", options...)
}
// Menuitem Creates a new menuitem element
func Menuitem(options ...interface{}) nodes.Element {
return nodes.NewElement("menuitem", options...)
}
// Meta Creates a new meta element
func Meta(options ...interface{}) nodes.Element {
return nodes.NewElement("meta", options...)
}
// Meter Creates a new meter element
func Meter(options ...interface{}) nodes.Element {
return nodes.NewElement("meter", options...)
}
// Nav Creates a new nav element
func Nav(options ...interface{}) nodes.Element {
return nodes.NewElement("nav", options...)
}
// Nobr Creates a new nobr element
func Nobr(options ...interface{}) nodes.Element {
return nodes.NewElement("nobr", options...)
}
// Noembed Creates a new noembed element
func Noembed(options ...interface{}) nodes.Element {
return nodes.NewElement("noembed", options...)
}
// Noframes Creates a new noframes element
func Noframes(options ...interface{}) nodes.Element {
return nodes.NewElement("noframes", options...)
}
// Noscript Creates a new noscript element
func Noscript(options ...interface{}) nodes.Element {
return nodes.NewElement("noscript", options...)
}
// Object Creates a new object element
func Object(options ...interface{}) nodes.Element {
return nodes.NewElement("object", options...)
}
// Ol Creates a new ol element
func Ol(options ...interface{}) nodes.Element {
return nodes.NewElement("ol", options...)
}
// Optgroup Creates a new optgroup element
func Optgroup(options ...interface{}) nodes.Element {
return nodes.NewElement("optgroup", options...)
}
// Option Creates a new option element
func Option(options ...interface{}) nodes.Element {
return nodes.NewElement("option", options...)
}
// Output Creates a new output element
func Output(options ...interface{}) nodes.Element {
return nodes.NewElement("output", options...)
}
// P Creates a new p element
func P(options ...interface{}) nodes.Element {
return nodes.NewElement("p", options...)
}
// Param Creates a new param element
func Param(options ...interface{}) nodes.Element {
return nodes.NewElement("param", options...)
}
// Picture Creates a new picture element
func Picture(options ...interface{}) nodes.Element {
return nodes.NewElement("picture", options...)
}
// Plaintext Creates a new plaintext element
func Plaintext(options ...interface{}) nodes.Element {
return nodes.NewElement("plaintext", options...)
}
// Portal Creates a new portal element
func Portal(options ...interface{}) nodes.Element {
return nodes.NewElement("portal", options...)
}
// Pre Creates a new pre element
func Pre(options ...interface{}) nodes.Element {
return nodes.NewElement("pre", options...)
}
// Progress Creates a new progress element
func Progress(options ...interface{}) nodes.Element {
return nodes.NewElement("progress", options...)
}
// Q Creates a new q element
func Q(options ...interface{}) nodes.Element {
return nodes.NewElement("q", options...)
}
// Rb Creates a new rb element
func Rb(options ...interface{}) nodes.Element {
return nodes.NewElement("rb", options...)
}
// Rp Creates a new rp element
func Rp(options ...interface{}) nodes.Element {
return nodes.NewElement("rp", options...)
}
// Rt Creates a new rt element
func Rt(options ...interface{}) nodes.Element {
return nodes.NewElement("rt", options...)
}
// Rtc Creates a new rtc element
func Rtc(options ...interface{}) nodes.Element {
return nodes.NewElement("rtc", options...)
}
// Ruby Creates a new ruby element
func Ruby(options ...interface{}) nodes.Element {
return nodes.NewElement("ruby", options...)
}
// S Creates a new s element
func S(options ...interface{}) nodes.Element {
return nodes.NewElement("s", options...)
}
// Samp Creates a new samp element
func Samp(options ...interface{}) nodes.Element {
return nodes.NewElement("samp", options...)
}
// Script Creates a new script element
func Script(options ...interface{}) nodes.Element {
return nodes.NewElement("script", options...)
}
// Section Creates a new section element
func Section(options ...interface{}) nodes.Element {
return nodes.NewElement("section", options...)
}
// Select Creates a new select element
func Select(options ...interface{}) nodes.Element {
return nodes.NewElement("select", options...)
}
// Shadow Creates a new shadow element
func Shadow(options ...interface{}) nodes.Element {
return nodes.NewElement("shadow", options...)
}
// Slot Creates a new slot element
func Slot(options ...interface{}) nodes.Element {
return nodes.NewElement("slot", options...)
}
// Small Creates a new small element
func Small(options ...interface{}) nodes.Element {
return nodes.NewElement("small", options...)
}
// Source Creates a new source element
func Source(options ...interface{}) nodes.Element {
return nodes.NewElement("source", options...)
}
// Spacer Creates a new spacer element
func Spacer(options ...interface{}) nodes.Element {
return nodes.NewElement("spacer", options...)
}
// Span Creates a new span element
func Span(options ...interface{}) nodes.Element {
return nodes.NewElement("span", options...)
}
// Strike Creates a new strike element
func Strike(options ...interface{}) nodes.Element {
return nodes.NewElement("strike", options...)
}
// Strong Creates a new strong element
func Strong(options ...interface{}) nodes.Element {
return nodes.NewElement("strong", options...)
}
// Style Creates a new style element
func Style(options ...interface{}) nodes.Element {
return nodes.NewElement("style", options...)
}
// Sub Creates a new sub element
func Sub(options ...interface{}) nodes.Element {
return nodes.NewElement("sub", options...)
}
// Summary Creates a new summary element
func Summary(options ...interface{}) nodes.Element {
return nodes.NewElement("summary", options...)
}
// Sup Creates a new sup element
func Sup(options ...interface{}) nodes.Element {
return nodes.NewElement("sup", options...)
}
// Table Creates a new table element
func Table(options ...interface{}) nodes.Element {
return nodes.NewElement("table", options...)
}
// Tbody Creates a new tbody element
func Tbody(options ...interface{}) nodes.Element {
return nodes.NewElement("tbody", options...)
}
// Td Creates a new td element
func Td(options ...interface{}) nodes.Element {
return nodes.NewElement("td", options...)
}
// Template Creates a new template element
func Template(options ...interface{}) nodes.Element {
return nodes.NewElement("template", options...)
}
// Textarea Creates a new textarea element
func Textarea(options ...interface{}) nodes.Element {
return nodes.NewElement("textarea", options...)
}
// Tfoot Creates a new tfoot element
func Tfoot(options ...interface{}) nodes.Element {
return nodes.NewElement("tfoot", options...)
}
// Th Creates a new th element
func Th(options ...interface{}) nodes.Element {
return nodes.NewElement("th", options...)
}
// Thead Creates a new thead element
func Thead(options ...interface{}) nodes.Element {
return nodes.NewElement("thead", options...)
}
// Time Creates a new time element
func Time(options ...interface{}) nodes.Element {
return nodes.NewElement("time", options...)
}
// Title Creates a new title element
func Title(options ...interface{}) nodes.Element {
return nodes.NewElement("title", options...)
}
// Tr Creates a new tr element
func Tr(options ...interface{}) nodes.Element {
return nodes.NewElement("tr", options...)
}
// Track Creates a new track element
func Track(options ...interface{}) nodes.Element {
return nodes.NewElement("track", options...)
}
// Tt Creates a new tt element
func Tt(options ...interface{}) nodes.Element {
return nodes.NewElement("tt", options...)
}
// U Creates a new u element
func U(options ...interface{}) nodes.Element {
return nodes.NewElement("u", options...)
}
// Ul Creates a new ul element
func Ul(options ...interface{}) nodes.Element {
return nodes.NewElement("ul", options...)
}
// Var Creates a new var element
func Var(options ...interface{}) nodes.Element {
return nodes.NewElement("var", options...)
}
// Video Creates a new video element
func Video(options ...interface{}) nodes.Element {
return nodes.NewElement("video", options...)
}
// Wbr Creates a new wbr element
func Wbr(options ...interface{}) nodes.Element {
return nodes.NewElement("wbr", options...)
}
// Wbra Creates a new wbra element
func Wbra(options ...interface{}) nodes.Element {
return nodes.NewElement("wbra", options...)
}
// Xmp Creates a new xmp element
func Xmp(options ...interface{}) nodes.Element {
return nodes.NewElement("xmp", options...)
} | lazyview/html/autotags.go | 0.844281 | 0.436562 | autotags.go | starcoder |
package slicex
import (
"constraints"
"errors"
"github.com/hsiafan/glow/v2/container/optional"
"sort"
)
// Copy returns a shallow copy of s. The result's capacity equals its
// length, so Copy also serves to trim an oversized backing array; a nil
// input yields an empty, non-nil slice.
func Copy[T any](s []T) []T {
	return append(make([]T, 0, len(s)), s...)
}
// InvalidIndexErr is the panic value used when an index lies outside the
// valid range for the operation.
// NOTE(review): Go convention would name this ErrInvalidIndex, but renaming
// the exported identifier would break callers.
var InvalidIndexErr = errors.New("invalid index")

// Insert inserts value at position index and returns the updated slice.
// Valid indices are [0, len(slice)]; anything else panics with
// InvalidIndexErr. The input's backing array may be reused.
func Insert[T any](slice []T, index int, value T) []T {
	if index < 0 || index > len(slice) {
		panic(InvalidIndexErr)
	}
	if index == len(slice) {
		return append(slice, value)
	}
	slice = append(slice, value)         // grow by one element
	copy(slice[index+1:], slice[index:]) // shift the tail right
	slice[index] = value
	return slice
}

// RemoveAt removes the element at position index and returns the updated
// slice. Valid indices are [0, len(slice)-1]; anything else panics with
// InvalidIndexErr.
func RemoveAt[T any](slice []T, index int) []T {
	if index < 0 || index >= len(slice) {
		panic(InvalidIndexErr)
	}
	return append(slice[:index], slice[index+1:]...)
}
// RemoveBy removes every element for which predicate returns true,
// filtering in place: the input's backing array is reused and the input
// slice should not be used afterwards.
func RemoveBy[T any](slice []T, predicate func(value T) bool) []T {
	kept := slice[:0]
	for _, v := range slice {
		if !predicate(v) {
			kept = append(kept, v)
		}
	}
	return kept
}
// Remove removes every element equal to value, filtering in place: the
// input's backing array is reused and the input slice should not be used
// afterwards.
//
// Fix vs. original: the comparison was inverted (it kept elements equal to
// value and discarded everything else, the opposite of the documented
// behavior and of RemoveBy's semantics).
func Remove[T comparable](slice []T, value T) []T {
	kept := slice[:0]
	for _, v := range slice {
		if v != value {
			kept = append(kept, v)
		}
	}
	return kept
}
// Contains reports whether value occurs in s.
func Contains[T comparable](s []T, value T) bool {
	for i := range s {
		if s[i] == value {
			return true
		}
	}
	return false
}
// Find returns the index of the first occurrence of value in s, or -1 if
// value is absent.
func Find[T comparable](s []T, value T) int {
	for i := range s {
		if s[i] == value {
			return i
		}
	}
	return -1
}
// FindBy returns the index of the first element satisfying predicate, or -1
// if none does.
func FindBy[T any](s []T, predicate func(v T) bool) int {
	for i := range s {
		if predicate(s[i]) {
			return i
		}
	}
	return -1
}
// FindLast returns the index of the last occurrence of value in s, or -1 if
// value is absent.
func FindLast[T comparable](s []T, value T) int {
	for i := len(s) - 1; i >= 0; i-- {
		if s[i] == value {
			return i
		}
	}
	return -1
}
// Select returns a new slice containing the elements of s that satisfy
// predicate; the result is nil when nothing matches.
func Select[T any](s []T, predicate func(v T) bool) []T {
	var kept []T
	for _, item := range s {
		if predicate(item) {
			kept = append(kept, item)
		}
	}
	return kept
}
// Partition splits s into two slices: matched holds the elements for which
// predicate returns true, mismatched the rest. Either result may be nil.
func Partition[T any](s []T, predicate func(v T) bool) (matched []T, mismatched []T) {
	for _, item := range s {
		if predicate(item) {
			matched = append(matched, item)
			continue
		}
		mismatched = append(mismatched, item)
	}
	return matched, mismatched
}
// Convert maps each element of s through convert and returns the results in
// a freshly allocated slice of the same length.
func Convert[T any, R any](s []T, convert func(v T) R) []R {
	out := make([]R, 0, len(s))
	for _, item := range s {
		out = append(out, convert(item))
	}
	return out
}
// orderedSlice adapts a slice of naturally ordered values to sort.Interface,
// comparing with <.
type orderedSlice[T constraints.Ordered] []T
func (o orderedSlice[T]) Len() int {
	return len(o)
}
func (o orderedSlice[T]) Less(i, j int) bool {
	return o[i] < o[j]
}
func (o orderedSlice[T]) Swap(i, j int) {
	o[i], o[j] = o[j], o[i]
}
// Sort sorts the slice in place
func Sort[T constraints.Ordered](s []T) {
	sort.Sort(orderedSlice[T](s))
}
// SortStable sorts the slice in place, with stable sort algorithm
func SortStable[T constraints.Ordered](s []T) {
	sort.Stable(orderedSlice[T](s))
}
// orderedSliceFunc adapts a slice plus a caller-supplied less function to
// sort.Interface.
type orderedSliceFunc[T any] struct {
	slice []T
	less  func(v1, v2 T) bool
}
func (o orderedSliceFunc[T]) Len() int {
	return len(o.slice)
}
func (o orderedSliceFunc[T]) Less(i, j int) bool {
	return o.less(o.slice[i], o.slice[j])
}
func (o orderedSliceFunc[T]) Swap(i, j int) {
	o.slice[i], o.slice[j] = o.slice[j], o.slice[i]
}
// SortBy sorts the slice in place, a less function is specified to compare slice values.
func SortBy[T any](s []T, less func(v1, v2 T) bool) {
	sort.Sort(orderedSliceFunc[T]{
		slice: s,
		less:  less,
	})
}
// SortStableBy sorts the slice in place, with stable sort algorithm. A less function is specified to compare slice values.
func SortStableBy[T any](s []T, less func(v1, v2 T) bool) {
	sort.Stable(orderedSliceFunc[T]{
		slice: s,
		less:  less,
	})
}
// Max returns an Optional holding the largest element of s, or an empty
// Optional when s has no elements.
func Max[T constraints.Ordered](s []T) optional.Optional[T] {
	if len(s) == 0 {
		return optional.Empty[T]()
	}
	best := s[0]
	for i := 1; i < len(s); i++ {
		if s[i] > best {
			best = s[i]
		}
	}
	return optional.OfValue(best)
}
// MaxBy returns an Optional holding the largest element of s according to
// less, or an empty Optional when s has no elements.
func MaxBy[T any](s []T, less func(v1, v2 T) bool) optional.Optional[T] {
	if len(s) == 0 {
		return optional.Empty[T]()
	}
	best := s[0]
	for i := 1; i < len(s); i++ {
		if less(best, s[i]) {
			best = s[i]
		}
	}
	return optional.OfValue(best)
}
// Min returns an Optional holding the smallest element of s, or an empty
// Optional when s has no elements.
func Min[T constraints.Ordered](s []T) optional.Optional[T] {
	if len(s) == 0 {
		return optional.Empty[T]()
	}
	best := s[0]
	for i := 1; i < len(s); i++ {
		if s[i] < best {
			best = s[i]
		}
	}
	return optional.OfValue(best)
}
// MinBy returns an Optional holding the smallest element of s according to
// less, or an empty Optional when s has no elements.
func MinBy[T any](s []T, less func(v1, v2 T) bool) optional.Optional[T] {
	if len(s) == 0 {
		return optional.Empty[T]()
	}
	best := s[0]
	for i := 1; i < len(s); i++ {
		if less(s[i], best) {
			best = s[i]
		}
	}
	return optional.OfValue(best)
}
// Reduce folds the slice into a single value: starting from initial, it
// applies reducer to the running accumulator and each element in order, and
// returns the final accumulator. An empty slice yields initial unchanged.
// (The previous comment claiming it "returns max value" was a copy-paste
// error.)
func Reduce[T any, R any](s []T, initial R, reducer func(current R, value T) R) R {
	result := initial
	for _, v := range s {
		result = reducer(result, v)
	}
	return result
}
package main
import (
"fmt"
"io"
"os"
)
// point is a 0-based board coordinate: x is the row index (as filled by
// main's read loop, board[row][col]), y is the column index.
type point struct{ x, y int }
// find returns the coordinates of the first occurrence of piece, scanning
// rows top to bottom and columns left to right. When the piece is absent it
// returns the zero point; callers only look up kings known to be on board.
func find(piece byte, board [8][8]byte) point {
	for row := 0; row < 8; row++ {
		for col := 0; col < 8; col++ {
			if board[row][col] == piece {
				return point{row, col}
			}
		}
	}
	return point{}
}
// inBoard reports whether (x, y) is a valid square on the 8x8 board.
func inBoard(x, y int) bool { return 0 <= x && x < 8 && 0 <= y && y < 8 }
// checkForPieces casts a ray from the king's square along each direction
// (row delta, column delta). The first non-empty square blocks the ray; the
// function reports true if that square holds any of pieces.
func checkForPieces(king point, board [8][8]byte, pieces []byte, directions [4][2]int) bool {
	for _, direction := range directions {
		x, y := king.x, king.y
		for {
			x += direction[0]
			y += direction[1]
			if !inBoard(x, y) {
				break
			}
			// Any occupied square ends this ray, whether or not it is an
			// attacker.
			if board[x][y] != '.' {
				for _, piece := range pieces {
					if board[x][y] == piece {
						return true
					}
				}
				break
			}
		}
	}
	return false
}
// checkBishopAndQueen reports whether any of pieces attacks the king along a
// diagonal ray.
func checkBishopAndQueen(king point, board [8][8]byte, pieces []byte) bool {
	directions := [4][2]int{{1, -1}, {1, 1}, {-1, 1}, {-1, -1}}
	return checkForPieces(king, board, pieces, directions)
}
// checkRookAndQueen reports whether any of pieces attacks the king along a
// horizontal or vertical ray.
func checkRookAndQueen(king point, board [8][8]byte, pieces []byte) bool {
	directions := [4][2]int{{0, -1}, {1, 0}, {0, 1}, {-1, 0}}
	return checkForPieces(king, board, pieces, directions)
}
// checkForPiece reports whether piece stands on any of the single-step
// offsets (row delta, column delta) from the king. Unlike checkForPieces,
// the offsets are not extended into rays — used for pawns and knights.
func checkForPiece(king point, board [8][8]byte, piece byte, directions [][2]int) bool {
	for _, direction := range directions {
		x := king.x + direction[0]
		y := king.y + direction[1]
		if inBoard(x, y) && board[x][y] == piece {
			return true
		}
	}
	return false
}
// checkPawn reports whether an enemy pawn attacks the king. direction is the
// row offset from the king to the attacker's row: -1 for the white king
// (black pawns attack from the row above), +1 for the black king (white
// pawns attack from the row below).
//
// Fix: the original used offsets {{-1, direction}, {1, direction}}, which
// swaps the axes — checkForPiece adds direction[0] to the ROW and
// direction[1] to the COLUMN (see find/main: board is board[row][col]).
// Pawns attack from one row away (direction) and one column to either side.
func checkPawn(king point, board [8][8]byte, direction int, piece byte) bool {
	directions := [][2]int{{direction, -1}, {direction, 1}}
	return checkForPiece(king, board, piece, directions)
}
// checkKnight reports whether an enemy knight (piece) attacks the king; the
// eight offsets are the knight's L-shaped moves (symmetric, so axis order
// does not matter here).
func checkKnight(king point, board [8][8]byte, piece byte) bool {
	directions := [][2]int{{1, -2}, {2, -1}, {2, 1}, {1, 2}, {-1, 2}, {-2, 1}, {-2, -1}, {-1, -2}}
	return checkForPiece(king, board, piece, directions)
}
// checkWhite reports whether the white king ('K') is in check from any black
// piece (lowercase letters).
func checkWhite(board [8][8]byte) bool {
	king := find('K', board)
	return checkRookAndQueen(king, board, []byte{'r', 'q'}) ||
		checkBishopAndQueen(king, board, []byte{'b', 'q'}) ||
		checkPawn(king, board, -1, 'p') ||
		checkKnight(king, board, 'n')
}
// checkBlack reports whether the black king ('k') is in check from any white
// piece (uppercase letters).
func checkBlack(board [8][8]byte) bool {
	king := find('k', board)
	return checkRookAndQueen(king, board, []byte{'R', 'Q'}) ||
		checkBishopAndQueen(king, board, []byte{'B', 'Q'}) ||
		checkPawn(king, board, 1, 'P') ||
		checkKnight(king, board, 'N')
}
// check writes the verdict line for one test case: which king (if any) is in
// check. White is tested first, matching the problem's guarantee that at
// most one king is in check at a time.
func check(out io.Writer, kase int, board [8][8]byte) {
	fmt.Fprintf(out, "Game #%d: ", kase)
	switch {
	case checkWhite(board):
		fmt.Fprintln(out, "white king is in check.")
	case checkBlack(board):
		fmt.Fprintln(out, "black king is in check.")
	default:
		fmt.Fprintln(out, "no king is in check.")
	}
}
// main reads boards from 10196.in and writes one verdict per board to
// 10196.out. Input is groups of 8 rows; a board consisting only of '.'
// terminates the input.
// NOTE(review): file-open and scan errors are ignored (judge-style I/O);
// confirm that is acceptable before reusing this outside the judge.
func main() {
	in, _ := os.Open("10196.in")
	defer in.Close()
	out, _ := os.Create("10196.out")
	defer out.Close()
	var line string
	var board [8][8]byte
	for kase := 1; ; kase++ {
		done := true
		for i := range board {
			fmt.Fscanf(in, "%s", &line)
			for j := range board[i] {
				board[i][j] = line[j]
				if line[j] != '.' {
					done = false
				}
			}
		}
		if done {
			break
		}
		// Consume the separator line between boards.
		fmt.Fscanln(in)
		check(out, kase, board)
	}
}
package main
import (
rl "github.com/chunqian/go-raylib/raylib"
"runtime"
)
const (
	// FOVY_PERSPECTIVE is the vertical field of view used while the camera
	// is in perspective mode.
	FOVY_PERSPECTIVE = 45.0
	// WIDTH_ORTHOGRAPHIC is the value written to camera.Fovy while in
	// orthographic mode (raylib reuses Fovy as the ortho width).
	WIDTH_ORTHOGRAPHIC = 10.0
)
// init pins the main goroutine to its OS thread; the window/GL context must
// only be touched from the thread that created it.
func init() {
	runtime.LockOSThread()
}
// main opens a window and renders a fixed set of 3D primitives at 60 FPS,
// letting the user toggle between perspective and orthographic projection
// with the space bar.
func main() {
	screenWidth := int32(800)
	screenHeight := int32(450)
	rl.InitWindow(screenWidth, screenHeight, "raylib [models] example - geometric shapes")
	defer rl.CloseWindow()
	// Camera at (0, 10, 10) looking at the origin, starting in perspective.
	camera := rl.NewCamera(
		rl.NewVector3(0, 10.0, 10.0),
		rl.NewVector3(0, 0, 0),
		rl.NewVector3(0, 1.0, 0),
		FOVY_PERSPECTIVE,
		int32(rl.CAMERA_PERSPECTIVE),
	)
	rl.SetTargetFPS(60)
	for !rl.WindowShouldClose() {
		// Space toggles projection; Fovy doubles as the orthographic width.
		if rl.IsKeyPressed(int32(rl.KEY_SPACE)) {
			if camera.Type == int32(rl.CAMERA_PERSPECTIVE) {
				camera.Fovy = WIDTH_ORTHOGRAPHIC
				camera.Type = int32(rl.CAMERA_ORTHOGRAPHIC)
			} else {
				camera.Fovy = FOVY_PERSPECTIVE
				camera.Type = int32(rl.CAMERA_PERSPECTIVE)
			}
		}
		rl.BeginDrawing()
		rl.ClearBackground(rl.RayWhite)
		rl.BeginMode3D(rl.Camera3D(camera))
		rl.DrawCube(rl.NewVector3(-4, 0, 2), 2, 5, 2, rl.Red)
		rl.DrawCubeWires(rl.NewVector3(-4, 0, 2), 2, 5, 2, rl.Gold)
		rl.DrawCubeWires(rl.NewVector3(-4, 0, -2), 3, 6, 2, rl.Maroon)
		rl.DrawSphere(rl.NewVector3(-1, 0, -2), 1, rl.Green)
		rl.DrawSphereWires(rl.NewVector3(1, 0, 2), 2, 16, 16, rl.Lime)
		rl.DrawCylinder(rl.NewVector3(4, 0, -2), 1, 2, 3, 4, rl.SkyBlue)
		rl.DrawCylinderWires(rl.NewVector3(4, 0, -2), 1, 2, 3, 4, rl.DarkBlue)
		rl.DrawCylinderWires(rl.NewVector3(4.5, -1, 2), 1, 1, 2, 6, rl.Brown)
		rl.DrawCylinder(rl.NewVector3(1, 0, -4), 0, 1.5, 3, 8, rl.Gold)
		rl.DrawCylinderWires(rl.NewVector3(1, 0, -4), 0, 1.5, 3, 8, rl.Pink)
		rl.DrawGrid(10, 1)
		rl.EndMode3D()
		rl.DrawText("Press Spacebar to switch camera type", 10, rl.GetScreenHeight()-30, 20, rl.DarkGray)
		if camera.Type == int32(rl.CAMERA_ORTHOGRAPHIC) {
			rl.DrawText("ORTHOGRAPHIC", 10, 40, 20, rl.Black)
		} else if camera.Type == int32(rl.CAMERA_PERSPECTIVE) {
			rl.DrawText("PERSPECTIVE", 10, 40, 20, rl.Black)
		}
		rl.DrawFPS(10, 10)
		rl.EndDrawing()
	}
}
package ai2048
// emptySquares scores a board by its number of empty squares (16 minus the
// tile count).
func emptySquares(b *Board) float64 {
	return float64(16 - b.TileCount)
}
// boardIndices maps each of the 4x4 cells to a weight-class index used by
// tilePlacement: 0 for the four center cells, 1 for edges, 2 for corners.
var boardIndices [4][4]int = [4][4]int{
	{2, 1, 1, 2},
	{1, 0, 0, 1},
	{1, 0, 0, 1},
	{2, 1, 1, 2},
}
// tilePlacement scores a board as the weighted sum of its tile values, where
// each cell's weight is chosen by its boardIndices class (center/edge/corner).
// weights must have at least 3 entries (indices 0..2).
//
// Fix: removed the dead counter k, which was incremented every iteration but
// never read.
func tilePlacement(b *Board, weights []float64) float64 {
	sum := 0.0
	for i := 0; i != 4; i++ {
		for j := 0; j != 4; j++ {
			sum += weights[boardIndices[i][j]] * float64(b.Vals[i][j])
		}
	}
	return sum
}
// goodNeighbours scores every horizontally and vertically adjacent pair of
// cells. Each pair contributes (a+b) scaled by a weight selected from the
// ratio of the larger tile to the smaller: weights[0] when either cell is
// empty, weights[1] for equal tiles, weights[2..6] for ratios 2, 4, 8, 16,
// 32, and weights[7] for anything larger.
//
// Fix: the original computed ratio as a/b + b/a, which is 2 when a == b, so
// the "equal tiles" branch (ratio == 1, weights[1]) was unreachable and
// equal pairs were scored with the 2x weight. The larger/smaller quotient is
// computed explicitly instead.
func goodNeighbours(b *Board, weights []float64) float64 {
	scorePair := func(x, y int) float64 {
		if x == 0 || y == 0 {
			return weights[0] * float64(x+y)
		}
		hi, lo := x, y
		if hi < lo {
			hi, lo = lo, hi
		}
		// Tile values are powers of two, so hi/lo classifies the pair.
		switch hi / lo {
		case 1:
			return weights[1] * float64(x+y)
		case 2:
			return weights[2] * float64(x+y)
		case 4:
			return weights[3] * float64(x+y)
		case 8:
			return weights[4] * float64(x+y)
		case 16:
			return weights[5] * float64(x+y)
		case 32:
			return weights[6] * float64(x+y)
		}
		return weights[7] * float64(x+y)
	}
	sum := 0.0
	for i := 0; i != 3; i++ {
		for j := 0; j != 4; j++ {
			sum += scorePair(b.Vals[i][j], b.Vals[i+1][j])
			sum += scorePair(b.Vals[j][i], b.Vals[j][i+1])
		}
	}
	return sum
}
// monotonicity rewards rows and columns whose values are monotonically
// non-increasing or non-decreasing. Each monotone line contributes a flat
// bonus weights[0] plus weights[1] times the line's tile sum.
func monotonicity(b *Board, weights []float64) float64 {
	scoreLine := func(a, b, c, d int) float64 {
		if (a >= b && b >= c && c >= d) || (a <= b && b <= c && c <= d) {
			return (weights[0] + weights[1]*float64(a+b+c+d))
		}
		return 0
	}
	sum := 0.0
	for i := 0; i != 4; i++ {
		sum += scoreLine(b.Vals[i][0], b.Vals[i][1], b.Vals[i][2], b.Vals[i][3])
		sum += scoreLine(b.Vals[0][i], b.Vals[1][i], b.Vals[2][i], b.Vals[3][i])
	}
	return sum
}
func biggestBlockInCorner(b *Board) float64 {
biggest := -1
for i := 0; i != 4; i++ {
for j := 0; j != 4; j++ {
if b.Vals[i][j] > biggest {
biggest = b.Vals[i][j]
}
}
}
if b.Vals[0][0] == biggest || b.Vals[0][3] == biggest || b.Vals[3][0] == biggest || b.Vals[3][3] == biggest {
return float64(biggest)
}
return float64(0.0)
} | ai2048/metrics.go | 0.865096 | 0.537648 | metrics.go | starcoder |
package ast
import (
"github.com/zoncoen/scenarigo/template/token"
)
// All node types implement the Node interface.
type Node interface {
	// Pos returns the token position used to locate the node in the source.
	Pos() int
}
// All expression nodes implement the Expr interface.
type Expr interface {
	Node
	// exprNode is an unexported marker restricting Expr to this package's
	// expression types.
	exprNode()
}
type (
	// BadExpr node is a placeholder for expressions containing syntax errors.
	BadExpr struct {
		ValuePos int
		Kind     token.Token
		Value    string
	}
	// BinaryExpr node represents a binary expression.
	BinaryExpr struct {
		X     Expr // left operand
		OpPos int  // position of Op
		Op    token.Token
		Y     Expr // right operand
	}
	// BasicLit node represents a literal of basic type.
	BasicLit struct {
		ValuePos int
		Kind     token.Token
		Value    string
	}
	// ParameterExpr node represents a parameter of template.
	ParameterExpr struct {
		Ldbrace int // position of the left double brace
		X       Expr
		Rdbrace int // position of the right double brace
		Quoted  bool
	}
	// Ident node represents an identifier.
	Ident struct {
		NamePos int
		Name    string
	}
	// SelectorExpr node represents an expression followed by a selector.
	SelectorExpr struct {
		X   Expr
		Sel *Ident
	}
	// IndexExpr node represents an expression followed by an index.
	IndexExpr struct {
		X      Expr
		Lbrack int // position of "["
		Index  Expr
		Rbrack int // position of "]"
	}
	// A CallExpr node represents an expression followed by an argument list.
	CallExpr struct {
		Fun    Expr
		Lparen int // position of "("
		Args   []Expr
		Rparen int // position of ")"
	}
	// A LeftArrowExpr node represents an expression followed by an argument.
	LeftArrowExpr struct {
		Fun     Expr
		Larrow  int // position of the left arrow token
		Rdbrace int
		Arg     Expr
	}
)
// Pos implements Node.
// Note that not every node reports its first character: SelectorExpr reports
// the selector's position, IndexExpr its "[", and CallExpr its "(".
func (e *BadExpr) Pos() int       { return e.ValuePos }
func (e *BinaryExpr) Pos() int    { return e.OpPos }
func (e *BasicLit) Pos() int      { return e.ValuePos }
func (e *ParameterExpr) Pos() int { return e.Ldbrace }
func (e *Ident) Pos() int         { return e.NamePos }
func (e *SelectorExpr) Pos() int  { return e.Sel.Pos() }
func (e *IndexExpr) Pos() int     { return e.Lbrack }
func (e *CallExpr) Pos() int      { return e.Lparen }
func (e *LeftArrowExpr) Pos() int { return e.Larrow }
// exprNode implements Expr.
// These empty marker methods tie each node type to the Expr interface.
func (e *BadExpr) exprNode()       {}
func (e *BinaryExpr) exprNode()    {}
func (e *BasicLit) exprNode()      {}
func (e *ParameterExpr) exprNode() {}
func (e *Ident) exprNode()         {}
func (e *SelectorExpr) exprNode()  {}
func (e *IndexExpr) exprNode()     {}
func (e *LeftArrowExpr) exprNode() {}
func (e *CallExpr) exprNode()      {}
package goja
import (
"gonum.org/v1/gonum/stat"
"math"
"math/bits"
"sort"
)
// The methods below implement the corresponding functions of the ECMAScript
// Math object by coercing the argument(s) to float64 via ToFloat and
// delegating to Go's math package.
func (r *Runtime) math_abs(call FunctionCall) Value {
	return floatToValue(math.Abs(call.Argument(0).ToFloat()))
}
func (r *Runtime) math_acos(call FunctionCall) Value {
	return floatToValue(math.Acos(call.Argument(0).ToFloat()))
}
func (r *Runtime) math_acosh(call FunctionCall) Value {
	return floatToValue(math.Acosh(call.Argument(0).ToFloat()))
}
func (r *Runtime) math_asin(call FunctionCall) Value {
	return floatToValue(math.Asin(call.Argument(0).ToFloat()))
}
func (r *Runtime) math_asinh(call FunctionCall) Value {
	return floatToValue(math.Asinh(call.Argument(0).ToFloat()))
}
func (r *Runtime) math_atan(call FunctionCall) Value {
	return floatToValue(math.Atan(call.Argument(0).ToFloat()))
}
func (r *Runtime) math_atanh(call FunctionCall) Value {
	return floatToValue(math.Atanh(call.Argument(0).ToFloat()))
}
func (r *Runtime) math_atan2(call FunctionCall) Value {
	y := call.Argument(0).ToFloat()
	x := call.Argument(1).ToFloat()
	return floatToValue(math.Atan2(y, x))
}
func (r *Runtime) math_cbrt(call FunctionCall) Value {
	return floatToValue(math.Cbrt(call.Argument(0).ToFloat()))
}
func (r *Runtime) math_ceil(call FunctionCall) Value {
	return floatToValue(math.Ceil(call.Argument(0).ToFloat()))
}
// math_clz32 counts leading zero bits of the argument coerced to uint32
// (Math.clz32).
func (r *Runtime) math_clz32(call FunctionCall) Value {
	return intToValue(int64(bits.LeadingZeros32(toUint32(call.Argument(0)))))
}
func (r *Runtime) math_cos(call FunctionCall) Value {
	return floatToValue(math.Cos(call.Argument(0).ToFloat()))
}
func (r *Runtime) math_cosh(call FunctionCall) Value {
	return floatToValue(math.Cosh(call.Argument(0).ToFloat()))
}
func (r *Runtime) math_exp(call FunctionCall) Value {
	return floatToValue(math.Exp(call.Argument(0).ToFloat()))
}
func (r *Runtime) math_expm1(call FunctionCall) Value {
	return floatToValue(math.Expm1(call.Argument(0).ToFloat()))
}
func (r *Runtime) math_floor(call FunctionCall) Value {
	return floatToValue(math.Floor(call.Argument(0).ToFloat()))
}
// math_fround rounds to the nearest float32 value (Math.fround) via a
// float64 -> float32 -> float64 round trip.
func (r *Runtime) math_fround(call FunctionCall) Value {
	return floatToValue(float64(float32(call.Argument(0).ToFloat())))
}
// math_hypot implements Math.hypot: the Euclidean norm of all arguments.
// +/-Infinity wins over NaN; the sum of squares is normalized by the largest
// magnitude to resist overflow and accumulated with Kahan summation.
func (r *Runtime) math_hypot(call FunctionCall) Value {
	var max float64
	var hasNaN bool
	absValues := make([]float64, 0, len(call.Arguments))
	for _, v := range call.Arguments {
		arg := nilSafe(v).ToFloat()
		if math.IsNaN(arg) {
			hasNaN = true
		} else {
			abs := math.Abs(arg)
			if abs > max {
				max = abs
			}
			absValues = append(absValues, abs)
		}
	}
	if math.IsInf(max, 1) {
		return _positiveInf
	}
	if hasNaN {
		return _NaN
	}
	if max == 0 {
		return _positiveZero
	}
	// Kahan summation to avoid rounding errors.
	// Normalize the numbers to the largest one to avoid overflow.
	var sum, compensation float64
	for _, n := range absValues {
		n /= max
		summand := n*n - compensation
		preliminary := sum + summand
		compensation = (preliminary - sum) - summand
		sum = preliminary
	}
	return floatToValue(math.Sqrt(sum) * max)
}
// math_imul implements Math.imul: C-style 32-bit integer multiplication with
// wraparound, result interpreted as a signed int32.
func (r *Runtime) math_imul(call FunctionCall) Value {
	x := toUint32(call.Argument(0))
	y := toUint32(call.Argument(1))
	return intToValue(int64(int32(x * y)))
}
// The four logarithm variants delegate directly to the math package.
func (r *Runtime) math_log(call FunctionCall) Value {
	return floatToValue(math.Log(call.Argument(0).ToFloat()))
}
func (r *Runtime) math_log1p(call FunctionCall) Value {
	return floatToValue(math.Log1p(call.Argument(0).ToFloat()))
}
func (r *Runtime) math_log10(call FunctionCall) Value {
	return floatToValue(math.Log10(call.Argument(0).ToFloat()))
}
func (r *Runtime) math_log2(call FunctionCall) Value {
	return floatToValue(math.Log2(call.Argument(0).ToFloat()))
}
// math_max implements Math.max. Any NaN argument forces a NaN result, but
// the remaining arguments must still be coerced to number for their
// spec-mandated side effects — hence the NaNLoop tail.
func (r *Runtime) math_max(call FunctionCall) Value {
	result := math.Inf(-1)
	args := call.Arguments
	for i, arg := range args {
		n := nilSafe(arg).ToFloat()
		if math.IsNaN(n) {
			args = args[i+1:]
			goto NaNLoop
		}
		result = math.Max(result, n)
	}
	return floatToValue(result)
NaNLoop:
	// All arguments still need to be coerced to number according to the specs.
	for _, arg := range args {
		nilSafe(arg).ToFloat()
	}
	return _NaN
}
// math_min implements Math.min; mirror image of math_max.
func (r *Runtime) math_min(call FunctionCall) Value {
	result := math.Inf(1)
	args := call.Arguments
	for i, arg := range args {
		n := nilSafe(arg).ToFloat()
		if math.IsNaN(n) {
			args = args[i+1:]
			goto NaNLoop
		}
		result = math.Min(result, n)
	}
	return floatToValue(result)
NaNLoop:
	// All arguments still need to be coerced to number according to the specs.
	for _, arg := range args {
		nilSafe(arg).ToFloat()
	}
	return _NaN
}
// math_pow implements Math.pow. Small non-negative integer exponents on
// integer bases take an exact integer fast path; everything else falls back
// to math.Pow with two explicit deviations where the ECMAScript result (NaN)
// differs from Go's math.Pow.
func (r *Runtime) math_pow(call FunctionCall) Value {
	x := call.Argument(0)
	y := call.Argument(1)
	if x, ok := x.(valueInt); ok {
		if y, ok := y.(valueInt); ok && y >= 0 && y < 64 {
			if y == 0 {
				return intToValue(1)
			}
			if x == 0 {
				return intToValue(0)
			}
			// NOTE(review): ipow returning 0 is treated as "fast path not
			// applicable" (presumably overflow); confirm against ipow's
			// definition.
			ip := ipow(int64(x), int64(y))
			if ip != 0 {
				return intToValue(ip)
			}
		}
	}
	xf := x.ToFloat()
	yf := y.ToFloat()
	// ECMAScript: (+/-1) ** (+/-Infinity) is NaN (Go returns 1).
	if math.Abs(xf) == 1 && math.IsInf(yf, 0) {
		return _NaN
	}
	// ECMAScript: 1 ** NaN is NaN (Go returns 1).
	if xf == 1 && math.IsNaN(yf) {
		return _NaN
	}
	return floatToValue(math.Pow(xf, yf))
}
// math_random implements Math.random using the runtime's configured source.
func (r *Runtime) math_random(call FunctionCall) Value {
	return floatToValue(r.rand())
}
// math_round implements Math.round: half-way cases round toward +Infinity,
// so 2.5 -> 3 but -2.5 -> -2 (the strict `t-f > 0.5` below), and -0 is
// preserved.
func (r *Runtime) math_round(call FunctionCall) Value {
	f := call.Argument(0).ToFloat()
	if math.IsNaN(f) {
		return _NaN
	}
	if f == 0 && math.Signbit(f) {
		return _negativeZero
	}
	t := math.Trunc(f)
	if f >= 0 {
		if f-t >= 0.5 {
			return floatToValue(t + 1)
		}
	} else {
		if t-f > 0.5 {
			return floatToValue(t - 1)
		}
	}
	return floatToValue(t)
}
// math_sign implements Math.sign: NaN, +0 and -0 are returned as-is,
// otherwise +1 or -1.
func (r *Runtime) math_sign(call FunctionCall) Value {
	arg := call.Argument(0)
	num := arg.ToFloat()
	if math.IsNaN(num) || num == 0 { // this will match -0 too
		return arg
	}
	if num > 0 {
		return intToValue(1)
	}
	return intToValue(-1)
}
// More direct delegations to the math package.
func (r *Runtime) math_sin(call FunctionCall) Value {
	return floatToValue(math.Sin(call.Argument(0).ToFloat()))
}
func (r *Runtime) math_sinh(call FunctionCall) Value {
	return floatToValue(math.Sinh(call.Argument(0).ToFloat()))
}
func (r *Runtime) math_sqrt(call FunctionCall) Value {
	return floatToValue(math.Sqrt(call.Argument(0).ToFloat()))
}
func (r *Runtime) math_tan(call FunctionCall) Value {
	return floatToValue(math.Tan(call.Argument(0).ToFloat()))
}
func (r *Runtime) math_tanh(call FunctionCall) Value {
	return floatToValue(math.Tanh(call.Argument(0).ToFloat()))
}
// math_trunc implements Math.trunc; integer values pass through unchanged.
func (r *Runtime) math_trunc(call FunctionCall) Value {
	arg := call.Argument(0)
	if i, ok := arg.(valueInt); ok {
		return i
	}
	return floatToValue(math.Trunc(arg.ToFloat()))
}
// getFloat64Slice collects the numeric inputs for the statistics extensions.
// If the first argument is a JS array, its elements are used; otherwise the
// call's arguments themselves are. Each value is coerced to float64 and NaNs
// are dropped. The result is nil when no usable value remains.
//
// Cleanup: removed the redundant `result = nil` / `var args []Value = nil`
// initializations and the `args == nil ||` half of the emptiness check
// (len of a nil slice is 0); behavior is unchanged.
func (r *Runtime) getFloat64Slice(call FunctionCall) []float64 {
	var args []Value
	if o, ok := call.Argument(0).(*Object); ok {
		// Only plain array objects expose their values directly; any other
		// object leaves args nil and yields a nil result.
		if a, ok := o.self.(*arrayObject); ok {
			args = a.values
		}
	} else {
		args = call.Arguments
	}
	var result []float64
	for _, arg := range args {
		n := nilSafe(arg).ToFloat()
		if !math.IsNaN(n) {
			result = append(result, n)
		}
	}
	return result
}
// The four methods below are non-standard Math extensions backed by gonum's
// stat package. Each operates on the numbers gathered by getFloat64Slice and
// returns NaN when no numeric input is available. The `!= nil &&` halves of
// the original emptiness checks were redundant and have been dropped.
// math_mean returns the arithmetic mean of the inputs.
func (r *Runtime) math_mean(call FunctionCall) Value {
	floats := r.getFloat64Slice(call)
	if len(floats) == 0 {
		return _NaN
	}
	return floatToValue(stat.Mean(floats, nil))
}
// math_median returns the empirical 0.5-quantile of the inputs; the slice is
// sorted in place as stat.Quantile requires.
func (r *Runtime) math_median(call FunctionCall) Value {
	floats := r.getFloat64Slice(call)
	if len(floats) == 0 {
		return _NaN
	}
	sort.Float64s(floats)
	return floatToValue(stat.Quantile(0.5, stat.Empirical, floats, nil))
}
// math_var returns the population (not sample) variance of the inputs.
func (r *Runtime) math_var(call FunctionCall) Value {
	floats := r.getFloat64Slice(call)
	if len(floats) == 0 {
		return _NaN
	}
	_, variance := stat.PopMeanVariance(floats, nil)
	return floatToValue(variance)
}
// math_std returns the population standard deviation of the inputs.
func (r *Runtime) math_std(call FunctionCall) Value {
	floats := r.getFloat64Slice(call)
	if len(floats) == 0 {
		return _NaN
	}
	_, variance := stat.PopMeanVariance(floats, nil)
	return floatToValue(math.Sqrt(variance))
}
// createMath builds the Math namespace object: its numeric constants, the
// standard function properties, and the non-standard statistics extensions
// (mean/median/var/std). Invoked lazily on first access via initMath.
func (r *Runtime) createMath(val *Object) objectImpl {
	m := &baseObject{
		class:      classMath,
		val:        val,
		extensible: true,
		prototype:  r.global.ObjectPrototype,
	}
	m.init()
	// Value properties: non-writable, non-enumerable, non-configurable.
	m._putProp("E", valueFloat(math.E), false, false, false)
	m._putProp("LN10", valueFloat(math.Ln10), false, false, false)
	m._putProp("LN2", valueFloat(math.Ln2), false, false, false)
	m._putProp("LOG10E", valueFloat(math.Log10E), false, false, false)
	m._putProp("LOG2E", valueFloat(math.Log2E), false, false, false)
	m._putProp("PI", valueFloat(math.Pi), false, false, false)
	m._putProp("SQRT1_2", valueFloat(sqrt1_2), false, false, false)
	m._putProp("SQRT2", valueFloat(math.Sqrt2), false, false, false)
	m._putSym(SymToStringTag, valueProp(asciiString(classMath), false, false, true))
	// Function properties: writable and configurable, not enumerable.
	m._putProp("abs", r.newNativeFunc(r.math_abs, nil, "abs", nil, 1), true, false, true)
	m._putProp("acos", r.newNativeFunc(r.math_acos, nil, "acos", nil, 1), true, false, true)
	m._putProp("acosh", r.newNativeFunc(r.math_acosh, nil, "acosh", nil, 1), true, false, true)
	m._putProp("asin", r.newNativeFunc(r.math_asin, nil, "asin", nil, 1), true, false, true)
	m._putProp("asinh", r.newNativeFunc(r.math_asinh, nil, "asinh", nil, 1), true, false, true)
	m._putProp("atan", r.newNativeFunc(r.math_atan, nil, "atan", nil, 1), true, false, true)
	m._putProp("atanh", r.newNativeFunc(r.math_atanh, nil, "atanh", nil, 1), true, false, true)
	m._putProp("atan2", r.newNativeFunc(r.math_atan2, nil, "atan2", nil, 2), true, false, true)
	m._putProp("cbrt", r.newNativeFunc(r.math_cbrt, nil, "cbrt", nil, 1), true, false, true)
	m._putProp("ceil", r.newNativeFunc(r.math_ceil, nil, "ceil", nil, 1), true, false, true)
	m._putProp("clz32", r.newNativeFunc(r.math_clz32, nil, "clz32", nil, 1), true, false, true)
	m._putProp("cos", r.newNativeFunc(r.math_cos, nil, "cos", nil, 1), true, false, true)
	m._putProp("cosh", r.newNativeFunc(r.math_cosh, nil, "cosh", nil, 1), true, false, true)
	m._putProp("exp", r.newNativeFunc(r.math_exp, nil, "exp", nil, 1), true, false, true)
	m._putProp("expm1", r.newNativeFunc(r.math_expm1, nil, "expm1", nil, 1), true, false, true)
	m._putProp("floor", r.newNativeFunc(r.math_floor, nil, "floor", nil, 1), true, false, true)
	m._putProp("fround", r.newNativeFunc(r.math_fround, nil, "fround", nil, 1), true, false, true)
	m._putProp("hypot", r.newNativeFunc(r.math_hypot, nil, "hypot", nil, 2), true, false, true)
	m._putProp("imul", r.newNativeFunc(r.math_imul, nil, "imul", nil, 2), true, false, true)
	m._putProp("log", r.newNativeFunc(r.math_log, nil, "log", nil, 1), true, false, true)
	m._putProp("log1p", r.newNativeFunc(r.math_log1p, nil, "log1p", nil, 1), true, false, true)
	m._putProp("log10", r.newNativeFunc(r.math_log10, nil, "log10", nil, 1), true, false, true)
	m._putProp("log2", r.newNativeFunc(r.math_log2, nil, "log2", nil, 1), true, false, true)
	m._putProp("max", r.newNativeFunc(r.math_max, nil, "max", nil, 2), true, false, true)
	m._putProp("min", r.newNativeFunc(r.math_min, nil, "min", nil, 2), true, false, true)
	m._putProp("pow", r.newNativeFunc(r.math_pow, nil, "pow", nil, 2), true, false, true)
	m._putProp("random", r.newNativeFunc(r.math_random, nil, "random", nil, 0), true, false, true)
	m._putProp("round", r.newNativeFunc(r.math_round, nil, "round", nil, 1), true, false, true)
	m._putProp("sign", r.newNativeFunc(r.math_sign, nil, "sign", nil, 1), true, false, true)
	m._putProp("sin", r.newNativeFunc(r.math_sin, nil, "sin", nil, 1), true, false, true)
	m._putProp("sinh", r.newNativeFunc(r.math_sinh, nil, "sinh", nil, 1), true, false, true)
	m._putProp("sqrt", r.newNativeFunc(r.math_sqrt, nil, "sqrt", nil, 1), true, false, true)
	m._putProp("tan", r.newNativeFunc(r.math_tan, nil, "tan", nil, 1), true, false, true)
	m._putProp("tanh", r.newNativeFunc(r.math_tanh, nil, "tanh", nil, 1), true, false, true)
	m._putProp("trunc", r.newNativeFunc(r.math_trunc, nil, "trunc", nil, 1), true, false, true)
	// extend: non-standard statistics helpers (see math_mean etc.).
	m._putProp("mean", r.newNativeFunc(r.math_mean, nil, "mean", nil, 0), true, false, true)
	m._putProp("median", r.newNativeFunc(r.math_median, nil, "median", nil, 0), true, false, true)
	m._putProp("var", r.newNativeFunc(r.math_var, nil, "var", nil, 0), true, false, true)
	m._putProp("std", r.newNativeFunc(r.math_std, nil, "std", nil, 0), true, false, true)
	return m
}
// initMath installs the global Math object lazily; createMath runs on first
// property access.
func (r *Runtime) initMath() {
	r.addToGlobal("Math", r.newLazyObject(r.createMath))
}
package pointerAnalysis
import (
"github.com/amit-davidson/Chronos/domain"
"github.com/amit-davidson/Chronos/utils"
"go/token"
"golang.org/x/tools/go/pointer"
"golang.org/x/tools/go/ssa"
)
// Analysis starts by mapping positions of the guarded accesses (the values
// inside them) to the guarded accesses themselves. It then runs pointer
// analysis over those values and, when two values may alias each other,
// merges their position buckets. Finally, for each bucket it checks whether
// two guarded accesses conflict — W&W/R&W from two different goroutines.
// map1 : A->ga1, ga2, ga3
// B->ga4, ga5, ga6
// C->ga7, ga8, ga9
// D->ga10, ga11, ga12
// Now that we know that B may point to A, we add it to it
// map1 : A->ga1, ga2, ga3, ga4, ga5, ga6
// C->ga7, ga8, ga9
// D->ga10, ga11, ga12
// And if A may point to D, then
// map1 : C->ga7, ga8, ga9
// D->ga10, ga11, ga12, ga1, ga2, ga3, ga4, ga5, ga6
// And then for each position the guarded accesses are compared to see if
// data races might exist.
func Analysis(pkg *ssa.Package, accesses []*domain.GuardedAccess) ([][]*domain.GuardedAccess, error) {
	config := &pointer.Config{
		Mains: []*ssa.Package{pkg},
	}
	positionsToGuardAccesses := map[token.Pos][]*domain.GuardedAccess{}
	for _, guardedAccess := range accesses {
		// Only pointer-like values can be queried by the pointer analysis.
		if guardedAccess.Pos.IsValid() && pointer.CanPoint(guardedAccess.Value.Type()) {
			config.AddQuery(guardedAccess.Value)
			// Multiple instructions for the same variable for example write and multiple reads
			positionsToGuardAccesses[guardedAccess.Value.Pos()] = append(positionsToGuardAccesses[guardedAccess.Value.Pos()], guardedAccess)
		}
	}
	result, err := pointer.Analyze(config)
	if err != nil {
		return nil, err // internal error in pointer analysis
	}
	// Join instructions of variables that may point to each other.
	// NOTE(review): unlike the sketch above, the source bucket is not deleted
	// after merging, so the same pair can surface under several keys;
	// presumably FilterDuplicates compensates downstream — confirm.
	for v, l := range result.Queries {
		for _, label := range l.PointsTo().Labels() {
			allocPos := label.Value().Pos()
			queryPos := v.Pos()
			if allocPos == queryPos {
				continue
			}
			positionsToGuardAccesses[allocPos] = append(positionsToGuardAccesses[allocPos], positionsToGuardAccesses[queryPos]...)
		}
	}
	// Pairwise conflict check inside each bucket (both orders are compared).
	conflictingGA := make([][]*domain.GuardedAccess, 0)
	for _, guardedAccesses := range positionsToGuardAccesses {
		for _, guardedAccessA := range guardedAccesses {
			for _, guardedAccessB := range guardedAccesses {
				if guardedAccessA.IsConflicting(guardedAccessB) {
					conflictingGA = append(conflictingGA, []*domain.GuardedAccess{guardedAccessA, guardedAccessB})
				}
			}
		}
	}
	return conflictingGA, nil
}
func FilterDuplicates(conflictingGAs [][]*domain.GuardedAccess) [][]*domain.GuardedAccess {
foundDataRaces := utils.NewDoubleKeyMap() // To avoid reporting on the same pair of positions more then once. Can happen if for the same place we read and then write.
nonDuplicatesGAs := make([][]*domain.GuardedAccess, 0)
for _, conflict := range conflictingGAs {
isExist := foundDataRaces.IsExist(conflict[0].Pos, conflict[1].Pos)
if !isExist {
foundDataRaces.Add(conflict[0].Pos, conflict[1].Pos)
nonDuplicatesGAs = append(nonDuplicatesGAs, conflict)
}
}
return nonDuplicatesGAs
} | pointerAnalysis/PointerAnalysis.go | 0.532182 | 0.430626 | PointerAnalysis.go | starcoder |
package gfx
import (
"sync"
"unsafe"
"github.com/go-gl/gl/v4.5-core/gl"
"github.com/go-gl/mathgl/mgl32"
)
const (
	// MaximumPointLights is the maximum number of lights that the pointlight system is prepared to handle.
	MaximumPointLights = 1024
)
var (
	// PointLights are the current pointlights in the scene.
	PointLights [MaximumPointLights]PointLight
	// numPointLights counts the lights added so far (clamped to
	// MaximumPointLights-1 by AddPointLight).
	numPointLights = uint32(0)
	// nextPointLightIndex is the ring-buffer slot the next light is written
	// to; it wraps back to 0 at MaximumPointLights.
	nextPointLightIndex = uint32(0)
	// mu guards the counters above and mutation of PointLights.
	mu sync.Mutex
	// lightBuffer and visibleLightIndicesBuffer are the GL buffer object
	// names created by InitPointLights.
	lightBuffer, visibleLightIndicesBuffer uint32
)
// PointLight represents all of the data about a PointLight.
// NOTE(review): the struct is uploaded verbatim to a shader storage buffer
// (see InitPointLights), so its field order presumably mirrors the shader's
// light struct — confirm before reordering fields.
type PointLight struct {
	Color     mgl32.Vec3
	Intensity float32
	Position  mgl32.Vec3
	Radius    float32
}
// VisibleIndex is a wrapper around an index (one entry of the per-tile
// visible-light list stored in visibleLightIndicesBuffer).
type VisibleIndex struct {
	index int32
}
// InitPointLights seeds the scene with four demo lights and allocates the
// two shader-storage buffers used for light culling: one holding the
// PointLights array and one reserving space for per-tile visible-light
// indices.
//
// Fix: buffer sizes were computed with unsafe.Sizeof(&PointLight{}) and
// unsafe.Sizeof(&VisibleIndex{}) — the size of a *pointer* (8 bytes on
// 64-bit), not of the struct — undersizing the GPU buffers. The value
// types' sizes are used instead.
func InitPointLights() {
	AddPointLight(mgl32.Vec3{0, 12, 0}, mgl32.Vec3{1, 0, 0}, 1.0, 10.0)
	AddPointLight(mgl32.Vec3{36, 12, 0}, mgl32.Vec3{0, 1, 0}, 1.0, 10.0)
	AddPointLight(mgl32.Vec3{0, 12, 36}, mgl32.Vec3{0, 0, 1}, 1.0, 10.0)
	AddPointLight(mgl32.Vec3{36, 12, 36}, mgl32.Vec3{1, 1, 0}, 1.0, 10.0)
	// Prepare light buffers
	gl.GenBuffers(1, &lightBuffer)
	gl.GenBuffers(1, &visibleLightIndicesBuffer)
	// Upload the (partially filled) light array.
	gl.BindBuffer(gl.SHADER_STORAGE_BUFFER, lightBuffer)
	gl.BufferData(gl.SHADER_STORAGE_BUFFER, MaximumPointLights*int(unsafe.Sizeof(PointLight{})), unsafe.Pointer(&PointLights), gl.DYNAMIC_DRAW)
	// Reserve space for the per-tile visible-light index lists.
	gl.BindBuffer(gl.SHADER_STORAGE_BUFFER, visibleLightIndicesBuffer)
	gl.BufferData(gl.SHADER_STORAGE_BUFFER, int(getTotalNumTiles())*int(unsafe.Sizeof(VisibleIndex{}))*MaximumPointLights, nil, gl.STATIC_DRAW)
	// Unbind for safety.
	gl.BindBuffer(gl.SHADER_STORAGE_BUFFER, 0)
}
// GetNumPointLights returns the number of PointLights that are currently in the scene.
// NOTE(review): reads numPointLights without holding mu; racy if called
// concurrently with AddPointLight — confirm all callers run on one thread.
func GetNumPointLights() uint32 {
	return numPointLights
}
// AddPointLight writes a light into the next ring-buffer slot (overwriting
// the oldest light once MaximumPointLights is reached) and re-uploads the
// whole PointLights array to the GPU.
//
// Fix: the upload size was MaximumPointLights*unsafe.Sizeof(&PointLight{}),
// i.e. the size of a *pointer* rather than of the struct, so only a
// fraction of the array reached the GPU buffer. Sizeof(PointLight{}) is
// used instead.
func AddPointLight(position, color mgl32.Vec3, intensity, radius float32) {
	mu.Lock()
	PointLights[nextPointLightIndex].Color = color
	PointLights[nextPointLightIndex].Intensity = intensity
	PointLights[nextPointLightIndex].Position = position
	PointLights[nextPointLightIndex].Radius = radius
	numPointLights++
	nextPointLightIndex++
	if numPointLights >= MaximumPointLights {
		numPointLights = MaximumPointLights - 1
	}
	if nextPointLightIndex >= MaximumPointLights {
		nextPointLightIndex = 0
	}
	mu.Unlock()
	// NOTE(review): PointLights is read here after mu is released, so a
	// concurrent AddPointLight could mutate it mid-upload; confirm whether
	// all callers are confined to the GL thread.
	gl.BindBuffer(gl.SHADER_STORAGE_BUFFER, lightBuffer)
	gl.BufferData(gl.SHADER_STORAGE_BUFFER, MaximumPointLights*int(unsafe.Sizeof(PointLight{})), unsafe.Pointer(&PointLights), gl.DYNAMIC_DRAW)
	gl.BindBuffer(gl.SHADER_STORAGE_BUFFER, 0)
}
// GetPointLightBuffer retrieves the private lightBuffer variable
// (the GL name of the light SSBO; valid only after InitPointLights).
func GetPointLightBuffer() uint32 {
	return lightBuffer
}
// GetPointLightVisibleLightIndicesBuffer retrieves the private
// visibleLightIndicesBuffer variable (valid only after InitPointLights).
func GetPointLightVisibleLightIndicesBuffer() uint32 {
	return visibleLightIndicesBuffer
}
package wm
import (
"github.com/cznic/interval"
"github.com/cznic/mathutil"
)
// Position represents 2D coordinates.
type Position struct {
	X, Y int
}
// add returns the component-wise sum p+q.
func (p Position) add(q Position) Position { return Position{p.X + q.X, p.Y + q.Y} }
// sub returns the component-wise difference p-q.
func (p Position) sub(q Position) Position { return Position{p.X - q.X, p.Y - q.Y} }
// In returns whether p is inside r.
func (p Position) In(r Rectangle) bool { return r.Has(p) }
// Rectangle represents a 2D area: its top-left corner plus its size.
type Rectangle struct {
	Position
	Size
}
// NewRectangle returns the Rectangle spanned by the two corner points
// (x1, y1) and (x2, y2), given in any order. Both corners are included, so
// the size is the coordinate difference plus one in each dimension.
func NewRectangle(x1, y1, x2, y2 int) Rectangle {
	left, right := mathutil.Min(x1, x2), mathutil.Max(x1, x2)
	top, bottom := mathutil.Min(y1, y2), mathutil.Max(y1, y2)
	return Rectangle{
		Position{left, top},
		Size{right - left + 1, bottom - top + 1},
	}
}
// Clip sets r to the intersection of r and s and returns a boolean value indicating
// whether the result is of non zero size. r is left unmodified when the
// intersection is empty.
func (r *Rectangle) Clip(s Rectangle) bool {
	// Intersect the horizontal extents as half-open intervals [X, X+Width).
	a := interval.Int{Cls: interval.LeftClosed, A: r.X, B: r.X + r.Width}
	b := interval.Int{Cls: interval.LeftClosed, A: s.X, B: s.X + s.Width}
	h0 := interval.Intersection(&a, &b)
	if h0.Class() == interval.Empty {
		return false
	}
	// Likewise for the vertical extents.
	a = interval.Int{Cls: interval.LeftClosed, A: r.Y, B: r.Y + r.Height}
	b = interval.Int{Cls: interval.LeftClosed, A: s.Y, B: s.Y + s.Height}
	v0 := interval.Intersection(&a, &b)
	if v0.Class() == interval.Empty {
		return false
	}
	h := h0.(*interval.Int)
	v := v0.(*interval.Int)
	// Rebuild the rectangle from the two surviving intervals; r is written
	// only once both intersections are known to be non-empty.
	var y Rectangle
	y.X = h.A
	y.Y = v.A
	y.Width = h.B - h.A
	y.Height = v.B - v.A
	*r = y
	return true
}
// join grows r to the smallest rectangle covering both r and s.
// A zero-size s leaves r unchanged; a zero-size r is replaced by s.
func (r *Rectangle) join(s Rectangle) {
	switch {
	case s.IsZero():
		return
	case r.IsZero():
		*r = s
		return
	}
	x := mathutil.Min(r.X, s.X)
	y := mathutil.Min(r.Y, s.Y)
	w := mathutil.Max(r.X+r.Width, s.X+s.Width) - x
	h := mathutil.Max(r.Y+r.Height, s.Y+s.Height) - y
	*r = Rectangle{Position{x, y}, Size{w, h}}
}
// Has returns whether r contains p (right and bottom edges exclusive).
func (r *Rectangle) Has(p Position) bool {
	inX := p.X >= r.X && p.X < r.X+r.Width
	inY := p.Y >= r.Y && p.Y < r.Y+r.Height
	return inX && inY
}
// Size represents 2D dimensions.
type Size struct {
	Width, Height int
}

// newSize returns a Size with the given width and height.
func newSize(w, h int) Size {
	return Size{Width: w, Height: h}
}

// IsZero reports whether s covers no area, i.e. its width or height is
// non-positive.
func (s *Size) IsZero() bool {
	return s.Width < 1 || s.Height < 1
}
package packed
// Efficient sequential read/write of packed integers.
//
// BulkOperationPacked10 decodes streams of 10-bit unsigned values packed
// back-to-back into 64-bit blocks or raw bytes; it specializes the generic
// BulkOperationPacked for bitsPerValue == 10.
type BulkOperationPacked10 struct {
	*BulkOperationPacked
}
// newBulkOperationPacked10 returns the bulk codec for 10 bits per value.
func newBulkOperationPacked10() BulkOperation {
	op := &BulkOperationPacked10{newBulkOperationPacked(10)}
	return op
}
// decodeLongToInt unpacks `iterations` groups of 32 ten-bit values into
// `values` (widened to int32). Each iteration consumes 5 int64 blocks
// (5*64 = 320 bits = 32 values). The shift/mask constants implement the fixed
// 10-bit layout (mask 1023 = 2^10-1); this is generated-style code — do not
// hand-edit the constants.
func (op *BulkOperationPacked10) decodeLongToInt(blocks []int64, values []int32, iterations int) {
	blocksOffset, valuesOffset := 0, 0
	for i := 0; i < iterations; i ++ {
		block0 := blocks[blocksOffset]; blocksOffset++
		values[valuesOffset] = int32(int64(uint64(block0) >> 54)); valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block0) >> 44) & 1023); valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block0) >> 34) & 1023); valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block0) >> 24) & 1023); valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block0) >> 14) & 1023); valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block0) >> 4) & 1023); valuesOffset++
		block1 := blocks[blocksOffset]; blocksOffset++
		values[valuesOffset] = int32(((block0 & 15) << 6) | (int64(uint64(block1) >> 58))); valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block1) >> 48) & 1023); valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block1) >> 38) & 1023); valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block1) >> 28) & 1023); valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block1) >> 18) & 1023); valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block1) >> 8) & 1023); valuesOffset++
		block2 := blocks[blocksOffset]; blocksOffset++
		values[valuesOffset] = int32(((block1 & 255) << 2) | (int64(uint64(block2) >> 62))); valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block2) >> 52) & 1023); valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block2) >> 42) & 1023); valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block2) >> 32) & 1023); valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block2) >> 22) & 1023); valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block2) >> 12) & 1023); valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block2) >> 2) & 1023); valuesOffset++
		block3 := blocks[blocksOffset]; blocksOffset++
		values[valuesOffset] = int32(((block2 & 3) << 8) | (int64(uint64(block3) >> 56))); valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block3) >> 46) & 1023); valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block3) >> 36) & 1023); valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block3) >> 26) & 1023); valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block3) >> 16) & 1023); valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block3) >> 6) & 1023); valuesOffset++
		block4 := blocks[blocksOffset]; blocksOffset++
		values[valuesOffset] = int32(((block3 & 63) << 4) | (int64(uint64(block4) >> 60))); valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block4) >> 50) & 1023); valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block4) >> 40) & 1023); valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block4) >> 30) & 1023); valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block4) >> 20) & 1023); valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block4) >> 10) & 1023); valuesOffset++
		values[valuesOffset] = int32(block4 & 1023); valuesOffset++
	}
}
// decodeByteToInt unpacks `iterations` groups of four 10-bit values from raw
// bytes into `values` (as int32). Each iteration consumes 5 bytes
// (5*8 = 40 bits = 4 values).
func (op *BulkOperationPacked10) decodeByteToInt(blocks []byte, values []int32, iterations int) {
	bi, vi := 0, 0
	for n := 0; n < iterations; n++ {
		b0 := blocks[bi]
		b1 := blocks[bi+1]
		b2 := blocks[bi+2]
		b3 := blocks[bi+3]
		b4 := blocks[bi+4]
		bi += 5
		// Each value spans a byte boundary: high bits from one byte,
		// low bits from the next.
		values[vi] = int32((int64(b0) << 2) | int64(b1>>6))
		values[vi+1] = int32((int64(b1&63) << 4) | int64(b2>>4))
		values[vi+2] = int32((int64(b2&15) << 6) | int64(b3>>2))
		values[vi+3] = int32((int64(b3&3) << 8) | int64(b4))
		vi += 4
	}
}
// decodeLongToLong unpacks `iterations` groups of 32 ten-bit values into
// `values` as int64. Layout and constants are identical to decodeLongToInt
// (5 blocks = 320 bits = 32 values per iteration); generated-style code —
// do not hand-edit the constants.
func (op *BulkOperationPacked10) decodeLongToLong(blocks []int64, values []int64, iterations int) {
	blocksOffset, valuesOffset := 0, 0
	for i := 0; i < iterations; i ++ {
		block0 := blocks[blocksOffset]; blocksOffset++
		values[valuesOffset] = int64(uint64(block0) >> 54); valuesOffset++
		values[valuesOffset] = int64(uint64(block0) >> 44) & 1023; valuesOffset++
		values[valuesOffset] = int64(uint64(block0) >> 34) & 1023; valuesOffset++
		values[valuesOffset] = int64(uint64(block0) >> 24) & 1023; valuesOffset++
		values[valuesOffset] = int64(uint64(block0) >> 14) & 1023; valuesOffset++
		values[valuesOffset] = int64(uint64(block0) >> 4) & 1023; valuesOffset++
		block1 := blocks[blocksOffset]; blocksOffset++
		values[valuesOffset] = ((block0 & 15) << 6) | (int64(uint64(block1) >> 58)); valuesOffset++
		values[valuesOffset] = int64(uint64(block1) >> 48) & 1023; valuesOffset++
		values[valuesOffset] = int64(uint64(block1) >> 38) & 1023; valuesOffset++
		values[valuesOffset] = int64(uint64(block1) >> 28) & 1023; valuesOffset++
		values[valuesOffset] = int64(uint64(block1) >> 18) & 1023; valuesOffset++
		values[valuesOffset] = int64(uint64(block1) >> 8) & 1023; valuesOffset++
		block2 := blocks[blocksOffset]; blocksOffset++
		values[valuesOffset] = ((block1 & 255) << 2) | (int64(uint64(block2) >> 62)); valuesOffset++
		values[valuesOffset] = int64(uint64(block2) >> 52) & 1023; valuesOffset++
		values[valuesOffset] = int64(uint64(block2) >> 42) & 1023; valuesOffset++
		values[valuesOffset] = int64(uint64(block2) >> 32) & 1023; valuesOffset++
		values[valuesOffset] = int64(uint64(block2) >> 22) & 1023; valuesOffset++
		values[valuesOffset] = int64(uint64(block2) >> 12) & 1023; valuesOffset++
		values[valuesOffset] = int64(uint64(block2) >> 2) & 1023; valuesOffset++
		block3 := blocks[blocksOffset]; blocksOffset++
		values[valuesOffset] = ((block2 & 3) << 8) | (int64(uint64(block3) >> 56)); valuesOffset++
		values[valuesOffset] = int64(uint64(block3) >> 46) & 1023; valuesOffset++
		values[valuesOffset] = int64(uint64(block3) >> 36) & 1023; valuesOffset++
		values[valuesOffset] = int64(uint64(block3) >> 26) & 1023; valuesOffset++
		values[valuesOffset] = int64(uint64(block3) >> 16) & 1023; valuesOffset++
		values[valuesOffset] = int64(uint64(block3) >> 6) & 1023; valuesOffset++
		block4 := blocks[blocksOffset]; blocksOffset++
		values[valuesOffset] = ((block3 & 63) << 4) | (int64(uint64(block4) >> 60)); valuesOffset++
		values[valuesOffset] = int64(uint64(block4) >> 50) & 1023; valuesOffset++
		values[valuesOffset] = int64(uint64(block4) >> 40) & 1023; valuesOffset++
		values[valuesOffset] = int64(uint64(block4) >> 30) & 1023; valuesOffset++
		values[valuesOffset] = int64(uint64(block4) >> 20) & 1023; valuesOffset++
		values[valuesOffset] = int64(uint64(block4) >> 10) & 1023; valuesOffset++
		values[valuesOffset] = block4 & 1023; valuesOffset++
	}
}
func (op *BulkOperationPacked10) decodeByteToLong(blocks []byte, values []int64, iterations int) {
blocksOffset, valuesOffset := 0, 0
for i := 0; i < iterations; i ++ {
byte0 := blocks[blocksOffset]
blocksOffset++
byte1 := blocks[blocksOffset]
blocksOffset++
values[valuesOffset] = int64((int64(byte0) << 2) | int64(uint8(byte1) >> 6))
valuesOffset++
byte2 := blocks[blocksOffset]
blocksOffset++
values[valuesOffset] = int64((int64(byte1 & 63) << 4) | int64(uint8(byte2) >> 4))
valuesOffset++
byte3 := blocks[blocksOffset]
blocksOffset++
values[valuesOffset] = int64((int64(byte2 & 15) << 6) | int64(uint8(byte3) >> 2))
valuesOffset++
byte4 := blocks[blocksOffset]
blocksOffset++
values[valuesOffset] = int64((int64(byte3 & 3) << 8) | int64(byte4))
valuesOffset++
}
} | vendor/github.com/balzaczyy/golucene/core/util/packed/bulkOperation10.go | 0.560373 | 0.738598 | bulkOperation10.go | starcoder |
package openapi
import (
"encoding/json"
)
// InlineObject2 struct for InlineObject2.
// Generated request body for a transfer: moves value from one Stromkonto
// account to another, authorized by a signature. All fields are optional
// pointers so "unset" can be distinguished from the zero value.
type InlineObject2 struct {
	// Stromkonto account address of sender
	Account *string `json:"account,omitempty"`
	// Stromkonto account address of receiver
	To *string `json:"to,omitempty"`
	// Amount to transfer (in Watthours for electricity, or pcs for trees)
	Value *int32 `json:"value,omitempty"`
	// Variation of the transfer — presumably selects the asset kind; TODO confirm against the API spec.
	Variation *string `json:"variation,omitempty"`
	// Signature per Stromkonto setting (might be simple email confirmation link)
	Signature *string `json:"signature,omitempty"`
}
// NewInlineObject2 instantiates a new InlineObject2 object.
// This constructor will assign default values to properties that have it
// defined, and makes sure properties required by the API are set; the set of
// arguments will change when the set of required properties changes.
func NewInlineObject2() *InlineObject2 {
	return &InlineObject2{}
}

// NewInlineObject2WithDefaults instantiates a new InlineObject2 object.
// This constructor only assigns default values to properties that have them
// defined; it does not guarantee that properties required by the API are set.
func NewInlineObject2WithDefaults() *InlineObject2 {
	return &InlineObject2{}
}
// GetAccount returns the Account field value if set, zero value otherwise.
func (o *InlineObject2) GetAccount() string {
	if o == nil || o.Account == nil {
		return ""
	}
	return *o.Account
}

// GetAccountOk returns the Account field and a flag reporting whether it was set.
func (o *InlineObject2) GetAccountOk() (*string, bool) {
	if o == nil || o.Account == nil {
		return nil, false
	}
	return o.Account, true
}

// HasAccount reports whether the Account field has been set.
func (o *InlineObject2) HasAccount() bool {
	return o != nil && o.Account != nil
}

// SetAccount stores v in the Account field.
func (o *InlineObject2) SetAccount(v string) {
	o.Account = &v
}
// GetTo returns the To field value if set, zero value otherwise.
func (o *InlineObject2) GetTo() string {
	if o == nil || o.To == nil {
		return ""
	}
	return *o.To
}

// GetToOk returns the To field and a flag reporting whether it was set.
func (o *InlineObject2) GetToOk() (*string, bool) {
	if o == nil || o.To == nil {
		return nil, false
	}
	return o.To, true
}

// HasTo reports whether the To field has been set.
func (o *InlineObject2) HasTo() bool {
	return o != nil && o.To != nil
}

// SetTo stores v in the To field.
func (o *InlineObject2) SetTo(v string) {
	o.To = &v
}
// GetValue returns the Value field value if set, zero value otherwise.
func (o *InlineObject2) GetValue() int32 {
	if o == nil || o.Value == nil {
		return 0
	}
	return *o.Value
}

// GetValueOk returns the Value field and a flag reporting whether it was set.
func (o *InlineObject2) GetValueOk() (*int32, bool) {
	if o == nil || o.Value == nil {
		return nil, false
	}
	return o.Value, true
}

// HasValue reports whether the Value field has been set.
func (o *InlineObject2) HasValue() bool {
	return o != nil && o.Value != nil
}

// SetValue stores v in the Value field.
func (o *InlineObject2) SetValue(v int32) {
	o.Value = &v
}
// GetVariation returns the Variation field value if set, zero value otherwise.
func (o *InlineObject2) GetVariation() string {
	if o == nil || o.Variation == nil {
		return ""
	}
	return *o.Variation
}

// GetVariationOk returns the Variation field and a flag reporting whether it was set.
func (o *InlineObject2) GetVariationOk() (*string, bool) {
	if o == nil || o.Variation == nil {
		return nil, false
	}
	return o.Variation, true
}

// HasVariation reports whether the Variation field has been set.
func (o *InlineObject2) HasVariation() bool {
	return o != nil && o.Variation != nil
}

// SetVariation stores v in the Variation field.
func (o *InlineObject2) SetVariation(v string) {
	o.Variation = &v
}
// GetSignature returns the Signature field value if set, zero value otherwise.
func (o *InlineObject2) GetSignature() string {
	if o == nil || o.Signature == nil {
		return ""
	}
	return *o.Signature
}

// GetSignatureOk returns the Signature field and a flag reporting whether it was set.
func (o *InlineObject2) GetSignatureOk() (*string, bool) {
	if o == nil || o.Signature == nil {
		return nil, false
	}
	return o.Signature, true
}

// HasSignature reports whether the Signature field has been set.
func (o *InlineObject2) HasSignature() bool {
	return o != nil && o.Signature != nil
}

// SetSignature stores v in the Signature field.
func (o *InlineObject2) SetSignature(v string) {
	o.Signature = &v
}
// MarshalJSON serializes only the fields that have been explicitly set.
func (o InlineObject2) MarshalJSON() ([]byte, error) {
	out := make(map[string]interface{})
	if o.Account != nil {
		out["account"] = o.Account
	}
	if o.To != nil {
		out["to"] = o.To
	}
	if o.Value != nil {
		out["value"] = o.Value
	}
	if o.Variation != nil {
		out["variation"] = o.Variation
	}
	if o.Signature != nil {
		out["signature"] = o.Signature
	}
	return json.Marshal(out)
}
type NullableInlineObject2 struct {
value *InlineObject2
isSet bool
}
func (v NullableInlineObject2) Get() *InlineObject2 {
return v.value
}
func (v *NullableInlineObject2) Set(val *InlineObject2) {
v.value = val
v.isSet = true
}
func (v NullableInlineObject2) IsSet() bool {
return v.isSet
}
func (v *NullableInlineObject2) Unset() {
v.value = nil
v.isSet = false
}
func NewNullableInlineObject2(val *InlineObject2) *NullableInlineObject2 {
return &NullableInlineObject2{value: val, isSet: true}
}
func (v NullableInlineObject2) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullableInlineObject2) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | out/go/model_inline_object_2.go | 0.763307 | 0.416203 | model_inline_object_2.go | starcoder |
package main
import (
	"math"

	eb "github.com/hajimehoshi/ebiten"
)
// coord holds a pair of 2D coordinates.
type coord struct {
	x, y float64
}

// object is a simple entity with an associated image, coordinates, a signed
// velocity, and the ability to collide with other objects.
type object struct {
	image *eb.Image
	coord
	width, height float64
	speedX float64
	speedY float64
	dx float64 // displacement planned for this frame
	dy float64
	isColliding bool
}

// bounds returns the object's top-left and bottom-right corners.
func (so *object) bounds() (coord, coord) {
	tl := coord{so.x, so.y}
	br := coord{so.x + so.width, so.y + so.height}
	return tl, br
}

// updateDxDy shrinks the planned displacement by the collision-resolution
// amounts dx/dy, preserving the direction of travel.
// getSign (defined elsewhere) presumably returns the sign of its argument — TODO confirm.
func (so *object) updateDxDy(dx float64, dy float64) {
	so.dx -= getSign(so.dx) * dx
	so.dy -= getSign(so.dy) * dy
}

// updatePos applies the (resolved) displacement to the object's position.
// Must be called after collision resolution.
func (so *object) updatePos() {
	so.x, so.y = so.x+so.dx, so.y+so.dy
}
// resolution describes the outcome of a collision test for one object.
type resolution struct {
	obj *object
	collide bool // a collision was detected
	collideWorld bool // the collision is with the screen border
	isVisible bool // the object is still (at least partially) on screen
	collideWith *object // the other object involved; nil for world collisions
	x float64 // last coordinates before the collision
	y float64
	dx float64 // by how much the displacement must be reduced/increased to stop the objects at the collision point
	dy float64
	otherDx float64
	otherDy float64
}
// abs returns the absolute value of n.
// Delegates to math.Abs, which also normalizes -0 and handles NaN/±Inf per
// IEEE-754, unlike the previous hand-rolled comparison.
func abs(n float64) float64 {
	return math.Abs(n)
}
// newResolution returns a resolution for obj with all collision flags cleared
// and the object considered visible.
func newResolution(one *object) resolution {
	res := resolution{
		obj:       one,
		isVisible: true,
	}
	return res
}
// isExiting detects collisions between obj (after applying its planned
// displacement dx/dy) and the screen borders, returning a resolution that
// records which edges are crossed, by how much the displacement must be
// corrected (with a 1-pixel margin), and whether the object is still visible.
func isExiting(obj *object) resolution {
	topLeft, bottomRight := obj.bounds()
	topLeft.x += obj.dx
	topLeft.y += obj.dy
	bottomRight.x += obj.dx
	bottomRight.y += obj.dy
	res := newResolution(obj)
	// res.x = topLeft.x
	// res.y = topLeft.y
	// left edge
	if topLeft.x <= 0 {
		res.collide = true
		res.collideWorld = true
		res.dx = abs(topLeft.x) + 1
		if bottomRight.x <= 0 {
			res.isVisible = false
		}
	}
	// top edge
	if topLeft.y <= 0 {
		res.collide = true
		res.collideWorld = true
		res.dy = abs(topLeft.y) + 1
		if bottomRight.y <= 0 {
			res.isVisible = false
		}
	}
	// right edge
	if bottomRight.x >= screenWidth {
		res.collide = true
		res.collideWorld = true
		res.dx = bottomRight.x - screenWidth - 1
		if topLeft.x >= screenWidth {
			res.isVisible = false
		}
	}
	// bottom edge
	if bottomRight.y >= screenHeight {
		res.collide = true
		res.collideWorld = true
		res.dy = bottomRight.y - screenHeight - 1
		if topLeft.y >= screenHeight {
			res.isVisible = false
		}
	}
	return res
}
// Détection collision entre deux objets
func checkCollision(one *object, two *object) resolution {
res := newResolution(one)
oneTopLeft, oneBottomRight := one.bounds()
oneTopLeft.x += one.dx
oneTopLeft.y += one.dy
oneBottomRight.x += one.dx
oneBottomRight.y += one.dy
twoTopLeft, twoBottomRight := two.bounds()
twoTopLeft.x += two.dx
twoTopLeft.y += two.dy
twoBottomRight.x += two.dx
twoBottomRight.y += two.dy
//collision axe des x
axeX := oneBottomRight.x >= twoTopLeft.x && twoBottomRight.x >= oneTopLeft.x
//collision axe des y
axeY := oneBottomRight.y >= twoTopLeft.y && twoBottomRight.y >= oneTopLeft.y
//collistion 2 axes
if axeX && axeY {
res.collide = true
res.collideWith = two
// si oneTopLeft < twoTopLeft (alors one à gauche de two)
var dx float64
if oneTopLeft.x < twoTopLeft.x {
dx = oneBottomRight.x - twoTopLeft.x
} else {
dx = twoBottomRight.x - oneTopLeft.x
}
// pondérer rectif dx selon déplacement effectif des 2 objets
oneProp := one.dx / (one.dx + two.dx)
twoProp := two.dx / (one.dx + two.dx)
res.dx = dx * oneProp
res.dy = one.dy
res.otherDx = dx * twoProp
res.otherDy = two.dy
}
return res
} | collisions.go | 0.596786 | 0.426322 | collisions.go | starcoder |
package orderedset
// OrderedSet represents a set as defined at https://infra.spec.whatwg.org/#ordered-set:
// an ordered list of items containing no duplicates.
type OrderedSet struct {
	set []string
}

// NewOrderedSet creates an OrderedSet with the specified contents.
// Duplicates are dropped (keeping the first occurrence) and the input slice is
// copied, so later mutation of contents cannot corrupt the set.
func NewOrderedSet(contents []string) *OrderedSet {
	s := &OrderedSet{set: make([]string, 0, len(contents))}
	for _, item := range contents {
		s.Append(item)
	}
	return s
}

/*
Append adds an item, if it doesn't exist, to the end of the OrderedSet
> To append to an ordered set: if the set contains the given item, then do nothing; otherwise, perform the normal list append operation.
*/
func (os *OrderedSet) Append(item string) {
	for _, i := range os.set {
		if i == item {
			return
		}
	}
	os.set = append(os.set, item)
}

/*
Prepend adds an item, if it doesn't exist, to the start of the OrderedSet
> To prepend to an ordered set: if the set contains the given item, then do nothing; otherwise, perform the normal list prepend operation.
*/
func (os *OrderedSet) Prepend(item string) {
	for _, i := range os.set {
		if i == item {
			return
		}
	}
	os.set = append([]string{item}, os.set...)
}

/*
Replace replaces an item in the OrderedSet
> To replace within an ordered set set, given item and replacement: if set contains item or replacement, then replace the first instance of either with replacement and remove all other instances.

BUG FIX: the previous implementation collected indices and removed them one by
one, but each removal shifted the remaining indices, deleting the wrong
elements when more than one instance had to go. This version filters in a
single pass (reusing the backing array), which also drops the dependency on
the removeIndex helper.
*/
func (os *OrderedSet) Replace(item string, replacement string) {
	replaced := false
	out := os.set[:0]
	for _, v := range os.set {
		if v == item || v == replacement {
			if !replaced {
				out = append(out, replacement)
				replaced = true
			}
			continue
		}
		out = append(out, v)
	}
	os.set = out
}

/*
Intersect intersects the two OrderedSets, returning a new OrderedSet
> The intersection of ordered sets A and B, is the result of creating a new ordered set set and, for each item of A, if B contains item, appending item to set.
*/
func (os *OrderedSet) Intersect(other *OrderedSet) *OrderedSet {
	newos := &OrderedSet{}
	for _, i := range os.set {
		for _, a := range other.set {
			if a == i {
				newos.Append(i)
			}
		}
	}
	return newos
}

/*
Union returns a union of both OrderedSets, as a new OrderedSet
> The union of ordered sets A and B, is the result of cloning A as set and, for each item of B, appending item to set."
*/
func (os *OrderedSet) Union(other *OrderedSet) *OrderedSet {
	newos := *os
	newos.set = make([]string, len(os.set))
	copy(newos.set, os.set)
	if other == nil {
		return &newos
	}
	for _, i := range other.set {
		newos.Append(i)
	}
	return &newos
}

/*
Range returns a chunk of an OrderedSet
> The range n to m, inclusive, creates a new ordered set containing all of the integers from n up to and including m in consecutively increasing order, as long as m is greater than or equal to n.

BUG FIX: the previous implementation returned a subslice aliasing the original
backing array, so mutating either set could corrupt the other; the items are
now copied into a fresh slice.
*/
func (os *OrderedSet) Range(start int, end int) *OrderedSet {
	ns := &OrderedSet{set: make([]string, end+1-start)}
	copy(ns.set, os.set[start:end+1])
	return ns
}

// AsSlice returns a copy of the Ordered Set as a slice for iteration.
func (os *OrderedSet) AsSlice() []string {
	sl := make([]string, len(os.set))
	copy(sl, os.set)
	return sl
}
package shmensor
import (
"fmt"
)
// This file is used to define some package-default Tensor types.
// Interface may become open in the future so clients can define their own.
// For example, Tensor spaces over finite fields, or modules over arbitrary rings.
// Integers: ring operations over int values boxed in interface{}.
type defaultInt struct{}

// Multiply returns x*y; both operands must hold int values.
func (dt defaultInt) Multiply(x, y interface{}) interface{} {
	a, b := x.(int), y.(int)
	return a * b
}

// Add returns x+y; both operands must hold int values.
func (dt defaultInt) Add(x, y interface{}) interface{} {
	a, b := x.(int), y.(int)
	return a + b
}
// NewIntTensor wraps an int-valued index function as a Tensor over the
// integer operations.
func NewIntTensor(f func(i ...int) int, signature string, dim []int) Tensor {
	wrap := func(idx ...int) interface{} { return f(idx...) }
	return Tensor{wrap, signature, dim, defaultInt{}}
}
// Reals: field operations over float64 values boxed in interface{}.
type defaultReal struct{}

// Multiply returns x*y; both operands must hold float64 values.
func (dt defaultReal) Multiply(x, y interface{}) interface{} {
	a, b := x.(float64), y.(float64)
	return a * b
}

// Add returns x+y; both operands must hold float64 values.
func (dt defaultReal) Add(x, y interface{}) interface{} {
	a, b := x.(float64), y.(float64)
	return a + b
}
// NewRealTensor wraps a float64-valued index function as a Tensor over the
// real-number operations.
func NewRealTensor(f func(i ...int) float64, signature string, dim []int) Tensor {
	wrap := func(idx ...int) interface{} { return f(idx...) }
	return Tensor{wrap, signature, dim, defaultReal{}}
}

// NewRealFunction lifts a float64 -> float64 function into a Function over
// the reals. A non-float64 argument panics at the type assertion.
func NewRealFunction(f func(r float64) float64) Function {
	wrap := func(i interface{}) interface{} {
		return f(i.(float64))
	}
	return Function{wrap, defaultReal{}}
}

// NewRealScalar returns scalar multiplication by f as a Function.
func NewRealScalar(f float64) Function {
	return NewRealFunction(func(x float64) float64 { return f * x })
}
// Complex numbers: field operations over complex128 values boxed in interface{}.
type defaultComplex struct{}

// Multiply returns x*y; both operands must hold complex128 values.
func (dt defaultComplex) Multiply(x, y interface{}) interface{} {
	a, b := x.(complex128), y.(complex128)
	return a * b
}

// Add returns x+y; both operands must hold complex128 values.
func (dt defaultComplex) Add(x, y interface{}) interface{} {
	a, b := x.(complex128), y.(complex128)
	return a + b
}
// NewComplexTensor wraps a complex128-valued index function as a Tensor over
// the complex-number operations.
func NewComplexTensor(f func(i ...int) complex128, signature string, dim []int) Tensor {
	wrap := func(idx ...int) interface{} { return f(idx...) }
	return Tensor{wrap, signature, dim, defaultComplex{}}
}
// Strings: symbolic operations that render products and sums as text, useful
// for inspecting the algebra.
type defaultString struct{}

// Multiply renders the symbolic product of two string expressions.
func (ds defaultString) Multiply(x, y interface{}) interface{} {
	a, b := x.(string), y.(string)
	return fmt.Sprintf("(%v)(%v)", a, b)
}

// Add renders the symbolic sum of two string expressions.
func (ds defaultString) Add(x, y interface{}) interface{} {
	a, b := x.(string), y.(string)
	return fmt.Sprintf("%v + %v", a, b)
}
func NewStringTensor(f func(i ...int) string, signature string, dim []int) Tensor {
return Tensor{
func(i ...int) interface{} { return f(i...) },
signature,
dim,
defaultString{},
}
}
func NewStringFunction(f func(s string) string) Function {
wrapper := func(i interface{}) interface{} {
// Test type assertion and panic informatively first.
return f(i.(string))
}
return Function{
wrapper,
defaultString{},
}
} | shmensor/types.go | 0.774839 | 0.425963 | types.go | starcoder |
package anomalies
import (
"context"
"math"
"time"
"github.com/trackit/trackit/config"
)
// min returns the minimum between a and b.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
// sum returns the total of the Cost fields across all entries of aCosts.
func sum(aCosts AnalyzedCosts) float64 {
	total := 0.0
	for i := range aCosts {
		total += aCosts[i].Cost
	}
	return total
}
// average calculates the arithmetic mean of the costs in aCosts.
// NOTE(review): returns NaN for an empty slice (0/0); the caller
// (analyseAnomalies) only passes non-empty windows — confirm.
func average(aCosts AnalyzedCosts) float64 {
	return sum(aCosts) / float64(len(aCosts))
}
// sigma returns the sum of squared differences between each cost and avg —
// the numerator inside the standard-deviation formula.
func sigma(aCosts AnalyzedCosts, avg float64) float64 {
	var total float64
	for i := range aCosts {
		total += math.Pow(aCosts[i].Cost-avg, 2)
	}
	return total
}
// deviation calculates the standard deviation from sigma (the summed squared
// differences) over the given period.
// NOTE(review): this computes sqrt(sigma)/period rather than the textbook
// sqrt(sigma/period); the Bollinger coefficients in config may be tuned to
// this formula, so it is preserved as-is — confirm intent before changing.
func deviation(sigma float64, period int) float64 {
	var deviation float64 = 1 / float64(period) * math.Pow(sigma, 0.5)
	return deviation
}
// analyseAnomalies calculates anomalies with Bollinger Bands algorithm and
// const values above. It consists in generating an upper band, which, if
// exceeded, make an alert.
//
// For every point after the first, a sliding window of up to
// AnomalyDetectionBollingerBandPeriod preceding points yields the mean and
// standard deviation; the upper band is mean*upperCoef + deviation*devCoef,
// and any cost above it is flagged as an anomaly.
func analyseAnomalies(aCosts AnalyzedCosts) AnalyzedCosts {
	for index := range aCosts {
		if index > 0 {
			a := &aCosts[index]
			tempSliceSize := min(index, config.AnomalyDetectionBollingerBandPeriod)
			tempSlice := aCosts[index-tempSliceSize : index]
			avg := average(tempSlice)
			sigma := sigma(tempSlice, avg)
			deviation := deviation(sigma, tempSliceSize)
			a.UpperBand = avg*config.AnomalyDetectionBollingerBandUpperBandCoefficient + (deviation * config.AnomalyDetectionBollingerBandStandardDeviationCoefficient)
			if a.Cost > a.UpperBand {
				a.Anomaly = true
			}
		}
	}
	return aCosts
}
// addPadding adds a padding if we ask from 10 to 15
// but ES has only from 12 to 15. So 10 11 will be padded.
//
// Concretely: when the first analyzed cost is dated after dateBegin, zero-cost
// placeholder entries are prepended, one per missing day, so the series starts
// at dateBegin. A parse failure on the first date silently skips padding.
// NOTE(review): indexes aCosts[0] unconditionally, so an empty slice panics —
// confirm callers guarantee non-empty input.
func addPadding(aCosts AnalyzedCosts, dateBegin time.Time) AnalyzedCosts {
	if cd, err := time.Parse("2006-01-02T15:04:05.000Z", aCosts[0].Meta.Date); err == nil && dateBegin.Before(cd) {
		for i := int(cd.Sub(dateBegin).Hours() / 24); i > 0; i-- {
			cd = cd.AddDate(0, 0, -1)
			pad := AnalyzedCost{
				Meta: AnalyzedCostEssentialMeta{
					Date: cd.Format("2006-01-02T15:04:05.000Z"),
				},
			}
			aCosts = append(AnalyzedCosts{pad}, aCosts...)
		}
	}
	return aCosts
}
// computeAnomalies calls every functions to well format
// AnalyzedCosts and do BollingerBand.
func computeAnomalies(ctx context.Context, aCosts AnalyzedCosts, dateBegin time.Time) AnalyzedCosts {
aCosts = addPadding(aCosts, dateBegin)
aCosts = analyseAnomalies(aCosts)
return aCosts
} | anomaliesDetection/bollinger.go | 0.775435 | 0.645776 | bollinger.go | starcoder |
package aoc2019
import (
"image"
"image/color"
"image/png"
"io/ioutil"
"math"
"os"
"strconv"
"strings"
"github.com/pkg/errors"
)
// day3LineSegment is an axis-aligned wire segment stored as the four
// coordinates [x1, y1, x2, y2], running from (x1,y1) to (x2,y2).
type day3LineSegment [4]int
// Draw rasterizes the segment onto img in color col, one pixel per step,
// walking from (d[0],d[1]) toward (d[2],d[3]).
// NOTE(review): a degenerate segment (both endpoints equal) matches none of
// the switch cases, leaving ptMod nil and panicking in the loop below —
// confirm callers never produce zero-length segments.
func (d day3LineSegment) Draw(img *image.RGBA, col color.Color) {
	// ptMod advances one pixel and reports whether the end is not yet passed.
	var ptMod func(x, y int) (int, int, bool)
	switch {
	case d.IsVertical() && d[1] < d[3]:
		ptMod = func(x, y int) (int, int, bool) { return x, y + 1, y+1 <= d[3] }
	case d.IsVertical() && d[1] > d[3]:
		ptMod = func(x, y int) (int, int, bool) { return x, y - 1, y-1 >= d[3] }
	case !d.IsVertical() && d[0] < d[2]:
		ptMod = func(x, y int) (int, int, bool) { return x + 1, y, x+1 <= d[2] }
	case !d.IsVertical() && d[0] > d[2]:
		ptMod = func(x, y int) (int, int, bool) { return x - 1, y, x-1 >= d[2] }
	}
	var pX, pY, ok = d[0], d[1], true
	for ok {
		img.Set(pX, pY, col)
		pX, pY, ok = ptMod(pX, pY)
	}
}
// GetIntersection returns the crossing point of d and in and true, or false
// when the segments do not cross. Exactly one of the segments must be
// vertical and the other horizontal for an intersection to be reported.
func (d day3LineSegment) GetIntersection(in day3LineSegment) ([2]int, bool) {
	if d.IsVertical() == in.IsVertical() {
		// Both lines have the same direction, there is no intersection
		// NOTE: This might yield false negative when lines are overlapping?
		return [2]int{0, 0}, false
	}
	var (
		vert, horiz day3LineSegment
	)
	if d.IsVertical() {
		vert, horiz = d, in
	} else {
		vert, horiz = in, d
	}
	// Normalize lines for intersection finding (low coordinate first).
	if vert[1] > vert[3] {
		vert[1], vert[3] = vert[3], vert[1]
	}
	if horiz[0] > horiz[2] {
		horiz[0], horiz[2] = horiz[2], horiz[0]
	}
	// The vertical segment's X must fall within the horizontal span, and the
	// horizontal segment's Y within the vertical span.
	if vert[0] < horiz[0] || vert[0] > horiz[2] || horiz[1] < vert[1] || horiz[1] > vert[3] {
		// Lines do not have an intersection
		return [2]int{0, 0}, false
	}
	return [2]int{vert[0], horiz[1]}, true
}
// HasPoint reports whether the point (x, y) lies on the segment.
// NOTE(review): for a degenerate vertical segment (d[1] == d[3]) neither
// range case below triggers, so any y with matching x is accepted — confirm
// zero-length segments cannot occur.
func (d day3LineSegment) HasPoint(x, y int) bool {
	switch {
	case d.IsVertical() && x != d[0]: // vertical segment: x is off the line
		fallthrough
	case !d.IsVertical() && y != d[1]: // horizontal segment: y is off the line
		fallthrough
	case d.IsVertical() && d[1] < d[3] && (y < d[1] || y > d[3]): // vertical low-to-high: y out of range
		fallthrough
	case d.IsVertical() && d[1] > d[3] && (y > d[1] || y < d[3]): // vertical high-to-low: y out of range
		fallthrough
	case !d.IsVertical() && d[0] < d[2] && (x < d[0] || x > d[2]): // horizontal left-to-right: x out of range
		fallthrough
	case !d.IsVertical() && d[0] > d[2] && (x > d[0] || x < d[2]): // horizontal right-to-left: x out of range
		return false
	}
	return true
}
// IsVertical reports whether the segment runs parallel to the Y axis, i.e.
// both endpoints share the same X coordinate.
func (d day3LineSegment) IsVertical() bool {
	return d[0] == d[2]
}
// Steps returns the length of the segment in unit steps.
func (d day3LineSegment) Steps() int {
	delta := d[0] - d[2]
	if d.IsVertical() {
		delta = d[1] - d[3]
	}
	return int(math.Abs(float64(delta)))
}

// StepsToPoint returns the number of unit steps from the segment's start
// (d[0],d[1]) to the given point, measured along the segment's axis.
func (d day3LineSegment) StepsToPoint(x, y int) int {
	delta := d[0] - x
	if d.IsVertical() {
		delta = d[1] - y
	}
	return int(math.Abs(float64(delta)))
}
// day3Line is a poly-line (wire) made of axis-aligned segments.
type day3Line []day3LineSegment

// GetIntersections returns the crossing points between every pair of
// segments taken from d and in, in segment order.
func (d day3Line) GetIntersections(in day3Line) [][2]int {
	var points [][2]int
	for _, a := range d {
		for _, b := range in {
			if pt, ok := a.GetIntersection(b); ok {
				points = append(points, pt)
			}
		}
	}
	return points
}
// debugDay3DrawImage renders both wires and their detected intersections to
// day03_debug.png for visual debugging: l1 in red, l2 in blue, intersections
// in purple, and the origin in green.
func debugDay3DrawImage(l1, l2 day3Line) error {
	dest := image.NewRGBA(image.Rect(-500, -500, 500, 500))
	// Draw l1 in red
	for _, ls := range l1 {
		ls.Draw(dest, color.RGBA{0xff, 0x0, 0x0, 0xff})
	}
	// Draw l2 in blue
	for _, ls := range l2 {
		ls.Draw(dest, color.RGBA{0x0, 0x0, 0xff, 0xff})
	}
	// Draw detected intersections in purple
	for _, i := range l1.GetIntersections(l2) {
		dest.SetRGBA(i[0], i[1], color.RGBA{0xff, 0x0, 0xff, 0xff})
	}
	// Draw "home-point" in green
	dest.SetRGBA(0, 0, color.RGBA{0x0, 0xff, 0x0, 0xff})
	f, err := os.Create("day03_debug.png")
	if err != nil {
		return errors.Wrap(err, "Unable to open output file")
	}
	defer f.Close()
	return errors.Wrap(png.Encode(f, dest), "Unable to write image")
}
// getDay3MinIntersectionDistance returns the smallest positive Manhattan
// distance from (originX, originY) to any intersection of l1 and l2, or
// math.MaxInt64 when there is no such intersection.
func getDay3MinIntersectionDistance(l1, l2 day3Line, originX, originY int) int {
	best := math.MaxInt64
	for _, pt := range l1.GetIntersections(l2) {
		if d := manhattenDistance(originX, originY, pt[0], pt[1]); d > 0 && d < best {
			best = d
		}
	}
	return best
}
// getDay3MinIntersectionSteps returns the minimum combined number of wire
// steps (along l1 plus along l2) needed to reach any intersection of the two
// wires, or math.MaxInt64 when they never cross. For each wire, full segment
// lengths are summed until the segment containing the intersection, which
// contributes only the partial distance to the point.
func getDay3MinIntersectionSteps(l1, l2 day3Line) int {
	var minSteps int = math.MaxInt64
	for _, is := range l1.GetIntersections(l2) {
		var combinedsteps int
		for _, l := range []day3Line{l1, l2} {
			for _, ls := range l {
				if ls.HasPoint(is[0], is[1]) {
					combinedsteps += ls.StepsToPoint(is[0], is[1])
					break
				}
				combinedsteps += ls.Steps()
			}
		}
		if combinedsteps > 0 && combinedsteps < minSteps {
			minSteps = combinedsteps
		}
	}
	return minSteps
}
// parseDay3LineDefinition parses a comma-separated wire description such as
// "R8,U5,L5,D3" into a day3Line starting at (startX, startY). Each token is a
// direction rune (U/D/L/R) followed by a decimal length.
func parseDay3LineDefinition(definition string, startX, startY int) (day3Line, error) {
	var (
		out    day3Line
		pX, pY = startX, startY
	)
	for _, d := range strings.Split(strings.TrimSpace(definition), ",") {
		if d == "" {
			// Guard against empty tokens (e.g. trailing commas), which would
			// otherwise panic on the d[1:] slice below.
			return nil, errors.Errorf("empty direction token in %q", definition)
		}
		l, err := strconv.Atoi(d[1:])
		if err != nil {
			return nil, errors.Wrapf(err, "Unable to parse direction %q", d)
		}
		var tX, tY int
		switch d[0] {
		case 'D':
			tX, tY = pX, pY-l
		case 'L':
			tX, tY = pX-l, pY
		case 'R':
			tX, tY = pX+l, pY
		case 'U':
			tX, tY = pX, pY+l
		default:
			// BUG FIX: this previously wrapped the nil err left over from Atoi;
			// errors.Wrapf(nil, ...) returns nil, so invalid directions were
			// silently accepted as (nil, nil).
			return nil, errors.Errorf("Invalid direction %q given", d)
		}
		out = append(out, day3LineSegment{pX, pY, tX, tY})
		pX, pY = tX, tY
	}
	return out, nil
}
// solveDay3Part1 reads two wire definitions (one per line) from inFile and
// returns the minimum Manhattan distance from the origin to any point where
// the two wires intersect.
func solveDay3Part1(inFile string) (int, error) {
	raw, err := ioutil.ReadFile(inFile)
	if err != nil {
		return 0, errors.Wrap(err, "Unable to read input")
	}
	defs := strings.Split(string(raw), "\n")
	// Guard against malformed input: fewer than two lines would otherwise
	// panic on the defs[1] index below.
	if len(defs) < 2 {
		return 0, errors.Errorf("Expected two line definitions, got %d", len(defs))
	}
	l1, err := parseDay3LineDefinition(defs[0], 0, 0)
	if err != nil {
		return 0, errors.Wrap(err, "Unable to parse L1")
	}
	l2, err := parseDay3LineDefinition(defs[1], 0, 0)
	if err != nil {
		// BUG FIX: this message previously said "L1" (copy-paste error).
		return 0, errors.Wrap(err, "Unable to parse L2")
	}
	return getDay3MinIntersectionDistance(l1, l2, 0, 0), nil
}
// solveDay3Part2 reads two wire definitions (one per line) from inFile and
// returns the minimum combined number of steps the two wires travel to reach
// a common intersection.
func solveDay3Part2(inFile string) (int, error) {
	raw, err := ioutil.ReadFile(inFile)
	if err != nil {
		return 0, errors.Wrap(err, "Unable to read input")
	}
	defs := strings.Split(string(raw), "\n")
	// Guard against malformed input: fewer than two lines would otherwise
	// panic on the defs[1] index below.
	if len(defs) < 2 {
		return 0, errors.Errorf("Expected two line definitions, got %d", len(defs))
	}
	l1, err := parseDay3LineDefinition(defs[0], 0, 0)
	if err != nil {
		return 0, errors.Wrap(err, "Unable to parse L1")
	}
	l2, err := parseDay3LineDefinition(defs[1], 0, 0)
	if err != nil {
		// BUG FIX: this message previously said "L1" (copy-paste error).
		return 0, errors.Wrap(err, "Unable to parse L2")
	}
	return getDay3MinIntersectionSteps(l1, l2), nil
} | day03.go | 0.576184 | 0.474144 | day03.go | starcoder
package ent
import (
"clock-in/app/record/service/internal/data/ent/record"
"fmt"
"strings"
"time"
"entgo.io/ent/dialect/sql"
)
// Record is the model entity for the Record schema.
// NOTE(review): this file follows the ent code-generation layout (see the
// entgo.io imports) — confirm before hand-editing, as regeneration may
// overwrite changes.
type Record struct {
	config `json:"-"`
	// ID of the ent.
	ID int `json:"id,omitempty"`
	// User holds the value of the "user" field.
	User int64 `json:"user,omitempty"`
	// Day holds the value of the "day" field.
	Day int64 `json:"day,omitempty"`
	// Type holds the value of the "type" field.
	Type int64 `json:"type,omitempty"`
	// CreatedAt holds the value of the "created_at" field.
	CreatedAt time.Time `json:"created_at,omitempty"`
	// UpdatedAt holds the value of the "updated_at" field.
	UpdatedAt time.Time `json:"updated_at,omitempty"`
}
// scanValues returns a destination slice holding one sql.Null* receiver per
// requested column, matched by column name, for use with sql.Rows.Scan.
// An unrecognized column name yields an error.
func (*Record) scanValues(columns []string) ([]interface{}, error) {
	values := make([]interface{}, len(columns))
	for i, column := range columns {
		switch column {
		case record.FieldID, record.FieldUser, record.FieldDay, record.FieldType:
			values[i] = new(sql.NullInt64)
		case record.FieldCreatedAt, record.FieldUpdatedAt:
			values[i] = new(sql.NullTime)
		default:
			return nil, fmt.Errorf("unexpected column %q for type Record", column)
		}
	}
	return values, nil
}
// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the Record fields. The values slice must be parallel to columns, i.e.
// produced by scanValues for the same column list.
func (r *Record) assignValues(columns []string, values []interface{}) error {
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case record.FieldID:
			// NOTE(review): unlike the other fields, the id branch does not
			// check value.Valid — a NULL id scans as 0.
			value, ok := values[i].(*sql.NullInt64)
			if !ok {
				return fmt.Errorf("unexpected type %T for field id", value)
			}
			r.ID = int(value.Int64)
		case record.FieldUser:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field user", values[i])
			} else if value.Valid {
				r.User = value.Int64
			}
		case record.FieldDay:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field day", values[i])
			} else if value.Valid {
				r.Day = value.Int64
			}
		case record.FieldType:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field type", values[i])
			} else if value.Valid {
				r.Type = value.Int64
			}
		case record.FieldCreatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field created_at", values[i])
			} else if value.Valid {
				r.CreatedAt = value.Time
			}
		case record.FieldUpdatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field updated_at", values[i])
			} else if value.Valid {
				r.UpdatedAt = value.Time
			}
		}
	}
	return nil
}
// Update returns a builder for updating this Record.
// Note that you need to call Record.Unwrap() before calling this method if this Record
// was returned from a transaction, and the transaction was committed or rolled back.
func (r *Record) Update() *RecordUpdateOne {
	return (&RecordClient{config: r.config}).UpdateOne(r)
}

// Unwrap unwraps the Record entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
// It panics if the Record was not loaded through a transaction.
func (r *Record) Unwrap() *Record {
	tx, ok := r.config.driver.(*txDriver)
	if !ok {
		panic("ent: Record is not a transactional entity")
	}
	r.config.driver = tx.drv
	return r
}
// String implements fmt.Stringer, rendering every field of the entity in a
// fixed "Record(id=…, user=…, …)" form.
func (r *Record) String() string {
	var b strings.Builder
	b.WriteString("Record(")
	fmt.Fprintf(&b, "id=%v", r.ID)
	fmt.Fprintf(&b, ", user=%v", r.User)
	fmt.Fprintf(&b, ", day=%v", r.Day)
	fmt.Fprintf(&b, ", type=%v", r.Type)
	b.WriteString(", created_at=")
	b.WriteString(r.CreatedAt.Format(time.ANSIC))
	b.WriteString(", updated_at=")
	b.WriteString(r.UpdatedAt.Format(time.ANSIC))
	b.WriteByte(')')
	return b.String()
}
// Records is a parsable slice of Record.
type Records []*Record

// config propagates the given configuration (driver, etc.) to every Record
// in the slice.
func (r Records) config(cfg config) {
	for _i := range r {
		r[_i].config = cfg
	}
} | app/record/service/internal/data/ent/record.go | 0.660501 | 0.402686 | record.go | starcoder
package plaid
import (
"encoding/json"
)
// UserCustomPassword Custom test accounts are configured with a JSON configuration object formulated according to the schema below. All fields are optional. Sending an empty object as a configuration will result in an account configured with random balances and transaction history.
type UserCustomPassword struct {
	// The version of the password schema to use, possible values are 1 or 2. The default value is 2. You should only specify 1 if you know it is necessary for your test suite.
	Version NullableString `json:"version,omitempty"`
	// A seed, in the form of a string, that will be used to randomly generate account and transaction data, if this data is not specified using the `override_accounts` argument. If no seed is specified, the randomly generated data will be different each time. Note that transactions data is generated relative to the Item's creation date. Different Items created on different dates with the same seed for transactions data will have different dates for the transactions. The number of days between each transaction and the Item creation will remain constant. For example, an Item created on December 15 might show a transaction on December 14. An Item created on December 20, using the same seed, would show that same transaction occurring on December 19.
	Seed string `json:"seed"`
	// An array of account overrides to configure the accounts for the Item. By default, if no override is specified, transactions and account data will be randomly generated based on the account type and subtype, and other products will have fixed or empty data.
	OverrideAccounts []OverrideAccounts `json:"override_accounts"`
	Mfa MFA `json:"mfa"`
	// You may trigger a reCAPTCHA in Plaid Link in the Sandbox environment by using the recaptcha field. Possible values are `good` or `bad`. A value of `good` will result in successful Item creation and `bad` will result in a `RECAPTCHA_BAD` error to simulate a failed reCAPTCHA. Both values require the reCAPTCHA to be manually solved within Plaid Link.
	Recaptcha string `json:"recaptcha"`
	// An error code to force on Item creation. Possible values are: `\"INSTITUTION_NOT_RESPONDING\"` `\"INSTITUTION_NO_LONGER_SUPPORTED\"` `\"INVALID_CREDENTIALS\"` `\"INVALID_MFA\"` `\"ITEM_LOCKED\"` `\"ITEM_LOGIN_REQUIRED\"` `\"ITEM_NOT_SUPPORTED\"` `\"INVALID_LINK_TOKEN\"` `\"MFA_NOT_SUPPORTED\"` `\"NO_ACCOUNTS\"` `\"PLAID_ERROR\"` `\"PRODUCTS_NOT_SUPPORTED\"` `\"USER_SETUP_REQUIRED\"`
	ForceError string `json:"force_error"`
	// AdditionalProperties collects any JSON properties not declared in the
	// schema; they are preserved across MarshalJSON/UnmarshalJSON round trips.
	AdditionalProperties map[string]interface{}
}

// _UserCustomPassword is an alias used by UnmarshalJSON to decode the known
// fields without recursing back into the custom UnmarshalJSON method.
type _UserCustomPassword UserCustomPassword
// NewUserCustomPassword instantiates a new UserCustomPassword object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewUserCustomPassword(seed string, overrideAccounts []OverrideAccounts, mfa MFA, recaptcha string, forceError string) *UserCustomPassword {
	this := UserCustomPassword{}
	this.Seed = seed
	this.OverrideAccounts = overrideAccounts
	this.Mfa = mfa
	this.Recaptcha = recaptcha
	this.ForceError = forceError
	return &this
}

// NewUserCustomPasswordWithDefaults instantiates a new UserCustomPassword object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set.
// All required fields are left at their zero values.
func NewUserCustomPasswordWithDefaults() *UserCustomPassword {
	this := UserCustomPassword{}
	return &this
}
// The accessors below follow a uniform generator-style pattern: Get<Field>
// (nil-receiver safe, returns the zero value when unavailable),
// Get<Field>Ok (pointer plus presence flag), Set<Field>, and — for the
// nullable Version field only — Has/SetNil/Unset helpers.

// GetVersion returns the Version field value if set, zero value otherwise (both if not set or set to explicit null).
func (o *UserCustomPassword) GetVersion() string {
	if o == nil || o.Version.Get() == nil {
		var ret string
		return ret
	}
	return *o.Version.Get()
}

// GetVersionOk returns a tuple with the Version field value if set, nil otherwise
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *UserCustomPassword) GetVersionOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return o.Version.Get(), o.Version.IsSet()
}

// HasVersion returns a boolean if a field has been set.
func (o *UserCustomPassword) HasVersion() bool {
	if o != nil && o.Version.IsSet() {
		return true
	}
	return false
}

// SetVersion gets a reference to the given NullableString and assigns it to the Version field.
func (o *UserCustomPassword) SetVersion(v string) {
	o.Version.Set(&v)
}

// SetVersionNil sets the value for Version to be an explicit nil
func (o *UserCustomPassword) SetVersionNil() {
	o.Version.Set(nil)
}

// UnsetVersion ensures that no value is present for Version, not even an explicit nil
func (o *UserCustomPassword) UnsetVersion() {
	o.Version.Unset()
}

// GetSeed returns the Seed field value
func (o *UserCustomPassword) GetSeed() string {
	if o == nil {
		var ret string
		return ret
	}
	return o.Seed
}

// GetSeedOk returns a tuple with the Seed field value
// and a boolean to check if the value has been set.
func (o *UserCustomPassword) GetSeedOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Seed, true
}

// SetSeed sets field value
func (o *UserCustomPassword) SetSeed(v string) {
	o.Seed = v
}

// GetOverrideAccounts returns the OverrideAccounts field value
func (o *UserCustomPassword) GetOverrideAccounts() []OverrideAccounts {
	if o == nil {
		var ret []OverrideAccounts
		return ret
	}
	return o.OverrideAccounts
}

// GetOverrideAccountsOk returns a tuple with the OverrideAccounts field value
// and a boolean to check if the value has been set.
func (o *UserCustomPassword) GetOverrideAccountsOk() (*[]OverrideAccounts, bool) {
	if o == nil {
		return nil, false
	}
	return &o.OverrideAccounts, true
}

// SetOverrideAccounts sets field value
func (o *UserCustomPassword) SetOverrideAccounts(v []OverrideAccounts) {
	o.OverrideAccounts = v
}

// GetMfa returns the Mfa field value
func (o *UserCustomPassword) GetMfa() MFA {
	if o == nil {
		var ret MFA
		return ret
	}
	return o.Mfa
}

// GetMfaOk returns a tuple with the Mfa field value
// and a boolean to check if the value has been set.
func (o *UserCustomPassword) GetMfaOk() (*MFA, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Mfa, true
}

// SetMfa sets field value
func (o *UserCustomPassword) SetMfa(v MFA) {
	o.Mfa = v
}

// GetRecaptcha returns the Recaptcha field value
func (o *UserCustomPassword) GetRecaptcha() string {
	if o == nil {
		var ret string
		return ret
	}
	return o.Recaptcha
}

// GetRecaptchaOk returns a tuple with the Recaptcha field value
// and a boolean to check if the value has been set.
func (o *UserCustomPassword) GetRecaptchaOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Recaptcha, true
}

// SetRecaptcha sets field value
func (o *UserCustomPassword) SetRecaptcha(v string) {
	o.Recaptcha = v
}

// GetForceError returns the ForceError field value
func (o *UserCustomPassword) GetForceError() string {
	if o == nil {
		var ret string
		return ret
	}
	return o.ForceError
}

// GetForceErrorOk returns a tuple with the ForceError field value
// and a boolean to check if the value has been set.
func (o *UserCustomPassword) GetForceErrorOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.ForceError, true
}

// SetForceError sets field value
func (o *UserCustomPassword) SetForceError(v string) {
	o.ForceError = v
}
// MarshalJSON serializes every schema field plus any additional properties
// into a single JSON object. Version is included only when it has been set
// (explicit null included); all other fields are always emitted.
func (o UserCustomPassword) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	if o.Version.IsSet() {
		toSerialize["version"] = o.Version.Get()
	}
	toSerialize["seed"] = o.Seed
	toSerialize["override_accounts"] = o.OverrideAccounts
	toSerialize["mfa"] = o.Mfa
	toSerialize["recaptcha"] = o.Recaptcha
	toSerialize["force_error"] = o.ForceError
	for key, value := range o.AdditionalProperties {
		toSerialize[key] = value
	}
	return json.Marshal(toSerialize)
}
// UnmarshalJSON decodes the declared schema fields via the
// _UserCustomPassword alias (avoiding recursion), then captures any
// remaining top-level properties into AdditionalProperties.
func (o *UserCustomPassword) UnmarshalJSON(bytes []byte) (err error) {
	varUserCustomPassword := _UserCustomPassword{}
	if err = json.Unmarshal(bytes, &varUserCustomPassword); err == nil {
		*o = UserCustomPassword(varUserCustomPassword)
	}
	additionalProperties := make(map[string]interface{})
	if err = json.Unmarshal(bytes, &additionalProperties); err == nil {
		// Drop the declared fields so only truly unknown keys remain.
		delete(additionalProperties, "version")
		delete(additionalProperties, "seed")
		delete(additionalProperties, "override_accounts")
		delete(additionalProperties, "mfa")
		delete(additionalProperties, "recaptcha")
		delete(additionalProperties, "force_error")
		o.AdditionalProperties = additionalProperties
	}
	return err
}
// NullableUserCustomPassword wraps a UserCustomPassword pointer together
// with an explicit "has been set" flag, distinguishing unset from an
// explicit JSON null.
type NullableUserCustomPassword struct {
	value *UserCustomPassword
	isSet bool
}

// Get returns the wrapped value (may be nil).
func (v NullableUserCustomPassword) Get() *UserCustomPassword {
	return v.value
}

// Set stores val and marks the wrapper as set.
func (v *NullableUserCustomPassword) Set(val *UserCustomPassword) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether Set has been called (even with nil).
func (v NullableUserCustomPassword) IsSet() bool {
	return v.isSet
}

// Unset clears both the value and the set flag.
func (v *NullableUserCustomPassword) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullableUserCustomPassword returns a wrapper already marked as set.
func NewNullableUserCustomPassword(val *UserCustomPassword) *NullableUserCustomPassword {
	return &NullableUserCustomPassword{value: val, isSet: true}
}

// MarshalJSON emits the wrapped value; a nil value marshals to JSON null.
func (v NullableUserCustomPassword) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}

// UnmarshalJSON decodes into the wrapped value and marks the wrapper as set.
func (v *NullableUserCustomPassword) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
} | plaid/model_user_custom_password.go | 0.841858 | 0.598635 | model_user_custom_password.go | starcoder
package main
import (
"math"
"math/rand"
)
// GeneticAlgorithm bundles the tunable parameters that drive evolution:
// population size, mutation/crossover probabilities, and the elitism and
// tournament-selection sizes.
type GeneticAlgorithm struct {
	PopulationSize int
	MutationRate   float64
	CrossoverRate  float64
	ElitismCount   int
	TournamentSize int
}

// createGeneticAlgorithm constructs a GeneticAlgorithm configured with the
// supplied parameters.
func createGeneticAlgorithm(populationSize int, mutationRate float64, crossoverRate float64, elitismCount int, tournamentSize int) (geneticAlgorithm GeneticAlgorithm) {
	geneticAlgorithm.PopulationSize = populationSize
	geneticAlgorithm.MutationRate = mutationRate
	geneticAlgorithm.CrossoverRate = crossoverRate
	geneticAlgorithm.ElitismCount = elitismCount
	geneticAlgorithm.TournamentSize = tournamentSize
	return
}
// InitPopulation creates the initial population.
/**
 * Initialize population
 *
 * @param timeTable
 *            The timetable used to size and seed each individual's chromosome
 * @return population The initial population generated
 */
func (ga *GeneticAlgorithm) InitPopulation(timeTable TimeTable) (population Population) {
	// Initialize population
	population = createPopulationByTimeTable(ga.PopulationSize, timeTable)
	return
}
// IsTerminationConditionMet1 reports whether the evolution loop should stop
// because it has already run for more than maxGenerations generations.
//
// generationsCount is the number of generations completed so far;
// maxGenerations is the cut-off after which evolution terminates.
func (ga *GeneticAlgorithm) IsTerminationConditionMet1(generationsCount int, maxGenerations int) bool {
	return generationsCount > maxGenerations
}
// IsTerminationConditionMet2 reports whether evolution can stop because a
// perfect solution has been found.
/**
 * Check if population has met termination condition
 *
 * @param population
 * @return boolean True if the fittest individual's fitness is (within a
 *         small epsilon) equal to 1.0, i.e. a clash-free timetable.
 */
func (ga *GeneticAlgorithm) IsTerminationConditionMet2(population Population) bool {
	return math.Abs(population.GetFittest(0).Fitness-1.0) < 0.00001
}
// calcFitness evaluates one individual and stores the result on it.
/**
 * Calculate individual's fitness value
 *
 * @param individual Evaluated in place: its Fitness field is updated.
 * @param timetable  Cloned so the shared timetable is not mutated.
 * @return fitness   1 / (clashes + 1), so 1.0 means a clash-free schedule.
 */
func calcFitness(individual *Individual, timeTable TimeTable) float64 {
	// Create new timetable object to use -- cloned from an existing timetable
	threadTimeTable := timeTable
	threadTimeTable.createClasses(individual)
	// Calculate fitness
	clashes := threadTimeTable.calcClashes()
	fitness := 1 / (float64)(clashes+1)
	individual.Fitness = fitness
	return fitness
}
// EvalPopulation evaluates every individual and records the summed fitness.
/**
 * Evaluate population
 *
 * @param population Updated in place: each Individual's Fitness and the
 *                   population's PopulationFitness are set.
 * @param timetable
 */
func (ga *GeneticAlgorithm) EvalPopulation(population *Population, timeTable TimeTable) {
	var populationFitness float64
	// Loop over population evaluating individuals and summing population
	// fitness.
	// The index-based loop below is deliberate: the commented-out range
	// variant iterates over *copies* of each Individual, so calcFitness's
	// write to individual.Fitness would be lost.
	/*for _, individual := range population.Population {
		populationFitness += calcFitness(&individual, timeTable)
	}*/
	for i := range population.Population {
		populationFitness += calcFitness(&population.Population[i], timeTable)
	}
	//fmt.Println(populationFitness, population.Population[0])
	population.PopulationFitness = populationFitness
}
// SelectParent picks a crossover parent via tournament selection.
/**
 * Selects parent for crossover using tournament selection
 *
 * Tournament selection works by choosing N random individuals, and then
 * choosing the best of those.
 *
 * @param population
 * @return The individual selected as a parent
 */
func (ga *GeneticAlgorithm) SelectParent(population Population) Individual {
	// Create tournament
	tournament := Population{
		Population:        make([]Individual, ga.TournamentSize),
		PopulationFitness: 0,
	}
	// Add random individuals to the tournament.
	// NOTE(review): shuffle presumably reorders population.Population in
	// place; since that slice is shared with the caller, this mutates the
	// caller's ordering mid-iteration — confirm callers do not depend on it.
	population.shuffle()
	for i := 0; i < ga.TournamentSize; i++ {
		tournamentIndividual := population.Population[i]
		tournament.Population[i] = tournamentIndividual
	}
	// Return the best
	return tournament.GetFittest(0)
}
// MutatePopulation produces a new population with random gene mutations.
/**
 * Apply mutation to population
 *
 * @param population
 * @param timetable Used to generate a random donor individual per slot.
 * @return The mutated population
 */
func (ga *GeneticAlgorithm) MutatePopulation(population Population, timeTable TimeTable) Population {
	// Initialize new population
	newPopulation := Population{
		Population:        make([]Individual, ga.PopulationSize),
		PopulationFitness: 0,
	}
	// Loop over current population by fitness
	for populationIndex := 0; populationIndex < len(population.Population); populationIndex++ {
		individual := population.GetFittest(populationIndex)
		// Create random individual to swap genes with
		randomIndividual := createIndividual(timeTable)
		// Loop over individual's genes
		for geneIndex := 0; geneIndex < len(individual.Chromosome); geneIndex++ {
			// Skip mutation if this is an elite individual.
			// NOTE(review): the strict '>' protects indices 0..ElitismCount
			// inclusive, i.e. ElitismCount+1 individuals — confirm this
			// off-by-one is intended.
			if populationIndex > ga.ElitismCount {
				// Does this gene need mutation?
				if ga.MutationRate > rand.Float64() {
					// Swap for new gene
					individual.Chromosome[geneIndex] = randomIndividual.Chromosome[geneIndex]
				}
			}
		}
		// Add individual to population
		newPopulation.Population[populationIndex] = individual
	}
	// Return mutated population
	return newPopulation
}
// CrossoverPopulation produces a new population by uniform crossover.
/* Apply crossover to population
 *
 * Elite individuals (the first ElitismCount by fitness) are copied through
 * unchanged; the rest are crossed with a tournament-selected second parent
 * at probability CrossoverRate, each gene drawn 50/50 from either parent.
 *
 * @param population The population to apply crossover to
 * @return The new population
 */
func (ga *GeneticAlgorithm) CrossoverPopulation(population Population) Population {
	// Create new population
	newPopulation := Population{
		Population:        make([]Individual, len(population.Population)),
		PopulationFitness: 0,
	}
	// Loop over current population by fitness
	for populationIndex := 0; populationIndex < len(population.Population); populationIndex++ {
		parent1 := population.GetFittest(populationIndex)
		// Apply crossover to this individual?
		if ga.CrossoverRate > rand.Float64() && populationIndex >= ga.ElitismCount {
			// Initialize offspring
			offspring := Individual{
				Chromosome: make([]int, len(parent1.Chromosome)),
				Fitness:    0,
			}
			// Find second parent
			parent2 := ga.SelectParent(population)
			// Loop over genome
			for geneIndex := 0; geneIndex < len(parent1.Chromosome); geneIndex++ {
				// Use half of parent1's genes and half of parent2's genes
				if 0.5 > rand.Float64() {
					offspring.Chromosome[geneIndex] = parent1.Chromosome[geneIndex]
				} else {
					offspring.Chromosome[geneIndex] = parent2.Chromosome[geneIndex]
				}
			}
			// Add offspring to new population
			newPopulation.Population[populationIndex] = offspring
		} else {
			// Add individual to new population without applying crossover
			newPopulation.Population[populationIndex] = parent1
		}
	}
	return newPopulation
} | schedule-ga/geneticalgorithm.go | 0.822082 | 0.521654 | geneticalgorithm.go | starcoder
package process
import (
"encoding/binary"
"math"
)
// TagEncoder appends groups of tags into a single shared byte buffer and
// hands back the index at which each group starts.
type TagEncoder interface {
	// Buffer returns the underlying byte buffer that the tags were encoded in to
	Buffer() []byte
	// Encode encodes the given tags in to the buffer and returns the index in the buffer where the data begins
	Encode(tags []string) int
}

// Version for the encoding format
const (
	version1 = 1
	version2 = 2
)

// Groups of tags are successively encoded in to a single buffer. For each group of tags, the format is:
// - Number of tags encoded as a 2-byte uint16.
// - For each tag, write the length of the tag as a 2-byte uint16 followed by the tag bytes.
type v1TagEncoder struct {
	buffer []byte
}

// NewTagEncoder creates an empty tag encoder
func NewTagEncoder() TagEncoder {
	// Reserve the first byte to version the format
	initialBuf := []byte{version1}
	return &v1TagEncoder{buffer: initialBuf}
}

// Buffer returns the shared backing buffer holding every group encoded so far.
func (t *v1TagEncoder) Buffer() []byte {
	return t.buffer
}

// Encode appends the given tags as one group to the shared buffer and
// returns the offset at which the group starts. The tag count and each
// tag's length are capped at math.MaxUint16 to fit their 2-byte prefixes.
func (t *v1TagEncoder) Encode(tags []string) int {
	// We only allow 2 bytes for the number of the tags, ensure we don't exceed it
	if len(tags) > math.MaxUint16 {
		tags = tags[0:math.MaxUint16]
	}
	// Compute the exact number of bytes this group will occupy, clamping each
	// tag's length the same way the write loop below does.
	// BUG FIX: the previous code used bufferSize(tags), which is computed
	// from untruncated tag lengths; for tags longer than math.MaxUint16 it
	// overestimated, and the buffer-reuse branch at the bottom then exposed
	// uninitialized bytes at the end of the buffer.
	required := 2
	for _, tag := range tags {
		tagLen := len(tag)
		if tagLen > math.MaxUint16 {
			tagLen = math.MaxUint16
		}
		required += 2 + tagLen
	}
	// Check to see if there is enough spare capacity to encode in place
	// rather than allocating a temporary buffer.
	newBufferRequired := (cap(t.buffer) - len(t.buffer)) < required
	tagBuffer := t.buffer[len(t.buffer):]
	if newBufferRequired {
		tagBuffer = make([]byte, 0, required)
	}
	var sizeBuf [2]byte
	binary.LittleEndian.PutUint16(sizeBuf[0:], uint16(len(tags)))
	tagBuffer = append(tagBuffer, sizeBuf[0:]...)
	for _, tag := range tags {
		// We only allow 2 bytes for the length of the tag, ensure we don't exceed it
		if len(tag) > math.MaxUint16 {
			tag = tag[0:math.MaxUint16]
		}
		binary.LittleEndian.PutUint16(sizeBuf[0:], uint16(len(tag)))
		tagBuffer = append(tagBuffer, sizeBuf[0:]...)
		tagBuffer = append(tagBuffer, tag...)
	}
	// The index for these tags is the current end of the buffer
	tagIndex := len(t.buffer)
	if newBufferRequired {
		t.buffer = append(t.buffer, tagBuffer...)
	} else {
		// The appends above wrote in place into the spare capacity, so
		// extend by the number of bytes actually written.
		t.buffer = t.buffer[0 : len(t.buffer)+len(tagBuffer)]
	}
	return tagIndex
}
// getTags decodes and returns the tag group starting at tagIndex,
// dispatching on the format version stored in the buffer's first byte.
// It returns nil for an empty buffer, a negative index, or an unknown
// version.
func getTags(buffer []byte, tagIndex int) []string {
	if len(buffer) == 0 || tagIndex < 0 {
		return nil
	}
	if buffer[0] == version1 {
		return decodeV1(buffer, tagIndex)
	}
	if buffer[0] == version2 {
		return decodeV2(buffer, tagIndex)
	}
	return nil
}
// iterateTags walks the tag group starting at tagIndex with cb, dispatching
// on the format version stored in the buffer's first byte. Empty buffers,
// negative indices and unknown versions are silently ignored.
func iterateTags(buffer []byte, tagIndex int, cb func(i, total int, tag string) bool) {
	if len(buffer) == 0 || tagIndex < 0 {
		return
	}
	if buffer[0] == version1 {
		iterateV1(buffer, tagIndex, cb)
	} else if buffer[0] == version2 {
		iterateV2(buffer, tagIndex, cb)
	}
}
// decodeV1 materializes the v1 tag group starting at tagIndex into a
// []string. An empty group yields nil.
func decodeV1(buffer []byte, tagIndex int) []string {
	var out []string
	iterateV1(buffer, tagIndex, func(i, total int, tag string) bool {
		if i == 0 {
			out = make([]string, 0, total)
		}
		out = append(out, tag)
		return true
	})
	return out
}

// iterateV1 walks the v1 tag group starting at tagIndex, invoking cb once
// per tag with its position, the group's total tag count, and the tag text.
// Iteration stops early if cb returns false.
func iterateV1(buffer []byte, tagIndex int, cb func(i, total int, tag string) bool) {
	group := buffer[tagIndex:]
	total := int(binary.LittleEndian.Uint16(group))
	offset := 2
	for i := 0; i < total; i++ {
		size := int(binary.LittleEndian.Uint16(group[offset:]))
		offset += 2
		if !cb(i, total, string(group[offset:offset+size])) {
			return
		}
		offset += size
	}
}
// bufferSize returns the number of bytes required to store the given tags.
// NOTE(review): lengths are summed before any MaxUint16 truncation, so for
// tags longer than math.MaxUint16 this is an overestimate of the bytes
// actually written; treat the result as an upper bound.
func bufferSize(tags []string) int {
	// Include space for the number of tags
	bufferSize := 2
	for _, tag := range tags {
		// Include space for the length of the tag and the tag itself
		bufferSize += 2 + len(tag)
	}
	return bufferSize
} | process/tags.go | 0.731922 | 0.449513 | tags.go | starcoder
package metrics
import (
"sync"
"time"
"go.opentelemetry.io/otel/attribute"
)
const (
	// cleanInterval is the TTL handed to the calculator's expiry map.
	cleanInterval = 5 * time.Minute
)

// CalculateFunc defines how to process metric values by the calculator. It
// passes previously received MetricValue, and the current raw value and timestamp
// as parameters. Returns true if the calculation is executed successfully.
type CalculateFunc func(prev *MetricValue, val interface{}, timestamp time.Time) (interface{}, bool)

// NewFloat64DeltaCalculator returns a MetricCalculator that produces deltas
// between successive float64 observations.
func NewFloat64DeltaCalculator() MetricCalculator {
	return NewMetricCalculator(calculateDelta)
}

// calculateDelta returns the difference between val and the previously
// cached raw value, both taken as float64. The first observation is
// returned as-is; a negative delta is clamped to 0 (presumably a counter
// reset — verify against the metric sources). Always reports success.
func calculateDelta(prev *MetricValue, val interface{}, timestamp time.Time) (interface{}, bool) {
	deltaValue := val.(float64)
	if prev != nil {
		deltaValue = deltaValue - prev.RawValue.(float64)
		if deltaValue < 0 {
			return float64(0), true
		}
	}
	return deltaValue, true
}
// MetricCalculator is a calculator used to adjust metric values based on its previous record.
type MetricCalculator struct {
	// lock serializes Calculate; taken on every call (reads and writes).
	lock sync.Mutex
	// cache stores data with expiry time. The expiry is not supported at the moment.
	cache *MapWithExpiry
	// calculateFunc is the delegation for data processing
	calculateFunc CalculateFunc
}

// NewMetricCalculator builds a calculator around the given CalculateFunc,
// backed by a fresh cache with the default cleanInterval TTL.
func NewMetricCalculator(calculateFunc CalculateFunc) MetricCalculator {
	return MetricCalculator{
		cache:         NewMapWithExpiry(cleanInterval),
		calculateFunc: calculateFunc,
	}
}
// Calculate accepts a new metric value identified by metricName and labels, and delegates
// the calculation with value and timestamp back to CalculateFunc for the result. Returns
// true if the calculation is executed successfully.
// The cached entry is (re)written when the key is new or the calculation
// succeeded; on failure for an existing key the previous value is kept.
// NOTE(review): nothing here invokes the cache's CleanUp, so entries are
// never expired by this path — confirm expiry is driven elsewhere.
func (rm *MetricCalculator) Calculate(metricName string, labels map[string]string, value interface{}, timestamp time.Time) (interface{}, bool) {
	k := NewKey(metricName, labels)
	cacheStore := rm.cache
	var result interface{}
	done := false
	// Serialize all cache access through the calculator's own mutex
	// (the MapWithExpiry lock is not used here).
	rm.lock.Lock()
	defer rm.lock.Unlock()
	prev, exists := cacheStore.Get(k)
	result, done = rm.calculateFunc(prev, value, timestamp)
	if !exists || done {
		cacheStore.Set(k, MetricValue{
			RawValue:  value,
			Timestamp: timestamp,
		})
	}
	return result, done
}
// Key uniquely identifies a cached metric: some metric metadata (typically
// the metric name) plus an order-independent, de-duplicated form of its
// labels, so it is usable as a map key.
type Key struct {
	MetricMetadata interface{}
	MetricLabels   attribute.Distinct
}

// NewKey builds a Key whose label component is canonicalized through an
// OpenTelemetry attribute set, so maps containing the same key/value pairs
// produce equal Keys regardless of map iteration order.
func NewKey(metricMetadata interface{}, labels map[string]string) Key {
	var kvs []attribute.KeyValue
	var sortable attribute.Sortable
	for k, v := range labels {
		kvs = append(kvs, attribute.String(k, v))
	}
	set := attribute.NewSetWithSortable(kvs, &sortable)
	dedupSortedLabels := set.Equivalent()
	return Key{
		MetricMetadata: metricMetadata,
		MetricLabels:   dedupSortedLabels,
	}
}
// MetricValue is a cached raw observation together with the time it was
// recorded.
type MetricValue struct {
	RawValue  interface{}
	Timestamp time.Time
}

// MapWithExpiry act like a map which provide a method to clean up expired entries
type MapWithExpiry struct {
	lock    *sync.Mutex
	ttl     time.Duration
	entries map[interface{}]*MetricValue
}

// NewMapWithExpiry returns an empty map whose entries become eligible for
// removal after ttl, once CleanUp is invoked.
func NewMapWithExpiry(ttl time.Duration) *MapWithExpiry {
	return &MapWithExpiry{lock: &sync.Mutex{}, ttl: ttl, entries: make(map[interface{}]*MetricValue)}
}
// Get returns the entry stored under key and whether it exists.
// NOTE(review): Get/Set/CleanUp/Size do not acquire the internal lock;
// callers are expected to serialize access themselves, either via
// Lock/Unlock below or, as MetricCalculator does, with an external mutex.
func (m *MapWithExpiry) Get(key Key) (*MetricValue, bool) {
	v, ok := m.entries[key]
	return v, ok
}

// Set stores (a copy of) value under key, replacing any previous entry.
func (m *MapWithExpiry) Set(key Key, value MetricValue) {
	m.entries[key] = &value
}

// CleanUp removes every entry whose timestamp is at least ttl older than now.
func (m *MapWithExpiry) CleanUp(now time.Time) {
	for k, v := range m.entries {
		if now.Sub(v.Timestamp) >= m.ttl {
			delete(m.entries, k)
		}
	}
}

// Size returns the current number of entries.
func (m *MapWithExpiry) Size() int {
	return len(m.entries)
}

// Lock acquires the map's mutex on behalf of the caller.
func (m *MapWithExpiry) Lock() {
	m.lock.Lock()
}

// Unlock releases the map's mutex on behalf of the caller.
func (m *MapWithExpiry) Unlock() {
	m.lock.Unlock()
} | internal/aws/metrics/metric_calculator.go | 0.762247 | 0.410225 | metric_calculator.go | starcoder
package main
/**
给定两个字符串text1 和text2,返回这两个字符串的最长 公共子序列 的长度。如果不存在 公共子序列 ,返回 0 。
一个字符串的子序列是指这样一个新的字符串:它是由原字符串在不改变字符的相对顺序的情况下删除某些字符(也可以不删除任何字符)后组成的新字符串。
例如,"ace" 是 "abcde" 的子序列,但 "aec" 不是 "abcde" 的子序列。
两个字符串的 公共子序列 是这两个字符串所共同拥有的子序列。
示例 1:
输入:text1 = "abcde", text2 = "ace"
输出:3
解释:最长公共子序列是 "ace" ,它的长度为 3 。
示例 2:
输入:text1 = "abc", text2 = "abc"
输出:3
解释:最长公共子序列是 "abc" ,它的长度为 3 。
示例 3:
输入:text1 = "abc", text2 = "def"
输出:0
解释:两个字符串没有公共子序列,返回 0 。
提示:
1 <= text1.length, text2.length <= 1000
text1 和text2 仅由小写英文字符组成。
*/
/*
longestCommonSubsequence returns the length of the longest common
subsequence of text1 and text2 using bottom-up dynamic programming:
dp[r][c] holds the LCS length of text1[:r] and text2[:c], so the answer
is dp[len(text1)][len(text2)]. Runs in O(len(text1)*len(text2)).
*/
func longestCommonSubsequence(text1 string, text2 string) int {
	rows, cols := len(text1), len(text2)
	dp := make([][]int, rows+1)
	for r := range dp {
		dp[r] = make([]int, cols+1)
	}
	for r := 1; r <= rows; r++ {
		for c := 1; c <= cols; c++ {
			if text1[r-1] == text2[c-1] {
				// Matching characters extend the diagonal subsequence.
				dp[r][c] = dp[r-1][c-1] + 1
			} else {
				// Otherwise keep the better of dropping one character
				// from either string.
				dp[r][c] = max(dp[r-1][c], dp[r][c-1])
			}
		}
	}
	return dp[rows][cols]
}

// max returns the larger of two ints.
func max(i int, j int) int {
	if j > i {
		return j
	}
	return i
}
/**
Java版
class Solution {
public int longestCommonSubsequence(String text1, String text2) {
int m = text1.length();
int n = text2.length();
int[][] dp = new int[m + 1][n + 1];
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
if (text1.charAt(i) == text2.charAt(j)) {
dp[i + 1][j + 1] = dp[i][j] + 1;
} else {
dp[i + 1][j + 1] = Math.max(dp[i][j + 1], dp[i + 1][j]);
}
}
}
return dp[m][n];
}
}
class Solution {
public int longestCommonSubsequence(String text1, String text2) {
int n = text1.length();
int m = text2.length();
int[][] dp = new int[n + 1][m + 1];
for (int i = 1; i < n + 1; i++) {
for (int j = 1; j < m + 1; j++) {
if (text1.charAt(i - 1) == text2.charAt(j - 1)) {
dp[i][j] = dp[i - 1][j - 1] + 1;
} else {
dp[i][j] = Math.max(dp[i - 1][j], dp[i][j - 1]);
}
}
}
return dp[n][m];
}
}
*/
// main exercises the solver with the sample input from the problem
// statement. NOTE(review): the return value is discarded; print it if
// output is desired.
func main() {
	longestCommonSubsequence("abcde", "ace")
} | leetcode/longestCommonSubsequence/longestCommonSubsequence.go | 0.524882 | 0.41561 | longestCommonSubsequence.go | starcoder
package iso20022
// Extract of trade data for an investment fund switch order.
// FundOrderData2 is an extract of trade data for an investment fund switch
// order. All fields are optional pointers serialized as XML elements.
type FundOrderData2 struct {
	// Amount of money used to derive the quantity of investment fund units to be redeemed.
	TotalRedemptionAmount *ActiveOrHistoricCurrencyAndAmount `xml:"TtlRedAmt,omitempty"`
	// Amount of money used to derive the quantity of investment fund units to be subscribed.
	TotalSubscriptionAmount *ActiveOrHistoricCurrencyAndAmount `xml:"TtlSbcptAmt,omitempty"`
	// Amount of money to be transferred between the debtor and creditor before bank transaction charges.
	SettlementAmount *ActiveCurrencyAndAmount `xml:"SttlmAmt,omitempty"`
	// Method by which the transaction is settled.
	SettlementMethod *DeliveryReceiptType2Code `xml:"SttlmMtd,omitempty"`
	// Additional amount of money paid by the investor in addition to the switch redemption amount.
	AdditionalCashIn *ActiveOrHistoricCurrencyAndAmount `xml:"AddtlCshIn,omitempty"`
	// Amount of money that results from a switch-out, that is not reinvested in another investment fund, and is repaid to the investor.
	ResultingCashOut *ActiveOrHistoricCurrencyAndAmount `xml:"RsltgCshOut,omitempty"`
	// Currency in which the rate of exchange is expressed in a currency exchange. In the example 1GBP = xxxCUR, the unit currency is GBP.
	UnitCurrency *ActiveOrHistoricCurrencyCode `xml:"UnitCcy,omitempty"`
	// Currency into which the base currency is converted, in a currency exchange.
	QuotedCurrency *ActiveOrHistoricCurrencyCode `xml:"QtdCcy,omitempty"`
}
func (f *FundOrderData2) SetTotalRedemptionAmount(value, currency string) {
f.TotalRedemptionAmount = NewActiveOrHistoricCurrencyAndAmount(value, currency)
}
func (f *FundOrderData2) SetTotalSubscriptionAmount(value, currency string) {
f.TotalSubscriptionAmount = NewActiveOrHistoricCurrencyAndAmount(value, currency)
}
func (f *FundOrderData2) SetSettlementAmount(value, currency string) {
f.SettlementAmount = NewActiveCurrencyAndAmount(value, currency)
}
func (f *FundOrderData2) SetSettlementMethod(value string) {
f.SettlementMethod = (*DeliveryReceiptType2Code)(&value)
}
func (f *FundOrderData2) SetAdditionalCashIn(value, currency string) {
f.AdditionalCashIn = NewActiveOrHistoricCurrencyAndAmount(value, currency)
}
func (f *FundOrderData2) SetResultingCashOut(value, currency string) {
f.ResultingCashOut = NewActiveOrHistoricCurrencyAndAmount(value, currency)
}
func (f *FundOrderData2) SetUnitCurrency(value string) {
f.UnitCurrency = (*ActiveOrHistoricCurrencyCode)(&value)
}
func (f *FundOrderData2) SetQuotedCurrency(value string) {
f.QuotedCurrency = (*ActiveOrHistoricCurrencyCode)(&value)
} | FundOrderData2.go | 0.837454 | 0.512083 | FundOrderData2.go | starcoder |
package activitypub
import (
"bytes"
"encoding/gob"
"github.com/valyala/fastjson"
)
// LinkTypes represent the valid values for a Link object's Type property.
var LinkTypes = ActivityVocabularyTypes{
	LinkType,
	MentionType,
}

// Links is the generic constraint for values that can act as a link:
// either a full Link object or a bare IRI.
type Links interface {
	Link | IRI
}
// A Link is an indirect, qualified reference to a resource identified by a URL.
// The fundamental model for links is established by [RFC5988].
// Many of the properties defined by the Activity Vocabulary allow values that are either instances of APObject or Link.
// When a Link is used, it establishes a qualified relation connecting the subject
// (the containing object) to the resource identified by the href.
// Properties of the Link are properties of the reference as opposed to properties of the resource.
type Link struct {
	// Provides the globally unique identifier for an APObject or Link.
	ID ID `jsonld:"id,omitempty"`
	// Identifies the APObject or Link type. Multiple values may be specified.
	Type ActivityVocabularyType `jsonld:"type,omitempty"`
	// A simple, human-readable, plain-text name for the object.
	// HTML markup MUST NOT be included. The name MAY be expressed using multiple language-tagged values.
	Name NaturalLanguageValues `jsonld:"name,omitempty,collapsible"`
	// A link relation associated with a Link. The value must conform to both the [HTML5] and
	// [RFC5988](https://tools.ietf.org/html/rfc5988) "link relation" definitions.
	// In the [HTML5], any string not containing the "space" U+0020, "tab" (U+0009), "LF" (U+000A),
	// "FF" (U+000C), "CR" (U+000D) or "," (U+002C) characters can be used as a valid link relation.
	Rel IRI `jsonld:"rel,omitempty"`
	// When used on a Link, identifies the MIME media type of the referenced resource.
	MediaType MimeType `jsonld:"mediaType,omitempty"`
	// On a Link, specifies a hint as to the rendering height in device-independent pixels of the linked resource.
	Height uint `jsonld:"height,omitempty"`
	// On a Link, specifies a hint as to the rendering width in device-independent pixels of the linked resource.
	Width uint `jsonld:"width,omitempty"`
	// Identifies an entity that provides a preview of this object.
	Preview Item `jsonld:"preview,omitempty"`
	// The target resource pointed to by a Link.
	Href IRI `jsonld:"href,omitempty"`
	// Hints as to the language used by the target resource.
	// Value must be a [BCP47](https://tools.ietf.org/html/bcp47) Language-Tag.
	HrefLang LangRef `jsonld:"hrefLang,omitempty"`
}
// Mention is a specialized Link that represents an @mention.
// It is a type alias of Link, so every Link method applies unchanged.
type Mention = Link
// LinkNew initializes a new Link with the given ID. A type that is not one of
// the valid LinkTypes falls back to the generic LinkType.
func LinkNew(id ID, typ ActivityVocabularyType) *Link {
	l := Link{ID: id, Type: typ}
	if !LinkTypes.Contains(l.Type) {
		l.Type = LinkType
	}
	return &l
}
// MentionNew initializes a new Mention (an @mention link) with the given ID.
func MentionNew(id ID) *Mention {
	m := Mention{ID: id, Type: MentionType}
	return &m
}
// IsLink validates if current Link is a Link.
func (l Link) IsLink() bool {
	return l.Type == LinkType || LinkTypes.Contains(l.Type)
}

// IsObject validates if current Link is an Object.
func (l Link) IsObject() bool {
	return l.Type == ObjectType || ObjectTypes.Contains(l.Type)
}

// IsCollection returns false for Link objects; a link is never a collection.
func (l Link) IsCollection() bool {
	return false
}

// GetID returns the ID corresponding to the Link object.
func (l Link) GetID() ID {
	return l.ID
}

// GetLink returns the IRI corresponding to the current Link.
func (l Link) GetLink() IRI {
	return IRI(l.ID)
}

// GetType returns the Type of the current Link.
func (l Link) GetType() ActivityVocabularyType {
	return l.Type
}
// MarshalJSON encodes the receiver object to a JSON document.
// When the Link produces no properties, both return values are nil
// (no document and no error).
func (l Link) MarshalJSON() ([]byte, error) {
	b := make([]byte, 0)
	write(&b, '{')
	if writeLinkJSONValue(&b, l) {
		write(&b, '}')
		return b, nil
	}
	return nil, nil
}

// UnmarshalJSON decodes an incoming JSON document into the receiver object.
func (l *Link) UnmarshalJSON(data []byte) error {
	p := fastjson.Parser{}
	val, err := p.ParseBytes(data)
	if err != nil {
		return err
	}
	return loadLink(val, l)
}

// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
// It delegates to the gob decoding below.
func (l *Link) UnmarshalBinary(data []byte) error {
	return l.GobDecode(data)
}

// MarshalBinary implements the encoding.BinaryMarshaler interface.
// It delegates to the gob encoding below.
func (l Link) MarshalBinary() ([]byte, error) {
	return l.GobEncode()
}
// GobEncode serializes the Link as a gob-encoded map of property names to raw
// bytes. A Link with no set properties encodes to an empty byte slice.
func (l Link) GobEncode() ([]byte, error) {
	mm := make(map[string][]byte)
	hasData, err := mapLinkProperties(mm, l)
	if err != nil {
		return nil, err
	}
	if !hasData {
		return []byte{}, nil
	}
	bb := bytes.Buffer{}
	g := gob.NewEncoder(&bb)
	if err := g.Encode(mm); err != nil {
		return nil, err
	}
	return bb.Bytes(), nil
}

// GobDecode restores a Link from the map-of-properties format produced by
// GobEncode. Empty input is treated as an empty Link and is not an error.
func (l *Link) GobDecode(data []byte) error {
	if len(data) == 0 {
		return nil
	}
	mm, err := gobDecodeObjectAsMap(data)
	if err != nil {
		return err
	}
	return unmapLinkProperties(mm, l)
}
package bw761
import "math/big"
// e6 is a degree-three finite field extension of fp2.
// An element represents B0 + B1*v + B2*v**2 (see String below).
type e6 struct {
	B0, B1, B2 e2
}
// Equal returns true if z equals x, false otherwise.
// TODO can this be deleted? Should be able to use == operator instead
func (z *e6) Equal(x *e6) bool {
	return z.B0.Equal(&x.B0) && z.B1.Equal(&x.B1) && z.B2.Equal(&x.B2)
}

// SetString sets a e6 elmt from strings: two per e2 coefficient,
// in the order (B0, B1, B2).
func (z *e6) SetString(s1, s2, s3, s4, s5, s6 string) *e6 {
	z.B0.SetString(s1, s2)
	z.B1.SetString(s3, s4)
	z.B2.SetString(s5, s6)
	return z
}

// Set sets a e6 elmt from another e6 elmt (coefficient-wise copy).
func (z *e6) Set(x *e6) *e6 {
	z.B0 = x.B0
	z.B1 = x.B1
	z.B2 = x.B2
	return z
}

// SetOne sets z to 1 in Montgomery form and returns z
// (B0.A0 = 1, every other coordinate zeroed).
func (z *e6) SetOne() *e6 {
	z.B0.A0.SetOne()
	z.B0.A1.SetZero()
	z.B1.A0.SetZero()
	z.B1.A1.SetZero()
	z.B2.A0.SetZero()
	z.B2.A1.SetZero()
	return z
}

// SetRandom sets z to a random elmt (each e2 coefficient randomized).
func (z *e6) SetRandom() *e6 {
	z.B0.SetRandom()
	z.B1.SetRandom()
	z.B2.SetRandom()
	return z
}
// ToMont converts to Mont form, coefficient-wise.
func (z *e6) ToMont() *e6 {
	z.B0.ToMont()
	z.B1.ToMont()
	z.B2.ToMont()
	return z
}

// FromMont converts from Mont form, coefficient-wise.
func (z *e6) FromMont() *e6 {
	z.B0.FromMont()
	z.B1.FromMont()
	z.B2.FromMont()
	return z
}

// Add adds two elements of e6 coefficient-wise and returns z.
func (z *e6) Add(x, y *e6) *e6 {
	z.B0.Add(&x.B0, &y.B0)
	z.B1.Add(&x.B1, &y.B1)
	z.B2.Add(&x.B2, &y.B2)
	return z
}

// Neg negates the e6 number coefficient-wise and returns z.
func (z *e6) Neg(x *e6) *e6 {
	z.B0.Neg(&x.B0)
	z.B1.Neg(&x.B1)
	z.B2.Neg(&x.B2)
	return z
}

// Sub subtracts two elements of e6 coefficient-wise and returns z.
func (z *e6) Sub(x, y *e6) *e6 {
	z.B0.Sub(&x.B0, &y.B0)
	z.B1.Sub(&x.B1, &y.B1)
	z.B2.Sub(&x.B2, &y.B2)
	return z
}

// Double doubles an element in e6 coefficient-wise and returns z.
func (z *e6) Double(x *e6) *e6 {
	z.B0.Double(&x.B0)
	z.B1.Double(&x.B1)
	z.B2.Double(&x.B2)
	return z
}

// String puts e6 elmt in string form: B0 + B1*v + B2*v**2.
func (z *e6) String() string {
	return (z.B0.String() + "+(" + z.B1.String() + ")*v+(" + z.B2.String() + ")*v**2")
}
// Mul sets z to the e6-product of x,y, returns z.
// The interpolation-style schoolbook reduction below follows the cited paper;
// statement order matters because intermediates are reused.
func (z *e6) Mul(x, y *e6) *e6 {
	// Algorithm 13 from https://eprint.iacr.org/2010/354.pdf
	var t0, t1, t2, c0, c1, c2, tmp e2
	t0.Mul(&x.B0, &y.B0)
	t1.Mul(&x.B1, &y.B1)
	t2.Mul(&x.B2, &y.B2)
	c0.Add(&x.B1, &x.B2)
	tmp.Add(&y.B1, &y.B2)
	c0.Mul(&c0, &tmp).Sub(&c0, &t1).Sub(&c0, &t2).MulByNonResidue(&c0).Add(&c0, &t0)
	c1.Add(&x.B0, &x.B1)
	tmp.Add(&y.B0, &y.B1)
	c1.Mul(&c1, &tmp).Sub(&c1, &t0).Sub(&c1, &t1)
	tmp.MulByNonResidue(&t2)
	c1.Add(&c1, &tmp)
	tmp.Add(&x.B0, &x.B2)
	c2.Add(&y.B0, &y.B2).Mul(&c2, &tmp).Sub(&c2, &t0).Sub(&c2, &t2).Add(&c2, &t1)
	// Results are computed into locals first so x/y may alias z safely.
	z.B0.Set(&c0)
	z.B1.Set(&c1)
	z.B2.Set(&c2)
	return z
}

// MulAssign sets z to the e6-product of z,y, returns z.
func (z *e6) MulAssign(x *e6) *e6 {
	z.Mul(z, x)
	return z
}

// MulByE2 multiplies x by an element of e2, coefficient-wise.
func (z *e6) MulByE2(x *e6, y *e2) *e6 {
	// y is copied first — NOTE(review): presumably so the result stays
	// correct when y aliases a coordinate of x or z; confirm with callers.
	var yCopy e2
	yCopy.Set(y)
	z.B0.Mul(&x.B0, &yCopy)
	z.B1.Mul(&x.B1, &yCopy)
	z.B2.Mul(&x.B2, &yCopy)
	return z
}
// Square sets z to the e6-product of x,x, returns z.
// Intermediates are reused across steps; do not reorder.
func (z *e6) Square(x *e6) *e6 {
	// Algorithm 16 from https://eprint.iacr.org/2010/354.pdf
	var c4, c5, c1, c2, c3, c0 e2
	c4.Mul(&x.B0, &x.B1).Double(&c4)
	c5.Square(&x.B2)
	c1.MulByNonResidue(&c5).Add(&c1, &c4)
	c2.Sub(&c4, &c5)
	c3.Square(&x.B0)
	c4.Sub(&x.B0, &x.B1).Add(&c4, &x.B2)
	c5.Mul(&x.B1, &x.B2).Double(&c5)
	c4.Square(&c4)
	c0.MulByNonResidue(&c5).Add(&c0, &c3)
	// B2 is written first from locals, then B0/B1, so x may alias z.
	z.B2.Add(&c2, &c4).Add(&z.B2, &c5).Sub(&z.B2, &c3)
	z.B0.Set(&c0)
	z.B1.Set(&c1)
	return z
}

// CyclotomicSquare squares an element of the cyclotomic subgroup.
// https://eprint.iacr.org/2009/565.pdf, 3.2
func (z *e6) CyclotomicSquare(x *e6) *e6 {
	var res, a e6
	var tmp e2
	// A
	res.B0.Square(&x.B0)
	a.B0.Conjugate(&x.B0)
	// B
	res.B2.A0.Set(&x.B1.A1)
	res.B2.A1.MulByNonResidueInv(&x.B1.A0)
	res.B2.Square(&res.B2).Double(&res.B2).Double(&res.B2).Neg(&res.B2)
	a.B2.Conjugate(&x.B2)
	// C
	tmp.Square(&x.B2)
	res.B1.A0.MulByNonResidue(&tmp.A1)
	res.B1.A1.Set(&tmp.A0)
	a.B1.A0.Neg(&x.B1.A0)
	a.B1.A1.Set(&x.B1.A1)
	// z = 3*res - 2*a, assembled as ((res - a) * 2) + res.
	z.Sub(&res, &a).Double(z).Add(z, &res)
	return z
}

// Inverse inverts an element in e6 and returns z.
// The element is assumed invertible (nonzero); t6.Inverse is the single
// underlying e2 inversion.
func (z *e6) Inverse(x *e6) *e6 {
	// Algorithm 17 from https://eprint.iacr.org/2010/354.pdf
	// step 9 is wrong in the paper it's t1-t4
	var t0, t1, t2, t3, t4, t5, t6, c0, c1, c2, d1, d2 e2
	t0.Square(&x.B0)
	t1.Square(&x.B1)
	t2.Square(&x.B2)
	t3.Mul(&x.B0, &x.B1)
	t4.Mul(&x.B0, &x.B2)
	t5.Mul(&x.B1, &x.B2)
	c0.MulByNonResidue(&t5).Neg(&c0).Add(&c0, &t0)
	c1.MulByNonResidue(&t2).Sub(&c1, &t3)
	c2.Sub(&t1, &t4)
	t6.Mul(&x.B0, &c0)
	d1.Mul(&x.B2, &c1)
	d2.Mul(&x.B1, &c2)
	d1.Add(&d1, &d2).MulByNonResidue(&d1)
	t6.Add(&t6, &d1)
	t6.Inverse(&t6)
	z.B0.Mul(&c0, &t6)
	z.B1.Mul(&c1, &t6)
	z.B2.Mul(&c2, &t6)
	return z
}
// Exp sets z=x**e and returns it
func (z *e6) Exp(x *e6, e big.Int) *e6 {
var res e6
res.SetOne()
b := e.Bytes()
for i := range b {
w := b[i]
mask := byte(0x80)
for j := 0; j < 8; j++ {
res.Square(&res)
if (w&mask)>>(7-j) != 0 {
res.Mul(&res, x)
}
mask = mask >> 1
}
}
z.Set(&res)
return z
} | bw761/e6.go | 0.513668 | 0.574335 | e6.go | starcoder |
package xutil
import (
"sync"
)
// OrderedMap wraps around a Go map keeping the order with which
// elements have been added. Keys must be strings, but values
// can be anything (interface{}).
// Unlike map, index assignment is not possible. Use the `Set`
// method to set a key with a particular value.
// Use the Keys method to retrieve keys, Values to get the
// values. To get both, which is probably what you want, use
// the KeysValues method.
type OrderedMap struct {
	// mapMU guards map_ and order for concurrent use.
	mapMU sync.RWMutex
	// map_ holds the key/value pairs; lazily allocated by Set.
	map_ map[string]interface{}
	// order lists the keys in insertion order.
	order []string
}
// Count returns the number of elements in the map.
//
// Fix: the previous implementation read om.order without holding mapMU,
// which is a data race against Set/Delete mutating om.order under the
// write lock; take the read lock like every other accessor.
func (om *OrderedMap) Count() int {
	om.mapMU.RLock()
	defer om.mapMU.RUnlock()
	return len(om.order)
}
// Set key in OrderedMap to value. Previously stored values
// are overwritten, but the order does not change.
//
// Improvement: membership is checked with an O(1) map lookup instead of the
// previous O(n) HasString scan over om.order. The invariant
// "key is in map_ iff key is in order" is maintained by Set and Delete,
// so the two checks are equivalent.
func (om *OrderedMap) Set(key string, value interface{}) {
	om.mapMU.Lock()
	defer om.mapMU.Unlock()
	if om.map_ == nil {
		om.map_ = map[string]interface{}{}
	}
	// Record insertion order only the first time the key appears.
	if _, exists := om.map_[key]; !exists {
		om.order = append(om.order, key)
	}
	om.map_[key] = value
}
// Delete removes the element with the specified key from the map and from
// the recorded insertion order; deleting an absent key is a no-op.
func (om *OrderedMap) Delete(key string) {
	om.mapMU.Lock()
	defer om.mapMU.Unlock()
	delete(om.map_, key)
	om.order = RemoveString(om.order, key)
}
// Keys returns the keys in insertion order as a slice of string.
//
// Fix: the previous implementation returned om.order itself, leaking the
// lock-protected internal slice — the caller could then race with Set/Delete
// mutating it after the RLock was released (Values already copies; Keys did
// not). Return a copy instead.
func (om *OrderedMap) Keys() []string {
	om.mapMU.RLock()
	defer om.mapMU.RUnlock()
	keys := make([]string, len(om.order))
	copy(keys, om.order)
	return keys
}
// values returns the stored values in insertion order.
// Callers must hold om.mapMU (read or write); this helper does not lock.
func (om *OrderedMap) values() []interface{} {
	res := make([]interface{}, len(om.order))
	for i, k := range om.order {
		res[i] = om.map_[k]
	}
	return res
}
// Values returns the values in insertion order as a freshly allocated
// slice of interfaces.
func (om *OrderedMap) Values() []interface{} {
	om.mapMU.RLock()
	defer om.mapMU.RUnlock()
	return om.values()
}
// KeysValues returns the keys as slice of strings, and values as slice of
// interfaces, both in insertion order.
//
// Fix: the previous implementation returned om.order itself, leaking the
// lock-protected internal slice to callers who could then race with
// Set/Delete after the RLock was released; return a copy of the keys,
// matching the copying already done for values.
func (om *OrderedMap) KeysValues() ([]string, []interface{}) {
	om.mapMU.RLock()
	defer om.mapMU.RUnlock()
	keys := make([]string, len(om.order))
	copy(keys, om.order)
	return keys, om.values()
}
// Has returns whether the map contains key.
func (om *OrderedMap) Has(key string) bool {
	om.mapMU.RLock()
	defer om.mapMU.RUnlock()
	return HasString(om.order, key)
}

// Value returns the value for key and also whether it was found.
// The bool is returned because value could be nil.
func (om *OrderedMap) Value(key string) (interface{}, bool) {
	om.mapMU.RLock()
	defer om.mapMU.RUnlock()
	return om.map_[key], HasString(om.order, key)
}
package interfacediagnostics
import (
"strconv"
"time"
"github.com/arsonistgopher/jinfluxexporter/collector"
channels "github.com/arsonistgopher/jinfluxexporter/rootchannels"
"github.com/arsonistgopher/jinfluxexporter/rpc"
)
// interfaceDiagnosticsCollector collects optical interface diagnostics
// over the JunOS RPC client; it is stateless.
type interfaceDiagnosticsCollector struct {
}

// NewCollector creates a new collector.
func NewCollector() collector.RPCCollector {
	return &interfaceDiagnosticsCollector{}
}
// Collect collects optical diagnostics metrics from JunOS and emits them as
// InfluxDB measurements on ch. label is used as the "host" tag and
// measurement as the measurement name. Interfaces reporting a positive
// module voltage additionally emit voltage/Rx-average fields; the rest emit
// the laser Rx fields.
//
// Fix: the values are float64 readings (bias current, power, temperature,
// dBm levels). The previous code truncated every field through uint64();
// dBm readings are routinely negative, and converting a negative float64 to
// uint64 is not defined by the Go spec (it produces platform-dependent
// garbage), while the other casts silently dropped the fractional part.
// Emit the raw float64 values instead.
// NOTE(review): if existing InfluxDB series hold these fields as integers,
// the changed field type will conflict on write — confirm before deploying.
func (c *interfaceDiagnosticsCollector) Collect(client rpc.Client, ch chan<- channels.InfluxDBMeasurement, label string, measurement string) error {
	diagnostics, err := c.interfaceDiagnostics(client)
	if err != nil {
		return err
	}
	for _, d := range diagnostics {
		tagset := map[string]string{"host": label}
		fieldset := map[string]interface{}{
			"LaserBiasCurrent":    d.LaserBiasCurrent,
			"LaserOutputPower":    d.LaserOutputPower,
			"LaserOutputPowerDbm": d.LaserOutputPowerDbm,
			"ModuleTemperature":   d.ModuleTemperature,
		}
		ch <- channels.InfluxDBMeasurement{Measurement: measurement, TagSet: tagset, FieldSet: fieldset, TimeStamp: time.Now()}
		if d.ModuleVoltage > 0 {
			fieldset := map[string]interface{}{
				"ModuleVoltage":              d.ModuleVoltage,
				"RxSignalAvgOpticalPower":    d.RxSignalAvgOpticalPower,
				"RxSignalAvgOpticalPowerDbm": d.RxSignalAvgOpticalPowerDbm,
			}
			ch <- channels.InfluxDBMeasurement{Measurement: measurement, TagSet: tagset, FieldSet: fieldset, TimeStamp: time.Now()}
		} else {
			fieldset := map[string]interface{}{
				"LaserRxOpticalPower":    d.LaserRxOpticalPower,
				"LaserRxOpticalPowerDbm": d.LaserRxOpticalPowerDbm,
			}
			ch <- channels.InfluxDBMeasurement{Measurement: measurement, TagSet: tagset, FieldSet: fieldset, TimeStamp: time.Now()}
		}
	}
	return nil
}
// interfaceDiagnostics runs the optics-diagnostics RPC and converts the reply
// into a slice of InterfaceDiagnostics. Interfaces whose diagnostics report
// "N/A" are skipped. Dbm fields arrive as strings; parse failures are
// deliberately ignored, leaving the zero value (best-effort conversion).
func (c *interfaceDiagnosticsCollector) interfaceDiagnostics(client rpc.Client) ([]*InterfaceDiagnostics, error) {
	x := &InterfaceDiagnosticsRpc{}
	err := rpc.RunCommandAndParse(client, "<get-interface-optics-diagnostics-information/>", &x)
	if err != nil {
		return nil, err
	}
	diagnostics := make([]*InterfaceDiagnostics, 0)
	for _, diag := range x.Diagnostics {
		// "N/A" marks interfaces without optics data; skip them entirely.
		if diag.Diagnostics.NA == "N/A" {
			continue
		}
		d := &InterfaceDiagnostics{
			Name:              diag.Name,
			LaserBiasCurrent:  float64(diag.Diagnostics.LaserBiasCurrent),
			LaserOutputPower:  float64(diag.Diagnostics.LaserOutputPower),
			ModuleTemperature: float64(diag.Diagnostics.ModuleTemperature.Value),
		}
		f, err := strconv.ParseFloat(diag.Diagnostics.LaserOutputPowerDbm, 64)
		if err == nil {
			d.LaserOutputPowerDbm = f
		}
		// A positive module voltage selects the voltage/Rx-average field set;
		// otherwise the per-laser Rx fields are used (mirrors Collect above).
		if diag.Diagnostics.ModuleVoltage > 0 {
			d.ModuleVoltage = float64(diag.Diagnostics.ModuleVoltage)
			d.RxSignalAvgOpticalPower = float64(diag.Diagnostics.RxSignalAvgOpticalPower)
			f, err = strconv.ParseFloat(diag.Diagnostics.RxSignalAvgOpticalPowerDbm, 64)
			if err == nil {
				d.RxSignalAvgOpticalPowerDbm = f
			}
		} else {
			d.LaserRxOpticalPower = float64(diag.Diagnostics.LaserRxOpticalPower)
			f, err = strconv.ParseFloat(diag.Diagnostics.LaserRxOpticalPowerDbm, 64)
			if err == nil {
				d.LaserRxOpticalPowerDbm = f
			}
		}
		diagnostics = append(diagnostics, d)
	}
	return diagnostics, nil
}
package digitalocean
import (
"context"
"fmt"
"strconv"
"strings"
"github.com/digitalocean/godo"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)
// dropletSchema returns the Terraform attribute schema shared by the Droplet
// data sources.
//
// Fix: the description of "ipv6_address_private" previously read
// "the Droplets private ipv4 address" — a copy-paste error from the IPv4
// field; it now correctly says ipv6.
func dropletSchema() map[string]*schema.Schema {
	return map[string]*schema.Schema{
		"id": {
			Type:        schema.TypeInt,
			Description: "id of the Droplet",
		},
		"name": {
			Type:        schema.TypeString,
			Description: "name of the Droplet",
		},
		"created_at": {
			Type:        schema.TypeString,
			Description: "the creation date for the Droplet",
		},
		"urn": {
			Type:        schema.TypeString,
			Description: "the uniform resource name for the Droplet",
		},
		"region": {
			Type:        schema.TypeString,
			Description: "the region that the Droplet instance is deployed in",
		},
		"image": {
			Type:        schema.TypeString,
			Description: "the image id or slug of the Droplet",
		},
		"size": {
			Type:        schema.TypeString,
			Description: "the current size of the Droplet",
		},
		"disk": {
			Type:        schema.TypeInt,
			Description: "the size of the Droplets disk in gigabytes",
		},
		"vcpus": {
			Type:        schema.TypeInt,
			Description: "the number of virtual cpus",
		},
		"memory": {
			Type:        schema.TypeInt,
			Description: "memory of the Droplet in megabytes",
		},
		"price_hourly": {
			Type:        schema.TypeFloat,
			Description: "the Droplets hourly price",
		},
		"price_monthly": {
			Type:        schema.TypeFloat,
			Description: "the Droplets monthly price",
		},
		"status": {
			Type:        schema.TypeString,
			Description: "state of the Droplet instance",
		},
		"locked": {
			Type:        schema.TypeBool,
			Description: "whether the Droplet has been locked",
		},
		"ipv4_address": {
			Type:        schema.TypeString,
			Description: "the Droplets public ipv4 address",
		},
		"ipv4_address_private": {
			Type:        schema.TypeString,
			Description: "the Droplets private ipv4 address",
		},
		"ipv6_address": {
			Type:        schema.TypeString,
			Description: "the Droplets public ipv6 address",
		},
		"ipv6_address_private": {
			Type:        schema.TypeString,
			Description: "the Droplets private ipv6 address",
		},
		"backups": {
			Type:        schema.TypeBool,
			Description: "whether the Droplet has backups enabled",
		},
		"ipv6": {
			Type:        schema.TypeBool,
			Description: "whether the Droplet has ipv6 enabled",
		},
		"private_networking": {
			Type:        schema.TypeBool,
			Description: "whether the Droplet has private networking enabled",
		},
		"monitoring": {
			Type:        schema.TypeBool,
			Description: "whether the Droplet has monitoring enabled",
		},
		"volume_ids": {
			Type:        schema.TypeSet,
			Elem:        &schema.Schema{Type: schema.TypeString},
			Description: "list of volumes attached to the Droplet",
		},
		"tags": tagsDataSourceSchema(),
		"vpc_uuid": {
			Type:        schema.TypeString,
			Description: "UUID of the VPC in which the Droplet is located",
		},
	}
}
// getDigitalOceanDroplets lists every Droplet in the account, following the
// API pagination (200 per page) until the last page is reached.
func getDigitalOceanDroplets(meta interface{}) ([]interface{}, error) {
	client := meta.(*CombinedConfig).godoClient()
	listOpts := &godo.ListOptions{
		Page:    1,
		PerPage: 200,
	}
	var all []interface{}
	for {
		droplets, resp, err := client.Droplets.List(context.Background(), listOpts)
		if err != nil {
			return nil, fmt.Errorf("Error retrieving droplets: %s", err)
		}
		for _, d := range droplets {
			all = append(all, d)
		}
		if resp.Links == nil || resp.Links.IsLastPage() {
			return all, nil
		}
		current, err := resp.Links.CurrentPage()
		if err != nil {
			return nil, fmt.Errorf("Error retrieving droplets: %s", err)
		}
		listOpts.Page = current + 1
	}
}
// flattenDigitalOceanDroplet converts a godo.Droplet into the flat
// map[string]interface{} shape matching dropletSchema.
// NOTE(review): the meta argument is unused here — presumably kept for
// signature compatibility with other flatten helpers; confirm before removing.
func flattenDigitalOceanDroplet(rawDroplet, meta interface{}) (map[string]interface{}, error) {
	droplet := rawDroplet.(godo.Droplet)
	flattenedDroplet := map[string]interface{}{
		"id":            droplet.ID,
		"name":          droplet.Name,
		"urn":           droplet.URN(),
		"region":        droplet.Region.Slug,
		"size":          droplet.Size.Slug,
		"price_hourly":  droplet.Size.PriceHourly,
		"price_monthly": droplet.Size.PriceMonthly,
		"disk":          droplet.Disk,
		"vcpus":         droplet.Vcpus,
		"memory":        droplet.Memory,
		"status":        droplet.Status,
		"locked":        droplet.Locked,
		"created_at":    droplet.Created,
		"vpc_uuid":      droplet.VPCUUID,
	}
	// Images without a slug are identified by their numeric ID instead.
	if droplet.Image.Slug == "" {
		flattenedDroplet["image"] = strconv.Itoa(droplet.Image.ID)
	} else {
		flattenedDroplet["image"] = droplet.Image.Slug
	}
	// Address fields are only set when the droplet actually has them.
	if publicIPv4 := findIPv4AddrByType(&droplet, "public"); publicIPv4 != "" {
		flattenedDroplet["ipv4_address"] = publicIPv4
	}
	if privateIPv4 := findIPv4AddrByType(&droplet, "private"); privateIPv4 != "" {
		flattenedDroplet["ipv4_address_private"] = privateIPv4
	}
	// IPv6 addresses are normalized to lowercase.
	if publicIPv6 := findIPv6AddrByType(&droplet, "public"); publicIPv6 != "" {
		flattenedDroplet["ipv6_address"] = strings.ToLower(publicIPv6)
	}
	if privateIPv6 := findIPv6AddrByType(&droplet, "private"); privateIPv6 != "" {
		flattenedDroplet["ipv6_address_private"] = strings.ToLower(privateIPv6)
	}
	// Feature booleans are derived from the droplet's feature list.
	if features := droplet.Features; features != nil {
		flattenedDroplet["backups"] = containsDigitalOceanDropletFeature(features, "backups")
		flattenedDroplet["ipv6"] = containsDigitalOceanDropletFeature(features, "ipv6")
		flattenedDroplet["private_networking"] = containsDigitalOceanDropletFeature(features, "private_networking")
		flattenedDroplet["monitoring"] = containsDigitalOceanDropletFeature(features, "monitoring")
	}
	flattenedDroplet["volume_ids"] = flattenDigitalOceanDropletVolumeIds(droplet.VolumeIDs)
	flattenedDroplet["tags"] = flattenTags(droplet.Tags)
	return flattenedDroplet, nil
}
package mat
import (
"math"
)
// NewRay constructs a Ray from an origin point and a direction vector.
func NewRay(origin Set, direction Set) Ray {
	return Ray{Origin: origin, Direction: direction}
}

// Ray is a half-line used for ray casting: a starting point (Origin)
// plus a Direction vector, both represented as Sets.
type Ray struct {
	Origin    Set
	Direction Set
}
// Position multiplies the direction of the ray with the passed distance and
// adds the result onto the origin, i.e. it returns the point
// origin + direction*distance along the ray.
//
// Fix: the signature previously read ") Set{" with no space before the
// brace — not gofmt-clean.
func Position(r Ray, distance float64) Set {
	add := Scalar(r.Direction, distance)
	pos := Add(r.Origin, add)
	return pos
}

// PositionPtr is the allocation-conscious variant of Position: it writes the
// resulting point into out instead of returning a new Set.
func PositionPtr(r Ray, distance float64, out *Set) {
	add := Scalar(r.Direction, distance)
	AddPtr(r.Origin, add, out)
}
// IntersectRayWithShape intersects a ray with a shape by first transforming
// the ray into the shape's object space.
// TODO only used by unit tests, fix so tests use IntersectRayWithShapePtr and remove
func IntersectRayWithShape(s Shape, r2 Ray) []Intersection {
	// transform ray with inverse of shape transformation matrix to be able to intersect a translated/rotated/skewed shape
	r := TransformRay(r2, s.GetInverse())
	// Call the intersect function provided by the shape implementation (e.g. Sphere, Plane osv)
	return s.IntersectLocal(r)
}

// IntersectRayWithShapePtr is the allocation-conscious variant of
// IntersectRayWithShape: the transformed ray is written into the
// caller-supplied in, avoiding a fresh Ray per call.
func IntersectRayWithShapePtr(s Shape, r2 Ray, in *Ray) []Intersection {
	//calcstats.Incr()
	// transform ray with inverse of shape transformation matrix to be able to intersect a translated/rotated/skewed shape
	TransformRayPtr(r2, s.GetInverse(), in)
	// Call the intersect function provided by the shape implementation (e.g. Sphere, Plane osv)
	return s.IntersectLocal(*in)
}
// Hit finds the first intersection with a positive T (the passed
// intersections are assumed to have been sorted already, so the first
// positive T is the nearest visible hit). The bool reports whether such an
// intersection exists; on false the zero Intersection is returned.
//
// Fix: removed leftover commented-out code and replaced the manual index
// loop with an idiomatic range loop.
func Hit(intersections []Intersection) (Intersection, bool) {
	for _, x := range intersections {
		if x.T > 0.0 {
			return x, true
		}
	}
	return Intersection{}, false
}
// TransformRay returns a new Ray with the given matrix applied to both the
// origin and the direction of r.
func TransformRay(r Ray, m1 Mat4x4) Ray {
	origin := MultiplyByTuple(m1, r.Origin)
	direction := MultiplyByTuple(m1, r.Direction)
	return NewRay(origin, direction)
}

// TransformRayPtr is the allocation-conscious variant of TransformRay:
// it writes the transformed origin and direction into out.
func TransformRayPtr(r Ray, m1 Mat4x4, out *Ray) {
	MultiplyByTuplePtr(&m1, &r.Origin, &out.Origin)
	MultiplyByTuplePtr(&m1, &r.Direction, &out.Direction)
}
// Schlick returns the reflectance for the precomputed intersection state —
// the fraction of light that is reflected — using the Schlick approximation
// of the Fresnel equations. The result is in [0, 1]; total internal
// reflection yields 1.0.
func Schlick(comps Computation) float64 {
	// find the cosine of the angle between the eye and normal vectors using Dot
	cos := Dot(comps.EyeVec, comps.NormalVec)
	// total internal reflection can only occur if n1 > n2
	if comps.N1 > comps.N2 {
		n := comps.N1 / comps.N2
		sin2Theta := (n * n) * (1.0 - (cos * cos))
		if sin2Theta > 1.0 {
			return 1.0
		}
		// compute cosine of theta_t using trig identity
		cosTheta := math.Sqrt(1.0 - sin2Theta)
		// when n1 > n2, use cos(theta_t) instead
		cos = cosTheta
	}
	// r0 is the reflectance at normal incidence.
	temp := (comps.N1 - comps.N2) / (comps.N1 + comps.N2)
	r0 := temp * temp
	return r0 + (1-r0)*math.Pow(1-cos, 5)
}
package attributes
// Attributes is an immutable collection of key/value pairs. Keys must be
// hashable, and users should define their own types for keys. Values must not
// be modified after being stored in, or retrieved from, an Attributes. If a
// value implements 'Equal(o interface{}) bool', (*Attributes).Equal calls it
// to decide whether two values under the same key are equivalent.
type Attributes struct {
	m map[interface{}]interface{}
}

// New returns an Attributes holding the single pair (key, value).
func New(key, value interface{}) *Attributes {
	return &Attributes{m: map[interface{}]interface{}{key: value}}
}

// WithValue returns a new Attributes carrying all previous pairs plus the new
// key/value pair; an existing value under key is superseded. Remove a key by
// storing a nil value. The receiver is never mutated (nil receiver allowed).
// value must not be modified afterwards.
func (a *Attributes) WithValue(key, value interface{}) *Attributes {
	if a == nil {
		return New(key, value)
	}
	next := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+1)}
	for k, v := range a.m {
		next.m[k] = v
	}
	next.m[key] = value
	return next
}

// Value returns the value stored under key, or nil when key is absent (or the
// receiver is nil). The returned value must not be modified.
func (a *Attributes) Value(key interface{}) interface{} {
	if a == nil {
		return nil
	}
	return a.m[key]
}

// Equal reports whether a and o hold equivalent pairs. A value implementing
// 'Equal(o interface{}) bool' is compared via that method; otherwise standard
// equality is used (so non-comparable types like maps must be wrapped in a
// struct or alias type that defines Equal). Two nil receivers are equal.
func (a *Attributes) Equal(o *Attributes) bool {
	switch {
	case a == nil && o == nil:
		return true
	case a == nil || o == nil:
		return false
	case len(a.m) != len(o.m):
		return false
	}
	for k, v := range a.m {
		other, present := o.m[k]
		if !present {
			// o is missing an element of a.
			return false
		}
		if eq, ok := v.(interface{ Equal(o interface{}) bool }); ok {
			if !eq.Equal(other) {
				return false
			}
			continue
		}
		// Fall back to standard equality when Equal is unimplemented.
		if v != other {
			return false
		}
	}
	return true
}
package rib
import (
"net"
"github.com/transitorykris/kbgp/bgp"
"github.com/transitorykris/kbgp/radix"
)
// 3.1. Routes: Advertisement and Storage
// For the purpose of this protocol, a route is defined as a unit of
// information that pairs a set of destinations with the attributes of a
// path to those destinations. The set of destinations are systems
// whose IP addresses are contained in one IP address prefix that is
// carried in the Network Layer Reachability Information (NLRI) field of
// an UPDATE message, and the path is the information reported in the
// path attributes field of the same UPDATE message.
// Routes are advertised between BGP speakers in UPDATE messages.
// Multiple routes that have the same path attributes can be advertised
// in a single UPDATE message by including multiple prefixes in the NLRI
// field of the UPDATE message.
// Routes are stored in the Routing Information Bases (RIBs): namely,
// the Adj-RIBs-In, the Loc-RIB, and the Adj-RIBs-Out, as described in
// Section 3.2.
// If a BGP speaker chooses to advertise a previously received route, it
// MAY add to, or modify, the path attributes of the route before
// advertising it to a peer.
// BGP provides mechanisms by which a BGP speaker can inform its peers
// that a previously advertised route is no longer available for use.
// There are three methods by which a given BGP speaker can indicate
// that a route has been withdrawn from service:
// a) the IP prefix that expresses the destination for a previously
// advertised route can be advertised in the WITHDRAWN ROUTES
// field in the UPDATE message, thus marking the associated route
// as being no longer available for use,
// b) a replacement route with the same NLRI can be advertised, or
// c) the BGP speaker connection can be closed, which implicitly
// removes all routes the pair of speakers had advertised to each
// other from service.
// Changing the attribute(s) of a route is accomplished by advertising a
// replacement route. The replacement route carries new (changed)
// attributes and has the same address prefix as the original route.
// 3.2. Routing Information Base
// The Routing Information Base (RIB) within a BGP speaker consists of
// three distinct parts:
// a) Adj-RIBs-In: The Adj-RIBs-In stores routing information learned
// from inbound UPDATE messages that were received from other BGP
// speakers. Their contents represent routes that are available
// as input to the Decision Process.
// b) Loc-RIB: The Loc-RIB contains the local routing information the
// BGP speaker selected by applying its local policies to the
// routing information contained in its Adj-RIBs-In. These are
// the routes that will be used by the local BGP speaker. The
// next hop for each of these routes MUST be resolvable via the
// local BGP speaker's Routing Table.
// c) Adj-RIBs-Out: The Adj-RIBs-Out stores information the local BGP
// speaker selected for advertisement to its peers. The routing
// information stored in the Adj-RIBs-Out will be carried in the
// local BGP speaker's UPDATE messages and advertised to its
// peers.
// In summary, the Adj-RIBs-In contains unprocessed routing information
// that has been advertised to the local BGP speaker by its peers; the
// Loc-RIB contains the routes that have been selected by the local BGP
// speaker's Decision Process; and the Adj-RIBs-Out organizes the routes
// for advertisement to specific peers (by means of the local speaker's
// UPDATE messages).
// Although the conceptual model distinguishes between Adj-RIBs-In,
// Loc-RIB, and Adj-RIBs-Out, this neither implies nor requires that an
// implementation must maintain three separate copies of the routing
// information. The choice of implementation (for example, 3 copies of
// the information vs 1 copy with pointers) is not constrained by the
// protocol.
// Routing information that the BGP speaker uses to forward packets (or
// to construct the forwarding table used for packet forwarding) is
// maintained in the Routing Table. The Routing Table accumulates
// routes to directly connected networks, static routes, routes learned
// from the IGP protocols, and routes learned from BGP. Whether a
// specific BGP route should be installed in the Routing Table, and
// whether a BGP route should override a route to the same destination
// installed by another source, is a local policy decision, and is not
// specified in this document. In addition to actual packet forwarding,
// the Routing Table is used for resolution of the next-hop addresses
// specified in BGP updates (see Section 5.1.3).
// RIB contains a set of routes that can have policy applied to it.
type RIB struct {
	// prePolicy and postPolicy presumably correspond to the routes before
	// and after local policy is applied (Adj-RIB-In vs Loc-RIB per the
	// commentary above) — neither is populated yet by the stub methods
	// below. TODO confirm the intended roles.
	prePolicy  radix.Trie
	postPolicy radix.Trie
	// policy is the policer supplied at construction time (see New).
	policy bgp.Policer
}
// New creates a new RIB
func New(policy bgp.Policer) *RIB {
return &RIB{policy: policy}
}
// Inject implements bgp.RIB
//
// TODO(review): stub — the given route is currently discarded; nothing is
// stored in prePolicy or postPolicy.
func (r *RIB) Inject(route bgp.Route) {
}
// Remove implements bgp.RIB
//
// TODO(review): stub — the NLRI is ignored and no route is removed.
func (r *RIB) Remove(nlri bgp.NLRI) {
}
// Lookup implements bgp.RIB
//
// TODO(review): stub — always returns the zero-value Route regardless of
// the requested network; no trie lookup is performed yet.
func (r *RIB) Lookup(net.IPNet) bgp.Route {
	return bgp.Route{}
}
// SetPolicy implements bgp.RIB. It replaces the Policer used to derive
// post-policy routes from pre-policy routes.
//
// Fix: this was an empty stub that silently discarded the new policy,
// even though the struct carries a policy field that New initializes;
// store it so subsequent policy application uses the updated Policer.
func (r *RIB) SetPolicy(policy bgp.Policer) {
	r.policy = policy
}
// Dump implements bgp.RIB
func (r *RIB) Dump() <-chan bgp.Route {
return nil
} | old/rib/rib.go | 0.675658 | 0.568895 | rib.go | starcoder |
package ts
import (
"bytes"
"github.com/m3db/m3x/checked"
)
// Segment represents a binary blob consisting of two byte slices and
// declares whether they should be finalized when the segment is finalized.
type Segment struct {
	// Head is the head of the segment.
	Head checked.Bytes
	// Tail is the tail of the segment.
	Tail checked.Bytes
	// Flags declares whether Head and/or Tail should themselves be
	// finalized when the segment is finalized (see FinalizeHead and
	// FinalizeTail).
	Flags SegmentFlags
}
// SegmentFlags describes the option to finalize or not finalize
// bytes in a Segment. Values are single-bit flags and may be OR'd
// together (e.g. FinalizeHead|FinalizeTail).
type SegmentFlags uint8

const (
	// FinalizeNone specifies to finalize neither of the bytes
	FinalizeNone SegmentFlags = 1 << 0
	// FinalizeHead specifies to finalize the head bytes
	FinalizeHead SegmentFlags = 1 << 1
	// FinalizeTail specifies to finalize the tail bytes
	FinalizeTail SegmentFlags = 1 << 2
)
// NewSegment will create a new segment and increment the refs to
// head and tail if they are non-nil. When finalized the segment will
// also finalize the byte slices if the corresponding finalize flags
// are set.
func NewSegment(
	head, tail checked.Bytes,
	flags SegmentFlags,
) Segment {
	for _, b := range []checked.Bytes{head, tail} {
		if b != nil {
			b.IncRef()
		}
	}
	return Segment{Head: head, Tail: tail, Flags: flags}
}
// Len returns the combined length of the head and tail bytes.
func (s *Segment) Len() int {
	length := 0
	if h := s.Head; h != nil {
		length += h.Len()
	}
	if t := s.Tail; t != nil {
		length += t.Len()
	}
	return length
}
// Equal returns whether this segment's combined head+tail bytes are
// equal to another segment's combined bytes.
// WARNING: This should only be used in code paths not
// executed often as it allocates bytes to concat each
// segment head and tail together before comparing the contents.
func (s *Segment) Equal(other *Segment) bool {
	var head, tail, otherHead, otherTail []byte
	if s.Head != nil {
		head = s.Head.Get()
	}
	if s.Tail != nil {
		tail = s.Tail.Get()
	}
	if other.Head != nil {
		otherHead = other.Head.Get()
	}
	if other.Tail != nil {
		otherTail = other.Tail.Get()
	}
	// Fast reject on combined length before allocating anything.
	if len(head)+len(tail) != len(otherHead)+len(otherTail) {
		return false
	}
	// Fix: the previous implementation used append(head, tail...), which
	// can write tail's bytes into spare capacity of head's backing array,
	// silently corrupting the bytes owned by s.Head as a side effect of a
	// comparison. Concatenate into freshly allocated buffers instead.
	lhs := make([]byte, 0, len(head)+len(tail))
	lhs = append(append(lhs, head...), tail...)
	rhs := make([]byte, 0, len(otherHead)+len(otherTail))
	rhs = append(append(rhs, otherHead...), otherTail...)
	return bytes.Equal(lhs, rhs)
}
// Finalize will finalize the segment by decrementing refs to head and
// tail if they are non-nil.
func (s *Segment) Finalize() {
if s.Head != nil {
s.Head.DecRef()
if s.Flags&FinalizeHead == FinalizeHead {
s.Head.Finalize()
}
}
s.Head = nil
if s.Tail != nil {
s.Tail.DecRef()
if s.Flags&FinalizeTail == FinalizeTail {
s.Tail.Finalize()
}
}
s.Tail = nil
} | ts/segment.go | 0.73173 | 0.493531 | segment.go | starcoder |
package geom
import (
"fmt"
"math"
math2 "github.com/roeldev/go-sdl2-experiments/pkg/sdlkit/math"
)
// var (
// vectorUp = Vector{0, -1}
// vectorDown = Vector{0, 1}
// vectorLeft = Vector{-1, 0}
// vectorRight = Vector{1, 0}
// )
// Vector is a 2D euclidean vector with float64 components.
type Vector struct {
	X, Y float64
}
// VectorFromInt creates a new Vector from int32 values.
func VectorFromInt(x, y int32) *Vector {
	v := Vector{X: float64(x), Y: float64(y)}
	return &v
}

// VectorFromXY creates a new Vector from an XYGetter.
func VectorFromXY(xy XYGetter) *Vector {
	v := Vector{X: xy.GetX(), Y: xy.GetY()}
	return &v
}
// GetX returns the X component; together with GetY it matches the
// XYGetter accessors used by VectorFromXY.
func (v Vector) GetX() float64 { return v.X }

// GetY returns the Y component.
func (v Vector) GetY() float64 { return v.Y }

// SetX replaces the X component.
func (v *Vector) SetX(x float64) { v.X = x }

// SetY replaces the Y component.
func (v *Vector) SetY(y float64) { v.Y = y }
// Point returns a Point with the same X and Y values as the Vector.
func (v Vector) Point() Point { return Point{X: v.X, Y: v.Y} }

// Clone returns a pointer to a new Vector with the same X and Y values
// as the source Vector.
func (v Vector) Clone() *Vector {
	c := v
	return &c
}
// SetAngle rotates the Vector to the given angle (in radians) while
// preserving its current length.
func (v *Vector) SetAngle(angle float64) *Vector {
	return v.FromPolar(angle, v.Length())
}

// SetLength scales the Vector to the given length while preserving its
// current angle.
func (v *Vector) SetLength(l float64) *Vector {
	return v.FromPolar(v.Angle(), l)
}
// FromPolar sets the X and Y values according to the provided angle (in
// radians) and length.
func (v *Vector) FromPolar(angle, length float64) *Vector {
	v.X, v.Y = length*math.Cos(angle), length*math.Sin(angle)
	return v
}
// Zero resets both components of this Vector to 0.
func (v *Vector) Zero() *Vector {
	v.X, v.Y = 0, 0
	return v
}

// Negate flips the sign of both components of this Vector, negative
// numbers becoming positive and positive becoming negative.
func (v *Vector) Negate() *Vector {
	v.X, v.Y = -v.X, -v.Y
	return v
}
// Scale this Vector by the given scale value, where 1 is equal to the
// Vector's current value.
func (v *Vector) Scale(scale float64) *Vector {
	v.X, v.Y = v.X*scale, v.Y*scale
	return v
}

// Limit caps the length of this Vector at the given value; shorter
// vectors are left untouched.
func (v *Vector) Limit(length float64) *Vector {
	if v.Length() > length {
		return v.SetLength(length)
	}
	return v
}
// Rotate this Vector by an angle amount (in radians), so that Angle()
// of the result equals the old angle plus the given angle.
//
// Bug fix: the Y component was previously computed as (y*cos)-(x*sin),
// which is not a rotation (the implied matrix has determinant
// cos²−sin² ≠ 1, so lengths were not preserved). Rotating by θ must map
//
//	x' = x·cosθ − y·sinθ
//	y' = x·sinθ + y·cosθ
//
// consistent with the cos-for-X / sin-for-Y convention of FromPolar.
func (v *Vector) Rotate(angle float64) *Vector {
	x, y := v.X, v.Y
	cos, sin := math.Cos(angle), math.Sin(angle)
	v.X = (x * cos) - (y * sin)
	v.Y = (x * sin) + (y * cos)
	return v
}
// Add adds the given x and y values to this Vector.
func (v *Vector) Add(x, y float64) *Vector {
	v.X, v.Y = v.X+x, v.Y+y
	return v
}

// AddVec adds the given Vector to this Vector.
func (v *Vector) AddVec(add Vector) *Vector {
	return v.Add(add.X, add.Y)
}

// AddXY adds the given XYGetter values to this Vector.
func (v *Vector) AddXY(add XYGetter) *Vector {
	return v.Add(add.GetX(), add.GetY())
}

// SubVec subtracts the given Vector from this Vector.
func (v *Vector) SubVec(sub Vector) *Vector {
	v.X, v.Y = v.X-sub.X, v.Y-sub.Y
	return v
}

// SubXY subtracts the given XYGetter values from this Vector.
func (v *Vector) SubXY(sub XYGetter) *Vector {
	v.X, v.Y = v.X-sub.GetX(), v.Y-sub.GetY()
	return v
}
// MulVec multiplies this Vector component-wise by the given Vector.
func (v *Vector) MulVec(mul Vector) *Vector {
	v.X *= mul.X
	v.Y *= mul.Y
	return v
}

// Div divides this Vector component-wise by the given Vector.
// Note: a zero component in div yields ±Inf (or NaN for 0/0) per
// IEEE-754 float64 division semantics; no guard is performed here.
func (v *Vector) Div(div Vector) *Vector {
	v.X /= div.X
	v.Y /= div.Y
	return v
}
// Lerp linearly interpolates this Vector towards the target Vector.
// Value t is the interpolation percentage between 0 and 1 (t=0 leaves
// the Vector unchanged, t=1 moves it onto target).
func (v *Vector) Lerp(target Vector, t float64) *Vector {
	v.X += (target.X - v.X) * t
	v.Y += (target.Y - v.Y) * t
	return v
}

// LerpRound linearly interpolates this Vector towards the target via
// math2.LerpRound per component. The e parameter is presumably an
// epsilon used to snap to the target when close — TODO(review): confirm
// against the math2 package.
func (v *Vector) LerpRound(target Vector, t, e float64) *Vector {
	v.X = math2.LerpRound(v.X, target.X, t, e)
	v.Y = math2.LerpRound(v.Y, target.Y, t, e)
	return v
}
// Angle returns the angle in radians (via math.Atan2, so in (-π, π]).
func (v Vector) Angle() float64 { return math.Atan2(v.Y, v.X) }

// Length returns the length (magnitude).
func (v Vector) Length() float64 { return math.Sqrt(v.LengthSq()) }

// LengthSq returns the squared length (magnitude), avoiding the sqrt.
func (v Vector) LengthSq() float64 { return (v.X * v.X) + (v.Y * v.Y) }
// Equals compares the X and Y values of the Vector and the target
// Vector, and returns true when they are equal. Note this is an exact
// float64 comparison (== is always false against NaN components).
func (v Vector) Equals(target Vector) bool {
	return v.X == target.X && v.Y == target.Y
}
// Normalize returns a Vector with the same direction and a length
// (magnitude) of 1. It does not alter the source Vector (value
// receiver; Scale mutates only the local copy).
//
// Fixes: the comment previously referred to the method as "Norm", and a
// zero Vector was scaled by 1/0, producing NaN components — it is now
// returned unchanged since it has no direction to preserve.
func (v Vector) Normalize() Vector {
	l := v.LengthSq()
	if l == 1 || l == 0 {
		return v
	}
	v.Scale(1 / math.Sqrt(l))
	return v
}
// Dot calculates the dot product of the Vector and the target Vector.
func (v Vector) Dot(target Vector) float64 {
	return v.X*target.X + v.Y*target.Y
}
// Cross calculates the 2D cross product (the z component of the 3D
// cross product) of the Vector and the target Vector.
//
// Bug fix: this previously computed (X*Y')+(Y*X'), a symmetric quantity
// that is not a cross product (it made v.Cross(v) non-zero). The 2D
// cross product is X*Y' − Y*X': anti-symmetric, and zero for parallel
// vectors.
func (v Vector) Cross(target Vector) float64 {
	return (v.X * target.Y) - (v.Y * target.X)
}
func (v Vector) String() string {
angle := v.Angle()
return fmt.Sprintf("%T{%f, %f}, angle: %f (= %f°), length: %f",
v, v.X, v.Y,
angle, math2.RadToDeg(angle),
v.Length(),
)
} | pkg/sdlkit/geom/vector.go | 0.832849 | 0.648181 | vector.go | starcoder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.