code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package period
import (
"sort"
"time"
cabiriaTime "github.com/liampulles/cabiria/pkg/time"
)
// Periods is a slice of Period. It can itself be considered a Period (and we
// implement Period for Periods below): its Start/End are the extreme bounds
// of its elements.
type Periods []Period
// Valid reports whether the collection contains at least one element and
// every element is itself Valid.
func (p Periods) Valid() bool {
	valid := len(p) > 0
	for i := 0; valid && i < len(p); i++ {
		valid = p[i].Valid()
	}
	return valid
}
// Start is the minimum start of all elements in Periods. The zero time is
// returned for an empty collection.
func (p Periods) Start() time.Time {
	var earliest time.Time
	for i, elem := range p {
		if i == 0 {
			earliest = elem.Start()
		} else {
			earliest = cabiriaTime.Min(earliest, elem.Start())
		}
	}
	return earliest
}
// End is the maximum end of all elements in Periods. The zero time is
// returned for an empty collection.
func (p Periods) End() time.Time {
	var latest time.Time
	for i, elem := range p {
		if i == 0 {
			latest = elem.End()
		} else {
			latest = cabiriaTime.Max(latest, elem.End())
		}
	}
	return latest
}
// TransformToNew scales and shifts the elements of Periods, such that
// the minimum start of all elements is now "start", and the maximum end
// of all elements is now "end". The elements are also transformed into new
// variants, and the relative relationship between elements is unchanged -
// e.g. if elements 2 and 7 were overlapping, they will continue to overlap by
// the same percentage after the transformation.
func (p Periods) TransformToNew(start, end time.Time) Period {
	// Determine the overall bounds of the collection.
	manyMin := p.Start()
	manyMax := p.End()
	// Shift all elements so that the earliest start lands on "start".
	shifted := shiftPeriods(p, start.Sub(manyMin))
	// Scale about "start" so that the latest end lands on "end".
	manySpan := manyMax.Sub(manyMin)
	if manySpan == 0 {
		// Degenerate case: all elements are coincident instants, so there is
		// nothing to scale (the factor would be infinite). Previously this
		// divided by zero, producing an Inf scale factor and garbage times.
		return shifted
	}
	desiredSpan := end.Sub(start)
	scaleFactor := float64(desiredSpan) / float64(manySpan)
	return scalePeriods(shifted, start, scaleFactor)
}
// scalePeriods returns new elements, each scaled by factor about origin.
func scalePeriods(p Periods, origin time.Time, factor float64) Periods {
	var scaled []Period
	for i := range p {
		scaled = append(scaled, Scale(p[i], origin, factor))
	}
	return scaled
}
// shiftPeriods returns new elements, each shifted forward by amount.
func shiftPeriods(periods Periods, amount time.Duration) Periods {
	var shifted []Period
	for i := range periods {
		shifted = append(shifted, Shift(periods[i], amount))
	}
	return shifted
}
// FixOverlaps will adjust any set of overlapping elements in many such that
// their bounds touch, and they share the span of their overlapping set in
// proportion to their original Durations.
//
// Note: many is sorted in place as a side effect. The returned slice holds
// new elements (produced via TransformToNew inside separate).
func FixOverlaps(many Periods) Periods {
	if len(many) == 0 {
		return []Period{}
	}
	Sort(many)
	var result []Period
	// Sweep left to right, accumulating a run of mutually overlapping
	// elements, flushing each completed run through separate().
	currentSet := Periods([]Period{many[0]})
	for i := 1; i < len(many); i++ {
		elem := many[i]
		if DoesOverlap(elem, currentSet) {
			currentSet = append(currentSet, elem)
		} else {
			result = append(result, separate(currentSet)...)
			currentSet = Periods([]Period{elem})
		}
	}
	// Flush the final run.
	result = append(result, separate(currentSet)...)
	return result
}
// MergeTouching will merge any touching periods using mergeFunc.
// mergeFunc should return a period which has Start = a.Start() and end
// = b.End(), otherwise the result is not guaranteed to have non-touching
// elements.
//
// Note: many is sorted in place as a side effect.
func MergeTouching(many Periods, mergeFunc func(a, b Period) Period) Periods {
	if len(many) == 0 {
		return []Period{}
	}
	// wrappedMergeFunc folds a run of touching elements into a one-element
	// Periods so it can be appended with "..." below.
	wrappedMergeFunc := func(set Periods) Periods {
		return Periods([]Period{merge(set, mergeFunc)})
	}
	Sort(many)
	var result []Period
	// Sweep left to right, accumulating runs of mutually touching elements.
	currentSet := Periods([]Period{many[0]})
	for i := 1; i < len(many); i++ {
		elem := many[i]
		if Touching(elem, currentSet) {
			currentSet = append(currentSet, elem)
		} else {
			result = append(result, wrappedMergeFunc(currentSet)...)
			currentSet = Periods([]Period{elem})
		}
	}
	// Flush the final run.
	result = append(result, wrappedMergeFunc(currentSet)...)
	return result
}
// CoverGaps will close any gaps between close elements by stretching those
// elements to cover the gap. The degree to which the elements are stretched is
// determined by their original Duration.
// The input slice itself is not reordered; a sorted copy is returned.
func CoverGaps(many Periods) Periods {
	result := make(Periods, len(many))
	copy(result, many)
	Sort(result)
	for i := 0; i < len(result)-1; i++ {
		// For each adjacent pair (in sorted order)
		before := result[i]
		after := result[i+1]
		// If there is a gap
		if !Touching(before, after) {
			dist := after.Start().Sub(before.End())
			// Figure out what proportion of the gap goes to each: the longer
			// element absorbs the larger share.
			beforeDuration := Duration(before)
			afterDuration := Duration(after)
			totalDuration := beforeDuration + afterDuration
			gapToAfter := time.Duration(float64(dist) * (float64(afterDuration) / float64(totalDuration)))
			meetingPoint := after.Start().Add(-gapToAfter)
			// Modify periods: stretch each element up to the meeting point.
			before = before.TransformToNew(before.Start(), meetingPoint)
			after = after.TransformToNew(meetingPoint, after.End())
			// Store result (after may be stretched again on the next pass).
			result[i] = before
			result[i+1] = after
		}
	}
	return result
}
// Sort orders the elements naturally: by start time ascending, breaking ties
// by end time ascending. The slice is sorted in place.
func Sort(many Periods) {
	sort.Slice(many, func(i, j int) bool {
		a, b := many[i], many[j]
		if !a.Start().Equal(b.Start()) {
			return a.Start().Before(b.Start())
		}
		return a.End().Before(b.End())
	})
}
// separate re-lays a run of overlapping elements side by side across the
// run's overall span, giving each element a share of the span proportional to
// its own Duration. Element order is preserved.
func separate(many Periods) Periods {
	var results Periods
	// Overall span of the run vs. the sum of the individual durations.
	spanDuration := float64(Duration(many))
	overlappingDuration := float64(durationSum(many))
	origin := many.Start()
	newStart := origin
	for _, elem := range many {
		// This element's fraction of the summed durations decides its share
		// of the span; elements are packed back to back from the origin.
		percentageOfTotal := float64(Duration(elem)) / overlappingDuration
		newEnd := newStart.Add(time.Duration(spanDuration * percentageOfTotal))
		results = append(results, elem.TransformToNew(newStart, newEnd))
		newStart = newEnd
	}
	return results
}
// merge folds all elements of many into a single Period using mergeFunc,
// left to right. Returns nil for an empty input.
func merge(many Periods, mergeFunc func(a, b Period) Period) Period {
	if len(many) == 0 {
		return nil
	}
	merged := many[0]
	for _, elem := range many[1:] {
		merged = mergeFunc(merged, elem)
	}
	return merged
}
func durationSum(many Periods) time.Duration {
sum := time.Duration(0)
for _, elem := range many {
sum += Duration(elem)
}
return sum
} | pkg/time/period/periods.go | 0.747063 | 0.465752 | periods.go | starcoder |
package processing
import (
"image"
"log"
"math"
"sync"
"github.com/alevinval/fingerprints/internal/helpers"
"github.com/alevinval/fingerprints/internal/matrix"
"github.com/alevinval/fingerprints/internal/types"
)
const (
	// Grayscale intensity values used for the two binarization classes.
	black = 0
	white = 255
)
// BinarizeSegmented runs binarization with an optimized threshold that
// ensures the segmented area is as big and continuous as possible.
// The threshold used is sqrt(mean pixel value).
func BinarizeSegmented(in, out *matrix.M, meta types.Metadata) {
	binarize(in, out, math.Sqrt(meta.MeanValue))
}
// BinarizeSkeleton runs binarization with an optimized threshold that
// does not damage the skeleton itself. The threshold used is mean/(pi/2).
func BinarizeSkeleton(in, out *matrix.M, meta types.Metadata) {
	binarize(in, out, meta.MeanValue/(math.Pi/2))
}
// binarize thresholds in into out (black below threshold, white otherwise),
// splitting the work across parallel workers, one per image region.
func binarize(in, out *matrix.M, threshold float64) {
	worker := func(wg *sync.WaitGroup, bounds image.Rectangle) {
		defer wg.Done()
		doBinarize(in, out, bounds, threshold)
	}
	helpers.RunInParallel(in, 0, worker)
}
// doBinarize applies the threshold to every pixel inside bounds: pixels below
// the threshold become black, all others white.
func doBinarize(in *matrix.M, out *matrix.M, bounds image.Rectangle, threshold float64) {
	for row := bounds.Min.Y; row < bounds.Max.Y; row++ {
		for col := bounds.Min.X; col < bounds.Max.X; col++ {
			if in.At(col, row) < threshold {
				out.Set(col, row, black)
				continue
			}
			out.Set(col, row, white)
		}
	}
}
// BinarizeEnhancement removes small connected regions ("speckle") from a
// binarized image. It relabels every connected region of in with a unique
// integer id, measures each region's area, and erases (inverts in in) any
// region whose area is below the square root of the mean area.
// The returned matrix contains the region labels, not black/white values.
func BinarizeEnhancement(in *matrix.M) *matrix.M {
	bounds := in.Bounds()
	// p starts as a copy of in and is progressively overwritten with labels.
	p := matrix.NewFromGray(in.ToGray())
	region := 1
	for y := bounds.Min.Y + 1; y < bounds.Max.Y-1; y++ {
		for x := bounds.Min.X + 1; x < bounds.Max.X-1; x++ {
			// Unlabelled pixels still hold black or white; start a fill there.
			if p.At(x, y) == black || p.At(x, y) == white {
				fillRegion(p, region, x, y, 0)
				region++
			}
			// Skip the label equal to white, so labels never collide with
			// the value of an unlabelled white pixel.
			if region == white {
				region++
			}
		}
	}
	log.Printf("regions found: %d", region)
	// Area per label, indexed by label value.
	histogram := make([]int, region)
	for y := bounds.Min.Y + 1; y < bounds.Max.Y-1; y++ {
		for x := bounds.Min.X + 1; x < bounds.Max.X-1; x++ {
			histogram[int(p.At(x, y))] += 1
		}
	}
	sum := 0.0
	for _, area := range histogram {
		sum += float64(area)
	}
	// NOTE(review): region is one past the last label used (and the value 255
	// is skipped), so this mean is slightly biased low — confirm intent.
	mean := sum / float64(region)
	erasedRegions := 0
	for region, area := range histogram {
		if float64(area) < math.Sqrt(mean) {
			eraseRegion(p, in, region)
			erasedRegions++
		}
	}
	log.Printf("erased regions: %d", erasedRegions)
	return p
}
// fillRegion flood-fills, via 8-connected recursion, the pixels that share
// the value found at (x, y), overwriting them with the given region label.
//
// NOTE(review): the max parameter is never used. The recursion depth grows
// with the region size, so very large regions could overflow the stack —
// consider an explicit stack/queue. The boundary guards compare against
// Min+1/Max-1, mirroring the caller's loops which skip the one-pixel border.
func fillRegion(p *matrix.M, region, x, y, max int) {
	if x == p.Bounds().Min.X+1 || x == p.Bounds().Max.X-1 {
		return
	}
	if y == p.Bounds().Min.Y+1 || y == p.Bounds().Max.Y-1 {
		return
	}
	// bw is the value being filled; on the initial call this is black or
	// white, since the caller only starts fills on unlabelled pixels.
	bw := p.At(x, y)
	p.Set(x, y, float64(region))
	// Recurse into each 8-connected neighbour still holding the same value.
	if p.At(x-1, y) == bw {
		fillRegion(p, region, x-1, y, max)
	}
	if p.At(x, y-1) == bw {
		fillRegion(p, region, x, y-1, max)
	}
	if p.At(x+1, y) == bw {
		fillRegion(p, region, x+1, y, max)
	}
	if p.At(x, y+1) == bw {
		fillRegion(p, region, x, y+1, max)
	}
	if p.At(x-1, y-1) == bw {
		fillRegion(p, region, x-1, y-1, max)
	}
	if p.At(x+1, y-1) == bw {
		fillRegion(p, region, x+1, y-1, max)
	}
	if p.At(x+1, y+1) == bw {
		fillRegion(p, region, x+1, y+1, max)
	}
	if p.At(x-1, y+1) == bw {
		fillRegion(p, region, x-1, y+1, max)
	}
}
// eraseRegion inverts, in the original image in, every pixel carrying the
// given region label in the label matrix p.
//
// NOTE(review): inside the loop, value == region holds after the first check,
// so `value == white` can only succeed when region == white (255) — a label
// the caller explicitly skips. The in.Set(x, y, black) branch therefore looks
// unreachable, and every erased pixel becomes white; confirm whether the
// pixel's original black/white value was meant to be consulted here instead.
func eraseRegion(p, in *matrix.M, region int) {
	bounds := p.Bounds()
	for y := bounds.Min.Y + 1; y < bounds.Max.Y-1; y++ {
		for x := bounds.Min.X + 1; x < bounds.Max.X-1; x++ {
			value := p.At(x, y)
			if int(value) != region {
				continue
			} else if value == white {
				in.Set(x, y, black)
			} else {
				in.Set(x, y, white)
			}
		}
	}
}
package gohorizon
import (
"encoding/json"
)
// NetworkLabelData Information related to a network label.
// Both fields are optional pointers: nil means "not set" and the field is
// omitted from the JSON encoding.
type NetworkLabelData struct {
	// The network label name.
	NetworkLabelName *string `json:"network_label_name,omitempty"`
	// The network interface name
	NicName *string `json:"nic_name,omitempty"`
}
// NewNetworkLabelData instantiates a new NetworkLabelData object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewNetworkLabelData() *NetworkLabelData {
	return &NetworkLabelData{}
}
// NewNetworkLabelDataWithDefaults instantiates a new NetworkLabelData object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewNetworkLabelDataWithDefaults() *NetworkLabelData {
	return &NetworkLabelData{}
}
// GetNetworkLabelName returns the NetworkLabelName field value if set, zero value otherwise.
func (o *NetworkLabelData) GetNetworkLabelName() string {
	if o != nil && o.NetworkLabelName != nil {
		return *o.NetworkLabelName
	}
	return ""
}
// GetNetworkLabelNameOk returns a tuple with the NetworkLabelName field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *NetworkLabelData) GetNetworkLabelNameOk() (*string, bool) {
	if o != nil && o.NetworkLabelName != nil {
		return o.NetworkLabelName, true
	}
	return nil, false
}
// HasNetworkLabelName returns a boolean if a field has been set.
func (o *NetworkLabelData) HasNetworkLabelName() bool {
	return o != nil && o.NetworkLabelName != nil
}
// SetNetworkLabelName gets a reference to the given string and assigns it to the NetworkLabelName field.
func (o *NetworkLabelData) SetNetworkLabelName(v string) {
	value := v
	o.NetworkLabelName = &value
}
// GetNicName returns the NicName field value if set, zero value otherwise.
func (o *NetworkLabelData) GetNicName() string {
	if o != nil && o.NicName != nil {
		return *o.NicName
	}
	return ""
}
// GetNicNameOk returns a tuple with the NicName field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *NetworkLabelData) GetNicNameOk() (*string, bool) {
	if o != nil && o.NicName != nil {
		return o.NicName, true
	}
	return nil, false
}
// HasNicName returns a boolean if a field has been set.
func (o *NetworkLabelData) HasNicName() bool {
	return o != nil && o.NicName != nil
}
// SetNicName gets a reference to the given string and assigns it to the NicName field.
func (o *NetworkLabelData) SetNicName(v string) {
	value := v
	o.NicName = &value
}
// MarshalJSON encodes only the fields that have been set.
func (o NetworkLabelData) MarshalJSON() ([]byte, error) {
	out := make(map[string]interface{})
	if o.NetworkLabelName != nil {
		out["network_label_name"] = o.NetworkLabelName
	}
	if o.NicName != nil {
		out["nic_name"] = o.NicName
	}
	return json.Marshal(out)
}
// NullableNetworkLabelData wraps a NetworkLabelData pointer together with a
// flag distinguishing "explicitly set (possibly to null)" from "never set".
type NullableNetworkLabelData struct {
	value *NetworkLabelData
	isSet bool
}
// Get returns the wrapped value (which may be nil).
func (v NullableNetworkLabelData) Get() *NetworkLabelData {
	return v.value
}
// Set stores val and marks the wrapper as explicitly set.
func (v *NullableNetworkLabelData) Set(val *NetworkLabelData) {
	v.value = val
	v.isSet = true
}
// IsSet reports whether Set has been called (even with a nil value).
func (v NullableNetworkLabelData) IsSet() bool {
	return v.isSet
}
// Unset clears the wrapped value and marks the wrapper as not set.
func (v *NullableNetworkLabelData) Unset() {
	v.value = nil
	v.isSet = false
}
// NewNullableNetworkLabelData wraps val in a NullableNetworkLabelData marked as set.
func NewNullableNetworkLabelData(val *NetworkLabelData) *NullableNetworkLabelData {
	return &NullableNetworkLabelData{value: val, isSet: true}
}
// MarshalJSON encodes the wrapped value (a nil value encodes as JSON null).
func (v NullableNetworkLabelData) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
// UnmarshalJSON decodes src into the wrapped value and marks the wrapper as
// set. Note: isSet becomes true even if decoding fails.
func (v *NullableNetworkLabelData) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
package period
import (
"time"
cabiriaTime "github.com/liampulles/cabiria/pkg/time"
)
// TimeFunction must return a time value for a given Period.
// Generally, this will be Period.Start or Period.End, supplied as a method
// expression.
type TimeFunction func(Period) time.Time
// DoesOverlap returns true if a and b overlap, otherwise false.
// a and b are NOT considered to be overlapping if their bounds merely
// touch - for that see Touching. Either argument being nil yields false.
func DoesOverlap(a, b Period) bool {
	if a == nil || b == nil {
		return false
	}
	aStartsBeforeBEnds := a.Start().Before(b.End())
	bStartsBeforeAEnds := b.Start().Before(a.End())
	return aStartsBeforeBEnds && bStartsBeforeAEnds
}
// Touching returns true if a and b overlap OR if their bounds touch,
// otherwise false. Either argument being nil yields false.
func Touching(a, b Period) bool {
	if a == nil || b == nil {
		return false
	}
	aEntirelyBeforeB := a.End().Before(b.Start())
	bEntirelyBeforeA := b.End().Before(a.Start())
	return !aEntirelyBeforeB && !bEntirelyBeforeA
}
// Overlap returns the duration of the section for which a and b overlap.
// If a and b do NOT overlap, or either is nil, then 0 is returned.
func Overlap(a, b Period) time.Duration {
	if a == nil || b == nil {
		return 0
	}
	// The overlap is the window between the latest start and earliest end.
	latestStart := Max(a, b, Period.Start)
	earliestEnd := Min(a, b, Period.End)
	if latestStart.After(earliestEnd) {
		return 0
	}
	return earliestEnd.Sub(latestStart)
}
// Shift returns a new period with the start and end adjusted to be +amount.
// A nil period yields nil.
func Shift(period Period, amount time.Duration) Period {
	if period == nil {
		return nil
	}
	return period.TransformToNew(
		period.Start().Add(amount),
		period.End().Add(amount),
	)
}
// Scale returns a new period where period has been scaled by factor from origin.
// e.g. period: (0:00:02.000,0:00:03.000), origin: 0:00:01.000, factor: 2.0 => (0:00:03.000,0:00:05.000)
// A nil period yields nil.
func Scale(period Period, origin time.Time, factor float64) Period {
	if period == nil {
		return nil
	}
	start := cabiriaTime.Scale(period.Start(), origin, factor)
	end := cabiriaTime.Scale(period.End(), origin, factor)
	if factor < 0.0 {
		// A negative factor reverses orientation, so swap to keep start <= end.
		start, end = end, start
	}
	return period.TransformToNew(start, end)
}
// Min returns the earlier of timeFunc(a) and timeFunc(b).
func Min(a, b Period, timeFunc TimeFunction) time.Time {
	return cabiriaTime.Min(timeFunc(a), timeFunc(b))
}
// Max returns the later of timeFunc(a) and timeFunc(b).
func Max(a, b Period, timeFunc TimeFunction) time.Time {
	return cabiriaTime.Max(timeFunc(a), timeFunc(b))
}
// Duration returns the duration that a period covers. If period is nil,
// 0 is returned.
func Duration(period Period) time.Duration {
if period == nil {
return 0
}
return period.End().Sub(period.Start())
} | pkg/time/period/common.go | 0.883211 | 0.492127 | common.go | starcoder |
package expectations
import (
"strings"
"dawn.googlesource.com/dawn/tools/src/cts/result"
)
const (
tagHeaderStart = `BEGIN TAG HEADER`
tagHeaderEnd = `END TAG HEADER`
)
// Parse parses an expectations file, returning the Content.
// Lines are grouped into Chunks: a chunk is a run of comments followed by a
// run of expectations, delimited by blank lines. Comment blocks are also fed
// to parseTags to extract tag-set declarations. On a malformed line a
// Diagnostic is returned as the error.
func Parse(body string) (Content, error) {
	// LineType is an enumerator classifying the 'type' of the line.
	type LineType int
	const (
		comment     LineType = iota // The line starts with the '#'
		expectation                 // The line declares an expectation
		blank                       // The line is blank
	)
	// classifyLine returns the LineType for the given line
	classifyLine := func(line string) LineType {
		line = strings.TrimSpace(line)
		switch {
		case line == "":
			return blank
		case strings.HasPrefix(line, "#"):
			return comment
		default:
			return expectation
		}
	}
	content := Content{} // The output content
	var pending Chunk    // The current Chunk being parsed
	// flush completes the current chunk, appending it to 'content'
	flush := func() {
		parseTags(&content.Tags, pending.Comments)
		content.Chunks = append(content.Chunks, pending)
		pending = Chunk{}
	}
	lastLineType := blank // The type of the last parsed line
	for i, l := range strings.Split(body, "\n") { // For each line...
		lineIdx := i + 1 // line index (1-based, for diagnostics)
		lineType := classifyLine(l)
		// Compare the new line type to the last.
		// Flush the pending chunk if needed.
		if i > 0 {
			switch {
			case
				lastLineType == blank && lineType != blank,             // blank -> !blank
				lastLineType != blank && lineType == blank,             // !blank -> blank
				lastLineType == expectation && lineType != expectation: // expectation -> comment
				flush()
			}
		}
		lastLineType = lineType
		// Handle blank lines and comments.
		switch lineType {
		case blank:
			continue
		case comment:
			pending.Comments = append(pending.Comments, l)
			continue
		}
		// Below this point, we're dealing with an expectation
		// Split the line by whitespace to form a list of tokens
		type Token struct {
			str        string
			start, end int // line offsets (0-based)
		}
		tokens := []Token{}
		if len(l) > 0 { // Parse the tokens
			inToken, s := false, 0
			for i, c := range l {
				if c == ' ' {
					if inToken {
						tokens = append(tokens, Token{l[s:i], s, i})
						inToken = false
					}
				} else if !inToken {
					s = i
					inToken = true
				}
			}
			// Capture the trailing token, if the line did not end in a space.
			if inToken {
				tokens = append(tokens, Token{l[s:], s, len(l)})
			}
		}
		// syntaxErr is a helper for returning a SyntaxError with the current
		// line and column index.
		syntaxErr := func(at Token, msg string) error {
			columnIdx := at.start + 1
			if columnIdx == 1 {
				// Zero Token (end of line): point just past the line's end.
				columnIdx = len(l) + 1
			}
			return Diagnostic{Error, lineIdx, columnIdx, msg}
		}
		// peek returns the next token without consuming it.
		// If there are no more tokens then an empty Token is returned.
		peek := func() Token {
			if len(tokens) > 0 {
				return tokens[0]
			}
			return Token{}
		}
		// next returns the next token, consuming it and incrementing the
		// column index.
		// If there are no more tokens then an empty Token is returned.
		next := func() Token {
			if len(tokens) > 0 {
				tok := tokens[0]
				tokens = tokens[1:]
				return tok
			}
			return Token{}
		}
		// match consumes the next token if (and only if) it equals str.
		match := func(str string) bool {
			if peek().str != str {
				return false
			}
			next()
			return true
		}
		// tags parses a [ tag ] block.
		tags := func(use string) (result.Tags, error) {
			if !match("[") {
				return result.Tags{}, nil
			}
			out := result.NewTags()
			for {
				t := next()
				switch t.str {
				case "]":
					return out, nil
				case "":
					return result.Tags{}, syntaxErr(t, "expected ']' for "+use)
				default:
					out.Add(t.str)
				}
			}
		}
		// Parse the optional bug
		var bug string
		if strings.HasPrefix(peek().str, "crbug.com") {
			bug = next().str
		}
		// Parse the optional test tags
		testTags, err := tags("tags")
		if err != nil {
			return Content{}, err
		}
		// Parse the query
		if t := peek(); t.str == "" || t.str[0] == '#' || t.str[0] == '[' {
			return Content{}, syntaxErr(t, "expected test query")
		}
		query := next().str
		// Parse the expected status
		if t := peek(); !strings.HasPrefix(t.str, "[") {
			return Content{}, syntaxErr(t, "expected status")
		}
		status, err := tags("status")
		if err != nil {
			return Content{}, err
		}
		// Parse any optional trailing comment
		comment := ""
		if t := peek(); strings.HasPrefix(t.str, "#") {
			comment = l[t.start:]
		}
		// Append the expectation to the list.
		pending.Expectations = append(pending.Expectations, Expectation{
			Line:    lineIdx,
			Bug:     bug,
			Tags:    testTags,
			Query:   query,
			Status:  status.List(),
			Comment: comment,
		})
	}
	// Flush the final chunk, unless the file ended on a blank line (in which
	// case it was already flushed by the blank-line transition above).
	if lastLineType != blank {
		flush()
	}
	return content, nil
}
// parseTags parses the tag information found between tagHeaderStart and
// tagHeaderEnd comments, populating tags with each declared tag-set and a
// reverse index (ByName) from tag name to its set and priority, where
// priority is the tag's declaration order within its set.
func parseTags(tags *Tags, lines []string) {
	// Flags for whether we're currently parsing a TAG HEADER and whether we're
	// also within a tag-set.
	inTagsHeader, inTagSet := false, false
	tagSet := TagSet{} // The currently parsed tag-set
	for _, line := range lines {
		// Strip the leading comment marker(s) and surrounding whitespace.
		line = strings.TrimSpace(strings.TrimLeft(strings.TrimSpace(line), "#"))
		if strings.Contains(line, tagHeaderStart) {
			// Lazily initialize the output on the first header encountered.
			if tags.ByName == nil {
				*tags = Tags{
					ByName: map[string]TagSetAndPriority{},
					Sets:   []TagSet{},
				}
			}
			inTagsHeader = true
			continue
		}
		if strings.Contains(line, tagHeaderEnd) {
			return // Reached the end of the TAG HEADER
		}
		if !inTagsHeader {
			continue // Still looking for a tagHeaderStart
		}
		// Below this point, we're in a TAG HEADER.
		tokens := removeEmpty(strings.Split(line, " "))
		for len(tokens) > 0 {
			if inTagSet {
				// Parsing tags in a tag-set (between the '[' and ']')
				if tokens[0] == "]" {
					// End of the tag-set.
					tags.Sets = append(tags.Sets, tagSet)
					inTagSet = false
					break
				} else {
					// Still inside the tag-set. Consume the tag.
					tag := tokens[0]
					tags.ByName[tag] = TagSetAndPriority{
						Set:      tagSet.Name,
						Priority: len(tagSet.Tags),
					}
					tagSet.Tags.Add(tag)
				}
				tokens = tokens[1:]
			} else {
				// Outside of tag-set. Scan for 'tags: ['
				if len(tokens) > 2 && tokens[0] == "tags:" && tokens[1] == "[" {
					inTagSet = true
					tagSet.Tags = result.NewTags()
					tokens = tokens[2:] // Skip 'tags:' and '['
				} else {
					// Tag set names are on their own line.
					// Remember the content of the line, in case the next line
					// starts a tag-set.
					tagSet.Name = strings.Join(tokens, " ")
					break
				}
			}
		}
	}
}
// removeEmpty returns a copy of in with all empty strings removed.
func removeEmpty(in []string) []string {
	out := make([]string, 0, len(in))
	for _, s := range in {
		if len(s) > 0 {
			out = append(out, s)
		}
	}
	return out
}
package bfv
import (
"errors"
"github.com/ldsec/lattigo/ring"
)
// Operand is implemented by types that wrap a bfvElement (e.g. plaintexts
// and ciphertexts), exposing the underlying element and its degree.
type Operand interface {
	Element() *bfvElement
	Degree() uint64
}
// bfvElement is a common struct between plaintexts and ciphertexts. It stores a value
// as a slice of polynomials, and an isNTT flag indicating if the element is in the NTT domain.
type bfvElement struct {
	value []*ring.Poly // one polynomial per degree, in CRT form
	isNTT bool         // true when value is in the NTT domain
}
// NewBfvElement creates a new empty bfvElement of the given degree, holding
// degree+1 zero polynomials and flagged as outside the NTT domain.
// (The previous comment referred to "NewCiphertext", which was a copy-paste
// error.)
func (bfvcontext *BfvContext) NewBfvElement(degree uint64) *bfvElement {
	el := &bfvElement{}
	el.value = make([]*ring.Poly, degree+1)
	for i := uint64(0); i < degree+1; i++ {
		el.value[i] = bfvcontext.contextQ.NewPoly()
	}
	el.isNTT = false
	return el
}
// Value returns the element's value (as a slice of polynomials in CRT form).
func (el *bfvElement) Value() []*ring.Poly {
	return el.value
}
// SetValue assigns the input slice of polynomials to the element's value
// (no copy is made).
func (el *bfvElement) SetValue(value []*ring.Poly) {
	el.value = value
}
// Degree returns the degree of the element, i.e. len(value) - 1.
func (el *bfvElement) Degree() uint64 {
	return uint64(len(el.value) - 1)
}
// Resize resizes the element to the given degree: polynomials are dropped if
// the current degree is larger, or fresh zero polynomials are appended if it
// is smaller, until Degree() == degree.
func (el *bfvElement) Resize(bfvcontext *BfvContext, degree uint64) {
	if el.Degree() > degree {
		// Keep degree+1 polynomials so the resulting degree is exactly the
		// requested one. (Previously this truncated to el.value[:degree],
		// which left the element with degree-1 — an off-by-one.)
		el.value = el.value[:degree+1]
	} else {
		for el.Degree() < degree {
			el.value = append(el.value, bfvcontext.contextQ.NewPoly())
		}
	}
}
// IsNTT returns true if the element is flagged as being in the NTT domain.
func (el *bfvElement) IsNTT() bool {
	return el.isNTT
}
// SetIsNTT assigns the input boolean value to the element's isNTT flag.
func (el *bfvElement) SetIsNTT(value bool) {
	el.isNTT = value
}
// CopyNew returns a deep copy of the element (each polynomial is copied).
func (el *bfvElement) CopyNew() *bfvElement {
	cp := &bfvElement{
		value: make([]*ring.Poly, len(el.value)),
		isNTT: el.isNTT,
	}
	for i, poly := range el.value {
		cp.value[i] = poly.CopyNew()
	}
	return cp
}
// Copy copies the value and NTT flag of the input element into the receiver.
// The receiver is assumed to already hold at least as many polynomials as
// ctxCopy. Always returns nil.
func (el *bfvElement) Copy(ctxCopy *bfvElement) error {
	if el != ctxCopy {
		for i := range ctxCopy.Value() {
			el.Value()[i].Copy(ctxCopy.Value()[i])
		}
		// Propagate the source's NTT flag. (Previously this was
		// el.SetIsNTT(el.IsNTT()) — a no-op that re-assigned the receiver's
		// own flag, leaving it stale after the copy.)
		el.SetIsNTT(ctxCopy.IsNTT())
	}
	return nil
}
// NTT puts the element into the NTT domain, writing the result into c and
// setting c's isNTT flag. It is a no-op if el is already flagged as NTT.
// Returns an error if el and c do not have the same degree.
func (el *bfvElement) NTT(bfvcontext *BfvContext, c *bfvElement) error {
	if el.Degree() != c.Degree() {
		// Previously misspelled "invalide degree".
		return errors.New("cannot NTT: receiver element degree does not match")
	}
	if !el.IsNTT() {
		for i := range el.value {
			bfvcontext.contextQ.NTT(el.Value()[i], c.Value()[i])
		}
		c.SetIsNTT(true)
	}
	return nil
}
// InvNTT takes the element out of the NTT domain, writing the result into c
// and clearing c's isNTT flag. It is a no-op if el is already flagged as
// outside the NTT domain. Returns an error if the degrees do not match.
func (el *bfvElement) InvNTT(bfvcontext *BfvContext, c *bfvElement) error {
	if el.Degree() != c.Degree() {
		// Previously misspelled "invalide degree".
		return errors.New("cannot InvNTT: receiver element degree does not match")
	}
	if el.IsNTT() {
		for i := range el.value {
			bfvcontext.contextQ.InvNTT(el.Value()[i], c.Value()[i])
		}
		c.SetIsNTT(false)
	}
	return nil
}
// Element returns the element itself (implements the Operand interface).
func (el *bfvElement) Element() *bfvElement {
	return el
}
// Ciphertext wraps the element as a Ciphertext (no copy is made).
func (el *bfvElement) Ciphertext() *Ciphertext {
	return &Ciphertext{el}
}
// Plaintext wraps the element as a Plaintext (no copy is made). It panics if
// the element holds more than one polynomial, i.e. is not of degree zero.
func (el *bfvElement) Plaintext() *Plaintext {
	if len(el.value) != 1 {
		panic("not a plaintext element")
	}
	return &Plaintext{el, el.value[0]}
}
package value
import (
"math/big"
)
// sinh returns the hyperbolic sine of v. A Complex value with a nonzero
// imaginary part is delegated to complexSinh; otherwise the (real) value is
// evaluated via floatSinh.
func sinh(c Context, v Value) Value {
	u, isCmplx := v.(Complex)
	if isCmplx {
		if !isZero(u.imag) {
			return complexSinh(c, u)
		}
		// Purely real complex: continue with just the real component.
		v = u.real
	}
	return evalFloatFunc(c, v, floatSinh)
}
// cosh returns the hyperbolic cosine of v. A Complex value with a nonzero
// imaginary part is delegated to complexCosh; otherwise the (real) value is
// evaluated via floatCosh.
func cosh(c Context, v Value) Value {
	u, isCmplx := v.(Complex)
	if isCmplx {
		if !isZero(u.imag) {
			return complexCosh(c, u)
		}
		// Purely real complex: continue with just the real component.
		v = u.real
	}
	return evalFloatFunc(c, v, floatCosh)
}
// tanh returns the hyperbolic tangent of v. A Complex value with a nonzero
// imaginary part is delegated to complexTanh; otherwise the (real) value is
// evaluated via floatTanh.
func tanh(c Context, v Value) Value {
	u, isCmplx := v.(Complex)
	if isCmplx {
		if !isZero(u.imag) {
			return complexTanh(c, u)
		}
		// Purely real complex: continue with just the real component.
		v = u.real
	}
	return evalFloatFunc(c, v, floatTanh)
}
// floatSinh computes sinh(x) = (e**x - e**-x)/2 by summing the Taylor series
// until the loop helper reports convergence.
func floatSinh(c Context, x *big.Float) *big.Float {
	// The Taylor series for sinh(x) is the odd terms of exp(x): x + x³/3! + x⁵/5!...
	conf := c.Config()
	xN := newF(conf).Set(x)               // running odd power of x, starting at x¹
	term := newF(conf)                    // scratch for the current term
	n := newF(conf)                       // scratch for factorial factors
	nFactorial := newF(conf).SetUint64(1) // running odd factorial, starting at 1!
	z := newF(conf).SetInt64(0)           // accumulated sum
	for loop := newLoop(conf, "sinh", x, 10); ; { // Big exponentials converge slowly.
		term.Set(xN)
		term.Quo(term, nFactorial)
		z.Add(z, term)
		if loop.done(z) {
			break
		}
		// Advance x**index (multiply by x twice, skipping the even power).
		xN.Mul(xN, x)
		xN.Mul(xN, x)
		// Advance n, n! by multiplying in the next two factors.
		nFactorial.Mul(nFactorial, n.SetUint64(2*loop.i))
		nFactorial.Mul(nFactorial, n.SetUint64(2*loop.i+1))
	}
	return z
}
// floatCosh computes cosh(x) = (e**x + e**-x)/2 by summing the Taylor series
// until the loop helper reports convergence. (The previous comment said
// "sinh" — a copy-paste slip.)
func floatCosh(c Context, x *big.Float) *big.Float {
	// The Taylor series for cosh(x) is the even terms of exp(x): 1 + x²/2! + x⁴/4!...
	conf := c.Config()
	xN := newF(conf).Set(x)
	xN.Mul(xN, x)                         // running even power of x, starting at x²
	term := newF(conf)                    // scratch for the current term
	n := newF(conf)                       // scratch for factorial factors
	nFactorial := newF(conf).SetUint64(2) // running even factorial, starting at 2!
	z := newF(conf).SetInt64(1)           // accumulated sum, seeded with the 1 term
	for loop := newLoop(conf, "cosh", x, 10); ; { // Big exponentials converge slowly.
		term.Set(xN)
		term.Quo(term, nFactorial)
		z.Add(z, term)
		if loop.done(z) {
			break
		}
		// Advance x**index (multiply by x twice, skipping the odd power).
		xN.Mul(xN, x)
		xN.Mul(xN, x)
		// Advance n, n! by multiplying in the next two factors.
		nFactorial.Mul(nFactorial, n.SetUint64(2*loop.i+1))
		nFactorial.Mul(nFactorial, n.SetUint64(2*loop.i+2))
	}
	return z
}
// floatTanh computes tanh(x) = sinh(x)/cosh(x).
// Errorf is raised for an infinite argument or a zero denominator.
func floatTanh(c Context, x *big.Float) *big.Float {
	if x.IsInf() {
		Errorf("tanh of infinity")
	}
	coshX := floatCosh(c, x)
	if coshX.Cmp(floatZero) == 0 {
		Errorf("tanh is infinite")
	}
	sinhX := floatSinh(c, x)
	return sinhX.Quo(sinhX, coshX)
}
// complexSinh evaluates sinh for a complex argument.
func complexSinh(c Context, v Complex) Value {
	// Use the formula: sinh(x+yi) = sinh(x)cos(y) + i cosh(x)sin(y)
	// First turn v into (a + bi) where a and b are big.Floats.
	// (Assumes floatSelf yields values safe to mutate — TODO confirm.)
	x := floatSelf(c, v.real).Float
	y := floatSelf(c, v.imag).Float
	sinhX := floatSinh(c, x)
	cosY := floatCos(c, y)
	coshX := floatCosh(c, x)
	sinY := floatSin(c, y)
	// Mul reuses the left operand's storage for the products.
	lhs := sinhX.Mul(sinhX, cosY)
	rhs := coshX.Mul(coshX, sinY)
	return newComplex(BigFloat{lhs}, BigFloat{rhs}).shrink()
}
// complexCosh evaluates cosh for a complex argument.
func complexCosh(c Context, v Complex) Value {
	// Use the formula: cosh(x+yi) = cosh(x)cos(y) + i sinh(x)sin(y)
	// First turn v into (a + bi) where a and b are big.Floats.
	// (Assumes floatSelf yields values safe to mutate — TODO confirm.)
	x := floatSelf(c, v.real).Float
	y := floatSelf(c, v.imag).Float
	coshX := floatCosh(c, x)
	cosY := floatCos(c, y)
	sinhX := floatSinh(c, x)
	sinY := floatSin(c, y)
	// Mul reuses the left operand's storage for the products.
	lhs := coshX.Mul(coshX, cosY)
	rhs := sinhX.Mul(sinhX, sinY)
	return newComplex(BigFloat{lhs}, BigFloat{rhs}).shrink()
}
// complexTanh evaluates tanh for a complex argument. Errorf is raised when
// the shared denominator is zero.
func complexTanh(c Context, v Complex) Value {
	// Use the formula: tanh(x+yi) = (sinh(2x) + i sin(2y)/(cosh(2x) + cos(2y))
	// First turn v into (a + bi) where a and b are big.Floats.
	x := floatSelf(c, v.real).Float
	y := floatSelf(c, v.imag).Float
	// Double them in place - all the arguments are 2X.
	// (Assumes floatSelf yields values safe to mutate — TODO confirm.)
	x.Mul(x, floatTwo)
	y.Mul(y, floatTwo)
	sinh2X := floatSinh(c, x)
	sin2Y := floatSin(c, y)
	cosh2X := floatCosh(c, x)
	cos2Y := floatCos(c, y)
	// Shared denominator; cosh2X's storage is reused for it.
	den := cosh2X.Add(cosh2X, cos2Y)
	if den.Sign() == 0 {
		Errorf("tangent is infinite")
	}
	return newComplex(BigFloat{sinh2X.Quo(sinh2X, den)}, BigFloat{sin2Y.Quo(sin2Y, den)}).shrink()
}
package chpp
// MatchRating is a 1-80 performance rating: twenty named levels
// (Disastrous .. Divine), each subdivided into VeryLow/Low/High/VeryHigh
// quarters (see the constants below).
type MatchRating uint
// List of MatchRating constants.
// Each named level occupies four consecutive values, in the order
// VeryLow, Low, High, VeryHigh, giving the contiguous range 1..80.
const (
	MatchRatingVeryLowDisastrous        MatchRating = 1
	MatchRatingLowDisastrous            MatchRating = 2
	MatchRatingHighDisastrous           MatchRating = 3
	MatchRatingVeryHighDisastrous       MatchRating = 4
	MatchRatingVeryLowWretched          MatchRating = 5
	MatchRatingLowWretched              MatchRating = 6
	MatchRatingHighWretched             MatchRating = 7
	MatchRatingVeryHighWretched         MatchRating = 8
	MatchRatingVeryLowPoor              MatchRating = 9
	MatchRatingLowPoor                  MatchRating = 10
	MatchRatingHighPoor                 MatchRating = 11
	MatchRatingVeryHighPoor             MatchRating = 12
	MatchRatingVeryLowWeak              MatchRating = 13
	MatchRatingLowWeak                  MatchRating = 14
	MatchRatingHighWeak                 MatchRating = 15
	MatchRatingVeryHighWeak             MatchRating = 16
	MatchRatingVeryLowInadequate        MatchRating = 17
	MatchRatingLowInadequate            MatchRating = 18
	MatchRatingHighInadequate           MatchRating = 19
	MatchRatingVeryHighInadequate       MatchRating = 20
	MatchRatingVeryLowPassable          MatchRating = 21
	MatchRatingLowPassable              MatchRating = 22
	MatchRatingHighPassable             MatchRating = 23
	MatchRatingVeryHighPassable         MatchRating = 24
	MatchRatingVeryLowSolid             MatchRating = 25
	MatchRatingLowSolid                 MatchRating = 26
	MatchRatingHighSolid                MatchRating = 27
	MatchRatingVeryHighSolid            MatchRating = 28
	MatchRatingVeryLowExcellent         MatchRating = 29
	MatchRatingLowExcellent             MatchRating = 30
	MatchRatingHighExcellent            MatchRating = 31
	MatchRatingVeryHighExcellent        MatchRating = 32
	MatchRatingVeryLowFormidable        MatchRating = 33
	MatchRatingLowFormidable            MatchRating = 34
	MatchRatingHighFormidable           MatchRating = 35
	MatchRatingVeryHighFormidable       MatchRating = 36
	MatchRatingVeryLowOutstanding       MatchRating = 37
	MatchRatingLowOutstanding           MatchRating = 38
	MatchRatingHighOutstanding          MatchRating = 39
	MatchRatingVeryHighOutstanding      MatchRating = 40
	MatchRatingVeryLowBrilliant         MatchRating = 41
	MatchRatingLowBrilliant             MatchRating = 42
	MatchRatingHighBrilliant            MatchRating = 43
	MatchRatingVeryHighBrilliant        MatchRating = 44
	MatchRatingVeryLowMagnificent       MatchRating = 45
	MatchRatingLowMagnificent           MatchRating = 46
	MatchRatingHighMagnificent          MatchRating = 47
	MatchRatingVeryHighMagnificent      MatchRating = 48
	MatchRatingVeryLowWorldClass        MatchRating = 49
	MatchRatingLowWorldClass            MatchRating = 50
	MatchRatingHighWorldClass           MatchRating = 51
	MatchRatingVeryHighWorldClass       MatchRating = 52
	MatchRatingVeryLowSupernatural      MatchRating = 53
	MatchRatingLowSupernatural          MatchRating = 54
	MatchRatingHighSupernatural         MatchRating = 55
	MatchRatingVeryHighSupernatural     MatchRating = 56
	MatchRatingVeryLowTitanic           MatchRating = 57
	MatchRatingLowTitanic               MatchRating = 58
	MatchRatingHighTitanic              MatchRating = 59
	MatchRatingVeryHighTitanic          MatchRating = 60
	MatchRatingVeryLowExtraTerrestrial  MatchRating = 61
	MatchRatingLowExtraTerrestrial      MatchRating = 62
	MatchRatingHighExtraTerrestrial     MatchRating = 63
	MatchRatingVeryHighExtraTerrestrial MatchRating = 64
	MatchRatingVeryLowMythical          MatchRating = 65
	MatchRatingLowMythical              MatchRating = 66
	MatchRatingHighMythical             MatchRating = 67
	MatchRatingVeryHighMythical         MatchRating = 68
	MatchRatingVeryLowMagical           MatchRating = 69
	MatchRatingLowMagical               MatchRating = 70
	MatchRatingHighMagical              MatchRating = 71
	MatchRatingVeryHighMagical          MatchRating = 72
	MatchRatingVeryLowUtopian           MatchRating = 73
	MatchRatingLowUtopian               MatchRating = 74
	MatchRatingHighUtopian              MatchRating = 75
	MatchRatingVeryHighUtopian          MatchRating = 76
	MatchRatingVeryLowDivine            MatchRating = 77
	MatchRatingLowDivine                MatchRating = 78
	MatchRatingHighDivine               MatchRating = 79
	MatchRatingVeryHighDivine           MatchRating = 80
)
// String returns the human-readable name of the rating, or "unknown"
// for values outside the defined range.
// nolint
func (m MatchRating) String() string {
	switch m {
	case MatchRatingVeryLowDisastrous:
		return "VeryLowDisastrous"
	case MatchRatingLowDisastrous:
		return "LowDisastrous"
	case MatchRatingHighDisastrous:
		return "HighDisastrous"
	case MatchRatingVeryHighDisastrous:
		return "VeryHighDisastrous"
	case MatchRatingVeryLowWretched:
		return "VeryLowWretched"
	case MatchRatingLowWretched:
		return "LowWretched"
	case MatchRatingHighWretched:
		return "HighWretched"
	case MatchRatingVeryHighWretched:
		return "VeryHighWretched"
	case MatchRatingVeryLowPoor:
		return "VeryLowPoor"
	case MatchRatingLowPoor:
		return "LowPoor"
	case MatchRatingHighPoor:
		return "HighPoor"
	case MatchRatingVeryHighPoor:
		return "VeryHighPoor"
	case MatchRatingVeryLowWeak:
		return "VeryLowWeak"
	case MatchRatingLowWeak:
		return "LowWeak"
	case MatchRatingHighWeak:
		return "HighWeak"
	case MatchRatingVeryHighWeak:
		return "VeryHighWeak"
	case MatchRatingVeryLowInadequate:
		return "VeryLowInadequate"
	case MatchRatingLowInadequate:
		return "LowInadequate"
	case MatchRatingHighInadequate:
		return "HighInadequate"
	case MatchRatingVeryHighInadequate:
		return "VeryHighInadequate"
	case MatchRatingVeryLowPassable:
		return "VeryLowPassable"
	case MatchRatingLowPassable:
		return "LowPassable"
	case MatchRatingHighPassable:
		return "HighPassable"
	case MatchRatingVeryHighPassable:
		return "VeryHighPassable"
	case MatchRatingVeryLowSolid:
		return "VeryLowSolid"
	case MatchRatingLowSolid:
		return "LowSolid"
	case MatchRatingHighSolid:
		return "HighSolid"
	case MatchRatingVeryHighSolid:
		return "VeryHighSolid"
	case MatchRatingVeryLowExcellent:
		return "VeryLowExcellent"
	case MatchRatingLowExcellent:
		return "LowExcellent"
	case MatchRatingHighExcellent:
		return "HighExcellent"
	case MatchRatingVeryHighExcellent:
		return "VeryHighExcellent"
	case MatchRatingVeryLowFormidable:
		return "VeryLowFormidable"
	case MatchRatingLowFormidable:
		return "LowFormidable"
	case MatchRatingHighFormidable:
		return "HighFormidable"
	case MatchRatingVeryHighFormidable:
		return "VeryHighFormidable"
	case MatchRatingVeryLowOutstanding:
		return "VeryLowOutstanding"
	case MatchRatingLowOutstanding:
		return "LowOutstanding"
	case MatchRatingHighOutstanding:
		return "HighOutstanding"
	case MatchRatingVeryHighOutstanding:
		return "VeryHighOutstanding"
	case MatchRatingVeryLowBrilliant:
		return "VeryLowBrilliant"
	case MatchRatingLowBrilliant:
		return "LowBrilliant"
	case MatchRatingHighBrilliant:
		return "HighBrilliant"
	case MatchRatingVeryHighBrilliant:
		return "VeryHighBrilliant"
	case MatchRatingVeryLowMagnificent:
		return "VeryLowMagnificent"
	case MatchRatingLowMagnificent:
		return "LowMagnificent"
	case MatchRatingHighMagnificent:
		return "HighMagnificent"
	case MatchRatingVeryHighMagnificent:
		return "VeryHighMagnificent"
	case MatchRatingVeryLowWorldClass:
		return "VeryLowWorldClass"
	case MatchRatingLowWorldClass:
		return "LowWorldClass"
	case MatchRatingHighWorldClass:
		return "HighWorldClass"
	case MatchRatingVeryHighWorldClass:
		return "VeryHighWorldClass"
	case MatchRatingVeryLowSupernatural:
		return "VeryLowSupernatural"
	case MatchRatingLowSupernatural:
		return "LowSupernatural"
	case MatchRatingHighSupernatural:
		return "HighSupernatural"
	case MatchRatingVeryHighSupernatural:
		return "VeryHighSupernatural"
	case MatchRatingVeryLowTitanic:
		return "VeryLowTitanic"
	case MatchRatingLowTitanic:
		return "LowTitanic"
	case MatchRatingHighTitanic:
		return "HighTitanic"
	case MatchRatingVeryHighTitanic:
		return "VeryHighTitanic"
	case MatchRatingVeryLowExtraTerrestrial:
		return "VeryLowExtraTerrestrial"
	case MatchRatingLowExtraTerrestrial:
		return "LowExtraTerrestrial"
	case MatchRatingHighExtraTerrestrial:
		return "HighExtraTerrestrial"
	case MatchRatingVeryHighExtraTerrestrial:
		// Fixed: this case previously returned the name with a stray
		// trailing space, unlike every other case.
		return "VeryHighExtraTerrestrial"
	case MatchRatingVeryLowMythical:
		return "VeryLowMythical"
	case MatchRatingLowMythical:
		return "LowMythical"
	case MatchRatingHighMythical:
		return "HighMythical"
	case MatchRatingVeryHighMythical:
		return "VeryHighMythical"
	case MatchRatingVeryLowMagical:
		return "VeryLowMagical"
	case MatchRatingLowMagical:
		return "LowMagical"
	case MatchRatingHighMagical:
		return "HighMagical"
	case MatchRatingVeryHighMagical:
		return "VeryHighMagical"
	case MatchRatingVeryLowUtopian:
		return "VeryLowUtopian"
	case MatchRatingLowUtopian:
		return "LowUtopian"
	case MatchRatingHighUtopian:
		return "HighUtopian"
	case MatchRatingVeryHighUtopian:
		return "VeryHighUtopian"
	case MatchRatingVeryLowDivine:
		return "VeryLowDivine"
	case MatchRatingLowDivine:
		return "LowDivine"
	case MatchRatingHighDivine:
		return "HighDivine"
	case MatchRatingVeryHighDivine:
		return "VeryHighDivine"
	default:
		return "unknown"
	}
}
// Value returns the underlying integer value of the rating.
func (m MatchRating) Value() int {
	return int(m)
}
package md
import (
"os"
"strings"
"text/template"
"github.com/mh-cbon/testndoc"
)
// Export renders the recorded API documentation as Markdown and writes
// it to dest. Any existing file at dest is truncated and overwritten.
func Export(recorded testndoc.APIDoc, dest string) error {
	// O_TRUNC replaces the previous os.Remove + create sequence: it
	// empties an existing file in the same open call, avoiding both the
	// remove/create race and the silently-ignored Remove error.
	f, err := os.OpenFile(dest, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0755)
	if err != nil {
		return err
	}
	defer f.Close()
	funcs := template.FuncMap{
		"lower":  strings.ToLower,
		"mdlink": mdlink,
	}
	t, err := template.New("").Funcs(funcs).Parse(index)
	if err != nil {
		return err
	}
	return t.Execute(f, recorded)
}
// mdlink converts a heading into the Markdown anchor slug used in the
// TOC links: lower-cased, spaces become dashes, and slashes and dots
// are dropped.
func mdlink(s string) string {
	slugger := strings.NewReplacer(" ", "-", "/", "", ".", "")
	return slugger.Replace(strings.ToLower(s))
}
// index is the text/template source used by Export: a table of contents
// followed by one section per endpoint, each recorded request documented
// with its parameters, headers and bodies. The triple-backtick code
// fences are spliced in via string concatenation because a backtick
// cannot appear inside a raw string literal.
var index = `
# API DOC

## TOC
{{range .SortedEP}}
- [{{.ParameterizedPath}}](#{{.ParameterizedPath|mdlink}}){{range .SortedRequests}}
- [{{.Title}}](#{{.Title|mdlink}}){{end}}
{{end}}
{{range .SortedEP}}
### {{.ParameterizedPath}}
{{.Doc}}
{{range .SortedRequests}}
#### {{.Title}}
{{.Doc}}
__[{{.Response.Code}}] {{.Request.Method}}__ {{.Request.URL}}
{{- if .HasURLParams}}
##### Url Parameters
| Key | Value |
| --- | --- |
{{range $k, $v := .Request.URLParams}}| {{$k}} | {{$v}} |{{end}}
{{end}}
{{- if .HasGetParams}}
##### GET Parameters
| Key | Value |
| --- | --- |
{{range $k, $v := .Request.GetParams}}| {{$k}} | {{$v}} |{{end}}
{{end}}
{{- if .HasPostParams}}
##### POST Parameters
| Key | Value |
| --- | --- |
{{range $k, $v := .Request.PostParams}}| {{$k}} | {{$v}} |{{end}}
{{end}}
{{- if .HasRequestHeaders}}
##### Request Headers
| Key | Value |
| --- | --- |
{{range $k, $v := .Request.Headers}}| {{$k}} | {{$v}} |{{end}}
{{end}}
{{- if .HasRequestBody}}
##### Request Body
` + "```" + `
{{.GetRequestBody}}
` + "```" + `
{{end}}
{{- if .HasResponseHeaders}}
##### Response Headers
| Key | Value |
| --- | --- |
{{range $k, $v := .Response.Headers}}| {{$k}} | {{$v}} |{{end}}
{{end}}
{{- if .HasResponseBody}}
##### Response Body
` + "```" + `
{{.GetResponseBody}}
` + "```" + `
{{end}}
[TOP](#{{"API DOC"|mdlink}})
___________________
{{end}}
{{end}}`
package vast
const (
	/**
	 * not to be confused with an impression, this event indicates that an individual creative
	 * portion of the ad was viewed. An impression indicates the first frame of the ad was displayed; however
	 * an ad may be composed of multiple creative, or creative that only play on some platforms and not
	 * others. This event enables ad servers to track which ad creative are viewed, and therefore, which
	 * platforms are more common.
	 */
	Event_type_creativeView = "creativeView"
	// NOTE(review): "view" is not part of the VAST tracking-event vocabulary
	// documented above — presumably a vendor-specific alias; confirm intended semantics.
	Event_type_view = "view"
	/**
	 * this event is used to indicate that an individual creative within the ad was loaded and playback
	 * began. As with creativeView, this event is another way of tracking creative playback.
	 */
	Event_type_start = "start"
	// the creative played for at least 25% of the total duration.
	Event_type_firstQuartile = "firstQuartile"
	// the creative played for at least 50% of the total duration.
	Event_type_midpoint = "midpoint"
	// the creative played for at least 75% of the duration.
	Event_type_thirdQuartile = "thirdQuartile"
	// The creative was played to the end at normal speed.
	Event_type_complete = "complete"
	// the user activated the mute control and muted the creative.
	Event_type_mute = "mute"
	// the user activated the mute control and unmuted the creative.
	Event_type_unmute = "unmute"
	// the user clicked the pause control and stopped the creative.
	Event_type_pause = "pause"
	// the user activated the rewind control to access a previous point in the creative timeline.
	Event_type_rewind = "rewind"
	// the user activated the resume control after the creative had been stopped or paused.
	Event_type_resume = "resume"
	// the user activated a control to extend the video player to the edges of the viewer’s screen.
	Event_type_fullscreen = "fullscreen"
	// the user activated the control to reduce video player size to original dimensions.
	Event_type_exitFullscreen = "exitFullscreen"
	// the user activated a control to expand the creative.
	Event_type_expand = "expand"
	// the user activated a control to reduce the creative to its original dimensions.
	Event_type_collapse = "collapse"
	/**
	 * the user activated a control that launched an additional portion of the
	 * creative. The name of this event distinguishes it from the existing “acceptInvitation” event described in
	 * the 2008 IAB Digital Video In-Stream Ad Metrics Definitions, which defines the “acceptInivitation”
	 * metric as applying to non-linear ads only. The “acceptInvitationLinear” event extends the metric for use
	 * in Linear creative.
	 */
	Event_type_acceptInvitationLinear = "acceptInvitationLinear"
	/**
	 * the user clicked the close button on the creative. The name of this event distinguishes it
	 * from the existing “close” event described in the 2008 IAB Digital Video In-Stream Ad Metrics
	 * Definitions, which defines the “close” metric as applying to non-linear ads only. The “closeLinear” event
	 * extends the “close” event for use in Linear creative.
	 */
	Event_type_closeLinear = "closeLinear"
	Event_type_close       = "close"
	// the user activated a skip control to skip the creative, which is a
	// different control than the one used to close the creative.
	Event_type_skip = "skip"
	/**
	 * the creative played for a duration at normal speed that is equal to or greater than the
	 * value provided in an additional attribute for offset . Offset values can be time in the format
	 * HH:MM:SS or HH:MM:SS.mmm or a percentage value in the format n% . Multiple progress events
	 * with different offsets are allowed.
	 */
	Event_type_progress = "progress"
	// NOTE(review): "monitor" is also not in the VAST event vocabulary —
	// presumably an internal/vendor event; confirm.
	Event_type_monitor = "monitor"
)
package doubly
import "fmt"
// LinkedList is a doubly linked list with sentinel header/trailer nodes:
// the real elements live strictly between the two sentinels, which
// removes all nil-checks from insertion and removal.
type LinkedList[T any] struct {
	header *Node[T] // header is a sentinel node. header.Next is the first element in the list.
	trailer *Node[T] // trailer is a sentinel node. trailer.Prev is the last element in the list.
	Size int // number of real (non-sentinel) elements
}
// New constructs and returns an empty doubly linked list whose two
// sentinel nodes are already linked to each other.
func New[T any]() *LinkedList[T] {
	head := &Node[T]{}
	tail := &Node[T]{Prev: head}
	head.Next = tail
	return &LinkedList[T]{header: head, trailer: tail}
}
// IsEmpty reports whether the list contains no elements.
func (d *LinkedList[T]) IsEmpty() bool {
	return d.Size == 0
}
// First returns the data held by the first element. ok is false when
// the list is empty, in which case data is the zero value of T.
func (d *LinkedList[T]) First() (data T, ok bool) {
	if d.Size == 0 {
		return
	}
	return d.header.Next.Data, true
}
// Last returns the data held by the last element. ok is false when the
// list is empty, in which case data is the zero value of T.
func (d *LinkedList[T]) Last() (data T, ok bool) {
	if d.Size == 0 {
		return
	}
	return d.trailer.Prev.Data, true
}
// AddBetween wraps data in a fresh node and splices it in between the
// two given (already adjacent) nodes.
func (d *LinkedList[T]) AddBetween(data T, predecessor *Node[T], successor *Node[T]) {
	n := &Node[T]{Data: data, Prev: predecessor, Next: successor}
	predecessor.Next, successor.Prev = n, n
	d.Size++
}
// AddFirst prepends a new element, placing it just after the header sentinel.
func (d *LinkedList[T]) AddFirst(data T) {
	d.AddBetween(data, d.header, d.header.Next)
}
// AddLast appends a new element, placing it just before the trailer sentinel.
func (d *LinkedList[T]) AddLast(data T) {
	d.AddBetween(data, d.trailer.Prev, d.trailer)
}
// Remove unlinks node from the list and returns the data it carried.
// node must belong to this list.
func (d *LinkedList[T]) Remove(node *Node[T]) T {
	node.Prev.Next = node.Next
	node.Next.Prev = node.Prev
	d.Size--
	return node.Data
}
// RemoveFirst removes and returns the first element's data. ok is false
// when the list is empty.
func (d *LinkedList[T]) RemoveFirst() (data T, ok bool) {
	if d.Size == 0 {
		return
	}
	return d.Remove(d.header.Next), true
}
// RemoveLast removes and returns the last element's data. ok is false
// when the list is empty.
func (d *LinkedList[T]) RemoveLast() (data T, ok bool) {
	if d.Size == 0 {
		return
	}
	return d.Remove(d.trailer.Prev), true
}
// String returns a human-readable representation of the list, e.g.
// "[ 1 2 3 ]".
func (d *LinkedList[T]) String() string {
	out := "[ "
	for n := d.header.Next; n != d.trailer; n = n.Next {
		out += fmt.Sprint(n.Data) + " "
	}
	return out + "]"
}
// Clone constructs and returns a copy of the list. Element values are
// copied by assignment, so reference-typed data is shared between the
// original and the clone.
//
// The previous implementation dereferenced a nil *Node when the list
// held exactly one element (its tail-linking variable was only assigned
// inside the loop over elements after the first), and it never linked
// the first copied node's Prev back to the clone's header sentinel.
// Building the clone through AddLast keeps every link correct for
// empty, single-element and longer lists alike.
func (d *LinkedList[T]) Clone() *LinkedList[T] {
	clone := New[T]()
	for n := d.header.Next; n != d.trailer; n = n.Next {
		clone.AddLast(n.Data)
	}
	return clone
}
// ToSlice returns a slice of the list's elements.
func (d *LinkedList[T]) ToSlice() []T {
r := make([]T, d.Size)
for i, cur := 0, d.header.Next; cur != d.trailer && i < len(r); i, cur = i+1, cur.Next {
r[i] = cur.Data
}
return r
} | linkedlist/doubly/doubly_linked_list.go | 0.830732 | 0.521654 | doubly_linked_list.go | starcoder |
package cpu
import (
"encoding/binary"
"fmt"
)
// Opcode represents a single CHIP-8 operation.
type Opcode uint16
// Bytes splits the opcode into its respective bytes and returns them.
func (o Opcode) Bytes() (firstByte, secondByte byte) {
return byte(o >> 8), byte(o)
}
// OpcodeFromBytes takes in a slice of bytes and returns an Opcode
func OpcodeFromBytes(b []byte) Opcode {
return Opcode(binary.BigEndian.Uint16(b))
}
// Instruction returns the Opcode's name and instruction. If the Opcode is
// unknown then ErrUnknownOp will be returned.
func (o Opcode) Instruction() string {
firstByte, secondByte := o.Bytes()
firstNib := firstByte >> 4
secondNib := firstByte & 0xf
thirdNib := secondByte >> 4
fourthNib := secondByte & 0xf
switch firstNib {
case 0x0:
switch secondByte {
case 0xe0:
return fmt.Sprintf("%-10s", "CLS")
case 0xee:
return fmt.Sprintf("%-10s", "RTS")
}
case 0x1:
return fmt.Sprintf("%-10s $%01x%02x", "JUMP", secondNib, secondByte)
case 0x2:
return fmt.Sprintf("%-10s $%01x%02x", "CALL", secondNib, secondByte)
case 0x3:
return fmt.Sprintf("%-10s V%01X,#$%02x", "SKIP.EQ", secondNib, secondByte)
case 0x4:
return fmt.Sprintf("%-10s V%01X,#$%02x", "SKIP.NE", secondNib, secondByte)
case 0x5:
return fmt.Sprintf("%-10s V%01X,V%01X", "SKIP.EQ", secondNib, thirdNib)
case 0x6:
return fmt.Sprintf("%-10s V%01X,#$%02x", "MVI", secondNib, secondByte)
case 0x7:
return fmt.Sprintf("%-10s V%01X,#$%02x", "ADI", secondNib, secondByte)
case 0x8:
switch secondByte & 0xf {
case 0x0:
return fmt.Sprintf("%-10s V%01X,V%01X", "MOV", secondNib, thirdNib)
case 0x1:
return fmt.Sprintf("%-10s V%01X,V%01X", "OR", secondNib, thirdNib)
case 0x2:
return fmt.Sprintf("%-10s V%01X,V%01X", "AND", secondNib, thirdNib)
case 0x3:
return fmt.Sprintf("%-10s V%01X,V%01X", "XOR", secondNib, thirdNib)
case 0x4:
return fmt.Sprintf("%-10s V%01X,V%01X", "ADD.", secondNib, thirdNib)
case 0x5:
return fmt.Sprintf("%-10s V%01X,V%01X", "SUB.", secondNib, thirdNib)
case 0x6:
return fmt.Sprintf("%-10s V%01X", "SHR.", secondNib)
case 0x7:
return fmt.Sprintf("%-10s V%01X,V%01X", "SUBB.", secondNib, thirdNib)
case 0xe:
return fmt.Sprintf("%-10s V%01X", "SHL.", secondNib)
}
case 0x9:
return fmt.Sprintf("%-10s V%01X,V%01X", "SKIP.NE", secondNib, thirdNib)
case 0xa:
return fmt.Sprintf("%-10s I,#$%01x%02x", "MVI", secondNib, secondByte)
case 0xb:
return fmt.Sprintf("%-10s $%01x%02x(V0)", "JUMP", secondNib, secondByte)
case 0xc:
return fmt.Sprintf("%-10s V%01X,#$%02x", "RND", secondNib, secondByte)
case 0xd:
return fmt.Sprintf("%-10s V%01X,V%01X,#$%01X", "SPRITE.", secondNib, thirdNib, fourthNib)
case 0xe:
switch secondByte {
case 0x9E:
return fmt.Sprintf("%-10s V%01X", "SKIP.KEY", secondNib)
case 0xA1:
return fmt.Sprintf("%-10s V%01X", "SKIP.NOKEY", secondNib)
}
case 0xf:
switch secondByte {
case 0x07:
return fmt.Sprintf("%-10s V%01X,DELAY", "MOV", secondNib)
case 0x0a:
return fmt.Sprintf("%-10s V%01X", "WAITKEY", secondNib)
case 0x15:
return fmt.Sprintf("%-10s DELAY,V%01X", "MOV", secondNib)
case 0x18:
return fmt.Sprintf("%-10s SOUND,V%01X", "MOV", secondNib)
case 0x1e:
return fmt.Sprintf("%-10s I,V%01X", "ADD", secondNib)
case 0x29:
return fmt.Sprintf("%-10s V%01X", "SPRITECHAR", secondNib)
case 0x33:
return fmt.Sprintf("%-10s V%01X", "MOVBCD", secondNib)
case 0x55:
return fmt.Sprintf("%-10s (I),V0-V%01X", "MOVM", secondNib)
case 0x65:
return fmt.Sprintf("%-10s V0-V%01X,(I)", "MOVM", secondNib)
}
}
return fmt.Sprintf("%-10s 0x%02x%02x", "UNK", firstByte, secondByte)
} | go/internal/cpu/opcode.go | 0.601125 | 0.427217 | opcode.go | starcoder |
package grid
import "errors"
// deltaMap maps a compass-style direction label to the row/column
// offset of the corresponding neighbour. It is initialised lazily by
// NewGrid.
var deltaMap map[string]delta

// NewGrid constructs a rows x cols grid of zero-valued cells. The
// boundaries record the index extremes used for wrap-around neighbour
// lookups (see getRelativeNeighbor).
func NewGrid(rows int, cols int) *Grid {
	// The neighbour-offset table is shared package state; build it once
	// instead of rebuilding it on every grid construction.
	if deltaMap == nil {
		deltaMap = map[string]delta{
			"tl": {-1, -1},
			"tc": {-1, 0},
			"tr": {-1, 1},
			"l":  {0, -1},
			"r":  {0, 1},
			"bl": {1, -1},
			"bc": {1, 0},
			"br": {1, 1},
		}
	}
	grid := &Grid{
		grid: make([][]GridCell, rows),
		dimensions: GridDimensions{
			rows: rows,
			cols: cols,
		},
		boundaries: GridBoundaries{
			top:    0,
			right:  cols - 1,
			bottom: rows - 1,
			left:   0,
		},
	}
	for index := range grid.grid {
		grid.grid[index] = make([]GridCell, cols)
	}
	return grid
}
// Initialize fills the grid from a rows x cols table of cell values.
// It is a utility for tests; it panics (via assert) when the table's
// dimensions do not match the grid's.
func (grid Grid) Initialize(table [][]cellType) {
	assert(len(table) == grid.dimensions.rows, "Invalid Initialize size (rows)")
	assert(len(table[0]) == grid.dimensions.cols, "Invalid Initialize size (cols)")
	for rowI, row := range table {
		// `for colI := range row` replaces the vet-flagged
		// `for colI, _ := range row`.
		for colI := range row {
			grid.SetCell(GridCell{
				Value: table[rowI][colI],
				Pos:   GridCellPosition{rowI, colI},
			})
		}
	}
}
// GetCell returns the cell stored at pos. It panics (via the assert
// helpers) when pos lies outside the grid.
func (grid Grid) GetCell(pos GridCellPosition) GridCell {
	assertRowBoundary(grid, pos.Row)
	assertColBoundary(grid, pos.Col)
	return grid.grid[pos.Row][pos.Col]
}
// SetCell stores cell at the position recorded in cell.Pos. It panics
// (via the assert helpers) when that position lies outside the grid.
func (grid Grid) SetCell(cell GridCell) {
	assertRowBoundary(grid, cell.Pos.Row)
	assertColBoundary(grid, cell.Pos.Col)
	grid.grid[cell.Pos.Row][cell.Pos.Col] = cell
}
// GetNeighbors returns the eight cells surrounding cell, keyed by the
// compass-style labels tl, tc, tr, l, r, bl, bc and br. Edge cells wrap
// around to the opposite side of the grid.
func (grid Grid) GetNeighbors(cell GridCell) map[string]GridCell {
	// Fixed label list (rather than ranging over deltaMap) so an
	// unknown/uninitialised table still panics inside
	// getRelativeNeighbor, exactly as the unrolled version did.
	labels := []string{"tl", "tc", "tr", "l", "r", "bl", "bc", "br"}
	neighbors := make(map[string]GridCell, len(labels))
	for _, label := range labels {
		neighbors[label] = grid.getRelativeNeighbor(cell.Pos, label)
	}
	return neighbors
}
// GetLiveNeighborCount returns how many of cell's eight (wrap-around)
// neighbours report IsAlive.
func (grid Grid) GetLiveNeighborCount(cell GridCell) int {
	count := 0
	for _, n := range grid.GetNeighbors(cell) {
		if n.IsAlive() {
			count++
		}
	}
	return count
}
// getRelativeNeighbor resolves the neighbour of pos in the direction
// named by deltaLabel (a key of deltaMap). Positions that step past an
// edge wrap to the opposite side, giving the grid a toroidal topology.
// Panics on an unknown label.
func (grid Grid) getRelativeNeighbor(pos GridCellPosition, deltaLabel string) GridCell {
	delta, exists := deltaMap[deltaLabel]
	if !exists {
		panic(errors.New("bad delta"))
	}
	neighborPosition := pos
	// Adjust the neighbors position on the grid.
	neighborPosition.Row += delta.deltaRow
	neighborPosition.Col += delta.deltaCol
	// Wrap any values which have left the board. Deltas are always +/-1,
	// so a single boundary check per axis is sufficient.
	if neighborPosition.Row < grid.boundaries.top {
		neighborPosition.Row = grid.boundaries.bottom
	} else if neighborPosition.Row > grid.boundaries.bottom {
		neighborPosition.Row = grid.boundaries.top
	}
	if neighborPosition.Col < grid.boundaries.left {
		neighborPosition.Col = grid.boundaries.right
	} else if neighborPosition.Col > grid.boundaries.right {
		neighborPosition.Col = grid.boundaries.left
	}
	return grid.GetCell(neighborPosition)
}
// IterateRows invokes cb once per row, in order, with the row index and
// the row's cells.
func (grid Grid) IterateRows(cb func(index int, row []GridCell)) {
	for index, row := range grid.grid {
		cb(index, row)
	}
}
// IterateCells invokes cb once for every cell in the grid, visiting
// rows top to bottom and cells left to right within each row.
func (grid Grid) IterateCells(cb func(cell GridCell)) {
	grid.IterateRows(func(_ int, row []GridCell) {
		for _, cell := range row {
			cb(cell)
		}
	})
}
// Clone returns a new grid with the same dimensions as this one.
// NOTE(review): despite the name, cell contents are NOT copied — the
// returned grid is blank (which is what a next-generation computation
// needs). Confirm no caller expects a deep copy before renaming or
// changing this.
func (grid Grid) Clone() *Grid {
	return NewGrid(grid.dimensions.rows, grid.dimensions.cols)
}
package config
// InputFixture corresponds with the data structure of unmarshalled config
// values for fixtures (docker-compose output plus per-datastore source and
// destination settings). It shouldn't be used directly and should instead
// be marshalled via its parse method.
type InputFixture struct {
	DockerCompose *struct {
		Output string `yaml:"output"`
	} `yaml:"docker-compose"`
	Imports interface{} `yaml:"imports"`
	Cassandra *struct {
		Sources []struct {
			Keyspace string `yaml:"keyspace"`
			Definition string `yaml:"definition"`
			Files string `yaml:"files"`
		} `yaml:"src"`
		Destination string `yaml:"dest"`
	}
	Elasticsearch *struct {
		Sources []struct {
			Index string `yaml:"index"`
			Mapping string `yaml:"mapping"`
			MappingType string `yaml:"mapping-type"`
			Files string `yaml:"files"`
		} `yaml:"src"`
		Destination string `yaml:"dest"`
	} `yaml:"elasticsearch"`
	PostgreSQL *struct {
		Sources []struct {
			Database string `yaml:"database"`
			Definition string `yaml:"definition"`
			Files string `yaml:"files"`
		} `yaml:"src"`
		Destination string `yaml:"dest"`
	} `yaml:"postgresql"`
	Redis *struct {
		Source string `yaml:"src"`
		Destination string `yaml:"dest"`
	} `yaml:"redis"`
}
// InputService corresponds with the data structure of unmarshalled config
// values for mock services (gRPC and HTTP endpoints with their canned
// request/response pairs). It shouldn't be used directly and should
// instead be marshalled via its parse method.
type InputService struct {
	Imports interface{} `yaml:"imports"`
	Output string `yaml:"output"`
	GRPC map[string]struct {
		Port uint16 `yaml:"port"`
		Definition string `yaml:"definition"`
		Methods map[string]struct {
			Request string `yaml:"request"`
			Response string `yaml:"response"`
		} `yaml:"methods"`
		Conditions map[string][]struct {
			Request interface{} `yaml:"request"`
			Response interface{} `yaml:"response"`
		} `yaml:"conditions"`
	} `yaml:"grpc"`
	HTTP map[string]struct {
		Port uint16 `yaml:"port"`
		Endpoints map[string][]struct {
			Method interface{} `yaml:"method"`
			Request interface{} `yaml:"request"`
			Response interface{} `yaml:"response"`
			ResponseFile string `yaml:"response_file"`
		} `yaml:"endpoints"`
	} `yaml:"http"`
}
package container
import (
"fmt"
"math"
)
// DenseArray is an array in which elements are packed with a width of
// b < 64 bits. It allows for space-efficient storage when integers have
// well-known value ranges that don't correspond to exactly 64, 32, 16, or 8
// bits.
type DenseArray struct {
	Length int // number of packed elements
	Bits byte // width of each element, in bits
	Data []byte // tightly packed element storage
}
// DenseArrayBytes returns the number of bytes needed to store `length`
// elements of `bits` bits each, rounded up to a whole byte.
// NOTE(review): the integer form (bits*length + 7) / 8 would avoid the
// float round-trip, but switching would orphan the file's math import.
func DenseArrayBytes(bits, length int) int {
	return int(math.Ceil(float64(bits * length) / 8))
}
// Slice converts the contents of a DenseArray into a standard uint64 slice.
// out must have at least arr.Length elements (a shorter buffer panics;
// extra capacity is left untouched).
func (arr *DenseArray) Slice(out []uint64) {
	if len(out) < arr.Length {
		panic(fmt.Sprintf("DenseArray has length %d, but out buffer has " +
			"length %d.", arr.Length, len(out)))
	}
	// Set up buffers and commonly-used values. tBuf holds the raw,
	// bit-shifted bytes pulled from Data; buf holds them re-aligned to a
	// byte boundary.
	bits := int(arr.Bits)
	buf, tBuf := [8]byte{ }, [9]byte{ }
	bufBytes := uint64(arr.Bits / 8)
	if bufBytes * 8 < uint64(arr.Bits) { bufBytes++ }
	for i := 0; i < arr.Length; i++ {
		// Find where we are in the array: element i occupies bit
		// positions [i*bits, (i+1)*bits) of Data.
		startBit := uint64(i*bits % 8)
		nextStartBit := (startBit + uint64(bits)) % 8
		startByte := int(i*bits / 8)
		endByte := int(((i + 1)*bits - 1) / 8)
		tBufBytes := endByte - startByte + 1
		// Pull bytes out into a buffer.
		for j := 0; j < tBufBytes; j++ {
			tBuf[j] = arr.Data[startByte + j]
		}
		// Mask unrelated edges (bits belonging to neighbouring elements
		// in the first and last bytes).
		startMask := (^byte(0)) << startBit
		endMask := (^byte(0)) >> (8 - nextStartBit)
		if nextStartBit == 0 { endMask = ^byte(0) }
		tBuf[0] &= startMask
		tBuf[tBufBytes - 1] &= endMask
		// Transfer shifted bytes into unshifted buffer.
		for j := uint64(0); j < bufBytes; j++ {
			buf[j] = tBuf[j] >> startBit
		}
		for j := uint64(0); j < bufBytes; j++ {
			buf[j] |= tBuf[j+1] << (8-startBit)
		}
		// Clear tBuf for next loop.
		for i := 0; i < tBufBytes; i++ { tBuf[i] = 0 }
		// Convert to uint64
		xi := uint64(0)
		for j := uint64(0); j < bufBytes; j++ {
			xi |= uint64(buf[j]) << (8*j)
		}
		out[i] = xi
	}
}
// NewDenseArray creates a new DenseArray which stores only the `bits`
// least-significant bits of every element in x. Panics if bits > 64.
func NewDenseArray(bits int, x []uint64) *DenseArray {
	if bits > 64 {
		panic("Cannot pack more than 64 bits per element into a DenseArray")
	}
	// Set up buffers and commonly used values. buf holds an element's
	// byte-aligned bytes; tBuf holds the same bytes shifted to the
	// element's bit offset within Data.
	nBytes := DenseArrayBytes(bits, len(x))
	arr := &DenseArray{
		Length: len(x), Bits: byte(bits), Data: make([]byte, nBytes),
	}
	buf, tBuf := [8]byte{ }, [9]byte{ }
	bufBytes := uint64(bits / 8)
	if bufBytes * 8 < uint64(bits) { bufBytes++ }
	mask := (^uint64(0)) >> uint64(64 - bits)
	for i, xi := range x {
		// Keep only the low `bits` bits of the element.
		xi &= mask
		currBit := uint64(i*bits % 8)
		// Move to byte-wise buffer.
		for j := uint64(0); j < bufBytes; j++ {
			buf[j] = byte(xi >> (8*j))
		}
		// Shift and move to the transfer buffer
		tBuf[bufBytes] = 0
		for j := uint64(0); j < bufBytes; j++ {
			tBuf[j] = buf[j] << currBit
		}
		for j := uint64(0); j < bufBytes; j++ {
			tBuf[j + 1] |= buf[j] >> (8-currBit)
		}
		// Transfer bits into the DenseArray
		startByte := i * bits / 8
		endByte := ((i + 1)*bits - 1) / 8
		for j := 0; j < (endByte - startByte) + 1; j++ {
			arr.Data[startByte + j] |= tBuf[j]
		}
	}
	return arr
}
package dtoa
import (
"math"
)
// DiyFp ("do it yourself floating point") is an extended-precision
// float used by the Grisu dtoa algorithm: significand f with binary
// exponent e, i.e. the value f * 2^e.
type DiyFp struct {
	f uint64 // significand
	e int    // binary exponent
}
// DiyFpDouble decomposes an IEEE-754 float64 into a DiyFp. Normal
// numbers have the implicit hidden bit restored; subnormals (biased
// exponent 0) keep the raw significand at the minimum exponent.
func DiyFpDouble(d float64) DiyFp {
	u64 := math.Float64bits(d)
	biased_e := int((u64 & kDpExponentMask) >> uint64(kDpSignificandSize))
	significand := (u64 & kDpSignificandMask)
	if biased_e != 0 {
		return DiyFp{significand + kDpHiddenBit, biased_e - kDpExponentBias}
	}
	return DiyFp{significand, kDpMinExponent + 1}
}
// Minus returns df - rhs. Both operands must have the same exponent
// (the Grisu callers guarantee this); the exponent is carried through
// unchanged and no borrow/underflow check is performed.
func (df DiyFp) Minus(rhs DiyFp) DiyFp {
	return DiyFp{df.f - rhs.f, df.e}
}
// Multiplication multiplies the two significands via four 32x32 partial
// products, keeps the (rounded) top 64 bits of the 128-bit result, and
// sums the exponents plus 64 to account for the discarded low half.
func (df DiyFp) Multiplication(rhs DiyFp) DiyFp {
	const M32 uint64 = 0xFFFFFFFF
	a := df.f >> 32
	b := df.f & M32
	c := rhs.f >> 32
	d := rhs.f & M32
	ac := a * c
	bc := b * c
	ad := a * d
	bd := b * d
	tmp := (bd >> 32) + (ad & M32) + (bc & M32)
	tmp += uint64(1) << 31 /// mult_round: round-half-up on the discarded low bits
	return DiyFp{ac + (ad >> 32) + (bc >> 32) + (tmp >> 32), df.e + rhs.e + 64}
}
// Normalize shifts the significand left until its most significant bit
// is set, decrementing the exponent to match, and returns the result.
// The receiver is a value, so the original is untouched.
func (df DiyFp) Normalize() DiyFp {
	const topBit = uint64(1) << 63
	for df.f&topBit == 0 {
		df.f <<= 1
		df.e--
	}
	return df
}
// NormalizeBoundary normalizes a rounding-boundary value: it shifts
// until the bit just above the hidden bit is set, then shifts the
// significand into the top of the 64-bit word, adjusting the exponent
// for both shifts.
func (df DiyFp) NormalizeBoundary() DiyFp {
	for (df.f & (kDpHiddenBit << 1)) == 0 {
		df.f <<= 1
		df.e--
	}
	df.f <<= (kDiySignificandSize - kDpSignificandSize - 2)
	df.e = df.e - (kDiySignificandSize - kDpSignificandSize - 2)
	return df
}
// NormalizedBoundaries returns the lower (mi) and upper (pl) boundaries
// of the interval of reals that round to df, both normalized to share
// the same exponent. When the significand is exactly the hidden bit
// (a power of two), the lower boundary is half as far away, hence the
// special case.
func (df DiyFp) NormalizedBoundaries() (DiyFp, DiyFp) {
	pl := DiyFp{(df.f << 1) + 1, df.e - 1}.NormalizeBoundary()
	var mi DiyFp
	if df.f == kDpHiddenBit {
		mi = DiyFp{(df.f << 2) - 1, df.e - 2}
	} else {
		mi = DiyFp{(df.f << 1) - 1, df.e - 1}
	}
	// Align mi to pl's exponent so the pair can be compared directly.
	mi.f <<= uint64(mi.e - pl.e)
	mi.e = pl.e
	return mi, pl
}
// ToDouble reassembles the DiyFp into an IEEE-754 float64. A value at
// the denormal exponent without the hidden bit set encodes as a
// subnormal (biased exponent 0).
func (df DiyFp) ToDouble() float64 {
	var be uint64
	if df.e == kDpDenormalExponent && (df.f&kDpHiddenBit) == 0 {
		be = 0
	} else {
		be = uint64(df.e + kDpExponentBias)
	}
	// be is reused: first the biased exponent, then the full bit pattern.
	be = (df.f & kDpSignificandMask) | (be << kDpSignificandSize)
	return math.Float64frombits(be)
}
// GetCachedPowerByIndex returns the precomputed power of ten stored at
// index in the kCachedPowers significand/exponent tables.
func GetCachedPowerByIndex(index uint) DiyFp {
	return DiyFp{kCachedPowers_F[index], kCachedPowers_E[index]}
}
// GetCachedPower returns a cached power of ten whose product with a
// DiyFp of binary exponent e lands in the exponent window Grisu needs,
// together with the negated decimal exponent K of that power.
func GetCachedPower(e int) (DiyFp, int) {
	//int k = static_cast<int>(ceil((-61 - e) * 0.30102999566398114)) + 374;
	var dk float64 = (-61-float64(e))*0.30102999566398114 + 347 // dk must be positive, so can do ceiling in positive
	k := int(dk)
	if dk-float64(k) > 0.0 {
		k++ // manual ceil: truncation rounded down, bump back up
	}
	index := uint((k >> 3) + 1)
	return GetCachedPowerByIndex(index), -(-348 + int(index<<3)) // decimal exponent no need lookup table
}
func GetCachedPower10(exp int, outExp *int) DiyFp {
var index uint = uint(exp) + 348/8
*outExp = -348 + int(index)*8
return GetCachedPowerByIndex(index)
} | diyfp.go | 0.547464 | 0.449151 | diyfp.go | starcoder |
package cryptypes
import (
	"database/sql/driver"
	"fmt"
)
// EncryptedUint64 supports encrypting uint64 data.
type EncryptedUint64 struct {
	Field
	Raw uint64
}

// Scan decrypts the raw database value into Raw. It returns an error —
// rather than panicking, as the previous unchecked type assertion did —
// when the driver supplies something other than []byte.
func (s *EncryptedUint64) Scan(value interface{}) error {
	b, ok := value.([]byte)
	if !ok {
		return fmt.Errorf("cryptypes: cannot scan %T into EncryptedUint64", value)
	}
	return decrypt(b, &s.Raw)
}

// Value encrypts Raw into a value that can safely be stored in the DB.
func (s EncryptedUint64) Value() (driver.Value, error) {
	return encrypt(s.Raw)
}
// NullEncryptedUint64 supports encrypting nullable uint64 data. A SQL
// NULL maps to Empty == true and Raw == 0.
type NullEncryptedUint64 struct {
	Field
	Raw uint64
	Empty bool
}

// Scan converts the value from the DB into a usable NullEncryptedUint64 value.
// NOTE(review): the unchecked value.([]byte) assertion panics if the
// driver hands back a non-[]byte (e.g. string) — confirm driver behaviour.
func (s *NullEncryptedUint64) Scan(value interface{}) error {
	if value == nil {
		s.Raw = 0
		s.Empty = true
		return nil
	}
	return decrypt(value.([]byte), &s.Raw)
}

// Value converts an initialized NullEncryptedUint64 value into a value
// that can safely be stored in the DB; an Empty value becomes SQL NULL.
func (s NullEncryptedUint64) Value() (driver.Value, error) {
	if s.Empty {
		return nil, nil
	}
	return encrypt(s.Raw)
}
// SignedUint64 supports signing uint64 data. Valid reports whether the
// stored signature verified on the last Scan.
type SignedUint64 struct {
	Field
	Raw uint64
	Valid bool
}

// Scan converts the value from the DB into a usable SignedUint64 value.
// NOTE(review): the unchecked value.([]byte) assertion panics on a
// non-[]byte (or NULL) column value — confirm driver behaviour.
func (s *SignedUint64) Scan(value interface{}) (err error) {
	s.Valid, err = verify(value.([]byte), &s.Raw)
	return
}

// Value signs Raw into a value that can safely be stored in the DB.
func (s SignedUint64) Value() (driver.Value, error) {
	return sign(s.Raw)
}
// NullSignedUint64 supports signing nullable uint64 data. A SQL NULL
// maps to Empty == true and is considered Valid.
type NullSignedUint64 struct {
	Field
	Raw uint64
	Empty bool
	Valid bool
}

// Scan converts the value from the DB into a usable NullSignedUint64 value.
// NOTE(review): for non-nil values the unchecked value.([]byte)
// assertion panics on a non-[]byte — confirm driver behaviour.
func (s *NullSignedUint64) Scan(value interface{}) (err error) {
	if value == nil {
		s.Raw = 0
		s.Empty = true
		s.Valid = true
		return nil
	}
	s.Valid, err = verify(value.([]byte), &s.Raw)
	return
}

// Value signs Raw into a value that can safely be stored in the DB;
// an Empty value becomes SQL NULL.
func (s NullSignedUint64) Value() (driver.Value, error) {
	if s.Empty {
		return nil, nil
	}
	return sign(s.Raw)
}
// SignedEncryptedUint64 supports signing and encrypting uint64 data.
// Valid reports whether the signature verified on the last Scan.
type SignedEncryptedUint64 struct {
	Field
	Raw uint64
	Valid bool
}

// Scan decrypts and verifies the raw database value into Raw.
// NOTE(review): the unchecked value.([]byte) assertion panics on a
// non-[]byte (or NULL) column value — confirm driver behaviour.
func (s *SignedEncryptedUint64) Scan(value interface{}) (err error) {
	s.Valid, err = decryptVerify(value.([]byte), &s.Raw)
	return
}

// Value encrypts and signs Raw into a value that can safely be stored in the DB.
func (s SignedEncryptedUint64) Value() (driver.Value, error) {
	return encryptSign(s.Raw)
}
// NullSignedEncryptedUint64 supports signing and encrypting nullable
// uint64 data. A SQL NULL maps to Empty == true and is considered Valid.
type NullSignedEncryptedUint64 struct {
	Field
	Raw uint64
	Empty bool
	Valid bool
}

// Scan decrypts and verifies the raw database value into Raw.
// NOTE(review): for non-nil values the unchecked value.([]byte)
// assertion panics on a non-[]byte — confirm driver behaviour.
func (s *NullSignedEncryptedUint64) Scan(value interface{}) (err error) {
	if value == nil {
		s.Raw = 0
		s.Empty = true
		s.Valid = true
		return nil
	}
	s.Valid, err = decryptVerify(value.([]byte), &s.Raw)
	return
}

// Value encrypts and signs Raw into a value that can safely be stored
// in the DB; an Empty value becomes SQL NULL.
func (s NullSignedEncryptedUint64) Value() (driver.Value, error) {
	if s.Empty {
		return nil, nil
	}
	return encryptSign(s.Raw)
}
Rate limiting is an important mechanism for controlling resource utilisation and
maintaining quality of service.
Go elegantly supports rate limiting with goroutines, channels and tickers
*/
package main
import (
"fmt"
"time"
)
func main() {
	// NOTE: First, we'll look at basic rate limiting.
	// NOTE: Suppose we want to limit our handling of incoming requests.
	// NOTE: We'll serve these requests off a channel of the same name.
	requests := make(chan int, 5)
	for i := 1; i <= 5; i++ {
		requests <- i
	}
	close(requests)
	// NOTE: This limiter channel will receive a value every 200 ms.
	// NOTE: It acts as the regulator in our rate limiting scheme.
	limiter := time.Tick(200 * time.Millisecond)
	// NOTE: By blocking on a receive from the limiter channel before serving each request,
	// NOTE: we limit ourselves to 1 request every 200 ms.
	for req := range requests {
		<-limiter
		fmt.Println("request", req, time.Now())
	}
	// NOTE: We may want to allow short bursts of requests in our rate limiting scheme
	// NOTE: while preserving the overall rate limit.
	// NOTE: We can accomplish this by buffering our limiter channel.
	// NOTE: The burstyLimiter channel will allow bursts of up to 3 events.
	burstyLimiter := make(chan time.Time, 3)
	// NOTE: Fill up the channel to represent the allowed burst.
	for i := 0; i < 3; i++ {
		burstyLimiter <- time.Now()
	}
	// NOTE: Every 200 ms we will try to add a new value to burstyLimiter, up to its limit of 3.
	go func() {
		for t := range time.Tick(200 * time.Millisecond) {
			burstyLimiter <- t
		}
	}()
	// NOTE: Now simulate 5 more incoming requests.
	// NOTE: The first 3 of these will benefit from the burst capability of burstyLimiter.
	burstyRequests := make(chan int, 5)
	for i := 0; i < 5; i++ {
		burstyRequests <- i
	}
	close(burstyRequests)
	for req := range burstyRequests {
		<-burstyLimiter
		fmt.Println("Request ", req, time.Now())
	}
}
/*
Running our program, we see the first batch of requests handled every 200ms as desired
go run 036-ratelimiting.go
request 1 2020-08-23 12:46:19.993109032 +0200 CEST m=+0.200365658
request 2 2020-08-23 12:46:20.193105436 +0200 CEST m=+0.400362165
request 3 2020-08-23 12:46:20.393124752 +0200 CEST m=+0.600381410
request 4 2020-08-23 12:46:20.593092908 +0200 CEST m=+0.800349532
request 5 2020-08-23 12:46:20.793009462 +0200 CEST m=+1.000266034
for the second batch, we serve the first 3 immediately, then rate limit
Request 0 2020-08-23 12:46:20.793103797 +0200 CEST m=+1.000360271
Request 1 2020-08-23 12:46:20.793113867 +0200 CEST m=+1.000370342
Request 2 2020-08-23 12:46:20.793119249 +0200 CEST m=+1.000375722
Request 3 2020-08-23 12:46:20.993235549 +0200 CEST m=+1.200492028
Request 4 2020-08-23 12:46:21.193309554 +0200 CEST m=+1.400566032
*/ | 036-ratelimiting.go | 0.540681 | 0.585812 | 036-ratelimiting.go | starcoder |
package singly
import (
	"fmt"
	"strings"
)
// LinkedList is a generic singly linked list that tracks both ends and its
// length, giving O(1) prepend, append, and size queries.
type LinkedList[T any] struct {
	Head *Node[T] // first node, nil when the list is empty
	Tail *Node[T] // last node, nil when the list is empty
	Size int      // number of nodes currently in the list
}
// New constructs and returns an empty singly linked list.
func New[T any]() *LinkedList[T] {
	return &LinkedList[T]{}
}
// IsEmpty returns true if the linked list doesn't have any nodes.
func (s *LinkedList[T]) IsEmpty() bool {
	return s.Size == 0
}
// First returns the data stored at the head of the list; ok is false when
// the list is empty.
func (s *LinkedList[T]) First() (data T, ok bool) {
	if !s.IsEmpty() {
		data, ok = s.Head.Data, true
	}
	return
}
// Last returns the data stored at the tail of the list; ok is false when
// the list is empty.
func (s *LinkedList[T]) Last() (data T, ok bool) {
	if !s.IsEmpty() {
		data, ok = s.Tail.Data, true
	}
	return
}
// AddFirst prepends data, making it the new head; on a previously empty list
// the new node also becomes the tail. O(1).
func (s *LinkedList[T]) AddFirst(data T) {
	node := &Node[T]{Data: data, Next: s.Head}
	s.Head = node
	if s.Size == 0 {
		s.Tail = node
	}
	s.Size++
}
// AddLast appends data after the current tail; on an empty list the new node
// becomes the head as well. O(1).
func (s *LinkedList[T]) AddLast(data T) {
	node := &Node[T]{Data: data}
	if s.IsEmpty() {
		s.Head = node
	} else {
		s.Tail.Next = node
	}
	s.Tail = node
	s.Size++
}
// Add adds an element at the given index in the list (0 prepends, Size
// appends). It returns InvalidIndexErr if the given index is out of bounds.
func (s *LinkedList[T]) Add(data T, index int) error {
	if index < 0 || index > s.Size {
		return InvalidIndexErr
	}
	// Delegate the two boundary positions to the O(1) specialized methods.
	if index == 0 {
		s.AddFirst(data)
		return nil
	}
	if index == s.Size {
		s.AddLast(data)
		return nil
	}
	// Walk to the node at position index-1; the loop breaks with current
	// pointing at the insertion predecessor.
	var count int
	current := s.Head
	for ; current != nil; current = current.Next {
		count++
		if count == index {
			break
		}
	}
	newNode := Node[T]{Data: data, Next: current.Next}
	current.Next = &newNode
	s.Size++
	return nil
}
// RemoveFirst removes and returns the first element of the list; ok is false
// when the list is empty. The tail is cleared when the list becomes empty.
func (s *LinkedList[T]) RemoveFirst() (val T, ok bool) {
	if s.IsEmpty() {
		return
	}
	head := s.Head
	s.Head = head.Next
	s.Size--
	if s.Size == 0 {
		s.Tail = nil
	}
	return head.Data, true
}
// RemoveLast removes and returns the last element of the list; ok is false
// when the list is empty. O(n): the list must be walked to find the node
// preceding the tail.
//
// The previous version re-checked IsEmpty after the walk; that branch was
// unreachable (this path only runs when Size >= 2, so Size >= 1 after the
// decrement) and has been removed.
func (s *LinkedList[T]) RemoveLast() (val T, ok bool) {
	if s.IsEmpty() {
		return
	}
	val = s.Tail.Data
	if s.Size == 1 {
		s.Head = nil
		s.Tail = nil
		s.Size--
		return val, true
	}
	// Walk to the node just before the tail.
	current := s.Head
	for ; current.Next.Next != nil; current = current.Next {
	}
	current.Next = nil
	s.Tail = current
	s.Size--
	return val, true
}
// Remove removes and returns the element at the given index. It returns
// InvalidIndexErr when the given index is out of bounds.
func (s *LinkedList[T]) Remove(index int) (val T, ok bool, err error) {
	if index < 0 || index >= s.Size {
		return val, false, InvalidIndexErr
	}
	// Delegate the boundary positions to the specialized removals.
	if index == 0 {
		val, ok = s.RemoveFirst()
		return val, ok, nil
	}
	if index == s.Size-1 {
		val, ok = s.RemoveLast()
		return val, ok, nil
	}
	// Walk to the node at position index-1 and splice out its successor.
	var count int
	current := s.Head
	for ; current != nil; current = current.Next {
		count++
		if count == index {
			break
		}
	}
	val = current.Next.Data
	current.Next = current.Next.Next
	s.Size--
	return val, true, nil
}
// String returns the bracketed, space-separated representation of the list,
// e.g. "[ 1 2 3 ]" ("[ ]" when empty).
//
// Uses strings.Builder instead of repeated string concatenation, which was
// O(n²) in the number of elements.
func (s *LinkedList[T]) String() string {
	var b strings.Builder
	b.WriteString("[ ")
	for current := s.Head; current != nil; current = current.Next {
		b.WriteString(fmt.Sprint(current.Data))
		b.WriteByte(' ')
	}
	b.WriteString("]")
	return b.String()
}
// Clone constructs and returns a copy of the list (node Data values are
// copied by assignment).
//
// Fixes a defect in the previous version, which never set Tail on the clone,
// leaving Last, AddLast, and RemoveLast broken on the copy.
func (s *LinkedList[T]) Clone() *LinkedList[T] {
	clone := &LinkedList[T]{}
	// AddLast keeps Head, Tail, and Size consistent as we copy.
	for walk := s.Head; walk != nil; walk = walk.Next {
		clone.AddLast(walk.Data)
	}
	return clone
}
// ToSlice returns a slice of the list's elements.
func (s *LinkedList[T]) ToSlice() []T {
r := make([]T, s.Size)
for i, cur := 0, s.Head; cur != nil && i < len(r); i, cur = i+1, cur.Next {
r[i] = cur.Data
}
return r
} | linkedlist/singly/singly_linked_list.go | 0.79732 | 0.436022 | singly_linked_list.go | starcoder |
package gateway
import "strconv"
// ConvertStringToFloat64 converts string to float64.
func ConvertStringToFloat64(s string) (float64, error) {
	f, parseErr := strconv.ParseFloat(s, 64)
	return f, parseErr
}
// ConvertStringToFloat32 converts string to float32.
func ConvertStringToFloat32(s string) (float32, error) {
	f64, parseErr := strconv.ParseFloat(s, 32)
	return float32(f64), parseErr
}
// ConvertStringToInt64 converts a base-10 string to int64.
func ConvertStringToInt64(s string) (int64, error) {
	n, parseErr := strconv.ParseInt(s, 10, 64)
	return n, parseErr
}
// ConvertStringToInt32 converts a base-10 string to int32.
func ConvertStringToInt32(s string) (int32, error) {
	wide, parseErr := strconv.ParseInt(s, 10, 32)
	return int32(wide), parseErr
}
// ConvertStringToInt16 converts a base-10 string to int16.
func ConvertStringToInt16(s string) (int16, error) {
	wide, parseErr := strconv.ParseInt(s, 10, 16)
	return int16(wide), parseErr
}
// ConvertStringToInt8 converts a base-10 string to int8.
func ConvertStringToInt8(s string) (int8, error) {
	wide, parseErr := strconv.ParseInt(s, 10, 8)
	return int8(wide), parseErr
}
// ConvertStringToInt converts a base-10 string to a platform-sized int.
func ConvertStringToInt(s string) (int, error) {
	wide, parseErr := strconv.ParseInt(s, 10, 0)
	return int(wide), parseErr
}
// ConvertStringToUint64 converts a base-10 string to uint64.
func ConvertStringToUint64(s string) (uint64, error) {
	n, parseErr := strconv.ParseUint(s, 10, 64)
	return n, parseErr
}
// ConvertStringToUint32 converts a base-10 string to uint32.
func ConvertStringToUint32(s string) (uint32, error) {
	wide, parseErr := strconv.ParseUint(s, 10, 32)
	return uint32(wide), parseErr
}
// ConvertStringToUint16 converts a base-10 string to uint16.
func ConvertStringToUint16(s string) (uint16, error) {
	wide, parseErr := strconv.ParseUint(s, 10, 16)
	return uint16(wide), parseErr
}
// ConvertStringToUint8 converts a base-10 string to uint8.
func ConvertStringToUint8(s string) (uint8, error) {
	wide, parseErr := strconv.ParseUint(s, 10, 8)
	return uint8(wide), parseErr
}
// ConvertStringToUint converts a base-10 string to a platform-sized uint.
func ConvertStringToUint(s string) (uint, error) {
	wide, parseErr := strconv.ParseUint(s, 10, 0)
	return uint(wide), parseErr
}
// ConvertStringToBool converts string to bool. The HTML-checkbox value "on"
// is treated as true; anything else is handled by strconv.ParseBool.
func ConvertStringToBool(s string) (bool, error) {
	if s == "on" {
		return true, nil
	}
	return strconv.ParseBool(s)
}
package hamming
import "strconv"
// References: check out Hacker's Delight, about p. 70
// table[i] is the number of set bits in the byte value i (popcount lookup).
var table = [256]uint8{
	0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8,
}
// CountBitsByteAlt returns the number of set bits in x using a table-less,
// branch-free parallel bit summation (SWAR): pairs, then nibbles, then the
// final byte sum.
func CountBitsByteAlt(x byte) int {
	pairs := (x & 0x55) + ((x >> 1) & 0x55)
	nibbles := (pairs & 0x33) + ((pairs >> 2) & 0x33)
	return int((nibbles & 0x0f) + ((nibbles >> 4) & 0x0f))
}
// The following wrappers reinterpret signed and platform-dependent types as
// their unsigned equivalents and delegate to the corresponding popcount.
func CountBitsInt8(x int8) int   { return CountBitsByte(byte(x)) }
func CountBitsInt16(x int16) int { return CountBitsUint16(uint16(x)) }
func CountBitsInt32(x int32) int { return CountBitsUint32(uint32(x)) }
func CountBitsInt64(x int64) int { return CountBitsUint64(uint64(x)) }
func CountBitsInt(x int) int     { return CountBitsUint(uint(x)) }
func CountBitsByte(x byte) int   { return CountBitsUint8(x) }
func CountBitsRune(x rune) int   { return CountBitsInt32(x) }
// CountBitsUint8 returns the popcount of x via the 256-entry lookup table.
func CountBitsUint8(x uint8) int {
	return int(table[x])
}
// CountBitsUint16 sums the table lookups of the low and high bytes
// (the uint8 sum cannot overflow: at most 8+8).
func CountBitsUint16(x uint16) int {
	return int(table[x&0xFF] + table[(x>>8)&0xFF])
}
// SWAR masks for the 32-bit popcount: alternating 1-bit, 2-bit, and 4-bit
// group masks.
const (
	m1d uint32 = 0x55555555
	m2d        = 0x33333333
	m4d        = 0x0f0f0f0f
)
// CountBitsUint32 computes the popcount with the classic SWAR reduction:
// sum each bit pair, then each 4-bit group, then fold the bytes together.
func CountBitsUint32(x uint32) int {
	x -= ((x >> 1) & m1d)            // count of each 2 bits into those 2 bits
	x = (x & m2d) + ((x >> 2) & m2d) // count of each 4 bits into those 4 bits
	x = (x + (x >> 4)) & m4d         // count of each 8 bits into those 8 bits
	x += x >> 8
	x += x >> 16
	return int(x & 0x3f) // result fits in 6 bits (max 32)
}
// SWAR masks for the 64-bit popcount; hq is the byte-broadcast multiplier
// used to sum the per-byte counts in one multiply.
const (
	m1q uint64 = 0x5555555555555555
	m2q        = 0x3333333333333333
	m4q        = 0x0f0f0f0f0f0f0f0f
	hq         = 0x0101010101010101
)
// CountBitsUint64 computes the 64-bit popcount via SWAR plus a multiply to
// sum the eight per-byte counts.
func CountBitsUint64(x uint64) int {
	x -= (x >> 1) & m1q              // put count of each 2 bits into those 2 bits
	x = (x & m2q) + ((x >> 2) & m2q) // put count of each 4 bits into those 4 bits
	x = (x + (x >> 4)) & m4q         // put count of each 8 bits into those 8 bits
	return int((x * hq) >> 56)       // returns left 8 bits of x + (x<<8) + (x<<16) + (x<<24) + ...
}
// CountBitsUint64Alt counts bits by splitting x into two 32-bit halves.
func CountBitsUint64Alt(x uint64) int {
	return CountBitsUint32(uint32(x>>32)) + CountBitsUint32(uint32(x))
}
// CountBitsUintReference counts set bits with Kernighan's loop: each
// iteration of x &= x-1 clears the lowest set bit, so the loop runs once
// per set bit.
func CountBitsUintReference(x uint) int {
	count := 0
	for ; x != 0; x &= x - 1 {
		count++
	}
	return count
}
func CountBitsUint(x uint) int {
if strconv.IntSize == 64 {
return CountBitsUint64(uint64(x))
} else if strconv.IntSize == 32 {
return CountBitsUint32(uint32(x))
}
panic("strconv.IntSize must be 32 or 64 bits")
} | vendor/github.com/steakknife/hamming/popcount.go | 0.525612 | 0.681208 | popcount.go | starcoder |
package cherry_pickup
import "math"
/*
741. 摘樱桃 https://leetcode-cn.com/problems/cherry-pickup
一个N x N的网格(grid) 代表了一块樱桃地,每个格子由以下三种数字的一种来表示:
0 表示这个格子是空的,所以你可以穿过它。
1 表示这个格子里装着一个樱桃,你可以摘到樱桃然后穿过它。
-1 表示这个格子里有荆棘,挡着你的路。
你的任务是在遵守下列规则的情况下,尽可能的摘到最多樱桃:
从位置 (0, 0) 出发,最后到达 (N-1, N-1) ,只能向下或向右走,并且只能穿越有效的格子(即只可以穿过值为0或者1的格子);
当到达 (N-1, N-1) 后,你要继续走,直到返回到 (0, 0) ,只能向上或向左走,并且只能穿越有效的格子;
当你经过一个格子且这个格子包含一个樱桃时,你将摘到樱桃并且这个格子会变成空的(值变为0);
如果在 (0, 0) 和 (N-1, N-1) 之间不存在一条可经过的路径,则没有任何一个樱桃能被摘到。
示例 1:
输入: grid =
[[0, 1, -1],
[1, 0, -1],
[1, 1, 1]]
输出: 5
解释:
玩家从(0,0)点出发,经过了向下走,向下走,向右走,向右走,到达了点(2, 2)。
在这趟单程中,总共摘到了4颗樱桃,矩阵变成了[[0,1,-1],[0,0,-1],[0,0,0]]。
接着,这名玩家向左走,向上走,向上走,向左走,返回了起始点,又摘到了1颗樱桃。
在旅程中,总共摘到了5颗樱桃,这是可以摘到的最大值了。
说明:
grid 是一个 N * N 的二维数组,N的取值范围是1 <= N <= 50。
每一个 grid[i][j] 都是集合 {-1, 0, 1}中的一个数。
可以保证起点 grid[0][0] 和终点 grid[N-1][N-1] 的值都不会是 -1。
*/
/*
问题简化:如果只是从起点到终点摘一遍呢?这将是一个典型的动态规划
定义dp(r,c)表示从起点走到(r,c)摘到的最大樱桃数; 则dp[r][c] = max(dp[r-1][c], dp[r][c-1]) + grid[r][c]
注意边界情况即(r,c)处本身为荆棘或其上边和左边一格均无法到达的情况
或者dp多申请一行一列,0行0列都是0,不参与结果计算,只是方便少判断边界
*/
// cherryPickupOnce solves the single-pass variant: the maximum number of
// cherries collectible walking from (0,0) to (n-1,n-1), moving only right or
// down. dp[r][c] is the best total reaching cell (r-1,c-1); the extra zeroed
// row/column avoids boundary checks, and -1 marks thorn/unreachable cells.
func cherryPickupOnce(grid [][]int) int {
	n := len(grid)
	if n == 0 {
		return -1
	}
	dp := make([][]int, n+1)
	for i := 0; i <= n; i++ {
		dp[i] = make([]int, n+1)
	}
	for r := 1; r <= n; r++ {
		for c := 1; c <= n; c++ {
			cherryNum := grid[r-1][c-1]
			if cherryNum == -1 {
				// Thorn: mark the cell unreachable.
				dp[r][c] = -1
				continue
			}
			dp[r][c] = cherryNum
			// Add the better of the two predecessors (above / left), but
			// only when positive — this also skips -1 unreachable markers.
			topLeftMax := max(dp[r][c-1], dp[r-1][c])
			if topLeftMax > 0 {
				dp[r][c] += topLeftMax
			}
		}
	}
	return dp[n][n]
}
/*
可以考虑执行cherryPickup两次,且第一次把摘掉的樱桃格子里的值置为0
但这样会导致过多的格子被置为0;
可以考虑记录第一次摘过樱桃的路径,但显然又会漏掉最优解,
如下边的情况,为了明显,将两个特别的樱桃用★表示了:
{1,1,1,1,0,0,0},
{0,0,0,1,0,0,0},
{0,0,0,1,0,0,★},
{★,0,0,1,0,0,0},
{0,0,0,1,0,0,0},
{0,0,0,1,0,0,0},
{0,0,0,1,1,1,1},
可见最优解是所有值为1的格子都能被摘掉,共15
但如果采用上面的办法,结果会是14,左下或右上的★会被漏掉一个
*/
/* ---------------------------------------------------
问题转化:两个人同时从左上角走到右下角
*/
/*
[解法1:动态规划,自顶向下]
假设有2个人,在走了t步后;分别居于(r1, c1), (r2, c2)位置
因r1+c1=r2+c2=t;所以r2=r1+c1-c2,这意味着r1,c1,c2唯一地决定了2个走了t步的人,以这个条件来做动态规划:
定义dp[r1][c1][c2]为从(r1, c1), (r2, c2)开始,走到终点(n-1,n-1)所能摘到的最多樱桃数量;其中r2=r1+c1-c2
如果(r1, c1), (r2, c2)处不是荆棘;那么dp[r1][c1][c2]的值这样计算:
先得到(r1, c1), (r2, c2)两处的樱桃总数(如果位置重复则只算一次); 再加上
max(
dp(r1+1, c1, c2, dp, grid), // a, b都向下
dp(r1, c1+1, c2, dp, grid), // a右b下
dp(r1+1, c1, c2+1, dp, grid), // a下b右
dp(r1, c1+1, c2+1, dp, grid)) // a,b都向右
*/
// cherryPickup returns the maximum number of cherries collectible on a round
// trip (0,0) -> (n-1,n-1) -> (0,0), modeled as two walkers moving
// simultaneously from (0,0) to (n-1,n-1).
// dp[r1][c1][c2] memoizes pickup results; math.MinInt32 marks "uncomputed".
// The final max with 0 handles grids with no valid path.
func cherryPickup(grid [][]int) int {
	n := len(grid)
	dp := make([][][]int, n)
	for i := 0; i < n; i++ {
		dp[i] = make([][]int, n)
		for j := 0; j < n; j++ {
			dp[i][j] = make([]int, n)
			for k := 0; k < n; k++ {
				dp[i][j][k] = math.MinInt32
			}
		}
	}
	return max(0, pickup(0, 0, 0, dp, grid))
}
// pickup computes, top-down with memoization, the best cherry total for two
// walkers currently at (r1,c1) and (r2,c2) continuing to (n-1,n-1), where
// r2 = r1+c1-c2 because both walkers have taken the same number of steps.
// Returns math.MinInt32 for out-of-bounds or thorn positions.
func pickup(r1, c1, c2 int, dp [][][]int, grid [][]int) int {
	r2 := r1 + c1 - c2
	n := len(grid)
	// Either walker outside the grid or standing on a thorn: invalid state.
	if n == r1 || n == c1 || n == r2 || n == c2 ||
		grid[r1][c1] == -1 || grid[r2][c2] == -1 {
		return math.MinInt32
	}
	// Both walkers coincide at the destination cell.
	if r1 == n-1 && c1 == n-1 {
		return grid[r1][c1]
	}
	// Already memoized?
	if dp[r1][c1][c2] != math.MinInt32 {
		return dp[r1][c1][c2]
	}
	val := grid[r1][c1]
	// Count the second walker's cell only when the walkers differ, so a
	// shared cherry is not double-counted.
	if c1 != c2 {
		val += grid[r2][c2]
	}
	// Four move combinations: each walker independently goes down or right.
	val += max(
		pickup(r1+1, c1, c2, dp, grid),
		pickup(r1, c1+1, c2, dp, grid),
		pickup(r1+1, c1, c2+1, dp, grid),
		pickup(r1, c1+1, c2+1, dp, grid))
	dp[r1][c1][c2] = val
	return val
}
/*
[解法2:动态规划,自顶向下]
与解法1类似,只是这里定义的dp[r1][c1][c2]为从起点走到(r1, c1), (r2, c2)所能摘到的最多樱桃数量;其中r2=r1+c1-c2
一开始调用的时候r1,c1,c2的值传n-1
*/
// pickup1 is the mirrored variant of pickup: dp[r1][c1][c2] is the best
// cherry total for two walkers moving from the START to (r1,c1)/(r2,c2),
// recursing toward the origin instead of the destination. Call it initially
// with r1 = c1 = c2 = n-1.
func pickup1(r1, c1, c2 int, dp [][][]int, grid [][]int) int {
	r2 := r1 + c1 - c2
	// Before the grid's origin, or either walker on a thorn: invalid.
	if r1 < 0 || c1 < 0 || r2 < 0 || c2 < 0 ||
		grid[r1][c1] == -1 || grid[r2][c2] == -1 {
		return math.MinInt32
	}
	// Both walkers at the starting cell.
	if r1 == 0 && c1 == 0 && c2 == 0 {
		return grid[r1][c1]
	}
	// Already memoized?
	if dp[r1][c1][c2] != math.MinInt32 {
		return dp[r1][c1][c2]
	}
	val := grid[r1][c1]
	// Avoid double-counting a cell shared by both walkers.
	if c1 != c2 {
		val += grid[r2][c2]
	}
	// Four predecessor combinations: each walker arrived from above or left.
	val += max(
		pickup1(r1-1, c1, c2, dp, grid),
		pickup1(r1, c1-1, c2, dp, grid),
		pickup1(r1-1, c1, c2-1, dp, grid),
		pickup1(r1, c1-1, c2-1, dp, grid))
	dp[r1][c1][c2] = val
	return val
}
/*
[解法3:动态规划,自底向上]
定义dp[c1][c2]为第t步,从起点走到(r1,c1)和从起点走到(r2,c2)能摘到的最多樱桃数;其中r1=t-c1,r2=t-c2
一个人从左上角走到右下角共需n-1 + n-1 即2n-2步
*/
// cherryPickup1 is the bottom-up variant: after step t, dp[i][j] is the best
// total with one walker at (i, t-i) and the other at (j, t-j). A single
// walker needs 2n-2 steps to reach the bottom-right corner.
func cherryPickup1(grid [][]int) int {
	n := len(grid)
	dp := genDp(n)
	dp[0][0] = grid[0][0]
	for t := 1; t <= 2*n-2; t++ {
		dp2 := genDp(n)
		// Valid row range at step t: both the row and its column t-row
		// must lie inside the grid.
		from, end := max(0, t-(n-1)), min(n-1, t)
		for i := from; i <= end; i++ {
			if grid[i][t-i] == -1 {
				continue
			}
			for j := from; j <= end; j++ {
				if grid[j][t-j] == -1 {
					continue
				}
				val := grid[i][t-i]
				// Avoid double-counting when both walkers share a cell.
				if i != j {
					val += grid[j][t-j]
				}
				// Transition from the predecessor states one step earlier
				// (each walker came from the same row or the row above);
				// uncomputed predecessors hold math.MinInt32 and lose the max.
				for pi := i - 1; pi <= i; pi++ {
					for pj := j - 1; pj <= j; pj++ {
						if pi >= 0 && pj >= 0 {
							dp2[i][j] = max(dp2[i][j], dp[pi][pj]+val)
						}
					}
				}
			}
		}
		dp = dp2
	}
	return max(0, dp[n-1][n-1])
}
// genDp allocates an n×n matrix with every cell initialized to
// math.MinInt32, the sentinel for "not yet computed / unreachable".
func genDp(n int) [][]int {
	grid := make([][]int, n)
	for i := range grid {
		row := make([]int, n)
		for j := range row {
			row[j] = math.MinInt32
		}
		grid[i] = row
	}
	return grid
}
// max returns the largest of nums; with no arguments it returns the
// math.MinInt32 sentinel used throughout this file.
func max(nums ...int) int {
	best := math.MinInt32
	for i := 0; i < len(nums); i++ {
		if nums[i] > best {
			best = nums[i]
		}
	}
	return best
}
// min returns the smallest of nums; with no arguments it returns the maximum
// int value.
//
// Fix: the previous version initialized the accumulator with math.MaxInt64,
// a constant that does not fit in int on 32-bit platforms (compile-time
// overflow). int(^uint(0) >> 1) is the portable maximum int and equals
// math.MaxInt64 on 64-bit builds, so behavior there is unchanged.
func min(nums ...int) int {
	const maxInt = int(^uint(0) >> 1)
	r := maxInt
	for _, v := range nums {
		if v < r {
			r = v
		}
	}
	return r
}
package binarytree
// TreeNode is a binary tree node.
type TreeNode struct {
	Value  int
	Height int // NOTE(review): not referenced by the code in this chunk
	Left   *TreeNode
	Right  *TreeNode
}
// PreOrder returns the iterative pre-order (root, left, right) traversal
// using an explicit stack; the right child is pushed first so the left
// subtree is processed first.
func (root *TreeNode) PreOrder() []int {
	var (
		stack []*TreeNode
		order []int
	)
	if root == nil {
		return order
	}
	for stack = append(stack, root); len(stack) != 0; {
		curr := stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		order = append(order, curr.Value)
		if curr.Right != nil {
			stack = append(stack, curr.Right)
		}
		if curr.Left != nil {
			stack = append(stack, curr.Left)
		}
	}
	return order
}
// PreOrderRecursive returns the pre-order (root, left, right) traversal
// using a recursive closure.
func (root *TreeNode) PreOrderRecursive() []int {
	var order []int
	var visit func(n *TreeNode)
	visit = func(n *TreeNode) {
		if n == nil {
			return
		}
		order = append(order, n.Value)
		visit(n.Left)
		visit(n.Right)
	}
	visit(root)
	return order
}
// InOrder returns the iterative in-order (left, root, right) traversal:
// descend left pushing nodes, then visit the top of the stack and turn right.
func (root *TreeNode) InOrder() []int {
	var (
		stack []*TreeNode
		order []int
	)
	for curr := root; curr != nil || len(stack) != 0; {
		for curr != nil {
			stack = append(stack, curr)
			curr = curr.Left
		}
		curr = stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		order = append(order, curr.Value)
		curr = curr.Right
	}
	return order
}
// PostOrder returns the iterative post-order (left, right, root) traversal.
// It runs a mirrored pre-order (root, right, left) and then reverses the
// result in place.
func (root *TreeNode) PostOrder() []int {
	var (
		stack []*TreeNode
		order []int
	)
	if root == nil {
		return order
	}
	for stack = append(stack, root); len(stack) != 0; {
		curr := stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		order = append(order, curr.Value)
		if curr.Left != nil {
			stack = append(stack, curr.Left)
		}
		if curr.Right != nil {
			stack = append(stack, curr.Right)
		}
	}
	// Reverse root-right-left into left-right-root.
	for left, right := 0, len(order)-1; left < right; left, right = left+1, right-1 {
		order[left], order[right] = order[right], order[left]
	}
	return order
}
// LevelOrder returns the node values grouped by depth (BFS): one inner slice
// per tree level, top to bottom.
func (root *TreeNode) LevelOrder() [][]int {
	var (
		order [][]int
		queue []*TreeNode
		level int
	)
	if root == nil {
		return order
	}
	for queue = append(queue, root); len(queue) != 0; {
		order = append(order, []int{})
		// Everything currently queued belongs to the same level.
		length := len(queue)
		for i := 0; i < length; i++ {
			order[level] = append(order[level], queue[i].Value)
			if queue[i].Left != nil {
				queue = append(queue, queue[i].Left)
			}
			if queue[i].Right != nil {
				queue = append(queue, queue[i].Right)
			}
		}
		queue = queue[length:]
		level++
	}
	return order
}
// LevelOrderRecursive returns the node values grouped by depth using a
// recursive depth-first walk that appends each node to its level's slice.
func (root *TreeNode) LevelOrderRecursive() [][]int {
	var order [][]int
	var walk func(n *TreeNode, depth int)
	walk = func(n *TreeNode, depth int) {
		if n == nil {
			return
		}
		if depth == len(order) {
			order = append(order, []int{})
		}
		order[depth] = append(order[depth], n.Value)
		walk(n.Left, depth+1)
		walk(n.Right, depth+1)
	}
	walk(root, 0)
	return order
}
// BuildTreeFromPreIn reconstructs a binary tree from its pre-order and
// in-order traversals. Assumes the values are distinct and the traversals
// are consistent; if the root value is absent from inorder, index returns 0
// and the result is undefined.
func BuildTreeFromPreIn(preorder, inorder []int) *TreeNode {
	if len(inorder) == 0 {
		return nil
	}
	// The first pre-order element is the root; its position in the
	// in-order sequence splits the left and right subtrees.
	node := &TreeNode{Value: preorder[0]}
	if len(inorder) == 1 {
		return node
	}
	idx := index(node.Value, inorder)
	node.Left = BuildTreeFromPreIn(preorder[1:idx+1], inorder[:idx])
	node.Right = BuildTreeFromPreIn(preorder[idx+1:], inorder[idx+1:])
	return node
}
// BuildTreeFromPostIn reconstructs a binary tree from its post-order and
// in-order traversals. Assumes the values are distinct and the traversals
// are consistent.
func BuildTreeFromPostIn(postorder, inorder []int) *TreeNode {
	if len(inorder) == 0 {
		return nil
	}
	// The last post-order element is the root; its position in the
	// in-order sequence splits the left and right subtrees.
	node := &TreeNode{Value: postorder[len(postorder)-1]}
	if len(inorder) == 1 {
		return node
	}
	idx := index(node.Value, inorder)
	node.Left = BuildTreeFromPostIn(postorder[:idx], inorder[:idx])
	node.Right = BuildTreeFromPostIn(postorder[idx:len(postorder)-1], inorder[idx+1:])
	return node
}
// InvertTree mirrors the tree in place (left/right children swapped at every
// node) using an iterative breadth-first sweep, and returns the root.
func (root *TreeNode) InvertTree() *TreeNode {
	if root == nil {
		return root
	}
	queue := []*TreeNode{root}
	for len(queue) > 0 {
		n := queue[0]
		queue = queue[1:]
		n.Left, n.Right = n.Right, n.Left
		if n.Left != nil {
			queue = append(queue, n.Left)
		}
		if n.Right != nil {
			queue = append(queue, n.Right)
		}
	}
	return root
}
// InvertTreeRecursive mirrors the tree in place by recursing through the
// method itself (a nil receiver is safe here), and returns the root.
func (root *TreeNode) InvertTreeRecursive() *TreeNode {
	if root == nil {
		return nil
	}
	root.Left, root.Right = root.Right.InvertTreeRecursive(), root.Left.InvertTreeRecursive()
	return root
}
// Predecessor returns the in-order predecessor of root: the rightmost node
// of its left subtree. Dereferences root.Left, so it panics (nil pointer)
// when root has no left child.
func (root *TreeNode) Predecessor() *TreeNode {
	curr := root.Left
	for curr.Right != nil {
		curr = curr.Right
	}
	return curr
}
// Successor returns the in-order successor of root: the leftmost node of
// its right subtree. Dereferences root.Right, so it panics (nil pointer)
// when root has no right child.
func (root *TreeNode) Successor() *TreeNode {
	curr := root.Right
	for curr.Left != nil {
		curr = curr.Left
	}
	return curr
}
// index returns the position of the first occurrence of val in slice, or 0
// when val is absent (callers are expected to guarantee presence).
func index(val int, slice []int) int {
	for pos, elem := range slice {
		if elem == val {
			return pos
		}
	}
	return 0
}
package tfgo
import (
tf "github.com/tensorflow/tensorflow/tensorflow/go"
"github.com/tensorflow/tensorflow/tensorflow/go/op"
)
// Batchify creates a batch of tensors: each input gains a leading batch
// dimension via ExpandDims, and the expanded tensors are concatenated along
// the first dimension. A single input is only expanded and returned.
func Batchify(scope *op.Scope, tensors []tf.Output) tf.Output {
	s := scope.SubScope("batchify")
	// Batchify of a single value: add the batch dimension and return.
	if len(tensors) == 1 {
		return op.ExpandDims(s.SubScope("ExpandDims"), tensors[0], op.Const(s.SubScope("axis"), []int32{0}))
	}
	var tensors4d []tf.Output
	for _, tensor := range tensors {
		tensors4d = append(tensors4d, op.ExpandDims(s.SubScope("ExpandDims"), tensor, op.Const(s.SubScope("axis"), []int32{0})))
	}
	return op.ConcatV2(s.SubScope("ConcatV2"), tensors4d, op.Const(s.SubScope("axis"), int32(0)))
}
// Cast casts value to the specified dtype. When value already has that
// dtype it is returned unchanged and no graph node is added.
func Cast(scope *op.Scope, value tf.Output, dtype tf.DataType) tf.Output {
	if value.DataType() == dtype {
		return value
	}
	return op.Cast(scope.SubScope("Cast"), value, dtype)
}
// NewRoot creates a new, empty root *op.Scope for building a graph.
func NewRoot() *op.Scope {
	return op.NewScope()
}
// Const creates a constant value within a "Const" sub-scope of scope.
func Const(scope *op.Scope, value interface{}) tf.Output {
	return op.Const(scope.SubScope("Const"), value)
}
// IsClose defines the isclose operation between a and b: a conditional graph
// node that is true when |a-b| <= max(relTol*max(|a|,|b|), absTol).
// relTol is the relative tolerance; absTol is the absolute tolerance.
func IsClose(scope *op.Scope, a, b tf.Output, relTol, absTol tf.Output) tf.Output {
	s := scope.SubScope("IsClose")
	return op.LessEqual(s.SubScope("LessEqual"),
		op.Abs(s.SubScope("Abs"),
			op.Sub(s.SubScope("Sub"), a, b)),
		op.Maximum(s.SubScope("Maximum"),
			op.Mul(s.SubScope("Mul"), relTol,
				op.Maximum(s.SubScope("Maximum"), op.Abs(s.SubScope("Abs"), a), op.Abs(s.SubScope("Abs"), b))), absTol))
}
// Exec creates the computation graph from the scope, then executes
// the operations required to compute each element of tensors.
// Node in the graph can be overwritten with feedDict.
// The session options can be specified using the session parameter.
// Returns the evaluated tensors. Panics on error.
func Exec(scope *op.Scope, tensors []tf.Output, feedDict map[tf.Output]*tf.Tensor, options *tf.SessionOptions) []*tf.Tensor {
graph, err := scope.Finalize()
if err != nil {
panic(err.Error())
}
var sess *tf.Session
sess, err = tf.NewSession(graph, options)
if err == nil {
defer sess.Close()
var results []*tf.Tensor
if results, err = sess.Run(feedDict, tensors, nil); err == nil {
return results
}
}
panic(err)
} | ops.go | 0.879871 | 0.490785 | ops.go | starcoder |
package bindings
import "log"
// Mapping is a stack of map[string]string for CMake variables.
type Mapping struct {
	vs    []map[string]string // scope stack; the last element is the innermost scope
	cache map[string]string   // CMake CACHE variables, shared across all scopes
}
// New returns a new variable stack containing one empty root scope and an
// empty cache.
func New() *Mapping {
	m := &Mapping{cache: make(map[string]string)}
	m.Push()
	return m
}
// Push pushes a new, empty variable binding scope onto the stack.
func (m *Mapping) Push() {
	m.vs = append(m.vs, make(map[string]string))
}
// Pop removes the most recently pushed scope.
// NOTE(review): popping the root scope leaves the stack empty, after which
// Set would panic — callers are presumed to balance Push/Pop; confirm.
func (m *Mapping) Pop() {
	m.vs = m.vs[0 : len(m.vs)-1]
}
// Depth returns the current mapping depth starting from 0 (the root scope).
func (m *Mapping) Depth() int {
	return len(m.vs) - 1
}
// Set sets a key to a particular value in the current (innermost) scope.
// Setting a key to the empty string is equivalent to deleting it, in
// accordance with CMake semantics.
func (m *Mapping) Set(key, value string) {
	// Keep empty strings in the current scope as a tombstone to prevent
	// searching in parent scopes.
	m.vs[len(m.vs)-1][key] = value
}
// SetParent sets a key in the enclosing (parent) scope, mirroring CMake's
// set(... PARENT_SCOPE). At the root scope there is no parent, so the call
// is logged and ignored. Setting a key to the empty string is equivalent to
// deleting it, in accordance with CMake semantics.
func (m *Mapping) SetParent(key, value string) {
	if m.Depth() == 0 {
		log.Println("Attempt to set ", key, "in PARENT_SCOPE at root")
		return
	}
	m.vs[len(m.vs)-2][key] = value
}
// SetCache sets a key to a particular value in CACHE scope.
// Setting a key to the empty string is equivalent to deleting it, in
// accordance with CMake semantics.
func (m *Mapping) SetCache(key, value string) {
	m.cache[key] = value
}
// Get looks from the current scope up to find the nearest value for key.
// If the key is absent, returns the empty string.
// This matches the semantics of CMake variable lookup.
func (m *Mapping) Get(key string) string {
	// Search the innermost scope outward; empty-string tombstones stored
	// by Set are found here and terminate the search with "".
	for i := len(m.vs) - 1; i >= 0; i-- {
		val, ok := m.vs[i][key]
		if ok {
			return val
		}
	}
	// From https://cmake.org/cmake/help/latest/manual/cmake-language.7.html#variables
	// Variable references are looked up in the cache if not present in the current scope.
	return m.GetCache(key)
}
// GetCache returns the value cached for key, or the empty string when key
// is not in the cache.
func (m *Mapping) GetCache(key string) string {
	if val, ok := m.cache[key]; ok {
		return val
	}
	return ""
}
// GetEnv returns the corresponding environment variable or the empty string
// (environment lookup is not implemented; it always returns "").
func (m *Mapping) GetEnv(key string) string {
	return ""
}
// Values returns the currently set values as a map[string]string.
// Keys set to the empty string will be omitted from the final map.
func (m *Mapping) Values() map[string]string {
	vals := make(map[string]string)
	// Apply scopes outermost-first so inner bindings win; an empty-string
	// binding acts as a deletion (tombstone) of any outer value.
	for _, v := range m.vs {
		for key, val := range v {
			if val == "" {
				delete(vals, key)
			} else {
				vals[key] = val
			}
		}
	}
	return vals
}
package analyzers
import (
summarypb "github.com/GoogleCloudPlatform/testgrid/pb/summary"
"github.com/GoogleCloudPlatform/testgrid/pkg/summarizer/common"
)
// analyzerName identifies this analyzer.
// NOTE(review): not referenced in this chunk — presumably used elsewhere.
const analyzerName = "flipanalyzer"
// FlipAnalyzer implements flakiness scoring based on the ratio of
// pass-to-fail flips to the number of runs considered (see
// calculateFlipFlakiness), rather than the ratio of failures to total runs.
type FlipAnalyzer struct {
	RelevantStatus map[string][]StatusCategory // per-test status history, keyed by display name
}
// StatusCategory is a simplified status that allows only "Pass", "Fail", and "Flaky".
type StatusCategory int32
// StatusPass, StatusFail, and StatusFlaky are the status categories this analyzer works with.
const (
	StatusPass StatusCategory = iota
	StatusFail
	StatusFlaky
)
// GetFlakiness returns a HealthinessInfo message. It delegates the overall
// computation to a BaseAnalyzer, then overwrites each test's flakiness (and
// the tab average) with the flip-based score derived from RelevantStatus.
func (ea *FlipAnalyzer) GetFlakiness(gridMetrics []*common.GridMetrics, minRuns int, startDate int, endDate int, tab string) *summarypb.HealthinessInfo {
	var ba BaseAnalyzer
	healthinessInfo := ba.GetFlakiness(gridMetrics, minRuns, startDate, endDate, tab)
	var averageFlakiness float32
	for _, test := range healthinessInfo.Tests {
		test.Flakiness = calculateFlipFlakiness(ea.RelevantStatus[test.DisplayName])
		averageFlakiness += test.Flakiness
	}
	// Guard the division so a tab with no tests reports zero flakiness.
	if len(healthinessInfo.Tests) == 0 {
		healthinessInfo.AverageFlakiness = 0
	} else {
		healthinessInfo.AverageFlakiness = averageFlakiness / float32(len(healthinessInfo.Tests))
	}
	return healthinessInfo
}
// ignoreFailuresInARow: runs of at least this many consecutive failures are
// treated as ongoing breakage and skipped rather than counted as flips.
const ignoreFailuresInARow = 3
// consecutiveFailures returns the length of the run of StatusFail entries
// in statuses starting at index i (0 when statuses[i] is not a failure).
func consecutiveFailures(statuses []StatusCategory, i int) int {
	n := 0
	for j := i; j < len(statuses) && statuses[j] == StatusFail; j++ {
		n++
	}
	return n
}
// calculateFlipFlakiness computes flakiness based on the number of flips to
// failing rather than the number of failures.
// statuses should already be filtered to the correct time horizon with infra
// failures removed. Runs of ignoreFailuresInARow or more consecutive
// failures are skipped as ongoing breakage; a Flaky status always counts as
// one flip.
// Returns a percentage between 0 and 100.
func calculateFlipFlakiness(statuses []StatusCategory) float32 {
	var flips int
	var considered int
	lastPassing := true // No flakes if we pass 100%
	var i int
	for i < len(statuses) {
		cf := consecutiveFailures(statuses, i)
		if cf >= ignoreFailuresInARow {
			// Ignore the run of failures
			i += cf
			if i >= len(statuses) {
				break
			}
		}
		s := statuses[i]
		considered++
		if s == StatusPass {
			lastPassing = true
		} else if s == StatusFlaky {
			// Consider this as always a flip (because there was a flip involved), but it did pass.
			flips++
			lastPassing = true
		} else {
			// Failing: count a flip only on the pass->fail transition.
			if lastPassing {
				flips++
			}
			lastPassing = false
		}
		i++
	}
	if considered == 0 {
		return 0
	}
	return 100 * float32(flips) / float32(considered)
}
package expect
import (
"fmt"
"reflect"
"regexp"
"testing"
)
// Negation is a negated expectation: every assertion on it succeeds exactly
// when the corresponding assertion on the inverse expectation would fail.
type Negation struct {
	*testing.T
	inverse *ExpectedValue // the positive expectation being negated
}
// To returns the current Expectation unchanged; it exists purely to make
// chained assertions read naturally.
func (not *Negation) To() Expectation {
	return not
}

// Be returns the current Expectation unchanged (fluent chaining).
func (not *Negation) Be() Expectation {
	return not
}

// Is returns the current Expectation unchanged (fluent chaining).
func (not *Negation) Is() Expectation {
	return not
}

// Should returns the current Expectation unchanged (fluent chaining).
func (not *Negation) Should() Expectation {
	return not
}

// Not cancels the negation, returning the positive expectation this
// Negation was derived from.
func (not *Negation) Not() Expectation {
	return not.inverse
}

// IsNot is equivalent to Not().
func (not *Negation) IsNot() Expectation {
	return not.Not()
}

// DoesNot is equivalent to Not().
func (not *Negation) DoesNot() Expectation {
	return not.Not()
}
// At returns a negated expectation about the element at the given index,
// delegating to the positive expectation's At and re-negating the result.
func (not *Negation) At(index interface{}) Expectation {
	return not.inverse.At(index).Not()
}
// Nil asserts the value is not nil.
func (not *Negation) Nil() {
	not.Equals(nil)
}

// True asserts the value is not true.
func (not *Negation) True() {
	not.Equals(true)
}

// False asserts the value is not false.
func (not *Negation) False() {
	not.Equals(false)
}

// Empty asserts the value is a collection with non-zero length.
func (not *Negation) Empty() {
	not.HasLength(0)
}

// HasLength asserts the value's length differs from given. The value must
// support reflect's Len (array, slice, map, string, or channel); otherwise
// reflect panics.
func (not *Negation) HasLength(given int) {
	if reflect.ValueOf(not.inverse.actual).Len() == given {
		not.Errorf("%s: len(%#v) == %d", not.Name(), not.inverse.actual, given)
	}
}

// HaveLength is equivalent to HasLength().
func (not *Negation) HaveLength(given int) {
	not.HasLength(given)
}
// Equals asserts inequality to a given value; the test fails (via Errorf)
// when the actual value equals given.
func (not *Negation) Equals(given interface{}) {
	if equal(not.inverse.actual, given) {
		not.Errorf("%s: %#v == %#v", not.Name(), not.inverse.actual, given)
	}
}

// Eq is equivalent to Equals().
func (not *Negation) Eq(given interface{}) {
	not.Equals(given)
}

// Equal is equivalent to Equals().
func (not *Negation) Equal(given interface{}) {
	not.Equals(given)
}

// EqualTo is equivalent to Equals().
func (not *Negation) EqualTo(given interface{}) {
	not.Equals(given)
}
// Matches asserts that the value's string form does not match the given
// regular expression pattern; an invalid pattern is reported as a test error.
func (not *Negation) Matches(pattern string) {
	str := fmt.Sprint(not.inverse.actual)
	match, err := regexp.MatchString(pattern, str)
	if err != nil {
		not.Errorf("%s: %s", not.Name(), err)
	} else if match {
		not.Errorf("%s: '%s' matches /%s/", not.Name(), str, pattern)
	}
}

// Match is equivalent to Matches().
func (not *Negation) Match(pattern string) {
	not.Matches(pattern)
}
package treemap
// template type TreeMap(Key, Value)
// Key is a generic key type of the map (placeholder for code generation).
type Key interface{}
// Value is a generic value type of the map (placeholder for code generation).
type Value interface{}
// TreeMap is the red-black tree based map.
// endNode is a sentinel whose left child is the root; beginNode always
// points at the leftmost (smallest) node, or at endNode when empty.
type TreeMap struct {
	endNode *node
	beginNode *node
	count int
	// Less returns a < b
	Less func(a Key, b Key) bool
}
// node is a single red-black tree node; isBlack encodes the node color.
type node struct {
	right *node
	left *node
	parent *node
	isBlack bool
	key Key
	value Value
}
// New creates and returns new TreeMap.
// Parameter less is a function returning a < b.
// The freshly created sentinel serves as both endNode and beginNode of
// the empty map.
func New(less func(a Key, b Key) bool) *TreeMap {
	endNode := &node{isBlack: true}
	return &TreeMap{beginNode: endNode, endNode: endNode, Less: less}
}
// Len returns total count of elements in a map.
// Complexity: O(1).
func (t *TreeMap) Len() int { return t.count }
// Set sets the value and silently overrides previous value if it exists.
// Complexity: O(log N).
func (t *TreeMap) Set(key Key, value Value) {
	parent := t.endNode
	current := parent.left // root is the sentinel's left child
	less := true
	// Standard BST descent; equal keys just overwrite the value in place.
	for current != nil {
		parent = current
		switch {
		case t.Less(key, current.key):
			current = current.left
			less = true
		case t.Less(current.key, key):
			current = current.right
			less = false
		default:
			current.value = value
			return
		}
	}
	x := &node{parent: parent, value: value, key: key}
	if less {
		parent.left = x
	} else {
		parent.right = x
	}
	// A new left child of the current minimum becomes the new minimum.
	if t.beginNode.left != nil {
		t.beginNode = t.beginNode.left
	}
	t.insertFixup(x)
	t.count++
}
// Del deletes the value.
// Complexity: O(log N).
func (t *TreeMap) Del(key Key) {
	z := t.findNode(key)
	if z == nil {
		return
	}
	// If the minimum is removed, the new minimum is either its right
	// subtree (a minimum has no left child) or its parent.
	if t.beginNode == z {
		if z.right != nil {
			t.beginNode = z.right
		} else {
			t.beginNode = z.parent
		}
	}
	t.count--
	removeNode(t.endNode.left, z)
}
// Clear clears the map.
// Complexity: O(1).
// Dropping the root (endNode.left) releases the whole tree to the GC.
func (t *TreeMap) Clear() {
	t.count = 0
	t.beginNode = t.endNode
	t.endNode.left = nil
}
// Get retrieves a value from a map for specified key and reports if it exists.
// Complexity: O(log N).
// On a miss it returns the sentinel's (zero) value and false.
func (t *TreeMap) Get(id Key) (Value, bool) {
	node := t.findNode(id)
	if node == nil {
		node = t.endNode
	}
	return node.value, node != t.endNode
}
// Contains checks if key exists in a map.
// Complexity: O(log N)
func (t *TreeMap) Contains(id Key) bool { return t.findNode(id) != nil }
// Range returns a pair of iterators that you can use to go through all the keys in the range [from, to].
// More specifically it returns iterators pointing to lower bound and upper bound.
// Complexity: O(log N).
func (t *TreeMap) Range(from, to Key) (ForwardIterator, ForwardIterator) {
	return t.LowerBound(from), t.UpperBound(to)
}
// LowerBound returns an iterator pointing to the first element that is not less than the given key.
// Complexity: O(log N).
// result tracks the best candidate so far; it stays at endNode when every
// key is less than the given one (i.e. no lower bound exists).
func (t *TreeMap) LowerBound(key Key) ForwardIterator {
	result := t.endNode
	node := t.endNode.left
	if node == nil {
		return ForwardIterator{tree: t, node: t.endNode}
	}
	for {
		if t.Less(node.key, key) {
			if node.right != nil {
				node = node.right
			} else {
				return ForwardIterator{tree: t, node: result}
			}
		} else {
			result = node
			if node.left != nil {
				node = node.left
			} else {
				return ForwardIterator{tree: t, node: result}
			}
		}
	}
}
// UpperBound returns an iterator pointing to the first element that is greater than the given key.
// Complexity: O(log N).
// Same descent as LowerBound but with the comparison !Less(key, node.key),
// i.e. it skips nodes whose key is <= the given key.
func (t *TreeMap) UpperBound(key Key) ForwardIterator {
	result := t.endNode
	node := t.endNode.left
	if node == nil {
		return ForwardIterator{tree: t, node: t.endNode}
	}
	for {
		if !t.Less(key, node.key) {
			if node.right != nil {
				node = node.right
			} else {
				return ForwardIterator{tree: t, node: result}
			}
		} else {
			result = node
			if node.left != nil {
				node = node.left
			} else {
				return ForwardIterator{tree: t, node: result}
			}
		}
	}
}
// Iterator returns an iterator for tree map.
// It starts at the first element and goes to the one-past-the-end position.
// You can iterate a map at O(N) complexity.
// Method complexity: O(1)
func (t *TreeMap) Iterator() ForwardIterator { return ForwardIterator{tree: t, node: t.beginNode} }
// Reverse returns a reverse iterator for tree map.
// It starts at the last element and goes to the one-before-the-start position.
// You can iterate a map at O(N) complexity.
// Method complexity: O(log N)
func (t *TreeMap) Reverse() ReverseIterator {
	node := t.endNode.left
	if node != nil {
		node = mostRight(node) // descend to the maximum element
	}
	return ReverseIterator{tree: t, node: node}
}
// findNode performs a plain binary-search descent from the root and
// returns the node holding id, or nil when no such key is stored.
// Two keys are considered equal when neither compares Less than the other.
func (t *TreeMap) findNode(id Key) *node {
	for n := t.endNode.left; n != nil; {
		if t.Less(id, n.key) {
			n = n.left
			continue
		}
		if t.Less(n.key, id) {
			n = n.right
			continue
		}
		return n
	}
	return nil
}
// mostLeft returns the leftmost (minimum) node of the subtree rooted at x.
func mostLeft(x *node) *node {
	for {
		next := x.left
		if next == nil {
			return x
		}
		x = next
	}
}
// mostRight returns the rightmost (maximum) node of the subtree rooted at x.
func mostRight(x *node) *node {
	for {
		next := x.right
		if next == nil {
			return x
		}
		x = next
	}
}
// successor returns the in-order successor of x. For the maximum node the
// climb terminates at the endNode sentinel (the root is its left child),
// which acts as the one-past-the-end position.
func successor(x *node) *node {
	if x.right != nil {
		return mostLeft(x.right)
	}
	for x != x.parent.left {
		x = x.parent
	}
	return x.parent
}
// predecessor returns the in-order predecessor of x, or nil when x is the
// minimum (the climb runs off the top of the tree).
func predecessor(x *node) *node {
	if x.left != nil {
		return mostRight(x.left)
	}
	for x.parent != nil && x != x.parent.right {
		x = x.parent
	}
	return x.parent
}
// rotateLeft performs a standard left rotation around x: x's right child y
// takes x's place and x becomes y's left child. Requires x.right != nil and
// x.parent != nil (the endNode sentinel guarantees the root has a parent).
func rotateLeft(x *node) {
	y := x.right
	x.right = y.left
	if x.right != nil {
		x.right.parent = x
	}
	y.parent = x.parent
	if x == x.parent.left {
		x.parent.left = y
	} else {
		x.parent.right = y
	}
	y.left = x
	x.parent = y
}
// rotateRight performs a standard right rotation around x: x's left child y
// takes x's place and x becomes y's right child. Mirror image of rotateLeft;
// requires x.left != nil and x.parent != nil.
func rotateRight(x *node) {
	y := x.left
	x.left = y.right
	if x.left != nil {
		x.left.parent = x
	}
	y.parent = x.parent
	if x == x.parent.left {
		x.parent.left = y
	} else {
		x.parent.right = y
	}
	y.right = x
	x.parent = y
}
// insertFixup restores the red-black invariants after inserting node x.
// This is the classic insert fixup (cf. CLRS) with colors stored as
// isBlack; the two branches are mirror images depending on whether x's
// parent is a left or a right child.
func (t *TreeMap) insertFixup(x *node) {
	root := t.endNode.left
	x.isBlack = x == root // new nodes are red unless they are the root
	for x != root && !x.parent.isBlack {
		if x.parent == x.parent.parent.left {
			y := x.parent.parent.right // uncle
			if y != nil && !y.isBlack {
				// Red uncle: recolor parent/grandparent/uncle and move up.
				x = x.parent
				x.isBlack = true
				x = x.parent
				x.isBlack = x == root
				y.isBlack = true
			} else {
				// Black uncle: rotate into the outer configuration, then
				// recolor and rotate the grandparent; the loop terminates.
				if x != x.parent.left {
					x = x.parent
					rotateLeft(x)
				}
				x = x.parent
				x.isBlack = true
				x = x.parent
				x.isBlack = false
				rotateRight(x)
				break
			}
		} else {
			y := x.parent.parent.left // uncle (mirror case)
			if y != nil && !y.isBlack {
				x = x.parent
				x.isBlack = true
				x = x.parent
				x.isBlack = x == root
				y.isBlack = true
			} else {
				if x == x.parent.left {
					x = x.parent
					rotateRight(x)
				}
				x = x.parent
				x.isBlack = true
				x = x.parent
				x.isBlack = false
				rotateLeft(x)
				break
			}
		}
	}
}
// removeNode unlinks node z from the tree rooted at root and rebalances.
// Classic red-black deletion: y is the node physically spliced out (z
// itself, or z's in-order successor when z has two children), x is y's
// single child (possibly nil), and w is x's sibling used during fixup.
// nolint: gocyclo
func removeNode(root *node, z *node) {
	var y *node
	if z.left == nil || z.right == nil {
		y = z
	} else {
		y = successor(z)
	}
	var x *node
	if y.left != nil {
		x = y.left
	} else {
		x = y.right
	}
	var w *node
	// Splice y out, remembering x's sibling w for the fixup loop below.
	if x != nil {
		x.parent = y.parent
	}
	if y == y.parent.left {
		y.parent.left = x
		if y != root {
			w = y.parent.right
		} else {
			root = x // w == nil
		}
	} else {
		y.parent.right = x
		w = y.parent.left
	}
	removedBlack := y.isBlack
	// If the successor was spliced out, transplant y into z's position,
	// adopting z's links and color.
	if y != z {
		y.parent = z.parent
		if z == z.parent.left {
			y.parent.left = y
		} else {
			y.parent.right = y
		}
		y.left = z.left
		y.left.parent = y
		y.right = z.right
		if y.right != nil {
			y.right.parent = y
		}
		y.isBlack = z.isBlack
		if root == z {
			root = y
		}
	}
	// Removing a black node may violate the black-height invariant;
	// fix it up unless the tree became empty.
	if removedBlack && root != nil {
		if x != nil {
			x.isBlack = true
		} else {
			// x is nil (a "double black" leaf); climb the tree adjusting
			// colors and rotating around the sibling w until balanced.
			for {
				if w != w.parent.left {
					if !w.isBlack {
						w.isBlack = true
						w.parent.isBlack = false
						rotateLeft(w.parent)
						if root == w.left {
							root = w
						}
						w = w.left.right
					}
					if (w.left == nil || w.left.isBlack) && (w.right == nil || w.right.isBlack) {
						w.isBlack = false
						x = w.parent
						if x == root || !x.isBlack {
							x.isBlack = true
							break
						}
						if x == x.parent.left {
							w = x.parent.right
						} else {
							w = x.parent.left
						}
					} else {
						if w.right == nil || w.right.isBlack {
							w.left.isBlack = true
							w.isBlack = false
							rotateRight(w)
							w = w.parent
						}
						w.isBlack = w.parent.isBlack
						w.parent.isBlack = true
						w.right.isBlack = true
						rotateLeft(w.parent)
						break
					}
				} else {
					// Mirror image of the branch above (w is a left child).
					if !w.isBlack {
						w.isBlack = true
						w.parent.isBlack = false
						rotateRight(w.parent)
						if root == w.right {
							root = w
						}
						w = w.right.left
					}
					if (w.left == nil || w.left.isBlack) && (w.right == nil || w.right.isBlack) {
						w.isBlack = false
						x = w.parent
						if !x.isBlack || x == root {
							x.isBlack = true
							break
						}
						if x == x.parent.left {
							w = x.parent.right
						} else {
							w = x.parent.left
						}
					} else {
						if w.left == nil || w.left.isBlack {
							w.right.isBlack = true
							w.isBlack = false
							rotateLeft(w)
							w = w.parent
						}
						w.isBlack = w.parent.isBlack
						w.parent.isBlack = true
						w.left.isBlack = true
						rotateRight(w.parent)
						break
					}
				}
			}
		}
	}
}
// ForwardIterator represents a position in a tree map.
// It is designed to iterate a map in a forward order.
// It can point to any position from the first element to the one-past-the-end element.
// The one-past-the-end position is the tree's endNode sentinel.
type ForwardIterator struct {
	tree *TreeMap
	node *node
}
// Valid reports if an iterator's position is valid.
// In other words it returns true if an iterator is not at the one-past-the-end position.
func (i ForwardIterator) Valid() bool { return i.node != i.tree.endNode }
// Next moves an iterator to the next element.
// It panics if goes out of bounds (i.e. it is already one-past-the-end).
func (i *ForwardIterator) Next() {
	if i.node == i.tree.endNode {
		panic("out of bound iteration")
	}
	i.node = successor(i.node)
}
// Prev moves an iterator to the previous element.
// It panics if goes out of bounds (stepping back from the first element).
func (i *ForwardIterator) Prev() {
	i.node = predecessor(i.node)
	if i.node == nil {
		panic("out of bound iteration")
	}
}
// Key returns a key at an iterator's position.
func (i ForwardIterator) Key() Key { return i.node.key }
// Value returns a value at an iterator's position.
func (i ForwardIterator) Value() Value { return i.node.value }
// ReverseIterator represents a position in a tree map.
// It is designed to iterate a map in a reverse order.
// It can point to any position from the one-before-the-start element to the last element.
// The one-before-the-start position is represented by a nil node.
type ReverseIterator struct {
	tree *TreeMap
	node *node
}
// Valid reports if an iterator's position is valid.
// In other words it returns true if an iterator is not at the one-before-the-start position.
func (i ReverseIterator) Valid() bool { return i.node != nil }
// Next moves an iterator to the next element in reverse order
// (i.e. toward smaller keys). It panics if goes out of bounds.
func (i *ReverseIterator) Next() {
	if i.node == nil {
		panic("out of bound iteration")
	}
	i.node = predecessor(i.node)
}
// Prev moves an iterator to the previous element in reverse order
// (i.e. toward larger keys). From the one-before-the-start position it
// re-enters the tree at the minimum element. It panics if goes out of bounds.
func (i *ReverseIterator) Prev() {
	if i.node != nil {
		i.node = successor(i.node)
	} else {
		i.node = i.tree.beginNode
	}
	if i.node == i.tree.endNode {
		panic("out of bound iteration")
	}
}
// Key returns a key at an iterator's position.
func (i ReverseIterator) Key() Key { return i.node.key }
// Value returns a value at an iterator's position.
func (i ReverseIterator) Value() Value { return i.node.value }
package big
import (
"math/big"
"github.com/ALTree/bigfloat"
)
// Matrix is a matrix of complex rationals. Prec is the big.Float precision
// used when values are converted to floats for transcendental operations.
type Matrix struct {
	Prec uint
	Values [][]Rational
}
// NewMatrix make a new matrix with the given float precision and no values.
func NewMatrix(prec uint) Matrix {
	return Matrix{
		Prec: prec,
	}
}
// Add adds two matricies element-wise and stores the result in m.
// A 1x1 matrix on either side is treated as a scalar and broadcast over
// the other operand (addition is commutative, so the operands may be
// swapped to put the scalar first).
func (m *Matrix) Add(a, b *Matrix) *Matrix {
	asingular := len(a.Values) == 1 && len(a.Values[0]) == 1
	bsingular := len(b.Values) == 1 && len(b.Values[0]) == 1
	if asingular || bsingular {
		if bsingular {
			a, b = b, a
		}
		value, values := a.Values[0][0], [][]Rational{}
		for _, b := range b.Values {
			var row []Rational
			for _, bb := range b {
				ab := NewRational(big.NewRat(0, 1), big.NewRat(0, 1))
				row = append(row, *(ab.Add(&bb, &value)))
			}
			values = append(values, row)
		}
		m.Values = values
		return m
	}
	// General case: same-shape element-wise addition.
	values := [][]Rational{}
	for i, b := range b.Values {
		var row []Rational
		for j, bb := range b {
			ab := NewRational(big.NewRat(0, 1), big.NewRat(0, 1))
			row = append(row, *(ab.Add(&bb, &a.Values[i][j])))
		}
		values = append(values, row)
	}
	m.Values = values
	return m
}
// Sub subtracts two matricies element-wise and stores the result in m.
// Subtraction is not commutative, so the scalar (1x1) broadcast cases for
// a and b are handled separately instead of swapping operands.
func (m *Matrix) Sub(a, b *Matrix) *Matrix {
	if len(a.Values) == 1 && len(a.Values[0]) == 1 &&
		len(b.Values) == 1 && len(b.Values[0]) == 1 {
		// scalar - scalar
		aa, bb, values := a.Values[0][0], b.Values[0][0], [][]Rational{}
		var row []Rational
		ab := NewRational(big.NewRat(0, 1), big.NewRat(0, 1))
		row = append(row, *(ab.Sub(&aa, &bb)))
		values = append(values, row)
		m.Values = values
		return m
	} else if len(a.Values) == 1 && len(a.Values[0]) == 1 {
		// scalar - matrix
		value, values := a.Values[0][0], [][]Rational{}
		for _, b := range b.Values {
			var row []Rational
			for _, bb := range b {
				ab := NewRational(big.NewRat(0, 1), big.NewRat(0, 1))
				row = append(row, *(ab.Sub(&value, &bb)))
			}
			values = append(values, row)
		}
		m.Values = values
		return m
	} else if len(b.Values) == 1 && len(b.Values[0]) == 1 {
		// matrix - scalar
		value, values := b.Values[0][0], [][]Rational{}
		for _, a := range a.Values {
			var row []Rational
			for _, aa := range a {
				ab := NewRational(big.NewRat(0, 1), big.NewRat(0, 1))
				row = append(row, *(ab.Sub(&aa, &value)))
			}
			values = append(values, row)
		}
		m.Values = values
		return m
	}
	// General case: same-shape element-wise subtraction.
	values := [][]Rational{}
	for i, b := range b.Values {
		var row []Rational
		for j, bb := range b {
			ab := NewRational(big.NewRat(0, 1), big.NewRat(0, 1))
			row = append(row, *(ab.Sub(&a.Values[i][j], &bb)))
		}
		values = append(values, row)
	}
	m.Values = values
	return m
}
// Mul multiplies two matricies and stores the result in m.
// A 1x1 operand is broadcast as a scalar multiplication; otherwise a
// standard O(n^3) matrix product is computed (a must be MxK, b KxN).
func (m *Matrix) Mul(a, b *Matrix) *Matrix {
	asingular := len(a.Values) == 1 && len(a.Values[0]) == 1
	bsingular := len(b.Values) == 1 && len(b.Values[0]) == 1
	if asingular || bsingular {
		if bsingular {
			a, b = b, a
		}
		value, values := a.Values[0][0], [][]Rational{}
		for _, b := range b.Values {
			var row []Rational
			for _, bb := range b {
				ab := NewRational(big.NewRat(0, 1), big.NewRat(0, 1))
				row = append(row, *(ab.Mul(&bb, &value)))
			}
			values = append(values, row)
		}
		m.Values = values
		return m
	}
	values := [][]Rational{}
	for x := 0; x < len(a.Values); x++ {
		var row []Rational
		for y := 0; y < len(b.Values[0]); y++ {
			sum := NewRational(big.NewRat(0, 1), big.NewRat(0, 1))
			for z := 0; z < len(b.Values); z++ {
				ab := NewRational(big.NewRat(0, 1), big.NewRat(0, 1))
				ab.Mul(&a.Values[x][z], &b.Values[z][y])
				sum.Add(sum, ab)
			}
			row = append(row, *sum)
		}
		values = append(values, row)
	}
	m.Values = values
	return m
}
// Div divides two matricies. Only the scalar (1x1 by 1x1) case is
// supported; anything else panics.
func (m *Matrix) Div(a, b *Matrix) *Matrix {
	if len(a.Values) == 1 && len(a.Values[0]) == 1 &&
		len(b.Values) == 1 && len(b.Values[0]) == 1 {
		aa, bb, values := a.Values[0][0], b.Values[0][0], [][]Rational{}
		var row []Rational
		ab := NewRational(big.NewRat(0, 1), big.NewRat(0, 1))
		row = append(row, *(ab.Div(&aa, &bb)))
		values = append(values, row)
		m.Values = values
		return m
	}
	panic("can't divide non 1x1 matrices")
}
// apply maps a unary function over every element of a and stores the
// resulting matrix in m. The function receives a pointer to the loop's
// copy of the element, so it may mutate its argument freely.
func (m *Matrix) apply(a *Matrix, function func(a *Rational) *Rational) *Matrix {
	values := [][]Rational{}
	for _, a := range a.Values {
		var row []Rational
		for _, aa := range a {
			row = append(row, *(function(&aa)))
		}
		values = append(values, row)
	}
	m.Values = values
	return m
}
// apply2 maps a binary function over every element of a, passing y as the
// fixed second argument, and stores the resulting matrix in m.
func (m *Matrix) apply2(a *Matrix, y *Rational, function func(a *Rational, b *Rational) *Rational) *Matrix {
	values := [][]Rational{}
	for _, a := range a.Values {
		var row []Rational
		for _, aa := range a {
			row = append(row, *(function(&aa, y)))
		}
		values = append(values, row)
	}
	m.Values = values
	return m
}
// Abs computes the absolute value of the entries of the matrix.
// Each rational entry is converted to a Float at m.Prec, the float
// operation is applied, and the result is converted back to a rational.
func (m *Matrix) Abs(a *Matrix) *Matrix {
	return m.apply(a, func(a *Rational) *Rational {
		x := NewFloat(big.NewFloat(0).SetPrec(m.Prec), big.NewFloat(0).SetPrec(m.Prec))
		x.SetRat(a)
		x.Abs(x).Rat(a)
		return a
	})
}
// Conj computes the complex conjugate of a (element-wise, via Float).
func (m *Matrix) Conj(a *Matrix) *Matrix {
	return m.apply(a, func(a *Rational) *Rational {
		x := NewFloat(big.NewFloat(0).SetPrec(m.Prec), big.NewFloat(0).SetPrec(m.Prec))
		x.SetRat(a)
		x.Conj(x).Rat(a)
		return a
	})
}
// Sqrt computes the element-wise square root of the matrix
// (via Float at precision m.Prec; not a matrix square root).
func (m *Matrix) Sqrt(a *Matrix) *Matrix {
	return m.apply(a, func(a *Rational) *Rational {
		x := NewFloat(big.NewFloat(0).SetPrec(m.Prec), big.NewFloat(0).SetPrec(m.Prec))
		x.SetRat(a)
		x.Sqrt(x).Rat(a)
		return a
	})
}
// Atan2 computes atan2 of each element
// https://en.wikipedia.org/wiki/Atan2
func (m *Matrix) Atan2(a *Matrix) *Matrix {
	return m.apply(a, func(a *Rational) *Rational {
		x := NewFloat(big.NewFloat(0).SetPrec(m.Prec), big.NewFloat(0).SetPrec(m.Prec))
		x.SetRat(a)
		x.Atan2(x).Rat(a)
		return a
	})
}
// Arg computes arg(x + yi) = tan-1(y/x) for each element
// https://mathworld.wolfram.com/ComplexArgument.html
func (m *Matrix) Arg(a *Matrix) *Matrix {
	return m.apply(a, func(a *Rational) *Rational {
		x := NewFloat(big.NewFloat(0).SetPrec(m.Prec), big.NewFloat(0).SetPrec(m.Prec))
		x.SetRat(a)
		x.Arg(x).Rat(a)
		return a
	})
}
// Exp computes e^x for each complex element
// https://www.wolframalpha.com/input/?i=e%5E%28x+%2B+yi%29
func (m *Matrix) Exp(a *Matrix) *Matrix {
	return m.apply(a, func(a *Rational) *Rational {
		x := NewFloat(big.NewFloat(0).SetPrec(m.Prec), big.NewFloat(0).SetPrec(m.Prec))
		x.SetRat(a)
		x.Exp(x).Rat(a)
		return a
	})
}
// Cos computes the element-wise cosine of the matrix
// https://www.wolframalpha.com/input/?i=cos%28x+%2B+yi%29
func (m *Matrix) Cos(a *Matrix) *Matrix {
	return m.apply(a, func(a *Rational) *Rational {
		x := NewFloat(big.NewFloat(0).SetPrec(m.Prec), big.NewFloat(0).SetPrec(m.Prec))
		x.SetRat(a)
		x.Cos(x).Rat(a)
		return a
	})
}
// Sin computes the element-wise sine of the matrix
// https://www.wolframalpha.com/input/?i=sin%28x+%2B+yi%29
func (m *Matrix) Sin(a *Matrix) *Matrix {
	return m.apply(a, func(a *Rational) *Rational {
		x := NewFloat(big.NewFloat(0).SetPrec(m.Prec), big.NewFloat(0).SetPrec(m.Prec))
		x.SetRat(a)
		x.Sin(x).Rat(a)
		return a
	})
}
// Tan computes the element-wise tangent of the matrix
// https://en.wikipedia.org/wiki/Trigonometric_functions
func (m *Matrix) Tan(a *Matrix) *Matrix {
	return m.apply(a, func(a *Rational) *Rational {
		x := NewFloat(big.NewFloat(0).SetPrec(m.Prec), big.NewFloat(0).SetPrec(m.Prec))
		x.SetRat(a)
		x.Tan(x).Rat(a)
		return a
	})
}
// Log computes the element-wise natural log of the matrix
// https://en.wikipedia.org/wiki/Complex_logarithm
func (m *Matrix) Log(a *Matrix) *Matrix {
	return m.apply(a, func(a *Rational) *Rational {
		x := NewFloat(big.NewFloat(0).SetPrec(m.Prec), big.NewFloat(0).SetPrec(m.Prec))
		x.SetRat(a)
		x.Log(x).Rat(a)
		return a
	})
}
// Pow computes x**y element-wise, with y a fixed complex exponent
// https://mathworld.wolfram.com/ComplexExponentiation.html
func (m *Matrix) Pow(x *Matrix, y *Rational) *Matrix {
	return m.apply2(x, y, func(a, b *Rational) *Rational {
		x := NewFloat(big.NewFloat(0).SetPrec(m.Prec), big.NewFloat(0).SetPrec(m.Prec))
		x.SetRat(a)
		y := NewFloat(big.NewFloat(0).SetPrec(m.Prec), big.NewFloat(0).SetPrec(m.Prec))
		y.SetRat(b)
		x.Pow(x, y).Rat(a)
		return a
	})
}
// Neg negates every element of the matrix.
func (m *Matrix) Neg(a *Matrix) *Matrix {
	return m.apply(a, func(a *Rational) *Rational {
		a.Neg(a)
		return a
	})
}
// String renders the matrix. A 1x1 matrix prints as a plain scalar;
// otherwise rows are separated by ";" and columns by spaces inside "[...]".
func (m *Matrix) String() string {
	if len(m.Values) == 1 && len(m.Values[0]) == 1 {
		x := NewFloat(big.NewFloat(0).SetPrec(m.Prec), big.NewFloat(0).SetPrec(m.Prec))
		x.SetRat(&m.Values[0][0])
		return x.String()
	}
	s, last := "[", len(m.Values)-1
	for i, row := range m.Values {
		lastColumn := len(row) - 1
		for j, value := range row {
			x := NewFloat(big.NewFloat(0).SetPrec(m.Prec), big.NewFloat(0).SetPrec(m.Prec))
			x.SetRat(&value)
			s += x.String()
			if j < lastColumn {
				s += " "
			}
		}
		if i < last {
			s += ";"
		}
	}
	return s + "]"
}
// Float is a complex number with arbitrary-precision parts:
// A is the real part and B the imaginary part.
type Float struct {
	A, B *big.Float
}
// NewFloat creates a new complex number from real part a and imaginary part b.
func NewFloat(a, b *big.Float) *Float {
	return &Float{
		A: a,
		B: b,
	}
}
// Abs computes the absolute value of a: sqrt(a.A^2 + a.B^2).
// The result is real, so the imaginary part is reset to zero.
func (f *Float) Abs(a *Float) *Float {
	f.A.Mul(a.A, a.A)
	f.B.Mul(a.B, a.B)
	f.A = bigfloat.Sqrt(f.A.Add(f.A, f.B))
	f.B = big.NewFloat(0)
	return f
}
// Add adds two complex numbers component-wise and stores the result in f.
func (f *Float) Add(a, b *Float) *Float {
	f.A.Add(a.A, b.A)
	f.B.Add(a.B, b.B)
	return f
}
// Sub subtracts two complex numbers component-wise and stores the result in f.
func (f *Float) Sub(a, b *Float) *Float {
	f.A.Sub(a.A, b.A)
	f.B.Sub(a.B, b.B)
	return f
}
// Mul multiplies two complex numbers:
// (a.A + a.B i)(b.A + b.B i) = (a.A*b.A - a.B*b.B) + (a.A*b.B + a.B*b.A)i.
// All four products are computed into temporaries before f is written,
// so f may alias a or b.
func (f *Float) Mul(a, b *Float) *Float {
	x1, x2, x3, x4 :=
		big.NewFloat(0).SetPrec(f.A.Prec()), big.NewFloat(0).SetPrec(f.B.Prec()),
		big.NewFloat(0).SetPrec(f.B.Prec()), big.NewFloat(0).SetPrec(f.A.Prec())
	x1.Mul(a.A, b.A) // a*a
	x2.Mul(a.A, b.B) // a*ib
	x3.Mul(a.B, b.A) // ib*a
	x4.Mul(a.B, b.B) // i^2 * b = -b
	f.A.Add(x1, x4.Neg(x4))
	f.B.Add(x2, x3)
	return f
}
// Conj computes the complex conjugate of a (negated imaginary part).
func (f *Float) Conj(a *Float) *Float {
	f.A.Set(a.A)
	f.B.Neg(a.B)
	return f
}
// Div divides two complex numbers by multiplying numerator and denominator
// with conj(b): f = (a*conj(b)) / (b*conj(b)), where the denominator is real.
//
// Bug fix: the previous version built its scratch values with
// a.A.Copy(a.A) / b.A.Copy(b.A), which are no-ops that return the operand
// itself — the scratch values aliased a and b, so the in-place Mul calls
// clobbered both operands. The operands are now genuinely copied first
// (big.Float.Copy also carries over the operand's precision).
func (f *Float) Div(a, b *Float) *Float {
	c := NewFloat(big.NewFloat(0).SetPrec(f.A.Prec()), big.NewFloat(0).SetPrec(f.B.Prec()))
	c.Conj(b)
	// Real copies so a and b survive the in-place multiplications below.
	x := NewFloat(big.NewFloat(0).Copy(a.A), big.NewFloat(0).Copy(a.B))
	y := NewFloat(big.NewFloat(0).Copy(b.A), big.NewFloat(0).Copy(b.B))
	x.Mul(x, c) // numerator: a * conj(b)
	y.Mul(y, c) // denominator: b * conj(b) (imaginary part vanishes)
	f.A.Quo(x.A, y.A)
	f.B.Quo(x.B, y.A)
	return f
}
// Sqrt computes the principal square root of the complex number:
// real = sqrt((|a| + a.A)/2), imag = sign(a.B) * sqrt((|a| - a.A)/2).
// https://www.johndcook.com/blog/2020/06/09/complex-square-root/
func (f *Float) Sqrt(a *Float) *Float {
	x := big.NewFloat(0).SetPrec(f.A.Prec())
	y := big.NewFloat(0).SetPrec(f.B.Prec())
	l := big.NewFloat(0).SetPrec(f.A.Prec())
	// l = |a| = sqrt(a.A^2 + a.B^2)
	x.Mul(a.A, a.A)
	y.Mul(a.B, a.B)
	l.Add(x, y)
	l = bigfloat.Sqrt(l)
	aa := big.NewFloat(0).SetPrec(f.A.Prec())
	aa.Add(l, a.A)
	aa.Quo(aa, big.NewFloat(2).SetPrec(f.A.Prec()))
	aa = bigfloat.Sqrt(aa)
	f.B.Sub(l, a.A)
	f.B.Quo(f.B, big.NewFloat(2).SetPrec(f.B.Prec()))
	f.B = bigfloat.Sqrt(f.B)
	// The imaginary part takes the sign of a.B.
	f.B.Mul(big.NewFloat(float64(a.B.Sign())).SetPrec(f.B.Prec()), f.B)
	f.A = aa
	return f
}
// Atan2 computes atan2 over the complex value x treated as the point
// (x.A, x.B): Arg gives atan(B/A), and the quadrant is corrected by
// adding or subtracting pi when the real part is negative.
// https://en.wikipedia.org/wiki/Atan2
func (f *Float) Atan2(x *Float) *Float {
	a := x.A
	b := x.B
	if a.Cmp(big.NewFloat(0).SetPrec(a.Prec())) > 0 {
		f.Arg(x)
	} else if a.Cmp(big.NewFloat(0).SetPrec(a.Prec())) < 0 &&
		b.Cmp(big.NewFloat(0).SetPrec(b.Prec())) >= 0 {
		f.Arg(x)
		f.A.Add(f.A, bigfloat.PI(a.Prec()))
	} else if a.Cmp(big.NewFloat(0).SetPrec(a.Prec())) < 0 &&
		b.Cmp(big.NewFloat(0).SetPrec(b.Prec())) < 0 {
		f.Arg(x)
		f.A.Sub(f.A, bigfloat.PI(f.A.Prec()))
	} else {
		// a == 0: Arg handles the axis cases itself.
		f.Arg(x)
	}
	return f
}
// Arg computes arg(x + yi) = tan-1(y/x); the result is real (f.B = 0).
// Axis and unit-vector special cases are handled explicitly before
// falling back to arctan(b/a).
// https://mathworld.wolfram.com/ComplexArgument.html
func (f *Float) Arg(x *Float) *Float {
	a := x.A
	b := x.B
	f.B = big.NewFloat(0).SetPrec(b.Prec())
	if a.Cmp(big.NewFloat(0).SetPrec(a.Prec())) == 0 {
		// Pure imaginary: +/- pi/2; the origin is flagged with +Inf.
		if b.Cmp(big.NewFloat(0).SetPrec(a.Prec())) < 0 {
			f.A.Set(bigfloat.PI(a.Prec()))
			f.A.Quo(f.A, big.NewFloat(2).SetPrec(a.Prec()))
			f.A.Neg(f.A)
		} else if b.Cmp(big.NewFloat(0).SetPrec(a.Prec())) == 0 {
			f.A.SetInf(false)
		} else {
			f.A.Set(bigfloat.PI(a.Prec()))
			f.A.Quo(f.A, big.NewFloat(2).SetPrec(a.Prec()))
		}
		return f
	}
	if a.Cmp(big.NewFloat(1).SetPrec(a.Prec())) == 0 &&
		b.Cmp(big.NewFloat(0).SetPrec(b.Prec())) == 0 {
		f.A.Set(big.NewFloat(0).SetPrec(a.Prec()))
	} else if a.Cmp(big.NewFloat(1).SetPrec(a.Prec())) == 0 &&
		b.Cmp(big.NewFloat(1).SetPrec(b.Prec())) == 0 {
		f.A.Set(bigfloat.PI(a.Prec()))
		f.A.Quo(f.A, big.NewFloat(4).SetPrec(a.Prec()))
	} else if a.Cmp(big.NewFloat(0).SetPrec(a.Prec())) == 0 &&
		b.Cmp(big.NewFloat(1).SetPrec(b.Prec())) == 0 {
		f.A.Set(bigfloat.PI(b.Prec()))
		f.A.Quo(f.A, big.NewFloat(2).SetPrec(b.Prec()))
	} else if a.Cmp(big.NewFloat(-1).SetPrec(a.Prec())) == 0 &&
		b.Cmp(big.NewFloat(0).SetPrec(b.Prec())) == 0 {
		f.A.Set(bigfloat.PI(a.Prec()))
	} else if a.Cmp(big.NewFloat(0).SetPrec(a.Prec())) == 0 &&
		b.Cmp(big.NewFloat(-1).SetPrec(b.Prec())) == 0 {
		f.A.Set(bigfloat.PI(b.Prec()))
		f.A.Quo(f.A, big.NewFloat(2).SetPrec(b.Prec()))
		f.A.Neg(f.A)
	} else {
		f.A.Quo(b, a)
		f.A = bigfloat.Arctan(f.A)
	}
	return f
}
// Exp computes e^x for a complex number:
// e^(a+bi) = e^a * (cos b + i sin b).
// https://www.wolframalpha.com/input/?i=e%5E%28x+%2B+yi%29
func (f *Float) Exp(x *Float) *Float {
	exp := bigfloat.Exp(x.A)
	cos := bigfloat.Cos(x.B)
	sin := bigfloat.Sin(x.B)
	f.A.Mul(exp, cos)
	f.B.Mul(exp, sin)
	return f
}
// Cos computes cosine of a complex number via Euler's formula:
// cos z = (e^(iz) + e^(-iz)) / 2. The NewFloat(y, x) constructions below
// build iz and -iz (multiplying a+bi by i swaps the parts and negates).
// https://www.wolframalpha.com/input/?i=cos%28x+%2B+yi%29
func (f *Float) Cos(x *Float) *Float {
	y1 := big.NewFloat(0).SetPrec(x.B.Prec())
	y1.Set(x.B)
	y1.Neg(y1)
	x1 := big.NewFloat(0).SetPrec(x.A.Prec())
	x1.Set(x.A)
	a := NewFloat(y1, x1) // iz = -b + ai
	y2 := big.NewFloat(0).SetPrec(x.B.Prec())
	y2.Set(x.B)
	x2 := big.NewFloat(0).SetPrec(x.A.Prec())
	x2.Set(x.A)
	x2.Neg(x2)
	b := NewFloat(y2, x2) // -iz = b - ai
	a.Exp(a)
	b.Exp(b)
	a.Add(a, b)
	x3 := NewFloat(big.NewFloat(.5).SetPrec(x.A.Prec()), big.NewFloat(0).SetPrec(x.B.Prec()))
	f.Mul(a, x3)
	return f
}
// Sin computes sine of a complex number via Euler's formula:
// sin z = (e^(iz) - e^(-iz)) / (2i); the final Mul by 0+0.5i folds in
// the division by 2i.
// https://www.wolframalpha.com/input/?i=sin%28x+%2B+yi%29
func (f *Float) Sin(x *Float) *Float {
	y1 := big.NewFloat(0).SetPrec(x.B.Prec())
	y1.Set(x.B)
	x1 := big.NewFloat(0).SetPrec(x.A.Prec())
	x1.Set(x.A)
	x1.Neg(x1)
	a := NewFloat(y1, x1) // -iz = b - ai
	y2 := big.NewFloat(0).SetPrec(x.B.Prec())
	y2.Set(x.B)
	y2.Neg(y2)
	x2 := big.NewFloat(0).SetPrec(x.A.Prec())
	x2.Set(x.A)
	b := NewFloat(y2, x2) // iz = -b + ai
	a.Exp(a)
	b.Exp(b)
	a.Sub(a, b)
	x3 := NewFloat(big.NewFloat(0).SetPrec(x.A.Prec()), big.NewFloat(.5).SetPrec(x.B.Prec()))
	f.Mul(a, x3)
	return f
}
// Tan computes tangent of a complex number as sin(x)/cos(x).
// https://en.wikipedia.org/wiki/Trigonometric_functions
//
// Bug fix: the previous version called x.Sin(x), overwriting the caller's
// argument with its sine. Both sine and cosine are now computed into
// scratch copies, leaving x untouched.
func (f *Float) Tan(x *Float) *Float {
	// big.Float.Copy also carries over x's precision.
	sin := NewFloat(big.NewFloat(0).Copy(x.A), big.NewFloat(0).Copy(x.B))
	cos := NewFloat(big.NewFloat(0).Copy(x.A), big.NewFloat(0).Copy(x.B))
	sin.Sin(sin)
	cos.Cos(cos)
	f.Div(sin, cos)
	return f
}
// Log computes the principal natural log of x:
// log z = ln|z| + i*atan2(imag, real).
// https://en.wikipedia.org/wiki/Complex_logarithm
func (f *Float) Log(x *Float) *Float {
	a := x.A
	aa := big.NewFloat(0).SetPrec(a.Prec())
	aa.Mul(a, a)
	b := x.B
	bb := big.NewFloat(0).SetPrec(b.Prec())
	bb.Mul(b, b)
	real := big.NewFloat(0).SetPrec(a.Prec())
	real.Add(aa, bb)
	real = bigfloat.Log(bigfloat.Sqrt(real)) // ln|z|
	y := NewFloat(big.NewFloat(0).SetPrec(a.Prec()), big.NewFloat(0).SetPrec(b.Prec()))
	y.Atan2(x)
	f.A = real
	f.B = y.A // Atan2 stores its (real) result in A
	return f
}
// Pow computes x**y for complex x = a+bi and y = c+di:
// |result| = (a^2+b^2)^(c/2) * e^(-d*arg(x)),
// angle    = c*arg(x) + d*ln(a^2+b^2)/2.
// 0**0 is flagged with +Inf in the real part.
// https://mathworld.wolfram.com/ComplexExponentiation.html
func (f *Float) Pow(x *Float, y *Float) *Float {
	if x.A.Cmp(big.NewFloat(0).SetPrec(x.A.Prec())) == 0 &&
		x.B.Cmp(big.NewFloat(0).SetPrec(x.B.Prec())) == 0 &&
		y.A.Cmp(big.NewFloat(0).SetPrec(y.A.Prec())) == 0 &&
		y.B.Cmp(big.NewFloat(0).SetPrec(y.B.Prec())) == 0 {
		f.A.SetInf(false)
		return f
	}
	a := big.NewFloat(0).SetPrec(x.A.Prec())
	a.Set(x.A)
	aa := big.NewFloat(0).SetPrec(x.A.Prec())
	aa.Mul(a, a)
	b := big.NewFloat(0).SetPrec(x.B.Prec())
	b.Set(x.B)
	bb := big.NewFloat(0).SetPrec(x.B.Prec())
	bb.Mul(b, b)
	sum := big.NewFloat(0).SetPrec(x.A.Prec())
	sum.Add(aa, bb) // |x|^2
	c := big.NewFloat(0).SetPrec(y.A.Prec())
	c.Set(y.A)
	cc := big.NewFloat(0).SetPrec(c.Prec())
	cc.Quo(c, big.NewFloat(2).SetPrec(c.Prec()))
	e := bigfloat.Pow(sum, cc) // (|x|^2)^(c/2) = |x|^c
	d := big.NewFloat(0).SetPrec(y.B.Prec())
	d.Set(y.B)
	arg := NewFloat(big.NewFloat(0).SetPrec(a.Prec()),
		big.NewFloat(0).SetPrec(b.Prec()))
	arg.Arg(x)
	exp := big.NewFloat(0).SetPrec(arg.A.Prec())
	exp.Mul(d, arg.A)
	exp.Neg(exp)
	exp = bigfloat.Exp(exp)
	e.Mul(e, exp) // modulus of the result
	i := big.NewFloat(0).SetPrec(c.Prec())
	i.Mul(c, arg.A)
	j := big.NewFloat(0).SetPrec(d.Prec())
	j.Mul(d, bigfloat.Log(sum))
	j.Quo(j, big.NewFloat(2).SetPrec(d.Prec()))
	i.Add(i, j) // angle of the result
	cos := bigfloat.Cos(i)
	cos.Mul(e, cos)
	f.A.Set(cos)
	sin := bigfloat.Sin(i)
	sin.Mul(e, sin)
	f.B.Set(sin)
	return f
}
// SetRat sets the value to a rational (both components).
func (f *Float) SetRat(r *Rational) {
	f.A.SetRat(r.A)
	f.B.SetRat(r.B)
}
// Rat stores the float in a rational (both components).
func (f *Float) Rat(r *Rational) {
	f.A.Rat(r.A)
	f.B.Rat(r.B)
}
// String returns a string of the complex number; a zero imaginary part
// is omitted so real values print as plain numbers.
func (f *Float) String() string {
	if f.B.Cmp(big.NewFloat(0)) == 0 {
		return f.A.String()
	}
	return f.A.String() + " + " + f.B.String() + "i"
}
// Rational is a complex number with exact rational parts:
// A is the real part and B the imaginary part.
type Rational struct {
	A, B *big.Rat
}
// NewRational creates a new complex number from real part a and imaginary part b.
func NewRational(a, b *big.Rat) *Rational {
	return &Rational{
		A: a,
		B: b,
	}
}
// Add adds two complex rationals component-wise and stores the result in r.
func (r *Rational) Add(a, b *Rational) *Rational {
	r.A.Add(a.A, b.A)
	r.B.Add(a.B, b.B)
	return r
}
// Sub subtracts two complex rationals component-wise and stores the result in r.
func (r *Rational) Sub(a, b *Rational) *Rational {
	r.A.Sub(a.A, b.A)
	r.B.Sub(a.B, b.B)
	return r
}
// Mul multiplies two complex rationals:
// (a.A + a.B i)(b.A + b.B i) = (a.A*b.A - a.B*b.B) + (a.A*b.B + a.B*b.A)i.
// All four products go into temporaries first, so r may alias a or b.
func (r *Rational) Mul(a, b *Rational) *Rational {
	x1, x2, x3, x4 :=
		big.NewRat(0, 1), big.NewRat(0, 1),
		big.NewRat(0, 1), big.NewRat(0, 1)
	x1.Mul(a.A, b.A) // a*a
	x2.Mul(a.A, b.B) // a*ib
	x3.Mul(a.B, b.A) // ib*a
	x4.Mul(a.B, b.B) // i^2 * b = -b
	r.A.Add(x1, x4.Neg(x4))
	r.B.Add(x2, x3)
	return r
}
// Conj computes the complex conjugate of a (negated imaginary part).
func (r *Rational) Conj(a *Rational) *Rational {
	r.A.Set(a.A)
	r.B.Neg(a.B)
	return r
}
// Div divides two complex rationals by multiplying numerator and
// denominator with conj(b): r = (a*conj(b)) / (b*conj(b)).
//
// Bug fix: the previous version built its scratch values with
// a.A.Set(a.A) / b.A.Set(b.A), which are no-ops returning the operand
// itself — the scratch values aliased a and b, so the in-place Mul calls
// clobbered both operands. The operands are now genuinely copied first.
func (r *Rational) Div(a, b *Rational) *Rational {
	c := NewRational(big.NewRat(0, 1), big.NewRat(0, 1))
	c.Conj(b)
	// Real copies so a and b survive the in-place multiplications below.
	x := NewRational(big.NewRat(0, 1).Set(a.A), big.NewRat(0, 1).Set(a.B))
	y := NewRational(big.NewRat(0, 1).Set(b.A), big.NewRat(0, 1).Set(b.B))
	x.Mul(x, c) // numerator: a * conj(b)
	y.Mul(y, c) // denominator: b * conj(b) (imaginary part vanishes)
	r.A.Quo(x.A, y.A)
	r.B.Quo(x.B, y.A)
	return r
}
// Neg negates the complex rational (both components).
func (r *Rational) Neg(a *Rational) *Rational {
	r.A.Neg(a.A)
	r.B.Neg(a.B)
	return r
}
// String returns a string of the complex rational, always in
// "A + Bi" form (the imaginary part is printed even when zero).
func (r *Rational) String() string {
	return r.A.String() + " + " + r.B.String() + "i"
}
package sk
import "github.com/ContextLogic/cldr"
// calendar holds the CLDR date/time formats and localized month, day and
// day-period names for the Slovak (sk) locale. Generated data — do not
// edit by hand; empty format-name values fall back per the cldr package.
var calendar = cldr.Calendar{
	Formats: cldr.CalendarFormats{
		Date: cldr.CalendarDateFormat{Full: "EEEE, d. MMMM y", Long: "d. MMMM y", Medium: "d. M. y", Short: "dd.MM.yy"},
		Time: cldr.CalendarDateFormat{Full: "H:mm:ss zzzz", Long: "H:mm:ss z", Medium: "H:mm:ss", Short: "H:mm"},
		DateTime: cldr.CalendarDateFormat{Full: "{1}, {0}", Long: "{1}, {0}", Medium: "{1}, {0}", Short: "{1} {0}"},
	},
	FormatNames: cldr.CalendarFormatNames{
		Months: cldr.CalendarMonthFormatNames{
			Abbreviated: cldr.CalendarMonthFormatNameValue{Jan: "jan", Feb: "feb", Mar: "mar", Apr: "apr", May: "máj", Jun: "jún", Jul: "júl", Aug: "aug", Sep: "sep", Oct: "okt", Nov: "nov", Dec: "dec"},
			Narrow: cldr.CalendarMonthFormatNameValue{Jan: "j", Feb: "f", Mar: "m", Apr: "a", May: "m", Jun: "j", Jul: "j", Aug: "a", Sep: "s", Oct: "o", Nov: "n", Dec: "d"},
			Short: cldr.CalendarMonthFormatNameValue{},
			Wide: cldr.CalendarMonthFormatNameValue{Jan: "január", Feb: "február", Mar: "marec", Apr: "apríl", May: "máj", Jun: "jún", Jul: "júl", Aug: "august", Sep: "september", Oct: "október", Nov: "november", Dec: "december"},
		},
		Days: cldr.CalendarDayFormatNames{
			Abbreviated: cldr.CalendarDayFormatNameValue{Sun: "ne", Mon: "po", Tue: "ut", Wed: "st", Thu: "št", Fri: "pi", Sat: "so"},
			Narrow: cldr.CalendarDayFormatNameValue{Sun: "N", Mon: "P", Tue: "U", Wed: "S", Thu: "Š", Fri: "P", Sat: "S"},
			Short: cldr.CalendarDayFormatNameValue{Sun: "Ne", Mon: "Po", Tue: "Ut", Wed: "St", Thu: "Št", Fri: "Pi", Sat: "So"},
			Wide: cldr.CalendarDayFormatNameValue{Sun: "nedeľa", Mon: "pondelok", Tue: "utorok", Wed: "streda", Thu: "štvrtok", Fri: "piatok", Sat: "sobota"},
		},
		Periods: cldr.CalendarPeriodFormatNames{
			Abbreviated: cldr.CalendarPeriodFormatNameValue{},
			Narrow: cldr.CalendarPeriodFormatNameValue{AM: "dop.", PM: "odp."},
			Short: cldr.CalendarPeriodFormatNameValue{},
			Wide: cldr.CalendarPeriodFormatNameValue{AM: "dopoludnia", PM: "odpoludnia"},
		},
	},
}
package femto
import (
"time"
dmp "github.com/sergi/go-diff/diffmatchpatch"
)
const (
	// Opposite and undoing events must have opposite values
	// (UndoTextEvent negates EventType to invert an event).
	// TextEventInsert represents an insertion event
	TextEventInsert = 1
	// TextEventRemove represents a deletion event
	TextEventRemove = -1
	// TextEventReplace represents a replace event
	TextEventReplace = 0
	undoThreshold = 500 // If two events are less than n milliseconds apart, undo both of them
)
// TextEvent holds data for a manipulation on some text that can be undone.
// C is the cursor state at the time of the event; Deltas lists the
// individual changes; Time is used to coalesce nearby undos.
type TextEvent struct {
	C Cursor
	EventType int
	Deltas []Delta
	Time time.Time
}
// A Delta is a change to the buffer: the Text involved and the buffer
// region [Start, End) it applies to.
type Delta struct {
	Text string
	Start Loc
	End Loc
}
// ExecuteTextEvent runs a text event against the buffer.
// For removes and replaces the deltas are updated in place with the text
// that was actually removed, so the event can later be inverted by
// UndoTextEvent. Replace deltas are reversed afterwards so that undoing
// applies them in the opposite order.
func ExecuteTextEvent(t *TextEvent, buf *Buffer) {
	if t.EventType == TextEventInsert {
		for _, d := range t.Deltas {
			buf.insert(d.Start, []byte(d.Text))
		}
	} else if t.EventType == TextEventRemove {
		for i, d := range t.Deltas {
			t.Deltas[i].Text = buf.remove(d.Start, d.End)
		}
	} else if t.EventType == TextEventReplace {
		for i, d := range t.Deltas {
			t.Deltas[i].Text = buf.remove(d.Start, d.End)
			buf.insert(d.Start, []byte(d.Text))
			t.Deltas[i].Start = d.Start
			t.Deltas[i].End = Loc{d.Start.X + Count(d.Text), d.Start.Y}
		}
		for i, j := 0, len(t.Deltas)-1; i < j; i, j = i+1, j-1 {
			t.Deltas[i], t.Deltas[j] = t.Deltas[j], t.Deltas[i]
		}
	}
}
// UndoTextEvent undoes a text event.
// Inverting EventType turns an insert into a remove and vice versa
// (replace is its own inverse since its deltas were reversed and updated).
func UndoTextEvent(t *TextEvent, buf *Buffer) {
	t.EventType = -t.EventType
	ExecuteTextEvent(t, buf)
}
// EventHandler executes text manipulations and allows undoing and redoing
type EventHandler struct {
	buf       *Buffer // buffer all events are applied to
	UndoStack *Stack  // events that can be undone
	RedoStack *Stack  // undone events that can be reapplied
}
// NewEventHandler returns a new EventHandler operating on buf,
// with empty undo and redo stacks.
func NewEventHandler(buf *Buffer) *EventHandler {
	return &EventHandler{
		buf:       buf,
		UndoStack: new(Stack),
		RedoStack: new(Stack),
	}
}
// ApplyDiff takes a string and runs the necessary insertion and deletion events to make
// the buffer equal to that string.
// This means that we can transform the buffer into any string and still preserve undo/redo
// through insert and delete events.
//
// The parameter was renamed from "new" to "target": it shadowed the
// predeclared identifier new.
func (eh *EventHandler) ApplyDiff(target string) {
	differ := dmp.New()
	diff := differ.DiffMain(eh.buf.String(), target, false)
	loc := eh.buf.Start()
	for _, d := range diff {
		switch d.Type {
		case dmp.DiffDelete:
			// Removed text: the buffer shrinks back to loc, so loc does not advance.
			eh.Remove(loc, loc.Move(Count(d.Text), eh.buf))
		case dmp.DiffInsert:
			eh.Insert(loc, d.Text)
			loc = loc.Move(Count(d.Text), eh.buf)
		default:
			// dmp.DiffEqual: skip over unchanged text.
			loc = loc.Move(Count(d.Text), eh.buf)
		}
	}
}
// Insert creates an insert text event and executes it.
// Afterwards every cursor and selection anchor in the buffer is shifted so
// it keeps pointing at the same logical text.
func (eh *EventHandler) Insert(start Loc, text string) {
	e := &TextEvent{
		C:         *eh.buf.cursors[eh.buf.curCursor],
		EventType: TextEventInsert,
		Deltas:    []Delta{{text, start, Loc{0, 0}}},
		Time:      time.Now(),
	}
	eh.Execute(e)
	charCount := Count(text)
	// The end location is only known after the insert has been executed.
	e.Deltas[0].End = start.Move(charCount, eh.buf)
	end := e.Deltas[0].End
	for _, c := range eh.buf.cursors {
		move := func(loc Loc) Loc {
			if start.Y != end.Y && loc.GreaterThan(start) {
				// Multi-line insert: anything after the insertion point moves down.
				loc.Y += end.Y - start.Y
			} else if loc.Y == start.Y && loc.GreaterEqual(start) {
				// Same-line insert: shift right by the inserted character count.
				loc = loc.Move(charCount, eh.buf)
			}
			return loc
		}
		c.Loc = move(c.Loc)
		c.CurSelection[0] = move(c.CurSelection[0])
		c.CurSelection[1] = move(c.CurSelection[1])
		c.OrigSelection[0] = move(c.OrigSelection[0])
		c.OrigSelection[1] = move(c.OrigSelection[1])
		c.LastVisualX = c.GetVisualX()
	}
}
// Remove creates a remove text event and executes it.
// Afterwards every cursor and selection anchor located after the removed
// region is shifted back so it keeps pointing at the same logical text.
func (eh *EventHandler) Remove(start, end Loc) {
	e := &TextEvent{
		C:         *eh.buf.cursors[eh.buf.curCursor],
		EventType: TextEventRemove,
		Deltas:    []Delta{{"", start, end}},
		Time:      time.Now(),
	}
	eh.Execute(e)
	for _, c := range eh.buf.cursors {
		move := func(loc Loc) Loc {
			if start.Y != end.Y && loc.GreaterThan(end) {
				// Multi-line removal: anything below the removed region moves up.
				loc.Y -= end.Y - start.Y
			} else if loc.Y == end.Y && loc.GreaterEqual(end) {
				// Same-line removal: shift left by the number of removed characters.
				loc = loc.Move(-Diff(start, end, eh.buf), eh.buf)
			}
			return loc
		}
		c.Loc = move(c.Loc)
		c.CurSelection[0] = move(c.CurSelection[0])
		c.CurSelection[1] = move(c.CurSelection[1])
		c.OrigSelection[0] = move(c.OrigSelection[0])
		c.OrigSelection[1] = move(c.OrigSelection[1])
		c.LastVisualX = c.GetVisualX()
	}
}
// MultipleReplace creates a single replace event from multiple deltas and
// executes it, so the whole batch is undone and redone as one unit.
func (eh *EventHandler) MultipleReplace(deltas []Delta) {
	e := &TextEvent{
		C:         *eh.buf.cursors[eh.buf.curCursor],
		EventType: TextEventReplace,
		Deltas:    deltas,
		Time:      time.Now(),
	}
	eh.Execute(e)
}
// Replace deletes from start to end and replaces it with the given string.
// Note that this produces two separate events (a remove and an insert),
// not a single TextEventReplace.
func (eh *EventHandler) Replace(start, end Loc, replace string) {
	eh.Remove(start, end)
	eh.Insert(start, replace)
}
// Execute a textevent and add it to the undo stack.
// Performing any new edit discards the redo stack.
func (eh *EventHandler) Execute(t *TextEvent) {
	if eh.RedoStack.Len() > 0 {
		eh.RedoStack = new(Stack)
	}
	eh.UndoStack.Push(t)
	ExecuteTextEvent(t, eh.buf)
}
// Undo the first event in the undo stack.
// Events whose timestamps are within undoThreshold milliseconds of the
// previously undone event are treated as one group and undone together
// (a sliding window: startTime advances with each undone event).
func (eh *EventHandler) Undo() {
	t := eh.UndoStack.Peek()
	if t == nil {
		return
	}
	startTime := t.Time.UnixNano() / int64(time.Millisecond)
	eh.UndoOneEvent()
	for {
		t = eh.UndoStack.Peek()
		if t == nil {
			return
		}
		// Stop once the gap to the next (older) event exceeds the threshold.
		if startTime-(t.Time.UnixNano()/int64(time.Millisecond)) > undoThreshold {
			return
		}
		// Slide the window to the event about to be undone.
		startTime = t.Time.UnixNano() / int64(time.Millisecond)
		eh.UndoOneEvent()
	}
}
// UndoOneEvent undoes one event
func (eh *EventHandler) UndoOneEvent() {
	// This event should be undone
	// Pop it off the stack
	t := eh.UndoStack.Pop()
	if t == nil {
		return
	}
	// Undo it
	// Modifies the text event
	UndoTextEvent(t, eh.buf)
	// Set the cursor in the right place
	teCursor := t.C
	if teCursor.Num >= 0 && teCursor.Num < len(eh.buf.cursors) {
		// Swap: store the current cursor state in the event (for redo)
		// and restore the cursor position recorded by the event.
		t.C = *eh.buf.cursors[teCursor.Num]
		eh.buf.cursors[teCursor.Num].Goto(teCursor)
	} else {
		// The cursor no longer exists; mark it invalid.
		teCursor.Num = -1
	}
	// Push it to the redo stack
	eh.RedoStack.Push(t)
}
// Redo the first event in the redo stack.
// Events whose timestamps are within undoThreshold milliseconds of the
// previously redone event are regrouped and redone together, mirroring the
// sliding-window grouping used by Undo.
func (eh *EventHandler) Redo() {
	t := eh.RedoStack.Peek()
	if t == nil {
		return
	}
	startTime := t.Time.UnixNano() / int64(time.Millisecond)
	eh.RedoOneEvent()
	for {
		t = eh.RedoStack.Peek()
		if t == nil {
			return
		}
		if (t.Time.UnixNano()/int64(time.Millisecond))-startTime > undoThreshold {
			return
		}
		// Slide the window forward, as Undo does. Previously startTime was
		// never updated here, so a chain of events that Undo grouped with a
		// sliding window could only be partially regrouped by Redo.
		startTime = t.Time.UnixNano() / int64(time.Millisecond)
		eh.RedoOneEvent()
	}
}
// RedoOneEvent redoes one event
func (eh *EventHandler) RedoOneEvent() {
	t := eh.RedoStack.Pop()
	if t == nil {
		return
	}
	// Modifies the text event. Undoing an already-undone event re-applies it.
	UndoTextEvent(t, eh.buf)
	// Swap cursor state with the event, mirroring UndoOneEvent.
	teCursor := t.C
	if teCursor.Num >= 0 && teCursor.Num < len(eh.buf.cursors) {
		t.C = *eh.buf.cursors[teCursor.Num]
		eh.buf.cursors[teCursor.Num].Goto(teCursor)
	} else {
		// The cursor no longer exists; mark it invalid.
		teCursor.Num = -1
	}
	eh.UndoStack.Push(t)
}
package report
import (
"github.com/vitessio/arewefastyet/go/storage"
"strconv"
"strings"
"github.com/vitessio/arewefastyet/go/storage/influxdb"
"github.com/vitessio/arewefastyet/go/tools/microbench"
"github.com/jung-kurt/gofpdf"
"github.com/vitessio/arewefastyet/go/tools/git"
"github.com/vitessio/arewefastyet/go/tools/macrobench"
)
// pdfMargin is the margin that is used while creating the pdf. It is used as
// the left, right, top and bottom margin (in mm, the unit the pdf is created with).
const pdfMargin = 15.0
// cellStyle is a struct that has the elements used for styling a cell in gofpdf
type cellStyle struct {
	textCol   [3]int // RGB colors of the text
	fillCol   [3]int // RGB colors of the background
	borderStr string // border style string (gofpdf CellFormat borderStr)
	alignStr  string // alignment style string (gofpdf CellFormat alignStr)
}
// cellStyles contains the different cell styles that we use in the pdf:
// index 0: light text on dark gray, bordered (metric label cells);
// index 1: dark text on white, borderless (value cells);
// index 2: white text on black, bordered (header/section cells).
var cellStyles = []cellStyle{
	{
		textCol:   [3]int{224, 224, 224},
		fillCol:   [3]int{64, 64, 64},
		borderStr: "1",
		alignStr:  "CM",
	}, {
		textCol:   [3]int{24, 24, 24},
		fillCol:   [3]int{255, 255, 255},
		borderStr: "",
		alignStr:  "C",
	}, {
		textCol:   [3]int{255, 255, 255},
		fillCol:   [3]int{0, 0, 0},
		borderStr: "1",
		alignStr:  "CM",
	},
}
// tableCell contains the string value along with the styling index to use
type tableCell struct {
	value string
	// styleIndex is the index of cellStyles to use for styling
	styleIndex int
	// linkUrl if non empty, will make the cell a clickable link to the given URL
	linkUrl string
}
// GenerateCompareReport is used to generate a comparison report between the 2 SHAs provided. It uses the client connection
// to read the results. It also takes as an argument the name of the report that will be generated.
// The report is a PDF with one table of macro-benchmark metrics and one of
// micro-benchmark metrics, each comparing fromSHA against toSHA.
func GenerateCompareReport(client storage.SQLClient, metricsClient *influxdb.Client, fromSHA, toSHA, reportFile string) error {
	// Compare macrobenchmark results for the 2 SHAs
	macrosMatrices, err := macrobench.CompareMacroBenchmarks(client, metricsClient, fromSHA, toSHA, macrobench.V3Planner)
	if err != nil {
		return err
	}
	// Compare microbenchmark results for the 2 SHAs
	microsMatrix, err := microbench.Compare(client, fromSHA, toSHA)
	if err != nil {
		return err
	}
	// Create a new pdf and set the margins
	pdf := gofpdf.New(gofpdf.OrientationPortrait, "mm", "A4", "")
	pdf.SetMargins(pdfMargin, pdfMargin, pdfMargin)
	// Add a page to start
	pdf.AddPage()
	pdf.SetAutoPageBreak(true, pdfMargin)
	// Set the font for the title
	pdf.SetFont("Arial", "B", 28)
	// Print the title
	pdf.WriteAligned(0, 10, "Comparison Results", "C")
	pdf.Ln(-1)
	pdf.Ln(2)
	if len(macrosMatrices) != 0 {
		// Print the subtitle
		writeSubtitle(pdf, "Macro-benchmarks")
		// Header row: "Metric" plus the two shortened SHAs, each linking to
		// the corresponding tree on GitHub.
		macroTable := [][]tableCell{
			{
				tableCell{value: "Metric", styleIndex: 2},
				tableCell{value: git.ShortenSHA(fromSHA), styleIndex: 2, linkUrl: "https://github.com/vitessio/vitess/tree/" + fromSHA + "/"},
				tableCell{value: git.ShortenSHA(toSHA), styleIndex: 2, linkUrl: "https://github.com/vitessio/vitess/tree/" + toSHA + "/"},
			},
		}
		// range over all the macrobenchmarks
		for key, value := range macrosMatrices {
			// the map stores the comparisonArrays
			macroCompArr := value.(macrobench.ComparisonArray)
			if len(macroCompArr) > 0 {
				macroComp := macroCompArr[0]
				// One section row naming the benchmark type, then one row per metric.
				macroTable = append(macroTable, []tableCell{{value: strings.ToUpper(key.String()), styleIndex: 2}})
				macroTable = append(macroTable, []tableCell{{value: "TPS", styleIndex: 0}, {value: convertFloatToString(macroComp.Reference.Result.TPS), styleIndex: 1}, {value: convertFloatToString(macroComp.Compare.Result.TPS), styleIndex: 1}})
				macroTable = append(macroTable, []tableCell{{value: "QPS Reads", styleIndex: 0}, {value: convertFloatToString(macroComp.Reference.Result.QPS.Reads), styleIndex: 1}, {value: convertFloatToString(macroComp.Compare.Result.QPS.Reads), styleIndex: 1}})
				macroTable = append(macroTable, []tableCell{{value: "QPS Writes", styleIndex: 0}, {value: convertFloatToString(macroComp.Reference.Result.QPS.Writes), styleIndex: 1}, {value: convertFloatToString(macroComp.Compare.Result.QPS.Writes), styleIndex: 1}})
				macroTable = append(macroTable, []tableCell{{value: "QPS Total", styleIndex: 0}, {value: convertFloatToString(macroComp.Reference.Result.QPS.Total), styleIndex: 1}, {value: convertFloatToString(macroComp.Compare.Result.QPS.Total), styleIndex: 1}})
				macroTable = append(macroTable, []tableCell{{value: "QPS Others", styleIndex: 0}, {value: convertFloatToString(macroComp.Reference.Result.QPS.Other), styleIndex: 1}, {value: convertFloatToString(macroComp.Compare.Result.QPS.Other), styleIndex: 1}})
				macroTable = append(macroTable, []tableCell{{value: "Latency", styleIndex: 0}, {value: convertFloatToString(macroComp.Reference.Result.Latency), styleIndex: 1}, {value: convertFloatToString(macroComp.Compare.Result.Latency), styleIndex: 1}})
				macroTable = append(macroTable, []tableCell{{value: "Errors", styleIndex: 0}, {value: convertFloatToString(macroComp.Reference.Result.Errors), styleIndex: 1}, {value: convertFloatToString(macroComp.Compare.Result.Errors), styleIndex: 1}})
				macroTable = append(macroTable, []tableCell{{value: "Reconnects", styleIndex: 0}, {value: convertFloatToString(macroComp.Reference.Result.Reconnects), styleIndex: 1}, {value: convertFloatToString(macroComp.Compare.Result.Reconnects), styleIndex: 1}})
				macroTable = append(macroTable, []tableCell{{value: "Time", styleIndex: 0}, {value: strconv.Itoa(macroComp.Reference.Result.Time), styleIndex: 1}, {value: strconv.Itoa(macroComp.Compare.Result.Time), styleIndex: 1}})
				macroTable = append(macroTable, []tableCell{{value: "Threads", styleIndex: 0}, {value: convertFloatToString(macroComp.Reference.Result.Threads), styleIndex: 1}, {value: convertFloatToString(macroComp.Compare.Result.Threads), styleIndex: 1}})
			}
		}
		// write the table to pdf
		writeTableToPdf(pdf, macroTable)
		pdf.AddPage()
	}
	if len(microsMatrix) != 0 {
		// Print the subtitle
		writeSubtitle(pdf, "Micro-benchmarks")
		microTable := [][]tableCell{
			{
				tableCell{value: "Metric", styleIndex: 2},
				tableCell{value: git.ShortenSHA(fromSHA), styleIndex: 2, linkUrl: "https://github.com/vitessio/vitess/tree/" + fromSHA + "/"},
				tableCell{value: git.ShortenSHA(toSHA), styleIndex: 2, linkUrl: "https://github.com/vitessio/vitess/tree/" + toSHA + "/"},
			},
		}
		// range over all the microbenchmarks
		for _, microComp := range microsMatrix {
			microTable = append(microTable, []tableCell{{value: microComp.PkgName + "." + microComp.SubBenchmarkName, styleIndex: 2}})
			microTable = append(microTable, []tableCell{{value: "Ops", styleIndex: 0}, {value: convertFloatToString(microComp.Current.Ops), styleIndex: 1}, {value: convertFloatToString(microComp.Last.Ops), styleIndex: 1}})
			microTable = append(microTable, []tableCell{{value: "NSPerOp", styleIndex: 0}, {value: convertFloatToString(microComp.Current.NSPerOp), styleIndex: 1}, {value: convertFloatToString(microComp.Last.NSPerOp), styleIndex: 1}})
			microTable = append(microTable, []tableCell{{value: "MBPerSec", styleIndex: 0}, {value: convertFloatToString(microComp.Current.MBPerSec), styleIndex: 1}, {value: convertFloatToString(microComp.Last.MBPerSec), styleIndex: 1}})
			microTable = append(microTable, []tableCell{{value: "BytesPerOp", styleIndex: 0}, {value: convertFloatToString(microComp.Current.BytesPerOp), styleIndex: 1}, {value: convertFloatToString(microComp.Last.BytesPerOp), styleIndex: 1}})
			microTable = append(microTable, []tableCell{{value: "AllocsPerOp", styleIndex: 0}, {value: convertFloatToString(microComp.Current.AllocsPerOp), styleIndex: 1}, {value: convertFloatToString(microComp.Last.AllocsPerOp), styleIndex: 1}})
			microTable = append(microTable, []tableCell{{value: "Ratio of NSPerOp", styleIndex: 0}, {value: convertFloatToString(microComp.Diff.NSPerOp), styleIndex: 1}})
		}
		// write the table to pdf
		writeTableToPdf(pdf, microTable)
	}
	err = pdf.OutputFileAndClose(reportFile)
	return err
}
// convertFloatToString renders a float64 in plain (non-exponent) notation,
// using the minimum number of digits needed to round-trip the value.
func convertFloatToString(in float64) string {
	const shortest = -1 // let strconv choose the shortest exact representation
	return strconv.FormatFloat(in, 'f', shortest, 64)
}
// writeSubtitle is used to write a subtitle in the pdf provided,
// centered in bold 20pt Arial, followed by a line break.
func writeSubtitle(pdf *gofpdf.Fpdf, subtitle string) {
	pdf.SetFont("Arial", "B", 20)
	pdf.WriteAligned(0, 10, subtitle, "C")
	pdf.Ln(-1)
	pdf.Ln(2)
}
// writeTableToPdf is used to write a table to the pdf provided.
// Column widths are divided evenly per row, cell text is wrapped to fit,
// and a page break is inserted when a row would overflow the page.
func writeTableToPdf(pdf *gofpdf.Fpdf, table [][]tableCell) {
	pageWidth, pageHeight := pdf.GetPageSize()
	lineHt := 5.5
	cellGap := 2.0
	// cellType caches the wrapped lines and resulting height of one cell.
	type cellType struct {
		str  string
		list [][]byte
		ht   float64
	}
	var (
		cellList []cellType
		cell     cellType
	)
	pdf.SetFont("Arial", "", 14)
	// Rows
	y := pdf.GetY()
	for rowJ := 0; rowJ < len(table); rowJ++ {
		cellList = nil
		colCount := len(table[rowJ])
		colWd := (pageWidth - pdfMargin - pdfMargin) / float64(colCount)
		maxHt := lineHt
		// Cell height calculation loop
		// required because a cell might span multiple lines but then the entire row must span that many lines
		for colJ := 0; colJ < colCount; colJ++ {
			cell.str = table[rowJ][colJ].value
			cell.list = pdf.SplitLines([]byte(cell.str), colWd-cellGap-cellGap)
			cell.ht = float64(len(cell.list)) * lineHt
			if cell.ht > maxHt {
				maxHt = cell.ht
			}
			cellList = append(cellList, cell)
		}
		// Cell render loop
		x := pdfMargin
		for colJ := 0; colJ < len(cellList); colJ++ {
			// Start a new page if this row would run past the bottom margin.
			if y+maxHt+cellGap+cellGap > pageHeight-pdfMargin-pdfMargin {
				pdf.AddPage()
				y = pdf.GetY()
			}
			pdf.Rect(x, y, colWd, maxHt+cellGap+cellGap, "D")
			cell = cellList[colJ]
			// Vertically center the (possibly shorter) cell within the row.
			cellY := y + cellGap + (maxHt-cell.ht)/2
			// Get the styling from the index
			styling := cellStyles[table[rowJ][colJ].styleIndex]
			// Use it to set the colours
			pdf.SetTextColor(styling.textCol[0], styling.textCol[1], styling.textCol[2])
			pdf.SetFillColor(styling.fillCol[0], styling.fillCol[1], styling.fillCol[2])
			for splitJ := 0; splitJ < len(cell.list); splitJ++ {
				pdf.SetXY(x+cellGap, cellY)
				pdf.CellFormat(colWd-cellGap-cellGap, lineHt, string(cell.list[splitJ]), styling.borderStr, 0,
					styling.alignStr, true, 0, table[rowJ][colJ].linkUrl)
				cellY += lineHt
			}
			x += colWd
		}
		y += maxHt + cellGap + cellGap
	}
	pdf.Ln(-1)
	pdf.Ln(4)
}
package molecule
import (
"errors"
"regexp"
"sort"
"strconv"
"github.com/524D/galms/elements"
)
// This package parses chemical formulas.

// AtomsCount contains an atom index and atom count
type AtomsCount struct {
	idx   int // Element index
	count int // Number of atoms of this element
}
// Molecule represents a single molecule as a list of element/atom counts,
// kept sorted by element index.
type Molecule struct {
	atoms []AtomsCount
}
// ErrUnknownAACode is returned when a single-letter amino acid code is not
// recognized. The message was lowercased to follow the Go convention that
// error strings are not capitalized (staticcheck ST1005).
var ErrUnknownAACode = errors.New("unknown amino acid code")
// Atoms returns the atoms/count of a molecule.
// The returned slice is the molecule's internal storage, not a copy.
func (m *Molecule) Atoms() []AtomsCount {
	return m.atoms
}
// IdxCount returns the element index and atom count of an atom in a molecule.
func (ac *AtomsCount) IdxCount() (int, int) {
	return ac.idx, ac.count
}
// formulaRE matches one element of a chemical formula: a symbol (an
// uppercase letter optionally followed by one lowercase letter) plus an
// optional atom count. Compiled once at package level rather than on every
// SimpleFormula call.
var formulaRE = regexp.MustCompile(`([A-Z][a-z]?)([0-9]*)`)

// SimpleFormula converts a chemical formula to a structure that contains elements/atom counts.
// Repeated elements are merged into a single entry, and the resulting atoms
// are sorted by element index.
func SimpleFormula(f string, e *elements.Elems) (Molecule, error) {
	var m Molecule
	// Map previously encountered element index into position in 'atoms'
	prevEl := make(map[int]int)
	mts := formulaRE.FindAllStringSubmatch(f, -1)
	for _, em := range mts {
		var elCount AtomsCount
		var err error
		// em[1] holds the element symbol
		elCount.idx, err = e.ElemIdx(em[1])
		if err != nil {
			return m, err
		}
		if em[2] == `` {
			// No explicit count means a single atom.
			elCount.count = 1
		} else {
			elCount.count, err = strconv.Atoi(em[2])
			if err != nil {
				return m, err
			}
		}
		// If we already encountered this element previously,
		// just add the atom count
		if i, ok := prevEl[elCount.idx]; ok {
			m.atoms[i].count += elCount.count
		} else {
			prevEl[elCount.idx] = len(m.atoms)
			m.atoms = append(m.atoms, elCount)
		}
	}
	// Sort atoms on element index
	sort.Slice(m.atoms, func(i, j int) bool { return m.atoms[i].idx < m.atoms[j].idx })
	return m, nil
}
// ChemicalFormula renders a molecule as a chemical formula string.
// Elements with a zero or negative count are omitted, and a count of one
// is written without a number.
func ChemicalFormula(m Molecule, e *elements.Elems) (string, error) {
	var formula string
	for _, ac := range m.atoms {
		if ac.count <= 0 {
			// Skip elements without a positive atom count.
			continue
		}
		sym, err := e.Symbol(ac.idx)
		if err != nil {
			return "", err
		}
		formula += sym
		if ac.count > 1 {
			formula += strconv.Itoa(ac.count)
		}
	}
	return formula, nil
}
// AminoAcid returns the molecule (minus H2O) for a single letter amino acid code.
// It returns ErrUnknownAACode for codes that have no entry in the table
// (i.e. InitCommonMolecules did not populate them).
func AminoAcid(aa byte) (Molecule, error) {
	var m Molecule
	if aaMol[aa].atoms == nil {
		return m, ErrUnknownAACode
	}
	return aaMol[aa], nil
}
// PepProt returns the molecule for a peptide or protein given as a string
// of single-letter amino acid codes. One water molecule is added at the end
// (the residue formulas in the table are "minus H2O").
func PepProt(p string) (Molecule, error) {
	var m Molecule
	for _, aa := range p {
		// The A-Z range check also guards the table lookup: ranging over a
		// string yields runes, which could otherwise index outside aaMol.
		if aa >= 'A' && aa <= 'Z' && aaMol[aa].atoms != nil {
			m = Add(m, aaMol[aa])
		} else {
			return Molecule{}, ErrUnknownAACode
		}
	}
	m = Add(m, water)
	return m, nil
}
// Add two molecules. The atoms in the molecules must be sorted by atom index
// (as produced by SimpleFormula); the result is likewise sorted. A new
// Molecule is returned; neither input is modified.
func Add(m1 Molecule, m2 Molecule) Molecule {
	var m Molecule
	// Make sure 2 have enough room even if both molecules contain
	// completely different atoms
	m.atoms = make([]AtomsCount, len(m1.atoms), len(m1.atoms)+len(m2.atoms))
	copy(m.atoms, m1.atoms)
	mi := 0
	for i, a := range m2.atoms {
		// Advance to the first atom in m with index >= a.idx.
		for mi < len(m.atoms) && m.atoms[mi].idx < a.idx {
			mi++
		}
		// If we are at the end of the first molecules atoms, append the rest of the
		// second one, and we are done
		if mi >= len(m.atoms) {
			m.atoms = append(m.atoms, m2.atoms[i:]...)
			return m
		}
		// If same atom index, add the count of this atom
		if m.atoms[mi].idx == a.idx {
			m.atoms[mi].count += a.count
		} else {
			// m2 contains an atom index that was not in m, insert the atom.
			// This is 'expensive', but normally doesn't happen often.
			// Grow by one (the appended value is overwritten by the shift),
			// move the tail right, then drop 'a' into place at mi.
			m.atoms = append(m.atoms, a)
			copy(m.atoms[mi+1:], m.atoms[mi:])
			m.atoms[mi] = a
		}
	}
	return m
}
// Conversion table for translating amino acids to molecules.
// Initialized by InitCommonMolecules; entries for unknown codes remain nil.
var aaMol [256]Molecule

// water holds the H2O molecule; initialized by InitCommonMolecules.
var water Molecule

// aaForm couples a single-letter amino acid code with its residue formula.
type aaForm struct {
	code    byte
	formula string
}
// InitCommonMolecules initializes some common molecules: the amino acid
// residue table (aaMol) and water.
// It must be called after (re-)initializing elements.Elems.
func InitCommonMolecules(e *elements.Elems) {
	// Set up amino acid translation table.
	// Formulas are residue formulas (amino acid minus H2O).
	// NOTE(review): the entries for 'O' and 'U' do not match the standard
	// pyrrolysine (C12H19N3O2) and selenocysteine (C3H5NOSe) residue
	// formulas — verify these are intentional.
	aaList := []aaForm{
		{code: 'A', formula: `C3H5NO`},
		{code: 'C', formula: `C3H5NOS`},
		{code: 'D', formula: `C4H5NO3`},
		{code: 'E', formula: `C5H7NO3`},
		{code: 'F', formula: `C9H9NO`},
		{code: 'G', formula: `C2H3NO`},
		{code: 'H', formula: `C6H7N3O`},
		{code: 'I', formula: `C6H11NO`},
		{code: 'K', formula: `C6H12N2O`},
		{code: 'L', formula: `C6H11NO`},
		{code: 'M', formula: `C5H9NOS`},
		{code: 'N', formula: `C4H6N2O2`},
		{code: 'O', formula: `C5H7NO2`},
		{code: 'P', formula: `C5H7NO`},
		{code: 'Q', formula: `C5H8N2O2`},
		{code: 'R', formula: `C6H12N4O`},
		{code: 'S', formula: `C3H5NO2`},
		{code: 'T', formula: `C4H7NO2`},
		{code: 'U', formula: `C5H5NO2`},
		{code: 'V', formula: `C5H9NO`},
		{code: 'W', formula: `C11H10N2O`},
		{code: 'Y', formula: `C9H9NO2`},
	}
	// Clear any previous table contents before re-populating.
	for i := range aaMol {
		aaMol[i].atoms = nil
	}
	for _, a := range aaList {
		aaMol[a.code], _ = SimpleFormula(a.formula, e)
	}
	water, _ = SimpleFormula(`H2O`, e)
}
package util
// Traversal defines a basic interface to perform traversals.
type Traversal interface {
	// Edges should return the neighbours of node "u".
	Edges(u T) []T
	// Visited should return true if node "u" has already been visited in this
	// traversal. If the same traversal is used multiple times, the state that
	// tracks visited nodes should be reset.
	// NOTE(review): DFS/BFS in this file never mark nodes visited themselves,
	// so implementations are expected to record the visit as a side effect of
	// this call — confirm against the concrete implementations.
	Visited(u T) bool
}
// Equals should return true if node "u" equals node "v".
type Equals func(u T, v T) bool

// Iter is called for each traversed node; it should return true to
// indicate that the traversal must stop.
type Iter func(u T) bool
// DFS performs a depth first traversal calling f for each node starting from u.
// If f returns true, traversal stops and DFS returns true.
func DFS(t Traversal, f Iter, u T) bool {
	stack := NewLIFO(u)
	for stack.Size() > 0 {
		node, _ := stack.Pop()
		if t.Visited(node) {
			continue
		}
		if f(node) {
			return true
		}
		for _, neighbor := range t.Edges(node) {
			stack.Push(neighbor)
		}
	}
	return false
}
// BFS performs a breadth first traversal calling f for each node starting from
// u. If f returns true, traversal stops and BFS returns true.
func BFS(t Traversal, f Iter, u T) bool {
	queue := NewFIFO(u)
	for queue.Size() > 0 {
		node, _ := queue.Pop()
		if t.Visited(node) {
			continue
		}
		if f(node) {
			return true
		}
		for _, neighbor := range t.Edges(node) {
			queue.Push(neighbor)
		}
	}
	return false
}
// DFSPath returns a path from node a to node z found by performing
// a depth first traversal. If no path is found, an empty slice is returned.
func DFSPath(t Traversal, eq Equals, a, z T) []T {
	// dfsRecursive builds the path from z back towards a; flip it in place.
	path := dfsRecursive(t, eq, a, z, []T{})
	for l, r := 0, len(path)-1; l < r; l, r = l+1, r-1 {
		path[l], path[r] = path[r], path[l]
	}
	return path
}
// dfsRecursive searches depth-first from u for a node equal to z. On
// success it returns the path accumulated in reverse order (z first); each
// unwinding frame appends its own node. An empty slice means no path.
func dfsRecursive(t Traversal, eq Equals, u, z T, path []T) []T {
	if t.Visited(u) {
		return path
	}
	for _, v := range t.Edges(u) {
		if eq(v, z) {
			// Found the target: start the reversed path with z, then u.
			path = append(path, z)
			path = append(path, u)
			return path
		}
		if p := dfsRecursive(t, eq, v, z, path); len(p) > 0 {
			// A deeper call found z; add the current node on the way back up.
			path = append(p, u)
			return path
		}
	}
	return path
}
package cache
import "math"
// bloomFilter is Bloom Filter implementation used as a cache admission policy.
// See http://billmill.org/bloomfilter-tutorial/
type bloomFilter struct {
	numHashes uint32   // number of hash probes per element
	bitsMask  uint32   // bit-vector size minus one (size is a power of two, so this is a mask)
	bits      []uint64 // filter bit vector, packed 64 bits per word
}
// init initializes bloomFilter with the given expected insertions ins and
// false positive probability fpp.
func (f *bloomFilter) init(ins int, fpp float64) {
	ln2 := math.Log(2.0)
	// Optimal bit count m = -n*ln(p)/ln(2)^2, rounded up to a power of two
	// so indices can be reduced with a bit mask instead of a modulo.
	factor := -math.Log(fpp) / (ln2 * ln2)
	numBits := nextPowerOfTwo(uint32(float64(ins) * factor))
	if numBits == 0 {
		numBits = 1
	}
	f.bitsMask = numBits - 1
	if ins == 0 {
		f.numHashes = 1
	} else {
		// Optimal probe count k = ln(2) * m / n.
		f.numHashes = uint32(ln2 * float64(numBits) / float64(ins))
	}
	// Reuse the existing bit vector when its size already matches.
	size := int(numBits+63) / 64
	if len(f.bits) != size {
		f.bits = make([]uint64, size)
	} else {
		f.reset()
	}
}
// put inserts a hash value into the bloom filter and reports whether all
// of its probe bits were already set (i.e. the value may already be present).
func (f *bloomFilter) put(h uint64) bool {
	lo, hi := uint32(h), uint32(h>>32)
	seen := uint(1)
	for i := uint32(0); i < f.numHashes; i++ {
		// Every probe bit must be set, even once the answer is known,
		// so no early exit here.
		seen &= f.set((lo + i*hi) & f.bitsMask)
	}
	return seen == 1
}
// contains reports whether the given hash may be in the filter
// (false positives are possible, false negatives are not).
func (f *bloomFilter) contains(h uint64) bool {
	lo, hi := uint32(h), uint32(h>>32)
	for i := uint32(0); i < f.numHashes; i++ {
		if f.get((lo+i*hi)&f.bitsMask) == 0 {
			return false
		}
	}
	return true
}
// set sets bit i in the bit vector and returns the bit's previous value (0 or 1).
func (f *bloomFilter) set(i uint32) uint {
	word, bit := i/64, i%64
	mask := uint64(1) << bit
	prev := (f.bits[word] & mask) >> bit
	f.bits[word] |= mask
	return uint(prev)
}
// get returns bit i of the bit vector (0 or 1).
func (f *bloomFilter) get(i uint32) uint {
	word, bit := i/64, i%64
	return uint((f.bits[word] >> bit) & 1)
}
// reset clears the bloom filter.
func (f *bloomFilter) reset() {
for i := range f.bits {
f.bits[i] = 0
}
} | filter.go | 0.800731 | 0.544983 | filter.go | starcoder |
package onshape
import (
"encoding/json"
)
// BTPStatementIf276 struct for BTPStatementIf276.
// NOTE(review): this file follows the OpenAPI-generator accessor pattern —
// presumably generated code; prefer regenerating over hand-editing.
type BTPStatementIf276 struct {
	BTPStatement269
	BtType       *string          `json:"btType,omitempty"`
	Condition    *BTPExpression9  `json:"condition,omitempty"`
	ElseBody     *BTPStatement269 `json:"elseBody,omitempty"`
	SpaceAfterIf *BTPSpace10      `json:"spaceAfterIf,omitempty"`
	ThenBody     *BTPStatement269 `json:"thenBody,omitempty"`
}
// NewBTPStatementIf276 instantiates a new BTPStatementIf276 object.
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed.
func NewBTPStatementIf276() *BTPStatementIf276 {
	this := BTPStatementIf276{}
	return &this
}

// NewBTPStatementIf276WithDefaults instantiates a new BTPStatementIf276 object.
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set.
func NewBTPStatementIf276WithDefaults() *BTPStatementIf276 {
	this := BTPStatementIf276{}
	return &this
}
// GetBtType returns the BtType field value if set, zero value otherwise.
func (o *BTPStatementIf276) GetBtType() string {
	if o == nil || o.BtType == nil {
		var ret string
		return ret
	}
	return *o.BtType
}

// GetBtTypeOk returns a tuple with the BtType field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTPStatementIf276) GetBtTypeOk() (*string, bool) {
	if o == nil || o.BtType == nil {
		return nil, false
	}
	return o.BtType, true
}

// HasBtType reports whether the BtType field has been set.
func (o *BTPStatementIf276) HasBtType() bool {
	if o != nil && o.BtType != nil {
		return true
	}
	return false
}

// SetBtType gets a reference to the given string and assigns it to the BtType field.
func (o *BTPStatementIf276) SetBtType(v string) {
	o.BtType = &v
}
// GetCondition returns the Condition field value if set, zero value otherwise.
func (o *BTPStatementIf276) GetCondition() BTPExpression9 {
	if o == nil || o.Condition == nil {
		var ret BTPExpression9
		return ret
	}
	return *o.Condition
}

// GetConditionOk returns a tuple with the Condition field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTPStatementIf276) GetConditionOk() (*BTPExpression9, bool) {
	if o == nil || o.Condition == nil {
		return nil, false
	}
	return o.Condition, true
}

// HasCondition reports whether the Condition field has been set.
func (o *BTPStatementIf276) HasCondition() bool {
	if o != nil && o.Condition != nil {
		return true
	}
	return false
}

// SetCondition gets a reference to the given BTPExpression9 and assigns it to the Condition field.
func (o *BTPStatementIf276) SetCondition(v BTPExpression9) {
	o.Condition = &v
}
// GetElseBody returns the ElseBody field value if set, zero value otherwise.
func (o *BTPStatementIf276) GetElseBody() BTPStatement269 {
	if o == nil || o.ElseBody == nil {
		var ret BTPStatement269
		return ret
	}
	return *o.ElseBody
}

// GetElseBodyOk returns a tuple with the ElseBody field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTPStatementIf276) GetElseBodyOk() (*BTPStatement269, bool) {
	if o == nil || o.ElseBody == nil {
		return nil, false
	}
	return o.ElseBody, true
}

// HasElseBody reports whether the ElseBody field has been set.
func (o *BTPStatementIf276) HasElseBody() bool {
	if o != nil && o.ElseBody != nil {
		return true
	}
	return false
}

// SetElseBody gets a reference to the given BTPStatement269 and assigns it to the ElseBody field.
func (o *BTPStatementIf276) SetElseBody(v BTPStatement269) {
	o.ElseBody = &v
}
// GetSpaceAfterIf returns the SpaceAfterIf field value if set, zero value otherwise.
func (o *BTPStatementIf276) GetSpaceAfterIf() BTPSpace10 {
	if o == nil || o.SpaceAfterIf == nil {
		var ret BTPSpace10
		return ret
	}
	return *o.SpaceAfterIf
}

// GetSpaceAfterIfOk returns a tuple with the SpaceAfterIf field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTPStatementIf276) GetSpaceAfterIfOk() (*BTPSpace10, bool) {
	if o == nil || o.SpaceAfterIf == nil {
		return nil, false
	}
	return o.SpaceAfterIf, true
}

// HasSpaceAfterIf reports whether the SpaceAfterIf field has been set.
func (o *BTPStatementIf276) HasSpaceAfterIf() bool {
	if o != nil && o.SpaceAfterIf != nil {
		return true
	}
	return false
}

// SetSpaceAfterIf gets a reference to the given BTPSpace10 and assigns it to the SpaceAfterIf field.
func (o *BTPStatementIf276) SetSpaceAfterIf(v BTPSpace10) {
	o.SpaceAfterIf = &v
}
// GetThenBody returns the ThenBody field value if set, zero value otherwise.
func (o *BTPStatementIf276) GetThenBody() BTPStatement269 {
	if o == nil || o.ThenBody == nil {
		var ret BTPStatement269
		return ret
	}
	return *o.ThenBody
}

// GetThenBodyOk returns a tuple with the ThenBody field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTPStatementIf276) GetThenBodyOk() (*BTPStatement269, bool) {
	if o == nil || o.ThenBody == nil {
		return nil, false
	}
	return o.ThenBody, true
}

// HasThenBody reports whether the ThenBody field has been set.
func (o *BTPStatementIf276) HasThenBody() bool {
	if o != nil && o.ThenBody != nil {
		return true
	}
	return false
}

// SetThenBody gets a reference to the given BTPStatement269 and assigns it to the ThenBody field.
func (o *BTPStatementIf276) SetThenBody(v BTPStatement269) {
	o.ThenBody = &v
}
// MarshalJSON serializes the embedded BTPStatement269 into a map first,
// then overlays this type's own optional fields on top of it.
func (o BTPStatementIf276) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	serializedBTPStatement269, errBTPStatement269 := json.Marshal(o.BTPStatement269)
	if errBTPStatement269 != nil {
		return []byte{}, errBTPStatement269
	}
	errBTPStatement269 = json.Unmarshal([]byte(serializedBTPStatement269), &toSerialize)
	if errBTPStatement269 != nil {
		return []byte{}, errBTPStatement269
	}
	// Only set (non-nil) optional fields are emitted.
	if o.BtType != nil {
		toSerialize["btType"] = o.BtType
	}
	if o.Condition != nil {
		toSerialize["condition"] = o.Condition
	}
	if o.ElseBody != nil {
		toSerialize["elseBody"] = o.ElseBody
	}
	if o.SpaceAfterIf != nil {
		toSerialize["spaceAfterIf"] = o.SpaceAfterIf
	}
	if o.ThenBody != nil {
		toSerialize["thenBody"] = o.ThenBody
	}
	return json.Marshal(toSerialize)
}
// NullableBTPStatementIf276 wraps a BTPStatementIf276 pointer together with
// an explicit "set" flag, distinguishing an unset value from JSON null.
type NullableBTPStatementIf276 struct {
	value *BTPStatementIf276
	isSet bool
}

// Get returns the wrapped value (may be nil).
func (v NullableBTPStatementIf276) Get() *BTPStatementIf276 {
	return v.value
}

// Set stores val and marks the value as set.
func (v *NullableBTPStatementIf276) Set(val *BTPStatementIf276) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether Set (or UnmarshalJSON) has been called.
func (v NullableBTPStatementIf276) IsSet() bool {
	return v.isSet
}

// Unset clears both the value and the set flag.
func (v *NullableBTPStatementIf276) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullableBTPStatementIf276 returns a wrapper around val marked as set.
func NewNullableBTPStatementIf276(val *BTPStatementIf276) *NullableBTPStatementIf276 {
	return &NullableBTPStatementIf276{value: val, isSet: true}
}

// MarshalJSON encodes the wrapped value (null when the pointer is nil).
func (v NullableBTPStatementIf276) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}

// UnmarshalJSON decodes into the wrapped value and marks it as set.
func (v *NullableBTPStatementIf276) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
package comptop
// NewSimplex adds a Simplex to c.
// All lower dimensional faces of the new Simplex are computed and automatically added to c.
func (c *Complex) NewSimplex(base ...Index) *Simplex {
	if c.chainGroups == nil {
		c.chainGroups = ChainGroups{}
	}
	dim := Dim(len(base)) - 1
	if dim > c.dim {
		c.dim = dim
	}
	s := &simplex{base: base}
	var newSimplex *Simplex
	// Depth-first walk over the simplex and all of its faces.
	stack := []*simplex{s}
	for len(stack) > 0 {
		n := len(stack) - 1
		// pop next simplex from stack
		ss := stack[n]
		stack = stack[:n]
		// Skip this simplex if its already in the complex
		if smplx := c.GetSimplex(ss.base...); smplx != nil {
			if smplx.Dim() == dim {
				// The requested simplex itself already exists; return it as-is.
				return smplx
			}
			continue
		}
		// Add this simplex to the appropriate chain group
		p := ss.dim()
		smplx := &Simplex{
			simplex: *ss,
			complex: c,
		}
		if p == 0 {
			// A vertex is indexed by its single base element.
			smplx.index = ss.base[0]
		}
		group := c.chainGroups[p]
		if group == nil {
			group = c.newChainGroup(p)
			c.chainGroups[p] = group
		}
		group.addSimplex(smplx)
		// The first simplex created is the top-level one the caller asked for.
		if newSimplex == nil {
			newSimplex = smplx
		}
		if p == 0 {
			continue
		}
		// Compute the boundary and add all its simplices to the stack
		for _, sss := range ss.d() {
			stack = append(stack, sss)
		}
	}
	// cached results should be reset
	c.resetCache()
	return newSimplex
}
// NewSimplices adds multiple simplices to c.
// All lower dimensional faces of each new Simplex are computed and automatically added to c.
func (c *Complex) NewSimplices(bases ...Base) *SimplicialSet {
	if c.chainGroups == nil {
		c.chainGroups = ChainGroups{}
	}

	// Collects the top-level simplex (new or already existing) for each base.
	set := map[*Simplex]struct{}{}

	for _, base := range bases {
		dim := Dim(len(base)) - 1
		if dim > c.dim {
			c.dim = dim
		}

		s := &simplex{base: base}
		var newSimplex *Simplex

		stack := []*simplex{s}
		// topLevel is true only while the outermost simplex for this base is
		// being processed; it lets an already-existing top-level simplex be
		// included in the result set.
		topLevel := true
		for len(stack) > 0 {
			n := len(stack) - 1
			// pop next simplex from stack
			ss := stack[n]
			stack = stack[:n]

			// Skip this simplex if it's already in the complex
			if smplx := c.GetSimplex(ss.base...); smplx != nil {
				if topLevel {
					set[smplx] = struct{}{}
				}
				topLevel = false
				continue
			}
			topLevel = false

			// Add this simplex to the appropriate chain group
			p := ss.dim()
			smplx := &Simplex{
				simplex: *ss,
				complex: c,
			}
			if p == 0 {
				smplx.index = ss.base[0]
			}
			group := c.chainGroups[p]
			if group == nil {
				group = c.newChainGroup(p)
				c.chainGroups[p] = group
			}
			group.addSimplex(smplx)

			if newSimplex == nil {
				newSimplex = smplx
			}

			if p == 0 {
				continue
			}

			// Compute the boundary and add all its simplices to the stack
			for _, sss := range ss.d() {
				stack = append(stack, sss)
			}
		}
		if newSimplex != nil {
			set[newSimplex] = struct{}{}
		}
	}

	// cached results should be reset
	c.resetCache()

	return &SimplicialSet{set: set}
}
// DataProvider is used to attach user-defined data to simplices.
// It is invoked once per newly created simplex with the simplex's dimension,
// index, and vertex base; the return value is stored in Simplex.Data.
type DataProvider func(Dim, Index, Base) interface{}
// NewSimplexWithData adds a Simplex to c while using dp to attach data to each newly created Simplex.
// All lower dimensional faces of the new Simplex are computed and automatically added to c.
// NOTE(review): if the top-level simplex already exists, dp is never invoked
// and the existing Data is left untouched — confirm this is intended.
func (c *Complex) NewSimplexWithData(dp DataProvider, base ...Index) *Simplex {
	if c.chainGroups == nil {
		c.chainGroups = ChainGroups{}
	}

	dim := Dim(len(base)) - 1
	if dim > c.dim {
		c.dim = dim
	}

	s := &simplex{base: base}
	var newSimplex *Simplex

	stack := []*simplex{s}
	for len(stack) > 0 {
		n := len(stack) - 1
		// pop next simplex from stack
		ss := stack[n]
		stack = stack[:n]

		// Skip this simplex if it's already in the complex
		if smplx := c.GetSimplex(ss.base...); smplx != nil {
			// Top-level simplex already present: all faces exist as well.
			if smplx.Dim() == dim {
				return smplx
			}
			continue
		}

		// Add this simplex to the appropriate chain group
		p := ss.dim()
		smplx := &Simplex{
			simplex: *ss,
			complex: c,
		}
		if p == 0 {
			smplx.index = ss.base[0]
		}
		group := c.chainGroups[p]
		if group == nil {
			group = c.newChainGroup(p)
			c.chainGroups[p] = group
		}
		group.addSimplex(smplx)

		// Add the data provided by the DataProvider
		smplx.Data = dp(p, smplx.index, base)

		if newSimplex == nil {
			newSimplex = smplx
		}

		if p == 0 {
			continue
		}

		// Compute the boundary and add all its simplices to the stack
		for _, sss := range ss.d() {
			stack = append(stack, sss)
		}
	}

	// cached results should be reset
	c.resetCache()

	return newSimplex
}
// NewSimplicesWithData adds multiple simplices to c, using dp to attach data to each newly created Simplex.
// All lower dimensional faces of each new Simplex are computed and automatically added to c.
func (c *Complex) NewSimplicesWithData(dp DataProvider, bases ...Base) *SimplicialSet {
	if c.chainGroups == nil {
		c.chainGroups = ChainGroups{}
	}

	// Collects the top-level simplex created for each base.
	set := map[*Simplex]struct{}{}

	for _, base := range bases {
		dim := Dim(len(base)) - 1
		if dim > c.dim {
			c.dim = dim
		}

		s := &simplex{base: base}
		var newSimplex *Simplex

		stack := []*simplex{s}
		for len(stack) > 0 {
			n := len(stack) - 1
			// pop next simplex from stack
			ss := stack[n]
			stack = stack[:n]

			// Skip this simplex if it's already in the complex
			if smplx := c.GetSimplex(ss.base...); smplx != nil {
				continue
			}

			// Add this simplex to the appropriate chain group
			p := ss.dim()
			smplx := &Simplex{
				simplex: *ss,
				complex: c,
			}
			if p == 0 {
				smplx.index = ss.base[0]
			}
			group := c.chainGroups[p]
			if group == nil {
				group = c.newChainGroup(p)
				c.chainGroups[p] = group
			}
			group.addSimplex(smplx)

			// Add the data provided by the DataProvider
			smplx.Data = dp(p, smplx.index, base)

			if newSimplex == nil {
				newSimplex = smplx
			}

			if p == 0 {
				continue
			}

			// Compute the boundary and add all its simplices to the stack
			for _, sss := range ss.d() {
				stack = append(stack, sss)
			}
		}
		// Bug fix: guard against inserting a nil key when every simplex for
		// this base already existed in the complex (newSimplex stays nil).
		// This matches the behavior of NewSimplices.
		if newSimplex != nil {
			set[newSimplex] = struct{}{}
		}
	}

	// cached results should be reset
	c.resetCache()

	return &SimplicialSet{set: set}
}
package onshape
import (
"encoding/json"
)
// BTEditingLogic2350 struct for BTEditingLogic2350.
// Generated OpenAPI model: every field is an optional pointer so that an
// unset field can be distinguished from its zero value and omitted from JSON.
type BTEditingLogic2350 struct {
	FunctionName             *string `json:"functionName,omitempty"`
	WantsHiddenBodies        *bool   `json:"wantsHiddenBodies,omitempty"`
	WantsIsCreating          *bool   `json:"wantsIsCreating,omitempty"`
	WantsSpecifiedParameters *bool   `json:"wantsSpecifiedParameters,omitempty"`
}
// NewBTEditingLogic2350 instantiates a new BTEditingLogic2350 object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewBTEditingLogic2350() *BTEditingLogic2350 {
	this := BTEditingLogic2350{}
	return &this
}

// NewBTEditingLogic2350WithDefaults instantiates a new BTEditingLogic2350 object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewBTEditingLogic2350WithDefaults() *BTEditingLogic2350 {
	this := BTEditingLogic2350{}
	return &this
}
// GetFunctionName returns the FunctionName field value if set, zero value otherwise.
func (o *BTEditingLogic2350) GetFunctionName() string {
	if o == nil || o.FunctionName == nil {
		var ret string
		return ret
	}
	return *o.FunctionName
}

// GetFunctionNameOk returns a tuple with the FunctionName field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTEditingLogic2350) GetFunctionNameOk() (*string, bool) {
	if o == nil || o.FunctionName == nil {
		return nil, false
	}
	return o.FunctionName, true
}

// HasFunctionName reports whether the FunctionName field has been set.
func (o *BTEditingLogic2350) HasFunctionName() bool {
	// Idiom fix (staticcheck S1008): return the condition directly instead
	// of an if/return-true/return-false chain.
	return o != nil && o.FunctionName != nil
}

// SetFunctionName gets a reference to the given string and assigns it to the FunctionName field.
func (o *BTEditingLogic2350) SetFunctionName(v string) {
	o.FunctionName = &v
}
// GetWantsHiddenBodies returns the WantsHiddenBodies field value if set, zero value otherwise.
func (o *BTEditingLogic2350) GetWantsHiddenBodies() bool {
	if o == nil || o.WantsHiddenBodies == nil {
		var ret bool
		return ret
	}
	return *o.WantsHiddenBodies
}

// GetWantsHiddenBodiesOk returns a tuple with the WantsHiddenBodies field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTEditingLogic2350) GetWantsHiddenBodiesOk() (*bool, bool) {
	if o == nil || o.WantsHiddenBodies == nil {
		return nil, false
	}
	return o.WantsHiddenBodies, true
}

// HasWantsHiddenBodies reports whether the WantsHiddenBodies field has been set.
func (o *BTEditingLogic2350) HasWantsHiddenBodies() bool {
	// Idiom fix (staticcheck S1008): return the condition directly.
	return o != nil && o.WantsHiddenBodies != nil
}

// SetWantsHiddenBodies gets a reference to the given bool and assigns it to the WantsHiddenBodies field.
func (o *BTEditingLogic2350) SetWantsHiddenBodies(v bool) {
	o.WantsHiddenBodies = &v
}
// GetWantsIsCreating returns the WantsIsCreating field value if set, zero value otherwise.
func (o *BTEditingLogic2350) GetWantsIsCreating() bool {
	if o == nil || o.WantsIsCreating == nil {
		var ret bool
		return ret
	}
	return *o.WantsIsCreating
}

// GetWantsIsCreatingOk returns a tuple with the WantsIsCreating field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTEditingLogic2350) GetWantsIsCreatingOk() (*bool, bool) {
	if o == nil || o.WantsIsCreating == nil {
		return nil, false
	}
	return o.WantsIsCreating, true
}

// HasWantsIsCreating reports whether the WantsIsCreating field has been set.
func (o *BTEditingLogic2350) HasWantsIsCreating() bool {
	// Idiom fix (staticcheck S1008): return the condition directly.
	return o != nil && o.WantsIsCreating != nil
}

// SetWantsIsCreating gets a reference to the given bool and assigns it to the WantsIsCreating field.
func (o *BTEditingLogic2350) SetWantsIsCreating(v bool) {
	o.WantsIsCreating = &v
}
// GetWantsSpecifiedParameters returns the WantsSpecifiedParameters field value if set, zero value otherwise.
func (o *BTEditingLogic2350) GetWantsSpecifiedParameters() bool {
	if o == nil || o.WantsSpecifiedParameters == nil {
		var ret bool
		return ret
	}
	return *o.WantsSpecifiedParameters
}

// GetWantsSpecifiedParametersOk returns a tuple with the WantsSpecifiedParameters field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTEditingLogic2350) GetWantsSpecifiedParametersOk() (*bool, bool) {
	if o == nil || o.WantsSpecifiedParameters == nil {
		return nil, false
	}
	return o.WantsSpecifiedParameters, true
}

// HasWantsSpecifiedParameters reports whether the WantsSpecifiedParameters field has been set.
func (o *BTEditingLogic2350) HasWantsSpecifiedParameters() bool {
	// Idiom fix (staticcheck S1008): return the condition directly.
	return o != nil && o.WantsSpecifiedParameters != nil
}

// SetWantsSpecifiedParameters gets a reference to the given bool and assigns it to the WantsSpecifiedParameters field.
func (o *BTEditingLogic2350) SetWantsSpecifiedParameters(v bool) {
	o.WantsSpecifiedParameters = &v
}
// MarshalJSON serializes only the fields that have been set; nil (unset)
// fields are omitted from the output entirely.
func (o BTEditingLogic2350) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	if o.FunctionName != nil {
		toSerialize["functionName"] = o.FunctionName
	}
	if o.WantsHiddenBodies != nil {
		toSerialize["wantsHiddenBodies"] = o.WantsHiddenBodies
	}
	if o.WantsIsCreating != nil {
		toSerialize["wantsIsCreating"] = o.WantsIsCreating
	}
	if o.WantsSpecifiedParameters != nil {
		toSerialize["wantsSpecifiedParameters"] = o.WantsSpecifiedParameters
	}
	return json.Marshal(toSerialize)
}
// NullableBTEditingLogic2350 is a nullable wrapper around BTEditingLogic2350 that
// distinguishes an unset value from an explicit JSON null (generated OpenAPI model code).
type NullableBTEditingLogic2350 struct {
	value *BTEditingLogic2350
	isSet bool
}

// Get returns the wrapped value (may be nil).
func (v NullableBTEditingLogic2350) Get() *BTEditingLogic2350 {
	return v.value
}

// Set stores val and marks the wrapper as set.
func (v *NullableBTEditingLogic2350) Set(val *BTEditingLogic2350) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether a value has been assigned via Set or UnmarshalJSON.
func (v NullableBTEditingLogic2350) IsSet() bool {
	return v.isSet
}

// Unset clears the value and marks the wrapper as unset.
func (v *NullableBTEditingLogic2350) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullableBTEditingLogic2350 returns a wrapper already marked as set to val.
func NewNullableBTEditingLogic2350(val *BTEditingLogic2350) *NullableBTEditingLogic2350 {
	return &NullableBTEditingLogic2350{value: val, isSet: true}
}

// MarshalJSON encodes the wrapped value; a nil value encodes as JSON null.
func (v NullableBTEditingLogic2350) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}

// UnmarshalJSON decodes src into the wrapped value and marks the wrapper as set.
func (v *NullableBTEditingLogic2350) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
package main
import (
"log"
"time"
"github.com/fogleman/gg"
"github.com/rosshemsley/kalman"
"github.com/rosshemsley/kalman/models"
"gonum.org/v1/gonum/mat"
)
// Canvas dimensions, in pixels, of the output plot.
const W = 600
const H = 600
// Observation is a single timestamped 2D measurement of the trajectory.
type Observation struct {
	Time  time.Time  // when the measurement was taken
	Point mat.Vector // (x, y) position; values here lie in [0, 1] and are scaled by W/H when drawn
}
// NewObservation builds an Observation located at (x, y), timestamped at the
// given offset in seconds from the zero time.
func NewObservation(secondsOffset float64, x, y float64) Observation {
	at := time.Time{}.Add(time.Duration(secondsOffset * float64(time.Second)))
	return Observation{
		Time:  at,
		Point: mat.NewVecDense(2, []float64{x, y}),
	}
}
// testData is a hand-made noisy 2D trajectory sampled at irregular time
// intervals, used as input to both the filter and the smoother below.
var testData = []Observation{
	NewObservation(1, 0.06, 0.92),
	NewObservation(2, 0.06, 0.8),
	NewObservation(3, 0.08, 0.9),
	NewObservation(5, 0.08, 0.87),
	NewObservation(5.5, 0.16, 0.98),
	NewObservation(7, 0.15, 0.89),
	NewObservation(7.2, 0.19, 0.92),
	NewObservation(7.3, 0.18, 0.85),
	NewObservation(7.4, 0.26, 0.91),
	NewObservation(7.5, 0.24, 0.85),
	NewObservation(8, 0.27, 0.81),
	NewObservation(9, 0.28, 0.78),
	NewObservation(9.1, 0.35, 0.84),
	NewObservation(9.5, 0.35, 0.8),
	NewObservation(10, 0.46, 0.87),
	NewObservation(12, 0.44, 0.82),
	NewObservation(13, 0.44, 0.78),
	NewObservation(14, 0.5, 0.8),
	NewObservation(14.6, 0.49, 0.74),
	NewObservation(14.7, 0.7, 0.9),
	NewObservation(15, 0.57, 0.77),
	NewObservation(15.2, 0.61, 0.78),
	NewObservation(16, 0.6, 0.72),
	NewObservation(17, 0.67, 0.72),
	NewObservation(18, 0.62, 0.63),
	NewObservation(19, 0.66, 0.66),
	NewObservation(20.1, 0.75, 0.68),
	NewObservation(20.2, 0.73, 0.63),
	NewObservation(20.3, 0.76, 0.63),
	NewObservation(20.4, 0.75, 0.61),
	NewObservation(20.6, 0.94, 0.61),
	NewObservation(20.9, 0.79, 0.56),
	NewObservation(21, 0.82, 0.55),
	NewObservation(21.3, 0.73, 0.54),
	NewObservation(21.6, 0.8, 0.5),
	NewObservation(22, 0.81, 0.48),
	NewObservation(22.4, 0.63, 0.5),
	NewObservation(22.7, 0.66, 0.43),
	NewObservation(23, 0.6, 0.44),
	NewObservation(24, 0.65, 0.35),
	NewObservation(25, 0.57, 0.33),
	NewObservation(25.2, 0.68, 0.27),
	NewObservation(26, 0.48, 0.32),
	NewObservation(26.3, 0.47, 0.22),
	NewObservation(27, 0.39, 0.3),
	NewObservation(27.8, 0.39, 0.25),
	NewObservation(27.9, 0.38, 0.2),
	NewObservation(28, 0.35, 0.39),
	NewObservation(29, 0.32, 0.21),
	NewObservation(29.2, 0.32, 0.3),
	NewObservation(29.5, 0.29, 0.22),
	NewObservation(29.6, 0.25, 0.38),
	NewObservation(29.9, 0.23, 0.21),
	NewObservation(31, 0.15, 0.22),
	NewObservation(32, 0.18, 0.19),
	NewObservation(33, 0.12, 0.09),
	NewObservation(34, 0.17, 0.1),
	NewObservation(35, 0.17, 0.05),
	NewObservation(36, 0.26, 0.15),
	NewObservation(37, 0.25, 0.06),
	NewObservation(39, 0.29, 0.03),
	NewObservation(42, 0.35, 0.16),
	NewObservation(43, 0.35, 0.05),
	NewObservation(43.9, 0.4, 0.1),
	NewObservation(44.3, 0.42, 0.06),
	NewObservation(45, 0.55, 0.15),
	NewObservation(46, 0.55, 0.1),
	NewObservation(47, 0.64, 0.18),
	NewObservation(48, 0.65, 0.09),
	NewObservation(49, 0.69, 0.14),
	NewObservation(50, 0.71, 0.05),
	NewObservation(60, 0.76, 0.06),
	NewObservation(62, 0.8, 0.17),
	NewObservation(63, 0.8, 0.1),
	NewObservation(66, 0.87, 0.16),
	NewObservation(67, 0.91, 0.23),
	NewObservation(68, 0.86, 0.29),
	NewObservation(69, 0.98, 0.29),
	NewObservation(70, 0.95, 0.38),
}
// These numbers control how much smoothing the model does.
const observationNoise = 0.1   // entries for the diagonal of R_k (measurement noise covariance)
const initialVariance = 0.01   // entries for the diagonal of P_0 (initial state covariance)
const processVariance = 0.005  // entries for the diagonal of Q_k (process noise covariance)
// main runs a Kalman filter and a Kalman smoother over the noisy test
// trajectory and renders the raw (green), smoothed (red), and filtered
// (blue) paths to plot.png.
func main() {
	dc := gg.NewContext(W, H)
	dc.SetRGB(1, 1, 1)
	dc.Clear()

	model := models.NewConstantVelocityModel(testData[0].Time, testData[0].Point, models.ConstantVelocityModelConfig{
		// Fix: use the initialVariance constant. The original passed
		// observationNoise here, leaving initialVariance declared but unused.
		InitialVariance: initialVariance,
		ProcessVariance: processVariance,
	})

	noisyTrajectory := extractTrajectory(testData)

	filteredTrajectory, err := kalmanFilter(model, testData)
	if err != nil {
		log.Fatalf("failed to run filter: %v", err)
	}

	smoothedTrajectory, err := kalmanSmoother(model, testData)
	if err != nil {
		log.Fatalf("failed to run smoother: %v", err)
	}

	dc.SetRGB(0, 1, 0)
	drawTrajectory(dc, noisyTrajectory)

	dc.SetRGB(1, 0, 0)
	drawTrajectory(dc, smoothedTrajectory)

	dc.SetRGB(0, 0, 1)
	drawTrajectory(dc, filteredTrajectory)

	err = dc.SavePNG("plot.png")
	if err != nil {
		log.Fatalf("failed to write png: %v", err)
	}
}
// kalmanFilter feeds each observation through a Kalman filter in order,
// recording the model's position estimate after every update. It returns one
// estimated position per observation, or the first update error encountered.
func kalmanFilter(model *models.ConstantVelocityModel, observations []Observation) ([]mat.Vector, error) {
	filter := kalman.NewKalmanFilter(model)
	positions := make([]mat.Vector, len(observations))
	for i, obs := range observations {
		m := model.NewPositionMeasurement(obs.Point, observationNoise)
		if err := filter.Update(obs.Time, m); err != nil {
			return nil, err
		}
		positions[i] = model.Position(filter.State())
	}
	return positions, nil
}
// kalmanSmoother runs the offline Kalman smoother over all observations at
// once and returns the model's position estimate for each one.
func kalmanSmoother(model *models.ConstantVelocityModel, observations []Observation) ([]mat.Vector, error) {
	// Wrap every observation as a timestamped position measurement.
	mm := make([]*kalman.MeasurementAtTime, len(observations))
	for i, obs := range observations {
		mm[i] = kalman.NewMeasurementAtTime(obs.Time, model.NewPositionMeasurement(obs.Point, observationNoise))
	}

	states, err := kalman.NewKalmanSmoother(model).Smooth(mm...)
	if err != nil {
		return nil, err
	}

	// Project each smoothed state down to its position component.
	result := make([]mat.Vector, len(states))
	for i, s := range states {
		result[i] = model.Position(s.State)
	}

	return result, nil
}
// extractTrajectory collects the raw observation points into a slice,
// preserving observation order.
func extractTrajectory(observations []Observation) []mat.Vector {
	// Pre-size the slice: exactly one point per observation.
	result := make([]mat.Vector, 0, len(observations))
	for _, obs := range observations {
		result = append(result, obs.Point)
	}
	return result
}
// drawTrajectory strokes the given points as a connected polyline on dc,
// scaling the unit-square coordinates up to the canvas size (W x H).
func drawTrajectory(dc *gg.Context, t []mat.Vector) {
	for i, p := range t {
		if i == 0 {
			dc.MoveTo(p.AtVec(0)*W, p.AtVec(1)*H)
		} else {
			dc.LineTo(p.AtVec(0)*W, p.AtVec(1)*H)
		}
	}
	dc.Stroke()
}
// point builds a 2D vector from x and y.
// NOTE(review): appears unused within this file — candidate for removal.
func point(x, y float64) mat.Vector {
	return mat.NewVecDense(2, []float64{x, y})
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Semi-exhaustive test for append()
package main
import (
"fmt"
"reflect"
)
// verify panics with the test's name when result is not deeply equal to expected.
func verify(name string, result, expected interface{}) {
	if reflect.DeepEqual(result, expected) {
		return
	}
	panic(name)
}
// main runs every table-driven append case plus the struct and interface
// checks, panicking (via verify) on the first mismatch.
func main() {
	for _, t := range tests {
		verify(t.name, t.result, t.expected)
	}
	verifyStruct()
	verifyInterface()
}
// tests exercises append over several element types. Case naming per type:
// a: append nothing to empty; b/c: append scalars to empty; d-f: append
// scalars to a non-empty slice; g-j: append a slice via the "..." spread.
var tests = []struct {
	name             string
	result, expected interface{}
}{
	{"bool a", append([]bool{}), []bool{}},
	{"bool b", append([]bool{}, true), []bool{true}},
	{"bool c", append([]bool{}, true, false, true, true), []bool{true, false, true, true}},
	{"bool d", append([]bool{true, false, true}), []bool{true, false, true}},
	{"bool e", append([]bool{true, false, true}, false), []bool{true, false, true, false}},
	{"bool f", append([]bool{true, false, true}, false, false, false), []bool{true, false, true, false, false, false}},
	{"bool g", append([]bool{}, []bool{true}...), []bool{true}},
	{"bool h", append([]bool{}, []bool{true, false, true, false}...), []bool{true, false, true, false}},
	{"bool i", append([]bool{true, false, true}, []bool{true}...), []bool{true, false, true, true}},
	{"bool j", append([]bool{true, false, true}, []bool{true, true, true}...), []bool{true, false, true, true, true, true}},

	{"byte a", append([]byte{}), []byte{}},
	{"byte b", append([]byte{}, 0), []byte{0}},
	{"byte c", append([]byte{}, 0, 1, 2, 3), []byte{0, 1, 2, 3}},
	{"byte d", append([]byte{0, 1, 2}), []byte{0, 1, 2}},
	{"byte e", append([]byte{0, 1, 2}, 3), []byte{0, 1, 2, 3}},
	{"byte f", append([]byte{0, 1, 2}, 3, 4, 5), []byte{0, 1, 2, 3, 4, 5}},
	{"byte g", append([]byte{}, []byte{0}...), []byte{0}},
	{"byte h", append([]byte{}, []byte{0, 1, 2, 3}...), []byte{0, 1, 2, 3}},
	{"byte i", append([]byte{0, 1, 2}, []byte{3}...), []byte{0, 1, 2, 3}},
	{"byte j", append([]byte{0, 1, 2}, []byte{3, 4, 5}...), []byte{0, 1, 2, 3, 4, 5}},

	{"int16 a", append([]int16{}), []int16{}},
	{"int16 b", append([]int16{}, 0), []int16{0}},
	{"int16 c", append([]int16{}, 0, 1, 2, 3), []int16{0, 1, 2, 3}},
	{"int16 d", append([]int16{0, 1, 2}), []int16{0, 1, 2}},
	{"int16 e", append([]int16{0, 1, 2}, 3), []int16{0, 1, 2, 3}},
	{"int16 f", append([]int16{0, 1, 2}, 3, 4, 5), []int16{0, 1, 2, 3, 4, 5}},
	{"int16 g", append([]int16{}, []int16{0}...), []int16{0}},
	{"int16 h", append([]int16{}, []int16{0, 1, 2, 3}...), []int16{0, 1, 2, 3}},
	{"int16 i", append([]int16{0, 1, 2}, []int16{3}...), []int16{0, 1, 2, 3}},
	{"int16 j", append([]int16{0, 1, 2}, []int16{3, 4, 5}...), []int16{0, 1, 2, 3, 4, 5}},

	{"uint32 a", append([]uint32{}), []uint32{}},
	{"uint32 b", append([]uint32{}, 0), []uint32{0}},
	{"uint32 c", append([]uint32{}, 0, 1, 2, 3), []uint32{0, 1, 2, 3}},
	{"uint32 d", append([]uint32{0, 1, 2}), []uint32{0, 1, 2}},
	{"uint32 e", append([]uint32{0, 1, 2}, 3), []uint32{0, 1, 2, 3}},
	{"uint32 f", append([]uint32{0, 1, 2}, 3, 4, 5), []uint32{0, 1, 2, 3, 4, 5}},
	{"uint32 g", append([]uint32{}, []uint32{0}...), []uint32{0}},
	{"uint32 h", append([]uint32{}, []uint32{0, 1, 2, 3}...), []uint32{0, 1, 2, 3}},
	{"uint32 i", append([]uint32{0, 1, 2}, []uint32{3}...), []uint32{0, 1, 2, 3}},
	{"uint32 j", append([]uint32{0, 1, 2}, []uint32{3, 4, 5}...), []uint32{0, 1, 2, 3, 4, 5}},

	{"float64 a", append([]float64{}), []float64{}},
	{"float64 b", append([]float64{}, 0), []float64{0}},
	{"float64 c", append([]float64{}, 0, 1, 2, 3), []float64{0, 1, 2, 3}},
	{"float64 d", append([]float64{0, 1, 2}), []float64{0, 1, 2}},
	{"float64 e", append([]float64{0, 1, 2}, 3), []float64{0, 1, 2, 3}},
	{"float64 f", append([]float64{0, 1, 2}, 3, 4, 5), []float64{0, 1, 2, 3, 4, 5}},
	{"float64 g", append([]float64{}, []float64{0}...), []float64{0}},
	{"float64 h", append([]float64{}, []float64{0, 1, 2, 3}...), []float64{0, 1, 2, 3}},
	{"float64 i", append([]float64{0, 1, 2}, []float64{3}...), []float64{0, 1, 2, 3}},
	{"float64 j", append([]float64{0, 1, 2}, []float64{3, 4, 5}...), []float64{0, 1, 2, 3, 4, 5}},

	{"complex128 a", append([]complex128{}), []complex128{}},
	{"complex128 b", append([]complex128{}, 0), []complex128{0}},
	{"complex128 c", append([]complex128{}, 0, 1, 2, 3), []complex128{0, 1, 2, 3}},
	{"complex128 d", append([]complex128{0, 1, 2}), []complex128{0, 1, 2}},
	{"complex128 e", append([]complex128{0, 1, 2}, 3), []complex128{0, 1, 2, 3}},
	{"complex128 f", append([]complex128{0, 1, 2}, 3, 4, 5), []complex128{0, 1, 2, 3, 4, 5}},
	{"complex128 g", append([]complex128{}, []complex128{0}...), []complex128{0}},
	{"complex128 h", append([]complex128{}, []complex128{0, 1, 2, 3}...), []complex128{0, 1, 2, 3}},
	{"complex128 i", append([]complex128{0, 1, 2}, []complex128{3}...), []complex128{0, 1, 2, 3}},
	{"complex128 j", append([]complex128{0, 1, 2}, []complex128{3, 4, 5}...), []complex128{0, 1, 2, 3, 4, 5}},

	{"string a", append([]string{}), []string{}},
	{"string b", append([]string{}, "0"), []string{"0"}},
	{"string c", append([]string{}, "0", "1", "2", "3"), []string{"0", "1", "2", "3"}},
	{"string d", append([]string{"0", "1", "2"}), []string{"0", "1", "2"}},
	{"string e", append([]string{"0", "1", "2"}, "3"), []string{"0", "1", "2", "3"}},
	{"string f", append([]string{"0", "1", "2"}, "3", "4", "5"), []string{"0", "1", "2", "3", "4", "5"}},
	{"string g", append([]string{}, []string{"0"}...), []string{"0"}},
	{"string h", append([]string{}, []string{"0", "1", "2", "3"}...), []string{"0", "1", "2", "3"}},
	{"string i", append([]string{"0", "1", "2"}, []string{"3"}...), []string{"0", "1", "2", "3"}},
	{"string j", append([]string{"0", "1", "2"}, []string{"3", "4", "5"}...), []string{"0", "1", "2", "3", "4", "5"}},
}
// verifyStruct checks append behavior for a struct element type, including
// that appending from a source slice does not modify the destination argument.
func verifyStruct() {
	type T struct {
		a, b, c string
	}
	type S []T

	// Build 100 distinct struct values to append from.
	e := make(S, 100)
	for i := range e {
		e[i] = T{"foo", fmt.Sprintf("%d", i), "bar"}
	}

	verify("struct a", append(S{}), S{})
	verify("struct b", append(S{}, e[0]), e[0:1])
	verify("struct c", append(S{}, e[0], e[1], e[2]), e[0:3])
	verify("struct d", append(e[0:1]), e[0:1])
	verify("struct e", append(e[0:1], e[1]), e[0:2])
	verify("struct f", append(e[0:1], e[1], e[2], e[3]), e[0:4])
	verify("struct g", append(e[0:3]), e[0:3])
	verify("struct h", append(e[0:3], e[3]), e[0:4])
	verify("struct i", append(e[0:3], e[3], e[4], e[5], e[6]), e[0:7])

	for i := range e {
		verify("struct j", append(S{}, e[0:i]...), e[0:i])
		input := make(S, i)
		copy(input, e[0:i])
		verify("struct k", append(input, e[i:]...), e)
		// append must not have written into its first argument.
		verify("struct k - input modified", input, e[0:i])
	}

	// Appending to a slice with spare capacity.
	s := make(S, 10, 20)
	r := make(S, len(s)+len(e))
	for i, x := range e {
		r[len(s)+i] = x
	}
	verify("struct l", append(s), s)
	verify("struct m", append(s, e...), r)
}
// verifyInterface checks append behavior for an interface element type, with
// elements cycling through int, string literal, formatted string, and float64.
func verifyInterface() {
	type T interface{}
	type S []T

	e := make(S, 100)
	for i := range e {
		switch i % 4 {
		case 0:
			e[i] = i
		case 1:
			e[i] = "foo"
		case 2:
			e[i] = fmt.Sprintf("%d", i)
		case 3:
			e[i] = float64(i)
		}
	}

	verify("interface a", append(S{}), S{})
	verify("interface b", append(S{}, e[0]), e[0:1])
	verify("interface c", append(S{}, e[0], e[1], e[2]), e[0:3])
	verify("interface d", append(e[0:1]), e[0:1])
	verify("interface e", append(e[0:1], e[1]), e[0:2])
	verify("interface f", append(e[0:1], e[1], e[2], e[3]), e[0:4])
	verify("interface g", append(e[0:3]), e[0:3])
	verify("interface h", append(e[0:3], e[3]), e[0:4])
	verify("interface i", append(e[0:3], e[3], e[4], e[5], e[6]), e[0:7])

	for i := range e {
		verify("interface j", append(S{}, e[0:i]...), e[0:i])
		input := make(S, i)
		copy(input, e[0:i])
		verify("interface k", append(input, e[i:]...), e)
		// append must not have written into its first argument.
		verify("interface k - input modified", input, e[0:i])
	}

	// Appending to a slice with spare capacity.
	s := make(S, 10, 20)
	r := make(S, len(s)+len(e))
	for i, x := range e {
		r[len(s)+i] = x
	}
	verify("interface l", append(s), s)
	verify("interface m", append(s, e...), r)
}
package docs
import (
"bytes"
"fmt"
"reflect"
"strings"
"text/template"
"github.com/Jeffail/benthos/v3/lib/util/config"
"github.com/Jeffail/gabs/v2"
"gopkg.in/yaml.v3"
)
// ComponentSpec describes a Benthos component.
type ComponentSpec struct {
	// Name of the component
	Name string

	// Type of the component (input, output, etc)
	Type string

	// Summary of the component (in markdown, must be short).
	Summary string

	// Description of the component (in markdown).
	Description string

	// Footnotes of the component (in markdown).
	Footnotes string

	// Fields documents the component's configuration fields.
	Fields FieldSpecs
}
// fieldContext is the per-field data handed to componentTemplate when
// rendering the "Fields" section of a component page.
type fieldContext struct {
	Name          string
	Type          string
	Description   string
	Default       string
	Advanced      bool
	Deprecated    bool
	Interpolation FieldInterpolation
	Examples      []string
	Options       []string
}
// componentContext is the top-level data handed to componentTemplate when
// rendering a full component documentation page.
type componentContext struct {
	Name           string
	Type           string
	Summary        string
	Description    string
	Fields         []fieldContext
	Footnotes      string
	CommonConfig   string
	AdvancedConfig string
}
// InterpolationBatchWide exposes the FieldInterpolationBatchWide constant so
// the template can compare a field's Interpolation value against it.
func (ctx fieldContext) InterpolationBatchWide() FieldInterpolation {
	return FieldInterpolationBatchWide
}

// InterpolationIndividual exposes the FieldInterpolationIndividual constant so
// the template can compare a field's Interpolation value against it.
func (ctx fieldContext) InterpolationIndividual() FieldInterpolation {
	return FieldInterpolationIndividual
}
// componentTemplate renders a componentContext into a Docusaurus markdown
// page. The literal mixes Go template actions with escaped "{" sequences for
// JSX props; fenced code blocks are assembled via string concatenation since
// backticks cannot appear inside a raw string literal.
var componentTemplate = `---
title: {{.Name}}
type: {{.Type}}
---

<!--
     THIS FILE IS AUTOGENERATED!

     To make changes please edit the contents of:
     lib/{{.Type}}/{{.Name}}.go
-->

{{if gt (len .Summary) 0 -}}
{{.Summary}}
{{end}}
{{if eq .CommonConfig .AdvancedConfig -}}
` + "```yaml" + `
# Config fields, showing default values
{{.CommonConfig -}}
` + "```" + `
{{else}}
import Tabs from '@theme/Tabs';

<Tabs defaultValue="common" values={{"{"}}[
  { label: 'Common', value: 'common', },
  { label: 'Advanced', value: 'advanced', },
]{{"}"}}>

import TabItem from '@theme/TabItem';

<TabItem value="common">

` + "```yaml" + `
# Common config fields, showing default values
{{.CommonConfig -}}
` + "```" + `

</TabItem>
<TabItem value="advanced">

` + "```yaml" + `
# All config fields, showing default values
{{.AdvancedConfig -}}
` + "```" + `

</TabItem>
</Tabs>
{{end -}}
{{if gt (len .Description) 0}}
{{.Description}}
{{end}}
{{if gt (len .Fields) 0 -}}
## Fields
{{end -}}
{{range $i, $field := .Fields -}}
### ` + "`{{$field.Name}}`" + `

{{$field.Description}}
{{if eq $field.Interpolation .InterpolationBatchWide -}}
This field supports [interpolation functions](/docs/configuration/interpolation#functions) that are resolved batch wide.
{{end -}}
{{if eq $field.Interpolation .InterpolationIndividual -}}
This field supports [interpolation functions](/docs/configuration/interpolation#functions).
{{end}}
Type: ` + "`{{$field.Type}}`" + `
Default: ` + "`{{$field.Default}}`" + `
{{if gt (len $field.Options) 0}}Options: {{range $j, $option := $field.Options -}}
{{if ne $j 0}}, {{end}}` + "`" + `{{$option}}` + "`" + `{{end}}.
{{end}}
{{if gt (len $field.Examples) 0 -}}
` + "```yaml" + `
# Examples

{{range $j, $example := $field.Examples -}}
{{if ne $j 0}}
{{end}}{{$example}}{{end -}}
` + "```" + `

{{end -}}
{{end}}{{if gt (len .Footnotes) 0 -}}
{{.Footnotes}}
{{end}}
`
// createConfigs renders the component's example configuration as YAML in two
// flavors: the full (advanced) config and the reduced common config. The
// config is nested under the component name, and additionally under root when
// root is non-empty. Any error panics, as these documents are generated at
// build time from static specs.
//
// Bug fix: the original declared an outer `err` but then shadowed it with
// `:=` inside the fields branch, so the final `if err != nil { panic(err) }`
// was dead code and all ConfigAdvanced/ConfigCommon/MarshalYAML errors were
// silently ignored. Errors are now checked on the variable they land in.
func (c *ComponentSpec) createConfigs(root string, fullConfigExample interface{}) (
	advancedConfigBytes, commonConfigBytes []byte,
) {
	// nest wraps conf under the component name (and root, when set).
	nest := func(conf interface{}) map[string]interface{} {
		tmp := map[string]interface{}{
			c.Name: conf,
		}
		if len(root) > 0 {
			tmp = map[string]interface{}{
				root: tmp,
			}
		}
		return tmp
	}

	if len(c.Fields) == 0 {
		// No field specs: render the full example for both flavors.
		var err error
		if advancedConfigBytes, err = config.MarshalYAML(nest(fullConfigExample)); err != nil {
			panic(err)
		}
		commonConfigBytes = advancedConfigBytes
		return
	}

	advancedConfig, err := c.Fields.ConfigAdvanced(fullConfigExample)
	if err == nil {
		advancedConfigBytes, err = config.MarshalYAML(nest(advancedConfig))
	}

	var commonConfig interface{}
	if err == nil {
		commonConfig, err = c.Fields.ConfigCommon(advancedConfig)
	}
	if err == nil {
		commonConfigBytes, err = config.MarshalYAML(nest(commonConfig))
	}
	if err != nil {
		panic(err)
	}
	return
}
// AsMarkdown renders the spec of a component, along with a full configuration
// example, into a markdown document.
func (c *ComponentSpec) AsMarkdown(nest bool, fullConfigExample interface{}) ([]byte, error) {
	ctx := componentContext{
		Name:        c.Name,
		Type:        c.Type,
		Summary:     c.Summary,
		Description: c.Description,
		Footnotes:   c.Footnotes,
	}

	// Round-trip the example through YAML to normalize it into generic
	// map[string]interface{} form; a failure here is a programming error.
	if tmpBytes, err := yaml.Marshal(fullConfigExample); err == nil {
		fullConfigExample = map[string]interface{}{}
		if err = yaml.Unmarshal(tmpBytes, &fullConfigExample); err != nil {
			panic(err)
		}
	} else {
		panic(err)
	}

	root := ""
	if nest {
		root = c.Type
	}

	advancedConfigBytes, commonConfigBytes := c.createConfigs(root, fullConfigExample)
	ctx.CommonConfig = string(commonConfigBytes)
	ctx.AdvancedConfig = string(advancedConfigBytes)

	gConf := gabs.Wrap(fullConfigExample)

	// Strip a single leading newline left by raw-string doc literals.
	if len(c.Description) > 0 && c.Description[0] == '\n' {
		ctx.Description = c.Description[1:]
	}
	if len(c.Footnotes) > 0 && c.Footnotes[0] == '\n' {
		ctx.Footnotes = c.Footnotes[1:]
	}

	// walkFields flattens the (possibly nested) field specs into
	// flattenedFields with dotted path names, returning any field names found
	// in the config example but missing from the spec, and any duplicates.
	flattenedFields := FieldSpecs{}
	var walkFields func(path string, gObj *gabs.Container, f FieldSpecs) ([]string, []string)
	walkFields = func(path string, gObj *gabs.Container, f FieldSpecs) ([]string, []string) {
		var missingFields []string
		expectedFields := map[string]struct{}{}
		for k := range gObj.ChildrenMap() {
			expectedFields[k] = struct{}{}
		}
		seenFields := map[string]struct{}{}
		var duplicateFields []string
		for _, v := range f {
			if _, seen := seenFields[v.Name]; seen {
				duplicateFields = append(duplicateFields, v.Name)
			}
			seenFields[v.Name] = struct{}{}
			newV := v
			delete(expectedFields, v.Name)
			newV.Children = nil
			if len(path) > 0 {
				newV.Name = path + newV.Name
			}
			flattenedFields = append(flattenedFields, newV)
			if len(v.Children) > 0 {
				// NOTE(review): this recurses with gConf.S(v.Name) (the root
				// container) rather than gObj.S(v.Name); at nesting depth >= 2
				// the wrong subtree would be inspected — confirm intent.
				tmpMissing, tmpDuplicate := walkFields(path+v.Name+".", gConf.S(v.Name), v.Children)
				missingFields = append(missingFields, tmpMissing...)
				duplicateFields = append(duplicateFields, tmpDuplicate...)
			}
		}
		for k := range expectedFields {
			missingFields = append(missingFields, path+k)
		}
		return missingFields, duplicateFields
	}
	if len(c.Fields) > 0 {
		if missing, duplicates := walkFields("", gConf, c.Fields); len(missing) > 0 {
			return nil, fmt.Errorf("spec missing fields: %v", missing)
		} else if len(duplicates) > 0 {
			return nil, fmt.Errorf("spec duplicate fields: %v", duplicates)
		}
	}

	for _, v := range flattenedFields {
		if v.Deprecated {
			continue
		}
		if !gConf.ExistsP(v.Name) {
			return nil, fmt.Errorf("unrecognised field '%v'", v.Name)
		}
		defaultValue := gConf.Path(v.Name)
		if defaultValue.Data() == nil {
			return nil, fmt.Errorf("field '%v' not found in config example", v.Name)
		}

		// Infer the field type from an example or the default value when the
		// spec does not state one explicitly.
		fieldType := v.Type
		if len(fieldType) == 0 {
			if len(v.Examples) > 0 {
				fieldType = reflect.TypeOf(v.Examples[0]).Kind().String()
			} else {
				fieldType = reflect.TypeOf(defaultValue.Data()).Kind().String()
			}
		}
		// Translate Go kind names into documentation-friendly type names.
		switch fieldType {
		case "map":
			fieldType = "object"
		case "slice":
			fieldType = "array"
		case "float64", "int", "int64":
			fieldType = "number"
		}

		// Render each example as YAML keyed by the field's leaf name.
		var examples []string
		if len(v.Examples) > 0 {
			nameSplit := strings.Split(v.Name, ".")
			exampleName := nameSplit[len(nameSplit)-1]
			for _, example := range v.Examples {
				exampleBytes, err := config.MarshalYAML(map[string]interface{}{
					exampleName: example,
				})
				if err != nil {
					return nil, err
				}
				examples = append(examples, string(exampleBytes))
			}
		}

		fieldCtx := fieldContext{
			Name:          v.Name,
			Type:          fieldType,
			Description:   v.Description,
			Default:       defaultValue.String(),
			Advanced:      v.Advanced,
			Examples:      examples,
			Options:       v.Options,
			Interpolation: v.Interpolation,
		}

		if len(fieldCtx.Description) == 0 {
			fieldCtx.Description = "Sorry! This field is missing documentation."
		}
		if fieldCtx.Description[0] == '\n' {
			fieldCtx.Description = fieldCtx.Description[1:]
		}

		ctx.Fields = append(ctx.Fields, fieldCtx)
	}

	var buf bytes.Buffer
	err := template.Must(template.New("component").Parse(componentTemplate)).Execute(&buf, ctx)

	return buf.Bytes(), err
} | lib/x/docs/component.go | 0.646572 | 0.577495 | component.go | starcoder |
package archs
import (
"github.com/skyhookml/skyhookml/skyhook"
"github.com/skyhookml/skyhookml/exec_ops"
)
// init registers the "pytorch_unet" architecture with the shared Impl
// registry via AddImpl, wiring up its inputs/outputs and the functions
// that translate node parameters into Pytorch train/infer parameters.
func init() {
	// TrainParams is the user-facing training configuration for this arch.
	type TrainParams struct {
		skyhook.PytorchTrainParams
		Resize skyhook.PDDImageOptions
		NumClasses int
		ValPercent int
	}
	// InferParams is the user-facing inference configuration.
	type InferParams struct {
		Resize skyhook.PDDImageOptions
	}
	// ModelParams is JSON-serialized into component 0's config; NumClasses
	// presumably sets the UNet output channel count — confirm on Python side.
	type ModelParams struct {
		NumClasses int `json:"num_classes,omitempty"`
	}
	AddImpl(Impl{
		ID: "pytorch_unet",
		Name: "UNet",
		// Training consumes images, per-pixel labels, and optional base models.
		TrainInputs: []skyhook.ExecInput{
			{Name: "images", DataTypes: []skyhook.DataType{skyhook.ImageType}},
			{Name: "labels", DataTypes: []skyhook.DataType{skyhook.ArrayType}},
			{Name: "models", DataTypes: []skyhook.DataType{skyhook.FileType}},
		},
		InferInputs: []skyhook.ExecInput{
			{Name: "input", DataTypes: []skyhook.DataType{skyhook.ImageType, skyhook.VideoType}},
			{Name: "model", DataTypes: []skyhook.DataType{skyhook.FileType}},
		},
		InferOutputs: []skyhook.ExecOutput{
			{Name: "output", DataType: skyhook.ArrayType},
		},
		// TrainPrepare decodes the node params and builds PytorchTrainParams,
		// embedding resize options and the train/val split percentage.
		TrainPrepare: func(node skyhook.Runnable) (skyhook.PytorchTrainParams, error) {
			var params TrainParams
			if err := exec_ops.DecodeParams(node, &params, false); err != nil {
				return skyhook.PytorchTrainParams{}, err
			}
			p := params.PytorchTrainParams
			p.Dataset.Op = "default"
			p.Dataset.Params = string(skyhook.JsonMarshal(skyhook.PDDParams{
				InputOptions: []interface{}{params.Resize, struct{}{}},
				ValPercent: params.ValPercent,
			}))
			modelParams := ModelParams{
				NumClasses: params.NumClasses,
			}
			// Component 0 carries the model configuration as a JSON string.
			p.Components = map[int]string{
				0: string(skyhook.JsonMarshal(modelParams)),
			}
			p.ArchID = "unet"
			return p, nil
		},
		// InferPrepare decodes the node params and builds PytorchInferParams:
		// one array output ("classes" layer) and the resize input option.
		InferPrepare: func(node skyhook.Runnable) (skyhook.PytorchInferParams, error) {
			var params InferParams
			if err := exec_ops.DecodeParams(node, &params, false); err != nil {
				return skyhook.PytorchInferParams{}, err
			}
			p := skyhook.PytorchInferParams{
				ArchID: "unet",
				OutputDatasets: []skyhook.PIOutputDataset{{
					ComponentIdx: 0,
					Layer: "classes",
					DataType: skyhook.ArrayType,
				}},
				InputOptions: []skyhook.PIInputOption{{
					Idx: 0,
					Value: string(skyhook.JsonMarshal(params.Resize)),
				}},
			}
			return p, nil
		},
	})
}
package geo
import (
"math"
"github.com/dadadamarine/orb"
)
// Distance returns the approximate distance in meters between two points,
// computed with the Pythagorean theorem on an equirectangular projection.
// Faster but less accurate than DistanceHaversine.
func Distance(p1, p2 orb.Point) float64 {
	latDelta := deg2rad(p1[1] - p2[1])
	lonDelta := math.Abs(deg2rad(p1[0] - p2[0]))
	// Wrap the longitude difference across the antimeridian.
	if lonDelta > math.Pi {
		lonDelta = 2*math.Pi - lonDelta
	}

	meanLat := deg2rad((p1[1] + p2[1]) / 2.0)
	x := lonDelta * math.Cos(meanLat)
	return math.Sqrt(latDelta*latDelta+x*x) * orb.EarthRadius
}
// DistanceHaversine computes the distance in meters between two points on
// the earth using the more accurate haversine formula.
func DistanceHaversine(p1, p2 orb.Point) float64 {
	latDelta := deg2rad(p1[1] - p2[1])
	lonDelta := deg2rad(p1[0] - p2[0])

	sinLat := math.Sin(latDelta / 2)
	sinLon := math.Sin(lonDelta / 2)

	a := sinLat*sinLat + math.Cos(deg2rad(p2[1]))*math.Cos(deg2rad(p1[1]))*sinLon*sinLon
	return 2.0 * orb.EarthRadius * math.Atan2(math.Sqrt(a), math.Sqrt(1-a))
}
// Bearing computes the initial heading, in decimal degrees clockwise from
// north, to travel from the first point toward the second.
func Bearing(from, to orb.Point) float64 {
	lonDelta := deg2rad(to[0] - from[0])
	fromLat := deg2rad(from[1])
	toLat := deg2rad(to[1])

	east := math.Sin(lonDelta) * math.Cos(toLat)
	north := math.Cos(fromLat)*math.Sin(toLat) - math.Sin(fromLat)*math.Cos(toLat)*math.Cos(lonDelta)
	return rad2deg(math.Atan2(east, north))
}
// Midpoint returns the half-way point along a great circle path between the
// two points.
func Midpoint(p, p2 orb.Point) orb.Point {
	lonDelta := deg2rad(p2[0] - p[0])
	lat1 := deg2rad(p[1])
	lat2 := deg2rad(p2[1])

	x := math.Cos(lat2) * math.Cos(lonDelta)
	y := math.Cos(lat2) * math.Sin(lonDelta)

	// Midpoint in radians, then converted back to degrees.
	midLon := deg2rad(p[0]) + math.Atan2(y, math.Cos(lat1)+x)
	midLat := math.Atan2(
		math.Sin(lat1)+math.Sin(lat2),
		math.Sqrt((math.Cos(lat1)+x)*(math.Cos(lat1)+x)+y*y),
	)

	return orb.Point{rad2deg(midLon), rad2deg(midLat)}
}
// PointAtBearingAndDistance returns the destination point reached by starting
// at p and traveling the given distance (meters) along the given bearing
// (degrees clockwise from north).
func PointAtBearingAndDistance(p orb.Point, bearing, distance float64) orb.Point {
	lat1 := deg2rad(p[1])
	lon1 := deg2rad(p[0])
	bearingRad := deg2rad(bearing)
	// Angular distance on the sphere.
	angular := distance / orb.EarthRadius

	lat2 := math.Asin(math.Sin(lat1)*math.Cos(angular) + math.Cos(lat1)*math.Sin(angular)*math.Cos(bearingRad))
	lon2 := lon1 +
		math.Atan2(
			math.Sin(bearingRad)*math.Sin(angular)*math.Cos(lat1),
			math.Cos(angular)-math.Sin(lat1)*math.Sin(lat2),
		)

	return orb.Point{rad2deg(lon2), rad2deg(lat2)}
}
func PointAtDistanceAlongLine(ls orb.LineString, distance float64) (orb.Point, float64) {
if len(ls) == 0 {
panic("empty LineString")
}
if distance < 0 || len(ls) == 1 {
return ls[0], 0.0
}
var (
travelled = 0.0
from, to orb.Point
)
for i := 1; i < len(ls); i++ {
from, to = ls[i-1], ls[i]
actualSegmentDistance := DistanceHaversine(from, to)
expectedSegmentDistance := distance - travelled
if expectedSegmentDistance < actualSegmentDistance {
bearing := Bearing(from, to)
return PointAtBearingAndDistance(from, bearing, expectedSegmentDistance), bearing
}
travelled += actualSegmentDistance
}
return to, Bearing(from, to)
} | geo/distance.go | 0.889882 | 0.767102 | distance.go | starcoder |
package iso20022
// UnsecuredMarketTransaction3 provides the details of each individual
// unsecured market transaction.
type UnsecuredMarketTransaction3 struct {

	// Defines the status of the reported transaction, that is details on whether the transaction is a new transaction, an amendment of a previously reported transaction, a cancellation of a previously reported transaction or a correction to a previously reported and rejected transaction.
	ReportedTransactionStatus *TransactionOperationType1Code `xml:"RptdTxSts"`

	// Unique and unambiguous legal entity identification of the branch of the reporting agent in which the transaction has been booked.
	//
	// Usage: This field must only be provided if the transaction has been conducted and booked by a branch of the reporting agent and only if this branch has its own LEI that the reporting agent can clearly identify.
	// Where the transaction has been booked by the head office or the reporting agent cannot be identified by a unique branch-specific LEI, the reporting agent must provide the LEI of the head office.
	BranchIdentification *LEIIdentifier `xml:"BrnchId,omitempty"`

	// Unique transaction identifier will be created at the time a transaction is first executed, shared with all registered entities and counterparties involved in the transaction, and used to track that particular transaction during its lifetime.
	UniqueTransactionIdentifier *Max105Text `xml:"UnqTxIdr,omitempty"`

	// Internal unique transaction identifier used by the reporting agent for each transaction.
	ProprietaryTransactionIdentification *Max105Text `xml:"PrtryTxId"`

	// Internal unique proprietary transaction identifier as assigned by the counterparty of the reporting agent for each transaction.
	CounterpartyProprietaryTransactionIdentification *Max105Text `xml:"CtrPtyPrtryTxId,omitempty"`

	// Identification of the counterparty of the reporting agent for the reported transaction.
	CounterpartyIdentification *CounterpartyIdentification2Choice `xml:"CtrPtyId"`

	// Date and time on which the parties entered into the reported transaction.
	//
	// Usage: when time is available, it must be reported.
	//
	// It is to be reported with only the date when the time of the transaction is not available.
	//
	// The reported time is the execution time when available or otherwise the time at which the transaction entered the trading system of the reporting agent.
	TradeDate *DateAndDateTimeChoice `xml:"TradDt"`

	// Date on which the amount of money is exchanged by counterparties or on which the purchase or sale of a debt instrument settles.
	// With regard to call accounts and other unsecured borrowing/lending redeemable at notice, it is the date on which the deposit is rolled over, that is on which it would have been paid back if it had been called/not rolled over. In the case of a settlement failure in which settlement takes place on a different date than initially agreed, no transactional amendment needs to be reported.
	SettlementDate *ISODate `xml:"SttlmDt"`

	// Date on which the amount of money is due to be repaid by the borrower to the lender or on which a debt instrument matures and is due to be paid back. In regards to callable and puttable instruments, the final maturity date must be provided. For call accounts and other unsecured borrowing/lending redeemable upon notice, the first date on which the instrument may be redeemed must be provided.
	MaturityDate *ISODate `xml:"MtrtyDt"`

	// Defines whether the transaction is a cash borrowing or cash lending transaction.
	TransactionType *MoneyMarketTransactionType1Code `xml:"TxTp"`

	// Defines the instrument via which the borrowing or lending transaction takes place.
	InstrumentType *FinancialInstrumentProductType1Code `xml:"InstrmTp"`

	// Amount of money initially borrowed or lent on deposits. In the case of debt securities, it is the nominal amount of the security issued or purchased.
	TransactionNominalAmount *ActiveCurrencyAndAmount `xml:"TxNmnlAmt"`

	// Dirty price at which the security is issued or traded in percentage points, and which is to be reported as 100 for unsecured deposits.
	DealPrice *PercentageRate `xml:"DealPric"`

	// Fixed rate for deposits and debt instruments with fixed coupons or variable rate for debt instruments for which the pay out at maturity or period depends on observed value of some underlying reference rate as well as for unsecured deposits paying interest at regular intervals.
	//
	RateType *InterestRateType1Code `xml:"RateTp"`

	// Interest rate expressed in accordance with the local money market convention at which the repurchase agreement has been concluded and at which the cash lent is remunerated.
	//
	// Usage:
	// When the remuneration for securities lending transactions is represented by a fee amount, the fee amount will be translated into a deal rate per annum based on the ratio between the fee amount and the transaction nominal amount times number of days based on relevant money market convention divided by the number of days between the settlement date and the maturity of the transaction.
	//
	// Only actual values, as opposed to estimated or default values, will be reported for this variable.
	//
	// This value can be either positive or negative irrespective of whether the cash is borrowed or lent. It represents the contractually agreed remuneration rate on the transaction nominal amount regardless of the transaction sign (that whether the transaction type is reported as borrowed or lent).
	DealRate *Rate2 `xml:"DealRate,omitempty"`

	// Debt instrument in which the periodic interest payments are calculated on the basis of the value (that is fixing of an underlying reference rate such as EURIBOR) on predefined dates (that is fixing) dates and which has a maturity of no more than one year.
	FloatingRateNote *FloatingRateNote2 `xml:"FltgRateNote,omitempty"`

	// Specifies whether the transaction is arranged via a third party broker or not.
	BrokeredDeal *BrokeredDeal1Code `xml:"BrkrdDeal,omitempty"`

	// Provides the option details, when the transaction reported is a call/put option.
	CallPutOption []*Option12 `xml:"CallPutOptn,omitempty"`

	// Additional information that can not be captured in the structured fields and/or any other specific block.
	SupplementaryData []*SupplementaryData1 `xml:"SplmtryData,omitempty"`
}
// SetReportedTransactionStatus sets the reported transaction status code.
func (u *UnsecuredMarketTransaction3) SetReportedTransactionStatus(value string) {
	u.ReportedTransactionStatus = (*TransactionOperationType1Code)(&value)
}
// SetBranchIdentification sets the LEI of the booking branch.
func (u *UnsecuredMarketTransaction3) SetBranchIdentification(value string) {
	u.BranchIdentification = (*LEIIdentifier)(&value)
}
// SetUniqueTransactionIdentifier sets the shared unique transaction identifier.
func (u *UnsecuredMarketTransaction3) SetUniqueTransactionIdentifier(value string) {
	u.UniqueTransactionIdentifier = (*Max105Text)(&value)
}
// SetProprietaryTransactionIdentification sets the reporting agent's internal
// transaction identifier.
func (u *UnsecuredMarketTransaction3) SetProprietaryTransactionIdentification(value string) {
	u.ProprietaryTransactionIdentification = (*Max105Text)(&value)
}
// SetCounterpartyProprietaryTransactionIdentification sets the counterparty's
// internal transaction identifier.
func (u *UnsecuredMarketTransaction3) SetCounterpartyProprietaryTransactionIdentification(value string) {
	u.CounterpartyProprietaryTransactionIdentification = (*Max105Text)(&value)
}
// AddCounterpartyIdentification initialises CounterpartyIdentification to an
// empty value and returns it so the caller can populate it.
func (u *UnsecuredMarketTransaction3) AddCounterpartyIdentification() *CounterpartyIdentification2Choice {
	u.CounterpartyIdentification = new(CounterpartyIdentification2Choice)
	return u.CounterpartyIdentification
}
// AddTradeDate initialises TradeDate to an empty value and returns it so the
// caller can populate it.
func (u *UnsecuredMarketTransaction3) AddTradeDate() *DateAndDateTimeChoice {
	u.TradeDate = new(DateAndDateTimeChoice)
	return u.TradeDate
}
// SetSettlementDate sets the settlement date (ISO date string).
func (u *UnsecuredMarketTransaction3) SetSettlementDate(value string) {
	u.SettlementDate = (*ISODate)(&value)
}
// SetMaturityDate sets the maturity date (ISO date string).
func (u *UnsecuredMarketTransaction3) SetMaturityDate(value string) {
	u.MaturityDate = (*ISODate)(&value)
}
// SetTransactionType sets the borrowing/lending transaction type code.
func (u *UnsecuredMarketTransaction3) SetTransactionType(value string) {
	u.TransactionType = (*MoneyMarketTransactionType1Code)(&value)
}
// SetInstrumentType sets the financial instrument product type code.
func (u *UnsecuredMarketTransaction3) SetInstrumentType(value string) {
	u.InstrumentType = (*FinancialInstrumentProductType1Code)(&value)
}
// SetTransactionNominalAmount sets the nominal amount and its currency.
func (u *UnsecuredMarketTransaction3) SetTransactionNominalAmount(value, currency string) {
	u.TransactionNominalAmount = NewActiveCurrencyAndAmount(value, currency)
}
// SetDealPrice sets the dirty price in percentage points.
func (u *UnsecuredMarketTransaction3) SetDealPrice(value string) {
	u.DealPrice = (*PercentageRate)(&value)
}
// SetRateType sets the interest rate type code (fixed or variable).
func (u *UnsecuredMarketTransaction3) SetRateType(value string) {
	u.RateType = (*InterestRateType1Code)(&value)
}
// AddDealRate initialises DealRate to an empty value and returns it so the
// caller can populate it.
func (u *UnsecuredMarketTransaction3) AddDealRate() *Rate2 {
	u.DealRate = new(Rate2)
	return u.DealRate
}
// AddFloatingRateNote initialises FloatingRateNote to an empty value and
// returns it so the caller can populate it.
func (u *UnsecuredMarketTransaction3) AddFloatingRateNote() *FloatingRateNote2 {
	u.FloatingRateNote = new(FloatingRateNote2)
	return u.FloatingRateNote
}
// SetBrokeredDeal sets whether the transaction was arranged via a broker.
func (u *UnsecuredMarketTransaction3) SetBrokeredDeal(value string) {
	u.BrokeredDeal = (*BrokeredDeal1Code)(&value)
}
// AddCallPutOption appends a new empty Option12 to CallPutOption and returns
// it so the caller can populate it.
func (u *UnsecuredMarketTransaction3) AddCallPutOption() *Option12 {
	// gofmt: "new (Option12)" normalized to "new(Option12)".
	newValue := new(Option12)
	u.CallPutOption = append(u.CallPutOption, newValue)
	return newValue
}
func (u *UnsecuredMarketTransaction3) AddSupplementaryData() *SupplementaryData1 {
newValue := new (SupplementaryData1)
u.SupplementaryData = append(u.SupplementaryData, newValue)
return newValue
} | UnsecuredMarketTransaction3.go | 0.826817 | 0.64058 | UnsecuredMarketTransaction3.go | starcoder |
package comparator
// Comparator compares two values a and b and must return:
//	-1 , if a < b
//	 0 , if a == b
//	 1 , if a > b
type Comparator func(a, b interface{}) int
// BuiltinTypeComparator compare a with b
// -1 , if a < b
// 0 , if a == b
// 1 , if a > b
// make sure a and b are both builtin type
//
// NOTE(review): a and b must share the same dynamic type; mixed numeric
// types panic in the assertions below. Values of types not handled here
// (and NaN floats, which are never ==) fall through to the final "return 1".
func BuiltinTypeComparator(a, b interface{}) int {
	// Interface equality covers the == case for every handled type.
	if a == b {
		return 0
	}
	switch a.(type) {
	// All integer kinds are dispatched to cmpInt, which widens and compares.
	case int, uint, int8, uint8, int16, uint16, int32, uint32, int64, uint64, uintptr:
		return cmpInt(a, b)
	case float32:
		if a.(float32) < b.(float32) {
			return -1
		}
	case float64:
		if a.(float64) < b.(float64) {
			return -1
		}
	case bool:
		// false sorts before true.
		if a.(bool) == false && b.(bool) == true {
			return -1
		}
	case string:
		if a.(string) < b.(string) {
			return -1
		}
	case complex64:
		// Complex values are ordered by real part, then imaginary part.
		return cmpComplex64(a.(complex64), b.(complex64))
	case complex128:
		return cmpComplex128(a.(complex128), b.(complex128))
	}
	// Shared "greater" result for every non-returning case above.
	return 1
}
// cmpInt compares two integer values of the same concrete kind by widening
// them to int64 (signed kinds) or uint64 (unsigned kinds and uintptr).
// Callers must have already ruled out equality; the helpers never return 0.
func cmpInt(a, b interface{}) int {
	switch a.(type) {
	case int:
		return cmpInt64(int64(a.(int)), int64(b.(int)))
	case uint:
		return cmpUint64(uint64(a.(uint)), uint64(b.(uint)))
	case int8:
		return cmpInt64(int64(a.(int8)), int64(b.(int8)))
	case uint8:
		return cmpUint64(uint64(a.(uint8)), uint64(b.(uint8)))
	case int16:
		return cmpInt64(int64(a.(int16)), int64(b.(int16)))
	case uint16:
		return cmpUint64(uint64(a.(uint16)), uint64(b.(uint16)))
	case int32:
		return cmpInt64(int64(a.(int32)), int64(b.(int32)))
	case uint32:
		return cmpUint64(uint64(a.(uint32)), uint64(b.(uint32)))
	case int64:
		return cmpInt64(a.(int64), b.(int64))
	case uint64:
		return cmpUint64(a.(uint64), b.(uint64))
	case uintptr:
		return cmpUint64(uint64(a.(uintptr)), uint64(b.(uintptr)))
	}
	// Defensive default; unreachable when called with the kinds listed above.
	return 0
}
// cmpInt64 orders two int64 values already known to be unequal:
// -1 when a < b, otherwise 1.
func cmpInt64(a, b int64) int {
	switch {
	case a < b:
		return -1
	default:
		return 1
	}
}
// cmpUint64 orders two uint64 values already known to be unequal:
// -1 when a < b, otherwise 1.
func cmpUint64(a, b uint64) int {
	switch {
	case a < b:
		return -1
	default:
		return 1
	}
}
// cmpFloat32 orders two unequal float32 values: -1 when a < b, otherwise 1.
// NOTE(review): currently unreferenced in this file — BuiltinTypeComparator
// compares floats inline.
func cmpFloat32(a, b float32) int {
	switch {
	case a < b:
		return -1
	default:
		return 1
	}
}
// cmpFloat64 orders two unequal float64 values: -1 when a < b, otherwise 1.
// NOTE(review): currently unreferenced in this file — BuiltinTypeComparator
// compares floats inline.
func cmpFloat64(a, b float64) int {
	switch {
	case a < b:
		return -1
	default:
		return 1
	}
}
// cmpComplex64 orders complex64 values by real part, then imaginary part:
// -1 when a sorts before b, otherwise 1 (equal values also yield 1).
func cmpComplex64(a, b complex64) int {
	switch {
	case real(a) < real(b):
		return -1
	case real(a) == real(b) && imag(a) < imag(b):
		return -1
	default:
		return 1
	}
}
// cmpComplex128 orders complex128 values by real part, then imaginary part:
// -1 when a sorts before b, otherwise 1 (equal values also yield 1).
func cmpComplex128(a, b complex128) int {
	switch {
	case real(a) < real(b):
		return -1
	case real(a) == real(b) && imag(a) < imag(b):
		return -1
	default:
		return 1
	}
}
// Reverse returns a Comparator that yields the inverse ordering of cmp.
func Reverse(cmp Comparator) Comparator {
	reversed := func(a, b interface{}) int {
		return -cmp(a, b)
	}
	return reversed
}
// IntComparator orders two int values: -1 when a < b, 0 when a == b,
// and 1 when a > b.
func IntComparator(a, b interface{}) int {
	switch {
	case a == b:
		return 0
	case a.(int) < b.(int):
		return -1
	default:
		return 1
	}
}
// UintComparator orders two uint values: -1 when a < b, 0 when a == b,
// and 1 when a > b.
func UintComparator(a, b interface{}) int {
	switch {
	case a == b:
		return 0
	case a.(uint) < b.(uint):
		return -1
	default:
		return 1
	}
}
// Int8Comparator orders two int8 values: -1 when a < b, 0 when a == b,
// and 1 when a > b.
func Int8Comparator(a, b interface{}) int {
	switch {
	case a == b:
		return 0
	case a.(int8) < b.(int8):
		return -1
	default:
		return 1
	}
}
// Uint8Comparator orders two uint8 values: -1 when a < b, 0 when a == b,
// and 1 when a > b.
func Uint8Comparator(a, b interface{}) int {
	switch {
	case a == b:
		return 0
	case a.(uint8) < b.(uint8):
		return -1
	default:
		return 1
	}
}
// Int16Comparator orders two int16 values: -1 when a < b, 0 when a == b,
// and 1 when a > b.
func Int16Comparator(a, b interface{}) int {
	switch {
	case a == b:
		return 0
	case a.(int16) < b.(int16):
		return -1
	default:
		return 1
	}
}
// Uint16Comparator orders two uint16 values: -1 when a < b, 0 when a == b,
// and 1 when a > b.
func Uint16Comparator(a, b interface{}) int {
	switch {
	case a == b:
		return 0
	case a.(uint16) < b.(uint16):
		return -1
	default:
		return 1
	}
}
// Int32Comparator orders two int32 values: -1 when a < b, 0 when a == b,
// and 1 when a > b.
func Int32Comparator(a, b interface{}) int {
	switch {
	case a == b:
		return 0
	case a.(int32) < b.(int32):
		return -1
	default:
		return 1
	}
}
// Uint32Comparator orders two uint32 values: -1 when a < b, 0 when a == b,
// and 1 when a > b.
func Uint32Comparator(a, b interface{}) int {
	switch {
	case a == b:
		return 0
	case a.(uint32) < b.(uint32):
		return -1
	default:
		return 1
	}
}
// Int64Comparator orders two int64 values: -1 when a < b, 0 when a == b,
// and 1 when a > b.
func Int64Comparator(a, b interface{}) int {
	switch {
	case a == b:
		return 0
	case a.(int64) < b.(int64):
		return -1
	default:
		return 1
	}
}
// Uint64Comparator orders two uint64 values: -1 when a < b, 0 when a == b,
// and 1 when a > b.
func Uint64Comparator(a, b interface{}) int {
	switch {
	case a == b:
		return 0
	case a.(uint64) < b.(uint64):
		return -1
	default:
		return 1
	}
}
// Float32Comparator orders two float32 values: -1 when a < b, 0 when a == b,
// and 1 when a > b. NaN operands (never ==, never <) yield 1.
func Float32Comparator(a, b interface{}) int {
	switch {
	case a == b:
		return 0
	case a.(float32) < b.(float32):
		return -1
	default:
		return 1
	}
}
// Float64Comparator orders two float64 values: -1 when a < b, 0 when a == b,
// and 1 when a > b. NaN operands (never ==, never <) yield 1.
func Float64Comparator(a, b interface{}) int {
	switch {
	case a == b:
		return 0
	case a.(float64) < b.(float64):
		return -1
	default:
		return 1
	}
}
// StringComparator orders two string values lexicographically by bytes:
// -1 when a < b, 0 when a == b, and 1 when a > b.
func StringComparator(a, b interface{}) int {
	switch {
	case a == b:
		return 0
	case a.(string) < b.(string):
		return -1
	default:
		return 1
	}
}
// UintptrComparator orders two uintptr values: -1 when a < b, 0 when a == b,
// and 1 when a > b.
func UintptrComparator(a, b interface{}) int {
	switch {
	case a == b:
		return 0
	case a.(uintptr) < b.(uintptr):
		return -1
	default:
		return 1
	}
}
// BoolComparator orders two bool values with false before true:
// -1 when a < b, 0 when a == b, and 1 when a > b.
func BoolComparator(a, b interface{}) int {
	switch {
	case a == b:
		return 0
	case !a.(bool) && b.(bool):
		return -1
	default:
		return 1
	}
}
// Complex64Comparator orders two complex64 values by real part, then
// imaginary part: -1 when a sorts before b, 0 when a == b, and 1 otherwise.
func Complex64Comparator(a, b interface{}) int {
	if a == b {
		return 0
	}
	x, y := a.(complex64), b.(complex64)
	switch {
	case real(x) < real(y):
		return -1
	case real(x) == real(y) && imag(x) < imag(y):
		return -1
	default:
		return 1
	}
}
// Complex128Comparator orders two complex128 values by real part, then
// imaginary part: -1 when a sorts before b, 0 when a == b, and 1 otherwise.
func Complex128Comparator(a, b interface{}) int {
	if a == b {
		return 0
	}
	x, y := a.(complex128), b.(complex128)
	switch {
	case real(x) < real(y):
		return -1
	case real(x) == real(y) && imag(x) < imag(y):
		return -1
	default:
		return 1
	}
}
package main
// Structs
// States holds the snapshot timestamp and the list of aircraft state
// vectors returned by the OpenSky /states endpoint.
type States struct {
	Time   int     `json:"time"`
	States []State `json:"states"`
}
// State is a single aircraft state vector (OpenSky /states schema).
type State struct {
	Icao24        string  `json:"Icao24"`        // Unique ICAO 24-bit address of the transponder in hex string representation.
	Callsign      string  `json:"Callsign"`      // Callsign of the vehicle (8 chars). Can be null if no callsign has been received.
	OriginCountry string  `json:"OriginCountry"` // Country name inferred from the ICAO 24-bit address.
	//TimePosition int `json:"TimePosition"` // Unix timestamp (seconds) for the last position update. Can be null if no position report was received by OpenSky within the past 15s.
	//LastContact int `json:"LastContact"` // Unix timestamp (seconds) for the last update in general. This field is updated for any new, valid message received from the transponder.
	Longitude     float64 `json:"Longitude"`     // WGS-84 longitude in decimal degrees. Can be null.
	Latitude      float64 `json:"Latitude"`      // WGS-84 latitude in decimal degrees. Can be null.
	BaroAltitude  float64 `json:"BaroAltitude"`  // Barometric altitude in meters. Can be null.
	OnGround      bool    `json:"OnGround"`      // Boolean value which indicates if the position was retrieved from a surface position report.
	Velocity      float64 `json:"Velocity"`      // Velocity over ground in m/s. Can be null.
	TrueTrack     float64 `json:"TrueTrack"`     // True track in decimal degrees clockwise from north (north=0°). Can be null.
	VerticalRate  float64 `json:"VerticalRate"`  // Vertical rate in m/s. A positive value indicates that the airplane is climbing, a negative value indicates that it descends. Can be null.
	// Sensors []int `json:"Sensors"` // IDs of the receivers which contributed to this state vector. Is null if no filtering for sensor was used in the request.
	GeoAltitude   float64 `json:"GeoAltitude"`   // Geometric altitude in meters. Can be null.
	Squawk        string  `json:"Squawk"`        // The transponder code aka Squawk. Can be null.
	Spi           bool    `json:"spi"`           // Whether flight status indicates special purpose indicator.
	/// PositionSource int `json:"positionSource"` //Origin of this state’s position: 0 = ADS-B, 1 = ASTERIX, 2 = MLAT
}
// Flight stores flight data and info about the departure and arrival airport
// (OpenSky /flights schema).
type Flight struct {
	Icao24F             string `json:"icao24"`             // Unique ICAO 24-bit address of the transponder in hex string representation. All letters are lower case.
	FirstSeen           int    `json:"firstSeen"`          // Estimated time of departure for the flight as Unix time (seconds since epoch).
	EstDepartureAirport string `json:"estDepartureAirport"` // ICAO code of the estimated departure airport. Can be null if the airport could not be identified.
	LastSeen            int    `json:"lastSeen"`           // Estimated time of arrival for the flight as Unix time (seconds since epoch)
	EstArrivalAirport   string `json:"estArrivalAiport"`   // ICAO code of the estimated arrival airport. Can be null if the airport could not be identified. NOTE(review): JSON tag is misspelled ("Aiport") — confirm against the API payload before changing, as fixing it alters deserialization.
	Callsign            string `json:"callsign"`           // Callsign of the vehicle (8 chars). Can be null if no callsign has been received. If the vehicle transmits multiple callsigns during the flight, we take the one seen most frequently
	// EstDepartureAirportHorizDistance int // Horizontal distance of the last received airborne position to the estimated departure airport in meters
	// EstDepartureAirportVertDistance int // Vertical distance of the last received airborne position to the estimated departure airport in meters
	// EstArrivalAirportHorizDistance int // Horizontal distance of the last received airborne position to the estimated arrival airport in meters
	// EstArrivalAirportVertDistance int // Vertical distance of the last received airborne position to the estimated arrival airport in meters
	// DepartureAirportCandidatesCount int // Number of other possible departure airports. These are airports in short distance to estDepartureAirport.
	// ArrivalAirportCandidatesCount int // Number of other possible departure airports. These are airports in short distance to estArrivalAirport.
}
// Planes combines a live state vector with its flight metadata by embedding
// both State and Flight.
type Planes struct {
	State
	Flight
}
// Airport holds data about a single airport (OpenFlights airports.dat row).
type Airport struct {
	ID                 int     `json:"ID"`                   // Unique OpenFlights identifier for this airport
	Name               string  `json:"Name"`                 // Name of airport. May or may not contain the City name.
	City               string  `json:"City"`                 // Main city served by airport. May be spelled differently from Name.
	Country            string  `json:"Country"`              // Country or territory where airport is located.
	IATA               string  `json:"IATA"`                 // 3-letter IATA code. Null if not assigned/unknown
	ICAO               string  `json:"ICAO"`                 // 4-letter ICAO code. Null if not assigned.
	Latitude           float64 `json:"Latitude"`             // Decimal degrees, usually to six significant digits. Negative is South, positive is North.
	Longitude          float64 `json:"Longitude"`            // Decimal degrees, usually to six significant digits. Negative is West, positive is East.
	Altitude           float64 `json:"Altitude"`             // In feet.
	Timezone           string  `json:"Timezone"`             // Hours offset from UTC. Fractional hours are expressed as decimals, eg. India is 5.5.
	DST                string  `json:"DST"`                  // Daylight savings time. One of E (Europe), A (US/Canada), S (South America), O (Australia), Z (New Zealand), N (None) or U (Unknown)
	TzDatabaseTimezone string  `json:"Tz_Database_Timezone"` // Timezone in "tz" (Olson) format, eg. "America/Los_Angeles".
	Type               string  `json:"Type"`                 // Type of airport. only type=airport included.
	Source             string  `json:"Source"`               // Source of this data. "OurAirports" is the only source
}
// AirportJSON wraps an Airport together with its arriving/departing info for
// JSON output. NOTE(review): the field "Aport" looks like a typo of
// "Airport", but renaming it would break callers and the emitted JSON key.
type AirportJSON struct {
	Aport     Airport `json:"Airport"`
	Arriving  string  `json:"Arriving"`
	Departing string  `json:"Departing"`
}
// Database holds the basic connection settings and collection names used to
// reach the backing store.
type Database struct {
	HostURL           string // connection URL of the database host
	DatabaseName      string // name of the database
	CollectionState   string // collection storing State documents
	CollectionAirport string // collection storing Airport documents
	CollectionFlight  string // collection storing Flight documents
}
// Markers holds the values rendered as map markers: a title plus the planes
// and airports to display, keyed by an integer index.
type Markers struct {
	Title    string
	Planes   map[int]Planes
	Airports map[int]Airport
}
// DBValues is the package-level database configuration, accessible everywhere.
// NOTE(review): global mutable state — consider passing a Database explicitly.
var DBValues Database
package simplebuffer
// This file contains a buffer for use in testing, walker, and uploader.
// Some interface functions are only briefly implemented with a dummy return value.
import (
"fmt"
"github.com/aristanetworks/quantumfs"
"github.com/aristanetworks/quantumfs/encoding"
"github.com/aristanetworks/quantumfs/hash"
capn "github.com/glycerine/go-capnproto"
)
// buffer only contains data and key to meet the requirements of Set() and
// Get() in datastore. This can be used in tests and tools which need to
// use datastore API and thus need an implementation of quantumfs.Buffer
// interface
type buffer struct {
	key  quantumfs.ObjectKey // content-addressed key of the data
	data []byte              // raw block contents
}
// New wraps the given bytes and key in a quantumfs.Buffer implementation.
func New(in []byte, q_key quantumfs.ObjectKey) quantumfs.Buffer {
	b := &buffer{
		key:  q_key,
		data: in,
	}
	return b
}
// AssertNonZeroBuf panics with the formatted message if buf is empty.
// BUG FIX: the args slice is now spread into individual formatting operands;
// previously the []string itself was passed as a single operand, so multi-arg
// format strings rendered as "%!(EXTRA ...)" instead of the intended message.
func AssertNonZeroBuf(buf quantumfs.Buffer,
	format string, args ...string) {

	if buf.Size() == 0 {
		operands := make([]interface{}, len(args))
		for i, arg := range args {
			operands[i] = arg
		}
		panic(fmt.Sprintf(format, operands...))
	}
}
// Implement the required interface functions. Only Get() and Set() will be called,
// so the others will be briefly implemented or be directly copied from
// daemon/datastore.go

// Write is not supported by this stub buffer and always panics.
func (buf *buffer) Write(c *quantumfs.Ctx, in []byte, offset uint32) uint32 {
	panic("Error: The Write function of Buffer is not implemented")
}
// Read is not supported by this stub buffer and always panics.
func (buf *buffer) Read(out []byte, offset uint32) int {
	panic("Error: The Read function of Buffer is not implemented")
}
// Get returns the raw buffer contents (no copy is made).
func (buf *buffer) Get() []byte {
	return buf.data
}
// Set replaces the buffer contents. The keyType argument is ignored; the
// stored key is left unchanged.
func (buf *buffer) Set(data []byte, keyType quantumfs.KeyType) {
	buf.data = data
}
// KeyType returns the type of the stored object key.
func (buf *buffer) KeyType() quantumfs.KeyType {
	return buf.key.Type()
}
// ContentHash computes the content hash of the current buffer data.
func (buf *buffer) ContentHash() [quantumfs.ObjectKeyLength - 1]byte {
	return hash.Hash(buf.data)
}
// Key returns the stored key as-is; it is not recomputed from the data.
func (buf *buffer) Key(c *quantumfs.Ctx) (quantumfs.ObjectKey, error) {
	return buf.key, nil
}
// SetSize truncates the buffer to size bytes. It only shrinks (or re-extends
// within capacity); a size beyond the slice capacity panics.
func (buf *buffer) SetSize(size int) {
	buf.data = buf.data[:size]
}
// Size returns the current length of the buffer contents in bytes.
func (buf *buffer) Size() int {
	return len(buf.data)
}
// AsDirectoryEntry decodes the buffer contents as a Cap'n Proto
// DirectoryEntry.
func (buf *buffer) AsDirectoryEntry() quantumfs.DirectoryEntry {
	segment := capn.NewBuffer(buf.data)
	return quantumfs.OverlayDirectoryEntry(
		encoding.ReadRootDirectoryEntry(segment))
}
// AsWorkspaceRoot decodes the buffer contents as a Cap'n Proto WorkspaceRoot.
func (buf *buffer) AsWorkspaceRoot() quantumfs.WorkspaceRoot {
	segment := capn.NewBuffer(buf.data)
	return quantumfs.OverlayWorkspaceRoot(
		encoding.ReadRootWorkspaceRoot(segment))
}
// AsMultiBlockFile decodes the buffer contents as a Cap'n Proto
// MultiBlockFile.
func (buf *buffer) AsMultiBlockFile() quantumfs.MultiBlockFile {
	segment := capn.NewBuffer(buf.data)
	return quantumfs.OverlayMultiBlockFile(
		encoding.ReadRootMultiBlockFile(segment))
}
// AsVeryLargeFile decodes the buffer contents as a Cap'n Proto VeryLargeFile.
func (buf *buffer) AsVeryLargeFile() quantumfs.VeryLargeFile {
	segment := capn.NewBuffer(buf.data)
	return quantumfs.OverlayVeryLargeFile(
		encoding.ReadRootVeryLargeFile(segment))
}
// AsExtendedAttributes decodes the buffer contents as Cap'n Proto
// ExtendedAttributes.
func (buf *buffer) AsExtendedAttributes() quantumfs.ExtendedAttributes {
	segment := capn.NewBuffer(buf.data)
	return quantumfs.OverlayExtendedAttributes(
		encoding.ReadRootExtendedAttributes(segment))
}
// AsHardlinkEntry decodes the buffer contents as a Cap'n Proto HardlinkEntry.
func (buf *buffer) AsHardlinkEntry() quantumfs.HardlinkEntry {
	segment := capn.NewBuffer(buf.data)
	return quantumfs.OverlayHardlinkEntry(
		encoding.ReadRootHardlinkEntry(segment))
}
package main
import (
"cbdeep/data"
"errors"
"fmt"
"math/rand"
)
// dataExample is a runnable example demonstrating the data package.
type dataExample struct {
}
// GetSchedule reports that this example is only run manually.
func (t dataExample) GetSchedule() string {
	return "manual"
}
// GetGroup returns the group this task belongs to.
func (t dataExample) GetGroup() string {
	return "examples"
}
// GetName returns the task's name within its group.
func (t dataExample) GetName() string {
	return "data"
}
// Run demonstrates the data package: it builds a basic float32 dataset from
// in-memory random rows, then a generator-backed dataset, and prints the
// shape and values of every row in each.
//
// Fixes over the original: fmt.Println(fmt.Sprintf(...)) collapsed to
// fmt.Printf, `for true` replaced with bare `for`, the shadowed inner loop
// index renamed, and non-ErrIterationEnd errors from Next() are now returned
// instead of silently ignored (which would have dereferenced a nil row).
func (t dataExample) Run() error {
	// Generate some random data: 2 rows of 3 values each.
	var float32Rows [][]float32
	for i := 0; i < 2; i++ {
		var values []float32
		for j := 0; j < 3; j++ {
			values = append(values, rand.Float32())
		}
		float32Rows = append(float32Rows, values)
	}

	// Create a basic dataset from the in-memory rows.
	basicFloat32Dataset, e := data.NewFloat32Dataset(float32Rows, data.NewShape(len(float32Rows), 3))
	if e != nil {
		return e
	}

	// Print out the basic dataset.
	fmt.Printf(
		"Got a basic dataset with dtype %s, shape %s, len %d\n",
		basicFloat32Dataset.DType.String(),
		basicFloat32Dataset.Shape.String(),
		basicFloat32Dataset.Len(),
	)
	for {
		row, e := basicFloat32Dataset.Next()
		if e != nil {
			if errors.Is(e, data.ErrIterationEnd) {
				break
			}
			return e
		}
		fmt.Printf("\tGot a row with shape: %s and values: %s\n", row.Shape.String(), row.String())
	}

	float32Generator := exampleFloat32DataGenerator{
		Shape: data.NewShape(2, 2, 3),
	}

	// Create a generator-backed dataset.
	generatorFloat32Dataset, e := data.NewFloat32GeneratorDataset(float32Generator.Generator, float32Generator.Shape)
	if e != nil {
		return e
	}

	// Print out the generator dataset.
	fmt.Printf(
		"Got a generator dataset with dtype %s, shape %s, len %d\n",
		generatorFloat32Dataset.DType.String(),
		generatorFloat32Dataset.Shape.String(),
		generatorFloat32Dataset.Len(),
	)
	for {
		row, e := generatorFloat32Dataset.Next()
		if e != nil {
			if errors.Is(e, data.ErrIterationEnd) {
				break
			}
			return e
		}
		fmt.Printf("\tGot a row with shape: %s and values: %s\n", row.Shape.String(), row.String())
	}

	return nil
}
// exampleFloat32DataGenerator produces random float32 rows on demand;
// Shape bounds how many rows are generated and how wide each row is.
type exampleFloat32DataGenerator struct {
	Shape *data.Shape
}
// Define the generator func
func (g *exampleFloat32DataGenerator) Generator(offset int) ([]float32, error) {
// Generate a random row of the correct shape for the count of the dataset
if offset < g.Shape.Count {
var values []float32
for i := 0; i < g.Shape.GetDimensionsLen(); i++ {
values = append(values, rand.Float32())
}
return values, nil
}
// Generation is finished return iteration end error
return nil, data.ErrIterationEnd
} | examples/data.go | 0.643553 | 0.498596 | data.go | starcoder |
package main
import (
"fmt"
"strings"
"github.com/rolfschmidt/advent-of-code-2021/helper"
)
// main solves Advent of Code 2021 day 13 and prints both answers.
func main() {
	fmt.Println("Part 1", Part1())
	fmt.Println("Part 2", Part2())
}

// Part1 returns the number of visible dots after the first fold only.
func Part1() int {
	return Run(false)
}

// Part2 performs every fold and returns the final dot count.
// (The answer letters can be rendered by enabling `print` inside Run.)
func Part2() int {
	return Run(true)
}
// AddPoint marks the coordinate (x, y) as set in the sparse boolean grid.
// The column map for x is created lazily on first use; the (mutated) grid
// is returned for call-chaining convenience.
func AddPoint(matrix map[int]map[int]bool, x int, y int) map[int]map[int]bool {
	column, exists := matrix[x]
	if !exists {
		column = map[int]bool{}
		matrix[x] = column
	}
	column[y] = true
	return matrix
}
// MatrixMax returns the largest x and y coordinates present in the grid.
// Columns with no set points contribute nothing (the inner range never
// runs), matching the original nested-loop behaviour. Returns (0, 0) for
// an empty grid.
func MatrixMax(matrix map[int]map[int]bool) (int, int) {
	var maxX, maxY int
	for x, column := range matrix {
		for y := range column {
			if maxX < x {
				maxX = x
			}
			if maxY < y {
				maxY = y
			}
		}
	}
	return maxX, maxY
}
// FoldUp folds the grid upward along the horizontal line y = fY: points at
// or above the fold keep their position, points below it are mirrored to
// y' = fY - (y - fY) = 2*fY - y. A new grid is returned; the input is not
// modified.
func FoldUp(oldMatrix map[int]map[int]bool, fY int) map[int]map[int]bool {
	folded := map[int]map[int]bool{}
	for x, column := range oldMatrix {
		for y := range column {
			target := y
			if y > fY {
				target = 2*fY - y
			}
			folded = AddPoint(folded, x, target)
		}
	}
	return folded
}
// FoldLeft folds the grid leftward along the vertical line x = fX: points
// at or left of the fold keep their position, points right of it are
// mirrored to x' = fX - (x - fX) = 2*fX - x. A new grid is returned; the
// input is not modified.
func FoldLeft(oldMatrix map[int]map[int]bool, fX int) map[int]map[int]bool {
	folded := map[int]map[int]bool{}
	for x, column := range oldMatrix {
		target := x
		if x > fX {
			target = 2*fX - x
		}
		for y := range column {
			folded = AddPoint(folded, target, y)
		}
	}
	return folded
}
// Run parses input.txt (dot coordinates, then fold instructions, separated
// by a blank line) and performs the folds. With Part2 == false it returns
// the dot count after the first fold; with Part2 == true it applies every
// fold and returns the final count. Flipping `print` to true renders the
// grid after each fold (part 2's answer letters) to stdout.
func Run(Part2 bool) int {
	matrix := map[int]map[int]bool{}
	folds := [][]int{} // each entry: {axis, position}; axis 0 = fold up (y), 1 = fold left (x)
	content := helper.ReadFileString("input.txt");
	parts := helper.Split(content, "\n\n")
	// First section: one "x,y" dot per line.
	for _, line := range helper.Split(parts[0], "\n") {
		lineParts := helper.Split(line, ",")
		lx := helper.String2Int(lineParts[0])
		ly := helper.String2Int(lineParts[1])
		matrix = AddPoint(matrix, lx, ly)
	}
	// Second section: "fold along y=N" / "fold along x=N" instructions.
	for _, line := range helper.Split(parts[1], "\n") {
		if strings.Count(line, "fold along y=") > 0 {
			y := helper.String2Int(strings.Replace(line, "fold along y=", "", -1))
			folds = append(folds, []int{0, y})
		} else if strings.Count(line, "fold along x=") > 0 {
			x := helper.String2Int(strings.Replace(line, "fold along x=", "", -1))
			folds = append(folds, []int{1, x})
		}
	}
	maxX, maxY := MatrixMax(matrix)
	count := 0
	print := false // set true to render the grid after each fold
	for _, fold := range folds {
		count = 0 // recount visible dots per fold
		if fold[0] == 0 {
			matrix = FoldUp(matrix, fold[1])
			maxY = fold[1] - 1 // grid shrinks to just above the fold line
		} else if fold[0] == 1 {
			matrix = FoldLeft(matrix, fold[1])
			maxX = fold[1] - 1
		}
		// Scan the (shrunken) grid, counting and optionally rendering dots.
		for y := 0; y <= maxY; y++ {
			for x := 0; x <= maxX; x++ {
				// NOTE(review): the two fold-line branches below look
				// unreachable — maxX/maxY are shrunk to fold[1]-1 above, so
				// x/y never equal fold[1]. Confirm before removing.
				if fold[0] == 0 && fold[1] == y {
					if print {
						fmt.Print("-")
					}
				} else if fold[0] == 1 && fold[1] == x {
					if print {
						fmt.Print("|")
					}
				} else if _, ok := matrix[x][y]; ok {
					if print {
						fmt.Print("#")
					}
					count += 1
				} else {
					if print {
						fmt.Print(".")
					}
				}
			}
			if print {
				fmt.Println()
			}
		}
		if print {
			fmt.Println()
		}
		// Part 1 only needs the count after the first fold.
		if !Part2 {
			return count
		}
	}
	return count
}
package data
import (
"github.com/grafana/grafana-plugin-sdk-go/data"
"time"
)
// Table is a convenience structure for creating tables in response to
// SimpleJSON Table Queries. It wraps a single grafana-plugin-sdk data.Frame.
type Table struct {
	Frame *data.Frame
}

// Column is used by New to specify the columns to create.
type Column struct {
	// Name of the column
	Name string
	// Values for the column; passed straight through to data.NewField.
	Values interface{}
}
// New creates a new Table with the specified Column fields
func New(columns ...Column) (table *Table) {
var fields data.Fields
for _, column := range columns {
fields = append(fields, data.NewField(column.Name, nil, column.Values))
}
return &Table{Frame: data.NewFrame("frame", fields...)}
}
// GetTimestamps returns the values of the dataset's first time.Time column,
// or nil if the frame has no such column.
func (t Table) GetTimestamps() (timestamps []time.Time) {
	if index, found := t.getFirstTimestampColumn(); found {
		for i := 0; i < t.Frame.Fields[index].Len(); i++ {
			// Bug fix: read from the located column, not unconditionally
			// from column 0 (the original used Fields[0] here, returning the
			// wrong data whenever the time column was not the first field).
			timestamps = append(timestamps, t.Frame.Fields[index].At(i).(time.Time))
		}
	}
	return
}
// getFirstTimestampColumn locates the first field whose elements are
// time.Time values. Detection samples element 0, so empty fields are
// skipped. found is false when no such column exists.
func (t Table) getFirstTimestampColumn() (index int, found bool) {
	for i, f := range t.Frame.Fields {
		if f.Len() > 0 {
			if _, ok := f.At(0).(time.Time); ok {
				return i, true
			}
		}
	}
	return
}
// GetColumns returns the names of all columns in the dataset, in frame
// order (nil for an empty frame).
func (t Table) GetColumns() (columns []string) {
	for i := range t.Frame.Fields {
		columns = append(columns, t.Frame.Fields[i].Name)
	}
	return
}

// GetValues returns all values of the named column as a []interface{}.
// found is false when no column with that name exists.
func (t Table) GetValues(column string) (values []interface{}, found bool) {
	f, idx := t.Frame.FieldByName(column)
	if idx == -1 {
		return nil, false
	}
	for i := 0; i < f.Len(); i++ {
		values = append(values, f.At(i))
	}
	return values, true
}
// GetTimeValues returns the time values for the specified column name. If
// the column does not exist, found will be false.
// Will panic (type assertion in getFieldValues) if the column holds a
// different element type.
func (t Table) GetTimeValues(column string) (values []time.Time, found bool) {
	f, n := t.Frame.FieldByName(column)
	if n == -1 {
		return nil, false
	}
	return getFieldValues[time.Time](f), true
}

// GetFloatValues returns the float64 values for the specified column name.
// If the column does not exist, found will be false.
// Will panic if the column holds a different element type.
func (t Table) GetFloatValues(column string) (values []float64, found bool) {
	f, n := t.Frame.FieldByName(column)
	if n == -1 {
		return nil, false
	}
	return getFieldValues[float64](f), true
}

// GetStringValues returns the string values for the specified column name.
// If the column does not exist, found will be false.
// Will panic if the column holds a different element type.
func (t Table) GetStringValues(column string) (values []string, found bool) {
	f, n := t.Frame.FieldByName(column)
	if n == -1 {
		return nil, false
	}
	return getFieldValues[string](f), true
}
// getFieldValues copies every element of f into a freshly allocated []T.
// Panics (failed type assertion) if an element is not a T.
func getFieldValues[T any](f *data.Field) (values []T) {
	values = make([]T, f.Len())
	for i := 0; i < f.Len(); i++ {
		values[i] = f.At(i).(T)
	}
	return
}
package saes
import (
"github.com/OpenWhiteBox/primitives/matrix"
"github.com/OpenWhiteBox/primitives/number"
)
// Powers of x mod M(x).
// powx[i] = x^i in AES' GF(2^8); consumed as the Rcon round constants by
// StretchedKey.
var powx = [16]byte{0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f}

// Construction is a plain (unprotected) AES-128 implementation: 16-byte
// key, 16-byte blocks, 10 rounds.
type Construction struct {
	// A 16-byte AES key.
	Key []byte
}

// BlockSize returns the block size of AES. (Necessary to implement cipher.Block.)
func (constr Construction) BlockSize() int { return 16 }
// Encrypt encrypts the first block in src into dst. Dst and src may point at the same memory.
// Standard AES-128 flow: initial AddRoundKey, nine full rounds, then a
// final round without MixColumns. Only the first 16 bytes of src are read.
func (constr Construction) Encrypt(dst, src []byte) {
	roundKeys := constr.StretchedKey()
	copy(dst, src[:constr.BlockSize()])
	constr.AddRoundKey(roundKeys[0], dst)
	for i := 1; i <= 9; i++ {
		constr.SubBytes(dst)
		constr.ShiftRows(dst)
		constr.MixColumns(dst)
		constr.AddRoundKey(roundKeys[i], dst)
	}
	constr.SubBytes(dst)
	constr.ShiftRows(dst)
	constr.AddRoundKey(roundKeys[10], dst)
}

// Decrypt decrypts the first block in src into dst. Dst and src may point at the same memory.
// Runs the encryption steps in exact reverse order, using the inverse
// transformations and round keys 10 down to 0.
func (constr Construction) Decrypt(dst, src []byte) {
	roundKeys := constr.StretchedKey()
	copy(dst, src[:constr.BlockSize()])
	constr.AddRoundKey(roundKeys[10], dst)
	constr.UnShiftRows(dst)
	constr.UnSubBytes(dst)
	for i := 9; i >= 1; i-- {
		constr.AddRoundKey(roundKeys[i], dst)
		constr.UnMixColumns(dst)
		constr.UnShiftRows(dst)
		constr.UnSubBytes(dst)
	}
	constr.AddRoundKey(roundKeys[0], dst)
}
// rotw rotates a 32-bit word left by 8 bits (one byte).
func rotw(w uint32) uint32 { return w<<8 | w>>24 }

// StretchedKey implements AES' key schedule. It returns the 11 round keys
// derived from the master key. The first four 32-bit words are the raw key;
// each later word is the word four back XORed with the previous word —
// which, every fourth word, is first rotated, passed through the S-box, and
// XORed with the Rcon constant from powx.
func (constr *Construction) StretchedKey() [11][]byte {
	var (
		i         int    = 0
		temp      uint32 = 0
		stretched [4 * 11]uint32 // Stretched key
		split     [11][]byte     // Each round key is combined and its uint32s are turned into 4 bytes
	)
	for ; i < 4; i++ { // First key-length of stretched is the raw key.
		stretched[i] = (uint32(constr.Key[4*i]) << 24) |
			(uint32(constr.Key[4*i+1]) << 16) |
			(uint32(constr.Key[4*i+2]) << 8) |
			uint32(constr.Key[4*i+3])
	}
	for ; i < (4 * 11); i++ {
		temp = stretched[i-1]
		if (i % 4) == 0 {
			temp = constr.SubWord(rotw(temp)) ^ (uint32(powx[i/4-1]) << 24)
		}
		stretched[i] = stretched[i-4] ^ temp
	}
	// Repack each round key's four words into 16 big-endian bytes.
	for j := 0; j < 11; j++ {
		split[j] = make([]byte, 16)
		for k := 0; k < 4; k++ {
			word := stretched[4*j+k]
			split[j][4*k] = byte(word >> 24)
			split[j][4*k+1] = byte(word >> 16)
			split[j][4*k+2] = byte(word >> 8)
			split[j][4*k+3] = byte(word)
		}
	}
	return split
}
// AddRoundKey XORs roundKey into block, byte by byte, in place.
// (Idiomatic cleanup: `for i := range` instead of `for i, _ := range`, and
// `^=` instead of the spelled-out XOR-and-assign.)
func (constr *Construction) AddRoundKey(roundKey, block []byte) {
	for i := range block {
		block[i] ^= roundKey[i]
	}
}
// SubBytes rewrites each byte of block, in place, with its image under
// SubByte (the AES S-box).
func (constr *Construction) SubBytes(block []byte) {
	for i := range block {
		block[i] = constr.SubByte(block[i])
	}
}

// UnSubBytes rewrites each byte of block, in place, with its image under
// UnSubByte (the inverse S-box).
func (constr *Construction) UnSubBytes(block []byte) {
	for i := range block {
		block[i] = constr.UnSubByte(block[i])
	}
}
// SubWord applies SubByte to each byte of an unsigned integer word and returns the result.
func (constr *Construction) SubWord(w uint32) uint32 {
	return (uint32(constr.SubByte(byte(w>>24))) << 24) |
		(uint32(constr.SubByte(byte(w>>16))) << 16) |
		(uint32(constr.SubByte(byte(w>>8))) << 8) |
		uint32(constr.SubByte(byte(w)))
}

// SubByte is AES' S-box. It is a bijection.
// Computed algebraically rather than via a lookup table: multiplicative
// inverse in GF(2^8), then the affine transformation (matrix m plus
// constant 0x63).
func (constr *Construction) SubByte(e byte) byte {
	// AES S-Box
	m := matrix.Matrix{ // Linear component.
		matrix.Row{0xF1}, // 0b11110001
		matrix.Row{0xE3}, // 0b11100011
		matrix.Row{0xC7}, // 0b11000111
		matrix.Row{0x8F}, // 0b10001111
		matrix.Row{0x1F}, // 0b00011111
		matrix.Row{0x3E}, // 0b00111110
		matrix.Row{0x7C}, // 0b01111100
		matrix.Row{0xF8}, // 0b11111000
	}
	a := byte(0x63) // 0b01100011 - Affine component.
	return m.Mul(matrix.Row{byte(number.ByteFieldElem(e).Invert())})[0] ^ a
}

// UnSubByte is the inverse of SubByte. It is a bijection.
// Undoes the affine step first (subtract constant, multiply by the inverse
// matrix), then takes the multiplicative inverse in GF(2^8).
func (constr *Construction) UnSubByte(e byte) byte {
	// AES Inverse S-Box
	m := matrix.Matrix{
		matrix.Row{0xA4},
		matrix.Row{0x49},
		matrix.Row{0x92},
		matrix.Row{0x25},
		matrix.Row{0x4a},
		matrix.Row{0x94},
		matrix.Row{0x29},
		matrix.Row{0x52},
	}
	a := byte(0x63)
	invVal := m.Mul(matrix.Row{e ^ a})[0]
	return byte(number.ByteFieldElem(invVal).Invert())
}
// ShiftRows permutes the first sixteen bytes of block, in place, with the
// fixed AES ShiftRows permutation.
func (constr *Construction) ShiftRows(block []byte) {
	copy(block, []byte{
		block[0], block[5], block[10], block[15], block[4], block[9], block[14], block[3], block[8], block[13], block[2],
		block[7], block[12], block[1], block[6], block[11],
	})
}

// UnShiftRows permutes the first sixteen bytes of block, in place. It is
// the inverse of ShiftRows.
func (constr *Construction) UnShiftRows(block []byte) {
	copy(block, []byte{
		block[0], block[13], block[10], block[7], block[4], block[1], block[14], block[11], block[8], block[5], block[2],
		block[15], block[12], block[9], block[6], block[3],
	})
}

// MixColumns applies MixColumn to each of the four 4-byte words of block,
// in place.
func (constr *Construction) MixColumns(block []byte) {
	for i := 0; i < 16; i += 4 {
		constr.MixColumn(block[i : i+4])
	}
}

// UnMixColumns is the inverse of MixColumns.
func (constr *Construction) UnMixColumns(block []byte) {
	for i := 0; i < 16; i += 4 {
		constr.UnMixColumn(block[i : i+4])
	}
}
// MixColumn multiplies the first four bytes of slice, in place, by the
// fixed AES MixColumns ring element with coefficients {02, 01, 01, 03}.
// If the product has fewer than four coefficients the remaining bytes are
// zero-filled.
func (constr *Construction) MixColumn(slice []byte) {
	column := number.ArrayRingElem{
		number.ByteFieldElem(slice[0]), number.ByteFieldElem(slice[1]),
		number.ByteFieldElem(slice[2]), number.ByteFieldElem(slice[3]),
	}.Mul(number.ArrayRingElem{
		number.ByteFieldElem(0x02), number.ByteFieldElem(0x01),
		number.ByteFieldElem(0x01), number.ByteFieldElem(0x03),
	})
	for i := 0; i < 4; i++ {
		if len(column) > i {
			slice[i] = byte(column[i])
		} else {
			slice[i] = 0x00
		}
	}
}

// UnMixColumn is the inverse of MixColumn: multiplication by the inverse
// ring element with coefficients {0e, 09, 0d, 0b}.
func (constr *Construction) UnMixColumn(slice []byte) {
	column := number.ArrayRingElem{
		number.ByteFieldElem(slice[0]), number.ByteFieldElem(slice[1]),
		number.ByteFieldElem(slice[2]), number.ByteFieldElem(slice[3]),
	}.Mul(number.ArrayRingElem{
		number.ByteFieldElem(0x0e), number.ByteFieldElem(0x09),
		number.ByteFieldElem(0x0d), number.ByteFieldElem(0x0b),
	})
	for i := 0; i < 4; i++ {
		if len(column) > i {
			slice[i] = byte(column[i])
		} else {
			slice[i] = 0x00
		}
	}
}
package modules
import (
"github.com/bbuck/dragon-mud/logger"
"github.com/bbuck/dragon-mud/scripting/lua"
"github.com/bbuck/dragon-mud/text/tmpl"
)
// Tmpl is the templating module accessible in scripts. This module consists of
// two accessible methods:
// register(name, body)
// @param name: string = the name to associate with this template after
// @param body: string = the uncompiled body of the template
// registration
// register a template with the given name
// render(name, data)
// @param name: string = the name of the compiled template to use for
// generating output
// @param data: table = a table of data to provide to the rendering of the
// named templates
// render the template with the given name using the given data to populate
// it
// render_in_layout(layout, children, data)
// @param layout: string = the name of the layout template to render.
// @param children: string or table = the children to render in the layout.
// if provided as a string, then the name to use in the layout is
// 'content', otherise this is table of field names -> template names to
// use in generating layout content.
// @param data: table = a table of data to provide to the rendering of the
// named templates (used for all views, so must be merged)
// render the child templates with the provided and building an additional
// set of data containing the rendered children before rendering the final
// layout template which can position the child templates via normal
// rendering means.
var Tmpl = lua.TableMap{
"register": func(name, contents string) bool {
err := tmpl.Register(name, contents)
if err != nil {
fields := logger.Fields{
"error": err.Error(),
}
if len(contents) < 255 {
fields["template"] = contents
}
log("tmpl").WithFields(fields).Error("Register failed from script with error")
}
return err == nil
},
"render": func(engine *lua.Engine) int {
data := engine.PopTable().AsMapStringInterface()
name := engine.PopString()
log := log("tmpl").WithField("tmpl_name", name)
t, err := tmpl.Template(name)
if err != nil {
log.WithError(err).Error("Failed to fetch template name.")
engine.RaiseError(err.Error())
return 0
}
result, err := t.Render(data)
if err != nil {
log.WithFields(logger.Fields{
"error": err.Error(),
"data": data,
}).Error("Failed to render template from requested in script.")
}
engine.PushValue(result)
return 1
},
"render_in_layout": func(eng *lua.Engine) int {
ldata := eng.PopValue()
children := eng.PopValue()
parent := eng.PopString()
pt, err := tmpl.Template(parent)
if err != nil {
log("tmpl").WithError(err).WithField("template", parent).Warn("Parent template requested but undefined, returning empty string.")
eng.PushValue("")
return 1
}
var data map[string]interface{}
if ldata.IsTable() {
data = ldata.AsMapStringInterface()
} else {
data = make(map[string]interface{})
}
switch {
case children.IsString():
cs := children.AsString()
r, err := tmpl.Template(cs)
// default child name is content in the case of single strings
if err != nil {
log("tmpl").WithError(err).WithField("tempalte", cs).Warn("Template requested, but doesn't exit. Using empty string.")
data["content"] = ""
} else {
data["content"], err = r.Render(data)
if err != nil {
log("tmpl").WithError(err).WithField("template", cs).Error("Failed to render template")
data["content"] = ""
}
}
case children.IsTable():
children.ForEach(func(key, val *lua.Value) {
if key.IsString() {
ks := key.AsString()
if val.IsString() {
vs := val.AsString()
r, err := tmpl.Template(vs)
if err != nil {
log("tmpl").WithError(err).WithField("tempalte", vs).Warn("Template requested, but doesn't exit. Using empty string.")
data[ks] = ""
return
}
data[ks], err = r.Render(data)
if err != nil {
log("tmpl").WithError(err).WithField("template", ks).Error("Failed to render template.")
data[ks] = ""
}
} else {
log("tmpl").WithFields(logger.Fields{
"template": ks,
"type": val.String(),
}).Warn("Non-string value given as name of template, using empty string.")
data[ks] = ""
}
} else {
log("tmpl").WithField("type", key.String()).Warn("Non-string key provided as key of rendered template, ignoring")
}
})
}
res, err := pt.Render(data)
if err != nil {
log("tmpl").WithError(err).WithField("template", pt).Error("Failed to render parent template, returning empty string.")
eng.PushValue("")
return 1
}
eng.PushValue(res)
return 1
},
} | scripting/modules/tmpl.go | 0.505859 | 0.404155 | tmpl.go | starcoder |
package mathh
// Min2Uint returns the smaller of the two passed uint values.
func Min2Uint(a, b uint) uint {
	if b < a {
		return b
	}
	return a
}

// Max2Uint returns the larger of the two passed uint values.
func Max2Uint(a, b uint) uint {
	if b > a {
		return b
	}
	return a
}

// Min2Uint8 returns the smaller of the two passed uint8 values.
func Min2Uint8(a, b uint8) uint8 {
	if b < a {
		return b
	}
	return a
}

// Max2Uint8 returns the larger of the two passed uint8 values.
func Max2Uint8(a, b uint8) uint8 {
	if b > a {
		return b
	}
	return a
}

// Min2Uint16 returns the smaller of the two passed uint16 values.
func Min2Uint16(a, b uint16) uint16 {
	if b < a {
		return b
	}
	return a
}

// Max2Uint16 returns the larger of the two passed uint16 values.
func Max2Uint16(a, b uint16) uint16 {
	if b > a {
		return b
	}
	return a
}

// Min2Uint32 returns the smaller of the two passed uint32 values.
func Min2Uint32(a, b uint32) uint32 {
	if b < a {
		return b
	}
	return a
}

// Max2Uint32 returns the larger of the two passed uint32 values.
func Max2Uint32(a, b uint32) uint32 {
	if b > a {
		return b
	}
	return a
}
// Min2Int returns the smaller of the two passed int values.
func Min2Int(a, b int) int {
	if b < a {
		return b
	}
	return a
}

// Max2Int returns the larger of the two passed int values.
func Max2Int(a, b int) int {
	if b > a {
		return b
	}
	return a
}

// Min2Int8 returns the smaller of the two passed int8 values.
func Min2Int8(a, b int8) int8 {
	if b < a {
		return b
	}
	return a
}

// Max2Int8 returns the larger of the two passed int8 values.
func Max2Int8(a, b int8) int8 {
	if b > a {
		return b
	}
	return a
}

// Min2Int16 returns the smaller of the two passed int16 values.
func Min2Int16(a, b int16) int16 {
	if b < a {
		return b
	}
	return a
}

// Max2Int16 returns the larger of the two passed int16 values.
func Max2Int16(a, b int16) int16 {
	if b > a {
		return b
	}
	return a
}

// Min2Int32 returns the smaller of the two passed int32 values.
func Min2Int32(a, b int32) int32 {
	if b < a {
		return b
	}
	return a
}

// Max2Int32 returns the larger of the two passed int32 values.
func Max2Int32(a, b int32) int32 {
	if b > a {
		return b
	}
	return a
}

// Min2Int64 returns the smaller of the two passed int64 values.
func Min2Int64(a, b int64) int64 {
	if b < a {
		return b
	}
	return a
}

// Max2Int64 returns the larger of the two passed int64 values.
func Max2Int64(a, b int64) int64 {
	if b > a {
		return b
	}
	return a
}
// Min2Float64 returns minimum of two passed float64.
// NaN in either argument propagates to the result, and signed zeros are
// ordered (Min2Float64(+0, -0) == -0), unlike a bare "<" comparison.
func Min2Float64(a, b float64) float64 {
	if IsNaNFloat64(a) || IsNaNFloat64(b) {
		return NaNFloat64()
	}
	if IsPositiveZeroFloat64(a) && IsNegativeZeroFloat64(b) {
		return NegativeZeroFloat64()
	}
	if a <= b {
		return a
	}
	return b
}

// Max2Float64 returns maximum of two passed float64.
// NaN in either argument propagates to the result, and signed zeros are
// ordered (Max2Float64(-0, +0) == +0).
func Max2Float64(a, b float64) float64 {
	if IsNaNFloat64(a) || IsNaNFloat64(b) {
		return NaNFloat64()
	}
	if IsNegativeZeroFloat64(a) && IsPositiveZeroFloat64(b) {
		return PositiveZeroFloat64()
	}
	if a >= b {
		return a
	}
	return b
}
package nanovgo4
import (
"math"
)
// The following functions can be used to make calculations on 2x3 transformation matrices.

// TransformMatrix is a 2x3 affine transform stored as [6]float32
// {a, b, c, d, e, f}: a point (x, y) maps to (a*x + c*y + e, b*x + d*y + f).
type TransformMatrix [6]float32

// IdentityMatrix returns the identity transform.
func IdentityMatrix() TransformMatrix {
	return TransformMatrix{1.0, 0.0, 0.0, 1.0, 0.0, 0.0}
}

// TranslateMatrix returns a transform that translates by (tx, ty).
func TranslateMatrix(tx, ty float32) TransformMatrix {
	return TransformMatrix{1.0, 0.0, 0.0, 1.0, tx, ty}
}

// ScaleMatrix returns a transform that scales by sx horizontally and sy
// vertically.
func ScaleMatrix(sx, sy float32) TransformMatrix {
	return TransformMatrix{sx, 0.0, 0.0, sy, 0.0, 0.0}
}

// RotateMatrix returns a rotation by a radians.
func RotateMatrix(a float32) TransformMatrix {
	s, c := math.Sincos(float64(a))
	return TransformMatrix{float32(c), float32(s), float32(-s), float32(c), 0.0, 0.0}
}

// SkewXMatrix returns a transform that skews along the x axis by a radians.
func SkewXMatrix(a float32) TransformMatrix {
	return TransformMatrix{1.0, 0.0, float32(math.Tan(float64(a))), 1.0, 0.0, 0.0}
}

// SkewYMatrix returns a transform that skews along the y axis by a radians.
func SkewYMatrix(a float32) TransformMatrix {
	return TransformMatrix{1.0, float32(math.Tan(float64(a))), 0.0, 1.0, 0.0, 0.0}
}
// Multiply returns the product of the two transforms (A = A*B). The
// receiver is a value, so the caller's matrix is left untouched.
func (t TransformMatrix) Multiply(s TransformMatrix) TransformMatrix {
	r := t
	r[0] = t[0]*s[0] + t[1]*s[2]
	r[1] = t[0]*s[1] + t[1]*s[3]
	r[2] = t[2]*s[0] + t[3]*s[2]
	r[3] = t[2]*s[1] + t[3]*s[3]
	r[4] = t[4]*s[0] + t[5]*s[2] + s[4]
	r[5] = t[4]*s[1] + t[5]*s[3] + s[5]
	return r
}

// PreMultiply returns the product with the operands swapped (A = B*A).
func (t TransformMatrix) PreMultiply(s TransformMatrix) TransformMatrix {
	return s.Multiply(t)
}
// Inverse returns the inverse of the transform. When the matrix is
// effectively singular (|det| <= 1e-6) no inverse exists and the identity
// matrix is returned instead. (The original comment's "Returns 1 … else 0"
// was a leftover from the C API this was ported from.)
func (t TransformMatrix) Inverse() TransformMatrix {
	// Work in float64 for precision, converting back at the end.
	t0 := float64(t[0])
	t1 := float64(t[1])
	t2 := float64(t[2])
	t3 := float64(t[3])
	det := t0*t3 - t2*t1
	if det > -1e-6 && det < 1e-6 {
		return IdentityMatrix()
	}
	t4 := float64(t[4])
	t5 := float64(t[5])
	invdet := 1.0 / det
	return TransformMatrix{
		float32(t3 * invdet),
		float32(-t1 * invdet),
		float32(-t2 * invdet),
		float32(t0 * invdet),
		float32((t2*t5 - t3*t4) * invdet),
		float32((t1*t4 - t0*t5) * invdet),
	}
}
// TransformPoint transforms the point (sx, sy) by the matrix, returning
// the mapped coordinates (dx, dy).
func (t TransformMatrix) TransformPoint(sx, sy float32) (dx, dy float32) {
	dx = sx*t[0] + sy*t[2] + t[4]
	dy = sx*t[1] + sy*t[3] + t[5]
	return
}

// ToMat3x4 expands the 2x3 transform into a 3x4 matrix laid out as three
// 4-element columns, each zero-padded (translation column carries a 1).
func (t TransformMatrix) ToMat3x4() []float32 {
	return []float32{
		t[0], t[1], 0.0, 0.0,
		t[2], t[3], 0.0, 0.0,
		t[4], t[5], 1.0, 0.0,
	}
}

// getAverageScale returns the mean of the lengths of (t[0], t[2]) and
// (t[1], t[3]) — an average scale factor of the 2x2 part of the transform.
func (t TransformMatrix) getAverageScale() float32 {
	sx := math.Sqrt(float64(t[0]*t[0] + t[2]*t[2]))
	sy := math.Sqrt(float64(t[1]*t[1] + t[3]*t[3]))
	return float32((sx + sy) * 0.5)
}
package wordvector
import (
"math"
"os"
"github.com/7phs/fastgotext/vector"
"github.com/7phs/fastgotext/wrapper/array"
"github.com/7phs/fastgotext/wrapper/emd"
)
var (
	// mfPool recycles float-matrix buffers used for pairwise word-distance
	// matrices in WMDistance.
	mfPool = array.NewFloatMatrixPool()
)

// WordVectorDictionary looks up a word's index in a model vocabulary.
type WordVectorDictionary interface {
	Find(string) int
}

// WordVectorModel provides word embeddings and the vocabulary behind them.
type WordVectorModel interface {
	GetDictionary() WordVectorDictionary
	WordToVector(word string) vector.F32Vector
}

// wordVector wraps a WordVectorModel with document-level distance and
// similarity operations.
type wordVector struct {
	model WordVectorModel
}

// WordVector returns a new wordVector backed by the given model.
func WordVector(model WordVectorModel) *wordVector {
	return &wordVector{
		model: model,
	}
}
// filterDoc drops words that are not in the model's dictionary, preserving
// order.
// NOTE(review): it keeps words with Find(word) > 0, while WMDistance's
// WordIndex checks use >= 0 — confirm whether vocabulary index 0 should
// really be treated as "not found" here.
func (w *wordVector) filterDoc(doc []string) (res []string) {
	dict := w.model.GetDictionary()
	res = make([]string, 0, len(doc))
	for _, word := range doc {
		if dict.Find(word) > 0 {
			res = append(res, word)
		}
	}
	return
}

// DocToVectors maps every word of doc to its embedding vector, in order.
func (w *wordVector) DocToVectors(doc []string) [][]float32 {
	res := make([][]float32, 0, len(doc))
	for _, word := range doc {
		res = append(res, w.model.WordToVector(word))
	}
	return res
}

// WordsDistance returns the Euclidean distance between the embeddings of
// word1 and word2 (element-wise difference, squared, summed, square-rooted).
func (w *wordVector) WordsDistance(word1, word2 string) float32 {
	vec := w.model.WordToVector(word1)
	vec.Sub(w.model.WordToVector(word2))
	vec.Pow()
	return float32(math.Sqrt(float64(vec.Sum())))
}
// WMDistance computes the Word Mover's Distance between two documents.
// Out-of-vocabulary words are filtered first; if either document then has
// no words left, +Inf and os.ErrInvalid are returned. When the combined
// vocabulary holds at most one word the distance is defined as 1.
func (w *wordVector) WMDistance(doc1, doc2 []string) (float32, error) {
	doc1 = w.filterDoc(doc1)
	doc2 = w.filterDoc(doc2)
	dict1 := Dictionary(doc1...)
	dict2 := Dictionary(doc2...)
	if dict1.IsEmpty() || dict2.IsEmpty() {
		return float32(math.Inf(1)), os.ErrInvalid
	}
	dict := dict1.Join(dict2)
	if dict.Len() <= 1 {
		return 1., nil
	}
	// Pairwise word-distance matrix over the joint vocabulary; the buffer
	// is pooled to avoid reallocating on every call.
	distanceMatrix := mfPool.Get(uint(dict.Len()), uint(dict.Len()))
	defer distanceMatrix.Free()
	data := distanceMatrix.Slice()
	for i, word1 := range dict {
		for j, word2 := range dict {
			// Only pairs present in both documents' vocabularies get a
			// distance; all other cells stay at their zero value.
			if dict1.WordIndex(word1) >= 0 && dict2.WordIndex(word2) >= 0 {
				data[i][j] = array.FloatMatrixRec(w.WordsDistance(word1, word2))
			}
		}
	}
	// Earth mover's distance between the documents' normalized bags of words.
	return emd.Emd(dict.BowNormalize(doc1), dict.BowNormalize(doc2), distanceMatrix), nil
}
// docToUnitVec reduces a document to the mean of its in-vocabulary word
// vectors, normalized to unit length (left as-is when the mean has zero
// length).
func (w *wordVector) docToUnitVec(doc []string) (vector.F32Vector, error) {
	doc = w.filterDoc(doc)
	core, err := vector.Mean(w.DocToVectors(doc)...)
	if err != nil {
		// TODO error wrap
		return nil, err
	}
	coreF := vector.F32Vector(core)
	if veclen := coreF.Distance(); veclen > .0 {
		coreF.Normalize(veclen)
	}
	return coreF, nil
}

// Similarity returns the dot product of the two documents' unit centroid
// vectors — i.e. the cosine similarity of their mean embeddings.
func (w *wordVector) Similarity(doc1, doc2 []string) (float32, error) {
	unitCore1, err := w.docToUnitVec(doc1)
	if err != nil {
		// TODO error wrap
		return .0, err
	}
	unitCore2, err := w.docToUnitVec(doc2)
	if err != nil {
		// TODO error wrap
		return .0, err
	}
	return vector.F32Dot(unitCore1, unitCore2), nil
}
package cpu
import (
"github.com/robherley/go-gameboy/internal/bits"
"github.com/robherley/go-gameboy/pkg/cartridge"
errs "github.com/robherley/go-gameboy/pkg/errors"
)
// Registers models the Game Boy CPU register file.
// https://gbdev.io/pandocs/CPU_Registers_and_Flags.html#registers
type Registers struct {
	A  byte // accumulator
	F  byte // flags: Z N H C in the high nibble (see Flag constants below)
	B  byte
	C  byte
	D  byte
	E  byte
	H  byte
	L  byte
	SP uint16 // stack pointer
	PC uint16 // program counter
}

// RegistersForDMG returns the register state the original Game Boy (DMG)
// boot ROM leaves behind before handing control to the cartridge at 0x0100.
// The half-carry and carry flags start set only when the cartridge header
// checksum is non-zero.
// https://gbdev.io/pandocs/Power_Up_Sequence.html#cpu-registers
func RegistersForDMG(cart *cartridge.Cartridge) *Registers {
	r := &Registers{
		A:  0x01,
		F:  0x00,
		B:  0x00,
		C:  0x13,
		D:  0x00,
		E:  0xD8,
		H:  0x01,
		L:  0x4D,
		PC: 0x0100,
		SP: 0xFFFE,
	}
	r.SetFlag(FlagZ, true)
	// set carry and half carry if header checksum is != 0x00
	if cart.CalculateHeaderCheckSum() != 0 {
		r.SetFlag(FlagH, true)
		r.SetFlag(FlagC, true)
	}
	return r
}
// Set writes val to the named register. 8-bit registers store the low byte
// of val; the 16-bit pairs (AF/BC/DE/HL) are split across their two halves.
// Panics with an invalid-operand error for an unknown register.
func (registers *Registers) Set(reg Register, val uint16) {
	switch reg {
	case A:
		registers.A = byte(val)
	case B:
		registers.B = byte(val)
	case C:
		registers.C = byte(val)
	case D:
		registers.D = byte(val)
	case E:
		registers.E = byte(val)
	case F:
		registers.F = byte(val)
	case H:
		registers.H = byte(val)
	case L:
		registers.L = byte(val)
	case SP:
		registers.SP = val
	case PC:
		registers.PC = val
	case AF:
		registers.SetAF(val)
	case BC:
		registers.SetBC(val)
	case DE:
		registers.SetDE(val)
	case HL:
		registers.SetHL(val)
	default:
		panic(errs.NewInvalidOperandError(reg))
	}
}

// Get reads the named register; 8-bit registers are zero-extended to
// uint16. Panics with an invalid-operand error for an unknown register.
func (registers *Registers) Get(reg Register) uint16 {
	switch reg {
	case A:
		return uint16(registers.A)
	case B:
		return uint16(registers.B)
	case C:
		return uint16(registers.C)
	case D:
		return uint16(registers.D)
	case E:
		return uint16(registers.E)
	case F:
		return uint16(registers.F)
	case H:
		return uint16(registers.H)
	case L:
		return uint16(registers.L)
	case SP:
		return registers.SP
	case PC:
		return registers.PC
	case AF:
		return registers.GetAF()
	case BC:
		return registers.GetBC()
	case DE:
		return registers.GetDE()
	case HL:
		return registers.GetHL()
	default:
		panic(errs.NewInvalidOperandError(reg))
	}
}
/*
	Registers can be accessed as one 16 bit register OR separate 8 bit

	|16|Hi|Lo|
	|AF|A |* |
	|BC|B |C |
	|DE|D |E |
	|HL|H |L |
*/

// GetAF returns A and F combined as one 16-bit value (A high, F low).
func (r *Registers) GetAF() uint16 {
	return bits.To16(r.A, r.F)
}

// SetAF splits val into A (high byte) and F (low byte). The low nibble of
// F is masked to 0 — those flag bits do not exist on the hardware.
func (r *Registers) SetAF(val uint16) {
	r.A = bits.Hi(val)
	r.F = bits.Lo(val & 0x00F0)
}

// GetBC returns B and C combined as one 16-bit value.
func (r *Registers) GetBC() uint16 {
	return bits.To16(r.B, r.C)
}

// SetBC splits val into B (high byte) and C (low byte).
func (r *Registers) SetBC(val uint16) {
	r.B = bits.Hi(val)
	r.C = bits.Lo(val)
}

// GetDE returns D and E combined as one 16-bit value.
func (r *Registers) GetDE() uint16 {
	return bits.To16(r.D, r.E)
}

// SetDE splits val into D (high byte) and E (low byte).
func (r *Registers) SetDE(val uint16) {
	r.D = bits.Hi(val)
	r.E = bits.Lo(val)
}

// GetHL returns H and L combined as one 16-bit value.
func (r *Registers) GetHL() uint16 {
	return bits.To16(r.H, r.L)
}

// SetHL splits value into H (high byte) and L (low byte).
func (r *Registers) SetHL(value uint16) {
	r.H = bits.Hi(value)
	r.L = bits.Lo(value)
}
/*
	Flags

	The "F" register holds the CPU flags like so:

	|7|6|5|4|3|2|1|0|
	|Z|N|H|C|0|0|0|0|
*/

// Flag aliases for specific bits in register F
type Flag = byte

const (
	// Zero flag
	FlagZ Flag = 7
	// Subtraction flag
	FlagN Flag = 6
	// Half carry flag
	FlagH Flag = 5
	// Carry flag
	FlagC Flag = 4
)

// IsCondition reports whether the given jump/call condition (NZ, Z, NC, C)
// holds for the current flag register. Panics for an unknown condition.
func (r *Registers) IsCondition(cond Condition) bool {
	switch cond {
	case NZ:
		return !r.GetFlag(FlagZ)
	case Z:
		return r.GetFlag(FlagZ)
	case NC:
		return !r.GetFlag(FlagC)
	case Ca:
		return r.GetFlag(FlagC)
	default:
		panic(errs.NewInvalidOperandError(cond))
	}
}

// GetFlag reports whether flag bit f is set in register F.
func (r *Registers) GetFlag(f Flag) bool {
	return bits.GetNBit(r.F, f)
}

// SetFlag sets (set == true) or clears (set == false) flag bit f in F.
func (r *Registers) SetFlag(f Flag, set bool) {
	if set {
		r.F = bits.SetNBit(r.F, f)
	} else {
		r.F = bits.ClearNBit(r.F, f)
	}
}

// SetRotateAndShiftFlags: helper to set flags for rotate/shift funcs.
// Z reflects a zero result, N and H are always cleared, and C takes the
// bit rotated/shifted out.
func (r *Registers) SetRotateAndShiftFlags(result byte, isCarry bool) {
	r.SetFlag(FlagZ, result == 0)
	r.SetFlag(FlagN, false)
	r.SetFlag(FlagH, false)
	r.SetFlag(FlagC, isCarry)
}
// Package ads implements controlling the A/D and reading sampled values for the ADS1115 A/D Converter
package ads
import (
"github.com/sconklin/go-i2c"
)
// SensorType identifies which supported A/D converter chip is in use.
// Currently only the TI ADS1115 is supported.
// NOTE(review): the original comment referenced Bosch BMP180/BMP280
// temperature/pressure sensors — a copy/paste leftover from another driver.
type SensorType int

// String implements the fmt.Stringer interface.
func (v SensorType) String() string {
	if v == ADS1115 {
		return "ADS1115"
	}
	return "!!! unknown !!!"
}

const (
	// ADS1115 A/D Converter
	ADS1115 SensorType = iota
)

// InputMuxMode : Input Multiplexer Mode
const (
	MUX_DIFFERENTIAL_0_1 = 0 // Differential, 0 Positive, 1 Negative
	MUX_DIFFERENTIAL_0_3 = 1 // Differential, 0 Positive, 3 Negative
	MUX_DIFFERENTIAL_1_3 = 2 // Differential, 1 Positive, 3 Negative
	MUX_DIFFERENTIAL_2_3 = 3 // Differential, 2 Positive, 3 Negative
	MUX_SINGLE_0         = 4 // Single Ended 0
	MUX_SINGLE_1         = 5 // Single Ended 1
	MUX_SINGLE_2         = 6 // Single Ended 2
	MUX_SINGLE_3         = 7 // Single Ended 3
	MUX_MAX              = 7
)

// PGAMode : Programmable Gain Amplifier config
const (
	PGA_6_144 = 0 // Full Scale Range = +/- 6.144V
	PGA_4_096 = 1 // Full Scale Range = +/- 4.096V
	PGA_2_048 = 2 // Full Scale Range = +/- 2.048V
	PGA_1_024 = 3 // Full Scale Range = +/- 1.024V
	PGA_0_512 = 4 // Full Scale Range = +/- 0.512V
	PGA_0_256 = 5 // Full Scale Range = +/- 0.256V (original comment said 0.128V, contradicting the name)
	// PGA_0_256a = 6 // Full Scale Range = +/- 0.256V
	// PGA_0_256b = 7 // Full Scale Range = +/- 0.256V
	PGA_MAX = 5
)

// Mode : Conversion Mode
const (
	MODE_CONTINUOUS  = 0 // Continuous Conversion
	MODE_SINGLE_SHOT = 1 // Single Shot Conversion
	MODE_MAX         = 1
)

// Datarate is the A/D sampling rate
const (
	RATE_8   = 0 // 8 Samples per Second
	RATE_16  = 1 // 16 Samples per Second
	RATE_32  = 2 // 32 Samples per Second
	RATE_64  = 3 // 64 Samples per Second
	RATE_128 = 4 // 128 Samples per Second
	RATE_150 = 5 // 150 Samples per Second
	// NOTE(review): the ADS1115 datasheet lists 250 SPS for DR = 5; the
	// RATE_150 name/comment looks wrong — confirm against the datasheet.
	RATE_475 = 6 // 475 Samples per Second
	RATE_860 = 7 // 860 Samples per Second
	RATE_MAX = 7
)

// Comparator Mode
const (
	COMP_MODE_TRADITIONAL = 0
	COMP_MODE_WINDOW      = 1
	COMP_MODE_MAX         = 1
)

// Comparator Polarity
const (
	COMP_POL_ACTIVE_LOW  = 0
	COMP_POL_ACTIVE_HIGH = 1
	COMP_POL_MAX         = 1
)

// Comparator Latch
const (
	COMP_LAT_OFF = 0
	COMP_LAT_ON  = 1
	COMP_LAT_MAX = 1
)

// Comparator Queue
const (
	COMP_QUE_ONE     = 0
	COMP_QUE_TWO     = 1
	COMP_QUE_FOUR    = 2
	COMP_QUE_DISABLE = 3
	COMP_QUE_MAX     = 3
)
// SensorInterface is an abstract ADSx sensor interface. The Set* methods
// mutate only the driver's cached configuration; nothing reaches the chip
// until WriteConfig is called.
type SensorInterface interface {
	// ReadConfig reads configuration from the chip
	ReadConfig(i2c *i2c.I2C) (uint16, error)
	// WriteConfig writes the stored configuration to the chip
	WriteConfig(i2c *i2c.I2C) error
	// SetMuxMode sets the stored configuration (does not write to chip)
	SetMuxMode(uint16) error
	// SetPgaMode sets the stored configuration (does not write to chip)
	SetPgaMode(uint16) error
	// SetConversionMode sets the stored configuration (does not write to chip)
	SetConversionMode(uint16) error
	// SetDataRate sets the stored configuration (does not write to chip)
	SetDataRate(uint16) error
	// SetComparatorMode sets the stored configuration (does not write to chip)
	SetComparatorMode(uint16) error
	// SetComparatorPolarity sets the stored configuration (does not write to chip)
	SetComparatorPolarity(uint16) error
	// SetComparatorLatch sets the stored configuration (does not write to chip)
	SetComparatorLatch(uint16) error
	// SetComparatorQueue sets the stored configuration (does not write to chip)
	SetComparatorQueue(uint16) error
	// ReadStatus reads the status register from the chip;
	// returns nonzero if a conversion is in progress
	ReadStatus(i2c *i2c.I2C) (uint16, error)
	// StartConversion will start a conversion in single-shot mode
	StartConversion(i2c *i2c.I2C) error
	// ReadLoThreshold reads the low comparator threshold from the chip
	ReadLoThreshold(i2c *i2c.I2C) (int16, error)
	// ReadHiThreshold reads the high comparator threshold from the chip
	ReadHiThreshold(i2c *i2c.I2C) (int16, error)
	// ReadConversion reads the converted value from the chip
	ReadConversion(i2c *i2c.I2C) (int16, error)
}
// ADS represents only one model of A/D so far. It binds a
// chip-specific SensorInterface implementation to the I2C bus handle
// used for every transaction.
type ADS struct {
	sensorType SensorType      // which supported chip this handle drives
	i2c        *i2c.I2C        // open bus connection shared by all calls
	ads        SensorInterface // chip-specific implementation
}
// unsupportedSensorTypeError is returned by NewADS when the requested
// SensorType has no SensorInterface implementation.
type unsupportedSensorTypeError struct{}

func (unsupportedSensorTypeError) Error() string {
	return "ads: unsupported sensor type"
}

// NewADS creates a new device interface for the given sensor type.
// It performs an initial configuration read so that a wiring or bus
// problem surfaces immediately rather than on first use.
func NewADS(sensorType SensorType, i2c *i2c.I2C) (*ADS, error) {
	v := &ADS{sensorType: sensorType, i2c: i2c}
	switch sensorType {
	case ADS1115:
		v.ads = &SensorADS1115{}
	default:
		// Previously an unrecognized sensorType left v.ads nil and the
		// ReadConfig call below panicked with a nil-pointer dereference.
		return nil, unsupportedSensorTypeError{}
	}
	if _, err := v.ads.ReadConfig(i2c); err != nil {
		return nil, err
	}
	return v, nil
}
// ReadConfig from the chip
func (v *ADS) ReadConfig() (uint16, error) {
t, err := v.ads.ReadConfig(v.i2c)
return t, err
}
// WriteConfig to the chip from the stored config data
func (v *ADS) WriteConfig() error {
err := v.ads.WriteConfig(v.i2c)
return err
}
// SetMuxMode in stored config
func (v *ADS) SetMuxMode(imm uint16) error {
err := v.ads.SetMuxMode(imm)
return err
}
// SetPgaMode in stored config
func (v *ADS) SetPgaMode(pm uint16) error {
err := v.ads.SetPgaMode(pm)
return err
}
// SetConversionMode in stored config
func (v *ADS) SetConversionMode(md uint16) error {
err := v.ads.SetConversionMode(md)
return err
}
// SetDataRate in stored config
func (v *ADS) SetDataRate(dr uint16) error {
err := v.ads.SetDataRate(dr)
return err
}
// SetComparatorMode in stored config
func (v *ADS) SetComparatorMode(cm uint16) error {
err := v.ads.SetComparatorMode(cm)
return err
}
// SetComparatorPolarity in stored config
func (v *ADS) SetComparatorPolarity(cp uint16) error {
err := v.ads.SetComparatorPolarity(cp)
return err
}
// SetComparatorLatch in stored config
func (v *ADS) SetComparatorLatch(cl uint16) error {
err := v.ads.SetComparatorLatch(cl)
return err
}
// SetComparatorQueue in stored config
func (v *ADS) SetComparatorQueue(cq uint16) error {
err := v.ads.SetComparatorQueue(cq)
return err
}
// ReadStatus from the chip
func (v *ADS) ReadStatus() (uint16, error) {
t, err := v.ads.ReadStatus(v.i2c)
return t, err
}
// StartConversion if in single-shot mode
func (v *ADS) StartConversion() error {
err := v.ads.StartConversion(v.i2c)
return err
}
// ReadLoThreshold for comparator from the chip
func (v *ADS) ReadLoThreshold() (int16, error) {
t, err := v.ads.ReadLoThreshold(v.i2c)
return t, err
}
// ReadHiThreshold for comparator from the chip
func (v *ADS) ReadHiThreshold() (int16, error) {
t, err := v.ads.ReadHiThreshold(v.i2c)
return t, err
}
// ReadConversion value from the chip
func (v *ADS) ReadConversion() (int16, error) {
t, err := v.ads.ReadConversion(v.i2c)
return t, err
} | ads.go | 0.76454 | 0.436682 | ads.go | starcoder |
package types
import "fmt"
// CanAssign checks if right can be assigned to left
// and whether a cast is needed.
//
// Numeric constants are assignable (with a cast) exactly when their
// value fits left's range; other constants are compared via their
// underlying type. Types, packages and bond functions are never
// assignable. A nil right-hand side is assignable (with a cast) to
// pointers, slices, non-bond functions and interfaces.
func CanAssign(left, right T) (canAssign bool, needCast bool) {
	if c, ok := right.(*Const); ok {
		if _, ok := c.Type.(Number); ok {
			// For numeric constants assignability and the need for a
			// cast coincide: both hold iff the value is in range.
			ret := InRange(c.Value.(int64), left)
			return ret, ret
		}
		// Non-numeric constant: compare its underlying type below.
		right = c.Type
	}
	if _, ok := left.(*Type); ok {
		return false, false
	}
	if _, ok := left.(*Pkg); ok {
		return false, false
	}
	if f, ok := left.(*Func); ok {
		if f.IsBond {
			return false, false
		}
	}
	if f, ok := right.(*Func); ok {
		if f.IsBond {
			return false, false
		}
	}
	if IsNil(right) {
		// nil can be cast into any nullable type except bond funcs.
		switch left := left.(type) {
		case *Pointer:
			return true, true
		case *Slice:
			return true, true
		case *Func:
			if left.IsBond {
				return false, false
			}
			return true, true
		case *Interface:
			return true, true
		}
		return false, false
	}
	return SameType(left, right), false
}
// SameType checks if two types are of the same type.
// Composite types (pointers, slices, arrays, functions) are compared
// structurally; structs and interfaces by identity; null and constants
// never match anything other than themselves (handled by the identity
// check at the top). An unknown implementation of T panics.
func SameType(t1, t2 T) bool {
	if t1 == t2 {
		return true
	}
	switch t1 := t1.(type) {
	case null:
		return false
	case *Const:
		return false
	case Basic:
		if t2, ok := t2.(Basic); ok {
			return t2 == t1
		}
		return false
	case *Pointer:
		if t2, ok := t2.(*Pointer); ok {
			return SameType(t1.T, t2.T)
		}
		return false
	case *Slice:
		if t2, ok := t2.(*Slice); ok {
			return SameType(t1.T, t2.T)
		}
		return false
	case *Array:
		// Arrays must match in both length and element type.
		if t2, ok := t2.(*Array); ok {
			return t1.N == t2.N && SameType(t1.T, t2.T)
		}
		return false
	case *Func:
		// Functions match when every argument and return type matches
		// pairwise.
		t2, ok := t2.(*Func)
		if !ok {
			return false
		}
		if len(t1.Args) != len(t2.Args) {
			return false
		}
		if len(t1.Rets) != len(t2.Rets) {
			return false
		}
		for i, t := range t1.Args {
			if !SameType(t.T, t2.Args[i].T) {
				return false
			}
		}
		for i, t := range t1.Rets {
			if !SameType(t.T, t2.Rets[i].T) {
				return false
			}
		}
		return true
	case *Struct:
		// Structs and interfaces are nominal: identity only.
		if t2, ok := t2.(*Struct); ok {
			return t1 == t2
		}
		return false
	case *Interface:
		if t2, ok := t2.(*Interface); ok {
			return t1 == t2
		}
		return false
	default:
		panic(fmt.Errorf("invalid type: %T", t1))
	}
}
// BothPointer reports whether t1 and t2 can be treated as the same
// pointer type: true when both are the same pointer type, or when one
// is nil and the other is a pointer; false otherwise.
func BothPointer(t1, t2 T) bool {
	p1, p2 := PointerOf(t1), PointerOf(t2)
	switch {
	case IsNil(t1) && p2 != nil:
		return true
	case IsNil(t2) && p1 != nil:
		return true
	case p1 == nil || p2 == nil:
		return false
	}
	return SameType(p1, p2)
}
// BothFuncPointer checks if the two types are comparable func pointers.
// True when both are the same func-pointer type, or when one is nil and
// the other is a func pointer; false otherwise.
func BothFuncPointer(t1, t2 T) bool {
	fp1, fp2 := IsFuncPointer(t1), IsFuncPointer(t2)
	switch {
	case IsNil(t1) && fp2:
		return true
	case IsNil(t2) && fp1:
		return true
	case !fp1 || !fp2:
		return false
	}
	return SameType(t1, t2)
}
// BothSlice reports whether t1 and t2 can be treated as the same slice
// type: true when both are the same slice type, or when one is nil and
// the other is a slice. It returns false when either operand is a
// non-nil non-slice, or when the element types differ.
// (The previous comment described an older signature that returned the
// element type; the function returns a bool.)
func BothSlice(t1, t2 T) bool {
	p1 := SliceOf(t1)
	p2 := SliceOf(t2)
	if IsNil(t1) && p2 != nil {
		return true
	} else if IsNil(t2) && p1 != nil {
		return true
	} else if p1 == nil || p2 == nil {
		return false
	}
	return SameType(p1, p2)
}
// CastConst checks if a const can be used to define a const with type T.
// and return the Const type
func CastConst(ct *Const, t T) T {
if SameType(t, ct.Type) {
return &Const{Value: ct.Value, Type: t}
}
_, ok := ct.Type.(Number)
if IsInteger(t) && ok {
ret, e := NewConstInt(ct.Value.(int64), t)
if e == nil {
return ret
}
}
return nil
} | pl/types/same.go | 0.674694 | 0.41739 | same.go | starcoder |
package log
import (
"sort"
"github.com/prometheus/prometheus/pkg/labels"
)
var (
	// emptyLabelsResult is the canonical result for an empty label set,
	// shared so its string form and hash are computed only once.
	emptyLabelsResult = NewLabelsResult(labels.Labels{}, labels.Labels{}.Hash())
)
// LabelsResult is a computed labels result that contains the labels set with associated string and hash.
// The is mainly used for caching and returning labels computations out of pipelines and stages.
type LabelsResult interface {
	String() string        // precomputed string form of the label set
	Labels() labels.Labels // the underlying label set
	Hash() uint64          // precomputed hash of the label set
}
// NewLabelsResult creates a new LabelsResult from a labels set and a
// hash, precomputing the string form once at construction.
func NewLabelsResult(lbs labels.Labels, hash uint64) LabelsResult {
	res := labelsResult{
		lbs: lbs,
		s:   lbs.String(),
		h:   hash,
	}
	return &res
}
// labelsResult is the concrete LabelsResult: a label set together with
// its precomputed string form and hash.
type labelsResult struct {
	lbs labels.Labels
	s   string
	h   uint64
}

// String returns the precomputed string form of the label set.
func (l labelsResult) String() string {
	return l.s
}

// Labels returns the underlying label set.
func (l labelsResult) Labels() labels.Labels {
	return l.lbs
}

// Hash returns the precomputed hash of the label set.
func (l labelsResult) Hash() uint64 {
	return l.h
}
// hasher computes label-set hashes while reusing a single scratch
// buffer across calls. Not safe for concurrent use.
type hasher struct {
	buf []byte // buffer for computing hash without bytes slice allocation.
}

// newHasher allow to compute hashes for labels by reusing the same buffer.
func newHasher() *hasher {
	return &hasher{
		buf: make([]byte, 0, 1024),
	}
}

// Hash hashes the labels.
func (h *hasher) Hash(lbs labels.Labels) uint64 {
	var hash uint64
	// No labels are actually excluded; HashWithoutLabels is used only
	// because it lets us pass (and keep) a reusable buffer.
	hash, h.buf = lbs.HashWithoutLabels(h.buf, []string(nil)...)
	return hash
}
// BaseLabelsBuilder is a label builder used by pipeline and stages.
// Only one base builder is used and it contains cache for each LabelsBuilders.
type BaseLabelsBuilder struct {
	del []string       // label names pending deletion
	add []labels.Label // labels pending addition/override
	// nolint(structcheck) https://github.com/golangci/golangci-lint/issues/826
	err               string   // value for the __error__ label, "" when unset
	groups            []string // grouping label names for metric queries
	parserKeyHints    []string // label key hints for metric queries that allows to limit parser extractions to only this list of labels.
	without, noLabels bool     // grouping modifiers: "without" vs "by", or drop all labels
	resultCache       map[uint64]LabelsResult
	*hasher
}
// LabelsBuilder is the same as labels.Builder but tailored for this package.
// It carries a base label set plus its cached result, and shares the
// mutable state and cache of the embedded BaseLabelsBuilder.
type LabelsBuilder struct {
	base          labels.Labels // immutable starting label set
	currentResult LabelsResult  // cached result for the unchanged base
	groupedResult LabelsResult  // cached result for the grouped base
	*BaseLabelsBuilder
}
// NewBaseLabelsBuilderWithGrouping creates a new base labels builder
// with grouping to compute results. The del/add scratch slices are
// pre-sized so typical pipelines avoid reallocation.
func NewBaseLabelsBuilderWithGrouping(groups []string, parserKeyHints []string, without, noLabels bool) *BaseLabelsBuilder {
	builder := &BaseLabelsBuilder{
		del:            make([]string, 0, 5),
		add:            make([]labels.Label, 0, 16),
		groups:         groups,
		parserKeyHints: parserKeyHints,
		without:        without,
		noLabels:       noLabels,
		resultCache:    make(map[uint64]LabelsResult),
		hasher:         newHasher(),
	}
	return builder
}
// NewBaseLabelsBuilder creates a new base labels builder with no
// grouping, no parser hints and labels enabled.
func NewBaseLabelsBuilder() *BaseLabelsBuilder {
	return NewBaseLabelsBuilderWithGrouping(nil, nil, false, false)
}
// ForLabels creates a labels builder for a given labels set as base.
// The labels cache is shared across all created LabelsBuilders.
func (b *BaseLabelsBuilder) ForLabels(lbs labels.Labels, hash uint64) *LabelsBuilder {
	cached, ok := b.resultCache[hash]
	if !ok {
		cached = NewLabelsResult(lbs, hash)
		b.resultCache[hash] = cached
	}
	return &LabelsBuilder{
		base:              lbs,
		currentResult:     cached,
		BaseLabelsBuilder: b,
	}
}
// Reset clears all current state for the builder, keeping the scratch
// slices' capacity so they can be reused for the next entry.
func (b *LabelsBuilder) Reset() {
	b.del = b.del[:0]
	b.add = b.add[:0]
	b.err = ""
}
// ParserLabelHints returns a limited list of expected labels to extract for metric queries.
// Returns nil when it's impossible to hint labels extractions.
func (b *BaseLabelsBuilder) ParserLabelHints() []string {
	return b.parserKeyHints
}
// SetErr records err as the error label value and returns the builder
// for chaining.
func (b *LabelsBuilder) SetErr(err string) *LabelsBuilder {
	b.err = err
	return b
}

// GetErr returns the current error label value ("" when unset).
func (b *LabelsBuilder) GetErr() string {
	return b.err
}

// HasErr reports whether an error label value has been set.
func (b *LabelsBuilder) HasErr() bool {
	return len(b.err) > 0
}
// BaseHas reports whether the base labels (ignoring pending additions
// and deletions) contain the given key.
func (b *LabelsBuilder) BaseHas(key string) bool {
	return b.base.Has(key)
}
// Get returns the value of a label key if it exists, honouring pending
// additions first, then pending deletions, then the base labels.
func (b *LabelsBuilder) Get(key string) (string, bool) {
	for _, lbl := range b.add {
		if lbl.Name == key {
			return lbl.Value, true
		}
	}
	for _, name := range b.del {
		if name == key {
			return "", false
		}
	}
	for _, lbl := range b.base {
		if lbl.Name == key {
			return lbl.Value, true
		}
	}
	return "", false
}
// Del deletes the label of the given name. A pending addition with the
// same name is removed as well, so a later Labels() call cannot
// resurrect it.
func (b *LabelsBuilder) Del(ns ...string) *LabelsBuilder {
	for _, n := range ns {
		for i, a := range b.add {
			if a.Name == n {
				// Removing while ranging is safe here: the range uses
				// the original slice header, and names in b.add are
				// unique (Set enforces it), so at most one removal.
				b.add = append(b.add[:i], b.add[i+1:]...)
			}
		}
		b.del = append(b.del, n)
	}
	return b
}
// Set records the name/value pair as a pending label, overwriting any
// pending addition with the same name.
func (b *LabelsBuilder) Set(n, v string) *LabelsBuilder {
	for i := range b.add {
		if b.add[i].Name == n {
			b.add[i].Value = v
			return b
		}
	}
	b.add = append(b.add, labels.Label{Name: n, Value: v})
	return b
}
// Labels returns the labels from the builder. If no modifications
// were made, the original labels are returned.
// When an error is set it is appended as the ErrorLabel, and any
// non-trivial result is sorted as the labels.Labels contract requires.
func (b *LabelsBuilder) Labels() labels.Labels {
	if len(b.del) == 0 && len(b.add) == 0 {
		if b.err == "" {
			return b.base
		}
		// Copy before appending so the shared base is never mutated.
		res := append(b.base.Copy(), labels.Label{Name: ErrorLabel, Value: b.err})
		sort.Sort(res)
		return res
	}
	// In the general case, labels are removed, modified or moved
	// rather than added.
	res := make(labels.Labels, 0, len(b.base))
Outer:
	for _, l := range b.base {
		// Skip base labels that were deleted ...
		for _, n := range b.del {
			if l.Name == n {
				continue Outer
			}
		}
		// ... or overridden by a pending addition.
		for _, la := range b.add {
			if l.Name == la.Name {
				continue Outer
			}
		}
		res = append(res, l)
	}
	res = append(res, b.add...)
	if b.err != "" {
		res = append(res, labels.Label{Name: ErrorLabel, Value: b.err})
	}
	sort.Sort(res)
	return res
}
// LabelsResult returns the LabelsResult from the builder.
// No grouping is applied and the cache is used when possible.
func (b *LabelsBuilder) LabelsResult() LabelsResult {
	// unchanged path: reuse the result computed when this builder was
	// created for the base label set.
	if len(b.del) == 0 && len(b.add) == 0 && b.err == "" {
		return b.currentResult
	}
	return b.toResult(b.Labels())
}
// toResult returns (and caches) the LabelsResult for the given label
// set, keyed by its hash.
func (b *BaseLabelsBuilder) toResult(lbs labels.Labels) LabelsResult {
	h := b.hasher.Hash(lbs)
	res, ok := b.resultCache[h]
	if !ok {
		res = NewLabelsResult(lbs, h)
		b.resultCache[h] = res
	}
	return res
}
// GroupedLabels returns the LabelsResult from the builder.
// Groups are applied and the cache is used when possible.
func (b *LabelsBuilder) GroupedLabels() LabelsResult {
	if b.err != "" {
		// We need to return now before applying grouping otherwise the error might get lost.
		return b.LabelsResult()
	}
	if b.noLabels {
		return emptyLabelsResult
	}
	// unchanged path: no pending mutations, so grouping can be applied
	// directly to the base label set (and cached on the builder).
	if len(b.del) == 0 && len(b.add) == 0 {
		if len(b.groups) == 0 {
			return b.currentResult
		}
		return b.toBaseGroup()
	}
	// no grouping
	if len(b.groups) == 0 {
		return b.LabelsResult()
	}
	if b.without {
		return b.withoutResult()
	}
	return b.withResult()
}
// withResult keeps only the labels named in b.groups ("by" grouping),
// honouring pending deletions and additions before the base labels.
// NOTE(review): result order follows b.groups — presumably sorted
// upstream so the labels.Labels ordering contract holds; confirm.
func (b *LabelsBuilder) withResult() LabelsResult {
	res := make(labels.Labels, 0, len(b.groups))
Outer:
	for _, g := range b.groups {
		// A deleted label is excluded even if named in the grouping.
		for _, n := range b.del {
			if g == n {
				continue Outer
			}
		}
		// Pending additions take precedence over base values.
		for _, la := range b.add {
			if g == la.Name {
				res = append(res, la)
				continue Outer
			}
		}
		for _, l := range b.base {
			if g == l.Name {
				res = append(res, l)
				continue Outer
			}
		}
	}
	return b.toResult(res)
}
// withoutResult drops the labels named in b.groups ("without"
// grouping), after applying pending deletions and additions.
func (b *LabelsBuilder) withoutResult() LabelsResult {
	// Best-effort capacity estimate; clamped at zero since the pending
	// sets may overlap.
	size := len(b.base) + len(b.add) - len(b.del) - len(b.groups)
	if size < 0 {
		size = 0
	}
	res := make(labels.Labels, 0, size)
Outer:
	for _, l := range b.base {
		// Skip base labels that are deleted, overridden, or grouped out.
		for _, n := range b.del {
			if l.Name == n {
				continue Outer
			}
		}
		for _, la := range b.add {
			if l.Name == la.Name {
				continue Outer
			}
		}
		for _, lg := range b.groups {
			if l.Name == lg {
				continue Outer
			}
		}
		res = append(res, l)
	}
OuterAdd:
	// Pending additions are kept unless grouped out.
	for _, la := range b.add {
		for _, lg := range b.groups {
			if la.Name == lg {
				continue OuterAdd
			}
		}
		res = append(res, la)
	}
	sort.Sort(res)
	return b.toResult(res)
}
func (b *LabelsBuilder) toBaseGroup() LabelsResult {
if b.groupedResult != nil {
return b.groupedResult
}
var lbs labels.Labels
if b.without {
lbs = b.base.WithoutLabels(b.groups...)
} else {
lbs = b.base.WithLabels(b.groups...)
}
res := NewLabelsResult(lbs, lbs.Hash())
b.groupedResult = res
return res
} | pkg/logql/log/labels.go | 0.794624 | 0.407392 | labels.go | starcoder |
package main
import (
"bytes"
"fmt"
"io"
"strings"
"text/template"
)
// This file contains the templates for the code generation
// These are the template for each table
// CreateQbModel creates the models template: it renders the qb model
// code (tables, fields, helper types and the Open function) for m into
// wr. Template failures abort via catch.
func CreateQbModel(m Model, wr io.Writer) {
	temp := template.Must(template.New(`qb-model`).
		Funcs(template.FuncMap{
			`title`:  title,
			`quote`:  quote,
			`join`:   strings.Join,
			`notnil`: notNil,
			`qbtype`: qbType,
			// migrate renders the full migration SQL so the generated
			// Open function can embed it as a raw string.
			`migrate`: func() string {
				buf := new(bytes.Buffer)
				CreateMigration(m, buf)
				return buf.String()
			},
		}).
		Parse(queryTempl + tableTempl))
	catch(temp.Execute(wr, m), `Unable to execute the Model template`)
}
// CreateMigration creates the migration template: it renders the SQL
// migration statements for every table/enum of m into wr. Template
// failures abort via catch.
func CreateMigration(m Model, wr io.Writer) {
	temp := template.Must(template.New(`migration`).
		Funcs(template.FuncMap{
			`notnil`: notNil,
		}).
		Parse(queryTempl + migrationTempl))
	catch(temp.Execute(wr, m), `Unable to execute migration template`)
}
// title converts a snake_case identifier into an exported Go name,
// upper-casing the well-known initialisms id/sql/url and title-casing
// every other part.
func title(s interface{}) string {
	var out strings.Builder
	for _, part := range strings.Split(fmt.Sprint(s), `_`) {
		switch part {
		case `id`, `Id`, `sql`, `Sql`, `url`, `Url`:
			out.WriteString(strings.ToUpper(part))
		default:
			out.WriteString(strings.Title(part))
		}
	}
	return out.String()
}
// quote renders its arguments space-separated inside backticks. With
// no arguments it yields a single backtick, which the templates use
// deliberately as a raw-string delimiter.
func quote(i ...interface{}) string {
	if len(i) == 0 {
		return "`"
	}
	parts := make([]string, len(i))
	for n, item := range i {
		parts[n] = fmt.Sprint(item)
	}
	return "`" + strings.Join(parts, " ") + "`"
}
// notNil reports whether x is non-nil; exposed to the templates as
// the "notnil" helper.
func notNil(x interface{}) bool {
	return x != nil
}
// qbType maps an SQL column type name to the matching qb type
// expression used in generated code. Unknown types fall back to qb.Int.
func qbType(x DataType) string {
	switch x.Type() {
	case `varchar`, `text`:
		return `qb.String`
	case `int`, `tinyint`, `smallint`, `bigint`:
		return `qb.Int`
	case `double`, `float`:
		return `qb.Float`
	case `date`, `datetime`:
		return `qb.Date`
	case `boolean`:
		return `qb.Bool`
	default:
		return `qb.Int`
	}
}
// queryTempl declares the shared "tablequery" and "enumquery"
// sub-templates that render CREATE TABLE / CREATE TYPE statements.
// It is prepended to both the migration and the model templates.
var queryTempl = `{{define "tablequery"}}CREATE TABLE IF NOT EXISTS {{print (index . 0).Table}} ({{range $n, $col := .}}{{if $n}},{{end}}
{{$col.Name -}}
{{- if notnil $col.DataType}} {{$col.DataType.Type}}{{end -}}
{{- if gt $col.Size 0}}({{$col.Size}}){{end -}}
{{- if and (not $col.Nullable) (eq $col.Default "")}} NOT NULL{{end -}}
{{- if $col.Primary}} PRIMARY KEY{{end -}}
{{- if $col.Unique}} UNIQUE{{end -}}
{{- if not (eq $col.Default "")}} DEFAULT '{{$col.Default}}'{{end -}}
{{- range $g, $c := $col.Constraints}}{{if $g}}, {{end}} ADD CONSTRAINT {{$c}}{{end -}}
{{- end}}
);{{- end}}
{{define "enumquery"}}CREATE TYPE {{print .Table}} AS ENUM ({{range $n, $val := .Values}}{{if $n}},{{end}} {{$val}}{{end}} );{{end}}`
// migrationTempl renders one CREATE statement per table or enum in the
// model, using the sub-templates declared in queryTempl.
var migrationTempl = `{{range $.Tables}}
{{- $col := .Columns $.Types -}}
{{- $enu := .Enum $.Types -}}
-- {{print .}}
{{if gt (len $col) 0 }}{{template "tablequery" $col}}
{{else if gt (len $enu.Values) 0}}{{template "enumquery" $enu}}
{{- end}}
{{end}}`
// tableTempl renders the generated Go source for the qb model: per
// table a *Type struct with field definitions and query helpers, per
// enum a string-slice type, plus an Open function that embeds the full
// migration SQL (the Go code below is template TEXT, not compiled
// code; bare {{quote}} calls emit single backticks as raw-string
// delimiters in the output).
var tableTempl = `
{{range $e, $t := $.Tables -}}
{{- $cols := $t.Columns $.Types -}}
{{- $enu := $t.Enum $.Types -}}
// {{print $t}}
{{- if gt (len $cols) 0}}
var (
qb{{title $t}}Table = qb.Table{Name: {{quote $t}}}
{{range $cols}}
qb{{title $t}}F{{title .Name}} = qb.TableField{Parent: &qb{{title $t}}Table, Name: {{quote .Name}}
{{- if notnil .DataType}}, Type: {{qbtype .DataType}}{{end -}}
{{- if gt .Size 0}}, Size: {{.Size}}{{end -}}
{{- if .Nullable}}, Nullable: true{{end -}}
}{{end}}
)
// {{title $t}}Type represents the table "{{print $t}}"
type {{title $t}}Type struct {
{{- range $cols}}
{{title .Name}} qb.Field{{end}}
table *qb.Table
}
// SQL is the qb.Query implementation for migration the {{title $t}} table
func (*{{title $t}}Type) SQL(_ qb.SQLBuilder) (q string, _ []interface{}) {
q = {{quote}}{{template "tablequery" $cols}}{{- quote}}
return
}
// GetTable returns an object with info about the table
func (t *{{title $t}}Type) GetTable() *qb.Table {
return t.table
}
// Select starts a SELECT query
func (t *{{title $t}}Type) Select(f ...qb.Field) *qb.SelectBuilder {
return t.table.Select(f)
}
// Delete creates a DELETE query
func (t *{{title $t}}Type) Delete(c1 qb.Condition, c ...qb.Condition) qb.Query {
return t.table.Delete(c1, c...)
}
// Update starts a UPDATE query
func (t *{{title $t}}Type) Update() *qb.UpdateBuilder {
return t.table.Update()
}
// Insert starts a INSERT query
func (t *{{title $t}}Type) Insert(f ...qb.Field) *qb.InsertBuilder {
return t.table.Insert(f)
}
// {{title $t}} returns a new {{title $t}}Type
func {{title $t}}() *{{title $t}}Type {
table := qb{{title $t}}Table
return &{{title $t}}Type{
{{- range $cols}}
qb{{title $t}}F{{title .Name}}.Copy(&table),
{{- end}}
&table,
}
}
{{- else if notnil $enu.Table}}
// {{title $t}}Type represents the enum "{{print $t}}
type {{title $t}}Type []string
// SQL is the qb.Query implementation for migrating the {{title $t}} enum
func (*{{title $t}}Type) SQL(_ qb.SQLBuilder) (q string, _[]interface{}) {
q = {{quote}}{{template "enumquery" $enu}}{{quote}}
return
}
// {{title $t}} returns a new {{title $t}}Type
func {{title $t}}() *{{title $t}}Type {
enu := {{title $t}}Type([]string{ {{range $enu.Values}}
{{quote .}},{{end}}
})
return &enu
}
{{end}}
{{end}}
// Open creates a connection with the database and
// inserst the migration if not exists and
// returns the qbdb database
func Open(driver, connectionString string) (*qbdb.DB, error) {
db, err := sql.Open(driver, connectionString)
if err != nil {
return nil, err
}
q := {{ quote migrate }}
if _, err := db.Exec(q); err != nil {
return nil, fmt.Errorf("Migration failed: %v", err)
}
return autoqb.New(db), nil
}` | template.go | 0.511229 | 0.535766 | template.go | starcoder |
package p384
import (
"fmt"
"math/big"
)
// affinePoint represents an affine point of the curve. The point at
// infinity is (0,0) leveraging that it is not an affine point.
type affinePoint struct{ x, y fp384 }

// String implements fmt.Stringer for debugging output.
func (ap affinePoint) String() string {
	return fmt.Sprintf("x: %v\ny: %v", ap.x, ap.y)
}
// newAffinePoint builds an affinePoint from big.Int coordinates,
// converting both into the Montgomery domain used by the fp384
// arithmetic.
func newAffinePoint(X, Y *big.Int) *affinePoint {
	var P affinePoint
	P.x.SetBigInt(X)
	P.y.SetBigInt(Y)
	montEncode(&P.x, &P.x)
	montEncode(&P.y, &P.y)
	return &P
}
// neg negates the point in place (y := -y).
func (ap *affinePoint) neg() { fp384Neg(&ap.y, &ap.y) }

// toJacobian lifts the point to Jacobian coordinates with z = 1
// (Montgomery-encoded); the point at infinity maps to z = 0.
func (ap *affinePoint) toJacobian() *jacobianPoint {
	var P jacobianPoint
	if !ap.isZero() {
		P.x = ap.x
		P.y = ap.y
		montEncode(&P.z, &fp384{1})
	}
	return &P
}

// toInt returns the coordinates as big.Ints, decoded out of the
// Montgomery domain.
func (ap *affinePoint) toInt() (*big.Int, *big.Int) {
	x, y := &fp384{}, &fp384{}
	montDecode(x, &ap.x)
	montDecode(y, &ap.y)
	return x.BigInt(), y.BigInt()
}

// isZero reports whether ap is the point at infinity, encoded as (0,0).
func (ap *affinePoint) isZero() bool {
	zero := fp384{}
	return ap.x == zero && ap.y == zero
}
// oddMultiples calculates the points iP for i={1,3,5,7,..., 2^(n-1)-1}
// Ensure that 1 < n < 31, otherwise it returns an empty slice.
func (ap affinePoint) oddMultiples(n uint) []jacobianPoint {
	var t []jacobianPoint
	if n > 1 && n < 31 {
		P := ap.toJacobian()
		s := int32(1) << (n - 1)
		t = make([]jacobianPoint, s)
		t[0] = *P
		_2P := *P
		_2P.double()
		// t[i] = t[i-1] + 2P, yielding P, 3P, 5P, ... in order.
		for i := int32(1); i < s; i++ {
			t[i].add(&t[i-1], &_2P)
		}
	}
	return t
}
// jacobianPoint represents a point in Jacobian coordinates. The point at
// infinity is any point with z=0 including (0,0,0) (although this is not a
// projective point).
type jacobianPoint struct{ x, y, z fp384 }

// neg negates the point in place (y := -y).
func (P *jacobianPoint) neg() { fp384Neg(&P.y, &P.y) }

// cneg negates P when b=1 and leaves it unchanged when b=0, via a
// conditional move.
func (P *jacobianPoint) cneg(b int) {
	var mY fp384
	fp384Neg(&mY, &P.y)
	fp384Cmov(&P.y, &mY, b)
}

// cmov sets P to Q when b=1 and leaves it unchanged when b=0, via
// conditional moves of each coordinate.
func (P *jacobianPoint) cmov(Q *jacobianPoint, b int) {
	fp384Cmov(&P.x, &Q.x, b)
	fp384Cmov(&P.y, &Q.y, b)
	fp384Cmov(&P.z, &Q.z, b)
}
// toAffine maps the point back to affine coordinates, computing
// x/z^2 and y/z^3 with a single field inversion.
func (P *jacobianPoint) toAffine() *affinePoint {
	var aP affinePoint
	z, z2 := &fp384{}, &fp384{}
	fp384Inv(z, &P.z)
	fp384Sqr(z2, z)
	fp384Mul(&aP.x, &P.x, z2)
	fp384Mul(&aP.y, &P.y, z)
	fp384Mul(&aP.y, &aP.y, z2)
	return &aP
}

// toInt returns the Jacobian coordinates as big.Ints, decoded out of
// the Montgomery domain.
func (P *jacobianPoint) toInt() (*big.Int, *big.Int, *big.Int) {
	x, y, z := &fp384{}, &fp384{}, &fp384{}
	montDecode(x, &P.x)
	montDecode(y, &P.y)
	montDecode(z, &P.z)
	return x.BigInt(), y.BigInt(), z.BigInt()
}

// isZero reports whether P is the point at infinity (z = 0).
func (P *jacobianPoint) isZero() bool { return P.z == fp384{} }
// add calculates P=Q+R such that Q and R are different than the identity point,
// and Q!==R. This function cannot be used for doublings.
// (Identity operands are handled explicitly below; the formulas
// themselves assume distinct non-zero inputs.)
func (P *jacobianPoint) add(Q, R *jacobianPoint) {
	if Q.isZero() {
		*P = *R
		return
	} else if R.isZero() {
		*P = *Q
		return
	}

	// Cohen-Miyagi-Ono (1998)
	// https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-1998-cmo-2
	X1, Y1, Z1 := &Q.x, &Q.y, &Q.z
	X2, Y2, Z2 := &R.x, &R.y, &R.z
	Z1Z1, Z2Z2, U1, U2 := &fp384{}, &fp384{}, &fp384{}, &fp384{}
	H, HH, HHH, RR := &fp384{}, &fp384{}, &fp384{}, &fp384{}
	V, t4, t5, t6, t7, t8 := &fp384{}, &fp384{}, &fp384{}, &fp384{}, &fp384{}, &fp384{}
	t0, t1, t2, t3, S1, S2 := &fp384{}, &fp384{}, &fp384{}, &fp384{}, &fp384{}, &fp384{}
	fp384Sqr(Z1Z1, Z1)     // Z1Z1 = Z1 ^ 2
	fp384Sqr(Z2Z2, Z2)     // Z2Z2 = Z2 ^ 2
	fp384Mul(U1, X1, Z2Z2) // U1 = X1 * Z2Z2
	fp384Mul(U2, X2, Z1Z1) // U2 = X2 * Z1Z1
	fp384Mul(t0, Z2, Z2Z2) // t0 = Z2 * Z2Z2
	fp384Mul(S1, Y1, t0)   // S1 = Y1 * t0
	fp384Mul(t1, Z1, Z1Z1) // t1 = Z1 * Z1Z1
	fp384Mul(S2, Y2, t1)   // S2 = Y2 * t1
	fp384Sub(H, U2, U1)    // H = U2 - U1
	fp384Sqr(HH, H)        // HH = H ^ 2
	fp384Mul(HHH, H, HH)   // HHH = H * HH
	fp384Sub(RR, S2, S1)   // r = S2 - S1
	fp384Mul(V, U1, HH)    // V = U1 * HH
	fp384Sqr(t2, RR)       // t2 = r ^ 2
	fp384Add(t3, V, V)     // t3 = V + V
	fp384Sub(t4, t2, HHH)  // t4 = t2 - HHH
	fp384Sub(&P.x, t4, t3) // X3 = t4 - t3
	fp384Sub(t5, V, &P.x)  // t5 = V - X3
	fp384Mul(t6, S1, HHH)  // t6 = S1 * HHH
	fp384Mul(t7, RR, t5)   // t7 = r * t5
	fp384Sub(&P.y, t7, t6) // Y3 = t7 - t6
	fp384Mul(t8, Z2, H)    // t8 = Z2 * H
	fp384Mul(&P.z, Z1, t8) // Z3 = Z1 * t8
}
// mixadd calculates P=Q+R such that P and Q different than the identity point,
// and Q not in {P,-P, O}. R is an affine point, which saves work
// compared to the fully Jacobian add. The Q==R case is detected and
// delegated to double; Q==-R yields the identity.
func (P *jacobianPoint) mixadd(Q *jacobianPoint, R *affinePoint) {
	if Q.isZero() {
		*P = *R.toJacobian()
		return
	} else if R.isZero() {
		*P = *Q
		return
	}

	// u2 and s2 are R's coordinates brought to Q's Jacobian scale.
	z1z1, u2 := &fp384{}, &fp384{}
	fp384Sqr(z1z1, &Q.z)
	fp384Mul(u2, &R.x, z1z1)

	s2 := &fp384{}
	fp384Mul(s2, &R.y, &Q.z)
	fp384Mul(s2, s2, z1z1)
	if Q.x == *u2 {
		// Same x-coordinate: either opposite points (result is the
		// identity) or the same point (delegate to doubling).
		if Q.y != *s2 {
			*P = jacobianPoint{}
			return
		}
		*P = *Q
		P.double()
		return
	}

	h, r := &fp384{}, &fp384{}
	fp384Sub(h, u2, &Q.x)
	fp384Mul(&P.z, h, &Q.z)
	fp384Sub(r, s2, &Q.y)

	h2, h3 := &fp384{}, &fp384{}
	fp384Sqr(h2, h)
	fp384Mul(h3, h2, h)

	h3y1 := &fp384{}
	fp384Mul(h3y1, h3, &Q.y)

	h2x1 := &fp384{}
	fp384Mul(h2x1, h2, &Q.x)

	// X3 = r^2 - h^3 - 2*h^2*X1
	fp384Sqr(&P.x, r)
	fp384Sub(&P.x, &P.x, h3)
	fp384Sub(&P.x, &P.x, h2x1)
	fp384Sub(&P.x, &P.x, h2x1)

	// Y3 = r*(h^2*X1 - X3) - h^3*Y1
	fp384Sub(&P.y, h2x1, &P.x)
	fp384Mul(&P.y, &P.y, r)
	fp384Sub(&P.y, &P.y, h3y1)
}
// double sets P = 2P in place. The delta/gamma/beta/alpha variable
// sequence matches the Jacobian doubling formulas for a = -3 short
// Weierstrass curves (cf. EFD dbl-2001-b) — the curve family P-384
// belongs to.
func (P *jacobianPoint) double() {
	delta, gamma, alpha, alpha2 := &fp384{}, &fp384{}, &fp384{}, &fp384{}
	fp384Sqr(delta, &P.z)
	fp384Sqr(gamma, &P.y)
	// alpha = 3*(X - delta)*(X + delta)
	fp384Sub(alpha, &P.x, delta)
	fp384Add(alpha2, &P.x, delta)
	fp384Mul(alpha, alpha, alpha2)
	*alpha2 = *alpha
	fp384Add(alpha, alpha, alpha)
	fp384Add(alpha, alpha, alpha2)

	beta := &fp384{}
	fp384Mul(beta, &P.x, gamma)

	// X3 = alpha^2 - 8*beta
	beta8 := &fp384{}
	fp384Sqr(&P.x, alpha)
	fp384Add(beta8, beta, beta)
	fp384Add(beta8, beta8, beta8)
	fp384Add(beta8, beta8, beta8)
	fp384Sub(&P.x, &P.x, beta8)

	// Z3 = (Y + Z)^2 - gamma - delta
	fp384Add(&P.z, &P.y, &P.z)
	fp384Sqr(&P.z, &P.z)
	fp384Sub(&P.z, &P.z, gamma)
	fp384Sub(&P.z, &P.z, delta)

	// Y3 = alpha*(4*beta - X3) - 8*gamma^2
	fp384Add(beta, beta, beta)
	fp384Add(beta, beta, beta)
	fp384Sub(beta, beta, &P.x)
	fp384Mul(&P.y, alpha, beta)

	fp384Sqr(gamma, gamma)
	fp384Add(gamma, gamma, gamma)
	fp384Add(gamma, gamma, gamma)
	fp384Add(gamma, gamma, gamma)

	fp384Sub(&P.y, &P.y, gamma)
}
// String implements fmt.Stringer for debugging output.
func (P jacobianPoint) String() string {
	return fmt.Sprintf("x: %v\ny: %v\nz: %v", P.x, P.y, P.z)
} | ecc/p384/point.go | 0.713432 | 0.482246 | point.go | starcoder |
package giso
import (
"math"
"sort"
)
// Shape is a 3D object composed of planar faces (paths).
type Shape struct {
	paths []*Path
}
// NewShape builds a Shape from the given paths. A nil slice is
// replaced by an empty one so that Push always has a valid target.
func NewShape(paths []*Path) *Shape {
	if paths == nil {
		paths = make([]*Path, 0)
	}
	return &Shape{paths: paths}
}
// Push appends a path at the end of the shape and returns the shape
// for chaining.
func (sh *Shape) Push(pat *Path) *Shape {
	sh.paths = append(sh.paths, pat)
	return sh
}
// Translate returns a new shape with every path translated by
// (dx, dy, dz).
func (sh *Shape) Translate(dx, dy, dz float64) *Shape {
	out := make([]*Path, 0, len(sh.paths))
	for _, p := range sh.paths {
		out = append(out, p.Translate(dx, dy, dz))
	}
	return &Shape{paths: out}
}

// Scale returns a new shape scaled about the given origin.
func (sh *Shape) Scale(origin *Point, dx, dy, dz float64) *Shape {
	out := make([]*Path, 0, len(sh.paths))
	for _, p := range sh.paths {
		out = append(out, p.Scale(origin, dx, dy, dz))
	}
	return &Shape{paths: out}
}

// RotateX returns a new shape rotated about origin on the X axis.
func (sh *Shape) RotateX(origin *Point, angle float64) *Shape {
	out := make([]*Path, 0, len(sh.paths))
	for _, p := range sh.paths {
		out = append(out, p.RotateX(origin, angle))
	}
	return &Shape{paths: out}
}

// RotateY returns a new shape rotated about origin on the Y axis.
func (sh *Shape) RotateY(origin *Point, angle float64) *Shape {
	out := make([]*Path, 0, len(sh.paths))
	for _, p := range sh.paths {
		out = append(out, p.RotateY(origin, angle))
	}
	return &Shape{paths: out}
}

// RotateZ returns a new shape rotated about origin on the Z axis.
func (sh *Shape) RotateZ(origin *Point, angle float64) *Shape {
	out := make([]*Path, 0, len(sh.paths))
	for _, p := range sh.paths {
		out = append(out, p.RotateZ(origin, angle))
	}
	return &Shape{paths: out}
}
// orderedPaths sorts the shape's paths back-to-front (deepest first)
// for painter's-algorithm rendering. Note: sorts sh.paths in place and
// returns the same slice.
func (sh *Shape) orderedPaths() []*Path {
	sort.Slice(sh.paths, func(i, j int) bool {
		return sh.paths[i].Depth() > sh.paths[j].Depth()
	})
	return sh.paths
}
// Extrude creates a 3D object by raising a 2D path along the z-axis.
// The result has the bottom face (reversed so it faces outward), the
// top face, and one quad side face per edge of the path.
func Extrude(p *Path, height float64) *Shape {
	top := p.Translate(0, 0, height)
	shape := &Shape{
		paths: make([]*Path, 0),
	}

	/* Push the top and bottom faces, top face must be oriented correctly */
	shape.Push(p.Reverse())
	shape.Push(top)
	topPathLen := len(top.points)
	patLen := len(p.points)

	// One side quad per edge, wrapping around at the end.
	for i := 0; i < patLen; i++ {
		shape.Push(&Path{
			points: []*Point{
				top.points[i],
				p.points[i],
				p.points[(i+1)%patLen],
				top.points[(i+1)%topPathLen],
			},
		})
	}
	return shape
}
// Prism builds a rectangular box with the given side lengths, anchored
// at the origin. Each of the three face templates is pushed together
// with its opposite (translated and, where needed, reversed so both
// face outward).
func Prism(dx, dy, dz float64) *Shape {
	shape := &Shape{
		paths: make([]*Path, 6),
	}

	// Squares parallel to the x-axis
	face1 := &Path{
		points: []*Point{
			{0, 0, 0},
			{dx, 0, 0},
			{dx, 0, dz},
			{0, 0, dz},
		},
	}
	// Push this face and its opposite
	shape.paths[0] = face1
	shape.paths[1] = face1.Reverse().Translate(0, dy, 0)

	// Square parallel to the y-axis
	face2 := &Path{
		points: []*Point{
			{0, 0, 0},
			{0, 0, dz},
			{0, dy, dz},
			{0, dy, 0},
		},
	}
	shape.paths[2] = face2
	shape.paths[3] = face2.Reverse().Translate(dx, 0, 0)

	// Square parallel to the xy-plane
	face3 := &Path{
		points: []*Point{
			{0, 0, 0},
			{dx, 0, 0},
			{dx, dy, 0},
			{0, dy, 0},
		},
	}
	/* This surface is oriented backwards, so we need to reverse the points */
	shape.paths[4] = face3.Reverse()
	shape.paths[5] = face3.Translate(0, 0, dz)

	return shape
}
// Stairs builds a unit-sized staircase with the given number of steps:
// per step a riser and a tread quad, plus the two zig-zag side
// profiles (one reversed and shifted to the far side). The resulting
// shape has steps*2+2 faces.
func Stairs(steps int) *Shape {
	paths := make([]*Path, steps*2+2)
	zigZag := &Path{}
	// points collects the side-profile outline while the faces are built.
	points := make([]*Point, steps*2+2)
	points[0] = &Point{0, 0, 0}
	count := 1
	for i := 0; i < steps; i++ {
		// Top-front corner of step i.
		stepCorner := &Point{0, float64(i) / float64(steps), float64(i+1) / float64(steps)}
		// Riser (vertical quad).
		paths[count-1] = &Path{
			points: []*Point{
				stepCorner,
				stepCorner.Translate(0, 0, -1/float64(steps)),
				stepCorner.Translate(1, 0, -1/float64(steps)),
				stepCorner.Translate(1, 0, 0),
			},
		}
		points[count] = stepCorner
		count = count + 1

		// Tread (horizontal quad).
		paths[count-1] = &Path{
			points: []*Point{
				stepCorner,
				stepCorner.Translate(1, 0, 0),
				stepCorner.Translate(1, 1/float64(steps), 0),
				stepCorner.Translate(0, 1/float64(steps), 0),
			},
		}
		points[count] = stepCorner.Translate(0, 1/float64(steps), 0)
		count = count + 1
	}
	// Close the side profile at ground level and attach both sides.
	points[count] = &Point{0, 1, 0}
	zigZag.points = points

	paths[count-1] = zigZag
	count = count + 1
	tmp := zigZag.Reverse()
	paths[count-1] = tmp.Translate(1, 0, 0)

	return &Shape{paths}
}
// Pyramid builds a four-sided pyramid with base dx*dy and height dz.
// Two triangular faces are defined and the opposite pair is obtained
// by rotating them half a turn about the base center.
func Pyramid(dx, dy, dz float64) *Shape {
	// Path parallel to the x-axis
	face1 := &Path{
		points: []*Point{
			{0, 0, 0},
			{dx, 0, 0},
			{dx / 2, dy / 2, dz},
		},
	}
	// Path parallel to the y-axis
	face2 := &Path{
		points: []*Point{
			{0, 0, 0},
			{dx / 2, dy / 2, dz},
			{0, dy, 0},
		},
	}

	centerOfRot := &Point{0.5 * dx, 0.5 * dy, 0}
	return &Shape{
		paths: []*Path{
			face1,
			face1.RotateZ(centerOfRot, math.Pi),
			face2,
			face2.RotateZ(centerOfRot, math.Pi),
		},
	}
}
// Cylinder builds a 3D cylinder of the given radius and height by
// extruding a circle approximated with the requested vertex count.
func Cylinder(radius, height float64, vertices int) *Shape {
	return Extrude(Circle(radius, vertices), height)
}
// Octahedron builds a regular octahedron centered at (0.5, 0.5, 0.5):
// one upper and one lower triangle rotated into the four quadrants,
// then scaled in x/y so all edges have equal length.
func Octahedron() *Shape {
	center := &Point{0.5, 0.5, 0.5}
	upperTriangle := &Path{
		points: []*Point{
			{0.0, 0.0, 0.5},
			{0.5, 0.5, 1.0},
			{0.0, 1.0, 0.5},
		},
	}
	lowerTriangle := &Path{
		points: []*Point{
			{0.0, 0.0, 0.5},
			{0.0, 1.0, 0.5},
			{0.5, 0.5, 0.0},
		},
	}

	paths := make([]*Path, 8)
	count := 0
	// Four quarter-turn copies of each triangle cover all eight faces.
	for i := 0; i < 4; i++ {
		paths[count] = upperTriangle.RotateZ(center, float64(i)*math.Pi/2.0)
		count = count + 1
		paths[count] = lowerTriangle.RotateZ(center, float64(i)*math.Pi/2.0)
		count = count + 1
	}
	res := &Shape{paths: paths}
	return res.Scale(center, math.Sqrt(2)/2.0, math.Sqrt(2)/2.0, 1)
} | shape.go | 0.792785 | 0.499817 | shape.go | starcoder |
package schelpers
import (
"fmt"
scapiv1alpha2 "github.com/operator-framework/operator-sdk/pkg/apis/scorecard/v1alpha2"
)
// TestSuitesToScorecardOutput takes an array of test suites and generates a
// v1alpha2 ScorecardOutput object holding the given log and the flattened
// list of all suite results, each converted to a ScorecardTestResult.
func TestSuitesToScorecardOutput(suites []TestSuite, log string) scapiv1alpha2.ScorecardOutput {
	output := scapiv1alpha2.NewScorecardOutput()
	output.Log = log
	// Flatten every suite's results into a single list.
	for _, s := range suites {
		for _, r := range s.TestResults {
			output.Results = append(output.Results, TestResultToScorecardTestResult(r))
		}
	}
	return *output
}
// TestResultToScorecardTestResult is a helper function for converting from
// the TestResult type to the ScorecardTestResult type. Errors are flattened
// to their string messages, and a nil suggestion list is normalized to an
// empty (non-nil) slice so it serializes as [] rather than null.
func TestResultToScorecardTestResult(tr TestResult) scapiv1alpha2.ScorecardTestResult {
	suggestions := tr.Suggestions
	if suggestions == nil {
		suggestions = []string{}
	}
	errs := []string{}
	for _, e := range tr.Errors {
		errs = append(errs, e.Error())
	}
	return scapiv1alpha2.ScorecardTestResult{
		State:       tr.State,
		Name:        tr.Test.GetName(),
		Description: tr.Test.GetDescription(),
		Log:         tr.Log,
		Suggestions: suggestions,
		Errors:      errs,
		Labels:      tr.Test.GetLabels(),
	}
}
// ResultsCumulative merges the results of several runs of the same
// cumulative test into a single TestResult. Suggestions and errors from
// every run are concatenated, and the merged state is PassState only if
// every run passed. An error is returned if any result belongs to a
// non-cumulative test or to a test with a different name than the first.
func ResultsCumulative(results []TestResult) (TestResult, error) {
	merged := TestResult{State: scapiv1alpha2.PassState}
	var name string
	if len(results) > 0 {
		// All results are expected to come from the same test.
		name = results[0].Test.GetName()
		merged.Test = results[0].Test
	}
	allPassed := true
	for _, r := range results {
		if !r.Test.IsCumulative() {
			return merged, fmt.Errorf("non-cumulative test passed to ResultsCumulative: name (%s)", r.Test.GetName())
		}
		if r.Test.GetName() != name {
			return merged, fmt.Errorf("test name mismatch in ResultsCumulative: %s != %s", r.Test.GetName(), name)
		}
		merged.Suggestions = append(merged.Suggestions, r.Suggestions...)
		merged.Errors = append(merged.Errors, r.Errors...)
		if r.State != scapiv1alpha2.PassState {
			allPassed = false
		}
	}
	if !allPassed {
		merged.State = scapiv1alpha2.FailState
	}
	return merged, nil
}
// ResultsPassFail combines multiple test results and returns a
// single test result
func ResultsPassFail(results []TestResult) (TestResult, error) {
var name string
var failFound bool
finalResult := TestResult{
State: scapiv1alpha2.PassState,
}
if len(results) > 0 {
name = results[0].Test.GetName()
// all results have the same test
finalResult.Test = results[0].Test
}
for _, result := range results {
if result.Test.IsCumulative() {
return finalResult, fmt.Errorf("cumulative test passed to ResultsPassFail: name (%s)", result.Test.GetName())
}
if result.Test.GetName() != name {
return finalResult, fmt.Errorf("test name mismatch in ResultsPassFail: %s != %s", result.Test.GetName(), name)
}
finalResult.Suggestions = append(finalResult.Suggestions, result.Suggestions...)
finalResult.Errors = append(finalResult.Errors, result.Errors...)
finalResult.Log = result.Log
if result.State != scapiv1alpha2.PassState {
failFound = true
}
}
if failFound {
finalResult.State = scapiv1alpha2.FailState
}
return finalResult, nil
} | internal/scorecard/helpers/helpers.go | 0.626353 | 0.423279 | helpers.go | starcoder |
package channel
import (
"math"
)
// BinaryMemorylessChannel models a binary-input stationary memoryless
// channel: inputs are {0, 1} and outputs are log likelihood ratios
// ln(W(y|0)/W(y|1)), where y is the channel output.
type BinaryMemorylessChannel interface {
	// Channel maps a block of binary inputs to the per-symbol log
	// likelihood ratios observed at the channel output.
	Channel([]int) []float64

	// CalcErrorProbabilityOfCombinedChannels evaluates the error
	// probability of each combined channel via density evolution.
	// length is the code length; indices start at 1.
	// WARNING: This method is not thread safe.
	CalcErrorProbabilityOfCombinedChannels(length int) []struct {
		Index int
		Prob  float64
	}
}
// calcErrorProbabilityViaDensityEvolution computes the error probability of
// the combined channel (length, index) by summing the probability mass that
// density evolution assigns to negative LLRs, plus half of the mass at
// exactly zero (a zero LLR is decided by an even coin flip).
func calcErrorProbabilityViaDensityEvolution(length int, index int, base map[float64]float64) float64 {
	distribution := densityEvolutionDiscreteProbability(length, index, base)
	errProb := 0.0
	for llr, prob := range distribution {
		if llr < 0 {
			errProb += prob
		} else if llr == 0 {
			errProb += prob / 2
		}
	}
	return errProb
}
// padding separates the (length, index) pair in the composite memoization
// key length*padding+index; it must exceed any index value that can occur.
const padding = 1000000

// memo caches density-evolution distributions keyed by length*padding+index.
// NOTE(review): the key does not include the base distribution, so
// interleaving calls with different base channels would return stale
// entries; access is also unsynchronized (not goroutine-safe).
var memo = make(map[int]map[float64]float64)
/*
approximate log likelihood ratio for evaluate density evolution probability.
*/
func approximateLLR(llr float64, prob float64) (float64, bool) {
if prob < 0.0000001 {
return 0, false
}
v := math.Floor(llr*20) / 20
if math.IsNaN(v) {
return 0, false
}
return v, true
}
// densityEvolutionDiscreteProbability recursively computes the discrete LLR
// probability distribution (LLR -> probability) of combined channel `index`
// for a code of the given length, starting from the base channel
// distribution `base`. Results are memoized under the composite key
// length*padding+index.
//
// The recursion halves the length at each step: for an even index the two
// child LLRs are summed; for an odd index they are combined with
// 2*atanh(tanh(a/2)*tanh(b/2)). This matches the two channel-combining
// transforms of polar-code density evolution — TODO confirm the indexing
// convention (indices appear to start at 1, per the interface doc).
//
// NOTE(review): the memo key ignores `base`, so mixing calls with different
// base distributions would return stale entries; the shared memo map also
// makes this function non-goroutine-safe.
func densityEvolutionDiscreteProbability(length int, index int, base map[float64]float64) map[float64]float64 {
	if length == 1 {
		// Base case: the raw channel distribution itself.
		return base
	}
	if value, ok := memo[length*padding+index]; ok {
		return value
	}
	if index%2 == 0 {
		child := densityEvolutionDiscreteProbability(length/2, index/2, base)
		ret := make(map[float64]float64)
		// Convolution of the child distribution with itself under LLR addition.
		for llr1, prob1 := range child {
			for llr2, prob2 := range child {
				llr, isTarget := approximateLLR(llr1+llr2, prob1*prob2)
				if !isTarget {
					continue
				}
				ret[llr] += prob1 * prob2
			}
		}
		memo[length*padding+index] = ret
		return ret
	}
	child := densityEvolutionDiscreteProbability(length/2, (index+1)/2, base)
	ret := make(map[float64]float64)
	// Convolution under the check-node combining rule.
	for llr1, prob1 := range child {
		for llr2, prob2 := range child {
			llr := 2 * math.Atanh(math.Tanh(llr1/2)*math.Tanh(llr2/2))
			llr, isTarget := approximateLLR(llr, prob1*prob2)
			if !isTarget {
				continue
			}
			ret[llr] += prob1 * prob2
		}
	}
	memo[length*padding+index] = ret
	return ret
}
package interval
import (
"net/http"
"time"
"github.com/zalando/skipper/predicates"
"github.com/zalando/skipper/routing"
)
// spec identifies which of the three interval predicates a value
// represents; it also serves as the routing.PredicateSpec implementation.
type spec int

const (
	between spec = iota
	before
	after
)

const rfc3339nz = "2006-01-02T15:04:05" // RFC3339 without numeric timezone offset

// predicate is the routing.Predicate implementation shared by all three
// interval predicates.
type predicate struct {
	typ spec // which predicate: between, before or after
	begin time.Time // lower bound (unused by before)
	end time.Time // upper bound (unused by after)
	getTime func() time.Time // clock source, replaceable in tests
}
// NewBetween creates the Between predicate spec, matching requests whose
// time lies in [begin, end) — begin inclusive, end exclusive.
func NewBetween() routing.PredicateSpec { return between }

// NewBefore creates the Before predicate spec, matching requests strictly
// before the configured time.
func NewBefore() routing.PredicateSpec { return before }

// NewAfter creates the After predicate spec, matching requests at or after
// the configured time.
func NewAfter() routing.PredicateSpec { return after }
// Name returns the eskip name of this predicate spec. It panics on an
// unknown spec value, which would be a programming error.
func (s spec) Name() string {
	if s == between {
		return predicates.BetweenName
	}
	if s == before {
		return predicates.BeforeName
	}
	if s == after {
		return predicates.AfterName
	}
	panic("invalid interval predicate type")
}
// Create builds an interval predicate from route arguments. Accepted
// argument shapes per spec type:
//
//	Between: begin, end            (RFC3339 strings or unix-second numbers; begin < end)
//	Between: begin, end, location  (zone-less RFC3339 plus a time zone name)
//	Before:  end       [, location]
//	After:   begin     [, location]
//
// The case conditions below deliberately rely on short-circuit evaluation
// for their side effects: each parse* call fills p.begin, p.end or loc in
// left-to-right order, so their ordering must not be changed.
func (s spec) Create(args []interface{}) (routing.Predicate, error) {
	p := predicate{typ: s, getTime: time.Now}
	var loc *time.Location
	switch {
	case
		s == between && len(args) == 3 && parseLocation(args[2], &loc) && parseRFCnz(args[0], &p.begin, loc) && parseRFCnz(args[1], &p.end, loc) && p.begin.Before(p.end),
		s == between && len(args) == 2 && parseRFC(args[0], &p.begin) && parseRFC(args[1], &p.end) && p.begin.Before(p.end),
		s == between && len(args) == 2 && parseUnix(args[0], &p.begin) && parseUnix(args[1], &p.end) && p.begin.Before(p.end),
		s == before && len(args) == 2 && parseLocation(args[1], &loc) && parseRFCnz(args[0], &p.end, loc),
		s == before && len(args) == 1 && parseRFC(args[0], &p.end),
		s == before && len(args) == 1 && parseUnix(args[0], &p.end),
		s == after && len(args) == 2 && parseLocation(args[1], &loc) && parseRFCnz(args[0], &p.begin, loc),
		s == after && len(args) == 1 && parseRFC(args[0], &p.begin),
		s == after && len(args) == 1 && parseUnix(args[0], &p.begin):
		return &p, nil
	}
	return nil, predicates.ErrInvalidPredicateParameters
}
func parseUnix(arg interface{}, t *time.Time) bool {
switch a := arg.(type) {
case float64:
*t = time.Unix(int64(a), 0)
return true
case int64:
*t = time.Unix(a, 0)
return true
}
return false
}
func parseRFC(arg interface{}, t *time.Time) bool {
if s, ok := arg.(string); ok {
tt, err := time.Parse(time.RFC3339, s)
if err == nil {
*t = tt
return true
}
}
return false
}
// parseRFCnz parses arg as a zone-less RFC3339 timestamp (layout rfc3339nz)
// interpreted in the given location and stores the result in *t, reporting
// whether parsing succeeded.
func parseRFCnz(arg interface{}, t *time.Time, loc *time.Location) bool {
	s, ok := arg.(string)
	if !ok {
		return false
	}
	parsed, err := time.ParseInLocation(rfc3339nz, s, loc)
	if err != nil {
		return false
	}
	*t = parsed
	return true
}
func parseLocation(arg interface{}, loc **time.Location) bool {
if s, ok := arg.(string); ok {
location, err := time.LoadLocation(s)
if err == nil {
*loc = location
return true
}
}
return false
}
func (p *predicate) Match(r *http.Request) bool {
now := p.getTime()
switch p.typ {
case between:
return (p.begin.Before(now) || p.begin.Equal(now)) && p.end.After(now)
case before:
return p.end.After(now)
case after:
return p.begin.Before(now) || p.begin.Equal(now)
default:
return false
}
} | predicates/interval/interval.go | 0.6137 | 0.407333 | interval.go | starcoder |
package packets
// LapData describes the lap state of a single car in the session.
// The lap data packet gives details of all the cars in the session.
// Frequency: Rate as specified in menus
// Size: 1190 bytes
// Version: 1
type LapData struct {
	LastLapTimeInMS uint32 // Last lap time in milliseconds
	CurrentLapTimeInMS uint32 // Current time around the lap in milliseconds
	Sector1TimeInMS uint16 // Sector 1 time in milliseconds
	Sector2TimeInMS uint16 // Sector 2 time in milliseconds
	LapDistance float32 // Distance vehicle is around current lap in metres – could, be negative if line hasn’t been crossed yet
	TotalDistance float32 // Total distance travelled in session in metres – could, be negative if line hasn’t been crossed yet
	SafetyCarDelta float32 // Delta in seconds for safety car
	CarPosition uint8 // Car race position
	CurrentLapNum uint8 // Current lap number
	PitStatus uint8 // 0 = none, 1 = pitting, 2 = in pit area
	NumPitStops uint8 //Number of pit stops taken in this race
	Sector uint8 // 0 = sector1, 1 = sector2, 2 = sector3
	CurrentLapInvalid uint8 // Current lap invalid - 0 = valid, 1 = invalid
	Penalties uint8 // Accumulated time penalties in seconds to be added
	Warnings uint8 // Accumulated number of warnings issued
	NumUnservedDriveThroughPens uint8 // Num drive through pens left to serve
	NumUnservedStopGoPens uint8 // Num stop go pens left to serve
	GridPosition uint8 // Grid position the vehicle started the race in
	DriverStatus uint8 // Status of driver - 0 = in garage, 1 = flying lap, 2 = in lap, 3 = out lap, 4 = on track
	ResultStatus uint8 // Result status - 0 = invalid, 1 = inactive, 2 = active, 3 = finished, 4 = disqualified, 5 = not classified, 6 = retired
	PitLaneTimerActive uint8 // Pit lane timing, 0 = inactive, 1 = active
	PitLaneTimeInLaneInMS uint16 // If active, the current time spent in the pit lane in ms
	PitStopTimerInMS uint16 // Time of the actual pit stop in ms
	PitStopShouldServePen uint8 // Whether the car should serve a penalty at this stop
}

// PacketLapData bundles the packet header with the lap data of every car
// in the session (fixed-size array of 22 slots).
type PacketLapData struct {
	Header PacketHeader
	LapData [22]LapData // Lap data for all cars on track
}
package testing
// simple testing library from https://github.com/qiniu/x
import (
"reflect"
"strings"
"testing"
)
// ----------------------------------------------------------------------------
// Testing represents a testing object: a thin wrapper around *testing.T
// that creates TestCase values bound to the same T.
type Testing struct {
	t *testing.T
}

// New creates a testing object bound to t.
func New(t *testing.T) *Testing {
	return &Testing{t: t}
}
// New creates a named test case bound to this Testing object's *testing.T.
func (p *Testing) New(name string) *TestCase {
	return &TestCase{name: name, t: p.t}
}

// Call creates an unnamed test case and immediately calls fn with args.
func (p *Testing) Call(fn interface{}, args ...interface{}) *TestCase {
	return p.New("").Call(fn, args...)
}

// Case creates a named test case and records result as its output values.
func (p *Testing) Case(name string, result ...interface{}) *TestCase {
	return p.New(name).Init(result...)
}
// ----------------------------------------------------------------------------
// TestCase represents a test case: the recorded outcome of one function
// call (or an explicitly initialized result set) plus assertion state.
type TestCase struct {
	t *testing.T
	name string
	msg []byte // description of the last call, used in failure messages
	rcov interface{} // value recovered from a panic in the last call, if any
	cstk *stack // call stack captured at panic time
	out []reflect.Value // output values of the last call
	idx int // index of the output value currently under assertion
}
// newMsg starts a failure-message buffer, prefixed with the case name and
// a trailing space when a name was given.
func (p *TestCase) newMsg() []byte {
	msg := make([]byte, 0, 16)
	if p.name == "" {
		return msg
	}
	msg = append(msg, p.name...)
	return append(msg, ' ')
}
// Init records result as the case's output values and resets the panic
// and assertion-cursor state, returning the case for chaining.
func (p *TestCase) Init(result ...interface{}) *TestCase {
	p.out = make([]reflect.Value, len(result))
	for i, r := range result {
		p.out[i] = reflect.ValueOf(r)
	}
	p.msg = p.newMsg()
	p.rcov = nil
	p.idx = 0
	return p
}
// Call invokes fn with the given arguments via reflection and records the
// outputs for later assertions. A panic inside fn is recovered into e.rcov
// along with a captured call stack; e is a named result so the deferred
// recover still returns the test case after a panic.
func (p *TestCase) Call(fn interface{}, args ...interface{}) (e *TestCase) {
	e = p
	e.msg = CallDetail(e.newMsg(), fn, args...)
	defer func() {
		if e.rcov = recover(); e.rcov != nil {
			e.cstk = callers(3)
		}
	}()
	e.rcov = nil
	e.out = reflect.ValueOf(fn).Call(makeArgs(args))
	e.idx = 0
	return
}
// makeArgs wraps each argument in a reflect.Value so the slice can be
// passed to reflect.Value.Call.
func makeArgs(args []interface{}) []reflect.Value {
	in := make([]reflect.Value, 0, len(args))
	for _, arg := range args {
		in = append(in, reflect.ValueOf(arg))
	}
	return in
}
// Next advances the assertion cursor to the next output value.
func (p *TestCase) Next() *TestCase {
	p.idx++
	return p
}

// With moves the assertion cursor to the i-th output value (0-based).
func (p *TestCase) With(i int) *TestCase {
	p.idx = i
	return p
}
// Panic checks the panic behavior of the preceding Call. With no arguments
// (the variadic slice is nil) it asserts that the call did NOT panic.
// Panic(v) asserts that the call panicked with a value deep-equal to v,
// and Panic(nil) asserts that it panicked without checking the recovered
// value. Only the first argument is examined.
func (p *TestCase) Panic(panicMsg ...interface{}) *TestCase {
	if panicMsg == nil {
		p.assertNotPanic()
	} else {
		assertPanic(p.t, p.msg, p.rcov, panicMsg[0])
	}
	return p
}
// assertPanic fails the test fatally unless rcov shows that a panic
// occurred; when panicMsg is non-nil, the recovered value must also be
// deeply equal to it.
func assertPanic(t *testing.T, msg []byte, rcov interface{}, panicMsg interface{}) {
	if rcov == nil {
		t.Fatalf("%s:\nPanic checks: no panic, expected: panic\n", string(msg))
	}
	if panicMsg == nil {
		return
	}
	if !reflect.DeepEqual(rcov, panicMsg) {
		t.Fatalf("%s:\nPanic checks: %v, expected: %v\n", string(msg), rcov, panicMsg)
	}
}
// assertNotPanic fails the test fatally if the recorded call panicked,
// printing the recovered value and the captured call stack.
func (p *TestCase) assertNotPanic() {
	if p.rcov != nil {
		p.t.Fatalf("panic: %v\n%+v\n", p.rcov, p.cstk)
	}
}
// Equal asserts that the current output value deep-equals v; it first
// fails fatally if the recorded call panicked.
func (p *TestCase) Equal(v interface{}) *TestCase {
	p.assertNotPanic()
	p.assertEq(p.out[p.idx].Interface(), v)
	return p
}

// PropEqual asserts that property prop of the current output value
// deep-equals v; the property lookup rules are those of PropVal.
func (p *TestCase) PropEqual(prop string, v interface{}) *TestCase {
	p.assertNotPanic()
	o := PropVal(p.out[p.idx], prop)
	p.assertEq(o.Interface(), v)
	return p
}

// assertEq fails the test fatally when a and b are not deeply equal.
func (p *TestCase) assertEq(a, b interface{}) {
	if !reflect.DeepEqual(a, b) {
		p.t.Fatalf("%s:\nassertEq failed: %v, expected: %v\n", string(p.msg), a, b)
	}
}
// PropVal returns property value of an object.
func PropVal(o reflect.Value, prop string) reflect.Value {
start:
switch o.Kind() {
case reflect.Struct:
if ret := o.FieldByName(prop); ret.IsValid() {
return ret
}
case reflect.Map:
return o.MapIndex(reflect.ValueOf(prop))
case reflect.Interface, reflect.Ptr:
o = o.Elem()
goto start
}
if m := o.MethodByName(strings.Title(prop)); m.IsValid() {
out := m.Call([]reflect.Value{})
if len(out) != 1 {
panic("invalid PropVal: " + prop)
}
return out[0]
}
panic(o.Type().String() + " object hasn't property: " + prop)
} | testing/ts.go | 0.708616 | 0.419945 | ts.go | starcoder |
package version100
const JsonSchema100 = `{
"meta:license": [
" Copyright (c) 2012-2019 Red Hat, Inc.",
" This program and the accompanying materials are made",
" available under the terms of the Eclipse Public License 2.0",
" which is available at https://www.eclipse.org/legal/epl-2.0/",
" SPDX-License-Identifier: EPL-2.0",
" Contributors:",
" Red Hat, Inc. - initial API and implementation"
],
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"title": "Devfile object",
"description": "This schema describes the structure of the devfile object",
"definitions": {
"attributes": {
"type": "object",
"additionalProperties": {
"type": "string"
}
},
"selector": {
"type": "object",
"additionalProperties": {
"type": "string"
}
}
},
"required": [
"apiVersion",
"metadata"
],
"additionalProperties": false,
"properties": {
"apiVersion": {
"const": "1.0.0",
"title": "Devfile API Version"
},
"metadata": {
"type": "object",
"properties": {
"name": {
"type": "string",
"minLength": 1,
"title": "Devfile Name",
"description": "The name of the devfile. Workspaces created from devfile, will inherit this name",
"examples": [
"petclinic-dev-environment"
]
},
"generateName": {
"type": "string",
"minLength": 1,
"title": "Devfile Generate Name",
"description": "Workspaces created from devfile, will use it as base and append random suffix. It's used when name is not defined.",
"examples": [
"petclinic-"
]
}
},
"additionalProperties": false,
"anyOf": [
{
"required": [
"name"
]
},
{
"required": [
"generateName"
]
}
]
},
"projects": {
"type": "array",
"title": "The Projects List",
"description": "Description of the projects, containing names and sources locations",
"items": {
"type": "object",
"required": [
"name",
"source"
],
"additionalProperties": false,
"properties": {
"name": {
"type": "string",
"title": "The Project Name",
"examples": [
"petclinic"
]
},
"source": {
"type": "object",
"title": "The Project Source object",
"description": "Describes the project's source - type and location",
"required": [
"type",
"location"
],
"properties": {
"type": {
"type": "string",
"description": "Project's source type.",
"examples": [
"git",
"github",
"zip"
]
},
"location": {
"type": "string",
"description": "Project's source location address. Should be URL for git and github located projects",
"examples": [
"<EMAIL>:spring-projects/spring-petclinic.git"
]
},
"branch": {
"type": "string",
"description": "The name of the of the branch to check out after obtaining the source from the location. The branch has to already exist in the source otherwise the default branch is used. In case of git, this is also the name of the remote branch to push to.",
"examples": [
"master",
"feature-42"
]
},
"startPoint": {
"type": "string",
"description": "The tag or commit id to reset the checked out branch to.",
"examples": [
"release/4.2",
"349d3ad",
"v4.2.0"
]
},
"tag": {
"type": "string",
"description": "The name of the tag to reset the checked out branch to. Note that this is equivalent to 'startPoint' and provided for convenience.",
"examples": [
"v4.2.0"
]
},
"commitId": {
"type": "string",
"description": "The id of the commit to reset the checked out branch to. Note that this is equivalent to 'startPoint' and provided for convenience.",
"examples": [
"349d3ad"
]
},
"sparseCheckoutDir": {
"type": "string",
"description": "Part of project to populate in the working directory.",
"examples": [
"/core/",
"core/",
"core",
"/wsmaster/che-core-api-workspace/",
"/d*"
]
}
}
},
"clonePath": {
"type": "string",
"description": "The path relative to the root of the projects to which this project should be cloned into. This is a unix-style relative path (i.e. uses forward slashes). The path is invalid if it is absolute or tries to escape the project root through the usage of '..'. If not specified, defaults to the project name."
}
}
}
},
"components": {
"type": "array",
"title": "The Components List",
"description": "Description of the workspace components, such as editor and plugins",
"items": {
"type": "object",
"required": [
"type"
],
"if": {
"properties": {
"type": {
"type": "string"
}
},
"required": [
"type"
]
},
"then": {
"allOf": [
{
"if": {
"properties": {
"type": {
"enum": [
"cheEditor",
"chePlugin"
]
}
}
},
"then": {
"oneOf": [
{
"required": [
"id"
],
"not": {
"required": [
"reference"
]
}
},
{
"required": [
"reference"
],
"not": {
"required": [
"id"
]
}
}
],
"properties": {
"type": {},
"alias": {},
"id": {
"type": "string",
"description": "Describes the component id. It has the following format: {plugin/editor PUBLISHER}/{plugin/editor NAME}/{plugin/editor VERSION}",
"pattern": "[a-z0-9_\\-.]+/[a-z0-9_\\-.]+/[a-z0-9_\\-.]+$",
"examples": [
"eclipse/maven-jdk8/1.0.0"
]
},
"reference": {
"description": "Describes raw location of plugin yaml file.",
"type": "string",
"examples": [
"https://pastebin.com/raw/kYprWiNB"
]
},
"registryUrl": {
"description": "Describes URL of custom plugin registry.",
"type": "string",
"pattern": "^(https?://)[a-zA-Z0-9_\\-./]+",
"examples": [
"https://che-plugin-registry.openshift.io/v3/"
]
},
"memoryLimit": {
"type": "string",
"description": "Describes memory limit for the component. You can express memory as a plain integer or as a fixed-point integer using one of these suffixes: E, P, T, G, M, K. You can also use the power-of-two equivalents: Ei, Pi, Ti, Gi, Mi, Ki",
"examples": [
"128974848",
"129e6",
"129M",
"123Mi"
]
}
}
}
},
{
"if": {
"properties": {
"type": {
"enum": [
"cheEditor"
]
}
}
},
"then": {
"additionalProperties": false,
"properties": {
"type": {},
"alias": {},
"id": {},
"env": {},
"reference": {},
"registryUrl": {},
"memoryLimit": {}
}
}
},
{
"if": {
"properties": {
"type": {
"enum": [
"chePlugin"
]
}
}
},
"then": {
"additionalProperties": false,
"properties": {
"type": {},
"alias": {},
"id": {},
"env": {},
"memoryLimit": {},
"reference": {},
"registryUrl": {},
"preferences": {
"type": "object",
"description": "Additional plugin preferences",
"examples": [
"{\"java.home\": \"/home/user/jdk11\", \"java.jdt.ls.vmargs\": \"-Xmx1G\"}"
],
"additionalProperties": {
"type": [
"boolean",
"string",
"number"
]
}
}
}
}
},
{
"if": {
"properties": {
"type": {
"enum": [
"kubernetes",
"openshift"
]
}
}
},
"then": {
"anyOf": [
{
"required": [
"reference"
],
"additionalProperties": true
},
{
"required": [
"referenceContent"
],
"additionalProperties": true
}
],
"additionalProperties": false,
"properties": {
"type": {},
"alias": {},
"mountSources": {},
"env": {},
"reference": {
"description": "Describes absolute or devfile-relative location of Kubernetes list yaml file. Applicable only for 'kubernetes' and 'openshift' type components",
"type": "string",
"examples": [
"petclinic-app.yaml"
]
},
"referenceContent": {
"description": "Inlined content of a file specified in field 'reference'",
"type": "string",
"examples": [
"{\"kind\":\"List\",\"items\":[{\"apiVersion\":\"v1\",\"kind\":\"Pod\",\"metadata\":{\"name\":\"ws\"},\"spec\":{\"containers\":[{\"image\":\"eclipse/che-dev:nightly\"}]}}]}"
]
},
"selector": {
"$ref": "#/definitions/selector",
"description": "Describes the objects selector for the recipe type components. Allows to pick-up only selected items from k8s/openshift list",
"examples": [
"{\n \"app.kubernetes.io/name\" : \"mysql\", \n \"app.kubernetes.io/component\" : \"database\", \n \"app.kubernetes.io/part-of\" : \"petclinic\" \n}"
]
},
"entrypoints": {
"type": "array",
"items": {
"type": "object",
"properties": {
"parentName": {
"type": "string",
"description": "The name of the top level object in the referenced object list in which to search for containers. If not specified, the objects to search through can have any name."
},
"containerName": {
"type": "string",
"description": "The name of the container to apply the entrypoint to. If not specified, the entrypoint is modified on all matching containers."
},
"parentSelector": {
"$ref": "#/definitions/selector",
"description": "The selector on labels of the top level objects in the referenced list in which to search for containers. If not specified, the objects to search through can have any labels."
},
"command": {
"type": "array",
"items": {
"type": "string"
},
"default": null,
"description": "The command to run in the component instead of the default one provided in the image of the container. Defaults to null, meaning use whatever is defined in the image.",
"examples": [
"['/bin/sh', '-c']"
]
},
"args": {
"type": "array",
"items": {
"type": "string"
},
"default": null,
"description": "The arguments to supply to the command running the component. The arguments are supplied either to the default command provided in the image of the container or to the overridden command. Defaults to null, meaning use whatever is defined in the image.",
"examples": [
"['-R', '-f']"
]
}
}
}
}
}
}
},
{
"if": {
"properties": {
"type": {
"enum": [
"dockerimage"
]
}
}
},
"then": {
"required": [
"image",
"memoryLimit"
],
"additionalProperties": false,
"properties": {
"type": {},
"alias": {},
"mountSources": {},
"env": {},
"image": {
"type": "string",
"description": "Specifies the docker image that should be used for component",
"examples": [
"eclipse/maven-jdk8:1.0.0"
]
},
"memoryLimit": {
"type": "string",
"description": "Describes memory limit for the component. You can express memory as a plain integer or as a fixed-point integer using one of these suffixes: E, P, T, G, M, K. You can also use the power-of-two equivalents: Ei, Pi, Ti, Gi, Mi, Ki",
"examples": [
"128974848",
"129e6",
"129M",
"123Mi"
]
},
"command": {
"type": "array",
"items": {
"type": "string"
},
"default": null,
"description": "The command to run in the dockerimage component instead of the default one provided in the image. Defaults to null, meaning use whatever is defined in the image.",
"examples": [
"['/bin/sh', '-c']"
]
},
"args": {
"type": "array",
"items": {
"type": "string"
},
"default": null,
"description": "The arguments to supply to the command running the dockerimage component. The arguments are supplied either to the default command provided in the image or to the overridden command. Defaults to null, meaning use whatever is defined in the image.",
"examples": [
"['-R', '-f']"
]
},
"volumes": {
"type": "array",
"description": "Describes volumes which should be mount to component",
"items": {
"type": "object",
"description": "Describe volume that should be mount to component",
"required": [
"name",
"containerPath"
],
"properties": {
"name": {
"type": "string",
"title": "The Volume Name",
"description": "The volume name. If several components mount the same volume then they will reuse the volume and will be able to access to the same files",
"examples": [
"my-data"
]
},
"containerPath": {
"type": "string",
"title": "The path where volume should be mount to container",
"examples": [
"/home/user/data"
]
}
}
}
},
"endpoints": {
"type": "array",
"description": "Describes dockerimage component endpoints",
"items": {
"name": "object",
"description": "Describes dockerimage component endpoint",
"required": [
"name",
"port"
],
"properties": {
"name": {
"type": "string",
"title": "The Endpoint Name",
"description": "The Endpoint name"
},
"port": {
"type": "integer",
"title": "The Endpoint Port",
"description": "The container port that should be used as endpoint"
},
"attributes": {
"type": "object",
"public": {
"type": "boolean",
"description": "Identifies endpoint as workspace internally or externally accessible.",
"default": "true"
},
"secure": {
"type": "boolean",
"description": "Identifies server as secure or non-secure. Requests to secure servers will be authenticated and must contain machine token",
"default": "false"
},
"discoverable": {
"type": "boolean",
"description": "Identifies endpoint as accessible by its name.",
"default": "false"
},
"protocol": {
"type": "boolean",
"description": "Defines protocol that should be used for communication with endpoint. Is used for endpoint URL evaluation"
},
"additionalProperties": {
"type": "string"
},
"javaType": "java.util.Map<String, String>"
}
}
}
}
}
}
}
]
},
"properties": {
"alias": {
"description": "The name using which other places of this devfile (like commands) can refer to this component. This attribute is optional but must be unique in the devfile if specified.",
"type": "string",
"examples": [
"mvn-stack"
]
},
"type": {
"description": "Describes type of the component, e.g. whether it is an plugin or editor or other type",
"enum": [
"cheEditor",
"chePlugin",
"kubernetes",
"openshift",
"dockerimage"
],
"examples": [
"chePlugin",
"cheEditor",
"kubernetes",
"openshift",
"dockerimage"
]
},
"mountSources": {
"type": "boolean",
"description": "Describes whether projects sources should be mount to the component. CHE_PROJECTS_ROOT environment variable should contains a path where projects sources are mount",
"default": "false"
},
"env": {
"type": "array",
"description": "The environment variables list that should be set to docker container",
"items": {
"type": "object",
"description": "Describes environment variable",
"required": [
"name",
"value"
],
"properties": {
"name": {
"type": "string",
"title": "The Environment Variable Name",
"description": "The environment variable name"
},
"value": {
"type": "string",
"title": "The Environment Variable Value",
"description": "The environment variable value"
}
}
}
}
},
"additionalProperties": true
}
},
"commands": {
"type": "array",
"title": "The Commands List",
"description": "Description of the predefined commands to be available in workspace",
"items": {
"type": "object",
"additionalProperties": false,
"required": [
"name",
"actions"
],
"properties": {
"name": {
"description": "Describes the name of the command. Should be unique per commands set.",
"type": "string",
"examples": [
"build"
]
},
"attributes": {
"description": "Additional command attributes",
"$ref": "#/definitions/attributes"
},
"actions": {
"type": "array",
"description": "List of the actions of given command. Now the only one command must be specified in list but there are plans to implement supporting multiple actions commands.",
"title": "The Command Actions List",
"minItems": 1,
"maxItems": 1,
"items": {
"oneOf": [
{
"properties": {
"type": {},
"component": {},
"command": {},
"workdir": {}
},
"required": [
"type",
"component",
"command"
],
"additionalProperties": false
},
{
"properties": {
"type": {},
"reference": {},
"referenceContent": {}
},
"anyOf": [
{
"required": [
"type",
"reference"
],
"additionalProperties": true
},
{
"required": [
"type",
"referenceContent"
],
"additionalProperties": true
}
],
"additionalProperties": false
}
],
"type": "object",
"properties": {
"type": {
"description": "Describes action type",
"type": "string",
"examples": [
"exec"
]
},
"component": {
"type": "string",
"description": "Describes component to which given action relates",
"examples": [
"mvn-stack"
]
},
"command": {
"type": "string",
"description": "The actual action command-line string",
"examples": [
"mvn package"
]
},
"workdir": {
"type": "string",
"description": "Working directory where the command should be executed",
"examples": [
"/projects/spring-petclinic"
]
},
"reference": {
"type": "string",
"description": "the path relative to the location of the devfile to the configuration file defining one or more actions in the editor-specific format",
"examples": [
"../ide-config/launch.json"
]
},
"referenceContent": {
"type": "string",
"description": "The content of the referenced configuration file that defines one or more actions in the editor-specific format",
"examples": [
"{\"version\": \"2.0.0\",\n \"tasks\": [\n {\n \"type\": \"typescript\",\n \"tsconfig\": \"tsconfig.json\",\n \"problemMatcher\": [\n \"$tsc\"\n ],\n \"group\": {\n \"kind\": \"build\",\n \"isDefault\": true\n }\n }\n ]}"
]
}
}
}
},
"previewUrl": {
"type": "object",
"required": [
"port"
],
"properties": {
"port": {
"type": "number",
"minimum": 0,
"maximum": 65535
},
"path": {
"type": "string"
}
}
}
}
}
},
"attributes": {
"type": "object",
"editorFree": {
"type": "boolean",
"description": "Defines that no editor is needed and default one should not be provisioned. Defaults to false.",
"default": "false"
},
"persistVolumes": {
"type": "boolean",
"description": "Defines whether volumes should be stored or not. Defaults to true. In case of false workspace volumes will be created as emptyDir. The data in the emptyDir volume is deleted forever when a workspace Pod is removed for any reason(pod is crashed, workspace is restarted).",
"default": "true"
},
"additionalProperties": {
"type": "string"
},
"javaType": "java.util.Map<String, String>"
}
}
}` | pkg/devfile/parser/data/1.0.0/devfileJsonSchema100.go | 0.856197 | 0.444625 | devfileJsonSchema100.go | starcoder |
package onshape
import (
"encoding/json"
)
// BTAllowEdgePointFilter2371 struct for BTAllowEdgePointFilter2371
type BTAllowEdgePointFilter2371 struct {
BTQueryFilter183
AllowsEdgePoint *bool `json:"allowsEdgePoint,omitempty"`
BtType *string `json:"btType,omitempty"`
}
// NewBTAllowEdgePointFilter2371 creates a BTAllowEdgePointFilter2371,
// assigning defaults to any properties that declare them so that
// API-required fields are present. Its argument list will change if the
// set of required properties changes.
func NewBTAllowEdgePointFilter2371() *BTAllowEdgePointFilter2371 {
	return &BTAllowEdgePointFilter2371{}
}
// NewBTAllowEdgePointFilter2371WithDefaults creates a
// BTAllowEdgePointFilter2371 with only defaulted properties assigned;
// API-required properties are not guaranteed to be set.
func NewBTAllowEdgePointFilter2371WithDefaults() *BTAllowEdgePointFilter2371 {
	return &BTAllowEdgePointFilter2371{}
}
// GetAllowsEdgePoint returns the AllowsEdgePoint field if it has been
// set, and the zero value (false) otherwise.
func (o *BTAllowEdgePointFilter2371) GetAllowsEdgePoint() bool {
	if o != nil && o.AllowsEdgePoint != nil {
		return *o.AllowsEdgePoint
	}
	var zero bool
	return zero
}
// GetAllowsEdgePointOk returns a pointer to the AllowsEdgePoint field
// together with a flag reporting whether the field has been set.
func (o *BTAllowEdgePointFilter2371) GetAllowsEdgePointOk() (*bool, bool) {
	if o != nil && o.AllowsEdgePoint != nil {
		return o.AllowsEdgePoint, true
	}
	return nil, false
}
// HasAllowsEdgePoint reports whether the AllowsEdgePoint field has been set.
func (o *BTAllowEdgePointFilter2371) HasAllowsEdgePoint() bool {
	return o != nil && o.AllowsEdgePoint != nil
}
// SetAllowsEdgePoint stores a pointer to (a copy of) v in the
// AllowsEdgePoint field, marking it as set.
func (o *BTAllowEdgePointFilter2371) SetAllowsEdgePoint(v bool) {
	o.AllowsEdgePoint = &v
}
// GetBtType returns the BtType field if it has been set, and the zero
// value ("") otherwise.
func (o *BTAllowEdgePointFilter2371) GetBtType() string {
	if o != nil && o.BtType != nil {
		return *o.BtType
	}
	var zero string
	return zero
}
// GetBtTypeOk returns a pointer to the BtType field together with a flag
// reporting whether the field has been set.
func (o *BTAllowEdgePointFilter2371) GetBtTypeOk() (*string, bool) {
	if o != nil && o.BtType != nil {
		return o.BtType, true
	}
	return nil, false
}
// HasBtType reports whether the BtType field has been set.
func (o *BTAllowEdgePointFilter2371) HasBtType() bool {
	return o != nil && o.BtType != nil
}
// SetBtType stores a pointer to (a copy of) v in the BtType field,
// marking it as set.
func (o *BTAllowEdgePointFilter2371) SetBtType(v string) {
	o.BtType = &v
}
// MarshalJSON serializes the filter. The embedded BTQueryFilter183 is
// marshalled first and its fields flattened into the result map; the
// optional fields of this type are then layered on top, omitted when nil.
func (o BTAllowEdgePointFilter2371) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	embedded, err := json.Marshal(o.BTQueryFilter183)
	if err != nil {
		return []byte{}, err
	}
	if err = json.Unmarshal(embedded, &toSerialize); err != nil {
		return []byte{}, err
	}
	if o.AllowsEdgePoint != nil {
		toSerialize["allowsEdgePoint"] = o.AllowsEdgePoint
	}
	if o.BtType != nil {
		toSerialize["btType"] = o.BtType
	}
	return json.Marshal(toSerialize)
}
// NullableBTAllowEdgePointFilter2371 wraps a *BTAllowEdgePointFilter2371
// together with an explicit "set" flag, so an explicit JSON null can be
// distinguished from an absent value.
type NullableBTAllowEdgePointFilter2371 struct {
	value *BTAllowEdgePointFilter2371
	isSet bool
}
// Get returns the wrapped value, which may be nil.
func (v NullableBTAllowEdgePointFilter2371) Get() *BTAllowEdgePointFilter2371 {
	return v.value
}
// Set stores val and marks the wrapper as set (even when val is nil).
func (v *NullableBTAllowEdgePointFilter2371) Set(val *BTAllowEdgePointFilter2371) {
	v.value = val
	v.isSet = true
}
// IsSet reports whether Set (or the constructor/UnmarshalJSON) has been called.
func (v NullableBTAllowEdgePointFilter2371) IsSet() bool {
	return v.isSet
}
// Unset clears the wrapped value and the set flag.
func (v *NullableBTAllowEdgePointFilter2371) Unset() {
	v.value = nil
	v.isSet = false
}
// NewNullableBTAllowEdgePointFilter2371 wraps val and marks it as set.
func NewNullableBTAllowEdgePointFilter2371(val *BTAllowEdgePointFilter2371) *NullableBTAllowEdgePointFilter2371 {
	return &NullableBTAllowEdgePointFilter2371{value: val, isSet: true}
}
// MarshalJSON encodes the wrapped value; a nil value encodes as JSON null.
func (v NullableBTAllowEdgePointFilter2371) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
// UnmarshalJSON decodes src into the wrapped value and marks the wrapper
// as set — note it is marked set even when decoding fails or src is null.
func (v *NullableBTAllowEdgePointFilter2371) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
} | onshape/model_bt_allow_edge_point_filter_2371.go | 0.698946 | 0.479077 | model_bt_allow_edge_point_filter_2371.go | starcoder |
package utl
import "gosl/chk"
// Deep3alloc allocates an n1×n2×n3 nested slice of float64 with every
// element initialized to zero.
func Deep3alloc(n1, n2, n3 int) (a [][][]float64) {
	a = make([][][]float64, n1)
	for i := range a {
		a[i] = make([][]float64, n2)
		for j := range a[i] {
			a[i][j] = make([]float64, n3)
		}
	}
	return
}
// Deep4alloc allocates an n1×n2×n3×n4 nested slice of float64 with every
// element initialized to zero.
func Deep4alloc(n1, n2, n3, n4 int) (a [][][][]float64) {
	a = make([][][][]float64, n1)
	for i := range a {
		a[i] = make([][][]float64, n2)
		for j := range a[i] {
			a[i][j] = make([][]float64, n3)
			for k := range a[i][j] {
				a[i][j][k] = make([]float64, n4)
			}
		}
	}
	return
}
// Deep3set assigns v to every element of the nested slice a, in place.
// Ragged (non-rectangular) inputs are handled.
func Deep3set(a [][][]float64, v float64) {
	for i := range a {
		for j := range a[i] {
			for k := range a[i][j] {
				a[i][j][k] = v
			}
		}
	}
}
// Deep4set assigns v to every element of the nested slice a, in place.
// Ragged (non-rectangular) inputs are handled.
func Deep4set(a [][][][]float64, v float64) {
	for i := range a {
		for j := range a[i] {
			for k := range a[i][j] {
				for l := range a[i][j][k] {
					a[i][j][k][l] = v
				}
			}
		}
	}
}
// Deep2checkSize reports whether a has n1 rows and (judging by row 0 only)
// n2 columns. For n1 == 0 the column count is not inspected.
func Deep2checkSize(n1, n2 int, a [][]float64) bool {
	switch {
	case len(a) != n1:
		return false
	case n1 == 0:
		return true
	}
	return len(a[0]) == n2
}
// Deep3checkSize reports whether a has dimensions n1×n2×n3, inspecting
// only the first element at each level. Zero leading dimensions short-
// circuit to true.
func Deep3checkSize(n1, n2, n3 int, a [][][]float64) bool {
	switch {
	case len(a) != n1:
		return false
	case n1 == 0:
		return true
	case len(a[0]) != n2:
		return false
	case n2 == 0:
		return true
	}
	return len(a[0][0]) == n3
}
// Deep2transpose returns the transpose of a deep2 slice
func Deep2transpose(a [][]float64) (aT [][]float64) {
if len(a) < 1 {
chk.Panic("input Deep2 slice must be greater than (1,1)\n")
}
m, n := len(a), len(a[0])
aT = Alloc(n, m)
for i := 0; i < m; i++ {
for j := 0; j < n; j++ {
aT[j][i] = a[i][j]
}
}
return
} | utl/deepslices.go | 0.658308 | 0.403596 | deepslices.go | starcoder |
package dwarfgen
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/internal/src"
)
// A ScopeMarker tracks scope nesting and boundaries for later use
// during DWARF generation.
type ScopeMarker struct {
	// parents[i] is the enclosing scope of scope i+1; scope 0 (the
	// function scope) has no entry, so new scope IDs start at 1.
	parents []ir.ScopeID
	// marks records scope transitions in non-decreasing source order.
	marks []ir.Mark
}
// checkPos validates the given position and returns the current scope.
// It fatals when pos is unknown, or when pos precedes the most recently
// recorded mark (marks must be recorded in non-decreasing source order).
// Before any mark exists, the current scope is 0, the function scope.
func (m *ScopeMarker) checkPos(pos src.XPos) ir.ScopeID {
	if !pos.IsKnown() {
		base.Fatalf("unknown scope position")
	}
	if len(m.marks) == 0 {
		return 0
	}
	last := &m.marks[len(m.marks)-1]
	if xposBefore(pos, last.Pos) {
		base.FatalfAt(pos, "non-monotonic scope positions\n\t%v: previous scope position", base.FmtPos(last.Pos))
	}
	return last.Scope
}
// Push records a transition to a new child scope of the current scope.
// The child's ID equals the number of parent entries recorded so far,
// so scope IDs are allocated sequentially starting at 1.
func (m *ScopeMarker) Push(pos src.XPos) {
	current := m.checkPos(pos)
	m.parents = append(m.parents, current)
	child := ir.ScopeID(len(m.parents))
	m.marks = append(m.marks, ir.Mark{Pos: pos, Scope: child})
}
// Pop records a transition back to the current scope's parent.
// It must balance an earlier Push: calling Pop while in scope 0 would
// index parents[-1] and panic.
func (m *ScopeMarker) Pop(pos src.XPos) {
	current := m.checkPos(pos)
	parent := m.parents[current-1]
	m.marks = append(m.marks, ir.Mark{Pos: pos, Scope: parent})
}
// Unpush removes the current scope, which must be empty: the most recent
// mark must be the one that opened the current scope, and that scope must
// be the newest one allocated (its ID equals len(m.parents)).
func (m *ScopeMarker) Unpush() {
	i := len(m.marks) - 1
	current := m.marks[i].Scope
	if current != ir.ScopeID(len(m.parents)) {
		base.FatalfAt(m.marks[i].Pos, "current scope is not empty")
	}
	// Drop both the parent entry for the scope and its opening mark.
	m.parents = m.parents[:current-1]
	m.marks = m.marks[:i]
}
// WriteTo writes the recorded scope marks to the given function,
// and resets the marker for reuse. Adjacent marks at the same position
// are collapsed first (see compactMarks), keeping only the last one.
func (m *ScopeMarker) WriteTo(fn *ir.Func) {
	m.compactMarks()
	fn.Parents = make([]ir.ScopeID, len(m.parents))
	copy(fn.Parents, m.parents)
	m.parents = m.parents[:0]
	fn.Marks = make([]ir.Mark, len(m.marks))
	copy(fn.Marks, m.marks)
	m.marks = m.marks[:0]
}
func (m *ScopeMarker) compactMarks() {
n := 0
for _, next := range m.marks {
if n > 0 && next.Pos == m.marks[n-1].Pos {
m.marks[n-1].Scope = next.Scope
continue
}
m.marks[n] = next
n++
}
m.marks = m.marks[:n]
} | src/cmd/compile/internal/dwarfgen/marker.go | 0.537041 | 0.425068 | marker.go | starcoder |
package strcmp
// Natural compares two strings in "natural" order, returning -1, 0 or 1
// as left is less than, equal to, or greater than right. Runs of digits
// compare by numeric value (with length/leading-zero tie-breaks), any
// digit orders after any non-digit, and all other bytes compare literally.
// When one string is a prefix of the other, the longer string is greater.
func Natural(left, right string) int {
	shorter := len(left)
	if len(right) < shorter {
		shorter = len(right)
	}
	for i := 0; i < shorter; i++ {
		if left[i] != right[i] {
			return innerCompare(left[i], right[i], left, right, i+1)
		}
	}
	// One string is a prefix of the other: the longer one is greater.
	switch {
	case len(left) < len(right):
		return -1
	case len(left) > len(right):
		return 1
	}
	return 0
}

// innerCompare resolves the first mismatching byte pair l/r, found at
// position idx-1. Non-digit pairs compare literally, a digit always orders
// after a non-digit, and a digit pair hands off to innerCompareRemaining
// seeded with the two one-digit numeric prefixes.
func innerCompare(l, r byte, left, right string, idx int) int {
	ld, lIsDigit := parseInt(l)
	rd, rIsDigit := parseInt(r)
	switch {
	case !lIsDigit && !rIsDigit:
		// Plain byte comparison; l and r are known to differ.
		if l < r {
			return -1
		}
		return 1
	case !lIsDigit:
		// Any digit is "larger" than any non-digit.
		return -1
	case !rIsDigit:
		return 1
	}
	return innerCompareRemaining(left, right, int64(ld), int64(rd), idx)
}

// innerCompareRemaining extends the digit runs of both strings from idx
// (leftNum/rightNum already hold the first mismatching digits) and compares
// them numerically. When one run ends first, the other side keeps
// accumulating and wins as soon as its value reaches the finished side's
// value — which also makes extra leading zeros count as "greater".
func innerCompareRemaining(left, right string, leftNum, rightNum int64, idx int) int {
	for {
		var ld, rd int8
		var lok, rok bool
		if idx < len(left) {
			ld, lok = parseInt(left[idx])
		}
		if idx < len(right) {
			rd, rok = parseInt(right[idx])
		}
		if !lok && !rok {
			// Both digit runs ended together: decide by accumulated value.
			if leftNum < rightNum {
				return -1
			}
			return 1
		}
		idx++
		if !lok {
			// Left's run ended; right continues and wins on >=.
			rightNum = 10*rightNum + int64(rd)
			if rightNum >= leftNum {
				return -1
			}
			for idx < len(right) {
				d, ok := parseInt(right[idx])
				if !ok {
					break
				}
				rightNum = 10*rightNum + int64(d)
				if rightNum >= leftNum {
					return -1
				}
				idx++
			}
			// Right's entire run stayed strictly below left's value.
			return 1
		}
		if !rok {
			// Right's run ended; left continues and wins on >=.
			leftNum = 10*leftNum + int64(ld)
			if leftNum >= rightNum {
				return 1
			}
			for idx < len(left) {
				d, ok := parseInt(left[idx])
				if !ok {
					break
				}
				leftNum = 10*leftNum + int64(d)
				if leftNum >= rightNum {
					return 1
				}
				idx++
			}
			// Left's entire run stayed strictly below right's value.
			return -1
		}
		leftNum = 10*leftNum + int64(ld)
		rightNum = 10*rightNum + int64(rd)
	}
}

// parseInt interprets b as a decimal digit, reporting whether it is one.
func parseInt(b byte) (int8, bool) {
	d := int8(b) - '0'
	return d, 0 <= d && d <= 9
}
package compile
import "github.com/raviqqe/lazy-ein/command/ast"
// freeVariableFinder collects free variables of an expression relative to
// a set of already-bound variable names.
type freeVariableFinder struct {
	// variables is the set of bound names; map values are unused.
	variables map[string]struct{}
}
// newFreeVariableFinder creates a finder whose bound-variable set is vs.
// The map is stored without copying; callers must not mutate it afterwards.
func newFreeVariableFinder(vs map[string]struct{}) freeVariableFinder {
	return freeVariableFinder{vs}
}
// Find returns the names of the free variables of e: variables referenced
// by e that are neither in f's bound set nor bound within e by a lambda,
// let, case default, or list pattern. The result may contain duplicates.
func (f freeVariableFinder) Find(e ast.Expression) []string {
	switch e := e.(type) {
	case ast.Application:
		ss := f.Find(e.Function())
		for _, a := range e.Arguments() {
			ss = append(ss, f.Find(a)...)
		}
		return ss
	case ast.BinaryOperation:
		return append(f.Find(e.LHS()), f.Find(e.RHS())...)
	case ast.Case:
		ss := f.Find(e.Argument())
		for _, a := range e.Alternatives() {
			// Variables bound by the alternative's list pattern are not free
			// in its expression.
			ss = append(ss, f.addVariablesFromPattern(a.Pattern()).Find(a.Expression())...)
		}
		if d, ok := e.DefaultAlternative(); ok {
			ss = append(ss, f.addVariables(d.Variable()).Find(d.Expression())...)
		}
		return ss
	case ast.Lambda:
		ss := make([]string, 0, len(e.Arguments()))
		for _, s := range e.Arguments() {
			ss = append(ss, s)
		}
		return f.addVariables(ss...).Find(e.Expression())
	case ast.Let:
		// Let bindings are in scope both in the bound expressions
		// (recursive lets) and in the body.
		ss := make([]string, 0, len(e.Binds()))
		for _, b := range e.Binds() {
			ss = append(ss, b.Name())
		}
		f = f.addVariables(ss...)
		sss := []string{}
		for _, b := range e.Binds() {
			sss = append(sss, f.Find(b.Expression())...)
		}
		return append(sss, f.Find(e.Expression())...)
	case ast.List:
		ss := []string{}
		for _, a := range e.Arguments() {
			ss = append(ss, f.Find(a.Expression())...)
		}
		return ss
	case ast.Number:
		// A number literal contains no variables. The original code used
		// `break` here, which only exits the switch and then fell through to
		// panic("unreachable") for every ast.Number — almost certainly
		// unintended. Return nil like the other leaf cases.
		return nil
	case ast.Unboxed:
		return nil
	case ast.Variable:
		// A variable is free unless it is in the bound set.
		if _, ok := f.variables[e.Name()]; ok {
			return nil
		}
		return []string{e.Name()}
	}
	panic("unreachable")
}
// addVariablesFromPattern returns a finder whose bound set additionally
// contains every variable appearing in e when e is a list pattern.
// Non-list patterns, and non-variable list elements, contribute nothing.
func (f freeVariableFinder) addVariablesFromPattern(e ast.Expression) freeVariableFinder {
	l, ok := e.(ast.List)
	if !ok {
		return f
	}
	ss := make([]string, 0, len(l.Arguments()))
	for _, a := range l.Arguments() {
		if v, ok := a.Expression().(ast.Variable); ok {
			ss = append(ss, v.Name())
		}
	}
	return f.addVariables(ss...)
}
func (f freeVariableFinder) addVariables(ss ...string) freeVariableFinder {
m := make(map[string]struct{}, len(f.variables)+len(ss))
for k := range f.variables {
m[k] = struct{}{}
}
for _, s := range ss {
m[s] = struct{}{}
}
return freeVariableFinder{m}
} | command/compile/free_variable_finder.go | 0.592077 | 0.478346 | free_variable_finder.go | starcoder |
package a
import (
"fmt"
)
// IntVector3 represents a point in 3D space with integer components only.
type IntVector3 struct {
	X, Y, Z int
}

// SetXYZ assigns all three components at once.
//
// Bug fix: the receiver must be a pointer — with the original value
// receiver every assignment mutated a copy and was silently discarded,
// making the method a no-op for callers.
func (v *IntVector3) SetXYZ(x, y, z int) {
	v.X = x
	v.Y = y
	v.Z = z
}
// NewIntVector3 constructs an IntVector3 from its three components.
func NewIntVector3(x, y, z int) IntVector3 {
	return IntVector3{X: x, Y: y, Z: z}
}
// ToMap converts the vector to a SiMap (string-keyed map) using the
// lowercase keys "x", "y" and "z".
func (v IntVector3) ToMap() SiMap {
	return map[string]interface{}{
		"x": v.X,
		"y": v.Y,
		"z": v.Z,
	}
}
// ToString formats the vector as "(X, Y, Z)".
func (v IntVector3) ToString() string {
	return fmt.Sprintf("(%d, %d, %d)", v.X, v.Y, v.Z)
}
// Add returns the component-wise sum v + v2 as a new vector.
func (v IntVector3) Add(v2 IntVector3) IntVector3 {
	return IntVector3{X: v.X + v2.X, Y: v.Y + v2.Y, Z: v.Z + v2.Z}
}
// Sub returns the component-wise difference v - v2 as a new vector.
func (v IntVector3) Sub(v2 IntVector3) IntVector3 {
	return IntVector3{X: v.X - v2.X, Y: v.Y - v2.Y, Z: v.Z - v2.Z}
}
// Multiply returns the component-wise (Hadamard) product of v and v2.
func (v IntVector3) Multiply(v2 IntVector3) IntVector3 {
	return IntVector3{X: v.X * v2.X, Y: v.Y * v2.Y, Z: v.Z * v2.Z}
}
// ToFloat converts the vector to a float32-based Vector3.
func (v IntVector3) ToFloat() Vector3 {
	return NewVector3(float32(v.X), float32(v.Y), float32(v.Z))
}
// ToInt32 converts the vector to an Int32Vector3, narrowing each
// component to int32 (values outside int32 range wrap).
func (v IntVector3) ToInt32() Int32Vector3 {
	return Int32Vector3{
		X: int32(v.X),
		Y: int32(v.Y),
		Z: int32(v.Z),
	}
}
// Ndc converts the vector from pixel coordinates into normalized device
// coordinates given the screen size: X maps to [-1, 1] left-to-right,
// Y maps to [-1, 1] with the sign flipped (screen Y grows downward).
// The Z component of the result is always 0.
func (v IntVector3) Ndc(screen IntVector3) Vector3 {
	width := float32(screen.X)
	height := float32(screen.Y)
	centerX := width / 2
	centerY := height / 2
	nx := (2 * (float32(v.X) - centerX)) / width
	ny := (-2 * (float32(v.Y) - centerY)) / height
	return Vector3{nx, ny, 0}
}
// Equals reports whether other is an IntVector3 with identical components.
// Any other dynamic type (including *IntVector3) compares unequal.
func (v IntVector3) Equals(other interface{}) bool {
	// Idiom fix: bind the asserted value in the type switch instead of
	// re-asserting `other.(IntVector3)` inside the case.
	switch o := other.(type) {
	case IntVector3:
		return v.X == o.X && v.Y == o.Y && v.Z == o.Z
	default:
		return false
	}
}
// EncodeToByteArray serializes the vector into a 12-byte slice: X, Y and
// Z are each narrowed to int32 and written at offsets 0, 4 and 8.
// NOTE(review): assumes IntToByteArray yields 4 bytes per int32 and that
// CopyByteArray(src, dst, off, n) copies n bytes into dst at off — confirm
// against their definitions; copy errors are deliberately ignored here.
func (v IntVector3) EncodeToByteArray() []byte {
	arr := make([]byte, 12)
	_ = CopyByteArray(IntToByteArray(int32(v.X)), arr, 0, 4)
	_ = CopyByteArray(IntToByteArray(int32(v.Y)), arr, 4, 4)
	_ = CopyByteArray(IntToByteArray(int32(v.Z)), arr, 8, 4)
	return arr
}
// ZeroIntVector returns the zero vector (0, 0, 0).
func ZeroIntVector() IntVector3 {
	return IntVector3{}
}
// OneIntVector returns the vector (1, 1, 1).
func OneIntVector() IntVector3 {
	return IntVector3{X: 1, Y: 1, Z: 1}
}
// Int32Vector3 is the int32 counterpart of IntVector3 (see ToInt32).
type Int32Vector3 struct {
	X, Y, Z int32
} | common/a/intVector.go | 0.873026 | 0.77907 | intVector.go | starcoder |
package tsm1
// ReadFloatBlock reads the next block as a set of float values.
//
// The oldest matching block is decoded first; tombstoned ranges and values
// already returned by earlier calls are filtered out. Newer blocks whose
// time ranges overlap the remaining values are then decoded and merged in,
// so the caller receives the overlapping window's values ordered according
// to c.ascending.
//
// NOTE(review): a decode error from the first block is only propagated when
// some values survive filtering — the early `return nil, nil` paths below
// can swallow it. Confirm this is intended.
func (c *KeyCursor) ReadFloatBlock(tdec *TimeDecoder, vdec *FloatDecoder, buf *[]FloatValue) ([]FloatValue, error) {
	// No matching blocks to decode
	if len(c.current) == 0 {
		return nil, nil
	}
	// First block is the oldest block containing the points we're searching for.
	first := c.current[0]
	*buf = (*buf)[:0]
	values, err := first.r.ReadFloatBlockAt(&first.entry, tdec, vdec, buf)
	// Remove values we already read
	values = FloatValues(values).Exclude(first.readMin, first.readMax)
	// Remove any tombstones
	tombstones := first.r.TombstoneRange(c.key)
	values = c.filterFloatValues(tombstones, values)
	// Check we have remaining values.
	if len(values) == 0 {
		return nil, nil
	}
	// Only one block with this key and time range so return it
	if len(c.current) == 1 {
		if len(values) > 0 {
			first.markRead(values[0].UnixNano(), values[len(values)-1].UnixNano())
		}
		return values, nil
	}
	// Use the current block time range as our overlapping window
	minT, maxT := first.readMin, first.readMax
	if len(values) > 0 {
		minT, maxT = values[0].UnixNano(), values[len(values)-1].UnixNano()
	}
	if c.ascending {
		// Blocks are ordered by generation, we may have values in the past in later blocks, if so,
		// expand the window to include the min time range to ensure values are returned in ascending
		// order
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			if cur.entry.MinTime < minT && !cur.read() {
				minT = cur.entry.MinTime
			}
		}
		// Find the first block that overlaps our window
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() {
				// Extend the window to the union of the first block and the first
				// overlapping block, then restrict values to that window, so the
				// whole region needing a merge is covered.
				if cur.entry.MaxTime > maxT {
					maxT = cur.entry.MaxTime
				}
				values = FloatValues(values).Include(minT, maxT)
				break
			}
		}
		// Search the remaining blocks that overlap our window and append their values so we can
		// merge them.
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			// Skip this block if it doesn't contain points we are looking for or they have already been read
			if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() {
				cur.markRead(minT, maxT)
				continue
			}
			tombstones := cur.r.TombstoneRange(c.key)
			var a []FloatValue
			v, err := cur.r.ReadFloatBlockAt(&cur.entry, tdec, vdec, &a)
			if err != nil {
				return nil, err
			}
			// Remove any tombstoned values
			v = c.filterFloatValues(tombstones, v)
			// Remove values we already read
			v = FloatValues(v).Exclude(cur.readMin, cur.readMax)
			if len(v) > 0 {
				// Only use values in the overlapping window
				v = FloatValues(v).Include(minT, maxT)
				// Merge the remaining values with the existing
				values = FloatValues(values).Merge(v)
			}
			cur.markRead(minT, maxT)
		}
	} else {
		// Blocks are ordered by generation, we may have values in the past in later blocks, if so,
		// expand the window to include the max time range to ensure values are returned in descending
		// order
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			if cur.entry.MaxTime > maxT && !cur.read() {
				maxT = cur.entry.MaxTime
			}
		}
		// Find the first block that overlaps our window
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() {
				// Extend the window to the union of the first block and the first
				// overlapping block, then restrict values to that window, so the
				// whole region needing a merge is covered.
				if cur.entry.MinTime < minT {
					minT = cur.entry.MinTime
				}
				values = FloatValues(values).Include(minT, maxT)
				break
			}
		}
		// Search the remaining blocks that overlap our window and append their values so we can
		// merge them.
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			// Skip this block if it doesn't contain points we are looking for or they have already been read
			if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() {
				cur.markRead(minT, maxT)
				continue
			}
			tombstones := cur.r.TombstoneRange(c.key)
			var a []FloatValue
			v, err := cur.r.ReadFloatBlockAt(&cur.entry, tdec, vdec, &a)
			if err != nil {
				return nil, err
			}
			// Remove any tombstoned values
			v = c.filterFloatValues(tombstones, v)
			// Remove values we already read
			v = FloatValues(v).Exclude(cur.readMin, cur.readMax)
			if len(v) > 0 {
				// Only use values in the overlapping window, then merge them in
				// ahead of the existing values (descending order).
				v = FloatValues(v).Include(minT, maxT)
				values = FloatValues(v).Merge(values)
			}
			cur.markRead(minT, maxT)
		}
	}
	first.markRead(minT, maxT)
	return values, err
}
// ReadIntegerBlock reads the next block as a set of integer values.
//
// Same algorithm as ReadFloatBlock (this file is generated per type):
// decode the oldest matching block, strip tombstoned/already-read values,
// then decode and merge overlapping newer blocks, ordered per c.ascending.
//
// NOTE(review): a decode error from the first block is only propagated when
// some values survive filtering — the early `return nil, nil` paths below
// can swallow it. Confirm this is intended.
func (c *KeyCursor) ReadIntegerBlock(tdec *TimeDecoder, vdec *IntegerDecoder, buf *[]IntegerValue) ([]IntegerValue, error) {
	// No matching blocks to decode
	if len(c.current) == 0 {
		return nil, nil
	}
	// First block is the oldest block containing the points we're searching for.
	first := c.current[0]
	*buf = (*buf)[:0]
	values, err := first.r.ReadIntegerBlockAt(&first.entry, tdec, vdec, buf)
	// Remove values we already read
	values = IntegerValues(values).Exclude(first.readMin, first.readMax)
	// Remove any tombstones
	tombstones := first.r.TombstoneRange(c.key)
	values = c.filterIntegerValues(tombstones, values)
	// Check we have remaining values.
	if len(values) == 0 {
		return nil, nil
	}
	// Only one block with this key and time range so return it
	if len(c.current) == 1 {
		if len(values) > 0 {
			first.markRead(values[0].UnixNano(), values[len(values)-1].UnixNano())
		}
		return values, nil
	}
	// Use the current block time range as our overlapping window
	minT, maxT := first.readMin, first.readMax
	if len(values) > 0 {
		minT, maxT = values[0].UnixNano(), values[len(values)-1].UnixNano()
	}
	if c.ascending {
		// Blocks are ordered by generation, we may have values in the past in later blocks, if so,
		// expand the window to include the min time range to ensure values are returned in ascending
		// order
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			if cur.entry.MinTime < minT && !cur.read() {
				minT = cur.entry.MinTime
			}
		}
		// Find the first block that overlaps our window
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() {
				// Extend the window to the union of the first block and the first
				// overlapping block, then restrict values to that window, so the
				// whole region needing a merge is covered.
				if cur.entry.MaxTime > maxT {
					maxT = cur.entry.MaxTime
				}
				values = IntegerValues(values).Include(minT, maxT)
				break
			}
		}
		// Search the remaining blocks that overlap our window and append their values so we can
		// merge them.
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			// Skip this block if it doesn't contain points we are looking for or they have already been read
			if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() {
				cur.markRead(minT, maxT)
				continue
			}
			tombstones := cur.r.TombstoneRange(c.key)
			var a []IntegerValue
			v, err := cur.r.ReadIntegerBlockAt(&cur.entry, tdec, vdec, &a)
			if err != nil {
				return nil, err
			}
			// Remove any tombstoned values
			v = c.filterIntegerValues(tombstones, v)
			// Remove values we already read
			v = IntegerValues(v).Exclude(cur.readMin, cur.readMax)
			if len(v) > 0 {
				// Only use values in the overlapping window
				v = IntegerValues(v).Include(minT, maxT)
				// Merge the remaining values with the existing
				values = IntegerValues(values).Merge(v)
			}
			cur.markRead(minT, maxT)
		}
	} else {
		// Blocks are ordered by generation, we may have values in the past in later blocks, if so,
		// expand the window to include the max time range to ensure values are returned in descending
		// order
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			if cur.entry.MaxTime > maxT && !cur.read() {
				maxT = cur.entry.MaxTime
			}
		}
		// Find the first block that overlaps our window
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() {
				// Extend the window to the union of the first block and the first
				// overlapping block, then restrict values to that window, so the
				// whole region needing a merge is covered.
				if cur.entry.MinTime < minT {
					minT = cur.entry.MinTime
				}
				values = IntegerValues(values).Include(minT, maxT)
				break
			}
		}
		// Search the remaining blocks that overlap our window and append their values so we can
		// merge them.
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			// Skip this block if it doesn't contain points we are looking for or they have already been read
			if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() {
				cur.markRead(minT, maxT)
				continue
			}
			tombstones := cur.r.TombstoneRange(c.key)
			var a []IntegerValue
			v, err := cur.r.ReadIntegerBlockAt(&cur.entry, tdec, vdec, &a)
			if err != nil {
				return nil, err
			}
			// Remove any tombstoned values
			v = c.filterIntegerValues(tombstones, v)
			// Remove values we already read
			v = IntegerValues(v).Exclude(cur.readMin, cur.readMax)
			if len(v) > 0 {
				// Only use values in the overlapping window, then merge them in
				// ahead of the existing values (descending order).
				v = IntegerValues(v).Include(minT, maxT)
				values = IntegerValues(v).Merge(values)
			}
			cur.markRead(minT, maxT)
		}
	}
	first.markRead(minT, maxT)
	return values, err
}
// ReadStringBlock reads the next block as a set of string values.
//
// Same algorithm as ReadFloatBlock (this file is generated per type):
// decode the oldest matching block, strip tombstoned/already-read values,
// then decode and merge overlapping newer blocks, ordered per c.ascending.
//
// NOTE(review): a decode error from the first block is only propagated when
// some values survive filtering — the early `return nil, nil` paths below
// can swallow it. Confirm this is intended.
func (c *KeyCursor) ReadStringBlock(tdec *TimeDecoder, vdec *StringDecoder, buf *[]StringValue) ([]StringValue, error) {
	// No matching blocks to decode
	if len(c.current) == 0 {
		return nil, nil
	}
	// First block is the oldest block containing the points we're searching for.
	first := c.current[0]
	*buf = (*buf)[:0]
	values, err := first.r.ReadStringBlockAt(&first.entry, tdec, vdec, buf)
	// Remove values we already read
	values = StringValues(values).Exclude(first.readMin, first.readMax)
	// Remove any tombstones
	tombstones := first.r.TombstoneRange(c.key)
	values = c.filterStringValues(tombstones, values)
	// Check we have remaining values.
	if len(values) == 0 {
		return nil, nil
	}
	// Only one block with this key and time range so return it
	if len(c.current) == 1 {
		if len(values) > 0 {
			first.markRead(values[0].UnixNano(), values[len(values)-1].UnixNano())
		}
		return values, nil
	}
	// Use the current block time range as our overlapping window
	minT, maxT := first.readMin, first.readMax
	if len(values) > 0 {
		minT, maxT = values[0].UnixNano(), values[len(values)-1].UnixNano()
	}
	if c.ascending {
		// Blocks are ordered by generation, we may have values in the past in later blocks, if so,
		// expand the window to include the min time range to ensure values are returned in ascending
		// order
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			if cur.entry.MinTime < minT && !cur.read() {
				minT = cur.entry.MinTime
			}
		}
		// Find the first block that overlaps our window
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() {
				// Extend the window to the union of the first block and the first
				// overlapping block, then restrict values to that window, so the
				// whole region needing a merge is covered.
				if cur.entry.MaxTime > maxT {
					maxT = cur.entry.MaxTime
				}
				values = StringValues(values).Include(minT, maxT)
				break
			}
		}
		// Search the remaining blocks that overlap our window and append their values so we can
		// merge them.
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			// Skip this block if it doesn't contain points we are looking for or they have already been read
			if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() {
				cur.markRead(minT, maxT)
				continue
			}
			tombstones := cur.r.TombstoneRange(c.key)
			var a []StringValue
			v, err := cur.r.ReadStringBlockAt(&cur.entry, tdec, vdec, &a)
			if err != nil {
				return nil, err
			}
			// Remove any tombstoned values
			v = c.filterStringValues(tombstones, v)
			// Remove values we already read
			v = StringValues(v).Exclude(cur.readMin, cur.readMax)
			if len(v) > 0 {
				// Only use values in the overlapping window
				v = StringValues(v).Include(minT, maxT)
				// Merge the remaining values with the existing
				values = StringValues(values).Merge(v)
			}
			cur.markRead(minT, maxT)
		}
	} else {
		// Blocks are ordered by generation, we may have values in the past in later blocks, if so,
		// expand the window to include the max time range to ensure values are returned in descending
		// order
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			if cur.entry.MaxTime > maxT && !cur.read() {
				maxT = cur.entry.MaxTime
			}
		}
		// Find the first block that overlaps our window
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() {
				// Extend the window to the union of the first block and the first
				// overlapping block, then restrict values to that window, so the
				// whole region needing a merge is covered.
				if cur.entry.MinTime < minT {
					minT = cur.entry.MinTime
				}
				values = StringValues(values).Include(minT, maxT)
				break
			}
		}
		// Search the remaining blocks that overlap our window and append their values so we can
		// merge them.
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			// Skip this block if it doesn't contain points we are looking for or they have already been read
			if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() {
				cur.markRead(minT, maxT)
				continue
			}
			tombstones := cur.r.TombstoneRange(c.key)
			var a []StringValue
			v, err := cur.r.ReadStringBlockAt(&cur.entry, tdec, vdec, &a)
			if err != nil {
				return nil, err
			}
			// Remove any tombstoned values
			v = c.filterStringValues(tombstones, v)
			// Remove values we already read
			v = StringValues(v).Exclude(cur.readMin, cur.readMax)
			if len(v) > 0 {
				// Only use values in the overlapping window, then merge them in
				// ahead of the existing values (descending order).
				v = StringValues(v).Include(minT, maxT)
				values = StringValues(v).Merge(values)
			}
			cur.markRead(minT, maxT)
		}
	}
	first.markRead(minT, maxT)
	return values, err
}
// ReadBooleanBlock reads the next block as a set of boolean values.
//
// Same algorithm as ReadFloatBlock (this file is generated per type):
// decode the oldest matching block, strip tombstoned/already-read values,
// then decode and merge overlapping newer blocks, ordered per c.ascending.
//
// NOTE(review): a decode error from the first block is only propagated when
// some values survive filtering — the early `return nil, nil` paths below
// can swallow it. Confirm this is intended.
func (c *KeyCursor) ReadBooleanBlock(tdec *TimeDecoder, vdec *BooleanDecoder, buf *[]BooleanValue) ([]BooleanValue, error) {
	// No matching blocks to decode
	if len(c.current) == 0 {
		return nil, nil
	}
	// First block is the oldest block containing the points we're searching for.
	first := c.current[0]
	*buf = (*buf)[:0]
	values, err := first.r.ReadBooleanBlockAt(&first.entry, tdec, vdec, buf)
	// Remove values we already read
	values = BooleanValues(values).Exclude(first.readMin, first.readMax)
	// Remove any tombstones
	tombstones := first.r.TombstoneRange(c.key)
	values = c.filterBooleanValues(tombstones, values)
	// Check we have remaining values.
	if len(values) == 0 {
		return nil, nil
	}
	// Only one block with this key and time range so return it
	if len(c.current) == 1 {
		if len(values) > 0 {
			first.markRead(values[0].UnixNano(), values[len(values)-1].UnixNano())
		}
		return values, nil
	}
	// Use the current block time range as our overlapping window
	minT, maxT := first.readMin, first.readMax
	if len(values) > 0 {
		minT, maxT = values[0].UnixNano(), values[len(values)-1].UnixNano()
	}
	if c.ascending {
		// Blocks are ordered by generation, we may have values in the past in later blocks, if so,
		// expand the window to include the min time range to ensure values are returned in ascending
		// order
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			if cur.entry.MinTime < minT && !cur.read() {
				minT = cur.entry.MinTime
			}
		}
		// Find the first block that overlaps our window
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() {
				// Extend the window to the union of the first block and the first
				// overlapping block, then restrict values to that window, so the
				// whole region needing a merge is covered.
				if cur.entry.MaxTime > maxT {
					maxT = cur.entry.MaxTime
				}
				values = BooleanValues(values).Include(minT, maxT)
				break
			}
		}
		// Search the remaining blocks that overlap our window and append their values so we can
		// merge them.
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			// Skip this block if it doesn't contain points we are looking for or they have already been read
			if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() {
				cur.markRead(minT, maxT)
				continue
			}
			tombstones := cur.r.TombstoneRange(c.key)
			var a []BooleanValue
			v, err := cur.r.ReadBooleanBlockAt(&cur.entry, tdec, vdec, &a)
			if err != nil {
				return nil, err
			}
			// Remove any tombstoned values
			v = c.filterBooleanValues(tombstones, v)
			// Remove values we already read
			v = BooleanValues(v).Exclude(cur.readMin, cur.readMax)
			if len(v) > 0 {
				// Only use values in the overlapping window
				v = BooleanValues(v).Include(minT, maxT)
				// Merge the remaining values with the existing
				values = BooleanValues(values).Merge(v)
			}
			cur.markRead(minT, maxT)
		}
	} else {
		// Blocks are ordered by generation, we may have values in the past in later blocks, if so,
		// expand the window to include the max time range to ensure values are returned in descending
		// order
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			if cur.entry.MaxTime > maxT && !cur.read() {
				maxT = cur.entry.MaxTime
			}
		}
		// Find the first block that overlaps our window
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() {
				// Extend the window to the union of the first block and the first
				// overlapping block, then restrict values to that window, so the
				// whole region needing a merge is covered.
				if cur.entry.MinTime < minT {
					minT = cur.entry.MinTime
				}
				values = BooleanValues(values).Include(minT, maxT)
				break
			}
		}
		// Search the remaining blocks that overlap our window and append their values so we can
		// merge them.
		for i := 1; i < len(c.current); i++ {
			cur := c.current[i]
			// Skip this block if it doesn't contain points we are looking for or they have already been read
			if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() {
				cur.markRead(minT, maxT)
				continue
			}
			tombstones := cur.r.TombstoneRange(c.key)
			var a []BooleanValue
			v, err := cur.r.ReadBooleanBlockAt(&cur.entry, tdec, vdec, &a)
			if err != nil {
				return nil, err
			}
			// Remove any tombstoned values
			v = c.filterBooleanValues(tombstones, v)
			// Remove values we already read
			v = BooleanValues(v).Exclude(cur.readMin, cur.readMax)
			if len(v) > 0 {
				// Only use values in the overlapping window, then merge them in
				// ahead of the existing values (descending order).
				v = BooleanValues(v).Include(minT, maxT)
				values = BooleanValues(v).Merge(values)
			}
			cur.markRead(minT, maxT)
		}
	}
	first.markRead(minT, maxT)
	return values, err
} | tsdb/engine/tsm1/file_store.gen.go | 0.719876 | 0.645371 | file_store.gen.go | starcoder |
package gft
import (
"math"
"github.com/infastin/gul/gm32"
)
// ColorchanFilter is a filter applied to a single color channel.
// It is used for combining by using CombineColorchanFilters.
// Implementations must be pointers.
type ColorchanFilter interface {
	// Fn returns the changed color channel value for the input value x.
	Fn(x float32) float32
	// UseLut returns true, if it is possible to create a lookup table using Fn.
	// Otherwise, returns false.
	UseLut() bool
}

// MergingColorchanFilter is a colorchan filter that can merge other
// colorchan filters into itself.
type MergingColorchanFilter interface {
	ColorchanFilter
	// Prepare prepares the filter before calling Fn multiple times.
	Prepare()
	// CanMerge returns true, if it is possible to merge the given filter
	// into this instance. Otherwise, returns false.
	CanMerge(filter ColorchanFilter) bool
	// CanUndo returns true, if it is possible to demerge the given filter
	// from this instance. Otherwise, returns false.
	CanUndo(filter ColorchanFilter) bool
	// Merge merges the given filter into this instance.
	Merge(filter ColorchanFilter)
	// Undo demerges the given filter from this instance.
	Undo(filter ColorchanFilter)
	// Skip returns true, if nothing will change after applying the filter.
	// Otherwise, returns false.
	Skip() bool
	// Copy returns a copy of the filter.
	Copy() ColorchanFilter
}
// colorchanFilterFunc adapts a plain channel-transform function to the
// ColorchanFilter interface, together with a flag that reports whether a
// lookup table may be built from it.
type colorchanFilterFunc struct {
	fn     func(x float32) float32
	useLut bool
}

// UseLut reports whether a lookup table may be built from Fn.
func (f *colorchanFilterFunc) UseLut() bool {
	return f.useLut
}

// Fn applies the wrapped function to the channel value x.
func (f *colorchanFilterFunc) Fn(x float32) float32 {
	return f.fn(x)
}

// ColorchanFilterFunc wraps fn as a ColorchanFilter. The useLut argument
// states whether a lookup table may be built from fn.
func ColorchanFilterFunc(fn func(x float32) float32, useLut bool) ColorchanFilter {
	adapter := colorchanFilterFunc{fn: fn, useLut: useLut}
	return &adapter
}
// invertFilter negates color channels. state tracks pending inversions
// modulo 2: state == 1 inverts, state == 0 is a no-op.
type invertFilter struct {
	state byte
}

// CanMerge reports whether filter is another invertFilter.
func (f *invertFilter) CanMerge(filter ColorchanFilter) bool {
	_, ok := filter.(*invertFilter)
	return ok
}

// Merge folds another invertFilter into this one; two inversions cancel out.
func (f *invertFilter) Merge(filter ColorchanFilter) {
	filt := filter.(*invertFilter)
	f.state ^= filt.state
}

// Undo removes a previously merged invertFilter. XOR is its own inverse, so
// undoing is the same operation as merging.
func (f *invertFilter) Undo(filter ColorchanFilter) {
	filt := filter.(*invertFilter)
	f.state ^= filt.state
}

// CanUndo reports whether filter is another invertFilter.
func (f *invertFilter) CanUndo(filter ColorchanFilter) bool {
	_, ok := filter.(*invertFilter)
	return ok
}

// Skip reports whether applying the filter would change nothing.
func (f *invertFilter) Skip() bool {
	return f.state == 0
}

// UseLut reports that Fn may be evaluated through a lookup table.
func (f *invertFilter) UseLut() bool {
	return true
}

// Copy returns an independent copy of the filter.
func (f *invertFilter) Copy() ColorchanFilter {
	return &invertFilter{
		state: f.state,
	}
}

// Prepare is a no-op; the filter needs no setup.
func (f *invertFilter) Prepare() {}

// Fn inverts the channel value.
func (f *invertFilter) Fn(x float32) float32 {
	return 1 - x
}

// Invert returns a filter that negates the colors of an image.
func Invert() MergingColorchanFilter {
	return &invertFilter{
		state: 1,
	}
}
// gammaFilter performs gamma correction on a color channel.
type gammaFilter struct {
	gamma float32
}

// CanMerge reports whether filter is another gammaFilter.
func (f *gammaFilter) CanMerge(filter ColorchanFilter) bool {
	_, ok := filter.(*gammaFilter)
	return ok
}

// Merge adds another gammaFilter's gamma to this one, clamped to a small
// positive minimum so Fn stays well-defined.
func (f *gammaFilter) Merge(filter ColorchanFilter) {
	filt := filter.(*gammaFilter)
	f.gamma = gm32.Max(1.0e-5, f.gamma+filt.gamma)
}

// CanUndo reports whether filter is another gammaFilter.
func (f *gammaFilter) CanUndo(filter ColorchanFilter) bool {
	_, ok := filter.(*gammaFilter)
	return ok
}

// Undo subtracts a previously merged gammaFilter's gamma, clamped to a
// small positive minimum.
func (f *gammaFilter) Undo(filter ColorchanFilter) {
	filt := filter.(*gammaFilter)
	f.gamma = gm32.Max(1.0e-5, f.gamma-filt.gamma)
}

// Skip reports whether applying the filter would change nothing
// (gamma == 1 is the identity correction).
func (f *gammaFilter) Skip() bool {
	return f.gamma == 1
}

// Copy returns an independent copy of the filter.
func (f *gammaFilter) Copy() ColorchanFilter {
	return &gammaFilter{
		gamma: f.gamma,
	}
}

// Prepare clamps gamma to a small positive minimum so Fn never computes a
// division by zero.
func (f *gammaFilter) Prepare() {
	f.gamma = gm32.Max(1.0e-5, f.gamma)
}

// Fn applies the gamma correction x^(1/gamma) to the channel value.
func (f *gammaFilter) Fn(x float32) float32 {
	e := 1 / f.gamma
	return gm32.Pow(x, e)
}

// UseLut reports that Fn may be evaluated through a lookup table.
func (f *gammaFilter) UseLut() bool {
	return true
}

// Gamma creates a filter that performs a gamma correction on an image.
// The gamma parameter must be positive. Gamma = 1 gives the original image
// (and returns nil, meaning "no filter").
// Gamma less than 1 darkens the image and gamma greater than 1 lightens it.
func Gamma(gamma float32) MergingColorchanFilter {
	if gamma == 1 {
		return nil
	}
	return &gammaFilter{
		gamma: gamma,
	}
}
// contrastFilter adjusts image contrast by a percentage in [-100, 100].
type contrastFilter struct {
	contrast float32
}

// CanMerge reports whether filter is another contrastFilter.
func (f *contrastFilter) CanMerge(filter ColorchanFilter) bool {
	_, ok := filter.(*contrastFilter)
	return ok
}

// Merge adds another contrastFilter's percentage to this one, clamped to
// [-100, 100].
func (f *contrastFilter) Merge(filter ColorchanFilter) {
	filt := filter.(*contrastFilter)
	f.contrast = gm32.Clamp(f.contrast+filt.contrast, -100, 100)
}

// CanUndo reports whether filter is another contrastFilter.
func (f *contrastFilter) CanUndo(filter ColorchanFilter) bool {
	_, ok := filter.(*contrastFilter)
	return ok
}

// Undo subtracts a previously merged contrastFilter's percentage, clamped
// to [-100, 100].
func (f *contrastFilter) Undo(filter ColorchanFilter) {
	filt := filter.(*contrastFilter)
	f.contrast = gm32.Clamp(f.contrast-filt.contrast, -100, 100)
}

// Skip reports whether applying the filter would change nothing.
func (f *contrastFilter) Skip() bool {
	return f.contrast == 0
}

// Copy returns an independent copy of the filter.
func (f *contrastFilter) Copy() ColorchanFilter {
	return &contrastFilter{
		contrast: f.contrast,
	}
}

// Prepare clamps the percentage into the supported [-100, 100] range.
func (f *contrastFilter) Prepare() {
	f.contrast = gm32.Clamp(f.contrast, -100, 100)
}

// Fn rescales the channel value around the 0.5 midpoint by a gain derived
// from the percentage, clamping the result to [0, 1].
func (f *contrastFilter) Fn(x float32) float32 {
	alpha := (f.contrast / 100) + 1
	alpha = gm32.Tan(alpha * (math.Pi / 4))
	c := (x-0.5)*alpha + 0.5
	return gm32.Clamp(c, 0, 1)
}

// UseLut reports that Fn should be evaluated directly rather than through
// a lookup table.
func (f *contrastFilter) UseLut() bool {
	return false
}

// Contrast changes contrast of an image.
// The percentage parameter must be in the range [-100, 100].
// It can have any value for merging purposes.
// The percentage = -100 gives a solid gray image. The percentage = 100 gives
// an overcontrasted image. A zero percentage returns nil, meaning "no filter".
func Contrast(perc float32) MergingColorchanFilter {
	if perc == 0 {
		return nil
	}
	return &contrastFilter{
		contrast: perc,
	}
}
type brightnessFilter struct {
brightness float32
}
func (f *brightnessFilter) CanMerge(filter ColorchanFilter) bool {
if _, ok := filter.(*brightnessFilter); ok {
return true
}
return false
}
func (f *brightnessFilter) Merge(filter ColorchanFilter) {
filt := filter.(*brightnessFilter)
f.brightness = gm32.Clamp(f.brightness+filt.brightness, -100, 100)
}
func (f *brightnessFilter) CanUndo(filter ColorchanFilter) bool {
if _, ok := filter.(*brightnessFilter); ok {
return true
}
return false
}
func (f *brightnessFilter) Undo(filter ColorchanFilter) {
filt := filter.(*brightnessFilter)
f.brightness = gm32.Clamp(f.brightness-filt.brightness, -100, 100)
}
func (f *brightnessFilter) Skip() bool {
return f.brightness == 0
}
func (f *brightnessFilter) Copy() ColorchanFilter {
return &brightnessFilter{
brightness: f.brightness,
}
}
func (f *brightnessFilter) Prepare() {
f.brightness = gm32.Clamp(f.brightness, -100, 100)
}
func (f *brightnessFilter) Fn(x float32) float32 {
beta := f.brightness / 100
if beta < 0 {
x *= (1 + beta)
} else {
x += (1 - x) * beta
}
return gm32.Clamp(x, 0, 1)
}
func (f *brightnessFilter) UseLut() bool {
return false
}
// Changes brightness of an image.
// The percentage parameter must be in the range [-100, 100].
// It can have any value for merging purposes.
// The percentage = -100 gives solid black image. The percentage = 100 gives solid white image.
func Brightness(perc float32) MergingColorchanFilter {
if perc == 0 {
return nil
}
return &brightnessFilter{
brightness: perc,
}
} | gft/colorchan.go | 0.901271 | 0.435902 | colorchan.go | starcoder |
package cmscal
import (
"crypto/sha1"
"encoding/hex"
"fmt"
"time"
ics "github.com/arran4/golang-ical"
)
// ScheduleSixth is the sixth-grade bell schedule for 2020-2021: for each
// block-day type, the ordered list of class periods with their start times
// (hour/minute, local time) and durations.
var ScheduleSixth = GradeSchedule{
	Name:        "CMS Sixth Grade 2020-2021",
	Description: "Block schedule for CMS Sixth Grade 2020-2021",
	ScheduleMap: map[BlockDayType]DaySchedule{
		BlueDay: {
			{StartHour: 8, StartMinute: 47, Duration: time.Minute * 32, Description: "Advisory/Period 1"},
			{StartHour: 9, StartMinute: 24, Duration: time.Minute * 90, Description: "Period 2"},
			{StartHour: 10, StartMinute: 59, Duration: time.Minute * 45, Description: "Period 3"},
			{StartHour: 11, StartMinute: 49, Duration: time.Minute * 30, Description: "Lunch"},
			{StartHour: 12, StartMinute: 19, Duration: time.Minute * 45, Description: "Period 3"},
			{StartHour: 13, StartMinute: 9, Duration: time.Minute * 90, Description: "Period 4"},
			{StartHour: 14, StartMinute: 44, Duration: time.Minute * 45, Description: "Period 8"},
			{StartHour: 15, StartMinute: 32, Duration: time.Minute * 8, Description: "Period 1"},
		},
		WhiteDay: {
			{StartHour: 8, StartMinute: 47, Duration: time.Minute * 32, Description: "Advisory/Period 1"},
			{StartHour: 9, StartMinute: 24, Duration: time.Minute * 90, Description: "Period 5"},
			{StartHour: 10, StartMinute: 59, Duration: time.Minute * 45, Description: "Period 6"},
			{StartHour: 11, StartMinute: 49, Duration: time.Minute * 30, Description: "Lunch"},
			{StartHour: 12, StartMinute: 19, Duration: time.Minute * 45, Description: "Period 6"},
			{StartHour: 13, StartMinute: 9, Duration: time.Minute * 90, Description: "Period 7"},
			{StartHour: 14, StartMinute: 44, Duration: time.Minute * 45, Description: "Period 8"},
			{StartHour: 15, StartMinute: 32, Duration: time.Minute * 8, Description: "Period 1"},
		},
		NonBlockDay: {
			{StartHour: 8, StartMinute: 47, Duration: time.Minute * 25, Description: "Advisory/Period 1"},
			{StartHour: 9, StartMinute: 12, Duration: time.Minute * 45, Description: "Period 2"},
			{StartHour: 10, StartMinute: 2, Duration: time.Minute * 45, Description: "Period 3"},
			{StartHour: 10, StartMinute: 52, Duration: time.Minute * 30, Description: "Lunch"},
			{StartHour: 11, StartMinute: 27, Duration: time.Minute * 50, Description: "Period 4"},
			{StartHour: 12, StartMinute: 17, Duration: time.Minute * 45, Description: "Period 5"},
			{StartHour: 13, StartMinute: 7, Duration: time.Minute * 45, Description: "Period 6"},
			{StartHour: 13, StartMinute: 57, Duration: time.Minute * 45, Description: "Period 7"},
			{StartHour: 14, StartMinute: 47, Duration: time.Minute * 45, Description: "Period 8"},
			{StartHour: 15, StartMinute: 35, Duration: time.Minute * 5, Description: "Period 1 (Return Chromebooks)"},
		},
		ShortWednesday: {
			{StartHour: 8, StartMinute: 47, Duration: time.Minute * 35, Description: "Period 2"},
			{StartHour: 9, StartMinute: 27, Duration: time.Minute * 35, Description: "Period 3"},
			{StartHour: 10, StartMinute: 7, Duration: time.Minute * 30, Description: "Lunch"},
			{StartHour: 10, StartMinute: 42, Duration: time.Minute * 35, Description: "Period 4"},
			{StartHour: 11, StartMinute: 22, Duration: time.Minute * 35, Description: "Period 5"},
			{StartHour: 12, StartMinute: 2, Duration: time.Minute * 35, Description: "Period 6"},
			{StartHour: 12, StartMinute: 42, Duration: time.Minute * 35, Description: "Period 7"},
			{StartHour: 13, StartMinute: 22, Duration: time.Minute * 35, Description: "Period 8"},
			{StartHour: 14, StartMinute: 0, Duration: time.Minute * 5, Description: "Period 1 (Return Chromebooks)"},
		},
	},
}

// ScheduleSeventh is the seventh-grade bell schedule for 2020-2021. It
// differs from the sixth-grade schedule mainly in lunch placement.
var ScheduleSeventh = GradeSchedule{
	Name:        "CMS Seventh Grade 2020-2021",
	Description: "Block schedule for CMS Seventh Grade 2020-2021",
	ScheduleMap: map[BlockDayType]DaySchedule{
		BlueDay: {
			{StartHour: 8, StartMinute: 47, Duration: time.Minute * 32, Description: "Advisory/Period 1"},
			{StartHour: 9, StartMinute: 24, Duration: time.Minute * 90, Description: "Period 2"},
			{StartHour: 10, StartMinute: 59, Duration: time.Minute * 30, Description: "Lunch"},
			{StartHour: 11, StartMinute: 34, Duration: time.Minute * 90, Description: "Period 3"},
			{StartHour: 13, StartMinute: 9, Duration: time.Minute * 90, Description: "Period 4"},
			{StartHour: 14, StartMinute: 44, Duration: time.Minute * 45, Description: "Period 8"},
			{StartHour: 15, StartMinute: 32, Duration: time.Minute * 8, Description: "Period 1"},
		},
		WhiteDay: {
			{StartHour: 8, StartMinute: 47, Duration: time.Minute * 32, Description: "Advisory/Period 1"},
			{StartHour: 9, StartMinute: 24, Duration: time.Minute * 90, Description: "Period 5"},
			{StartHour: 10, StartMinute: 59, Duration: time.Minute * 30, Description: "Lunch"},
			{StartHour: 11, StartMinute: 34, Duration: time.Minute * 90, Description: "Period 6"},
			{StartHour: 13, StartMinute: 9, Duration: time.Minute * 90, Description: "Period 7"},
			{StartHour: 14, StartMinute: 44, Duration: time.Minute * 45, Description: "Period 8"},
			{StartHour: 15, StartMinute: 32, Duration: time.Minute * 8, Description: "Period 1"},
		},
		NonBlockDay: {
			{StartHour: 8, StartMinute: 47, Duration: time.Minute * 25, Description: "Advisory/Period 1"},
			{StartHour: 9, StartMinute: 12, Duration: time.Minute * 45, Description: "Period 2"},
			{StartHour: 10, StartMinute: 2, Duration: time.Minute * 45, Description: "Period 3"},
			{StartHour: 10, StartMinute: 52, Duration: time.Minute * 45, Description: "Period 4"},
			{StartHour: 11, StartMinute: 42, Duration: time.Minute * 30, Description: "Lunch"},
			{StartHour: 12, StartMinute: 17, Duration: time.Minute * 45, Description: "Period 5"},
			{StartHour: 13, StartMinute: 7, Duration: time.Minute * 45, Description: "Period 6"},
			{StartHour: 13, StartMinute: 57, Duration: time.Minute * 45, Description: "Period 7"},
			{StartHour: 14, StartMinute: 47, Duration: time.Minute * 45, Description: "Period 8"},
			{StartHour: 15, StartMinute: 35, Duration: time.Minute * 5, Description: "Period 1 (Return Chromebooks)"},
		},
		ShortWednesday: {
			{StartHour: 8, StartMinute: 47, Duration: time.Minute * 35, Description: "Period 2"},
			{StartHour: 9, StartMinute: 27, Duration: time.Minute * 35, Description: "Period 3"},
			{StartHour: 10, StartMinute: 7, Duration: time.Minute * 35, Description: "Period 4"},
			{StartHour: 10, StartMinute: 47, Duration: time.Minute * 30, Description: "Lunch"},
			{StartHour: 11, StartMinute: 22, Duration: time.Minute * 35, Description: "Period 5"},
			{StartHour: 12, StartMinute: 2, Duration: time.Minute * 35, Description: "Period 6"},
			{StartHour: 12, StartMinute: 42, Duration: time.Minute * 35, Description: "Period 7"},
			{StartHour: 13, StartMinute: 22, Duration: time.Minute * 35, Description: "Period 8"},
			{StartHour: 14, StartMinute: 0, Duration: time.Minute * 5, Description: "Period 1 (Return Chromebooks)"},
		},
	},
}

// ScheduleEighth is the eighth-grade bell schedule for 2020-2021. It
// differs from the other grades mainly in lunch placement.
var ScheduleEighth = GradeSchedule{
	Name:        "CMS Eighth Grade 2020-2021",
	Description: "Block schedule for CMS Eighth Grade 2020-2021",
	ScheduleMap: map[BlockDayType]DaySchedule{
		BlueDay: {
			{StartHour: 8, StartMinute: 47, Duration: time.Minute * 32, Description: "Advisory/Period 1"},
			{StartHour: 9, StartMinute: 24, Duration: time.Minute * 90, Description: "Period 2"},
			{StartHour: 10, StartMinute: 59, Duration: time.Minute * 90, Description: "Period 3"},
			{StartHour: 12, StartMinute: 34, Duration: time.Minute * 30, Description: "Lunch"},
			{StartHour: 13, StartMinute: 9, Duration: time.Minute * 90, Description: "Period 4"},
			{StartHour: 14, StartMinute: 44, Duration: time.Minute * 45, Description: "Period 8"},
			{StartHour: 15, StartMinute: 32, Duration: time.Minute * 8, Description: "Period 1"},
		},
		WhiteDay: {
			{StartHour: 8, StartMinute: 47, Duration: time.Minute * 32, Description: "Advisory/Period 1"},
			{StartHour: 9, StartMinute: 24, Duration: time.Minute * 90, Description: "Period 5"},
			{StartHour: 10, StartMinute: 59, Duration: time.Minute * 90, Description: "Period 6"},
			{StartHour: 12, StartMinute: 34, Duration: time.Minute * 30, Description: "Lunch"},
			{StartHour: 13, StartMinute: 9, Duration: time.Minute * 90, Description: "Period 7"},
			{StartHour: 14, StartMinute: 44, Duration: time.Minute * 45, Description: "Period 8"},
			{StartHour: 15, StartMinute: 32, Duration: time.Minute * 8, Description: "Period 1"},
		},
		NonBlockDay: {
			{StartHour: 8, StartMinute: 47, Duration: time.Minute * 25, Description: "Advisory/Period 1"},
			{StartHour: 9, StartMinute: 12, Duration: time.Minute * 45, Description: "Period 2"},
			{StartHour: 10, StartMinute: 2, Duration: time.Minute * 45, Description: "Period 3"},
			{StartHour: 10, StartMinute: 52, Duration: time.Minute * 45, Description: "Period 4"},
			{StartHour: 11, StartMinute: 42, Duration: time.Minute * 45, Description: "Period 5"},
			{StartHour: 12, StartMinute: 32, Duration: time.Minute * 30, Description: "Lunch"},
			{StartHour: 13, StartMinute: 7, Duration: time.Minute * 45, Description: "Period 6"},
			{StartHour: 13, StartMinute: 57, Duration: time.Minute * 45, Description: "Period 7"},
			{StartHour: 14, StartMinute: 47, Duration: time.Minute * 45, Description: "Period 8"},
			{StartHour: 15, StartMinute: 35, Duration: time.Minute * 5, Description: "Period 1 (Return Chromebooks)"},
		},
		ShortWednesday: {
			{StartHour: 8, StartMinute: 47, Duration: time.Minute * 35, Description: "Period 2"},
			{StartHour: 9, StartMinute: 27, Duration: time.Minute * 35, Description: "Period 3"},
			{StartHour: 10, StartMinute: 7, Duration: time.Minute * 35, Description: "Period 4"},
			{StartHour: 10, StartMinute: 47, Duration: time.Minute * 35, Description: "Period 5"},
			{StartHour: 11, StartMinute: 27, Duration: time.Minute * 30, Description: "Lunch"},
			{StartHour: 12, StartMinute: 2, Duration: time.Minute * 35, Description: "Period 6"},
			{StartHour: 12, StartMinute: 42, Duration: time.Minute * 35, Description: "Period 7"},
			{StartHour: 13, StartMinute: 22, Duration: time.Minute * 35, Description: "Period 8"},
			{StartHour: 14, StartMinute: 0, Duration: time.Minute * 5, Description: "Period 1 (Return Chromebooks)"},
		},
	},
}
func makeHolidayMap(loc *time.Location) map[time.Time]bool {
m := make(map[time.Time]bool)
for _, h := range []time.Time{
time.Date(2020, time.September, 7, 0, 0, 0, 0, loc),
time.Date(2020, time.November, 2, 0, 0, 0, 0, loc),
time.Date(2020, time.November, 3, 0, 0, 0, 0, loc),
time.Date(2020, time.November, 25, 0, 0, 0, 0, loc),
time.Date(2020, time.November, 26, 0, 0, 0, 0, loc),
time.Date(2020, time.November, 27, 0, 0, 0, 0, loc),
} {
m[h] = true
}
return m
}
// MakeBuildingSchedule returns the building-wide calendar parameters for the
// 2020-2021 school year in the given location: the first day of school, the
// number of weekdays to generate, the starting block-day type, the holiday
// set, and the dates at which the schedule pattern changes.
func MakeBuildingSchedule(loc *time.Location) *BuildingSchedule {
	return &BuildingSchedule{
		startDate: time.Date(2020, time.August, 17, 0, 0, 0, 0, loc), // first day of school
		numWeekdays: 90,
		firstDayType: BlueDay,
		holidays: makeHolidayMap(loc),
		// NOTE(review): presumably the dates on which the day-type rotation
		// switches to the non-block and short-Wednesday patterns (consumed by
		// nextDayType) — confirm against the district calendar.
		nonBlockChangeDay: time.Date(2020, time.September, 4, 0, 0, 0, 0, loc),
		shortWednesdayChangeDay: time.Date(2020, time.October, 12, 0, 0, 0, 0, loc),
	}
}
// ICalForSchedule renders the grade schedule gs, laid out over the building
// schedule bs, as an iCalendar document and returns it serialized as a string.
func ICalForSchedule(bs *BuildingSchedule, gs *GradeSchedule) string {
	now := time.Now()
	cal := ics.NewCalendar()
	cal.SetXWRCalName(gs.Name)
	cal.SetXWRCalDesc(fmt.Sprintf("%s. Source code at <https://github.com/lucasbergman/cmscal>.", gs.Description))
	cal.SetName(gs.Name)
	cal.SetDescription(fmt.Sprintf("%s. Source code at <https://github.com/lucasbergman/cmscal>.", gs.Description))
	startDate := bs.startDate
	daysFromStart := 0
	weekdaysDone := 0
	var currentDayType BlockDayType
	// Walk forward one calendar day at a time until the required number of
	// weekdays has been consumed. Weekends are skipped entirely; holidays
	// consume a weekday but produce no events and do not advance the
	// day-type rotation.
	for weekdaysDone < bs.numWeekdays {
		date := startDate.AddDate(0, 0, daysFromStart)
		daysFromStart += 1
		if date.Weekday() == time.Saturday || date.Weekday() == time.Sunday {
			continue
		}
		weekdaysDone += 1
		if _, present := bs.holidays[date]; present {
			continue
		}
		if weekdaysDone == 1 {
			currentDayType = bs.firstDayType
		} else {
			currentDayType = nextDayType(bs, currentDayType, date)
		}
		// Emit one event per class period of the current day type.
		for _, period := range gs.ScheduleMap[currentDayType] {
			start := date.Add(time.Duration(period.StartHour)*time.Hour + time.Duration(period.StartMinute)*time.Minute)
			end := start.Add(period.Duration)
			// Derive a stable UID from the event content so regenerating the
			// calendar yields identical UIDs for unchanged events.
			hasher := sha1.New()
			hasher.Write([]byte(start.String()))
			hasher.Write([]byte(end.String()))
			hasher.Write([]byte(period.Description))
			hash := hex.EncodeToString(hasher.Sum(nil))
			// NOTE(review): the UID format string "<EMAIL>" looks like a
			// redaction placeholder (it takes hash as an argument but has no
			// format verb) — confirm the intended "%s@domain" format.
			event := cal.AddEvent(fmt.Sprintf("<EMAIL>", hash))
			event.SetDtStampTime(now)
			event.SetStartAt(start)
			event.SetEndAt(end)
			// Mark the event transparent so it does not block free/busy time.
			event.AddProperty(ics.ComponentProperty("TRANSP"), "TRANSPARENT")
			event.SetSummary(period.Description)
			event.SetDescription(period.Description)
		}
	}
	return cal.Serialize()
}
package pterm
import (
"strconv"
"strings"
"github.com/gookit/color"
"github.com/pterm/pterm/internal"
)
// RGB color model is an additive color model in which red, green, and blue light are added together in various ways to reproduce a broad array of colors.
// The name of the model comes from the initials of the three additive primary colors, red, green, and blue.
// https://en.wikipedia.org/wiki/RGB_color_model
type RGB struct {
	R uint8 // red component (0-255)
	G uint8 // green component (0-255)
	B uint8 // blue component (0-255)
}
// GetValues returns the red, green, and blue channel values separately.
func (p RGB) GetValues() (r, g, b uint8) {
	r, g, b = p.R, p.G, p.B
	return
}
// NewRGB builds an RGB from the given channel values.
func NewRGB(r, g, b uint8) RGB {
	var c RGB
	c.R, c.G, c.B = r, g, b
	return c
}
// NewRGBFromHEX parses a hexadecimal color string (with optional "#" or "0x"
// prefix, 3- or 6-digit form) and returns the corresponding RGB.
func NewRGBFromHEX(hex string) (RGB, error) {
	hex = strings.ToLower(hex)
	hex = strings.ReplaceAll(hex, "#", "")
	hex = strings.ReplaceAll(hex, "0x", "")

	// Expand the shorthand form "abc" into "aabbcc".
	if len(hex) == 3 {
		hex = string([]byte{hex[0], hex[0], hex[1], hex[1], hex[2], hex[2]})
	}
	if len(hex) != 6 {
		return RGB{}, ErrHexCodeIsInvalid
	}

	i64, err := strconv.ParseInt(hex, 16, 32)
	if err != nil {
		return RGB{}, err
	}

	// Split the 24-bit value into its three 8-bit channels.
	c := int(i64)
	var rgb RGB
	rgb.R = uint8(c >> 16)
	rgb.G = uint8((c >> 8) & 0xFF)
	rgb.B = uint8(c & 0xFF)
	return rgb, nil
}
// Fade fades one RGB value (over other RGB values) to another RGB value, by giving the function a minimum, maximum and current value.
func (p RGB) Fade(min, max, current float32, end ...RGB) RGB {
	// Normalize a negative minimum so the working range starts at 0.
	if min < 0 {
		max -= min
		current -= min
		min = 0
	}
	if len(end) == 1 {
		// Single target: map each channel linearly from [min, max] onto
		// [p.channel, end[0].channel] at position current.
		return RGB{
			R: uint8(internal.MapRangeToRange(min, max, float32(p.R), float32(end[0].R), current)),
			G: uint8(internal.MapRangeToRange(min, max, float32(p.G), float32(end[0].G), current)),
			B: uint8(internal.MapRangeToRange(min, max, float32(p.B), float32(end[0].B), current)),
		}
	} else if len(end) > 1 {
		// Multiple targets: split [min, max] into len(end) equal segments and
		// fade within whichever segment contains current.
		f := (max - min) / float32(len(end))
		tempCurrent := current
		if f > current {
			// current lies in the first segment: fade p toward end[0].
			return p.Fade(min, f, current, end[0])
		} else {
			for i := 0; i < len(end)-1; i++ {
				tempCurrent -= f
				if f > tempCurrent {
					// current lies in segment i+1: fade end[i] toward end[i+1].
					return end[i].Fade(min, min+f, tempCurrent, end[i+1])
				}
			}
		}
	}
	// No targets given, or current lies beyond the final segment: p is
	// returned unchanged. NOTE(review): for current past the last segment
	// this yields the start color rather than the last end color — confirm
	// that is intended.
	return p
}
// Sprint formats using the default formats for its operands and returns the
// resulting string, colored with this RGB value via the color package.
// Spaces are added between operands when neither is a string.
func (p RGB) Sprint(a ...interface{}) string {
	return color.RGB(p.R, p.G, p.B).Sprint(a...)
}

// Sprintln formats using the default formats for its operands and returns the
// resulting string, colored with this RGB value.
// Spaces are always added between operands and a newline is appended.
func (p RGB) Sprintln(a ...interface{}) string {
	return p.Sprint(Sprintln(a...))
}

// Sprintf formats according to a format specifier and returns the resulting
// string, colored with this RGB value.
func (p RGB) Sprintf(format string, a ...interface{}) string {
	return p.Sprint(Sprintf(format, a...))
}
// Print formats using the default formats for its operands and writes the
// colored result to standard output.
// Spaces are added between operands when neither is a string.
// It returns this RGB as a *TextPrinter so calls can be chained.
func (p RGB) Print(a ...interface{}) *TextPrinter {
	Print(p.Sprint(a...))
	tp := TextPrinter(p)
	return &tp
}

// Println formats using the default formats for its operands and writes the
// colored result to standard output.
// Spaces are always added between operands and a newline is appended.
// It returns this RGB as a *TextPrinter so calls can be chained.
func (p RGB) Println(a ...interface{}) *TextPrinter {
	Print(p.Sprintln(a...))
	tp := TextPrinter(p)
	return &tp
}

// Printf formats according to a format specifier and writes the colored
// result to standard output.
// It returns this RGB as a *TextPrinter so calls can be chained.
func (p RGB) Printf(format string, a ...interface{}) *TextPrinter {
	Print(p.Sprintf(format, a...))
	tp := TextPrinter(p)
	return &tp
}
package types
import (
"github.com/attic-labs/noms/go/d"
)
// SetIterator defines methods that can be used to efficiently iterate through a set in 'Noms-defined'
// sorted order.
type SetIterator interface {
	// Next returns subsequent values from a set. It returns nil when no values remain.
	Next() Value
	// SkipTo(v) advances to and returns the next value in the iterator >= v.
	// Note: if the iterator has already returned the value being skipped to, it will return the next
	// value (just as if Next() was called). For example, given the following set:
	//   s = Set{ 0, 3, 6, 9, 12, 15, 18 }
	// An iterator on the set would return:
	//   i := s.Iterator()
	//   i.Next()     -- returns 0
	//   i.SkipTo(4)  -- returns 6
	//   i.SkipTo(3)  -- returns 9 (the next value in the iterator >= 3)
	//   i.SkipTo(12) -- returns 12
	//   i.SkipTo(12) -- returns 15 (the next value in the iterator >= 12)
	//   i.SkipTo(20) -- returns nil
	// If there are no values left in the iterator that are >= v,
	// the iterator will skip to the end of the sequence and return nil.
	SkipTo(v Value) Value
}
// setIterator implements SetIterator over a Set using a sequence cursor.
// currentValue caches the value most recently returned by Next/SkipTo.
type setIterator struct {
	s Set
	cursor *sequenceCursor
	currentValue Value
}
// Next returns the next value in sorted order, or nil when exhausted.
func (si *setIterator) Next() Value {
	if !si.cursor.valid() {
		si.currentValue = nil
		return nil
	}
	si.currentValue = si.cursor.current().(Value)
	si.cursor.advance()
	return si.currentValue
}
// SkipTo advances to and returns the next value >= v, or nil when no such
// value remains. v must not be nil.
func (si *setIterator) SkipTo(v Value) Value {
	d.PanicIfTrue(v == nil)
	if !si.cursor.valid() {
		si.currentValue = nil
		return nil
	}
	// If v is not past the last returned value, this is just a Next().
	if compareValue(v, si.currentValue) <= 0 {
		return si.Next()
	}
	// Otherwise reposition the cursor directly at (or after) v.
	si.cursor = newCursorAtValue(si.s.orderedSequence, v, true, false)
	if !si.cursor.valid() {
		si.currentValue = nil
		return nil
	}
	si.currentValue = si.cursor.current().(Value)
	si.cursor.advance()
	return si.currentValue
}
// iterState pairs an iterator with its current (peeked) value. A nil v
// means the underlying iterator is exhausted.
type iterState struct {
	i SetIterator
	v Value
}
// Next returns the peeked value and advances the underlying iterator, or
// returns nil when exhausted.
func (st *iterState) Next() Value {
	res := st.v
	if res != nil {
		st.v = st.i.Next()
	}
	return res
}
// SkipTo advances the underlying iterator to the next value >= v and
// returns it; a nil v or an exhausted iterator yields nil.
func (st *iterState) SkipTo(v Value) Value {
	if v == nil || st.v == nil {
		st.v = nil
		return nil
	}
	st.v = st.i.SkipTo(v)
	return st.v
}
// UnionIterator combines the results from two other iterators. The values from Next() are returned in
// noms-defined order with all duplicates removed.
type UnionIterator struct {
	aState iterState
	bState iterState
}
// NewUnionIterator creates a union iterator from two other SetIterators.
// Both iterators must be non-nil.
func NewUnionIterator(iterA, iterB SetIterator) SetIterator {
	d.PanicIfTrue(iterA == nil)
	d.PanicIfTrue(iterB == nil)
	return &UnionIterator{
		aState: iterState{i: iterA, v: iterA.Next()},
		bState: iterState{i: iterB, v: iterB.Next()},
	}
}
// Next returns the smaller of the two child iterators' current values,
// consuming the value from both children when they are equal (dedupe).
func (u *UnionIterator) Next() Value {
	cmp := compareValue(u.aState.v, u.bState.v)
	if cmp < 0 {
		return u.aState.Next()
	}
	if cmp > 0 {
		return u.bState.Next()
	}
	// Equal (including both exhausted): consume both, return one.
	u.aState.Next()
	return u.bState.Next()
}
// SkipTo advances each child iterator past values below v and returns the
// next value from the union (which is >= v once a child was advanced).
// v must not be nil. Returns nil when the union is exhausted.
//
// The original duplicated Next()'s entire comparison/advance logic in its
// tail; whether or not a child advanced, the result is exactly what Next()
// produces, so we delegate to it.
func (u *UnionIterator) SkipTo(v Value) Value {
	d.PanicIfTrue(v == nil)
	if compareValue(u.aState.v, v) < 0 {
		u.aState.SkipTo(v)
	}
	if compareValue(u.bState.v, v) < 0 {
		u.bState.SkipTo(v)
	}
	return u.Next()
}
// IntersectionIterator only returns values that are returned in both of its child iterators.
// The values from Next() are returned in noms-defined order with all duplicates removed.
type IntersectionIterator struct {
	aState iterState
	bState iterState
}
// NewIntersectionIterator creates an intersection iterator from two other
// SetIterators. Both iterators must be non-nil.
func NewIntersectionIterator(iterA, iterB SetIterator) SetIterator {
	d.PanicIfTrue(iterA == nil)
	d.PanicIfTrue(iterB == nil)
	return &IntersectionIterator{
		aState: iterState{i: iterA, v: iterA.Next()},
		bState: iterState{i: iterB, v: iterB.Next()},
	}
}
// Next leapfrogs the two child iterators toward each other until their
// current values agree, then returns that common value and advances both.
// Both children being exhausted (nil == nil) also counts as agreement,
// yielding nil.
func (i *IntersectionIterator) Next() Value {
	for {
		c := compareValue(i.aState.v, i.bState.v)
		if c == 0 {
			break
		}
		if c < 0 {
			i.aState.SkipTo(i.bState.v)
		} else {
			i.bState.SkipTo(i.aState.v)
		}
	}
	res := i.aState.v
	i.aState.Next()
	i.bState.Next()
	return res
}
// SkipTo advances each child iterator whose current value is below or at v,
// then returns the next common value via Next(). v must not be nil.
func (i *IntersectionIterator) SkipTo(v Value) Value {
	d.PanicIfTrue(v == nil)
	if compareValue(i.aState.v, v) <= 0 {
		i.aState.SkipTo(v)
	}
	if compareValue(i.bState.v, v) <= 0 {
		i.bState.SkipTo(v)
	}
	return i.Next()
}
// considers nil max value, return -1 if v1 < v2, 0 if v1 == v2, 1 if v1 > v2
func compareValue(v1, v2 Value) int {
if v1 == nil && v2 == nil {
return 0
}
if v2 == nil || (v1 != nil && v1.Less(v2)) {
return -1
}
if v1 == nil || (v2 != nil && v2.Less(v1)) {
return 1
}
return 0
} | go/types/set_iterator.go | 0.714927 | 0.437343 | set_iterator.go | starcoder |
package openapi
import (
"encoding/json"
"time"
)
// AnyWorkflowRunStepState describes the execution state of a single workflow
// run step. EndedAt and StartedAt are nullable: when explicitly set to nil
// they serialize as JSON null rather than being omitted.
type AnyWorkflowRunStepState struct {
	// The set of decorators for a workflow step
	Decorators *[]WorkflowRunStepDecorator `json:"decorators,omitempty"`
	// Time at which the step execution ended
	EndedAt NullableTime `json:"ended_at,omitempty"`
	// Time at which step execution started
	StartedAt NullableTime `json:"started_at,omitempty"`
	// Workflow run step status
	Status string `json:"status"`
}
// NewAnyWorkflowRunStepState instantiates a new AnyWorkflowRunStepState object
// with the required status field set. Optional fields are left unset; the set
// of constructor arguments will change if the required properties change.
func NewAnyWorkflowRunStepState(status string) *AnyWorkflowRunStepState {
	return &AnyWorkflowRunStepState{Status: status}
}

// NewAnyWorkflowRunStepStateWithDefaults instantiates a new
// AnyWorkflowRunStepState object with only defaulted properties assigned;
// it does not guarantee that API-required properties are set.
func NewAnyWorkflowRunStepStateWithDefaults() *AnyWorkflowRunStepState {
	return &AnyWorkflowRunStepState{}
}
// GetDecorators returns the Decorators field value if set, zero value otherwise.
func (o *AnyWorkflowRunStepState) GetDecorators() []WorkflowRunStepDecorator {
	if o == nil || o.Decorators == nil {
		var ret []WorkflowRunStepDecorator
		return ret
	}
	return *o.Decorators
}

// GetDecoratorsOk returns a tuple with the Decorators field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *AnyWorkflowRunStepState) GetDecoratorsOk() (*[]WorkflowRunStepDecorator, bool) {
	if o == nil || o.Decorators == nil {
		return nil, false
	}
	return o.Decorators, true
}

// HasDecorators returns a boolean if a field has been set.
func (o *AnyWorkflowRunStepState) HasDecorators() bool {
	// Return the condition directly instead of if cond { return true } / return false.
	return o != nil && o.Decorators != nil
}

// SetDecorators gets a reference to the given []WorkflowRunStepDecorator and assigns it to the Decorators field.
func (o *AnyWorkflowRunStepState) SetDecorators(v []WorkflowRunStepDecorator) {
	o.Decorators = &v
}
// GetEndedAt returns the EndedAt field value if set, zero value otherwise (both if not set or set to explicit null).
func (o *AnyWorkflowRunStepState) GetEndedAt() time.Time {
	if o == nil || o.EndedAt.Get() == nil {
		var ret time.Time
		return ret
	}
	return *o.EndedAt.Get()
}

// GetEndedAtOk returns a tuple with the EndedAt field value if set, nil otherwise
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *AnyWorkflowRunStepState) GetEndedAtOk() (*time.Time, bool) {
	if o == nil {
		return nil, false
	}
	return o.EndedAt.Get(), o.EndedAt.IsSet()
}

// HasEndedAt returns a boolean if a field has been set.
func (o *AnyWorkflowRunStepState) HasEndedAt() bool {
	// Return the condition directly instead of if cond { return true } / return false.
	return o != nil && o.EndedAt.IsSet()
}

// SetEndedAt gets a reference to the given NullableTime and assigns it to the EndedAt field.
func (o *AnyWorkflowRunStepState) SetEndedAt(v time.Time) {
	o.EndedAt.Set(&v)
}

// SetEndedAtNil sets the value for EndedAt to be an explicit nil
func (o *AnyWorkflowRunStepState) SetEndedAtNil() {
	o.EndedAt.Set(nil)
}

// UnsetEndedAt ensures that no value is present for EndedAt, not even an explicit nil
func (o *AnyWorkflowRunStepState) UnsetEndedAt() {
	o.EndedAt.Unset()
}
// StartedAt mirrors EndedAt: a nullable wrapper with three states (unset,
// explicit null, concrete time).
// GetStartedAt returns the StartedAt field value if set, zero value otherwise (both if not set or set to explicit null).
func (o *AnyWorkflowRunStepState) GetStartedAt() time.Time {
	if o == nil || o.StartedAt.Get() == nil {
		var ret time.Time
		return ret
	}
	return *o.StartedAt.Get()
}
// GetStartedAtOk returns a tuple with the StartedAt field value if set, nil otherwise
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *AnyWorkflowRunStepState) GetStartedAtOk() (*time.Time, bool) {
	if o == nil {
		return nil, false
	}
	return o.StartedAt.Get(), o.StartedAt.IsSet()
}
// HasStartedAt returns a boolean if a field has been set.
// An explicit null also counts as "set".
func (o *AnyWorkflowRunStepState) HasStartedAt() bool {
	if o != nil && o.StartedAt.IsSet() {
		return true
	}
	return false
}
// SetStartedAt gets a reference to the given NullableTime and assigns it to the StartedAt field.
func (o *AnyWorkflowRunStepState) SetStartedAt(v time.Time) {
	o.StartedAt.Set(&v)
}
// SetStartedAtNil sets the value for StartedAt to be an explicit nil
func (o *AnyWorkflowRunStepState) SetStartedAtNil() {
	o.StartedAt.Set(nil)
}
// UnsetStartedAt ensures that no value is present for StartedAt, not even an explicit nil
func (o *AnyWorkflowRunStepState) UnsetStartedAt() {
	o.StartedAt.Unset()
}
// Status is a required field, so there is no Has/Unset accessor for it.
// GetStatus returns the Status field value
func (o *AnyWorkflowRunStepState) GetStatus() string {
	if o == nil {
		var ret string
		return ret
	}
	return o.Status
}
// GetStatusOk returns a tuple with the Status field value
// and a boolean to check if the value has been set.
func (o *AnyWorkflowRunStepState) GetStatusOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Status, true
}
// SetStatus sets field value
func (o *AnyWorkflowRunStepState) SetStatus(v string) {
	o.Status = v
}
// MarshalJSON serializes the step state, emitting optional fields only when
// they have been set (an explicitly-null ended_at/started_at is emitted as null).
func (o AnyWorkflowRunStepState) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	if o.Decorators != nil {
		toSerialize["decorators"] = o.Decorators
	}
	if o.EndedAt.IsSet() {
		toSerialize["ended_at"] = o.EndedAt.Get()
	}
	if o.StartedAt.IsSet() {
		toSerialize["started_at"] = o.StartedAt.Get()
	}
	if true {
		// Generated-code idiom: status is required, so it is always emitted.
		toSerialize["status"] = o.Status
	}
	return json.Marshal(toSerialize)
}
// NullableAnyWorkflowRunStepState wraps a *AnyWorkflowRunStepState together
// with a flag so that "unset" and "set to null" can be distinguished.
type NullableAnyWorkflowRunStepState struct {
	value *AnyWorkflowRunStepState
	isSet bool
}
// Get returns the wrapped value (nil when unset or explicitly null).
func (v NullableAnyWorkflowRunStepState) Get() *AnyWorkflowRunStepState {
	return v.value
}
// Set stores val (which may be nil for an explicit null) and marks the wrapper as set.
func (v *NullableAnyWorkflowRunStepState) Set(val *AnyWorkflowRunStepState) {
	v.value = val
	v.isSet = true
}
// IsSet reports whether Set has been called (including with nil).
func (v NullableAnyWorkflowRunStepState) IsSet() bool {
	return v.isSet
}
// Unset clears both the value and the set flag.
func (v *NullableAnyWorkflowRunStepState) Unset() {
	v.value = nil
	v.isSet = false
}
// NewNullableAnyWorkflowRunStepState returns a wrapper already marked as set.
func NewNullableAnyWorkflowRunStepState(val *AnyWorkflowRunStepState) *NullableAnyWorkflowRunStepState {
	return &NullableAnyWorkflowRunStepState{value: val, isSet: true}
}
// MarshalJSON serializes the wrapped value; a nil value encodes as JSON null.
func (v NullableAnyWorkflowRunStepState) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
func (v *NullableAnyWorkflowRunStepState) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | client/pkg/client/openapi/model_any_workflow_run_step_state.go | 0.755547 | 0.421195 | model_any_workflow_run_step_state.go | starcoder |
package plan
import (
"fmt"
"time"
"github.com/influxdata/flux"
)
// Administration provides planner-time context to procedure-spec factories;
// Now returns the query evaluation time.
type Administration interface {
	Now() time.Time
}
// CreateProcedureSpec creates a ProcedureSpec from an OperationSpec and Administration
type CreateProcedureSpec func(flux.OperationSpec, Administration) (ProcedureSpec, error)
// createProcedureFns is the package-level registry of procedure-spec
// factories, indexed both by procedure kind and by the operation kinds a
// factory can translate. Side-effect-producing procedures are tracked in
// separate maps so HasSideEffect can distinguish them.
var createProcedureFns = struct {
	kind map[ProcedureKind]CreateProcedureSpec
	operation map[flux.OperationKind][]CreateProcedureSpec
	sideEffectKind map[ProcedureKind]CreateProcedureSpec
	sideEffectOperation map[flux.OperationKind][]CreateProcedureSpec
}{
	kind: make(map[ProcedureKind]CreateProcedureSpec),
	operation: make(map[flux.OperationKind][]CreateProcedureSpec),
	sideEffectKind: make(map[ProcedureKind]CreateProcedureSpec),
	sideEffectOperation: make(map[flux.OperationKind][]CreateProcedureSpec),
}
// RegisterProcedureSpec registers a new procedure with the specified kind.
// The call panics if the kind is not unique.
// NOTE(review): the registry maps are not synchronized; registration appears
// intended to happen from package init (single goroutine) — confirm before
// calling at runtime.
func RegisterProcedureSpec(k ProcedureKind, c CreateProcedureSpec, qks ...flux.OperationKind) {
	if createProcedureFns.kind[k] != nil {
		panic(fmt.Errorf("duplicate registration for procedure kind %v", k))
	}
	createProcedureFns.kind[k] = c
	// Also index the factory by every operation kind it can translate.
	for _, qk := range qks {
		createProcedureFns.operation[qk] = append(createProcedureFns.operation[qk], c)
	}
}
// RegisterProcedureSpecWithSideEffect registers a new procedure that produces side effects.
// It mirrors RegisterProcedureSpec but records the factory in the
// side-effect maps consulted by HasSideEffect.
func RegisterProcedureSpecWithSideEffect(k ProcedureKind, c CreateProcedureSpec, qks ...flux.OperationKind) {
	if createProcedureFns.sideEffectKind[k] != nil {
		panic(fmt.Errorf("duplicate registration for procedure kind %v", k))
	}
	createProcedureFns.sideEffectKind[k] = c
	for _, qk := range qks {
		createProcedureFns.sideEffectOperation[qk] = append(createProcedureFns.sideEffectOperation[qk], c)
	}
}
// createProcedureFnsFromKind looks up the factories registered for an
// operation kind, consulting plain operations first and side-effect
// operations second. The boolean reports whether any were found.
func createProcedureFnsFromKind(kind flux.OperationKind) ([]CreateProcedureSpec, bool) {
	if fns, ok := createProcedureFns.operation[kind]; ok {
		return fns, true
	}
	if fns, ok := createProcedureFns.sideEffectOperation[kind]; ok {
		return fns, true
	}
	return nil, false
}
// HasSideEffect reports whether spec's kind was registered via
// RegisterProcedureSpecWithSideEffect.
func HasSideEffect(spec ProcedureSpec) bool {
	_, ok := createProcedureFns.sideEffectKind[spec.Kind()]
	return ok
}
// Registries of planner rewrite rules, keyed by rule name.
var ruleNameToLogicalRule = make(map[string]Rule)
var ruleNameToPhysicalRule = make(map[string]Rule)
// RegisterLogicalRules registers the rule created by createFn with the logical plan.
func RegisterLogicalRules(rules ...Rule) {
	registerRule(ruleNameToLogicalRule, rules...)
}
// RegisterPhysicalRules registers the rule created by createFn with the physical plan.
func RegisterPhysicalRules(rules ...Rule) {
	registerRule(ruleNameToPhysicalRule, rules...)
}
func registerRule(ruleMap map[string]Rule, rules ...Rule) {
for _, rule := range rules {
name := rule.Name()
if _, ok := ruleMap[name]; ok {
panic(fmt.Errorf(`rule with name "%v" has already been registered`, name))
}
ruleMap[name] = rule
}
} | vendor/github.com/influxdata/flux/plan/registration.go | 0.581422 | 0.475605 | registration.go | starcoder |
package matrix
import "math"
// Matrix is a 4x4 transform stored as a flat slice of 16 float64 elements.
type Matrix []float64
// identiyOrOutMatrix returns out[0] when a destination matrix was supplied,
// otherwise a fresh identity matrix.
// NOTE(review): the name has a typo ("identiy" -> "identity"); renaming would
// require touching every call site in this package, so it is left as-is here.
func identiyOrOutMatrix(out []Matrix) Matrix {
	var mat Matrix
	if len(out) > 0 {
		mat = out[0]
	} else {
		mat = NewIdentityMatrix()
	}
	return mat
}
// NewMatrix returns a 4x4 matrix with every element zero.
func NewMatrix() Matrix {
	return make(Matrix, 16)
}
// NewIdentityMatrix returns the 4x4 identity matrix.
func NewIdentityMatrix() Matrix {
	m := make(Matrix, 16)
	// Diagonal entries sit at flat indices 0, 5, 10 and 15.
	for i := 0; i < 16; i += 5 {
		m[i] = 1
	}
	return m
}
// NewPerspectiveMatrix builds a perspective projection matrix from a field of
// view, aspect ratio and near/far clip planes.
// NOTE(review): fovRadii = (fov/2) * (pi/360) evaluates to fov*pi/720; the
// usual degrees-to-radians conversion of the half angle is fov*pi/360.
// Confirm the intended units of fov before changing anything here.
func NewPerspectiveMatrix(fov, ratio, nearZ, farZ float64) Matrix {
	fovRadii := (fov / 2.0) * (math.Pi / 360.0)
	f := 1.0 / (math.Tan(fovRadii))
	zDiff := farZ - nearZ
	mat := NewMatrix()
	mat[0] = f / ratio
	mat[5] = f
	mat[10] = -(farZ + nearZ) / zDiff
	mat[11] = -(2.0 * farZ * nearZ) / zDiff
	mat[14] = -1.0
	mat[15] = 0.0
	return mat
}
// NewTranslationMatrix writes a translation by (x, y, z) into out[0] when
// supplied (only indices 12-14 are touched), otherwise into a fresh identity.
func NewTranslationMatrix(x, y, z float64, out ...Matrix) Matrix {
	mat := identiyOrOutMatrix(out)
	mat[12] = x
	mat[13] = y
	mat[14] = z
	return mat
}
// NewRotationMatrix writes an axis-angle rotation of rot radians about the
// axis (x, y, z) into out[0] when supplied, otherwise into a fresh identity.
// Only the upper-left 3x3 entries are written.
// NOTE(review): the formula appears to assume (x, y, z) is a unit vector —
// confirm callers normalize the axis.
func NewRotationMatrix(x, y, z, rot float64, out ...Matrix) Matrix {
	mat := identiyOrOutMatrix(out)
	c := math.Cos(rot)
	s := math.Sin(rot)
	t := 1.0 - c
	mat[0] = (t * (x * x)) + c
	mat[1] = (t * (x * y)) - (s * z)
	mat[2] = (t * (x * z)) + (s * y)
	mat[4] = (t * (x * y)) + (s * z)
	mat[5] = (t * (y * y)) + c
	mat[6] = (t * (y * z)) - (s * x)
	mat[8] = (t * (x * z)) - (s * y)
	mat[9] = (t * (y * z)) + (s * x)
	mat[10] = (t * (z * z)) + c
	return mat
}
// Multiply computes the matrix product m1 x m2 (each Matrix treated as a
// row-major 4x4), writing the result into out[0] when supplied, otherwise
// into a fresh matrix. Elements are written in the same order, and each sum
// is accumulated left-to-right, exactly as the previous unrolled form.
func Multiply(m1, m2 Matrix, out ...Matrix) Matrix {
	mat := identiyOrOutMatrix(out)
	for row := 0; row < 4; row++ {
		for col := 0; col < 4; col++ {
			sum := 0.0
			for k := 0; k < 4; k++ {
				sum += m1[row*4+k] * m2[k*4+col]
			}
			mat[row*4+col] = sum
		}
	}
	return mat
}
// Translate post-multiplies the receiver by a translation matrix, storing the
// result back into the receiver.
func (m *Matrix) Translate(x, y, z float64) {
	mat := NewMatrix()
	// Copy first so Multiply's output target (*m) does not alias its left operand.
	copy(mat, (*m))
	Multiply(mat, NewTranslationMatrix(x, y, z), (*m))
}
// Rotate post-multiplies the receiver by an axis-angle rotation matrix,
// storing the result back into the receiver.
func (m *Matrix) Rotate(x, y, z, rot float64) {
	mat := NewMatrix()
	copy(mat, (*m))
	Multiply(mat, NewRotationMatrix(x, y, z, rot), (*m))
}
func (m Matrix) ToGL() []float32 {
return []float32{
float32(m[0]), float32(m[1]), float32(m[2]), float32(m[3]),
float32(m[4]), float32(m[5]), float32(m[6]), float32(m[7]),
float32(m[8]), float32(m[9]), float32(m[10]), float32(m[11]),
float32(m[12]), float32(m[13]), float32(m[14]), float32(m[15]),
}
} | math/matrix/matrix.go | 0.707809 | 0.600774 | matrix.go | starcoder |
package spec3
// SecurityScheme defines a security scheme that can be used by the operations.
// Supported schemes are HTTP authentication, an API key (either as a header, a cookie parameter or as a query parameter), OAuth2's common flows (implicit, password, application and access code) as defined in RFC6749, and OpenID Connect Discovery.
// Embeds VendorExtensible (x-* extension fields) and Reference ($ref support).
type SecurityScheme struct {
	VendorExtensible
	Reference
	Type string `json:"type,omitempty"`
	Description string `json:"description,omitempty"`
	Name string `json:"name,omitempty"`
	In string `json:"in,omitempty"`
	Scheme string `json:"scheme,omitempty"`
	BearerFormat string `json:"bearerFormat,omitempty"`
	Flows OAuthFlow `json:"flows,omitempty"`
	OpenIDConnectURL string `json:"openIdConnectUrl,omitempty"`
}
// OrderedSecuritySchemes is an insertion-ordered map from scheme name to
// *SecurityScheme, backed by OrderedMap.
type OrderedSecuritySchemes struct {
	data OrderedMap
}
// NewOrderedSecuritySchemes creates a new instance of OrderedSecuritySchemes with correct filter
func NewOrderedSecuritySchemes() OrderedSecuritySchemes {
	return OrderedSecuritySchemes{
		data: OrderedMap{
			filter: MatchNonEmptyKeys, // TODO: check if keys are some regex or just any non empty string
		},
	}
}
// Get returns the security scheme stored under key, or nil when absent.
func (s *OrderedSecuritySchemes) Get(key string) *SecurityScheme {
	if v := s.data.Get(key); v != nil {
		return v.(*SecurityScheme)
	}
	return nil
}
// GetOK returns the scheme stored under key plus a presence flag; the flag is
// also false when the stored value is not a *SecurityScheme.
func (s *OrderedSecuritySchemes) GetOK(key string) (*SecurityScheme, bool) {
	v, ok := s.data.GetOK(key)
	if !ok {
		return nil, ok
	}
	sr, ok := v.(*SecurityScheme)
	return sr, ok
}
// Set stores val under key, reporting whether the underlying OrderedMap
// accepted the key (see the filter configured in NewOrderedSecuritySchemes).
func (s *OrderedSecuritySchemes) Set(key string, val *SecurityScheme) bool {
	return s.data.Set(key, val)
}
// ForEach executes fn for each stored security scheme, in insertion order.
// Iteration stops at the first error from fn, and that error is returned.
func (s *OrderedSecuritySchemes) ForEach(fn func(string, *SecurityScheme) error) error {
	// Propagate the error instead of discarding it: the previous
	// implementation ignored the result of s.data.ForEach and always
	// returned nil, so a failing callback went unnoticed.
	return s.data.ForEach(func(key string, val interface{}) error {
		// A non-*SecurityScheme value yields a nil scheme, matching the
		// previous lenient type assertion.
		scheme, _ := val.(*SecurityScheme)
		return fn(key, scheme)
	})
}
// Keys returns the scheme names in insertion order.
func (s *OrderedSecuritySchemes) Keys() []string {
	return s.data.Keys()
}
// TODO: (s *OrderedSecuritySchemes) Implement Marshal & Unmarshal -> JSON, YAML | security_scheme.go | 0.609524 | 0.409693 | security_scheme.go | starcoder |
package golisp2
import (
"fmt"
"math"
"strings"
)
type (
	// Value represents any arbitrary value within the lisp interpreting
	// environment. While it just extends "expr", the implicit contract is that no
	// work is actually performed at eval time; it just returns itself.
	Value interface {
		// InspectStr returns a printable version of the expression.
		InspectStr() string
	}
	// NumberValue is a representation of a number within the interpreted
	// environment. All numbers are stored as float64.
	NumberValue struct {
		Val float64
	}
	// NilValue is a representation of an null value within the interpreted
	// environment.
	NilValue struct {
	}
	// StringValue is a representation of a string within the interpreted
	// environment.
	StringValue struct {
		Val string
	}
	// BoolValue is a representation of a boolean within the interpreted
	// environment.
	BoolValue struct {
		Val bool
	}
	// FuncValue is a representation of a basic function within the interpreted
	// environment.
	FuncValue struct {
		// Fn is the function body the function value references.
		Fn func(*EvalContext, ...Value) (Value, error)
	}
	// CellValue is a representation of a pair of values within the interpreted
	// environment. This can be composed to represent lists with standard car/cdr
	// operators.
	CellValue struct {
		Left, Right Value
	}
	// ListValue represents a list of values.
	ListValue struct {
		Vals []Value
	}
	// MapValue represents a map of values to values. Keys are the string
	// representation of the original key values.
	MapValue struct {
		Vals map[string]Value
	}
)
// NewCellValue creates a cell from the given left/right values. Either may be
// nil, in which case it is replaced with an explicit NilValue so the cell's
// fields are never Go-nil.
func NewCellValue(left, right Value) *CellValue {
	cell := &CellValue{Left: left, Right: right}
	if cell.Left == nil {
		cell.Left = &NilValue{}
	}
	if cell.Right == nil {
		cell.Right = &NilValue{}
	}
	return cell
}
// Eval returns the cell unchanged: values evaluate to themselves.
func (cv *CellValue) Eval(*EvalContext) (Value, error) {
	return cv, nil
}
// InspectStr outputs the contents of all the cells in dotted-pair notation.
func (cv *CellValue) InspectStr() string {
	// todo (bs): if second cell is a node, treat this different
	return fmt.Sprintf("(%s . %s)", cv.Left.InspectStr(), cv.Right.InspectStr())
}
// InspectStr prints the number: integral values without a decimal point,
// everything else with fmt's default %f precision.
func (nv *NumberValue) InspectStr() string {
	if nv.Val != math.Trunc(nv.Val) {
		return fmt.Sprintf("%f", nv.Val)
	}
	return fmt.Sprintf("%d", int64(nv.Val))
}
// InspectStr outputs "nil".
func (nv *NilValue) InspectStr() string {
	return "nil"
}
// InspectStr prints the string, wrapped in double quotes (inner quotes are
// not escaped).
func (sv *StringValue) InspectStr() string {
	return fmt.Sprintf("\"%s\"", sv.Val)
}
// InspectStr prints "true" or "false" based on the value.
func (bv *BoolValue) InspectStr() string {
	if bv.Val {
		return "true"
	}
	return "false"
}
// InspectStr outputs some information about the function.
func (fv *FuncValue) InspectStr() string {
	// note (bs): probably want to customize this to print some details about the
	// function itself. That will involve (optionally) retaining the declaration
	// name of the function.
	//
	// The string is constant, so the previous fmt.Sprintf("<func>") wrapper was
	// unnecessary (flagged by gosimple S1039) — return the literal directly.
	return "<func>"
}
// InspectStr returns a human-readable string representation of the list:
// the elements' representations separated by single spaces, in brackets.
func (lv *ListValue) InspectStr() string {
	parts := make([]string, len(lv.Vals))
	for i, v := range lv.Vals {
		parts[i] = v.InspectStr()
	}
	return "[" + strings.Join(parts, " ") + "]"
}
// InspectStr returns a human-readable map representation of the list.
func (mv *MapValue) InspectStr() string {
var sb strings.Builder
sb.WriteString("{")
for k, v := range mv.Vals {
sb.WriteString(" ")
sb.WriteString(k)
sb.WriteString(":")
sb.WriteString(v.InspectStr())
}
sb.WriteString(" }")
return sb.String()
} | values.go | 0.645567 | 0.440048 | values.go | starcoder |
package metric
import (
"fmt"
"runtime"
)
// newGoMetricCollector builds a Collector named "golang_metrics" exposing Go
// runtime statistics: goroutine/thread/CPU counts plus a subset of
// runtime.MemStats gauges. The large commented-out section below lists
// additional MemStats metrics that are currently disabled.
func newGoMetricCollector() *Collector {
	golang := &golang{
		goRoutineMetric: goMetric{
			Name: "go_goroutines",
			Help: "Number of goroutines that currently exist.",
			GetFunc: func() float64 { return float64(runtime.NumGoroutine()) },
		},
		goProcessMetric: goMetric{
			Name: "go_threads",
			Help: "Number of OS threads created",
			GetFunc: func() float64 {
				// Thread count is the profile's record count; records themselves are not needed.
				n, _ := runtime.ThreadCreateProfile(nil)
				return float64(n)
			},
		},
		goCPUMetric: goMetric{
			Name: "go_cpu_used",
			Help: " the number of logical CPUs usable by the current process.",
			GetFunc: func() float64 { return float64(runtime.NumCPU()) },
		},
		goMemStateMetrics: []goMetric{
			{
				Name: memstatNamespace("alloc_bytes"),
				Help: "Number of bytes allocated and still in use.",
				MemGetFunc: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
			},
			{
				Name: memstatNamespace("alloc_bytes_total"),
				Help: "Total number of bytes allocated, even if freed.",
				MemGetFunc: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
			},
			{
				Name: memstatNamespace("sys_bytes"),
				Help: "Number of bytes obtained from system.",
				MemGetFunc: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
			},
			{
				Name: memstatNamespace("mallocs_total"),
				Help: "Total number of mallocs.",
				MemGetFunc: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
			},
			{
				Name: memstatNamespace("frees_total"),
				Help: "Total number of frees.",
				MemGetFunc: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
			},
			// Disabled MemStats metrics, kept for potential future use:
			//{
			//	Name: memstatNamespace("lookups_total"),
			//	Help: "Total number of pointer lookups.",
			//	MemGetFunc: func(ms *runtime.MemStats) float64 {return float64(ms.Lookups)},
			//},
			//{
			//	Name: memstatNamespace("heap_alloc_bytes"),
			//	Help: "Number of heap bytes allocated and still in use.",
			//	MemGetFunc: func(ms *runtime.MemStats) float64 {return float64(ms.HeapAlloc},
			//},
			//{
			//	Name: memstatNamespace("heap_sys_bytes"),
			//	Help: "Number of heap bytes obtained from system.",
			//	MemGetFunc: func(ms *runtime.MemStats) float64 {return float64(ms.HeapSys)},
			//},
			//{
			//	Name: memstatNamespace("heap_idle_bytes"),
			//	Help: "Number of heap bytes waiting to be used.",
			//	MemGetFunc: func(ms *runtime.MemStats) float64 {return float64(ms.HeapIdle)},
			//},
			//{
			//	Name: memstatNamespace("heap_inuse_bytes"),
			//	Help: "Number of heap bytes that are in use.",
			//	MemGetFunc: func(ms *runtime.MemStats) float64 {return float64(ms.HeapInuse) },
			//},
			//{
			//	Name: memstatNamespace("heap_released_bytes"),
			//	Help: "Number of heap bytes released to OS.",
			//	MemGetFunc: func(ms *runtime.MemStats) float64 {return float64(ms.HeapReleased},
			//},
			//{
			//	Name: memstatNamespace("heap_objects"),
			//	Help: "Number of allocated objects.",
			//	MemGetFunc: func(ms *runtime.MemStats) float64 {return float64(ms.HeapObjects},
			//},
			//{
			//	Name: memstatNamespace("stack_inuse_bytes"),
			//	Help: "Number of bytes in use by the stack allocator.",
			//	MemGetFunc: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse},
			//},
			//{
			//	Name: memstatNamespace("stack_sys_bytes"),
			//	Help: "Number of bytes obtained from system for stack allocator.",
			//	MemGetFunc: func(ms *runtime.MemStats) float64 {return float64(ms.StackSys},
			//},
			//{
			//	Name: memstatNamespace("mspan_inuse_bytes"),
			//	Help: "Number of bytes in use by mspan structures.",
			//	MemGetFunc: func(ms *runtime.MemStats) float64 {return float64(ms.MSpanInuse},
			//},
			//{
			//	Name: memstatNamespace("mspan_sys_bytes"),
			//	Help: "Number of bytes used for mspan structures obtained from system.",
			//	MemGetFunc: func(ms *runtime.MemStats) float64 {return float64(ms.MSpanSys) },
			//},
			//{
			//	Name: memstatNamespace("mcache_inuse_bytes"),
			//	Help: "Number of bytes in use by mcache structures.",
			//	MemGetFunc: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
			//},
			//{
			//	Name: memstatNamespace("mcache_sys_bytes"),
			//	Help: "Number of bytes used for mcache structures obtained from system.",
			//	MemGetFunc: func(ms *runtime.MemStats) float64 {return float64(ms.MCacheSys) },
			//},
			//{
			//	Name: memstatNamespace("buck_hash_sys_bytes"),
			//	Help: "Number of bytes used by the profiling bucket hash table.",
			//	MemGetFunc: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys},
			//},
			//{
			//	Name: memstatNamespace("gc_sys_bytes"),
			//	Help: "Number of bytes used for garbage collection system metadata.",
			//	MemGetFunc: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
			//},
			//{
			//	Name: memstatNamespace("other_sys_bytes"),
			//	Help: "Number of bytes used for other system allocations.",
			//	MemGetFunc: func(ms *runtime.MemStats) float64 {return float64(ms.OtherSys)},
			//},
			//{
			//	Name: memstatNamespace("next_gc_bytes"),
			//	Help: "Number of heap bytes when next garbage collection will take place.",
			//	MemGetFunc: func(ms *runtime.MemStats) float64 {return float64(ms.NextGC) },
			//},
			//{
			//	Name: memstatNamespace("last_gc_time_seconds"),
			//	Help: "Number of seconds since 1970 of last garbage collection.",
			//	MemGetFunc: func(ms *runtime.MemStats) float64 {return float64(ms.LastGC) / 1e9},
			//},
			//{
			//	Name: memstatNamespace("gc_cpu_fraction"),
			//	Help: "The fraction of this program's available CPU time used by the GC since the program started.",
			//	MemGetFunc: func(ms *runtime.MemStats) float64 {return ms.GCCPUFraction },
			//},
		},
	}
	return NewCollector("golang_metrics", golang)
}
// memstatNamespace prefixes a MemStats metric name with "go_memstats_".
// Plain concatenation replaces fmt.Sprintf: no format parsing or interface
// boxing for a fixed prefix join.
func memstatNamespace(s string) string {
	return "go_memstats_" + s
}
// golang bundles the Go runtime metrics assembled by newGoMetricCollector.
type golang struct {
	goRoutineMetric goMetric
	goProcessMetric goMetric
	goCPUMetric goMetric
	goMemStateMetrics []goMetric
}
// Collect snapshots runtime.MemStats once and returns all metrics; the single
// snapshot is shared by every memory metric so they are mutually consistent.
func (g golang) Collect() []MetricInterf {
	m := make([]MetricInterf, 0)
	m = append(m, g.goRoutineMetric)
	m = append(m, g.goProcessMetric)
	// NOTE(review): goCPUMetric is populated in newGoMetricCollector but never
	// appended here, so go_cpu_used is never reported — confirm whether that
	// omission is intentional.
	ms := &runtime.MemStats{}
	runtime.ReadMemStats(ms)
	for idx := range g.goMemStateMetrics {
		g.goMemStateMetrics[idx].MemStats = ms
		m = append(m, &g.goMemStateMetrics[idx])
	}
	return m
}
// goMetric describes one runtime metric. Exactly one of GetFunc or
// MemGetFunc is expected to be populated; MemGetFunc reads from the MemStats
// snapshot attached by golang.Collect.
type goMetric struct {
	Name string
	Help string
	MemStats *runtime.MemStats
	MemGetFunc func(stat *runtime.MemStats) float64
	GetFunc func() float64
}
// GetMeta returns the metric's name and help text.
func (m goMetric) GetMeta() *MetricMeta {
	return &MetricMeta{
		Name: m.Name,
		Help: m.Help,
	}
}
// GetValue evaluates the metric, preferring the MemStats-based reader when a
// snapshot has been attached (see golang.Collect).
func (m goMetric) GetValue() (*FloatOrString, error) {
	if m.MemStats != nil {
		return FormFloatOrString(m.MemGetFunc(m.MemStats))
	}
	return FormFloatOrString(m.GetFunc())
}
func (m goMetric) GetExtension() (*MetricExtension, error) {
return nil, nil
} | src/common/metric/go-metric.go | 0.558568 | 0.406509 | go-metric.go | starcoder |
package solid
import (
"github.com/adamcolton/geom/d3"
"github.com/adamcolton/geom/d3/curve/line"
)
// Edge between two points. Edge should be ordered by calling Sort. Edge is used
// as a map key to emulate a set.
type Edge [2]d3.Pt
// NewEdge from 2 points in the correct order
func NewEdge(a, b d3.Point) Edge {
e := Edge{a.Pt(), b.Pt()}
e.Sort()
return e
}
// Sort guarantees the order of the points: endpoints are ordered
// lexicographically by (X, Y, Z). Should only be called once when the
// Edge is created.
func (e *Edge) Sort() {
	if e[0].X > e[1].X {
		e[0], e[1] = e[1], e[0]
	} else if e[0].X == e[1].X {
		// X ties: compare Y, then Z.
		if e[0].Y > e[1].Y {
			e[0], e[1] = e[1], e[0]
		} else if e[0].Y == e[1].Y {
			if e[0].Z > e[1].Z {
				e[0], e[1] = e[1], e[0]
			}
		}
	}
}
// Pt1 treats the edge as a line from e[0] to e[1] and returns the point at
// parameter t0 on that line.
func (e Edge) Pt1(t0 float64) d3.Pt {
	return line.New(e[0], e[1]).Pt1(t0)
}
// EdgeMesh represents a mesh as a set of edges. It can detect if the mesh is
// solid or if an edge is over used.
type EdgeMesh struct {
	// edges counts how many times each canonical edge has been added (0-2).
	edges map[Edge]byte
	// singles tracks how many edges currently have exactly one use.
	singles uint
}
// NewEdgeMesh creates an empty EdgeMesh
func NewEdgeMesh() *EdgeMesh {
	return &EdgeMesh{
		edges: make(map[Edge]byte),
	}
}
// ErrEdgeOverUsed is returned if an edge appears more than twice in a mesh.
type ErrEdgeOverUsed struct{}
// Error fulfils error on ErrEdgeOverUsed
func (ErrEdgeOverUsed) Error() string {
	return "Within a mesh, an edge should appear no more than twice"
}
// ErrTwoPoints is returned if a single point is added to a mesh
type ErrTwoPoints struct{}
// Error fulfils error on ErrTwoPoints
func (ErrTwoPoints) Error() string {
	return "At least two points are required"
}
// add records one use of edge e, maintaining the count of edges used exactly
// once. A third use is rejected with ErrEdgeOverUsed.
func (em *EdgeMesh) add(e Edge) error {
	switch em.edges[e] {
	case 0:
		// First use of this edge.
		em.edges[e] = 1
		em.singles++
	case 1:
		// Second use: the edge is now fully shared.
		em.edges[e] = 2
		em.singles--
	case 2:
		return ErrEdgeOverUsed{}
	}
	return nil
}
// Add points to a mesh. If two points are give an edge is added. If multiple
// points are given then they are added as a polygon. So if the points A,B and C
// are given the edges (A,B), (B,C) and (C,A) will all be added.
func (em *EdgeMesh) Add(pts ...d3.Point) error {
	if len(pts) < 2 {
		return ErrTwoPoints{}
	}
	if len(pts) == 2 {
		// Special case: a pair is a single edge. The polygon loop below would
		// add the edge twice (A->B and the wrap-around B->A).
		return em.add(NewEdge(pts[0], pts[1]))
	}
	ln := len(pts)
	for i, a := range pts {
		// Wrap around to close the polygon.
		b := pts[(i+1)%ln]
		err := em.add(NewEdge(a, b))
		if err != nil {
			return err
		}
	}
	return nil
}
// Solid returns true if the mesh has edges and each edge is used exactly twice.
func (em *EdgeMesh) Solid() bool {
return len(em.edges) > 0 && em.singles == 0
} | d3/solid/edge.go | 0.69181 | 0.431045 | edge.go | starcoder |
package bitmatrix
type BitMatrix interface {
// New returns a new BitMatrix. It does not change the receiver.
New(size int) BitMatrix
// None sets all the bits of the receiver matrix to 0.
// It returns the receiver matrix to support chaining.
None() BitMatrix
// Size returns the size of the matrix size x size.
// It does not change the receiver matrix.
Size() int
// Set sets the index position in the receiver matrix to 1.
// It returns the receiver matrix to support chaining.
Set(index int) BitMatrix
// Is returns true if the bit at the index is 1.
// It does not change the receiver matrix.
Is(index int) bool
// Equal compares the receiver matrix with the argument matrix.
// It does not change the receiver matrix.
Equal(matrix BitMatrix) bool
// Count returns the number of 1 bits of the receiver matrix.
// It does not change the receiver matrix.
Count() int
// Clones creates and returns a clone of the receiver matrix.
// It does not change the receiver matrix.
Clone() BitMatrix
// Up shifts all the bits of the receiver matrix up.
// The bottom row is filled with 0s.
// It returns the receiver matrix to support chaining.
Up() BitMatrix
// Down shifts all the bits of the receiver matrix down.
// The top row is filled with 0s.
// It returns the receiver matrix to support chaining.
Down() BitMatrix
// Left shifts all the bits of the receiver matrix to the left.
// The leftmost column is filled with 0s.
// It returns the receiver matrix to support chaining.
Left() BitMatrix
// Right shifts all the bits of the receiver matrix to the right.
// The rightmost column is filled with 0s.
// It returns the receiver matrix to support chaining.
Right() BitMatrix
// Inverse inverts all bits of receiver matrix.
// It returns the receiver matrix to support chaining.
Inverse() BitMatrix
// Or performs a boolean OR operation of the receiver and the argument matrix.
// The receiver is changed to the result of the operation.
// It returns the receiver matrix to support chaining.
Or(BitMatrix) BitMatrix
// And performs a boolean AND operation of the receiver and the argument matrix.
// The receiver is changed to the result of the operation.
// It returns the receiver matrix to support chaining.
And(BitMatrix) BitMatrix
// Xor performs a boolean XOR operation of the receiver and the argument matrix.
// The receiver is changed to the result of the operation.
// It returns the receiver matrix to support chaining.
Xor(BitMatrix) BitMatrix
// Minuns performs a boolean MINUS operation of the receiver and the argument matrix.
// The receiver is changed to the result of the operation.
// It returns the receiver matrix to support chaining.
Minus(BitMatrix) BitMatrix
// Read reads the board 2 dimensional slice and sets all the bits of the receiver matrix.
// It returns the receiver matrix to support chaining.
Read(board [][]int) BitMatrix
} | bitmatrix_interface.go | 0.81457 | 0.741791 | bitmatrix_interface.go | starcoder |
package main
import "fmt"
// sortArray sorts nums ascending in place and returns the same slice.
func sortArray(nums []int) []int {
	if len(nums) <= 1 {
		return nums
	}
	// quickSort(nums, 0, len(nums)-1)
	// countSort is the active strategy; quickSort above is kept as an alternative.
	countSort(nums)
	return nums
}
// quickSort sorts nums[l..r] (r included) in place, partitioning around
// nums[l] as the pivot.
// time: O(nlogn) average, O(n^2) worst case (e.g. already-sorted input)
// space: O(logn) average recursion depth, O(n) worst case
func quickSort(nums []int, l, r int) {
	if l >= r {
		return
	}
	// The pivot value stays at index l until the final swap, so it can be cached.
	pivot := nums[l]
	lo, hi := l+1, r
	for lo <= hi {
		switch {
		case nums[lo] <= pivot:
			lo++
		case nums[hi] >= pivot:
			hi--
		default:
			nums[lo], nums[hi] = nums[hi], nums[lo]
			lo++
			hi--
		}
	}
	// hi now marks the last element <= pivot; put the pivot there.
	nums[l], nums[hi] = nums[hi], nums[l]
	quickSort(nums, l, hi-1)
	quickSort(nums, hi+1, r)
}
// countSort sorts nums ascending in place using counting sort.
// time: O(n + k), space: O(k), where k = max(nums) - min(nums) + 1.
func countSort(nums []int) {
	// Guard: the previous version delegated to maxOfNums/minOfNums, which
	// index nums[0] and would panic on an empty slice.
	if len(nums) == 0 {
		return
	}
	// Find the value range in a single pass (previously two full scans).
	minNum, maxNum := nums[0], nums[0]
	for _, v := range nums[1:] {
		if v < minNum {
			minNum = v
		} else if v > maxNum {
			maxNum = v
		}
	}
	// Tally occurrences offset by minNum so negative values work too.
	counts := make([]int, maxNum-minNum+1)
	for _, v := range nums {
		counts[v-minNum]++
	}
	// Write each value back in order, repeated by its count.
	cur := 0
	for i, c := range counts {
		for j := 0; j < c; j++ {
			nums[cur] = i + minNum
			cur++
		}
	}
}
// heapSort, we did not use the implementation from 'container'
// we implemented the maxHeap by ourselves here
// time: O(n*logn)
// space: O(1) — the heap is built in place within nums
func heapSort(nums []int) {
	// init heap
	// Starting at len/2 is safe: heapify on a leaf index is a no-op, so the
	// conventional len/2-1 start would merely skip one redundant call.
	for i := len(nums) / 2; i >= 0; i-- {
		heapify(nums, i, len(nums)-1)
	}
	// get max to the end
	// Repeatedly move the heap max to the sorted tail and re-heapify the rest.
	for i := len(nums) - 1; i >= 1; i-- {
		nums[0], nums[i] = nums[i], nums[0]
		heapify(nums, 0, i-1)
	}
}
// heapify sifts the element at index i down within nums[0..e] (e inclusive)
// until the subtree rooted at i satisfies the max-heap property.
func heapify(nums []int, i, e int) {
	for {
		largest := i
		if l := 2*i + 1; l <= e && nums[l] > nums[largest] {
			largest = l
		}
		if r := 2*i + 2; r <= e && nums[r] > nums[largest] {
			largest = r
		}
		if largest == i {
			// Both children (if any) are smaller: heap property restored.
			return
		}
		nums[i], nums[largest] = nums[largest], nums[i]
		i = largest
	}
}
// mergeSort sorts nums[l:r] (r excluded) in place via top-down merge sort.
// time: O(n*logn)
// space: O(logn + n) for recursion plus the per-merge scratch buffer
func mergeSort(nums []int, l, r int) {
	if r-l < 2 {
		return
	}
	mid := l + (r-l)/2
	mergeSort(nums, l, mid)
	mergeSort(nums, mid, r)
	// Merge the two sorted halves into a scratch buffer; on ties the right
	// half's element is taken first, matching the previous implementation.
	merged := make([]int, 0, r-l)
	left, right := l, mid
	for left < mid && right < r {
		if nums[left] < nums[right] {
			merged = append(merged, nums[left])
			left++
		} else {
			merged = append(merged, nums[right])
			right++
		}
	}
	merged = append(merged, nums[left:mid]...)
	merged = append(merged, nums[right:r]...)
	copy(nums[l:r], merged)
}
// BST
// time: O(n*logn)
// space: O(n)
// TreeNode is a binary-search-tree node; Count stores the multiplicity of Val
// so duplicate values share a single node.
type TreeNode struct {
	Val, Count int
	Left, Right *TreeNode
}
// buildBST inserts every element of nums into an unbalanced BST rooted at the
// first element, incrementing Count for duplicates. Returns nil for empty input.
func buildBST(nums []int) *TreeNode {
	if len(nums) == 0 {
		return nil
	}
	newBST := &TreeNode{nums[0], 1, nil, nil}
	for i := 1; i < len(nums); i++ {
		// Iteratively walk down to the insertion point.
		cur := newBST
		for {
			if cur.Val < nums[i] {
				if cur.Right == nil {
					cur.Right = &TreeNode{nums[i], 1, nil, nil}
					break
				} else {
					cur = cur.Right
				}
			} else if nums[i] < cur.Val {
				if cur.Left == nil {
					cur.Left = &TreeNode{nums[i], 1, nil, nil}
					break
				} else {
					cur = cur.Left
				}
			} else {
				// Duplicate value: bump the multiplicity instead of adding a node.
				cur.Count++
				break
			}
		}
	}
	return newBST
}
// toSortedArray performs an in-order traversal of the BST, expanding each
// node's Count into repeated values, yielding the elements in ascending order.
func toSortedArray(root *TreeNode) []int {
	if root == nil {
		return []int{}
	}
	leftArr := toSortedArray(root.Left)
	midArr := []int{}
	for i := 0; i < root.Count; i++ {
		midArr = append(midArr, root.Val)
	}
	rightArr := toSortedArray(root.Right)
	return append(leftArr, append(midArr, rightArr...)...)
}
// ----------------------------------------------------------------
// maxOfNums returns the largest element of nums; nums must be non-empty.
func maxOfNums(nums []int) int {
	best := nums[0]
	for _, v := range nums[1:] {
		if v > best {
			best = v
		}
	}
	return best
}
// minOfNums returns the smallest element of nums; nums must be non-empty.
func minOfNums(nums []int) int {
	best := nums[0]
	for _, v := range nums[1:] {
		if v < best {
			best = v
		}
	}
	return best
}
func main() {
fmt.Println(toSortedArray(buildBST([]int{4, 3, 2, 1, 5, 6, 3, 7, 8, 9, 4, 5, 7, 3, 5, 1, 2, 3})))
} | leetcode/0912_sort-an-array/main.go | 0.505371 | 0.50531 | main.go | starcoder |
package models
import (
"fmt"
"github.com/astaxie/beego/orm"
"strings"
"time"
)
// DtuRowOfDay is a per-meter daily collection-count row for a DTU device.
// The commented-out fields below were part of an earlier, wider query result.
type DtuRowOfDay struct {
	DTU_no string `orm:"column(dtu_no)"`
	MeterAddress int `orm:"column(meter_address)"`
	//MeterTypeNO string `orm:"column(meter_type_no)"`
	//MeterType string `orm:"column(meter_type)"`
	//GatewayNO string `orm:"column(gateway_no)"`
	//GatewayDesc string `orm:"column(gateway_desc)"`
	CollectTime time.Time `orm:"column(collect_time)"`
	DayRows int `orm:"column(day_rows)"`
	Rows int `orm:"column(rows)"`
}
// OverviewToday summarizes collection progress per meter type, as returned by
// the p_collect_overview stored procedure.
type OverviewToday struct {
	TypeID int `orm:"column(type_id)"`
	TypeNO int `orm:"column(type_no)"`
	// Fixed: the tag key was misspelled "colnum", so TypeDesc never mapped to
	// the type_desc column.
	TypeDesc string `orm:"column(type_desc)"`
	Num      int    `orm:"column(num)"`
	DayRows  int    `orm:"column(day_rows)"`
	Rows     int    `orm:"column(rows)"`
}
// CollectCountOfMonth is one (date, row-count) pair from the
// v_collect_total_rows view.
type CollectCountOfMonth struct {
	CollectTime string `orm:"column(collect_date)"`
	Rows        int    `orm:"column(rows)"`
}

// CollectBaseInfo is one electrical reading from the collect_base_info table:
// per-phase (a/b/c) and total electricity, power factors, voltages, powers,
// energies, frequency, and line voltages for a DTU/meter at a point in time.
type CollectBaseInfo struct {
	CollectTime          time.Time `orm:"column(collect_time)"`
	DTU_no               string    `orm:"column(dtu_no)"`
	MeterAddress         int       `orm:"column(meter_address)"`
	A_electricity        float64   `orm:"digits(12);decimals(4);column(a_electricity)"`
	B_electricity        float64   `orm:"digits(12);decimals(4);column(b_electricity)"`
	C_electricity        float64   `orm:"digits(12);decimals(4);column(c_electricity)"`
	A_power_factor       float64   `orm:"digits(12);decimals(4);column(a_power_factor)"`
	B_power_factor       float64   `orm:"digits(12);decimals(4);column(b_power_factor)"`
	C_power_factor       float64   `orm:"digits(12);decimals(4);column(c_power_factor)"`
	Total_power_factor   float64   `orm:"digits(12);decimals(4);column(total_power_factor)"`
	Total_p_at_ee        float64   `orm:"digits(12);decimals(4);column(total_p_at_ee)"`
	Total_r_at_ee        float64   `orm:"digits(12);decimals(4);column(total_r_at_ee)"`
	Total_ap_a_ee        float64   `orm:"digits(12);decimals(4);column(total_ap_a_ee)"`
	Total_ap_reat_ee     float64   `orm:"digits(12);decimals(4);column(total_ap_reat_ee)"`
	A_voltage            float64   `orm:"digits(12);decimals(4);column(a_voltage)"`
	B_voltage            float64   `orm:"digits(12);decimals(4);column(b_voltage)"`
	C_voltage            float64   `orm:"digits(12);decimals(4);column(c_voltage)"`
	Total_ap_power       float64   `orm:"digits(12);decimals(4);column(total_ap_power)"`
	A_ap_power           float64   `orm:"digits(12);decimals(4);column(a_ap_power)"`
	B_ap_power           float64   `orm:"digits(12);decimals(4);column(b_ap_power)"`
	C_ap_power           float64   `orm:"digits(12);decimals(4);column(c_ap_power)"`
	Total_reactive_power float64   `orm:"digits(12);decimals(4);column(total_reactive_power)"`
	A_reactive_power     float64   `orm:"digits(12);decimals(4);column(a_reactive_power)"`
	B_reactive_power     float64   `orm:"digits(12);decimals(4);column(b_reactive_power)"`
	C_reactive_power     float64   `orm:"digits(12);decimals(4);column(c_reactive_power)"`
	Total_active_power   float64   `orm:"digits(12);decimals(4);column(total_active_power)"`
	A_active_power       float64   `orm:"digits(12);decimals(4);column(a_active_power)"`
	B_active_power       float64   `orm:"digits(12);decimals(4);column(b_active_power)"`
	C_active_power       float64   `orm:"digits(12);decimals(4);column(c_active_power)"`
	Total_p_reat_ee      float64   `orm:"digits(12);decimals(4);column(total_p_reat_ee)"`
	Total_r_reat_ee      float64   `orm:"digits(12);decimals(4);column(total_r_reat_ee)"`
	Total_at_ee          float64   `orm:"digits(12);decimals(4);column(total_at_ee)"`
	Frequency            float64   `orm:"digits(12);decimals(4);column(frequency)"`
	Uab_line_voltage     float64   `orm:"digits(12);decimals(4);column(uab_line_voltage)"`
	Ubc_line_voltage     float64   `orm:"digits(12);decimals(4);column(ubc_line_voltage)"`
	Uac_line_voltage     float64   `orm:"digits(12);decimals(4);column(uac_line_voltage)"`
}

// CollectBaseInfoQueryParam carries the paging fields (BaseQueryParam) plus
// the filters for collect_base_info queries. CollectTime is a date string
// (YYYY-MM-DD); MeterAddress is kept as a string as received from the caller.
type CollectBaseInfoQueryParam struct {
	BaseQueryParam
	CollectTime  string
	DTU_no       string
	MeterAddress string
}
// CollectBaseInfoPageList returns one page of collect_base_info readings that
// match the query parameters, together with the total number of matching rows.
// CollectTime (a YYYY-MM-DD date) and MeterAddress are mandatory; missing
// filters yield (nil, 0), as does any query error.
//
// All user-supplied filter values are bound as SQL placeholders instead of
// being interpolated with fmt.Sprintf, closing the SQL-injection hole in the
// previous implementation.
func CollectBaseInfoPageList(params *CollectBaseInfoQueryParam) ([]*CollectBaseInfo, int64) {
	if len(strings.TrimSpace(params.CollectTime)) <= 0 {
		return nil, 0
	}
	if len(strings.TrimSpace(params.MeterAddress)) <= 0 {
		return nil, 0
	}

	// Expand the requested date to cover the whole day.
	beginTime := params.CollectTime + " 00:00:00"
	endTime := params.CollectTime + " 23:59:59"

	o := orm.NewOrm()
	o.Using("kxtimingdata")

	// Shared WHERE clause; the DTU number is matched as a prefix.
	where := ` FROM collect_base_info
		where collect_time >= ? and collect_time <= ?
		and dtu_no like ?
		and meter_address = ?
		`
	args := []interface{}{beginTime, endTime, params.DTU_no + "%", params.MeterAddress}

	var total int64
	if err := o.Raw("SELECT count(1) as rows"+where, args...).QueryRow(&total); err != nil {
		return nil, 0
	}

	// Offset/Limit are ints from BaseQueryParam, so formatting them with %d
	// cannot inject SQL.
	sql := `SELECT collect_time, dtu_no, meter_address,
		a_electricity, b_electricity, c_electricity,
		a_power_factor, b_power_factor, c_power_factor,
		total_power_factor, total_p_at_ee, total_r_at_ee, total_ap_a_ee, total_ap_reat_ee,
		a_voltage, b_voltage, c_voltage,
		total_ap_power, a_ap_power, b_ap_power, c_ap_power,
		total_reactive_power, a_reactive_power, b_reactive_power, c_reactive_power,
		total_active_power, a_active_power, b_active_power, c_active_power,
		total_p_reat_ee, total_r_reat_ee, total_at_ee,
		frequency,
		uab_line_voltage, ubc_line_voltage, uac_line_voltage
		` + where + fmt.Sprintf(" LIMIT %d, %d", params.Offset, params.Limit)

	data := make([]*CollectBaseInfo, 0)
	if _, err := o.Raw(sql, args...).QueryRows(&data); err != nil {
		return nil, 0
	}
	return data, total
}
// CollectBaseInfoDataList returns all matching readings (up to 65535) in one
// call, discarding the total count.
// NOTE(review): Sort and Order are set here but CollectBaseInfoPageList never
// applies an ORDER BY, so the requested ordering appears to be ignored —
// confirm against BaseQueryParam handling elsewhere.
func CollectBaseInfoDataList(params *CollectBaseInfoQueryParam) []*CollectBaseInfo {
	params.Limit = 65535
	params.Sort = "collect_time"
	params.Order = "asc"
	data, _ := CollectBaseInfoPageList(params)
	return data
}
// GetDtuRowsTodayList reports today's collection progress per DTU/meter by
// invoking the p_dtu_day_row_today stored procedure.
func GetDtuRowsTodayList() ([]*DtuRowOfDay, error) {
	o := orm.NewOrm()
	o.Using("kxtimingdata")

	rows := make([]*DtuRowOfDay, 0)
	if _, err := o.Raw("call p_dtu_day_row_today()").QueryRows(&rows); err != nil {
		return nil, err
	}
	return rows, nil
}
// GetCollectRowsOfMonth returns the daily collection row counts for all dates
// before today, read from the v_collect_total_rows view. The date comes from
// time.Now (not user input), so formatting it into the SQL string is safe.
func GetCollectRowsOfMonth() ([]*CollectCountOfMonth, error) {
	o := orm.NewOrm()
	o.Using("kxtimingdata")

	today := time.Now().Format("2006-01-02")
	query := fmt.Sprintf("SELECT collect_date, `rows` FROM v_collect_total_rows where collect_date <'%s'", today)

	result := make([]*CollectCountOfMonth, 0)
	if _, err := o.Raw(query).QueryRows(&result); err != nil {
		return nil, err
	}
	return result, nil
}
//获取概述信息
func GetOverviewToday(choiceDate string) ([]*OverviewToday, error) {
data := make([]*OverviewToday, 0)
o := orm.NewOrm()
o.Using("kxtimingdata")
sql := fmt.Sprintf(`call p_collect_overview('%s')`, choiceDate)
_, err := o.Raw(sql).QueryRows(&data)
if err != nil {
return nil, err
}
return data, nil
} | models/CollectBaseInfo.go | 0.634996 | 0.513546 | CollectBaseInfo.go | starcoder |
package ent
import (
"context"
"errors"
"fmt"
"github.com/facebookincubator/ent/dialect/sql/sqlgraph"
"github.com/facebookincubator/ent/schema/field"
"github.com/thoverik/gobench/ent/histogram"
"github.com/thoverik/gobench/ent/metric"
)
// HistogramCreate is the builder for creating a Histogram entity.
// NOTE(review): this file is ent-generated boilerplate; avoid hand-editing.
type HistogramCreate struct {
	config
	mutation *HistogramMutation // staged field/edge values for the new row
	hooks    []Hook             // mutation middleware applied in Save
}
// Field and edge setters for HistogramCreate. Each one stages a value on the
// underlying mutation and returns the builder for call chaining.

// SetTime sets the time field.
func (hc *HistogramCreate) SetTime(i int64) *HistogramCreate {
	hc.mutation.SetTime(i)
	return hc
}

// SetCount sets the count field.
func (hc *HistogramCreate) SetCount(i int64) *HistogramCreate {
	hc.mutation.SetCount(i)
	return hc
}

// SetMin sets the min field.
func (hc *HistogramCreate) SetMin(i int64) *HistogramCreate {
	hc.mutation.SetMin(i)
	return hc
}

// SetMax sets the max field.
func (hc *HistogramCreate) SetMax(i int64) *HistogramCreate {
	hc.mutation.SetMax(i)
	return hc
}

// SetMean sets the mean field.
func (hc *HistogramCreate) SetMean(f float64) *HistogramCreate {
	hc.mutation.SetMean(f)
	return hc
}

// SetStddev sets the stddev field.
func (hc *HistogramCreate) SetStddev(f float64) *HistogramCreate {
	hc.mutation.SetStddev(f)
	return hc
}

// SetMedian sets the median field.
func (hc *HistogramCreate) SetMedian(f float64) *HistogramCreate {
	hc.mutation.SetMedian(f)
	return hc
}

// SetP75 sets the p75 field.
func (hc *HistogramCreate) SetP75(f float64) *HistogramCreate {
	hc.mutation.SetP75(f)
	return hc
}

// SetP95 sets the p95 field.
func (hc *HistogramCreate) SetP95(f float64) *HistogramCreate {
	hc.mutation.SetP95(f)
	return hc
}

// SetP99 sets the p99 field.
func (hc *HistogramCreate) SetP99(f float64) *HistogramCreate {
	hc.mutation.SetP99(f)
	return hc
}

// SetP999 sets the p999 field.
func (hc *HistogramCreate) SetP999(f float64) *HistogramCreate {
	hc.mutation.SetP999(f)
	return hc
}

// SetMetricID sets the metric edge to Metric by id.
func (hc *HistogramCreate) SetMetricID(id int) *HistogramCreate {
	hc.mutation.SetMetricID(id)
	return hc
}

// SetNillableMetricID sets the metric edge to Metric by id if the given value is not nil.
func (hc *HistogramCreate) SetNillableMetricID(id *int) *HistogramCreate {
	if id != nil {
		hc = hc.SetMetricID(*id)
	}
	return hc
}

// SetMetric sets the metric edge to Metric.
func (hc *HistogramCreate) SetMetric(m *Metric) *HistogramCreate {
	return hc.SetMetricID(m.ID)
}
// Save creates the Histogram in the database.
// It first validates that every required field was staged on the mutation,
// then runs sqlSave, wrapped by any registered hooks (outermost hook first).
func (hc *HistogramCreate) Save(ctx context.Context) (*Histogram, error) {
	// Required-field validation: fail fast before touching the database.
	if _, ok := hc.mutation.Time(); !ok {
		return nil, errors.New("ent: missing required field \"time\"")
	}
	if _, ok := hc.mutation.Count(); !ok {
		return nil, errors.New("ent: missing required field \"count\"")
	}
	if _, ok := hc.mutation.Min(); !ok {
		return nil, errors.New("ent: missing required field \"min\"")
	}
	if _, ok := hc.mutation.Max(); !ok {
		return nil, errors.New("ent: missing required field \"max\"")
	}
	if _, ok := hc.mutation.Mean(); !ok {
		return nil, errors.New("ent: missing required field \"mean\"")
	}
	if _, ok := hc.mutation.Stddev(); !ok {
		return nil, errors.New("ent: missing required field \"stddev\"")
	}
	if _, ok := hc.mutation.Median(); !ok {
		return nil, errors.New("ent: missing required field \"median\"")
	}
	if _, ok := hc.mutation.P75(); !ok {
		return nil, errors.New("ent: missing required field \"p75\"")
	}
	if _, ok := hc.mutation.P95(); !ok {
		return nil, errors.New("ent: missing required field \"p95\"")
	}
	if _, ok := hc.mutation.P99(); !ok {
		return nil, errors.New("ent: missing required field \"p99\"")
	}
	if _, ok := hc.mutation.P999(); !ok {
		return nil, errors.New("ent: missing required field \"p999\"")
	}
	var (
		err  error
		node *Histogram
	)
	if len(hc.hooks) == 0 {
		// No hooks registered: write directly.
		node, err = hc.sqlSave(ctx)
	} else {
		// Wrap sqlSave as the innermost mutator, then layer the hooks around
		// it in reverse order so hooks[0] runs first.
		var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
			mutation, ok := m.(*HistogramMutation)
			if !ok {
				return nil, fmt.Errorf("unexpected mutation type %T", m)
			}
			hc.mutation = mutation
			node, err = hc.sqlSave(ctx)
			mutation.done = true
			return node, err
		})
		for i := len(hc.hooks) - 1; i >= 0; i-- {
			mut = hc.hooks[i](mut)
		}
		if _, err := mut.Mutate(ctx, hc.mutation); err != nil {
			return nil, err
		}
	}
	return node, err
}
// SaveX is like Save but panics instead of returning an error.
func (hc *HistogramCreate) SaveX(ctx context.Context) *Histogram {
	node, err := hc.Save(ctx)
	if err != nil {
		panic(err)
	}
	return node
}
// sqlSave builds a sqlgraph.CreateSpec from the staged mutation values,
// executes the INSERT, and returns the created Histogram with its new ID.
func (hc *HistogramCreate) sqlSave(ctx context.Context) (*Histogram, error) {
	var (
		h     = &Histogram{config: hc.config}
		_spec = &sqlgraph.CreateSpec{
			Table: histogram.Table,
			ID: &sqlgraph.FieldSpec{
				Type:   field.TypeInt,
				Column: histogram.FieldID,
			},
		}
	)
	// Stage each field that was set on the mutation, mirroring the value onto
	// the returned entity so it reflects what was written.
	if value, ok := hc.mutation.Time(); ok {
		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
			Type:   field.TypeInt64,
			Value:  value,
			Column: histogram.FieldTime,
		})
		h.Time = value
	}
	if value, ok := hc.mutation.Count(); ok {
		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
			Type:   field.TypeInt64,
			Value:  value,
			Column: histogram.FieldCount,
		})
		h.Count = value
	}
	if value, ok := hc.mutation.Min(); ok {
		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
			Type:   field.TypeInt64,
			Value:  value,
			Column: histogram.FieldMin,
		})
		h.Min = value
	}
	if value, ok := hc.mutation.Max(); ok {
		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
			Type:   field.TypeInt64,
			Value:  value,
			Column: histogram.FieldMax,
		})
		h.Max = value
	}
	if value, ok := hc.mutation.Mean(); ok {
		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
			Type:   field.TypeFloat64,
			Value:  value,
			Column: histogram.FieldMean,
		})
		h.Mean = value
	}
	if value, ok := hc.mutation.Stddev(); ok {
		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
			Type:   field.TypeFloat64,
			Value:  value,
			Column: histogram.FieldStddev,
		})
		h.Stddev = value
	}
	if value, ok := hc.mutation.Median(); ok {
		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
			Type:   field.TypeFloat64,
			Value:  value,
			Column: histogram.FieldMedian,
		})
		h.Median = value
	}
	if value, ok := hc.mutation.P75(); ok {
		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
			Type:   field.TypeFloat64,
			Value:  value,
			Column: histogram.FieldP75,
		})
		h.P75 = value
	}
	if value, ok := hc.mutation.P95(); ok {
		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
			Type:   field.TypeFloat64,
			Value:  value,
			Column: histogram.FieldP95,
		})
		h.P95 = value
	}
	if value, ok := hc.mutation.P99(); ok {
		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
			Type:   field.TypeFloat64,
			Value:  value,
			Column: histogram.FieldP99,
		})
		h.P99 = value
	}
	if value, ok := hc.mutation.P999(); ok {
		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
			Type:   field.TypeFloat64,
			Value:  value,
			Column: histogram.FieldP999,
		})
		h.P999 = value
	}
	// Wire the optional many-to-one "metric" edge (inverse side).
	if nodes := hc.mutation.MetricIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   histogram.MetricTable,
			Columns: []string{histogram.MetricColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: &sqlgraph.FieldSpec{
					Type:   field.TypeInt,
					Column: metric.FieldID,
				},
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges = append(_spec.Edges, edge)
	}
	if err := sqlgraph.CreateNode(ctx, hc.driver, _spec); err != nil {
		// Surface constraint violations as their typed error when possible.
		if cerr, ok := isSQLConstraintError(err); ok {
			err = cerr
		}
		return nil, err
	}
	// The driver reports the auto-generated ID as int64.
	id := _spec.ID.Value.(int64)
	h.ID = int(id)
	return h, nil
} | ent/histogram_create.go | 0.677047 | 0.478224 | histogram_create.go | starcoder |
package layout
import (
"math"
)
// "Fast and Simple Horizontal Coordinate Assignment" by <NAME> and <NAME>, 2002
// Computes horizontal coordinate in layered graph, given ordering within each layer.
// Produces result such that neighbors are close and long edges cross Layers are straight.
// Works on fully connected graphs.
// Assuming nodes do not have width.
type BrandesKopfLayersNodesHorizontalAssigner struct {
Delta int // distance between nodes, including fake ones
}
// NodesHorizontalCoordinates runs one pass of the Brandes-Köpf pipeline
// (conflict preprocessing, vertical alignment, horizontal compaction) and
// returns an x coordinate per node id.
// NOTE(review): the paper runs four directional passes and balances by
// median; only the leftmost-upper pass is implemented here (see TODO).
func (s BrandesKopfLayersNodesHorizontalAssigner) NodesHorizontalCoordinates(_ Graph, g LayeredGraph) map[uint64]int {
	typeOneSegments := preprocessing(g)
	root, align := verticalAlignment(g, typeOneSegments)
	x := horizontalCompaction(g, root, align, s.Delta)
	// TODO: balancing by taking median for every node across 4 runs for each run as in algorithm
	return x
}
// Alg 1.
// Type 1 conflicts arise when a non-inner segment (normal edge) crosses an inner segment (edge between two fake nodes).
// The algorithm traverses Layers from left to right (index l) while maintaining the upper neighbors,
// v(i)_k0 and v(i)_k1, of the two closest inner Segments.
//
// Fix: the marked segment must pair the upper neighbor u with the node
// nextLayer[l] currently being classified (paper: mark (u_k, v_l)), not with
// v, the right end of the scan window — the original marked the wrong edges
// for every l < l1.
func preprocessing(g LayeredGraph) (typeOneSegments map[[2]uint64]bool) {
	typeOneSegments = map[[2]uint64]bool{}
	for i := range g.Layers() {
		// The last layer has no layer below it, so nothing to classify.
		if i == (len(g.Layers()) - 1) {
			continue
		}
		nextLayer := g.Layers()[i+1]
		k0 := 0 // left boundary (position in layer i) of the current window
		l := 0  // first node in nextLayer not yet classified
		for l1, v := range nextLayer {
			// Find an upper neighbor of v reached through an inner segment,
			// if any. NOTE(review): node id 0 is assumed unused as a sentinel.
			var upperNeighborInnerSegment uint64
			for _, u := range g.UpperNeighbors(v) {
				if g.IsInnerSegment([2]uint64{u, v}) {
					upperNeighborInnerSegment = u
					break
				}
			}
			if (l1 == (len(nextLayer) - 1)) || upperNeighborInnerSegment != 0 {
				// Right boundary: position of the inner segment's upper end,
				// or the end of layer i when we hit the last node.
				k1 := len(g.Layers()[i]) - 1
				if upperNeighborInnerSegment != 0 {
					k1 = g.NodeYX[upperNeighborInnerSegment][1]
				}
				// Every non-inner segment leaving the window [k0, k1] crosses
				// an inner segment: mark it as a type-1 conflict.
				for l <= l1 {
					for k, u := range g.UpperNeighbors(nextLayer[l]) {
						if k < k0 || k > k1 {
							typeOneSegments[[2]uint64{u, nextLayer[l]}] = true
						}
					}
					l += 1
				}
				k0 = k1
			}
		}
	}
	return typeOneSegments
}
// Alg 2.
// Obtain a leftmost alignment with upper neighbors.
// A maximal set of vertically aligned vertices is called a block, and we define the root of a block to be its topmost vertex.
// Blocks are stored as cyclicly linked lists, each node has reference to its lower aligned neighbor and lowest refers to topmost.
// Each node has additional reference to root of its block.
//
// Fix: the paper aligns each node with one of its (up to two) median upper
// neighbors, i.e. 0-based indices (d-1)/2 .. d/2. The original loop
// `m := d/2; m < (d+1)/2` never executed for even d, so even-degree nodes
// were never aligned at all.
func verticalAlignment(g LayeredGraph, typeOneSegments map[[2]uint64]bool) (root map[uint64]uint64, align map[uint64]uint64) {
	root = make(map[uint64]uint64, len(g.NodeYX))
	align = make(map[uint64]uint64, len(g.NodeYX))
	// Initially every node is its own block of size one.
	for v := range g.NodeYX {
		root[v] = v
		align[v] = v
	}
	for i := range g.Layers() {
		r := 0 // rightmost upper-neighbor position already consumed in this layer
		for _, v := range g.Layers()[i] {
			upNeighbors := g.UpperNeighbors(v)
			if d := len(upNeighbors); d > 0 {
				// Try the lower median first, then the upper median (0-based
				// indices (d-1)/2 and d/2; equal when d is odd).
				for m := (d - 1) / 2; m <= d/2 && m < d; m++ {
					u := upNeighbors[m]
					if align[v] == v {
						// Align only along unmarked segments and only if it
						// keeps neighbor positions strictly increasing.
						if !typeOneSegments[[2]uint64{u, v}] && r < g.NodeYX[u][1] {
							align[u] = v
							root[v] = root[u]
							align[v] = root[v]
							r = g.NodeYX[u][1]
						}
					}
				}
			}
		}
	}
	return root, align
}
// part of Alg 3.
// placeBlock recursively assigns a relative x coordinate to the block rooted
// at v, pushing it right of the preceding block in each layer it spans
// (by at least delta), and merges sink/shift bookkeeping across classes.
// A block is placed at most once: x[v] acts as the "already placed" marker.
func placeBlock(g LayeredGraph, x map[uint64]int, root map[uint64]uint64, align map[uint64]uint64, sink map[uint64]uint64, shift map[uint64]int, delta int, v uint64) {
	if _, ok := x[v]; !ok {
		x[v] = 0
		flag := true
		// Walk the block's cyclic align list; the loop ends when w wraps
		// back around to the root v.
		for w := v; flag; flag = v != w {
			if g.NodeYX[w][1] > 0 {
				// u is the root of the block immediately to the left of w.
				u := root[g.Layers()[g.NodeYX[w][0]][g.NodeYX[w][1]-1]]
				placeBlock(g, x, root, align, sink, shift, delta, u)
				if sink[v] == v {
					sink[v] = sink[u]
				}
				if sink[v] != sink[u] {
					// Different classes: record how far u's class must shift.
					if s := x[v] - x[u] - delta; s < shift[sink[u]] {
						shift[sink[u]] = s
					}
				} else {
					// Same class: longest-path placement relative to u.
					if s := x[u] + delta; s > x[v] {
						x[v] = s
					}
				}
			}
			w = align[w]
		}
	}
}
// Alg 3.
// All node of a block are assigned the coordinate of the root.
// Partition each block in to classes.
// Class is defined by reachable sink which has the topmost root
// Within each class, we apply a longest path layering,
// i.e. the relative coordinate of a block with respect to the defining
// sink is recursively determined to be the maximum coordinate of
// the preceding blocks in the same class, plus minimum separation.
// For each class, from top to bottom, we then compute the absolute coordinates
// of its members by placing the class with minimum separation from previously placed classes.
func horizontalCompaction(g LayeredGraph, root map[uint64]uint64, align map[uint64]uint64, delta int) (x map[uint64]int) {
	sink := map[uint64]uint64{}
	shift := map[uint64]int{}
	x = map[uint64]int{}
	// Every node starts as its own sink with an "unset" (max) shift.
	for v := range g.NodeYX {
		sink[v] = v
		shift[v] = math.MaxInt
	}
	// root coordinates relative to sink
	for v := range g.NodeYX {
		if root[v] == v {
			placeBlock(g, x, root, align, sink, shift, delta, v)
		}
	}
	// absolute coordinates
	for v := range g.NodeYX {
		x[v] = x[root[v]]
		// shift == MaxInt means the class was never shifted; leave as-is.
		if s := shift[sink[root[v]]]; s < math.MaxInt {
			x[v] += s
		}
	}
	return x
} | layout/brandeskopf.go | 0.615897 | 0.5144 | brandeskopf.go | starcoder |
package digitalocean
import (
"context"
"fmt"
"github.com/digitalocean/godo"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/terraform-providers/terraform-provider-digitalocean/internal/datalist"
)
// dataSourceDigitalOceanSizes declares the "digitalocean_sizes" data source:
// the per-size record schema, which attributes can filter/sort the list, and
// the fetch/flatten callbacks wired through the shared datalist helper.
func dataSourceDigitalOceanSizes() *schema.Resource {
	dataListConfig := &datalist.ResourceConfig{
		RecordSchema: map[string]*schema.Schema{
			"slug": {
				Type:        schema.TypeString,
				Description: "A human-readable string that is used to uniquely identify each size.",
			},
			"available": {
				Type:        schema.TypeBool,
				Description: "This represents whether new Droplets can be created with this size.",
			},
			"transfer": {
				Type:        schema.TypeFloat,
				Description: "The amount of transfer bandwidth that is available for Droplets created in this size. This only counts traffic on the public interface. The value is given in terabytes.",
			},
			"price_monthly": {
				Type:        schema.TypeFloat,
				Description: "The monthly cost of Droplets created in this size if they are kept for an entire month. The value is measured in US dollars.",
			},
			"price_hourly": {
				Type:        schema.TypeFloat,
				Description: "The hourly cost of Droplets created in this size as measured hourly. The value is measured in US dollars.",
			},
			"memory": {
				Type:        schema.TypeInt,
				Description: "The amount of RAM allocated to Droplets created of this size. The value is measured in megabytes.",
			},
			"vcpus": {
				Type:        schema.TypeInt,
				Description: "The number of CPUs allocated to Droplets of this size.",
			},
			"disk": {
				Type:        schema.TypeInt,
				Description: "The amount of disk space set aside for Droplets of this size. The value is measured in gigabytes.",
			},
			"regions": {
				Type:        schema.TypeSet,
				Elem:        &schema.Schema{Type: schema.TypeString},
				Description: "List of region slugs where Droplets can be created in this size.",
			},
		},
		FilterKeys: []string{
			"slug",
			"regions",
			"memory",
			"vcpus",
			"disk",
			"transfer",
			"price_monthly",
			"price_hourly",
			"available",
		},
		SortKeys: []string{
			"slug",
			"memory",
			"vcpus",
			"disk",
			"transfer",
			"price_monthly",
			"price_hourly",
		},
		ResultAttributeName: "sizes",
		FlattenRecord:       flattenDigitalOceanSize,
		GetRecords:          getDigitalOceanSizes,
	}

	return datalist.NewResource(dataListConfig)
}
// getDigitalOceanSizes fetches every Droplet size from the DigitalOcean API,
// following pagination until the last page is reached.
func getDigitalOceanSizes(meta interface{}) ([]interface{}, error) {
	client := meta.(*CombinedConfig).godoClient()

	opts := &godo.ListOptions{
		Page:    1,
		PerPage: 200,
	}
	all := []interface{}{}
	for {
		pageSizes, resp, err := client.Sizes.List(context.Background(), opts)
		if err != nil {
			return nil, fmt.Errorf("Error retrieving sizes: %s", err)
		}
		for _, s := range pageSizes {
			all = append(all, s)
		}

		if resp.Links == nil || resp.Links.IsLastPage() {
			break
		}
		current, err := resp.Links.CurrentPage()
		if err != nil {
			return nil, fmt.Errorf("Error retrieving sizes: %s", err)
		}
		opts.Page = current + 1
	}

	return all, nil
}
func flattenDigitalOceanSize(size, meta interface{}) (map[string]interface{}, error) {
s := size.(godo.Size)
flattenedSize := map[string]interface{}{}
flattenedSize["slug"] = s.Slug
flattenedSize["available"] = s.Available
flattenedSize["transfer"] = s.Transfer
flattenedSize["price_monthly"] = s.PriceMonthly
flattenedSize["price_hourly"] = s.PriceHourly
flattenedSize["memory"] = s.Memory
flattenedSize["vcpus"] = s.Vcpus
flattenedSize["disk"] = s.Disk
flattenedRegions := schema.NewSet(schema.HashString, []interface{}{})
for _, r := range s.Regions {
flattenedRegions.Add(r)
}
flattenedSize["regions"] = flattenedRegions
return flattenedSize, nil
} | vendor/github.com/terraform-providers/terraform-provider-digitalocean/digitalocean/datasource_digitalocean_sizes.go | 0.644449 | 0.457803 | datasource_digitalocean_sizes.go | starcoder |
package assert
import (
"github.com/tinyhubs/et/et"
"testing"
)
// Equal is used to check if exp equals got.
func Equal(t *testing.T, exp, got interface{}) {
	et.AssertInner(t, "", &et.Equal{exp, got}, 2)
}

// Equali is the same as Equal but takes a msg to express your intention.
func Equali(t *testing.T, msg string, exp, got interface{}) {
	et.AssertInner(t, msg, &et.Equal{exp, got}, 2)
}

// NotEqual is used to check if exp is not equal to got.
func NotEqual(t *testing.T, exp, got interface{}) {
	et.AssertInner(t, "", &et.NotEqual{exp, got}, 2)
}

// NotEquali is the same as NotEqual but takes a msg to express your intention.
func NotEquali(t *testing.T, msg string, exp, got interface{}) {
	et.AssertInner(t, msg, &et.NotEqual{exp, got}, 2)
}

// True is used to check that got is true.
func True(t *testing.T, got bool) {
	et.AssertInner(t, "", &et.True{got}, 2)
}

// Truei is the same as True but takes a msg to express your intention.
func Truei(t *testing.T, msg string, got bool) {
	et.AssertInner(t, msg, &et.True{got}, 2)
}

// False is used to check that got is false.
func False(t *testing.T, got bool) {
	et.AssertInner(t, "", &et.False{got}, 2)
}

// Falsei is the same as False but takes a msg to express your intention.
func Falsei(t *testing.T, msg string, got bool) {
	et.AssertInner(t, msg, &et.False{got}, 2)
}

// Panic is used to check that fn panics.
func Panic(t *testing.T, fn func()) {
	et.AssertInner(t, "", &et.Panic{fn}, 2)
}

// Panici is the same as Panic but takes a msg to express your intention.
func Panici(t *testing.T, msg string, fn func()) {
	et.AssertInner(t, msg, &et.Panic{fn}, 2)
}

// NoPanic is used to check that fn does not panic.
func NoPanic(t *testing.T, fn func()) {
	et.AssertInner(t, "", &et.NoPanic{fn}, 2)
}

// NoPanici is the same as NoPanic but takes a msg to express your intention.
func NoPanici(t *testing.T, msg string, fn func()) {
	et.AssertInner(t, msg, &et.NoPanic{fn}, 2)
}

// Match is used to check that got matches the regular expression regex.
func Match(t *testing.T, regex string, got string) {
	et.AssertInner(t, "", &et.Match{regex, got}, 2)
}

// Matchi is the same as Match but takes a msg to express your intention.
func Matchi(t *testing.T, msg string, regex string, got string) {
	et.AssertInner(t, msg, &et.Match{regex, got}, 2)
}

// NotMatch is used to check that got does not match the regular expression regex.
func NotMatch(t *testing.T, regex string, got string) {
	et.AssertInner(t, "", &et.NotMatch{regex, got}, 2)
}

// NotMatchi is the same as NotMatch but takes a msg to express your intention.
func NotMatchi(t *testing.T, msg string, regex string, got string) {
	et.AssertInner(t, msg, &et.NotMatch{regex, got}, 2)
}

// Nil expects got to be nil.
func Nil(t *testing.T, got interface{}) {
	et.AssertInner(t, "", &et.Nil{got}, 2)
}

// Nili is the same as Nil but takes a msg to express your intention.
func Nili(t *testing.T, msg string, got interface{}) {
	et.AssertInner(t, msg, &et.Nil{got}, 2)
}

// NotNil expects got to be non-nil.
func NotNil(t *testing.T, got interface{}) {
	et.AssertInner(t, "", &et.NotNil{got}, 2)
}
// NotNili is same with NotNil but a need msg to express your intention.
func NotNili(t *testing.T, msg string, got interface{}) {
et.AssertInner(t, msg, &et.NotNil{got}, 2)
} | assert/assert.go | 0.592195 | 0.567757 | assert.go | starcoder |
package httpc
import "net/http"
// StatusFn reports whether an actual response status code satisfies an
// expected condition.
type StatusFn func(statusCode int) bool

// StatusIn checks whether the response's status code matches at least 1
// of the input status codes provided.
func StatusIn(status int, others ...int) StatusFn {
	codes := append([]int{status}, others...)
	return func(got int) bool {
		for _, want := range codes {
			if want == got {
				return true
			}
		}
		return false
	}
}

// StatusInRange checks the response's status code is in the range provided.
// The range is [low, high).
func StatusInRange(low, high int) StatusFn {
	return func(got int) bool {
		return got >= low && got < high
	}
}

// StatusNotIn checks whether the response's status code matches none of the
// input status codes provided.
func StatusNotIn(status int, others ...int) StatusFn {
	in := StatusIn(status, others...)
	return func(got int) bool {
		return !in(got)
	}
}

// exactly builds a StatusFn that matches one specific status code.
func exactly(want int) StatusFn {
	return func(got int) bool {
		return got == want
	}
}

// StatusOK compares the response's status code to match Status OK.
func StatusOK() StatusFn {
	return exactly(http.StatusOK)
}

// StatusAccepted compares the response's status code to match Status Accepted.
func StatusAccepted() StatusFn {
	return exactly(http.StatusAccepted)
}

// StatusPartialContent compares the response's status code to match Status Partial Content.
func StatusPartialContent() StatusFn {
	return exactly(http.StatusPartialContent)
}

// StatusSuccessfulRange matches any status code in the successful 2xx range.
func StatusSuccessfulRange() StatusFn {
	return func(got int) bool {
		return got >= 200 && got <= 299
	}
}

// StatusCreated compares the response's status code to match Status Created.
func StatusCreated() StatusFn {
	return exactly(http.StatusCreated)
}

// StatusNoContent compares the response's status code to match Status No Content.
func StatusNoContent() StatusFn {
	return exactly(http.StatusNoContent)
}

// StatusForbidden compares the response's status code to match Status Forbidden.
func StatusForbidden() StatusFn {
	return exactly(http.StatusForbidden)
}

// StatusNotFound compares the response's status code to match Status Not Found.
func StatusNotFound() StatusFn {
	return exactly(http.StatusNotFound)
}

// StatusUnprocessableEntity compares the response's status code to match Status Unprocessable Entity.
func StatusUnprocessableEntity() StatusFn {
	return exactly(http.StatusUnprocessableEntity)
}

// StatusInternalServerError compares the response's status code to match Status Internal Server Error.
func StatusInternalServerError() StatusFn {
	return exactly(http.StatusInternalServerError)
}

// statusMatches reports whether status satisfies any of fns.
func statusMatches(status int, fns []StatusFn) bool {
	for _, match := range fns {
		if match(status) {
			return true
		}
	}
	return false
}
package gfx
import (
"fmt"
"image/color"
)
// A Font is a bitmapped font which can represent glyphs, or renderings of a
// single character, as FrameBuffers that can then be blitted onto a larger
// FrameBuffer.
type Font struct {
	// GlyphWidth is the width of a certain glyph within the font
	GlyphWidth uint
	// GlyphHeight is the height of a certain glyph within the font
	GlyphHeight uint

	// defaultGlyph is returned for characters with no registered glyph.
	defaultGlyph *FrameBuffer
	// glyphMap maps a character code to its rendered glyph.
	glyphMap map[int]*FrameBuffer
}
// NewFont returns a new font whose glyphs all share the given width and
// height, with an empty (all-off) default glyph and no registered glyphs.
func NewFont(width, height uint) *Font {
	return &Font{
		GlyphWidth:   width,
		GlyphHeight:  height,
		defaultGlyph: NewFrameBuffer(width, height),
		glyphMap:     map[int]*FrameBuffer{},
	}
}
// Glyph returns the glyph registered for ch. When ch has no registered glyph
// it returns the font's default glyph rather than an error.
func (f *Font) Glyph(ch int) *FrameBuffer {
	if fb, ok := f.glyphMap[ch]; ok {
		return fb
	}
	return f.defaultGlyph
}
// DefineGlyph defines a new glyph (or replaces an existing one) for ch.
// points is a row-major bitmap of length GlyphWidth*GlyphHeight where zero
// means "do not draw" and any non-zero byte means "draw"; a wrong length
// panics.
func (f *Font) DefineGlyph(ch int, points []byte) {
	if len(points) != int(f.GlyphWidth)*int(f.GlyphHeight) {
		// Fixed: the panic message had an unbalanced "(" / "]".
		panic(fmt.Sprintf(
			"invalid points length for font (pl[%d] != w[%d] x h[%d])",
			len(points), f.GlyphWidth, f.GlyphHeight,
		))
	}
	fb := NewFrameBuffer(f.GlyphWidth, f.GlyphHeight)
	for i, pt := range points {
		ui := uint(i)
		// It's ok to ignore the error return here, since the only error
		// condition of SetCell is an out-of-bounds set, which the length
		// check above rules out.
		_ = fb.SetCell(ui%f.GlyphWidth, ui/f.GlyphWidth, gcolor(pt))
	}
	f.glyphMap[ch] = fb
}
// gcolor maps a bitmap byte to a drawing color: zero ("off") becomes the zero
// RGBA value and any non-zero byte ("on") becomes white (alpha left at zero,
// matching the original behavior).
func gcolor(b byte) color.RGBA {
	switch b {
	case 0:
		return color.RGBA{}
	default:
		return color.RGBA{R: 255, G: 255, B: 255}
	}
}
// DefineGlyphAsBuffer will take a fully formed framebuffer as its glyph. If
// the given framebuffer does not have the same dimensions as our font, this
// method will panic.
func (f *Font) DefineGlyphAsBuffer(ch int, fb *FrameBuffer) {
	if fb.Width != f.GlyphWidth || fb.Height != f.GlyphHeight {
		panic(fmt.Sprintf(
			"fb width[%d] or height[%d] mismatches font width[%d] or height[%d]",
			fb.Width, fb.Height, f.GlyphWidth, f.GlyphHeight,
		))
	}
	f.glyphMap[ch] = fb
}
// Write will write a message out to a framebuffer at some given position.
func (f *Font) Write(message string, x, y uint, fb *FrameBuffer) error {
cursor := x
for _, c := range message {
err := fb.Blit(cursor, y, f.glyphMap[int(c)])
if err != nil {
return err
}
cursor += f.GlyphWidth
}
return nil
} | pkg/gfx/font.go | 0.787727 | 0.468 | font.go | starcoder |
package optional
import "errors"
var (
// ErrNoneValueTaken represents the error that is raised when None value is taken.
ErrNoneValueTaken = errors.New("none value taken")
)
// Option is a data type that must be Some (i.e. having a value) or None (i.e. doesn't have a value).
type Option[T any] struct {
value T
exists *struct{}
}
// Some constructs an Option that wraps the given value (the "present" state).
func Some[T any](value T) Option[T] {
	marker := struct{}{}
	return Option[T]{value: value, exists: &marker}
}
// None constructs an Option in the "absent" state: its zero value, with a
// nil exists marker.
func None[T any]() Option[T] {
	var absent Option[T]
	return absent
}
// IsNone reports whether the Option does *not* hold a value.
func (o Option[T]) IsNone() bool {
	return !o.IsSome()
}
// IsSome returns whether the Option has a value or not.
// The non-nil exists marker is the sole indicator of presence.
func (o Option[T]) IsSome() bool {
	return o.exists != nil
}
// Take takes the contained value in Option.
// If the Option is Some, this returns the contained value and a nil error.
// Otherwise it returns the zero value of T together with ErrNoneValueTaken.
func (o Option[T]) Take() (T, error) {
	if o.IsSome() {
		return o.value, nil
	}
	// o.value is the zero value of T for a None Option.
	return o.value, ErrNoneValueTaken
}
// TakeOr returns the contained value if the Option is Some; otherwise it
// returns fallbackValue.
func (o Option[T]) TakeOr(fallbackValue T) T {
	if o.IsSome() {
		return o.value
	}
	return fallbackValue
}
// TakeOrElse returns the contained value if the Option is Some; otherwise it
// invokes fallbackFunc and returns its result. The function is only called
// in the None case.
func (o Option[T]) TakeOrElse(fallbackFunc func() T) T {
	if o.IsSome() {
		return o.value
	}
	return fallbackFunc()
}
// Filter returns the receiver unchanged when it is Some and its value
// satisfies the predicate; in every other case (None, or predicate false)
// it returns None.
func (o Option[T]) Filter(predicate func(v T) bool) Option[T] {
	if o.IsSome() && predicate(o.value) {
		return o
	}
	return None[T]()
}
// Map transforms a Some value through mapper into an Option of the result
// type; a None input propagates as None without invoking mapper.
func Map[T, U any](option Option[T], mapper func(v T) U) Option[U] {
	if option.IsSome() {
		return Some(mapper(option.value))
	}
	return None[U]()
}
// MapOr transforms a Some value through mapper and returns the *actual*
// result; a None input yields fallbackValue and mapper is never called.
func MapOr[T, U any](option Option[T], fallbackValue U, mapper func(v T) U) U {
	if option.IsSome() {
		return mapper(option.value)
	}
	return fallbackValue
}
// MapWithError transforms a Some value through a fallible mapper.
// A None input yields (None, nil); a mapper error yields (None, error);
// otherwise the mapped value is returned as (Some, nil).
func MapWithError[T, U any](option Option[T], mapper func(v T) (U, error)) (Option[U], error) {
	none := None[U]()
	if option.IsNone() {
		return none, nil
	}
	mapped, err := mapper(option.value)
	if err != nil {
		return none, err
	}
	return Some(mapped), nil
}
// MapOrWithError transforms a Some value through a fallible mapper and
// returns the *actual* (value, error) pair; a None input yields
// (fallbackValue, nil) and mapper is never called.
func MapOrWithError[T, U any](option Option[T], fallbackValue U, mapper func(v T) (U, error)) (U, error) {
	if option.IsSome() {
		return mapper(option.value)
	}
	return fallbackValue, nil
}
// Pair is a data type that represents a tuple that has two elements.
// It is the carrier type used by Zip/Unzip.
type Pair[T, U any] struct {
	Value1 T
	Value2 U
}
// Zip combines two Options into an Option of a Pair holding both values.
// If either input is None, the result is None.
func Zip[T, U any](opt1 Option[T], opt2 Option[U]) Option[Pair[T, U]] {
	if opt1.IsNone() || opt2.IsNone() {
		return None[Pair[T, U]]()
	}
	return Some(Pair[T, U]{Value1: opt1.value, Value2: opt2.value})
}
// ZipWith combines two Options into a single Option via the zipper function.
// If either input is None, the result is None and zipper is never called.
func ZipWith[T, U, V any](opt1 Option[T], opt2 Option[U], zipper func(opt1 T, opt2 U) V) Option[V] {
	if opt1.IsNone() || opt2.IsNone() {
		return None[V]()
	}
	return Some(zipper(opt1.value, opt2.value))
}
// Unzip splits an Option of a Pair into two Options, one per element.
// A None input produces (None, None).
func Unzip[T, U any](zipped Option[Pair[T, U]]) (Option[T], Option[U]) {
	if zipped.IsSome() {
		p := zipped.value
		return Some(p.Value1), Some(p.Value2)
	}
	return None[T](), None[U]()
}
// UnzipWith extracts the values from the given value according to the unzipper function and pack the into each Option value.
// If the given zipped value is None, this returns None for all return values.
func UnzipWith[T, U, V any](zipped Option[V], unzipper func(zipped V) (T, U)) (Option[T], Option[U]) {
if zipped.IsNone() {
return None[T](), None[U]()
}
v1, v2 := unzipper(zipped.value)
return Some(v1), Some(v2)
} | option.go | 0.807916 | 0.476823 | option.go | starcoder |
package feature
import (
"fmt"
"math"
)
/*
Criterion represents a constraint on a feature
Its SatisfiedBy method takes a sample and returns a boolean indicating if
the given value satisfies the feature criterion.
Its Feature method returns the feature on which the criterion is applied.
*/
type Criterion interface {
	Feature() Feature
	SatisfiedBy(sample Sample) (bool, error)
}
/*
Sample is an interface for something that can satisfy a Criterion.
Its ValueFor method returns the value corresponding to the feature
passed as parameter.
*/
type Sample interface {
	ValueFor(Feature) (interface{}, error)
}
/*
ContinuousCriterion represents a constraint on a continuous feature, a
range that delimits which values it may take. The interval can be open on one end,
thus representing -Infinity or +Infinity
Its Interval method returns the start and end of the interval to which the
feature is constrained as a pair of float64 values.
*/
type ContinuousCriterion interface {
	Criterion
	Interval() (float64, float64)
}
/*
DiscreteCriterion represents a constraint on a discrete feature, a
value it may take.
Its Value method returns the value to which the feature is constrained as
a string.
*/
type DiscreteCriterion interface {
	Criterion
	Value() string
}
/*
UndefinedCriterion represents the lack of constraint on a specific feature.
IsUndefinedCriterion is a marker method that distinguishes this interface
from the plain Criterion it embeds.
*/
type UndefinedCriterion interface {
	Criterion
	IsUndefinedCriterion() bool
}
// continuousCriterion is the concrete ContinuousCriterion: the feature's
// value must lie in the half-open interval [a, b), where either bound may
// be +/-Inf to leave that end open (see SatisfiedBy).
type continuousCriterion struct {
	feature *ContinuousFeature
	a, b float64
}
// discreteCriterion is the concrete DiscreteCriterion: the feature's value
// must equal the given string.
type discreteCriterion struct {
	feature *DiscreteFeature
	value string
}
// undefinedCriterion is the concrete UndefinedCriterion: no constraint at
// all on the feature; SatisfiedBy is always true.
type undefinedCriterion struct {
	feature Feature
}
/*
NewContinuousCriterion takes a ContinuousFeature and a pair of float64
values marking the start and end of an interval, and returns a
ContinuousCriterion constraining the feature to that interval. The interval
can be left open on either end by passing -Inf and/or +Inf.
*/
func NewContinuousCriterion(feature *ContinuousFeature, a float64, b float64) ContinuousCriterion {
	criterion := continuousCriterion{feature: feature, a: a, b: b}
	return &criterion
}
/*
NewDiscreteCriterion takes a DiscreteFeature and a string value and returns
a DiscreteCriterion constraining the feature to be equal to that value.
*/
func NewDiscreteCriterion(feature *DiscreteFeature, value string) DiscreteCriterion {
	return &discreteCriterion{feature, value}
}
/*
NewUndefinedCriterion takes a Feature and returns a Criterion that
is always satisfied (i.e. the feature is left unconstrained).
*/
func NewUndefinedCriterion(f Feature) UndefinedCriterion {
	return &undefinedCriterion{feature: f}
}
/*
Feature returns the feature to which the constraint applies.
*/
func (cfc *continuousCriterion) Feature() Feature {
	return cfc.feature
}
/*
SatisfiedBy reports whether the sample satisfies this criterion. It returns
false when the sample defines no value for the feature or the value is not
a float64; otherwise it checks membership in the half-open interval [a, b),
treating an infinite bound as "open" on that side. Errors from ValueFor are
propagated.
*/
func (cfc *continuousCriterion) SatisfiedBy(sample Sample) (bool, error) {
	raw, err := sample.ValueFor(cfc.feature)
	if err != nil || raw == nil {
		return false, err
	}
	v, isFloat := raw.(float64)
	if !isFloat {
		return false, nil
	}
	lowerOK := math.IsInf(cfc.a, 0) || cfc.a <= v
	upperOK := math.IsInf(cfc.b, 0) || v < cfc.b
	return lowerOK && upperOK, nil
}
// Interval returns the start and end of the interval to which the feature
// is constrained; either may be +/-Inf for an open end.
func (cfc *continuousCriterion) Interval() (float64, float64) {
	return cfc.a, cfc.b
}
// String renders the constraint in a human-readable form, omitting an
// infinite bound from the output.
func (cfc *continuousCriterion) String() string {
	name := cfc.feature.Name()
	switch {
	case math.IsInf(cfc.a, 0):
		return fmt.Sprintf("%s < %f", name, cfc.b)
	case math.IsInf(cfc.b, 0):
		return fmt.Sprintf("%f <= %s", cfc.a, name)	
	default:
		return fmt.Sprintf("%f <= %s < %f", cfc.a, name, cfc.b)
	}
}
/*
Feature returns the feature to which the constraint applies.
*/
func (dfc *discreteCriterion) Feature() Feature {
	return dfc.feature
}
/*
SatisfiedBy reports whether the sample satisfies this criterion. It returns
false when the sample defines no value for the feature or the value is not
a string; otherwise it compares the string against the criterion's value.
Errors from ValueFor are propagated.
*/
func (dfc *discreteCriterion) SatisfiedBy(sample Sample) (bool, error) {
	raw, err := sample.ValueFor(dfc.feature)
	if err != nil || raw == nil {
		return false, err
	}
	s, isString := raw.(string)
	return isString && s == dfc.value, nil
}
// Value returns the string value to which the feature is constrained.
func (dfc *discreteCriterion) Value() string {
	return dfc.value
}
// String renders the constraint in a human-readable "feature is value" form.
func (dfc *discreteCriterion) String() string {
	return fmt.Sprintf("%s is %s", dfc.feature.Name(), dfc.value)
}
func (u *undefinedCriterion) Feature() Feature {
return u.feature
}
func (u *undefinedCriterion) SatisfiedBy(sample Sample) (bool, error) {
return true, nil
}
func (u *undefinedCriterion) IsUndefinedCriterion() bool {
return true
}
func (u *undefinedCriterion) String() string {
return fmt.Sprintf("%s not defined", u.feature.Name())
} | feature/criterion.go | 0.879981 | 0.563018 | criterion.go | starcoder |
package policyv1

import (
	hash "hash"
)

// NOTE(review): the wrappers below follow a rigid, repetitive pattern and
// appear to be machine-generated (protoc-plugin style); prefer regenerating
// this file over hand-editing it. Each wrapper is a nil-safe shim over the
// corresponding *_hashpb_sum function defined elsewhere in the package.

// HashPB computes a hash of the message using the given hash function
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *Policy) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_Policy_hashpb_sum(m, hasher, ignore)
	}
}

// HashPB computes a hash of the message using the given hash function
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *Metadata) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_Metadata_hashpb_sum(m, hasher, ignore)
	}
}

// HashPB computes a hash of the message using the given hash function
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *ResourcePolicy) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_ResourcePolicy_hashpb_sum(m, hasher, ignore)
	}
}

// HashPB computes a hash of the message using the given hash function
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *ResourceRule) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_ResourceRule_hashpb_sum(m, hasher, ignore)
	}
}

// HashPB computes a hash of the message using the given hash function
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *PrincipalPolicy) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_PrincipalPolicy_hashpb_sum(m, hasher, ignore)
	}
}

// HashPB computes a hash of the message using the given hash function
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *PrincipalRule) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_PrincipalRule_hashpb_sum(m, hasher, ignore)
	}
}

// HashPB computes a hash of the message using the given hash function
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *PrincipalRule_Action) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_PrincipalRule_Action_hashpb_sum(m, hasher, ignore)
	}
}

// HashPB computes a hash of the message using the given hash function
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *DerivedRoles) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_DerivedRoles_hashpb_sum(m, hasher, ignore)
	}
}

// HashPB computes a hash of the message using the given hash function
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *RoleDef) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_RoleDef_hashpb_sum(m, hasher, ignore)
	}
}

// HashPB computes a hash of the message using the given hash function
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *Condition) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_Condition_hashpb_sum(m, hasher, ignore)
	}
}

// HashPB computes a hash of the message using the given hash function
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *Match) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_Match_hashpb_sum(m, hasher, ignore)
	}
}

// HashPB computes a hash of the message using the given hash function
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *Match_ExprList) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_Match_ExprList_hashpb_sum(m, hasher, ignore)
	}
}

// HashPB computes a hash of the message using the given hash function
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *Schemas) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_Schemas_hashpb_sum(m, hasher, ignore)
	}
}

// HashPB computes a hash of the message using the given hash function
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *Schemas_IgnoreWhen) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_Schemas_IgnoreWhen_hashpb_sum(m, hasher, ignore)
	}
}

// HashPB computes a hash of the message using the given hash function
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *Schemas_Schema) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_Schemas_Schema_hashpb_sum(m, hasher, ignore)
	}
}

// HashPB computes a hash of the message using the given hash function
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *TestSuite) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_TestSuite_hashpb_sum(m, hasher, ignore)
	}
}

// HashPB computes a hash of the message using the given hash function
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *TestTable) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_TestTable_hashpb_sum(m, hasher, ignore)
	}
}

// HashPB computes a hash of the message using the given hash function
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *TestTable_Input) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_TestTable_Input_hashpb_sum(m, hasher, ignore)
	}
}

// HashPB computes a hash of the message using the given hash function
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *TestTable_Expectation) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_TestTable_Expectation_hashpb_sum(m, hasher, ignore)
	}
}

// HashPB computes a hash of the message using the given hash function
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *Test) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_Test_hashpb_sum(m, hasher, ignore)
	}
}

// HashPB computes a hash of the message using the given hash function
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *Test_TestName) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_Test_TestName_hashpb_sum(m, hasher, ignore)
	}
}

// HashPB computes a hash of the message using the given hash function
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *TestResults) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_TestResults_hashpb_sum(m, hasher, ignore)
	}
}

// HashPB computes a hash of the message using the given hash function
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *TestResults_Tally) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_TestResults_Tally_hashpb_sum(m, hasher, ignore)
	}
}

// HashPB computes a hash of the message using the given hash function
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *TestResults_Summary) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_TestResults_Summary_hashpb_sum(m, hasher, ignore)
	}
}

// HashPB computes a hash of the message using the given hash function
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *TestResults_Suite) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_TestResults_Suite_hashpb_sum(m, hasher, ignore)
	}
}

// HashPB computes a hash of the message using the given hash function
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *TestResults_Principal) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_TestResults_Principal_hashpb_sum(m, hasher, ignore)
	}
}

// HashPB computes a hash of the message using the given hash function
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *TestResults_Resource) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_TestResults_Resource_hashpb_sum(m, hasher, ignore)
	}
}

// HashPB computes a hash of the message using the given hash function
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *TestResults_Action) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_TestResults_Action_hashpb_sum(m, hasher, ignore)
	}
}

// HashPB computes a hash of the message using the given hash function
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *TestResults_Details) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_TestResults_Details_hashpb_sum(m, hasher, ignore)
	}
}

// HashPB computes a hash of the message using the given hash function
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *TestResults_Failure) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_TestResults_Failure_hashpb_sum(m, hasher, ignore)
	}
}
package mapping
import "fmt"
// V2SuggestMapping returns the v2 suggest index mapping as a JSON string
// with the given shard and replica counts interpolated. The counts are
// first normalized by setDefaults (behavior defined elsewhere in this
// package).
func V2SuggestMapping(shards, replicas int) string {
	shards, replicas = setDefaults(shards, replicas)
	return fmt.Sprintf(v2SuggestMapping, shards, replicas)
}
// v2SuggestMapping is the default Elasticsearch mapping for the v2 suggest
// index. It contains two %d placeholders, filled by V2SuggestMapping with
// the number_of_shards and number_of_replicas settings (in that order).
var v2SuggestMapping = `{
"settings": {
"index": {
"mapping.total_fields.limit": 1000,
"mapping.depth.limit": 20,
"mapping.nested_fields.limit": 50,
"number_of_shards": %d,
"number_of_replicas": %d
},
"analysis": {
"analyzer": {
"default": {
"tokenizer": "standard",
"char_filter": ["html_strip"],
"filter" : ["lowercase","asciifolding"]
}
}
}
},
"mappings":{
"dynamic": true,
"date_detection" : false,
"properties": {
"meta": {
"type": "object",
"properties": {
"spec": {"type": "keyword"},
"orgID": {"type": "keyword"},
"hubID": {"type": "keyword"},
"revision": {"type": "long"},
"tags": {"type": "keyword"},
"docType": {"type": "keyword"},
"namedGraphURI": {"type": "keyword"},
"entryURI": {"type": "keyword"},
"modified": {"type": "date"},
"sourceID": {"type": "keyword"},
"sourcePath": {"type": "keyword"},
"groupID": {"type": "keyword"}
}
},
"id": {"type": "keyword"},
"brocadeID": {"type": "keyword"},
"orgID": {"type": "keyword"},
"suggestType": {"type": "keyword"},
"json": {
"type": "keyword",
"store": true,
"index": false,
"doc_values": false
},
"text": {
"type": "text",
"fields": {
"keyword": {"type": "keyword", "ignore_above": 512},
"suggest": { "type": "completion"}
}
},
"parent": {
"type": "text",
"fields": {
"keyword": {"type": "keyword", "ignore_above": 512}
}
},
"name": {
"type": "text",
"fields": {
"keyword": {"type": "keyword", "ignore_above": 512},
"suggest": { "type": "completion"}
}
},
"capacity": {
"type": "text",
"fields": {
"keyword": {"type": "keyword", "ignore_above": 512},
"suggest": { "type": "completion"}
}
},
"capacityID": {
"type": "text",
"fields": {
"keyword": {"type": "keyword", "ignore_above": 512}
}
},
"hasCapacity": {"type": "boolean"},
"isCapacity": {"type": "boolean"},
"nameWithContext": {
"type": "text",
"fields": {
"keyword": {"type": "keyword", "ignore_above": 512},
"suggest": { "type": "completion"}
}
}
}
}
}`
// V2SuggestMappingUpdate returns the incremental mapping update for the v2
// suggest index as a JSON string. It has no placeholders, so no formatting
// is applied.
func V2SuggestMappingUpdate() string {
	return v2SuggestMappingUpdate
}
// v2SuggestMappingUpdate contains incremental updates to the original
// suggest mapping. These fields must be added explicitly because the index
// uses strict dynamic-field handling, so documents referencing them would
// otherwise be rejected.
var v2SuggestMappingUpdate = `{
"properties": {
"meta": {
"type": "object",
"properties": {
"sourceID": {"type": "keyword"},
"sourcePath": {"type": "keyword"},
"groupID": {"type": "keyword"}
}
},
"text": {
"type": "text",
"fields": {
"keyword": {"type": "keyword", "ignore_above": 512},
"suggest": { "type": "completion"}
}
}
}
}
`
package parser
import (
"fmt"
"sort"
"strings"
)
// DecodeToNode converts the labels to a tree of nodes.
// If any filters are present, labels which do not match the filters are skipped.
// Keys are split on "."; the first segment of every key must equal rootName.
// A "name[idx]" segment is split into a "name" part and an "[idx]" part so
// that slice indices become their own nodes.
// NOTE(review): an empty segment (e.g. a key containing "..") would make
// v[0] panic — confirm keys are validated upstream.
func DecodeToNode(labels map[string]string, rootName string, filters ...string) (*Node, error) {
	sortedKeys := sortKeys(labels, filters)
	var node *Node
	for i, key := range sortedKeys {
		split := strings.Split(key, ".")
		if split[0] != rootName {
			return nil, fmt.Errorf("invalid label root %s", split[0])
		}
		var parts []string
		for _, v := range split {
			// A segment may not start with '[': brackets only delimit slice indices.
			if v[0] == '[' {
				return nil, fmt.Errorf("invalid leading character '[' in field name (bracket is a slice delimiter): %s", v)
			}
			// Split "name[idx]" into "name" and "[idx]". (The v[0] != '['
			// clause is always true here, since the branch above returned.)
			if strings.HasSuffix(v, "]") && v[0] != '[' {
				indexLeft := strings.Index(v, "[")
				parts = append(parts, v[:indexLeft], v[indexLeft:])
			} else {
				parts = append(parts, v)
			}
		}
		if i == 0 {
			// Lazily create the root on the first (sorted) key.
			node = &Node{}
		}
		decodeToNode(node, parts, labels[key])
	}
	return node, nil
}
// decodeToNode recursively inserts path into the tree rooted at root,
// storing value on the leaf node. Existing children (matched
// case-insensitively) are reused; missing ones are created and appended.
func decodeToNode(root *Node, path []string, value string) {
	if root.Name == "" {
		root.Name = path[0]
	}
	if len(path) == 1 {
		// Leaf: record the label's value.
		root.Value = value
		return
	}
	child := containsNode(root.Children, path[1])
	if child == nil {
		child = &Node{Name: path[1]}
		root.Children = append(root.Children, child)
	}
	decodeToNode(child, path[1:], value)
}
// containsNode returns the first node whose name equals name
// (case-insensitively), or nil when no such node exists.
func containsNode(nodes []*Node, name string) *Node {
	for _, candidate := range nodes {
		if strings.EqualFold(name, candidate.Name) {
			return candidate
		}
	}
	return nil
}
// sortKeys returns the label keys in sorted order. When filters are given,
// only keys with a case-insensitive prefix matching at least one filter are
// kept; each key appears at most once.
// Fix: the original used `continue` inside the filter loop, which only
// advanced to the next filter — a key matching several (overlapping)
// filters was appended once per match. `break` stops after the first match.
func sortKeys(labels map[string]string, filters []string) []string {
	var sortedKeys []string
	for key := range labels {
		if len(filters) == 0 {
			sortedKeys = append(sortedKeys, key)
			continue
		}
		for _, filter := range filters {
			if len(key) >= len(filter) && strings.EqualFold(key[:len(filter)], filter) {
				sortedKeys = append(sortedKeys, key)
				break
			}
		}
	}
	sort.Strings(sortedKeys)
	return sortedKeys
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.