code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package gostructureless
import "fmt"
type Array struct {
Path string
t string
x []interface{}
}
func newArray(path string, a []interface{}) *Array {
return &Array{
Path: path,
t: "array",
x: a,
}
}
func (a *Array) GetPath() string {
return a.Path
}
func (a *Array) Bool() (bool, error) {
return false, newTypeError(a.Path, a.t)
}
func (a *Array) BoolOr(def bool) bool {
return def
}
func (a *Array) I64() (int64, error) {
return 0, newTypeError(a.Path, a.t)
}
func (a *Array) I64Or(def int64) int64 {
return def
}
func (a *Array) F64() (float64, error) {
return 0.0, newTypeError(a.Path, a.t)
}
func (a *Array) F64Or(def float64) float64 {
return def
}
func (a *Array) String() (string, error) {
return "", newTypeError(a.Path, a.t)
}
func (a *Array) StringOr(def string) string {
return def
}
func (a *Array) At(i int) Value {
arr := a.x
path := fmt.Sprintf("%s[%d]", a.Path, i)
if i < len(arr) {
return NewValue(path, arr[i])
} else {
return newNotFoundErrorNode(path)
}
}
func (a *Array) StringArray() ([]string, error) {
var result []string
for i, elem := range a.x {
if s, ok := elem.(string); ok == true {
result = append(result, s)
} else {
return nil, a.elementTypeError(i, "string")
}
}
return result, nil
}
func (a *Array) StringArrayOr(def []string) []string {
if a, e := a.StringArray(); e != nil {
return def
} else {
return a
}
}
func (a *Array) Arr() ([]Value, error) {
var arr []Value
for i := 0; i < len(a.x); i++ {
arr = append(arr, a.At(i))
}
return arr, nil
}
func (a *Array) Key(key string) Value {
return newTypeErrorNode(a.Path, "array")
}
func (a *Array) Map() (map[string]interface{}, error) {
return nil, newTypeError(a.Path, a.t)
}
func (a *Array) MapOr(def map[string]interface{}) map[string]interface{} {
return def
}
func (a *Array) Obj() (map[string]Value, error) {
return nil, newTypeError(a.Path, a.t)
}
func (a *Array) IsNil() bool {
return false
}
func (a *Array) elementTypeError(i int, t string) error {
return newTypeError(fmt.Sprintf("%s[%d]", a.Path, i), t)
} | array.go | 0.609175 | 0.406862 | array.go | starcoder |
package main
/**
* <p>Implement the <code>myAtoi(string s)</code> function, which converts a string to a 32-bit signed integer (similar to C/C++'s <code>atoi</code> function).</p>
<p>The algorithm for <code>myAtoi(string s)</code> is as follows:</p>
<ol>
<li>Read in and ignore any leading whitespace.</li>
<li>Check if the next character (if not already at the end of the string) is <code>'-'</code> or <code>'+'</code>. Read this character in if it is either. This determines if the final result is negative or positive respectively. Assume the result is positive if neither is present.</li>
<li>Read in next the characters until the next non-digit charcter or the end of the input is reached. The rest of the string is ignored.</li>
<li>Convert these digits into an integer (i.e. <code>"123" -> 123</code>, <code>"0032" -> 32</code>). If no digits were read, then the integer is <code>0</code>. Change the sign as necessary (from step 2).</li>
<li>If the integer is out of the 32-bit signed integer range <code>[-2<sup>31</sup>, 2<sup>31</sup> - 1]</code>, then clamp the integer so that it remains in the range. Specifically, integers less than <code>-2<sup>31</sup></code> should be clamped to <code>-2<sup>31</sup></code>, and integers greater than <code>2<sup>31</sup> - 1</code> should be clamped to <code>2<sup>31</sup> - 1</code>.</li>
<li>Return the integer as the final result.</li>
</ol>
<p><strong>Note:</strong></p>
<ul>
<li>Only the space character <code>' '</code> is considered a whitespace character.</li>
<li><strong>Do not ignore</strong> any characters other than the leading whitespace or the rest of the string after the digits.</li>
</ul>
<p> </p>
<p><strong>Example 1:</strong></p>
<pre>
<strong>Input:</strong> s = "42"
<strong>Output:</strong> 42
<strong>Explanation:</strong> The underlined characters are what is read in, the caret is the current reader position.
Step 1: "42" (no characters read because there is no leading whitespace)
^
Step 2: "42" (no characters read because there is neither a '-' nor '+')
^
Step 3: "<u>42</u>" ("42" is read in)
^
The parsed integer is 42.
Since 42 is in the range [-2<sup>31</sup>, 2<sup>31</sup> - 1], the final result is 42.
</pre>
<p><strong>Example 2:</strong></p>
<pre>
<strong>Input:</strong> s = " -42"
<strong>Output:</strong> -42
<strong>Explanation:</strong>
Step 1: "<u> </u>-42" (leading whitespace is read and ignored)
^
Step 2: " <u>-</u>42" ('-' is read, so the result should be negative)
^
Step 3: " -<u>42</u>" ("42" is read in)
^
The parsed integer is -42.
Since -42 is in the range [-2<sup>31</sup>, 2<sup>31</sup> - 1], the final result is -42.
</pre>
<p><strong>Example 3:</strong></p>
<pre>
<strong>Input:</strong> s = "4193 with words"
<strong>Output:</strong> 4193
<strong>Explanation:</strong>
Step 1: "4193 with words" (no characters read because there is no leading whitespace)
^
Step 2: "4193 with words" (no characters read because there is neither a '-' nor '+')
^
Step 3: "<u>4193</u> with words" ("4193" is read in; reading stops because the next character is a non-digit)
^
The parsed integer is 4193.
Since 4193 is in the range [-2<sup>31</sup>, 2<sup>31</sup> - 1], the final result is 4193.
</pre>
<p><strong>Example 4:</strong></p>
<pre>
<strong>Input:</strong> s = "words and 987"
<strong>Output:</strong> 0
<strong>Explanation:
</strong>Step 1: "words and 987" (no characters read because there is no leading whitespace)
^
Step 2: "words and 987" (no characters read because there is neither a '-' nor '+')
^
Step 3: "words and 987" (reading stops immediately because there is a non-digit 'w')
^
The parsed integer is 0 because no digits were read.
Since 0 is in the range [-2<sup>31</sup>, 2<sup>31</sup> - 1], the final result is 0.
</pre>
<p><strong>Example 5:</strong></p>
<pre>
<strong>Input:</strong> s = "-91283472332"
<strong>Output:</strong> -2147483648
<strong>Explanation:
</strong>Step 1: "-91283472332" (no characters read because there is no leading whitespace)
^
Step 2: "<u>-</u>91283472332" ('-' is read, so the result should be negative)
^
Step 3: "-<u>91283472332</u>" ("91283472332" is read in)
^
The parsed integer is -91283472332.
Since -91283472332 is less than the lower bound of the range [-2<sup>31</sup>, 2<sup>31</sup> - 1], the final result is clamped to -2<sup>31</sup> = -2147483648.<strong><span style="display: none;"> </span></strong>
</pre>
<p> </p>
<p><strong>Constraints:</strong></p>
<ul>
<li><code>0 <= s.length <= 200</code></li>
<li><code>s</code> consists of English letters (lower-case and upper-case), digits (<code>0-9</code>), <code>' '</code>, <code>'+'</code>, <code>'-'</code>, and <code>'.'</code>.</li>
</ul>
**/
/**
* "42"
**/
var digits = []rune{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
var digitsMap = map[rune]int{
'0': 0,
'1': 1,
'2': 2,
'3': 3,
'4': 4,
'5': 5,
'6': 6,
'7': 7,
'8': 8,
'9': 9,
}
func myAtoi(s string) int {
sign := +1
first := true
resRune := []rune{}
for _, c := range s {
if first {
if c == ' ' {
continue
}
if c == '+' {
sign = +1
first = false
continue
}
if c == '-' {
sign = -1
first = false
continue
}
if !isDigits(c) {
return 0
}
first = false
if c == '0' {
continue
}
resRune = append(resRune, c)
continue
}
if isDigits(c) {
if len(resRune) == 0 && c == '0' {
continue
}
resRune = append(resRune, c)
continue
}
break
}
if len(resRune) == 0 {
return 0
}
max := 1
for i := 0; i < 31; i++ {
max = max * 2
}
numDigits := len(resRune)
res := 0
for _, r := range resRune {
d := 1
for i := 0; i < numDigits-1; i++ {
d = d * 10
if d > max {
if sign > 0 {
return max - 1
} else {
return -max
}
}
}
numDigits = numDigits - 1
if digitsMap[r] == 0 {
continue
}
res = res + d*digitsMap[r]
if res < 0 {
if sign > 0 {
return max - 1
} else {
return -max
}
}
}
resSign := res * sign
if resSign > 0 {
if resSign > max-1 {
return max - 1
}
} else {
if resSign < -max {
return -max
}
}
return resSign
}
func isDigits(c rune) bool {
for _, d := range digits {
if c == d {
return true
}
}
return false
} | algorithms/8.string-to-integer-atoi.go | 0.752468 | 0.652408 | 8.string-to-integer-atoi.go | starcoder |
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// UnifiedRoleAssignmentMultiple
type UnifiedRoleAssignmentMultiple struct {
Entity
// Ids of the app specific scopes when the assignment scopes are app specific. The scopes of an assignment determines the set of resources for which the principal has been granted access. Directory scopes are shared scopes stored in the directory that are understood by multiple applications. Use / for tenant-wide scope. App scopes are scopes that are defined and understood by this application only.
appScopeIds []string
// Read-only collection with details of the app specific scopes when the assignment scopes are app specific. Containment entity. Read-only.
appScopes []AppScopeable
// The condition property
condition *string
// Description of the role assignment.
description *string
// Ids of the directory objects representing the scopes of the assignment. The scopes of an assignment determine the set of resources for which the principals have been granted access. Directory scopes are shared scopes stored in the directory that are understood by multiple applications. App scopes are scopes that are defined and understood by this application only.
directoryScopeIds []string
// Read-only collection referencing the directory objects that are scope of the assignment. Provided so that callers can get the directory objects using $expand at the same time as getting the role assignment. Read-only. Supports $expand.
directoryScopes []DirectoryObjectable
// Name of the role assignment. Required.
displayName *string
// Identifiers of the principals to which the assignment is granted. Supports $filter (any operator only).
principalIds []string
// Read-only collection referencing the assigned principals. Provided so that callers can get the principals using $expand at the same time as getting the role assignment. Read-only. Supports $expand.
principals []DirectoryObjectable
// Specifies the roleDefinition that the assignment is for. Provided so that callers can get the role definition using $expand at the same time as getting the role assignment. Supports $filter (eq operator on id, isBuiltIn, and displayName, and startsWith operator on displayName) and $expand.
roleDefinition UnifiedRoleDefinitionable
// Identifier of the unifiedRoleDefinition the assignment is for.
roleDefinitionId *string
}
// NewUnifiedRoleAssignmentMultiple instantiates a new unifiedRoleAssignmentMultiple and sets the default values.
func NewUnifiedRoleAssignmentMultiple()(*UnifiedRoleAssignmentMultiple) {
m := &UnifiedRoleAssignmentMultiple{
Entity: *NewEntity(),
}
return m
}
// CreateUnifiedRoleAssignmentMultipleFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value
func CreateUnifiedRoleAssignmentMultipleFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
return NewUnifiedRoleAssignmentMultiple(), nil
}
// GetAppScopeIds gets the appScopeIds property value. Ids of the app specific scopes when the assignment scopes are app specific. The scopes of an assignment determines the set of resources for which the principal has been granted access. Directory scopes are shared scopes stored in the directory that are understood by multiple applications. Use / for tenant-wide scope. App scopes are scopes that are defined and understood by this application only.
func (m *UnifiedRoleAssignmentMultiple) GetAppScopeIds()([]string) {
if m == nil {
return nil
} else {
return m.appScopeIds
}
}
// GetAppScopes gets the appScopes property value. Read-only collection with details of the app specific scopes when the assignment scopes are app specific. Containment entity. Read-only.
func (m *UnifiedRoleAssignmentMultiple) GetAppScopes()([]AppScopeable) {
if m == nil {
return nil
} else {
return m.appScopes
}
}
// GetCondition gets the condition property value. The condition property
func (m *UnifiedRoleAssignmentMultiple) GetCondition()(*string) {
if m == nil {
return nil
} else {
return m.condition
}
}
// GetDescription gets the description property value. Description of the role assignment.
func (m *UnifiedRoleAssignmentMultiple) GetDescription()(*string) {
if m == nil {
return nil
} else {
return m.description
}
}
// GetDirectoryScopeIds gets the directoryScopeIds property value. Ids of the directory objects representing the scopes of the assignment. The scopes of an assignment determine the set of resources for which the principals have been granted access. Directory scopes are shared scopes stored in the directory that are understood by multiple applications. App scopes are scopes that are defined and understood by this application only.
func (m *UnifiedRoleAssignmentMultiple) GetDirectoryScopeIds()([]string) {
if m == nil {
return nil
} else {
return m.directoryScopeIds
}
}
// GetDirectoryScopes gets the directoryScopes property value. Read-only collection referencing the directory objects that are scope of the assignment. Provided so that callers can get the directory objects using $expand at the same time as getting the role assignment. Read-only. Supports $expand.
func (m *UnifiedRoleAssignmentMultiple) GetDirectoryScopes()([]DirectoryObjectable) {
if m == nil {
return nil
} else {
return m.directoryScopes
}
}
// GetDisplayName gets the displayName property value. Name of the role assignment. Required.
func (m *UnifiedRoleAssignmentMultiple) GetDisplayName()(*string) {
if m == nil {
return nil
} else {
return m.displayName
}
}
// GetFieldDeserializers the deserialization information for the current model
func (m *UnifiedRoleAssignmentMultiple) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
res := m.Entity.GetFieldDeserializers()
res["appScopeIds"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetCollectionOfPrimitiveValues("string")
if err != nil {
return err
}
if val != nil {
res := make([]string, len(val))
for i, v := range val {
res[i] = *(v.(*string))
}
m.SetAppScopeIds(res)
}
return nil
}
res["appScopes"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetCollectionOfObjectValues(CreateAppScopeFromDiscriminatorValue)
if err != nil {
return err
}
if val != nil {
res := make([]AppScopeable, len(val))
for i, v := range val {
res[i] = v.(AppScopeable)
}
m.SetAppScopes(res)
}
return nil
}
res["condition"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetCondition(val)
}
return nil
}
res["description"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetDescription(val)
}
return nil
}
res["directoryScopeIds"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetCollectionOfPrimitiveValues("string")
if err != nil {
return err
}
if val != nil {
res := make([]string, len(val))
for i, v := range val {
res[i] = *(v.(*string))
}
m.SetDirectoryScopeIds(res)
}
return nil
}
res["directoryScopes"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetCollectionOfObjectValues(CreateDirectoryObjectFromDiscriminatorValue)
if err != nil {
return err
}
if val != nil {
res := make([]DirectoryObjectable, len(val))
for i, v := range val {
res[i] = v.(DirectoryObjectable)
}
m.SetDirectoryScopes(res)
}
return nil
}
res["displayName"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetDisplayName(val)
}
return nil
}
res["principalIds"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetCollectionOfPrimitiveValues("string")
if err != nil {
return err
}
if val != nil {
res := make([]string, len(val))
for i, v := range val {
res[i] = *(v.(*string))
}
m.SetPrincipalIds(res)
}
return nil
}
res["principals"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetCollectionOfObjectValues(CreateDirectoryObjectFromDiscriminatorValue)
if err != nil {
return err
}
if val != nil {
res := make([]DirectoryObjectable, len(val))
for i, v := range val {
res[i] = v.(DirectoryObjectable)
}
m.SetPrincipals(res)
}
return nil
}
res["roleDefinition"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetObjectValue(CreateUnifiedRoleDefinitionFromDiscriminatorValue)
if err != nil {
return err
}
if val != nil {
m.SetRoleDefinition(val.(UnifiedRoleDefinitionable))
}
return nil
}
res["roleDefinitionId"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetRoleDefinitionId(val)
}
return nil
}
return res
}
// GetPrincipalIds gets the principalIds property value. Identifiers of the principals to which the assignment is granted. Supports $filter (any operator only).
func (m *UnifiedRoleAssignmentMultiple) GetPrincipalIds()([]string) {
if m == nil {
return nil
} else {
return m.principalIds
}
}
// GetPrincipals gets the principals property value. Read-only collection referencing the assigned principals. Provided so that callers can get the principals using $expand at the same time as getting the role assignment. Read-only. Supports $expand.
func (m *UnifiedRoleAssignmentMultiple) GetPrincipals()([]DirectoryObjectable) {
if m == nil {
return nil
} else {
return m.principals
}
}
// GetRoleDefinition gets the roleDefinition property value. Specifies the roleDefinition that the assignment is for. Provided so that callers can get the role definition using $expand at the same time as getting the role assignment. Supports $filter (eq operator on id, isBuiltIn, and displayName, and startsWith operator on displayName) and $expand.
func (m *UnifiedRoleAssignmentMultiple) GetRoleDefinition()(UnifiedRoleDefinitionable) {
if m == nil {
return nil
} else {
return m.roleDefinition
}
}
// GetRoleDefinitionId gets the roleDefinitionId property value. Identifier of the unifiedRoleDefinition the assignment is for.
func (m *UnifiedRoleAssignmentMultiple) GetRoleDefinitionId()(*string) {
if m == nil {
return nil
} else {
return m.roleDefinitionId
}
}
// Serialize serializes information the current object
func (m *UnifiedRoleAssignmentMultiple) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
err := m.Entity.Serialize(writer)
if err != nil {
return err
}
if m.GetAppScopeIds() != nil {
err = writer.WriteCollectionOfStringValues("appScopeIds", m.GetAppScopeIds())
if err != nil {
return err
}
}
if m.GetAppScopes() != nil {
cast := make([]i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, len(m.GetAppScopes()))
for i, v := range m.GetAppScopes() {
cast[i] = v.(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable)
}
err = writer.WriteCollectionOfObjectValues("appScopes", cast)
if err != nil {
return err
}
}
{
err = writer.WriteStringValue("condition", m.GetCondition())
if err != nil {
return err
}
}
{
err = writer.WriteStringValue("description", m.GetDescription())
if err != nil {
return err
}
}
if m.GetDirectoryScopeIds() != nil {
err = writer.WriteCollectionOfStringValues("directoryScopeIds", m.GetDirectoryScopeIds())
if err != nil {
return err
}
}
if m.GetDirectoryScopes() != nil {
cast := make([]i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, len(m.GetDirectoryScopes()))
for i, v := range m.GetDirectoryScopes() {
cast[i] = v.(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable)
}
err = writer.WriteCollectionOfObjectValues("directoryScopes", cast)
if err != nil {
return err
}
}
{
err = writer.WriteStringValue("displayName", m.GetDisplayName())
if err != nil {
return err
}
}
if m.GetPrincipalIds() != nil {
err = writer.WriteCollectionOfStringValues("principalIds", m.GetPrincipalIds())
if err != nil {
return err
}
}
if m.GetPrincipals() != nil {
cast := make([]i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, len(m.GetPrincipals()))
for i, v := range m.GetPrincipals() {
cast[i] = v.(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable)
}
err = writer.WriteCollectionOfObjectValues("principals", cast)
if err != nil {
return err
}
}
{
err = writer.WriteObjectValue("roleDefinition", m.GetRoleDefinition())
if err != nil {
return err
}
}
{
err = writer.WriteStringValue("roleDefinitionId", m.GetRoleDefinitionId())
if err != nil {
return err
}
}
return nil
}
// SetAppScopeIds sets the appScopeIds property value. Ids of the app specific scopes when the assignment scopes are app specific. The scopes of an assignment determines the set of resources for which the principal has been granted access. Directory scopes are shared scopes stored in the directory that are understood by multiple applications. Use / for tenant-wide scope. App scopes are scopes that are defined and understood by this application only.
func (m *UnifiedRoleAssignmentMultiple) SetAppScopeIds(value []string)() {
if m != nil {
m.appScopeIds = value
}
}
// SetAppScopes sets the appScopes property value. Read-only collection with details of the app specific scopes when the assignment scopes are app specific. Containment entity. Read-only.
func (m *UnifiedRoleAssignmentMultiple) SetAppScopes(value []AppScopeable)() {
if m != nil {
m.appScopes = value
}
}
// SetCondition sets the condition property value. The condition property
func (m *UnifiedRoleAssignmentMultiple) SetCondition(value *string)() {
if m != nil {
m.condition = value
}
}
// SetDescription sets the description property value. Description of the role assignment.
func (m *UnifiedRoleAssignmentMultiple) SetDescription(value *string)() {
if m != nil {
m.description = value
}
}
// SetDirectoryScopeIds sets the directoryScopeIds property value. Ids of the directory objects representing the scopes of the assignment. The scopes of an assignment determine the set of resources for which the principals have been granted access. Directory scopes are shared scopes stored in the directory that are understood by multiple applications. App scopes are scopes that are defined and understood by this application only.
func (m *UnifiedRoleAssignmentMultiple) SetDirectoryScopeIds(value []string)() {
if m != nil {
m.directoryScopeIds = value
}
}
// SetDirectoryScopes sets the directoryScopes property value. Read-only collection referencing the directory objects that are scope of the assignment. Provided so that callers can get the directory objects using $expand at the same time as getting the role assignment. Read-only. Supports $expand.
func (m *UnifiedRoleAssignmentMultiple) SetDirectoryScopes(value []DirectoryObjectable)() {
if m != nil {
m.directoryScopes = value
}
}
// SetDisplayName sets the displayName property value. Name of the role assignment. Required.
func (m *UnifiedRoleAssignmentMultiple) SetDisplayName(value *string)() {
if m != nil {
m.displayName = value
}
}
// SetPrincipalIds sets the principalIds property value. Identifiers of the principals to which the assignment is granted. Supports $filter (any operator only).
func (m *UnifiedRoleAssignmentMultiple) SetPrincipalIds(value []string)() {
if m != nil {
m.principalIds = value
}
}
// SetPrincipals sets the principals property value. Read-only collection referencing the assigned principals. Provided so that callers can get the principals using $expand at the same time as getting the role assignment. Read-only. Supports $expand.
func (m *UnifiedRoleAssignmentMultiple) SetPrincipals(value []DirectoryObjectable)() {
if m != nil {
m.principals = value
}
}
// SetRoleDefinition sets the roleDefinition property value. Specifies the roleDefinition that the assignment is for. Provided so that callers can get the role definition using $expand at the same time as getting the role assignment. Supports $filter (eq operator on id, isBuiltIn, and displayName, and startsWith operator on displayName) and $expand.
func (m *UnifiedRoleAssignmentMultiple) SetRoleDefinition(value UnifiedRoleDefinitionable)() {
if m != nil {
m.roleDefinition = value
}
}
// SetRoleDefinitionId sets the roleDefinitionId property value. Identifier of the unifiedRoleDefinition the assignment is for.
func (m *UnifiedRoleAssignmentMultiple) SetRoleDefinitionId(value *string)() {
if m != nil {
m.roleDefinitionId = value
}
} | models/unified_role_assignment_multiple.go | 0.649134 | 0.471102 | unified_role_assignment_multiple.go | starcoder |
package cpu
import "errors"
type amode func(bool) *uint8
// helper functions
func setZeroAndNegative(value uint8) {
zero = value == 0
negative = value >= 0x80
}
func readUInt16(address uint16) uint16 {
ticksToNext += 2
return uint16(Memory[address]) + (uint16(Memory[address + 1]) << 8)
}
func readUInt16WithError(address uint16) uint16 {
addressLo := address & 0x00FF
addressHi := address & 0xFF00
ticksToNext += 2
return uint16(Memory[address]) + (uint16(Memory[addressHi + ((addressLo + 1) & 0x00FF)]) << 8)
}
func pushByte(value uint8) {
Stack[sp] = value
sp--
ticksToNext++
}
func pushWord(value uint16) {
pushByte(uint8(value >> 8))
pushByte(uint8(value))
}
func pullByte() uint8 {
sp++
ticksToNext++
return Stack[sp]
}
func pullWord() uint16 {
addressLo := pullByte()
addressHi := pullByte()
return uint16(addressLo) + (uint16(addressHi) << 8)
}
func getBit(value, index uint8) bool {
return value & (1 << index) != 0
}
// base for branches
func jumpIfTrue(condition, isBvc bool) {
if condition {
ticksToNext++
oldPc := pc + 2
pc = uint16(int(pc) + int(int8(Memory[pc + 1])))
if !isBvc && (oldPc & 0xFF00 < pc & 0xFF00) {
ticksToNext++
}
} else {
pc += 2
}
ticksToNext++
}
func consumeTicksForWrite(write bool) {
if write {
ticksToNext += 2
}
}
func incrementPc(increment uint16) {
pc += increment
}
// ------ VALUE ADDRESSING MODES -------
func immediate(write bool) *uint8 {
if write {
errors.New("should never happen")
}
ticksToNext++
defer incrementPc(2)
return &Memory[pc + 1]
}
func accumulator(write bool) *uint8 {
if !write {
errors.New("should never happen")
}
ticksToNext++
pc += 1
return &a
}
func zeroPageIndexed(address uint16, index uint8, write bool) *uint8 {
consumeTicksForWrite(write)
pc += 2
ticksToNext += 2
return &Memory[Memory[address] + index]
}
func zeroPage(write bool) *uint8 {
return zeroPageIndexed(pc + 1, 0, write)
}
func zeroPageX(write bool) *uint8 {
ticksToNext++
return zeroPageIndexed(pc + 1, x, write)
}
func zeroPageY(write bool) *uint8 {
ticksToNext++
return zeroPageIndexed(pc + 1, y, write)
}
func absolute(write bool) *uint8 {
consumeTicksForWrite(write)
defer incrementPc(3)
ticksToNext++
return &Memory[absoluteAddress()]
}
func absoluteIndexed(address uint16, index uint8, write bool) *uint8 {
ticksToNext++
indexedAddress := address + uint16(index)
if write || (address & 0xFF00 < indexedAddress & 0xFF00) {
ticksToNext++
}
pc += 3
consumeTicksForWrite(write)
return &Memory[indexedAddress]
}
func absoluteX(write bool) *uint8 {
return absoluteIndexed(absoluteAddress(), x, write)
}
func absoluteY(write bool) *uint8 {
return absoluteIndexed(absoluteAddress(), y, write)
}
func indexedIndirect(write bool) *uint8 {
if write {
errors.New("should never happen")
}
defer incrementPc(2)
ticksToNext += 3
return &Memory[readUInt16(uint16(Memory[pc + 1] + x))]
}
func indirectIndexed(write bool) *uint8 {
if write {
errors.New("should never happen")
}
defer incrementPc(2)
ticksToNext++
return absoluteIndexed(readUInt16(uint16(Memory[pc + 1])), y, write)
}
// ------ POINTER ADDRESSING MODES ------
func absoluteAddress() uint16 {
return readUInt16(pc + 1)
}
func indirectAddress() uint16 {
return readUInt16WithError(absoluteAddress())
}
// ------ INSTRUCTIONS -------
func adc(addressingMode amode) {
value := *addressingMode(false)
var carryInc uint8 = 0
if carry {
carryInc = 1
}
if decimalMode {
aL := a & 0x0F + value & 0x0F + carryInc
aH := a >> 4 + value >> 4
if aL > 0x0F {
aH++
}
if aL > 0x09 {
aL += 0x06
}
zero = a + value + carryInc == 0
negative = aH & 0x08 != 0
overflow = (a & 0x80 == value & 0x80) && (value & 0x80 != (aH << 4) & 0x80)
if aH > 0x09 {
aH += 0x06
}
carry = aH > 0x0F
a = (aH << 4) + (aL & 0x0F)
} else {
newValue := uint16(a) + uint16(value)
if carry {
newValue++
}
carry = newValue > 0xFF
overflow = (a & 0x80 == value & 0x80) && (value & 0x80 != uint8(newValue) & 0x80)
a = uint8(newValue)
setZeroAndNegative(a)
}
}
func and(addressingMode amode) {
a &= *addressingMode(false)
setZeroAndNegative(a)
}
func asl(addressingMode amode) {
ptr := addressingMode(true)
carry = *ptr >= 128
*ptr <<= 1
setZeroAndNegative(*ptr)
}
func bcc() {
jumpIfTrue(!carry, false)
}
func bcs() {
jumpIfTrue(carry, false)
}
func beq() {
jumpIfTrue(zero, false)
}
func bit(addressingMode amode) {
value := *addressingMode(false)
negative = value & 128 == 128
overflow = value & 64 == 64
zero = value & a == 0
}
func bmi() {
jumpIfTrue(negative, false)
}
func bne() {
jumpIfTrue(!zero, false)
}
func bpl() {
jumpIfTrue(!negative, false)
}
func brk() {
pushWord(pc + 2)
pushByte(getPs())
pc = 0xFFFE
ticksToNext += 3
}
func bvc() {
jumpIfTrue(!overflow, true)
}
func bvs() {
jumpIfTrue(overflow, false)
}
func clc() {
carry = false
pc++
ticksToNext++
}
func cld() {
decimalMode = false
pc++
ticksToNext++
}
func cli() {
interruptDisable = false
pc++
ticksToNext++
}
func clv() {
overflow = false
pc++
ticksToNext++
}
func cmp(addressingMode amode) {
value := *addressingMode(false)
carry = a >= value
setZeroAndNegative(value)
}
func cpx(addressingMode amode) {
value := *addressingMode(false)
carry = x >= value
setZeroAndNegative(x)
}
func cpy(addressingMode amode) {
value := *addressingMode(false)
carry = y >= value
setZeroAndNegative(y)
}
func dec(addressingMode amode) {
ptr := addressingMode(true)
*ptr--
setZeroAndNegative(*ptr)
}
func dex() {
x--
setZeroAndNegative(x)
pc++
ticksToNext++
}
func dey() {
y--
setZeroAndNegative(y)
pc++
ticksToNext++
}
func eor(addressingMode amode) {
a ^= *addressingMode(false)
setZeroAndNegative(a)
}
func inc(addressingMode amode) {
ptr := addressingMode(true)
*ptr++
setZeroAndNegative(*ptr)
}
func inx() {
x++
setZeroAndNegative(x)
pc++
ticksToNext++
}
func iny() {
y++
setZeroAndNegative(y)
pc++
ticksToNext++
}
func jmp(addressingMode func() uint16) {
pc = addressingMode()
}
func jsr() {
pushWord(pc + 2)
pc = absoluteAddress()
ticksToNext++
}
func lda(addressingMode amode) {
a = *addressingMode(false)
setZeroAndNegative(a)
}
func ldx(addressingMode amode) {
x = *addressingMode(false)
setZeroAndNegative(x)
}
func ldy(addressingMode amode) {
y = *addressingMode(false)
setZeroAndNegative(y)
}
func lsr(addressingMode amode) {
ptr := addressingMode(true)
carry = *ptr & 1 == 1
*ptr >>= 1
zero = *ptr == 0
negative = false
}
func nop() {
pc++
ticksToNext++
}
func ora(addressingMode amode) {
a |= *addressingMode(false)
setZeroAndNegative(a)
}
func pha() {
pushByte(a)
pc++
ticksToNext++
}
func php() {
pushByte(getPs())
pc++
ticksToNext++
}
func pla() {
a = pullByte()
pc++
ticksToNext += 2
}
func plp() {
setPs(pullByte())
pc++
ticksToNext += 2
}
func rol(addressingMode amode) {
ptr := addressingMode(true)
newCarry := getBit(*ptr, 7)
*ptr <<= 1
if carry {
*ptr++
}
carry = newCarry
setZeroAndNegative(*ptr)
}
func ror(addressingMode amode) {
ptr := addressingMode(true)
newCarry := getBit(*ptr, 0)
*ptr >>= 1
if carry {
*ptr += 128
}
carry = newCarry
setZeroAndNegative(*ptr)
}
func rti() {
setPs(pullByte())
pc = pullWord()
ticksToNext += 2
}
func rts() {
pc = pullWord() + 1
ticksToNext += 3
}
func sbc(addressingMode amode) {
value := *addressingMode(false)
var carryDec uint8 = 0
if !carry {
carryDec = 1
}
newValue := uint16(a) - uint16(value) - uint16(carryDec)
overflow = (a & 0x80 != value & 0x80) && (value & 0x80 == uint8(newValue) & 0x80)
if decimalMode {
aL := a & 0x0F - value & 0x0F - carryDec
aH := a >> 4 - value >> 4
if aL > 0x0F {
aL -= 0x06
aH--
}
if aH > 0x0F {
aH -= 0x06
}
a = (aH << 4) + (aL & 0x0F)
} else {
a = uint8(newValue)
}
carry = newValue < 0x0100
setZeroAndNegative(a)
}
func sec() {
carry = true
pc++
ticksToNext++
}
func sed() {
decimalMode = true
pc++
ticksToNext++
}
func sei() {
interruptDisable = true
pc++
ticksToNext++
}
func sta(addressingMode amode) {
*addressingMode(true) = a
ticksToNext -= 2
}
func stx(addressingMode amode) {
*addressingMode(true) = x
ticksToNext -= 2
}
func sty(addressingMode amode) {
*addressingMode(true) = y
ticksToNext -= 2
}
func tax() {
x = a
setZeroAndNegative(x)
pc++
ticksToNext++
}
func tay() {
y = a
setZeroAndNegative(y)
pc++
ticksToNext++
}
func tsx() {
x = sp
setZeroAndNegative(x)
pc++
ticksToNext++
}
func txa() {
a = x
setZeroAndNegative(a)
pc++
ticksToNext++
}
func txs() {
sp = x
pc++
ticksToNext++
}
func tya() {
a = y
setZeroAndNegative(a)
pc++
ticksToNext++
} | cpu/instruction_set.go | 0.554229 | 0.402099 | instruction_set.go | starcoder |
package recurrence
import (
"encoding/json"
"fmt"
"strconv"
"time"
)
// A Day specifies a day of the month. (1, 2, 3, ...31)
type Day int
// IsOccurring implements the Schedule interface.
func (d Day) IsOccurring(t time.Time) bool {
dayInt := int(d)
if dayInt == Last {
return isLastDayInMonth(t)
}
return dayInt == t.Day()
}
// Occurrences implements the Schedule interface.
func (d Day) Occurrences(tr TimeRange) chan time.Time {
return occurrencesFor(d, tr)
}
func (d Day) nextAfter(t time.Time) (time.Time, error) {
desiredDay := int(d)
if desiredDay == Last {
if isLastDayInMonth(t) {
return t.AddDate(0, 0, 1).AddDate(0, 1, -1), nil
}
return firstDayOfMonth(t).AddDate(0, 2, -1), nil
}
if t.Day() > desiredDay {
if isLastDayInMonth(t) && desiredDay == First {
return t.AddDate(0, 0, 1), nil
}
return d.nextAfter(t.AddDate(0, 0, 1))
}
if t.Day() < desiredDay {
totalDays := lastDayOfMonth(t).Day()
if totalDays < desiredDay {
return d.nextAfter(t.AddDate(0, 1, 0))
}
return time.Date(t.Year(), t.Month(), desiredDay, 0, 0, 0, 0, time.UTC), nil
}
totalDaysNextMonth := lastDayOfMonth(lastDayOfMonth(t).AddDate(0, 0, 1)).Day()
if totalDaysNextMonth < desiredDay {
return d.nextAfter(t.AddDate(0, 2, -1))
}
return t.AddDate(0, 1, 0), nil
}
// MarshalJSON implements the json.Marshaler interface.
func (d Day) MarshalJSON() ([]byte, error) {
if int(d) == Last {
return json.Marshal(map[string]interface{}{"day": "Last"})
}
return json.Marshal(map[string]interface{}{"day": int(d)})
}
// UnmarshalJSON implements the json.Unmarshaler interface.
func (d *Day) UnmarshalJSON(b []byte) error {
s := string(b)
i, err := strconv.ParseInt(s, 10, 0)
if err != nil {
if s != `"Last"` {
return fmt.Errorf("day cannot unmarshal %s", b)
}
*d = Day(Last)
} else {
if i < 1 || i > 31 {
return fmt.Errorf("day must be 1-31. Was %#v", i)
}
*d = Day(i)
}
return nil
}
func isLastDayInMonth(t time.Time) bool {
return t.Month() != t.AddDate(0, 0, 1).Month()
}
func lastDayOfMonth(t time.Time) time.Time {
return firstDayOfMonth(t).AddDate(0, 1, -1)
}
func firstDayOfMonth(t time.Time) time.Time {
return time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, time.UTC)
} | day.go | 0.774199 | 0.577614 | day.go | starcoder |
package steg
import (
"encoding/binary"
"fmt"
"github.com/DimitarPetrov/stegify/bits"
"image"
"io"
"os"
)
//Decode performs steganography decoding of Reader with previously encoded data by the Encode function and writes to result Writer.
func Decode(carrier io.Reader, result io.Writer) error {
RGBAImage, _, err := getImageAsRGBA(carrier)
if err != nil {
return fmt.Errorf("error parsing carrier image: %v", err)
}
dx := RGBAImage.Bounds().Dx()
dy := RGBAImage.Bounds().Dy()
dataBytes := make([]byte, 0, 2048)
resultBytes := make([]byte, 0, 2048)
dataCount := extractDataCount(RGBAImage)
var count int
for x := 0; x < dx && dataCount > 0; x++ {
for y := 0; y < dy && dataCount > 0; y++ {
if count >= dataSizeHeaderReservedBytes {
c := RGBAImage.RGBAAt(x, y)
dataBytes = append(dataBytes, bits.GetLastTwoBits(c.R), bits.GetLastTwoBits(c.G), bits.GetLastTwoBits(c.B))
dataCount -= 3
} else {
count += 4
}
}
}
if dataCount < 0 {
dataBytes = dataBytes[:len(dataBytes)+dataCount] //remove bytes that are not part of data and mistakenly added
}
dataBytes = align(dataBytes) // len(dataBytes) must be aliquot of 4
for i := 0; i < len(dataBytes); i += 4 {
resultBytes = append(resultBytes, bits.ConstructByteOfQuartersAsSlice(dataBytes[i:i+4]))
}
if _, err = result.Write(resultBytes); err != nil {
return err
}
return nil
}
//DecodeByFileNames performs steganography decoding of data previously encoded by the Encode function.
//The data is decoded from file carrier and it is saved in separate new file
func DecodeByFileNames(carrierFileName string, newFileName string) (err error) {
carrier, err := os.Open(carrierFileName)
if err != nil {
return fmt.Errorf("error opening carrier file: %v", err)
}
defer func() {
closeErr := carrier.Close()
if err == nil {
err = closeErr
}
}()
result, err := os.Create(newFileName)
if err != nil {
return fmt.Errorf("error creating result file: %v", err)
}
defer func() {
closeErr := result.Close()
if err == nil {
err = closeErr
}
}()
err = Decode(carrier, result)
if err != nil {
_ = os.Remove(newFileName)
}
return err
}
func align(dataBytes []byte) []byte {
switch len(dataBytes) % 4 {
case 1:
dataBytes = append(dataBytes, byte(0), byte(0), byte(0))
case 2:
dataBytes = append(dataBytes, byte(0), byte(0))
case 3:
dataBytes = append(dataBytes, byte(0))
}
return dataBytes
}
func extractDataCount(RGBAImage *image.RGBA) int {
dataCountBytes := make([]byte, 0, 16)
dx := RGBAImage.Bounds().Dx()
dy := RGBAImage.Bounds().Dy()
count := 0
for x := 0; x < dx && count < dataSizeHeaderReservedBytes; x++ {
for y := 0; y < dy && count < dataSizeHeaderReservedBytes; y++ {
c := RGBAImage.RGBAAt(x, y)
dataCountBytes = append(dataCountBytes, bits.GetLastTwoBits(c.R), bits.GetLastTwoBits(c.G), bits.GetLastTwoBits(c.B))
count += 4
}
}
dataCountBytes = append(dataCountBytes, byte(0))
var bs = []byte{bits.ConstructByteOfQuartersAsSlice(dataCountBytes[:4]),
bits.ConstructByteOfQuartersAsSlice(dataCountBytes[4:8]),
bits.ConstructByteOfQuartersAsSlice(dataCountBytes[8:12]),
bits.ConstructByteOfQuartersAsSlice(dataCountBytes[12:])}
return int(binary.LittleEndian.Uint32(bs))
} | steg/steg_decode.go | 0.652906 | 0.45847 | steg_decode.go | starcoder |
package plaid
import (
"encoding/json"
)
// TaxpayerID Taxpayer ID of the individual receiving the paystub.
type TaxpayerID struct {
// Type of ID, e.g. 'SSN'
IdType NullableString `json:"id_type,omitempty"`
// ID mask; i.e. last 4 digits of the taxpayer ID
IdMask NullableString `json:"id_mask,omitempty"`
// Last 4 digits of unique number of ID.
Last4Digits NullableString `json:"last_4_digits,omitempty"`
AdditionalProperties map[string]interface{}
}
type _TaxpayerID TaxpayerID
// NewTaxpayerID instantiates a new TaxpayerID object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewTaxpayerID() *TaxpayerID {
this := TaxpayerID{}
return &this
}
// NewTaxpayerIDWithDefaults instantiates a new TaxpayerID object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewTaxpayerIDWithDefaults() *TaxpayerID {
this := TaxpayerID{}
return &this
}
// GetIdType returns the IdType field value if set, zero value otherwise (both if not set or set to explicit null).
func (o *TaxpayerID) GetIdType() string {
if o == nil || o.IdType.Get() == nil {
var ret string
return ret
}
return *o.IdType.Get()
}
// GetIdTypeOk returns a tuple with the IdType field value if set, nil otherwise
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *TaxpayerID) GetIdTypeOk() (*string, bool) {
if o == nil {
return nil, false
}
return o.IdType.Get(), o.IdType.IsSet()
}
// HasIdType returns a boolean if a field has been set.
func (o *TaxpayerID) HasIdType() bool {
if o != nil && o.IdType.IsSet() {
return true
}
return false
}
// SetIdType gets a reference to the given NullableString and assigns it to the IdType field.
func (o *TaxpayerID) SetIdType(v string) {
o.IdType.Set(&v)
}
// SetIdTypeNil sets the value for IdType to be an explicit nil
func (o *TaxpayerID) SetIdTypeNil() {
o.IdType.Set(nil)
}
// UnsetIdType ensures that no value is present for IdType, not even an explicit nil
func (o *TaxpayerID) UnsetIdType() {
o.IdType.Unset()
}
// GetIdMask returns the IdMask field value if set, zero value otherwise (both if not set or set to explicit null).
func (o *TaxpayerID) GetIdMask() string {
if o == nil || o.IdMask.Get() == nil {
var ret string
return ret
}
return *o.IdMask.Get()
}
// GetIdMaskOk returns a tuple with the IdMask field value if set, nil otherwise
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *TaxpayerID) GetIdMaskOk() (*string, bool) {
if o == nil {
return nil, false
}
return o.IdMask.Get(), o.IdMask.IsSet()
}
// HasIdMask returns a boolean if a field has been set.
func (o *TaxpayerID) HasIdMask() bool {
if o != nil && o.IdMask.IsSet() {
return true
}
return false
}
// SetIdMask gets a reference to the given NullableString and assigns it to the IdMask field.
func (o *TaxpayerID) SetIdMask(v string) {
o.IdMask.Set(&v)
}
// SetIdMaskNil sets the value for IdMask to be an explicit nil
func (o *TaxpayerID) SetIdMaskNil() {
o.IdMask.Set(nil)
}
// UnsetIdMask ensures that no value is present for IdMask, not even an explicit nil
func (o *TaxpayerID) UnsetIdMask() {
o.IdMask.Unset()
}
// GetLast4Digits returns the Last4Digits field value if set, zero value otherwise (both if not set or set to explicit null).
func (o *TaxpayerID) GetLast4Digits() string {
if o == nil || o.Last4Digits.Get() == nil {
var ret string
return ret
}
return *o.Last4Digits.Get()
}
// GetLast4DigitsOk returns a tuple with the Last4Digits field value if set, nil otherwise
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *TaxpayerID) GetLast4DigitsOk() (*string, bool) {
if o == nil {
return nil, false
}
return o.Last4Digits.Get(), o.Last4Digits.IsSet()
}
// HasLast4Digits returns a boolean if a field has been set.
func (o *TaxpayerID) HasLast4Digits() bool {
if o != nil && o.Last4Digits.IsSet() {
return true
}
return false
}
// SetLast4Digits gets a reference to the given NullableString and assigns it to the Last4Digits field.
func (o *TaxpayerID) SetLast4Digits(v string) {
o.Last4Digits.Set(&v)
}
// SetLast4DigitsNil sets the value for Last4Digits to be an explicit nil
func (o *TaxpayerID) SetLast4DigitsNil() {
o.Last4Digits.Set(nil)
}
// UnsetLast4Digits ensures that no value is present for Last4Digits, not even an explicit nil
func (o *TaxpayerID) UnsetLast4Digits() {
o.Last4Digits.Unset()
}
func (o TaxpayerID) MarshalJSON() ([]byte, error) {
toSerialize := map[string]interface{}{}
if o.IdType.IsSet() {
toSerialize["id_type"] = o.IdType.Get()
}
if o.IdMask.IsSet() {
toSerialize["id_mask"] = o.IdMask.Get()
}
if o.Last4Digits.IsSet() {
toSerialize["last_4_digits"] = o.Last4Digits.Get()
}
for key, value := range o.AdditionalProperties {
toSerialize[key] = value
}
return json.Marshal(toSerialize)
}
func (o *TaxpayerID) UnmarshalJSON(bytes []byte) (err error) {
varTaxpayerID := _TaxpayerID{}
if err = json.Unmarshal(bytes, &varTaxpayerID); err == nil {
*o = TaxpayerID(varTaxpayerID)
}
additionalProperties := make(map[string]interface{})
if err = json.Unmarshal(bytes, &additionalProperties); err == nil {
delete(additionalProperties, "id_type")
delete(additionalProperties, "id_mask")
delete(additionalProperties, "last_4_digits")
o.AdditionalProperties = additionalProperties
}
return err
}
type NullableTaxpayerID struct {
value *TaxpayerID
isSet bool
}
func (v NullableTaxpayerID) Get() *TaxpayerID {
return v.value
}
func (v *NullableTaxpayerID) Set(val *TaxpayerID) {
v.value = val
v.isSet = true
}
func (v NullableTaxpayerID) IsSet() bool {
return v.isSet
}
func (v *NullableTaxpayerID) Unset() {
v.value = nil
v.isSet = false
}
func NewNullableTaxpayerID(val *TaxpayerID) *NullableTaxpayerID {
return &NullableTaxpayerID{value: val, isSet: true}
}
func (v NullableTaxpayerID) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullableTaxpayerID) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | plaid/model_taxpayer_id.go | 0.759047 | 0.415373 | model_taxpayer_id.go | starcoder |
package clip
import (
"encoding/binary"
"math"
"github.com/p9c/gio/internal/opconst"
"github.com/p9c/gio/op"
)
// Stroke represents a stroked path.
type Stroke struct {
Path PathSpec
Style StrokeStyle
// Dashes specify the dashes of the stroke.
// The empty value denotes no dashes.
Dashes DashSpec
}
// Op returns a clip operation representing the stroke.
func (s Stroke) Op() Op {
return Op{
path: s.Path,
stroke: s.Style,
dashes: s.Dashes,
}
}
// StrokeStyle describes how a path should be stroked.
type StrokeStyle struct {
Width float32 // Width of the stroked path.
// Miter is the limit to apply to a miter joint.
// The zero Miter disables the miter joint; setting Miter to +∞
// unconditionally enables the miter joint.
Miter float32
Cap StrokeCap // Cap describes the head or tail of a stroked path.
Join StrokeJoin // Join describes how stroked paths are collated.
}
// StrokeCap describes the head or tail of a stroked path.
type StrokeCap uint8
const (
// RoundCap caps stroked paths with a round cap, joining the right-hand and
// left-hand sides of a stroked path with a half disc of diameter the
// stroked path's width.
RoundCap StrokeCap = iota
// FlatCap caps stroked paths with a flat cap, joining the right-hand
// and left-hand sides of a stroked path with a straight line.
FlatCap
// SquareCap caps stroked paths with a square cap, joining the right-hand
// and left-hand sides of a stroked path with a half square of length
// the stroked path's width.
SquareCap
)
// StrokeJoin describes how stroked paths are collated.
type StrokeJoin uint8
const (
// RoundJoin joins path segments with a round segment.
RoundJoin StrokeJoin = iota
// BevelJoin joins path segments with sharp bevels.
BevelJoin
)
// Dash records dashes' lengths and phase for a stroked path.
type Dash struct {
ops *op.Ops
macro op.MacroOp
phase float32
size uint8 // size of the pattern
}
func (d *Dash) Begin(ops *op.Ops) {
d.ops = ops
d.macro = op.Record(ops)
// Write the TypeAux opcode
data := ops.Write(opconst.TypeAuxLen)
data[0] = byte(opconst.TypeAux)
}
func (d *Dash) Phase(v float32) {
d.phase = v
}
func (d *Dash) Dash(length float32) {
if d.size == math.MaxUint8 {
panic("clip: dash pattern too large")
}
data := d.ops.Write(4)
bo := binary.LittleEndian
bo.PutUint32(data[0:], math.Float32bits(length))
d.size++
}
func (d *Dash) End() DashSpec {
c := d.macro.Stop()
return DashSpec{
spec: c,
phase: d.phase,
size: d.size,
}
}
// DashSpec describes a dashed pattern.
type DashSpec struct {
spec op.CallOp
phase float32
size uint8 // size of the pattern
} | op/clip/stroke.go | 0.817793 | 0.519948 | stroke.go | starcoder |
package promtest
import (
"math"
"testing"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
)
// TestRegistry is a prometheus registry meant to be used for testing
type TestRegistry struct {
*prometheus.Registry
t *testing.T
}
// NewTestRegistry allocates and initializes a new TestRegistry
func NewTestRegistry(t *testing.T) *TestRegistry {
return &TestRegistry{
Registry: prometheus.NewPedanticRegistry(),
t: t,
}
}
// TakeSnapshot takes a snapshot of the current values of metrics for testing
func (r *TestRegistry) TakeSnapshot() (*Snapshot, error) {
metrics, err := r.Registry.Gather()
if err != nil {
return nil, err
}
metricMap := make(map[string]*dto.MetricFamily)
for _, metric := range metrics {
metricMap[metric.GetName()] = metric
}
return &Snapshot{metricMap, r.t}, nil
}
// Snapshot provides methods for asserting on metrics
type Snapshot struct {
MetricMap map[string]*dto.MetricFamily
t *testing.T
}
// AssertCount asserts existence and count of a counter in the snapshot.
func (s *Snapshot) AssertCount(name string, labels map[string]string, value float64) {
s.t.Helper()
metric := s.GetMetric(dto.MetricType_COUNTER, name, labels)
if metric == nil {
if value == 0 {
// Counter not existing is the same as the counter having 0 value
return
}
s.t.Errorf("Could not find Counter %s with the labels %v", name, labels)
}
if actualValue := metric.GetCounter().GetValue(); !floatEquals(actualValue, value) {
s.t.Errorf("Expected counter value %f but was %f", value, actualValue)
}
}
// AssertGauge asserts existence and value of a gauge in the snapshot.
func (s *Snapshot) AssertGauge(name string, labels map[string]string, value float64) {
s.t.Helper()
metric := s.GetMetric(dto.MetricType_GAUGE, name, labels)
if metric == nil {
if value == 0 {
// Gauge not existing is the same as the counter having 0 value
return
}
s.t.Errorf("Could not find Gauge %s with the labels %v", name, labels)
}
if actualValue := metric.GetGauge().GetValue(); !floatEquals(actualValue, value) {
s.t.Errorf("Expected gauge value %f but was %f", value, actualValue)
}
}
// AssertSummary asserts that the existence and the sample sum and count of a summary in the snapshot.
func (s *Snapshot) AssertSummary(name string, labels map[string]string, sum float64, count uint64) {
s.t.Helper()
metric := s.GetMetric(dto.MetricType_SUMMARY, name, labels)
summary := metric.GetSummary()
if metric == nil {
if count == 0 {
// Summary not existing is the same as the summary having 0 value
return
}
s.t.Errorf("Could not find Summary %s with the labels %v", name, labels)
}
if actualSum := summary.GetSampleSum(); !floatEquals(actualSum, sum) {
s.t.Errorf("Expected summary [%s] sample sum to be %f but was %f", name, sum, actualSum)
}
if actualCount := summary.GetSampleCount(); actualCount != count {
s.t.Errorf("Expected summary [%s] sample count to be %d but was %d", name, count, actualCount)
}
}
// AssertHistogram asserts that the existence and the sample sum and count of a histogram in the
// snapshot.
func (s *Snapshot) AssertHistogram(name string, labels map[string]string, sum float64, count uint64) {
s.t.Helper()
metric := s.GetMetric(dto.MetricType_HISTOGRAM, name, labels)
histogram := metric.GetHistogram()
if metric == nil {
if count == 0 {
// Histogram not existing is the same as the histogram having 0 value
return
}
s.t.Errorf("Could not find Histogram %s with the labels %v", name, labels)
}
if actualSum := histogram.GetSampleSum(); !floatEquals(actualSum, sum) {
s.t.Errorf("Expected histogram [%s] sample sum to be %f but was %f", name, sum, actualSum)
}
if actualCount := histogram.GetSampleCount(); actualCount != count {
s.t.Errorf("Expected histogram [%s] sample count to be %d but was %d", name, count, actualCount)
}
}
// AssertSummaryNonZero asserts that the summary exists and its value is non-zero
func (s *Snapshot) AssertSummaryNonZero(name string, labels map[string]string) {
s.t.Helper()
metric := s.GetMetric(dto.MetricType_SUMMARY, name, labels)
summary := metric.GetSummary()
if metric == nil {
s.t.Errorf("Could not find Summary %s with the labels %v", name, labels)
}
if actualSum := summary.GetSampleSum(); actualSum == 0 {
s.t.Errorf("Expected summary sample sum to be >0")
}
}
// AssertHistogramSampleCount asserts that the histogram exists and contains exact number of samples
func (s *Snapshot) AssertHistogramSampleCount(name string, sampleCount uint64) {
s.t.Helper()
metric := s.GetMetric(dto.MetricType_HISTOGRAM, name, map[string]string{})
histogram := metric.GetHistogram()
if histogram == nil {
s.t.Errorf("Could not find Histogram %s", name)
}
if sampleCount != histogram.GetSampleCount() {
s.t.Errorf("Expected histogram sample count did not match: %d != %d",
sampleCount, histogram.GetSampleCount())
}
}
// GetMetric returns a matching metric from the snapshot
func (s *Snapshot) GetMetric(metricType dto.MetricType, name string, labels map[string]string) *dto.Metric {
family, ok := s.MetricMap[name]
if !ok {
return nil
}
if actualType := family.GetType(); actualType != metricType {
s.t.Errorf("Expected %s to be of type %s but was %s",
name, dto.MetricType_name[int32(metricType)], dto.MetricType_name[int32(actualType)])
return nil
}
var metric *dto.Metric
Outer:
for _, m := range family.GetMetric() {
labelPairs := m.GetLabel()
if len(labelPairs) != len(labels) {
continue
}
for _, labelPair := range labelPairs {
if labelValue, ok := labels[labelPair.GetName()]; !ok || labelValue != labelPair.GetValue() {
continue Outer
}
}
metric = m
break
}
return metric
}
func floatEquals(a, b float64) bool {
epsilon := 0.00000001
return math.Abs(a-b) < epsilon
} | promtest.go | 0.846006 | 0.464537 | promtest.go | starcoder |
package kgo
import (
"errors"
"strings"
"time"
)
// DateFormat pattern rules.
var datePatterns = []string{
// year
"Y", "2006", // A full numeric representation of a year, 4 digits Examples: 1999 or 2003
"y", "06", // A two digit representation of a year Examples: 99 or 03
// month
"m", "01", // Numeric representation of a month, with leading zeros 01 through 12
"n", "1", // Numeric representation of a month, without leading zeros 1 through 12
"M", "Jan", // A short textual representation of a month, three letters Jan through Dec
"F", "January", // A full textual representation of a month, such as January or March January through December
// day
"d", "02", // Day of the month, 2 digits with leading zeros 01 to 31
"j", "2", // Day of the month without leading zeros 1 to 31
// week
"D", "Mon", // A textual representation of a day, three letters Mon through Sun
"l", "Monday", // A full textual representation of the day of the week Sunday through Saturday
// time
"g", "3", // 12-hour format of an hour without leading zeros 1 through 12
"G", "15", // 24-hour format of an hour without leading zeros 0 through 23
"h", "03", // 12-hour format of an hour with leading zeros 01 through 12
"H", "15", // 24-hour format of an hour with leading zeros 00 through 23
"a", "pm", // Lowercase Ante meridiem and Post meridiem am or pm
"A", "PM", // Uppercase Ante meridiem and Post meridiem AM or PM
"i", "04", // Minutes with leading zeros 00 to 59
"s", "05", // Seconds, with leading zeros 00 through 59
// time zone
"T", "MST",
"P", "-07:00",
"O", "-0700",
// RFC 2822
"r", time.RFC1123Z,
}
// Time 获取当前Unix时间戳(秒).
func (kt *LkkTime) Time() int64 {
return time.Now().Unix()
}
// MilliTime 获取当前Unix时间戳(毫秒).
func (kt *LkkTime) MilliTime() int64 {
return time.Now().UnixNano() / int64(time.Millisecond)
}
// MicroTime 获取当前Unix时间戳(微秒).
func (kt *LkkTime) MicroTime() int64 {
return time.Now().UnixNano() / int64(time.Microsecond)
}
// Str2Time 将字符串转换为时间结构.
// str 为要转换的字符串;
// format 为该字符串的格式,默认为"2006-01-02 15:04:05" .
func (kt *LkkTime) Str2Timestruct(str string, format ...string) (time.Time, error) {
var f string
if len(format) > 0 {
f = strings.Trim(format[0], " ")
} else {
f = "2006-01-02 15:04:05"
}
if len(str) != len(f) {
return time.Now(), errors.New("Str2Timestruct: parameter format error")
}
return time.Parse(f, str)
}
// Str2Timestamp 将字符串转换为时间戳,秒.
// str 为要转换的字符串;
// format 为该字符串的格式,默认为"2006-01-02 15:04:05" .
func (kt *LkkTime) Str2Timestamp(str string, format ...string) (int64, error) {
tim, err := kt.Str2Timestruct(str, format...)
if err != nil {
return 0, err
}
return tim.Unix(), nil
}
// Date 格式化时间.
// format 格式,如"Y-m-d H:i:s".
// ts为int/int64类型时间戳或time.Time类型.
func (kt *LkkTime) Date(format string, ts ...interface{}) string {
replacer := strings.NewReplacer(datePatterns...)
format = replacer.Replace(format)
var t time.Time
if len(ts) > 0 {
val := ts[0]
if v, ok := val.(time.Time); ok {
t = v
} else if v, ok := val.(int); ok {
t = time.Unix(int64(v), 0)
} else if v, ok := val.(int64); ok {
t = time.Unix(int64(v), 0)
} else {
return ""
}
} else {
t = time.Now()
}
return t.Format(format)
}
// CheckDate 检查是否正常的日期.
func (kt *LkkTime) CheckDate(year, month, day int) bool {
if month < 1 || month > 12 || day < 1 || day > 31 || year < 1 || year > 32767 {
return false
}
switch month {
case 4, 6, 9, 11:
if day > 30 {
return false
}
case 2:
// leap year
if year%4 == 0 && (year%100 != 0 || year%400 == 0) {
if day > 29 {
return false
}
} else if day > 28 {
return false
}
}
return true
}
// Sleep 延缓执行,秒.
func (kt *LkkTime) Sleep(t int64) {
time.Sleep(time.Duration(t) * time.Second)
}
// Usleep 以指定的微秒数延迟执行.
func (kt *LkkTime) Usleep(t int64) {
time.Sleep(time.Duration(t) * time.Microsecond)
}
// ServiceStartime 获取当前服务启动时间戳,秒.
func (kt *LkkTime) ServiceStartime() int64 {
return Kuptime.Unix()
}
// ServiceUptime 获取当前服务运行时间,纳秒int64.
func (kt *LkkTime) ServiceUptime() time.Duration {
return time.Since(Kuptime)
}
// GetMonthDays 获取指定月份的天数.years年份,可选,默认当前年份.
func (kt *LkkTime) GetMonthDays(month int, years ...int) int {
months := map[int]int{1: 31, 3: 31, 4: 30, 5: 31, 6: 30, 7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31}
if days, ok := months[month]; ok {
return days
} else if month < 1 || month > 12 {
return 0
}
var year int
yLen := len(years)
if yLen == 0 {
year = time.Now().Year()
} else {
year = years[0]
}
if year%100 == 0 {
if year%400 == 0 {
return 29
} else {
return 28
}
} else if year%4 == 0 {
return 29
} else {
return 28
}
}
// Year 获取年份.
func (kt *LkkTime) Year(t ...time.Time) int {
var tm time.Time
if len(t) > 0 {
tm = t[0]
} else {
tm = time.Now()
}
return tm.Year()
}
// Month 获取月份.
func (kt *LkkTime) Month(t ...time.Time) int {
var tm time.Time
if len(t) > 0 {
tm = t[0]
} else {
tm = time.Now()
}
return int(tm.Month())
}
// Day 获取日份.
func (kt *LkkTime) Day(t ...time.Time) int {
var tm time.Time
if len(t) > 0 {
tm = t[0]
} else {
tm = time.Now()
}
return tm.Day()
}
// Hour 获取小时.
func (kt *LkkTime) Hour(t ...time.Time) int {
var tm time.Time
if len(t) > 0 {
tm = t[0]
} else {
tm = time.Now()
}
return tm.Hour()
}
// Minute 获取分钟.
func (kt *LkkTime) Minute(t ...time.Time) int {
var tm time.Time
if len(t) > 0 {
tm = t[0]
} else {
tm = time.Now()
}
return tm.Minute()
}
// Second 获取秒数.
func (kt *LkkTime) Second(t ...time.Time) int {
var tm time.Time
if len(t) > 0 {
tm = t[0]
} else {
tm = time.Now()
}
return tm.Second()
} | time.go | 0.553747 | 0.523847 | time.go | starcoder |
package box
import (
"github.com/adamcolton/geom/d2"
"github.com/adamcolton/geom/d2/curve/line"
)
// Box is a rectangle that lies orthoganal to the plane. The first point should
// be the min point and the second should be the max.
type Box [2]d2.Pt
// New Box containing all the points passed in.
func New(pts ...d2.Pt) *Box {
b := &Box{}
b[0], b[1] = d2.MinMax(pts...)
return b
}
// V is the vector from the min point to the max point
func (b *Box) V() d2.V {
return b[1].Subtract(b[0])
}
// Vertex returns one of the 4 verticies of the box proceeding counter
// clockwise.
func (b *Box) Vertex(n int) d2.Pt {
n %= 4
if n == 2 {
return b[1]
}
p := b[0]
if n == 1 {
p.X = b[1].X
} else if n == 3 {
p.Y = b[1].Y
}
return p
}
// Side returns one of the sides proceeding counter clockwise.
func (b *Box) Side(n int) line.Line {
return line.New(b.Vertex(n), b.Vertex(n+1))
}
// Sides returns an array of the 4 sides in counter clockwise order.
func (b *Box) Sides() *[4]line.Line {
v := b.V()
return &[4]line.Line{
{b[0], d2.V{v.X, 0}},
{d2.Pt{b[1].X, b[0].Y}, d2.V{0, v.Y}},
{b[1], d2.V{-v.X, 0}},
{d2.Pt{b[0].X, b[1].Y}, d2.V{0, -v.Y}},
}
}
// LineIntersections returning the sides of the box that are intersected.
// Fulfills line.Intersector and shape.Shape.
func (b *Box) LineIntersections(l line.Line, buf []float64) []float64 {
max := len(buf)
buf = buf[:0]
for _, s := range b.Sides() {
t0, t1, ok := l.Intersection(s)
if ok && t0 >= 0 && t0 < 1 {
buf = append(buf, t1)
if max == 1 {
return buf
}
}
}
return buf
}
// Centroid returns the center of the box, fulfilling shape.Centroid
func (b *Box) Centroid() d2.Pt {
return line.New(b[0], b[1]).Pt1(0.5)
}
// Area of the box fulling shape.Area.
func (b *Box) Area() float64 {
a := b.SignedArea()
if a < 0 {
return -a
}
return a
}
// SignedArea of the box fulling shape.Area. If the box is well formed the area
// should always be positive.
func (b *Box) SignedArea() float64 {
v := b.V()
return v.X * v.Y
}
// Contains returns true of the box contains the point. Fulfills shape.Container
// and shape.Shape.
func (b *Box) Contains(p d2.Pt) bool {
return b[0].X <= p.X &&
b[1].X >= p.X &&
b[0].Y <= p.Y &&
b[1].Y >= p.Y
}
// Perimeter of the box. Fulfills shape.Perimeter.
func (b *Box) Perimeter() float64 {
v := b.V()
return 2 * (v.X + v.Y)
}
// BoundingBox fulfils shape.BoundingBoxer and shape.Shape.
func (b *Box) BoundingBox() (min, max d2.Pt) {
return b[0], b[1]
} | d2/shape/box/box.go | 0.886782 | 0.608071 | box.go | starcoder |
package duration
import (
"time"
"github.com/Cloud-Foundations/tricorder/go/tricorder/units"
)
// Duration represents a duration of time
// For negative durations, both Seconds and Nanoseconds are negative.
// Internal use only for now.
type Duration struct {
Seconds int64
Nanoseconds int32
}
func New(d time.Duration) Duration {
return newDuration(d)
}
// SinceEpoch returns the amount of time since unix epoch
func SinceEpoch(now time.Time) Duration {
return sinceEpoch(now)
}
// SinceEpochFloat returns the amount of time since unix epoch
func SinceEpochFloat(secondsSinceEpoch float64) Duration {
return sinceEpochFloat(secondsSinceEpoch)
}
// ParseWithUnit takes a string that is a quantity of unit and converts it
// to a Duration.
func ParseWithUnit(str string, unit units.Unit) (dur Duration, err error) {
inUnit, err := parse(str)
if err != nil {
return
}
dur = inUnit.convert(unit, units.Second)
return
}
// AsGoDuration converts this duration to a go duration
func (d Duration) AsGoDuration() time.Duration {
return d.asGoDuration()
}
// AsGoTime Converts this duration to a go time in the
// system's local time zone.
func (d Duration) AsGoTime() time.Time {
return d.asGoTime()
}
// AsFloat returns this duration in seconds.
func (d Duration) AsFloat() float64 {
return d.asFloat()
}
// String shows in seconds
func (d Duration) String() string {
return d.toString()
}
// StringUsingUnits shows in specified time unit.
// If unit not a time, shows in seconds.
func (d Duration) StringUsingUnits(unit units.Unit) string {
return d.convert(units.Second, unit).toString()
}
// IsNegative returns true if this duration is negative.
func (d Duration) IsNegative() bool {
return d.isNegative()
}
// PrettyFormat pretty formats this duration.
// PrettyFormat panics if this duration is negative.
func (d Duration) PrettyFormat() string {
return d.prettyFormat()
}
// FloatToTime converts seconds after Jan 1, 1970 GMT to a time in the
// system's local time zone.
func FloatToTime(secondsSinceEpoch float64) time.Time {
return SinceEpochFloat(secondsSinceEpoch).AsGoTime()
}
// TimeToFloat returns t as seconds after Jan 1, 1970 GMT
func TimeToFloat(t time.Time) (secondsSinceEpoch float64) {
return SinceEpoch(t).AsFloat()
}
// ToFloat returns d as seconds
func ToFloat(d time.Duration) (seconds float64) {
return float64(d) / float64(time.Second)
}
// FromFloat converts a value in seconds to a duration
func FromFloat(seconds float64) time.Duration {
return time.Duration(seconds*float64(time.Second) + 0.5)
} | go/tricorder/duration/api.go | 0.931665 | 0.514034 | api.go | starcoder |
package mongodb
import (
"errors"
"go/token"
"strings"
"time"
"github.com/eroatta/src-reader/entity"
"github.com/google/uuid"
)
// identifierMapper maps an Identifier between its model and database representations.
type identifierMapper struct{}
// fromTokenToString transforms a token.Token value into a human-readable string.
func (im *identifierMapper) fromTokenToString(tok token.Token) string {
var tokenString string
switch tok {
case token.FUNC:
tokenString = "func"
case token.VAR:
tokenString = "var"
case token.CONST:
tokenString = "const"
case token.STRUCT:
tokenString = "struct"
case token.INTERFACE:
tokenString = "interface"
default:
tokenString = "unknown"
}
return tokenString
}
func (im *identifierMapper) fromStringToToken(str string) token.Token {
var tok token.Token
switch str {
case "func":
tok = token.FUNC
case "var":
tok = token.VAR
case "const":
tok = token.CONST
case "struct":
tok = token.STRUCT
case "interface":
tok = token.INTERFACE
default:
tok = token.DEFAULT
}
return tok
}
// toDTO maps the entity for Identifier into a Data Transfer Object.
func (im *identifierMapper) toDTO(ent entity.Identifier, analysisEnt entity.AnalysisResults) identifierDTO {
dto := identifierDTO{
ID: ent.ID,
Package: ent.Package,
AbsolutePackage: ent.FullPackageName(),
File: ent.File,
Position: ent.Position,
Name: ent.Name,
Type: im.fromTokenToString(ent.Type),
AnalysisID: analysisEnt.ID.String(),
ProjectRef: analysisEnt.ProjectName,
CreatedAt: time.Now(),
Exported: ent.Exported(),
Normalization: normalizationDTO{
Word: ent.Normalization.Word,
Algorithm: ent.Normalization.Algorithm,
Score: ent.Normalization.Score,
},
}
splits := make(map[string][]splitDTO, len(ent.Splits))
joinedSplits := make(map[string]string, len(ent.Splits))
for k, v := range ent.Splits {
items := make([]splitDTO, len(v))
words := make([]string, len(v))
for i, splitEnt := range v {
items[i] = splitDTO{
Order: splitEnt.Order,
Value: splitEnt.Value,
}
words[i] = splitEnt.Value
}
splits[k] = items
joinedSplits[k] = strings.Join(words, "_")
}
dto.Splits = splits
dto.JoinedSplits = joinedSplits
expansions := make(map[string][]expansionDTO, len(ent.Expansions))
joinedExpansions := make(map[string]string, len(ent.Expansions))
for k, v := range ent.Expansions {
items := make([]expansionDTO, len(v))
words := make([]string, len(v))
for i, expansionEnt := range v {
items[i] = expansionDTO{
Order: expansionEnt.Order,
SplittingAlgorithm: expansionEnt.SplittingAlgorithm,
From: expansionEnt.From,
Values: expansionEnt.Values,
}
words[i] = strings.Join(expansionEnt.Values, "|")
}
expansions[k] = items
joinedExpansions[k] = strings.Join(words, "_")
}
dto.Expansions = expansions
dto.JoinedExpansions = joinedExpansions
return dto
}
// toEntity maps the Data Transfer Object for Identifier into a domain entity.
func (im *identifierMapper) toEntity(dto identifierDTO) entity.Identifier {
splits := make(map[string][]entity.Split, len(dto.Splits))
for alg, split := range dto.Splits {
items := make([]entity.Split, len(split))
for i, splitDto := range split {
items[i] = entity.Split{
Order: splitDto.Order,
Value: splitDto.Value,
}
}
splits[alg] = items
}
expansions := make(map[string][]entity.Expansion, len(dto.Expansions))
for alg, exp := range dto.Expansions {
items := make([]entity.Expansion, len(exp))
for i, expDto := range exp {
items[i] = entity.Expansion{
Order: expDto.Order,
SplittingAlgorithm: expDto.SplittingAlgorithm,
From: expDto.From,
Values: expDto.Values,
}
}
expansions[alg] = items
}
var err error
if dto.Error != "" {
err = errors.New(dto.Error)
}
return entity.Identifier{
ID: dto.ID,
ProjectRef: dto.ProjectRef,
AnalysisID: uuid.MustParse(dto.AnalysisID),
Package: dto.Package,
File: dto.File,
Position: dto.Position,
Name: dto.Name,
Type: im.fromStringToToken(dto.Type),
Node: nil,
Splits: splits,
Expansions: expansions,
Error: err,
Normalization: entity.Normalization{
Word: dto.Normalization.Word,
Algorithm: dto.Normalization.Algorithm,
Score: dto.Normalization.Score,
},
}
}
// identifierDTO is the database representation for an Identifier.
type identifierDTO struct {
ID string `bson:"identifier_id"`
Package string `bson:"package"`
AbsolutePackage string `bson:"absolute_package"`
File string `bson:"file"`
Position token.Pos `bson:"position"`
Name string `bson:"name"`
Type string `bson:"type"`
Splits map[string][]splitDTO `bson:"splits"`
JoinedSplits map[string]string `bson:"joined_splits"`
Expansions map[string][]expansionDTO `bson:"expansions"`
JoinedExpansions map[string]string `bson:"joined_expansions"`
Error string `bson:"error_value,omitempty"`
AnalysisID string `bson:"analysis_id"`
ProjectRef string `bson:"project_ref"`
CreatedAt time.Time `bson:"created_at"`
Exported bool `bson:"is_exported"`
Normalization normalizationDTO `bson:"normalization"`
}
// splitDTO is the database representation for an Identifier's Split results.
type splitDTO struct {
Order int `bson:"order"`
Value string `bson:"value"`
}
// expansionDTO is the database representation for an Identifier's Expansion results.
type expansionDTO struct {
Order int `bson:"order"`
SplittingAlgorithm string `bson:"splitting_algorithm"`
From string `bson:"from"`
Values []string `bson:"values"`
}
// normalizationDTO is the database representation for an Identifer's Normalization results.
type normalizationDTO struct {
Word string `bson:"word"`
Algorithm string `bson:"algorithm"`
Score float64 `bson:"score"`
} | port/outgoing/adapter/repository/mongodb/identifier_mapper.go | 0.640748 | 0.435121 | identifier_mapper.go | starcoder |
package ast
import (
"github.com/botobag/artemis/graphql"
"github.com/botobag/artemis/graphql/ast"
)
// TypeResolver is an utility class which tries to resolve type for an AST nodes in a given schema.
type TypeResolver struct {
Schema graphql.Schema
}
// ResolveType determines Type for an ast.Type.
func (resolver TypeResolver) ResolveType(ttype ast.Type) graphql.Type {
// A "true" is added to wrapTypes when an ast.ListType is encountered and "false" for ast.NonNullType.
var (
wrapTypes []bool
t graphql.Type
)
// Find the innermost ast.NamedType. Memoize what type we've went through.
named_type_loop:
for {
switch astType := ttype.(type) {
case ast.ListType:
wrapTypes = append(wrapTypes, true)
ttype = astType.ItemType
case ast.NamedType:
t = resolver.Schema.TypeMap().Lookup(astType.Name.Value())
break named_type_loop
case ast.NonNullType:
wrapTypes = append(wrapTypes, false)
ttype = astType.Type
}
}
if t != nil {
// Go through wrapTypes backward to build wrapping type.
var err error
for i := len(wrapTypes); i > 0 && err == nil; i-- {
if wrapTypes[i-1] {
t, err = graphql.NewListOfType(t)
} else {
t, err = graphql.NewNonNullOfType(t)
}
}
}
return t
}
// ResolveField determines Field for an ast.Field.
func (resolver TypeResolver) ResolveField(parentType graphql.Type, field *ast.Field) graphql.Field {
// We may not be able to retrieve the parent type statically.
if parentType == nil {
return nil
}
// Not exactly the same as findFieldDef in executor. In this statically evaluated environment we
// do not always have an Object type, and need to handle Interface and Union types.
name := field.Name.Value()
if parentType == resolver.Schema.Query() {
if name == graphql.SchemaMetaFieldName {
return graphql.SchemaMetaFieldDef()
} else if name == graphql.TypeMetaFieldName {
return graphql.TypeMetaFieldDef()
}
}
if name == graphql.TypenameMetaFieldName && graphql.IsCompositeType(parentType) {
return graphql.TypenameMetaFieldDef()
}
switch parentType := parentType.(type) {
case graphql.Object:
return parentType.Fields()[name]
case graphql.Interface:
return parentType.Fields()[name]
}
return nil
} | graphql/util/ast/type_resolver.go | 0.638272 | 0.473962 | type_resolver.go | starcoder |
package config
// PoolingType is a type of pooling, using runtime or mmap'd bytes pooling.
type PoolingType string
const (
// SimplePooling uses the basic Go runtime to allocate bytes for bytes pools.
SimplePooling PoolingType = "simple"
// NativePooling uses a mmap syscall to allocate bytes for bytes pools, take
// great care when experimenting with this. There's not enough protection
// even with ref counting that M3DB performs to use this safely in
// production. Here be dragons and so forth.
NativePooling PoolingType = "native"
)
const (
defaultMaxFinalizerCapacity = 4
)
// PoolingPolicy specifies the pooling policy.
type PoolingPolicy struct {
// The initial alloc size for a block
BlockAllocSize int `yaml:"blockAllocSize"`
// The general pool type: simple or native.
Type PoolingType `yaml:"type"`
// The Bytes pool buckets to use
BytesPool BucketPoolPolicy `yaml:"bytesPool"`
// The policy for the Closers pool
ClosersPool PoolPolicy `yaml:"closersPool"`
// The policy for the Context pool
ContextPool ContextPoolPolicy `yaml:"contextPool"`
// The policy for the DatabaseSeries pool
SeriesPool PoolPolicy `yaml:"seriesPool"`
// The policy for the DatabaseBlock pool
BlockPool PoolPolicy `yaml:"blockPool"`
// The policy for the Encoder pool
EncoderPool PoolPolicy `yaml:"encoderPool"`
// The policy for the Iterator pool
IteratorPool PoolPolicy `yaml:"iteratorPool"`
// The policy for the Segment Reader pool
SegmentReaderPool PoolPolicy `yaml:"segmentReaderPool"`
// The policy for the Identifier pool
IdentifierPool PoolPolicy `yaml:"identifierPool"`
// The policy for the FetchBlockMetadataResult pool
FetchBlockMetadataResultsPool CapacityPoolPolicy `yaml:"fetchBlockMetadataResultsPool"`
// The policy for the FetchBlocksMetadataResults pool
FetchBlocksMetadataResultsPool CapacityPoolPolicy `yaml:"fetchBlocksMetadataResultsPool"`
// The policy for the HostBlockMetadataSlice pool
HostBlockMetadataSlicePool CapacityPoolPolicy `yaml:"hostBlockMetadataSlicePool"`
// The policy for the BlockMetadat pool
BlockMetadataPool PoolPolicy `yaml:"blockMetadataPool"`
// The policy for the BlockMetadataSlice pool
BlockMetadataSlicePool CapacityPoolPolicy `yaml:"blockMetadataSlicePool"`
// The policy for the BlocksMetadata pool
BlocksMetadataPool PoolPolicy `yaml:"blocksMetadataPool"`
// The policy for the BlocksMetadataSlice pool
BlocksMetadataSlicePool CapacityPoolPolicy `yaml:"blocksMetadataSlicePool"`
// The policy for the tags pool
TagsPool MaxCapacityPoolPolicy `yaml:"tagsPool"`
// The policy for the tags iterator pool
TagsIteratorPool PoolPolicy `yaml:"tagIteratorPool"`
// The policy for the index.ResultsPool
IndexResultsPool PoolPolicy `yaml:"indexResultsPool"`
// The policy for the TagEncoderPool
TagEncoderPool PoolPolicy `yaml:"tagEncoderPool"`
// The policy for the TagDecoderPool
TagDecoderPool PoolPolicy `yaml:"tagDecoderPool"`
}
// PoolPolicy specifies a single pool policy.
type PoolPolicy struct {
// The size of the pool
Size int `yaml:"size"`
// The low watermark to start refilling the pool, if zero none
RefillLowWaterMark float64 `yaml:"lowWatermark" validate:"min=0.0,max=1.0"`
// The high watermark to stop refilling the pool, if zero none
RefillHighWaterMark float64 `yaml:"highWatermark" validate:"min=0.0,max=1.0"`
}
// CapacityPoolPolicy specifies a single pool policy that has a
// per element capacity.
type CapacityPoolPolicy struct {
// The size of the pool
Size int `yaml:"size"`
// The capacity of items in the pool
Capacity int `yaml:"capacity"`
// The low watermark to start refilling the pool, if zero none
RefillLowWaterMark float64 `yaml:"lowWatermark" validate:"min=0.0,max=1.0"`
// The high watermark to stop refilling the pool, if zero none
RefillHighWaterMark float64 `yaml:"highWatermark" validate:"min=0.0,max=1.0"`
}
// MaxCapacityPoolPolicy specifies a single pool policy that has a
// per element capacity, and a maximum allowed capacity as well.
type MaxCapacityPoolPolicy struct {
// The size of the pool
Size int `yaml:"size"`
// The capacity of items in the pool
Capacity int `yaml:"capacity"`
// The max capacity of items in the pool
MaxCapacity int `yaml:"maxCapacity"`
// The low watermark to start refilling the pool, if zero none
RefillLowWaterMark float64 `yaml:"lowWatermark" validate:"min=0.0,max=1.0"`
// The high watermark to stop refilling the pool, if zero none
RefillHighWaterMark float64 `yaml:"highWatermark" validate:"min=0.0,max=1.0"`
}
// BucketPoolPolicy specifies a bucket pool policy.
type BucketPoolPolicy struct {
// The pool buckets sizes to use
Buckets []CapacityPoolPolicy `yaml:"buckets"`
}
// ContextPoolPolicy specifies the policy for the context pool
type ContextPoolPolicy struct {
// The size of the pool
Size int `yaml:"size"`
// The low watermark to start refilling the pool, if zero none
RefillLowWaterMark float64 `yaml:"lowWatermark" validate:"min=0.0,max=1.0"`
// The high watermark to stop refilling the pool, if zero none
RefillHighWaterMark float64 `yaml:"highWatermark" validate:"min=0.0,max=1.0"`
// The maximum allowable size for a slice of finalizers that the
// pool will allow to be returned (finalizer slices that grow too
// large during use will be discarded instead of returning to the
// pool where they would consume more memory.)
MaxFinalizerCapacity int `yaml:"maxFinalizerCapacity" validate:"min=0"`
}
// PoolPolicy returns the PoolPolicy that is represented by the ContextPoolPolicy
func (c ContextPoolPolicy) PoolPolicy() PoolPolicy {
return PoolPolicy{
Size: c.Size,
RefillLowWaterMark: c.RefillLowWaterMark,
RefillHighWaterMark: c.RefillHighWaterMark,
}
}
// MaxFinalizerCapacityWithDefault returns the maximum finalizer capacity and
// fallsback to the default value if its not set
func (c ContextPoolPolicy) MaxFinalizerCapacityWithDefault() int {
if c.MaxFinalizerCapacity == 0 {
return defaultMaxFinalizerCapacity
}
return c.MaxFinalizerCapacity
} | src/cmd/services/m3dbnode/config/pooling.go | 0.829665 | 0.441191 | pooling.go | starcoder |
package geogoth
// NewPoint create Point with given coordinates
func NewPoint(coordinate []float64) *Geometry {
return &Geometry{
Type: Point,
Coordinates: coordinate,
}
}
// GetPointCoordinates returns longitude, latitude of Point geom
func GetPointCoordinates(feature *Feature) (float64, float64) {
// long = longitude (Y)
// lat = latitude (X)
// convert coordinates from interface to []float64
coord := (feature.Geom.Coordinates).([]float64)
lon := coord[0]
lat := coord[1]
return lon, lat
}
// NewMultiPoint create NewMultiPoint with given coordinates
func NewMultiPoint(coordinates [][]float64) *Geometry {
return &Geometry{
Type: MultiPoint,
Coordinates: coordinates,
}
}
// NewLineString create NewLineString with given coordinates
func NewLineString(coordinates [][]float64) *Geometry {
return &Geometry{
Type: LineString,
Coordinates: coordinates,
}
}
// GetTwoDimArrayCoordinates returns array of longitude, latitude of Two-dimensional arrays (MultiPoint, Linestring)
// coordnum - index of coordinate arr
func GetTwoDimArrayCoordinates(feature *Feature, coordnum int) (float64, float64) {
coords := (feature.Geom.Coordinates).([][]float64)
lon := coords[coordnum][0]
lat := coords[coordnum][1]
return lon, lat // longitude (Y), latitude (X)
}
// NewMultiLineString create NewMultiLineString with given coordinates
func NewMultiLineString(lines [][][]float64) *Geometry {
return &Geometry{
Type: MultiLineString,
Coordinates: lines,
}
}
// NewPolygon create NewPolygon with given coordinates
func NewPolygon(polygon [][][]float64) *Geometry {
return &Geometry{
Type: Polygon,
Coordinates: polygon,
}
}
// GetThreeDimArrayCoordinates returns array of longitude, latitude of Three-dimensional arrays (MultiLineString, Polygon)
// coordnum - index of coordinate arr
func GetThreeDimArrayCoordinates(feature *Feature, setnum, coordnum int) (float64, float64) {
coords := (feature.Geom.Coordinates).([][][]float64)
lon := coords[setnum][coordnum][0]
lat := coords[setnum][coordnum][1]
return lon, lat // longitude (Y), latitude (X)
}
// NewMultiPolygon create NewMultiPolygon with given coordinates
func NewMultiPolygon(polygons [][][][]float64) *Geometry {
return &Geometry{
Type: MultiPolygon,
Coordinates: polygons,
}
}
// GetFourDimArrayCoordinates returns array of longitude, latitude of Four-dimensional arrays (MultiPolygon)
// coordnum - index of coordinate arr
func GetFourDimArrayCoordinates(feature *Feature, setsnum, setnum, coordnum int) (float64, float64) {
coords := (feature.Geom.Coordinates).([][][][]float64)
lon := coords[setsnum][setnum][coordnum][0]
lat := coords[setsnum][setnum][coordnum][1]
return lon, lat // longitude (Y), latitude (X)
} | geojson/coordinates.go | 0.896679 | 0.733523 | coordinates.go | starcoder |
package sarama
import (
"fmt"
"strings"
"github.com/gogf/gkafka/third/github.com/rcrowley/go-metrics"
)
// Use exponentially decaying reservoir for sampling histograms with the same defaults as the Java library:
// 1028 elements, which offers a 99.9% confidence level with a 5% margin of error assuming a normal distribution,
// and an alpha factor of 0.015, which heavily biases the reservoir to the past 5 minutes of measurements.
// See https://github.com/dropwizard/metrics/blob/v3.1.0/metrics-core/src/main/java/com/codahale/metrics/ExponentiallyDecayingReservoir.java#L38
const (
metricsReservoirSize = 1028
metricsAlphaFactor = 0.015
)
func getOrRegisterHistogram(name string, r metrics.Registry) metrics.Histogram {
return r.GetOrRegister(name, func() metrics.Histogram {
return metrics.NewHistogram(metrics.NewExpDecaySample(metricsReservoirSize, metricsAlphaFactor))
}).(metrics.Histogram)
}
func getMetricNameForBroker(name string, broker *Broker) string {
// Use broker id like the Java client as it does not contain '.' or ':' characters that
// can be interpreted as special character by monitoring tool (e.g. Graphite)
return fmt.Sprintf(name+"-for-broker-%d", broker.ID())
}
func getOrRegisterBrokerMeter(name string, broker *Broker, r metrics.Registry) metrics.Meter {
return metrics.GetOrRegisterMeter(getMetricNameForBroker(name, broker), r)
}
func getOrRegisterBrokerHistogram(name string, broker *Broker, r metrics.Registry) metrics.Histogram {
return getOrRegisterHistogram(getMetricNameForBroker(name, broker), r)
}
func getMetricNameForTopic(name string, topic string) string {
// Convert dot to _ since reporters like Graphite typically use dot to represent hierarchy
// cf. KAFKA-1902 and KAFKA-2337
return fmt.Sprintf(name+"-for-topic-%s", strings.Replace(topic, ".", "_", -1))
}
func getOrRegisterTopicMeter(name string, topic string, r metrics.Registry) metrics.Meter {
return metrics.GetOrRegisterMeter(getMetricNameForTopic(name, topic), r)
}
func getOrRegisterTopicHistogram(name string, topic string, r metrics.Registry) metrics.Histogram {
return getOrRegisterHistogram(getMetricNameForTopic(name, topic), r)
} | third/github.com/Shopify/sarama/metrics.go | 0.850267 | 0.40486 | metrics.go | starcoder |
package cmp
import (
"fmt"
"reflect"
"github.com/go-spatial/geom"
)
func IsEmptyPoint(pt [2]float64) bool {
return pt != pt
}
func IsEmptyPoints(pts [][2]float64) bool {
for _, v := range pts {
if !IsEmptyPoint(v) {
return false
}
}
return true
}
func IsEmptyLines(lns [][][2]float64) bool {
for _, v := range lns {
if !IsEmptyPoints(v) {
return false
}
}
return true
}
func IsNil(a interface{}) bool {
defer func() { recover() }()
return a == nil || reflect.ValueOf(a).IsNil()
}
func IsEmptyGeo(geo geom.Geometry) (isEmpty bool, err error) {
if IsNil(geo) {
return true, nil
}
switch g := geo.(type) {
case [2]float64:
return IsEmptyPoint(g), nil
case geom.Point:
return IsEmptyPoint(g.XY()), nil
case *geom.Point:
if g == nil {
return true, nil
}
return IsEmptyPoint(g.XY()), nil
case [][2]float64:
return IsEmptyPoints(g), nil
case geom.MultiPoint:
return IsEmptyPoints(g.Points()), nil
case *geom.MultiPoint:
if g == nil {
return true, nil
}
return IsEmptyPoints(g.Points()), nil
case geom.LineString:
return IsEmptyPoints(g.Vertices()), nil
case *geom.LineString:
if g == nil {
return true, nil
}
return IsEmptyPoints(g.Vertices()), nil
case geom.MultiLineString:
return IsEmptyLines(g.LineStrings()), nil
case *geom.MultiLineString:
if g == nil {
return true, nil
}
return IsEmptyLines(g.LineStrings()), nil
case geom.Polygon:
return IsEmptyLines(g.LinearRings()), nil
case *geom.Polygon:
if g == nil {
return true, nil
}
return IsEmptyLines(g.LinearRings()), nil
case geom.MultiPolygon:
for _, v := range g.Polygons() {
if !IsEmptyLines(v) {
return false, nil
}
}
return true, nil
case *geom.MultiPolygon:
if g == nil {
return true, nil
}
for _, v := range g.Polygons() {
if !IsEmptyLines(v) {
return false, nil
}
}
return true, nil
case geom.Collection:
for _, v := range g.Geometries() {
isEmpty, err := IsEmptyGeo(v)
if err != nil {
return false, err
}
if !isEmpty {
return false, nil
}
}
return true, nil
case *geom.Collection:
if g == nil {
return true, nil
}
for _, v := range g.Geometries() {
isEmpty, err := IsEmptyGeo(v)
if err != nil {
return false, err
}
if !isEmpty {
return false, nil
}
}
return true, nil
default:
return false, fmt.Errorf("unknown geometry %T", geo)
}
} | vendor/github.com/go-spatial/geom/cmp/empty.go | 0.620737 | 0.513363 | empty.go | starcoder |
package types
import (
"bytes"
"context"
"github.com/liquidata-inc/dolt/go/store/hash"
)
type ValueCallback func(v Value) error
type RefCallback func(ref Ref) error
// Valuable is an interface from which a Value can be retrieved.
type Valuable interface {
// Kind is the NomsKind describing the kind of value this is.
Kind() NomsKind
Value(ctx context.Context) (Value, error)
}
type LesserValuable interface {
Valuable
// Less determines if this Noms value is less than another Noms value.
// When comparing two Noms values and both are comparable and the same type (Bool, Float or
// String) then the natural ordering is used. For other Noms values the Hash of the value is
// used. When comparing Noms values of different type the following ordering is used:
// Bool < Float < String < everything else.
Less(nbf *NomsBinFormat, other LesserValuable) (bool, error)
}
// Emptyable is an interface for Values which may or may not be empty
type Emptyable interface {
Empty() bool
}
// Value is the interface all Noms values implement.
type Value interface {
LesserValuable
// Equals determines if two different Noms values represents the same underlying value.
Equals(other Value) bool
// Hash is the hash of the value. All Noms values have a unique hash and if two values have the
// same hash they must be equal.
Hash(*NomsBinFormat) (hash.Hash, error)
// WalkValues iterates over the immediate children of this value in the DAG, if any, not including
// Type()
WalkValues(context.Context, ValueCallback) error
// WalkRefs iterates over the refs to the underlying chunks. If this value is a collection that has been
// chunked then this will return the refs of th sub trees of the prolly-tree.
WalkRefs(*NomsBinFormat, RefCallback) error
// typeOf is the internal implementation of types.TypeOf. It is not normalized
// and unions might have a single element, duplicates and be in the wrong
// order.
typeOf() (*Type, error)
// writeTo writes the encoded version of the value to a nomsWriter.
writeTo(nomsWriter, *NomsBinFormat) error
}
type ValueSlice []Value
func (vs ValueSlice) Equals(other ValueSlice) bool {
if len(vs) != len(other) {
return false
}
for i, v := range vs {
if !v.Equals(other[i]) {
return false
}
}
return true
}
func (vs ValueSlice) Contains(nbf *NomsBinFormat, v Value) bool {
for _, v := range vs {
if v.Equals(v) {
return true
}
}
return false
}
type ValueSort struct {
values []Value
nbf *NomsBinFormat
}
func (vs ValueSort) Len() int { return len(vs.values) }
func (vs ValueSort) Swap(i, j int) { vs.values[i], vs.values[j] = vs.values[j], vs.values[i] }
func (vs ValueSort) Less(i, j int) (bool, error) {
return vs.values[i].Less(vs.nbf, vs.values[j])
}
func (vs ValueSort) Equals(other ValueSort) bool {
return ValueSlice(vs.values).Equals(ValueSlice(other.values))
}
func (vs ValueSort) Contains(v Value) bool {
return ValueSlice(vs.values).Contains(vs.nbf, v)
}
type valueReadWriter interface {
valueReadWriter() ValueReadWriter
}
type valueImpl struct {
vrw ValueReadWriter
nbf *NomsBinFormat
buff []byte
offsets []uint32
}
func (v valueImpl) valueReadWriter() ValueReadWriter {
return v.vrw
}
func (v valueImpl) writeTo(enc nomsWriter, nbf *NomsBinFormat) error {
enc.writeRaw(v.buff)
return nil
}
func (v valueImpl) valueBytes(nbf *NomsBinFormat) ([]byte, error) {
return v.buff, nil
}
// IsZeroValue can be used to test if a Value is the same as T{}.
func (v valueImpl) IsZeroValue() bool {
return v.buff == nil
}
func (v valueImpl) Hash(*NomsBinFormat) (hash.Hash, error) {
return hash.Of(v.buff), nil
}
func (v valueImpl) decoder() valueDecoder {
return newValueDecoder(v.buff, v.vrw)
}
func (v valueImpl) format() *NomsBinFormat {
return v.nbf
}
func (v valueImpl) decoderAtOffset(offset int) valueDecoder {
return newValueDecoder(v.buff[offset:], v.vrw)
}
func (v valueImpl) asValueImpl() valueImpl {
return v
}
func (v valueImpl) Equals(other Value) bool {
if otherValueImpl, ok := other.(asValueImpl); ok {
return bytes.Equal(v.buff, otherValueImpl.asValueImpl().buff)
}
return false
}
func (v valueImpl) Less(nbf *NomsBinFormat, other LesserValuable) (bool, error) {
return valueLess(nbf, v, other.(Value))
}
func (v valueImpl) WalkRefs(nbf *NomsBinFormat, cb RefCallback) error {
bts, err := v.valueBytes(nbf)
if err != nil {
return err
}
return walkRefs(bts, nbf, cb)
}
type asValueImpl interface {
asValueImpl() valueImpl
}
func (v valueImpl) Kind() NomsKind {
return NomsKind(v.buff[0])
} | go/store/types/value.go | 0.755727 | 0.555496 | value.go | starcoder |
package aac
import (
"fmt"
)
// ADTSPacket is an ADTS packet.
type ADTSPacket struct {
Type int
SampleRate int
ChannelCount int
AU []byte
}
// DecodeADTS decodes an ADTS stream into ADTS packets.
func DecodeADTS(buf []byte) ([]*ADTSPacket, error) {
// refs: https://wiki.multimedia.cx/index.php/ADTS
var ret []*ADTSPacket
bl := len(buf)
pos := 0
for {
if (bl - pos) < 8 {
return nil, fmt.Errorf("invalid length")
}
syncWord := (uint16(buf[pos]) << 4) | (uint16(buf[pos+1]) >> 4)
if syncWord != 0xfff {
return nil, fmt.Errorf("invalid syncword")
}
protectionAbsent := buf[pos+1] & 0x01
if protectionAbsent != 1 {
return nil, fmt.Errorf("CRC is not supported")
}
pkt := &ADTSPacket{}
pkt.Type = int((buf[pos+2] >> 6) + 1)
switch MPEG4AudioType(pkt.Type) {
case MPEG4AudioTypeAACLC:
default:
return nil, fmt.Errorf("unsupported audio type: %d", pkt.Type)
}
sampleRateIndex := (buf[pos+2] >> 2) & 0x0F
switch {
case sampleRateIndex <= 12:
pkt.SampleRate = sampleRates[sampleRateIndex]
default:
return nil, fmt.Errorf("invalid sample rate index: %d", sampleRateIndex)
}
channelConfig := ((buf[pos+2] & 0x01) << 2) | ((buf[pos+3] >> 6) & 0x03)
switch {
case channelConfig >= 1 && channelConfig <= 6:
pkt.ChannelCount = int(channelConfig)
case channelConfig == 7:
pkt.ChannelCount = 8
default:
return nil, fmt.Errorf("invalid channel configuration: %d", channelConfig)
}
frameLen := int(((uint16(buf[pos+3])&0x03)<<11)|
(uint16(buf[pos+4])<<3)|
((uint16(buf[pos+5])>>5)&0x07)) - 7
if frameLen > MaxAccessUnitSize {
return nil, fmt.Errorf("AU size (%d) is too big (maximum is %d)", frameLen, MaxAccessUnitSize)
}
frameCount := buf[pos+6] & 0x03
if frameCount != 0 {
return nil, fmt.Errorf("frame count greater than 1 is not supported")
}
if len(buf[pos+7:]) < frameLen {
return nil, fmt.Errorf("invalid frame length")
}
pkt.AU = buf[pos+7 : pos+7+frameLen]
pos += 7 + frameLen
ret = append(ret, pkt)
if (bl - pos) == 0 {
break
}
}
return ret, nil
}
func encodeADTSSize(pkts []*ADTSPacket) int {
n := 0
for _, pkt := range pkts {
n += 7 + len(pkt.AU)
}
return n
}
// EncodeADTS encodes ADTS packets into an ADTS stream.
func EncodeADTS(pkts []*ADTSPacket) ([]byte, error) {
buf := make([]byte, encodeADTSSize(pkts))
pos := 0
for _, pkt := range pkts {
sampleRateIndex, ok := reverseSampleRates[pkt.SampleRate]
if !ok {
return nil, fmt.Errorf("invalid sample rate: %d", pkt.SampleRate)
}
var channelConfig int
switch {
case pkt.ChannelCount >= 1 && pkt.ChannelCount <= 6:
channelConfig = pkt.ChannelCount
case pkt.ChannelCount == 8:
channelConfig = 7
default:
return nil, fmt.Errorf("invalid channel count (%d)", pkt.ChannelCount)
}
frameLen := len(pkt.AU) + 7
fullness := 0x07FF // like ffmpeg does
buf[pos+0] = 0xFF
buf[pos+1] = 0xF1
buf[pos+2] = uint8(((pkt.Type - 1) << 6) | (sampleRateIndex << 2) | ((channelConfig >> 2) & 0x01))
buf[pos+3] = uint8((channelConfig&0x03)<<6 | (frameLen>>11)&0x03)
buf[pos+4] = uint8((frameLen >> 3) & 0xFF)
buf[pos+5] = uint8((frameLen&0x07)<<5 | ((fullness >> 6) & 0x1F))
buf[pos+6] = uint8((fullness & 0x3F) << 2)
pos += 7
pos += copy(buf[pos:], pkt.AU)
}
return buf, nil
} | pkg/aac/adts.go | 0.607663 | 0.472075 | adts.go | starcoder |
package notification
// Status describes the current state of an outgoing message.
type Status struct {
// State is the current state.
State State
// Details can contain any additional information about the State (e.g. "ringing", "no-answer" etc..).
Details string
// Sequence can be used when the provider sends updates out-of order (e.g. Twilio).
// The Sequence number defaults to 0, and a status update is ignored unless its
// Sequence number is >= the current one.
Sequence int
}
// SendResult represents the result of a sent message.
type SendResult struct {
// ID is the GoAlert message ID.
ID string
// ProviderMessageID is an identifier that represents the provider-specific ID
// of the message (e.g. Twilio SID).
ProviderMessageID ProviderMessageID
Status
}
// State represents the current state of an outgoing message.
type State int
const (
// StateSending should be specified when a message is sending but has not been sent.
// This includes things like remotely queued, ringing, or in-progress calls.
StateSending State = iota
// StatePending idicates a message waiting to be sent.
StatePending
// StateSent means the message has been sent completely, but may not
// have been delivered (or delivery confirmation is not supported.). For
// example, an SMS on the carrier network (but not device) or a voice call
// that rang but got `no-answer`.
StateSent
// StateDelivered means the message is completed and was received
// by the end device. SMS delivery confirmation, or a voice call was
// completed (including if it was voice mail).
StateDelivered
// StateFailedTemp should be set when a message was not sent (no SMS or ringing phone)
// but a subsequent try later may succeed. (e.g. voice call with busy signal).
StateFailedTemp
// StateFailedPerm should be set when a message was not sent (no SMS or ringing phone)
// but a subsequent attempt will not be expected to succeed. For messages that fail due to
// invalid config, they should set this state, as without manual intervention, a retry
// will also fail.
StateFailedPerm
) | notification/status.go | 0.567457 | 0.401101 | status.go | starcoder |
// util.go contains various utility functions.
// Prefer the free-form functions to member functions.
package board
// Pawns return the set of pawns of the given color.
func Pawns(pos *Position, us Color) Bitboard {
return pos.ByPiece(us, Pawn)
}
// Knights return the set of knights of the given color.
func Knights(pos *Position, us Color) Bitboard {
return pos.ByPiece(us, Knight)
}
// Bishops return the set of bishops of the given color.
func Bishops(pos *Position, us Color) Bitboard {
return pos.ByPiece(us, Bishop)
}
// Rooks return the set of rooks of the given color.
func Rooks(pos *Position, us Color) Bitboard {
return pos.ByPiece(us, Rook)
}
// Queens return the set of queens of the given color.
func Queens(pos *Position, us Color) Bitboard {
return pos.ByPiece(us, Queen)
}
// Kings return the set of kings of the given color.
// Normally there is exactly on king for each side.
func Kings(pos *Position, us Color) Bitboard {
return pos.ByPiece(us, King)
}
// PawnThreats returns the squares threatened by our pawns.
func PawnThreats(pos *Position, us Color) Bitboard {
ours := Pawns(pos, us)
return Forward(us, East(ours)|West(ours))
}
// BackwardPawns returns the our backward pawns.
// A backward pawn is a pawn that has no pawns behind them on its file or
// adjacent file, it's not isolated and cannot advance safely.
func BackwardPawns(pos *Position, us Color) Bitboard {
ours := Pawns(pos, us)
behind := ForwardFill(us, East(ours)|West(ours))
doubled := BackwardSpan(us, ours)
isolated := IsolatedPawns(pos, us)
return ours & Backward(us, PawnThreats(pos, us.Opposite())) &^ behind &^ doubled &^ isolated
}
// DoubledPawns returns a bitboard with our doubled pawns.
func DoubledPawns(pos *Position, us Color) Bitboard {
ours := Pawns(pos, us)
return ours & Backward(us, ours)
}
// IsolatedPawns returns a bitboard with our isolated pawns.
func IsolatedPawns(pos *Position, us Color) Bitboard {
ours := Pawns(pos, us)
wings := East(ours) | West(ours)
return ours &^ Fill(wings)
}
// PassedPawns returns a bitboard with our passed pawns.
func PassedPawns(pos *Position, us Color) Bitboard {
// From white's POV: w - white pawn, b - black pawn, x - non-passed pawns.
// ........
// .....w..
// .....x..
// ..b..x..
// .xxx.x..
// .xxx.x..
ours := Pawns(pos, us)
theirs := pos.ByPiece(us.Opposite(), Pawn)
theirs |= East(theirs) | West(theirs)
block := BackwardSpan(us, theirs|ours)
return ours &^ block
}
// ConnectedPawns returns a bitboad with our connected pawns.
func ConnectedPawns(pos *Position, us Color) Bitboard {
ours := Pawns(pos, us)
wings := East(ours) | West(ours)
return ours & (North(wings) | wings | South(wings))
}
// RammedPawns returns pawns on ranks 2, 3 for white
// and rank 6 and 7 blocking an advanced enemy pawn.
func RammedPawns(pos *Position, us Color) Bitboard {
var bb Bitboard
if us == White {
bb = BbRank2 | BbRank3
} else if us == Black {
bb = BbRank7 | BbRank6
}
return Pawns(pos, us) & Backward(us, pos.ByPiece(us.Opposite(), Pawn)) & bb
}
// Minors returns a bitboard with our knights and bishops.
func Minors(pos *Position, us Color) Bitboard {
return pos.ByPiece2(us, Knight, Bishop)
}
// Majors returns a bitboard with our rooks and queens.
func Majors(pos *Position, us Color) Bitboard {
return pos.ByPiece2(us, Rook, Queen)
}
// MinorsAndMajors returns a bitboard with minor and major pieces.
func MinorsAndMajors(pos *Position, col Color) Bitboard {
return pos.ByColor(col) &^ pos.ByFigure(Pawn) &^ pos.ByFigure(King)
}
// OpenFiles returns our fully set files with no pawns.
func OpenFiles(pos *Position, us Color) Bitboard {
pawns := pos.ByFigure(Pawn)
return ^Fill(pawns)
}
// SemiOpenFiles returns our fully set files with enemy pawns, but no friendly pawns.
func SemiOpenFiles(pos *Position, us Color) Bitboard {
ours := Pawns(pos, us)
theirs := pos.ByPiece(us.Opposite(), Pawn)
return Fill(theirs) &^ Fill(ours)
}
// KingArea returns an area around king.
func KingArea(pos *Position, us Color) Bitboard {
bb := pos.ByPiece(us, King)
bb = East(bb) | bb | West(bb)
bb = North(bb) | bb | South(bb)
return bb
}
// PawnPromotionSquare returns the propotion square of a col pawn on sq.
// Undefined behaviour if col is not White or Black.
func PawnPromotionSquare(col Color, sq Square) Square {
if col == White {
return sq | 0x38
}
if col == Black {
return sq &^ 0x38
}
return sq
}
var homeRank = [ColorArraySize]int{0, 7, 0}
// HomeRank returns the rank of the king at the begining of the game.
// By construction HomeRank(col)^1 returns the pawn rank.
// Result is undefined if c is not White or Black.
func HomeRank(col Color) int {
return homeRank[col]
} | util.go | 0.852322 | 0.725758 | util.go | starcoder |
package images
import (
"bytes"
"image"
"image/jpeg"
"image/png"
"io"
"strings"
"github.com/nfnt/resize"
)
/*
IResizer is an interface to describe structs that resize images
*/
type IResizer interface {
ResizeImage(source io.ReadSeeker, contentType string, imageSize ImageSize) (*bytes.Buffer, error)
ResizeImagePixels(source io.ReadSeeker, contentType string, width, height int) (*bytes.Buffer, error)
}
/*
A Resizer contains methods for resizing images and re-encoding them.
*/
type Resizer struct{}
/*
ResizeImage takes a source image, content type (MIME), and a image size (
THUMBNAIL, SMALL, MEDIUM, LARGE) and resizes proportionally.
*/
func (r Resizer) ResizeImage(source io.ReadSeeker, contentType string, imageSize ImageSize) (*bytes.Buffer, error) {
var (
err error
result *bytes.Buffer
sourceImage image.Image
)
if !r.isValidImageFormat(contentType) {
return result, ErrInvalidFileType
}
source.Seek(0, 0)
if sourceImage, err = r.readSourceImage(source); err != nil {
return result, err
}
sourceHeight := sourceImage.Bounds().Max.Y - sourceImage.Bounds().Min.Y
sourceWidth := sourceImage.Bounds().Max.X - sourceImage.Bounds().Min.Y
adjustedHeight := r.calculateHeight(sourceHeight, imageSize)
adjustedWidth := r.calculateWidth(sourceWidth, imageSize)
percentHeightChange := r.calculateHeightChangePercentage(adjustedHeight, sourceHeight)
percentWidthChange := r.calculateWidthChangePercentage(adjustedWidth, sourceWidth)
percent := r.determinePercentageChangeToMake(percentWidthChange, percentHeightChange)
newHeight := r.calculateNewHeight(sourceHeight, percent)
newWidth := r.calculateNewWidth(sourceWidth, percent)
return r.resizeImage(sourceImage, contentType, newWidth, newHeight)
}
/*
ResizeImagePixels resizes a source image to the specified widthxheight.
*/
func (r Resizer) ResizeImagePixels(source io.ReadSeeker, contentType string, width, height int) (*bytes.Buffer, error) {
var (
err error
result *bytes.Buffer
sourceImage image.Image
)
if !r.isValidImageFormat(contentType) {
return result, ErrInvalidFileType
}
source.Seek(0, 0)
if sourceImage, err = r.readSourceImage(source); err != nil {
return result, err
}
return r.resizeImage(sourceImage, contentType, width, height)
}
func (r Resizer) resizeImage(sourceImage image.Image, contentType string, width, height int) (*bytes.Buffer, error) {
result := new(bytes.Buffer)
var err error
resizedImage := resize.Resize(uint(width), uint(height), sourceImage, resize.Lanczos3)
encoderType := r.getEncoderType(contentType)
if encoderType == "jpg" {
err = jpeg.Encode(result, resizedImage, nil)
return result, err
}
err = png.Encode(result, resizedImage)
return result, err
}
func (r Resizer) readSourceImage(sourceImage io.Reader) (image.Image, error) {
decodedImage, _, err := image.Decode(sourceImage)
if err != nil {
return nil, err
}
return decodedImage, nil
}
func (r Resizer) getMultiplierFromSize(imageSize ImageSize) float64 {
if imageSize == THUMBNAIL {
return 0.10
}
if imageSize == SMALL {
return 0.25
}
if imageSize == MEDIUM {
return 0.50
}
return 1.0
}
func (r Resizer) calculateHeight(height int, imageSize ImageSize) float64 {
return float64(height) * r.getMultiplierFromSize(imageSize)
}
func (r Resizer) calculateWidth(width int, imageSize ImageSize) float64 {
return float64(width) * r.getMultiplierFromSize(imageSize)
}
func (r Resizer) calculateHeightChangePercentage(adjustedHeight float64, originalHeight int) float64 {
return adjustedHeight / float64(originalHeight)
}
func (r Resizer) calculateWidthChangePercentage(adjustedWidth float64, originalWidth int) float64 {
return adjustedWidth / float64(originalWidth)
}
func (r Resizer) determinePercentageChangeToMake(widthChangePercentage float64, heightChangePercentage float64) float64 {
if heightChangePercentage < widthChangePercentage {
return heightChangePercentage
}
return widthChangePercentage
}
func (r Resizer) calculateNewHeight(originalHeight int, percentageChange float64) int {
return int(float64(originalHeight) * percentageChange)
}
func (r Resizer) calculateNewWidth(originalWidth int, percentageChange float64) int {
return int(float64(originalWidth) * percentageChange)
}
func (r Resizer) isValidImageFormat(contentType string) bool {
validMIMETypes := []string{
"jpg",
"jpeg",
"png",
}
for _, mimeType := range validMIMETypes {
if strings.Contains(contentType, mimeType) {
return true
}
}
return false
}
func (r Resizer) getEncoderType(contentType string) string {
if strings.Contains(contentType, "jpg") || strings.Contains(contentType, "jpeg") {
return "jpg"
}
return "png"
} | images/Resizer.go | 0.690768 | 0.408218 | Resizer.go | starcoder |
package iso20022
// Parameters applied to the settlement of a security transfer.
type Transfer8 struct {
// Unique and unambiguous identifier for a group of individual transfers as assigned by the instructing party. This identifier links the individual transfers together.
MasterReference *Max35Text `xml:"MstrRef,omitempty"`
// Unique and unambiguous identifier for a transfer instruction, as assigned by the instructing party.
TransferReference *Max35Text `xml:"TrfRef"`
// Unique and unambiguous investor's identification of a transfer. This reference can typically be used in a hub scenario to give the reference of the transfer as assigned by the underlying client.
ClientReference *Max35Text `xml:"ClntRef,omitempty"`
// Requested date at which the instructing party places the transfer instruction.
RequestedTransferDate *DateFormat1Choice `xml:"ReqdTrfDt"`
// Total quantity of securities to be settled.
TotalUnitsNumber *FinancialInstrumentQuantity1 `xml:"TtlUnitsNb"`
// Total quantity of securities to be settled.
PortfolioTransferOutRate *PercentageRate `xml:"PrtflTrfOutRate"`
// Information about the units to be transferred.
UnitsDetails []*Unit3 `xml:"UnitsDtls,omitempty"`
// Indicates the rounding direction applied to nearest unit.
Rounding *RoundingDirection2Code `xml:"Rndg,omitempty"`
// Indicates whether the transfer results in a change of beneficial owner.
OwnAccountTransferIndicator *YesNoIndicator `xml:"OwnAcctTrfInd,omitempty"`
// Value of a security, as booked in an account. Book value is often different from the current market value of the security.
AveragePrice *ActiveOrHistoricCurrencyAnd13DecimalAmount `xml:"AvrgPric,omitempty"`
// Additional specific settlement information for non-regulated traded funds.
NonStandardSettlementInformation *Max350Text `xml:"NonStdSttlmInf,omitempty"`
}
func (t *Transfer8) SetMasterReference(value string) {
t.MasterReference = (*Max35Text)(&value)
}
func (t *Transfer8) SetTransferReference(value string) {
t.TransferReference = (*Max35Text)(&value)
}
func (t *Transfer8) SetClientReference(value string) {
t.ClientReference = (*Max35Text)(&value)
}
func (t *Transfer8) AddRequestedTransferDate() *DateFormat1Choice {
t.RequestedTransferDate = new(DateFormat1Choice)
return t.RequestedTransferDate
}
func (t *Transfer8) AddTotalUnitsNumber() *FinancialInstrumentQuantity1 {
t.TotalUnitsNumber = new(FinancialInstrumentQuantity1)
return t.TotalUnitsNumber
}
func (t *Transfer8) SetPortfolioTransferOutRate(value string) {
t.PortfolioTransferOutRate = (*PercentageRate)(&value)
}
func (t *Transfer8) AddUnitsDetails() *Unit3 {
newValue := new(Unit3)
t.UnitsDetails = append(t.UnitsDetails, newValue)
return newValue
}
func (t *Transfer8) SetRounding(value string) {
t.Rounding = (*RoundingDirection2Code)(&value)
}
func (t *Transfer8) SetOwnAccountTransferIndicator(value string) {
t.OwnAccountTransferIndicator = (*YesNoIndicator)(&value)
}
func (t *Transfer8) SetAveragePrice(value, currency string) {
t.AveragePrice = NewActiveOrHistoricCurrencyAnd13DecimalAmount(value, currency)
}
func (t *Transfer8) SetNonStandardSettlementInformation(value string) {
t.NonStandardSettlementInformation = (*Max350Text)(&value)
} | Transfer8.go | 0.856212 | 0.413063 | Transfer8.go | starcoder |
package randxdr
import (
"math"
"regexp"
"strings"
goxdr "github.com/xdrpp/goxdr/xdr"
)
// Selector is function used to match fields of a goxdr.XdrType
type Selector func(string, goxdr.XdrType) bool
// Setter is a function used to set field values for a goxdr.XdrType
type Setter func(*randMarshaller, string, goxdr.XdrType)
// Preset can be used to restrict values for specific fields of a goxdr.XdrType.
type Preset struct {
Selector Selector
Setter Setter
}
// FieldEquals returns a Selector which matches on a field name by equality
func FieldEquals(toMatch string) Selector {
return func(name string, xdrType goxdr.XdrType) bool {
return name == toMatch
}
}
// FieldMatches returns a Selector which matches on a field name by regexp
func FieldMatches(r *regexp.Regexp) Selector {
return func(name string, xdrType goxdr.XdrType) bool {
return r.MatchString(name)
}
}
// And is a Selector which returns true if the given pair of selectors
// match the field.
func And(a, b Selector) Selector {
return func(s string, xdrType goxdr.XdrType) bool {
return a(s, xdrType) && b(s, xdrType)
}
}
// IsPtr is a Selector which matches on all XDR pointer fields
var IsPtr Selector = func(name string, xdrType goxdr.XdrType) bool {
_, ok := goxdr.XdrBaseType(xdrType).(goxdr.XdrPtr)
return ok
}
// IsNestedInnerSet is a Selector which identifies nesting for the following xdr type:
// struct SCPQuorumSet
// {
// uint32 threshold;
// PublicKey validators<>;
// SCPQuorumSet innerSets<>;
// };
// supports things like: A,B,C,(D,E,F),(G,H,(I,J,K,L))
// only allows 2 levels of nesting
var IsNestedInnerSet Selector = func(name string, xdrType goxdr.XdrType) bool {
if strings.HasSuffix(name, ".innerSets") && strings.Count(name, ".innerSets[") > 0 {
_, ok := goxdr.XdrBaseType(xdrType).(goxdr.XdrVec)
return ok
}
return false
}
// SetPtr is a Setter which sets the xdr pointer to null if present is false
func SetPtr(present bool) Setter {
return func(m *randMarshaller, name string, xdrType goxdr.XdrType) {
p := goxdr.XdrBaseType(xdrType).(goxdr.XdrPtr)
p.SetPresent(present)
p.XdrMarshalValue(m, name)
}
}
// SetVecLen returns a Setter which sets the length of a variable length
// array ( https://tools.ietf.org/html/rfc4506#section-4.13 ) to a fixed value
func SetVecLen(vecLen uint32) Setter {
return func(x *randMarshaller, field string, xdrType goxdr.XdrType) {
v := goxdr.XdrBaseType(xdrType).(goxdr.XdrVec)
v.SetVecLen(vecLen)
v.XdrMarshalN(x, field, vecLen)
}
}
// SetU32 returns a Setter which sets a uint32 XDR field to a fixed value
func SetU32(val uint32) Setter {
return func(x *randMarshaller, field string, xdrType goxdr.XdrType) {
f := goxdr.XdrBaseType(xdrType).(goxdr.XdrNum32)
f.SetU32(val)
}
}
// SetPositiveNum64 returns a Setter which sets a uint64 XDR field to a random positive value
var SetPositiveNum64 Setter = func(x *randMarshaller, field string, xdrType goxdr.XdrType) {
f := goxdr.XdrBaseType(xdrType).(goxdr.XdrNum64)
f.SetU64(uint64(x.rand.Int63n(math.MaxInt64)))
}
// SetPositiveNum32 returns a Setter which sets a uint32 XDR field to a random positive value
var SetPositiveNum32 Setter = func(x *randMarshaller, field string, xdrType goxdr.XdrType) {
f := goxdr.XdrBaseType(xdrType).(goxdr.XdrNum32)
f.SetU32(uint32(x.rand.Int31n(math.MaxInt32)))
}
const alphaNumeric = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
// SetAssetCode returns a Setter which sets an asset code XDR field to a
// random alphanumeric string right-padded with 0 bytes
var SetAssetCode Setter = func(x *randMarshaller, field string, xdrType goxdr.XdrType) {
f := goxdr.XdrBaseType(xdrType).(goxdr.XdrBytes)
slice := f.GetByteSlice()
var end int
switch len(slice) {
case 4:
end = int(x.rand.Int31n(4))
case 12:
end = int(4 + x.rand.Int31n(8))
}
for i := 0; i <= end; i++ {
slice[i] = alphaNumeric[x.rand.Int31n(int32(len(alphaNumeric)))]
}
} | randxdr/presets.go | 0.657868 | 0.74158 | presets.go | starcoder |
package main
import "fmt"
// Point represents a point on cartesian plane
type Point struct {
x, y int
}
func (point Point) String() string {
return fmt.Sprintf("(%v, %v)", point.x, point.y)
}
// PointWithDistance is a node that has two properties:
// distance - square of euclidean distance from some reference point
// point - the point which is at the given distance from the reference point
type PointWithDistance struct {
distance int
point *Point
}
// MaxHeap represents a max heap of points.
// Each node in the heap is a point with distance from some reference point
type MaxHeap struct {
capacity int
size int
nodes []*PointWithDistance
}
// Create a MaxHeap of capacity k points closest to the target from an array of Points
func createMaxHeap(points []*Point, target *Point, k int) *MaxHeap {
newHeap := MaxHeap{k, 0, make([]*PointWithDistance, k)}
for _, point := range points {
pointWithDistance := createNode(point, target)
newHeap.addNodeToHeap(pointWithDistance)
}
return &newHeap
}
func createNode(point, target *Point) *PointWithDistance {
delX := point.x - target.x
delY := point.y - target.y
distance := (delX * delX) + (delY * delY)
return &PointWithDistance{distance, point}
}
// Add node to heap
func (heap *MaxHeap) addNodeToHeap(newNode *PointWithDistance) {
if heap.size != heap.capacity {
heap.nodes[heap.size] = newNode
heap.size++
heap.heapifyFromBottom()
} else if newNode.distance < heap.nodes[0].distance {
heap.nodes[0] = newNode
heap.heapifyFromTop()
}
}
func getLeftChild(i int) int {
return (2 * i) + 1
}
func getRightChild(i int) int {
return (2 * i) + 2
}
func getParent(i int) int {
return (i - 1) / 2
}
func (heap *MaxHeap) swap(i, j int) {
temp := heap.nodes[i]
heap.nodes[i] = heap.nodes[j]
heap.nodes[j] = temp
}
func (heap *MaxHeap) isLeaf(nodeIndex int) bool {
return getLeftChild(nodeIndex) > heap.size-1
}
// gets index of the child node with max distance
func (heap *MaxHeap) getMaxChildIndex(nodeIndex int) int {
if heap.isLeaf(nodeIndex) {
panic("Cannot get max child for a leaf node")
}
if getRightChild(nodeIndex) >= heap.size || heap.nodes[getLeftChild(nodeIndex)].distance > heap.nodes[getRightChild(nodeIndex)].distance {
return getLeftChild(nodeIndex)
}
return getRightChild(nodeIndex)
}
// Last node may not be at the correct place in the heap. Trickle it up
func (heap *MaxHeap) heapifyFromBottom() {
trickleNodeIndex := heap.size - 1
for (trickleNodeIndex != 0) && (heap.nodes[trickleNodeIndex].distance > heap.nodes[getParent(trickleNodeIndex)].distance) {
heap.swap(trickleNodeIndex, getParent(trickleNodeIndex))
trickleNodeIndex = getParent(trickleNodeIndex)
}
}
// Heap root may not be at the correct plact. Trickle it down
func (heap *MaxHeap) heapifyFromTop() {
trickleNodeIndex := 0
for trickleNodeIndex < heap.size {
if heap.isLeaf(trickleNodeIndex) {
return
}
maxChildIndex := heap.getMaxChildIndex(trickleNodeIndex)
if heap.nodes[trickleNodeIndex].distance > heap.nodes[maxChildIndex].distance {
return
}
heap.swap(trickleNodeIndex, maxChildIndex)
trickleNodeIndex = maxChildIndex
}
}
// Finds k closest points from target point
func findClosest(k int, points []*Point, target *Point) []*Point {
if points == nil || k >= len(points) {
return points
}
maxHeap := createMaxHeap(points, target, k)
closestPoints := make([]*Point, k)
for i, node := range maxHeap.nodes {
closestPoints[i] = node.point
}
return closestPoints
}
// A utility function that returns all the points with distance from target point
func getPointsWithDistance(points []*Point, target *Point) []*PointWithDistance {
pwds := make([]*PointWithDistance, len(points))
for i, point := range points {
pwds[i] = createNode(point, target)
}
return pwds
}
func main() {
points := []*Point{
{0, -2},
{-2, 4},
{1, 3},
{-1, -3},
{5, 2},
}
origin := &Point{1, 2}
// fmt.Println(getPointsWithDistance(points, origin))
fmt.Println(findClosest(1, points, origin))
} | k-closest.go | 0.85405 | 0.644812 | k-closest.go | starcoder |
package framework
import (
"strings"
"sigs.k8s.io/kustomize/kyaml/yaml"
)
// Function defines a function which mutates or validates a collection of configuration
// To create a structured validation result, return a Result as the error.
type Function func(nodes []*yaml.RNode) ([]*yaml.RNode, error)
// Result defines a function result which will be set on the emitted ResourceList
type Result struct {
// Name is the name of the function creating the result
Name string `yaml:"name,omitempty"`
// Items are the individual results
Items []Item `yaml:"items,omitempty"`
}
// Severity indicates the severity of the result
type Severity string
const (
// Error indicates the result is an error. Will cause the function to exit non-0.
Error Severity = "error"
// Warning indicates the result is a warning
Warning Severity = "warning"
// Info indicates the result is an informative message
Info Severity = "info"
)
// Item defines a validation result
type Item struct {
// Message is a human readable message
Message string `yaml:"message,omitempty"`
// Severity is the severity of the
Severity Severity `yaml:"severity,omitempty"`
// ResourceRef is a reference to a resource
ResourceRef yaml.ResourceMeta `yaml:"resourceRef,omitempty"`
Field Field `yaml:"field,omitempty"`
File File `yaml:"file,omitempty"`
}
// File references a file containing a resource
type File struct {
// Path is relative path to the file containing the resource
Path string `yaml:"path,omitempty"`
// Index is the index into the file containing the resource
// (i.e. if there are multiple resources in a single file)
Index int `yaml:"index,omitempty"`
}
// Field references a field in a resource
type Field struct {
// Path is the field path
Path string `yaml:"path,omitempty"`
// CurrentValue is the current field value
CurrentValue string `yaml:"currentValue,omitempty"`
// SuggestedValue is the suggested field value
SuggestedValue string `yaml:"suggestedValue,omitempty"`
}
// Error implement error
func (e Result) Error() string {
var msgs []string
for _, i := range e.Items {
msgs = append(msgs, i.Message)
}
return strings.Join(msgs, "\n\n")
}
// ExitCode provides the exit code based on the result
func (e Result) ExitCode() int {
for _, i := range e.Items {
if i.Severity == Error {
return 1
}
}
return 0
} | kyaml/fn/framework/types.go | 0.62223 | 0.417093 | types.go | starcoder |
package serialization
import (
"github.com/lyraproj/puppet-evaluator/eval"
"github.com/lyraproj/puppet-evaluator/types"
)
// A Collector receives streaming events and produces an eval.Value
type Collector interface {
ValueConsumer
// Value returns the created value. Must not be called until the consumption
// of values is complete.
Value() eval.Value
}
type collector struct {
values []eval.Value
stack [][]eval.Value
}
// NewCollector returns a new Collector instance
func NewCollector() Collector {
hm := &collector{}
hm.Init()
return hm
}
func (hm *collector) Init() {
hm.values = make([]eval.Value, 0, 64)
hm.stack = make([][]eval.Value, 1, 8)
hm.stack[0] = make([]eval.Value, 0, 1)
}
func (hm *collector) AddArray(cap int, doer eval.Doer) {
types.BuildArray(cap, func(ar *types.ArrayValue, elements []eval.Value) []eval.Value {
hm.Add(ar)
top := len(hm.stack)
hm.stack = append(hm.stack, elements)
doer()
st := hm.stack[top]
hm.stack = hm.stack[0:top]
return st
})
}
func (hm *collector) AddHash(cap int, doer eval.Doer) {
types.BuildHash(cap, func(ar *types.HashValue, entries []*types.HashEntry) []*types.HashEntry {
hm.Add(ar)
top := len(hm.stack)
hm.stack = append(hm.stack, make([]eval.Value, 0, cap*2))
doer()
st := hm.stack[top]
hm.stack = hm.stack[0:top]
top = len(st)
for i := 0; i < top; i += 2 {
entries = append(entries, types.WrapHashEntry(st[i], st[i+1]))
}
return entries
})
}
func (hm *collector) Add(element eval.Value) {
top := len(hm.stack) - 1
hm.stack[top] = append(hm.stack[top], element)
hm.values = append(hm.values, element)
}
func (hm *collector) AddRef(ref int) {
top := len(hm.stack) - 1
hm.stack[top] = append(hm.stack[top], hm.values[ref])
}
func (hm *collector) CanDoBinary() bool {
return true
}
func (hm *collector) CanDoComplexKeys() bool {
return true
}
func (hm *collector) StringDedupThreshold() int {
return 0
}
func (hm *collector) Value() eval.Value {
return hm.stack[0][0]
} | serialization/collector.go | 0.714927 | 0.415907 | collector.go | starcoder |
package money
import (
"fmt"
"math/big"
"strings"
)
// Money represents a monetary value
type Money struct {
rat *big.Rat
}
// New creates a new instance with a zero value.
func New() *Money {
return &Money{
rat: big.NewRat(0, 1),
}
}
// NewFromCents creates a new instance with a cents value.
func NewFromCents(cents int64) *Money {
return &Money{
rat: big.NewRat(cents, 100),
}
}
// Parse a string to create a new money value. It can read `XX.YY` and `XX,YY`.
// An empty string is parsed as zero.
func Parse(s string) (*Money, error) {
if len(s) == 0 {
return New(), nil
}
s = strings.Replace(s, ",", ".", -1)
rat := new(big.Rat)
if _, err := fmt.Sscan(s, rat); err != nil {
return nil, fmt.Errorf("money: cannot scan value: %s: %s", s, err)
}
return &Money{rat}, nil
}
// Cents returns the value with cents precision (2 decimal places) as a number.
func (money *Money) Cents() int64 {
cents := big.NewInt(100)
v := new(big.Int)
v.Mul(money.rat.Num(), cents)
v.Quo(v, money.rat.Denom())
return v.Int64()
}
// Format the money value with a specific decimal precision.
func (money *Money) Format(prec int) string {
return money.rat.FloatString(prec)
}
// Mul multiplies the money value n times and returns the result.
func (money *Money) Mul(n int64) *Money {
b := big.NewRat(n, 1)
result := New()
result.rat.Mul(money.rat, b)
return result
}
// Add two money values together and returns the result.
func (money *Money) Add(other *Money) *Money {
result := New()
result.rat.Add(money.rat, other.rat)
return result
}
// Sub subtracts two money values and returns the result.
func (money *Money) Sub(other *Money) *Money {
result := New()
result.rat.Sub(money.rat, other.rat)
return result
}
// Div divides two money values and returns the result.
func (money *Money) Div(other *Money) *Money {
result := New()
result.rat.Quo(money.rat, other.rat)
return result
}
// LessThan returns true if a money value is less than the other.
func (money *Money) LessThan(other *Money) bool {
return money.rat.Cmp(other.rat) == -1
}
// AddTaxPercent adds a percentage of the price to itself.
func (money *Money) AddTaxPercent(tax int64) *Money {
result := New()
result.rat.Set(money.rat)
ratTax := big.NewRat(tax, 100)
result.rat.Add(result.rat, ratTax.Mul(ratTax, money.rat))
return result
}
// IsZero returns true if there is no money.
func (money *Money) IsZero() bool {
return money.Cents() == 0
}
// Markup adds a percentage with decimals of the price to itself. The
// percentage should be pre-multiplied by 100 to avoid floating point issues.
func (money *Money) Markup(tax int64) *Money {
result := New()
result.rat.Set(money.rat)
ratTax := big.NewRat(tax, 10000)
result.rat.Add(result.rat, ratTax.Mul(ratTax, money.rat))
return result
} | money.go | 0.871105 | 0.572842 | money.go | starcoder |
package eff
import (
"errors"
"math"
"math/rand"
"strconv"
)
const (
// Version current semantic version of eff
Version = "0.4.8"
)
// Point container for 2d points
type Point struct {
X int
Y int
}
// Scale returns a new scaled point
func (p *Point) Scale(s float64) Point {
return Point{
X: int(float64(p.X) * s),
Y: int(float64(p.Y) * s),
}
}
// Offset returns an offset point
func (p *Point) Offset(x int, y int) Point {
return Point{
X: p.X + x,
Y: p.Y + y,
}
}
// ScalePoints returns a new slice of scaled points
func ScalePoints(points []Point, s float64) []Point {
var scaledPoints []Point
for _, p := range points {
scaledPoints = append(scaledPoints, p.Scale(s))
}
return scaledPoints
}
// OffsetPoints returns a new slice of offset points
func OffsetPoints(points []Point, x int, y int) []Point {
var offsetPoints []Point
for _, p := range points {
offsetPoints = append(offsetPoints, p.Offset(x, y))
}
return offsetPoints
}
// Color container for argb colors
type Color struct {
R int
G int
B int
A int
}
// Add offset the rgb values of the color
func (c *Color) Add(v int) {
c.R += v
c.G += v
c.B += v
c.R = int(math.Min(0xFF, float64(c.R)))
c.R = int(math.Max(0x00, float64(c.R)))
c.G = int(math.Min(0xFF, float64(c.G)))
c.G = int(math.Max(0x00, float64(c.G)))
c.B = int(math.Min(0xFF, float64(c.B)))
c.B = int(math.Max(0x00, float64(c.B)))
}
// RandomColor genereate a random color struct. The opacity is also random
func RandomColor() Color {
return Color{
R: rand.Intn(0xFF),
G: rand.Intn(0xFF),
B: rand.Intn(0xFF),
A: 0xFF,
}
}
// Black returns a color struct that is black
func Black() Color {
return Color{
R: 0x00,
G: 0x00,
B: 0x00,
A: 0xFF,
}
}
// White returns a color struct that is white
func White() Color {
return Color{
R: 0xFF,
G: 0xFF,
B: 0xFF,
A: 0xFF,
}
}
// ColorWithHex creates an eff color w/ a hex string in the formant "#FF00FF"
func ColorWithHex(hex string) (Color, error) {
if hex[0] == '#' {
hex = hex[1:]
}
if len(hex) < 6 {
return Color{}, errors.New("Invalid hex color, too short")
}
r, err := strconv.ParseInt(hex[:2], 16, 32)
if err != nil {
return Color{}, err
}
g, err := strconv.ParseInt(hex[2:4], 16, 32)
if err != nil {
return Color{}, err
}
b, err := strconv.ParseInt(hex[4:6], 16, 32)
if err != nil {
return Color{}, err
}
return Color{
R: int(r),
G: int(g),
B: int(b),
A: 0xFF,
}, nil
}
// Rect container for rectangle
type Rect struct {
X int
Y int
W int
H int
}
// Scale returns a new scaled Rect
func (r *Rect) Scale(s float64) Rect {
return Rect{
X: int(float64(r.X) * s),
Y: int(float64(r.Y) * s),
W: int(float64(r.W) * s),
H: int(float64(r.H) * s),
}
}
// LocalInside tests to see if rect is inside of this rect, assumes test rect has coordinates local to this rect
func (r *Rect) LocalInside(testRect Rect) bool {
if testRect.X > r.W {
return false
}
if testRect.Y > r.H {
return false
}
if (testRect.X + testRect.W) < 0 {
return false
}
if (testRect.Y + testRect.H) < 0 {
return false
}
return true
}
// ScaleRects returns a new slice of scaled Rects
func ScaleRects(rects []Rect, s float64) []Rect {
var scaledRects []Rect
for _, r := range rects {
scaledRects = append(scaledRects, r.Scale(s))
}
return scaledRects
}
// Font describes a ttf font
type Font interface {
Path() string
Size() int
}
// Image describes an image
type Image interface {
Path() string
Width() int
Height() int
}
// Equals test to see if two rectangles occupy the same location exactly
func (r *Rect) Equals(otherRect Rect) bool {
return (r.X == otherRect.X &&
r.Y == otherRect.Y &&
r.W == otherRect.W &&
r.H == otherRect.H)
}
// Intersects check to see if a rectangle is inside of this rectangle
func (r *Rect) Intersects(otherRect Rect) bool {
return (int(math.Abs(float64(r.X-otherRect.X)))*2 < (r.W + otherRect.W)) &&
(int(math.Abs(float64(r.Y-otherRect.Y)))*2 < (r.H + otherRect.H))
}
// Inside check to see if a point inside of this rectangle
func (r *Rect) Inside(p Point) bool {
return (p.X > r.X) && (p.X < (r.X + r.W)) && (p.Y > r.Y) && (p.Y < (r.Y + r.H))
} | eff.go | 0.886936 | 0.461866 | eff.go | starcoder |
package msgraph
// RatingFranceMoviesType undocumented
type RatingFranceMoviesType int
const (
// RatingFranceMoviesTypeVAllAllowed undocumented
RatingFranceMoviesTypeVAllAllowed RatingFranceMoviesType = 0
// RatingFranceMoviesTypeVAllBlocked undocumented
RatingFranceMoviesTypeVAllBlocked RatingFranceMoviesType = 1
// RatingFranceMoviesTypeVAgesAbove10 undocumented
RatingFranceMoviesTypeVAgesAbove10 RatingFranceMoviesType = 2
// RatingFranceMoviesTypeVAgesAbove12 undocumented
RatingFranceMoviesTypeVAgesAbove12 RatingFranceMoviesType = 3
// RatingFranceMoviesTypeVAgesAbove16 undocumented
RatingFranceMoviesTypeVAgesAbove16 RatingFranceMoviesType = 4
// RatingFranceMoviesTypeVAgesAbove18 undocumented
RatingFranceMoviesTypeVAgesAbove18 RatingFranceMoviesType = 5
)
// RatingFranceMoviesTypePAllAllowed returns a pointer to RatingFranceMoviesTypeVAllAllowed
func RatingFranceMoviesTypePAllAllowed() *RatingFranceMoviesType {
v := RatingFranceMoviesTypeVAllAllowed
return &v
}
// RatingFranceMoviesTypePAllBlocked returns a pointer to RatingFranceMoviesTypeVAllBlocked
func RatingFranceMoviesTypePAllBlocked() *RatingFranceMoviesType {
v := RatingFranceMoviesTypeVAllBlocked
return &v
}
// RatingFranceMoviesTypePAgesAbove10 returns a pointer to RatingFranceMoviesTypeVAgesAbove10
func RatingFranceMoviesTypePAgesAbove10() *RatingFranceMoviesType {
v := RatingFranceMoviesTypeVAgesAbove10
return &v
}
// RatingFranceMoviesTypePAgesAbove12 returns a pointer to RatingFranceMoviesTypeVAgesAbove12
func RatingFranceMoviesTypePAgesAbove12() *RatingFranceMoviesType {
v := RatingFranceMoviesTypeVAgesAbove12
return &v
}
// RatingFranceMoviesTypePAgesAbove16 returns a pointer to RatingFranceMoviesTypeVAgesAbove16
func RatingFranceMoviesTypePAgesAbove16() *RatingFranceMoviesType {
v := RatingFranceMoviesTypeVAgesAbove16
return &v
}
// RatingFranceMoviesTypePAgesAbove18 returns a pointer to RatingFranceMoviesTypeVAgesAbove18
func RatingFranceMoviesTypePAgesAbove18() *RatingFranceMoviesType {
v := RatingFranceMoviesTypeVAgesAbove18
return &v
} | v1.0/RatingFranceMoviesTypeEnum.go | 0.582016 | 0.499146 | RatingFranceMoviesTypeEnum.go | starcoder |
package cmd
var example = ` The following examples make use of a basic.json file which contains JSON
logs where each log line is a single JSON object. You can find the
basic.json file in ./cmd/fixture/ along with other test fixtures used for
golden file tests. Further note that you can configure flag defaults via a
config file.
~/.config/gg/config.yaml
Config file defaults are supported for the following flags.
-c/--colour
-g/--group
-t/--time
Select all logs where any key matches "obj" and its associated value matches
"qihx8". This can be used to e.g. grep for all logs related to Tenant
Cluster "qihx8".
cat basic.json | gg -s obj:qihx8
Select all logs like the example above but on top of that filter also for
logs where any key matches "res" and its associated value matches "dra".
This can be used to e.g. grep for all logs of the "drainer" and
"drainfinisher" resource implementation.
cat basic.json | gg -s obj:qihx8 -s res:dra
Select all logs like the example above but on top of that only output
key-value pairs of the logs where any key matches "ti" or "mes". This can be
used to e.g. show only "time" and "message". Note that the order of fields
given determines the output order. Here "ti,mes" makes it way easier to read
the output since "time" is always consistently formatted, whereas "message"
can be of almost arbitrary length.
cat basic.json | gg -s obj:qihx8 -s res:dra -f tim,mes
Select all logs like the example above but on top of that group output
key-value pairs of the logs based on the common value of associated keys
matching "lo". This can be used to e.g. group resource logs by their
reconciliation "loop".
cat basic.json | gg -s obj:qihx8 -s res:dra -f tim,mes -g loo
Select all error logs and display their caller and stack.
cat basic.json | gg -s lev:err -f cal,sta
Select all logs of two different resource handlers. This is to show the
s3object and tccpn resource handler logs together.
cat basic.json | gg -s obj:8ztu4 -s con:plane -s res:s3obj -s res:tccpn\$ -f res,mes -g loo
Display log messages of drainer resources and format their timestamp using
to the given format.
cat basic.json | gg -s obj:qihx8 -s res:dra -f res,mes,tim -t 15:04:05` | cmd/example.go | 0.687945 | 0.525978 | example.go | starcoder |
package retrieval
import (
"context"
"math"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
promlabels "github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/tsdb/labels"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
)
// CounterAggregator provides the 'aggregated counters' feature of the sidecar.
// It can be used to export a sum of multiple counters from Prometheus to
// Stackdriver as a single cumulative metric.
// Each aggregated counter is associated with a single OpenCensus counter that
// can then be exported to Stackdriver (as a CUMULATIVE metric) or exposed to
// Prometheus via the standard `/metrics` endpoint. Regular flushing of counter
// values is implemented by OpenCensus.
type CounterAggregator struct {
logger log.Logger
counters []*aggregatedCounter
statsRecord func(context.Context, ...stats.Measurement) // used in testing.
}
// aggregatedCounter is where CounterAggregator keeps internal state about each
// exported metric: OpenCensus measure and view as well as a list of Matchers that
// define which Prometheus metrics will get aggregated.
type aggregatedCounter struct {
measure *stats.Float64Measure
view *view.View
matchers [][]*promlabels.Matcher
}
// CounterAggregatorConfig contains configuration for CounterAggregator. Keys of the map
// are metric names that will be exported by counter aggregator.
type CounterAggregatorConfig map[string]*CounterAggregatorMetricConfig
// CounterAggregatorMetricConfig provides configuration of a single aggregated counter.
// Matchers specify what Prometheus metrics (which are expected to be counter metrics) will
// be re-aggregated. Help provides a description for the exported metric.
type CounterAggregatorMetricConfig struct {
Matchers [][]*promlabels.Matcher
Help string
}
// counterTracker keeps track of a single time series that has at least one aggregated
// counter associated with it (i.e. there is at least one aggregated counter that has
// Matchers covering this time series). Last timestamp and value are tracked
// to detect counter resets.
type counterTracker struct {
lastTimestamp int64
lastValue float64
measures []*stats.Float64Measure
ca *CounterAggregator
}
// NewCounterAggregator creates a counter aggregator.
func NewCounterAggregator(logger log.Logger, config *CounterAggregatorConfig) (*CounterAggregator, error) {
aggregator := &CounterAggregator{logger: logger, statsRecord: stats.Record}
for metric, cfg := range *config {
measure := stats.Float64(metric, cfg.Help, stats.UnitDimensionless)
v := &view.View{
Name: metric,
Description: cfg.Help,
Measure: measure,
Aggregation: view.Sum(),
}
if err := view.Register(v); err != nil {
return nil, err
}
aggregator.counters = append(aggregator.counters, &aggregatedCounter{
measure: measure,
view: v,
matchers: cfg.Matchers,
})
}
return aggregator, nil
}
// Close must be called when CounterAggregator is no longer needed.
func (c *CounterAggregator) Close() {
for _, counter := range c.counters {
view.Unregister(counter.view)
}
}
// getTracker returns a counterTracker for a specific time series defined by labelset.
// If `nil` is returned, it means that there are no aggregated counters that need to
// be incremented for this time series.
func (c *CounterAggregator) getTracker(lset labels.Labels) *counterTracker {
var measures []*stats.Float64Measure
for _, counter := range c.counters {
if matchFiltersets(lset, counter.matchers) {
measures = append(measures, counter.measure)
}
}
if len(measures) == 0 {
return nil
}
return &counterTracker{measures: measures, ca: c}
}
// newPoint gets called on each new sample (timestamp, value) for time series that need to feed
// values into aggregated counters.
func (t *counterTracker) newPoint(ctx context.Context, lset labels.Labels, ts int64, v float64) {
if math.IsNaN(v) {
level.Debug(t.ca.logger).Log("msg", "got NaN value", "labels", lset, "last ts", t.lastTimestamp, "ts", t, "lastValue", t.lastValue)
return
}
// Ignore measurements that are earlier than last seen timestamp, since they are already covered by
// later values. Samples are coming from TSDB in order, so this is unlikely to happen.
if ts < t.lastTimestamp {
level.Debug(t.ca.logger).Log("msg", "out of order timestamp", "labels", lset, "last ts", t.lastTimestamp, "ts", ts)
return
}
// Use the first value we see as the starting point for the counter.
if t.lastTimestamp == 0 {
level.Debug(t.ca.logger).Log("msg", "first point", "labels", lset)
t.lastTimestamp = ts
t.lastValue = v
return
}
var delta float64
if v < t.lastValue {
// Counter was reset.
delta = v
level.Debug(t.ca.logger).Log("msg", "counter reset", "labels", lset, "value", v, "lastValue", t.lastValue, "delta", delta)
} else {
delta = v - t.lastValue
level.Debug(t.ca.logger).Log("msg", "got delta", "labels", lset, "value", v, "lastValue", t.lastValue, "delta", delta)
}
t.lastTimestamp = ts
t.lastValue = v
if delta == 0 {
return
}
ms := make([]stats.Measurement, len(t.measures))
for i, measure := range t.measures {
ms[i] = measure.M(delta)
}
t.ca.statsRecord(ctx, ms...)
} | retrieval/aggregator.go | 0.742795 | 0.456652 | aggregator.go | starcoder |
package actionlint
import (
"fmt"
"strconv"
"strings"
"gopkg.in/yaml.v3"
)
// Pos represents position in the file.
type Pos struct {
// Line is a line number of the position. This value is 1-based.
Line int
// Col is a column number of the position. This value is 1-based.
Col int
}
func (p *Pos) String() string {
return fmt.Sprintf("line:%d,col:%d", p.Line, p.Col)
}
// String represents generic string value in YAML file with position.
type String struct {
// Value is a raw value of the string.
Value string
// Quoted represents the string is quoted with ' or " in the YAML source.
Quoted bool
// Pos is a position of the string in source.
Pos *Pos
}
// Bool represents generic boolean value in YAML file with position.
type Bool struct {
// Value is a raw value of the bool string.
Value bool
// Expression is a string when expression syntax ${{ }} is used for this section.
Expression *String
// Pos is a position in source.
Pos *Pos
}
// Int represents generic integer value in YAML file with position.
type Int struct {
// Value is a raw value of the integer string.
Value int
// Expression is a string when expression syntax ${{ }} is used for this section.
Expression *String
// Pos is a position in source.
Pos *Pos
}
// Float represents generic float value in YAML file with position.
type Float struct {
// Value is a raw value of the float string.
Value float64
// Expression is a string when expression syntax ${{ }} is used for this section.
Expression *String
// Pos is a position in source.
Pos *Pos
}
// Event interface represents workflow events in 'on' section
type Event interface {
// EventName returns name of the event to trigger this workflow.
EventName() string
}
// WebhookEvent represents event type based on webhook events.
// Some events can't have 'types' field. Only 'push' and 'pull' events can have 'tags', 'tags-ignore',
// 'paths' and 'paths-ignore' fields. Only 'workflow_run' event can have 'workflows' field.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#onevent_nametypes
type WebhookEvent struct {
// Hook is a name of the webhook event.
Hook *String
// Types is list of types of the webhook event. Only the types enumerated here will trigger
// the workflow.
Types []*String
// Branches is list of branch filters to choose branches.
Branches []*String
// BranchesIgnore is list of branch filters to reject some branches.
BranchesIgnore []*String
// Tags is list of tag filters to choose tags.
Tags []*String
// TagsIgnore is list of tag filters to reject some tags.
TagsIgnore []*String
// Paths is list of path filters to choose file paths.
Paths []*String
// PathsIgnore is list of path filters to reject some file paths.
PathsIgnore []*String
// Workflows is list of workflow names which are triggered by 'workflow_run' event.
Workflows []*String
// Pos is a position in source.
Pos *Pos
}
// EventName returns name of the event to trigger this workflow.
func (e *WebhookEvent) EventName() string {
return e.Hook.Value
}
// ScheduledEvent is event scheduled by workflow.
// https://docs.github.com/en/actions/learn-github-actions/events-that-trigger-workflows#scheduled-events
type ScheduledEvent struct {
// Cron is list of cron strings which schedules workflow.
Cron []*String
// Pos is a position in source.
Pos *Pos
}
// EventName returns name of the event to trigger this workflow.
func (e *ScheduledEvent) EventName() string {
return "schedule"
}
// DispatchInput is input specified on dispatching workflow manually.
// https://docs.github.com/en/actions/learn-github-actions/events-that-trigger-workflows#workflow_dispatch
type DispatchInput struct {
// Name is a name of input value specified on dispatching workflow manually.
Name *String
// Description is a description of input value specified on dispatching workflow manually.
Description *String
// Required is a flag to show if this input is mandatory or not on dispatching workflow manually.
Required *Bool
// Default is a default value of input value on dispatching workflow manually.
Default *String
}
// WorkflowDispatchEvent is event on dispatching workflow manually.
// https://docs.github.com/en/actions/learn-github-actions/events-that-trigger-workflows#workflow_dispatch
type WorkflowDispatchEvent struct {
// Inputs is map from input names to input attributes.
Inputs map[string]*DispatchInput
// Pos is a position in source.
Pos *Pos
}
// EventName returns name of the event to trigger this workflow.
func (e *WorkflowDispatchEvent) EventName() string {
return "workflow_dispatch"
}
// RepositoryDispatchEvent is repository_dispatch event configuration.
// https://docs.github.com/en/actions/learn-github-actions/events-that-trigger-workflows#repository_dispatch
type RepositoryDispatchEvent struct {
// Types is list of types which can trigger workflow.
Types []*String
// Pos is a position in source.
Pos *Pos
}
// EventName returns name of the event to trigger this workflow.
func (e *RepositoryDispatchEvent) EventName() string {
return "repository_dispatch"
}
// WorkflowCallEventInputType is a type of inputs at workflow_call event.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#onworkflow_callinput_idtype
type WorkflowCallEventInputType uint8
const (
// WorkflowCallEventInputTypeInvalid represents invalid type input as default value of the type.
WorkflowCallEventInputTypeInvalid WorkflowCallEventInputType = iota
// WorkflowCallEventInputTypeBoolean represents boolean type input.
WorkflowCallEventInputTypeBoolean
// WorkflowCallEventInputTypeNumber represents number type input.
WorkflowCallEventInputTypeNumber
// WorkflowCallEventInputTypeString represents string type input.
WorkflowCallEventInputTypeString
)
// WorkflowCallEventInput is an input configuration of workflow_call event.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#onworkflow_callinputs
type WorkflowCallEventInput struct {
// Description is a description of the input.
Description *String
// Default is a default value of the input. Nil means no default value.
Default *String
// Required represents if the input is required or optional. When this value is nil, it means optional.
Required *Bool
// Type of the input, which must be one of 'boolean', 'number' or 'string'. This property is required.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#onworkflow_callinput_idtype
Type WorkflowCallEventInputType
}
// WorkflowCallEventSecret is a secret configuration of workflow_call event.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#onworkflow_callsecrets
type WorkflowCallEventSecret struct {
// Description is a description of the secret.
Description *String
// Required represents if the secret is required or optional. When this value is nil, it means optional.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#onworkflow_callsecretssecret_idrequired
Required *Bool
}
// WorkflowCallEvent is workflow_call event configuration.
// https://docs.github.com/en/actions/learn-github-actions/events-that-trigger-workflows#workflow-reuse-events
type WorkflowCallEvent struct {
// Inputs is a map from input name to input configuration.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#onworkflow_callinputs
Inputs map[*String]*WorkflowCallEventInput
// Secrets is a map from name of secret to secret configuration.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#onworkflow_callsecrets
Secrets map[*String]*WorkflowCallEventSecret
// Pos is a position in source.
Pos *Pos
}
// EventName returns name of the event to trigger this workflow.
func (e *WorkflowCallEvent) EventName() string {
return "workflow_call"
}
// PermissionScope is struct for respective permission scope like "issues", "checks", ...
// https://docs.github.com/en/actions/security-guides/automatic-token-authentication#permissions-for-the-github_token
type PermissionScope struct {
// Name is name of the scope.
Name *String
// Value is permission value of the scope.
Value *String
}
// Permissions is set of permission configurations in workflow file. All permissions can be set at
// once. Or each permission can be configured respectively.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#permissions
type Permissions struct {
// All represents a permission value for all the scopes at once.
All *String
// Scopes is mappings from scope name to its permission configuration
Scopes map[string]*PermissionScope
// Pos is a position in source.
Pos *Pos
}
// DefaultsRun is configuration that shell is how to be run.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#defaultsrun
type DefaultsRun struct {
// Shell is shell name to be run.
Shell *String
// WorkingDirectory is a default working directory path.
WorkingDirectory *String
// Pos is a position in source.
Pos *Pos
}
// Defaults is set of default configurations to run shell.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#defaults
type Defaults struct {
// Run is configuration of how to run shell.
Run *DefaultsRun
// Pos is a position in source.
Pos *Pos
}
// Concurrency is a configuration of concurrency of the workflow.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#concurrency
type Concurrency struct {
// Group is name of the concurrency group.
Group *String
// CancelInProgress is a flag that shows if canceling this workflow cancels other jobs in progress.
CancelInProgress *Bool
// Pos is a position in source.
Pos *Pos
}
// Environment is a configuration of environment.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idenvironment
type Environment struct {
// Name is a name of environment which the workflow uses.
Name *String
// URL is the URL mapped to 'environment_url' in the deployments API. Empty value means no value was specified.
URL *String
// Pos is a position in source.
Pos *Pos
}
// ExecKind is kind of how the step is executed. A step runs some action or runs some shell script.
type ExecKind uint8
const (
// ExecKindAction is kind for step to run action
ExecKindAction ExecKind = iota
// ExecKindRun is kind for step to run shell script
ExecKindRun
)
// Exec is an interface how the step is executed. Step in workflow runs either an action or a script
type Exec interface {
// Kind returns kind of the step execution.
Kind() ExecKind
// SetWorkingDir sets working-directory section.
SetWorkingDir(d *String)
}
// ExecRun is configuration how to run shell script at the step.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idstepsrun
type ExecRun struct {
// Run is script to run.
Run *String
// Shell represents optional 'shell' field. Nil means nothing specified.
Shell *String
// WorkingDirectory represents optional 'working-directory' field. Nil means nothing specified.
WorkingDirectory *String
// RunPos is position of 'run' section
RunPos *Pos
}
// Kind returns kind of the step execution.
func (e *ExecRun) Kind() ExecKind {
return ExecKindRun
}
// SetWorkingDir sets working-directory section.
func (e *ExecRun) SetWorkingDir(dir *String) {
e.WorkingDirectory = dir
}
// Input is an input field for running an action.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idstepswith
type Input struct {
// Name is a name of the input.
Name *String
// Value is a value of the input.
Value *String
}
// ExecAction is configuration how to run action at the step.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idstepsuses
type ExecAction struct {
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idstepsuses
Uses *String
// Inputs represents inputs to the action to execute in 'with' section
Inputs map[string]*Input
// Entrypoint represents optional 'entrypoint' field in 'with' section. Nil field means nothing specified
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idstepswithentrypoint
Entrypoint *String
// Args represents optional 'args' field in 'with' section. Nil field means nothing specified
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idstepswithargs
Args *String
// WorkingDirectory is a default working directory path to run action
WorkingDirectory *String
}
// Kind returns kind of the step execution.
func (e *ExecAction) Kind() ExecKind {
return ExecKindAction
}
// SetWorkingDir sets working-directory section.
func (e *ExecAction) SetWorkingDir(dir *String) {
e.WorkingDirectory = dir
}
// RawYAMLValueKind is kind of raw YAML values
type RawYAMLValueKind int
const (
// RawYAMLValueKindObject is kind for an object value of raw YAML value.
RawYAMLValueKindObject = RawYAMLValueKind(yaml.MappingNode)
// RawYAMLValueKindArray is kind for an array value of raw YAML value.
RawYAMLValueKindArray = RawYAMLValueKind(yaml.SequenceNode)
// RawYAMLValueKindString is kind for a string value of raw YAML value.
RawYAMLValueKindString = RawYAMLValueKind(yaml.ScalarNode)
)
// RawYAMLValue is a value at matrix variation. Any value can be put at matrix variations
// including mappings and arrays.
type RawYAMLValue interface {
// Kind returns kind of raw YAML value.
Kind() RawYAMLValueKind
// Equals returns if the other value is equal to the value.
Equals(other RawYAMLValue) bool
// Pos returns the start position of the value in the source file
Pos() *Pos
// String returns string representation of the value
String() string
}
// RawYAMLObject is raw YAML mapping value.
type RawYAMLObject struct {
// Props is map from property names to their values.
Props map[string]RawYAMLValue
pos *Pos
}
// Kind returns kind of raw YAML value.
func (o *RawYAMLObject) Kind() RawYAMLValueKind {
return RawYAMLValueKindObject
}
// Equals returns if the other value is equal to the value.
func (o *RawYAMLObject) Equals(other RawYAMLValue) bool {
switch other := other.(type) {
case *RawYAMLObject:
for n, p1 := range o.Props {
if p2, ok := other.Props[n]; !ok || !p1.Equals(p2) {
return false
}
}
return true
default:
return false
}
}
// Pos returns the start position of the value in the source file
func (o *RawYAMLObject) Pos() *Pos {
return o.pos
}
func (o *RawYAMLObject) String() string {
qs := make([]string, 0, len(o.Props))
for n, p := range o.Props {
qs = append(qs, fmt.Sprintf("%q: %s", n, p.String()))
}
return fmt.Sprintf("{%s}", strings.Join(qs, ", "))
}
// RawYAMLArray is raw YAML sequence value.
type RawYAMLArray struct {
// Elems is list of elements of the array value.
Elems []RawYAMLValue
pos *Pos
}
// Kind returns kind of raw YAML value.
func (a *RawYAMLArray) Kind() RawYAMLValueKind {
return RawYAMLValueKindArray
}
// Equals returns if the other value is equal to the value.
func (a *RawYAMLArray) Equals(other RawYAMLValue) bool {
switch other := other.(type) {
case *RawYAMLArray:
if len(a.Elems) != len(other.Elems) {
return false
}
for i, e1 := range a.Elems {
if !e1.Equals(other.Elems[i]) {
return false
}
}
return true
default:
return false
}
}
// Pos returns the start position of the value in the source file
func (a *RawYAMLArray) Pos() *Pos {
return a.pos
}
func (a *RawYAMLArray) String() string {
qs := make([]string, 0, len(a.Elems))
for _, v := range a.Elems {
qs = append(qs, v.String())
}
return fmt.Sprintf("[%s]", strings.Join(qs, ", "))
}
// RawYAMLString is raw YAML scalar value.
type RawYAMLString struct {
// Note: Might be useful to add kind to check the string value is int/float/bool/null.
// Value is string representation of the scalar node.
Value string
pos *Pos
}
// Kind returns kind of raw YAML value.
func (s *RawYAMLString) Kind() RawYAMLValueKind {
return RawYAMLValueKindString
}
// Equals returns if the other value is equal to the value.
func (s *RawYAMLString) Equals(other RawYAMLValue) bool {
switch other := other.(type) {
case *RawYAMLString:
return s.Value == other.Value
default:
return false
}
}
// Pos returns the start position of the value in the source file
func (s *RawYAMLString) Pos() *Pos {
return s.pos
}
func (s *RawYAMLString) String() string {
return strconv.Quote(s.Value)
}
// MatrixRow is one row of matrix. One matrix row can take multiple values. Those variations are
// stored as row of values in this struct.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idstrategymatrix
type MatrixRow struct {
// Name is a name of matrix value.
Name *String
// Values is variations of values which the matrix value can take.
Values []RawYAMLValue
// Expression is a string when expression syntax ${{ }} is used for this section.
Expression *String
}
// MatrixAssign represents which value should be taken in the row of the matrix.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idstrategymatrix
type MatrixAssign struct {
// Key is a name of the matrix value.
Key *String
// Value is the value selected from values in row.
Value RawYAMLValue
}
// MatrixCombination is combination of matrix value assignments to define one of matrix variations.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idstrategymatrix
type MatrixCombination struct {
Assigns map[string]*MatrixAssign
// Expression is a string when expression syntax ${{ }} is used for this section.
Expression *String
}
// MatrixCombinations is list of combinations of matrix assignments used for 'include' and 'exclude'
// sections.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idstrategymatrix
type MatrixCombinations struct {
Combinations []*MatrixCombination
// Expression is a string when expression syntax ${{ }} is used for this section.
Expression *String
}
// ContainsExpression returns if the combinations section includes at least one expression node.
func (cs *MatrixCombinations) ContainsExpression() bool {
if cs.Expression != nil {
return true
}
for _, c := range cs.Combinations {
if c.Expression != nil {
return true
}
}
return false
}
// Matrix is matrix variations configuration of a job.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idstrategymatrix
type Matrix struct {
// Values stores mappings from name to values.
Rows map[string]*MatrixRow
// Include is list of combinations of matrix values and additional values on running matrix combinations.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#example-including-additional-values-into-combinations
Include *MatrixCombinations
// Exclude is list of combinations of matrix values which should not be run. Combinations in
// this list will be removed from combinations of matrix to run.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#example-excluding-configurations-from-a-matrix
Exclude *MatrixCombinations
// Expression is a string when expression syntax ${{ }} is used for this section.
Expression *String
// Pos is a position in source.
Pos *Pos
}
// Strategy is strategy configuration of how the job is run.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idstrategy
type Strategy struct {
// Matrix is matrix of combinations of values. Each combination will run the job once.
Matrix *Matrix
// FailFast is flag to show if other jobs should stop when one job fails.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idstrategyfail-fast
FailFast *Bool
// MaxParallel is how many jobs should be run at once.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idstrategymax-parallel
MaxParallel *Int
// Pos is a position in source.
Pos *Pos
}
// EnvVar represents key-value of environment variable setup.
type EnvVar struct {
// Name is name of the environment variable.
Name *String
// Value is string value of the environment variable.
Value *String
}
// Env represents set of environment variables.
type Env struct {
// Vars is mapping from env var name to env var value.
Vars map[string]*EnvVar
// Expression is an expression string which contains ${{ ... }}. When this value is not empty,
// Vars should be nil.
Expression *String
}
// Step is step configuration. Step runs one action or one shell script.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idsteps
type Step struct {
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idstepsid
ID *String
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idstepsif
If *String
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idstepsname
Name *String
Exec Exec
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idstepsenv
Env *Env
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idstepscontinue-on-error
ContinueOnError *Bool
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idstepstimeout-minutes
TimeoutMinutes *Float
// Pos is a position in source.
Pos *Pos
}
// Credentials is credentials configuration.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idcontainercredentials
type Credentials struct {
// Username is username for authentication.
Username *String
// Password is password for authentication.
Password *String
// Pos is a position in source.
Pos *Pos
}
// Container is configuration of how to run the container.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idcontainer
type Container struct {
// Image is specification of Docker image.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idcontainerimage
Image *String
// Credentials is credentials configuration of the Docker container.
Credentials *Credentials
// Env is environment variables setup in the container.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idcontainerenv
Env *Env
// Ports is list of port number mappings of the container.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idcontainerports
Ports []*String
// Volumes are list of volumes to be mounted to the container.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idcontainervolumes
Volumes []*String
// Options is options string to run the container.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idcontaineroptions
Options *String
// Pos is a position in source.
Pos *Pos
}
// Service is configuration to run a service like database.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idservices
type Service struct {
// Name is name of the service.
Name *String
// Container is configuration of container which runs the service.
Container *Container
}
// Output is output entry of the job.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idoutputs
type Output struct {
// Name is name of output.
Name *String
// Value is value of output.
Value *String
}
// Runner is struct for runner configuration.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idruns-on
type Runner struct {
// Labels is list label names to select a runner to run a job. There are preset labels and user
// defined labels. Runner matching to the labels is selected.
Labels []*String
}
// WorkflowCallInput is a normal input for workflow call.
type WorkflowCallInput struct {
// Name is a name of the input.
Name *String
// Value is a value of the input.
Value *String
}
// WorkflowCallSecret is a secret input for workflow call.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idwith
type WorkflowCallSecret struct {
// Name is a name of the secret
Name *String
// Value is a value of the secret
Value *String
}
// WorkflowCall is a struct to represent workflow call at jobs.<job_id>.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_iduses
type WorkflowCall struct {
// Uses is a workflow specification to be called. This field is mandatory.
Uses *String
// Inputs is a map from input name to input value at 'with:'.
Inputs map[string]*WorkflowCallInput
// Secrets is a map from secret name to secret value at 'secrets:'.
Secrets map[string]*WorkflowCallSecret
}
// Job is configuration of how to run a job.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobs
type Job struct {
// ID is an ID of the job, which is key of job configuration objects.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_id
ID *String
// Name is a name of job that user can specify freely.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idname
Name *String
// Needs is list of job IDs which should be run before running this job.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idneeds
Needs []*String
// RunsOn is runner configuration which run the job.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idruns-on
RunsOn *Runner
// Permissions is permission configuration for running the job.
Permissions *Permissions
// Environment is environment specification where the job runs.
Environment *Environment
// Concurrency is concurrency configuration on running the job.
Concurrency *Concurrency
// Outputs is map from output name to output specifications.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idoutputs
Outputs map[string]*Output
// Env is environment variables setup while running the job.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idenv
Env *Env
// Defaults is default configurations of how to run scripts.
Defaults *Defaults
// If is a condition whether this job should be run.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idif
If *String
// Steps is list of steps to be run in the job.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idsteps
Steps []*Step
// TimeoutMinutes is timeout value of running the job in minutes.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idtimeout-minutes
TimeoutMinutes *Float
// Strategy is strategy configuration of running the job.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idstrategy
Strategy *Strategy
// ContinueOnError is a flag to show if execution should continue on error.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idcontinue-on-error
ContinueOnError *Bool
// Container is container configuration to run the job.
Container *Container
// Services is map from service names to service configurations.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idservices
Services map[string]*Service
// WorkflowCall is a workflow call by 'uses:'.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_iduses
WorkflowCall *WorkflowCall
// Pos is a position in source.
Pos *Pos
}
// Workflow is root of workflow syntax tree, which represents one workflow configuration file.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions
type Workflow struct {
// Name is name of the workflow. This field can be nil when user didn't specify the name explicitly.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#name
Name *String
// On is list of events which can trigger this workflow.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#onpushpull_requestbranchestags
On []Event
// Permissions is configuration of permissions of this workflow.
Permissions *Permissions
// Env is a default set of environment variables while running this workflow.
// https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#env
Env *Env
// Defaults is default configuration of how to run scripts.
Defaults *Defaults
// Concurrency is concurrency configuration of entire workflow. Each jobs also can their own
// concurrency configurations.
Concurrency *Concurrency
// Jobs is mappings from job ID to the job object
Jobs map[string]*Job
} | ast.go | 0.778313 | 0.437643 | ast.go | starcoder |
package policies
import (
"fmt"
cb "github.com/hyperledger/fabric-protos-go/common"
"github.com/pkg/errors"
)
// remap explores the policy tree depth first and remaps the "signed by"
// entries according to the remapping rules; a "signed by" rule requires
// a signature from a principal given its position in the array of principals;
// the idRemap map tells us how to remap these integers given that merging two
// policies implies deduplicating their principals
func remap(sp *cb.SignaturePolicy, idRemap map[int]int) *cb.SignaturePolicy {
switch t := sp.Type.(type) {
case *cb.SignaturePolicy_NOutOf_:
rules := []*cb.SignaturePolicy{}
for _, rule := range t.NOutOf.Rules {
// here we call remap again - we're doing a
// depth-first traversal of this policy tree
rules = append(rules, remap(rule, idRemap))
}
return &cb.SignaturePolicy{
Type: &cb.SignaturePolicy_NOutOf_{
NOutOf: &cb.SignaturePolicy_NOutOf{
N: t.NOutOf.N,
Rules: rules,
},
},
}
case *cb.SignaturePolicy_SignedBy:
// here we do the actual remapping because we have
// the "signed by" rule, whose reference to the
// principal we need to remap
newID, in := idRemap[int(t.SignedBy)]
if !in {
panic("programming error")
}
return &cb.SignaturePolicy{
Type: &cb.SignaturePolicy_SignedBy{
SignedBy: int32(newID),
},
}
default:
panic(fmt.Sprintf("invalid policy type %T", t))
}
}
// merge integrates the policy `that` into the
// policy `this`. The first argument is changed
// whereas the second isn't
func merge(this *cb.SignaturePolicyEnvelope, that *cb.SignaturePolicyEnvelope) {
// at first we build a map of principals in `this`
IDs := this.Identities
idMap := map[string]int{}
for i, id := range this.Identities {
str := id.PrincipalClassification.String() + string(id.Principal)
idMap[str] = i
}
// then we traverse each of the principals in `that`,
// deduplicate them against the ones in `this` and
// create remapping rules so that if `that` references
// a duplicate policy in this, the merged policy will
// ensure that the references in `that` point to the
// correct principal
idRemap := map[int]int{}
for i, id := range that.Identities {
str := id.PrincipalClassification.String() + string(id.Principal)
if j, in := idMap[str]; in {
idRemap[i] = j
} else {
idRemap[i] = len(IDs)
idMap[str] = len(IDs)
IDs = append(IDs, id)
}
}
this.Identities = IDs
newEntry := remap(that.Rule, idRemap)
existingRules := this.Rule.Type.(*cb.SignaturePolicy_NOutOf_).NOutOf.Rules
this.Rule.Type.(*cb.SignaturePolicy_NOutOf_).NOutOf.Rules = append(existingRules, newEntry)
}
// Convert implements the policies.Converter function to
// convert an implicit meta policy into a signature policy envelope.
func (p *ImplicitMetaPolicy) Convert() (*cb.SignaturePolicyEnvelope, error) {
converted := &cb.SignaturePolicyEnvelope{
Version: 0,
Rule: &cb.SignaturePolicy{
Type: &cb.SignaturePolicy_NOutOf_{
NOutOf: &cb.SignaturePolicy_NOutOf{
N: int32(p.Threshold),
},
},
},
}
// the conversion approach for an implicit meta
// policy is to convert each of the subpolicies,
// merge it with the previous one and return the
// merged policy
for i, subPolicy := range p.SubPolicies {
convertibleSubpolicy, ok := subPolicy.(Converter)
if !ok {
return nil, errors.Errorf("subpolicy number %d type %T of policy %s is not convertible", i, subPolicy, p.SubPolicyName)
}
spe, err := convertibleSubpolicy.Convert()
if err != nil {
return nil, errors.WithMessagef(err, "failed to convert subpolicy number %d of policy %s", i, p.SubPolicyName)
}
merge(converted, spe)
}
return converted, nil
} | common/policies/convert.go | 0.722135 | 0.446133 | convert.go | starcoder |
package benchmark
import (
"reflect"
"testing"
)
func isBoolToInt16FuncCalibrated(supplier func() bool) bool {
return isCalibrated(reflect.Bool, reflect.Int16, reflect.ValueOf(supplier).Pointer())
}
func isIntToInt16FuncCalibrated(supplier func() int) bool {
return isCalibrated(reflect.Int, reflect.Int16, reflect.ValueOf(supplier).Pointer())
}
func isInt8ToInt16FuncCalibrated(supplier func() int8) bool {
return isCalibrated(reflect.Int8, reflect.Int16, reflect.ValueOf(supplier).Pointer())
}
func isInt16ToInt16FuncCalibrated(supplier func() int16) bool {
return isCalibrated(reflect.Int16, reflect.Int16, reflect.ValueOf(supplier).Pointer())
}
func isInt32ToInt16FuncCalibrated(supplier func() int32) bool {
return isCalibrated(reflect.Int32, reflect.Int16, reflect.ValueOf(supplier).Pointer())
}
func isInt64ToInt16FuncCalibrated(supplier func() int64) bool {
return isCalibrated(reflect.Int64, reflect.Int16, reflect.ValueOf(supplier).Pointer())
}
func isUintToInt16FuncCalibrated(supplier func() uint) bool {
return isCalibrated(reflect.Uint, reflect.Int16, reflect.ValueOf(supplier).Pointer())
}
func isUint8ToInt16FuncCalibrated(supplier func() uint8) bool {
return isCalibrated(reflect.Uint8, reflect.Int16, reflect.ValueOf(supplier).Pointer())
}
func isUint16ToInt16FuncCalibrated(supplier func() uint16) bool {
return isCalibrated(reflect.Uint16, reflect.Int16, reflect.ValueOf(supplier).Pointer())
}
func isUint32ToInt16FuncCalibrated(supplier func() uint32) bool {
return isCalibrated(reflect.Uint32, reflect.Int16, reflect.ValueOf(supplier).Pointer())
}
func isUint64ToInt16FuncCalibrated(supplier func() uint64) bool {
return isCalibrated(reflect.Uint64, reflect.Int16, reflect.ValueOf(supplier).Pointer())
}
func setBoolToInt16FuncCalibrated(supplier func() bool) {
setCalibrated(reflect.Bool, reflect.Int16, reflect.ValueOf(supplier).Pointer())
}
func setIntToInt16FuncCalibrated(supplier func() int) {
setCalibrated(reflect.Int, reflect.Int16, reflect.ValueOf(supplier).Pointer())
}
func setInt8ToInt16FuncCalibrated(supplier func() int8) {
setCalibrated(reflect.Int8, reflect.Int16, reflect.ValueOf(supplier).Pointer())
}
func setInt16ToInt16FuncCalibrated(supplier func() int16) {
setCalibrated(reflect.Int16, reflect.Int16, reflect.ValueOf(supplier).Pointer())
}
func setInt32ToInt16FuncCalibrated(supplier func() int32) {
setCalibrated(reflect.Int32, reflect.Int16, reflect.ValueOf(supplier).Pointer())
}
func setInt64ToInt16FuncCalibrated(supplier func() int64) {
setCalibrated(reflect.Int64, reflect.Int16, reflect.ValueOf(supplier).Pointer())
}
func setUintToInt16FuncCalibrated(supplier func() uint) {
setCalibrated(reflect.Uint, reflect.Int16, reflect.ValueOf(supplier).Pointer())
}
func setUint8ToInt16FuncCalibrated(supplier func() uint8) {
setCalibrated(reflect.Uint8, reflect.Int16, reflect.ValueOf(supplier).Pointer())
}
func setUint16ToInt16FuncCalibrated(supplier func() uint16) {
setCalibrated(reflect.Uint16, reflect.Int16, reflect.ValueOf(supplier).Pointer())
}
func setUint32ToInt16FuncCalibrated(supplier func() uint32) {
setCalibrated(reflect.Uint32, reflect.Int16, reflect.ValueOf(supplier).Pointer())
}
func setUint64ToInt16FuncCalibrated(supplier func() uint64) {
setCalibrated(reflect.Uint64, reflect.Int16, reflect.ValueOf(supplier).Pointer())
}
// BoolToInt16Func benchmarks a function with the signature:
// func(bool) int16
// ID: B-4-1
func BoolToInt16Func(b *testing.B, supplier func() bool, toInt16Func func(bool) int16) {
if !isBoolSupplierCalibrated(supplier) {
panic("supplier function not calibrated")
}
if !isBoolToInt16FuncCalibrated(supplier) {
panic("BoolToInt16Func not calibrated with this supplier")
}
for i, count := 0, b.N; i < count; i++ {
toInt16Func(supplier())
}
}
// IntToInt16Func benchmarks a function with the signature:
// func(int) int16
// ID: B-4-2
func IntToInt16Func(b *testing.B, supplier func() int, toInt16Func func(int) int16) {
if !isIntSupplierCalibrated(supplier) {
panic("supplier function not calibrated")
}
if !isIntToInt16FuncCalibrated(supplier) {
panic("IntToInt16Func not calibrated with this supplier")
}
for i, count := 0, b.N; i < count; i++ {
toInt16Func(supplier())
}
}
// Int8ToInt16Func benchmarks a function with the signature:
// func(int8) int16
// ID: B-4-3
func Int8ToInt16Func(b *testing.B, supplier func() int8, toInt16Func func(int8) int16) {
if !isInt8SupplierCalibrated(supplier) {
panic("supplier function not calibrated")
}
if !isInt8ToInt16FuncCalibrated(supplier) {
panic("Int8ToInt16Func not calibrated with this supplier")
}
for i, count := 0, b.N; i < count; i++ {
toInt16Func(supplier())
}
}
// Int16ToInt16Func benchmarks a function with the signature:
// func(int16) int16
// ID: B-4-4
func Int16ToInt16Func(b *testing.B, supplier func() int16, toInt16Func func(int16) int16) {
if !isInt16SupplierCalibrated(supplier) {
panic("supplier function not calibrated")
}
if !isInt16ToInt16FuncCalibrated(supplier) {
panic("Int16ToInt16Func not calibrated with this supplier")
}
for i, count := 0, b.N; i < count; i++ {
toInt16Func(supplier())
}
}
// Int32ToInt16Func benchmarks a function with the signature:
// func(int32) int16
// ID: B-4-5
func Int32ToInt16Func(b *testing.B, supplier func() int32, toInt16Func func(int32) int16) {
if !isInt32SupplierCalibrated(supplier) {
panic("supplier function not calibrated")
}
if !isInt32ToInt16FuncCalibrated(supplier) {
panic("Int32ToInt16Func not calibrated with this supplier")
}
for i, count := 0, b.N; i < count; i++ {
toInt16Func(supplier())
}
}
// Int64ToInt16Func benchmarks a function with the signature:
// func(int64) int16
// ID: B-4-6
func Int64ToInt16Func(b *testing.B, supplier func() int64, toInt16Func func(int64) int16) {
if !isInt64SupplierCalibrated(supplier) {
panic("supplier function not calibrated")
}
if !isInt64ToInt16FuncCalibrated(supplier) {
panic("Int64ToInt16Func not calibrated with this supplier")
}
for i, count := 0, b.N; i < count; i++ {
toInt16Func(supplier())
}
}
// UintToInt16Func benchmarks a function with the signature:
// func(uint) int16
// ID: B-4-7
func UintToInt16Func(b *testing.B, supplier func() uint, toInt16Func func(uint) int16) {
if !isUintSupplierCalibrated(supplier) {
panic("supplier function not calibrated")
}
if !isUintToInt16FuncCalibrated(supplier) {
panic("UintToInt16Func not calibrated with this supplier")
}
for i, count := 0, b.N; i < count; i++ {
toInt16Func(supplier())
}
}
// Uint8ToInt16Func benchmarks a function with the signature:
// func(uint8) int16
// ID: B-4-8
func Uint8ToInt16Func(b *testing.B, supplier func() uint8, toInt16Func func(uint8) int16) {
if !isUint8SupplierCalibrated(supplier) {
panic("supplier function not calibrated")
}
if !isUint8ToInt16FuncCalibrated(supplier) {
panic("Uint8ToInt16Func not calibrated with this supplier")
}
for i, count := 0, b.N; i < count; i++ {
toInt16Func(supplier())
}
}
// Uint16ToInt16Func benchmarks a function with the signature:
// func(uint16) int16
// ID: B-4-9
func Uint16ToInt16Func(b *testing.B, supplier func() uint16, toInt16Func func(uint16) int16) {
if !isUint16SupplierCalibrated(supplier) {
panic("supplier function not calibrated")
}
if !isUint16ToInt16FuncCalibrated(supplier) {
panic("Uint16ToInt16Func not calibrated with this supplier")
}
for i, count := 0, b.N; i < count; i++ {
toInt16Func(supplier())
}
}
// Uint32ToInt16Func benchmarks a function with the signature:
// func(uint32) int16
// ID: B-4-10
func Uint32ToInt16Func(b *testing.B, supplier func() uint32, toInt16Func func(uint32) int16) {
if !isUint32SupplierCalibrated(supplier) {
panic("supplier function not calibrated")
}
if !isUint32ToInt16FuncCalibrated(supplier) {
panic("Uint32ToInt16Func not calibrated with this supplier")
}
for i, count := 0, b.N; i < count; i++ {
toInt16Func(supplier())
}
}
// Uint64ToInt16Func benchmarks a function with the signature:
// func(int16) int16
// ID: B-4-11
func Uint64ToInt16Func(b *testing.B, supplier func() uint64, toInt16Func func(uint64) int16) {
if !isUint64SupplierCalibrated(supplier) {
panic("supplier function not calibrated")
}
if !isUint64ToInt16FuncCalibrated(supplier) {
panic("Uint64ToInt16Func not calibrated with this supplier")
}
for i, count := 0, b.N; i < count; i++ {
toInt16Func(supplier())
}
} | common/benchmark/04_to_int16_func.go | 0.692122 | 0.769145 | 04_to_int16_func.go | starcoder |
package big
import (
"bytes"
"fmt"
"math/big"
"regexp"
"strconv"
"strings"
"github.com/golang-plus/errors"
)
var (
// max decimal digits allowd for indivisible quotient (exceeding be truncated).
MaxDecimalDigits = uint(200)
)
// Decimal represents a decimal which can handing fixed precision.
type Decimal struct {
integer *big.Int
exponent int
}
func (d *Decimal) ensureInitialized() {
if d.integer == nil {
d.integer = new(big.Int)
}
}
// Sign returns:
// -1: if d < 0
// 0: if d == 0
// +1: if d > 0
func (d *Decimal) Sign() int {
d.ensureInitialized()
return d.integer.Sign()
}
// IsZero reports whether the value of d is equal to zero.
func (d *Decimal) IsZero() bool {
d.ensureInitialized()
return d.integer.Sign() == 0
}
// Float32 returns the float32 value nearest to d and a boolean indicating whether is exact.
func (d *Decimal) Float32() (float32, bool) {
d.ensureInitialized()
a := new(big.Rat).SetInt(d.integer)
if d.exponent == 0 {
return a.Float32()
}
b := new(big.Rat).SetInt(new(big.Int).Exp(big.NewInt(10), new(big.Int).Abs(big.NewInt(int64(d.exponent))), nil))
if d.exponent > 0 {
b.Inv(b)
}
z := new(big.Rat).Quo(a, b)
return z.Float32()
}
// Float64 returns the float64 value nearest to d and a boolean indicating whether is exact.
func (d *Decimal) Float64() (float64, bool) {
d.ensureInitialized()
a := new(big.Rat).SetInt(d.integer)
if d.exponent == 0 {
return a.Float64()
}
b := new(big.Rat).SetInt(new(big.Int).Exp(big.NewInt(10), new(big.Int).Abs(big.NewInt(int64(d.exponent))), nil))
if d.exponent > 0 {
b.Inv(b)
}
z := new(big.Rat).Quo(a, b)
return z.Float64()
}
// Int64 returns the int64 value nearest to d and a boolean indicating whether is exact.
func (d *Decimal) Int64() (int64, bool) {
d.ensureInitialized()
if d.exponent == 0 {
return d.integer.Int64(), true
}
if d.exponent > 0 {
z := new(big.Int).Mul(d.integer, new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(d.exponent)), nil))
return z.Int64(), true
}
z := new(big.Int).Quo(d.integer, new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(d.exponent*-1)), nil))
return z.Int64(), false
}
// String converts the floating-point number d to a string.
func (d *Decimal) String() string {
d.ensureInitialized()
if d.integer.Cmp(big.NewInt(0)) == 0 { // euqal to zero
return "0"
}
str := d.integer.String()
if d.exponent == 0 { // value is the integer without exponent
return str
}
if d.exponent > 0 { // value is the integer with exponent
return str + strings.Repeat("0", d.exponent)
}
// has decimal digits
var buf bytes.Buffer
if strings.HasPrefix(str, "-") {
buf.WriteString("-")
str = str[1:]
}
p := len(str) - (d.exponent * -1)
if p <= 0 {
buf.WriteString("0.")
buf.WriteString(strings.Repeat("0", p*-1))
buf.WriteString(strings.TrimRight(str, "0"))
} else {
buf.WriteString(str[:p]) // integer part
decimals := strings.TrimRight(str[p:], "0")
if len(decimals) > 0 {
buf.WriteString(".")
buf.WriteString(decimals)
}
}
return buf.String()
}
// SetInt64 sets x to y and returns x.
func (x *Decimal) SetInt64(y int64) *Decimal {
x.ensureInitialized()
x.integer.SetInt64(y)
x.exponent = 0
return x
}
var (
_DecimalPattern = regexp.MustCompile(`^([-+]?\d+)(\.(\d+))?([eE]([-+]?\d+))?$`)
)
// SetString sets x to the value of y and returns x and a boolean indicating success.
// If the operation failed, the value of d is undefined but the returned value is nil.
func (x *Decimal) SetString(y string) (*Decimal, bool) {
x.ensureInitialized()
matches := _DecimalPattern.FindStringSubmatch(y)
if len(matches) != 6 {
return nil, false
}
decimals := strings.TrimRight(matches[3], "0")
integer := matches[1] + decimals
exponent := len(decimals) * -1
if len(matches[5]) > 0 {
exp, _ := strconv.ParseInt(matches[5], 10, 64)
exponent += int(exp)
}
x.integer.SetString(integer, 10)
x.exponent = exponent
return x, true
}
// SetFloat64 sets x to y and returns x.
func (x *Decimal) SetFloat64(y float64) *Decimal {
x.ensureInitialized()
x.SetString(strconv.FormatFloat(y, 'f', -1, 64))
return x
}
// Copy sets x to y and returns x. y is not changed.
func (x *Decimal) Copy(y *Decimal) *Decimal {
x.ensureInitialized()
y.ensureInitialized()
x.integer.Set(y.integer)
x.exponent = y.exponent
return x
}
// Abs sets d to the value |d| (the absolute value of d) and returns d.
func (d *Decimal) Abs() *Decimal {
d.ensureInitialized()
d.integer.Abs(d.integer)
return d
}
// Neg sets d to the value of d with its sign negated, and returns d.
func (d *Decimal) Neg() *Decimal {
d.ensureInitialized()
d.integer.Neg(d.integer)
return d
}
func (x *Decimal) align(y *Decimal) {
if x.exponent != y.exponent {
diff := new(big.Int).Abs(new(big.Int).Sub(new(big.Int).Abs(big.NewInt(int64(x.exponent))), new(big.Int).Abs(big.NewInt(int64(y.exponent)))))
if x.exponent > y.exponent {
x.integer.Mul(x.integer, new(big.Int).Exp(big.NewInt(10), diff, nil))
x.exponent = y.exponent
} else {
y.integer.Mul(y.integer, new(big.Int).Exp(big.NewInt(10), diff, nil))
y.exponent = x.exponent
}
}
}
// Cmp compares x and y and returns:
// -1 if d < y
// 0 if d == y (includes: -0 == 0, -Inf == -Inf, and +Inf == +Inf)
// +1 if d > y
func (x *Decimal) Cmp(y *Decimal) int {
x.ensureInitialized()
y.ensureInitialized()
x.align(y)
return x.integer.Cmp(y.integer)
}
// Add sets d to the sum of d and y and returns x.
func (x *Decimal) Add(y *Decimal) *Decimal {
x.ensureInitialized()
y.ensureInitialized()
x.align(y)
x.integer.Add(x.integer, y.integer)
return x
}
// Sub sets d to the difference x-y and returns x.
func (x *Decimal) Sub(y *Decimal) *Decimal {
x.ensureInitialized()
y.ensureInitialized()
x.align(y)
x.integer.Sub(x.integer, y.integer)
return x
}
// Mul sets x to the product x*y and returns x.
func (x *Decimal) Mul(y *Decimal) *Decimal {
x.ensureInitialized()
y.ensureInitialized()
if y.integer.Sign() == 0 { // *0
x.integer.SetInt64(0)
x.exponent = 0
return x
}
x.integer.Mul(x.integer, y.integer)
x.exponent += y.exponent
return x
}
// Quo sets x to the quotient x/y and return x.
// Please set MaxDecimalDigitis for indivisible quotient.
func (x *Decimal) Quo(y *Decimal) *Decimal {
x.ensureInitialized()
y.ensureInitialized()
if y.integer.Sign() == 0 { // /0
x.integer.SetInt64(0)
x.exponent = 0
return x
}
// modulus x%y == 0
if z, r := new(big.Int).QuoRem(x.integer, y.integer, new(big.Int)); r.Sign() == 0 {
x.integer = z
x.exponent -= y.exponent
return x
}
// modulus x%y > 0
var buf bytes.Buffer
if x.integer.Sign()*y.integer.Sign() == -1 {
buf.WriteString("-")
}
xi := new(big.Int).Abs(x.integer)
yi := new(big.Int).Abs(y.integer)
exp := x.exponent - y.exponent
z, r := new(big.Int).QuoRem(xi, yi, new(big.Int))
buf.WriteString(z.String())
for r.Sign() != 0 && exp*-1 < int(MaxDecimalDigits) {
r.Mul(r, big.NewInt(10))
z, r = new(big.Int).QuoRem(r, yi, new(big.Int))
buf.WriteString(z.String())
exp -= 1
}
str := fmt.Sprintf("%se%d", buf.String(), exp)
x.SetString(str)
return x
}
// Div is same to Quo.
func (x *Decimal) Div(y *Decimal) *Decimal {
return x.Quo(y)
}
// RoundToNearestEven rounds (IEEE 754-2008, round to nearest, ties to even) the floating-point number x with given precision (the number of digits after the decimal point).
func (d *Decimal) RoundToNearestEven(precision uint) *Decimal {
d.ensureInitialized()
prec := int(precision)
if d.IsZero() || d.exponent > 0 || d.exponent*-1 <= prec { // rounding needless
return d
}
str := d.integer.String()
var sign, part1, part2 string
if strings.HasPrefix(str, "-") {
sign = "-"
str = str[1:]
}
if len(str) < d.exponent*-1 {
str = strings.Repeat("0", (d.exponent*-1)-len(str)+1) + str
}
part1 = str[:len(str)+d.exponent]
part2 = str[len(part1):]
isRoundUp := false
switch part2[prec : prec+1] {
case "6", "7", "8", "9":
isRoundUp = true
case "5":
if len(part2) > prec+1 { // found decimals back of "5"
isRoundUp = true
} else {
var neighbor string
if prec == 0 { // get neighbor from integer part
neighbor = part1[len(part1)-1:]
} else {
neighbor = part2[prec-1 : prec]
}
switch neighbor {
case "1", "3", "5", "7", "9":
isRoundUp = true
}
}
}
z, _ := new(big.Int).SetString(sign+part1+part2[:prec], 10)
if isRoundUp {
z.Add(z, big.NewInt(int64(d.integer.Sign())))
}
d.integer = z
d.exponent = prec * -1
return d
}
// Round is short to RoundToNearestEven.
func (d *Decimal) Round(precision uint) *Decimal {
return d.RoundToNearestEven(precision)
}
// RoundToNearestAway rounds (IEEE 754-2008, round to nearest, ties away from zero) the floating-point number x with given precision (the number of digits after the decimal point).
func (d *Decimal) RoundToNearestAway(precision uint) *Decimal {
d.ensureInitialized()
prec := int(precision)
if d.IsZero() || d.exponent > 0 || d.exponent*-1 <= prec { // rounding needless
return d
}
diff := new(big.Int).Sub(new(big.Int).Abs(big.NewInt(int64(d.exponent))), big.NewInt(int64(prec+1)))
d.integer.Quo(d.integer, new(big.Int).Exp(big.NewInt(10), diff, nil))
factor := big.NewInt(int64(5))
if d.integer.Sign() == -1 {
factor.Neg(factor)
}
d.integer.Add(d.integer, factor)
d.integer.Quo(d.integer, big.NewInt(10))
d.exponent = prec * -1
return d
}
// RoundToZero rounds (IEEE 754-2008, round towards zero) the floating-point number x with given precision (the number of digits after the decimal point).
func (d *Decimal) RoundToZero(precision uint) *Decimal {
d.ensureInitialized()
prec := int(precision)
if d.IsZero() || d.exponent > 0 || d.exponent*-1 <= prec { // rounding needless
return d
}
diff := new(big.Int).Sub(new(big.Int).Abs(big.NewInt(int64(d.exponent))), big.NewInt(int64(prec)))
d.integer.Quo(d.integer, new(big.Int).Exp(big.NewInt(10), diff, nil))
d.exponent = prec * -1
return d
}
// Truncate is same as RoundToZero.
func (d *Decimal) Truncate(precision uint) *Decimal {
return d.RoundToZero(precision)
}
// RoundDown is same as RoundToZero.
func (d *Decimal) RoundDown(precision uint) *Decimal {
return d.RoundToZero(precision)
}
// RoundAwayFromZero rounds (no IEEE 754-2008, round away from zero) to floating-point number d with given precision.
func (d *Decimal) RoundAwayFromZero(precision uint) *Decimal {
d.ensureInitialized()
prec := int(precision)
if d.IsZero() || d.exponent > 0 || d.exponent*-1 <= prec { // rounding needless
return d
}
sign := d.integer.Sign()
diff := new(big.Int).Sub(new(big.Int).Abs(big.NewInt(int64(d.exponent))), big.NewInt(int64(prec)))
if _, r := d.integer.QuoRem(d.integer, new(big.Int).Exp(big.NewInt(10), diff, nil), new(big.Int)); r.Sign() != 0 {
d.integer.Add(d.integer, big.NewInt(int64(1*sign))) // round up
}
d.exponent = prec * -1
return d
}
// RoundUp is same as RoundAwayFromZero.
func (d *Decimal) RoundUp(precision uint) *Decimal {
return d.RoundAwayFromZero(precision)
}
// NewDecimal returns a new decimal.
func NewDecimal(number float64) *Decimal {
return new(Decimal).SetFloat64(number)
}
// ParseDecimal returns a new decimal by parsing decimal string.
func ParseDecimal(str string) (*Decimal, error) {
if d, ok := new(Decimal).SetString(str); ok {
return d, nil
}
return nil, errors.Newf("decimal string %q is invalid", str)
}
// MustParseDecimal is similar to ParseDecimal but panics if error occurred.
func MustParseDecimal(str string) *Decimal {
d, err := ParseDecimal(str)
if err != nil {
panic(err)
}
return d
} | big/decimal.go | 0.774114 | 0.429728 | decimal.go | starcoder |
package gcast
import (
"errors"
"fmt"
"reflect"
"strconv"
"strings"
)
var (
// ErrUnaddressable unaddressable val
ErrUnaddressable = errors.New("val must be addressable")
// ErrNotPointer pinter val
ErrNotPointer = errors.New("val must be a pointer")
)
// Decode decode interface into struct
func Decode(src interface{}, dst interface{}) error {
if err := check(dst); err != nil {
return err
}
return decode(src, reflect.ValueOf(dst).Elem())
}
func check(val interface{}) error {
v := reflect.ValueOf(val)
if v.Kind() != reflect.Ptr {
return ErrNotPointer
}
if !v.Elem().CanAddr() {
return ErrUnaddressable
}
return nil
}
func decode(data interface{}, val reflect.Value) error {
dataVal := reflect.ValueOf(data)
if !dataVal.IsValid() {
val.Set(reflect.Zero(dataVal.Type()))
return nil
}
kind := val.Kind()
switch kind {
case reflect.Bool:
return decodeBool(data, val)
case reflect.Interface:
return decodeInterface(data, val)
case reflect.String:
return decodeString(data, val)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return decodeInt(data, val)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return decodeUint(data, val)
case reflect.Float32, reflect.Float64:
return decodeFloat(data, val)
case reflect.Map, reflect.Slice:
return decodeInterface(data, val)
case reflect.Ptr:
return decodePtr(data, val)
case reflect.Struct:
return decodeStruct(data, val)
default:
return fmt.Errorf("unsupported type %s", kind)
}
}
func decodeStruct(data interface{}, val reflect.Value) error {
dataVal := reflect.Indirect(reflect.ValueOf(data))
dataKind := dataVal.Kind()
if dataVal.Type() == val.Type() {
val.Set(dataVal)
return nil
}
switch dataKind {
// Only map can converted into struct
case reflect.Map:
default:
return fmt.Errorf("")
}
return nil
}
func decodePtr(data interface{}, val reflect.Value) error {
valType := val.Type()
valElem := valType.Elem()
elem := val
if elem.IsNil() {
elem = reflect.New(valElem)
}
if err := decode(data, reflect.Indirect(elem)); err != nil {
return err
}
val.Set(elem)
return nil
}
func decodeBool(data interface{}, val reflect.Value) error {
dataVal := reflect.ValueOf(data)
dataKind := dataVal.Kind()
switch dataKind {
case reflect.Bool:
val.SetBool(dataVal.Bool())
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
val.SetBool(0 != dataVal.Int())
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
val.SetBool(0 != dataVal.Uint())
case reflect.Float32, reflect.Float64:
val.SetBool(0 != dataVal.Float())
case reflect.String:
ok := strings.Contains(" True true ", dataVal.String())
val.SetBool(ok)
default:
return fmt.Errorf("")
}
return nil
}
func decodeInt(data interface{}, val reflect.Value) error {
dataVal := reflect.ValueOf(data)
dataKind := dataVal.Kind()
switch dataKind {
case reflect.Bool:
if dataVal.Bool() {
val.SetInt(1)
} else {
val.SetInt(0)
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
val.SetInt(dataVal.Int())
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
val.SetInt(int64(dataVal.Uint()))
case reflect.Float32, reflect.Float64:
val.SetInt(int64(dataVal.Float()))
case reflect.String:
d, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits())
if err != nil {
return fmt.Errorf("parse '%s' as int failed: %s", dataVal.String(), err)
}
val.SetInt(d)
default:
return fmt.Errorf("decode int failed: %#v", data)
}
return nil
}
func decodeUint(data interface{}, val reflect.Value) error {
dataVal := reflect.ValueOf(data)
dataKind := dataVal.Kind()
switch dataKind {
case reflect.Bool:
if dataVal.Bool() {
val.SetUint(1)
} else {
val.SetUint(0)
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
i := dataVal.Int()
if i < 0 {
return fmt.Errorf("decode uint failed: int out of range '%d'", i)
}
val.SetUint(uint64(i))
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
val.SetUint(dataVal.Uint())
case reflect.Float32, reflect.Float64:
f := dataVal.Float()
if f < 0 {
return fmt.Errorf("decode uint failed: int out of range '%f'", f)
}
val.SetUint(uint64(f))
case reflect.String:
d, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits())
if err != nil {
return fmt.Errorf("parse '%s' as int failed: %s", dataVal.String(), err)
}
val.SetUint(d)
default:
return fmt.Errorf("decode uint failed: %#v", data)
}
return nil
}
func decodeString(data interface{}, val reflect.Value) error {
dataVal := reflect.ValueOf(data)
dataKind := dataVal.Kind()
switch dataKind {
case reflect.Bool:
if dataVal.Bool() {
val.SetString("1")
} else {
val.SetString("0")
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
val.SetString(strconv.FormatInt(dataVal.Int(), 10))
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
val.SetString(strconv.FormatUint(dataVal.Uint(), 10))
case reflect.Float32, reflect.Float64:
val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64))
case reflect.String:
d, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits())
if err != nil {
return fmt.Errorf("parse '%s' as int failed: %s", dataVal.String(), err)
}
val.SetInt(d)
default:
return fmt.Errorf("decode int failed: %#v", data)
}
return nil
}
func decodeInterface(data interface{}, val reflect.Value) error {
valType := val.Type()
valKey := valType.Key()
valElem := valType.Elem()
dataVal := reflect.Indirect(reflect.ValueOf(data))
dataKind := dataVal.Kind()
switch dataKind {
case reflect.Map:
valMap := val
if valMap.IsNil() {
valMap = reflect.MakeMap(reflect.MapOf(valKey, valElem))
}
for _, k := range dataVal.MapKeys() {
subKey := reflect.Indirect(reflect.New(valType))
if err := decode(k.Interface(), subKey); err != nil {
continue
}
v := dataVal.MapIndex(k).Interface()
subVal := reflect.Indirect(reflect.New(valElem))
if err := decode(v, subVal); err != nil {
continue
}
valMap.SetMapIndex(subKey, subVal)
}
val.Set(valMap)
case reflect.Array:
case reflect.Slice:
valSlice := val
if valSlice.IsNil() {
valSlice = reflect.MakeSlice(reflect.SliceOf(valElem), dataVal.Len(), dataVal.Len())
}
for i := 0; i < dataVal.Len(); i++ {
subData := dataVal.Index(i).Interface()
for valSlice.Len() <= i {
valSlice = reflect.Append(valSlice, reflect.Zero(valElem))
}
subField := valSlice.Index(i)
if err := decode(subData, subField); err != nil {
continue
}
}
val.Set(valSlice)
default:
return fmt.Errorf("decode map failed: %#v", data)
}
return nil
}
func decodeFloat(data interface{}, val reflect.Value) error {
dataVal := reflect.ValueOf(data)
dataKind := dataVal.Kind()
switch dataKind {
case reflect.Bool:
if dataVal.Bool() {
val.SetFloat(1.0)
} else {
val.SetFloat(0.0)
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
val.SetFloat(float64(dataVal.Int()))
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
val.SetFloat(float64(dataVal.Uint()))
case reflect.Float32, reflect.Float64:
val.SetFloat(dataVal.Float())
case reflect.String:
f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits())
if err != nil {
return fmt.Errorf("parse '%s' as int failed: %s", dataVal.String(), err)
}
val.SetFloat(f)
default:
return fmt.Errorf("decode int failed: %#v", data)
}
return nil
} | decode.go | 0.623377 | 0.426799 | decode.go | starcoder |
package rke
import "github.com/hashicorp/terraform/helper/schema"
func nodeSchema() map[string]*schema.Schema {
return map[string]*schema.Schema{
"node_name": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "Name of the host provisioned via docker machine",
},
"address": {
Type: schema.TypeString,
Required: true,
Description: "IP or FQDN that is fully resolvable and used for SSH communication",
},
"port": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
ValidateFunc: validateIntegerInRange(1, 65535),
Description: "Port used for SSH communication",
},
"internal_address": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "Internal address that will be used for components communication",
},
"role": {
Type: schema.TypeList,
Elem: &schema.Schema{Type: schema.TypeString},
Optional: true,
// cannot use ConflictsWith in this context. see https://github.com/terraform-providers/terraform-provider-google/pull/1062
// ConflictsWith: []string{"roles"},
Description: "Node role in kubernetes cluster [controlplane/worker/etcd])",
},
"roles": {
Type: schema.TypeString,
Optional: true,
// cannot use ConflictsWith in this context. see https://github.com/terraform-providers/terraform-provider-google/pull/1062
// ConflictsWith: []string{"role"},
Deprecated: "roles is a workaround when a role can not be specified in list",
Description: "Node role in kubernetes cluster [controlplane/worker/etcd], specified by a comma-separated string",
},
"hostname_override": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "HostnameOverride",
},
"user": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "SSH user that will be used by RKE",
},
"docker_socket": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "Docker socket on the node that will be used in tunneling",
},
"ssh_agent_auth": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
Description: "SSH Agent Auth enable",
},
"ssh_key": {
Type: schema.TypeString,
Sensitive: true,
Optional: true,
Computed: true,
Description: "SSH Private Key",
},
"ssh_key_path": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "SSH Private Key",
},
"labels": {
Type: schema.TypeMap,
Optional: true,
Computed: true,
Description: "Node Labels",
},
}
}
func nodeDataSourceSchema() map[string]*schema.Schema {
nodeSchema := nodeSchema()
nodeSchema["yaml"] = &schema.Schema{
Type: schema.TypeString,
Computed: true,
Description: "RKE Node YAML",
}
nodeSchema["json"] = &schema.Schema{
Type: schema.TypeString,
Computed: true,
Description: "RKE Node JSON",
}
return nodeSchema
} | rke/node_schema.go | 0.584745 | 0.434821 | node_schema.go | starcoder |
package palette
import (
"github.com/Lexus123/gamut"
colorful "github.com/lucasb-eyer/go-colorful"
)
func init() {
Crayola.AddColors(
gamut.Colors{
{"Red", colorful.Color{R: 0.929412, G: 0.039216, B: 0.247059}, ""},
{"Maroon", colorful.Color{R: 0.764706, G: 0.129412, B: 0.282353}, ""},
{"Scarlet", colorful.Color{R: 0.992157, G: 0.054902, B: 0.207843}, ""},
{"Brick Red", colorful.Color{R: 0.776471, G: 0.176471, B: 0.258824}, ""},
{"English Vermilion", colorful.Color{R: 0.800000, G: 0.278431, B: 0.294118}, ""},
{"Madder Lake", colorful.Color{R: 0.800000, G: 0.200000, B: 0.211765}, ""},
{"Permanent Geranium Lake", colorful.Color{R: 0.882353, G: 0.172549, B: 0.172549}, ""},
{"Maximum Red", colorful.Color{R: 0.850980, G: 0.129412, B: 0.129412}, ""},
{"Indian Red", colorful.Color{R: 0.725490, G: 0.305882, B: 0.282353}, ""},
{"Orange-Red", colorful.Color{R: 1.000000, G: 0.325490, B: 0.286275}, ""},
{"Sunset Orange", colorful.Color{R: 0.996078, G: 0.298039, B: 0.250980}, ""},
{"Bittersweet", colorful.Color{R: 0.996078, G: 0.435294, B: 0.368627}, ""},
{"Dark Venetian Red", colorful.Color{R: 0.701961, G: 0.231373, B: 0.141176}, ""},
{"Venetian Red", colorful.Color{R: 0.800000, G: 0.333333, B: 0.239216}, ""},
{"Light Venetian Red", colorful.Color{R: 0.901961, G: 0.450980, B: 0.360784}, ""},
{"Vivid Tangerine", colorful.Color{R: 1.000000, G: 0.600000, B: 0.501961}, ""},
{"Middle Red", colorful.Color{R: 0.898039, G: 0.556863, B: 0.450980}, ""},
{"Burnt Orange", colorful.Color{R: 1.000000, G: 0.439216, B: 0.203922}, ""},
{"Red-Orange", colorful.Color{R: 1.000000, G: 0.407843, B: 0.121569}, ""},
{"Orange", colorful.Color{R: 1.000000, G: 0.533333, B: 0.200000}, ""},
{"Macaroni and Cheese", colorful.Color{R: 1.000000, G: 0.725490, B: 0.482353}, ""},
{"Middle Yellow Red", colorful.Color{R: 0.925490, G: 0.674510, B: 0.462745}, ""},
{"Mango Tango", colorful.Color{R: 0.905882, G: 0.447059, B: 0.000000}, ""},
{"Yellow-Orange", colorful.Color{R: 1.000000, G: 0.682353, B: 0.258824}, ""},
{"Maximum Yellow Red", colorful.Color{R: 0.949020, G: 0.729412, B: 0.286275}, ""},
{"Banana Mania", colorful.Color{R: 0.984314, G: 0.905882, B: 0.698039}, ""},
{"Maize", colorful.Color{R: 0.949020, G: 0.776471, B: 0.286275}, ""},
{"Orange-Yellow", colorful.Color{R: 0.972549, G: 0.835294, B: 0.407843}, ""},
{"Goldenrod", colorful.Color{R: 0.988235, G: 0.839216, B: 0.403922}, ""},
{"Dandelion", colorful.Color{R: 0.996078, G: 0.847059, B: 0.364706}, ""},
{"Yellow", colorful.Color{R: 0.984314, G: 0.909804, B: 0.439216}, ""},
{"Green-Yellow", colorful.Color{R: 0.945098, G: 0.905882, B: 0.533333}, ""},
{"Middle Yellow", colorful.Color{R: 1.000000, G: 0.921569, B: 0.000000}, ""},
{"Olive Green", colorful.Color{R: 0.709804, G: 0.701961, B: 0.360784}, ""},
{"Spring Green", colorful.Color{R: 0.925490, G: 0.921569, B: 0.741176}, ""},
{"Maximum Yellow", colorful.Color{R: 0.980392, G: 0.980392, B: 0.215686}, ""},
{"Canary", colorful.Color{R: 1.000000, G: 1.000000, B: 0.600000}, ""},
{"Lemon Yellow", colorful.Color{R: 1.000000, G: 1.000000, B: 0.623529}, ""},
{"Maximum Green Yellow", colorful.Color{R: 0.850980, G: 0.901961, B: 0.313725}, ""},
{"Middle Green Yellow", colorful.Color{R: 0.674510, G: 0.749020, B: 0.376471}, ""},
{"Inchworm", colorful.Color{R: 0.686275, G: 0.890196, B: 0.074510}, ""},
{"Light Chrome Green", colorful.Color{R: 0.745098, G: 0.901961, B: 0.294118}, ""},
{"Yellow-Green", colorful.Color{R: 0.772549, G: 0.882353, B: 0.478431}, ""},
{"Maximum Green", colorful.Color{R: 0.368627, G: 0.549020, B: 0.192157}, ""},
{"Asparagus", colorful.Color{R: 0.482353, G: 0.627451, B: 0.356863}, ""},
{"<NAME>", colorful.Color{R: 0.615686, G: 0.878431, B: 0.576471}, ""},
{"Fern", colorful.Color{R: 0.388235, G: 0.717647, B: 0.423529}, ""},
{"Middle Green", colorful.Color{R: 0.301961, G: 0.549020, B: 0.341176}, ""},
{"Green", colorful.Color{R: 0.227451, G: 0.650980, B: 0.333333}, ""},
{"Medium Chrome Green", colorful.Color{R: 0.423529, G: 0.650980, B: 0.486275}, ""},
{"Forest Green", colorful.Color{R: 0.372549, G: 0.654902, B: 0.466667}, ""},
{"Sea Green", colorful.Color{R: 0.576471, G: 0.874510, B: 0.721569}, ""},
{"Shamrock", colorful.Color{R: 0.200000, G: 0.800000, B: 0.600000}, ""},
{"Mountain Meadow", colorful.Color{R: 0.101961, G: 0.701961, B: 0.521569}, ""},
{"Jungle Green", colorful.Color{R: 0.160784, G: 0.670588, B: 0.529412}, ""},
{"Caribbean Green", colorful.Color{R: 0.000000, G: 0.800000, B: 0.600000}, ""},
{"Tropical Rain Forest", colorful.Color{R: 0.000000, G: 0.458824, B: 0.368627}, ""},
{"Middle Blue Green", colorful.Color{R: 0.552941, G: 0.850980, B: 0.800000}, ""},
{"Pine Green", colorful.Color{R: 0.003922, G: 0.470588, B: 0.435294}, ""},
{"Maximum Blue Green", colorful.Color{R: 0.188235, G: 0.749020, B: 0.749020}, ""},
{"Robin's Egg Blue", colorful.Color{R: 0.000000, G: 0.800000, B: 0.800000}, ""},
{"Teal Blue", colorful.Color{R: 0.000000, G: 0.501961, B: 0.501961}, ""},
{"Light Blue", colorful.Color{R: 0.560784, G: 0.847059, B: 0.847059}, ""},
{"Aquamarine", colorful.Color{R: 0.584314, G: 0.878431, B: 0.909804}, ""},
{"Turquoise Blue", colorful.Color{R: 0.423529, G: 0.854902, B: 0.905882}, ""},
{"Outer Space", colorful.Color{R: 0.176471, G: 0.219608, B: 0.227451}, ""},
{"Sky Blue", colorful.Color{R: 0.462745, G: 0.843137, B: 0.917647}, ""},
{"Middle Blue", colorful.Color{R: 0.494118, G: 0.831373, B: 0.901961}, ""},
{"Blue-Green", colorful.Color{R: 0.000000, G: 0.584314, B: 0.717647}, ""},
{"Pacific Blue", colorful.Color{R: 0.000000, G: 0.615686, B: 0.768627}, ""},
{"Cerulean", colorful.Color{R: 0.007843, G: 0.643137, B: 0.827451}, ""},
{"Maximum Blue", colorful.Color{R: 0.278431, G: 0.670588, B: 0.800000}, ""},
{"Blue (I)", colorful.Color{R: 0.180392, G: 0.705882, B: 0.901961}, ""},
{"Cerulean Blue", colorful.Color{R: 0.200000, G: 0.603922, B: 0.800000}, ""},
{"Cornflower", colorful.Color{R: 0.576471, G: 0.800000, B: 0.917647}, ""},
{"Green-Blue", colorful.Color{R: 0.156863, G: 0.529412, B: 0.784314}, ""},
{"Midnight Blue", colorful.Color{R: 0.000000, G: 0.274510, B: 0.549020}, ""},
{"Navy Blue", colorful.Color{R: 0.000000, G: 0.400000, B: 0.800000}, ""},
{"Denim", colorful.Color{R: 0.082353, G: 0.376471, B: 0.741176}, ""},
{"Blue (III)", colorful.Color{R: 0.000000, G: 0.400000, B: 1.000000}, ""},
{"Cadet Blue", colorful.Color{R: 0.662745, G: 0.698039, B: 0.764706}, ""},
{"Periwinkle", colorful.Color{R: 0.764706, G: 0.803922, B: 0.901961}, ""},
{"Blue (II)", colorful.Color{R: 0.270588, G: 0.439216, B: 0.901961}, ""},
{"Bluetiful", colorful.Color{R: 0.235294, G: 0.411765, B: 0.905882}, ""},
{"Wild Blue Yonder", colorful.Color{R: 0.478431, G: 0.537255, B: 0.721569}, ""},
{"Indigo", colorful.Color{R: 0.309804, G: 0.411765, B: 0.776471}, ""},
{"Manatee", colorful.Color{R: 0.552941, G: 0.564706, B: 0.631373}, ""},
{"Cobalt Blue", colorful.Color{R: 0.549020, G: 0.564706, B: 0.784314}, ""},
{"Celestial Blue", colorful.Color{R: 0.439216, G: 0.439216, B: 0.800000}, ""},
{"Blue Bell", colorful.Color{R: 0.600000, G: 0.600000, B: 0.800000}, ""},
{"Maximum Blue Purple", colorful.Color{R: 0.674510, G: 0.674510, B: 0.901961}, ""},
{"Violet-Blue", colorful.Color{R: 0.462745, G: 0.431373, B: 0.784314}, ""},
{"Blue-Violet", colorful.Color{R: 0.392157, G: 0.337255, B: 0.717647}, ""},
{"Ultramarine Blue", colorful.Color{R: 0.247059, G: 0.149020, B: 0.749020}, ""},
{"Middle Blue Purple", colorful.Color{R: 0.545098, G: 0.447059, B: 0.745098}, ""},
{"Purple Heart", colorful.Color{R: 0.396078, G: 0.176471, B: 0.756863}, ""},
{"Royal Purple", colorful.Color{R: 0.419608, G: 0.247059, B: 0.627451}, ""},
{"Violet (II)", colorful.Color{R: 0.513725, G: 0.349020, B: 0.639216}, ""},
{"Medium Violet", colorful.Color{R: 0.560784, G: 0.278431, B: 0.701961}, ""},
{"Wisteria", colorful.Color{R: 0.788235, G: 0.627451, B: 0.862745}, ""},
{"Lavender (I)", colorful.Color{R: 0.749020, G: 0.560784, B: 0.800000}, ""},
{"Vivid Violet", colorful.Color{R: 0.501961, G: 0.215686, B: 0.564706}, ""},
{"Maximum Purple", colorful.Color{R: 0.450980, G: 0.200000, B: 0.501961}, ""},
{"Purple Mountains' Majesty", colorful.Color{R: 0.839216, G: 0.682353, B: 0.866667}, ""},
{"Fuchsia", colorful.Color{R: 0.756863, G: 0.329412, B: 0.756863}, ""},
{"Pink Flamingo", colorful.Color{R: 0.988235, G: 0.454902, B: 0.992157}, ""},
{"Violet (I)", colorful.Color{R: 0.450980, G: 0.180392, B: 0.423529}, ""},
{"Brilliant Rose", colorful.Color{R: 0.901961, G: 0.403922, B: 0.807843}, ""},
{"Orchid", colorful.Color{R: 0.886275, G: 0.611765, B: 0.823529}, ""},
{"Plum", colorful.Color{R: 0.556863, G: 0.192157, B: 0.474510}, ""},
{"Medium Rose", colorful.Color{R: 0.850980, G: 0.423529, B: 0.745098}, ""},
{"Thistle", colorful.Color{R: 0.921569, G: 0.690196, B: 0.843137}, ""},
{"Mulberry", colorful.Color{R: 0.784314, G: 0.313725, B: 0.607843}, ""},
{"Red-Violet", colorful.Color{R: 0.733333, G: 0.200000, B: 0.521569}, ""},
{"Middle Purple", colorful.Color{R: 0.850980, G: 0.509804, B: 0.709804}, ""},
{"Maximum Red Purple", colorful.Color{R: 0.650980, G: 0.227451, B: 0.474510}, ""},
{"Jazzberry Jam", colorful.Color{R: 0.647059, G: 0.043137, B: 0.368627}, ""},
{"Eggplant", colorful.Color{R: 0.380392, G: 0.250980, B: 0.317647}, ""},
{"Magenta", colorful.Color{R: 0.964706, G: 0.325490, B: 0.650980}, ""},
{"Cerise", colorful.Color{R: 0.854902, G: 0.196078, B: 0.529412}, ""},
{"Wild Strawberry", colorful.Color{R: 1.000000, G: 0.200000, B: 0.600000}, ""},
{"Lavender (II)", colorful.Color{R: 0.984314, G: 0.682353, B: 0.823529}, ""},
{"Cotton Candy", colorful.Color{R: 1.000000, G: 0.717647, B: 0.835294}, ""},
{"Carnation Pink", colorful.Color{R: 1.000000, G: 0.650980, B: 0.788235}, ""},
{"Violet-Red", colorful.Color{R: 0.968627, G: 0.274510, B: 0.541176}, ""},
{"Razzmatazz", colorful.Color{R: 0.890196, G: 0.043137, B: 0.360784}, ""},
{"Piggy Pink", colorful.Color{R: 0.992157, G: 0.843137, B: 0.894118}, ""},
{"Carmine", colorful.Color{R: 0.901961, G: 0.180392, B: 0.419608}, ""},
{"Blush", colorful.Color{R: 0.858824, G: 0.313725, B: 0.474510}, ""},
{"Tickle Me Pink", colorful.Color{R: 0.988235, G: 0.501961, B: 0.647059}, ""},
{"Mauvelous", colorful.Color{R: 0.941176, G: 0.568627, B: 0.662745}, ""},
{"Salmon", colorful.Color{R: 1.000000, G: 0.568627, B: 0.643137}, ""},
{"Middle Red Purple", colorful.Color{R: 0.647059, G: 0.325490, B: 0.325490}, ""},
{"Mahogany", colorful.Color{R: 0.792157, G: 0.203922, B: 0.207843}, ""},
{"Melon", colorful.Color{R: 0.996078, G: 0.729412, B: 0.678431}, ""},
{"<NAME>", colorful.Color{R: 0.968627, G: 0.639216, B: 0.556863}, ""},
{"<NAME>", colorful.Color{R: 0.913725, G: 0.454902, B: 0.317647}, ""},
{"Brown", colorful.Color{R: 0.686275, G: 0.349020, B: 0.243137}, ""},
{"Sepia", colorful.Color{R: 0.619608, G: 0.356863, B: 0.250980}, ""},
{"Fuzzy Wuzzy", colorful.Color{R: 0.529412, G: 0.258824, B: 0.121569}, ""},
{"Beaver", colorful.Color{R: 0.572549, G: 0.435294, B: 0.356863}, ""},
{"Tumbleweed", colorful.Color{R: 0.870588, G: 0.650980, B: 0.505882}, ""},
{"<NAME>", colorful.Color{R: 0.823529, G: 0.490196, B: 0.274510}, ""},
{"<NAME>", colorful.Color{R: 0.400000, G: 0.258824, B: 0.156863}, ""},
{"Tan", colorful.Color{R: 0.850980, G: 0.603922, B: 0.423529}, ""},
{"<NAME>", colorful.Color{R: 0.929412, G: 0.788235, B: 0.686275}, ""},
{"Peach", colorful.Color{R: 1.000000, G: 0.796078, B: 0.643137}, ""},
{"<NAME>", colorful.Color{R: 0.501961, G: 0.333333, B: 0.200000}, ""},
{"Apricot", colorful.Color{R: 0.992157, G: 0.835294, B: 0.694118}, ""},
{"Almond", colorful.Color{R: 0.933333, G: 0.850980, B: 0.768627}, ""},
{"<NAME>", colorful.Color{R: 0.400000, G: 0.321569, B: 0.200000}, ""},
{"Shadow", colorful.Color{R: 0.513725, G: 0.439216, B: 0.313725}, ""},
{"<NAME> (I)", colorful.Color{R: 0.901961, G: 0.737255, B: 0.360784}, ""},
{"Timberwolf", colorful.Color{R: 0.850980, G: 0.839216, B: 0.811765}, ""},
{"Gold (I)", colorful.Color{R: 0.572549, G: 0.572549, B: 0.431373}, ""},
{"Gold (II)", colorful.Color{R: 0.901961, G: 0.745098, B: 0.541176}, ""},
{"Silver", colorful.Color{R: 0.788235, G: 0.752941, B: 0.733333}, ""},
{"Copper", colorful.Color{R: 0.854902, G: 0.541176, B: 0.403922}, ""},
{"Antique Brass", colorful.Color{R: 0.784314, G: 0.541176, B: 0.396078}, ""},
{"Black", colorful.Color{R: 0.000000, G: 0.000000, B: 0.000000}, ""},
{"Charcoal Gray", colorful.Color{R: 0.450980, G: 0.415686, B: 0.384314}, ""},
{"Gray", colorful.Color{R: 0.545098, G: 0.525490, B: 0.501961}, ""},
{"Blue-Gray", colorful.Color{R: 0.784314, G: 0.784314, B: 0.803922}, ""},
{"White", colorful.Color{R: 1.000000, G: 1.000000, B: 1.000000}, ""},
{"Radical Red", colorful.Color{R: 1.000000, G: 0.207843, B: 0.368627}, ""},
{"Wild Watermelon", colorful.Color{R: 0.992157, G: 0.356863, B: 0.470588}, ""},
{"Outrageous Orange", colorful.Color{R: 1.000000, G: 0.376471, B: 0.215686}, ""},
{"Atomic Tangerine", colorful.Color{R: 1.000000, G: 0.600000, B: 0.400000}, ""},
{"Neon Carrot", colorful.Color{R: 1.000000, G: 0.600000, B: 0.200000}, ""},
{"Sunglow", colorful.Color{R: 1.000000, G: 0.800000, B: 0.200000}, ""},
{"Laser Lemon", colorful.Color{R: 1.000000, G: 1.000000, B: 0.400000}, ""},
{"Unmellow Yellow", colorful.Color{R: 1.000000, G: 1.000000, B: 0.400000}, ""},
{"Electric Lime", colorful.Color{R: 0.800000, G: 1.000000, B: 0.000000}, ""},
{"Screamin' Green", colorful.Color{R: 0.400000, G: 1.000000, B: 0.400000}, ""},
{"Magic Mint", colorful.Color{R: 0.666667, G: 0.941176, B: 0.819608}, ""},
{"Blizzard Blue", colorful.Color{R: 0.313725, G: 0.749020, B: 0.901961}, ""},
{"Shocking Pink", colorful.Color{R: 1.000000, G: 0.431373, B: 1.000000}, ""},
{"Razzle Dazzle Rose", colorful.Color{R: 0.933333, G: 0.203922, B: 0.823529}, ""},
{"Hot Magenta", colorful.Color{R: 1.000000, G: 0.000000, B: 0.800000}, ""},
{"Purple Pizzazz", colorful.Color{R: 1.000000, G: 0.000000, B: 0.800000}, ""},
})
} | palette/crayola.go | 0.60964 | 0.578389 | crayola.go | starcoder |
package sobel
import (
"decompose/layer"
"math"
)
type Sobel struct {
XKernel [][]float64
YKernel [][]float64
Crop float64
MinCrop float64
MaxCrop float64
MergeFunc func(uint, float64, float64) float64
}
func With33Kernel() *Sobel {
return &Sobel{
XKernel: [][]float64{
{-1, 0, 1},
{-2, 0, 2},
{-1, 0, 1},
},
YKernel: [][]float64{
{1, 2, 1},
{0, 0, 0},
{-1, -2, 1},
},
MinCrop: 20000,
MaxCrop: 40000,
MergeFunc: SumMerge,
}
}
func With55Kernel() *Sobel {
return &Sobel{
XKernel: [][]float64{
{-2, -1, 0, 1, 2},
{-3, -2, 0, 2, 3},
{-4, -3, 0, 3, 4},
{-3, -2, 0, 2, 3},
{-2, -1, 0, 1, 2},
},
YKernel: [][]float64{
{2, 3, 4, 3, 2},
{1, 2, 3, 2, 1},
{0, 0, 0, 0, 0},
{-1, -2, -3, -2, -1},
{-2, -3, -4, -3, -2},
},
MinCrop: 20000,
MaxCrop: 50000,
MergeFunc: EuclideanDistanceMerge,
}
}
func EuclideanDistanceMerge(count uint, gx, gy float64) float64 {
return math.Sqrt(gx*gx+gy*gy) / float64(count)
}
func SumMerge(count uint, gx, gy float64) float64 {
return (math.Abs(gx) + math.Abs(gy)) / float64(count/2)
}
func (s *Sobel) GetKernelDimension() uint {
if len(s.XKernel) != len(s.YKernel) {
panic("X and Y kernel sizes differ")
}
if len(s.XKernel) == 0 {
return uint(0)
}
if len(s.XKernel[0]) != len(s.YKernel[0]) {
panic("X and Y kernel sizes differ")
}
if len(s.XKernel)%2 == 0 {
panic("X and Y kernel dimensions cannot be even")
}
if len(s.XKernel[0])%2 == 0 {
panic("X and Y kernel dimensions cannot be even")
}
if len(s.XKernel) != len(s.XKernel[0]) {
panic("X and Y kernel dimensions aren't equal")
}
return uint(len(s.XKernel))
}
func (s *Sobel) Apply(l layer.Layer) layer.Layer {
sizeX, sizeY := l.GetDimensions()
kernelDim := int(s.GetKernelDimension())
border := int(kernelDim / 2)
copy := layer.NewLayer(sizeX, sizeY)
for y := border; y < sizeY-border; y++ {
for x := int(border); x < sizeX-border; x++ {
gx := float64(0)
gy := float64(0)
for j := -border; j <= border; j++ {
for i := -border; i <= border; i++ {
gx += s.XKernel[j+border][i+border] * l[y-j][x-i]
gy += s.YKernel[j+border][i+border] * l[y-j][x-i]
}
}
copy[y][x] = s.MergeFunc(uint(kernelDim*kernelDim), gx, gy)
if copy[y][x] < s.MinCrop {
copy[y][x] = float64(0x0000)
} else if copy[y][x] > s.MaxCrop {
copy[y][x] = float64(0xffff)
}
copy[y][x] = float64(0xffff) - copy[y][x]
}
}
for y := 0; y <= border; y++ {
for x := 0; x < sizeX; x++ {
copy[y][x] = 0
}
}
for y := sizeY - border; y < sizeY; y++ {
for x := 0; x < sizeX; x++ {
copy[y][x] = 0
}
}
for y := 1; y < sizeY-1; y++ {
for x := 0; x < int(border); x++ {
copy[y][x] = 0
}
for x := sizeX - int(border); x < sizeX; x++ {
copy[y][x] = 0
}
}
return copy
} | labo-3/decompose/sobel/sobel.go | 0.596551 | 0.441312 | sobel.go | starcoder |
package main
import (
"github.com/ByteArena/box2d"
"github.com/wdevore/Ranger-Go-IGE/api"
"github.com/wdevore/Ranger-Go-IGE/extras/shapes"
)
type boxPhysicsComponent struct {
physicsComponent
}
func newBoxPhysicsComponent() *boxPhysicsComponent {
o := new(boxPhysicsComponent)
return o
}
// EnableGravity enables/disables gravity for this component
func (p *boxPhysicsComponent) EnableGravity(enable bool) {
if enable {
p.b2Body.SetGravityScale(1.0)
} else {
p.b2Body.SetGravityScale(0.0)
}
}
// ApplyForce applies linear force to box center
func (p *boxPhysicsComponent) ApplyForce(dirX, dirY float64) {
p.b2Body.ApplyForce(box2d.B2Vec2{X: dirX, Y: dirY}, p.b2Body.GetWorldCenter(), true)
}
// ApplyImpulse applies linear impulse to box center
func (p *boxPhysicsComponent) ApplyImpulse(dirX, dirY float64) {
p.b2Body.ApplyLinearImpulse(box2d.B2Vec2{X: dirX, Y: dirY}, p.b2Body.GetWorldCenter(), true)
}
// ApplyImpulseToCorner applies linear impulse to 1,1 box corner
// As the box rotates the 1,1 corner rotates which means impulses
// could change the rotation to either CW or CCW.
func (p *boxPhysicsComponent) ApplyImpulseToCorner(dirX, dirY float64) {
p.b2Body.ApplyLinearImpulse(box2d.B2Vec2{X: dirX, Y: dirY}, p.b2Body.GetWorldPoint(box2d.B2Vec2{X: 1.0, Y: 1.0}), true)
}
// ApplyTorque applies torgue to box center
func (p *boxPhysicsComponent) ApplyTorque(torgue float64) {
p.b2Body.ApplyTorque(torgue, true)
}
// ApplyAngularImpulse applies angular impulse to box center
func (p *boxPhysicsComponent) ApplyAngularImpulse(impulse float64) {
p.b2Body.ApplyAngularImpulse(impulse, true)
}
func (p *boxPhysicsComponent) Build(phyWorld *box2d.B2World, node api.INode, position api.IPoint) {
p.phyNode = node
p.position = position
// -------------------------------------------
// A body def used to create bodies
bDef := box2d.MakeB2BodyDef()
bDef.Type = box2d.B2BodyType.B2_dynamicBody
// Set the position of the Body
px := p.phyNode.Position().X()
py := p.phyNode.Position().Y()
bDef.Position.Set(
float64(px),
float64(py),
)
// An instance of a body to contain Fixtures
p.b2Body = phyWorld.CreateBody(&bDef)
// Every Fixture has a shape
b2Shape := box2d.MakeB2PolygonShape()
// Box2D assumes the same is defined in unit-space which
// means if the object is defined otherwise we need the object
// to return the correct value
tcc := p.phyNode.(*shapes.MonoSquareNode)
b2Shape.SetAsBoxFromCenterAndAngle(
float64(tcc.HalfSide()), float64(tcc.HalfSide()),
box2d.B2Vec2{X: 0.0, Y: 0.0}, 0.0)
fd := box2d.MakeB2FixtureDef()
fd.Shape = &b2Shape
fd.Density = 1.0
p.b2Body.CreateFixtureFromDef(&fd) // attach Fixture to body
} | examples/complex/physics/basic/p2_linear_impulses/box_physics_component.go | 0.734786 | 0.551453 | box_physics_component.go | starcoder |
package sweetiebot
import (
"fmt"
"strconv"
"strings"
"time"
"github.com/bwmarrin/discordgo"
)
type UsersModule struct {
}
func (w *UsersModule) Name() string {
return "Users"
}
func (w *UsersModule) Register(info *GuildInfo) {}
func (w *UsersModule) Commands() []Command {
return []Command{
&NewUsersCommand{},
&AKACommand{},
&BanCommand{},
&TimeCommand{},
&SetTimeZoneCommand{},
&UserInfoCommand{},
&DefaultServerCommand{},
&SilenceCommand{},
&UnsilenceCommand{},
}
}
func (w *UsersModule) Description() string {
return "Contains commands for getting and setting user information."
}
type NewUsersCommand struct {
}
func (c *NewUsersCommand) Name() string {
return "newusers"
}
func (c *NewUsersCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
maxresults := 5
if len(args) > 0 {
maxresults, _ = strconv.Atoi(args[0])
}
if maxresults < 1 {
return "```How I return no results???```", false, nil
}
if maxresults > 30 {
maxresults = 30
}
r := sb.db.GetNewestUsers(maxresults, SBatoi(info.Guild.ID))
s := make([]string, 0, len(r))
for _, v := range r {
s = append(s, v.User.Username+" (joined: "+ApplyTimezone(v.FirstSeen, info, msg.Author).Format(time.ANSIC)+") ["+v.User.ID+"]")
}
return "```\n" + strings.Join(s, "\n") + "```", true, nil
}
func (c *NewUsersCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{
Desc: "Lists up to maxresults users, starting with the newest user to join the server.",
Params: []CommandUsageParam{
CommandUsageParam{Name: "maxresults", Desc: "Defaults to 5 results, returns a maximum of 30.", Optional: true},
},
}
}
func (c *NewUsersCommand) UsageShort() string {
return "[PM Only] Gets a list of the most recent users to join the server."
}
type AKACommand struct {
}
func (c *AKACommand) Name() string {
return "aka"
}
func (c *AKACommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
if len(args) < 1 {
return "```You must provide a user to search for.```", false, nil
}
arg := msg.Content[indices[0]:]
IDs := FindUsername(arg, info)
if len(IDs) == 0 { // no matches!
return "```Error: Could not find any usernames or aliases matching " + arg + "!```", false, nil
}
if len(IDs) > 1 {
return "```Could be any of the following users or their aliases:\n" + strings.Join(IDsToUsernames(IDs, info), "\n") + "```", len(IDs) > 5, nil
}
r := sb.db.GetAliases(IDs[0])
u, _ := sb.db.GetMember(IDs[0], SBatoi(info.Guild.ID))
if u == nil {
return "```Error: User does not exist!```", false, nil
}
nick := u.User.Username
if len(u.Nick) > 0 {
nick = u.Nick
}
return "```All known aliases for " + nick + " [" + u.User.ID + "]\n " + strings.Join(r, "\n ") + "```", false, nil
}
func (c *AKACommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{
Desc: "Lists all known aliases of the user in question, up to a maximum of 10, with the names used the longest first.",
Params: []CommandUsageParam{
CommandUsageParam{Name: "user", Desc: "A ping of the user, or simply their name.", Optional: true},
},
}
}
func (c *AKACommand) UsageShort() string { return "Lists all known aliases of a user." }
func ProcessDurationAndReason(args []string, msg *discordgo.Message, indices []int, ty uint8, uID string, gID uint64) (string, string) {
reason := ""
if len(args) > 0 {
if strings.ToLower(args[0]) == "for:" {
if len(args) < 3 {
return "", "```Error: Duration should be specified as 'for: 5 DAYS' or 'for: 72 HOURS'```"
}
duration, err := strconv.Atoi(args[1])
if err != nil {
return "", "```Error: Duration number was not an integer.```"
}
t := time.Now().UTC()
switch parseRepeatInterval(args[2]) {
case 1:
t = t.Add(time.Duration(duration) * time.Second)
case 2:
t = t.Add(time.Duration(duration) * time.Minute)
case 3:
t = t.Add(time.Duration(duration) * time.Hour)
case 4:
t = t.AddDate(0, 0, duration)
case 5:
t = t.AddDate(0, 0, duration*7)
case 6:
t = t.AddDate(0, duration, 0)
case 8:
t = t.AddDate(duration, 0, 0)
case 7:
fallthrough
case 255:
return "", "```Error: unrecognized interval.```"
}
if !sb.db.AddSchedule(gID, t, ty, uID) {
return "", "```Error: servers can't have more than 5000 events!```"
}
scheduleID := sb.db.FindEvent(uID, gID, ty)
if scheduleID == nil {
return "", "```Error: Could not find inserted event!```"
}
if len(args) > 3 {
reason = msg.Content[indices[3]:]
}
} else {
reason = msg.Content[indices[0]:]
}
}
return reason, ""
}
// Ban command that tracks who banned someone, why, and optionally make the ban temporary
type BanCommand struct {
}
func (c *BanCommand) Name() string {
return "ban"
}
func (c *BanCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
// make sure we passed a valid argument to the command
if len(args) < 1 {
return "```You didn't tell me who to zap with the friendship gun, silly.```", false, nil
}
// get the user ID and deal with Discord's alias bullshit
arg := args[0]
IDs := FindUsername(arg, info)
if len(IDs) == 0 { // no matches
return "```Error: Could not find any usernames or aliases matching " + arg + "!```", false, nil
}
if len(IDs) > 1 {
return "```Could be any of the following users or their aliases:\n" + strings.Join(IDsToUsernames(IDs, info), "\n") + "```", len(IDs) > 5, nil
}
gID := SBatoi(info.Guild.ID)
u, _, _, _ := sb.db.GetUser(IDs[0])
if u == nil {
return "```Error: User does not exist!```", false, nil
}
uID := SBitoa(IDs[0])
reason, e := ProcessDurationAndReason(args[1:], msg, indices[1:], 0, uID, gID)
if len(e) > 0 {
return e, false, nil
}
fmt.Printf("Banned %s because: %s\n", u.Username, reason)
err := sb.dg.GuildBanCreate(info.Guild.ID, uID, 1) // Note that this will probably generate a SawBan event
if err != nil {
return "```Error: " + err.Error() + "```", false, nil
}
return "```Banned " + u.Username + " from the server. Harmony restored.```", false, nil
}
func (c *BanCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{
Desc: "Bans the given user. Examples: `'!ban @CrystalFlash for: 5 MINUTES because he's a dunce` or `!ban \"Name With Spaces\" caught stealing cookies`",
Params: []CommandUsageParam{
CommandUsageParam{Name: "user", Desc: "A ping of the user, or simply their name. If the name has spaces, this argument must be put in quotes.", Optional: false},
CommandUsageParam{Name: "for: duration", Desc: "If the keyword `for:` is used after the username, looks for a duration of the form `for: 50 MINUTES` and creates an unban event that will be fired after that much time has passed from now.", Optional: true},
CommandUsageParam{Name: "reason", Desc: "The rest of the message is treated as a reason for the ban (currently not saved anywhere).", Optional: true},
},
}
}
func (c *BanCommand) UsageShort() string { return "Bans a user." }
type TimeCommand struct {
}
func (c *TimeCommand) Name() string {
return "time"
}
func (c *TimeCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
if len(args) < 1 {
return "```This server's local time is: " + ApplyTimezone(time.Now().UTC(), info, nil).Format("Jan 2, 3:04pm```"), false, nil
}
arg := msg.Content[indices[0]:]
IDs := FindUsername(arg, info)
if len(IDs) == 0 { // no matches
return "```Error: Could not find any usernames or aliases matching " + arg + "!```", false, nil
}
if len(IDs) > 1 {
return "```Could be any of the following users or their aliases:\n" + strings.Join(IDsToUsernames(IDs, info), "\n") + "```", len(IDs) > 5, nil
}
tz := sb.db.GetTimeZone(IDs[0])
if tz == nil {
return "```That user has not specified what their timezone is.```", false, nil
}
return "```That user's local time is: " + time.Now().In(tz).Format("Jan 2, 3:04pm```"), false, nil
}
func (c *TimeCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{
Desc: "Gets the local time for the specified user, or simply gets the local time for this server.",
Params: []CommandUsageParam{
CommandUsageParam{Name: "user", Desc: "A ping of the user, or simply their name.", Optional: true},
},
}
}
func (c *TimeCommand) UsageShort() string { return "Gets a user's local time." }
type SetTimeZoneCommand struct {
}
func (c *SetTimeZoneCommand) Name() string {
return "settimezone"
}
func (c *SetTimeZoneCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
if len(args) < 1 {
return "```You have to specify what your timezone is!```", false, nil
}
tz := []string{}
if len(args) < 2 {
tz = sb.db.FindTimeZone("%" + args[0] + "%")
} else {
offset, err := strconv.Atoi(args[1])
if err != nil {
return "```Could not parse offset. Note that timezones do not have spaces - use underscores (_) instead. The second argument should be your time difference from GMT in hours. For example, PDT is GMT-7, so you could search for \"America -7\".```", false, nil
}
tz = sb.db.FindTimeZoneOffset("%"+args[0]+"%", offset*60)
}
if len(tz) < 1 {
if len(args) < 2 {
return "```Could not find any timezone locations that match that string. Try broadening your search (for example, search for 'America' or 'Pacific').```", false, nil
} else {
return "```Could not find any timezone locations that match that string and offset combination. Try broadening your search, or leaving out the timezone offset parameter.```", false, nil
}
}
if len(tz) > 1 {
return "Could be any of the following timezones:\n" + strings.Join(tz, "\n"), len(tz) > 6, nil
}
loc, err := time.LoadLocation(tz[0])
if err != nil {
return "```Could not load location! Is the timezone data missing or corrupt? Error: " + err.Error() + "```", false, nil
}
if sb.db.SetTimeZone(SBatoi(msg.Author.ID), loc) != nil {
return "```Error: could not set timezone!```", false, nil
}
return "```Set your timezone to " + loc.String() + "```", false, nil
}
func (c *SetTimeZoneCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{
Desc: "Sets your timezone to the given location. Providing a partial timezone name, like \"America\", will return a list of all possible timezones that contain that string.",
Params: []CommandUsageParam{
CommandUsageParam{Name: "timezone", Desc: "A timezone location, such as `America/Los_Angeles`. Note that timezones do not have spaces.", Optional: true},
CommandUsageParam{Name: "offset", Desc: "Your expected timezone offset in hours, used to narrow the search. For example, if you know you're in the PDT timezone, which is GMT-7, you could search for `America -7` to list all timezones in america with a standard or DST timezone offset of -7.", Optional: true},
},
}
}
func (c *SetTimeZoneCommand) UsageShort() string { return "Set your local timezone." }
type UserInfoCommand struct {
}
func (c *UserInfoCommand) Name() string {
return "UserInfo"
}
func (c *UserInfoCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
if len(args) < 1 {
return "```You must provide a user to search for.```", false, nil
}
arg := msg.Content[indices[0]:]
IDs := FindUsername(arg, info)
if len(IDs) == 0 { // no matches!
return "```Error: Could not find any usernames or aliases matching " + arg + "!```", false, nil
}
if len(IDs) > 1 {
return "```Could be any of the following users or their aliases:\n" + strings.Join(IDsToUsernames(IDs, info), "\n") + "```", len(IDs) > 5, nil
}
aliases := sb.db.GetAliases(IDs[0])
dbuser, lastseen, tz, _ := sb.db.GetUser(IDs[0])
localtime := ""
if tz == nil {
tz = time.FixedZone("[Not Set]", 0)
} else {
localtime = time.Now().In(tz).Format(time.RFC1123)
}
m, err := sb.dg.GuildMember(info.Guild.ID, SBitoa(IDs[0]))
if err != nil {
m = &discordgo.Member{Roles: []string{}}
u, err := sb.dg.User(SBitoa(IDs[0]))
if err != nil {
if dbuser == nil {
return "```Error retrieving user information: " + err.Error() + "```", false, nil
}
u = dbuser
}
m.User = u
}
authortz := getTimezone(info, msg.Author)
joinedat, err := time.Parse(time.RFC3339Nano, m.JoinedAt)
joined := ""
if err == nil {
joined = joinedat.In(authortz).Format(time.RFC1123)
}
guildroles, err := sb.dg.GuildRoles(info.Guild.ID)
if err != nil {
guildroles = info.Guild.Roles
}
roles := make([]string, 0, len(m.Roles))
for _, v := range m.Roles {
if err == nil {
for _, role := range guildroles {
if role.ID == v {
roles = append(roles, role.Name)
break
}
}
} else {
roles = append(roles, "<@&"+v+">")
}
}
return ExtraSanitize(fmt.Sprintf("**ID:** %v\n**Username:** %v#%v\n**Nickname:** %v\n**Timezone:** %v\n**Local Time:** %v\n**Joined:** %v\n**Roles:** %v\n**Bot:** %v\n**Last Seen:** %v\n**Aliases:** %v\n**Avatar:** ", m.User.ID, m.User.Username, m.User.Discriminator, m.Nick, tz, localtime, joined, strings.Join(roles, ", "), m.User.Bot, lastseen.In(authortz).Format(time.RFC1123), strings.Join(aliases, ", "))) + discordgo.EndpointUserAvatar(m.User.ID, m.User.Avatar), false, nil
}
func (c *UserInfoCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{
Desc: "Lists the ID, username, nickname, timezone, roles, avatar, join date, and other information about a given user.",
Params: []CommandUsageParam{
CommandUsageParam{Name: "user", Desc: "A ping of the user, or simply their name.", Optional: false},
},
}
}
func (c *UserInfoCommand) UsageShort() string { return "Lists information about a user." }
type DefaultServerCommand struct {
}
func (c *DefaultServerCommand) Name() string {
return "DefaultServer"
}
func (c *DefaultServerCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
gIDs := sb.db.GetUserGuilds(SBatoi(msg.Author.ID))
find := ""
if len(args) > 0 {
find = msg.Content[indices[0]:]
}
guilds := findServers(find, gIDs)
names := make([]string, len(guilds), len(guilds))
for k, v := range guilds {
names[k] = v.Guild.Name
}
if len(args) < 1 {
server := getDefaultServer(SBatoi(msg.Author.ID))
if server != nil {
return fmt.Sprintf("```Your default server is %s. You are on the following servers:\n%s```", server.Guild.Name, strings.Join(names, "\n")), false, nil
}
return fmt.Sprintf("```You have no default server. You are on the following servers:\n%s```", strings.Join(names, "\n")), false, nil
}
if len(guilds) > 1 {
return "```Could be any of the following servers:\n" + strings.Join(names, "\n") + "```", false, nil
}
if len(guilds) < 1 {
return "```No server matches that string (or you haven't joined that server).```", false, nil
}
target := SBatoi(guilds[0].Guild.ID)
_, err := sb.dg.GuildMember(guilds[0].Guild.ID, msg.Author.ID) // Attempt to verify the user is actually in this guild.
if err != nil {
return fmt.Sprintf("```You aren't a member of %s (or discord blew up, in which case, try again).```", guilds[0].Guild.Name), false, nil
}
sb.db.SetDefaultServer(SBatoi(msg.Author.ID), target)
return fmt.Sprintf("```Your default server was set to %s```", guilds[0].Guild.Name), false, nil
}
func (c *DefaultServerCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{
Desc: "Sets the default server SB will run commands on that you PM to her.",
Params: []CommandUsageParam{
CommandUsageParam{Name: "server", Desc: "The exact name of your default server.", Optional: false},
},
}
}
func (c *DefaultServerCommand) UsageShort() string { return "Sets your default server." }
type SilenceCommand struct {
}
func (c *SilenceCommand) Name() string {
return "Silence"
}
func (c *SilenceCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
if len(args) < 1 {
return "```You must provide a user to silence.```", false, nil
}
index := len(args)
for i := 1; i < len(args); i++ {
if strings.ToLower(args[i]) == "for:" {
index = i
break
}
}
arg := strings.Join(args[0:index], " ")
IDs := FindUsername(arg, info)
if len(IDs) == 0 { // no matches!
return "```Error: Could not find any usernames or aliases matching " + arg + "!```", false, nil
}
if len(IDs) > 1 {
return "```Could be any of the following users or their aliases:\n" + strings.Join(IDsToUsernames(IDs, info), "\n") + "```", len(IDs) > 5, nil
}
gID := SBatoi(info.Guild.ID)
uID := SBitoa(IDs[0])
reason, e := ProcessDurationAndReason(args[index:], msg, indices[index:], 8, uID, gID)
if len(e) > 0 {
return e, false, nil
}
if SilenceMember(SBitoa(IDs[0]), info) < 0 {
return "```Error occured trying to silence " + IDsToUsernames(IDs, info)[0] + ".```", false, nil
}
if len(info.config.Spam.SilenceMessage) > 0 {
sb.dg.ChannelMessageSend(SBitoa(info.config.Users.WelcomeChannel), "<@"+SBitoa(IDs[0])+"> "+info.config.Spam.SilenceMessage)
}
if len(reason) > 0 {
reason = " because " + reason
}
return fmt.Sprintf("```Silenced %s%s.```", IDsToUsernames(IDs, info)[0], reason), false, nil
}
func (c *SilenceCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{
Desc: "Silences the given user.",
Params: []CommandUsageParam{
CommandUsageParam{Name: "user", Desc: "A ping of the user, or simply their name.", Optional: false},
CommandUsageParam{Name: "for: duration", Desc: "If the keyword `for:` is used after the username, looks for a duration of the form `for: 50 MINUTES` and creates an unsilence event that will be fired after that much time has passed from now.", Optional: true},
},
}
}
func (c *SilenceCommand) UsageShort() string { return "Silences a user." }
func UnsilenceMember(user uint64, info *GuildInfo) (int8, error) {
srole := SBitoa(info.config.Spam.SilentRole)
userID := SBitoa(user)
m, err := sb.dg.GuildMember(info.Guild.ID, userID)
if err != nil {
return -1, err
}
for i := 0; i < len(m.Roles); i++ {
if m.Roles[i] == srole {
m.Roles = append(m.Roles[:i], m.Roles[i+1:]...)
sb.dg.GuildMemberEdit(info.Guild.ID, userID, m.Roles)
return 0, nil
}
}
return 1, nil
}
type UnsilenceCommand struct {
}
func (c *UnsilenceCommand) Name() string {
return "Unsilence"
}
func (c *UnsilenceCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
if len(args) < 1 {
return "```You must provide a user to unsilence.```", false, nil
}
arg := msg.Content[indices[0]:]
IDs := FindUsername(arg, info)
if len(IDs) == 0 { // no matches!
return "```Error: Could not find any usernames or aliases matching " + arg + "!```", false, nil
}
if len(IDs) > 1 {
return "```Could be any of the following users or their aliases:\n" + strings.Join(IDsToUsernames(IDs, info), "\n") + "```", len(IDs) > 5, nil
}
e, err := UnsilenceMember(IDs[0], info)
if e == -1 {
return "```Could not get member: " + err.Error() + "```", false, nil
} else if e == 1 {
return "```" + IDsToUsernames(IDs, info)[0] + " wasn't silenced in the first place!```", false, nil
}
return "```Unsilenced " + IDsToUsernames(IDs, info)[0] + ".```", false, nil
}
func (c *UnsilenceCommand) Usage(info *GuildInfo) *CommandUsage {
return &CommandUsage{
Desc: "Unsilences the given user.",
Params: []CommandUsageParam{
CommandUsageParam{Name: "user", Desc: "A ping of the user, or simply their name.", Optional: false},
},
}
}
func (c *UnsilenceCommand) UsageShort() string { return "Unsilences a user." } | sweetiebot/users_command.go | 0.57332 | 0.46873 | users_command.go | starcoder |
package samples
import (
"fmt"
sll "github.com/emirpasic/gods/lists/singlylinkedlist"
)
// Samples is a fixed size array scaled to the length of the simulation.
// The graph scans and renders the array.
// The sim populates the array based on a moving index.
// Samples is a 2D list of samples for synapses
type Samples struct {
// List of SamplesLane(s), for all passes.
// This data is used for rendering by the graphs.
// Each lane is a fixed size array.
lanes *sll.List
// scanIdx int
synCnt int
size int // typically the length of simulation
// deprecated
// mutex *sync.Mutex
}
// Lanes are trains of spikes for a given synapse.
type SamplesLane struct {
Id int
Samples []*Spike
}
func NewSamples(synCnt, size int) *Samples {
s := new(Samples)
s.lanes = sll.New()
s.synCnt = synCnt
s.size = size
// Pre expand collection
for i := 0; i < synCnt; i++ {
l := new(SamplesLane)
l.Id = i
l.Samples = make([]*Spike, size)
for s := 0; s < size; s++ {
l.Samples[s] = NewSpike()
}
s.lanes.Add(l)
}
// s.mutex = &sync.Mutex{}
return s
}
func (s *Samples) GetLanes() *sll.List {
return s.lanes
}
func (s *Samples) Size() int {
return s.size
}
func (s *Samples) Put(time float64, value byte, sid, key int) {
// sid is usually synId
_, lif := s.lanes.Find(func(id int, v interface{}) bool {
return v.(*SamplesLane).Id == sid
})
l := lif.(*SamplesLane)
sp := l.Samples[int(time)]
sp.Time = time
sp.Value = value
sp.Id = sid
sp.Key = key
// s.scanIdx = (s.scanIdx + 1) % s.size
}
func (s *Samples) Print() {
it := s.lanes.Iterator()
for it.Next() {
lane := it.Value().(*SamplesLane)
fmt.Printf("(%d) %v\n", lane.Id, lane.Samples)
}
}
// ---------------------------------------------------------
// Data samples
// ---------------------------------------------------------
var PoiSamples *DatSamples
var StimSamples *DatSamples
type DatSamples struct {
// List of SamplesLane(s), for all passes.
// This data is used for rendering by the graphs.
// Each lane is a fixed size array.
lanes *sll.List
synCnt int
size int // typically the length of simulation
}
func NewDatSamples(synCnt, size int) *DatSamples {
s := new(DatSamples)
s.lanes = sll.New()
s.synCnt = synCnt
s.size = size
// Pre expand collection
for i := 0; i < synCnt; i++ {
l := new(SamplesLane)
l.Id = i
l.Samples = make([]*Spike, size)
for s := 0; s < size; s++ {
l.Samples[s] = NewSpike()
}
s.lanes.Add(l)
}
return s
}
func (s *DatSamples) GetLanes() *sll.List {
return s.lanes
}
func (s *DatSamples) Size() int {
return s.size
}
func (s *DatSamples) Put(time float64, value byte, sid, key int) {
// sid is usually synId
_, lif := s.lanes.Find(func(id int, v interface{}) bool {
return v.(*SamplesLane).Id == sid
})
l := lif.(*SamplesLane)
sp := l.Samples[int(time)]
sp.Time = time
sp.Value = value
sp.Id = sid
sp.Key = key
}
// ---------------------------------------------------------
// Neuron samples
// ---------------------------------------------------------
var CellSamples *NeuronSamples
type NeuronSamples struct {
Samples []*Spike
}
func NewNeuronSamples(size int) *NeuronSamples {
ns := new(NeuronSamples)
// Pre expand collection
ns.Samples = make([]*Spike, size)
for s := 0; s < size; s++ {
ns.Samples[s] = NewSpike()
}
return ns
}
func (ns *NeuronSamples) Put(time float64, value byte, sid, key int) {
sp := ns.Samples[int(time)]
sp.Time = time
sp.Value = value
sp.Id = sid
sp.Key = key
}
// func (s *Samples) Use() *sll.List {
// s.mutex.Lock()
// return s.lanes
// }
// func (s *Samples) Release() {
// s.mutex.Unlock()
// } | simulation/samples/samples.go | 0.669637 | 0.437343 | samples.go | starcoder |
package rect
import (
"errors"
"image"
"image/color"
"gitlab.com/256/Underbot/cv/object"
)
// CenterColor gets the color of the pixel in the middle of an image
func CenterColor(tmpImg image.Image) (color.Color, error) {
// Gets the underlying type of RGBA which supports At()
img, ok := tmpImg.(*image.RGBA)
if !ok {
return nil, errors.New("the underlying image is not of type image.RGBA")
}
centerPoint := RectangleCenter(img.Rect)
return img.At(centerPoint.X, centerPoint.Y), nil
}
// RectangleCenter finds the point in the middle of a rectangle
func RectangleCenter(rect image.Rectangle) (point image.Point) {
point.X = (rect.Min.X + rect.Max.X) / 2
point.Y = (rect.Min.Y + rect.Max.Y) / 2
return point
}
// DrawObject is a wrapper for the DrawRectangle function for objects
func DrawObject(img *image.RGBA, color color.Color, obj object.Object) {
DrawRectangle(img, color, obj.Bounds)
}
// DrawRectangle draws a rectangle using the specified color onto an image
func DrawRectangle(img *image.RGBA, color color.Color, rect image.Rectangle) {
Rect(img, color, rect.Min.X, rect.Min.Y, rect.Max.X, rect.Max.Y)
}
// GetRectangle creates a rectangle around a cluster of points
func GetRectangle(points []image.Point) image.Rectangle {
topLeftX := 0
topLeftY := 0
bottomRightX := 0
bottomRightY := 0
for i, point := range points {
if i == 0 {
bottomRightX = point.X
bottomRightY = point.Y
topLeftX = point.X
topLeftY = point.Y
}
if point.X > bottomRightX {
bottomRightX = point.X
}
if point.X < topLeftX {
topLeftX = point.X
}
if point.Y < topLeftY {
topLeftY = point.Y
}
if point.Y > bottomRightY {
bottomRightY = point.Y
}
}
return image.Rect(topLeftX, topLeftY, bottomRightX, bottomRightY)
}
// HLine draws a horizontal line
func HLine(img *image.RGBA, col color.Color, x1, y, x2 int) {
for ; x1 <= x2; x1++ {
img.Set(x1, y, col)
}
}
// VLine draws a vertical line
func VLine(img *image.RGBA, col color.Color, x, y1, y2 int) {
for ; y1 <= y2; y1++ {
img.Set(x, y1, col)
}
}
// Rect draws a rectangle utilizing HLine() and VLine()
func Rect(img *image.RGBA, col color.Color, x1, y1, x2, y2 int) {
HLine(img, col, x1, y1, x2)
HLine(img, col, x1, y2, x2)
VLine(img, col, x1, y1, y2)
VLine(img, col, x2, y1, y2)
}
// AverageSize gets the average of the height and width of the rectangle
func AverageSize(rect image.Rectangle) int {
return (rect.Dx() + rect.Dy()) / 2
} | cv/rect/rect.go | 0.78316 | 0.577287 | rect.go | starcoder |
package pinapi
import (
"encoding/json"
)
// GetBetsByTypeResponseV3 struct for GetBetsByTypeResponseV3
type GetBetsByTypeResponseV3 struct {
// Whether there are more pages available.
MoreAvailable *bool `json:"moreAvailable,omitempty"`
// Page size. Default is 1000.
PageSize *int `json:"pageSize,omitempty"`
// Starting record number of the result set. Records start at zero
FromRecord *int `json:"fromRecord,omitempty"`
// Ending record number of the result set.
ToRecord *int `json:"toRecord,omitempty"`
// A collection of placed straight bets.
StraightBets *[]StraightBetV3 `json:"straightBets,omitempty"`
// A collection of placed parlay bets.
ParlayBets *[]ParlayBet `json:"parlayBets,omitempty"`
// A collection of placed teaser bets.
TeaserBets *[]TeaserBet `json:"teaserBets,omitempty"`
// A collection of placed special bets.
SpecialBets *[]SpecialBet `json:"specialBets,omitempty"`
// A collection of placed manual bets.
ManualBets *[]ManualBet `json:"manualBets,omitempty"`
}
// NewGetBetsByTypeResponseV3 instantiates a new GetBetsByTypeResponseV3 object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewGetBetsByTypeResponseV3() *GetBetsByTypeResponseV3 {
this := GetBetsByTypeResponseV3{}
return &this
}
// NewGetBetsByTypeResponseV3WithDefaults instantiates a new GetBetsByTypeResponseV3 object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewGetBetsByTypeResponseV3WithDefaults() *GetBetsByTypeResponseV3 {
this := GetBetsByTypeResponseV3{}
return &this
}
// GetMoreAvailable returns the MoreAvailable field value if set, zero value otherwise.
func (o *GetBetsByTypeResponseV3) GetMoreAvailable() bool {
if o == nil || o.MoreAvailable == nil {
var ret bool
return ret
}
return *o.MoreAvailable
}
// GetMoreAvailableOk returns a tuple with the MoreAvailable field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *GetBetsByTypeResponseV3) GetMoreAvailableOk() (*bool, bool) {
if o == nil || o.MoreAvailable == nil {
return nil, false
}
return o.MoreAvailable, true
}
// HasMoreAvailable returns a boolean if a field has been set.
func (o *GetBetsByTypeResponseV3) HasMoreAvailable() bool {
if o != nil && o.MoreAvailable != nil {
return true
}
return false
}
// SetMoreAvailable gets a reference to the given bool and assigns it to the MoreAvailable field.
func (o *GetBetsByTypeResponseV3) SetMoreAvailable(v bool) {
o.MoreAvailable = &v
}
// GetPageSize returns the PageSize field value if set, zero value otherwise.
func (o *GetBetsByTypeResponseV3) GetPageSize() int {
if o == nil || o.PageSize == nil {
var ret int
return ret
}
return *o.PageSize
}
// GetPageSizeOk returns a tuple with the PageSize field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *GetBetsByTypeResponseV3) GetPageSizeOk() (*int, bool) {
if o == nil || o.PageSize == nil {
return nil, false
}
return o.PageSize, true
}
// HasPageSize returns a boolean if a field has been set.
func (o *GetBetsByTypeResponseV3) HasPageSize() bool {
if o != nil && o.PageSize != nil {
return true
}
return false
}
// SetPageSize gets a reference to the given int and assigns it to the PageSize field.
func (o *GetBetsByTypeResponseV3) SetPageSize(v int) {
o.PageSize = &v
}
// GetFromRecord returns the FromRecord field value if set, zero value otherwise.
func (o *GetBetsByTypeResponseV3) GetFromRecord() int {
if o == nil || o.FromRecord == nil {
var ret int
return ret
}
return *o.FromRecord
}
// GetFromRecordOk returns a tuple with the FromRecord field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *GetBetsByTypeResponseV3) GetFromRecordOk() (*int, bool) {
if o == nil || o.FromRecord == nil {
return nil, false
}
return o.FromRecord, true
}
// HasFromRecord returns a boolean if a field has been set.
func (o *GetBetsByTypeResponseV3) HasFromRecord() bool {
if o != nil && o.FromRecord != nil {
return true
}
return false
}
// SetFromRecord gets a reference to the given int and assigns it to the FromRecord field.
func (o *GetBetsByTypeResponseV3) SetFromRecord(v int) {
o.FromRecord = &v
}
// GetToRecord returns the ToRecord field value if set, zero value otherwise.
func (o *GetBetsByTypeResponseV3) GetToRecord() int {
if o == nil || o.ToRecord == nil {
var ret int
return ret
}
return *o.ToRecord
}
// GetToRecordOk returns a tuple with the ToRecord field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *GetBetsByTypeResponseV3) GetToRecordOk() (*int, bool) {
if o == nil || o.ToRecord == nil {
return nil, false
}
return o.ToRecord, true
}
// HasToRecord returns a boolean if a field has been set.
func (o *GetBetsByTypeResponseV3) HasToRecord() bool {
if o != nil && o.ToRecord != nil {
return true
}
return false
}
// SetToRecord gets a reference to the given int and assigns it to the ToRecord field.
func (o *GetBetsByTypeResponseV3) SetToRecord(v int) {
o.ToRecord = &v
}
// GetStraightBets returns the StraightBets field value if set, zero value otherwise.
func (o *GetBetsByTypeResponseV3) GetStraightBets() []StraightBetV3 {
if o == nil || o.StraightBets == nil {
var ret []StraightBetV3
return ret
}
return *o.StraightBets
}
// GetStraightBetsOk returns a tuple with the StraightBets field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *GetBetsByTypeResponseV3) GetStraightBetsOk() (*[]StraightBetV3, bool) {
if o == nil || o.StraightBets == nil {
return nil, false
}
return o.StraightBets, true
}
// HasStraightBets returns a boolean if a field has been set.
func (o *GetBetsByTypeResponseV3) HasStraightBets() bool {
if o != nil && o.StraightBets != nil {
return true
}
return false
}
// SetStraightBets gets a reference to the given []StraightBetV3 and assigns it to the StraightBets field.
func (o *GetBetsByTypeResponseV3) SetStraightBets(v []StraightBetV3) {
o.StraightBets = &v
}
// GetParlayBets returns the ParlayBets field value if set, zero value otherwise.
func (o *GetBetsByTypeResponseV3) GetParlayBets() []ParlayBet {
if o == nil || o.ParlayBets == nil {
var ret []ParlayBet
return ret
}
return *o.ParlayBets
}
// GetParlayBetsOk returns a tuple with the ParlayBets field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *GetBetsByTypeResponseV3) GetParlayBetsOk() (*[]ParlayBet, bool) {
if o == nil || o.ParlayBets == nil {
return nil, false
}
return o.ParlayBets, true
}
// HasParlayBets returns a boolean if a field has been set.
func (o *GetBetsByTypeResponseV3) HasParlayBets() bool {
if o != nil && o.ParlayBets != nil {
return true
}
return false
}
// SetParlayBets gets a reference to the given []ParlayBet and assigns it to the ParlayBets field.
func (o *GetBetsByTypeResponseV3) SetParlayBets(v []ParlayBet) {
o.ParlayBets = &v
}
// GetTeaserBets returns the TeaserBets field value if set, zero value otherwise.
func (o *GetBetsByTypeResponseV3) GetTeaserBets() []TeaserBet {
if o == nil || o.TeaserBets == nil {
var ret []TeaserBet
return ret
}
return *o.TeaserBets
}
// GetTeaserBetsOk returns a tuple with the TeaserBets field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *GetBetsByTypeResponseV3) GetTeaserBetsOk() (*[]TeaserBet, bool) {
if o == nil || o.TeaserBets == nil {
return nil, false
}
return o.TeaserBets, true
}
// HasTeaserBets returns a boolean if a field has been set.
func (o *GetBetsByTypeResponseV3) HasTeaserBets() bool {
if o != nil && o.TeaserBets != nil {
return true
}
return false
}
// SetTeaserBets gets a reference to the given []TeaserBet and assigns it to the TeaserBets field.
func (o *GetBetsByTypeResponseV3) SetTeaserBets(v []TeaserBet) {
o.TeaserBets = &v
}
// GetSpecialBets returns the SpecialBets field value if set, zero value otherwise.
func (o *GetBetsByTypeResponseV3) GetSpecialBets() []SpecialBet {
if o == nil || o.SpecialBets == nil {
var ret []SpecialBet
return ret
}
return *o.SpecialBets
}
// GetSpecialBetsOk returns a tuple with the SpecialBets field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *GetBetsByTypeResponseV3) GetSpecialBetsOk() (*[]SpecialBet, bool) {
if o == nil || o.SpecialBets == nil {
return nil, false
}
return o.SpecialBets, true
}
// HasSpecialBets returns a boolean if a field has been set.
func (o *GetBetsByTypeResponseV3) HasSpecialBets() bool {
if o != nil && o.SpecialBets != nil {
return true
}
return false
}
// SetSpecialBets gets a reference to the given []SpecialBet and assigns it to the SpecialBets field.
func (o *GetBetsByTypeResponseV3) SetSpecialBets(v []SpecialBet) {
o.SpecialBets = &v
}
// GetManualBets returns the ManualBets field value if set, zero value otherwise.
func (o *GetBetsByTypeResponseV3) GetManualBets() []ManualBet {
if o == nil || o.ManualBets == nil {
var ret []ManualBet
return ret
}
return *o.ManualBets
}
// GetManualBetsOk returns a tuple with the ManualBets field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *GetBetsByTypeResponseV3) GetManualBetsOk() (*[]ManualBet, bool) {
if o == nil || o.ManualBets == nil {
return nil, false
}
return o.ManualBets, true
}
// HasManualBets returns a boolean if a field has been set.
func (o *GetBetsByTypeResponseV3) HasManualBets() bool {
if o != nil && o.ManualBets != nil {
return true
}
return false
}
// SetManualBets gets a reference to the given []ManualBet and assigns it to the ManualBets field.
func (o *GetBetsByTypeResponseV3) SetManualBets(v []ManualBet) {
o.ManualBets = &v
}
func (o GetBetsByTypeResponseV3) MarshalJSON() ([]byte, error) {
toSerialize := map[string]interface{}{}
if o.MoreAvailable != nil {
toSerialize["moreAvailable"] = o.MoreAvailable
}
if o.PageSize != nil {
toSerialize["pageSize"] = o.PageSize
}
if o.FromRecord != nil {
toSerialize["fromRecord"] = o.FromRecord
}
if o.ToRecord != nil {
toSerialize["toRecord"] = o.ToRecord
}
if o.StraightBets != nil {
toSerialize["straightBets"] = o.StraightBets
}
if o.ParlayBets != nil {
toSerialize["parlayBets"] = o.ParlayBets
}
if o.TeaserBets != nil {
toSerialize["teaserBets"] = o.TeaserBets
}
if o.SpecialBets != nil {
toSerialize["specialBets"] = o.SpecialBets
}
if o.ManualBets != nil {
toSerialize["manualBets"] = o.ManualBets
}
return json.Marshal(toSerialize)
}
type NullableGetBetsByTypeResponseV3 struct {
value *GetBetsByTypeResponseV3
isSet bool
}
func (v NullableGetBetsByTypeResponseV3) Get() *GetBetsByTypeResponseV3 {
return v.value
}
func (v *NullableGetBetsByTypeResponseV3) Set(val *GetBetsByTypeResponseV3) {
v.value = val
v.isSet = true
}
func (v NullableGetBetsByTypeResponseV3) IsSet() bool {
return v.isSet
}
func (v *NullableGetBetsByTypeResponseV3) Unset() {
v.value = nil
v.isSet = false
}
func NewNullableGetBetsByTypeResponseV3(val *GetBetsByTypeResponseV3) *NullableGetBetsByTypeResponseV3 {
return &NullableGetBetsByTypeResponseV3{value: val, isSet: true}
}
func (v NullableGetBetsByTypeResponseV3) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullableGetBetsByTypeResponseV3) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | pinapi/model_get_bets_by_type_response_v3.go | 0.749546 | 0.409103 | model_get_bets_by_type_response_v3.go | starcoder |
package index
import (
"bytes"
"errors"
"github.com/asdine/genji/engine"
)
const (
separator byte = 0x1E
)
var (
// ErrDuplicate is returned when a value is already associated with a key
ErrDuplicate = errors.New("duplicate")
)
// An Index associates encoded values with keys.
// It is sorted by value following the lexicographic order.
type Index interface {
// Set associates a value with a key.
Set(value []byte, key []byte) error
// Delete all the references to the key from the index.
Delete(value []byte, key []byte) error
// AscendGreaterOrEqual seeks for the pivot and then goes through all the subsequent key value pairs in increasing order and calls the given function for each pair.
// If the given function returns an error, the iteration stops and returns that error.
// If the pivot is nil, starts from the beginning.
AscendGreaterOrEqual(pivot []byte, fn func(value []byte, key []byte) error) error
// DescendLessOrEqual seeks for the pivot and then goes through all the subsequent key value pairs in descreasing order and calls the given function for each pair.
// If the given function returns an error, the iteration stops and returns that error.
// If the pivot is nil, starts from the end.
DescendLessOrEqual(pivot []byte, fn func(value, key []byte) error) error
}
// Options of the index.
type Options struct {
// If set to true, values will be associated with at most one key. False by default.
Unique bool
}
// New creates an index with the given store and options.
func New(store engine.Store, opts Options) Index {
if opts.Unique {
return &uniqueIndex{
store: store,
}
}
return &listIndex{
store: store,
}
}
// listIndex is an implementation that associates a value with a list of keys.
type listIndex struct {
store engine.Store
}
// Set associates a value with a key. It is possible to associate multiple keys for the same value
// but a key can be associated to only one value.
func (i *listIndex) Set(value, key []byte) error {
if len(value) == 0 {
return errors.New("value cannot be nil")
}
buf := make([]byte, 0, len(value)+len(key)+1)
buf = append(buf, value...)
buf = append(buf, separator)
buf = append(buf, key...)
return i.store.Put(buf, nil)
}
func (i *listIndex) Delete(value, key []byte) error {
buf := make([]byte, 0, len(value)+len(key)+1)
buf = append(buf, value...)
buf = append(buf, separator)
buf = append(buf, key...)
return i.store.Delete(buf)
}
func (i *listIndex) AscendGreaterOrEqual(pivot []byte, fn func(value []byte, key []byte) error) error {
return i.store.AscendGreaterOrEqual(pivot, func(k, v []byte) error {
idx := bytes.LastIndexByte(k, separator)
return fn(k[:idx], k[idx+1:])
})
}
func (i *listIndex) DescendLessOrEqual(pivot []byte, fn func(k, v []byte) error) error {
if len(pivot) > 0 {
// ensure the pivot is bigger than the requested value so it doesn't get skipped.
pivot = append(pivot, separator, 0xFF)
}
return i.store.DescendLessOrEqual(pivot, func(k, v []byte) error {
idx := bytes.LastIndexByte(k, separator)
return fn(k[:idx], k[idx+1:])
})
}
// uniqueIndex is an implementation that associates a value with a exactly one key.
type uniqueIndex struct {
store engine.Store
}
// Set associates a value with exactly one key.
// If the association already exists, it returns an error.
func (i *uniqueIndex) Set(value []byte, key []byte) error {
if len(value) == 0 {
return errors.New("value cannot be nil")
}
_, err := i.store.Get(value)
if err == nil {
return ErrDuplicate
}
if err != engine.ErrKeyNotFound {
return err
}
return i.store.Put(value, key)
}
func (i *uniqueIndex) Delete(value, key []byte) error {
return i.store.Delete(value)
}
func (i *uniqueIndex) AscendGreaterOrEqual(pivot []byte, fn func(value []byte, key []byte) error) error {
return i.store.AscendGreaterOrEqual(pivot, fn)
}
func (i *uniqueIndex) DescendLessOrEqual(pivot []byte, fn func(k, v []byte) error) error {
return i.store.DescendLessOrEqual(pivot, fn)
} | index/index.go | 0.710126 | 0.46308 | index.go | starcoder |
package runtime
import "strings"
// NumberType represents a type of number
type NumberType uint16
const (
// IsFloat is the type of Floats
IsFloat NumberType = 1 << iota
// IsInt is the type of Ints
IsInt
// NaN is the type of values which are not numbers
NaN
// NaI is a type for values which a not Ints
NaI
)
// ToNumber returns x as a Float or Int, and the type (IsFloat, IsInt or NaN).
func ToNumber(v Value) (int64, float64, NumberType) {
switch v.iface.(type) {
case int64:
return v.AsInt(), 0, IsInt
case float64:
return 0, v.AsFloat(), IsFloat
case string:
s := v.AsString()
return stringToNumber(strings.Trim(s, " "))
}
return 0, 0, NaN
}
// ToNumberValue returns x as a Float or Int, and if it is a number.
func ToNumberValue(v Value) (Value, NumberType) {
switch v.NumberType() {
case IntType:
return v, IsInt
case FloatType:
return v, IsFloat
}
if s, ok := v.TryString(); ok {
n, f, tp := stringToNumber(strings.Trim(s, " "))
switch tp {
case IsInt:
return IntValue(n), IsInt
case IsFloat:
return FloatValue(f), IsFloat
}
}
return NilValue, NaN
}
// ToInt returns v as an Int and true if v is actually a valid integer.
func ToInt(v Value) (int64, bool) {
if n, ok := v.TryInt(); ok {
return n, true
}
if f, ok := v.TryFloat(); ok {
n, tp := FloatToInt(f)
return n, tp == IsInt
}
if s, ok := v.TryString(); ok {
n, tp := stringToInt(s)
return n, tp == IsInt
}
return 0, false
}
// ToIntNoString returns v as an Int and true if v is actually a valid integer.
func ToIntNoString(v Value) (int64, bool) {
switch v.iface.(type) {
case int64:
return v.AsInt(), true
case float64:
n, tp := FloatToInt(v.AsFloat())
return n, tp == IsInt
}
return 0, false
}
// ToFloat returns v as a FLoat and true if v is a valid float.
func ToFloat(v Value) (float64, bool) {
if n, ok := v.TryInt(); ok {
return float64(n), true
}
if f, ok := v.TryFloat(); ok {
return f, true
}
if s, ok := v.TryString(); ok {
n, f, tp := stringToNumber(s)
switch tp {
case IsInt:
return float64(n), true
case IsFloat:
return f, true
}
}
return 0, false
} | runtime/numconv.go | 0.748444 | 0.445107 | numconv.go | starcoder |
package colour
import (
"image/color"
"math"
)
type Colour struct {
R float64 `json:"r"`
G float64 `json:"g"`
B float64 `json:"b"`
}
func NewColourFromRGB(rgb uint32) Colour {
r := (rgb & 0xff0000) >> 16
g := (rgb & 0x00ff00) >> 8
b := (rgb & 0x0000ff)
return Colour{
R: float64(r) / 0xff,
G: float64(g) / 0xff,
B: float64(b) / 0xff,
}
}
func NewColourFromHSL(hue, saturation, lightness float64) Colour {
if hue < 0 || hue > 360 {
panic("hue must be from 0 to 360")
}
if saturation < 0 || saturation > 1 {
panic("saturation must be between 0 and 1")
}
if lightness < 0 || lightness > 1 {
panic("lightness must be between 0 and 1")
}
c := (1 - math.Abs(2*lightness-1)) * saturation // chroma
hueAdj := hue / 60
for hueAdj > 2 {
hueAdj -= 2
}
x := c * (1 - math.Abs(hueAdj-1))
var r, g, b float64
switch {
case hueAdj <= 1:
r, g, b = c, x, 0
case hueAdj <= 2:
r, g, b = x, c, 0
case hueAdj <= 3:
r, g, b = 0, c, x
case hueAdj <= 4:
r, g, b = 0, x, c
case hueAdj <= 5:
r, g, b = x, 0, c
case hueAdj <= 6:
r, g, b = c, 0, x
default:
panic(false)
}
m := lightness - 0.5*c
r += m
g += m
b += m
if r < 0 || r > 1.0 {
panic(false)
}
if g < 0 || g > 1.0 {
panic(false)
}
if b < 0 || b > 1.0 {
panic(false)
}
return Colour{r, g, b}
}
func (c Colour) Add(rhs Colour) Colour {
return Colour{
c.R + rhs.R,
c.G + rhs.G,
c.B + rhs.B,
}
}
func (c Colour) Scale(f float64) Colour {
return Colour{
c.R * f,
c.G * f,
c.B * f,
}
}
func (c Colour) Pow(exp float64) Colour {
return Colour{
math.Pow(c.R, exp),
math.Pow(c.G, exp),
math.Pow(c.B, exp),
}
}
func (c Colour) Mul(r Colour) Colour {
return Colour{
c.R * r.R,
c.G * r.G,
c.B * r.B,
}
}
func (c Colour) Div(r Colour) Colour {
return Colour{
c.R / r.R,
c.G / r.G,
c.B / r.B,
}
}
func (c Colour) ToNRGBA() color.NRGBA {
return color.NRGBA{
R: float64ToUint8(c.R),
G: float64ToUint8(c.G),
B: float64ToUint8(c.B),
A: 0xff,
}
}
func float64ToUint8(f float64) uint8 {
switch {
case f >= 1.0:
return 0xff
case f < 0.0:
return 0x00
default:
// Since f >= 0.0 and f < 1.0, this returns a value beween 0x00 and
// 0xff (inclusive).
return uint8(f * 0x100)
}
} | colour/colour.go | 0.81604 | 0.463748 | colour.go | starcoder |
// Taken from src/crypto/rsa/rsa.go
package server
import (
"crypto/rand"
"crypto/rsa"
"io"
"math/big"
)
var bigZero = big.NewInt(0)
var bigOne = big.NewInt(1)
// modInverse returns ia, the inverse of a in the multiplicative group of prime
// order n. It requires that a be a member of the group (i.e. less than n).
func modInverse(a, n *big.Int) (ia *big.Int, ok bool) {
g := new(big.Int)
x := new(big.Int)
y := new(big.Int)
g.GCD(x, y, a, n)
if g.Cmp(bigOne) != 0 {
// In this case, a and n aren't coprime and we cannot calculate
// the inverse. This happens because the values of n are nearly
// prime (being the product of two primes) rather than truly
// prime.
return
}
if x.Cmp(bigOne) < 0 {
// 0 is not the multiplicative inverse of any element so, if x
// < 1, then x is negative.
x.Add(x, n)
}
return x, true
}
// decrypt performs an RSA decryption, resulting in a plaintext integer. If a
// random source is given, RSA blinding is used.
func rsaDecrypt(random io.Reader, priv *rsa.PrivateKey, c *big.Int) (m *big.Int, err error) {
// TODO(agl): can we get away with reusing blinds?
if c.Cmp(priv.N) > 0 {
err = rsa.ErrDecryption
return
}
if priv.N.Sign() == 0 {
return nil, rsa.ErrDecryption
}
var ir *big.Int
if random != nil {
// Blinding enabled. Blinding involves multiplying c by r^e.
// Then the decryption operation performs (m^e * r^e)^d mod n
// which equals mr mod n. The factor of r can then be removed
// by multiplying by the multiplicative inverse of r.
var r *big.Int
for {
r, err = rand.Int(random, priv.N)
if err != nil {
return
}
if r.Cmp(bigZero) == 0 {
r = bigOne
}
var ok bool
ir, ok = modInverse(r, priv.N)
if ok {
break
}
}
bigE := big.NewInt(int64(priv.E))
rpowe := new(big.Int).Exp(r, bigE, priv.N) // N != 0
cCopy := new(big.Int).Set(c)
cCopy.Mul(cCopy, rpowe)
cCopy.Mod(cCopy, priv.N)
c = cCopy
}
if priv.Precomputed.Dp == nil {
m = new(big.Int).Exp(c, priv.D, priv.N)
} else {
// We have the precalculated values needed for the CRT.
m = new(big.Int).Exp(c, priv.Precomputed.Dp, priv.Primes[0])
m2 := new(big.Int).Exp(c, priv.Precomputed.Dq, priv.Primes[1])
m.Sub(m, m2)
if m.Sign() < 0 {
m.Add(m, priv.Primes[0])
}
m.Mul(m, priv.Precomputed.Qinv)
m.Mod(m, priv.Primes[0])
m.Mul(m, priv.Primes[1])
m.Add(m, m2)
for i, values := range priv.Precomputed.CRTValues {
prime := priv.Primes[2+i]
m2.Exp(c, values.Exp, prime)
m2.Sub(m2, m)
m2.Mul(m2, values.Coeff)
m2.Mod(m2, prime)
if m2.Sign() < 0 {
m2.Add(m2, prime)
}
m2.Mul(m2, values.R)
m.Add(m, m2)
}
}
if ir != nil {
// Unblind.
m.Mul(m, ir)
m.Mod(m, priv.N)
}
return
}
func rsaRawDecrypt(random io.Reader, priv *rsa.PrivateKey, ciphertext []byte) (plaintext []byte, err error) {
k := (priv.N.BitLen() + 7) / 8
if k < 11 {
err = rsa.ErrDecryption
return
}
c := new(big.Int).SetBytes(ciphertext)
m, err := rsaDecrypt(random, priv, c)
if err != nil {
return
}
return leftPad(m.Bytes(), k), nil
}
// leftPad returns a new slice of length size. The contents of input are right
// aligned in the new slice.
func leftPad(input []byte, size int) (out []byte) {
n := len(input)
if n > size {
n = size
}
out = make([]byte, size)
copy(out[len(out)-n:], input)
return
} | server/rsa_raw.go | 0.539226 | 0.457621 | rsa_raw.go | starcoder |
package ast
type keyType BasicType
const (
MixedKey = String | Integer
StringKey = String
IntegerKey = Integer
)
// ArrayType is an array type
type ArrayType struct {
KeyType keyType
ValueType Type
}
// BasicType is a basic type
type BasicType int
const (
Invalid BasicType = iota
String
Integer
Float
Boolean
Null
Resource
Array
Object
Function
)
// Numeric represents either a float or an integer
var Numeric = compoundType{Integer: struct{}{}, Float: struct{}{}}
var typeMap = map[BasicType]string{
String: "string",
Integer: "integer",
Float: "float",
Boolean: "boolean",
Null: "null",
Resource: "resource",
Array: "array",
Object: "object",
Function: "function",
Invalid: "invalid-type",
}
func (t BasicType) Contains(typ Type) bool {
if bt, ok := typ.(BasicType); ok {
return t&bt != 0
}
return false
}
func (t BasicType) String() string {
if st, ok := typeMap[t]; ok {
return st
}
return typeMap[Invalid]
}
func (t BasicType) Basic() []BasicType {
return []BasicType{t}
}
func (t BasicType) Equals(o Type) bool {
ot, ok := o.(BasicType)
if !ok {
return false
}
return ot == t
}
func (t BasicType) Single() bool {
return t != 0 && t&(t-1) == 0
}
func (t BasicType) Union(o Type) Type {
return compoundType{t: struct{}{}, o: struct{}{}}
}
type compoundType map[Type]struct{}
func (c compoundType) Equals(t Type) bool {
if ct, ok := t.(compoundType); ok {
if len(ct) != len(c) {
return false
}
for it := range c {
if _, ok := ct[it]; !ok {
return false
}
}
return true
}
if len(c) == 1 {
for it := range c {
return it.Equals(t)
}
}
return false
}
func (c compoundType) Contains(t Type) bool {
if ct, ok := t.(compoundType); ok {
if len(ct) > len(c) {
return false
}
for it := range ct {
if _, ok := c[it]; !ok {
return false
}
}
return true
}
for it := range c {
if it.Contains(t) {
return true
}
}
return false
}
// Union returns a new type that includes both the receiver and the argument.
func (c compoundType) Union(t Type) Type {
c[t] = struct{}{}
return c
}
// Single returns true if the receiver expresses one type and only one type.
func (c compoundType) Single() bool {
if len(c) != 1 {
return false
}
for t := range c {
return t.Single()
}
return false
}
// String returns the receiver expressed as a string.
func (c compoundType) String() string {
return ""
}
// Basic returns the basic type a type expresses.
func (c compoundType) Basic() []BasicType {
return nil
}
// Type is a type
type Type interface {
// Equals returns true if the receiver is of the same type as the argument.
Equals(Type) bool
// Contains returns true if the receiver contains the argument type.
Contains(Type) bool
// Union returns a new type that includes both the receiver and the argument.
Union(Type) Type
// Single returns true if the receiver expresses one type and only one type.
Single() bool
// String returns the receiver expressed as a string.
String() string
// Basic returns the basic type a type expresses.
Basic() []BasicType
}
// Unknown represents an unknown type
var Unknown = new(unknownType)
type unknownType struct{}
func (unknownType) Equals(t Type) bool {
return t == Unknown
}
func (unknownType) Contains(t Type) bool {
return t == Unknown
}
func (unknownType) Union(t Type) Type {
return t
}
func (unknownType) Single() bool {
return false
}
func (unknownType) String() string {
return "unknown"
}
func (unknownType) Basic() []BasicType {
return nil
}
// ObjectType is an object type
type ObjectType struct {
Class string
}
func (o ObjectType) Equals(t Type) bool {
if t, ok := t.(ObjectType); ok {
return t.Class == o.Class
}
return false
}
func (o ObjectType) Contains(t Type) bool {
return o.Equals(t)
}
func (o ObjectType) Union(t Type) Type {
if o.Equals(t) {
return o
}
return compoundType{o: struct{}{}, t: struct{}{}}
}
func (ObjectType) Single() bool {
return true
}
func (o ObjectType) String() string {
return o.Class
}
func (ObjectType) Basic() []BasicType {
return []BasicType{Object}
} | ast/types.go | 0.832441 | 0.475788 | types.go | starcoder |
package maptile
import (
"math"
"sync"
"github.com/go-courier/geography/encoding/mvt"
"github.com/go-courier/geography"
)
func NewMapTile(z, x, y uint32) *MapTile {
return &MapTile{
Z: z,
X: x,
Y: y,
}
}
type MapTile struct {
coordsTransform CoordsTransform
Z uint32
X uint32
Y uint32
Layers []*Layer
}
type CoordsTransform interface {
ToEarth(point geography.Point) geography.Point
ToMars(point geography.Point) geography.Point
}
func (t *MapTile) SetCoordsTransform(coordsTransform CoordsTransform) {
t.coordsTransform = coordsTransform
}
func (t *MapTile) MarshalMVT(w *mvt.MVTWriter) error {
for i := range t.Layers {
layer := t.Layers[i]
if layer == nil || len(layer.Features) == 0 {
continue
}
features := make([]*mvt.Feature, 0)
for i := range layer.Features {
feat := layer.Features[i]
if feat == nil {
continue
}
geo := feat.ToGeom()
if geo == nil {
continue
}
g := geo.Project(t.NewTransform(layer.Extent))
f := &mvt.Feature{
Type: g.Type(),
Geometry: g.Geometry(),
Properties: feat.Properties(),
}
if f == nil || len(f.Geometry) == 0 {
continue
}
if fid, ok := feat.(FeatureID); ok {
f.ID = fid.ID()
}
features = append(features, f)
}
w.WriteLayer(layer.Name, layer.Extent, features...)
}
return nil
}
func (t *MapTile) NewTransform(extent uint32) geography.Transform {
n := uint32(TrailingZeros32(extent))
z := uint32(t.Z) + n
minx := float64(t.X << n)
miny := float64(t.Y << n)
return func(p geography.Point) geography.Point {
if t.coordsTransform != nil {
p = t.coordsTransform.ToMars(p)
}
x, y := lonLatToPixelXY(p[0], p[1], z)
return geography.Point{
math.Floor(x - minx),
math.Floor(y - miny),
}
}
}
func (t *MapTile) BBox() geography.Bound {
buffer := 0.0
x := float64(t.X)
y := float64(t.Y)
minx := x - buffer
miny := y - buffer
if miny < 0 {
miny = 0
}
lon1, lat1 := geography.TileXYToLonLat(minx, miny, uint32(t.Z))
maxX := x + 1 + buffer
maxTiles := float64(uint32(1 << t.Z))
maxY := y + 1 + buffer
if maxY > maxTiles {
maxY = maxTiles
}
lon2, lat2 := geography.TileXYToLonLat(maxX, maxY, uint32(t.Z))
if t.coordsTransform != nil {
return geography.Bound{
Min: t.coordsTransform.ToEarth(geography.Point{lon1, lat2}),
Max: t.coordsTransform.ToEarth(geography.Point{lon2, lat1}),
}
}
return geography.Bound{
Min: geography.Point{lon1, lat2},
Max: geography.Point{lon2, lat1},
}
}
func (t *MapTile) AddLayers(layers ...*Layer) {
t.Layers = append(t.Layers, layers...)
}
func (t *MapTile) AddTileLayers(tileLayers ...TileLayer) (e error) {
wg := sync.WaitGroup{}
result := make(chan interface{})
for i := range tileLayers {
wg.Add(1)
go func(tileLayer TileLayer) {
defer wg.Done()
features, err := tileLayer.Features(t)
if err != nil {
result <- err
return
}
extend := uint32(0)
if tileLayerExtentConf, ok := tileLayer.(TileLayerExtentConf); ok {
extend = tileLayerExtentConf.Extent()
}
result <- NewLayer(tileLayer.Name(), extend, features...)
}(tileLayers[i])
}
go func() {
wg.Wait()
close(result)
}()
for r := range result {
switch v := r.(type) {
case error:
e = v
case *Layer:
t.AddLayers(v)
}
}
return
} | maptile/tile.go | 0.627609 | 0.564519 | tile.go | starcoder |
package main
import "strconv"
// Device is the representation of the wrist device
type Device [4]int
func (d Device) isEqual(other Device) bool {
for i, value := range d {
if other[i] != value {
return false
}
}
return true
}
func initDevice(strslice []string) Device {
result := Device{}
for i := 0; i < 4; i++ {
result[i], _ = strconv.Atoi(strslice[i])
}
return result
}
// Below are the different operations that can be done on the device
//Addition:
// addr (add register) stores into register C the result of adding register A and register B.
func addr(i Instruction, d Device) Device {
d[i.Cout] = d[i.Ain] + d[i.Bin]
return d
}
//addi (add immediate) stores into register C the result of adding register A and value B.
func addi(i Instruction, d Device) Device {
d[i.Cout] = d[i.Ain] + i.Bin
return d
}
// Multiplication:
// mulr (multiply register) stores into register C the result of multiplying register A and register B.
func mulr(i Instruction, d Device) Device {
d[i.Cout] = d[i.Ain] * d[i.Bin]
return d
}
// muli (multiply immediate) stores into register C the result of multiplying register A and value B.
func muli(i Instruction, d Device) Device {
d[i.Cout] = d[i.Ain] * i.Bin
return d
}
// Bitwise AND:
// banr (bitwise AND register) stores into register C the result of the bitwise AND of register A and register B.
func banr(i Instruction, d Device) Device {
d[i.Cout] = d[i.Ain] & d[i.Bin]
return d
}
// bani (bitwise AND immediate) stores into register C the result of the bitwise AND of register A and value B.
func bani(i Instruction, d Device) Device {
d[i.Cout] = d[i.Ain] & i.Bin
return d
}
// Bitwise OR:
// borr (bitwise OR register) stores into register C the result of the bitwise OR of register A and register B.
func borr(i Instruction, d Device) Device {
d[i.Cout] = d[i.Ain] | d[i.Bin]
return d
}
// bori (bitwise OR immediate) stores into register C the result of the bitwise OR of register A and value B.
func bori(i Instruction, d Device) Device {
d[i.Cout] = d[i.Ain] | i.Bin
return d
}
// Assignment:
// setr (set register) copies the contents of register A into register C. (Input B is ignored.)
func setr(i Instruction, d Device) Device {
d[i.Cout] = d[i.Ain]
return d
}
// seti (set immediate) stores value A into register C. (Input B is ignored.)
func seti(i Instruction, d Device) Device {
d[i.Cout] = i.Ain
return d
}
// Greater-than testing:
// gtir (greater-than immediate/register) sets register C to 1 if value A is greater than register B. Otherwise, register C is set to 0.
func gtir(i Instruction, d Device) Device {
if i.Ain > d[i.Bin] {
d[i.Cout] = 1
} else {
d[i.Cout] = 0
}
return d
}
// gtri (greater-than register/immediate) sets register C to 1 if register A is greater than value B. Otherwise, register C is set to 0.
func gtri(i Instruction, d Device) Device {
if d[i.Ain] > i.Bin {
d[i.Cout] = 1
} else {
d[i.Cout] = 0
}
return d
}
// gtrr (greater-than register/register) sets register C to 1 if register A is greater than register B. Otherwise, register C is set to 0.
func gtrr(i Instruction, d Device) Device {
if d[i.Ain] > d[i.Bin] {
d[i.Cout] = 1
} else {
d[i.Cout] = 0
}
return d
}
// Equality testing:
// eqir (equal immediate/register) sets register C to 1 if value A is equal to register B. Otherwise, register C is set to 0.
func eqir(i Instruction, d Device) Device {
if i.Ain == d[i.Bin] {
d[i.Cout] = 1
} else {
d[i.Cout] = 0
}
return d
}
// eqri (equal register/immediate) sets register C to 1 if register A is equal to value B. Otherwise, register C is set to 0.
func eqri(i Instruction, d Device) Device {
if d[i.Ain] == i.Bin {
d[i.Cout] = 1
} else {
d[i.Cout] = 0
}
return d
}
// eqrr (equal register/register) sets register C to 1 if register A is equal to register B. Otherwise, register C is set to 0.
func eqrr(i Instruction, d Device) Device {
if d[i.Ain] == d[i.Bin] {
d[i.Cout] = 1
} else {
d[i.Cout] = 0
}
return d
} | 2018/16_2/device.go | 0.731826 | 0.443721 | device.go | starcoder |
package main
import (
"fmt"
"math/rand"
"os"
"path"
"github.com/YadaYuki/deeplearning-golang/mnist"
"github.com/YadaYuki/deeplearning-golang/model"
"github.com/YadaYuki/deeplearning-golang/utils"
"github.com/vorduin/nune"
)
func getBatchData[T nune.Number](idxes []int, data nune.Tensor[T]) nune.Tensor[T] {
batchSize := len(idxes)
dataDim := data.Shape()[1]
batchX := nune.Zeros[T](batchSize, dataDim)
for i, idx := range idxes {
for j := 0; j < dataDim; j++ {
batchX.Index(i, j).Ravel()[0] = data.Index(idx, j).Scalar()
}
}
return batchX
}
func where[T nune.Number](data nune.Tensor[T], target T) (int, error) {
size := data.Size(0)
for i := 0; i < size; i++ {
if data.Index(i).Scalar() == target {
return i, nil
}
}
return -1, fmt.Errorf("target not found")
}
func getCorrectNum[T nune.Number](yBatch nune.Tensor[T], tBatch nune.Tensor[T]) int {
if yBatch.Shape()[0] != tBatch.Shape()[0] || yBatch.Shape()[1] != tBatch.Shape()[1] {
panic("Dimension mismatch")
}
batchSize := yBatch.Shape()[0]
correct := 0
for i := 0; i < batchSize; i++ {
y_pred, _ := where(yBatch.Index(i), yBatch.Index(i).Max().Scalar())
t_true, _ := where(tBatch.Index(i), tBatch.Index(i).Max().Scalar())
if y_pred == t_true {
correct++
}
}
return correct
}
func train[T nune.Number](net *model.TwoLayerNet[T], xTrain nune.Tensor[T], tTrain nune.Tensor[T], batchSize int) {
dataSize := xTrain.Shape()[0]
batchIdxes := utils.SplitSlice(utils.Shuffle(utils.Range(0, dataSize)), batchSize)
fmt.Println("Train")
for epoch := 0; epoch < 10; epoch++ {
fmt.Println("epoch:", epoch+1)
for i := 0; i < len(batchIdxes); i++ {
xBatch := getBatchData(batchIdxes[i], xTrain)
tBatch := getBatchData(batchIdxes[i], tTrain)
loss := net.TrainStep(xBatch, tBatch)
if (i+1)%20 == 0 {
fmt.Printf("iter:%d/%d train loss: %v \n", (i + 1), len(batchIdxes), loss)
}
}
}
fmt.Println("Train finished")
}
func test[T nune.Number](net *model.TwoLayerNet[T], xVal nune.Tensor[T], tVal nune.Tensor[T], batchSize int) (accuracy float64, loss float64) {
dataSize := xVal.Shape()[0]
batchIdxes := utils.SplitSlice(utils.Range(0, dataSize), batchSize)
loss = 0.0
correct := 0
fmt.Println("Test")
for i := 0; i < len(batchIdxes); i++ {
xBatch := getBatchData(batchIdxes[i], xVal)
tBatch := getBatchData(batchIdxes[i], tVal)
yBatch := net.Predict(xBatch)
correct += getCorrectNum(yBatch, tBatch)
loss += float64(net.ForwardAndLoss(xBatch, tBatch))
}
fmt.Println("Test finished")
loss /= float64(len(batchIdxes))
return float64(correct) / float64(dataSize), loss
}
func main() {
// load mnist data
wd, _ := os.Getwd()
pathToMnistDir := path.Join(wd, "mnist/data")
xTrain, tTrain, xTest, tTest, _ := mnist.LoadMnist[float64](pathToMnistDir, true)
// initialize network
weightInitilizer := func() float64 {
return rand.Float64() * 0.01
}
biasInitilizer := func() float64 {
return rand.Float64()
}
net := model.NewTwoLayerNet(784, 50, 10, weightInitilizer, biasInitilizer)
// train & test network
batchSize := 100
train(net, xTrain, tTrain, batchSize)
accuracy, loss := test(net, xTest, tTest, batchSize)
fmt.Printf("accuracy: %v, loss: %v\n", accuracy, loss)
} | main.go | 0.615781 | 0.460713 | main.go | starcoder |
package fpe
import (
"encoding/binary"
"math/bits"
)
// A BlockCipher represents an implementation of block cipher using a given key.
type BlockCipher interface {
// BlockSize returns the cipher's block size.
BlockSize() int
// Encrypt encrypts the first block in src into dst.
// Dst and src must overlap entirely or not at all.
Encrypt(dst, src []byte)
}
// FeistelRounds is the number of rounds in random Feistel scheme.
// See <NAME> CRYPTO'03 for the number.
const FeistelRounds = 10
// SimpleEncrypt encrypts the plaintext number into another number in [0;maxvalue].
func SimpleEncrypt(blockCipher BlockCipher, plaintext, maxvalue uint64) uint64 {
return Encrypt(blockCipher, plaintext, maxvalue, 0, FeistelRounds)
}
// SimpleDecrypt decrypts the ciphertext number back.
func SimpleDecrypt(blockCipher BlockCipher, ciphertext, maxvalue uint64) uint64 {
return Decrypt(blockCipher, ciphertext, maxvalue, 0, FeistelRounds)
}
// Encrypt encrypts the plaintext number into another number in [0;maxvalue].
// You can specify the number of feistel network rounds and a tweak (publicly
// known modifier of the algorithm).
func Encrypt(blockCipher BlockCipher, plaintext, maxvalue, tweak uint64, feistelRounds int) uint64 {
oddRounds := (feistelRounds % 2) == 1
return encryptDecrypt(blockCipher, plaintext, maxvalue, tweak, true, oddRounds, 0, feistelRounds, 1)
}
// Decrypt decrypts the ciphertext number back.
func Decrypt(blockCipher BlockCipher, ciphertext, maxvalue, tweak uint64, feistelRounds int) uint64 {
oddRounds := (feistelRounds % 2) == 1
return encryptDecrypt(blockCipher, ciphertext, maxvalue, tweak, false, oddRounds, feistelRounds-1, -1, -1)
}
var zeros = make([]byte, 64)
func encryptDecrypt(blockCipher BlockCipher, value, maxvalue, tweak uint64, encrypt, oddRounds bool, startRound, endRound, delta int) uint64 {
if blockCipher.BlockSize() < 16 && tweak != 0 {
panic("tweak is not supported for block ciphers with blocks smaller than 16")
}
totalBits := uint(bits.Len64(maxvalue))
if totalBits < 2 {
panic("domain is too small")
}
for {
value = feistelNetwork(blockCipher, value, tweak, totalBits, encrypt, oddRounds, startRound, endRound, delta)
if value <= maxvalue {
return value
}
}
}
func feistelNetwork(blockCipher BlockCipher, value, tweak uint64, totalBits uint, encrypt, oddRounds bool, startRound, endRound, delta int) uint64 {
buffer := make([]byte, 64)[:blockCipher.BlockSize()]
leftBits := totalBits / 2
rightBits := totalBits - leftBits
if !encrypt && oddRounds {
leftBits, rightBits = rightBits, leftBits
}
left := uint32(value >> rightBits)
right := uint32(value & ((1 << rightBits) - 1))
if !encrypt {
leftBits, rightBits = rightBits, leftBits
left, right = right, left
}
// Perform random Feistel scheme.
for round := startRound; round != endRound; round += delta {
// Encrypt old R, XOR the result with old L and assign to new R. Assign new L to old R.
copy(buffer, zeros)
binary.LittleEndian.PutUint32(buffer[0:4], right)
buffer[5] = byte(round) // To make all round functions different.
if blockCipher.BlockSize() >= 16 {
binary.LittleEndian.PutUint64(buffer[8:16], tweak)
}
blockCipher.Encrypt(buffer, buffer)
cipherText := binary.LittleEndian.Uint32(buffer[:4])
xor := cipherText ^ left
leftBits, rightBits = rightBits, leftBits
left = right
right = xor & ((1 << rightBits) - 1)
}
if !encrypt {
rightBits = leftBits // leftBits = rightBits
left, right = right, left
}
return (uint64(left) << rightBits) | uint64(right)
} | vendor/gitlab.com/starius/fpe/fpe.go | 0.772015 | 0.511168 | fpe.go | starcoder |
package validator
// MessageMap is a map of string, that can be used as error message for ValidateStruct function.
var MessageMap = map[string]string{
"accepted": "The {{.Attribute}} must be accepted.",
"activeUrl": "The {{.Attribute}} is not a valid URL.",
"after": "The {{.Attribute}} must be a date after {{.Date}}.",
"afterOrEqual": "The {{.Attribute}} must be a date after or equal to {{.Date}}.",
"alpha": "The {{.Attribute}} may only contain letters.",
"alphaDash": "The {{.Attribute}} may only contain letters, numbers, dashes and underscores.",
"alphaNum": "The {{.Attribute}} may only contain letters and numbers.",
"array": "The {{.Attribute}} must be an array.",
"before": "The {{.Attribute}} must be a date before {{.Date}}.",
"beforeOrEqual": "The {{.Attribute}} must be a date before or equal to {{.Date}}.",
"between.numeric": "The {{.Attribute}} must be between {{.Min}} and {{.Max}}.",
"between.file": "The {{.Attribute}} must be between {{.Min}} and {{.Max}} kilobytes.",
"between.string": "The {{.Attribute}} must be between {{.Min}} and {{.Max}} characters.",
"between.array": "The {{.Attribute}} must have between {{.Min}} and {{.Max}} items.",
"boolean": "The {{.Attribute}} field must be true or false.",
"confirmed": "The {{.Attribute}} confirmation does not match.",
"date": "The {{.Attribute}} is not a valid date.",
"dateFormat": "The {{.Attribute}} does not match the format {{.Format}}.",
"different": "The {{.Attribute}} and {{.Other}} must be different.",
"digits": "The {{.Attribute}} must be {{.Digits}} digits.",
"digitsBetween": "The {{.Attribute}} must be between {{.Min}} and {{.Max}} digits.",
"dimensions": "The {{.Attribute}} has invalid image dimensions.",
"distinct": "The {{.Attribute}} field has a duplicate value.",
"email": "The {{.Attribute}} must be a valid email address.",
"exists": "The selected {{.Attribute}} is invalid.",
"file": "The {{.Attribute}} must be a file.",
"filled": "The {{.Attribute}} field must have a value.",
"gt.numeric": "The {{.Attribute}} must be greater than {{.Value}}.",
"gt.file": "The {{.Attribute}} must be greater than {{.Value}} kilobytes.",
"gt.string": "The {{.Attribute}} must be greater than {{.Value}} characters.",
"gt.array": "The {{.Attribute}} must have greater than {{.Value}} items.",
"gte.numeric": "The {{.Attribute}} must be greater than or equal {{.Value}}.",
"gte.file": "The {{.Attribute}} must be greater than or equal {{.Value}} kilobytes.",
"gte.string": "The {{.Attribute}} must be greater than or equal {{.Value}} characters.",
"gte.array": "The {{.Attribute}} must have {{.Value}} items or more.",
"image": "The {{.Attribute}} must be an image.",
"in": "The selected {{.Attribute}} is invalid.",
"inArray": "The {{.Attribute}} field does not exist in {{.Other}}.",
"integer": "The {{.Attribute}} must be an integer.",
"ip": "The {{.Attribute}} must be a valid IP address.",
"ipv4": "The {{.Attribute}} must be a valid IPv4 address.",
"ipv6": "The {{.Attribute}} must be a valid IPv6 address.",
"json": "The {{.Attribute}} must be a valid JSON string.",
"lt.numeric": "The {{.Attribute}} must be less than {{.Value}}.",
"lt.file": "The {{.Attribute}} must be less than {{.Value}} kilobytes.",
"lt.string": "The {{.Attribute}} must be less than {{.Value}} characters.",
"lt.array": "The {{.Attribute}} must have less than {{.Value}} items.",
"lte.numeric": "The {{.Attribute}} must be less than or equal {{.Value}}.",
"lte.file": "The {{.Attribute}} must be less than or equal {{.Value}} kilobytes.",
"lte.string": "The {{.Attribute}} must be less than or equal {{.Value}} characters.",
"lte.array": "The {{.Attribute}} must not have more than {{.Value}} items.",
"max.numeric": "The {{.Attribute}} may not be greater than {{.Max}}.",
"max.file": "The {{.Attribute}} may not be greater than {{.Max}} kilobytes.",
"max.string": "The {{.Attribute}} may not be greater than {{.Max}} characters.",
"max.array": "The {{.Attribute}} may not have more than {{.Max}} items.",
"mimes": "The {{.Attribute}} must be a file of type: {{.Values}}.",
"mimetypes": "The {{.Attribute}} must be a file of type: {{.Values}}.",
"min.numeric": "The {{.Attribute}} must be at least {{.Min}}.",
"min.file": "The {{.Attribute}} must be at least {{.Min}} kilobytes.",
"min.string": "The {{.Attribute}} must be at least {{.Min}} characters.",
"min.array": "The {{.Attribute}} must have at least {{.Min}} items.",
"notIn": "The selected {{.Attribute}} is invalid.",
"notRegex": "The {{.Attribute}} format is invalid.",
"numeric": "The {{.Attribute}} must be a number.",
"present": "The {{.Attribute}} field must be present.",
"regex": "The {{.Attribute}} format is invalid.",
"required": "The {{.Attribute}} field is required.",
"requiredIf": "The {{.Attribute}} field is required when {{.Other}} is {{.Value}}.",
"requiredUnless": "The {{.Attribute}} field is required unless {{.Other}} is in {{.Values}}.",
"requiredWith": "The {{.Attribute}} field is required when {{.Values}} is present.",
"requiredWithAll": "The {{.Attribute}} field is required when {{.Values}} is present.",
"requiredWithout": "The {{.Attribute}} field is required when {{.Values}} is not present.",
"requiredWithoutAll": "The {{.Attribute}} field is required when none of {{.Values}} are present.",
"same": "The {{.Attribute}} and {{.Other}} must match.",
"size.numeric": "The {{.Attribute}} must be {{.Size}}.",
"size.file": "The {{.Attribute}} must be {{.Size}} kilobytes.",
"size.string": "The {{.Attribute}} must be {{.Size}} characters.",
"size.array": "The {{.Attribute}} must contain {{.Size}} items.",
"string": "The {{.Attribute}} must be a string.",
"timezone": "The {{.Attribute}} must be a valid zone.",
"unique": "The {{.Attribute}} has already been taken.",
"uploaded": "The {{.Attribute}} failed to upload.",
"uuid3": "The {{.Attribute}} format is invalid.",
"uuid4": "The {{.Attribute}} format is invalid.",
"uuid5": "The {{.Attribute}} format is invalid.",
"uuid": "The {{.Attribute}} format is invalid.",
} | message.go | 0.699665 | 0.564819 | message.go | starcoder |
package parser
// A Visitor's Visit method is invoked for each node encountered by Walk.
// If the result visitor w is not nil, Walk visits each of the children
// of node with the visitor w, followed by a call of w.Visit(nil).
type Visitor interface {
Visit(node Node) (w Visitor)
}
// Walk traverses an CST in depth-first order: It starts by calling
// v.Visit(node); node must not be nil. If the visitor w returned by
// v.Visit(node) is not nil, Walk is invoked recursively with visitor
// w for each of the non-nil children of node, followed by a call of
// w.Visit(nil).
func Walk(node Node, v Visitor) {
if v = v.Visit(node); v == nil {
return
}
switch n := node.(type) {
case *Module:
if n.Doc != nil {
Walk(n.Doc, v)
}
walkDeclList(n.Decls, v)
case *Decl:
switch {
case n.Bad != nil:
Walk(n.Bad, v)
case n.Import != nil:
Walk(n.Import, v)
case n.Export != nil:
Walk(n.Export, v)
case n.Func != nil:
Walk(n.Func, v)
case n.Doc != nil:
Walk(n.Doc, v)
}
case *ImportDecl:
if n.Ident != nil {
Walk(n.Ident, v)
}
if n.ImportFunc != nil {
Walk(n.ImportFunc, v)
}
if n.ImportPath != nil {
Walk(n.ImportPath, v)
}
case *ImportFunc:
if n.Func != nil {
Walk(n.Func, v)
}
case *ExportDecl:
if n.Ident != nil {
Walk(n.Ident, v)
}
case *FuncDecl:
if n.Type != nil {
Walk(n.Type, v)
}
if n.Name != nil {
Walk(n.Name, v)
}
if n.Params != nil {
Walk(n.Params, v)
}
if n.Body != nil {
Walk(n.Body, v)
}
case *AliasDecl:
if n.As != nil {
Walk(n.As, v)
}
if n.Ident != nil {
Walk(n.Ident, v)
}
case *FieldList:
walkFieldList(n.List, v)
case *Field:
if n.Variadic != nil {
Walk(n.Variadic, v)
}
if n.Type != nil {
Walk(n.Type, v)
}
if n.Name != nil {
Walk(n.Name, v)
}
case *Expr:
switch {
case n.Bad != nil:
Walk(n.Bad, v)
case n.Selector != nil:
Walk(n.Selector, v)
case n.Ident != nil:
Walk(n.Ident, v)
case n.BasicLit != nil:
Walk(n.BasicLit, v)
case n.FuncLit != nil:
Walk(n.FuncLit, v)
}
case *Selector:
if n.Ident != nil {
Walk(n.Ident, v)
}
if n.Select != nil {
Walk(n.Select, v)
}
case *BasicLit:
if n.Numeric != nil {
Walk(n.Numeric, v)
}
case *FuncLit:
if n.Body != nil {
Walk(n.Body, v)
}
case *Stmt:
switch {
case n.Bad != nil:
Walk(n.Bad, v)
case n.Call != nil:
Walk(n.Call, v)
case n.Doc != nil:
Walk(n.Doc, v)
}
case *CallStmt:
if n.Func != nil {
Walk(n.Func, v)
}
walkExprList(n.Args, v)
if n.Alias != nil {
Walk(n.Alias, v)
}
if n.WithOpt != nil {
Walk(n.WithOpt, v)
}
if n.StmtEnd != nil {
if n.StmtEnd.Comment != nil {
Walk(n.StmtEnd.Comment, v)
}
}
case *WithOpt:
if n.With != nil {
Walk(n.With, v)
}
if n.Expr != nil {
Walk(n.Expr, v)
}
case *BlockStmt:
walkStmtList(n.List, v)
case *CommentGroup:
walkCommentList(n.List, v)
}
v.Visit(nil)
}
// Inspect traverses an CST in depth-first order: It starts by calling
// f(node); node must not be nil. If f returns true, Inspect invokes f
// recursively for each of the non-nil children of node, followed by a
// call of f(nil).
func Inspect(node Node, f func(Node) bool) {
Walk(node, inspector(f))
}
type inspector func(Node) bool
func (f inspector) Visit(node Node) Visitor {
if f(node) {
return f
}
return nil
}
func walkDeclList(list []*Decl, v Visitor) {
for _, x := range list {
Walk(x, v)
}
}
func walkFieldList(list []*Field, v Visitor) {
for _, x := range list {
Walk(x, v)
}
}
func walkExprList(list []*Expr, v Visitor) {
for _, x := range list {
Walk(x, v)
}
}
func walkStmtList(list []*Stmt, v Visitor) {
for _, x := range list {
Walk(x, v)
}
}
func walkCommentList(list []*Comment, v Visitor) {
for _, x := range list {
Walk(x, v)
}
} | parser/walk.go | 0.674372 | 0.428951 | walk.go | starcoder |
package test_version1
import (
"testing"
"github.com/pip-services-users/pip-clients-sessions-go/version1"
"github.com/pip-services3-go/pip-services3-commons-go/data"
"github.com/stretchr/testify/assert"
)
type SessionsClientFixtureV1 struct {
Client version1.ISessionsClientV1
}
func NewSessionsClientFixtureV1(client version1.ISessionsClientV1) *SessionsClientFixtureV1 {
return &SessionsClientFixtureV1{
Client: client,
}
}
func (c *SessionsClientFixtureV1) clear() {
page, _ := c.Client.GetSessions("", nil, nil)
for _, v := range page.Data {
session := v.(*version1.SessionV1)
c.Client.DeleteSessionById("", session.Id)
}
}
func (c *SessionsClientFixtureV1) TestOpenSession(t *testing.T) {
c.clear()
defer c.clear()
// Open new session
session, err := c.Client.OpenSession("", "1", "User 1", "localhost", "test", nil, "abc")
assert.Nil(t, err)
assert.NotNil(t, session)
assert.NotNil(t, session.Id)
assert.NotNil(t, session.RequestTime)
assert.Equal(t, session.Address, "localhost")
assert.Equal(t, session.Client, "test")
assert.Equal(t, session.Data, "abc")
session1 := session
// Store session data
session, err = c.Client.StoreSessionData("", session1.Id, "xyz")
assert.Nil(t, err)
// Update session user
session, err = c.Client.UpdateSessionUser("", session1.Id, "xyz")
assert.Nil(t, err)
// Get session by id
session, err = c.Client.GetSessionById("", session1.Id)
assert.Nil(t, err)
assert.NotNil(t, session)
assert.Equal(t, session.Address, "localhost")
assert.Equal(t, session.Client, "test")
assert.Equal(t, session.Data, "xyz")
assert.Equal(t, session.Data, "xyz")
// Get open sessions
page, err1 := c.Client.GetSessions("",
data.NewFilterParamsFromTuples("user_id", "1", "active", true), nil)
assert.Nil(t, err1)
assert.NotNil(t, page)
assert.True(t, len(page.Data) >= 1)
session = page.Data[0].(*version1.SessionV1)
assert.NotNil(t, session)
assert.Equal(t, session.Address, "localhost")
assert.Equal(t, session.Client, "test")
}
func (c *SessionsClientFixtureV1) TestCloseSession(t *testing.T) {
c.clear()
defer c.clear()
// Open new session
session, err := c.Client.OpenSession("", "1", "User 1", "localhost", "test", nil, "abc")
assert.Nil(t, err)
assert.NotNil(t, session)
assert.NotNil(t, session.Id)
assert.NotNil(t, session.RequestTime)
assert.Equal(t, session.Address, "localhost")
assert.Equal(t, session.Client, "test")
assert.Equal(t, session.Data, "abc")
session1 := session
// Close created session
session, err = c.Client.CloseSession("", session1.Id)
assert.Nil(t, err)
// Get session by id
session, err = c.Client.GetSessionById("", session1.Id)
assert.Nil(t, err)
assert.NotNil(t, session)
assert.False(t, session.Active)
// Delete session
session, err = c.Client.DeleteSessionById("", session1.Id)
assert.Nil(t, err)
// Try to get deleted session
session, err = c.Client.GetSessionById("", session1.Id)
assert.Nil(t, err)
assert.Nil(t, session)
} | test/version1/SessionsClientFixtureV1.go | 0.580114 | 0.406921 | SessionsClientFixtureV1.go | starcoder |
package main
import (
"fmt"
"github.com/404Polaris/RayTracing-go/pkg/camera"
"github.com/404Polaris/RayTracing-go/pkg/geometry"
"github.com/404Polaris/RayTracing-go/pkg/material"
"github.com/404Polaris/RayTracing-go/pkg/mathplus"
"github.com/404Polaris/RayTracing-go/pkg/scene"
"image"
"image/color"
"image/png"
"log"
"math"
"os"
"sync"
)
func rayColor(ray *mathplus.Ray, scene geometry.Hittable, depth int) mathplus.Vector3 {
if depth <= 0 {
return *mathplus.NewVector3(0, 0, 0)
}
var hitInfo geometry.HitInfo
if scene.Hit(ray, 0.001, math.MaxFloat64, &hitInfo) {
var scatteredRay mathplus.Ray
var attenuation mathplus.Vector3
mat := hitInfo.Material.(material.Material)
if mat.Scatter(ray, hitInfo, &attenuation, &scatteredRay) {
return attenuation.MulVector(rayColor(&scatteredRay, scene, depth-1))
}
return *mathplus.NewVector3(0, 0, 0)
}
direction := ray.Direction().Normalize()
t := 0.5 * (direction.Y() + 1.0)
return mathplus.NewVector3(1, 1, 1).Mul(1.0 - t).Add(mathplus.NewVector3(0.5, 0.7, 1.0).Mul(t))
}
func randomScene() geometry.Hittable {
s := scene.NewScene()
m1 := material.NewDielectric(1.5)
s.Add(geometry.NewSphere(1, *mathplus.NewVector3(0, 1, 0), m1))
m2 := material.NewLambertian(*mathplus.NewVector3(0.4, 0.2, 0.1))
s.Add(geometry.NewSphere(1, *mathplus.NewVector3(-4, 1, 0), m2))
m3 := material.NewMetal(0.0, *mathplus.NewVector3(0.7, 0.6, 0.5))
s.Add(geometry.NewSphere(1, *mathplus.NewVector3(4, 1, 0), m3))
m4 := material.NewLambertian(*mathplus.NewVector3(0.5, 0.5, 0.5))
s.Add(geometry.NewSphere(1000, *mathplus.NewVector3(0, -1000, 0), m4))
for a := -11; a < 11; a++ {
for b := -11; b < 11; b++ {
rFloat := mathplus.RandomFloat64()
center := mathplus.NewVector3(float64(a)+0.9*rFloat, 0.2, float64(b)+0.9*rFloat)
if center.Sub(*mathplus.NewVector3(4, 0.2, 0)).Length() <= 0.9 {
continue
}
if rFloat < 0.8 {
albedo := mathplus.RandomVector3().MulVector(mathplus.RandomVector3())
m := material.NewLambertian(albedo)
s.Add(geometry.NewSphere(0.2, *center, m))
} else if rFloat < 0.95 {
albedo := mathplus.RandomVector3()
fuzz := mathplus.RandomFloat64InRange(0, 0.5)
m := material.NewMetal(fuzz, albedo)
s.Add(geometry.NewSphere(0.2, *center, m))
} else {
m := material.NewDielectric(1.5)
s.Add(geometry.NewSphere(0.2, *center, m))
}
}
}
return s
}
func calcRayTracing(x int, y int, cam *camera.Camera, w int, h int, scene geometry.Hittable, reflectTimes int, samplePerPixel int) color.RGBA {
wg := sync.WaitGroup{}
wg.Add(samplePerPixel)
pixelColor := *mathplus.NewVector3(0, 0, 0)
pixelColors := make([]mathplus.Vector3, samplePerPixel, samplePerPixel)
calcRayColor := func(i int, u float64, v float64) {
ray := cam.GetRay(u, v)
pixelColors[i] = rayColor(ray, scene, reflectTimes)
wg.Done()
}
for i := 0; i < samplePerPixel; i++ {
u := (float64(x) + mathplus.RandomFloat64()) / (float64(w) - 1)
v := (float64(y) + mathplus.RandomFloat64()) / (float64(h) - 1)
go calcRayColor(i, u, v)
}
wg.Wait()
for i := 0; i < samplePerPixel; i++ {
pixelColor = pixelColor.Add(pixelColors[i])
}
pixelColor = pixelColor.Div(float64(samplePerPixel))
pixelColor = *mathplus.NewVector3(math.Sqrt(pixelColor.X()), math.Sqrt(pixelColor.Y()), math.Sqrt(pixelColor.Z()))
R := mathplus.Clamp(pixelColor.R(), 0, 0.999)
G := mathplus.Clamp(pixelColor.G(), 0, 0.999)
B := mathplus.Clamp(pixelColor.B(), 0, 0.999)
pixelColor = *mathplus.NewVector3(R, G, B)
pixelColor = pixelColor.Mul(256)
return color.RGBA{R: uint8(pixelColor.R()), G: uint8(pixelColor.G()), B: uint8(pixelColor.B()), A: 255}
}
func main() {
w := 768
reflectTimes := 500
samplePerPixel := 500
aspectRatio := 16.0 / 9.0
h := int(float64(w) / aspectRatio)
distToFocus := 10.0
vUp := mathplus.NewVector3(0, 1, 0)
lookAt := mathplus.NewVector3(0, 0, 0)
lookFrom := mathplus.NewVector3(12, 2, 3)
cam := camera.NewCamera(20, *vUp, *lookAt, *lookFrom, aspectRatio, 0, distToFocus)
s := randomScene()
img := image.NewRGBA(image.Rect(0, 0, w, h))
for y := 0; y < h; y++ {
for x := 0; x < w; x++ {
pixelColor := calcRayTracing(x, y, cam, w, h, s, reflectTimes, samplePerPixel)
img.SetRGBA(x, h-y, pixelColor)
}
fmt.Printf("Current Progress : %f\n", float64(y)*100.0/float64(h))
}
imgFile, err := os.Create("image.png")
defer imgFile.Close()
err = png.Encode(imgFile, img)
if err != nil {
log.Fatal(err)
}
} | cmd/main.go | 0.639849 | 0.460168 | main.go | starcoder |
package dackbox
import (
"github.com/pkg/errors"
"github.com/stackrox/rox/pkg/dbhelper"
)
// Path represents path to go from one idspace to another
type Path struct {
Path [][]byte
ForwardTraversal bool
}
// ForwardPath returns a forward path over the given elements.
func ForwardPath(elems ...[]byte) Path {
return Path{
Path: elems,
ForwardTraversal: true,
}
}
// BackwardsPath returns a backwards path over the given elements.
func BackwardsPath(elems ...[]byte) Path {
return Path{
Path: elems,
ForwardTraversal: false,
}
}
// BucketPath is a newer version of Path, that explicitly references the bucket handlers.
type BucketPath struct {
Elements []*dbhelper.BucketHandler
BackwardTraversal bool
}
// Len returns the length of this path
func (p *BucketPath) Len() int {
return len(p.Elements)
}
// KeyPath returns the key path for IDs along the bucket path. The number of IDs specified here must match the
// length of the path, otherwise this will panic.
func (p *BucketPath) KeyPath(ids ...string) Path {
if len(ids) != p.Len() {
panic(errors.Errorf("key path must have exactly %d elements, has %d", p.Len(), len(ids)))
}
pathElems := make([][]byte, 0, len(ids))
for i, id := range ids {
pathElems = append(pathElems, p.Elements[i].GetKey(id))
}
return Path{
Path: pathElems,
ForwardTraversal: !p.BackwardTraversal,
}
}
// Reversed returns a bucket path that is the reverse of this bucket path.
func (p *BucketPath) Reversed() BucketPath {
elems := make([]*dbhelper.BucketHandler, 0, len(p.Elements))
for i := len(p.Elements) - 1; i >= 0; i-- {
elems = append(elems, p.Elements[i])
}
return BucketPath{
Elements: elems,
BackwardTraversal: !p.BackwardTraversal,
}
}
// ForwardBucketPath returns the BucketPath that corresponds to a forward traversal.
func ForwardBucketPath(elements ...*dbhelper.BucketHandler) BucketPath {
return BucketPath{
Elements: elements,
}
}
// BackwardsBucketPath returns the BucketPath that corresponds to a backward traversal.
func BackwardsBucketPath(elements ...*dbhelper.BucketHandler) BucketPath {
return BucketPath{
Elements: elements,
BackwardTraversal: true,
}
}
// ConcatenatePaths concatenates one or more paths. All paths must be non-empty and have the same traversal direction,
// and each path (except for the first one) must start with the same element that the previous one ends with.
func ConcatenatePaths(paths ...BucketPath) (BucketPath, error) {
if len(paths) == 0 {
return BucketPath{}, errors.New("concatenation requires one or more paths")
}
var elems []*dbhelper.BucketHandler
var backwardsTraversal bool
for _, path := range paths {
if path.Len() == 0 {
return BucketPath{}, errors.New("concatenation requires all paths to be non-empty")
}
if len(elems) == 0 {
backwardsTraversal = path.BackwardTraversal
elems = append(elems, path.Elements...)
continue
}
if path.BackwardTraversal != backwardsTraversal {
if len(elems) == 1 {
// a path of length one doesn't have a direction.
backwardsTraversal = path.BackwardTraversal
} else if path.Len() > 1 { // a path of length 1 can be appended regardless of direction
return BucketPath{}, errors.New("cannot concatenate paths with different traversal directions")
}
}
if path.Elements[0] != elems[len(elems)-1] {
return BucketPath{}, errors.Errorf("cannot concatenate a path ending with %q with one starting with %q", elems[len(elems)-1].Name(), path.Elements[0].Name())
}
elems = append(elems, path.Elements[1:]...)
}
return BucketPath{Elements: elems, BackwardTraversal: backwardsTraversal}, nil
} | pkg/dackbox/path.go | 0.871543 | 0.457985 | path.go | starcoder |
package props
import "github.com/madnikulin50/maroto/pkg/consts"
// Proportion represents a proportion from a rectangle, example: 16x9, 4x3...
type Proportion struct {
// Width from the rectangle: Barcode, image and etc
Width float64
// Height from the rectangle: Barcode, image and etc
Height float64
}
// Barcode represents properties from a barcode inside a cell
type Barcode struct {
// Left is the space between the left cell boundary to the barcode, if center is false
Left float64
// Top is space between the upper cell limit to the barcode, if center is false
Top float64
// Percent is how much the barcode will occupy the cell,
// ex 100%: The barcode will fulfill the entire cell
// ex 50%: The greater side from the barcode will have half the size of the cell
Percent float64
// Proportion is the proportion between size of the barcode
// Ex: 16x9, 4x3...
Proportion Proportion
// Center define that the barcode will be vertically and horizontally centralized
Center bool
}
// Rect represents properties from a rectangle (Image, QrCode or Barcode) inside a cell
type Rect struct {
// Left is the space between the left cell boundary to the rectangle, if center is false
Left float64
// Top is space between the upper cell limit to the barcode, if center is false
Top float64
// Percent is how much the rectangle will occupy the cell,
// ex 100%: The rectangle will fulfill the entire cell
// ex 50%: The greater side from the rectangle will have half the size of the cell
Percent float64
// Center define that the barcode will be vertically and horizontally centralized
Center bool
}
// Text represents properties from a Text inside a cell
type Text struct {
// Top is space between the upper cell limit to the barcode, if align is not center
Top float64
// Family of the text, ex: consts.Arial, helvetica and etc
Family consts.Family
// Style of the text, ex: consts.Normal, bold and etc
Style consts.Style
// Size of the text
Size float64
// Align of the text
Align consts.Align
// Extrapolate define if the text will automatically add a new line when
// text reach the right cell boundary
Extrapolate bool
}
// Font represents properties from a text
type Font struct {
// Family of the text, ex: consts.Arial, helvetica and etc
Family consts.Family
// Style of the text, ex: consts.Normal, bold and etc
Style consts.Style
// Size of the text
Size float64
}
// TableList represents properties from a TableList
type TableList struct {
// HeaderHeight is the height of the cell with headers
HeaderHeight float64
// HeaderProp is the custom properties of the text inside
// the headers
HeaderProp Font
// ContentHeight is the height of the cells with contents
ContentHeight float64
// ContentProp is the custom properties of the text inside
// the contents
ContentProp Font
// Align is the align of the text (header and content) inside the columns
Align consts.Align
// HeaderContentSpace is the space between the header and the contents
HeaderContentSpace float64
}
// MakeValid from Rect means will make the properties from a rectangle reliable to fit inside a cell
// and define default values for a rectangle
func (r *Rect) MakeValid() {
if r.Percent <= 0.0 || r.Percent > 100.0 {
r.Percent = 100.0
}
if r.Center {
r.Left = 0
r.Top = 0
}
if r.Left < 0.0 {
r.Left = 0.0
}
if r.Top < 0.0 {
r.Top = 0
}
}
// MakeValid from Barcode means will make the properties from a barcode reliable to fit inside a cell
// and define default values for a barcode
func (r *Barcode) MakeValid() {
if r.Percent <= 0.0 || r.Percent > 100.0 {
r.Percent = 100.0
}
if r.Center {
r.Left = 0
r.Top = 0
}
if r.Left < 0.0 {
r.Left = 0.0
}
if r.Top < 0.0 {
r.Top = 0
}
if r.Proportion.Width <= 0 {
r.Proportion.Width = 1
}
if r.Proportion.Height <= 0 {
r.Proportion.Height = 1
}
if r.Proportion.Height > r.Proportion.Width*0.20 {
r.Proportion.Height = r.Proportion.Width * 0.20
} else if r.Proportion.Height < r.Proportion.Width*0.10 {
r.Proportion.Height = r.Proportion.Width * 0.10
}
}
// MakeValid from Text define default values for a Text
func (f *Text) MakeValid() {
if f.Family == "" {
f.Family = consts.Arial
}
if f.Style == "" {
f.Style = consts.Normal
}
if f.Align == "" {
f.Align = consts.Left
}
if f.Size == 0.0 {
f.Size = 10.0
}
if f.Top < 0.0 {
f.Top = 0.0
}
}
// MakeValid from Font define default values for a Signature
func (f *Font) MakeValid() {
if f.Family == "" {
f.Family = consts.Arial
}
if f.Style == "" {
f.Style = consts.Bold
}
if f.Size == 0.0 {
f.Size = 8.0
}
}
// ToTextProp from Font return a Text based on Font
func (f *Font) ToTextProp(align consts.Align, top float64) Text {
textProp := Text{
Family: f.Family,
Style: f.Style,
Size: f.Size,
Align: align,
Top: top,
}
textProp.MakeValid()
return textProp
}
// MakeValid from TableList define default values for a TableList
func (t *TableList) MakeValid() {
if t.HeaderProp.Size == 0.0 {
t.HeaderProp.Size = 10.0
}
if t.HeaderProp.Family == "" {
t.HeaderProp.Family = consts.Arial
}
if t.HeaderProp.Style == "" {
t.HeaderProp.Style = consts.Bold
}
if t.HeaderHeight == 0.0 {
t.HeaderHeight = 7.0
}
if t.Align == "" {
t.Align = consts.Left
}
if t.ContentProp.Size == 0.0 {
t.ContentProp.Size = 10.0
}
if t.ContentProp.Family == "" {
t.ContentProp.Family = consts.Arial
}
if t.ContentProp.Style == "" {
t.ContentProp.Style = consts.Normal
}
if t.ContentHeight == 0.0 {
t.ContentHeight = 5.0
}
if t.HeaderContentSpace == 0.0 {
t.HeaderContentSpace = 4.0
}
} | pkg/props/prop.go | 0.718496 | 0.644784 | prop.go | starcoder |
package types
import (
"bytes"
"github.com/attic-labs/noms/go/d"
"github.com/attic-labs/noms/go/hash"
)
type leafSequence struct {
vrw ValueReadWriter
buff []byte
offsets []uint32
}
func newLeafSequence(kind NomsKind, count uint64, vrw ValueReadWriter, vs ...Value) leafSequence {
d.PanicIfTrue(vrw == nil)
w := newBinaryNomsWriter()
offsets := make([]uint32, len(vs)+sequencePartValues+1)
offsets[sequencePartKind] = w.offset
kind.writeTo(&w)
offsets[sequencePartLevel] = w.offset
w.writeCount(0) // level
offsets[sequencePartCount] = w.offset
w.writeCount(count)
offsets[sequencePartValues] = w.offset
for i, v := range vs {
v.writeTo(&w)
offsets[i+sequencePartValues+1] = w.offset
}
return leafSequence{vrw, w.data(), offsets}
}
// readLeafSequence reads the data provided by a decoder and moves the decoder forward.
func readLeafSequence(dec *valueDecoder) leafSequence {
start := dec.pos()
offsets := skipLeafSequence(dec)
end := dec.pos()
return leafSequence{dec.vrw, dec.byteSlice(start, end), offsets}
}
func skipLeafSequence(dec *valueDecoder) []uint32 {
kindPos := dec.pos()
dec.skipKind()
levelPos := dec.pos()
dec.skipCount() // level
countPos := dec.pos()
count := dec.readCount()
offsets := make([]uint32, count+sequencePartValues+1)
offsets[sequencePartKind] = kindPos
offsets[sequencePartLevel] = levelPos
offsets[sequencePartCount] = countPos
offsets[sequencePartValues] = dec.pos()
for i := uint64(0); i < count; i++ {
dec.skipValue()
offsets[i+sequencePartValues+1] = dec.pos()
}
return offsets
}
func (seq leafSequence) decoder() valueDecoder {
return newValueDecoder(seq.buff, seq.vrw)
}
func (seq leafSequence) decoderAtOffset(offset int) valueDecoder {
return newValueDecoder(seq.buff[offset:], seq.vrw)
}
func (seq leafSequence) decoderAtPart(part uint32) valueDecoder {
offset := seq.offsets[part] - seq.offsets[sequencePartKind]
return newValueDecoder(seq.buff[offset:], seq.vrw)
}
func (seq leafSequence) decoderSkipToValues() (valueDecoder, uint64) {
dec := seq.decoderAtPart(sequencePartCount)
count := dec.readCount()
return dec, count
}
func (seq leafSequence) decoderSkipToIndex(idx int) valueDecoder {
offset := seq.getItemOffset(idx)
return seq.decoderAtOffset(offset)
}
func (seq leafSequence) writeTo(w nomsWriter) {
w.writeRaw(seq.buff)
}
func (seq leafSequence) values() []Value {
dec, count := seq.decoderSkipToValues()
vs := make([]Value, count)
for i := uint64(0); i < count; i++ {
vs[i] = dec.readValue()
}
return vs
}
func (seq leafSequence) getCompareFnHelper(other leafSequence) compareFn {
dec := seq.decoder()
otherDec := other.decoder()
return func(idx, otherIdx int) bool {
dec.offset = uint32(seq.getItemOffset(idx))
otherDec.offset = uint32(other.getItemOffset(otherIdx))
return dec.readValue().Equals(otherDec.readValue())
}
}
func (seq leafSequence) getCompareFn(other sequence) compareFn {
panic("unreachable")
}
func (seq leafSequence) typeOf() *Type {
dec := seq.decoder()
kind := dec.readKind()
dec.skipCount() // level
count := dec.readCount()
ts := make([]*Type, count)
for i := uint64(0); i < count; i++ {
ts[i] = dec.readTypeOfValue()
}
return makeCompoundType(kind, makeCompoundType(UnionKind, ts...))
}
func (seq leafSequence) seqLen() int {
return int(seq.numLeaves())
}
func (seq leafSequence) numLeaves() uint64 {
_, count := seq.decoderSkipToValues()
return count
}
func (seq leafSequence) valueReadWriter() ValueReadWriter {
return seq.vrw
}
func (seq leafSequence) getChildSequence(idx int) sequence {
return nil
}
func (seq leafSequence) Kind() NomsKind {
dec := seq.decoder()
return dec.readKind()
}
func (seq leafSequence) treeLevel() uint64 {
return 0
}
func (seq leafSequence) isLeaf() bool {
return true
}
func (seq leafSequence) cumulativeNumberOfLeaves(idx int) uint64 {
return uint64(idx) + 1
}
func (seq leafSequence) getCompositeChildSequence(start uint64, length uint64) sequence {
panic("getCompositeChildSequence called on a leaf sequence")
}
func (seq leafSequence) getItemOffset(idx int) int {
// kind, level, count, elements...
// 0 1 2 3 n+1
d.PanicIfTrue(idx+sequencePartValues+1 > len(seq.offsets))
return int(seq.offsets[idx+sequencePartValues] - seq.offsets[sequencePartKind])
}
func (seq leafSequence) getItem(idx int) sequenceItem {
dec := seq.decoderSkipToIndex(idx)
return dec.readValue()
}
func (seq leafSequence) WalkRefs(cb RefCallback) {
dec, count := seq.decoderSkipToValues()
for i := uint64(0); i < count; i++ {
dec.readValue().WalkRefs(cb)
}
}
// Collection interface
func (seq leafSequence) Len() uint64 {
_, count := seq.decoderSkipToValues()
return count
}
func (seq leafSequence) Empty() bool {
return seq.Len() == uint64(0)
}
func (seq leafSequence) hash() hash.Hash {
return hash.Of(seq.buff)
}
func (seq leafSequence) equals(other sequence) bool {
return bytes.Equal(seq.bytes(), other.bytes())
}
func (seq leafSequence) bytes() []byte {
return seq.buff
} | go/types/leaf_sequence.go | 0.74382 | 0.48438 | leaf_sequence.go | starcoder |
package helpers
import (
"net/url"
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
// Custom URL matcher for outgoing pubnub server requests
func UrlsEqual(expectedString, actualString string,
ignoreKeys, mixedKeys []string) (bool, error) {
expected, err := url.Parse(expectedString)
if err != nil {
return false, err
}
actual, err := url.Parse(actualString)
if err != nil {
return false, err
}
if expected.Scheme != actual.Scheme {
return false, nil
}
if expected.Host != actual.Host {
return false, nil
}
if !PathsEqual(expected.Path, actual.Path, []int{}) {
return false, nil
}
eQuery := expected.Query()
aQuery := actual.Query()
if !QueriesEqual(&eQuery, &aQuery, ignoreKeys, mixedKeys) {
return false, nil
}
return true, nil
}
// PathsEqual mixedPositions - a position of items which can contain unsorted items like
// multiple unsorted channels. If no such positions expected use an empty slice.
// Like in arrays, the first position is 0.
func PathsEqual(expectedString, actualString string,
mixedPositions []int) bool {
if expectedString == actualString {
return true
}
expected := strings.Split(expectedString, "/")
actual := strings.Split(actualString, "/")
if len(actual) != len(expected) {
return false
}
for k, v := range expected {
if !isValueInSlice(k, mixedPositions) {
if v != actual[k] {
return false
}
} else {
expectedItems := strings.Split(v, ",")
actualItems := strings.Split(actual[k], ",")
if len(expectedItems) != len(actualItems) {
return false
}
for _, v := range expectedItems {
if !isValueInSlice(v, expectedItems) {
return false
}
}
}
}
return true
}
func QueriesEqual(expectedString, actualString *url.Values,
ignoreKeys []string, mixedKeys []string) bool {
if expectedString.Encode() == actualString.Encode() {
return true
}
for k, aVal := range *actualString {
if isValueInSlice(k, ignoreKeys) {
continue
}
if eVal, ok := (*expectedString)[k]; ok {
if isValueInSlice(k, mixedKeys) {
eParts := strings.Split(eVal[0], ",")
aParts := strings.Split(aVal[0], ",")
if len(aParts) != len(eParts) {
return false
}
for _, e := range eParts {
if !isValueInSlice(e, aParts) {
return false
}
}
} else {
if aVal[0] != eVal[0] {
return false
}
}
} else {
return false
}
}
for k, _ := range *expectedString {
if val := actualString.Get(k); val == "" {
return false
}
}
return true
}
func isValueInSlice(item interface{}, slice interface{}) bool {
if s, ok := slice.([]string); ok {
for _, v := range s {
if item == v {
return true
}
}
} else if s, ok := slice.([]int); ok {
for _, v := range s {
if item == v {
return true
}
}
}
return false
}
// Assertion wrappers for tests
func AssertPathsEqual(t *testing.T, expectedString, actualString string,
itemsPositions []int) {
match := PathsEqual(expectedString, actualString, itemsPositions)
assert.True(t, match, "Paths are not equal:\nExpected: %s\nActual: %s\n",
expectedString, actualString)
}
func AssertQueriesEqual(t *testing.T, expectedString, actualString *url.Values,
ignoreKeys, mixedKeys []string) {
match := QueriesEqual(expectedString, actualString, ignoreKeys, mixedKeys)
assert.True(t, match, "Queries are not equal:\nExpected: %s\nActual: %s\n",
expectedString, actualString)
} | tests/helpers/url_helpers.go | 0.725162 | 0.435421 | url_helpers.go | starcoder |
package main
import (
"fmt"
"math"
"io/ioutil"
"encoding/json"
"github.com/go-gl/gl/v4.1-core/gl"
"github.com/go-gl/mathgl/mgl32"
)
// Object Maps a generic object
type Object struct {
Name string
Type string // {sphere|circle|circle-filled}
Radius float32 // Kilometers
Compression float32 // Compression ratio at poles
Inclination float32 // Degrees
Tilt float32 // Degrees
Revolution float32 // Yars
Rotation float32 // Days
Distance float32 // Kilometers
Center bool // Is center reference?
Radiate bool // Emmits light?
Cosmic bool // Is far away cosmic object (doesnt receive light)
Objects []Object
}
// ObjectElement Maps a generic object internal elements (drawing attributes)
type ObjectElement struct {
Vertices []float32
VerticeNormals []float32
Indices []int32
TextureCoords []float32
}
func loadObjects(mapName string) (*[]Object) {
var objectsMap []Object
// Load objects map
filePath := fmt.Sprintf("maps/%s.json", mapName)
file, err := ioutil.ReadFile(filePath)
if err != nil {
panic(err)
}
// Transform JSON map into Go map
err = json.Unmarshal(file, &objectsMap)
if err != nil {
panic(err)
}
return &objectsMap
}
func renderObjects(objects *[]Object, program uint32) {
// Acquire shader
shader := getShader()
light := getLight()
matrixUniforms := getMatrixUniforms()
// Iterate on current-level objects
for o := range *objects {
object := &((*objects)[o])
buffers := getBuffers(object.Name)
gl.BindTexture(gl.TEXTURE_2D, buffers.Texture.Ref)
// Toggle to child context
pushMatrix()
// Update angles for object
buffers.addToAngleRotation(rotationAngleSinceLast(object))
buffers.addToAngleRevolution(revolutionAngleSinceLast(object))
// Apply model transforms
currentMatrixShared := getMatrix()
if object.Tilt != 0 {
*currentMatrixShared = (*currentMatrixShared).Mul4(mgl32.HomogRotate3D(buffers.AngleTilt, mgl32.Vec3{1, 0, 0}))
}
if object.Revolution != 0 {
*currentMatrixShared = (*currentMatrixShared).Mul4(mgl32.HomogRotate3D(buffers.AngleRevolution, mgl32.Vec3{0, 1, 0}))
}
if object.Distance > 0 && object.Center != true {
*currentMatrixShared = (*currentMatrixShared).Mul4(mgl32.Translate3D(normalizeObjectSize(object.Distance), 0.0, 0.0))
}
setMatrix(currentMatrixShared)
// Toggle to unary context
pushMatrix()
// Apply object angles
currentMatrixSelf := getMatrix()
if object.Inclination > 0 {
*currentMatrixSelf = (*currentMatrixSelf).Mul4(mgl32.HomogRotate3D(object.Inclination / 90.0, mgl32.Vec3{0, 0, 1}))
}
if object.Rotation != 0 {
*currentMatrixSelf = (*currentMatrixSelf).Mul4(mgl32.HomogRotate3D(buffers.AngleRotation, mgl32.Vec3{0, 1, 0}))
}
// Process normal to model matrix
normalMatrix := mgl32.Mat4Normal(*currentMatrixSelf)
// Apply model + normal
gl.UniformMatrix4fv(matrixUniforms.Model, 1, false, &((*currentMatrixSelf)[0]))
gl.UniformMatrix3fv(matrixUniforms.Normal, 1, false, &normalMatrix[0])
// Render vertices
gl.BindBuffer(gl.ARRAY_BUFFER, buffers.VBOElementVertices)
gl.VertexAttribPointer(shader.VertexAttributes, 3, gl.FLOAT, false, 0, gl.PtrOffset(0))
// Render textures
gl.BindBuffer(gl.ARRAY_BUFFER, buffers.VBOElementTexture)
gl.VertexAttribPointer(shader.VertexTextureCoords, 2, gl.FLOAT, false, 0, gl.PtrOffset(0))
// Render vertice lightings
gl.BindBuffer(gl.ARRAY_BUFFER, buffers.VBOElementVerticeNormals)
gl.VertexAttribPointer(shader.NormalAttributes, 3, gl.FLOAT, false, 0, gl.PtrOffset(0))
// Light emitter? (eg: Sun)
if object.Radiate == true {
gl.Uniform1i(light.IsLightEmitterUniform, 1)
gl.Uniform3f(light.PointLightingLocationUniform, 0, 0, 0);
gl.Uniform3f(light.PointLightingColorUniform, 1, 1, 1);
}
// Light receiver? (eg: planet, moon)
if object.Cosmic == true {
// It is a far-away cosmic object, dont light it from emitter
gl.Uniform1i(light.IsLightReceiverUniform, 0)
} else {
gl.Uniform1i(light.IsLightReceiverUniform, 1)
}
// Render indices
gl.BindBuffer(gl.ELEMENT_ARRAY_BUFFER, buffers.VBOElementIndices)
// Draw elements
gl.DrawElements(getObjectDrawMode(object), int32(len(buffers.Element.Indices) * 2), gl.UNSIGNED_INT, gl.PtrOffset(0))
// Reset buffers
gl.BindBuffer(gl.ARRAY_BUFFER, 0)
gl.BindBuffer(gl.ELEMENT_ARRAY_BUFFER, 0)
// Toggle back from unary context
popMatrix()
// Render children (if any?)
renderObjects(&(object.Objects), program)
// Toggle back to parent context
popMatrix()
}
}
func normalizeObjectSize(size float32) (float32) {
return float32(math.Sqrt(float64(size)) * ConfigObjectFactorSize)
}
func getObjectDrawMode(object *Object) (uint32) {
if object.Type == "circle" {
return gl.LINES
}
return gl.TRIANGLES
} | object.go | 0.763396 | 0.404155 | object.go | starcoder |
package main
import (
"fmt"
"math/rand"
"sort"
"gonum.org/v1/plot"
"gonum.org/v1/plot/plotter"
"gonum.org/v1/plot/vg"
"gonum.org/v1/plot/vg/draw"
"github.com/pointlander/gradient/tf32"
)
// XORNetwork is an xor neural network
type XORNetwork struct {
Input, Output *tf32.V
Parameters []*tf32.V
Genome [][]*tf32.V
Cost tf32.Meta
Fitness float32
}
// NewXORNetwork creates a new xor network
func NewXORNetwork(rnd *rand.Rand, width, depth int) XORNetwork {
random32 := func(a, b float32) float32 {
if rnd == nil {
return 0
}
return (b-a)*rnd.Float32() + a
}
input, output := tf32.NewV(2, 4), tf32.NewV(1, 4)
w1, b1, w2, b2 := tf32.NewV(2, width), tf32.NewV(width), tf32.NewV(width), tf32.NewV(1)
parameters := []*tf32.V{&w1, &b1, &w2, &b2}
genome := make([][]*tf32.V, 4)
input.X = append(input.X, 0, 0, 1, 0, 0, 1, 1, 1)
output.X = append(output.X, 0, 1, 1, 0)
m1, m2, m1a, m2a := w1.Meta(), w2.Meta(), b1.Meta(), b2.Meta()
for i := 0; i < depth; i++ {
a, b := tf32.NewV(2, 2), tf32.NewV(2, width)
m1 = tf32.Add(tf32.Mul(a.Meta(), b.Meta()), m1)
parameters = append(parameters, &a, &b)
genome[0] = append(genome[0], &a, &b)
}
for i := 0; i < depth; i++ {
a, b := tf32.NewV(width, width), tf32.NewV(width)
m1a = tf32.Add(tf32.Mul(a.Meta(), b.Meta()), m1a)
parameters = append(parameters, &a, &b)
genome[1] = append(genome[1], &a, &b)
}
for i := 0; i < depth; i++ {
a, b := tf32.NewV(width, width), tf32.NewV(width)
m2 = tf32.Add(tf32.Mul(a.Meta(), b.Meta()), m2)
parameters = append(parameters, &a, &b)
genome[2] = append(genome[2], &a, &b)
}
for i := 0; i < depth; i++ {
a, b := tf32.NewV(1), tf32.NewV(1)
m2a = tf32.Add(tf32.Mul(a.Meta(), b.Meta()), m2a)
parameters = append(parameters, &a, &b)
genome[3] = append(genome[3], &a, &b)
}
for _, p := range parameters {
for i := 0; i < cap(p.X); i++ {
p.X = append(p.X, random32(-1, 1))
}
}
l1 := tf32.Sigmoid(tf32.Add(tf32.Mul(m1, input.Meta()), m1a))
l2 := tf32.Sigmoid(tf32.Add(tf32.Mul(m2, l1), m2a))
cost := tf32.Avg(tf32.Quadratic(l2, output.Meta()))
return XORNetwork{
Input: &input,
Output: &output,
Parameters: parameters,
Genome: genome,
Cost: cost,
}
}
// Fit get the fitness of the network
func (n *XORNetwork) Fit() float32 {
fitness := tf32.Gradient(n.Cost).X[0]
n.Fitness = fitness
return fitness
}
// Mutate mutates the network with gradient descent
func (n *XORNetwork) Mutate() float32 {
for _, p := range n.Parameters {
p.Zero()
}
cost := tf32.Gradient(n.Cost).X[0]
eta := float32(.6)
for _, p := range n.Parameters {
for l, d := range p.D {
p.X[l] -= eta * d
}
}
n.Fitness = cost
return cost
}
// XORParallelExperiment runs parallel version of experiment
func XORParallelExperiment(seed int64, depth int) (generatrions int) {
rnd := rand.New(rand.NewSource(seed))
networks := make([]XORNetwork, 100)
for i := range networks {
networks[i] = NewXORNetwork(rnd, 3, depth)
}
done := make(chan float32, 8)
fit := func(n *XORNetwork) {
done <- n.Fit()
}
mutate := func(n *XORNetwork) {
done <- n.Mutate()
}
generatrions, rnd = 1000, rand.New(rand.NewSource(seed))
for i := 0; i < 1000; i++ {
for j := range networks {
go mutate(&networks[j])
}
for j := 0; j < 100; j++ {
<-done
}
tf32.Static.InferenceOnly = true
for j := range networks {
go fit(&networks[j])
}
for j := 0; j < 100; j++ {
<-done
}
tf32.Static.InferenceOnly = false
sort.Slice(networks, func(i, j int) bool {
return networks[i].Fitness < networks[j].Fitness
})
//fmt.Println(i, networks[0].Fitness)
if networks[0].Fitness < .01 {
generatrions = i
break
}
index := 50
for j := 0; j < 25; j++ {
a, b := rnd.Intn(50), rnd.Intn(50)
for a == b {
b = rnd.Intn(50)
}
childa := &networks[index]
for k, p := range childa.Parameters {
copy(p.X, networks[a].Parameters[k].X)
}
index++
childb := &networks[index]
for k, p := range childb.Parameters {
copy(p.X, networks[b].Parameters[k].X)
}
index++
set, x, y := rnd.Intn(4), rnd.Intn(depth), rnd.Intn(depth)
childa.Genome[set][x*2].X, childb.Genome[set][y*2].X =
childb.Genome[set][y*2].X, childa.Genome[set][x*2].X
childa.Genome[set][x*2+1].X, childb.Genome[set][y*2+1].X =
childb.Genome[set][y*2+1].X, childa.Genome[set][x*2+1].X
}
}
return
}
// RunXORRepeatedParallelExperiment runs xor prarallel experiment repeatedly
func RunXORRepeatedParallelExperiment() {
total := 0
for i := 0; i < 256; i++ {
total += XORParallelExperiment(int64(i)+1, 16)
}
fmt.Printf("generations=%f\n", float64(total)/256)
}
// XORExperiment xor neural network experiment
func XORExperiment(seed int64, width, depth int, optimizer Optimizer, batch, inception, dct, context bool) Result {
rnd, costs, converged := rand.New(rand.NewSource(seed)), make([]float32, 0, 1000), false
random32 := func(a, b float32) float32 {
return (b-a)*rnd.Float32() + a
}
var input, output tf32.V
if batch {
input, output = tf32.NewV(2, 4), tf32.NewV(1, 4)
} else {
input, output = tf32.NewV(2), tf32.NewV(1)
}
w1, b1, w2, b2 := tf32.NewV(2, width), tf32.NewV(width), tf32.NewV(width), tf32.NewV(1)
parameters, zero := []*tf32.V{&w1, &b1, &w2, &b2}, []*tf32.V{}
m1, m2, m1a, m2a := w1.Meta(), w2.Meta(), b1.Meta(), b2.Meta()
if dct {
t1, tt1 := DCT2(2)
t2, tt2 := DCT2(width)
t3, tt3 := DCT2(width)
t4, tt4 := DCT2(1)
w1b, b1b, w2b, b2b := tf32.NewV(2, width), tf32.NewV(width), tf32.NewV(width), tf32.NewV(1)
m1 = tf32.Add(tf32.Mul(tt1.Meta(), tf32.T(tf32.Mul(m1, t1.Meta()))), w1b.Meta())
m1a = tf32.Add(tf32.Mul(tt2.Meta(), tf32.T(tf32.Mul(m1a, t2.Meta()))), b1b.Meta())
m2 = tf32.Add(tf32.Mul(tt3.Meta(), tf32.T(tf32.Mul(m2, t3.Meta()))), w2b.Meta())
m2a = tf32.Add(tf32.Mul(tt4.Meta(), tf32.T(tf32.Mul(m2a, t4.Meta()))), b2b.Meta())
zero = append(zero, &t1, &tt1, &t2, &tt2, &t3, &tt3, &t4, &tt4)
parameters = append(parameters, &w1b, &b1b, &w2b, &b2b)
} else if inception {
for i := 0; i < depth; i++ {
a, b := tf32.NewV(2, 2), tf32.NewV(2, width)
m1 = tf32.Add(tf32.Mul(a.Meta(), b.Meta()), m1)
parameters = append(parameters, &a, &b)
}
for i := 0; i < depth; i++ {
a, b := tf32.NewV(width, width), tf32.NewV(width)
m1a = tf32.Add(tf32.Mul(a.Meta(), b.Meta()), m1a)
parameters = append(parameters, &a, &b)
}
for i := 0; i < depth; i++ {
a, b := tf32.NewV(width, width), tf32.NewV(width)
m2 = tf32.Add(tf32.Mul(a.Meta(), b.Meta()), m2)
parameters = append(parameters, &a, &b)
}
for i := 0; i < depth; i++ {
a, b := tf32.NewV(1), tf32.NewV(1)
m2a = tf32.Add(tf32.Mul(a.Meta(), b.Meta()), m2a)
parameters = append(parameters, &a, &b)
}
}
var deltas, m, v [][]float32
for _, p := range parameters {
for i := 0; i < cap(p.X); i++ {
p.X = append(p.X, random32(-1, 1))
}
switch optimizer {
case OptimizerMomentum:
deltas = append(deltas, make([]float32, len(p.X)))
case OptimizerAdam:
m = append(m, make([]float32, len(p.X)))
v = append(v, make([]float32, len(p.X)))
}
}
l1 := tf32.Sigmoid(tf32.Add(tf32.Mul(m1, input.Meta()), m1a))
l2 := tf32.Sigmoid(tf32.Add(tf32.Mul(m2, l1), m2a))
cost := tf32.Avg(tf32.Quadratic(l2, output.Meta()))
type Datum struct {
input []float32
output []float32
deltas, m, v [][]float32
}
data := [...]Datum{
{
input: []float32{0, 0},
output: []float32{0},
},
{
input: []float32{1, 0},
output: []float32{1},
},
{
input: []float32{0, 1},
output: []float32{1},
},
{
input: []float32{1, 1},
output: []float32{0},
},
}
table := make([]*Datum, len(data))
for i := range data {
if context {
for _, p := range parameters {
switch optimizer {
case OptimizerMomentum:
data[i].deltas = append(data[i].deltas, make([]float32, len(p.X)))
case OptimizerAdam:
data[i].m = append(data[i].m, make([]float32, len(p.X)))
data[i].v = append(data[i].v, make([]float32, len(p.X)))
}
}
}
table[i] = &data[i]
}
rnd = rand.New(rand.NewSource(seed))
// momentum parameters
alpha, eta := float32(.1), float32(.6)
// adam parameters
a, beta1, beta2, epsilon := float32(.001), float32(.9), float32(.999), float32(1E-8)
optimize := func(i int) {
for k, p := range parameters {
for l, d := range p.D {
switch optimizer {
case OptimizerStatic:
p.X[l] -= eta * d
case OptimizerMomentum:
deltas[k][l] = alpha*deltas[k][l] - eta*d
p.X[l] += deltas[k][l]
case OptimizerAdam:
m[k][l] = beta1*m[k][l] + (1-beta1)*d
v[k][l] = beta2*v[k][l] + (1-beta2)*d*d
t := float32(i + 1)
mCorrected := m[k][l] / (1 - pow(beta1, t))
vCorrected := v[k][l] / (1 - pow(beta2, t))
p.X[l] -= a * mCorrected / (sqrt(vCorrected) + epsilon)
}
}
}
}
if batch {
inputs, outputs := make([]float32, 0, 16), make([]float32, 0, 4)
for i := range table {
inputs = append(inputs, table[i].input...)
outputs = append(outputs, table[i].output...)
}
input.Set(inputs)
output.Set(outputs)
for i := 0; i < 10000; i++ {
for _, p := range parameters {
p.Zero()
}
for _, p := range zero {
p.Zero()
}
total := tf32.Gradient(cost).X[0]
optimize(i)
costs = append(costs, total)
if total < .01 {
converged = true
break
}
}
} else {
Learn:
for i := 0; i < 10000; i++ {
for i := range table {
j := i + rnd.Intn(len(data)-i)
table[i], table[j] = table[j], table[i]
}
total := float32(0.0)
for j := range table {
for _, p := range parameters {
p.Zero()
}
for _, p := range zero {
p.Zero()
}
input.Set(table[j].input)
output.Set(table[j].output)
total += tf32.Gradient(cost).X[0]
if context {
switch optimizer {
case OptimizerMomentum:
deltas = table[j].deltas
case OptimizerAdam:
m = table[j].m
v = table[j].v
}
}
optimize(i)
}
costs = append(costs, total)
switch optimizer {
case OptimizerStatic, OptimizerMomentum:
if total < .01 {
converged = true
break Learn
}
case OptimizerAdam:
if total < .1 {
converged = true
break Learn
}
}
}
}
if converged {
for i := range data {
input.X[0], input.X[1] = data[i].input[0], data[i].input[1]
var output tf32.V
l2(func(a *tf32.V) {
output = *a
})
if data[i].output[0] == 1 && output.X[0] < .5 {
panic(fmt.Sprintf("%v output should be 1 %f %v %v %s %v", context, output.X[0], data[i].input, data[i].output,
optimizer.String(), batch))
} else if data[i].output[0] == 0 && output.X[0] >= .5 {
panic(fmt.Sprintf("%v output should be 0 %f %v %v %s %v", context, output.X[0], data[i].input, data[i].output,
optimizer.String(), batch))
}
}
}
return Result{
Costs: costs,
Converged: converged,
}
}
// RunXORRepeatedExperiment runs multiple xor experiments
func RunXORRepeatedExperiment() {
run := func(optimizer Optimizer, batch bool) (normalStats, inceptionStats Statistics) {
normalStats.Mode, inceptionStats.Mode = "normal", "inception"
normalStats.Optimizer, inceptionStats.Optimizer = optimizer, optimizer
if batch {
normalStats.Batch, inceptionStats.Batch = 4, 4
} else {
normalStats.Batch, inceptionStats.Batch = 1, 1
}
experiment := func(seed int64, inception, context bool, results chan<- Result) {
results <- XORExperiment(seed, 3, 16, optimizer, batch, inception, false, context)
}
normalResults, inceptionResults := make(chan Result, 8), make(chan Result, 8)
for i := 1; i <= 256; i++ {
go experiment(int64(i), false, false, normalResults)
go experiment(int64(i), true, false, inceptionResults)
}
for normalStats.Count < 256 || inceptionStats.Count < 256 {
select {
case result := <-normalResults:
normalStats.Aggregate(result)
case result := <-inceptionResults:
inceptionStats.Aggregate(result)
}
}
return
}
statistics := []Statistics{}
for _, optimizer := range Optimizers {
normalStats, inceptionStats := run(optimizer, false)
statistics = append(statistics, normalStats, inceptionStats)
normalStats, inceptionStats = run(optimizer, true)
statistics = append(statistics, normalStats, inceptionStats)
}
sort.Slice(statistics, func(i, j int) bool {
return statistics[i].AverageEpochs() < statistics[j].AverageEpochs()
})
headers := []string{"Mode", "Optimizer", "Batch", "Converged", "Epochs"}
sizes, results := make([]int, 5), make([][5]string, len(statistics))
for i, header := range headers {
sizes[i] = len(header)
}
for i, statistic := range statistics {
results[i][0] = statistic.Mode
if length := len(results[i][0]); length > sizes[0] {
sizes[0] = length
}
results[i][1] = statistic.Optimizer.String()
if length := len(results[i][1]); length > sizes[1] {
sizes[1] = length
}
results[i][2] = fmt.Sprintf("%d", statistic.Batch)
if length := len(results[i][2]); length > sizes[2] {
sizes[2] = length
}
results[i][3] = fmt.Sprintf("%f", statistic.ConvergenceProbability())
if length := len(results[i][3]); length > sizes[3] {
sizes[3] = length
}
results[i][4] = fmt.Sprintf("%f", statistic.AverageEpochs())
if length := len(results[i][4]); length > sizes[4] {
sizes[4] = length
}
}
fmt.Printf("| ")
for i, header := range headers {
fmt.Printf("%s", header)
spaces := sizes[i] - len(header)
for spaces > 0 {
fmt.Printf(" ")
spaces--
}
fmt.Printf(" | ")
}
fmt.Printf("\n| ")
for i, header := range headers {
dashes := len(header)
if sizes[i] > dashes {
dashes = sizes[i]
}
for dashes > 0 {
fmt.Printf("-")
dashes--
}
fmt.Printf(" | ")
}
fmt.Printf("\n")
for _, row := range results {
fmt.Printf("| ")
for i, entry := range row {
spaces := sizes[i] - len(entry)
fmt.Printf("%s", entry)
for spaces > 0 {
fmt.Printf(" ")
spaces--
}
fmt.Printf(" | ")
}
fmt.Printf("\n")
}
}
// RunXORExperiment runs an xor experiment once
func RunXORExperiment(seed int64) {
p, err := plot.New()
if err != nil {
panic(err)
}
p.Title.Text = "xor epochs"
p.X.Label.Text = "epoch"
p.Y.Label.Text = "cost"
p.Legend.Top = true
index := 0
for _, optimizer := range Optimizers {
normal := XORExperiment(seed, 3, 16, optimizer, true, false, false, false)
inception := XORExperiment(seed, 3, 16, optimizer, true, true, false, false)
pointsNormal := make(plotter.XYs, 0, len(normal.Costs))
for i, cost := range normal.Costs {
pointsNormal = append(pointsNormal, plotter.XY{X: float64(i), Y: float64(cost)})
}
pointsInception := make(plotter.XYs, 0, len(inception.Costs))
for i, cost := range inception.Costs {
pointsInception = append(pointsInception, plotter.XY{X: float64(i), Y: float64(cost)})
}
normalScatter, err := plotter.NewScatter(pointsNormal)
if err != nil {
panic(err)
}
normalScatter.GlyphStyle.Radius = vg.Length(1)
normalScatter.GlyphStyle.Shape = draw.CircleGlyph{}
normalScatter.GlyphStyle.Color = colors[index]
normalScatter.GlyphStyle.Radius = 2
index++
inceptionScatter, err := plotter.NewScatter(pointsInception)
if err != nil {
panic(err)
}
inceptionScatter.GlyphStyle.Radius = vg.Length(1)
inceptionScatter.GlyphStyle.Shape = draw.CircleGlyph{}
inceptionScatter.GlyphStyle.Color = colors[index]
inceptionScatter.GlyphStyle.Radius = 2
index++
p.Add(normalScatter, inceptionScatter)
p.Legend.Add(fmt.Sprintf("normal %s", optimizer.String()), normalScatter)
p.Legend.Add(fmt.Sprintf("inception %s", optimizer.String()), inceptionScatter)
}
err = p.Save(8*vg.Inch, 8*vg.Inch, "cost_xor.png")
if err != nil {
panic(err)
}
} | experiment_xor.go | 0.643329 | 0.421195 | experiment_xor.go | starcoder |
package aws
const ecsDescription = `Connects to one or more ECS clusters and updates
API Clusters stored with the Turbine Labs API at startup and periodically
thereafter.
Within ECS, items are marked as Turbine Labs Cluster members through the use of
a configurable Docker label (defaulting to ` + ecsDefaultClusterTag + `). This
tag has the format of a comma-separated series of 'cluster:port' declarations.
When a collection pass runs it first examines the ECS Services defined in the
specified ECS Clusters and the associated Tasks. The ContainerInstances for
each Task are examined and, using the Container definition, any container with
the specified tag is marked as handling traffic for the indicated cluster on
the indicated port (as taken from the '<cluster>:<port>' annotation). If
multiple values are included each pairing indicates an additional instance that
will be bound to the corresponding API Cluster. Note that the port from this
label should indicate container port. Validation is done to ensure that a port
specified in the tag values is defined as an exposed port on the container.
If a container has a port exposed multiple times assignment of the corresponding
host port will use each host mapping once then select randomly for each API
cluster that maps to that port.
To summarize, once the ECS cluster layout is ingested:
(1) running ECS Tasks are requested for each {ECS cluster, ECS Service} pairing
(2) ContainerInstance data is pulled from each Task
(3) Containers within each ContainerInstance are inspected and API Clusters are
created with API Cluster Instances added as the 'cluster:port' label directs
(4) Each instance attached to an API Cluster will include as metadata the
originating:
- ECS cluster (short name)
- ECS service (short name)
- ECS service (ARN)
- ECS Task Definition (ARN)
- ECS Container Name within Task
- ECS Cluster Instance (ARN)
- ECS Task Instance (ARN)
- EC2 Instance Id
An example:
The ECS environment definition:
Cluster 'prod'
Service 'user-auth' (bound to task 'auth_task:4' with 1 instance)
Service 'api-prod-frontend' (bound to task 'api_task:2' with 4 instances)
Cluster 'beta'
Service 'api-beta-frontend' (bound to task 'api_task:3' with 1 instance)
Tasks
auth_task:4
Container 'user-service'
exposes 9990 to port 0
no labels
linked to db-proxy
Container 'db-proxy'
no labels
api_task:2
Container 'api'
exposes 80 to 0
label "tbn-cluster: auth_api:80"
api_task:3
Container 'api'
exposes 80 to 8000
labels "tbn-cluster: auth_api:80"
This ECS environment is then run on the hosts configured for each cluster and
rotor is instructed to collect API Clusters from 'prod' and 'beta' ECS
clusters using the identifying tag of 'tbn-cluster'.
The ECS execution environment:
prod:
10.0.1.10 running containers
user-service with 18592 -> 9990/tcp
db-proxy with no exposed ports
api wih 18593 -> 80/tcp
10.0.1.11:
api wih 11248 -> 80/tcp
10.0.1.12:
api wih 23422 -> 80/tcp
api wih 23423 -> 80/tcp
beta:
10.0.1.13:
api with 8000 -> 80/tcp
The expected result would be:
API Cluster 'api' with 5 Instances:
10.0.1.10:18593, metadata{
ecs-cluster -> prod
ecs-service -> api-prod-frontend
ecs-service-arn -> arn:aws:ecs:us-east-1:<acct-id>:service/api-prod-frontend
ecs-task-definition -> arn:aws:ecs:us-east-1:<acct-id>:task-definition/api_task:2
ecs-task-container -> api
ecs-container-instance -> arn:aws:ecs:us-east-1:<acct-id>:container-instance/<ci-uuid-1>
ecs-task-instance -> arn:aws:ecs:us-east-1:<acct-id>:task/<t-uuid-1>
ec2-instance-id -> <ec2-id-1>
}
10.0.1.11:11248, metadata{
ecs-cluster -> prod
ecs-service -> api-prod-frontend
ecs-service-arn -> arn:aws:ecs:us-east-1:<acct-id>:service/api-prod-frontend
ecs-task-definition -> arn:aws:ecs:us-east-1:<acct-id>:task-definition/api_task:2
ecs-task-container -> api
ecs-container-instance -> arn:aws:ecs:us-east-1:<acct-id>:container-instance/<ci-uuid-2>
ecs-task-instance -> arn:aws:ecs:us-east-1:<acct-id>:task/<t-uuid-2>
ec2-instance-id -> <ec2-id-2>
}
10.0.1.12:23422, metadata{
ecs-cluster -> prod
ecs-service -> api-prod-frontend
ecs-service-arn -> arn:aws:ecs:us-east-1:<acct-id>:service/api-prod-frontend
ecs-task-definition -> arn:aws:ecs:us-east-1:<acct-id>:task-definition/api_task:2
ecs-task-container -> api
ecs-container-instance -> arn:aws:ecs:us-east-1:<acct-id>:container-instance/<ci-uuid-3>
ecs-task-instance -> arn:aws:ecs:us-east-1:<acct-id>:task/<t-uuid-3>
ec2-instance-id -> <ec2-id-3>
}
10.0.1.12:23423, metadata{
ecs-cluster -> prod
ecs-service -> api-prod-frontend
ecs-service-arn -> arn:aws:ecs:us-east-1:<acct-id>:service/api-prod-frontend
ecs-task-definition -> arn:aws:ecs:us-east-1:<acct-id>:task-definition/api_task:2
ecs-task-container -> api
ecs-container-instance -> arn:aws:ecs:us-east-1:<acct-id>:container-instance/<ci-uuid-3>
ecs-task-instance -> arn:aws:ecs:us-east-1:<acct-id>:task/<t-uuid-3>
ec2-instance-id -> <ec2-id-3>
}
10.0.1.13:8000, metadata{
ecs-cluster -> beta
ecs-service -> api-beta-frontend
ecs-service-arn -> arn:aws:ecs:us-east-1:<acct-id>:service/api-beta-frontend
ecs-task-definition -> arn:aws:ecs:us-east-1:<acct-id>:task-definition/api_task:3
ecs-task-container -> api
ecs-container-instance -> arn:aws:ecs:us-east-1:<acct-id>:container-instance/<ci-uuid-4>
ecs-task-instance -> arn:aws:ecs:us-east-1:<acct-id>:task/<t-uuid-4>
ec2-instance-id -> <ec2-id-4>
}
Notice that:
When the exposed port is not bound in the ECS Task/Container configuration
(as in task 'api_task:2') rotor infers the correct host port based on
the port directs to the port specified in the ` + ecsDefaultClusterTag + `
tag.
If a host port is explicitly bound (as in 'api_task:3') it is used in the
constructed API Instance. If that port is not available it is an error an
the instance will not be included.
The names of the ECS Service and ECS Cluster do not impact the name of
the constructed API Cluster. Our examples are all within the 'api' cluster
because of the label despite being sourced from the 'prod' and 'beta' ECS
clusters. If the originating cluster is necessary for routing it will be
included in the instance metadata so constraints may be added.
No API Cluster was created to track containers running in support of
'user-auth' because the Task definition did not include any appropriately
labeled containers.` | plugins/aws/ecs_helptext.go | 0.852675 | 0.521593 | ecs_helptext.go | starcoder |
package runtime
import "unsafe"
// This garbage collector implementation allows TinyGo to use an external memory allocator.
// It appends a header to the end of every allocation which the garbage collector uses for tracking purposes.
// This is also a conservative collector.
const (
gcDebug = false
gcAsserts = false
)
func initHeap() {}
// memTreap is a treap which is used to track allocations for the garbage collector.
type memTreap struct {
root *memTreapNode
}
// printNode recursively prints a subtree at a given indentation depth.
func (t *memTreap) printNode(n *memTreapNode, depth int) {
for i := 0; i < depth; i++ {
print(" ")
}
println(n, n.priority())
if n == nil {
return
}
if gcAsserts && n.parent == nil && t.root != n {
runtimePanic("parent missing")
}
t.printNode(n.left, depth+1)
t.printNode(n.right, depth+1)
}
// print the treap.
func (t *memTreap) print() {
println("treap:")
t.printNode(t.root, 1)
}
// empty returns whether the treap contains any nodes.
func (t *memTreap) empty() bool {
return t.root == nil
}
// minAddr returns the lowest address contained in an allocation in the treap.
func (t *memTreap) minAddr() uintptr {
// Find the rightmost node.
n := t.root
for n.right != nil {
n = n.right
}
// The lowest address is the base of the rightmost node.
return uintptr(unsafe.Pointer(&n.base))
}
// maxAddr returns the highest address contained in an allocation in the treap.
func (t *memTreap) maxAddr() uintptr {
// Find the leftmost node.
n := t.root
for n.left != nil {
n = n.left
}
// The highest address is the end of the leftmost node.
return uintptr(unsafe.Pointer(&n.base)) + n.size
}
// rotateRight does a right rotation of p and q.
// https://en.wikipedia.org/wiki/Tree_rotation#/media/File:Tree_rotation.png
func (t *memTreap) rotateRight(p, q *memTreapNode) {
if t.root == q {
t.root = p
} else {
*q.parentSlot() = p
}
//a := p.left
b := p.right
//c := q.right
p.parent = q.parent
p.right = q
q.parent = p
q.left = b
if b != nil {
b.parent = q
}
}
// rotateLeft does a left rotation of p and q.
// https://en.wikipedia.org/wiki/Tree_rotation#/media/File:Tree_rotation.png
func (t *memTreap) rotateLeft(p, q *memTreapNode) {
if t.root == p {
t.root = q
} else {
*p.parentSlot() = q
}
//a := p.left
b := q.left
//c := q.right
q.parent = p.parent
q.left = p
p.parent = q
p.right = b
if b != nil {
b.parent = p
}
}
// rotate rotates a lower node up to its parent.
// The node n must be a child of m, and will be the parent of m after the rotation.
func (t *memTreap) rotate(n, m *memTreapNode) {
// https://en.wikipedia.org/wiki/Tree_rotation#/media/File:Tree_rotation.png
if uintptr(unsafe.Pointer(n)) > uintptr(unsafe.Pointer(m)) {
t.rotateRight(n, m)
} else {
t.rotateLeft(m, n)
}
}
// insert a node into the treap.
func (t *memTreap) insert(n *memTreapNode) {
if gcAsserts && (n.parent != nil || n.left != nil || n.right != nil) {
runtimePanic("tried to insert unzeroed treap node")
}
if t.root == nil {
// This is the first node, and can be inserted directly into the root.
t.root = n
return
}
// Insert like a regular binary search tree.
for n.parent = t.root; *n.parentSlot() != nil; n.parent = *n.parentSlot() {
}
*n.parentSlot() = n
// Rotate the tree to restore the heap invariant.
priority := n.priority()
for n.parent != nil && priority > n.parent.priority() {
t.rotate(n, n.parent)
}
}
// lookupAddr finds the treap node with the allocation containing the specified address.
// If the address is not contained in any allocations in this treap, nil is returned.
// NOTE: fields of memTreapNodes are not considered part of the allocations.
func (t *memTreap) lookupAddr(addr uintptr) *memTreapNode {
n := t.root
for n != nil && !n.contains(addr) {
if addr > uintptr(unsafe.Pointer(n)) {
n = n.left
} else {
n = n.right
}
}
return n
}
// replace a node with another node on the treap.
func (t *memTreap) replace(old, new *memTreapNode) {
if gcAsserts && (old == nil || new == nil) {
if gcDebug {
println("tried to replace:", old, "->", new)
}
runtimePanic("invalid replacement")
}
if gcAsserts && old.parent == nil && old != t.root {
if gcDebug {
println("tried to replace:", old, "->", new)
t.print()
}
runtimePanic("corrupted tree")
}
new.parent = old.parent
if old == t.root {
t.root = new
} else {
*new.parentSlot() = new
}
}
// remove a node from the treap.
// This does not free the allocation.
func (t *memTreap) remove(n *memTreapNode) {
scan:
for {
switch {
case n.left == nil && n.right == nil && n.parent == nil:
// This is the only node - uproot it.
t.root = nil
break scan
case n.left == nil && n.right == nil:
// There are no nodes beneath here, so just remove this node from the parent.
*n.parentSlot() = nil
break scan
case n.left != nil && n.right == nil:
t.replace(n, n.left)
break scan
case n.right != nil && n.left == nil:
t.replace(n, n.right)
break scan
default:
// Rotate this node downward.
if n.left.priority() > n.right.priority() {
t.rotate(n.left, n)
} else {
t.rotate(n.right, n)
}
}
}
n.left = nil
n.right = nil
n.parent = nil
}
// memTreapNode is a treap node used to track allocations for the garbage collector.
// This struct is prepended to every allocation.
type memTreapNode struct {
parent, left, right *memTreapNode
size uintptr
base struct{}
}
// priority computes a pseudo-random priority value for this treap node.
// This value is a fibonacci hash (https://en.wikipedia.org/wiki/Hash_function#Fibonacci_hashing) of the node's memory address.
func (n *memTreapNode) priority() uintptr {
// Select fibonacci multiplier for this bit-width.
var fibonacciMultiplier uint64
switch 8 * unsafe.Sizeof(uintptr(0)) {
case 16:
fibonacciMultiplier = 40503
case 32:
fibonacciMultiplier = 2654435769
case 64:
fibonacciMultiplier = 11400714819323198485
default:
runtimePanic("invalid size of uintptr")
}
// Hash the pointer.
return uintptr(fibonacciMultiplier) * uintptr(unsafe.Pointer(n))
}
// contains returns whether this allocation contains a given address.
func (n *memTreapNode) contains(addr uintptr) bool {
return addr >= uintptr(unsafe.Pointer(&n.base)) && addr < uintptr(unsafe.Pointer(&n.base))+n.size
}
// parentSlot returns a pointer to the parent's reference to this node.
func (n *memTreapNode) parentSlot() **memTreapNode {
if uintptr(unsafe.Pointer(n)) > uintptr(unsafe.Pointer(n.parent)) {
return &n.parent.left
} else {
return &n.parent.right
}
}
// memScanQueue is a queue of memTreapNodes.
type memScanQueue struct {
head, tail *memTreapNode
}
// push adds an allocation onto the queue.
func (q *memScanQueue) push(n *memTreapNode) {
if gcAsserts && (n.left != nil || n.right != nil || n.parent != nil) {
runtimePanic("tried to push a treap node that is in use")
}
if q.head == nil {
q.tail = n
} else {
q.head.left = n
}
n.right = q.head
q.head = n
}
// pop removes the next allocation from the queue.
func (q *memScanQueue) pop() *memTreapNode {
n := q.tail
q.tail = n.left
if q.tail == nil {
q.head = nil
}
n.left = nil
n.right = nil
return n
}
// empty returns whether the queue contains any allocations.
func (q *memScanQueue) empty() bool {
return q.tail == nil
}
// allocations is a treap containing all allocations.
var allocations memTreap
// usedMem is the total amount of allocated memory (including the space taken up by memory treap nodes).
var usedMem uintptr
// firstPtr and lastPtr are the bounds of memory used by the heap.
// They are computed before the collector starts marking, and are used to quickly eliminate false positives.
var firstPtr, lastPtr uintptr
// scanQueue is a queue of marked allocations to scan.
var scanQueue memScanQueue
// mark searches for an allocation containing the given address and marks it if found.
func mark(addr uintptr) bool {
if addr < firstPtr || addr > lastPtr {
// Pointer is outside of allocated bounds.
return false
}
node := allocations.lookupAddr(addr)
if node != nil {
if gcDebug {
println("mark:", addr)
}
allocations.remove(node)
scanQueue.push(node)
}
return node != nil
}
func markRoot(addr uintptr, root uintptr) {
marked := mark(root)
if gcDebug {
if marked {
println("marked root:", root, "at", addr)
} else if addr != 0 {
println("did not mark root:", root, "at", addr)
}
}
}
func markRoots(start uintptr, end uintptr) {
scan(start, end)
}
// scan loads all pointer-aligned words and marks any pointers that it finds.
func scan(start uintptr, end uintptr) {
// Align start and end pointers.
start = (start + unsafe.Alignof(unsafe.Pointer(nil)) - 1) &^ (unsafe.Alignof(unsafe.Pointer(nil)) - 1)
end &^= unsafe.Alignof(unsafe.Pointer(nil)) - 1
// Mark all pointers.
for ptr := start; ptr < end; ptr += unsafe.Alignof(unsafe.Pointer(nil)) {
mark(*(*uintptr)(unsafe.Pointer(ptr)))
}
}
// scan marks all allocations referenced by this allocation.
// This should only be invoked by the garbage collector.
func (n *memTreapNode) scan() {
start := uintptr(unsafe.Pointer(&n.base))
end := start + n.size
scan(start, end)
}
// destroy removes and frees all allocations in the treap.
func (t *memTreap) destroy() {
n := t.root
for n != nil {
switch {
case n.left != nil:
// Destroy the left subtree.
n = n.left
case n.right != nil:
// Destroy the right subtree.
n = n.right
default:
// This is a leaf node, so delete it and jump back to the parent.
// Save the parent to jump back to.
parent := n.parent
if parent != nil {
*n.parentSlot() = nil
} else {
t.root = nil
}
// Update used memory.
usedMem -= unsafe.Sizeof(memTreapNode{}) + n.size
if gcDebug {
println("collecting:", &n.base, "size:", n.size)
println("used memory:", usedMem)
}
// Free the node.
extfree(unsafe.Pointer(n))
// Jump back to the parent node.
n = parent
}
}
}
// gcrunning is used by gcAsserts to determine whether the garbage collector is running.
// This is used to detect if the collector is invoking itself or trying to allocate memory.
var gcrunning bool
// activeMem is a treap used to store marked allocations which have already been scanned.
// This is only used when the garbage collector is running.
var activeMem memTreap
func GC() {
if gcDebug {
println("running GC")
}
if allocations.empty() {
// Skip collection because the heap is empty.
if gcDebug {
println("nothing to collect")
}
return
}
if gcAsserts {
if gcrunning {
runtimePanic("GC called itself")
}
gcrunning = true
}
if gcDebug {
println("pre-GC allocations:")
allocations.print()
}
// Before scanning, find the lowest and highest allocated pointers.
// These can be quickly compared against to eliminate most false positives.
firstPtr, lastPtr = allocations.minAddr(), allocations.maxAddr()
// Start by scanning all of the global variables and the stack.
markGlobals()
markStack()
// Scan all referenced allocations, building a new treap with marked allocations.
// The marking process deletes the allocations from the old allocations treap, so they are only queued once.
for !scanQueue.empty() {
// Pop a marked node off of the scan queue.
n := scanQueue.pop()
// Scan and mark all nodes that this references.
n.scan()
// Insert this node into the new treap.
activeMem.insert(n)
}
// The allocations treap now only contains unreferenced nodes. Destroy them all.
allocations.destroy()
if gcAsserts && !allocations.empty() {
runtimePanic("failed to fully destroy allocations")
}
// Replace the allocations treap with the new treap.
allocations = activeMem
activeMem = memTreap{}
if gcDebug {
println("GC finished")
}
if gcAsserts {
gcrunning = false
}
}
// heapBound is used to control the growth of the heap.
// When the heap exceeds this size, the garbage collector is run.
// If the garbage collector cannot free up enough memory, the bound is doubled until the allocation fits.
var heapBound uintptr = 4 * unsafe.Sizeof(memTreapNode{})
// zeroSizedAlloc is just a sentinel that gets returned when allocating 0 bytes.
var zeroSizedAlloc uint8
// alloc tries to find some free space on the heap, possibly doing a garbage
// collection cycle if needed. If no space is free, it panics.
//go:noinline
func alloc(size uintptr) unsafe.Pointer {
if size == 0 {
return unsafe.Pointer(&zeroSizedAlloc)
}
if gcAsserts && gcrunning {
runtimePanic("allocated inside the garbage collector")
}
// Calculate size of allocation including treap node.
allocSize := unsafe.Sizeof(memTreapNode{}) + size
var gcRan bool
for {
// Try to bound heap growth.
if usedMem+allocSize < usedMem {
if gcDebug {
println("current mem:", usedMem, "alloc size:", allocSize)
}
runtimePanic("target heap size exceeds address space size")
}
if usedMem+allocSize > heapBound {
if !gcRan {
// Run the garbage collector before growing the heap.
if gcDebug {
println("heap reached size limit")
}
GC()
gcRan = true
continue
} else {
// Grow the heap bound to fit the allocation.
for heapBound != 0 && usedMem+allocSize > heapBound {
heapBound <<= 1
}
if heapBound == 0 {
// This is only possible on hosted 32-bit systems.
// Allow the heap bound to encompass everything.
heapBound = ^uintptr(0)
}
if gcDebug {
println("raising heap size limit to", heapBound)
}
}
}
// Allocate the memory.
ptr := extalloc(allocSize)
if ptr == nil {
if gcDebug {
println("extalloc failed")
}
if gcRan {
// Garbage collector was not able to free up enough memory.
runtimePanic("out of memory")
} else {
// Run the garbage collector and try again.
GC()
gcRan = true
continue
}
}
// Initialize the memory treap node.
node := (*memTreapNode)(ptr)
*node = memTreapNode{
size: size,
}
// Insert allocation into the allocations treap.
allocations.insert(node)
// Extract the user's section of the allocation.
ptr = unsafe.Pointer(&node.base)
if gcAsserts && !node.contains(uintptr(ptr)) {
runtimePanic("node is not self-contained")
}
if gcAsserts {
check := allocations.lookupAddr(uintptr(ptr))
if check == nil {
if gcDebug {
println("failed to find:", ptr)
allocations.print()
}
runtimePanic("bad insert")
}
}
// Zero the allocation.
memzero(ptr, size)
// Update used memory.
usedMem += allocSize
if gcDebug {
println("allocated:", uintptr(ptr), "size:", size)
println("used memory:", usedMem)
}
return ptr
}
}
func free(ptr unsafe.Pointer) {
// Currently unimplemented due to bugs in coroutine lowering.
}
func KeepAlive(x interface{}) {
// Unimplemented. Only required with SetFinalizer().
}
func SetFinalizer(obj interface{}, finalizer interface{}) {
// Unimplemented.
} | src/runtime/gc_extalloc.go | 0.71103 | 0.487063 | gc_extalloc.go | starcoder |
package array
import (
"reflect"
)
// InMap check index in Map
func InMap(needle string, haystack []string) bool {
newStack := map[string]struct{}{}
for _, val := range haystack {
newStack[val] = struct{}{}
}
if _, ok := newStack[needle]; ok {
return true
}
return false
}
// InSlice check index in slice
func InSlice(needle string, haystack []string) bool {
for _, val := range haystack {
if val == needle {
return true
}
}
return false
}
// InSliceInt64 check index in slice int64
func InSliceInt64(needle int64, haystack []int64) bool {
for _, val := range haystack {
if val == needle {
return true
}
}
return false
}
// InArray index in array for interface{}
func InArray(val interface{}, array interface{}) (exists bool, index int) {
exists = false
index = -1
switch reflect.TypeOf(array).Kind() {
case reflect.Slice:
s := reflect.ValueOf(array)
for i := 0; i < s.Len(); i++ {
if reflect.DeepEqual(val, s.Index(i).Interface()) {
index = i
exists = true
return
}
}
}
return
}
// In check string in array.
func In(needle string, haystack []string) ([]string, bool) {
newHaystack := make([]string, len(haystack))
copy(newHaystack, haystack)
if len(newHaystack) == 0 {
return newHaystack, false
}
for i, val := range newHaystack {
if val == needle {
newHaystack = append(newHaystack[:i], newHaystack[i+1:]...)
return newHaystack, true
}
}
return newHaystack, false
}
// Diff show difference in two array.
func Diff(s, t []string) []string {
slice1 := make([]string, len(s))
slice2 := make([]string, len(t))
copy(slice1, s)
copy(slice2, t)
v := []string{}
if len(slice1) == 0 && len(slice2) == 0 {
return []string{}
}
if len(slice1) == 0 {
return slice2
}
if len(slice2) == 0 {
return slice1
}
if len(slice1) > len(slice2) {
slice1, slice2 = slice2, slice1
}
for _, val := range slice1 {
if newT, ok := In(val, slice2); ok {
slice2 = newT
continue
}
v = append(v, val)
}
if len(slice2) > 0 {
v = append(v, slice2...)
}
return v
} | array/array.go | 0.632162 | 0.445831 | array.go | starcoder |
package grumpy
var (
// ArithmeticErrorType corresponds to the Python type 'ArithmeticError'.
ArithmeticErrorType = newSimpleType("ArithmeticError", StandardErrorType)
// AssertionErrorType corresponds to the Python type 'AssertionError'.
AssertionErrorType = newSimpleType("AssertionError", StandardErrorType)
// AttributeErrorType corresponds to the Python type 'AttributeError'.
AttributeErrorType = newSimpleType("AttributeError", StandardErrorType)
// BytesWarningType corresponds to the Python type 'BytesWarning'.
BytesWarningType = newSimpleType("BytesWarning", WarningType)
// DeprecationWarningType corresponds to the Python type 'DeprecationWarning'.
DeprecationWarningType = newSimpleType("DeprecationWarning", WarningType)
// EnvironmentErrorType corresponds to the Python type
// 'EnvironmentError'.
EnvironmentErrorType = newSimpleType("EnvironmentError", StandardErrorType)
// EOFErrorType corresponds to the Python type 'EOFError'.
EOFErrorType = newSimpleType("EOFError", StandardErrorType)
// ExceptionType corresponds to the Python type 'Exception'.
ExceptionType = newSimpleType("Exception", BaseExceptionType)
// FutureWarningType corresponds to the Python type 'FutureWarning'.
FutureWarningType = newSimpleType("FutureWarning", WarningType)
// ImportErrorType corresponds to the Python type 'ImportError'.
ImportErrorType = newSimpleType("ImportError", StandardErrorType)
// ImportWarningType corresponds to the Python type 'ImportWarning'.
ImportWarningType = newSimpleType("ImportWarning", WarningType)
// IndexErrorType corresponds to the Python type 'IndexError'.
IndexErrorType = newSimpleType("IndexError", LookupErrorType)
// IOErrorType corresponds to the Python type 'IOError'.
IOErrorType = newSimpleType("IOError", EnvironmentErrorType)
// KeyboardInterruptType corresponds to the Python type 'KeyboardInterrupt'.
KeyboardInterruptType = newSimpleType("KeyboardInterrupt", BaseExceptionType)
// KeyErrorType corresponds to the Python type 'KeyError'.
KeyErrorType = newSimpleType("KeyError", LookupErrorType)
// LookupErrorType corresponds to the Python type 'LookupError'.
LookupErrorType = newSimpleType("LookupError", StandardErrorType)
// MemoryErrorType corresponds to the Python type 'MemoryError'.
MemoryErrorType = newSimpleType("MemoryError", StandardErrorType)
// NameErrorType corresponds to the Python type 'NameError'.
NameErrorType = newSimpleType("NameError", StandardErrorType)
// NotImplementedErrorType corresponds to the Python type
// 'NotImplementedError'.
NotImplementedErrorType = newSimpleType("NotImplementedError", RuntimeErrorType)
// OSErrorType corresponds to the Python type 'OSError'.
OSErrorType = newSimpleType("OSError", EnvironmentErrorType)
// OverflowErrorType corresponds to the Python type 'OverflowError'.
OverflowErrorType = newSimpleType("OverflowError", ArithmeticErrorType)
// PendingDeprecationWarningType corresponds to the Python type 'PendingDeprecationWarning'.
PendingDeprecationWarningType = newSimpleType("PendingDeprecationWarning", WarningType)
// ReferenceErrorType corresponds to the Python type 'ReferenceError'.
ReferenceErrorType = newSimpleType("ReferenceError", StandardErrorType)
// RuntimeErrorType corresponds to the Python type 'RuntimeError'.
RuntimeErrorType = newSimpleType("RuntimeError", StandardErrorType)
// RuntimeWarningType corresponds to the Python type 'RuntimeWarning'.
RuntimeWarningType = newSimpleType("RuntimeWarning", WarningType)
// StandardErrorType corresponds to the Python type 'StandardError'.
StandardErrorType = newSimpleType("StandardError", ExceptionType)
// StopIterationType corresponds to the Python type 'StopIteration'.
StopIterationType = newSimpleType("StopIteration", ExceptionType)
// SyntaxErrorType corresponds to the Python type 'SyntaxError'.
SyntaxErrorType = newSimpleType("SyntaxError", StandardErrorType)
// SyntaxWarningType corresponds to the Python type 'SyntaxWarning'.
SyntaxWarningType = newSimpleType("SyntaxWarning", WarningType)
// SystemErrorType corresponds to the Python type 'SystemError'.
SystemErrorType = newSimpleType("SystemError", StandardErrorType)
// SystemExitType corresponds to the Python type 'SystemExit'.
SystemExitType = newSimpleType("SystemExit", BaseExceptionType)
// TypeErrorType corresponds to the Python type 'TypeError'.
TypeErrorType = newSimpleType("TypeError", StandardErrorType)
// UnboundLocalErrorType corresponds to the Python type
// 'UnboundLocalError'.
UnboundLocalErrorType = newSimpleType("UnboundLocalError", NameErrorType)
// UnicodeDecodeErrorType corresponds to the Python type 'UnicodeDecodeError'.
UnicodeDecodeErrorType = newSimpleType("UnicodeDecodeError", ValueErrorType)
// UnicodeEncodeErrorType corresponds to the Python type 'UnicodeEncodeError'.
UnicodeEncodeErrorType = newSimpleType("UnicodeEncodeError", ValueErrorType)
// UnicodeErrorType corresponds to the Python type 'UnicodeError'.
UnicodeErrorType = newSimpleType("UnicodeError", ValueErrorType)
// UnicodeWarningType corresponds to the Python type 'UnicodeWarning'.
UnicodeWarningType = newSimpleType("UnicodeWarning", WarningType)
// UserWarningType corresponds to the Python type 'UserWarning'.
UserWarningType = newSimpleType("UserWarning", WarningType)
// ValueErrorType corresponds to the Python type 'ValueError'.
ValueErrorType = newSimpleType("ValueError", StandardErrorType)
// WarningType corresponds to the Python type 'Warning'.
WarningType = newSimpleType("Warning", ExceptionType)
// ZeroDivisionErrorType corresponds to the Python type
// 'ZeroDivisionError'.
ZeroDivisionErrorType = newSimpleType("ZeroDivisionError", ArithmeticErrorType)
)
func systemExitInit(f *Frame, o *Object, args Args, kwargs KWArgs) (*Object, *BaseException) {
baseExceptionInit(f, o, args, kwargs)
code := None
if len(args) > 0 {
code = args[0]
}
if raised := SetAttr(f, o, NewStr("code"), code); raised != nil {
return nil, raised
}
return None, nil
}
func initSystemExitType(map[string]*Object) {
SystemExitType.slots.Init = &initSlot{systemExitInit}
} | runtime/exceptions.go | 0.610918 | 0.470919 | exceptions.go | starcoder |
package geom
import (
"log"
"github.com/badu/term"
"github.com/badu/term/style"
)
// RootRectangle interface
type RootRectangle interface {
Orientation() style.Orientation
HasRows() bool
NumRows() int
Rows() PixelsMatrix
Row(index int) Pixels
HasColumns() bool
NumColumns() int
Columns() PixelsMatrix
Column(index int) Pixels
}
type root struct {
orientation style.Orientation // orientation dictates pixel slices above (rows or cols). Default orientation is style.Vertical
topCorner *term.Position // The current top corner of the Rectangle
bottomCorner *term.Position // The current top corner of the Rectangle
pxs map[int]px // map[position_hash]pixel, for fast access to pixels
rows PixelsMatrix // when organized by rows, for fast access to rows
cols PixelsMatrix // when organized by cols, for fast access to columns
Marks *map[string]*term.Position // temporary, to figure it out
maxCol int // temporary
}
// Orientation
func (r *root) Orientation() style.Orientation {
return r.orientation
}
// HasRows
func (r *root) HasRows() bool {
return r.orientation == style.Vertical
}
// Row returns the row of pixels at index (absolute, starting with zero)
func (r *root) Row(index int) Pixels {
if r.HasColumns() {
if index <= 0 {
if Debug {
log.Println("bad call to Rectangle.Row : bad index")
}
return nil
}
if len(r.cols) <= 0 {
if Debug {
log.Println("bad call to Rectangle.Row : horizontal orientation, but columns are empty")
}
}
var result Pixels
for _, column := range r.cols {
for idx, row := range column {
if idx == index {
result = append(result, row)
}
}
}
return result
}
// vertical orientation
if index <= 0 {
if Debug {
log.Println("bad call to Rectangle.Row : bad index")
}
return nil
}
if index-1 >= len(r.rows) {
if Debug {
log.Println("bad call to Rectangle.Row : index outside number of rows")
}
return nil
}
return r.rows[index-1]
}
// NumRows - depends on orientation
func (r *root) NumRows() int {
if r.HasColumns() {
if len(r.cols) <= 0 {
if Debug {
log.Println("bad call to Rectangle.NumRows : cannot calculate number of rows (columns are empty)")
}
return 0
}
return len(r.cols[0])
}
return len(r.rows)
}
// Rows
func (r *root) Rows() PixelsMatrix {
if r.HasColumns() {
if len(r.cols) <= 0 {
if Debug {
log.Println("bad call to Rectangle.Rows : cannot return rotated (columns are empty)")
}
return nil
}
return rotate(r.cols)
}
return r.rows
}
// HasColumns
func (r *root) HasColumns() bool {
return r.orientation == style.Horizontal
}
// Column returns the column of pixels at index (absolute, starting with zero)
func (r *root) Column(index int) Pixels {
if r.HasRows() {
// vertical orientation column
if index <= 0 {
if Debug {
log.Println("bad call to Rectangle.Column : bad index")
}
return nil
}
if len(r.rows) <= 0 {
if Debug {
log.Println("bad call to Rectangle.Column : vertical orientation, but rows are empty")
}
}
var result Pixels
for _, row := range r.rows {
for idx, column := range row {
if idx == index {
result = append(result, column)
}
}
}
return result
}
// horizontal direction
if index <= 0 {
if Debug {
log.Println("bad call to Rectangle.Column : bad index")
}
return nil
}
if index-1 > len(r.cols) {
if Debug {
log.Println("bad call to Rectangle.Column : index outside number of columns")
}
return nil
}
return r.cols[index-1]
}
// NumColumns - depends on orientation
func (r *root) NumColumns() int {
if r.HasRows() {
if len(r.rows) <= 0 {
if Debug {
log.Println("bad call to Rectangle.NumColumns : cannot calculate number of columns (rows are empty)")
}
return 0
}
return len(r.rows[0])
}
return len(r.cols)
}
// Columns
func (r *root) Columns() PixelsMatrix {
if r.HasRows() {
if len(r.rows) <= 0 {
if Debug {
log.Println("bad call to Rectangle.Columns : cannot return rotated (rows are empty)")
}
return nil
}
return rotate(r.rows)
}
return r.cols
}
// resize on a rectangle updates the new size of this object.
// If it has a stroke width this will cause it to Refresh.
func (r *root) resize(size *term.Size) {
switch r.orientation {
case style.Vertical:
if len(r.rows) == 0 {
if Debug {
log.Println("bad root rectangle (no height)")
}
return
}
if len(r.rows[0]) == 0 {
if Debug {
log.Println("bad root rectangle (no width)")
}
return
}
numRows, numColumns := len(r.rows), len(r.rows[0])
if numColumns == size.Columns && numRows == size.Rows {
return
}
r.verticalResize(size.Columns, size.Rows)
case style.Horizontal:
if len(r.cols) == 0 {
if Debug {
log.Println("bad root rectangle (no width)")
}
return
}
if len(r.cols[0]) == 0 {
if Debug {
log.Println("bad root rectangle (no height)")
}
return
}
numColumns, numRows := len(r.cols), len(r.cols[0])
if numColumns == size.Columns && numRows == size.Rows {
return
}
r.horizontalResize(size.Columns, size.Rows)
}
}
// startup
func (r *root) startup(engine term.Engine) {
r.pxs = make(map[int]px)
pixels := make([]term.PixelGetter, 0)
columns := r.bottomCorner.Column - r.topCorner.Column
rows := r.bottomCorner.Row - r.topCorner.Row
switch r.orientation {
case style.Vertical:
r.rows = make(PixelsMatrix, rows)
for row := 0; row < rows; row++ {
r.rows[row] = make(Pixels, columns)
for column := 0; column < columns; column++ {
pixel := newPixel(column, row)
r.pxs[pixel.PositionHash()] = pixel
pixels = append(pixels, &pixel)
r.rows[row][column] = pixel
}
}
case style.Horizontal:
r.cols = make(PixelsMatrix, columns)
for column := 0; column < columns; column++ {
r.cols[column] = make(Pixels, rows)
for row := 0; row < rows; row++ {
pixel := newPixel(column, row)
r.pxs[pixel.PositionHash()] = pixel
pixels = append(pixels, &pixel)
r.cols[column][row] = pixel
}
}
}
engine.ActivePixels(pixels)
}
func (r *root) horizontalResize(newRows, newColumns int) { // Note : the params are inverted (rows, columns)
currRows := len(r.cols)
if currRows < newRows { // grow or shrink our rows
r.cols = append(r.cols, make(PixelsMatrix, newRows-currRows)...)
} else if currRows > newRows {
r.cols = r.cols[:newRows]
// TODO : announce dead pixels
}
for column := range r.cols { // iterate through our columns to grow or shrink their rows
currRows := len(r.cols[column])
if currRows < newColumns { // grow or shrink our rows
r.cols[column] = append(r.cols[column], make([]Cell, newColumns-currRows)...)
for row := currRows; row < newColumns; row++ {
pixel := newPixel(column, row)
r.pxs[pixel.PositionHash()] = pixel
r.cols[column][row] = pixel
}
} else if currRows > newColumns {
r.cols[column] = r.cols[column][:newColumns]
// TODO : announce dead pixels
}
}
}
func (r *root) verticalResize(newColumns, newRows int) { // Note : the params are inverted (columns, rows)
currRows := len(r.rows)
if currRows < newRows { // grow or shrink our rows
r.rows = append(r.rows, make(PixelsMatrix, newRows-currRows)...)
} else if currRows > newRows {
r.rows = r.rows[:newRows]
// TODO : announce dead pixels
}
for row := range r.rows { // iterate through our rows to grow or shrink their columns
currColumn := len(r.rows[row])
if currColumn < newColumns { // grow or shrink our columns
r.rows[row] = append(r.rows[row], make([]Cell, newColumns-currColumn)...)
for column := currColumn; column < newColumns; column++ {
pixel := newPixel(column, row)
r.pxs[pixel.PositionHash()] = pixel
r.rows[row][column] = pixel
}
} else if currColumn > newColumns {
r.rows[row] = r.rows[row][:newColumns]
// TODO : announce dead pixels
}
}
}
func rotate(matrix PixelsMatrix) PixelsMatrix {
m, n := len(matrix), len(matrix[0])
result := make(PixelsMatrix, n)
for i := 0; i < n; i++ {
result[i] = make(Pixels, m)
}
for i := 0; i < m; i++ {
for j := 0; j < n; j++ {
result[j][m-i-1] = matrix[i][j]
}
}
return result
} | geom/root_rectangle.go | 0.751466 | 0.512327 | root_rectangle.go | starcoder |
package search_query_injection
import (
"github.com/threagile/threagile/model"
)
func Category() model.RiskCategory {
return model.RiskCategory{
Id: "search-query-injection",
Title: "Search-Query Injection",
Description: "When a search engine server is accessed Search-Query Injection risks might arise." +
"<br><br>See for example <a href=\"https://github.com/veracode-research/solr-injection\">https://github.com/veracode-research/solr-injection</a> and " +
"<a href=\"https://github.com/veracode-research/solr-injection/blob/master/slides/DEFCON-27-Michael-Stepankin-Apache-Solr-Injection.pdf\">https://github.com/veracode-research/solr-injection/blob/master/slides/DEFCON-27-Michael-Stepankin-Apache-Solr-Injection.pdf</a> " +
"for more details (here related to Solr, but in general showcasing the topic of search query injections).",
Impact: "If this risk remains unmitigated, attackers might be able to read more data from the search index and " +
"eventually further escalate towards a deeper system penetration via code executions.",
ASVS: "V5 - Validation, Sanitization and Encoding Verification Requirements",
CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Injection_Prevention_Cheat_Sheet.html",
Action: "Search-Query Injection Prevention",
Mitigation: "Try to use libraries that properly encode search query meta characters in searches and don't expose the " +
"query unfiltered to the caller. " +
"When a third-party product is used instead of custom developed software, check if the product applies the proper mitigation and ensure a reasonable patch-level.",
Check: "Are recommendations from the linked cheat sheet and referenced ASVS chapter applied?",
Function: model.Development,
STRIDE: model.Tampering,
DetectionLogic: "In-scope clients accessing search engine servers via typical search access protocols.",
RiskAssessment: "The risk rating depends on the sensitivity of the search engine server itself and of the data assets processed or stored.",
FalsePositives: "Server engine queries by search values not consisting of parts controllable by the caller can be considered " +
"as false positives after individual review.",
ModelFailurePossibleReason: false,
CWE: 74,
}
}
func GenerateRisks() []model.Risk {
risks := make([]model.Risk, 0)
for _, id := range model.SortedTechnicalAssetIDs() {
technicalAsset := model.ParsedModelRoot.TechnicalAssets[id]
if technicalAsset.Technology == model.SearchEngine || technicalAsset.Technology == model.SearchIndex {
incomingFlows := model.IncomingTechnicalCommunicationLinksMappedByTargetId[technicalAsset.Id]
for _, incomingFlow := range incomingFlows {
if model.ParsedModelRoot.TechnicalAssets[incomingFlow.SourceId].OutOfScope {
continue
}
if incomingFlow.Protocol == model.HTTP || incomingFlow.Protocol == model.HTTPS ||
incomingFlow.Protocol == model.BINARY || incomingFlow.Protocol == model.BINARY_encrypted {
likelihood := model.VeryLikely
if incomingFlow.Usage == model.DevOps {
likelihood = model.Likely
}
risks = append(risks, createRisk(technicalAsset, incomingFlow, likelihood))
}
}
}
}
return risks
}
func SupportedTags() []string {
return []string{}
}
func createRisk(technicalAsset model.TechnicalAsset, incomingFlow model.CommunicationLink, likelihood model.RiskExploitationLikelihood) model.Risk {
caller := model.ParsedModelRoot.TechnicalAssets[incomingFlow.SourceId]
title := "<b>Search Query Injection</b> risk at <b>" + caller.Title + "</b> against search engine server <b>" + technicalAsset.Title + "</b>" +
" via <b>" + incomingFlow.Title + "</b>"
impact := model.MediumImpact
if technicalAsset.HighestConfidentiality() == model.StrictlyConfidential || technicalAsset.HighestIntegrity() == model.MissionCritical {
impact = model.HighImpact
} else if technicalAsset.HighestConfidentiality() <= model.Internal && technicalAsset.HighestIntegrity() == model.Operational {
impact = model.LowImpact
}
risk := model.Risk{
Category: Category(),
Severity: model.CalculateSeverity(likelihood, impact),
ExploitationLikelihood: likelihood,
ExploitationImpact: impact,
Title: title,
MostRelevantTechnicalAssetId: caller.Id,
MostRelevantCommunicationLinkId: incomingFlow.Id,
DataBreachProbability: model.Probable,
DataBreachTechnicalAssetIDs: []string{technicalAsset.Id},
}
risk.SyntheticId = risk.Category.Id + "@" + caller.Id + "@" + technicalAsset.Id + "@" + incomingFlow.Id
return risk
} | risks/built-in/search-query-injection/search-query-injection-rule.go | 0.773045 | 0.522019 | search-query-injection-rule.go | starcoder |
Simplifying and Isolating Failure-Inducing Input
<NAME> (2002)
https://www.st.cs.uni-saarland.de/papers/tse2002/tse2002.pdf
*/
package quickcheck
type result int
const (
// Pass indicates the test passed
ddPass result = iota
// Fail indicates the expected test failure was produced
ddFail
// Unresolved indicates the test failed for a different reason
ddUnresolved
)
// looks to minimize data so that f will fail
func minimize(data []Step, f func(d []Step) ([]Result, result)) []Result {
if ret, res := f(nil); res == ddFail {
// that was easy..
return ret
}
if _, res := f(data); res == ddPass {
panic("ddmin: function must fail on data")
}
return ddmin(data, f, 2)
}
func ddmin(data []Step, f func(d []Step) ([]Result, result), granularity int) []Result {
var res []Result
var ret result
mainloop:
for len(data) >= 1 {
subsets := makeSubsets(data, granularity)
for _, subset := range subsets {
if res, ret = f(subset); ret == ddFail {
// fake tail recursion
data = subset
granularity = 2
continue mainloop
}
}
b := make([]Step, len(data))
for i := range subsets {
complement := makeComplement(subsets, i, b[:0])
if res, ret = f(complement); ret == ddFail {
granularity--
if granularity < 2 {
granularity = 2
}
// fake tail recursion
data = complement
continue mainloop
}
}
if granularity == len(data) {
res, _ = f(data)
return res
}
granularity *= 2
if granularity > len(data) {
granularity = len(data)
}
}
return res
}
func makeSubsets(data []Step, granularity int) [][]Step {
var subsets [][]Step
size := len(data) / granularity
for i := 0; i < granularity-1; i++ {
subsets = append(subsets, data[:size])
data = data[size:]
}
// data might be slightly larger than size due to round-off error, but we don't care
subsets = append(subsets, data)
return subsets
}
func makeComplement(subsets [][]Step, n int, b []Step) []Step {
for i, s := range subsets {
if i == n {
continue
}
b = append(b, s...)
}
return b
} | ddmin.go | 0.640411 | 0.519156 | ddmin.go | starcoder |
package collections
//--------------------
// IMPORTS
//--------------------
import (
"fmt"
"github.com/tideland/golib/errors"
)
//--------------------
// STACK
//--------------------
// stack implements the Stack interface.
type stack struct {
values []interface{}
}
// NewStack creates a stack with the passed values
// as initial content.
func NewStack(vs ...interface{}) Stack {
return &stack{
values: vs,
}
}
// Push implements the Stack interface.
func (s *stack) Push(vs ...interface{}) {
s.values = append(s.values, vs...)
}
// Pop implements the Stack interface.
func (s *stack) Pop() (interface{}, error) {
lv := len(s.values)
if lv == 0 {
return nil, errors.New(ErrEmpty, errorMessages)
}
v := s.values[lv-1]
s.values = s.values[:lv-1]
return v, nil
}
// Peek implements the Stack interface.
func (s stack) Peek() (interface{}, error) {
lv := len(s.values)
if lv == 0 {
return nil, errors.New(ErrEmpty, errorMessages)
}
v := s.values[lv-1]
return v, nil
}
// All implements the Stack interface.
func (s *stack) All() []interface{} {
sl := len(s.values)
all := make([]interface{}, sl)
copy(all, s.values)
return all
}
// AllReverse implements the Stack interface.
func (s *stack) AllReverse() []interface{} {
sl := len(s.values)
all := make([]interface{}, sl)
for i, value := range s.values {
all[sl-1-i] = value
}
return all
}
// Len implements the Stack interface.
func (s *stack) Len() int {
return len(s.values)
}
// Deflate implements the Stack interface.
func (s *stack) Deflate() {
s.values = []interface{}{}
}
// Deflate implements the Stringer interface.
func (s *stack) String() string {
return fmt.Sprintf("%v", s.values)
}
//--------------------
// STRING STACK
//--------------------
// stringStack implements the StringStack interface.
type stringStack struct {
values []string
}
// NewStringStack creates a string stack with the passed values
// as initial content.
func NewStringStack(vs ...string) StringStack {
return &stringStack{
values: vs,
}
}
// Push implements the StringStack interface.
func (s *stringStack) Push(vs ...string) {
s.values = append(s.values, vs...)
}
// Pop implements the StringStack interface.
func (s *stringStack) Pop() (string, error) {
lv := len(s.values)
if lv == 0 {
return "", errors.New(ErrEmpty, errorMessages)
}
v := s.values[lv-1]
s.values = s.values[:lv-1]
return v, nil
}
// Peek implements the StringStack interface.
func (s *stringStack) Peek() (string, error) {
lv := len(s.values)
if lv == 0 {
return "", errors.New(ErrEmpty, errorMessages)
}
v := s.values[lv-1]
return v, nil
}
// All implements the StringStack interface.
func (s *stringStack) All() []string {
sl := len(s.values)
all := make([]string, sl)
copy(all, s.values)
return all
}
// AllReverse implements the StringStack interface.
func (s *stringStack) AllReverse() []string {
sl := len(s.values)
all := make([]string, sl)
for i, value := range s.values {
all[sl-1-i] = value
}
return all
}
// Len implements the Base interface.
func (s *stringStack) Len() int {
return len(s.values)
}
// Deflate implements the Base interface.
func (s *stringStack) Deflate() {
s.values = []string{}
}
// Deflate implements the Stringer interface.
func (s *stringStack) String() string {
return fmt.Sprintf("%v", s.values)
}
// EOF | vendor/github.com/tideland/golib/collections/stacks.go | 0.710126 | 0.466967 | stacks.go | starcoder |
package v1beta1
import (
v1beta1 "github.com/kubeless/kinesis-trigger/pkg/apis/kubeless/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
)
// KinesisTriggerLister helps list KinesisTriggers.
type KinesisTriggerLister interface {
// List lists all KinesisTriggers in the indexer.
List(selector labels.Selector) (ret []*v1beta1.KinesisTrigger, err error)
// KinesisTriggers returns an object that can list and get KinesisTriggers.
KinesisTriggers(namespace string) KinesisTriggerNamespaceLister
KinesisTriggerListerExpansion
}
// kinesisTriggerLister implements the KinesisTriggerLister interface.
type kinesisTriggerLister struct {
indexer cache.Indexer
}
// NewKinesisTriggerLister returns a new KinesisTriggerLister.
func NewKinesisTriggerLister(indexer cache.Indexer) KinesisTriggerLister {
return &kinesisTriggerLister{indexer: indexer}
}
// List lists all KinesisTriggers in the indexer.
func (s *kinesisTriggerLister) List(selector labels.Selector) (ret []*v1beta1.KinesisTrigger, err error) {
err = cache.ListAll(s.indexer, selector, func(m interface{}) {
ret = append(ret, m.(*v1beta1.KinesisTrigger))
})
return ret, err
}
// KinesisTriggers returns an object that can list and get KinesisTriggers.
func (s *kinesisTriggerLister) KinesisTriggers(namespace string) KinesisTriggerNamespaceLister {
return kinesisTriggerNamespaceLister{indexer: s.indexer, namespace: namespace}
}
// KinesisTriggerNamespaceLister helps list and get KinesisTriggers.
type KinesisTriggerNamespaceLister interface {
// List lists all KinesisTriggers in the indexer for a given namespace.
List(selector labels.Selector) (ret []*v1beta1.KinesisTrigger, err error)
// Get retrieves the KinesisTrigger from the indexer for a given namespace and name.
Get(name string) (*v1beta1.KinesisTrigger, error)
KinesisTriggerNamespaceListerExpansion
}
// kinesisTriggerNamespaceLister implements the KinesisTriggerNamespaceLister
// interface.
type kinesisTriggerNamespaceLister struct {
indexer cache.Indexer
namespace string
}
// List lists all KinesisTriggers in the indexer for a given namespace.
func (s kinesisTriggerNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.KinesisTrigger, err error) {
err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
ret = append(ret, m.(*v1beta1.KinesisTrigger))
})
return ret, err
}
// Get retrieves the KinesisTrigger from the indexer for a given namespace and name.
func (s kinesisTriggerNamespaceLister) Get(name string) (*v1beta1.KinesisTrigger, error) {
obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(v1beta1.Resource("kinesistrigger"), name)
}
return obj.(*v1beta1.KinesisTrigger), nil
} | pkg/client/listers/kubeless/v1beta1/kinesistrigger.go | 0.609408 | 0.425367 | kinesistrigger.go | starcoder |
package fn
import (
mat "github.com/nlpodyssey/spago/pkg/mat32"
"github.com/nlpodyssey/spago/pkg/mat32/floatutils"
matsort "github.com/nlpodyssey/spago/pkg/mat32/sort"
"sort"
)
// SparseMax function implementation, based on https://github.com/gokceneraslan/SparseMax.torch
type SparseMax struct {
x Operand
y mat.Matrix // initialized during the forward pass, required by the backward pass
}
var _ Function = &SparseMax{}
var _ Function = &SparseMaxLoss{}
// NewSparseMax returns a new SparseMax Function.
func NewSparseMax(x Operand) *SparseMax {
return &SparseMax{x: x}
}
// Forward computes the output of the function.
func (s *SparseMax) Forward() mat.Matrix {
s.y = mat.NewVecDense(sparseMax(translateInput(s.x.Value().Data())))
return s.y
}
// Backward computes the backward pass.
func (s *SparseMax) Backward(gy mat.Matrix) {
if s.x.RequiresGrad() {
output := s.y.Data()
var nzSum mat.Float = 0.0
var nzCount mat.Float = 0.0
gx := mat.GetDenseWorkspace(s.x.Value().Rows(), s.x.Value().Columns())
defer mat.ReleaseDense(gx)
for i := range output {
if output[i] != 0 {
nzSum += gy.At(i, 0)
nzCount++
}
}
nzSum = nzSum / nzCount
for i := range output {
if output[i] != 0 {
gx.Set(i, 0, gy.At(i, 0)-nzSum)
} else {
gx.Set(i, 0, 0)
}
}
s.x.PropagateGrad(gx)
}
}
// translateInput translates the input by max for numerical stability
func translateInput(v []mat.Float) []mat.Float {
maximum := max(v)
translated := make([]mat.Float, len(v))
for i := range v {
translated[i] = v[i] - maximum
}
return translated
}
func sparseMaxCommon(v []mat.Float) (zs []mat.Float, bounds []mat.Float, cumSumInput []mat.Float, tau mat.Float) {
zs = make([]mat.Float, len(v))
copy(zs, v)
// Sort zs in descending order.
sort.Sort(sort.Reverse(matsort.FloatSlice(zs)))
bounds = make([]mat.Float, len(zs))
for i := range bounds {
bounds[i] = 1 + mat.Float(i+1)*zs[i]
}
cumSumInput = make([]mat.Float, len(zs))
floatutils.CumSum(cumSumInput, zs)
k := -1
tau = 0.0
for i := range zs {
if bounds[i] > cumSumInput[i] {
if k < (i + 1) {
k = i + 1
}
tau += zs[i]
}
}
tau = (tau - 1) / mat.Float(k)
return zs, bounds, cumSumInput, tau
}
func sparseMax(v []mat.Float) []mat.Float {
zs, _, _, tau := sparseMaxCommon(v)
//Reuses zs to avoid allocating new slice
for i := range zs {
zs[i] = mat.Max(0.0, v[i]-tau)
}
return zs
}
// SparseMaxLoss function implementation, based on https://github.com/gokceneraslan/SparseMax.torch
type SparseMaxLoss struct {
x Operand
tau mat.Float // computed during the forward pass
y mat.Matrix // computed during forward pass
}
// NewSparseMaxLoss returns a new SparseMaxLoss Function.
func NewSparseMaxLoss(x Operand) *SparseMaxLoss {
return &SparseMaxLoss{x: x}
}
// sparseMaxLoss computes the sparseMax loss function and returns
// the loss and the tau parameter (needed by backward)
func sparseMaxLoss(v []mat.Float) ([]mat.Float, mat.Float) {
zs, bounds, cumSumInput, tau := sparseMaxCommon(v)
var regTerm mat.Float = 0.0
tauSquared := tau * tau
for i := range zs {
if bounds[i] > cumSumInput[i] {
regTerm += zs[i]*zs[i] - tauSquared
}
}
regTerm = regTerm*0.5 + 0.5
// Reuse zs to avoid allocating a new slice
for i := range zs {
zs[i] = v[i] - regTerm
}
return zs, tau
}
// Forward computes the output of the function.
func (s *SparseMaxLoss) Forward() mat.Matrix {
output, tau := sparseMaxLoss(s.x.Value().Data())
s.y = mat.NewVecDense(output)
s.tau = tau
return s.y
}
// Backward computes the backward pass.
func (s *SparseMaxLoss) Backward(gy mat.Matrix) {
if s.x.RequiresGrad() {
input := s.x.Value().Data()
sparseMax := make([]mat.Float, len(input))
for i := range sparseMax {
sparseMax[i] = mat.Max(0, input[i]-s.tau)
}
gx := mat.GetDenseWorkspace(s.x.Value().Rows(), s.x.Value().Columns())
defer mat.ReleaseDense(gx)
gyData := gy.Data()
gySum := floatutils.Sum(gyData)
for i := range gyData {
gx.Set(i, 0, gy.At(i, 0)-gySum*sparseMax[i])
}
s.x.PropagateGrad(gx)
}
} | pkg/ml/ag/fn/sparsemax.go | 0.74382 | 0.463444 | sparsemax.go | starcoder |
package main
import (
"fmt"
dataframe "github.com/rocketlaunchr/dataframe-go"
)
type Stock struct {
price float64
shares float64
mfee float64
end float64
steps float64
}
func first_transaction(price float64, shares float64) []float64{
var principal float64 = shares * price
var margin_amount float64 = 0.5 * principal
var total_req float64 = principal + margin_amount
res := []float64{shares, price, principal, margin_amount, total_req}
return res
}
func price_increase(end float64, steps float64, mfee float64, shares float64, price float64) {
var principal float64 = shares * price
var margin_amount float64 = 0.5 * principal
var initial_req float64 = margin_amount + principal
var margin_call float64 = 0
sval := dataframe.NewSeriesFloat64("Shares", nil, shares)
share_price := dataframe.NewSeriesFloat64("Share Price",nil,price)
short_val := dataframe.NewSeriesFloat64("Short Value",nil, principal)
margin_requirement := dataframe.NewSeriesFloat64("Margin Requirement",nil, margin_amount)
total_requirement := dataframe.NewSeriesFloat64("Total Requirement",nil,initial_req)
mcall := dataframe.NewSeriesFloat64("Margin Call", nil, margin_call)
for i:=price+steps; i < end; i+=steps {
var short_value float64 = shares * i
var margin_req float64 = short_value * mfee
var total_req float64 = short_value + margin_req
if total_req < initial_req {
margin_call = 0
sval.Append(shares)
share_price.Append(i)
short_val.Append(short_value)
margin_requirement.Append(margin_req)
total_requirement.Append(total_req)
mcall.Append(margin_call)
} else {
margin_call = total_req - initial_req
sval.Append(shares)
share_price.Append(i)
short_val.Append(short_value)
margin_requirement.Append(margin_req)
total_requirement.Append(total_req)
mcall.Append(margin_call)
}
}
df := dataframe.NewDataFrame(sval, share_price, short_val, margin_requirement, total_requirement, mcall)
fmt.Println(df.Table())
}
func price_decrease(end float64, steps float64, mfee float64, shares float64, price float64) {
var principal float64 = shares * price
var margin_amount float64 = 0.5 * principal
var initial_req float64 = margin_amount + principal
var mrel float64 = 0
sval := dataframe.NewSeriesFloat64("Shares", nil, shares)
share_price := dataframe.NewSeriesFloat64("Share Price",nil,price)
short_val := dataframe.NewSeriesFloat64("Short Sale Value",nil, principal)
margin_val := dataframe.NewSeriesFloat64("Additional Value",nil, margin_amount)
total_requirement := dataframe.NewSeriesFloat64("Total Requirement",nil,initial_req)
mreleased := dataframe.NewSeriesFloat64("Margin Released", nil, mrel)
for i := price - steps; i > end; i-= steps {
var short_value float64 = shares * i
var margin_req float64 = 0.5 * short_value
var total_req float64 = short_value + margin_req
mrel = initial_req - total_req
sval.Append(shares)
share_price.Append(i)
short_val.Append(short_value)
margin_val.Append(margin_req)
total_requirement.Append(total_req)
mreleased.Append(mrel)
}
df := dataframe.NewDataFrame(sval, share_price, short_val, margin_val, total_requirement, mreleased)
fmt.Println(df.Table())
}
func main() {
/*
stock := Stock{50, 1000}
ans := first_transaction(stock.price, stock.shares)
fmt.Println("First Transaction: ", ans)
*/
//Struct
//price, shares, mfee, end, steps
//Price Increase
stock := Stock{50.00, 1000.00, 0.3, 80.00, 5.00}
price_increase(stock.end, stock.steps, stock.mfee, stock.shares, stock.price)
//Price Decrease
stock2 := Stock{50.00, 1000.00, 0.3, 30.00, 5.00}
price_decrease(stock2.end, stock2.steps, stock2.mfee, stock2.shares, stock2.price)
} | Markets/Go/short.go | 0.537041 | 0.450601 | short.go | starcoder |
package typ
import (
"fmt"
"xelf.org/xelf/cor"
"xelf.org/xelf/knd"
)
// Select reads path and returns the selected type from t or an error.
func Select(t Type, path string) (Type, error) {
p, err := cor.ParsePath(path)
if err != nil {
return Void, err
}
return SelectPath(t, p)
}
// SelectPath returns the selected type from t or an error.
func SelectPath(t Type, path cor.Path) (r Type, err error) {
for i, s := range path {
if s.Sel {
r, err = SelectList(t, path[i:])
} else if s.Key != "" {
r, err = SelectKey(t, s.Key)
} else {
r, err = SelectIdx(t, s.Idx)
}
if err != nil {
return Void, err
}
t = r
}
return t, nil
}
func SelectKey(t Type, key string) (Type, error) {
if t.Kind&(knd.Keyr|knd.Spec) == 0 {
return Void, fmt.Errorf("want keyr got %s", t)
}
switch b := t.Body.(type) {
case *ElBody:
return b.El, nil
case *ParamBody:
if p := b.FindKeyIndex(key); p >= 0 {
return b.Params[p].Type, nil
}
}
if t.Kind&knd.Dict == 0 {
return Void, fmt.Errorf("key %s not found in %s", key, t)
}
return Any, nil
}
func SelectIdx(t Type, idx int) (Type, error) {
if t.Kind&(knd.Idxr|knd.Spec) == 0 {
return Void, fmt.Errorf("want idxr got %s", t)
}
switch b := t.Body.(type) {
case *ElBody:
return b.El, nil
case *ParamBody:
i, l := idx, len(b.Params)
if i < 0 {
i = l + i
}
if i < 0 || i >= l {
return Void, fmt.Errorf("idx %d %w [0:%d]", idx, ErrIdxBounds, l-1)
}
return b.Params[i].Type, nil
}
return Any, nil
}
func SelectList(t Type, p cor.Path) (r Type, err error) {
if t.Kind&(knd.Idxr|knd.Spec) == 0 {
return Void, fmt.Errorf("want idxr got %s", t)
}
switch b := t.Body.(type) {
case nil:
r = Any
case *ElBody:
r = b.El
case *ParamBody:
l := len(b.Params)
if t.Kind&knd.Spec != 0 {
l--
}
for i := 0; i < l; i++ {
r = Alt(r, b.Params[i].Type)
}
}
if r == Any {
return List, nil
}
if s := p[0]; s.Key != "" {
r, err = SelectKey(r, s.Key)
} else {
r, err = SelectIdx(r, s.Idx)
}
if err == nil && len(p) > 1 {
r, err = SelectPath(r, p[1:])
}
if err != nil {
return Void, err
}
return ListOf(r), nil
} | typ/path.go | 0.500732 | 0.415314 | path.go | starcoder |
package blocks
import (
"fmt"
"regexp"
"strings"
yamlv3 "gopkg.in/yaml.v3"
core "github.com/authzed/spicedb/pkg/proto/core/v1"
"github.com/authzed/spicedb/pkg/commonerrors"
"github.com/authzed/spicedb/pkg/tuple"
)
// ParsedExpectedRelations represents the expected relations defined in the validation
// file.
type ParsedExpectedRelations struct {
// ValidationMap is the parsed expected relations validation map.
ValidationMap ValidationMap
// SourcePosition is the position of the expected relations in the file.
SourcePosition commonerrors.SourcePosition
}
// UnmarshalYAML is a custom unmarshaller.
func (per *ParsedExpectedRelations) UnmarshalYAML(node *yamlv3.Node) error {
err := node.Decode(&per.ValidationMap)
if err != nil {
return convertYamlError(err)
}
per.SourcePosition = commonerrors.SourcePosition{LineNumber: node.Line, ColumnPosition: node.Column}
return nil
}
// ValidationMap is a map from an Object Relation (as a Relationship) to the
// validation strings containing the Subjects for that Object Relation.
type ValidationMap map[ObjectRelation][]ExpectedSubject
// ObjectRelation represents an ONR defined as a string in the key for
// the ValidationMap.
type ObjectRelation struct {
// ObjectRelationString is the string form of the object relation.
ObjectRelationString string
// ObjectAndRelation is the parsed object and relation.
ObjectAndRelation *core.ObjectAndRelation
// SourcePosition is the position of the expected relations in the file.
SourcePosition commonerrors.SourcePosition
}
// UnmarshalYAML is a custom unmarshaller.
func (ors *ObjectRelation) UnmarshalYAML(node *yamlv3.Node) error {
err := node.Decode(&ors.ObjectRelationString)
if err != nil {
return convertYamlError(err)
}
parsed := tuple.ParseONR(ors.ObjectRelationString)
if parsed == nil {
return commonerrors.NewErrorWithSource(
fmt.Errorf("could not parse %s", ors.ObjectRelationString),
ors.ObjectRelationString,
uint64(node.Line),
uint64(node.Column),
)
}
ors.ObjectAndRelation = parsed
ors.SourcePosition = commonerrors.SourcePosition{LineNumber: node.Line, ColumnPosition: node.Column}
return nil
}
var (
vsSubjectRegex = regexp.MustCompile(`(.*?)\[(?P<user_str>.*)\](.*?)`)
vsObjectAndRelationRegex = regexp.MustCompile(`(.*?)<(?P<onr_str>[^\>]+)>(.*?)`)
vsSubjectWithExceptionsRegex = regexp.MustCompile(`^(.+)\s*-\s*\{([^\}]+)\}$`)
)
// ExpectedSubject is a subject expected for the ObjectAndRelation.
type ExpectedSubject struct {
// ValidationString holds a validation string containing a Subject and one or
// more Relations to the parent Object.
// Example: `[tenant/user:someuser#...] is <tenant/document:example#viewer>`
ValidationString ValidationString
// Subject is the subject expected. May be nil if not defined in the line.
SubjectWithExceptions *SubjectWithExceptions
// Resources are the resources under which the subject is found.
Resources []*core.ObjectAndRelation
// SourcePosition is the position of the expected subject in the file.
SourcePosition commonerrors.SourcePosition
}
// SubjectWithExceptions returns the subject found in a validation string, along with any exceptions.
type SubjectWithExceptions struct {
// Subject is the subject found.
Subject *core.ObjectAndRelation
// Exceptions are those subjects removed from the subject, if it is a wildcard.
Exceptions []*core.ObjectAndRelation
}
// UnmarshalYAML is a custom unmarshaller.
func (es *ExpectedSubject) UnmarshalYAML(node *yamlv3.Node) error {
err := node.Decode(&es.ValidationString)
if err != nil {
return convertYamlError(err)
}
subjectWithExceptions, subErr := es.ValidationString.Subject()
if subErr != nil {
return commonerrors.NewErrorWithSource(
subErr,
subErr.SourceCodeString,
uint64(node.Line)+subErr.LineNumber,
uint64(node.Column)+subErr.ColumnPosition,
)
}
onrs, onrErr := es.ValidationString.ONRS()
if onrErr != nil {
return commonerrors.NewErrorWithSource(
onrErr,
onrErr.SourceCodeString,
uint64(node.Line)+onrErr.LineNumber,
uint64(node.Column)+onrErr.ColumnPosition,
)
}
es.SubjectWithExceptions = subjectWithExceptions
es.SourcePosition = commonerrors.SourcePosition{LineNumber: node.Line, ColumnPosition: node.Column}
es.Resources = onrs
return nil
}
// ValidationString holds a validation string containing a Subject and one or
// more Relations to the parent Object.
// Example: `[tenant/user:someuser#...] is <tenant/document:example#viewer>`
type ValidationString string
// SubjectString returns the subject contained in the ValidationString, if any.
func (vs ValidationString) SubjectString() (string, bool) {
result := vsSubjectRegex.FindStringSubmatch(string(vs))
if len(result) != 4 {
return "", false
}
return result[2], true
}
// Subject returns the subject contained in the ValidationString, if any. If
// none, returns nil.
func (vs ValidationString) Subject() (*SubjectWithExceptions, *commonerrors.ErrorWithSource) {
subjectStr, ok := vs.SubjectString()
if !ok {
return nil, nil
}
bracketedSubjectString := fmt.Sprintf("[%s]", subjectStr)
subjectStr = strings.TrimSpace(subjectStr)
if strings.HasSuffix(subjectStr, "}") {
result := vsSubjectWithExceptionsRegex.FindStringSubmatch(subjectStr)
if len(result) != 3 {
return nil, commonerrors.NewErrorWithSource(fmt.Errorf("invalid subject: `%s`", subjectStr), bracketedSubjectString, 0, 0)
}
subjectONR := tuple.ParseSubjectONR(strings.TrimSpace(result[1]))
if subjectONR == nil {
return nil, commonerrors.NewErrorWithSource(fmt.Errorf("invalid subject: `%s`", result[1]), result[1], 0, 0)
}
exceptionsString := strings.TrimSpace(result[2])
exceptionsStringsSlice := strings.Split(exceptionsString, ",")
exceptions := make([]*core.ObjectAndRelation, 0, len(exceptionsStringsSlice))
for _, exceptionString := range exceptionsStringsSlice {
exceptionONR := tuple.ParseSubjectONR(strings.TrimSpace(exceptionString))
if exceptionONR == nil {
return nil, commonerrors.NewErrorWithSource(fmt.Errorf("invalid subject: `%s`", exceptionString), exceptionString, 0, 0)
}
exceptions = append(exceptions, exceptionONR)
}
return &SubjectWithExceptions{subjectONR, exceptions}, nil
}
found := tuple.ParseSubjectONR(subjectStr)
if found == nil {
return nil, commonerrors.NewErrorWithSource(fmt.Errorf("invalid subject: `%s`", subjectStr), bracketedSubjectString, 0, 0)
}
return &SubjectWithExceptions{found, nil}, nil
}
// ONRStrings returns the ONRs contained in the ValidationString, if any.
func (vs ValidationString) ONRStrings() []string {
results := vsObjectAndRelationRegex.FindAllStringSubmatch(string(vs), -1)
onrStrings := []string{}
for _, result := range results {
onrStrings = append(onrStrings, result[2])
}
return onrStrings
}
// ONRS returns the subject ONRs in the ValidationString, if any.
func (vs ValidationString) ONRS() ([]*core.ObjectAndRelation, *commonerrors.ErrorWithSource) {
onrStrings := vs.ONRStrings()
onrs := []*core.ObjectAndRelation{}
for _, onrString := range onrStrings {
found := tuple.ParseONR(onrString)
if found == nil {
return nil, commonerrors.NewErrorWithSource(fmt.Errorf("invalid resource and relation: `%s`", onrString), onrString, 0, 0)
}
onrs = append(onrs, found)
}
return onrs, nil
}
// ParseExpectedRelationsBlock parses the given contents as an expected relations block.
func ParseExpectedRelationsBlock(contents []byte) (*ParsedExpectedRelations, error) {
per := ParsedExpectedRelations{}
err := yamlv3.Unmarshal(contents, &per)
if err != nil {
return nil, convertYamlError(err)
}
return &per, nil
} | pkg/validationfile/blocks/expectedrelations.go | 0.780662 | 0.439447 | expectedrelations.go | starcoder |
package onshape
import (
"encoding/json"
)
// BTExportTessellatedFacesFacet1417 struct for BTExportTessellatedFacesFacet1417
type BTExportTessellatedFacesFacet1417 struct {
BtType *string `json:"btType,omitempty"`
Indices *[]int32 `json:"indices,omitempty"`
Normal *BTVector3d389 `json:"normal,omitempty"`
Normals *[]BTVector3d389 `json:"normals,omitempty"`
TextureCoordinates *[]BTVector2d1812 `json:"textureCoordinates,omitempty"`
Vertices *[]BTVector3d389 `json:"vertices,omitempty"`
}
// NewBTExportTessellatedFacesFacet1417 instantiates a new BTExportTessellatedFacesFacet1417 object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewBTExportTessellatedFacesFacet1417() *BTExportTessellatedFacesFacet1417 {
this := BTExportTessellatedFacesFacet1417{}
return &this
}
// NewBTExportTessellatedFacesFacet1417WithDefaults instantiates a new BTExportTessellatedFacesFacet1417 object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewBTExportTessellatedFacesFacet1417WithDefaults() *BTExportTessellatedFacesFacet1417 {
this := BTExportTessellatedFacesFacet1417{}
return &this
}
// GetBtType returns the BtType field value if set, zero value otherwise.
func (o *BTExportTessellatedFacesFacet1417) GetBtType() string {
if o == nil || o.BtType == nil {
var ret string
return ret
}
return *o.BtType
}
// GetBtTypeOk returns a tuple with the BtType field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTExportTessellatedFacesFacet1417) GetBtTypeOk() (*string, bool) {
if o == nil || o.BtType == nil {
return nil, false
}
return o.BtType, true
}
// HasBtType returns a boolean if a field has been set.
func (o *BTExportTessellatedFacesFacet1417) HasBtType() bool {
if o != nil && o.BtType != nil {
return true
}
return false
}
// SetBtType gets a reference to the given string and assigns it to the BtType field.
func (o *BTExportTessellatedFacesFacet1417) SetBtType(v string) {
o.BtType = &v
}
// GetIndices returns the Indices field value if set, zero value otherwise.
func (o *BTExportTessellatedFacesFacet1417) GetIndices() []int32 {
if o == nil || o.Indices == nil {
var ret []int32
return ret
}
return *o.Indices
}
// GetIndicesOk returns a tuple with the Indices field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTExportTessellatedFacesFacet1417) GetIndicesOk() (*[]int32, bool) {
if o == nil || o.Indices == nil {
return nil, false
}
return o.Indices, true
}
// HasIndices returns a boolean if a field has been set.
func (o *BTExportTessellatedFacesFacet1417) HasIndices() bool {
if o != nil && o.Indices != nil {
return true
}
return false
}
// SetIndices gets a reference to the given []int32 and assigns it to the Indices field.
func (o *BTExportTessellatedFacesFacet1417) SetIndices(v []int32) {
o.Indices = &v
}
// GetNormal returns the Normal field value if set, zero value otherwise.
func (o *BTExportTessellatedFacesFacet1417) GetNormal() BTVector3d389 {
if o == nil || o.Normal == nil {
var ret BTVector3d389
return ret
}
return *o.Normal
}
// GetNormalOk returns a tuple with the Normal field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTExportTessellatedFacesFacet1417) GetNormalOk() (*BTVector3d389, bool) {
if o == nil || o.Normal == nil {
return nil, false
}
return o.Normal, true
}
// HasNormal returns a boolean if a field has been set.
func (o *BTExportTessellatedFacesFacet1417) HasNormal() bool {
if o != nil && o.Normal != nil {
return true
}
return false
}
// SetNormal gets a reference to the given BTVector3d389 and assigns it to the Normal field.
func (o *BTExportTessellatedFacesFacet1417) SetNormal(v BTVector3d389) {
o.Normal = &v
}
// GetNormals returns the Normals field value if set, zero value otherwise.
func (o *BTExportTessellatedFacesFacet1417) GetNormals() []BTVector3d389 {
if o == nil || o.Normals == nil {
var ret []BTVector3d389
return ret
}
return *o.Normals
}
// GetNormalsOk returns a tuple with the Normals field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTExportTessellatedFacesFacet1417) GetNormalsOk() (*[]BTVector3d389, bool) {
if o == nil || o.Normals == nil {
return nil, false
}
return o.Normals, true
}
// HasNormals returns a boolean if a field has been set.
func (o *BTExportTessellatedFacesFacet1417) HasNormals() bool {
if o != nil && o.Normals != nil {
return true
}
return false
}
// SetNormals gets a reference to the given []BTVector3d389 and assigns it to the Normals field.
func (o *BTExportTessellatedFacesFacet1417) SetNormals(v []BTVector3d389) {
o.Normals = &v
}
// GetTextureCoordinates returns the TextureCoordinates field value if set, zero value otherwise.
func (o *BTExportTessellatedFacesFacet1417) GetTextureCoordinates() []BTVector2d1812 {
if o == nil || o.TextureCoordinates == nil {
var ret []BTVector2d1812
return ret
}
return *o.TextureCoordinates
}
// GetTextureCoordinatesOk returns a tuple with the TextureCoordinates field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTExportTessellatedFacesFacet1417) GetTextureCoordinatesOk() (*[]BTVector2d1812, bool) {
if o == nil || o.TextureCoordinates == nil {
return nil, false
}
return o.TextureCoordinates, true
}
// HasTextureCoordinates returns a boolean if a field has been set.
func (o *BTExportTessellatedFacesFacet1417) HasTextureCoordinates() bool {
if o != nil && o.TextureCoordinates != nil {
return true
}
return false
}
// SetTextureCoordinates gets a reference to the given []BTVector2d1812 and assigns it to the TextureCoordinates field.
func (o *BTExportTessellatedFacesFacet1417) SetTextureCoordinates(v []BTVector2d1812) {
o.TextureCoordinates = &v
}
// GetVertices returns the Vertices field value if set, zero value otherwise.
func (o *BTExportTessellatedFacesFacet1417) GetVertices() []BTVector3d389 {
if o == nil || o.Vertices == nil {
var ret []BTVector3d389
return ret
}
return *o.Vertices
}
// GetVerticesOk returns a tuple with the Vertices field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTExportTessellatedFacesFacet1417) GetVerticesOk() (*[]BTVector3d389, bool) {
if o == nil || o.Vertices == nil {
return nil, false
}
return o.Vertices, true
}
// HasVertices returns a boolean if a field has been set.
func (o *BTExportTessellatedFacesFacet1417) HasVertices() bool {
if o != nil && o.Vertices != nil {
return true
}
return false
}
// SetVertices gets a reference to the given []BTVector3d389 and assigns it to the Vertices field.
func (o *BTExportTessellatedFacesFacet1417) SetVertices(v []BTVector3d389) {
o.Vertices = &v
}
func (o BTExportTessellatedFacesFacet1417) MarshalJSON() ([]byte, error) {
toSerialize := map[string]interface{}{}
if o.BtType != nil {
toSerialize["btType"] = o.BtType
}
if o.Indices != nil {
toSerialize["indices"] = o.Indices
}
if o.Normal != nil {
toSerialize["normal"] = o.Normal
}
if o.Normals != nil {
toSerialize["normals"] = o.Normals
}
if o.TextureCoordinates != nil {
toSerialize["textureCoordinates"] = o.TextureCoordinates
}
if o.Vertices != nil {
toSerialize["vertices"] = o.Vertices
}
return json.Marshal(toSerialize)
}
type NullableBTExportTessellatedFacesFacet1417 struct {
value *BTExportTessellatedFacesFacet1417
isSet bool
}
func (v NullableBTExportTessellatedFacesFacet1417) Get() *BTExportTessellatedFacesFacet1417 {
return v.value
}
func (v *NullableBTExportTessellatedFacesFacet1417) Set(val *BTExportTessellatedFacesFacet1417) {
v.value = val
v.isSet = true
}
func (v NullableBTExportTessellatedFacesFacet1417) IsSet() bool {
return v.isSet
}
func (v *NullableBTExportTessellatedFacesFacet1417) Unset() {
v.value = nil
v.isSet = false
}
func NewNullableBTExportTessellatedFacesFacet1417(val *BTExportTessellatedFacesFacet1417) *NullableBTExportTessellatedFacesFacet1417 {
return &NullableBTExportTessellatedFacesFacet1417{value: val, isSet: true}
}
func (v NullableBTExportTessellatedFacesFacet1417) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullableBTExportTessellatedFacesFacet1417) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | onshape/model_bt_export_tessellated_faces_facet_1417.go | 0.752468 | 0.437763 | model_bt_export_tessellated_faces_facet_1417.go | starcoder |
// Package frames describes the Frame interface.
// A set of standard frames are also defined in this package. These are: Fixed, Window, Wild and WildMin.
package frames
import (
"strconv"
"github.com/richardlehane/siegfried/internal/bytematcher/patterns"
"github.com/richardlehane/siegfried/internal/persist"
)
// Frame encapsulates a pattern with offset information, mediating between the pattern and the bytestream.
type Frame struct {
Min int
Max int
OffType
patterns.Pattern
}
// OffType is the type of offset
type OffType uint8
// Four offset types are supported
const (
BOF OffType = iota // beginning of file offset
PREV // offset from previous frame
SUCC // offset from successive frame
EOF // end of file offset
)
// OffString is an exported array of strings representing each of the four offset types
var OffString = [...]string{"B", "P", "S", "E"}
// Orientation returns the offset type of the frame which must be either BOF, PREV, SUCC or EOF
func (o OffType) Orientation() OffType {
return o
}
// SwitchOff returns a new offset type according to a given set of rules. These are:
// - PREV -> SUCC
// - SUCC and EOF -> PREV
// This is helpful when changing the orientation of a frame (for example to allow right-left searching).
func (o OffType) SwitchOff() OffType {
switch o {
case PREV:
return SUCC
case SUCC, EOF:
return PREV
default:
return o
}
}
// NewFrame generates Fixed, Window, Wild and WildMin frames. The offsets argument controls what type of frame is created:
// - for a Wild frame, give no offsets or give a max offset of < 0 and a min of < 1
// - for a WildMin frame, give one offset, or give a max offset of < 0 and a min of > 0
// - for a Fixed frame, give two offsets that are both >= 0 and that are equal to each other
// - for a Window frame, give two offsets that are both >= 0 and that are not equal to each other.
func NewFrame(typ OffType, pat patterns.Pattern, offsets ...int) Frame {
switch len(offsets) {
case 0:
return Frame{0, -1, typ, pat}
case 1:
if offsets[0] > 0 {
return Frame{offsets[0], -1, typ, pat}
}
return Frame{0, -1, typ, pat}
}
if offsets[1] < 0 {
if offsets[0] > 0 {
return Frame{offsets[0], -1, typ, pat}
}
return Frame{0, -1, typ, pat}
}
if offsets[0] < 0 {
offsets[0] = 0
}
return Frame{offsets[0], offsets[1], typ, pat}
}
// SwitchFrame returns a new frame with a different orientation (for example to allow right-left searching).
func SwitchFrame(f Frame, p patterns.Pattern) Frame {
return NewFrame(f.SwitchOff(), p, f.Min, f.Max)
}
// BMHConvert converts the patterns within a slice of frames to BMH sequences if possible.
func BMHConvert(fs []Frame, rev bool) []Frame {
nfs := make([]Frame, len(fs))
for i, f := range fs {
nfs[i] = NewFrame(f.Orientation(), patterns.BMH(f.Pattern, rev), f.Min, f.Max)
}
return nfs
}
// NonZero checks whether, when converted to simple byte sequences, this frame's pattern is all 0 bytes.
func NonZero(f Frame) bool {
for _, seq := range f.Sequences() {
allzeros := true
for _, b := range seq {
if b != 0 {
allzeros = false
}
}
if allzeros {
return false
}
}
return true
}
// TotalLength is sum of the maximum length of the enclosed pattern and the maximum offset.
func TotalLength(f Frame) int {
// a wild frame has no total length
if f.Max < 0 {
return -1
}
_, l := f.Length()
return l + f.Max
}
// Match the enclosed pattern against the byte slice in a L-R direction.
// Returns a slice of offsets for where a successive match by a related frame should begin.
func (f Frame) Match(b []byte) []int {
ret := make([]int, 0, 1)
min, max := f.Min, f.Max
if max < 0 || max > len(b) {
max = len(b)
}
for min <= max {
lengths, adv := f.Test(b[min:])
for _, l := range lengths {
ret = append(ret, min+l)
}
if adv < 1 {
break
}
min += adv
}
return ret
}
// For the nth match (per above), return the offset for successive match by related frame and bytes that can advance to make a successive test by this frame.
func (f Frame) MatchN(b []byte, n int) (int, int) {
var i int
min, max := f.Min, f.Max
if max < 0 || max > len(b) {
max = len(b)
}
for min <= max {
lengths, adv := f.Test(b[min:])
for _, l := range lengths {
if i == n {
return min + l, min + adv
}
i++
}
if adv < 1 {
break
}
min += adv
}
return -1, 0
}
// Match the enclosed pattern against the byte slice in a reverse (R-L) direction. Returns a slice of offsets for where a successive match by a related frame should begin.
func (f Frame) MatchR(b []byte) []int {
ret := make([]int, 0, 1)
min, max := f.Min, f.Max
if max < 0 || max > len(b) {
max = len(b)
}
for min <= max {
lengths, adv := f.TestR(b[:len(b)-min])
for _, l := range lengths {
ret = append(ret, min+l)
}
if adv < 1 {
break
}
min += adv
}
return ret
}
// For the nth match (per above), return the offset for successive match by related frame and bytes that can advance to make a successive test by this frame.
func (f Frame) MatchNR(b []byte, n int) (int, int) {
var i int
min, max := f.Min, f.Max
if max < 0 || max > len(b) {
max = len(b)
}
for min <= max {
lengths, adv := f.TestR(b[:len(b)-min])
for _, l := range lengths {
if i == n {
return min + l, min + adv
}
i++
}
if adv < 1 {
break
}
min += adv
}
return -1, 0
}
func (f Frame) Equals(f1 Frame) bool {
if f.Min == f1.Min && f.Max == f1.Max && f.OffType == f1.OffType && f.Pattern.Equals(f1.Pattern) {
return true
}
return false
}
func (f Frame) String() string {
var rng string
if f.Min == f.Max {
rng = strconv.Itoa(f.Min)
} else {
if f.Max < 0 {
rng = strconv.Itoa(f.Min) + "..*"
}
rng = strconv.Itoa(f.Min) + ".." + strconv.Itoa(f.Max)
}
return OffString[f.OffType] + ":" + rng + " " + f.Pattern.String()
}
// MaxMatches returns:
// - the max number of times a frame can match, given a byte slice of length 'l'
// - the maximum remaining slice length
// - the minimum length of a successful pattern match
func (f Frame) MaxMatches(l int) (int, int, int) {
min, _ := f.Length()
rem := l - min - f.Min
if rem < 0 && l >= 0 {
return 0, 0, 0
}
// handle fixed
if f.Min == f.Max || (l < 0 && f.Max < 0) {
return 1, rem, min
}
var ov int
if f.OffType <= PREV {
ov = patterns.Overlap(f.Pattern)
} else {
ov = patterns.OverlapR(f.Pattern)
}
if f.Max < 0 || (l > 0 && f.Max+min > l) {
return rem/ov + 1, rem, min
}
return (f.Max-f.Min)/ov + 1, rem, min
}
// Linked tests whether a frame is linked to a preceding frame (by a preceding or succeding relationship) with an offset and range that is less than the supplied ints.
// If -1 is given for maxDistance & maxRange, then will check if frame is linked to a preceding frame via a PREV or SUCC relationship.
// If -1 is given for maxDistance, but not maxRange, then will check if frame linked without regard to distance (only range),
// this is useful because if give maxRange of 0 you can tell if it is a fixed relationship
func (f Frame) Linked(prev Frame, maxDistance, maxRange int) (bool, int, int) {
switch f.OffType {
case PREV:
if maxDistance < 0 && f.Max > -1 && (maxRange < 0 || f.Max-f.Min <= maxRange) {
return true, maxDistance, maxRange
}
if f.Max < 0 || f.Max > maxDistance || f.Max-f.Min > maxRange {
return false, 0, 0
}
return true, maxDistance - f.Max, maxRange - (f.Max - f.Min)
case SUCC, EOF:
if prev.Orientation() != SUCC || prev.Max < 0 {
return false, 0, 0
}
if maxDistance < 0 && (maxRange < 0 || prev.Max-prev.Min <= maxRange) {
return true, maxDistance, maxRange
}
if prev.Max > maxDistance || prev.Max-prev.Min > maxRange {
return false, 0, 0
}
return true, maxDistance - prev.Max, maxRange - (prev.Max - prev.Min)
default:
return false, 0, 0
}
}
func (f Frame) Save(ls *persist.LoadSaver) {
ls.SaveInt(f.Min)
ls.SaveInt(f.Max)
ls.SaveByte(byte(f.OffType))
f.Pattern.Save(ls)
}
func Load(ls *persist.LoadSaver) Frame {
return Frame{
ls.LoadInt(),
ls.LoadInt(),
OffType(ls.LoadByte()),
patterns.Load(ls),
}
} | internal/bytematcher/frames/frames.go | 0.802362 | 0.46873 | frames.go | starcoder |
package mingo
// Object model
type Object map[string]interface{}
// Query model
type Query struct {
Criteria Object
compiled []func(Object) bool
}
// Test method evaluates the query by processing the expression's operators.
func (q *Query) Test(obj Object) bool {
q.compile()
for _, v := range q.compiled {
if !v(obj) {
return false
}
}
return true
}
// compile method takes the query's criteria, breaks it down and processes
// the supported operators.
func (q *Query) compile() {
if len(q.Criteria) == 0 {
return
}
var whereOperator Object
for k, v := range q.Criteria {
if k == "$where" {
whereOperator = Object{
"field": k,
"expr": v,
}
} else if k == "$expr" {
q.processOperator(k, k, v)
} else if q.operatorInList(k, []string{"$and", "$or", "$nor"}) {
q.processOperator(k, k, v)
} else {
expr := q.normalize(v)
for k2, v2 := range expr {
q.processOperator(k, k2, v2)
}
}
if len(whereOperator) > 0 {
q.processOperator(whereOperator["field"].(string), whereOperator["field"].(string), whereOperator["expr"].(Object))
}
}
}
// operatorInList method checks if the given operator is in the given slice
// of operators.
func (q *Query) operatorInList(operator string, list []string) bool {
for _, v := range list {
if v == operator {
return true
}
}
return false
}
// processOperator method applies the respective query operator on the sub
// expression.
func (q *Query) processOperator(field string, operator string, expr interface{}) {
qo := QueryOperator{}
switch operator {
case "$eq":
q.compiled = append(q.compiled, qo.eq(field, expr))
break
case "$ne":
q.compiled = append(q.compiled, qo.ne(field, expr))
break
case "$gt":
q.compiled = append(q.compiled, qo.gt(field, expr))
break
case "$gte":
q.compiled = append(q.compiled, qo.gte(field, expr))
break
case "$lt":
q.compiled = append(q.compiled, qo.lt(field, expr))
break
case "$lte":
q.compiled = append(q.compiled, qo.lte(field, expr))
break
case "$and":
switch expr.(type) {
case []Object:
q.compiled = append(q.compiled, qo.and(field, expr.([]Object)))
}
break
}
}
// normalize method flattens down object expression values. Defaults to the
// equal operator.
func (q *Query) normalize(expr interface{}) Object {
switch expr.(type) {
// Object
case Object:
for k, v := range expr.(Object) {
for _, v2 := range objectOperators {
if k == v2 {
return Object{v2: v}
}
}
}
return Object{
"$eq": expr,
}
// Value
default:
return Object{
"$eq": expr,
}
}
} | query.go | 0.647352 | 0.407923 | query.go | starcoder |
package types
import (
"github.com/benbjohnson/immutable"
)
var emptyMap = immutable.NewSortedMap(nil)
var EmptyTypeMap = TypeMap{emptyMap}
// TypeMap contains immutable mappings from labels to immutable lists of types.
type TypeMap struct {
m *immutable.SortedMap
}
func NewTypeMap() TypeMap { return TypeMap{emptyMap} }
// Create a TypeMap with a single entry.
func SingletonTypeMap(label string, t Type) TypeMap {
return TypeMap{emptyMap.Set(label, emptyList.Append(t))}
}
// Create a TypeMap with unscoped labels.
func NewFlatTypeMap(m map[string]Type) TypeMap {
b := NewTypeMapBuilder()
for name, t := range m {
b.Set(name, SingletonTypeList(t))
}
return b.Build()
}
// Get the number of entries in the map.
func (m TypeMap) Len() int { return m.m.Len() }
// Get the first entry in the map. Entries are sorted by label.
func (m TypeMap) First() (string, TypeList) {
if m.Len() == 0 {
return "", EmptyTypeList
}
k, v := m.m.Iterator().Next()
return k.(string), TypeList{v.(*immutable.List)}
}
// Get the list of types for a label.
func (m TypeMap) Get(label string) (TypeList, bool) {
l, ok := m.m.Get(label)
if !ok {
return TypeList{}, false
}
return TypeList{l: l.(*immutable.List)}, true
}
// Iterate over entries in the map.
// If f returns false, iteration will be stopped.
func (m TypeMap) Range(f func(string, TypeList) bool) {
iter := m.m.Iterator()
for !iter.Done() {
k, v := iter.Next()
if !f(k.(string), TypeList{v.(*immutable.List)}) {
return
}
}
}
// Get an iterator which may be used to read entries in the map, in sequential order.
func (m TypeMap) Iterator() TypeMapIterator {
return TypeMapIterator{m.m.Iterator()}
}
// Convert the map to a builder for modification, without mutating the existing map.
func (m TypeMap) Builder() TypeMapBuilder {
imm := m.m
if imm == nil {
imm = emptyMap
}
return TypeMapBuilder{immutable.NewSortedMapBuilder(imm)}
}
// TypeMapBuilder enables in-place updates of a map before finalization.
type TypeMapBuilder struct {
b *immutable.SortedMapBuilder
}
func NewTypeMapBuilder() TypeMapBuilder {
return TypeMapBuilder{immutable.NewSortedMapBuilder(emptyMap)}
}
func (b *TypeMapBuilder) EnsureInitialized() {
if b.b != nil {
return
}
b.b = immutable.NewSortedMapBuilder(emptyMap)
}
// Get the number of entries in the builder.
func (b TypeMapBuilder) Len() int {
if b.b == nil {
return 0
}
return b.b.Len()
}
// Set the type list for the given label in the builder.
func (b TypeMapBuilder) Set(label string, ts TypeList) TypeMapBuilder {
b.b.Set(label, ts.l)
return b
}
// Delete the given label and corresponding type list from the builder.
func (b TypeMapBuilder) Delete(label string) TypeMapBuilder {
b.b.Delete(label)
return b
}
// Finalize the builder into an immutable map.
func (b TypeMapBuilder) Build() TypeMap {
if b.b == nil {
return EmptyTypeMap
}
return TypeMap{b.b.Map()}
}
// Merge entries into the builder.
func (a TypeMapBuilder) Merge(b TypeMap) TypeMapBuilder {
b.Range(func(label string, bts TypeList) bool {
ts, ok := a.b.Get(label)
if !ok {
a.Set(label, bts)
return true
}
lb := TypeListBuilder{immutable.NewListBuilder(ts.(*immutable.List))}
bts.Range(func(i int, t Type) bool {
lb.Append(t)
return true
})
a.Set(label, lb.Build())
return true
})
return a
}
// TypeMapIterator reads entries in a map, in sequential order.
type TypeMapIterator struct {
i *immutable.SortedMapIterator
}
// Done returns true if the iterator has reached the end a map.
func (i TypeMapIterator) Done() bool { return i.i.Done() }
// Next advances the iterator and returns the next entry from a map.
func (i TypeMapIterator) Next() (string, TypeList) {
if i.Done() {
return "", EmptyTypeList
}
k, v := i.i.Next()
return k.(string), TypeList{v.(*immutable.List)}
}
// Peek returns the next entry from a map without advancing the iterator.
func (i TypeMapIterator) Peek() (string, TypeList) {
if i.Done() {
return "", EmptyTypeList
}
k, v := i.i.Next()
i.i.Prev()
return k.(string), TypeList{v.(*immutable.List)}
} | types/type_map.go | 0.774924 | 0.588948 | type_map.go | starcoder |
package display
import (
"fmt"
"github.com/inkyblackness/shocked-model"
"github.com/inkyblackness/shocked-client/graphics"
"github.com/inkyblackness/shocked-client/opengl"
)
var mapTileGridVertexShaderSource = `
#version 150
precision mediump float;
in vec3 vertexPosition;
uniform mat4 viewMatrix;
uniform mat4 projectionMatrix;
out float height;
void main(void) {
gl_Position = projectionMatrix * viewMatrix * vec4(vertexPosition.xy, 0.0, 1.0);
height = vertexPosition.z;
}
`
var mapTileGridFragmentShaderSource = `
#version 150
precision mediump float;
in float height;
out vec4 fragColor;
void main(void) {
fragColor = vec4(0.0, 0.8, 0.0, height);
}
`
// TileGridMapRenderable is a renderable for the tile grid.
type TileGridMapRenderable struct {
context *graphics.RenderContext
program uint32
vao *opengl.VertexArrayObject
vertexPositionBuffer uint32
vertexPositionAttrib int32
viewMatrixUniform opengl.Matrix4Uniform
projectionMatrixUniform opengl.Matrix4Uniform
tiles [][]*model.TileProperties
}
// NewTileGridMapRenderable returns a new instance of a renderable for tile grids.
func NewTileGridMapRenderable(context *graphics.RenderContext) *TileGridMapRenderable {
gl := context.OpenGl()
program, programErr := opengl.LinkNewStandardProgram(gl, mapTileGridVertexShaderSource, mapTileGridFragmentShaderSource)
if programErr != nil {
panic(fmt.Errorf("TileGridMapRenderable shader failed: %v", programErr))
}
renderable := &TileGridMapRenderable{
context: context,
program: program,
vao: opengl.NewVertexArrayObject(gl, program),
vertexPositionBuffer: gl.GenBuffers(1)[0],
vertexPositionAttrib: gl.GetAttribLocation(program, "vertexPosition"),
viewMatrixUniform: opengl.Matrix4Uniform(gl.GetUniformLocation(program, "viewMatrix")),
projectionMatrixUniform: opengl.Matrix4Uniform(gl.GetUniformLocation(program, "projectionMatrix")),
tiles: make([][]*model.TileProperties, int(tilesPerMapSide))}
for i := 0; i < len(renderable.tiles); i++ {
renderable.tiles[i] = make([]*model.TileProperties, int(tilesPerMapSide))
}
renderable.vao.WithSetter(func(gl opengl.OpenGl) {
gl.EnableVertexAttribArray(uint32(renderable.vertexPositionAttrib))
gl.BindBuffer(opengl.ARRAY_BUFFER, renderable.vertexPositionBuffer)
gl.VertexAttribOffset(uint32(renderable.vertexPositionAttrib), 3, opengl.FLOAT, false, 0, 0)
gl.BindBuffer(opengl.ARRAY_BUFFER, 0)
})
return renderable
}
// Dispose releases any internal resources
func (renderable *TileGridMapRenderable) Dispose() {
gl := renderable.context.OpenGl()
gl.DeleteProgram(renderable.program)
gl.DeleteBuffers([]uint32{renderable.vertexPositionBuffer})
renderable.vao.Dispose()
}
// SetTile sets the properties for the specified tile coordinate.
func (renderable *TileGridMapRenderable) SetTile(x, y int, properties *model.TileProperties) {
renderable.tiles[y][x] = properties
}
// Clear resets all tiles.
func (renderable *TileGridMapRenderable) Clear() {
for _, row := range renderable.tiles {
for index := 0; index < len(row); index++ {
row[index] = nil
}
}
}
// Render renders
func (renderable *TileGridMapRenderable) Render() {
gl := renderable.context.OpenGl()
renderable.vao.OnShader(func() {
renderable.viewMatrixUniform.Set(gl, renderable.context.ViewMatrix())
renderable.projectionMatrixUniform.Set(gl, renderable.context.ProjectionMatrix())
gl.BindBuffer(opengl.ARRAY_BUFFER, renderable.vertexPositionBuffer)
for y, row := range renderable.tiles {
for x, tile := range row {
if tile != nil {
left := float32(x) * fineCoordinatesPerTileSide
right := left + fineCoordinatesPerTileSide
bottom := float32(y) * fineCoordinatesPerTileSide
top := bottom + fineCoordinatesPerTileSide
vertices := make([]float32, 0, 6*2*3)
if tile.CalculatedWallHeights.North > 0 {
vertices = append(vertices, left, top, tile.CalculatedWallHeights.North, right, top, tile.CalculatedWallHeights.North)
}
if tile.CalculatedWallHeights.South > 0 {
vertices = append(vertices, left, bottom, tile.CalculatedWallHeights.South, right, bottom, tile.CalculatedWallHeights.South)
}
if tile.CalculatedWallHeights.West > 0 {
vertices = append(vertices, left, top, tile.CalculatedWallHeights.West, left, bottom, tile.CalculatedWallHeights.West)
}
if tile.CalculatedWallHeights.East > 0 {
vertices = append(vertices, right, top, tile.CalculatedWallHeights.East, right, bottom, tile.CalculatedWallHeights.East)
}
if *tile.Type == model.DiagonalOpenNorthEast || *tile.Type == model.DiagonalOpenSouthWest {
vertices = append(vertices, left, top, 1.0, right, bottom, 1.0)
}
if *tile.Type == model.DiagonalOpenNorthWest || *tile.Type == model.DiagonalOpenSouthEast {
vertices = append(vertices, left, bottom, 1.0, right, top, 1.0)
}
if len(vertices) > 0 {
gl.BufferData(opengl.ARRAY_BUFFER, len(vertices)*4, vertices, opengl.STATIC_DRAW)
gl.DrawArrays(opengl.LINES, 0, int32(len(vertices)/3))
}
}
}
}
gl.BindBuffer(opengl.ARRAY_BUFFER, 0)
})
} | src/github.com/inkyblackness/shocked-client/editor/display/TileGridMapRenderable.go | 0.750004 | 0.491883 | TileGridMapRenderable.go | starcoder |
package factory
import (
"reflect"
)
// Factory represents a factory defined by some model struct
type Factory struct {
ModelType reflect.Type
Table string
FiledValues map[string]interface{}
SequenceFiledValues map[string]*sequenceValue
DynamicFieldValues map[string]DynamicFieldValue
AssociationFieldValues map[string]*AssociationFieldValue
Traits map[string]*Factory
AfterBuildCallbacks []Callback
BeforeCreateCallbacks []Callback
AfterCreateCallbacks []Callback
CanHaveAssociations bool
CanHaveTraits bool
CanHaveCallbacks bool
}
// AddSequenceFiledValue adds sequence field value to factory by field name
func (f *Factory) AddSequenceFiledValue(name string, first int64, value SequenceFieldValue) {
if f.SequenceFiledValues == nil {
f.SequenceFiledValues = map[string]*sequenceValue{}
}
f.SequenceFiledValues[name] = newSequenceValue(first, value)
}
// sequenceValue defines the value of a sequence field.
type sequenceValue struct {
valueGenerateFunc SequenceFieldValue
sequence *sequence
}
// value calculates the value of current sequenceValue
func (seqValue *sequenceValue) value() (interface{}, error) {
return seqValue.valueGenerateFunc(seqValue.sequence.next())
}
// newSequenceValue create a new SequenceValue instance
func newSequenceValue(first int64, value SequenceFieldValue) *sequenceValue {
return &sequenceValue{
valueGenerateFunc: value,
sequence: &sequence{
first: first,
},
}
}
// SequenceFieldValue defines the value generator type of sequence field.
// It's return result will be set as the value of the sequence field dynamicly.
type SequenceFieldValue func(n int64) (interface{}, error)
// DynamicFieldValue defines the value generator type of a field.
// It's return result will be set as the value of the field dynamicly.
type DynamicFieldValue func(model interface{}) (interface{}, error)
// AssociationFieldValue represents a struct which contains data to generate value of a association field.
type AssociationFieldValue struct {
ReferenceField string
AssociationReferenceField string
OriginalFactory *Factory
Factory *Factory
}
// Callback defines the callback function type
type Callback func(model interface{}) error | factory.go | 0.807309 | 0.411702 | factory.go | starcoder |
package esi
import (
"math"
"github.com/gonum/floats"
"github.com/gonum/stat"
"github.com/evepraisal/go-evepraisal"
)
func nanToZero(f float64) float64 {
if math.IsNaN(f) {
return 0
}
return f
}
func getPriceAggregatesForOrders(orders []MarketOrder) evepraisal.Prices {
var prices evepraisal.Prices
buyPrices := make([]float64, 0)
buyWeights := make([]float64, 0)
sellPrices := make([]float64, 0)
sellWeights := make([]float64, 0)
allPrices := make([]float64, 0)
allWeights := make([]float64, 0)
for _, order := range orders {
if order.Buy {
buyPrices = append(buyPrices, order.Price)
buyWeights = append(buyWeights, float64(order.Volume))
prices.Buy.Volume += order.Volume
prices.Buy.OrderCount++
} else {
sellPrices = append(sellPrices, order.Price)
sellWeights = append(sellWeights, float64(order.Volume))
prices.Sell.Volume += order.Volume
prices.Sell.OrderCount++
}
allPrices = append(allPrices, order.Price)
allWeights = append(allWeights, float64(order.Volume))
prices.All.Volume += order.Volume
prices.All.OrderCount++
}
// Buy
if prices.Buy.OrderCount > 0 {
stat.SortWeighted(buyPrices, buyWeights)
prices.Buy.Average = nanToZero(stat.GeometricMean(buyPrices, buyWeights))
prices.Buy.Min = floats.Min(buyPrices)
prices.Buy.Max = floats.Max(buyPrices)
prices.Buy.Median = nanToZero(stat.Quantile(0.5, stat.Empirical, buyPrices, buyWeights))
prices.Buy.Percentile = nanToZero(stat.Quantile(0.99, stat.Empirical, buyPrices, buyWeights))
prices.Buy.Stddev = nanToZero(stat.StdDev(buyPrices, buyWeights))
}
// Sell
if prices.Sell.OrderCount > 0 {
stat.SortWeighted(sellPrices, sellWeights)
prices.Sell.Average = nanToZero(stat.GeometricMean(sellPrices, sellWeights))
prices.Sell.Min = floats.Min(sellPrices)
prices.Sell.Max = floats.Max(sellPrices)
prices.Sell.Median = nanToZero(stat.Quantile(0.5, stat.Empirical, sellPrices, sellWeights))
prices.Sell.Percentile = nanToZero(stat.Quantile(0.01, stat.Empirical, sellPrices, sellWeights))
prices.Sell.Stddev = nanToZero(stat.StdDev(sellPrices, sellWeights))
}
// All
if prices.All.OrderCount > 0 {
stat.SortWeighted(allPrices, allWeights)
prices.All.Average = nanToZero(stat.GeometricMean(allPrices, allWeights))
prices.All.Min = floats.Min(allPrices)
prices.All.Max = floats.Max(allPrices)
prices.All.Median = nanToZero(stat.Quantile(0.5, stat.Empirical, allPrices, allWeights))
prices.All.Percentile = nanToZero(stat.Quantile(0.9, stat.Empirical, allPrices, allWeights))
prices.All.Stddev = nanToZero(stat.StdDev(allPrices, allWeights))
}
return prices
} | esi/price_aggregates.go | 0.566019 | 0.623635 | price_aggregates.go | starcoder |
package jsonlogic
import (
"math"
"strconv"
)
type opAdd struct{}
type opSub struct{}
type opMul struct{}
type opDiv struct{}
type opMod struct{}
type opGreater struct{}
type opGreaterEqual struct{}
type opLess struct{}
type opLessEqual struct{}
type opMax struct{}
type opMin struct{}
func getFloatNumber(v interface{}) (f float64, ok bool) {
switch v := v.(type) {
case nil:
return 0, ok
case float64:
return v, true
case string:
if vv, err := strconv.ParseFloat(v, 64); err == nil {
return vv, true
}
}
return 0, false
}
func binaryOperate(applier LogicApplier, data DataType, params []RuleType, op func(float64, float64) interface{}) (DataType, error) {
if len(params) <= 1 {
return nil, nil
}
var (
v1, v2 interface{}
vv1, vv2 float64
err error
ok bool
)
if v1, err = applier.Apply(params[0], data); err != nil {
return nil, err
}
if vv1, ok = getFloatNumber(v1); !ok {
return nil, nil
}
if v2, err = applier.Apply(params[1], data); err != nil {
return nil, err
}
if vv2, ok = getFloatNumber(v2); !ok {
return nil, nil
}
return op(vv1, vv2), nil
}
func reduceOperate(applier LogicApplier, data DataType, params []RuleType, zero float64, op func(float64, float64) float64) (DataType, error) {
r := zero
for _, p := range params {
v, err := applier.Apply(p, data)
if err != nil {
return nil, err
}
vv, ok := getFloatNumber(v)
if !ok {
return nil, nil
}
r = op(r, vv)
if math.IsNaN(r) {
break
}
}
return r, nil
}
func (opAdd) Operate(applier LogicApplier, data DataType, params []RuleType) (DataType, error) {
return reduceOperate(applier, data, params, float64(0), func(v1, v2 float64) float64 {
return v1 + v2
})
}
func (opMul) Operate(applier LogicApplier, data DataType, params []RuleType) (DataType, error) {
return reduceOperate(applier, data, params, float64(1), func(v1, v2 float64) float64 {
return v1 * v2
})
}
func (opSub) Operate(applier LogicApplier, data DataType, params []RuleType) (DataType, error) {
if len(params) == 1 {
v, err := applier.Apply(params[0], data)
if err != nil {
return nil, err
}
if vv, ok := getFloatNumber(v); ok {
return -vv, nil
}
return nil, nil
}
return binaryOperate(applier, data, params, func(v1, v2 float64) interface{} {
return v1 - v2
})
}
func (opDiv) Operate(applier LogicApplier, data DataType, params []RuleType) (DataType, error) {
return binaryOperate(applier, data, params, func(v1, v2 float64) interface{} {
return v1 / v2
})
}
func (opMod) Operate(applier LogicApplier, data DataType, params []RuleType) (DataType, error) {
return binaryOperate(applier, data, params, func(v1, v2 float64) interface{} {
return math.Mod(v1, v2)
})
}
func (opGreater) Operate(applier LogicApplier, data DataType, params []RuleType) (DataType, error) {
ret, err := reduceOperate(applier, data, params, math.Inf(+1), func(v1, v2 float64) float64 {
if v1 > v2 {
return v2
}
return math.NaN()
})
if err != nil {
return nil, err
}
if ret == nil {
return false, nil
}
return !math.IsNaN(ret.(float64)), nil
}
func (opGreaterEqual) Operate(applier LogicApplier, data DataType, params []RuleType) (DataType, error) {
ret, err := reduceOperate(applier, data, params, math.Inf(+1), func(v1, v2 float64) float64 {
if v1 >= v2 {
return v2
}
return math.NaN()
})
if err != nil {
return nil, err
}
if ret == nil {
return false, nil
}
return !math.IsNaN(ret.(float64)), nil
}
func (opLess) Operate(applier LogicApplier, data DataType, params []RuleType) (DataType, error) {
ret, err := reduceOperate(applier, data, params, math.Inf(-1), func(v1, v2 float64) float64 {
if v1 < v2 {
return v2
}
return math.NaN()
})
if err != nil {
return nil, err
}
if ret == nil {
return false, nil
}
return !math.IsNaN(ret.(float64)), nil
}
func (opLessEqual) Operate(applier LogicApplier, data DataType, params []RuleType) (DataType, error) {
ret, err := reduceOperate(applier, data, params, math.Inf(-1), func(v1, v2 float64) float64 {
if v1 <= v2 {
return v2
}
return math.NaN()
})
if err != nil {
return nil, err
}
if ret == nil {
return false, nil
}
return !math.IsNaN(ret.(float64)), nil
}
func (opMax) Operate(applier LogicApplier, data DataType, params []RuleType) (DataType, error) {
return reduceOperate(applier, data, params, math.Inf(-1), func(v1, v2 float64) float64 {
if v1 > v2 {
return v1
}
return v2
})
}
func (opMin) Operate(applier LogicApplier, data DataType, params []RuleType) (DataType, error) {
return reduceOperate(applier, data, params, math.Inf(+1), func(v1, v2 float64) float64 {
if v1 < v2 {
return v1
}
return v2
})
} | op_numeric.go | 0.558568 | 0.467879 | op_numeric.go | starcoder |
package water
import (
"github.com/willbeason/worldproc/pkg/geodesic"
"math"
"sort"
)
type IndexHeight struct {
Index int
Height float64
Water float64
}
type Lake struct {
// IndexHeights is the set of Geodesic indices this lake contains and how
// much water they contain.
IndexHeights []IndexHeight
// WaterVolume is the total volume of water
WaterVolume float64
}
func (l *Lake) Add(i int, h, w float64) {
l.IndexHeights = append(l.IndexHeights, IndexHeight{
Index: i,
Height: h,
})
l.WaterVolume += w
}
func (l *Lake) Merge(other *Lake) {
if other == nil {
return
}
l.IndexHeights = append(l.IndexHeights, other.IndexHeights...)
l.WaterVolume += l.WaterVolume
}
func (l *Lake) Equalize() {
// Sort heights from lowest to highest.
sort.Slice(l.IndexHeights, func(i, j int) bool {
return l.IndexHeights[i].Height < l.IndexHeights[j].Height
})
// Now we're going to calculate the exact volume of water which would fill
// every cell until we reach the volume of water we're looking for.
i := 0
volume := 0.0
waterLevel := 0.0
maxI := len(l.IndexHeights)
landVolume := 0.0
for i < maxI {
ih := l.IndexHeights[i]
landVolume += ih.Height
newVolume := volume + float64(i)*(ih.Height-waterLevel)
if newVolume > l.WaterVolume {
waterLevel += (l.WaterVolume - volume) / float64(i)
break
}
volume = newVolume
waterLevel = ih.Height
i++
}
// Check for the case that everywhere ends up with water.
if i == maxI {
waterLevel = (l.WaterVolume + landVolume) / float64(maxI)
}
for j := range l.IndexHeights[:i] {
l.IndexHeights[j].Water = waterLevel - l.IndexHeights[j].Height
}
}
func Equalize(waters, heights []float64, sphere *geodesic.Geodesic) {
var lakes []Lake
// Visit nodes from lowest to highest.
toVisit := make([]Ordinal, len(heights))
for i, h := range heights {
toVisit[i].Index = i
toVisit[i].Height = h
}
sort.Slice(toVisit, func(i, j int) bool {
return toVisit[i].Height < toVisit[j].Height
})
visited := make(map[int]bool, len(sphere.Centers))
for _, v := range toVisit {
if visited[v.Index] {
continue
}
newLake := visitEqualize(v.Index, waters, heights, visited, sphere)
if newLake != nil && newLake.WaterVolume > 0.0 {
lakes = append(lakes, *newLake)
}
}
sort.Slice(lakes, func(i, j int) bool {
return len(lakes[i].IndexHeights) > len(lakes[j].IndexHeights)
})
for _, l := range lakes {
l.Equalize()
for _, ih := range l.IndexHeights {
waters[ih.Index] += ih.Water
}
}
}
func visitEqualize(i int, waters, heights []float64, visited map[int]bool, sphere *geodesic.Geodesic) *Lake {
if visited[i] {
return nil
}
visited[i] = true
l := &Lake{}
var toVisit OrdinalList
hi := heights[i]
toVisit.Insert(Ordinal{Index: i, Height: hi})
// We are guaranteed that every neighbor is the same level or higher.
for _, n := range sphere.Faces[i].Neighbors {
toVisit.Insert(Ordinal{Index: n, Height: hi})
}
for cell := toVisit.Pop(); cell != nil; cell = toVisit.Pop() {
if i != cell.Index && visited[cell.Index] {
continue
}
visited[cell.Index] = true
// cell.Height actually records the minimum height of the water we may
// take from this cell.
hc := heights[cell.Index]
w := math.Max(0.0, hc+waters[cell.Index]-cell.Height)
w = math.Min(w, waters[cell.Index])
waters[cell.Index] -= w
l.Add(cell.Index, math.Max(hc, cell.Height), w)
// hi tracks the level at which we can start taking water from a cell
// adjacent to the current cell.
hi := math.Max(cell.Height, hc)
for _, n := range sphere.Faces[cell.Index].Neighbors {
if visited[n] {
// Don't add already-visited cells.
continue
}
if heights[n]+waters[n]-hi < 0.001 {
// Don't add cells we can't possibly take water from.
continue
}
toVisit.Insert(Ordinal{Index: n, Height: hi})
}
}
return l
} | pkg/water/equalize.go | 0.623721 | 0.489259 | equalize.go | starcoder |
package plot
import (
"fmt"
"io"
"os"
"strings"
)
// Target for chart rendering. Defaults to standard output.
var chartWriter io.Writer = os.Stdout
// Plotable defines an interface for object which can be represented on a chart.
type Plotable interface {
// GetX() float64
GetY() float64
GetLabel() string
}
// Entry represents a single point on the chart.
type Entry struct {
Label string
LabelAbbr string
XValue float64
YValue float64
}
// func (e *Entry) GetX() float64 {
// return e.XValue
// }
func (e Entry) GetY() float64 {
return e.YValue
}
func (e Entry) GetLabel() string {
return e.LabelAbbr
}
// Chart holds all required data to render the chart.
type Chart struct {
Title string
Debug bool
Spacing Spacing
Entries []Plotable
Theme Theme
}
// Spacing defines sizes of various spacing elements (margin, padding, ...).
type Spacing struct {
Margin int
Padding int
Bar int
Axis int
}
// BarChart draws the bar chart to the cmd.
func BarChart(chart Chart) {
numEntries := len(chart.Entries)
if numEntries == 0 {
print(chartWriter, "No chart entries available")
return
}
width := calculateWidth(chart.Spacing, numEntries)
if chart.Debug {
print(chartWriter, formatDebugInfo(numEntries, width))
}
xA := calculateAxis(chart.Entries)
// TODO: Determine max with of yaxis label
axisLabelWidth := 5
if chart.Title != "" {
print(chartWriter, formatTitle(chart.Title, width+axisLabelWidth+3))
}
print(chartWriter, formatChart(chart.Entries, xA, chart.Theme, axisLabelWidth))
print(chartWriter, formatXAxis(chart.Theme, width, axisLabelWidth, xA.Low))
print(chartWriter, formatXAxisLabels(chart.Entries, axisLabelWidth))
print(chartWriter, "\n")
}
func getExtremes(entries []Plotable) (float64, float64) {
if len(entries) == 0 {
return 0.0, 0.0
}
firstY := entries[0].GetY()
low, high := firstY, firstY
for _, entry := range entries {
y := entry.GetY()
if y < low {
low = y
} else if y > high {
high = y
}
}
return low, high
}
func calculateAxis(entries []Plotable) Axis {
low, high := getExtremes(entries)
stepCount := 5.0
steps := high / stepCount
return Axis{Low: low, High: high, Steps: steps}
}
func formatChart(entries []Plotable, axis Axis, theme Theme, axisLabelWidth int) string {
// Start the chart with a line with only the y axis drawn
output := fmt.Sprintf("%s%s\n", strings.Repeat(" ", axisLabelWidth+3), theme.YAxis)
labelFmt := getLabelFormat(axisLabelWidth)
for val := axis.High; val > axis.Low; val -= axis.Steps {
// Print current y axis value
output = fmt.Sprintf("%s"+labelFmt, output, val)
// Print the y axis and the margin until the first bar
output = fmt.Sprint(output, yAxisChar, strings.Repeat(" ", 2))
// Print the bars with padding between each bar
for idx, entry := range entries {
// If it is not the first element, draw the padding
if idx != 0 {
output = fmt.Sprint(output, strings.Repeat(" ", 2))
}
// If the bar reaches up to the current value, draw the bar.
// If not, draw a spacing.
if entry.GetY() >= val {
output = fmt.Sprint(output, theme.Bar)
} else {
output = fmt.Sprint(output, " ")
}
}
// print remaining margin and newline
output = fmt.Sprintf("%s%s\n", output, strings.Repeat(" ", 2))
}
return output
}
func getLabelFormat(axisLabelWidth int) string {
return fmt.Sprintf("%%%d.0f - ", axisLabelWidth)
}
func formatXAxis(theme Theme, width int, axisLabelWidth int, axisVal float64) string {
labelFmt := getLabelFormat(axisLabelWidth)
return fmt.Sprintf("%s%s%s\n",
fmt.Sprintf(labelFmt, axisVal),
theme.CrossAxis,
strings.Repeat(theme.XAxis, width-1))
}
// TODO: Pass margin and padding as spacing
func formatXAxisLabels(entries []Plotable, axisLabelWidth int) string {
if len(entries) == 0 {
return ""
}
output := fmt.Sprint(strings.Repeat(" ", axisLabelWidth+6)) // axis + margin
for idx, entry := range entries {
if idx != 0 {
output = fmt.Sprint(output, strings.Repeat(" ", 2)) // pad
}
output = fmt.Sprint(output, entry.GetLabel())
}
return output
} | plot.go | 0.630799 | 0.403537 | plot.go | starcoder |
package pcm
import (
"fmt"
"io"
)
var _ Reader = &IOReader{}
// A Reader mimics io.Reader for pcm data.
type Reader interface {
Formatted
ReadPCM(b []byte) (n int, err error)
}
// An IOReader converts an io.Reader into a pcm.Reader
type IOReader struct {
Format
io.Reader
}
func (ior *IOReader) ReadPCM(p []byte) (n int, err error) {
return ior.Read(p)
}
// A Writer can have PCM formatted audio data written to it. It mimics io.Writer.
type Writer interface {
io.Closer
Formatted
// WritePCM expects PCM bytes matching this Writer's format.
// WritePCM will block until all of the bytes are consumed.
WritePCM([]byte) (n int, err error)
}
// The Formatted interface represents types that are aware of a PCM Format they expect or provide.
type Formatted interface {
// PCMFormat will return the Format used by an encoded audio or expected by an audio consumer.
// Implementations can embed a Format struct to simplify this.
PCMFormat() Format
}
// Format is a PCM format; it defines how binary audio data should be converted into real audio.
type Format struct {
// SampleRate defines how many times per second a consumer should read a single value. An example
// of a common value for this is 44100 or 44.1khz.
SampleRate uint32
// Channels defines how many concurrent audio channels are present in audio data. Common values are
// 1 for mono and 2 for stereo.
Channels uint16
// Bits determines how many bits a single sample value takes up. 8, 16, and 32 are common values.
// TODO: Do we need LE vs BE, float vs int representation?
Bits uint16
}
// PCMFormat returns this format.
func (f Format) PCMFormat() Format {
return f
}
// BytesPerSecond returns how many bytes this format would be encoded into per second in an audio stream.
func (f Format) BytesPerSecond() uint32 {
return f.SampleRate * uint32(f.SampleSize())
}
func (f Format) SampleSize() int {
return int(f.Channels) * int(f.Bits/8)
}
// ReadFloat reads a single sample from an audio stream, respecting bits and channels:
// f.Bits / 8 bytes * f.Channels bytes will be read from b, and this count will be returned as 'read'.
// the length of values will be equal to f.Channels, if no error is returned. If an error is returned,
// it will be io.ErrUnexpectedEOF or ErrUnsupportedBits
func (f Format) SampleFloat(b []byte) (values []float64, read int, err error) {
values = make([]float64, 0, f.Channels)
read = f.SampleSize()
if len(b) < read {
return nil, 0, io.ErrUnexpectedEOF
}
_ = b[read-1]
switch f.Bits {
case 8:
for i := 0; i < int(f.Channels); i++ {
v := int8(b[i])
values = append(values, float64(v))
}
case 16:
for i := 0; i < int(f.Channels)*2; i += 2 {
v := int16(b[i]) +
int16(b[i+1])<<8
values = append(values, float64(v))
}
case 32:
for i := 0; i < int(f.Channels)*4; i += 4 {
v := int32(b[i]) +
int32(b[i+1])<<8 +
int32(b[i+2])<<16 +
int32(b[i+3])<<24
values = append(values, float64(v))
}
default:
return nil, read, ErrUnsupportedBits
}
return
}
// ErrUnsupportedBits represents that the Bits value for a Format was not supported for some operation.
var ErrUnsupportedBits = fmt.Errorf("unsupported bits in pcm format") | audio/pcm/interface.go | 0.542379 | 0.400632 | interface.go | starcoder |
package xsort
// MergeAndUints computes intersection of ids list.
func MergeAndUints(ids ...[]uint32) []uint32 {
if len(ids) == 0 {
return nil
} else if len(ids) == 1 {
return ids[0]
}
// Find out the shortest list.
shortest := 0
for i := 1; i < len(ids); i++ {
if len(ids[i]) < len(ids[shortest]) {
shortest = i
}
}
res := make([]uint32, 0, len(ids[shortest]))
// Cursors of all lists.
js := make([]int, len(ids))
// Intersection.
for _, id := range ids[shortest] {
exists := true
for m := 0; m < len(ids); m++ {
if m == shortest {
continue
}
mIds := ids[m]
j := js[m]
if j >= len(mIds) {
exists = false
break
} else if id < mIds[j] {
exists = false
break
} else if id > mIds[j] {
// If the value of the cursor is less than the current id value, move the cursor until the value indicated is as equal as possible to the current id value.
jj := j
for ; jj < len(mIds) && mIds[jj] < id; jj++ {
}
js[m] = jj
// If the cursor is not equal to the current id after the end of the movement, this value is not in the intersection.
if jj >= len(mIds) || mIds[jj] != id {
exists = false
}
}
}
if exists {
res = append(res, id)
}
}
return res
}
// MergeOrUints merge sort ids list.
func MergeOrUints(ids ...[]uint32) []uint32 {
if len(ids) == 0 {
return nil
} else if len(ids) == 1 {
return ids[0]
}
res := make([]uint32, len(ids[0]))
copy(res, ids[0])
for i := 1; i < len(ids); i++ {
res = MergeUInt(res, ids[i])
}
return res
}
// MergeUInt merge sort a & b.
func MergeUInt(a, b []uint32) []uint32 {
i, j := 0, 0
res := make([]uint32, 0, len(a)+len(b))
for i <= len(a)-1 && j <= len(b)-1 {
if a[i] < b[j] {
res = append(res, a[i])
i++
} else if a[i] > b[j] {
res = append(res, b[j])
j++
} else { // a[i] == b[j]
res = append(res, a[i])
i++
j++
}
}
for i <= len(a)-1 {
res = append(res, a[i])
i++
}
for j <= len(b)-1 {
res = append(res, b[j])
j++
}
return res
} | xsort/merge_xsort.go | 0.630912 | 0.526586 | merge_xsort.go | starcoder |
package model
import (
"strings"
)
// A Rola represents a song, it contains the information present in
// various frames from the id3v2 tag, namely, artist, title, album
// track number, year, genre, and additionally, the path of the song
// file, and the id assigned by the database to the song.
type Rola struct {
artist string
title string
album string
track int
year int
genre string
path string
id int64
}
// NewRola creates a Rola with default values; text fields are "Unknown"
// and numeric fields are 0.
func NewRola() *Rola {
initial := "Unknown"
return &Rola{
artist: initial,
title: initial,
album: initial,
track: 0,
year: 2018,
genre: initial,
path: initial,
id: 0,
}
}
// Artist returns the performer of the Rola.
func (rola *Rola) Artist() string {
return rola.artist
}
// Title returns the title of the Rola.
func (rola *Rola) Title() string {
return rola.title
}
// Album returns the album where the Rola is included.
func (rola *Rola) Album() string {
return rola.album
}
// Track returns the track number of the Rola as an int.
func (rola *Rola) Track() int {
return rola.track
}
// Year returns the year of the Rola as an int.
func (rola *Rola) Year() int {
return rola.year
}
// Genre returns the genre of the Rola as a string.
func (rola *Rola) Genre() string {
return rola.genre
}
// Path returns the path of the song file where the Rola was mined.
func (rola *Rola) Path() string {
return rola.path
}
// ID returns the ID assigned to the Rola by the database at insertion.
func (rola *Rola) ID() int64 {
return rola.id
}
// SetArtist sets the Rola performer.
func (rola *Rola) SetArtist(artist string) {
rola.artist = strings.TrimSpace(artist)
}
// SetTitle sets the Rola title.
func (rola *Rola) SetTitle(title string) {
rola.title = strings.TrimSpace(title)
}
// SetAlbum sets the album title of the Rola.
func (rola *Rola) SetAlbum(album string) {
rola.album = strings.TrimSpace(album)
}
// SetTrack sets the track number of the Rola. Should be an int.
func (rola *Rola) SetTrack(track int) {
rola.track = track
}
// SetYear sets the year of the Rola. Should be an int.
func (rola *Rola) SetYear(year int) {
rola.year = year
}
// SetGenre sets the genre of the Rola.
func (rola *Rola) SetGenre(genre string) {
rola.genre = strings.TrimSpace(genre)
}
// SetPath sets the path of the file where the song represented by the Rola is.
func (rola *Rola) SetPath(path string) {
rola.path = strings.TrimSpace(path)
}
// SetID sets the ID of the Rola. This value should not be changed unless
// the corresponding value changes in the Database.
func (rola *Rola) SetID(id int64) {
rola.id = id
} | pkg/model/rola.go | 0.810366 | 0.449997 | rola.go | starcoder |
package main
/*
Day 6, part A
Given a set of memory banks, each having a number of blocks stored in them, go through in turn to rebalance the banks by:
Zero out the largest bank, saving the number of blocks. Starting with the next bank, deposit one block at a time, circling around to banks until all the blocks are gone. Keep track of the final balanced configurations and stop balancing once a repeat configuration is encountered; print how many balancing passes it took to reach.
*/
import (
"bufio"
"bytes"
"flag"
"fmt"
"os"
"strconv"
"strings"
)
var inputFile = flag.String("inputFile", "./inputs/day06-example.txt", "Input file")
var partB = flag.Bool("partB", false, "Perform part B solution?")
// This will modify the original slice
func balance(banks []int) ([]int, string) {
index := 0 // index of the highest element
highest := banks[index] // we'll assume the first element is the biggest
for bank := 0; bank < len(banks); bank++ {
if banks[bank] > highest {
highest = banks[bank]
index = bank
}
} //end find highest
banks[index] = 0
rounds := 0 // we've balanced this many times
index += 1 // Index into the array, starting with the "next" memory bank after highest; could wrap
if index >= len(banks) {
// wrap around if we need to
index = 0
}
// Loop highest times and do manual positioning management of the list
for rounds < highest {
banks[index] += 1
rounds += 1
index += 1
if index >= len(banks) {
// wrap around if we need to
index = 0
}
}
return banks, arrayToString(banks)
}
func arrayToString(ary []int) string {
var ret string
for i := 0; i < len(ary); i++ {
ret += strconv.Itoa(ary[i])
}
return ret
}
func main() {
flag.Parse()
input, err := os.Open(*inputFile)
if err != nil {
fmt.Printf("Couldn't open %s for read: %v", inputFile, err)
os.Exit(1)
}
defer input.Close()
lineReader := bufio.NewScanner(input)
var line string
banks := 0
for lineReader.Scan() {
line = lineReader.Text()
banks = bytes.Count([]byte(line[:len(line)]), []byte{'\t'})
}
banks += 1 // n-1 tabs
// Create our memory banks
memoryBanks := make([]int, banks)
// there is a place in hell for people that use synonyms like this, but,
// we'll be converting memoryBanks to a string (length `banks`) and
// using that as the key to store stuff we've already seen.
observedPatterns := make(map[string]bool)
// build memory bank
for i, d := range strings.Split(line, "\t") {
mag, err := strconv.Atoi(d)
if err != nil {
fmt.Printf("Couldn't convert: %s\n", err)
os.Exit(1)
}
fmt.Printf("Assigning %d to a memory bank\n", mag)
memoryBanks[i] = mag
}
fmt.Printf("Memory banks: %v\n", memoryBanks)
count := 0
countSinceFirstObservation := 0
partBCount := false
var partBFirstSeen string
balanceLoop:
for {
_, strFormatted := balance(memoryBanks)
count += 1
if *partB && partBCount {
countSinceFirstObservation += 1
}
if observedPatterns[strFormatted] {
if *partB {
if partBFirstSeen == "" {
// set the pattern to look for
partBFirstSeen = strFormatted
} else if partBFirstSeen == strFormatted {
// We did it! We're done
break balanceLoop
}
partBCount = true
} else {
break balanceLoop
}
} else {
observedPatterns[strFormatted] = true
}
}
if *partB {
fmt.Printf("Encountered the first pattern (%s) after %d loops\n", partBFirstSeen, countSinceFirstObservation)
} else {
fmt.Printf("Saw a duplicate pattern after %d iterations\n", count)
}
} | 2017/day06.go | 0.558327 | 0.47993 | day06.go | starcoder |
package input
import (
"github.com/Jeffail/benthos/v3/internal/docs"
"github.com/Jeffail/benthos/v3/lib/log"
"github.com/Jeffail/benthos/v3/lib/metrics"
"github.com/Jeffail/benthos/v3/lib/types"
)
func init() {
Constructors[TypeAzureQueueStorage] = TypeSpec{
constructor: fromSimpleConstructor(func(conf Config, mgr types.Manager, log log.Modular, stats metrics.Type) (Type, error) {
r, err := newAzureQueueStorage(conf.AzureQueueStorage, log, stats)
if err != nil {
return nil, err
}
return NewAsyncReader(TypeAzureQueueStorage, false, r, log, stats)
}),
Status: docs.StatusBeta,
Version: "3.42.0",
Summary: `
Dequeue objects from an Azure Storage Queue.`,
Description: `
Dequeue objects from an Azure Storage Queue.
This input adds the following metadata fields to each message:
` + "```" + `
- queue_storage_insertion_time
- All user defined queue metadata
` + "```" + `
Only one authentication method is required, ` + "`storage_connection_string`" + ` or ` + "`storage_account` and `storage_access_key`" + `. If both are set then the ` + "`storage_connection_string`" + ` is given priority.`,
FieldSpecs: docs.FieldSpecs{
docs.FieldCommon(
"storage_account",
"The storage account to dequeue messages from. This field is ignored if `storage_connection_string` is set.",
),
docs.FieldCommon(
"storage_access_key",
"The storage account access key. This field is ignored if `storage_connection_string` is set.",
),
docs.FieldCommon(
"storage_sas_token",
"The storage account SAS token. This field is ignored if `storage_connection_string` or `storage_access_key` are set.",
),
docs.FieldCommon(
"storage_connection_string",
"A storage account connection string. This field is required if `storage_account` and `storage_access_key` / `storage_sas_token` are not set.",
),
docs.FieldCommon(
"queue_name", "The name of the target Storage queue.",
),
docs.FieldAdvanced(
"dequeue_visibility_timeout", "The timeout duration until a dequeued message gets visible again, 30s by default",
).AtVersion("3.45.0"),
docs.FieldAdvanced("max_in_flight", "The maximum number of unprocessed messages to fetch at a given time."),
},
Categories: []Category{
CategoryServices,
CategoryAzure,
},
}
}
//------------------------------------------------------------------------------
// AzureQueueStorageConfig contains configuration fields for the AzureQueueStorage
// input type.
type AzureQueueStorageConfig struct {
StorageAccount string `json:"storage_account" yaml:"storage_account"`
StorageAccessKey string `json:"storage_access_key" yaml:"storage_access_key"`
StorageSASToken string `json:"storage_sas_token" yaml:"storage_sas_token"`
StorageConnectionString string `json:"storage_connection_string" yaml:"storage_connection_string"`
QueueName string `json:"queue_name" yaml:"queue_name"`
DequeueVisibilityTimeout string `json:"dequeue_visibility_timeout" yaml:"dequeue_visibility_timeout"`
MaxInFlight int32 `json:"max_in_flight" yaml:"max_in_flight"`
}
// NewAzureQueueStorageConfig creates a new AzureQueueStorageConfig with default
// values.
func NewAzureQueueStorageConfig() AzureQueueStorageConfig {
return AzureQueueStorageConfig{
DequeueVisibilityTimeout: "30s",
MaxInFlight: 10,
}
} | lib/input/azure_queue_storage_config.go | 0.735831 | 0.649801 | azure_queue_storage_config.go | starcoder |
package tree
import (
"fmt"
)
const (
MAX_OBJECTS = 12
MAX_LEVELS = 5
)
const (
TOP_LEFT = iota
TOP_RIGHT
BOTTOM_LEFT
BOTTOM_RIGHT
)
type QuadTree struct {
bounds *Rectangle
objects []*Rectangle
nodes map[int]*QuadTree
level int
}
// NewQuadTree creates a new quad tree at level pLevel and bounds
func NewQuadTree(pLevel int, bounds *Rectangle) *QuadTree {
return &QuadTree{
level: pLevel,
bounds: bounds,
objects: make([]*Rectangle, 0),
nodes: make(map[int]*QuadTree, 4),
}
}
/*
* Insert the object into the QuadTree. If the node exceeds the capacity, it
* will split and push objects into the sub nodes
*/
func (q *QuadTree) Insert(rectangle *Rectangle) {
// There are sub-nodes so push the object down to one of them if it
// can fit in any of them
if len(q.nodes) != 0 {
index := q.index(rectangle)
if index != -1 {
q.nodes[index].Insert(rectangle)
return
}
}
// object didn't fit into the any of the sub-nodes, so push in into this
// node
q.objects = append(q.objects, rectangle)
// We hit the objects limit and are still allowed to create more nodes,
// split and push objects into sub-nodes
if len(q.objects) > MAX_OBJECTS && q.level < MAX_LEVELS {
// there are no sub nodes in this node
if len(q.nodes) == 0 {
q.split()
}
for i := len(q.objects) - 1; i >= 0; i-- {
index := q.index(q.objects[i])
if index != -1 {
q.nodes[index].Insert(q.objects[i])
q.objects = append(q.objects[:i], q.objects[i+1:]...)
}
}
}
}
/**
* Retrieve will populate the result with the triangles that intersects with
* the rectangle
*/
func (q *QuadTree) Retrieve(rectangle *Rectangle, result *[]*Rectangle) {
*result = append(*result, q.objects...)
for _, node := range q.nodes {
if node.bounds.Intersects(rectangle) {
node.Retrieve(rectangle, result)
}
}
}
/**
* Clear will remove all objects and sub-nodes from this node and sub-nodes
* recursively
*/
func (q *QuadTree) Clear() {
q.objects = make([]*Rectangle, 0)
for _, node := range q.nodes {
node.Clear()
}
q.nodes = make(map[int]*QuadTree, 4)
}
/**
* Boundaries will return all "rectangles" in this tree
*/
func (q *QuadTree) Boundaries() []*Rectangle {
boundaries := make([]*Rectangle, 0)
boundaries = append(boundaries, q.bounds)
for _, node := range q.nodes {
boundaries = append(boundaries, node.Boundaries()...)
}
return boundaries
}
/**
* Debug will print a debug out to the console
*/
func (q *QuadTree) Debug() {
for _, obj := range q.objects {
for i := 0; i < q.level; i++ {
fmt.Print("\t")
}
fmt.Printf("object x:%d y:%d w:%d h:%d\n", int(obj.position.X), int(obj.position.Y), int(obj.maxX-obj.minX), int(obj.maxY-obj.minY))
}
for index, node := range q.nodes {
for i := 0; i < node.level; i++ {
fmt.Print("\t")
}
fmt.Printf("node %d.%d\n", node.level, index)
node.Debug()
}
}
/*
* index determines which node the object belongs to. nil means
* object cannot completely fit within a child node and is part
* of the parent node
*/
func (q *QuadTree) index(r *Rectangle) int {
// Object can completely fit within the left quadrants
left := (r.position.X < q.bounds.position.X) && (r.maxX < q.bounds.position.X)
// Object can completely fit within the top quadrants
top := (r.position.Y < q.bounds.position.Y) && (r.maxY < q.bounds.position.Y)
if top && left {
return TOP_LEFT
}
// Object can completely fit within the right quadrants
right := (r.position.X > q.bounds.position.X) && (r.minX > q.bounds.position.X)
if top && right {
return TOP_RIGHT
}
// Object can completely fit within the bottom quadrants
bottom := (r.position.Y > q.bounds.position.Y) && (r.minY > q.bounds.position.Y)
if bottom && left {
return BOTTOM_LEFT
}
if bottom && right {
return BOTTOM_RIGHT
}
// object can't fit in any of the sub nodes
return -1
}
// split creates four sub-nodes for this node
func (q *QuadTree) split() {
subWidth := q.bounds.maxX - q.bounds.minX
subHeight := q.bounds.maxY - q.bounds.minY
x := q.bounds.position.X
y := q.bounds.position.Y
q.nodes[TOP_LEFT] = NewQuadTree(q.level+1, NewRectangle(x-subWidth, y-subHeight, subWidth, subHeight))
q.nodes[TOP_RIGHT] = NewQuadTree(q.level+1, NewRectangle(x+subWidth, y-subHeight, subWidth, subHeight))
q.nodes[BOTTOM_LEFT] = NewQuadTree(q.level+1, NewRectangle(x-subWidth, y+subHeight, subWidth, subHeight))
q.nodes[BOTTOM_RIGHT] = NewQuadTree(q.level+1, NewRectangle(x+subWidth, y+subHeight, subWidth, subHeight))
} | quadtree.go | 0.627837 | 0.438966 | quadtree.go | starcoder |
package types
import (
"bytes"
"encoding/binary"
"encoding/hex"
"fmt"
"github.com/MinterTeam/minter-go-node/hexutil"
"github.com/tendermint/tendermint/crypto/ed25519"
"math/big"
"math/rand"
"reflect"
"strconv"
"strings"
)
// Types lengths
const (
HashLength = 32
AddressLength = 20
PubKeyLength = 32
CoinSymbolLength = 10
TendermintAddressLength = 20
)
const (
// BasecoinID is an ID of a base coin
BasecoinID = 0
)
var (
hashT = reflect.TypeOf(Hash{})
addressT = reflect.TypeOf(Address{})
)
// Hash represents the 32 byte Keccak256 hash of arbitrary data.
type Hash [HashLength]byte
// BytesToHash converts given byte slice to Hash
func BytesToHash(b []byte) Hash {
var h Hash
h.SetBytes(b)
return h
}
// Str returns the string representation of the underlying hash
func (h Hash) Str() string { return string(h[:]) }
// Bytes returns the bytes representation of the underlying hash
func (h Hash) Bytes() []byte { return h[:] }
// Big returns the big.Int representation of the underlying hash
func (h Hash) Big() *big.Int { return new(big.Int).SetBytes(h[:]) }
// Hex returns the hex-string representation of the underlying hash
func (h Hash) Hex() string { return hexutil.Encode(h[:]) }
// TerminalString implements log.TerminalStringer, formatting a string for console
// output during logging.
func (h Hash) TerminalString() string {
return fmt.Sprintf("%x…%x", h[:3], h[29:])
}
// String implements the stringer interface and is used also by the logger when
// doing full logging into a file.
func (h Hash) String() string {
return h.Hex()
}
// Format implements fmt.Formatter, forcing the byte slice to be formatted as is,
// without going through the stringer interface used for logging.
func (h Hash) Format(s fmt.State, c rune) {
fmt.Fprintf(s, "%"+string(c), h[:])
}
// UnmarshalText parses a hash in hex syntax.
func (h *Hash) UnmarshalText(input []byte) error {
return hexutil.UnmarshalFixedText("Hash", input, h[:])
}
// UnmarshalJSON parses a hash in hex syntax.
func (h *Hash) UnmarshalJSON(input []byte) error {
return hexutil.UnmarshalFixedJSON(hashT, input, h[:])
}
// MarshalText returns the hex representation of h.
func (h Hash) MarshalText() ([]byte, error) {
return hexutil.Bytes(h[:]).MarshalText()
}
// SetBytes Sets the hash to the value of b. If b is larger than len(h), 'b' will be cropped (from the left).
func (h *Hash) SetBytes(b []byte) {
if len(b) > len(h) {
b = b[len(b)-HashLength:]
}
copy(h[HashLength-len(b):], b)
}
// SetString sets string `s` to h. If s is larger than len(h) s will be cropped (from left) to fit.
func (h *Hash) SetString(s string) { h.SetBytes([]byte(s)) }
// Set h to other
func (h *Hash) Set(other Hash) {
for i, v := range other {
h[i] = v
}
}
// Generate implements testing/quick.Generator.
func (h Hash) Generate(rand *rand.Rand, size int) reflect.Value {
m := rand.Intn(len(h))
for i := len(h) - 1; i > m; i-- {
h[i] = byte(rand.Uint32())
}
return reflect.ValueOf(h)
}
// EmptyHash checks if given Hash is empty
func EmptyHash(h Hash) bool {
return h == Hash{}
}
// UnprefixedHash allows marshaling a Hash without 0x prefix.
type UnprefixedHash Hash
// UnmarshalText decodes the hash from hex. The 0x prefix is optional.
func (h *UnprefixedHash) UnmarshalText(input []byte) error {
return hexutil.UnmarshalFixedUnprefixedText("UnprefixedHash", input, h[:])
}
// MarshalText encodes the hash as hex.
func (h UnprefixedHash) MarshalText() ([]byte, error) {
return []byte(hex.EncodeToString(h[:])), nil
}
// ///////// Coin
// CoinSymbol represents the 10 byte coin symbol.
type CoinSymbol [CoinSymbolLength]byte
func (c CoinSymbol) String() string { return string(bytes.Trim(c[:], "\x00")) }
// Bytes returns the bytes representation of the underlying CoinSymbol
func (c CoinSymbol) Bytes() []byte { return c[:] }
// MarshalJSON encodes coin to json
func (c CoinSymbol) MarshalJSON() ([]byte, error) {
buffer := bytes.NewBufferString("\"")
buffer.WriteString(c.String())
buffer.WriteString("\"")
return buffer.Bytes(), nil
}
// UnmarshalJSON parses a coinSymbol from json
func (c *CoinSymbol) UnmarshalJSON(input []byte) error {
*c = StrToCoinSymbol(string(input[1 : len(input)-1]))
return nil
}
// Compare compares coin symbols.
// The result will be 0 if a==b, -1 if a < b, and +1 if a > b.
func (c CoinSymbol) Compare(c2 CoinSymbol) int {
return bytes.Compare(c.Bytes(), c2.Bytes())
}
// IsBaseCoin checks if coin is a base coin
func (c CoinSymbol) IsBaseCoin() bool {
return c.Compare(GetBaseCoin()) == 0
}
// StrToCoinSymbol converts given string to a coin symbol
func StrToCoinSymbol(s string) CoinSymbol {
var symbol CoinSymbol
copy(symbol[:], s)
return symbol
}
// StrToCoinBaseSymbol converts give string to a coin base symbol
func StrToCoinBaseSymbol(s string) CoinSymbol {
delimiter := strings.Index(s, "-")
if delimiter > 3 {
return StrToCoinSymbol(s[:delimiter])
}
return StrToCoinSymbol(s)
}
// GetVersionFromSymbol returns coin version extracted from symbol
func GetVersionFromSymbol(s string) CoinVersion {
parts := strings.Split(s, "-")
if len(parts) == 1 {
return 0
}
if len(parts[0]) < 3 {
return 0
}
v, _ := strconv.ParseUint(parts[1], 10, 16)
return CoinVersion(v)
}
// CoinID represents coin id
type CoinID uint32
// IsBaseCoin checks if
func (c CoinID) IsBaseCoin() bool {
return c == GetBaseCoinID()
}
func (c CoinID) String() string {
return strconv.Itoa(int(c))
}
// Bytes returns LittleEndian encoded bytes of coin id
func (c CoinID) Bytes() []byte {
b := make([]byte, 4)
binary.LittleEndian.PutUint32(b, c.Uint32())
return b
}
// Uint32 returns coin id as uint32
func (c CoinID) Uint32() uint32 {
return uint32(c)
}
// BytesToCoinID converts bytes to coin id. Expects LittleEndian encoding.
func BytesToCoinID(bytes []byte) CoinID {
return CoinID(binary.LittleEndian.Uint32(bytes))
}
// CoinVersion represents coin version info
type CoinVersion = uint16
// ///////// Address
// Address represents 20-byte address in Minter Blockchain
type Address [AddressLength]byte
// BytesToAddress converts given byte slice to Address
func BytesToAddress(b []byte) Address {
var a Address
a.SetBytes(b)
return a
}
// StringToAddress converts given string to Address
func StringToAddress(s string) Address { return BytesToAddress([]byte(s)) }
// BigToAddress converts given big.Int to Address
func BigToAddress(b *big.Int) Address { return BytesToAddress(b.Bytes()) }
// HexToAddress converts given hex string to Address
func HexToAddress(s string) Address { return BytesToAddress(FromHex(s, "Mx")) }
// IsHexAddress verifies whether a string can represent a valid hex-encoded
// Minter address or not.
func IsHexAddress(s string) bool {
if hasHexPrefix(s, "Mx") {
s = s[2:]
}
return len(s) == 2*AddressLength && isHex(s)
}
// Str returns the string representation of the underlying address
func (a Address) Str() string { return string(a[:]) }
// Bytes returns the byte representation of the underlying address
func (a Address) Bytes() []byte { return a[:] }
// Big returns the big.Int representation of the underlying address
func (a Address) Big() *big.Int { return new(big.Int).SetBytes(a[:]) }
// Hash returns the Hash representation of the underlying address
func (a Address) Hash() Hash { return BytesToHash(a[:]) }
// Hex returns the hex-string representation of the underlying address
func (a Address) Hex() string {
return "Mx" + hex.EncodeToString(a[:])
}
// String implements the stringer interface and is used also by the logger.
func (a Address) String() string {
return a.Hex()
}
// Format implements fmt.Formatter, forcing the byte slice to be formatted as is,
// without going through the stringer interface used for logging.
func (a Address) Format(s fmt.State, c rune) {
fmt.Fprintf(s, "%"+string(c), a[:])
}
// SetBytes Sets the address to the value of b. If b is larger than len(a) it will panic
func (a *Address) SetBytes(b []byte) {
if len(b) > len(a) {
b = b[len(b)-AddressLength:]
}
copy(a[AddressLength-len(b):], b)
}
// SetString set string `s` to a. If s is larger than len(a) it will panic
func (a *Address) SetString(s string) { a.SetBytes([]byte(s)) }
// Set Sets a to other
func (a *Address) Set(other Address) {
for i, v := range other {
a[i] = v
}
}
// MarshalText returns the hex representation of a.
func (a Address) MarshalText() ([]byte, error) {
return hexutil.Bytes(a[:]).MarshalText()
}
// UnmarshalText parses a hash in hex syntax.
func (a *Address) UnmarshalText(input []byte) error {
return hexutil.UnmarshalFixedText("Address", input, a[:])
}
// Unmarshal parses a hash from byte slice.
func (a *Address) Unmarshal(input []byte) error {
copy(a[:], input)
return nil
}
// MarshalJSON marshals given address to json format.
func (a Address) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf("\"%s\"", a.String())), nil
}
// UnmarshalJSON parses a hash in hex syntax.
func (a *Address) UnmarshalJSON(input []byte) error {
return hexutil.UnmarshalFixedJSON(addressT, input, a[:])
}
// Compare compares addresses.
// The result will be 0 if a==b, -1 if a < b, and +1 if a > b.
func (a *Address) Compare(a2 Address) int {
return bytes.Compare(a.Bytes(), a2.Bytes())
}
// Pubkey represents 32 byte public key of a validator
type Pubkey [PubKeyLength]byte
// HexToPubkey decodes given string into Pubkey
func HexToPubkey(s string) Pubkey { return BytesToPubkey(FromHex(s, "Mp")) }
// BytesToPubkey decodes given bytes into Pubkey
func BytesToPubkey(b []byte) Pubkey {
var p Pubkey
p.SetBytes(b)
return p
}
// SetBytes sets given bytes as public key
func (p *Pubkey) SetBytes(b []byte) {
if len(b) > len(p) {
b = b[len(b)-PubKeyLength:]
}
copy(p[PubKeyLength-len(b):], b)
}
// Bytes returns underlying bytes
func (p Pubkey) Bytes() []byte { return p[:] }
func (p Pubkey) String() string {
return fmt.Sprintf("Mp%x", p[:])
}
// MarshalText encodes Pubkey from to text.
func (p Pubkey) MarshalText() ([]byte, error) {
return []byte(p.String()), nil
}
// MarshalJSON encodes Pubkey from to json format.
func (p Pubkey) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf("\"%s\"", p.String())), nil
}
// UnmarshalJSON decodes Pubkey from json format.
func (p *Pubkey) UnmarshalJSON(input []byte) error {
b, err := hex.DecodeString(string(input)[3 : len(input)-1])
copy(p[:], b)
return err
}
// Equals checks if public keys are equal
func (p Pubkey) Equals(p2 Pubkey) bool {
return p == p2
}
// TmAddress represents Tendermint address
type TmAddress [TendermintAddressLength]byte
func GetTmAddress(publicKey Pubkey) TmAddress {
// set tm address
var pubkey ed25519.PubKey
copy(pubkey[:], publicKey[:])
var address TmAddress
copy(address[:], pubkey.Address().Bytes())
return address
} | coreV2/types/types.go | 0.814938 | 0.458894 | types.go | starcoder |
package result
import (
"errors"
"fmt"
)
// Result is a type that represents either success (T) or failure (error).
type Result[T any] struct {
ok T
err error
}
func Wrap[T any](some T, err error) Result[T] {
if err != nil {
return Err[T](err)
}
return Ok(some)
}
func Ok[T any](ok T) Result[T] {
return Result[T]{ok: ok}
}
func Err[T any](err any) Result[T] {
return Result[T]{err: newAnyError(err)}
}
// IsOk returns true if the result is Ok.
func (r Result[T]) IsOk() bool {
return !r.IsErr()
}
// IsOkAnd returns true if the result is Ok and the value inside of it matches a predicate.
func (r Result[T]) IsOkAnd(f func(T) bool) bool {
if r.IsOk() {
return f(r.ok)
}
return false
}
// IsErr returns true if the result is error.
func (r Result[T]) IsErr() bool {
return r.err != nil
}
// IsErrAnd returns true if the result is Err and the value inside of it matches a predicate.
func (r Result[T]) IsErrAnd(f func(error) bool) bool {
if r.IsErr() {
return f(r.err)
}
return false
}
// Ok returns T, and returns empty if it is an error.
func (r Result[T]) Ok() *T {
if r.IsOk() {
return &r.ok
}
return nil
}
// Err returns error.
func (r Result[T]) Err() error {
return r.err
}
// ErrVal returns error inner value.
func (r Result[T]) ErrVal() any {
if r.IsErr() {
return nil
}
if ev, _ := r.err.(*errorWithVal); ev != nil {
return ev.val
}
return r.err
}
// Map maps a Result[T] to Result[T] by applying a function to a contained Ok value, leaving an Err value untouched.
// This function can be used to compose the results of two functions.
func (r Result[T]) Map(f func(T) T) Result[T] {
if r.IsOk() {
return Ok[T](f(r.ok))
}
return Err[T](r.err)
}
// Map maps a Result[T] to Result[U] by applying a function to a contained Ok value, leaving an Err value untouched.
// This function can be used to compose the results of two functions.
func Map[T any, U any](r Result[T], f func(T) U) Result[U] {
if r.IsOk() {
return Ok[U](f(r.ok))
}
return Err[U](r.err)
}
// MapOr returns the provided default (if Err), or applies a function to the contained value (if Ok),
// Arguments passed to map_or are eagerly evaluated; if you are passing the result of a function call, it is recommended to use map_or_else, which is lazily evaluated.
func (r Result[T]) MapOr(defaultOk T, f func(T) T) T {
if r.IsOk() {
return f(r.ok)
}
return defaultOk
}
// MapOr returns the provided default (if Err), or applies a function to the contained value (if Ok),
// Arguments passed to map_or are eagerly evaluated; if you are passing the result of a function call, it is recommended to use map_or_else, which is lazily evaluated.
func MapOr[T any, U any](r Result[T], defaultOk U, f func(T) U) U {
if r.IsOk() {
return f(r.ok)
}
return defaultOk
}
// MapOrElse maps a Result[T] to T by applying fallback function default to a contained Err value, or function f to a contained Ok value.
// This function can be used to unpack a successful result while handling an error.
func (r Result[T]) MapOrElse(defaultFn func(error) T, f func(T) T) T {
if r.IsOk() {
return f(r.ok)
}
return defaultFn(r.err)
}
// MapOrElse maps a Result[T] to U by applying fallback function default to a contained Err value, or function f to a contained Ok value.
// This function can be used to unpack a successful result while handling an error.
func MapOrElse[T any, U any](r Result[T], defaultFn func(error) U, f func(T) U) U {
if r.IsOk() {
return f(r.ok)
}
return defaultFn(r.err)
}
// MapErr maps a Result[T] to Result[T] by applying a function to a contained Err value, leaving an Ok value untouched.
// This function can be used to pass through a successful result while handling an error.
func (r *Result[T]) MapErr(op func(error) error) Result[T] {
if r.IsErr() {
r.err = op(r.err)
}
return *r
}
// MapErr maps a Result[T] to Result[T] by applying a function to a contained Err value, leaving an Ok value untouched.
// This function can be used to pass through a successful result while handling an error.
func MapErr[T any](r *Result[T], op func(error) error) Result[T] {
if r.IsErr() {
r.err = op(r.err)
}
return *r
}
// Inspect calls the provided closure with a reference to the contained value (if Ok).
func (r Result[T]) Inspect(f func(T)) Result[T] {
if r.IsOk() {
f(r.ok)
}
return r
}
// InspectErr calls the provided closure with a reference to the contained error (if Err).
func (r Result[T]) InspectErr(f func(error)) Result[T] {
if r.IsErr() {
f(r.err)
}
return r
}
// Expect returns the contained Ok value, consuming the self value.
func (r Result[T]) Expect(msg string) T {
if r.IsErr() {
panic(fmt.Errorf("%s: %w", msg, r.err))
}
return r.ok
}
// Unwrap returns the contained Ok value, consuming the self value.
// Because this function may panic, its use is generally discouraged. Instead, prefer to use pattern matching and handle the Err case explicitly, or call unwrap_or, unwrap_or_else, or unwrap_or_default.
func (r Result[T]) Unwrap() T {
if r.IsErr() {
panic(fmt.Errorf("called `Result.Unwrap()` on an `err` value: %w", r.err))
}
return r.ok
}
// ExpectErr returns the contained Err value, consuming the self value.
func (r Result[T]) ExpectErr(msg string) error {
if r.IsErr() {
return r.err
}
panic(fmt.Errorf("%s: %v", msg, r.ok))
}
// UnwrapErr returns the contained Err value, consuming the self value.
func (r Result[T]) UnwrapErr() error {
if r.IsErr() {
return r.err
}
panic(fmt.Errorf("called `Result.UnwrapErr()` on an `ok` value: %v", r.ok))
}
// And returns res if the result is Ok, otherwise returns the Err value of self.
func (r Result[T]) And(res Result[T]) Result[T] {
if r.IsErr() {
return r
}
return res
}
// And returns r2 if the result is Ok, otherwise returns the Err value of r.
func And[T any, U any](r Result[T], r2 Result[U]) Result[U] {
if r.IsErr() {
return Result[U]{err: r.err}
}
return r2
}
// AndThen calls op if the result is Ok, otherwise returns the Err value of self.
// This function can be used for control flow based on Result values.
func (r Result[T]) AndThen(op func(T) Result[T]) Result[T] {
if r.IsErr() {
return r
}
return op(r.ok)
}
// AndThen calls op if the result is Ok, otherwise returns the Err value of self.
// This function can be used for control flow based on Result values.
func AndThen[T any, U any](r Result[T], op func(T) Result[U]) Result[U] {
if r.IsErr() {
return Result[U]{err: r.err}
}
return op(r.ok)
}
// Or returns res if the result is Err, otherwise returns the Ok value of r.
// Arguments passed to or are eagerly evaluated; if you are passing the result of a function call, it is recommended to use or_else, which is lazily evaluated.
func (r Result[T]) Or(res Result[T]) Result[T] {
if r.IsErr() {
return res
}
return r
}
// OrElse calls op if the result is Err, otherwise returns the Ok value of self.
// This function can be used for control flow based on result values.
func (r Result[T]) OrElse(op func(error) Result[T]) Result[T] {
if r.IsErr() {
return op(r.err)
}
return r
}
// UnwrapOr returns the contained Ok value or a provided default.
// Arguments passed to unwrap_or are eagerly evaluated; if you are passing the result of a function call, it is recommended to use unwrap_or_else, which is lazily evaluated.
func (r Result[T]) UnwrapOr(defaultOk T) T {
if r.IsErr() {
return defaultOk
}
return r.ok
}
// UnwrapOrElse returns the contained Ok value or computes it from a closure.
func (r Result[T]) UnwrapOrElse(defaultFn func(error) T) T {
if r.IsErr() {
return defaultFn(r.err)
}
return r.ok
}
// UnwrapUnchecked returns the contained Ok value, consuming the self value, without checking that the value is not an Err.
func (r Result[T]) UnwrapUnchecked() T {
return r.ok
}
// Contains returns true if the result is an Ok value containing the given value.
func Contains[T comparable](r Result[T], x T) bool {
if r.IsErr() {
return false
}
return r.ok == x
}
// ContainsErr returns true if the result is an Err value containing the given value.
func (r Result[T]) ContainsErr(err error) bool {
if r.IsOk() {
return false
}
if err == r.err {
return true
}
return errors.Is(r.err, err)
}
// Flatten converts from Result[Result[T]] to Result[T].
func Flatten[T any](r Result[Result[T]]) Result[T] {
return AndThen(r, func(rr Result[T]) Result[T] { return rr })
}
func (r Result[T]) String() string {
if r.IsErr() {
return fmt.Sprintf("Err(%s)", r.err.Error())
}
return fmt.Sprintf("Ok(%v)", r.ok)
} | result.go | 0.762689 | 0.548613 | result.go | starcoder |
package process
import "fmt"
// ChildrenIDs returns the list of node IDs with a dependency to the current node
func (g Process) ChildrenKeys(nodeKey string) []string {
nodeKeys := make([]string, 0)
for _, edge := range g.EdgesFrom(nodeKey) {
nodeKeys = append(nodeKeys, edge.Dst)
}
return nodeKeys
}
// ParentIDs returns the list of node IDs with the current node as child
func (g Process) ParentKeys(nodeKey string) []string {
nodeIDs := make([]string, 0)
for _, edge := range g.Edges {
if edge.Dst == nodeKey {
nodeIDs = append(nodeIDs, edge.Src)
}
}
return nodeIDs
}
// FindParentWithType returns the first parent matching the type.
// Only works on mono-parental graph.
// TODO: make it works on multi-parents
// TODO: find a better way than having to pass this match function
func (g Process) FindParentWithType(nodeKey string, match func(*Process_Node) bool) (*Process_Node, error) {
// get parent node's instance hash
parents := g.ParentKeys(nodeKey)
if len(parents) != 1 {
return nil, fmt.Errorf("the node must have exactly 1 parent")
}
parentNode, err := g.FindNode(parents[0])
if err != nil {
return nil, err
}
if match(parentNode) {
return parentNode, nil
}
return g.FindParentWithType(parentNode.Key, match)
}
// FindNodes returns a list of nodes matching a specific filter
func (g Process) FindNodes(filter func(n *Process_Node) bool) []*Process_Node {
nodes := make([]*Process_Node, 0)
for _, node := range g.Nodes {
if filter(node) {
nodes = append(nodes, node)
}
}
return nodes
}
// FindNode return a specific node in a graph identifies by its ID. Returns an error if there is no match or multiple matches
func (g Process) FindNode(nodeKey string) (*Process_Node, error) {
nodes := g.FindNodes(func(n *Process_Node) bool {
return n.Key == nodeKey
})
if len(nodes) == 0 {
return nil, fmt.Errorf("node %q not found", nodeKey)
}
if len(nodes) > 1 {
return nil, fmt.Errorf("multiple nodes with the id %q", nodeKey)
}
return nodes[0], nil
}
// EdgesFrom return all the edges that has a common source
func (g Process) EdgesFrom(src string) []*Process_Edge {
edges := make([]*Process_Edge, 0)
for _, edge := range g.Edges {
if edge.Src == src {
edges = append(edges, edge)
}
}
return edges
}
// A null graph is a graph that contains no nodes
func (g Process) hasNodes() bool {
return len(g.Nodes) > 0
}
// An acyclic graph is a graph that doesn't contain a cycle. If you walk through the graph you will go maximum one time on each node.
func (g Process) isAcyclic() bool {
visited := make(map[string]bool)
recursive := make(map[string]bool)
for _, node := range g.Nodes {
if g.hasCycle(node.Key, visited, recursive) {
return false
}
}
return true
}
// Check if the descendant of a node are creating any cycle. https://algorithms.tutorialhorizon.com/graph-detect-cycle-in-a-directed-graph/
func (g Process) hasCycle(node string, visited map[string]bool, recursive map[string]bool) bool {
visited[node] = true
recursive[node] = true
for _, child := range g.ChildrenKeys(node) {
if !visited[child] && g.hasCycle(child, visited, recursive) {
return true
}
if recursive[child] {
return true
}
}
recursive[node] = false
return false
}
// A connected graph is a graph where all the nodes are connected with each other through edges.
// Warning: this function will have a stack overflow if the graph is not acyclic.
func (g Process) isConnected() bool {
root := g.getRoot(g.Nodes[0].Key)
visited := make(map[string]bool)
g.dfs(root, func(node string) {
visited[node] = true
})
return len(visited) == len(g.Nodes)
}
// walk through all the children of a node and populate a map of visited children.
func (g Process) dfs(node string, fn func(node string)) {
fn(node)
for _, n := range g.ChildrenKeys(node) {
g.dfs(n, fn)
}
}
// getRoot get the root of the tree graph
func (g Process) getRoot(node string) string {
parents := g.ParentKeys(node)
if len(parents) == 0 {
return node
}
if len(parents) > 1 {
panic("multiple parents is not supported")
}
return g.getRoot(parents[0])
}
// Return the maximum number of parent found in the graph.
func (g Process) maximumParents() int {
max := 0
for _, node := range g.Nodes {
if l := len(g.ParentKeys(node.Key)); max < l {
max = l
}
}
return max
}
func (g Process) shouldBeDirectedTree() error {
if !g.hasNodes() {
return fmt.Errorf("process needs to have at least one node")
}
if !g.isAcyclic() {
return fmt.Errorf("process should not contain any cycles")
}
if !g.isConnected() {
return fmt.Errorf("process should be a connected graph")
}
if g.maximumParents() > 1 {
return fmt.Errorf("process should contain nodes with one parent maximum")
}
return nil
}
func (g Process) validate() error {
for _, edge := range g.Edges {
if _, err := g.FindNode(edge.Src); err != nil {
return err
}
if _, err := g.FindNode(edge.Dst); err != nil {
return err
}
}
return nil
} | process/graph.go | 0.554953 | 0.556761 | graph.go | starcoder |
package main
import (
"github.com/otyg/threagile/model"
"github.com/otyg/threagile/model/confidentiality"
"github.com/otyg/threagile/model/criticality"
)
type missingMonitoring string
var RiskRule missingMonitoring
func (r missingMonitoring) Category() model.RiskCategory {
return model.RiskCategory{
Id: "missing-monitoring",
Title: "Missing Monitoring",
Description: "The model is missing a monitoring target for collecting, analysis and alerting on logdata and events.",
Impact: "Without an external platform for monitoring an attacker might go undetected and might be able to tamper with logfiles etc.",
ASVS: "[v4.0.3-7 - Error Handling and Logging Verification Requirements](https://github.com/OWASP/ASVS/blob/v4.0.3_release/4.0/en/0x15-V7-Error-Logging.md)",
CheatSheet: "",
Action: "Logging and monitoring",
Mitigation: "Send logdata and other events to an external platform for storage and analysis.",
Check: "Are relevant logs sent to an external monitoring platform?",
Function: model.Architecture,
STRIDE: model.Repudiation,
DetectionLogic: "Models without a Monitoring platform",
RiskAssessment: "The risk rating depends on the sensitivity of the technical assets and data processed.",
FalsePositives: "None",
ModelFailurePossibleReason: true,
CWE: 778,
}
}
func (r missingMonitoring) SupportedTags() []string {
return []string{}
}
func (r missingMonitoring) GenerateRisks() []model.Risk {
risks := make([]model.Risk, 0)
hasMonitoring := false
var mostRelevantAsset model.TechnicalAsset
impact := model.MediumImpact
probability := model.Likely
for _, id := range model.SortedTechnicalAssetIDs() { // use the sorted one to always get the same tech asset with highest sensitivity as example asset
techAsset := model.ParsedModelRoot.TechnicalAssets[id]
if techAsset.Technology == model.Monitoring {
hasMonitoring = true
break
}
if techAsset.HighestConfidentiality() == confidentiality.Confidential ||
techAsset.HighestIntegrity() == criticality.Critical ||
techAsset.HighestAvailability() == criticality.Critical {
impact = model.HighImpact
probability = model.VeryLikely
} else if techAsset.HighestConfidentiality() == confidentiality.StrictlyConfidential ||
techAsset.HighestIntegrity() == criticality.MissionCritical ||
techAsset.HighestAvailability() == criticality.MissionCritical {
impact = model.VeryHighImpact
probability = model.VeryLikely
}
if techAsset.Confidentiality == confidentiality.Confidential ||
techAsset.Integrity == criticality.Critical ||
techAsset.Availability == criticality.Critical {
impact = model.HighImpact
probability = model.VeryLikely
} else if techAsset.Confidentiality == confidentiality.StrictlyConfidential ||
techAsset.Integrity == criticality.MissionCritical ||
techAsset.Availability == criticality.MissionCritical {
impact = model.VeryHighImpact
probability = model.VeryLikely
}
// just for referencing the most interesting asset
if techAsset.HighestSensitivityScore() > mostRelevantAsset.HighestSensitivityScore() {
mostRelevantAsset = techAsset
}
}
if !hasMonitoring {
risks = append(risks, createRisk(mostRelevantAsset, impact, probability))
} else {
for _, id := range model.SortedTechnicalAssetIDs() { // use the sorted one to always get the same tech asset with highest sensitivity as example asset
techAsset := model.ParsedModelRoot.TechnicalAssets[id]
if techAsset.OutOfScope || techAsset.Technology == model.Monitoring {
continue
}
targetMonitor := false
impact := model.MediumImpact
probability := model.Likely
if techAsset.Confidentiality == confidentiality.Confidential ||
techAsset.Integrity == criticality.Critical ||
techAsset.Availability == criticality.Critical {
impact = model.HighImpact
probability = model.VeryLikely
} else if techAsset.Confidentiality == confidentiality.StrictlyConfidential ||
techAsset.Integrity == criticality.MissionCritical ||
techAsset.Availability == criticality.MissionCritical {
impact = model.VeryHighImpact
probability = model.VeryLikely
}
commLinks := techAsset.CommunicationLinks
for _, commLink := range commLinks {
destination := model.ParsedModelRoot.TechnicalAssets[commLink.TargetId]
if destination.Technology == model.Monitoring {
targetMonitor = true
}
}
if !targetMonitor {
risks = append(risks, createRisk(techAsset, impact, probability))
}
}
}
return risks
}
func createRisk(technicalAsset model.TechnicalAsset, impact model.RiskExploitationImpact, probability model.RiskExploitationLikelihood) model.Risk {
title := "<b>Missing Monitoring (Logging platform)</b> in the threat model (referencing asset <b>" + technicalAsset.Title + "</b> as an example)"
risk := model.Risk{
Category: RiskRule.Category(),
Severity: model.CalculateSeverity(probability, impact),
ExploitationLikelihood: probability,
ExploitationImpact: impact,
Title: title,
MostRelevantTechnicalAssetId: technicalAsset.Id,
DataBreachProbability: model.Improbable,
DataBreachTechnicalAssetIDs: []string{},
}
risk.SyntheticId = risk.Category.Id + "@" + technicalAsset.Id
return risk
} | risks/missing-monitoring/missing-monitoring-rule.go | 0.663887 | 0.457379 | missing-monitoring-rule.go | starcoder |
package types
import (
"encoding/binary"
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto"
)
type bytesBacked interface {
Bytes() []byte
}
const (
// BloomByteLength represents the number of bytes used in a header log bloom.
BloomByteLength = 256
// BloomBitLength represents the number of bits used in a header log bloom.
BloomBitLength = 8 * BloomByteLength
)
// Bloom represents a 2048 bit bloom filter.
type Bloom [BloomByteLength]byte
// BytesToBloom converts a byte slice to a bloom filter.
// It panics if b is not of suitable size.
func BytesToBloom(b []byte) Bloom {
var bloom Bloom
bloom.SetBytes(b)
return bloom
}
// SetBytes sets the content of b to the given bytes.
// It panics if d is not of suitable size.
func (b *Bloom) SetBytes(d []byte) {
if len(b) < len(d) {
panic(fmt.Sprintf("bloom bytes too big %d %d", len(b), len(d)))
}
copy(b[BloomByteLength-len(d):], d)
}
// Add adds d to the filter. Future calls of Test(d) will return true.
func (b *Bloom) Add(d []byte) {
b.add(d, make([]byte, 6))
}
// add is internal version of Add, which takes a scratch buffer for reuse (needs to be at least 6 bytes)
func (b *Bloom) add(d []byte, buf []byte) {
i1, v1, i2, v2, i3, v3 := bloomValues(d, buf)
b[i1] |= v1
b[i2] |= v2
b[i3] |= v3
}
// Big converts b to a big integer.
// Note: Converting a bloom filter to a big.Int and then calling GetBytes
// does not return the same bytes, since big.Int will trim leading zeroes
func (b Bloom) Big() *big.Int {
return new(big.Int).SetBytes(b[:])
}
// Bytes returns the backing byte slice of the bloom
func (b Bloom) Bytes() []byte {
return b[:]
}
// Test checks if the given topic is present in the bloom filter
func (b Bloom) Test(topic []byte) bool {
i1, v1, i2, v2, i3, v3 := bloomValues(topic, make([]byte, 6))
return v1 == v1&b[i1] &&
v2 == v2&b[i2] &&
v3 == v3&b[i3]
}
// MarshalText encodes b as a hex string with 0x prefix.
func (b Bloom) MarshalText() ([]byte, error) {
return hexutil.Bytes(b[:]).MarshalText()
}
// UnmarshalText b as a hex string with 0x prefix.
func (b *Bloom) UnmarshalText(input []byte) error {
return hexutil.UnmarshalFixedText("Bloom", input, b[:])
}
// CreateBloom creates a bloom filter out of the give Receipts (+Logs)
func CreateBloom(receipts Receipts) Bloom {
buf := make([]byte, 6)
var bin Bloom
for _, receipt := range receipts {
for _, log := range receipt.Logs {
bin.add(log.Address.Bytes(), buf)
for _, b := range log.Topics {
bin.add(b[:], buf)
}
}
}
return bin
}
// LogsBloom returns the bloom bytes for the given logs
func LogsBloom(logs []*Log) []byte {
buf := make([]byte, 6)
var bin Bloom
for _, log := range logs {
bin.add(log.Address.Bytes(), buf)
for _, b := range log.Topics {
bin.add(b[:], buf)
}
}
return bin[:]
}
// Bloom9 returns the bloom filter for the given data
func Bloom9(data []byte) []byte {
var b Bloom
b.SetBytes(data)
return b.Bytes()
}
// bloomValues returns the bytes (index-value pairs) to set for the given data
func bloomValues(data []byte, hashbuf []byte) (uint, byte, uint, byte, uint, byte) {
sha := hasherPool.Get().(crypto.KeccakState)
sha.Reset()
sha.Write(data)
sha.Read(hashbuf)
hasherPool.Put(sha)
// The actual bits to flip
v1 := byte(1 << (hashbuf[1] & 0x7))
v2 := byte(1 << (hashbuf[3] & 0x7))
v3 := byte(1 << (hashbuf[5] & 0x7))
// The indices for the bytes to OR in
i1 := BloomByteLength - uint((binary.BigEndian.Uint16(hashbuf)&0x7ff)>>3) - 1
i2 := BloomByteLength - uint((binary.BigEndian.Uint16(hashbuf[2:])&0x7ff)>>3) - 1
i3 := BloomByteLength - uint((binary.BigEndian.Uint16(hashbuf[4:])&0x7ff)>>3) - 1
return i1, v1, i2, v2, i3, v3
}
// BloomLookup is a convenience-method to check presence int he bloom filter
func BloomLookup(bin Bloom, topic bytesBacked) bool {
return bin.Test(topic.Bytes())
} | ethereum_pack/core/types/bloom9.go | 0.792585 | 0.538194 | bloom9.go | starcoder |
package main
import (
"bytes"
"errors"
"github.com/michaelcmartin/tiledither/dither"
"image"
"image/color"
"sort"
)
type c64bmp struct {
src image.Image
pixmap [][]int
palettes [][]color.Palette
bg int
}
func newC64bmp(w, h int) *c64bmp {
bmp := new(c64bmp)
matrix := make([][]int, w)
buf := make([]int, w*h)
for i := range matrix {
matrix[i], buf = buf[:h], buf[h:]
}
bmp.pixmap = matrix
cw := (w + 3) / 4
ch := (h + 7) / 8
palmatrix := make([][]color.Palette, cw)
palbuf := make([]color.Palette, cw*ch)
for i := range palmatrix {
palmatrix[i], palbuf = palbuf[:ch], palbuf[ch:]
}
bmp.palettes = palmatrix
return bmp
}
func convertMC(src image.Image) (*DitherResult, error) {
// Step one: Produce a half-width image since multicolor
// images are doubled pixels.
bounds := src.Bounds()
w := bounds.Dx() / 2
h := bounds.Dy()
if w != 160 || h != 200 {
return nil, errors.New("Image to convert must be 320x200")
}
dest := image.NewRGBA(image.Rect(0, 0, w, h))
for y := 0; y < h; y++ {
for x := 0; x < w; x++ {
r1, g1, b1, a1 := src.At(x*2+bounds.Min.X, y+bounds.Min.Y).RGBA()
r2, g2, b2, a2 := src.At(x*2+bounds.Min.X+1, y+bounds.Min.Y).RGBA()
r := uint16((r1 + r2) / 2)
g := uint16((g1 + g2) / 2)
b := uint16((b1 + b2) / 2)
a := uint16((a1 + a2) / 2)
dest.Set(x, y, color.RGBA64{r, g, b, a})
}
}
// Step 2: Convert freely to the C64 palette. Store as 2D
// array of ints (palette entries).
pass1 := dither.ToPalette(dest, C64)
bounds = pass1.Bounds()
bmp := newC64bmp(w, h)
bmp.src = dest
p := bmp.pixmap
for y := 0; y < h; y++ {
for x := 0; x < w; x++ {
p[x][y] = C64.Index(pass1.At(x, y))
}
}
// Step 3: Try each of the 16 colors to see which background
// color involves the fewest compromises.
besterr := 65000
bmp.bg = -1
for proposedBG := 0; proposedBG < 16; proposedBG++ {
pixerr := 0
for y := 0; y < h; y += 8 {
for x := 0; x < w; x += 4 {
var charUse [16]int
charColorCount := 0
for cy := 0; cy < 8; cy++ {
if y+cy >= h {
break
}
for cx := 0; cx < 4; cx++ {
if x+cx >= w {
break
}
c := p[x+cx][y+cy]
if c == proposedBG {
continue
}
if charUse[c] == 0 {
charColorCount++
}
charUse[c]++
}
}
if charColorCount > 3 {
sort.Ints(charUse[:])
for _, v := range charUse[:13] {
pixerr += v
}
}
}
}
if pixerr < besterr {
besterr = pixerr
bmp.bg = proposedBG
}
}
// Now that we have a background color we can compute the
// remaining palettes based on that. TODO: this is based on
// the three most common non-background pixels in each 4x8
// block. A more thorough system - especially one that gets to
// select its own colors - would probably want to use
// something like median cut.
for y := 0; y < h; y += 8 {
for x := 0; x < w; x += 4 {
var charUse [16]int
for cy := 0; cy < 8; cy++ {
if y+cy >= h {
break
}
for cx := 0; cx < 4; cx++ {
if x+cx >= w {
break
}
c := p[x+cx][y+cy]
if c == bmp.bg {
continue
}
charUse[c]++
}
}
// Get the three most common pixels left
i1, i2, i3 := 0, 0, 0
c1, c2, c3 := -1, -1, -1
for i, c := range charUse {
if c > c3 {
c1, c2, c3 = c2, c3, c
i1, i2, i3 = i2, i3, i
} else if c > c2 {
c1, c2 = c2, c
i1, i2 = i2, i
} else if c > c1 {
c1 = c
i1 = i
}
}
bmp.palettes[x/4][y/8] = color.Palette{C64[bmp.bg], C64[i1], C64[i2], C64[i3]}
}
}
dither.Convert(bmp)
// Package up our results
result := new(DitherResult)
// Preview image
w, h = bmp.Width(), bmp.Height()
result.Preview = image.NewRGBA(image.Rect(0, 0, w*2, h))
for y := 0; y < h; y++ {
for x := 0; x < w; x++ {
c := bmp.PaletteAt(x, y)[bmp.pixmap[x][y]]
result.Preview.Set(x*2, y, c)
result.Preview.Set(x*2+1, y, c)
}
}
// The actual data
var outbuf bytes.Buffer
outbuf.Write([]byte{0x00, 0x60})
outbuf.Write(bitmap(bmp))
outbuf.Write(textmap(bmp))
outbuf.Write(colormap(bmp))
outbuf.Write([]byte{byte(bmp.bg)})
result.Data = outbuf.Bytes()
return result, nil
}
// Implement dither.Context methods for c64bmp.
func (ctx *c64bmp) Width() int {
return len(ctx.pixmap)
}
func (ctx *c64bmp) Height() int {
return len(ctx.pixmap[0])
}
func (ctx *c64bmp) At(x, y int) color.Color {
return ctx.src.At(x, y)
}
func (ctx *c64bmp) PaletteAt(x, y int) color.Palette {
return ctx.palettes[x/4][y/8]
}
func (ctx *c64bmp) Set(x, y int, c color.Color) {
p := ctx.PaletteAt(x, y)
ctx.pixmap[x][y] = p.Index(c)
}
// Compute the various bitmap arrays from our arrays.
func colormap(bmp *c64bmp) []uint8 {
w, h := len(bmp.palettes), len(bmp.palettes[0])
result := make([]uint8, w*h)
i := 0
for y := 0; y < h; y++ {
for x := 0; x < w; x++ {
result[i] = uint8(C64.Index(bmp.palettes[x][y][3]))
i++
}
}
return result
}
func textmap(bmp *c64bmp) []uint8 {
w, h := len(bmp.palettes), len(bmp.palettes[0])
result := make([]uint8, w*h)
i := 0
for y := 0; y < h; y++ {
for x := 0; x < w; x++ {
c1 := uint8(C64.Index(bmp.palettes[x][y][1]))
c2 := uint8(C64.Index(bmp.palettes[x][y][2]))
result[i] = c1*16 + c2
i++
}
}
return result
}
func bitmap(bmp *c64bmp) []uint8 {
w, h := len(bmp.pixmap), len(bmp.pixmap[0])
// We're computing output size based on whole characters, so
// the result array's size is based on the palette array, not
// the pixmap. In a sensible input image these should be the
// same.
result := make([]uint8, len(bmp.palettes)*len(bmp.palettes[0])*8)
i := 0
for y := 0; y < h; y += 8 {
for x := 0; x < w; x += 4 {
for cy := 0; cy < 8; cy++ {
var v uint8 = 0
if y+cy < h {
for cx := 0; cx < 4; cx++ {
v = v * 4
if x+cx < w {
v += uint8(bmp.pixmap[x+cx][y+cy])
}
}
}
result[i] = v
i++
}
}
}
return result
} | c64mcbmp.go | 0.502686 | 0.417984 | c64mcbmp.go | starcoder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.